2015-08-10 17:31:16 +02:00
|
|
|
#!/usr/bin/env python
|
2016-02-29 00:34:27 +01:00
|
|
|
#
|
|
|
|
# Copyright 2015-2016 Thomas Martitz <kugel@rockbox.org>
|
|
|
|
#
|
|
|
|
# This program is free software; you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation; either version 2 of the License, or
|
|
|
|
# (at your option) any later version.
|
|
|
|
#
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with this program; if not, write to the Free Software
|
|
|
|
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
|
|
|
|
# MA 02110-1301, USA.
|
2015-08-10 17:31:16 +02:00
|
|
|
|
|
|
|
import os
|
|
|
|
import sys
|
|
|
|
import re
|
|
|
|
from lxml import etree
|
|
|
|
from optparse import OptionParser
|
|
|
|
|
2016-02-28 04:49:51 +01:00
|
|
|
|
2015-08-10 17:31:16 +02:00
|
|
|
def normalize_text(s):
    r"""
    Normalizes whitespace in text.

    Each newline becomes a single space and surrounding whitespace is
    trimmed.

    >>> normalize_text("asd xxx")
    'asd xxx'
    >>> normalize_text(" asd\nxxx ")
    'asd xxx'
    """
    return " ".join(s.split("\n")).strip()
|
2015-08-10 17:31:16 +02:00
|
|
|
|
2016-02-28 04:49:51 +01:00
|
|
|
|
2016-02-07 09:52:55 -08:00
|
|
|
# Matches a C++-style qualifier ("Identifier::") in front of a name.
CXX_NAMESPACE_RE = re.compile(r'[_a-zA-Z][_0-9a-zA-Z]*::')


def fix_definition(s):
    """
    Removes C++ name qualifications from some definitions.

    For example:

    >>> fix_definition("bool flag")
    'bool flag'
    >>> fix_definition("bool FooBar::flag")
    'bool flag'
    >>> fix_definition("void(* _GeanyObjectClass::project_open) (GKeyFile *keyfile)")
    'void(* project_open) (GKeyFile *keyfile)'
    """
    # Strip every "Namespace::" prefix; everything else is kept verbatim.
    return CXX_NAMESPACE_RE.sub("", s)
|
2015-08-10 17:31:16 +02:00
|
|
|
|
|
|
|
|
|
|
|
class AtDoc(object):
    """Collects gtk-doc metadata from doxygen @-commands.

    cb() is invoked once per doxygen command encountered while flattening
    an XML element; it accumulates gtk-doc annotations, records the @since
    value and may rewrite a command into gtk-doc markup (returned string).
    """

    def __init__(self):
        self.retval = None  # reserved slot for a return-value member
        self.since = ""     # value of the @since command, if seen
        self.annot = []     # accumulated gtk-doc annotations, e.g. ["skip"]

    def cb(self, type, str):
        """Handle one doxygen command *type* with argument text *str*.

        Returns replacement text to splice into the output, or "" when the
        command only updates internal state. NOTE: the parameter names
        shadow the builtins but are kept for backward compatibility.
        """
        if (type == "param"):
            # A new parameter begins: annotations collected so far belonged
            # to the previous item, so reset the accumulator. (The previous
            # code also computed str.split(" ", 2) here but never used it;
            # that dead assignment has been removed.)
            self.annot = []
        elif (type == "return"):
            self.annot = []
        elif (type == "since"):
            self.since = str.rstrip()
        elif type in ("geany:nullable",
                      "geany:optional",
                      "geany:out",
                      "geany:skip",
                      "geany:closure",
                      "geany:destroy"):
            # Bare annotations: keep only the part after the "geany:" prefix.
            self.annot.append(type.split(":")[1])
        elif type in ("geany:array",
                      "geany:transfer",
                      "geany:element-type",
                      "geany:scope"):
            # Annotations carrying an argument, e.g. "transfer full".
            type = type.split(":")[1]
            if len(str):
                str = " " + str
            self.annot.append("%s%s" % (type, str))
        elif (type == "see"):
            return "See " + str
        elif type in ("a", "c") and str in ("NULL", "TRUE", "FALSE"):
            # FIXME: some of Geany does @a NULL instead of @c NULL
            return "%" + str
        elif (type == "a"):
            return "@" + str
        else:
            return str

        # State-only commands produce no output text.
        return ""
|
2015-08-10 17:31:16 +02:00
|
|
|
|
2016-02-28 04:49:51 +01:00
|
|
|
|
2015-08-10 17:31:16 +02:00
|
|
|
class DoxygenProcess(object):
    """Flattens doxygen XML description elements into gtk-doc text.

    A fresh AtDoc collector is created for every call to process_element();
    the get_*() accessors expose what that collector gathered.
    """

    def __init__(self):
        # AtDoc collector for the element currently being processed.
        self.at = None

    # http://stackoverflow.com/questions/4624062/get-all-text-inside-a-tag-in-lxml
    @staticmethod
    def stringify_children(node):
        """Return all text inside *node*, including serialized child tags."""
        from lxml.etree import tostring
        from itertools import chain
        parts = ([node.text] +
                 list(chain(*([c.text, tostring(c).decode("utf-8"), c.tail] for c in node.getchildren()))) +
                 [node.tail])
        # filter removes possible Nones in texts and tails
        return "".join(filter(None, parts))

    def get_program_listing(self, xml):
        """Convert a doxygen <programlisting> into a gtk-doc |[ ... ]| block."""
        from lxml.etree import tostring
        arr = ["", "|[<!-- language=\"C\" -->"]
        for l in xml.getchildren():
            if (l.tag == "codeline"):
                # a codeline is of the form
                # <highlight class="normal">GeanyDocument<sp/>*doc<sp/>=<sp/>...;</highlight>
                # <sp/> tags must be replaced with spaces, then just use the text
                h = l.find("highlight")
                if h is not None:
                    html = tostring(h).decode("utf-8")
                    html = html.replace("<sp/>", " ")
                    arr.append(" " + tostring(etree.HTML(html), method="text").decode("utf-8"))
        arr.append("]|")
        return "\n".join(arr)

    def join_annot(self):
        """Join collected annotations, e.g. '(skip) (transfer full): ', or ''."""
        s = " ".join(map(lambda x: "(%s)" % x, self.at.annot))
        return s + ": " if s else ""

    def process_element(self, xml):
        """Flatten *xml* into gtk-doc text; resets the AtDoc state first."""
        self.at = AtDoc()
        s = self.__process_element(xml)
        return s

    def get_extra(self):
        """Return the annotation string gathered by the last process_element()."""
        return self.join_annot()

    def get_return(self):
        """Return the collected return-value member (may be None)."""
        return self.at.retval

    def get_since(self):
        """Return the @since value gathered by the last process_element()."""
        return self.at.since

    def __process_element(self, xml):
        """Recursively flatten *xml*.

        NOTE: the checks below are deliberately independent ifs (not elif)
        and their order matters: a child's own text is emitted before its
        tail, and tails are appended regardless of which tag matched.
        """
        s = ""

        if xml.text:
            s += xml.text
        for n in xml.getchildren():
            if n.tag == "emphasis":
                s += self.at.cb("a", self.__process_element(n))
            if n.tag == "computeroutput":
                s += self.at.cb("c", self.__process_element(n))
            if n.tag == "itemizedlist":
                s += "\n" + self.__process_element(n)
            if n.tag == "listitem":
                s += " - " + self.__process_element(n)
            if n.tag == "para":
                s += self.__process_element(n) + "\n"
            if n.tag == "ref":
                s += n.text if n.text else ""
            if n.tag == "simplesect":
                # kind is e.g. "return", "since", "see" — routed through AtDoc.
                ss = self.at.cb(n.get("kind"), self.__process_element(n))
                s += ss + "\n" if ss else ""
            if n.tag == "programlisting":
                s += self.get_program_listing(n)
            if n.tag == "xrefsect":
                s += self.__process_element(n)
            if n.tag == "xreftitle":
                s += self.__process_element(n) + ": "
            if n.tag == "xrefdescription":
                s += self.__process_element(n)
            if n.tag == "ulink":
                s += self.__process_element(n)
            if n.tag == "linebreak":
                s += "\n"
            if n.tag == "ndash":
                s += "--"
            # workaround for doxygen bug #646002
            if n.tag == "htmlonly":
                s += ""
            if n.tail:
                s += n.tail
            if n.tag.startswith("param"):
                pass  # parameters are handled separately in DoxyFunction::from_memberdef()
        return s
|
2015-08-10 17:31:16 +02:00
|
|
|
|
2016-02-28 04:49:51 +01:00
|
|
|
|
2015-08-10 17:31:16 +02:00
|
|
|
class DoxyMember(object):
    """A single documented member: struct field, enum value or parameter."""

    def __init__(self, name, brief, extra=""):
        # Plain data holder; grouped assignment keeps the trio together.
        self.name, self.brief, self.extra = name, brief, extra
|
2015-08-10 17:31:16 +02:00
|
|
|
|
|
|
|
|
|
|
|
class DoxyElement(object):
    """Base class for a documented C element (function, struct, enum, ...).

    Holds the element's name, its C definition text and the pieces of
    documentation (brief, detail, members, since, extra annotations,
    return value) gathered from doxygen XML, and renders them as a
    gtk-doc comment via to_gtkdoc().
    """

    def __init__(self, name, definition, **kwargs):
        self.name = name
        self.definition = definition
        self.brief = kwargs.get('brief', "")        # one-line description
        self.detail = kwargs.get('detail', "")      # long description
        self.members = kwargs.get('members', [])    # list of DoxyMember
        self.since = kwargs.get('since', "")        # @since version string
        self.extra = kwargs.get('extra', "")        # gtk-doc annotations
        self.retval = kwargs.get('retval', None)    # DoxyMember for the return

    def is_documented(self):
        """Return True if the element has a non-empty brief description."""
        # Simplified from an `if ...: return True / return False` chain.
        return normalize_text(self.brief) != ""

    def add_brief(self, xml):
        """Set the brief description (and annotations) from doxygen XML."""
        proc = DoxygenProcess()
        self.brief = proc.process_element(xml)
        self.extra += proc.get_extra()

    def add_detail(self, xml):
        """Set the detailed description, annotations and @since from XML."""
        proc = DoxygenProcess()
        self.detail = proc.process_element(xml)
        self.extra += proc.get_extra()
        self.since = proc.get_since()

    def add_member(self, xml):
        """Append a member (struct field / enum value) parsed from XML."""
        name = xml.find("name").text
        proc = DoxygenProcess()
        brief = proc.process_element(xml.find("briefdescription"))
        # optional doxygen command output appears within <detaileddescription />
        proc.process_element(xml.find("detaileddescription"))
        self.members.append(DoxyMember(name, normalize_text(brief), proc.get_extra()))

    def add_param(self, xml):
        """Append a function parameter parsed from a <parameteritem>."""
        name = xml.find("parameternamelist").find("parametername").text
        proc = DoxygenProcess()
        brief = proc.process_element(xml.find("parameterdescription"))
        self.members.append(DoxyMember(name, normalize_text(brief), proc.get_extra()))

    def add_return(self, xml):
        """Record the return-value description parsed from XML."""
        proc = DoxygenProcess()
        brief = proc.process_element(xml)
        self.retval = DoxyMember("ret", normalize_text(brief), proc.get_extra())

    def to_gtkdoc(self):
        """Render this element as a gtk-doc comment block (trailing newline)."""
        s = []
        s.append("/**")
        s.append(" * %s: %s" % (self.name, self.extra))
        for p in self.members:
            s.append(" * @%s: %s %s" % (p.name, p.extra, p.brief))
        s.append(" *")
        s.append(" * %s" % self.brief.replace("\n", "\n * "))
        s.append(" *")
        s.append(" * %s" % self.detail.replace("\n", "\n * "))
        s.append(" *")
        if self.retval:
            s.append(" * Returns: %s %s" % (self.retval.extra, self.retval.brief))
        if self.since:
            s.append(" *")
            s.append(" * Since: %s" % self.since)
        s.append(" */")
        s.append("")
        return "\n".join(s)
|
2015-08-10 17:31:16 +02:00
|
|
|
|
|
|
|
|
2016-02-28 04:49:51 +01:00
|
|
|
class DoxyTypedef(DoxyElement):
    """DoxyElement specialization for a typedef."""

    @staticmethod
    def from_memberdef(xml):
        """Build a DoxyTypedef from a doxygen <memberdef> XML node."""
        definition = normalize_text(xml.find("definition").text) + ";"
        return DoxyTypedef(xml.find("name").text, definition)
|
2015-08-10 17:31:16 +02:00
|
|
|
|
|
|
|
|
2016-02-28 04:49:51 +01:00
|
|
|
class DoxyEnum(DoxyElement):
    """DoxyElement specialization for an enum."""

    @staticmethod
    def from_memberdef(xml):
        """Build a DoxyEnum, including all of its values, from <memberdef>."""
        name = xml.find("name").text
        values = xml.findall("enumvalue")

        # Render the C definition, one "\tNAME [= init]," line per value.
        lines = ["typedef enum {"]
        for value in values:
            init = value.find("initializer")
            suffix = " " + init.text if init is not None else ""
            lines.append("\t%s%s," % (value.find("name").text, suffix))
        lines.append("} %s;" % name)

        enum = DoxyEnum(name, "\n".join(lines) + "\n")
        enum.add_brief(xml.find("briefdescription"))
        for value in values:
            enum.add_member(value)
        return enum
|
2015-08-10 17:31:16 +02:00
|
|
|
|
|
|
|
|
2016-02-28 04:49:51 +01:00
|
|
|
class DoxyStruct(DoxyElement):
    """DoxyElement specialization for a struct."""

    @staticmethod
    def from_compounddef(xml, typedefs=None):
        """Build a DoxyStruct, including public members, from <compounddef>.

        The typedefs parameter is unused by the current implementation; its
        default was changed from a mutable list literal ([]) to None to
        avoid the shared-mutable-default pitfall, which is backward
        compatible since the value is never read.
        """
        name = xml.find("compoundname").text
        d = "struct %s {\n" % name
        memberdefs = xml.xpath(".//sectiondef[@kind='public-attrib']/memberdef")
        for p in memberdefs:
            # workaround for struct members. g-ir-scanner can't properly map struct members
            # (beginning with struct GeanyFoo) to the typedef and assigns a generic type for them
            # thus we fix that up here and enforce usage of the typedef. These are written
            # out first, before any struct definition, for this reason
            # Exception: there are no typedefs for GeanyFooPrivate so skip those. Their exact
            # type isn't needed anyway
            s = fix_definition(p.find("definition").text).lstrip()
            proc = DoxygenProcess()
            brief = proc.process_element(p.find("briefdescription"))
            # members without any brief description are marked private
            private = (normalize_text(brief) == "")
            words = s.split()
            if (words[0] == "struct"):
                if not (words[1].endswith("Private") or words[1].endswith("Private*")):
                    # drop the leading "struct" keyword to force the typedef name
                    s = " ".join(words[1:])
            d += "\t/*< %s >*/\n\t%s;\n" % ("private" if private else "public", s)

        d += "};\n"
        e = DoxyStruct(name, d)
        e.add_brief(xml.find("briefdescription"))
        for p in memberdefs:
            e.add_member(p)
        return e
|
2015-08-10 17:31:16 +02:00
|
|
|
|
|
|
|
|
2016-02-28 04:49:51 +01:00
|
|
|
class DoxyFunction(DoxyElement):
    """DoxyElement specialization for a function."""

    @staticmethod
    def from_memberdef(xml):
        """Build a DoxyFunction (brief, detail, params, return) from XML."""
        name = xml.find("name").text
        # Definition plus argument list, whitespace-normalized, e.g.
        # "gint foo (gint a, gint b);"
        signature = "%s %s;" % (normalize_text(xml.find("definition").text),
                                xml.find("argsstring").text)
        func = DoxyFunction(name, normalize_text(signature))

        func.add_brief(xml.find("briefdescription"))
        func.add_detail(xml.find("detaileddescription"))
        for item in xml.xpath(".//detaileddescription/*/parameterlist[@kind='param']/parameteritem"):
            func.add_param(item)
        ret = xml.xpath(".//detaileddescription/*/simplesect[@kind='return']")
        if ret:
            func.add_return(ret[0])
        return func
|
2015-08-10 17:31:16 +02:00
|
|
|
|
2016-02-07 09:46:57 -08:00
|
|
|
|
2016-02-28 04:49:51 +01:00
|
|
|
def main(args):
    """Convert Doxygen-generated XML into gtk-doc annotated C source.

    Reads the doxygen XML directory (positional argument or --xmldir),
    combines it via combine.xslt, and writes gtk-doc comment blocks plus
    definitions for enums, typedefs, structs and functions. sci_* wrappers
    can additionally be written to a separate file (--sci-output).

    Returns 0 on success, 1 on usage or I/O errors.
    """
    xml_dir = None
    outfile = None
    scioutfile = None

    parser = OptionParser(usage="usage: %prog [options] XML_DIR")
    parser.add_option("--xmldir", metavar="DIRECTORY", help="Path to Doxygen-generated XML files",
                      action="store", dest="xml_dir")
    # BUGFIX: the help text was a copy-paste of --xmldir's; it now describes
    # the output directory. (NOTE: dest "outdir" is currently not used below.)
    parser.add_option("-d", "--outdir", metavar="DIRECTORY", help="Directory to write output files to",
                      action="store", dest="outdir", default=".")
    parser.add_option("-o", "--output", metavar="FILE", help="Write output to FILE",
                      action="store", dest="outfile")
    parser.add_option("--sci-output", metavar="FILE", help="Write output to FILE (only sciwrappers)",
                      action="store", dest="scioutfile")
    opts, args = parser.parse_args(args[1:])

    # BUGFIX: honour --xmldir (previously parsed but ignored) and fail
    # gracefully instead of raising IndexError when no directory is given.
    # The positional argument keeps precedence for backward compatibility.
    if args:
        xml_dir = args[0]
    elif opts.xml_dir:
        xml_dir = opts.xml_dir
    else:
        sys.stderr.write("no xml directory given\n")
        return 1

    if not (os.path.exists(xml_dir)):
        sys.stderr.write("invalid xml directory\n")
        return 1

    # combine.xslt merges doxygen's per-compound XML files into one tree.
    transform = etree.XSLT(etree.parse(os.path.join(xml_dir, "combine.xslt")))
    doc = etree.parse(os.path.join(xml_dir, "index.xml"))
    root = transform(doc)

    other = []
    enums = []
    typedefs = []

    # Select compounds whose name ends in ".c" / ".h" respectively.
    c_files = root.xpath(".//compounddef[@kind='file']/compoundname[substring(.,string-length(.)-1)='.c']/..")
    h_files = root.xpath(".//compounddef[@kind='file']/compoundname[substring(.,string-length(.)-1)='.h']/..")

    for f in h_files:
        if not (f.find("compoundname").text.endswith("private.h")):
            for n0 in f.xpath(".//*/memberdef[@kind='typedef' and @prot='public']"):
                # typedefs to enums are covered by the enum output below
                if not (DoxygenProcess.stringify_children(n0.find("type")).startswith("enum")):
                    e = DoxyTypedef.from_memberdef(n0)
                    typedefs.append(e)

            for n0 in f.xpath(".//*/memberdef[@kind='enum' and @prot='public']"):
                e = DoxyEnum.from_memberdef(n0)
                enums.append(e)

    for n0 in root.xpath(".//compounddef[@kind='struct' and @prot='public']"):
        e = DoxyStruct.from_compounddef(n0)
        other.append(e)

    for f in c_files:
        for n0 in f.xpath(".//*/memberdef[@kind='function' and @prot='public']"):
            e = DoxyFunction.from_memberdef(n0)
            other.append(e)

    if (opts.outfile):
        try:
            outfile = open(opts.outfile, "w+")
        except OSError as err:
            sys.stderr.write("failed to open \"%s\" for writing (%s)\n" % (opts.outfile, err.strerror))
            return 1
    else:
        outfile = sys.stdout

    if (opts.scioutfile):
        try:
            scioutfile = open(opts.scioutfile, "w+")
        except OSError as err:
            sys.stderr.write("failed to open \"%s\" for writing (%s)\n" % (opts.scioutfile, err.strerror))
            return 1
    else:
        scioutfile = outfile

    try:
        outfile.write("/*\n * Automatically generated file - do not edit\n */\n\n")
        outfile.write("#include \"gtkcompat.h\"\n")
        outfile.write("#include \"Scintilla.h\"\n")
        outfile.write("#include \"ScintillaWidget.h\"\n")
        if (scioutfile != outfile):
            scioutfile.write("/*\n * Automatically generated file - do not edit\n */\n\n")
            scioutfile.write("#include \"gtkcompat.h\"\n")
            scioutfile.write("#include \"Scintilla.h\"\n")
            scioutfile.write("#include \"ScintillaWidget.h\"\n")

        # write enums first, so typedefs to them are valid (as forward enum declaration
        # is invalid). It's fine as an enum can't contain reference to other types.
        for e in filter(lambda x: x.is_documented(), enums):
            outfile.write("\n\n")
            outfile.write(e.to_gtkdoc())
            outfile.write(e.definition)
            outfile.write("\n\n")

        # write typedefs second, they are possibly undocumented but still required (even
        # if they are documented, they must be written out without gtkdoc)
        for e in typedefs:
            outfile.write(e.definition)
            outfile.write("\n\n")

        # write the rest (structures, functions, ...)
        for e in filter(lambda x: x.is_documented(), other):
            outfile.write("\n\n")
            outfile.write(e.to_gtkdoc())
            outfile.write(e.definition)
            outfile.write("\n\n")
            if (e.name.startswith("sci_")):
                if (scioutfile != outfile):
                    scioutfile.write("\n\n")
                    scioutfile.write(e.to_gtkdoc())
                    scioutfile.write(e.definition)
                    scioutfile.write("\n\n")

    except BrokenPipeError:
        # probably piped to head or tail
        return 0

    return 0
|
2016-02-07 09:46:57 -08:00
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    sys.exit(main(sys.argv))
|