author     dimitri <dimitri@afe2bf4a-e733-0410-8a33-86f594647bc7>  2004-02-22 19:57:25 (GMT)
committer  dimitri <dimitri@afe2bf4a-e733-0410-8a33-86f594647bc7>  2004-02-22 19:57:25 (GMT)
commit     5edb3c85109d09e5fa43529bf8b584382f7501a5 (patch)
tree       bb6a2ec7453702ed2fe2348793b0bddf5d80042a
parent     a9b8e48237d4094095b91031ac7c9fb0f4cc028e (diff)
Release-1.3.6-20040222
-rw-r--r--  INSTALL                                         4
-rw-r--r--  README                                          4
-rw-r--r--  VERSION                                         2
-rw-r--r--  addon/doxmlparser/examples/metrics/main.cpp     3
-rwxr-xr-x  configure                                      35
-rw-r--r--  doc/Makefile.in                                 4
-rw-r--r--  doc/commands.doc                               28
-rw-r--r--  doc/config.doc                                  2
-rw-r--r--  doc/language.doc                              580
-rw-r--r--  doc/language.tpl                              105
-rw-r--r--  doc/maintainers.txt                            60
-rw-r--r--  doc/translator.bat                              1
-rw-r--r--  doc/translator.pl                            1381
-rw-r--r--  doc/translator.py                            2633
-rw-r--r--  packages/rpm/doxygen.spec                     153
-rw-r--r--  src/classdef.cpp                              106
-rw-r--r--  src/cmdmapper.cpp                               2
-rw-r--r--  src/cmdmapper.h                                 4
-rw-r--r--  src/config.l                                    4
-rw-r--r--  src/declinfo.l                                  3
-rw-r--r--  src/docparser.cpp                              19
-rw-r--r--  src/docparser.h                                 2
-rw-r--r--  src/doctokenizer.h                              1
-rw-r--r--  src/doctokenizer.l                             15
-rw-r--r--  src/doxygen.cpp                               106
-rw-r--r--  src/entry.cpp                                   5
-rw-r--r--  src/entry.h                                    67
-rw-r--r--  src/htmldocvisitor.cpp                          1
-rw-r--r--  src/htmlgen.cpp                                33
-rw-r--r--  src/latexdocvisitor.cpp                         1
-rw-r--r--  src/mandocvisitor.cpp                           3
-rw-r--r--  src/mangen.cpp                                  2
-rw-r--r--  src/memberdef.cpp                              29
-rw-r--r--  src/perlmodgen.cpp                              1
-rw-r--r--  src/printdocvisitor.h                           2
-rw-r--r--  src/rtfdocvisitor.cpp                           1
-rw-r--r--  src/scanner.l                                  64
-rw-r--r--  src/tagreader.cpp                              25
-rw-r--r--  src/util.cpp                                   21
-rw-r--r--  src/xmldocvisitor.cpp                           5
40 files changed, 2551 insertions(+), 2966 deletions(-)
diff --git a/INSTALL b/INSTALL
index 1b79237..5949cd1 100644
--- a/INSTALL
+++ b/INSTALL
@@ -1,7 +1,7 @@
-DOXYGEN Version 1.3.6
+DOXYGEN Version 1.3.6-20040222
Please read the installation section of the manual
(http://www.doxygen.org/install.html) for instructions.
--------
-Dimitri van Heesch (12 February 2004)
+Dimitri van Heesch (22 February 2004)
diff --git a/README b/README
index bf5c967..89c5297 100644
--- a/README
+++ b/README
@@ -1,4 +1,4 @@
-DOXYGEN Version 1.3.6
+DOXYGEN Version 1.3.6-20040222
Please read INSTALL for compilation instructions.
@@ -17,4 +17,4 @@ to subscribe to the lists or to visit the archives.
Enjoy,
-Dimitri van Heesch (dimitri@stack.nl) (12 February 2004)
+Dimitri van Heesch (dimitri@stack.nl) (22 February 2004)
diff --git a/VERSION b/VERSION
index 95b25ae..93b97f6 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.3.6
+1.3.6-20040222
diff --git a/addon/doxmlparser/examples/metrics/main.cpp b/addon/doxmlparser/examples/metrics/main.cpp
index 2129988..2c4bee3 100644
--- a/addon/doxmlparser/examples/metrics/main.cpp
+++ b/addon/doxmlparser/examples/metrics/main.cpp
@@ -204,7 +204,8 @@ int main(int argc,char **argv)
{
numParams++;
}
- if (strcmp(mem->typeString()->latin1(),"void")!=0)
+ const char *type = mem->typeString()->latin1();
+ if (type && strcmp(type, "void"))
{
numParams++; // count non-void return types as well
}
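
Note on the hunk above: it guards against latin1() returning a null pointer before the string comparison, since calling strcmp() on a null pointer is undefined behaviour. A minimal stand-alone sketch of the same guard follows; the Member type and function name are invented for illustration and are not the real doxmlparser interface.

#include <cstdio>
#include <cstring>

// Hypothetical stand-in for a parsed member whose type string may be absent.
struct Member
{
    const char *type;                        // may be 0 for untyped members
    const char *typeString() const { return type; }
};

static int countReturn(const Member &mem, int numParams)
{
    const char *type = mem.typeString();
    // Check the pointer first: strcmp(0, "void") would be undefined behaviour.
    if (type && strcmp(type, "void") != 0)
    {
        numParams++;                         // count non-void return types as well
    }
    return numParams;
}

int main()
{
    Member a = { "int" };
    Member b = { 0 };
    Member c = { "void" };
    printf("%d %d %d\n", countReturn(a, 0), countReturn(b, 0), countReturn(c, 0)); // prints: 1 0 0
    return 0;
}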
diff --git a/configure b/configure
index 1d0e39f..facf9b7 100755
--- a/configure
+++ b/configure
@@ -224,30 +224,33 @@ fi
#- check for qt --------------------------------------------------------------
if test "$f_wizard" = YES; then
- echo -n " Checking for Qt..."
- if test -d "/usr/lib/qt3/lib"; then
- if test -d "/usr/lib/qt3/include"; then
- if test -x "/usr/lib/qt3/bin/moc"; then
- QTDIR="/usr/lib/qt3";
+ if test -z "$QTDIR"; then
+ echo " QTDIR environment variable not set!"
+ echo -n " Checking for Qt..."
+ if test -d "/usr/lib/qt3/lib"; then
+ if test -d "/usr/lib/qt3/include"; then
+ if test -x "/usr/lib/qt3/bin/moc"; then
+ QTDIR="/usr/lib/qt3";
+ fi
fi
fi
- fi
- if test -d "/usr/lib/qt2/lib"; then
- if test -d "/usr/lib/qt2/include"; then
- if test -x "/usr/lib/qt2/bin/moc"; then
- QTDIR="/usr/lib/qt2";
+ if test -d "/usr/lib/qt2/lib"; then
+ if test -d "/usr/lib/qt2/include"; then
+ if test -x "/usr/lib/qt2/bin/moc"; then
+ QTDIR="/usr/lib/qt2";
+ fi
fi
fi
- fi
- if test -d "/usr/lib/qt/lib"; then
- if test -d "/usr/lib/qt/include"; then
- if test -x "/usr/lib/qt/bin/moc"; then
- QTDIR="/usr/lib/qt";
+ if test -d "/usr/lib/qt/lib"; then
+ if test -d "/usr/lib/qt/include"; then
+ if test -x "/usr/lib/qt/bin/moc"; then
+ QTDIR="/usr/lib/qt";
+ fi
fi
fi
fi
if test -z "$QTDIR"; then
- echo "QTDIR not set!"
+ echo "QTDIR not set and Qt not found at standard locations!"
echo
echo "tmake requires the QTDIR environment variable to be set."
echo "check your Qt installation!"
diff --git a/doc/Makefile.in b/doc/Makefile.in
index 3a0dd7b..1c50c08 100644
--- a/doc/Makefile.in
+++ b/doc/Makefile.in
@@ -30,7 +30,7 @@ clean:
language: language.doc
-language.doc: $(wildcard ../src/translator*.h) maintainers.txt language.tpl translator.pl
- $(ENV) VERSION=$(VERSION) DOXYGEN_DOCDIR=. $(PERL) translator.pl
+language.doc: $(wildcard ../src/translator*.h) maintainers.txt language.tpl translator.py
+	python translator.py
FORCE:
diff --git a/doc/commands.doc b/doc/commands.doc
index 44ad5c4..b315af3 100644
--- a/doc/commands.doc
+++ b/doc/commands.doc
@@ -68,6 +68,7 @@ documentation:
\refitem cmdendif \\endif
\refitem cmdendlatexonly \\endlatexonly
\refitem cmdendlink \\endlink
+\refitem cmdendmanonly \\endmanonly
\refitem cmdendverbatim \\endverbatim
\refitem cmdendxmlonly \\endxmlonly
\refitem cmdenum \\enum
@@ -94,6 +95,7 @@ documentation:
\refitem cmdline \\line
\refitem cmdlink \\link
\refitem cmdmainpage \\mainpage
+\refitem cmdmanonly \\manonly
\refitem cmdn \\n
\refitem cmdname \\name
\refitem cmdnamespace \\namespace
@@ -1627,6 +1629,14 @@ class C {};
\sa section \ref cmdlatexonly "\\latexonly".
<hr>
+\section cmdendmanonly \endmanonly
+
+ \addindex \\endmanonly
+ Ends a block of text that was started with a \\manonly command.
+
+ \sa section \ref cmdmanonly "\\manonly".
+
+<hr>
\section cmdendverbatim \endverbatim
\addindex \\endverbatim
@@ -1685,7 +1695,7 @@ class C {};
environment variables (like \$(HOME) ) are resolved inside a
HTML-only block.
- \sa section \ref cmdhtmlonly "\\htmlonly" and section
+ \sa section \ref cmdmanonly "\\manonly" and section
\ref cmdlatexonly "\\latexonly".
<hr>
@@ -1764,6 +1774,22 @@ class C {};
and section \ref cmdhtmlonly "\\htmlonly".
<hr>
+\section cmdmanonly \manonly
+
+ \addindex \\manonly
+ Starts a block of text that will be included verbatim in the
+ generated MAN documentation only. The block ends with an
+ \\endmanonly command.
+
+ This command can be used to include groff code directly into
+ MAN pages. You can use the \\htmlonly/\\endhtmlonly and
+ \\latexonly/\\endlatexonly pairs to provide proper
+ HTML and \f$\mbox{\LaTeX}\f$ alternatives.
+
+ \sa section \ref cmdhtmlonly "\\htmlonly" and section
+ \ref cmdlatexonly "\\latexonly".
+
+<hr>
\section cmdli \li { item-description }
\addindex \\li
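
As an aside to the commands.doc changes above: a hedged sketch of how the new \manonly ... \endmanonly pair could be combined with the existing HTML and LaTeX blocks in a C++ source comment. The function name and the groff markup are invented for illustration; only the command names come from the patch.

/*! \brief Returns the name of the configuration file.
 *
 *  \manonly
 *  .B doxygen
 *  reads its settings from the file returned here.
 *  \endmanonly
 *  \htmlonly
 *  <b>doxygen</b> reads its settings from the file returned here.
 *  \endhtmlonly
 *  \latexonly
 *  \textbf{doxygen} reads its settings from the file returned here.
 *  \endlatexonly
 */
const char *configFileName();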
diff --git a/doc/config.doc b/doc/config.doc
index 47fdf82..ca6105d 100644
--- a/doc/config.doc
+++ b/doc/config.doc
@@ -676,7 +676,7 @@ function's detailed documentation block.
in the directories. If left blank the following patterns are tested:
<code>
*.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx *.hpp
- *.h++ *.idl *.odl *.cs
+ *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm
</code>
\anchor cfg_recursive
diff --git a/doc/language.doc b/doc/language.doc
index 98188f3..dcb3783 100644
--- a/doc/language.doc
+++ b/doc/language.doc
@@ -1,9 +1,5 @@
/******************************************************************************
- * Warning: this file was generated from the language.tpl template
- * and the maintainers.txt files by the translator.pl script.
- *
- * Do not edit this file. Edit the above mentioned files!
- *
+ * Do not edit this file. It was generated by the translator.py script.
*
* Copyright (C) 1997-2004 by Dimitri van Heesch.
*
@@ -16,23 +12,24 @@
* Documents produced by Doxygen are derivative works derived from the
* input used in their production; they are not affected by this license.
*
+ * $Id$
*/
/*! \page langhowto Internationalization
<h3>Support for multiple languages</h3>
-Doxygen has built-in support for multiple languages. This means
-that the text fragments that doxygen generates can be produced in
-languages other than English (the default) at configuration time.
+Doxygen has built-in support for multiple languages. This means that the
+text fragments generated by doxygen can be produced in languages other
+than English (the default). The output language is chosen through the
+configuration file (by default named Doxyfile).
-Currently (version 1.3.5-20040202), 30 languages
-are supported (sorted alphabetically):
-Brazilian Portuguese, Catalan, Chinese, Chinese Traditional, Croatian,
-Czech, Danish, Dutch, English, Finnish,
-French, German, Greek, Hungarian, Italian,
-Japanese, JapaneseEn, Korean, KoreanEn, Norwegian,
-Polish, Portuguese, Romanian, Russian, Serbian,
-Slovak, Slovene, Spanish, Swedish, and Ukrainian.
+Currently (version 1.3.6), 28 languages
+are supported (sorted alphabetically):
+Brazilian Portuguese, Catalan, Chinese, Chinese Traditional, Croatian,
+Czech, Danish, Dutch, English, Finnish, French, German, Greek,
+Hungarian, Italian, Japanese (+En), Korean (+En), Norwegian, Polish,
+Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish,
+Swedish, and Ukrainian.
The table of information related to the supported languages follows.
It is sorted by language alphabetically. The <b>Status</b> column
@@ -40,273 +37,279 @@ was generated from sources and shows approximately the last version
when the translator was updated.
\htmlonly
-<TABLE ALIGN=center CELLSPACING=0 CELLPADDING=0 BORDER=0>
-<TR BGCOLOR="#000000">
-<TD>
- <TABLE CELLSPACING=1 CELLPADDING=2 BORDER=0>
- <TR BGCOLOR="#4040c0">
- <TD ><b><font size=+1 color="#ffffff"> Language </font></b></TD>
- <TD ><b><font size=+1 color="#ffffff"> Maintainer </font></b></TD>
- <TD ><b><font size=+1 color="#ffffff"> Contact address </font>
- <font size=-2 color="#ffffff">(remove the NOSPAM.)</font></b></TD>
- <TD ><b><font size=+1 color="#ffffff"> Status </font></b></TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Brazilian Portuguese</TD>
- <TD>Fabio "FJTC" Jun Takada Chino</TD>
- <TD>chino@NOSPAM.icmc.sc.usp.br</TD>
- <TD>up-to-date</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Catalan</TD>
- <TD>Albert Mora</TD>
- <TD>amora@NOSPAM.iua.upf.es</TD>
- <TD>1.2.17</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Chinese</TD>
- <TD>Wei Liu<br>Wang Weihan</TD>
- <TD>liuwei@NOSPAM.asiainfo.com<br>wangweihan@NOSPAM.capinfo.com.cn</TD>
- <TD>1.2.13</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Chinese Traditional</TD>
- <TD>Daniel YC Lin<br>Gary Lee</TD>
- <TD>daniel@NOSPAM.twpda.com<br>garylee@NOSPAM.ecosine.com.tw</TD>
- <TD>up-to-date</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Croatian</TD>
- <TD>Boris Bralo</TD>
- <TD>boris.bralo@NOSPAM.zg.tel.hr</TD>
- <TD>up-to-date</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Czech</TD>
- <TD>Petr P&#x0159;ikryl</TD>
- <TD>prikrylp@NOSPAM.skil.cz</TD>
- <TD>up-to-date</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Danish</TD>
- <TD>Erik S&oslash;e S&oslash;rensen</TD>
- <TD>erik@NOSPAM.mail.nu</TD>
- <TD>up-to-date</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Dutch</TD>
- <TD>Dimitri van Heesch</TD>
- <TD>dimitri@NOSPAM.stack.nl</TD>
- <TD>up-to-date</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>English</TD>
- <TD>Dimitri van Heesch</TD>
- <TD>dimitri@NOSPAM.stack.nl</TD>
- <TD>up-to-date</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Finnish</TD>
- <TD>Olli Korhonen</TD>
- <TD>Olli.Korhonen@NOSPAM.ccc.fi</TD>
- <TD>obsolete</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>French</TD>
- <TD>Xavier Outhier</TD>
- <TD>xouthier@NOSPAM.yahoo.fr</TD>
- <TD>up-to-date</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>German</TD>
- <TD>Jens Seidel</TD>
- <TD>jensseidel@NOSPAM.users.sf.net</TD>
- <TD>up-to-date</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Greek</TD>
- <TD>Harry Kalogirou</TD>
- <TD>harkal@NOSPAM.rainbow.cs.unipi.gr</TD>
- <TD>1.2.11</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Hungarian</TD>
- <TD>F&ouml;ldv&aacute;ri Gy&ouml;rgy<br>&Aacute;kos Kiss</TD>
- <TD>foldvari@NOSPAM.diatronltd.com<br>akiss@NOSPAM.users.sourceforge.net</TD>
- <TD>up-to-date</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Italian</TD>
- <TD>Alessandro Falappa<br>Ahmed Aldo Faisal</TD>
- <TD>alessandro@NOSPAM.falappa.net<br>aaf23@NOSPAM.cam.ac.uk</TD>
- <TD>up-to-date</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Japanese</TD>
- <TD>Ryunosuke Satoh<br>Kenji Nagamatsu</TD>
- <TD>sun594@NOSPAM.hotmail.com<br>naga@NOSPAM.joyful.club.ne.jp</TD>
- <TD>1.3.3</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>JapaneseEn</TD>
- <TD>unknown</TD>
- <TD>unknown</TD>
- <TD>obsolete</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Korean</TD>
- <TD>Richard Kim</TD>
- <TD>ryk@NOSPAM.dspwiz.com</TD>
- <TD>up-to-date</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>KoreanEn</TD>
- <TD>unknown</TD>
- <TD>unknown</TD>
- <TD>obsolete</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Norwegian</TD>
- <TD>Lars Erik Jordet</TD>
- <TD>lej@NOSPAM.circuitry.no</TD>
- <TD>1.2.2</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Polish</TD>
- <TD>Piotr Kaminski<br>Grzegorz Kowal</TD>
- <TD>Piotr.Kaminski@NOSPAM.ctm.gdynia.pl<br>g_kowal@NOSPAM.poczta.onet.pl</TD>
- <TD>1.3</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Portuguese</TD>
- <TD>Rui Godinho Lopes</TD>
- <TD>ruiglopes@NOSPAM.yahoo.com</TD>
- <TD>1.3.3</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Romanian</TD>
- <TD>Alexandru Iosup</TD>
- <TD>aiosup@NOSPAM.yahoo.com</TD>
- <TD>1.2.16</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Russian</TD>
- <TD>Alexandr Chelpanov</TD>
- <TD>cav@NOSPAM.cryptopro.ru</TD>
- <TD>up-to-date</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Serbian</TD>
- <TD>Dejan Milosavljevic</TD>
- <TD>dmilos@NOSPAM.email.com</TD>
- <TD>up-to-date</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Slovak</TD>
- <TD>Stanislav Kudl&aacute;&#x010d;</TD>
- <TD>skudlac@NOSPAM.pobox.sk</TD>
- <TD>1.2.18</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Slovene</TD>
- <TD>Matjaz Ostroversnik</TD>
- <TD>matjaz.ostroversnik@NOSPAM.zrs-tk.si</TD>
- <TD>1.2.16</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Spanish</TD>
- <TD>Francisco Oltra Thennet</TD>
- <TD>foltra@NOSPAM.puc.cl</TD>
- <TD>1.3.3</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Swedish</TD>
- <TD>Mikael Hallin</TD>
- <TD>mikaelhallin@NOSPAM.yahoo.se</TD>
- <TD>1.3.3</TD>
- </TR>
- <TR BGCOLOR="#ffffff">
- <TD>Ukrainian</TD>
- <TD>Olexij Tkatchenko</TD>
- <TD>olexij.tkatchenko@NOSPAM.gmx.de</TD>
- <TD>1.2.11</TD>
- </TR>
- </TABLE>
-</TD>
-</TR>
-</TABLE>
+<table align=center cellspacing=0 cellpadding=0 border=0>
+<tr bgcolor="#000000">
+<td>
+ <table cellspacing=1 cellpadding=2 border=0>
+ <tr bgcolor="#4040c0">
+ <td ><b><font size=+1 color="#ffffff"> Language </font></b></td>
+ <td ><b><font size=+1 color="#ffffff"> Maintainer </font></b></td>
+ <td ><b><font size=+1 color="#ffffff"> Contact address </font>
+ <font size=-2 color="#ffffff">(remove the NOSPAM.)</font></b></td>
+ <td ><b><font size=+1 color="#ffffff"> Status </font></b></td>
+ </tr>
+ <!-- table content begin -->
+
+ <tr bgcolor="#ffffff">
+ <td>Brazilian Portuguese</td>
+ <td>Fabio "FJTC" Jun Takada Chino</td>
+ <td>chino@NOSPAM.icmc.sc.usp.br</td>
+ <td>up-to-date</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Catalan</td>
+ <td>Albert Mora</td>
+ <td>amora@NOSPAM.iua.upf.es</td>
+ <td>1.2.17</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Chinese</td>
+ <td>Wei Liu<br>Wang Weihan</td>
+ <td>liuwei@NOSPAM.asiainfo.com<br>wangweihan@NOSPAM.capinfo.com.cn</td>
+ <td>1.2.13</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Chinese Traditional</td>
+ <td>Daniel YC Lin<br>Gary Lee</td>
+ <td>daniel@NOSPAM.twpda.com<br>garylee@NOSPAM.ecosine.com.tw</td>
+ <td>up-to-date</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Croatian</td>
+ <td>Boris Bralo</td>
+ <td>boris.bralo@NOSPAM.zg.tel.hr</td>
+ <td>up-to-date</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Czech</td>
+ <td>Petr P&#x0159;ikryl</td>
+ <td>prikrylp@NOSPAM.skil.cz</td>
+ <td>up-to-date</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Danish</td>
+ <td>Erik S&oslash;e S&oslash;rensen</td>
+ <td>erik@NOSPAM.mail.nu</td>
+ <td>up-to-date</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Dutch</td>
+ <td>Dimitri van Heesch</td>
+ <td>dimitri@NOSPAM.stack.nl</td>
+ <td>up-to-date</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>English</td>
+ <td>Dimitri van Heesch</td>
+ <td>dimitri@NOSPAM.stack.nl</td>
+ <td>up-to-date</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Finnish</td>
+ <td>Olli Korhonen</td>
+ <td>Olli.Korhonen@NOSPAM.ccc.fi</td>
+ <td>obsolete</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>French</td>
+ <td>Xavier Outhier</td>
+ <td>xouthier@NOSPAM.yahoo.fr</td>
+ <td>up-to-date</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>German</td>
+ <td>Jens Seidel</td>
+ <td>jensseidel@NOSPAM.users.sf.net</td>
+ <td>up-to-date</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Greek</td>
+ <td>Harry Kalogirou</td>
+ <td>harkal@NOSPAM.rainbow.cs.unipi.gr</td>
+ <td>1.2.11</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Hungarian</td>
+ <td>F&ouml;ldv&aacute;ri Gy&ouml;rgy<br>&Aacute;kos Kiss</td>
+ <td>foldvari@NOSPAM.diatronltd.com<br>akiss@NOSPAM.users.sourceforge.net</td>
+ <td>up-to-date</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Italian</td>
+ <td>Alessandro Falappa<br>Ahmed Aldo Faisal</td>
+ <td>alessandro@NOSPAM.falappa.net<br>aaf23@NOSPAM.cam.ac.uk</td>
+ <td>up-to-date</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Japanese</td>
+ <td>Ryunosuke Satoh<br>Kenji Nagamatsu</td>
+ <td>sun594@NOSPAM.hotmail.com<br>naga@NOSPAM.joyful.club.ne.jp</td>
+ <td>1.3.3</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>JapaneseEn</td>
+ <td>see the Japanese language</td>
+ <td>&nbsp;</td>
+ <td>English based</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Korean</td>
+ <td>Richard Kim</td>
+ <td>ryk@NOSPAM.dspwiz.com</td>
+ <td>up-to-date</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>KoreanEn</td>
+ <td>see the Korean language</td>
+ <td>&nbsp;</td>
+ <td>English based</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Norwegian</td>
+ <td>Lars Erik Jordet</td>
+ <td>lej@NOSPAM.circuitry.no</td>
+ <td>1.2.2</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Polish</td>
+ <td>Piotr Kaminski<br>Grzegorz Kowal</td>
+ <td>Piotr.Kaminski@NOSPAM.ctm.gdynia.pl<br>g_kowal@NOSPAM.poczta.onet.pl</td>
+ <td>1.3</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Portuguese</td>
+ <td>Rui Godinho Lopes</td>
+ <td>ruiglopes@NOSPAM.yahoo.com</td>
+ <td>1.3.3</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Romanian</td>
+ <td>Alexandru Iosup</td>
+ <td>aiosup@NOSPAM.yahoo.com</td>
+ <td>1.2.16</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Russian</td>
+ <td>Alexandr Chelpanov</td>
+ <td>cav@NOSPAM.cryptopro.ru</td>
+ <td>up-to-date</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Serbian</td>
+ <td>Dejan Milosavljevic</td>
+ <td>dmilos@NOSPAM.email.com</td>
+ <td>up-to-date</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Slovak</td>
+ <td>Stanislav Kudl&aacute;&#x010d;</td>
+ <td>skudlac@NOSPAM.pobox.sk</td>
+ <td>1.2.18</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Slovene</td>
+ <td>Matjaz Ostroversnik</td>
+ <td>matjaz.ostroversnik@NOSPAM.zrs-tk.si</td>
+ <td>1.2.16</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Spanish</td>
+ <td>Francisco Oltra Thennet</td>
+ <td>foltra@NOSPAM.puc.cl</td>
+ <td>up-to-date</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Swedish</td>
+ <td>Mikael Hallin</td>
+ <td>mikaelhallin@NOSPAM.yahoo.se</td>
+ <td>1.3.3</td>
+ </tr>
+ <tr bgcolor="#ffffff">
+ <td>Ukrainian</td>
+ <td>Olexij Tkatchenko</td>
+ <td>olexij.tkatchenko@NOSPAM.gmx.de</td>
+ <td>1.2.11</td>
+ </tr>
+ <!-- table content end -->
+ </table>
+</td>
+</tr>
+</table>
\endhtmlonly
+
+
\latexonly
\begin{tabular}{|l|l|l|l|}
\hline
{\bf Language} & {\bf Maintainer} & {\bf Contact address} & {\bf Status} \\
\hline
+
\hline
- Brazilian Portuguese & Fabio "FJTC" Jun Takada Chino & {\tt chino@icmc.sc.usp.br} & up-to-date \\
+ Brazilian Portuguese & Fabio "FJTC" Jun Takada Chino & {\tt\tiny chino@icmc.sc.usp.br} & up-to-date \\
\hline
- Catalan & Albert Mora & {\tt amora@iua.upf.es} & 1.2.17 \\
+ Catalan & Albert Mora & {\tt\tiny amora@iua.upf.es} & 1.2.17 \\
\hline
- Chinese & Wei Liu & {\tt liuwei@asiainfo.com} & 1.2.13 \\
- & Wang Weihan & {\tt wangweihan@capinfo.com.cn} & \\
+ Chinese & Wei Liu & {\tt\tiny liuwei@asiainfo.com} & 1.2.13 \\
+ ~ & Wang Weihan & {\tt\tiny wangweihan@capinfo.com.cn} & ~ \\
\hline
- Chinese Traditional & Daniel YC Lin & {\tt daniel@twpda.com} & up-to-date \\
- & Gary Lee & {\tt garylee@ecosine.com.tw} & \\
+ Chinese Traditional & Daniel YC Lin & {\tt\tiny daniel@twpda.com} & up-to-date \\
+ ~ & Gary Lee & {\tt\tiny garylee@ecosine.com.tw} & ~ \\
\hline
- Croatian & Boris Bralo & {\tt boris.bralo@zg.tel.hr} & up-to-date \\
+ Croatian & Boris Bralo & {\tt\tiny boris.bralo@zg.tel.hr} & up-to-date \\
\hline
- Czech & Petr P\v{r}ikryl & {\tt prikrylp@skil.cz} & up-to-date \\
+ Czech & Petr P\v{r}ikryl & {\tt\tiny prikrylp@skil.cz} & up-to-date \\
\hline
- Danish & Erik S\o{}e S\o{}rensen & {\tt erik@mail.nu} & up-to-date \\
+ Danish & Erik S\o{}e S\o{}rensen & {\tt\tiny erik@mail.nu} & up-to-date \\
\hline
- Dutch & Dimitri van Heesch & {\tt dimitri@stack.nl} & up-to-date \\
+ Dutch & Dimitri van Heesch & {\tt\tiny dimitri@stack.nl} & up-to-date \\
\hline
- English & Dimitri van Heesch & {\tt dimitri@stack.nl} & up-to-date \\
+ English & Dimitri van Heesch & {\tt\tiny dimitri@stack.nl} & up-to-date \\
\hline
- Finnish & Olli Korhonen & {\tt Olli.Korhonen@ccc.fi} & obsolete \\
+ Finnish & Olli Korhonen & {\tt\tiny Olli.Korhonen@ccc.fi} & obsolete \\
\hline
- French & Xavier Outhier & {\tt xouthier@yahoo.fr} & up-to-date \\
+ French & Xavier Outhier & {\tt\tiny xouthier@yahoo.fr} & up-to-date \\
\hline
- German & Jens Seidel & {\tt jensseidel@users.sf.net} & up-to-date \\
+ German & Jens Seidel & {\tt\tiny jensseidel@users.sf.net} & up-to-date \\
\hline
- Greek & Harry Kalogirou & {\tt harkal@rainbow.cs.unipi.gr} & 1.2.11 \\
+ Greek & Harry Kalogirou & {\tt\tiny harkal@rainbow.cs.unipi.gr} & 1.2.11 \\
\hline
- Hungarian & F\"{o}ldv\'{a}ri Gy\"{o}rgy & {\tt foldvari@diatronltd.com} & up-to-date \\
- & \'{A}kos Kiss & {\tt akiss@users.sourceforge.net} & \\
+ Hungarian & F\"{o}ldv\'{a}ri Gy\"{o}rgy & {\tt\tiny foldvari@diatronltd.com} & up-to-date \\
+ ~ & \'{A}kos Kiss & {\tt\tiny akiss@users.sourceforge.net} & ~ \\
\hline
- Italian & Alessandro Falappa & {\tt alessandro@falappa.net} & up-to-date \\
- & Ahmed Aldo Faisal & {\tt aaf23@cam.ac.uk} & \\
+ Italian & Alessandro Falappa & {\tt\tiny alessandro@falappa.net} & up-to-date \\
+ ~ & Ahmed Aldo Faisal & {\tt\tiny aaf23@cam.ac.uk} & ~ \\
\hline
- Japanese & Ryunosuke Satoh & {\tt sun594@hotmail.com} & 1.3.3 \\
- & Kenji Nagamatsu & {\tt naga@joyful.club.ne.jp} & \\
+ Japanese & Ryunosuke Satoh & {\tt\tiny sun594@hotmail.com} & 1.3.3 \\
+ ~ & Kenji Nagamatsu & {\tt\tiny naga@joyful.club.ne.jp} & ~ \\
\hline
- JapaneseEn & unknown & {\tt unknown} & obsolete \\
+ JapaneseEn & see the Japanese language & {\tt\tiny ~} & English based \\
\hline
- Korean & Richard Kim & {\tt ryk@dspwiz.com} & up-to-date \\
+ Korean & Richard Kim & {\tt\tiny ryk@dspwiz.com} & up-to-date \\
\hline
- KoreanEn & unknown & {\tt unknown} & obsolete \\
+ KoreanEn & see the Korean language & {\tt\tiny ~} & English based \\
\hline
- Norwegian & Lars Erik Jordet & {\tt lej@circuitry.no} & 1.2.2 \\
+ Norwegian & Lars Erik Jordet & {\tt\tiny lej@circuitry.no} & 1.2.2 \\
\hline
- Polish & Piotr Kaminski & {\tt Piotr.Kaminski@ctm.gdynia.pl} & 1.3 \\
- & Grzegorz Kowal & {\tt g\_kowal@poczta.onet.pl} & \\
+ Polish & Piotr Kaminski & {\tt\tiny Piotr.Kaminski@ctm.gdynia.pl} & 1.3 \\
+ ~ & Grzegorz Kowal & {\tt\tiny g\_kowal@poczta.onet.pl} & ~ \\
\hline
- Portuguese & Rui Godinho Lopes & {\tt ruiglopes@yahoo.com} & 1.3.3 \\
+ Portuguese & Rui Godinho Lopes & {\tt\tiny ruiglopes@yahoo.com} & 1.3.3 \\
\hline
- Romanian & Alexandru Iosup & {\tt aiosup@yahoo.com} & 1.2.16 \\
+ Romanian & Alexandru Iosup & {\tt\tiny aiosup@yahoo.com} & 1.2.16 \\
\hline
- Russian & Alexandr Chelpanov & {\tt cav@cryptopro.ru} & up-to-date \\
+ Russian & Alexandr Chelpanov & {\tt\tiny cav@cryptopro.ru} & up-to-date \\
\hline
- Serbian & Dejan Milosavljevic & {\tt dmilos@email.com} & up-to-date \\
+ Serbian & Dejan Milosavljevic & {\tt\tiny dmilos@email.com} & up-to-date \\
\hline
- Slovak & Stanislav Kudl\'{a}\v{c} & {\tt skudlac@pobox.sk} & 1.2.18 \\
+ Slovak & Stanislav Kudl\'{a}\v{c} & {\tt\tiny skudlac@pobox.sk} & 1.2.18 \\
\hline
- Slovene & Matjaz Ostroversnik & {\tt matjaz.ostroversnik@zrs-tk.si} & 1.2.16 \\
+ Slovene & Matjaz Ostroversnik & {\tt\tiny matjaz.ostroversnik@zrs-tk.si} & 1.2.16 \\
\hline
- Spanish & Francisco Oltra Thennet & {\tt foltra@puc.cl} & 1.3.3 \\
+ Spanish & Francisco Oltra Thennet & {\tt\tiny foltra@puc.cl} & up-to-date \\
\hline
- Swedish & Mikael Hallin & {\tt mikaelhallin@yahoo.se} & 1.3.3 \\
+ Swedish & Mikael Hallin & {\tt\tiny mikaelhallin@yahoo.se} & 1.3.3 \\
\hline
- Ukrainian & Olexij Tkatchenko & {\tt olexij.tkatchenko@gmx.de} & 1.2.11 \\
+ Ukrainian & Olexij Tkatchenko & {\tt\tiny olexij.tkatchenko@gmx.de} & 1.2.11 \\
\hline
\end{tabular}
\endlatexonly
@@ -322,7 +325,7 @@ please read the next section.
<h3>Adding a new language to doxygen</h3>
-This short HOWTO explains how to add support for a new language to Doxygen:
+This short HOWTO explains how to add support for a new output language to Doxygen:
Just follow these steps:
<ol>
@@ -351,7 +354,7 @@ Just follow these steps:
\endverbatim
Remember to use the same symbol LANG_xx that you added to \c lang_cfg.h.
I.e., the \c xx should be capital letters that identify your language.
- On the other hand, the \c xx inside your \c translator_xx.h should be
+ On the other hand, the \c xx inside your \c translator_xx.h should use
lower case.
<p>Now, in <code>setTranslator()</code> add
\verbatim
@@ -526,19 +529,19 @@ maintainers should try to reach the state with the minimal number of
translator adapter classes.
<b>To simplify the maintenance of the language translator classes</b>
-for the supported languages, the \c translator.pl perl
+for the supported languages, the \c translator.py Python
script was developed (located in \c doxygen/doc directory).
It extracts the important information about obsolete and
new methods from the source files for each of the languages.
The information is stored in the <em>translator report</em> ASCII file
-(<code>doxygen/doc/translator_report.txt</code>). \htmlonly If you compiled this documentation
+(translator_report.txt). \htmlonly If you compiled this documentation
from sources and if you have also doxygen sources available the
-link <a href="../doc/translator_report.txt">
- <code>doxygen/doc/translator_report.txt</code></a> should be valid.\endhtmlonly
+link <a href="../doc/translator_report.txt"
+><code>doxygen/doc/translator_report.txt</code></a> should be valid.\endhtmlonly
Looking at the base class of the language translator, the script
guesses also the status of the translator -- see the last column of
-the table with languages above. The \c translator.pl is called
+the table with languages above. The \c translator.py is called
automatically when the doxygen documentation is generated. You can
also run the script manualy whenever you feel that it can help you.
Of course, you are not forced to use the results of the script. You
@@ -573,49 +576,48 @@ implement anything else than the methods required by the Translator
class (i.e. the pure virtual methods of the \c Translator -- they
end with <code>=0;</code>).
-If everything compiles fine, try to run \c translator.pl, and have a
+If everything compiles fine, try to run \c translator.py, and have a
look at the translator report (ASCII file) at the \c doxygen/doc
-directory. Even if your translator is marked as up-to-date, there
-still may be some remarks related to your souce code. Namely, the
+directory. Even if your translator is marked as up-to-date, there
+still may be some remarks related to your source code. Namely, the
obsolete methods--that are not used at all--may be listed in the
-section for your language. Simply, remove their code (and run the
-\c translator.pl again).
+section for your language. Simply remove their code (and run
+\c translator.py again). You will also be informed if you forgot to
+change the base class of your translator class to some newer adapter
+class or directly to the Translator class.
<b>If you do not have time to finish all the updates</b> you should
still start with <em>the most radical approach</em> as described
above. You can always change the base class to the translator
adapter class that implements all of the not-yet-implemented methods.
-<b>If you prefer to update your translator gradually</b>, look
-at the <em>translator report</em> generated by the \c translator.pl script
-and choose one of the missing method that is implemented by the
-translator adapter, that is used as your base class. When there is
-not such a method in your translator adapter base class, you probably
-can change the translator adapter base to the newer one.
-
-Probably the easiest approach of the gradual update is to look at
-the translator report to the part where the list of the implemented
-translator adapters is shown. Then:
- - Look how many required methods each adapter implements and guess
- how many methods you are willing to update (to spend the time
- with).
- - Choose the related oldest translator adapters to be removed (i.e.
- not used by your translator).
- - Change the base class of your translator class to the translator
- adapter that you want to use.
- - Implement the methods that were implemented by the older translator
- adapters.
-
-Notice: Do not blindly implement all methods that are implemented by
-your translator adapter base class. The reason is that the adapter
-classes implement also obsolete methods. Another reason is that
-some of the methods could become obsolete from some newer adapter
-on. Focus on the methods listed as \e required.
-
-<b>The really obsolete language translators</b> may lead to too much
-complicated adapters. Because of that, doxygen developers may decide
-to derive such translators from the \c TranslatorEnglish class, which
-is by definition always up-to-date.
+<b>If you prefer to update your translator gradually</b>, have a look
+at \c TranslatorEnglish (the \c translator_en.h file). Inside, you
+will find comments like <code>new since 1.2.4</code> that mark the
+groups of methods introduced in the stated version. Implement the
+group of methods placed below the comment that carries the same
+version number as your translator adapter class. (For example, your
+translator class has to use \c TranslatorAdapter_1_2_4 if it does not
+implement the methods below the comment <code>new since 1.2.4</code>.)
+When you implement them, your class should use a newer translator
+adapter.
+
+Run the \c translator.py script occasionally and give it your \c xx
+identification (from \c translator_xx.h) to get a shorter translator
+report (which is also produced faster) -- it will contain only the
+information related to your translator. Once you reach the state where
+the base class should be changed to some newer adapter, you will see
+a note in the translator report.
+
+Warning: Don't forget to compile Doxygen to verify that your
+translator still compiles. The \c translator.py script does not check
+the code from the compiler's point of view, so it may sometimes be
+wrong about the necessary base class.
+
+<b>The most obsolete language translators</b> would require overly
+complicated adapters. Because of that, doxygen developers may decide
+to derive such translators from the \c TranslatorEnglish class, which
+is by definition always up-to-date.
When doing so, all the missing methods will be replaced by the
English translation. This means that not-implemented methods will
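
To make the gradual-update recipe above concrete, here is a compact sketch. The class name TranslatorXx and the returned strings are illustrative only; the adapter base classes, the QCString return type, and the method names follow the pattern of the real translator sources.

#include "translator_adapter.h"   // adapter classes shipped with the doxygen sources

// translator_xx.h (illustrative): start from the adapter that matches the
// set of methods you have already implemented ...
class TranslatorXx : public TranslatorAdapter_1_2_4
{
  public:
    virtual QCString idLanguage()
    { return "xx"; }
    virtual QCString trRelatedFunctions()
    { return "..."; }              // translated text for the xx language
    // ... all methods required up to the "new since 1.2.4" comment go here.
};

// Once every method below the "new since 1.2.4" comment of translator_en.h
// is implemented as well, switch to a newer adapter (or to Translator itself
// when the class is fully up-to-date):
//
//   class TranslatorXx : public TranslatorAdapter_1_2_16 { ... };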
diff --git a/doc/language.tpl b/doc/language.tpl
index f33a256..d8cc0d3 100644
--- a/doc/language.tpl
+++ b/doc/language.tpl
@@ -1,7 +1,9 @@
+
+ATTENTION! This is the template for generating language.doc. If you want to
+change the language.doc, make the changes here and inside maintainers.txt.
+
/******************************************************************************
- * <notice>This is the template for generating language.doc.
- * Edit manually this file, not the language.doc!</notice>
- *
+ * %(editnote)s
*
* Copyright (C) 1997-2004 by Dimitri van Heesch.
*
@@ -14,25 +16,27 @@
* Documents produced by Doxygen are derivative works derived from the
* input used in their production; they are not affected by this license.
*
+ * $Id$
*/
/*! \page langhowto Internationalization
<h3>Support for multiple languages</h3>
-Doxygen has built-in support for multiple languages. This means
-that the text fragments that doxygen generates can be produced in
-languages other than English (the default) at configuration time.
+Doxygen has built-in support for multiple languages. This means that the
+text fragments generated by doxygen can be produced in languages other
+than English (the default). The output language is chosen through the
+configuration file (by default named Doxyfile).
-Currently (version $version), $numlang languages
-are supported (sorted alphabetically):
-$languages.
+Currently (version %(doxVersion)s), %(numLangStr)s languages
+are supported (sorted alphabetically):
+%(supportedLangReadableStr)s.
The table of information related to the supported languages follows.
It is sorted by language alphabetically. The <b>Status</b> column
was generated from sources and shows approximately the last version
when the translator was updated.
-$information_table
+%(informationTable)s
Most people on the list have indicated that they were also busy
doing other things, so if you want to help to speed things up please
@@ -44,7 +48,7 @@ please read the next section.
<h3>Adding a new language to doxygen</h3>
-This short HOWTO explains how to add support for a new language to Doxygen:
+This short HOWTO explains how to add support for a new output language to Doxygen:
Just follow these steps:
<ol>
@@ -73,7 +77,7 @@ Just follow these steps:
\endverbatim
Remember to use the same symbol LANG_xx that you added to \c lang_cfg.h.
I.e., the \c xx should be capital letters that identify your language.
- On the other hand, the \c xx inside your \c translator_xx.h should be
+ On the other hand, the \c xx inside your \c translator_xx.h should use
lower case.
<p>Now, in <code>setTranslator()</code> add
\verbatim
@@ -248,18 +252,18 @@ maintainers should try to reach the state with the minimal number of
translator adapter classes.
<b>To simplify the maintenance of the language translator classes</b>
-for the supported languages, the \c translator.pl perl
+for the supported languages, the \c translator.py Python
script was developed (located in \c doxygen/doc directory).
It extracts the important information about obsolete and
new methods from the source files for each of the languages.
The information is stored in the <em>translator report</em> ASCII file
-($translator_report_file_name). \htmlonly If you compiled this documentation
+(%(translatorReportFileName)s). \htmlonly If you compiled this documentation
from sources and if you have also doxygen sources available the
-link $translator_report_link should be valid.\endhtmlonly
+link %(translatorReportLink)s should be valid.\endhtmlonly
Looking at the base class of the language translator, the script
guesses also the status of the translator -- see the last column of
-the table with languages above. The \c translator.pl is called
+the table with languages above. The \c translator.py is called
automatically when the doxygen documentation is generated. You can
also run the script manualy whenever you feel that it can help you.
Of course, you are not forced to use the results of the script. You
@@ -294,49 +298,48 @@ implement anything else than the methods required by the Translator
class (i.e. the pure virtual methods of the \c Translator -- they
end with <code>=0;</code>).
-If everything compiles fine, try to run \c translator.pl, and have a
+If everything compiles fine, try to run \c translator.py, and have a
look at the translator report (ASCII file) at the \c doxygen/doc
-directory. Even if your translator is marked as up-to-date, there
-still may be some remarks related to your souce code. Namely, the
+directory. Even if your translator is marked as up-to-date, there
+still may be some remarks related to your source code. Namely, the
obsolete methods--that are not used at all--may be listed in the
-section for your language. Simply, remove their code (and run the
-\c translator.pl again).
+section for your language. Simply remove their code (and run
+\c translator.py again). You will also be informed if you forgot to
+change the base class of your translator class to some newer adapter
+class or directly to the Translator class.
<b>If you do not have time to finish all the updates</b> you should
still start with <em>the most radical approach</em> as described
above. You can always change the base class to the translator
adapter class that implements all of the not-yet-implemented methods.
-<b>If you prefer to update your translator gradually</b>, look
-at the <em>translator report</em> generated by the \c translator.pl script
-and choose one of the missing method that is implemented by the
-translator adapter, that is used as your base class. When there is
-not such a method in your translator adapter base class, you probably
-can change the translator adapter base to the newer one.
-
-Probably the easiest approach of the gradual update is to look at
-the translator report to the part where the list of the implemented
-translator adapters is shown. Then:
- - Look how many required methods each adapter implements and guess
- how many methods you are willing to update (to spend the time
- with).
- - Choose the related oldest translator adapters to be removed (i.e.
- not used by your translator).
- - Change the base class of your translator class to the translator
- adapter that you want to use.
- - Implement the methods that were implemented by the older translator
- adapters.
-
-Notice: Do not blindly implement all methods that are implemented by
-your translator adapter base class. The reason is that the adapter
-classes implement also obsolete methods. Another reason is that
-some of the methods could become obsolete from some newer adapter
-on. Focus on the methods listed as \e required.
-
-<b>The really obsolete language translators</b> may lead to too much
-complicated adapters. Because of that, doxygen developers may decide
-to derive such translators from the \c TranslatorEnglish class, which
-is by definition always up-to-date.
+<b>If you prefer to update your translator gradually</b>, have a look
+at \c TranslatorEnglish (the \c translator_en.h file). Inside, you
+will find comments like <code>new since 1.2.4</code> that mark the
+groups of methods introduced in the stated version. Implement the
+group of methods placed below the comment that carries the same
+version number as your translator adapter class. (For example, your
+translator class has to use \c TranslatorAdapter_1_2_4 if it does not
+implement the methods below the comment <code>new since 1.2.4</code>.)
+When you implement them, your class should use a newer translator
+adapter.
+
+Run the \c translator.py script occasionally and give it your \c xx
+identification (from \c translator_xx.h) to get a shorter translator
+report (which is also produced faster) -- it will contain only the
+information related to your translator. Once you reach the state where
+the base class should be changed to some newer adapter, you will see
+a note in the translator report.
+
+Warning: Don't forget to compile Doxygen to verify that your
+translator still compiles. The \c translator.py script does not check
+the code from the compiler's point of view, so it may sometimes be
+wrong about the necessary base class.
+
+<b>The most obsolete language translators</b> would require overly
+complicated adapters. Because of that, doxygen developers may decide
+to derive such translators from the \c TranslatorEnglish class, which
+is by definition always up-to-date.
When doing so, all the missing methods will be replaced by the
English translation. This means that not-implemented methods will
diff --git a/doc/maintainers.txt b/doc/maintainers.txt
index 14339af..9985f2e 100644
--- a/doc/maintainers.txt
+++ b/doc/maintainers.txt
@@ -1,99 +1,101 @@
+% $Id$
+%
% Comments start with % sign at the beginning.
% XML entities like &auml; are used for special characters.
% There is one record for each language. The records are separated
% by the empty line and they do not contain empty lines.
-% First line of the record identifies the language.
+% First line of the record identifies the translator class for the language.
% The following one or more lines contain information about
% the maintainer(s) for the language (one line, one maintainer)
% in the form: <readable name><colon><e-mail>
-Brazilian
+TranslatorBrazilian
Fabio "FJTC" Jun Takada Chino: chino@icmc.sc.usp.br
-Catalan
+TranslatorCatalan
Albert Mora: amora@iua.upf.es
-Chinese
+TranslatorChinese
Wei Liu: liuwei@asiainfo.com
Wang Weihan: wangweihan@capinfo.com.cn
-ChineseTraditional
+TranslatorChinesetraditional
Daniel YC Lin: daniel@twpda.com
Gary Lee: garylee@ecosine.com.tw
-Croatian
+TranslatorCroatian
Boris Bralo: boris.bralo@zg.tel.hr
-Czech
+TranslatorCzech
Petr P&rcaron;ikryl: prikrylp@skil.cz
-Danish
+TranslatorDanish
Erik S&oslash;e S&oslash;rensen: erik@mail.nu
-Dutch
+TranslatorDutch
Dimitri van Heesch: dimitri@stack.nl
-English
+TranslatorEnglish
Dimitri van Heesch: dimitri@stack.nl
-Finnish
+TranslatorFinnish
Olli Korhonen: Olli.Korhonen@ccc.fi
-French
+TranslatorFrench
Xavier Outhier: xouthier@yahoo.fr
-German
+TranslatorGerman
Jens Seidel: jensseidel@users.sf.net
-Greek
+TranslatorGreek
Harry Kalogirou: harkal@rainbow.cs.unipi.gr
-Hungarian
+TranslatorHungarian
F&ouml;ldv&aacute;ri Gy&ouml;rgy: foldvari@diatronltd.com
&Aacute;kos Kiss: akiss@users.sourceforge.net
-Italian
+TranslatorItalian
Alessandro Falappa: alessandro@falappa.net
Ahmed Aldo Faisal: aaf23@cam.ac.uk
-Japanese
+TranslatorJapanese
Ryunosuke Satoh: sun594@hotmail.com
Kenji Nagamatsu: naga@joyful.club.ne.jp
-Korean
+TranslatorKorean
Richard Kim: ryk@dspwiz.com
-Norwegian
+TranslatorNorwegian
Lars Erik Jordet: lej@circuitry.no
-Polish
+TranslatorPolish
Piotr Kaminski: Piotr.Kaminski@ctm.gdynia.pl
Grzegorz Kowal: g_kowal@poczta.onet.pl
-Portuguese
+TranslatorPortuguese
Rui Godinho Lopes: ruiglopes@yahoo.com
-Romanian
+TranslatorRomanian
Alexandru Iosup: aiosup@yahoo.com
-Russian
+TranslatorRussian
Alexandr Chelpanov: cav@cryptopro.ru
-Serbian
+TranslatorSerbian
Dejan Milosavljevic: dmilos@email.com
-Slovak
+TranslatorSlovak
Stanislav Kudl&aacute;&ccaron;: skudlac@pobox.sk
-Slovene
+TranslatorSlovene
Matjaz Ostroversnik: matjaz.ostroversnik@zrs-tk.si
-Spanish
+TranslatorSpanish
Francisco Oltra Thennet: foltra@puc.cl
-Swedish
+TranslatorSwedish
Mikael Hallin: mikaelhallin@yahoo.se
-Ukrainian
+TranslatorUkrainian
Olexij Tkatchenko: olexij.tkatchenko@gmx.de
diff --git a/doc/translator.bat b/doc/translator.bat
deleted file mode 100644
index 3dccb13..0000000
--- a/doc/translator.bat
+++ /dev/null
@@ -1 +0,0 @@
-@call perl -w translator.pl
diff --git a/doc/translator.pl b/doc/translator.pl
deleted file mode 100644
index 89721f7..0000000
--- a/doc/translator.pl
+++ /dev/null
@@ -1,1381 +0,0 @@
-#! /usr/bin/perl -w
-# -*- mode: perl; mode: fold -*-
-
-# This is a Perl script for Doxygen developers.
-# Its main purpose is to extract the information from sources
-# related to internationalization (the translator classes).
-# It uses the information to generate documentation (language.doc,
-# translator_report.txt) from templates (language.tpl, maintainers.txt).
-#
-# Petr Prikryl (prikrylp@skil.cz)
-# History:
-# --------
-# 2001/04/27
-# - First version of the script.
-#
-# 2001/05/02
-# - Update to accept updateNeededMessage() in the Translator class.
-# - First version that generates doc/language.doc.
-#
-# 2001/05/07
-# - Environment variable $doxygenrootdir now points to the
-# Doxygen's root directory.
-#
-# 2001/05/11
-# - Updated to reflect using TranslatorAdapterCVS as the base
-# class for "almost up-to-date" translators.
-# - $doxygenrootdir and other global variables for storing
-# directories determined from DOXYGEN_DOCDIR environment
-# variable. The change was done because the DOXYGEN_DOCDIR
-# was already used before.
-# - $version mark can be used in the language.tpl template.
-#
-# 2001/05/18
-# - Character entity &oslash; recognized in maintainers.txt.
-#
-# 2001/06/06
-# - Implementation of the methods recognized even when the
-# argument list does not contain argument identifiers
-# (i.e., when it contains type information only).
-#
-# 2001/06/11
-# - Character entity &ccaron; recognized in maintainers.txt.
-#
-# 2001/07/17
-# - Perl version checking is less confusing now. The script stops
-# immediately after the first command below when your perl
-# is older that required.
-# - The information below the table of languages is not produced
-# with the table. Another symbol replacement is done, so language.tpl
-# can be updated so that the generated language.doc does not contain
-# the link to the translator_report.txt.
-#
-# 2001/08/20
-# - StripArgIdentifiers() enhanced to be more robust in producing
-# equal prototypes from the base class and from the derived
-# classes (if they should be considered equal).
-#
-# 2001/08/28
-# - "see details" added to the up-to-date translator list
-# (in translator report) to translators for which some details
-# are listed below in the report. This is usually the case
-# when the up-to-date translator still implements a obsolete
-# method that will never be called (i.e. the code should be removed).
-#
-# 2001/09/10
-# - The script now always exits with 0. If the sources are not
-# found, translator report is not generated. If the $flangdoc
-# is not also found and no sources are available, the simplified
-# result with remarks inside is generated from the template.
-# The consequences, translator.pl should never break the "make",
-# and the language.doc should always be present after running
-# this script -- no problem should occur when generating doxygen
-# documentation.
-#
-# 2001/09/11
-# - Minor (say cosmetic) enhancement. The code for generating
-# the simplified language.doc from the template was moved to
-# the separate function CopyTemplateToLanguageDoc().
-#
-# 2001/10/17
-# - Minor update of GetInfoFrom() to ignore spaces between the
-# method identifier and the opening parenthesis to match better
-# the method prototype with the one in the translator.h.
-#
-# 2001/11/06
-# - TranslatorAdapterCVS is not used any more. There is nothing
-# like "almost up-to-date" any more. The script was simplified
-# to reflect the changes.
-#
-# 2001/11/26
-# - Information about version of doxygen added to the top
-# of the translator report (the ASCII file).
-# - TranslatorEnglish can be used to solve really obsolete translators
-# to make adapter classes simpler. Such translators are marked
-# as "obsolete" in the status (i.e. no guessing when it was last updated).
-# The translator report include the notice about that situation.
-#
-# 2002/01/03
-# - Minor correction of regexp to obtain the list of translator_xx.h files.
-# - Translator report ASCII file now lists the implemented translator
-# adapter classes; so you can check how many steps behind the up-to-date
-# status your translator is.
-#
-# 2002/01/07
-# - The list of the implemented translator-adapter classes now shows
-# how many and what required methods the translator adapter implements.
-#
-# 2002/01/08
-# - The mistake in comments inside the translator report corrected.
-# The older translator adapters are derived from newer ones.
-# The mistaken comment said the opposite.
-#
-# 2002/01/23
-# - The nasty bug corrected. GetAdapterClassesInfo() did not
-# strip trailing blanks from the method prototype; consequently,
-# the required methods implemented by a translator adapter was
-# not recognized as the required one (i.e. not listed).
-# - Some defined() operators used on hash elements were replaced
-# by exists() operators where appropriate.
-#
-# 2002/05/21
-# - Changes to display languages with two words more naturally
-# (like "Chinese Traditional" instead of "Chinesetraditional"
-# or "Brazilian Portuguese" instead of "Brazilian").
-#
-# ATTENTION! Development of this script is stopped. Only the errors
-# are going to be fixed. Look for the translator.py as the replacement.
-#
-# 2003/12/18
-# - Fixed the bug for maintainers with name starting with special
-# character (like &Aacute;).
-# - The &Aacute; translation to LaTeX.
-#
-################################################################
-
-use 5.005;
-use strict;
-use Carp;
-
-# Global variables
-#
-my $doxygenrootdir = 'directory set at the beginning of the body';
-my $srcdir = 'directory set at the beginning of the body';
-my $docdir = 'directory set at the beginning of the body';
-
-my $doxversion = 'set at the beginning of the body';
-
-# Names of the output files.
-#
-my $ftranslatortxt = "translator_report.txt";
-my $flangdoc = "language.doc";
-
-# Names of the template files and other intput files.
-#
-my $flangtpl = "language.tpl"; # template for language.doc
-my $fmaintainers = "maintainers.txt"; # database of local lang. maintainers
-
-
-################################################################
-# GetPureVirtual returns the list of pure virtual method prototypes
-# as separate strings (one prototype, one line, one list item).
-# The input argument is the full name of the source file.
-#
-sub GetPureVirtualFrom ##{{{
-{
- my $fin = shift; # Get the file name.
-
- # Let's open the file and read it into a single string.
- #
- open(FIN, "< $fin") or die "\nError when open < $fin: $!";
- my @content = <FIN>;
- close FIN;
- my $cont = join("", @content);
-
- # Remove comments and empty lines.
- #
- $cont =~ s{\s*//.*$}{}mg; # remove one-line comments
- while ($cont =~ s{/\*.+?\*/}{}sg ) {} # remove C comments
- $cont =~ s{\n\s*\n}{\n}sg; # remove empty lines
-
- # Remove the beginning up to the first virtual method.
- # Remove also the text behind the class.
- #
- $cont =~ s/^.*?virtual/virtual/s;
- $cont =~ s/\n\};.*$//s;
-
- # Erase anything between "=0;" and "virtual". Only the pure
- # virtual methods will remain. Remove also the text behind
- # the last "= 0;"
- #
- $cont =~ s{(=\s*0\s*;).*?(virtual)}{$1 $2}sg;
- $cont =~ s{^(.+=\s*0\s*;).*?$}{$1}s;
-
- # Remove the empty implementation of the updateNeededMessage()
- # method which is to be implemented by adapters only, not by
- # translators.
- #
- $cont =~ s{\s*virtual
- \s+QCString
- \s+updateNeededMessage\(\)
- \s+\{\s*return\s+"";\s*\}
- }
- {}xs;
-
- # Replace all consequent white spaces (including \n) by a single
- # space. Trim also the leading and the trailing space.
- #
- $cont =~ s{\s+}{ }sg;
- $cont =~ s{^\s+}{}s;
- $cont =~ s{\s+$}{}s;
-
- # Split the result to the lines again. Remove the "= 0;".
- #
- $cont =~ s{\s*=\s*0\s*;\s*}{\n}sg;
-
- # Remove the keyword "virtual" because the derived classes
- # may not use it.
- #
- $cont =~ s{^virtual\s+}{}mg;
-
- # Split the string into array of lines and return it as
- # the output list.
- #
- return split(/\n/, $cont);
-}
-##}}}
-
-
-################################################################
-# StripArgIdentifiers takes a method prototype (one line string),
-# removes the argument identifiers, and returns only the necessary
-# form of the prototype as the function result.
-#
-sub StripArgIdentifiers ##{{{
-{
- my $prototype = shift; # Get the prototype string.
-
- # Extract the list of arguments from the prototype.
- #
- $prototype =~ s{^(.+\()(.*)(\).*)$}{$1#ARGS#$3};
- my $a = (defined $2) ? $2 : '';
-
- # Split the list of arguments.
- #
- my @a = split(/,/, $a);
-
- # Strip each of the arguments.
- #
- my @stripped = ();
-
- foreach my $arg (@a) {
-
- # Only the type of the identifier is important...
- #
- $arg =~ s{^(\s* # there can be spaces behind comma,
- (const\s+)? # possibly const at the beginning
- [A-Za-z0-9_:]+ # type identifier can be qualified
- (\s*[*&])? # could be reference or pointer
- ) # ... the above is important,
- .*$ # the rest contains the identifier
- }
- {$1}x; # remember only the important things
-
- # People may differ in opinion whether a space should
- # or should not be written between a type identifier and
- # the '*' or '&' (when the argument is a pointer or a reference).
- #
- $arg =~ s{\s*([*&])}{ $1};
-
- # Whitespaces are not only spaces. Moreover, the difference
- # may be in number of them in a sequence or in the type
- # of a whitespace. This is the reason to replace each sequence
- # of whitespaces by a single, real space.
- #
- $arg =~ s{\s+}{ }g;
-
- # Remember the stripped form of the arguments
- push(@stripped, $arg);
- }
-
- # Join the stripped arguments into one line again, and
- # insert it back.
- #
- $a = join(',', @stripped);
- $prototype =~ s{#ARGS#}{$a};
-
- # Finally, return the stripped prototype.
- #
- return $prototype;
-}
-##}}}
-
-
-################################################################
-# GetInfoFrom returns the list of information related to the
-# parsed source file. The input argument is the name of the
-# translator_xx.h file including path.
-#
-# The output list contains the following items:
-# - class identifier
-# - base class identifier
-# - method prototypes (each in a separate item)
-#
-sub GetInfoFrom ##{{{
-{
- # Get the file name.
- #
- my $fin = shift;
-
- # Let's open the file and read it into a single string.
- #
- open(FIN, "< $fin") or die "\nError when open < $fin: $!";
- my @content = <FIN>;
- close FIN;
- my $cont = join("", @content);
-
- # Remove comments and empty lines.
- #
- $cont =~ s{\s*//.*$}{}mg; # remove one-line comments
- $cont =~ s{/\*.+?\*/}{}sg; # remove C comments
- $cont =~ s{\n\s*\n}{\n}sg; # remove empty lines
-
- # Extract the class and base class identifiers. Remove the
- # opening curly brace. Remove also the first "public:"
- # Put the class and the base class into the output list.
- #
- $cont =~ s{^.*class\s+(Translator\w+)[^:]*:
- \s*public\s+(\w+)\b.*?\{\s*
- (public\s*:\s+)?
- }
- {}sx;
-
- @content = ($1, $2);
-
- # Cut the things after the class.
- #
- $cont =~ s{\}\s*;\s*#endif\s*$}{}s;
-
- # Remove the "virtual" keyword, because some the derived class
- # is not forced to use it.
- #
- $cont =~ s{^\s*virtual\s+}{}mg;
-
- # Remove all strings from lines.
- #
- $cont =~ s{".*?"}{}mg;
-
- # Remove all bodies of methods;
- #
- while ($cont =~ s/{[^{}]+?}//sg) {}
-
- # Remove all private methods, i.e. from "private:" to "public:"
- # included. Later, remove also all from "private:" to the end.
- #
- $cont =~ s{private\s*:.*?public\s*:}{}sg;
- $cont =~ s{private\s*:.*$}{}s;
-
- # Some of the translators use conditional compilation where
- # the branches define the body of the method twice. Remove
- # the ifdef/endif block content.
- #
- $cont =~ s{#ifdef.*?#endif}{}sg;
-
- # Now the string should containt only method prototypes.
- # Let's unify their format by removing all spaces that
- # are not necessary. Then let's put all of them on separate
- # lines (one protototype -- one line; no empty lines).
- #
- $cont =~ s{\s+}{ }sg;
- $cont =~ s{^\s+}{}s;
- $cont =~ s{\s+$}{}s;
-
- $cont =~ s{\s+\(}{(}g;
- $cont =~ s{\)\s*}{)\n}g;
-
- # Split the string and add it to the ouptut list.
- #
- @content = (@content, split(/\n/, $cont));
- return @content;
-}
-##}}}
-
-
-################################################################
-# GetAdapterClassesInfo returns the list of strings with information
-# related to the adapter classes. Each one-line string contains the
-# identifier of the adapter class and the number of required methods
-# that are implemented by the adapter.
-#
-# The function takes one agument -- the reference to the hash with
-# stripped prototypes of the required methods.
-#
-sub GetAdapterClassesInfo ##{{{
-{
- # Get the reference to the hash with required prototypes.
- #
- my $reqref = shift;
-
- # Let's open the file with the translator adapter classes.
- #
- my $fin = "$srcdir/translator_adapter.h";
- open(FIN, "< $fin") or die "\nError when open < $fin: $!";
- my @content = <FIN>;
- close FIN;
- my $cont = join("", @content);
-
- # Prepare the list that will be returned as result.
- #
- my @result = ();
-
- # Remove the preprocessor directives.
- #
- $cont =~ s{^\s*#\w+.+$}{}mg;
-
- # Remove comments and empty lines.
- #
- $cont =~ s{\s*//.*$}{}mg; # remove one-line comments
- $cont =~ s{/\*.+?\*/}{}sg; # remove C comments
- $cont =~ s{\n\s*\n}{\n}sg; # remove empty lines
-
- # Place delimiters to separate the classes, and remove
- # the TranslatorAdapterBase class.
- #
- $cont =~ s{\};\s*class\s+}{<class>}sg;
- $cont =~ s{class\s+TranslatorAdapterBase\s+.+?<class>}{<class>}s;
- $cont =~ s{\};}{}sg;
-
-    # Remove the base classes and the beginning of the class
- # definitions.
- #
- $cont =~ s{(TranslatorAdapter[_0-9]+)\s*:.+?\{\s*(public\s*:)?}{$1}sg;
-
- # Remove all bodies of methods;
- #
- while ($cont =~ s/{[^{}]+?}//sg) {}
-
- # Remove the "virtual" keywords.
- #
- $cont =~ s{^\s*virtual\s+}{}mg;
-
- # Remove the empty lines.
- #
- $cont =~ s{\n\s*\n}{\n}sg;
-
- # Trim the spaces.
- #
- $cont =~ s{^\s+}{}mg;
- $cont =~ s{\s+$}{}mg;
-
- # Split the string into the lines again.
- #
- @content = split(/\n/, $cont);
-
- # Now the list contains only two kinds of lines. The first
- # kind of lines starts with the <class> tag and contains the
- # identifier of the class. The following lines list the
- # non-stripped prototypes of implemented methods without the
- # "virtual" keyword.
- #
- # Now we will produce the result by looping through all the
- # lines and counting the prototypes of the required methods
- # that are implemented by the adapter class.
- #
- my $info = '';
- my $cnt = 0;
- my $methods = '';
-
- foreach my $line (@content)
- {
- if ($line =~ m{^<class>(\w+)\s*$}i )
- {
- # Another adapter class found.
- #
- my $adapter_class = $1;
-
- # If the $info is not empty then it contains partial
- # information about the previously processed adapter.
- #
- if ($info ne '')
- {
- # Complete the $info and push it into the @result.
- #
- $info .= sprintf("\timplements %2d required method%s...\n",
- $cnt, (($cnt != 1) ? 's' : ''));
- $methods .= "\n";
- push(@result, "$info$methods");
- }
-
- # Initialize the counter and store the adapter class identifier
- # in the $info.
- #
- $info = $adapter_class;
- $cnt = 0;
- $methods = '';
- }
- else
- {
- # The line contains the prototype of the implemented method.
- # If it is the required method, count it, and add it to the
- # string of methods.
- #
- my $stripped_prototype = StripArgIdentifiers($line);
-
- if (exists($$reqref{$stripped_prototype}))
- {
- ++$cnt;
- $methods .= " $line\n";
- }
- }
- }
-
- # If the $info is not empty then it contains partial
- # information about the last processed adapter.
- #
- if ($info ne '')
- {
- # Complete the $info and push it into the @result.
- #
- $info .= sprintf("\timplements %2d required method%s...\n",
- $cnt, (($cnt != 1) ? 's' : ''));
- $methods .= "\n";
- push(@result, "$info$methods");
- }
-
- # Return the result list.
- #
- return @result;
-}
-##}}}
-
-
-################################################################
-# GenerateLanguageDoc takes document templates and code sources
-# generates the content as expected in the $flangdoc file (the
-# part of the Doxygen documentation), and returns the result as a
-# string.
-#
-sub GenerateLanguageDoc ##{{{
-{
- # Get the references to the hash of class/base class.
- #
- my $rcb = shift;
-
- # Define templates for HTML table parts of the documentation. #{{{
- #
- my $htmlTableHead = <<'xxxTABLE_HEADxxx';
-\htmlonly
-<TABLE ALIGN=center CELLSPACING=0 CELLPADDING=0 BORDER=0>
-<TR BGCOLOR="#000000">
-<TD>
- <TABLE CELLSPACING=1 CELLPADDING=2 BORDER=0>
- <TR BGCOLOR="#4040c0">
- <TD ><b><font size=+1 color="#ffffff"> Language </font></b></TD>
- <TD ><b><font size=+1 color="#ffffff"> Maintainer </font></b></TD>
- <TD ><b><font size=+1 color="#ffffff"> Contact address </font>
- <font size=-2 color="#ffffff">(remove the NOSPAM.)</font></b></TD>
- <TD ><b><font size=+1 color="#ffffff"> Status </font></b></TD>
- </TR>
-xxxTABLE_HEADxxx
-
- my $htmlTableRow = <<'xxxTABLE_ROWxxx';
- <TR BGCOLOR="#ffffff">
- <TD>$lang</TD>
- <TD>$maintainer</TD>
- <TD>$email</TD>
- <TD>$status</TD>
- </TR>
-xxxTABLE_ROWxxx
-
- my $htmlTableFoot = <<'xxxTABLE_FOOTxxx';
- </TABLE>
-</TD>
-</TR>
-</TABLE>
-\endhtmlonly
-xxxTABLE_FOOTxxx
- ##}}}
-
- # Define templates for LaTeX table parts of the documentation. #{{{
- #
- my $latexTableHead = <<'xxxTABLE_HEADxxx';
-\latexonly
-\begin{tabular}{|l|l|l|l|}
- \hline
- {\bf Language} & {\bf Maintainer} & {\bf Contact address} & {\bf Status} \\
- \hline
-xxxTABLE_HEADxxx
-
- my $latexTableRow = <<'xxxTABLE_ROWxxx';
- $lang & $maintainer & {\tt $email} & $status \\
-xxxTABLE_ROWxxx
-
- my $latexTableFoot = <<'xxxTABLE_FOOTxxx';
- \hline
-\end{tabular}
-\endlatexonly
-xxxTABLE_FOOTxxx
- ##}}}
-
- # Read the template of the documentation, and join the content
- # to a single string. #{{{
- #
- my $fin = "$docdir/$flangtpl";
- open(FIN, "< $fin") or die "\nError when open < $fin: $!";
- my @content = <FIN>;
- close FIN;
-
- my $output = join("", @content);
- ##}}}
-
- # Make and substitute the list of supported languages and their
- # number. #{{{
- #
- my @languages = sort grep { s{^Translator}{} } keys %{$rcb};
-
- my $numlang = @languages;
-
- $output =~ s{\$numlang}{$numlang};
-
- my $languages = join(", ", @languages);
- $languages =~ s{((\w+,\s){5})}{$1\n}g;
- $languages =~ s{Brazilian}{Brazilian Portuguese};
- $languages =~ s{Chinesetraditional}{Chinese Traditional};
- $languages =~ s{(,\s+)(\w+)$}{$1and $2}s;
-
- $output =~ s{\$languages}{$languages};
- ##}}}
-
- # Create the hash of languages with the initial info. #{{{
- #
- my %language = ();
-
- foreach (@languages) {
- $language{$_} = $$rcb{"Translator$_"} . "<msep/>unknown: unknown";
- }
- ##}}}
-
- # Read the information related to maintainers into the
- # string using suitable separators -- one line, one language. #{{{
- #
- $fin = "$docdir/$fmaintainers";
- open(FIN, "< $fin") or die "\nError when open < $fin: $!";
- my @maintainers = <FIN>;
- close FIN;
-
- my $maintainers = join("", @maintainers);
-
- # Trim the spaces on the lines. Strip the comment lines that
- # start with % sign.
- #
- $maintainers =~ s{^[ \t]+}{}mg;
- $maintainers =~ s{[ \t]+$}{}mg;
-
- $maintainers =~ s{^%.*$}{}mg;
-
- # Join the information for one language into one line,
- # and remove empty lines.
- #
- $maintainers =~ s{\b\n(\S)}{<sep/>$1}sg;
- $maintainers =~ s{\n{2,}}{\n}sg;
- $maintainers =~ s{^\n+}{}s;
- $maintainers =~ s{\n+$}{}s;
- ##}}}
-
- # Split the string back to the list, and update the information
- # in the hash with information for languages. #{{{
- #
- foreach my $line (sort split(/\n/, $maintainers)) {
-
- # Split the line for one language to separate lines for
- # the language and one or more maintainers.
- #
- my @info = split(/<sep\/>/, $line);
-
- my $lang = shift @info;
-
- # Ensure that the language starts with uppercase and
- # continues with lowercase.
- #
- $lang =~ s{^(\w)(\w+)}{\U$1\L$2\E};
-
- # Add information to the %language hash. If the language
- # was not defined in sources, add the question mark to the
- # language identifier.
- #
- if (exists($language{$lang})) {
- $language{$lang} = $$rcb{"Translator$lang"} . '<msep/>'
- . join("<sep/>", @info);
- }
- else {
- $lang .= " (?)";
- $language{$lang} = "unknown<msep/>" . join("<sep/>", @info);
- }
- }
- ##}}}
-
- # Now, the %language hash contains all the information needed for
- # generating the tables (HTML and LaTeX). Define string variables
- # for each of the tables, and initialize them. #{{{
- #
- my $tableHTML = $htmlTableHead;
- my $tableLATEX = $latexTableHead;
- ##}}}
-
- # Loop through sorted keys for the languages, parse the
- # information, and add it to the tables. #{{{
- #
- foreach my $lang (sort keys %language) {
-
- # Transform the key for the language into more human readable
- # form. Basically, only languages with two words are going to be
- # corrected. #{{{
- #
- my $lang_readable = $lang;
- $lang_readable =~ s{Brazilian}{Brazilian Portuguese};
- $lang_readable =~ s{Chinesetraditional}{Chinese Traditional};
- ##}}}
-
- # Read the line with info for the language and separate
- # the status. #{{{
- #
- my @list = split(/<msep\/>/, $language{$lang});
- my $status = shift @list;
-
- my $i = $status =~ s{^Translator$}{up-to-date};
-
- if ($i == 0) {
- $i = $status =~ s{^TranslatorAdapter_(\d)_(\d)_(\d)}
- {$1.$2.$3}x;
- }
-
- if ($i == 0) {
- $i = $status =~ s{^TranslatorAdapter_(\d)_(\d)}
- {$1.$2}x;
- }
-
- if ($i == 0) {
- $i = $status =~ s{^TranslatorEnglish$}
- {obsolete}x;
- }
-
- if ($i == 0) { $status = 'strange'; }
-
- ##}}}
-
- # Split the rest of the list (should be a single item) into
- # the list with one or more maintainers -- one line, one
- # maintainer. #{{{
- #
- my $rest = shift @list;
- @list = split(/<sep\/>/, $rest);
- ##}}}
-
- # In HTML table, maintainer names are placed in the same
- # cell. Also their e-mails are placed in a single cell.
- # Extract the string with concatenated names and the string
- # with concatenated e-mails. Add the row to the HTML
- # table. #{{{
- #
- my $name = '';
- my $email = '';
-
- foreach my $maintainer (@list) {
-
- if ($name ne '') { $name .= '<br>'; }
- if ($email ne '') { $email .= '<br>'; }
-
- $maintainer =~ m{^\s*(.+?)\s*:\s*(.+?)\s*$};
-
- $name .= $1;
- $email .= $2;
- }
-
- # Prepare the HTML row template, modify it, and add the
- # result to the HTML table.
- #
- my $item = $htmlTableRow;
-
- $item =~ s{\$lang}{$lang_readable};
- $item =~ s{\$maintainer}{$name};
- $item =~ s{\$email}{$email};
- $item =~ s{\$status}{$status};
-
- $tableHTML .= $item;
-
- ##}}}
-
- # For LaTeX, more maintainers for the same language are
- # placed on separate rows in the table. The line separator
- # in the table is placed explicitly above the first
- # maintainer. Add rows for all maintainers to the LaTeX
- # table. #{{{
- #
- # Prepare the LATEX row template, modify it, and add the
- # result to the LATEX table.
- #
- $item = $latexTableRow;
-
- my $first = shift @list; # the first maintainer.
- $first =~ m{^\s*(.+?)\s*:\s*(.+?)\s*$};
-
- $name = $1;
- $email = $2;
-
- $item =~ s{\$lang}{$lang_readable};
- $item =~ s{\$maintainer}{$name};
- $item =~ s{\$email}{$email};
- $item =~ s{\$status}{$status};
-
- $tableLATEX .= " \\hline\n" . $item;
-
- # List the other maintainers for the language. Do not set
- # lang and status for them.
- #
- while (@list) {
- my $next = shift @list;
- $next =~ m{^\s*(.+?)\s*:\s*(.+?)\s*$};
-
- my $name = $1;
- my $email = $2;
- my $item = $latexTableRow;
-
- $item =~ s{\$lang}{};
- $item =~ s{\$maintainer}{$name};
- $item =~ s{\$email}{$email};
- $item =~ s{\$status}{};
-
- $tableLATEX .= $item;
- }
- ##}}}
- }
- ##}}}
-
- # Finish the tables, and substitute the mark in the doc
- # template by the concatenation of the tables. Add NOSPAM to
- # email addresses in the HTML table. Replace the special
- # character sequences. #{{{
- #
- $tableHTML .= $htmlTableFoot;
- $tableLATEX .= $latexTableFoot;
-
- $tableHTML =~ s{@}{\@NOSPAM.}sg;
- $tableHTML =~ s{&ccaron;}{&#x010d;}sg;
- $tableHTML =~ s{&rcaron;}{&#x0159;}sg;
-
- $tableLATEX =~ s/&aacute;/\\'{a}/sg;
- $tableLATEX =~ s/&Aacute;/\\'{A}/sg;
- $tableLATEX =~ s/&auml;/\\"{a}/sg;
- $tableLATEX =~ s/&ouml;/\\"{o}/sg;
- $tableLATEX =~ s/&oslash;/\\o{}/sg;
- $tableLATEX =~ s/&ccaron;/\\v{c}/sg;
- $tableLATEX =~ s/&rcaron;/\\v{r}/sg;
- $tableLATEX =~ s/_/\\_/sg;
-
- $output =~ s{\$information_table}{$tableHTML$tableLATEX};
-
- ##}}}
-
- # Replace the other symbols in the template by the expected
- # information. ##{{{
- #
- $output =~ s{\$version}{$doxversion};
-
- $output =~ s{\$translator_report_file_name}
- {<code>doxygen/doc/$ftranslatortxt</code>}x;
-
- $output =~ s{\$translator_report_link}
- {<a href=\"../doc/$ftranslatortxt\">
- <code>doxygen/doc/$ftranslatortxt</code></a>}x;
- ##}}}
-
- # Replace the introduction notice in the output. #{{{
- #
- $output =~ s{<notice>.+?</notice>}
-{Warning: this file was generated from the $flangtpl template
- * and the $fmaintainers files by the $0 script.
- *
- * Do not edit this file. Edit the above mentioned files!}sx;
- ##}}}
-
- # Return the content of the generated output file.
- #
- return $output;
-}
-##}}}
-
-
-################################################################
-# CopyTemplateToLanguageDoc takes the $flangtpl template and
-# generates $flangdoc without using information from other
-# sources. This function is called when source files were not found.
-# The marks inside the template are replaced by warning-like
-# explanations that something could not be done because sources
-# were not available. Writes directly to the file, returns nothing.
-#
-sub CopyTemplateToLanguageDoc ##{{{
-{
- # The template file will be the source.
- #
- my $fin = "$docdir/$flangtpl";
-
- # Let's open the template and read it all into one string.
- #
- open(FIN, "< $fin") or die "\nError when open < $fin: $!";
- my @content = <FIN>;
- close FIN;
- my $cont = join("", @content);
-
- # Replace the template marks by some notices.
- #
- $cont =~ s{<notice>.+?</notice>}
-{Warning: this file was generated from the $flangtpl template
- * by the $0 script. As doxygen sources were not available
- * at that time, some information could not be extracted
- * and inserted into this file.
- *
- * Do not edit this file. Edit the above mentioned files!}sx;
-
- $cont =~ s{\$version}{$doxversion};
-
- $cont =~ s{\$numlang}
- {<b>[number of supported languages could not be extracted -- no sources]</b>};
-
- $cont =~ s{\$languages}
- {<b>[names of languages could not be extracted -- no sources]</b>};
-
- $cont =~ s{\$information_table}
- {<b>[Information table could not be extracted -- no sources.]</b>};
-
- $cont =~ s{\$translator_report_file_name}
- {$ftranslatortxt <b>[translator report could not be
- generated -- no sources]</b>}x;
-
- $cont =~ s{\$translator_report_link}{<b>[no sources, no link]</b>};
-
- # Let's open the output file and copy the template content there.
- #
- my $fout = "$docdir/$flangdoc";
-
- open(FOUT, "> $fout") or die "\nError when open > $fout: $!";
- print FOUT $cont;
- close FOUT;
-}
-##}}}
-
-
-################################################################
-# Body
-#
-{
- # Set the content of global variables using the environment
- # variables. #{{{
- #
- $docdir = (defined $ENV{'DOXYGEN_DOCDIR'})
- ? $ENV{'DOXYGEN_DOCDIR'} : '.';
-
- $docdir =~ s{\\}{/}g;
- $docdir =~ s{/$}{};
-
- $doxygenrootdir = ($docdir eq '.') ? '..' : $docdir;
- $doxygenrootdir =~ s{/doc$}{};
-
- $srcdir = "$doxygenrootdir/src";
-
-=pod
-# Show the environment variables (for debugging only).
-#
-foreach (sort keys %ENV) { print STDERR "$_=$ENV{$_}\n"; }
-print STDERR "\n\n";
-=cut
-
- $doxversion = (defined $ENV{'VERSION'}) ? $ENV{'VERSION'} : 'unknown';
-
- ##}}}
-
- # The translator base class must be present. Exit otherwise,
- # but be kind to those who already have the documentation
- # generated by this script ready, but who do not have sources.
- # If no $flangdoc is present, copy the template to it. #{{{
- #
- if (!-f "$srcdir/translator.h") {
- print STDERR "\nThe $0 warning:\n"
- . "\tThe translator.h not found in $srcdir.\n"
- . "\tThe $ftranslatortxt will not be "
- . "generated (you don't need it).\n";
-
-        # If no $flangdoc is present, copy the template to it.
- #
- if (!-f "$docdir/$flangdoc") {
-
-            # Copy the template document to $flangdoc with simplified
- # replacement of the markers inside the template.
- #
- CopyTemplateToLanguageDoc();
-
- # Generate the warning about $flangdoc content.
- #
- print STDERR "\nThe $0 warning:\n"
- . "\tThe $flangdoc not found in the '$docdir' directory.\n"
- . "\tThe $flangtpl template content copied into it.\n"
- . "\tAs the sources are not available, some information\n"
- . "\tcould not be extracted and inserted into $flangdoc.\n";
- }
-
- # Exit as if nothing happened.
- #
- exit 0;
- }
- ##}}}
-
- # Find all translator_xx.h file names. #{{{
- #
- my @entries = (); # init
-
- opendir DIR, $srcdir or confess "opendir error for $srcdir: $!";
- foreach (readdir(DIR)) { if (!/^\./) { push @entries, $_; } }
- closedir DIR; # ignore names with dot at the beginning
-
- my @files = sort
- grep { ! m{^translator_adapter\.h$}i }
- grep { -f "$srcdir/$_" && m{^translator_\w+\.h$}i }
- @entries;
- ##}}}
-
- # Get only the pure virtual methods from the Translator class
- # into a hash structure for later testing present/not present.
- #
- my @expected = GetPureVirtualFrom("$srcdir/translator.h");
-
- # The details for translators will be collected into the output
- # string. If some details are listed for a translator, the flag
-    # will be set to produce a possible warning in the list of
- # up-to-date translators.
- #
- my $output = '';
- my %details = ();
-
- # Initialize the list of the required methods.
- #
- my %required = ();
-
- # Remove the argument identifiers from the method prototypes
- # to get only the required form of the prototype. Fill the
- # hash with them. #{{{
- #
- foreach (@expected) {
- my $prototype = StripArgIdentifiers($_);
- $required{$prototype} = 1;
- }
- ##}}}
-
- # Collect base classes of translators in the hash. CB stands
- # for Class and Base.
- #
- my %cb = ();
-
- # Loop through all translator files. Extract the implemented
- # virtual methods and compare it with the requirements. Prepare
- # the output.
- #
- foreach (@files) {
-
- # Get the information from the sources. Remember the base
- # class for each of the classes. Clear the flag for
- # details for the class. #{{{
- #
- my @info = GetInfoFrom("$srcdir/$_");
-
- my $class = shift @info;
- my $base = shift @info;
-
- $cb{$class} = $base;
- $details{$class} = 0;
-
- ##}}}
-
- # Set the value of the required methods to 1 (true). Let
- # this indicate that the method was not defined in the
- # translator class.
- #
- foreach (keys %required) { $required{$_} = 1; }
-
- # Loop through all items and compare the prototypes. Mark
- # the implemented method and collect the old ones. #{{{
- #
- my @old_methods = ();
-
- foreach my $implemented (@info) {
-
- # Get only the necessary form of the prototype.
- #
- my $prototype = StripArgIdentifiers($implemented);
-
- # Mark as recognized when the prototype is required.
-            # Otherwise, remember it as an old method which is
- # implemented, but not required.
- #
- if (exists($required{$prototype})) {
- $required{$prototype} = 0; # satisfaction
- }
- else {
- push(@old_methods, $implemented);
- }
- }
- ##}}}
-
- # Loop through the list of expected methods and collect
- # the missing (new) methods. Do this only when it derives
- # from Translator or TranslatorAdapter classes (i.e. ignore
- # any unusual kind of TranslatorXxxx implementation).
- # Accept also deriving from TranslatorEnglish, that can
- # be done by doxygen developers to solve problems with
- # some really outdated translators. #{{{
- #
- my @missing_methods = ();
-
- if ($base =~ m/^Translator(Adapter.*)?$/
- || $base =~ m/^TranslatorEnglish$/) {
- foreach my $method (@expected) {
-
- # Get the stripped version of the prototype.
- #
- my $prototype = StripArgIdentifiers($method);
-
- # If the prototype is stored in the %required
- # table, and if it was not marked as implemented,
- # then it should be. It is a missing method.
- #
- if (defined $required{$prototype} && $required{$prototype}) {
- push(@missing_methods, $method);
- }
- }
- }
- ##}}}
-
- # The detailed output will be produced only when it is
- # needed. #{{{
- #
- if (@old_methods || @missing_methods
- || $base !~ m/^Translator(Adapter.*)?$/) {
-
- $output .= "\n\n\n";
- $output .= $class . " ($base)\n" . '-' x length($class) . "\n";
-
- if ($base =~ m/^TranslatorEnglish$/) {
- $output .= "\nThis translator is implemented via deriving "
- . "from the English translator.\n"
- . "This should be done only in the case when "
- . "the language maintainer\n"
- . "or the doxygen "
-                          . "developers need to update some really outdated "
- . "translator.\n"
- . "Otherwise, deriving from "
- . "the translator adapter classes should be used\n"
- . "for obsolete translators. "
- . "If you still want some texts to be in English\n"
- . "copy the sources of the English translator.\n\n"
- . "The obsolete and missing method lists (below) "
-                          . "reflect what has to be done\n"
- . "to derive "
- . "directly from the Translator class "
- . "(i.e. to reach up-to-date status).\n";
- }
- elsif ($base !~ m/^Translator(Adapter.*)?$/) {
- $output .= "\nThis is some unusual implementation of the "
- . "translator class. It is derived\n"
-                       . "from the $base base class. The usual translator "
-                       . "class derives\n"
-                       . "either from the Translator class or from some "
- . "TranslatorAdapter_x_x_x classes.\n"
- . "Because of that, nothing can be guessed about "
- . "missing or obsolete methods.\n";
- }
-
- if (@missing_methods) {
- $output .= "\nMissing methods (should be implemented):\n\n";
- foreach (@missing_methods) { $output .= " $_\n"; }
- }
-
- if (@old_methods) {
- $output .= "\nObsolete methods (should be removed):\n\n";
- foreach (sort @old_methods) { $output .= " $_\n"; }
- }
-
- # Some details were listed, set the details flag for
- # the class.
- #
- $details{$class} = 1;
- }
- ##}}}
- }
-
-
- # Generate the ASCII output file.
- #
- my $fout = "$docdir/$ftranslatortxt";
-
- # Open it first, and output the version information. #{{{
- #
- open(FOUT, "> $fout") or die "\nError when open > $fout: $!";
-
- print FOUT "(version $doxversion)\n\n";
- ##}}}
-
- # List the supported languages. #{{{
- #
- my @all_translators = keys %cb;
-
- print FOUT "Doxygen supports the following (" . @all_translators
- . ") languages (sorted alphabetically):\n\n";
-
- my @languages = sort
- grep { s/^Translator(\w+)\b.*$/$1/ }
- @all_translators;
-
- my $languages = join(", ", @languages);
- $languages =~ s{((\w+,\s){5})}{$1\n}g;
- $languages =~ s{Brazilian}{Brazilian Portuguese};
- $languages =~ s{Chinesetraditional}{Chinese Traditional};
- $languages =~ s{(,\s+)(\w+)$}{$1and $2.}s;
-
- print FOUT "$languages\n";
- ##}}}
-
- # If there are up-to-date translators, list them. #{{{
- #
- my @list = sort grep { $cb{$_} =~ m/^Translator$/ } keys %cb;
-
- if (@list) {
- print FOUT "\n" .'-' x 70 . "\n";
- print FOUT "The following translator classes are up-to-date "
- . "(sorted alphabetically).\n"
- . "This means that they derive from the Translator class. "
- . "Anyway, there still\n"
- . "may be some details listed even for "
- . "the up-to-date translators.\n"
- . "Please, check the text below if the translator "
- . "is marked so.\n\n";
-
- foreach (@list) {
-
- # Print the class name.
- #
- print FOUT " $_";
-
- # If some details were listed for the translator class,
- # add a notice.
- #
- if ($details{$_}) {
- print FOUT "\t-- see details below in the report";
- }
-
- print FOUT "\n";
- }
- }
- ##}}}
-
- # If there are obsolete translators, list them. #{{{
- #
- @list = sort grep { $cb{$_} =~ m/^TranslatorAdapter_/ } keys %cb;
-
- if (@list) {
- print FOUT "\n" .'-' x 70 . "\n";
- print FOUT "The following translator classes are obsolete "
- . "(sorted alphabetically).\n"
- . "This means that they derive from some of "
- . "the adapter classes.\n\n";
-
- foreach (@list) { print FOUT " $_\t($cb{$_})\n"; }
- }
- ##}}}
-
- # If there are translators derived from TranslatorEnglish, list them
- # and name them as obsolete. #{{{
- #
- @list = sort grep { $cb{$_} =~ m/^TranslatorEnglish$/ } keys %cb;
-
- if (@list) {
- print FOUT "\n" .'-' x 70 . "\n";
- print FOUT "The following translator classes are implemented "
- . "via deriving\n"
- . "from the English translator. This should be done only "
- . "in the case\n"
- . "when the language maintainer or the doxygen "
- . "developers need to update\n"
- . "some really outdated translator. Otherwise, deriving "
- . "from\n"
-              . "the translator adapter classes should be preferred "
- . "for obsolete translators.\n"
- . "See details below in the report.\n\n";
-
- foreach (@list) { print FOUT " $_\t($cb{$_})\n"; }
- }
- ##}}}
-
- # If there are other translators, list them. #{{{
- #
- @list = sort
- grep { $cb{$_} !~ m/^Translator$/ }
- grep { $cb{$_} !~ m/^TranslatorAdapter_/ }
- grep { $cb{$_} !~ m/^TranslatorEnglish$/ }
- keys %cb;
-
- if (@list) {
- print FOUT "\n" .'-' x 70 . "\n";
- print FOUT "The following translator classes are somehow different\n"
- . "(sorted alphabetically). This means that they "
- . "do not derive from\n"
- . "the Translator class, nor from some of the adapter "
- . "classes,\n"
- . "nor from the TranslatorEnglish. Nothing can be guessed "
- . "about the methods.\n\n";
-
- foreach (@list) { print FOUT " $_\t($cb{$_})\n"; }
- }
- ##}}}
-
- # List all the translator adapter classes to show for which versions
- # the adapters had to be created. Show, how many and what new methods
- # are implemented by the adapters. #{{{
- #
- print FOUT "\n" .'-' x 70 . "\n";
- print FOUT <<'xxxENDxxx';
-The following translator adapter classes are implemented -- the older (with
-lower number) are always derived from the newer. They implement the
-listed required methods. Notice that some versions of doxygen did not
-introduce any changes related to the language translators. From here you may
-guess how much work should be done to update your translator:
-
-xxxENDxxx
-
- my @adapter_info = GetAdapterClassesInfo(\%required);
-
- foreach (@adapter_info) { print FOUT " $_"; }
-
- ##}}}
-
- # List the methods that are expected to be implemented. #{{{
- #
- print FOUT "\n" .'-' x 70 . "\n";
- print FOUT "Localized translators are expected to implement "
- . "the following methods\n"
-              . "(prototypes sorted alphabetically):\n\n";
-
- foreach (sort @expected) { print FOUT "$_\n"; }
- ##}}}
-
- # If there are some details for the translators, show them. #{{{
- #
- if ($output !~ m/^\s*$/) {
- print FOUT "\n\n" .'=' x 70 . "\n";
- print FOUT "Details related to specific translator classes follow.\n";
-
- print FOUT $output . "\n";
- }
- ##}}}
-
- # Close the ASCII output file
- #
- close FOUT;
-
- # Generate the language.doc file.
- #
- $fout = "$docdir/$flangdoc";
-
- # Open it first for the output.
- #
- open(FOUT, "> $fout") or die "\nError when open > $fout: $!";
-
- print FOUT GenerateLanguageDoc(\%cb);
-
- # Close the output file
- #
- close FOUT;
-
-
- exit 0;
-}
-# end of body
-################################################################
-
diff --git a/doc/translator.py b/doc/translator.py
index 177db97..aa182f0 100644
--- a/doc/translator.py
+++ b/doc/translator.py
@@ -14,1106 +14,1651 @@
python translator.py en nl cz
Originally, the script was written in Perl and was known as translator.pl.
- The last Perl version was dated 2002/05/21
-
- Petr Prikryl (prikrylp@skil.cz)
-"""
-
-# History:
-# --------
-# 2002/05/21
-# - This was the last Perl version.
-# 2003/05/16
-# - If the script is given list of languages, only the translator report
-# is generated and only for those languages.
-#
-################################################################
-
-import os, re, sys
+ The last Perl version was dated 2002/05/21 (plus some later corrections)
-# Global informations are stored in the global 'info' dictionary.
-# This dictionary should be accessed only via GetInfo() because the first
-# call initializes the empty dictionary. (I was too lazy to create
-# a singleton.)
-#
-info = {}
-
-def GetInfo():
- """Returns reference to the info dictionary.
-
- If the dictionary is empty, it will be filled with some initial values.
- """
-
- global info
-
- # If the dictionary with globally shared information is empty, then
- # fill the static values.
- if not info:
- # Get the name of the script without the path and the path without name.
- scriptpath, scriptname = os.path.split(os.path.abspath(sys.argv[0]))
- info['scriptname'] = scriptname
-
- # Determine the Doxygen's doc directory. If the DOXYGEN_DOCDIR
- # environment variable is defined, then it says where the directory
- # is. If it is not, then it will be directory where this script is
- # placed.
- docdir = os.getenv('DOXYGEN_DOCDIR', '*')
- if docdir == '*':
- docdir = scriptpath
+ $Id$
+
+ Petr Prikryl (prikrylp@skil.cz)
+
+ History:
+ --------
+ 2002/05/21
+ - This was the last Perl version.
+ 2003/05/16
+    - If the script is given a list of languages, only the translator report
+ is generated and only for those languages.
+ 2004/01/24
+ - Total reimplementation just started: classes TrManager, and Transl.
+ 2004/02/05
+ - First version that produces translator report. The documentation
+ in the language.doc is not generated yet.
+ 2004/02/10
+ - First fully functional version that generates both the translator
+ report and the documentation. It is a bit slower than the Perl version,
+ but is much less tricky and much more flexible. It also solves some
+ problems that were not solved by the Perl version. The translator report
+ content should be more useful for developers.
+ 2004/02/11
+ - Some tuning-up to provide more useful information.
+ """
+
+import os, re, sys, textwrap
+
+
+def fill(s):
+ """Returns string formated to the wrapped paragraph multiline string.
+
+ Replaces whitespaces by one space and then uses he textwrap.fill()."""
+ rexWS = re.compile(r'\s+')
+ return textwrap.fill(rexWS.sub(' ', s))
+
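[Editorial usage sketch, not part of the patch: the fill() helper above collapses every run of whitespace to a single space and then wraps the result at textwrap's default width of 70 columns; the sample string is illustrative only.]

sample = 'Returns  the\n    translator report\n  as plain text.'
print(fill(sample))   # -> Returns the translator report as plain text.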
+
+class Transl:
+ """One instance is build for each translator.
+
+ The abbreviation of the source file--part after 'translator_'--is used as
+ the identification of the object. The empty string is used for the
+ abstract Translator class from translator.h. The other information is
+ extracted from inside the source file."""
+
+ def __init__(self, fname, manager):
+ """Bind to the manager and initialize."""
+
+ # Store the filename and the reference to the manager object.
+ self.fname = fname
+ self.manager = manager
+
+ # The instance is responsible for loading the source file, so it checks
+ # for its existence and quits if something goes wrong.
+ if not os.path.isfile(fname):
+ sys.stderr.write("\a\nFile '%s' not found!\n" % fname)
+ sys.exit(1)
- docdir = os.path.abspath(docdir)
- info['docdir'] = docdir
-
- # Doxygen's root directory is just one above the docdir.
- doxygenrootdir = os.path.abspath(os.path.join(docdir, '..'))
- info['doxygenrootdir'] = doxygenrootdir
-
- # Doxygen's src directory is just below its root.
- info['srcdir'] = os.path.join(doxygenrootdir, 'src')
-
- # Doxygen's current version is read from the 'version' file in the root.
- try:
- fver = file(os.path.join(doxygenrootdir, 'version'))
- doxversion = fver.readline().strip()
- fver.close()
- info['doxversion'] = doxversion
- except IOError:
- info['doxversion'] = 'unknown'
+ # Initialize the other collected information.
+ self.classId = None
+ self.baseClassId = None
+ self.readableStatus = None # 'up-to-date', '1.2.3', '1.3', etc.
+ self.status = None # '', '1.2.03', '1.3.00', etc.
+        self.lang = None            # like 'Brazilian'
+        self.langReadable = None    # like 'Brazilian Portuguese'
+ self.note = None # like 'should be cleaned up'
+ self.prototypeDic = {} # uniPrototype -> prototype
+ self.obsoleteMethods = None # list of prototypes to be removed
+ self.missingMethods = None # list of prototypes to be implemented
+ self.implementedMethods = None # list of implemented required methods
+ self.adaptMinClass = None # The newest adapter class that can be used
+ def __tokenGenerator(self):
+ """Generator that reads the file and yields tokens as 4-tuples.
- # Names of the template files and other intput files (template for
- # language.doc and the database of local language maintainers).
- info['flangtplname'] = 'language.tpl'
- info['flangtpl'] = os.path.join(docdir, info['flangtplname'])
- info['fmaintainersname'] = 'maintainers.txt'
- info['fmaintainers'] = os.path.join(docdir, info['fmaintainersname'])
+ The tokens have the form (tokenId, tokenString, lineNo). The
+ last returned token has the form ('eof', None, None). When trying
+        to access the next token after that, an exception will be raised."""
+
+ # Set the dictionary for recognizing tokenId for keywords, separators
+        # and similar categories. The key is the string to be recognized;
+        # the value is its token identification.
+ tokenDic = { 'class': 'class',
+ 'const': 'const',
+ 'public': 'public',
+ 'protected': 'protected',
+ 'private': 'private',
+ 'static': 'static',
+ 'virtual': 'virtual',
+ ':': 'colon',
+ ';': 'semic',
+ ',': 'comma',
+ '[': 'lsqbra',
+ ']': 'rsqbra',
+ '(': 'lpar',
+ ')': 'rpar',
+ '{': 'lcurly',
+ '}': 'rcurly',
+ '=': 'assign',
+ '*': 'star',
+ '&': 'amp',
+ '+': 'plus',
+ '-': 'minus',
+ '!': 'excl',
+ '?': 'qmark',
+ '<': 'lt',
+ '>': 'gt',
+ "'": 'quot',
+ '"': 'dquot',
+ '.': 'dot',
+ '%': 'perc',
+ }
+
+ # Regular expression for recognizing identifiers.
+ rexId = re.compile(r'^[a-zA-Z]\w*$')
+
+ # Open the file for reading and extracting tokens until the eof.
+ # Initialize the finite automaton.
+ f = file(self.fname)
+ lineNo = 0
+ line = '' # init -- see the pos initialization below
+ linelen = 0 # init
+ pos = 100 # init -- pos after the end of line
+ status = 0
+
+ tokenId = None # init
+ tokenStr = '' # init -- the characters will be appended.
+ tokenLineNo = 0
- # Names of the output files.
- info['ftranslatortxtname'] = 'translator_rep.txt'
- info['ftranslatortxt'] = os.path.join(docdir, info['ftranslatortxtname'])
- info['flangdocname'] = 'language.doc'
- info['flangdoc'] = os.path.join(docdir, info['flangdocname'])
-
- # If the script is given one or more arguments, they should be codes
- # of languages (two letters). Convert them into lower case and
- # build the list of them. Empty list will be interpreted as the request
- # for processing all languages.
- langlist = []
- if len(sys.argv) > 1:
- langlist = sys.argv[1:]
- info['languages'] = langlist
+ while status != 777:
- # Create the dictionary of the required method. Keys will be the unified
- # method prototypes, values will be True (for easy testing).
- info['required_methods'] = {}
+ # Get the next character. Read next line first, if necessary.
+ if pos < linelen:
+ c = line[pos]
+ else:
+ lineNo += 1
+ line = f.readline()
+ linelen = len(line)
+ pos = 0
+ if line == '': # eof
+ status = 777
+ else:
+ c = line[pos]
+
+ # Consume the character based on the status
+
+ if status == 0: # basic status
+
+ # This is the initial status. If tokenId is set, yield the
+ # token here and only here (except when eof is found).
+ # Initialize the token variables after the yield.
+ if tokenId:
+ # If it is an unknown item, it can still be recognized
+                    # here. Keywords and separators are examples.
+ if tokenId == 'unknown':
+ if tokenDic.has_key(tokenStr):
+ tokenId = tokenDic[tokenStr]
+ elif tokenStr.isdigit():
+ tokenId = 'num'
+ elif rexId.match(tokenStr):
+ tokenId = 'id'
+ else:
+ msg = '\aWarning: unknown token "' + tokenStr + '"'
+ msg += '\tfound on line %d' % tokenLineNo
+                        msg += ' in "' + self.fname + '".\n'
+ sys.stderr.write(msg)
+
+ yield (tokenId, tokenStr, tokenLineNo)
+ tokenId = None
+ tokenStr = ''
+ tokenLineNo = 0
+
+ # Now process the character. When we just skip it (spaces),
+ # stay in this status. All characters that will be part of
+ # some token cause moving to the specific status. And only
+ # when moving to the status == 0 (or the final state 777),
+                # the token is yielded. With respect to that, the automaton
+                # behaves as a Moore machine (output bound to the status). When
+                # collecting tokens, it behaves as a Mealy machine (actions
+                # bound to transitions).
+ if c.isspace():
+ pass # just skip whitespace characters
+ elif c == '/': # Possibly comment starts here, but
+ tokenId = 'unknown' # it could be only a slash in code.
+ tokenStr = c
+ tokenLineNo = lineNo
+ status = 1
+ elif c == '#':
+ tokenId = 'preproc' # preprocessor directive
+ tokenStr = c
+ tokenLineNo = lineNo
+ status = 5
+ elif c == '"': # string starts here
+ tokenId = 'string'
+ tokenStr = c
+ tokenLineNo = lineNo
+ status = 6
+ elif c == "'": # char literal starts here
+ tokenId = 'charlit'
+ tokenStr = c
+ tokenLineNo = lineNo
+ status = 8
+ elif tokenDic.has_key(c): # known one-char token
+ tokenId = tokenDic[c]
+ tokenStr = c
+ tokenLineNo = lineNo
+ # stay in this state to yield token immediately
+ else:
+ tokenId = 'unknown' # totally unknown
+ tokenStr = c
+ tokenLineNo = lineNo
+ status = 333
+
+ pos += 1 # move position in any case
+
+ elif status == 1: # possibly a comment
+ if c == '/': # ... definitely the C++ comment
+ tokenId = 'comment'
+ tokenStr += c
+ pos += 1
+ status = 2
+ elif c == '*': # ... definitely the C comment
+ tokenId = 'comment'
+ tokenStr += c
+ pos += 1
+ status = 3
+ else:
+ status = 0 # unrecognized, don't move pos
+
+ elif status == 2: # inside the C++ comment
+ if c == '\n': # the end of C++ comment
+ status = 0 # yield the token
+ else:
+ tokenStr += c # collect the C++ comment
+ pos += 1
+
+ elif status == 3: # inside the C comment
+ if c == '*': # possibly the end of the C comment
+ tokenStr += c
+ status = 4
+ else:
+ tokenStr += c # collect the C comment
+ pos += 1
+
+ elif status == 4: # possibly the end of the C comment
+ if c == '/': # definitely the end of the C comment
+ tokenStr += c
+ status = 0 # yield the token
+ elif c == '*': # more stars inside the comment
+ tokenStr += c
+ else:
+ tokenStr += c # this cannot be the end of comment
+ status = 3
+ pos += 1
+
+ elif status == 5: # inside the preprocessor directive
+ if c == '\n': # the end of the preproc. command
+ status = 0 # yield the token
+ else:
+ tokenStr += c # collect the preproc
+ pos += 1
+
+ elif status == 6: # inside the string
+ if c == '\\': # escaped char inside the string
+ tokenStr += c
+ status = 7
+ elif c == '"': # end of the string
+ tokenStr += c
+ status = 0
+ else:
+ tokenStr += c # collect the chars of the string
+ pos += 1
+
+ elif status == 7: # escaped char inside the string
+ tokenStr += c # collect the char of the string
+ status = 6
+ pos += 1
+
+ elif status == 8: # inside the char literal
+ tokenStr += c # collect the char of the literal
+ status = 9
+ pos += 1
+
+ elif status == 9: # end of char literal expected
+ if c == "'": # ... and found
+ tokenStr += c
+ status = 0
+ pos += 1
+ else:
+ tokenId = 'error' # end of literal was expected
+ tokenStr += c
+ status = 0
+
+ elif status == 333: # start of the unknown token
+ if c.isspace():
+ pos += 1
+ status = 0 # tokenId may be determined later
+ elif tokenDic.has_key(c): # separator, don't move pos
+ status = 0
+ else:
+ tokenStr += c # collect
+ pos += 1
+
+ # We should have finished in the final status. If some token
+        # has been extracted, yield it first.
+ assert(status == 777)
+ if tokenId:
+ yield (tokenId, tokenStr, tokenLineNo)
+ tokenId = None
+ tokenStr = ''
+ tokenLineNo = 0
+
+ # The file content is processed. Close the file. Then always yield
+ # the eof token.
+ f.close()
+ yield ('eof', None, None)
+
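[Editorial sketch, not part of the patch: a generator with the contract described above -- (tokenId, tokenStr, lineNo) tuples terminated by ('eof', None, None) -- can be consumed like any other iterator. A hypothetical debugging loop could look like this; dump_tokens is an illustrative name, not a method of Transl.]

def dump_tokens(token_iterator):
    # Print one token per line until the terminating 'eof' token is reached.
    for tokenId, tokenStr, lineNo in token_iterator:
        if tokenId == 'eof':
            break
        print('%4d  %-8s  %r' % (lineNo, tokenId, tokenStr))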
+
+ def __collectClassInfo(self, tokenIterator):
+ """Collect the information about the class and base class.
+
+ The tokens including the opening left curly brace of the class are
+ consumed."""
+
+ status = 0 # initial state
+
+ while status != 777: # final state
- return info
+ # Always assume that the previous tokens were processed. Get
+ # the next one.
+ tokenId, tokenStr, tokenLineNo = tokenIterator.next()
+
+ # Process the token and never return back.
+ if status == 0: # waiting for the 'class' keyword.
+ if tokenId == 'class':
+ status = 1
+
+ elif status == 1: # expecting the class identification
+ if tokenId == 'id':
+ self.classId = tokenStr
+ status = 2
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 2: # expecting the curly brace or base class info
+ if tokenId == 'lcurly':
+ status = 777 # correctly finished
+ elif tokenId == 'colon':
+ status = 3
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 3: # expecting the 'public' in front of base class id
+ if tokenId == 'public':
+ status = 4
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 4: # expecting the base class id
+ if tokenId == 'id':
+ self.baseClassId = tokenStr
+ status = 5
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 5: # expecting the curly brace and quitting
+ if tokenId == 'lcurly':
+ status = 777 # correctly finished
+ elif tokenId == 'comment':
+ pass
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ # Extract the status of the TranslatorXxxx class. The readable form
+        # will be used in reports; the status form is a string that can be
+ # compared lexically (unified length, padding with zeros, etc.).
+ if self.baseClassId:
+ lst = self.baseClassId.split('_')
+ if lst[0] == 'Translator':
+ self.readableStatus = 'up-to-date'
+ self.status = ''
+ elif lst[0] == 'TranslatorAdapter':
+ self.status = lst[1] + '.' + lst[2]
+ self.readableStatus = self.status
+ if len(lst) > 3: # add the last part of the number
+ self.status += '.' + ('%02d' % int(lst[3]))
+ self.readableStatus += '.' + lst[3]
+ else:
+ self.status += '.00'
+ elif lst[0] == 'TranslatorEnglish':
+ # Obsolete or Based on English.
+ if self.classId[-2:] == 'En':
+ self.readableStatus = 'English based'
+ self.status = 'En'
+ else:
+ self.readableStatus = 'obsolete'
+ self.status = '0.0.00'
+
+ # Check whether status was set, or set 'strange'.
+ if self.status == None:
+ self.status = 'strange'
+ if not self.readableStatus:
+ self.readableStatus = 'strange'
+
+ # Extract the name of the language and the readable form.
+ self.lang = self.classId[10:] # without 'Translator'
+ if self.lang == 'Brazilian':
+ self.langReadable = 'Brazilian Portuguese'
+ elif self.lang == 'Chinesetraditional':
+ self.langReadable = 'Chinese Traditional'
+ else:
+ self.langReadable = self.lang
+
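[Editorial sketch, not part of the patch: the zero padding above is what makes adapter versions comparable as plain strings. A hypothetical restatement of just that rule; adapter_status is an illustrative name only.]

def adapter_status(base_class_id):
    # 'TranslatorAdapter_1_2_2' -> readable '1.2.2', lexically comparable '1.2.02'
    parts = base_class_id.split('_')[1:]
    readable = '.'.join(parts)
    if len(parts) > 2:
        padded = '%s.%s.%02d' % (parts[0], parts[1], int(parts[2]))
    else:
        padded = '%s.%s.00' % (parts[0], parts[1])
    return padded, readable

print(adapter_status('TranslatorAdapter_1_2_2'))   # -> ('1.2.02', '1.2.2')
print(adapter_status('TranslatorAdapter_1_1_3'))   # -> ('1.1.03', '1.1.3')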
+
+ def __unexpectedToken(self, status, tokenId, tokenLineNo):
+ """Reports unexpected token and quits with exit code 1."""
-def CopyTemplateToLanguageDoc():
- """'flangtpl' + no src --> 'flangdoc'
-
- The function takes the 'flangtpl' template and generates 'flangdoc'
- without using information from other sources. This function is called
- when source files were not found. The marks inside the template are
- replaced by warning-like explanations that something could not be done
- because sources were not available. Writes directly to the file, returns
- nothing.
-
- If the script was called only for selected languages, the documentation
- is not generated.
- """
+ import inspect
+ calledFrom = inspect.stack()[1][3]
+ msg = "\a\nUnexpected token '%s' on the line %d in '%s'.\n"
+ msg = msg % (tokenId, tokenLineNo, self.fname)
+ msg += 'status = %d in %s()\n' % (status, calledFrom)
+ sys.stderr.write(msg)
+ sys.exit(1)
+
+
+ def collectPureVirtualPrototypes(self):
+ """Returns dictionary 'unified prototype' -> 'full prototype'.
- # Get the reference to the initialized info dictionary. If the script was
- # called for selected languages, return immediately.
- info = GetInfo()
- if not info['langlist']:
- return
-
- # Read the content of the template file.
- fin = file(info['flangtpl'])
- cont = fin.read()
- fin.close()
-
- # Replace the template marks by some notices.
- cont = re.sub(r'(?s)<notice>.+?</notice>',
- """Warning: this file was generated from the %(flangtplname)s template
- * by the %(scriptname)s script. As doxygen sources were not available
- * in that time, some information could not be extracted
- * and inserted into this file.
- *
- * Do not edit this file. Edit the above mentioned files!""", cont)
-
- cont = re.sub(r'(?s)\$version', '%(doxversion)s', cont)
- cont = re.sub(r'(?s)\$numlang',
- '<b>[number of supported languages could not be extracted -- no sources]</b>',
- cont)
+ The method is expected to be called only for the translator.h. It
+        extracts only the pure virtual methods and builds the dictionary where
+        the key is the unified prototype without argument identifiers."""
+
+ # Prepare empty dictionary that will be returned.
+ resultDic = {}
+
+ # Start the token generator which parses the class source file.
+ tokenIterator = self.__tokenGenerator()
+
+ # Collect the class and the base class identifiers.
+ self.__collectClassInfo(tokenIterator)
+ assert(self.classId == 'Translator')
+
+        # Let's collect the public pure virtual method prototypes in the
+        # readable form -- as defined in translator.h. Let's also collect
+        # the unified form of the same prototypes, which omits everything
+        # that can be omitted, namely 'virtual' and the argument identifiers.
+ prototype = '' # readable prototype (with everything)
+ uniPrototype = '' # unified prototype (without arg. identifiers)
+
+ # Collect the pure virtual method prototypes. Stop on the closing
+ # curly brace followed by the semicolon (end of class).
+ status = 0
+ curlyCnt = 0 # counter for the level of curly braces
+
+ # Loop until the final state 777 is reached. The errors are processed
+ # immediately. In this implementation, it always quits the application.
+ while status != 777:
- cont = re.sub(r'(?s)\$languages',
- '<b>[names of languages could not be extracted -- no sources]</b>', cont)
+ # Get the next token.
+ tokenId, tokenStr, tokenLineNo = tokenIterator.next()
- cont = re.sub(r'(?s)\$information_table',
- '<b>[Information table could not be extracted -- no sources.]</b>', cont)
+ if status == 0: # waiting for 'public:'
+ if tokenId == 'public':
+ status = 1
+
+ elif status == 1: # colon after the 'public'
+ if tokenId == 'colon':
+ status = 2
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 2: # waiting for 'virtual'
+ if tokenId == 'virtual':
+ prototype = tokenStr # but not to unified prototype
+ status = 3
+ elif tokenId == 'comment':
+ pass
+ elif tokenId == 'rcurly':
+ status = 11 # expected end of class
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 3: # return type of the method expected
+ if tokenId == 'id':
+ prototype += ' ' + tokenStr
+ uniPrototype = tokenStr # start collecting the unified prototype
+ status = 4
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 4: # method identifier expected
+ if tokenId == 'id':
+ prototype += ' ' + tokenStr
+ uniPrototype += ' ' + tokenStr
+ status = 5
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 5: # left bracket of the argument list expected
+ if tokenId == 'lpar':
+ prototype += tokenStr
+ uniPrototype += tokenStr
+ status = 6
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 6: # collecting arguments of the method
+ if tokenId == 'rpar':
+ prototype += tokenStr
+ uniPrototype += tokenStr
+ status = 7
+ elif tokenId == 'const':
+ prototype += tokenStr
+ uniPrototype += tokenStr
+ status = 12
+ elif tokenId == 'id': # type identifier
+ prototype += tokenStr
+ uniPrototype += tokenStr
+ status = 13
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 7: # assignment expected or left curly brace
+ if tokenId == 'assign':
+ status = 8
+ elif tokenId == 'lcurly':
+ curlyCnt = 1 # method body entered
+ status = 10
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 8: # zero expected
+ if tokenId == 'num' and tokenStr == '0':
+ status = 9
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 9: # after semicolon, produce the dic item
+ if tokenId == 'semic':
+ assert(not resultDic.has_key(uniPrototype))
+ resultDic[uniPrototype] = prototype
+ status = 2
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 10: # consuming the body of the method
+ if tokenId == 'rcurly':
+ curlyCnt -= 1
+ if curlyCnt == 0:
+ status = 2 # body consumed
+ elif tokenId == 'lcurly':
+ curlyCnt += 1
+
+ elif status == 11: # probably the end of class
+ if tokenId == 'semic':
+ status = 777
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 12: # type id for argument expected
+ if tokenId == 'id':
+ prototype += ' ' + tokenStr
+ uniPrototype += ' ' + tokenStr
+ status = 13
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 13: # namespace qualification or * or & expected
+ if tokenId == 'colon': # was namespace id
+ prototype += tokenStr
+ uniPrototype += tokenStr
+ status = 14
+ elif tokenId == 'star' or tokenId == 'amp': # pointer or reference
+ prototype += ' ' + tokenStr
+ uniPrototype += ' ' + tokenStr
+ status = 16
+ elif tokenId == 'id': # argument identifier
+ prototype += ' ' + tokenStr
+ # don't put this into unified prototype
+ status = 17
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 14: # second colon for namespace:: expected
+ if tokenId == 'colon':
+ prototype += tokenStr
+ uniPrototype += tokenStr
+ status = 15
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 15: # type after namespace:: expected
+ if tokenId == 'id':
+ prototype += tokenStr
+ uniPrototype += tokenStr
+ status = 13
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 16: # argument identifier expected
+ if tokenId == 'id':
+ prototype += ' ' + tokenStr
+ # don't put this into unified prototype
+ status = 17
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 17: # comma or ')' after argument identifier expected
+ if tokenId == 'comma':
+ prototype += ', '
+ uniPrototype += ', '
+ status = 6
+ elif tokenId == 'rpar':
+ prototype += tokenStr
+ uniPrototype += tokenStr
+ status = 7
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ # Eat the rest of the source to cause closing the file.
+ while tokenId != 'eof':
+ tokenId, tokenStr, tokenLineNo = tokenIterator.next()
- cont = re.sub(r'(?s)\$translator_report_file_name',
- '%(ftranslatortxt)s <b>[translator report could not be generated -- no sources]</b>',
- cont)
+ # Return the resulting dictionary with 'uniPrototype -> prototype'.
+ return resultDic
- cont = re.sub(r'(?s)\$translator_report_link',
- '<b>[no sources, no link]</b>', cont)
-
- # Replace the generated marks by the info from the info dictionary.
- #
- cont = cont % info
-
- # Write the template with replacements to the output doc.
- fout = file(info['flangdoc'], 'w')
- fout.write(cont)
- fout.close()
-
-
-def GetPureVirtualFrom(filename):
- """Returns the list of pure virtual method prototypes from the filename.
-
-    Each method prototype is returned as one string (one line, one list item).
- The input argument is the full name of the source file."""
-
- # Read the content of the file to one string and remove C comments,
- # one line comments, leading text to the first 'virtual' keyword,
- # text behind the class, and finally empty lines.
- f = file(filename)
- cont = f.read()
- f.close()
-
- cont = re.sub(r'(?s)/\*.+?\*/', '', cont) # C comments
- cont = re.sub(r'(?m)//.*$', '', cont) # C++ comments
- cont = 'virtual ' + re.sub(r'(?s)^.+?virtual\s', '', cont) # prefix
- cont = re.sub(r'(?s)};.+$', '', cont) # suffix
- cont = re.sub(r'(?s)\n\s*\n', r'\n', cont) # empty lines
-
- # Remove the empty implementation of the updateNeededMessage() method
- # which is to be implemented by adapters only, not by translators.
- cont = re.sub(r'(?s)\s*virtual\s+QCString\s+updateNeededMessage.+?}.*?\n',
- '', cont)
-
- # Erase anything between "=0;" and "virtual". Only the pure virtual
- # methods will remain. Remove also the text behind the last "= 0;"
- cont = re.sub(r'(?s)(=\s*0\s*;).*?(?P<vir>virtual)', r'=0;\n\g<vir>', cont)
- cont = re.sub(r'(?s)^(?P<x>.+=\s*0\s*;).*$', r'\g<x>', cont)
-
- # Replace all consequent white spaces (including \n) by a single
- # space. Strip also the leading and the trailing space.
- cont = re.sub(r'(?s)\s+', ' ', cont)
- cont = cont.strip()
-
- # Split internally the string into lines by replacing the '= 0;' by '\n'.
-    # Keep the string still as one multiline string.
- cont = re.sub(r'(?s)\s*=\s*0\s*;\s*', r'\n', cont)
-
- # Remove the keyword 'virtual' because the derived classes may not use it.
- cont = re.sub(r'(?m)^virtual\s+', '', cont)
-
-    # Split the string into the list of stripped lines. Strip the string
- # first so that no empty line list item is generated.
- L = cont.strip().split('\n')
-
- # Build the list of unified prototypes and return it.
- return L
-
-def StripArgIdentifiers(prototype):
- """Returns the method prototype without argument identifiers.
-
- The goal of the function is to get only the necessary, single form
- of the method prototype. This way the prototypes from derived classes
- can be compared one by one with the methods in the base class."""
-
-    # Parse the prototype, and split the string of arguments (it can be empty).
- m = re.match(r'^(?P<prefix>.+?)\((?P<args>.*?)\)(?P<suffix>.*)$', prototype)
- str_prefix = m.group('prefix')
- str_args = m.group('args')
- str_suffix = m.group('suffix')
- args = str_args.split(',')
-
- # Each argument will be stripped and put to the new list. Only the types
-    # are important. The use of spaces has to be normalized because people
-    # differ in opinion about where to put spaces. Let's prepare regular
- # expressions for the tasks.
- rex_type = re.compile(r'''^(?P<type> # name of the type group
- \s* # there can be spaces behind comma,
- (const\s+)? # possibly const at the beginning
- [A-Za-z0-9_:]+ # type identifier can be qualified
- (\s*[*&])? # could be reference or pointer
- ) # ... the above is important,
- .*$''', # the rest contains the identifier
- re.VERBOSE)
-
- # People may differ in opinion whether a space should or should not
- # be written between a type identifier and the '*' or '&' (when
- # the argument is a pointer or a reference).
- rex_norm = re.compile(r'\s*(?P<x>[*&])')
-
- # Strip each of the arguments and put them to the 'stripped' list.
- # Only the type of the identifier is important. Extract it, normalize
-    # the use of spaces, and append the result to the list of stripped
-    # arguments. (Sequences of more than one space are handled later.)
- stripped = []
- for arg in args:
- arg = rex_type.sub(r'\g<type>', arg)
- arg = rex_norm.sub(r' \g<x>', arg)
- stripped.append(arg)
-
-    # Join the stripped arguments into one line again, and build the stripped
-    # form of the prototype. Remove the duplicate spaces.
- result = re.sub(r'\s+', ' ',
- str_prefix + '(' + ', '.join(stripped) + ')' + str_suffix)
-
- return result
-
-def GetInfoFrom(input_filename):
- """Returns list of info related to the parsed file.
-
- GetInfoFrom returns the list of information related to the
- parsed source file. The input argument is the name of the
- translator_xx.h file including path.
-
- The output list contains the following items:
- - class identifier
- - base class identifier
- - method prototypes (each in a separate item)"""
-
- # Let's open the file and read it into a single string.
- f = file(input_filename)
- cont = f.read()
- f.close()
-
- # Remove comments and empty lines.
- cont = re.sub(r'(?m)//.*$', '', cont) # C++ comments
- cont = re.sub(r'(?s)/\*.+?\*/', '', cont) # C comments
- cont = re.sub(r'(?s)\n\s*\n', r'\n', cont) # empty lines
-
- # Extract the class and base class identifiers.
- rex = re.compile(r'''^.*class\s+
- (?P<class>Translator\w+?)\s*:
- \s*public\s+(?P<base>\w+)\b
- ''', re.VERBOSE | re.DOTALL)
- m = rex.match(cont)
- assert(m)
- assert(m.group('class'))
- assert(m.group('base'))
-
- # Put the class and the base class into the output list.
- result = [m.group('class'), m.group('base')]
-
- # Remove the opening curly brace. Remove also the first "public:"
- cont = re.sub(r'(?s)^.*?{', '', cont)
- cont = re.sub(r'(?s)(^.*\spublic:\s+)?', '', cont)
-
- # Cut the things after the class.
- cont = re.sub(r'(?s)}\s*;\s*#endif\s*$', '', cont)
-
- # Remove the "virtual" keyword, because the derived class is not forced
- # to use it.
- cont = re.sub(r'\bvirtual\s+', '', cont)
-
- # Remove all strings from lines.
- cont = re.sub(r'(?s)".*?"', '', cont)
-
- # Remove all bodies of methods
- while cont.find('{') >= 0:
- cont = re.sub(r'(?s){[^{}]+?}', '', cont)
-
- # Remove all private methods, i.e. from "private:" to "public:"
- # included. Later, remove also all from "private:" to the end.
- cont = re.sub(r'(?s)\bprivate\s*:.*?\bpublic\s*:', '', cont)
- cont = re.sub(r'(?s)\bprivate\s*:.*$', '', cont)
-
- # Some of the translators use conditional compilation where
- # the branches define the body of the method twice. Remove
- # the ifdef/endif block content.
- cont = re.sub(r'(?s)#ifdef.*?#endif', '', cont)
-
-    # Now the string should contain only method prototypes. Let's unify
- # their format by removing all spaces that are not necessary.
- cont = re.sub(r'\s+', ' ', cont)
- cont = re.sub(r'^\s+', '', cont)
- cont = re.sub(r'\s+$', '', cont)
-
- # Then let's put all of them on separate lines (one protototype --
- # one line; no empty lines).
- cont = re.sub(r'\s+\(', '(', cont)
- cont = re.sub(r'(?s)\)\s*', ')\n', cont)
-
- # Split the string, add it to the output list, and return the result.
- result.extend(cont.strip().split('\n'))
- return result
-
-
-def GetAdapterClassesInfo(required):
- """Returns the list of strings with information related to the adapter classes.
-
- Each one-line string contains the identifier of the adapter class and
- the number of required methods that are implemented by the adapter.
-
- The function takes one agument -- the reference to the hash with
- stripped prototypes of the required methods."""
-
- # Let's open the file with the translator adapter classes.
- info = GetInfo()
- filename = os.path.join(info['srcdir'], 'translator_adapter.h')
- f = file(filename)
- cont = f.read()
- f.close()
-
- # Remove the preprocessor directives.
- cont = re.sub(r'(?m)^\s*#\w+.+$', '', cont)
-
- # Remove comments and empty lines.
- cont = re.sub(r'(?m)//.*$', '', cont) # C++ comments
- cont = re.sub(r'(?s)/\*.+?\*/', '', cont) # C comments
- cont = re.sub(r'(?s)\n\s*\n', r'\n', cont) # empty lines
-
- # Place delimiters to separate the classes, and remove
- # the TranslatorAdapterBase class.
- #
- cont = re.sub(r'(?s)};\s*class\s+', '<class>', cont)
- cont = re.sub(r'(?s)class\s+TranslatorAdapterBase\s+.+?<class>', '<class>', cont)
- cont = re.sub(r'(?s)};', '', cont)
-
- # Remove the base classes and the beginning of the the class definitions.
- cont = re.sub(r'(?s)(TranslatorAdapter[_0-9]+)\s*:.+?{\s*(public\s*:)?',
- '\g<1>', cont)
-
- # Remove all bodies of methods
- while cont.find('{') >= 0:
- cont = re.sub(r'(?s){[^{}]+?}', '', cont)
-
- # Remove the "virtual" keywords.
- cont = re.sub(r'(?m)^\s*virtual\s+', '', cont)
-
- # Remove the empty lines.
- cont = re.sub(r'(?s)\n\s*\n', '\n', cont)
-
- # Trim the spaces.
- cont = re.sub(r'(?m)^\s+', '', cont)
- cont = re.sub(r'(?m)\s+$', '', cont)
-
- # Split the string into the lines again.
- content = cont.split('\n')
-
- # Now the list contains only two kinds of lines. The first
- # kind of lines starts with the <class> tag and contains the
- # identifier of the class. The following lines list the
- # non-stripped prototypes of implemented methods without the
- # "virtual" keyword.
- #
- # Now we will produce the result by looping through all the
- # lines and counting the prototypes of the required methods
- # that are implemented by the adapter class.
- #
- cinfo = ''
- cnt = 0
- methods = ''
- result = []
-
- rex_class = re.compile(r'^<class>(?P<class>\w+)\s*$')
- for line in content:
- m = rex_class.match(line)
- if m:
- # Another adapter class found.
- adapter_class = m.group('class')
+
+ def __collectPublicMethodPrototypes(self, tokenIterator):
+ """Collects prototypes of public methods and fills self.prototypeDic.
+
+ The dictionary is filled by items: uniPrototype -> prototype.
+ The method is expected to be called only for TranslatorXxxx classes,
+ i.e. for the classes that implement translation to some language.
+ It assumes that the opening curly brace of the class was already
+ consumed. The source is consumed until the end of the class.
+ The caller should consume the rest of the source up to the eof
+ so that the source file gets closed."""
+
+ assert(self.classId != 'Translator')
+ assert(self.baseClassId != None)
+
+ # The following finite automaton slightly differs from the one
+ # inside self.collectPureVirtualPrototypes(). It produces the
+ # dictionary item just after consuming the body of the method
+ # (transition from state 10 to state 2). It also does not allow
+ # definitions of public pure virtual methods, except for
+ # TranslatorAdapterBase (states 8 and 9). Argument identifiers inside
+ # method argument lists can be omitted or commented out.
+ #
+ # Let's collect all public method prototypes in their readable
+ # form -- as defined in the source file.
+ # Let's also collect the unified form of the same prototype that
+ # omits everything that can be omitted, namely 'virtual' and the
+ # argument identifiers.
+ prototype = '' # readable prototype (with everything)
+ uniPrototype = '' # unified prototype (without arg. identifiers)
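+
+ # Illustrative sketch (the declaration below is only an example, not
+ # taken from this file): for
+ # virtual QCString trFile(bool first_capital, bool singular)
+ # the automaton below builds
+ # prototype == 'virtual QCString trFile(bool first_capital, bool singular)'
+ # uniPrototype == 'QCString trFile(bool, bool)'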
+
+ # Collect the method prototypes. Stop on the closing
+ # curly brace followed by the semicolon (end of class).
+ status = 0
+ curlyCnt = 0 # counter for the level of curly braces
+
+ # Loop until the final state 777 is reached. Errors are processed
+ # immediately; in this implementation, an error always quits the application.
+ while status != 777:
- # If the cinfo is not empty then it contains partial
- # information about the previously processed adapter.
- if cinfo != '':
- # Complete the cinfo and push it into the result.
- s = ''
- if cnt != 1:
- s = 's'
- cinfo += '\timplements %2d required method%s...\n' % (cnt, s)
- result.append(cinfo + methods)
+ # Get the next token.
+ tokenId, tokenStr, tokenLineNo = tokenIterator.next()
- # Initialize the counter and store the adapter class identifier
- # in the cinfo.
- #
- cinfo = adapter_class
- cnt = 0;
- methods = ''
- else:
- # The line contains the prototype of the implemented method.
- # If it is the required method, count it, and add it to the
- # string of methods.
- stripped_prototype = StripArgIdentifiers(line)
-
- if required.has_key(stripped_prototype):
- cnt += 1
- methods += ' %s\n' % line
-
- # If the cinfo is not empty then it contains partial
- # information about the last processed adapter.
- if cinfo != '':
- # Complete the cinfo and push it into the @result.
- s = ''
- if cnt != 1:
- s = 's'
- cinfo += '\timplements %2d required method%s...\n' % (cnt, s)
- result.append(cinfo + methods)
-
- # Return the result list.
- return result
-
-
-def GetLanguagesInfo(seq_or_dic):
- """Returns (numlang, langlst, formated langstr).
-
- The numlang is the number of supported languages. The langlst is the
- list of pairs like ('TranslatorBrazilian', 'Brazilian Portuguese')
- """
-
- languages = []
- for k in seq_or_dic:
- # Remove the 'Translator' prefix from the class name to obtain
- # the brief name of the language.
- assert(k[:len('Translator')] == 'Translator')
- lang = k[len('Translator'):]
-
- # Do correction of the language name for the selected languages.
- if lang == 'Brazilian': lang = 'Brazilian Portuguese'
- if lang == 'Chinesetraditional': lang = 'Chinese Traditional'
+ if status == 0: # waiting for 'public:'
+ if tokenId == 'public':
+ status = 1
+ elif tokenId == 'eof': # non-public things until the eof
+ status = 777
+
+ elif status == 1: # colon after the 'public'
+ if tokenId == 'colon':
+ status = 2
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 2: # waiting for 'virtual' (can be omitted)
+ if tokenId == 'virtual':
+ prototype = tokenStr # but not to unified prototype
+ status = 3
+ elif tokenId == 'id': # 'virtual' was omitted
+ prototype = tokenStr
+ uniPrototype = tokenStr # start collecting the unified prototype
+ status = 4
+ elif tokenId == 'comment':
+ pass
+ elif tokenId == 'protected' or tokenId == 'private':
+ status = 0
+ elif tokenId == 'rcurly':
+ status = 11 # expected end of class
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 3: # return type of the method expected
+ if tokenId == 'id':
+ prototype += ' ' + tokenStr
+ uniPrototype = tokenStr # start collecting the unified prototype
+ status = 4
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 4: # method identifier expected
+ if tokenId == 'id':
+ prototype += ' ' + tokenStr
+ uniPrototype += ' ' + tokenStr
+ status = 5
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 5: # left bracket of the argument list expected
+ if tokenId == 'lpar':
+ prototype += tokenStr
+ uniPrototype += tokenStr
+ status = 6
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 6: # collecting arguments of the method
+ if tokenId == 'rpar':
+ prototype += tokenStr
+ uniPrototype += tokenStr
+ status = 7
+ elif tokenId == 'const':
+ prototype += tokenStr
+ uniPrototype += tokenStr
+ status = 12
+ elif tokenId == 'id': # type identifier
+ prototype += tokenStr
+ uniPrototype += tokenStr
+ status = 13
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 7: # left curly brace expected
+ if tokenId == 'lcurly':
+ curlyCnt = 1 # method body entered
+ status = 10
+ elif tokenId == 'comment':
+ pass
+ elif tokenId == 'assign': # allowed only for TranslatorAdapterBase
+ assert(self.classId == 'TranslatorAdapterBase')
+ status = 8
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 8: # zero expected (TranslatorAdapterBase)
+ assert(self.classId == 'TranslatorAdapterBase')
+ if tokenId == 'num' and tokenStr == '0':
+ status = 9
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 9: # after semicolon (TranslatorAdapterBase)
+ assert(self.classId == 'TranslatorAdapterBase')
+ if tokenId == 'semic':
+ status = 2
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 10: # consuming the body of the method, then dic item
+ if tokenId == 'rcurly':
+ curlyCnt -= 1
+ if curlyCnt == 0:
+ # Insert new dictionary item.
+ assert(not self.prototypeDic.has_key(uniPrototype))
+ self.prototypeDic[uniPrototype] = prototype
+ status = 2 # body consumed
+ elif tokenId == 'lcurly':
+ curlyCnt += 1
+ elif status == 11: # probably the end of class
+ if tokenId == 'semic':
+ status = 777
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 12: # type id for argument expected
+ if tokenId == 'id':
+ prototype += ' ' + tokenStr
+ uniPrototype += ' ' + tokenStr
+ status = 13
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 13: # :: or * or & or id or ) expected
+ if tokenId == 'colon': # was namespace id
+ prototype += tokenStr
+ uniPrototype += tokenStr
+ status = 14
+ elif tokenId == 'star' or tokenId == 'amp': # pointer or reference
+ prototype += ' ' + tokenStr
+ uniPrototype += ' ' + tokenStr
+ status = 16
+ elif tokenId == 'id': # argument identifier
+ prototype += ' ' + tokenStr
+ # don't put this into unified prototype
+ status = 17
+ elif tokenId == 'comment': # probably commented-out identifier
+ prototype += tokenStr
+ elif tokenId == 'rpar':
+ prototype += tokenStr
+ uniPrototype += tokenStr
+ status = 7
+ elif tokenId == 'comma':
+ prototype += ', '
+ uniPrototype += ', '
+ status = 6
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 14: # second colon for namespace:: expected
+ if tokenId == 'colon':
+ prototype += tokenStr
+ uniPrototype += tokenStr
+ status = 15
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 15: # type after namespace:: expected
+ if tokenId == 'id':
+ prototype += tokenStr
+ uniPrototype += tokenStr
+ status = 13
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 16: # argument identifier or ) expected
+ if tokenId == 'id':
+ prototype += ' ' + tokenStr
+ # don't put this into unified prototype
+ status = 17
+ elif tokenId == 'rpar':
+ prototype += tokenStr
+ uniPrototype += tokenStr
+ status = 7
+ elif tokenId == 'comment':
+ prototype += tokenStr
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+ elif status == 17: # comma or ')' after argument identifier expected
+ if tokenId == 'comma':
+ prototype += ', '
+ uniPrototype += ', '
+ status = 6
+ elif tokenId == 'rpar':
+ prototype += tokenStr
+ uniPrototype += tokenStr
+ status = 7
+ else:
+ self.__unexpectedToken(status, tokenId, tokenLineNo)
+
+
- # Append the language to the list.
- languages.append((k, lang))
-
- # Sort the languages and construct the output string. Insert new line
- # after each line. Add the 'and' before the last language
- languages.sort()
- count = 0
- output = ''
- for L in languages:
- output += L[1] # readable form of the language
- count += 1
-
- if count < len(languages) - 1: # separate by comma
- output += ', '
- elif count == len(languages) - 1: # separate by comma and 'and'
- output += ', and '
-
- if count % 5 == 0:
- output += '\n'
+ def collectAdapterPrototypes(self):
+ """Returns the dictionary of prototypes implemented by adapters.
+
+ It is created to process the translator_adapter.h. The returned
+ dictionary has the form: unifiedPrototype -> (version, classId)
+ thus, by looking up a prototype, we learn which is the newest
+ (least adapting) adapter that is sufficient for implementing
+ the method."""
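+
+ # A sketch of one resulting item (the adapter/method pairing is
+ # hypothetical, for illustration only):
+ # adaptDic['QCString trFile(bool, bool)'] ==
+ # ('1.2.02', 'TranslatorAdapter_1_2_2')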
- return (len(languages), languages, output)
-
-
-def GenerateLanguageDoc(cb):
- """Generates the content as expected in the 'flangdoc' file.
-
- GenerateLanguageDoc takes document templates and code sources
- generates the content as expected in the 'flangdoc' file (the
- part of the Doxygen documentation), and returns the result as a
- string.
-
- The input parameter is the reference to the class/base dictionary."""
-
- # Define templates for HTML table parts of the documentation.
- htmlTableHead = r'''\htmlonly
-<TABLE ALIGN=center CELLSPACING=0 CELLPADDING=0 BORDER=0>
-<TR BGCOLOR="#000000">
-<TD>
- <TABLE CELLSPACING=1 CELLPADDING=2 BORDER=0>
- <TR BGCOLOR="#4040c0">
- <TD ><b><font size=+1 color="#ffffff"> Language </font></b></TD>
- <TD ><b><font size=+1 color="#ffffff"> Maintainer </font></b></TD>
- <TD ><b><font size=+1 color="#ffffff"> Contact address </font>
- <font size=-2 color="#ffffff">(remove the NOSPAM.)</font></b></TD>
- <TD ><b><font size=+1 color="#ffffff"> Status </font></b></TD>
- </TR>
-'''
-
- htmlTableRow = r''' <TR BGCOLOR="#ffffff">
- <TD>$lang</TD>
- <TD>$maintainer</TD>
- <TD>$email</TD>
- <TD>$status</TD>
- </TR>
-'''
+ # Start the token generator which parses the class source file.
+ assert(os.path.split(self.fname)[1] == 'translator_adapter.h')
+ tokenIterator = self.__tokenGenerator()
- htmlTableFoot = r''' </TABLE>
-</TD>
-</TR>
-</TABLE>
-\endhtmlonly
-'''
-
- # Define templates for LaTeX table parts of the documentation.
- latexTableHead = r'''\latexonly
-\begin{tabular}{|l|l|l|l|}
- \hline
- {\bf Language} & {\bf Maintainer} & {\bf Contact address} & {\bf Status} \\
- \hline
-'''
-
- latexTableRow = r''' $lang & $maintainer & {\tt $email} & $status \\
-'''
+ # Get the references to the involved dictionaries.
+ reqDic = self.manager.requiredMethodsDic
- latexTableFoot = r''' \hline
-\end{tabular}
-\endlatexonly
-'''
+ # Create the empty dictionary that will be returned.
+ adaptDic = {}
+
+
+ # Loop through the source of the adapter file until no other adapter
+ # class is found.
+ while True:
+ try:
+ # Collect the class and the base class identifiers.
+ self.__collectClassInfo(tokenIterator)
+
+ # Extract the comparable version of the adapter class.
+ # Note: The self.status as set by self.__collectClassInfo()
+ # contains a similar version, but it is related to the base class,
+ # not to the class itself.
+ lst = self.classId.split('_')
+ version = ''
+ if lst[0] == 'TranslatorAdapter': # TranslatorAdapterBase otherwise
+ version = lst[1] + '.' + lst[2]
+ if len(lst) > 3: # add the last part of the number
+ version += '.' + ('%02d' % int(lst[3]))
+ else:
+ version += '.00'
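+
+ # For example, 'TranslatorAdapter_1_2_2' yields '1.2.02', while
+ # 'TranslatorAdapter_1_2' would yield '1.2.00' (the last component
+ # is zero-padded to keep later string comparisons of versions consistent).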
+
+ # Collect the prototypes of implemented public methods.
+ self.__collectPublicMethodPrototypes(tokenIterator)
+
+ # For the required methods, update the dictionary of methods
+ # implemented by the adapter.
+ for protoUni in self.prototypeDic:
+ if reqDic.has_key(protoUni):
+ # This required method will be marked as implemented
+ # by this adapter class. This implementation assumes
+ # that newer adapters do not reimplement any required
+ # methods already implemented by older adapters.
+ assert(not adaptDic.has_key(protoUni))
+ adaptDic[protoUni] = (version, self.classId)
+
+ # Clear the dictionary object and the information related
+ # to the class as the next adapter class is to be processed.
+ self.prototypeDic.clear()
+ self.classId = None
+ self.baseClassId = None
+
+ except StopIteration:
+ break
- # Read the template of the documentation, and join the content
- # to a single string.
- info = GetInfo()
- filename = os.path.join(info['docdir'], info['flangtpl'])
- f = file(filename)
- output = f.read()
- f.close()
-
- # Get the number of languages, list of their names and formated string
- # with the list in human readable English form.
- (numlang, langlst, langstr) = GetLanguagesInfo(cb)
-
- # Substitute the marks inside the template.
- output = re.sub(r'(?s)\$version', info['doxversion'], output)
- output = re.sub(r'(?s)\$numlang', str(numlang), output)
- output = re.sub(r'(?s)\$languages', langstr, output)
+ # Return the result dictionary.
+ return adaptDic
- # Create the dictionary for info for each language.
- langinfo = {}
- for (trClass, langname) in langlst:
- langinfo[langname] = cb[trClass] + '<msep/>unknown: unknown'
+
+ def processing(self):
+ """Processing of the source file -- only for TranslatorXxxx classes."""
- # Read the information related to maintainers into the
- # string using suitable separators -- one line, one language. #{{{
- filename = os.path.join(info['docdir'], info['fmaintainers'])
- print filename
- f = file(filename)
- maintainers = f.read()
- f.close()
-
- # Trim the spaces on the lines. Strip the comment lines that
- # start with % sign.
- maintainers = re.sub(r'(?m)^[ \t]+', '', maintainers)
- maintainers = re.sub(r'(?m)[ \t]+$', '', maintainers)
- maintainers = re.sub(r'(?m)^%.*$', '', maintainers)
-
- # Join the information for one language into one line,
- # and remove empty lines.
- maintainers = re.sub(r'(?s)\b\n\b', '<sep/>', maintainers)
- maintainers = re.sub(r'(?s)\n{2,}', '\n', maintainers)
- maintainers = re.sub(r'(?s)^\n+', '', maintainers)
- maintainers = re.sub(r'(?s)\n+$', '', maintainers)
-
- # Split the string back to the list, and update the information
- # in the hash with information for languages.
- lst = maintainers.split('\n')
- lst.sort()
- for line in lst:
- # Split the line for one language to separate lines for
- # the language and one or more maintainers. Ensure that the language
- # starts with uppercase and continues with lowercase. (It will be used
- # for reconstructing the translator class identifier.)
- linfo = line.split('<sep/>')
- lang = linfo[0].capitalize()
- del linfo[0]
-
- # Add information to the langinfo dictionary. If the language
- # was not defined in sources, add the question mark to the
- # language identifier.
- #
- if langinfo.has_key(lang):
- langinfo[lang] = cb['Translator' + lang] + '<msep/>' + \
- '<sep/>'.join(linfo)
- else:
- lang += ' (?)'
- langinfo[lang] = 'unknown<msep/>' + '<sep/>'.join(linfo)
+ # Start the token generator which parses the class source file.
+ tokenIterator = self.__tokenGenerator()
- # Now, the langinfo dictionary contains all the information needed for
- # generating the tables (HTML and LaTeX). Define string variables
- # for each of the tables, and initialize them.
- #
- tableHTML = htmlTableHead
- tableLATEX = latexTableHead
-
- # Loop through sorted keys for the languages, parse the
- # information, and add it to the tables.
- langs = langinfo.keys()
- langs.sort()
- for lang in langs:
- # Transform the key for the language into more human readable
- # form. Basically, only languages with two words are going to be
- # corrected.
- if lang == 'Brazilian':
- lang_readable = 'Brazilian Portuguese'
- elif lang == 'Chinesetraditional':
- lang_readable = 'Chinese Traditional'
- else:
- lang_readable = lang
+ # Collect the class and the base class identifiers.
+ self.__collectClassInfo(tokenIterator)
+ assert(self.classId != 'Translator')
+ assert(self.classId[:17] != 'TranslatorAdapter')
+
+ # Collect the prototypes of implemented public methods.
+ self.__collectPublicMethodPrototypes(tokenIterator)
+
+ # Eat the rest of the source to cause closing the file.
+ while True:
+ try:
+ t = tokenIterator.next()
+ except StopIteration:
+ break
+
+ # Shorthands for the used dictionaries.
+ reqDic = self.manager.requiredMethodsDic
+ adaptDic = self.manager.adaptMethodsDic
+ myDic = self.prototypeDic
+
+ # Build the list of obsolete methods.
+ self.obsoleteMethods = []
+ for p in myDic:
+ if not reqDic.has_key(p):
+ self.obsoleteMethods.append(p)
+
+ # Build the list of missing methods and the list of implemented
+ # required methods.
+ self.missingMethods = []
+ self.implementedMethods = []
+ for p in reqDic:
+ if myDic.has_key(p):
+ self.implementedMethods.append(p)
+ else:
+ self.missingMethods.append(p)
+
+ # Check whether an adapter must be used, or suggest the newest one.
+ # Change the status and set the note accordingly.
+ if self.baseClassId != 'Translator':
+ if not self.missingMethods:
+ self.note = 'Change the base class to Translator.'
+ self.status = ''
+ self.readableStatus = 'up-to-date'
+ elif self.baseClassId != 'TranslatorEnglish':
+ # The translator uses some of the adapters.
+ # Look at the missing methods and check what adapter
+ # implements them. Remember the one with the lowest version.
+ adaptMinVersion = '9.9.99'
+ adaptMinClass = 'TranslatorAdapter_9_9_99'
+ for uniProto in self.missingMethods:
+ if adaptDic.has_key(uniProto):
+ version, cls = adaptDic[uniProto]
+ if version < adaptMinVersion:
+ adaptMinVersion = version
+ adaptMinClass = cls
+
+ # Test against the current status -- preserve the self.status.
+ # Possibly, the translator implements enough methods to
+ # use some newer adapter.
+ status = self.status
+
+ # If the version of the used adapter is smaller than
+ # the required, set the note and update the status as if
+ # the newer adapter was used.
+ if adaptMinVersion > status:
+ self.note = 'Change the base class to %s.' % adaptMinClass
+ self.status = adaptMinVersion
+ self.adaptMinClass = adaptMinClass
+ self.readableStatus = adaptMinVersion # simplified
+
+ # If everything seems OK, but there are obsolete methods, set
+ # the note to clean-up source. This note will be used only when
+ # the previous code did not set another note (priority).
+ if not self.note and self.status == '' and self.obsoleteMethods:
+ self.note = 'Remove the obsolete methods (never used).'
+
+ def report(self, fout):
+ """Returns the report part for the source as a multiline string.
+
+ No output for up-to-date translators without problem."""
+
+ # If there is nothing to report, return immediately.
+ if self.status == '' and not self.note:
+ return
- print lang, lang_readable
- """
+ # Report the number of not implemented methods.
+ fout.write('\n\n\n')
+ fout.write(self.classId + ' (' + self.baseClassId + ')')
+ if self.missingMethods:
+ fout.write(' %d' % len(self.missingMethods))
+ fout.write(' methods to implement')
+ fout.write('\n' + '-' * len(self.classId))
- # Read the line with info for the language and separate
- # the status. #{{{
- #
- my @list = split(/<msep\/>/, $language{$lang});
- my $status = shift @list;
+ # Write the info about the implemented required methods.
+ fout.write('\n\n Implements %d' % len(self.implementedMethods))
+ fout.write(' of the required methods.')
- my $i = $status =~ s{^Translator$}{up-to-date};
+ # Report the missing methods, but only when the translator is not
+ # English-based.
+ if self.missingMethods and self.status != 'En':
+ fout.write('\n\n Missing methods (should be implemented):\n')
+ reqDic = self.manager.requiredMethodsDic
+ for p in self.missingMethods:
+ fout.write('\n ' + reqDic[p])
+
+ # Always report obsolete methods.
+ if self.obsoleteMethods:
+ fout.write('\n\n Obsolete methods (should be removed, never used):\n')
+ myDic = self.prototypeDic
+ for p in self.obsoleteMethods:
+ fout.write('\n ' + myDic[p])
+
+ # For English-based translator, report the implemented methods.
+ if self.status == 'En' and self.implementedMethods:
+ fout.write('\n\n This English-based translator implements ')
+ fout.write('the following methods:\n')
+ reqDic = self.manager.requiredMethodsDic
+ for p in self.implementedMethods:
+ fout.write('\n ' + reqDic[p])
- if ($i == 0) {
- $i = $status =~ s{^TranslatorAdapter_(\d)_(\d)_(\d)}
- {$1.$2.$3}x;
- }
+
+ def getmtime(self):
+ """Returns the last modification time of the source file."""
+ assert(os.path.isfile(self.fname))
+ return os.path.getmtime(self.fname)
- if ($i == 0) {
- $i = $status =~ s{^TranslatorEnglish$}
- {obsolete}x;
- }
+
+class TrManager:
+ """Collects basic info and builds subordinate Transl objects."""
+
+ def __init__(self):
+ """Determines paths, creates and initializes structures.
- if ($i == 0) { $status = 'strange'; }
+ The arguments of the script may explicitly say what languages should
+ be processed. Pass the two-letter identifiers that are used
+ for composing the source filenames; for example,
+
+ python translator.py cz
+
+ processes only the translator_cz.h source.
+ """
- ##}}}
+ # Determine the path to the script and the absolute path to the
+ # Doxygen's root subdirectory.
+ self.script = os.path.abspath(sys.argv[0])
+ self.script_path, self.script_name = os.path.split(self.script)
+ self.script_path = os.path.abspath(self.script_path)
+ self.doxy_path = os.path.abspath(os.path.join(self.script_path, '..'))
- # Split the rest of the list (should be a single item) into
- # the list with one or more maintainers -- one line, one
- # maintainer. #{{{
- #
- my $rest = shift @list;
- @list = split(/<sep\/>/, $rest);
- ##}}}
-
- # In HTML table, maintainer names are placed in the same
- # cell. Also their e-mails are placed in a single cell.
- # Extract the string with concatenated names and the string
- # with concatenated e-mails. Add the row to the HTML
- # table. #{{{
- #
- my $name = '';
- my $email = '';
-
- foreach my $maintainer (@list) {
+ # Get the explicit arguments of the script.
+ self.script_argLst = sys.argv[1:]
- if ($name ne '') { $name .= '<br>'; }
- if ($email ne '') { $email .= '<br>'; }
-
- $maintainer =~ m{^\s*(.+?)\s*:\s*(.+?)\s*$};
+ # Build the path names based on the Doxygen's root knowledge.
+ self.doc_path = os.path.join(self.doxy_path, 'doc')
+ self.src_path = os.path.join(self.doxy_path, 'src')
- $name .= $1;
- $email .= $2;
- }
+ # Create the empty dictionary for Transl objects identified by the
+ # class identifier of the translator.
+ self.__translDic = {}
+
+ # Create the placeholder for the dictionary of required methods.
+ # The key is the unified prototype, the value is the full prototype.
+ # It is set inside self.__build().
+ self.requiredMethodsDic = None
- # Prepare the HTML row template, modify it, and add the
- # result to the HTML table.
- #
- my $item = $htmlTableRow;
+ # Create the empty dictionary that says what method is implemented
+ # by what adapter.
+ self.adaptMethodsDic = {}
- $item =~ s{\$lang}{$lang_readable};
- $item =~ s{\$maintainer}{$name};
- $item =~ s{\$email}{$email};
- $item =~ s{\$status}{$status};
+ # The last modification time will capture the modification of this
+ # script, of translator.h, of translator_adapter.h (see self.__build()
+ # for the last two), of all the translator_xx.h files, and of the
+ # template for generating the documentation. This time can then be
+ # compared with the modification time of the generated documentation
+ # to decide whether the doc should be re-generated.
+ self.lastModificationTime = os.path.getmtime(self.script)
- $tableHTML .= $item;
+ # Set the names of the translator report text file, of the template
+ # for generating the "Internationalization" document, of the generated
+ # file itself, and of the maintainers list.
+ self.translatorReportFileName = 'translator_report.txt'
+ self.maintainersFileName = 'maintainers.txt'
+ self.languageTplFileName = 'language.tpl'
+ self.languageDocFileName = 'language.doc'
- ##}}}
+ # The information about the maintainers will be stored
+ # in the dictionary with the following name.
+ self.__maintainersDic = None
- # For LaTeX, more maintainers for the same language are
- # placed on separate rows in the table. The line separator
- # in the table is placed explicitly above the first
- # maintainer. Add rows for all maintainers to the LaTeX
- # table. #{{{
- #
- # Prepare the LATEX row template, modify it, and add the
- # result to the LATEX table.
- #
- $item = $latexTableRow;
-
- my $first = shift @list; # the first maintainer.
- $first =~ m{^\s*(.+?)\s*:\s*(.+?)\s*$};
+ # Define the other used structures and variables for information.
+ self.langLst = None # including English based
+ self.supportedLangReadableStr = None # coupled En-based as a note
+ self.numLang = None # excluding coupled En-based
+ self.doxVersion = None # Doxygen version
- $name = $1;
- $email = $2;
+ # Build objects where each one is responsible for one translator.
+ self.__build()
- $item =~ s{\$lang}{$lang_readable};
- $item =~ s{\$maintainer}{$name};
- $item =~ s{\$email}{$email};
- $item =~ s{\$status}{$status};
- $tableLATEX .= " \\hline\n" . $item;
+ def __build(self):
+ """Find the translator files and build the objects for translators."""
+
+ # The translator.h must exist (the Transl object will check it),
+ # create the object for it and let it build the dictionary of
+ # required methods.
+ tr = Transl(os.path.join(self.src_path, 'translator.h'), self)
+ self.requiredMethodsDic = tr.collectPureVirtualPrototypes()
+ tim = tr.getmtime()
+ if tim > self.lastModificationTime:
+ self.lastModificationTime = tim
+
+ # The translator_adapter.h must exist (the Transl object will check it),
+ # create the object for it and store the reference in the dictionary.
+ tr = Transl(os.path.join(self.src_path, 'translator_adapter.h'), self)
+ self.adaptMethodsDic = tr.collectAdapterPrototypes()
+ tim = tr.getmtime()
+ if tim > self.lastModificationTime:
+ self.lastModificationTime = tim
+
+ # Create the list of the filenames with language translator sources.
+ # If the explicit arguments of the script were typed, process only
+ # those files.
+ if self.script_argLst:
+ lst = ['translator_' + x + '.h' for x in self.script_argLst]
+ for fname in lst:
+ if not os.path.isfile(os.path.join(self.src_path, fname)):
+ sys.stderr.write("\a\nFile '%s' not found!\n" % fname)
+ sys.exit(1)
+ else:
+ lst = os.listdir(self.src_path)
+ lst = filter(lambda x: x[:11] == 'translator_'
+ and x[-2:] == '.h'
+ and x != 'translator_adapter.h', lst)
+
+ # Build the object for the translator_xx.h files, and process the
+ # content of the file. Then insert the object to the dictionary
+ # accessed via classId.
+ for fname in lst:
+ fullname = os.path.join(self.src_path, fname)
+ tr = Transl(fullname, self)
+ tr.processing()
+ assert(tr.classId != 'Translator')
+ self.__translDic[tr.classId] = tr
+
+ # Extract the global information from the processed files.
+ self.__extractProcessedInfo()
- # List the other maintainers for the language. Do not set
- # lang and status for them.
- #
- while (@list) {
- my $next = shift @list;
- $next =~ m{^\s*(.+?)\s*:\s*(.+?)\s*$};
-
- my $name = $1;
- my $email = $2;
- my $item = $latexTableRow;
-
- $item =~ s{\$lang}{};
- $item =~ s{\$maintainer}{$name};
- $item =~ s{\$email}{$email};
- $item =~ s{\$status}{};
-
- $tableLATEX .= $item;
- }
- ##}}}
- }
- ##}}}
-
- # Finish the tables, and substitute the mark in the doc
- # template by the concatenation of the tables. Add NOSPAM to
- # email addresses in the HTML table. Replace the special
- # character sequences. #{{{
- #
- $tableHTML .= $htmlTableFoot;
- $tableLATEX .= $latexTableFoot;
-
- $tableHTML =~ s{@}{\@NOSPAM.}sg;
- $tableHTML =~ s{&ccaron;}{&#x010d;}sg;
- $tableHTML =~ s{&rcaron;}{&#x0159;}sg;
-
- $tableLATEX =~ s/&aacute;/\\'{a}/sg;
- $tableLATEX =~ s/&auml;/\\"{a}/sg;
- $tableLATEX =~ s/&ouml;/\\"{o}/sg;
- $tableLATEX =~ s/&oslash;/\\o{}/sg;
- $tableLATEX =~ s/&ccaron;/\\v{c}/sg;
- $tableLATEX =~ s/&rcaron;/\\v{r}/sg;
- $tableLATEX =~ s/_/\\_/sg;
-
- $output =~ s{\$information_table}{$tableHTML$tableLATEX};
-
- ##}}}
-
- # Replace the other symbols in the template by the expected
- # information. ##{{{
- #
- $output =~ s{\$version}{$doxversion};
-
- $output =~ s{\$translator_report_file_name}
- {<code>doxygen/doc/$ftranslatortxt</code>}x;
-
- $output =~ s{\$translator_report_link}
- {<a href=\"../doc/$ftranslatortxt\">
- <code>doxygen/doc/$ftranslatortxt</code></a>}x;
- ##}}}
-
- # Replace the introduction notice in the output. #{{{
- #
- $output =~ s{<notice>.+?</notice>}
-{Warning: this file was generated from the $flangtpl template
- * and the $fmaintainers files by the $0 script.
- *
- * Do not edit this file. Edit the above mentioned files!}sx;
- ##}}}
-
- # Return the content of the generated output file.
- #
- return $output;
-}
-"""
- return output
-
-
-################################################################# Body
-if __name__ == '__main__':
- # Get the reference to the initialized dictionary with the shared info.
- info = GetInfo()
-
- # File with the translator base class must be present. Exit otherwise,
- # but be kind to those who already have the documentation
- # generated by this script ready, but who do not have sources.
- # If no 'flangdoc' is present, copy the template to it.
- if not os.path.isfile(os.path.join(info['srcdir'], 'translator.h')):
- sys.stderr.write(('\nThe %(scriptname)s warning:\n' +
- '\tThe translator.h not found in %(srcdir)s.\n' +
- '\tThe %(ftranslatortxtname)s will not be ' +
- "generated (you don't need it).\n") % info)
-
- # $flangdoc is not present, copy the template to it, and do the simplified
- # replacement of the markers inside the template. Generate the warning
- # about 'flangdoc' content.
- if not os.path.isfile(info['flangdoc']):
- CopyTemplateToLanguageDoc();
- sys.stderr.write(('\nThe %(scriptname)s warning:\n' +
- "\tThe %(flangdoc)s not found in the '%(docdir)s' directory.\n" +
- '\tThe %(flangtpl)s template content copied into it.\n' +
- '\tAs the sources are not available, some information\n' +
- '\tcould not be extracted and inserted into %(flangdoc)s.\n') % info)
-
- # Exit as if nothing happened.
- sys.exit(0)
-
- # Create the list of all translator_xxxx.h file names. If the script was
- # given a list of languages (two letters for each language), then fill
- # the list only by the translator files for the languages.
- directory = info['srcdir']
- langlist = info['languages']
- if langlist:
- files = [os.path.join(directory, 'translator_%s.h' % lang)
- for lang in langlist
- if os.path.isfile(os.path.join(directory,
- 'translator_%s.h' % lang))]
- else:
- rex_tr = re.compile(r'^translator_\w\w\w?\.h$', re.IGNORECASE)
- files = [os.path.join(directory, f)
- for f in os.listdir(directory)
- if os.path.isfile(os.path.join(directory, f))
- and rex_tr.match(f)]
-
- # Get only the pure virtual methods from the Translator class
- # into a list for later testing present/not present.
- expected_lst = GetPureVirtualFrom(os.path.join(info['srcdir'],
- 'translator.h'))
- # Fill the 'required_methods' dictionary for unified form
- # of the prototypes.
- required = info['required_methods']
- for method in expected_lst:
- prototype = StripArgIdentifiers(method)
- required[prototype] = True
-
- # The details for translators will be collected into the output
- # string. If some details are listed for a translator, the flag
- # will be set to produce possible warning to the list of
- # up-to-date translators.
- output = ''
- details = {}
-
- # Collect base classes of translators in the hash. CB stands
- # for Class and Base.
- cb = {}
-
- # Loop through all translator files. Extract the implemented
- # virtual methods and compare it with the requirements. Prepare
- # the output.
- rex_trAdapter = re.compile(r'^TranslatorAdapter_')
- rex_trEN = re.compile(r'^TranslatorEnglish$')
- for filename in files:
- # Get the information from the sources. Remember the base
- # class for each of the classes. Clear the flag for
- # details for the class.
- finfo = GetInfoFrom(filename)
- (class_, base_) = finfo[0:2]
- cb[class_] = base_
- details[class_] = False
-
- # Set the value of the required methods to 1 (true). Let
- # this indicate that the method was not defined in the
- # translator class.
- for method in required:
- required[method] = True
+ def __extractProcessedInfo(self):
+ """Build lists and strings of the processed info."""
- # Loop through all items and compare the prototypes. Mark
- # the implemented method and collect the old ones.
- old_methods = []
- for method in finfo[2:]:
- # Get only the necessary form of the prototype.
- prototype = StripArgIdentifiers(method)
+ # Build the auxiliary list with strings composed of the status,
+ # the readable form of the language, and the classId.
+ statLst = []
+ for obj in self.__translDic.values():
+ assert(obj.classId != 'Translator')
+ s = obj.status + '|' + obj.langReadable + '|' + obj.classId
+ statLst.append(s)
+
+ # Sort the list and extract the object identifiers (classId's) for
+ # the up-to-date translators and English-based translators.
+ statLst.sort()
+ self.upToDateIdLst = [x.split('|')[2] for x in statLst if x[0] == '|']
+ self.EnBasedIdLst = [x.split('|')[2] for x in statLst if x[:2] == 'En']
+
+ # Reverse the list and extract the TranslatorAdapter based translators.
+ statLst.reverse()
+ self.adaptIdLst = [x.split('|')[2] for x in statLst if x[0].isdigit()]
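+
+ # Illustration (hypothetical entries): an up-to-date translator
+ # produces something like '|Czech|TranslatorCzech' (empty status,
+ # so x[0] == '|'), an adapter-based one '1.2.16|German|TranslatorGerman'
+ # (the status starts with a digit), and an English-based one starts
+ # with the status 'En'.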
+
+ # Build the list of tuples that contain (langReadable, obj).
+ # Sort it by readable name.
+ self.langLst = []
+ for obj in self.__translDic.values():
+ self.langLst.append((obj.langReadable, obj))
+ self.langLst.sort(lambda a, b: cmp(a[0], b[0]))
- # Mark as recognized when the prototype is required.
- # Otherwise, remember it as old method which is
- # implemented, but not required.
- if (required.has_key(prototype)):
- required[prototype] = False # satisfaction
- else:
- old_methods.append(method)
-
- # Loop through the list of expected methods and collect
- # the missing (new) methods. Do this only when it derives
- # from Translator or TranslatorAdapter classes (i.e. ignore
- # any unusual kind of TranslatorXxxx implementation).
- # Accept also deriving from TranslatorEnglish, that can
- # be done by doxygen developers to solve problems with
- # some really outdated translators.
- missing_methods = []
- if rex_trAdapter.match(base_) or rex_trEN.match(base_):
+ # Create the list of readable language names. If the language also
+ # has an English-based version, modify the item by appending a note.
+ # The number of supported languages equals the length of the list.
+ langReadableLst = []
+ for name, obj in self.langLst:
+ if obj.status == 'En': continue
- for method in expected_lst:
- # Get the stripped version of the prototype.
- prototype = StripArgIdentifiers(method)
-
- # If the prototype is stored in the required
- # table, and if it was not marked as implemented,
- # then it should be. It is a missing method.
- #try:
- if required[prototype]:
- missing_methods.append(method)
-
- # The detailed output will be produced only when it is needed.
- if old_methods or missing_methods or rex_trAdapter.match(base_):
- output += '\n\n\n'
- output += '%s (%s)\n%s\n' % (class_, base_, '-' * len(class_))
+ # Append the 'En' to the classId to possibly obtain the classId
+ # of the English-based object. If the object exists, modify the
+ # name for the readable list of supported languages.
+ classIdEn = obj.classId + 'En'
+ if self.__translDic.has_key(classIdEn):
+ name += ' (+En)'
- if rex_trEN.match(base_):
- output += '''
-This translator is implemented via deriving from the English translator.
-This should be done only in the case when the language maintainer
-or the doxygen developers need to update some really old-dated translator.
-Otherwise, deriving from the translator adapter classes should be used
-for obsolete translators. If you still want some texts to be in English
-copy the sources of the English translator.
+ # Append the result name of the language, possibly with note.
+ langReadableLst.append(name)
+
+ # Create the multiline string of readable language names,
+ # with punctuation, wrapped to paragraph.
+ if len(langReadableLst) == 1:
+ s = langReadableLst[0]
+ elif len(langReadableLst) == 2:
+ s = ' and '.join(langReadableLst)
+ else:
+ s = ', '.join(langReadableLst[:-1]) + ', and '
+ s += langReadableLst[-1]
+
+ self.supportedLangReadableStr = fill(s + '.')
+
+ # Find the number of the supported languages. The English based
+ # languages are not counted if the non-English based also exists.
+ self.numLang = len(self.langLst)
+ for name, obj in self.langLst:
+ if obj.status == 'En':
+ classId = obj.classId[:-2]
+ if self.__translDic.has_key(classId):
+ self.numLang -= 1 # the couple will be counted as one
+
+ # Extract the version of Doxygen.
+ f = file(os.path.join(self.doxy_path, 'version'))
+ self.doxVersion = f.readline().strip()
+ f.close()
+
+ # Update the last modification time.
+ for tr in self.__translDic.values():
+ tim = tr.getmtime()
+ if tim > self.lastModificationTime:
+ self.lastModificationTime = tim
+
+
+
+ def generateTranslatorReport(self):
+ """Generates the translator report."""
-The obsolete and missing method lists (below) reflect what have to be done
-to derive directly from the Translator class (i.e. to reach up-to-date status).
-'''
+ output = os.path.join(self.doc_path, self.translatorReportFileName)
+
+ # Open the textual report file for the output.
+ f = file(output, 'w')
- elif not rex_trAdapter.match(base_):
- output += '''
-This is some unusual implementation of the translator class. It is derived
-from the %s base class. The usual translator class derives
-or from the Translator class or from some TranslatorAdapter_x_x_x classes.
-Because of that, nothing can be guessed about missing or obsolete methods.
-''' % base_
+ # Output the information about the version.
+ f.write('(' + self.doxVersion + ')\n\n')
+
+ # Output the information about the number of the supported languages
+ # and the list of the languages, or only the note about the explicitly
+ # given languages to process.
+ if self.script_argLst:
+ f.write('The report was generated for the following, explicitly')
+ f.write(' identified languages:\n\n')
+ f.write(self.supportedLangReadableStr + '\n\n')
+ else:
+ f.write('Doxygen supports the following ')
+ f.write(str(self.numLang))
+ f.write(' languages (sorted alphabetically):\n\n')
+ f.write(self.supportedLangReadableStr + '\n\n')
+
+ # Write the summary about the status of the language translators
+ # (how many translators are up-to-date, etc.).
+ s = 'Of them, %d translators are up-to-date, ' % len(self.upToDateIdLst)
+ s += '%d translators are based on some adapter class, ' % len(self.adaptIdLst)
+ s += 'and %d are English based.' % len(self.EnBasedIdLst)
+ f.write(fill(s) + '\n\n')
+
+ # Write the list of up-to-date translator classes.
+ if self.upToDateIdLst:
+ s = '''The following translator classes are up-to-date (sorted
+ alphabetically). This means that they derive from the
+ Translator class and implement all %d of the required
+ methods. Even so, some details may still be listed for them.
+ Please see the details below:'''
+ s = s % len(self.requiredMethodsDic)
+ f.write('-' * 70 + '\n')
+ f.write(fill(s) + '\n\n')
+ for x in self.upToDateIdLst:
+ obj = self.__translDic[x]
+ f.write(' ' + obj.classId)
+ if obj.note:
+ f.write(' -- ' + obj.note)
+ f.write('\n')
+
+ # Write the list of the adapter based classes. The very obsolete
+ # translators that derive from TranslatorEnglish are included.
+ if self.adaptIdLst:
+ s = '''The following translator classes need some maintenance
+ (the most obsolete at the end). The other info shows the
+ estimated Doxygen version when the class was last updated
+ and the number of methods that must be implemented to
+ become up-to-date:'''
+ f.write('\n' + '-' * 70 + '\n')
+ f.write(fill(s) + '\n\n')
+
+ # Find also whether some adapter classes may be removed.
+ adaptMinVersion = '9.9.99'
+
+ for x in self.adaptIdLst:
+ obj = self.__translDic[x]
+ f.write(' %-30s' % obj.classId)
+ f.write(' %-6s' % obj.readableStatus)
+ f.write('\t%2d methods to implement' % len(obj.missingMethods))
+ if obj.note:
+ f.write('\n\tNote: ' + obj.note + '\n')
+ f.write('\n')
- if missing_methods:
- output += '\nMissing methods (should be implemented):\n\n'
- for m in missing_methods:
- output += ' ' + m + '\n'
-
- if old_methods:
- output += '\nObsolete methods (should be removed):\n\n'
- old_methods.sort()
- for m in old_methods:
- output += ' ' + m + '\n'
+ # Check the level of required adapter classes.
+ if obj.status != '0.0.00' and obj.status < adaptMinVersion:
+ adaptMinVersion = obj.status
- # Some details were listed, set the details flag for the class.
- details[class_] = 1;
-
-
- # Generate the ASCII output file.
- fout_name = info['ftranslatortxt']
-
- # Open it first, and output the version information.
- fout = file(fout_name, 'w')
- fout.write('(version %s)\n\n' % info['doxversion'])
-
- # List the supported languages.
- (numlang, langlst, langstr) = GetLanguagesInfo(cb)
- fout.write('Doxygen supports the following (' + str(numlang) +
- ') languages (sorted alphabetically):\n\n')
- fout.write(langstr + '.\n')
-
- # If there are up-to-date translators, list them.
- L = [k for k in cb if cb[k] == 'Translator']
- L.sort()
-
- if L:
- fout.write('\n' + '-' * 70 + '\n')
- fout.write('''The \
-following translator classes are up-to-date (sorted alphabetically).
-This means that they derive from the Translator class. Anyway, there still
-may be some details listed even for the up-to-date translators.
-Please, check the text below if the translator is marked so.
-
-''')
- for tr in L:
- # Print the class name. If some details were listed for
- # the translator class, add a notice.
- fout.write(' ' + tr)
- if details[tr]:
- fout.write('\t-- see details below in the report')
- fout.write('\n')
+ # Set the note if some old translator adapters are not needed
+ # any more.
+ for version, adaptClassId in self.adaptMethodsDic.values():
+ if version < adaptMinVersion:
+ f.write('\nNote: The %s class ' % adaptClassId)
+ f.write('is not used and can be removed.\n')
+
+ # Write the list of the English-based classes.
+ if self.EnBasedIdLst:
+ s = '''The following translator classes derive directly from
+ TranslatorEnglish. The class identifier has the suffix 'En',
+ which indicates that this is intentional. Usually, there is
+ also a non-English based version of the translator for
+ the language:'''
+ f.write('\n' + '-' * 70 + '\n')
+ f.write(fill(s) + '\n\n')
+
+ for x in self.EnBasedIdLst:
+ obj = self.__translDic[x]
+ f.write(' ' + obj.classId)
+ f.write('\timplements %d methods' % len(obj.implementedMethods))
+ if obj.note:
+ f.write(' -- ' + obj.note)
+ f.write('\n')
+
+ # Write the details for the translators.
+ f.write('\n' + '=' * 70)
+ f.write('\nDetails for translators (classes sorted alphabetically):\n')
+
+ cls = self.__translDic.keys()
+ cls.sort()
+
+ for c in cls:
+ obj = self.__translDic[c]
+ assert(obj.classId != 'Translator')
+ obj.report(f)
+
+ # Close the report file.
+ f.close()
+
- # If there are obsolete translators, list them.
- L = [k for k in cb if rex_trAdapter.match(cb[k])]
- L.sort()
-
- if L:
- fout.write('\n' + '-' * 70 + '\n')
- fout.write('''The \
-following translator classes are obsolete (sorted alphabetically).
-This means that they derive from some of the adapter classes.
-
-''')
- for tr in L:
- fout.write(' %s\t(%s)\n' % (tr, cb[tr]))
-
- # If there are translators derived from TranslatorEnglish, list them
- # and name them as really obsolete.
- L = [k for k in cb if cb[k] == 'TranslatorEnglish']
- L.sort()
- if L:
- fout.write('\n' + '-' * 70 + '\n')
- fout.write('''The \
-following translator classes are implemented via deriving
-from the English translator. This should be done only in the case
-when the language maintainer or the doxygen developers need to update
-some really outdated translator. Otherwise, deriving from
-the translator adapter classes should be prefered for obsolete translators.
-See details below in the report.
-
-''')
- for tr in L:
- fout.write(' %s\t(%s)\n' % (tr, cb[tr]))
-
- # If there are other translators, list them. #{{{
- #
- L = [k for k in cb
- if not rex_trAdapter.match(cb[k])
- and cb[k] != 'TranslatorEnglish'
- and cb[k] != 'Translator'
- ]
- L.sort()
-
- if L:
- fout.write('\n' + '-' * 70 + '\n')
- fout.write('''The \
-following translator classes are somehow different
-(sorted alphabetically). This means that they do not derive from
-the Translator class, nor from some of the adapter classes,
-nor from the TranslatorEnglish. Nothing can be guessed about the methods.
-
-''')
- for tr in L:
- fout.write(' %s\t(%s)\n' % (tr, cb[tr]))
-
- # List all the translator adapter classes to show for which versions
- # the adapters had to be created. Show, how many and what new methods
- # are implemented by the adapters.
- #
- fout.write('\n' + '-' * 70 + '\n')
- fout.write('''The \
-following translator adapter classes are implemented -- the older (with
-lower number) are always derived from the newer. They implement the
-listed required methods. Notice that some versions of doxygen did not
-introduce any changes related to the language translators. From here you may
-guess how much work should be done to update your translator:
-
-''')
- adapter_info = GetAdapterClassesInfo(required)
-
- for ad in adapter_info:
- fout.write(' %s\n' % ad)
-
- # List the methods that are expected to be implemented.
- fout.write('\n' + '-' * 70 + '\n')
- fout.write('''Localized \
-translators are expected to implement the following methods
-(prototypes sorted aplhabetically):
-
-''')
- expected_lst.sort()
- for m in expected_lst:
- fout.write('%s\n' % m)
- # If there are some details for the translators, show them.
- if output != '':
- fout.write('\n\n' + '=' * 70 + '\n')
- fout.write('Details related to specific translator classes follow.\n')
- fout.write(output + '\n')
+ def __loadMaintainers(self):
+ """Load and process the file with the maintainers.
+
+ Fills the dictionary classId -> [(name, e-mail), ...]."""
+
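+ # Expected layout of maintainers.txt, as implied by the parser below
+ # (the classes and the maintainer entries are hypothetical examples):
+ #
+ # % comment lines start with a percent sign
+ # TranslatorCzech
+ # Jan Novak: jan.novak@some.domain
+ #
+ # TranslatorGerman
+ # Erika Mustermann: erika@another.domain
+ #
+ # i.e. a record starts with the translator class identifier, each
+ # following 'name: e-mail' line adds one maintainer, and an empty
+ # line closes the record.
+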
+ fname = os.path.join(self.script_path, self.maintainersFileName)
- # Close the ASCII output file
- fout.close()
+ # Include the maintainers file in the group of files checked with
+ # respect to the modification time.
+ tim = os.path.getmtime(fname)
+ if tim > self.lastModificationTime:
+ self.lastModificationTime = tim
+
+ # Process the content of the maintainers file.
+ f = file(fname)
+ inside = False # inside the record for the language
+ lineReady = True
+ classId = None
+ maintainersLst = None
+ self.__maintainersDic = {}
+ while lineReady:
+ line = f.readline() # next line
+ lineReady = line != '' # when eof, then line == ''
+
+ line = line.strip() # eof should also behave as separator
+ if line != '' and line[0] == '%': # skip the comment line
+ continue
+
+ if not inside: # if outside of the record
+ if line != '': # should be language identifier
+ classId = line
+ maintainersLst = []
+ inside = True
+ # Otherwise skip empty lines that do not act as separators.
+
+ else: # if inside the record
+ if line == '': # separator found
+ inside = False
+ else:
+ # If it is the first maintainer, create the empty list.
+ if not self.__maintainersDic.has_key(classId):
+ self.__maintainersDic[classId] = []
+
+ # Split the information about the maintainer and append
+ # the tuple.
+ lst = line.split(':', 1)
+ assert(len(lst) == 2)
+ t = (lst[0].strip(), lst[1].strip())
+ self.__maintainersDic[classId].append(t)
+ f.close()
+
+ def generateLanguageDoc(self):
+ """Checks the modtime of files and generates language.doc."""
+ self.__loadMaintainers()
+
+ # Check the last modification time of the template file. It is the
+ # last file from the group that decides whether the documentation
+ # should or should not be generated.
+ fTplName = os.path.join(self.script_path, self.languageTplFileName)
+ tim = os.path.getmtime(fTplName)
+ if tim > self.lastModificationTime:
+ self.lastModificationTime = tim
+
+ # If the generated documentation exists and is newer than any of
+ # the source files from the group, do not generate it and quit
+ # quietly.
+ fDocName = os.path.join(self.doc_path, self.languageDocFileName)
+ if os.path.isfile(fDocName):
+ if os.path.getmtime(fDocName) > self.lastModificationTime:
+ return
+
+ # The document either does not exist or is older than some of the
+ # sources. It must be generated again.
+ #
+ # Read the template of the documentation, and remove the first
+ # attention lines.
+ f = file(fTplName)
+ line = f.readline()
+ while line[0] != '/':
+ line = f.readline()
+ doctpl = line + f.read()
+ f.close()
+
+ # Fill the tplDic by symbols that will be inserted into the
+ # document template.
+ tplDic = {}
+
+ s = 'Do not edit this file. It was generated by the %s script.' % self.script_name
+ tplDic['editnote'] = s
+
+ tplDic['doxVersion'] = self.doxVersion
+ tplDic['supportedLangReadableStr'] = self.supportedLangReadableStr
+ tplDic['translatorReportFileName'] = self.translatorReportFileName
+
+ ahref = '<a href="../doc/' + self.translatorReportFileName
+ ahref += '"\n><code>doxygen/doc/' + self.translatorReportFileName
+ ahref += '</code></a>'
+ tplDic['translatorReportLink'] = ahref
+ tplDic['numLangStr'] = str(self.numLang)
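+
+ # Note (an assumption based on the 'doctpl % tplDic' substitution
+ # at the end of this method): language.tpl is expected to use named
+ # placeholders like %(doxVersion)s or %(informationTable)s, and any
+ # literal percent sign in the template has to be written as '%%'.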
+
+ # Define templates for HTML table parts of the documentation.
+ htmlTableTpl = '''\
+ \\htmlonly
+ <table align=center cellspacing=0 cellpadding=0 border=0>
+ <tr bgcolor="#000000">
+ <td>
+ <table cellspacing=1 cellpadding=2 border=0>
+ <tr bgcolor="#4040c0">
+ <td ><b><font size=+1 color="#ffffff"> Language </font></b></td>
+ <td ><b><font size=+1 color="#ffffff"> Maintainer </font></b></td>
+ <td ><b><font size=+1 color="#ffffff"> Contact address </font>
+ <font size=-2 color="#ffffff">(remove the NOSPAM.)</font></b></td>
+ <td ><b><font size=+1 color="#ffffff"> Status </font></b></td>
+ </tr>
+ <!-- table content begin -->
+ %s
+ <!-- table content end -->
+ </table>
+ </td>
+ </tr>
+ </table>
+ \\endhtmlonly
+ '''
+ htmlTableTpl = textwrap.dedent(htmlTableTpl)
+ htmlTrTpl = '\n <tr bgcolor="#ffffff">%s\n </tr>'
+ htmlTdTpl = '\n <td>%s</td>'
+
+ # Loop through the Transl objects in the order of sorted readable
+ # names and generate the content of the HTML table.
+ trlst = []
+ for name, obj in self.langLst:
+ # Fill the table data elements for one row. The first element
+ # contains the readable name of the language.
+ lst = [ htmlTdTpl % obj.langReadable ]
+
+ # The next two elements contain the list of maintainers
+ # and the list of their mangled e-mails. For English-based
+ # translators that are coupled with the non-English based,
+ # insert the 'see' note.
+ mm = None # init
+ ee = None # init
+ if obj.status == 'En':
+ # Check whether there is the coupled non-English.
+ classId = obj.classId[:-2]
+ if self.__translDic.has_key(classId):
+ lang = self.__translDic[classId].langReadable
+ mm = 'see the %s language' % lang
+ ee = '&nbsp;'
+
+ if not mm and self.__maintainersDic.has_key(obj.classId):
+ lm = [ m[0] for m in self.__maintainersDic[obj.classId] ]
+ mm = '<br>'.join(lm)
+ le = [ m[1] for m in self.__maintainersDic[obj.classId] ]
+ ee = '<br>'.join(le)
+
+ # Mangle the e-mail and replace the entity references.
+ if ee:
+ ee = ee.replace('@', '@NOSPAM.')
+ if mm:
+ mm = mm.replace('&ccaron;', '&#x010d;')
+ mm = mm.replace('&rcaron;', '&#x0159;')
+
+ # Append the maintainer and e-mail elements.
+ lst.append(htmlTdTpl % mm)
+ lst.append(htmlTdTpl % ee)
+
+ # The last element contains the readable form of the status.
+ lst.append(htmlTdTpl % obj.readableStatus)
+
+ # Join the table data to one table row.
+ trlst.append(htmlTrTpl % (''.join(lst)))
+
+ # Join the table rows and insert into the template.
+ htmlTable = htmlTableTpl % (''.join(trlst))
- # Generate the same using the original perl script.
- os.system('translator.pl')
+ # Define templates for LaTeX table parts of the documentation.
+ latexTableTpl = r'''
+ \latexonly
+ \begin{tabular}{|l|l|l|l|}
+ \hline
+ {\bf Language} & {\bf Maintainer} & {\bf Contact address} & {\bf Status} \\
+ \hline
+ %s
+ \hline
+ \end{tabular}
+ \endlatexonly
+ '''
+ latexTableTpl = textwrap.dedent(latexTableTpl)
+ latexLineTpl = '\n' + r' %s & %s & {\tt\tiny %s} & %s \\'
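+
+ # For illustration (hypothetical maintainer), the line template expands
+ # roughly to:
+ # latexLineTpl % ('Czech', 'Jan Novak', 'jan.novak@some.domain', 'up-to-date')
+ # -> ' Czech & Jan Novak & {\tt\tiny jan.novak@some.domain} & up-to-date \\'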
+
+ # Loop through the Transl objects in the order of sorted readable
+ # names and generate the content of the LaTeX table.
+ trlst = []
+ for name, obj in self.langLst:
+ # For LaTeX, more maintainers for the same language are
+ # placed on separate rows in the table. The line separator
+ # in the table is placed explicitly above the first
+ # maintainer. Prepare the arguments for the LaTeX row template.
+ maintainers = []
+ if self.__maintainersDic.has_key(obj.classId):
+ maintainers = self.__maintainersDic[obj.classId]
+
+ lang = obj.langReadable
+ maintainer = None # init
+ email = None # init
+ if obj.status == 'En':
+                # Check whether there is a coupled non-English translator.
+ classId = obj.classId[:-2]
+ if self.__translDic.has_key(classId):
+ langNE = self.__translDic[classId].langReadable
+ maintainer = 'see the %s language' % langNE
+ email = '~'
+
+ if not maintainer and self.__maintainersDic.has_key(obj.classId):
+                maintainer = maintainers[0][0]
+                email = maintainers[0][1]
+
+ status = obj.readableStatus
+
+ # Use the template to produce the line of the table and insert
+ # the hline plus the constructed line into the table content.
+ trlst.append('\n \\hline')
+ trlst.append(latexLineTpl % (lang, maintainer, email, status))
+
+ # List the other maintainers for the language. Do not set
+ # lang and status for them.
+ lang = '~'
+ status = '~'
+ for m in maintainers[1:]:
+ maintainer = m[0]
+ email = m[1]
+ trlst.append(latexLineTpl % (lang, maintainer, email, status))
+
+ # Join the table lines and insert into the template.
+ latexTable = latexTableTpl % (''.join(trlst))
+
+ # Do the LaTeX replacements.
+ latexTable = latexTable.replace('&aacute;', "\\'{a}")
+ latexTable = latexTable.replace('&Aacute;', "\\'{A}")
+ latexTable = latexTable.replace('&auml;', '\\"{a}')
+ latexTable = latexTable.replace('&ouml;', '\\"{o}')
+ latexTable = latexTable.replace('&oslash;', '\\o{}')
+ latexTable = latexTable.replace('&ccaron;', '\\v{c}')
+ latexTable = latexTable.replace('&rcaron;', '\\v{r}')
+ latexTable = latexTable.replace('_', '\\_')
- # Generate the language.doc file.
- filename = os.path.join(info['docdir'], info['flangdoc'])
- f = file(filename, 'w')
- f.write(GenerateLanguageDoc(cb))
- f.close()
+ # Put the HTML and LaTeX parts together and define the dic item.
+ tplDic['informationTable'] = htmlTable + '\n' + latexTable
+
+ # Insert the symbols into the document template and write it down.
+ f = file(fDocName, 'w')
+ f.write(doctpl % tplDic)
+ f.close()
+
+if __name__ == '__main__':
+
+ # Create the manager, build the transl objects, and parse the related
+ # sources.
+ trMan = TrManager()
- sys.exit(0)
+ # Generate the language.doc.
+ trMan.generateLanguageDoc()
+
+ # Generate the translator report.
+ trMan.generateTranslatorReport()
diff --git a/packages/rpm/doxygen.spec b/packages/rpm/doxygen.spec
index e69de29..f289012 100644
--- a/packages/rpm/doxygen.spec
+++ b/packages/rpm/doxygen.spec
@@ -0,0 +1,153 @@
+Summary: A documentation system for C/C++.
+Name: doxygen
+Version: 1.3.6_20040222
+Release: 1
+Epoch: 1
+Source0: ftp://ftp.stack.nl/pub/users/dimitri/%{name}-%{version}.src.tar.gz
+Patch: doxygen-1.2.7-redhat.patch
+Patch1: doxygen-1.2.12-qt2.patch
+Group: Development/Tools
+License: GPL
+Url: http://www.stack.nl/~dimitri/doxygen/index.html
+Prefix: %{_prefix}
+BuildPrereq: libstdc++-devel >= 2.96, /usr/bin/perl
+BuildRoot: %{_tmppath}/%{name}-%{version}-root
+
+%description
+Doxygen can generate an online class browser (in HTML) and/or a
+reference manual (in LaTeX) from a set of documented source files. The
+documentation is extracted directly from the sources. Doxygen can
+also be configured to extract the code structure from undocumented
+source files.
+
+%package doxywizard
+Summary: A GUI for creating and editing configuration files.
+Group: User Interface/X
+Requires: %{name} = %{version}
+BuildPrereq: qt-devel >= 2.3.0
+Requires: qt >= 2.3.0
+
+%description doxywizard
+Doxywizard is a GUI for creating and editing configuration files that
+are used by doxygen.
+
+%prep
+%setup -q
+%patch -p1 -b .redhat
+%patch1 -p1 -b .qt2
+
+%build
+QTDIR="" && . /etc/profile.d/qt.sh
+export OLD_PO_FILE_INPUT=yes
+
+./configure --prefix %{_prefix} --shared --release --with-doxywizard
+make all docs
+
+%install
+rm -rf ${RPM_BUILD_ROOT}
+
+export OLD_PO_FILE_INPUT=yes
+make install INSTALL=$RPM_BUILD_ROOT%{_prefix}
+
+%clean
+rm -rf ${RPM_BUILD_ROOT}
+
+%files
+%defattr(-,root,root)
+%doc LANGUAGE.HOWTO README examples html
+%{_bindir}/doxygen
+%{_bindir}/doxytag
+
+%files doxywizard
+%defattr(-,root,root)
+%{_bindir}/doxywizard
+
+%changelog
+* Sun Jan 06 2002 Than Ngo <than@redhat.com> 1.2.13.1-1
+- update to 1.2.13.1
+
+* Sun Dec 30 2001 Jeff Johnson <jbj@redhat.com> 1.2.13-1
+- update to 1.2.13
+
+* Sun Nov 18 2001 Than Ngo <than@redhat.com> 1.2.12-1
+- update to 1.2.12
+- s/Copyright/License
+
+* Wed Sep 12 2001 Tim Powers <timp@redhat.com>
+- rebuild with new gcc and binutils
+
+* Wed Jun 13 2001 Than Ngo <than@redhat.com>
+- update to 1.2.8.1
+- make doxywizard a separate package
+- fix to use install as default
+
+* Tue Jun 05 2001 Than Ngo <than@redhat.com>
+- update to 1.2.8
+
+* Tue May 01 2001 Than Ngo <than@redhat.com>
+- update to 1.2.7
+- clean up specfile
+- patch to use RPM_OPT_FLAG
+
+* Wed Mar 14 2001 Jeff Johnson <jbj@redhat.com>
+- update to 1.2.6
+
+* Wed Feb 28 2001 Trond Eivind Glomsrød <teg@redhat.com>
+- rebuild
+
+* Tue Dec 26 2000 Than Ngo <than@redhat.com>
+- update to 1.2.4
+- remove excludearch ia64
+- bzip2 sources
+
+* Mon Dec 11 2000 Than Ngo <than@redhat.com>
+- rebuild with the fixed fileutils
+
+* Mon Oct 30 2000 Jeff Johnson <jbj@redhat.com>
+- update to 1.2.3.
+
+* Sun Oct 8 2000 Jeff Johnson <jbj@redhat.com>
+- update to 1.2.2.
+- enable doxywizard.
+
+* Sat Aug 19 2000 Preston Brown <pbrown@redhat.com>
+- 1.2.1 is latest stable, so we upgrade before Winston is released.
+
+* Wed Jul 12 2000 Prospector <bugzilla@redhat.com>
+- automatic rebuild
+
+* Tue Jul 4 2000 Jakub Jelinek <jakub@redhat.com>
+- Rebuild with new C++
+
+* Fri Jun 30 2000 Florian La Roche <laroche@redhat.de>
+- fix QTDIR detection
+
+* Fri Jun 09 2000 Preston Brown <pbrown@redhat.com>
+- compile on x86 w/o optimization, revert when compiler fixed!!
+
+* Wed Jun 07 2000 Preston Brown <pbrown@redhat.com>
+- use newer RPM macros
+
+* Tue Jun 6 2000 Jeff Johnson <jbj@redhat.com>
+- add to distro.
+
+* Tue May 9 2000 Tim Powers <timp@redhat.com>
+- rebuilt for 7.0
+
+* Wed Feb 2 2000 Bernhard Rosenkraenzer <bero@redhat.com>
+- recompile with current Qt (2.1.0/1.45)
+
+* Wed Jan 5 2000 Jeff Johnson <jbj@redhat.com>
+- update to 1.0.0.
+- recompile with qt-2.0.1 if available.
+- relocatable package.
+
+* Mon Nov 8 1999 Tim Powers <timp@redhat.com>
+- updated to 0.49-991106
+
+* Tue Jul 13 1999 Tim Powers <timp@redhat.com>
+- updated source
+- cleaned up some stuff in the spec file
+
+* Thu Apr 22 1999 Jeff Johnson <jbj@redhat.com>
+- Create Power Tools 6.0 package.
diff --git a/src/classdef.cpp b/src/classdef.cpp
index ff014c9..ffc6779 100644
--- a/src/classdef.cpp
+++ b/src/classdef.cpp
@@ -934,6 +934,7 @@ void ClassDef::writeDocumentation(OutputList &ol)
QCString pageTitle=displayName().copy();
QCString pageType;
QCString cType=compoundTypeString();
+ //printf("ClassDef::writeDocumentation() cType=%s\n",cType.data());
toupper(cType.at(0));
pageType+=" ";
pageType+=cType;
@@ -941,7 +942,9 @@ void ClassDef::writeDocumentation(OutputList &ol)
if (m_tempArgs) pageTitle.prepend(" Template");
startFile(ol,getOutputFileBase(),name(),pageTitle);
startTitle(ol,getOutputFileBase());
- ol.parseText(theTranslator->trCompoundReference(displayName(),m_compType,m_tempArgs!=0));
+ ol.parseText(theTranslator->trCompoundReference(displayName(),
+ m_isObjC && m_compType==Interface ? Class : m_compType,
+ m_tempArgs!=0));
addGroupListToTitle(ol,this);
endTitle(ol,getOutputFileBase(),name());
@@ -1039,7 +1042,9 @@ void ClassDef::writeDocumentation(OutputList &ol)
if (!Config_getString("GENERATE_TAGFILE").isEmpty())
{
Doxygen::tagFile << " <compound kind=\"" << compoundTypeString();
- Doxygen::tagFile << "\">" << endl;
+ Doxygen::tagFile << "\"";
+ if (isObjectiveC()) { Doxygen::tagFile << " objc=\"yes\""; }
+ Doxygen::tagFile << ">" << endl;
Doxygen::tagFile << " <name>" << convertToXML(name()) << "</name>" << endl;
Doxygen::tagFile << " <filename>" << convertToXML(getOutputFileBase()) << Doxygen::htmlFileExtension << "</filename>" << endl;
if (m_tempArgs)
@@ -1371,7 +1376,9 @@ void ClassDef::writeDocumentation(OutputList &ol)
{
ol.disable(OutputGenerator::Man);
ol.writeRuler();
- ol.parseText(theTranslator->trGeneratedFromFiles(m_compType,m_files.count()==1));
+ ol.parseText(theTranslator->trGeneratedFromFiles(
+ m_isObjC && m_compType==Interface ? Class : m_compType,
+ m_files.count()==1));
bool first=TRUE;
const char *file = m_files.first();
@@ -1498,7 +1505,7 @@ void ClassDef::writeMemberList(OutputList &ol)
ol.parseText(theTranslator->trIncludingInheritedMembers());
//ol.startItemList();
- ol.writeString("<table>\n");
+ ol.writeString("<p><table>\n");
//MemberNameInfo *mni=m_allMemberNameInfoList->first();
MemberNameInfoSDict::Iterator mnii(*m_allMemberNameInfoSDict);
@@ -1526,24 +1533,45 @@ void ClassDef::writeMemberList(OutputList &ol)
QCString name=mi->ambiguityResolutionScope+md->name();
//ol.writeListItem();
ol.writeString(" <tr class=\"memlist\"><td>");
- //Definition *bd = md->getGroupDef();
- //if (bd==0) bd=cd;
- ol.writeObjectLink(md->getReference(),
- md->getOutputFileBase(),
- md->anchor(),name);
-
- if ( md->isFunction() || md->isSignal() || md->isSlot() ||
- (md->isFriend() && md->argsString()))
- ol.docify(md->argsString());
- else if (md->isEnumerate())
- ol.parseText(" "+theTranslator->trEnumName());
- else if (md->isEnumValue())
- ol.parseText(" "+theTranslator->trEnumValue());
- else if (md->isTypedef())
- ol.docify(" typedef");
- else if (md->isFriend() && !strcmp(md->typeString(),"friend class"))
- ol.docify(" class");
- //ol.writeString("\n");
+ if (cd->isObjectiveC())
+ {
+ if (md->isObjCMethod())
+ {
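+          // Objective-C convention: '+' marks a class (static) method, '-' an instance method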
+ if (md->isStatic())
+ ol.writeString("+&nbsp;</td><td>");
+ else
+ ol.writeString("-&nbsp;</td><td>");
+ }
+ else
+ ol.writeString("</td><td>");
+ }
+ if (md->isObjCMethod())
+ {
+ ol.writeObjectLink(md->getReference(),
+ md->getOutputFileBase(),
+ md->anchor(),md->name());
+ }
+ else
+ {
+ //Definition *bd = md->getGroupDef();
+ //if (bd==0) bd=cd;
+ ol.writeObjectLink(md->getReference(),
+ md->getOutputFileBase(),
+ md->anchor(),name);
+
+ if ( md->isFunction() || md->isSignal() || md->isSlot() ||
+ (md->isFriend() && md->argsString()))
+ ol.docify(md->argsString());
+ else if (md->isEnumerate())
+ ol.parseText(" "+theTranslator->trEnumName());
+ else if (md->isEnumValue())
+ ol.parseText(" "+theTranslator->trEnumValue());
+ else if (md->isTypedef())
+ ol.docify(" typedef");
+ else if (md->isFriend() && !strcmp(md->typeString(),"friend class"))
+ ol.docify(" class");
+ //ol.writeString("\n");
+ }
ol.writeString("</td>");
memberWritten=TRUE;
}
@@ -1554,17 +1582,32 @@ void ClassDef::writeMemberList(OutputList &ol)
{
//ol.writeListItem();
ol.writeString(" <tr bgcolor=\"#f0f0f0\"><td>");
+ if (cd->isObjectiveC())
+ {
+ if (md->isObjCMethod())
+ {
+ if (md->isStatic())
+ ol.writeString("+&nbsp;</td><td>");
+ else
+ ol.writeString("-&nbsp;</td><td>");
+ }
+ else
+ ol.writeString("</td><td>");
+ }
ol.startBold();
ol.docify(md->name());
ol.endBold();
- if ( md->isFunction() || md->isSignal() || md->isSlot() )
- ol.docify(md->argsString());
- else if (md->isEnumerate())
- ol.parseText(" "+theTranslator->trEnumName());
- else if (md->isEnumValue())
- ol.parseText(" "+theTranslator->trEnumValue());
- else if (md->isTypedef())
- ol.docify(" typedef");
+ if (!md->isObjCMethod())
+ {
+ if ( md->isFunction() || md->isSignal() || md->isSlot() )
+ ol.docify(md->argsString());
+ else if (md->isEnumerate())
+ ol.parseText(" "+theTranslator->trEnumName());
+ else if (md->isEnumValue())
+ ol.parseText(" "+theTranslator->trEnumValue());
+ else if (md->isTypedef())
+ ol.docify(" typedef");
+ }
ol.writeString(" (");
ol.parseText(theTranslator->trDefinedIn()+" ");
if (cd->isLinkable())
@@ -1589,7 +1632,8 @@ void ClassDef::writeMemberList(OutputList &ol)
ol.writeString("</td>");
ol.writeString("<td>");
}
- if ((prot!=Public || virt!=Normal ||
+ if (
+ (prot!=Public || virt!=Normal ||
md->isFriend() || md->isRelated() || md->isExplicit() ||
md->isMutable() || (md->isInline() && Config_getBool("INLINE_INFO")) ||
md->isSignal() || md->isSlot() ||
diff --git a/src/cmdmapper.cpp b/src/cmdmapper.cpp
index d531d55..959be75 100644
--- a/src/cmdmapper.cpp
+++ b/src/cmdmapper.cpp
@@ -103,6 +103,8 @@ CommandMap cmdMap[] =
{ "_internalref", CMD_INTERNALREF },
{ "dot", CMD_DOT },
{ "enddot", CMD_ENDDOT },
+ { "manonly", CMD_MANONLY },
+ { "endmanonly", CMD_ENDMANONLY },
{ 0, 0 }
};
diff --git a/src/cmdmapper.h b/src/cmdmapper.h
index e990716..18e43e3 100644
--- a/src/cmdmapper.h
+++ b/src/cmdmapper.h
@@ -103,7 +103,9 @@ enum CommandType
CMD_XREFITEM = 69 | SIMPLESECT_BIT,
CMD_XMLONLY = 70,
CMD_DOT = 71,
- CMD_ENDDOT = 72
+ CMD_ENDDOT = 72,
+ CMD_MANONLY = 73,
+ CMD_ENDMANONLY = 74
};
enum HtmlTagType
diff --git a/src/config.l b/src/config.l
index 3e11d90..7c1cc28 100644
--- a/src/config.l
+++ b/src/config.l
@@ -1100,6 +1100,8 @@ void Config::check()
filePatternList.append("*.php");
filePatternList.append("*.php3");
filePatternList.append("*.inc");
+ filePatternList.append("*.m");
+ filePatternList.append("*.mm");
}
// add default pattern if needed
@@ -1819,7 +1821,7 @@ void Config::create()
"and *.h) to filter out the source-files in the directories. If left \n"
"blank the following patterns are tested: \n"
"*.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx *.hpp \n"
- "*.h++ *.idl *.odl *.cs *.php *.php3 *.inc\n"
+ "*.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm\n"
);
cb = addBool(
"RECURSIVE",
diff --git a/src/declinfo.l b/src/declinfo.l
index 68d4e81..7058e43 100644
--- a/src/declinfo.l
+++ b/src/declinfo.l
@@ -128,6 +128,9 @@ ID ([a-z_A-Z][a-z_A-Z0-9]*)|(@[0-9]+)
}
name.resize(0);
}
+<Start>":" { // Objective-C argument separator
+ name+=yytext;
+ }
<Start>[*&]+ {
addType();
type+=yytext;
diff --git a/src/docparser.cpp b/src/docparser.cpp
index 760e1b5..703ed07 100644
--- a/src/docparser.cpp
+++ b/src/docparser.cpp
@@ -826,6 +826,15 @@ reparsetoken:
doctokenizerYYsetStatePara();
}
break;
+ case CMD_MANONLY:
+ {
+ doctokenizerYYsetStateManOnly();
+ tok = doctokenizerYYlex();
+ children.append(new DocVerbatim(parent,g_context,g_token->verb,DocVerbatim::ManOnly,g_isExample,g_exampleName));
+ if (tok==0) warn_doc_error(g_fileName,doctokenizerYYlineno,"Warning: manonly section ended without end marker");
+ doctokenizerYYsetStatePara();
+ }
+ break;
case CMD_LATEXONLY:
{
doctokenizerYYsetStateLatexOnly();
@@ -3664,6 +3673,15 @@ int DocPara::handleCommand(const QString &cmdName)
doctokenizerYYsetStatePara();
}
break;
+ case CMD_MANONLY:
+ {
+ doctokenizerYYsetStateManOnly();
+ retval = doctokenizerYYlex();
+ m_children.append(new DocVerbatim(this,g_context,g_token->verb,DocVerbatim::ManOnly,g_isExample,g_exampleName));
+ if (retval==0) warn_doc_error(g_fileName,doctokenizerYYlineno,"Warning: manonly section ended without end marker");
+ doctokenizerYYsetStatePara();
+ }
+ break;
case CMD_LATEXONLY:
{
doctokenizerYYsetStateLatexOnly();
@@ -3702,6 +3720,7 @@ int DocPara::handleCommand(const QString &cmdName)
break;
case CMD_ENDCODE:
case CMD_ENDHTMLONLY:
+ case CMD_ENDMANONLY:
case CMD_ENDLATEXONLY:
case CMD_ENDXMLONLY:
case CMD_ENDLINK:
diff --git a/src/docparser.h b/src/docparser.h
index 05d6c93..0aba864 100644
--- a/src/docparser.h
+++ b/src/docparser.h
@@ -349,7 +349,7 @@ class DocWhiteSpace : public DocNode
class DocVerbatim : public DocNode
{
public:
- enum Type { Code, HtmlOnly, LatexOnly, XmlOnly, Verbatim, Dot };
+ enum Type { Code, HtmlOnly, ManOnly, LatexOnly, XmlOnly, Verbatim, Dot };
DocVerbatim(DocNode *parent,const QString &context,
const QString &text, Type t,bool isExample,
const QString &exampleFile) :
diff --git a/src/doctokenizer.h b/src/doctokenizer.h
index bd63611..4bac78d 100644
--- a/src/doctokenizer.h
+++ b/src/doctokenizer.h
@@ -120,6 +120,7 @@ void doctokenizerYYsetStatePara();
void doctokenizerYYsetStateTitle();
void doctokenizerYYsetStateCode();
void doctokenizerYYsetStateHtmlOnly();
+void doctokenizerYYsetStateManOnly();
void doctokenizerYYsetStateLatexOnly();
void doctokenizerYYsetStateXmlOnly();
void doctokenizerYYsetStateVerbatim();
diff --git a/src/doctokenizer.l b/src/doctokenizer.l
index 4507ef6..3c3dcaf 100644
--- a/src/doctokenizer.l
+++ b/src/doctokenizer.l
@@ -324,6 +324,7 @@ LABELID [a-z_A-Z][a-z_A-Z0-9\-]*
%x St_TitleV
%x St_Code
%x St_HtmlOnly
+%x St_ManOnly
%x St_LatexOnly
%x St_XmlOnly
%x St_Verbatim
@@ -502,6 +503,14 @@ LABELID [a-z_A-Z][a-z_A-Z0-9\-]*
<St_HtmlOnly>. {
g_token->verb+=yytext;
}
+<St_ManOnly>{CMD}"endmanonly" {
+ return RetVal_OK;
+ }
+<St_ManOnly>[^\\@\n$]+ |
+<St_ManOnly>\n |
+<St_ManOnly>. {
+ g_token->verb+=yytext;
+ }
<St_LatexOnly>{CMD}"endlatexonly" {
return RetVal_OK;
}
@@ -880,6 +889,12 @@ void doctokenizerYYsetStateHtmlOnly()
BEGIN(St_HtmlOnly);
}
+void doctokenizerYYsetStateManOnly()
+{
+ g_token->verb="";
+ BEGIN(St_ManOnly);
+}
+
void doctokenizerYYsetStateXmlOnly()
{
g_token->verb="";
diff --git a/src/doxygen.cpp b/src/doxygen.cpp
index 869cb8c..bf1cbc9 100644
--- a/src/doxygen.cpp
+++ b/src/doxygen.cpp
@@ -491,7 +491,7 @@ static void addIncludeFile(ClassDef *cd,FileDef *ifd,Entry *root)
}
else // put #include in the class documentation without link
{
- cd->setIncludeFile(0,iName,local,FALSE);
+ cd->setIncludeFile(0,iName,local,TRUE);
}
}
}
@@ -729,6 +729,10 @@ static void addClassToContext(Entry *root)
case Entry::PROTOCOLDOC_SEC:
sec=ClassDef::Protocol;
break;
+ case Entry::CATEGORY_SEC:
+ case Entry::CATEGORYDOC_SEC:
+ sec=ClassDef::Category;
+ break;
case Entry::EXCEPTION_SEC:
case Entry::EXCEPTIONDOC_SEC:
sec=ClassDef::Exception;
@@ -811,6 +815,22 @@ static void addClassToContext(Entry *root)
cd->setBriefDescription(root->brief,root->briefFile,root->briefLine);
cd->insertUsedFile(root->fileName);
+ //int bi;
+ //if (root->objc && (bi=fullName.find('<'))!=-1 && root->extends->count()==0)
+ //{
+ // // add protocols as base classes
+ // int be=fullName.find('>'),len;
+ // static QRegExp re("[A-Z_a-z][A-Z_a-z0-9]*");
+ // int p=0;
+ // while ((p=re.match(fullName,bi+1,&len))!=-1 && p<be)
+ // {
+ // QCString baseName = fullName.mid(p,len);
+  //    printf("Adding artificial base class %s to %s\n",baseName.data(),fullName.data());
+ // root->extends->append(new BaseInfo(baseName,Public,Normal));
+ // bi=p+len;
+ // }
+ //}
+
// add class to the list
//printf("ClassDict.insert(%s)\n",resolveDefines(fullName).data());
Doxygen::classSDict.append(fullName,cd);
@@ -824,7 +844,8 @@ static void addClassToContext(Entry *root)
static void buildClassList(Entry *root)
{
if (
- (root->section & Entry::COMPOUND_MASK) && !root->name.isEmpty()
+ ((root->section & Entry::COMPOUND_MASK) ||
+ root->section==Entry::OBJCIMPL_SEC) && !root->name.isEmpty()
)
{
addClassToContext(root);
@@ -2232,11 +2253,12 @@ static void buildFunctionList(Entry *root)
)
)
{
- Debug::print(Debug::Functions,0,"--> member of class %s!\n",rname.data(),cd->name().data());
+ Debug::print(Debug::Functions,0,"--> member %s of class %s!\n",
+ rname.data(),cd->name().data());
addMethodToClass(root,cd,rname,isFriend);
}
else if (root->parent &&
- !(root->parent->section & Entry::COMPOUND_MASK) &&
+ !((root->parent->section & Entry::COMPOUND_MASK) || root->parent->section==Entry::OBJCIMPL_SEC) &&
!isMember &&
(root->relates.isEmpty() || root->relatesDup) &&
root->type.left(7)!="extern " &&
@@ -2310,24 +2332,6 @@ static void buildFunctionList(Entry *root)
md->setDocsForDefinition(!root->proto);
ArgumentList *argList = new ArgumentList;
stringToArgumentList(root->args,argList);
- //printf("root->argList=%p\n",root->argList);
- //if (root->argList)
- //{
- // ArgumentListIterator ali1(*root->argList);
- // ArgumentListIterator ali2(*argList);
- // Argument *sa,*da;
- // for (;(sa=ali1.current()) && (da=ali2.current());++ali1,++ali2)
- // {
- // printf("sa->name=%s (doc=%s) da->name=%s (doc=%s)\n",
- // sa->name.data(),sa->docs.data(),
- // da->name.data(),da->docs.data()
- // );
- // if (!sa->docs.isEmpty() && da->docs.isEmpty())
- // {
- // da->docs=sa->docs.copy();
- // }
- // }
- //}
if (root->proto)
{
//printf("setDeclArgumentList to %p\n",argList);
@@ -2359,19 +2363,6 @@ static void buildFunctionList(Entry *root)
// merge ingroup specifiers
if (md->getGroupDef()==0 && root->groups->first()!=0)
{
- //printf("new member is grouped, existing member not\n");
- // if we do addMemberToGroups here an undocumented declaration may prevent
- // the documented implementation below it from being added
- //addMemberToGroups(root,md);
- //GroupDef *gd=Doxygen::groupSDict[root->groups->first()->groupname.data()];
- //if (gd)
- //{
- // bool success = gd->insertMember(md);
- // if (success)
- // {
- // md->setGroupDef(gd, root->groups->first()->pri, root->fileName, root->startLine, !root->doc.isEmpty());
- // }
- //}
addMemberToGroups(root,md);
}
else if (md->getGroupDef()!=0 && root->groups->count()==0)
@@ -3302,6 +3293,7 @@ static bool findTemplateInstanceRelation(Entry *root,
ClassDef *instanceClass = templateClass->insertTemplateInstance(
root->fileName,root->startLine,templSpec,freshInstance);
if (isArtificial) instanceClass->setClassIsArtificial();
+ instanceClass->setIsObjectiveC(root->objc);
if (freshInstance)
{
@@ -4207,9 +4199,16 @@ static bool findGlobalMember(Entry *root,
}
else // got docs for an undefined member!
{
- warn(root->fileName,root->startLine,
- "Warning: documented function `%s' was not defined.",decl
- );
+ if (root->parent && root->parent->section==Entry::OBJCIMPL_SEC)
+ {
+        // probably a local Objective-C method that was not declared in the interface
+ }
+ else
+ {
+ warn(root->fileName,root->startLine,
+ "Warning: documented function `%s' was not defined.",decl
+ );
+ }
}
return TRUE;
}
@@ -4752,7 +4751,9 @@ static void findMember(Entry *root,
delete nl;
}
}
- if (count==0 && !(isFriend && funcType=="class"))
+ if (count==0 && !(isFriend && funcType=="class") &&
+ (root->parent==0 || root->parent->section!=Entry::OBJCIMPL_SEC)
+ )
{
int candidates=0;
if (mn->count()>0)
@@ -5193,6 +5194,32 @@ static void findMemberDocumentation(Entry *root)
}
//----------------------------------------------------------------------
+
+static void findObjCMethodDefinitions(Entry *root)
+{
+ EntryListIterator eli(*root->sublist);
+ Entry *objCImpl;
+ for (;(objCImpl=eli.current());++eli)
+ {
+ if (objCImpl->section==Entry::OBJCIMPL_SEC)
+ {
+ //printf("Found ObjC class implementation %s\n",objCImpl->name.data());
+ EntryListIterator seli(*objCImpl->sublist);
+ Entry *objCMethod;
+ for (;(objCMethod=seli.current());++seli)
+ {
+ if (objCMethod->section==Entry::FUNCTION_SEC)
+ {
+          //printf("  Found ObjC method definition %s\n",objCMethod->name.data());
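+          // build a qualified declaration "type Class::method args" so that
+          // findMember() can match this definition against the declaration
+          // from the corresponding @interface block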
+ findMember(objCMethod, objCMethod->type+" "+objCImpl->name+"::"+objCMethod->name+" "+objCMethod->args, FALSE,TRUE);
+ objCMethod->section=Entry::EMPTY_SEC;
+ }
+ }
+ }
+ }
+}
+
+//----------------------------------------------------------------------
// find and add the enumeration to their classes, namespaces or files
static void findEnums(Entry *root)
@@ -8129,6 +8156,7 @@ void parseInput()
findEnumDocumentation(root);
msg("Searching for member function documentation...\n");
+ findObjCMethodDefinitions(root);
findMemberDocumentation(root); // may introduce new members !
transferRelatedFunctionDocumentation();
transferFunctionDocumentation();
diff --git a/src/entry.cpp b/src/entry.cpp
index f111442..3bcaf31 100644
--- a/src/entry.cpp
+++ b/src/entry.cpp
@@ -200,8 +200,9 @@ Entry::~Entry()
void Entry::addSubEntry(Entry *current)
{
- //printf("Entry %d with name %s type 0x%x added\n",
- // current->num,current->name.data(),current->section);
+ //printf("Entry %d with name %s type 0x%x added to %s type 0x%x\n",
+ // current->num,current->name.data(),current->section,
+ // name.data(),section);
//printf("Entry::addSubEntry(%s) %p\n",current->name.data(),current->tArgList);
current->parent=this;
sublist->append(current);
diff --git a/src/entry.h b/src/entry.h
index 82d1a36..44504fc 100644
--- a/src/entry.h
+++ b/src/entry.h
@@ -191,42 +191,45 @@ class Entry
PROTOCOL_SEC | CATEGORY_SEC,
SCOPE_MASK = COMPOUND_MASK | NAMESPACE_SEC,
- CLASSDOC_SEC = 0x00000100,
- STRUCTDOC_SEC = 0x00000200,
- UNIONDOC_SEC = 0x00000400,
- EXCEPTIONDOC_SEC = 0x00000800,
- NAMESPACEDOC_SEC = 0x00001000,
- INTERFACEDOC_SEC = 0x00002000,
- PROTOCOLDOC_SEC = 0x00004000,
+ CLASSDOC_SEC = 0x00000800,
+ STRUCTDOC_SEC = 0x00001000,
+ UNIONDOC_SEC = 0x00002000,
+ EXCEPTIONDOC_SEC = 0x00004000,
+ NAMESPACEDOC_SEC = 0x00008000,
+ INTERFACEDOC_SEC = 0x00010000,
+ PROTOCOLDOC_SEC = 0x00020000,
+ CATEGORYDOC_SEC = 0x00040000,
COMPOUNDDOC_MASK = CLASSDOC_SEC | STRUCTDOC_SEC | UNIONDOC_SEC |
- INTERFACEDOC_SEC | EXCEPTIONDOC_SEC | PROTOCOLDOC_SEC,
+ INTERFACEDOC_SEC | EXCEPTIONDOC_SEC | PROTOCOLDOC_SEC |
+ CATEGORYDOC_SEC,
- SOURCE_SEC = 0x00010000,
- HEADER_SEC = 0x00020000,
+ SOURCE_SEC = 0x00400000,
+ HEADER_SEC = 0x00800000,
FILE_MASK = SOURCE_SEC | HEADER_SEC,
- ENUMDOC_SEC = 0x00100000,
- ENUM_SEC = 0x00200000,
- EMPTY_SEC = 0x00300000,
- PAGEDOC_SEC = 0x00400000,
- VARIABLE_SEC = 0x00500000,
- FUNCTION_SEC = 0x00600000,
- TYPEDEF_SEC = 0x00700000,
- MEMBERDOC_SEC = 0x00800000,
- OVERLOADDOC_SEC = 0x00900000,
- EXAMPLE_SEC = 0x00a00000,
- VARIABLEDOC_SEC = 0x00b00000,
- FILEDOC_SEC = 0x00c00000,
- DEFINEDOC_SEC = 0x00d00000,
- INCLUDE_SEC = 0x00e00000,
- DEFINE_SEC = 0x00f00000,
- GROUPDOC_SEC = 0x01000000,
- USINGDIR_SEC = 0x01100000,
- MAINPAGEDOC_SEC = 0x01200000,
- MEMBERGRP_SEC = 0x01300000,
- USINGDECL_SEC = 0x01400000,
- PACKAGE_SEC = 0x01500000,
- PACKAGEDOC_SEC = 0x01600000
+ ENUMDOC_SEC = 0x01000000,
+ ENUM_SEC = 0x02000000,
+ EMPTY_SEC = 0x03000000,
+ PAGEDOC_SEC = 0x04000000,
+ VARIABLE_SEC = 0x05000000,
+ FUNCTION_SEC = 0x06000000,
+ TYPEDEF_SEC = 0x07000000,
+ MEMBERDOC_SEC = 0x08000000,
+ OVERLOADDOC_SEC = 0x09000000,
+ EXAMPLE_SEC = 0x0a000000,
+ VARIABLEDOC_SEC = 0x0b000000,
+ FILEDOC_SEC = 0x0c000000,
+ DEFINEDOC_SEC = 0x0d000000,
+ INCLUDE_SEC = 0x0e000000,
+ DEFINE_SEC = 0x0f000000,
+ GROUPDOC_SEC = 0x10000000,
+ USINGDIR_SEC = 0x11000000,
+ MAINPAGEDOC_SEC = 0x12000000,
+ MEMBERGRP_SEC = 0x13000000,
+ USINGDECL_SEC = 0x14000000,
+ PACKAGE_SEC = 0x15000000,
+ PACKAGEDOC_SEC = 0x16000000,
+ OBJCIMPL_SEC = 0x17000000
};
enum MemberSpecifier
{
diff --git a/src/htmldocvisitor.cpp b/src/htmldocvisitor.cpp
index bea84fa..1496366 100644
--- a/src/htmldocvisitor.cpp
+++ b/src/htmldocvisitor.cpp
@@ -200,6 +200,7 @@ void HtmlDocVisitor::visit(DocVerbatim *s)
case DocVerbatim::HtmlOnly:
m_t << s->text();
break;
+ case DocVerbatim::ManOnly:
case DocVerbatim::LatexOnly:
case DocVerbatim::XmlOnly:
/* nothing */
diff --git a/src/htmlgen.cpp b/src/htmlgen.cpp
index a5113b6..30d9a5c 100644
--- a/src/htmlgen.cpp
+++ b/src/htmlgen.cpp
@@ -43,7 +43,7 @@
static const char *defaultStyleSheet =
"H1 {\n"
" text-align: center;\n"
-" font-family: Arial, Helvetica, sans-serif;\n"
+" font-family: Geneva, Arial, Helvetica, sans-serif;\n"
"}\n"
"H2 {\n"
" font-family: Geneva, Arial, Helvetica, sans-serif;\n"
@@ -56,6 +56,7 @@ static const char *defaultStyleSheet =
" text-align: center;\n"
" margin: 2px;\n"
" padding: 2px;\n"
+" line-height: 120%;\n"
"}\n"
"A.qindex {\n"
" text-decoration: none;\n"
@@ -79,9 +80,9 @@ static const char *defaultStyleSheet =
" font-weight: bold;\n"
" background-color: #6666cc;\n"
" color: #ffffff;\n"
-" padding: 2 6px;\n"
+" padding: 2px 6px;\n"
" border: 1px double #9295C2;\n"
-" }\n"
+"}\n"
"A.qindexHL:hover {\n"
" text-decoration: none;\n"
" background-color: #6666cc;\n"
@@ -116,7 +117,13 @@ static const char *defaultStyleSheet =
"TD.md { background-color: #F4F4FB; font-weight: bold; }\n"
"TD.mdname1 { background-color: #F4F4FB; font-weight: bold; color: #602020; }\n"
"TD.mdname { background-color: #F4F4FB; font-weight: bold; color: #602020; width: 600px; }\n"
-"DIV.groupHeader { margin-left: 16px; margin-top: 12px; margin-bottom: 6px; font-weight: bold }\n"
+"DIV.groupHeader {\n"
+" margin-left: 16px;\n"
+" margin-top: 12px;\n"
+" margin-bottom: 6px;\n"
+" font-weight: bold;\n"
+" font-family: Geneva, Arial, Helvetica, sans-serif;\n"
+"}\n"
"DIV.groupText { margin-left: 16px; font-style: italic; font-size: smaller }\n"
"BODY {\n"
" background: white;\n"
@@ -172,7 +179,7 @@ static const char *defaultStyleSheet =
"}\n"
".mdescLeft {\n"
" font-size: smaller;\n"
-" font-family: Arial, Helvetica, sans-serif;\n"
+" font-style: italic;\n"
" background-color: #FAFAFA;\n"
" padding-left: 8px;\n"
" border-top: 1px none #E0E0E0;\n"
@@ -183,7 +190,6 @@ static const char *defaultStyleSheet =
"}\n"
".mdescRight {\n"
" font-size: smaller;\n"
-" font-family: Arial, Helvetica, sans-serif;\n"
" font-style: italic;\n"
" background-color: #FAFAFA;\n"
" padding-left: 4px;\n"
@@ -282,8 +288,16 @@ void HtmlGenerator::init()
exit(1);
}
writeLogo(dname);
- if (!Config_getString("HTML_HEADER").isEmpty()) g_header=fileToString(Config_getString("HTML_HEADER"));
- if (!Config_getString("HTML_FOOTER").isEmpty()) g_footer=fileToString(Config_getString("HTML_FOOTER"));
+ if (!Config_getString("HTML_HEADER").isEmpty())
+ {
+ g_header=fileToString(Config_getString("HTML_HEADER"));
+ //printf("g_header='%s'\n",g_header.data());
+ }
+ if (!Config_getString("HTML_FOOTER").isEmpty())
+ {
+ g_footer=fileToString(Config_getString("HTML_FOOTER"));
+ //printf("g_footer='%s'\n",g_footer.data());
+ }
}
void HtmlGenerator::writeStyleSheetFile(QFile &file)
@@ -422,8 +436,7 @@ static void writePageFooter(QTextStream &t,const QCString lastTitle)
);
t << endl << "<a href=\"http://www.doxygen.org/index.html\">";
t << endl << "<img src=\"doxygen.png\" alt=\"doxygen\" "
- << "align=\"middle\" border=0 > " << endl <<
- "</a>" << versionString <<" ";
+ << "align=\"middle\" border=0 > " << "</a>" << versionString << " ";
t << "</small></address>\n</body>\n</html>\n";
}
else
diff --git a/src/latexdocvisitor.cpp b/src/latexdocvisitor.cpp
index 17fc3cc..30305db 100644
--- a/src/latexdocvisitor.cpp
+++ b/src/latexdocvisitor.cpp
@@ -258,6 +258,7 @@ void LatexDocVisitor::visit(DocVerbatim *s)
break;
case DocVerbatim::HtmlOnly:
case DocVerbatim::XmlOnly:
+ case DocVerbatim::ManOnly:
/* nothing */
break;
case DocVerbatim::LatexOnly:
diff --git a/src/mandocvisitor.cpp b/src/mandocvisitor.cpp
index d697bfc..2345670 100644
--- a/src/mandocvisitor.cpp
+++ b/src/mandocvisitor.cpp
@@ -198,6 +198,9 @@ void ManDocVisitor::visit(DocVerbatim *s)
m_t << ".PP" << endl;
m_firstCol=TRUE;
break;
+ case DocVerbatim::ManOnly:
+ m_t << s->text();
+ break;
case DocVerbatim::HtmlOnly:
case DocVerbatim::XmlOnly:
case DocVerbatim::LatexOnly:
diff --git a/src/mangen.cpp b/src/mangen.cpp
index b17d76d..6981372 100644
--- a/src/mangen.cpp
+++ b/src/mangen.cpp
@@ -153,6 +153,8 @@ void ManGenerator::endTitleHead(const char *,const char *name)
{
t << ".TH \"" << name << "\" " << getExtension() << " \""
<< dateToString(FALSE) << "\" \"";
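+  // when PROJECT_NUMBER is set, the generated header becomes roughly:
+  //   .TH "<name>" <ext> "<date>" "Version <number>" "<project name>"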
+ if (!Config_getString("PROJECT_NUMBER").isEmpty())
+ t << "Version " << Config_getString("PROJECT_NUMBER") << "\" \"";
if (Config_getString("PROJECT_NAME").isEmpty())
t << "Doxygen";
else
diff --git a/src/memberdef.cpp b/src/memberdef.cpp
index 8a3b927..c47ab09 100644
--- a/src/memberdef.cpp
+++ b/src/memberdef.cpp
@@ -144,21 +144,24 @@ static void writeDefArgumentList(OutputList &ol,ClassDef *cd,
{
QCString n=a->type;
if (md->isObjCMethod()) { n.prepend("("); n.append(")"); }
- if (!cName.isEmpty()) n=addTemplateNames(n,cd->name(),cName);
- linkifyText(TextGeneratorOLImpl(ol),cd,md->getBodyDef(),md->name(),n);
+ if (a->type!="...")
+ {
+ if (!cName.isEmpty()) n=addTemplateNames(n,cd->name(),cName);
+ linkifyText(TextGeneratorOLImpl(ol),cd,md->getBodyDef(),md->name(),n);
+ }
}
if (!md->isDefine())
{
ol.endParameterType();
ol.startParameterName(defArgList->count()<2);
}
- if (!a->name.isEmpty()) // argument has a name
+  if (!a->name.isEmpty() || (a->name.isEmpty() && a->type=="...")) // argument has a name or is an ellipsis
{
ol.docify(" ");
ol.disable(OutputGenerator::Man);
ol.startEmphasis();
ol.enable(OutputGenerator::Man);
- ol.docify(a->name);
+ if (a->name.isEmpty()) ol.docify(a->type); else ol.docify(a->name);
ol.disable(OutputGenerator::Man);
ol.endEmphasis();
ol.enable(OutputGenerator::Man);
@@ -191,7 +194,8 @@ static void writeDefArgumentList(OutputList &ol,ClassDef *cd,
{
//printf("Found parameter keyword %s\n",a->attrib.data());
// strip [ and ]
- key=a->attrib.mid(1,a->attrib.length()-2)+":";
+ key=a->attrib.mid(1,a->attrib.length()-2);
+ if (key!=",") key+=":"; // for normal keywords add colon
}
ol.endParameterName(FALSE,FALSE);
ol.startParameterType(FALSE,key);
@@ -800,9 +804,9 @@ void MemberDef::writeDeclaration(OutputList &ol,
if (prot!=Public)
{
Doxygen::tagFile << "\" protection=\"";
- if (prot==Protected) Doxygen::tagFile << "public";
+ if (prot==Protected) Doxygen::tagFile << "protected";
else if (prot==Package) Doxygen::tagFile << "package";
- else /* Private */ Doxygen::tagFile << "protected";
+ else /* Private */ Doxygen::tagFile << "private";
}
if (virt!=Normal)
{
@@ -1321,6 +1325,17 @@ void MemberDef::writeDocumentation(MemberList *ml,OutputList &ol,
ol.startMemberDocName(isObjCMethod());
if (isObjCMethod())
{
+ // strip scope name
+ int ep = ldef.find("::");
+ if (ep!=-1)
+ {
+ int sp=ldef.findRev(' ',ep);
+ if (sp!=-1)
+ {
+ ldef=ldef.left(sp+1)+ldef.mid(ep+2);
+ }
+ }
+ // strip keywords
int dp = ldef.find(':');
if (dp!=-1)
{
diff --git a/src/perlmodgen.cpp b/src/perlmodgen.cpp
index 8072a53..20520c1 100644
--- a/src/perlmodgen.cpp
+++ b/src/perlmodgen.cpp
@@ -619,6 +619,7 @@ void PerlModDocVisitor::visit(DocVerbatim *s)
return;
case DocVerbatim::Verbatim: type = "preformatted"; break;
case DocVerbatim::HtmlOnly: type = "htmlonly"; break;
+ case DocVerbatim::ManOnly: type = "manonly"; break;
case DocVerbatim::LatexOnly: type = "latexonly"; break;
case DocVerbatim::XmlOnly: type = "xmlonly"; break;
case DocVerbatim::Dot: type = "dot"; break;
diff --git a/src/printdocvisitor.h b/src/printdocvisitor.h
index 42b418c..52062ec 100644
--- a/src/printdocvisitor.h
+++ b/src/printdocvisitor.h
@@ -141,6 +141,7 @@ class PrintDocVisitor : public DocVisitor
case DocVerbatim::Code: printf("<code>"); break;
case DocVerbatim::Verbatim: printf("<verbatim>"); break;
case DocVerbatim::HtmlOnly: printf("<htmlonly>"); break;
+ case DocVerbatim::ManOnly: printf("<manonly>"); break;
case DocVerbatim::LatexOnly: printf("<latexonly>"); break;
case DocVerbatim::XmlOnly: printf("<xmlonly>"); break;
case DocVerbatim::Dot: printf("<dot>"); break;
@@ -151,6 +152,7 @@ class PrintDocVisitor : public DocVisitor
case DocVerbatim::Code: printf("</code>"); break;
case DocVerbatim::Verbatim: printf("</verbatim>"); break;
case DocVerbatim::HtmlOnly: printf("</htmlonly>"); break;
+ case DocVerbatim::ManOnly: printf("</manonly>"); break;
case DocVerbatim::LatexOnly: printf("</latexonly>"); break;
case DocVerbatim::XmlOnly: printf("</xmlonly>"); break;
case DocVerbatim::Dot: printf("</dot>"); break;
diff --git a/src/rtfdocvisitor.cpp b/src/rtfdocvisitor.cpp
index 0d77bce..6c65ee2 100644
--- a/src/rtfdocvisitor.cpp
+++ b/src/rtfdocvisitor.cpp
@@ -313,6 +313,7 @@ void RTFDocVisitor::visit(DocVerbatim *s)
case DocVerbatim::HtmlOnly:
case DocVerbatim::LatexOnly:
case DocVerbatim::XmlOnly:
+ case DocVerbatim::ManOnly:
/* nothing */
break;
case DocVerbatim::Dot:
diff --git a/src/scanner.l b/src/scanner.l
index 873b377..70ae82e 100644
--- a/src/scanner.l
+++ b/src/scanner.l
@@ -167,6 +167,7 @@ static QCString oldStyleArgType;
static QCString docBackup;
static QCString briefBackup;
+
//-----------------------------------------------------------------------------
static void initParser()
@@ -439,11 +440,12 @@ static void setContext()
insideIDL = fileName.right(4)==".idl" || fileName.right(5)==".pidl" ||
fileName.right(4)==".odl";
insideJava = fileName.right(5)==".java";
- insideCS = fileName.right(3)==".cs";
- insideD = fileName.right(3)==".d";
+  insideCS   = fileName.right(3)==".cs"; // C# source file
+  insideD    = fileName.right(3)==".d";  // D source file
insidePHP = fileName.right(4)==".php" || fileName.right(5)==".php4" ||
fileName.right(4)==".inc" || fileName.right(6)==".phtml";
- insideObjC = fileName.right(2)==".m";
+ insideObjC = fileName.right(2)==".m" || fileName.right(2)==".M" ||
+ fileName.right(3)==".mm";
if ( insidePHP )
{
useOverrideCommands = TRUE;
@@ -1024,6 +1026,11 @@ PHPKW ("require"|"require_once"|"include"|"include_once"|"echo")[^a-zA-Z0-9_;]
}
else
{
+ lineCount();
+ current->fileName = yyFileName;
+ current->startLine = yyLineNr;
+ current->bodyLine = yyLineNr;
+ current->section = Entry::FUNCTION_SEC;
current->protection = protection = Public ;
current->stat=yytext[0]=='+';
current->mtype = mtype = Method;
@@ -1044,7 +1051,7 @@ PHPKW ("require"|"require_once"|"include"|"include_once"|"echo")[^a-zA-Z0-9_;]
}
current->name = yytext;
}
-<ObjCMethod>":" { // start of parameter list
+<ObjCMethod>":"{B}* { // start of parameter list
current->name += ':';
Argument *a = new Argument;
current->argList->append(a);
@@ -1060,11 +1067,13 @@ PHPKW ("require"|"require_once"|"include"|"include_once"|"echo")[^a-zA-Z0-9_;]
current->argList->getLast()->attrib=(QCString)"["+yytext+"]";
current->name += yytext;
}
-<ObjCParams>{ID} { // name of parameter
- current->argList->getLast()->name=yytext;
+<ObjCParams>{ID}{BN}* { // name of parameter
+ lineCount();
+ current->argList->getLast()->name=QCString(yytext).stripWhiteSpace();
}
<ObjCParams>"..." { // name of parameter
- current->argList->getLast()->name=yytext;
+ current->argList->getLast()->attrib="[,]";
+ current->argList->getLast()->type="...";
}
<ObjCParams>":" {
current->name += ':';
@@ -1084,7 +1093,7 @@ PHPKW ("require"|"require_once"|"include"|"include_once"|"echo")[^a-zA-Z0-9_;]
<ObjCParamType>[^)]* {
current->argList->last()->type=yytext;
}
-<ObjCParamType>")" {
+<ObjCParamType>")"{B}* {
BEGIN( ObjCParams );
}
<ObjCMethod,ObjCParams>";" { // end of method declaration
@@ -1093,6 +1102,10 @@ PHPKW ("require"|"require_once"|"include"|"include_once"|"echo")[^a-zA-Z0-9_;]
unput(';');
BEGIN( Function );
}
+<ObjCMethod,ObjCParams>"{" { // start of a method body
+ unput('{');
+ BEGIN( Function );
+ }
<FindMembers>{BN}{1,80} {
lineCount();
}
@@ -1226,7 +1239,20 @@ PHPKW ("require"|"require_once"|"include"|"include_once"|"echo")[^a-zA-Z0-9_;]
current->name = QCString(yytext).stripWhiteSpace();
}
}
-<FindMembers>{B}*"@interface"{BN}+ { // Objective-C interface
+<FindMembers>{B}*"@implementation"{BN}+ { // Objective-C class implementation
+ lineCount();
+ isTypedef=FALSE;
+ current->section = Entry::OBJCIMPL_SEC;
+ current->objc = insideObjC = TRUE;
+ current->protection = protection = Public ;
+ addType( current ) ;
+ current->type += " implementation" ;
+ current->fileName = yyFileName;
+ current->startLine = yyLineNr;
+ current->bodyLine = yyLineNr;
+ BEGIN( CompoundName );
+ }
+<FindMembers>{B}*"@interface"{BN}+ { // Objective-C class interface
lineCount();
isTypedef=FALSE;
current->section = Entry::INTERFACE_SEC;
@@ -3327,10 +3353,19 @@ PHPKW ("require"|"require_once"|"include"|"include_once"|"echo")[^a-zA-Z0-9_;]
<ClassTemplSpec>. {
current->name += yytext;
}
+<CompoundName>{SCOPENAME}{BN}*";" { // forward declaration
+ unput(';');
+ if (isTypedef) // typedef of a class, put typedef keyword back
+ {
+ current->type.prepend("typedef");
+ }
+ BEGIN( FindMembers );
+ }
<CompoundName>{SCOPENAME} {
current->name = yytext ;
lineCount();
- if (current->section == Entry::PROTOCOL_SEC)
+ if (current->section == Entry::PROTOCOL_SEC ||
+ current->section == Entry::OBJCIMPL_SEC)
{
unput('{'); // fake start of body
}
@@ -3454,16 +3489,17 @@ PHPKW ("require"|"require_once"|"include"|"include_once"|"echo")[^a-zA-Z0-9_;]
current->name.sprintf("@%d",anonCount++);
}
curlyCount=0;
- if (current->section==Entry::PROTOCOL_SEC)
- {
+ if (current->section==Entry::PROTOCOL_SEC ||
+ current->section==Entry::OBJCIMPL_SEC)
+ { // ObjC body that ends with @end
BEGIN( ReadBodyIntf );
}
else if (current->section==Entry::NAMESPACE_SEC)
- {
+ { // namespace body
BEGIN( ReadNSBody );
}
else
- {
+ { // class body
BEGIN( ReadBody ) ;
}
}
diff --git a/src/tagreader.cpp b/src/tagreader.cpp
index 6301305..f3b5ee8 100644
--- a/src/tagreader.cpp
+++ b/src/tagreader.cpp
@@ -55,8 +55,8 @@ class TagMemberInfo
class TagClassInfo
{
public:
- enum Kind { Class, Struct, Union, Interface, Exception };
- TagClassInfo() { bases=0, templateArguments=0; members.setAutoDelete(TRUE); }
+ enum Kind { Class, Struct, Union, Interface, Exception, Protocol, Category };
+ TagClassInfo() { bases=0, templateArguments=0; members.setAutoDelete(TRUE); isObjC=FALSE; }
~TagClassInfo() { delete bases; delete templateArguments; }
QString name;
QString filename;
@@ -65,6 +65,7 @@ class TagClassInfo
QList<TagMemberInfo> members;
QList<QString> *templateArguments;
Kind kind;
+ bool isObjC;
};
/*! Container for namespace specific info that can be read from a tagfile */
@@ -194,6 +195,7 @@ class TagFileParser : public QXmlDefaultHandler
{
m_curString = "";
QString kind = attrib.value("kind");
+ QString isObjC = attrib.value("objc");
if (kind=="class")
{
m_curClass = new TagClassInfo;
@@ -224,6 +226,18 @@ class TagFileParser : public QXmlDefaultHandler
m_curClass->kind = TagClassInfo::Exception;
m_state = InClass;
}
+ else if (kind=="protocol")
+ {
+ m_curClass = new TagClassInfo;
+ m_curClass->kind = TagClassInfo::Protocol;
+ m_state = InClass;
+ }
+ else if (kind=="category")
+ {
+ m_curClass = new TagClassInfo;
+ m_curClass->kind = TagClassInfo::Category;
+ m_state = InClass;
+ }
else if (kind=="file")
{
m_curFile = new TagFileInfo;
@@ -253,6 +267,10 @@ class TagFileParser : public QXmlDefaultHandler
{
err("Error: Unknown compound attribute `%s' found!\n",kind.data());
}
+ if (isObjC=="yes" && m_curClass)
+ {
+ m_curClass->isObjC = TRUE;
+ }
}
void endCompound()
{
@@ -1004,6 +1022,8 @@ void TagFileParser::buildLists(Entry *root)
case TagClassInfo::Union: ce->section = Entry::UNION_SEC; break;
case TagClassInfo::Interface: ce->section = Entry::INTERFACE_SEC; break;
case TagClassInfo::Exception: ce->section = Entry::EXCEPTION_SEC; break;
+ case TagClassInfo::Protocol: ce->section = Entry::PROTOCOL_SEC; break;
+ case TagClassInfo::Category: ce->section = Entry::CATEGORY_SEC; break;
}
ce->name = tci->name;
addDocAnchors(ce,tci->docAnchors);
@@ -1011,6 +1031,7 @@ void TagFileParser::buildLists(Entry *root)
ti->tagName = m_tagName;
ti->fileName = tci->filename;
ce->tagInfo = ti;
+ ce->objc = tci->isObjC;
// transfer base class list
if (tci->bases)
{
diff --git a/src/util.cpp b/src/util.cpp
index ff0ec8d..a72a968 100644
--- a/src/util.cpp
+++ b/src/util.cpp
@@ -557,6 +557,7 @@ NamespaceDef *getResolvedNamespace(const char *name)
}
static QDict<MemberDef> g_resolvedTypedefs;
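+// namespaces already followed while testing accessibility of a symbol;
+// used in isAccessibleFrom() below to avoid visiting the same used namespace twice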
+static QDict<Definition> g_visitedNamespaces;
// forward declaration
ClassDef *getResolvedClassRec(Definition *scope,
@@ -746,12 +747,13 @@ bool accessibleViaUsingNamespace(const NamespaceSDict *nl,
//printf("Trying via used namespace %s\n",und->name().data());
Definition *sc = explicitScopePart.isEmpty() ? und : followPath(und,fileScope,explicitScopePart);
if (sc && item->getOuterScope()==sc) return TRUE;
- //printf("Try via used namespac done\n");
+ //printf("Try via used namespace done\n");
}
}
return FALSE;
}
+
/* Returns the "distance" (=number of levels up) from item to scope, or -1
* if item in not inside scope.
*/
@@ -857,6 +859,7 @@ int isAccessibleFrom(Definition *scope,FileDef *fileScope,Definition *item,
int i=-1;
if (newScope->definitionType()==Definition::TypeNamespace)
{
+ g_visitedNamespaces.insert(newScope->name(),newScope);
// this part deals with the case where item is a class
// A::B::C but is explicit referenced as A::C, where B is imported
// in A via a using directive.
@@ -884,11 +887,14 @@ int isAccessibleFrom(Definition *scope,FileDef *fileScope,Definition *item,
NamespaceDef *nd;
for (nli.toFirst();(nd=nli.current());++nli)
{
- i = isAccessibleFrom(scope,fileScope,item,nd->name());
- if (i!=-1)
+ if (g_visitedNamespaces.find(nd->name())==0)
{
- //printf("> found via explicit scope of used namespace\n");
- goto done;
+ i = isAccessibleFrom(scope,fileScope,item,nd->name());
+ if (i!=-1)
+ {
+ //printf("> found via explicit scope of used namespace\n");
+ goto done;
+ }
}
}
}
@@ -1009,6 +1015,7 @@ ClassDef *getResolvedClassRec(Definition *scope,
if (d->definitionType()==Definition::TypeClass ||
d->definitionType()==Definition::TypeMember)
{
+ g_visitedNamespaces.clear();
// test accessibility of definition within scope.
int distance = isAccessibleFrom(scope,fileScope,d,explicitScopePart);
if (distance!=-1) // definition is accessible
@@ -1525,6 +1532,10 @@ int filterCRLF(char *buf,int len)
if (src<len && buf[src] == '\n')
++src; // skip LF just after CR (DOS)
}
+    else if ( c == '\0' && src<len-1) // filter out internal \0 characters, as they would confuse the parser
+ {
+ c = ' '; // turn into a space
+ }
buf[dest++] = c; // copy the (modified) character to dest
}
return dest; // length of the valid part of the buf
diff --git a/src/xmldocvisitor.cpp b/src/xmldocvisitor.cpp
index 25b7d44..5d50020 100644
--- a/src/xmldocvisitor.cpp
+++ b/src/xmldocvisitor.cpp
@@ -181,6 +181,11 @@ void XmlDocVisitor::visit(DocVerbatim *s)
filter(s->text());
m_t << "</htmlonly>";
break;
+ case DocVerbatim::ManOnly:
+ m_t << "<manonly>";
+ filter(s->text());
+ m_t << "</manonly>";
+ break;
case DocVerbatim::LatexOnly:
m_t << "<latexonly>";
filter(s->text());