Diffstat (limited to 'tools/linuxdoc-tools/LinuxDocTools')
-rw-r--r--  tools/linuxdoc-tools/LinuxDocTools/BackEnd.pm     185
-rw-r--r--  tools/linuxdoc-tools/LinuxDocTools/CharEnts.pm    176
-rw-r--r--  tools/linuxdoc-tools/LinuxDocTools/FixRef.pm       76
-rw-r--r--  tools/linuxdoc-tools/LinuxDocTools/Html2Html.pm   583
-rw-r--r--  tools/linuxdoc-tools/LinuxDocTools/InfoUtils.pm   357
-rw-r--r--  tools/linuxdoc-tools/LinuxDocTools/Lang.pm        238
-rw-r--r--  tools/linuxdoc-tools/LinuxDocTools/Utils.pm       392
-rw-r--r--  tools/linuxdoc-tools/LinuxDocTools/Vars.pm         22
8 files changed, 2029 insertions, 0 deletions
diff --git a/tools/linuxdoc-tools/LinuxDocTools/BackEnd.pm b/tools/linuxdoc-tools/LinuxDocTools/BackEnd.pm
new file mode 100644
index 00000000..e402cc5d
--- /dev/null
+++ b/tools/linuxdoc-tools/LinuxDocTools/BackEnd.pm
@@ -0,0 +1,185 @@
+#
+# BackEnd.pm
+#
+# $Id: BackEnd.pm,v 1.1.1.1 2001/05/24 15:57:41 sano Exp $
+#
+# Dummy module containing backend specification.
+#
+# © Copyright 1997, Cees de Groot
+#
+package LinuxDocTools::BackEnd;
+
+die "This is a documentation package only!";
+
+=head1 NAME
+
+LinuxDocTools::BackEnd - LinuxDocTools back-end specification
+
+=head1 SYNOPSIS
+
+ require LinuxDocTools::BackEnd;
+ $BackEnd->{...};
+
+=head1 DESCRIPTION
+
+LinuxDoc-Tools backend modules need to conform to a certain interface which is
+detailed in this document. The interface makes sure that new backend modules
+(or custom overrides) are compatible with what the main B<LinuxDocTools>
+package expects. Note that this interface is still subject to change; you
+should check this document on new releases of LinuxDoc-Tools.
+
+=head1 INTERFACE
+
+The interface between the main package and individual backends is very
+minimal: only one global variable is modified; everything else is local. It
+relies heavily on references and complex datatypes, so you want to make
+sure that you're up-to-date with Perl 5.
+
+Every backend creates a reference to a hash and stores this reference in
+the global I<%Formats> hash:
+
+ my $BackEnd = {};
+ $Formats{"BackEnd"} = $BackEnd;
+
+The rest of this document will deal with the entries in the local hash
+referenced by I<$BackEnd>.
+
+=head1 HASH ENTRIES
+
+=over 4
+
+=item NAME
+
+Specify the name of the backend, for help messages etcetera.
+
+ $BackEnd->{NAME} = "BackEnd";
+
+=item HELP
+
+Specify an optional extra help message printed when the default usage
+function is executed (see L<LinuxDocTools::Utils>).
+
+ $BackEnd->{HELP} = "This is just an example message";
+
+=item OPTIONS
+
+This specifies the local set of options, which is added to the global set
+of options (available in I<$global>). The options are specified as an
+array of hashes, each containing the following keys:
+
+=over 4
+
+=item option
+
+The long option name.
+
+=item type
+
+The type of the option, one of B<f> (flag), B<l> (list of allowed values),
+B<s> (string), or B<i> (integer).
+
+=item values
+
+An array of allowed values, in case the option is of the list type.
+
+=item short
+
+A short (single-letter) version of the option name.
+
+=back
+
+Options can be specified as long options:
+
+ --papersize=a4
+
+or as short options:
+
+ -p a4
+
+Note that neither the long options nor the short options may conflict with
+the global options (an override is not - yet - possible), and they should not
+conflict with those of other backends.
+
+ $BackEnd->{OPTIONS} = [
+ { option => "split", type => "l",
+ 'values' => [ "0", "1", "2" ], short => "s" },
+ { option => "dosnames", type => "f", short => "D" },
+ { option => "imagebuttons", type => "f", short => "I"}
+ ];
+
+The long names themselves function as hash keys; a default can be given
+here and the option processing function will store any values found
+at the same place:
+
+ $BackEnd->{'split'} = 1;
+ $BackEnd->{dosnames} = 0;
+ $BackEnd->{imagebuttons} = 0;
+
+=item preNSGMLS
+
+If defined, this should contain a subroutine that normally does two things: it
+can modify the global value C<$global-E<gt>{NsgmlsOpts}> and it can set the
+global value C<$global-E<gt>{NsgmlsPrePipe}>. The first variable contains
+the option string passed to B<nsgmls>, and the second variable can contain
+a command that generates the input for B<nsgmls>, presumably using the
+current input file in some way (the current input file can be found
+in C<$global-E<gt>{file}>).
+
+ $BackEnd->{preNSGMLS} = sub {
+ $global->{NsgmlsOpts} .= " -ifmtBackEnd ";
+ $global->{NsgmlsPrePipe} = "sed 's/\@/\@\@/g' $global->{file}";
+ };
+
+=item preASP
+
+If defined, this should contain a subroutine accepting an input and an output
+file descriptor. The input file descriptor contains the raw output from
+B<nsgmls>, and the output file descriptor should be filled with input
+to B<sgmlsasp>. This stage is often used to munch character entities
+before they're fed to B<sgmlsasp>; see L<LinuxDocTools::CharEnts>. If the routine
+doesn't return C<0>, LinuxDocTools aborts.
+
+ $BackEnd->{preASP} = sub
+ {
+ my ($infile, $outfile) = @_;
+
+ while (<$infile>)
+ {
+ s/([^\\])\\n/$1 \\n/g;
+ print $outfile $_;
+ }
+ return 0;
+ };
+
+=item postASP
+
+This entry should always be defined, because it needs to contain a routine
+that receives the output from B<sgmlsasp>, which normally needs finalization.
+LinuxDocTools itself doesn't know about the file-naming conventions, etcetera,
+of the backend, so writing the final file is left to the backend. The
+subroutine receives a reference to a filehandle (containing B<sgmlsasp>
+output) and should do whatever it likes with this data stream.
+
+ $BackEnd->{postASP} = sub
+ {
+ my $infile = shift;
+
+ copy ($infile, "$global->{filename}.ext");
+ return 0;
+ };
+
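+To tie the pieces together, here is a minimal backend sketch. It is purely
+illustrative: the package name, format name and file extension below are made
+up for this example and are not part of LinuxDoc-Tools; only C<postASP> does
+real work.
+
+ package LinuxDocTools::BackEndExample;
+
+ use File::Copy;
+ use LinuxDocTools::Vars;     # exports %Formats and $global
+
+ my $BackEnd = {};
+ $Formats{"example"} = $BackEnd;
+
+ $BackEnd->{NAME} = "example";
+ $BackEnd->{HELP} = "";
+ $BackEnd->{OPTIONS} = [];
+
+ # Copy the sgmlsasp output verbatim to the final file.
+ $BackEnd->{postASP} = sub {
+     my $infile = shift;
+     copy ($infile, "$global->{filename}.example");
+     return 0;
+ };
+
+ 1;
+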
+=back
+
+=head1 SEE ALSO
+
+L<LinuxDocTools> and subpackages.
+
+=head1 AUTHOR
+
+SGML-Tools was written by Cees de Groot, C<E<lt>cg@cdegroot.comE<gt>>,
+and various SGML-Tools contributors as listed in C<CONTRIBUTORS>.
+Taketoshi Sano C<E<lt>sano@debian.orgE<gt>> renamed it to LinuxDocTools
+and did some bug-fixes and updates on it.
+
+=cut
+1;
diff --git a/tools/linuxdoc-tools/LinuxDocTools/CharEnts.pm b/tools/linuxdoc-tools/LinuxDocTools/CharEnts.pm
new file mode 100644
index 00000000..b0bcd532
--- /dev/null
+++ b/tools/linuxdoc-tools/LinuxDocTools/CharEnts.pm
@@ -0,0 +1,176 @@
+#
+# CharEnts.pm
+#
+# $Id: CharEnts.pm,v 1.1.1.1 2001/05/24 15:57:41 sano Exp $
+#
+# SGML Character Entity utilities -- interface to Perl module
+# Text::EntityMap.
+#
+package LinuxDocTools::CharEnts;
+use strict;
+
+=head1 NAME
+
+LinuxDocTools::CharEnts - Interface to Text::EntityMap
+
+=head1 SYNOPSIS
+
+ my $char_maps = load_char_maps ('.2ext', [ Text::EntityMap::sdata_dirs() ]);
+
+ $value = parse_data ($value, $char_maps, $escape_sub);
+
+=head1 DESCRIPTION
+
+This module provides a simple interface to the entity map handling provided by
+B<Text::EntityMap>.
+
+=head1 FUNCTIONS
+
+=over 4
+
+=cut
+
+use Text::EntityMap;
+use Exporter;
+
+use vars qw(@ISA @EXPORT $VERSION);
+@ISA = qw(Exporter);
+@EXPORT = qw(load_char_maps parse_data);
+$VERSION = sprintf("%d.%02d", q$Revision: 1.1.1.1 $ =~ /(\d+)\.(\d+)/);
+
+# `%warn_map' tracks entities that could not be mapped, so each one
+# is only warned about once.
+my %warn_map = ();
+
+=item parse_data ($data, $char_map, $escape_sub)
+
+B<parse_data> takes a string of I<$data> in the output format of
+B<nsgmls> (see SP's C<sgmlsout.htm> document) without the leading dash.
+B<parse_data> calls I<$char_map>'s lookup method for each sdata
+entity reference. If the entity reference is undefined, it is
+left alone (without the (n)sgmls C<\|>). For all remaining data,
+B<parse_data> calls back into I<$escape_sub> to properly escape
+characters for the backend formatter. Strings returned from the
+lookup method are assumed to be already escaped.
+
+This routine is derived from David Megginson's SGMLSpm.
+
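+A minimal usage sketch (the escape subroutine shown is a made-up example for
+an HTML-like backend; real backends supply their own escaping):
+
+ my $escape = sub {
+     my ($s) = @_;
+     $s =~ s/&/&amp;/g;
+     $s =~ s/</&lt;/g;
+     return $s;
+ };
+ $value = parse_data ($value, $char_maps, $escape);
+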
+=cut
+
+sub parse_data {
+ my ($data, $char_map, $escape_sub) = @_;
+ my ($result) = "";
+
+ my $sdata_flag = 0;
+ my $out = '';
+
+ while ($data =~ /\\(\\|n|\||[0-7]{1,3})/) {
+ $out .= $`;
+ $data = $';
+
+ if ($1 eq '|') {
+ # beginning or end of SDATA
+ if ("$out" ne '') {
+ if ($sdata_flag) {
+ my ($mapping) = $char_map->lookup ($out);
+ if (defined $mapping) {
+ # escape `\' in mapping for ASP
+ $mapping =~ s/\\/\\\\/g;
+ $result .= $mapping;
+ } else {
+ if (!$warn_map{$out}) {
+ warn "parse_data: no entity map for \`$out'\n";
+ $warn_map{$out} = 1;
+ }
+ # output the entity reference inside of `{}'
+ $result .= &$escape_sub ("{" . $out . "}");
+ }
+ } else {
+ $result .= &$escape_sub ($out);
+ }
+ $out = '';
+ }
+ $sdata_flag = !$sdata_flag;
+
+ } elsif ($1 eq 'n') {
+ # record end
+
+ # pass '\\n' through to ASP
+ $result .= &$escape_sub ($out) . '\\n';
+ $out = '';
+ } elsif ($1 eq '\\') {
+ # backslash
+
+ $result .= &$escape_sub ($out);
+
+ $out = '[bsol ]'; # bsol == entity name for backslash
+ my ($mapping) = $char_map->lookup ($out);
+ if (defined $mapping) {
+ # escape `\' in mapping for ASP
+ $mapping =~ s/\\/\\\\/g;
+ $result .= $mapping;
+ } else {
+ if (!$warn_map{$out}) {
+ warn "parse_data: no entity map for \`$out'\n";
+ $warn_map{$out} = 1;
+ }
+ # output the entity reference inside of `{}'
+ $result .= &$escape_sub ("{" . $out . "}");
+ }
+ $out = '';
+ } else {
+ # other octal character
+ $result .= &$escape_sub ($out . chr(oct($1)));
+ $out = '';
+ }
+ }
+ $out .= $data;
+ if ("$out" ne '') {
+ $result .= &$escape_sub ($out);
+ }
+
+ return ($result);
+}
+
+=item load_char_maps ($format, $paths)
+
+B<load_char_maps> takes an EntityMap format suffix and loads all of the
+character entity replacement sets for that suffix into an EntityMapGroup.
+It searches every directory in I<@{$paths}>.
+
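+For example (the C<.2html> suffix is illustrative; each backend uses its own
+suffix):
+
+ my $char_maps = load_char_maps ('.2html', [ Text::EntityMap::sdata_dirs() ]);
+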
+=cut
+
+sub load_char_maps {
+ my ($format, $paths) = @_;
+
+ my (@char_maps) = ();
+ my ($path, $file_name, $char_map);
+
+ foreach $path (@{$paths}) {
+ if (-d $path) {
+ opendir (SDATADIR, $path)
+ || die "load_char_map: opening directory \`$path' for reading: $!\n";
+ foreach $file_name (readdir (SDATADIR)) {
+ next if ($file_name !~ /$format$/);
+ eval {$char_map = Text::EntityMap->load ("$path/$file_name")}
+ || die "load_char_map: loading \`$path/$file_name'\n$@\n";
+ push (@char_maps, $char_map);
+ }
+ closedir (SDATADIR);
+ }
+ }
+
+ warn "load_char_maps: no entity maps found\n"
+ if ($#char_maps == -1);
+
+ return (Text::EntityMap->group (@char_maps));
+}
+
+=back
+
+=head1 AUTHOR
+
+Ken MacLeod, C<E<lt>ken@bitsko.slc.ut.usE<gt>>
+
+=cut
+1;
diff --git a/tools/linuxdoc-tools/LinuxDocTools/FixRef.pm b/tools/linuxdoc-tools/LinuxDocTools/FixRef.pm
new file mode 100644
index 00000000..d2549857
--- /dev/null
+++ b/tools/linuxdoc-tools/LinuxDocTools/FixRef.pm
@@ -0,0 +1,76 @@
+#
+# FixRef.pm
+#
+# $Id: FixRef.pm,v 1.1.1.1 2001/05/24 15:57:41 sano Exp $
+#
+# Start conversion from parsed linuxdoc-sgml to html.
+# - Identify references and file count
+#
+# Rules based on fixref.l
+#
+package LinuxDocTools::FixRef;
+
+# Externally visible variables
+$fixref = {};
+
+# Initialize: set splitlevel before using rules
+# Usage: &{$fixref->{init}}(<split level>);
+ # 0 - super page mode
+ # 1 - big page mode
+ # 2 - small page mode
+$fixref->{init} = sub {
+ $splitlevel = shift;
+};
+
+# Outputs: Read after using rules
+$fixref->{filenum} = 0; # Count of files we will create
+$fixref->{lrec} = {}; # label -> filenum
+
+# Package variables
+$chapter_mode = 0; # <report> vs. <article>
+$splitlevel = 0; # See $fixref->{init} above;
+ # Automatically reduced by 1 for chapter mode
+
+# Finalize parsing
+$fixref->{finish} = sub { }; # Do nothing when we're done
+
+# Ruleset
+$fixref->{rules} = {}; # Individual parsing rules
+$fixref->{defaultrule} = sub { }; # If line does not match any rules
+
+# Set the rules
+# <@@ssect> - split file if necessary
+$fixref->{rules}->{'^<@@ssect>.*$'} = sub { &splitfile(2); };
+
+# <@@sect> - split file if necessary
+$fixref->{rules}->{'^<@@sect>.*$'} = sub { &splitfile(1); };
+
+# <@@chapt> - set chapter mode; reduce splitlevel if needed; split file
+$fixref->{rules}->{'^<@@chapt>.*$'} = sub {
+ $splitlevel-- if (!$chapter_mode);
+ $chapter_mode = 1; &splitfile(0);
+};
+
+# <@@label> - Identify label location
+$fixref->{rules}->{'^<@@label>(.*)$'} = sub {
+ $fixref->{lrec}->{$1} = $fixref->{filenum};
+};
+
+#==============================
+# Split the file (-split option; level in parentheses):
+# non-chapter mode: -0 -> don't split
+# -1 -> split at sect (1)
+# -2 -> split at sect (1) and ssect (2)
+# chapter mode: -0 -> split at chapt (0)
+# -1 -> split at chapt (0)
+# -2 -> split at chapt (0) and sect (1)
+sub splitfile
+{
+ my ($level) = @_;
+ if (($level == 0) || ($splitlevel >= $level)) {
+ $fixref->{filenum}++;
+ }
+}
+
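+# A sketch of how a driver could apply this ruleset (the real dispatch loop
+# lives in the main LinuxDocTools package; this is only an illustration):
+#
+#   &{$fixref->{init}}(1);                    # big page mode
+#   while (<INFILE>) {
+#       my $matched = 0;
+#       foreach my $pat (keys %{$fixref->{rules}}) {
+#           if (/$pat/) {
+#               &{$fixref->{rules}->{$pat}}();
+#               $matched = 1;
+#               last;
+#           }
+#       }
+#       &{$fixref->{defaultrule}}() unless $matched;
+#   }
+#   &{$fixref->{finish}}();
+#   my $nfiles = $fixref->{filenum};          # files that will be created
+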
+1;
+
diff --git a/tools/linuxdoc-tools/LinuxDocTools/Html2Html.pm b/tools/linuxdoc-tools/LinuxDocTools/Html2Html.pm
new file mode 100644
index 00000000..9ff2e4cc
--- /dev/null
+++ b/tools/linuxdoc-tools/LinuxDocTools/Html2Html.pm
@@ -0,0 +1,583 @@
+#
+# Html2Html.pm
+#
+# $Id: Html2Html.pm,v 1.4 2001/08/31 23:09:10 sano Exp $
+#
+# Convert parsed linuxdoc-sgml to html.
+# - Split files; match references, generate TOC and navigation
+# aids, etc.
+#
+# Rules based on html2html.l
+#
+package LinuxDocTools::Html2Html;
+
+use FileHandle;
+use LinuxDocTools::Lang;
+
+# Externally visible variables
+$html2html = {};
+
+# Initialize: set splitlevel, extension, images, filename,
+# filenumber, label, header, footer, toclevel,
+# tmpbase, debug.
+# Usage:
+# &{$html2html->{init}}(split,ext,img,filename,filenum,label,hdr,ftr,toc,tmpbase, debug);
+# split level: 0 - super page mode
+# 1 - big page mode
+# 2 - small page mode
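+# Example call (all values purely illustrative; the label argument is
+# expected to be the label->filenum hash built by LinuxDocTools::FixRef):
+#   &{$html2html->{init}}(1, "html", 0, "mydoc", $fixref->{filenum},
+#                         $fixref->{lrec}, "", "", -1, "/tmp/sgmltmp$$", 0);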
+$html2html->{init} = sub {
+ $splitlevel = shift;
+ SWITCH: {
+ $super_page_mode = 0, $big_page_mode = 1, last SWITCH
+ if ($splitlevel == 1);
+ $super_page_mode = 0, $big_page_mode = 0, last SWITCH
+ if ($splitlevel == 2);
+ }
+
+ $fileext = shift;
+ $use_imgs = shift;
+ $firstname = shift;
+ $filecount = 1 + shift;
+ $lprec = shift;
+
+ $header = shift;
+ $footer = shift;
+
+ $toclevel = shift;
+ if ($toclevel == -1) {
+ if ($splitlevel == 0) {
+ $toclevel = 0;
+ } else {
+ $toclevel = 2;
+ }
+ }
+
+ $tmpbase = shift;
+ $content_file = $tmpbase . ".content";
+
+ $debug = shift;
+
+ $nextlabel = Xlat ("Next");
+ $prevlabel = Xlat ("Previous");
+ $toclabel = Xlat ("Contents");
+};
+
+# Package variables
+$big_page_mode = 0; # '-2' subsection splitting
+$super_page_mode = 1; # One page vs. page/section
+$chapter_mode = 0; # <article> vs. <report>
+$current = ""; # State of section/subsection/etc.
+$filenum = 1; # Current output file number
+$filecount = 1;
+$firstname = "$$"; # Base name for file
+$headbuf = ""; # Buffer for URL's
+$fileext = "html"; # "html" vs. "htm" for 8.3
+$in_appendix = 0; # n.n vs. a.n section numbers
+$in_section_list = 0; # List of sections flag
+$language = ""; # Default English; use '-Lname'
+# $lprec{label} # Label record
+$nextlabel = ""; # Link string
+$outfh = STDOUT; # Output filehandle
+$outname = ""; # Output file name
+$prevlabel = ""; # Link string
+$refname = ""; # Ref string
+$sectname = ""; # Section name
+$secnr = 0; # Section count
+$ssectname = ""; # Subsection name
+$ssecnr = 0; # Subsection count
+$skipnewline = 0; # Flag to ignore new line
+$toclabel = ""; # Link string
+$titlename = ""; # Title of document
+$use_imgs = 0; # '-img' pictorial links
+$urlname = ""; # Name for url links
+$header = "";
+$footer = "";
+$toclevel = -1;
+$tmpbase = "/tmp/sgmltmp" . $$;
+$debug = 0;
+$content_file = $tmpbase . ".content.init";
+
+# Ruleset
+$html2html->{rules} = {}; # Individual parsing rules
+
+$html2html->{rules}->{'^<@@appendix>.*$'} = sub {
+ $in_appendix = 1; $secnr = 0; $ssecnr = 0;
+};
+
+$html2html->{rules}->{'^<@@url>(.*)$'} = sub {
+ $skipnewline = 1; $urlname = $1; $headbuf = qq(<A HREF="$1">);
+};
+
+$html2html->{rules}->{'^<@@urlnam>(.*)$'} = sub {
+ $headbuf = $headbuf . "$urlname</A>";
+};
+
+$html2html->{rules}->{'^<@@endurl>.*$'} = sub {
+ $skipnewline = -1; $outfh->print($headbuf); $headbuf = "";
+};
+
+$html2html->{rules}->{'^<@@title>(.*)$'} = sub {
+ $titlename = $1; &heading(STDOUT); print(STDOUT "<H1>$1</H1>\n\n");
+};
+
+$html2html->{rules}->{'^<@@head>(.*)$'} = sub {
+ $skipnewline = 1; $headbuf = $1;
+};
+
+$html2html->{rules}->{'^<@@part>.*$'} = sub { $current = "PART"; };
+
+$html2html->{rules}->{'^<@@endhead>.*$'} = sub {
+ SWITCH: {
+ $outfh->print("<H1>$headbuf</H1>\n\n"), last SWITCH
+ if ($current eq "PART");
+ $outfh->print("<H1>$headbuf</H1>\n\n"), last SWITCH
+ if ($current eq "CHAPTER");
+ $outfh->print("<H2>$headbuf</H2>\n\n"), last SWITCH
+ if ($current eq "SECTION");
+ $outfh->print("<H2>$headbuf</H2>\n\n"), last SWITCH
+ if ($current eq "SUBSECT");
+ $outfh->print("<H3>$headbuf</H3>\n\n"), last SWITCH;
+ }
+ $current = ""; $headbuf = ""; $skipnewline = 0;
+};
+
+$html2html->{rules}->{'^<@@chapt>(.*)$'} = sub {
+ $chapter_mode = 1; $skipnewline = 1; $sectname = $1;
+ &start_chapter($sectname);
+};
+
+$html2html->{rules}->{'^<@@sect>(.*)$'} = sub {
+ $skipnewline = 1; $ssectname = $1;
+ if ($chapter_mode) {
+ &start_section($ssectname);
+ } else {
+ $sectname = $ssectname; &start_chapter($ssectname);
+ }
+};
+
+$html2html->{rules}->{'^<@@ssect>(.*)$'} = sub {
+ $skipnewline = 1; $ssectname = $1;
+ if (!$chapter_mode) {
+ &start_section($ssectname);
+ } else {
+ $current = ""; $headbuf = $ssectname;
+ }
+};
+
+$html2html->{rules}->{'^<@@endchapt>.*$'} = sub {
+ STDOUT->print("</UL>\n") if ($in_section_list);
+ if ($outfh->fileno != STDOUT->fileno) {
+ &footing($outfh) if (!$super_page_mode);
+ $outfh->close; $outfh = STDOUT;
+ }
+};
+
+$html2html->{rules}->{'^<@@endsect>.*$'} = sub {
+ STDOUT->print("</UL>\n") if (!$chapter_mode && $in_section_list);
+ if (($outfh->fileno != STDOUT->fileno)
+ && ((!$chapter_mode) || (!$big_page_mode))) {
+ &footing($outfh) if (!$super_page_mode);
+ $outfh->close; $outfh = STDOUT;
+ }
+};
+
+$html2html->{rules}->{'^<@@endssect>.*$'} = sub {
+ if (($outfh->fileno != STDOUT->fileno)
+ && (!$chapter_mode) && (!$big_page_mode) && (!$super_page_mode)) {
+ &footing($outfh); $outfh->close; $outfh = STDOUT;
+ }
+};
+
+$html2html->{rules}->{'^<@@enddoc>.*$'} = sub { };
+
+$html2html->{rules}->{'^<@@label>(.*)$'} = sub {
+ if (!defined($lprec->{$1})) {
+ STDERR->print(qq(html2html: Problem with label "$1"\n)); next;
+ }
+ if ($skipnewline) {
+ $headbuf = sprintf(qq(<A NAME="%s"></A> %s), $1, $headbuf);
+ } else {
+ $outfh->print(qq(<A NAME="$1"></A> ));
+ }
+};
+
+$html2html->{rules}->{'^<@@ref>(.*)$'} = sub {
+ my $tmp;
+
+ $refname = $1;
+ if (!defined($lprec->{$1})) {
+ STDERR->print(qq(html2html: Problem with ref "$1"\n));
+ $skipnewline++; next;
+ }
+ SWITCH: {
+ $tmp = qq(<A HREF="#$1">), last SWITCH
+ if ($lprec->{$1} == $filenum - 1);
+ $tmp = qq(<A HREF="$firstname.$fileext#$1">), last SWITCH
+ if ($lprec->{$1} == 0);
+ $tmp = qq(<A HREF="$firstname-$lprec->{$1}.$fileext#$1">),
+ last SWITCH;
+ }
+ if ($skipnewline) {
+ $headbuf = "$headbuf$tmp";
+ } else {
+ $headbuf = $tmp;
+ }
+ $skipnewline++;
+};
+
+$html2html->{rules}->{'^<@@refnam>.*$'} = sub {
+ $headbuf = "$headbuf$refname</A>\n";
+};
+
+$html2html->{rules}->{'^<@@endref>.*$'} = sub {
+ if ($skipnewline == 1) {
+ $outfh->print($headbuf); $skipnewline = -1;
+ } elsif ($skipnewline == 2) {
+ $skipnewline--;
+ } else {
+ STDERR->print("html2html: Problem with endref\n");
+ $skipnewline--;
+ }
+};
+
+# Default parsing rule
+$html2html->{defaultrule} = sub {
+ $skipnewline++ if ($skipnewline < 0);
+ if ($skipnewline) {
+ chop; $headbuf = "$headbuf$_";
+ } else {
+ $outfh->print($_);
+ }
+};
+
+# Finalize parsing process
+$html2html->{finish} = sub {
+ # Finish footers
+ if ($outfh->fileno != STDOUT->fileno) {
+ if (!$super_page_mode) {
+ &footing($outfh);
+ $outfh->close;
+ }
+ }
+ #
+ if ($super_page_mode) {
+ if ($toclevel > 0) { STDOUT->print("\n<HR>\n"); }
+ $outfh->close if ($outfh->fileno != STDOUT->fileno);
+ if ( -r $content_file ) {
+ open CONTENT, "<$content_file"
+ or die "Can't open content file\n";
+ while (<CONTENT>) {
+ STDOUT->print($_);
+ }
+ close CONTENT;
+ unlink $content_file if (! $debug);
+ }
+ }
+ # Finish the TOC; ensure "next" points to the first page.
+ &browse_links(STDOUT, 1, 0) if (!$super_page_mode);
+ #
+ # add Footer
+ if ( -r "$footer" ) {
+ open FTRFILE, "<$footer" or die "Cannot open footer file\n";
+ while (<FTRFILE>) {
+ STDOUT->print($_);
+ }
+ close FTRFILE;
+ } else {
+ STDOUT->print("</BODY>\n</HTML>\n");
+ }
+};
+
+
+###################################################################
+# Secondary Functions
+###################################################################
+
+# Print standard links
+sub browse_links {
+ my ($outfh, $myfilenum, $top) = @_;
+
+ return if ($super_page_mode);
+
+ $outfh->print("<HR>\n") unless ($top);
+
+ # NOTE: For pages where a next or prev button isn't appropriate, include
+ # the graphic anyway - just don't make it a link. That way, the mouse
+ # position of each button is unchanged from page to page.
+ # Use the passed myfilenum since filenum may already be incremented
+
+ # Next link (first)
+ my $next = $use_imgs
+ ? qq(<IMG SRC="next.png" ALT="$nextlabel">)
+ : qq($nextlabel);
+ $next = qq(<A HREF="$firstname-$myfilenum.$fileext">$next</A>)
+ if ($myfilenum < $filecount);
+ $next = join "", $next, "\n";
+ $outfh->print($next);
+
+ # Previous link
+ my $prev = $use_imgs
+ ? qq(<IMG SRC="prev.png" ALT="$prevlabel">)
+ : qq($prevlabel);
+ $prev = join "", qq(<A HREF="$firstname-), ($myfilenum - 2),
+ qq(.$fileext">$prev</A>)
+ if ($myfilenum >= 3);
+ $prev = join "", $prev, "\n";
+ $outfh->print($prev);
+
+ # Table of contents link
+ my $toc = $use_imgs
+ ? qq(<IMG SRC="toc.png" ALT="$toclabel">)
+ : qq($toclabel);
+ $toc = join "", qq(<A HREF="$firstname.$fileext#toc),
+ &section_num($secnr, 0), qq(">$toc</A>)
+ if ($outfh->fileno != STDOUT->fileno);
+ $toc = join "", $toc, "\n";
+ $outfh->print($toc);
+
+ print($outfh "<HR>\n") if ($top);
+}
+
+# Print end-of-file markup
+sub footing {
+ my $outfh = shift;
+ &browse_links($outfh, $filenum, 0);
+ if ( -r "$footer" ) {
+ open FTRFILE, "<$footer" or die "Cannot open footer file\n";
+ while (<FTRFILE>) {
+ $outfh->print($_);
+ }
+ close FTRFILE;
+ } else {
+ $outfh->print("</BODY>\n</HTML>\n");
+ }
+}
+
+# Print top-of-file markup
+sub heading {
+ my $outfh = shift; my $match;
+
+ # Emit 3.2 HTML until somebody comes up with a better idea - CdG
+ if ( -r "$header" ) {
+ open HDRFILE, "<$header" or die "Cannot open header file\n";
+ while (<HDRFILE>) {
+ $outfh->print($_);
+ }
+ close HDRFILE;
+ } else {
+ $outfh->print(
+ qq(<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n));
+ $outfh->print("<HTML>\n<HEAD>\n");
+ }
+ open VERSFILE, "<$main::DataDir/VERSION" or die "Cannot open version file\n";
+ $version = <VERSFILE>;
+ close VERSFILE;
+ chop $version;
+ $outfh->print(
+ " <META NAME=\"GENERATOR\" CONTENT=\"LinuxDoc-Tools $version\">\n");
+
+ $outfh->print(" <TITLE>");
+ $match = $titlename;
+ $match =~ s/<[^>]*>//g;
+ $outfh->print($match);
+ if ($secnr > 0) {
+ $match = $sectname;
+ $match =~ s/<[^>]*>//g;
+ $outfh->print(": $match");
+ }
+ if ($ssecnr > 0) {
+ $match = $ssectname;
+ $match =~ s/<[^>]*>//g;
+ $outfh->print(": $match");
+ }
+ $outfh->print("</TITLE>\n");
+
+ if (!$super_page_mode) {
+ #
+ # <LINK> Information for next, previous, contents, etc...
+ #
+ $outfh->print(qq( <LINK HREF="$firstname-$filenum.$fileext" REL=next>),"\n")
+ if ($filenum < $filecount);
+ my $prev;
+ $prev = join "", qq( <LINK HREF="$firstname-), ($filenum - 2),
+ qq(.$fileext" REL=previous>)
+ if ($filenum >= 3);
+ $outfh->print($prev,"\n");
+
+ #
+ # Table of contents link
+ #
+ my $toc ;
+ $toc = join "", qq( <LINK HREF="$firstname.$fileext#toc),
+ &section_num($secnr, 0), qq(" REL=contents>)
+ if ($outfh->fileno != STDOUT->fileno);
+ $outfh->print($toc,"\n");
+ } # (!$super_page_mode)
+
+ $outfh->print("</HEAD>\n<BODY>\n");
+ &browse_links($outfh, $filenum, 1);
+}
+
+# Return the section and subsection as a dotted string
+sub section_num {
+ my ($sec, $ssec) = @_;
+ my $l = "A";
+
+ if ($in_appendix) {
+ $sec--;
+ while ($sec) { $l++; $sec--; }
+ return("$l.$ssec") if ($ssec > 0);
+ return("$l");
+ } else {
+ return("$sec.$ssec") if ($ssec > 0);
+ return("$sec");
+ }
+}
+
+# Create a chapter head; start a new file, etc.
+sub start_chapter {
+ my $sectname = shift;
+
+ if (!$super_page_mode && $outfh->fileno != STDOUT->fileno) {
+ &footing($outfh); $outfh->close;
+ }
+ $current = "SECTION"; $secnr++; $ssecnr = 0;
+ if ($super_page_mode) {
+ $outname = $content_file;
+ $outfh = new FileHandle ">>$outname"
+ or die qq(html2html: Fatal: Could not open file "$outname"\n);
+ if ($toclevel > 0) {
+ $headbuf = sprintf(
+ qq(<A NAME="s%s">%s.</A> <A HREF="#toc%s">%s</A>),
+ &section_num($secnr, 0), &section_num($secnr, 0),
+ &section_num($secnr, 0),
+ $sectname);
+ STDOUT->printf(
+ qq(<P>\n<H2><A NAME="toc%s">%s.</A> <A HREF="%s#s%s">%s</A></H2>\n\n),
+ &section_num($secnr, 0), &section_num($secnr, 0),
+ "$firstname.$fileext", &section_num($secnr, 0), $sectname);
+ } else {
+ $headbuf = sprintf(
+ qq(<A NAME="s%s">%s. %s</A>),
+ &section_num($secnr, 0), &section_num($secnr, 0),
+ $sectname);
+ }
+ } else {
+ $outname = "$firstname-$filenum.$fileext"; $filenum++;
+ $outfh = new FileHandle ">$outname"
+ or die qq(html2html: Fatal: Could not open file "$outname"\n);
+ &heading($outfh);
+ if ($toclevel > 0) {
+ $headbuf = sprintf(
+ qq(<A NAME="s%s">%s.</A> <A HREF="%s#toc%s">%s</A>),
+ &section_num($secnr, 0), &section_num($secnr, 0),
+ "$firstname.$fileext", &section_num($secnr, 0),
+ $sectname);
+ STDOUT->printf(
+ qq(<P>\n<H2><A NAME="toc%s">%s.</A> <A HREF="%s">%s</A></H2>\n\n),
+ &section_num($secnr, 0), &section_num($secnr, 0),
+ $outname, $sectname);
+ } else {
+ $headbuf = sprintf(
+ qq(<A NAME="s%s">%s. %s</A>),
+ &section_num($secnr, 0), &section_num($secnr, 0),
+ $sectname);
+ }
+ }
+ $in_section_list = 0;
+}
+
+# Create a section; start a new file, etc.
+sub start_section {
+ my $ssectname = shift;
+
+ $current = "SUBSECT"; $ssecnr++;
+ if ($toclevel > 1) {
+ if (!$in_section_list) {
+ STDOUT->print("<UL>\n"); $in_section_list = 1;
+ }
+ }
+ if ($super_page_mode) {
+ if ($outfh->fileno != STDOUT->fileno && !$chapter_mode) {
+ $outfh->close;
+ }
+ $outname = $content_file;
+ $outfh = new FileHandle ">>$outname"
+ or die qq(html2html: Fatal: Could not open file "$outname"\n);
+ if ($toclevel > 1) {
+ $headbuf = sprintf(qq(<A NAME="ss%s">%s</A> <A HREF="#toc%s">%s</A>\n),
+ &section_num($secnr, $ssecnr),
+ &section_num($secnr, $ssecnr),
+ &section_num($secnr, $ssecnr),
+ $ssectname);
+ STDOUT->printf(
+ qq(<LI><A NAME="toc%s">%s</A> <A HREF="%s#ss%s">%s</A>\n),
+ &section_num($secnr, $ssecnr),
+ &section_num($secnr, $ssecnr),
+ "$firstname.$fileext",
+ &section_num($secnr, $ssecnr),
+ $ssectname);
+ } else {
+ $headbuf = sprintf(qq(<A NAME="ss%s">%s %s</A>\n),
+ &section_num($secnr, $ssecnr),
+ &section_num($secnr, $ssecnr),
+ $ssectname);
+ }
+ } else {
+ if (!$big_page_mode) {
+ if ($outfh->fileno != STDOUT->fileno) {
+ &footing($outfh); $outfh->close;
+ }
+ $outname = "$firstname-$filenum.$fileext"; $filenum++;
+ $outfh = new FileHandle ">$outname"
+ or die qq(html2html: Fatal: Could not open file "$outname"\n);
+ heading($outfh);
+
+ # Since only one section is on any page,
+ # don't use # so that when we
+ # jump to this page, we see the browse
+ # links at the top of the page.
+ if ($toclevel > 1) {
+ $headbuf = sprintf("%s <A HREF=\"%s#toc%s\">%s</A>",
+ &section_num($secnr, $ssecnr),
+ "$firstname.$fileext",
+ &section_num($secnr, $ssecnr),
+ $ssectname);
+ STDOUT->printf(
+ qq(<LI><A NAME="toc%s">%s</A> <A HREF="%s">%s</A>\n),
+ &section_num($secnr, $ssecnr),
+ &section_num($secnr, $ssecnr),
+ $outname, $ssectname);
+ } else {
+ $headbuf = sprintf("%s %s</A>",
+ &section_num($secnr, $ssecnr),
+ $ssectname);
+ }
+ } else {
+ # Since many sections are on one page, we need to use #
+ if ($toclevel > 1) {
+ $headbuf = sprintf(
+ qq(<A NAME="ss%s">%s</A> <A HREF="%s#toc%s">%s</A>\n),
+ &section_num($secnr, $ssecnr),
+ &section_num($secnr, $ssecnr),
+ "$firstname.$fileext",
+ &section_num($secnr, $ssecnr),
+ $ssectname);
+ STDOUT->printf(
+ qq(<LI><A NAME="toc%s">%s</A> <A HREF="%s#ss%s">%s</A>\n),
+ &section_num($secnr, $ssecnr),
+ &section_num($secnr, $ssecnr),
+ $outname,
+ &section_num($secnr, $ssecnr),
+ $ssectname);
+ } else {
+ $headbuf = sprintf(
+ qq(<A NAME="ss%s">%s %s</A>\n),
+ &section_num($secnr, $ssecnr),
+ &section_num($secnr, $ssecnr),
+ $ssectname);
+ }
+ }
+ }
+}
+
diff --git a/tools/linuxdoc-tools/LinuxDocTools/InfoUtils.pm b/tools/linuxdoc-tools/LinuxDocTools/InfoUtils.pm
new file mode 100644
index 00000000..b4bd50bd
--- /dev/null
+++ b/tools/linuxdoc-tools/LinuxDocTools/InfoUtils.pm
@@ -0,0 +1,357 @@
+# InfoUtils.pm
+#
+# Some utils for the linuxdoc info backend.
+#
+# * Create menus
+# * Normalize node names and associated text
+# * Point references to the associated node as needed
+#
+# Copyright (C) 2009 Agustín Martín Domingo, agmartin at debian org
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# --------------------------------------------------------------------
+
+
+package LinuxDocTools::InfoUtils;
+
+use base qw(Exporter);
+
+# List all exported symbols here.
+our @EXPORT_OK = qw(info_process_texi);
+
+# Import :all to get everything.
+our %EXPORT_TAGS = (all => [@EXPORT_OK]);
+
+=head1 NAME
+
+LinuxDocTools::InfoUtils - Some utils for the linuxdoc info backend.
+
+=head1 SYNOPSIS
+
+ use LinuxDocTools::InfoUtils qw(:all);
+
+ info_process_texi ($infile, $outfile, $infoname);
+
+=head1 DESCRIPTION
+
+This module contains some utils to process the raw texinfo file:
+creating menus, normalizing node names and associated text, and
+pointing references to the associated node as needed.
+
+=head1 FUNCTIONS
+
+=over 4
+
+=cut
+
+# -------------------------------------------------------------------------
+sub info_normalize_node_text {
+# -------------------------------------------------------------------------
+# Filter characters not allowed in section names
+# -------------------------------------------------------------------------
+ my $text = shift;
+
+ $text =~ s/\s+/ /g;
+ $text =~ s/\@[A-Za-z][A-Za-z0-9]*//g;
+ $text =~ s/(\{|\})//g;
+ $text =~ s/\,//g;
+# $text =~ s/\.+$//g;
+ $text =~ s/\./-/g;
+ $text =~ s/\s+$//g;
+
+ return $text;
+}
+
+# -------------------------------------------------------------------------
+sub info_normalize_node_name {
+# -------------------------------------------------------------------------
+# Filter characters not allowed in node names. Previous filtering of
+# characters not allowed in section names is assumed.
+# -------------------------------------------------------------------------
+ my $text = shift;
+# my $tmpnodedata = shift;
+
+ $text =~ s/\://g;
+ $text =~ s/\;//g;
+
+# die "Error: Reference \"$text\" already used"
+# if defined $tmpnodedata->{$text};
+
+ return $text;
+}
+
+# -------------------------------------------------------------------------
+sub info_parse_raw_file {
+# -------------------------------------------------------------------------
+# Parse raw texinfo file. It does not yet contain section names, menus,
+# correct references or title.
+# -------------------------------------------------------------------------
+ my $inputfile = shift;
+ my $INPUT;
+
+ my @inputtext = (); # Copy of input file with some preprocessing
+ my %nodedata = # A hash of hashes with all node info
+ ( 'Top' =>
+ { 'text' => "Top",
+ 'depth' => 0,
+ 'up' => "",
+ 'next' => '',
+ 'previous' => "",
+ 'sort' => 0,
+ 'debug' => "",
+ 'menu' => []}
+ );
+
+ my %levellast = (0 => "Top");
+ my %labels = ();
+ my %docdata = # Some misc data for the document
+ ( 'title' => "",
+ 'author' => "",
+ 'subtitle' => ""
+ );
+
+ my $depth = my $lastdepth = 0;
+ my $lastnode = "";
+ my $sort = 0;
+
+ my $inauthor;
+ my $authorline;
+
+ open ($INPUT, "< $inputfile")
+ or die "info-postASP: Could not open $inputfile for read. Aborting ...\n";
+
+ while (<$INPUT>){
+ chomp;
+ if ( s/^\@SUB\s+// ){
+ my $updepth = $depth;
+ my $uppernode = $levellast{$updepth};
+ $depth++;
+ $sort++;
+
+ my @levelmenu = ();
+
+ if ( defined $nodedata{$uppernode}->{'menu'} ){
+ @levelmenu = @{ $nodedata{$uppernode}->{'menu'} };
+ }
+
+ my $nodetext = info_normalize_node_text($_);
+ my $nodename = info_normalize_node_name($nodetext,\%nodedata);
+
+ # Make first appearing node the next node for top node
+ $nodedata{'Top'}->{'next'} = $nodename if ( $lastdepth eq 0);
+
+ # Fill info for current node (and 'next' for last one in level)
+ $nodedata{$nodename}->{'orig'} = $_;
+ $nodedata{$nodename}->{'text'} = $nodetext;
+ $nodedata{$nodename}->{'depth'} = $depth;
+ $nodedata{$nodename}->{'previous'} =
+ defined $levellast{$depth} ? $levellast{$depth} : "";
+ $nodedata{$levellast{$depth}}->{'next'} = $nodename
+ if defined $levellast{$depth};
+ $nodedata{$nodename}->{'up'} = $uppernode;
+ $nodedata{$nodename}->{'sort'} = $sort;
+ $nodedata{$nodename}->{'debug'} =
+ "updepth: $updepth, lastdepth: $lastdepth, up: $uppernode";
+
+ # Keep this defined in case there is no next node in the same level.
+ $nodedata{$nodename}->{'next'} = "";
+
+ push @inputtext, "\@SUB $nodename"; # Rewrite @SUB with the new name
+ push @levelmenu, $nodename; # Add $nodename to the level menu list
+
+ # Prepare things for next @SUB entry found
+ $levellast{$depth} = $lastnode = $nodename;
+ $lastdepth = $depth;
+ $nodedata{$uppernode}->{'menu'} = \@levelmenu;
+
+ } elsif ( s/^\@ENDSUB// ){
+ $depth--;
+ push @inputtext, $_;
+ } elsif (s/^\@LABEL\s+//){
+ # Keep record of node labels vs nodenames. Will use the last.
+ $labels{$_} = $lastnode;
+ } elsif (s/^\@title\s+//){
+ $docdata{'title'} = $_;
+ } elsif (/^\@ldt_endauthor/){
+ $inauthor = '';
+ my @authors;
+ if ( defined $docdata{'authors'} ){
+   @authors = @{ $docdata{'authors'} };
+ }
+ push @authors, $authorline;
+ $docdata{'authors'} = \@authors;
+ $authorline = "";
+ } elsif ( s/^\@author\s+// ){
+ $inauthor = 1;
+ $authorline = $_;
+ } elsif ( $inauthor ){
+ next if m/^\s*$/;
+ s/^\s+//;
+ $authorline .= " $_ ";
+ } elsif (s/^\@subtitle\s+//){
+ $docdata{'subtitle'} = $_;
+ } elsif (s/^\@ldt_translator\s+//){
+ $docdata{'translator'} = $_;
+ } elsif (s/^\@ldt_tdate\s+//){
+ $docdata{'tdate'} = $_;
+ } else {
+ push @inputtext, $_;
+ }
+ }
+ close $INPUT;
+
+ $docdata{'nodedata'} = \%nodedata;
+ $docdata{'labels'} = \%labels;
+ $docdata{'inputtext'} = \@inputtext;
+
+ return \%docdata;
+}
+
+# -------------------------------------------------------------------------
+sub info_write_preprocessed_file {
+# -------------------------------------------------------------------------
+# Write processed texinfo file. Add section names, menus, correct
+# references and title.
+# -------------------------------------------------------------------------
+ my $docdata = shift;
+ my $infoname = shift;
+ my $texiout = shift;
+
+ die "InfoUtils.pm: No info file name $infoname.\n" unless $infoname;
+ die "InfoUtils.pm: No output texi file $texiout\n" unless $texiout;
+
+ my $nodedata = $docdata->{'nodedata'};
+ my $labels = $docdata->{'labels'};
+ my $inputtext = $docdata->{'inputtext'};
+
+ my $OUTFILE;
+
+ # info_check_parsed_data($nodedata);
+
+ my %sections = ( 1 => "\@chapter",
+ 2 => "\@section",
+ 3 => "\@subsection",
+ 4 => "\@subsubsection");
+
+ my $lastdepth = 0;
+ my $lastnode = "Top";
+ my $texinfo = "\@c %** START OF HEADER
+\@setfilename $infoname
+\@c %** END OF HEADER\n";
+
+ foreach ( @$inputtext ) {
+ if ( s/^\@SUB\s+// ){
+ my $key = $_;
+ my $depth = $nodedata->{$key}->{'depth'};
+ my $name = $nodedata->{$key}->{'text'};
+
+ if ( $depth le 4 ){
+ my $next = $nodedata->{$key}->{'next'};
+ my $previous = $nodedata->{$key}->{'previous'};
+ my $up = $nodedata->{$key}->{'up'};
+ # my $txt = "\@comment nodename, next, previous, up\n";
+ my $txt = "";
+
+ # $txt .= "\@node $key, $previous, $next, $up\n";
+ $txt .= "\@node $key\n";
+ $txt .= "$sections{$depth} $name\n";
+
+ if ( $depth gt $lastdepth && defined $nodedata->{$lastnode}->{'menu'}){
+ $txt = "\n\@menu\n\* "
+ . join("::\n\* ",@{$nodedata->{$lastnode}->{'menu'}})
+ . "::\n\@end menu\n"
+ . "\n$txt";
+ }
+
+ $texinfo .= $txt;
+ $lastdepth = $depth;
+ $lastnode = $key;
+ } elsif ( $depth eq 5 ){
+ $texinfo .= "\@subsubheading $nodedata->{$key}->{'text'}\n";
+ } else {
+ die "info-postASP: Entry \"$key\" has wrong depth $depth\n";
+ }
+ } elsif (s/^\@REF\s+//){
+ if ( defined $labels->{$_} ){
+ # If this reference is to a node, use its nodename
+ $texinfo .= "\@ref{" . $labels->{$_} . "}\n";
+ } else {
+ $texinfo .= "\@ref{$_}\n";
+ }
+ } elsif (s/^\@TOP//){
+ $texinfo .= "\@node top\n"
+ . "\@top " . $docdata->{'title'} . "\n"
+ . "\@example\n";
+
+ $texinfo .= join(' and ',@{$docdata->{'authors'}}) . "\n"
+ if ( @{$docdata->{'authors'}} );
+
+ $texinfo .= $docdata->{'subtitle'} . "\n"
+ if ( defined $docdata->{'subtitle'} );
+
+ $texinfo .= $docdata->{'translator'} . "\n"
+ if ( defined $docdata->{'translator'} );
+
+ $texinfo .= $docdata->{'tdate'} . "\n"
+ if ( defined $docdata->{'tdate'} );
+
+ $texinfo .= "\@end example\n";
+ } else {
+ $texinfo .= "$_\n";
+ }
+ }
+
+ open ($OUTFILE, "> $texiout")
+ or die "Could not open \"$texiout\" for write. Aborting ...\n";
+ print $OUTFILE $texinfo;
+ close $OUTFILE;
+}
+
+# -------------------------------------------------------------------------
+sub info_check_parsed_data {
+# -------------------------------------------------------------------------
+# -------------------------------------------------------------------------
+ my $tmpnodedata = shift;
+ my @sections = sort {
+ $tmpnodedata->{$a}->{'sort'} <=> $tmpnodedata->{$b}->{'sort'}
+ } keys %$tmpnodedata;
+
+ foreach ( @sections ){
+ my $ref = $tmpnodedata->{$_};
+ print STDERR "Node: $_\n";
+ print STDERR " orig: $ref->{'orig'}\n";
+ print STDERR " text: $ref->{'text'}\n";
+ print STDERR " debug: $ref->{'debug'}\n";
+ print STDERR " up: $ref->{'up'}\n";
+ print STDERR " depth: $ref->{'depth'}\n";
+ print STDERR " previous: $ref->{'previous'}\n";
+ print STDERR " next: $ref->{'next'}\n";
+ print STDERR " sort: $ref->{'sort'}\n";
+ print STDERR " menu:\n * " . join("\n * ",@{$ref->{'menu'}}) . "\n" if defined $ref->{'menu'};
+ }
+}
+
+# -------------------------------------------------------------------------
+sub info_process_texi {
+# -------------------------------------------------------------------------
+# info_process_texi($infile, $outfile, $infoname)
+#
+# Call the other functions.
+# -------------------------------------------------------------------------
+ my $infile = shift;
+ my $outfile = shift;
+ my $infoname = shift;
+
+ info_write_preprocessed_file(info_parse_raw_file($infile),$infoname,$outfile);
+}
diff --git a/tools/linuxdoc-tools/LinuxDocTools/Lang.pm b/tools/linuxdoc-tools/LinuxDocTools/Lang.pm
new file mode 100644
index 00000000..2b0e99d6
--- /dev/null
+++ b/tools/linuxdoc-tools/LinuxDocTools/Lang.pm
@@ -0,0 +1,238 @@
+#
+# Lang.pm
+#
+# $Id: Lang.pm,v 1.1.1.1 2001/05/24 15:57:41 sano Exp $
+#
+# Language support.
+#
+# © Copyright 1997, Cees de Groot
+#
+
+package LinuxDocTools::Lang;
+
+use strict;
+use vars qw($VERSION @ISA @EXPORT @Languages $translations);
+
+require 5.0004;
+use Exporter;
+use LinuxDocTools::Vars;
+
+$VERSION = sprintf("%d.%02d", q$Revision: 1.1.1.1 $ =~ /(\d+)\.(\d+)/);
+@ISA = qw(Exporter);
+@EXPORT = qw(Any2ISO ISO2Native ISO2English Xlat);
+
+=head1 NAME
+
+LinuxDocTools::Lang - language name and translation functions
+
+=head1 SYNOPSIS
+
+ $isoname = Any2ISO ('deutsch');
+ $native = ISO2Native ('de');
+ $engname = ISO2English ('nederlands');
+
+ $global->{language} = 'nl';
+ $dutch = Xlat ('Table of Contents');
+
+=head1 DESCRIPTION
+
+B<LinuxDocTools::Lang> gives a simple interface to various forms of language
+names, and provides a translation service. Languages can be specified in
+three different ways: by their native name, by their english name, and
+by their 2-letter ISO code. For example, you can specify the German
+language as C<deutsch>, as C<german> or as C<de>.
+
+=head1 FUNCTIONS
+
+=over 4
+
+=cut
+
+@Languages = qw(
+ en english english
+ de deutsch german
+ nl nederlands dutch
+ fr français french
+ es español spanish
+ da dansk danish
+ no norsk norwegian
+ se svenska swedish
+ pt portuges portuguese
+ ca català catalan
+ it italiano italian
+ ro românã romanian
+ ja japanese japanese
+ pl polski polish
+ ko korean korean
+ fi suomi finnish
+);
+
+
+=item Any2ISO
+
+Maps any of the three forms of languages to the ISO name. So either of
+these invocations:
+
+ Any2ISO ('dutch');
+ Any2ISO ('nederlands');
+ Any2ISO ('nl');
+
+will return the string C<"nl">.
+
+=cut
+
+sub Any2ISO
+{
+ my $lang = shift (@_);
+
+ my $i = 0;
+ foreach my $l (@Languages)
+ {
+ ($l eq $lang) && last;
+ $i++;
+ }
+ return $Languages[(int $i / 3) * 3];
+}
+
+
+=item ISO2Native
+
+Maps the ISO code to the native name of the language.
+
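+For example:
+
+ ISO2Native ('de');            # returns "deutsch"
+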
+=cut
+
+sub ISO2Native
+{
+ my $iso = shift (@_);
+
+ my $i = 0;
+ foreach my $l (@Languages)
+ {
+ ($l eq $iso) && last;
+ $i++;
+ }
+ return $Languages[$i + 1];
+
+}
+
+
+=item ISO2English
+
+Maps the ISO code to the english name of the language.
+
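+For example:
+
+ ISO2English ('nl');           # returns "dutch"
+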
+=cut
+
+sub ISO2English
+{
+ my $iso = shift (@_);
+
+ my $i = 0;
+ foreach my $l (@Languages)
+ {
+ ($l eq $iso) && last;
+ $i++;
+ }
+ return $Languages[$i + 2];
+}
+
+=item Xlat
+
+Translates its (English) argument to the language specified by the
+current value of C<$global-E<gt>{language}>. The module, in its source
+file, contains a data structure, indexed by the English strings, that
+has all available translations.
+
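+For example, using the translation table below:
+
+ $global->{language} = "de";
+ Xlat ("Next");                # returns "Weiter"
+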
+=cut
+
+sub Xlat
+{
+ my ($txt) = @_;
+
+ return $txt if ($global->{language} eq "en");
+ return $translations->{$txt}{$global->{language}};
+};
+
+
+#
+# By the time this grows big, we'll make up something else.
+#
+$translations = {
+ "Previous" => {
+ "nl" => "Terug",
+ "de" => "Zurück",
+ "es" => "Página anterior",
+ "fr" => "Page précédente",
+ "da" => "Forrige",
+ "no" => "Forrige",
+ "se" => "Föregående",
+ "pt" => "Página anterior",
+ "ca" => "Pàgina anterior",
+ "it" => "Indietro",
+ "ro" => "Înapoi",
+ "ja" => "Á°¤Î¥Ú¡¼¥¸",
+ "pl" => "Poprzedni",
+ "ko" => "ÀÌÀü",
+ "fi" => "Edellinen"
+ },
+ "Next" => {
+ "nl" => "Verder",
+ "de" => "Weiter",
+ "es" => "Página siguiente",
+ "fr" => "Page suivante",
+ "da" => "Næste",
+ "no" => "Neste",
+ "se" => "Nästa",
+ "pt" => "Página seguinte",
+ "ca" => "Pàgina següent",
+ "it" => "Avanti",
+ "ro" => "Înainte",
+ "ja" => "¼¡¤Î¥Ú¡¼¥¸",
+ "pl" => "Nastny",
+ "ko" => "´ÙÀ½",
+ "fi" => "Seuraava"
+ },
+ "Contents" => {
+ "nl" => "Inhoud",
+ "de" => "Inhalt",
+ "es" => "Índice general",
+ "fr" => "Table des matières",
+ "da" => "Indhold",
+ "no" => "Innhold",
+ "se" => "Innehållsförteckning",
+ "pt" => "Índice",
+ "ca" => "Índex",
+ "it" => "Indice",
+ "ro" => "Cuprins",
+ "ja" => "Ìܼ¡¤Ø",
+ "pl" => "Spis Trei",
+ "ko" => "Â÷·Ê",
+ "fi" => "Sisällys"
+ },
+ "Table of Contents" => {
+ "nl" => "Inhoudsopgave",
+ "de" => "Inhaltsverzeichnis",
+ "es" => "Índice general",
+ "fr" => "Table des matières",
+ "da" => "Indholdsfortegnelse",
+ "no" => "Innholdsfortegnelse",
+ "se" => "Innehållsförteckning",
+ "pt" => "Índice geral",
+ "ca" => "Índex general",
+ "it" => "Indice Generale",
+ "ro" => "Cuprins",
+ "ja" => "Ìܼ¡",
+ "pl" => "Spis Trei",
+ "ko" => "Â÷·Ê",
+ "fi" => "Sisällysluettelo"
+ }
+};
+
+=back
+
+=head1 AUTHOR
+
+Cees de Groot, C<E<lt>cg@pobox.comE<gt>>
+
+=cut
+
+1;
diff --git a/tools/linuxdoc-tools/LinuxDocTools/Utils.pm b/tools/linuxdoc-tools/LinuxDocTools/Utils.pm
new file mode 100644
index 00000000..63fe5f91
--- /dev/null
+++ b/tools/linuxdoc-tools/LinuxDocTools/Utils.pm
@@ -0,0 +1,392 @@
+#
+# Utils.pm
+#
+# $Id: Utils.pm,v 1.2 2001/08/31 22:39:44 sano Exp $
+#
+# Utilities, split off from other modules in order to cut down file size.
+#
+# © Copyright 1996, 1997, Cees de Groot
+#
+package LinuxDocTools::Utils;
+use strict;
+
+=head1 NAME
+
+LinuxDocTools::Utils - various supporting routines
+
+=head1 SYNOPSIS
+
+ @files = process_options (@args);
+
+ usage ($msg);
+
+ trap_signals;
+
+ cleanup;
+
+ create_temp ($tempfile);
+
+=head1 DESCRIPTION
+
+The B<LinuxDocTools::Utils> module contains a number of generic routines, mainly
+split off from the main module in order to keep file size down.
+
+=head1 FUNCTIONS
+
+=over 4
+
+=cut
+
+use DirHandle;
+use FileHandle;
+use Cwd;
+use File::Basename;
+use Exporter;
+use LinuxDocTools::Vars;
+
+use vars qw($VERSION @ISA @EXPORT @EXPORT_OK $in_signal);
+@ISA = qw(Exporter);
+@EXPORT = qw(usage process_options);
+@EXPORT_OK = qw(cleanup trap_signals remove_tmpfiles create_temp);
+$VERSION = sprintf("%d.%02d", q$Revision: 1.2 $ =~ /(\d+)\.(\d+)/);
+
+use subs qw(usage);
+
+# check whether options are unique
+sub check_option_consistency
+{
+ my $owner = {};
+ my ($fmt, $opt);
+ foreach $fmt (keys %FmtList)
+ {
+ my $add = sub { # add to options of $fmt
+ my $str = shift;
+ if ($owner->{$str}) {
+ push(@{$owner->{$str}}, $fmt);
+ }
+ else {
+ $owner->{$str} = [$fmt];
+ }
+ };
+ foreach $opt (@{$Formats{$fmt}{OPTIONS}})
+ {
+ &$add("--$opt->{option}");
+ &$add("-$opt->{short}");
+ }
+ }
+ my $error = 0;
+ foreach $opt (keys %$owner)
+ {
+ if (scalar @{$owner->{$opt}} > 1)
+ {
+ warn "duplicate option: $opt in " .
+ join(', ', @{$owner->{$opt}}) . "\n";
+ $error = 1;
+ }
+ }
+ die "Internal error detected" if $error;
+}
+
+
+=item process_options
+
+This function processes the command line, and sets the variables associated
+with the options along the way. When successful, it returns the arguments
+on the command line it didn't interpret. Normally, this will be a list of
+filenames.
+
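+A small sketch (the option shown is only illustrative; the accepted set
+depends on the global options and on the backends that are loaded):
+
+ my @files = process_options ("--language=nl", "mydoc.sgml");
+ # @files now contains ("mydoc.sgml"); the option value has been stored
+ # in the hash of the format that declared it.
+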
+=cut
+
+sub process_options
+{
+ my @args = @_;
+ my @retval;
+
+ OPTPROC: while ($args[0])
+ {
+ my $long;
+ my $curarg = $args[0];
+ if ($curarg =~ /^--.*/)
+ {
+ #
+ # Long option, --opt[==value]
+ #
+ $long = 1;
+ }
+ elsif ($curarg =~ /^-.*/)
+ {
+ #
+ # Short option, -o value
+ #
+ $long = 0;
+ }
+ else
+ {
+ #
+ # Filename
+ #
+ push @retval, $curarg;
+ next OPTPROC;
+ }
+
+ #
+ # Start looking for the option
+ #
+ foreach my $fmt (keys %FmtList)
+ {
+ foreach my $opt (@{$Formats{$fmt}{OPTIONS}})
+ {
+ if (($long && $curarg =~ /^--$opt->{option}.*/) ||
+ $curarg =~ /^-$opt->{short}/)
+ {
+ #
+ # Found it! Get the argument and see whether all is OK
+ # with the option.
+ #
+ my $optval = "";
+ if ($long)
+ {
+ if ($curarg =~ /^--$opt->{option}=.*/)
+ {
+ $optval = $curarg;
+ $optval =~ s/[^=]*=(.*)/$1/;
+ }
+ }
+ else
+ {
+ if ($args[1] =~ /^[^-].*/)
+ {
+ $optval = $args[1];
+ }
+ }
+ $opt->{type} eq "f" && do
+ {
+ #
+ # "f" -> flag. Increment, so '-v -v' can work.
+ #
+ $Formats{$fmt}{$opt->{option}} += 1;
+ next OPTPROC;
+ };
+ #
+ # All other types require a value (for now).
+ #
+ shift @args unless $long;
+ if ($optval eq "")
+ {
+ usage "Option $curarg: value required";
+ }
+ ($opt->{type} eq "i" || $opt->{type} eq "s") && do
+ {
+ #
+ # "i" -> numeric value.
+ # "s" -> string value.
+ #
+ # No type checking yet...
+ #
+ if ($opt->{option} eq "define")
+ {
+ $Formats{$fmt}{$opt->{option}} .= " " . $optval;
+ }
+ else
+ {
+ $Formats{$fmt}{$opt->{option}} = $optval;
+ }
+ next OPTPROC;
+ };
+ $opt->{type} eq "l" && do
+ {
+ #
+ # "l" -> list of values.
+ #
+ foreach my $val (@{$opt->{'values'}})
+ {
+ if ($val eq $optval)
+ {
+ $Formats{$fmt}{$opt->{option}} = $optval;
+ next OPTPROC;
+ }
+ }
+ usage "Invalid value '$optval' for '--$opt->{option}'";
+ };
+ usage "Unknown option type $opt->{type} in $fmt/$opt";
+ }
+ }
+ }
+ usage "Unknown option $curarg";
+ }
+ continue
+ {
+ shift @args;
+ }
+ return @retval;
+}
+
+
+=item usage
+
+Prints out a generated help message about calling convention and allowed
+options, then the argument string, and finally exits.
+
+=cut
+
+sub usage
+{
+ my ($msg) = @_;
+
+ print "LinuxDoc-Tools version " . `cat $main::DataDir/VERSION` . "\n";
+ check_option_consistency;
+ print "Usage:\n";
+ print " " . $global->{myname} . " [options] <infile>\n\n";
+ my @helplist = sort(keys %Formats);
+ @helplist = sort (keys %FmtList) if ($global->{format});
+ foreach my $fmt (@helplist)
+ {
+ if ($fmt eq "global")
+ {
+ print "General options:\n";
+ }
+ else
+ {
+ print "Format: " . $fmt . "\n";
+ }
+ print $Formats{$fmt}{HELP};
+ for my $opt (@{$Formats{$fmt}{OPTIONS}})
+ {
+ my $value = '';
+ if ($opt->{type} eq "i")
+ {
+ $value = "number";
+ }
+ elsif ($opt->{type} eq "l")
+ {
+ $value = "{";
+ my $first = 1;
+ for my $val (@{$opt->{'values'}})
+ {
+ $first || ($value .= ",");
+ $first = 0;
+ $value .= $val;
+ }
+ $value .= "}";
+ }
+ elsif ($opt->{type} eq "s")
+ {
+ $value = "string";
+ }
+ print " --$opt->{option}"; print "=$value" if $value;
+ print " -$opt->{short}"; print " $value" if $value;
+ print "\n";
+ }
+ print "\n";
+ }
+
+ $msg && print "Error: $msg\n\n";
+ exit 1;
+}
+
+
+=item cleanup
+
+This function cleans out all temporary files and exits. The unlink step
+is skipped if debugging is turned on.
+
+=cut
+
+sub cleanup
+{
+ my ($signame) = @_;
+
+ if( $signame ) {
+ if ( $in_signal ) {
+ if( $global->{debug} ) {
+ print STDERR "Caught SIG$signame during cleanup -- aborting\n";
+ }
+ exit -1;
+ }
+ else {
+ if( $global->{debug} ) {
+ print STDERR "Caught SIG$signame -- cleaning up\n";
+ }
+ $in_signal = 1;
+ }
+ }
+
+ if( !$global->{debug} && $global->{tmpbase} ) {
+ remove_tmpfiles($global->{tmpbase});
+ }
+ exit 0;
+}
+
+=item remove_tmpfiles( $tmpbase )
+
+This function cleans out all temporary files, using the argument $tmpbase to
+determine the directory and the filename pattern of the temporary files.
+
+=cut
+
+sub remove_tmpfiles($) {
+ my $tmpbase = shift;
+ my ($name,$tmpdir) = fileparse($tmpbase,"");
+ my $namelength = length $name;
+ my $savdir = cwd;
+
+ chdir($tmpdir);
+ my $dir = new DirHandle(".");
+
+ if (!defined($dir) ) {
+ warn "Couldn't open temp directory $tmpdir: $!\n";
+ } else {
+ foreach my $tmpfile ($dir->read()) {
+ if (substr ($tmpfile, 0, $namelength) eq $name) {
+ unlink ($tmpfile) || warn "Couldn't unlink $tmpfile: $! \n";
+ }
+ }
+ $dir->close();
+ }
+
+ chdir($savdir);
+ rmdir($tmpdir) || return -1;
+}
+
+=item trap_signals
+
+This function traps all known signals, making sure that the B<cleanup>
+function is executed on them. It should be called once at initialization
+time.
+
+=cut
+
+sub trap_signals
+{
+ foreach my $sig ( 'HUP', 'INT', 'QUIT', 'ILL',
+ 'TRAP', 'IOT', 'BUS', 'FPE',
+ 'USR1', 'SEGV', 'USR2',
+ 'PIPE', 'ALRM', 'TERM', )
+ {
+ $SIG{$sig} = \&cleanup;
+ }
+}
+
+=item create_temp ( $tmpfile )
+
+This function creates an empty temporary file with restrictive permissions
+(mode 0600), for security reasons.
+
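+A usage sketch (the file name is illustrative):
+
+ create_temp ("$global->{tmpbase}.tmp");
+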
+=cut
+
+sub create_temp($) {
+ my $tmpnam = shift;
+ my $fh = new FileHandle($tmpnam,O_CREAT|O_EXCL|O_WRONLY,0600);
+ $fh or die "$0: failed to create temporary file: $!";
+ $fh->close;
+}
+
+=back
+
+=head1 AUTHOR
+
+Cees de Groot, C<E<lt>cg@pobox.comE<gt>>.
+
+=cut
+
+1;
diff --git a/tools/linuxdoc-tools/LinuxDocTools/Vars.pm b/tools/linuxdoc-tools/LinuxDocTools/Vars.pm
new file mode 100644
index 00000000..49cf630b
--- /dev/null
+++ b/tools/linuxdoc-tools/LinuxDocTools/Vars.pm
@@ -0,0 +1,22 @@
+#
+# Vars.pm
+#
+# $Id: Vars.pm,v 1.1.1.1 2001/05/24 15:57:41 sano Exp $
+#
+# Shared variables.
+#
+# © Copyright 1996, 1997, Cees de Groot
+#
+package LinuxDocTools::Vars;
+use strict;
+
+use Exporter;
+
+use vars qw($VERSION @ISA @EXPORT);
+@ISA = qw(Exporter);
+@EXPORT = qw(%Formats $global %FmtList);
+$VERSION = sprintf("%d.%02d", q$Revision: 1.1.1.1 $ =~ /(\d+)\.(\d+)/);
+
+use vars @EXPORT;
+
+1;