Diffstat (limited to 'tools/linuxdoc-tools')
-rw-r--r--  tools/linuxdoc-tools/LinuxDocTools.pm            668
-rw-r--r--  tools/linuxdoc-tools/LinuxDocTools/BackEnd.pm    185
-rw-r--r--  tools/linuxdoc-tools/LinuxDocTools/CharEnts.pm   176
-rw-r--r--  tools/linuxdoc-tools/LinuxDocTools/FixRef.pm      76
-rw-r--r--  tools/linuxdoc-tools/LinuxDocTools/Html2Html.pm  583
-rw-r--r--  tools/linuxdoc-tools/LinuxDocTools/InfoUtils.pm  357
-rw-r--r--  tools/linuxdoc-tools/LinuxDocTools/Lang.pm       238
-rw-r--r--  tools/linuxdoc-tools/LinuxDocTools/Utils.pm      392
-rw-r--r--  tools/linuxdoc-tools/LinuxDocTools/Vars.pm        22
-rw-r--r--  tools/linuxdoc-tools/Text/EntityMap.pm           121
-rw-r--r--  tools/linuxdoc-tools/copyright                    85
11 files changed, 2903 insertions, 0 deletions
diff --git a/tools/linuxdoc-tools/LinuxDocTools.pm b/tools/linuxdoc-tools/LinuxDocTools.pm
new file mode 100644
index 00000000..12b77319
--- /dev/null
+++ b/tools/linuxdoc-tools/LinuxDocTools.pm
@@ -0,0 +1,668 @@
+#! /usr/bin/perl
+#
+# LinuxDocTools.pm
+#
+# LinuxDoc-Tools driver core. This contains all the basic functionality
+# we need to control all other components.
+#
+# Copyright © 1996, Cees de Groot.
+# Copyright © 2000, Taketoshi Sano
+# Copyright © 2006-2018, Agustin Martin
+# --------------------------------------------------------------------------------
+
+package LinuxDocTools;
+
+require 5.006;
+use strict;
+
+=head1 NAME
+
+LinuxDocTools - SGML conversion utilities for LinuxDoc DTD.
+
+=head1 SYNOPSIS
+
+ use LinuxDocTools;
+ LinuxDocTools::init;
+ @files = LinuxDocTools::process_options ($0, @ARGV);
+ for $curfile (@files) {
+ LinuxDocTools::process_file ($curfile);
+ }
+
+=head1 DESCRIPTION
+
+The LinuxDocTools package encapsulates all the functionality offered by
+LinuxDoc-Tools. It is used, of course, by LinuxDoc-Tools;
+but the encapsulation should provide for a simple interface for other users as well.
+
+=head1 FUNCTIONS
+
+=over 4
+
+=cut
+
+use File::Copy;
+use File::Temp qw(tempdir);
+use File::Basename qw(fileparse);
+use LinuxDocTools::Lang;
+use LinuxDocTools::Utils qw(usage cleanup trap_signals remove_tmpfiles create_temp);
+use LinuxDocTools::Vars;
+
+sub BEGIN
+{
+ #
+ # Make sure we're always looking here. Note that "use lib" adds
+  # to the front of the search path, so we first push dist, then
+ # site, so that site is searched first.
+ #
+ use lib "$main::DataDir/dist";
+ use lib "$main::DataDir/site";
+}
+
+# -----------------------------------------------------------------------------------
+sub ldt_searchfile {
+# -----------------------------------------------------------------------------------
+# Look for a readable file in the given locations. Return the first match.
+# -----------------------------------------------------------------------------------
+ my $files = shift;
+ foreach my $file ( @$files ){
+ return $file if -r $file;
+ }
+}
+
+# -----------------------------------------------------------------------------------
+sub ldt_getdtd_v1 {
+# -----------------------------------------------------------------------------------
+# Get the dtd
+# -----------------------------------------------------------------------------------
+ my $file = shift;
+ my $error_header = "LinuxdocTools::ldt_getdtd_v1";
+ my $dtd;
+
+ open ( my $FILE, "< $file")
+ or die "$error_header: Could not open \"$file\" for reading. Aborting ...\n";
+
+ while ( <$FILE> ) {
+ tr/A-Z/a-z/;
+ # check for [<!doctype ... system] type definition
+ if ( /<!doctype\s*(\w*)\s*system/ ) {
+ $dtd = $1;
+ last;
+ # check for <!doctype ... PUBLIC ... DTD ...
+ } elsif ( /<!doctype\s*\w*\s*public\s*.*\/\/dtd\s*(\w*)/mi ) {
+ $dtd = $1;
+ last;
+ # check for <!doctype ...
+ # PUBLIC ... DTD ...
+ # (multi-line version)
+ } elsif ( /<!doctype\s*(\w*)/ ) {
+ $dtd = "precheck";
+ next;
+ } elsif ( /\s*public\s*.*\/\/dtd\s*(\w*)/ && $dtd eq "precheck" ) {
+ $dtd = $1;
+ last;
+ }
+ }
+ close $FILE;
+
+ return $dtd;
+}
+
+# -----------------------------------------------------------------------------------
+sub ldt_getdtd_v2 {
+# -----------------------------------------------------------------------------------
+# Second way of getting the dtd, from nsgmls output.
+# -----------------------------------------------------------------------------------
+ my $preaspout = shift;
+ my $error_header = "LinuxdocTools::ldt_getdtd_v2";
+ my $dtd2;
+
+ open (my $TMP,"< $preaspout")
+    or die "$error_header: Could not open $preaspout for reading. Aborting ...\n";
+ while ( defined ($dtd2 = <$TMP>) && ! ( $dtd2 =~ /^\(/) ) { };
+ close $TMP;
+ $dtd2 =~ s/^\(//;
+ $dtd2 =~ tr/A-Z/a-z/;
+ chomp $dtd2;
+ return $dtd2;
+}
+
+# -----------------------------------------------------------------------------------
+sub ldt_latin1tosgml {
+# -----------------------------------------------------------------------------------
+# Convert latin1 chars in input filehandle to sgml entities in the returned string
+# -----------------------------------------------------------------------------------
+ my $FILE = shift;
+ my $sgmlout;
+
+ while (<$FILE>){
+ # Outline these commands later on - CdG
+ #change latin1 characters to SGML
+ #by Farzad Farid, adapted by Greg Hankins
+ s/À/\&Agrave;/g;
+ s/Á/\&Aacute;/g;
+ s/Â/\&Acirc;/g;
+ s/Ã/\&Atilde;/g;
+ s/Ä/\&Auml;/g;
+ s/Å/\&Aring;/g;
+ s/Æ/\&AElig;/g;
+ s/Ç/\&Ccedil;/g;
+ s/È/\&Egrave;/g;
+ s/É/\&Eacute;/g;
+ s/Ê/\&Ecirc;/g;
+ s/Ë/\&Euml;/g;
+ s/Ì/\&Igrave;/g;
+ s/Í/\&Iacute;/g;
+ s/Î/\&Icirc;/g;
+ s/Ï/\&Iuml;/g;
+ s/Ñ/\&Ntilde;/g;
+ s/Ò/\&Ograve;/g;
+ s/Ó/\&Oacute;/g;
+ s/Ô/\&Ocirc;/g;
+ s/Õ/\&Otilde;/g;
+ s/Ö/\&Ouml;/g;
+ s/Ø/\&Oslash;/g;
+ s/Ù/\&Ugrave;/g;
+ s/Ú/\&Uacute;/g;
+ s/Û/\&Ucirc;/g;
+ s/Ü/\&Uuml;/g;
+ s/Ý/\&Yacute;/g;
+ s/Þ/\&THORN;/g;
+ s/ß/\&szlig;/g;
+ s/à/\&agrave;/g;
+ s/á/\&aacute;/g;
+ s/â/\&acirc;/g;
+ s/ã/\&atilde;/g;
+ s/ä/\&auml;/g;
+ s/å/\&aring;/g;
+ s/æ/\&aelig;/g;
+ s/ç/\&ccedil;/g;
+ s/è/\&egrave;/g;
+ s/é/\&eacute;/g;
+ s/ê/\&ecirc;/g;
+ s/ë/\&euml;/g;
+ s/ì/\&igrave;/g;
+ s/í/\&iacute;/g;
+ s/î/\&icirc;/g;
+ s/ï/\&iuml;/g;
+ s/µ/\&mu;/g;
+ s/ð/\&eth;/g;
+ s/ñ/\&ntilde;/g;
+ s/ò/\&ograve;/g;
+ s/ó/\&oacute;/g;
+ s/ô/\&ocirc;/g;
+ s/õ/\&otilde;/g;
+ s/ö/\&ouml;/g;
+ s/ø/\&oslash;/g;
+ s/ù/\&ugrave;/g;
+ s/ú/\&uacute;/g;
+ s/û/\&ucirc;/g;
+ s/ü/\&uuml;/g;
+ s/ý/\&yacute;/g;
+ s/þ/\&thorn;/g;
+ s/ÿ/\&yuml;/g;
+ $sgmlout .= $_;
+ }
+ return $sgmlout;
+}
+
+# ------------------------------------------------------------------------
+
+=item LinuxDocTools::init
+
+Takes care of initialization of package-global variables (which are actually
+defined in L<LinuxDocTools::Vars>). The package-global variables are I<$global>,
+a reference to a hash containing numerous settings, I<%Formats>, a hash
+containing all the formats, and I<%FmtList>, a hash containing the currently
+active formats for help texts.
+
+Apart from this, C<LinuxDocTools::init> also finds all distributed and site-local
+formatting backends and C<require>s them.
+
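+A minimal calling sketch (hedged; it assumes I<%Formats> is visible, e.g. via
+C<use LinuxDocTools::Vars;> as in the driver itself):
+
+ LinuxDocTools::init;
+ # every fmt_*.pl file found has now registered its backend in %Formats
+ print join (", ", sort keys %Formats), "\n";
+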
+=cut
+
+# -----------------------------------------------------------------------------------
+sub init {
+# -----------------------------------------------------------------------------------
+ trap_signals;
+
+ # Register the ``global'' pseudoformat. Apart from the global settings, we
+  # also use $global to keep the global variable name space clean: everything
+  # that we need to provide to other modules is stuffed into $global.
+ $global = {};
+ $global->{NAME} = "global";
+ $global->{HELP} = "";
+ $global->{OPTIONS} = [
+ { option => "backend",
+ type => "l",
+ 'values' => [ "html", "info", "latex", "lyx", "rtf", "txt", "check" ],
+ short => "B" },
+ { option => "papersize",
+ type => "l",
+ 'values' => [ "a4", "letter" ],
+ short => "p" },
+ { option => "language",
+ type => "l",
+ 'values' => [ @LinuxDocTools::Lang::Languages ],
+ short => "l" },
+ { option => "charset", type => "l",
+ 'values' => [ "latin", "ascii", "nippon", "euc-kr" ], short => "c" },
+ { option => "style", type => "s", short => "S" },
+ { option => "tabsize", type => "i", short => "t" },
+ # { option => "verbose", type => "f", short => "v" },
+ { option => "debug", type => "f", short => "d" },
+ { option => "define", type => "s", short => "D" },
+ { option => "include", type => "s", short => "i" },
+ { option => "pass", type => "s", short => "P" }
+ ];
+ $global->{backend} = "linuxdoc";
+ $global->{papersize} = "a4";
+ $global->{language} = "en";
+ $global->{charset} = "ascii";
+ $global->{style} = "";
+ $global->{tabsize} = 8;
+ $global->{verbose} = 0;
+ $global->{define} = "";
+ $global->{debug} = 0;
+ $global->{include} = "";
+ $global->{pass} = "";
+ $global->{InFiles} = [];
+ $global->{fmtlist} = ""; # List of loaded fmt files
+ $Formats{$global->{NAME}} = $global; # All formats we know.
+ $FmtList{$global->{NAME}} = $global; # List of formats for help msgs.
+
+ $global->{sgmlpre} = "$main::AuxBinDir/sgmlpre";
+ my $error_header = "LinuxdocTools::init";
+
+ if ( -e "/etc/papersize" ){
+ open (my $PAPERSIZE,"< /etc/papersize") ||
+      die "$error_header: Could not open \"/etc/papersize\" for reading\n";
+ chomp (my $paper = <$PAPERSIZE>);
+ $global->{papersize} = "letter" if ( $paper eq "letter");
+ close $PAPERSIZE;
+ }
+
+ # automatic language detection: disabled by default
+ # {
+ # my $lang;
+ # foreach $lang (@LinuxDocTools::Lang::Languages)
+ # {
+ # if (($ENV{"LC_ALL"} =~ /^$lang/i) ||
+ # ($ENV{"LC_CTYPE"} =~ /^$lang/i) ||
+ # ($ENV{"LANG"} =~ /^$lang/i)) {
+ # $global->{language} = Any2ISO($lang);
+ # }
+ # }
+ # }
+
+ # --------------------------------------------------------------------------------
+ $global->{preNSGMLS} = sub {
+ # ------------------------------------------------------------------------------
+ # Define a fallback preNSGMLS. Used when the format is "global" (from sgmlcheck).
+ # ------------------------------------------------------------------------------
+ $global->{NsgmlsOpts} .= " -s ";
+ $global->{NsgmlsPrePipe} = "cat $global->{file}";
+ };
+
+ # We need to load all fmt files here, so the allowed options for all
+  # formats are put into $global and a complete usage message is built,
+ # including options for all formats.
+ my %locations = ();
+ foreach my $path ("$main::DataDir/site",
+ "$main::DataDir/dist",
+ "$main::DataDir/fmt"){
+ foreach my $location (<$path/fmt_*.pl>){
+ my $fmt = $location;
+ $fmt =~ s/^.*_//;
+ $fmt =~ s/\.pl$//;
+ $locations{$fmt} = $location unless defined $locations{$fmt};
+ }
+ }
+
+ foreach my $fmt ( keys %locations ){
+ $global->{fmtlist} .= " Loading $locations{$fmt}\n";
+ require $locations{$fmt};
+ }
+}
+
+# ------------------------------------------------------------------------
+
+=item LinuxDocTools::process_options ($0, @ARGV)
+
+This function contains all initialization that is bound to the current
+invocation of LinuxDocTools. It looks in C<$0> to deduce the backend that
+should be used (e.g. B<sgml2txt> activates the I<txt> backend) and parses the
+options array. It returns an array of filenames it encountered during
+option processing.
+
+As a side effect, the environment variable I<SGML_CATALOG_FILES> is
+modified and, once I<$global-E<gt>{format}> is known, I<SGMLDECL> is set.
+
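+For example (a hedged sketch; both calls below should end up selecting the
+I<txt> backend, one from the program name and one from an explicit option):
+
+ @files = LinuxDocTools::process_options ("/usr/bin/sgml2txt", "doc.sgml");
+ @files = LinuxDocTools::process_options ($0, "--backend=txt", "doc.sgml");
+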
+=cut
+
+# ------------------------------------------------------------------------
+sub process_options {
+# ------------------------------------------------------------------------
+ my $progname = shift;
+ my @tmpargs = @_;
+ my @args = ();
+ my $format = '';
+
+ # Try getting the format. We need to do this here so process_options
+  # knows which format is in use and which format options are allowed.
+
+ # First, see if we have an explicit backend option by looping over command line.
+ # Do not shift in the while condition itself, 0 in options like '-s 0' will
+ # otherwise stop looping
+ while ( @tmpargs ){
+ $_ = shift @tmpargs;
+ if ( s/--backend=// ){
+ $format = $_;
+ } elsif ( $_ eq "-B" ){
+ $format = shift @tmpargs;
+ } else {
+ push @args, $_;
+ }
+ }
+
+ unless ( $format ){
+ my ($tmpfmt, $dummy1, $dummy2) = fileparse($progname, "");
+ if ( $tmpfmt =~ s/^sgml2// ) { # Calling program through sgml2xx symlinks
+ $format = $tmpfmt;
+ } elsif ( $tmpfmt eq "sgmlcheck" ) { # Calling program through sgmlcheck symlink
+ $format = "global";
+ }
+ }
+
+ if ( $format ) {
+ if ( $format eq "check" ){
+ $format = "global";
+ } elsif ( $format eq "latex" ){
+ $format = "latex2e";
+ }
+ $FmtList{$format} = $Formats{$format} or
+ usage("$format: Unknown format");
+ $global->{format} = $format;
+ } else {
+ usage("");
+ }
+
+ # Parse all the options from @args, and return files.
+ my @files = LinuxDocTools::Utils::process_options(@args);
+
+ # Check the number of given files
+ $#files > -1 || usage("No filenames given");
+
+ # Normalize language string
+ $global->{language} = Any2ISO($global->{language});
+
+ # Setup the SGML environment.
+ my @sgmlcatalogs =
+ (# SGML iso-entities catalog location in Debian sgml-data package
+ "$main::isoentities_prefix/share/sgml/entities/sgml-iso-entities-8879.1986/catalog",
+ # SGML iso-entities catalog location in ArchLinux, Fedora and Gentoo
+ "$main::isoentities_prefix/share/sgml/sgml-iso-entities-8879.1986/catalog",
+ # SGML iso-entities catalog location when installed from linuxdoc-tools
+ "$main::isoentities_prefix/share/sgml/iso-entities-8879.1986/iso-entities.cat",
+ # dtd/catalog for SGML-Tools
+ "$main::DataDir/linuxdoc-tools.catalog",
+ # The super catalog
+ "/etc/sgml/catalog");
+
+ @sgmlcatalogs = ($ENV{SGML_CATALOG_FILES}, @sgmlcatalogs) if defined $ENV{SGML_CATALOG_FILES};
+
+ $ENV{SGML_CATALOG_FILES} = join(':', @sgmlcatalogs);
+
+  # Set SGMLDECL to the first of these that is readable, unset otherwise
+ $ENV{SGMLDECL} = ldt_searchfile(["$main::DataDir/dtd/$global->{format}.dcl",
+ "$main::DataDir/dtd/$global->{style}.dcl",
+ "$main::DataDir/dtd/sgml.dcl"]);
+
+ # Show the list of loaded fmt_*.pl files if debugging
+ print STDERR $global->{fmtlist} if $global->{debug};
+
+ # Return the list of files to be processed
+ return @files;
+}
+
+# ------------------------------------------------------------------------
+
+=item LinuxDocTools::process_file
+
+With all the configuration done, this routine will take a single filename
+and convert it to the currently active backend format. The conversion is
+done in a number of steps in tight interaction with the currently active
+backend (see also L<LinuxDocTools::BackEnd>):
+
+=over
+
+=item 1. Backend: set NSGMLS options and optionally create a pre-NSGMLS pipe.
+
+=item 2. Here: Run the preprocessor to handle conditionals.
+
+=item 3. Here: Run NSGMLS.
+
+=item 4. Backend: run pre-ASP conversion.
+
+=item 5. Here: Run SGMLSASP.
+
+=item 6. Backend: run post-ASP conversion, generating the output.
+
+=back
+
+All stages are influenced by command-line settings, currently active format,
+etcetera. See the code for details.
+
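+A backend hooks into stages 1, 4 and 6 through code references in I<%Formats>.
+A hedged sketch of how they are invoked (I<txt>, C<$in> and C<$out> are just
+placeholder names here):
+
+ &{$Formats{txt}{preNSGMLS}}()       if defined $Formats{txt}{preNSGMLS}; # stage 1
+ &{$Formats{txt}{preASP}}($in, $out) if defined $Formats{txt}{preASP};    # stage 4
+ &{$Formats{txt}{postASP}}($in)      if defined $Formats{txt}{postASP};   # stage 6
+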
+=cut
+
+# ------------------------------------------------------------------------
+sub process_file {
+# ------------------------------------------------------------------------
+ my $file = $global->{origfile} = shift (@_);
+ my $saved_umask = umask;
+ my $error_header = "LinuxdocTools::process_file";
+
+ print "Processing file $file\n";
+ umask 0077;
+
+ my ($filename, $filepath, $filesuffix) = fileparse($file, "\.sgml");
+ $global->{filename} = $filename;
+ $global->{filepath} = $filepath;
+ $global->{file} = ldt_searchfile(["$filepath/$filename.sgml",
+ "$filepath/$filename.SGML"])
+ or die "$error_header: Cannot find $file. Aborting ...\n";
+
+ my $dtd = ldt_getdtd_v1("$global->{file}");
+ print STDERR "DTD: " . $dtd . "\n" if $global->{debug};
+
+ # Prepare temporary directory
+ my $tmpdir = $ENV{'TMPDIR'} || '/tmp';
+ $tmpdir = tempdir("linuxdoc-tools.XXXXXXXXXX", DIR => "$tmpdir");
+
+ # Set common base name for temp files and temp file names
+ my $tmpbase = $global->{tmpbase} = $tmpdir . '/sgmltmp.' . $filename;
+ my $precmdout = "$tmpbase.01.precmdout";
+ my $nsgmlsout = "$tmpbase.02.nsgmlsout"; # Was $tmpbase.1
+ my $preaspout = "$tmpbase.03.preaspout"; # Was $tmpbase.2
+ my $aspout = "$tmpbase.04.aspout"; # Was $tmpbase.3
+
+ # Set up the preprocessing command. Conditionals have to be
+ # handled here until they can be moved into the DTD, otherwise
+ # a validating SGML parser will choke on them.
+
+ # Check if output option for latex is pdf or not
+ if ($global->{format} eq "latex2e") {
+ if ($Formats{$global->{format}}{output} eq "pdf") {
+ $global->{define} .= " pdflatex=yes";
+ }
+ }
+
+ # Set the actual pre-processing command
+ my($precmd) = "| $global->{sgmlpre} output=$global->{format} $global->{define}";
+
+ # Make sure path of file to be processed is in SGML_SEARCH_PATH
+ $ENV{"SGML_SEARCH_PATH"} .= ":$filepath";
+
+ # You can hack $NsgmlsOpts here, etcetera.
+  $global->{NsgmlsOpts} .= " -D $main::prefix/share/sgml -D $main::DataDir";
+  $global->{NsgmlsOpts} .= " -i$global->{include}" if ($global->{include});
+
+ # If a preNSGMLS function is defined in the fmt file, pipe its output to $FILE,
+ # otherwise just open $global->{file} as $IFILE
+ # ----------------------------------------------------------------------------
+ my $IFILE;
+ if ( defined $Formats{$global->{format}}{preNSGMLS} ) {
+ $global->{NsgmlsPrePipe} = &{$Formats{$global->{format}}{preNSGMLS}};
+ open ($IFILE,"$global->{NsgmlsPrePipe} |")
+ || die "$error_header: Could not open pipe from $global->{NsgmlsPrePipe}. Aborting ...\n";
+ } else {
+ open ($IFILE,"< $global->{file}")
+ || die "$error_header: Could not open $global->{file} for reading. Aborting ...\n";
+ }
+
+ # Create a temp file with $precmd output
+ my $precmd_command = "$precmd > $precmdout";
+
+ open (my $PRECMDOUT, "$precmd_command")
+ or die "$error_header: Could not open pipe to $precmdout. Aborting ...\n";
+
+ if ($global->{charset} eq "latin") {
+ print $PRECMDOUT ldt_latin1tosgml($IFILE);
+ } else {
+ copy($IFILE,$PRECMDOUT);
+ }
+
+ close $IFILE;
+ close $PRECMDOUT;
+
+ # Process with nsgmls.
+ my $nsgmls_command = "$main::progs->{NSGMLS} $global->{NsgmlsOpts} $ENV{SGMLDECL} $precmdout > $nsgmlsout";
+ system($nsgmls_command) == 0
+ or die "Error: \"$nsgmls_command\" failed with exit status: ",$? >> 8,"\n";
+
+ # Special case: if format is global, we're just checking.
+ cleanup if ( $global->{format} eq "global");
+
+  # If the output file does not exist or is empty, something went wrong.
+ if ( ! -e "$nsgmlsout" ) {
+ die "$error_header: Can't create file $nsgmlsout. Aborting ...\n";
+ } elsif ( -z "$nsgmlsout" ){
+ die "$error_header: $nsgmlsout empty, SGML parsing error. Aborting ...\n";
+ }
+
+ print "- Nsgmls stage finished.\n" if $global->{debug};
+
+ # If a preASP stage is defined, let the format handle it.
+ # --------------------------------------------------------
+ open (my $PREASP_IN, "< $nsgmlsout")
+ or die "$error_header: Could not open $nsgmlsout for reading. Aborting ...\n";
+ open (my $PREASP_OUT, "> $preaspout")
+ or die "$error_header: Could not open $preaspout for writing. Aborting ...\n";
+
+ if (defined $Formats{$global->{format}}{preASP}) {
+ # Usage: preASP ($INHANDLE, $OUTHANDLE);
+ &{$Formats{$global->{format}}{preASP}}($PREASP_IN, $PREASP_OUT) == 0
+ or die "$error_header: Error pre-processing $global->{format}.\n";
+ } else {
+ copy ($PREASP_IN, $PREASP_OUT);
+ }
+
+ close $PREASP_IN;
+ close $PREASP_OUT;
+
+ die "$error_header: Can't create $preaspout file. Aborting ...\n"
+ unless -e "$preaspout";
+
+ print "- PreASP stage finished.\n" if ( $global->{debug} );
+
+ # Run sgmlsasp, with an optional style if specified.
+ # -----------------------------------------------------------
+ my $dtd2 = ldt_getdtd_v2($preaspout)
+ or die "$error_header: Could not read dtd from $preaspout. Aborting ...\n";
+
+ unless ( $dtd eq $dtd2 ){
+ print STDERR "Warning: Two different values for dtd, dtd1: $dtd, dtd2: $dtd2\n";
+ $dtd = $dtd2;
+ }
+
+ $global->{'dtd'} = $dtd;
+
+ # Search order:
+ # - datadir/site/<dtd>/<format>
+ # - datadir/dist/<dtd>/<format>
+
+ my $style = ($global->{style}) ?
+ ldt_searchfile(["$main::DataDir/site/$dtd/$global->{format}/$global->{style}mapping",
+ "$main::DataDir/dist/$dtd/$global->{format}/$global->{style}mapping",
+ "$main::DataDir/mappings/$global->{format}/$global->{style}mapping"])
+ :
+ '';
+
+ my $mapping = ldt_searchfile(["$main::DataDir/site/$dtd/$global->{format}/mapping",
+ "$main::DataDir/dist/$dtd/$global->{format}/mapping",
+ "$main::DataDir/mappings/$global->{format}/mapping"])
+ or die "$error_header: Could not find mapping file for $dtd/$global->{format}. Aborting ...\n";
+
+ $mapping = "$style $mapping" if $style;
+
+ $global->{charset} = "nippon" if ($global->{language} eq "ja");
+
+ # We don't have Korean groff so charset should be latin1.
+ if ($global->{language} eq "ko") {
+ if ($global->{format} eq "groff") {
+ $global->{charset} = "latin1";
+ } else {
+ $global->{charset} = "euc-kr";
+ }
+ }
+
+ if ($global->{format} eq "groff"){
+ if ($dtd eq "linuxdoctr") {
+ $mapping = "$main::DataDir/mappings/$global->{format}/tr-mapping";
+ }
+ }
+
+ my $sgmlsasp_command = "$main::progs->{SGMLSASP} $mapping < $preaspout |
+ expand -t $global->{tabsize} > $aspout";
+ system ($sgmlsasp_command) == 0
+ or die "$error_header: Error running $sgmlsasp_command. Aborting ...\n";
+
+ die "$error_header: Can't create $aspout file. Aborting ...\n"
+ unless -e "$aspout";
+
+ print "- ASP stage finished.\n" if ( $global->{debug} );
+
+ # If a postASP stage is defined, let the format handle it.
+ # ----------------------------------------------------------------
+ umask $saved_umask;
+
+ open (my $INPOSTASP, "< $aspout" )
+ or die "$error_header: Could not open $aspout for reading. Aborting ...\n";
+ if (defined $Formats{$global->{format}}{postASP}) {
+ # Usage: postASP ($INHANDLE)
+ # Should leave whatever it thinks is right based on $INHANDLE.
+ &{$Formats{$global->{format}}{postASP}}($INPOSTASP) == 0
+ or die "$error_header: Error post-processing $global->{format}. Aborting ...\n";
+ }
+ close $INPOSTASP;
+
+ print "- postASP stage finished.\n" if ( $global->{debug} );
+
+ # All done, remove the temporaries.
+ remove_tmpfiles($tmpbase) unless ( $global->{debug} );
+}
+
+=pod
+
+=back
+
+=head1 SEE ALSO
+
+Documentation for various sub-packages of LinuxDocTools.
+
+=head1 AUTHOR
+
+SGML-Tools was written by Cees de Groot, C<E<lt>cg@cdegroot.comE<gt>>,
+and various SGML-Tools contributors as listed in C<CONTRIBUTORS>.
+Taketoshi Sano C<E<lt>sano@debian.orgE<gt>> renamed it to LinuxDocTools.
+
+=cut
+1;
diff --git a/tools/linuxdoc-tools/LinuxDocTools/BackEnd.pm b/tools/linuxdoc-tools/LinuxDocTools/BackEnd.pm
new file mode 100644
index 00000000..e402cc5d
--- /dev/null
+++ b/tools/linuxdoc-tools/LinuxDocTools/BackEnd.pm
@@ -0,0 +1,185 @@
+#
+# BackEnd.pm
+#
+# $Id: BackEnd.pm,v 1.1.1.1 2001/05/24 15:57:41 sano Exp $
+#
+# Dummy module containing backend specification.
+#
+# © Copyright 1997, Cees de Groot
+#
+package LinuxDocTools::BackEnd;
+
+die "This is a documentation package only!";
+
+=head1 NAME
+
+LinuxDocTools::BackEnd - LinuxDocTools back-end specification
+
+=head1 SYNOPSIS
+
+ require LinuxDocTools::BackEnd;
+ $BackEnd->{...};
+
+=head1 DESCRIPTION
+
+LinuxDoc-Tools backend modules need to conform to a certain interface which is
+detailed in this document. The interface makes sure that new backend modules
+(or custom overrides) are compatible with what the main B<LinuxDocTools>
+package expects. Note that this interface is still subject to change; you
+should check this document again on new releases of LinuxDoc-Tools.
+
+=head1 INTERFACE
+
+The interface between the main package and individual backends is very
+minimal - only one global variable is modified, everything else is local. It
+relies heavily on references and complex datatypes, so you want to make
+sure that you're up-to-date with Perl5.
+
+Every backend creates a reference to a hash and stores this reference in
+the global I<%Formats> hash:
+
+ my $BackEnd = {};
+ $Formats{"BackEnd"} = $BackEnd;
+
+The rest of this document will deal with the entries in the local hash
+referenced by I<$BackEnd>.
+
+=head1 HASH ENTRIES
+
+=over 4
+
+=item NAME
+
+Specify the name of the backend, for help messages etcetera.
+
+ $BackEnd->{NAME} = "BackEnd";
+
+=item HELP
+
+Specify an optional extra help message printed when the default usage
+function is executed (see L<LinuxDocTools::Utils>).
+
+ $BackEnd->{HELP} = "This is just an example message";
+
+=item OPTIONS
+
+This specifies the local set of options, which is added to the global set
+of options (available in I<$global>). The options are specified as an
+array of hashes containing a number of keys:
+
+=over 4
+
+=item option
+
+The long option name
+
+=item type
+
+The type of the option, one of B<f> (flag), B<l> (list of allowed values),
+B<s> (string), or B<i> (integer).
+
+=item values
+
+An array of allowed values, in case the option is of the list type.
+
+=item short
+
+A short (single-letter) version of the option name.
+
+=back
+
+Options can be specified as long options:
+
+ --papersize=a4
+
+or as short options:
+
+ -p a4
+
+Note that neither the long options nor the short options may conflict with
+the global options (an override is not - yet - possible), and they should not
+conflict with the options of other backends.
+
+ $BackEnd->{OPTIONS} = [
+ { option => "split", type => "l",
+ 'values' => [ "0", "1", "2" ], short => "s" },
+ { option => "dosnames", type => "f", short => "D" },
+ { option => "imagebuttons", type => "f", short => "I"}
+ ];
+
+The long names themselves function as hash keys; a default can be given
+here and the option processing function will store any values found
+at the same place:
+
+ $BackEnd->{'split'} = 1;
+ $BackEnd->{dosnames} = 0;
+ $BackEnd->{imagebuttons} = 0;
+
+=item preNSGMLS
+
+If defined, this should contain a subroutine that normally does two things: it
+can modify the global value C<$global-E<gt>{NsgmlsOpts}> and it can set the
+global value C<$global-E<gt>{NsgmlsPrePipe}>. The first variable contains
+the option string passed to B<nsgmls>, and the second variable can contain
+a command that generates the input for B<nsgmls>, presumably using the
+current input file in some way (the current input file can be found
+in C<$global-E<gt>{file}>).
+
+ $BackEnd->{preNSGMLS} = sub {
+ $global->{NsgmlsOpts} .= " -ifmtBackEnd ";
+ $global->{NsgmlsPrePipe} = "sed 's/\@/\@\@/g' $global->{file}";
+ };
+
+=item preASP
+
+If defined, this should contain a subroutine accepting an input and an output
+file descriptor. The input file descriptor contains the raw output from
+B<nsgmls>, and the output file descriptor should be filled with input
+to B<sgmlsasp>. This stage is often used to munch character entities
+before they're fed to B<sgmlsasp>, see L<LinuxDocTools::CharEnts>. If the routine
+doesn't return C<0>, LinuxDocTools aborts.
+
+ $BackEnd->{preASP} = sub
+ {
+ my ($infile, $outfile) = @_;
+
+ while (<$infile>)
+ {
+ s/([^\\])\\n/$1 \\n/g;
+ print $outfile $_;
+ }
+ return 0;
+ };
+
+=item postASP
+
+This entry should always be defined, because it needs to contain a routine
+that receives the output from B<sgmlsasp> which normally needs finalization.
+LinuxDocTools itself doesn't know about file-naming conventions, etcetera, of
+the backend, so writing the final file is left to the backend. The subroutine
+receives a reference to a filehandle (containing B<sgmlsasp> output) and
+should do whatever it likes with this datastream.
+
+ $BackEnd->{postASP} = sub
+ {
+ my $infile = shift;
+
+ copy ($infile, "$global->{filename}.ext");
+ return 0;
+ };
+
+=back
+
+=head1 SEE ALSO
+
+L<LinuxDocTools> and subpackages.
+
+=head1 AUTHOR
+
+SGML-Tools was written by Cees de Groot, C<E<lt>cg@cdegroot.comE<gt>>,
+and various SGML-Tools contributors as listed in C<CONTRIBUTORS>.
+Taketoshi Sano C<E<lt>sano@debian.orgE<gt>> renamed it to LinuxDocTools
+and did some bug fixes and updates on it.
+
+=cut
+1;
diff --git a/tools/linuxdoc-tools/LinuxDocTools/CharEnts.pm b/tools/linuxdoc-tools/LinuxDocTools/CharEnts.pm
new file mode 100644
index 00000000..b0bcd532
--- /dev/null
+++ b/tools/linuxdoc-tools/LinuxDocTools/CharEnts.pm
@@ -0,0 +1,176 @@
+#
+# CharEnts.pm
+#
+# $Id: CharEnts.pm,v 1.1.1.1 2001/05/24 15:57:41 sano Exp $
+#
+# SGML Character Entity utilities -- interface to Perl module
+# Text::EntityMap.
+#
+package LinuxDocTools::CharEnts;
+use strict;
+
+=head1 NAME
+
+LinuxDocTools::CharEnts - Interface to Text::EntityMap
+
+=head1 SYNOPSIS
+
+ my $char_maps = load_char_maps ('.2ext', [ Text::EntityMap::sdata_dirs() ]);
+
+ $value = parse_data ($value, $char_maps, $escape_sub);
+
+=head1 DESCRIPTION
+
+This module provides a simple interface to the entity map handling provided by
+B<Text::EntityMap>.
+
+=head1 FUNCTIONS
+
+=over 4
+
+=cut
+
+use Text::EntityMap;
+use Exporter;
+
+use vars qw(@ISA @EXPORT $VERSION);
+@ISA = qw(Exporter);
+@EXPORT = qw(load_char_maps parse_data);
+$VERSION = sprintf("%d.%02d", q$Revision: 1.1.1.1 $ =~ /(\d+)\.(\d+)/);
+
+# `%warn_map' tracks entities that could not be mapped, so that each one
+# is only warned about once.
+my %warn_map = ();
+
+=item parse_data ($data, $char_map, $escape_sub)
+
+B<parse_data> takes a string of I<$data> in the output format of
+B<nsgmls> (see SP's C<sgmlsout.htm> document) without the leading dash.
+B<parse_data> calls I<$char_map>'s lookup method for each sdata
+entity reference. If the entity reference is undefined, it is
+left alone (without the (n)sgmls C<\|>). For all remaining data,
+B<parse_data> calls back into I<$escape_sub> to properly escape
+characters for the backend formatter. Strings returned from the
+lookup method are assumed to be already escaped.
+
+This routine is derived from David Megginson's SGMLSpm.
+
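+A hedged usage sketch (the escape routine below is only an illustration of an
+HTML-style I<$escape_sub>, not the escaping any particular backend really uses):
+
+ my $escape = sub {
+     my $s = shift;
+     $s =~ s/&/&amp;/g;
+     $s =~ s/</&lt;/g;
+     $s =~ s/>/&gt;/g;
+     return $s;
+ };
+ # $data holds one nsgmls data line, already stripped of its leading dash
+ my $cooked = parse_data ($data, $char_maps, $escape);
+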
+=cut
+
+sub parse_data {
+ my ($data, $char_map, $escape_sub) = @_;
+ my ($result) = "";
+
+ my $sdata_flag = 0;
+ my $out = '';
+
+ while ($data =~ /\\(\\|n|\||[0-7]{1,3})/) {
+ $out .= $`;
+ $data = $';
+
+ if ($1 eq '|') {
+ # beginning or end of SDATA
+ if ("$out" ne '') {
+ if ($sdata_flag) {
+ my ($mapping) = $char_map->lookup ($out);
+ if (defined $mapping) {
+ # escape `\' in mapping for ASP
+ $mapping =~ s/\\/\\\\/g;
+ $result .= $mapping;
+ } else {
+ if (!$warn_map{$out}) {
+ warn "parse_data: no entity map for \`$out'\n";
+ $warn_map{$out} = 1;
+ }
+ # output the entity reference inside of `{}'
+ $result .= &$escape_sub ("{" . $out . "}");
+ }
+ } else {
+ $result .= &$escape_sub ($out);
+ }
+ $out = '';
+ }
+ $sdata_flag = !$sdata_flag;
+
+ } elsif ($1 eq 'n') {
+ # record end
+
+ # pass '\\n' through to ASP
+ $result .= &$escape_sub ($out) . '\\n';
+ $out = '';
+ } elsif ($1 eq '\\') {
+ # backslash
+
+ $result .= &$escape_sub ($out);
+
+ $out = '[bsol ]'; # bsol == entity name for backslash
+ my ($mapping) = $char_map->lookup ($out);
+ if (defined $mapping) {
+ # escape `\' in mapping for ASP
+ $mapping =~ s/\\/\\\\/g;
+ $result .= $mapping;
+ } else {
+ if (!$warn_map{$out}) {
+ warn "parse_data: no entity map for \`$out'\n";
+ $warn_map{$out} = 1;
+ }
+ # output the entity reference inside of `{}'
+ $result .= &$escape_sub ("{" . $out . "}");
+ }
+ $out = '';
+ } else {
+ # other octal character
+ $result .= &$escape_sub ($out . chr(oct($1)));
+ $out = '';
+ }
+ }
+ $out .= $data;
+ if ("$out" ne '') {
+ $result .= &$escape_sub ($out);
+ }
+
+ return ($result);
+}
+
+=item load_char_maps ($format, $paths)
+
+B<load_char_maps> takes an EntityMap format suffix and loads all of the
+character entity replacement sets for that suffix into an EntityMapGroup.
+It searches every directory in I<@{$paths}>.
+
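+For instance (hedged; the C<.2html> suffix is only an illustrative choice):
+
+ my $char_maps = load_char_maps ('.2html', [ Text::EntityMap::sdata_dirs() ]);
+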
+=cut
+
+sub load_char_maps {
+ my ($format, $paths) = @_;
+
+ my (@char_maps) = ();
+ my ($path, $file_name, $char_map);
+
+ foreach $path (@{$paths}) {
+ if (-d $path) {
+ opendir (SDATADIR, $path)
+ || die "load_char_map: opening directory \`$path' for reading: $!\n";
+ foreach $file_name (readdir (SDATADIR)) {
+ next if ($file_name !~ /$format$/);
+ eval {$char_map = Text::EntityMap->load ("$path/$file_name")}
+ || die "load_char_map: loading \`$path/$file_name'\n$@\n";
+ push (@char_maps, $char_map);
+ }
+ closedir (SDATADIR);
+ }
+ }
+
+ warn "load_char_maps: no entity maps found\n"
+ if ($#char_maps == -1);
+
+ return (Text::EntityMap->group (@char_maps));
+}
+
+=back
+
+=head1 AUTHOR
+
+Ken MacLeod, C<E<lt>ken@bitsko.slc.ut.usE<gt>>
+
+=cut
+1;
diff --git a/tools/linuxdoc-tools/LinuxDocTools/FixRef.pm b/tools/linuxdoc-tools/LinuxDocTools/FixRef.pm
new file mode 100644
index 00000000..d2549857
--- /dev/null
+++ b/tools/linuxdoc-tools/LinuxDocTools/FixRef.pm
@@ -0,0 +1,76 @@
+#
+# FixRef.pm
+#
+# $Id: FixRef.pm,v 1.1.1.1 2001/05/24 15:57:41 sano Exp $
+#
+# Start conversion from parsed linuxdoc-sgml to html.
+# - Identify references and file count
+#
+# Rules based on fixref.l
+#
+package LinuxDocTools::FixRef;
+
+# Externally visible variables
+$fixref = {};
+
+# Initialize: set splitlevel before using rules
+# Usage: &{$fixref->{init}}(<split level>);
+ # 0 - super page mode
+ # 1 - big page mode
+ # 2 - small page mode
+$fixref->{init} = sub {
+ $splitlevel = shift;
+};
+
+# Outputs: Read after using rules
+$fixref->{filenum} = 0; # Count of files we will create
+$fixref->{lrec} = {}; # label -> filenum
+
+# Package variables
+$chapter_mode = 0; # <report> vs. <article>
+$splitlevel = 0; # See $fixref->{init} above;
+ # Automatically reduced by 1 for chapter mode
+
+# Finalize parsing
+$fixref->{finish} = sub { }; # Do nothing when we're done
+
+# Ruleset
+$fixref->{rules} = {}; # Individual parsing rules
+$fixref->{defaultrule} = sub { }; # If line does not match any rules
+
+# Set the rules
+# <@@ssect> - split file if necessary
+$fixref->{rules}->{'^<@@ssect>.*$'} = sub { &splitfile(2); };
+
+# <@@sect> - split file if necessary
+$fixref->{rules}->{'^<@@sect>.*$'} = sub { &splitfile(1); };
+
+# <@@chapt> - set chapter mode; reduce splitlevel if needed; split file
+$fixref->{rules}->{'^<@@chapt>.*$'} = sub {
+ $splitlevel-- if (!$chapter_mode);
+ $chapter_mode = 1; &splitfile(0);
+};
+
+# <@@label> - Identify label location
+$fixref->{rules}->{'^<@@label>(.*)$'} = sub {
+ $fixref->{lrec}->{$1} = $fixref->{filenum};
+};
+
+#==============================
+# Split the file (-split option; level in parentheses):
+# non-chapter mode: -0 -> don't split
+# -1 -> split at sect (1)
+# -2 -> split at sect (1) and ssect (2)
+# chapter mode: -0 -> split at chapt (0)
+# -1 -> split at chapt (0)
+# -2 -> split at chapt (0) and sect (1)
+sub splitfile
+{
+ my ($level) = @_;
+ if (($level == 0) || ($splitlevel >= $level)) {
+ $fixref->{filenum}++;
+ }
+}
+
+1;
+
diff --git a/tools/linuxdoc-tools/LinuxDocTools/Html2Html.pm b/tools/linuxdoc-tools/LinuxDocTools/Html2Html.pm
new file mode 100644
index 00000000..9ff2e4cc
--- /dev/null
+++ b/tools/linuxdoc-tools/LinuxDocTools/Html2Html.pm
@@ -0,0 +1,583 @@
+#
+# Html2Html.pm
+#
+# $Id: Html2Html.pm,v 1.4 2001/08/31 23:09:10 sano Exp $
+#
+# Convert parsed linuxdoc-sgml to html.
+# - Split files; match references, generate TOC and navigation
+# aids, etc.
+#
+# Rules based on html2html.l
+#
+package LinuxDocTools::Html2Html;
+
+use FileHandle;
+use LinuxDocTools::Lang;
+
+# Externally visible variables
+$html2html = {};
+
+# Initialize: set splitlevel, extension, images, filename,
+# filenumber, label, header, footer, toclevel,
+# tmpbase, debug.
+# Usage:
+# &{$html2html->{init}}(split,ext,img,filename,filenum,label,hdr,ftr,toc,tmpbase, debug);
+# split level: 0 - super page mode
+# 1 - big page mode
+# 2 - small page mode
+$html2html->{init} = sub {
+ $splitlevel = shift;
+ SWITCH: {
+ $super_page_mode = 0, $big_page_mode = 1, last SWITCH
+ if ($splitlevel == 1);
+ $super_page_mode = 0, $big_page_mode = 0, last SWITCH
+ if ($splitlevel == 2);
+ }
+
+ $fileext = shift;
+ $use_imgs = shift;
+ $firstname = shift;
+ $filecount = 1 + shift;
+ $lprec = shift;
+
+ $header = shift;
+ $footer = shift;
+
+ $toclevel = shift;
+ if ($toclevel == -1) {
+ if ($splitlevel == 0) {
+ $toclevel = 0;
+ } else {
+ $toclevel = 2;
+ }
+ }
+
+ $tmpbase = shift;
+ $content_file = $tmpbase . ".content";
+
+ $debug = shift;
+
+ $nextlabel = Xlat ("Next");
+ $prevlabel = Xlat ("Previous");
+ $toclabel = Xlat ("Contents");
+};
+
+# Package variables
+$big_page_mode = 0; # '-2' subsection splitting
+$super_page_mode = 1; # One page vs. page/section
+$chapter_mode = 0; # <article> vs. <report>
+$current = ""; # State of section/subsection/etc.
+$filenum = 1; # Current output file number
+$filecount = 1;
+$firstname = "$$"; # Base name for file
+$headbuf = ""; # Buffer for URL's
+$fileext = "html"; # "html" vs. "htm" for 8.3
+$in_appendix = 0; # n.n vs. a.n section numbers
+$in_section_list = 0; # List of sections flag
+$language = ""; # Default English; use '-Lname'
+# $lprec{label} # Label record
+$nextlabel = ""; # Link string
+$outfh = STDOUT; # Output filehandle
+$outname = ""; # Output file name
+$prevlabel = ""; # Link string
+$refname = ""; # Ref string
+$sectname = ""; # Section name
+$secnr = 0; # Section count
+$ssectname = ""; # Subsection name
+$ssecnr = 0; # Subsection count
+$skipnewline = 0; # Flag to ignore new line
+$toclabel = ""; # Link string
+$titlename = ""; # Title of document
+$use_imgs = 0; # '-img' pictorial links
+$urlname = ""; # Name for url links
+$header = "";
+$footer = "";
+$toclevel = -1;
+$tmpbase = "/tmp/sgmltmp" . $$;
+$debug = 0;
+$content_file = $tmpbase . ".content.init";
+
+# Ruleset
+$html2html->{rules} = {}; # Individual parsing rules
+
+$html2html->{rules}->{'^<@@appendix>.*$'} = sub {
+ $in_appendix = 1; $secnr = 0; $ssecnr = 0;
+};
+
+$html2html->{rules}->{'^<@@url>(.*)$'} = sub {
+ $skipnewline = 1; $urlname = $1; $headbuf = qq(<A HREF="$1">);
+};
+
+$html2html->{rules}->{'^<@@urlnam>(.*)$'} = sub {
+ $headbuf = $headbuf . "$urlname</A>";
+};
+
+$html2html->{rules}->{'^<@@endurl>.*$'} = sub {
+ $skipnewline = -1; $outfh->print($headbuf); $headbuf = "";
+};
+
+$html2html->{rules}->{'^<@@title>(.*)$'} = sub {
+ $titlename = $1; &heading(STDOUT); print(STDOUT "<H1>$1</H1>\n\n");
+};
+
+$html2html->{rules}->{'^<@@head>(.*)$'} = sub {
+ $skipnewline = 1; $headbuf = $1;
+};
+
+$html2html->{rules}->{'^<@@part>.*$'} = sub { $current = "PART"; };
+
+$html2html->{rules}->{'^<@@endhead>.*$'} = sub {
+ SWITCH: {
+ $outfh->print("<H1>$headbuf</H1>\n\n"), last SWITCH
+ if ($current eq "PART");
+ $outfh->print("<H1>$headbuf</H1>\n\n"), last SWITCH
+ if ($current eq "CHAPTER");
+ $outfh->print("<H2>$headbuf</H2>\n\n"), last SWITCH
+ if ($current eq "SECTION");
+ $outfh->print("<H2>$headbuf</H2>\n\n"), last SWITCH
+ if ($current eq "SUBSECT");
+ $outfh->print("<H3>$headbuf</H3>\n\n"), last SWITCH;
+ }
+ $current = ""; $headbuf = ""; $skipnewline = 0;
+};
+
+$html2html->{rules}->{'^<@@chapt>(.*)$'} = sub {
+ $chapter_mode = 1; $skipnewline = 1; $sectname = $1;
+ &start_chapter($sectname);
+};
+
+$html2html->{rules}->{'^<@@sect>(.*)$'} = sub {
+ $skipnewline = 1; $ssectname = $1;
+ if ($chapter_mode) {
+ &start_section($ssectname);
+ } else {
+ $sectname = $ssectname; &start_chapter($ssectname);
+ }
+};
+
+$html2html->{rules}->{'^<@@ssect>(.*)$'} = sub {
+ $skipnewline = 1; $ssectname = $1;
+ if (!$chapter_mode) {
+ &start_section($ssectname);
+ } else {
+ $current = ""; $headbuf = $ssectname;
+ }
+};
+
+$html2html->{rules}->{'^<@@endchapt>.*$'} = sub {
+ STDOUT->print("</UL>\n") if ($in_section_list);
+ if ($outfh->fileno != STDOUT->fileno) {
+ &footing($outfh) if (!$super_page_mode);
+ $outfh->close; $outfh = STDOUT;
+ }
+};
+
+$html2html->{rules}->{'^<@@endsect>.*$'} = sub {
+ STDOUT->print("</UL>\n") if (!$chapter_mode && $in_section_list);
+ if (($outfh->fileno != STDOUT->fileno)
+ && ((!$chapter_mode) || (!$big_page_mode))) {
+ &footing($outfh) if (!$super_page_mode);
+ $outfh->close; $outfh = STDOUT;
+ }
+};
+
+$html2html->{rules}->{'^<@@endssect>.*$'} = sub {
+ if (($outfh->fileno != STDOUT->fileno)
+ && (!$chapter_mode) && (!$big_page_mode) && (!$super_page_mode)) {
+ &footing($outfh); $outfh->close; $outfh = STDOUT;
+ }
+};
+
+$html2html->{rules}->{'^<@@enddoc>.*$'} = sub { };
+
+$html2html->{rules}->{'^<@@label>(.*)$'} = sub {
+ if (!defined($lprec->{$1})) {
+ STDERR->print(qq(html2html: Problem with label "$1"\n)); next;
+ }
+ if ($skipnewline) {
+ $headbuf = sprintf(qq(<A NAME="%s"></A> %s), $1, $headbuf);
+ } else {
+ $outfh->print(qq(<A NAME="$1"></A> ));
+ }
+};
+
+$html2html->{rules}->{'^<@@ref>(.*)$'} = sub {
+ my $tmp;
+
+ $refname = $1;
+ if (!defined($lprec->{$1})) {
+ STDERR->print(qq(html2html: Problem with ref "$1"\n));
+ $skipnewline++; next;
+ }
+ SWITCH: {
+ $tmp = qq(<A HREF="#$1">), last SWITCH
+ if ($lprec->{$1} == $filenum - 1);
+ $tmp = qq(<A HREF="$firstname.$fileext#$1">), last SWITCH
+ if ($lprec->{$1} == 0);
+ $tmp = qq(<A HREF="$firstname-$lprec->{$1}.$fileext#$1">),
+ last SWITCH;
+ }
+ if ($skipnewline) {
+ $headbuf = "$headbuf$tmp";
+ } else {
+ $headbuf = $tmp;
+ }
+ $skipnewline++;
+};
+
+$html2html->{rules}->{'^<@@refnam>.*$'} = sub {
+ $headbuf = "$headbuf$refname</A>\n";
+};
+
+$html2html->{rules}->{'^<@@endref>.*$'} = sub {
+ if ($skipnewline == 1) {
+ $outfh->print($headbuf); $skipnewline = -1;
+ } elsif ($skipnewline == 2) {
+ $skipnewline--;
+ } else {
+ STDERR->print("html2html: Problem with endref\n");
+ $skipnewline--;
+ }
+};
+
+# Default parsing rule
+$html2html->{defaultrule} = sub {
+ $skipnewline++ if ($skipnewline < 0);
+ if ($skipnewline) {
+ chop; $headbuf = "$headbuf$_";
+ } else {
+ $outfh->print($_);
+ }
+};
+
+# Finalize parsing process
+$html2html->{finish} = sub {
+ # Finish footers
+ if ($outfh->fileno != STDOUT->fileno) {
+ if (!$super_page_mode) {
+ &footing($outfh);
+ $outfh->close;
+ }
+ }
+ #
+ if ($super_page_mode) {
+ if ($toclevel > 0) { STDOUT->print("\n<HR>\n"); }
+ $outfh->close if ($outfh->fileno != STDOUT->fileno);
+ if ( -r $content_file ) {
+ open CONTENT, "<$content_file"
+ or die "Can't open content file\n";
+ while (<CONTENT>) {
+ STDOUT->print($_);
+ }
+ close CONTENT;
+ unlink $content_file if (! $debug);
+ }
+ }
+ # Finish the TOC; ensure "next" points to the first page.
+ &browse_links(STDOUT, 1, 0) if (!$super_page_mode);
+ #
+ # add Footer
+ if ( -r "$footer" ) {
+ open FTRFILE, "<$footer" or die "Cannot open footer file\n";
+ while (<FTRFILE>) {
+ STDOUT->print($_);
+ }
+ close FTRFILE;
+ } else {
+ STDOUT->print("</BODY>\n</HTML>\n");
+ }
+};
+
+
+###################################################################
+# Secondary Functions
+###################################################################
+
+# Print standard links
+sub browse_links {
+ my ($outfh, $myfilenum, $top) = @_;
+
+ return if ($super_page_mode);
+
+ $outfh->print("<HR>\n") unless ($top);
+
+ # NOTE: For pages where a next or prev button isn't appropriate, include
+ # the graphic anyway - just don't make it a link. That way, the mouse
+ # position of each button is unchanged from page to page.
+ # Use the passed myfilenum since filenum may already be incremented
+
+ # Next link (first)
+ my $next = $use_imgs
+ ? qq(<IMG SRC="next.png" ALT="$nextlabel">)
+ : qq($nextlabel);
+ $next = qq(<A HREF="$firstname-$myfilenum.$fileext">$next</A>)
+ if ($myfilenum < $filecount);
+ $next = join "", $next, "\n";
+ $outfh->print($next);
+
+ # Previous link
+ my $prev = $use_imgs
+ ? qq(<IMG SRC="prev.png" ALT="$prevlabel">)
+ : qq($prevlabel);
+ $prev = join "", qq(<A HREF="$firstname-), ($myfilenum - 2),
+ qq(.$fileext">$prev</A>)
+ if ($myfilenum >= 3);
+ $prev = join "", $prev, "\n";
+ $outfh->print($prev);
+
+ # Table of contents link
+ my $toc = $use_imgs
+ ? qq(<IMG SRC="toc.png" ALT="$toclabel">)
+ : qq($toclabel);
+ $toc = join "", qq(<A HREF="$firstname.$fileext#toc),
+ &section_num($secnr, 0), qq(">$toc</A>)
+ if ($outfh->fileno != STDOUT->fileno);
+ $toc = join "", $toc, "\n";
+ $outfh->print($toc);
+
+ print($outfh "<HR>\n") if ($top);
+}
+
+# Print end-of-file markup
+sub footing {
+ my $outfh = shift;
+ &browse_links($outfh, $filenum, 0);
+ if ( -r "$footer" ) {
+ open FTRFILE, "<$footer" or die "Cannot open footer file\n";
+ while (<FTRFILE>) {
+ $outfh->print($_);
+ }
+ close FTRFILE;
+ } else {
+ $outfh->print("</BODY>\n</HTML>\n");
+ }
+}
+
+# Print top-of-file markup
+sub heading {
+ my $outfh = shift; my $match;
+
+ # Emit 3.2 HTML until somebody comes up with a better idea - CdG
+ if ( -r "$header" ) {
+ open HDRFILE, "<$header" or die "Cannot open header file\n";
+ while (<HDRFILE>) {
+ $outfh->print($_);
+ }
+ close HDRFILE;
+ } else {
+ $outfh->print(
+ qq(<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n));
+ $outfh->print("<HTML>\n<HEAD>\n");
+ }
+ open VERSFILE, "<$main::DataDir/VERSION" or die "Cannot open version file\n";
+ $version = <VERSFILE>;
+ close VERSFILE;
+ chop $version;
+ $outfh->print(
+ " <META NAME=\"GENERATOR\" CONTENT=\"LinuxDoc-Tools $version\">\n");
+
+ $outfh->print(" <TITLE>");
+ $match = $titlename;
+ $match =~ s/<[^>]*>//g;
+ $outfh->print($match);
+ if ($secnr > 0) {
+ $match = $sectname;
+ $match =~ s/<[^>]*>//g;
+ $outfh->print(": $match");
+ }
+ if ($ssecnr > 0) {
+ $match = $ssectname;
+ $match =~ s/<[^>]*>//g;
+ $outfh->print(": $match");
+ }
+ $outfh->print("</TITLE>\n");
+
+ if (!$super_page_mode) {
+ #
+ # <LINK> Information for next, previous, contents, etc...
+ #
+ $outfh->print(qq( <LINK HREF="$firstname-$filenum.$fileext" REL=next>),"\n")
+ if ($filenum < $filecount);
+ my $prev;
+ $prev = join "", qq( <LINK HREF="$firstname-), ($filenum - 2),
+ qq(.$fileext" REL=previous>)
+ if ($filenum >= 3);
+ $outfh->print($prev,"\n");
+
+ #
+ # Table of contents link
+ #
+ my $toc ;
+ $toc = join "", qq( <LINK HREF="$firstname.$fileext#toc),
+ &section_num($secnr, 0), qq(" REL=contents>)
+ if ($outfh->fileno != STDOUT->fileno);
+ $outfh->print($toc,"\n");
+ } # (!$super_page_mode)
+
+ $outfh->print("</HEAD>\n<BODY>\n");
+ &browse_links($outfh, $filenum, 1);
+}
+
+# Return the section and subsection as a dotted string
+sub section_num {
+ my ($sec, $ssec) = @_;
+ my $l = "A";
+
+ if ($in_appendix) {
+ $sec--;
+ while ($sec) { $l++; $sec--; }
+ return("$l.$ssec") if ($ssec > 0);
+ return("$l");
+ } else {
+ return("$sec.$ssec") if ($ssec > 0);
+ return("$sec");
+ }
+}
+
+# Create a chapter head; start a new file, etc.
+sub start_chapter {
+ my $sectname = shift;
+
+ if (!$super_page_mode && $outfh->fileno != STDOUT->fileno) {
+ &footing($outfh); $outfh->close;
+ }
+ $current = "SECTION"; $secnr++; $ssecnr = 0;
+ if ($super_page_mode) {
+ $outname = $content_file;
+ $outfh = new FileHandle ">>$outname"
+ or die qq(html2html: Fatal: Could not open file "$outname"\n);
+ if ($toclevel > 0) {
+ $headbuf = sprintf(
+ qq(<A NAME="s%s">%s.</A> <A HREF="#toc%s">%s</A>),
+ &section_num($secnr, 0), &section_num($secnr, 0),
+ &section_num($secnr, 0),
+ $sectname);
+ STDOUT->printf(
+ qq(<P>\n<H2><A NAME="toc%s">%s.</A> <A HREF="%s#s%s">%s</A></H2>\n\n),
+ &section_num($secnr, 0), &section_num($secnr, 0),
+ "$firstname.$fileext", &section_num($secnr, 0), $sectname);
+ } else {
+ $headbuf = sprintf(
+ qq(<A NAME="s%s">%s. %s</A>),
+ &section_num($secnr, 0), &section_num($secnr, 0),
+ $sectname);
+ }
+ } else {
+ $outname = "$firstname-$filenum.$fileext"; $filenum++;
+ $outfh = new FileHandle ">$outname"
+ or die qq(html2html: Fatal: Could not open file "$outname"\n);
+ &heading($outfh);
+ if ($toclevel > 0) {
+ $headbuf = sprintf(
+ qq(<A NAME="s%s">%s.</A> <A HREF="%s#toc%s">%s</A>),
+ &section_num($secnr, 0), &section_num($secnr, 0),
+ "$firstname.$fileext", &section_num($secnr, 0),
+ $sectname);
+ STDOUT->printf(
+ qq(<P>\n<H2><A NAME="toc%s">%s.</A> <A HREF="%s">%s</A></H2>\n\n),
+ &section_num($secnr, 0), &section_num($secnr, 0),
+ $outname, $sectname);
+ } else {
+ $headbuf = sprintf(
+ qq(<A NAME="s%s">%s. %s</A>),
+ &section_num($secnr, 0), &section_num($secnr, 0),
+ $sectname);
+ }
+ }
+ $in_section_list = 0;
+}
+
+# Create a section; start a new file, etc.
+sub start_section {
+ my $ssectname = shift;
+
+ $current = "SUBSECT"; $ssecnr++;
+ if ($toclevel > 1) {
+ if (!$in_section_list) {
+ STDOUT->print("<UL>\n"); $in_section_list = 1;
+ }
+ }
+ if ($super_page_mode) {
+ if ($outfh->fileno != STDOUT->fileno && !$chapter_mode) {
+ $outfh->close;
+ }
+ $outname = $content_file;
+ $outfh = new FileHandle ">>$outname"
+ or die qq(html2html: Fatal: Could not open file "$outname"\n);
+ if ($toclevel > 1) {
+ $headbuf = sprintf(qq(<A NAME="ss%s">%s</A> <A HREF="#toc%s">%s</A>\n),
+ &section_num($secnr, $ssecnr),
+ &section_num($secnr, $ssecnr),
+ &section_num($secnr, $ssecnr),
+ $ssectname);
+ STDOUT->printf(
+ qq(<LI><A NAME="toc%s">%s</A> <A HREF="%s#ss%s">%s</A>\n),
+ &section_num($secnr, $ssecnr),
+ &section_num($secnr, $ssecnr),
+ "$firstname.$fileext",
+ &section_num($secnr, $ssecnr),
+ $ssectname);
+ } else {
+ $headbuf = sprintf(qq(<A NAME="ss%s">%s %s</A>\n),
+ &section_num($secnr, $ssecnr),
+ &section_num($secnr, $ssecnr),
+ $ssectname);
+ }
+ } else {
+ if (!$big_page_mode) {
+ if ($outfh->fileno != STDOUT->fileno) {
+ &footing($outfh); $outfh->close;
+ }
+ $outname = "$firstname-$filenum.$fileext"; $filenum++;
+ $outfh = new FileHandle ">$outname"
+ or die qq(html2html: Fatal: Could not open file "$outname"\n);
+ heading($outfh);
+
+ # Since only one section is on any page,
+ # don't use # so that when we
+ # jump to this page, we see the browse
+ # links at the top of the page.
+ if ($toclevel > 1) {
+ $headbuf = sprintf("%s <A HREF=\"%s#toc%s\">%s</A>",
+ &section_num($secnr, $ssecnr),
+ "$firstname.$fileext",
+ &section_num($secnr, $ssecnr),
+ $ssectname);
+ STDOUT->printf(
+ qq(<LI><A NAME="toc%s">%s</A> <A HREF="%s">%s</A>\n),
+ &section_num($secnr, $ssecnr),
+ &section_num($secnr, $ssecnr),
+ $outname, $ssectname);
+ } else {
+ $headbuf = sprintf("%s %s</A>",
+ &section_num($secnr, $ssecnr),
+ $ssectname);
+ }
+ } else {
+ # Since many sections are on one page, we need to use #
+ if ($toclevel > 1) {
+ $headbuf = sprintf(
+ qq(<A NAME="ss%s">%s</A> <A HREF="%s#toc%s">%s</A>\n),
+ &section_num($secnr, $ssecnr),
+ &section_num($secnr, $ssecnr),
+ "$firstname.$fileext",
+ &section_num($secnr, $ssecnr),
+ $ssectname);
+ STDOUT->printf(
+ qq(<LI><A NAME="toc%s">%s</A> <A HREF="%s#ss%s">%s</A>\n),
+ &section_num($secnr, $ssecnr),
+ &section_num($secnr, $ssecnr),
+ $outname,
+ &section_num($secnr, $ssecnr),
+ $ssectname);
+ } else {
+ $headbuf = sprintf(
+ qq(<A NAME="ss%s">%s %s</A>\n),
+ &section_num($secnr, $ssecnr),
+ &section_num($secnr, $ssecnr),
+ $ssectname);
+ }
+ }
+ }
+}
+
diff --git a/tools/linuxdoc-tools/LinuxDocTools/InfoUtils.pm b/tools/linuxdoc-tools/LinuxDocTools/InfoUtils.pm
new file mode 100644
index 00000000..b4bd50bd
--- /dev/null
+++ b/tools/linuxdoc-tools/LinuxDocTools/InfoUtils.pm
@@ -0,0 +1,357 @@
+# InfoUtils.pm
+#
+# Some utils for the linuxdoc info backend.
+#
+# * Create menus
+# * Normalize node names and associated text
+# * Point references to the associated node as needed
+#
+# Copyright (C) 2009 Agustín Martín Domingo, agmartin at debian org
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# --------------------------------------------------------------------
+
+
+package LinuxDocTools::InfoUtils;
+
+use base qw(Exporter);
+
+# List all exported symbols here.
+our @EXPORT_OK = qw(info_process_texi);
+
+# Import :all to get everything.
+our %EXPORT_TAGS = (all => [@EXPORT_OK]);
+
+=head1 NAME
+
+ InfoUtils - Some utils for the linuxdoc info backend.
+
+=head1 SYNOPSIS
+
+ use LinuxDocTools::InfoUtils qw(:all);
+
+ info_process_texi ($infile, $outfile, $infoname);
+
+=head1 DESCRIPTION
+
+This module contains some utils to process the raw texinfo file
+creating menus, normalizing node names and associated text and
+pointing references to the associated node as needed.
+
+=head1 FUNCTIONS
+
+=over 4
+
+=cut
+
+# -------------------------------------------------------------------------
+sub info_normalize_node_text {
+# -------------------------------------------------------------------------
+# Filter characters not allowed in section names
+# -------------------------------------------------------------------------
+ my $text = shift;
+
+ $text =~ s/\s+/ /g;
+ $text =~ s/\@[A-Za-z][A-Za-z0-9]*//g;
+ $text =~ s/(\{|\})//g;
+ $text =~ s/\,//g;
+# $text =~ s/\.+$//g;
+ $text =~ s/\./-/g;
+ $text =~ s/\s+$//g;
+
+ return $text;
+}
+
+# -------------------------------------------------------------------------
+sub info_normalize_node_name {
+# -------------------------------------------------------------------------
+# Filter characters not allowed in node names. Previous filtering of
+# characters not allowed in section names is assumed to have been done.
+# -------------------------------------------------------------------------
+ my $text = shift;
+# my $tmpnodedata = shift;
+
+ $text =~ s/\://g;
+ $text =~ s/\;//g;
+
+# die "Error: Reference \"$text\" already used"
+# if defined $tmpnodedata->{$text};
+
+ return $text;
+}
+
+# -------------------------------------------------------------------------
+sub info_parse_raw_file {
+# -------------------------------------------------------------------------
+# Parse raw texinfo file. It does not yet contain section names, menus,
+# correct references or title.
+# -------------------------------------------------------------------------
+ my $inputfile = shift;
+ my $INPUT;
+
+ my @inputtext = (); # Copy of input file with some preprocessing
+ my %nodedata = # A hash of hashes with all node info
+ ( 'Top' =>
+ { 'text' => "Top",
+ 'depth' => 0,
+ 'up' => "",
+ 'next' => '',
+ 'previous' => "",
+ 'sort' => 0,
+ 'debug' => "",
+ 'menu' => []}
+ );
+
+ my %levellast = (0 => "Top");
+ my %labels = ();
+ my %docdata = # Some misc data for the document
+ ( 'title' => "",
+ 'author' => "",
+ 'subtitle' => ""
+ );
+
+ my $depth = my $lastdepth = 0;
+ my $lastnode = "";
+ my $sort = 0;
+
+ my $inauthor;
+ my $authorline;
+
+ open ($INPUT, "< $inputfile")
+ or die "info-postASP: Could not open $inputfile for read. Aborting ...\n";
+
+ while (<$INPUT>){
+ chomp;
+ if ( s/^\@SUB\s+// ){
+ my $updepth = $depth;
+ my $uppernode = $levellast{$updepth};
+ $depth++;
+ $sort++;
+
+ my @levelmenu = ();
+
+ if ( defined $nodedata{$uppernode}->{'menu'} ){
+ @levelmenu = @{ $nodedata{$uppernode}->{'menu'} };
+ }
+
+ my $nodetext = info_normalize_node_text($_);
+ my $nodename = info_normalize_node_name($nodetext,\%nodedata);
+
+ # Make first appearing node the next node for top node
+ $nodedata{'Top'}->{'next'} = $nodename if ( $lastdepth eq 0);
+
+ # Fill info for current node (and 'next' for last one in level)
+ $nodedata{$nodename}->{'orig'} = $_;
+ $nodedata{$nodename}->{'text'} = $nodetext;
+ $nodedata{$nodename}->{'depth'} = $depth;
+ $nodedata{$nodename}->{'previous'} =
+ defined $levellast{$depth} ? $levellast{$depth} : "";
+ $nodedata{$levellast{$depth}}->{'next'} = $nodename
+ if defined $levellast{$depth};
+ $nodedata{$nodename}->{'up'} = $uppernode;
+ $nodedata{$nodename}->{'sort'} = $sort;
+ $nodedata{$nodename}->{'debug'} =
+ "updepth: $updepth, lastdepth: $lastdepth, up: $uppernode";
+
+      # Keep this defined in case there is no next node in the same level.
+ $nodedata{$nodename}->{'next'} = "";
+
+ push @inputtext, "\@SUB $nodename"; # Rewrite @SUB with the new name
+ push @levelmenu, $nodename; # Add $nodename to the level menu list
+
+ # Prepare things for next @SUB entry found
+ $levellast{$depth} = $lastnode = $nodename;
+ $lastdepth = $depth;
+ $nodedata{$uppernode}->{'menu'} = \@levelmenu;
+
+ } elsif ( s/^\@ENDSUB// ){
+ $depth--;
+ push @inputtext, $_;
+ } elsif (s/^\@LABEL\s+//){
+ # Keep record of node labels vs nodenames. Will use the last.
+ $labels{$_} = $lastnode;
+ } elsif (s/^\@title\s+//){
+ $docdata{'title'} = $_;
+ } elsif (/^\@ldt_endauthor/){
+ $inauthor = '';
+ my @authors;
+ if ( @$docdata{'authors'} ){
+ @authors = @$docdata{'authors'};
+ }
+ push @authors, $authorline;
+ $docdata{'authors'} = \@authors;
+ $authorline = "";
+ } elsif ( s/^\@author\s+// ){
+ $inauthor = 1;
+ $authorline = $_;
+ } elsif ( $inauthor ){
+ next if m/^\s*$/;
+ s/^\s+//;
+ $authorline .= " $_ ";
+ } elsif (s/^\@subtitle\s+//){
+ $docdata{'subtitle'} = $_;
+ } elsif (s/^\@ldt_translator\s+//){
+ $docdata{'translator'} = $_;
+ } elsif (s/^\@ldt_tdate\s+//){
+ $docdata{'tdate'} = $_;
+ } else {
+ push @inputtext, $_;
+ }
+ }
+ close $INPUT;
+
+ $docdata{'nodedata'} = \%nodedata;
+ $docdata{'labels'} = \%labels;
+ $docdata{'inputtext'} = \@inputtext;
+
+ return \%docdata;
+}
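+
+# A minimal sketch (for a hypothetical document with a single chapter
+# named "Introduction") of the structure returned by
+# info_parse_raw_file; keys not shown are omitted for brevity:
+#
+#   $docdata = {
+#     'title'     => "Example Title",
+#     'subtitle'  => "",
+#     'authors'   => [ "A. Writer" ],
+#     'labels'    => { 'intro' => "Introduction" },
+#     'inputtext' => [ '@SUB Introduction', '...' ],
+#     'nodedata'  => {
+#       'Top'          => { 'depth' => 0, 'next' => "Introduction",
+#                           'menu'  => [ "Introduction" ] },
+#       'Introduction' => { 'depth' => 1, 'up' => "Top", 'sort' => 1,
+#                           'previous' => "", 'next' => "" },
+#     },
+#   };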
+
+# -------------------------------------------------------------------------
+sub info_write_preprocessed_file {
+# -------------------------------------------------------------------------
+# Write processed texinfo file. Add section names, menus, correct
+# references and title.
+# -------------------------------------------------------------------------
+ my $docdata = shift;
+ my $infoname = shift;
+ my $texiout = shift;
+
+ die "InfoUtils.pm: No info file name $infoname.\n" unless $infoname;
+ die "InfoUtils.pm: No output texi file $texiout\n" unless $texiout;
+
+ my $nodedata = $docdata->{'nodedata'};
+ my $labels = $docdata->{'labels'};
+ my $inputtext = $docdata->{'inputtext'};
+
+ my $OUTFILE;
+
+ # info_check_parsed_data($nodedata);
+
+ my %sections = ( 1 => "\@chapter",
+ 2 => "\@section",
+ 3 => "\@subsection",
+ 4 => "\@subsubsection");
+
+ my $lastdepth = 0;
+ my $lastnode = "Top";
+ my $texinfo = "\@c %** START OF HEADER
+\@setfilename $infoname
+\@c %** END OF HEADER\n";
+
+ foreach ( @$inputtext ) {
+ if ( s/^\@SUB\s+// ){
+ my $key = $_;
+ my $depth = $nodedata->{$key}->{'depth'};
+ my $name = $nodedata->{$key}->{'text'};
+
+      if ( $depth <= 4 ){
+ my $next = $nodedata->{$key}->{'next'};
+ my $previous = $nodedata->{$key}->{'previous'};
+ my $up = $nodedata->{$key}->{'up'};
+ # my $txt = "\@comment nodename, next, previous, up\n";
+ my $txt = "";
+
+ # $txt .= "\@node $key, $previous, $next, $up\n";
+ $txt .= "\@node $key\n";
+ $txt .= "$sections{$depth} $name\n";
+
+	  if ( $depth > $lastdepth && defined $nodedata->{$lastnode}->{'menu'}){
+ $txt = "\n\@menu\n\* "
+ . join("::\n\* ",@{$nodedata->{$lastnode}->{'menu'}})
+ . "::\n\@end menu\n"
+ . "\n$txt";
+ }
+
+ $texinfo .= $txt;
+ $lastdepth = $depth;
+ $lastnode = $key;
+      } elsif ( $depth == 5 ){
+ $texinfo .= "\@subsubheading $nodedata->{$key}->{'text'}\n";
+ } else {
+ die "info-postASP: Entry \"$key\" has wrong depth $depth\n";
+ }
+ } elsif (s/^\@REF\s+//){
+ if ( defined $labels->{$_} ){
+ # If this reference is to a node, use its nodename
+ $texinfo .= "\@ref{" . $labels->{$_} . "}\n";
+ } else {
+ $texinfo .= "\@ref{$_}\n";
+ }
+ } elsif (s/^\@TOP//){
+ $texinfo .= "\@node top\n"
+ . "\@top " . $docdata->{'title'} . "\n"
+ . "\@example\n";
+
+	    $texinfo .= join(' and ',@{$docdata->{'authors'}}) . "\n"
+		if ( defined $docdata->{'authors'} && @{$docdata->{'authors'}} );
+
+ $texinfo .= $docdata->{'subtitle'} . "\n"
+ if ( defined $docdata->{'subtitle'} );
+
+ $texinfo .= $docdata->{'translator'} . "\n"
+ if ( defined $docdata->{'translator'} );
+
+ $texinfo .= $docdata->{'tdate'} . "\n"
+ if ( defined $docdata->{'tdate'} );
+
+ $texinfo .= "\@end example\n";
+ } else {
+ $texinfo .= "$_\n";
+ }
+ }
+
+ open ($OUTFILE, "> $texiout")
+ or die "Could not open \"$texiout\" for write. Aborting ...\n";
+ print $OUTFILE $texinfo;
+ close $OUTFILE;
+}
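+
+# For the hypothetical single-chapter document sketched above, the
+# generated texinfo would contain fragments like the following (sketch
+# only; a parent's menu is emitted just before its first child node):
+#
+#   @menu
+#   * Introduction::
+#   @end menu
+#
+#   @node Introduction
+#   @chapter Introduction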
+
+# -------------------------------------------------------------------------
+sub info_check_parsed_data {
+# -------------------------------------------------------------------------
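+# Debugging helper: dump the parsed node data to STDERR in parse order.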
+# -------------------------------------------------------------------------
+ my $tmpnodedata = shift;
+ my @sections = sort {
+ $tmpnodedata->{$a}->{'sort'} <=> $tmpnodedata->{$b}->{'sort'}
+ } keys %$tmpnodedata;
+
+ foreach ( @sections ){
+ my $ref = $tmpnodedata->{$_};
+ print STDERR "Node: $_\n";
+ print STDERR " orig: $ref->{'orig'}\n";
+ print STDERR " text: $ref->{'text'}\n";
+ print STDERR " debug: $ref->{'debug'}\n";
+ print STDERR " up: $ref->{'up'}\n";
+ print STDERR " depth: $ref->{'depth'}\n";
+ print STDERR " previous: $ref->{'previous'}\n";
+ print STDERR " next: $ref->{'next'}\n";
+ print STDERR " sort: $ref->{'sort'}\n";
+ print STDERR " menu:\n * " . join("\n * ",@{$ref->{'menu'}}) . "\n" if defined $ref->{'menu'};
+ }
+}
+
+# -------------------------------------------------------------------------
+sub info_process_texi {
+# -------------------------------------------------------------------------
+# info_process_texi($infile, $outfile, $infoname)
+#
+# Call the other functions.
+# -------------------------------------------------------------------------
+ my $infile = shift;
+ my $outfile = shift;
+ my $infoname = shift;
+
+ info_write_preprocessed_file(info_parse_raw_file($infile),$infoname,$outfile);
+}
diff --git a/tools/linuxdoc-tools/LinuxDocTools/Lang.pm b/tools/linuxdoc-tools/LinuxDocTools/Lang.pm
new file mode 100644
index 00000000..2b0e99d6
--- /dev/null
+++ b/tools/linuxdoc-tools/LinuxDocTools/Lang.pm
@@ -0,0 +1,238 @@
+#
+# Lang.pm
+#
+# $Id: Lang.pm,v 1.1.1.1 2001/05/24 15:57:41 sano Exp $
+#
+# Language support.
+#
+# © Copyright 1997, Cees de Groot
+#
+
+package LinuxDocTools::Lang;
+
+use strict;
+use vars qw($VERSION @ISA @EXPORT @Languages $translations);
+
+require 5.004;
+use Exporter;
+use LinuxDocTools::Vars;
+
+$VERSION = sprintf("%d.%02d", q$Revision: 1.1.1.1 $ =~ /(\d+)\.(\d+)/);
+@ISA = qw(Exporter);
+@EXPORT = qw(Any2ISO ISO2Native ISO2English Xlat);
+
+=head1 NAME
+
+LinuxDocTools::Lang - language name and translation functions
+
+=head1 SYNOPSIS
+
+ $isoname = Any2ISO ('deutsch');
+ $native = ISO2Native ('de');
+ $engname = ISO2English ('nederlands');
+
+ $global->{language} = 'nl';
+ $dutch = Xlat ('Table of Contents');
+
+=head1 DESCRIPTION
+
+B<LinuxDocTools::Lang> gives a simple interface to various forms of language
+names, and provides a translation service. Languages can be specified in
+three different ways: by their native name, by their English name, and
+by their 2-letter ISO code. For example, you can specify the German
+language as C<deutsch>, as C<german> or as C<de>.
+
+=head1 FUNCTIONS
+
+=over 4
+
+=cut
+
+@Languages = qw(
+ en english english
+ de deutsch german
+ nl nederlands dutch
+ fr français french
+ es español spanish
+ da dansk danish
+ no norsk norwegian
+ se svenska swedish
+ pt portuges portuguese
+ ca català catalan
+ it italiano italian
+ ro românã romanian
+ ja japanese japanese
+ pl polski polish
+ ko korean korean
+ fi suomi finnish
+);
+
+
+=item Any2ISO
+
+Maps any of the three forms of a language name to its 2-letter ISO
+code. So any of these invocations:
+
+ Any2ISO ('dutch');
+ Any2ISO ('nederlands');
+ Any2ISO ('nl');
+
+will return the string C<"nl">.
+
+=cut
+
+sub Any2ISO
+{
+ my $lang = shift (@_);
+
+ my $i = 0;
+ foreach my $l (@Languages)
+ {
+ ($l eq $lang) && last;
+ $i++;
+ }
+ return $Languages[(int $i / 3) * 3];
+}
+
+
+=item ISO2Native
+
+Maps the ISO code to the native name of the language.
+
+=cut
+
+sub ISO2Native
+{
+ my $iso = shift (@_);
+
+ my $i = 0;
+ foreach my $l (@Languages)
+ {
+ ($l eq $iso) && last;
+ $i++;
+ }
+ return $Languages[$i + 1];
+
+}
+
+
+=item ISO2English
+
+Maps the ISO code to the english name of the language.
+
+=cut
+
+sub ISO2English
+{
+ my $iso = shift (@_);
+
+ my $i = 0;
+ foreach my $l (@Languages)
+ {
+ ($l eq $iso) && last;
+ $i++;
+ }
+ return $Languages[$i + 2];
+}
+
+=item Xlat
+
+Translates its (English) argument to the language specified by the
+current value of C<$global-E<gt>{language}>. The module, in its source
+file, contains a data structure, indexed by the English strings, that
+has all available translations.
+
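+For example, with the translation table below, a hypothetical caller
+could do (sketch only):
+
+ $global->{language} = 'nl';
+ print Xlat ('Next');     # prints "Verder"
+
+A string without an entry for the current language yields C<undef>.
+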
+=cut
+
+sub Xlat
+{
+ my ($txt) = @_;
+
+ return $txt if ($global->{language} eq "en");
+ return $translations->{$txt}{$global->{language}};
+};
+
+
+#
+# By the time this grows big, we'll make up something else.
+#
+$translations = {
+ "Previous" => {
+ "nl" => "Terug",
+ "de" => "Zurück",
+ "es" => "Página anterior",
+ "fr" => "Page précédente",
+ "da" => "Forrige",
+ "no" => "Forrige",
+ "se" => "Föregående",
+ "pt" => "Página anterior",
+ "ca" => "Pàgina anterior",
+ "it" => "Indietro",
+ "ro" => "Înapoi",
+ "ja" => "Á°¤Î¥Ú¡¼¥¸",
+ "pl" => "Poprzedni",
+ "ko" => "ÀÌÀü",
+ "fi" => "Edellinen"
+ },
+ "Next" => {
+ "nl" => "Verder",
+ "de" => "Weiter",
+ "es" => "Página siguiente",
+ "fr" => "Page suivante",
+ "da" => "Næste",
+ "no" => "Neste",
+ "se" => "Nästa",
+ "pt" => "Página seguinte",
+ "ca" => "Pàgina següent",
+ "it" => "Avanti",
+ "ro" => "Înainte",
+ "ja" => "¼¡¤Î¥Ú¡¼¥¸",
+ "pl" => "Nastny",
+ "ko" => "´ÙÀ½",
+ "fi" => "Seuraava"
+ },
+ "Contents" => {
+ "nl" => "Inhoud",
+ "de" => "Inhalt",
+ "es" => "Índice general",
+ "fr" => "Table des matières",
+ "da" => "Indhold",
+ "no" => "Innhold",
+ "se" => "Innehållsförteckning",
+ "pt" => "Índice",
+ "ca" => "Índex",
+ "it" => "Indice",
+ "ro" => "Cuprins",
+ "ja" => "Ìܼ¡¤Ø",
+ "pl" => "Spis Trei",
+ "ko" => "Â÷·Ê",
+ "fi" => "Sisällys"
+ },
+ "Table of Contents" => {
+ "nl" => "Inhoudsopgave",
+ "de" => "Inhaltsverzeichnis",
+ "es" => "Índice general",
+ "fr" => "Table des matières",
+ "da" => "Indholdsfortegnelse",
+ "no" => "Innholdsfortegnelse",
+ "se" => "Innehållsförteckning",
+ "pt" => "Índice geral",
+ "ca" => "Índex general",
+ "it" => "Indice Generale",
+ "ro" => "Cuprins",
+ "ja" => "Ìܼ¡",
+ "pl" => "Spis Trei",
+ "ko" => "Â÷·Ê",
+ "fi" => "Sisällysluettelo"
+ }
+};
+
+=back
+
+=head1 AUTHOR
+
+Cees de Groot, C<E<lt>cg@pobox.comE<gt>>
+
+=cut
+
+1;
diff --git a/tools/linuxdoc-tools/LinuxDocTools/Utils.pm b/tools/linuxdoc-tools/LinuxDocTools/Utils.pm
new file mode 100644
index 00000000..63fe5f91
--- /dev/null
+++ b/tools/linuxdoc-tools/LinuxDocTools/Utils.pm
@@ -0,0 +1,392 @@
+#
+# Utils.pm
+#
+# $Id: Utils.pm,v 1.2 2001/08/31 22:39:44 sano Exp $
+#
+# Utilities, split off from other modules in order to cut down file size.
+#
+# © Copyright 1996, 1997, Cees de Groot
+#
+package LinuxDocTools::Utils;
+use strict;
+
+=head1 NAME
+
+LinuxDocTools::Utils - various supporting routines
+
+=head1 SYNOPSIS
+
+ @files = process_options (@args);
+
+ usage ($msg);
+
+ trap_signals;
+
+ cleanup;
+
+ create_temp($tempfile)
+
+=head1 DESCRIPTION
+
+The B<LinuxDocTools::Utils> module contains a number of generic routines, mainly
+split off from the main module in order to keep file size down.
+
+=head1 FUNCTIONS
+
+=over 4
+
+=cut
+
+use DirHandle;
+use FileHandle;
+use Cwd;
+use File::Basename;
+use Exporter;
+use LinuxDocTools::Vars;
+
+use vars qw($VERSION @ISA @EXPORT @EXPORT_OK $in_signal);
+@ISA = qw(Exporter);
+@EXPORT = qw(usage process_options);
+@EXPORT_OK = qw(cleanup trap_signals remove_tmpfiles create_temp);
+$VERSION = sprintf("%d.%02d", q$Revision: 1.2 $ =~ /(\d+)\.(\d+)/);
+
+use subs qw(usage);
+
+# check whether options are unique
+sub check_option_consistency
+{
+ my $owner = {};
+ my ($fmt, $opt);
+ foreach $fmt (keys %FmtList)
+ {
+ my $add = sub { # add to options of $fmt
+ my $str = shift;
+ if ($owner->{$str}) {
+ push(@{$owner->{$str}}, $fmt);
+ }
+ else {
+ $owner->{$str} = [$fmt];
+ }
+ };
+ foreach $opt (@{$Formats{$fmt}{OPTIONS}})
+ {
+ &$add("--$opt->{option}");
+ &$add("-$opt->{short}");
+ }
+ }
+ my $error = 0;
+ foreach $opt (keys %$owner)
+ {
+ if (scalar @{$owner->{$opt}} > 1)
+ {
+ warn "duplicate option: $opt in " .
+ join(', ', @{$owner->{$opt}}) . "\n";
+ $error = 1;
+ }
+ }
+ die "Internal error detected" if $error;
+}
+
+
+=item process_options
+
+This function processes the command line, and sets the variables associated
+with the options along the way. When successful, it returns the arguments
+on the command line it didn't interpret. Normally, this will be a list of
+filenames.
+
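+A usage sketch (the option names shown are hypothetical; the real ones
+come from the loaded format definitions):
+
+ my @files = process_options ('--language=en', 'mydoc.sgml');
+ # @files now holds ('mydoc.sgml'); the option value has been stored
+ # in the matching entry of %Formats.
+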
+=cut
+
+sub process_options
+{
+ my @args = @_;
+ my @retval;
+
+ OPTPROC: while ($args[0])
+ {
+ my $long;
+ my $curarg = $args[0];
+ if ($curarg =~ /^--.*/)
+ {
+ #
+ # Long option, --opt[==value]
+ #
+ $long = 1;
+ }
+ elsif ($curarg =~ /^-.*/)
+ {
+ #
+ # Short option, -o value
+ #
+ $long = 0;
+ }
+ else
+ {
+ #
+ # Filename
+ #
+ push @retval, $curarg;
+ next OPTPROC;
+ }
+
+ #
+ # Start looking for the option
+ #
+ foreach my $fmt (keys %FmtList)
+ {
+ foreach my $opt (@{$Formats{$fmt}{OPTIONS}})
+ {
+ if (($long && $curarg =~ /^--$opt->{option}.*/) ||
+ $curarg =~ /^-$opt->{short}/)
+ {
+ #
+ # Found it! Get the argument and see whether all is OK
+ # with the option.
+ #
+ my $optval = "";
+ if ($long)
+ {
+ if ($curarg =~ /^--$opt->{option}=.*/)
+ {
+ $optval = $curarg;
+ $optval =~ s/[^=]*=(.*)/$1/;
+ }
+ }
+ else
+ {
+ if ($args[1] =~ /^[^-].*/)
+ {
+ $optval = $args[1];
+ }
+ }
+ $opt->{type} eq "f" && do
+ {
+ #
+ # "f" -> flag. Increment, so '-v -v' can work.
+ #
+ $Formats{$fmt}{$opt->{option}} += 1;
+ next OPTPROC;
+ };
+ #
+ # All other types require a value (for now).
+ #
+ shift @args unless $long;
+ if ($optval eq "")
+ {
+ usage "Option $curarg: value required";
+ }
+ ($opt->{type} eq "i" || $opt->{type} eq "s") && do
+ {
+ #
+ # "i" -> numeric value.
+ # "s" -> string value.
+ #
+ # No type checking yet...
+ #
+ if ($opt->{option} eq "define")
+ {
+ $Formats{$fmt}{$opt->{option}} .= " " . $optval;
+ }
+ else
+ {
+ $Formats{$fmt}{$opt->{option}} = $optval;
+ }
+ next OPTPROC;
+ };
+ $opt->{type} eq "l" && do
+ {
+ #
+ # "l" -> list of values.
+ #
+ foreach my $val (@{$opt->{'values'}})
+ {
+ if ($val eq $optval)
+ {
+ $Formats{$fmt}{$opt->{option}} = $optval;
+ next OPTPROC;
+ }
+ }
+ usage "Invalid value '$optval' for '--$opt->{option}'";
+ };
+ usage "Unknown option type $opt->{type} in $fmt/$opt";
+ }
+ }
+ }
+ usage "Unknown option $curarg";
+ }
+ continue
+ {
+ shift @args;
+ }
+ return @retval;
+}
+
+
+=item usage
+
+Prints a generated help message describing the calling convention and the
+allowed options, then the error message passed as its argument (if any),
+and finally exits with status 1.
+
+=cut
+
+sub usage
+{
+ my ($msg) = @_;
+
+ print "LinuxDoc-Tools version " . `cat $main::DataDir/VERSION` . "\n";
+ check_option_consistency;
+ print "Usage:\n";
+ print " " . $global->{myname} . " [options] <infile>\n\n";
+ my @helplist = sort(keys %Formats);
+ @helplist = sort (keys %FmtList) if ($global->{format});
+ foreach my $fmt (@helplist)
+ {
+ if ($fmt eq "global")
+ {
+ print "General options:\n";
+ }
+ else
+ {
+ print "Format: " . $fmt . "\n";
+ }
+ print $Formats{$fmt}{HELP};
+ for my $opt (@{$Formats{$fmt}{OPTIONS}})
+ {
+ my $value = '';
+ if ($opt->{type} eq "i")
+ {
+ $value = "number";
+ }
+ elsif ($opt->{type} eq "l")
+ {
+ $value = "{";
+ my $first = 1;
+ for my $val (@{$opt->{'values'}})
+ {
+ $first || ($value .= ",");
+ $first = 0;
+ $value .= $val;
+ }
+ $value .= "}";
+ }
+ elsif ($opt->{type} eq "s")
+ {
+ $value = "string";
+ }
+ print " --$opt->{option}"; print "=$value" if $value;
+ print " -$opt->{short}"; print " $value" if $value;
+ print "\n";
+ }
+ print "\n";
+ }
+
+ $msg && print "Error: $msg\n\n";
+ exit 1;
+}
+
+
+=item cleanup
+
+This function cleans out all temporary files and exits. The unlink step
+is skipped if debugging is turned on.
+
+=cut
+
+sub cleanup
+{
+ my ($signame) = @_;
+
+ if( $signame ) {
+ if ( $in_signal ) {
+ if( $global->{debug} ) {
+ print STDERR "Caught SIG$signame during cleanup -- aborting\n";
+ }
+ exit -1;
+ }
+ else {
+ if( $global->{debug} ) {
+ print STDERR "Caught SIG$signame -- cleaning up\n";
+ }
+ $in_signal = 1;
+ }
+ }
+
+ if( !$global->{debug} && $global->{tmpbase} ) {
+ remove_tmpfiles($global->{tmpbase});
+ }
+ exit 0;
+}
+
+=item remove_tmpfiles( $tmpbase )
+
+This function cleans out all temporary files, using the argument $tmpbase to
+determine the directory and pattern to use to find the temporary files.
+
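+For instance (hypothetical path), calling
+
+ remove_tmpfiles ("/tmp/ldt-XXXXXX/sgmltmp.mydoc");
+
+would unlink every file in F</tmp/ldt-XXXXXX> whose name starts with
+C<sgmltmp.mydoc>, and then remove the now empty directory.
+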
+=cut
+
+sub remove_tmpfiles($) {
+ my $tmpbase = shift;
+ my ($name,$tmpdir) = fileparse($tmpbase,"");
+ my $namelength = length $name;
+ my $savdir = cwd;
+
+ chdir($tmpdir);
+ my $dir = new DirHandle(".");
+
+ if (!defined($dir) ) {
+ warn "Couldn't open temp directory $tmpdir: $!\n";
+ } else {
+ foreach my $tmpfile ($dir->read()) {
+ if (substr ($tmpfile, 0, $namelength) eq $name) {
+ unlink ($tmpfile) || warn "Couldn't unlink $tmpfile: $! \n";
+ }
+ }
+ $dir->close();
+ }
+
+ chdir($savdir);
+ rmdir($tmpdir) || return -1;
+}
+
+=item trap_signals
+
+This function traps all known signals, making sure that the B<cleanup>
+function is executed on them. It should be called once at initialization
+time.
+
+=cut
+
+sub trap_signals
+{
+ foreach my $sig ( 'HUP', 'INT', 'QUIT', 'ILL',
+ 'TRAP', 'IOT', 'BUS', 'FPE',
+ 'USR1', 'SEGV', 'USR2',
+ 'PIPE', 'ALRM', 'TERM', )
+ {
+ $SIG{$sig} = \&cleanup;
+ }
+}
+
+=item create_temp ( $tmpfile )
+
+This function creates an empty temporary file with restrictive
+permissions (mode 0600), for security reasons.
+
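+The file is opened with C<O_CREAT|O_EXCL> and mode 0600, so creation
+fails (and the program dies) if the name already exists, for example
+as a stale file or a symlink planted by another user:
+
+ create_temp ("$global->{tmpbase}.1");   # hypothetical temporary name
+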
+=cut
+
+sub create_temp($) {
+ my $tmpnam = shift;
+ my $fh = new FileHandle($tmpnam,O_CREAT|O_EXCL|O_WRONLY,0600);
+ $fh or die "$0: failed to create temporary file: $!";
+ $fh->close;
+}
+
+=back
+
+=head1 AUTHOR
+
+Cees de Groot, C<E<lt>cg@pobox.comE<gt>>.
+
+=cut
+
+1;
diff --git a/tools/linuxdoc-tools/LinuxDocTools/Vars.pm b/tools/linuxdoc-tools/LinuxDocTools/Vars.pm
new file mode 100644
index 00000000..49cf630b
--- /dev/null
+++ b/tools/linuxdoc-tools/LinuxDocTools/Vars.pm
@@ -0,0 +1,22 @@
+#
+# Vars.pm
+#
+# $Id: Vars.pm,v 1.1.1.1 2001/05/24 15:57:41 sano Exp $
+#
+# Shared variables.
+#
+# © Copyright 1996, 1997, Cees de Groot
+#
+package LinuxDocTools::Vars;
+use strict;
+
+use Exporter;
+
+use vars qw($VERSION @ISA @EXPORT);
+@ISA = qw(Exporter);
+@EXPORT = qw(%Formats $global %FmtList);
+$VERSION = sprintf("%d.%02d", q$Revision: 1.1.1.1 $ =~ /(\d+)\.(\d+)/);
+
+use vars @EXPORT;
+
+1;
diff --git a/tools/linuxdoc-tools/Text/EntityMap.pm b/tools/linuxdoc-tools/Text/EntityMap.pm
new file mode 100644
index 00000000..d878fa3c
--- /dev/null
+++ b/tools/linuxdoc-tools/Text/EntityMap.pm
@@ -0,0 +1,121 @@
+# -*- perl -*-
+#
+# Copyright (C) 1996 Ken MacLeod
+# See the file COPYING for distribution terms.
+#
+# This file is preprocessed during the build to fix-up the references
+# in `sdata_dirs'.
+#
+# $Id: EntityMap.pm.in,v 1.1.1.1 2001/05/24 15:57:40 sano Exp $
+#
+
+package Text::EntityMap;
+
+use strict;
+
+=head1 NAME
+
+Text::EntityMap - map character entities to output formats
+
+=head1 SYNOPSIS
+
+ use Text::EntityMap;
+
+ $tex_iso_lat1 = Text::EntityMap->load ("ISOlat1.2tex");
+ $tex_iso_lat2 = Text::EntityMap->load ("ISOlat2.2tex");
+ $ent_group = Text::EntityMap->group ($tex_iso_lat1, $tex_iso_lat2);
+
+ $ent_group->lookup ('[copy ]');
+
+ @dirs = Text::EntityMap->sdata_dirs ();
+
+=head1 DESCRIPTION
+
+Text::EntityMap is a module that looks up output-format equivalents
+for special-character and other entities. It was inspired by SGML
+character entities, but can be used in any scenario where the output
+formatting codes for special characters differ.
+
+The C<load()> function takes the file name of a mapping table and
+returns a Text::EntityMap object.
+
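+Each line of a mapping table holds an entity name and its replacement
+text, separated by a single tab. A minimal sketch of such a table
+(hypothetical TeX replacements):
+
+ [copy ]	\copyright{}
+ [eacute]	\'{e}
+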
+The C<group()> function takes an ordered list of Text::EntityMap
+objects and returns a Text::EntityMapGroup object. Looking up an
+entity in a group object returns the replacement provided by the
+first EntityMap object that defines it. This can be used both to
+group sets of mapping files into one object and to override entity
+replacements. An EntityMapGroup may contain other EntityMapGroups.
+
+The C<lookup()> function can be used with either an EntityMap or an
+EntityMapGroup object. It takes an entity name and returns the
+output-format equivalent.
+
+C<sdata_dirs()> returns an array containing the local site directory
+and this version of EntityMap's installed directory, both of which
+contain entity maps. Callers can use these paths when looking for
+tables to pass to C<load()>.
+
+=head1 AUTHOR
+
+Ken MacLeod E<lt>ken@bitsko.slc.ut.usE<gt>
+
+=cut
+
+sub sdata_dirs {
+ return ("/usr/share/entity-map", "/usr/share/entity-map/0.1.0");
+}
+
+sub load {
+ my ($type, $file_name) = @_;
+
+ my ($self) = {};
+ bless ($self, $type);
+
+ open (FILE, "$file_name")
+ || die "Can't open \`$file_name' for reading: $!\n";
+ while (<FILE>) {
+ chop;
+ m/(^[^\t]+)\t(.*)/;
+ $self->{"$1"} = $2;
+ }
+ close (FILE);
+
+ return ($self);
+}
+
+sub group {
+ my ($type) = shift;
+
+ my ($self) = [{}, @_];
+ bless ($self, 'Text::EntityMapGroup');
+
+ return ($self);
+}
+
+sub lookup {
+ my ($self, $entity) = @_;
+
+ return ($self->{$entity});
+}
+
+package Text::EntityMapGroup;
+
+sub lookup {
+ my ($self, $entity) = @_;
+
+ my ($replacement) = $self->[0]{$entity};
+ return $replacement if defined $replacement;
+
+ my ($ii);
+ for ($ii = 1; $ii <= $#{$self}; $ii ++) {
+ $replacement = $self->[$ii]->lookup($entity);
+ if (defined $replacement) {
+ $self->[0]{$entity} = $replacement;
+ return ($replacement);
+ }
+ }
+
+ return (undef);
+}
+
+1;
diff --git a/tools/linuxdoc-tools/copyright b/tools/linuxdoc-tools/copyright
new file mode 100644
index 00000000..a89b6ada
--- /dev/null
+++ b/tools/linuxdoc-tools/copyright
@@ -0,0 +1,85 @@
+
+This is `linuxdoc-tools', a series of tools to implement the Linux
+Documentation Project HOWTO and book styles in SGML.
+
+This copy was modified in order to work with the birddoc DTD. It is
+based on version 0.9.73-2 from Debian 10.
+
+
+----------------------------------
+ The linuxdoc-tools license
+----------------------------------
+
+linuxdoc-tools is derived from linuxdoc-SGML, originally written by
+Matt Welsh and later maintained by Cees de Groot. Linuxdoc-SGML is
+based on James Clark's sgmls parser, and the QWERTZ DTD by Tom
+Gordon. Magnus Alvestad provided the current HTML support. For
+the rest of linuxdoc-SGML,
+
+ Copyright (C) 1994-1996 Matt Welsh <mdw@cs.cornell.edu>
+ Copyright (C) 1996-1998 Cees de Groot <cg@pobox.com>
+
+Original Linuxdoc-SGML itself does not have any limitations.
+Everything not having explicit additional conditions can be freely
+used, modified, and redistributed, under the usual fair use clauses:
+
+ * No warranty. Use at your own risk.
+ * Do not pretend to have written what you did not (Preserve credits
+ and Copyright notices of the different elements if present).
+
+Since then, lots of smaller and bigger changes resulted in a rename
+to SGML-Tools (and later to SGMLtools, since the hyphen caused
+confusion) to indicate that it wasn't just for Linux anymore. See files
+CHANGES.old-v1 and CONTRIBUTORS.old-v1 for changelog and list of
+contributors to old linuxdoc-sgml and sgmltools-v1.
+
+When sgml-tools dropped support for the linuxdoc DTD, Taketoshi Sano
+<sano@debian.org> forked the code to linuxdoc-tools. See README file.
+
+Changes after the fork
+
+ Copyright (C) 1999-2002 Taketoshi Sano <sano@debian.org>
+ Copyright (C) 2000 Juan Jose Amor
+ Copyright (C) 2007-2018 Agustin Martin Domingo <agmartin@debian.org>
+
+Unless conflicting with other licenses, changes by Agustin Martin
+Domingo are free software: you can redistribute and/or modify them
+under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version. Otherwise they honour previous
+license.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+In Debian systems you can find a copy under /usr/share/common-licenses.
+
+
+----------------------------------
+ The entity-map license
+----------------------------------
+
+Copyright (C) 1997 Ken MacLeod
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL KEN MACLEOD BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
+OR OTHER DEALINGS IN THE SOFTWARE.
+
+Except as contained in this notice, the name of Ken MacLeod shall not
+be used in advertising or otherwise to promote the sale, use or other
+dealings in this Software without prior written authorization from
+Ken MacLeod.