#!/usr/local/bin/perl
#
# @(#) Perl - Batch download ftp or http files with config file settings.
# @(#) $Id: mywebget.pl,v 1.22 1999/02/22 18:48:07 jaalto Exp $
#
#  File id
#
#       .Copyright (C) 1998 Jari Aalto
#       .Created: 1999-02
#       .$Contactid: <jari.aalto@poboxes.com> $
#       .$URL: ftp://cs.uta.fi/pub/ssjaaa/ $
#       .$Keywords: Perl txt html conversion $
#       .$Perl: 5.004 $
#
#       This program is free software; you can redistribute it and/or
#       modify it under the terms of the GNU General Public License as
#       published by the Free Software Foundation; either version 2 of
#       the License, or (at your option) any later version.
#
#       This program is distributed in the hope that it will be useful, but
#       WITHOUT ANY WARRANTY; without even the implied warranty of
#       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
#       General Public License for more details.
#
#       You should have received a copy of the GNU General Public License along
#       with this program; if not, write to the Free Software Foundation,
#       Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
#   About program layout
#
#	Code written with Unix Emacs and indentation controlled with
#	Emacs package tinytab.el, a generic tab minor mode for programming.
#
#       The {{{ }}} marks you see in this file are part of the file "fold"
#       control package called folding.el (a Unix Emacs lisp package).
#       Get the latest from ftp://ftp.csd.uu.se/pub/users/andersl/beta/
#
#       There are also lines that look like # ....... &tag ... They
#       are generated by the Emacs Lisp package `tinybm.el', which is also
#       a document structure tool. You can jump between the blocks with
#       the Ctrl-up and Ctrl-down keys and create those "bookmarks" with
#       Emacs M-x tibm-insert. See the www contact site below.
#
#   Funny identifiers at the top of file
#
#       The GNU RCS ident(1) program can print useful information from
#       all variables that are in the format $ IDENTIFIER: text $
#       See also the Unix man page for command what(1), which outputs
#       all lines matching @( # ). Try the commands:
#
#       % what  PRGNAME
#       % ident PRGNAME
#
#   Introduction
#
#       Please start this perl script with options
#
#           --help      to get the help page
#
#   Www contact site
#
#       See http://www.netforward.com/poboxes/?jari.aalto and navigate
#       to html pages in the site to get more information about me
#	and my tools (Emacs, Perl, procmail mostly)
#
#   Description
#
#	If you periodically retrieve the latest versions of certain
#	programs, this is the Perl script for you. Run it from a cron job,
#	or once a week, to download the newest versions of files from
#	around the net.
#
#	_Note:_ This is a simple file-by-file copier and does not offer
#	any date comparison or recursive features like those found in the
#	C program wget(1), http://www.ccp14.ac.uk/mirror/wget.htm and
#	ftp://prep.ai.mit.edu/pub/gnu/
#
#   Change Log
#
#	(none)

use strict;

BEGIN { require 5.004 }

#       A U T O L O A D
#
#       The => operator quotes only bare words, and a module name like
#       File::Basename is not a Perl "word".

use autouse 'Carp'          => qw( croak carp cluck confess );
use autouse 'Text::Tabs'    => qw( expand                   );
use autouse 'Cwd'           => qw( cwd                      );
use autouse 'Pod::Text'     => qw( pod2text                 );
use autouse 'File::Copy'    => qw( copy move                );
use autouse 'File::Path'    => qw( mkpath rmtree            );


#   Standard perl modules

use Env;
use English;
use File::Basename;
use Getopt::Long;

#   Other CPAN modules

use LWP::UserAgent;
use Net::FTP;


    use vars qw ( $VERSION );

    #   This is for use by Makefile.PL and ExtUtils::MakeMaker,
    #   so that the tardist number is in the format YYYY.MMDD.
    #   The REAL version number is defined later.

    #   The following variable is updated by my Emacs setup whenever
    #   this file is saved

    $VERSION = '1999.0222';

# ****************************************************************************
#
#   DESCRIPTION
#
#       Set global variables for the program
#
#   INPUT PARAMETERS
#
#       none
#
#   RETURN VALUES
#
#       none
#
# ****************************************************************************

sub Initialize ()
{
    use vars qw
    (
        $PROGNAME
        $LIB

        $RCS_ID
        $VERSION
        $CONTACT
        $URL
    );

    $LIB	= basename $PROGRAM_NAME;
    $PROGNAME   = $LIB;

    $RCS_ID   = '$Id: mywebget.pl,v 1.22 1999/02/22 18:48:07 jaalto Exp $';
    $VERSION  = (split (' ', $RCS_ID))[2];   # version number in format N.NN+
    $CONTACT  = "<jari.aalto\@poboxes.com>"; # Who is the maintainer
    $URL      = "ftp://cs.uta.fi/pub/ssjaaa/";

    $OUTPUT_AUTOFLUSH = 1;
}


# ***************************************************************** &help ****
#
#   DESCRIPTION
#
#       Print help and exit.
#
#   INPUT PARAMETERS
#
#       $msg    [optional] Reason why function was called.
#
#   RETURN VALUES
#
#       none
#
# ****************************************************************************

=pod

=head1 NAME

@(#) mywebget.pl - Perl Web URL retrieve program

=head1 SYNOPSIS

    mywebget.pl http://example.com/ [URL] ..
    mywebget.pl --file file-with-urls.txt
    mywebget.pl --verbose --overwrite http://example.com/
    mywebget.pl --verbose --overwrite --Output ~/dir/ http://example.com/

=head1 OPTIONS

=head2 General options

=over 4

=item B<--create-paths>

Create paths that do not exist in C<lcd:> directives. Normally any
LCD command that fails to find the path would interrupt the program. With
this option the local directory is created as needed.

=item B<--Firewall FIREWALL>

Use FIREWALL when accessing files via ftp:// protocol.

=item B<--file FILE>

Read URLs from FILE. The file can contain comments starting with # and the
syntax is:

    #   @(#) $HOME/.mywebget - Perl mywebget.pl configuration file
    #
    #	This is comment
    #	Another comment

    tag1: local-copies

	file://absolute/dir/file-1.23.tar.gz

	lcd:$HOME/updates	# chdir here

    tag1: external

	lcd:$HOME/download

      tag2: external-http

	http://www.example.com/page.html
	http://www.example.com/page.html save:/dir/dir/page.html

      tag2: external-ftp

	ftp://ftp.com/dir/file.txt save:xx-file.txt login:foo pass:passwd

	lcd:$HOME/download-kit

	ftp://ftp.com/dir/kit-1.1.tar.gz new:

Possible keywords in the B<ftp://> line are

=over 4

C<lcd:DIRECTORY>

Set the local download directory to DIRECTORY. Any environment variables in
the path name are substituted. If this tag is found, it overrides the
B<--Output> setting. If the path is not a directory, the program terminates
with an error. See also B<--create-paths>.

C<login:LOGIN-NAME>

FTP login name. The default value is "ftp".

C<new:>

If this keyword is found on the current line, the newest file will be
retrieved. The setting is reset to the value of B<--new> after the line has
been processed.

C<pass:PASSWORD>

Default value is a generic mail@some.com email address.

C<regexp:REGEXP>

Get all files in the ftp directory matching REGEXP. The C<save:> keyword is ignored.

C<save:LOCAL-FILE-NAME>

Save file under this name to local disk.

C<tagN:NAME>

You can group files under C<tagN> labels, so that e.g. option B<--tag NAME>
starts downloading files only from that point on, until the next tag of the
same level appears. There can be any number of tag levels (tag1, tag2,
tag3, ...), so you can arrange your downloads hierarchically in the
configuration file. For example, to download all Linux files, you would give
option B<--tag linux>. To download only the latest NT Emacs binary, you
would give option B<--tag emacs-nt>.

The downloading stops at the next tag of the C<same level>. That is, a tag1
section ends only at the next tag1 or at the end of the file.

    tag1: linux		    # All Linux downloads under this category

	tag2: sunsite    tag2: another-name-for-this-spot

	#   List of files to download from here

	tag2: ftp.funet.fi

	#   List of files to download from here

    tag1: emacs-binary

	tag2: emacs-nt

	tag2: xemacs-nt

	tag2: emacs

	tag2: xemacs

=back

=item B<--new>

Get the newest file. If the filename does not end in .asp, .html or .htm,
the URL is considered to point to a program or data file. When new releases
are announced, the version number in the filename usually tells which one is
current, so retrieving a hardcoded filename with:

    mywebget.pl -o -v http://example.com/dir/program-1.3.tar.gz

is usually not a good choice. Adding the B<--new> option to the command line
causes a double pass: a) the whole directory http://example.com/dir/ is
examined for files, and b) files approximately matching the name
program-1.3.tar.gz are sorted and the one with the latest version number is
retrieved.
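
For example, if the remote directory held these hypothetical files, the
double pass would pick the one with the highest version number:

    program-1.3.tar.gz
    program-1.4.tar.gz
    program-2.0.tar.gz      <-- this file would be retrieved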

=item B<--Output DIR>

Before retrieving any files, chdir to DIR.

=item B<--overwrite>

Allow overwriting existing files when retrieving URLs.

=item B<--Proxy PROXY>

Use PROXY for HTTP. (See B<--Firewall> for FTP.)

=item B<--prefix PREFIX>

Add PREFIX to all retrieved files.

=item B<--Postfix POSTFIX -P POSTFIX>

Add POSTFIX to all retrieved files.

=item B<--prefix-date -D>

Add an ISO 8601 C<YYYY-MM-DD::> prefix to all retrieved files.
This is added before a possible B<--prefix-www> or B<--prefix>.

=item B<--prefix-www -W>

Usually files are stored with the same name as in the URL, but if you
retrieve files that have identical names, you can store each page
separately by prefixing the file name with the site name.

    http://example.com/page.html    --> example.com::page.html
    http://example2.com/page.html   --> example2.com::page.html

=item B<--regexp REGEXP>

Retrieve only the URL lines matching REGEXP from your configuration file.
This cancels the B<--tag> option.

=item B<--tag NAME>

Search for tag NAME in the config file and download only the entries defined
under that tag. Refer to the B<--file FILE> option description. You can give
only one B<--tag> option at a time. This cancels the B<--regexp> option.

=back

=head2 Miscellaneous options

=over 4

=item B<--debug LEVEL -d LEVEL>

Turn on debug with positive LEVEL number. Zero means no debug.
This option turns on B<--verbose> too.

=item B<--help> B<-h>

Print help page.

=item B<--Version -V>

Print program's version information.

=back

=head1 README

This small utility makes it possible to keep a list of URLs in a file and
periodically retrieve those pages or files with a simple command. It is best
suited for small batch jobs, e.g. downloading the most recent versions of
software files. If you pass a URL whose file is already on disk, be sure to
supply option B<--overwrite> to allow overwriting old files.

If the URL ends in a slash, the directory listing of the remote machine is
stored under the file name:

    !path!000root-file

The content of this file is either the index.html page or the directory
listing, depending on whether the http or ftp protocol was used.

While you can run this program from the command line to retrieve individual
files, it has been designed to use a separate configuration file via the
B<--file> option. In that configuration file you can control the downloading
with directives like C<save:>, which tells the program to save the file
under a different name.

The simplest way to retrieve the latest version of a kit from a site is:

    mywebget.pl --new --overwrite --verbose \
       http://www.example.com/kit-1.00.tar.gz

Don't worry about the filename "kit-1.00.tar.gz". If kit-3.08.tar.gz existed
at the site, that one would be retrieved instead. The B<--new> option
instructs the program to look for newer versions.

=head1 EXAMPLES

Read a directory. It will be stored as YYYY-MM-DD::!dir!000root-file. Notice
that you give the http directory, not a file name (short options C<-D -o -v>):

    mywebget.pl --prefix-date --overwrite --verbose http://www.example.com/dir/

To overwrite an existing file and add a date prefix to the file name
(short options C<-D -o -v>):

    mywebget.pl --prefix-date --overwrite --verbose \
       http://www.example.com/file.pl

    --> YYYY-MM-DD::file.pl

To add a date and WWW site prefix to the filenames (short options C<-D -W -o -v>):

    mywebget.pl --prefix-date --prefix-www --overwrite --verbose \
       http://www.example.com/file.pl

    --> YYYY-MM-DD::www.example.com::file.pl

=head1 ENVIRONMENT

No environment settings.

=head1 SEE ALSO

The C program wget(1), http://www.ccp14.ac.uk/mirror/wget.htm, and the
old Perl 4 program webget(1), http://www.wg.omron.co.jp/~jfriedl/perl/
With the libwww-perl library you will find the scripts
lwp-download(1), lwp-mirror(1), lwp-request(1) and lwp-rget(1).

=head1 AVAILABILITY

CPAN entry is http://www.perl.com/CPAN-local//scripts/
Reach author at jari.aalto@poboxes.com or
http://www.netforward.com/poboxes/?jari.aalto

=head1 SCRIPT CATEGORIES

CPAN/Administrative

=head1 PREREQUISITES

Modules C<LWP::UserAgent> and C<Net::FTP> are required.

=head1 COREQUISITES

No optional CPAN modules needed.

=head1 OSNAMES

C<any>

=head1 VERSION

$Id: mywebget.pl,v 1.22 1999/02/22 18:48:07 jaalto Exp $

=head1 AUTHOR

Copyright (C) 1996-1999 Jari Aalto. All rights reserved. This program is
free software; you can redistribute it and/or modify it under the same terms
as Perl itself, or under the terms of the GNU General Public License v2 or
later.

=cut

sub Help (;$)
{
    my $id  = "$LIB.Help";
    my $msg = shift;  # optional arg, why are we here...

    pod2text $PROGRAM_NAME;

    exit 1;
}

# ************************************************************** &args *******
#
#   DESCRIPTION
#
#       Read and interpret command line arguments
#
#   INPUT PARAMETERS
#
#       none
#
#   RETURN VALUES
#
#       none
#
# ****************************************************************************

sub HandleCommandLineArgs ()
{
    my $id = "$LIB.HandleCommandLineArgs";

    my ( $version );

    # .......................................... command line options ...

    use vars qw
    (
	$CHECK_NEWEST

	$debug
	$DIR_DATE

	$FILE
	$FIREWALL

	$HELP

	$LCD_CREATE

	$OVERWRITE
	$OUT_DIR

	$PREFIX
	$PREFIX_DATE
	$PREFIX_WWW

	$POSTFIX
	$PROXY

	$URL_REGEXP

	$TAG_REGEXP
	$TAG_NAME
	$verb
    );

    $FIREWALL = "";

    # .................................................... read args ...

    Getopt::Long::config( qw
    (
        require_order
        no_ignore_case
        no_ignore_case_always
    ));

    GetOptions      # Getopt::Long
    (
	  "Version"	    => \$version

	, "create-paths"    => \$LCD_CREATE

	, "debug:i"	    => \$debug
        , "d"               => \$debug

	, "file:s"	    => \$FILE
	, "Firewall:s"	    => \$FIREWALL

        , "help"            => \$HELP
	, "new"		    => \$CHECK_NEWEST
	, "overwrite"	    => \$OVERWRITE
	, "Output:s"	    => \$OUT_DIR

	, "prefix:s"	    => \$PREFIX
	, "D|prefix-date"   => \$PREFIX_DATE
	, "W|prefix-www"    => \$PREFIX_WWW

	, "Postfix:s"	    => \$POSTFIX
	, "Proxy:s"	    => \$PROXY

	, "regexp:s"	    => \$URL_REGEXP
	, "tag:s"	    => \$TAG_NAME

	, "verbose"	    => \$verb

    );

    $version	and  die "$VERSION $PROGNAME $CONTACT $URL\n";
    $HELP	and  Help();
    $debug	and  $verb = 1;


    if ( defined $URL_REGEXP  and  defined $TAG_NAME )
    {
	die "You can't use both --tag and --regexp options.";
    }


    if ( defined $TAG_NAME )
    {
	$TAG_REGEXP = 'tag(\d+):\s*(\S+)\b';
    }

}


# ****************************************************************************
#
#   DESCRIPTION
#
#       Return ISO 8601 date YYYY-MM-DD
#
#   INPUT PARAMETERS
#
#       none
#
#   RETURN VALUES
#
#       $str
#
# ****************************************************************************

sub DateYYYY_MM_DD ()
{
    my $id        = "$LIB.DateYYYY_MM_DD";

    my (@time)    = localtime(time);
    my $YY        = 1900 + $time[5];
    my ($DD, $MM) = @time[3..4];
#   my ($mm, $hh) = @time[1..2];

    $debug and warn "$id: @time\n";

    #   localtime() returns the month in the range 0..11, so add 1
    #   to get the calendar month.

    sprintf "%d-%02d-%02d", $YY, $MM + 1, $DD;
}
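
#   Example (illustrative): on 1999-02-22 the call DateYYYY_MM_DD()
#   returns the string "1999-02-22".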


# ****************************************************************************
#
#   DESCRIPTION
#
#	Expand given PATH by substituting any Environment variables in it.
#
#   INPUT PARAMETERS
#
#	$string	    Path information, like $HOME/.example
#
#   RETURN VALUES
#
#	string	    Expanded path.
#
# ****************************************************************************

sub ExpandPath ($)
{
    my $id	    = "$LIB.ExpandPath";
    local ( $ARG )  = @ARG;

    my ( $key, $value );

    while ( ($key, $value) = each %ENV )
    {
	s/\$$key/$value/;
    }

    #	The env variable values may end in a slash, which would leave a
    #	doubled slash in the path; collapse it:
    #
    #	[ $VAR = /dir/ ]
    #
    #	$VAR/path   --> /dir//path   --> /dir/path
    #

    s,//+,/,;

    $ARG;
}
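
#   Example (illustrative): assuming $ENV{HOME} is "/home/foo",
#   ExpandPath( '$HOME/download' ) returns "/home/foo/download".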


# ****************************************************************************
#
#   DESCRIPTION
#
#	Read directory content
#
#   INPUT PARAMETERS
#
#	$path
#
#   RETURN VALUES
#
#	@	list of files
#
# ****************************************************************************

sub DirContent ($)
{
    my $id	 = "$LIB.DirContent";
    my ( $path ) = @ARG;

    $debug and warn "$id: $path\n";

    local *DIR;

    unless ( opendir DIR, $path )
    {
	print "$id: can't read $path $ERRNO\n";
	return;
    }

    my @tmp = readdir DIR;
    closedir DIR;

    $debug > 1 and warn "$id: @tmp";

    @tmp;
}

# ****************************************************************************
#
#   DESCRIPTION
#
#	Scan until valid tag line shows up. Return line if it is under the
#	TAG
#
#   INPUT PARAMETERS
#
#	$line		    Read line
#
#   RETURN VALUES
#
#	($LINE, $stop)	    The $LINE is non-empty if it belongs to the TAG.
#			    The $stop flag is non-zero if TAG has ended.
#
# ****************************************************************************

{
    my
    (
	  $staticTagLevel
	, $staticTagName
	, $staticTagFound
    );

sub TagHandle ($)
{
    my $id 	    = "$LIB.TagHandle";
    local ($ARG)    = @ARG;

    # ...................................................... tag ...

    my ($tagNbr, $tagN ) = /$TAG_REGEXP/o;
    my $stop;

    unless ( $staticTagFound )
    {
	if ( $debug and $tagNbr )
	{
	    warn "$id: tag $tagNbr [$tagN]\n";
	}

	if ( $tagNbr and   $tagN eq $TAG_NAME )
	{
	    $staticTagLevel  = $tagNbr;
	    $staticTagName   = $tagN;
	    $staticTagFound  = 1;

	    $debug and warn "$id: TAG [$staticTagName] $ARG\n"
	}
	else
	{
	    $ARG = "";			# Read until TAG
	}
    }
    else
    {
	#   We're reading lines after the tag was found.
	#   Terminate on the next tag of the same or higher level.

	if ( $tagNbr  and  $tagNbr <= $staticTagLevel )
	{
	    $debug and warn "$id: End at [$staticTagName] $ARG\n";
	    $stop = 1;
	}
    }

    ($ARG, $stop);

}}



# ****************************************************************************
#
#   DESCRIPTION
#
#	Handle local directory change and die if we can't change to the
#	directory.
#
#   INPUT PARAMETERS
#
#	$dir	    Where to chdir
#	$make	    Flag, if allowed to create directory
#
#   RETURN VALUES
#
#       none
#
# ****************************************************************************

sub DirectiveLcd ($;$)
{
    my $id 	  = "$LIB.DirectiveLcd";
    my ( $dir , $mkdir ) = @ARG;

    my $lcd = ExpandPath $dir;

    unless ( -d $lcd )
    {
	not $mkdir  and  die "$id: LCD [$dir $lcd] is not a directory";

	$verb and warn "$id: Creating directory $lcd";

	mkpath( $lcd, $verb) or die "$id: mkpath $lcd failed $ERRNO";
    }


    $verb	    and  print "$id: chdir $lcd\n";
    chdir $lcd	    or   die   "$id: chdir $lcd $ERRNO";

}


# ****************************************************************************
#
#   DESCRIPTION
#
#	Examine a list of files and return the newest file that matches FILE.
#	The idea is that we divide the filename into 3 parts
#
#	    PREFIX VERSION REST
#
#	So that for example filename
#
#	    emacs-20.3.5.1-lisp.tar.gz
#
#	is exploded to parts
#
#	    emacs -20.3.5.1- lisp.tar.gz
#
#	After this, the VERSION part is examined and all the numbers from
#	it are read and converted to zero-filled hash keys, so that sorting
#	between versions is possible:
#
#	    (20 3 5 1) --> "0020.0003.0005.0001"
#
#	A hash table for each file is built according to this version key
#
#	    VERSION-KEY => FILE-NAME
#
#	When we finally sort the hash by key, we get the latest version number
#	and the associated file.
#
#   INPUT PARAMETERS
#
#	$file	    file to use as base
#	\@files	    list of files
#
#   RETURN VALUES
#
#	$file	    File that is newest, based on version number.
#
# ****************************************************************************

sub LatestVersion ( $ $ )
{
    my $id 		  = "$LIB.LatestVersion";
    my ( $file , $array ) = @ARG;

    $debug and warn "$id: $file --> @$array\n\n";

    local   $ARG    = $file;
    my	    $ret    = $file;
    my	    $regexp = '^(\\D+)(-[-\\d.]+)(\\D+)';

    my ( $pfx, $post, $ver, $max );


    if ( /$regexp/o  )
    {
	$pfx	= $1;
	$ver	= '-([-\d.]+)'; 	    #	NN.NN   YYYY-MM-DD
	$post	= $3;

	$debug and warn "$id: PFX: $pfx POSTFIX: $post\n";

	my ( @try, %hash, %hash2, $key , @v , $version, $file );

        # .................................................. arrange ...
	# If there are version numbers, then sort all according
	# to the version.

	for ( @$array )
	{
	    unless ( /$pfx.*$post/  and  /$regexp/o )
	    {
		$debug and warn "$id: REJECTED\t\t$ARG\n";
		next;
	    }

	    $debug and warn "$id: MATCH: $1 $2 $3\n";


	    $key = "";

	    @v = ( /(\d+)/g );

	    # Record how many separate digits we found.

	    $max = @v	    if @v > $max;

	    while ( @v < 8 )    # fill until 8 elements
	    {
		push @v, 0;
	    }

	    for $version ( @v )
	    {
		#   Zero-pad each number to a fixed width so that a plain
		#   string sort orders the versions correctly.
		$key .= sprintf "%015d.", $version;
	    }

	    $hash  { $key  } = $ARG;
	    $hash2 { $v[0] } = $ARG;
	}


	#   If there were date based versions:
	#
	#	wemi-199802080907.tar.gz
	#	wemi-19980804.tar.gz
	#	wemi-199901260856.tar.gz
	#	wemi-199901262204.tar.gz
	#
	#   Then sort directly by the %hash2, which only contains direct
	#   NUMBER key without prefixed zeroes. For multiple numbers we
	#   sort according to %hash

	if ( $max == 1 )
	{
	    @try  = sort  { $b <=> $a } keys %hash2;
	    %hash = %hash2;
	}
	else
	{
	    @try = sort { $b cmp $a } keys %hash;
	}


	if ( $debug )
	{
	    warn "$id: Choices: $ver $pfx.*$post\n";
	    my $arg;
	    for $arg ( @try )
	    {
		warn "$id: $hash{$arg}\n";
	    }
	}

	#   If there is only one answer, use that. Otherwise, if we grepped
	#   versioned files, take the latest one.

	if ( @try  )
	{
	    $ret = $hash{ $try[0] };
	}
    }

    $debug and warn "$id: RETURN [$ret]\n";

    $ret eq ''	    and die "$id: Internal error, Run with debug on.";

    $ret;
}
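
#   Example (illustrative): given a base file name and a directory listing,
#   the file with the highest version number is returned:
#
#       LatestVersion "kit-1.00.tar.gz",
#                     [ "kit-1.00.tar.gz", "kit-1.10.tar.gz", "other.txt" ]
#       --> "kit-1.10.tar.gz"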





# ****************************************************************************
#
#   DESCRIPTION
#
#       Get file via FTP
#
#   INPUT PARAMETERS
#
#	$site	    Site to connect to
#	$path	    Directory at SITE
#
#	$getFile    File to get
#	$saveFile   File to save on local disk
#
#	$firewall
#
#	$new	    Flag. Should only the newest file be retrieved?
#
#   RETURN VALUES
#
#       none
#
# ****************************************************************************

sub UrlFtp ($$ $$$ ; $ $$ $)
{
    my $id                = "$LIB.UrlFtp";

    # ......................................................... args ...

    my
    (
	$site, $path,

	$getFile, $saveFile, $regexp,

	$firewall,
	$login, $pass,

	$new
    ) = @ARG;

    $login = "ftp"			if $login   eq "" ;
    $pass  = "batch-ftp\@example.com"	if $pass    eq "" ;

    # ............................................ private variables ...

    my $timeout	    = 120;
    my $ftp;
    local $ARG;

    $debug and warn
	"$id: $site, $path [firewall:$firewall] $login,$pass [new:$new]\n";

    $verb  and print
	   "$id: Connecting to $site $getFile --> $saveFile $regexp \n";

    # .................................................. make object ...

    if ( $firewall ne '' )
    {
        $ftp = Net::FTP->new
        (
            $site,
            (
                Firewall => $firewall,
                Timeout  => $timeout
            )
        );
    }
    else
    {
        $ftp = Net::FTP->new
        (
            $site, ( Timeout  => $timeout )
        );
    }

    unless ( defined $ftp )
    {
	print "$id: Cannot make route to $site\n";
	return;
    }

    # ........................................................ login ...

    $debug  and print "$id: Login to $site...\n";

    unless ( $ftp->login($login, $pass) )
    {
        print  "$id: Login failed $login, $pass\n";
	goto QUIT;
    }

    $ftp->binary();


    my $cd = $path;
    $cd = dirname $path	    unless $path =~ m,/$, ;

    if ( $cd ne '' )
    {
	unless ( $ftp->cwd($cd) )
	{
	    print "$id: Remote cd $cd failed.\n";
	}
    }

    # .......................................................... get ...

    $ftp->binary();

    if ( (not defined $regexp  or  $regexp eq '') and  not $new )
    {
	$verb  and  print "$id: Getting file... $getFile\n";
	unless ( $ftp->get($getFile, $saveFile) )
	{
	    print  "$id: ... ** error $getFile\n";
	}
    }
    else
    {
	my ( @list, $i);

	$debug  and print "$id: Getting list of files $site ...\n";

	$i    = 0;

	$debug  and warn "$id: Running ftp dir ls()\n";

	@list = $ftp->ls();

	if ( $regexp ne '' )
	{
	    @list = sort grep /$regexp/, @list;
	}
	else
	{
	    my $name = basename $getFile;
	    my $file = LatestVersion $name, \@list;

	    $verb  and print "$id: ... Getting latest version: $file\n";

	    @list = ( $file );
	}

	$debug and warn "$id:", scalar @list, "@list\n";

	for ( @list )
	{
	    $i++;
	    $verb and printf "$id: ... %2d%%", int ( $i * 100 / @list);
	    $verb and print " $ARG\n";

	    unless ( $ftp->get($ARG) )
	    {
		print "$id: ... ** error $ARG\n";
	    }
	}
    }


    QUIT:
    {
	$ftp->quit() if defined $ftp;
    }
}


# ****************************************************************************
#
#   DESCRIPTION
#
#	Get content of URL
#
#   INPUT PARAMETERS
#
#	$url			    The URL pointer
#	\%errUrlHashRef		    Hash where to store the URL-ERROR_CODE
#	\%errExplanationHashRef	    Hash  where to store ERROR_CODE-EXPLANATION
#
#   RETURN VALUES
#
#	string	Error reason
#	""	ok
#
# ****************************************************************************

sub UrlHttp ( $$$ $$ )
{
    my $id = "$LIB.UrlHttp";
    my
    (
	  $url
	, $file
	, $proxy
	, $errUrlHashRef
	, $errExplanationHashRef
    ) = @ARG;

    $verb and print "$id: $url --> $file\n";

    my ( $ret );

    my $ua      = new LWP::UserAgent;

    #   The proxy must be set before the request is made.

    if ( defined $proxy )
    {
	$ua->proxy("http", $proxy);
    }

    my $request = new HTTP::Request( 'GET' => $url );
    my $obj	= $ua->request( $request , $file );
    my $stat	= $obj->is_success;

    $debug and  print "$id: content-type: ", $obj->content_type, "\n";

    $debug and warn "$id: status $stat\n";

    unless ( $stat )
    {
	$errUrlHashRef->{ $url } = $obj->code;

	#  If this is a new error code, record its explanation.

	if ( not defined $errExplanationHashRef->{ $obj->code }  )
	{
	    $errExplanationHashRef->{ $obj->code } = $obj->message;
	}

	$ret = $errExplanationHashRef->{ $obj->code };

	print "  ** error: ",  $obj->message, "\n";
    }

    $ret;
}




# ****************************************************************************
#
#   DESCRIPTION
#
#	Copy content of PATH to FILE.
#
#   INPUT PARAMETERS
#
#	$path	    From where to read. If this is directory, read files
#		    in directory. If this is file, copy file.
#
#	$file	    Where to put the results.
#	$prefix	    [optional] Filename prefix
#	$postfix    [optional] Filename postfix
#
#   RETURN VALUES
#
#       none
#
# ****************************************************************************

sub UrlFile ($ $ ; $$)
{
    my $id = "$LIB.UrlFile";
    my ( $path, $file , $prefix, $postfix ) = @ARG;

    $debug and warn "$id: $path, $file\n";

    if ( -f $path  and  not -d $path )
    {
	if ( $CHECK_NEWEST )
	{
	    my @dir = DirContent dirname( $path );

	    if ( @dir )
	    {
		my $base = dirname($path);
		$file = LatestVersion basename($path) , \@dir;
    		$path = $base . "/" . $file;
	    }
	    else
	    {
		$verb and print "$id: Can't set newest $file\n";
	    }
	}

	$file = $prefix . $file . $postfix;

	$debug and warn "$id: FileCopy $path => $file\n";

	unless ( copy($path, $file)  )
	{
	    print "$id: FileCopy $path => $file $ERRNO\n";
	}
    }
    else
    {
	my @tmp = DirContent $path;

	local *FILE;

	$file =~ s,/,!,g;

	if ( -e $file and not $OVERWRITE )
	{
	    print "$id: [ignored, exists] $file\n";
	    return;
	}

	unless ( open FILE, ">$file" )
	{
	    print "$id: can't write $file $ERRNO\n";
	    return;
	}

	print FILE join "\n", @tmp;
	close FILE;
    }
}

# ............................................................ &main ...

    Initialize();
    HandleCommandLineArgs();

    my $id   = "$LIB.main";
    my $date = DateYYYY_MM_DD();

    my ( %URL_ERROR_HASH , %URL_ERROR_REASON_HASH  );
    my ( $type, $url, $path, $site, @data , $stat , $file , $line);
    my ( $origFile, $login, $pass , $sitePath, $regexp, $lcd, $new, $stop );
    my ( $count );

    my $prefix 	= "";
    my $postfix = "";

    local $ARG;

    # ......................................................... args ...

    if ( defined $FILE )
    {
	$verb and print "$id: Reading $FILE\n";
	local *F;
	open F, $FILE	    or die "$id: [$FILE] $ERRNO";
	@data = <F>;
	close F;
    }

    push @data, @ARGV if @ARGV;	# Add command line URLs


    # ............................................... prepare output ...

    if ( $OUT_DIR )
    {
	$verb and print "$id: chdir $OUT_DIR\n";
	chdir $OUT_DIR	or die "$id: chdir $OUT_DIR $ERRNO";
    }

    for ( @data )
    {
	$line = $ARG;
	chomp;

	$pass = $login = $regexp = $lcd = $file = "";
	$prefix = $postfix = "";	# do not carry prefixes over from the previous line

	s/[#].*$//;				# Kill comments
	next if /^\s*$/;			# ignore empty lines

        # ................................................... regexp ...

	if ( defined $URL_REGEXP )
	{
	    if ( not /$URL_REGEXP/o )
	    {
		$debug  and  warn "$id: [regexp ignored] $ARG\n";
		next;
	    }
	}

	if ( defined $TAG_REGEXP )
	{
	    ($ARG, $stop ) = TagHandle $ARG;
	    last if $stop;
	    next if $ARG eq '';
	}

        # ............................................... directives ...

	$new = $CHECK_NEWEST;

	$pass	= $1	if /pass:\s*(\S+)/;
	$login	= $1	if /login:\s*(\S+)/;
	$regexp	= $1	if /regexp:\s*(\S+)/;
	$new	= 1	if /new:/;

	/lcd:(\S+)/	and DirectiveLcd $1, $LCD_CREATE;

        # ................................................. grab url ...
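	#   Captures: $1 = whole URL, $2 = protocol (http|ftp|file),
	#   $3 = path starting with "/", $4 = first path component
	#   (the site for http/ftp URLs), $5 = rest of the path.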

	m!^\s*((http|ftp|file):/?(/([^/\s]+)(\S*)))!;

	unless ( defined $1 and defined $2 )
	{
	    $debug and warn "$id: [skipped] $line\n";
	    next;
	}

        # ............................................... components ...

	$url  = $1;
	$type = $2;
	$path = $3;
	$site = $4;
	$sitePath = $5;

	$origFile = $sitePath;

	( $file  = $url ) =~ s,^\s*\S+/,,;

	( $file = $path . "000root-file" ) =~ s,/,!,g	if $file eq "";

	$file	= $1	if /save:(\S+)/;

	$postfix = $POSTFIX		    if defined $POSTFIX;
	$prefix	 = $PREFIX . $prefix	    if defined $PREFIX;
	$prefix  = $site . "::" . $prefix   if $PREFIX_WWW;
	$prefix  = $date . "::" . $prefix   if $PREFIX_DATE;

	$file = $prefix . $file . $postfix;

	if ( -e $file  and  not $OVERWRITE )
	{
	    $verb and print "$id: [ignored, exists] $file\n";
	    next;
	}

        # .................................................... do-it ...

	$debug and warn "$id: <$type> <$site> <$path> <$url> <$file>\n";

	$ARG = $type;

	if ( /http/ )
	{
	    $count++;
	    if ( $new )
	    {
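		#   The --new (double pass) handling for http URLs is not
		#   implemented; nothing is done in this branch.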

	    }
	    else
	    {
		$stat = UrlHttp  $url, $file, $PROXY,
		    \%URL_ERROR_HASH , \%URL_ERROR_REASON_HASH;
	    }
	}
	elsif ( /ftp/ )
	{
	    $count++;
	    $stat = UrlFtp $site, $sitePath
		    , $origFile, $file, $regexp
		    , $FIREWALL, $login, $pass
		    , $new
		    ;
	}
	elsif ( /file/ )
	{
	    UrlFile $path, $origFile, $prefix, $postfix;
	}
    }

    if ( not $count  and  $verb)
    {
	printf "$id: No matching %s items in $FILE\n", $URL_REGEXP ;
    }


0;
__END__
