init
243
wcmtools/bin/apidoc.pl
Executable file
@@ -0,0 +1,243 @@
|
||||
#!/usr/bin/perl
|
||||
|
||||
# This script parses LJ function info from all the library files
|
||||
# that make up the site. See cgi-bin/ljlib.pl for an example
|
||||
# of the necessary syntax.
|
||||
|
||||
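# For reference, a doc block of the sort this script parses looks roughly
# like the following (function, class and argument names here are made up;
# the field list mirrors the stubs generated below):
#
#   # <LJFUNC>
#   # name: LJ::example_func
#   # class: db
#   # des: One-line description; it may continue onto the next line as
#   #      long as the continuation keeps the same indentation.
#   # args: dbarg, uuid, opts?
#   # des-opts: Optional hashref of extra options ('?' marks an optional
#   #           argument, '*' a list argument).
#   # returns: 1 on success, undef on failure.
#   # </LJFUNC>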
use strict;
|
||||
use Getopt::Long;
|
||||
use Data::Dumper;
|
||||
|
||||
my $opt_warn = 0;
|
||||
my $opt_file;
|
||||
my $opt_stubs = 0; # generate stubs of undoced funcs
|
||||
my $opt_class = 0; # group by class
|
||||
my ($opt_include, $opt_exclude); # which packages to inc/excl
|
||||
my @do_dirs;
|
||||
my $basedir;
|
||||
my $opt_conf;
|
||||
die unless GetOptions(
|
||||
'warn' => \$opt_warn,
|
||||
'file=s' => \$opt_file,
|
||||
'stubs' => \$opt_stubs,
|
||||
'class' => \$opt_class,
|
||||
'include=s' => \$opt_include,
|
||||
'exclude=s' => \$opt_exclude,
|
||||
'conf=s' => \$opt_conf,
|
||||
);
|
||||
|
||||
die "Unknown arguments.\n" if @ARGV;
|
||||
die "Can't exclude and include at same time!\n" if $opt_include && $opt_exclude;
|
||||
|
||||
my (@classes, %classname, %common_args);
|
||||
if ($opt_conf) {
|
||||
open (C, $opt_conf) or die "Can't open conf file: $opt_conf\n";
|
||||
while (<C>)
|
||||
{
|
||||
chomp;
|
||||
if (/^basedir\s+(\S+)$/) {
|
||||
$basedir = $1;
|
||||
$basedir =~ s/\$(\w+)/$ENV{$1} or die "Undefined ENV: $1"/eg;
|
||||
} elsif (/^dodir\s+(\S+)$/) {
|
||||
push @do_dirs, $1;
|
||||
} elsif (/^class\s+(\w+)\s+(.+)/) {
|
||||
push @classes, $1;
|
||||
$classname{$1} = $2;
|
||||
} elsif (/^arg\s+(\S+)\s+(.+)/) {
|
||||
$common_args{$1} = $2;
|
||||
} elsif (/\S/) {
|
||||
die "Unknown line in conf file:\n$_\n";
|
||||
}
|
||||
}
|
||||
close C;
|
||||
}
|
||||
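# A conf file for the block above might look like this (directory, class
# and argument names are illustrative only; $LJHOME is expanded from the
# environment):
#
#   basedir $LJHOME
#   dodir   cgi-bin
#   dodir   bin
#   class   db      Database
#   class   general General
#   arg     dbarg   Either a database handle or a database set hashref.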
|
||||
my %funcs;
|
||||
if ($opt_file) {
|
||||
check_file($opt_file);
|
||||
} else {
|
||||
unless ($basedir) {
|
||||
die "No base directory specified.\n";
|
||||
}
|
||||
chdir $basedir or die "Can't cd to base: $basedir\n";
|
||||
foreach (@do_dirs) {
|
||||
find($_);
|
||||
}
|
||||
}
|
||||
|
||||
exit if $opt_warn;
|
||||
|
||||
if ($opt_class)
|
||||
{
|
||||
my %by_class;
|
||||
foreach my $n (sort keys %funcs) {
|
||||
my $f = $funcs{$n};
|
||||
push @{$by_class{$f->{'class'}}}, $f;
|
||||
}
|
||||
my $ret = [];
|
||||
foreach my $cn (@classes) {
|
||||
push @$ret, [ $classname{$cn}, $by_class{$cn} ];
|
||||
}
|
||||
print Dumper($ret);
|
||||
exit;
|
||||
}
|
||||
|
||||
print Dumper(\%funcs);
|
||||
exit;
|
||||
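# Each entry of %funcs dumped above is a hashref roughly of this shape
# (keys come straight from the doc block, plus 'source' and the 'args'
# list built by treeify(); the values shown are invented):
#
#   'LJ::example_func' => {
#       'name'   => 'LJ::example_func',
#       'class'  => 'db',
#       'des'    => 'One-line description.',
#       'source' => 'cgi-bin/ljlib.pl',
#       'args'   => [
#           { 'name' => 'dbarg', 'des' => '...' },
#           { 'name' => 'opts',  'des' => '...', 'optional' => 1 },
#       ],
#   },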
|
||||
sub find
|
||||
{
|
||||
my @dirs = @_;
|
||||
while (@dirs)
|
||||
{
|
||||
my $dir = shift @dirs;
|
||||
|
||||
opendir (D, $dir) or die "Can't open dir: $dir\n";
|
||||
my @files = sort { $a cmp $b } readdir(D);
|
||||
close D;
|
||||
|
||||
foreach my $f (@files) {
|
||||
next if ($f eq "." || $f eq "..");
|
||||
my $full = "$dir/$f";
|
||||
if (-d $full) { find($full); }
|
||||
elsif (-f $full) { check_file($full); }
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
sub check_file
|
||||
{
|
||||
$_ = shift;
|
||||
return unless (-f);
|
||||
return if (/\.(gif|jpg|png|class|jar|zip|exe|orig|rej)$/);
|
||||
return if (/~$/);
|
||||
|
||||
my $curpackage = "";
|
||||
|
||||
my $file = $_;
|
||||
my $infunc = 0;
|
||||
my $f; # the current function info we're loading
|
||||
|
||||
my $prefix;
|
||||
my $curkey;
|
||||
my $contlen;
|
||||
|
||||
open (F, $file) or die "Can't open file: $file\n";
|
||||
while (my $l = <F>)
|
||||
{
|
||||
if ($l =~ /^package\s*(.+);/) {
|
||||
$curpackage = $1;
|
||||
}
|
||||
if ($opt_warn && $curpackage && $l =~ /^sub\s+([a-zA-Z0-9]\S+)/) {
|
||||
my $s = $1;
|
||||
my $total = $curpackage . "::" . $s;
|
||||
unless ($funcs{$total}) {
|
||||
print STDERR "Undocumented: $total\n";
|
||||
|
||||
if ($opt_stubs) {
|
||||
print "# <LJFUNC>\n";
|
||||
print "# name: $total\n";
|
||||
print "# class: \n";
|
||||
print "# des: \n";
|
||||
print "# info: \n";
|
||||
print "# args: \n";
|
||||
print "# des-: \n";
|
||||
print "# returns: \n";
|
||||
print "# </LJFUNC>\n";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
print $l if $opt_stubs;
|
||||
|
||||
if (! $infunc) {
|
||||
if ($l =~ /<LJFUNC>/) {
|
||||
$infunc = 1;
|
||||
$f = {};
|
||||
}
|
||||
next;
|
||||
}
|
||||
|
||||
if ($l =~ /<\/LJFUNC>/) {
|
||||
$infunc = 0;
|
||||
$prefix = "";
|
||||
$curkey = "";
|
||||
$contlen = 0;
|
||||
my $include = 0;
|
||||
if ($opt_exclude) {
|
||||
$include = 1;
|
||||
$include = 0 if $f->{'name'} =~ /^$opt_exclude/;
|
||||
} elsif ($opt_include) {
|
||||
$include = 1 if $f->{'name'} =~ /^$opt_include/;
|
||||
} elsif (! $opt_include && ! $opt_exclude) {
|
||||
$include = 1;
|
||||
}
|
||||
if ($f->{'name'} && $include) {
|
||||
$f->{'source'} = $file;
|
||||
$f->{'class'} ||= "general";
|
||||
unless ($classname{$f->{'class'}}) {
|
||||
print STDERR "Unknown class: $f->{'class'} ($f->{'name'})\n";
|
||||
}
|
||||
$funcs{$f->{'name'}} = $f;
|
||||
treeify($f);
|
||||
}
|
||||
next;
|
||||
}
|
||||
|
||||
# continuing a line from line before... must have
|
||||
# same indenting.
|
||||
if ($prefix && $contlen) {
|
||||
my $cont = $prefix . " "x$contlen;
|
||||
if ($l =~ /^\Q$cont\E(.+)/) {
|
||||
my $v = $1;
|
||||
$v =~ s/^\s+//;
|
||||
$v =~ s/\s+$//;
|
||||
$f->{$curkey} .= " " . $v;
|
||||
next;
|
||||
}
|
||||
}
|
||||
|
||||
if ($l =~ /^(\W*)([\w\-]+)(:\s*)(.+)/) {
|
||||
$prefix = $1;
|
||||
my $k = $2;
|
||||
my $v = $4;
|
||||
$v =~ s/^\s+//;
|
||||
$v =~ s/\s+$//;
|
||||
$f->{$k} = $v;
|
||||
$curkey = $k;
|
||||
$contlen = length($2) + length($3);
|
||||
}
|
||||
}
|
||||
close (F);
|
||||
|
||||
}
|
||||
|
||||
sub treeify
|
||||
{
|
||||
my $f = shift;
|
||||
my $args = $f->{'args'};
|
||||
$f->{'args'} = [];
|
||||
|
||||
$args =~ s/\s+//g;
|
||||
foreach my $arg (split(/\,/, $args))
|
||||
{
|
||||
my $opt = 0;
|
||||
if ($arg =~ s/\?$//) { $opt = 1; }
|
||||
my $list = 0;
|
||||
if ($arg =~ s/\*$//) { $list = 1; }
|
||||
my $a = { 'name' => $arg };
|
||||
if ($opt) { $a->{'optional'} = 1; }
|
||||
if ($list) { $a->{'list'} = 1; }
|
||||
$a->{'des'} = $f->{"des-$arg"} || $common_args{$arg};
|
||||
delete $f->{"des-$arg"};
|
||||
unless ($a->{'des'}) {
|
||||
if ($opt_warn) {
|
||||
print "Warning: undescribed argument '$arg' in $a->{'name'}\n";
|
||||
}
|
||||
}
|
||||
push @{$f->{'args'}}, $a;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
268
wcmtools/bin/multicvs.pl
Executable file
@@ -0,0 +1,268 @@
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
|
||||
use strict;
|
||||
use Getopt::Long;
|
||||
|
||||
$| = 1;
|
||||
|
||||
my $help = 0;
|
||||
my $sync = 0;
|
||||
my $diff = 0;
|
||||
my $cvsonly = 0;
|
||||
my $liveonly = 0;
|
||||
my $init = 0;
|
||||
my $conf;
|
||||
my $opt_update;
|
||||
my $opt_justfiles;
|
||||
my $opt_ignore_space;
|
||||
my $these_flag;
|
||||
|
||||
exit 1 unless GetOptions('conf=s' => \$conf,
|
||||
'help' => \$help,
|
||||
'sync' => \$sync,
|
||||
'diff' => \$diff,
|
||||
'cvsonly|c' => \$cvsonly,
|
||||
'liveonly' => \$liveonly,
|
||||
'init' => \$init,
|
||||
'update' => \$opt_update,
|
||||
'justfiles|1' => \$opt_justfiles,
|
||||
'no-space-changes|b|w' => \$opt_ignore_space,
|
||||
'these|t' => \$these_flag,
|
||||
);
|
||||
|
||||
if ($help or not defined $conf) {
|
||||
die "Usage: multicvs.pl --conf=/path/to/multicvs.conf [opts] [files]\n" .
|
||||
" --help Get this help\n" .
|
||||
" --sync Put files where they need to go.\n" .
|
||||
" All files, unless you specify which ones.\n".
|
||||
" --diff Show diffs of changed files.\n".
|
||||
" --cvsonly Don't consider files changed in live dirs.\n".
|
||||
" --liveonly Don't consider files changed in the CVS dirs.\n".
|
||||
" --init Copy all files from cvs to main, unconditionally.\n" .
|
||||
" --update Updates files in the CVS dirs from the cvs repositories.\n".
|
||||
" --justfiles -1 Only output files, not the old -> new arrow. (good for xargs)\n".
|
||||
" --no-space-changes -b Do not display whitespace differences.\n".
|
||||
" --these -t Refuse to --sync if no files are specified.\n";
|
||||
}
|
||||
|
||||
if ($init) {
|
||||
$sync = 1;
|
||||
die "Can't set --liveonly or --cvsonly with --init\n"
|
||||
if $cvsonly or $liveonly;
|
||||
$diff = 0;
|
||||
}
|
||||
|
||||
unless (-e $conf) {
|
||||
die "Specified conf file doesn't exist: $conf\n";
|
||||
}
|
||||
|
||||
my ($DIR_LIVE, $DIR_CVS);
|
||||
my @paths;
|
||||
|
||||
my $read_conf = sub
|
||||
{
|
||||
my $file = shift;
|
||||
my $main = shift;
|
||||
|
||||
open (C, $file) or die "Error opening conf file.\n";
|
||||
while (<C>)
|
||||
{
|
||||
s/\#.*//;
|
||||
next unless /\S/;
|
||||
s/^\s+//;
|
||||
s/\s+$//;
|
||||
s/\$(\w+)/$ENV{$1} or die "Environment variable \$$1 not set.\n"/ge;
|
||||
|
||||
if (/(\w+)\s*=\s*(.+)/) {
|
||||
my ($k, $v) = ($1, $2);
|
||||
unless ($main) {
|
||||
die "Included config files can't set variables such as $k.\n";
|
||||
}
|
||||
if ($k eq "LIVEDIR") { $DIR_LIVE = $v; }
|
||||
elsif ($k eq "CVSDIR") { $DIR_CVS = $v; }
|
||||
else { die "Unknown option $k = $v\n"; }
|
||||
next;
|
||||
}
|
||||
|
||||
if (/(\S+)\s+(.+)/) {
|
||||
my ($from, $to) = ($1, $2);
|
||||
my $maybe = 0;
|
||||
if ($from =~ s/\?$//) { $maybe = 1; }
|
||||
push @paths, {
|
||||
'from' => $from,
|
||||
'to' => $to,
|
||||
'maybe' => $maybe,
|
||||
};
|
||||
} else {
|
||||
die "Bogus line: $_\n";
|
||||
}
|
||||
}
|
||||
close C;
|
||||
};
|
||||
$read_conf->($conf, 1);
|
||||
|
||||
if ($conf =~ /^(.+)(multicvs\.conf)$/) {
|
||||
my $localconf = "$1multicvs-local.conf";
|
||||
$read_conf->($localconf) if -e $localconf;
|
||||
}
|
||||
|
||||
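# A minimal multicvs.conf for the reader above might look like this
# (directory names and mappings are illustrative; a trailing '?' marks a
# source path that is allowed to be missing):
#
#   LIVEDIR = $LJHOME
#   CVSDIR  = $LJHOME/cvs
#
#   wcmtools/bin            bin
#   livejournal/htdocs      htdocs
#   local?                  .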
my %cvspath; # live path -> cvs path
|
||||
my %have_updated;
|
||||
|
||||
foreach my $p (@paths)
|
||||
{
|
||||
unless (-e "$DIR_CVS/$p->{'from'}") {
|
||||
warn "WARNING: $p->{'from'} doesn't exist under $DIR_CVS\n"
|
||||
unless $p->{'maybe'};
|
||||
next;
|
||||
}
|
||||
|
||||
if ($opt_update) {
|
||||
my $root = $p->{'from'};
|
||||
$root =~ s!/.*!!;
|
||||
my $dir = "$DIR_CVS/$root";
|
||||
if (-d $dir && ! $have_updated{$dir}) {
|
||||
chdir $dir or die "Can't cd to $dir\n";
|
||||
print "Updating CVS dir '$root' ...\n";
|
||||
system("cvs", "update", "-dP");
|
||||
$have_updated{$dir} = 1;
|
||||
}
|
||||
}
|
||||
|
||||
if (-f "$DIR_CVS/$p->{'from'}") {
|
||||
$cvspath{$p->{'to'}} = $p->{'from'};
|
||||
next;
|
||||
}
|
||||
|
||||
$p->{'to'} =~ s!/$!!;
|
||||
my $to_prefix = "$p->{'to'}/";
|
||||
$to_prefix =~ s!^\./!!;
|
||||
|
||||
my @dirs = ($p->{'from'});
|
||||
while (@dirs)
|
||||
{
|
||||
my $dir = shift @dirs;
|
||||
my $fulldir = "$DIR_CVS/$dir";
|
||||
|
||||
opendir (MD, $fulldir) or die "Can't open $fulldir.";
|
||||
while (my $file = readdir(MD)) {
|
||||
next if ($file =~ /~$/); # ignore emacs files
|
||||
next if ($file =~ /^\.\#/); # ignore CVS archived versions
|
||||
next if ($file =~ /\bCVS\b/);
|
||||
next if $file eq "." or $file eq "..";
|
||||
if (-d "$fulldir/$file") {
|
||||
unshift @dirs, "$dir/$file";
|
||||
} elsif (-f "$fulldir/$file") {
|
||||
my $to = "$dir/$file";
|
||||
$to =~ s!^$p->{'from'}/!!;
|
||||
$cvspath{"$to_prefix$to"} = "$dir/$file";
|
||||
}
|
||||
}
|
||||
close MD;
|
||||
}
|
||||
}
|
||||
|
||||
# If the user has specified that there must be arguments, require @ARGV to
|
||||
# contain something.
|
||||
die "These what?\n\nWith --these specified, you must provide at least one file to sync.\n"
|
||||
if $these_flag && $sync && !@ARGV;
|
||||
|
||||
my @files = scalar(@ARGV) ? @ARGV : sort keys %cvspath;
|
||||
foreach my $relfile (@files)
|
||||
{
|
||||
my $status;
|
||||
next unless exists $cvspath{$relfile};
|
||||
my $root = $cvspath{$relfile};
|
||||
$root =~ s!/.*!!;
|
||||
|
||||
my ($from, $to); # if set, do action (diff and/or sync)
|
||||
|
||||
my $lfile = "$DIR_LIVE/$relfile";
|
||||
my $cfile = "$DIR_CVS/$cvspath{$relfile}";
|
||||
|
||||
if ($init) {
|
||||
$status = "main <- $root";
|
||||
($from, $to) = ($cfile, $lfile);
|
||||
} else {
|
||||
my $ltime = mtime($lfile);
|
||||
my $ctime = mtime($cfile);
|
||||
next if $ltime == $ctime;
|
||||
if ($ltime > $ctime && ! $cvsonly) {
|
||||
$status = "main -> $root";
|
||||
($from, $to) = ($lfile, $cfile);
|
||||
}
|
||||
if ($ctime > $ltime && ! $liveonly) {
|
||||
$status = "main <- $root";
|
||||
($from, $to) = ($cfile, $lfile);
|
||||
}
|
||||
}
|
||||
|
||||
next unless $status;
|
||||
|
||||
my $the_diff;
|
||||
if ($diff && -e $from && -e $to) {
|
||||
my $opt;
|
||||
$opt = '-b' if $opt_ignore_space;
|
||||
$the_diff = `diff -u $opt $to $from`; # getting from destination to source
|
||||
if ($the_diff) {
|
||||
# fix the -p level to be -p0
|
||||
my $slashes = ($DIR_LIVE =~ tr!/!/!);
|
||||
$the_diff =~ s/((^|\n)[\-\+]{3,3} )\/([^\/]+?\/){$slashes,$slashes}/$1/g;
|
||||
} else {
|
||||
# don't touch the files that don't have a diff if we're ignoring spaces
|
||||
# as there might really be one and we just don't see it
|
||||
next if $opt_ignore_space;
|
||||
|
||||
# no real change (just touched/copied?), so copy
|
||||
# cvs one on top to fix times up.
|
||||
copy($from, $to);
|
||||
next;
|
||||
}
|
||||
}
|
||||
if ($sync) {
|
||||
make_dirs($relfile);
|
||||
copy($from, $to);
|
||||
}
|
||||
|
||||
if ($opt_justfiles) {
|
||||
print "$relfile\n";
|
||||
} else {
|
||||
printf "%-25s %s\n", $status, $relfile;
|
||||
print $the_diff;
|
||||
}
|
||||
}
|
||||
|
||||
sub mtime
|
||||
{
|
||||
my $file = shift;
|
||||
return (stat($file))[9];
|
||||
}
|
||||
|
||||
my %MADE_DIR;
|
||||
sub make_dirs
|
||||
{
|
||||
my $file = shift;
|
||||
return 1 unless $file =~ s!/[^/]*$!!;
|
||||
return 1 if $MADE_DIR{$file};
|
||||
my @dirs = split(m!/!, $file);
|
||||
for (my $i=0; $i<scalar(@dirs); $i++) {
|
||||
my $sd = join("/", @dirs[0..$i]);
|
||||
my $makedir = "$DIR_LIVE/$sd";
|
||||
unless (-d $makedir) {
|
||||
mkdir $makedir, 0755
|
||||
or die "Couldn't make directory $makedir\n";
|
||||
}
|
||||
}
|
||||
$MADE_DIR{$file} = 1;
|
||||
}
|
||||
|
||||
# was using perl's File::Copy, but I want to preserve the file time.
|
||||
sub copy
|
||||
{
|
||||
my ($src, $dest) = @_;
|
||||
my $ret = system("cp", "-p", $src, $dest);
|
||||
return ($ret == 0);
|
||||
}
|
||||
|
||||
__END__
|
||||
182
wcmtools/blobserver/lib/Apache/Blob.pm
Executable file
@@ -0,0 +1,182 @@
|
||||
#!/usr/bin/perl
|
||||
# vim:ts=4 sw=4 et:
|
||||
|
||||
package Apache::Blob;
|
||||
|
||||
use strict;
|
||||
use File::Path;
use File::Basename ();   # make_dirs() below calls File::Basename::dirname()
|
||||
use Fcntl ':flock';
|
||||
use Apache::Constants qw(:common HTTP_BAD_REQUEST HTTP_NO_CONTENT M_GET M_PUT M_DELETE);
|
||||
use lib "$ENV{'BLOBHOME'}";
|
||||
|
||||
my $ROOT = "$ENV{'BLOBHOME'}/root";
|
||||
|
||||
sub handler
|
||||
{
|
||||
my $r = shift;
|
||||
$r->set_handlers(PerlTransHandler => [ \&trans ]);
|
||||
return OK;
|
||||
}
|
||||
|
||||
sub trans
|
||||
{
|
||||
my $r = shift;
|
||||
my $uri = $r->uri;
|
||||
|
||||
my $path = $ROOT . $uri;
|
||||
|
||||
if ($r->method_number == M_GET) {
|
||||
# get requests just go through to the file system.
|
||||
$r->handler("perl-script");
|
||||
$r->push_handlers(PerlHandler => sub {
|
||||
my $r = shift;
|
||||
# let apache handle it.
|
||||
$r->filename($path);
|
||||
return DECLINED;
|
||||
});
|
||||
return OK;
|
||||
} elsif ($r->method_number == M_PUT ||
|
||||
$r->method_number == M_DELETE) {
|
||||
# /cluster/u1/u2/u3/type/m1/m2
|
||||
# 1 2 3 4 5 6 7
|
||||
return HTTP_BAD_REQUEST unless $uri =~ m#^/\d+/\d+/\d+/\d+/\w+/\d+/\d+\.\w+$#;
|
||||
$r->handler("perl-script");
|
||||
$r->push_handlers(PerlHandler => sub {
|
||||
my $r = shift;
|
||||
return delete_blob($r) if $r->method_number == M_DELETE;
|
||||
return HTTP_NO_CONTENT if $r->method_number == M_PUT && save_blob($r, $path);
|
||||
return SERVER_ERROR;
|
||||
});
|
||||
return OK;
|
||||
}
|
||||
return HTTP_BAD_REQUEST;
|
||||
}
|
||||
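# For illustration, any HTTP client can exercise this handler; the host,
# IDs and extension below are made up and only need to match the URI
# pattern checked above:
#
#   use LWP::UserAgent;
#   use HTTP::Request;
#
#   my $ua  = LWP::UserAgent->new;
#   my $url = "http://blobserver/3/12/34/56/userpic/7/8.jpg";
#
#   my $put = HTTP::Request->new(PUT => $url);
#   $put->content($raw_bytes);      # save_blob() checks Content-Length
#   my $res = $ua->request($put);   # expect 204 No Content on success
#
#   $ua->get($url);                 # GETs are served straight off the filesystem
#   $ua->request(HTTP::Request->new(DELETE => $url));   # 204, file removed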
|
||||
# directory listing
|
||||
# sub dir_trans
|
||||
# {
|
||||
# my ($r, $uri) = @_;
|
||||
# if ($uri =~ m#^/(\d+)/(\d+)/(\w+)/?$#) {
|
||||
# my ($cid, $uid) = ($1, $2, $3);
|
||||
# $r->handler("perl-script");
|
||||
# $r->notes(dir => make_path($cid, $uid));
|
||||
# $r->push_handlers(PerlHandler => \&dirlisting);
|
||||
# return OK;
|
||||
# }
|
||||
# if ($uri =~ m#^/(\d+)/(\d+)/?$#) {
|
||||
# my ($cid, $uid) = ($1, $2);
|
||||
# $r->handler("perl-script");
|
||||
# $r->notes(dir => make_path($cid, $uid));
|
||||
# $r->push_handlers(PerlHandler => \&dirlisting);
|
||||
# return OK;
|
||||
# }
|
||||
# return 400;
|
||||
# }
|
||||
|
||||
# sub dirlisting
|
||||
# {
|
||||
# my $r = shift;
|
||||
# return 404 unless (opendir(DIR, $r->notes('dir')));
|
||||
# $r->content_type("text/plain");
|
||||
# $r->send_http_header();
|
||||
# foreach my $f (readdir(DIR)) {
|
||||
# next if $f eq '.' or $f eq '..';
|
||||
# $r->print("$f\n");
|
||||
# }
|
||||
# closedir(DIR);
|
||||
# return OK;
|
||||
# }
|
||||
|
||||
# blob access
|
||||
# sub blob_trans
|
||||
# {
|
||||
# my ($r, $uri, $cid, $uid, $mid) = @_;
|
||||
# my $path = make_path($cid, $uid, $mid);
|
||||
#
|
||||
# if ($r->method_number == M_PUT) {
|
||||
# } else {
|
||||
# return 404 unless -r $path;
|
||||
# $r->handler("perl-script");
|
||||
# $r->push_handlers(PerlHandler => sub {
|
||||
# my $r = shift;
|
||||
#
|
||||
# # these content-types aren't exactly correct.
|
||||
# if ($blobtype eq 'audio') {
|
||||
# $r->content_type("audio/mp3");
|
||||
# } else {
|
||||
# $r->content_type("application/octet-stream");
|
||||
# }
|
||||
# $r->send_http_header();
|
||||
#
|
||||
# # let apache handle sending the file.
|
||||
# $r->filename($path);
|
||||
# return DECLINED;
|
||||
# });
|
||||
# }
|
||||
# }
|
||||
|
||||
sub make_dirs
|
||||
{
|
||||
my $filename = shift;
|
||||
my $dir = File::Basename::dirname($filename);
|
||||
eval { File::Path::mkpath($dir, 0, 0775); };
|
||||
return $@ ? 0 : 1;
|
||||
}
|
||||
|
||||
sub save_blob
|
||||
{
|
||||
my ($r, $path) = @_;
|
||||
|
||||
my $length = $r->header_in("Content-Length");
|
||||
|
||||
make_dirs($path);
|
||||
open(FILE, ">$path.tmp") or die "couldn't make $path";
|
||||
binmode(FILE);
|
||||
flock(FILE, LOCK_EX) or die "couldn't lock";
|
||||
|
||||
my ($buff, $lastsize);
|
||||
my $got = 0;
|
||||
my $nextread = 4096;
|
||||
$r->soft_timeout("save_blob"); # ?
|
||||
while ($got <= $length && ($lastsize = $r->read_client_block($buff, $nextread))) {
|
||||
$r->reset_timeout;
|
||||
$got += $lastsize;
|
||||
print FILE $buff;
|
||||
if ($length - $got < 4096) { $nextread = $length - $got; }
|
||||
}
|
||||
$r->kill_timeout;
|
||||
|
||||
flock(FILE, LOCK_UN) or die "couldn't unlock";
|
||||
close(FILE) or die "couldn't close";
|
||||
|
||||
if ($got != $length) {
|
||||
unlink("$path.tmp");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (-s "$path.tmp" == $length) {
|
||||
return 1 if rename("$path.tmp", $path);
|
||||
}
|
||||
|
||||
unlink("$path.tmp");
|
||||
return 0;
|
||||
}
|
||||
|
||||
sub delete_blob
|
||||
{
|
||||
my $r = shift;
|
||||
my $uri = $r->uri;
|
||||
my $path = $ROOT . $uri;
|
||||
return NOT_FOUND unless -e $path;
|
||||
|
||||
unlink($path) or return SERVER_ERROR;
|
||||
|
||||
for (1..2) {
|
||||
next unless $uri =~ s!/[^/]+$!!;
|
||||
$path = $ROOT . $uri;
|
||||
last unless rmdir $path;
|
||||
}
|
||||
|
||||
return HTTP_NO_CONTENT;
|
||||
}
|
||||
|
||||
16
wcmtools/blobserver/lib/modperl.pl
Executable file
@@ -0,0 +1,16 @@
#!/usr/bin/perl
#

use strict;
use lib "$ENV{'BLOBHOME'}/lib";
use Apache;

Apache->httpd_conf(qq{
    PerlInitHandler +Apache::Blob
});

# delete this file from %INC to ensure it's reloaded
# after restarts
delete $INC{"$ENV{'BLOBHOME'}/lib/modperl.pl"};

1;
363
wcmtools/ddlockd/api/perl/DDLockClient.pm
Executable file
@@ -0,0 +1,363 @@
|
||||
#!/usr/bin/perl
|
||||
###########################################################################
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DDLockClient - Client library for distributed lock daemon
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
use DDLockClient ();
|
||||
|
||||
my $cl = new DDLockClient (
|
||||
servers => ['locks.localnet:7004', 'locks2.localnet:7002', 'localhost']
|
||||
);
|
||||
|
||||
# Do something that requires locking
|
||||
if ( my $lock = $cl->trylock("foo") ) {
|
||||
...do some 'foo'-synchronized stuff...
|
||||
} else {
|
||||
die "Failed to lock 'foo': $!";
|
||||
}
|
||||
|
||||
# You can either just let $lock go out of scope or explicitly release it:
|
||||
$lock->release;
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
This is a client library for ddlockd, a distributed lock daemon not entirely
|
||||
unlike a very simplified version of the CPAN module IPC::Locker.
|
||||
|
||||
=head1 REQUIRES
|
||||
|
||||
L<Socket>
|
||||
|
||||
=head1 EXPORTS
|
||||
|
||||
Nothing.
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Brad Fitzpatrick <brad@danga.com>
|
||||
|
||||
Copyright (c) 2004 Danga Interactive, Inc.
|
||||
|
||||
=cut
|
||||
|
||||
###########################################################################
|
||||
|
||||
#####################################################################
|
||||
### D D L O C K C L A S S
|
||||
#####################################################################
|
||||
package DDLock;
|
||||
|
||||
BEGIN {
|
||||
use Socket qw{:DEFAULT :crlf};
|
||||
use IO::Socket::INET ();
|
||||
|
||||
use constant DEFAULT_PORT => 7002;
|
||||
|
||||
use fields qw( name sockets pid );
|
||||
}
|
||||
|
||||
|
||||
|
||||
### (CONSTRUCTOR) METHOD: new( $name, @servers )
### Create a new lock object that corresponds to the specified I<name> and is
### held on one or more of the given I<servers>.
|
||||
sub new {
|
||||
my DDLock $self = shift;
|
||||
$self = fields::new( $self ) unless ref $self;
|
||||
|
||||
$self->{pid} = $$;
|
||||
$self->{name} = shift;
|
||||
$self->{sockets} = $self->getlocks( $self->{name}, @_ );
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
### (PROTECTED) METHOD: getlocks( $lockname, @servers )
|
||||
### Try to obtain locks with the specified I<lockname> from one or more of the
|
||||
### given I<servers>.
|
||||
sub getlocks {
|
||||
my DDLock $self = shift;
|
||||
my $lockname = shift;
|
||||
my @servers = @_;
|
||||
|
||||
my (
|
||||
@sockets,
|
||||
$sock,
|
||||
$res,
|
||||
);
|
||||
|
||||
# First create connected sockets to all the lock hosts
|
||||
@sockets = ();
|
||||
SERVER: foreach my $server ( @servers ) {
|
||||
my ( $host, $port ) = split /:/, $server;
|
||||
$port ||= DEFAULT_PORT;
|
||||
|
||||
my $sock = new IO::Socket::INET (
|
||||
PeerAddr => $host,
|
||||
PeerPort => $port,
|
||||
Proto => "tcp",
|
||||
Type => SOCK_STREAM,
|
||||
ReuseAddr => 1,
|
||||
Blocking => 1,
|
||||
) or next SERVER;
|
||||
|
||||
$sock->printf( "trylock lock=%s%s", eurl($lockname), CRLF );
|
||||
chomp( $res = <$sock> );
|
||||
die "$server: '$lockname' $res\n" unless $res =~ m{^ok\b}i;
|
||||
|
||||
push @sockets, $sock;
|
||||
}
|
||||
|
||||
die "No available lock hosts" unless @sockets;
|
||||
return \@sockets;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: release()
|
||||
### Release the lock held by the lock object. Returns the number of sockets that
|
||||
### were released on success, and dies with an error on failure.
|
||||
sub release {
|
||||
my DDLock $self = shift;
|
||||
|
||||
my (
|
||||
$count,
|
||||
$res,
|
||||
$sock,
|
||||
);
|
||||
|
||||
# lock server might have gone away, but we don't really care.
|
||||
local $SIG{'PIPE'} = "IGNORE";
|
||||
|
||||
$count = 0;
|
||||
while (( $sock = shift @{$self->{sockets}} )) {
|
||||
$sock->printf( "releaselock lock=%s%s", eurl($self->{name}), CRLF );
|
||||
chomp( $res = <$sock> );
|
||||
|
||||
if ( $res && $res !~ m{^ok\b}i ) {
|
||||
my $port = $sock->peerport;
|
||||
my $addr = $sock->peerhost;
|
||||
die "releaselock ($addr): $res\n";
|
||||
}
|
||||
|
||||
$count++;
|
||||
}
|
||||
|
||||
return $count;
|
||||
}
|
||||
|
||||
|
||||
### FUNCTION: eurl( $arg )
|
||||
### URL-encode the given I<arg> and return it.
|
||||
sub eurl
|
||||
{
|
||||
my $a = $_[0];
|
||||
$a =~ s/([^a-zA-Z0-9_,.\\: -])/uc sprintf("%%%02x",ord($1))/eg;
|
||||
$a =~ tr/ /+/;
|
||||
return $a;
|
||||
}
|
||||
|
||||
|
||||
|
||||
#####################################################################
|
||||
### D D F I L E L O C K C L A S S
|
||||
#####################################################################
|
||||
package DDFileLock;
|
||||
|
||||
BEGIN {
|
||||
use Fcntl qw{:DEFAULT :flock};
|
||||
use File::Spec qw{};
|
||||
use File::Path qw{mkpath};
|
||||
use IO::File qw{};
|
||||
|
||||
use fields qw{name path tmpfile pid};
|
||||
}
|
||||
|
||||
|
||||
our $TmpDir = File::Spec->tmpdir;
|
||||
|
||||
### (CONSTRUCTOR) METHOD: new( $lockname )
|
||||
### Create a new file-based lock with the specified I<lockname>.
|
||||
sub new {
|
||||
my DDFileLock $self = shift;
|
||||
$self = fields::new( $self ) unless ref $self;
|
||||
my ( $name, $lockdir ) = @_;
|
||||
|
||||
$self->{pid} = $$;
|
||||
|
||||
$lockdir ||= $TmpDir;
|
||||
if ( ! -d $lockdir ) {
|
||||
# Croaks if it fails, so no need for error-checking
|
||||
mkpath $lockdir;
|
||||
}
|
||||
|
||||
my $lockfile = File::Spec->catfile( $lockdir, eurl($name) );
|
||||
|
||||
# First open a temp file
|
||||
my $tmpfile = "$lockfile.$$.tmp";
|
||||
if ( -e $tmpfile ) {
|
||||
unlink $tmpfile or die "unlink: $tmpfile: $!";
|
||||
}
|
||||
|
||||
my $fh = new IO::File $tmpfile, O_WRONLY|O_CREAT|O_EXCL
|
||||
or die "open: $tmpfile: $!";
|
||||
$fh->close;
|
||||
undef $fh;
|
||||
|
||||
# Now try to make a hard link to it
|
||||
link( $tmpfile, $lockfile )
|
||||
or die "link: $tmpfile -> $lockfile: $!";
|
||||
unlink $tmpfile or die "unlink: $tmpfile: $!";
|
||||
|
||||
$self->{path} = $lockfile;
|
||||
$self->{tmpfile} = $tmpfile;
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: release()
|
||||
### Release the lock held by the object.
|
||||
sub release {
|
||||
my DDFileLock $self = shift;
|
||||
return unless $self->{path};
|
||||
unlink $self->{path} or die "unlink: $self->{path}: $!";
$self->{path} = undef;   # so a later DESTROY doesn't try to release again
unlink $self->{tmpfile} if -e $self->{tmpfile};
return 1;
|
||||
}
|
||||
|
||||
|
||||
### FUNCTION: eurl( $arg )
|
||||
### URL-encode the given I<arg> and return it.
|
||||
sub eurl
|
||||
{
|
||||
my $a = $_[0];
|
||||
$a =~ s/([^a-zA-Z0-9_,.\\: -])/sprintf("%%%02X",ord($1))/eg;
|
||||
$a =~ tr/ /+/;
|
||||
return $a;
|
||||
}
|
||||
|
||||
|
||||
DESTROY {
|
||||
my $self = shift;
|
||||
$self->release if $$ == $self->{pid};
|
||||
}
|
||||
|
||||
|
||||
#####################################################################
|
||||
### D D L O C K C L I E N T C L A S S
|
||||
#####################################################################
|
||||
package DDLockClient;
|
||||
use strict;
|
||||
|
||||
BEGIN {
|
||||
use fields qw( servers lockdir );
|
||||
use vars qw{$Error};
|
||||
}
|
||||
|
||||
$Error = undef;
|
||||
|
||||
our $Debug = 0;
|
||||
|
||||
|
||||
### (CLASS) METHOD: DebugLevel( $level )
|
||||
sub DebugLevel {
|
||||
my $class = shift;
|
||||
|
||||
if ( @_ ) {
|
||||
$Debug = shift;
|
||||
if ( $Debug ) {
|
||||
*DebugMsg = *RealDebugMsg;
|
||||
} else {
|
||||
*DebugMsg = sub {};
|
||||
}
|
||||
}
|
||||
|
||||
return $Debug;
|
||||
}
|
||||
|
||||
|
||||
sub DebugMsg {}
|
||||
|
||||
|
||||
### (CLASS) METHOD: DebugMsg( $level, $format, @args )
|
||||
### Output a debugging message formed sprintf-style with I<format> and I<args>
|
||||
### if I<level> is greater than or equal to the current debugging level.
|
||||
sub RealDebugMsg {
|
||||
my ( $class, $level, $fmt, @args ) = @_;
|
||||
return unless $Debug >= $level;
|
||||
|
||||
chomp $fmt;
|
||||
printf STDERR ">>> $fmt\n", @args;
|
||||
}
|
||||
|
||||
|
||||
### (CONSTRUCTOR) METHOD: new( %args )
|
||||
### Create a new DDLockClient
|
||||
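### With an empty (or missing) C<servers> list, trylock() falls back to
### file-based DDFileLock locks, e.g. (directory is only an example):
###
###   my $cl = DDLockClient->new( lockdir => "/var/lock/ddlockd" );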
sub new {
|
||||
my DDLockClient $self = shift;
|
||||
my %args = @_;
|
||||
|
||||
$self = fields::new( $self ) unless ref $self;
|
||||
die "Servers argument must be an arrayref if specified"
|
||||
unless !exists $args{servers} || ref $args{servers} eq 'ARRAY';
|
||||
$self->{servers} = $args{servers} || [];
|
||||
$self->{lockdir} = $args{lockdir} || '';
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: trylock( $name )
|
||||
### Try to get a lock from the lock daemons with the specified I<name>. Returns
|
||||
### a DDLock object on success, and undef on failure.
|
||||
sub trylock {
|
||||
my DDLockClient $self = shift;
|
||||
my $lockname = shift;
|
||||
|
||||
my $lock;
|
||||
|
||||
# If there are servers to connect to, use a network lock
|
||||
if ( @{$self->{servers}} ) {
|
||||
$self->DebugMsg( 2, "Creating a new DDLock object." );
|
||||
$lock = eval { DDLock->new($lockname, @{$self->{servers}}) };
|
||||
}
|
||||
|
||||
# Otherwise use a file lock
|
||||
else {
|
||||
$self->DebugMsg( 2, "No servers configured: Creating a new DDFileLock object." );
|
||||
$lock = eval { DDFileLock->new($lockname, $self->{lockdir}) };
|
||||
}
|
||||
|
||||
# If no lock was acquired, fail and put the reason in $Error.
|
||||
unless ( $lock ) {
|
||||
return $self->lock_fail( $@ ) if $@;
|
||||
return $self->lock_fail( "Unknown failure." );
|
||||
}
|
||||
|
||||
return $lock;
|
||||
}
|
||||
|
||||
|
||||
### (PROTECTED) METHOD: lock_fail( $msg )
|
||||
### Set C<$Error> to the specified message and return undef.
|
||||
sub lock_fail {
|
||||
my DDLockClient $self = shift;
|
||||
my $msg = shift;
|
||||
|
||||
$Error = $msg;
|
||||
return undef;
|
||||
}
|
||||
|
||||
|
||||
1;
|
||||
|
||||
|
||||
# Local Variables:
|
||||
# mode: perl
|
||||
# c-basic-indent: 4
|
||||
# indent-tabs-mode: nil
|
||||
# End:
|
||||
5
wcmtools/ddlockd/api/perl/MANIFEST
Executable file
@@ -0,0 +1,5 @@
DDLockClient.pm
Makefile.PL
MANIFEST
t/00_require.t
testlock.pl
11
wcmtools/ddlockd/api/perl/MANIFEST.SKIP
Executable file
@@ -0,0 +1,11 @@
^#
\bCVS\b
^MANIFEST\.
^Makefile$
~$
\.html$
\.old$
^blib/
_blib$
^MakeMaker-\d
^\.exists
33
wcmtools/ddlockd/api/perl/Makefile.PL
Executable file
@@ -0,0 +1,33 @@
#!/usr/bin/perl
#
# Perl Makefile for DDLockClient/DDLock
# $Id: Makefile.PL,v 1.2 2004/05/27 22:08:51 deveiant Exp $
#
# Invoke with 'perl Makefile.PL'
#
# See ExtUtils::MakeMaker (3) for more information on how to influence
# the contents of the Makefile that is written
#

use ExtUtils::MakeMaker;
my $version = do { my @r = (q$Revision: 1.2 $ =~ /\d+/g); sprintf "%d."."%02d" x $#r, @r };

my %config = (
    NAME         => 'DDLockClient',
    VERSION      => "0." . $version,
    AUTHOR       => 'Brad Fitzpatrick <brad@danga.com>',
    ABSTRACT     => 'A lock client for the distributed lock daemon ddlockd',
    PREREQ_PM    => {
        Socket => 0,
    },
    dist => {
        CI           => "cvs commit",
        RCS_LABEL    => 'cvs tag r$(VERSION_SYM)',
        SUFFIX       => ".bz2",
        DIST_DEFAULT => 'all tardist',
        COMPRESS     => "bzip2",
    },
);

WriteMakefile( %config );
57
wcmtools/ddlockd/api/perl/stresslock.pl
Executable file
@@ -0,0 +1,57 @@
|
||||
#!/usr/bin/perl -w
|
||||
|
||||
use Fcntl;
|
||||
use lib "blib/lib";
|
||||
use DDLockClient ();
|
||||
use Data::Dumper ();
|
||||
|
||||
$Data::Dumper::Terse = 1;
|
||||
$Data::Dumper::Indent = 1;
|
||||
|
||||
$| = 1;
|
||||
|
||||
my $DDServers = [
|
||||
# 'localhost:7003',
|
||||
# 'localhost:7004',
|
||||
'localhost:7002',
|
||||
];
|
||||
|
||||
foreach my $servers ( $DDServers, [] ) {
|
||||
print "Creating client (@$servers)...";
|
||||
my $cl = new DDLockClient ( servers => $servers )
|
||||
or die $DDLockClient::Error;
|
||||
print "done:\n";
|
||||
|
||||
for ( my $i = 0; $i < 10; $i++ ) {
|
||||
if ( my $pid = fork ) {
|
||||
print "Created child: $pid\n";
|
||||
} else {
|
||||
for ( my $ct = 0; $ct < 150; $ct++ ) {
|
||||
my $rand = int(rand(10));
|
||||
#print "Trying to create lock 'lock$rand' lock in process $$...\n";
|
||||
if ( my $lock = $cl->trylock("lock$rand") ) {
|
||||
my $file = ".stressfile-$rand";
|
||||
my $fh = new IO::File $file, O_WRONLY|O_EXCL|O_CREAT;
|
||||
die "Couldn't create file $file: $!" unless $fh;
|
||||
$fh->close;
|
||||
unlink $file;
|
||||
}
|
||||
}
|
||||
exit 0;
|
||||
}
|
||||
}
|
||||
|
||||
while ((my $pid = wait) != -1) {
|
||||
if ($? == 0) {
|
||||
print "$pid is done, okay.\n";
|
||||
} else {
|
||||
die "$pid FAILED\n";
|
||||
}
|
||||
}
|
||||
|
||||
print "done.\n\n";
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
12
wcmtools/ddlockd/api/perl/t/00_require.t
Executable file
@@ -0,0 +1,12 @@
#!/usr/bin/perl -w

use strict;
use Test;

BEGIN { plan tests => 3 }

ok( eval { require DDLockClient; 1 } );
ok( exists $::{"DDLockClient::"} );
ok( exists $::{"DDLock::"} );
46
wcmtools/ddlockd/api/perl/testlock.pl
Executable file
@@ -0,0 +1,46 @@
|
||||
#!/usr/bin/perl -w
|
||||
|
||||
use lib "blib/lib";
|
||||
use DDLockClient ();
|
||||
use Data::Dumper ();
|
||||
|
||||
$Data::Dumper::Terse = 1;
|
||||
$Data::Dumper::Indent = 1;
|
||||
|
||||
$| = 1;
|
||||
|
||||
my $DDServers = [
|
||||
'localhost:7003',
|
||||
'localhost:7004',
|
||||
'localhost',
|
||||
];
|
||||
|
||||
foreach my $servers ( $DDServers, [] ) {
|
||||
print "Creating client...";
|
||||
my $cl = new DDLockClient ( servers => $servers )
|
||||
or die $DDLockClient::Error;
|
||||
print "done:\n";
|
||||
|
||||
print "Creating a 'foo' lock...";
|
||||
my $lock = $cl->trylock( "foo" )
|
||||
or print "Error: $DDLockClient::Error\n";
|
||||
print "done.\n";
|
||||
|
||||
if ( my $pid = fork ) {
|
||||
waitpid( $pid, 0 );
|
||||
} else {
|
||||
print "Trying to create a 'foo' lock in process $$...";
|
||||
my $lock2 = $cl->trylock( "foo" )
|
||||
or print "Error: $DDLockClient::Error\n";
|
||||
print "done:\n";
|
||||
exit;
|
||||
}
|
||||
|
||||
print "Releasing the 'foo' lock...";
|
||||
$lock->release or die;
|
||||
print "done.\n\n";
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
316
wcmtools/ddlockd/server/ddlockd
Executable file
@@ -0,0 +1,316 @@
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
# Danga's Distributed Lock Daemon
|
||||
#
|
||||
# Status: 2004-05-18: quick hack. not for production yet.
|
||||
#
|
||||
# Copyright 2004, Danga Interactive
|
||||
#
|
||||
# Authors:
|
||||
# Brad Fitzpatrick <brad@danga.com>
|
||||
#
|
||||
# License:
|
||||
# undecided.
|
||||
#
|
||||
|
||||
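# The protocol is one line per request; an example exchange (the lock name
# is made up, and error text may vary):
#
#   client A: trylock lock=foo
#   server:   OK
#   client B: trylock lock=foo
#   server:   ERR taken
#   client A: releaselock lock=foo
#   server:   OK
#
# See cmd_trylock/cmd_releaselock below for the full command set (also:
# locks, noop).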
use strict;
|
||||
use Getopt::Long;
|
||||
use Carp;
|
||||
use Danga::Socket;
|
||||
use IO::Socket::INET;
|
||||
use POSIX ();
|
||||
|
||||
use vars qw($DEBUG);
|
||||
$DEBUG = 0;
|
||||
|
||||
my (
|
||||
$daemonize,
|
||||
$nokeepalive,
|
||||
);
|
||||
my $conf_port = 7002;
|
||||
|
||||
Getopt::Long::GetOptions(
|
||||
'd|daemon' => \$daemonize,
|
||||
'p|port=i' => \$conf_port,
|
||||
'debug=i' => \$DEBUG,
|
||||
'n|no-keepalive' => \$nokeepalive,
|
||||
);
|
||||
|
||||
daemonize() if $daemonize;
|
||||
|
||||
use Socket qw(IPPROTO_TCP SO_KEEPALIVE TCP_NODELAY SOL_SOCKET);
|
||||
|
||||
# Linux-specific:
|
||||
use constant TCP_KEEPIDLE => 4; # Start keepalives after this period
|
||||
use constant TCP_KEEPINTVL => 5; # Interval between keepalives
|
||||
use constant TCP_KEEPCNT => 6; # Number of keepalives before death
|
||||
|
||||
$SIG{'PIPE'} = "IGNORE"; # handled manually
|
||||
|
||||
# establish SERVER socket, bind and listen.
|
||||
my $server = IO::Socket::INET->new(LocalPort => $conf_port,
|
||||
Type => SOCK_STREAM,
|
||||
Proto => IPPROTO_TCP,
|
||||
Blocking => 0,
|
||||
Reuse => 1,
|
||||
Listen => 10 )
|
||||
or die "Error creating socket: $@\n";
|
||||
|
||||
# Not sure if I'm crazy or not, but I can't see in strace where/how
|
||||
# Perl 5.6 sets blocking to 0 without this. In Perl 5.8, IO::Socket::INET
|
||||
# obviously sets it from watching strace.
|
||||
IO::Handle::blocking($server, 0);
|
||||
|
||||
my $accept_handler = sub {
|
||||
my $csock = $server->accept();
|
||||
return unless $csock;
|
||||
|
||||
printf("Listen child making a Client for %d.\n", fileno($csock))
|
||||
if $DEBUG;
|
||||
|
||||
IO::Handle::blocking($csock, 0);
|
||||
setsockopt($csock, IPPROTO_TCP, TCP_NODELAY, pack("l", 1)) or die;
|
||||
|
||||
# Enable keep alive
|
||||
unless ( $nokeepalive ) {
|
||||
(setsockopt($csock, SOL_SOCKET, SO_KEEPALIVE, pack("l", 1)) &&
|
||||
setsockopt($csock, IPPROTO_TCP, TCP_KEEPIDLE, pack("l", 30)) &&
|
||||
setsockopt($csock, IPPROTO_TCP, TCP_KEEPCNT, pack("l", 10)) &&
|
||||
setsockopt($csock, IPPROTO_TCP, TCP_KEEPINTVL, pack("l", 30)) &&
|
||||
1
|
||||
) || die "Couldn't set keep-alive settings on socket (Not on Linux?)";
|
||||
}
|
||||
|
||||
my $client = Client->new($csock);
|
||||
$client->watch_read(1);
|
||||
};
|
||||
|
||||
Client->OtherFds(fileno($server) => $accept_handler);
|
||||
Client->EventLoop();
|
||||
|
||||
sub daemonize {
|
||||
my($pid, $sess_id, $i);
|
||||
|
||||
## Fork and exit parent
|
||||
if ($pid = fork) { exit 0; }
|
||||
|
||||
## Detach ourselves from the terminal
|
||||
croak "Cannot detach from controlling terminal"
|
||||
unless $sess_id = POSIX::setsid();
|
||||
|
||||
## Prevent possibility of acquiring a controlling terminal
|
||||
$SIG{'HUP'} = 'IGNORE';
|
||||
if ($pid = fork) { exit 0; }
|
||||
|
||||
## Change working directory
|
||||
chdir "/";
|
||||
|
||||
## Clear file creation mask
|
||||
umask 0;
|
||||
|
||||
## Close open file descriptors
|
||||
close(STDIN);
|
||||
close(STDOUT);
|
||||
close(STDERR);
|
||||
|
||||
## Reopen stderr, stdout, stdin to /dev/null
|
||||
open(STDIN, "+>/dev/null");
|
||||
open(STDOUT, "+>&STDIN");
|
||||
open(STDERR, "+>&STDIN");
|
||||
}
|
||||
|
||||
#####################################################################
|
||||
### C L I E N T C L A S S
|
||||
#####################################################################
|
||||
package Client;
|
||||
|
||||
use Danga::Socket;
|
||||
use base 'Danga::Socket';
|
||||
use fields (
|
||||
'locks', # hashref of locks held by this connection. values are 1
|
||||
'read_buf',
|
||||
);
|
||||
|
||||
our (%holder); # hash of lock -> Client object holding it
|
||||
# TODO: our %waiters, lock -> arrayref of client waiters (waker should check not closed)
|
||||
|
||||
sub new {
|
||||
my Client $self = shift;
|
||||
$self = fields::new($self) unless ref $self;
|
||||
$self->SUPER::new( @_ );
|
||||
|
||||
$self->{locks} = {};
|
||||
$self->{read_buf} = '';
|
||||
return $self;
|
||||
}
|
||||
|
||||
# Client
|
||||
sub event_read {
|
||||
my Client $self = shift;
|
||||
|
||||
my $bref = $self->read(1024);
|
||||
return $self->close() unless defined $bref;
|
||||
$self->{read_buf} .= $$bref;
|
||||
|
||||
if ($self->{read_buf} =~ s/^(.+?)\r?\n//) {
|
||||
my $line = $1;
|
||||
$self->process_line( $line );
|
||||
}
|
||||
}
|
||||
|
||||
sub process_line {
|
||||
my Client $self = shift;
|
||||
my $line = shift;
|
||||
|
||||
if ($line =~ /^(\w+)\s*(.*)/) {
|
||||
my ($cmd, $args) = ($1, $2);
|
||||
$cmd = lc($cmd);
|
||||
|
||||
no strict 'refs';
|
||||
my $cmd_handler = *{"cmd_$cmd"}{CODE};
|
||||
if ($cmd_handler) {
|
||||
my $args = decode_url_args(\$args);
|
||||
$cmd_handler->($self, $args);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
return $self->err_line('unknown_command');
|
||||
}
|
||||
|
||||
sub close {
|
||||
my Client $self = shift;
|
||||
|
||||
foreach my $lock (keys %{$self->{locks}}) {
|
||||
_release_lock($self, $lock);
|
||||
}
|
||||
|
||||
$self->SUPER::close;
|
||||
}
|
||||
|
||||
sub _release_lock {
|
||||
my Client $self = shift;
|
||||
my $lock = shift;
|
||||
|
||||
# TODO: notify waiters
|
||||
delete $self->{locks}{$lock};
|
||||
delete $holder{$lock};
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
# Client
|
||||
sub event_err { my $self = shift; $self->close; }
|
||||
sub event_hup { my $self = shift; $self->close; }
|
||||
|
||||
|
||||
# gets a lock or fails with 'taken'
|
||||
sub cmd_trylock {
|
||||
my Client $self = shift;
|
||||
my $args = shift;
|
||||
|
||||
my $lock = $args->{lock};
|
||||
return $self->err_line("empty_lock") unless length($lock);
|
||||
return $self->err_line("taken") if defined $holder{$lock};
|
||||
|
||||
$holder{$lock} = $self;
|
||||
$self->{locks}{$lock} = 1;
|
||||
|
||||
return $self->ok_line();
|
||||
}
|
||||
|
||||
# releases a lock or fails with 'didnthave'
|
||||
sub cmd_releaselock {
|
||||
my Client $self = shift;
|
||||
my $args = shift;
|
||||
|
||||
my $lock = $args->{lock};
|
||||
return $self->err_line("empty_lock") unless length($lock);
|
||||
return $self->err_line("didnthave") unless $self->{locks}{$lock};
|
||||
|
||||
_release_lock($self, $lock);
|
||||
return $self->ok_line;
|
||||
}
|
||||
|
||||
# shows current locks
|
||||
sub cmd_locks {
|
||||
my Client $self = shift;
|
||||
my $args = shift;
|
||||
|
||||
$self->write("LOCKS:\n");
|
||||
foreach my $k (sort keys %holder) {
|
||||
$self->write(" $k = " . $holder{$k}->as_string . "\n");
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub cmd_noop {
|
||||
my Client $self = shift;
|
||||
# TODO: set self's last activity time so it isn't cleaned in a purge
|
||||
# of stale connections?
|
||||
return $self->ok_line;
|
||||
}
|
||||
|
||||
sub ok_line {
|
||||
my Client $self = shift;
|
||||
my $args = shift || {};
|
||||
my $argline = join('&', map { eurl($_) . "=" . eurl($args->{$_}) } keys %$args);
|
||||
$self->write("OK $argline\r\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub err_line {
|
||||
my Client $self = shift;
|
||||
my $err_code = shift;
|
||||
my $err_text = {
|
||||
'unknown_command' => "Unknown server command",
|
||||
}->{$err_code};
|
||||
|
||||
$self->write("ERR $err_code " . eurl($err_text) . "\r\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
sub eurl
|
||||
{
|
||||
my $a = $_[0];
|
||||
$a =~ s/([^a-zA-Z0-9_\,\-.\/\\\: ])/uc sprintf("%%%02x",ord($1))/eg;
|
||||
$a =~ tr/ /+/;
|
||||
return $a;
|
||||
}
|
||||
|
||||
sub durl
|
||||
{
|
||||
my ($a) = @_;
|
||||
$a =~ tr/+/ /;
|
||||
$a =~ s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
|
||||
return $a;
|
||||
}
|
||||
|
||||
sub decode_url_args
|
||||
{
|
||||
my $a = shift;
|
||||
my $buffer = ref $a ? $a : \$a;
|
||||
my $ret = {};
|
||||
|
||||
my $pair;
|
||||
my @pairs = split(/&/, $$buffer);
|
||||
my ($name, $value);
|
||||
foreach $pair (@pairs)
|
||||
{
|
||||
($name, $value) = split(/=/, $pair);
|
||||
$value =~ tr/+/ /;
|
||||
$value =~ s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
|
||||
$name =~ tr/+/ /;
|
||||
$name =~ s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
|
||||
$ret->{$name} .= $ret->{$name} ? "\0$value" : $value;
|
||||
}
|
||||
return $ret;
|
||||
}
|
||||
|
||||
|
||||
# Local Variables:
|
||||
# mode: perl
|
||||
# c-basic-indent: 4
|
||||
# indent-tabs-mode: nil
|
||||
# End:
|
||||
385
wcmtools/dinsertd/server/dinsertd
Executable file
@@ -0,0 +1,385 @@
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
# Danga's Delayed Insert Daemon
|
||||
#
|
||||
# ... because MySQL forces connections to be threads, limiting
|
||||
# total connections
|
||||
#
|
||||
# ... and because TCP makes it so easy to run out of local ports
|
||||
#
|
||||
# Status: 2004-12-22: experimental hack.
|
||||
#
|
||||
# Copyright 2004, Danga Interactive
|
||||
#
|
||||
# Authors:
|
||||
# Brad Fitzpatrick <brad@danga.com>
|
||||
#
|
||||
# License:
|
||||
# undecided.
|
||||
#
|
||||
|
||||
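# The protocol is line-based; an example exchange (table name and values
# are made up):
#
#   inserter:   insert dudata 123,45,67
#   server:     OK
#   subscriber: subscribe system
#   server:     ROW dudata 123,45,67     (rows are pushed as inserts arrive)
#
# See cmd_insert/cmd_subscribe below for the full command set (also:
# set_note, get_note, noop).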
use strict;
|
||||
use Getopt::Long;
|
||||
use Carp;
|
||||
use Danga::Socket;
|
||||
use IO::Socket::INET;
|
||||
use POSIX ();
|
||||
|
||||
use vars qw($DEBUG);
|
||||
$DEBUG = 0;
|
||||
|
||||
my (
|
||||
$daemonize,
|
||||
$nokeepalive,
|
||||
);
|
||||
my $conf_port = 7400;
|
||||
|
||||
Getopt::Long::GetOptions(
|
||||
'd|daemon' => \$daemonize,
|
||||
'p|port=i' => \$conf_port,
|
||||
'debug=i' => \$DEBUG,
|
||||
'n|no-keepalive' => \$nokeepalive,
|
||||
);
|
||||
|
||||
daemonize() if $daemonize;
|
||||
|
||||
use Socket qw(IPPROTO_TCP SO_KEEPALIVE TCP_NODELAY SOL_SOCKET);
|
||||
|
||||
# Linux-specific:
|
||||
use constant TCP_KEEPIDLE => 4; # Start keepalives after this period
|
||||
use constant TCP_KEEPINTVL => 5; # Interval between keepalives
|
||||
use constant TCP_KEEPCNT => 6; # Number of keepalives before death
|
||||
|
||||
$SIG{'PIPE'} = "IGNORE"; # handled manually
|
||||
|
||||
# establish SERVER socket, bind and listen.
|
||||
my $server = IO::Socket::INET->new(LocalPort => $conf_port,
|
||||
Type => SOCK_STREAM,
|
||||
Proto => IPPROTO_TCP,
|
||||
Blocking => 0,
|
||||
Reuse => 1,
|
||||
Listen => 10 )
|
||||
or die "Error creating socket: $@\n";
|
||||
|
||||
# Not sure if I'm crazy or not, but I can't see in strace where/how
|
||||
# Perl 5.6 sets blocking to 0 without this. In Perl 5.8, IO::Socket::INET
|
||||
# obviously sets it from watching strace.
|
||||
IO::Handle::blocking($server, 0);
|
||||
|
||||
my $accept_handler = sub {
|
||||
my $csock = $server->accept();
|
||||
return unless $csock;
|
||||
|
||||
printf("Listen child making a Client for %d.\n", fileno($csock))
|
||||
if $DEBUG;
|
||||
|
||||
IO::Handle::blocking($csock, 0);
|
||||
setsockopt($csock, IPPROTO_TCP, TCP_NODELAY, pack("l", 1)) or die;
|
||||
|
||||
# Enable keep alive
|
||||
unless ( $nokeepalive ) {
|
||||
(setsockopt($csock, SOL_SOCKET, SO_KEEPALIVE, pack("l", 1)) &&
|
||||
setsockopt($csock, IPPROTO_TCP, TCP_KEEPIDLE, pack("l", 30)) &&
|
||||
setsockopt($csock, IPPROTO_TCP, TCP_KEEPCNT, pack("l", 10)) &&
|
||||
setsockopt($csock, IPPROTO_TCP, TCP_KEEPINTVL, pack("l", 30)) &&
|
||||
1
|
||||
) || die "Couldn't set keep-alive settings on socket (Not on Linux?)";
|
||||
}
|
||||
|
||||
my $client = Client->new($csock);
|
||||
$client->watch_read(1);
|
||||
};
|
||||
|
||||
Client->Init;
|
||||
Danga::Socket->OtherFds(fileno($server) => $accept_handler);
|
||||
Danga::Socket->EventLoop();
|
||||
|
||||
sub daemonize {
|
||||
my($pid, $sess_id, $i);
|
||||
|
||||
## Fork and exit parent
|
||||
if ($pid = fork) { exit 0; }
|
||||
|
||||
## Detach ourselves from the terminal
|
||||
croak "Cannot detach from controlling terminal"
|
||||
unless $sess_id = POSIX::setsid();
|
||||
|
||||
## Prevent possibility of acquiring a controlling terminal
|
||||
$SIG{'HUP'} = 'IGNORE';
|
||||
if ($pid = fork) { exit 0; }
|
||||
|
||||
## Change working directory
|
||||
chdir "/";
|
||||
|
||||
## Clear file creation mask
|
||||
umask 0;
|
||||
|
||||
## Close open file descriptors
|
||||
close(STDIN);
|
||||
close(STDOUT);
|
||||
close(STDERR);
|
||||
|
||||
## Reopen stderr, stdout, stdin to /dev/null
|
||||
open(STDIN, "+>/dev/null");
|
||||
open(STDOUT, "+>&STDIN");
|
||||
open(STDERR, "+>&STDIN");
|
||||
}
|
||||
|
||||
#####################################################################
|
||||
### C L I E N T C L A S S
|
||||
#####################################################################
|
||||
package Client;
|
||||
|
||||
use strict;
|
||||
use Danga::Socket;
|
||||
use base 'Danga::Socket';
|
||||
use fields (
|
||||
'read_buf',
|
||||
'listen_queue_num', # undef, its own fd, or -1 if listening to system queue
|
||||
);
|
||||
|
||||
our %queue; # fd -> [ [ $table, $values ] ... ]
|
||||
our %note; # arbitrary client-generated key/value data
|
||||
|
||||
our @listeners; # client objects that are listening
|
||||
|
||||
our $MAX_QUEUE_DEPTH;
|
||||
|
||||
our $is_system_attached; # bool: if somebody is watching the system queue
|
||||
|
||||
sub Init {
|
||||
$MAX_QUEUE_DEPTH = 5000;
|
||||
|
||||
#fd=-1 is magic and is the system default queue, which always exists
|
||||
$queue{-1} = [];
|
||||
$is_system_attached = 0;
|
||||
}
|
||||
|
||||
sub new {
|
||||
my Client $self = shift;
|
||||
$self = fields::new($self) unless ref $self;
|
||||
$self->SUPER::new( @_ );
|
||||
|
||||
$self->{read_buf} = '';
|
||||
$self->{listen_queue_num} = undef;
|
||||
return $self;
|
||||
}
|
||||
|
||||
# Client
|
||||
sub event_read {
|
||||
my Client $self = shift;
|
||||
|
||||
my $bref = $self->read(1024);
|
||||
return $self->close() unless defined $bref;
|
||||
$self->{read_buf} .= $$bref;
|
||||
|
||||
if ($self->{read_buf} =~ s/^(.+?)\r?\n//) {
|
||||
my $line = $1;
|
||||
$self->process_line( $line );
|
||||
}
|
||||
}
|
||||
|
||||
sub event_write {
|
||||
my Client $self = shift;
|
||||
|
||||
# stop watching for writability if we're not subscribed to anything
|
||||
unless (defined $self->{listen_queue_num}) {
|
||||
$self->watch_write(0);
|
||||
return;
|
||||
}
|
||||
|
||||
my $q = $queue{$self->{listen_queue_num}};
|
||||
|
||||
while (@$q) {
|
||||
my $rec = shift @$q;
|
||||
next if $self->write("ROW $rec->[0] $rec->[1]\r\n");
|
||||
print " Buffer was full!\n";
|
||||
return;
|
||||
}
|
||||
$self->watch_write(0);
|
||||
}
|
||||
|
||||
sub process_line {
|
||||
my Client $self = shift;
|
||||
my $line = shift;
|
||||
|
||||
if ($line =~ /^(\w+)\s*(.*)/) {
|
||||
my ($cmd, $args) = ($1, $2);
|
||||
$cmd = lc($cmd);
|
||||
|
||||
no strict 'refs';
|
||||
my $cmd_handler = *{"cmd_$cmd"}{CODE};
|
||||
if ($cmd_handler) {
|
||||
$cmd_handler->($self, $args);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
return $self->err_line('unknown_command');
|
||||
}
|
||||
|
||||
# Client
|
||||
sub event_err { my $self = shift; $self->close; }
|
||||
sub event_hup { my $self = shift; $self->close; }
|
||||
|
||||
|
||||
# stores an arbitrary client-provided note (key/value pair)
|
||||
sub cmd_set_note {
|
||||
my Client $self = shift;
|
||||
my $args = shift;
|
||||
return $self->err_line("bogus_format")
|
||||
unless $args =~ /(\S+)\s+(.+)/;
|
||||
$note{$1} = $2;
|
||||
return $self->ok_line;
|
||||
}
|
||||
|
||||
# returns a previously stored note by key
|
||||
sub cmd_get_note {
|
||||
my Client $self = shift;
|
||||
my $args = shift;
|
||||
return $self->err_line("bogus_format")
|
||||
unless $args =~ /(\S+)/;
|
||||
|
||||
$self->write("NOTE $note{$1}\r\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
# queues a row (table + values) and wakes any subscribers
|
||||
sub cmd_insert {
|
||||
my Client $self = shift;
|
||||
my $args = shift;
|
||||
|
||||
return $self->err_line("bogus_format")
|
||||
unless $args =~ /(\w+)\s+(.+)/;
|
||||
|
||||
my $rec = [ $1, $2 ];
|
||||
foreach my $fd (keys %queue) {
|
||||
my $q = $queue{$fd};
|
||||
shift @$q if scalar @$q >= $MAX_QUEUE_DEPTH;
|
||||
push @$q, $rec;
|
||||
}
|
||||
|
||||
foreach (@listeners) {
|
||||
$_->watch_write(1);
|
||||
}
|
||||
|
||||
return $self->ok_line;
|
||||
}
|
||||
|
||||
sub close {
|
||||
my Client $self = shift;
|
||||
|
||||
# remove ourselves from the listeners array
|
||||
@listeners = grep { $_ != $self } @listeners;
|
||||
|
||||
# delete our queue, unless it's the system queue
|
||||
if (defined $self->{listen_queue_num}) {   # non-subscribers have no queue to clean up
    if ($self->{listen_queue_num} != -1) {
        delete $queue{$self->{listen_queue_num}};
    } else {
        $is_system_attached = 0;
    }
}
|
||||
|
||||
$self->SUPER::close;
|
||||
}
|
||||
|
||||
sub cmd_subscribe {
|
||||
my Client $self = shift;
|
||||
my $args = shift;
|
||||
|
||||
my $which_fd = undef;
|
||||
if ($args =~ /system/) {
|
||||
return $self->err_line("dup_sys") if $is_system_attached++;
|
||||
$which_fd = -1;
|
||||
} else {
|
||||
$which_fd = $self->{fd};
|
||||
}
|
||||
|
||||
$self->{listen_queue_num} = $which_fd;
|
||||
push @listeners, $self;
|
||||
|
||||
$queue{$which_fd} ||= [];
|
||||
$self->watch_write(1);
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
# shows current locks
|
||||
sub cmd_locks {
|
||||
my Client $self = shift;
|
||||
my $args = shift;
|
||||
|
||||
$self->write("LOCKS:\n");
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub cmd_noop {
|
||||
my Client $self = shift;
|
||||
# TODO: set self's last activity time so it isn't cleaned in a purge
|
||||
# of stale connections?
|
||||
return $self->ok_line;
|
||||
}
|
||||
|
||||
sub ok_line {
|
||||
my Client $self = shift;
|
||||
my $args = shift || {};
|
||||
my $argline = join('&', map { eurl($_) . "=" . eurl($args->{$_}) } keys %$args);
|
||||
$self->write("OK $argline\r\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub err_line {
|
||||
my Client $self = shift;
|
||||
my $err_code = shift;
|
||||
my $err_text = {
|
||||
'unknown_command' => "Unknown server command",
|
||||
'dup_sys' => "Can't have two listeners on the system log",
|
||||
}->{$err_code};
|
||||
|
||||
$self->write("ERR $err_code " . eurl($err_text) . "\r\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
sub eurl
|
||||
{
|
||||
my $a = $_[0];
|
||||
$a =~ s/([^a-zA-Z0-9_\,\-.\/\\\: ])/uc sprintf("%%%02x",ord($1))/eg;
|
||||
$a =~ tr/ /+/;
|
||||
return $a;
|
||||
}
|
||||
|
||||
sub durl
|
||||
{
|
||||
my ($a) = @_;
|
||||
$a =~ tr/+/ /;
|
||||
$a =~ s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
|
||||
return $a;
|
||||
}
|
||||
|
||||
sub decode_url_args
|
||||
{
|
||||
my $a = shift;
|
||||
my $buffer = ref $a ? $a : \$a;
|
||||
my $ret = {};
|
||||
|
||||
my $pair;
|
||||
my @pairs = split(/&/, $$buffer);
|
||||
my ($name, $value);
|
||||
foreach $pair (@pairs)
|
||||
{
|
||||
($name, $value) = split(/=/, $pair);
|
||||
$value =~ tr/+/ /;
|
||||
$value =~ s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
|
||||
$name =~ tr/+/ /;
|
||||
$name =~ s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
|
||||
$ret->{$name} .= $ret->{$name} ? "\0$value" : $value;
|
||||
}
|
||||
return $ret;
|
||||
}
|
||||
|
||||
|
||||
# Local Variables:
|
||||
# mode: perl
|
||||
# c-basic-indent: 4
|
||||
# indent-tabs-mode: nil
|
||||
# End:
|
||||
269
wcmtools/diskchecker/diskchecker.pl
Executable file
@@ -0,0 +1,269 @@
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
# Brad's el-ghetto do-our-storage-stacks-lie?-script
|
||||
#
|
||||
|
||||
sub usage {
|
||||
die <<'END';
|
||||
Usage: diskchecker.pl -s <server[:port]> verify <file>
|
||||
diskchecker.pl -s <server[:port]> create <file> <size_in_MB>
|
||||
diskchecker.pl -l [port]
|
||||
END
|
||||
}
|
||||
|
||||
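# Typical workflow (hostnames, path and size below are examples):
#
#   on a second, known-good box:   diskchecker.pl -l
#   on the box being tested:       diskchecker.pl -s goodhost create /mnt/test/scratch 500
#   ...power-cycle the tested box mid-run, bring it back up, then:
#                                  diskchecker.pl -s goodhost verify /mnt/test/scratch
#
# verify reports pages whose last acknowledged (post-fsync) write didn't
# actually survive.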
use strict;
|
||||
use IO::Socket::INET;
|
||||
use IO::Handle;
|
||||
use Getopt::Long;
|
||||
use Socket qw(IPPROTO_TCP TCP_NODELAY sockaddr_in inet_ntoa);
|
||||
|
||||
my $server;
|
||||
my $listen;
|
||||
usage() unless GetOptions('server=s' => \$server,
|
||||
'listen:5400' => \$listen);
|
||||
usage() unless $server || $listen;
|
||||
usage() if $server && $listen;
|
||||
|
||||
# LISTEN MODE:
|
||||
listen_mode($listen) if $listen;
|
||||
|
||||
# CLIENT MODE:
|
||||
my $LEN = 16 * 1024; # 16kB (same as InnoDB page)
|
||||
my $mode = shift;
|
||||
usage() unless $mode =~ /^(?:verify|create)$/;
|
||||
|
||||
my $file = shift or usage();
|
||||
my $size;
|
||||
if ($mode eq "create") {
|
||||
$size = shift or usage();
|
||||
}
|
||||
|
||||
$server .= ":5400" unless $server =~ /:/;
|
||||
|
||||
my $sock = IO::Socket::INET->new(PeerAddr => $server)
|
||||
or die "Couldn't connect to host:port of '$server'\n";
|
||||
|
||||
setsockopt($sock, IPPROTO_TCP, TCP_NODELAY, pack("l", 1)) or die;
|
||||
|
||||
create() if $mode eq "create";
|
||||
verify() if $mode eq "verify";
|
||||
exit 0;
|
||||
|
||||
sub verify {
|
||||
sendmsg($sock, "read");
|
||||
|
||||
my $error_ct = 0;
|
||||
my %error_ct;
|
||||
|
||||
my $size = -s $file;
|
||||
my $max_pages = int($size / $LEN);
|
||||
|
||||
my $percent;
|
||||
my $last_dump = 0;
|
||||
my $show_percent = sub {
|
||||
printf " verifying: %.02f%%\n", $percent;
|
||||
};
|
||||
|
||||
open (F, $file) or die "Couldn't open file $file for read\n";
|
||||
|
||||
while (<$sock>) {
|
||||
chomp;
|
||||
my ($page, $good, $val, $ago) = split(/\t/, $_);
|
||||
$percent = 100 * $page / ($max_pages || 1);
|
||||
|
||||
my $now = time;
|
||||
if ($last_dump != $now) {
|
||||
$last_dump = $now;
|
||||
$show_percent->();
|
||||
}
|
||||
|
||||
next unless $good;
|
||||
|
||||
my $offset = $page * $LEN;
|
||||
sysseek F, $offset, 0;
|
||||
my $buf;
|
||||
my $rv = sysread(F, $buf, $LEN);
|
||||
my $tobe = sprintf("%08x", $val) x ($LEN / 8);
|
||||
substr($tobe, $LEN-1, 1) = "\n";
|
||||
|
||||
unless ($buf eq $tobe) {
|
||||
$error_ct{$ago}++;
|
||||
$error_ct++;
|
||||
print " Error at page $page, $ago seconds before end.\n";
|
||||
}
|
||||
}
|
||||
$show_percent->();
|
||||
|
||||
print "Total errors: $error_ct\n";
|
||||
if ($error_ct) {
|
||||
print "Histogram of seconds before end:\n";
|
||||
foreach (sort { $a <=> $b } keys %error_ct) {
|
||||
printf " %4d %4d\n", $_, $error_ct{$_};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sub create {
|
||||
open (F, ">$file") or die "Couldn't open file $file\n";
|
||||
|
||||
my $ioh = IO::Handle->new_from_fd(fileno(F), "w")
|
||||
or die;
|
||||
|
||||
my $pages = int( ($size * 1024 * 1024) / $LEN );  # $size MB worth of 16kB pages
|
||||
|
||||
my %page_hit;
|
||||
my $pages_hit = 0;
|
||||
my $uniq_pages_hit = 0;
|
||||
my $start = time();
|
||||
my $last_dump = $start;
|
||||
|
||||
while (1) {
|
||||
my $rand = int rand 2000000;
|
||||
my $buf = sprintf("%08x", $rand) x ($LEN / 8);
|
||||
substr($buf, $LEN-1, 1) = "\n";
|
||||
|
||||
my $pagenum = int rand $pages;
|
||||
my $offset = $pagenum * $LEN;
|
||||
|
||||
sendmsg($sock, "pre\t$pagenum\t$rand");
|
||||
|
||||
# now wait for acknowledgement
|
||||
my $ok = readmsg($sock);
|
||||
die "didn't get 'ok' from server ($pagenum $rand), msg=[$ok] = $!" unless $ok eq "ok";
|
||||
|
||||
sysseek F,$offset,0;
|
||||
my $wv = syswrite(F, $buf, $LEN);
|
||||
die "return value wasn't $LEN\n" unless $wv == $LEN;
|
||||
$ioh->sync or die "couldn't do IO::Handle::sync"; # does fsync
|
||||
|
||||
sendmsg($sock, "post\t$pagenum\t$rand");
|
||||
|
||||
$pages_hit++;
|
||||
unless ($page_hit{$pagenum}++) {
|
||||
$uniq_pages_hit++;
|
||||
}
|
||||
|
||||
my $now = time;
|
||||
if ($now != $last_dump) {
|
||||
$last_dump = $now;
|
||||
my $runtime = $now - $start;
|
||||
printf(" diskchecker: running %d sec, %.02f%% coverage of %d MB (%d writes; %d/s)\n",
|
||||
$runtime,
|
||||
(100 * $uniq_pages_hit / $pages),
|
||||
$size,
|
||||
$pages_hit,
|
||||
$pages_hit / $runtime,
|
||||
);
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
sub readmsg {
|
||||
my $sock = shift;
|
||||
my $len;
|
||||
my $rv = sysread($sock, $len, 1);
|
||||
return undef unless $rv == 1;
|
||||
my $msg;
|
||||
$rv = sysread($sock, $msg, ord($len));
|
||||
return $msg;
|
||||
}
|
||||
|
||||
sub sendmsg {
|
||||
my ($sock, $msg) = @_;
|
||||
my $rv = syswrite($sock, chr(length($msg)) . $msg);
|
||||
my $expect = length($msg) + 1;
|
||||
die "sendmsg failed rv=$rv, expect=$expect" unless $rv == $expect;
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub listen_mode {
|
||||
my $port = shift;
|
||||
my $server = IO::Socket::INET->new(ReuseAddr => 1,
|
||||
Listen => 1,
|
||||
LocalPort => $port)
|
||||
or die "couldn't make server socket\n";
|
||||
|
||||
while (1) {
|
||||
print "[server] diskchecker.pl: waiting for connection...\n";
|
||||
my $sock = $server->accept()
|
||||
or die " die: no connection?";
|
||||
setsockopt($sock, IPPROTO_TCP, TCP_NODELAY, pack("l", 1)) or die;
|
||||
|
||||
fork and next;
|
||||
process_incoming_conn($sock);
|
||||
exit 0;
|
||||
}
|
||||
}
|
||||
|
||||
sub process_incoming_conn {
|
||||
my $sock = shift;
|
||||
my $peername = getpeername($sock) or
|
||||
die "connection not there?\n";
|
||||
my ($port, $iaddr) = sockaddr_in($peername);
|
||||
my $ip = inet_ntoa($iaddr);
|
||||
|
||||
my $file = "/tmp/$ip.diskchecker";
|
||||
die "[$ip] $file is a symlink" if -l $file;
|
||||
|
||||
print "[$ip] New connection\n";
|
||||
|
||||
my $lines = 0;
|
||||
my %state;
|
||||
my $end;
|
||||
|
||||
while (1) {
|
||||
if ($lines) {
|
||||
last unless wait_for_readability(fileno($sock), 3);
|
||||
}
|
||||
my $line = readmsg($sock);
|
||||
last unless $line;
|
||||
|
||||
if ($line eq "read") {
|
||||
print "[$ip] Sending state info from ${ip}'s last create.\n";
|
||||
open (S, "$file") or die "Couldn't open $file for reading.";
|
||||
while (<S>) {
|
||||
print $sock $_;
|
||||
}
|
||||
close S;
|
||||
print "[$ip] Done.\n";
|
||||
exit 0;
|
||||
}
|
||||
|
||||
$lines++;
|
||||
my $now = time;
|
||||
$end = $now;
|
||||
my ($state, $pagenum, $rand) = split(/\t/, $line);
|
||||
if ($state eq "pre") {
|
||||
$state{$pagenum} = [ 0, $rand+0, $now ];
|
||||
sendmsg($sock, "ok");
|
||||
} elsif ($state eq "post") {
|
||||
$state{$pagenum} = [ 1, $rand+0, $now ];
|
||||
}
|
||||
print "[$ip] $lines writes\n" if $lines % 1000 == 0;
|
||||
}
|
||||
|
||||
print "[$ip] Writing state file...\n";
|
||||
open (S, ">$file") or die "Couldn't open $file for writing.";
|
||||
foreach (sort { $a <=> $b } keys %state) {
|
||||
my $v = $state{$_};
|
||||
my $before_end = $end - $v->[2];
|
||||
print S "$_\t$v->[0]\t$v->[1]\t$before_end\n";
|
||||
}
|
||||
print "[$ip] Done.\n";
|
||||
}
|
||||
|
||||
sub wait_for_readability {
|
||||
my ($fileno, $timeout) = @_;
|
||||
return 0 unless $fileno && $timeout;
|
||||
|
||||
my $rin;
|
||||
vec($rin, $fileno, 1) = 1;
|
||||
my $nfound = select($rin, undef, undef, $timeout);
|
||||
|
||||
return 0 unless defined $nfound;
|
||||
return $nfound ? 1 : 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
11
wcmtools/dmtpd/README.txt
Executable file
11
wcmtools/dmtpd/README.txt
Executable file
@@ -0,0 +1,11 @@
This is a server to inject mail into Sendmail/Postfix/etc's outgoing
mail queue, without blocking the client (in our case, web nodes which
can't block on outgoing email).

Works with any MTA that provides a 'sendmail -i -f ...' interface.

This might all be temporary until we figure out mail better (like
how to get Postfix to trust our outgoing email and queue it
immediately, rather than blocking the web clients while it sends).
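A minimal client sketch (not part of this commit), based on the framing
used by api/perl/test.pl and the server below; the addresses are
placeholders and 7005 is the server's default port:

    #!/usr/bin/perl
    use strict;
    use MIME::Lite ();
    use IO::Socket::INET;

    my $msg = MIME::Lite->new('From'    => 'webnode@example.com',
                              'To'      => 'user@example.com',
                              'Subject' => "test",
                              'Data'    => "hello\n");
    my $as  = $msg->as_string;
    my $len = length($as);

    my $sock = IO::Socket::INET->new(PeerAddr => 'localhost',
                                     PeerPort => 7005,
                                     Proto    => 'tcp')
        or die "can't connect to dmtpd";

    # frame: headers, blank line, then exactly Content-Length bytes of message
    $sock->print("Content-Length: $len\r\n" .
                 "Envelope-Sender: webnode\@example.com\r\n\r\n$as");

    # the server answers one line per message: "OK" or "FAIL"
    my $res = $sock->getline;
    print "dmtpd said: $res";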
|
||||
|
||||
|
||||
42
wcmtools/dmtpd/api/perl/test.pl
Executable file
42
wcmtools/dmtpd/api/perl/test.pl
Executable file
@@ -0,0 +1,42 @@
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
|
||||
use strict;
|
||||
use MIME::Lite ();
|
||||
use IO::File;
|
||||
use IO::Socket::INET;
|
||||
|
||||
my $msg = new MIME::Lite ('From' => 'brad@danga.com (Brad Fitzpatrick)',
|
||||
'To' => 'brad@danga.com (Fitz)',
|
||||
'Cc' => 'brad@livejournal.com',
|
||||
'Subject' => "Subjecto el Email testo",
|
||||
'Data' => "word\n.\n\nthe end.\n");
|
||||
|
||||
my $as = $msg->as_string;
|
||||
my $len = length($as);
|
||||
|
||||
my $sock = IO::Socket::INET->new(PeerAddr => 'localhost',
|
||||
PeerPort => '7005',
|
||||
Proto => 'tcp');
|
||||
|
||||
my $message = "Content-Length: $len\r\nEnvelope-Sender: brad\@danga.com\r\n\r\n$as";
|
||||
|
||||
$sock->print("$message$message");
|
||||
|
||||
sleep 1;
|
||||
|
||||
$sock->print("Content-Len");
|
||||
sleep 1;
|
||||
$sock->print("gth: $len\r\nEnvelope-Sender: brad\@danga.com\r\n");
|
||||
sleep 1;
|
||||
$sock->print("\r\n${as}Content-Length: $len\r\nEnvelope-Sender: ");
|
||||
sleep 1;
|
||||
$sock->print("brad\@danga.com\r\n\r\n$as");
|
||||
|
||||
while ($_ = $sock->getline) {
|
||||
$_ =~ s/[\r\n]+$//;
|
||||
print "RES: $_\n";
|
||||
}
|
||||
$sock->close;
|
||||
|
||||
|
||||
231
wcmtools/dmtpd/server/dmtpd
Executable file
231
wcmtools/dmtpd/server/dmtpd
Executable file
@@ -0,0 +1,231 @@
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
# Danga's Mail Transfer Daemon
|
||||
#
|
||||
# Status: 2004-06-07: quick hack.
|
||||
#
|
||||
# Copyright 2004, Danga Interactive
|
||||
#
|
||||
# Authors:
|
||||
# Brad Fitzpatrick <brad@danga.com>
|
||||
#
|
||||
# License:
|
||||
# Artistic/GPL. Your choice.
|
||||
#
|
||||
|
||||
use strict;
|
||||
use Getopt::Long;
|
||||
use Carp;
|
||||
use Danga::Socket;
|
||||
use IO::Socket::INET;
|
||||
use POSIX ();
|
||||
|
||||
use vars qw($DEBUG);
|
||||
$DEBUG = 0;
|
||||
|
||||
my (
|
||||
$daemonize,
|
||||
);
|
||||
my $conf_port = 7005;
|
||||
|
||||
Getopt::Long::GetOptions(
|
||||
'd|daemon' => \$daemonize,
|
||||
'p|port=i' => \$conf_port,
|
||||
'debug=i' => \$DEBUG,
|
||||
);
|
||||
|
||||
daemonize() if $daemonize;
|
||||
|
||||
use Socket qw(IPPROTO_TCP SO_KEEPALIVE TCP_NODELAY SOL_SOCKET);
|
||||
|
||||
$SIG{'PIPE'} = "IGNORE"; # handled manually
|
||||
|
||||
# establish SERVER socket, bind and listen.
|
||||
my $server = IO::Socket::INET->new(LocalPort => $conf_port,
|
||||
Type => SOCK_STREAM,
|
||||
Proto => IPPROTO_TCP,
|
||||
Blocking => 0,
|
||||
Reuse => 1,
|
||||
Listen => 10 )
|
||||
or die "Error creating socket: $@\n";
|
||||
|
||||
# Not sure if I'm crazy or not, but I can't see in strace where/how
|
||||
# Perl 5.6 sets blocking to 0 without this. In Perl 5.8, IO::Socket::INET
|
||||
# obviously sets it from watching strace.
|
||||
IO::Handle::blocking($server, 0);
|
||||
|
||||
my $accept_handler = sub {
|
||||
my $csock = $server->accept();
|
||||
return unless $csock;
|
||||
|
||||
printf("Listen child making a Client for %d.\n", fileno($csock))
|
||||
if $DEBUG;
|
||||
|
||||
IO::Handle::blocking($csock, 0);
|
||||
setsockopt($csock, IPPROTO_TCP, TCP_NODELAY, pack("l", 1)) or die;
|
||||
|
||||
my $client = Client->new($csock);
|
||||
$client->watch_read(1);
|
||||
};
|
||||
|
||||
Client->OtherFds(fileno($server) => $accept_handler);
|
||||
Client->EventLoop();
|
||||
|
||||
sub daemonize {
|
||||
my($pid, $sess_id, $i);
|
||||
|
||||
## Fork and exit parent
|
||||
if ($pid = fork) { exit 0; }
|
||||
|
||||
## Detach ourselves from the terminal
|
||||
croak "Cannot detach from controlling terminal"
|
||||
unless $sess_id = POSIX::setsid();
|
||||
|
||||
## Prevent possibility of acquiring a controling terminal
|
||||
$SIG{'HUP'} = 'IGNORE';
|
||||
if ($pid = fork) { exit 0; }
|
||||
|
||||
## Change working directory
|
||||
chdir "/";
|
||||
|
||||
## Clear file creation mask
|
||||
umask 0;
|
||||
|
||||
## Close open file descriptors
|
||||
close(STDIN);
|
||||
close(STDOUT);
|
||||
close(STDERR);
|
||||
|
||||
## Reopen stderr, stdout, stdin to /dev/null
|
||||
open(STDIN, "+>/dev/null");
|
||||
open(STDOUT, "+>&STDIN");
|
||||
open(STDERR, "+>&STDIN");
|
||||
}
|
||||
|
||||
#####################################################################
|
||||
### C L I E N T C L A S S
|
||||
#####################################################################
|
||||
package Client;
|
||||
|
||||
use Danga::Socket;
|
||||
use base 'Danga::Socket';
|
||||
use fields (
|
||||
'headers', # hashref of header fields read
|
||||
'line', # partial line read so far
|
||||
'readcount', # how much into the message body we've read
|
||||
'sendmail', # IO::File pipe to sendmail
|
||||
'gotheaders', # bool: if we've finished reading headers
|
||||
'err', # bool: error has occurred so far
|
||||
);
|
||||
use Errno qw(EPIPE);
|
||||
use IO::File;
|
||||
|
||||
sub new {
|
||||
my Client $self = shift;
|
||||
$self = fields::new($self) unless ref $self;
|
||||
$self->SUPER::new( @_ );
|
||||
|
||||
$self->reset_for_next_message;
|
||||
return $self;
|
||||
}
|
||||
|
||||
sub reset_for_next_message {
|
||||
my Client $self = shift;
|
||||
$self->{line} = '';
|
||||
$self->{headers} = {};
|
||||
$self->{readcount} = 0;
|
||||
$self->{gotheaders} = 0;
|
||||
$self->{sendmail} = undef;
|
||||
$self->{err} = 0;
|
||||
return $self;
|
||||
}
|
||||
|
||||
# Client
|
||||
sub event_read {
|
||||
my Client $self = shift;
|
||||
my $bref = $self->read(8192);
|
||||
return $self->close() unless defined $bref;
|
||||
$self->process_read_buf($bref);
|
||||
}
|
||||
|
||||
sub process_read_buf {
|
||||
my Client $self = shift;
|
||||
my $bref = shift;
|
||||
|
||||
if (! $self->{gotheaders}) {
|
||||
$self->{line} .= $$bref;
|
||||
while ($self->{line} =~ s/^(.*?)\r?\n//) {
|
||||
my $line = $1;
|
||||
if ($line =~ /^(\S+)\s*:\s*(.+)/) {
|
||||
$self->{headers}{lc($1)} = $2;
|
||||
} elsif ($line eq "") {
|
||||
$self->{gotheaders} = 1;
|
||||
$self->{readcount} = 0;
|
||||
my $opts = "";
|
||||
my $h = $self->{headers};
|
||||
# pass the '-f' option to sendmail, if the given
|
||||
# Envelope-Sender header is clean
|
||||
if (my $es = $h->{'envelope-sender'}) {
|
||||
if ($es =~ /^[\w\-\+\.]+\@[\w\-\.]+$/) {
|
||||
$opts = "-f $es";
|
||||
}
|
||||
}
|
||||
unless ($self->{sendmail} =
|
||||
IO::File->new("| /usr/sbin/sendmail -t -i $opts")) {
|
||||
$self->{err} = 1;
|
||||
}
|
||||
$self->close unless $h->{'content-length'} =~ /^\d+$/ &&
                    $h->{'content-length'} > 0;
|
||||
|
||||
$bref = \$self->{line};
|
||||
last;
|
||||
}
|
||||
}
|
||||
}
|
||||
return unless $self->{gotheaders};
|
||||
|
||||
my $need = $self->{headers}{'content-length'} - $self->{readcount};
|
||||
my $len = length($$bref);
|
||||
|
||||
# if we read into the next message (pipelined messages)
|
||||
# then we need to push the overflow piece back into $overflow
|
||||
my $overflow;
|
||||
if ($len > $need) {
|
||||
my $needed = substr($$bref, 0, $need);
|
||||
$overflow = substr($$bref, $need);
|
||||
$bref = \$needed;
|
||||
$len = $need;
|
||||
}
|
||||
|
||||
$self->{readcount} += $len;
|
||||
if ($self->{sendmail} && ! $self->{err}) {
|
||||
$self->{sendmail}->print($$bref);
|
||||
$self->{err} = 1 if $! == EPIPE;
|
||||
}
|
||||
|
||||
# if we're done, close sendmail
|
||||
if ($len == $need) {
|
||||
if (! $self->{err} &&
|
||||
$self->{sendmail} &&
|
||||
$self->{sendmail}->close()) {
|
||||
$self->write("OK\r\n");
|
||||
} else {
|
||||
$self->write("FAIL\r\n");
|
||||
}
|
||||
$self->reset_for_next_message;
|
||||
$self->process_read_buf(\$overflow) if defined $overflow;
|
||||
}
|
||||
|
||||
$self->watch_read(1);
|
||||
}
|
||||
|
||||
|
||||
# Client
|
||||
sub event_err { my $self = shift; $self->close; }
|
||||
sub event_hup { my $self = shift; $self->close; }
|
||||
|
||||
# Local Variables:
|
||||
# mode: perl
|
||||
# c-basic-indent: 4
|
||||
# indent-tabs-mode: nil
|
||||
# End:
|
||||
77
wcmtools/gearman/dev/DMap.pm
Executable file
77
wcmtools/gearman/dev/DMap.pm
Executable file
@@ -0,0 +1,77 @@
|
||||
#!/usr/bin/perl
|
||||
|
||||
package DMap;
|
||||
use strict;
|
||||
use Exporter;
|
||||
use Storable;
|
||||
use IO::Socket::INET;
|
||||
use Gearman::Util;
|
||||
use vars qw(@ISA @EXPORT);
|
||||
@ISA = qw(Exporter);
|
||||
@EXPORT = qw(dmap);
|
||||
|
||||
$Storable::Deparse = 1;
|
||||
$Storable::Eval = 1;
|
||||
|
||||
our @js;
|
||||
|
||||
sub set_job_servers {
|
||||
@js = @_;
|
||||
}
|
||||
|
||||
sub dmap (&@) {
|
||||
my $code = shift;
|
||||
my $fz = Storable::freeze($code);
|
||||
|
||||
my $sock;
|
||||
foreach (@js) {
|
||||
$_ .= ":7003" unless /:/;
|
||||
$sock = IO::Socket::INET->new(PeerAddr => $_);  # try each job server in turn
|
||||
last if $sock;
|
||||
}
|
||||
die "No jobserver available" unless $sock;
|
||||
|
||||
my $send = sub {
|
||||
print $sock Gearman::Util::pack_req_command(@_);
|
||||
};
|
||||
|
||||
my $err;
|
||||
my $get = sub {
|
||||
return Gearman::Util::read_res_packet($sock, \$err);
|
||||
};
|
||||
|
||||
my $argc = scalar @_;
|
||||
ARG:
|
||||
foreach (@_) {
|
||||
$send->("submit_job", join("\0", "dmap", "", Storable::freeze([ $code, $_ ])));
|
||||
}
|
||||
|
||||
my $waiting = $argc;
|
||||
my %handle; # n -> handle
|
||||
my $hct = 0;
|
||||
my %partial_res;
|
||||
|
||||
while ($waiting) {
|
||||
my $res = $get->()
|
||||
or die "Failure: $err";
|
||||
|
||||
if ($res->{type} eq "job_created") {
|
||||
$handle{$hct} = ${$res->{blobref}};
|
||||
$hct++;
|
||||
next;
|
||||
}
|
||||
|
||||
if ($res->{type} eq "work_complete") {
|
||||
my $br = $res->{blobref};
|
||||
$$br =~ s/^(.+?)\0//;
|
||||
my $handle = $1;
|
||||
$partial_res{$handle} = Storable::thaw($$br);
|
||||
$waiting--;
|
||||
}
|
||||
}
|
||||
|
||||
return map { @{ $partial_res{$handle{$_}} } } (0..$argc-1);
|
||||
}
|
||||
|
||||
|
||||
1;
|
||||
40
wcmtools/gearman/dev/client.pl
Executable file
40
wcmtools/gearman/dev/client.pl
Executable file
@@ -0,0 +1,40 @@
|
||||
#!/usr/bin/perl
|
||||
use strict;
|
||||
use Gearman::Util;
|
||||
use IO::Socket::INET;
|
||||
use Data::Dumper;
|
||||
my $sock = IO::Socket::INET->new(PeerAddr => "localhost:7003")
|
||||
or die "no socket.";
|
||||
|
||||
my $send = sub {
|
||||
print $sock Gearman::Util::pack_req_command(@_);
|
||||
};
|
||||
|
||||
my $err;
|
||||
my $get = sub {
|
||||
return Gearman::Util::read_res_packet($sock, \$err);
|
||||
};
|
||||
|
||||
#$send->("submit_job_bg", join("\0", "add", "", "5,3"));
|
||||
$send->("get_status", "FOO");
|
||||
my $res = $get->() or die "no handle";
|
||||
die "not a status_res packet" unless $res->{type} eq "status_res";
|
||||
|
||||
while (1) {
|
||||
$send->("submit_job", join("\0", "add", "-", "5,3"));
|
||||
$res = $get->() or die "no handle";
|
||||
print Dumper($res);
|
||||
die "not a job_created res" unless $res->{type} eq "job_created";
|
||||
|
||||
while ($res = $get->()) {
|
||||
print "New packet: " . Dumper($res);
|
||||
}
|
||||
print "Error: $err\n";
|
||||
|
||||
exit 0;
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
76
wcmtools/gearman/dev/dmap-worker.pl
Executable file
76
wcmtools/gearman/dev/dmap-worker.pl
Executable file
@@ -0,0 +1,76 @@
|
||||
#!/usr/bin/perl
|
||||
use strict;
|
||||
use Gearman::Util;
|
||||
use IO::Socket::INET;
|
||||
use Data::Dumper;
|
||||
use Storable;
|
||||
$Storable::Eval = 1;
|
||||
|
||||
my $server = shift;
|
||||
$server ||= "localhost";
|
||||
|
||||
my $sock = IO::Socket::INET->new(PeerAddr => "$server:7003")
|
||||
or die "no socket.";
|
||||
|
||||
my $send = sub {
|
||||
print $sock Gearman::Util::pack_req_command(@_);
|
||||
};
|
||||
|
||||
my $err;
|
||||
my $get = sub {
|
||||
my $res;
|
||||
while (1) {
|
||||
$res = Gearman::Util::read_res_packet($sock, \$err);
|
||||
return undef unless $res;
|
||||
return $res unless $res->{type} eq "noop";
|
||||
}
|
||||
};
|
||||
|
||||
$send->("can_do", "dmap");
|
||||
|
||||
while (1) {
|
||||
$send->("grab_job");
|
||||
|
||||
my $res = $get->();
|
||||
die "ERROR: $err\n" unless $res;
|
||||
print " res.type = $res->{type}\n";
|
||||
|
||||
if ($res->{type} eq "error") {
|
||||
print "ERROR: " . Dumper($res);
|
||||
exit 0;
|
||||
}
|
||||
|
||||
if ($res->{type} eq "no_job") {
|
||||
$send->("pre_sleep");
|
||||
|
||||
print "Sleeping.\n";
|
||||
my $rin;
|
||||
vec($rin, fileno($sock), 1) = 1;
|
||||
my $nfound = select($rin, undef, undef, 2.0);
|
||||
print " select returned = $nfound\n";
|
||||
next;
|
||||
}
|
||||
|
||||
if ($res->{type} eq "job_assign") {
|
||||
my $ar = $res->{blobref};
|
||||
die "uh, bogus res" unless
|
||||
$$ar =~ s/^(.+?)\0(.+?)\0//;
|
||||
my ($handle, $func) = ($1, $2);
|
||||
print "GOT JOB: $handle -- $func\n";
|
||||
|
||||
if ($func eq "dmap") {
|
||||
my $rq = Storable::thaw($$ar);
|
||||
my $code = $rq->[0];
|
||||
my @val = map { &$code; } $rq->[1];
|
||||
print "VALS: [@val]\n";
|
||||
$send->("work_complete", join("\0", $handle, Storable::freeze(\@val)));
|
||||
}
|
||||
next;
|
||||
}
|
||||
|
||||
print "RES: ", Dumper($res);
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
13
wcmtools/gearman/dev/dmap.pl
Executable file
13
wcmtools/gearman/dev/dmap.pl
Executable file
@@ -0,0 +1,13 @@
|
||||
#!/usr/bin/perl
|
||||
|
||||
use strict;
|
||||
use DMap;
|
||||
DMap::set_job_servers("localhost", "sammy", "kenny");
|
||||
|
||||
my @foo = dmap { "$_ = " . `hostname` } (1..10);
|
||||
|
||||
print "dmap says:\n @foo";
|
||||
|
||||
|
||||
|
||||
|
||||
23
wcmtools/gearman/dev/test-gear.pl
Executable file
23
wcmtools/gearman/dev/test-gear.pl
Executable file
@@ -0,0 +1,23 @@
|
||||
#!/usr/bin/perl
|
||||
use strict;
|
||||
use Gearman::Util;
|
||||
use IO::Socket::INET;
|
||||
my $sock = IO::Socket::INET->new(PeerAddr => "localhost:7003")
|
||||
or die "no socket.";
|
||||
|
||||
print $sock "gibberish_cmd\r\n";
|
||||
my $res = <$sock>;
|
||||
die "bogus response" unless $res =~ /^ERR unknown_command /;
|
||||
|
||||
my $cmd;
|
||||
|
||||
my $echo_val = "The time is " . time() . " \r\n and a null\0 is fun.";
|
||||
print $sock Gearman::Util::pack_req_command("echo_req", $echo_val);
|
||||
|
||||
my $err;
|
||||
$res = Gearman::Util::read_res_packet($sock, \$err);
|
||||
use Data::Dumper;
|
||||
print "ERROR: $err\n";
|
||||
print Dumper($res);
|
||||
|
||||
|
||||
76
wcmtools/gearman/dev/worker.pl
Executable file
76
wcmtools/gearman/dev/worker.pl
Executable file
@@ -0,0 +1,76 @@
|
||||
#!/usr/bin/perl
|
||||
use strict;
|
||||
use Gearman::Util;
|
||||
use IO::Socket::INET;
|
||||
use Data::Dumper;
|
||||
my $sock = IO::Socket::INET->new(PeerAddr => "localhost:7003")
|
||||
or die "no socket.";
|
||||
|
||||
my $send = sub {
|
||||
print $sock Gearman::Util::pack_req_command(@_);
|
||||
};
|
||||
|
||||
my $err;
|
||||
my $get = sub {
|
||||
my $res;
|
||||
while (1) {
|
||||
$res = Gearman::Util::read_res_packet($sock, \$err);
|
||||
return undef unless $res;
|
||||
return $res unless $res->{type} eq "noop";
|
||||
}
|
||||
};
|
||||
|
||||
$send->("can_do", "frob");
|
||||
$send->("cant_do", "frob");
|
||||
$send->("can_do", "bar");
|
||||
$send->("reset_abilities");
|
||||
|
||||
$send->("can_do", "add");
|
||||
|
||||
while (1) {
|
||||
$send->("grab_job");
|
||||
|
||||
my $res = $get->();
|
||||
die "ERROR: $err\n" unless $res;
|
||||
print " res.type = $res->{type}\n";
|
||||
|
||||
if ($res->{type} eq "error") {
|
||||
print "ERROR: " . Dumper($res);
|
||||
exit 0;
|
||||
}
|
||||
|
||||
if ($res->{type} eq "no_job") {
|
||||
$send->("pre_sleep");
|
||||
|
||||
print "Sleeping.\n";
|
||||
my $rin;
|
||||
vec($rin, fileno($sock), 1) = 1;
|
||||
my $nfound = select($rin, undef, undef, 2.0);
|
||||
print " select returned = $nfound\n";
|
||||
next;
|
||||
}
|
||||
|
||||
if ($res->{type} eq "job_assign") {
|
||||
my $ar = $res->{blobref};
|
||||
die "uh, bogus res" unless
|
||||
$$ar =~ s/^(.+?)\0(.+?)\0//;
|
||||
my ($handle, $func) = ($1, $2);
|
||||
|
||||
print " GOT: handle=$handle, func=$func, args=($$ar)\n";
|
||||
if ($func eq "add") {
|
||||
for (1..10) {
|
||||
$send->("work_status", join("\0", $handle, $_, 10));
|
||||
select undef, undef, undef, 0.5;
|
||||
}
|
||||
my ($n1, $n2) = split(/,/, $$ar);
|
||||
$send->("work_complete", join("\0", $handle, $n1+$n2));
|
||||
}
|
||||
next;
|
||||
}
|
||||
|
||||
print "RES: ", Dumper($res);
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
225
wcmtools/gearman/doc/overview.txt
Executable file
225
wcmtools/gearman/doc/overview.txt
Executable file
@@ -0,0 +1,225 @@
|
||||
[ WARNING: EXTREMELY PRELIMINARY! ]
|
||||
|
||||
GearMan: A distributed job system
|
||||
Brad Whitaker <whitaker@danga.com>
|
||||
|
||||
==================================
|
||||
|
||||
TODO: error responses to malformed/unexpected packets?
|
||||
priorities, expirations of old/irrelevant jobs
|
||||
upper-layer handling of async system going down + repopulation of jobs
|
||||
|
||||
Architecture:
|
||||
|
||||
[ job_server_1 ] \
|
||||
[ job_server_2 ] | <====> application
|
||||
[ job_server_n ] /
|
||||
|
||||
\ | /
|
||||
-*- persistent tcp connections between job servers/workers
|
||||
/ | \
|
||||
|
||||
[ worker_1 ]
|
||||
[ worker_2 ]
|
||||
[ worker_n ]
|
||||
|
||||
|
||||
Guarantees:
|
||||
|
||||
1) Each job server will kill dups within that job server (not global)
|
||||
2) Jobs will be retried as specified, as long as the job server is running
|
||||
3) Each worker will have exactly one task at a time
|
||||
... ?
|
||||
|
||||
Non-Guarantees:
|
||||
|
||||
1) Global duplicate checking is not provided
|
||||
2) No job is guaranteed to complete
|
||||
3) Loss of any job server will lose any jobs registered with it
|
||||
... ?
|
||||
|
||||
|
||||
Work flow:
|
||||
|
||||
1) Job server starts up; all workers connect and announce their abilities.
|
||||
|
||||
2) Application sends job to random job server, noting the job record so it can
|
||||
be recreated in the future if necessary.
|
||||
|
||||
a) Synchronous: Application stays connected and waits for response
|
||||
b) Asynchronous:
|
||||
|
||||
3) Job server handles job:
|
||||
|
||||
Possible messages:
|
||||
|
||||
[worker => job server]
|
||||
"here's what i can do."
|
||||
"goodbye."
|
||||
"i'm about to sleep."
|
||||
"got a job for me?"
|
||||
"i'm 1/100 complete now."
|
||||
"i've completed my job."
|
||||
|
||||
[job server => worker]
|
||||
"noop." / "wake up."
|
||||
"here's a job to do."
|
||||
|
||||
[application => job server]
|
||||
"create this new job."
|
||||
"how far along is this job?"
|
||||
"is this job finished?"
|
||||
|
||||
[job server => application]
|
||||
"okay (here is its handle)."
|
||||
"job is 1/100 complete."
|
||||
"job completed as follows: ..."
|
||||
|
||||
|
||||
Request/Response cycles:
|
||||
|
||||
[ worker <=> job server ]
|
||||
"here's what i can do" => (announcement)
|
||||
"goodbye" => (announcement)
|
||||
"i'm about to sleep" => (announcement)
|
||||
"i'm 1/100 complete now" => (announcement)
|
||||
"i've completed my job" => (announcement)
|
||||
"got a job for me?" => "here's a job to do."
|
||||
|
||||
[ application <=> job server ]
|
||||
"create this new job." => "okay (here is its handle)."
|
||||
"how far along is this job?" => "job is 1/100 complete."
|
||||
"is this job finished?" => "job completed as follows: ..."
|
||||
|
||||
[ job server <=> worker ]
|
||||
"wake up." => (worker wakes up from sleep)
|
||||
"here is a job to do" => "i'm 1/100 complete now."
|
||||
=> "i've completed my job"
|
||||
|
||||
[ job server <=> application ]
|
||||
(only speaks in response to application requests)
|
||||
|
||||
|
||||
Best case conversation example:
|
||||
|
||||
worker_n => job_server_n: "got a job for me?"
|
||||
job_server_n => worker_n: "yes, here is a job i've locked for you"
|
||||
worker_n => job_server_n: "here is the result"
|
||||
|
||||
Worst case:
|
||||
|
||||
while ($time < $sleep_threshold) {
|
||||
for $js (1..n) {
|
||||
worker => job_server_$js: "got a job for me?"
|
||||
job_server_$js => worker: "no, sorry"
|
||||
}
|
||||
}
|
||||
|
||||
worker => all_job_servers: "going to sleep"
|
||||
|
||||
[ worker receives wakeup packet ] or [ t seconds elapse ]
|
||||
|
||||
worker wakes up and resumes loop
|
||||
|
||||
|
||||
Packet types:
|
||||
|
||||
Generic header:
|
||||
|
||||
[ 4 byte magic
|
||||
4 byte packet type
|
||||
4 byte length ]
|
||||
|
||||
Magic:
|
||||
4 opaque bytes to verify state machine "\0REQ" or "\0RES"
|
||||
|
||||
Packet type:
|
||||
(see Gearman::Util)
|
||||
|
||||
Length:
|
||||
Post-header data length
|
||||
|
||||
|
||||
Properties of a job:
|
||||
|
||||
func -- what function name
|
||||
opaque scalar arg (passed through end-to-end, no interpretation by libraries/server)
|
||||
uniq key -- for merging (default: don't merge, "-" means merge on opaque scalar)
|
||||
|
||||
retry count
|
||||
fail after time -- treat a timeout as a failure
|
||||
do job if dequeued and no listeners ("submit_job_bg")
|
||||
priority ("submit_job_high")
|
||||
on_* handlers
|
||||
|
||||
behavior when there's no worker registered for that job type?
|
||||
|
||||
|
||||
Notes:
|
||||
|
||||
-- document whether on_fail gets called on all failures, or just last one, when retry_count is in use
|
||||
-- document that uniq merging isn't guaranteed, just that it's permitted. if two tasks must not run
|
||||
at the same time, the task itself needs to do appropriate locking with itself / other tasks.
|
||||
-- the uniq merging will generally work in practice with multiple Job servers because the client
|
||||
hashes the (func + opaque_arg) onto the set of servers
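A sketch of that client-side hashing, matching Gearman::Task::_hashfunc
later in this commit (the server list and key below are examples):

    use String::CRC32 ();

    my @servers = ("10.0.0.1:7003", "10.0.0.2:7003", "10.0.0.3:7003");
    my $key     = "uid123-gal9-640-480";   # the uniq key (or the args, when uniq is "-")
    my $hash    = (String::CRC32::crc32($key) >> 16) & 0x7fff;
    my $start   = $hash % @servers;        # first server tried for this key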
|
||||
|
||||
|
||||
|
||||
Task summary:
|
||||
|
||||
1) mail
|
||||
name => mail
|
||||
dupkey => '' (don't check dups)
|
||||
type => async+handle
|
||||
args => storable MIME::Lite/etc
|
||||
|
||||
2) gal resize
|
||||
name => resize
|
||||
dupkey => uid-gallid-w-h
|
||||
type => async+handle
|
||||
args => storable of images to resize/how
|
||||
|
||||
3) thumb pregen
|
||||
name => thumbgen
|
||||
dupkey => uid-upicid-w-h
|
||||
type => async+handle
|
||||
args => storable of images to resize
|
||||
|
||||
4) LJ crons
|
||||
name => pay_updateaccounts/etc
|
||||
dupkey => '' (no dup checking)
|
||||
type => async
|
||||
args => @ARGV to pass to job?
|
||||
|
||||
6) Dirty Flushing
|
||||
name => dirty
|
||||
dupkey => friends-uid, backup-uid, etc
|
||||
type => async+handle
|
||||
args => none
|
||||
|
||||
7) CmdBuffer jobs
|
||||
name => weblogscom
|
||||
dupkey => uid
|
||||
type => async+throw-away
|
||||
args => none
|
||||
|
||||
8) RSS fetching
|
||||
name => rss
|
||||
dupkey => uid
|
||||
type => async+handle
|
||||
args => none
|
||||
|
||||
9) captcha generation
|
||||
name => captcha
|
||||
dupkey => dd-hh ? maybe '1' or something
|
||||
type => async+throw-away
|
||||
args => none
|
||||
|
||||
10) birthday emails
|
||||
name => bday
|
||||
dupkey => yyyy-mm-dd
|
||||
type => async+handle
|
||||
args => none
|
||||
|
||||
11) restart web nodes
|
||||
-- ask brad about this?
|
||||
303
wcmtools/gearman/lib/Gearman/Client.pm
Executable file
303
wcmtools/gearman/lib/Gearman/Client.pm
Executable file
@@ -0,0 +1,303 @@
|
||||
#!/usr/bin/perl
|
||||
|
||||
#TODO: fail_after_idle
|
||||
|
||||
package Gearman::Client;
|
||||
|
||||
use strict;
|
||||
use IO::Socket::INET;
|
||||
use Socket qw(IPPROTO_TCP TCP_NODELAY SOL_SOCKET);
|
||||
|
||||
use Gearman::Objects;
|
||||
use Gearman::Task;
|
||||
use Gearman::Taskset;
|
||||
use Gearman::JobStatus;
|
||||
|
||||
sub new {
|
||||
my ($class, %opts) = @_;
|
||||
my $self = $class;
|
||||
$self = fields::new($class) unless ref $self;
|
||||
|
||||
$self->{job_servers} = [];
|
||||
$self->{js_count} = 0;
|
||||
$self->{sock_cache} = {};
|
||||
|
||||
$self->job_servers(@{ $opts{job_servers} })
|
||||
if $opts{job_servers};
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
sub new_task_set {
|
||||
my Gearman::Client $self = shift;
|
||||
return Gearman::Taskset->new($self);
|
||||
}
|
||||
|
||||
# getter/setter
|
||||
sub job_servers {
|
||||
my Gearman::Client $self = shift;
|
||||
return $self->{job_servers} unless @_;
|
||||
my $list = [ @_ ];
|
||||
$self->{js_count} = scalar @$list;
|
||||
foreach (@$list) {
|
||||
$_ .= ":7003" unless /:/;
|
||||
}
|
||||
return $self->{job_servers} = $list;
|
||||
}
|
||||
|
||||
sub _get_task_from_args {
|
||||
my Gearman::Task $task;
|
||||
if (ref $_[0]) {
|
||||
$task = $_[0];
|
||||
Carp::croak("Argument isn't a Gearman::Task") unless ref $_[0] eq "Gearman::Task";
|
||||
} else {
|
||||
my ($func, $arg_p, $opts) = @_;
|
||||
my $argref = ref $arg_p ? $arg_p : \$arg_p;
|
||||
Carp::croak("Function argument must be scalar or scalarref")
|
||||
unless ref $argref eq "SCALAR";
|
||||
$task = Gearman::Task->new($func, $argref, $opts);
|
||||
}
|
||||
return $task;
|
||||
|
||||
}
|
||||
|
||||
# given a (func, arg_p, opts?), returns either undef (on fail) or scalarref of result
|
||||
sub do_task {
|
||||
my Gearman::Client $self = shift;
|
||||
my Gearman::Task $task = &_get_task_from_args;
|
||||
|
||||
my $ret = undef;
|
||||
my $did_err = 0;
|
||||
|
||||
$task->{on_complete} = sub {
|
||||
$ret = shift;
|
||||
};
|
||||
|
||||
$task->{on_fail} = sub {
|
||||
$did_err = 1;
|
||||
};
|
||||
|
||||
my $ts = $self->new_task_set;
|
||||
$ts->add_task($task);
|
||||
$ts->wait;
|
||||
|
||||
return $did_err ? undef : $ret;
|
||||
|
||||
}
|
||||
|
||||
# given a (func, arg_p, opts?) or
|
||||
# Gearman::Task, dispatches job in background. returns the handle from the jobserver, or false if any failure
|
||||
sub dispatch_background {
|
||||
my Gearman::Client $self = shift;
|
||||
my Gearman::Task $task = &_get_task_from_args;
|
||||
|
||||
my ($jst, $jss) = $self->_get_random_js_sock;
|
||||
return 0 unless $jss;
|
||||
|
||||
my $req = $task->pack_submit_packet("background");
|
||||
my $len = length($req);
|
||||
my $rv = $jss->write($req, $len);
|
||||
|
||||
my $err;
|
||||
my $res = Gearman::Util::read_res_packet($jss, \$err);
|
||||
return 0 unless $res && $res->{type} eq "job_created";
|
||||
return "$jst//${$res->{blobref}}";
|
||||
}
|
||||
|
||||
sub get_status {
|
||||
my Gearman::Client $self = shift;
|
||||
my $handle = shift;
|
||||
my ($hostport, $shandle) = split(m!//!, $handle);
|
||||
return undef unless grep { $hostport eq $_ } @{ $self->{job_servers} };
|
||||
|
||||
my $sock = $self->_get_js_sock($hostport)
|
||||
or return undef;
|
||||
|
||||
my $req = Gearman::Util::pack_req_command("get_status",
|
||||
$shandle);
|
||||
my $len = length($req);
|
||||
my $rv = $sock->write($req, $len);
|
||||
|
||||
my $err;
|
||||
my $res = Gearman::Util::read_res_packet($sock, \$err);
|
||||
return undef unless $res && $res->{type} eq "status_res";
|
||||
my @args = split(/\0/, ${ $res->{blobref} });
|
||||
return undef unless $args[0];
|
||||
shift @args;
|
||||
$self->_put_js_sock($hostport, $sock);
|
||||
return Gearman::JobStatus->new(@args);
|
||||
}
|
||||
|
||||
# returns a socket from the cache. it should be returned to the
|
||||
# cache with _put_js_sock. the hostport isn't verified. the caller
|
||||
# should verify that $hostport is in the set of jobservers.
|
||||
sub _get_js_sock {
|
||||
my Gearman::Client $self = shift;
|
||||
my $hostport = shift;
|
||||
|
||||
if (my $sock = delete $self->{sock_cache}{$hostport}) {
|
||||
return $sock if $sock->connected;
|
||||
}
|
||||
|
||||
my $sock = IO::Socket::INET->new(PeerAddr => $hostport,
|
||||
Timeout => 1)
|
||||
or return undef;
|
||||
|
||||
setsockopt($sock, IPPROTO_TCP, TCP_NODELAY, pack("l", 1)) or die;
|
||||
$sock->autoflush(1);
|
||||
return $sock;
|
||||
}
|
||||
|
||||
# way for a caller to give back a socket it previously requested.
|
||||
# the $hostport isn't verified, so the caller should verify the
|
||||
# $hostport is still in the set of jobservers.
|
||||
sub _put_js_sock {
|
||||
my Gearman::Client $self = shift;
|
||||
my ($hostport, $sock) = @_;
|
||||
|
||||
$self->{sock_cache}{$hostport} ||= $sock;
|
||||
}
|
||||
|
||||
sub _get_random_js_sock {
|
||||
my Gearman::Client $self = shift;
|
||||
my $getter = shift;
|
||||
return undef unless $self->{js_count};
|
||||
|
||||
$getter ||= sub { my $hostport = shift; return $self->_get_js_sock($hostport); };
|
||||
|
||||
my $ridx = int(rand($self->{js_count}));
|
||||
for (my $try = 0; $try < $self->{js_count}; $try++) {
|
||||
my $aidx = ($ridx + $try) % $self->{js_count};
|
||||
my $hostport = $self->{job_servers}[$aidx];
|
||||
my $sock = $getter->($hostport) or next;
|
||||
return ($hostport, $sock);
|
||||
}
|
||||
return ();
|
||||
}
|
||||
|
||||
1;
|
||||
__END__
|
||||
|
||||
=head1 NAME
|
||||
|
||||
Gearman::Client - Client for gearman distributed job system
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
use Gearman::Client;
|
||||
my $client = Gearman::Client->new;
|
||||
$client->job_servers('127.0.0.1', '10.0.0.1');
|
||||
|
||||
# running a single task
|
||||
my $result_ref = $client->do_task("add", "1+2");
|
||||
print "1 + 2 = $$result_ref\n";
|
||||
|
||||
# waiting on a set of tasks in parallel
|
||||
my $taskset = $client->new_task_set;
|
||||
$taskset->add_task( "add" => "1+2", {
|
||||
on_complete => sub { ... }
|
||||
});
|
||||
$taskset->add_task( "divide" => "5/0", {
|
||||
on_fail => sub { print "divide by zero error!\n"; },
|
||||
});
|
||||
$taskset->wait;
|
||||
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
I<Gearman::Client> is a client class for the Gearman distributed job
|
||||
system, providing a framework for sending jobs to one or more Gearman
|
||||
servers. These jobs are then distributed out to a farm of workers.
|
||||
|
||||
Callers instantiate a I<Gearman::Client> object and from it dispatch
|
||||
single tasks, sets of tasks, or check on the status of tasks.
|
||||
|
||||
=head1 USAGE
|
||||
|
||||
=head2 Gearman::Client->new(\%options)
|
||||
|
||||
Creates a new I<Gearman::Client> object, and returns the object.
|
||||
|
||||
If I<%options> is provided, initializes the new client object with the
|
||||
settings in I<%options>, which can contain:
|
||||
|
||||
=over 4
|
||||
|
||||
=item * job_servers
|
||||
|
||||
Calls I<job_servers> (see below) to initialize the list of job
|
||||
servers. Value in this case should be an arrayref.
|
||||
|
||||
=back
|
||||
|
||||
=head2 $client->job_servers(@servers)
|
||||
|
||||
Initializes the client I<$client> with the list of job servers in I<@servers>.
|
||||
I<@servers> should contain a list of IP addresses, with optional port
|
||||
numbers. For example:
|
||||
|
||||
$client->job_servers('127.0.0.1', '192.168.1.100:7003');
|
||||
|
||||
If the port number is not provided, C<7003> is used as the default.
|
||||
|
||||
=head2 $client-E<gt>do_task($task)
|
||||
|
||||
=head2 $client-E<gt>do_task($funcname, $arg, \%options)
|
||||
|
||||
Dispatches a task and waits on the results. May either provide a
|
||||
L<Gearman::Task> object, or the 3 arguments that the Gearman::Task
|
||||
constructor takes.
|
||||
|
||||
Returns a scalar reference to the result, or undef on failure.
|
||||
|
||||
If you provide on_complete and on_fail handlers, they're ignored, as
|
||||
this function currently overrides them.
|
||||
|
||||
=head2 $client-E<gt>dispatch_background($task)
|
||||
|
||||
=head2 $client-E<gt>dispatch_background($funcname, $arg, \%options)
|
||||
|
||||
Dispatches a task and doesn't wait for the result.  Returns the
fully-qualified handle of the background job ("host:port//handle") on
success, or false if the submission failed.
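For example (the function name, argument and uniq key here are
illustrative only):

  my $handle = $client->dispatch_background("resize", \$args, {
      uniq => "user5-gallery9",
  });

  # later, poll the job server for progress using that handle
  if (my $status = $client->get_status($handle)) {
      printf "known=%d running=%d\n", $status->known, $status->running;
  }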
|
||||
|
||||
=head2 $taskset = $client-E<gt>new_task_set
|
||||
|
||||
Creates and returns a new I<Gearman::Taskset> object.
|
||||
|
||||
=head2 $taskset-E<gt>add_task($task)
|
||||
|
||||
=head2 $taskset-E<gt>add_task($funcname, $arg, $uniq)
|
||||
|
||||
=head2 $taskset-E<gt>add_task($funcname, $arg, \%options)
|
||||
|
||||
Adds a task to a taskset. Three different calling conventions are
|
||||
available.
|
||||
|
||||
=head2 $taskset-E<gt>wait
|
||||
|
||||
Waits for a response from the job server for any of the tasks listed
|
||||
in the taskset. Will call the I<on_*> handlers for each of the tasks
|
||||
that have been completed, updated, etc. Doesn't return until
|
||||
everything has finished running or failing.
|
||||
|
||||
=head1 EXAMPLES
|
||||
|
||||
=head2 Summation
|
||||
|
||||
This is an example client that sends off a request to sum up a list of
|
||||
integers.
|
||||
|
||||
use Gearman::Client;
|
||||
use Storable qw( freeze );
|
||||
my $client = Gearman::Client->new;
|
||||
$client->job_servers('127.0.0.1');
|
||||
my $tasks = $client->new_task_set;
|
||||
my $handle = $tasks->add_task(sum => freeze([ 3, 5 ]), {
|
||||
on_complete => sub { print ${ $_[0] }, "\n" }
|
||||
});
|
||||
$tasks->wait;
|
||||
|
||||
See the I<Gearman::Worker> documentation for the worker for the I<sum>
|
||||
function.
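Until that documentation exists, here is a rough sketch of what the
matching worker could look like; it assumes a C<register_function> /
C<work> style interface on I<Gearman::Worker>, which is not shown in
this commit excerpt:

  use Gearman::Worker;
  use Storable qw( thaw );
  use List::Util qw( sum );

  my $worker = Gearman::Worker->new;
  $worker->job_servers('127.0.0.1');
  $worker->register_function(sum => sub {
      my $job = shift;                    # a Gearman::Job
      sum @{ thaw(${ $job->argref }) };   # argref() is defined in Worker.pm
  });
  $worker->work while 1;                  # grab and run jobs forever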
|
||||
|
||||
=cut
|
||||
20
wcmtools/gearman/lib/Gearman/JobStatus.pm
Executable file
20
wcmtools/gearman/lib/Gearman/JobStatus.pm
Executable file
@@ -0,0 +1,20 @@
|
||||
|
||||
package Gearman::JobStatus;
|
||||
|
||||
use strict;
|
||||
|
||||
sub new {
|
||||
my ($class, $known, $running, $nu, $de) = @_;
|
||||
undef $nu unless length($nu);
|
||||
undef $de unless length($de);
|
||||
my $self = [ $known, $running, $nu, $de ];
|
||||
bless $self;
|
||||
return $self;
|
||||
}
|
||||
|
||||
sub known { my $self = shift; return $self->[0]; }
|
||||
sub running { my $self = shift; return $self->[1]; }
|
||||
sub progress { my $self = shift; return defined $self->[2] ? [ $self->[2], $self->[3] ] : undef; }
|
||||
sub percent { my $self = shift; return (defined $self->[2] && $self->[3]) ? ($self->[2] / $self->[3]) : undef; }
|
||||
|
||||
1;
|
||||
52
wcmtools/gearman/lib/Gearman/Objects.pm
Executable file
52
wcmtools/gearman/lib/Gearman/Objects.pm
Executable file
@@ -0,0 +1,52 @@
|
||||
use strict;
|
||||
|
||||
package Gearman::Client;
|
||||
use fields (
|
||||
'job_servers',
|
||||
'js_count',
|
||||
'sock_cache', # hostport -> socket
|
||||
);
|
||||
|
||||
package Gearman::Taskset;
|
||||
|
||||
use fields (
|
||||
'waiting', # { handle => [Task, ...] }
|
||||
'client', # Gearman::Client
|
||||
'need_handle', # arrayref
|
||||
|
||||
'default_sock', # default socket (non-merged requests)
|
||||
'default_sockaddr', # default socket's ip/port
|
||||
|
||||
'loaned_sock', # { hostport => socket }
|
||||
|
||||
);
|
||||
|
||||
|
||||
package Gearman::Task;
|
||||
|
||||
use fields (
|
||||
# from client:
|
||||
'func',
|
||||
'argref',
|
||||
# opts from client:
|
||||
'uniq',
|
||||
'on_complete',
|
||||
'on_fail',
|
||||
'on_retry',
|
||||
'on_status',
|
||||
'retry_count',
|
||||
'fail_after_idle',
|
||||
'high_priority',
|
||||
|
||||
# from server:
|
||||
'handle',
|
||||
|
||||
# maintained by this module:
|
||||
'retries_done',
|
||||
'taskset',
|
||||
'jssock', # jobserver socket. shared by other tasks in the same taskset,
|
||||
# but not w/ tasks in other tasksets using the same Gearman::Client
|
||||
);
|
||||
|
||||
|
||||
1;
|
||||
210
wcmtools/gearman/lib/Gearman/Task.pm
Executable file
210
wcmtools/gearman/lib/Gearman/Task.pm
Executable file
@@ -0,0 +1,210 @@
|
||||
package Gearman::Task;
|
||||
|
||||
use strict;
|
||||
use Carp ();
|
||||
use String::CRC32 ();
|
||||
|
||||
# constructor, given: ($func, $argref, $opts);
|
||||
sub new {
|
||||
my $class = shift;
|
||||
|
||||
my $self = $class;
|
||||
$self = fields::new($class) unless ref $self;
|
||||
|
||||
$self->{func} = shift
|
||||
or Carp::croak("No function given");
|
||||
|
||||
$self->{argref} = shift || do { my $empty = ""; \$empty; };
|
||||
Carp::croak("Argref not a scalar reference") unless ref $self->{argref} eq "SCALAR";
|
||||
|
||||
my $opts = shift || {};
|
||||
for my $k (qw( uniq
|
||||
on_complete on_fail on_retry on_status
|
||||
retry_count fail_after_idle high_priority
|
||||
)) {
|
||||
$self->{$k} = delete $opts->{$k};
|
||||
}
|
||||
|
||||
if (%{$opts}) {
|
||||
Carp::croak("Unknown option(s): " . join(", ", sort keys %$opts));
|
||||
}
|
||||
|
||||
$self->{retries_done} = 0;
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
sub taskset {
|
||||
my Gearman::Task $task = shift;
|
||||
|
||||
# getter
|
||||
return $task->{taskset} unless @_;
|
||||
|
||||
# setter
|
||||
my Gearman::Taskset $ts = shift;
|
||||
$task->{taskset} = $ts;
|
||||
|
||||
my $merge_on = $task->{uniq} && $task->{uniq} eq "-" ?
|
||||
$task->{argref} : \ $task->{uniq};
|
||||
if ($$merge_on) {
|
||||
my $hash_num = _hashfunc($merge_on);
|
||||
$task->{jssock} = $ts->_get_hashed_sock($hash_num);
|
||||
} else {
|
||||
$task->{jssock} = $ts->_get_default_sock;
|
||||
}
|
||||
|
||||
return $task->{taskset};
|
||||
}
|
||||
|
||||
# returns number in range [0,32767] given a scalarref
|
||||
sub _hashfunc {
|
||||
return (String::CRC32::crc32(${ shift() }) >> 16) & 0x7fff;
|
||||
}
|
||||
|
||||
sub pack_submit_packet {
|
||||
my Gearman::Task $task = shift;
|
||||
my $is_background = shift;
|
||||
|
||||
my $mode = $is_background ?
|
||||
"submit_job_bg" :
|
||||
($task->{high_priority} ?
|
||||
"submit_job_high" :
|
||||
"submit_job");
|
||||
|
||||
return Gearman::Util::pack_req_command($mode,
|
||||
join("\0", $task->{func}, $task->{uniq}, ${ $task->{argref} }));
|
||||
}
|
||||
|
||||
sub fail {
|
||||
my Gearman::Task $task = shift;
|
||||
|
||||
# try to retry, if we can
|
||||
if ($task->{retries_done} < $task->{retry_count}) {
|
||||
$task->{retries_done}++;
|
||||
$task->{on_retry}->($task->{retries_done}) if $task->{on_retry};
|
||||
$task->handle(undef);
|
||||
return $task->{taskset}->add_task($task);
|
||||
}
|
||||
|
||||
return undef unless $task->{on_fail};
|
||||
$task->{on_fail}->();
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub complete {
|
||||
my Gearman::Task $task = shift;
|
||||
return unless $task->{on_complete};
|
||||
my $result_ref = shift;
|
||||
$task->{on_complete}->($result_ref);
|
||||
}
|
||||
|
||||
sub status {
|
||||
my Gearman::Task $task = shift;
|
||||
return unless $task->{on_status};
|
||||
my ($nu, $de) = @_;
|
||||
$task->{on_status}->($nu, $de);
|
||||
}
|
||||
|
||||
# getter/setter for the fully-qualified handle of form "IP:port//shandle" where
|
||||
# shandle is an opaque handle specific to the job server running on IP:port
|
||||
sub handle {
|
||||
my Gearman::Task $task = shift;
|
||||
return $task->{handle} unless @_;
|
||||
return $task->{handle} = shift;
|
||||
}
|
||||
|
||||
|
||||
1;
|
||||
__END__
|
||||
|
||||
=head1 NAME
|
||||
|
||||
Gearman::Task - a task in Gearman, from the point of view of a client
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
my $task = Gearman::Task->new("add", "1+2", {
|
||||
.....
|
||||
|
||||
};
|
||||
|
||||
$taskset->add_task($task);
|
||||
$client->do_task($task);
|
||||
$client->dispatch_background($task);
|
||||
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
I<Gearman::Task> is a Gearman::Client's representation of a task to be
|
||||
done.
|
||||
|
||||
=head1 USAGE
|
||||
|
||||
=head2 Gearman::Task->new($func, $arg, \%options)
|
||||
|
||||
Creates a new I<Gearman::Task> object, and returns the object.
|
||||
|
||||
I<$func> is the function name to be run. (that you have a worker registered to process)
|
||||
|
||||
I<$arg> is an opaque scalar or scalarref representing the argument(s)
|
||||
to pass to the distributed function. If you want to pass multiple
|
||||
arguments, you must encode them somehow into this one. That's up to
|
||||
you and your worker.
|
||||
|
||||
I<%options> can contain:
|
||||
|
||||
=over 4
|
||||
|
||||
=item * uniq
|
||||
|
||||
A key which indicates to the server that other tasks with the same
|
||||
function name and key will be merged into one. That is, the task
|
||||
will be run just once, but all the listeners waiting on that job
|
||||
will get the response multiplexed back to them.
|
||||
|
||||
Uniq may also contain the magic value "-" (a single hyphen) which
|
||||
means the uniq key is the contents of the args.
|
||||
|
||||
=item * on_complete
|
||||
|
||||
A subroutine reference to be invoked when the task is completed. The
|
||||
subroutine will be passed a reference to the return value from the worker
|
||||
process.
|
||||
|
||||
=item * on_fail
|
||||
|
||||
A subroutine reference to be invoked when the task fails (or fails for
|
||||
the last time, if retries were specified). No arguments are
|
||||
passed to this callback. This callback won't be called after a failure
|
||||
if more retries are still possible.
|
||||
|
||||
=item * on_retry
|
||||
|
||||
A subroutine reference to be invoked when the task fails, but is about
|
||||
to be retried.
|
||||
|
||||
Is passed one argument, what retry attempt number this is. (starts with 1)
|
||||
|
||||
=item * on_status
|
||||
|
||||
A subroutine reference to be invoked if the task emits status updates.
|
||||
Arguments passed to the subref are ($numerator, $denominator), where those
|
||||
are left up to the client and job to determine.
|
||||
|
||||
=item * retry_count
|
||||
|
||||
Number of times job will be retried if there are failures. Defaults to 0.
|
||||
|
||||
=item * high_priority
|
||||
|
||||
Boolean, whether this job should take priority over other jobs already
|
||||
enqueued.
|
||||
|
||||
=item * fail_after_idle
|
||||
|
||||
Automatically fail after this many seconds have elapsed. Defaults to 0,
|
||||
which means never.
|
||||
|
||||
=back
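For example (the function name, payload and callbacks below are purely
illustrative):

  my $task = Gearman::Task->new("resize", \$image_blob, {
      uniq        => "user5-gallery9-100-100",
      retry_count => 2,
      on_complete => sub { print "got " . length(${ $_[0] }) . " bytes back\n" },
      on_fail     => sub { warn "resize failed after retries\n" },
  });
  $taskset->add_task($task);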
|
||||
|
||||
=cut
|
||||
269
wcmtools/gearman/lib/Gearman/Taskset.pm
Executable file
269
wcmtools/gearman/lib/Gearman/Taskset.pm
Executable file
@@ -0,0 +1,269 @@
|
||||
package Gearman::Taskset;
|
||||
|
||||
use strict;
|
||||
use Carp ();
|
||||
use Gearman::Util;
|
||||
|
||||
sub new {
|
||||
my $class = shift;
|
||||
my Gearman::Client $client = shift;
|
||||
|
||||
my $self = $class;
|
||||
$self = fields::new($class) unless ref $self;
|
||||
|
||||
$self->{waiting} = {};
|
||||
$self->{need_handle} = [];
|
||||
$self->{client} = $client;
|
||||
$self->{loaned_sock} = {};
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
sub DESTROY {
|
||||
my Gearman::Taskset $ts = shift;
|
||||
|
||||
if ($ts->{default_sock}) {
|
||||
$ts->{client}->_put_js_sock($ts->{default_sockaddr}, $ts->{default_sock});
|
||||
}
|
||||
|
||||
while (my ($hp, $sock) = each %{ $ts->{loaned_sock} }) {
|
||||
$ts->{client}->_put_js_sock($hp, $sock);
|
||||
}
|
||||
}
|
||||
|
||||
sub _get_loaned_sock {
|
||||
my Gearman::Taskset $ts = shift;
|
||||
my $hostport = shift;
|
||||
if (my $sock = $ts->{loaned_sock}{$hostport}) {
|
||||
return $sock if $sock->connected;
|
||||
delete $ts->{loaned_sock}{$hostport};
|
||||
}
|
||||
|
||||
my $sock = $ts->{client}->_get_js_sock($hostport);
|
||||
return $ts->{loaned_sock}{$hostport} = $sock;
|
||||
}
|
||||
|
||||
sub wait {
|
||||
my Gearman::Taskset $ts = shift;
|
||||
|
||||
while (keys %{$ts->{waiting}}) {
|
||||
$ts->_wait_for_packet();
|
||||
# TODO: timeout jobs that have been running too long. the _wait_for_packet
|
||||
# loop only waits 0.5 seconds.
|
||||
}
|
||||
}
|
||||
|
||||
# ->add_task($func, <$scalar | $scalarref>, <$uniq | $opts_hashref>
|
||||
# opts:
|
||||
# -- uniq
|
||||
# -- on_complete
|
||||
# -- on_fail
|
||||
# -- on_status
|
||||
# -- retry_count
|
||||
# -- fail_after_idle
|
||||
# -- high_priority
|
||||
# ->add_task(Gearman::Task)
|
||||
#
|
||||
|
||||
sub add_task {
|
||||
my Gearman::Taskset $ts = shift;
|
||||
my $task;
|
||||
|
||||
if (ref $_[0]) {
|
||||
$task = shift;
|
||||
} else {
|
||||
my $func = shift;
|
||||
my $arg_p = shift; # scalar or scalarref
|
||||
my $opts = shift; # $uniq or hashref of opts
|
||||
|
||||
my $argref = ref $arg_p ? $arg_p : \$arg_p;
|
||||
unless (ref $opts eq "HASH") {
|
||||
$opts = { uniq => $opts };
|
||||
}
|
||||
|
||||
$task = Gearman::Task->new($func, $argref, $opts);
|
||||
}
|
||||
$task->taskset($ts);
|
||||
|
||||
my $req = $task->pack_submit_packet;
|
||||
my $len = length($req);
|
||||
my $rv = $task->{jssock}->syswrite($req, $len);
|
||||
die "Wrote $rv but expected to write $len" unless $rv == $len;
|
||||
|
||||
push @{ $ts->{need_handle} }, $task;
|
||||
while (@{ $ts->{need_handle} }) {
|
||||
my $rv = $ts->_wait_for_packet($task->{jssock});
|
||||
if (! $rv) {
|
||||
shift @{ $ts->{need_handle} }; # ditch it, it failed.
|
||||
# this will resubmit it if it failed.
|
||||
print " INITIAL SUBMIT FAILED\n";
|
||||
return $task->fail;
|
||||
}
|
||||
}
|
||||
|
||||
return $task->handle;
|
||||
}
|
||||
|
||||
sub _get_default_sock {
|
||||
my Gearman::Taskset $ts = shift;
|
||||
return $ts->{default_sock} if $ts->{default_sock};
|
||||
|
||||
my $getter = sub {
|
||||
my $hostport = shift;
|
||||
return
|
||||
$ts->{loaned_sock}{$hostport} ||
|
||||
$ts->{client}->_get_js_sock($hostport);
|
||||
};
|
||||
|
||||
my ($jst, $jss) = $ts->{client}->_get_random_js_sock($getter);
|
||||
$ts->{loaned_sock}{$jst} ||= $jss;
|
||||
|
||||
$ts->{default_sock} = $jss;
|
||||
$ts->{default_sockaddr} = $jst;
|
||||
return $jss;
|
||||
}
|
||||
|
||||
sub _get_hashed_sock {
|
||||
my Gearman::Taskset $ts = shift;
|
||||
my $hv = shift;
|
||||
|
||||
my Gearman::Client $cl = $ts->{client};
|
||||
|
||||
for (my $off = 0; $off < $cl->{js_count}; $off++) {
|
||||
my $idx = ($hv + $off) % ($cl->{js_count});
|
||||
my $sock = $ts->_get_loaned_sock($cl->{job_servers}[$idx]);
|
||||
return $sock if $sock;
|
||||
}
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
# returns boolean when given a sock to wait on.
|
||||
# otherwise, return value is undefined.
|
||||
sub _wait_for_packet {
|
||||
my Gearman::Taskset $ts = shift;
|
||||
my $sock = shift; # optional socket to singularly read from
|
||||
|
||||
my ($res, $err);
|
||||
if ($sock) {
|
||||
$res = Gearman::Util::read_res_packet($sock, \$err);
|
||||
return 0 unless $res;
|
||||
return $ts->_process_packet($res, $sock);
|
||||
} else {
|
||||
# TODO: cache this vector?
|
||||
my ($rin, $rout, $eout);
|
||||
my %watching;
|
||||
|
||||
for my $sock ($ts->{default_sock}, values %{ $ts->{loaned_sock} }) {
|
||||
next unless $sock;
|
||||
my $fd = $sock->fileno;
|
||||
|
||||
vec($rin, $fd, 1) = 1;
|
||||
$watching{$fd} = $sock;
|
||||
}
|
||||
|
||||
my $nfound = select($rout=$rin, undef, $eout=$rin, 0.5);
|
||||
return 0 if ! $nfound;
|
||||
|
||||
foreach my $fd (keys %watching) {
|
||||
next unless vec($rout, $fd, 1);
|
||||
# TODO: deal with error vector
|
||||
my $sock = $watching{$fd};
|
||||
$res = Gearman::Util::read_res_packet($sock, \$err);
|
||||
$ts->_process_packet($res, $sock) if $res;
|
||||
}
|
||||
return 1;
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
sub _ip_port {
|
||||
my $sock = shift;
|
||||
return undef unless $sock;
|
||||
my $pn = getpeername($sock) or return undef;
|
||||
my ($port, $iaddr) = Socket::sockaddr_in($pn);
|
||||
return Socket::inet_ntoa($iaddr) . ":$port";
|
||||
}
|
||||
|
||||
# note the failure of a task given by its jobserver-specific handle
|
||||
sub _fail_jshandle {
|
||||
my Gearman::Taskset $ts = shift;
|
||||
my $shandle = shift;
|
||||
|
||||
my $task_list = $ts->{waiting}{$shandle} or
|
||||
die "Uhhhh: got work_fail for unknown handle: $shandle\n";
|
||||
|
||||
my Gearman::Task $task = shift @$task_list or
|
||||
die "Uhhhh: task_list is empty on work_fail for handle $shandle\n";
|
||||
|
||||
$task->fail;
|
||||
delete $ts->{waiting}{$shandle} unless @$task_list;
|
||||
}
|
||||
|
||||
sub _process_packet {
|
||||
my Gearman::Taskset $ts = shift;
|
||||
my ($res, $sock) = @_;
|
||||
|
||||
if ($res->{type} eq "job_created") {
|
||||
my Gearman::Task $task = shift @{ $ts->{need_handle} } or
|
||||
die "Um, got an unexpected job_created notification";
|
||||
|
||||
my $shandle = ${ $res->{'blobref'} };
|
||||
my $ipport = _ip_port($sock);
|
||||
|
||||
# did sock become disconnected in the meantime?
|
||||
if (! $ipport) {
|
||||
$ts->_fail_jshandle($shandle);
|
||||
return 1;
|
||||
}
|
||||
|
||||
$task->handle("$ipport//$shandle");
|
||||
push @{ $ts->{waiting}{$shandle} ||= [] }, $task;
|
||||
return 1;
|
||||
}
|
||||
|
||||
if ($res->{type} eq "work_fail") {
|
||||
my $shandle = ${ $res->{'blobref'} };
|
||||
$ts->_fail_jshandle($shandle);
|
||||
return 1;
|
||||
}
|
||||
|
||||
if ($res->{type} eq "work_complete") {
|
||||
${ $res->{'blobref'} } =~ s/^(.+?)\0//
|
||||
or die "Bogus work_complete from server";
|
||||
my $shandle = $1;
|
||||
|
||||
my $task_list = $ts->{waiting}{$shandle} or
|
||||
die "Uhhhh: got work_complete for unknown handle: $shandle\n";
|
||||
|
||||
my Gearman::Task $task = shift @$task_list or
|
||||
die "Uhhhh: task_list is empty on work_complete for handle $shandle\n";
|
||||
|
||||
$task->complete($res->{'blobref'});
|
||||
delete $ts->{waiting}{$shandle} unless @$task_list;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
if ($res->{type} eq "work_status") {
|
||||
my ($shandle, $nu, $de) = split(/\0/, ${ $res->{'blobref'} });
|
||||
|
||||
my $task_list = $ts->{waiting}{$shandle} or
|
||||
die "Uhhhh: got work_status for unknown handle: $shandle\n";
|
||||
|
||||
# FIXME: the server is (probably) sending a work_status packet for each
|
||||
# interested client, even if the clients are the same, so probably need
|
||||
# to fix the server not to do that. just put this FIXME here for now,
|
||||
# though really it's a server issue.
|
||||
foreach my Gearman::Task $task (@$task_list) {
|
||||
$task->status($nu, $de);
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
die "Unknown/unimplemented packet type: $res->{type}";
|
||||
|
||||
}
|
||||
|
||||
1;
|
||||
128
wcmtools/gearman/lib/Gearman/Util.pm
Executable file
128
wcmtools/gearman/lib/Gearman/Util.pm
Executable file
@@ -0,0 +1,128 @@
|
||||
|
||||
package Gearman::Util;
use strict;

# I: to jobserver
# O: out of job server
# W: worker
# C: client of job server
# J : jobserver
our %cmd = (
    1  => [ 'I',  "can_do" ],           # from W: [FUNC]
    2  => [ 'I',  "cant_do" ],          # from W: [FUNC]
    3  => [ 'I',  "reset_abilities" ],  # from W: ---
    22 => [ 'I',  "set_client_id" ],    # W->J: [RANDOM_STRING_NO_WHITESPACE]
    4  => [ 'I',  "pre_sleep" ],        # from W: ---

    6  => [ 'O',  "noop" ],             # J->W ---
    7  => [ 'I',  "submit_job" ],       # C->J FUNC[0]UNIQ[0]ARGS
    21 => [ 'I',  "submit_job_high" ],  # C->J FUNC[0]UNIQ[0]ARGS
    18 => [ 'I',  "submit_job_bg" ],    # C->J " " " " "

    8  => [ 'O',  "job_created" ],      # J->C HANDLE
    9  => [ 'I',  "grab_job" ],         # W->J --
    10 => [ 'O',  "no_job" ],           # J->W --
    11 => [ 'O',  "job_assign" ],       # J->W HANDLE[0]FUNC[0]ARG

    12 => [ 'IO', "work_status" ],      # W->J/C: HANDLE[0]NUMERATOR[0]DENOMINATOR
    13 => [ 'IO', "work_complete" ],    # W->J/C: HANDLE[0]RES
    14 => [ 'IO', "work_fail" ],        # W->J/C: HANDLE

    15 => [ 'I',  "get_status" ],       # C->J: HANDLE
    20 => [ 'O',  "status_res" ],       # C->J: HANDLE[0]KNOWN[0]RUNNING[0]NUM[0]DENOM

    16 => [ 'I',  "echo_req" ],         # ?->J TEXT
    17 => [ 'O',  "echo_res" ],         # J->? TEXT

    19 => [ 'O',  "error" ],            # J->? ERRCODE[0]ERR_TEXT
);

our %num;  # name -> num
while (my ($num, $ary) = each %cmd) {
    die if $num{$ary->[1]};
    $num{$ary->[1]} = $num;
}

sub cmd_name {
    my $num = shift;
    my $c = $cmd{$num};
    return $c ? $c->[1] : undef;
}

sub pack_req_command {
    my $type_arg = shift;
    my $type = $num{$type_arg} || $type_arg;
    die "Bogus type arg of '$type_arg'" unless $type;
    my $arg = $_[0] || '';
    my $len = length($arg);
    return "\0REQ" . pack("NN", $type, $len) . $arg;
}

sub pack_res_command {
    my $type_arg = shift;
    my $type = int($type_arg) || $num{$type_arg};
    die "Bogus type arg of '$type_arg'" unless $type;
    my $len = length($_[0]);
    return "\0RES" . pack("NN", $type, $len) . $_[0];
}

# returns undef on closed socket or malformed packet
sub read_res_packet {
    my $sock = shift;
    my $err_ref = shift;

    my $buf;
    my $rv;

    my $err = sub {
        my $code = shift;
        $$err_ref = $code if ref $err_ref;
        return undef;
    };

    return $err->("malformed_header") unless sysread($sock, $buf, 12) == 12;
    my ($magic, $type, $len) = unpack("a4NN", $buf);
    return $err->("malformed_magic") unless $magic eq "\0RES";

    if ($len) {
        $rv = sysread($sock, $buf, $len);
        return $err->("short_body") unless $rv == $len;
    }

    $type = $cmd{$type};
    return $err->("bogus_command") unless $type;
    return $err->("bogus_command_type") unless index($type->[0], "O") != -1;

    return {
        'type'    => $type->[1],
        'len'     => $len,
        'blobref' => \$buf,
    };
}

sub send_req {
    my ($sock, $reqref) = @_;
    return 0 unless $sock;

    my $len = length($$reqref);
    #TODO: catch SIGPIPE
    my $rv = $sock->syswrite($$reqref, $len);
    return 0 unless $rv == $len;
    return 1;
}

# given a file descriptor number and a timeout, wait for that descriptor to
# become readable; returns 0 or 1 on if it did or not
sub wait_for_readability {
    my ($fileno, $timeout) = @_;
    return 0 unless $fileno && $timeout;

    my $rin = 0;
    vec($rin, $fileno, 1) = 1;
    my $nfound = select($rin, undef, undef, $timeout);

    # nfound can be undef or 0, both failures, or 1, a success
    return $nfound ? 1 : 0;
}

1;
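The framing above is symmetric: four magic bytes, then two network-order 32-bit integers (type and length), then the argument bytes. A small round-trip check using only the functions in this module (illustrative, not part of the commit):

    use Gearman::Util;

    my $req = Gearman::Util::pack_req_command("echo_req", "hello");
    my ($magic, $type, $len) = unpack("a4NN", $req);
    # $magic is "\0REQ", $type is 16 (echo_req), $len is 5
    print Gearman::Util::cmd_name($type), ": ", substr($req, 12, $len), "\n";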
366
wcmtools/gearman/lib/Gearman/Worker.pm
Executable file
@@ -0,0 +1,366 @@
|
||||
#!/usr/bin/perl
|
||||
|
||||
#TODO: retries?
|
||||
|
||||
use strict;
|
||||
use Gearman::Util;
|
||||
use Carp ();
|
||||
use IO::Socket::INET;
|
||||
|
||||
# this is the object that's handed to the worker subrefs
|
||||
package Gearman::Job;
|
||||
|
||||
use fields (
|
||||
'func',
|
||||
'argref',
|
||||
'handle',
|
||||
|
||||
'jss', # job server's socket
|
||||
);
|
||||
|
||||
sub new {
|
||||
my ($class, $func, $argref, $handle, $jss) = @_;
|
||||
my $self = $class;
|
||||
$self = fields::new($class) unless ref $self;
|
||||
|
||||
$self->{func} = $func;
|
||||
$self->{handle} = $handle;
|
||||
$self->{argref} = $argref;
|
||||
$self->{jss} = $jss;
|
||||
return $self;
|
||||
}
|
||||
|
||||
# ->set_status($numerator, $denominator) : $bool_sent_to_jobserver
|
||||
sub set_status {
|
||||
my Gearman::Job $self = shift;
|
||||
my ($nu, $de) = @_;
|
||||
|
||||
my $req = Gearman::Util::pack_req_command("work_status",
|
||||
join("\0", $self->{handle}, $nu, $de));
|
||||
return Gearman::Util::send_req($self->{jss}, \$req);
|
||||
}
|
||||
|
||||
sub argref {
|
||||
my Gearman::Job $self = shift;
|
||||
return $self->{argref};
|
||||
}
|
||||
|
||||
sub arg {
|
||||
my Gearman::Job $self = shift;
|
||||
return ${ $self->{argref} };
|
||||
}
|
||||
|
||||
|
||||
package Gearman::Worker;
|
||||
use Socket qw(IPPROTO_TCP TCP_NODELAY SOL_SOCKET PF_INET SOCK_STREAM);
|
||||
|
||||
use fields (
|
||||
'job_servers',
|
||||
'js_count',
|
||||
'sock_cache', # host:port -> IO::Socket::INET
|
||||
'last_connect_fail', # host:port -> unixtime
|
||||
'down_since', # host:port -> unixtime
|
||||
'connecting', # host:port -> unixtime connect started at
|
||||
'can', # func -> subref
|
||||
'client_id', # random identifier string, no whitespace
|
||||
);
|
||||
|
||||
sub new {
|
||||
my ($class, %opts) = @_;
|
||||
my $self = $class;
|
||||
$self = fields::new($class) unless ref $self;
|
||||
|
||||
$self->{job_servers} = [];
|
||||
$self->{js_count} = 0;
|
||||
$self->{sock_cache} = {};
|
||||
$self->{last_connect_fail} = {};
|
||||
$self->{down_since} = {};
|
||||
$self->{can} = {};
|
||||
$self->{client_id} = join("", map { chr(int(rand(26)) + 97) } (1..30));
|
||||
|
||||
$self->job_servers(@{ $opts{job_servers} })
|
||||
if $opts{job_servers};
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
sub _get_js_sock {
|
||||
my Gearman::Worker $self = shift;
|
||||
my $ipport = shift;
|
||||
|
||||
if (my $sock = $self->{sock_cache}{$ipport}) {
|
||||
return $sock if getpeername($sock);
|
||||
delete $self->{sock_cache}{$ipport};
|
||||
}
|
||||
|
||||
my $now = time;
|
||||
my $down_since = $self->{down_since}{$ipport};
|
||||
if ($down_since) {
|
||||
my $down_for = $now - $down_since;
|
||||
my $retry_period = $down_for > 60 ? 30 : (int($down_for / 2) + 1);
|
||||
if ($self->{last_connect_fail}{$ipport} > $now - $retry_period) {
|
||||
return undef;
|
||||
}
|
||||
}
|
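    # Backoff recap for the check above: while a server is marked down,
    # another connect is attempted at most every int(down_for / 2) + 1
    # seconds, capped at 30 seconds once it has been down for over a minute
    # (down 10s -> retry every 6s; down 2 minutes -> retry every 30s).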
||||
|
||||
return undef unless $ipport =~ /(^\d+\..+):(\d+)/;
|
||||
my ($ip, $port) = ($1, $2);
|
||||
|
||||
my $sock;
|
||||
socket $sock, PF_INET, SOCK_STREAM, IPPROTO_TCP;
|
||||
#IO::Handle::blocking($sock, 0);
|
||||
connect $sock, Socket::sockaddr_in($port, Socket::inet_aton($ip));
|
||||
|
||||
#my $sock = IO::Socket::INET->new(PeerAddr => $ip,
|
||||
# Timeout => 1);
|
||||
unless ($sock) {
|
||||
$self->{down_since}{$ipport} ||= $now;
|
||||
$self->{last_connect_fail}{$ipport} = $now;
|
||||
return undef;
|
||||
}
|
||||
delete $self->{last_connect_fail}{$ipport};
|
||||
delete $self->{down_since}{$ipport};
|
||||
$sock->autoflush(1);
|
||||
setsockopt($sock, IPPROTO_TCP, TCP_NODELAY, pack("l", 1)) or die;
|
||||
|
||||
$self->{sock_cache}{$ipport} = $sock;
|
||||
|
||||
my $cid_req = Gearman::Util::pack_req_command("set_client_id", $self->{client_id});
|
||||
Gearman::Util::send_req($sock, \$cid_req);
|
||||
|
||||
# get this socket's state caught-up
|
||||
foreach my $func (keys %{$self->{can}}) {
|
||||
unless (_set_capability($sock, $func, 1)) {
|
||||
delete $self->{sock_cache}{$ipport};
|
||||
return undef;
|
||||
}
|
||||
}
|
||||
|
||||
return $sock;
|
||||
}
|
||||
|
||||
sub _set_capability {
|
||||
my ($sock, $func, $can) = @_;
|
||||
|
||||
my $req = Gearman::Util::pack_req_command($can ? "can_do" : "cant_do",
|
||||
$func);
|
||||
return Gearman::Util::send_req($sock, \$req);
|
||||
}
|
||||
|
||||
# tell all the jobservers that this worker can't do anything
|
||||
sub reset_abilities {
|
||||
my Gearman::Worker $self = shift;
|
||||
my $req = Gearman::Util::pack_req_command("reset_abilities");
|
||||
foreach my $js (@{ $self->{job_servers} }) {
|
||||
my $jss = $self->_get_js_sock($js);
|
||||
unless (Gearman::Util::send_req($jss, \$req)) {
|
||||
delete $self->{sock_cache}{$js};
|
||||
}
|
||||
}
|
||||
|
||||
$self->{can} = {};
|
||||
}
|
||||
|
||||
# does one job and returns. no return value.
|
||||
sub work {
|
||||
my Gearman::Worker $self = shift;
|
||||
my $grab_req = Gearman::Util::pack_req_command("grab_job");
|
||||
my $presleep_req = Gearman::Util::pack_req_command("pre_sleep");
|
||||
my %fd_map;
|
||||
|
||||
while (1) {
|
||||
|
||||
my @jss;
|
||||
my $need_sleep = 1;
|
||||
|
||||
foreach my $js (@{ $self->{job_servers} }) {
|
||||
my $jss = $self->_get_js_sock($js)
|
||||
or next;
|
||||
|
||||
unless (Gearman::Util::send_req($jss, \$grab_req) &&
|
||||
Gearman::Util::wait_for_readability($jss->fileno, 0.50)) {
|
||||
delete $self->{sock_cache}{$js};
|
||||
next;
|
||||
}
|
||||
push @jss, [$js, $jss];
|
||||
|
||||
my ($res, $err);
|
||||
do {
|
||||
$res = Gearman::Util::read_res_packet($jss, \$err);
|
||||
} while ($res && $res->{type} eq "noop");
|
||||
|
||||
next unless $res;
|
||||
|
||||
if ($res->{type} eq "no_job") {
|
||||
next;
|
||||
}
|
||||
|
||||
die "Uh, wasn't expecting a $res->{type} packet" unless $res->{type} eq "job_assign";
|
||||
|
||||
${ $res->{'blobref'} } =~ s/^(.+?)\0(.+?)\0//
|
||||
or die "Uh, regexp on job_assign failed";
|
||||
my ($handle, $func) = ($1, $2);
|
||||
my $job = Gearman::Job->new($func, $res->{'blobref'}, $handle, $jss);
|
||||
my $handler = $self->{can}{$func};
|
||||
my $ret = eval { $handler->($job); };
|
||||
|
||||
my $work_req;
|
||||
if (defined $ret) {
|
||||
$work_req = Gearman::Util::pack_req_command("work_complete", "$handle\0" . (ref $ret ? $$ret : $ret));
|
||||
} else {
|
||||
$work_req = Gearman::Util::pack_req_command("work_fail", $handle);
|
||||
}
|
||||
|
||||
unless (Gearman::Util::send_req($jss, \$work_req)) {
|
||||
delete $self->{sock_cache}{$js};
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if ($need_sleep) {
|
||||
my $wake_vec = 0;
|
||||
foreach my $j (@jss) {
|
||||
my ($js, $jss) = @$j;
|
||||
unless (Gearman::Util::send_req($jss, \$presleep_req)) {
|
||||
delete $self->{sock_cache}{$js};
|
||||
next;
|
||||
}
|
||||
my $fd = $jss->fileno;
|
||||
vec($wake_vec, $fd, 1) = 1;
|
||||
}
|
||||
|
||||
# chill for some arbitrary time until we're woken up again
|
||||
select($wake_vec, undef, undef, 10);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
sub register_function {
|
||||
my Gearman::Worker $self = shift;
|
||||
my $func = shift;
|
||||
my $subref = shift;
|
||||
|
||||
my $req = Gearman::Util::pack_req_command("can_do", $func);
|
||||
|
||||
foreach my $js (@{ $self->{job_servers} }) {
|
||||
my $jss = $self->_get_js_sock($js);
|
||||
unless (Gearman::Util::send_req($jss, \$req)) {
|
||||
delete $self->{sock_cache}{$js};
|
||||
}
|
||||
}
|
||||
|
||||
$self->{can}{$func} = $subref;
|
||||
}
|
||||
|
||||
# getter/setter
|
||||
sub job_servers {
|
||||
my Gearman::Worker $self = shift;
|
||||
return $self->{job_servers} unless @_;
|
||||
my $list = [ @_ ];
|
||||
$self->{js_count} = scalar @$list;
|
||||
foreach (@$list) {
|
||||
$_ .= ":7003" unless /:/;
|
||||
}
|
||||
return $self->{job_servers} = $list;
|
||||
}
|
||||
|
||||
|
||||
1;
|
||||
__END__
|
||||
|
||||
=head1 NAME
|
||||
|
||||
Gearman::Worker - Worker for gearman distributed job system
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
use Gearman::Worker;
|
||||
my $worker = Gearman::Worker->new;
|
||||
$worker->job_servers('127.0.0.1');
|
||||
$worker->register_function($funcname => $subref);
|
||||
$worker->work while 1;
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
I<Gearman::Worker> is a worker class for the Gearman distributed job system,
|
||||
providing a framework for receiving and serving jobs from a Gearman server.
|
||||
|
||||
Callers instantiate a I<Gearman::Worker> object, register a list of functions
|
||||
and capabilities that they can handle, then enter an event loop, waiting
|
||||
for the server to send jobs.
|
||||
|
||||
The worker can send a return value back to the server, which then gets
|
||||
sent back to the client that requested the job; or it can simply execute
|
||||
silently.
|
||||
|
||||
=head1 USAGE
|
||||
|
||||
=head2 Gearman::Worker->new(\%options)
|
||||
|
||||
Creates a new I<Gearman::Worker> object, and returns the object.
|
||||
|
||||
If I<%options> is provided, initializes the new worker object with the
|
||||
settings in I<%options>, which can contain:
|
||||
|
||||
=over 4
|
||||
|
||||
=item * job_servers
|
||||
|
||||
Calls I<job_servers> (see below) to initialize the list of job servers.
|
||||
|
||||
=back
|
||||
|
||||
=head2 $worker->job_servers(@servers)
|
||||
|
||||
Initializes the worker I<$worker> with the list of job servers in I<@servers>.
|
||||
I<@servers> should contain a list of IP addresses, with optional port numbers.
|
||||
For example:
|
||||
|
||||
$worker->job_servers('127.0.0.1', '192.168.1.100:7003');
|
||||
|
||||
If the port number is not provided, 7003 is used as the default.
|
||||
|
||||
=head2 $worker->register_function($funcname, $subref)
|
||||
|
||||
Registers the function I<$funcname> as being provided by the worker
|
||||
I<$worker>, and advertises these capabilities to all of the job servers
|
||||
defined in this worker.
|
||||
|
||||
I<$subref> must be a subroutine reference that will be invoked when the
|
||||
worker receives a request for this function. It will be passed a
|
||||
I<Gearman::Job> object representing the job that has been received by the
|
||||
worker.
|
||||
|
||||
The subroutine reference can return a return value, which will be sent back
|
||||
to the job server.
|
||||
|
||||
=head2 Gearman::Job->arg
|
||||
|
||||
Returns the scalar argument that the client sent to the job server.
|
||||
|
||||
=head2 Gearman::Job->set_status($numerator, $denominator)
|
||||
|
||||
Updates the status of the job (most likely, a long-running job) and sends
|
||||
it back to the job server. I<$numerator> and I<$denominator> should
|
||||
represent the percentage completion of the job.
|
||||
|
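For instance, a worker handling a slow job might report progress as it goes (an illustrative sketch with a made-up function name; the I<long> function in F<t/worker.pl> in this distribution does the same thing):

    $worker->register_function(resize_images => sub {
        my $job = shift;
        my @files = split /\0/, $job->arg;
        for my $i (0 .. $#files) {
            $job->set_status($i + 1, scalar @files);
            # ... do the real work on $files[$i] here ...
        }
        return scalar @files;
    });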
||||
=head1 EXAMPLES
|
||||
|
||||
=head2 Summation
|
||||
|
||||
This is an example worker that receives a request to sum up a list of
|
||||
integers.
|
||||
|
||||
use Gearman::Worker;
|
||||
use Storable qw( thaw );
|
||||
use List::Util qw( sum );
|
||||
my $worker = Gearman::Worker->new;
|
||||
$worker->job_servers('127.0.0.1');
|
||||
$worker->register_function(sum => sub { sum @{ thaw($_[0]->arg) } });
|
||||
$worker->work while 1;
|
||||
|
||||
See the I<Gearman::Client> documentation for a sample client sending the
|
||||
I<sum> job.
|
||||
|
||||
=cut
|
||||
694
wcmtools/gearman/server/gearmand
Executable file
@@ -0,0 +1,694 @@
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
# Gearman
|
||||
#
|
||||
# Status: 2005-04-13
|
||||
#
|
||||
# Copyright 2005, Danga Interactive
|
||||
#
|
||||
# Authors:
|
||||
# Brad Fitzpatrick <brad@danga.com>
|
||||
# Brad Whitaker <whitaker@danga.com>
|
||||
#
|
||||
# License:
|
||||
# terms of Perl itself.
|
||||
#
|
||||
|
||||
use strict;
|
||||
use Getopt::Long;
|
||||
use Carp;
|
||||
use Danga::Socket;
|
||||
use IO::Socket::INET;
|
||||
use POSIX ();
|
||||
use lib '../lib';
|
||||
use Gearman::Util;
|
||||
|
||||
use vars qw($DEBUG);
|
||||
$DEBUG = 0;
|
||||
|
||||
my (
|
||||
$daemonize,
|
||||
$nokeepalive,
|
||||
);
|
||||
my $conf_port = 7003;
|
||||
|
||||
Getopt::Long::GetOptions(
|
||||
'd|daemon' => \$daemonize,
|
||||
'p|port=i' => \$conf_port,
|
||||
'debug=i' => \$DEBUG,
|
||||
);
|
||||
|
||||
daemonize() if $daemonize;
|
||||
|
||||
use Socket qw(IPPROTO_TCP TCP_NODELAY SOL_SOCKET);
|
||||
|
||||
$SIG{'PIPE'} = "IGNORE"; # handled manually
|
||||
|
||||
# establish SERVER socket, bind and listen.
|
||||
my $server = IO::Socket::INET->new(LocalPort => $conf_port,
|
||||
Type => SOCK_STREAM,
|
||||
Proto => IPPROTO_TCP,
|
||||
Blocking => 0,
|
||||
Reuse => 1,
|
||||
Listen => 10 )
|
||||
or die "Error creating socket: $@\n";
|
||||
|
||||
# Not sure if I'm crazy or not, but I can't see in strace where/how
|
||||
# Perl 5.6 sets blocking to 0 without this. In Perl 5.8, IO::Socket::INET
|
||||
# obviously sets it from watching strace.
|
||||
IO::Handle::blocking($server, 0);
|
||||
|
||||
my $accept_handler = sub {
|
||||
my $csock = $server->accept();
|
||||
return unless $csock;
|
||||
|
||||
printf("Listen child making a Client for %d.\n", fileno($csock))
|
||||
if $DEBUG;
|
||||
|
||||
IO::Handle::blocking($csock, 0);
|
||||
setsockopt($csock, IPPROTO_TCP, TCP_NODELAY, pack("l", 1)) or die;
|
||||
|
||||
my $client = Client->new($csock);
|
||||
$client->watch_read(1);
|
||||
};
|
||||
|
||||
Client->OtherFds(fileno($server) => $accept_handler);
|
||||
|
||||
sub daemonize {
|
||||
my($pid, $sess_id, $i);
|
||||
|
||||
## Fork and exit parent
|
||||
if ($pid = fork) { exit 0; }
|
||||
|
||||
## Detach ourselves from the terminal
|
||||
croak "Cannot detach from controlling terminal"
|
||||
unless $sess_id = POSIX::setsid();
|
||||
|
||||
## Prevent possibility of acquiring a controlling terminal
|
||||
$SIG{'HUP'} = 'IGNORE';
|
||||
if ($pid = fork) { exit 0; }
|
||||
|
||||
## Change working directory
|
||||
chdir "/";
|
||||
|
||||
## Clear file creation mask
|
||||
umask 0;
|
||||
|
||||
## Close open file descriptors
|
||||
close(STDIN);
|
||||
close(STDOUT);
|
||||
close(STDERR);
|
||||
|
||||
## Reopen stderr, stdout, stdin to /dev/null
|
||||
open(STDIN, "+>/dev/null");
|
||||
open(STDOUT, "+>&STDIN");
|
||||
open(STDERR, "+>&STDIN");
|
||||
}
|
||||
|
||||
#####################################################################
|
||||
### Job definition
|
||||
package Job;
|
||||
use Sys::Hostname;
|
||||
|
||||
use fields (
|
||||
'func',
|
||||
'uniq',
|
||||
'argref',
|
||||
'listeners', # arrayref of interested Clients
|
||||
'worker',
|
||||
'handle',
|
||||
'status', # [1, 100]
|
||||
'require_listener',
|
||||
);
|
||||
|
||||
our $handle_ct = 0;
|
||||
our $handle_base = "H:" . hostname() . ":";
|
||||
|
||||
our %job_queue; # job_name -> [Job, Job*] (key only exists if non-empty)
|
||||
our %jobOfHandle; # handle -> Job
|
||||
our %jobOfUniq; # func -> uniq -> Job
|
||||
|
||||
#####################################################################
|
||||
### Client definition
|
||||
package Client;
|
||||
|
||||
use Danga::Socket;
|
||||
use base 'Danga::Socket';
|
||||
use fields (
|
||||
'can_do', # { $job_name => 1 }
|
||||
'can_do_list',
|
||||
'can_do_iter',
|
||||
'read_buf',
|
||||
'sleeping', # 0/1: they've said they're sleeping and we haven't woken them up
|
||||
'doing', # { $job_handle => Job }
|
||||
'client_id', # opaque string, no whitespace. workers give this so checker scripts
|
||||
# can tell apart the same worker connected to multiple jobservers.
|
||||
);
|
||||
|
||||
|
||||
#####################################################################
|
||||
### J O B C L A S S
|
||||
#####################################################################
|
||||
package Job;
|
||||
|
||||
sub new {
|
||||
my Job $self = shift;
|
||||
my ($func, $uniq, $argref, $highpri) = @_;
|
||||
|
||||
$self = fields::new($self) unless ref $self;
|
||||
|
||||
# if they specified a uniq, see if we have a dup job running already
|
||||
# to merge with
|
||||
if (length($uniq)) {
|
||||
# a unique value of "-" means "use my args as my unique key"
|
||||
$uniq = $$argref if $uniq eq "-";
|
||||
if ($jobOfUniq{$func} && $jobOfUniq{$func}{$uniq}) {
|
||||
# found a match
|
||||
return $jobOfUniq{$func}{$uniq};
|
||||
} else {
|
||||
# create a new key
|
||||
$jobOfUniq{$func} ||= {};
|
||||
$jobOfUniq{$func}{$uniq} = $self;
|
||||
}
|
||||
}
|
||||
|
||||
$self->{'func'} = $func;
|
||||
$self->{'uniq'} = $uniq;
|
||||
$self->{'require_listener'} = 1;
|
||||
$self->{'argref'} = $argref;
|
||||
$self->{'listeners'} = [];
|
||||
|
||||
$handle_ct++;
|
||||
$self->{'handle'} = $handle_base . $handle_ct;
|
||||
|
||||
my $jq = ($job_queue{$func} ||= []);
|
||||
if ($highpri) {
|
||||
unshift @$jq, $self;
|
||||
} else {
|
||||
push @$jq, $self;
|
||||
}
|
||||
|
||||
$jobOfHandle{$self->{'handle'}} = $self;
|
||||
|
||||
return $self;
|
||||
}
|
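# A note on the coalescing above: two submit_job packets carrying the same
# (FUNC, UNIQ) pair while the first job is still pending resolve to the same
# Job object, so both clients receive job_created with the identical handle;
# non-background submitters are simply added as listeners of the one queued
# job.  A UNIQ of "-" uses the argument blob itself as the key, which
# de-duplicates byte-identical requests.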
||||
|
||||
sub Grab {
|
||||
my ($class, $func) = @_;
|
||||
return undef unless $job_queue{$func};
|
||||
|
||||
my $empty = sub {
|
||||
delete $job_queue{$func};
|
||||
return undef;
|
||||
};
|
||||
|
||||
my Job $job;
|
||||
while (1) {
|
||||
$job = shift @{$job_queue{$func}};
|
||||
return $empty->() unless $job;
|
||||
return $job unless $job->{require_listener};
|
||||
|
||||
foreach my Client $c (@{$job->{listeners}}) {
|
||||
return $job unless $c->{closed};
|
||||
}
|
||||
$job->note_finished(0);
|
||||
}
|
||||
}
|
||||
|
||||
sub GetByHandle {
|
||||
my ($class, $handle) = @_;
|
||||
return $jobOfHandle{$handle};
|
||||
}
|
||||
|
||||
sub add_listener {
|
||||
my Job $self = shift;
|
||||
my Client $li = shift;
|
||||
push @{$self->{listeners}}, $li;
|
||||
}
|
||||
|
||||
sub relay_to_listeners {
|
||||
my Job $self = shift;
|
||||
foreach my Client $c (@{$self->{listeners}}) {
|
||||
next if $c->{closed};
|
||||
$c->write($_[0]);
|
||||
}
|
||||
}
|
||||
|
||||
sub note_finished {
|
||||
my Job $self = shift;
|
||||
my $success = shift;
|
||||
|
||||
if (length($self->{uniq})) {
|
||||
delete $jobOfUniq{$self->{func}}{$self->{uniq}};
|
||||
}
|
||||
delete $jobOfHandle{$self->{handle}};
|
||||
}
|
||||
|
||||
# accessors:
|
||||
sub worker {
|
||||
my Job $self = shift;
|
||||
return $self->{'worker'} unless @_;
|
||||
return $self->{'worker'} = shift;
|
||||
}
|
||||
sub require_listener {
|
||||
my Job $self = shift;
|
||||
return $self->{'require_listener'} unless @_;
|
||||
return $self->{'require_listener'} = shift;
|
||||
}
|
||||
|
||||
# takes arrayref of [numerator,denominator]
|
||||
sub status {
|
||||
my Job $self = shift;
|
||||
return $self->{'status'} unless @_;
|
||||
return $self->{'status'} = shift;
|
||||
}
|
||||
|
||||
sub handle {
|
||||
my Job $self = shift;
|
||||
return $self->{'handle'};
|
||||
}
|
||||
|
||||
sub func {
|
||||
my Job $self = shift;
|
||||
return $self->{'func'};
|
||||
}
|
||||
|
||||
sub argref {
|
||||
my Job $self = shift;
|
||||
return $self->{'argref'};
|
||||
}
|
||||
|
||||
|
||||
#####################################################################
|
||||
### C L I E N T C L A S S
|
||||
#####################################################################
|
||||
package Client;
|
||||
|
||||
our %sleepers; # func -> [ sleepers ] (wiped on wakeup)
|
||||
|
||||
our %client_map; # fd -> Client object
|
||||
|
||||
# Class Method:
|
||||
sub new {
|
||||
my Client $self = shift;
|
||||
$self = fields::new($self) unless ref $self;
|
||||
$self->SUPER::new( @_ );
|
||||
|
||||
$self->{read_buf} = '';
|
||||
$self->{sleeping} = 0;
|
||||
$self->{can_do} = {};
|
||||
$self->{doing} = {}; # handle -> Job
|
||||
$self->{can_do_list} = [];
|
||||
$self->{can_do_iter} = 0; # numeric iterator for where we start looking for jobs
|
||||
$self->{client_id} = "-";
|
||||
|
||||
$client_map{$self->{fd}} = $self;
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
# Class Method:
|
||||
sub WakeUpSleepers {
|
||||
my ($class, $func) = @_;
|
||||
return unless $sleepers{$func};
|
||||
my Client $c;
|
||||
foreach $c (@{$sleepers{$func}}) {
|
||||
next if $c->{closed} || ! $c->{sleeping};
|
||||
$c->res_packet("noop");
|
||||
$c->{sleeping} = 0;
|
||||
}
|
||||
delete $sleepers{$func};
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
sub close {
|
||||
my Client $self = shift;
|
||||
|
||||
while (my ($handle, $job) = each %{$self->{doing}}) {
|
||||
my $msg = Gearman::Util::pack_res_command("work_fail", $handle);
|
||||
$job->relay_to_listeners($msg);
|
||||
$job->note_finished(0);
|
||||
}
|
||||
|
||||
delete $client_map{$self->{fd}};
|
||||
$self->CMD_reset_abilities;
|
||||
|
||||
$self->SUPER::close;
|
||||
}
|
||||
|
||||
# Client
|
||||
sub event_read {
|
||||
my Client $self = shift;
|
||||
|
||||
my $bref = $self->read(1024);
|
||||
return $self->close unless defined $bref;
|
||||
$self->{read_buf} .= $$bref;
|
||||
|
||||
my $found_cmd;
|
||||
do {
|
||||
$found_cmd = 1;
|
||||
my $blen = length($self->{read_buf});
|
||||
|
||||
if ($self->{read_buf} =~ /^\0REQ(.{8,8})/s) {
|
||||
my ($cmd, $len) = unpack("NN", $1);
|
||||
if ($blen < $len + 12) {
|
||||
# not here yet.
|
||||
$found_cmd = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
$self->process_cmd($cmd, substr($self->{read_buf}, 12, $len));
|
||||
|
||||
# and slide down buf:
|
||||
$self->{read_buf} = substr($self->{read_buf}, 12+$len);
|
||||
|
||||
} elsif ($self->{read_buf} =~ s/^(\w.+?)?\r?\n//) {
|
||||
# ASCII command case (useful for telnetting in)
|
||||
my $line = $1;
|
||||
$self->process_line($line);
|
||||
} else {
|
||||
$found_cmd = 0;
|
||||
}
|
||||
} while ($found_cmd);
|
||||
}
|
||||
|
||||
# line-based commands
|
||||
sub process_line {
|
||||
my Client $self = shift;
|
||||
my $line = shift;
|
||||
|
||||
if ($line =~ /^(\w+)\s*(.*)/) {
|
||||
my ($cmd, $args) = ($1, $2);
|
||||
$cmd = lc($cmd);
|
||||
|
||||
no strict 'refs';
|
||||
my $cmd_handler = *{"TXTCMD_$cmd"}{CODE};
|
||||
if ($cmd_handler) {
|
||||
my $args = decode_url_args(\$args);
|
||||
$cmd_handler->($self, $args);
|
||||
next;
|
||||
}
|
||||
}
|
||||
|
||||
return $self->err_line('unknown_command');
|
||||
}
|
||||
|
||||
sub TXTCMD_workers {
|
||||
my Client $self = shift;
|
||||
my $args = shift;
|
||||
|
||||
foreach my $fd (sort { $a <=> $b } keys %client_map) {
|
||||
my Client $cl = $client_map{$fd};
|
||||
$self->write("$fd " . $cl->peer_ip_string . " $cl->{client_id} : @{$cl->{can_do_list}}\n");
|
||||
|
||||
}
|
||||
$self->write(".\n");
|
||||
}
|
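# For the curious: the line-based path above means a running gearmand can be
# poked from a terminal.  An illustrative session (addresses and ids made up):
#
#   $ telnet 127.0.0.1 7003
#   workers
#   5 10.0.0.12 tcnejadkxqwmrbzhyfos : sum echo_ws long
#   .
#
# Each line is "fd ip client_id : registered functions", terminated by ".".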
||||
|
||||
sub CMD_echo_req {
|
||||
my Client $self = shift;
|
||||
my $blobref = shift;
|
||||
|
||||
return $self->res_packet("echo_res", $$blobref);
|
||||
}
|
||||
|
||||
sub CMD_work_status {
|
||||
my Client $self = shift;
|
||||
my $ar = shift;
|
||||
my ($handle, $nu, $de) = split(/\0/, $$ar);
|
||||
|
||||
my $job = $self->{doing}{$handle};
|
||||
return $self->error_packet("not_worker") unless $job && $job->worker == $self;
|
||||
|
||||
my $msg = Gearman::Util::pack_res_command("work_status", $$ar);
|
||||
$job->relay_to_listeners($msg);
|
||||
$job->status([$nu, $de]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub CMD_work_complete {
|
||||
my Client $self = shift;
|
||||
my $ar = shift;
|
||||
|
||||
$$ar =~ s/^(.+?)\0//;
|
||||
my $handle = $1;
|
||||
|
||||
my $job = delete $self->{doing}{$handle};
|
||||
return $self->error_packet("not_worker") unless $job && $job->worker == $self;
|
||||
|
||||
my $msg = Gearman::Util::pack_res_command("work_complete", join("\0", $handle, $$ar));
|
||||
$job->relay_to_listeners($msg);
|
||||
$job->note_finished(1);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub CMD_work_fail {
|
||||
my Client $self = shift;
|
||||
my $ar = shift;
|
||||
my $handle = $$ar;
|
||||
my $job = delete $self->{doing}{$handle};
|
||||
return $self->error_packet("not_worker") unless $job && $job->worker == $self;
|
||||
|
||||
my $msg = Gearman::Util::pack_res_command("work_fail", $handle);
|
||||
$job->relay_to_listeners($msg);
|
||||
$job->note_finished(1);
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub CMD_pre_sleep {
|
||||
my Client $self = shift;
|
||||
$self->{'sleeping'} = 1;
|
||||
|
||||
foreach my $cd (@{$self->{can_do_list}}) {
|
||||
|
||||
# immediately wake the sleeper up if there are things to be done
|
||||
if ($job_queue{$cd}) {
|
||||
$self->res_packet("noop");
|
||||
$self->{sleeping} = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
push @{$sleepers{$cd} ||= []}, $self;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub CMD_grab_job {
|
||||
my Client $self = shift;
|
||||
|
||||
my $job;
|
||||
my $can_do_size = scalar @{$self->{can_do_list}};
|
||||
|
||||
unless ($can_do_size) {
|
||||
$self->res_packet("no_job");
|
||||
return;
|
||||
}
|
||||
|
||||
# the offset where we start asking for jobs, to prevent starvation
|
||||
# of some job types.
|
||||
$self->{can_do_iter} = ($self->{can_do_iter} + 1) % $can_do_size;
|
||||
|
||||
my $tried = 0;
|
||||
while ($tried < $can_do_size) {
|
||||
my $idx = ($tried + $self->{can_do_iter}) % $can_do_size;
|
||||
$tried++;
|
||||
my $job_to_grab = $self->{can_do_list}->[$idx];
|
||||
$job = Job->Grab($job_to_grab);
|
||||
if ($job) {
|
||||
$job->worker($self);
|
||||
$self->{doing}{$job->handle} = $job;
|
||||
return $self->res_packet("job_assign",
|
||||
join("\0",
|
||||
$job->handle,
|
||||
$job->func,
|
||||
${$job->argref},
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
$self->res_packet("no_job");
|
||||
}
|
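# Worked example of the iterator above: with can_do_list = (a, b, c) and
# can_do_iter previously 0, this grab bumps the iterator to 1 and probes the
# job queues in the order b, c, a; the next grab probes c, a, b.  Every
# registered function periodically gets a turn at the front, which is what
# keeps one busy queue from starving the others.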
||||
|
||||
sub CMD_can_do {
|
||||
my Client $self = shift;
|
||||
my $ar = shift;
|
||||
|
||||
$self->{can_do}->{$$ar} = 1;
|
||||
$self->_setup_can_do_list;
|
||||
}
|
||||
|
||||
sub CMD_set_client_id {
|
||||
my Client $self = shift;
|
||||
my $ar = shift;
|
||||
|
||||
$self->{client_id} = $$ar;
|
||||
$self->{client_id} =~ s/\s+//g;
|
||||
$self->{client_id} = "-" unless length $self->{client_id};
|
||||
}
|
||||
|
||||
sub CMD_cant_do {
|
||||
my Client $self = shift;
|
||||
my $ar = shift;
|
||||
|
||||
delete $self->{can_do}->{$$ar};
|
||||
$self->_setup_can_do_list;
|
||||
}
|
||||
|
||||
sub CMD_get_status {
|
||||
my Client $self = shift;
|
||||
my $ar = shift;
|
||||
my $job = Job->GetByHandle($$ar);
|
||||
|
||||
# handles can't contain nulls
|
||||
return if $$ar =~ /\0/;
|
||||
|
||||
my ($known, $running, $num, $den);
|
||||
$known = 0;
|
||||
$running = 0;
|
||||
if ($job) {
|
||||
$known = 1;
|
||||
$running = $job->worker ? 1 : 0;
|
||||
if (my $stat = $job->status) {
|
||||
($num, $den) = @$stat;
|
||||
}
|
||||
}
|
||||
|
||||
$self->res_packet("status_res", join("\0",
|
||||
$$ar,
|
||||
$known,
|
||||
$running,
|
||||
$num,
|
||||
$den));
|
||||
}
|
||||
|
||||
sub CMD_reset_abilities {
|
||||
my Client $self = shift;
|
||||
|
||||
$self->{can_do} = {};
|
||||
$self->_setup_can_do_list;
|
||||
}
|
||||
|
||||
sub _setup_can_do_list {
|
||||
my Client $self = shift;
|
||||
$self->{can_do_list} = [ keys %{$self->{can_do}} ];
|
||||
$self->{can_do_iter} = 0;
|
||||
}
|
||||
|
||||
sub CMD_submit_job { push @_, 1; &_cmd_submit_job; }
|
||||
sub CMD_submit_job_bg { push @_, 0; &_cmd_submit_job; }
|
||||
sub CMD_submit_job_high { push @_, 1, 1; &_cmd_submit_job; }
|
||||
|
||||
sub _cmd_submit_job {
|
||||
my Client $self = shift;
|
||||
my $ar = shift;
|
||||
my $subscribe = shift;
|
||||
my $high_pri = shift;
|
||||
|
||||
return $self->error_packet("invalid_args", "No func/uniq header [$$ar].")
|
||||
unless $$ar =~ s/^(.+?)\0(.*?)\0//;
|
||||
|
||||
my ($func, $uniq) = ($1, $2);
|
||||
|
||||
my $job = Job->new($func, $uniq, $ar, $high_pri);
|
||||
|
||||
if ($subscribe) {
|
||||
$job->add_listener($self);
|
||||
} else {
|
||||
# background mode
|
||||
$job->require_listener(0);
|
||||
}
|
||||
|
||||
$self->res_packet("job_created", $job->handle);
|
||||
Client->WakeUpSleepers($func);
|
||||
}
|
||||
|
||||
sub res_packet {
|
||||
my Client $self = shift;
|
||||
my ($code, $arg) = @_;
|
||||
$self->write(Gearman::Util::pack_res_command($code, $arg));
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub error_packet {
|
||||
my Client $self = shift;
|
||||
my ($code, $msg) = @_;
|
||||
$self->write(Gearman::Util::pack_res_command("error", "$code\0$msg"));
|
||||
return 0;
|
||||
}
|
||||
|
||||
sub process_cmd {
|
||||
my Client $self = shift;
|
||||
my $cmd = shift;
|
||||
my $blob = shift;
|
||||
|
||||
my $cmd_name = "CMD_" . Gearman::Util::cmd_name($cmd);
|
||||
my $ret = eval {
|
||||
$self->$cmd_name(\$blob);
|
||||
};
|
||||
return $ret unless $@;
|
||||
print "Error: $@\n";
|
||||
return $self->error_packet("server_error", $@);
|
||||
}
|
||||
|
||||
# Client
|
||||
sub event_err { my $self = shift; $self->close; }
|
||||
sub event_hup { my $self = shift; $self->close; }
|
||||
|
||||
sub err_line {
|
||||
my Client $self = shift;
|
||||
my $err_code = shift;
|
||||
my $err_text = {
|
||||
'unknown_command' => "Unknown server command",
|
||||
}->{$err_code};
|
||||
|
||||
$self->write("ERR $err_code " . eurl($err_text) . "\r\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
sub eurl
|
||||
{
|
||||
my $a = $_[0];
|
||||
$a =~ s/([^a-zA-Z0-9_\,\-.\/\\\: ])/uc sprintf("%%%02x",ord($1))/eg;
|
||||
$a =~ tr/ /+/;
|
||||
return $a;
|
||||
}
|
||||
|
||||
sub durl
|
||||
{
|
||||
my ($a) = @_;
|
||||
$a =~ tr/+/ /;
|
||||
$a =~ s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
|
||||
return $a;
|
||||
}
|
||||
|
||||
sub decode_url_args
|
||||
{
|
||||
my $a = shift;
|
||||
my $buffer = ref $a ? $a : \$a;
|
||||
my $ret = {};
|
||||
|
||||
my $pair;
|
||||
my @pairs = split(/&/, $$buffer);
|
||||
my ($name, $value);
|
||||
foreach $pair (@pairs)
|
||||
{
|
||||
($name, $value) = split(/=/, $pair);
|
||||
$value =~ tr/+/ /;
|
||||
$value =~ s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
|
||||
$name =~ tr/+/ /;
|
||||
$name =~ s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
|
||||
$ret->{$name} .= $ret->{$name} ? "\0$value" : $value;
|
||||
}
|
||||
return $ret;
|
||||
}
|
||||
|
||||
package main;
|
||||
Client->EventLoop();
|
||||
|
||||
# Local Variables:
|
||||
# mode: perl
|
||||
# c-basic-indent: 4
|
||||
# indent-tabs-mode: nil
|
||||
# End:
|
6
wcmtools/gearman/t/00-use.t
Executable file
@@ -0,0 +1,6 @@
use strict;
use Test::More tests => 3;

use_ok('Gearman::Util');
use_ok('Gearman::Worker');
use_ok('Gearman::Client');
198
wcmtools/gearman/t/10-all.t
Executable file
@@ -0,0 +1,198 @@
|
||||
use strict;
|
||||
our $Bin;
|
||||
use FindBin qw( $Bin );
|
||||
use File::Spec;
|
||||
use Gearman::Client;
|
||||
use Storable qw( freeze );
|
||||
use Test::More tests => 20;
|
||||
use IO::Socket::INET;
|
||||
use POSIX qw( :sys_wait_h );
|
||||
|
||||
use constant PORT => 9000;
|
||||
our %Children;
|
||||
|
||||
END { kill_children() }
|
||||
|
||||
start_server(PORT);
|
||||
start_server(PORT + 1);
|
||||
|
||||
## Sleep, wait for servers to start up before connecting workers.
|
||||
wait_for_port(PORT);
|
||||
wait_for_port(PORT + 1);
|
||||
|
||||
## Look for 2 job servers, starting at port number PORT.
|
||||
start_worker(PORT, 2);
|
||||
start_worker(PORT, 2);
|
||||
|
||||
my $client = Gearman::Client->new;
|
||||
isa_ok($client, 'Gearman::Client');
|
||||
$client->job_servers('127.0.0.1:' . PORT, '127.0.0.1:' . (PORT + 1));
|
||||
|
||||
eval { $client->do_task(sum => []) };
|
||||
like($@, qr/scalar or scalarref/, 'do_task does not accept arrayref argument');
|
||||
|
||||
my $out = $client->do_task(sum => freeze([ 3, 5 ]));
|
||||
is($$out, 8, 'do_task returned 8 for sum');
|
||||
|
||||
my $tasks = $client->new_task_set;
|
||||
isa_ok($tasks, 'Gearman::Taskset');
|
||||
my $sum;
|
||||
my $failed = 0;
|
||||
my $completed = 0;
|
||||
my $handle = $tasks->add_task(sum => freeze([ 3, 5 ]), {
|
||||
on_complete => sub { $sum = ${ $_[0] } },
|
||||
on_fail => sub { $failed = 1 }
|
||||
});
|
||||
$tasks->wait;
|
||||
is($sum, 8, 'add_task/wait returned 8 for sum');
|
||||
is($failed, 0, 'on_fail not called on a successful result');
|
||||
|
||||
## Now try a task set with 2 tasks, and make sure they are both completed.
|
||||
$tasks = $client->new_task_set;
|
||||
my @sums;
|
||||
$tasks->add_task(sum => freeze([ 1, 1 ]), {
|
||||
on_complete => sub { $sums[0] = ${ $_[0] } },
|
||||
});
|
||||
$tasks->add_task(sum => freeze([ 2, 2 ]), {
|
||||
on_complete => sub { $sums[1] = ${ $_[0] } },
|
||||
});
|
||||
$tasks->wait;
|
||||
is($sums[0], 2, 'First task completed (sum is 2)');
|
||||
is($sums[1], 4, 'Second task completed (sum is 4)');
|
||||
|
||||
## Test some failure conditions:
|
||||
## Normal failure (worker returns undef or dies within eval).
|
||||
is($client->do_task('fail'), undef, 'Job that failed naturally returned undef');
|
||||
## Worker process exits.
|
||||
is($client->do_task('fail_exit'), undef,
|
||||
'Job that failed via exit returned undef');
|
||||
pid_is_dead(wait());
|
||||
|
||||
## Worker process times out (takes longer than fail_after_idle seconds).
|
||||
TODO: {
|
||||
todo_skip 'fail_after_idle is not yet implemented', 1;
|
||||
is($client->do_task('sleep', 5, { fail_after_idle => 3 }), undef,
|
||||
'Job that timed out after 3 seconds returns failure (fail_after_idle)');
|
||||
}
|
||||
|
||||
## Test retry_count.
|
||||
my $retried = 0;
|
||||
is($client->do_task('fail' => '', {
|
||||
on_retry => sub { $retried++ },
|
||||
retry_count => 3,
|
||||
}), undef, 'Failure response is still failure, even after retrying');
|
||||
is($retried, 3, 'Retried 3 times');
|
||||
|
||||
my $tasks = $client->new_task_set;
|
||||
$completed = 0;
|
||||
$failed = 0;
|
||||
$tasks->add_task(fail => '', {
|
||||
on_complete => sub { $completed = 1 },
|
||||
on_fail => sub { $failed = 1 },
|
||||
});
|
||||
$tasks->wait;
|
||||
is($completed, 0, 'on_complete not called on failed result');
|
||||
is($failed, 1, 'on_fail called on failed result');
|
||||
|
||||
## Test high_priority.
|
||||
## Create a taskset with 4 tasks, and have the 3rd fail.
|
||||
## In on_fail, add a new task with high priority set, and make sure it
|
||||
## gets executed before task 4. To make this reliable, we need to first
|
||||
## kill off all but one of the worker processes.
|
||||
my @worker_pids = grep $Children{$_} eq 'W', keys %Children;
|
||||
kill INT => @worker_pids[1..$#worker_pids];
|
||||
$tasks = $client->new_task_set;
|
||||
$out = '';
|
||||
$tasks->add_task(echo_ws => 1, { on_complete => sub { $out .= ${ $_[0] } } });
|
||||
$tasks->add_task(echo_ws => 2, { on_complete => sub { $out .= ${ $_[0] } } });
|
||||
$tasks->add_task(echo_ws => 'x', {
|
||||
on_fail => sub {
|
||||
$tasks->add_task(echo_ws => 'p', {
|
||||
on_complete => sub { $out .= ${ $_[0] } },
|
||||
high_priority => 1
|
||||
});
|
||||
},
|
||||
});
|
||||
$tasks->add_task(echo_ws => 3, { on_complete => sub { $out .= ${ $_[0] } } });
|
||||
$tasks->add_task(echo_ws => 4, { on_complete => sub { $out .= ${ $_[0] } } });
|
||||
$tasks->add_task(echo_ws => 5, { on_complete => sub { $out .= ${ $_[0] } } });
|
||||
$tasks->add_task(echo_ws => 6, { on_complete => sub { $out .= ${ $_[0] } } });
|
||||
$tasks->wait;
|
||||
like($out, qr/p.+6/, 'High priority tasks executed in priority order.');
|
||||
## We just killed off all but one worker--make sure they get respawned.
|
||||
respawn_children();
|
||||
|
||||
## Test dispatch_background and get_status.
|
||||
my $out;
|
||||
my $handle = $client->dispatch_background(long => undef, {
|
||||
on_complete => sub { $out = ${ $_[0] } },
|
||||
});
|
||||
ok($handle, 'Got a handle back from dispatching background job');
|
||||
my $status = $client->get_status($handle);
|
||||
isa_ok($status, 'Gearman::JobStatus');
|
||||
ok($status->running, 'Job is still running');
|
||||
is($status->percent, .5, 'Job is 50 percent complete');
|
||||
do {
|
||||
sleep 1;
|
||||
$status = $client->get_status($handle);
|
||||
} until $status->percent == 1;
|
||||
|
||||
sub pid_is_dead {
|
||||
my($pid) = @_;
|
||||
return if $pid == -1;
|
||||
my $type = delete $Children{$pid};
|
||||
if ($type eq 'W') {
|
||||
## Right now we can only restart workers.
|
||||
start_worker(PORT, 2);
|
||||
}
|
||||
}
|
||||
|
||||
sub respawn_children {
|
||||
for my $pid (keys %Children) {
|
||||
if (waitpid($pid, WNOHANG) > 0) {
|
||||
pid_is_dead($pid);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sub start_server {
|
||||
my($port) = @_;
|
||||
my $server = File::Spec->catfile($Bin, '..', 'server', 'gearmand');
|
||||
my $pid = start_child([ $server, '-p', $port ]);
|
||||
$Children{$pid} = 'S';
|
||||
}
|
||||
|
||||
sub start_worker {
|
||||
my($port, $num) = @_;
|
||||
my $worker = File::Spec->catfile($Bin, 'worker.pl');
|
||||
my $servers = join ',',
|
||||
map '127.0.0.1:' . (PORT + $_),
|
||||
0..$num-1;
|
||||
my $pid = start_child([ $worker, '-s', $servers ]);
|
||||
$Children{$pid} = 'W';
|
||||
}
|
||||
|
||||
sub start_child {
|
||||
my($cmd) = @_;
|
||||
my $pid = fork();
|
||||
die $! unless defined $pid;
|
||||
unless ($pid) {
|
||||
exec 'perl', '-Iblib/lib', '-Ilib', @$cmd or die $!;
|
||||
}
|
||||
$pid;
|
||||
}
|
||||
|
||||
sub kill_children {
|
||||
kill INT => keys %Children;
|
||||
}
|
||||
|
||||
sub wait_for_port {
|
||||
my($port) = @_;
|
||||
my $start = time;
|
||||
while (1) {
|
||||
my $sock = IO::Socket::INET->new(PeerAddr => "127.0.0.1:$port");
|
||||
return 1 if $sock;
|
||||
select undef, undef, undef, 0.25;
|
||||
die "Timeout waiting for port $port to startup" if time > $start + 5;
|
||||
}
|
||||
}
|
41
wcmtools/gearman/t/worker.pl
Executable file
@@ -0,0 +1,41 @@
#!/usr/bin/perl -w
use strict;

use Gearman::Worker;
use Storable qw( thaw );
use Getopt::Long qw( GetOptions );

GetOptions(
    's|servers=s', \my($servers),
);
die "usage: $0 -s <servers>" unless $servers;
my @servers = split /,/, $servers;

my $worker = Gearman::Worker->new;
$worker->job_servers(@servers);

$worker->register_function(sum => sub {
    my $sum = 0;
    $sum += $_ for @{ thaw($_[0]->arg) };
    $sum;
});

$worker->register_function(fail => sub { undef });
$worker->register_function(fail_exit => sub { exit 255 });

$worker->register_function(sleep => sub { sleep $_[0]->arg });

$worker->register_function(echo_ws => sub {
    select undef, undef, undef, 0.25;
    $_[0]->arg eq 'x' ? undef : $_[0]->arg;
});

$worker->register_function(long => sub {
    my($job) = @_;
    $job->set_status(50, 100);
    sleep 2;
    $job->set_status(100, 100);
    sleep 2;
});

$worker->work while 1;
55
wcmtools/lib/Apache/CompressClientFixup.pm
Executable file
@@ -0,0 +1,55 @@
# modified from:
# http://devl4.outlook.net/devdoc/Dynagzip/ContentCompressionClients.html

package Apache::CompressClientFixup;

use 5.004;
use strict;
use Apache::Constants qw(OK DECLINED);
use Apache::Log();
use Apache::URI();

use vars qw($VERSION);
$VERSION = "0.01";

sub handler {
    my $r = shift;
    return DECLINED unless $r->header_in('Accept-Encoding') =~ /gzip/io;

    my $no_gzip = sub {
        $r->headers_in->unset('Accept-Encoding');
        return OK;
    };

    my $ua = $r->header_in('User-Agent');

    if ($r->protocol =~ /http\/1\.0/io) {
        # it is not supposed to be compressed:
        # (but if request comes via mod_proxy, it'll be 1.1 regardless of what it actually was)
        return $no_gzip->();
    }
    if ($ua =~ /MSIE 4\./o) {
        return $no_gzip->() if
            $r->method =~ /POST/io ||
            $r->header_in('Range') ||
            length($r->uri) > 245;
    }
    if ($ua =~ /MSIE 6\.0/o) {
        return $no_gzip->() if $r->parsed_uri->scheme =~ /https/io;
    }

    if ($r->header_in('Via') =~ /^1\.1\s/o ||   # MS Proxy 2.0
        $r->header_in('Via') =~ /^Squid\//o ||
        $ua =~ /Galeon\)/o ||
        $ua =~ /Mozilla\/4\.7[89]/o ||
        $ua =~ /Opera 3\.5/o ||
        $ua =~ /SkipStone\)/o) {
        return $no_gzip->();
    }

    if (($ua =~ /Mozilla\/4\.0/o) and (!($ua =~ /compatible/io))) {
        return $no_gzip->();
    }
}

1;
392
wcmtools/lib/DBI/Role.pm
Executable file
@@ -0,0 +1,392 @@
|
||||
package DBI::Role;
|
||||
|
||||
use 5.006;
|
||||
use strict;
|
||||
use warnings;
|
||||
BEGIN {
|
||||
$DBI::Role::HAVE_HIRES = eval "use Time::HiRes (); 1;";
|
||||
}
|
||||
|
||||
our $VERSION = '1.00';
|
||||
|
||||
# $self contains:
|
||||
#
|
||||
# DBINFO --- hashref. keys = scalar roles, one of which must be 'master'.
|
||||
# values contain DSN info, and 'role' => { 'role' => weight, 'role2' => weight }
|
||||
#
|
||||
# DEFAULT_DB -- scalar string. default db name if none in DSN hashref in DBINFO
|
||||
#
|
||||
# DBREQCACHE -- cleared by clear_req_cache() on each request.
|
||||
# fdsn -> dbh
|
||||
#
|
||||
# DBCACHE -- role -> fdsn, or
|
||||
# fdsn -> dbh
|
||||
#
|
||||
# DBCACHE_UNTIL -- role -> unixtime
|
||||
#
|
||||
# DB_USED_AT -- fdsn -> unixtime
|
||||
#
|
||||
# DB_DEAD_UNTIL -- fdsn -> unixtime
|
||||
#
|
||||
# TIME_CHECK -- if true, time between localhost and db are checked every TIME_CHECK
|
||||
# seconds
|
||||
#
|
||||
# TIME_REPORT -- coderef to pass dsn and dbtime to after a TIME_CHECK occurrence
|
||||
|
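# An illustrative 'sources' hash, using the keys make_dbh_fdsn() understands
# below (dbname/host/port/sock/user/pass); host names, roles and weights here
# are made up:
#
#   my %DBINFO = (
#       master => {
#           host => "10.0.0.1", port => 3306,
#           user => "dbuser", pass => "secret", dbname => "mydb",
#       },
#       slave1 => {
#           host => "10.0.0.2", user => "dbuser", pass => "secret",
#           role => { slow => 2, directory => 1 },
#       },
#   );
#
# 'master' needs no role entry (it is implied); for any other role, get_dbh()
# picks among the matching entries by weighted random choice.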
||||
sub new
|
||||
{
|
||||
my ($class, $args) = @_;
|
||||
my $self = {};
|
||||
$self->{'DBINFO'} = $args->{'sources'};
|
||||
$self->{'TIMEOUT'} = $args->{'timeout'};
|
||||
$self->{'DEFAULT_DB'} = $args->{'default_db'};
|
||||
$self->{'TIME_CHECK'} = $args->{'time_check'};
|
||||
$self->{'TIME_LASTCHECK'} = {}; # dsn -> last check time
|
||||
$self->{'TIME_REPORT'} = $args->{'time_report'};
|
||||
bless $self, ref $class || $class;
|
||||
return $self;
|
||||
}
|
||||
|
||||
sub set_sources
|
||||
{
|
||||
my ($self, $newval) = @_;
|
||||
$self->{'DBINFO'} = $newval;
|
||||
$self;
|
||||
}
|
||||
|
||||
sub clear_req_cache
|
||||
{
|
||||
my $self = shift;
|
||||
$self->{'DBREQCACHE'} = {};
|
||||
}
|
||||
|
||||
sub disconnect_all
|
||||
{
|
||||
my ($self, $opts) = @_;
|
||||
my %except;
|
||||
|
||||
if ($opts && $opts->{except} &&
|
||||
ref $opts->{except} eq 'ARRAY') {
|
||||
$except{$_} = 1 foreach @{$opts->{except}};
|
||||
}
|
||||
|
||||
foreach my $cache (qw(DBREQCACHE DBCACHE)) {
|
||||
next unless ref $self->{$cache} eq "HASH";
|
||||
foreach my $key (keys %{$self->{$cache}}) {
|
||||
next if $except{$key};
|
||||
my $v = $self->{$cache}->{$key};
|
||||
next unless ref $v eq "DBI::db";
|
||||
$v->disconnect;
|
||||
delete $self->{$cache}->{$key};
|
||||
}
|
||||
}
|
||||
$self->{'DBCACHE'} = {};
|
||||
$self->{'DBREQCACHE'} = {};
|
||||
}
|
||||
|
||||
sub same_cached_handle
|
||||
{
|
||||
my $self = shift;
|
||||
my ($role_a, $role_b) = @_;
|
||||
return
|
||||
defined $self->{'DBCACHE'}->{$role_a} &&
|
||||
defined $self->{'DBCACHE'}->{$role_b} &&
|
||||
$self->{'DBCACHE'}->{$role_a} eq $self->{'DBCACHE'}->{$role_b};
|
||||
}
|
||||
|
||||
sub flush_cache
|
||||
{
|
||||
my $self = shift;
|
||||
foreach (keys %{$self->{'DBCACHE'}}) {
|
||||
my $v = $self->{'DBCACHE'}->{$_};
|
||||
next unless ref $v;
|
||||
$v->disconnect;
|
||||
}
|
||||
$self->{'DBCACHE'} = {};
|
||||
$self->{'DBREQCACHE'} = {};
|
||||
}
|
||||
|
||||
# old interface. does nothing now.
|
||||
sub trigger_weight_reload
|
||||
{
|
||||
my $self = shift;
|
||||
return $self;
|
||||
}
|
||||
|
||||
sub use_diff_db
|
||||
{
|
||||
my $self = shift;
|
||||
my ($role1, $role2) = @_;
|
||||
|
||||
return 0 if $role1 eq $role2;
|
||||
|
||||
# this is implied: (makes logic below more readable by forcing it)
|
||||
$self->{'DBINFO'}->{'master'}->{'role'}->{'master'} = 1;
|
||||
|
||||
foreach (keys %{$self->{'DBINFO'}}) {
|
||||
next if /^_/;
|
||||
next unless ref $self->{'DBINFO'}->{$_} eq "HASH";
|
||||
if ($self->{'DBINFO'}->{$_}->{'role'}->{$role1} &&
|
||||
$self->{'DBINFO'}->{$_}->{'role'}->{$role2}) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub get_dbh
|
||||
{
|
||||
my $self = shift;
|
||||
my $opts = ref $_[0] eq "HASH" ? shift : {};
|
||||
|
||||
my @roles = @_;
|
||||
my $role = shift @roles;
|
||||
return undef unless $role;
|
||||
|
||||
my $now = time();
|
||||
|
||||
# if 'nocache' flag is passed, clear caches now so we won't return
|
||||
# a cached database handle later
|
||||
$self->clear_req_cache if $opts->{'nocache'};
|
||||
|
||||
# otherwise, see if we have a role -> full DSN mapping already
|
||||
my ($fdsn, $dbh);
|
||||
if ($role eq "master") {
|
||||
$fdsn = make_dbh_fdsn($self, $self->{'DBINFO'}->{'master'});
|
||||
} else {
|
||||
if ($self->{'DBCACHE'}->{$role} && ! $opts->{'unshared'}) {
|
||||
$fdsn = $self->{'DBCACHE'}->{$role};
|
||||
if ($now > $self->{'DBCACHE_UNTIL'}->{$role}) {
|
||||
# this role -> DSN mapping is too old. invalidate,
|
||||
# and while we're at it, clean up any connections we have
|
||||
# that are too idle.
|
||||
undef $fdsn;
|
||||
|
||||
foreach (keys %{$self->{'DB_USED_AT'}}) {
|
||||
next if $self->{'DB_USED_AT'}->{$_} > $now - 60;
|
||||
delete $self->{'DB_USED_AT'}->{$_};
|
||||
delete $self->{'DBCACHE'}->{$_};
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ($fdsn) {
|
||||
$dbh = get_dbh_conn($self, $fdsn, $role);
|
||||
return $dbh if $dbh;
|
||||
delete $self->{'DBCACHE'}->{$role}; # guess it was bogus
|
||||
}
|
||||
return undef if $role eq "master"; # no hope now
|
||||
|
||||
# time to randomly weightedly select one.
|
||||
my @applicable;
|
||||
my $total_weight;
|
||||
foreach (keys %{$self->{'DBINFO'}}) {
|
||||
next if /^_/;
|
||||
next unless ref $self->{'DBINFO'}->{$_} eq "HASH";
|
||||
my $weight = $self->{'DBINFO'}->{$_}->{'role'}->{$role};
|
||||
next unless $weight;
|
||||
push @applicable, [ $self->{'DBINFO'}->{$_}, $weight ];
|
||||
$total_weight += $weight;
|
||||
}
|
||||
|
||||
while (@applicable) {
|
||||
my $rand = rand($total_weight);
|
||||
my ($i, $t) = (0, 0);
|
||||
for (; $i<@applicable; $i++) {
|
||||
$t += $applicable[$i]->[1];
|
||||
last if $t > $rand;
|
||||
}
|
||||
my $fdsn = make_dbh_fdsn($self, $applicable[$i]->[0]);
|
||||
$dbh = get_dbh_conn($self, $opts, $fdsn);
|
||||
if ($dbh) {
|
||||
$self->{'DBCACHE'}->{$role} = $fdsn;
|
||||
$self->{'DBCACHE_UNTIL'}->{$role} = $now + 5 + int(rand(10));
|
||||
return $dbh;
|
||||
}
|
||||
|
||||
# otherwise, discard that one.
|
||||
$total_weight -= $applicable[$i]->[1];
|
||||
splice(@applicable, $i, 1);
|
||||
}
|
||||
|
||||
# try others
|
||||
return get_dbh($self, $opts, @roles);
|
||||
}
|
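# Typical call patterns for get_dbh() above (role names are illustrative):
#
#   my $dbh = $DBIRole->get_dbh("slave", "master");
#       # any handle weighted for the "slave" role, falling back to master
#
#   my $dbh = $DBIRole->get_dbh({ unshared => 1, max_repl_lag => 100_000 }, "slave");
#       # a dedicated connection, skipping slaves whose replication is more
#       # than 100k binlog bytes behind (see connection_bad() below)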
||||
|
||||
sub make_dbh_fdsn
|
||||
{
|
||||
my $self = shift;
|
||||
my $db = shift; # hashref with DSN info
|
||||
return $db->{'_fdsn'} if $db->{'_fdsn'}; # already made?
|
||||
|
||||
my $fdsn = "DBI:mysql"; # join("|",$dsn,$user,$pass) (because no refs as hash keys)
|
||||
$db->{'dbname'} ||= $self->{'DEFAULT_DB'} if $self->{'DEFAULT_DB'};
|
||||
$fdsn .= ":$db->{'dbname'}";
|
||||
$fdsn .= ";host=$db->{'host'}" if $db->{'host'};
|
||||
$fdsn .= ";port=$db->{'port'}" if $db->{'port'};
|
||||
$fdsn .= ";mysql_socket=$db->{'sock'}" if $db->{'sock'};
|
||||
$fdsn .= "|$db->{'user'}|$db->{'pass'}";
|
||||
|
||||
$db->{'_fdsn'} = $fdsn;
|
||||
return $fdsn;
|
||||
}
|
||||
|
||||
sub get_dbh_conn
|
||||
{
|
||||
my $self = shift;
|
||||
my $opts = ref $_[0] eq "HASH" ? shift : {};
|
||||
my $fdsn = shift;
|
||||
my $role = shift; # optional.
|
||||
my $now = time();
|
||||
|
||||
my $retdb = sub {
|
||||
my $db = shift;
|
||||
$self->{'DBREQCACHE'}->{$fdsn} = $db;
|
||||
$self->{'DB_USED_AT'}->{$fdsn} = $now;
|
||||
return $db;
|
||||
};
|
||||
|
||||
# have we already created or verified a handle this request for this DSN?
|
||||
return $retdb->($self->{'DBREQCACHE'}->{$fdsn})
|
||||
if $self->{'DBREQCACHE'}->{$fdsn} && ! $opts->{'unshared'};
|
||||
|
||||
# check to see if we recently tried to connect to that dead server
|
||||
return undef if $self->{'DB_DEAD_UNTIL'}->{$fdsn} && $now < $self->{'DB_DEAD_UNTIL'}->{$fdsn};
|
||||
|
||||
# if not, we'll try to find one we used sometime in this process lifetime
|
||||
my $dbh = $self->{'DBCACHE'}->{$fdsn};
|
||||
|
||||
# if it exists, verify it's still alive and return it. (but not
|
||||
# if we're wanting an unshared connection)
|
||||
if ($dbh && ! $opts->{'unshared'}) {
|
||||
return $retdb->($dbh) unless connection_bad($dbh, $opts);
|
||||
undef $dbh;
|
||||
undef $self->{'DBCACHE'}->{$fdsn};
|
||||
}
|
||||
|
||||
# time to make one!
|
||||
my ($dsn, $user, $pass) = split(/\|/, $fdsn);
|
||||
my $timeout = $self->{'TIMEOUT'} || 2;
|
||||
$dsn .= ";mysql_connect_timeout=$timeout";
|
||||
|
||||
my $loop = 1;
|
||||
my $tries = $DBI::Role::HAVE_HIRES ? 8 : 2;
|
||||
while ($loop) {
|
||||
$loop = 0;
|
||||
$dbh = DBI->connect($dsn, $user, $pass, {
|
||||
PrintError => 0,
|
||||
AutoCommit => 1,
|
||||
});
|
||||
|
||||
# if max connections, try again shortly.
|
||||
if (! $dbh && $DBI::err == 1040 && $tries) {
|
||||
$tries--;
|
||||
$loop = 1;
|
||||
if ($DBI::Role::HAVE_HIRES) {
|
||||
Time::HiRes::usleep(250_000);
|
||||
} else {
|
||||
sleep 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
my $DBI_err = $DBI::err || 0;
|
||||
|
||||
# check replication/busy processes... see if we should not use
|
||||
# this one
|
||||
undef $dbh if connection_bad($dbh, $opts);
|
||||
|
||||
# if this is an unshared connection, we don't want to put it
|
||||
# in the cache for somebody else to use later. (which happens below)
|
||||
return $dbh if $opts->{'unshared'};
|
||||
|
||||
# mark server as dead if dead. won't try to reconnect again for 5 seconds.
|
||||
if ($dbh) {
|
||||
$self->{'DB_USED_AT'}->{$fdsn} = $now;
|
||||
|
||||
if ($self->{'TIME_CHECK'} && ref $self->{'TIME_REPORT'} eq "CODE") {
|
||||
my $now = time();
|
||||
$self->{'TIME_LASTCHECK'}->{$dsn} ||= 0; # avoid warnings
|
||||
if ($self->{'TIME_LASTCHECK'}->{$dsn} < $now - $self->{'TIME_CHECK'}) {
|
||||
$self->{'TIME_LASTCHECK'}->{$dsn} = $now;
|
||||
my $db_time = $dbh->selectrow_array("SELECT UNIX_TIMESTAMP()");
|
||||
$self->{'TIME_REPORT'}->($dsn, $db_time, $now);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
# mark the database as dead for a bit, unless it was just because of max connections
|
||||
$self->{'DB_DEAD_UNTIL'}->{$fdsn} = $now + 5
|
||||
unless $DBI_err == 1040;
|
||||
}
|
||||
|
||||
return $self->{'DBREQCACHE'}->{$fdsn} = $self->{'DBCACHE'}->{$fdsn} = $dbh;
|
||||
}
|
||||
|
||||
sub connection_bad {
|
||||
my ($dbh, $opts) = @_;
|
||||
|
||||
return 1 unless $dbh;
|
||||
|
||||
my $ss = eval {
|
||||
$dbh->selectrow_hashref("SHOW SLAVE STATUS");
|
||||
};
|
||||
|
||||
# if there was an error, and it wasn't a permission problem (1227)
|
||||
# then treat this connection as bogus
|
||||
if ($dbh->err && $dbh->err != 1227) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
# connection is good if $ss is undef (not a slave)
|
||||
return 0 unless $ss;
|
||||
|
||||
# otherwise, it's okay if not MySQL 4
|
||||
return 0 if ! $ss->{'Master_Log_File'} || ! $ss->{'Relay_Master_Log_File'};
|
||||
|
||||
# all good if within 100 k
|
||||
if ($opts->{'max_repl_lag'}) {
|
||||
return 0 if
|
||||
$ss->{'Master_Log_File'} eq $ss->{'Relay_Master_Log_File'} &&
|
||||
($ss->{'Read_Master_Log_Pos'} - $ss->{'Exec_master_log_pos'}) < $opts->{'max_repl_lag'};
|
||||
|
||||
# guess we're behind
|
||||
return 1;
|
||||
} else {
|
||||
# default to assuming it's good
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
1;
|
||||
__END__
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::Role - Get DBI cached handles by role, with weighting & failover.
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
use DBI::Role;
|
||||
my $DBIRole = new DBI::Role {
|
||||
'sources' => \%DBINFO,
|
||||
'default_db' => "somedbname", # opt.
|
||||
};
|
||||
my $dbh = $DBIRole->get_dbh("master");
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
To be written.
|
||||
|
||||
=head2 EXPORT
|
||||
|
||||
None by default.
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Brad Fitzpatrick, E<lt>brad@danga.comE<gt>
|
||||
|
||||
=head1 SEE ALSO
|
||||
|
||||
L<DBI>.
|
||||
|
||||
252
wcmtools/lib/DBIx/StateKeeper.pm
Executable file
@@ -0,0 +1,252 @@
|
||||
# A StateTracker has a real DBI $dbh handle, and knows
|
||||
# what the active database is (for use with MySQL)
|
||||
#
|
||||
# A StateKeeper (below) mimics the $dbh interface (so it
|
||||
# can be used transparently instead of a real $dbh) and
|
||||
# has a StateTracker and knows what database it wants to
|
||||
# use. If the StateKeeper is ever invoked (->do(), ->prepare(),
|
||||
# or whatever $dbh can do), then it checks its Tracker and
|
||||
# changes the Tracker's database if it doesn't match.
|
||||
#
|
||||
# The point is to connect to a host that has multiple
|
||||
# databases, but only use one connection, and make the
|
||||
# client code oblivious to the fact one connection is
|
||||
# being shared and there are multiple databases.
|
||||
#
|
||||
# Backing up, the point is to get more concurrency out
|
||||
# out the ultra-fast MyISAM table handler by separating
|
||||
# users onto separate databases on the same machine
|
||||
# and across different replication clusters. We could use
|
||||
# InnoDB, which is highly concurrent, but it's pretty slow.
|
||||
# Besides, we hardly ever hit the database with memcache.
|
||||
# The common case for us at the moment is doing 1 or 2
|
||||
# simple queries on 10+ different databases, most of which
|
||||
# are on the same couple hosts. It's a waste to use 10
|
||||
# db connections. The MySQL support people will say
|
||||
# to just jack up max_connections, but we want to limit
|
||||
# the max running threads (and their associated memory).
|
||||
# We keep asking MySQL people for a distinction between
|
||||
# threads and connections, but it's lower on their priority
|
||||
# list. This is our temporary hack.
|
||||
#
|
||||
# UPDATE: Oct-16-2003, it was announced by a MySQL
|
||||
# developer that MySQL 5.0 will have thread vs. connection
|
||||
# context separation. See:
|
||||
# http://krow.livejournal.com/247835.html
|
||||
#
|
||||
# Please, do not use this in other code unless you know
|
||||
# what you're doing.
|
||||
#
|
||||
# -- Brad Fitzpatrick <brad@danga.com>
|
||||
#
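# A minimal usage sketch (the handle-maker, database names, and query below
# are hypothetical; they are not part of this module):
#
#   my $tracker = DBIx::StateTracker->new(
#       sub { DBI->connect("DBI:mysql:host=dbhost", "user", "pass") },
#       "userdb01");
#   my $keeper = DBIx::StateKeeper->new($tracker, "userdb02");
#
#   # $keeper quacks like a $dbh; before forwarding a method it asks the
#   # tracker to "USE userdb02" if the shared connection is on another db.
#   my $row = $keeper->selectrow_hashref("SELECT ...");
#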
|
||||
|
||||
package DBIx::StateTracker;
|
||||
|
||||
use strict;
|
||||
|
||||
# if set externally, EXTRA_PARANOID will validate the
|
||||
# current database before any query. slow, but useful
|
||||
# to make sure nobody is messing with the StateTracker's
|
||||
# dbh besides the tracker itself.
|
||||
use vars qw($EXTRA_PARANOID);
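# e.g., enabled from application code (a hypothetical debugging aid; the
# package variable above is the only switch):
#   $DBIx::StateTracker::EXTRA_PARANOID = 1;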
|
||||
|
||||
our %dbs_tracked; # $dbh -> 1 (if being tracked)
|
||||
|
||||
sub new {
|
||||
my ($class, $dbh, $init_db) = @_;
|
||||
return undef unless $dbh;
|
||||
my $bless = ref $class || $class;
|
||||
|
||||
my $maker;
|
||||
if (ref $dbh eq "CODE") {
|
||||
$maker = $dbh;
|
||||
$dbh = undef;
|
||||
}
|
||||
|
||||
my $self = {
|
||||
'dbh' => $dbh,
|
||||
'database' => $init_db,
|
||||
'maker' => $maker,
|
||||
};
|
||||
bless $self, $bless;
|
||||
|
||||
$self->reconnect() unless $self->{dbh};
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
sub reconnect {
|
||||
my $self = shift;
|
||||
die "DBIx::StateTracker: no db connector code available\n"
|
||||
unless ref $self->{maker} eq "CODE";
|
||||
|
||||
# in case there was an old handle
|
||||
delete $dbs_tracked{$self->{dbh}};
|
||||
|
||||
my $dbh = $self->{maker}->();
|
||||
my $db;
|
||||
die "DBIx::StateTracker: could not reconnect to database\n"
|
||||
unless $dbh;
|
||||
|
||||
$db = $dbh->selectrow_array("SELECT DATABASE()");
|
||||
die "DBIx::StateTracker: error checking current database: " . $dbh->errstr . "\n"
|
||||
if $dbh->err;
|
||||
|
||||
if ($dbs_tracked{$dbh}++) {
|
||||
die "DBIx::StateTracker: database $dbh already being tracked. ".
|
||||
"Can't have two active trackers.";
|
||||
}
|
||||
|
||||
$self->{dbh} = $dbh;
|
||||
$self->{database} = $db;
|
||||
return $self;
|
||||
}
|
||||
|
||||
sub disconnect {
|
||||
my $self = shift;
|
||||
delete $dbs_tracked{$self->{dbh}};
|
||||
$self->{dbh}->disconnect if $self->{dbh};
|
||||
undef $self->{dbh};
|
||||
undef $self->{database};
|
||||
}
|
||||
|
||||
sub DESTROY {
|
||||
my $self = shift;
|
||||
delete $dbs_tracked{$self->{'dbh'}};
|
||||
}
|
||||
|
||||
sub get_database {
|
||||
my $self = shift;
|
||||
return $self->{'database'};
|
||||
}
|
||||
|
||||
sub set_database {
|
||||
my ($self, $db, $second_try) = @_; # db = desired database
|
||||
|
||||
if ($self->{database} ne $db) {
|
||||
die "Invalid db name" if $db =~ /\W/;
|
||||
my $rc = $self->{'dbh'}->do("USE $db");
|
||||
if (! $rc) {
|
||||
return 0 if $second_try;
|
||||
$self->reconnect();
|
||||
return $self->set_database($db, 1);
|
||||
}
|
||||
$self->{'database'} = $db;
|
||||
}
|
||||
|
||||
elsif ($EXTRA_PARANOID) {
|
||||
my $actual = $self->{'dbh'}->selectrow_array("SELECT DATABASE()");
|
||||
if (! defined $actual) {
|
||||
my $err = $self->{dbh}->err;
|
||||
if (! $second_try && ($err == 2006 || $err == 2013)) {
|
||||
# server gone away, or lost connection (timeout?)
|
||||
$self->reconnect();
|
||||
return $self->set_database($db, 1);
|
||||
} else {
|
||||
$@ = "DBIx::StateTracker: error discovering current database: " .
|
||||
$self->{dbh}->errstr;
|
||||
return 0;
|
||||
}
|
||||
} elsif ($actual ne $db) {
|
||||
$@ = "Aborting without db access. Somebody is messing with the DBIx::StateTracker ".
|
||||
"dbh that's not us. Expecting database $db, but was actually $actual.";
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub do_method {
|
||||
my ($self, $desired_db, $method, @args) = @_;
|
||||
unless ($method eq "quote") {
|
||||
die "DBIx::StateKeeper: unable to switch to database: $desired_db ($@)" unless
|
||||
$self->set_database($desired_db);
|
||||
}
|
||||
|
||||
my $dbh = $self->{dbh};
|
||||
#print "wantarray: ", (wantarray() ? 1 : 0), "\n";
|
||||
return $dbh->$method(@args);
|
||||
}
|
||||
|
||||
sub get_attribute {
|
||||
my ($self, $desired_db, $key) = @_;
|
||||
die "DBIx::StateKeeper: unable to switch to database: $desired_db" unless
|
||||
$self->set_database($desired_db);
|
||||
|
||||
my $dbh = $self->{dbh};
|
||||
return $dbh->{$key};
|
||||
}
|
||||
|
||||
sub set_attribute {
|
||||
my ($self, $desired_db, $key, $val) = @_;
|
||||
die "DBIx::StateKeeper: unable to switch to database: $desired_db" unless
|
||||
$self->set_database($desired_db);
|
||||
|
||||
my $dbh = $self->{dbh};
|
||||
$dbh->{$key} = $val;
|
||||
}
|
||||
|
||||
package DBIx::StateKeeper;
|
||||
|
||||
use strict;
|
||||
use vars qw($AUTOLOAD);
|
||||
|
||||
sub new {
|
||||
my ($class, $tracker, $db) = @_;
|
||||
my $bless = ref $class || $class;
|
||||
my $self = {}; # always empty. real state is stored in tied node.
|
||||
tie %$self, $bless, $tracker, $db;
|
||||
bless $self, $bless;
|
||||
return $self;
|
||||
}
|
||||
|
||||
sub STORE {
|
||||
my ($self, $key, $value) = @_;
|
||||
die "Setting attributes on DBIx::StateKeeper handles not yet supported. Use a real connection.";
|
||||
return $self->{_tracker}->set_attribute($self->{_db}, $key, $value);
|
||||
}
|
||||
|
||||
sub DELETE { die "DELETE not implemented" }
|
||||
sub CLEAR { die "CLEAR not implemented" }
|
||||
sub EXISTS { die "EXISTS not implemented" }
|
||||
sub FIRSTKEY { return undef; }
|
||||
sub NEXTKEY { return undef; }
|
||||
sub DESTROY { die "DESTROY not implemented" }
|
||||
sub UNTIE { }
|
||||
|
||||
sub set_database {
|
||||
my $self = shift;
|
||||
return $self->{_tracker}->set_database($self->{_db});
|
||||
}
|
||||
|
||||
sub FETCH {
|
||||
my ($self, $key) = @_;
|
||||
|
||||
# keys starting with underscore are our own. otherwise
|
||||
# we forward them on to the real $dbh.
|
||||
if ($key =~ m!^\_!) {
|
||||
my $ret = $self->{$key};
|
||||
return $ret;
|
||||
}
|
||||
|
||||
return $self->{_tracker}->get_attribute($self->{_db}, $key);
|
||||
}
|
||||
|
||||
sub TIEHASH {
|
||||
my ($class, $tracker, $db) = @_;
|
||||
my $node = {
|
||||
'_tracker' => $tracker,
|
||||
'_db' => $db,
|
||||
};
|
||||
return bless $node, $class;
|
||||
}
|
||||
|
||||
sub AUTOLOAD {
|
||||
my $self = shift;
|
||||
my $method = $AUTOLOAD;
|
||||
$method =~ s/.+:://;
|
||||
return $self->{_tracker}->do_method($self->{_db}, $method, @_);
|
||||
}
|
||||
|
||||
1;
|
||||
463
wcmtools/lib/Danga-Daemon/Daemon.pm
Executable file
@@ -0,0 +1,463 @@
|
||||
#!/usr/bin/perl -w
|
||||
#
|
||||
#--------------------------------------------------
|
||||
|
||||
=head1 Description
|
||||
|
||||
This class will make a proper daemon out of an arbitrary subroutine.
|
||||
Your script will automatically inherit the daemon command-line flags, which
|
||||
can intermix with your existing flags (as long as you use Getopt::Long).
|
||||
|
||||
=head1 Examples
|
||||
|
||||
=head2 Basic usage
|
||||
|
||||
use Danga::Daemon;
|
||||
|
||||
Danga::Daemon::daemonize( \&worker );
|
||||
|
||||
sub worker
|
||||
{
|
||||
# do something
|
||||
}
|
||||
|
||||
=head2 Advanced usage
|
||||
|
||||
# This spawns a listener on localhost:2000, adds a command to the CLUI,
|
||||
# and does the daemon work as user 'nobody' 4 times a second:
|
||||
|
||||
Danga::Daemon::daemonize(
|
||||
\&worker,
|
||||
{
|
||||
interval => .25,
|
||||
shedprivs => 'nobody',
|
||||
|
||||
listenport => 2000,
|
||||
bindaddr => '127.0.0.1',
|
||||
listencode => \&cmd,
|
||||
}
|
||||
);
|
||||
|
||||
sub cmd
|
||||
{
|
||||
my ( $line, $s, $c, $codeloop, $codeopts ) = @_;
|
||||
|
||||
if ($line =~ /newcommand/i) {
|
||||
# do something
|
||||
print $c ".\nOK\n";
|
||||
return 1;
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
=head1 Command line switches
|
||||
|
||||
=over 4
|
||||
|
||||
=item --foreground
|
||||
|
||||
Run the script without daemon code, and print output to screen.
|
||||
|
||||
=item --stop
|
||||
|
||||
Stop an existing daemon.
|
||||
|
||||
=item --pidfile
|
||||
|
||||
Store the pidfile in a location other than /var/run. Useful if you are
|
||||
running the script as a non-root user. Use the string 'none' to disable
|
||||
pidfiles entirely.
|
||||
|
||||
=back
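
For example, a hypothetical worker script (the script name and pidfile path
below are illustrative only, not part of this module) could be managed like
this:

    ./myworker.pl --pidfile=/tmp/myworker.pid           # start
    ./myworker.pl --foreground                          # run without daemonizing
    ./myworker.pl --stop --pidfile=/tmp/myworker.pid    # stop the daemon
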
|
||||
|
||||
=head1 Options list
|
||||
|
||||
Options are passed as the second argument to daemonize(), in the form of
|
||||
a hashref.
|
||||
|
||||
=over 4
|
||||
|
||||
=item args [ array of args ]
|
||||
|
||||
An arrayref of arguments; it is passed to the worker subroutine as a single reference.
|
||||
|
||||
=item bindaddr [ ip address ]
|
||||
|
||||
If using a listener, bind to a specific IP. Not defining this will let
|
||||
the listener bind to all IPs.
|
||||
|
||||
=item chdir [ directory ]
|
||||
|
||||
Tell the worker (and the listener, if one exists) which directory to chdir into.
|
||||
Defaults to '/'.
|
||||
|
||||
=item interval [ number in fractional seconds ]
|
||||
|
||||
Default eventloop time is 1 minute. Set this to override, in seconds,
|
||||
or fractions thereof.
|
||||
|
||||
=item listenport [ port ]
|
||||
|
||||
The port the listener will bind to. Setting this option is also the
|
||||
switch to enable a listener.
|
||||
|
||||
=item listencode [ coderef ]
|
||||
|
||||
An optional coderef that can extend the default listener (telnet)
|
||||
commands. See the example above.
|
||||
|
||||
=item override_loop [ boolean ]
|
||||
|
||||
Your daemon may need to base its looping on something other than a time
|
||||
value. Setting this puts the looping burden on the caller; note that in
|
||||
this case the 'interval' option has no meaning. A minimal sketch follows this options list.
|
||||
|
||||
=item shedprivs [ system username ]
|
||||
|
||||
If starting up as root, automatically change process ownership after
|
||||
daemonizing.
|
||||
|
||||
=item shutdowncode [ coderef ]
|
||||
|
||||
If your child is doing special processing and needs to know when it's
|
||||
being killed off, provide a coderef here. It will be called right before
|
||||
the worker process exits.
|
||||
|
||||
=back
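
As a minimal sketch of the C<override_loop> option (the worker body and its
helper below are hypothetical; daemonize() calls the coderef once and expects
it to loop on its own):

    Danga::Daemon::daemonize(
        \&run_forever,
        { override_loop => 1 },
    );

    sub run_forever
    {
        while (1) {
            do_one_batch();   # hypothetical application work
            sleep 5;
        }
    }
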
|
||||
|
||||
=head1 Default telnet commands
|
||||
|
||||
These commands only apply if you use the 'listenport' option.
|
||||
|
||||
=over 4
|
||||
|
||||
=item pids
|
||||
|
||||
Report the pids in use. First pid is the listener. Any remaining are
|
||||
workers.
|
||||
|
||||
=item ping
|
||||
|
||||
Returns the string 'pong' along with the daemon name.
|
||||
|
||||
=item reload
|
||||
|
||||
Kill off any workers, and reload them. An easy way to restart a worker
|
||||
if library code changes.
|
||||
|
||||
=item stop
|
||||
|
||||
Shut down the entire daemon.
|
||||
|
||||
=back
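
For instance, a hypothetical telnet session against a daemon started with the
listenport option (the pids and script name shown are made up):

    $ telnet localhost 2000
    ping
    OK pong mydaemon.pl - listener
    pids
    OK 12345 12346
    stop
    OK SHUTDOWN
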
|
||||
|
||||
=cut
|
||||
|
||||
#--------------------------------------------------
|
||||
package Danga::Daemon;
|
||||
|
||||
use strict;
|
||||
use Carp qw/ confess /;
|
||||
use Getopt::Long qw/ :config pass_through /;
|
||||
use POSIX 'setsid';
|
||||
use FindBin qw/ $RealBin $RealScript /;
|
||||
|
||||
use vars qw/ $busy $stop $opt $pidfile $pid $shutdowncode /;
|
||||
|
||||
# Make daemonize() and debug() available to the caller
|
||||
*main::debug = \&Danga::Daemon::debug;
|
||||
*main::daemonize = \&Danga::Daemon::daemonize;
|
||||
|
||||
# Insert global daemon command line opts before script specific ones,
|
||||
# With the addition of Getopt::Long's 'config pass_through', this
|
||||
# essentially merges the command line options.
|
||||
BEGIN {
|
||||
$opt = {};
|
||||
GetOptions $opt, qw/ stop foreground pidfile=s /;
|
||||
}
|
||||
|
||||
# put arbitrary code into a loop after forking into the background.
|
||||
sub daemonize
|
||||
{
|
||||
my $codeloop = shift || confess "No coderef loop supplied.\n";
|
||||
confess "Invalid coderef\n" unless ref $codeloop eq 'CODE';
|
||||
my $codeopts = shift || {};
|
||||
|
||||
$SIG{$_} = \&stop_parent foreach qw/ INT TERM /;
|
||||
$SIG{CHLD} = 'IGNORE';
|
||||
$pidfile = $opt->{'pidfile'} || "/var/run/$RealScript.pid";
|
||||
$| = 1;
|
||||
|
||||
# setup shutdown ref if necessary
|
||||
if ( $codeopts->{'shutdowncode'} && ref $codeopts->{'shutdowncode'} eq 'CODE' ) {
|
||||
$shutdowncode = $codeopts->{'shutdowncode'};
|
||||
}
|
||||
|
||||
# shutdown existing daemon?
|
||||
if ( $opt->{'stop'} ) {
|
||||
if ( -e $pidfile ) {
|
||||
open( PID, $pidfile );
|
||||
chomp( $pid = <PID> );
|
||||
close PID;
|
||||
}
|
||||
else {
|
||||
confess "No pidfile, unable to stop daemon.\n";
|
||||
}
|
||||
|
||||
if ( kill 15, $pid ) {
|
||||
print "Shutting down daemon.";
|
||||
unlink $pidfile;
|
||||
}
|
||||
else {
|
||||
print "Daemon not running?\n";
|
||||
exit 0;
|
||||
}
|
||||
|
||||
# display something while we're waiting for a
|
||||
# busy daemon to shutdown
|
||||
while ( kill 0, $pid ) { sleep 1 && print '.'; }
|
||||
print "\n";
|
||||
exit 0;
|
||||
}
|
||||
|
||||
# daemonize.
|
||||
if ( !$opt->{'foreground'} ) {
|
||||
|
||||
if ( -e $pidfile ) {
|
||||
print "Pidfile already exists! ($pidfile)\nUnable to start daemon.\n";
|
||||
exit 0;
|
||||
}
|
||||
|
||||
fork && exit 0;
|
||||
POSIX::setsid() || confess "Unable to become session leader: $!\n";
|
||||
|
||||
$pid = fork;
|
||||
confess "Couldn't fork.\n" unless defined $pid;
|
||||
|
||||
if ( $pid != 0 ) { # we are the parent
|
||||
unless ($pidfile eq 'none') {
|
||||
unless ( open( PID, ">$pidfile" ) ) {
|
||||
kill 15, $pid;
|
||||
confess "Couldn't write PID file. Exiting.\n";
|
||||
}
|
||||
print PID ($codeopts->{listenport} ? $$ : $pid) . "\n";
|
||||
close PID;
|
||||
}
|
||||
print "daemon started with pid: $pid\n";
|
||||
|
||||
# listener port supplied? spawn a listener!
|
||||
spawn_listener( $codeloop, $codeopts )
|
||||
if $codeopts->{listenport};
|
||||
exit 0; # exit from parent if no listener
|
||||
}
|
||||
|
||||
# we're the child from here on out.
|
||||
child_actions( $codeopts );
|
||||
}
|
||||
|
||||
# the event loop
|
||||
if ( $codeopts->{override_loop} ) {
|
||||
|
||||
# the caller subref has its own idea of what
|
||||
# a loop is defined as.
|
||||
chdir ( $codeopts->{chdir} || '/') or die "Can't chdir!";
|
||||
$codeloop->( $codeopts->{args} );
|
||||
|
||||
}
|
||||
else {
|
||||
|
||||
# a loop is just a time interval in between
|
||||
# code executions
|
||||
return eventloop( $codeloop, $codeopts );
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub eventloop
|
||||
{
|
||||
my $codeloop = shift || confess "No coderef loop supplied.\n";
|
||||
confess "Invalid coderef\n" unless ref $codeloop eq 'CODE';
|
||||
my $codeopts = shift || {};
|
||||
|
||||
chdir ( $codeopts->{chdir} || '/') or die "Can't chdir!";
|
||||
|
||||
{
|
||||
no warnings;
|
||||
$SIG{CHLD} = undef;
|
||||
}
|
||||
|
||||
while (1) {
|
||||
|
||||
$busy = 1;
|
||||
$codeloop->( $codeopts->{args} );
|
||||
$busy = 0;
|
||||
|
||||
last if $stop;
|
||||
select undef, undef, undef, ( $codeopts->{interval} || 60 );
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
sub child_actions
|
||||
{
|
||||
my $codeopts = shift || {};
|
||||
|
||||
$SIG{$_} = \&stop_child foreach qw/ INT TERM /;
|
||||
$0 = $RealScript . " - worker";
|
||||
umask 0;
|
||||
chdir ( $codeopts->{chdir} || '/') or die "Can't chdir!";
|
||||
|
||||
# shed root privs
|
||||
if ( $codeopts->{shedprivs} ) {
|
||||
my $uid = getpwnam( $codeopts->{shedprivs} );
|
||||
$< = $> = $uid if $uid && ! $<;
|
||||
}
|
||||
|
||||
{
|
||||
no warnings;
|
||||
close STDIN && open STDIN, "</dev/null";
|
||||
close STDOUT && open STDOUT, "+>&STDIN";
|
||||
close STDERR && open STDERR, "+>&STDIN";
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
sub spawn_listener
|
||||
{
|
||||
my $codeloop = shift || confess "No coderef loop supplied.\n";
|
||||
confess "Invalid coderef\n" unless ref $codeloop eq 'CODE';
|
||||
my $codeopts = shift || {};
|
||||
|
||||
use IO::Socket;
|
||||
$0 = $RealScript . " - listener";
|
||||
|
||||
my ( $s, $c );
|
||||
$s = IO::Socket::INET->new(
|
||||
Type => SOCK_STREAM,
|
||||
LocalAddr => $codeopts->{bindaddr}, # undef binds to all
|
||||
ReuseAddr => 1,
|
||||
Listen => 2,
|
||||
LocalPort => $codeopts->{listenport},
|
||||
);
|
||||
unless ($s) {
|
||||
kill 15, $pid;
|
||||
unlink $pidfile;
|
||||
confess "Unable to start listener.\n";
|
||||
}
|
||||
|
||||
# pass incoming connections to listencode()
|
||||
while ($c = $s->accept()) {
|
||||
default_cmdline( $s, $c, $codeloop, $codeopts );
|
||||
}
|
||||
|
||||
# shouldn't reach this.
|
||||
close $s;
|
||||
exit 0;
|
||||
}
|
||||
|
||||
sub stop_parent
|
||||
{
|
||||
debug("Shutting down...\n");
|
||||
|
||||
if ($pid) { # not used in foreground
|
||||
kill 15, $pid;
|
||||
waitpid $pid, 0;
|
||||
unlink $pidfile;
|
||||
}
|
||||
|
||||
exit 0 unless $busy;
|
||||
$stop = 1;
|
||||
}
|
||||
|
||||
sub stop_child
|
||||
{
|
||||
# call our children to have them shut down
|
||||
$shutdowncode->() if $shutdowncode;
|
||||
|
||||
exit 0 unless $busy;
|
||||
$stop = 1;
|
||||
}
|
||||
|
||||
sub debug
|
||||
{
|
||||
return unless $opt->{'foreground'};
|
||||
print STDERR (shift) . "\n";
|
||||
}
|
||||
|
||||
# shutdown daemon remotely
|
||||
sub default_cmdline
|
||||
{
|
||||
my ( $s, $c, $codeloop, $codeopts ) = @_;
|
||||
|
||||
while ( <$c> ) {
|
||||
# remote commands
|
||||
next unless /\w/;
|
||||
|
||||
if (/pids/i) {
|
||||
print $c "OK $$ $pid\n";
|
||||
next;
|
||||
}
|
||||
|
||||
elsif (/ping/i) {
|
||||
print $c "OK pong $0\n";
|
||||
next;
|
||||
}
|
||||
|
||||
elsif (/(?:stop|shutdown)/) {
|
||||
kill 15, $pid;
|
||||
unlink $pidfile;
|
||||
print $c "OK SHUTDOWN\n";
|
||||
exit 0;
|
||||
}
|
||||
|
||||
elsif (/(?:restart|reload)/i) {
|
||||
# shutdown existing worker
|
||||
# wait for it to completely exit
|
||||
kill 15, $pid;
|
||||
wait;
|
||||
|
||||
# re-fork a new worker (no listener)
|
||||
my $newpid = fork;
|
||||
unless ($newpid) {
|
||||
close $s;
|
||||
$0 =~ s/listener/worker/;
|
||||
child_actions( $codeopts );
|
||||
eventloop( $codeloop, $codeopts );
|
||||
exit 0;
|
||||
}
|
||||
|
||||
# remember the new child pid for
|
||||
# future restarts
|
||||
$pid = $newpid;
|
||||
print $c "OK $pid\n";
|
||||
next;
|
||||
}
|
||||
|
||||
else {
|
||||
|
||||
next if
|
||||
$codeopts->{listencode} &&
|
||||
ref $codeopts->{listencode} eq 'CODE' &&
|
||||
$codeopts->{listencode}->( $_, $s, $c, $codeloop, $codeopts );
|
||||
|
||||
if (/help/i) {
|
||||
foreach (sort qw/ ping stop pids reload /) {
|
||||
print $c "\t$_\n";
|
||||
}
|
||||
print $c ".\nOK\n";
|
||||
next;
|
||||
}
|
||||
|
||||
print $c "ERR unknown command\n";
|
||||
next;
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
1;
|
||||
|
||||
1100
wcmtools/lib/Danga-EXIF/EXIF.pm
Executable file
File diff suppressed because it is too large
4
wcmtools/lib/Danga-Exceptions/MANIFEST
Executable file
@@ -0,0 +1,4 @@
|
||||
lib/Danga/Exceptions.pm
|
||||
Makefile.PL
|
||||
MANIFEST
|
||||
t/basic.t
|
||||
11
wcmtools/lib/Danga-Exceptions/MANIFEST.SKIP
Executable file
@@ -0,0 +1,11 @@
|
||||
^#
|
||||
\bCVS\b
|
||||
^MANIFEST\.
|
||||
^Makefile$
|
||||
~$
|
||||
\.html$
|
||||
\.old$
|
||||
^blib/
|
||||
_blib$
|
||||
^MakeMaker-\d
|
||||
^\.exists
|
||||
33
wcmtools/lib/Danga-Exceptions/Makefile.PL
Executable file
@@ -0,0 +1,33 @@
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
# Perl Makefile for Danga-Exceptions
|
||||
# $Id: Makefile.PL,v 1.1 2004/06/04 22:06:28 deveiant Exp $
|
||||
#
|
||||
# Invoke with 'perl Makefile.PL'
|
||||
#
|
||||
# See ExtUtils::MakeMaker (3) for more information on how to influence
|
||||
# the contents of the Makefile that is written
|
||||
#
|
||||
|
||||
use ExtUtils::MakeMaker;
|
||||
|
||||
WriteMakefile(
|
||||
NAME => 'Danga::Exceptions',
|
||||
VERSION_FROM => 'lib/Danga/Exceptions.pm', # finds $VERSION
|
||||
AUTHOR => 'Michael Granger <ged@danga.com>',
|
||||
ABSTRACT => 'Exception library',
|
||||
PREREQ_PM => {
|
||||
Scalar::Util => 0,
|
||||
Carp => 0,
|
||||
overload => 0,
|
||||
},
|
||||
dist => {
|
||||
CI => "cvs commit",
|
||||
RCS_LABEL => 'cvs tag RELEASE_$(VERSION_SYM)',
|
||||
SUFFIX => ".bz2",
|
||||
DIST_DEFAULT => 'all tardist',
|
||||
COMPRESS => "bzip2",
|
||||
},
|
||||
|
||||
);
|
||||
|
||||
1131
wcmtools/lib/Danga-Exceptions/lib/Danga/Exceptions.pm
Executable file
File diff suppressed because it is too large
166
wcmtools/lib/Danga-Exceptions/t/basic.t
Executable file
@@ -0,0 +1,166 @@
|
||||
#!/usr/bin/perl -w
|
||||
#
|
||||
# Test script for Danga::Exceptions
|
||||
# $Id: basic.t,v 1.1 2004/06/04 22:06:28 deveiant Exp $
|
||||
#
|
||||
# Before `make install' is performed this script should be runnable with
|
||||
# `make test'. After `make install' it should work as `perl 02_exceptions.t'
|
||||
#
|
||||
# Please do not commit any changes you make to the module without a
|
||||
# successful 'make test'!
|
||||
#
|
||||
package main;
|
||||
use strict;
|
||||
|
||||
BEGIN { $| = 1; }
|
||||
|
||||
### Load up the test framework
|
||||
use Test::SimpleUnit qw{:functions};
|
||||
Test::SimpleUnit::AutoskipFailedSetup( 1 );
|
||||
|
||||
use Danga::Exceptions qw{:syntax};
|
||||
|
||||
### Imported-symbol test-generation function
|
||||
sub genTest {
|
||||
my $functionName = shift;
|
||||
return {
|
||||
name => "Import $functionName",
|
||||
test => sub {
|
||||
no strict 'refs';
|
||||
assertDefined *{"main::${functionName}"}{CODE},
|
||||
"$functionName() was not imported";
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
### Test functions for throwing
|
||||
sub simple_throw {
|
||||
throw Danga::Exception "Simple throw exception.";
|
||||
}
|
||||
sub methoderror_throw {
|
||||
throw Danga::MethodError "Method error.";
|
||||
}
|
||||
|
||||
### Build tests for imported syntax functions
|
||||
my @synFuncTests = map { s{^&}{}; genTest $_ } @{$Danga::Exception::EXPORT_TAGS{syntax}};
|
||||
|
||||
|
||||
### Main test suite (in the order they're run)
|
||||
my @testSuite = (
|
||||
|
||||
# Test for imported symbols first
|
||||
@synFuncTests,
|
||||
|
||||
# try + throw + catch
|
||||
{
|
||||
name => 'Simple throw',
|
||||
test => sub {
|
||||
try {
|
||||
simple_throw();
|
||||
} catch Danga::Exception with {
|
||||
my $except = shift;
|
||||
assertInstanceOf 'Danga::Exception', $except;
|
||||
};
|
||||
},
|
||||
},
|
||||
|
||||
# try + throw subclass + catch general class
|
||||
{
|
||||
name => 'Subclass throw - general handler',
|
||||
test => sub {
|
||||
try {
|
||||
methoderror_throw();
|
||||
} catch Danga::Exception with {
|
||||
my $except = shift;
|
||||
assertInstanceOf 'Danga::MethodError', $except;
|
||||
};
|
||||
},
|
||||
},
|
||||
|
||||
# try + throw subclass + catch subclass + catch general class(skipped)
|
||||
{
|
||||
name => 'Subclass throw - specific and general handlers',
|
||||
test => sub {
|
||||
my ( $sawSpecificHandler, $sawGeneralHandler );
|
||||
|
||||
try {
|
||||
methoderror_throw();
|
||||
} catch Danga::MethodError with {
|
||||
$sawSpecificHandler = 1;
|
||||
} catch Danga::Exception with {
|
||||
$sawGeneralHandler = 1;
|
||||
};
|
||||
|
||||
assertNot $sawGeneralHandler, "Saw general handler with preceding specific handler";
|
||||
assert $sawSpecificHandler, "Didn't see specific handler";
|
||||
},
|
||||
},
|
||||
|
||||
# try + throw subclass + catch subclass + rethrow + catch general class
|
||||
{
|
||||
name => 'Subclass throw - specific handler with keeptrying',
|
||||
test => sub {
|
||||
my ( $sawSpecificHandler, $sawGeneralHandler );
|
||||
|
||||
try {
|
||||
methoderror_throw();
|
||||
} catch Danga::MethodError with {
|
||||
my ( $e, $keepTrying ) = @_;
|
||||
assertRef 'SCALAR', $keepTrying;
|
||||
$sawSpecificHandler = 1;
|
||||
$$keepTrying = 1;
|
||||
} catch Danga::Exception with {
|
||||
$sawGeneralHandler = 1;
|
||||
};
|
||||
|
||||
assert $sawGeneralHandler,
|
||||
"Didn't see general handler after setting \$keeptrying from ".
|
||||
"preceeding specific handler";
|
||||
assert $sawSpecificHandler,
|
||||
"Didn't see specific handler";
|
||||
},
|
||||
},
|
||||
|
||||
# try + catch + with + otherwise
|
||||
{
|
||||
name => "Throw with otherwise",
|
||||
test => sub {
|
||||
my ( $seenCatch, $seenOtherwise );
|
||||
try {
|
||||
simple_throw();
|
||||
} catch Danga::MethodError with {
|
||||
$seenCatch = 1;
|
||||
} otherwise {
|
||||
$seenOtherwise = 1;
|
||||
};
|
||||
|
||||
assert $seenOtherwise;
|
||||
assertNot $seenCatch;
|
||||
},
|
||||
},
|
||||
|
||||
|
||||
### finally
|
||||
{
|
||||
name => "Throw with finally",
|
||||
test => sub {
|
||||
my ( $sawHandler, $sawFinally );
|
||||
|
||||
try {
|
||||
simple_throw();
|
||||
} catch Danga::Exception with {
|
||||
$sawHandler = 1;
|
||||
} finally {
|
||||
$sawFinally = 1;
|
||||
};
|
||||
|
||||
assert $sawHandler, "Didn't see handler";
|
||||
assert $sawFinally, "Didn't see finally clause.";
|
||||
},
|
||||
},
|
||||
|
||||
|
||||
);
|
||||
|
||||
runTests( @testSuite );
|
||||
|
||||
56
wcmtools/lib/Danga-Socket/CHANGES
Executable file
@@ -0,0 +1,56 @@
|
||||
1.43:
|
||||
-- don't even try epoll if not on a known/tested arch
|
||||
-- updated POD docs
|
||||
|
||||
1.42:
|
||||
-- use the right epoll system call numbers on non-x86
|
||||
machines
|
||||
-- start of a good test suite
|
||||
-- 64-bit struct support (test suite passes on ia64, ppc)
|
||||
(and presumably ppc64, but yet untested)
|
||||
|
||||
|
||||
1.41:
|
||||
-- make the Poll mode behave like Epoll/Kqueue in that
|
||||
fds returned w/ no corresponding Danga::Socket object
|
||||
or OtherFds coderef just get ignored. make it robust
|
||||
against apps with races, perhaps? patch from Justin Azoff
|
||||
<JAzoff@uamail.albany.edu>
|
||||
|
||||
1.40:
|
||||
|
||||
-- Kqueue support from Matt Sergeant
|
||||
|
||||
1.39:
|
||||
|
||||
-- make BSD::Resource optional
|
||||
|
||||
1.38:
|
||||
|
||||
-- added support for profiling (epoll only at the moment while this
|
||||
feature is further fleshed out); user application is required to
|
||||
enable profiling and actually process the resultant data
|
||||
|
||||
-- if epoll_wait returns an event we can't handle, delete it.
|
||||
this means the application fucked up and lost its state somehow.
|
||||
or maybe Danga::Socket did? still debugging this in Perlbal.
|
||||
|
||||
1.25: (2004-10-22)
|
||||
|
||||
-- move the syscall.ph require into "package main" rather than stealing
|
||||
all its definitions into our namespace. now other modules can
|
||||
use syscall.ph and Danga::Socket at the same time (as long as they're
|
||||
also polite and load it into main) (NOTE: if you know a better way
|
||||
to do this, let us know...)
|
||||
|
||||
1.24: (2004-10-21)
|
||||
|
||||
-- ability to steal the underlying socket from the Danga::Socket
|
||||
object. this is useful if a caller wants to hold onto the socket
|
||||
but destroy the Danga::Socket object (previously the Danga::Socket
|
||||
close would close the underlying socket)
|
||||
|
||||
1.22: (2004-10-21)
|
||||
|
||||
-- minimal POD docs
|
||||
-- first public release
|
||||
9
wcmtools/lib/Danga-Socket/MANIFEST
Executable file
@@ -0,0 +1,9 @@
|
||||
Makefile.PL
|
||||
CHANGES
|
||||
MANIFEST
|
||||
Socket.pm
|
||||
META.yml Module meta-data (added by MakeMaker)
|
||||
t/00-use.t
|
||||
t/05-postloop.t
|
||||
t/10-events.t
|
||||
|
||||
12
wcmtools/lib/Danga-Socket/MANIFEST.SKIP
Executable file
@@ -0,0 +1,12 @@
|
||||
^#
|
||||
\bCVS\b
|
||||
^MANIFEST\.
|
||||
^Makefile$
|
||||
~$
|
||||
\.html$
|
||||
\.old$
|
||||
^blib/
|
||||
_blib$
|
||||
^MakeMaker-\d
|
||||
^\.exists
|
||||
\bdebian\b
|
||||
15
wcmtools/lib/Danga-Socket/META.yml
Executable file
@@ -0,0 +1,15 @@
|
||||
# http://module-build.sourceforge.net/META-spec.html
|
||||
#XXXXXXX This is a prototype!!! It will change in the future!!! XXXXX#
|
||||
name: Danga-Socket
|
||||
version: 1.42
|
||||
version_from: Socket.pm
|
||||
installdirs: site
|
||||
requires:
|
||||
fields: 0
|
||||
IO::Poll: 0
|
||||
POSIX: 0
|
||||
Socket: 0
|
||||
Test::More: 0
|
||||
|
||||
distribution_type: module
|
||||
generated_by: ExtUtils::MakeMaker version 6.17
|
||||
35
wcmtools/lib/Danga-Socket/Makefile.PL
Executable file
@@ -0,0 +1,35 @@
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
# Perl Makefile for Danga-Socket
|
||||
# $Id: Makefile.PL,v 1.6 2005/03/08 01:02:35 bradfitz Exp $
|
||||
#
|
||||
# Invoke with 'perl Makefile.PL'
|
||||
#
|
||||
# See ExtUtils::MakeMaker (3) for more information on how to influence
|
||||
# the contents of the Makefile that is written
|
||||
#
|
||||
|
||||
use ExtUtils::MakeMaker;
|
||||
|
||||
WriteMakefile(
|
||||
NAME => 'Danga::Socket',
|
||||
VERSION_FROM => 'Socket.pm', # finds $VERSION
|
||||
AUTHOR => 'Brad Fitzpatrick <brad@danga.com>',
|
||||
ABSTRACT => 'Async socket class',
|
||||
PREREQ_PM => {
|
||||
'Socket' => 0,
|
||||
'IO::Poll' => 0,
|
||||
fields => 0,
|
||||
'POSIX' => 0,
|
||||
'Test::More' => 0,
|
||||
},
|
||||
dist => {
|
||||
CI => "cvs commit",
|
||||
RCS_LABEL => 'cvs tag RELEASE_$(VERSION_SYM)',
|
||||
SUFFIX => ".gz",
|
||||
DIST_DEFAULT => 'all tardist',
|
||||
COMPRESS => "gzip",
|
||||
},
|
||||
|
||||
);
|
||||
|
||||
1170
wcmtools/lib/Danga-Socket/Socket.pm
Executable file
File diff suppressed because it is too large
17
wcmtools/lib/Danga-Socket/debian/changelog
Executable file
@@ -0,0 +1,17 @@
|
||||
libdanga-socket-perl (1.40-1) unstable; urgency=low
|
||||
|
||||
* New upstream version
|
||||
|
||||
-- Jay Bonci <jaybonci@debian.org> Tue, 5 Apr 2005 15:33:34 -0400
|
||||
|
||||
libdanga-socket-perl (1.38-1) unstable; urgency=low
|
||||
|
||||
* New upstream version
|
||||
|
||||
-- Jay Bonci <jaybonci@debian.org> Wed, 9 Feb 2005 02:32:07 -0500
|
||||
|
||||
libdanga-socket-perl (1.25-1) unstable; urgency=low
|
||||
|
||||
* Initial release
|
||||
|
||||
-- Jay Bonci <jaybonci@debian.org> Thu, 13 Jan 2005 23:13:18 -0500
|
||||
1
wcmtools/lib/Danga-Socket/debian/compat
Executable file
@@ -0,0 +1 @@
|
||||
4
|
||||
13
wcmtools/lib/Danga-Socket/debian/control
Executable file
@@ -0,0 +1,13 @@
|
||||
Source: libdanga-socket-perl
|
||||
Section: perl
|
||||
Priority: optional
|
||||
Maintainer: Jay Bonci <jaybonci@debian.org>
|
||||
Build-Depends-Indep: debhelper (>= 4.1.40), perl (>= 5.8.4)
|
||||
Standards-Version: 3.6.1.0
|
||||
|
||||
Package: libdanga-socket-perl
|
||||
Architecture: all
|
||||
Depends: ${perl:Depends}
|
||||
Description: fast pure-perl asynchronous socket base class
|
||||
Danga::Socket is an abstract base class which provides the basic framework for
|
||||
event-driven asynchronous IO, designed to be fast.
|
||||
26
wcmtools/lib/Danga-Socket/debian/copyright
Executable file
@@ -0,0 +1,26 @@
|
||||
This package was debianized by Jay Bonci <jaybonci@debian.org> on
|
||||
Thu Jan 13 23:18:32 EST 2005
|
||||
|
||||
It was downloaded from: http://www.danga.com/dist/Danga-Socket/
|
||||
|
||||
Upstream Authors:
|
||||
Brad Fitzpatrick <brad@danga.com>
|
||||
Michael Granger <ged@danga.com>
|
||||
Mark Smith <marksmith@danga.com>
|
||||
|
||||
Copyright:
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of either:
|
||||
|
||||
a) the GNU General Public License as published by the Free
|
||||
Software Foundation; either version 1, or (at your option) any
|
||||
later version, or
|
||||
|
||||
b) the "Artistic License"
|
||||
|
||||
See:
|
||||
|
||||
/usr/share/common-licenses/Artistic
|
||||
/usr/share/common-licenses/GPL
|
||||
|
||||
For more information regarding these licensing options
|
||||
53
wcmtools/lib/Danga-Socket/debian/rules
Executable file
@@ -0,0 +1,53 @@
|
||||
#!/usr/bin/make -f
|
||||
# Sample debian/rules that uses debhelper.
|
||||
# GNU copyright 1997 to 1999 by Joey Hess.
|
||||
|
||||
# Uncomment this to turn on verbose mode.
|
||||
#export DH_VERBOSE=1
|
||||
|
||||
# This is the debhelper compatibility version to use.
|
||||
# export DH_COMPAT=4
|
||||
|
||||
#PACKAGE=`pwd | sed -e "s/.*\/\\(.*\\)-.*/\\1/"`
|
||||
PACKAGE=`cat debian/control | perl -ne 'print if s/Package: (.*)/$$1/'`
|
||||
|
||||
build:
|
||||
dh_testdir
|
||||
# Add here commands to compile the package.
|
||||
perl Makefile.PL verbose INSTALLDIRS=vendor
|
||||
clean:
|
||||
dh_testdir
|
||||
dh_testroot
|
||||
|
||||
-$(MAKE) clean
|
||||
rm -f Makefile.old
|
||||
dh_clean
|
||||
|
||||
install:
|
||||
dh_testdir
|
||||
dh_testroot
|
||||
dh_clean -k
|
||||
dh_installdirs
|
||||
|
||||
$(MAKE) PREFIX=$(CURDIR)/debian/$(PACKAGE)/usr OPTIMIZE="-O2 -g -Wall" test install
|
||||
-find $(CURDIR)/debian -type d | xargs rmdir -p --ignore-fail-on-non-empty
|
||||
|
||||
binary-arch:;
|
||||
binary-indep: build install
|
||||
dh_testdir
|
||||
dh_testroot
|
||||
dh_installdocs
|
||||
dh_installman
|
||||
dh_installchangelogs CHANGES
|
||||
dh_link
|
||||
dh_strip
|
||||
dh_compress
|
||||
dh_fixperms
|
||||
dh_installdeb
|
||||
dh_perl
|
||||
dh_gencontrol
|
||||
dh_md5sums
|
||||
dh_builddeb
|
||||
|
||||
binary: binary-indep binary-arch
|
||||
.PHONY: build clean binary-indep binary-arch binary install configure
|
||||
3
wcmtools/lib/Danga-Socket/debian/watch
Executable file
@@ -0,0 +1,3 @@
|
||||
version=2
|
||||
http://www.danga.com/dist/Danga-Socket/Danga-Socket-([0-9].*)\.tar.gz \
|
||||
debian uupdate
|
||||
8
wcmtools/lib/Danga-Socket/t/00-use.t
Executable file
@@ -0,0 +1,8 @@
|
||||
#!/usr/bin/perl -w
|
||||
|
||||
use strict;
|
||||
use Test::More tests => 1;
|
||||
|
||||
my $mod = "Danga::Socket";
|
||||
|
||||
use_ok($mod);
|
||||
24
wcmtools/lib/Danga-Socket/t/05-postloop.t
Executable file
@@ -0,0 +1,24 @@
|
||||
#!/usr/bin/perl -w
|
||||
|
||||
use strict;
|
||||
use Test::More 'no_plan';
|
||||
use Danga::Socket;
|
||||
|
||||
my ($t1, $t2, $iters);
|
||||
|
||||
$t1 = time();
|
||||
$iters = 0;
|
||||
|
||||
Danga::Socket->SetLoopTimeout(250);
|
||||
Danga::Socket->SetPostLoopCallback(sub {
|
||||
$iters++;
|
||||
return $iters < 4 ? 1 : 0;
|
||||
});
|
||||
|
||||
Danga::Socket->EventLoop;
|
||||
|
||||
$t2 = time();
|
||||
|
||||
ok($iters == 4, "four iters");
|
||||
ok($t2 == $t1 + 1, "took a second");
|
||||
|
||||
147
wcmtools/lib/Danga-Socket/t/10-events.t
Executable file
@@ -0,0 +1,147 @@
|
||||
#!/usr/bin/perl -w
|
||||
|
||||
use strict;
|
||||
use Test::More tests => 24;
|
||||
use Danga::Socket;
|
||||
use IO::Socket::INET;
|
||||
use POSIX;
|
||||
|
||||
use vars qw($done);
|
||||
|
||||
Danga::Socket::init_poller();
|
||||
|
||||
SKIP: {
|
||||
my ($sysname, $nodename, $release, $version, $machine) = POSIX::uname();
|
||||
skip "not on linux 2.6", 1 if $^O ne "linux" || $version =~ /^2\.[01234]/;
|
||||
ok(Danga::Socket->HaveEpoll(), "using epoll");
|
||||
}
|
||||
|
||||
|
||||
for my $mode ("auto", "poll") {
|
||||
$done = 0;
|
||||
my $iters = 0;
|
||||
is(Danga::Socket->WatchedSockets, 0, "no watched sockets");
|
||||
Danga::Socket->SetLoopTimeout(150);
|
||||
Danga::Socket->SetPostLoopCallback(sub {
|
||||
return 0 if $done;
|
||||
$iters++;
|
||||
ok(Server->new, "created server") if $iters == 1;
|
||||
if ($iters == 3) {
|
||||
ok(ClientOut->new, "created client outgoing");
|
||||
is(Danga::Socket->WatchedSockets, 2, "two watched sockets");
|
||||
}
|
||||
return 1;
|
||||
});
|
||||
|
||||
if ($mode eq "poll") {
|
||||
require IO::Poll;
|
||||
Danga::Socket->PollEventLoop;
|
||||
} else {
|
||||
Danga::Socket->EventLoop;
|
||||
}
|
||||
|
||||
ok($done, "$mode mode is done");
|
||||
|
||||
# check descriptor map status
|
||||
my $map = Danga::Socket->DescriptorMap;
|
||||
ok(ref $map eq "HASH", "map is hash");
|
||||
is(scalar keys %$map, 3, "watching 3 connections");
|
||||
Danga::Socket->Reset;
|
||||
is(scalar keys %$map, 0, "watching 0 connections");
|
||||
|
||||
}
|
||||
|
||||
ok(1, "finish");
|
||||
|
||||
|
||||
package Server;
|
||||
use base 'Danga::Socket';
|
||||
|
||||
sub new {
|
||||
my $class = shift;
|
||||
my $ssock = IO::Socket::INET->new(Listen => 5,
|
||||
LocalAddr => 'localhost',
|
||||
LocalPort => 60000,
|
||||
Proto => 'tcp',
|
||||
ReuseAddr => 1,
|
||||
Blocking => 0,
|
||||
);
|
||||
die "couldn't create socket" unless $ssock;
|
||||
IO::Handle::blocking($ssock, 0);
|
||||
my $self = $class->SUPER::new($ssock);
|
||||
$self->watch_read(1);
|
||||
return $self;
|
||||
}
|
||||
|
||||
sub event_read {
|
||||
my $self = shift;
|
||||
while (my ($psock, $peeraddr) = $self->{sock}->accept) {
|
||||
IO::Handle::blocking($psock, 0);
|
||||
Test::More::ok($psock, "Server got incoming conn");
|
||||
ClientIn->new($psock);
|
||||
}
|
||||
}
|
||||
|
||||
package ClientIn;
|
||||
use base 'Danga::Socket';
|
||||
use fields (
|
||||
'lines', #[]
|
||||
);
|
||||
|
||||
sub new {
|
||||
my ($class, $sock) = @_;
|
||||
|
||||
my $self = fields::new($class);
|
||||
$self->SUPER::new($sock); # init base fields
|
||||
bless $self, ref $class || $class;
|
||||
$self->watch_read(1);
|
||||
$self->{lines} = [];
|
||||
return $self;
|
||||
}
|
||||
|
||||
sub event_read {
|
||||
my $self = shift;
|
||||
my $bref = $self->read(5000);
|
||||
Test::More::ok($$bref eq "Hello!\n", "ClientIn got hello");
|
||||
$self->watch_read(0);
|
||||
$main::done = 1;
|
||||
}
|
||||
|
||||
|
||||
package ClientOut;
|
||||
use base 'Danga::Socket';
|
||||
use fields (
|
||||
'connected', # 0 or 1
|
||||
);
|
||||
use Socket qw(PF_INET IPPROTO_TCP SOCK_STREAM);
|
||||
|
||||
sub new {
|
||||
my $class = shift;
|
||||
|
||||
my $sock;
|
||||
socket $sock, PF_INET, SOCK_STREAM, IPPROTO_TCP;
|
||||
|
||||
die "can't create outgoing sock" unless $sock && defined fileno($sock);
|
||||
IO::Handle::blocking($sock, 0);
|
||||
connect $sock, Socket::sockaddr_in(60000, Socket::inet_aton('127.0.0.1'));
|
||||
|
||||
my $self = fields::new($class);
|
||||
$self->SUPER::new($sock);
|
||||
bless $self, ref $class || $class;
|
||||
|
||||
$self->{'connected'} = 0;
|
||||
|
||||
$self->watch_write(1);
|
||||
return $self;
|
||||
}
|
||||
|
||||
sub event_write {
|
||||
my $self = shift;
|
||||
if (! $self->{'connected'}) {
|
||||
Test::More::ok(1, "ClientOut connected");
|
||||
$self->{'connected'} = 1;
|
||||
}
|
||||
|
||||
$self->write("Hello!\n");
|
||||
$self->watch_write(0);
|
||||
}
|
||||
119
wcmtools/lib/HTMLCleaner.pm
Executable file
@@ -0,0 +1,119 @@
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
|
||||
package HTMLCleaner;
|
||||
|
||||
use strict;
|
||||
use base 'HTML::Parser';
|
||||
|
||||
sub new {
|
||||
my ($class, %opts) = @_;
|
||||
|
||||
my $p = new HTML::Parser('api_version' => 3);
|
||||
$p->handler('start' => \&start, 'self, tagname, attr, attrseq, text' );
|
||||
$p->handler('end' => \&end, 'self, tagname' );
|
||||
$p->handler('text' => \&text, 'self, text' );
|
||||
$p->handler('declaration' => \&decl, 'self, tokens' );
|
||||
|
||||
$p->{'output'} = $opts{'output'} || sub {};
|
||||
bless $p, $class;
|
||||
}
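# A minimal usage sketch (the input string and $clean variable are
# hypothetical; output is only delivered via the 'output' callback):
#
#   my $clean = "";
#   my $hc = HTMLCleaner->new(output => sub { $clean .= $_[0] });
#   $hc->parse(q{<p onclick="x()">hi<script>bad()</script></p>});
#   $hc->eof;
#   # $clean now holds: <p>hi</p>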
|
||||
|
||||
my %bad_attr = (map { $_ => 1 }
|
||||
qw(onabort onactivate onafterprint onafterupdate
|
||||
onbeforeactivate onbeforecopy onbeforecut
|
||||
onbeforedeactivate onbeforeeditfocus
|
||||
onbeforepaste onbeforeprint onbeforeunload
|
||||
onbeforeupdate onblur onbounce oncellchange
|
||||
onchange onclick oncontextmenu oncontrolselect
|
||||
oncopy oncut ondataavailable ondatasetchanged
|
||||
ondatasetcomplete ondblclick ondeactivate
|
||||
ondrag ondragend ondragenter ondragleave
|
||||
ondragover ondragstart ondrop onerror
|
||||
onerrorupdate onfilterchange onfinish onfocus
|
||||
onfocusin onfocusout onhelp onkeydown
|
||||
onkeypress onkeyup onlayoutcomplete onload
|
||||
onlosecapture onmousedown onmouseenter
|
||||
onmouseleave onmousemove onmouseout
|
||||
onmouseover onmouseup onmousewheel onmove
|
||||
onmoveend onmovestart onpaste onpropertychange
|
||||
onreadystatechange onreset onresize
|
||||
onresizeend onresizestart onrowenter onrowexit
|
||||
onrowsdelete onrowsinserted onscroll onselect
|
||||
onselectionchange onselectstart onstart onstop
|
||||
onsubmit onunload datasrc datafld));
|
||||
|
||||
my %eat_tag = (map { $_ => 1 }
|
||||
qw(script iframe object applet embed));
|
||||
|
||||
my @eating; # push tagname whenever we start eating a tag
|
||||
|
||||
sub start {
|
||||
my ($self, $tagname, $attr, $seq, $text) = @_;
|
||||
my $slashclose = 0; # xml-style
|
||||
if ($tagname =~ s!/(.*)!!) {
|
||||
if (length($1)) { push @eating, "$tagname/$1"; } # basically halt parsing
|
||||
else { $slashclose = 1; }
|
||||
}
|
||||
push @eating, $tagname if
|
||||
$eat_tag{$tagname};
|
||||
return if @eating;
|
||||
my $ret = "<$tagname";
|
||||
foreach (@$seq) {
|
||||
if ($_ eq "/") { $slashclose = 1; next; }
|
||||
next if $bad_attr{lc($_)};
|
||||
next if /(?:^=)|[\x0b\x0d]/;
|
||||
|
||||
# IE is brain-dead and lets javascript:, vbscript:, and about: have spaces mixed in
|
||||
if ($attr->{$_} =~ /((?:(?:v\s*b)|(?:j\s*a\s*v\s*a))\s*s\s*c\s*r\s*i\s*p\s*t|
|
||||
a\s*b\s*o\s*u\s*t)\s*:/ix) {
|
||||
delete $attr->{$_};
|
||||
}
|
||||
$ret .= " $_=\"" . ehtml($attr->{$_}) . "\"";
|
||||
}
|
||||
$ret .= " /" if $slashclose;
|
||||
$ret .= ">";
|
||||
$self->{'output'}->($ret);
|
||||
}
|
||||
|
||||
sub end {
|
||||
my ($self, $tagname) = @_;
|
||||
if (@eating) {
|
||||
pop @eating if $eating[-1] eq $tagname;
|
||||
return;
|
||||
}
|
||||
$self->{'output'}->("</$tagname>");
|
||||
}
|
||||
|
||||
sub text {
|
||||
my ($self, $text) = @_;
|
||||
return if @eating;
|
||||
# the parser gives us back text whenever it's confused
|
||||
# on really broken input. sadly, IE parses really broken
|
||||
# input, so let's escape anything going out this way.
|
||||
$self->{'output'}->(eangles($text));
|
||||
}
|
||||
|
||||
sub decl {
|
||||
my ($self, $tokens) = @_;
|
||||
$self->{'output'}->("<!" . join(" ", map { eangles($_) } @$tokens) . ">");
|
||||
}
|
||||
|
||||
sub eangles {
|
||||
my $a = shift;
|
||||
$a =~ s/</&lt;/g;
|
||||
$a =~ s/>/&gt;/g;
|
||||
return $a;
|
||||
}
|
||||
|
||||
sub ehtml {
|
||||
my $a = shift;
|
||||
$a =~ s/\&/&amp;/g;
|
||||
$a =~ s/\"/"/g;
|
||||
$a =~ s/\'/&\#39;/g;
|
||||
$a =~ s/</&lt;/g;
|
||||
$a =~ s/>/&gt;/g;
|
||||
return $a;
|
||||
}
|
||||
|
||||
1;
|
||||
21
wcmtools/lib/LWPx-ParanoidAgent/ChangeLog
Executable file
@@ -0,0 +1,21 @@
|
||||
1.02: 2005-05-24
|
||||
- block 0.0.0.0/8 as well (Andy Thomas <andy.thomas2@gmail.com>)
|
||||
|
||||
1.01: 2005-05-23
|
||||
- more POD docs (constructor and method calls)
|
||||
|
||||
- be aware of all forms of IP address (a, a.b, a.b.c, a.b.c.d)
|
||||
where all of a, b, c, and d can be in decimal, octal, or hex.
|
||||
(thanks to Martin Atkins and Timwi for pointing this out) pass
|
||||
in the canonicalized version of the IP address to the bad hosts
|
||||
checker.
|
||||
|
||||
1.00: 2005-05-20
|
||||
- fix holes pointed out by Martin Atkins (led to me doing all the
|
||||
Net::DNS and manual resolving work)
|
||||
|
||||
- bundle the test script by adding a local webserver mode to it,
|
||||
rather than using an xinetd script
|
||||
|
||||
0.99: 2005-05-19
|
||||
- initial release
|
||||
7
wcmtools/lib/LWPx-ParanoidAgent/MANIFEST
Executable file
@@ -0,0 +1,7 @@
|
||||
Makefile.PL
|
||||
ChangeLog
|
||||
lib/LWPx/Protocol/http_paranoid.pm
|
||||
lib/LWPx/Protocol/https_paranoid.pm
|
||||
lib/LWPx/ParanoidAgent.pm
|
||||
t/00-all.t
|
||||
META.yml Module meta-data (added by MakeMaker)
|
||||
13
wcmtools/lib/LWPx-ParanoidAgent/Makefile.PL
Executable file
@@ -0,0 +1,13 @@
|
||||
use ExtUtils::MakeMaker;
|
||||
WriteMakefile( 'NAME' => 'LWPx::ParanoidAgent',
|
||||
'VERSION_FROM' => 'lib/LWPx/ParanoidAgent.pm',
|
||||
'PREREQ_PM' => {
|
||||
'LWP::UserAgent' => 0,
|
||||
'Net::DNS' => 0,
|
||||
'Time::HiRes' => 0,
|
||||
},
|
||||
($] >= 5.005 ?
|
||||
(ABSTRACT_FROM => 'lib/LWPx/ParanoidAgent.pm',
|
||||
AUTHOR => 'Brad Fitzpatrick <brad@danga.com>') : ()),
|
||||
);
|
||||
|
||||
556
wcmtools/lib/LWPx-ParanoidAgent/lib/LWPx/ParanoidAgent.pm
Executable file
@@ -0,0 +1,556 @@
|
||||
package LWPx::ParanoidAgent;
|
||||
require LWP::UserAgent;
|
||||
|
||||
use vars qw(@ISA $VERSION);
|
||||
@ISA = qw(LWP::UserAgent);
|
||||
$VERSION = '1.02';
|
||||
|
||||
require HTTP::Request;
|
||||
require HTTP::Response;
|
||||
|
||||
use HTTP::Status ();
|
||||
use strict;
|
||||
use Net::DNS;
|
||||
|
||||
sub new {
|
||||
my $class = shift;
|
||||
my %opts = @_;
|
||||
|
||||
my $blocked_hosts = delete $opts{blocked_hosts} || [];
|
||||
my $whitelisted_hosts = delete $opts{whitelisted_hosts} || [];
|
||||
my $resolver = delete $opts{resolver};
|
||||
$opts{timeout} ||= 15;
|
||||
|
||||
my $self = LWP::UserAgent->new( %opts );
|
||||
|
||||
$self->{'blocked_hosts'} = $blocked_hosts;
|
||||
$self->{'whitelisted_hosts'} = $whitelisted_hosts;
|
||||
$self->{'resolver'} = $resolver;
|
||||
|
||||
$self = bless $self, $class;
|
||||
return $self;
|
||||
}
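# A minimal usage sketch (the hostname, pattern, and URL are hypothetical):
#
#   my $ua = LWPx::ParanoidAgent->new(
#       timeout           => 10,
#       whitelisted_hosts => [ "127.0.0.1" ],
#       blocked_hosts     => [ qr/\.corp\.example\.com$/i ],
#   );
#   my $res = $ua->get("http://www.example.com/");
#   die $res->status_line unless $res->is_success;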
|
||||
|
||||
# returns seconds remaining given a request
|
||||
sub _time_remain {
|
||||
my $self = shift;
|
||||
my $req = shift;
|
||||
|
||||
my $now = time();
|
||||
my $start_time = $req->{_time_begin} || $now;
|
||||
return $start_time + $self->{timeout} - $now;
|
||||
}
|
||||
|
||||
sub _resolve {
|
||||
my ($self, $host, $request, $timeout, $depth) = @_;
|
||||
my $res = $self->resolver;
|
||||
$depth ||= 0;
|
||||
|
||||
die "CNAME recursion depth limit exceeded.\n" if $depth > 10;
|
||||
die "Suspicious results from DNS lookup" if $self->_bad_host($host);
|
||||
|
||||
# return the IP address if it looks like one and wasn't marked bad
|
||||
return ($host) if $host =~ /^\d+\.\d+\.\d+\.\d+$/;
|
||||
|
||||
my $sock = $res->bgsend($host)
|
||||
or die "No sock from bgsend";
|
||||
|
||||
my $rin = '';
|
||||
vec($rin, fileno($sock), 1) = 1;
|
||||
my $nf = select($rin, undef, undef, $self->_time_remain($request));
|
||||
die "DNS lookup timeout" unless $nf;
|
||||
|
||||
my $packet = $res->bgread($sock)
|
||||
or die "DNS bgread failure";
|
||||
$sock = undef;
|
||||
|
||||
my @addr;
|
||||
my $cname;
|
||||
foreach my $rr ($packet->answer) {
|
||||
if ($rr->type eq "A") {
|
||||
die "Suspicious DNS results from A record\n" if $self->_bad_host($rr->address);
|
||||
push @addr, $rr->address;
|
||||
} elsif ($rr->type eq "CNAME") {
|
||||
# will be checked for validity in the recursion path
|
||||
$cname = $rr->cname;
|
||||
}
|
||||
}
|
||||
|
||||
return @addr if @addr;
|
||||
return () unless $cname;
|
||||
return $self->_resolve($cname, $request, $timeout, $depth + 1);
|
||||
}
|
||||
|
||||
sub _host_list_match {
|
||||
my $self = shift;
|
||||
my $list_name = shift;
|
||||
my $host = shift;
|
||||
|
||||
foreach my $rule (@{ $self->{$list_name} }) {
|
||||
if (ref $rule eq "CODE") {
|
||||
return 1 if $rule->($host);
|
||||
} elsif (ref $rule) {
|
||||
# assume regexp
|
||||
return 1 if $host =~ /$rule/;
|
||||
} else {
|
||||
return 1 if $host eq $rule;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sub _bad_host {
|
||||
my $self = shift;
|
||||
my $host = lc(shift);
|
||||
|
||||
return 0 if $self->_host_list_match("whitelisted_hosts", $host);
|
||||
return 1 if $self->_host_list_match("blocked_hosts", $host);
|
||||
return 1 if
|
||||
$host =~ /^localhost$/i || # localhost is bad. even though it'd be stopped in
|
||||
# a later call to _bad_host with the IP address
|
||||
$host =~ /\s/i; # any whitespace is questionable
|
||||
|
||||
# Let's assume it's an IP address now, and get it into 32 bits.
|
||||
# If at any time something doesn't look like a number, then it's
|
||||
# probably a hostname and we've already either whitelisted or
|
||||
# blacklisted those, so we'll just say it's okay and it'll come
|
||||
# back here later when the resolver finds an IP address.
|
||||
my @parts = split(/\./, $host);
|
||||
return 0 if @parts > 4;
|
||||
|
||||
# un-octal/un-hex the parts, or return if there's a non-numeric part
|
||||
my $overflow_flag = 0;
|
||||
foreach (@parts) {
|
||||
return 0 unless /^\d+$/ || /^0x[a-f\d]+$/;
|
||||
local $SIG{__WARN__} = sub { $overflow_flag = 1; };
|
||||
$_ = oct($_) if /^0/;
|
||||
}
|
||||
|
||||
# a purely numeric address shouldn't overflow.
|
||||
return 1 if $overflow_flag;
|
||||
|
||||
my $addr; # network order packed IP address
|
||||
|
||||
if (@parts == 1) {
|
||||
# a - 32 bits
|
||||
return 1 if
|
||||
$parts[0] > 0xffffffff;
|
||||
$addr = pack("N", $parts[0]);
|
||||
} elsif (@parts == 2) {
|
||||
# a.b - 8.24 bits
|
||||
return 1 if
|
||||
$parts[0] > 0xff ||
|
||||
$parts[1] > 0xffffff;
|
||||
$addr = pack("N", $parts[0] << 24 | $parts[1]);
|
||||
} elsif (@parts == 3) {
|
||||
# a.b.c - 8.8.16 bits
|
||||
return 1 if
|
||||
$parts[0] > 0xff ||
|
||||
$parts[1] > 0xff ||
|
||||
$parts[2] > 0xffff;
|
||||
$addr = pack("N", $parts[0] << 24 | $parts[1] << 16 | $parts[2]);
|
||||
} else {
|
||||
# a.b.c.d - 8.8.8.8 bits
|
||||
return 1 if
|
||||
$parts[0] > 0xff ||
|
||||
$parts[1] > 0xff ||
|
||||
$parts[2] > 0xff ||
|
||||
$parts[3] > 0xff;
|
||||
$addr = pack("N", $parts[0] << 24 | $parts[1] << 16 | $parts[2] << 8 | $parts[3]);
|
||||
}
|
||||
|
||||
my $haddr = unpack("N", $addr); # host order IP address
|
||||
return 1 if
|
||||
($haddr & 0xFF000000) == 0x00000000 || # 0.0.0.0/8
|
||||
($haddr & 0xFF000000) == 0x0A000000 || # 10.0.0.0/8
|
||||
($haddr & 0xFF000000) == 0x7F000000 || # 127.0.0.0/8
|
||||
($haddr & 0xFFF00000) == 0xAC100000 || # 172.16.0.0/12
|
||||
($haddr & 0xFFFF0000) == 0xA9FE0000 || # 169.254.0.0/16
|
||||
($haddr & 0xFFFF0000) == 0xC0A80000 || # 192.168.0.0/16
|
||||
$haddr == 0xFFFFFFFF || # 255.255.255.255
|
||||
($haddr & 0xF0000000) == 0xE0000000; # multicast addresses
|
||||
|
||||
# as final IP address check, pass in the canonical a.b.c.d decimal form
|
||||
# to the blacklisted host check to see if matches as bad there.
|
||||
my $can_ip = join(".", map { ord } split //, $addr);
|
||||
return 1 if $self->_host_list_match("blocked_hosts", $can_ip);
|
||||
|
||||
# looks like an okay IP address
|
||||
return 0;
|
||||
}
|
||||
|
||||
sub request {
|
||||
my ($self, $req, $arg, $size, $previous) = @_;
|
||||
|
||||
# walk back to the first request, and set our _time_begin to its _time_begin, or if
|
||||
# we're the first, then use current time. used by LWPx::Protocol::http_paranoid
|
||||
my $first_res = $previous; # previous is the previous response that invoked this request
|
||||
$first_res = $first_res->previous while $first_res && $first_res->previous;
|
||||
$req->{_time_begin} = $first_res ? $first_res->request->{_time_begin} : time();
|
||||
|
||||
my $host = $req->uri->host;
|
||||
if ($self->_bad_host($host)) {
|
||||
my $err_res = HTTP::Response->new(403, "Unauthorized access to blocked host");
|
||||
$err_res->request($req);
|
||||
$err_res->header("Client-Date" => HTTP::Date::time2str(time));
|
||||
$err_res->header("Client-Warning" => "Internal response");
|
||||
$err_res->header("Content-Type" => "text/plain");
|
||||
$err_res->content("403 Unauthorized access to blocked host\n");
|
||||
return $err_res;
|
||||
}
|
||||
|
||||
return $self->SUPER::request($req, $arg, $size, $previous);
|
||||
}
|
||||
|
||||
# taken from LWP::UserAgent and modified slightly. (proxy support removed,
|
||||
# and the http and https schemes mapped to separate protocol handlers)
|
||||
sub send_request
|
||||
{
|
||||
my ($self, $request, $arg, $size) = @_;
|
||||
$self->_request_sanity_check($request);
|
||||
|
||||
my ($method, $url) = ($request->method, $request->uri);
|
||||
|
||||
local($SIG{__DIE__}); # protect against user defined die handlers
|
||||
|
||||
# Check that we have a METHOD and a URL first
|
||||
return _new_response($request, &HTTP::Status::RC_BAD_REQUEST, "Method missing")
|
||||
unless $method;
|
||||
return _new_response($request, &HTTP::Status::RC_BAD_REQUEST, "URL missing")
|
||||
unless $url;
|
||||
return _new_response($request, &HTTP::Status::RC_BAD_REQUEST, "URL must be absolute")
|
||||
unless $url->scheme;
|
||||
return _new_response($request, &HTTP::Status::RC_BAD_REQUEST,
|
||||
"ParanoidAgent doesn't support going through proxies. ".
|
||||
"In that case, do your paranoia at your proxy instead.")
|
||||
if $self->_need_proxy($url);
|
||||
|
||||
my $scheme = $url->scheme;
|
||||
return _new_response($request, &HTTP::Status::RC_BAD_REQUEST, "Only http and https are supported by ParanoidAgent")
|
||||
unless $scheme eq "http" || $scheme eq "https";
|
||||
|
||||
LWP::Debug::trace("$method $url");
|
||||
|
||||
my $protocol;
|
||||
|
||||
{
|
||||
# Honor object-specific restrictions by forcing protocol objects
|
||||
# into class LWP::Protocol::nogo.
|
||||
my $x;
|
||||
if($x = $self->protocols_allowed) {
|
||||
if(grep lc($_) eq $scheme, @$x) {
|
||||
LWP::Debug::trace("$scheme URLs are among $self\'s allowed protocols (@$x)");
|
||||
}
|
||||
else {
|
||||
LWP::Debug::trace("$scheme URLs aren't among $self\'s allowed protocols (@$x)");
|
||||
require LWP::Protocol::nogo;
|
||||
$protocol = LWP::Protocol::nogo->new;
|
||||
}
|
||||
}
|
||||
elsif ($x = $self->protocols_forbidden) {
|
||||
if(grep lc($_) eq $scheme, @$x) {
|
||||
LWP::Debug::trace("$scheme URLs are among $self\'s forbidden protocols (@$x)");
|
||||
require LWP::Protocol::nogo;
|
||||
$protocol = LWP::Protocol::nogo->new;
|
||||
}
|
||||
else {
|
||||
LWP::Debug::trace("$scheme URLs aren't among $self\'s forbidden protocols (@$x)");
|
||||
}
|
||||
}
|
||||
# else fall thru and create the protocol object normally
|
||||
}
|
||||
|
||||
unless ($protocol) {
|
||||
LWP::Protocol::implementor("${scheme}_paranoid", "LWPx::Protocol::${scheme}_paranoid");
|
||||
eval "require LWPx::Protocol::${scheme}_paranoid;";
|
||||
if ($@) {
|
||||
$@ =~ s/ at .* line \d+.*//s; # remove file/line number
|
||||
my $response = _new_response($request, &HTTP::Status::RC_NOT_IMPLEMENTED, $@);
|
||||
return $response;
|
||||
}
|
||||
|
||||
$protocol = eval { LWP::Protocol::create($scheme eq "http" ? "http_paranoid" : "https_paranoid", $self) };
|
||||
if ($@) {
|
||||
$@ =~ s/ at .* line \d+.*//s; # remove file/line number
|
||||
my $response = _new_response($request, &HTTP::Status::RC_NOT_IMPLEMENTED, $@);
|
||||
if ($scheme eq "https") {
|
||||
$response->message($response->message . " (Crypt::SSLeay not installed)");
|
||||
$response->content_type("text/plain");
|
||||
$response->content(<<EOT);
|
||||
LWP will support https URLs if the Crypt::SSLeay module is installed.
|
||||
More information at <http://www.linpro.no/lwp/libwww-perl/README.SSL>.
|
||||
EOT
|
||||
}
|
||||
return $response;
|
||||
}
|
||||
}
|
||||
|
||||
# Extract fields that will be used below
|
||||
my ($timeout, $cookie_jar, $use_eval, $parse_head, $max_size) =
|
||||
@{$self}{qw(timeout cookie_jar use_eval parse_head max_size)};
|
||||
|
||||
my $response;
|
||||
my $proxy = undef;
|
||||
if ($use_eval) {
|
||||
# we eval, and turn dies into responses below
|
||||
eval {
|
||||
$response = $protocol->request($request, $proxy,
|
||||
$arg, $size, $timeout);
|
||||
};
|
||||
if ($@) {
|
||||
$@ =~ s/ at .* line \d+.*//s; # remove file/line number
|
||||
$response = _new_response($request,
|
||||
&HTTP::Status::RC_INTERNAL_SERVER_ERROR,
|
||||
$@);
|
||||
}
|
||||
}
|
||||
else {
|
||||
$response = $protocol->request($request, $proxy,
|
||||
$arg, $size, $timeout);
|
||||
# XXX: Should we die unless $response->is_success ???
|
||||
}
|
||||
|
||||
$response->request($request); # record request for reference
|
||||
$cookie_jar->extract_cookies($response) if $cookie_jar;
|
||||
$response->header("Client-Date" => HTTP::Date::time2str(time));
|
||||
return $response;
|
||||
}
|
||||
|
||||
# blocked hostnames, compiled patterns, or subrefs
|
||||
sub blocked_hosts
|
||||
{
|
||||
my $self = shift;
|
||||
if (@_) {
|
||||
my @hosts = @_;
|
||||
$self->{'blocked_hosts'} = \@hosts;
|
||||
return;
|
||||
}
|
||||
return @{ $self->{'blocked_hosts'} || [] };
|
||||
}
|
||||
|
||||
# whitelisted hostnames, compiled patterns, or subrefs
|
||||
sub whitelisted_hosts
|
||||
{
|
||||
my $self = shift;
|
||||
if (@_) {
|
||||
my @hosts = @_;
|
||||
$self->{'whitelisted_hosts'} = \@hosts;
|
||||
return;
|
||||
}
|
||||
return @{ $self->{'whitelisted_hosts'} || [] };
|
||||
}
|
||||
|
||||
# get/set Net::DNS resolver object
|
||||
sub resolver
|
||||
{
|
||||
my $self = shift;
|
||||
if (@_) {
|
||||
$self->{'resolver'} = shift;
|
||||
require UNIVERSAL;
|
||||
die "Not a Net::DNS::Resolver object" unless
|
||||
UNIVERSAL::isa($self->{'resolver'}, "Net::DNS::Resolver");
|
||||
}
|
||||
return $self->{'resolver'} ||= Net::DNS::Resolver->new;
|
||||
}
|
||||
|
||||
# Taken directly from LWP::UserAgent because it was private there, and we can't depend on it
|
||||
# staying there in future versions: needed by our modified version of send_request
|
||||
sub _need_proxy
|
||||
{
|
||||
my($self, $url) = @_;
|
||||
$url = $HTTP::URI_CLASS->new($url) unless ref $url;
|
||||
|
||||
my $scheme = $url->scheme || return;
|
||||
if (my $proxy = $self->{'proxy'}{$scheme}) {
|
||||
if (@{ $self->{'no_proxy'} }) {
|
||||
if (my $host = eval { $url->host }) {
|
||||
for my $domain (@{ $self->{'no_proxy'} }) {
|
||||
if ($host =~ /\Q$domain\E$/) {
|
||||
LWP::Debug::trace("no_proxy configured");
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
LWP::Debug::debug("Proxied to $proxy");
|
||||
return $HTTP::URI_CLASS->new($proxy);
|
||||
}
|
||||
LWP::Debug::debug('Not proxied');
|
||||
undef;
|
||||
}
|
||||
|
||||
# Taken directly from LWP::UserAgent because it was private there, and we can't depend on it
|
||||
# staying there in future versions: needed by our modified version of send_request
|
||||
sub _request_sanity_check {
|
||||
my($self, $request) = @_;
|
||||
# some sanity checking
|
||||
if (defined $request) {
|
||||
if (ref $request) {
|
||||
Carp::croak("You need a request object, not a " . ref($request) . " object")
|
||||
if ref($request) eq 'ARRAY' or ref($request) eq 'HASH' or
|
||||
!$request->can('method') or !$request->can('uri');
|
||||
}
|
||||
else {
|
||||
Carp::croak("You need a request object, not '$request'");
|
||||
}
|
||||
}
|
||||
else {
|
||||
Carp::croak("No request object passed in");
|
||||
}
|
||||
}
|
||||
|
||||
# Taken directly from LWP::UserAgent because it was private there, and we can't depend on it
|
||||
# staying there in future versions: needed by our modified version of send_request
|
||||
sub _new_response {
|
||||
my($request, $code, $message) = @_;
|
||||
my $response = HTTP::Response->new($code, $message);
|
||||
$response->request($request);
|
||||
$response->header("Client-Date" => HTTP::Date::time2str(time));
|
||||
$response->header("Client-Warning" => "Internal response");
|
||||
$response->header("Content-Type" => "text/plain");
|
||||
$response->content("$code $message\n");
|
||||
return $response;
|
||||
}
|
||||
|
||||
1;
|
||||
|
||||
__END__

=head1 NAME

LWPx::ParanoidAgent - subclass of LWP::UserAgent that protects you from harm

=head1 SYNOPSIS

  require LWPx::ParanoidAgent;

  my $ua = LWPx::ParanoidAgent->new;

  # this is 10 seconds overall, from start to finish. not just between
  # socket reads. and it includes all redirects. so attackers telling
  # you to download from a malicious tarpit webserver can only stall
  # you for $n seconds

  $ua->timeout(10);

  # setup extra block lists, in addition to the always-enforced blocking
  # of private IP addresses, loopbacks, and multicast addresses

  $ua->blocked_hosts(
      "foo.com",
      qr/\.internal\.company\.com$/i,
      sub { my $host = shift; return 1 if is_bad($host); },
  );

  $ua->whitelisted_hosts(
      "brad.lj",
      qr/^192\.168\.64\.3?/,
      sub { ... },
  );

  # get/set the DNS resolver object that's used
  my $resolver = $ua->resolver;
  $ua->resolver(Net::DNS::Resolver->new(...));

  # and then just like a normal LWP::UserAgent, because it is one.
  my $response = $ua->get('http://search.cpan.org/');
  ...
  if ($response->is_success) {
      print $response->content;  # or whatever
  }
  else {
      die $response->status_line;
  }

=head1 DESCRIPTION

C<LWPx::ParanoidAgent> is a subclass of C<LWP::UserAgent> that is
paranoid against attackers.  It's to be used when you're fetching a
remote resource on behalf of a possibly malicious user.

This class can do whatever C<LWP::UserAgent> can (callbacks, uploads from
files, etc), except proxy support is explicitly removed, because in
that case you should do your paranoia at your proxy.

Also, the schemes are limited to http and https, which are mapped to
C<LWPx::Protocol::http_paranoid> and
C<LWPx::Protocol::https_paranoid>, respectively, which are forked
versions of the same ones without the "_paranoid".  Subclassing them
didn't look possible, as they were essentially just one huge function.

This class protects you from connecting to internal IP ranges (unless you
whitelist them), hostnames/IPs that you blacklist, a remote webserver
tarpitting your process (the timeout parameter is changed to be a global
timeout over the entire process), and all combinations of redirects and
DNS tricks that could otherwise tarpit you and/or connect to internal
resources.

=head1 CONSTRUCTOR

=over 4

=item C<new>

  my $ua = LWPx::ParanoidAgent->new([ %opts ]);

In addition to any constructor options from L<LWP::UserAgent>, you may
also set C<blocked_hosts> (to an arrayref), C<whitelisted_hosts> (also
an arrayref), and C<resolver>, a L<Net::DNS::Resolver> object.
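
For instance, a hypothetical combination of those options (the host
values are borrowed from the SYNOPSIS above):

  my $ua = LWPx::ParanoidAgent->new(
      blocked_hosts     => [ qr/\.internal\.company\.com$/i ],
      whitelisted_hosts => [ "brad.lj" ],
      resolver          => Net::DNS::Resolver->new,
  );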

=back

=head1 METHODS

=over 4

=item $ua->B<resolver>($net_dns_resolver)

=item $ua->B<resolver>

Get/set the L<Net::DNS::Resolver> object used to look up hostnames.

=item $ua->B<blocked_hosts>(@host_list)

=item $ua->B<blocked_hosts>

Get/set the list of blocked hosts.  The items in @host_list may be
compiled regular expressions (with qr//), code blocks, or scalar
literals.  In any case, the thing that is matched, passed in, or
compared (respectively) is each of the given hostname, the given IP
address, and the IP address in canonical a.b.c.d decimal notation.  So if
you want to block "1.2.3.4" and the user entered it in a mix of
network/host form in a mix of decimal/octal/hex, you need only block
"1.2.3.4" and not worry about the details.

=item $ua->B<whitelisted_hosts>(@host_list)

=item $ua->B<whitelisted_hosts>

Like C<blocked_hosts>, but matching the hosts/IPs that bypass blocking
checks.  The only difference is that the IP address isn't canonicalized
before being matched against the whitelist, mostly because it doesn't
make sense for somebody to enter a good address in a subversive way.

=back

=head1 SEE ALSO

See L<LWP::UserAgent> for how to use this class.

=head1 WARRANTY

This module is supplied "as-is" and comes with no warranty, expressed
or implied.  It tries to protect you from harm, but maybe it will harm
you instead.  Maybe it will destroy your data and your servers.  You'd
better audit it and send me bug reports.

=head1 BUGS

Maybe.  See the warranty above.

=head1 COPYRIGHT

Copyright 2005 Brad Fitzpatrick

A lot of code comes from the base class, copyright 1995-2004 Gisle Aas.

This library is free software; you can redistribute it and/or
modify it under the same terms as Perl itself.
428
wcmtools/lib/LWPx-ParanoidAgent/lib/LWPx/Protocol/http_paranoid.pm
Executable file
428
wcmtools/lib/LWPx-ParanoidAgent/lib/LWPx/Protocol/http_paranoid.pm
Executable file
@@ -0,0 +1,428 @@
|
||||
# $Id: http_paranoid.pm,v 1.1 2005/06/01 23:12:25 bradfitz Exp $
|
||||
#
|
||||
|
||||
package LWPx::Protocol::http_paranoid;
|
||||
|
||||
use strict;
|
||||
|
||||
require LWP::Debug;
|
||||
require HTTP::Response;
|
||||
require HTTP::Status;
|
||||
require Net::HTTP;
|
||||
|
||||
use vars qw(@ISA $TOO_LATE $TIME_REMAIN);
|
||||
|
||||
require LWP::Protocol;
|
||||
@ISA = qw(LWP::Protocol);
|
||||
|
||||
my $CRLF = "\015\012";
|
||||
|
||||
# lame hack using globals in this package to communicate to sysread in the
|
||||
# package at bottom, but whatchya gonna do? Don't want to go modify
|
||||
# Net::HTTP::* to pass explicit timeouts to all the sysreads.
|
||||
sub _set_time_remain {
|
||||
my $now = time;
|
||||
return unless defined $TOO_LATE;
|
||||
$TIME_REMAIN = $TOO_LATE - $now;
|
||||
$TIME_REMAIN = 0 if $TIME_REMAIN < 0;
|
||||
}
|
||||
|
||||
sub _new_socket
|
||||
{
|
||||
my($self, $host, $port, $timeout, $request) = @_;
|
||||
|
||||
my $conn_cache = $self->{ua}{conn_cache};
|
||||
if ($conn_cache) {
|
||||
if (my $sock = $conn_cache->withdraw("http", "$host:$port")) {
|
||||
return $sock if $sock && !$sock->can_read(0);
|
||||
# if the socket is readable, then either the peer has closed the
|
||||
# connection or there are some garbage bytes on it. In either
|
||||
# case we abandon it.
|
||||
$sock->close;
|
||||
}
|
||||
}
|
||||
|
||||
my @addrs = $self->{ua}->_resolve($host, $request, $timeout);
|
||||
unless (@addrs) {
|
||||
die "Can't connect to $host:$port (No suitable addresses found)";
|
||||
}
|
||||
|
||||
my $sock;
|
||||
local($^W) = 0; # IO::Socket::INET can be noisy
|
||||
|
||||
while (! $sock && @addrs) {
|
||||
my $addr = shift @addrs;
|
||||
|
||||
my $conn_timeout = $request->{_timebegin} ?
|
||||
(time() - $request->{_timebegin}) :
|
||||
$timeout;
|
||||
|
||||
$sock = $self->socket_class->new(PeerAddr => $addr,
|
||||
PeerPort => $port,
|
||||
Proto => 'tcp',
|
||||
Timeout => $conn_timeout,
|
||||
KeepAlive => !!$conn_cache,
|
||||
SendTE => 1,
|
||||
);
|
||||
}
|
||||
|
||||
unless ($sock) {
|
||||
# IO::Socket::INET leaves additional error messages in $@
|
||||
$@ =~ s/^.*?: //;
|
||||
die "Can't connect to $host:$port ($@)";
|
||||
}
|
||||
|
||||
# perl 5.005's IO::Socket does not have the blocking method.
|
||||
eval { $sock->blocking(0); };
|
||||
|
||||
$sock;
|
||||
}
|
||||
|
||||
sub socket_class
|
||||
{
|
||||
my $self = shift;
|
||||
(ref($self) || $self) . "::Socket";
|
||||
}
|
||||
|
||||
sub _get_sock_info
|
||||
{
|
||||
my($self, $res, $sock) = @_;
|
||||
if (defined(my $peerhost = $sock->peerhost)) {
|
||||
$res->header("Client-Peer" => "$peerhost:" . $sock->peerport);
|
||||
}
|
||||
}
|
||||
|
||||
sub _fixup_header
|
||||
{
|
||||
my($self, $h, $url, $proxy) = @_;
|
||||
|
||||
# Extract 'Host' header
|
||||
my $hhost = $url->authority;
|
||||
if ($hhost =~ s/^([^\@]*)\@//) { # get rid of potential "user:pass@"
|
||||
# add authorization header if we need them. HTTP URLs do
|
||||
# not really support specification of user and password, but
|
||||
# we allow it.
|
||||
if (defined($1) && not $h->header('Authorization')) {
|
||||
require URI::Escape;
|
||||
$h->authorization_basic(map URI::Escape::uri_unescape($_),
|
||||
split(":", $1, 2));
|
||||
}
|
||||
}
|
||||
$h->init_header('Host' => $hhost);
|
||||
|
||||
}
|
||||
|
||||
sub hlist_remove {
|
||||
my($hlist, $k) = @_;
|
||||
$k = lc $k;
|
||||
for (my $i = @$hlist - 2; $i >= 0; $i -= 2) {
|
||||
next unless lc($hlist->[$i]) eq $k;
|
||||
splice(@$hlist, $i, 2);
|
||||
}
|
||||
}
|
||||
|
||||
sub request
|
||||
{
|
||||
my($self, $request, $proxy, $arg, $size, $timeout) = @_;
|
||||
LWP::Debug::trace('()');
|
||||
|
||||
# paranoid: now $timeout means total time, not just between bytes coming in.
|
||||
# avoids attacker servers from tarpitting a service that fetches URLs.
|
||||
$TOO_LATE = undef;
|
||||
$TIME_REMAIN = undef;
|
||||
if ($timeout) {
|
||||
my $start_time = $request->{_time_begin} || time();
|
||||
$TOO_LATE = $start_time + $timeout;
|
||||
}
|
||||
|
||||
$size ||= 4096;
|
||||
|
||||
# check method
|
||||
my $method = $request->method;
|
||||
unless ($method =~ /^[A-Za-z0-9_!\#\$%&\'*+\-.^\`|~]+$/) { # HTTP token
|
||||
return new HTTP::Response &HTTP::Status::RC_BAD_REQUEST,
|
||||
'Library does not allow method ' .
|
||||
"$method for 'http:' URLs";
|
||||
}
|
||||
|
||||
my $url = $request->url;
|
||||
my($host, $port, $fullpath);
|
||||
|
||||
$host = $url->host;
|
||||
$port = $url->port;
|
||||
$fullpath = $url->path_query;
|
||||
$fullpath = "/$fullpath" unless $fullpath =~ m,^/,;
|
||||
|
||||
# connect to remote sites
|
||||
my $socket = $self->_new_socket($host, $port, $timeout, $request);
|
||||
|
||||
my @h;
|
||||
my $request_headers = $request->headers->clone;
|
||||
$self->_fixup_header($request_headers, $url, $proxy);
|
||||
|
||||
$request_headers->scan(sub {
|
||||
my($k, $v) = @_;
|
||||
$k =~ s/^://;
|
||||
$v =~ s/\n/ /g;
|
||||
push(@h, $k, $v);
|
||||
});
|
||||
|
||||
my $content_ref = $request->content_ref;
|
||||
$content_ref = $$content_ref if ref($$content_ref);
|
||||
my $chunked;
|
||||
my $has_content;
|
||||
|
||||
if (ref($content_ref) eq 'CODE') {
|
||||
my $clen = $request_headers->header('Content-Length');
|
||||
$has_content++ if $clen;
|
||||
unless (defined $clen) {
|
||||
push(@h, "Transfer-Encoding" => "chunked");
|
||||
$has_content++;
|
||||
$chunked++;
|
||||
}
|
||||
}
|
||||
else {
|
||||
# Set (or override) Content-Length header
|
||||
my $clen = $request_headers->header('Content-Length');
|
||||
if (defined($$content_ref) && length($$content_ref)) {
|
||||
$has_content++;
|
||||
if (!defined($clen) || $clen ne length($$content_ref)) {
|
||||
if (defined $clen) {
|
||||
warn "Content-Length header value was wrong, fixed";
|
||||
hlist_remove(\@h, 'Content-Length');
|
||||
}
|
||||
push(@h, 'Content-Length' => length($$content_ref));
|
||||
}
|
||||
}
|
||||
elsif ($clen) {
|
||||
warn "Content-Length set when there is not content, fixed";
|
||||
hlist_remove(\@h, 'Content-Length');
|
||||
}
|
||||
}
|
||||
|
||||
my $req_buf = $socket->format_request($method, $fullpath, @h);
|
||||
#print "------\n$req_buf\n------\n";
|
||||
|
||||
# XXX need to watch out for write timeouts
|
||||
# FIXME_BRAD: make it non-blocking and select during the write
|
||||
{
|
||||
my $n = $socket->syswrite($req_buf, length($req_buf));
|
||||
die $! unless defined($n);
|
||||
die "short write" unless $n == length($req_buf);
|
||||
#LWP::Debug::conns($req_buf);
|
||||
}
|
||||
|
||||
my($code, $mess, @junk);
|
||||
my $drop_connection;
|
||||
|
||||
if ($has_content) {
|
||||
my $write_wait = 0;
|
||||
$write_wait = 2
|
||||
if ($request_headers->header("Expect") || "") =~ /100-continue/;
|
||||
|
||||
my $eof;
|
||||
my $wbuf;
|
||||
my $woffset = 0;
|
||||
if (ref($content_ref) eq 'CODE') {
|
||||
my $buf = &$content_ref();
|
||||
$buf = "" unless defined($buf);
|
||||
$buf = sprintf "%x%s%s%s", length($buf), $CRLF, $buf, $CRLF
|
||||
if $chunked;
|
||||
$wbuf = \$buf;
|
||||
}
|
||||
else {
|
||||
$wbuf = $content_ref;
|
||||
$eof = 1;
|
||||
}
|
||||
|
||||
my $fbits = '';
|
||||
vec($fbits, fileno($socket), 1) = 1;
|
||||
|
||||
while ($woffset < length($$wbuf)) {
|
||||
|
||||
my $time_before;
|
||||
|
||||
my $now = time();
|
||||
if ($now > $TOO_LATE) {
|
||||
die "Request took too long.";
|
||||
}
|
||||
|
||||
my $sel_timeout = $TOO_LATE - $now;
|
||||
if ($write_wait) {
|
||||
$time_before = time;
|
||||
$sel_timeout = $write_wait if $write_wait < $sel_timeout;
|
||||
}
|
||||
|
||||
my $rbits = $fbits;
|
||||
my $wbits = $write_wait ? undef : $fbits;
|
||||
my $nfound = select($rbits, $wbits, undef, $sel_timeout);
|
||||
unless (defined $nfound) {
|
||||
die "select failed: $!";
|
||||
}
|
||||
|
||||
if ($write_wait) {
|
||||
$write_wait -= time - $time_before;
|
||||
$write_wait = 0 if $write_wait < 0;
|
||||
}
|
||||
|
||||
if (defined($rbits) && $rbits =~ /[^\0]/) {
|
||||
# readable
|
||||
my $buf = $socket->_rbuf;
|
||||
|
||||
_set_time_remain();
|
||||
|
||||
my $n = $socket->sysread($buf, 1024, length($buf));
|
||||
unless ($n) {
|
||||
die "EOF";
|
||||
}
|
||||
$socket->_rbuf($buf);
|
||||
if ($buf =~ /\015?\012\015?\012/) {
|
||||
# a whole response present
|
||||
($code, $mess, @h) = $socket->read_response_headers(laxed => 1,
|
||||
junk_out => \@junk,
|
||||
);
|
||||
if ($code eq "100") {
|
||||
$write_wait = 0;
|
||||
undef($code);
|
||||
}
|
||||
else {
|
||||
$drop_connection++;
|
||||
last;
|
||||
# XXX should perhaps try to abort write in a nice way too
|
||||
}
|
||||
}
|
||||
}
|
||||
if (defined($wbits) && $wbits =~ /[^\0]/) {
|
||||
my $n = $socket->syswrite($$wbuf, length($$wbuf), $woffset);
|
||||
unless ($n) {
|
||||
die "syswrite: $!" unless defined $n;
|
||||
die "syswrite: no bytes written";
|
||||
}
|
||||
$woffset += $n;
|
||||
|
||||
if (!$eof && $woffset >= length($$wbuf)) {
|
||||
# need to refill buffer from $content_ref code
|
||||
my $buf = &$content_ref();
|
||||
$buf = "" unless defined($buf);
|
||||
$eof++ unless length($buf);
|
||||
$buf = sprintf "%x%s%s%s", length($buf), $CRLF, $buf, $CRLF
|
||||
if $chunked;
|
||||
$wbuf = \$buf;
|
||||
$woffset = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_set_time_remain();
|
||||
($code, $mess, @h) = $socket->read_response_headers(laxed => 1, junk_out => \@junk)
|
||||
unless $code;
|
||||
($code, $mess, @h) = $socket->read_response_headers(laxed => 1, junk_out => \@junk)
|
||||
if $code eq "100";
|
||||
|
||||
my $response = HTTP::Response->new($code, $mess);
|
||||
my $peer_http_version = $socket->peer_http_version;
|
||||
$response->protocol("HTTP/$peer_http_version");
|
||||
while (@h) {
|
||||
my($k, $v) = splice(@h, 0, 2);
|
||||
$response->push_header($k, $v);
|
||||
}
|
||||
$response->push_header("Client-Junk" => \@junk) if @junk;
|
||||
|
||||
$response->request($request);
|
||||
$self->_get_sock_info($response, $socket);
|
||||
|
||||
if ($method eq "CONNECT") {
|
||||
$response->{client_socket} = $socket; # so it can be picked up
|
||||
return $response;
|
||||
}
|
||||
|
||||
if (my @te = $response->remove_header('Transfer-Encoding')) {
|
||||
$response->push_header('Client-Transfer-Encoding', \@te);
|
||||
}
|
||||
$response->push_header('Client-Response-Num', $socket->increment_response_count);
|
||||
|
||||
my $complete;
|
||||
$response = $self->collect($arg, $response, sub {
|
||||
my $buf = ""; #prevent use of uninitialized value in SSLeay.xs
|
||||
my $n;
|
||||
READ:
|
||||
{
|
||||
_set_time_remain();
|
||||
$n = $socket->read_entity_body($buf, $size);
|
||||
die "Can't read entity body: $!" unless defined $n;
|
||||
redo READ if $n == -1;
|
||||
}
|
||||
$complete++ if !$n;
|
||||
return \$buf;
|
||||
} );
|
||||
$drop_connection++ unless $complete;
|
||||
|
||||
_set_time_remain();
|
||||
@h = $socket->get_trailers;
|
||||
while (@h) {
|
||||
my($k, $v) = splice(@h, 0, 2);
|
||||
$response->push_header($k, $v);
|
||||
}
|
||||
|
||||
# keep-alive support
|
||||
unless ($drop_connection) {
|
||||
if (my $conn_cache = $self->{ua}{conn_cache}) {
|
||||
my %connection = map { (lc($_) => 1) }
|
||||
split(/\s*,\s*/, ($response->header("Connection") || ""));
|
||||
if (($peer_http_version eq "1.1" && !$connection{close}) ||
|
||||
$connection{"keep-alive"})
|
||||
{
|
||||
LWP::Debug::debug("Keep the http connection to $host:$port");
|
||||
$conn_cache->deposit("http", "$host:$port", $socket);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
$response;
|
||||
}
|
||||
|
||||
|
||||
#-----------------------------------------------------------
|
||||
package LWPx::Protocol::http_paranoid::SocketMethods;
|
||||
|
||||
sub sysread {
|
||||
my $self = shift;
|
||||
my $timeout = $LWPx::Protocol::http_paranoid::TIME_REMAIN;
|
||||
|
||||
if (defined $timeout) {
|
||||
die "read timeout" unless $self->can_read($timeout);
|
||||
}
|
||||
else {
|
||||
# since we have made the socket non-blocking we
|
||||
# use select to wait for some data to arrive
|
||||
$self->can_read(undef) || die "Assert";
|
||||
}
|
||||
sysread($self, $_[0], $_[1], $_[2] || 0);
|
||||
}
|
||||
|
||||
sub can_read {
|
||||
my($self, $timeout) = @_;
|
||||
my $fbits = '';
|
||||
vec($fbits, fileno($self), 1) = 1;
|
||||
my $nfound = select($fbits, undef, undef, $timeout);
|
||||
die "select failed: $!" unless defined $nfound;
|
||||
return $nfound > 0;
|
||||
}
|
||||
|
||||
sub ping {
|
||||
my $self = shift;
|
||||
!$self->can_read(0);
|
||||
}
|
||||
|
||||
sub increment_response_count {
|
||||
my $self = shift;
|
||||
return ++${*$self}{'myhttp_response_count'};
|
||||
}
|
||||
|
||||
#-----------------------------------------------------------
|
||||
package LWPx::Protocol::http_paranoid::Socket;
|
||||
use vars qw(@ISA);
|
||||
@ISA = qw(LWPx::Protocol::http_paranoid::SocketMethods Net::HTTP);
|
||||
|
||||
1;
|
||||
49
wcmtools/lib/LWPx-ParanoidAgent/lib/LWPx/Protocol/https_paranoid.pm
Executable file
49
wcmtools/lib/LWPx-ParanoidAgent/lib/LWPx/Protocol/https_paranoid.pm
Executable file
@@ -0,0 +1,49 @@
|
||||
#
|
||||
package LWPx::Protocol::https_paranoid;
|
||||
|
||||
# $Id: https_paranoid.pm,v 1.1 2005/06/01 23:12:25 bradfitz Exp $
|
||||
|
||||
use strict;
|
||||
|
||||
use vars qw(@ISA);
|
||||
require LWPx::Protocol::http_paranoid;
|
||||
@ISA = qw(LWPx::Protocol::http_paranoid);
|
||||
|
||||
sub _check_sock
|
||||
{
|
||||
my($self, $req, $sock) = @_;
|
||||
my $check = $req->header("If-SSL-Cert-Subject");
|
||||
if (defined $check) {
|
||||
my $cert = $sock->get_peer_certificate ||
|
||||
die "Missing SSL certificate";
|
||||
my $subject = $cert->subject_name;
|
||||
die "Bad SSL certificate subject: '$subject' !~ /$check/"
|
||||
unless $subject =~ /$check/;
|
||||
$req->remove_header("If-SSL-Cert-Subject"); # don't pass it on
|
||||
}
|
||||
}
|
||||
|
||||
sub _get_sock_info
|
||||
{
|
||||
my $self = shift;
|
||||
$self->SUPER::_get_sock_info(@_);
|
||||
my($res, $sock) = @_;
|
||||
$res->header("Client-SSL-Cipher" => $sock->get_cipher);
|
||||
my $cert = $sock->get_peer_certificate;
|
||||
if ($cert) {
|
||||
$res->header("Client-SSL-Cert-Subject" => $cert->subject_name);
|
||||
$res->header("Client-SSL-Cert-Issuer" => $cert->issuer_name);
|
||||
}
|
||||
if(! eval { $sock->get_peer_verify }) {
|
||||
$res->header("Client-SSL-Warning" => "Peer certificate not verified");
|
||||
}
|
||||
}
|
||||
|
||||
#-----------------------------------------------------------
|
||||
package LWPx::Protocol::https_paranoid::Socket;
|
||||
|
||||
use vars qw(@ISA);
|
||||
require Net::HTTPS;
|
||||
@ISA = qw(Net::HTTPS LWPx::Protocol::http_paranoid::SocketMethods);
|
||||
|
||||
1;
|
||||
207
wcmtools/lib/LWPx-ParanoidAgent/t/00-all.t
Executable file
207
wcmtools/lib/LWPx-ParanoidAgent/t/00-all.t
Executable file
@@ -0,0 +1,207 @@
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
|
||||
use strict;
|
||||
use LWPx::ParanoidAgent;
|
||||
use Time::HiRes qw(time);
|
||||
use Test::More tests => 25;
|
||||
use Net::DNS;
|
||||
use IO::Socket::INET;
|
||||
|
||||
my ($t1, $td);
|
||||
my $delta = sub { printf " %.03f secs\n", $td; };
|
||||
|
||||
my $ua = LWPx::ParanoidAgent->new;
|
||||
ok((ref $ua) =~ /LWPx::ParanoidAgent/);
|
||||
|
||||
my ($HELPER_IP, $HELPER_PORT) = ("127.66.74.70", 9001);
|
||||
|
||||
my $child_pid = fork;
|
||||
web_server_mode() if ! $child_pid;
|
||||
select undef, undef, undef, 0.5;
|
||||
|
||||
my $HELPER_SERVER = "http://$HELPER_IP:$HELPER_PORT";
|
||||
|
||||
|
||||
$ua->whitelisted_hosts(
|
||||
$HELPER_IP,
|
||||
);
|
||||
|
||||
$ua->blocked_hosts(
|
||||
qr/\.lj$/,
|
||||
"1.2.3.6",
|
||||
);
|
||||
|
||||
my $res;
|
||||
|
||||
# hostnames pointing to internal IPs
|
||||
$res = $ua->get("http://localhost-fortest.danga.com/");
|
||||
ok(! $res->is_success && $res->status_line =~ /Suspicious DNS results/);
|
||||
|
||||
# random IP address forms
|
||||
$res = $ua->get("http://0x7f.1/");
|
||||
ok(! $res->is_success && $res->status_line =~ /blocked/);
|
||||
$res = $ua->get("http://0x7f.0xffffff/");
|
||||
ok(! $res->is_success && $res->status_line =~ /blocked/);
|
||||
$res = $ua->get("http://037777777777/");
|
||||
ok(! $res->is_success && $res->status_line =~ /blocked/);
|
||||
$res = $ua->get("http://192.052000001/");
|
||||
ok(! $res->is_success && $res->status_line =~ /blocked/);
|
||||
$res = $ua->get("http://0x00.00/");
|
||||
ok(! $res->is_success && $res->status_line =~ /blocked/);
|
||||
|
||||
# test that the blocked host above in decimal form is blocked by this non-decimal form:
|
||||
$res = $ua->get("http://0x01.02.0x306/");
|
||||
ok(! $res->is_success && $res->status_line =~ /blocked/);
|
||||
|
||||
# hostnames doing CNAMEs (this one resolves to "brad.lj", which is verboten)
|
||||
my $old_resolver = $ua->resolver;
|
||||
$ua->resolver(Net::DNS::Resolver->new(nameservers => [ qw(66.150.15.140) ] ));
|
||||
$res = $ua->get("http://bradlj-fortest.danga.com/");
|
||||
print $res->status_line, "\n";
|
||||
ok(! $res->is_success);
|
||||
$ua->resolver($old_resolver);
|
||||
|
||||
# black-listed via blocked_hosts
|
||||
$res = $ua->get("http://brad.lj/");
|
||||
print $res->status_line, "\n";
|
||||
ok(! $res->is_success);
|
||||
|
||||
# can't do octal in IPs
|
||||
$res = $ua->get("http://012.1.2.1/");
|
||||
print $res->status_line, "\n";
|
||||
ok(! $res->is_success);
|
||||
|
||||
# can't do decimal/octal IPs
|
||||
$res = $ua->get("http://167838209/");
|
||||
print $res->status_line, "\n";
|
||||
ok(! $res->is_success);
|
||||
|
||||
# checking that port isn't affected
|
||||
$res = $ua->get("http://brad.lj:80/");
|
||||
print $res->status_line, "\n";
|
||||
ok(! $res->is_success);
|
||||
|
||||
# this domain is okay. bradfitz.com isn't blocked
|
||||
$res = $ua->get("http://bradfitz.com/");
|
||||
print $res->status_line, "\n";
|
||||
ok( $res->is_success);
|
||||
|
||||
# SSL should still work
|
||||
$res = $ua->get("https://pause.perl.org/pause/query");
|
||||
ok( $res->is_success && $res->content =~ /Login|PAUSE|Edit/);
|
||||
|
||||
# internal. bad. blocked by default by module.
|
||||
$res = $ua->get("http://10.2.3.4/");
|
||||
print $res->status_line, "\n";
|
||||
ok(! $res->is_success);
|
||||
|
||||
# okay
|
||||
$res = $ua->get("http://danga.com/temp/");
|
||||
print $res->status_line, "\n";
|
||||
ok( $res->is_success);
|
||||
|
||||
# localhost is blocked, case insensitive
|
||||
$res = $ua->get("http://LOCALhost/temp/");
|
||||
print $res->status_line, "\n";
|
||||
ok(! $res->is_success);
|
||||
|
||||
# redirecting to invalid host
|
||||
$res = $ua->get("$HELPER_SERVER/redir/http://10.2.3.4/");
|
||||
print $res->status_line, "\n";
|
||||
ok(! $res->is_success);
|
||||
|
||||
# redirect with tarpitting
|
||||
print "4 second redirect tarpit (tolerance 2)...\n";
|
||||
$ua->timeout(2);
|
||||
$res = $ua->get("$HELPER_SERVER/redir-4/http://www.danga.com/");
|
||||
ok(! $res->is_success);
|
||||
|
||||
# lots of slow redirects adding up to a lot of time
|
||||
print "Three 1-second redirect tarpits (tolerance 2)...\n";
|
||||
$ua->timeout(2);
|
||||
$t1 = time();
|
||||
$res = $ua->get("$HELPER_SERVER/redir-1/$HELPER_SERVER/redir-1/$HELPER_SERVER/redir-1/http://www.danga.com/");
|
||||
$td = time() - $t1;
|
||||
$delta->();
|
||||
ok($td < 2.5);
|
||||
ok(! $res->is_success);
|
||||
|
||||
# redirecting a bunch and getting the final good host
|
||||
$res = $ua->get("$HELPER_SERVER/redir/$HELPER_SERVER/redir/$HELPER_SERVER/redir/http://www.danga.com/");
|
||||
ok( $res->is_success && $res->request->uri->host eq "www.danga.com");
|
||||
|
||||
# dying in a tarpit
|
||||
print "5 second tarpit (tolerance 2)...\n";
|
||||
$ua->timeout(2);
|
||||
$res = $ua->get("$HELPER_SERVER/1.5");
|
||||
ok(! $res->is_success);
|
||||
|
||||
# making it out of a tarpit.
|
||||
print "3 second tarpit (tolerance 4)...\n";
|
||||
$ua->timeout(4);
|
||||
$res = $ua->get("$HELPER_SERVER/1.3");
|
||||
ok( $res->is_success);
|
||||
|
||||
kill 9, $child_pid;
|
||||
|
||||
|
||||
sub web_server_mode {
|
||||
my $ssock = IO::Socket::INET->new(Listen => 5,
|
||||
LocalAddr => $HELPER_IP,
|
||||
LocalPort => $HELPER_PORT,
|
||||
ReuseAddr => 1,
|
||||
Proto => 'tcp')
|
||||
or die "Couldn't start webserver.\n";
|
||||
|
||||
while (my $csock = $ssock->accept) {
|
||||
exit 0 unless $csock;
|
||||
fork and next;
|
||||
|
||||
my $eat = sub {
|
||||
while (<$csock>) {
|
||||
last if ! $_ || /^\r?\n/;
|
||||
}
|
||||
};
|
||||
|
||||
my $req = <$csock>;
|
||||
print STDERR " ####### GOT REQ: $req" if $ENV{VERBOSE};
|
||||
|
||||
if ($req =~ m!^GET /(\d+)\.(\d+) HTTP/1\.\d+\r?\n?$!) {
|
||||
my ($delay, $count) = ($1, $2);
|
||||
$eat->();
|
||||
print $csock
|
||||
"HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\n\r\n";
|
||||
for (1..$count) {
|
||||
print $csock "[$_/$count]\n";
|
||||
sleep $delay;
|
||||
}
|
||||
exit 0;
|
||||
}
|
||||
|
||||
if ($req =~ m!^GET /redir/(\S+) HTTP/1\.\d+\r?\n?$!) {
|
||||
my $dest = $1;
|
||||
$eat->();
|
||||
print $csock
|
||||
"HTTP/1.0 302 Found\r\nLocation: $dest\r\nContent-Length: 0\r\n\r\n";
|
||||
exit 0;
|
||||
}
|
||||
|
||||
if ($req =~ m!^GET /redir-(\d+)/(\S+) HTTP/1\.\d+\r?\n?$!) {
|
||||
my $sleep = $1;
|
||||
sleep $sleep;
|
||||
my $dest = $2;
|
||||
$eat->();
|
||||
print $csock
|
||||
"HTTP/1.0 302 Found\r\nLocation: $dest\r\nContent-Length: 0\r\n\r\n";
|
||||
exit 0;
|
||||
}
|
||||
|
||||
print $csock
|
||||
"HTTP/1.0 500 Server Error\r\n" .
|
||||
"Content-Length: 10\r\n\r\n" .
|
||||
"bogus_req\n";
|
||||
exit 0;
|
||||
}
|
||||
exit 0;
|
||||
}
|
||||
480
wcmtools/lib/MultiCVS.pm
Executable file
480
wcmtools/lib/MultiCVS.pm
Executable file
@@ -0,0 +1,480 @@
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
|
||||
package MultiCVS;
|
||||
use strict;
|
||||
|
||||
BEGIN {
|
||||
use Carp qw{confess croak};
|
||||
use IO::File qw{};
|
||||
use File::Find qw{find};
|
||||
use Fcntl qw{O_RDONLY};
|
||||
|
||||
use constant TRUE => 1;
|
||||
use constant FALSE => ();
|
||||
}
|
||||
|
||||
|
||||
### (CONSTRUCTOR) METHOD: new( $mainconfig )
|
||||
### Create a new MultiCVS object.
|
||||
sub new {
|
||||
my $proto = shift;
|
||||
my $class = ref $proto || $proto;
|
||||
my $mainconfig = shift;
|
||||
|
||||
my $self = bless {
|
||||
dir_live => '',
|
||||
dir_cvs => '',
|
||||
|
||||
directories => undef,
|
||||
filemap => undef,
|
||||
|
||||
_debug => undef,
|
||||
}, $class;
|
||||
|
||||
# Read the first argument as the main config file, and try to find and read
|
||||
# any local variant after that.
|
||||
if ( $mainconfig ) {
|
||||
$self->read_config( $mainconfig, 1 );
|
||||
|
||||
if ( $mainconfig =~ m{^(.+)multicvs.conf$} ) {
|
||||
my $localconf = "$1multicvs-local.conf";
|
||||
$self->read_config( $localconf );
|
||||
}
|
||||
}
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
sub debugmsg {
|
||||
my $self = shift or confess "Cannot be used as a function";
|
||||
return unless $self->{_debug};
|
||||
my ( $fmt, @args ) = @_;
|
||||
|
||||
printf STDERR $fmt, @args;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: read_config( $file[, $ismain] )
|
||||
### Read the object's configuration from the given I<file>.
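### A hypothetical sketch of the accepted format, inferred from the parser
### below (the paths and the $LJHOME variable are made up): "KEY = value"
### settings are only allowed in the main config, other lines map a cvs
### path to a live path, and a trailing '?' marks the source as optional.
###
###   LIVEDIR = $LJHOME
###   CVSDIR  = $LJHOME/cvs
###   livejournal/htdocs    htdocs
###   local/htdocs?         htdocs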
|
||||
sub read_config {
|
||||
my ( $self, $file, $ismain ) = @_;
|
||||
|
||||
my (
|
||||
$ifh,
|
||||
$line,
|
||||
);
|
||||
|
||||
open $ifh, "<$file" or die "open: $file: $!";
|
||||
|
||||
while ( <$ifh> ) {
|
||||
$line = $_;
|
||||
chomp $line;
|
||||
|
||||
# Strip leading, trailing space and comments
|
||||
$line =~ s{(^\s+|#.*|\s+$)}{}g;
|
||||
next unless $line =~ /\S/;
|
||||
|
||||
# Expand environment variables
|
||||
$line =~ s/\$(\w+)/$ENV{$1} or die "Environment variable \$$1 not set.\n"/ge;
|
||||
|
||||
# Set key/value pair variables if this is the main config
|
||||
if ( $line =~ /(\w+)\s*=\s*(.+)/ ) {
|
||||
my ($k, $v) = ($1, $2);
|
||||
die "Included config files can't set variables such as $k.\n" unless $ismain;
|
||||
|
||||
if ( $k eq "LIVEDIR" ) { $self->{dir_live} = $v }
|
||||
elsif ( $k eq "CVSDIR" ) { $self->{dir_cvs} = $v }
|
||||
else { die "Unknown option $k = $v\n"; }
|
||||
}
|
||||
|
||||
# Set name<space>value pairs
|
||||
elsif (/(\S+)\s+(.+)/) {
|
||||
my ($from, $to) = ($1, $2);
|
||||
my $optional = 0;
|
||||
|
||||
if ($from =~ s/\?$//) { $optional = 1; }
|
||||
|
||||
push @{$self->{paths}}, {
|
||||
'from' => $from,
|
||||
'to' => $to,
|
||||
'optional' => $optional,
|
||||
};
|
||||
} else {
|
||||
die "Bogus config line in '$file': $line\n";
|
||||
}
|
||||
}
|
||||
close $ifh;
|
||||
|
||||
# Clear any old entries
|
||||
$self->{directories} = $self->{files} = undef;
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: cvs_update( [$quiet] )
|
||||
### Update the modules under multicvs's control, optionally with the quiet flag
|
||||
### turned on.
|
||||
sub cvs_update {
|
||||
my $self = shift or confess "can't be called as a function";
|
||||
my $quiet = shift || 0;
|
||||
|
||||
my (
|
||||
$dir,
|
||||
$count,
|
||||
);
|
||||
|
||||
$count = 0;
|
||||
|
||||
# Do a 'cvs update' in directories that haven't been updated yet.
|
||||
foreach my $dir ( $self->directories ) {
|
||||
chdir $dir or die "chdir: $dir: $!\n";
|
||||
$self->debugmsg( "Updating CVS dir '$dir' ...\n" );
|
||||
system( "cvs", "update", "-dP" );
|
||||
|
||||
$count++;
|
||||
}
|
||||
|
||||
return $count;
|
||||
}
|
||||
|
||||
|
||||
|
||||
### METHOD: directories()
|
||||
### Returns a list of the top-level directories which should be checked for
|
||||
### updates.
|
||||
sub directories {
|
||||
my $self = shift or confess "cannot be used as a function";
|
||||
|
||||
my (
|
||||
$root,
|
||||
$dir,
|
||||
);
|
||||
|
||||
unless ( $self->{directories} ) {
|
||||
my %map = ();
|
||||
|
||||
foreach my $path ( @{$self->{paths}} ) {
|
||||
|
||||
# Get the root module which contains the file, fully-qualify it,
|
||||
# then add it to the map
|
||||
( $root = $path->{from} ) =~ s!/.*!!;
|
||||
$dir = "$self->{dir_cvs}/$root";
|
||||
$map{ $dir } = 1 if -d $dir;
|
||||
}
|
||||
|
||||
$self->{directories} = [ keys %map ];
|
||||
}
|
||||
|
||||
return wantarray ? @{$self->{directories}} : $self->{directories};
|
||||
}
|
||||
|
||||
|
||||
### METHOD: filemap()
|
||||
### Make a map of live file paths to their equivalent cvs paths out of the multicvs
|
||||
### configuration. Returns either a hash in list context or a hashref in scalar
|
||||
### context.
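### A brief usage sketch (hypothetical object name and paths):
###
###   my %map = $mcvs->filemap;
###   # e.g. '/home/lj/htdocs/index.bml' => '/home/lj/cvs/livejournal/htdocs/index.bml'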
|
||||
sub filemap {
|
||||
my $self = shift or confess "can't be used as a function";
|
||||
|
||||
unless ( $self->{filemap} ) {
|
||||
my (
|
||||
$from,
|
||||
$to,
|
||||
$cvsfile,
|
||||
$livefile,
|
||||
$selector,
|
||||
%files,
|
||||
);
|
||||
|
||||
# Process each path from the config
|
||||
foreach my $path ( @{$self->{paths}} ) {
|
||||
|
||||
$self->debugmsg( ">>> Mapping files under $path->{from}...\n" );
|
||||
|
||||
# Calculate the fully-qualified source and destination paths
|
||||
$from = "$self->{dir_cvs}/$path->{from}";
|
||||
$to = "$self->{dir_live}/$path->{to}";
|
||||
|
||||
# Trim any trailing "/." or "/" from destination and source
|
||||
$to =~ s{/\.?/?$}{};
|
||||
$from =~ s{/\.?/?$}{};
|
||||
|
||||
# Search the current directories for files.
|
||||
if ( -d $from ) {
|
||||
$self->debugmsg( "Adding files to the map from directory ${from} under ${to}\n" );
|
||||
|
||||
# Selector proc -- discards backups, saves good files.
|
||||
$selector = sub {
|
||||
my $name = $_;
|
||||
$self->debugmsg( " Examining '$name' in '${File::Find::dir}'...\n" );
|
||||
|
||||
# Skip only the top-level '.' entry itself; process everything else
|
||||
if ( $name ne '.' || $File::Find::dir ne $from ) {
|
||||
|
||||
# Prune garbage
|
||||
if ( $name eq '..'|| $name =~ m{^\.\#|\bCVS\b|~$} ) {
|
||||
$File::Find::prune = 1;
|
||||
}
|
||||
|
||||
# Add the file to the map after fully-qualifying the
|
||||
# paths.
|
||||
else {
|
||||
$cvsfile = "${File::Find::dir}/${name}";
|
||||
( $livefile = $cvsfile ) =~ s{^$from}{$to}e;
|
||||
$self->debugmsg( " Adding file from %s to map as %s\n",
|
||||
$cvsfile, $livefile );
|
||||
|
||||
$files{ $livefile } = $cvsfile if -f $cvsfile;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
# Now actually do the find
|
||||
File::Find::find( {wanted => $selector, follow => 1}, $from );
|
||||
}
|
||||
|
||||
# Plain file -- just look to see if it exists in the cvs dir, adding
|
||||
# it if so, warning about it if not
|
||||
else {
|
||||
if ( -e $from ) {
|
||||
$self->debugmsg( "Adding file ${from} to map as ${to}\n" );
|
||||
$files{ $to } = $from;
|
||||
} else {
|
||||
warn "WARNING: $from doesn't exist under $self->{dir_cvs}\n"
|
||||
unless $path->{optional};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Cache the results
|
||||
$self->{filemap} = \%files;
|
||||
}
|
||||
|
||||
return wantarray ? %{$self->{filemap}} : $self->{filemap};
|
||||
}
|
||||
|
||||
|
||||
|
||||
### METHOD: find_changed_files( [@files] )
|
||||
### Returns a hash (or hashref in scalar context) of tuples describing changes
|
||||
### which must be made to bring the cvs and live dirs into sync for the given
|
||||
### I<files>, or for all files if no I<files> are given. Each entry in the hash
|
||||
### is keyed by relative filename, and each value is a tuple (an arrayref) of
|
||||
### the following form:
|
||||
###
|
||||
### { from => $from_path, type => $direction, to => $to_path }
|
||||
###
|
||||
### where I<from_path> is the path to the newer file, I<direction> is either
|
||||
### C<c> for a file which is newer in CVS or C<l> for a file which is newer in
|
||||
### the live tree, and I<to_path> is the path to the older file that should be
|
||||
### replaced.
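### A brief usage sketch (hypothetical; $mcvs is a MultiCVS instance):
###
###   my %changed = $mcvs->find_changed_files;
###   while ( my ($rel, $t) = each %changed ) {
###       printf "%s: %s copy is newer (%s -> %s)\n", $rel,
###              ( $t->{type} eq 'c' ? 'cvs' : 'live' ), $t->{from}, $t->{to};
###   }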
|
||||
sub find_changed_files {
|
||||
my $self = shift or confess "Cannot be called as a function";
|
||||
|
||||
my $filemap = $self->filemap;
|
||||
my %tuples = ();
|
||||
|
||||
my (
|
||||
$module,
|
||||
$relfile,
|
||||
$lfile,
|
||||
$cfile,
|
||||
$live_time,
|
||||
$cvs_time,
|
||||
);
|
||||
|
||||
# Iterate over the list of relative files, fully-qualifying them and then
|
||||
# checking for up-to-dateness.
|
||||
while ( ($lfile, $cfile) = each %$filemap ) {
|
||||
|
||||
# Get the name of the cvs module for this entry, as well as the relative
|
||||
# path in the live site.
|
||||
( $module = $cfile ) =~ s{(^$self->{dir_cvs}/|/.*)}{}g;
|
||||
( $relfile = $lfile ) =~ s{^$self->{dir_live}/}{};
|
||||
|
||||
# Fetch timestamps
|
||||
$live_time = -e $lfile ? (stat _)[9] : 0;
|
||||
$cvs_time = -e $cfile ? (stat _)[9] : 0;
|
||||
|
||||
$self->debugmsg( "Comparing: %s -> %s (%s): %d -> %d\n",
|
||||
$lfile, $cfile, $relfile, $live_time, $cvs_time );
|
||||
|
||||
# If either of them is newer, add an entry for it
|
||||
if ( $live_time > $cvs_time ) {
|
||||
$self->debugmsg( " Live was newer: adding " );
|
||||
$tuples{ $relfile } = {
|
||||
from => $lfile,
|
||||
type => 'l',
|
||||
module => $module,
|
||||
to => $cfile,
|
||||
live_time => $live_time,
|
||||
cvs_time => $cvs_time,
|
||||
diff => undef,
|
||||
};
|
||||
} elsif ( $cvs_time > $live_time ) {
|
||||
$tuples{ $relfile } = {
|
||||
from => $cfile,
|
||||
type => 'c',
|
||||
module => $module,
|
||||
to => $lfile,
|
||||
live_time => $live_time,
|
||||
cvs_time => $cvs_time,
|
||||
diff => undef,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
return wantarray ? %tuples : \%tuples;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: find_init_files( [@files] )
|
||||
### Like find_changed_files(), but assumes that none of the given I<files> are
|
||||
### extant on the live side (for --init).
|
||||
sub find_init_files {
|
||||
my $self = shift or confess "Cannot be called as a function";
|
||||
|
||||
my $filemap = $self->filemap;
|
||||
my %tuples = ();
|
||||
|
||||
my (
|
||||
$module,
|
||||
$relfile,
|
||||
$lfile,
|
||||
$cfile,
|
||||
$cvs_time,
|
||||
);
|
||||
|
||||
while ( ($lfile, $cfile) = each %$filemap ) {
|
||||
( $relfile = $cfile ) =~ s{^$self->{dir_cvs}/}{};
|
||||
( $module = $relfile ) =~ s{/.*}{};
|
||||
|
||||
# Fetch the mtime of the cvs file
|
||||
$cvs_time = -e $cfile ? (stat _)[9] : 0;
|
||||
|
||||
# Add an entry for every file
|
||||
$tuples{ $lfile } = {
|
||||
from => $cfile,
|
||||
type => 'c',
|
||||
module => $module,
|
||||
to => $lfile,
|
||||
live_time => 0,
|
||||
cvs_time => $cvs_time,
|
||||
};
|
||||
}
|
||||
|
||||
return wantarray ? %tuples : \%tuples;
|
||||
}
|
||||
|
||||
|
||||
# :TODO: This should really use Text::Diff or something instead of doing a bunch
|
||||
# of forked reads...
|
||||
|
||||
### METHOD: get_diffs( \@options, @files )
|
||||
### Given one or more tuples like those returned from find_changed_files(),
|
||||
### return a list of diffs, one for each.
|
||||
sub get_diffs {
|
||||
my $self = shift or confess "Cannot be called as a function";
|
||||
my $options = ref $_[0] eq 'ARRAY' ? shift : [];
|
||||
|
||||
my @files = @_;
|
||||
my @diffs = ();
|
||||
my $diff = undef;
|
||||
|
||||
$self->debugmsg( "In get_diffs" );
|
||||
|
||||
foreach my $tuple ( @files ) {
|
||||
|
||||
# Reuse cached diffs
|
||||
if ( $tuple->{diff} ) {
|
||||
push @diffs, $tuple->{diff};
|
||||
}
|
||||
|
||||
# Regular diff
|
||||
elsif ( -e $tuple->{from} && -e $tuple->{to} ) {
|
||||
$self->debugmsg( "Forking for real diff on $tuple->{from} -> $tuple->{to}" );
|
||||
$diff = $self->forkread( 'diff', @$options,
|
||||
$tuple->{to}, $tuple->{from} );
|
||||
$self->debugmsg( "Read diff: '", $diff, "'" );
|
||||
$tuple->{diff} = $diff;
|
||||
push @diffs, $diff;
|
||||
}
|
||||
|
||||
# Simulate a diff for a new file
|
||||
else {
|
||||
$self->debugmsg( "Diff for new file $tuple->{from}" );
|
||||
$diff = sprintf " >>> New File <<<\n%s\n\n", $self->readfile( $tuple->{from} );
|
||||
$self->debugmsg( "Read diff: '", $diff, "'" );
|
||||
$tuple->{diff} = $diff;
|
||||
push @diffs, $diff;
|
||||
}
|
||||
}
|
||||
|
||||
return @diffs;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: readfile( $file )
|
||||
### Read the specified file in and return it as a scalar.
|
||||
sub readfile {
|
||||
my $self = shift or confess "cannot be used as a function";
|
||||
my $filename = shift;
|
||||
|
||||
local $/ = undef;
|
||||
open( my $ifh, $filename, O_RDONLY ) or
|
||||
croak "open: $filename: $!";
|
||||
my $content = <$ifh>;
|
||||
|
||||
return $content;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: forkread( $cmd, @args )
|
||||
### Fork and exec the specified I<cmd>, giving it the specified I<args>, and
|
||||
### return the output of the command as a list of lines.
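### In scalar context the output comes back as a single string; get_diffs()
### above uses it roughly like this (the '-u' option is illustrative):
###
###   my $diff = $self->forkread( 'diff', '-u', $tuple->{to}, $tuple->{from} );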
|
||||
sub forkread {
|
||||
my $self = shift or confess "Cannot be used as a function";
|
||||
my ( $cmd, @args ) = @_;
|
||||
|
||||
my (
|
||||
$fh,
|
||||
@lines,
|
||||
$pid,
|
||||
);
|
||||
|
||||
# Fork-open and read the child's output as the parent
|
||||
if (( $pid = open($fh, "-|") )) {
|
||||
@lines = <$fh>;
|
||||
$fh->close;
|
||||
}
|
||||
|
||||
# Child - capture output for diagnostics and progress display stuff.
|
||||
else {
|
||||
die "Couldn't fork: $!" unless defined $pid;
|
||||
|
||||
open STDERR, ">&STDOUT" or die "Can't dup stdout: $!";
|
||||
{ exec $cmd, @args };
|
||||
|
||||
# Only reached if the exec() fails.
|
||||
close STDERR;
|
||||
close STDOUT;
|
||||
exit 1;
|
||||
}
|
||||
|
||||
return wantarray ? @lines : join( '', @lines );
|
||||
}
|
||||
|
||||
|
||||
|
||||
1;
|
||||
|
||||
__END__
|
||||
|
||||
# Local Variables:
|
||||
# mode: perl
|
||||
# c-basic-indent: 4
|
||||
# indent-tabs-mode: nil
|
||||
# End:
|
||||
12
wcmtools/lib/MySQL-BinLog/MANIFEST
Executable file
12
wcmtools/lib/MySQL-BinLog/MANIFEST
Executable file
@@ -0,0 +1,12 @@
|
||||
docs/log_event.h
|
||||
docs/log_event.ph
|
||||
experiments/cpptokenizer.pl
|
||||
experiments/try.pl
|
||||
lib/Mysql/BinLog.pm
|
||||
lib/Mysql/BinLog/Constants.pm
|
||||
lib/Mysql/BinLog/Events.pm
|
||||
lib/Mysql/BinLog/Header.pm
|
||||
lib/Mysql/BinLog/Net.pm
|
||||
lib/Mysql/tmp
|
||||
Makefile.PL
|
||||
MANIFEST
|
||||
11
wcmtools/lib/MySQL-BinLog/MANIFEST.SKIP
Executable file
11
wcmtools/lib/MySQL-BinLog/MANIFEST.SKIP
Executable file
@@ -0,0 +1,11 @@
|
||||
^#
|
||||
\bCVS\b
|
||||
^MANIFEST\.
|
||||
^Makefile$
|
||||
~$
|
||||
\.html$
|
||||
\.old$
|
||||
^blib/
|
||||
_blib$
|
||||
^MakeMaker-\d
|
||||
^\.exists
|
||||
33
wcmtools/lib/MySQL-BinLog/Makefile.PL
Executable file
33
wcmtools/lib/MySQL-BinLog/Makefile.PL
Executable file
@@ -0,0 +1,33 @@
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
# Perl Makefile for MySQL-BinLog
|
||||
# $Id: Makefile.PL,v 1.2 2004/11/17 01:45:16 marksmith Exp $
|
||||
#
|
||||
# Invoke with 'perl Makefile.PL'
|
||||
#
|
||||
# See ExtUtils::MakeMaker (3) for more information on how to influence
|
||||
# the contents of the Makefile that is written
|
||||
#
|
||||
|
||||
use ExtUtils::MakeMaker;
|
||||
|
||||
WriteMakefile(
|
||||
NAME => 'MySQL::BinLog',
|
||||
VERSION_FROM => 'lib/Mysql/BinLog.pm', # finds $VERSION
|
||||
AUTHOR => 'Michael Granger <ged@danga.com>',
|
||||
ABSTRACT => 'MySQL Replication Binlog Reader Library',
|
||||
PREREQ_PM => {
|
||||
'Net::MySQL' => 0,
|
||||
'Scalar::Util' => 0,
|
||||
fields => 0,
|
||||
},
|
||||
dist => {
|
||||
CI => "cvs commit",
|
||||
RCS_LABEL => 'cvs tag RELEASE_$(VERSION_SYM)',
|
||||
SUFFIX => ".bz2",
|
||||
DIST_DEFAULT => 'all tardist',
|
||||
COMPRESS => "bzip2",
|
||||
},
|
||||
|
||||
);
|
||||
|
||||
795
wcmtools/lib/MySQL-BinLog/docs/log_event.h
Executable file
795
wcmtools/lib/MySQL-BinLog/docs/log_event.h
Executable file
@@ -0,0 +1,795 @@
|
||||
/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
|
||||
#ifndef _log_event_h
|
||||
#define _log_event_h
|
||||
|
||||
#ifdef __EMX__
|
||||
#undef write // remove pthread.h macro definition, conflict with write() class member
|
||||
#endif
|
||||
|
||||
#if defined(__GNUC__) && !defined(MYSQL_CLIENT)
|
||||
#pragma interface /* gcc class implementation */
|
||||
#endif
|
||||
|
||||
#define LOG_READ_EOF -1
|
||||
#define LOG_READ_BOGUS -2
|
||||
#define LOG_READ_IO -3
|
||||
#define LOG_READ_MEM -5
|
||||
#define LOG_READ_TRUNC -6
|
||||
#define LOG_READ_TOO_LARGE -7
|
||||
|
||||
#define LOG_EVENT_OFFSET 4
|
||||
#define BINLOG_VERSION 3
|
||||
|
||||
/*
|
||||
We could have used SERVER_VERSION_LENGTH, but this introduces an
|
||||
obscure dependency - if somebody decided to change SERVER_VERSION_LENGTH
|
||||
this would have broke the replication protocol
|
||||
*/
|
||||
#define ST_SERVER_VER_LEN 50
|
||||
|
||||
#define DUMPFILE_FLAG 0x1
|
||||
#define OPT_ENCLOSED_FLAG 0x2
|
||||
#define REPLACE_FLAG 0x4
|
||||
#define IGNORE_FLAG 0x8
|
||||
|
||||
#define FIELD_TERM_EMPTY 0x1
|
||||
#define ENCLOSED_EMPTY 0x2
|
||||
#define LINE_TERM_EMPTY 0x4
|
||||
#define LINE_START_EMPTY 0x8
|
||||
#define ESCAPED_EMPTY 0x10
|
||||
|
||||
struct old_sql_ex
|
||||
{
|
||||
char field_term;
|
||||
char enclosed;
|
||||
char line_term;
|
||||
char line_start;
|
||||
char escaped;
|
||||
char opt_flags;
|
||||
char empty_flags;
|
||||
};
|
||||
|
||||
#define NUM_LOAD_DELIM_STRS 5
|
||||
|
||||
struct sql_ex_info
|
||||
{
|
||||
char* field_term;
|
||||
char* enclosed;
|
||||
char* line_term;
|
||||
char* line_start;
|
||||
char* escaped;
|
||||
int cached_new_format;
|
||||
uint8 field_term_len,enclosed_len,line_term_len,line_start_len, escaped_len;
|
||||
char opt_flags;
|
||||
char empty_flags;
|
||||
|
||||
// store in new format even if old is possible
|
||||
void force_new_format() { cached_new_format = 1;}
|
||||
int data_size()
|
||||
{
|
||||
return (new_format() ?
|
||||
field_term_len + enclosed_len + line_term_len +
|
||||
line_start_len + escaped_len + 6 : 7);
|
||||
}
|
||||
int write_data(IO_CACHE* file);
|
||||
char* init(char* buf,char* buf_end,bool use_new_format);
|
||||
bool new_format()
|
||||
{
|
||||
return ((cached_new_format != -1) ? cached_new_format :
|
||||
(cached_new_format=(field_term_len > 1 ||
|
||||
enclosed_len > 1 ||
|
||||
line_term_len > 1 || line_start_len > 1 ||
|
||||
escaped_len > 1)));
|
||||
}
|
||||
};
|
||||
|
||||
/*
|
||||
Binary log consists of events. Each event has a fixed length header,
|
||||
followed by possibly variable ( depending on the type of event) length
|
||||
data body. The data body consists of an optional fixed length segment
|
||||
(post-header), and an optional variable length segment. See #defines and
|
||||
comments below for the format specifics
|
||||
*/
|
||||
|
||||
/* event-specific post-header sizes */
|
||||
#define LOG_EVENT_HEADER_LEN 19
|
||||
#define OLD_HEADER_LEN 13
|
||||
#define QUERY_HEADER_LEN (4 + 4 + 1 + 2)
|
||||
#define LOAD_HEADER_LEN (4 + 4 + 4 + 1 +1 + 4)
|
||||
#define START_HEADER_LEN (2 + ST_SERVER_VER_LEN + 4)
|
||||
#define ROTATE_HEADER_LEN 8
|
||||
#define CREATE_FILE_HEADER_LEN 4
|
||||
#define APPEND_BLOCK_HEADER_LEN 4
|
||||
#define EXEC_LOAD_HEADER_LEN 4
|
||||
#define DELETE_FILE_HEADER_LEN 4
|
||||
|
||||
/* event header offsets */
|
||||
|
||||
#define EVENT_TYPE_OFFSET 4
|
||||
#define SERVER_ID_OFFSET 5
|
||||
#define EVENT_LEN_OFFSET 9
|
||||
#define LOG_POS_OFFSET 13
|
||||
#define FLAGS_OFFSET 17
|
||||
|
||||
/* start event post-header */
|
||||
|
||||
#define ST_BINLOG_VER_OFFSET 0
|
||||
#define ST_SERVER_VER_OFFSET 2
|
||||
#define ST_CREATED_OFFSET (ST_SERVER_VER_OFFSET + ST_SERVER_VER_LEN)
|
||||
|
||||
/* slave event post-header */
|
||||
|
||||
#define SL_MASTER_PORT_OFFSET 8
|
||||
#define SL_MASTER_POS_OFFSET 0
|
||||
#define SL_MASTER_HOST_OFFSET 10
|
||||
|
||||
/* query event post-header */
|
||||
|
||||
#define Q_THREAD_ID_OFFSET 0
|
||||
#define Q_EXEC_TIME_OFFSET 4
|
||||
#define Q_DB_LEN_OFFSET 8
|
||||
#define Q_ERR_CODE_OFFSET 9
|
||||
#define Q_DATA_OFFSET QUERY_HEADER_LEN
|
||||
|
||||
/* Intvar event post-header */
|
||||
|
||||
#define I_TYPE_OFFSET 0
|
||||
#define I_VAL_OFFSET 1
|
||||
|
||||
/* Rand event post-header */
|
||||
|
||||
#define RAND_SEED1_OFFSET 0
|
||||
#define RAND_SEED2_OFFSET 8
|
||||
|
||||
/* Load event post-header */
|
||||
|
||||
#define L_THREAD_ID_OFFSET 0
|
||||
#define L_EXEC_TIME_OFFSET 4
|
||||
#define L_SKIP_LINES_OFFSET 8
|
||||
#define L_TBL_LEN_OFFSET 12
|
||||
#define L_DB_LEN_OFFSET 13
|
||||
#define L_NUM_FIELDS_OFFSET 14
|
||||
#define L_SQL_EX_OFFSET 18
|
||||
#define L_DATA_OFFSET LOAD_HEADER_LEN
|
||||
|
||||
/* Rotate event post-header */
|
||||
|
||||
#define R_POS_OFFSET 0
|
||||
#define R_IDENT_OFFSET 8
|
||||
|
||||
#define CF_FILE_ID_OFFSET 0
|
||||
#define CF_DATA_OFFSET CREATE_FILE_HEADER_LEN
|
||||
|
||||
#define AB_FILE_ID_OFFSET 0
|
||||
#define AB_DATA_OFFSET APPEND_BLOCK_HEADER_LEN
|
||||
|
||||
#define EL_FILE_ID_OFFSET 0
|
||||
|
||||
#define DF_FILE_ID_OFFSET 0
|
||||
|
||||
#define QUERY_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+QUERY_HEADER_LEN)
|
||||
#define QUERY_DATA_OFFSET (LOG_EVENT_HEADER_LEN+QUERY_HEADER_LEN)
|
||||
#define ROTATE_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+ROTATE_HEADER_LEN)
|
||||
#define LOAD_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+LOAD_HEADER_LEN)
|
||||
#define CREATE_FILE_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+\
|
||||
+LOAD_HEADER_LEN+CREATE_FILE_HEADER_LEN)
|
||||
#define DELETE_FILE_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+DELETE_FILE_HEADER_LEN)
|
||||
#define EXEC_LOAD_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+EXEC_LOAD_HEADER_LEN)
|
||||
#define APPEND_BLOCK_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+APPEND_BLOCK_HEADER_LEN)
|
||||
|
||||
|
||||
#define BINLOG_MAGIC "\xfe\x62\x69\x6e"
|
||||
|
||||
#define LOG_EVENT_TIME_F 0x1
|
||||
#define LOG_EVENT_FORCED_ROTATE_F 0x2
|
||||
|
||||
enum Log_event_type
|
||||
{
|
||||
UNKNOWN_EVENT = 0, START_EVENT = 1, QUERY_EVENT =2, STOP_EVENT=3,
|
||||
ROTATE_EVENT = 4, INTVAR_EVENT=5, LOAD_EVENT=6, SLAVE_EVENT=7,
|
||||
CREATE_FILE_EVENT=8, APPEND_BLOCK_EVENT=9, EXEC_LOAD_EVENT=10,
|
||||
DELETE_FILE_EVENT=11, NEW_LOAD_EVENT=12, RAND_EVENT=13
|
||||
};
|
||||
|
||||
enum Int_event_type
|
||||
{
|
||||
INVALID_INT_EVENT = 0, LAST_INSERT_ID_EVENT = 1, INSERT_ID_EVENT = 2
|
||||
};
|
||||
|
||||
|
||||
#ifndef MYSQL_CLIENT
|
||||
class String;
|
||||
class MYSQL_LOG;
|
||||
class THD;
|
||||
#endif
|
||||
|
||||
struct st_relay_log_info;
|
||||
|
||||
class Log_event
|
||||
{
|
||||
public:
|
||||
my_off_t log_pos;
|
||||
char *temp_buf;
|
||||
time_t when;
|
||||
ulong exec_time;
|
||||
uint32 server_id;
|
||||
uint cached_event_len;
|
||||
uint16 flags;
|
||||
bool cache_stmt;
|
||||
#ifndef MYSQL_CLIENT
|
||||
THD* thd;
|
||||
|
||||
Log_event(THD* thd_arg, uint16 flags_arg, bool cache_stmt);
|
||||
Log_event();
|
||||
// if mutex is 0, the read will proceed without mutex
|
||||
static Log_event* read_log_event(IO_CACHE* file,
|
||||
pthread_mutex_t* log_lock,
|
||||
bool old_format);
|
||||
static int read_log_event(IO_CACHE* file, String* packet,
|
||||
pthread_mutex_t* log_lock);
|
||||
void set_log_pos(MYSQL_LOG* log);
|
||||
virtual void pack_info(String* packet);
|
||||
int net_send(THD* thd, const char* log_name, my_off_t pos);
|
||||
static void init_show_field_list(List<Item>* field_list);
|
||||
virtual int exec_event(struct st_relay_log_info* rli);
|
||||
virtual const char* get_db()
|
||||
{
|
||||
return thd ? thd->db : 0;
|
||||
}
|
||||
#else
|
||||
// avoid having to link mysqlbinlog against libpthread
|
||||
static Log_event* read_log_event(IO_CACHE* file, bool old_format);
|
||||
virtual void print(FILE* file, bool short_form = 0, char* last_db = 0) = 0;
|
||||
void print_timestamp(FILE* file, time_t *ts = 0);
|
||||
void print_header(FILE* file);
|
||||
#endif
|
||||
|
||||
static void *operator new(size_t size)
|
||||
{
|
||||
return (void*) my_malloc((uint)size, MYF(MY_WME|MY_FAE));
|
||||
}
|
||||
static void operator delete(void *ptr, size_t size)
|
||||
{
|
||||
my_free((gptr) ptr, MYF(MY_WME|MY_ALLOW_ZERO_PTR));
|
||||
}
|
||||
|
||||
int write(IO_CACHE* file);
|
||||
int write_header(IO_CACHE* file);
|
||||
virtual int write_data(IO_CACHE* file)
|
||||
{ return write_data_header(file) || write_data_body(file); }
|
||||
virtual int write_data_header(IO_CACHE* file __attribute__((unused)))
|
||||
{ return 0; }
|
||||
virtual int write_data_body(IO_CACHE* file __attribute__((unused)))
|
||||
{ return 0; }
|
||||
virtual Log_event_type get_type_code() = 0;
|
||||
virtual bool is_valid() = 0;
|
||||
inline bool get_cache_stmt() { return cache_stmt; }
|
||||
Log_event(const char* buf, bool old_format);
|
||||
virtual ~Log_event() { free_temp_buf();}
|
||||
void register_temp_buf(char* buf) { temp_buf = buf; }
|
||||
void free_temp_buf()
|
||||
{
|
||||
if (temp_buf)
|
||||
{
|
||||
my_free(temp_buf, MYF(0));
|
||||
temp_buf = 0;
|
||||
}
|
||||
}
|
||||
virtual int get_data_size() { return 0;}
|
||||
virtual int get_data_body_offset() { return 0; }
|
||||
int get_event_len()
|
||||
{
|
||||
return (cached_event_len ? cached_event_len :
|
||||
(cached_event_len = LOG_EVENT_HEADER_LEN + get_data_size()));
|
||||
}
|
||||
static Log_event* read_log_event(const char* buf, int event_len,
|
||||
const char **error, bool old_format);
|
||||
const char* get_type_str();
|
||||
};
|
||||
|
||||
|
||||
class Query_log_event: public Log_event
|
||||
{
|
||||
protected:
|
||||
char* data_buf;
|
||||
public:
|
||||
const char* query;
|
||||
const char* db;
|
||||
/*
|
||||
If we already know the length of the query string
|
||||
we pass it with q_len, so we would not have to call strlen()
|
||||
otherwise, set it to 0, in which case, we compute it with strlen()
|
||||
*/
|
||||
uint32 q_len;
|
||||
uint32 db_len;
|
||||
uint16 error_code;
|
||||
ulong thread_id;
|
||||
/*
|
||||
For events created by Query_log_event::exec_event (and
|
||||
Load_log_event::exec_event()) we need the *original* thread id, to be able
|
||||
to log the event with the original (=master's) thread id (fix for
|
||||
BUG#1686).
|
||||
*/
|
||||
ulong slave_proxy_id;
|
||||
#ifndef MYSQL_CLIENT
|
||||
|
||||
Query_log_event(THD* thd_arg, const char* query_arg, ulong query_length,
|
||||
bool using_trans);
|
||||
const char* get_db() { return db; }
|
||||
void pack_info(String* packet);
|
||||
int exec_event(struct st_relay_log_info* rli);
|
||||
#else
|
||||
void print(FILE* file, bool short_form = 0, char* last_db = 0);
|
||||
#endif
|
||||
|
||||
Query_log_event(const char* buf, int event_len, bool old_format);
|
||||
~Query_log_event()
|
||||
{
|
||||
if (data_buf)
|
||||
{
|
||||
my_free((gptr) data_buf, MYF(0));
|
||||
}
|
||||
}
|
||||
Log_event_type get_type_code() { return QUERY_EVENT; }
|
||||
int write(IO_CACHE* file);
|
||||
int write_data(IO_CACHE* file); // returns 0 on success, -1 on error
|
||||
bool is_valid() { return query != 0; }
|
||||
int get_data_size()
|
||||
{
|
||||
return (q_len + db_len + 2
|
||||
+ 4 // thread_id
|
||||
+ 4 // exec_time
|
||||
+ 2 // error_code
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
class Slave_log_event: public Log_event
|
||||
{
|
||||
protected:
|
||||
char* mem_pool;
|
||||
void init_from_mem_pool(int data_size);
|
||||
public:
|
||||
my_off_t master_pos;
|
||||
char* master_host;
|
||||
char* master_log;
|
||||
int master_host_len;
|
||||
int master_log_len;
|
||||
uint16 master_port;
|
||||
|
||||
#ifndef MYSQL_CLIENT
|
||||
Slave_log_event(THD* thd_arg, struct st_relay_log_info* rli);
|
||||
void pack_info(String* packet);
|
||||
int exec_event(struct st_relay_log_info* rli);
|
||||
#else
|
||||
void print(FILE* file, bool short_form = 0, char* last_db = 0);
|
||||
#endif
|
||||
|
||||
Slave_log_event(const char* buf, int event_len);
|
||||
~Slave_log_event();
|
||||
int get_data_size();
|
||||
bool is_valid() { return master_host != 0; }
|
||||
Log_event_type get_type_code() { return SLAVE_EVENT; }
|
||||
int write_data(IO_CACHE* file );
|
||||
};
|
||||
|
||||
class Load_log_event: public Log_event
|
||||
{
|
||||
protected:
|
||||
int copy_log_event(const char *buf, ulong event_len, bool old_format);
|
||||
|
||||
public:
|
||||
ulong thread_id;
|
||||
ulong slave_proxy_id;
|
||||
uint32 table_name_len;
|
||||
uint32 db_len;
|
||||
uint32 fname_len;
|
||||
uint32 num_fields;
|
||||
const char* fields;
|
||||
const uchar* field_lens;
|
||||
uint32 field_block_len;
|
||||
|
||||
const char* table_name;
|
||||
const char* db;
|
||||
const char* fname;
|
||||
uint32 skip_lines;
|
||||
sql_ex_info sql_ex;
|
||||
bool local_fname;
|
||||
|
||||
/* fname doesn't point to memory inside Log_event::temp_buf */
|
||||
void set_fname_outside_temp_buf(const char *afname, uint alen)
|
||||
{
|
||||
fname= afname;
|
||||
fname_len= alen;
|
||||
local_fname= true;
|
||||
}
|
||||
/* fname doesn't point to memory inside Log_event::temp_buf */
|
||||
int check_fname_outside_temp_buf()
|
||||
{
|
||||
return local_fname;
|
||||
}
|
||||
|
||||
#ifndef MYSQL_CLIENT
|
||||
String field_lens_buf;
|
||||
String fields_buf;
|
||||
|
||||
Load_log_event(THD* thd, sql_exchange* ex, const char* db_arg,
|
||||
const char* table_name_arg,
|
||||
List<Item>& fields_arg, enum enum_duplicates handle_dup,
|
||||
bool using_trans);
|
||||
void set_fields(List<Item> &fields_arg);
|
||||
void pack_info(String* packet);
|
||||
const char* get_db() { return db; }
|
||||
int exec_event(struct st_relay_log_info* rli)
|
||||
{
|
||||
return exec_event(thd->slave_net,rli,0);
|
||||
}
|
||||
int exec_event(NET* net, struct st_relay_log_info* rli,
|
||||
bool use_rli_only_for_errors);
|
||||
#else
|
||||
void print(FILE* file, bool short_form = 0, char* last_db = 0);
|
||||
void print(FILE* file, bool short_form, char* last_db, bool commented);
|
||||
#endif
|
||||
|
||||
Load_log_event(const char* buf, int event_len, bool old_format);
|
||||
~Load_log_event()
|
||||
{}
|
||||
Log_event_type get_type_code()
|
||||
{
|
||||
return sql_ex.new_format() ? NEW_LOAD_EVENT: LOAD_EVENT;
|
||||
}
|
||||
int write_data_header(IO_CACHE* file);
|
||||
int write_data_body(IO_CACHE* file);
|
||||
bool is_valid() { return table_name != 0; }
|
||||
int get_data_size()
|
||||
{
|
||||
return (table_name_len + 2 + db_len + 2 + fname_len
|
||||
+ 4 // thread_id
|
||||
+ 4 // exec_time
|
||||
+ 4 // skip_lines
|
||||
+ 4 // field block len
|
||||
+ sql_ex.data_size() + field_block_len + num_fields);
|
||||
}
|
||||
int get_data_body_offset() { return LOAD_EVENT_OVERHEAD; }
|
||||
};
|
||||
|
||||
extern char server_version[SERVER_VERSION_LENGTH];
|
||||
|
||||
class Start_log_event: public Log_event
|
||||
{
|
||||
public:
|
||||
/*
|
||||
If this event is at the start of the first binary log since server startup
|
||||
'created' should be the timestamp when the event (and the binary log) was
|
||||
created.
|
||||
In the other case (i.e. this event is at the start of a binary log created
|
||||
by FLUSH LOGS or automatic rotation), 'created' should be 0.
|
||||
This "trick" is used by MySQL >=4.0.14 slaves to know if they must drop the
|
||||
stale temporary tables or not.
|
||||
Note that when 'created'!=0, it is always equal to the event's timestamp;
|
||||
indeed Start_log_event is written only in log.cc where the first
|
||||
constructor below is called, in which 'created' is set to 'when'.
|
||||
So in fact 'created' is a useless variable. When it is 0
|
||||
we can read the actual value from timestamp ('when') and when it is
|
||||
non-zero we can read the same value from timestamp ('when'). Conclusion:
|
||||
- we use timestamp to print when the binlog was created.
|
||||
- we use 'created' only to know if this is a first binlog or not.
|
||||
In 3.23.57 we did not pay attention to this identity, so mysqlbinlog in
|
||||
3.23.57 does not print 'created the_date' if created was zero. This is now
|
||||
fixed.
|
||||
*/
|
||||
time_t created;
|
||||
uint16 binlog_version;
|
||||
char server_version[ST_SERVER_VER_LEN];
|
||||
|
||||
#ifndef MYSQL_CLIENT
|
||||
Start_log_event() :Log_event(), binlog_version(BINLOG_VERSION)
|
||||
{
|
||||
created = (time_t) when;
|
||||
memcpy(server_version, ::server_version, ST_SERVER_VER_LEN);
|
||||
}
|
||||
void pack_info(String* packet);
|
||||
int exec_event(struct st_relay_log_info* rli);
|
||||
#else
|
||||
void print(FILE* file, bool short_form = 0, char* last_db = 0);
|
||||
#endif
|
||||
|
||||
Start_log_event(const char* buf, bool old_format);
|
||||
~Start_log_event() {}
|
||||
Log_event_type get_type_code() { return START_EVENT;}
|
||||
int write_data(IO_CACHE* file);
|
||||
bool is_valid() { return 1; }
|
||||
int get_data_size()
|
||||
{
|
||||
return START_HEADER_LEN;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
class Intvar_log_event: public Log_event
|
||||
{
|
||||
public:
|
||||
ulonglong val;
|
||||
uchar type;
|
||||
|
||||
#ifndef MYSQL_CLIENT
|
||||
Intvar_log_event(THD* thd_arg,uchar type_arg, ulonglong val_arg)
|
||||
:Log_event(thd_arg,0,0),val(val_arg),type(type_arg)
|
||||
{}
|
||||
void pack_info(String* packet);
|
||||
int exec_event(struct st_relay_log_info* rli);
|
||||
#else
|
||||
void print(FILE* file, bool short_form = 0, char* last_db = 0);
|
||||
#endif
|
||||
|
||||
Intvar_log_event(const char* buf, bool old_format);
|
||||
~Intvar_log_event() {}
|
||||
Log_event_type get_type_code() { return INTVAR_EVENT;}
|
||||
const char* get_var_type_name();
|
||||
int get_data_size() { return sizeof(type) + sizeof(val);}
|
||||
int write_data(IO_CACHE* file);
|
||||
bool is_valid() { return 1; }
|
||||
};
|
||||
|
||||
/*****************************************************************************
|
||||
*
|
||||
* Rand log event class
|
||||
*
|
||||
****************************************************************************/
|
||||
class Rand_log_event: public Log_event
|
||||
{
|
||||
public:
|
||||
ulonglong seed1;
|
||||
ulonglong seed2;
|
||||
|
||||
#ifndef MYSQL_CLIENT
|
||||
Rand_log_event(THD* thd_arg, ulonglong seed1_arg, ulonglong seed2_arg)
|
||||
:Log_event(thd_arg,0,0),seed1(seed1_arg),seed2(seed2_arg)
|
||||
{}
|
||||
void pack_info(String* packet);
|
||||
int exec_event(struct st_relay_log_info* rli);
|
||||
#else
|
||||
void print(FILE* file, bool short_form = 0, char* last_db = 0);
|
||||
#endif
|
||||
|
||||
Rand_log_event(const char* buf, bool old_format);
|
||||
~Rand_log_event() {}
|
||||
Log_event_type get_type_code() { return RAND_EVENT;}
|
||||
int get_data_size() { return sizeof(ulonglong) * 2; }
|
||||
int write_data(IO_CACHE* file);
|
||||
bool is_valid() { return 1; }
|
||||
};
|
||||
|
||||
|
||||
class Stop_log_event: public Log_event
|
||||
{
|
||||
public:
|
||||
#ifndef MYSQL_CLIENT
|
||||
Stop_log_event() :Log_event()
|
||||
{}
|
||||
int exec_event(struct st_relay_log_info* rli);
|
||||
#else
|
||||
void print(FILE* file, bool short_form = 0, char* last_db = 0);
|
||||
#endif
|
||||
|
||||
Stop_log_event(const char* buf, bool old_format):
|
||||
Log_event(buf, old_format)
|
||||
{}
|
||||
~Stop_log_event() {}
|
||||
Log_event_type get_type_code() { return STOP_EVENT;}
|
||||
bool is_valid() { return 1; }
|
||||
};
|
||||
|
||||
|
||||
class Rotate_log_event: public Log_event
|
||||
{
|
||||
public:
|
||||
const char* new_log_ident;
|
||||
ulonglong pos;
|
||||
uint ident_len;
|
||||
bool alloced;
|
||||
#ifndef MYSQL_CLIENT
|
||||
Rotate_log_event(THD* thd_arg, const char* new_log_ident_arg,
|
||||
uint ident_len_arg = 0,
|
||||
ulonglong pos_arg = LOG_EVENT_OFFSET)
|
||||
:Log_event(), new_log_ident(new_log_ident_arg),
|
||||
pos(pos_arg),ident_len(ident_len_arg ? ident_len_arg :
|
||||
(uint) strlen(new_log_ident_arg)), alloced(0)
|
||||
{}
|
||||
void pack_info(String* packet);
|
||||
int exec_event(struct st_relay_log_info* rli);
|
||||
#else
|
||||
void print(FILE* file, bool short_form = 0, char* last_db = 0);
|
||||
#endif
|
||||
|
||||
Rotate_log_event(const char* buf, int event_len, bool old_format);
|
||||
~Rotate_log_event()
|
||||
{
|
||||
if (alloced)
|
||||
my_free((gptr) new_log_ident, MYF(0));
|
||||
}
|
||||
Log_event_type get_type_code() { return ROTATE_EVENT;}
|
||||
int get_data_size() { return ident_len + ROTATE_HEADER_LEN;}
|
||||
bool is_valid() { return new_log_ident != 0; }
|
||||
int write_data(IO_CACHE* file);
|
||||
};
|
||||
|
||||
/* the classes below are for the new LOAD DATA INFILE logging */
|
||||
|
||||
class Create_file_log_event: public Load_log_event
|
||||
{
|
||||
protected:
|
||||
/*
|
||||
Pretend we are Load event, so we can write out just
|
||||
our Load part - used on the slave when writing event out to
|
||||
SQL_LOAD-*.info file
|
||||
*/
|
||||
bool fake_base;
|
||||
public:
|
||||
char* block;
|
||||
const char *event_buf;
|
||||
uint block_len;
|
||||
uint file_id;
|
||||
bool inited_from_old;
|
||||
|
||||
#ifndef MYSQL_CLIENT
|
||||
Create_file_log_event(THD* thd, sql_exchange* ex, const char* db_arg,
|
||||
const char* table_name_arg,
|
||||
List<Item>& fields_arg,
|
||||
enum enum_duplicates handle_dup,
|
||||
char* block_arg, uint block_len_arg,
|
||||
bool using_trans);
|
||||
void pack_info(String* packet);
|
||||
int exec_event(struct st_relay_log_info* rli);
|
||||
#else
|
||||
void print(FILE* file, bool short_form = 0, char* last_db = 0);
|
||||
void print(FILE* file, bool short_form, char* last_db, bool enable_local);
|
||||
#endif
|
||||
|
||||
Create_file_log_event(const char* buf, int event_len, bool old_format);
|
||||
~Create_file_log_event()
|
||||
{
|
||||
my_free((char*) event_buf, MYF(MY_ALLOW_ZERO_PTR));
|
||||
}
|
||||
|
||||
Log_event_type get_type_code()
|
||||
{
|
||||
return fake_base ? Load_log_event::get_type_code() : CREATE_FILE_EVENT;
|
||||
}
|
||||
int get_data_size()
|
||||
{
|
||||
return (fake_base ? Load_log_event::get_data_size() :
|
||||
Load_log_event::get_data_size() +
|
||||
4 + 1 + block_len);
|
||||
}
|
||||
int get_data_body_offset()
|
||||
{
|
||||
return (fake_base ? LOAD_EVENT_OVERHEAD:
|
||||
LOAD_EVENT_OVERHEAD + CREATE_FILE_HEADER_LEN);
|
||||
}
|
||||
bool is_valid() { return inited_from_old || block != 0; }
|
||||
int write_data_header(IO_CACHE* file);
|
||||
int write_data_body(IO_CACHE* file);
|
||||
/*
|
||||
Cut out Create_file extensions and
|
||||
write it as Load event - used on the slave
|
||||
*/
|
||||
int write_base(IO_CACHE* file);
|
||||
};
|
||||
|
||||
|
||||
class Append_block_log_event: public Log_event
|
||||
{
|
||||
public:
|
||||
char* block;
|
||||
uint block_len;
|
||||
uint file_id;
|
||||
/*
|
||||
'db' is filled when the event is created in mysql_load() (the event needs to
|
||||
have a 'db' member to be well filtered by binlog-*-db rules). 'db' is not
|
||||
written to the binlog (it's not used by Append_block_log_event::write()), so
|
||||
it can't be read in the Append_block_log_event(const char* buf, int
|
||||
event_len) constructor.
|
||||
In other words, 'db' is used only for filtering by binlog-*-db rules.
|
||||
Create_file_log_event is different: its 'db' (which is inherited from
|
||||
Load_log_event) is written to the binlog and can be re-read.
|
||||
*/
|
||||
const char* db;
|
||||
|
||||
#ifndef MYSQL_CLIENT
|
||||
Append_block_log_event(THD* thd, const char* db_arg, char* block_arg,
|
||||
uint block_len_arg, bool using_trans);
|
||||
int exec_event(struct st_relay_log_info* rli);
|
||||
void pack_info(String* packet);
|
||||
#else
|
||||
void print(FILE* file, bool short_form = 0, char* last_db = 0);
|
||||
#endif
|
||||
|
||||
Append_block_log_event(const char* buf, int event_len);
|
||||
~Append_block_log_event() {}
|
||||
Log_event_type get_type_code() { return APPEND_BLOCK_EVENT;}
|
||||
int get_data_size() { return block_len + APPEND_BLOCK_HEADER_LEN ;}
|
||||
bool is_valid() { return block != 0; }
|
||||
int write_data(IO_CACHE* file);
|
||||
const char* get_db() { return db; }
|
||||
};
|
||||
|
||||
|
||||
class Delete_file_log_event: public Log_event
|
||||
{
|
||||
public:
|
||||
uint file_id;
|
||||
const char* db; /* see comment in Append_block_log_event */
|
||||
|
||||
#ifndef MYSQL_CLIENT
|
||||
Delete_file_log_event(THD* thd, const char* db_arg, bool using_trans);
|
||||
void pack_info(String* packet);
|
||||
int exec_event(struct st_relay_log_info* rli);
|
||||
#else
|
||||
void print(FILE* file, bool short_form = 0, char* last_db = 0);
|
||||
#endif
|
||||
|
||||
Delete_file_log_event(const char* buf, int event_len);
|
||||
~Delete_file_log_event() {}
|
||||
Log_event_type get_type_code() { return DELETE_FILE_EVENT;}
|
||||
int get_data_size() { return DELETE_FILE_HEADER_LEN ;}
|
||||
bool is_valid() { return file_id != 0; }
|
||||
int write_data(IO_CACHE* file);
|
||||
const char* get_db() { return db; }
|
||||
};
|
||||
|
||||
class Execute_load_log_event: public Log_event
|
||||
{
|
||||
public:
|
||||
uint file_id;
|
||||
const char* db; /* see comment in Append_block_log_event */
|
||||
|
||||
#ifndef MYSQL_CLIENT
|
||||
Execute_load_log_event(THD* thd, const char* db_arg, bool using_trans);
|
||||
void pack_info(String* packet);
|
||||
int exec_event(struct st_relay_log_info* rli);
|
||||
#else
|
||||
void print(FILE* file, bool short_form = 0, char* last_db = 0);
|
||||
#endif
|
||||
|
||||
Execute_load_log_event(const char* buf, int event_len);
|
||||
~Execute_load_log_event() {}
|
||||
Log_event_type get_type_code() { return EXEC_LOAD_EVENT;}
|
||||
int get_data_size() { return EXEC_LOAD_HEADER_LEN ;}
|
||||
bool is_valid() { return file_id != 0; }
|
||||
int write_data(IO_CACHE* file);
|
||||
const char* get_db() { return db; }
|
||||
};
|
||||
|
||||
#ifdef MYSQL_CLIENT
|
||||
class Unknown_log_event: public Log_event
|
||||
{
|
||||
public:
|
||||
Unknown_log_event(const char* buf, bool old_format):
|
||||
Log_event(buf, old_format)
|
||||
{}
|
||||
~Unknown_log_event() {}
|
||||
void print(FILE* file, bool short_form= 0, char* last_db= 0);
|
||||
Log_event_type get_type_code() { return UNKNOWN_EVENT;}
|
||||
bool is_valid() { return 1; }
|
||||
};
|
||||
#endif
|
||||
|
||||
#endif /* _log_event_h */
|
||||
101
wcmtools/lib/MySQL-BinLog/docs/log_event.ph
Executable file
@@ -0,0 +1,101 @@
|
||||
#!/usr/bin/perl -w
|
||||
|
||||
#require '_h2ph_pre.ph';
|
||||
|
||||
unless(defined(&_log_event_h)) {
|
||||
eval 'sub _log_event_h () {1;}' unless defined(&_log_event_h);
|
||||
eval 'sub LOG_READ_EOF () {-1;}' unless defined(&LOG_READ_EOF);
|
||||
eval 'sub LOG_READ_BOGUS () {-2;}' unless defined(&LOG_READ_BOGUS);
|
||||
eval 'sub LOG_READ_IO () {-3;}' unless defined(&LOG_READ_IO);
|
||||
eval 'sub LOG_READ_MEM () {-5;}' unless defined(&LOG_READ_MEM);
|
||||
eval 'sub LOG_READ_TRUNC () {-6;}' unless defined(&LOG_READ_TRUNC);
|
||||
eval 'sub LOG_READ_TOO_LARGE () {-7;}' unless defined(&LOG_READ_TOO_LARGE);
|
||||
eval 'sub LOG_EVENT_OFFSET () {4;}' unless defined(&LOG_EVENT_OFFSET);
|
||||
eval 'sub BINLOG_VERSION () {3;}' unless defined(&BINLOG_VERSION);
|
||||
eval 'sub ST_SERVER_VER_LEN () {50;}' unless defined(&ST_SERVER_VER_LEN);
|
||||
eval 'sub DUMPFILE_FLAG () {0x1;}' unless defined(&DUMPFILE_FLAG);
|
||||
eval 'sub OPT_ENCLOSED_FLAG () {0x2;}' unless defined(&OPT_ENCLOSED_FLAG);
|
||||
eval 'sub REPLACE_FLAG () {0x4;}' unless defined(&REPLACE_FLAG);
|
||||
eval 'sub IGNORE_FLAG () {0x8;}' unless defined(&IGNORE_FLAG);
|
||||
eval 'sub FIELD_TERM_EMPTY () {0x1;}' unless defined(&FIELD_TERM_EMPTY);
|
||||
eval 'sub ENCLOSED_EMPTY () {0x2;}' unless defined(&ENCLOSED_EMPTY);
|
||||
eval 'sub LINE_TERM_EMPTY () {0x4;}' unless defined(&LINE_TERM_EMPTY);
|
||||
eval 'sub LINE_START_EMPTY () {0x8;}' unless defined(&LINE_START_EMPTY);
|
||||
eval 'sub ESCAPED_EMPTY () {0x10;}' unless defined(&ESCAPED_EMPTY);
|
||||
eval 'sub NUM_LOAD_DELIM_STRS () {5;}' unless defined(&NUM_LOAD_DELIM_STRS);
|
||||
eval 'sub LOG_EVENT_HEADER_LEN () {19;}' unless defined(&LOG_EVENT_HEADER_LEN);
|
||||
eval 'sub OLD_HEADER_LEN () {13;}' unless defined(&OLD_HEADER_LEN);
|
||||
eval 'sub QUERY_HEADER_LEN () {(4+ 4+ 1+ 2);}' unless defined(&QUERY_HEADER_LEN);
|
||||
eval 'sub LOAD_HEADER_LEN () {(4+ 4+ 4+ 1+1+ 4);}' unless defined(&LOAD_HEADER_LEN);
|
||||
eval 'sub START_HEADER_LEN () {(2+ &ST_SERVER_VER_LEN + 4);}' unless defined(&START_HEADER_LEN);
|
||||
eval 'sub ROTATE_HEADER_LEN () {8;}' unless defined(&ROTATE_HEADER_LEN);
|
||||
eval 'sub CREATE_FILE_HEADER_LEN () {4;}' unless defined(&CREATE_FILE_HEADER_LEN);
|
||||
eval 'sub APPEND_BLOCK_HEADER_LEN () {4;}' unless defined(&APPEND_BLOCK_HEADER_LEN);
|
||||
eval 'sub EXEC_LOAD_HEADER_LEN () {4;}' unless defined(&EXEC_LOAD_HEADER_LEN);
|
||||
eval 'sub DELETE_FILE_HEADER_LEN () {4;}' unless defined(&DELETE_FILE_HEADER_LEN);
|
||||
eval 'sub EVENT_TYPE_OFFSET () {4;}' unless defined(&EVENT_TYPE_OFFSET);
|
||||
eval 'sub SERVER_ID_OFFSET () {5;}' unless defined(&SERVER_ID_OFFSET);
|
||||
eval 'sub EVENT_LEN_OFFSET () {9;}' unless defined(&EVENT_LEN_OFFSET);
|
||||
eval 'sub LOG_POS_OFFSET () {13;}' unless defined(&LOG_POS_OFFSET);
|
||||
eval 'sub FLAGS_OFFSET () {17;}' unless defined(&FLAGS_OFFSET);
|
||||
eval 'sub ST_BINLOG_VER_OFFSET () {0;}' unless defined(&ST_BINLOG_VER_OFFSET);
|
||||
eval 'sub ST_SERVER_VER_OFFSET () {2;}' unless defined(&ST_SERVER_VER_OFFSET);
|
||||
eval 'sub ST_CREATED_OFFSET () {( &ST_SERVER_VER_OFFSET + &ST_SERVER_VER_LEN);}' unless defined(&ST_CREATED_OFFSET);
|
||||
eval 'sub SL_MASTER_PORT_OFFSET () {8;}' unless defined(&SL_MASTER_PORT_OFFSET);
|
||||
eval 'sub SL_MASTER_POS_OFFSET () {0;}' unless defined(&SL_MASTER_POS_OFFSET);
|
||||
eval 'sub SL_MASTER_HOST_OFFSET () {10;}' unless defined(&SL_MASTER_HOST_OFFSET);
|
||||
eval 'sub Q_THREAD_ID_OFFSET () {0;}' unless defined(&Q_THREAD_ID_OFFSET);
|
||||
eval 'sub Q_EXEC_TIME_OFFSET () {4;}' unless defined(&Q_EXEC_TIME_OFFSET);
|
||||
eval 'sub Q_DB_LEN_OFFSET () {8;}' unless defined(&Q_DB_LEN_OFFSET);
|
||||
eval 'sub Q_ERR_CODE_OFFSET () {9;}' unless defined(&Q_ERR_CODE_OFFSET);
|
||||
eval 'sub Q_DATA_OFFSET () { &QUERY_HEADER_LEN;}' unless defined(&Q_DATA_OFFSET);
|
||||
eval 'sub I_TYPE_OFFSET () {0;}' unless defined(&I_TYPE_OFFSET);
|
||||
eval 'sub I_VAL_OFFSET () {1;}' unless defined(&I_VAL_OFFSET);
|
||||
eval 'sub RAND_SEED1_OFFSET () {0;}' unless defined(&RAND_SEED1_OFFSET);
|
||||
eval 'sub RAND_SEED2_OFFSET () {8;}' unless defined(&RAND_SEED2_OFFSET);
|
||||
eval 'sub L_THREAD_ID_OFFSET () {0;}' unless defined(&L_THREAD_ID_OFFSET);
|
||||
eval 'sub L_EXEC_TIME_OFFSET () {4;}' unless defined(&L_EXEC_TIME_OFFSET);
|
||||
eval 'sub L_SKIP_LINES_OFFSET () {8;}' unless defined(&L_SKIP_LINES_OFFSET);
|
||||
eval 'sub L_TBL_LEN_OFFSET () {12;}' unless defined(&L_TBL_LEN_OFFSET);
|
||||
eval 'sub L_DB_LEN_OFFSET () {13;}' unless defined(&L_DB_LEN_OFFSET);
|
||||
eval 'sub L_NUM_FIELDS_OFFSET () {14;}' unless defined(&L_NUM_FIELDS_OFFSET);
|
||||
eval 'sub L_SQL_EX_OFFSET () {18;}' unless defined(&L_SQL_EX_OFFSET);
|
||||
eval 'sub L_DATA_OFFSET () { &LOAD_HEADER_LEN;}' unless defined(&L_DATA_OFFSET);
|
||||
eval 'sub R_POS_OFFSET () {0;}' unless defined(&R_POS_OFFSET);
|
||||
eval 'sub R_IDENT_OFFSET () {8;}' unless defined(&R_IDENT_OFFSET);
|
||||
eval 'sub CF_FILE_ID_OFFSET () {0;}' unless defined(&CF_FILE_ID_OFFSET);
|
||||
eval 'sub CF_DATA_OFFSET () { &CREATE_FILE_HEADER_LEN;}' unless defined(&CF_DATA_OFFSET);
|
||||
eval 'sub AB_FILE_ID_OFFSET () {0;}' unless defined(&AB_FILE_ID_OFFSET);
|
||||
eval 'sub AB_DATA_OFFSET () { &APPEND_BLOCK_HEADER_LEN;}' unless defined(&AB_DATA_OFFSET);
|
||||
eval 'sub EL_FILE_ID_OFFSET () {0;}' unless defined(&EL_FILE_ID_OFFSET);
|
||||
eval 'sub DF_FILE_ID_OFFSET () {0;}' unless defined(&DF_FILE_ID_OFFSET);
|
||||
eval 'sub QUERY_EVENT_OVERHEAD () {( &LOG_EVENT_HEADER_LEN+ &QUERY_HEADER_LEN);}' unless defined(&QUERY_EVENT_OVERHEAD);
|
||||
eval 'sub QUERY_DATA_OFFSET () {( &LOG_EVENT_HEADER_LEN+ &QUERY_HEADER_LEN);}' unless defined(&QUERY_DATA_OFFSET);
|
||||
eval 'sub ROTATE_EVENT_OVERHEAD () {( &LOG_EVENT_HEADER_LEN+ &ROTATE_HEADER_LEN);}' unless defined(&ROTATE_EVENT_OVERHEAD);
|
||||
eval 'sub LOAD_EVENT_OVERHEAD () {( &LOG_EVENT_HEADER_LEN+ &LOAD_HEADER_LEN);}' unless defined(&LOAD_EVENT_OVERHEAD);
|
||||
eval 'sub CREATE_FILE_EVENT_OVERHEAD () {( &LOG_EVENT_HEADER_LEN+ + &LOAD_HEADER_LEN+ &CREATE_FILE_HEADER_LEN);}' unless defined(&CREATE_FILE_EVENT_OVERHEAD);
|
||||
eval 'sub DELETE_FILE_EVENT_OVERHEAD () {( &LOG_EVENT_HEADER_LEN+ &DELETE_FILE_HEADER_LEN);}' unless defined(&DELETE_FILE_EVENT_OVERHEAD);
|
||||
eval 'sub EXEC_LOAD_EVENT_OVERHEAD () {( &LOG_EVENT_HEADER_LEN+ &EXEC_LOAD_HEADER_LEN);}' unless defined(&EXEC_LOAD_EVENT_OVERHEAD);
|
||||
eval 'sub APPEND_BLOCK_EVENT_OVERHEAD () {( &LOG_EVENT_HEADER_LEN+ &APPEND_BLOCK_HEADER_LEN);}' unless defined(&APPEND_BLOCK_EVENT_OVERHEAD);
|
||||
eval 'sub BINLOG_MAGIC () {"\\xfe\\x62\\x69\\x6e";}' unless defined(&BINLOG_MAGIC);
|
||||
eval 'sub LOG_EVENT_TIME_F () {0x1;}' unless defined(&LOG_EVENT_TIME_F);
|
||||
eval 'sub LOG_EVENT_FORCED_ROTATE_F () {0x2;}' unless defined(&LOG_EVENT_FORCED_ROTATE_F);
|
||||
eval("sub UNKNOWN_EVENT () { 0; }") unless defined(&UNKNOWN_EVENT);
|
||||
eval("sub START_EVENT () { 1; }") unless defined(&START_EVENT);
|
||||
eval("sub QUERY_EVENT () { 2; }") unless defined(&QUERY_EVENT);
|
||||
eval("sub STOP_EVENT () { 3; }") unless defined(&STOP_EVENT);
|
||||
eval("sub ROTATE_EVENT () { 4; }") unless defined(&ROTATE_EVENT);
|
||||
eval("sub INTVAR_EVENT () { 5; }") unless defined(&INTVAR_EVENT);
|
||||
eval("sub LOAD_EVENT () { 6; }") unless defined(&LOAD_EVENT);
|
||||
eval("sub SLAVE_EVENT () { 7; }") unless defined(&SLAVE_EVENT);
|
||||
eval("sub CREATE_FILE_EVENT () { 8; }") unless defined(&CREATE_FILE_EVENT);
|
||||
eval("sub APPEND_BLOCK_EVENT () { 9; }") unless defined(&APPEND_BLOCK_EVENT);
|
||||
eval("sub EXEC_LOAD_EVENT () { 10; }") unless defined(&EXEC_LOAD_EVENT);
|
||||
eval("sub DELETE_FILE_EVENT () { 11; }") unless defined(&DELETE_FILE_EVENT);
|
||||
eval("sub NEW_LOAD_EVENT () { 12; }") unless defined(&NEW_LOAD_EVENT);
|
||||
eval("sub RAND_EVENT () { 13; }") unless defined(&RAND_EVENT);
|
||||
eval("sub INVALID_INT_EVENT () { 0; }") unless defined(&INVALID_INT_EVENT);
|
||||
eval("sub LAST_INSERT_ID_EVENT () { 1; }") unless defined(&LAST_INSERT_ID_EVENT);
|
||||
eval("sub INSERT_ID_EVENT () { 2; }") unless defined(&INSERT_ID_EVENT);
|
||||
}
|
||||
1;
|
||||
38
wcmtools/lib/MySQL-BinLog/experiments/cpptokenizer.pl
Executable file
@@ -0,0 +1,38 @@
|
||||
#!/usr/bin/perl -w
|
||||
#
|
||||
# Learning Text::CPP... conclusion: not what I need.
|
||||
#
|
||||
#
|
||||
|
||||
package cpptokenizer;
|
||||
|
||||
use Text::CPP;
|
||||
use Data::Dumper;
|
||||
|
||||
$Data::Dumper::Terse = 1;
|
||||
$Data::Dumper::Indent = 1;
|
||||
|
||||
my $reader = new Text::CPP ( Language => "GNUC99" );
|
||||
|
||||
my ( $text, $type, $prettytype, $flags );
|
||||
|
||||
foreach my $file ( @ARGV ) {
|
||||
print "File: $file\n", '-' x 70, "\n";
|
||||
|
||||
$reader->read( $file );
|
||||
|
||||
#print join("\n", $reader->tokens);
|
||||
|
||||
while ( ($text, $type, $flags) = $reader->token ) {
|
||||
$prettytype = $reader->type( $type );
|
||||
chomp( $text );
|
||||
#print "$prettytype: $text ($type) +$flags\n";
|
||||
print Data::Dumper->Dumpxs( [$text,$type,$flags,$prettytype],
|
||||
[qw{text type flags prettytype}] ), "\n";
|
||||
print "---\n";
|
||||
}
|
||||
|
||||
print "\n\n";
|
||||
}
|
||||
|
||||
|
||||
34
wcmtools/lib/MySQL-BinLog/experiments/try.pl
Executable file
@@ -0,0 +1,34 @@
|
||||
#!/usr/bin/perl -w
|
||||
package try;
|
||||
use strict;
|
||||
|
||||
BEGIN {
|
||||
use lib qw{lib};
|
||||
use MySQL::BinLog;
|
||||
}
|
||||
|
||||
my %connect_params = (
|
||||
hostname => 'whitaker.lj',
|
||||
database => 'livejournal',
|
||||
user => 'slave',
|
||||
password => 'm&s',
|
||||
port => 3337,
|
||||
debug => 1,
|
||||
|
||||
log_slave_id => 512,
|
||||
);
|
||||
|
||||
sub handler {
|
||||
my $ev = shift;
|
||||
print( ('-' x 70), "\n",
|
||||
">>> QUERY: ", $ev->query_data, "\n",
|
||||
('-' x 70), "\n" );
|
||||
}
|
||||
|
||||
my $filename = shift @ARGV;
|
||||
|
||||
my $log = MySQL::BinLog->open( $filename );
|
||||
#my $log = MySQL::BinLog->connect( %connect_params );
|
||||
|
||||
my @res = $log->handle_events( \&handler, MySQL::QUERY_EVENT );
|
||||
|
||||
244
wcmtools/lib/MySQL-BinLog/lib/Mysql/BinLog.pm
Executable file
@@ -0,0 +1,244 @@
|
||||
#!/usr/bin/perl
|
||||
##############################################################################
|
||||
|
||||
=head1 NAME
|
||||
|
||||
MySQL::BinLog - Binary log parser classes
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
use MySQL::BinLog ();
|
||||
|
||||
my $log = MySQL::BinLog->open( "Foo-relay.bin.001" );
|
||||
|
||||
# -or-
|
||||
|
||||
die unless $MySQL::BinLog::HaveNet;
|
||||
my $log = MySQL::BinLog->connect(
|
||||
hostname => 'db.example.com',
|
||||
database => 'sales',
|
||||
user => 'salesapp',
|
||||
password => '',
|
||||
port => 3337,
|
||||
|
||||
log_name => '',
|
||||
log_pos => 4,
|
||||
log_slave_id => 10,
|
||||
);
|
||||
|
||||
$log->handle_events( \&print_queries, MySQL::QUERY_EVENT );
|
||||
|
||||
sub print_queries {
|
||||
my $ev = shift;
|
||||
print "Query: ", $ev->query_data, "\n";
|
||||
}
|
||||
|
||||
|
||||
=head1 REQUIRES
|
||||
|
||||
I<Token requires line>
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
This is a collection of Perl classes for parsing a MySQL binlog.
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Michael Granger <ged@FaerieMUD.org>
|
||||
|
||||
Copyright (c) 2004 Danga Interactive. All rights reserved.
|
||||
|
||||
This module is free software. You may use, modify, and/or redistribute this
|
||||
software under the terms of the Perl Artistic License. (See
|
||||
http://language.perl.com/misc/Artistic.html)
|
||||
|
||||
THIS SOFTWARE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES,
|
||||
INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||||
FITNESS FOR A PARTICULAR PURPOSE.
|
||||
|
||||
=cut
|
||||
|
||||
##############################################################################
|
||||
package MySQL::BinLog;
|
||||
use strict;
|
||||
use warnings qw{all};
|
||||
|
||||
BEGIN {
|
||||
# Versioning stuff
|
||||
use vars qw{$VERSION $RCSID};
|
||||
$VERSION = do { my @r = (q$Revision: 1.2 $ =~ /\d+/g); sprintf "%d."."%02d" x $#r, @r };
|
||||
$RCSID = q$Id: BinLog.pm,v 1.2 2004/11/17 21:58:39 marksmith Exp $;
|
||||
|
||||
use constant TRUE => 1;
|
||||
use constant FALSE => 0;
|
||||
|
||||
# Subordinate classes
|
||||
use MySQL::BinLog::Constants qw{};
|
||||
use MySQL::BinLog::Events qw{};
|
||||
use MySQL::BinLog::Header qw{};
|
||||
|
||||
# Try to load Net::MySQL, but no worries if we can't until they try to use
|
||||
# ->connect.
|
||||
use vars qw{$HaveNet $NetError};
|
||||
$HaveNet = eval { require MySQL::BinLog::Net; 1 };
|
||||
$NetError = $@;
|
||||
|
||||
use Carp qw{croak confess carp};
|
||||
use IO::File qw{};
|
||||
use Fcntl qw{O_RDONLY};
|
||||
}
|
||||
|
||||
|
||||
### (CONSTRUCTOR) METHOD: new
|
||||
### Return a new generic MySQL::BinLog object.
|
||||
sub new {
|
||||
my $class = shift or confess "Cannot be used as a function";
|
||||
return bless {
|
||||
fh => undef,
|
||||
type => undef,
|
||||
}, $class;
|
||||
}
|
||||
|
||||
|
||||
### (CONSTRUCTOR) METHOD: open( $filename )
|
||||
### Return a MySQL::BinLog object that will read events from the file specified
|
||||
### by I<filename>.
|
||||
sub open {
|
||||
my $class = shift or confess "Cannot be used as a function";
|
||||
my $filename = shift or croak "Missing argument: filename";
|
||||
|
||||
my $ifh = new IO::File $filename, O_RDONLY
|
||||
or croak "open: $filename: $!";
|
||||
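# Skip the 4-byte BINLOG_MAGIC ("\xfe" . "bin") signature at the start of the file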
$ifh->seek( 4, 0 );
|
||||
|
||||
my $self = $class->new;
|
||||
$self->{fh} = $ifh;
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
### (CONSTRUCTOR) METHOD: connect( %connect_params )
|
||||
### Open a connection to a MySQL server over the network and read events from
|
||||
### it. The connection parameters are the same as those passed to Net::MySQL. If
|
||||
### Net::MySQL is not installed, this method will raise an exception.
|
||||
sub connect {
|
||||
my $class = shift or confess "Cannot be used as a function";
|
||||
my %connect_params = @_;
|
||||
|
||||
croak "Net::MySQL not available: $NetError" unless $HaveNet;
|
||||
my $self = $class->new;
|
||||
|
||||
my (
|
||||
$logname,
|
||||
$pos,
|
||||
$slave_id,
|
||||
);
|
||||
|
||||
$logname = delete $connect_params{log_name} || '';
|
||||
$pos = delete $connect_params{log_pos} || 0;
|
||||
$slave_id = delete $connect_params{log_slave_id} || 128;
|
||||
|
||||
$self->{net} = new MySQL::BinLog::Net ( %connect_params );
|
||||
$self->{net}->start_binlog( $slave_id, $logname, $pos );
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
#####################################################################
|
||||
### I N S T A N C E M E T H O D S
|
||||
#####################################################################
|
||||
|
||||
|
||||
### METHOD: read_next_event()
|
||||
### Read the next event from the registered source and return it.
|
||||
sub read_next_event {
|
||||
my $self = shift;
|
||||
|
||||
# :FIXME: This is inexcusably ugly: all the IO is hacked in here just to get
# something working. It should be cleaned up by moving the socket IO routine
# into the ::Net class and the file IO into a new File class that reads from
# a file in an optimized fashion.
|
||||
|
||||
my $event_data;
|
||||
|
||||
# Reading from a file -- have to read the header, figure out the length of
|
||||
# the rest of the event data, then read the rest.
|
||||
if ( $self->{fh} ) {
|
||||
$event_data = $self->readbytes( $self->{fh}, MySQL::LOG_EVENT_HEADER_LEN );
|
||||
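# Total event length is a 4-byte little-endian value at offset 9 (EVENT_LEN_OFFSET) in the header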
my $len = unpack( 'V', substr($event_data, 9, 4) );
|
||||
$event_data .= $self->readbytes( $self->{fh},
|
||||
$len - MySQL::LOG_EVENT_HEADER_LEN );
|
||||
}
|
||||
|
||||
# Reading from a real master
|
||||
elsif ( $self->{net} ) {
|
||||
$event_data = $self->{net}->read_packet;
|
||||
}
|
||||
|
||||
# An object without a reader
|
||||
else {
|
||||
croak "Cannot read without an event source.";
|
||||
}
|
||||
|
||||
# Let the event class parse the event
|
||||
return MySQL::BinLog::Event->read_event( $event_data );
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
### METHOD: handle_events( \&handler[, @types] )
|
||||
### Start reading events from whatever source is registered, handling those of
|
||||
### the types specified in I<types> with the given I<handler>. If no I<types>
|
||||
### are given, all events will be sent to the I<handler>. Events are sent as
|
||||
### instances of the MySQL::BinLog::Event classes.
|
||||
sub handle_events {
|
||||
my $self = shift or croak "Cannot be used as a function.";
|
||||
my ( $handler, @types ) = @_;
|
||||
|
||||
my @rv = ();
|
||||
|
||||
while (( my $event = $self->read_next_event )) {
|
||||
my $etype = $event->header->event_type;
|
||||
next if @types && !grep { $etype == $_ } @types;
|
||||
|
||||
push @rv, $handler->( $event );
|
||||
}
|
||||
|
||||
return @rv;
|
||||
}
|
||||
|
||||
|
||||
### FUNCTION: readbytes( $fh, $len )
|
||||
### Read and return I<len> bytes from the specified I<fh>.
|
||||
sub readbytes {
|
||||
my ( $self, $fh, $len ) = @_;
|
||||
my ( $buf, $rval, $bytes ) = ('', '', 0);
|
||||
|
||||
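# Accumulate reads until exactly $len bytes are buffered; EAGAIN is retried,
# other read errors and premature EOF are fatal.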
until ( length $rval == $len ) {
|
||||
$bytes = $fh->read( $buf, $len - length $rval );
|
||||
if ( !defined $bytes ) {
|
||||
if ( $!{EAGAIN} ) { next }
|
||||
die "Read error: $!";
|
||||
} elsif ( !$bytes && $fh->eof ) {
|
||||
die "EOF before reading $len bytes.\n";
|
||||
}
|
||||
$rval .= $buf;
|
||||
}
|
||||
|
||||
return $rval;
|
||||
}
|
||||
|
||||
|
||||
|
||||
### Destructors
|
||||
DESTROY {}
|
||||
END {}
|
||||
|
||||
|
||||
1;
|
||||
|
||||
|
||||
105
wcmtools/lib/MySQL-BinLog/lib/Mysql/BinLog/Constants.pm
Executable file
@@ -0,0 +1,105 @@
|
||||
package MySQL;
|
||||
|
||||
BEGIN {
|
||||
no warnings 'redefine';
|
||||
|
||||
sub LOG_READ_EOF () {-1;}
|
||||
sub LOG_READ_BOGUS () {-2;}
|
||||
sub LOG_READ_IO () {-3;}
|
||||
sub LOG_READ_MEM () {-5;}
|
||||
sub LOG_READ_TRUNC () {-6;}
|
||||
sub LOG_READ_TOO_LARGE () {-7;}
|
||||
sub LOG_EVENT_OFFSET () {4;}
|
||||
sub BINLOG_VERSION () {3;}
|
||||
sub ST_SERVER_VER_LEN () {50;}
|
||||
sub DUMPFILE_FLAG () {0x1;}
|
||||
sub OPT_ENCLOSED_FLAG () {0x2;}
|
||||
sub REPLACE_FLAG () {0x4;}
|
||||
sub IGNORE_FLAG () {0x8;}
|
||||
sub FIELD_TERM_EMPTY () {0x1;}
|
||||
sub ENCLOSED_EMPTY () {0x2;}
|
||||
sub LINE_TERM_EMPTY () {0x4;}
|
||||
sub LINE_START_EMPTY () {0x8;}
|
||||
sub ESCAPED_EMPTY () {0x10;}
|
||||
sub NUM_LOAD_DELIM_STRS () {5;}
|
||||
sub LOG_EVENT_HEADER_LEN () {19;}
|
||||
sub OLD_HEADER_LEN () {13;}
|
||||
sub QUERY_HEADER_LEN () {(4+ 4+ 1+ 2);}
|
||||
sub LOAD_HEADER_LEN () {(4+ 4+ 4+ 1+1+ 4);}
|
||||
sub START_HEADER_LEN () {(2+ &ST_SERVER_VER_LEN + 4);}
|
||||
sub ROTATE_HEADER_LEN () {8;}
|
||||
sub CREATE_FILE_HEADER_LEN () {4;}
|
||||
sub APPEND_BLOCK_HEADER_LEN () {4;}
|
||||
sub EXEC_LOAD_HEADER_LEN () {4;}
|
||||
sub DELETE_FILE_HEADER_LEN () {4;}
|
||||
sub EVENT_TYPE_OFFSET () {4;}
|
||||
sub SERVER_ID_OFFSET () {5;}
|
||||
sub EVENT_LEN_OFFSET () {9;}
|
||||
sub LOG_POS_OFFSET () {13;}
|
||||
sub FLAGS_OFFSET () {17;}
|
||||
sub ST_BINLOG_VER_OFFSET () {0;}
|
||||
sub ST_SERVER_VER_OFFSET () {2;}
|
||||
sub ST_CREATED_OFFSET () {( &ST_SERVER_VER_OFFSET + &ST_SERVER_VER_LEN);}
|
||||
sub SL_MASTER_PORT_OFFSET () {8;}
|
||||
sub SL_MASTER_POS_OFFSET () {0;}
|
||||
sub SL_MASTER_HOST_OFFSET () {10;}
|
||||
sub Q_THREAD_ID_OFFSET () {0;}
|
||||
sub Q_EXEC_TIME_OFFSET () {4;}
|
||||
sub Q_DB_LEN_OFFSET () {8;}
|
||||
sub Q_ERR_CODE_OFFSET () {9;}
|
||||
sub Q_DATA_OFFSET () { &QUERY_HEADER_LEN;}
|
||||
sub I_TYPE_OFFSET () {0;}
|
||||
sub I_VAL_OFFSET () {1;}
|
||||
sub RAND_SEED1_OFFSET () {0;}
|
||||
sub RAND_SEED2_OFFSET () {8;}
|
||||
sub L_THREAD_ID_OFFSET () {0;}
|
||||
sub L_EXEC_TIME_OFFSET () {4;}
|
||||
sub L_SKIP_LINES_OFFSET () {8;}
|
||||
sub L_TBL_LEN_OFFSET () {12;}
|
||||
sub L_DB_LEN_OFFSET () {13;}
|
||||
sub L_NUM_FIELDS_OFFSET () {14;}
|
||||
sub L_SQL_EX_OFFSET () {18;}
|
||||
sub L_DATA_OFFSET () { &LOAD_HEADER_LEN;}
|
||||
sub R_POS_OFFSET () {0;}
|
||||
sub R_IDENT_OFFSET () {8;}
|
||||
sub CF_FILE_ID_OFFSET () {0;}
|
||||
sub CF_DATA_OFFSET () { &CREATE_FILE_HEADER_LEN;}
|
||||
sub AB_FILE_ID_OFFSET () {0;}
|
||||
sub AB_DATA_OFFSET () { &APPEND_BLOCK_HEADER_LEN;}
|
||||
sub EL_FILE_ID_OFFSET () {0;}
|
||||
sub DF_FILE_ID_OFFSET () {0;}
|
||||
sub QUERY_EVENT_OVERHEAD () {( &LOG_EVENT_HEADER_LEN+ &QUERY_HEADER_LEN);}
|
||||
sub QUERY_DATA_OFFSET () {( &LOG_EVENT_HEADER_LEN+ &QUERY_HEADER_LEN);}
|
||||
sub ROTATE_EVENT_OVERHEAD () {( &LOG_EVENT_HEADER_LEN+ &ROTATE_HEADER_LEN);}
|
||||
sub LOAD_EVENT_OVERHEAD () {( &LOG_EVENT_HEADER_LEN+ &LOAD_HEADER_LEN);}
|
||||
sub CREATE_FILE_EVENT_OVERHEAD () {( &LOG_EVENT_HEADER_LEN+ + &LOAD_HEADER_LEN+ &CREATE_FILE_HEADER_LEN);}
|
||||
sub DELETE_FILE_EVENT_OVERHEAD () {( &LOG_EVENT_HEADER_LEN+ &DELETE_FILE_HEADER_LEN);}
|
||||
sub EXEC_LOAD_EVENT_OVERHEAD () {( &LOG_EVENT_HEADER_LEN+ &EXEC_LOAD_HEADER_LEN);}
|
||||
sub APPEND_BLOCK_EVENT_OVERHEAD () {( &LOG_EVENT_HEADER_LEN+ &APPEND_BLOCK_HEADER_LEN);}
|
||||
sub BINLOG_MAGIC () {"\\xfe\\x62\\x69\\x6e";}
|
||||
sub LOG_EVENT_TIME_F () {0x1;}
|
||||
sub LOG_EVENT_FORCED_ROTATE_F () {0x2;}
|
||||
sub UNKNOWN_EVENT () { 0; }
|
||||
sub START_EVENT () { 1; }
|
||||
sub QUERY_EVENT () { 2; }
|
||||
sub STOP_EVENT () { 3; }
|
||||
sub ROTATE_EVENT () { 4; }
|
||||
sub INTVAR_EVENT () { 5; }
|
||||
sub LOAD_EVENT () { 6; }
|
||||
sub SLAVE_EVENT () { 7; }
|
||||
sub CREATE_FILE_EVENT () { 8; }
|
||||
sub APPEND_BLOCK_EVENT () { 9; }
|
||||
sub EXEC_LOAD_EVENT () { 10; }
|
||||
sub DELETE_FILE_EVENT () { 11; }
|
||||
sub NEW_LOAD_EVENT () { 12; }
|
||||
sub RAND_EVENT () { 13; }
|
||||
sub INVALID_INT_EVENT () { 0; }
|
||||
sub LAST_INSERT_ID_EVENT () { 1; }
|
||||
sub INSERT_ID_EVENT () { 2; }
|
||||
}
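
# Not used by the modules themselves -- just a sketch of how the fixed offsets
# above line up with the 19-byte event header ($raw is assumed to hold at
# least LOG_EVENT_HEADER_LEN bytes of raw event data):
#
#   my ($timestamp, $type, $server_id, $event_len, $log_pos, $flags) =
#       unpack( 'V C V V V v', substr($raw, 0, MySQL::LOG_EVENT_HEADER_LEN) );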
|
||||
|
||||
|
||||
|
||||
|
||||
1;
|
||||
|
||||
793
wcmtools/lib/MySQL-BinLog/lib/Mysql/BinLog/Events.pm
Executable file
@@ -0,0 +1,793 @@
|
||||
#!/usr/bin/perl
|
||||
##############################################################################
|
||||
|
||||
=head1 NAME
|
||||
|
||||
MySQL::BinLog::Event - Event class for MySQL binlog parsing
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
use MySQL::BinLog::Event qw();
|
||||
|
||||
my $event = MySQL::BinLog::Event->read_event( $raw_data );
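
# A sketch of typical use; header() and the per-type fields (e.g. query_data
# for a QueryEvent) are exposed through AUTOLOAD-generated accessors:
if ( $event->header->event_type == MySQL::QUERY_EVENT ) {
    print "Query: ", $event->query_data, "\n";
}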
|
||||
|
||||
=head1 REQUIRES
|
||||
|
||||
I<Token requires line>
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
None yet.
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Michael Granger <ged@FaerieMUD.org>
|
||||
|
||||
Copyright (c) 2004 Danga Interactive. All rights reserved.
|
||||
|
||||
This module is free software. You may use, modify, and/or redistribute this
|
||||
software under the terms of the Perl Artistic License. (See
|
||||
http://language.perl.com/misc/Artistic.html)
|
||||
|
||||
THIS SOFTWARE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES,
|
||||
INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||||
FITNESS FOR A PARTICULAR PURPOSE.
|
||||
|
||||
=cut
|
||||
|
||||
##############################################################################
|
||||
package MySQL::BinLog::Event;
|
||||
use strict;
|
||||
use warnings qw{all};
|
||||
|
||||
|
||||
###############################################################################
|
||||
### I N I T I A L I Z A T I O N
|
||||
###############################################################################
|
||||
BEGIN {
|
||||
### Versioning stuff and custom includes
|
||||
use vars qw{$VERSION $RCSID};
|
||||
$VERSION = do { my @r = (q$Revision: 1.2 $ =~ /\d+/g); sprintf "%d."."%02d" x $#r, @r };
|
||||
$RCSID = q$Id: Events.pm,v 1.2 2004/11/17 21:58:40 marksmith Exp $;
|
||||
|
||||
# MySQL classes
|
||||
use MySQL::BinLog::Header qw{};
|
||||
use Carp qw{croak confess carp};
|
||||
use Scalar::Util qw{blessed};
|
||||
|
||||
use fields qw{header rawdata};
|
||||
use base qw{fields};
|
||||
}
|
||||
|
||||
our $AUTOLOAD;
|
||||
|
||||
# Maps an event type to a subclass
|
||||
our @ClassMap = qw(
|
||||
UnknownEvent
|
||||
StartEvent
|
||||
QueryEvent
|
||||
StopEvent
|
||||
RotateEvent
|
||||
IntvarEvent
|
||||
LoadEvent
|
||||
SlaveEvent
|
||||
CreateFileEvent
|
||||
AppendBlockEvent
|
||||
ExecLoadEvent
|
||||
DeleteFileEvent
|
||||
NewLoadEvent
|
||||
RandEvent
|
||||
UserVarEvent
|
||||
);
|
||||
|
||||
|
||||
### (FACTORY) METHOD: read_event( $raw_data )
### Parse the next event out of the raw data string I<raw_data> and return it
### as a C<MySQL::BinLog::Event> object.
|
||||
sub read_event {
|
||||
my $class = shift;
|
||||
my $rawdata = shift;
|
||||
my @desired_types = @_;
|
||||
|
||||
my (
|
||||
$hdata,
|
||||
$header,
|
||||
$datalen,
|
||||
$event_data,
|
||||
$reallen,
|
||||
$event_class,
|
||||
);
|
||||
|
||||
debugMsg( "Reading event from ", length $rawdata, " bytes of raw data.\n" );
|
||||
|
||||
# Read the header data and create the header object
|
||||
# :TODO: only handles "new" headers; old headers are shorter. Need to
|
||||
# document which version this changed and mention this in the docs.
|
||||
$hdata = substr( $rawdata, 0, MySQL::LOG_EVENT_HEADER_LEN, '' );
|
||||
$header = new MySQL::BinLog::Header $hdata;
|
||||
|
||||
# Read the event data
|
||||
$datalen = $header->{event_len} - MySQL::LOG_EVENT_HEADER_LEN;
|
||||
debugMsg( "Event data is $header->{event_len} bytes long.\n" );
|
||||
$event_data = substr( $rawdata, 0, $datalen, '' );
|
||||
debugMsg( "Read ", length $event_data, " bytes of event data.\n" );
|
||||
|
||||
$reallen = length $event_data;
|
||||
croak "Short read for event data ($reallen of $datalen bytes)"
|
||||
unless $reallen == $datalen;
|
||||
|
||||
# Figure out which class implements the event type and create one with the
|
||||
# header and data
|
||||
$event_class = sprintf "MySQL::BinLog::%s", $ClassMap[ $header->{event_type} ];
|
||||
return $event_class->new( $header, $event_data );
|
||||
}
|
||||
|
||||
|
||||
|
||||
### (CONSTRUCTOR) METHOD: new( $header, $raw_data )
|
||||
### Construct a new Event with the specified I<header> and I<raw_data>. This is
|
||||
### only meant to be called from a subclass.
|
||||
sub new {
|
||||
my MySQL::BinLog::Event $self = shift;
|
||||
my ( $header, $data ) = @_;
|
||||
|
||||
die "Instantiation of abstract class" unless ref $self;
|
||||
|
||||
$self->{header} = $header;
|
||||
$self->{rawdata} = $data;
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
# Accessor-generator
|
||||
|
||||
### (PROXY) METHOD: AUTOLOAD( @args )
|
||||
### Proxy method to build (non-translucent) object accessors.
|
||||
sub AUTOLOAD {
|
||||
my MySQL::BinLog::Event $self = shift;
|
||||
( my $name = $AUTOLOAD ) =~ s{.*::}{};
|
||||
|
||||
### Build an accessor for extant attributes
|
||||
if ( blessed $self && exists $self->{$name} ) {
|
||||
|
||||
### Define an accessor for this attribute
|
||||
my $method = sub {
|
||||
my MySQL::BinLog::Event $closureSelf = shift;
|
||||
|
||||
$closureSelf->{$name} = shift if @_;
|
||||
return $closureSelf->{$name};
|
||||
};
|
||||
|
||||
### Install the new method in the symbol table
|
||||
NO_STRICT_REFS: {
|
||||
no strict 'refs';
|
||||
*{$AUTOLOAD} = $method;
|
||||
}
|
||||
|
||||
### Now jump to the new method after sticking the self-ref back onto the
|
||||
### stack
|
||||
unshift @_, $self;
|
||||
goto &$AUTOLOAD;
|
||||
}
|
||||
|
||||
### Try to delegate to our parent's version of the method
|
||||
my $parentMethod = "SUPER::$name";
|
||||
return $self->$parentMethod( @_ );
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
### Destructors
|
||||
DESTROY {}
|
||||
END {}
|
||||
|
||||
|
||||
### Utility functions
|
||||
|
||||
### Debugging function -- swap in the commented-out version below to enable debug output.
|
||||
sub debugMsg {}
|
||||
#sub debugMsg { print STDERR @_ }
|
||||
|
||||
|
||||
|
||||
#####################################################################
|
||||
### S T A R T E V E N T C L A S S
|
||||
#####################################################################
|
||||
package MySQL::BinLog::StartEvent;
|
||||
use strict;
|
||||
|
||||
BEGIN {
|
||||
use base 'MySQL::BinLog::Event';
|
||||
use fields qw{binlog_ver server_ver created};
|
||||
|
||||
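# 2-byte binlog version, ST_SERVER_VER_LEN (50) bytes of server version, 4-byte created timestamp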
use constant PACK_TEMPLATE => 'va50V';
|
||||
}
|
||||
|
||||
|
||||
### (CONSTRUCTOR) METHOD: new( $header_obj, $raw_data )
|
||||
### Create a new StartEvent object from the given I<raw_data> and I<header_obj>
|
||||
### (a MySQL::BinLog::Header object).
|
||||
sub new {
|
||||
my MySQL::BinLog::StartEvent $self = shift;
|
||||
|
||||
$self = fields::new( $self );
|
||||
$self->SUPER::new( @_ );
|
||||
|
||||
if ( $self->{rawdata} ) {
|
||||
my @fields = unpack PACK_TEMPLATE, $self->{rawdata};
|
||||
@{$self}{qw{binlog_ver server_ver created}} = @fields;
|
||||
}
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: stringify()
|
||||
### Return a representation of the event as a human-readable string.
|
||||
sub stringify {
|
||||
my MySQL::BinLog::StartEvent $self = shift;
|
||||
return join( ':', @{$self}{qw{binlog_ver server_ver created}} );
|
||||
}
|
||||
|
||||
|
||||
|
||||
#####################################################################
|
||||
### Q U E R Y E V E N T C L A S S
|
||||
#####################################################################
|
||||
package MySQL::BinLog::QueryEvent;
|
||||
use strict;
|
||||
|
||||
BEGIN {
|
||||
use base 'MySQL::BinLog::Event';
|
||||
use fields qw{thread_id exec_time db_len err_code dbname query_data};
|
||||
|
||||
# 4 + 4 + 1 + 2 + variable length data field.
|
||||
use constant PACK_TEMPLATE => 'VVCva*';
|
||||
}
|
||||
|
||||
|
||||
### (CONSTRUCTOR) METHOD: new( $header_obj, $raw_data )
|
||||
### Create a new QueryEvent object from the given I<raw_data> and I<header_obj>
|
||||
### (a MySQL::BinLog::Header object).
|
||||
sub new {
|
||||
my MySQL::BinLog::QueryEvent $self = shift;
|
||||
|
||||
$self = fields::new( $self );
|
||||
$self->SUPER::new( @_ );
|
||||
|
||||
if ( $self->{rawdata} ) {
|
||||
my @fields = unpack PACK_TEMPLATE, $self->{rawdata};
|
||||
|
||||
# The last bit needs further unpacking with a length that is in the data
|
||||
# extracted via the first template. If db_len immediately preceded the
|
||||
# query data it could all be done in one unpack with 'c/a' or something,
|
||||
# but alas...
|
||||
my $template = sprintf( 'a%da*', $fields[2] ); # $fields[2] = length of dbname
|
||||
push @fields, unpack( $template, pop @fields );
|
||||
|
||||
@{$self}{qw{thread_id exec_time db_len err_code dbname query_data}} = @fields;
|
||||
}
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: stringify()
|
||||
### Return a representation of the event as a human-readable string.
|
||||
sub stringify {
|
||||
my MySQL::BinLog::QueryEvent $self = shift;
|
||||
return join( ':', @{$self}{qw{thread_id exec_time db_len err_code dbname query_data}} );
|
||||
}
|
||||
|
||||
|
||||
#####################################################################
|
||||
### S T O P E V E N T C L A S S
|
||||
#####################################################################
|
||||
package MySQL::BinLog::StopEvent;
|
||||
use strict;
|
||||
|
||||
BEGIN {
|
||||
use base 'MySQL::BinLog::Event';
|
||||
use fields qw{}; # Stop event has no fields
|
||||
}
|
||||
|
||||
|
||||
### (CONSTRUCTOR) METHOD: new( $header_obj, $raw_data )
|
||||
### Create a new StopEvent object from the given I<raw_data> and I<header_obj>
|
||||
### (a MySQL::BinLog::Header object).
|
||||
sub new {
|
||||
my MySQL::BinLog::StopEvent $self = shift;
|
||||
|
||||
$self = fields::new( $self );
|
||||
$self->SUPER::new( @_ );
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: stringify()
|
||||
### Return a representation of the event as a human-readable string.
|
||||
sub stringify {
|
||||
my MySQL::BinLog::StopEvent $self = shift;
|
||||
return join( ':', @{$self}{qw{}} );
|
||||
}
|
||||
|
||||
|
||||
#####################################################################
|
||||
### R O T A T E E V E N T C L A S S
|
||||
#####################################################################
|
||||
package MySQL::BinLog::RotateEvent;
|
||||
use strict;
|
||||
|
||||
BEGIN {
|
||||
use base 'MySQL::BinLog::Event';
|
||||
use fields qw{pos ident};
|
||||
|
||||
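# 8-byte position (R_POS_OFFSET), followed by the new log file name (R_IDENT_OFFSET)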
use constant PACK_TEMPLATE => 'a8a*';
|
||||
}
|
||||
|
||||
|
||||
### (CONSTRUCTOR) METHOD: new( $header_obj, $raw_data )
|
||||
### Create a new RotateEvent object from the given I<raw_data> and I<header_obj>
|
||||
### (a MySQL::BinLog::Header object).
|
||||
sub new {
|
||||
my MySQL::BinLog::RotateEvent $self = shift;
|
||||
|
||||
$self = fields::new( $self );
|
||||
$self->SUPER::new( @_ );
|
||||
|
||||
if ( $self->{rawdata} ) {
|
||||
my @fields = unpack PACK_TEMPLATE, $self->{rawdata};
|
||||
@{$self}{qw{pos ident}} = @fields;
|
||||
}
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: stringify()
|
||||
### Return a representation of the event as a human-readable string.
|
||||
sub stringify {
|
||||
my MySQL::BinLog::RotateEvent $self = shift;
|
||||
return join( ':', @{$self}{qw{pos ident}} );
|
||||
}
|
||||
|
||||
|
||||
#####################################################################
|
||||
### I N T V A R E V E N T C L A S S
|
||||
#####################################################################
|
||||
package MySQL::BinLog::IntvarEvent;
|
||||
use strict;
|
||||
|
||||
BEGIN {
|
||||
use base 'MySQL::BinLog::Event';
|
||||
use fields qw{type val};
|
||||
|
||||
use constant PACK_TEMPLATE => 'Ca8';
|
||||
}
|
||||
|
||||
|
||||
### (CONSTRUCTOR) METHOD: new( $header_obj, $raw_data )
|
||||
### Create a new IntvarEvent object from the given I<raw_data> and I<header_obj>
|
||||
### (a MySQL::BinLog::Header object).
|
||||
sub new {
|
||||
my MySQL::BinLog::IntvarEvent $self = shift;
|
||||
|
||||
$self = fields::new( $self );
|
||||
$self->SUPER::new( @_ );
|
||||
|
||||
if ( $self->{rawdata} ) {
|
||||
my @fields = unpack PACK_TEMPLATE, $self->{rawdata};
|
||||
@{$self}{qw{type val}} = @fields;
|
||||
}
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: stringify()
|
||||
### Return a representation of the event as a human-readable string.
|
||||
sub stringify {
|
||||
my MySQL::BinLog::IntvarEvent $self = shift;
|
||||
return join( ':', @{$self}{qw{type val}} );
|
||||
}
|
||||
|
||||
|
||||
#####################################################################
|
||||
### L O A D E V E N T C L A S S
|
||||
#####################################################################
|
||||
package MySQL::BinLog::LoadEvent;
|
||||
use strict;
|
||||
|
||||
BEGIN {
|
||||
use base 'MySQL::BinLog::Event';
|
||||
use fields qw{thread_id exec_time skip_lines tbl_len db_len num_fields sql_ex ldata};
|
||||
|
||||
use constant PACK_TEMPLATE => 'VVVCCVa*';
|
||||
}
|
||||
|
||||
|
||||
### (CONSTRUCTOR) METHOD: new( $header_obj, $raw_data )
|
||||
### Create a new LoadEvent object from the given I<raw_data> and I<header_obj>
|
||||
### (a MySQL::BinLog::Header object).
|
||||
sub new {
|
||||
my MySQL::BinLog::LoadEvent $self = shift;
|
||||
|
||||
$self = fields::new( $self );
|
||||
$self->SUPER::new( @_ );
|
||||
|
||||
if ( $self->{rawdata} ) {
|
||||
my @fields = unpack PACK_TEMPLATE, $self->{rawdata};
|
||||
@{$self}{qw{thread_id exec_time skip_lines tbl_len db_len num_fields sql_ex ldata}} = @fields;
|
||||
}
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: stringify()
|
||||
### Return a representation of the event as a human-readable string.
|
||||
sub stringify {
|
||||
my MySQL::BinLog::LoadEvent $self = shift;
|
||||
return join( ':', @{$self}{qw{thread_id exec_time skip_lines tbl_len db_len num_fields sql_ex ldata}} );
|
||||
}
|
||||
|
||||
|
||||
|
||||
#####################################################################
|
||||
### S L A V E E V E N T C L A S S
|
||||
#####################################################################
|
||||
package MySQL::BinLog::SlaveEvent;
|
||||
use strict;
|
||||
|
||||
BEGIN {
|
||||
use base 'MySQL::BinLog::Event';
|
||||
use fields qw{master_pos master_port master_host};
|
||||
|
||||
use constant PACK_TEMPLATE => 'a8va*';
|
||||
}
|
||||
|
||||
|
||||
### (CONSTRUCTOR) METHOD: new( $header_obj, $raw_data )
|
||||
### Create a new SlaveEvent object from the given I<raw_data> and I<header_obj>
|
||||
### (a MySQL::BinLog::Header object).
|
||||
sub new {
|
||||
my MySQL::BinLog::SlaveEvent $self = shift;
|
||||
|
||||
$self = fields::new( $self );
|
||||
$self->SUPER::new( @_ );
|
||||
|
||||
if ( $self->{rawdata} ) {
|
||||
my @fields = unpack PACK_TEMPLATE, $self->{rawdata};
|
||||
@{$self}{qw{master_pos master_port master_host}} = @fields;
|
||||
}
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: stringify()
|
||||
### Return a representation of the event as a human-readable string.
|
||||
sub stringify {
|
||||
my MySQL::BinLog::SlaveEvent $self = shift;
|
||||
return join( ':', @{$self}{qw{master_pos master_port master_host}} );
|
||||
}
|
||||
|
||||
|
||||
|
||||
#####################################################################
|
||||
### C R E A T E F I L E E V E N T C L A S S
|
||||
#####################################################################
|
||||
package MySQL::BinLog::CreateFileEvent;
|
||||
use strict;
|
||||
|
||||
BEGIN {
|
||||
use base 'MySQL::BinLog::Event';
|
||||
use fields qw{thread_id exec_time skip_lines tbl_len db_len num_fields sql_ex ldata};
|
||||
|
||||
use constant PACK_TEMPLATE => 'VVVCCVa*';
|
||||
}
|
||||
|
||||
|
||||
### (CONSTRUCTOR) METHOD: new( $header_obj, $raw_data )
|
||||
### Create a new CreateFileEvent object from the given I<raw_data> and I<header_obj>
|
||||
### (a MySQL::BinLog::Header object).
|
||||
sub new {
|
||||
my MySQL::BinLog::CreateFileEvent $self = shift;
|
||||
|
||||
$self = fields::new( $self );
|
||||
$self->SUPER::new( @_ );
|
||||
|
||||
if ( $self->{rawdata} ) {
|
||||
my @fields = unpack PACK_TEMPLATE, $self->{rawdata};
|
||||
@{$self}{qw{thread_id exec_time skip_lines tbl_len db_len num_fields sql_ex ldata}} = @fields;
|
||||
}
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: stringify()
|
||||
### Return a representation of the event as a human-readable string.
|
||||
sub stringify {
|
||||
my MySQL::BinLog::CreateFileEvent $self = shift;
|
||||
return join( ':', @{$self}{qw{thread_id exec_time skip_lines tbl_len db_len num_fields sql_ex ldata}} );
|
||||
}
|
||||
|
||||
|
||||
#####################################################################
|
||||
### A P P E N D B L O C K E V E N T C L A S S
|
||||
#####################################################################
|
||||
package MySQL::BinLog::AppendBlockEvent;
|
||||
use strict;
|
||||
|
||||
BEGIN {
|
||||
use base 'MySQL::BinLog::Event';
|
||||
use fields qw{file_id data};
|
||||
|
||||
use constant PACK_TEMPLATE => 'Va*';
|
||||
}
|
||||
|
||||
|
||||
### (CONSTRUCTOR) METHOD: new( $header_obj, $raw_data )
|
||||
### Create a new AppendBlockEvent object from the given I<raw_data> and I<header_obj>
|
||||
### (a MySQL::BinLog::Header object).
|
||||
sub new {
|
||||
my MySQL::BinLog::AppendBlockEvent $self = shift;
|
||||
|
||||
$self = fields::new( $self );
|
||||
$self->SUPER::new( @_ );
|
||||
|
||||
if ( $self->{rawdata} ) {
|
||||
my @fields = unpack PACK_TEMPLATE, $self->{rawdata};
|
||||
@{$self}{qw{file_id data}} = @fields;
|
||||
}
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: stringify()
|
||||
### Return a representation of the event as a human-readable string.
|
||||
sub stringify {
|
||||
my MySQL::BinLog::AppendBlockEvent $self = shift;
|
||||
return join( ':', @{$self}{qw{file_id data}} );
|
||||
}
|
||||
|
||||
|
||||
#####################################################################
|
||||
### E X E C L O A D E V E N T C L A S S
|
||||
#####################################################################
|
||||
package MySQL::BinLog::ExecLoadEvent;
|
||||
use strict;
|
||||
|
||||
BEGIN {
|
||||
use base 'MySQL::BinLog::Event';
|
||||
use fields qw{file_id};
|
||||
|
||||
use constant PACK_TEMPLATE => 'V';
|
||||
}
|
||||
|
||||
|
||||
### (CONSTRUCTOR) METHOD: new( $header_obj, $raw_data )
|
||||
### Create a new ExecLoadEvent object from the given I<raw_data> and I<header_obj>
|
||||
### (a MySQL::BinLog::Header object).
|
||||
sub new {
|
||||
my MySQL::BinLog::ExecLoadEvent $self = shift;
|
||||
|
||||
$self = fields::new( $self );
|
||||
$self->SUPER::new( @_ );
|
||||
|
||||
if ( $self->{rawdata} ) {
|
||||
my @fields = unpack PACK_TEMPLATE, $self->{rawdata};
|
||||
@{$self}{qw{file_id}} = @fields;
|
||||
}
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: stringify()
|
||||
### Return a representation of the event as a human-readable string.
|
||||
sub stringify {
|
||||
my MySQL::BinLog::ExecLoadEvent $self = shift;
|
||||
return join( ':', @{$self}{qw{file_id}} );
|
||||
}
|
||||
|
||||
|
||||
#####################################################################
|
||||
### D E L E T E F I L E E V E N T C L A S S
|
||||
#####################################################################
|
||||
package MySQL::BinLog::DeleteFileEvent;
|
||||
use strict;
|
||||
|
||||
BEGIN {
|
||||
use base 'MySQL::BinLog::Event';
|
||||
use fields qw{file_id};
|
||||
|
||||
use constant PACK_TEMPLATE => 'V';
|
||||
}
|
||||
|
||||
|
||||
### (CONSTRUCTOR) METHOD: new( $header_obj, $raw_data )
|
||||
### Create a new DeleteFileEvent object from the given I<raw_data> and I<header_obj>
|
||||
### (a MySQL::BinLog::Header object).
|
||||
sub new {
|
||||
my MySQL::BinLog::DeleteFileEvent $self = shift;
|
||||
|
||||
$self = fields::new( $self );
|
||||
$self->SUPER::new( @_ );
|
||||
|
||||
if ( $self->{rawdata} ) {
|
||||
my @fields = unpack PACK_TEMPLATE, $self->{rawdata};
|
||||
@{$self}{qw{file_id}} = @fields;
|
||||
}
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: stringify()
|
||||
### Return a representation of the event as a human-readable string.
|
||||
sub stringify {
|
||||
my MySQL::BinLog::DeleteFileEvent $self = shift;
|
||||
return join( ':', @{$self}{qw{file_id}} );
|
||||
}
|
||||
|
||||
|
||||
#####################################################################
|
||||
### N E W L O A D E V E N T C L A S S
|
||||
#####################################################################
|
||||
package MySQL::BinLog::NewLoadEvent;
|
||||
use strict;
|
||||
|
||||
BEGIN {
|
||||
use base 'MySQL::BinLog::Event';
|
||||
}
|
||||
|
||||
|
||||
### (CONSTRUCTOR) METHOD: new( $header_obj, $raw_data )
|
||||
### Create a new NewLoadEvent object from the given I<raw_data> and I<header_obj>
|
||||
### (a MySQL::BinLog::Header object).
|
||||
sub new {
|
||||
my MySQL::BinLog::NewLoadEvent $self = shift;
|
||||
|
||||
$self = fields::new( $self );
|
||||
$self->SUPER::new( @_ );
|
||||
|
||||
# I don't think these have any data (?) -MG
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: stringify()
|
||||
### Return a representation of the event as a human-readable string.
|
||||
sub stringify {
|
||||
my MySQL::BinLog::NewLoadEvent $self = shift;
|
||||
return '(New_load)';
|
||||
}
|
||||
|
||||
|
||||
|
||||
#####################################################################
|
||||
### R A N D E V E N T C L A S S
|
||||
#####################################################################
|
||||
package MySQL::BinLog::RandEvent;
|
||||
use strict;
|
||||
|
||||
BEGIN {
|
||||
use base 'MySQL::BinLog::Event';
|
||||
use fields qw{seed1 seed2};
|
||||
|
||||
use constant PACK_TEMPLATE => 'a8a8';
|
||||
}
|
||||
|
||||
|
||||
### (CONSTRUCTOR) METHOD: new( $header_obj, $raw_data )
|
||||
### Create a new RandEvent object from the given I<raw_data> and I<header_obj>
|
||||
### (a MySQL::BinLog::Header object).
|
||||
sub new {
|
||||
my MySQL::BinLog::RandEvent $self = shift;
|
||||
|
||||
$self = fields::new( $self );
|
||||
$self->SUPER::new( @_ );
|
||||
|
||||
if ( $self->{rawdata} ) {
|
||||
my @fields = unpack PACK_TEMPLATE, $self->{rawdata};
|
||||
@{$self}{qw{seed1 seed2}} = @fields;
|
||||
}
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: stringify()
|
||||
### Return a representation of the event as a human-readable string.
|
||||
sub stringify {
|
||||
my MySQL::BinLog::RandEvent $self = shift;
|
||||
return join( ':', @{$self}{qw{seed1 seed2}} );
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
#####################################################################
|
||||
### U S E R V A R E V E N T C L A S S
|
||||
#####################################################################
|
||||
|
||||
# USER_VAR_EVENT
|
||||
# o 4 bytes: the size of the name of the user variable.
|
||||
# o variable-sized part: A concatenation. First is the name of the
|
||||
# user variable. Second is one byte, non-zero if the content of the
|
||||
# variable is the SQL value NULL, ASCII 0 otherwise. If this byte was
|
||||
# ASCII 0, then the following parts exist in the event. Third is one
|
||||
# byte, the type of the user variable, which corresponds to elements of
|
||||
# enum Item_result defined in `include/mysql_com.h'. Fourth is 4 bytes,
|
||||
# the number of the character set of the user variable (needed for a
|
||||
# string variable). Fifth is 4 bytes, the size of the user variable's
|
||||
# value (corresponds to member val_len of class Item_string). Sixth is
|
||||
# variable-sized: for a string variable it is the string, for a float or
|
||||
# integer variable it is its value in 8 bytes.
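#
# Editor's sketch (not part of the original source): the 'V/aca*' template
# used by the class below maps directly onto that layout. A hypothetical,
# hand-built body unpacks like this:
#
#   my $body = pack( 'V/a*', 'myvar' )               # 4-byte name length + name
#            . pack( 'c', 0 )                        # NULL flag (0 = not NULL)
#            . pack( 'cVVa*', 0, 8, 5, '12345' );    # type, charset, val_len, value
#   my ( $name, $is_null, $rest ) = unpack( 'V/aca*', $body );
#   # $name is 'myvar', $is_null is 0, and $rest still carries the
#   # type/charset/len/value block for a second unpack with 'cVVa*'.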
|
||||
|
||||
package MySQL::BinLog::UserVarEvent;
|
||||
use strict;
|
||||
|
||||
BEGIN {
|
||||
use base 'MySQL::BinLog::Event';
|
||||
use fields qw{varname value};
|
||||
|
||||
use constant PACK_TEMPLATE => 'V/aca*';
|
||||
}
|
||||
|
||||
|
||||
### (CONSTRUCTOR) METHOD: new( $header_obj, $raw_data )
|
||||
### Create a new UserVarEvent object from the given I<raw_data> and I<header_obj>
|
||||
### (a MySQL::BinLog::Header object).
|
||||
sub new {
|
||||
my MySQL::BinLog::UserVarEvent $self = shift;
|
||||
|
||||
$self = fields::new( $self );
|
||||
$self->SUPER::new( @_ );
|
||||
|
||||
if ( $self->{rawdata} ) {
|
||||
my @fields = unpack PACK_TEMPLATE, $self->{rawdata};
|
||||
|
||||
# If the NULL flag (the second field) is non-zero, the value is undef. Otherwise,
|
||||
# unpack the value
|
||||
if ( $fields[1] ) {   # non-zero flag byte means the value is SQL NULL
|
||||
$fields[2] = undef;
|
||||
} else {
|
||||
my ( $type, $charset, $len, $data ) = unpack 'cVVa*', $fields[2];
|
||||
$fields[2] = {
|
||||
type => $type,
|
||||
charset => $charset,
|
||||
len => $len,
|
||||
data => $data,
|
||||
};
|
||||
}
|
||||
|
||||
@{$self}{qw{varname value}} = @fields[0, 2];
|
||||
}
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
### METHOD: stringify()
|
||||
### Return a representation of the event as a human-readable string.
|
||||
sub stringify {
|
||||
my MySQL::BinLog::UserVarEvent $self = shift;
|
||||
# :FIXME: This will obviously have to take into account the fact that the
|
||||
# value field is a complex datatype or undef.
|
||||
return join( ':', @{$self}{qw{varname value}} );
|
||||
}
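
### Editor's sketch (not original code) of one way the FIXME above could be
### resolved: the value slot is either undef (SQL NULL) or the hashref built
### in new(), so a readable form might be:
###
###   my $val = defined $self->{value} ? $self->{value}{data} : 'NULL';
###   return join( ':', $self->{varname}, $val );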
|
||||
|
||||
|
||||
|
||||
1;
|
||||
|
||||
|
||||
158
wcmtools/lib/MySQL-BinLog/lib/Mysql/BinLog/Header.pm
Executable file
@@ -0,0 +1,158 @@
|
||||
#!/usr/bin/perl
|
||||
##############################################################################
|
||||
|
||||
=head1 NAME
|
||||
|
||||
MySQL::BinLog::Header - Per-event MySQL binlog header class
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
use MySQL::BinLog::Header qw();
|
||||
use MySQL::Constants qw(LOG_EVENT_HEADER_LEN);
|
||||
|
||||
my $hdata = substr( $data, 0, LOG_EVENT_HEADER_LEN );
|
||||
my $header = new MySQL::BinLog::Header $hdata;
|
||||
|
||||
$header->event_type;
|
||||
$header->server_id;
|
||||
$header->event_len;
|
||||
$header->log_pos;
|
||||
$header->flags;
|
||||
|
||||
=head1 REQUIRES
|
||||
|
||||
I<Token requires line>
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
None yet.
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Michael Granger <ged@FaerieMUD.org>
|
||||
|
||||
Copyright (c) 2004 Danga Interactive. All rights reserved.
|
||||
|
||||
This module is free software. You may use, modify, and/or redistribute this
|
||||
software under the terms of the Perl Artistic License. (See
|
||||
http://language.perl.com/misc/Artistic.html)
|
||||
|
||||
THIS SOFTWARE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES,
|
||||
INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND
|
||||
FITNESS FOR A PARTICULAR PURPOSE.
|
||||
|
||||
=cut
|
||||
|
||||
##############################################################################
|
||||
package MySQL::BinLog::Header;
|
||||
use strict;
|
||||
use warnings qw{all};
|
||||
|
||||
|
||||
###############################################################################
|
||||
### I N I T I A L I Z A T I O N
|
||||
###############################################################################
|
||||
BEGIN {
|
||||
# Versioning stuff and custom includes
|
||||
use vars qw{$VERSION $RCSID};
|
||||
$VERSION = do { my @r = (q$Revision: 1.2 $ =~ /\d+/g); sprintf "%d."."%02d" x $#r, @r };
|
||||
$RCSID = q$Id: Header.pm,v 1.2 2004/11/17 21:58:40 marksmith Exp $;
|
||||
|
||||
# Data format template and fields definition
|
||||
use constant PACK_TEMPLATE => 'VcVVVv';
|
||||
use fields qw{timestamp event_type server_id event_len log_pos flags};
|
||||
use base qw{fields};
|
||||
|
||||
# MySQL modules
|
||||
use MySQL::BinLog::Constants qw{:all};
|
||||
|
||||
# Other modules
|
||||
use Data::Dumper;
|
||||
use Scalar::Util qw{blessed};
|
||||
}
|
||||
|
||||
|
||||
our $AUTOLOAD;
|
||||
|
||||
|
||||
### (CONSTRUCTOR) new( $data )
|
||||
### Construct a new MySQL::BinLog::Header object from the given header data.
|
||||
sub new {
|
||||
my MySQL::BinLog::Header $self = shift;
|
||||
my $data = shift || '';
|
||||
|
||||
debugMsg( "Creating a new ", __PACKAGE__, " object for header: ",
|
||||
hexdump($data), ".\n" );
|
||||
die "Invalid header" unless length $data == MySQL::LOG_EVENT_HEADER_LEN;
|
||||
$self = fields::new( $self ) unless ref $self;
|
||||
|
||||
# Extract the fields or provide defaults
|
||||
my @fields = ();
|
||||
if ( $data ) {
|
||||
@fields = unpack PACK_TEMPLATE, $data;
|
||||
debugMsg( "Unpacked fields are: ", Data::Dumper->Dumpxs([\@fields], [qw{fields}]), "\n" );
|
||||
} else {
|
||||
@fields = ( time, MySQL::UNKNOWN_EVENT, 0, 0, 0, 0 );
|
||||
}
|
||||
|
||||
@{$self}{qw{timestamp event_type server_id event_len log_pos flags}} = @fields;
|
||||
|
||||
debugMsg( "Returning header: ", Data::Dumper->Dumpxs([$self]), ".\n" );
|
||||
return $self;
|
||||
}
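
### Editor's sketch (not in the original source): any 19-byte buffer laid out
### per PACK_TEMPLATE above can be fed to the constructor; the field values
### here are invented purely for illustration.
###
###   my $raw = pack( PACK_TEMPLATE, time(), 15, 1, 98, 4, 0 );
###   my $hdr = MySQL::BinLog::Header->new( $raw );
###   print $hdr->event_len, "\n";    # 98, via the AUTOLOAD accessor below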
|
||||
|
||||
|
||||
# Accessor-generator
|
||||
|
||||
### (PROXY) METHOD: AUTOLOAD( @args )
|
||||
### Proxy method to build (non-translucent) object accessors.
|
||||
sub AUTOLOAD {
|
||||
my MySQL::BinLog::Header $self = shift;
|
||||
( my $name = $AUTOLOAD ) =~ s{.*::}{};
|
||||
|
||||
### Build an accessor for extant attributes
|
||||
if ( blessed $self && exists $self->{$name} ) {
|
||||
|
||||
### Define an accessor for this attribute
|
||||
my $method = sub {
|
||||
my MySQL::BinLog::Header $closureSelf = shift;
|
||||
|
||||
$closureSelf->{$name} = shift if @_;
|
||||
return $closureSelf->{$name};
|
||||
};
|
||||
|
||||
### Install the new method in the symbol table
|
||||
NO_STRICT_REFS: {
|
||||
no strict 'refs';
|
||||
*{$AUTOLOAD} = $method;
|
||||
}
|
||||
|
||||
### Now jump to the new method after sticking the self-ref back onto the
|
||||
### stack
|
||||
unshift @_, $self;
|
||||
goto &$AUTOLOAD;
|
||||
}
|
||||
|
||||
### Try to delegate to our parent's version of the method
|
||||
my $parentMethod = "SUPER::$name";
|
||||
return $self->$parentMethod( @_ );
|
||||
}
|
||||
|
||||
|
||||
|
||||
### Utility functions
|
||||
|
||||
#sub debugMsg { print STDERR @_ }
|
||||
sub debugMsg {}
|
||||
sub hexdump { return join( ' ', map {sprintf '%02x', ord($_)} split('', $_[0])) }
|
||||
|
||||
|
||||
|
||||
### Destructors
|
||||
DESTROY {}
|
||||
END {}
|
||||
|
||||
|
||||
1;
|
||||
|
||||
|
||||
207
wcmtools/lib/MySQL-BinLog/lib/Mysql/BinLog/Net.pm
Executable file
@@ -0,0 +1,207 @@
|
||||
#!/usr/bin/perl
|
||||
##############################################################################
|
||||
|
||||
=head1 NAME
|
||||
|
||||
MySQL::BinLog::Net - Read binlog events from a master server over the network.
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
use MySQL::BinLog qw{};
|
||||
|
||||
my %connect_params = (
|
||||
hostname => 'db.example.com',
|
||||
database => 'sales',
|
||||
user => 'salesapp',
|
||||
password => 'bloo$shewz',
|
||||
port => 3306,
|
||||
);
|
||||
my $log = MySQL::BinLog->connect( %connect_params )
|
||||
or die "Couldn't connect.";
|
||||
|
||||
|
||||
=head1 REQUIRES
|
||||
|
||||
I<Net::MySQL>, I<Carp>
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
None yet.
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Michael Granger <ged@Danga.com>
|
||||
|
||||
Copyright (c) 2004 Danga Interactive. All rights reserved.
|
||||
|
||||
This module is free software. You may use, modify, and/or redistribute this
|
||||
software under the terms of the Perl Artistic License. (See
|
||||
http://language.perl.com/misc/Artistic.html)
|
||||
|
||||
THIS SOFTWARE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES,
|
||||
INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND
|
||||
FITNESS FOR A PARTICULAR PURPOSE.
|
||||
|
||||
=cut
|
||||
|
||||
##############################################################################
|
||||
package MySQL::BinLog::Net;
|
||||
use strict;
|
||||
use warnings qw{all};
|
||||
|
||||
|
||||
###############################################################################
|
||||
### I N I T I A L I Z A T I O N
|
||||
###############################################################################
|
||||
BEGIN {
|
||||
# Versioning stuff
|
||||
use vars qw{$VERSION $RCSID};
|
||||
$VERSION = do { my @r = (q$Revision: 1.2 $ =~ /\d+/g); sprintf "%d."."%02d" x $#r, @r };
|
||||
$RCSID = q$Id: Net.pm,v 1.2 2004/11/17 21:58:40 marksmith Exp $;
|
||||
|
||||
use Net::MySQL qw{};
|
||||
use Carp qw{carp croak confess};
|
||||
use base qw{Net::MySQL};
|
||||
|
||||
use constant CHUNKSIZE => 16;
|
||||
use constant PKTHEADER_LEN => (3 + 1 + 1);
|
||||
}
|
||||
|
||||
|
||||
### METHOD: start_binlog( $slave_id[, $logname, $position, $flags] )
|
||||
### Contact the remote server and send the command to start reading binlog
|
||||
### events from the given I<logname>, I<position>, I<slave_id>, and optional
|
||||
### I<flags>.
|
||||
sub start_binlog {
|
||||
my $self = shift;
|
||||
my ( $slave_server_id, $logname, $pos, $flags ) = @_;
|
||||
|
||||
# New log: no logname and position = 4
|
||||
$logname ||= '';
|
||||
$pos = 4 unless defined $pos && $pos > 4;
|
||||
|
||||
my (
|
||||
$len,
|
||||
$cmd,
|
||||
$packet,
|
||||
$mysql,
|
||||
);
|
||||
|
||||
# Build the BINLOG_DUMP packet
|
||||
$cmd = Net::MySQL::COMMAND_BINLOG_DUMP;
|
||||
$flags ||= 0;
|
||||
$len = 1 + 4 + 2 + 4 + length( $logname );
|
||||
$packet = pack( 'VaVvVa*', $len, $cmd, $pos, $flags, $slave_server_id, $logname );
|
||||
$mysql = $self->{socket};
|
||||
|
||||
# Send it
|
||||
$mysql->send( $packet, 0 );
|
||||
$self->_dump_packet( $packet ) if $self->debug;
|
||||
|
||||
# Receive the response
|
||||
my $result = $self->read_packet;
|
||||
|
||||
# FIXME I broke error checking by switching to read_packet instead of using
|
||||
# recv... but recv reads a full buffer's worth, which just gets tossed and
|
||||
# causes subsequent read_packet calls to start at arbitrary positions and fail.
|
||||
# real solution is to make read_packet set error flags and then have callers
|
||||
# check them. eventually. oh, FYI, you have to reconstitute the packet before
|
||||
# passing it on to _is_error and _set_error_by_packet, as those are in Net::MySQL
|
||||
# and expect the whole packet, not just the payload that read_packet returns.
|
||||
#return $self->_set_error_by_packet( $result ) if $self->_is_error( $result );
|
||||
|
||||
return 1;
|
||||
}
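
### Editor's sketch (not in the original source): rough intended usage of
### start_binlog() together with read_packet() below. Construction through
### the inherited Net::MySQL constructor and the parameter names are assumed
### here, mirroring the SYNOPSIS; the slave id is arbitrary.
###
###   my $net = MySQL::BinLog::Net->new( %connect_params );
###   $net->start_binlog( 9999 );          # read as slave id 9999, from pos 4
###   while ( my $payload = $net->read_packet ) {
###       # each payload is one binlog event (header + body), minus the
###       # 5-byte MySQL packet framing stripped by read_packet()
###   }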
|
||||
|
||||
|
||||
### METHOD: read_packet( )
|
||||
### Read a single packet from the connection and return its payload as a scalar.
|
||||
sub read_packet {
|
||||
my $self = shift;
|
||||
|
||||
my $pkt_header = $self->readbytes( PKTHEADER_LEN );
|
||||
my $length = unpack( 'V', substr($pkt_header, 0, 3, '') . "\0" ) - 1;
|
||||
my ( $pktno, $cmd ) = unpack( 'CC', $pkt_header );
|
||||
|
||||
my $pkt = $self->readbytes( $length );
|
||||
$self->_dump_packet( $pkt ) if $self->debug;
|
||||
|
||||
return $pkt;
|
||||
}
|
||||
|
||||
|
||||
### FUNCTION: readbytes( $len )
|
||||
### Read and return I<len> bytes from the connection.
|
||||
sub readbytes {
|
||||
my ( $self, $len ) = @_;
|
||||
my ( $buf, $rval, $bytes ) = ('', '', 0);
|
||||
|
||||
my $sock = $self->{socket};
|
||||
|
||||
until ( length $rval == $len ) {
|
||||
$bytes = $sock->read( $buf, $len - length $rval );
|
||||
if ( !defined $bytes ) {
|
||||
if ( $!{EAGAIN} ) { next }
|
||||
die "Read error: $!";
|
||||
} elsif ( !$bytes && $sock->eof ) {
|
||||
die "EOF before reading $len bytes.\n";
|
||||
}
|
||||
$rval .= $buf;
|
||||
}
|
||||
|
||||
return $rval;
|
||||
}
|
||||
|
||||
|
||||
|
||||
### Utility/debugging methods (overridden).
|
||||
|
||||
sub hexdump { join ' ', map {sprintf "%02x", ord $_} grep {defined} @_ }
|
||||
sub ascdump { join '', map {m/[\d \w\._]/ ? $_ : '.'} grep {defined} @_ }
|
||||
|
||||
sub _dump_packet {
|
||||
my $self = shift;
|
||||
my $packet = shift;
|
||||
|
||||
my (
|
||||
$method_name,
|
||||
@bytes,
|
||||
@chunk,
|
||||
$half,
|
||||
$width,
|
||||
$count,
|
||||
);
|
||||
|
||||
$method_name = (caller(1))[3];
|
||||
print "$method_name:\n";
|
||||
|
||||
@bytes = split //, $packet;
|
||||
$count = 0;
|
||||
while ( @bytes ) {
|
||||
@chunk = grep { defined } splice( @bytes, 0, CHUNKSIZE );
|
||||
$half = CHUNKSIZE / 2;
|
||||
$width = $half * 3;
|
||||
|
||||
printf( " 0x%04x: %-${width}s %-${width}s |%-${half}s %-${half}s|\n",
|
||||
$count,
|
||||
hexdump( @chunk[0..($half-1)] ),
|
||||
hexdump( @chunk[$half..$#chunk] ),
|
||||
ascdump( @chunk[0..($half-1)] ),
|
||||
ascdump( @chunk[$half..$#chunk] ) );
|
||||
|
||||
$count += CHUNKSIZE;
|
||||
}
|
||||
|
||||
print "--\n";
|
||||
}
|
||||
|
||||
|
||||
|
||||
### Destructors
|
||||
DESTROY {}
|
||||
END {}
|
||||
|
||||
|
||||
1;
|
||||
|
||||
|
||||
162
wcmtools/lib/S2/Color.pm
Executable file
@@ -0,0 +1,162 @@
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
# This is a helper package, useful for creating color lightening/darkening
|
||||
# functions in core layers.
|
||||
#
|
||||
|
||||
package S2::Color;

use POSIX ();    # hsv_to_rgb() below calls POSIX::floor()
|
||||
|
||||
# rgb to hsv
|
||||
# r, g, b = [0, 255]
|
||||
# h, s, v = [0, 1), [0, 1], [0, 1]
|
||||
sub rgb_to_hsv
|
||||
{
|
||||
my ($r, $g, $b) = map { $_ / 255 } @_;
|
||||
my ($h, $s, $v);
|
||||
|
||||
my ($max, $min) = ($r, $r);
|
||||
foreach ($g, $b) {
|
||||
$max = $_ if $_ > $max;
|
||||
$min = $_ if $_ < $min;
|
||||
}
|
||||
return (0, 0, 0) if $max == 0;
|
||||
|
||||
$v = $max;
|
||||
|
||||
my $delta = $max - $min;
|
||||
|
||||
$s = $delta / $max;
|
||||
return (0, $s, $v) unless $delta;
|
||||
|
||||
if ($r == $max) {
|
||||
$h = ($g - $b) / $delta;
|
||||
} elsif ($g == $max) {
|
||||
$h = 2 + ($b - $r) / $delta;
|
||||
} else {
|
||||
$h = 4 + ($r - $g) / $delta;
|
||||
}
|
||||
|
||||
$h *= 60;
$h += 360 if $h < 0;
$h /= 360;
|
||||
|
||||
return ($h, $s, $v);
|
||||
}
|
||||
|
||||
# hsv to rgb
|
||||
# h, s, v = [0, 1), [0, 1], [0, 1]
|
||||
# r, g, b = [0, 255], [0, 255], [0, 255]
|
||||
sub hsv_to_rgb
|
||||
{
|
||||
my ($H, $S, $V) = @_;
|
||||
|
||||
if ($S == 0) {
|
||||
$V *= 255;
|
||||
return ($V, $V, $V);
|
||||
}
|
||||
|
||||
$H *= 6;
|
||||
my $I = POSIX::floor($H);
|
||||
|
||||
my $F = $H - $I;
|
||||
my $P = $V * (1 - $S);
|
||||
my $Q = $V * (1 - $S * $F);
|
||||
my $T = $V * (1 - $S * (1 - $F));
|
||||
|
||||
foreach ($V, $T, $P, $Q) {
|
||||
$_ = int($_ * 255 + 0.5);
|
||||
}
|
||||
|
||||
return ($V, $T, $P) if $I == 0;
|
||||
return ($Q, $V, $P) if $I == 1;
|
||||
return ($P, $V, $T) if $I == 2;
|
||||
return ($P, $Q, $V) if $I == 3;
|
||||
return ($T, $P, $V) if $I == 4;
|
||||
|
||||
return ($V, $P, $Q);
|
||||
}
|
||||
|
||||
# rgb to hsl
|
||||
# r, g, b = [0, 255], [0, 255], [0, 255]
|
||||
# returns: (h, s, l) = [0, 1), [0, 1], [0, 1]
|
||||
sub rgb_to_hsl
|
||||
{
|
||||
# convert rgb to 0-1
|
||||
my ($R, $G, $B) = map { $_ / 255 } @_;
|
||||
|
||||
# get min/max of {r, g, b}
|
||||
my ($max, $min) = ($R, $R);
|
||||
foreach ($G, $B) {
|
||||
$max = $_ if $_ > $max;
|
||||
$min = $_ if $_ < $min;
|
||||
}
|
||||
|
||||
# is gray?
|
||||
my $delta = $max - $min;
|
||||
if ($delta == 0) {
|
||||
return (0, 0, $max);
|
||||
}
|
||||
|
||||
my ($H, $S);
|
||||
my $L = ($max + $min) / 2;
|
||||
|
||||
if ($L < 0.5) {
|
||||
$S = $delta / ($max + $min);
|
||||
} else {
|
||||
$S = $delta / (2.0 - $max - $min);
|
||||
}
|
||||
|
||||
if ($R == $max) {
|
||||
$H = ($G - $B) / $delta;
|
||||
} elsif ($G == $max) {
|
||||
$H = 2 + ($B - $R) / $delta;
|
||||
} elsif ($B == $max) {
|
||||
$H = 4 + ($R - $G) / $delta;
|
||||
}
|
||||
|
||||
$H *= 60;
|
||||
$H += 360.0 if $H < 0.0;
|
||||
$H -= 360.0 if $H >= 360.0;
|
||||
$H /= 360.0;
|
||||
|
||||
return ($H, $S, $L);
|
||||
|
||||
}
|
||||
|
||||
# h, s, l = [0,1), [0,1], [0,1]
|
||||
# returns: rgb: [0,255], [0,255], [0,255]
|
||||
sub hsl_to_rgb {
|
||||
my ($H, $S, $L) = @_;
|
||||
|
||||
# gray.
|
||||
if ($S < 0.0000000000001) {
|
||||
my $gv = int(255 * $L + 0.5);
|
||||
return ($gv, $gv, $gv);
|
||||
}
|
||||
|
||||
my ($t1, $t2);
|
||||
if ($L < 0.5) {
|
||||
$t2 = $L * (1.0 + $S);
|
||||
} else {
|
||||
$t2 = $L + $S - $L * $S;
|
||||
}
|
||||
$t1 = 2.0 * $L - $t2;
|
||||
|
||||
my $fromhue = sub {
|
||||
my $hue = shift;
|
||||
if ($hue < 0) { $hue += 1.0; }
|
||||
if ($hue > 1) { $hue -= 1.0; }
|
||||
|
||||
if (6.0 * $hue < 1) {
|
||||
return $t1 + ($t2 - $t1) * $hue * 6.0;
|
||||
} elsif (2.0 * $hue < 1) {
|
||||
return $t2;
|
||||
} elsif (3.0 * $hue < 2.0) {
|
||||
return ($t1 + ($t2 - $t1)*((2.0/3.0)-$hue)*6.0);
|
||||
} else {
|
||||
return $t1;
|
||||
}
|
||||
};
|
||||
|
||||
return map { int(255 * $fromhue->($_) + 0.5) } ($H + 1.0/3.0, $H, $H - 1.0/3.0);
|
||||
}
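
# Example (editor's sketch, not part of the original module): the intended
# lighten/darken pattern is an RGB -> HSL round trip with the L channel
# adjusted; the 20% bump below is an arbitrary illustration.
#
#   my ($h, $s, $l) = S2::Color::rgb_to_hsl(0x33, 0x66, 0x99);
#   $l = $l + 0.2 > 1 ? 1 : $l + 0.2;                 # lighten by 20%
#   my ($r, $g, $b) = S2::Color::hsl_to_rgb($h, $s, $l);
#   my $hex = sprintf("#%02x%02x%02x", $r, $g, $b);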
|
||||
|
||||
1;
|
||||
135
wcmtools/lib/S2/EXIF.pm
Executable file
@@ -0,0 +1,135 @@
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
# This is a helper package, contains info about EXIF tag categories and how to print them
|
||||
#
|
||||
|
||||
package S2::EXIF;
|
||||
use strict;
|
||||
use vars qw(@TAG_CAT %TAG_CAT);
|
||||
|
||||
# rough categories which can optionally be used to display tags
|
||||
# with coherent ordering
|
||||
|
||||
@TAG_CAT =
|
||||
(
|
||||
[ media => {
|
||||
name => 'Media Information',
|
||||
tags => [ qw (
|
||||
PixelXDimension
|
||||
PixelYDimension
|
||||
ImageWidth
|
||||
ImageLength
|
||||
Compression
|
||||
CompressedBitsPerPixel
|
||||
)
|
||||
],
|
||||
},
|
||||
],
|
||||
|
||||
[ image => {
|
||||
name => 'Image Information',
|
||||
tags => [ qw (
|
||||
DateTime
|
||||
DateTimeOriginal
|
||||
ImageDescription
|
||||
UserComment
|
||||
Make
|
||||
Software
|
||||
Artist
|
||||
Copyright
|
||||
ExifVersion
|
||||
FlashpixVersion
|
||||
)
|
||||
],
|
||||
},
|
||||
],
|
||||
|
||||
[ exposure => {
|
||||
name => 'Exposure Settings',
|
||||
tags => [ qw(
|
||||
Orientation
|
||||
Flash
|
||||
FlashEnergy
|
||||
LightSource
|
||||
ExposureTime
|
||||
ExposureProgram
|
||||
ExposureMode
|
||||
DigitalZoomRatio
|
||||
ShutterSpeedValue
|
||||
ApertureValue
|
||||
MeteringMode
|
||||
WhiteBalance
|
||||
Contrast
|
||||
Saturation
|
||||
Sharpness
|
||||
SensingMethod
|
||||
FocalLength
|
||||
ISOSpeedRatings
|
||||
FNumber
|
||||
)
|
||||
],
|
||||
},
|
||||
],
|
||||
|
||||
[ gps => {
|
||||
name => 'GPS Information',
|
||||
tags => [ qw(
|
||||
GPSLatitudeRef
|
||||
GPSLatitude
|
||||
GPSLongitudeRef
|
||||
GPSLongitude
|
||||
GPSAltitudeRef
|
||||
GPSAltitude
|
||||
GPSTimeStamp
|
||||
GPSDateStamp
|
||||
GPSDOP
|
||||
GPSImgDirectionRef
|
||||
GPSImgDirection
|
||||
)
|
||||
],
|
||||
},
|
||||
],
|
||||
);
|
||||
|
||||
# build the category lookup hash from the ordered array
|
||||
%TAG_CAT = map { $_->[0] => $_->[1] } @TAG_CAT;
|
||||
|
||||
# return all tags in all categories
|
||||
sub get_tag_info {
|
||||
|
||||
my @ret = ();
|
||||
foreach my $currcat (@S2::EXIF::TAG_CAT) {
|
||||
push @ret, @{$currcat->[1]->{tags}};
|
||||
}
|
||||
|
||||
return @ret;
|
||||
}
|
||||
|
||||
# return hashref of category keys => names
|
||||
sub get_cat_info {
|
||||
return { map { $_->[0] => $_->[1]->{name} } @S2::EXIF::TAG_CAT };
|
||||
}
|
||||
|
||||
# return ordered array of category keys
|
||||
sub get_cat_order {
|
||||
return map { $_->[0] } @S2::EXIF::TAG_CAT;
|
||||
}
|
||||
|
||||
# return the name of a single category
|
||||
sub get_cat_name {
|
||||
return () unless $TAG_CAT{$_[0]};
|
||||
return $TAG_CAT{$_[0]}->{name};
|
||||
}
|
||||
|
||||
# return the tags in a given category
|
||||
sub get_cat_tags {
|
||||
return () unless $TAG_CAT{$_[0]};
|
||||
return @{$TAG_CAT{$_[0]}->{tags}};
|
||||
}
|
||||
|
||||
# return all tags for all categories
|
||||
sub get_all_tags {
|
||||
return map { @{$TAG_CAT{$_}->{tags}} } keys %TAG_CAT;
|
||||
}
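
# Example (editor's sketch, not part of the original module): printing tags
# grouped by category in the defined order. %exif stands in for whatever
# tag => value hash the caller has already extracted from an image.
#
#   foreach my $cat (S2::EXIF::get_cat_order()) {
#       print S2::EXIF::get_cat_name($cat), "\n";
#       foreach my $tag (S2::EXIF::get_cat_tags($cat)) {
#           print "  $tag: $exif{$tag}\n" if exists $exif{$tag};
#       }
#   }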
|
||||
|
||||
1;
|
||||
170
wcmtools/lib/SafeAgent.pm
Executable file
@@ -0,0 +1,170 @@
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
# SafeAgent: fetch HTTP resources with paranoia
|
||||
#
|
||||
# =head1 SYNOPSIS
|
||||
#
|
||||
# my $sua = new SafeAgent;
|
||||
#
|
||||
# $sua->fetch( $url, $max_amount[, $timeout[, $callback]])
|
||||
#
|
||||
#
|
||||
|
||||
package SafeAgent;
|
||||
use strict;
|
||||
use constant MB => 1024*1024;
|
||||
use Socket;
|
||||
|
||||
use LWP::UserAgent;
|
||||
use Carp qw{croak confess};
|
||||
use URI ();
|
||||
|
||||
sub new {
|
||||
my $proto = shift or croak "Not a function";
|
||||
my $class = ref $proto || $proto;
|
||||
|
||||
my $self = bless {
|
||||
realagent => new LWP::UserAgent (),
|
||||
timeout => 10,
|
||||
maxamount => 1*MB,
|
||||
last_response => undef,
|
||||
last_url => undef,
|
||||
}, $class;
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
sub err {
|
||||
my $self = shift;
|
||||
$self->{lasterr} = shift if @_;
|
||||
return $self->{lasterr};
|
||||
}
|
||||
|
||||
sub last_response {
|
||||
my $self = shift;
|
||||
return $self->{last_response};
|
||||
}
|
||||
|
||||
|
||||
sub last_url {
|
||||
my $self = shift;
|
||||
return $self->{last_url};
|
||||
}
|
||||
|
||||
|
||||
sub ret_err {
|
||||
my $self = shift;
|
||||
$self->{lasterr} = shift;
|
||||
return undef;
|
||||
}
|
||||
|
||||
|
||||
sub check_url {
|
||||
my $self = shift;
|
||||
my $url = shift;
|
||||
|
||||
return $self->ret_err("BAD_SCHEME") unless $url =~ m!^https?://!;
|
||||
my $urio = URI->new($url);
|
||||
my $host = $urio->host;
|
||||
|
||||
my $ip;
|
||||
if ($host =~ /^\d+\.\d+\.\d+\.\d+$/) {
|
||||
$ip = $host;
|
||||
} else {
|
||||
my ($name,$aliases,$addrtype,$length,@addrs) = gethostbyname($host);
|
||||
return $self->ret_err("BAD_HOSTNAME") unless @addrs;
|
||||
$ip = inet_ntoa($addrs[0]);
|
||||
}
|
||||
|
||||
# don't connect to private or reserved addresses
|
||||
return $self->ret_err("BAD_IP") if
|
||||
! $ip ||
|
||||
$ip =~ /^(?:10\.|127\.|192\.168\.)/ ||
|
||||
($ip =~ /^172\.(\d+)/ && ($1 >= 16 && $1 <= 31)) ||
|
||||
($ip =~ /^2(\d+)/ && ($1 >= 24 && $1 <= 54));
|
||||
|
||||
return $urio;
|
||||
}
|
||||
|
||||
|
||||
sub fetch {
|
||||
my ($self, $url, $max_amount, $timeout, $callback) = @_;
|
||||
$timeout ||= $self->{timeout} || 10;
|
||||
$max_amount ||= $self->{maxamount} || 1*MB;
|
||||
|
||||
my $urio = $self->check_url($url) or
|
||||
return undef;
|
||||
$self->{last_url} = $url;
|
||||
my $req = HTTP::Request->new('GET' => $url);
|
||||
|
||||
my $hops = 0;
|
||||
my $ret;
|
||||
my $no_callback = ! $callback;
|
||||
$callback ||= sub {
|
||||
my($data, $response, $protocol) = @_;
|
||||
$ret .= $data;
|
||||
};
|
||||
|
||||
HOP:
|
||||
while (1) {
|
||||
# print "Hop $hops.\n";
|
||||
$ret = "";
|
||||
|
||||
my $size = 0;
|
||||
my $toobig = 0;
|
||||
my $ua = $self->{realagent};
|
||||
my $res;
|
||||
my $hard_timeout = 0;
|
||||
|
||||
ALARM: eval {
|
||||
local $SIG{ALRM} = sub { $hard_timeout = 1; die "Hard timeout." };
|
||||
alarm( $self->{timeout} ) if $self->{timeout};
|
||||
$res = $ua->simple_request($req, sub {
|
||||
my($data, $response, $protocol) = @_;
|
||||
$size += length($data);
|
||||
$callback->($data, $response, $protocol);
|
||||
$toobig = 1 && die "TOOBIG" if $size > $max_amount;
|
||||
}, 10_000);
|
||||
alarm( 0 );
|
||||
};
|
||||
return $self->ret_err( "Hard timeout." ) if $hard_timeout;
|
||||
$self->{last_response} = $res;
|
||||
|
||||
# If it's an error response, return failure unless it aborted due
|
||||
# to an overlarge document, in which case just return the chunk we
|
||||
# have so far. Also set the error value if it did overflow.
|
||||
if ( my $err = $res->headers->header('X-Died') ) {
|
||||
$self->err($err);
|
||||
return undef unless $err =~ m{TOOBIG};
|
||||
last HOP;
|
||||
} elsif ( $res->is_error ) {
|
||||
return $self->ret_err("HTTP_Error");
|
||||
} elsif ( $res->is_redirect ) {
|
||||
# follow redirect
|
||||
my $newurl = $res->headers->header('Location');
|
||||
return $self->ret_err("HOPCOUNT") if ++$hops > 1;
|
||||
# print "Redirect to '$newurl'\n";
|
||||
$urio = $self->check_url($newurl) or return undef;
|
||||
$self->{last_url} = $newurl;
|
||||
$req = HTTP::Request->new('GET' => $urio);
|
||||
} else {
|
||||
# print "Success.\n";
|
||||
$self->err( undef );
|
||||
last HOP;
|
||||
}
|
||||
} # end while
|
||||
|
||||
return $no_callback ? $ret : 1;
|
||||
}
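
# Example (editor's sketch, not part of the original source): fetching a page
# with a 100 KB cap and a 5 second timeout, then checking the error slot.
# The URL is illustrative only.
#
#   my $sua = SafeAgent->new;
#   my $body = $sua->fetch( "http://www.example.com/feed.xml", 100 * 1024, 5 );
#   if ( ! defined $body ) {
#       warn "fetch failed: " . ( $sua->err || "unknown" );
#   } elsif ( $sua->err ) {
#       # a TOOBIG error means the body was truncated at the size cap
#       warn "truncated: " . $sua->err;
#   }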
|
||||
|
||||
sub agent {
|
||||
my $self = shift;
|
||||
my $old = $self->{realagent}->agent;
|
||||
if (@_) {
|
||||
my $agent = shift;
|
||||
$self->{realagent}->agent($agent);
|
||||
}
|
||||
return $old;
|
||||
}
|
||||
|
||||
1;
|
||||
2
wcmtools/memcached/AUTHORS
Executable file
@@ -0,0 +1,2 @@
|
||||
Anatoly Vorobey <mellon@pobox.com>
|
||||
Brad Fitzpatrick <brad@danga.com>
|
||||
37
wcmtools/memcached/BUILD
Executable file
@@ -0,0 +1,37 @@
|
||||
Ideally, you want to make a static binary, otherwise the dynamic
|
||||
linker pollutes your address space with shared libs right in the
|
||||
middle. (NOTE: actually, this shouldn't matter so much anymore, now
|
||||
that we only allocate huge, fixed-size slabs)
|
||||
|
||||
Make sure your libevent has epoll (Linux) or kqueue (BSD) support.
|
||||
Using poll or select only is slow, and works for testing, but
|
||||
shouldn't be used for high-traffic memcache installations.
|
||||
|
||||
To build libevent with epoll on Linux, you need two things. First,
|
||||
you need /usr/include/sys/epoll.h . To get it, you can install the
|
||||
userspace epoll library, epoll-lib. The link to the latest version
|
||||
is buried inside
|
||||
http://www.xmailserver.org/linux-patches/nio-improve.html ; currently
|
||||
it's http://www.xmailserver.org/linux-patches/epoll-lib-0.9.tar.gz .
|
||||
If you're having any trouble building/installing it, you can just copy
|
||||
epoll.h from that tarball to /usr/include/sys as that's the only thing
|
||||
from there that libevent really needs.
|
||||
|
||||
Secondly, you need to declare syscall numbers of epoll syscalls, so
|
||||
libevent can use them. Put these declarations somewhere
|
||||
inside <sys/epoll.h>:
|
||||
|
||||
#define __NR_epoll_create 254
|
||||
#define __NR_epoll_ctl 255
|
||||
#define __NR_epoll_wait 256
|
||||
|
||||
After this you should be able to build libevent with epoll support.
|
||||
Once you build/install libevent, you don't need <sys/epoll.h> to
|
||||
compile memcache or link it against libevent. Don't forget that for epoll
|
||||
support to actually work at runtime you need to use a kernel with epoll
|
||||
support patch applied, as explained in the README file.
|
||||
|
||||
BSD users are luckier, and will get kqueue support by default.
|
||||
|
||||
|
||||
|
||||
48
wcmtools/memcached/CONTRIBUTORS
Executable file
@@ -0,0 +1,48 @@
|
||||
Brad Fitzpatrick <brad@danga.com>
|
||||
-- design/protocol
|
||||
-- Perl client
|
||||
-- prototype Perl server
|
||||
-- memory allocator design
|
||||
-- small enhancements/changes to C server
|
||||
-- website
|
||||
|
||||
Anatoly Vorobey <mellon@pobox.com>
|
||||
-- C server
|
||||
-- memory allocator design
|
||||
-- revised setuid code
|
||||
|
||||
Evan Martin <martine@danga.com>
|
||||
-- automake/autoconf support
|
||||
-- Python client
|
||||
-- portability work to build on OS X
|
||||
|
||||
Ryan <hotrodder@rocketmail.com>
|
||||
-- PHP client
|
||||
|
||||
Jamie McCarthy <jamie@mccarthy.vg>
|
||||
-- Perl client fixes: Makefile.PL, stats, doc updates
|
||||
|
||||
Lisa Marie Seelye <lisa@gentoo.org>
|
||||
-- packaging for Gentoo Linux
|
||||
-- initial setuid code
|
||||
|
||||
Sean Chittenden <seanc@FreeBSD.org>
|
||||
-- packaging for FreeBSD
|
||||
|
||||
Stuart Herbert <stuart@gentoo.org>
|
||||
-- fix for: memcached's php client can run in an infinite loop
|
||||
http://bugs.gentoo.org/show_bug.cgi?id=25385
|
||||
|
||||
Brion Vibber <brion@pobox.com>
|
||||
-- debugging abstraction in PHP client
|
||||
-- debugging the failure of daemon mode on FreeBSD
|
||||
|
||||
Brad Whitaker <whitaker@danga.com>
|
||||
-- compression support for the Perl API
|
||||
|
||||
Richard Russo <russor@msoe.edu>
|
||||
-- Java API
|
||||
|
||||
Ryan T. Dean <rtdean@cytherianage.net>
|
||||
-- Second PHP client with correct parsing (based on Perl client)
|
||||
-- autoconf fixes for mallinfo.arena on BSD (don't just check malloc.h)
|
||||
30
wcmtools/memcached/COPYING
Executable file
@@ -0,0 +1,30 @@
|
||||
Copyright (c) 2003, Danga Interactive, Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
* Neither the name of the Danga Interactive nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
178
wcmtools/memcached/ChangeLog
Executable file
@@ -0,0 +1,178 @@
|
||||
2005-05-25
|
||||
* patch from Peter van Dijk <peter@nextgear.nl> to make
|
||||
stderr unbuffered, for running under daemontools
|
||||
|
||||
2005-04-04
|
||||
* patch from Don MacAskill <don@smugmug.com> 'flush_all' doesn't
|
||||
seem to work properly. Basically, if you try to add a key which
|
||||
is present, but expired, the store fails but the old key is no
|
||||
longer expired.
|
||||
|
||||
* release 1.1.12
|
||||
|
||||
2005-01-14
|
||||
* Date: Thu, 18 Nov 2004 15:25:59 -0600
|
||||
From: David Phillips <electrum@gmail.com>
|
||||
Here is a patch to configure.ac and Makefile.am to put the man page in
|
||||
the correct location. Trying to install the man page from a
|
||||
subdirectory results in the subdirectory being used in the install
|
||||
path (it tries to install to doc/memcached.1). This is the correct
|
||||
thing to do:
|
||||
|
||||
- create a Makefile.am in the doc directory that installs the man page
|
||||
with man_MANS
|
||||
- modify Makefile.am in the base directory to reference the doc
|
||||
directory using SUBDIRS
|
||||
- modify the AC_CONFIG_FILES macro in configure.ac to output the
|
||||
Makefile in doc
|
||||
|
||||
|
||||
2005-01-14
|
||||
* pidfile saving support from Lisa Seelye <lisa@gentoo.org>, sent
|
||||
Jan 13, 2005
|
||||
|
||||
2005-01-14
|
||||
* don't delete libevent events that haven't been added (the deltimer)
|
||||
patch from Ted Schundler <tschundler@gmail.com>
|
||||
|
||||
2004-12-10
|
||||
* document -M and -r in manpage (Doug Porter <dsp@dsp.name>)
|
||||
|
||||
2004-07-22
|
||||
* fix buffer overflow in items.c with 250 byte keys along with
|
||||
other info on the same line going into a 256 byte char[].
|
||||
thanks to Andrei Nigmatulin <anight@monamour.ru>
|
||||
|
||||
2004-06-15
|
||||
* immediate deletes weren't being unlinked for a few seconds,
|
||||
preventing "add" commands to the same key in that time period.
|
||||
thanks to Michael Alan Dorman <mdorman@debian.org> for the
|
||||
bug report and demo script.
|
||||
|
||||
2004-04-30
|
||||
* released 1.1.11
|
||||
|
||||
2004-04-24
|
||||
* Avva: Add a new command line option: -r , to maximize core file
|
||||
limit.
|
||||
|
||||
2004-03-31
|
||||
* Avva: Use getrlimit and setrlimit to set limits for number of
|
||||
simultaneously open file descriptors. Get the current limits and
|
||||
try to raise them if they're not enough for the specified (or the
|
||||
default) setting of max connections.
|
||||
|
||||
2004-02-24
|
||||
* Adds a '-M' flag to turn off tossing items from the cache.
|
||||
(Jason Titus <jtitus@postini.com>)
|
||||
|
||||
2004-02-19 (Evan)
|
||||
* Install manpage on "make install", etc.
|
||||
|
||||
2003-12-30 (Brad)
|
||||
* remove static build stuff. interferes with PAM setuid stuff
|
||||
and was only included as a possible fix with the old memory
|
||||
allocator. really shouldn't make a difference.
|
||||
* add Jay Bonci's Debian scripts and manpage
|
||||
* release version 1.1.10
|
||||
|
||||
2003-12-01 (Avva)
|
||||
* New command: flush_all, causes all existing items to
|
||||
be invalidated immediately (without deleting them from
|
||||
memory, merely causing memcached to no longer return them).
|
||||
2003-10-23
|
||||
* Shift init code around to fix daemon mode on FreeBSD,
|
||||
  and drop root only after creating the server socket (to
  allow the use of privileged ports)
|
||||
* version 1.1.10pre
|
||||
|
||||
2003-10-09
|
||||
* BSD compile fixes from Ryan T. Dean
|
||||
* version 1.1.9
|
||||
|
||||
2003-09-29
|
||||
* ignore SIGPIPE at start instead of crashing in rare cases it
|
||||
comes up. no other code had to be modified, since everything
|
||||
else is already dead-connection-aware. (avva)
|
||||
|
||||
2003-09-09 (Avva, Lisa Marie Seelye <lisa@gentoo.org>)
|
||||
* setuid support
|
||||
|
||||
2003-09-05 (Avva)
|
||||
* accept all new connections in the same event (so we work with ET epoll)
|
||||
* mark all items as clsid=0 after slab page reassignment to please future
|
||||
asserts (on the road to making slab page reassignment work fully)
|
||||
|
||||
2003-08-12 (Brad Fitzpatrick)
|
||||
* use TCP_CORK on Linux or TCP_NOPUSH on BSD
|
||||
* only use TCP_NODELAY when we don't have alternatives
|
||||
|
||||
2003-08-10
|
||||
* disable Nagle's Algorithm (TCP_NODELAY) for better performance (avva)
|
||||
|
||||
2003-08-10
|
||||
* support multiple levels of verbosity (-vv)
|
||||
|
||||
2003-08-10 (Evan Martin)
|
||||
* Makefile.am: debug, optimization, and static flags are controlled
|
||||
by the configure script.
|
||||
* configure.ac:
|
||||
- allow specifying libevent directory with --with-libevent=DIR
|
||||
- check for malloc.h (unavailable on BSDs)
|
||||
- check for socklen_t (unavailable on OSX)
|
||||
* assoc.c, items.c, slabs.c: Remove some unused headers.
|
||||
* memcached.c: allow for nonexistence of malloc.h; #define a POSIX
|
||||
macro to import mlockall flags.
|
||||
|
||||
2003-07-29
|
||||
* version 1.1.7
|
||||
* big bug fix: item exptime 0 meant expire immediately, not never
|
||||
* version 1.1.8
|
||||
|
||||
2003-07-22
|
||||
* make 'delete' take second arg, of time to refuse new add/replace
|
||||
* set/add/replace/delete can all take abs or delta time (delta can't
|
||||
be larger than a month)
|
||||
|
||||
2003-07-21
|
||||
* added doc/protocol.txt
|
||||
|
||||
2003-07-01
|
||||
* report CPU usage in stats
|
||||
|
||||
2003-06-30
|
||||
* version 1.1.6
|
||||
* fix a number of obscure bugs
|
||||
* more stats reporting
|
||||
|
||||
2003-06-10
|
||||
* removing use of Judy; use a hash. (judy caused memory fragmentation)
|
||||
* shrink some structures
|
||||
* security improvements
|
||||
* version 1.1.0
|
||||
|
||||
2003-06-18
|
||||
* changing maxsize back to an unsigned int
|
||||
|
||||
2003-06-16
|
||||
* adding PHP support
|
||||
* added CONTRIBUTORS file
|
||||
* version 1.0.4
|
||||
|
||||
2003-06-15
|
||||
* forgot to distribute website/api (still learning auto*)
|
||||
* version 1.0.3
|
||||
|
||||
2003-06-15
|
||||
* update to version 1.0.2
|
||||
* autoconf/automake fixes for older versions
|
||||
* make stats report version number
|
||||
* change license from GPL to BSD
|
||||
|
||||
Fri, 13 Jun 2003 10:05:51 -0700 Evan Martin <martine@danga.com>
|
||||
|
||||
* configure.ac, autogen.sh, Makefile.am: Use autotools.
|
||||
* items.c, memcached.c: #include <time.h> for time(),
|
||||
printf time_t as %lu (is this correct?),
|
||||
minor warnings fixes.
|
||||
|
||||
30
wcmtools/memcached/LICENSE
Executable file
@@ -0,0 +1,30 @@
|
||||
Copyright (c) 2003, Danga Interactive, Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
* Neither the name of the Danga Interactive nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
10
wcmtools/memcached/Makefile.am
Executable file
@@ -0,0 +1,10 @@
|
||||
bin_PROGRAMS = memcached
|
||||
|
||||
memcached_SOURCES = memcached.c slabs.c items.c memcached.h assoc.c
|
||||
|
||||
SUBDIRS = doc
|
||||
DIST_DIRS = scripts
|
||||
EXTRA_DIST = doc scripts TODO
|
||||
|
||||
AM_CFLAGS=-DNDEBUG
|
||||
|
||||
1
wcmtools/memcached/NEWS
Executable file
@@ -0,0 +1 @@
|
||||
http://www.danga.com/memcached/news.bml
|
||||
22
wcmtools/memcached/README
Executable file
@@ -0,0 +1,22 @@
|
||||
Dependencies:
|
||||
|
||||
-- libevent, http://www.monkey.org/~provos/libevent/ (libevent-dev)
|
||||
|
||||
If using Linux, you need a kernel with epoll. Sure, libevent will
|
||||
work with normal select, but it sucks.
|
||||
|
||||
epoll isn't in Linux 2.4 yet, but there's a backport at:
|
||||
|
||||
http://www.xmailserver.org/linux-patches/nio-improve.html
|
||||
|
||||
You want the epoll-lt patch (level-triggered).
|
||||
|
||||
Also, be warned that the -k (mlockall) option to memcached might be
|
||||
dangerous when using a large cache. Just make sure the memcached machines
|
||||
don't swap. memcached does non-blocking network I/O, but not disk. (it
|
||||
should never go to disk, or you've lost the whole point of it)
|
||||
|
||||
The memcached website is at:
|
||||
|
||||
http://www.danga.com/memcached/
|
||||
|
||||
8
wcmtools/memcached/TODO
Executable file
@@ -0,0 +1,8 @@
|
||||
* slab class reassignment still buggy and can crash. once that's
|
||||
stable, server should re-assign pages every 60 seconds or so
|
||||
to keep all classes roughly equal. [Update: fixed now?, but
|
||||
not heavily tested. Future: make slab classes, with per-class
|
||||
cleaners functions.]
|
||||
|
||||
* calendar queue for early expirations of items, so they don't push
|
||||
out other objects with infinite expirations.
|
||||
56
wcmtools/memcached/api/java/CHANGELOG.txt
Executable file
@@ -0,0 +1,56 @@
|
||||
Version 0.9.1 - 12 Oct 2003
|
||||
-- Altered the SockIO helper class, so it no longer allows accessing
|
||||
the streams it contains directly, instead it has methods
|
||||
with identical signatures to the methods that were called
|
||||
on the streams... This makes the client code prettier.
|
||||
-- Changed looped non blocking read to blocking read, for getting
|
||||
items from the server. This probably reduces CPU usage in
|
||||
cases where the retrieval would block, and cleans up the
|
||||
code a bit. We're blocking on retrieval anyhow.
|
||||
-- Made get() not call get_multi(), and added single socket
|
||||
optimization. This parallels recent changes to the perl
|
||||
client
|
||||
-- Changed a few for loops to use iterators instead, since it's
|
||||
probably marginally more efficient, and it's probably
|
||||
better coding practice.
|
||||
-- Actually spell checked. :)
|
||||
|
||||
Version 0.9.0 - 29 Sep 2003
|
||||
-- Renumbered to reflect that it's not been realworld tested
|
||||
-- changed package to danga.com.MemCached (thanks)
|
||||
-- added dates to changelog
|
||||
-- added .txt to text files
|
||||
-- added to official memcached site :)
|
||||
|
||||
Version 1.0.0 - 28 Sep 2003
|
||||
-- Adjusted namespacing for SockIO, it shouldn't have been public; is now package level.
|
||||
As a bonus, this means I don't have to Javadoc it. :)
|
||||
-- Finished adding complete Javadoc to MemCachedClient.
|
||||
-- spellchecked
|
||||
-- added a couple versions of function variations that I missed. for example, some that
|
||||
didn't take an int directly as a hash value, and i missed a get_multi w/out hashes.
|
||||
-- removed java.net.Socket reference from MemCachedClient, SockIO has a new constructor which
|
||||
takes hostname and port number
|
||||
-- switched to three part version number
|
||||
|
||||
|
||||
|
||||
Version 0.3 - 27 Sep 2003
|
||||
-- Compression, for strings/stringified numbers, this is compatible w/ perl
|
||||
Serialized objects are incompatible w/ perl for obvious reasons. :)
|
||||
-- Added PORTABILITY file, to include information about using the client
|
||||
with various JVM's
|
||||
-- Updated string parsing to StreamTokenizer rather than regexp's in an
|
||||
effort to get sablevm to like the client
|
||||
|
||||
Version 0.2 - 24 Sep 2003
|
||||
-- Serialization works
|
||||
-- Possible BUG: Only the lower byte of the characters of keys are sent
|
||||
This is only a problem if the memcache server can handle
|
||||
unicode keys. (I haven't checked)
|
||||
-- Server Failures handled gracefully
|
||||
-- Partial Javadoc
|
||||
|
||||
Version 0.1 - 23 Sep 2003
|
||||
-- Initial Release
|
||||
-- Storing and Retrieving numbers and strings works
|
||||
18
wcmtools/memcached/api/java/PORTABILITY.txt
Executable file
@@ -0,0 +1,18 @@
|
||||
This file lists the portability status of this client. Please send me any
|
||||
additional information.
|
||||
|
||||
Richard 'toast' Russo <russor@msoe.edu>
|
||||
|
||||
|
||||
I have personally tested this on the following platforms, and it works to the
|
||||
best of my knowledge:
|
||||
|
||||
Sun's JRE 1.4.2 on Linux/i86
|
||||
kaffe 1.1.1 on Linux/i86
|
||||
|
||||
I have personally tested this on the following platforms, and it does not work:
|
||||
|
||||
sablevm 1.0.9: complains of todo in native_interface.c
|
||||
gjc(jdk) 3.3.2 20030908 (Debian prerelease): strange compiler errors
|
||||
gij(jre) 3.3.2 20030908 (Debian prerelease): does not get items from server properly
|
||||
|
||||
9
wcmtools/memcached/api/java/TODO.txt
Executable file
@@ -0,0 +1,9 @@
|
||||
These are the things left to do
|
||||
|
||||
Investigate threading issues (what needs to be synchronized)
|
||||
|
||||
Investigate 3rd party jvm incompatibility
|
||||
|
||||
Non deprecated stream input
|
||||
|
||||
Extensive testing
|
||||
1219
wcmtools/memcached/api/java/com/danga/MemCached/MemCachedClient.java
Executable file
File diff suppressed because it is too large
72
wcmtools/memcached/api/java/com/danga/MemCached/SockIO.java
Executable file
@@ -0,0 +1,72 @@
|
||||
/**
|
||||
* MemCached Java client, utility class for Socket IO
|
||||
* Copyright (c) 2003
|
||||
* Richard 'toast' Russo <russor@msoe.edu>
|
||||
* http://people.msoe.edu/~russor/memcached
|
||||
*
|
||||
*
|
||||
* This module is Copyright (c) 2003 Richard Russo.
|
||||
* All rights reserved.
|
||||
* You may distribute under the terms of the GNU General Public License
|
||||
* This is free software. IT COMES WITHOUT WARRANTY OF ANY KIND.
|
||||
*
|
||||
* @author Richard 'toast' Russo <russor@msoe.edu>
|
||||
* @version 0.9.1
|
||||
*/
|
||||
|
||||
|
||||
package com.danga.MemCached;
|
||||
|
||||
|
||||
import java.util.*;
|
||||
import java.net.*;
|
||||
import java.io.*;
|
||||
|
||||
|
||||
|
||||
class SockIO {
|
||||
Socket sock;
|
||||
DataInputStream in;
|
||||
DataOutputStream out;
|
||||
boolean closed = false;
|
||||
|
||||
public SockIO(String host, int port) throws IOException {
|
||||
sock = new Socket(host,port);
|
||||
in = new DataInputStream(sock.getInputStream());
|
||||
out = new DataOutputStream(sock.getOutputStream());
|
||||
|
||||
}
|
||||
|
||||
public void close() {
|
||||
closed = true;
|
||||
try {
|
||||
in.close();
|
||||
out.close();
|
||||
sock.close();
|
||||
} catch (IOException e) {
|
||||
}
|
||||
|
||||
}
|
||||
public boolean isConnected() {
|
||||
return (!closed && sock.isConnected());
|
||||
}
|
||||
|
||||
public void readFully(byte[] b) throws IOException {
|
||||
in.readFully(b);
|
||||
}
|
||||
|
||||
public String readLine() throws IOException {
|
||||
return in.readLine();
|
||||
}
|
||||
|
||||
public void writeBytes(String s) throws IOException {
|
||||
out.writeBytes(s);
|
||||
}
|
||||
public void flush() throws IOException {
|
||||
out.flush();
|
||||
}
|
||||
public void write(byte[] b) throws IOException {
|
||||
out.write(b);
|
||||
}
|
||||
|
||||
}
|
||||
79
wcmtools/memcached/api/java/dist.pl
Executable file
@@ -0,0 +1,79 @@
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
# Simplify Brad's life. I'm sure there's a Java-specific way
|
||||
# to do this (or there should be), but I don't have Java
|
||||
# installed.
|
||||
#
|
||||
|
||||
use strict;
|
||||
use Getopt::Long;
|
||||
|
||||
my $opt_tar = 0;
|
||||
my $opt_upload = 0;
|
||||
exit 1 unless GetOptions("tar" => \$opt_tar,
|
||||
"upload" => \$opt_upload);
|
||||
|
||||
# chdir to the directory the script's at, so future
|
||||
# paths need only be relative
|
||||
use FindBin qw($Bin);
|
||||
chdir $Bin or die "Couldn't cd to $Bin\n";
|
||||
|
||||
die "Must use --tar or --upload\n" unless $opt_tar || $opt_upload;
|
||||
|
||||
# files to distribute
|
||||
my @manifest = qw(
|
||||
TODO.txt
|
||||
PORTABILITY.txt
|
||||
CHANGELOG.txt
|
||||
memcachetest.java
|
||||
com
|
||||
com/danga
|
||||
com/danga/MemCached
|
||||
com/danga/MemCached/MemCachedClient.java
|
||||
com/danga/MemCached/SockIO.java
|
||||
);
|
||||
|
||||
# figure out the version number
|
||||
|
||||
open (F, "com/danga/MemCached/MemCachedClient.java") or die;
|
||||
{ local $/ = undef; $_ = <F>; } # suck in the whole file
|
||||
close F;
|
||||
die "Can't find version number\n" unless
|
||||
/\@version\s+(\d[^\'\"\s]+)/s;
|
||||
my $ver = $1;
|
||||
my $dir = "java-memcached-$ver";
|
||||
my $dist = "$dir.tar.gz";
|
||||
|
||||
if ($opt_tar) {
|
||||
# make a fresh directory
|
||||
mkdir $dir or die "Couldn't make directory: $dir\n";
|
||||
|
||||
# copy files to fresh directory
|
||||
foreach my $file (@manifest) {
|
||||
if (-f $file) {
|
||||
system("cp", $file, "$dir/$file")
|
||||
and die "Error copying file $file\n";
|
||||
} elsif (-d $file) {
|
||||
mkdir "$dir/$file"
|
||||
or die "Error creating directory $file\n";
|
||||
}
|
||||
}
|
||||
|
||||
# tar it up
|
||||
system("tar", "zcf", $dist, $dir)
|
||||
and die "Error running tar.\n";
|
||||
|
||||
# remove temp directory
|
||||
system("rm", "-rf", $dir)
|
||||
and die "Error cleaning up temp directory\n";
|
||||
|
||||
print "$dist created.\n";
|
||||
}
|
||||
|
||||
if ($opt_upload) {
|
||||
print "Uploading $dist...\n";
|
||||
system("scp", $dist, 'bradfitz@danga.com:memd/dist/')
|
||||
and die "Error uploading to memcached/dist\n";
|
||||
}
|
||||
|
||||
print "Done.\n";
|
||||
Some files were not shown because too many files have changed in this diff.