From af23562d9991ad9eb908b79638506721469bbdf6 Mon Sep 17 00:00:00 2001 From: Ole Tange Date: Mon, 28 Jul 2014 00:30:54 +0200 Subject: [PATCH] parallel: Fixed bug #42842: --bar with weird chars. Fixed bug #42845: rsync 3.1.x fails against 2.5.7. Give warning if reading arguments (for --eta/--bar) takes > 30 sec. A few more characters annoy tmux. parallel.pod: Bash array copy function. --- CREDITS | 2 + src/niceload.texi | 459 ---- src/parallel | 186 +- src/parallel.pod | 30 +- src/parallel.texi | 4510 ------------------------------- src/sem.pdf | Bin 32937 -> 0 bytes src/sem.texi | 359 --- src/sql.texi | 540 ---- testsuite/wanted-results/test61 | 2 +- 9 files changed, 141 insertions(+), 5947 deletions(-) create mode 100644 CREDITS delete mode 100644 src/niceload.texi delete mode 100644 src/parallel.texi delete mode 100644 src/sem.pdf delete mode 100644 src/sem.texi delete mode 100644 src/sql.texi diff --git a/CREDITS b/CREDITS new file mode 100644 index 00000000..1b20c2ab --- /dev/null +++ b/CREDITS @@ -0,0 +1,2 @@ +rici@stackoverflow.com: Documentation on exporting arrays using --env. +Malcolm Cook: The idea to use a general perl expression as replacement strings. diff --git a/src/niceload.texi b/src/niceload.texi deleted file mode 100644 index 9560aa3d..00000000 --- a/src/niceload.texi +++ /dev/null @@ -1,459 +0,0 @@ -\input texinfo -@setfilename niceload.info - -@documentencoding utf-8 - -@settitle niceload - slow down a program when the load average is above a certain limit - -@node Top -@top niceload - -@menu -* NAME:: -* SYNOPSIS:: -* DESCRIPTION:: -* OPTIONS:: -* EXAMPLE@asis{:} See niceload in action:: -* EXAMPLE@asis{:} Run updatedb:: -* EXAMPLE@asis{:} Run rsync:: -* EXAMPLE@asis{:} Ensure enough disk cache:: -* ENVIRONMENT VARIABLES:: -* EXIT STATUS:: -* REPORTING BUGS:: -* AUTHOR:: -* LICENSE:: -* DEPENDENCIES:: -* SEE ALSO:: -@end menu - -@node NAME -@chapter NAME - -niceload - slow down a program when the load average is above a certain limit - -@node SYNOPSIS -@chapter SYNOPSIS - -@strong{niceload} [-v] [-h] [-n nice] [-I io] [-L load] [-M mem] [-N] -[--sensor program] [-t time] [-s time|-f factor] ( command | -p PID [-p PID ...] ) - -@node DESCRIPTION -@chapter DESCRIPTION - -GNU @strong{niceload} will slow down a program when the load average (or -other system activity) is above a certain limit. When the limit is -reached the program will be suspended for some time. Then resumed -again for some time. Then the load average is checked again and we -start over. - -Instead of load average @strong{niceload} can also look at disk I/O, amount -of free memory, or swapping activity. - -If the load is 3.00 then the default settings will run a program -like this: - -run 1 second, suspend (3.00-1.00) seconds, run 1 second, suspend -(3.00-1.00) seconds, run 1 second, ... - -@node OPTIONS -@chapter OPTIONS - -@table @asis -@item @strong{-f} @emph{FACTOR} -@anchor{@strong{-f} @emph{FACTOR}} - -@item @strong{--factor} @emph{FACTOR} -@anchor{@strong{--factor} @emph{FACTOR}} - -Suspend time factor. Dynamically set @strong{-s} as amount over limit * -factor. Default is 1. - -@item @strong{-H} -@anchor{@strong{-H}} - -@item @strong{--hard} -@anchor{@strong{--hard}} - -Hard limit. @strong{--hard} will suspend the process until the system is -under the limits. The default is @strong{--soft}. - -@item @strong{--io} @emph{iolimit} -@anchor{@strong{--io} @emph{iolimit}} - -@item @strong{-I} @emph{iolimit} -@anchor{@strong{-I} @emph{iolimit}} - -Limit for I/O. 
The amount of disk I/O will be computed as a value 0 - -10, where 0 is no I/O and 10 is at least one disk is 100% saturated. - -@strong{--io} will set both @strong{--start-io} and @strong{run-io}. - -@item @strong{--load} @emph{loadlimit} -@anchor{@strong{--load} @emph{loadlimit}} - -@item @strong{-L} @emph{loadlimit} -@anchor{@strong{-L} @emph{loadlimit}} - -Limit for load average. - -@strong{--load} will set both @strong{--start-load} and @strong{run-load}. - -@item @strong{--mem} @emph{memlimit} -@anchor{@strong{--mem} @emph{memlimit}} - -@item @strong{-M} @emph{memlimit} -@anchor{@strong{-M} @emph{memlimit}} - -Limit for free memory. This is the amount of bytes available as free -+ cache. This limit is treated opposite other limits: If the system -is above the limit the program will run, if it is below the limit the -program will stop - -@emph{memlimit} can be postfixed with K, M, G, T, or P which would -multiply the size with 1024, 1048576, 1073741824, or 1099511627776 -respectively. - -@strong{--mem} will set both @strong{--start-mem} and @strong{run-mem}. - -@item @strong{--noswap} -@anchor{@strong{--noswap}} - -@item @strong{-N} -@anchor{@strong{-N}} - -No swapping. If the system is swapping both in and out it is a good -indication that the system is memory stressed. - -@strong{--noswap} is over limit if the system is swapping both in and out. - -@strong{--noswap} will set both @strong{--start-noswap} and @strong{run-noswap}. - -@item @strong{-n} @emph{niceness} -@anchor{@strong{-n} @emph{niceness}} - -@item @strong{--nice} @emph{niceness} -@anchor{@strong{--nice} @emph{niceness}} - -Sets niceness. See @strong{nice}(1). - -@item @strong{-p} @emph{PID} (alpha testing) -@anchor{@strong{-p} @emph{PID} (alpha testing)} - -@item @strong{--pid} @emph{PID} (alpha testing) -@anchor{@strong{--pid} @emph{PID} (alpha testing)} - -Process ID of process to suspend. You can specify multiple process IDs -with multiple @strong{-p} @emph{PID}. - -@item @strong{--prg} @emph{program} (alpha testing) -@anchor{@strong{--prg} @emph{program} (alpha testing)} - -@item @strong{--program} @emph{program} (alpha testing) -@anchor{@strong{--program} @emph{program} (alpha testing)} - -Name of running program to suspend. You can specify multiple programs -with multiple @strong{--prg} @emph{program}. - -@item @strong{--quote} -@anchor{@strong{--quote}} - -@item @strong{-q} -@anchor{@strong{-q}} - -Quote the command line. Useful if the command contains chars like *, -$, >, and " that should not be interpreted by the shell. - -@item @strong{--run-io} @emph{iolimit} -@anchor{@strong{--run-io} @emph{iolimit}} - -@item @strong{--ri} @emph{iolimit} -@anchor{@strong{--ri} @emph{iolimit}} - -@item @strong{--run-load} @emph{loadlimit} -@anchor{@strong{--run-load} @emph{loadlimit}} - -@item @strong{--rl} @emph{loadlimit} -@anchor{@strong{--rl} @emph{loadlimit}} - -@item @strong{--run-mem} @emph{memlimit} -@anchor{@strong{--run-mem} @emph{memlimit}} - -@item @strong{--rm} @emph{memlimit} -@anchor{@strong{--rm} @emph{memlimit}} - -Run limit. The running program will be slowed down if the system is -above the limit. See: @strong{--io}, @strong{--load}, @strong{--mem}, @strong{--noswap}. - -@item @strong{--sensor} @emph{sensor program} (alpha testing) -@anchor{@strong{--sensor} @emph{sensor program} (alpha testing)} - -Read sensor. Use @emph{sensor program} to read a sensor. 
- -This will keep the CPU temperature below 80 deg C on GNU/Linux: - -@verbatim - niceload -l 80000 -f 0.001 --sensor 'sort -n /sys/devices/platform/coretemp*/temp*_input' gzip * -@end verbatim - -This will stop if the disk space < 100000. - -@verbatim - niceload -H -l -100000 --sensor "df . | awk '{ print \$4 }'" echo -@end verbatim - -@item @strong{--start-io} @emph{iolimit} -@anchor{@strong{--start-io} @emph{iolimit}} - -@item @strong{--si} @emph{iolimit} -@anchor{@strong{--si} @emph{iolimit}} - -@item @strong{--start-load} @emph{loadlimit} -@anchor{@strong{--start-load} @emph{loadlimit}} - -@item @strong{--sl} @emph{loadlimit} -@anchor{@strong{--sl} @emph{loadlimit}} - -@item @strong{--start-mem} @emph{memlimit} -@anchor{@strong{--start-mem} @emph{memlimit}} - -@item @strong{--sm} @emph{memlimit} -@anchor{@strong{--sm} @emph{memlimit}} - -Start limit. The program will not start until the system is below the -limit. See: @strong{--io}, @strong{--load}, @strong{--mem}, @strong{--noswap}. - -@item @strong{--soft} -@anchor{@strong{--soft}} - -@item @strong{-S} -@anchor{@strong{-S}} - -Soft limit. @strong{niceload} will suspend a process for a while and then -let it run for a second thus only slowing down a process while the -system is over one of the given limits. This is the default. - -@item @strong{--suspend} @emph{SEC} -@anchor{@strong{--suspend} @emph{SEC}} - -@item @strong{-s} @emph{SEC} -@anchor{@strong{-s} @emph{SEC}} - -Suspend time. Suspend the command this many seconds when the max load -average is reached. - -@item @strong{--recheck} @emph{SEC} -@anchor{@strong{--recheck} @emph{SEC}} - -@item @strong{-t} @emph{SEC} -@anchor{@strong{-t} @emph{SEC}} - -Recheck load time. Sleep SEC seconds before checking load -again. Default is 1 second. - -@item @strong{--verbose} -@anchor{@strong{--verbose}} - -@item @strong{-v} -@anchor{@strong{-v}} - -Verbose. Print some extra output on what is happening. Use @strong{-v} until -you know what your are doing. - -@end table - -@node EXAMPLE: See niceload in action -@chapter EXAMPLE: See niceload in action - -In terminal 1 run: top - -In terminal 2 run: - -@strong{niceload -q perl -e '$|=1;do@{$l==$r or print "."; $l=$r@}until(($r=time-$^T)}>@strong{50)'} - -This will print a '.' every second for 50 seconds and eat a lot of -CPU. When the load rises to 1.0 the process is suspended. - -@node EXAMPLE: Run updatedb -@chapter EXAMPLE: Run updatedb - -Running updatedb can often starve the system for disk I/O and thus result in a high load. - -Run updatedb but suspend updatedb if the load is above 2.00: - -@strong{niceload -L 2 updatedb} - -@node EXAMPLE: Run rsync -@chapter EXAMPLE: Run rsync - -rsync can just like updatedb starve the system for disk I/O and thus result in a high load. - -Run rsync but keep load below 3.4. If load reaches 7 sleep for -(7-3.4)*12 seconds: - -@strong{niceload -L 3.4 -f 12 rsync -Ha /home/ /backup/home/} - -@node EXAMPLE: Ensure enough disk cache -@chapter EXAMPLE: Ensure enough disk cache - -Assume the program @strong{foo} uses 2 GB files intensively. @strong{foo} will run -fast if the files are in disk cache and be slow as a crawl if they are -not in the cache. - -To ensure 2 GB are reserved for disk cache run: - -@strong{niceload --hard --run-mem 2g foo} - -This will not guarantee that the 2 GB memory will be used for the -files for @strong{foo}, but it will stop @strong{foo} if the memory for disk cache -is too low. - -@node ENVIRONMENT VARIABLES -@chapter ENVIRONMENT VARIABLES - -None. 
In future versions $NICELOAD will be able to contain default settings. - -@node EXIT STATUS -@chapter EXIT STATUS - -Exit status should be the same as the command being run (untested). - -@node REPORTING BUGS -@chapter REPORTING BUGS - -Report bugs to . - -@node AUTHOR -@chapter AUTHOR - -Copyright (C) 2004-11-19 Ole Tange, http://ole.tange.dk - -Copyright (C) 2005,2006,2006,2008,2009,2010 Ole Tange, http://ole.tange.dk - -Copyright (C) 2010,2011,2012 Ole Tange, http://ole.tange.dk and Free -Software Foundation, Inc. - -@node LICENSE -@chapter LICENSE - -Copyright (C) 2010,2011,2012 Free Software Foundation, Inc. - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 3 of the License, or -at your option any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . - -@menu -* Documentation license I:: -* Documentation license II:: -@end menu - -@node Documentation license I -@section Documentation license I - -Permission is granted to copy, distribute and/or modify this documentation -under the terms of the GNU Free Documentation License, Version 1.3 or -any later version published by the Free Software Foundation; with no -Invariant Sections, with no Front-Cover Texts, and with no Back-Cover -Texts. A copy of the license is included in the file fdl.txt. - -@node Documentation license II -@section Documentation license II - -You are free: - -@table @asis -@item @strong{to Share} -@anchor{@strong{to Share}} - -to copy, distribute and transmit the work - -@item @strong{to Remix} -@anchor{@strong{to Remix}} - -to adapt the work - -@end table - -Under the following conditions: - -@table @asis -@item @strong{Attribution} -@anchor{@strong{Attribution}} - -You must attribute the work in the manner specified by the author or -licensor (but not in any way that suggests that they endorse you or -your use of the work). - -@item @strong{Share Alike} -@anchor{@strong{Share Alike}} - -If you alter, transform, or build upon this work, you may distribute -the resulting work only under the same, similar or a compatible -license. - -@end table - -With the understanding that: - -@table @asis -@item @strong{Waiver} -@anchor{@strong{Waiver}} - -Any of the above conditions can be waived if you get permission from -the copyright holder. - -@item @strong{Public Domain} -@anchor{@strong{Public Domain}} - -Where the work or any of its elements is in the public domain under -applicable law, that status is in no way affected by the license. - -@item @strong{Other Rights} -@anchor{@strong{Other Rights}} - -In no way are any of the following rights affected by the license: - -@itemize -@item Your fair dealing or fair use rights, or other applicable -copyright exceptions and limitations; - -@item The author's moral rights; - -@item Rights other persons may have either in the work itself or in -how the work is used, such as publicity or privacy rights. - -@end itemize - -@end table - -@table @asis -@item @strong{Notice} -@anchor{@strong{Notice}} - -For any reuse or distribution, you must make clear to others the -license terms of this work. 
- -@end table - -A copy of the full license is included in the file as cc-by-sa.txt. - -@node DEPENDENCIES -@chapter DEPENDENCIES - -GNU @strong{niceload} uses Perl, and the Perl modules POSIX, and -Getopt::Long. - -@node SEE ALSO -@chapter SEE ALSO - -@strong{parallel}(1), @strong{nice}(1), @strong{uptime}(1) - -@bye diff --git a/src/parallel b/src/parallel index 404d4fcb..85a76859 100755 --- a/src/parallel +++ b/src/parallel @@ -852,7 +852,8 @@ sub parse_options { if(defined $opt::fg) { $Global::semaphore = 1; } if(defined $opt::bg) { $Global::semaphore = 1; } if(defined $opt::wait) { $Global::semaphore = 1; } - if(defined $opt::halt_on_error and $opt::halt_on_error=~/%/) { $opt::halt_on_error /= 100; } + if(defined $opt::halt_on_error and + $opt::halt_on_error=~/%/) { $opt::halt_on_error /= 100; } if(defined $opt::timeout and $opt::timeout !~ /^\d+(\.\d+)?%?$/) { ::error("--timeout must be seconds or percentage\n"); wait_and_exit(255); @@ -1817,6 +1818,8 @@ sub progress { if($opt::bar) { my $arg = $Global::newest_job ? $Global::newest_job->{'commandline'}->replace_placeholders(["\257<\257>"],0,0) : ""; + # [\011\013\014] messes up display in the terminal + $arg =~ s/[\011\013\014]//g; my $bar_text = sprintf("%d%% %d:%d=%ds %s", $pctcomplete*100, $completed, $left, $this_eta, $arg); my $rev = ''; @@ -4229,74 +4232,98 @@ sub control_path_dir { return $self->{'control_path_dir'}; } - sub rsync_transfer_cmd { - # Command to run to transfer a file - # Input: - # $file = filename of file to transfer - # $workdir = destination dir - # Returns: - # $cmd = rsync command to run to transfer $file ("" if unreadable) - my $self = shift; - my $file = shift; - my $workdir = shift; - if(not -r $file) { - ::warning($file, " is not readable and will not be transferred.\n"); - return "true"; - } - my $rsync_destdir; - if($file =~ m:^/:) { - # rsync /foo/bar / - $rsync_destdir = "/"; - } else { - $rsync_destdir = ::shell_quote_file($workdir); - } - $file = ::shell_quote_file($file); - my $sshcmd = $self->sshcommand(); - my $rsync_opt = "-rlDzR -e" . ::shell_quote_scalar($sshcmd); - my $serverlogin = $self->serverlogin(); - # Make dir if it does not exist - return "( $sshcmd $serverlogin mkdir -p $rsync_destdir;" . - "rsync $rsync_opt $file $serverlogin:$rsync_destdir )"; + # Command to run to transfer a file + # Input: + # $file = filename of file to transfer + # $workdir = destination dir + # Returns: + # $cmd = rsync command to run to transfer $file ("" if unreadable) + my $self = shift; + my $file = shift; + my $workdir = shift; + if(not -r $file) { + ::warning($file, " is not readable and will not be transferred.\n"); + return "true"; + } + my $rsync_destdir; + if($file =~ m:^/:) { + # rsync /foo/bar / + $rsync_destdir = "/"; + } else { + $rsync_destdir = ::shell_quote_file($workdir); + } + $file = ::shell_quote_file($file); + my $sshcmd = $self->sshcommand(); + my $rsync_opt = "-rlDzR -e" . ::shell_quote_scalar($sshcmd); + my $serverlogin = $self->serverlogin(); + # Make dir if it does not exist + return "( $sshcmd $serverlogin mkdir -p $rsync_destdir;" . + rsync()." 
$rsync_opt $file $serverlogin:$rsync_destdir )"; } sub cleanup_cmd { - # Command to run to remove the remote file - # Input: - # $file = filename to remove - # $workdir = destination dir - # Returns: - # $cmd = ssh command to run to remove $file and empty parent dirs - my $self = shift; - my $file = shift; - my $workdir = shift; - my $f = $file; - if($f =~ m:/\./:) { - # foo/bar/./baz/quux => workdir/baz/quux - # /foo/bar/./baz/quux => workdir/baz/quux - $f =~ s:.*/\./:$workdir/:; - } elsif($f =~ m:^[^/]:) { - # foo/bar => workdir/foo/bar - $f = $workdir."/".$f; - } - my @subdirs = split m:/:, ::dirname($f); - my @rmdir; - my $dir = ""; - for(@subdirs) { - $dir .= $_."/"; - unshift @rmdir, ::shell_quote_file($dir); - } - my $rmdir = @rmdir ? "rmdir @rmdir 2>/dev/null;" : ""; - if(defined $opt::workdir and $opt::workdir eq "...") { - $rmdir .= "rm -rf " . ::shell_quote_file($workdir).';'; - } + # Command to run to remove the remote file + # Input: + # $file = filename to remove + # $workdir = destination dir + # Returns: + # $cmd = ssh command to run to remove $file and empty parent dirs + my $self = shift; + my $file = shift; + my $workdir = shift; + my $f = $file; + if($f =~ m:/\./:) { + # foo/bar/./baz/quux => workdir/baz/quux + # /foo/bar/./baz/quux => workdir/baz/quux + $f =~ s:.*/\./:$workdir/:; + } elsif($f =~ m:^[^/]:) { + # foo/bar => workdir/foo/bar + $f = $workdir."/".$f; + } + my @subdirs = split m:/:, ::dirname($f); + my @rmdir; + my $dir = ""; + for(@subdirs) { + $dir .= $_."/"; + unshift @rmdir, ::shell_quote_file($dir); + } + my $rmdir = @rmdir ? "rmdir @rmdir 2>/dev/null;" : ""; + if(defined $opt::workdir and $opt::workdir eq "...") { + $rmdir .= "rm -rf " . ::shell_quote_file($workdir).';'; + } - $f = ::shell_quote_file($f); - my $sshcmd = $self->sshcommand(); - my $serverlogin = $self->serverlogin(); - return "$sshcmd $serverlogin ".::shell_quote_scalar("(rm -f $f; $rmdir)"); + $f = ::shell_quote_file($f); + my $sshcmd = $self->sshcommand(); + my $serverlogin = $self->serverlogin(); + return "$sshcmd $serverlogin ".::shell_quote_scalar("(rm -f $f; $rmdir)"); } +{ + my $rsync; + + sub rsync { + # rsync 3.1.x uses protocol 31 which is unsupported by 2.5.7. 
+ # If the version >= 3.1.0: downgrade to protocol 30 + if(not $rsync) { + my @out = `rsync --version`; + for (@out) { + if(/version (\d+.\d+)(.\d+)?/) { + if($1 >= 3.1) { + # Version 3.1.0 or later: Downgrade to protocol 30 + $rsync = "rsync --protocol 30"; + } else { + $rsync = "rsync"; + } + } + } + $rsync or ::die_bug("Cannot figure out version of rsync: @out"); + } + return $rsync; + } +} + + package JobQueue; sub new { @@ -4352,9 +4379,20 @@ sub total_jobs { if(not defined $self->{'total_jobs'}) { my $job; my @queue; + my $start = time; + while($job = $self->get()) { + if(time - $start > 30) { + ::warning("Reading all arguments takes longer than 30 seconds.\n"); + $opt::eta && ::warning("Consider removing --eta.\n"); + $opt::bar && ::warning("Consider removing --bar.\n"); + last; + } + push @queue, $job; + } while($job = $self->get()) { push @queue, $job; } + $self->unget(@queue); $self->{'total_jobs'} = $#queue+1; } @@ -5042,8 +5080,8 @@ sub transfersize { } sub sshtransfer { - # Returns for each transfer file: - # rsync $file remote:$workdir + # Returns for each transfer file: + # rsync $file remote:$workdir my $self = shift; my @pre; my $sshlogin = $self->sshlogin(); @@ -5109,9 +5147,9 @@ sub sshreturn { my $basename = ::shell_quote_scalar(::shell_quote_file(basename($file))); # --return # mkdir -p /home/tange/dir/subdir/; - # rsync -rlDzR --rsync-path="cd /home/tange/dir/subdir/; rsync" + # rsync (--protocol 30) -rlDzR --rsync-path="cd /home/tange/dir/subdir/; rsync" # server:file.gz /home/tange/dir/subdir/ - $pre .= "mkdir -p $basedir$cd; rsync $rsync_cd $rsync_opt $serverlogin:". + $pre .= "mkdir -p $basedir$cd; ".$sshlogin->rsync()." $rsync_cd $rsync_opt $serverlogin:". $basename . " ".$basedir.$cd.";"; } return $pre; @@ -5365,8 +5403,9 @@ sub tmux_wrap { unlink $tmpfile; my $visual_command = $self->replaced(); my $title = ::undef_as_empty($self->{'commandline'}->replace_placeholders(["\257<\257>"],0,0)).""; - # ascii 194-224 annoys tmux - $title =~ s/[\011-\016;\302-\340]//g; + # ; causes problems + # ascii 194-245 annoys tmux + $title =~ s/[\011-\016;\302-\365]//g; my $tmux; if($Global::total_running == 0) { @@ -5378,11 +5417,12 @@ sub tmux_wrap { } return "mkfifo $tmpfile; $tmux ". # Run in tmux - ::shell_quote_scalar("(".$actual_command.');(echo $?$status;echo 255) >'.$tmpfile.";". - "echo ".::shell_quote_scalar($visual_command).";". - "echo \007Job finished at: `date`;sleep 10"). - # Run outside tmux - "; exit `perl -ne '1..1 and print' $tmpfile;rm $tmpfile` "; + ::shell_quote_scalar( + "(".$actual_command.');(echo $?$status;echo 255) >'.$tmpfile.";". + "echo ".::shell_quote_scalar($visual_command).";". + "echo \007Job finished at: `date`;sleep 10"). + # Run outside tmux + "; exit `perl -ne 'unlink $ARGV; 1..1 and print' $tmpfile;rm $tmpfile` "; } sub is_already_in_results { diff --git a/src/parallel.pod b/src/parallel.pod index 07baca15..692df53c 100644 --- a/src/parallel.pod +++ b/src/parallel.pod @@ -95,7 +95,7 @@ B to export and to set $SHELL to bash: exportf my_func SHELL=/bin/bash parallel "my_func {}" ::: 1 2 -The command cannot contain the character \257 (¯). +The command cannot contain the character \257 (macron: ¯). =item B<{}> (alpha testing) @@ -568,6 +568,26 @@ In Bash I can also be a Bash function - just remember to B. @@ -612,7 +632,7 @@ B uses GNU B to compute this, so you will get an infinite loop. This will likely be fixed in a later release. -=item B<--gnu> +=item B<--gnu> Behave like GNU B. 
If B<--tollef> and B<--gnu> are both set, B<--gnu> takes precedence. B<--tollef> is retired, but B<--gnu> is @@ -1094,7 +1114,7 @@ with 'y' or 'Y'. Implies B<-t>. Use to define start and end parenthesis for B<{= perl expression =}>. The left and the right parenthesis can be multiple characters and are -assumed to be the same length. The default is B<{==}> giving +assumed to be the same length. The default is B<{==}> giving B<{=> as the start parenthesis and B<=}> as the end parenthesis. Another useful setting is B<,,,,> which would make both parenthesis @@ -1855,7 +1875,7 @@ Compare these two: Arguments will be recycled if one input source has more arguments than the others: - parallel --xapply echo {1} {2} {3} ::: 1 2 ::: I II III ::: a b c d e f g + parallel --xapply echo {1} {2} {3} ::: 1 2 ::: I II III ::: a b c d e f g See also B<--header>. @@ -2459,7 +2479,7 @@ time and be warned if they are ever changed. To do that: (echo 'Host *'; echo StrictHostKeyChecking no) >> .ssh/config parallel --slf my_cluster --nonall true # Remove the disabling of StrictHostKeyChecking - mv .ssh/config.backup .ssh/config + mv .ssh/config.backup .ssh/config The servers in B<.parallel/my_cluster> are now added in B<.ssh/known_hosts>. diff --git a/src/parallel.texi b/src/parallel.texi deleted file mode 100644 index 2c224f66..00000000 --- a/src/parallel.texi +++ /dev/null @@ -1,4510 +0,0 @@ -\input texinfo -@setfilename parallel.info - -@documentencoding utf-8 - -@settitle parallel - build and execute shell command lines from standard input in parallel - -@node Top -@top parallel - -@menu -* NAME:: -* SYNOPSIS:: -* DESCRIPTION:: -* OPTIONS:: -* EXAMPLE@asis{:} Working as xargs -n1. Argument appending:: -* EXAMPLE@asis{:} Reading arguments from command line:: -* EXAMPLE@asis{:} Inserting multiple arguments:: -* EXAMPLE@asis{:} Context replace:: -* EXAMPLE@asis{:} Compute intensive jobs and substitution:: -* EXAMPLE@asis{:} Substitution and redirection:: -* EXAMPLE@asis{:} Composed commands:: -* EXAMPLE@asis{:} Calling Bash functions:: -* EXAMPLE@asis{:} Removing file extension when processing files:: -* EXAMPLE@asis{:} Removing two file extensions when processing files and calling GNU Parallel from itself:: -* EXAMPLE@asis{:} Download 10 images for each of the past 30 days:: -* EXAMPLE@asis{:} Digtal clock with "blinking" @asis{:}:: -* EXAMPLE@asis{:} Breadth first parallel web crawler/mirrorer:: -* EXAMPLE@asis{:} Process files from a tar file while unpacking:: -* EXAMPLE@asis{:} Rewriting a for-loop and a while-read-loop:: -* EXAMPLE@asis{:} Rewriting nested for-loops:: -* EXAMPLE@asis{:} Finding the lowest difference between files:: -* EXAMPLE@asis{:} for-loops with column names:: -* EXAMPLE@asis{:} Count the differences between all files in a dir:: -* EXAMPLE@asis{:} Speeding up fast jobs:: -* EXAMPLE@asis{:} Using shell variables:: -* EXAMPLE@asis{:} Group output lines:: -* EXAMPLE@asis{:} Tag output lines:: -* EXAMPLE@asis{:} Keep order of output same as order of input:: -* EXAMPLE@asis{:} Parallel grep:: -* EXAMPLE@asis{:} Grepping n lines for m regular expressions.:: -* EXAMPLE@asis{:} Using remote computers:: -* EXAMPLE@asis{:} Transferring of files:: -* EXAMPLE@asis{:} Distributing work to local and remote computers:: -* EXAMPLE@asis{:} Running the same command on remote computers:: -* EXAMPLE@asis{:} Parallelizing rsync:: -* EXAMPLE@asis{:} Use multiple inputs in one command:: -* EXAMPLE@asis{:} Use a table as input:: -* EXAMPLE@asis{:} Run the same command 10 times:: -* EXAMPLE@asis{:} Working as 
cat | sh. Resource inexpensive jobs and evaluation:: -* EXAMPLE@asis{:} Processing a big file using more cores:: -* EXAMPLE@asis{:} Running more than 500 jobs workaround:: -* EXAMPLE@asis{:} Working as mutex and counting semaphore:: -* EXAMPLE@asis{:} Start editor with filenames from stdin (standard input):: -* EXAMPLE@asis{:} Running sudo:: -* EXAMPLE@asis{:} GNU Parallel as queue system/batch manager:: -* EXAMPLE@asis{:} GNU Parallel as dir processor:: -* QUOTING:: -* LIST RUNNING JOBS:: -* COMPLETE RUNNING JOBS BUT DO NOT START NEW JOBS:: -* ENVIRONMENT VARIABLES:: -* DEFAULT PROFILE (CONFIG FILE):: -* PROFILE FILES:: -* EXIT STATUS:: -* DIFFERENCES BETWEEN GNU Parallel AND ALTERNATIVES:: -* BUGS:: -* REPORTING BUGS:: -* AUTHOR:: -* LICENSE:: -* DEPENDENCIES:: -* SEE ALSO:: -@end menu - -@node NAME -@chapter NAME - -parallel - build and execute shell command lines from standard input in parallel - -@node SYNOPSIS -@chapter SYNOPSIS - -@strong{parallel} [options] [@emph{command} [arguments]] < list_of_arguments - -@strong{parallel} [options] [@emph{command} [arguments]] ( @strong{:::} arguments | -@strong{::::} argfile(s) ) ... - -@strong{parallel} --semaphore [options] @emph{command} - -@strong{#!/usr/bin/parallel} --shebang [options] [@emph{command} [arguments]] - -@node DESCRIPTION -@chapter DESCRIPTION - -GNU @strong{parallel} is a shell tool for executing jobs in parallel using -one or more computers. A job can be a single command or a small -script that has to be run for each of the lines in the input. The -typical input is a list of files, a list of hosts, a list of users, a -list of URLs, or a list of tables. A job can also be a command that -reads from a pipe. GNU @strong{parallel} can then split the input into -blocks and pipe a block into each command in parallel. - -If you use xargs and tee today you will find GNU @strong{parallel} very easy to -use as GNU @strong{parallel} is written to have the same options as xargs. If -you write loops in shell, you will find GNU @strong{parallel} may be able to -replace most of the loops and make them run faster by running several -jobs in parallel. - -GNU @strong{parallel} makes sure output from the commands is the same output as -you would get had you run the commands sequentially. This makes it -possible to use output from GNU @strong{parallel} as input for other programs. - -For each line of input GNU @strong{parallel} will execute @emph{command} with -the line as arguments. If no @emph{command} is given, the line of input is -executed. Several lines will be run in parallel. GNU @strong{parallel} can -often be used as a substitute for @strong{xargs} or @strong{cat | bash}. - -@menu -* Reader's guide:: -@end menu - -@node Reader's guide -@section Reader's guide - -Start by watching the intro videos for a quick introduction: -http://www.youtube.com/playlist?list=PL284C9FF2488BC6D1 - -Then look at the @strong{EXAMPLE}s after the list of @strong{OPTIONS}. That will -give you an idea of what GNU @strong{parallel} is capable of. - -Then spend an hour walking through the tutorial (@strong{man -parallel_tutorial}). Your command line will love you for it. - -Finally you may want to look at the rest of this manual if you have -special needs not already covered. - -@node OPTIONS -@chapter OPTIONS - -@table @asis -@item @emph{command} -@anchor{@emph{command}} - -Command to execute. If @emph{command} or the following arguments contain -replacement strings (such as @strong{@{@}}) every instance will be substituted -with the input. 
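For example, running a command with a replacement string (the filenames are only illustrations):

@verbatim
  parallel gzip {} ::: file1 file2
@end verbatim

Here @strong{@{@}} is replaced by each argument in turn, so @strong{gzip file1} and @strong{gzip file2} are run in parallel.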
- -If @emph{command} is given, GNU @strong{parallel} solve the same tasks as -@strong{xargs}. If @emph{command} is not given GNU @strong{parallel} will behave -similar to @strong{cat | sh}. - -The @emph{command} must be an executable, a script, a composed command, or -a function. - -If it is a Bash function you need to @strong{export -f} the -function first. An alias will, however, not work (see why -http://www.perlmonks.org/index.pl?node_id=484296). - -If it is a zsh function you will need to use this helper function -@strong{exportf} to export and to set $SHELL to bash: - -@verbatim - function exportf (){ - export $(echo $1)="`whence -f $1 | sed -e "s/$1 //" `" - } - - function my_func(){ - echo $1; - echo "hello"; - } - - exportf my_func - SHELL=/bin/bash parallel "my_func {}" ::: 1 2 -@end verbatim - -The command cannot contain the character \257 (¯). - -@item @strong{@{@}} (alpha testing) -@anchor{@strong{@{@}} (alpha testing)} - -Input line. This replacement string will be replaced by a full line -read from the input source. The input source is normally stdin -(standard input), but can also be given with @strong{-a}, @strong{:::}, or -@strong{::::}. - -The replacement string @strong{@{@}} can be changed with @strong{-I}. - -If the command line contains no replacement strings then @strong{@{@}} will be -appended to the command line. - -@item @strong{@{.@}} (alpha testing) -@anchor{@strong{@{.@}} (alpha testing)} - -Input line without extension. This replacement string will be replaced -by the input with the extension removed. If the input line contains -@strong{.} after the last @strong{/} the last @strong{.} till the end of the string will -be removed and @strong{@{.@}} will be replaced with the -remaining. E.g. @emph{foo.jpg} becomes @emph{foo}, @emph{subdir/foo.jpg} becomes -@emph{subdir/foo}, @emph{sub.dir/foo.jpg} becomes @emph{sub.dir/foo}, -@emph{sub.dir/bar} remains @emph{sub.dir/bar}. If the input line does not -contain @strong{.} it will remain unchanged. - -The replacement string @strong{@{.@}} can be changed with @strong{--er}. - -To understand replacement strings see @strong{@{@}}. - -@item @strong{@{/@}} (alpha testing) -@anchor{@strong{@{/@}} (alpha testing)} - -Basename of input line. This replacement string will be replaced by -the input with the directory part removed. - -The replacement string @strong{@{/@}} can be changed with -@strong{--basenamereplace}. - -To understand replacement strings see @strong{@{@}}. - -@item @strong{@{//@}} (alpha testing) -@anchor{@strong{@{//@}} (alpha testing)} - -Dirname of input line. This replacement string will be replaced by the -dir of the input line. See @strong{dirname}(1). - -The replacement string @strong{@{//@}} can be changed with -@strong{--dirnamereplace}. - -To understand replacement strings see @strong{@{@}}. - -@item @strong{@{/.@}} (alpha testing) -@anchor{@strong{@{/.@}} (alpha testing)} - -Basename of input line without extension. This replacement string will -be replaced by the input with the directory and extension part -removed. It is a combination of @strong{@{/@}} and @strong{@{.@}}. - -The replacement string @strong{@{/.@}} can be changed with -@strong{--basenameextensionreplace}. - -To understand replacement strings see @strong{@{@}}. - -@item @strong{@{#@}} (alpha testing) -@anchor{@strong{@{#@}} (alpha testing)} - -Sequence number of the job to run. This replacement string will be -replaced by the sequence number of the job being run. It contains the -same number as $PARALLEL_SEQ. 
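A quick sketch of @strong{@{#@}} (the arguments are only illustrations):

@verbatim
  parallel echo "job {#}: {}" ::: a b c
@end verbatim

This prints @strong{job 1: a}, @strong{job 2: b}, and @strong{job 3: c}, though not necessarily in that order unless @strong{-k} is given.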
- -The replacement string @strong{@{#@}} can be changed with @strong{--seqreplace}. - -To understand replacement strings see @strong{@{@}}. - -@item @strong{@{%@}} (alpha testing) -@anchor{@strong{@{%@}} (alpha testing)} - -Job slot number. This replacement string will be replaced by the job's -slot number between 1 and number of jobs to run in parallel. There -will never be 2 jobs running at the same time with the same job slot -number. - -The replacement string @strong{@{%@}} can be changed with @strong{--slotreplace}. - -To understand replacement strings see @strong{@{@}}. - -@item @strong{@{}@emph{n}@strong{@}} -@anchor{@strong{@{}@emph{n}@strong{@}}} - -Argument from input source @emph{n} or the @emph{n}'th argument. This -positional replacement string will be replaced by the input from input -source @emph{n} (when used with @strong{-a} or @strong{::::}) or with the @emph{n}'th -argument (when used with @strong{-N}). If @emph{n} is negative it refers to the -@emph{n}'th last argument. - -To understand replacement strings see @strong{@{@}}. - -@item @strong{@{}@emph{n}.@strong{@}} -@anchor{@strong{@{}@emph{n}.@strong{@}}} - -Argument from input source @emph{n} or the @emph{n}'th argument without -extension. It is a combination of @strong{@{}@emph{n}@strong{@}} and @strong{@{.@}}. - -This positional replacement string will be replaced by the input from -input source @emph{n} (when used with @strong{-a} or @strong{::::}) or with the -@emph{n}'th argument (when used with @strong{-N}). The input will have the -extension removed. - -To understand positional replacement strings see @strong{@{}@emph{n}@strong{@}}. - -@item @strong{@{}@emph{n}/@strong{@}} -@anchor{@strong{@{}@emph{n}/@strong{@}}} - -Basename of argument from input source @emph{n} or the @emph{n}'th argument. -It is a combination of @strong{@{}@emph{n}@strong{@}} and @strong{@{/@}}. - -This positional replacement string will be replaced by the input from -input source @emph{n} (when used with @strong{-a} or @strong{::::}) or with the -@emph{n}'th argument (when used with @strong{-N}). The input will have the -directory (if any) removed. - -To understand positional replacement strings see @strong{@{}@emph{n}@strong{@}}. - -@item @strong{@{}@emph{n}//@strong{@}} -@anchor{@strong{@{}@emph{n}//@strong{@}}} - -Dirname of argument from input source @emph{n} or the @emph{n}'th argument. -It is a combination of @strong{@{}@emph{n}@strong{@}} and @strong{@{//@}}. - -This positional replacement string will be replaced by the dir of the -input from input source @emph{n} (when used with @strong{-a} or @strong{::::}) or with -the @emph{n}'th argument (when used with @strong{-N}). See @strong{dirname}(1). - -To understand positional replacement strings see @strong{@{}@emph{n}@strong{@}}. - -@item @strong{@{}@emph{n}/.@strong{@}} -@anchor{@strong{@{}@emph{n}/.@strong{@}}} - -Basename of argument from input source @emph{n} or the @emph{n}'th argument -without extension. It is a combination of @strong{@{}@emph{n}@strong{@}}, @strong{@{/@}}, and -@strong{@{.@}}. - -This positional replacement string will be replaced by the input from -input source @emph{n} (when used with @strong{-a} or @strong{::::}) or with the -@emph{n}'th argument (when used with @strong{-N}). The input will have the -directory (if any) and extension removed. - -To understand positional replacement strings see @strong{@{}@emph{n}@strong{@}}. 
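As a sketch of the positional replacement strings above (the inputs are only illustrations):

@verbatim
  parallel echo {1} {2/.} ::: A B ::: dir/x.txt dir/y.txt
@end verbatim

This combines the two input sources and prints @strong{A x}, @strong{A y}, @strong{B x}, and @strong{B y} (in some order).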
- -@item @strong{@{=}@emph{perl expression}@strong{=@}} (alpha testing) -@anchor{@strong{@{=}@emph{perl expression}@strong{=@}} (alpha testing)} - -Replace with calculated @emph{perl expression}. @strong{$_} will contain the -same as @strong{@{@}}. After evaluating @emph{perl expression} @strong{$_} will be used -as the value. It is recommended to only change $_ but you have full -access to all of GNU @strong{parallel}'s internal functions and data -structures. - -The @strong{@{=}@emph{perl expression}@strong{=@}} must be given as a single string. - -See also: @strong{--rpl} @strong{--parens} - -@item @strong{@{=}@emph{n} @emph{perl expression}@strong{=@}} (alpha testing) -@anchor{@strong{@{=}@emph{n} @emph{perl expression}@strong{=@}} (alpha testing)} - -Positional equivalent to @strong{@{= perl expression =@}}. To understand -positional replacement strings see @strong{@{}@emph{n}@strong{@}}. - -See also: @strong{@{= perl expression =@}} @strong{@{}@emph{n}@strong{@}}. - -@item @strong{:::} @emph{arguments} -@anchor{@strong{:::} @emph{arguments}} - -Use arguments from the command line as input source instead of stdin -(standard input). Unlike other options for GNU @strong{parallel} @strong{:::} is -placed after the @emph{command} and before the arguments. - -The following are equivalent: - -@verbatim - (echo file1; echo file2) | parallel gzip - parallel gzip ::: file1 file2 - parallel gzip {} ::: file1 file2 - parallel --arg-sep ,, gzip {} ,, file1 file2 - parallel --arg-sep ,, gzip ,, file1 file2 - parallel ::: "gzip file1" "gzip file2" -@end verbatim - -To avoid treating @strong{:::} as special use @strong{--arg-sep} to set the -argument separator to something else. See also @strong{--arg-sep}. - -stdin (standard input) will be passed to the first process run. - -If multiple @strong{:::} are given, each group will be treated as an input -source, and all combinations of input sources will be -generated. E.g. ::: 1 2 ::: a b c will result in the combinations -(1,a) (1,b) (1,c) (2,a) (2,b) (2,c). This is useful for replacing -nested for-loops. - -@strong{:::} and @strong{::::} can be mixed. So these are equivalent: - -@verbatim - parallel echo {1} {2} {3} ::: 6 7 ::: 4 5 ::: 1 2 3 - parallel echo {1} {2} {3} :::: <(seq 6 7) <(seq 4 5) :::: <(seq 1 3) - parallel -a <(seq 6 7) echo {1} {2} {3} :::: <(seq 4 5) :::: <(seq 1 3) - parallel -a <(seq 6 7) -a <(seq 4 5) echo {1} {2} {3} ::: 1 2 3 - seq 6 7 | parallel -a - -a <(seq 4 5) echo {1} {2} {3} ::: 1 2 3 - seq 4 5 | parallel echo {1} {2} {3} :::: <(seq 6 7) - ::: 1 2 3 -@end verbatim - -@item @strong{::::} @emph{argfiles} -@anchor{@strong{::::} @emph{argfiles}} - -Another way to write @strong{-a} @emph{argfile1} @strong{-a} @emph{argfile2} ... - -@strong{:::} and @strong{::::} can be mixed. - -See @strong{-a}, @strong{:::} and @strong{--xapply}. - -@item @strong{--null} -@anchor{@strong{--null}} - -@item @strong{-0} -@anchor{@strong{-0}} - -Use NUL as delimiter. Normally input lines will end in \n -(newline). If they end in \0 (NUL), then use this option. It is useful -for processing arguments that may contain \n (newline). - -@item @strong{--arg-file} @emph{input-file} -@anchor{@strong{--arg-file} @emph{input-file}} - -@item @strong{-a} @emph{input-file} -@anchor{@strong{-a} @emph{input-file}} - -Use @emph{input-file} as input source. If you use this option, stdin -(standard input) is given to the first process run. Otherwise, stdin -(standard input) is redirected from /dev/null. 
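For example (assuming a hypothetical file @emph{args.txt} with one argument per line):

@verbatim
  parallel -a args.txt gzip
@end verbatim

This reads the arguments from @emph{args.txt} instead of stdin (standard input), so stdin remains available to the first job.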
- -If multiple @strong{-a} are given, each @emph{input-file} will be treated as an -input source, and all combinations of input sources will be -generated. E.g. The file @strong{foo} contains @strong{1 2}, the file @strong{bar} -contains @strong{a b c}. @strong{-a foo} @strong{-a bar} will result in the combinations -(1,a) (1,b) (1,c) (2,a) (2,b) (2,c). This is useful for replacing -nested for-loops. - -See also @strong{--xapply} and @strong{@{}@emph{n}@strong{@}}. - -@item @strong{--arg-file-sep} @emph{sep-str} -@anchor{@strong{--arg-file-sep} @emph{sep-str}} - -Use @emph{sep-str} instead of @strong{::::} as separator string between command -and argument files. Useful if @strong{::::} is used for something else by the -command. - -See also: @strong{::::}. - -@item @strong{--arg-sep} @emph{sep-str} -@anchor{@strong{--arg-sep} @emph{sep-str}} - -Use @emph{sep-str} instead of @strong{:::} as separator string. Useful if @strong{:::} -is used for something else by the command. - -Also useful if you command uses @strong{:::} but you still want to read -arguments from stdin (standard input): Simply change @strong{--arg-sep} to a -string that is not in the command line. - -See also: @strong{:::}. - -@item @strong{--bar} -@anchor{@strong{--bar}} - -Show progress as a progress bar. In the bar is shown: % of jobs -completed, estimated seconds left, and number of jobs started. - -It is compatible with @strong{zenity}: - -seq 1000 | parallel -j30 --bar '(echo @{@};sleep 0.1)' 2> >(zenity --progress --auto-kill) | wc - -@item @strong{--basefile} @emph{file} -@anchor{@strong{--basefile} @emph{file}} - -@item @strong{--bf} @emph{file} -@anchor{@strong{--bf} @emph{file}} - -@emph{file} will be transferred to each sshlogin before a jobs is -started. It will be removed if @strong{--cleanup} is active. The file may be -a script to run or some common base data needed for the jobs. -Multiple @strong{--bf} can be specified to transfer more basefiles. The -@emph{file} will be transferred the same way as @strong{--transfer}. - -@item @strong{--basenamereplace} @emph{replace-str} -@anchor{@strong{--basenamereplace} @emph{replace-str}} - -@item @strong{--bnr} @emph{replace-str} -@anchor{@strong{--bnr} @emph{replace-str}} - -Use the replacement string @emph{replace-str} instead of @strong{@{/@}} for -basename of input line. - -@item @strong{--basenameextensionreplace} @emph{replace-str} -@anchor{@strong{--basenameextensionreplace} @emph{replace-str}} - -@item @strong{--bner} @emph{replace-str} -@anchor{@strong{--bner} @emph{replace-str}} - -Use the replacement string @emph{replace-str} instead of @strong{@{/.@}} for basename of input line without extension. - -@item @strong{--bg} -@anchor{@strong{--bg}} - -Run command in background thus GNU @strong{parallel} will not wait for -completion of the command before exiting. This is the default if -@strong{--semaphore} is set. - -See also: @strong{--fg}, @strong{man sem}. - -Implies @strong{--semaphore}. - -@item @strong{--bibtex} -@anchor{@strong{--bibtex}} - -Print the BibTeX entry for GNU @strong{parallel} and disable citation -notice. - -@item @strong{--block} @emph{size} -@anchor{@strong{--block} @emph{size}} - -@item @strong{--block-size} @emph{size} -@anchor{@strong{--block-size} @emph{size}} - -Size of block in bytes. The size can be postfixed with K, M, G, T, P, -k, m, g, t, or p which would multiply the size with 1024, 1048576, -1073741824, 1099511627776, 1125899906842624, 1000, 1000000, -1000000000, 1000000000000, or 1000000000000000 respectively. 
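For example (a sketch; @emph{bigfile} is a hypothetical input):

@verbatim
  cat bigfile | parallel --pipe --block 10M wc -l
@end verbatim

This reads blocks of roughly 10 MB and passes each block to a separate @strong{wc -l}.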
- -GNU @strong{parallel} tries to meet the block size but can be off by the -length of one record. For performance reasons @emph{size} should be bigger -than a single record. - -@emph{size} defaults to 1M. - -See @strong{--pipe} for use of this. - -@item @strong{--cat} -@anchor{@strong{--cat}} - -Create a temporary file with content. Normally @strong{--pipe} will give -data to the program on stdin (standard input). With @strong{--cat} GNU -@strong{parallel} will create a temporary file with the name in @{@}, so you -can do: @strong{parallel --pipe --cat wc @{@}}. - -See also @strong{--fifo}. - -@item @strong{--cleanup} -@anchor{@strong{--cleanup}} - -Remove transferred files. @strong{--cleanup} will remove the transferred files -on the remote computer after processing is done. - -@verbatim - find log -name '*gz' | parallel \ - --sshlogin server.example.com --transfer --return {.}.bz2 \ - --cleanup "zcat {} | bzip -9 >{.}.bz2" -@end verbatim - -With @strong{--transfer} the file transferred to the remote computer will be -removed on the remote computer. Directories created will not be removed -- even if they are empty. - -With @strong{--return} the file transferred from the remote computer will be -removed on the remote computer. Directories created will not be removed -- even if they are empty. - -@strong{--cleanup} is ignored when not used with @strong{--transfer} or @strong{--return}. - -@item @strong{--colsep} @emph{regexp} -@anchor{@strong{--colsep} @emph{regexp}} - -@item @strong{-C} @emph{regexp} -@anchor{@strong{-C} @emph{regexp}} - -Column separator. The input will be treated as a table with @emph{regexp} -separating the columns. The n'th column can be access using -@strong{@{}@emph{n}@strong{@}} or @strong{@{}@emph{n}.@strong{@}}. E.g. @strong{@{3@}} is the 3rd column. - -@strong{--colsep} implies @strong{--trim rl}. - -@emph{regexp} is a Perl Regular Expression: -http://perldoc.perl.org/perlre.html - -@item @strong{--compress} -@anchor{@strong{--compress}} - -Compress temporary files. If the output is big and very compressible -this will take up less disk space in $TMPDIR and possibly be faster due to less -disk I/O. - -GNU @strong{parallel} will try @strong{lzop}, @strong{pigz}, @strong{gzip}, @strong{pbzip2}, -@strong{plzip}, @strong{bzip2}, @strong{lzma}, @strong{lzip}, @strong{xz} in that order, and use the -first available. - -@item @strong{--compress-program} @emph{prg} -@anchor{@strong{--compress-program} @emph{prg}} - -@item @strong{--decompress-program} @emph{prg} -@anchor{@strong{--decompress-program} @emph{prg}} - -Use @emph{prg} for (de)compressing temporary files. It is assumed that @emph{prg --dc} will decompress stdin (standard input) to stdout (standard -output) unless @strong{--decompress-program} is given. - -@item @strong{--ctrlc} -@anchor{@strong{--ctrlc}} - -Sends SIGINT to tasks running on remote computers thus killing them. - -@item @strong{--delimiter} @emph{delim} -@anchor{@strong{--delimiter} @emph{delim}} - -@item @strong{-d} @emph{delim} -@anchor{@strong{-d} @emph{delim}} - -Input items are terminated by the specified character. Quotes and -backslash are not special; every character in the input is taken -literally. Disables the end-of-file string, which is treated like any -other argument. This can be used when the input consists of simply -newline-separated items, although it is almost always better to design -your program to use --null where this is possible. 
The specified -delimiter may be a single character, a C-style character escape such -as \n, or an octal or hexadecimal escape code. Octal and -hexadecimal escape codes are understood as for the printf command. -Multibyte characters are not supported. - -@item @strong{--dirnamereplace} @emph{replace-str} -@anchor{@strong{--dirnamereplace} @emph{replace-str}} - -@item @strong{--dnr} @emph{replace-str} -@anchor{@strong{--dnr} @emph{replace-str}} - -Use the replacement string @emph{replace-str} instead of @strong{@{//@}} for -dirname of input line. - -@item @strong{-E} @emph{eof-str} -@anchor{@strong{-E} @emph{eof-str}} - -Set the end of file string to eof-str. If the end of file string -occurs as a line of input, the rest of the input is ignored. If -neither @strong{-E} nor @strong{-e} is used, no end of file string is used. - -@item @strong{--delay} @emph{secs} -@anchor{@strong{--delay} @emph{secs}} - -Delay starting next job @emph{secs} seconds. GNU @strong{parallel} will pause -@emph{secs} seconds after starting each job. @emph{secs} can be less than 1 -seconds. - -@item @strong{--dry-run} -@anchor{@strong{--dry-run}} - -Print the job to run on stdout (standard output), but do not run the -job. Use @strong{-v -v} to include the ssh/rsync wrapping if the job would -be run on a remote computer. Do not count on this literaly, though, as -the job may be scheduled on another computer or the local computer if -: is in the list. - -@item @strong{--eof}[=@emph{eof-str}] -@anchor{@strong{--eof}[=@emph{eof-str}]} - -@item @strong{-e}[@emph{eof-str}] -@anchor{@strong{-e}[@emph{eof-str}]} - -This option is a synonym for the @strong{-E} option. Use @strong{-E} instead, -because it is POSIX compliant for @strong{xargs} while this option is not. -If @emph{eof-str} is omitted, there is no end of file string. If neither -@strong{-E} nor @strong{-e} is used, no end of file string is used. - -@item @strong{--env} @emph{var} -@anchor{@strong{--env} @emph{var}} - -Copy environment variable @emph{var}. This will copy @emph{var} to the -environment that the command is run in. This is especially useful for -remote execution. - -In Bash @emph{var} can also be a Bash function - just remember to @strong{export --f} the function, see @strong{command}. - -The variable '_' is special. It will copy all enviroment variables -except for the ones mentioned in ~/.parallel/ignored_vars. - -See also: @strong{--record-env}. - -@item @strong{--eta} -@anchor{@strong{--eta}} - -Show the estimated number of seconds before finishing. This forces GNU -@strong{parallel} to read all jobs before starting to find the number of -jobs. GNU @strong{parallel} normally only reads the next job to run. -Implies @strong{--progress}. - -@item @strong{--fg} -@anchor{@strong{--fg}} - -Run command in foreground thus GNU @strong{parallel} will wait for -completion of the command before exiting. - -See also @strong{--bg}, @strong{man sem}. - -Implies @strong{--semaphore}. - -@item @strong{--fifo} -@anchor{@strong{--fifo}} - -Create a temporary fifo with content. Normally @strong{--pipe} will give -data to the program on stdin (standard input). With @strong{--fifo} GNU -@strong{parallel} will create a temporary fifo with the name in @{@}, so you -can do: @strong{parallel --pipe --fifo wc @{@}}. - -Beware: If data is not read from the fifo, the job will block forever. - -See also @strong{--cat}. - -@item @strong{--filter-hosts} -@anchor{@strong{--filter-hosts}} - -Remove down hosts. For each remote host: check that login through ssh -works. 
If not: do not use this host. - -Currently you can @emph{not} put @strong{--filter-hosts} in a profile, -$PARALLEL, /etc/parallel/config or similar. This is because GNU -@strong{parallel} uses GNU @strong{parallel} to compute this, so you will get an -infinite loop. This will likely be fixed in a later release. - -@item @strong{--gnu} -@anchor{@strong{--gnu}} - -Behave like GNU @strong{parallel}. If @strong{--tollef} and @strong{--gnu} are both set, -@strong{--gnu} takes precedence. @strong{--tollef} is retired, but @strong{--gnu} is -kept for compatibility. - -@item @strong{--group} -@anchor{@strong{--group}} - -Group output. Output from each jobs is grouped together and is only -printed when the command is finished. stderr (standard error) first -followed by stdout (standard output). This takes some CPU time. In -rare situations GNU @strong{parallel} takes up lots of CPU time and if it is -acceptable that the outputs from different commands are mixed -together, then disabling grouping with @strong{-u} can speedup GNU -@strong{parallel} by a factor of 10. - -@strong{--group} is the default. Can be reversed with @strong{-u}. - -See also: @strong{--line-buffer} @strong{--ungroup} - -@item @strong{--help} -@anchor{@strong{--help}} - -@item @strong{-h} -@anchor{@strong{-h}} - -Print a summary of the options to GNU @strong{parallel} and exit. - -@item @strong{--halt-on-error} @emph{val} (alpha testing) -@anchor{@strong{--halt-on-error} @emph{val} (alpha testing)} - -@item @strong{--halt} @emph{val} (alpha testing) -@anchor{@strong{--halt} @emph{val} (alpha testing)} - -How should GNU @strong{parallel} terminate if one of more jobs fail? - -@table @asis -@item 0 -@anchor{0} - -Do not halt if a job fails. Exit status will be the number of jobs -failed. This is the default. - -@item 1 -@anchor{1} - -Do not start new jobs if a job fails, but complete the running jobs -including cleanup. The exit status will be the exit status from the -last failing job. - -@item 2 -@anchor{2} - -Kill off all jobs immediately and exit without cleanup. The exit -status will be the exit status from the failing job. - -@item 1-99% -@anchor{1-99%} - -If @emph{val}% of the jobs fail and minimum 3: Do not start new jobs, but -complete the running jobs including cleanup. The exit status will be -the exit status from the last failing job. - -@end table - -@item @strong{--header} @emph{regexp} -@anchor{@strong{--header} @emph{regexp}} - -Use regexp as header. For normal usage the matched header (typically -the first line: @strong{--header '.*\n'}) will be split using @strong{--colsep} -(which will default to '\t') and column names can be used as -replacement variables: @strong{@{column name@}}. - -For @strong{--pipe} the matched header will be prepended to each output. - -@strong{--header :} is an alias for @strong{--header '.*\n'}. - -If @emph{regexp} is a number, it will match that many lines. - -@item @strong{-I} @emph{replace-str} -@anchor{@strong{-I} @emph{replace-str}} - -Use the replacement string @emph{replace-str} instead of @{@}. - -@item @strong{--replace}[=@emph{replace-str}] -@anchor{@strong{--replace}[=@emph{replace-str}]} - -@item @strong{-i}[@emph{replace-str}] -@anchor{@strong{-i}[@emph{replace-str}]} - -This option is a synonym for @strong{-I}@emph{replace-str} if @emph{replace-str} is -specified, and for @strong{-I}@{@} otherwise. This option is deprecated; -use @strong{-I} instead. - -@item @strong{--joblog} @emph{logfile} -@anchor{@strong{--joblog} @emph{logfile}} - -Logfile for executed jobs. 
Save a list of the executed jobs to -@emph{logfile} in the following TAB separated format: sequence number, -sshlogin, start time as seconds since epoch, run time in seconds, -bytes in files transferred, bytes in files returned, exit status, -signal, and command run. - -To convert the times into ISO-8601 strict do: - -@strong{perl -a -F"\t" -ne 'chomp($F[2]=`date -d \@@$F[2] +%FT%T`); print join("\t",@@F)'} - -See also @strong{--resume}. - -@item @strong{--jobs} @emph{N} -@anchor{@strong{--jobs} @emph{N}} - -@item @strong{-j} @emph{N} -@anchor{@strong{-j} @emph{N}} - -@item @strong{--max-procs} @emph{N} -@anchor{@strong{--max-procs} @emph{N}} - -@item @strong{-P} @emph{N} -@anchor{@strong{-P} @emph{N}} - -Number of jobslots. Run up to N jobs in parallel. 0 means as many as -possible. Default is 100% which will run one job per CPU core. - -If @strong{--semaphore} is set default is 1 thus making a mutex. - -@item @strong{--jobs} @emph{+N} -@anchor{@strong{--jobs} @emph{+N}} - -@item @strong{-j} @emph{+N} -@anchor{@strong{-j} @emph{+N}} - -@item @strong{--max-procs} @emph{+N} -@anchor{@strong{--max-procs} @emph{+N}} - -@item @strong{-P} @emph{+N} -@anchor{@strong{-P} @emph{+N}} - -Add N to the number of CPU cores. Run this many jobs in parallel. -See also @strong{--use-cpus-instead-of-cores}. - -@item @strong{--jobs} @emph{-N} -@anchor{@strong{--jobs} @emph{-N}} - -@item @strong{-j} @emph{-N} -@anchor{@strong{-j} @emph{-N}} - -@item @strong{--max-procs} @emph{-N} -@anchor{@strong{--max-procs} @emph{-N}} - -@item @strong{-P} @emph{-N} -@anchor{@strong{-P} @emph{-N}} - -Subtract N from the number of CPU cores. Run this many jobs in parallel. -If the evaluated number is less than 1 then 1 will be used. See also -@strong{--use-cpus-instead-of-cores}. - -@item @strong{--jobs} @emph{N}% -@anchor{@strong{--jobs} @emph{N}%} - -@item @strong{-j} @emph{N}% -@anchor{@strong{-j} @emph{N}%} - -@item @strong{--max-procs} @emph{N}% -@anchor{@strong{--max-procs} @emph{N}%} - -@item @strong{-P} @emph{N}% -@anchor{@strong{-P} @emph{N}%} - -Multiply N% with the number of CPU cores. Run this many jobs in parallel. -If the evaluated number is less than 1 then 1 will be used. See also -@strong{--use-cpus-instead-of-cores}. - -@item @strong{--jobs} @emph{procfile} -@anchor{@strong{--jobs} @emph{procfile}} - -@item @strong{-j} @emph{procfile} -@anchor{@strong{-j} @emph{procfile}} - -@item @strong{--max-procs} @emph{procfile} -@anchor{@strong{--max-procs} @emph{procfile}} - -@item @strong{-P} @emph{procfile} -@anchor{@strong{-P} @emph{procfile}} - -Read parameter from file. Use the content of @emph{procfile} as parameter -for @emph{-j}. E.g. @emph{procfile} could contain the string 100% or +2 or -10. If @emph{procfile} is changed when a job completes, @emph{procfile} is -read again and the new number of jobs is computed. If the number is -lower than before, running jobs will be allowed to finish but new jobs -will not be started until the wanted number of jobs has been reached. -This makes it possible to change the number of simultaneous running -jobs while GNU @strong{parallel} is running. - -@item @strong{--keep-order} -@anchor{@strong{--keep-order}} - -@item @strong{-k} -@anchor{@strong{-k}} - -Keep sequence of output same as the order of input. Normally the -output of a job will be printed as soon as the job completes. 
Try this to see the difference:

@verbatim
 parallel -j4 sleep {}\; echo {} ::: 2 1 4 3
 parallel -j4 -k sleep {}\; echo {} ::: 2 1 4 3
@end verbatim

If used with @strong{--onall} or @strong{--nonall} the output will be grouped by
sshlogin in sorted order.

@item @strong{-L} @emph{max-lines}
@anchor{@strong{-L} @emph{max-lines}}

When used with @strong{--pipe}: Read records of @emph{max-lines}.

When used otherwise: Use at most @emph{max-lines} nonblank input lines per
command line. Trailing blanks cause an input line to be logically
continued on the next input line.

@strong{-L 0} means read one line, but insert 0 arguments on the command
line.

Implies @strong{-X} unless @strong{-m}, @strong{--xargs}, or @strong{--pipe} is set.

@item @strong{--max-lines}[=@emph{max-lines}]
@anchor{@strong{--max-lines}[=@emph{max-lines}]}

@item @strong{-l}[@emph{max-lines}]
@anchor{@strong{-l}[@emph{max-lines}]}

When used with @strong{--pipe}: Read records of @emph{max-lines}.

When used otherwise: Synonym for the @strong{-L} option. Unlike @strong{-L}, the
@emph{max-lines} argument is optional. If @emph{max-lines} is not specified,
it defaults to one. The @strong{-l} option is deprecated since the POSIX
standard specifies @strong{-L} instead.

@strong{-l 0} is an alias for @strong{-l 1}.

Implies @strong{-X} unless @strong{-m}, @strong{--xargs}, or @strong{--pipe} is set.

@item @strong{--line-buffer}
@anchor{@strong{--line-buffer}}

Buffer output on line basis. @strong{--group} will keep the output together
for a whole job. @strong{--ungroup} allows output to mix, with half a line
coming from one job and half a line coming from another
job. @strong{--line-buffer} fits between these two: GNU @strong{parallel} will
print a full line, but will allow for mixing lines of different jobs.

@strong{--line-buffer} takes more CPU power than both @strong{--group} and
@strong{--ungroup}, but can be faster than @strong{--group} if the CPU is not the
limiting factor.

See also: @strong{--group} @strong{--ungroup}

@item @strong{--load} @emph{max-load}
@anchor{@strong{--load} @emph{max-load}}

Do not start new jobs on a given computer unless the number of running
processes on the computer is less than @emph{max-load}. @emph{max-load} uses
the same syntax as @strong{--jobs}, so @emph{100%} for one per CPU is a valid
setting. The only difference is 0, which is interpreted as 0.01.

@item @strong{--controlmaster}
@anchor{@strong{--controlmaster}}

@item @strong{-M}
@anchor{@strong{-M}}

Use ssh's ControlMaster to make ssh connections faster. Useful if jobs
run remotely and are very fast to run. This is disabled for sshlogins
that specify their own ssh command.

@item @strong{--xargs}
@anchor{@strong{--xargs}}

Multiple arguments. Insert as many arguments as the command line
length permits.

If @strong{@{@}} is not used the arguments will be appended to the
line. If @strong{@{@}} is used multiple times each @strong{@{@}} will be replaced
with all the arguments.

Support for @strong{--xargs} with @strong{--sshlogin} is limited and may fail.

See also @strong{-X} for context replace. If in doubt use @strong{-X} as that will
most likely do what is needed.

@item @strong{-m}
@anchor{@strong{-m}}

Multiple arguments. Insert as many arguments as the command line
length permits. If multiple jobs are being run in parallel: distribute
the arguments evenly among the jobs. Use @strong{-j1} to avoid this.
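As a minimal illustration of the even distribution (a sketch; the
exact grouping depends on the number of jobslots and the maximal line
length):

@verbatim
 seq 6 | parallel -m -j2 echo
 # may print:
 # 1 2 3
 # 4 5 6
@end verbatim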
- -If @strong{@{@}} is not used the arguments will be appended to the -line. If @strong{@{@}} is used multiple times each @strong{@{@}} will be replaced -with all the arguments. - -Support for @strong{-m} with @strong{--sshlogin} is limited and may fail. - -See also @strong{-X} for context replace. If in doubt use @strong{-X} as that will -most likely do what is needed. - -@item @strong{--minversion} @emph{version} -@anchor{@strong{--minversion} @emph{version}} - -Print the version GNU @strong{parallel} and exit. If the current version of -GNU @strong{parallel} is less than @emph{version} the exit code is -255. Otherwise it is 0. - -This is useful for scripts that depend on features only available from -a certain version of GNU @strong{parallel}. - -@item @strong{--nonall} -@anchor{@strong{--nonall}} - -@strong{--onall} with no arguments. Run the command on all computers given -with @strong{--sshlogin} but take no arguments. GNU @strong{parallel} will log -into @strong{--jobs} number of computers in parallel and run the job on the -computer. @strong{-j} adjusts how many computers to log into in parallel. - -This is useful for running the same command (e.g. uptime) on a list of -servers. - -@item @strong{--onall} -@anchor{@strong{--onall}} - -Run all the jobs on all computers given with @strong{--sshlogin}. GNU -@strong{parallel} will log into @strong{--jobs} number of computers in parallel -and run one job at a time on the computer. The order of the jobs will -not be changed, but some computers may finish before others. @strong{-j} -adjusts how many computers to log into in parallel. - -When using @strong{--group} the output will be grouped by each server, so -all the output from one server will be grouped together. - -@item @strong{--output-as-files} -@anchor{@strong{--output-as-files}} - -@item @strong{--outputasfiles} -@anchor{@strong{--outputasfiles}} - -@item @strong{--files} -@anchor{@strong{--files}} - -Instead of printing the output to stdout (standard output) the output -of each job is saved in a file and the filename is then printed. - -@item @strong{--pipe} -@anchor{@strong{--pipe}} - -@item @strong{--spreadstdin} -@anchor{@strong{--spreadstdin}} - -Spread input to jobs on stdin (standard input). Read a block of data -from stdin (standard input) and give one block of data as input to one -job. - -The block size is determined by @strong{--block}. The strings @strong{--recstart} -and @strong{--recend} tell GNU @strong{parallel} how a record starts and/or -ends. The block read will have the final partial record removed before -the block is passed on to the job. The partial record will be -prepended to next block. - -If @strong{--recstart} is given this will be used to split at record start. - -If @strong{--recend} is given this will be used to split at record end. - -If both @strong{--recstart} and @strong{--recend} are given both will have to -match to find a split position. - -If neither @strong{--recstart} nor @strong{--recend} are given @strong{--recend} -defaults to '\n'. To have no record separator use @strong{--recend ""}. - -@strong{--files} is often used with @strong{--pipe}. - -See also: @strong{--recstart}, @strong{--recend}, @strong{--fifo}, @strong{--cat}, @strong{--pipepart}. - -@item @strong{--pipepart} (beta testing) -@anchor{@strong{--pipepart} (beta testing)} - -Pipe parts of a physical file. @strong{--pipepart} works similar to -@strong{--pipe}, but is much faster. 
It has a few limitations: - -@table @asis -@item * -@anchor{*} - -The file must be a physical (seekable) file and must be given using @strong{-a} or @strong{::::}. - -@item * -@anchor{* 1} - -Record counting (@strong{-N}) and line counting (@strong{-L}/@strong{-l}) do not work. - -@end table - -@item @strong{--plain} -@anchor{@strong{--plain}} - -Ignore any @strong{--profile}, $PARALLEL, and ~/.parallel/config to get full -control on the command line (used by GNU @strong{parallel} internally when -called with @strong{--sshlogin}). - -@item @strong{--progress} -@anchor{@strong{--progress}} - -Show progress of computations. List the computers involved in the task -with number of CPU cores detected and the max number of jobs to -run. After that show progress for each computer: number of running -jobs, number of completed jobs, and percentage of all jobs done by -this computer. The percentage will only be available after all jobs -have been scheduled as GNU @strong{parallel} only read the next job when -ready to schedule it - this is to avoid wasting time and memory by -reading everything at startup. - -By sending GNU @strong{parallel} SIGUSR2 you can toggle turning on/off -@strong{--progress} on a running GNU @strong{parallel} process. - -See also @strong{--eta}. - -@item @strong{--max-args}=@emph{max-args} -@anchor{@strong{--max-args}=@emph{max-args}} - -@item @strong{-n} @emph{max-args} -@anchor{@strong{-n} @emph{max-args}} - -Use at most @emph{max-args} arguments per command line. Fewer than -@emph{max-args} arguments will be used if the size (see the @strong{-s} option) -is exceeded, unless the @strong{-x} option is given, in which case -GNU @strong{parallel} will exit. - -@strong{-n 0} means read one argument, but insert 0 arguments on the command -line. - -Implies @strong{-X} unless @strong{-m} is set. - -@item @strong{--max-replace-args}=@emph{max-args} -@anchor{@strong{--max-replace-args}=@emph{max-args}} - -@item @strong{-N} @emph{max-args} -@anchor{@strong{-N} @emph{max-args}} - -Use at most @emph{max-args} arguments per command line. Like @strong{-n} but -also makes replacement strings @strong{@{1@}} .. @strong{@{}@emph{max-args}@strong{@}} that -represents argument 1 .. @emph{max-args}. If too few args the @strong{@{}@emph{n}@strong{@}} will -be empty. - -@strong{-N 0} means read one argument, but insert 0 arguments on the command -line. - -This will set the owner of the homedir to the user: - -@strong{tr ':' '\n' < /etc/passwd | parallel -N7 chown @{1@} @{6@}} - -Implies @strong{-X} unless @strong{-m} or @strong{--pipe} is set. - -When used with @strong{--pipe} @strong{-N} is the number of records to read. This -is somewhat slower than @strong{--block}. - -@item @strong{--max-line-length-allowed} -@anchor{@strong{--max-line-length-allowed}} - -Print the maximal number of characters allowed on the command line and -exit (used by GNU @strong{parallel} itself to determine the line length -on remote computers). - -@item @strong{--number-of-cpus} -@anchor{@strong{--number-of-cpus}} - -Print the number of physical CPUs and exit (used by GNU @strong{parallel} -itself to determine the number of physical CPUs on remote computers). - -@item @strong{--number-of-cores} -@anchor{@strong{--number-of-cores}} - -Print the number of CPU cores and exit (used by GNU @strong{parallel} itself -to determine the number of CPU cores on remote computers). - -@item @strong{--no-notice} -@anchor{@strong{--no-notice}} - -Do not display citation notice. 
A citation notice is printed on stderr -(standard error) only if stderr (standard error) is a terminal, the -user has not specified @strong{--no-notice}, and the user has not run -@strong{--bibtex} once. - -@item @strong{--nice} @emph{niceness} -@anchor{@strong{--nice} @emph{niceness}} - -Run the command at this niceness. For simple commands you can just add -@strong{nice} in front of the command. But if the command consists of more -sub commands (Like: ls|wc) then prepending @strong{nice} will not always -work. @strong{--nice} will make sure all sub commands are niced. - -@item @strong{--interactive} -@anchor{@strong{--interactive}} - -@item @strong{-p} -@anchor{@strong{-p}} - -Prompt the user about whether to run each command line and read a line -from the terminal. Only run the command line if the response starts -with 'y' or 'Y'. Implies @strong{-t}. - -@item @strong{--parens} @emph{parensstring} (alpha testing) -@anchor{@strong{--parens} @emph{parensstring} (alpha testing)} - -Use to define start and end parenthesis for @strong{@{= perl expression =@}}. The -left and the right parenthesis can be multiple characters and are -assumed to be the same length. The default is @strong{@{==@}} giving -@strong{@{=} as the start parenthesis and @strong{=@}} as the end parenthesis. - -Another useful setting is @strong{,,,,} which would make both parenthesis -@strong{,,}: - -@verbatim - parallel --parens ,,,, echo foo is ,,s/I/O/g,, ::: FII -@end verbatim - -See also: @strong{--rpl} @strong{@{= perl expression =@}} - -@item @strong{--profile} @emph{profilename} -@anchor{@strong{--profile} @emph{profilename}} - -@item @strong{-J} @emph{profilename} -@anchor{@strong{-J} @emph{profilename}} - -Use profile @emph{profilename} for options. This is useful if you want to -have multiple profiles. You could have one profile for running jobs in -parallel on the local computer and a different profile for running jobs -on remote computers. See the section PROFILE FILES for examples. - -@emph{profilename} corresponds to the file ~/.parallel/@emph{profilename}. - -You can give multiple profiles by repeating @strong{--profile}. If parts of -the profiles conflict, the later ones will be used. - -Default: config - -@item @strong{--quote} -@anchor{@strong{--quote}} - -@item @strong{-q} -@anchor{@strong{-q}} - -Quote @emph{command}. This will quote the command line so special -characters are not interpreted by the shell. See the section -QUOTING. Most people will never need this. Quoting is disabled by -default. - -@item @strong{--no-run-if-empty} -@anchor{@strong{--no-run-if-empty}} - -@item @strong{-r} -@anchor{@strong{-r}} - -If the stdin (standard input) only contains whitespace, do not run the command. - -If used with @strong{--pipe} this is slow. - -@item @strong{--noswap} -@anchor{@strong{--noswap}} - -Do not start new jobs on a given computer if there is both swap-in and -swap-out activity. - -The swap activity is only sampled every 10 seconds as the sampling -takes 1 second to do. - -Swap activity is computed as (swap-in)*(swap-out) which in practice is -a good value: swapping out is not a problem, swapping in is not a -problem, but both swapping in and out usually indicates a problem. - -@item @strong{--record-env} -@anchor{@strong{--record-env}} - -Record current environment variables in ~/.parallel/ignored_vars. This -is useful before using @strong{--env _}. - -See also @strong{--env}. 
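A minimal sketch of the intended workflow (the variable and server
names are only examples):

@verbatim
 # In a clean shell: record the default environment
 parallel --record-env
 # Later: transfer everything set since then, e.g. MYVAR
 export MYVAR=foo
 parallel --env _ -S server.example.com echo '$MYVAR is {}' ::: transferred
@end verbatim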
@item @strong{--recstart} @emph{startstring}
@anchor{@strong{--recstart} @emph{startstring}}

@item @strong{--recend} @emph{endstring}
@anchor{@strong{--recend} @emph{endstring}}

If @strong{--recstart} is given @emph{startstring} will be used to split at record start.

If @strong{--recend} is given @emph{endstring} will be used to split at record end.

If both @strong{--recstart} and @strong{--recend} are given the combined string
@emph{endstring}@emph{startstring} will have to match to find a split
position. This is useful if either @emph{startstring} or @emph{endstring}
matches in the middle of a record.

If neither @strong{--recstart} nor @strong{--recend} are given then @strong{--recend}
defaults to '\n'. To have no record separator use @strong{--recend ""}.

@strong{--recstart} and @strong{--recend} are used with @strong{--pipe}.

Use @strong{--regexp} to interpret @strong{--recstart} and @strong{--recend} as regular
expressions. This is slow, however.

@item @strong{--regexp}
@anchor{@strong{--regexp}}

Use @strong{--regexp} to interpret @strong{--recstart} and @strong{--recend} as regular
expressions. This is slow, however.

@item @strong{--remove-rec-sep}
@anchor{@strong{--remove-rec-sep}}

@item @strong{--removerecsep}
@anchor{@strong{--removerecsep}}

@item @strong{--rrs}
@anchor{@strong{--rrs}}

Remove the text matched by @strong{--recstart} and @strong{--recend} before piping
it to the command.

Only used with @strong{--pipe}.

@item @strong{--results} @emph{prefix}
@anchor{@strong{--results} @emph{prefix}}

@item @strong{--res} @emph{prefix}
@anchor{@strong{--res} @emph{prefix}}

Save the output into files. The files will be stored in a directory tree
rooted at @emph{prefix}. Within this directory tree, each command will result
in two files: @emph{prefix}/<ARGS>/stdout and @emph{prefix}/<ARGS>/stderr, where
<ARGS> is a sequence of directories representing the header of the input
source (if using @strong{--header :}) or the number of the input source and
corresponding values.

E.g.:

@verbatim
 parallel --header : --results foo echo {a} {b} ::: a I II ::: b III IIII
@end verbatim

will generate the files:

@verbatim
 foo/a/I/b/III/stderr
 foo/a/I/b/III/stdout
 foo/a/I/b/IIII/stderr
 foo/a/I/b/IIII/stdout
 foo/a/II/b/III/stderr
 foo/a/II/b/III/stdout
 foo/a/II/b/IIII/stderr
 foo/a/II/b/IIII/stdout
@end verbatim

and

@verbatim
 parallel --results foo echo {1} {2} ::: I II ::: III IIII
@end verbatim

will generate the files:

@verbatim
 foo/1/I/2/III/stderr
 foo/1/I/2/III/stdout
 foo/1/I/2/IIII/stderr
 foo/1/I/2/IIII/stdout
 foo/1/II/2/III/stderr
 foo/1/II/2/III/stdout
 foo/1/II/2/IIII/stderr
 foo/1/II/2/IIII/stdout
@end verbatim

See also @strong{--files}, @strong{--header}, @strong{--joblog}.

@item @strong{--resume}
@anchor{@strong{--resume}}

Resumes from the last unfinished job. By reading @strong{--joblog} or the
@strong{--results} dir GNU @strong{parallel} will figure out the last unfinished
job and continue from there. As GNU @strong{parallel} only looks at the
sequence numbers in @strong{--joblog} then the input, the command, and
@strong{--joblog} all have to remain unchanged; otherwise GNU @strong{parallel}
may run wrong commands.

See also @strong{--joblog}, @strong{--results}, @strong{--resume-failed}.

@item @strong{--resume-failed}
@anchor{@strong{--resume-failed}}

Retry all failed jobs and resume from the last unfinished job.
By reading -@strong{--joblog} GNU @strong{parallel} will figure out the failed jobs and run -those again. After that it will resume last unfinished job and -continue from there. As GNU @strong{parallel} only looks at the sequence -numbers in @strong{--joblog} then the input, the command, and @strong{--joblog} -all have to remain unchanged; otherwise GNU @strong{parallel} may run wrong -commands. - -See also @strong{--joblog}, @strong{--resume}. - -@item @strong{--retries} @emph{n} -@anchor{@strong{--retries} @emph{n}} - -If a job fails, retry it on another computer on which it has not -failed. Do this @emph{n} times. If there are fewer than @emph{n} computers in -@strong{--sshlogin} GNU @strong{parallel} will re-use all the computers. This is -useful if some jobs fail for no apparent reason (such as network -failure). - -@item @strong{--return} @emph{filename} -@anchor{@strong{--return} @emph{filename}} - -Transfer files from remote computers. @strong{--return} is used with -@strong{--sshlogin} when the arguments are files on the remote computers. When -processing is done the file @emph{filename} will be transferred -from the remote computer using @strong{rsync} and will be put relative to -the default login dir. E.g. - -@verbatim - echo foo/bar.txt | parallel \ - --sshlogin server.example.com --return {.}.out touch {.}.out -@end verbatim - -This will transfer the file @emph{$HOME/foo/bar.out} from the computer -@emph{server.example.com} to the file @emph{foo/bar.out} after running -@strong{touch foo/bar.out} on @emph{server.example.com}. - -@verbatim - echo /tmp/foo/bar.txt | parallel \ - --sshlogin server.example.com --return {.}.out touch {.}.out -@end verbatim - -This will transfer the file @emph{/tmp/foo/bar.out} from the computer -@emph{server.example.com} to the file @emph{/tmp/foo/bar.out} after running -@strong{touch /tmp/foo/bar.out} on @emph{server.example.com}. - -Multiple files can be transferred by repeating the options multiple -times: - -@verbatim - echo /tmp/foo/bar.txt | \ - parallel --sshlogin server.example.com \ - --return {.}.out --return {.}.out2 touch {.}.out {.}.out2 -@end verbatim - -@strong{--return} is often used with @strong{--transfer} and @strong{--cleanup}. - -@strong{--return} is ignored when used with @strong{--sshlogin :} or when not used -with @strong{--sshlogin}. - -@item @strong{--round-robin} -@anchor{@strong{--round-robin}} - -@item @strong{--round} -@anchor{@strong{--round}} - -Normally @strong{--pipe} will give a single block to each instance of the -command. With @strong{--round-robin} all blocks will at random be written to -commands already running. This is useful if the command takes a long -time to initialize. - -@strong{--keep-order} will not work with @strong{--round-robin} as it is -impossible to track which input block corresponds to which output. - -@item @strong{--rpl} '@emph{tag} @emph{perl expression}' (alpha testing) -@anchor{@strong{--rpl} '@emph{tag} @emph{perl expression}' (alpha testing)} - -Use @emph{tag} as a replacement string for @emph{perl expression}. This makes -it possible to define your own replacement strings. 
GNU @strong{parallel}'s -7 replacement strings are implemented as: - -@verbatim - --rpl '{} ' - --rpl '{#} $_=$job->seq()' - --rpl '{%} $_=$job->slot()' - --rpl '{/} s:.*/::' - --rpl '{//} $Global::use{"File::Basename"} ||= eval "use File::Basename; 1;"; $_ = dirname($_);' - --rpl '{/.} s:.*/::; s:\.[^/.]+$::;' - --rpl '{.} s:\.[^/.]+$::' -@end verbatim - -If the user defined replacement string starts with '@{' it can also be -used as a positional replacement string (like @strong{@{2.@}}). - -It is recommended to only change $_ but you have full access to all -of GNU @strong{parallel}'s internal functions and data structures. - -Here are a few examples: - -@verbatim - Remove 2 extensions (e.g. .tar.gz) - --rpl '{..} s:\.[^/.]+$::;s:\.[^/.]+$::;' - Keep only the extension - --rpl '{ext} s:.*\.::' - Is the job sequence even or odd? - --rpl '{odd} $_=$job->seq()%2?"odd":"even"' -@end verbatim - -See also: @strong{@{= perl expression =@}} @strong{--parens} - -@item @strong{--max-chars}=@emph{max-chars} -@anchor{@strong{--max-chars}=@emph{max-chars}} - -@item @strong{-s} @emph{max-chars} -@anchor{@strong{-s} @emph{max-chars}} - -Use at most @emph{max-chars} characters per command line, including the -command and initial-arguments and the terminating nulls at the ends of -the argument strings. The largest allowed value is system-dependent, -and is calculated as the argument length limit for exec, less the size -of your environment. The default value is the maximum. - -Implies @strong{-X} unless @strong{-m} is set. - -@item @strong{--show-limits} -@anchor{@strong{--show-limits}} - -Display the limits on the command-line length which are imposed by the -operating system and the @strong{-s} option. Pipe the input from /dev/null -(and perhaps specify --no-run-if-empty) if you don't want GNU @strong{parallel} -to do anything. - -@item @strong{--semaphore} -@anchor{@strong{--semaphore}} - -Work as a counting semaphore. @strong{--semaphore} will cause GNU -@strong{parallel} to start @emph{command} in the background. When the number of -simultaneous jobs is reached, GNU @strong{parallel} will wait for one of -these to complete before starting another command. - -@strong{--semaphore} implies @strong{--bg} unless @strong{--fg} is specified. - -@strong{--semaphore} implies @strong{--semaphorename `tty`} unless -@strong{--semaphorename} is specified. - -Used with @strong{--fg}, @strong{--wait}, and @strong{--semaphorename}. - -The command @strong{sem} is an alias for @strong{parallel --semaphore}. - -See also @strong{man sem}. - -@item @strong{--semaphorename} @emph{name} -@anchor{@strong{--semaphorename} @emph{name}} - -@item @strong{--id} @emph{name} -@anchor{@strong{--id} @emph{name}} - -Use @strong{name} as the name of the semaphore. Default is the name of the -controlling tty (output from @strong{tty}). - -The default normally works as expected when used interactively, but -when used in a script @emph{name} should be set. @emph{$$} or @emph{my_task_name} -are often a good value. - -The semaphore is stored in ~/.parallel/semaphores/ - -Implies @strong{--semaphore}. - -See also @strong{man sem}. - -@item @strong{--semaphoretimeout} @emph{secs} -@anchor{@strong{--semaphoretimeout} @emph{secs}} - -If the semaphore is not released within secs seconds, take it anyway. - -Implies @strong{--semaphore}. - -See also @strong{man sem}. 
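A minimal sketch of the semaphore options above (the id @emph{myid} is
only an example):

@verbatim
 for i in 1 2 3 4; do
   sem --id myid -j2 "sleep 2; echo job $i done"
 done
 sem --id myid --wait    # block until all four have finished
@end verbatim

At most two of the four jobs run at the same time; @strong{--wait} returns
when the last one has finished.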
- -@item @strong{--seqreplace} @emph{replace-str} -@anchor{@strong{--seqreplace} @emph{replace-str}} - -Use the replacement string @emph{replace-str} instead of @strong{@{#@}} for -job sequence number. - -@item @strong{--shebang} (alpha testing) -@anchor{@strong{--shebang} (alpha testing)} - -@item @strong{--hashbang} (alpha testing) -@anchor{@strong{--hashbang} (alpha testing)} - -GNU @strong{parallel} can be called as a shebang (#!) command as the first -line of a script. The content of the file will be treated as -inputsource. - -Like this: - -@verbatim - #!/usr/bin/parallel --shebang -r traceroute - - foss.org.my - debian.org - freenetproject.org -@end verbatim - -@strong{--shebang} must be set as the first option. - -On FreeBSD @strong{env} is needed: - -@verbatim - #!/usr/bin/env -S parallel --shebang -r traceroute - - foss.org.my - debian.org - freenetproject.org -@end verbatim - -@item @strong{--shebang-wrap} (alpha testing) -@anchor{@strong{--shebang-wrap} (alpha testing)} - -GNU @strong{parallel} can parallelize scripts by wrapping the shebang -line. If the program can be run like this: - -@verbatim - cat arguments | parallel the_program -@end verbatim - -then the script can be changed to: - -@verbatim - #!/usr/bin/parallel --shebang-wrap /the/original/parser --with-options -@end verbatim - -E.g. - -@verbatim - #!/usr/bin/parallel --shebang-wrap /usr/bin/python -@end verbatim - -If the program can be run like this: - -@verbatim - cat data | parallel --pipe the_program -@end verbatim - -then the script can be changed to: - -@verbatim - #!/usr/bin/parallel --shebang-wrap --pipe /the/original/parser --with-options -@end verbatim - -E.g. - -@verbatim - #!/usr/bin/parallel --shebang-wrap --pipe /usr/bin/perl -w -@end verbatim - -@strong{--shebang-wrap} must be set as the first option. - -@item @strong{--shellquote} -@anchor{@strong{--shellquote}} - -Does not run the command but quotes it. Useful for making quoted -composed commands for GNU @strong{parallel}. - -@item @strong{--skip-first-line} -@anchor{@strong{--skip-first-line}} - -Do not use the first line of input (used by GNU @strong{parallel} itself -when called with @strong{--shebang}). - -@item @strong{--sshdelay} @emph{secs} -@anchor{@strong{--sshdelay} @emph{secs}} - -Delay starting next ssh by @emph{secs} seconds. GNU @strong{parallel} will pause -@emph{secs} seconds after starting each ssh. @emph{secs} can be less than 1 -seconds. - -@item @strong{-S} @emph{[ncpu/]sshlogin[,[ncpu/]sshlogin[,...]]} -@anchor{@strong{-S} @emph{[ncpu/]sshlogin[@comma{}[ncpu/]sshlogin[@comma{}...]]}} - -@item @strong{--sshlogin} @emph{[ncpu/]sshlogin[,[ncpu/]sshlogin[,...]]} -@anchor{@strong{--sshlogin} @emph{[ncpu/]sshlogin[@comma{}[ncpu/]sshlogin[@comma{}...]]}} - -Distribute jobs to remote computers. The jobs will be run on a list of -remote computers. GNU @strong{parallel} will determine the number of CPU -cores on the remote computers and run the number of jobs as specified by -@strong{-j}. If the number @emph{ncpu} is given GNU @strong{parallel} will use this -number for number of CPU cores on the host. Normally @emph{ncpu} will not -be needed. - -An @emph{sshlogin} is of the form: - -@verbatim - [sshcommand [options]] [username@]hostname -@end verbatim - -The sshlogin must not require a password. - -The sshlogin ':' is special, it means 'no ssh' and will therefore run -on the local computer. - -The sshlogin '..' 
is special: it reads sshlogins from ~/.parallel/sshloginfile.

The sshlogin '-' is special, too: it reads sshlogins from stdin
(standard input).

To specify more sshlogins separate the sshlogins by comma or repeat
the options multiple times.

For examples, see @strong{--sshloginfile}.

The remote host must have GNU @strong{parallel} installed.

@strong{--sshlogin} is known to cause problems with @strong{-m} and @strong{-X}.

@strong{--sshlogin} is often used with @strong{--transfer}, @strong{--return},
@strong{--cleanup}, and @strong{--trc}.

@item @strong{--sshloginfile} @emph{filename}
@anchor{@strong{--sshloginfile} @emph{filename}}

@item @strong{--slf} @emph{filename}
@anchor{@strong{--slf} @emph{filename}}

File with sshlogins. The file consists of sshlogins on separate
lines. Empty lines and lines starting with '#' are ignored. Example:

@verbatim
 server.example.com
 username@server2.example.com
 8/my-8-core-server.example.com
 2/my_other_username@my-dualcore.example.net
 # This server has SSH running on port 2222
 ssh -p 2222 server.example.net
 4/ssh -p 2222 quadserver.example.net
 # Use a different ssh program
 myssh -p 2222 -l myusername hexacpu.example.net
 # Use a different ssh program with default number of cores
 //usr/local/bin/myssh -p 2222 -l myusername hexacpu.example.net
 # Use a different ssh program with 6 cores
 6//usr/local/bin/myssh -p 2222 -l myusername hexacpu.example.net
 # Assume 16 cores on the local computer
 16/:
@end verbatim

When using a different ssh program the last argument must be the hostname.

Multiple @strong{--sshloginfile} are allowed.

GNU @strong{parallel} will first look for the file in the current dir; if that
fails it looks for the file in ~/.parallel.

The sshloginfile '..' is special: it reads sshlogins from
~/.parallel/sshloginfile.

The sshloginfile '.' is special: it reads sshlogins from
/etc/parallel/sshloginfile.

The sshloginfile '-' is special, too: it reads sshlogins from stdin
(standard input).

@item @strong{--slotreplace} @emph{replace-str}
@anchor{@strong{--slotreplace} @emph{replace-str}}

Use the replacement string @emph{replace-str} instead of @strong{@{%@}} for
job slot number.

@item @strong{--silent}
@anchor{@strong{--silent}}

Silent. The job to be run will not be printed. This is the default.
Can be reversed with @strong{-v}.

@item @strong{--tty}
@anchor{@strong{--tty}}

Open terminal tty. If GNU @strong{parallel} is used for starting an
interactive program then this option may be needed. It will start only
one job at a time (i.e. @strong{-j1}), not buffer the output (i.e. @strong{-u}),
and it will open a tty for the job. When the job is done, the next job
will get the tty.

@item @strong{--tag}
@anchor{@strong{--tag}}

Tag lines with arguments. Each output line will be prepended with the
arguments and TAB (\t). When combined with @strong{--onall} or @strong{--nonall}
the lines will be prepended with the sshlogin instead.

@strong{--tag} is ignored when using @strong{-u}.

@item @strong{--tagstring} @emph{str}
@anchor{@strong{--tagstring} @emph{str}}

Tag lines with a string. Each output line will be prepended with
@emph{str} and TAB (\t). @emph{str} can contain replacement strings such as
@{@}.

@strong{--tagstring} is ignored when using @strong{-u}, @strong{--onall}, and @strong{--nonall}.

@item @strong{--tmpdir} @emph{dirname}
@anchor{@strong{--tmpdir} @emph{dirname}}

Directory for temporary files. GNU @strong{parallel} normally buffers output
into temporary files in /tmp. By setting @strong{--tmpdir} you can use a
different dir for the files. Setting @strong{--tmpdir} is equivalent to
setting $TMPDIR.
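When the jobs produce a lot of output, a fast temporary dir matters. A
minimal sketch (assuming a tmpfs is mounted on /dev/shm, as on many
Linux systems):

@verbatim
 # Buffer the grouped output on a tmpfs instead of /tmp
 parallel --tmpdir /dev/shm zcat ::: *.gz > all.txt
@end verbatim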
@item @strong{--tmux} (alpha testing)
@anchor{@strong{--tmux} (alpha testing)}

Use @strong{tmux} for output. Start a @strong{tmux} session and run each job in a
window in that session. No other output will be produced.

@item @strong{--timeout} @emph{val}
@anchor{@strong{--timeout} @emph{val}}

Time out for the command. If the command runs for longer than @emph{val}
seconds it will get killed with SIGTERM, followed by SIGTERM 200 ms
later, followed by SIGKILL 200 ms later.

If @emph{val} is followed by a % then the timeout will dynamically be
computed as a percentage of the median runtime. Only values
> 100% will make sense.

@item @strong{--verbose}
@anchor{@strong{--verbose}}

@item @strong{-t}
@anchor{@strong{-t}}

Print the job to be run on stderr (standard error).

See also @strong{-v}, @strong{-p}.

@item @strong{--transfer}
@anchor{@strong{--transfer}}

Transfer files to remote computers. @strong{--transfer} is used with
@strong{--sshlogin} when the arguments are files and should be transferred
to the remote computers. The files will be transferred using @strong{rsync}
and will be put relative to the default work dir. If the path contains
/./ the remaining path will be relative to the work dir. E.g.

@verbatim
 echo foo/bar.txt | parallel \
   --sshlogin server.example.com --transfer wc
@end verbatim

This will transfer the file @emph{foo/bar.txt} to the computer
@emph{server.example.com} as the file @emph{$HOME/foo/bar.txt} before running
@strong{wc foo/bar.txt} on @emph{server.example.com}.

@verbatim
 echo /tmp/foo/bar.txt | parallel \
   --sshlogin server.example.com --transfer wc
@end verbatim

This will transfer the file @emph{/tmp/foo/bar.txt} to the computer
@emph{server.example.com} as the file @emph{/tmp/foo/bar.txt} before running
@strong{wc /tmp/foo/bar.txt} on @emph{server.example.com}.

@strong{--transfer} is often used with @strong{--return} and @strong{--cleanup}.

@strong{--transfer} is ignored when used with @strong{--sshlogin :} or when not used with @strong{--sshlogin}.

@item @strong{--trc} @emph{filename}
@anchor{@strong{--trc} @emph{filename}}

Transfer, Return, Cleanup. Shorthand for:

@strong{--transfer} @strong{--return} @emph{filename} @strong{--cleanup}

@item @strong{--trim} 
@anchor{@strong{--trim} }

Trim white space in input.

@table @asis
@item n
@anchor{n}

No trim. Input is not modified. This is the default.

@item l
@anchor{l}

Left trim. Remove white space from start of input. E.g. " a bc " -> "a bc ".

@item r
@anchor{r}

Right trim. Remove white space from end of input. E.g. " a bc " -> " a bc".

@item lr
@anchor{lr}

@item rl
@anchor{rl}

Both trim. Remove white space from both start and end of input. E.g.
" a bc " -> "a bc". This is the default if @strong{--colsep} is used.

@end table

@item @strong{--ungroup}
@anchor{@strong{--ungroup}}

@item @strong{-u}
@anchor{@strong{-u}}

Ungroup output. Output is printed as soon as possible and bypasses
GNU @strong{parallel}'s internal processing. This may cause output from
different commands to be mixed, and should thus only be used if you do
not care about the output.
Compare these: - -@strong{parallel -j0 'sleep @{@};echo -n start@{@};sleep @{@};echo @{@}end' ::: 1 2 3 4} - -@strong{parallel -u -j0 'sleep @{@};echo -n start@{@};sleep @{@};echo @{@}end' ::: 1 2 3 4} - -It also disables @strong{--tag}. GNU @strong{parallel} runs faster with @strong{-u}. Can -be reversed with @strong{--group}. - -See also: @strong{--line-buffer} @strong{--group} - -@item @strong{--extensionreplace} @emph{replace-str} -@anchor{@strong{--extensionreplace} @emph{replace-str}} - -@item @strong{--er} @emph{replace-str} -@anchor{@strong{--er} @emph{replace-str}} - -Use the replacement string @emph{replace-str} instead of @{.@} for input line without extension. - -@item @strong{--use-cpus-instead-of-cores} -@anchor{@strong{--use-cpus-instead-of-cores}} - -Count the number of physical CPUs instead of CPU cores. When computing -how many jobs to run simultaneously relative to the number of CPU cores -you can ask GNU @strong{parallel} to instead look at the number of physical -CPUs. This will make sense for computers that have hyperthreading as -two jobs running on one CPU with hyperthreading will run slower than -two jobs running on two physical CPUs. Some multi-core CPUs can run -faster if only one thread is running per physical CPU. Most users will -not need this option. - -@item @strong{-v} -@anchor{@strong{-v}} - -Verbose. Print the job to be run on stdout (standard output). Can be reversed -with @strong{--silent}. See also @strong{-t}. - -Use @strong{-v} @strong{-v} to print the wrapping ssh command when running remotely. - -@item @strong{--version} -@anchor{@strong{--version}} - -@item @strong{-V} -@anchor{@strong{-V}} - -Print the version GNU @strong{parallel} and exit. - -@item @strong{--workdir} @emph{mydir} -@anchor{@strong{--workdir} @emph{mydir}} - -@item @strong{--wd} @emph{mydir} -@anchor{@strong{--wd} @emph{mydir}} - -Files transferred using @strong{--transfer} and @strong{--return} will be relative -to @emph{mydir} on remote computers, and the command will be executed in -the dir @emph{mydir}. - -The special @emph{mydir} value @strong{...} will create working dirs under -@strong{~/.parallel/tmp/} on the remote computers. If @strong{--cleanup} is given -these dirs will be removed. - -The special @emph{mydir} value @strong{.} uses the current working dir. If the -current working dir is beneath your home dir, the value @strong{.} is -treated as the relative path to your home dir. This means that if your -home dir is different on remote computers (e.g. if your login is -different) the relative path will still be relative to your home dir. - -To see the difference try: - -@strong{parallel -S server pwd ::: ""} - -@strong{parallel --wd . -S server pwd ::: ""} - -@strong{parallel --wd ... -S server pwd ::: ""} - -@item @strong{--wait} -@anchor{@strong{--wait}} - -Wait for all commands to complete. - -Implies @strong{--semaphore}. - -See also @strong{man sem}. - -@item @strong{-X} -@anchor{@strong{-X}} - -Multiple arguments with context replace. Insert as many arguments as -the command line length permits. If multiple jobs are being run in -parallel: distribute the arguments evenly among the jobs. Use @strong{-j1} -to avoid this. - -If @strong{@{@}} is not used the arguments will be appended to the line. If -@strong{@{@}} is used as part of a word (like @emph{pic@{@}.jpg}) then the whole -word will be repeated. If @strong{@{@}} is used multiple times each @strong{@{@}} will -be replaced with the arguments. 
- -Normally @strong{-X} will do the right thing, whereas @strong{-m} can give -unexpected results if @strong{@{@}} is used as part of a word. - -Support for @strong{-X} with @strong{--sshlogin} is limited and may fail. - -See also @strong{-m}. - -@item @strong{--exit} -@anchor{@strong{--exit}} - -@item @strong{-x} -@anchor{@strong{-x}} - -Exit if the size (see the @strong{-s} option) is exceeded. - -@item @strong{--xapply} -@anchor{@strong{--xapply}} - -Read multiple input sources like @strong{xapply}. If multiple input sources -are given, one argument will be read from each of the input -sources. The arguments can be accessed in the command as @strong{@{1@}} -.. @strong{@{}@emph{n}@strong{@}}, so @strong{@{1@}} will be a line from the first input source, and -@strong{@{6@}} will refer to the line with the same line number from the 6th -input source. - -Compare these two: - -@verbatim - parallel echo {1} {2} ::: 1 2 3 ::: a b c - parallel --xapply echo {1} {2} ::: 1 2 3 ::: a b c -@end verbatim - -Arguments will be recycled if one input source has more arguments than the others: - -@verbatim - parallel --xapply echo {1} {2} {3} ::: 1 2 ::: I II III ::: a b c d e f g -@end verbatim - -See also @strong{--header}. - -@end table - -@node EXAMPLE: Working as xargs -n1. Argument appending -@chapter EXAMPLE: Working as xargs -n1. Argument appending - -GNU @strong{parallel} can work similar to @strong{xargs -n1}. - -To compress all html files using @strong{gzip} run: - -@strong{find . -name '*.html' | parallel gzip --best} - -If the file names may contain a newline use @strong{-0}. Substitute FOO BAR with -FUBAR in all files in this dir and subdirs: - -@strong{find . -type f -print0 | parallel -q0 perl -i -pe 's/FOO BAR/FUBAR/g'} - -Note @strong{-q} is needed because of the space in 'FOO BAR'. - -@node EXAMPLE: Reading arguments from command line -@chapter EXAMPLE: Reading arguments from command line - -GNU @strong{parallel} can take the arguments from command line instead of -stdin (standard input). To compress all html files in the current dir -using @strong{gzip} run: - -@strong{parallel gzip --best ::: *.html} - -To convert *.wav to *.mp3 using LAME running one process per CPU core -run: - -@strong{parallel lame @{@} -o @{.@}.mp3 ::: *.wav} - -@node EXAMPLE: Inserting multiple arguments -@chapter EXAMPLE: Inserting multiple arguments - -When moving a lot of files like this: @strong{mv *.log destdir} you will -sometimes get the error: - -@strong{bash: /bin/mv: Argument list too long} - -because there are too many files. You can instead do: - -@strong{ls | grep -E '\.log$' | parallel mv @{@} destdir} - -This will run @strong{mv} for each file. It can be done faster if @strong{mv} gets -as many arguments that will fit on the line: - -@strong{ls | grep -E '\.log$' | parallel -m mv @{@} destdir} - -@node EXAMPLE: Context replace -@chapter EXAMPLE: Context replace - -To remove the files @emph{pict0000.jpg} .. @emph{pict9999.jpg} you could do: - -@strong{seq -w 0 9999 | parallel rm pict@{@}.jpg} - -You could also do: - -@strong{seq -w 0 9999 | perl -pe 's/(.*)/pict$1.jpg/' | parallel -m rm} - -The first will run @strong{rm} 10000 times, while the last will only run -@strong{rm} as many times needed to keep the command line length short -enough to avoid @strong{Argument list too long} (it typically runs 1-2 times). - -You could also run: - -@strong{seq -w 0 9999 | parallel -X rm pict@{@}.jpg} - -This will also only run @strong{rm} as many times needed to keep the command -line length short enough. 
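To see what @strong{-X} would run without actually running anything,
@strong{--dryrun} prints the commands instead of executing them. A minimal
sketch (the number of arguments packed per line depends on the maximal
line length):

@verbatim
 seq -w 0 9999 | head -n 3 | parallel --dryrun -X rm pict{}.jpg
 # rm pict0000.jpg pict0001.jpg pict0002.jpg
@end verbatim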
- -@node EXAMPLE: Compute intensive jobs and substitution -@chapter EXAMPLE: Compute intensive jobs and substitution - -If ImageMagick is installed this will generate a thumbnail of a jpg -file: - -@strong{convert -geometry 120 foo.jpg thumb_foo.jpg} - -This will run with number-of-cpu-cores jobs in parallel for all jpg -files in a directory: - -@strong{ls *.jpg | parallel convert -geometry 120 @{@} thumb_@{@}} - -To do it recursively use @strong{find}: - -@strong{find . -name '*.jpg' | parallel convert -geometry 120 @{@} @{@}_thumb.jpg} - -Notice how the argument has to start with @strong{@{@}} as @strong{@{@}} will include path -(e.g. running @strong{convert -geometry 120 ./foo/bar.jpg -thumb_./foo/bar.jpg} would clearly be wrong). The command will -generate files like ./foo/bar.jpg_thumb.jpg. - -Use @strong{@{.@}} to avoid the extra .jpg in the file name. This command will -make files like ./foo/bar_thumb.jpg: - -@strong{find . -name '*.jpg' | parallel convert -geometry 120 @{@} @{.@}_thumb.jpg} - -@node EXAMPLE: Substitution and redirection -@chapter EXAMPLE: Substitution and redirection - -This will generate an uncompressed version of .gz-files next to the .gz-file: - -@strong{parallel zcat @{@} "}>@strong{"@{.@} ::: *.gz} - -Quoting of > is necessary to postpone the redirection. Another -solution is to quote the whole command: - -@strong{parallel "zcat @{@} }>@strong{@{.@}" ::: *.gz} - -Other special shell characters (such as * ; $ > < | >> <<) also need -to be put in quotes, as they may otherwise be interpreted by the shell -and not given to GNU @strong{parallel}. - -@node EXAMPLE: Composed commands -@chapter EXAMPLE: Composed commands - -A job can consist of several commands. This will print the number of -files in each directory: - -@strong{ls | parallel 'echo -n @{@}" "; ls @{@}|wc -l'} - -To put the output in a file called .dir: - -@strong{ls | parallel '(echo -n @{@}" "; ls @{@}|wc -l) }> @strong{@{@}.dir'} - -Even small shell scripts can be run by GNU @strong{parallel}: - -@strong{find . | parallel 'a=@{@}; name=$@{a##*/@}; upper=$(echo "$name" | tr "[:lower:]" "[:upper:]"); echo "$name - $upper"'} - -@strong{ls | parallel 'mv @{@} "$(echo @{@} | tr "[:upper:]" "[:lower:]")"'} - -Given a list of URLs, list all URLs that fail to download. Print the -line number and the URL. - -@strong{cat urlfile | parallel "wget @{@} 2}>@strong{/dev/null || grep -n @{@} urlfile"} - -Create a mirror directory with the same filenames except all files and -symlinks are empty files. - -@strong{cp -rs /the/source/dir mirror_dir; find mirror_dir -type l | parallel -m rm @{@} '&&' touch @{@}} - -Find the files in a list that do not exist - -@strong{cat file_list | parallel 'if [ ! -e @{@} ] ; then echo @{@}; fi'} - -@node EXAMPLE: Calling Bash functions -@chapter EXAMPLE: Calling Bash functions - -If the composed command is longer than a line, it becomes hard to -read. In Bash you can use functions. Just remember to @strong{export -f} the -function. 
@verbatim
 doit() {
   echo Doing it for $1
   sleep 2
   echo Done with $1
 }
 export -f doit
 parallel doit ::: 1 2 3

 doubleit() {
   echo Doing it for $1 $2
   sleep 2
   echo Done with $1 $2
 }
 export -f doubleit
 parallel doubleit ::: 1 2 3 ::: a b
@end verbatim

To do this on remote servers you need to transfer the function using
@strong{--env}:

@verbatim
 parallel --env doit -S server doit ::: 1 2 3
 parallel --env doubleit -S server doubleit ::: 1 2 3 ::: a b
@end verbatim

@node EXAMPLE: Removing file extension when processing files
@chapter EXAMPLE: Removing file extension when processing files

When processing files removing the file extension using @strong{@{.@}} is
often useful.

Create a directory for each zip-file and unzip it in that dir:

@strong{parallel 'mkdir @{.@}; cd @{.@}; unzip ../@{@}' ::: *.zip}

Recompress all .gz files in current directory using @strong{bzip2} running 1
job per CPU core in parallel:

@strong{parallel "zcat @{@} | bzip2 }>@strong{@{.@}.bz2 && rm @{@}" ::: *.gz}

Convert all WAV files to MP3 using LAME:

@strong{find sounddir -type f -name '*.wav' | parallel lame @{@} -o @{.@}.mp3}

Put all the converted files in the same directory:

@strong{find sounddir -type f -name '*.wav' | parallel lame @{@} -o mydir/@{/.@}.mp3}

@node EXAMPLE: Removing two file extensions when processing files and calling GNU Parallel from itself
@chapter EXAMPLE: Removing two file extensions when processing files and calling GNU Parallel from itself

If you have a directory with tar.gz files and want these extracted in
the corresponding dir (e.g. foo.tar.gz will be extracted in the dir
foo) you can do:

@strong{ls *.tar.gz| parallel --er @{tar@} 'echo @{tar@}|parallel "mkdir -p @{.@} ; tar -C @{.@} -xf @{.@}.tar.gz"'}

@node EXAMPLE: Download 10 images for each of the past 30 days
@chapter EXAMPLE: Download 10 images for each of the past 30 days

Let us assume a website stores images like:

@verbatim
 http://www.example.com/path/to/YYYYMMDD_##.jpg
@end verbatim

where YYYYMMDD is the date and ## is the number 01-10. This will
download images for the past 30 days:

@strong{parallel wget http://www.example.com/path/to/'$(date -d "today -@{1@} days" +%Y%m%d)_@{2@}.jpg' ::: $(seq 30) ::: $(seq -w 10)}

@strong{$(date -d "today -@{1@} days" +%Y%m%d)} will give the dates in
YYYYMMDD with @{1@} days subtracted.

@node EXAMPLE: Digital clock with "blinking" :
@chapter EXAMPLE: Digital clock with "blinking" :

The : in a digital clock blinks. To make every other line have a ':'
and the rest a ' ' a perl expression is used to look at the 3rd input
source. If the value modulo 2 is 1: Use ":" otherwise use " ":

@strong{parallel -k echo @{1@}'@{=3 $_=$_%2?":":" "=@}'@{2@}@{3@} ::: @{0..12@} ::: @{0..5@} ::: @{0..9@}}
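The @strong{@{= perl expression =@}} replacement string used above can be
tried on its own. A minimal sketch that merely classifies each
argument as odd or even:

@verbatim
 parallel -k echo '{= $_ = $_%2 ? "odd" : "even" =}' ::: 1 2 3 4
@end verbatim

This prints odd, even, odd, even on separate lines (@strong{-k} keeps the
input order).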
@node EXAMPLE: Breadth first parallel web crawler/mirrorer
@chapter EXAMPLE: Breadth first parallel web crawler/mirrorer

The script below will crawl and mirror a URL in parallel. It first
downloads pages that are 1 click down, then 2 clicks down, then 3;
instead of the normal depth-first order, where the first link on each
page is fetched first.

Run like this:

@strong{PARALLEL=-j100 ./parallel-crawl http://gatt.org.yeslab.org/}

Remove the @strong{wget} part if you only want a web crawler.

It works by fetching a page from a list of URLs and looking for links
in that page that are within the same starting URL and that have not
already been seen. These links are added to a new queue. When all the
pages from the list are done, the new queue is moved to the list of
URLs and the process is started over until no unseen links are found.

@verbatim
 #!/bin/bash

 # E.g. http://gatt.org.yeslab.org/
 URL=$1
 # Stay inside the start dir
 BASEURL=$(echo $URL | perl -pe 's:#.*::; s:(//.*/)[^/]*:$1:')
 URLLIST=$(mktemp urllist.XXXX)
 URLLIST2=$(mktemp urllist.XXXX)
 SEEN=$(mktemp seen.XXXX)

 # Spider to get the URLs
 echo $URL >$URLLIST
 cp $URLLIST $SEEN

 while [ -s $URLLIST ] ; do
   cat $URLLIST |
     parallel lynx -listonly -image_links -dump {} \; wget -qm -l1 -Q1 {} \; echo Spidered: {} \>\&2 |
     perl -ne 's/#.*//; s/\s+\d+.\s(\S+)$/$1/ and do { $seen{$1}++ or print }' |
     grep -F $BASEURL |
     grep -v -x -F -f $SEEN | tee -a $SEEN > $URLLIST2
   mv $URLLIST2 $URLLIST
 done

 rm -f $URLLIST $URLLIST2 $SEEN
@end verbatim

@node EXAMPLE: Process files from a tar file while unpacking
@chapter EXAMPLE: Process files from a tar file while unpacking

If the files to be processed are in a tar file then unpacking one file
and processing it immediately may be faster than first unpacking all
files.

@strong{tar xvf foo.tgz | perl -ne 'print $l;$l=$_;END@{print $l@}' |
parallel echo}

The Perl one-liner is needed to avoid a race condition.

@node EXAMPLE: Rewriting a for-loop and a while-read-loop
@chapter EXAMPLE: Rewriting a for-loop and a while-read-loop

for-loops like this:

@verbatim
 (for x in `cat list` ; do
   do_something $x
 done) | process_output
@end verbatim

and while-read-loops like this:

@verbatim
 cat list | (while read x ; do
   do_something $x
 done) | process_output
@end verbatim

can be written like this:

@strong{cat list | parallel do_something | process_output}

For example: Find which host name in a list has IP address 1.2.3.4:

@strong{cat hosts.txt | parallel -P 100 host | grep 1.2.3.4}

If the processing requires more steps, a for-loop like this:

@verbatim
 (for x in `cat list` ; do
   no_extension=${x%.*};
   do_something $x scale $no_extension.jpg
   do_step2 <$x $no_extension
 done) | process_output
@end verbatim

and a while-read-loop like this:

@verbatim
 cat list | (while read x ; do
   no_extension=${x%.*};
   do_something $x scale $no_extension.jpg
   do_step2 <$x $no_extension
 done) | process_output
@end verbatim

can be written like this:

@strong{cat list | parallel "do_something @{@} scale @{.@}.jpg ; do_step2 <@{@} @{.@}" | process_output}

@node EXAMPLE: Rewriting nested for-loops
@chapter EXAMPLE: Rewriting nested for-loops

Nested for-loops like this:

@verbatim
 (for x in `cat xlist` ; do
   for y in `cat ylist` ; do
     do_something $x $y
   done
 done) | process_output
@end verbatim

can be written like this:

@strong{parallel do_something @{1@} @{2@} :::: xlist ylist | process_output}

Nested for-loops like this:

@verbatim
 (for gender in M F ; do
   for size in S M L XL XXL ; do
     echo $gender $size
   done
 done) | sort
@end verbatim

can be written like this:

@strong{parallel echo @{1@} @{2@} ::: M F ::: S M L XL XXL | sort}
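To convince yourself that such a rewrite is equivalent, comparing the
sorted outputs is a quick check. A minimal sketch using bash process
substitution:

@verbatim
 diff <(for g in M F; do for s in S M L; do echo $g $s; done; done | sort) \
      <(parallel echo {1} {2} ::: M F ::: S M L | sort)
@end verbatim

No output from @strong{diff} means the loop and the rewrite produce the
same lines.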
@node EXAMPLE: Finding the lowest difference between files
@chapter EXAMPLE: Finding the lowest difference between files

@strong{diff} is good for finding differences in text files. @strong{diff | wc -l}
gives an indication of the size of the difference. To find the
differences between all files in the current dir do:

@strong{parallel --tag 'diff @{1@} @{2@} | wc -l' ::: * ::: * | sort -nk3}

This way it is possible to see if some files are closer to other
files.

@node EXAMPLE: for-loops with column names
@chapter EXAMPLE: for-loops with column names

When doing multiple nested for-loops it can be easier to keep track of
the loop variable if it is named instead of just having a number. Use
@strong{--header :} to let the first argument be a named alias for the
positional replacement string:

@verbatim
 parallel --header : echo {gender} {size} ::: gender M F ::: size S M L XL XXL
@end verbatim

This also works if the input file is a file with columns:

@verbatim
 cat addressbook.tsv | parallel --colsep '\t' --header : echo {Name} {E-mail address}
@end verbatim

@node EXAMPLE: Count the differences between all files in a dir
@chapter EXAMPLE: Count the differences between all files in a dir

Using @strong{--results} the results are saved in /tmp/diffcount*.

@verbatim
 parallel --results /tmp/diffcount "diff -U 0 {1} {2} |tail -n +3 |grep -v '^@'|wc -l" ::: * ::: *
@end verbatim

To see the difference between file A and file B look at the file
'/tmp/diffcount 1 A 2 B' where the spaces are TABs (\t).

@node EXAMPLE: Speeding up fast jobs
@chapter EXAMPLE: Speeding up fast jobs

Starting a job on the local machine takes around 3 ms. This can be a
big overhead if the job takes very few ms to run. Often you can group
small jobs together using @strong{-X} which will make the overhead less
significant. Compare the speed of these:

@verbatim
 seq -w 0 9999 | parallel touch pict{}.jpg

 seq -w 0 9999 | parallel -X touch pict{}.jpg
@end verbatim

If your program cannot take multiple arguments, then you can use GNU
@strong{parallel} to spawn multiple GNU @strong{parallel}s:

@verbatim
 seq -w 0 999999 | parallel -j10 --pipe parallel -j0 touch pict{}.jpg
@end verbatim

If @strong{-j0} normally spawns 506 jobs, then the above will try to spawn
5060 jobs. It is likely that you will hit the limit on the number of
processes and/or filehandles this way. Look at 'ulimit -n' and
'ulimit -u' to raise these limits.

@node EXAMPLE: Using shell variables
@chapter EXAMPLE: Using shell variables

When using shell variables you need to quote them correctly as they
may otherwise be split on spaces.

Notice the difference between:

@verbatim
 V=("My brother's 12\" records are worth <\$\$\$>"'!' Foo Bar)
 parallel echo ::: ${V[@]} # This is probably not what you want
@end verbatim

and:

@verbatim
 V=("My brother's 12\" records are worth <\$\$\$>"'!' Foo Bar)
 parallel echo ::: "${V[@]}"
@end verbatim

When using variables in the actual command that contain special
characters (e.g. space) you can quote them using @strong{'"$VAR"'} or using
"'s and @strong{-q}:

@verbatim
 V="Here are two "
 parallel echo "'$V'" ::: spaces
 parallel -q echo "$V" ::: spaces
@end verbatim

@node EXAMPLE: Group output lines
@chapter EXAMPLE: Group output lines

When running jobs that output data, you often do not want the output
of multiple jobs to run together. GNU @strong{parallel} defaults to grouping
the output of each job, so the output is printed when the job
finishes. If you want full lines to be printed while the job is
running you can use @strong{--line-buffer}. If you want output to be
printed as soon as possible you can use @strong{-u}.
- -Compare the output of: - -@strong{parallel traceroute ::: foss.org.my debian.org freenetproject.org} - -to the output of: - -@strong{parallel --line-buffer traceroute ::: foss.org.my debian.org freenetproject.org} - -and: - -@strong{parallel -u traceroute ::: foss.org.my debian.org freenetproject.org} - -@node EXAMPLE: Tag output lines -@chapter EXAMPLE: Tag output lines - -GNU @strong{parallel} groups the output lines, but it can be hard to see -where the different jobs begin. @strong{--tag} prepends the argument to make -that more visible: - -@strong{parallel --tag traceroute ::: foss.org.my debian.org freenetproject.org} - -@strong{--tag} works with @strong{--line-buffer} but not with @strong{-u}: - -@strong{parallel --tag --line-buffer traceroute ::: foss.org.my debian.org freenetproject.org} - -Check the uptime of the servers in @emph{~/.parallel/sshloginfile}: - -@strong{parallel --tag -S .. --nonall uptime} - -@node EXAMPLE: Keep order of output same as order of input -@chapter EXAMPLE: Keep order of output same as order of input - -Normally the output of a job will be printed as soon as it -completes. Sometimes you want the order of the output to remain the -same as the order of the input. This is often important, if the output -is used as input for another system. @strong{-k} will make sure the order of -output will be in the same order as input even if later jobs end -before earlier jobs. - -Append a string to every line in a text file: - -@strong{cat textfile | parallel -k echo @{@} append_string} - -If you remove @strong{-k} some of the lines may come out in the wrong order. - -Another example is @strong{traceroute}: - -@strong{parallel traceroute ::: foss.org.my debian.org freenetproject.org} - -will give traceroute of foss.org.my, debian.org and -freenetproject.org, but it will be sorted according to which job -completed first. - -To keep the order the same as input run: - -@strong{parallel -k traceroute ::: foss.org.my debian.org freenetproject.org} - -This will make sure the traceroute to foss.org.my will be printed -first. - -A bit more complex example is downloading a huge file in chunks in -parallel: Some internet connections will deliver more data if you -download files in parallel. For downloading files in parallel see: -"EXAMPLE: Download 10 images for each of the past 30 days". But if you -are downloading a big file you can download the file in chunks in -parallel. - -To download byte 10000000-19999999 you can use @strong{curl}: - -@strong{curl -r 10000000-19999999 http://example.com/the/big/file} > @strong{file.part} - -To download a 1 GB file we need 100 10MB chunks downloaded and -combined in the correct order. - -@strong{seq 0 99 | parallel -k curl -r \ - @{@}0000000-@{@}9999999 http://example.com/the/big/file} > @strong{file} - -@node EXAMPLE: Parallel grep -@chapter EXAMPLE: Parallel grep - -@strong{grep -r} greps recursively through directories. On multicore CPUs -GNU @strong{parallel} can often speed this up. - -@strong{find . -type f | parallel -k -j150% -n 1000 -m grep -H -n STRING @{@}} - -This will run 1.5 job per core, and give 1000 arguments to @strong{grep}. - -@node EXAMPLE: Grepping n lines for m regular expressions. -@chapter EXAMPLE: Grepping n lines for m regular expressions. 
- -The simplest solution to grep a big file for a lot of regexps is: - -@verbatim - grep -f regexps.txt bigfile -@end verbatim - -Or if the regexps are fixed strings: - -@verbatim - grep -F -f regexps.txt bigfile -@end verbatim - -There are 2 limiting factors: CPU and disk I/O. CPU is easy to -measure: If the grep takes >90% CPU (e.g. when running top), then the -CPU is a limiting factor, and parallelization will speed this up. If -not, then disk I/O is the limiting factor, and depending on the disk -system it may be faster or slower to parallelize. The only way to know -for certain is to measure. - -If the CPU is the limiting factor parallelization should be done on the regexps: - -@verbatim - cat regexp.txt | parallel --pipe -L1000 --round-robin grep -f - bigfile -@end verbatim - -This will start one grep per CPU and read bigfile one time per CPU, -but as that is done in parallel, all reads except the first will be -cached in RAM. Depending on the size of regexp.txt it may be faster to -use --block 10m instead of -L1000. If regexp.txt is too big to fit in -RAM, remove --round-robin and adjust -L1000. This will cause bigfile -to be read more times. - -Some storage systems perform better when reading multiple chunks in -parallel. This is true for some RAID systems and for some network file -systems. To parallelize the reading of bigfile: - -@verbatim - parallel --pipepart --block 100M -a bigfile grep -f regexp.txt -@end verbatim - -This will split bigfile into 100MB chunks and run grep on each of -these chunks. To parallelize both reading of bigfile and regexp.txt -combine the two using --fifo: - -@verbatim - parallel --pipepart --block 100M -a bigfile --fifo cat regexp.txt \ - \| parallel --pipe -L1000 --round-robin grep -f - {} -@end verbatim - -@node EXAMPLE: Using remote computers -@chapter EXAMPLE: Using remote computers - -To run commands on a remote computer SSH needs to be set up and you -must be able to login without entering a password (The commands -@strong{ssh-copy-id} and @strong{ssh-agent} may help you do that). - -If you need to login to a whole cluster, you typically do not want to -accept the host key for every host. You want to accept them the first -time and be warned if they are ever changed. To do that: - -@verbatim - # Add the servers to the sshloginfile - (echo servera; echo serverb) > .parallel/my_cluster - # Make sure .ssh/config exist - touch .ssh/config - cp .ssh/config .ssh/config.backup - # Disable StrictHostKeyChecking temporarily - (echo 'Host *'; echo StrictHostKeyChecking no) >> .ssh/config - parallel --slf my_cluster --nonall true - # Remove the disabling of StrictHostKeyChecking - mv .ssh/config.backup .ssh/config -@end verbatim - -The servers in @strong{.parallel/my_cluster} are now added in @strong{.ssh/known_hosts}. 
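An alternative that avoids editing @emph{.ssh/config} (not part of the recipe above; it assumes the OpenSSH tool @strong{ssh-keyscan} is installed) is to fetch the host keys and append them yourself:

@verbatim
  ssh-keyscan servera serverb >> ~/.ssh/known_hosts
@end verbatim

Note that @strong{ssh-keyscan} trusts the network at the moment it runs, so verify the collected keys out-of-band if that matters in your setup.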
- -To run @strong{echo} on @strong{server.example.com}: - -@verbatim - seq 10 | parallel --sshlogin server.example.com echo -@end verbatim - -To run commands on more than one remote computer run: - -@verbatim - seq 10 | parallel --sshlogin server.example.com,server2.example.net echo -@end verbatim - -Or: - -@verbatim - seq 10 | parallel --sshlogin server.example.com \ - --sshlogin server2.example.net echo -@end verbatim - -If the login username is @emph{foo} on @emph{server2.example.net} use: - -@verbatim - seq 10 | parallel --sshlogin server.example.com \ - --sshlogin foo@server2.example.net echo -@end verbatim - -If your list of hosts is @emph{server1-88.example.net} with login @emph{foo}: - -@verbatim - seq 10 | parallel -Sfoo@server{1..88}.example.net echo -@end verbatim - -To distribute the commands to a list of computers, make a file -@emph{mycomputers} with all the computers: - -@verbatim - server.example.com - foo@server2.example.com - server3.example.com -@end verbatim - -Then run: - -@verbatim - seq 10 | parallel --sshloginfile mycomputers echo -@end verbatim - -To include the local computer add the special sshlogin ':' to the list: - -@verbatim - server.example.com - foo@server2.example.com - server3.example.com - : -@end verbatim - -GNU @strong{parallel} will try to determine the number of CPU cores on each -of the remote computers, and run one job per CPU core - even if the -remote computers do not have the same number of CPU cores. - -If the number of CPU cores on the remote computers is not identified -correctly the number of CPU cores can be added in front. Here the -computer has 8 CPU cores. - -@verbatim - seq 10 | parallel --sshlogin 8/server.example.com echo -@end verbatim - -@node EXAMPLE: Transferring of files -@chapter EXAMPLE: Transferring of files - -To recompress gzipped files with @strong{bzip2} using a remote computer run: - -@verbatim - find logs/ -name '*.gz' | \ - parallel --sshlogin server.example.com \ - --transfer "zcat {} | bzip2 -9 >{.}.bz2" -@end verbatim - -This will list the .gz-files in the @emph{logs} directory and all -directories below. Then it will transfer the files to -@emph{server.example.com} to the corresponding directory in -@emph{$HOME/logs}. On @emph{server.example.com} the file will be recompressed -using @strong{zcat} and @strong{bzip2} resulting in the corresponding file with -@emph{.gz} replaced with @emph{.bz2}. - -If you want the resulting bz2-file to be transferred back to the local -computer add @emph{--return @{.@}.bz2}: - -@verbatim - find logs/ -name '*.gz' | \ - parallel --sshlogin server.example.com \ - --transfer --return {.}.bz2 "zcat {} | bzip2 -9 >{.}.bz2" -@end verbatim - -After the recompressing is done the @emph{.bz2}-file is transferred back to -the local computer and put next to the original @emph{.gz}-file. - -If you want to delete the transferred files on the remote computer add -@emph{--cleanup}. 
This will remove both the file transferred to the remote -computer and the files transferred from the remote computer: - -@verbatim - find logs/ -name '*.gz' | \ - parallel --sshlogin server.example.com \ - --transfer --return {.}.bz2 --cleanup "zcat {} | bzip2 -9 >{.}.bz2" -@end verbatim - -If you want run on several computers add the computers to @emph{--sshlogin} -either using ',' or multiple @emph{--sshlogin}: - -@verbatim - find logs/ -name '*.gz' | \ - parallel --sshlogin server.example.com,server2.example.com \ - --sshlogin server3.example.com \ - --transfer --return {.}.bz2 --cleanup "zcat {} | bzip2 -9 >{.}.bz2" -@end verbatim - -You can add the local computer using @emph{--sshlogin :}. This will disable the -removing and transferring for the local computer only: - -@verbatim - find logs/ -name '*.gz' | \ - parallel --sshlogin server.example.com,server2.example.com \ - --sshlogin server3.example.com \ - --sshlogin : \ - --transfer --return {.}.bz2 --cleanup "zcat {} | bzip2 -9 >{.}.bz2" -@end verbatim - -Often @emph{--transfer}, @emph{--return} and @emph{--cleanup} are used together. They can be -shortened to @emph{--trc}: - -@verbatim - find logs/ -name '*.gz' | \ - parallel --sshlogin server.example.com,server2.example.com \ - --sshlogin server3.example.com \ - --sshlogin : \ - --trc {.}.bz2 "zcat {} | bzip2 -9 >{.}.bz2" -@end verbatim - -With the file @emph{mycomputers} containing the list of computers it becomes: - -@verbatim - find logs/ -name '*.gz' | parallel --sshloginfile mycomputers \ - --trc {.}.bz2 "zcat {} | bzip2 -9 >{.}.bz2" -@end verbatim - -If the file @emph{~/.parallel/sshloginfile} contains the list of computers -the special short hand @emph{-S ..} can be used: - -@verbatim - find logs/ -name '*.gz' | parallel -S .. \ - --trc {.}.bz2 "zcat {} | bzip2 -9 >{.}.bz2" -@end verbatim - -@node EXAMPLE: Distributing work to local and remote computers -@chapter EXAMPLE: Distributing work to local and remote computers - -Convert *.mp3 to *.ogg running one process per CPU core on local computer and server2: - -@verbatim - parallel --trc {.}.ogg -S server2,: \ - 'mpg321 -w - {} | oggenc -q0 - -o {.}.ogg' ::: *.mp3 -@end verbatim - -@node EXAMPLE: Running the same command on remote computers -@chapter EXAMPLE: Running the same command on remote computers - -To run the command @strong{uptime} on remote computers you can do: - -@strong{parallel --tag --nonall -S server1,server2 uptime} - -@strong{--nonall} reads no arguments. If you have a list of jobs you want -run on each computer you can do: - -@strong{parallel --tag --onall -S server1,server2 echo ::: 1 2 3} - -Remove @strong{--tag} if you do not want the sshlogin added before the -output. - -If you have a lot of hosts use '-j0' to access more hosts in parallel. - -@node EXAMPLE: Parallelizing rsync -@chapter EXAMPLE: Parallelizing rsync - -@strong{rsync} is a great tool, but sometimes it will not fill up the -available bandwidth. This is often a problem when copying several big -files over high speed connections. - -The following will start one @strong{rsync} per big file in @emph{src-dir} to -@emph{dest-dir} on the server @emph{fooserver}: - -@strong{cd src-dir; find . -type f -size +100000 | parallel -v ssh fooserver -mkdir -p /dest-dir/@{//@}\;rsync -Havessh @{@} fooserver:/dest-dir/@{@}} - -The dirs created may end up with wrong permissions and smaller files -are not being transferred. 
To fix those run @strong{rsync} a final time: - -@strong{rsync -Havessh src-dir/ fooserver:/dest-dir/} - -If you are unable to push data, but need to pull them and the files -are called digits.png (e.g. 000000.png) you might be able to do: - -@strong{seq -w 0 99 | parallel rsync -Havessh fooserver:src-path/*@{@}.png destdir/} - -@node EXAMPLE: Use multiple inputs in one command -@chapter EXAMPLE: Use multiple inputs in one command - -Copy files like foo.es.ext to foo.ext: - -@strong{ls *.es.* | perl -pe 'print; s/\.es//' | parallel -N2 cp @{1@} @{2@}} - -The perl command spits out 2 lines for each input. GNU @strong{parallel} -takes 2 inputs (using @strong{-N2}) and replaces @{1@} and @{2@} with the inputs. - -Count in binary: - -@strong{parallel -k echo ::: 0 1 ::: 0 1 ::: 0 1 ::: 0 1 ::: 0 1 ::: 0 1} - -Print the number on the opposing sides of a six sided die: - -@strong{parallel --xapply -a <(seq 6) -a <(seq 6 -1 1) echo} - -@strong{parallel --xapply echo :::: <(seq 6) <(seq 6 -1 1)} - -Convert files from all subdirs to PNG-files with consecutive numbers -(useful for making input PNG's for @strong{ffmpeg}): - -@strong{parallel --xapply -a <(find . -type f | sort) -a <(seq $(find . -type f|wc -l)) convert @{1@} @{2@}.png} - -Alternative version: - -@strong{find . -type f | sort | parallel convert @{@} @{#@}.png} - -@node EXAMPLE: Use a table as input -@chapter EXAMPLE: Use a table as input - -Content of table_file.tsv: - -@verbatim - foobar - baz quux -@end verbatim - -To run: - -@verbatim - cmd -o bar -i foo - cmd -o quux -i baz -@end verbatim - -you can run: - -@strong{parallel -a table_file.tsv --colsep '\t' cmd -o @{2@} -i @{1@}} - -Note: The default for GNU @strong{parallel} is to remove the spaces around the columns. To keep the spaces: - -@strong{parallel -a table_file.tsv --trim n --colsep '\t' cmd -o @{2@} -i @{1@}} - -@node EXAMPLE: Run the same command 10 times -@chapter EXAMPLE: Run the same command 10 times - -If you want to run the same command with the same arguments 10 times -in parallel you can do: - -@strong{seq 10 | parallel -n0 my_command my_args} - -@node EXAMPLE: Working as cat | sh. Resource inexpensive jobs and evaluation -@chapter EXAMPLE: Working as cat | sh. Resource inexpensive jobs and evaluation - -GNU @strong{parallel} can work similar to @strong{cat | sh}. - -A resource inexpensive job is a job that takes very little CPU, disk -I/O and network I/O. Ping is an example of a resource inexpensive -job. wget is too - if the webpages are small. - -The content of the file jobs_to_run: - -@verbatim - ping -c 1 10.0.0.1 - wget http://example.com/status.cgi?ip=10.0.0.1 - ping -c 1 10.0.0.2 - wget http://example.com/status.cgi?ip=10.0.0.2 - ... - ping -c 1 10.0.0.255 - wget http://example.com/status.cgi?ip=10.0.0.255 -@end verbatim - -To run 100 processes simultaneously do: - -@strong{parallel -j 100 < jobs_to_run} - -As there is not a @emph{command} the jobs will be evaluated by the shell. - -@node EXAMPLE: Processing a big file using more cores -@chapter EXAMPLE: Processing a big file using more cores - -To process a big file or some output you can use @strong{--pipe} to split up -the data into blocks and pipe the blocks into the processing program. - -If the program is @strong{gzip -9} you can do: - -@strong{cat bigfile | parallel --pipe --recend '' -k gzip -9 }>@strong{bigfile.gz} - -This will split @strong{bigfile} into blocks of 1 MB and pass that to @strong{gzip --9} in parallel. One @strong{gzip} will be run per CPU core. 
The output of -@strong{gzip -9} will be kept in order and saved to @strong{bigfile.gz} - -@strong{gzip} works fine if the output is appended, but some processing does -not work like that - for example sorting. For this GNU @strong{parallel} can -put the output of each command into a file. This will sort a big file -in parallel: - -@strong{cat bigfile | parallel --pipe --files sort | parallel -Xj1 sort -m @{@} ';' rm @{@} }>@strong{bigfile.sort} - -Here @strong{bigfile} is split into blocks of around 1MB, each block ending -in '\n' (which is the default for @strong{--recend}). Each block is passed -to @strong{sort} and the output from @strong{sort} is saved into files. These -files are passed to the second @strong{parallel} that runs @strong{sort -m} on the -files before it removes the files. The output is saved to -@strong{bigfile.sort}. - -GNU @strong{parallel}'s @strong{--pipe} maxes out at around 100 MB/s because every -byte has to be copied through GNU @strong{parallel}. But if @strong{bigfile} is a -real (seekable) file GNU @strong{parallel} can by-pass the copying and send -the parts directly to the program: - -@strong{parallel --pipepart --block 100m -a bigfile --files sort | parallel -Xj1 sort -m @{@} ';' rm @{@} }>@strong{bigfile.sort} - -@node EXAMPLE: Running more than 500 jobs workaround -@chapter EXAMPLE: Running more than 500 jobs workaround - -If you need to run a massive amount of jobs in parallel, then you will -likely hit the filehandle limit which is often around 500 jobs. If you -are super user you can raise the limit in /etc/security/limits.conf -but you can also use this workaround. The filehandle limit is per -process. That means that if you just spawn more GNU @strong{parallel}s then -each of them can run 500 jobs. This will spawn up to 2500 jobs: - -@strong{cat myinput | parallel --pipe -N 50 --round-robin -j50 parallel -j50 your_prg} - -This will spawn up to 250000 jobs (use with caution - you need 250 GB RAM to do this): - -@strong{cat myinput | parallel --pipe -N 500 --round-robin -j500 parallel -j500 your_prg} - -@node EXAMPLE: Working as mutex and counting semaphore -@chapter EXAMPLE: Working as mutex and counting semaphore - -The command @strong{sem} is an alias for @strong{parallel --semaphore}. - -A counting semaphore will allow a given number of jobs to be started -in the background. When the number of jobs are running in the -background, GNU @strong{sem} will wait for one of these to complete before -starting another command. @strong{sem --wait} will wait for all jobs to -complete. - -Run 10 jobs concurrently in the background: - -@verbatim - for i in *.log ; do - echo $i - sem -j10 gzip $i ";" echo done - done - sem --wait -@end verbatim - -A mutex is a counting semaphore allowing only one job to run. This -will edit the file @emph{myfile} and prepends the file with lines with the -numbers 1 to 3. - -@verbatim - seq 3 | parallel sem sed -i -e 'i{}' myfile -@end verbatim - -As @emph{myfile} can be very big it is important only one process edits -the file at the same time. 
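The same pattern protects any shared resource. As a small sketch (the file name @emph{shared.log} is made up for this example), @strong{sem} without options works as a mutex, so only one @strong{echo} appends to the file at a time:

@verbatim
  for i in 1 2 3 ; do
    sem "echo line $i >> shared.log"
  done
  sem --wait
@end verbatim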
- -Name the semaphore to have multiple different semaphores active at the -same time: - -@verbatim - seq 3 | parallel sem --id mymutex sed -i -e 'i{}' myfile -@end verbatim - -@node EXAMPLE: Start editor with filenames from stdin (standard input) -@chapter EXAMPLE: Start editor with filenames from stdin (standard input) - -You can use GNU @strong{parallel} to start interactive programs like emacs or vi: - -@strong{cat filelist | parallel --tty -X emacs} - -@strong{cat filelist | parallel --tty -X vi} - -If there are more files than will fit on a single command line, the -editor will be started again with the remaining files. - -@node EXAMPLE: Running sudo -@chapter EXAMPLE: Running sudo - -@strong{sudo} requires a password to run a command as root. It caches the -access, so you only need to enter the password again if you have not -used @strong{sudo} for a while. - -The command: - -@verbatim - parallel sudo echo ::: This is a bad idea -@end verbatim - -is no good, as you would be prompted for the sudo password for each of -the jobs. You can either do: - -@verbatim - sudo echo This - parallel sudo echo ::: is a good idea -@end verbatim - -or: - -@verbatim - sudo parallel echo ::: This is a good idea -@end verbatim - -This way you only have to enter the sudo password once. - -@node EXAMPLE: GNU Parallel as queue system/batch manager -@chapter EXAMPLE: GNU Parallel as queue system/batch manager - -GNU @strong{parallel} can work as a simple job queue system or batch manager. -The idea is to put the jobs into a file and have GNU @strong{parallel} read -from that continuously. As GNU @strong{parallel} will stop at end of file we -use @strong{tail} to continue reading: - -@strong{true }>@strong{jobqueue}; @strong{tail -f jobqueue | parallel} - -To submit your jobs to the queue: - -@strong{echo my_command my_arg }>>@strong{ jobqueue} - -You can of course use @strong{-S} to distribute the jobs to remote -computers: - -@strong{true }>@strong{jobqueue}; @strong{tail -f jobqueue | parallel -S ..} - -There is a a small issue when using GNU @strong{parallel} as queue -system/batch manager: You have to submit JobSlot number of jobs before -they will start, and after that you can submit one at a time, and job -will start immediately if free slots are available. Output from the -running or completed jobs are held back and will only be printed when -JobSlots more jobs has been started (unless you use --ungroup or -u, -in which case the output from the jobs are printed immediately). -E.g. if you have 10 jobslots then the output from the first completed -job will only be printed when job 11 has started, and the output of -second completed job will only be printed when job 12 has started. - -@node EXAMPLE: GNU Parallel as dir processor -@chapter EXAMPLE: GNU Parallel as dir processor - -If you have a dir in which users drop files that needs to be processed -you can do this on GNU/Linux (If you know what @strong{inotifywait} is -called on other platforms file a bug report): - -@strong{inotifywait -q -m -r -e MOVED_TO -e CLOSE_WRITE --format %w%f my_dir | parallel --u echo} - -This will run the command @strong{echo} on each file put into @strong{my_dir} or -subdirs of @strong{my_dir}. - -You can of course use @strong{-S} to distribute the jobs to remote -computers: - -@strong{inotifywait -q -m -r -e MOVED_TO -e CLOSE_WRITE --format %w%f my_dir -| parallel -S .. 
-u echo} - -If the files to be processed are in a tar file then unpacking one file -and processing it immediately may be faster than first unpacking all -files. Set up the dir processor as above and unpack into the dir. - -Using GNU Parallel as dir processor has the same limitations as using -GNU Parallel as queue system/batch manager. - -@node QUOTING -@chapter QUOTING - -GNU @strong{parallel} is very liberal in quoting. You only need to quote -characters that have special meaning in shell: - -( ) $ ` ' " < > ; | \ - -and depending on context these needs to be quoted, too: - -~ & # ! ? space * @{ - -Therefore most people will never need more quoting than putting '\' -in front of the special characters. - -Often you can simply put \' around every ': - -@verbatim - perl -ne '/^\S+\s+\S+$/ and print $ARGV,"\n"' file -@end verbatim - -can be quoted: - -@verbatim - parallel perl -ne \''/^\S+\s+\S+$/ and print $ARGV,"\n"'\' ::: file -@end verbatim - -However, when you want to use a shell variable you need to quote the -$-sign. Here is an example using $PARALLEL_SEQ. This variable is set -by GNU @strong{parallel} itself, so the evaluation of the $ must be done by -the sub shell started by GNU @strong{parallel}: - -@strong{seq 10 | parallel -N2 echo seq:\$PARALLEL_SEQ arg1:@{1@} arg2:@{2@}} - -If the variable is set before GNU @strong{parallel} starts you can do this: - -@strong{VAR=this_is_set_before_starting} - -@strong{echo test | parallel echo @{@} $VAR} - -Prints: @strong{test this_is_set_before_starting} - -It is a little more tricky if the variable contains more than one space in a row: - -@strong{VAR="two spaces between each word"} - -@strong{echo test | parallel echo @{@} \'"$VAR"\'} - -Prints: @strong{test two spaces between each word} - -If the variable should not be evaluated by the shell starting GNU -@strong{parallel} but be evaluated by the sub shell started by GNU -@strong{parallel}, then you need to quote it: - -@strong{echo test | parallel VAR=this_is_set_after_starting \; echo @{@} \$VAR} - -Prints: @strong{test this_is_set_after_starting} - -It is a little more tricky if the variable contains space: - -@strong{echo test | parallel VAR='"two spaces between each word"' echo @{@} \'"$VAR"\'} - -Prints: @strong{test two spaces between each word} - -$$ is the shell variable containing the process id of the shell. This -will print the process id of the shell running GNU @strong{parallel}: - -@strong{seq 10 | parallel echo $$} - -And this will print the process ids of the sub shells started by GNU -@strong{parallel}. - -@strong{seq 10 | parallel echo \$\$} - -If the special characters should not be evaluated by the sub shell -then you need to protect it against evaluation from both the shell -starting GNU @strong{parallel} and the sub shell: - -@strong{echo test | parallel echo @{@} \\\$VAR} - -Prints: @strong{test $VAR} - -GNU @strong{parallel} can protect against evaluation by the sub shell by -using -q: - -@strong{echo test | parallel -q echo @{@} \$VAR} - -Prints: @strong{test $VAR} - -This is particularly useful if you have lots of quoting. If you want to run a perl script like this: - -@strong{perl -ne '/^\S+\s+\S+$/ and print $ARGV,"\n"' file} - -It needs to be quoted like this: - -@strong{ls | parallel perl -ne '/^\\S+\\s+\\S+\$/\ and\ print\ \$ARGV,\"\\n\"'} -@strong{ls | parallel perl -ne \''/^\S+\s+\S+$/ and print $ARGV,"\n"'\'} - -Notice how spaces, \'s, "'s, and $'s need to be quoted. 
GNU @strong{parallel} -can do the quoting by using option -q: - -@strong{ls | parallel -q perl -ne '/^\S+\s+\S+$/ and print $ARGV,"\n"'} - -However, this means you cannot make the sub shell interpret special -characters. For example because of @strong{-q} this WILL NOT WORK: - -@strong{ls *.gz | parallel -q "zcat @{@} }>@strong{@{.@}"} - -@strong{ls *.gz | parallel -q "zcat @{@} | bzip2 }>@strong{@{.@}.bz2"} - -because > and | need to be interpreted by the sub shell. - -If you get errors like: - -@verbatim - sh: -c: line 0: syntax error near unexpected token - sh: Syntax error: Unterminated quoted string - sh: -c: line 0: unexpected EOF while looking for matching `'' - sh: -c: line 1: syntax error: unexpected end of file -@end verbatim - -then you might try using @strong{-q}. - -If you are using @strong{bash} process substitution like @strong{<(cat foo)} then -you may try @strong{-q} and prepending @emph{command} with @strong{bash -c}: - -@strong{ls | parallel -q bash -c 'wc -c <(echo @{@})'} - -Or for substituting output: - -@strong{ls | parallel -q bash -c 'tar c @{@} | tee }>@strong{(gzip }>@strong{@{@}.tar.gz) | bzip2 }>@strong{@{@}.tar.bz2'} - -@strong{Conclusion}: To avoid dealing with the quoting problems it may be -easier just to write a small script or a function (remember to -@strong{export -f} the function) and have GNU @strong{parallel} call that. - -@node LIST RUNNING JOBS -@chapter LIST RUNNING JOBS - -If you want a list of the jobs currently running you can run: - -@strong{killall -USR1 parallel} - -GNU @strong{parallel} will then print the currently running jobs on stderr -(standard error). - -@node COMPLETE RUNNING JOBS BUT DO NOT START NEW JOBS -@chapter COMPLETE RUNNING JOBS BUT DO NOT START NEW JOBS - -If you regret starting a lot of jobs you can simply break GNU @strong{parallel}, -but if you want to make sure you do not have half-completed jobs you -should send the signal @strong{SIGTERM} to GNU @strong{parallel}: - -@strong{killall -TERM parallel} - -This will tell GNU @strong{parallel} to not start any new jobs, but wait until -the currently running jobs are finished before exiting. - -@node ENVIRONMENT VARIABLES -@chapter ENVIRONMENT VARIABLES - -@table @asis -@item $PARALLEL_PID -@anchor{$PARALLEL_PID} - -The environment variable $PARALLEL_PID is set by GNU @strong{parallel} and -is visible to the jobs started from GNU @strong{parallel}. This makes it -possible for the jobs to communicate directly to GNU @strong{parallel}. -Remember to quote the $, so it gets evaluated by the correct -shell. - -@strong{Example:} If each of the jobs tests a solution and one of jobs finds -the solution the job can tell GNU @strong{parallel} not to start more jobs -by: @strong{kill -TERM $PARALLEL_PID}. This only works on the local -computer. - -@item $PARALLEL_SEQ -@anchor{$PARALLEL_SEQ} - -$PARALLEL_SEQ will be set to the sequence number of the job -running. Remember to quote the $, so it gets evaluated by the correct -shell. - -@strong{Example:} - -@strong{seq 10 | parallel -N2 echo seq:'$'PARALLEL_SEQ arg1:@{1@} arg2:@{2@}} - -@item $TMPDIR -@anchor{$TMPDIR} - -Directory for temporary files. See: @strong{--tmpdir}. - -@item $PARALLEL -@anchor{$PARALLEL} - -The environment variable $PARALLEL will be used as default options for -GNU @strong{parallel}. If the variable contains special shell characters -(e.g. $, *, or space) then these need to be to be escaped with \. 
- -@strong{Example:} - -@strong{cat list | parallel -j1 -k -v ls} - -can be written as: - -@strong{cat list | PARALLEL="-kvj1" parallel ls} - -@strong{cat list | parallel -j1 -k -v -S"myssh user@@server" ls} - -can be written as: - -@strong{cat list | PARALLEL='-kvj1 -S myssh\ user@@server' parallel echo} - -Notice the \ in the middle is needed because 'myssh' and 'user@@server' -must be one argument. - -@end table - -@node DEFAULT PROFILE (CONFIG FILE) -@chapter DEFAULT PROFILE (CONFIG FILE) - -The file ~/.parallel/config (formerly known as .parallelrc) will be -read if it exists. Lines starting with '#' will be ignored. It can be -formatted like the environment variable $PARALLEL, but it is often -easier to simply put each option on its own line. - -Options on the command line takes precedence over the environment -variable $PARALLEL which takes precedence over the file -~/.parallel/config. - -@node PROFILE FILES -@chapter PROFILE FILES - -If @strong{--profile} set, GNU @strong{parallel} will read the profile from that file instead of -~/.parallel/config. You can have multiple @strong{--profiles}. - -Example: Profile for running a command on every sshlogin in -~/.ssh/sshlogins and prepend the output with the sshlogin: - -@verbatim - echo --tag -S .. --nonall > ~/.parallel/n - parallel -Jn uptime -@end verbatim - -Example: Profile for running every command with @strong{-j-1} and @strong{nice} - -@verbatim - echo -j-1 nice > ~/.parallel/nice_profile - parallel -J nice_profile bzip2 -9 ::: * -@end verbatim - -Example: Profile for running a perl script before every command: - -@verbatim - echo "perl -e '\$a=\$\$; print \$a,\" \",'\$PARALLEL_SEQ',\" \";';" > ~/.parallel/pre_perl - parallel -J pre_perl echo ::: * -@end verbatim - -Note how the $ and " need to be quoted using \. - -Example: Profile for running distributed jobs with @strong{nice} on the -remote computers: - -@verbatim - echo -S .. nice > ~/.parallel/dist - parallel -J dist --trc {.}.bz2 bzip2 -9 ::: * -@end verbatim - -@node EXIT STATUS -@chapter EXIT STATUS - -If @strong{--halt-on-error} 0 or not specified: - -@table @asis -@item 0 -@anchor{0 1} - -All jobs ran without error. - -@item 1-253 -@anchor{1-253} - -Some of the jobs failed. The exit status gives the number of failed jobs - -@item 254 -@anchor{254} - -More than 253 jobs failed. - -@item 255 -@anchor{255} - -Other error. - -@end table - -If @strong{--halt-on-error} 1 or 2: Exit status of the failing job. - -@node DIFFERENCES BETWEEN GNU Parallel AND ALTERNATIVES -@chapter DIFFERENCES BETWEEN GNU Parallel AND ALTERNATIVES - -There are a lot programs with some of the functionality of GNU -@strong{parallel}. GNU @strong{parallel} strives to include the best of the -functionality without sacrificing ease of use. 
- -@menu -* SUMMARY TABLE:: -* DIFFERENCES BETWEEN xargs AND GNU Parallel:: -* DIFFERENCES BETWEEN find -exec AND GNU Parallel:: -* DIFFERENCES BETWEEN make -j AND GNU Parallel:: -* DIFFERENCES BETWEEN ppss AND GNU Parallel:: -* DIFFERENCES BETWEEN pexec AND GNU Parallel:: -* DIFFERENCES BETWEEN xjobs AND GNU Parallel:: -* DIFFERENCES BETWEEN prll AND GNU Parallel:: -* DIFFERENCES BETWEEN dxargs AND GNU Parallel:: -* DIFFERENCES BETWEEN mdm/middleman AND GNU Parallel:: -* DIFFERENCES BETWEEN xapply AND GNU Parallel:: -* DIFFERENCES BETWEEN paexec AND GNU Parallel:: -* DIFFERENCES BETWEEN map AND GNU Parallel:: -* DIFFERENCES BETWEEN ClusterSSH AND GNU Parallel:: -@end menu - -@node SUMMARY TABLE -@section SUMMARY TABLE - -The following features are in some of the comparable tools: - -Inputs - I1. Arguments can be read from stdin - I2. Arguments can be read from a file - I3. Arguments can be read from multiple files - I4. Arguments can be read from command line - I5. Arguments can be read from a table - I6. Arguments can be read from the same file using #! (shebang) - I7. Line oriented input as default (Quoting of special chars not needed) - -Manipulation of input - M1. Composed command - M2. Multiple arguments can fill up an execution line - M3. Arguments can be put anywhere in the execution line - M4. Multiple arguments can be put anywhere in the execution line - M5. Arguments can be replaced with context - M6. Input can be treated as complete execution line - -Outputs - O1. Grouping output so output from different jobs do not mix - O2. Send stderr (standard error) to stderr (standard error) - O3. Send stdout (standard output) to stdout (standard output) - O4. Order of output can be same as order of input - O5. Stdout only contains stdout (standard output) from the command - O6. Stderr only contains stderr (standard error) from the command - -Execution - E1. Running jobs in parallel - E2. List running jobs - E3. Finish running jobs, but do not start new jobs - E4. Number of running jobs can depend on number of cpus - E5. Finish running jobs, but do not start new jobs after first failure - E6. Number of running jobs can be adjusted while running - -Remote execution - R1. Jobs can be run on remote computers - R2. Basefiles can be transferred - R3. Argument files can be transferred - R4. Result files can be transferred - R5. Cleanup of transferred files - R6. No config files needed - R7. Do not run more than SSHD's MaxStartups can handle - R8. Configurable SSH command - R9. Retry if connection breaks occasionally - -Semaphore - S1. Possibility to work as a mutex - S2. Possibility to work as a counting semaphore - -Legend - - = no - x = not applicable - ID = yes - -As every new version of the programs are not tested the table may be -outdated. Please file a bug-report if you find errors (See REPORTING -BUGS). - -parallel: -I1 I2 I3 I4 I5 I6 I7 -M1 M2 M3 M4 M5 M6 -O1 O2 O3 O4 O5 O6 -E1 E2 E3 E4 E5 E6 -R1 R2 R3 R4 R5 R6 R7 R8 R9 -S1 S2 - -xargs: -I1 I2 - - - - - -- M2 M3 - - - -- O2 O3 - O5 O6 -E1 - - - - - -- - - - - x - - - -- - - -find -exec: -- - - x - x - -- M2 M3 - - - - -- O2 O3 O4 O5 O6 -- - - - - - - -- - - - - - - - - -x x - -make -j: -- - - - - - - -- - - - - - -O1 O2 O3 - x O6 -E1 - - - E5 - -- - - - - - - - - -- - - -ppss: -I1 I2 - - - - I7 -M1 - M3 - - M6 -O1 - - x - - -E1 E2 ?E3 E4 - - -R1 R2 R3 R4 - - ?R7 ? ? 
-- - - -pexec: -I1 I2 - I4 I5 - - -M1 - M3 - - M6 -O1 O2 O3 - O5 O6 -E1 - - E4 - E6 -R1 - - - - R6 - - - -S1 - - -xjobs: TODO - Please file a bug-report if you know what features xjobs -supports (See REPORTING BUGS). - -prll: TODO - Please file a bug-report if you know what features prll -supports (See REPORTING BUGS). - -dxargs: TODO - Please file a bug-report if you know what features dxargs -supports (See REPORTING BUGS). - -mdm/middelman: TODO - Please file a bug-report if you know what -features mdm/middelman supports (See REPORTING BUGS). - -xapply: TODO - Please file a bug-report if you know what features xapply -supports (See REPORTING BUGS). - -paexec: TODO - Please file a bug-report if you know what features paexec -supports (See REPORTING BUGS). - -ClusterSSH: TODO - Please file a bug-report if you know what features ClusterSSH -supports (See REPORTING BUGS). - -@node DIFFERENCES BETWEEN xargs AND GNU Parallel -@section DIFFERENCES BETWEEN xargs AND GNU Parallel - -@strong{xargs} offer some of the same possibilities as GNU @strong{parallel}. - -@strong{xargs} deals badly with special characters (such as space, ' and -"). To see the problem try this: - -@verbatim - touch important_file - touch 'not important_file' - ls not* | xargs rm - mkdir -p "My brother's 12\" records" - ls | xargs rmdir -@end verbatim - -You can specify @strong{-0} or @strong{-d "\n"}, but many input generators are not -optimized for using @strong{NUL} as separator but are optimized for -@strong{newline} as separator. E.g @strong{head}, @strong{tail}, @strong{awk}, @strong{ls}, @strong{echo}, -@strong{sed}, @strong{tar -v}, @strong{perl} (@strong{-0} and \0 instead of \n), @strong{locate} -(requires using @strong{-0}), @strong{find} (requires using @strong{-print0}), @strong{grep} -(requires user to use @strong{-z} or @strong{-Z}), @strong{sort} (requires using @strong{-z}). - -So GNU @strong{parallel}'s newline separation can be emulated with: - -@strong{cat | xargs -d "\n" -n1 @emph{command}} - -@strong{xargs} can run a given number of jobs in parallel, but has no -support for running number-of-cpu-cores jobs in parallel. - -@strong{xargs} has no support for grouping the output, therefore output may -run together, e.g. the first half of a line is from one process and -the last half of the line is from another process. The example -@strong{Parallel grep} cannot be done reliably with @strong{xargs} because of -this. To see this in action try: - -@verbatim - parallel perl -e '\$a=\"1{}\"x10000000\;print\ \$a,\"\\n\"' '>' {} ::: a b c d e f - ls -l a b c d e f - parallel -kP4 -n1 grep 1 > out.par ::: a b c d e f - echo a b c d e f | xargs -P4 -n1 grep 1 > out.xargs-unbuf - echo a b c d e f | xargs -P4 -n1 grep --line-buffered 1 > out.xargs-linebuf - echo a b c d e f | xargs -n1 grep 1 > out.xargs-serial - ls -l out* - md5sum out* -@end verbatim - -@strong{xargs} has no support for keeping the order of the output, therefore -if running jobs in parallel using @strong{xargs} the output of the second -job cannot be postponed till the first job is done. - -@strong{xargs} has no support for running jobs on remote computers. - -@strong{xargs} has no support for context replace, so you will have to create the -arguments. - -If you use a replace string in @strong{xargs} (@strong{-I}) you can not force -@strong{xargs} to use more than one argument. - -Quoting in @strong{xargs} works like @strong{-q} in GNU @strong{parallel}. This means -composed commands and redirection require using @strong{bash -c}. 
- -@strong{ls | parallel "wc @{@} }> @strong{@{@}.wc"} - -becomes (assuming you have 8 cores) - -@strong{ls | xargs -d "\n" -P8 -I @{@} bash -c "wc @{@} }>@strong{ @{@}.wc"} - -and - -@strong{ls | parallel "echo @{@}; ls @{@}|wc"} - -becomes (assuming you have 8 cores) - -@strong{ls | xargs -d "\n" -P8 -I @{@} bash -c "echo @{@}; ls @{@}|wc"} - -@node DIFFERENCES BETWEEN find -exec AND GNU Parallel -@section DIFFERENCES BETWEEN find -exec AND GNU Parallel - -@strong{find -exec} offer some of the same possibilities as GNU @strong{parallel}. - -@strong{find -exec} only works on files. So processing other input (such as -hosts or URLs) will require creating these inputs as files. @strong{find --exec} has no support for running commands in parallel. - -@node DIFFERENCES BETWEEN make -j AND GNU Parallel -@section DIFFERENCES BETWEEN make -j AND GNU Parallel - -@strong{make -j} can run jobs in parallel, but requires a crafted Makefile -to do this. That results in extra quoting to get filename containing -newline to work correctly. - -@strong{make -j} has no support for grouping the output, therefore output -may run together, e.g. the first half of a line is from one process -and the last half of the line is from another process. The example -@strong{Parallel grep} cannot be done reliably with @strong{make -j} because of -this. - -(Very early versions of GNU @strong{parallel} were coincidently implemented -using @strong{make -j}). - -@node DIFFERENCES BETWEEN ppss AND GNU Parallel -@section DIFFERENCES BETWEEN ppss AND GNU Parallel - -@strong{ppss} is also a tool for running jobs in parallel. - -The output of @strong{ppss} is status information and thus not useful for -using as input for another command. The output from the jobs are put -into files. - -The argument replace string ($ITEM) cannot be changed. Arguments must -be quoted - thus arguments containing special characters (space '"&!*) -may cause problems. More than one argument is not supported. File -names containing newlines are not processed correctly. When reading -input from a file null cannot be used as a terminator. @strong{ppss} needs -to read the whole input file before starting any jobs. - -Output and status information is stored in ppss_dir and thus requires -cleanup when completed. If the dir is not removed before running -@strong{ppss} again it may cause nothing to happen as @strong{ppss} thinks the -task is already done. GNU @strong{parallel} will normally not need cleaning -up if running locally and will only need cleaning up if stopped -abnormally and running remote (@strong{--cleanup} may not complete if -stopped abnormally). The example @strong{Parallel grep} would require extra -postprocessing if written using @strong{ppss}. - -For remote systems PPSS requires 3 steps: config, deploy, and -start. GNU @strong{parallel} only requires one step. 
- -@menu -* EXAMPLES FROM ppss MANUAL:: -@end menu - -@node EXAMPLES FROM ppss MANUAL -@subsection EXAMPLES FROM ppss MANUAL - -Here are the examples from @strong{ppss}'s manual page with the equivalent -using GNU @strong{parallel}: - -@strong{1} ./ppss.sh standalone -d /path/to/files -c 'gzip ' - -@strong{1} find /path/to/files -type f | parallel gzip - -@strong{2} ./ppss.sh standalone -d /path/to/files -c 'cp "$ITEM" /destination/dir ' - -@strong{2} find /path/to/files -type f | parallel cp @{@} /destination/dir - -@strong{3} ./ppss.sh standalone -f list-of-urls.txt -c 'wget -q ' - -@strong{3} parallel -a list-of-urls.txt wget -q - -@strong{4} ./ppss.sh standalone -f list-of-urls.txt -c 'wget -q "$ITEM"' - -@strong{4} parallel -a list-of-urls.txt wget -q @{@} - -@strong{5} ./ppss config -C config.cfg -c 'encode.sh ' -d /source/dir -m -192.168.1.100 -u ppss -k ppss-key.key -S ./encode.sh -n nodes.txt -o -/some/output/dir --upload --download ; ./ppss deploy -C config.cfg ; -./ppss start -C config - -@strong{5} # parallel does not use configs. If you want a different username put it in nodes.txt: user@@hostname - -@strong{5} find source/dir -type f | parallel --sshloginfile nodes.txt --trc @{.@}.mp3 lame -a @{@} -o @{.@}.mp3 --preset standard --quiet - -@strong{6} ./ppss stop -C config.cfg - -@strong{6} killall -TERM parallel - -@strong{7} ./ppss pause -C config.cfg - -@strong{7} Press: CTRL-Z or killall -SIGTSTP parallel - -@strong{8} ./ppss continue -C config.cfg - -@strong{8} Enter: fg or killall -SIGCONT parallel - -@strong{9} ./ppss.sh status -C config.cfg - -@strong{9} killall -SIGUSR2 parallel - -@node DIFFERENCES BETWEEN pexec AND GNU Parallel -@section DIFFERENCES BETWEEN pexec AND GNU Parallel - -@strong{pexec} is also a tool for running jobs in parallel. - -Here are the examples from @strong{pexec}'s info page with the equivalent -using GNU @strong{parallel}: - -@strong{1} pexec -o sqrt-%s.dat -p "$(seq 10)" -e NUM -n 4 -c -- \ - 'echo "scale=10000;sqrt($NUM)" | bc' - -@strong{1} seq 10 | parallel -j4 'echo "scale=10000;sqrt(@{@})" | bc > sqrt-@{@}.dat' - -@strong{2} pexec -p "$(ls myfiles*.ext)" -i %s -o %s.sort -- sort - -@strong{2} ls myfiles*.ext | parallel sort @{@} ">@{@}.sort" - -@strong{3} pexec -f image.list -n auto -e B -u star.log -c -- \ - 'fistar $B.fits -f 100 -F id,x,y,flux -o $B.star' - -@strong{3} parallel -a image.list \ - 'fistar @{@}.fits -f 100 -F id,x,y,flux -o @{@}.star' 2>star.log - -@strong{4} pexec -r *.png -e IMG -c -o - -- \ - 'convert $IMG $@{IMG%.png@}.jpeg ; "echo $IMG: done"' - -@strong{4} ls *.png | parallel 'convert @{@} @{.@}.jpeg; echo @{@}: done' - -@strong{5} pexec -r *.png -i %s -o %s.jpg -c 'pngtopnm | pnmtojpeg' - -@strong{5} ls *.png | parallel 'pngtopnm < @{@} | pnmtojpeg > @{@}.jpg' - -@strong{6} for p in *.png ; do echo $@{p%.png@} ; done | \ - pexec -f - -i %s.png -o %s.jpg -c 'pngtopnm | pnmtojpeg' - -@strong{6} ls *.png | parallel 'pngtopnm < @{@} | pnmtojpeg > @{.@}.jpg' - -@strong{7} LIST=$(for p in *.png ; do echo $@{p%.png@} ; done) - pexec -r $LIST -i %s.png -o %s.jpg -c 'pngtopnm | pnmtojpeg' - -@strong{7} ls *.png | parallel 'pngtopnm < @{@} | pnmtojpeg > @{.@}.jpg' - -@strong{8} pexec -n 8 -r *.jpg -y unix -e IMG -c \ - 'pexec -j -m blockread -d $IMG | \ - jpegtopnm | pnmscale 0.5 | pnmtojpeg | \ - pexec -j -m blockwrite -s th_$IMG' - -@strong{8} Combining GNU @strong{parallel} and GNU @strong{sem}. 
- -@strong{8} ls *jpg | parallel -j8 'sem --id blockread cat @{@} | jpegtopnm |' \ - 'pnmscale 0.5 | pnmtojpeg | sem --id blockwrite cat > th_@{@}' - -@strong{8} If reading and writing is done to the same disk, this may be -faster as only one process will be either reading or writing: - -@strong{8} ls *jpg | parallel -j8 'sem --id diskio cat @{@} | jpegtopnm |' \ - 'pnmscale 0.5 | pnmtojpeg | sem --id diskio cat > th_@{@}' - -@node DIFFERENCES BETWEEN xjobs AND GNU Parallel -@section DIFFERENCES BETWEEN xjobs AND GNU Parallel - -@strong{xjobs} is also a tool for running jobs in parallel. It only supports -running jobs on your local computer. - -@strong{xjobs} deals badly with special characters just like @strong{xargs}. See -the section @strong{DIFFERENCES BETWEEN xargs AND GNU Parallel}. - -Here are the examples from @strong{xjobs}'s man page with the equivalent -using GNU @strong{parallel}: - -@strong{1} ls -1 *.zip | xjobs unzip - -@strong{1} ls *.zip | parallel unzip - -@strong{2} ls -1 *.zip | xjobs -n unzip - -@strong{2} ls *.zip | parallel unzip >/dev/null - -@strong{3} find . -name '*.bak' | xjobs gzip - -@strong{3} find . -name '*.bak' | parallel gzip - -@strong{4} ls -1 *.jar | sed 's/\(.*\)/\1 > \1.idx/' | xjobs jar tf - -@strong{4} ls *.jar | parallel jar tf @{@} '>' @{@}.idx - -@strong{5} xjobs -s script - -@strong{5} cat script | parallel - -@strong{6} mkfifo /var/run/my_named_pipe; -xjobs -s /var/run/my_named_pipe & -echo unzip 1.zip >> /var/run/my_named_pipe; -echo tar cf /backup/myhome.tar /home/me >> /var/run/my_named_pipe - -@strong{6} mkfifo /var/run/my_named_pipe; -cat /var/run/my_named_pipe | parallel & -echo unzip 1.zip >> /var/run/my_named_pipe; -echo tar cf /backup/myhome.tar /home/me >> /var/run/my_named_pipe - -@node DIFFERENCES BETWEEN prll AND GNU Parallel -@section DIFFERENCES BETWEEN prll AND GNU Parallel - -@strong{prll} is also a tool for running jobs in parallel. It does not -support running jobs on remote computers. - -@strong{prll} encourages using BASH aliases and BASH functions instead of -scripts. GNU @strong{parallel} will never support running aliases (see why -http://www.perlmonks.org/index.pl?node_id=484296). However, scripts, -composed commands, or functions exported with @strong{export -f} work just -fine. - -@strong{prll} generates a lot of status information on stderr (standard -error) which makes it harder to use the stderr (standard error) output -of the job directly as input for another program. - -Here is the example from @strong{prll}'s man page with the equivalent -using GNU @strong{parallel}: - -prll -s 'mogrify -flip $1' *.jpg - -parallel mogrify -flip ::: *.jpg - -@node DIFFERENCES BETWEEN dxargs AND GNU Parallel -@section DIFFERENCES BETWEEN dxargs AND GNU Parallel - -@strong{dxargs} is also a tool for running jobs in parallel. - -@strong{dxargs} does not deal well with more simultaneous jobs than SSHD's -MaxStartups. @strong{dxargs} is only built for remote run jobs, but does not -support transferring of files. - -@node DIFFERENCES BETWEEN mdm/middleman AND GNU Parallel -@section DIFFERENCES BETWEEN mdm/middleman AND GNU Parallel - -middleman(mdm) is also a tool for running jobs in parallel. 
- -Here are the shellscripts of http://mdm.berlios.de/usage.html ported -to GNU @strong{parallel}: - -@strong{seq 19 | parallel buffon -o - | sort -n }>@strong{ result} - -@strong{cat files | parallel cmd} - -@strong{find dir -execdir sem cmd @{@} \;} - -@node DIFFERENCES BETWEEN xapply AND GNU Parallel -@section DIFFERENCES BETWEEN xapply AND GNU Parallel - -@strong{xapply} can run jobs in parallel on the local computer. - -Here are the examples from @strong{xapply}'s man page with the equivalent -using GNU @strong{parallel}: - -@strong{1} xapply '(cd %1 && make all)' */ - -@strong{1} parallel 'cd @{@} && make all' ::: */ - -@strong{2} xapply -f 'diff %1 ../version5/%1' manifest | more - -@strong{2} parallel diff @{@} ../version5/@{@} < manifest | more - -@strong{3} xapply -p/dev/null -f 'diff %1 %2' manifest1 checklist1 - -@strong{3} parallel --xapply diff @{1@} @{2@} :::: manifest1 checklist1 - -@strong{4} xapply 'indent' *.c - -@strong{4} parallel indent ::: *.c - -@strong{5} find ~ksb/bin -type f ! -perm -111 -print | xapply -f -v 'chmod a+x' - - -@strong{5} find ~ksb/bin -type f ! -perm -111 -print | parallel -v chmod a+x - -@strong{6} find */ -... | fmt 960 1024 | xapply -f -i /dev/tty 'vi' - - -@strong{6} sh <(find */ -... | parallel -s 1024 echo vi) - -@strong{6} find */ -... | parallel -s 1024 -Xuj1 vi - -@strong{7} find ... | xapply -f -5 -i /dev/tty 'vi' - - - - - - -@strong{7} sh <(find ... |parallel -n5 echo vi) - -@strong{7} find ... |parallel -n5 -uj1 vi - -@strong{8} xapply -fn "" /etc/passwd - -@strong{8} parallel -k echo < /etc/passwd - -@strong{9} tr ':' '\012' < /etc/passwd | xapply -7 -nf 'chown %1 %6' - - - - - - - - -@strong{9} tr ':' '\012' < /etc/passwd | parallel -N7 chown @{1@} @{6@} - -@strong{10} xapply '[ -d %1/RCS ] || echo %1' */ - -@strong{10} parallel '[ -d @{@}/RCS ] || echo @{@}' ::: */ - -@strong{11} xapply -f '[ -f %1 ] && echo %1' List | ... - -@strong{11} parallel '[ -f @{@} ] && echo @{@}' < List | ... - -@node DIFFERENCES BETWEEN paexec AND GNU Parallel -@section DIFFERENCES BETWEEN paexec AND GNU Parallel - -@strong{paexec} can run jobs in parallel on both the local and remote computers. - -@strong{paexec} requires commands to print a blank line as the last -output. This means you will have to write a wrapper for most programs. - -@strong{paexec} has a job dependency facility so a job can depend on another -job to be executed successfully. Sort of a poor-man's @strong{make}. - -Here are the examples from @strong{paexec}'s example catalog with the equivalent -using GNU @strong{parallel}: - -@table @asis -@item 1_div_X_run: -@anchor{1_div_X_run:} - -@verbatim - ../../paexec -s -l -c "`pwd`/1_div_X_cmd" -n +1 < My\ brother\'s\ 12\"\ records - - ls | map 'echo -n `gzip < "%" | wc -c`; echo -n '*100/'; wc -c < "%"' | bc -@end verbatim - -It works with GNU @strong{parallel}: - -@verbatim - ls | parallel 'echo -n `gzip < {} | wc -c`; echo -n '*100/'; wc -c < {}' | bc -@end verbatim - -And you can even get the file name prepended: - -@verbatim - ls | parallel --tag '(echo -n `gzip < {} | wc -c`'*100/'; wc -c < {}) | bc' -@end verbatim - -@strong{map} has no support for grouping. 
So this gives the wrong results -without any warnings: - -@verbatim - parallel perl -e '\$a=\"1{}\"x10000000\;print\ \$a,\"\\n\"' '>' {} ::: a b c d e f - ls -l a b c d e f - parallel -kP4 -n1 grep 1 > out.par ::: a b c d e f - map -p 4 'grep 1' a b c d e f > out.map-unbuf - map -p 4 'grep --line-buffered 1' a b c d e f > out.map-linebuf - map -p 1 'grep --line-buffered 1' a b c d e f > out.map-serial - ls -l out* - md5sum out* -@end verbatim - -The documentation shows a workaround, but not only does that mix -stdout (standard output) with stderr (standard error) it also fails -completely for certain jobs (and may even be considered less readable): - -@verbatim - parallel echo -n {} ::: 1 2 3 - - map -p 4 'echo -n % 2>&1 | sed -e "s/^/$$:/"' 1 2 3 | sort | cut -f2- -d: -@end verbatim - -@strong{map} cannot handle bundled options: @strong{map -vp 0 echo this fails} - -@strong{map} does not have an argument separator on the command line, but -uses the first argument as command. This makes quoting harder which again -may affect readability. Compare: - -@verbatim - map -p 2 perl\\\ -ne\\\ \\\'/^\\\\S+\\\\s+\\\\S+\\\$/\\\ and\\\ print\\\ \\\$ARGV,\\\"\\\\n\\\"\\\' * - - parallel -q perl -ne '/^\S+\s+\S+$/ and print $ARGV,"\n"' ::: * -@end verbatim - -@strong{map} can do multiple arguments with context replace, but not without -context replace: - -@verbatim - parallel --xargs echo 'BEGIN{'{}'}END' ::: 1 2 3 -@end verbatim - -@strong{map} does not set exit value according to whether one of the jobs -failed: - -@verbatim - parallel false ::: 1 || echo Job failed - - map false 1 || echo Never run -@end verbatim - -@strong{map} requires Perl v5.10.0 making it harder to use on old systems. - -@strong{map} has no way of using % in the command (GNU Parallel has -I to -specify another replacement string than @{@}). - -By design @strong{map} is option incompatible with @strong{xargs}, it does not -have remote job execution, a structured way of saving results, -multiple input sources, progress indicator, configurable record -delimiter (only field delimiter), logging of jobs run with possibility -to resume, keeping the output in the same order as input, --pipe -processing, and dynamically timeouts. - -@node DIFFERENCES BETWEEN ClusterSSH AND GNU Parallel -@section DIFFERENCES BETWEEN ClusterSSH AND GNU Parallel - -ClusterSSH solves a different problem than GNU @strong{parallel}. - -ClusterSSH opens a terminal window for each computer and using a -master window you can run the same command on all the computers. This -is typically used for administrating several computers that are almost -identical. - -GNU @strong{parallel} runs the same (or different) commands with different -arguments in parallel possibly using remote computers to help -computing. If more than one computer is listed in @strong{-S} GNU @strong{parallel} may -only use one of these (e.g. if there are 8 jobs to be run and one -computer has 8 cores). 
- -GNU @strong{parallel} can be used as a poor-man's version of ClusterSSH: - -@strong{parallel --nonall -S server-a,server-b do_stuff foo bar} - -@node BUGS -@chapter BUGS - -@menu -* Quoting of newline:: -* Speed:: -* --nice limits command length:: -* Aliases and functions do not work:: -@end menu - -@node Quoting of newline -@section Quoting of newline - -Because of the way newline is quoted this will not work: - -echo 1,2,3 | parallel -vkd, "echo 'a@{@}b'" - -However, these will all work: - -echo 1,2,3 | parallel -vkd, echo a@{@}b - -echo 1,2,3 | parallel -vkd, "echo 'a'@{@}'b'" - -echo 1,2,3 | parallel -vkd, "echo 'a'"@{@}"'b'" - -@node Speed -@section Speed - -@menu -* Startup:: -* Job startup:: -* SSH:: -* Disk access:: -@end menu - -@node Startup -@subsection Startup - -GNU @strong{parallel} is slow at starting up - around 250 ms the first time -and 150 ms after that. - -@node Job startup -@subsection Job startup - -Starting a job on the local machine takes around 3 ms. This can be a -big overhead if the job takes very few ms to run. Often you can group -small jobs together using @strong{-X} which will make the overhead less -significant. Or you can run multiple GNU @strong{parallel}s as described in -@strong{EXAMPLE: Speeding up fast jobs}. - -Using @strong{--ungroup} the 3 ms can be lowered to around 2 ms. - -@node SSH -@subsection SSH - -When using multiple computers GNU @strong{parallel} opens @strong{ssh} connections -to them to figure out how many connections can be used reliably -simultaneously (Namely SSHD's MaxStartups). This test is done for each -host in serial, so if your @strong{--sshloginfile} contains many hosts it may -be slow. - -If your jobs are short you may see that there are fewer jobs running -on the remove systems than expected. This is due to time spent logging -in and out. @strong{-M} may help here. - -@node Disk access -@subsection Disk access - -A single disk can normally read data faster if it reads one file at a -time instead of reading a lot of files in parallel, as this will avoid -disk seeks. However, newer disk systems with multiple drives can read -faster if reading from multiple files in parallel. - -If the jobs are of the form read-all-compute-all-write-all, so -everything is read before anything is written, it may be faster to -force only one disk access at the time: - -@verbatim - sem --id diskio cat file | compute | sem --id diskio cat > file -@end verbatim - -If the jobs are of the form read-compute-write, so writing starts -before all reading is done, it may be faster to force only one reader -and writer at the time: - -@verbatim - sem --id read cat file | compute | sem --id write cat > file -@end verbatim - -If the jobs are of the form read-compute-read-compute, it may be -faster to run more jobs in parallel than the system has CPUs, as some -of the jobs will be stuck waiting for disk access. - -@node --nice limits command length -@section --nice limits command length - -The current implementation of @strong{--nice} is too pessimistic in the max -allowed command length. It only uses a little more than half of what -it could. This affects @strong{-X} and @strong{-m}. If this becomes a real problem for -you file a bug-report. 
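To see the limits GNU @strong{parallel} computes on your system you can ask it directly (the output is system dependent):

@verbatim
  parallel --max-line-length-allowed
  parallel --show-limits
@end verbatim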
- -@node Aliases and functions do not work -@section Aliases and functions do not work - -If you get: - -@strong{Can't exec "@emph{command}": No such file or directory} - -or: - -@strong{open3: exec of by @emph{command} failed} - -it may be because @emph{command} is not known, but it could also be -because @emph{command} is an alias or a function. If it is a function you -need to @strong{export -f} the function first. An alias will, however, not -work (see why http://www.perlmonks.org/index.pl?node_id=484296), so -change your alias to a script. - -@node REPORTING BUGS -@chapter REPORTING BUGS - -Report bugs to or -https://savannah.gnu.org/bugs/?func=additem&group=parallel - -Your bug report should always include: - -@itemize -@item The error message you get (if any). - -@item The complete output of @strong{parallel --version}. If you are not running -the latest released version you should specify why you believe the -problem is not fixed in that version. - -@item A complete example that others can run that shows the problem. This -should preferably be small and simple. A combination of @strong{yes}, -@strong{seq}, @strong{cat}, @strong{echo}, and @strong{sleep} can reproduce most errors. If -your example requires large files, see if you can make them by -something like @strong{seq 1000000} > @strong{file} or @strong{yes | head -n 10000000} > -@strong{file}. If your example requires remote execution, see if you can -use @strong{localhost} - maybe using another login. - -@item The output of your example. If your problem is not easily reproduced -by others, the output might help them figure out the problem. - -@item Whether you have watched the intro videos -(http://www.youtube.com/playlist?list=PL284C9FF2488BC6D1), walked -through the tutorial (man parallel_tutorial), and read the EXAMPLE -section in the man page (man parallel - search for EXAMPLE:). - -@end itemize - -If you suspect the error is dependent on your environment or -distribution, please see if you can reproduce the error on one of -these VirtualBox images: -http://sourceforge.net/projects/virtualboximage/files/ - -Specifying the name of your distribution is not enough as you may have -installed software that is not in the VirtualBox images. - -If you cannot reproduce the error on any of the VirtualBox images -above, you should assume the debugging will be done through you. That -will put more burden on you and it is extra important you give any -information that help. In general the problem will be fixed faster and -with less work for you if you can reproduce the error on a VirtualBox. - -@node AUTHOR -@chapter AUTHOR - -When using GNU @strong{parallel} for a publication please cite: - -O. Tange (2011): GNU Parallel - The Command-Line Power Tool, ;login: -The USENIX Magazine, February 2011:42-47. - -Copyright (C) 2007-10-18 Ole Tange, http://ole.tange.dk - -Copyright (C) 2008,2009,2010 Ole Tange, http://ole.tange.dk - -Copyright (C) 2010,2011,2012,2013,2014 Ole Tange, http://ole.tange.dk -and Free Software Foundation, Inc. - -Parts of the manual concerning @strong{xargs} compatibility is inspired by -the manual of @strong{xargs} from GNU findutils 4.4.2. - -@node LICENSE -@chapter LICENSE - -Copyright (C) 2007,2008,2009,2010,2011,2012,2013 Free Software Foundation, -Inc. - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 3 of the License, or -at your option any later version. 

@node REPORTING BUGS
@chapter REPORTING BUGS

Report bugs to <bug-parallel@gnu.org> or
https://savannah.gnu.org/bugs/?func=additem&group=parallel

Your bug report should always include:

@itemize
@item The error message you get (if any).

@item The complete output of @strong{parallel --version}. If you are not running
the latest released version you should specify why you believe the
problem is not fixed in that version.

@item A complete example that others can run that shows the problem. This
should preferably be small and simple. A combination of @strong{yes},
@strong{seq}, @strong{cat}, @strong{echo}, and @strong{sleep} can reproduce most errors. If
your example requires large files, see if you can make them by
something like @strong{seq 1000000} > @strong{file} or @strong{yes | head -n 10000000} >
@strong{file}. If your example requires remote execution, see if you can
use @strong{localhost} - maybe using another login.

@item The output of your example. If your problem is not easily reproduced
by others, the output might help them figure out the problem.

@item Whether you have watched the intro videos
(http://www.youtube.com/playlist?list=PL284C9FF2488BC6D1), walked
through the tutorial (man parallel_tutorial), and read the EXAMPLE
section in the man page (man parallel - search for EXAMPLE:).

@end itemize

If you suspect the error is dependent on your environment or
distribution, please see if you can reproduce the error on one of
these VirtualBox images:
http://sourceforge.net/projects/virtualboximage/files/

Specifying the name of your distribution is not enough as you may have
installed software that is not in the VirtualBox images.

If you cannot reproduce the error on any of the VirtualBox images
above, you should assume the debugging will be done through you. That
will put more burden on you and it is extra important you give any
information that helps. In general the problem will be fixed faster and
with less work for you if you can reproduce the error on a VirtualBox.

@node AUTHOR
@chapter AUTHOR

When using GNU @strong{parallel} for a publication please cite:

O. Tange (2011): GNU Parallel - The Command-Line Power Tool, ;login:
The USENIX Magazine, February 2011:42-47.

Copyright (C) 2007-10-18 Ole Tange, http://ole.tange.dk

Copyright (C) 2008,2009,2010 Ole Tange, http://ole.tange.dk

Copyright (C) 2010,2011,2012,2013,2014 Ole Tange, http://ole.tange.dk
and Free Software Foundation, Inc.

Parts of the manual concerning @strong{xargs} compatibility are inspired by
the manual of @strong{xargs} from GNU findutils 4.4.2.

@node LICENSE
@chapter LICENSE

Copyright (C) 2007,2008,2009,2010,2011,2012,2013 Free Software Foundation,
Inc.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
at your option any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.

@menu
* Documentation license I::
* Documentation license II::
@end menu

@node Documentation license I
@section Documentation license I

Permission is granted to copy, distribute and/or modify this documentation
under the terms of the GNU Free Documentation License, Version 1.3 or
any later version published by the Free Software Foundation; with no
Invariant Sections, with no Front-Cover Texts, and with no Back-Cover
Texts. A copy of the license is included in the file fdl.txt.

@node Documentation license II
@section Documentation license II

You are free:

@table @asis
@item @strong{to Share}
@anchor{@strong{to Share}}

to copy, distribute and transmit the work

@item @strong{to Remix}
@anchor{@strong{to Remix}}

to adapt the work

@end table

Under the following conditions:

@table @asis
@item @strong{Attribution}
@anchor{@strong{Attribution}}

You must attribute the work in the manner specified by the author or
licensor (but not in any way that suggests that they endorse you or
your use of the work).

@item @strong{Share Alike}
@anchor{@strong{Share Alike}}

If you alter, transform, or build upon this work, you may distribute
the resulting work only under the same, similar or a compatible
license.

@end table

With the understanding that:

@table @asis
@item @strong{Waiver}
@anchor{@strong{Waiver}}

Any of the above conditions can be waived if you get permission from
the copyright holder.

@item @strong{Public Domain}
@anchor{@strong{Public Domain}}

Where the work or any of its elements is in the public domain under
applicable law, that status is in no way affected by the license.

@item @strong{Other Rights}
@anchor{@strong{Other Rights}}

In no way are any of the following rights affected by the license:

@itemize
@item Your fair dealing or fair use rights, or other applicable
copyright exceptions and limitations;

@item The author's moral rights;

@item Rights other persons may have either in the work itself or in
how the work is used, such as publicity or privacy rights.

@end itemize

@end table

@table @asis
@item @strong{Notice}
@anchor{@strong{Notice}}

For any reuse or distribution, you must make clear to others the
license terms of this work.

@end table

A copy of the full license is included in the file as cc-by-sa.txt.

@node DEPENDENCIES
@chapter DEPENDENCIES

GNU @strong{parallel} uses Perl, and the Perl modules Getopt::Long,
IPC::Open3, Symbol, IO::File, POSIX, and File::Temp. For remote usage
it also uses rsync with ssh.
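
A quick way to verify that these Perl modules are available (a sketch,
not from the manual; all of the listed modules ship with core Perl):

@verbatim
  perl -MGetopt::Long -MIPC::Open3 -MSymbol -MIO::File \
       -MPOSIX -MFile::Temp -e 1 && echo "All modules found"
@end verbatim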

@node SEE ALSO
@chapter SEE ALSO

@strong{ssh}(1), @strong{rsync}(1), @strong{find}(1), @strong{xargs}(1), @strong{dirname}(1),
@strong{make}(1), @strong{pexec}(1), @strong{ppss}(1), @strong{xjobs}(1), @strong{prll}(1),
@strong{dxargs}(1), @strong{mdm}(1)

@bye
diff --git a/src/sem.pdf b/src/sem.pdf
deleted file mode 100644
index 464dd3b546603e5d8628dd9d84611f315d8c8f45..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
diff --git a/src/sem.texi b/src/sem.texi
deleted file mode 100644
index fc1f23f5..00000000
--- a/src/sem.texi
+++ /dev/null
@@ -1,359 +0,0 @@
\input texinfo
@setfilename sem.info

@documentencoding utf-8

@settitle sem - semaphore for executing shell command lines in parallel

@node Top
@top sem

@menu
* NAME::
* SYNOPSIS::
* DESCRIPTION::
* OPTIONS::
* EXAMPLE@asis{:} Gzipping *.log::
* EXAMPLE@asis{:} Protecting pod2html from itself::
* BUGS::
* REPORTING BUGS::
* AUTHOR::
* LICENSE::
* DEPENDENCIES::
* SEE ALSO::
@end menu

@node NAME
@chapter NAME

sem - semaphore for executing shell command lines in parallel

@node SYNOPSIS
@chapter SYNOPSIS

@strong{sem} [--fg] [--id @emph{name}] [--timeout @emph{secs}] [-j @emph{N}] [--wait] command

@node DESCRIPTION
@chapter DESCRIPTION

GNU @strong{sem} is an alias for GNU @strong{parallel --semaphore}.

It works as a tool for executing shell commands in parallel. GNU
@strong{sem} acts as a counting semaphore. When GNU @strong{sem} is called with
command it will start the command in the background. When @emph{num}
number of commands are running in the background, GNU @strong{sem} will wait
for one of these to complete before starting another command.

Before looking at the options you may want to check out the examples
after the list of options. That will give you an idea of what GNU
@strong{sem} is capable of.
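
As a minimal illustration of the counting-semaphore behaviour (a
sketch; the id @emph{demo} is made up):

@verbatim
  sem -j2 --id demo sleep 10   # starts in background, 1 of 2 slots used
  sem -j2 --id demo sleep 10   # starts in background, 2 of 2 slots used
  sem -j2 --id demo sleep 10   # waits for a free slot before starting
  sem --id demo --wait         # block until all commands have finished
@end verbatim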

@node OPTIONS
@chapter OPTIONS

@table @asis
@item @emph{command}
@anchor{@emph{command}}

Command to execute. The command may be followed by arguments for the command.

@item @strong{--bg}
@anchor{@strong{--bg}}

Run command in background thus GNU @strong{parallel} will not wait for
completion of the command before exiting. This is the default.

See also: @strong{--fg}

@item @strong{-j} @emph{N}
@anchor{@strong{-j} @emph{N}}

Run up to N commands in parallel. Default is 1 thus acting like a
mutex.

@item @strong{--jobs} @emph{N}
@anchor{@strong{--jobs} @emph{N}}

@item @strong{-j} @emph{N}
@anchor{@strong{-j} @emph{N} 1}

@item @strong{--max-procs} @emph{N}
@anchor{@strong{--max-procs} @emph{N}}

@item @strong{-P} @emph{N}
@anchor{@strong{-P} @emph{N}}

Run up to N commands in parallel. Default is 1 thus acting like a
mutex.

@item @strong{--jobs} @emph{+N}
@anchor{@strong{--jobs} @emph{+N}}

@item @strong{-j} @emph{+N}
@anchor{@strong{-j} @emph{+N}}

@item @strong{--max-procs} @emph{+N}
@anchor{@strong{--max-procs} @emph{+N}}

@item @strong{-P} @emph{+N}
@anchor{@strong{-P} @emph{+N}}

Add N to the number of CPU cores. Run up to this many jobs in
parallel. For compute intensive jobs @strong{-j} +0 is useful as it will run
number-of-cpu-cores jobs simultaneously.

@item @strong{--jobs} @emph{-N}
@anchor{@strong{--jobs} @emph{-N}}

@item @strong{-j} @emph{-N}
@anchor{@strong{-j} @emph{-N}}

@item @strong{--max-procs} @emph{-N}
@anchor{@strong{--max-procs} @emph{-N}}

@item @strong{-P} @emph{-N}
@anchor{@strong{-P} @emph{-N}}

Subtract N from the number of CPU cores. Run up to this many jobs in
parallel. If the evaluated number is less than 1 then 1 will be used.
See also @strong{--use-cpus-instead-of-cores}.

@item @strong{--jobs} @emph{N}%
@anchor{@strong{--jobs} @emph{N}%}

@item @strong{-j} @emph{N}%
@anchor{@strong{-j} @emph{N}%}

@item @strong{--max-procs} @emph{N}%
@anchor{@strong{--max-procs} @emph{N}%}

@item @strong{-P} @emph{N}%
@anchor{@strong{-P} @emph{N}%}

Multiply N% with the number of CPU cores. Run up to this many jobs in
parallel. If the evaluated number is less than 1 then 1 will be used.
See also @strong{--use-cpus-instead-of-cores}.

@item @strong{--jobs} @emph{procfile}
@anchor{@strong{--jobs} @emph{procfile}}

@item @strong{-j} @emph{procfile}
@anchor{@strong{-j} @emph{procfile}}

@item @strong{--max-procs} @emph{procfile}
@anchor{@strong{--max-procs} @emph{procfile}}

@item @strong{-P} @emph{procfile}
@anchor{@strong{-P} @emph{procfile}}

Read parameter from file. Use the content of @emph{procfile} as parameter
for @emph{-j}. E.g. @emph{procfile} could contain the string 100% or +2 or
10.

@item @strong{--semaphorename} @emph{name}
@anchor{@strong{--semaphorename} @emph{name}}

@item @strong{--id} @emph{name}
@anchor{@strong{--id} @emph{name}}

Use @strong{name} as the name of the semaphore. Default is the name of the
controlling tty (output from @strong{tty}).

The default normally works as expected when used interactively, but
when used in a script @emph{name} should be set. @emph{$$} or @emph{my_task_name}
is often a good value.

The semaphore is stored in ~/.parallel/semaphores/

@item @strong{--fg}
@anchor{@strong{--fg}}

Do not put command in background.

@item @strong{--timeout} @emph{secs} (not implemented)
@anchor{@strong{--timeout} @emph{secs} (not implemented)}

@item @strong{-t} @emph{secs} (not implemented)
@anchor{@strong{-t} @emph{secs} (not implemented)}

If the semaphore is not released within @emph{secs} seconds, take it anyway.

@item @strong{--wait}
@anchor{@strong{--wait}}

@item @strong{-w}
@anchor{@strong{-w}}

Wait for all commands to complete.

@end table
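
A sketch of reading the @strong{-j} parameter from a file (the path and
the command @emph{compress_logs} are made up for illustration), so the
allowed parallelism can be chosen at run time:

@verbatim
  echo 50% > /tmp/jobs                   # allow half the CPU cores
  sem -j /tmp/jobs --id batch compress_logs
@end verbatim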

@node EXAMPLE: Gzipping *.log
@chapter EXAMPLE: Gzipping *.log

Run one gzip process per CPU core. Block until a CPU core becomes
available.

@verbatim
  for i in *.log ; do
    echo $i
    sem -j+0 gzip $i ";" echo done
  done
  sem --wait
@end verbatim

@node EXAMPLE: Protecting pod2html from itself
@chapter EXAMPLE: Protecting pod2html from itself

pod2html creates two files: pod2htmd.tmp and pod2htmi.tmp which it
does not clean up. It uses these two files for a short time. But if
you run multiple pod2html in parallel (e.g. in a Makefile with make
-j) you need to protect pod2html from running twice at the same
time. @strong{sem} running as a mutex will do just that:

@verbatim
  sem --fg --id pod2html pod2html foo.pod > foo.html
  sem --fg --id pod2html rm -f pod2htmd.tmp pod2htmi.tmp
@end verbatim

@node BUGS
@chapter BUGS

None known.

@node REPORTING BUGS
@chapter REPORTING BUGS

Report bugs to <bug-parallel@gnu.org>.

@node AUTHOR
@chapter AUTHOR

Copyright (C) 2010,2011,2012,2013 Ole Tange, http://ole.tange.dk and Free
Software Foundation, Inc.

@node LICENSE
@chapter LICENSE

Copyright (C) 2010,2011,2012,2013 Free Software Foundation, Inc.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
at your option any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.

@menu
* Documentation license I::
* Documentation license II::
@end menu

@node Documentation license I
@section Documentation license I

Permission is granted to copy, distribute and/or modify this documentation
under the terms of the GNU Free Documentation License, Version 1.3 or
any later version published by the Free Software Foundation; with no
Invariant Sections, with no Front-Cover Texts, and with no Back-Cover
Texts. A copy of the license is included in the file fdl.txt.

@node Documentation license II
@section Documentation license II

You are free:

@table @asis
@item @strong{to Share}
@anchor{@strong{to Share}}

to copy, distribute and transmit the work

@item @strong{to Remix}
@anchor{@strong{to Remix}}

to adapt the work

@end table

Under the following conditions:

@table @asis
@item @strong{Attribution}
@anchor{@strong{Attribution}}

You must attribute the work in the manner specified by the author or
licensor (but not in any way that suggests that they endorse you or
your use of the work).

@item @strong{Share Alike}
@anchor{@strong{Share Alike}}

If you alter, transform, or build upon this work, you may distribute
the resulting work only under the same, similar or a compatible
license.

@end table

With the understanding that:

@table @asis
@item @strong{Waiver}
@anchor{@strong{Waiver}}

Any of the above conditions can be waived if you get permission from
the copyright holder.

@item @strong{Public Domain}
@anchor{@strong{Public Domain}}

Where the work or any of its elements is in the public domain under
applicable law, that status is in no way affected by the license.
- -@item @strong{Other Rights} -@anchor{@strong{Other Rights}} - -In no way are any of the following rights affected by the license: - -@itemize -@item Your fair dealing or fair use rights, or other applicable -copyright exceptions and limitations; - -@item The author's moral rights; - -@item Rights other persons may have either in the work itself or in -how the work is used, such as publicity or privacy rights. - -@end itemize - -@end table - -@table @asis -@item @strong{Notice} -@anchor{@strong{Notice}} - -For any reuse or distribution, you must make clear to others the -license terms of this work. - -@end table - -A copy of the full license is included in the file as cc-by-sa.txt. - -@node DEPENDENCIES -@chapter DEPENDENCIES - -GNU @strong{sem} uses Perl, and the Perl modules Getopt::Long, -Symbol, Fcntl. - -@node SEE ALSO -@chapter SEE ALSO - -@strong{parallel}(1) - -@bye diff --git a/src/sql.texi b/src/sql.texi deleted file mode 100644 index 51858bbc..00000000 --- a/src/sql.texi +++ /dev/null @@ -1,540 +0,0 @@ -\input texinfo -@setfilename sql.info - -@documentencoding utf-8 - -@settitle sql - execute a command on a database determined by a dburl - -@node Top -@top sql - -@menu -* NAME:: -* SYNOPSIS:: -* DESCRIPTION:: -* DBURL:: -* EXAMPLES:: -* REPORTING BUGS:: -* AUTHOR:: -* LICENSE:: -* DEPENDENCIES:: -* FILES:: -* SEE ALSO:: -@end menu - -@node NAME -@chapter NAME - -sql - execute a command on a database determined by a dburl - -@node SYNOPSIS -@chapter SYNOPSIS - -@strong{sql} [options] @emph{dburl} [@emph{commands}] - -@strong{sql} [options] @emph{dburl} < commandfile - -@strong{#!/usr/bin/sql} @strong{--shebang} [options] @emph{dburl} - -@node DESCRIPTION -@chapter DESCRIPTION - -GNU @strong{sql} aims to give a simple, unified interface for accessing -databases through all the different databases' command line -clients. So far the focus has been on giving a common way to specify -login information (protocol, username, password, hostname, and port -number), size (database and table size), and running queries. - -The database is addressed using a DBURL. If @emph{commands} are left out -you will get that database's interactive shell. - -GNU @strong{sql} is often used in combination with GNU @strong{parallel}. - -@table @asis -@item @emph{dburl} -@anchor{@emph{dburl}} - -A DBURL has the following syntax: -[sql:]vendor:// -[[user][:password]@@][host][:port]/[database][?sqlquery] - -See the section DBURL below. - -@item @emph{commands} -@anchor{@emph{commands}} - -The SQL commands to run. Each argument will have a newline -appended. - -Example: "SELECT * FROM foo;" "SELECT * FROM bar;" - -If the arguments contain '\n' or '\x0a' this will be replaced with a -newline: - -Example: "SELECT * FROM foo;\n SELECT * FROM bar;" - -If no commands are given SQL is read from the keyboard or STDIN. - -Example: echo 'SELECT * FROM foo;' | sql mysql:/// - -@item @strong{--db-size} -@anchor{@strong{--db-size}} - -@item @strong{--dbsize} -@anchor{@strong{--dbsize}} - -Size of database. Show the size of the database on disk. For Oracle -this requires access to read the table @emph{dba_data_files} - the user -@emph{system} has that. - -@item @strong{--help} -@anchor{@strong{--help}} - -@item @strong{-h} -@anchor{@strong{-h}} - -Print a summary of the options to GNU @strong{sql} and exit. - -@item @strong{--html} -@anchor{@strong{--html}} - -HTML output. Turn on HTML tabular output. 

@item @strong{--show-processlist}
@anchor{@strong{--show-processlist}}

@item @strong{--proclist}
@anchor{@strong{--proclist}}

@item @strong{--listproc}
@anchor{@strong{--listproc}}

Show the list of running queries.

@item @strong{--show-databases}
@anchor{@strong{--show-databases}}

@item @strong{--showdbs}
@anchor{@strong{--showdbs}}

@item @strong{--list-databases}
@anchor{@strong{--list-databases}}

@item @strong{--listdbs}
@anchor{@strong{--listdbs}}

List the databases (table spaces) in the database.

@item @strong{--show-tables}
@anchor{@strong{--show-tables}}

@item @strong{--list-tables}
@anchor{@strong{--list-tables}}

@item @strong{--table-list}
@anchor{@strong{--table-list}}

List the tables in the database.

@item @strong{--noheaders}
@anchor{@strong{--noheaders}}

@item @strong{--no-headers}
@anchor{@strong{--no-headers}}

@item @strong{-n}
@anchor{@strong{-n}}

Remove headers and footers and print only tuples. Bug in Oracle: it
still prints number of rows found.

@item @strong{-p} @emph{pass-through}
@anchor{@strong{-p} @emph{pass-through}}

The string following -p will be given to the database connection
program as arguments. Multiple -p's will be joined with
space. Example: pass '-U' and the user name to the program:

@emph{-p "-U scott"} can also be written @emph{-p -U -p scott}.

@item @strong{-r}
@anchor{@strong{-r}}

Try 3 times. Short version of @emph{--retries 3}.

@item @strong{--retries} @emph{ntimes}
@anchor{@strong{--retries} @emph{ntimes}}

Try @emph{ntimes} times. If the client program returns with an error,
retry the command. Default is @emph{--retries 1}.

@item @strong{--sep} @emph{string}
@anchor{@strong{--sep} @emph{string}}

@item @strong{-s} @emph{string}
@anchor{@strong{-s} @emph{string}}

Field separator. Use @emph{string} as separator between columns.

@item @strong{--skip-first-line}
@anchor{@strong{--skip-first-line}}

Do not use the first line of input (used by GNU @strong{sql} itself
when called with @strong{--shebang}).

@item @strong{--table-size}
@anchor{@strong{--table-size}}

@item @strong{--tablesize}
@anchor{@strong{--tablesize}}

Size of tables. Show the size of the tables in the database.

@item @strong{--verbose}
@anchor{@strong{--verbose}}

@item @strong{-v}
@anchor{@strong{-v}}

Print which command is sent.

@item @strong{--version}
@anchor{@strong{--version}}

@item @strong{-V}
@anchor{@strong{-V}}

Print the version of GNU @strong{sql} and exit.

@item @strong{--shebang}
@anchor{@strong{--shebang}}

@item @strong{-Y}
@anchor{@strong{-Y}}

GNU @strong{sql} can be called as a shebang (#!) command as the first line of a script. Like this:

@verbatim
  #!/usr/bin/sql -Y mysql:///

  SELECT * FROM foo;
@end verbatim

For this to work @strong{--shebang} or @strong{-Y} must be set as the first option.

@end table

@node DBURL
@chapter DBURL

A DBURL has the following syntax:
[sql:]vendor://
[[user][:password]@@][host][:port]/[database][?sqlquery]

To quote special characters use %-encoding specified in
http://tools.ietf.org/html/rfc3986#section-2.1 (E.g. a password
containing '/' would contain '%2F').
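
A one-liner like this can produce the encoded form of a password (a
sketch, not from the manual; it assumes the Perl module URI::Escape is
installed):

@verbatim
  perl -MURI::Escape -e 'print uri_escape("ti/ger")'
  # prints ti%2Fger, ready to paste into the dburl
@end verbatim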

Examples:
 mysql://scott:tiger@@my.example.com/mydb
 sql:oracle://scott:tiger@@ora.example.com/xe
 postgresql://scott:tiger@@pg.example.com/pgdb
 pg:///
 postgresqlssl://scott@@pg.example.com:3333/pgdb
 sql:sqlite2:////tmp/db.sqlite?SELECT * FROM foo;
 sqlite3:///../db.sqlite3?SELECT%20*%20FROM%20foo;

Currently supported vendors: MySQL (mysql), MySQL with SSL (mysqls,
mysqlssl), Oracle (oracle, ora), PostgreSQL (postgresql, pg, pgsql,
postgres), PostgreSQL with SSL (postgresqlssl, pgs, pgsqlssl,
postgresssl, pgssl, postgresqls, pgsqls, postgress), SQLite2 (sqlite,
sqlite2), SQLite3 (sqlite3).

Aliases must start with ':' and are read from
/etc/sql/aliases and ~/.sql/aliases. The user's own
~/.sql/aliases should only be readable by the user.

Example of aliases:

@verbatim
 :myalias1 pg://scott:tiger@pg.example.com/pgdb
 :myalias2 ora://scott:tiger@ora.example.com/xe
 # Short form of mysql://`whoami`:nopassword@localhost:3306/`whoami`
 :myalias3 mysql:///
 # Short form of mysql://`whoami`:nopassword@localhost:33333/mydb
 :myalias4 mysql://:33333/mydb
 # Alias for an alias
 :m :myalias4
 # the shortest alias possible
 : sqlite2:////tmp/db.sqlite
 # Including an SQL query
 :query sqlite:////tmp/db.sqlite?SELECT * FROM foo;
@end verbatim

@node EXAMPLES
@chapter EXAMPLES

@menu
* Get an interactive prompt::
* Run a query::
* Copy a PostgreSQL database::
* Empty all tables in a MySQL database::
* Drop all tables in a PostgreSQL database::
* Run as a script::
* Use --colsep to process multiple columns::
* Retry if the connection fails::
* Get info about the running database system::
@end menu

@node Get an interactive prompt
@section Get an interactive prompt

The most basic use of GNU @strong{sql} is to get an interactive prompt:

@strong{sql sql:oracle://scott:tiger@@ora.example.com/xe}

If you have setup an alias you can do:

@strong{sql :myora}

@node Run a query
@section Run a query

To run a query directly from the command line:

@strong{sql :myalias "SELECT * FROM foo;"}

Oracle requires newlines after each statement. This can be done like
this:

@strong{sql :myora "SELECT * FROM foo;" "SELECT * FROM bar;"}

Or this:

@strong{sql :myora "SELECT * FROM foo;\nSELECT * FROM bar;"}

@node Copy a PostgreSQL database
@section Copy a PostgreSQL database

To copy a PostgreSQL database use pg_dump to generate the dump and GNU
@strong{sql} to import it:

@strong{pg_dump pg_database | sql pg://scott:tiger@@pg.example.com/pgdb}

@node Empty all tables in a MySQL database
@section Empty all tables in a MySQL database

Using GNU @strong{parallel} it is easy to empty all tables without dropping them:

@strong{sql -n mysql:/// 'show tables' | parallel sql mysql:/// DELETE FROM @{@};}

@node Drop all tables in a PostgreSQL database
@section Drop all tables in a PostgreSQL database

To drop all tables in a PostgreSQL database do:

@strong{sql -n pg:/// '\dt' | parallel --colsep '\|' -r sql pg:/// DROP TABLE @{2@};}
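
The same idea works for MySQL (a sketch along the lines of the two
examples above; it assumes no foreign-key constraints block the drops):

@verbatim
  sql -n mysql:/// 'show tables' | parallel sql mysql:/// DROP TABLE {};
@end verbatim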

@node Run as a script
@section Run as a script

Instead of doing:

@strong{sql mysql:/// < sqlfile}

you can combine the sqlfile with the DBURL to make a
UNIX-script. Create a script called @emph{demosql}:

@strong{#!/usr/bin/sql -Y mysql:///}

@strong{SELECT * FROM foo;}

Then do:

@strong{chmod +x demosql; ./demosql}

@node Use --colsep to process multiple columns
@section Use --colsep to process multiple columns

Use GNU @strong{parallel}'s @strong{--colsep} to separate columns:

@strong{sql -s '\t' :myalias 'SELECT * FROM foo;' | parallel --colsep '\t' do_stuff @{4@} @{1@}}

@node Retry if the connection fails
@section Retry if the connection fails

If the access to the database fails occasionally, @strong{--retries} can
help make sure the query succeeds:

@strong{sql --retries 5 :myalias 'SELECT * FROM really_big_foo;'}

@node Get info about the running database system
@section Get info about the running database system

Show how big the database is:

@strong{sql --db-size :myalias}

List the tables:

@strong{sql --list-tables :myalias}

List the size of the tables:

@strong{sql --table-size :myalias}

List the running processes:

@strong{sql --show-processlist :myalias}

@node REPORTING BUGS
@chapter REPORTING BUGS

GNU @strong{sql} is part of GNU @strong{parallel}. Report bugs to
<bug-parallel@gnu.org>.

@node AUTHOR
@chapter AUTHOR

When using GNU @strong{sql} for a publication please cite:

O. Tange (2011): GNU SQL - A Command Line Tool for Accessing Different
Databases Using DBURLs, ;login: The USENIX Magazine, April 2011:29-32.

Copyright (C) 2008,2009,2010 Ole Tange http://ole.tange.dk

Copyright (C) 2010,2011 Ole Tange, http://ole.tange.dk and Free
Software Foundation, Inc.

@node LICENSE
@chapter LICENSE

Copyright (C) 2007,2008,2009,2010,2011 Free Software Foundation, Inc.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
at your option any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.

@menu
* Documentation license I::
* Documentation license II::
@end menu

@node Documentation license I
@section Documentation license I

Permission is granted to copy, distribute and/or modify this documentation
under the terms of the GNU Free Documentation License, Version 1.3 or
any later version published by the Free Software Foundation; with no
Invariant Sections, with no Front-Cover Texts, and with no Back-Cover
Texts. A copy of the license is included in the file fdl.txt.

@node Documentation license II
@section Documentation license II

You are free:

@table @asis
@item @strong{to Share}
@anchor{@strong{to Share}}

to copy, distribute and transmit the work

@item @strong{to Remix}
@anchor{@strong{to Remix}}

to adapt the work

@end table

Under the following conditions:

@table @asis
@item @strong{Attribution}
@anchor{@strong{Attribution}}

You must attribute the work in the manner specified by the author or
licensor (but not in any way that suggests that they endorse you or
your use of the work).
- -@item @strong{Share Alike} -@anchor{@strong{Share Alike}} - -If you alter, transform, or build upon this work, you may distribute -the resulting work only under the same, similar or a compatible -license. - -@end table - -With the understanding that: - -@table @asis -@item @strong{Waiver} -@anchor{@strong{Waiver}} - -Any of the above conditions can be waived if you get permission from -the copyright holder. - -@item @strong{Public Domain} -@anchor{@strong{Public Domain}} - -Where the work or any of its elements is in the public domain under -applicable law, that status is in no way affected by the license. - -@item @strong{Other Rights} -@anchor{@strong{Other Rights}} - -In no way are any of the following rights affected by the license: - -@itemize -@item Your fair dealing or fair use rights, or other applicable -copyright exceptions and limitations; - -@item The author's moral rights; - -@item Rights other persons may have either in the work itself or in -how the work is used, such as publicity or privacy rights. - -@end itemize - -@item @strong{Notice} -@anchor{@strong{Notice}} - -For any reuse or distribution, you must make clear to others the -license terms of this work. - -@end table - -A copy of the full license is included in the file as cc-by-sa.txt. - -@node DEPENDENCIES -@chapter DEPENDENCIES - -GNU @strong{sql} uses Perl. If @strong{mysql} is installed, MySQL dburls will -work. If @strong{psql} is installed, PostgreSQL dburls will work. If -@strong{sqlite} is installed, SQLite2 dburls will work. If @strong{sqlite3} is -installed, SQLite3 dburls will work. If @strong{sqlplus} is installed, -Oracle dburls will work. If @strong{rlwrap} is installed, GNU @strong{sql} will -have a command history for Oracle. - -@node FILES -@chapter FILES - -~/.sql/aliases - user's own aliases with DBURLs - -/etc/sql/aliases - common aliases with DBURLs - -@node SEE ALSO -@chapter SEE ALSO - -@strong{mysql}(1), @strong{psql}(1), @strong{rlwrap}(1), @strong{sqlite}(1), @strong{sqlite3}(1), @strong{sqlplus}(1) - -@bye diff --git a/testsuite/wanted-results/test61 b/testsuite/wanted-results/test61 index 0ff31da1..c68af0d1 100644 --- a/testsuite/wanted-results/test61 +++ b/testsuite/wanted-results/test61 @@ -1,7 +1,7 @@ echo '### Test --return of weirdly named file' ### Test --return of weirdly named file stdout parallel --return {} -vv -S parallel\@parallel-server3 echo '>'{} ::: 'aa<${#}" b'; rm 'aa<${#}" b' -ssh -tt -oLogLevel=quiet parallel@parallel-server3 'eval `echo $SHELL | grep "/t\{0,1\}csh" > /dev/null && echo setenv PARALLEL_SEQ '$PARALLEL_SEQ'\; setenv PARALLEL_PID '$PARALLEL_PID' || echo PARALLEL_SEQ='$PARALLEL_SEQ'\;export PARALLEL_SEQ\; PARALLEL_PID='$PARALLEL_PID'\;export PARALLEL_PID` ;' tty\ \>/dev/null\ \&\&\ stty\ isig\ -onlcr\ -echo\;echo\ \>aa\\\<\\\$\\\{\\\#\\\}\\\"\\\ b;_EXIT_status=$?; mkdir -p ./.; rsync --rsync-path=cd\ ././.\;\ rsync -rlDzR -essh parallel@parallel-server3:./aa\\\<\\\$\\\{\\\#\\\}\\\"\\\ b ./.; exit $_EXIT_status; +ssh -tt -oLogLevel=quiet parallel@parallel-server3 'eval `echo $SHELL | grep "/t\{0,1\}csh" > /dev/null && echo setenv PARALLEL_SEQ '$PARALLEL_SEQ'\; setenv PARALLEL_PID '$PARALLEL_PID' || echo PARALLEL_SEQ='$PARALLEL_SEQ'\;export PARALLEL_SEQ\; PARALLEL_PID='$PARALLEL_PID'\;export PARALLEL_PID` ;' tty\ \>/dev/null\ \&\&\ stty\ isig\ -onlcr\ -echo\;echo\ \>aa\\\<\\\$\\\{\\\#\\\}\\\"\\\ b;_EXIT_status=$?; mkdir -p ./.; rsync --protocol 30 --rsync-path=cd\ ././.\;\ rsync -rlDzR -essh parallel@parallel-server3:./aa\\\<\\\$\\\{\\\#\\\}\\\"\\\ b ./.; 
exit $_EXIT_status; echo '### Test if remote login shell is csh' ### Test if remote login shell is csh stdout parallel -k -vv -S csh@localhost 'echo $PARALLEL_PID $PARALLEL_SEQ {}| wc -w' ::: a b c