parallel: --timeout now gives warning.

niceload: --pid PID,PID,PID implemented.
Ole Tange 2018-03-21 22:57:28 +01:00
parent e2792e2101
commit 130e299af6
21 changed files with 386 additions and 113 deletions

View file

@ -199,17 +199,23 @@ to:parallel@gnu.org, bug-parallel@gnu.org
stable-bcc: Jesse Alama <jessealama@fastmail.fm> stable-bcc: Jesse Alama <jessealama@fastmail.fm>
Subject: GNU Parallel 20180322 ('') released <<[stable]>> Subject: GNU Parallel 20180322 ('Hawking') released <<[stable]>>
GNU Parallel 20180322 ('') <<[stable]>> has been released. It is available for download at: http://ftpmirror.gnu.org/parallel/ GNU Parallel 20180322 ('Hawking') <<[stable]>> has been released. It is available for download at: http://ftpmirror.gnu.org/parallel/
<<No new functionality was introduced so this is a good candidate for a stable release.>> <<No new functionality was introduced so this is a good candidate for a stable release.>>
Quote of the month: Quote of the month:
If you aren't nesting
gnu parallel calls in gnu parallel calls
I don't know how you have fun.
-- Ernest W. Durbin III EWDurbin@twitter
New in this release: New in this release:
https://medium.com/@alonisser/parallel-straight-from-your-command-line-feb6db8b6cee
https://github.com/inpacdb/POAP https://github.com/inpacdb/POAP
https://chiefio.wordpress.com/2018/02/23/pi-cluster-parallel-script-first-fire/ https://chiefio.wordpress.com/2018/02/23/pi-cluster-parallel-script-first-fire/

View file

@ -106,6 +106,8 @@ if(@opt::prg) {
$::resume_process = $process; $::resume_process = $process;
$SIG{TERM} = $SIG{INT} = \&resume; $SIG{TERM} = $SIG{INT} = \&resume;
} elsif(@opt::pid) { } elsif(@opt::pid) {
# Support --pid 3567,25678
@opt::pid = map { split /,/, $_ } @opt::pid;
$process->set_pid(@opt::pid); $process->set_pid(@opt::pid);
$::resume_process = $process; $::resume_process = $process;
$SIG{TERM} = $SIG{INT} = \&resume; $SIG{TERM} = $SIG{INT} = \&resume;
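
The niceload hunk above flattens the PID list with a single map+split, so repeated -p options and comma-separated values end up in one flat list. A minimal Perl sketch of the idiom, with made-up PIDs:

  # Flatten PIDs given as repeated options and/or comma-separated values.
  # The sample values are made up for illustration.
  my @opt_pid = ("3567,25678", "1234");        # e.g. -p 3567,25678 -p 1234
  @opt_pid = map { split /,/, $_ } @opt_pid;   # now ("3567", "25678", "1234")
  print "@opt_pid\n";                          # 3567 25678 1234

With this in place, niceload --pid 3567,25678 and niceload -p 3567 -p 25678 suspend the same set of processes.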

View file

@ -132,12 +132,12 @@ useful to keep the connection alive.
Sets niceness. See B<nice>(1). Sets niceness. See B<nice>(1).
=item B<-p> I<PID> =item B<-p> I<PID>[,I<PID>] (alpha testing)
=item B<--pid> I<PID> =item B<--pid> I<PID>[,I<PID>] (alpha testing)
Process ID of process to suspend. You can specify multiple process IDs Process IDs of processes to suspend. You can specify multiple process
with multiple B<-p> I<PID>. IDs with multiple B<-p> I<PID> or by separating the PIDs with commas.
=item B<--prg> I<program> =item B<--prg> I<program>

View file

@ -3499,7 +3499,10 @@ sub parse_host_filtering {
::debug("init",$_); ::debug("init",$_);
chomp; chomp;
my @col = split /\t/, $_; my @col = split /\t/, $_;
if(defined $col[6]) { if($col[0] =~ /^parallel: Warning:/) {
# Timed out job: Ignore it
next;
} elsif(defined $col[6]) {
# This is a line from --joblog # This is a line from --joblog
# seq host time spent sent received exit signal command # seq host time spent sent received exit signal command
# 2 : 1372607672.654 0.675 0 0 0 0 eval true\ m\;ssh\ m\ parallel\ --number-of-cores # 2 : 1372607672.654 0.675 0 0 0 0 eval true\ m\;ssh\ m\ parallel\ --number-of-cores
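
The parse_host_filtering hunk above makes the host filter skip the new timeout warnings before it looks for the tab-separated --joblog columns. A minimal sketch of that filtering on made-up sample lines (not parallel's real data flow):

  # Skip "parallel: Warning:" lines (timed-out jobs) and only act on lines
  # that look like --joblog records: seq host starttime runtime send
  # receive exit signal command, separated by tabs. Sample data is made up.
  my @lines = (
      "parallel: Warning: This job was killed because it timed out:",
      join("\t", 2, ":", "1372607672.654", "0.675", 0, 0, 0, 0,
           "eval true m;ssh m parallel --number-of-cores"),
  );
  for (@lines) {
      chomp;
      my @col = split /\t/, $_;
      if($col[0] =~ /^parallel: Warning:/) {
          # Timed out job: ignore it
          next;
      } elsif(defined $col[6]) {
          print "joblog line: seq $col[0], exit $col[6]\n";
      }
  }

The skip matters because --timeout (changed in this same commit) now emits such warnings, which would otherwise reach the column-based parsing.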
@ -4204,32 +4207,28 @@ sub citation {
} }
::status( ::status(
"Academic tradition requires you to cite works you base your article on.", "Academic tradition requires you to cite works you base your article on.",
"If you use programs that use GNU Parallel to process data for an article in a", "If you use programs that use GNU Parallel to process data for an article in a",
"scientific publication, please cite:", "scientific publication, please cite:",
"", "",
"\@article{Tange2011a,", "\@book{tange_ole_2018_1146014,",
" title = {GNU Parallel - The Command-Line Power Tool},", " author = {Tange, Ole},",
" author = {O. Tange},", " title = {GNU Parallel 2018},",
" address = {Frederiksberg, Denmark},", " publisher = {Ole Tange},",
" journal = {;login: The USENIX Magazine},", " year = 2018,",
" month = {Feb},", " month = apr,",
" number = {1},", " doi = {10.5281/zenodo.1146014},",
" volume = {36},", " url = {https://doi.org/10.5281/zenodo.1146014}",
" url = {http://www.gnu.org/s/parallel},", "}",
" year = {2011},", "",
" pages = {42-47},", "(Feel free to use \\nocite{tange_ole_2018_1146014})",
" doi = {http://dx.doi.org/10.5281/zenodo.16303}", "",
"}", "This helps funding further development; AND IT WON'T COST YOU A CENT.",
"", "If you pay 10000 EUR you should feel free to use GNU Parallel without citing.",
"(Feel free to use \\nocite{Tange2011a})", "",
"", "If you send a copy of your published article to tange\@gnu.org, it will be",
"This helps funding further development; AND IT WON'T COST YOU A CENT.", "mentioned in the release notes of next version of GNU Parallel.",
"If you pay 10000 EUR you should feel free to use GNU Parallel without citing.", ""
"",
"If you send a copy of your published article to tange\@gnu.org, it will be",
"mentioned in the release notes of next version of GNU Parallel.",
""
); );
while(not grep { -e "$_/will-cite" } @Global::config_dirs) { while(not grep { -e "$_/will-cite" } @Global::config_dirs) {
print "\nType: 'will cite' and press enter.\n> "; print "\nType: 'will cite' and press enter.\n> ";
@ -4628,7 +4627,9 @@ sub multiply_time_units {
tell $disk_full_fh != 8193) { tell $disk_full_fh != 8193) {
# On raspbian the disk can be full except for 10 chars. # On raspbian the disk can be full except for 10 chars.
if(not $error_printed) { if(not $error_printed) {
::error("Output is incomplete. Cannot append to buffer file in $ENV{'TMPDIR'}. Is the disk full?", ::error("Output is incomplete.",
"Cannot append to buffer file in $ENV{'TMPDIR'}.",
"Is the disk full?",
"Change \$TMPDIR with --tmpdir or use --compress."); "Change \$TMPDIR with --tmpdir or use --compress.");
$error_printed = 1; $error_printed = 1;
} }
@ -11071,6 +11072,8 @@ sub process_timeouts {
# Need to shift off queue before kill # Need to shift off queue before kill
# because kill calls usleep that calls process_timeouts # because kill calls usleep that calls process_timeouts
shift @{$self->{'queue'}}; shift @{$self->{'queue'}};
::warning("This job was killed because it timed out:",
$job->replaced());
$job->kill(); $job->kill();
} else { } else {
# Because they are sorted by start time the rest are later # Because they are sorted by start time the rest are later
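
The comment in the hunk above is about re-entrancy: the job is shifted off the queue before kill(), because kill() calls usleep, which in turn calls process_timeouts again. A minimal sketch (hypothetical queue and handler, not parallel's internals) of why the item has to be removed before the re-entrant call:

  # If the handler can re-enter the scan, each item must be removed from
  # the queue *before* the handler runs; otherwise the nested scan would
  # see the same item at the head of the queue and handle it again.
  my @queue = (1, 2, 3);

  sub handle {
      my ($item) = @_;
      print "handling $item\n";
      process_queue();    # simulates kill() -> usleep() -> process_timeouts()
  }

  sub process_queue {
      while (@queue) {
          my $item = shift @queue;   # remove first ...
          handle($item);             # ... then call the possibly re-entrant handler
      }
  }

  process_queue();                   # each item is handled exactly once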

View file

@ -4475,7 +4475,7 @@ remote computers:
=head1 EXIT STATUS =head1 EXIT STATUS
Exit status depends on B<--halt-on-error> if one of these are used: Exit status depends on B<--halt-on-error> if one of these is used:
success=X, success=Y%, fail=Y%. success=X, success=Y%, fail=Y%.
=over 6 =over 6

View file

@ -206,6 +206,8 @@ becomes (assuming you have 8 cores)
ls | xargs -d "\n" -P8 -I {} bash -c "wc {} >{}.wc" ls | xargs -d "\n" -P8 -I {} bash -c "wc {} >{}.wc"
ls | xargs -d "\n" -P8 -I {} bash -c "echo {}; ls {}|wc" ls | xargs -d "\n" -P8 -I {} bash -c "echo {}; ls {}|wc"
https://www.gnu.org/software/findutils/
=head2 DIFFERENCES BETWEEN find -exec AND GNU Parallel =head2 DIFFERENCES BETWEEN find -exec AND GNU Parallel
@ -215,6 +217,8 @@ B<find -exec> only works on files. So processing other input (such as
hosts or URLs) will require creating these inputs as files. B<find hosts or URLs) will require creating these inputs as files. B<find
-exec> has no support for running commands in parallel. -exec> has no support for running commands in parallel.
https://www.gnu.org/software/findutils/
=head2 DIFFERENCES BETWEEN make -j AND GNU Parallel =head2 DIFFERENCES BETWEEN make -j AND GNU Parallel
@ -228,6 +232,8 @@ by GNU B<parallel> does not depend on eachother.
(Very early versions of GNU B<parallel> were coincidently implemented (Very early versions of GNU B<parallel> were coincidently implemented
using B<make -j>). using B<make -j>).
https://www.gnu.org/software/make/
=head2 DIFFERENCES BETWEEN ppss AND GNU Parallel =head2 DIFFERENCES BETWEEN ppss AND GNU Parallel
@ -302,6 +308,8 @@ B<9> ./ppss.sh status -C config.cfg
B<9> killall -SIGUSR2 parallel B<9> killall -SIGUSR2 parallel
https://github.com/louwrentius/PPSS
=head2 DIFFERENCES BETWEEN pexec AND GNU Parallel =head2 DIFFERENCES BETWEEN pexec AND GNU Parallel
@ -362,6 +370,8 @@ faster as only one process will be either reading or writing:
B<8> ls *jpg | parallel -j8 'sem --id diskio cat {} | jpegtopnm |' \ B<8> ls *jpg | parallel -j8 'sem --id diskio cat {} | jpegtopnm |' \
'pnmscale 0.5 | pnmtojpeg | sem --id diskio cat > th_{}' 'pnmscale 0.5 | pnmtojpeg | sem --id diskio cat > th_{}'
https://www.gnu.org/software/pexec/
=head2 DIFFERENCES BETWEEN xjobs AND GNU Parallel =head2 DIFFERENCES BETWEEN xjobs AND GNU Parallel
@ -404,6 +414,8 @@ cat /var/run/my_named_pipe | parallel &
echo unzip 1.zip >> /var/run/my_named_pipe; echo unzip 1.zip >> /var/run/my_named_pipe;
echo tar cf /backup/myhome.tar /home/me >> /var/run/my_named_pipe echo tar cf /backup/myhome.tar /home/me >> /var/run/my_named_pipe
http://www.maier-komor.de/xjobs.html
=head2 DIFFERENCES BETWEEN prll AND GNU Parallel =head2 DIFFERENCES BETWEEN prll AND GNU Parallel
@ -424,6 +436,8 @@ using GNU B<parallel>:
prll -s 'mogrify -flip $1' *.jpg prll -s 'mogrify -flip $1' *.jpg
parallel mogrify -flip ::: *.jpg parallel mogrify -flip ::: *.jpg
https://github.com/exzombie/prll
=head2 DIFFERENCES BETWEEN dxargs AND GNU Parallel =head2 DIFFERENCES BETWEEN dxargs AND GNU Parallel
@ -433,6 +447,8 @@ B<dxargs> does not deal well with more simultaneous jobs than SSHD's
MaxStartups. B<dxargs> is only built for remote run jobs, but does not MaxStartups. B<dxargs> is only built for remote run jobs, but does not
support transferring of files. support transferring of files.
http://www.semicomplete.com/blog/geekery/distributed-xargs.html
=head2 DIFFERENCES BETWEEN mdm/middleman AND GNU Parallel =head2 DIFFERENCES BETWEEN mdm/middleman AND GNU Parallel
@ -445,6 +461,8 @@ to GNU B<parallel>:
cat files | parallel cmd cat files | parallel cmd
find dir -execdir sem cmd {} \; find dir -execdir sem cmd {} \;
https://github.com/cklin/mdm
=head2 DIFFERENCES BETWEEN xapply AND GNU Parallel =head2 DIFFERENCES BETWEEN xapply AND GNU Parallel
@ -501,6 +519,8 @@ B<11> xapply -f '[ -f %1 ] && echo %1' List | ...
B<11> parallel '[ -f {} ] && echo {}' < List | ... B<11> parallel '[ -f {} ] && echo {}' < List | ...
http://carrera.databits.net/~ksb/msrc/local/bin/xapply/xapply.html
=head2 DIFFERENCES BETWEEN AIX apply AND GNU Parallel =head2 DIFFERENCES BETWEEN AIX apply AND GNU Parallel
@ -534,6 +554,8 @@ B</usr/joe>, enter:
apply 'ln %1 /usr/joe' * apply 'ln %1 /usr/joe' *
parallel ln {} /usr/joe ::: * parallel ln {} /usr/joe ::: *
https://www.ibm.com/support/knowledgecenter/en/ssw_aix_61/com.ibm.aix.cmds1/apply.htm
=head2 DIFFERENCES BETWEEN paexec AND GNU Parallel =head2 DIFFERENCES BETWEEN paexec AND GNU Parallel
@ -580,8 +602,10 @@ using GNU B<parallel>:
=back =back
https://github.com/cheusov/paexec
=head2 DIFFERENCES BETWEEN map AND GNU Parallel
=head2 DIFFERENCES BETWEEN map(sitaramc) AND GNU Parallel
B<map> sees it as a feature to have less features and in doing so it B<map> sees it as a feature to have less features and in doing so it
also handles corner cases incorrectly. A lot of GNU B<parallel>'s code also handles corner cases incorrectly. A lot of GNU B<parallel>'s code
@ -670,6 +694,8 @@ delimiter (only field delimiter), logging of jobs run with possibility
to resume, keeping the output in the same order as input, --pipe to resume, keeping the output in the same order as input, --pipe
processing, and dynamic timeouts. processing, and dynamic timeouts.
https://github.com/sitaramc/map
=head2 DIFFERENCES BETWEEN ladon AND GNU Parallel =head2 DIFFERENCES BETWEEN ladon AND GNU Parallel
@ -715,6 +741,8 @@ B<4> ladon "~/Music/*.wav" -- lame -V 2 FULLPATH DIRNAME/BASENAME.mp3
B<4> parallel lame -V 2 FULLPATH DIRNAME/BASENAME.mp3 ::: ~/Music/*.wav B<4> parallel lame -V 2 FULLPATH DIRNAME/BASENAME.mp3 ::: ~/Music/*.wav
https://github.com/danielgtaylor/ladon
=head2 DIFFERENCES BETWEEN jobflow AND GNU Parallel =head2 DIFFERENCES BETWEEN jobflow AND GNU Parallel
@ -729,7 +757,7 @@ it can cause the system to run out of memory.
B<jobflow> gives no error if the command is unknown, and like B<xargs> B<jobflow> gives no error if the command is unknown, and like B<xargs>
redirection requires wrapping with B<bash -c>. redirection requires wrapping with B<bash -c>.
B<jobflow> makes it possible to set ressource limits on the running B<jobflow> makes it possible to set resource limits on the running
jobs. This can be emulated by GNU B<parallel> using B<bash>'s B<ulimit>: jobs. This can be emulated by GNU B<parallel> using B<bash>'s B<ulimit>:
@ -756,6 +784,8 @@ B<4> find . -name '*.bmp' | jobflow -threads=8 -exec bmp2jpeg {.}.bmp {.}.jpg
B<4> find . -name '*.bmp' | parallel -j8 bmp2jpeg {.}.bmp {.}.jpg B<4> find . -name '*.bmp' | parallel -j8 bmp2jpeg {.}.bmp {.}.jpg
https://github.com/rofl0r/jobflow
=head2 DIFFERENCES BETWEEN gargs AND GNU Parallel =head2 DIFFERENCES BETWEEN gargs AND GNU Parallel
@ -780,6 +810,8 @@ B<2> cat t.txt | gargs --sep "\s+" -p 2 "echo '{0}:{1}-{2}' full-line: \'{}\'"
B<2> cat t.txt | parallel --colsep "\\s+" -P 2 "echo '{1}:{2}-{3}' full-line: \'{}\'" B<2> cat t.txt | parallel --colsep "\\s+" -P 2 "echo '{1}:{2}-{3}' full-line: \'{}\'"
https://github.com/brentp/gargs
=head2 DIFFERENCES BETWEEN orgalorg AND GNU Parallel =head2 DIFFERENCES BETWEEN orgalorg AND GNU Parallel
@ -817,6 +849,8 @@ If the above is set up you can then do:
B<orgalorg> has a progress indicator for the transferring of a B<orgalorg> has a progress indicator for the transferring of a
file. GNU B<parallel> does not. file. GNU B<parallel> does not.
https://github.com/reconquest/orgalorg
=head2 DIFFERENCES BETWEEN Rust parallel AND GNU Parallel =head2 DIFFERENCES BETWEEN Rust parallel AND GNU Parallel
@ -865,6 +899,8 @@ user uses Rust parallel it will overwrite this file.
If /tmp/parallel runs full during the run, Rust parallel does not If /tmp/parallel runs full during the run, Rust parallel does not
report this, but finishes with success - thereby risking data loss. report this, but finishes with success - thereby risking data loss.
https://github.com/mmstick/parallel
=head2 DIFFERENCES BETWEEN Rush AND GNU Parallel =head2 DIFFERENCES BETWEEN Rush AND GNU Parallel
@ -1279,6 +1315,9 @@ The failed commands can be resumed with:
=back =back
https://github.com/shenwei356/rush
=head2 DIFFERENCES BETWEEN ClusterSSH AND GNU Parallel =head2 DIFFERENCES BETWEEN ClusterSSH AND GNU Parallel
ClusterSSH solves a different problem than GNU B<parallel>. ClusterSSH solves a different problem than GNU B<parallel>.
@ -1298,6 +1337,8 @@ GNU B<parallel> can be used as a poor-man's version of ClusterSSH:
B<parallel --nonall -S server-a,server-b do_stuff foo bar> B<parallel --nonall -S server-a,server-b do_stuff foo bar>
https://github.com/duncs/clusterssh
=head2 DIFFERENCES BETWEEN coshell AND GNU Parallel =head2 DIFFERENCES BETWEEN coshell AND GNU Parallel
@ -1311,6 +1352,9 @@ Output can be buffered by using B<-d>. Output is buffered in memory,
so big output can cause swapping and therefore be terribly slow or so big output can cause swapping and therefore be terribly slow or
even cause out of memory. even cause out of memory.
https://github.com/gdm85/coshell
=head2 DIFFERENCES BETWEEN spread AND GNU Parallel =head2 DIFFERENCES BETWEEN spread AND GNU Parallel
B<spread> runs commands on all directories. B<spread> runs commands on all directories.
@ -1326,6 +1370,7 @@ It can be emulated with GNU B<parallel> using this Bash function:
This works except for the B<--exclude> option.
=head2 DIFFERENCES BETWEEN pyargs AND GNU Parallel =head2 DIFFERENCES BETWEEN pyargs AND GNU Parallel
B<pyargs> deals badly with input containing spaces. It buffers stdout, B<pyargs> deals badly with input containing spaces. It buffers stdout,
@ -1366,6 +1411,8 @@ and fails on B<pyargs traceroute gnu.org fsf.org>.
# Similar, but not exactly the same # Similar, but not exactly the same
parallel seq ::: 1 2 3 4 5 6 parallel seq ::: 1 2 3 4 5 6
https://github.com/robertblackwell/pyargs
=head2 DIFFERENCES BETWEEN concurrently AND GNU Parallel =head2 DIFFERENCES BETWEEN concurrently AND GNU Parallel
@ -1377,7 +1424,7 @@ The output is prepended with the job number, and may be incomplete:
7165 7165
When pretty printing it caches output in memory. Output mixes by using When pretty printing it caches output in memory. Output mixes by using
test A below whether or not output is cached. test MIX below whether or not output is cached.
There seems to be no way of making a template command and have There seems to be no way of making a template command and have
B<concurrently> fill that with different args. The full commands must B<concurrently> fill that with different args. The full commands must
@ -1387,9 +1434,67 @@ There is also no way of controlling how many jobs should be run in
parallel at a time - i.e. "number of jobslots". Instead all jobs are parallel at a time - i.e. "number of jobslots". Instead all jobs are
simply started in parallel. simply started in parallel.
https://github.com/kimmobrunfeldt/concurrently
=head2 DIFFERENCES BETWEEN map(soveran) AND GNU Parallel
B<map> does not run jobs in parallel by default. The README suggests using:
... | map t 'sleep $t && say done &'
But this fails if more jobs are run in parallel than the number of
available processes. Since there is no support for parallelization in
B<map> itself, the output also mixes:
seq 10 | map i 'echo start-$i && sleep 0.$i && echo end-$i &'
The major difference is that GNU B<parallel> is built for parallelization
and map is not. So GNU B<parallel> has lots of ways of dealing with the
issues that parallelization raises:
=over 4
=item *
Keep the number of processes manageable
=item *
Make sure output does not mix
=item *
Make Ctrl-C kill all running processes
=back
Here are the 5 examples converted to GNU Parallel:
1$ ls *.c | map f 'foo $f'
1$ ls *.c | parallel foo
2$ ls *.c | map f 'foo $f; bar $f'
2$ ls *.c | parallel 'foo {}; bar {}'
3$ cat urls | map u 'curl -O $u'
3$ cat urls | parallel curl -O
4$ printf "1\n1\n1\n" | map t 'sleep $t && say done'
4$ printf "1\n1\n1\n" | parallel 'sleep {} && say done'
4$ parallel 'sleep {} && say done' ::: 1 1 1
5$ printf "1\n1\n1\n" | map t 'sleep $t && say done &'
5$ printf "1\n1\n1\n" | parallel -j0 'sleep {} && say done'
5$ parallel -j0 'sleep {} && say done' ::: 1 1 1
https://github.com/soveran/map
=head2 Todo =head2 Todo
Url for map, spread
machma. Requires Go >= 1.7. machma. Requires Go >= 1.7.
https://github.com/k-bx/par requires Haskell to work. This limits the https://github.com/k-bx/par requires Haskell to work. This limits the
@ -1414,14 +1519,21 @@ There are certain issues that are very common on parallelizing
tools. Here are a few stress tests. Be warned: If the tool is badly tools. Here are a few stress tests. Be warned: If the tool is badly
coded it may overload your machine. coded it may overload your machine.
=head2 A: Output mixes
Output from 2 jobs should not mix. If the tool does not buffer, output =head2 MIX: Output mixes
will most likely mix.
Output from 2 jobs should not mix. If the output is not used, this
does not matter; but if the output I<is> used then it is important
that you do not get half a line from one job followed by half a line
from another job.
If the tool does not buffer, output will most likely mix now and then.
This test stresses whether output mixes.
#!/bin/bash #!/bin/bash
paralleltool=parallel paralleltool="parallel -j0"
cat <<-EOF > mycommand cat <<-EOF > mycommand
#!/bin/bash #!/bin/bash
@ -1439,12 +1551,13 @@ will most likely mix.
chmod +x mycommand chmod +x mycommand
# Run 30 jobs in parallel # Run 30 jobs in parallel
seq 30 | $paralleltool -j0 ./mycommand > >(tr -s abcdef) 2> >(tr -s abcdef >&2) seq 30 | $paralleltool ./mycommand > >(tr -s abcdef) 2> >(tr -s abcdef >&2)
# 'a c e' and 'b d f' should always stay together # 'a c e' and 'b d f' should always stay together
# and there should only be a single line per job # and there should only be a single line per job
=head2 B: Output limited by RAM
=head2 RAM: Output limited by RAM
Some tools cache output in RAM. This makes them extremely slow if the Some tools cache output in RAM. This makes them extremely slow if the
output is bigger than physical memory and crash if the output is output is bigger than physical memory and crash if the output is
@ -1452,7 +1565,7 @@ bigger than the virtual memory.
#!/bin/bash #!/bin/bash
paralleltool=parallel paralleltool="parallel -j0"
cat <<'EOF' > mycommand cat <<'EOF' > mycommand
#!/bin/bash #!/bin/bash
@ -1464,9 +1577,45 @@ bigger than the virtual memory.
# Run 20 jobs in parallel # Run 20 jobs in parallel
# Adjust 20 to be > physical RAM and < free space on /tmp # Adjust 20 to be > physical RAM and < free space on /tmp
seq 20 | time $paralleltool -j0 ./mycommand | wc -c seq 20 | time $paralleltool ./mycommand | wc -c
=head2 C: Leaving tmp files at unexpected death
=head2 DISKFULL: Incomplete data if /tmp runs full
If caching is done on disk, the disk can run full during the run. Not
all programs discover this. GNU Parallel discovers it, if it stays
full for at least 2 seconds.
#!/bin/bash
paralleltool="parallel -j0"
# This should be a dir with less than 100 GB free space
smalldisk=/tmp/shm/parallel
TMPDIR="$smalldisk"
export TMPDIR
max_output() {
# Force worst case scenario:
# Make GNU Parallel only check once per second
sleep 10
# Generate 100 GB to fill $TMPDIR
# Adjust if /tmp is bigger than 100 GB
yes | head -c 100G >$TMPDIR/$$
# Generate 10 MB output that will not be buffered due to full disk
perl -e 'print "X"x10_000_000' | head -c 10M
echo This part is missing from incomplete output
sleep 2
rm $TMPDIR/$$
echo Final output
}
export -f max_output
seq 10 | $paralleltool max_output | tr -s X
=head2 CLEANUP: Leaving tmp files at unexpected death
Some tools do not clean up tmp files if they are killed. If the tool Some tools do not clean up tmp files if they are killed. If the tool
buffers on disk, they may not clean up, if they are killed. buffers on disk, they may not clean up, if they are killed.
@ -1485,11 +1634,12 @@ buffers on disk, they may not clean up, if they are killed.
# Should be empty: No files should be left behind # Should be empty: No files should be left behind
diff <(ls /tmp) /tmp/before diff <(ls /tmp) /tmp/before
=head2 D: Dealing badly with special file names.
=head2 SPCCHAR: Dealing badly with special file names.
It is not uncommon for users to create files like: It is not uncommon for users to create files like:
My brother's 12" records cost $$$.txt My brother's 12" *** record (costs $$$).jpg
Some tools break on this. Some tools break on this.
@ -1497,22 +1647,25 @@ Some tools break on this.
paralleltool=parallel paralleltool=parallel
touch "My brother's 12\" records cost \$\$\$.txt" touch "My brother's 12\" *** record (costs \$\$\$).jpg"
ls My*txt | $paralleltool echo ls My*jpg | $paralleltool ls -l
=head2 E: Composed commands do not work
=head2 COMPOSED: Composed commands do not work
Some tools require you to wrap composed commands into B<bash -c>. Some tools require you to wrap composed commands into B<bash -c>.
echo bar | $paralleltool echo foo';' echo {} echo bar | $paralleltool echo foo';' echo {}
=head2 F: Only one replacement string allowed
=head2 ONEREP: Only one replacement string allowed
Some tools can only insert the argument once. Some tools can only insert the argument once.
echo bar | $paralleltool echo {} foo {} echo bar | $paralleltool echo {} foo {}
=head2 G: Speed depends on number of words
=head2 NUMWORDS: Speed depends on number of words
Some tools become very slow if output lines have many words. Some tools become very slow if output lines have many words.

View file

@ -226,11 +226,54 @@ tmpfile in practice is removed by B<cattail> after around 40 ms.
The command given by the user can be wrapped in multiple The command given by the user can be wrapped in multiple
templates. Templates can be wrapped in other templates. templates. Templates can be wrapped in other templates.
=over 15 =over 15
=item B<$COMMAND>
the command to run.
=item B<$INPUT>
the input to run.
=item B<$SHELL>
the shell that started GNU Parallel.
=item B<$SSHLOGIN>
the sshlogin.
=item B<$WORKDIR>
the working dir.
=item B<$FILE>
the file to read parts from.
=item B<$STARTPOS>
the first byte position to read from B<$FILE>.
=item B<$LENGTH>
the number of bytes to read from B<$FILE>.
=item --shellquote =item --shellquote
echo I<shell double quoted input> echo I<Double quoted $INPUT>
=item --nice I<pri> =item --nice I<pri>
@ -240,7 +283,7 @@ Local: B<setpriority(0,0,$nice)>
=item --cat =item --cat
cat > {}; <<command>> {}; cat > {}; $COMMAND {};
perl -e '$bash = shift; perl -e '$bash = shift;
$csh = shift; $csh = shift;
for(@ARGV) { unlink;rmdir; } for(@ARGV) { unlink;rmdir; }
@ -269,38 +312,38 @@ B<*csh>/B<fish> (using $status).
waitpid $pid,0; waitpid $pid,0;
# Cleanup # Cleanup
unlink $f; unlink $f;
exit $?/256;' <<shell>> -c <<command>> $PARALLEL_TMP exit $?/256;' $SHELL -c $COMMAND $PARALLEL_TMP
This is an elaborate way of: mkfifo {}; run I<<<command>>> in the This is an elaborate way of: mkfifo {}; run B<$COMMAND> in the
background using I<<<shell>>>; copying STDIN to {}; waiting for background background using B<$SHELL>; copying STDIN to {}; waiting for background
to complete; remove {} and exit with the exit code from I<<<command>>>. to complete; remove {} and exit with the exit code from B<$COMMAND>.
It is made this way to be compatible with B<*csh>/B<fish>. It is made this way to be compatible with B<*csh>/B<fish>.
=item --pipepart =item --pipepart
< <<file>> perl -e 'while(@ARGV) {
< $FILE perl -e 'while(@ARGV) {
sysseek(STDIN,shift,0) || die; sysseek(STDIN,shift,0) || die;
$left = shift; $left = shift;
while($read = sysread(STDIN,$buf, ($left > 131072 ? 131072 : $left))){ while($read = sysread(STDIN,$buf, ($left > 131072 ? 131072 : $left))){
$left -= $read; $left -= $read;
syswrite(STDOUT,$buf); syswrite(STDOUT,$buf);
} }
}' <<startposition>> <<length>> }' $STARTPOS $LENGTH
This will read I<<<length>>> bytes from I<<<file>>> starting at This will read B<$LENGTH> bytes from B<$FILE> starting at B<$STARTPOS>
I<<<startposition>>> and send it to STDOUT. and send it to STDOUT.
=item --sshlogin I<sln> =item --sshlogin $SSHLOGIN
ssh I<sln> I<shell quoted command> ssh $SSHLOGIN "$COMMAND"
Where I<sln> is the sshlogin and I<shell quoted command> is the
command quoted so it will be passed to the server.
=item --transfer =item --transfer
( ssh I<sln> mkdir -p ./I<workdir>;rsync --protocol 30 -rlDzR -essh ./{} I<sln>:./I<workdir> ); I<<<command>>> ssh $SSHLOGIN mkdir -p ./$WORKDIR;
rsync --protocol 30 -rlDzR -essh ./{} $SSHLOGIN:./$WORKDIR;
ssh $SSHLOGIN "$COMMAND"
Read about B<--protocol 30> in the section B<Rsync protocol version>. Read about B<--protocol 30> in the section B<Rsync protocol version>.
@ -314,7 +357,9 @@ Read about B<--protocol 30> in the section B<Rsync protocol version>.
=item --return I<file> =item --return I<file>
I<<<command>>>; _EXIT_status=$?; mkdir -p I<<<workdir>>>; rsync --protocol 30 --rsync-path=cd\ ./I<<<workdir>>>\;\ rsync -rlDzR -essh I<<<sln>>>:./I<<<file>>> ./I<<<workdir>>>; exit $_EXIT_status; $COMMAND; _EXIT_status=$?; mkdir -p $WORKDIR;
rsync --protocol 30 --rsync-path=cd\ ./$WORKDIR\;\ rsync \
-rlDzR -essh $SSHLOGIN:./$FILE ./$WORKDIR; exit $_EXIT_status;
The B<--rsync-path=cd ...> is needed because old versions of B<rsync> The B<--rsync-path=cd ...> is needed because old versions of B<rsync>
do not support B<--no-implied-dirs>. do not support B<--no-implied-dirs>.
@ -325,9 +370,11 @@ wrapping 'sh -c' is enough?
=item --cleanup =item --cleanup
I<<<command>>> _EXIT_status=$?; <<return>>; $RETURN is the wrapper from B<--return>
ssh I<sln> \(rm\ -f\ ./I<workdir>/{}\;\ rmdir\ ./I<workdir>\ \>\&/dev/null\;\); exit $_EXIT_status; $COMMAND; _EXIT_status=$?; $RETURN;
ssh $SSHLOGIN \(rm\ -f\ ./$WORKDIR/{}\;\ rmdir\ ./$WORKDIR\ \>\&/dev/null\;\);
exit $_EXIT_status;
B<$_EXIT_status>: see B<--return> above. B<$_EXIT_status>: see B<--return> above.
@ -346,9 +393,9 @@ B<$_EXIT_status>: see B<--return> above.
} }
close $fh; close $fh;
exit ($?&127 ? 128+($?&127) : 1+$?>>8) exit ($?&127 ? 128+($?&127) : 1+$?>>8)
}' I<shell> -c I<input> }' $SHELL -c $COMMAND
This small wrapper makes sure that I<input> will never be run if This small wrapper makes sure that B<$COMMAND> will never be run if
there is no data. there is no data.
=item --tmux =item --tmux

View file

@ -138,10 +138,10 @@ par_keeporder_roundrobin() {
par_multiline_commands() { par_multiline_commands() {
echo 'bug #50781: joblog format with multiline commands' echo 'bug #50781: joblog format with multiline commands'
rm -f /tmp/jl.$$ rm -f /tmp/jl.$$
seq 1 3 | parallel --jl /tmp/jl.$$ --timeout 2s 'sleep {}; echo {}; parallel --jl /tmp/jl.$$ --timeout 2s 'sleep {}; echo {};
echo finish {}' echo finish {}' ::: 1 2 4
seq 1 3 | parallel --jl /tmp/jl.$$ --timeout 4s --retry-failed 'sleep {}; echo {}; parallel --jl /tmp/jl.$$ --timeout 5s --retry-failed 'sleep {}; echo {};
echo finish {}' echo finish {}' ::: 1 2 4
rm -f /tmp/jl.$$ rm -f /tmp/jl.$$
} }

View file

@ -3,14 +3,18 @@
par_load_more_10s() { par_load_more_10s() {
echo '### Test --load locally - should take >10s' echo '### Test --load locally - should take >10s'
echo '# This will run 10 processes in parallel for 10s'; echo '# This will run 10 processes in parallel for 10s';
seq 10 | parallel --nice 19 --timeout 13 -j0 -N0 "gzip < /dev/zero > /dev/null" & seq 10 |
sleep 2; stdout /usr/bin/time -f %e parallel --load 10 sleep ::: 1 | perl -ne '$_ > 10 and print "OK\n"' nice stdout parallel --timeout 13 -j0 -N0 "bzip2 < /dev/zero" > /dev/null &
sleep 2
stdout /usr/bin/time -f %e parallel --load 10 sleep ::: 1 |
perl -ne '$_ > 10 and print "OK\n"'
} }
par_load_file_less_10s() { par_load_file_less_10s() {
echo '### Test --load read from a file - less than 10s' echo '### Test --load read from a file - less than 10s'
echo '# This will run 10 processes in parallel for 10s' echo '# This will run 10 processes in parallel for 10s'
seq 10 | parallel --nice 19 --timeout 10 -j0 -N0 "gzip < /dev/zero > /dev/null" & seq 10 |
nice stdout parallel --timeout 10 -j0 -N0 "bzip2 < /dev/zero" > /dev/null &
( echo 8 > /tmp/parallel_load_file2; sleep 10; echo 1000 > /tmp/parallel_load_file2 ) & ( echo 8 > /tmp/parallel_load_file2; sleep 10; echo 1000 > /tmp/parallel_load_file2 ) &
sleep 1 sleep 1
stdout /usr/bin/time -f %e parallel --load /tmp/parallel_load_file2 sleep ::: 1 | stdout /usr/bin/time -f %e parallel --load /tmp/parallel_load_file2 sleep ::: 1 |
@ -21,7 +25,8 @@ par_load_file_less_10s() {
par_load_file_more_10s() { par_load_file_more_10s() {
echo '### Test --load read from a file - more than 10s' echo '### Test --load read from a file - more than 10s'
echo '# This will run 10 processes in parallel for 10s' echo '# This will run 10 processes in parallel for 10s'
seq 10 | parallel --nice 19 --timeout 10 -j0 -N0 "gzip < /dev/zero > /dev/null" & seq 10 |
nice stdout parallel --timeout 10 -j0 -N0 "bzip2 < /dev/zero" > /dev/null &
( echo 8 > /tmp/parallel_load_file; sleep 10; echo 1000 > /tmp/parallel_load_file ) & ( echo 8 > /tmp/parallel_load_file; sleep 10; echo 1000 > /tmp/parallel_load_file ) &
sleep 1 sleep 1
stdout /usr/bin/time -f %e parallel --load /tmp/parallel_load_file sleep ::: 1 | stdout /usr/bin/time -f %e parallel --load /tmp/parallel_load_file sleep ::: 1 |

View file

@ -12,6 +12,18 @@ par_env_parallel myvar_line 2
par_env_parallel myalias2 foo par_env_parallel myalias2 foo
par_load par_load 2>&1 par_load par_load 2>&1
par_load ### Test --load (must give 1=true) par_load ### Test --load (must give 1=true)
par_load parallel: Warning: This job was killed because it timed out:
par_load parallel: Warning: bzip2 < /dev/zero >/dev/null
par_load parallel: Warning: This job was killed because it timed out:
par_load parallel: Warning: bzip2 < /dev/zero >/dev/null
par_load parallel: Warning: This job was killed because it timed out:
par_load parallel: Warning: bzip2 < /dev/zero >/dev/null
par_load parallel: Warning: This job was killed because it timed out:
par_load parallel: Warning: bzip2 < /dev/zero >/dev/null
par_load parallel: Warning: This job was killed because it timed out:
par_load parallel: Warning: bzip2 < /dev/zero >/dev/null
par_load parallel: Warning: This job was killed because it timed out:
par_load parallel: Warning: bzip2 < /dev/zero >/dev/null
par_load par_load
par_load 1 par_load 1
par_no_more_procs par_no_more_procs 2>&1 par_no_more_procs par_no_more_procs 2>&1

View file

@ -113,7 +113,9 @@ echo '### Disk full'
### Disk full ### Disk full
cat /dev/zero >$SMALLDISK/out; parallel --tmpdir $SMALLDISK echo ::: OK; rm $SMALLDISK/out cat /dev/zero >$SMALLDISK/out; parallel --tmpdir $SMALLDISK echo ::: OK; rm $SMALLDISK/out
cat: write error: No space left on device cat: write error: No space left on device
parallel: Error: Output is incomplete. Cannot append to buffer file in /mnt/ram. Is the disk full? parallel: Error: Output is incomplete.
parallel: Error: Cannot append to buffer file in /mnt/ram.
parallel: Error: Is the disk full?
parallel: Error: Change $TMPDIR with --tmpdir or use --compress. parallel: Error: Change $TMPDIR with --tmpdir or use --compress.
Warning: unable to close filehandle properly: No space left on device during global destruction. Warning: unable to close filehandle properly: No space left on device during global destruction.
echo '**' echo '**'
@ -552,7 +554,7 @@ e
echo '### test too long args' echo '### test too long args'
### test too long args ### test too long args
perl -e 'print "z"x1000000' | parallel echo 2>&1 perl -e 'print "z"x1000000' | parallel echo 2>&1
parallel: Error: Command line too long (1000005 >= 131057) at input 0: zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz... parallel: Error: Command line too long (1000005 >= 131049) at input 0: zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz...
perl -e 'print "z"x1000000' | xargs echo 2>&1 perl -e 'print "z"x1000000' | xargs echo 2>&1
xargs: argument line too long xargs: argument line too long
(seq 1 10; perl -e 'print "z"x1000000'; seq 12 15) | stdsort parallel -j1 -km -s 10 echo (seq 1 10; perl -e 'print "z"x1000000'; seq 12 15) | stdsort parallel -j1 -km -s 10 echo
@ -819,7 +821,7 @@ echo far
### Test --show-limits ### Test --show-limits
(echo b; echo c; echo f) | parallel -k --show-limits echo {}ar (echo b; echo c; echo f) | parallel -k --show-limits echo {}ar
Maximal size of command: 131049 Maximal size of command: 131049
Maximal used size of command: 131057 Maximal used size of command: 131049
Execution of will continue now, and it will try to read its input Execution of will continue now, and it will try to read its input
and run commands; if this is not what you wanted to happen, please and run commands; if this is not what you wanted to happen, please

View file

@ -234,6 +234,10 @@ par_k_linebuffer stderr in the middle
par_k_linebuffer stdout end par_k_linebuffer stdout end
par_kill_children_timeout ### Test killing children with --timeout and exit value (failed if timed out) par_kill_children_timeout ### Test killing children with --timeout and exit value (failed if timed out)
par_kill_children_timeout 0 0 0 par_kill_children_timeout 0 0 0
par_kill_children_timeout parallel: Warning: This job was killed because it timed out:
par_kill_children_timeout parallel: Warning: doit 1000000000
par_kill_children_timeout parallel: Warning: This job was killed because it timed out:
par_kill_children_timeout parallel: Warning: doit 1000000001
par_kill_children_timeout 2 par_kill_children_timeout 2
par_kill_children_timeout 0 0 0 par_kill_children_timeout 0 0 0
par_line_buffer ### --line-buffer par_line_buffer ### --line-buffer
@ -253,7 +257,7 @@ par_linebuffer_tag_slow_output a
par_linebuffer_tag_slow_output b b par_linebuffer_tag_slow_output b b
par_linebuffer_tag_slow_output b par_linebuffer_tag_slow_output b
par_maxlinelen_X_I ### Test max line length -X -I par_maxlinelen_X_I ### Test max line length -X -I
par_maxlinelen_X_I 201ecb8ca789cecf39ee914a6d75611b - par_maxlinelen_X_I 3cfc69ee81b0fe7fdbe8eb059ad2da61 -
par_maxlinelen_X_I Chars per line (817788/7): 116826 par_maxlinelen_X_I Chars per line (817788/7): 116826
par_maxlinelen_m_I ### Test max line length -m -I par_maxlinelen_m_I ### Test max line length -m -I
par_maxlinelen_m_I c78bd0799bc23d8946732f8b3ae3c94e - par_maxlinelen_m_I c78bd0799bc23d8946732f8b3ae3c94e -

View file

@ -263,10 +263,10 @@ par_linebuffer_files lrz --results
par_linebuffer_files lrz 1 par_linebuffer_files lrz 1
par_max_length_len_128k ### BUG: The length for -X is not close to max (131072) par_max_length_len_128k ### BUG: The length for -X is not close to max (131072)
par_max_length_len_128k 1 12817 131016 par_max_length_len_128k 1 12817 131016
par_max_length_len_128k 1 10947 131046 par_max_length_len_128k 1 10946 131032
par_max_length_len_128k 1 23693 131052 par_max_length_len_128k 1 23691 131040
par_max_length_len_128k 1 15808 131047 par_max_length_len_128k 1 15806 131030
par_max_length_len_128k 1 11789 131045 par_max_length_len_128k 1 11788 131032
par_max_length_len_128k 1 25543 131043 par_max_length_len_128k 1 25543 131043
par_no_newline_compress bug #41613: --compress --line-buffer - no newline par_no_newline_compress bug #41613: --compress --line-buffer - no newline
par_no_newline_compress tagstring=--tagstring {#} compress=--compress par_no_newline_compress tagstring=--tagstring {#} compress=--compress

View file

@ -4,6 +4,8 @@ par_linebuffer_matters_compress_tag ### (--linebuffer) --compress --tag should g
par_linebuffer_matters_compress_tag OK: --linebuffer makes a difference par_linebuffer_matters_compress_tag OK: --linebuffer makes a difference
par_memfree ### test memfree par_memfree ### test memfree
par_memfree Free mem: 1k par_memfree Free mem: 1k
par_memfree parallel: Warning: This job was killed because it timed out:
par_memfree parallel: Warning: parallel --memfree 1t echo Free mem: ::: 1t
par_memfree parallel: SIGTERM received. No new jobs will be started. par_memfree parallel: SIGTERM received. No new jobs will be started.
par_memfree parallel: Waiting for these 0 jobs to finish. Send SIGTERM again to stop now. par_memfree parallel: Waiting for these 0 jobs to finish. Send SIGTERM again to stop now.
par_memory_leak ### Test for memory leaks par_memory_leak ### Test for memory leaks
@ -39,6 +41,8 @@ par_sigterm parallel: sleep 15; echo 7
par_sigterm parallel: sleep 15; echo 8 par_sigterm parallel: sleep 15; echo 8
par_sigterm parallel: sleep 15; echo 9 par_sigterm parallel: sleep 15; echo 9
par_tmp_full ### Test --tmpdir running full. bug #40733 was caused by this par_tmp_full ### Test --tmpdir running full. bug #40733 was caused by this
par_tmp_full parallel: Error: Output is incomplete. Cannot append to buffer file in /tmp/shm/parallel. Is the disk full? par_tmp_full parallel: Error: Output is incomplete.
par_tmp_full parallel: Error: Cannot append to buffer file in /tmp/shm/parallel.
par_tmp_full parallel: Error: Is the disk full?
par_tmp_full parallel: Error: Change $TMPDIR with --tmpdir or use --compress. par_tmp_full parallel: Error: Change $TMPDIR with --tmpdir or use --compress.
par_tmp_full Warning: unable to close filehandle properly: No space left on device during global destruction. par_tmp_full Warning: unable to close filehandle properly: No space left on device during global destruction.

View file

@ -1,7 +1,11 @@
par_children_receive_sig ### Do children receive --termseq signals par_children_receive_sig ### Do children receive --termseq signals
par_children_receive_sig parallel: Warning: This job was killed because it timed out:
par_children_receive_sig parallel: Warning: show_signals ''
par_children_receive_sig Got TERM par_children_receive_sig Got TERM
par_children_receive_sig Got TERM par_children_receive_sig Got TERM
par_children_receive_sig Got TERM par_children_receive_sig Got TERM
par_children_receive_sig parallel: Warning: This job was killed because it timed out:
par_children_receive_sig parallel: Warning: show_signals ''
par_children_receive_sig Got INT par_children_receive_sig Got INT
par_children_receive_sig Got TERM par_children_receive_sig Got TERM
par_commandline_with_newline bug #51299: --retry-failed with command with newline par_commandline_with_newline bug #51299: --retry-failed with command with newline
@ -58,12 +62,19 @@ par_multiline_commands 1
par_multiline_commands finish 1 par_multiline_commands finish 1
par_multiline_commands 2 par_multiline_commands 2
par_multiline_commands finish 2 par_multiline_commands finish 2
par_multiline_commands parallel: Warning: This job was killed because it timed out:
par_multiline_commands parallel: Warning: sleep 4; echo 4;
par_multiline_commands echo finish 4
par_multiline_commands parallel: Warning: Command lines contain newline. Forcing --null. par_multiline_commands parallel: Warning: Command lines contain newline. Forcing --null.
par_multiline_commands 3 par_multiline_commands 4
par_multiline_commands finish 3 par_multiline_commands finish 4
par_nice Check that --nice works par_nice Check that --nice works
par_nice bzip2 18 par_nice bzip2 18
par_nice bzip2 18 par_nice bzip2 18
par_nice parallel: Warning: This job was killed because it timed out:
par_nice parallel: Warning: bzip2 < /dev/zero
par_nice parallel: Warning: This job was killed because it timed out:
par_nice parallel: Warning: bzip2 < /dev/zero
par_parcat_mixing parcat output should mix: a b a b par_parcat_mixing parcat output should mix: a b a b
par_parcat_mixing astart par_parcat_mixing astart
par_parcat_mixing bstart par_parcat_mixing bstart

View file

@ -2,6 +2,26 @@ echo '### Test --load remote'
### Test --load remote ### Test --load remote
ssh parallel@lo 'seq 10 | parallel --nice 19 --timeout 15 -j0 -qN0 perl -e while\(1\)\{\ \}' & sleep 1; stdout /usr/bin/time -f %e parallel -S parallel@lo --load 10 sleep ::: 1 | perl -ne '$_ > 10 and print "OK\n"' ssh parallel@lo 'seq 10 | parallel --nice 19 --timeout 15 -j0 -qN0 perl -e while\(1\)\{\ \}' & sleep 1; stdout /usr/bin/time -f %e parallel -S parallel@lo --load 10 sleep ::: 1 | perl -ne '$_ > 10 and print "OK\n"'
OK OK
parallel: Warning: This job was killed because it timed out:
parallel: Warning: perl -e while\(1\)\{\ \}
parallel: Warning: This job was killed because it timed out:
parallel: Warning: perl -e while\(1\)\{\ \}
parallel: Warning: This job was killed because it timed out:
parallel: Warning: perl -e while\(1\)\{\ \}
parallel: Warning: This job was killed because it timed out:
parallel: Warning: perl -e while\(1\)\{\ \}
parallel: Warning: This job was killed because it timed out:
parallel: Warning: perl -e while\(1\)\{\ \}
parallel: Warning: This job was killed because it timed out:
parallel: Warning: perl -e while\(1\)\{\ \}
parallel: Warning: This job was killed because it timed out:
parallel: Warning: perl -e while\(1\)\{\ \}
parallel: Warning: This job was killed because it timed out:
parallel: Warning: perl -e while\(1\)\{\ \}
parallel: Warning: This job was killed because it timed out:
parallel: Warning: perl -e while\(1\)\{\ \}
parallel: Warning: This job was killed because it timed out:
parallel: Warning: perl -e while\(1\)\{\ \}
echo '**' echo '**'
** **
echo '### Stop if all hosts are filtered and there are no hosts left to run on' echo '### Stop if all hosts are filtered and there are no hosts left to run on'

View file

@ -37,21 +37,17 @@ To silence this citation notice: run 'parallel --citation' once.
If you use programs that use GNU Parallel to process data for an article in a If you use programs that use GNU Parallel to process data for an article in a
scientific publication, please cite: scientific publication, please cite:
@article{Tange2011a, @book{tange_ole_2018_1146014,
title = {GNU Parallel - The Command-Line Power Tool}, author = {Tange, Ole},
author = {O. Tange}, title = {GNU Parallel 2018},
address = {Frederiksberg, Denmark}, publisher = {Ole Tange},
journal = {;login: The USENIX Magazine}, year = 2018,
month = {Feb}, month = apr,
number = {1}, doi = {10.5281/zenodo.1146014},
volume = {36}, url = {https://doi.org/10.5281/zenodo.1146014}
url = {http://www.gnu.org/s/parallel},
year = {2011},
pages = {42-47},
doi = {http://dx.doi.org/10.5281/zenodo.16303}
} }
(Feel free to use \nocite{Tange2011a}) (Feel free to use \nocite{tange_ole_2018_1146014})
This helps funding further development; AND IT WON'T COST YOU A CENT. This helps funding further development; AND IT WON'T COST YOU A CENT.
If you pay 10000 EUR you should feel free to use GNU Parallel without citing. If you pay 10000 EUR you should feel free to use GNU Parallel without citing.

View file

@ -107,7 +107,7 @@ a1.gif 2.gif 3.gif 4.gif 5.gif 6.gifb1 2 3 4 5 6c1 2 3 4 5 6
a1.gifb1c1 a2.gifb2c2 a3.gifb3c3 a4.gifb4c4 a5.gifb5c5 a6.gifb6c6 a1.gifb1c1 a2.gifb2c2 a3.gifb3c3 a4.gifb4c4 a5.gifb5c5 a6.gifb6c6
echo '### Test -m with 60000 args'; seq 1 60000 | perl -pe 's/$/.gif/' | parallel -j1 -km echo a{}b{.}c{.} | tee >(wc; sleep 1) >(md5sum; sleep 1) >/dev/null; wait; sleep 1 echo '### Test -m with 60000 args'; seq 1 60000 | perl -pe 's/$/.gif/' | parallel -j1 -km echo a{}b{.}c{.} | tee >(wc; sleep 1) >(md5sum; sleep 1) >/dev/null; wait; sleep 1
### Test -m with 60000 args ### Test -m with 60000 args
d025d2d552a9ff809216d17d408de0b4 - f5e1ea298b25c5516d63061df5c56f79 -
10 179980 1286692 10 179980 1286692
echo '### Test -X with 60000 args'; seq 1 60000 | perl -pe 's/$/.gif/' | parallel -j1 -kX echo a{}b{.}c{.} | tee >(wc; sleep 1) >(md5sum; sleep 1) >/dev/null; wait; sleep 1 echo '### Test -X with 60000 args'; seq 1 60000 | perl -pe 's/$/.gif/' | parallel -j1 -kX echo a{}b{.}c{.} | tee >(wc; sleep 1) >(md5sum; sleep 1) >/dev/null; wait; sleep 1
### Test -X with 60000 args ### Test -X with 60000 args

View file

@ -5,12 +5,12 @@ Chars per line: 116300
6 119994 697800 6 119994 697800
echo '### Test of xargs -X command lines > 130k'; seq 1 60000 | parallel -X -j1 echo a{}b{}c | tee >(wc >/tmp/bwc$$) >(sort | (sleep 1; md5sum)) >/tmp/b$$; wait; CHAR=$(cat /tmp/b$$ | wc -c); LINES=$(cat /tmp/b$$ | wc -l); echo "Chars per line:" $(echo "$CHAR/$LINES" | bc); cat /tmp/bwc$$; rm /tmp/b$$ /tmp/bwc$$ echo '### Test of xargs -X command lines > 130k'; seq 1 60000 | parallel -X -j1 echo a{}b{}c | tee >(wc >/tmp/bwc$$) >(sort | (sleep 1; md5sum)) >/tmp/b$$; wait; CHAR=$(cat /tmp/b$$ | wc -c); LINES=$(cat /tmp/b$$ | wc -l); echo "Chars per line:" $(echo "$CHAR/$LINES" | bc); cat /tmp/bwc$$; rm /tmp/b$$ /tmp/bwc$$
### Test of xargs -X command lines > 130k ### Test of xargs -X command lines > 130k
201ecb8ca789cecf39ee914a6d75611b - 3cfc69ee81b0fe7fdbe8eb059ad2da61 -
Chars per line: 116826 Chars per line: 116826
7 60000 817788 7 60000 817788
echo '### Test of xargs -m command lines > 130k'; seq 1 60000 | parallel -k -j1 -m echo | md5sum echo '### Test of xargs -m command lines > 130k'; seq 1 60000 | parallel -k -j1 -m echo | md5sum
### Test of xargs -m command lines > 130k ### Test of xargs -m command lines > 130k
590091fd85dcb98f91c5e89cdddf21ef - 00b8aed2c1bc28368381273aa2ae104e -
echo '### This causes problems if we kill child processes'; # 2>/dev/null to avoid parallel: Warning: Starting 45 processes took > 2 sec. echo '### This causes problems if we kill child processes'; # 2>/dev/null to avoid parallel: Warning: Starting 45 processes took > 2 sec.
### This causes problems if we kill child processes ### This causes problems if we kill child processes
seq 2 40 | parallel -j 0 seq 1 10 2>/dev/null | sort | md5sum seq 2 40 | parallel -j 0 seq 1 10 2>/dev/null | sort | md5sum

View file

@ -74,8 +74,8 @@ please cite as described in 'parallel --citation'.
echo '### bug #39787: --xargs broken' echo '### bug #39787: --xargs broken'
### bug #39787: --xargs broken ### bug #39787: --xargs broken
nice perl -e 'for(1..30000){print "$_\n"}' | $NICEPAR --xargs -k echo | perl -ne 'print length $_,"\n"' nice perl -e 'for(1..30000){print "$_\n"}' | $NICEPAR --xargs -k echo | perl -ne 'print length $_,"\n"'
131052 131040
37842 37854
echo '### --delay should grow by 3 sec per arg' echo '### --delay should grow by 3 sec per arg'
### --delay should grow by 3 sec per arg ### --delay should grow by 3 sec per arg
stdout /usr/bin/time -f %e parallel --delay 3 true ::: 1 2 | perl -ne '$_ >= 3 and $_ <= 8 and print "OK\n"' stdout /usr/bin/time -f %e parallel --delay 3 true ::: 1 2 | perl -ne '$_ >= 3 and $_ <= 8 and print "OK\n"'
@ -88,6 +88,8 @@ echo '### Exit value should not be affected if an earlier job times out'
Exitval Exitval
-1 -1
255 255
parallel: Warning: This job was killed because it timed out:
parallel: Warning: sleep 10
echo '### --header regexp' echo '### --header regexp'
### --header regexp ### --header regexp
(echo %head1; echo %head2; seq 5) | $NICEPAR -kj2 --pipe -N2 --header '(%.*\n)*' echo JOB{#}\;cat (echo %head1; echo %head2; seq 5) | $NICEPAR -kj2 --pipe -N2 --header '(%.*\n)*' echo JOB{#}\;cat

View file

@ -18,6 +18,12 @@ bug #41412: --timeout + --delay causes deadlock
parallel -j10 --timeout 2.2 --delay 3 "sleep {}; echo {}" ::: 1 2 7 8 9 parallel -j10 --timeout 2.2 --delay 3 "sleep {}; echo {}" ::: 1 2 7 8 9
1 1
2 2
parallel: Warning: This job was killed because it timed out:
parallel: Warning: sleep 7; echo 7
parallel: Warning: This job was killed because it timed out:
parallel: Warning: sleep 8; echo 8
parallel: Warning: This job was killed because it timed out:
parallel: Warning: sleep 9; echo 9
echo '### Test --spreadstdin - more procs than args'; rm -f /tmp/parallel.ss.*; seq 1 5 | stdout $NICEPAR -j 10 --spreadstdin 'cat >/tmp/parallel.ss.$PARALLEL_SEQ' >/dev/null; cat /tmp/parallel.ss.*; rm -f /tmp/parallel.ss.* echo '### Test --spreadstdin - more procs than args'; rm -f /tmp/parallel.ss.*; seq 1 5 | stdout $NICEPAR -j 10 --spreadstdin 'cat >/tmp/parallel.ss.$PARALLEL_SEQ' >/dev/null; cat /tmp/parallel.ss.*; rm -f /tmp/parallel.ss.*
### Test --spreadstdin - more procs than args ### Test --spreadstdin - more procs than args
1 1
@ -400,7 +406,7 @@ echo "### BUG: empty lines with --show-limit"
### BUG: empty lines with --show-limit ### BUG: empty lines with --show-limit
echo | $NICEPAR --show-limits echo | $NICEPAR --show-limits
Maximal size of command: 131049 Maximal size of command: 131049
Maximal used size of command: 131057 Maximal used size of command: 131049
Execution of will continue now, and it will try to read its input Execution of will continue now, and it will try to read its input
and run commands; if this is not what you wanted to happen, please and run commands; if this is not what you wanted to happen, please