diff --git a/src/parallel b/src/parallel
index 57911446..d98eee08 100755
--- a/src/parallel
+++ b/src/parallel
@@ -521,7 +521,7 @@ sub index64 {
     my $block_size = 2**31-1;
     my $strlen = length($$ref);
     # No point in doing extra work if we don't need to.
-    if($strlen < $block_size) {
+    if($strlen < $block_size or $] > 5.022) {
 	return index($$ref, $match, $pos);
     }
 
@@ -3401,16 +3401,20 @@ sub which {
 	#
 	# ash bash csh dash fdsh fish fizsh ksh ksh93 mksh pdksh
 	# posh rbash rush rzsh sash sh static-sh tcsh yash zsh
-	my @shells = qw(ash bash csh dash fdsh fish fizsh ksh
-			ksh93 mksh pdksh posh rbash rush rzsh
-			sash sh static-sh tcsh yash zsh -sh -csh);
+	my @shells = (qw(ash bash csh dash fdsh fish fizsh ksh
+			 ksh93 mksh pdksh posh rbash rush rzsh
+			 sash sh static-sh tcsh yash zsh -sh -csh),
+		      '-sh (sh)' # sh on FreeBSD
+	    );
 	# Can be formatted as:
-	# [sh] -sh sh busybox sh
+	# [sh] -sh sh busybox sh -sh (sh)
 	# /bin/sh /sbin/sh /opt/csw/sh
 	# NOT: foo.sh sshd crash flush pdflush scosh fsflush ssh
-	my $shell = "(?:".join("|",@shells).")";
-	$regexp = '^((\[)('. $shell. ')(\])|(|\S+/|busybox )('. $shell. '))($| )';
+	my $shell = "(?:".join("|",map { "\Q$_\E" } @shells).")";
+	$regexp = '^((\[)('. $shell. ')(\])|(|\S+/|busybox )('. $shell. '))($| [^(])';
 	%fakename = (
+	    # sh disguises itself as -sh (sh) on FreeBSD
+	    "-sh (sh)" => ["sh"],
 	    # csh and tcsh disguise themselves as -sh/-csh
 	    "-sh" => ["csh", "tcsh"],
 	    "-csh" => ["tcsh", "csh"],
@@ -4180,7 +4184,8 @@ sub loadavg {
 		local $/ = undef;
 		my $load_out = <$load_fh>;
 		close $load_fh;
-		my $load =()= ($load_out=~/(^\s?[DOR]\S* [^\[])/gm);
+		# Count lines starting with D,O,R but command does not start with [
+		my $load =()= ($load_out=~/(^\s?[DOR]\S* +(?=[^\[])\S)/gm);
 		if($load > 0) {
 		    # load is overestimated by 1
 		    $self->{'loadavg'} = $load - 1;
@@ -6187,13 +6192,14 @@ sub sshlogin_wrap {
 	if(not $monitor_parent_sshd_script) {
 	    $monitor_parent_sshd_script =
 		# This will be packed in ', so only use "
-		::spacefree(0,q{
+		::spacefree(0,'$shell = "'.($ENV{'PARALLEL_SHELL'} || '$ENV{SHELL}').'";'.
+			    q{
 			       $SIG{CHLD} = sub { $done = 1; };
 			       $pid = fork;
 			       unless($pid) {
 				   # Make own process group to be able to kill HUP it later
 				   setpgrp;
-				   exec $ENV{SHELL}, "-c", ($bashfunc."@ARGV");
+				   exec $shell, "-c", ($bashfunc."@ARGV");
 				   die "exec: $!\n";
 			       }
 			       do {
diff --git a/src/parallel_tutorial.html b/src/parallel_tutorial.html
index 0ce54260..1644636c 100644
--- a/src/parallel_tutorial.html
+++ b/src/parallel_tutorial.html
@@ -47,7 +47,7 @@
Install the newest version with:
- (wget -O - pi.dk/3 || curl pi.dk/3/) | bash
+ (wget -O - pi.dk/3 || curl pi.dk/3/ || fetch -o - http://pi.dk/3) | bash
This will also install the newest version of the tutorial:
@@ -165,6 +165,14 @@
 perl -e 'printf "f1\tf2\nA\tB\nC\tD\n"' > tsv-file.tsv
+
+The file can be generated by:
+ + perl -e 'for(1..128){print "$_\n"}' > num128
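A quick sanity check that the file was generated as intended (not part of the patch, just a verification step):
  wc -l num128
which should report 128 lines.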
+
The command can be a script, a binary or a Bash function if the function is exported using 'export -f':
- # Only works in Bash and only if $SHELL=.../bash
+ # Only works in Bash
my_func() {
echo in my_func $1
}
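Because exported shell functions are a Bash feature, it can help to force GNU parallel to run its jobs in bash when the login shell is something else; the PARALLEL_SHELL environment variable selects that shell. A minimal sketch (assumes bash is installed):
  export -f my_func
  PARALLEL_SHELL=bash parallel my_func ::: 1 2 3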
@@ -465,7 +473,7 @@
parallel -j 2 echo {%} ::: A B C
-Output (the order may be different):
+Output (the order may be different and 1 and 2 may be swapped):
1
2
@@ -527,7 +535,7 @@
parallel -j2 --slotreplace ,, echo ,, ::: A B C
-Output (the order may be different):
+Output (the order may be different and 1 and 2 may be swapped):
1
2
@@ -664,7 +672,7 @@
cat num30000 | parallel --xargs echo | wc -l
-Output:
+Output (if you run this under Bash on GNU/Linux):
2
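The count depends on the maximal command line length on your system, which is why the output is only stated for Bash on GNU/Linux. The limit GNU parallel will use can be inspected with:
  parallel --max-line-length-allowed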
@@ -686,13 +694,13 @@
cat num30000 | parallel --jobs 4 -m echo | wc -l
-Output:
+Output (if you run this under Bash on GNU/Linux):
5
This is even more visible when running 4 jobs with 10 arguments. The 10 arguments are being spread over 4 jobs:
- parallel --jobs 4 -m echo ::: {1..10}
+ parallel --jobs 4 -m echo ::: 1 2 3 4 5 6 7 8 9 10
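If your shell does not do {1..10}-style brace expansion, the same ten arguments can also be produced with seq (a sketch; assumes seq is installed):
  seq 10 | parallel --jobs 4 -m echo
Either way the arguments are spread over 4 jobs.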
Output:
@@ -839,7 +847,7 @@
pre-A-post
-Controling the output
+Controlling the output
The output can be prefixed with the argument:
@@ -1020,28 +1028,28 @@
The number of concurrent jobs is given with --jobs/-j:
- /usr/bin/time parallel -N0 -j64 sleep 1 ::: {1..128}
+ /usr/bin/time parallel -N0 -j64 sleep 1 :::: num128
With 64 jobs in parallel the 128 sleeps will take 2-8 seconds to run - depending on how fast your machine is.
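These examples use :::: num128, which reads the arguments from the file, instead of ::: {1..128}, which would rely on Bash's brace expansion. The two input styles can be compared with:
  parallel echo ::: A B C
  parallel echo :::: num128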
By default --jobs is the same as the number of CPU cores. So this:
- /usr/bin/time parallel -N0 sleep 1 ::: {1..128}
+ /usr/bin/time parallel -N0 sleep 1 :::: num128
should take twice the time of running 2 jobs per CPU core:
- /usr/bin/time parallel -N0 --jobs 200% sleep 1 ::: {1..128}
+ /usr/bin/time parallel -N0 --jobs 200% sleep 1 :::: num128
--jobs 0 will run as many jobs in parallel as possible:
- /usr/bin/time parallel -N0 --jobs 0 sleep 1 ::: {1..128}
+ /usr/bin/time parallel -N0 --jobs 0 sleep 1 :::: num128
which should take 1-7 seconds depending on how fast your machine is.
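How many jobs --jobs 0 can actually start is capped by system limits such as the number of processes and open file handles; a rough way to see one of those limits (assuming a POSIX shell):
  ulimit -n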
--jobs can read from a file which is re-read when a job finishes:
echo 50% > my_jobs
- /usr/bin/time parallel -N0 --jobs my_jobs sleep 1 ::: {1..128} &
+ /usr/bin/time parallel -N0 --jobs my_jobs sleep 1 :::: num128 &
sleep 1
echo 0 > my_jobs
wait
@@ -1050,7 +1058,7 @@
Instead of basing the percentage on the number of CPU cores GNU Parallel can base it on the number of CPUs:
- parallel --use-cpus-instead-of-cores -N0 sleep 1 ::: {1..128}
+ parallel --use-cpus-instead-of-cores -N0 sleep 1 :::: num128
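To see the two counts GNU parallel detects on your machine:
  parallel --number-of-cpus
  parallel --number-of-cores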
Interactivity
diff --git a/src/parallel_tutorial.pod b/src/parallel_tutorial.pod
index cc054f81..77633634 100644
--- a/src/parallel_tutorial.pod
+++ b/src/parallel_tutorial.pod
@@ -19,7 +19,7 @@ To run this tutorial you must have the following:
Install the newest version with:
- (wget -O - pi.dk/3 || curl pi.dk/3/) | bash
+ (wget -O - pi.dk/3 || curl pi.dk/3/ || fetch -o - http://pi.dk/3) | bash
This will also install the newest version of the tutorial:
@@ -58,6 +58,12 @@ The file can be generated by:
perl -e 'printf "f1\tf2\nA\tB\nC\tD\n"' > tsv-file.tsv
+=item num128
+
+The file can be generated by:
+
+ perl -e 'for(1..128){print "$_\n"}' > num128
+
=item num30000
The file can be generated by:
@@ -285,7 +291,7 @@ Output (the order may be different):
The command can be a script, a binary or a Bash function if the function is
exported using 'export -f':
- # Only works in Bash and only if $SHELL=.../bash
+ # Only works in Bash
my_func() {
echo in my_func $1
}
@@ -366,7 +372,7 @@ number of jobs to run in parallel):
parallel -j 2 echo {%} ::: A B C
-Output (the order may be different):
+Output (the order may be different and 1 and 2 may be swapped):
1
2
@@ -428,7 +434,7 @@ The replacement string {%} can be changed with --slotreplace:
parallel -j2 --slotreplace ,, echo ,, ::: A B C
-Output (the order may be different):
+Output (the order may be different and 1 and 2 may be swapped):
1
2
@@ -578,7 +584,7 @@ single line:
cat num30000 | parallel --xargs echo | wc -l
-Output:
+Output (if you run this under Bash on GNU/Linux):
2
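The grouping depends on the system's maximal command line length; it can be made independent of that by capping the length with -s/--max-chars (a sketch with an arbitrary 10000-character limit):
  cat num30000 | parallel --max-chars 10000 --xargs echo | wc -l
The count then follows from the chosen limit rather than from the system maximum.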
@@ -607,14 +613,14 @@ total of 5 jobs:
cat num30000 | parallel --jobs 4 -m echo | wc -l
-Output:
+Output (if you run this under Bash on GNU/Linux):
5
This is even more visible when running 4 jobs with 10 arguments. The
10 arguments are being spread over 4 jobs:
- parallel --jobs 4 -m echo ::: {1..10}
+ parallel --jobs 4 -m echo ::: 1 2 3 4 5 6 7 8 9 10
Output:
@@ -763,7 +769,7 @@ Output:
pre-A-post
-=head1 Controling the output
+=head1 Controlling the output
The output can be prefixed with the argument:
@@ -948,29 +954,29 @@ The directories are named after the variables and their values.
The number of concurrent jobs is given with --jobs/-j:
- /usr/bin/time parallel -N0 -j64 sleep 1 ::: {1..128}
+ /usr/bin/time parallel -N0 -j64 sleep 1 :::: num128
With 64 jobs in parallel the 128 sleeps will take 2-8 seconds to run -
depending on how fast your machine is.
By default --jobs is the same as the number of CPU cores. So this:
- /usr/bin/time parallel -N0 sleep 1 ::: {1..128}
+ /usr/bin/time parallel -N0 sleep 1 :::: num128
should take twice the time of running 2 jobs per CPU core:
- /usr/bin/time parallel -N0 --jobs 200% sleep 1 ::: {1..128}
+ /usr/bin/time parallel -N0 --jobs 200% sleep 1 :::: num128
--jobs 0 will run as many jobs in parallel as possible:
- /usr/bin/time parallel -N0 --jobs 0 sleep 1 ::: {1..128}
+ /usr/bin/time parallel -N0 --jobs 0 sleep 1 :::: num128
which should take 1-7 seconds depending on how fast your machine is.
--jobs can read from a file which is re-read when a job finishes:
echo 50% > my_jobs
- /usr/bin/time parallel -N0 --jobs my_jobs sleep 1 ::: {1..128} &
+ /usr/bin/time parallel -N0 --jobs my_jobs sleep 1 :::: num128 &
sleep 1
echo 0 > my_jobs
wait
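The file is not limited to percentages; any value that --jobs accepts can be written to it while the jobs are running, for example an absolute count:
  echo 8 > my_jobs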
@@ -982,7 +988,7 @@ parallel.
Instead of basing the percentage on the number of CPU cores
GNU Parallel can base it on the number of CPUs:
- parallel --use-cpus-instead-of-cores -N0 sleep 1 ::: {1..128}
+ parallel --use-cpus-instead-of-cores -N0 sleep 1 :::: num128
=head2 Interactivity
diff --git a/testsuite/tests-to-run/parallel-freebsd.sh b/testsuite/tests-to-run/parallel-freebsd.sh
index 3fd294e4..cc12f4b4 100644
--- a/testsuite/tests-to-run/parallel-freebsd.sh
+++ b/testsuite/tests-to-run/parallel-freebsd.sh
@@ -46,7 +46,7 @@ bash -c 'echo bug \#43358: shellshock breaks exporting functions using --env _;
echo Non-shellshock-hardened to non-shellshock-hardened;
funky() { echo Function $1; };
export -f funky;
- parallel --env funky -S localhost funky ::: non-shellshock-hardened'
+ PARALLEL_SHELL=bash parallel --env funky -S localhost funky ::: non-shellshock-hardened'
bash -c 'echo bug \#43358: shellshock breaks exporting functions using --env _;
echo Non-shellshock-hardened to shellshock-hardened;
@@ -54,6 +54,11 @@ bash -c 'echo bug \#43358: shellshock breaks exporting functions using --env _;
export -f funky;
parallel --env funky -S parallel@192.168.1.72 funky ::: shellshock-hardened'
+echo '### Test --load (must give 1=true)'
+ parallel -j0 -N0 --timeout 5 --nice 10 'bzip2 < /dev/zero >/dev/null' ::: 1 2 3 &
+ parallel --argsep ,, --joblog - -N0 parallel --load 100% echo ::: 1 ,, 1 |
+ parallel --colsep '\t' --header : echo '{=4 $_=$_>5=}'
+
EOF
VBoxManage controlvm FreeBSD71 savestate
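The new --load test reads column 4 of the --joblog, which is JobRuntime: if --load 100% works, the inner job is held back while the bzip2 processes keep the load high, so its runtime should exceed the 5-second timeout and '{=4 $_=$_>5=}' prints 1. A small sketch of the joblog layout (the path /tmp/log is only an example):
  parallel --joblog /tmp/log sleep ::: 1
  cut -f 4 /tmp/log    # prints the JobRuntime column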
diff --git a/testsuite/wanted-results/parallel-freebsd b/testsuite/wanted-results/parallel-freebsd
index 3590b7d1..6e8a5463 100644
--- a/testsuite/wanted-results/parallel-freebsd
+++ b/testsuite/wanted-results/parallel-freebsd
@@ -54,11 +54,17 @@ works
with
/usr/bin/perl -w ./shebang-wrap-opt options
options
-bash -c 'echo bug \#43358: shellshock breaks exporting functions using --env _; echo Non-shellshock-hardened to non-shellshock-hardened; funky() { echo Function $1; }; export -f funky; parallel --env funky -S localhost funky ::: non-shellshock-hardened'
+bash -c 'echo bug \#43358: shellshock breaks exporting functions using --env _; echo Non-shellshock-hardened to non-shellshock-hardened; funky() { echo Function $1; }; export -f funky; PARALLEL_SHELL=bash parallel --env funky -S localhost funky ::: non-shellshock-hardened'
bug #43358: shellshock breaks exporting functions using --env _
Non-shellshock-hardened to non-shellshock-hardened
Function non-shellshock-hardened
+parallel: Warning: Shell functions may not be supported in bash
bash -c 'echo bug \#43358: shellshock breaks exporting functions using --env _; echo Non-shellshock-hardened to shellshock-hardened; funky() { echo Function $1; }; export -f funky; parallel --env funky -S parallel@192.168.1.72 funky ::: shellshock-hardened'
bug #43358: shellshock breaks exporting functions using --env _
Non-shellshock-hardened to shellshock-hardened
Function shellshock-hardened
+echo '### Test --load (must give 1=true)'
+### Test --load (must give 1=true)
+ parallel -j0 -N0 --timeout 5 --nice 10 'bzip2 < /dev/zero >/dev/null' ::: 1 2 3 & parallel --argsep ,, --joblog - -N0 parallel --load 100% echo ::: 1 ,, 1 | parallel --colsep '\t' --header : echo '{=4 $_=$_>5=}'
+
+1