mirror of https://git.savannah.gnu.org/git/parallel.git
commit 6f730374d8 (parent 020406059e)

    Patch for --noswap on Mac. Untested.
@@ -139,8 +139,7 @@ http://freshmeat.net/projects/parallel/releases/new
 
 == Update Diaspora Twitter ==
 
-New version of #GNU Parallel http://pi.dk/0 released. See what is new
-in this release http://pi.dk/2 Watch the intro videos http://pi.dk/1
+New release of #GNU Parallel pi.dk/0 New in this release pi.dk/2 See the intro videos pi.dk/1
 
 [x] Twitter
 Aspect: Public
|
58
src/parallel
58
src/parallel
|
@@ -543,7 +543,7 @@ sub get_options_from_array {
 sub parse_options {
     # Returns: N/A
     # Defaults:
-    $Global::version = 20120222;
+    $Global::version = 20120224;
     $Global::progname = 'parallel';
     $Global::infinity = 2**31;
     $Global::debug = 0;
@@ -2182,7 +2182,9 @@ sub swap_activity {
     -e $ENV{'HOME'}."/.parallel" or mkdir $ENV{'HOME'}."/.parallel";
     -e $ENV{'HOME'}."/.parallel/tmp" or mkdir $ENV{'HOME'}."/.parallel/tmp";
     my $swap_activity;
-    $swap_activity = "vmstat 1 2 | tail -n1 | awk '{print \$7*\$8}'";
+    $swap_activity = ("vmstat 1 2 2>/dev/null | tail -n1 | awk '{print \$7*\$8}' || ".
+                      # If the (remote) machine is Mac and the above fails, try this:
+                      "vm_stat 1 | head -n 3 | tail -n1 | awk '{print \$9*\$10}'");
     if($self->{'string'} ne ":") {
         $swap_activity = $self->sshcommand() . " " . $self->serverlogin() . " " .
             ::shell_quote_scalar($swap_activity);
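The commit message marks this Mac branch as untested. A quick manual check on a macOS host would be to run the fallback pipeline on its own; this is only a sketch, assuming (as the patch does) that fields 9 and 10 of vm_stat's first sample line are the swap-related counters:

    # Mac branch alone; should print a number (0 on an idle machine):
    vm_stat 1 | head -n 3 | tail -n1 | awk '{print $9*$10}'

    # The combined string the patch builds, runnable as-is in a shell:
    vmstat 1 2 2>/dev/null | tail -n1 | awk '{print $7*$8}' ||
      vm_stat 1 | head -n 3 | tail -n1 | awk '{print $9*$10}'

One thing worth verifying while testing: the exit status of the first pipeline is awk's, and awk exits 0 even when vmstat is missing and it gets no input, so the || may never reach the vm_stat branch on a Mac.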
@@ -2618,13 +2620,27 @@ sub no_of_cpus {
     # Returns:
     #   Number of physical CPUs
     local $/="\n"; # If delimiter is set, then $/ will be wrong
-    my $no_of_cpus = (no_of_cpus_freebsd()
-                      || no_of_cpus_darwin()
-                      || no_of_cpus_solaris()
-                      || no_of_cpus_aix()
-                      || no_of_cpus_gnu_linux()
-        );
+    my $no_of_cpus;
+    if ($^O eq 'linux') {
+        $no_of_cpus = no_of_cpus_gnu_linux();
+    } elsif ($^O eq 'freebsd') {
+        $no_of_cpus = no_of_cpus_freebsd();
+    } elsif ($^O eq 'solaris') {
+        $no_of_cpus = no_of_cpus_solaris();
+    } elsif ($^O eq 'aix') {
+        $no_of_cpus = no_of_cpus_aix();
+    } elsif ($^O eq 'darwin') {
+        $no_of_cpus = no_of_cpus_darwin();
+    } else {
+        $no_of_cpus = (no_of_cpus_freebsd()
+                       || no_of_cpus_darwin()
+                       || no_of_cpus_solaris()
+                       || no_of_cpus_aix()
+                       || no_of_cpus_gnu_linux()
+            );
+    }
     if($no_of_cpus) {
         chomp $no_of_cpus;
         return $no_of_cpus;
     } else {
         warn("parallel: Cannot figure out number of cpus. Using 1");
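The rewrite dispatches on Perl's built-in $^O, which holds the operating-system name the interpreter was built for; on the five systems special-cased above it is exactly the string compared against, so only unknown platforms fall back to the old trial-and-error chain. The same pattern is applied to no_of_cores below. To see what a given host reports:

    perl -e 'print "$^O\n"'   # e.g. linux, freebsd, solaris, aix, darwin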
@@ -2636,13 +2652,27 @@ sub no_of_cores {
     # Returns:
     #   Number of CPU cores
     local $/="\n"; # If delimiter is set, then $/ will be wrong
-    my $no_of_cores = (no_of_cores_freebsd()
-                       || no_of_cores_darwin()
-                       || no_of_cores_solaris()
-                       || no_of_cores_aix()
-                       || no_of_cores_gnu_linux()
-        );
+    my $no_of_cores;
+    if ($^O eq 'linux') {
+        $no_of_cores = no_of_cores_gnu_linux();
+    } elsif ($^O eq 'freebsd') {
+        $no_of_cores = no_of_cores_freebsd();
+    } elsif ($^O eq 'solaris') {
+        $no_of_cores = no_of_cores_solaris();
+    } elsif ($^O eq 'aix') {
+        $no_of_cores = no_of_cores_aix();
+    } elsif ($^O eq 'darwin') {
+        $no_of_cores = no_of_cores_darwin();
+    } else {
+        $no_of_cores = (no_of_cores_freebsd()
+                        || no_of_cores_darwin()
+                        || no_of_cores_solaris()
+                        || no_of_cores_aix()
+                        || no_of_cores_gnu_linux()
+            );
+    }
     if($no_of_cores) {
         chomp $no_of_cores;
         return $no_of_cores;
     } else {
         warn("parallel: Cannot figure out number of CPU cores. Using 1");
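The darwin branches delegate to no_of_cpus_darwin() and no_of_cores_darwin(), whose bodies are outside this diff. On a Mac the two figures would plausibly come from sysctl; the key names below are an assumption for illustration, not taken from this commit:

    # Hypothetical Darwin probes (sysctl key names assumed):
    sysctl -n hw.physicalcpu   # physical processors
    sysctl -n hw.logicalcpu    # cores as seen by the scheduler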
@@ -3133,23 +3133,59 @@ echo 1,2,3 | parallel -vkd, "echo 'a'{}'b'"
 
 echo 1,2,3 | parallel -vkd, "echo 'a'"{}"'b'"
 
-=head2 Startup speed
+=head2 Speed
 
-GNU B<parallel> is slow at starting up. Half of the startup time on
-the local computer is spent finding the maximal length of a command
-line. Setting B<-s> will remove this part of the startup time.
+=head3 Startup
+
+GNU B<parallel> is slow at starting up - around 250 ms. Half of the
+startup time on the local computer is spent finding the maximal length
+of a command line. Setting B<-s> will remove this part of the startup
+time.
+
+=head3 Job startup
+
+Starting a job takes around 3 ms. This can be a big overhead if the
+job takes only a few ms to run. Often you can group small jobs together
+using B<-X> which will make the overhead less significant.
+
+Using B<--ungroup> the 3 ms can be lowered to around 2 ms.
+
+=head3 SSH
 
 When using multiple computers GNU B<parallel> opens B<ssh> connections
 to them to figure out how many connections can be used reliably
 simultaneously (namely SSHD's MaxStartups). This test is done for each
-host in serial, so if your --sshloginfile contains many hosts it may
+host in serial, so if your B<--sshloginfile> contains many hosts it may
 be slow.
 
+=head3 Disk access
+
+A single disk can normally read data faster if it reads one file at a
+time instead of reading a lot of files in parallel, as this will avoid
+disk seeks. However, newer disk systems with multiple drives can read
+faster if reading from multiple files in parallel.
+
+If the jobs are of the form read-all-compute-all-write-all, so
+everything is read before anything is written, it may be faster to
+force only one disk access at a time:
+
+  sem --id diskio cat file | compute | sem --id diskio cat > file
+
+If the jobs are of the form read-compute-write, so writing starts
+before all reading is done, it may be faster to force only one reader
+and one writer at a time:
+
+  sem --id read cat file | compute | sem --id write cat > file
+
+If the jobs are of the form read-compute-read-compute, it may be
+faster to run more jobs in parallel than the system has CPUs, as some
+of the jobs will be stuck waiting for disk access.
+
 =head2 --nice limits command length
 
 The current implementation of B<--nice> is too pessimistic in the max
 allowed command length. It only uses a little more than half of what
-it could. This affects -X and -m. If this becomes a real problem for
-you file a bug-report.
+it could. This affects B<-X> and B<-m>. If this becomes a real problem
+for you, file a bug report.
 
 =head2 Aliases and functions do not work
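The three knobs described above (-s, -X, --ungroup) can be compared directly from a shell. A rough benchmarking sketch, with the caveat that the 250 ms and 3 ms figures are the document's own estimates and timings will vary by machine:

    # Startup: a fixed -s skips probing the maximal command-line length.
    time parallel -s 65536 echo ::: 1

    # Job startup: -X packs many arguments into few command lines;
    # --ungroup trims per-job output handling.
    time seq 1000 | parallel echo > /dev/null
    time seq 1000 | parallel -X echo > /dev/null
    time seq 1000 | parallel --ungroup echo > /dev/null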
@@ -3337,25 +3337,69 @@ echo 1,2,3 | parallel -vkd, "echo 'a'@{@}'b'"
 
 echo 1,2,3 | parallel -vkd, "echo 'a'"@{@}"'b'"
 
-@section Startup speed
-@anchor{Startup speed}
+@section Speed
+@anchor{Speed}
 
-GNU @strong{parallel} is slow at starting up. Half of the startup time on
-the local computer is spent finding the maximal length of a command
-line. Setting @strong{-s} will remove this part of the startup time.
+@subsection Startup
+@anchor{Startup}
+
+GNU @strong{parallel} is slow at starting up - around 250 ms. Half of the
+startup time on the local computer is spent finding the maximal length
+of a command line. Setting @strong{-s} will remove this part of the startup
+time.
+
+@subsection Job startup
+@anchor{Job startup}
+
+Starting a job takes around 3 ms. This can be a big overhead if the
+job takes only a few ms to run. Often you can group small jobs together
+using @strong{-X} which will make the overhead less significant.
+
+Using @strong{--ungroup} the 3 ms can be lowered to around 2 ms.
+
+@subsection SSH
+@anchor{SSH}
 
 When using multiple computers GNU @strong{parallel} opens @strong{ssh} connections
 to them to figure out how many connections can be used reliably
 simultaneously (namely SSHD's MaxStartups). This test is done for each
-host in serial, so if your --sshloginfile contains many hosts it may
+host in serial, so if your @strong{--sshloginfile} contains many hosts it may
 be slow.
 
+@subsection Disk access
+@anchor{Disk access}
+
+A single disk can normally read data faster if it reads one file at a
+time instead of reading a lot of files in parallel, as this will avoid
+disk seeks. However, newer disk systems with multiple drives can read
+faster if reading from multiple files in parallel.
+
+If the jobs are of the form read-all-compute-all-write-all, so
+everything is read before anything is written, it may be faster to
+force only one disk access at a time:
+
+@verbatim
+sem --id diskio cat file | compute | sem --id diskio cat > file
+@end verbatim
+
+If the jobs are of the form read-compute-write, so writing starts
+before all reading is done, it may be faster to force only one reader
+and one writer at a time:
+
+@verbatim
+sem --id read cat file | compute | sem --id write cat > file
+@end verbatim
+
+If the jobs are of the form read-compute-read-compute, it may be
+faster to run more jobs in parallel than the system has CPUs, as some
+of the jobs will be stuck waiting for disk access.
+
 @section --nice limits command length
 @anchor{--nice limits command length}
 
 The current implementation of @strong{--nice} is too pessimistic in the max
 allowed command length. It only uses a little more than half of what
-it could. This affects -X and -m. If this becomes a real problem for
-you file a bug-report.
+it could. This affects @strong{-X} and @strong{-m}. If this becomes a real problem
+for you, file a bug report.
 
 @section Aliases and functions do not work