Mirror of https://git.savannah.gnu.org/git/parallel.git (synced 2024-11-22 14:07:55 +00:00)
Fixed bug #38441: CPU usage goes to 100% if load is higher than --load at first job.
parent 7bfb4a5e57
commit 3b752a668c
@@ -1297,15 +1297,18 @@ sub start_more_jobs {
         }
     }

+    my $sleep = 0.0001; # 0.01 ms - better performance on highend
     for my $sshlogin (values %Global::host) {
         debug("Running jobs before on ".$sshlogin->string().": ".$sshlogin->jobs_running()."\n");
         while ($sshlogin->jobs_running() < $sshlogin->max_jobs_running()) {
             if($opt::load and $sshlogin->loadavg_too_high()) {
                 # The load is too high or unknown
+                $sleep = ::reap_usleep($sleep);
                 next;
             }
             if($opt::noswap and $sshlogin->swapping()) {
                 # The server is swapping
+                $sleep = ::reap_usleep($sleep);
                 next;
             }
             if($sshlogin->too_fast_remote_login()) {
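Before this change the while-loop above hit next immediately whenever the load was too high, so start_more_jobs() spun through the loop and pinned a CPU at 100% until the load dropped. The added ::reap_usleep() calls make it sleep between checks instead, with the interval growing from the initial $sleep value. The effect can be checked from the shell; a minimal sketch, assuming GNU time installed as /usr/bin/time and using yes as a stand-in load generator (the testsuite below uses burnP6 instead):

    #!/bin/bash
    # Push the load above the core count, then wait for the 1-minute
    # load average to catch up (same idea as highload() in the testsuite).
    CPUS=$(parallel --number-of-cores)
    seq 0 $CPUS | parallel -j0 -N0 --timeout 60 'yes > /dev/null' &
    perl -e 'do{$a=`uptime`} while($a=~/average: *(\S+)/ and $1 < '$CPUS')'

    # Before the fix, user CPU time (%U) tracked wall clock (%e) because
    # parallel busy-waited; after the fix %U stays near zero while %e grows.
    /usr/bin/time -f "wall=%e user=%U" parallel --load 100% true ::: a

    kill %1 2>/dev/null; killall yes 2>/dev/null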
@@ -2482,7 +2485,7 @@ sub loadavg {
             # load average: 0.76, 1.53, 1.45
             if($uptime_out =~ /load averages?: (\d+.\d+)/) {
                 $self->{'loadavg'} = $1;
-                ::debug("New loadavg: ".$self->{'loadavg'});
+                ::debug("New loadavg: ".$self->{'loadavg'}."\n");
             } else {
                 ::die_bug("loadavg_invalid_content: $uptime_out");
             }
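loadavg() scrapes the output of uptime rather than reading /proc/loadavg, so the same command also works over ssh on remote sshlogins, including *BSD hosts where the label is "load averages:" rather than "load average:". The same extraction as a stand-alone check (note: the regex in the source leaves the dot unescaped; it is escaped here):

    #!/bin/bash
    # Print the 1-minute load average the way loadavg() extracts it.
    uptime | perl -ne 'print "New loadavg: $1\n" if /load averages?: (\d+\.\d+)/'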
@@ -1645,6 +1645,14 @@ treated as the relative path to your home dir. This means that if your
 home dir is different on remote computers (e.g. if your login is
 different) the relative path will still be relative to your home dir.

+To see the difference try:
+
+@strong{parallel -S server pwd ::: ""}
+
+@strong{parallel --wd . -S server pwd ::: ""}
+
+@strong{parallel --wd ... -S server pwd ::: ""}
+
 @item @strong{--wait}
 @anchor{@strong{--wait}}
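Annotated, assuming an ssh-reachable host called server whose home dir differs from the local one (the exact paths printed depend on the two accounts):

    #!/bin/bash
    parallel -S server pwd ::: ""           # the remote login dir
    parallel --wd . -S server pwd ::: ""    # the local cwd, re-rooted as a
                                            # path relative to the remote home dir
    parallel --wd ... -S server pwd ::: ""  # a temporary working dir created
                                            # for the job and removed afterwards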
@@ -1,13 +1,36 @@
 #!/bin/bash

+highload ()
+{
+  # Force load > #cpus
+  CPUS=$(parallel --number-of-cores)
+  seq 0 0.1 $CPUS | nice nice parallel -j0 timeout 50 burnP6 2>/dev/null &
+  perl -e 'do{$a=`uptime`} while($a=~/average: *(\S+)/ and $1 < '$CPUS')'
+  # Load is now > $CPUS
+  # Kill off burnP6 and the parent parallel
+  kill %1; sleep 0.1; kill %1; killall burnP6; sleep 0.3; kill -9 %1 2>/dev/null
+}
+
+highload 2>/dev/null
+
+cat <<'EOF' | parallel -j0 -k -L1
+echo "bug #38441: CPU usage goes to 100% if load is higher than --load at first job"
+/usr/bin/time -f %e parallel --load 100% true ::: a 2>&1 |
+  perl -ne '$_ > 1 and print "More than 1 secs wall clock: OK\n"'
+
+/usr/bin/time -f %U parallel --load 100% true ::: a 2>&1 |
+  perl -ne '$_ < 1 and print "Less than 1 secs user time: OK\n"'
+
 echo '### Test slow arguments generation - https://savannah.gnu.org/bugs/?32834'
 seq 1 3 | parallel -j1 "sleep 2; echo {}" | parallel -kj2 echo
 seq 1 3 | parallel -j1 "sleep 2; echo {}" | parallel -kj2 echo

 echo '### Test too slow spawning'
 killall -9 burnP6 2>/dev/null
 # Let the commands below run during high load
 seq `parallel --number-of-cores` | parallel -j200% -N0 timeout -k 25 26 burnP6 &
 sleep 1
 seq 1 1000 |
   stdout nice nice parallel -s 100 -uj0 true |
   perl -pe '/parallel: Warning: Starting \d+ processes took/ and do {close STDIN; `killall -9 burnP6`; print "OK\n"; exit }'
+sleep 1;
+seq 1 1000 | stdout nice nice parallel -s 100 -uj0 true |
+  perl -pe '/parallel: Warning: Starting \d+ processes took/ and do {close STDIN; `killall -9 burnP6`; print "OK\n"; exit }'
+EOF
+
+# Make sure we got all the burnP6 killed
+killall -9 burnP6 2>/dev/null
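The two /usr/bin/time assertions encode the bug directly: with the load held above 100%, parallel --load 100% must wait (wall clock %e above one second) but must not spin (user CPU %U below one second). A trimmed-down version of the same check, assuming GNU time at /usr/bin/time and the load already raised as by highload():

    #!/bin/bash
    # %e = elapsed wall clock, %U = user-mode CPU seconds. A sleeping wait
    # gives a large %e with a tiny %U; a busy-wait makes them nearly equal.
    read -r wall user < <(/usr/bin/time -f '%e %U' \
        parallel --load 100% true ::: a 2>&1 >/dev/null)
    echo "waited ${wall}s of wall clock using ${user}s of CPU"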
@@ -1,3 +1,6 @@
+bug #38441: CPU usage goes to 100% if load is higher than --load at first job
+More than 1 secs wall clock: OK
+Less than 1 secs user time: OK
 ### Test slow arguments generation - https://savannah.gnu.org/bugs/?32834
 1
 2