diff --git a/doc/release_new_version b/doc/release_new_version index a4701c8c..322f2aea 100644 --- a/doc/release_new_version +++ b/doc/release_new_version @@ -129,10 +129,15 @@ http://nd.gd/2j Watch the intro video http://nd.gd/0s https://savannah.gnu.org/news/?group=parallel + # Only 350 requests per hour: 1 tweet = 3 requests # 119 tweets/hour = sleep 30 per tweet (40 to be safe) +# The above is not enough. +# Went over the limit at number 132 (sleep 40) +# https://support.twitter.com/entries/15364-about-twitter-limits-update-api-dm-and-following +# says 250 direct msg per day = 86400/250 = sleep 345 cat twitters | grep -iv removed | - parallel -j1 sleep 40\; echo @{} You have earlier shown interest in GNU Parallel. \ + parallel -j1 sleep 345\; echo @{} You have earlier shown interest in GNU Parallel. \ A new version is out: http://nd.gd/2j '|' ttytter == Send announce == @@ -158,74 +163,18 @@ cc:Peter Simons , Sandro Cazzaniga , Christian Faulhammer , Ryoichiro Suzuki , Jesse Alama -Subject: GNU Parallel 20110522 ('Pakistan') released +Subject: GNU Parallel 20110622 ('XXX') released -GNU Parallel 20115022 ('Pakistan') has been released. It is +GNU Parallel 20110622 ('XXX') has been released. It is available for download at: http://ftp.gnu.org/gnu/parallel/ -This is a major release. So far GNU Parallel has been focused on -replacing a single for-loop. The Pakistan release introduces ways to -replace nested loops. - -If you are using the {1} {2} syntax for multiple input sources, then you -need to read about --xapply as the function has changed. - New in this release: -* Multiple ::: can be put on the command line and will be treated - similar to contents from multiple -a's. +* Parallel processing without Hadoop! + http://www.solutionhacker.com/parallel-processing-without-hadoop/ -* ::: and :::: can now be mixed on the command line. Use {1} .. {n} to - refer to inputs from the different input sources. 
- -* --xapply is now needed to get xapply's behaviour of reading one line - from each of the input sources. - -* Multiple input sources will cause all combinations of arguments from - the sources to be generated. E.g. 'parallel echo {1}+{2} ::: 1 2 ::: - a b c' will print 1+a 1+b 1+c 2+a 2+b 2+c. This can often replace - nested loops. - -* Implemented {//} for the input line with the basename removed (dirname). - -* New video showing the new ::: and :::: input sources. - http://tinyogg.com/watch/iOdhU/ http://www.youtube.com/watch?v=fOX1EyHkQwc - -* GNU Parallel now has a logo. - http://www.gnu.org/software/parallel/logo.png - -* Article about GNU SQL in USENIX Magazine ;login: (print) - http://www.usenix.org/publications/login/2011-04/ - -* Using GNU Parallel with EC2. Thanks to Kevin Wu. - http://blog.kevinformatics.com/post/4970574713/interested-in-your-experience-using-gnu-parallel-in - -* Review with idea for {..} and {...} in Japanese. Thanks to ichii386. - http://d.hatena.ne.jp/ichii386/20110426 - -* Upgrade GNU Parallel using Macports. Thanks to Phil Hollenback. - http://www.hollenback.net/index.php/MacportsParallel - -* Robert from Echo One discusses using processes instead of threads: - http://rrees.wordpress.com/2011/04/25/many-cores-many-threads/ - -* How to run GNU Parallel on a SLURM cluster. Thanks to Kevin Stock. - http://www.kevinstock.org/2011/04/using-the-ri-cluster/ - -* Short article about using GNU Parallel with lame: - http://loopkid.net/articles/2011/04/30/accelerate-lame-mp3-conversion - -* Using GNU Parallel to run tail -f in Japanese. Thanks to Clouder. - http://blog.clouder.jp/archives/001140.html - -* BBC Research & Development uses GNU Parallel: - http://www.bbc.co.uk/blogs/researchanddevelopment/2010/11/prototyping-weeknotes-41-26112.shtml - -* Short article about using GNU Parallel on RHEL. Thanks to Rishi Deshpande. - http://nuclear-imaging.info/site_content/2011/05/11/gnu-parallel/ - -* Using GNU Parallel for FLAC->MP3 conversion. 
Thanks to Derek Marcotte. - http://derek.chezmarcotte.ca/?p=286 +* Article in Linux Magazine (Spanish). Thanks to Ben Martin. + http://www.linux-magazine.es/issue/67/ * Bug fixes and man page updates. diff --git a/packager/obs/Makefile b/packager/obs/Makefile index 2197e807..2140d461 100644 --- a/packager/obs/Makefile +++ b/packager/obs/Makefile @@ -1,5 +1,6 @@ all: cd ../debian/ && make + osc rm home\:tange/parallel/parallel_*.dsc cp ../debian/parallel_*.dsc ../debian/parallel_*.tar.gz home\:tange/parallel/ cp `ls ../../parallel-*.tar.bz2|tail -n1` home\:tange/parallel/ cd home\:tange/parallel/ && osc up diff --git a/src/niceload b/src/niceload index 9336bb99..958b037c 100755 --- a/src/niceload +++ b/src/niceload @@ -236,7 +236,7 @@ B(1), B(1) use strict; use Getopt::Long; $Global::progname="niceload"; -$Global::version = 20110522; +$Global::version = 20110526; Getopt::Long::Configure("bundling","require_order"); get_options_from_array(\@ARGV) || die_usage(); if($::opt_version) { diff --git a/src/parallel b/src/parallel index d3919dcd..0edc8044 100755 --- a/src/parallel +++ b/src/parallel @@ -406,7 +406,7 @@ sub get_options_from_array { sub parse_options { # Returns: N/A # Defaults: - $Global::version = 20110522; + $Global::version = 20110526; $Global::progname = 'parallel'; $Global::infinity = 2**31; $Global::debug = 0; diff --git a/src/parallel.pod b/src/parallel.pod index f7e4f71a..1eb1c2fa 100644 --- a/src/parallel.pod +++ b/src/parallel.pod @@ -630,9 +630,9 @@ Instead of printing the output to stdout (standard output) the output of each job is saved in a file and the filename is then printed. -=item B<--pipe> +=item B<--pipe> (alpha testing) -=item B<--spreadstdin> +=item B<--spreadstdin> (alpha testing) Spread input to jobs on stdin. Read a block of data from stdin (standard input) and give one block of data as input to one job. 
@@ -1385,15 +1385,10 @@ Let us assume a website stores images like: http://www.example.com/path/to/YYYYMMDD_##.jpg -where YYYYMMDD is the date and ## is the number 01-10. This will -generate the past 30 days as YYYYMMDD: +where YYYYMMDD is the date and ## is the number 01-10. This will +download images for the past 30 days: -B - -Based on this we can let GNU B generate 10 Bs per day: - -I B<| parallel -I {o} seq -w 10 "|" parallel wget -http://www.example.com/path/to/{o}_{}.jpg> +B =head1 EXAMPLE: Process files from a tar file while unpacking @@ -1691,20 +1686,24 @@ B The perl command spits out 2 lines for each input. GNU B takes 2 inputs (using B<-N2>) and replaces {1} and {2} with the inputs. +Count in binary: + +B + Print the number on the opposing sides of a six sided die: -B +B -B +B Convert files from all subdirs to PNG-files with consecutive numbers (useful for making input PNG's for B): -B +B Alternative version: -B +B =head1 EXAMPLE: Use a table as input @@ -2657,7 +2656,7 @@ B<2> parallel diff {} ../version5/{} < manifest | more B<3> xapply -p/dev/null -f 'diff %1 %2' manifest1 checklist1 -B<3> parallel diff {1} {2} :::: manifest1 checklist1 +B<3> parallel --xapply diff {1} {2} :::: manifest1 checklist1 B<4> xapply 'indent' *.c diff --git a/src/sql b/src/sql index 60ec4e97..c32bab3f 100755 --- a/src/sql +++ b/src/sql @@ -536,7 +536,7 @@ $Global::Initfile && unlink $Global::Initfile; exit ($err); sub parse_options { - $Global::version = 20110522; + $Global::version = 20110526; $Global::progname = 'sql'; # This must be done first as this may exec myself