(use eggdoc sxml-transforms)

(define doc
  `((eggdoc:begin
     (name "mpi")
     (description "Message Passing Interface (MPI)")
     (author (url "http://chicken.wiki.br/users/ivan-raikov" "Ivan Raikov"))

     (history
      (version "1.6" "Ported to Chicken 4")
      (version "1.5" "Added a binding for MPI:spawn")
      (version "1.3" "Bug fix in MPI:scatter-int")
      (version "1.2" "Bug fix in the meta file")
      (version "1.1" "Bug fixes and improvements to the regression tests")
      (version "1.0" "Initial release"))

     (requires)
     (usage "(require-extension mpi)")
     (download "mpi.egg")

     (documentation

      (p (url "http://www-unix.mcs.anl.gov/mpi/" "MPI")
         " is a popular library for distributed-memory parallel programming. "
         "It offers both point-to-point message passing and group communication "
         "operations (broadcast, scatter/gather, etc.). ")

      (p (url "http://www.open-mpi.org/" "Open MPI") " is an implementation of "
         "the MPI standard that combines technologies and resources from several "
         "other projects (FT-MPI, LA-MPI, LAM/MPI, and PACX-MPI) in order to build "
         "the best MPI library available. ")

      (p "The Chicken MPI egg provides a Scheme interface to "
         "a large subset of the MPI 1.2 procedures for communication. "
         "It is based on the "
         (url "http://pauillac.inria.fr/~xleroy/software.html#ocamlmpi" "Ocaml MPI")
         " library by Xavier Leroy. "
         "Below is a list of procedures that are included in this egg, "
         "along with brief descriptions. This egg has been tested with "
         "Open MPI version 1.2.4. ")

      (subsection "Initialization and time procedures"

       (procedure "MPI:init:: [ARG1 ...] -> UNDEFINED"
        (p "Initializes the MPI execution environment. "
           "This routine must be called before any other MPI routine. "
           "MPI can be initialized at most once. "))

       (procedure "MPI:spawn:: COMMAND * ARGUMENTS * MAXPROCS * LOCATIONS * ROOT * COMM -> (COMM * S32VECTOR)"
        (p "Spawns " (tt "MAXPROCS") " identical copies of the MPI program specified by "
           (tt "COMMAND") " and returns an intercommunicator and a vector of status values. "
           (tt "ARGUMENTS") " is a list of command-line arguments. "
           (tt "LOCATIONS") " is a list of string pairs " (tt "(HOST * WDIR)")
           " that tell MPI the host and working directory where to start processes. "))

       (procedure "MPI:finalize"
        (p "Terminates the MPI execution environment. "))

       (procedure "MPI:wtime:: VOID -> SECONDS"
        "Returns the number of seconds representing elapsed wall-clock time on the calling process. "))
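      (p "As a quick illustration, the skeleton below sketches how these initialization "
         "procedures fit together in an SPMD program. The file name and the "
         (tt "mpirun") " command line are just one possible way of running it under "
         "Open MPI (for example " (tt "mpirun -np 4 csi -s hello.scm") "): ")

      (pre #<<EOF
;; hello.scm -- every process reports its rank and the size of the group
(require-extension mpi)

(MPI:init)

(define comm-world (MPI:get-comm-world))
(define t0         (MPI:wtime))

(print "process " (MPI:comm-rank comm-world)
       " of "     (MPI:comm-size comm-world)
       " ready after " (- (MPI:wtime) t0) " seconds")

(MPI:finalize)
EOF
)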
      (subsection "Handling of communicators"

       (procedure "MPI:comm?:: OBJ -> BOOL"
        (p "Returns true if " (tt "OBJ") " is an MPI communicator object, "
           "false otherwise. "))

       (procedure "MPI:get-comm-world:: VOID -> COMM"
        (p "Returns the default communicator created by " (tt "MPI_Init") "; "
           "the group associated with this communicator contains all processes. "))

       (procedure "MPI:comm-size:: COMM -> INTEGER"
        "Returns the size of the group associated with communicator " (tt "COMM") ". ")

       (procedure "MPI:comm-rank:: COMM -> INTEGER"
        "Returns the rank of the calling process in communicator " (tt "COMM") ". ")

       (procedure "MPI:comm-equal?:: COMM1 * COMM2 -> BOOL"
        "Returns true if the two given communicators are for identical groups, "
        "false otherwise. ")

       (procedure "MPI:comm-split:: COMM * COLOR * KEY -> COMM"
        "Creates new communicators based on colors and keys. ")

       (procedure "MPI:comm-create:: COMM * GROUP -> COMM"
        (p "Creates a new communicator whose communication group spans all processes in "
           (tt "GROUP") " and which carries a new context. "
           "See the procedures in subsection " (i "Handling of communication groups")
           " for information on how to create process group objects. "))

       (procedure "MPI:make-cart:: COMM * DIMS * PERIODS * REORDER -> COMM"
        (p "Creates a new communicator with Cartesian topology information. "
           "Argument " (tt "DIMS") " is an SRFI-4 s32vector that contains the number of "
           "processes in each dimension of the Cartesian grid. "
           "Argument " (tt "PERIODS") " is an SRFI-4 s32vector of the same length as " (tt "DIMS")
           " that indicates if the grid is periodic (1) or not (0) in each dimension. "
           "Argument " (tt "REORDER") " is a boolean value that indicates whether "
           "process ranking may be reordered. "))

       (procedure "MPI:make-dims:: NNODES * NDIMS -> DIMS"
        (p "Creates a division of processes in a Cartesian grid. "
           "Argument " (tt "NNODES") " is the number of nodes in the grid. "
           "Argument " (tt "NDIMS") " is the number of Cartesian dimensions. "
           "The return value is an SRFI-4 s32vector. "))

       (procedure "MPI:cart-coords:: COMM * RANK -> COORDS"
        (p "Determines process coordinates in Cartesian topology, given a rank in the group. "
           "The return value is an SRFI-4 s32vector of length " (tt "NDIMS")
           " (the number of dimensions in the Cartesian topology). ")))

      (subsection "Handling of communication groups"

       (procedure "MPI:group?:: OBJ -> BOOL"
        (p "Returns true if " (tt "OBJ") " is an MPI group object, "
           "false otherwise. "))

       (procedure "MPI:comm-group:: COMM -> GROUP"
        "Returns the group associated with the given communicator. ")

       (procedure "MPI:group-size:: GROUP -> INTEGER"
        "Returns the size of the group " (tt "GROUP") ". ")

       (procedure "MPI:group-rank:: GROUP -> INTEGER"
        "Returns the rank of the calling process in the given group. ")

       (procedure "MPI:group-translate-ranks:: GROUP1 * RANKS * GROUP2 -> RANKS2"
        (p "Translates the ranks of processes in one group to those in another group. "
           "The return value is an SRFI-4 s32vector. "))

       (procedure "MPI:group-union:: GROUP1 * GROUP2 -> GROUP")

       (procedure "MPI:group-difference:: GROUP1 * GROUP2 -> GROUP")

       (procedure "MPI:group-intersection:: GROUP1 * GROUP2 -> GROUP")

       (procedure "MPI:group-incl:: GROUP * RANKS -> GROUP"
        (p "Produces a group by reordering an existing group and taking only members "
           "with the given ranks. Argument " (tt "RANKS") " is an SRFI-4 s32vector. "))

       (procedure "MPI:group-excl:: GROUP * RANKS -> GROUP"
        (p "Produces a group by reordering an existing group and taking only members "
           "that do not have the given ranks. Argument " (tt "RANKS") " is an SRFI-4 s32vector. ")))

      (subsection "Point-to-point communication"

       (p "Most communication procedures in this library come in several flavors, "
          "for fixnums, integers, floating point numbers, bytevectors, and for "
          "each of the SRFI-4 homogeneous vector types. ")

       (procedure "MPI:send-TYPE:: DATA * DEST * TAG * COMM -> UNDEFINED"
        (p "Performs a standard-mode blocking send. "
           "Argument " (tt "DEST") " is the rank of the destination process. "
           "Argument " (tt "TAG") " is an integer message tag. "
           (tt "TYPE") " is one of the following: "
           (tt "fixnum, int, flonum, bytevector, s8vector, u8vector, "
               "s16vector, u16vector, s32vector, u32vector, f32vector, f64vector")))

       (procedure "MPI:receive-TYPE:: SOURCE * TAG * COMM -> DATA")

       (procedure "MPI:receive-TYPE:: LENGTH * SOURCE * TAG * COMM -> DATA"
        (p "Performs a standard-mode blocking receive. "
           "Argument " (tt "SOURCE") " is the rank of the source process. "
           "Argument " (tt "TAG") " is an integer message tag. "
           "Argument " (tt "LENGTH") " is present only in the vector procedures. 
" (tt "TYPE") " is one of the following: " (tt "fixnum, int, flonum, bytevector, s8vector, u8vector, " "s16vector, u16vector, s32vector, u32vector, f32vector, f64vector"))) (procedure "MPI:probe:: SOURCE * TAG * COMM -> (COUNT * SOURCE * TAG)" (p "Check for an incoming message. This is a blocking call " "that returns only after a matching message is found. " "Argument " (tt "SOURCE") " can be " (tt "MPI:any-source") ". " "Argument " (tt "TAG") " can be " (tt "MPI:any-tag") ". ")) ) (subsection "Group communication" (procedure "MPI:barrier:: COMM -> UNDEFINED" "Barrier synchronization. ") (procedure "MPI:broadcast-TYPE:: DATA * ROOT * COMM -> UNDEFINED" (p "Broadcasts a message from the process with rank root to " "all other processes of the group. " (tt "TYPE") " is one of the following: " (tt "fixnum, int, flonum, bytevector, s8vector, u8vector, " "s16vector, u16vector, s32vector, u32vector, f32vector, f64vector"))) (procedure "MPI:scatter-TYPE:: DATA * SENDCOUNT * ROOT * COMM -> DATA" (p "Sends data from the root process to all processes in a group, and " "returns the data received by the calling process. " "all other processes of the group. " "Argument " (tt "SENDCOUNT") " is the number of elements sent to each process. " "Argument " (tt "DATA") " is only required at the root process. " "All other processes can invoke this procedure with (void) as " (tt "DATA") ". " (tt "TYPE") " is one of the following: " (tt "int, flonum, bytevector, s8vector, u8vector, " "s16vector, u16vector, s32vector, u32vector, f32vector, f64vector"))) (procedure "MPI:scatterv-TYPE:: DATA * SENDCOUNT * ROOT * COMM -> DATA" (p "Sends variable-length data from the root process to all processes in a group, and " "returns the data received by the calling process. " "all other processes of the group. " "Argument " (tt "SENDCOUNT") " is the number of elements sent to each process. " "Argument " (tt "DATA") " is only required at the root process, and is a list " "of values of type " (tt "TYPE") ", where each element of the list is sent to " "the process of corresponding rank. " "All other processes can invoke this procedure with (void) as " (tt "DATA") ". " (tt "TYPE") " is one of the following: " (tt "int, flonum, bytevector, s8vector, u8vector, " "s16vector, u16vector, s32vector, u32vector, f32vector, f64vector"))) (procedure "MPI:gather-TYPE:: DATA * SENDCOUNT * ROOT * COMM -> DATA" (p "Gathers data from a group of processes, where each process send data of the same length. " "Argument " (tt "SENDCOUNT") " is the number of data elements being sent by each process. " (tt "TYPE") " is one of the following: " (tt "int, flonum, bytevector, s8vector, u8vector, " "s16vector, u16vector, s32vector, u32vector, f32vector, f64vector"))) (procedure "MPI:gatherv-TYPE:: DATA * ROOT * COMM -> DATA" (p "Gathers data from a group of processes, where each process can send data of variable length. " (tt "TYPE") " is one of the following: " (tt "int, flonum, bytevector, s8vector, u8vector, " "s16vector, u16vector, s32vector, u32vector, f32vector, f64vector"))) (procedure "MPI:allgather-TYPE:: DATA * ROOT * COMM -> DATA" (p "Gathers data of variable length from all processes and distributes it to all processes. 
" (tt "TYPE") " is one of the following: " (tt "int, flonum, bytevector, s8vector, u8vector, " "s16vector, u16vector, s32vector, u32vector, f32vector, f64vector"))) (procedure "MPI:reduce-TYPE:: DATA * OP * ROOT * COMM -> DATA" (p "Reduces values on all processes within a group, using a global reduce operation, " "and return the result at the root process. " (tt "OP") " is one of the following: " (tt "MPI:i_max, MPI:i_min, MPI:i_sum, MPI:i_prod, MPI:i_land, MPI:i_lor, MPI:i_xor") " (integer operations); and " (tt "MPI:f_max, MPI:f_min, MPI:f_sum, MPI:f_prod") " (floating point operations). " (tt "TYPE") " is one of the following: " (tt "int, flonum, bytevector, s8vector, u8vector, " "s16vector, u16vector, s32vector, u32vector, f32vector, f64vector"))) (procedure "MPI:allreduce-TYPE:: DATA * OP * COMM -> DATA" (p "Reduces values on all processes within a group, using a global reduce operation, " "and return the result at each process. " (tt "OP") " is one of the following: " (tt "MPI:i_max, MPI:i_min, MPI:i_sum, MPI:i_prod, MPI:i_land, MPI:i_lor, MPI:i_xor") " (integer operations); and " (tt "MPI:f_max, MPI:f_min, MPI:f_sum, MPI:f_prod") " (floating point operations). " (tt "TYPE") " is one of the following: " (tt "int, flonum, bytevector, s8vector, u8vector, " "s16vector, u16vector, s32vector, u32vector, f32vector, f64vector"))) (procedure "MPI:scan-TYPE:: DATA * OP * COMM -> DATA" (p "Computes a partial reduction across the processes in a group. " (tt "OP") " is one of the following: " (tt "MPI:i_max, MPI:i_min, MPI:i_sum, MPI:i_prod, MPI:i_land, MPI:i_lor, MPI:i_xor") " (integer operations); and " (tt "MPI:f_max, MPI:f_min, MPI:f_sum, MPI:f_prod") " (floating point operations). " (tt "TYPE") " is one of the following: " (tt "int, flonum, bytevector, s8vector, u8vector, " "s16vector, u16vector, s32vector, u32vector, f32vector, f64vector"))) ) ) (examples (pre #<blob data) 1 0 comm-world) (let ((n (MPI:receive MPI:any-source MPI:any-tag comm-world))) (print myrank ": received " (blob->string n)))) (let* ((n (blob->string (MPI:receive MPI:any-source MPI:any-tag comm-world))) (n1 (string-append n "a"))) (print myrank ": received " n ", resending " n1) (MPI:send (string->blob n1) (modulo (+ myrank 1) size) 0 comm-world))) EOF ) (license "Copyright Ivan Raikov and the Okinawa Institute of Science and Technology Based on the Ocaml MPI library by Xavier Leroy. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. A full copy of the GPL license can be found at ."))))) (define mpi-eggdoc:css (make-parameter #< EOF )) (if (eggdoc->html doc `( (eggdoc-style . ,(lambda (tag) `(""))) (documentation *macro* . ,(lambda (tag . elts) (let* ((sections (pre-post-order elts `((subsection ;; (subsection level "content ...") ((*text* . ,(lambda (tag str) str))) . ,(lambda (tag head-word . elems) `(li (a (@ (href ,(string-append "#" head-word))) ,head-word)))) (*default* . ,(lambda (tag . elems) (list))) (*text* . ,(lambda (trigger str) (list)))))) (toc `(div (@ (class "toc")) (ol ,sections)))) `(section "Documentation" ,(cons toc elts))))) ,@(eggdoc:make-stylesheet doc) )) (void))