* custom guile stdin port for MPI users
@ 2006-11-28 18:52 Alexander Shirokov
2006-12-01 12:59 ` Mario Storti
0 siblings, 1 reply; 4+ messages in thread
From: Alexander Shirokov @ 2006-11-28 18:52 UTC (permalink / raw)
[-- Attachment #1: Type: TEXT/PLAIN, Size: 2239 bytes --]
[Please disregard my previous message - the misprint in the subject line
is now fixed.]
---
Dear GUILE Developers
I would like to embed the Guile interpreter into my application, a
parallel program that uses MPI (the Message Passing Interface) and
operates on large data sets and computations. I would like the program
to process standard input so that I can have a live interactive
session with my application. Below I describe the problem I
encountered.
I am currently using Tcl as the embedded interpreter, but I am looking
into alternatives. Since Tcl does not take over control of stdin, I
simply read stdin on MPI process zero until the Tcl command is complete
(checking with Tcl_CommandComplete after each line of stdin); when the
command is complete I stop reading stdin, copy the string to all the
other MPI processes with MPI_Bcast, and evaluate it (with Tcl_Eval) on
the worker processes.
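The pattern looks roughly like this (only a sketch; it assumes a
Tcl_Interp named interp already exists on every rank, MPI is already
initialized, and error handling and buffer limits are omitted):

/* Sketch: rank 0 accumulates stdin lines until Tcl_CommandComplete()
   says the command is complete, then every rank receives the command
   via MPI_Bcast and evaluates it with Tcl_Eval. */
#include <stdio.h>
#include <string.h>
#include <mpi.h>
#include <tcl.h>

void repl_step(Tcl_Interp *interp, int myid)
{
  char cmd[4096] = "";
  if (myid == 0) {
    char line[1024];
    while (fgets(line, sizeof line, stdin)) {
      strncat(cmd, line, sizeof cmd - strlen(cmd) - 1);
      if (Tcl_CommandComplete(cmd)) break;   /* full command assembled */
    }
  }
  /* every rank gets the same command string */
  MPI_Bcast(cmd, sizeof cmd, MPI_CHAR, 0, MPI_COMM_WORLD);
  Tcl_Eval(interp, cmd);   /* here every rank, including rank 0, evaluates it */
}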
With Guile, however, I am limited to using
scm_shell(argc, argv);
which is supposed to do the stdin processing itself; I had hoped it
would do so even in a parallel environment. I inserted
MPI_Init(&argc, &argv);
MPI_Finalize();
into the tortoise.c program from the Guile tutorial (a complete copy of
the program is attached) and compiled it with 'mpicc', but I do not get
the expected behavior. For example, when I run on 4 processes:
mpirun -np 4 ./tortoise2
guile> (tortoise-move 100)
the next guile prompt does not appear after the entered command has
completed.
I searched the Guile archives for "MPI" and found that another person
had the same problem a year ago. That user received a very informative
reply:
http://lists.gnu.org/archive/html/guile-user/2005-02/msg00018.html
but unfortunately the thread stops there.
I did some follow-up and found good documentation on defining custom
port types at
http://www.gnu.org/software/guile/docs/docs-1.8/guile-ref/Port-Types.html#Port-Types
but my expertise in Scheme and custom ports runs out there.
Many people use MPI, so I think a solution would be greatly appreciated
by a sizable community of MPI users.
Thank you.
Regards,
Alexander Shirokov
--
[-- Attachment #2: Type: TEXT/PLAIN, Size: 3147 bytes --]
/* Be-Guiled version of the program */
#define _GNU_SOURCE   /* needed on glibc for asprintf() */
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <X11/Xlib.h>
#define WINDOW_SIZE 500
#include <guile/gh.h>
#include <mpi.h>
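/* MPI rank of this process, a server-rank placeholder (set but otherwise
   unused below), and the number of processes */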
int myid, serv, nproc;
Display *theDisplay;
Window theWindow;
Screen *theScreen;
GC theGC;
double currentX;
double currentY;
double currentDirection;
int penDown;
#include <math.h>
#define DEGREES_TO_RADIANS (3.1415926535897932384626433832795029L/180.0)
SCM tortoise_reset()
{
currentX = currentY = WINDOW_SIZE/2;
currentDirection = 0;
penDown = 1;
return SCM_EOL;
}
SCM tortoise_pendown()
{
penDown = 1;
return SCM_EOL;
}
SCM tortoise_penup()
{
penDown = 0;
return SCM_EOL;
}
SCM tortoise_turn(SCM s_degrees)
{
int degrees = SCM_INUM(s_degrees);
currentDirection += (double)degrees;
return SCM_EOL;
}
SCM tortoise_move(SCM s_steps)
{
double newX, newY;
int steps = SCM_INUM(s_steps);
/* first work out the new endpoint */
newX = currentX + sin(currentDirection*DEGREES_TO_RADIANS)*(double)steps;
newY = currentY - cos(currentDirection*DEGREES_TO_RADIANS)*(double)steps;
/* if the pen is down, draw a line */
if (penDown) XDrawLine(theDisplay, theWindow, theGC,
(int)currentX, (int)currentY, (int)newX, (int)newY);
/* in either case, move the tortoise */
currentX = newX;
currentY = newY;
return SCM_EOL;
}
void register_procs(void)
{
gh_new_procedure("tortoise-reset", tortoise_reset, 0, 0, 0);
gh_new_procedure("tortoise-pendown", tortoise_pendown, 0, 0, 0);
gh_new_procedure("tortoise-penup", tortoise_penup, 0, 0, 0);
gh_new_procedure("tortoise-turn", tortoise_turn, 1, 0, 0);
gh_new_procedure("tortoise-move", tortoise_move, 1, 0, 0);
}
void inner_main(int argc, char **argv)
{
register_procs();
scm_shell(argc, argv);
MPI_Finalize();
}
int main(int argc, char *argv[])
{
char pname[256];
char fname[256];
int reslen;
MPI_Comm world;
serv = 0;
world = MPI_COMM_WORLD;
MPI_Init(&argc,&argv);
MPI_Comm_rank (world, &myid);
MPI_Comm_size (world, &nproc);
if(myid != 0){
  /* redirect each worker's stdout to its own file, "so-<rank>" */
  char *name;
  asprintf(&name, "so-%d", myid);
  if(!freopen(name, "w", stdout)){
    fprintf(stderr, "Error: cannot reopen stdout (%s:%d)\n", __FILE__, __LINE__);
    exit(2);
  }
  free(name);
}
theDisplay = XOpenDisplay(NULL);
XSynchronize(theDisplay, True);
theScreen = DefaultScreenOfDisplay(theDisplay);
theWindow = XCreateSimpleWindow(theDisplay, RootWindowOfScreen(theScreen),
0, 0,
WINDOW_SIZE, WINDOW_SIZE, 0,
BlackPixelOfScreen(theScreen),
WhitePixelOfScreen(theScreen));
theGC = XCreateGC(theDisplay, theWindow, 0L, NULL);
XSetForeground(theDisplay, theGC, BlackPixelOfScreen(theScreen));
XMapWindow(theDisplay,theWindow);
/* more stuff to come here . . */
tortoise_reset();
gh_enter(argc, argv, inner_main);
return 0; /* never reached */
}
[-- Attachment #3: Type: text/plain, Size: 140 bytes --]
_______________________________________________
Guile-user mailing list
Guile-user@gnu.org
http://lists.gnu.org/mailman/listinfo/guile-user
* Re: custom guile stdin port for MPI users
2006-11-28 18:52 custom guile stdin port for MPI users Alexander Shirokov
@ 2006-12-01 12:59 ` Mario Storti
2007-01-09 5:24 ` Alexander Shirokov
0 siblings, 1 reply; 4+ messages in thread
From: Mario Storti @ 2006-12-01 12:59 UTC (permalink / raw)
Cc: guile-user
>>>>> On Tue, 28 Nov 2006 13:52:30 -0500 (EST),
>>>>> Alexander Shirokov <shirokov@cita.utoronto.ca> said:
> I would like to embed the Guile interpreter into my application, a
> parallel program that uses MPI (the Message Passing Interface) and
> operates on large data sets and computations. I would like the program
> to process standard input so that I can have a live interactive
> session with my application. Below I describe the problem I
> encountered.
...
> With Guile, however, I am limited to using
> scm_shell(argc, argv);
> which is supposed to do the stdin processing itself; I had hoped it
> would do so even in a parallel environment. I inserted
> MPI_Init(&argc, &argv);
> MPI_Finalize();
> into the tortoise.c program from the Guile tutorial (a complete copy of
> the program is attached) and compiled it with 'mpicc', but I do not get
> the expected behavior. For example, when I run on 4 processes:
> mpirun -np 4 ./tortoise2
> guile> (tortoise-move 100)
> the next guile prompt does not appear after the entered command has
> completed.
> I searched the Guile archives for "MPI" and found that another person
> had the same problem a year ago. That user received a very informative
> reply:
> http://lists.gnu.org/archive/html/guile-user/2005-02/msg00018.html
> but unfortunately the thread stops there.
> I did some follow-up and found good documentation on defining custom
> port types at
> http://www.gnu.org/software/guile/docs/docs-1.8/guile-ref/Port-Types.html#Port-Types
> but my expertise in Scheme and custom ports runs out there.
> Many people use MPI, so I think a solution would be greatly appreciated
> by a sizable community of MPI users.
One issue in wrapping MPI for Guile is calling MPI_Init() before
entering Guile. Your code already does this. With that code you should
be able to use MPI non-interactively (I guess). For instance, write a
small script and run it in batch mode with MPI:
$ mpirun -np xx tortoise -s myscript.scm
That should work. (I use `scm_boot_guile' instead of `gh_enter'; I
think the `gh_...' interface is deprecated, but I don't know whether
that matters for this discussion.) Note that with a small effort you
have something that is not completely useless: you can use it
have something that is not completely useless: you can use it
* interactively in sequential mode, and
* in parallel (but not interactively)
I have done some experiments along these lines, wrapping the simplest
MPI functions (mpi-send, mpi-recv, mpi-bcast, ...) and some basic
functionality from PETSc.
Now, if you want to use it both in parallel and interactively, then I
think the solution is to replace the REPL evaluator so that each time
it reads a sexp (or whatever is to be evaluated), it sends the
expression to the nodes with MPI_Bcast. I know of something similar
being done with Python, and I think it is much the same idea.
http://www.cimec.org.ar/python/
http://sourceforge.net/projects/mpi4py/
I think this broadcasting of the input from the master to the nodes is
something you cannot avoid whenever you want to wrap MPI for any
scripting language.
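A minimal sketch of that broadcast loop (untested, and assuming MPI is
already initialized; it uses Guile's scm_c_eval_string and reads line
by line rather than full s-expressions) might look like this:

/* Sketch: rank 0 reads one line of Scheme from stdin, broadcasts it,
   and every rank evaluates the same string.  A real REPL would also
   print the result and handle multi-line expressions and errors. */
#include <stdio.h>
#include <string.h>
#include <libguile.h>
#include <mpi.h>

static void broadcast_repl(void)
{
  int myid;
  MPI_Comm_rank(MPI_COMM_WORLD, &myid);
  char expr[4096];
  for (;;) {
    int len = 0;
    if (myid == 0) {
      printf("guile> ");
      fflush(stdout);
      if (!fgets(expr, sizeof expr, stdin))
        len = -1;                       /* EOF: tell everyone to stop */
      else
        len = (int)strlen(expr) + 1;
    }
    MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
    if (len < 0) break;
    MPI_Bcast(expr, len, MPI_CHAR, 0, MPI_COMM_WORLD);
    scm_c_eval_string(expr);            /* every rank evaluates the same expression */
  }
}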
Mario
* Re: custom guile stdin port for MPI users
2006-12-01 12:59 ` Mario Storti
@ 2007-01-09 5:24 ` Alexander Shirokov
2007-01-09 15:10 ` Mario Storti
0 siblings, 1 reply; 4+ messages in thread
From: Alexander Shirokov @ 2007-01-09 5:24 UTC (permalink / raw)
Cc: guile-user
Hi Mario,
Thanks for your reply! It looks like I will probably have to try to do
it on my own. I would be interested to see an example of how you
wrapped MPI_Bcast, MPI_Send and MPI_Recv. Would it be difficult for you
to show me one? It would be very nice to have, since I am a beginner
with Guile, and I will let you know how it goes. Thank you.
Alex
On Fri, 1 Dec 2006, Mario Storti wrote:
> [...]
--
* Re: custom guile stdin port for MPI users
2007-01-09 5:24 ` Alexander Shirokov
@ 2007-01-09 15:10 ` Mario Storti
0 siblings, 0 replies; 4+ messages in thread
From: Mario Storti @ 2007-01-09 15:10 UTC (permalink / raw)
Cc: guile-user
>>>>> On Tue, 9 Jan 2007 00:24:38 -0500 (EST),
>>>>> Alexander Shirokov <shirokov@cita.utoronto.ca> said:
> Hi Mario,
> Thanks for your reply! It looks like I will probably have to try to do
> it on my own. I would be interested to see an example of how you
> wrapped MPI_Bcast, MPI_Send and MPI_Recv. Would it be difficult for you
> to show me one? It would be very nice to have, since I am a beginner
> with Guile, and I will let you know how it goes. Thank you.
> Alex
Below is the basic wrapper (in C++). Note the call to MPI_Init() in
main(). Below that is a simple Guile script that uses point-to-point
communication. To compile it I used a Makefile target like this:
mpiguile: mpiguile.o
	gcc -g -o $@ $< `guile-config link` $(LDFLAGS)
Of course LDFLAGS should contain the MPI libraries. (Sorry I can't send
you more material right now, because I'm on vacation, accessing my
office desktop from a cybercafe on the beach :-) )
Mario
===== MPIGUILE.CPP =========================================
#include <cassert>
#include <cstdio>
#include <cmath>
#include <libguile.h>
#if 0
#include "vector.h"
#include <petsc.h>
#include "./petscscm.h"
#include "./dvector.h"
#endif
#include <mpi.h>
#define N 5
typedef SCM(*scm_fun)();
//---:---<*>---:---<*>---:---<*>---:---<*>---:---<*>---:---<*>---:
#undef __FUN__
#define __FUN__ "mpi-send"
static SCM
mpi_send(SCM s_val,SCM s_dest) {
SCM_ASSERT(SCM_NFALSEP(scm_number_p(s_val)),s_val,SCM_ARG1,__FUN__);
double val = scm_num2double(s_val,0,__FUN__);
double tval = round(val);
SCM_ASSERT(SCM_INUMP(s_dest),
s_dest,SCM_ARG2,__FUN__);
int dest = SCM_INUM(s_dest);
printf("mpi_send: sending %lg, error %lg\n",val,val-tval);
double v[N];
for (int j=0; j<N; j++) v[j] = val;
printf("mpi_send: sending %lg, error %lg\n",v[0],v[0]-tval);
MPI_Send(v,N,MPI_DOUBLE,dest,0,MPI_COMM_WORLD);
return SCM_UNSPECIFIED;
}
//---:---<*>---:---<*>---:---<*>---:---<*>---:---<*>---:---<*>---:
#undef __FUN__
#define __FUN__ "mpi-recv"
static SCM
mpi_recv(SCM s_source) {
SCM_ASSERT(SCM_INUMP(s_source),
s_source,SCM_ARG1,__FUN__);
int source = SCM_INUM(s_source);
double v[N];
#if 0
for (int j=0; j<N; j++)
v[j] = 0.1234567890123456789;
for (int j=0; j<N; j++)
printf("%g ",v[j]);
printf(" xxx \n");
#endif
MPI_Status status;
MPI_Recv(v,N,MPI_DOUBLE,source,0,
MPI_COMM_WORLD,&status);
double val = v[0];
double tval = round(val);
printf("mpi_recv error: received ");
for (int j=0; j<N; j++)
printf("%g ",v[j]-tval);
printf("\n");
#if 0
for (int j=1; j<N; j++)
assert(v[j]==val);
#endif
return scm_make_real(val);
}
//---:---<*>---:---<*>---:---<*>---:---<*>---:---<*>---:---<*>---:
#undef __FUN__
#define __FUN__ "mpi-rank"
static SCM
mpi_rank() {
int myrank;
MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
return SCM_MAKINUM(myrank);
}
//---:---<*>---:---<*>---:---<*>---:---<*>---:---<*>---:---<*>---:
#undef __FUN__
#define __FUN__ "mpi-size"
static SCM
mpi_size() {
int size;
MPI_Comm_size(MPI_COMM_WORLD,&size);
return SCM_MAKINUM(size);
}
//---:---<*>---:---<*>---:---<*>---:---<*>---:---<*>---:---<*>---:
#undef __FUN__
#define __FUN__ "mpi-finalize"
static SCM
mpi_finalize() {
MPI_Finalize();
return SCM_UNSPECIFIED;
}
//---:---<*>---:---<*>---:---<*>---:---<*>---:---<*>---:---<*>---:
extern "C" void
init_mpi (void) {
scm_c_define_gsubr("mpi-send",2,0,0,scm_fun(mpi_send));
scm_c_define_gsubr("mpi-recv",1,0,0,scm_fun(mpi_recv));
scm_c_define_gsubr("mpi-rank",0,0,0,scm_fun(mpi_rank));
scm_c_define_gsubr("mpi-size",0,0,0,scm_fun(mpi_size));
scm_c_define_gsubr("mpi-finalize",0,0,0,scm_fun(mpi_finalize));
}
//---:---<*>---:---<*>---:---<*>---:---<*>---:---<*>---:---<*>---:
static void
inner_main (void *closure, int argc, char **argv) {
init_mpi();
scm_shell(argc, argv);
MPI_Finalize();
}
//---:---<*>---:---<*>---:---<*>---:---<*>---:---<*>---:---<*>---:
int main (int argc, char **argv) {
MPI_Init(&argc,&argv);
scm_boot_guile (argc, argv, inner_main, 0);
return 0; // never reached
}
===== TRYMPI.SCM =========================================
(define my-rank (mpi-rank))
(define size (mpi-size))
(format #t "myrank ~A, size ~A\n" my-rank size)
(define val #f)
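;; The block between #! and !# below is commented out; it is a ping-pong
;; exchange in which rank 0 and rank 1 alternately send and receive.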
#!
(do ((j 0 (+ j 2))) ((= j 20))
(cond ((= my-rank 0)
(mpi-send j 1)
(set! val (mpi-recv 1)))
(#t
(set! val (mpi-recv 0))
(mpi-send (+ j 1) 0)))
(format #t "[~A] received ~A\n" my-rank val))
!#
(do ((j 0 (+ j 1))) ((= j 20))
(cond ((= my-rank 1)
(mpi-send 23 0))
(#t
(set! val (mpi-recv 1))
(format #t "in guile: received ~A\n" val))))
(mpi-finalize)
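;; Run with something like: mpirun -np 2 ./mpiguile -s trympi.scm
;; (an assumption based on the earlier example; the exchange above only
;; completes with exactly two processes, since any extra ranks would
;; block in mpi-recv).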