* ethreads: event-driven cooperative threads
@ 2012-03-22 16:13 Andy Wingo
From: Andy Wingo @ 2012-03-22 16:13 UTC
  To: guile-devel

Hi list,

I pushed an experimental branch recently, wip-ethreads.  It implements
user-space cooperative threads that yield when they would otherwise
block or sleep.  It seems to be useful for servers.
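
In miniature, the programming model looks like this (a tiny sketch
using only the spawn, sleep, and run calls that also appear in the
demo at the end of this mail):

  (use-modules (ice-9 ethreads))

  ;; Two ethreads that interleave: sleep yields to the scheduler
  ;; instead of blocking the whole process, so "fast" prints first.
  (spawn (lambda () (sleep 2) (pk 'slow)))
  (spawn (lambda () (sleep 1) (pk 'fast)))

  ;; Run the scheduler.
  (run)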

That branch also implements an HTTP server based on ethreads.  It's
quite pleasant.  There's an ethread that accepts connections:

  (define (socket-loop server esocket)
    ;; Hand a freshly read request over to the (web server) framework
    ;; by aborting to the server's prompt.
    (define (have-request client-thread request body)
      (abort-to-prompt (server-have-request-prompt server)
                       client-thread request body))
    ;; Accept connections forever, spawning one ethread per client.
    (let loop ()
      (let ((client (accept-eport esocket)))
        (setsockopt (eport-fd client) SOL_SOCKET SO_SNDBUF (* 12 1024))
        (spawn (lambda () (client-loop client have-request)))
        (loop))))

And there's another one that handles clients.  It's a bit more
complicated, both because it needs to handle errors and because it has
to invert the inversion of control imposed by the (web server)
framework.  It does so with the "suspend" operator provided by
(ice-9 ethreads).

  (define (client-loop client have-request)
    (with-throw-handler #t
      (lambda ()
        (let loop ()
          (call-with-values
              (lambda ()
                (catch #t
                  (lambda ()
                    (let* ((request (read-request client))
                           (body (read-request-body request)))
                      ;; Hand the request off and park this ethread until
                      ;; server-write resumes it with (response body).
                      (suspend
                       (lambda (ctx thread)
                         (have-request thread request body)))))
                  (lambda (key . args)
                    (display "While reading request:\n" (current-error-port))
                    (print-exception (current-error-port) #f key args)
                    (values (build-response #:version '(1 . 0) #:code 400
                                            #:headers '((content-length . 0)))
                            #vu8()))))
            (lambda (response body)
              (put-bytevector client
                              (call-with-output-bytevector
                               (lambda (port) (write-response response port))))
              (when body
                (put-bytevector client body))
              (drain-output client)
              (if (and (keep-alive? response)
                       (not (eof-object? (lookahead-u8 client))))
                  (loop)
                  (close-eport client))))))
      (lambda (k . args)
        (catch #t
          (lambda () (close-eport client #:drain-output? #f))
          (lambda (k . args)
            (display "While closing eport:\n" (current-error-port))
            (print-exception (current-error-port) #f k args))))))

Finally the threads are actually run by the server's read hook:

  ;; -> (client request body | #f #f #f)
  (define (server-read server)
    (call-with-prompt
     (server-have-request-prompt server)
     (lambda ()
       (run (server-econtext server)))
     (lambda (k client request body)
       (values client request body))))
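
If the prompt dance looks unfamiliar: it's ordinary delimited control.
Here is the same handshake in miniature, using only core Guile (the
quoted symbols are stand-ins for the real values):

  (define tag (make-prompt-tag "have-request"))

  (call-with-prompt tag
    (lambda ()
      ;; Stands in for running the econtext: eventually some ethread
      ;; aborts to the prompt with the request it has read.
      (abort-to-prompt tag 'client 'request 'body))
    (lambda (k client request body)
      ;; Control lands here with the values passed to abort-to-prompt;
      ;; k is the continuation of the aborted code.
      (pk 'server-read-returns client request body)))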

When the application comes back with a response, we resume the thread:

  ;; -> 0 values
  (define (server-write server client response body)
    (when (and body (not (bytevector? body)))
      (error "Expected a bytevector for body" body))
    (resume client (lambda () (values response body)) (server-econtext server))
    (values))

It's pretty cool.  Note that the web server framework, the socket loop,
and the client loops all seem to be "in charge" -- in their own
threads.
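
The suspend/resume pair is what makes that possible.  Here's that
handshake in miniature -- a hypothetical sketch, not code from the
branch, and it assumes that resume may also be called from another
ethread in the same context and that ethreads start in spawn order:

  (define parked-thread #f)
  (define parked-ctx #f)

  (spawn
   (lambda ()
     ;; Park this ethread; stash the context and thread handles so
     ;; someone else can wake it up later.
     (let ((msg (suspend (lambda (ctx thread)
                           (set! parked-ctx ctx)
                           (set! parked-thread thread)))))
       (pk 'woken-with msg))))

  (spawn
   (lambda ()
     ;; Wake the parked ethread; the thunk's values become suspend's
     ;; return values, just as server-write does with (response body).
     (resume parked-thread (lambda () 'hello) parked-ctx)))

  (run)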

Here's a little demo with sleepers:

  (use-modules (ice-9 ethreads))

  (define (current-time)
    (/ (get-internal-real-time) 1.0 internal-time-units-per-second))

  (define *num-sleepers* 20)

  (define *max-timeout* 10.0)

  (define (spawn-sleeper n timeout)
    (spawn
     (lambda ()
       (let lp ((prev (current-time)))
         (sleep timeout)
         (let ((next (current-time)))
           (pk n timeout (- next prev timeout))
           (lp next))))))

  (let lp ((n *num-sleepers*))
    (unless (zero? n)
      (spawn-sleeper n (random *max-timeout*))
      (lp (1- n))))

  (run)

An example run:

  $ meta/guile /tmp/ethreads-test.scm

  ;;; (2 0.892998257204006 0.00112451779599421)

  ;;; (14 1.52069073640383 4.70492596168492e-4)

  ;;; (2 0.892998257204006 5.99242795994215e-4)

  ;;; (2 0.892998257204006 0.00113003879599427)

  ;;; (14 1.52069073640383 5.83969596168465e-4)

  ;;; (10 3.33206820106189 2.92359381099949e-5)

  ;;; (2 0.892998257204006 7.76734795994383e-4)

Here you see that the jitter (the third number in each line: the
difference between the requested and the actual sleep) is fairly low --
less than a millisecond in general.

Thoughts?  I think I'd like to merge this into stable-2.0 at some point.
Dunno.  It's a work in progress, and feedback is welcome :-)

Andy
-- 
http://wingolog.org/


