From: Avery Pennarun
Date: Mon, 4 Oct 2010 03:41:09 +0000 (-0700)
Subject: cmd/web: stream large files asynchronously.
X-Git-Url: https://git.michaelhowe.org/gitweb/?a=commitdiff_plain;h=3ce29512e3a402bb1a8fd5461c25710f76169f8f;p=packages%2Fb%2Fbup.git

cmd/web: stream large files asynchronously.

We had a nice chunkyreader() loop for writing files, but unfortunately,
Tornado captured the full content of those files before writing them to
the client.  Oops.  Change things around so we don't end up buffering
some multiple of the ENTIRE FILE in memory.

Signed-off-by: Avery Pennarun
---

diff --git a/cmd/web-cmd.py b/cmd/web-cmd.py
index 4b98814..568811c 100755
--- a/cmd/web-cmd.py
+++ b/cmd/web-cmd.py
@@ -68,7 +68,8 @@ class BupRequestHandler(tornado.web.RequestHandler):
 
     def head(self, path):
         return self._process_request(path)
-    
+
+    @tornado.web.asynchronous
     def _process_request(self, path):
         path = urllib.unquote(path)
         print 'Handling request for %s' % path
@@ -118,12 +119,23 @@ class BupRequestHandler(tornado.web.RequestHandler):
             self.set_header("Content-Type", ctype)
             size = n.size()
             self.set_header("Content-Length", str(size))
+            assert(len(n.hash) == 20)
+            self.set_header("Etag", n.hash.encode('hex'))
 
         if self.request.method != 'HEAD':
+            self.flush()
             f = n.open()
-            for blob in chunkyreader(f):
-                self.write(blob)
-            f.close()
+            it = chunkyreader(f)
+            def write_more(me):
+                try:
+                    blob = it.next()
+                except StopIteration:
+                    f.close()
+                    self.finish()
+                    return
+                self.request.connection.stream.write(blob,
+                                                     callback=lambda: me(me))
+            write_more(write_more)
 
     def _guess_type(self, path):
         """Guess the type of a file.