I'm using a thread pool together with Tornado to do some work. Here is the code:
common/thread_pool.py
import threading
import logging
import Queue

import tornado.ioloop

class Worker(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self._queue = queue

    def run(self):
        logging.info('Worker start')
        while True:
            content = self._queue.get()
            if isinstance(content, str) and content == 'quit':
                break
            # content: (func, args, on_complete)
            func = content[0]
            args = content[1]
            on_complete = content[2]
            resp = func(args)
            tornado.ioloop.IOLoop.instance().add_callback(lambda: on_complete(resp))
            # not sure whether it is correct to call this here
            # self._queue.task_done()
        logging.info('Worker stop')

class WorkerPool(object):
    _workers = []

    def __init__(self, num):
        self._queue = Queue.Queue()
        self._size = num

    def start(self):
        logging.info('WorkerPool start %d' % self._size)
        for _ in range(self._size):
            worker = Worker(self._queue)
            worker.start()
            self._workers.append(worker)

    def stop(self):
        for worker in self._workers:
            self._queue.put('quit')
        for worker in self._workers:
            worker.join()
        logging.info('WorkerPool stopped')

    def append(self, content):
        self._queue.put(content)
gateway.py
import logging
import sys
import time

import tornado.ioloop
import tornado.web

from common import thread_pool

workers = None

class MainServerHandler(tornado.web.RequestHandler):
    @tornado.web.asynchronous
    def get(self):
        start_time = time.time()
        method = 'get'
        content = (self.handle, (method, self.request, start_time), self.on_complete)
        workers.append(content)

    @tornado.web.asynchronous
    def post(self):
        start_time = time.time()
        method = 'post'
        content = (self.handle, (method, self.request, start_time), self.on_complete)
        workers.append(content)

    def handle(self, args):
        method, request, start_time = args
        # for the test, just return a fixed string
        return 'test test'

    def on_complete(self, res):
        logging.debug('on_complete')
        self.write(res)
        self.finish()
        return

def main(argv):
    global workers
    # conf_mgr (project configuration) is imported elsewhere in the project
    workers = thread_pool.WorkerPool(conf_mgr.thread_num)
    workers.start()
    application = tornado.web.Application([(r"/", MainServerHandler)])
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()

if __name__ == "__main__":
    main(sys.argv[1:])
When I send many concurrent requests, I get the following error:
ERROR: 2014-09-15 18:04:03: ioloop.py:435 * 140500107065056 Exception in callback <tornado.stack_context._StackContextWrapper object at 0x7fc8b4d6b9f0>
Traceback (most recent call last):
  File "/home/work/nlp_arch/project/ps/se/nlp-arch/gateway/gateway/../third-party/tornado-2.4.1/tornado/ioloop.py", line 421, in _run_callback
    callback()
  File "/home/work/nlp_arch/project/ps/se/nlp-arch/gateway/gateway/../common/thread_pool.py", line 39, in <lambda>
    tornado.ioloop.IOLoop.instance().add_callback(lambda: on_complete(resp))
  File "/home/work/nlp_arch/project/ps/se/nlp-arch/gateway/gateway/gateway.py", line 92, in on_complete
    self.write(res)
  File "/home/work/nlp_arch/project/ps/se/nlp-arch/gateway/gateway/../third-party/tornado-2.4.1/tornado/web.py", line 489, in write
    raise RuntimeError("Cannot write() after finish(). May be caused "
RuntimeError: Cannot write() after finish(). May be caused by using async operations without the @asynchronous decorator.
But I don't call write after finish, and I do use the @asynchronous decorator. In the logs I can also see that write/finish are called from the same thread.
The problem is with the way you're adding the callback to the I/O loop. Add it like this:
tornado.ioloop.IOLoop.instance().add_callback(on_complete, resp)
and the error will go away.
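For context, this is how the change lands in the Worker.run loop from thread_pool.py (a condensed sketch; everything else stays as you posted it):

def run(self):
    logging.info('Worker start')
    while True:
        content = self._queue.get()
        if isinstance(content, str) and content == 'quit':
            break
        func, args, on_complete = content
        resp = func(args)
        # pass the callable and its argument to add_callback directly,
        # so they are bound now rather than when the I/O loop runs them
        tornado.ioloop.IOLoop.instance().add_callback(on_complete, resp)
    logging.info('Worker stop')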
You're seeing this odd behavior because when you use a lambda, you create a closure over the enclosing function's local scope, and the variables used in that closure are looked up when the lambda is executed, not when it is created. Consider this example:
funcs = []

def func(a):
    print a

for i in range(5):
    funcs.append(lambda: func(i))

for f in funcs:
    f()
Output:
4
4
4
4
4
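The same toy example prints 0 through 4 if you bind the loop variable as a default argument, which captures its value at the moment each lambda is created:

funcs = []

def func(a):
    print a

for i in range(5):
    # i=i is evaluated when the lambda is defined, so each lambda keeps
    # its own copy of the loop variable
    funcs.append(lambda i=i: func(i))

for f in funcs:
    f()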
Because your worker method runs in a while loop, on_complete ends up being rebound several times, and that also changes the value the lambda sees. So if a worker sets on_complete for handler A, then pulls another task off the queue and sets on_complete for handler B before the callback it scheduled for handler A has run, both callbacks end up calling handler B's on_complete. Handler B's write()/finish() then run twice, which is exactly the "Cannot write() after finish()" error you're seeing.
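If it helps to see the race without threads or Tornado involved, here is a hypothetical simulation: the pending list stands in for the I/O loop's callback queue, and the two functions stand in for handler A's and handler B's on_complete (all names here are made up for illustration):

pending = []  # stands in for the real IOLoop callback queue

def on_complete_a(res):
    print 'handler A got', res

def on_complete_b(res):
    print 'handler B got', res

tasks = [('result A', on_complete_a), ('result B', on_complete_b)]

for resp, on_complete in tasks:
    # same mistake as in Worker.run: the lambda closes over loop
    # variables that are rebound on the next iteration
    pending.append(lambda: on_complete(resp))

# by the time the "I/O loop" runs the callbacks, the worker has
# already processed both tasks
for cb in pending:
    cb()
# prints "handler B got result B" twice; handler A's response is lost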
If you really do want to use a lambda, you can also avoid this by binding on_complete (and resp, which is rebound in exactly the same way) in the lambda's own argument list:
tornado.ioloop.IOLoop.instance().add_callback(lambda on_complete=on_complete, resp=resp: on_complete(resp))
But passing the function and its arguments to add_callback directly is much better.
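If the add_callback in your Tornado version does not forward extra arguments to the callback (the traceback shows you're on 2.4.1), functools.partial gives you the same early binding; a sketch using the same names as above:

import functools

# functools.partial binds on_complete and resp when the callback is
# created, so later rebinding inside the worker loop cannot affect it
tornado.ioloop.IOLoop.instance().add_callback(functools.partial(on_complete, resp))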