class GraphClient(object):
    """Synchronous ZeroMQ REQ client for the graph query service."""

    def __init__(self, request_connect_addr='tcp://127.0.0.1:6000'):
        self.context = zmq.Context()
        self.sockets = SocketManager(zmq, self.context)
        # BUG FIX: the 'query' socket was created but never connected, so
        # request_connect_addr was silently ignored and every request()
        # would block forever.  Connect it the same way the other client
        # implementation in this file does.
        self.sockets.ensure_and_connect(
            'query', zmq.REQ, request_connect_addr,
            zmq.POLLIN | zmq.POLLOUT)

    def request(self, data):
        """Send *data* over the REQ socket and block for the reply."""
        self.sockets.send_safe('query', data)
        return self.sockets.recv_safe('query')
class GraphClient(object):
    """Synchronous ZeroMQ REQ client for the graph query service."""

    def __init__(self, request_connect_addr='tcp://127.0.0.1:6000'):
        self.context = zmq.Context()
        self.sockets = SocketManager(zmq, self.context)
        self.sockets.ensure_and_connect(
            'query', zmq.REQ, request_connect_addr,
            zmq.POLLIN | zmq.POLLOUT)
        # FIX: use the module logger instead of a bare print statement,
        # consistent with GraphServer's logging elsewhere in this file.
        logger.info('connected to %s', request_connect_addr)

    def request(self, data):
        """Send *data* over the REQ socket and block for the reply."""
        self.sockets.send_safe('query', data)
        return self.sockets.recv_safe('query')

    def query(self, string):
        """Convenience wrapper: submit a query string to the server."""
        return self.request({'query': string})
class GraphServer(object):
    """ZeroMQ REP server that executes schema queries for GraphClient.

    Spawns listener coroutines in a greenlet pool; each listener binds a
    REP socket and services one request at a time.
    """

    def __init__(self, sleep_interval=3, pool_size=8):
        self.context = zmq.Context()
        self.sockets = SocketManager(zmq, self.context)
        self.sleep_interval = sleep_interval
        self.pool_size = pool_size
        self.pool = Pool(pool_size)
        # Event flag: cleared by stop() to shut listeners down cooperatively.
        self.allowed_to_run = Event()
        self.allowed_to_run.set()

    def should_run(self):
        """True while the server has not been asked to stop."""
        return self.allowed_to_run.is_set()

    def listener_coroutine(self, socket_name, reply_bind_addr):
        """Bind a REP socket and service requests until stopped."""
        # TODO: DEALER/ROUTER + internal multiplexing of request ids?
        self.sockets.ensure_and_bind(
            socket_name, zmq.REP, reply_bind_addr,
            zmq.POLLIN | zmq.POLLOUT)
        self.sockets.set_socket_option(socket_name, zmq.IDENTITY, socket_name)
        while self.should_run():
            if not self.process_request(socket_name):
                # FIX: replaced the fragile `cond and '' or 's'` idiom with
                # an explicit conditional expression.
                logger.debug('waiting %s second%s', self.sleep_interval,
                             '' if self.sleep_interval == 1 else 's')

    def process_request(self, socket_name):
        """Receive one request, execute its query, send the reply.

        Returns True when a request was processed, False on timeout.
        """
        data = self.sockets.recv_safe(socket_name, timeout=self.sleep_interval)
        if not data:
            return False

        logger.info('processed request')
        # FIX: route the request dump through the logger instead of pprint()
        # writing raw debug output to stdout.
        logger.debug('request data: %r', data)
        query = data['query']
        result = schema.execute(query)
        if result.errors:
            payload = {'errors': "\n".join(map(unicode, result.errors))}
        else:
            payload = {'result': json.dumps(result.data, indent=2)}

        self.sockets.send_safe(socket_name, payload)
        return True

    def run(self, reply_bind_addr='tcp://*:6000'):
        """Spawn the listener and block until the pool finishes."""
        logger.info('listening on %s', reply_bind_addr)
        self.pool.spawn(self.listener_coroutine, 'reply1', reply_bind_addr)
        self.pool.join(raise_error=True)

    def stop(self):
        """Signal listeners to stop, close the socket, kill the pool."""
        self.allowed_to_run.clear()
        self.sockets.close('reply1')
        self.pool.kill()
def test_socket_manager_send_safe_not_ready(wait_until_ready): ("SocketManager().send_safe() should return False when the socet is not ready") # Background: wait_until_ready is mocked to return None wait_until_ready.return_value = None # Given a zmq mock zmq = Mock() # And a context context = Mock() # And a serializer serializer = Mock(name='serializer') # And a socket manager manager = SocketManager(zmq, context, serialization_backend=serializer) # When I call .send_safe() sent = manager.send_safe('foobar', 'PAYLOAD') # Then it should have failed sent.should.be.false # And it should not pack the value serializer.pack.called.should.be.false
def test_socket_manager_send_safe(wait_until_ready):
    ("SocketManager().send_safe() should serialize "
     "before sending, using the configured backend")
    # wait_until_ready is stubbed to hand back the registered socket
    # for whatever name it is asked about
    wait_until_ready.side_effect = lambda name, *args: manager.sockets[name]

    # build a manager wired entirely to mocks: zmq, its context,
    # and the serialization backend
    zmq = Mock()
    context = Mock()
    serializer = Mock(name='serializer')
    manager = SocketManager(zmq, context, serialization_backend=serializer)
    socket = manager.create('foobar', zmq.REP)

    # calling .send_safe() must report success...
    sent = manager.send_safe('foobar', 'PAYLOAD')
    sent.should.be.true

    # ...and the payload must have been packed exactly once, with the
    # packed value being what actually went over the socket
    serializer.pack.assert_called_once_with('PAYLOAD')
    socket.send.assert_called_once_with(serializer.pack.return_value)
def test_socket_manager_send_safe_not_ready(wait_until_ready): ("SocketManager().send_safe() should return False when the socet is not ready" ) # Background: wait_until_ready is mocked to return None wait_until_ready.return_value = None # Given a zmq mock zmq = Mock() # And a context context = Mock() # And a serializer serializer = Mock(name='serializer') # And a socket manager manager = SocketManager(zmq, context, serialization_backend=serializer) # When I call .send_safe() sent = manager.send_safe('foobar', 'PAYLOAD') # Then it should have failed sent.should.be.false # And it should not pack the value serializer.pack.called.should.be.false
def test_socket_manager_send_safe_not_ready(): ("SocketManager.send_safe should return False when the socket is not ready") # Given a manager manager = SocketManager(zmq, context) # And a couple of sockets manager.create('foo', zmq.REP) # When I call .send_safe() result = manager.send_safe('foo', {'some': 'value'}) # Then it should be false result.should.be.false
class Pipeline(object):
    """Coordinates a chain of worker steps over ZeroMQ.

    Subscribes to worker step events (SUB), pulls new job submissions
    (PULL), and pushes queued jobs (PUSH) to the next available worker
    of each step type, persisting state through a StorageBackend.
    """

    def __init__(self, name, steps=None):
        self.name = name
        self.actions = Speaker(
            'actions', [
                'available',
                'failed',
                'started',
                'success',
                'metric',
                'error',
                'logs',
            ]
        )
        # FIX: `steps=[]` was a mutable default argument; use a None
        # sentinel instead (behavior for callers is unchanged).
        steps = steps or []
        self.steps = [s.job_type for s in steps]
        self.total_steps = len(steps)
        self.context = zmq.Context()
        self.sockets = SocketManager(zmq, self.context)
        self.sockets.create('step-events', zmq.SUB)
        self.sockets.create('jobs-in', zmq.PULL)
        # one PUSH socket per step type, used to hand jobs to workers
        for step in self.steps:
            self.sockets.create(step, zmq.PUSH)

        for action in self.actions.actions.keys():
            self.bind_action(action)

        self.total_actions = len(self.actions.actions)
        self.pool = gevent.pool.Pool(self.total_actions ** (self.total_steps + 1))
        self.greenlets = []
        self._allowed_to_run = True
        self.default_interval = 0.1
        self.backend = StorageBackend()
        self.logger = logging.getLogger('pipeline')
        # FIX: compile the event-routing table once here instead of
        # rebuilding and recompiling every regex on every route_event() call.
        self.routes = {
            re.compile(r'available'): self.actions.available,
            re.compile(r'failed'): self.actions.failed,
            re.compile(r'success'): self.actions.success,
            re.compile(r'started'): self.actions.started,
            re.compile(r'metric'): self.actions.metric,
            re.compile(r'logs'): self.actions.logs,
            re.compile(r'error'): self.actions.error,
        }

    def on_started(self, event):
        """Log that a worker began processing a job."""
        worker = Worker.from_event(event)
        self.logger.info('%s [%s] started to process a job',
                         worker.job_type, worker.id)

    def on_available(self, event):
        """Register a newly-announced worker and connect its PUSH socket."""
        worker = Worker.from_event(event)
        if self.backend.register_worker(worker):
            self.sockets.connect(worker.job_type, worker.address, zmq.POLLOUT)
            self.logger.info('connected to worker: [%s]', dict(worker))

    def on_failed(self, event):
        """Log a worker's job failure."""
        worker = Worker.from_event(event)
        self.logger.warning('%s [%s] failed', worker.job_type, worker.id)

    def on_success(self, event):
        """Log a success and enqueue the pipeline's next step for the job."""
        worker = Worker.from_event(event)
        self.logger.info('%s [%s] success', worker.job_type, worker.id)
        self.enqueue_next_job(event.data)

    def on_metric(self, event):
        """Log a metric event verbatim."""
        self.logger.info(' '.join([event.topic, event.data]))

    def on_error(self, event):
        """Log a worker error event."""
        worker = Worker.from_event(event)
        self.logger.warning('%s [%s] errored: %s',
                            worker.job_type, worker.id, event)

    def on_logs(self, event):
        """Forward a worker's log message, when one is present."""
        msg = event.data.pop('msg', None)
        if msg:
            self.logger.debug(msg)

    def enqueue_next_job(self, data):
        """Advance a finished job to the next step type, if any remains."""
        result = data.pop('instructions')
        job = Job.from_dict(data)
        job['instructions'] = result
        step_index = self.steps.index(job.type)
        try:
            next_job_type = self.steps[step_index + 1]
        except IndexError:
            # the job just finished the last step of the pipeline
            next_job_type = None

        if next_job_type:
            self.logger.info("enqueuing next job: %s", next_job_type)
            job['job_type'] = next_job_type
            self.backend.enqueue_job(job)

    def bind_action(self, name, method=None):
        """Wire an action name to its on_<name> handler, spawned per event."""
        action = getattr(self.actions, name, None)
        if not action:
            raise KeyError('undefined action: {0}'.format(name))

        method = method or getattr(self, 'on_{0}'.format(name), None)
        if not method:
            raise TypeError(
                '{0} does not have method {1}(self, topic, data)'.format(
                    self.__class__, name))

        action(lambda _, event: self.spawn(method, event))

    def should_run(self):
        """True until stop is requested."""
        return self._allowed_to_run

    def listen(self, subscriber_bind_address='tcp://127.0.0.1:6000',
               pull_bind_address='tcp://127.0.0.1:7000'):
        """Bind the event-subscription and job-intake sockets."""
        self.sockets.bind('step-events', subscriber_bind_address, zmq.POLLIN)
        self.sockets.bind('jobs-in', pull_bind_address, zmq.POLLIN)
        self.logger.info('listening for events on %s', subscriber_bind_address)
        self.logger.info('listening for instructions on %s', pull_bind_address)

    def route_event(self, event):
        """Shout the event at every action whose pattern matches its topic."""
        if not event:
            return

        matched = False
        for regex, action in self.routes.items():
            if regex.search(event.topic):
                action.shout(event)
                matched = True

        if not matched:
            # FIX: report through the logger instead of a bare print
            self.logger.warning('unmatched event %s %s',
                                event.topic, event.data)

    def drain_jobs_in(self):
        """Pull incoming job submissions and persist them to the backend."""
        while self.should_run():
            data = self.sockets.recv_safe('jobs-in')
            if not data:
                gevent.sleep(0)
                continue
            job = Job.new(data)
            self.backend.enqueue_job(job)
            gevent.sleep(0)

    def drain_jobs_out(self):
        """Round-robin over step types, dispatching queued jobs to workers."""
        iteration = -1
        while self.should_run():
            iteration += 1
            index = iteration % len(self.steps)
            job_type = self.steps[index]
            worker = self.backend.get_next_available_worker_for_type(job_type)
            if not worker:
                gevent.sleep(0)
                continue
            job = self.backend.dequeue_job_of_type(job_type)
            if not job:
                gevent.sleep(0)
                continue
            self.sockets.send_safe(worker.job_type, job.to_dict())
            gevent.sleep(0)

    def spawn(self, *args, **kw):
        """Spawn a greenlet on the pool, keeping a reference to it."""
        self.greenlets.append(
            self.pool.spawn(*args, **kw)
        )

    def idle(self):
        """Yield control to other greenlets."""
        gevent.sleep(0)

    def loop(self):
        """Bind sockets, start the drain coroutines, and block forever."""
        self.listen()
        self.spawn(self.drain_events)
        self.spawn(self.drain_jobs_in)
        self.spawn(self.drain_jobs_out)
        while self.should_run():
            gevent.sleep(5)

    def drain_events(self):
        """Receive step events from the SUB socket and route them."""
        while self.should_run():
            event = self.sockets.recv_event_safe('step-events')
            if event:
                self.route_event(event)
                gevent.sleep(0)
            else:
                self.idle()