def initiate_broker(self):
    """Determine this agency's broker role and set it up.

    Tries to bind the master UNIX socket; if the socket is already taken
    another master is running and we connect as a slave instead.  On
    win32 (no UNIX sockets) we become master without listening.  In
    standalone mode we always connect as a slave.

    Returns a Deferred firing once the role has been assumed.
    """
    if sys.platform == "win32":
        # No UNIX sockets on Windows: assume master role directly.
        d = defer.succeed(None)
        d.addCallback(defer.drop_param, self.become_master)
        d.addErrback(self._handle_critical_error)
        return d
    self._set_idle(False)
    if not self._is_standalone:
        try:
            self.factory = MasterFactory(self)
            self.listener = reactor.listenUNIX( #@UndefinedVariable
                self.socket_path, self.factory, mode=self.socket_mode)
            d = defer.succeed(None)
            d.addCallback(defer.drop_param, self.become_master)
            d.addErrback(self._handle_critical_error)
            # Flag idle again whether becoming master succeeded or not.
            d.addBoth(defer.bridge_param, self._set_idle, True)
            return d
        except CannotListenError as e:
            # Socket taken -> another master is alive; join it as slave.
            self.info('Cannot listen on socket: %r. '\
                      'Assuming slave role.', e)
            return self._connect_as_slave()
    elif self._is_standalone:
        self.info('Standalone role')
        return self._connect_as_slave()
def config2winged(doc):
    """Read the section header line from doc and build the matching
    winged config, returning it wrapped in a fired Deferred.

    Raises TypeError for an unrecognized header.
    """
    header = doc.readline()
    builders = {"[BIRD]\n": create_config_bird,
                "[BAT]\n": create_config_bat}
    try:
        builder = builders[header]
    except KeyError:
        raise TypeError()
    return defer.succeed(builder(doc))
def build_query(value, context, *args, **kwargs):
    """Assemble the final query object and store it in the context.

    When ``static_conditions`` (closed over from the enclosing scope) is
    set, it is invoked with the context and its result is AND-merged with
    the query passed in ``kwargs['query']``.  Query options from kwargs
    (``include_value``, ``aggregate``) are then applied.

    Returns a Deferred firing with the built query.
    """

    def merge_conditions(static_conditions, q):
        # AND the evaluated static conditions with the caller's query.
        subquery = factory(*static_conditions)
        return factory(q, query.Operator.AND, subquery, include_value=[])

    def merge_query_options(query, kwargs):
        # NOTE: the parameter shadows the module-level `query` used above.
        if kwargs.get('include_value'):
            query.include_value.extend(kwargs['include_value'])
            # reset call below is to get rid of cached query plan
            # if it has been already calculated
            query.reset()
        if kwargs.get('aggregate'):
            aggregate = list()
            for name in kwargs['aggregate']:
                definition = model_aggregations[name]
                aggregate.append((definition[1], definition[2]))
            query.aggregate = aggregate
        return query

    def store_in_context(query):
        # Expose the built query to later pipeline steps.
        context['query'] = query
        return query

    if static_conditions:
        d = defer.succeed(None)
        d.addCallback(static_conditions, context)
        d.addCallback(merge_conditions, kwargs['query'])
    else:
        d = defer.succeed(kwargs['query'])
    d.addCallback(merge_query_options, kwargs)
    d.addCallback(store_in_context)
    return d
def xml2mammal(doc):
    """Parse an XML document and construct the mammal it describes,
    wrapped in a fired Deferred.

    Raises TypeError for an unrecognized document type.
    """
    kind, values = read_xml(doc)
    if kind == "HUMMAN":
        return defer.succeed(create_xml_human(**values))
    if kind == "BAT":
        return defer.succeed(create_xml_bat(**values))
    raise TypeError()
def testSolveConflictAlerts(self):
    """Conflict-solving outcomes must raise/resolve the nagios alert."""
    solve = Method()
    self.patch(conflicts, 'solve', solve)
    solve.reset(defer.succeed('id'))
    yield self.agent.conflict_cb('id', 'rev', False, False)
    self.assertTrue(solve.called)
    # this should not resolve alert, this would make nagios blow up
    self.assertNotIn('conflict-id', self.state.alert_statuses)

    # now fail solving conflict
    r = defer.fail(conflicts.UnsolvableConflict('bum', {'_id': 'id'}))
    solve.reset(r)
    yield self.agent.conflict_cb('id', 'rev', False, False)
    self.assertTrue(solve.called)
    # this should raise the alert
    self.assertIn('couchdb-conflicts', self.state.alert_statuses)
    self.assertEqual(1, self.state.alert_statuses['couchdb-conflicts'][0])

    # succeed again: the previously raised alert must be resolved (0)
    solve.reset(defer.succeed('id'))
    yield self.agent.conflict_cb('id', 'rev', False, False)
    self.assertTrue(solve.called)
    # this should resolve alert
    self.assertIn('couchdb-conflicts', self.state.alert_statuses)
    self.assertEqual(0, self.state.alert_statuses['couchdb-conflicts'][0])
def config2mammal(doc):
    """Read the section header line from doc and build the matching
    mammal config, returning it wrapped in a fired Deferred.

    Raises TypeError for an unrecognized header.
    """
    header = doc.readline()
    builders = {"[HUMMAN]\n": create_config_human,
                "[BAT]\n": create_config_bat}
    try:
        builder = builders[header]
    except KeyError:
        raise TypeError()
    return defer.succeed(builder(doc))
def xml2winged(doc):
    """Parse an XML document and construct the winged creature it
    describes, wrapped in a fired Deferred.

    Raises TypeError for an unrecognized document type.
    """
    kind, values = read_xml(doc)
    if kind == "BIRD":
        return defer.succeed(create_xml_bird(**values))
    if kind == "BAT":
        return defer.succeed(create_xml_bat(**values))
    raise TypeError()
def locate_agency(self, agency_id):
    """Locate an agency: fires with (hostname, gateway_port, is_remote),
    or None when the id is unknown."""

    def pack_result(port, remote):
        return self.get_hostname(), port, remote

    if agency_id == self.agency_id:
        # The requested agency is ourselves.
        return defer.succeed(pack_result(self.gateway_port, False))
    for slave_id, slave in self._broker.slaves.iteritems():
        if slave_id != agency_id:
            continue
        # Found among our slaves: ask it for its gateway port.
        d = slave.callRemote("get_gateway_port")
        d.addCallback(pack_result, True)
        return d
    return defer.succeed(None)
def publish(self, key, shard, message):
    """Publish message to the exchange bound to shard under routing key.

    Returns a Deferred firing with the message.  When messaging is
    disabled or the exchange is unknown the message is not actually
    sent (the latter is logged as an error).
    """
    assert isinstance(message, BaseMessage), str(type(message))
    if not self._enabled:
        self.log("RabbitMQ is disabled, message will not be really sent")
        return defer.succeed(message)
    exchange = self._get_exchange(shard)
    if exchange:
        self.increase_stat('messages published')
        exchange.publish(message, key)
    else:
        # Fix: use lazy %-style log arguments (consistent with every
        # other log call in this module) instead of eager interpolation.
        self.error("Exchange %r not found!", shard)
    return defer.succeed(message)
def _create_model(self, view_getter=None, source_getters=None,
                  model_factory=None):
    """Build a submodel, resolving its view first.

    Without an explicit view_getter the parent model's view is
    inherited; otherwise the view is retrieved and validated.
    """
    if view_getter is None:
        # views are inherited
        d = defer.succeed(self.model.view)
    else:
        d = defer.succeed(view_getter)
        d.addCallback(self._retrieve_view)
        d.addCallback(self._check_view)
    d.addCallback(self._retrieve_model, source_getters, model_factory)
    d.addErrback(self._filter_errors)
    return d
def request(self, method, location, headers=None, body=None, decoder=None, outside_of_the_pool=False, dont_pipeline=False, reset_retry=1): started = time.time() self.debug('%s-ing on %s', method.name, location) self.log('Headers: %r', headers) self.log('Body: %r', body) if headers is None: headers = dict() if (not self._enable_pipelineing or headers.get('connection') == 'close'): dont_pipeline = True # post requests are not idempotent and should not be pipelined can_pipeline = (not dont_pipeline and method != http.Methods.POST and reset_retry == 1) if self._idle and reset_retry == 1: self.log("Reusing existing idle connection.") protocol = self._idle.pop() d = defer.succeed(protocol) else: protocol = None if can_pipeline: protocol = first(x for x in self._connected if x.can_pipeline and x.in_pool) if protocol: self.log("The request will be pipelined.") d = defer.succeed(protocol) else: self.log("The request will be handled when a connection" " returns to a pool.") d = defer.Deferred() self._awaiting_client.append(d) # Regardless if we have pipeline this request or not, check if # we can have more connections, so that the next request can be # handeled by it. if self._pool_len() < self._max: self.log("Initializing new connection.") self._connecting += 1 self._connect() d.addCallback(self._request, method, location, headers, body, decoder, outside_of_the_pool, can_pipeline) d.addErrback(self._handle_connection_reset, method, location, headers, body, decoder, outside_of_the_pool, dont_pipeline, reset_retry) d.addBoth(defer.keep_param, self._log_request_result, method, location, started) return d
def testCreateReplicationSuccessful(self):
    """End-to-end: create a replication, pause it, then delete it."""
    get_replication_status = Method()
    self.patch(conflicts, 'get_replication_status',
               get_replication_status)
    get_replication_status.reset(defer.succeed({}))
    submodel = yield self.model_descend(self.model, 'replications')
    yield submodel.perform_action('post', target='target')
    view = yield self.connection.query_view(conflicts.Replications,
                                            key=('source', 'test'),
                                            include_docs=True)
    self.assertEqual(1, len(view))
    repl = view[0]
    self.assertIsInstance(repl, dict)
    self.assertEqual(True, repl.get('continuous'))
    self.assertEqual('target', repl.get('target'))
    self.assertEqual('test', repl.get('source'))
    self.assertEqual('featjs/replication', repl.get('filter'))

    # now test pause action on this replication
    get_replication_status.reset(defer.succeed(
        {'target': [(10, True, 'triggered', 'id2')]}))
    yield self.model.initiate()
    submodel = yield self.model_descend(
        self.model, 'replications', 'target')
    self.assertIsInstance(submodel, api.Replication)
    yield submodel.perform_action('pause')

    # the replication should not be continuous anymore
    view = yield self.connection.query_view(conflicts.Replications,
                                            key=('source', 'test'),
                                            include_docs=True)
    self.assertEqual(1, len(view))
    repl = view[0]
    self.assertIsInstance(repl, dict)
    self.assertNotIn('continuous', repl)
    self.assertEqual('target', repl.get('target'))
    self.assertEqual('test', repl.get('source'))
    self.assertEqual('featjs/replication', repl.get('filter'))

    # now delete the replication
    r = yield submodel.perform_action('del')
    self.assertIsInstance(r, response.Deleted)
    view = yield self.connection.query_view(conflicts.Replications,
                                            key=('source', 'test'),
                                            include_docs=True)
    self.assertEqual(0, len(view))
def terminate(self):
    """Terminate the process; returns a Deferred firing when finished.

    NOTE(review): any state other than the ones handled below falls
    through and returns None implicitly - confirm callers never hit
    that path.
    """
    if self._cmp_state(ProcessState.initiated):
        # Never actually started: nothing to signal.
        return defer.succeed(self)
    elif self._cmp_state([ProcessState.starting,
                          ProcessState.started,
                          ProcessState.terminating]):
        self._set_state(ProcessState.terminating)
        self._process.signalProcess("TERM")
        return self.wait_for_state(ProcessState.finished)
def _flush_next(self):
    """Push one batch of cached entries and reschedule until the cache
    is drained; fires immediately when there is nothing to flush."""
    if not self._cache:
        return defer.succeed(None)
    d = self._semaphore.run(self._push_entries)
    d.addCallback(defer.drop_param, time.call_next, self._flush_next)
    return d
def _disconnect(self):
    """Tear down ssh, gateway, journaler and broker, in that order."""
    d = defer.succeed(None)
    for teardown in (self._ssh.stop_listening,
                     self._gateway.cleanup,
                     self._journaler.close,
                     self._broker.disconnect):
        d.addCallback(defer.drop_param, teardown)
    return d
def full_shutdown(self, stop_process=False):
    '''Terminate all the slave agencies and shutdowns itself.'''
    self._shutting_down = True
    chain = defer.succeed(None)
    chain.addCallback(defer.drop_param, self._broker.shutdown_slaves)
    chain.addCallback(defer.drop_param, self.shutdown, stop_process)
    return chain
def on_become_master(self):
    """Master-role setup: journal writer, gateway, log redirection,
    pidfile, and optionally spawning the backup slave / host agent.

    Returns a Deferred firing when the trailing async steps are done.
    """
    self._ssh.start_listening()
    filename = os.path.join(self.config['agency']['rundir'],
                            self.config['agency']['journal'])
    self._journal_writer = journaler.SqliteWriter(
        self, filename=filename, encoding='zip',
        on_rotate=self._force_snapshot_agents)
    self._journaler.configure_with(self._journal_writer)
    self._journal_writer.initiate()
    self._start_master_gateway()
    self._redirect_text_log()
    self._create_pid_file()
    self._link_log_file(options.MASTER_LOG_LINK)
    # Seed the shared flag only once so it survives master re-election.
    if 'enable_host_restart' not in self._broker.shared_state:
        value = self.config['agency']['force_host_restart']
        self._broker.shared_state['enable_host_restart'] = value
    d = defer.succeed(None)
    if self.config['agency']['enable_spawning_slave']:
        d.addCallback(defer.drop_param, self._spawn_backup_agency)
    d.addCallback(defer.drop_param, self._start_host_agent_if_necessary)
    return d
def reconnect(self):
    # ping database to figure trigger changing state to connected
    if self.is_connected():
        return defer.succeed(self)
    if self.disconnected:
        # NOTE(review): returns None here while every other path
        # returns a Deferred - confirm callers tolerate that.
        return
    if self.reconnector is None or not self.reconnector.active():
        self.retry += 1
        # Exponential backoff, capped at 5 minutes.
        wait = min(2**(self.retry - 1), 300)
        if self.retry > 1:
            self.debug('CouchDB refused connection for %d time. '
                       'This indicates misconfiguration or temporary '
                       'network problem. Will try to reconnect in '
                       '%d seconds.', self.retry, wait)
        d = defer.Deferred()
        d.addCallback(defer.drop_param, self.couchdb_call,
                      self.couchdb.get, '/')
        d.addCallback(self._set_version)
        d.addCallback(defer.drop_param, self._setup_notifiers)
        # Swallow only the expected failures; anything else propagates.
        d.addErrback(failure.Failure.trap, NotConnectedError)
        d.addErrback(failure.Failure.trap, defer.CancelledError)
        # Fire the chain after the backoff delay.
        self.reconnector = time.callLater(wait, d.callback, None)
        return d
    else:
        # A reconnection attempt is already pending; just wait for it.
        return self.wait_connected()
def get_version(self):
    """Return the cached server version, or fetch and cache it."""
    if self.version:
        return defer.succeed(self.version)
    fetch = self.couchdb_call(self.couchdb.get, '/')
    fetch.addCallback(self._set_version)
    return fetch
def locate_default_action(model_name, action_name):
    """Descend from self.model to model_name and resolve action_name on
    it, wrapping the result as a fallback action."""
    steps = defer.succeed(self.model)
    steps.addCallback(retrieve_model, model_name)
    steps.addCallback(check_model)
    steps.addCallback(retrieve_action, action_name)
    steps.addCallback(wrap_action, fallback=True)
    return steps
def on_become_master(self):
    """Master-role setup: journaling, gateway (best effort), logs,
    pidfile, signal handlers and messaging backends.

    Returns a Deferred firing once all backends are registered and the
    host agent has been started.
    """
    self._ssh.start_listening()
    self._journaler.set_connection_strings(
        self.config.agency.journal)
    try:
        self._start_master_gateway()
    except Exception as e:
        # Gateway failure is non-fatal: log it and carry on without it.
        error.handle_exception(
            self, e,
            "Failed setting up gateway, it will stay disabled.")
    self._redirect_text_log()
    self._create_pid_file()
    self._link_log_file(options.MASTER_LOG_LINK)
    signal.signal(signal.SIGUSR1, self._sigusr1_handler)
    signal.signal(signal.SIGUSR2, self._sigusr2_handler)
    backends = []
    backends.append(self._initiate_messaging(self.config.msg))
    backends.append(self._initiate_tunneling(self.config.tunnel))
    backends.append(unix.Master(self._broker))
    # Drop the backends whose initiation returned None.
    backends = filter(None, backends)
    d = defer.succeed(None)
    for backend in backends:
        d.addCallback(defer.drop_param,
                      self._messaging.add_backend, backend)
    if (self.config.agency.enable_spawning_slave and
        sys.platform != "win32"):
        d.addCallback(defer.drop_param, self._spawn_backup_agency)
    d.addCallback(defer.drop_param, self._start_host_agent)
    return d
def _disconnect(self):
    """Tear down every initialized component in order, logging the
    outcome of each step as the chain progresses."""
    self.debug('In agent._disconnect(), '
               'ssh: %r, gateway: %r, journaler: %r, '
               'database: %r, broker: %r',
               self._ssh, self._gateway, self._journaler,
               self._database, self._broker)
    chain = defer.succeed(None)
    components = [
        (self._ssh, 'stop_listening', "Ssh stopped: %r"),
        (self._gateway, 'cleanup', "Gateway stopped: %r"),
        (self._journaler, 'close', "Journaler closed: %r"),
        (self._database, 'disconnect', "Database disconnected: %r"),
        (self._broker, 'disconnect', "Broker disconnected: %r")]
    for component, method_name, message in components:
        if component:
            chain.addCallback(defer.drop_param,
                              getattr(component, method_name))
            chain.addBoth(defer.inject_param, 1, self.debug, message)
    return chain
def call_mro_ex(self, method_name, keywords, raise_on_unconsumed=True):
    """Chain every method from the MRO call list into one Deferred and
    return it."""
    call_list = self._get_mro_call_list(
        method_name, keywords, raise_on_unconsumed)
    chain = defer.succeed(None)
    for method, kwargs in call_list:
        chain.addCallback(defer.drop_param, method, self, **kwargs)
    return chain
def assertAsyncFailure(self, chain, errorKlasses, value, *args, **kwargs):
    """Adds an asynchronous assertion for failure to the specified chain.
    If the chain is None, a new fired one will be created.
    The checks are serialized and done in order of declaration.
    If the value is a Deferred, the check wait for its result,
    if not it compare rightaway.
    If value is a callable, it is called with specified arguments
    and keyword WHEN THE PREVIOUS CALL HAS BEEN DONE.

    Used like this::

      d = defer.succeed(None)
      d = self.assertAsyncFailure(d, ERROR_CLASSES, FIRED_DEFERRED)
      d = self.assertAsyncFailure(d, ERROR_CLASSES, FUNCTION, ARG)
      d = self.assertAsyncFailure(d, [ValueError, TypeError], fun(21))
      d = self.assertAsyncFailure(d, [ValueError], fun, 21)
      return d
    """

    def check(failure):
        # A sequence means "any of these classes"; a single class is
        # handed to failure.check() directly.
        if isinstance(errorKlasses, collections.Sequence):
            self.assertTrue(failure.check(*errorKlasses))
        else:
            self.assertTrue(failure.check(errorKlasses))
        return None # Resolve the error

    if chain is None:
        chain = defer.succeed(None)
    return chain.addBoth(self._assertAsync, check, value, *args, **kwargs)
def close(self, flush=True):
    """Close the writer, optionally flushing cached entries first."""
    chain = defer.succeed(None)
    if flush:
        chain.addCallback(defer.drop_param, self._flush_next)
    chain.addCallback(defer.drop_param, self._set_state,
                      State.disconnected)
    chain.addCallback(defer.drop_param, self._set_writer, None)
    return chain
def _flush_next(self):
    """Insert the currently cached entries and recurse until the cache
    is empty; fires immediately when there is nothing to do."""
    if not self._cache:
        return defer.succeed(None)
    d = self._semaphore.run(self._perform_inserts, self._cache)
    d.addCallback(defer.drop_param, self._flush_next)
    return d
def disconnect(self):
    '''
    This is called as part of the agency shutdown.
    '''
    self.log("Disconnecting broker %r.", self)
    d = defer.succeed(None)
    if self.is_master():
        # Stop accepting slaves, then drop the existing connections.
        if self.listener is not None:
            d.addCallback(defer.drop_param, self.listener.stopListening)
        d.addCallback(defer.drop_param, self.factory.disconnect)
    elif self.is_slave():
        # Slave-side disconnect may return a plain value or a Deferred.
        d = defer.maybeDeferred(self.factory.disconnect)
    elif self._cmp_state(BrokerRole.disconnected):
        # Already disconnected; nothing to tear down.
        return defer.succeed(None)
    d.addCallback(defer.drop_param, self.become_disconnected)
    return d
def notify_finish(self):
    """Return a Deferred firing with the final result (or failing, if
    the stored result is an error)."""
    if not self._finalize_called:
        # Not finished yet: wait for the 'finish' notification.
        return self._fnotifier.wait('finish')
    result = self._result
    if isinstance(result, (Exception, failure.Failure)):
        return defer.fail(result)
    return defer.succeed(result)
def cleanup(self):
    """Shut down the gateway server if one is running."""
    if not self._server:
        # Nothing to clean; fire immediately.
        return defer.succeed(self)
    self.debug("Cleaning up gateway on port %s", self.port)
    d = self._server.cleanup()
    self._server = None
    return d
def _publish(self, key, shard, message):
    """Serialize and publish message to exchange `shard` with routing
    key `key`, committing the AMQP transaction afterwards.

    Expired messages and messages addressed to a None exchange are
    dropped (the latter would corrupt txamqp's channel state).
    Returns a Deferred firing with the message, or None when dropped
    as expired.
    """
    assert isinstance(message, BaseMessage), "Unexpected message class"
    if message.expiration_time:
        delta = message.expiration_time - time.time()
        if delta < 0:
            self.log(
                "Not sending expired message. msg=%s, shard=%s, "
                "key=%s, delta=%r", message, shard, key, delta
            )
            return
    serialized = self.serializer.convert(message)
    content = Content(serialized)
    content.properties["delivery mode"] = 1  # non-persistent
    self.log("Publishing msg=%s, shard=%s, key=%s", message, shard, key)
    if shard is None:
        self.error(
            "Tried to send message to exchange=None. This would "
            "mess up the whole txamqp library state, therefore "
            "this message is ignored"
        )
        return defer.succeed(None)
    d = self.channel.basic_publish(exchange=shard, content=content,
                                   routing_key=key, immediate=False)
    d.addCallback(defer.drop_param, self.channel.tx_commit)
    d.addCallback(defer.override_result, message)
    return d
def _call_async(self, method, args, kwargs):
    """Invoke method and put its outcome on the deferred queue.

    Consumers always receive a Deferred: plain results are wrapped
    with defer.succeed(), exceptions with defer.fail().
    """
    try:
        r = method(*args, **kwargs)
    except:
        # Deliberate bare except: any failure (not just Exception) is
        # routed to the queue; defer.fail() with no argument wraps the
        # exception currently being handled.
        self._defer_queue.put(defer.fail())
        return
    if not isinstance(r, defer.Deferred):
        # Wrap synchronous results so the queue is homogeneous.
        self._defer_queue.put(defer.succeed(r))
        return
    self._defer_queue.put(r)
def cleanup(self):
    """Shut down the webserver, then stop the thread pool; failures at
    either step are logged and do not abort the chain."""
    self.info('Shutting down.')
    steps = defer.succeed(self)
    steps.addCallback(webserver.Server.cleanup)
    steps.addErrback(defer.inject_param, 1, error.handle_failure, self,
                     "Failure while shutting down webserver.")
    steps.addCallback(defer.drop_param, self.threadpool.stop)
    steps.addErrback(defer.inject_param, 1, error.handle_failure, self,
                     "Failure while stopping the threadpool.")
    return steps
def stop(self):
    """
    Shutdown the threads in the threadpool.

    Returns a Deferred firing once all worker threads have joined and
    the delayed calls have been cancelled.
    """
    self.joined = True
    # One stop sentinel per worker drains the pool.
    while self.workers:
        self.q.put(WorkerStop)
        self.workers -= 1
    for thread in self.threads:
        # getattr is used, because the thread might not have the
        # attribute defined yet, if it is just starting up
        job_id = getattr(thread, 'job_id', None)
        if job_id:
            self.cancel_job(job_id)
    if self.threads:
        d = self._notifier.wait('threads_joined')
        d.addCallback(defer.drop_param, self._cancel_delayed_calls)
        return d
    else:
        self._cancel_delayed_calls()
        return defer.succeed(None)
def async ():
    # NOTE(review): this assignment creates a *local* 'called'; it does
    # not rebind a flag in the enclosing scope (no nonlocal in py2) -
    # confirm the surrounding test really observes the flag it expects.
    called = True
    return defer.succeed(None)
def initiate(self):
    """Nothing to initialize; fire the chain immediately."""
    return defer.succeed(None)
def cleanup(self):
    """Nothing to clean up; fire the chain immediately."""
    return defer.succeed(None)
def source_getitem(_value, context, **_params):
    """Fetch the closed-over `item` from the context model's source,
    wrapped in a fired Deferred."""
    source = context["model"].source
    return defer.succeed(source[item])
def give_defer(result):
    """Wrap result in a Deferred: failing for exceptions, succeeding
    for everything else."""
    wrap = defer.fail if isinstance(result, Exception) else defer.succeed
    return wrap(result)