def test_create_group(self):
    """Verify create_group POSTs the expected payload to Bobby and
    returns the JSON-decoded response body."""
    fake_response = mock.Mock()
    fake_response.code = 201
    self.treq.post.return_value = succeed(fake_response)
    fake_body = mock.Mock()
    self.treq.json_content.return_value = succeed(fake_body)
    result = self.successResultOf(self.client.create_group('t1', 'g1'))
    self.assertEqual(result, fake_body)
    self.treq.post.assert_called_once_with(
        'url/t1/groups', data=mock.ANY
    )
    posted = self.treq.post.mock_calls[0][2]['data']
    self.assertEqual(
        json.loads(posted),
        {"notificationPlan": "Damnit, Bobby",
         "notification": "Damnit, Bobby",
         "groupId": "g1"})
    self.treq.json_content.assert_called_once_with(fake_response)
def side_effect(*args, **kwargs):
    """Fake remote perspective: getSlaveInfo is missing on this slave,
    while the other known queries succeed."""
    if 'getSlaveInfo' in args:
        return defer.fail(twisted_pb.NoSuchMethod())
    elif 'getCommands' in args:
        return defer.succeed({'x': 1, 'y': 2})
    elif 'getVersion' in args:
        return defer.succeed('TheVersion')
def content(self):
    """Return the response body.

    Returns a fired Deferred (or plain value wrapped in ``succeed``)
    when the body is already known, otherwise a Deferred that fires once
    the body has been collected from the transport.  Multiple concurrent
    callers while delivery is in progress are all satisfied.
    """
    def _add_content(data):
        # Body fully delivered: cache it and flush every caller that
        # asked for it while delivery was still in progress.
        self._content = data
        waiting_for_content = self._waiting_for_content
        self._waiting_for_content = []
        for d in waiting_for_content:
            d.callback(self._content)
        return self._content
    if self._method == 'HEAD':
        # HEAD responses never carry a body.
        return succeed('')
    if self._content is not None:
        # Body already collected earlier.
        return succeed(self._content)
    if self._content_d is not None:
        # Delivery already started by a previous caller: queue up a
        # Deferred that fires (with the cached body) when it finishes.
        d = Deferred()
        d.addCallback(lambda _: self._content)
        self._waiting_for_content.append(d)
        return d
    # First caller: start collecting the body from the response.
    d = Deferred()
    d.addCallback(_add_content)
    self._content_d = d
    self._response.deliverBody(_BodyCollector(d))
    return d
def fetch_user((consumer_key, timestamp, nonce, signature)):
    """Resolve *consumer_key* to a user, threading the OAuth parameters
    through unchanged.

    Python 2 tuple-parameter syntax.  ``root`` and ``guest`` map to the
    built-in accounts; any other key is looked up in the user repository.
    NOTE(review): ``self`` and ``userRepository`` are captured from the
    enclosing scope (not visible here) -- confirm before reusing.
    """
    if consumer_key == "root":
        return defer.succeed((self.root, timestamp, nonce, signature))
    if consumer_key == "guest":
        return defer.succeed((self.guest, timestamp, nonce, signature))
    def complete(results):
        # An empty result set means the key is unknown -> auth failure.
        if not len(results):
            return defer.fail(Exception("Unauthorized."))
        return defer.succeed((results[0], timestamp, nonce, signature))
    d = userRepository.where(login = consumer_key)
    d.addCallback(complete)
    return d
def side_effect(*args, **kwargs):
    """Fake remote perspective answering the three known queries."""
    for method, reply in (('getSlaveInfo', {'info': 'test'}),
                          ('getCommands', {'x': 1, 'y': 2}),
                          ('getVersion', 'TheVersion')):
        if method in args:
            return defer.succeed(reply)
def test_activation(self):
    """A scheduler claimed by another master stays inactive; once the
    claim is cleared the next poll activates it, and stopping the
    service deactivates it.

    NOTE(review): uses ``yield``, so this must be wrapped by
    ``defer.inlineCallbacks`` (decorator not visible in this view).
    """
    sched = self.makeScheduler(name='n', builderNames=['a'])
    sched.clock = task.Clock()
    sched.activate = mock.Mock(return_value=defer.succeed(None))
    sched.deactivate = mock.Mock(return_value=defer.succeed(None))
    # set the schedulerid, and claim the scheduler on another master
    self.setSchedulerToMaster(self.OTHER_MASTER_ID)
    sched.startService()
    # several partial poll intervals: still claimed elsewhere, so the
    # scheduler must not activate
    sched.clock.advance(sched.POLL_INTERVAL_SEC / 2)
    sched.clock.advance(sched.POLL_INTERVAL_SEC / 5)
    sched.clock.advance(sched.POLL_INTERVAL_SEC / 5)
    self.assertFalse(sched.activate.called)
    self.assertFalse(sched.deactivate.called)
    self.assertFalse(sched.isActive())
    self.assertEqual(sched.serviceid, sched.objectid)  # objectid is attached by the test helper
    # clear that masterid
    self.setSchedulerToMaster(None)
    sched.clock.advance(sched.POLL_INTERVAL_SEC)
    self.assertTrue(sched.activate.called)
    self.assertFalse(sched.deactivate.called)
    self.assertTrue(sched.isActive())
    # stop the service and see that deactivate is called
    yield sched.stopService()
    self.assertTrue(sched.activate.called)
    self.assertTrue(sched.deactivate.called)
    self.assertFalse(sched.isActive())
def getChange(self, changeid):
    """Look up a fake change row by id; fire with None when unknown."""
    if changeid not in self.changes:
        return defer.succeed(None)
    return defer.succeed(self._chdict(self.changes[changeid]))
def test_load_mails_adds_mails(self): # given mail_root = pkg_resources.resource_filename('test.unit.fixtures', 'mailset') firstMailDeferred = defer.succeed(None) secondMailDeferred = defer.succeed(None) self.mail_store.add_mail.side_effect = [firstMailDeferred, secondMailDeferred] self.mail_store.add_mailbox.return_value = defer.succeed(None) # when d = load_mails(self.args, [mail_root]) # then def assert_mails_added(_): self.assertTrue(self.mail_store.add_mail.called) self.mail_store.add_mail.assert_any_call('INBOX', self._mail_content(join(mail_root, 'new', 'mbox00000000'))) self.mail_store.add_mail.assert_any_call('INBOX', self._mail_content(join(mail_root, 'new', 'mbox00000001'))) # TODO Should we check for flags? def error_callack(err): print err self.assertTrue(False) d.addCallback(assert_mails_added) d.addErrback(error_callack) return d
def test_error_sending(self, logger):
    """
    An error sending to one agent does not prevent others from being
    notified.
    """
    service = build_control_amp_service(self)
    self.patch(service, 'logger', logger)
    ok_protocol = ControlAMP(service)
    # Patching is bad.
    # https://clusterhq.atlassian.net/browse/FLOC-1603
    ok_protocol.callRemote = lambda *args, **kwargs: succeed({})
    error = ConnectionLost()
    bad_protocol = ControlAMP(service)
    pending = [succeed({}), fail(error)]
    # Patching is bad.
    # https://clusterhq.atlassian.net/browse/FLOC-1603
    bad_protocol.callRemote = lambda *args, **kwargs: pending.pop(0)
    service.connected(bad_protocol)
    service.connected(ok_protocol)
    service.node_changed(NodeState(hostname=u"1.2.3.4"))
    actions = LoggedAction.ofType(logger.messages, LOG_SEND_TO_AGENT)
    failures = [action.end_message["exception"]
                for action in actions if not action.succeeded]
    self.assertEqual(failures,
                     [u"twisted.internet.error.ConnectionLost"])
def append(self, text):
    """Accumulate *text*, emitting only whole lines via self.callback.

    Any trailing partial line is buffered in ``self.partialLine`` until
    more text arrives.  Over-long buffered lines (beyond
    ``MAX_LINELENGTH``) are force-split and flushed.
    Returns whatever ``self.callback`` returns, or a fired Deferred when
    nothing complete is ready to emit.
    """
    if self.partialLine:
        if len(self.partialLine) > self.MAX_LINELENGTH:
            if not self.warned:
                # Unfortunately we cannot give more hint as per which log that is
                log.warn("Splitting long line: {line_start} {length} (not warning anymore for this log)",
                         line_start=self.partialLine[:30],
                         length=len(self.partialLine))
                self.warned = True
            # switch the variables, and return previous _partialLine_,
            # split every MAX_LINELENGTH plus a trailing \n
            self.partialLine, text = text, self.partialLine
            ret = []
            while len(text) > self.MAX_LINELENGTH:
                ret.append(text[:self.MAX_LINELENGTH])
                text = text[self.MAX_LINELENGTH:]
            ret.append(text)
            return self.callback("\n".join(ret) + "\n")
        # Normal case: prepend the buffered partial line.
        text = self.partialLine + text
        self.partialLine = None
    # Normalize platform newlines to '\n'.
    text = self.newline_re.sub('\n', text)
    if text:
        if text[-1] != '\n':
            # Text ends mid-line: emit up to the last newline and keep
            # the remainder buffered.
            i = text.rfind('\n')
            if i >= 0:
                i = i + 1
                text, self.partialLine = text[:i], text[i:]
            else:
                # No complete line at all yet.
                self.partialLine = text
                return defer.succeed(None)
        return self.callback(text)
    return defer.succeed(None)
def after_reply(reply, proto, fetched=0):
    """Decode one MongoDB reply batch and chain fetching of the next.

    Returns ``(decoded_documents, deferred_for_next_batch)``.  The
    second element fires with ``([], None)`` when the cursor is
    exhausted or closed.  *fetched* counts documents consumed so far,
    used to honour *limit* (captured from the enclosing scope).
    """
    documents = reply.documents
    docs_count = len(documents)
    if limit > 0:
        # Never hand back more than the remaining quota.
        docs_count = min(docs_count, limit - fetched)
    fetched += docs_count
    options = bson.codec_options.CodecOptions(document_class=as_class)
    out = [document.decode(codec_options=options)
           for document in documents[:docs_count]]
    if reply.cursor_id:
        if limit == 0:
            to_fetch = 0  # no limit
        elif limit < 0:
            # We won't actually get here because MongoDB won't
            # create cursor when limit < 0
            to_fetch = None
        else:
            to_fetch = limit - fetched
            if to_fetch <= 0:
                to_fetch = None  # close cursor
        if to_fetch is None:
            # Quota exhausted: tell the server to drop the cursor.
            proto.send_KILL_CURSORS(KillCursors(cursors=[reply.cursor_id]))
            return out, defer.succeed(([], None))
        next_reply = proto.send_GETMORE(
            Getmore(collection=str(self), cursor_id=reply.cursor_id,
                    n_to_return=to_fetch)
        )
        # Recurse on the next batch, carrying the running count.
        next_reply.addCallback(after_reply, proto, fetched)
        return out, next_reply
    return out, defer.succeed(([], None))
def _test_nested_change(case, outer_factory, inner_factory):
    """
    Assert that ``IChangeState`` providers wrapped inside ``inner_factory``
    wrapped inside ``outer_factory`` are run with the same deployer argument
    as is passed to ``run_state_change``.

    :param TestCase case: A running test.
    :param outer_factory: ``sequentially`` or ``in_parallel``; builds the
        top-level change handed to ``run_state_change``.
    :param inner_factory: ``sequentially`` or ``in_parallel``; builds the
        nested change inside the top-level one.

    :raise: A test failure if the inner change is not run with the same
        deployer as is passed to ``run_state_change``.
    """
    tracked = ControllableAction(result=succeed(None))
    siblings = [
        ControllableAction(result=succeed(None)),
        inner_factory(changes=[tracked]),
        ControllableAction(result=succeed(None)),
    ]
    run_state_change(outer_factory(changes=siblings), DEPLOYER)
    case.assertEqual(
        (True, DEPLOYER),
        (tracked.called, tracked.deployer)
    )
def _initEvent(self):
    """Create the single "event_to_update.ics" event this task updates,
    unless the client is stopped or the event already exists."""
    if not self._client.started:
        return succeed(None)
    calendar = self._calendarsOfType(caldavxml.calendar, "VEVENT")[0]
    # If it already exists, don't re-create
    if calendar.events:
        existing = [e for e in calendar.events.values()
                    if e.url.endswith("event_to_update.ics")]
        if existing:
            return succeed(None)
    # Copy the template event and fill in some of its fields
    # to make a new event to create on the calendar.
    vcalendar = self._eventTemplate.duplicate()
    vevent = vcalendar.mainComponent()
    uid = str(uuid4())
    dtstart = self._eventStartDistribution.sample()
    dtend = dtstart + Duration(
        seconds=self._eventDurationDistribution.sample())
    vevent.replaceProperty(Property("CREATED", DateTime.getNowUTC()))
    vevent.replaceProperty(Property("DTSTAMP", DateTime.getNowUTC()))
    vevent.replaceProperty(Property("DTSTART", dtstart))
    vevent.replaceProperty(Property("DTEND", dtend))
    vevent.replaceProperty(Property("UID", uid))
    rrule = self._recurrenceDistribution.sample()
    if rrule is not None:
        vevent.addProperty(Property(None, None, None, pycalendar=rrule))
    href = '%s%s' % (calendar.url, "event_to_update.ics")
    return self._newOperation(
        "create", self._client.addEvent(href, vcalendar))
def on_qq_chang_status(self, message):
    '''
    Final step of the automatic login sequence: from here on we keep
    sending keep-alive packets to show that we are online.
    (translated from Chinese)
    '''
    self.qq.log.info("您当前的状态为:在线")
    # Send a keep-alive packet every minute from now on.
    # NOTE(review): wrapping self.alive()'s return value in
    # defer.succeed() and discarding it looks pointless -- presumably
    # self.alive() schedules itself; confirm before cleaning this up.
    defer.succeed(self.alive())
def test_bookmarklet(self):
    """
    Does api/bookmarklet fetch, save, and return a response for the
    recipe?

    NOTE(review): uses ``yield``, so this must be wrapped by
    ``inlineCallbacks`` (decorator not visible in this view).
    """
    fromTest = fromdir(__file__)
    loc = fromTest('recipe_page_source.html')
    # NOTE(review): the file handle is never closed; tolerable in a test.
    pageSource = open(loc).read()
    # Patch treq so no real HTTP request is made; content() yields the
    # saved recipe page instead.
    pGet = patch.object(treq, 'get', return_value=defer.succeed(None),
                        autospec=True)
    pTreqContent = patch.object(treq, 'content',
                                return_value=defer.succeed(pageSource),
                                autospec=True)
    with pGet, pTreqContent:
        # normal bookmarketing
        u = self._users()[0]
        req = self.requestJSON([], session_user=u)
        req.args['uri'] = ['http://www.foodandwine.com/recipes/poutine-style-twice-baked-potatoes']
        ret = yield self.handler('bookmarklet', req)
        self.assertEqual(len(recipe.Recipe.objects()), 1)
        expectedResults = '{"status": "ok", "recipes": [{"name": "Delicious Meatless Meatballs", "urlKey": "weirdo-gmail-com-delicious-meatless-meatballs-"}], "message": ""}'
        assert ret == expectedResults
        # # not signed in to noms; bookmarketing should not be allowed
        req = self.requestJSON([])
        req.args['uri'] = ['http://www.foodandwine.com/recipes/poutine-style-twice-baked-potatoes']
        ret = yield self.handler('bookmarklet', req)
        expectedResults = '{"status": "error", "recipes": [], "message": "User was not logged in."}'
        assert ret == expectedResults
def start(self):
    """
    Start TLS negotiation.

    Decide what to do from the ``wanted``/``required`` flags and the
    peer's advertised requirements:

    - TLS not wanted locally but required by the peer: fail with
      ``TLSRequired``.
    - TLS not wanted and not required: succeed immediately (skip TLS).
    - TLS wanted but the SSL library is unavailable: fail with
      ``TLSNotSupported`` if required, otherwise succeed (skip TLS).
    - Otherwise: send ``<starttls/>`` and wait for the peer's answer.
    """
    if not self.wanted:
        if self.xmlstream.features[self.feature].required:
            return defer.fail(TLSRequired())
        return defer.succeed(None)
    if ssl is None:
        if self.required:
            return defer.fail(TLSNotSupported())
        return defer.succeed(None)
    self._deferred = defer.Deferred()
    self.xmlstream.addOnetimeObserver("/proceed", self.onProceed)
    self.xmlstream.addOnetimeObserver("/failure", self.onFailure)
    self.xmlstream.send(domish.Element((NS_XMPP_TLS, "starttls")))
    return self._deferred
def on_qq_login(self,message):
    '''
    Part of the automatic login sequence.  On success ``self.qq.login``
    is set to 1, otherwise it stays unset; library users can check that
    attribute to see whether login succeeded.
    (translated from Chinese)
    '''
    if message.body.fields['status'][0]==1:
        # Server redirect: remember the new server address and retry the
        # pre-login step there.
        #self.transport.connect(util.ip2string(message.body.fields['ip']),8000)
        self.qq.server=(util.ip2string(message.body.fields['ip']),8000)
        defer.succeed(self.pre_login())
    else:
        if message.body.fields['status'][0]==5:
            # Wrong password / rejected: server supplies the reason text.
            print message.body.fields['data'][0]
        elif message.body.fields['status'][0]==6:
            # Account locked; must be re-activated via the web.
            self.printl('您的号码[' + str(self.qq.id) + ']可能存在异常情况,已受到限制登录保护,需激活后才能正常登录。\
激活地址是:\
电信或网通用户 :im.qq.com/jh或activate.qq.com\
教育网用户: activateedu.qq.com')
        else:
            # Any other status: treat as successful login, store the
            # session and immediately announce our online status.
            self.printl('登陆成功')
            self.qq.login = 1
            self.qq.session=message.body.fields['session']
            message = qqmsg.outqqMessage(self.qq)
            message.setMsgName('qq_chang_status')
            message.body.setField('online',basic.QQ_status['online'])
            message.body.setField('video',basic.QQ_video)
            self.sendDataToQueue(message)
def actionAllowed(self, action, request, *args):
    """Is this ACTION allowed, given this http REQUEST?

    Returns a Deferred firing with a truthy value when allowed.  Raises
    ``KeyError`` for an action not in ``self.knownActions``.  When the
    action's config is ``'auth'`` or a callable, the request must be
    authenticated (via cookie session or explicit password login) and,
    for a callable config, the callable must approve the username.
    """
    if action not in self.knownActions:
        raise KeyError("unknown action")
    cfg = self.config.get(action, False)
    if cfg:
        if cfg == 'auth' or callable(cfg):
            if not self.auth:
                # Auth demanded but no authenticator configured: deny.
                return defer.succeed(False)
            def check_authenticate(res):
                # A callable config acts as a per-user predicate.
                if callable(cfg) and not cfg(self.getUsername(request), *args):
                    return False
                return True
            # retain old behaviour, if people have scripts
            # without cookie support
            passwd = self.getPassword(request)
            if self.authenticated(request):
                return defer.succeed(check_authenticate(None))
            elif passwd != "<no-password>":
                def check_login(cookie):
                    # login() fires with a cookie string on success; the
                    # throw-away session is removed right after checking.
                    ret = False
                    if type(cookie) is str:
                        ret = check_authenticate(None)
                        self.sessions.remove(cookie)
                    return ret
                d = self.login(request)
                d.addBoth(check_login)
                return d
            else:
                return defer.succeed(False)
    # Non-auth config values (including False) are returned as-is.
    return defer.succeed(cfg)
def test_single_service_up_txn_not_sent(self):
    # Test: The AS is up and the txn is not sent. A Recoverer is made and
    # started.
    service = Mock()
    events = [Mock(), Mock()]
    txn = Mock(id="foobar", service=service, events=events)
    # mock methods
    self.store.get_appservice_state = Mock(
        return_value=defer.succeed(ApplicationServiceState.UP))
    self.store.set_appservice_state = Mock(
        return_value=defer.succeed(True))
    txn.send = Mock(return_value=defer.succeed(False))  # fails to send
    self.store.create_appservice_txn = Mock(
        return_value=defer.succeed(txn))
    # actual call
    self.txnctrl.send(service, events)
    self.store.create_appservice_txn.assert_called_once_with(
        service=service, events=events)
    self.assertEquals(1, self.recoverer_fn.call_count)  # recoverer made
    self.assertEquals(1, self.recoverer.recover.call_count)  # and invoked
    self.assertEquals(1, len(self.txnctrl.recoverers))  # and stored
    self.assertEquals(0, txn.complete.call_count)  # txn not completed
    self.store.set_appservice_state.assert_called_once_with(
        service, ApplicationServiceState.DOWN  # service marked as down
    )
def verifyHostKey(self, pubKey, fingerprint): goodKey = self.isInKnownHosts(options['host'], pubKey) if goodKey == 1: # good key return defer.succeed(1) elif goodKey == 2: # AAHHHHH changed return defer.fail(ConchError('changed host key')) else: oldout, oldin = sys.stdout, sys.stdin sys.stdin = sys.stdout = open('/dev/tty','r+') if options['host'] == self.transport.getPeer()[1]: host = options['host'] khHost = options['host'] else: host = '%s (%s)' % (options['host'], self.transport.getPeer()[1]) khHost = '%s,%s' % (options['host'], self.transport.getPeer()[1]) keyType = common.getNS(pubKey)[0] print """The authenticity of host '%s' can't be extablished. %s key fingerprint is %s.""" % (host, {'ssh-dss':'DSA', 'ssh-rsa':'RSA'}[keyType], fingerprint) ans = raw_input('Are you sure you want to continue connecting (yes/no)? ') while ans.lower() not in ('yes', 'no'): ans = raw_input("Please type 'yes' or 'no': ") sys.stdout,sys.stdin=oldout,oldin if ans == 'no': print 'Host key verification failed.' return defer.fail(ConchError('bad host key')) print "Warning: Permanently added '%s' (%s) to the list of known hosts." % (khHost, {'ssh-dss':'DSA', 'ssh-rsa':'RSA'}[keyType]) known_hosts = open(os.path.expanduser('~/.ssh/known_hosts'), 'a') encodedKey = base64.encodestring(pubKey).replace('\n', '') known_hosts.write('\n%s %s %s' % (khHost, keyType, encodedKey)) known_hosts.close() return defer.succeed(1)
def test_blocking_mau(self):
    """check_auth_blocking raises only when MAU limiting is enabled and
    the monthly-active count exceeds the configured maximum."""
    self.hs.config.limit_usage_by_mau = False
    self.hs.config.max_mau_value = 50
    over_the_limit = 100
    under_the_limit = 1
    # Ensure no error thrown while limiting is disabled
    yield self.auth.check_auth_blocking()
    self.hs.config.limit_usage_by_mau = True
    self.store.get_monthly_active_count = Mock(
        return_value=defer.succeed(over_the_limit))
    with self.assertRaises(ResourceLimitError) as e:
        yield self.auth.check_auth_blocking()
    self.assertEquals(e.exception.admin_contact,
                      self.hs.config.admin_contact)
    self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
    self.assertEquals(e.exception.code, 403)
    # Ensure does not throw an error once under the cap again
    self.store.get_monthly_active_count = Mock(
        return_value=defer.succeed(under_the_limit))
    yield self.auth.check_auth_blocking()
def _try_to_connect(reactor, port, stdout, txtorcon):
    """Record every attempt; only the expected port on a reachable
    network yields the tor state."""
    tried.append((reactor, port, stdout, txtorcon))
    if reachable and port == expected_port:
        # second one on the list
        return defer.succeed(tor_state)
    return defer.succeed(None)
def getMapper(): # We prefer UPnP when available, as it's more robust global _installedShutdownHook if not _installedShutdownHook: from twisted.internet import reactor t = reactor.addSystemEventTrigger('after', 'shutdown', clearCache) _installedShutdownHook = True try: from __main__ import app except: app = None natPref = 'both' if app is not None: print "app is", app natPref = app.getPref('nat') log.msg('NAT preference says to use %s'%(natPref)) if _forcedMapper is not None: return defer.succeed(_forcedMapper) from xshtoom.stun import getSTUN if natPref == 'stun': ud = getSTUN() d = defer.DeferredList([defer.succeed(None), ud]) else: nm = NullMapper() d = defer.DeferredList([defer.succeed(None), defer.succeed(None)]) d.addCallback(cb_getMapper).addErrback(log.err) return d
def test_convergence_sent_state_fail_resends(self):
    """
    If sending state to the control node fails the next iteration will
    send state even if the state hasn't changed.
    """
    node_state = NodeState(hostname=u'192.0.2.123')
    config = Deployment(nodes=[to_node(node_state)])
    cluster_state = DeploymentState(nodes=[node_state])
    deployer = ControllableDeployer(
        node_state.hostname,
        [succeed(node_state), succeed(node_state.copy())],
        [no_action(), no_action()])
    client = self.make_amp_client(
        [node_state, node_state.copy()], succeed=False)
    clock = Clock()
    loop = build_convergence_loop_fsm(clock, deployer)
    loop.receive(_ClientStatusUpdate(
        client=client, configuration=config, state=cluster_state))
    clock.advance(1.0)
    # Calculating actions happened, result was run... and then we did
    # whole thing again:
    self.assertTupleEqual(
        (deployer.calculate_inputs, client.calls),
        (
            # Check that the loop has run twice
            [(node_state, config, cluster_state),
             (node_state, config, cluster_state)],
            # And that state was re-sent even though it remained unchanged
            [(NodeStateCommand, dict(state_changes=(node_state,))),
             (NodeStateCommand, dict(state_changes=(node_state,)))],
        )
    )
def test_convergence_done_delays_new_iteration(self, logger):
    """
    An FSM completing the changes from one convergence iteration doesn't
    instantly start another iteration.
    """
    self.local_state = node_state = NodeState(hostname=u'192.0.2.123')
    self.configuration = config = Deployment()
    self.cluster_state = received = DeploymentState(nodes=[])
    self.action = action = ControllableAction(result=succeed(None))
    deployer = ControllableDeployer(
        node_state.hostname, [succeed(node_state)], [action])
    client = self.make_amp_client([node_state])
    clock = Clock()
    loop = build_convergence_loop_fsm(clock, deployer)
    self.patch(loop, "logger", logger)
    loop.receive(_ClientStatusUpdate(
        client=client, configuration=config, state=received))
    expected = DeploymentState(nodes=[node_state])
    # Calculating actions happened and the result was run.
    self.assertTupleEqual(
        (deployer.calculate_inputs, client.calls),
        ([(node_state, config, expected)],
         [(NodeStateCommand, dict(state_changes=(node_state,)))]))
def test_parse_relative_path(self):
    # this makes sure we convert a relative path to absolute
    # hiddenServiceDir args. see Issue #77
    # make sure we have a valid thing from get_global_tor without
    # actually launching tor
    config = TorConfig()
    config.post_bootstrap = defer.succeed(config)
    from txtorcon import torconfig
    torconfig._global_tor_config = None
    get_global_tor(
        self.reactor,
        _tor_launcher=lambda react, config, prog: defer.succeed(config))
    previous_cwd = os.path.realpath('.')
    try:
        with util.TempDir() as tmp:
            tmp = str(tmp)
            os.chdir(tmp)
            os.mkdir(os.path.join(tmp, 'foo'))
            hsdir = os.path.join(tmp, 'foo', 'blam')
            os.mkdir(hsdir)
            ep = serverFromString(
                self.reactor,
                'onion:88:localPort=1234:hiddenServiceDir=foo/blam')
            self.assertEqual(
                os.path.realpath(hsdir), ep.hidden_service_dir)
    finally:
        os.chdir(previous_cwd)
def test_convergence_done_unchanged_notify(self):
    """
    An FSM doing convergence that discovers state unchanged from the
    last state acknowledged by the control service does not re-send that
    state.
    """
    node_state = NodeState(hostname=u'192.0.2.123')
    config = Deployment(nodes=[to_node(node_state)])
    cluster_state = DeploymentState(nodes=[node_state])
    deployer = ControllableDeployer(
        node_state.hostname,
        [succeed(node_state), succeed(node_state.copy())],
        [no_action(), no_action()])
    client = self.make_amp_client([node_state])
    clock = Clock()
    loop = build_convergence_loop_fsm(clock, deployer)
    loop.receive(_ClientStatusUpdate(
        client=client, configuration=config, state=cluster_state))
    clock.advance(1.0)
    # Calculating actions happened, result was run... and then we did
    # whole thing again:
    self.assertEqual(
        (deployer.calculate_inputs, client.calls),
        (
            # Check that the loop has run twice
            [(node_state, config, cluster_state),
             (node_state, config, cluster_state)],
            # But that state was only sent once.
            [(NodeStateCommand, dict(state_changes=(node_state,)))],
        )
    )
def resolve(self, guid):
    """
    Given a guid return a `Node` object containing its ip and port or
    None if it's not found.

    Args:
        guid: the 20 raw bytes representing the guid.
    """
    target = Node(guid)
    def check_for_node(nodes):
        # Pick out the exact node from the crawl result, if present.
        for candidate in nodes:
            if candidate.id == target.id:
                return candidate
        return None
    bucket_index = self.protocol.router.getBucketFor(target)
    # Fast path: the node may already be in the local routing table.
    for known in self.protocol.router.buckets[bucket_index].getNodes():
        if known.id == target.id:
            return defer.succeed(known)
    nearest = self.protocol.router.findNeighbors(target)
    if not nearest:
        self.log.warning("there are no known neighbors to find node %s"
                         % target.id.encode("hex"))
        return defer.succeed(None)
    crawler = NodeSpiderCrawl(self.protocol, target, nearest,
                              self.ksize, self.alpha)
    return crawler.find().addCallback(check_for_node)
def test_parse_user_path(self):
    # this makes sure we expand users and symlinks in
    # hiddenServiceDir args. see Issue #77
    # make sure we have a valid thing from get_global_tor without
    # actually launching tor
    config = TorConfig()
    config.post_bootstrap = defer.succeed(config)
    from txtorcon import torconfig
    torconfig._global_tor_config = None
    get_global_tor(
        self.reactor,
        _tor_launcher=lambda react, config, prog: defer.succeed(config))
    endpoint = serverFromString(
        self.reactor,
        'onion:88:localPort=1234:hiddenServiceDir=~/blam/blarg')
    # would be nice to have a fixed path here, but then would have
    # to run as a known user :/
    # maybe using the docker stuff to run integration tests better here?
    self.assertEqual(
        os.path.expanduser('~/blam/blarg'),
        endpoint.hidden_service_dir)
def cb(res):
    """Turn an (stdout, stderr, exit) triple into success or failure."""
    out, err, status = res
    if errortoo:
        # Caller asked for both streams merged, regardless of stderr.
        return defer.succeed(out + err)
    if err:
        return defer.fail(IOError("got stderr: %r" % (err,)))
    return defer.succeed(out)
def stop(self):
    """Stop the wrapped component if present; otherwise fire now."""
    self.debug('stop()')
    if not self.comp:
        return defer.succeed(None)
    return self.comp.stop()
def test_serializeDeferred(self):
    """
    A fired Deferred is substituted with its current callback-chain
    value when flattened.
    """
    already_fired = succeed("two")
    self.assertFlattensImmediately(already_fired, b"two")
def verifyHostKey(self, hostKey, fingerprint):
    """Assume all host keys are valid even if they changed."""
    return defer.succeed(True)
def reconfigService(self):
    """Count reconfigurations and mark this fake service configured."""
    self.reconfig_count += 1
    self.configured = True
    return defer.succeed(None)
def fromChdict(master, chdict):
    """Fake factory: only accept the expected test chdict, returning the
    canned change object (captured from the enclosing scope)."""
    if chdict['author'] == 'this is a test':
        return defer.succeed(self.fake_Change)
    raise AssertionError("did not get expected chdict")
def run_flow(self, duration, tasks=None, start_d=None, start_flow=True,
             stop_flow=True):
    """Run the flow for *duration* seconds, optionally alongside extra
    task deferreds, with timeouts on start, run and stop.

    Returns a Deferred firing with the flow's result once the run (and,
    when *stop_flow* is set, the stop) completed.  Errors surface as
    ``StartTimeout``, ``FlowTimeout`` or ``StopTimeout`` depending on
    which phase overran ``self.guard_timeout``.
    """
    self.debug('run_flow: tasks: %r' % (tasks, ))
    flow_d = start_d
    if tasks is None:
        tasks = []
    if flow_d is None:
        if start_flow:
            flow_d = self.start_flow()
        else:
            flow_d = defer.succeed(True)
    # [started?, finished?] flags consulted by the timeout handler to
    # decide which phase overran.
    flow_started_finished = [False, False]
    guard_d = None
    timeout_d = defer.Deferred()
    stop_d = defer.Deferred()
    stop_timeout_d = defer.Deferred()
    chained_d = None
    callids = [None, None, None]  # callLater ids: stop_d,
    # timeout_d, fire_chained
    if tasks:
        # if have tasks, run simultaneously with the main timer deferred
        chained_d = defer.DeferredList([stop_d] + tasks,
                                       fireOnOneErrback=1,
                                       consumeErrors=1)
        def chained_failed(failure):
            # Unwrap DeferredList's FirstError to the real failure.
            self.info('chained_failed: %r' % (failure, ))
            failure.trap(defer.FirstError)
            return failure.value.subFailure
        chained_d.addErrback(chained_failed)
    else:
        # otherwise, just idle...
        chained_d = stop_d
    def start_complete(result):
        # Flow started: arm the duration timer and kick off the tasks.
        self.debug('start_complete: %r' % (result, ))
        flow_started_finished[0] = True
        callids[0] = reactor.callLater(duration, stop_d.callback, None)
        if tasks:
            def _fire_chained():
                callids[2] = None
                for t in tasks:
                    try:
                        t.callback(result)
                    except defer.AlreadyCalledError:
                        pass
            callids[2] = reactor.callLater(0, _fire_chained)
        return chained_d
    def flow_complete(result):
        self.debug('flow_complete: %r' % (result, ))
        flow_started_finished[1] = True
        return result
    def flow_timed_out():
        # Guard timer fired: report the phase that did not finish.
        self.debug('flow_timed_out!')
        if not flow_started_finished[0]:
            timeout_d.errback(StartTimeout('flow start timed out'))
        elif not flow_started_finished[1]:
            timeout_d.errback(FlowTimeout('flow run timed out'))
        else:
            stop_timeout_d.errback(StopTimeout('flow stop timed out'))
    def clean_calls(result):
        # Cancel any still-pending timers so the reactor ends clean.
        self.debug('clean_calls: %r' % (result, ))
        for i, cid in enumerate(callids):
            if cid is not None:
                if cid.active():
                    cid.cancel()
                callids[i] = None
        return result
    flow_d.addCallback(start_complete)
    flow_d.addCallback(flow_complete)
    guard_d = defer.DeferredList([flow_d, timeout_d], consumeErrors=1,
                                 fireOnOneErrback=1, fireOnOneCallback=1)
    def guard_failed(failure):
        self.info('guard_failed: %r' % (failure, ))
        failure.trap(defer.FirstError)
        return failure.value.subFailure
    if stop_flow:
        def _force_stop_flow(result):
            # Always stop the flow afterwards, racing the stop timeout.
            self.debug('_force_stop_flow: %r' % (result, ))
            d = defer.DeferredList([self.stop_flow(), stop_timeout_d],
                                   fireOnOneErrback=1,
                                   fireOnOneCallback=1,
                                   consumeErrors=1)
            def _return_orig_result(stop_result):
                if isinstance(result, failure.Failure):
                    # always return the run's failure first
                    # what do I return if both the run and stop failed?
                    self.debug('_return_orig[R]: %r' % (result, ))
                    return result
                elif isinstance(stop_result, failure.Failure):
                    # return failure from stop
                    self.debug('_return_orig[S]: %r' % (stop_result, ))
                    return stop_result
                return result
            def force_stop_failed(failure):
                self.info('force_stop_failed: %r' % (failure, ))
                failure.trap(defer.FirstError)
                return failure.value.subFailure
            d.addCallbacks(lambda r: r[0], force_stop_failed)
            d.addBoth(_return_orig_result)
            return d
        guard_d.addBoth(_force_stop_flow)
    guard_d.addErrback(guard_failed)
    guard_d.addBoth(clean_calls)
    callids[1] = reactor.callLater(self.guard_timeout, flow_timed_out)
    return guard_d
def add_delay(value):
    """Optionally interpose a delay before passing *value* along."""
    self.debug('** 3: add_delay: %r, %r' % (delay, value))
    if not delay:
        return defer.succeed(value)
    return delayed_d(delay, value)
def startStep1(*args, **kw):
    """As soon as the first step starts, interrupt the build."""
    # Now interrupt the build
    b.stopBuild("stop it")
    return defer.succeed(SUCCESS)
def getBuilderId(self):
    """Fire immediately with this fake step's builder id."""
    return defer.succeed(self.builderid)
def startStep2(*args, **kw):
    """Record that step two ran, then succeed."""
    step2Started[0] = True
    return defer.succeed(SUCCESS)
def prepare(self, reactor, clock, hs):
    """Wire the typing handler up against a heavily-stubbed homeserver.

    Replaces every datastore/federation call the typing handler touches
    with canned in-memory answers, so tests can drive typing state via
    ``self.room_members`` alone.
    """
    mock_notifier = hs.get_notifier()
    self.on_new_event = mock_notifier.on_new_event
    self.handler = hs.get_typing_handler()
    self.event_source = hs.get_event_sources().sources["typing"]
    self.datastore = hs.get_datastore()
    # Pretend every destination is immediately retryable.
    retry_timings_res = {
        "destination": "",
        "retry_last_ts": 0,
        "retry_interval": 0,
        "failure_ts": None,
    }
    self.datastore.get_destination_retry_timings = Mock(
        return_value=defer.succeed(retry_timings_res))
    self.datastore.get_device_updates_by_remote = Mock(
        return_value=make_awaitable((0, [])))
    self.datastore.get_destination_last_successful_stream_ordering = Mock(
        return_value=make_awaitable(None))
    def get_received_txn_response(*args):
        # No transaction has ever been seen before.
        return defer.succeed(None)
    self.datastore.get_received_txn_response = get_received_txn_response
    # Membership is driven entirely by this list in each test.
    self.room_members = []
    async def check_user_in_room(room_id, user_id):
        if user_id not in [u.to_string() for u in self.room_members]:
            raise AuthError(401, "User is not in the room")
        return None
    hs.get_auth().check_user_in_room = check_user_in_room
    def get_joined_hosts_for_room(room_id):
        return {member.domain for member in self.room_members}
    self.datastore.get_joined_hosts_for_room = get_joined_hosts_for_room
    async def get_users_in_room(room_id):
        return {str(u) for u in self.room_members}
    self.datastore.get_users_in_room = get_users_in_room
    self.datastore.get_user_directory_stream_pos = Mock(side_effect=(
        # we deliberately return a non-None stream pos to avoid doing an initial_spam
        lambda: make_awaitable(1)))
    self.datastore.get_current_state_deltas = Mock(return_value=(0, None))
    self.datastore.get_to_device_stream_token = lambda: 0
    self.datastore.get_new_device_msgs_for_remote = (
        lambda *args, **kargs: make_awaitable(([], 0)))
    self.datastore.delete_device_msgs_for_remote = (
        lambda *args, **kargs: make_awaitable(None))
    self.datastore.set_received_txn_response = (
        lambda *args, **kwargs: make_awaitable(None))
def getBuilderIdForName(self, name):
    """Map a builder name to its id, defaulting to self.builderid.

    NOTE(review): a falsy stored id (e.g. 0) also falls through to the
    default because of ``or`` -- presumably ids are never 0.
    """
    found = self._builders.get(name, None)
    return defer.succeed(found or self.builderid)
def getThing(self, key):
    """Record the lookup, then fire with key doubled."""
    if self.invocations is None:
        # Lazily create the invocation log on first use.
        self.invocations = []
    self.invocations.append(key)
    return defer.succeed(key * 2)
def register(self, worker):
    """Create, remember and return a registration for *worker*."""
    # TODO: doc that reg.update must be called, too
    registration = WorkerRegistration(self.master, worker)
    self.registrations[worker.workername] = registration
    return defer.succeed(registration)
def waitUntilFinished(self):
    """This fake is always finished; fire immediately with ourselves."""
    return defer.succeed(self)
def get_received_txn_response(*args):
    """Pretend no transaction has ever been responded to before."""
    return defer.succeed(None)
def registerInDB(self):
    """Run the DB registration when a pool is configured; no-op else."""
    if not self.dbpool:
        return defer.succeed(0)
    return self.runInteractionWithRetry(self.doRegisterInDB)
def api_data(self, request, name):
    """Serve the stored pull request as JSON, or an error string.

    NOTE(review): the miss branch returns a plain string while the hit
    branch returns a Deferred -- presumably the caller handles both.
    """
    if name not in self.pulls:
        return name + ' not exists'
    request.setHeader('content-type', 'application/json')
    return defer.succeed(self.pulls[name].getJson())
def queue(self, *args, **kw):
    """Accept anything and hand back a fresh fake job id."""
    return defer.succeed(str(uuid4()))
def _insert_client_ip(*args, **kwargs):
    """No-op stand-in for the client-IP bookkeeping call."""
    return defer.succeed(None)
def eomReceived(self):
    """End of message: join the buffered lines and store the mail."""
    print("New message received.")
    self.lines.append('')  # Add a trailing newline.
    self.mailbox.add('\n'.join(self.lines))
    return defer.succeed(None)
def waitForResult(self, *args, **kw):
    """Nothing to wait for in this fake; fire immediately."""
    return defer.succeed(None)
def get_pagination_rows(self, user, pagination_config, key):
    """Null event source: no rows and an unchanged token."""
    return defer.succeed(([], pagination_config.from_key))
def clusterQueues(self):
    """Report a single queue holding one fake job."""
    return defer.succeed(
        {self.plug.queue_name: [{'uuid': str(uuid4())}]})
def user_rooms_intersect(user_list):
    """Fake datastore check: do all ids in *user_list* share our room?

    Fix: the original built ``room_member_ids`` with ``map`` and then
    probed it repeatedly inside another ``map``; under Python 3 the
    first ``map`` is a one-shot iterator, so every membership test
    after the first saw an exhausted sequence.  Materialize the ids
    once instead.
    """
    room_member_ids = [u.to_string() for u in self.room_members]
    shared = all(uid in room_member_ids for uid in user_list)
    return defer.succeed(shared)
def get_presence_list(*a, **kw):
    """Pretend the user has an empty presence list."""
    return defer.succeed([])
def get_profile_displayname(user_id):
    """Every user is called Frank in this fixture."""
    return defer.succeed("Frank")
def get_current_key(self, direction='f'):
    """Null stream source: the current key is always 0."""
    return defer.succeed(0)
def setUp(self):
    """Build a homeserver whose event sources, datastore and clock are
    all stubbed, leaving only presence handling real.

    NOTE(review): uses ``yield``, so this must be wrapped by
    ``inlineCallbacks`` (decorator not visible in this view).
    """
    self.mock_resource = MockHttpResource(prefix=PATH_PREFIX)
    # HIDEOUS HACKERY
    # TODO(paul): This should be injected in via the HomeServer DI system
    from synapse.streams.events import (PresenceEventSource, EventSources)
    old_SOURCE_TYPES = EventSources.SOURCE_TYPES
    def tearDown():
        # Restore the class-level source table we are about to clobber.
        EventSources.SOURCE_TYPES = old_SOURCE_TYPES
    self.tearDown = tearDown
    # Null out every event source except presence, which is under test.
    EventSources.SOURCE_TYPES = {
        k: NullSource for k in old_SOURCE_TYPES.keys()
    }
    EventSources.SOURCE_TYPES["presence"] = PresenceEventSource
    hs = yield setup_test_homeserver(
        http_client=None,
        resource_for_client=self.mock_resource,
        resource_for_federation=self.mock_resource,
        datastore=Mock(spec=[
            "set_presence_state",
            "get_presence_list",
            "get_rooms_for_user",
        ]),
        clock=Mock(spec=[
            "call_later",
            "cancel_call_later",
            "time_msec",
            "looping_call",
        ]),
    )
    hs.get_clock().time_msec.return_value = 1000000
    def _get_user_by_req(req=None, allow_guest=False):
        # Every request authenticates as the fixture user `myid`.
        return (UserID.from_string(myid), "", False)
    hs.get_v1auth().get_user_by_req = _get_user_by_req
    presence.register_servlets(hs, self.mock_resource)
    events.register_servlets(hs, self.mock_resource)
    hs.handlers.room_member_handler = Mock(spec=[])
    # Membership is driven entirely by this list in each test.
    self.room_members = []
    def get_rooms_for_user(user):
        if user in self.room_members:
            return ["a-room"]
        else:
            return []
    hs.handlers.room_member_handler.get_joined_rooms_for_user = get_rooms_for_user
    hs.handlers.room_member_handler.get_room_members = (
        lambda r: self.room_members if r == "a-room" else [])
    hs.handlers.room_member_handler._filter_events_for_client = (
        lambda user_id, events, **kwargs: events)
    self.mock_datastore = hs.get_datastore()
    self.mock_datastore.get_app_service_by_token = Mock(return_value=None)
    self.mock_datastore.get_app_service_by_user_id = Mock(
        return_value=defer.succeed(None))
    self.mock_datastore.get_rooms_for_user = (lambda u: [
        namedtuple("Room", "room_id")(r)
        for r in get_rooms_for_user(UserID.from_string(u))
    ])
    def get_profile_displayname(user_id):
        return defer.succeed("Frank")
    self.mock_datastore.get_profile_displayname = get_profile_displayname
    def get_profile_avatar_url(user_id):
        return defer.succeed(None)
    self.mock_datastore.get_profile_avatar_url = get_profile_avatar_url
    def user_rooms_intersect(user_list):
        # NOTE(review): under Python 3 this first map() is a one-shot
        # iterator, so repeated membership tests would see it exhausted;
        # fine under Python 2 where map() returns a list.
        room_member_ids = map(lambda u: u.to_string(), self.room_members)
        shared = all(map(lambda i: i in room_member_ids, user_list))
        return defer.succeed(shared)
    self.mock_datastore.user_rooms_intersect = user_rooms_intersect
    def get_joined_hosts_for_room(room_id):
        return []
    self.mock_datastore.get_joined_hosts_for_room = get_joined_hosts_for_room
    self.presence = hs.get_handlers().presence_handler
    self.u_apple = UserID.from_string("@apple:test")
    self.u_banana = UserID.from_string("@banana:test")
def get_profile_avatar_url(user_id):
    """No avatars in this fixture."""
    return defer.succeed(None)