# Second fixture credential pair: password hash derived from the module's
# VALID_PASSWORD2/VALID_SALT2 constants, plus a known-bad password.
VALID_HASH2 = security.hash_password(VALID_PASSWORD2, VALID_SALT2)
INVALID_PASSWORD = u'antani'
FIXTURES_PATH = os.path.join(TEST_DIR, 'fixtures')

# Load PGP key fixtures from disk as unicode text (Python 2 codebase).
with open(os.path.join(TEST_DIR, 'keys/valid_pgp_key1.txt')) as pgp_file:
    VALID_PGP_KEY1 = unicode(pgp_file.read())
with open(os.path.join(TEST_DIR, 'keys/valid_pgp_key2.txt')) as pgp_file:
    VALID_PGP_KEY2 = unicode(pgp_file.read())
with open(os.path.join(TEST_DIR, 'keys/expired_pgp_key.txt')) as pgp_file:
    EXPIRED_PGP_KEY = unicode(pgp_file.read())

# Replace the real thread pool and the reactors used by the schedulers with
# deterministic test doubles, so timed calls only fire when a test advances
# the fake clock explicitly.
transact.tp = FakeThreadPool()
authentication.reactor_override = task.Clock()
event.reactor_override = task.Clock()
token.reactor_override = task.Clock()
mailflush_sched.reactor_override = task.Clock()


class UTlog:
    """No-op logger used to silence log output during unit tests."""

    @staticmethod
    def err(stuff):
        pass

    @staticmethod
    def debug(stuff):
        pass
def __init__(self):
    """Initialize the Clock base class and expose a separate fake clock."""
    Clock.__init__(self)
    fake_clock = task.Clock()
    self.clock = fake_clock
def test_addChange(self):
    """Add a complete change and verify every column written to the
    changes, change_files, change_properties, change_users and
    sourcestamps tables.

    The fake reactor clock is advanced to SOMETIME so the sourcestamp's
    created_at value is deterministic.
    """
    clock = task.Clock()
    clock.advance(SOMETIME)
    d = self.db.changes.addChange(
        author=u'dustin',
        files=[u'master/LICENSING.txt', u'slave/LICENSING.txt'],
        comments=u'fix spelling',
        revision=u'2d6caa52',
        when_timestamp=epoch2datetime(266738400),
        branch=u'master',
        category=None,
        revlink=None,
        properties={u'platform': (u'linux', 'Change')},
        repository=u'',
        codebase=u'cb',
        project=u'',
        _reactor=clock)

    # check all of the columns of the four relevant tables
    def check_change(changeid):
        def thd(conn):
            self.assertEqual(changeid, 1)
            r = conn.execute(self.db.model.changes.select())
            r = r.fetchall()
            self.assertEqual(len(r), 1)
            self.assertEqual(r[0].changeid, changeid)
            self.assertEqual(r[0].author, 'dustin')
            self.assertEqual(r[0].comments, 'fix spelling')
            self.assertEqual(r[0].branch, 'master')
            self.assertEqual(r[0].revision, '2d6caa52')
            self.assertEqual(r[0].when_timestamp, 266738400)
            self.assertEqual(r[0].category, None)
            self.assertEqual(r[0].repository, '')
            self.assertEqual(r[0].codebase, u'cb')
            self.assertEqual(r[0].project, '')
            self.assertEqual(r[0].sourcestampid, 1)
        return self.db.pool.do(thd)
    d.addCallback(check_change)

    def check_change_files(_):
        def thd(conn):
            # BUGFIX: where()/order_by() are generative in SQLAlchemy --
            # they return a new select and leave the original untouched, so
            # the results of these calls must be kept.  Previously the
            # returned objects were discarded, making the filter and the
            # ordering (which the two-row assertion below relies on) no-ops.
            query = self.db.model.change_files.select()
            query = query.where(self.db.model.change_files.c.changeid == 1)
            query = query.order_by(self.db.model.change_files.c.filename)
            r = conn.execute(query)
            r = r.fetchall()
            self.assertEqual(len(r), 2)
            self.assertEqual(r[0].filename, 'master/LICENSING.txt')
            self.assertEqual(r[1].filename, 'slave/LICENSING.txt')
        return self.db.pool.do(thd)
    d.addCallback(check_change_files)

    def check_change_properties(_):
        def thd(conn):
            # Same generative-API fix as in check_change_files above.
            query = self.db.model.change_properties.select()
            query = query.where(
                self.db.model.change_properties.c.changeid == 1)
            query = query.order_by(
                self.db.model.change_properties.c.property_name)
            r = conn.execute(query)
            r = r.fetchall()
            self.assertEqual(len(r), 1)
            self.assertEqual(r[0].property_name, 'platform')
            self.assertEqual(r[0].property_value, '["linux", "Change"]')
        return self.db.pool.do(thd)
    d.addCallback(check_change_properties)

    def check_change_users(_):
        def thd(conn):
            query = self.db.model.change_users.select()
            r = conn.execute(query)
            r = r.fetchall()
            # no users were attached to this change
            self.assertEqual(len(r), 0)
        return self.db.pool.do(thd)
    d.addCallback(check_change_users)

    def check_change_sourcestamps(_):
        def thd(conn):
            query = self.db.model.sourcestamps.select()
            r = conn.execute(query)
            self.assertEqual([dict(row) for row in r.fetchall()], [{
                'branch': u'master',
                'codebase': u'cb',
                'id': 1,
                'patchid': None,
                'project': u'',
                'repository': u'',
                'revision': u'2d6caa52',
                'created_at': SOMETIME,
                'ss_hash': 'b777dbd10d1d4c76651335f6a78e278e88b010d6',
            }])
        return self.db.pool.do(thd)
    d.addCallback(check_change_sourcestamps)
    return d
def setupStep(self, step, worker_version=None, worker_env=None,
              buildFiles=None, wantDefaultWorkdir=True, wantData=True,
              wantDb=False, wantMq=False):
    """
    Set up C{step} for testing.  This begins by using C{step} as a factory
    to create a I{new} step instance, thereby testing that the factory
    arguments are handled correctly.  It then creates a comfortable
    environment for the worker to run in, replete with a fake build and a
    fake worker.

    As a convenience, it can set the step's workdir with C{'wkdir'}.

    @param worker_version: worker version to present, as a dictionary
        mapping command name to version.  A command name of '*' will apply
        for all commands.

    @param worker_env: environment from the worker at worker startup

    @param buildFiles: list of files the fake build reports via
        C{allFiles()}.  Default value: empty list.

    @param wantDefaultWorkdir: if True, set the step's workdir to its
        configured C{_workdir} or C{'wkdir'}.

    @param wantData(bool): Set to True to add data API connector to master.
        Default value: True.

    @param wantDb(bool): Set to True to add database connector to master.
        Default value: False.

    @param wantMq(bool): Set to True to add mq connector to master.
        Default value: False.
    """
    # mutable defaults are created per-call, never in the signature
    if worker_version is None:
        worker_version = {'*': '99.99'}
    if worker_env is None:
        worker_env = dict()
    if buildFiles is None:
        buildFiles = list()
    # re-create the step through its factory; this exercises the factory
    # argument handling as a side effect
    factory = interfaces.IBuildStepFactory(step)
    step = self.step = factory.buildStep()
    self.master = fakemaster.make_master(wantData=wantData, wantDb=wantDb,
                                         wantMq=wantMq, testcase=self)

    # mock out the reactor for updateSummary's debouncing
    self.debounceClock = task.Clock()
    self.master.reactor = self.debounceClock

    # set defaults
    if wantDefaultWorkdir:
        step.workdir = step._workdir or 'wkdir'

    # step.build
    b = self.build = fakebuild.FakeBuild(master=self.master)
    b.allFiles = lambda: buildFiles
    b.master = self.master

    def getWorkerVersion(cmd, oldversion):
        # exact command name wins, then the '*' wildcard, then the
        # caller-provided fallback
        if cmd in worker_version:
            return worker_version[cmd]
        if '*' in worker_version:
            return worker_version['*']
        return oldversion
    b.getWorkerCommandVersion = getWorkerVersion
    # copy so tests mutating the env don't leak into the caller's dict
    b.workerEnvironment = worker_env.copy()
    step.setBuild(b)

    # watch for properties being set
    self.properties = interfaces.IProperties(b)

    # step.progress
    step.progress = mock.Mock(name="progress")

    # step.worker
    self.worker = step.worker = worker.FakeWorker(self.master)
    self.worker.attached(None)

    # step overrides: replace the log methods with fakes that record logs
    # in self.step.logs for later inspection
    def addLog(name, type='s', logEncoding=None):
        _log = logfile.FakeLogFile(name, step)
        self.step.logs[name] = _log
        return defer.succeed(_log)
    step.addLog = addLog
    step.addLog_newStyle = addLog

    def addHTMLLog(name, html):
        # NOTE(review): unlike addLog/addCompleteLog, this fake does not
        # record the log in self.step.logs -- presumably intentional, but
        # worth confirming against the tests that use HTML logs
        _log = logfile.FakeLogFile(name, step)
        html = bytes2NativeString(html)
        _log.addStdout(html)
        return defer.succeed(None)
    step.addHTMLLog = addHTMLLog

    def addCompleteLog(name, text):
        _log = logfile.FakeLogFile(name, step)
        self.step.logs[name] = _log
        _log.addStdout(text)
        return defer.succeed(None)
    step.addCompleteLog = addCompleteLog

    step.logobservers = self.logobservers = {}

    def addLogObserver(logname, observer):
        self.logobservers.setdefault(logname, []).append(observer)
        observer.step = step
    step.addLogObserver = addLogObserver

    # add any observers defined in the constructor, before this
    # monkey-patch
    for n, o in step._pendingLogObservers:
        addLogObserver(n, o)

    # expectations, filled in by the expect* helpers and checked at the
    # end of the test
    self.exp_result = None
    self.exp_state_string = None
    self.exp_properties = {}
    self.exp_missing_properties = []
    self.exp_logfiles = {}
    self.exp_hidden = False
    self.exp_exception = None

    # check that the step's name is not None
    self.assertNotEqual(step.name, None)

    return step
def test_addChange_when_timestamp_None(self):
    """When C{when_timestamp} is None, addChange must fall back to the
    reactor's current time -- here a fake clock advanced to a known epoch.
    """
    clock = task.Clock()
    clock.advance(1239898353)
    d = self.db.changes.addChange(
        author=u'dustin',
        files=[],
        comments=u'fix spelling',
        is_dir=0,
        revision=u'2d6caa52',
        when_timestamp=None,
        branch=u'master',
        category=None,
        revlink=None,
        properties={},
        repository=u'',
        codebase=u'',
        project=u'',
        _reactor=clock)

    # check all of the columns of the four relevant tables
    def check_change(changeid):
        def thd(conn):
            r = conn.execute(self.db.model.changes.select())
            r = r.fetchall()
            self.assertEqual(len(r), 1)
            self.assertEqual(r[0].changeid, changeid)
            # the fake clock's time was recorded, not a real timestamp
            self.assertEqual(r[0].when_timestamp, 1239898353)
        return self.db.pool.do(thd)
    d.addCallback(check_change)

    def check_change_files(_):
        def thd(conn):
            query = self.db.model.change_files.select()
            r = conn.execute(query)
            r = r.fetchall()
            # no files were attached to this change
            self.assertEqual(len(r), 0)
        return self.db.pool.do(thd)
    d.addCallback(check_change_files)

    def check_change_properties(_):
        def thd(conn):
            query = self.db.model.change_properties.select()
            r = conn.execute(query)
            r = r.fetchall()
            # no properties were attached to this change
            self.assertEqual(len(r), 0)
        return self.db.pool.do(thd)
    d.addCallback(check_change_properties)

    def check_change_users(_):
        def thd(conn):
            query = self.db.model.change_users.select()
            r = conn.execute(query)
            r = r.fetchall()
            # no users were attached to this change
            self.assertEqual(len(r), 0)
        return self.db.pool.do(thd)
    d.addCallback(check_change_users)
    return d
def makeScheduler(self, firstBuildDuration=0, **kwargs):
    """Build and attach a scheduler of the subclass under test, replacing
    its reactor with a fake clock kept on C{self.clock}."""
    scheduler = self.attachScheduler(self.Subclass(**kwargs), self.OBJECTID)
    fake_reactor = task.Clock()
    scheduler._reactor = fake_reactor
    self.clock = fake_reactor
    return scheduler
def test_providesIReactorTime(self):
    """task.Clock must declare that it provides IReactorTime."""
    fake_clock = task.Clock()
    provides = interfaces.IReactorTime.providedBy(fake_clock)
    self.assertTrue(provides, "Clock does not provide IReactorTime")
def setUp(self):
    """Create a fake clock, an output buffer, and the producer under test."""
    self.output = []
    self.clock = task.Clock()
    self.producer = twisted.QueueProducer(self.callback, clock=self.clock)
def test_providesIReactorTime(self):
    """task.Clock must declare that it provides IReactorTime."""
    c = task.Clock()
    # failUnless is a long-deprecated alias of assertTrue (removed from
    # unittest in Python 3.12); use the canonical name.
    self.assertTrue(interfaces.IReactorTime.providedBy(c),
                    "Clock does not provide IReactorTime")
def setUp(self):
    """Run the inherited fixture setup, then install a fake clock."""
    super(CallableTests, self).setUp()
    fake_clock = task.Clock()
    self.clock = fake_clock
def __init__(self):
    """Set up the fake: no announcements yet, a fake clock, a fixed port."""
    self.blobs_announced = 0
    self.peerPort = 3333
    self.clock = task.Clock()
def setUp(self):
    """Extend the base fixture with counting callables and a scheduled
    call driven by a fake clock."""
    super(LongRunningTimingTests, self).setUp()
    fake_clock = task.Clock()
    self.clock = fake_clock
    self.callable = IncrementingCallable()
    self.started = IncrementingCallable()
    self.sc = TestableScheduledCall(fake_clock, self.deferredCallable)
def setUp(self):
    """Extend the base fixture with a counting callable and a scheduled
    call driven by a fake clock."""
    super(SimpleTimingTests, self).setUp()
    fake_clock = task.Clock()
    self.clock = fake_clock
    self.callable = IncrementingCallable()
    self.sc = TestableScheduledCall(fake_clock, self.callable)
def setUp(self):
    """Create a disconnectable string transport and a fake clock."""
    self.clock = task.Clock()
    self.tr = proto_helpers.StringTransportWithDisconnection()
def setUp(self):
    """Connect a calculation client (with timeout support) to a string
    transport, routing its delayed calls through a fake clock."""
    fake_clock = task.Clock()
    transport = proto_helpers.StringTransport()
    protocol = RemoteCalculationClientWithTimeout()
    # timeouts must fire only when the test advances the fake clock
    protocol.callLater = fake_clock.callLater
    self.transport = transport
    self.clock = fake_clock
    self.protocol = protocol
    protocol.makeConnection(transport)
def test_reverse_timer_is_enabled_when_start():
    """Starting a ReverseTimer must turn its is_enabled flag on."""
    timer = ReverseTimer(20)
    # Patch the module's reactor with a fake clock so no real delayed calls
    # are scheduled.  The `as clock` binding was unused, so it is dropped.
    with patch('virtual_score_board.models.reactor', new=task.Clock()):
        timer.start()
        assert timer.is_enabled is True
def do_test_maybeBuildsetComplete(self, buildRequestCompletions=None,
                                  buildRequestResults=None,
                                  buildsetComplete=False,
                                  expectComplete=False,
                                  expectMessage=False,
                                  expectSuccess=True):
    """Test maybeBuildsetComplete.

    @param buildRequestCompletions: dict mapping brid to True if complete,
        else False (and defaulting to False)
    @param buildRequestResults: dict mapping brid to result (defaulting to
        SUCCESS)
    @param buildsetComplete: true if the buildset is already complete
    @param expectComplete: true if the buildset should be complete at exit
    @param expectMessage: true if a buildset completion message is expected
    @param expectSuccess: if expectComplete, whether to expect the buildset
        to be complete

    This first adds two buildsets to the database - 72 and 73.  Buildset 72
    is already complete if buildsetComplete is true; 73 is not complete.

    It adds four buildrequests - 42, 43, and 44 for buildset 72, and 45 for
    buildset 73.  The completion and results are based on
    buildRequestCompletions and buildRequestResults.

    Then, maybeBuildsetComplete is called for buildset 72, and the
    expectations are checked.
    """
    # per-call mutable defaults
    if buildRequestCompletions is None:
        buildRequestCompletions = {}
    if buildRequestResults is None:
        buildRequestResults = {}
    # fake clock pinned to A_TIMESTAMP so complete_at is deterministic
    clock = task.Clock()
    clock.advance(A_TIMESTAMP)

    def mkbr(brid, bsid=72):
        # shorthand for a BuildRequest row using the per-test
        # completion/result mappings
        return fakedb.BuildRequest(
            id=brid, buildsetid=bsid, builderid=42,
            complete=buildRequestCompletions.get(brid),
            results=buildRequestResults.get(brid, SUCCESS))
    yield self.master.db.insertTestData([
        fakedb.Builder(id=42, name='bldr1'),
        fakedb.Buildset(
            id=72, submitted_at=EARLIER,
            complete=buildsetComplete,
            complete_at=A_TIMESTAMP if buildsetComplete else None),
        mkbr(42), mkbr(43), mkbr(44),
        fakedb.BuildsetSourceStamp(buildsetid=72, sourcestampid=234),
        # a second, unrelated buildset that must not be affected
        fakedb.Buildset(id=73, complete=False),
        mkbr(45, bsid=73),
        fakedb.BuildsetSourceStamp(buildsetid=73, sourcestampid=234),
    ])
    yield self.rtype.maybeBuildsetComplete(72, _reactor=clock)

    self.master.db.buildsets.assertBuildsetCompletion(72, expectComplete)
    if expectMessage:
        self.assertEqual(self.master.mq.productions, [
            self._buildsetCompleteMessage(
                72,
                results=SUCCESS if expectSuccess else FAILURE,
                submitted_at=EARLIER_EPOCH),
        ])
    else:
        self.assertEqual(self.master.mq.productions, [])
def setUp(self):
    """Create a URL cache backed by a fake reactor and enable it."""
    fake_clock = task.Clock()
    self.clock = fake_clock
    self.cache = app.UrlCache(reactor=fake_clock, expiration=60)
    self.cache.enable()
def test_create_local_snapshot(self, relative_target_path, content):
    """
    ``MagicFolder.local_snapshot_service`` can be used to create a new
    local snapshot for a file in the folder.
    """
    # build a throwaway global + per-folder configuration on temp dirs
    global_config = create_testing_configuration(
        FilePath(self.mktemp()),
        FilePath(self.mktemp()),
    )
    magic_path = FilePath(self.mktemp())
    magic_path.makedirs()
    mf_config = global_config.create_magic_folder(
        u"foldername",
        magic_path,
        create_local_author(u"zara"),
        random_immutable(directory=True),
        random_dircap(),
        60,
        None,
    )
    # write the (hypothesis-supplied) content at the target path
    target_path = magic_path.preauthChild(relative_target_path)
    target_path.parent().makedirs(ignoreExistingDirectory=True)
    target_path.setContent(content)

    clock = task.Clock()
    status_service = WebSocketStatusService(clock, global_config)
    folder_status = FolderStatus(u"foldername", status_service)
    local_snapshot_creator = MemorySnapshotCreator()
    # NOTE(review): `clock` is rebound to a second, distinct fake clock
    # here, so the status service and the MagicFolder below run on
    # different clocks -- looks unintentional; confirm before relying on
    # clock-driven behavior in this test.
    clock = task.Clock()
    local_snapshot_service = LocalSnapshotService(
        mf_config,
        local_snapshot_creator,
        folder_status,
    )
    uploader = Service()
    # opaque placeholders: this test never exercises the Tahoe client or
    # participants, so plain objects suffice
    tahoe_client = object()
    name = u"local-snapshot-service-test"
    participants = object()
    magic_folder = MagicFolder(
        client=tahoe_client,
        config=mf_config,
        name=name,
        local_snapshot_service=local_snapshot_service,
        folder_status=folder_status,
        scanner_service=Service(),
        remote_snapshot_cache=Service(),
        downloader=MultiService(),
        uploader=uploader,
        participants=participants,
        clock=clock,
        magic_file_factory=MagicFileFactory(
            mf_config,
            tahoe_client,
            folder_status,
            local_snapshot_service,
            uploader,
            object(),
            Service(),
            InMemoryMagicFolderFilesystem(),
        ),
    )
    magic_folder.startService()
    self.addCleanup(magic_folder.stopService)

    # exercise the snapshot service through the MagicFolder facade
    adding = magic_folder.local_snapshot_service.add_file(
        target_path,
    )
    self.assertThat(
        adding,
        succeeded(Always()),
    )

    # the in-memory creator must have seen exactly our target path
    self.assertThat(
        local_snapshot_creator.processed,
        Equals([target_path]),
    )
def setUp(self):
    """Reset poll bookkeeping and point the poller at a fake reactor."""
    self.calls = 0
    self.running = False
    self.duration = 1
    self.fail = False
    fake_clock = task.Clock()
    self.clock = fake_clock
    self.poll._reactor = fake_clock
def testSeconds(self):
    """
    The C{seconds} method of a freshly created fake clock reports fake
    time zero.
    """
    fake_clock = task.Clock()
    self.assertEqual(fake_clock.seconds(), 0)
def setUp(self):
    """Reset call counters and point the poller at a fake reactor."""
    self.calls = 0
    self.fail = False
    fake_clock = task.Clock()
    self.clock = fake_clock
    self.poll._reactor = fake_clock
def setUp(self):
    """Install a deterministic fake clock for each test."""
    self.clock = task.Clock()
def setUp(self):
    """Enable poll-method tracking, reset counters, and wire the poller
    to a fake reactor."""
    poll.track_poll_methods()
    self.calls = 0
    self.fail = False
    fake_clock = task.Clock()
    self.clock = fake_clock
    self.poll._reactor = fake_clock
def __setstate__(self, d):
    """Restore pickled state.

    The fake clock is not picklable, so it is rebuilt fresh after the
    instance dict is restored, and pending events are re-armed against it.
    """
    self.__dict__.update(d)
    self.clock = task.Clock()
    self.restartEvents()
def setUp(self):
    """Create a 2-second TimerService driven by a fake clock, plus a
    deferred for the test to fire."""
    timer = TimerService(2, self.call)
    fake_clock = task.Clock()
    timer.clock = fake_clock
    self.timer = timer
    self.clock = fake_clock
    self.deferred = Deferred()
def setUp(self):
    """Create a contact via a ContactManager running on a fake clock.

    After advancing the clock one hour with no recorded replies or
    failures, the contact's goodness must still be unknown (None).
    """
    self.clock = task.Clock()
    self.contact_manager = ContactManager(self.clock.seconds)
    self.contact = self.contact_manager.make_contact(
        generate_id(), "127.0.0.1", 4444, None)
    self.clock.advance(3600)
    # assertIsNone gives a clearer failure message than
    # assertTrue(x is None)
    self.assertIsNone(self.contact.contact_is_good)
def test_resetTimeoutWhileSending(self):
    """
    The timeout is not allowed to expire after the server has accepted a
    DATA command and the client is actively sending data to it.
    """
    class SlowFile:
        """
        A file-like which returns one byte from each read call until the
        specified number of bytes have been returned.
        """
        def __init__(self, size):
            self._size = size

        def read(self, max=None):
            # dribble out one byte per read; empty string signals EOF
            if self._size:
                self._size -= 1
                return 'x'
            return ''

    # collect any failure delivered to the factory's completion deferred
    failed = []
    onDone = defer.Deferred()
    onDone.addErrback(failed.append)
    clientFactory = smtp.SMTPSenderFactory(
        'source@address', 'recipient@address',
        SlowFile(1), onDone, retries=0, timeout=3)
    clientFactory.domain = "example.org"
    # drive the client's callLater through a fake clock so the timeout
    # only advances when the test says so
    clock = task.Clock()
    client = clientFactory.buildProtocol(
        address.IPv4Address('TCP', 'example.net', 25))
    client.callLater = clock.callLater
    transport = StringTransport()
    client.makeConnection(transport)

    client.dataReceived(
        "220 Ok\r\n"     # Greet the client
        "250 Ok\r\n"     # Respond to HELO
        "250 Ok\r\n"     # Respond to MAIL FROM
        "250 Ok\r\n"     # Respond to RCPT TO
        "354 Ok\r\n"     # Respond to DATA
    )

    # Now the client is producing data to the server.  Any time
    # resumeProducing is called on the producer, the timeout should be
    # extended.  First, a sanity check.  This test is only written to
    # handle pull producers.
    self.assertNotIdentical(transport.producer, None)
    self.assertFalse(transport.streaming)

    # Now, allow 2 seconds (1 less than the timeout of 3 seconds) to
    # elapse.
    clock.advance(2)

    # The timeout has not expired, so the failure should not have happened.
    self.assertEqual(failed, [])

    # Let some bytes be produced, extending the timeout.  Then advance the
    # clock some more and verify that the timeout still hasn't happened.
    transport.producer.resumeProducing()
    clock.advance(2)
    self.assertEqual(failed, [])

    # The file has been completely produced - the next resume producing
    # finishes the upload, successfully.
    transport.producer.resumeProducing()
    client.dataReceived("250 Ok\r\n")
    self.assertEqual(failed, [])

    # Verify that the client actually did send the things expected.
    self.assertEqual(
        transport.value(),
        "HELO example.org\r\n"
        "MAIL FROM:<source@address>\r\n"
        "RCPT TO:<recipient@address>\r\n"
        "DATA\r\n"
        "x\r\n"
        ".\r\n"
        # This RSET is just an implementation detail.  It's nice, but this
        # test doesn't really care about it.
        "RSET\r\n")
def setUp(self):
    """
    Create a testable, deterministic clock and a C{TimeoutTester} instance
    driven by it.
    """
    fake_clock = task.Clock()
    self.clock = fake_clock
    self.proto = TimeoutTester(fake_clock)
def setUp(self):
    """Hook a TorControlProtocol up to a string transport, plus a fake
    clock for timed behavior."""
    protocol = TorControlProtocol()
    # suppress connectionMade side effects before wiring the transport
    protocol.connectionMade = lambda: None
    transport = proto_helpers.StringTransport()
    self.protocol = protocol
    self.transport = transport
    protocol.makeConnection(transport)
    self.clock = task.Clock()