def step_impl(context):
    """Verify an unchecked bid keeps a null ``requested_at``.

    :type context: behave.runner.Context
    """
    # The bid at index 1 was NOT checked when 'mark as requested' was
    # clicked, so it must not have been stamped.
    nt.assert_is_none(
        LcBidRequest.objects.get(pk=context.bid_ids[1]).requested_at,
        "When a bid is not checked when 'mark as requested' button clicked, "
        "the bid 'requested_at' attribute should remain as 'None'")
def DoFlattenForm(pdf_path_in, pdf_path_out): fh = filehandler.FileHandler(pdf_path_in) #print fh.files #print fh.data # should be {} api_request = api_client.make_request('FlattenForm') api_response = api_request(fh.files, inputName='BadIntentions DoFlattenForm') print 'HTTP Response:', api_response.http_code if api_response.ok: assert_equal(api_response.http_code, requests.codes.ok) # api_response.output is the requested document or image. assert_is_none(api_response.error_code) assert_is_none(api_response.error_message) rh = responsehandler.ResponseHandler(api_response, pdf_path_out) rh.save_output() #print '---------- BadIntentions DoFlattenForm ----------' #print rh #print '-------------------------------------------------' else: print responsehandler.ResponseHandler(api_response, pdf_path_out) exit return pdf_path_out
def test_copyFileHandles__copying_cached_file_handles():
    """copyFileHandles must mirror the local cache: handles that were
    cached resolve to the same path after copying, and handles evicted
    from the cache stay uncached."""
    num_files = 3
    file_entities = []

    # upload temp files to synapse
    for i in range(num_files):
        file_path = utils.make_bogus_data_file()
        schedule_for_cleanup(file_path)
        file_entities.append(syn.store(File(file_path, name=str(uuid.uuid1()), parent=project)))

    # a bunch of setup for arguments to the function under test
    file_handles = [file_entity['_file_handle'] for file_entity in file_entities]
    file_entity_ids = [file_entity['id'] for file_entity in file_entities]
    content_types = [file_handle['contentType'] for file_handle in file_handles]
    filenames = [file_handle['fileName'] for file_handle in file_handles]

    # remove every other FileHandle from the cache (at even indices)
    for i in range(num_files):
        if i % 2 == 0:
            syn.cache.remove(file_handles[i]["id"])

    # get the new list of file_handles
    copiedFileHandles = synapseutils.copyFileHandles(syn, file_handles,
                                                     ["FileEntity"] * num_files,
                                                     file_entity_ids,
                                                     content_types, filenames)
    new_file_handle_ids = [copy_result['newFileHandle']['id']
                           for copy_result in copiedFileHandles['copyResults']]

    # verify that the cached paths are the same
    for i in range(num_files):
        original_path = syn.cache.get(file_handles[i]['id'])
        new_path = syn.cache.get(new_file_handle_ids[i])
        if i % 2 == 0:
            # since even indices are not cached, both should be none
            assert_is_none(original_path)
            assert_is_none(new_path)
        else:
            # at odd indices, the file path should have been copied
            assert_equals(original_path, new_path)
def test_customer_bank_accounts_list():
    """Listing customer bank accounts must mirror the stubbed fixture:
    correct types, cursors, no idempotency key, and field-for-field
    equality between records and fixture entries."""
    fixture = helpers.load_fixture('customer_bank_accounts')['list']
    helpers.stub_response(fixture)
    response = helpers.client.customer_bank_accounts.list(*fixture['url_params'])
    body = fixture['body']['customer_bank_accounts']

    assert_is_instance(response, list_response.ListResponse)
    assert_is_instance(response.records[0], resources.CustomerBankAccount)

    # Pagination cursors come straight from the fixture metadata.
    assert_equal(response.before, fixture['body']['meta']['cursors']['before'])
    assert_equal(response.after, fixture['body']['meta']['cursors']['after'])
    # GET requests must not carry an Idempotency-Key header.
    assert_is_none(responses.calls[-1].request.headers.get('Idempotency-Key'))

    # Every exposed attribute matches the corresponding fixture field.
    for field in ('account_holder_name', 'account_number_ending', 'bank_name',
                  'country_code', 'created_at', 'currency', 'enabled', 'id',
                  'metadata'):
        assert_equal([getattr(record, field) for record in response.records],
                     [item.get(field) for item in body])
def DoExportFormData(pdf_path_in, fdf_path_out): fh = filehandler.FileHandler(pdf_path_in) #print fh.files #print fh.data # should be {} options = {'exportXFDF': False} api_request = api_client.make_request('ExportFormData') api_response = api_request(fh.files, inputName='BadIntentions DoExportFormData', options=options) print 'HTTP Response:', api_response.http_code if api_response.ok: assert_equal(api_response.http_code, requests.codes.ok) # api_response.output is the requested document or image. assert_is_none(api_response.error_code) assert_is_none(api_response.error_message) rh = responsehandler.ResponseHandler(api_response, fdf_path_out) rh.save_output() #print '---------- BadIntentions DoExportFormData ----------' #print rh #print '----------------------------------------------------' else: print responsehandler.ResponseHandler(api_response, fdf_path_out) exit return fdf_path_out
def test_no_score(self):
    """bulk_impute must leave gotv_score as None when the matched voter
    record only carries a persuasion_score."""
    models.StateNameVoter.items.create(
        state_lname_fname="PA_APP_GERRY",
        persuasion_score='1.2',
    )
    bulk_impute([self.user], 'gotv_score')
    # No gotv_score source data existed, so nothing should be imputed.
    tools.assert_is_none(self.user.gotv_score)
def test_get_engine(self):
    """get_engine() yields None while disconnected and the stored
    SQLAlchemy engine once connected."""
    # Disconnected: no engine should be handed out.
    self.ip.connected = False
    nt.assert_is_none(self.ip.get_engine())
    # Connected: the SQLAlchemy engine is returned.
    self.ip.connected = True
    nt.assert_equal(self.sa_engine, self.ip.get_engine())
def test_construct_factory_from_form():
    """construct_factory_from_form builds a FormFactory subclass whose
    meta references the Django form and applies no field restrictions."""
    form_factory = construct_factory_from_form(DjangoTestForm)
    assert_true(issubclass(form_factory, FormFactory))
    assert_equal(form_factory._meta.form, DjangoTestForm)
    # No include/exclude lists and no extra settings by default.
    assert_is_none(form_factory._meta.fields)
    assert_is_none(form_factory._meta.exclude)
    assert_equal(form_factory._meta.settings, {})
def test_require_all():
    """A ValueObject demands every field at init unless __require_all__
    is False; mutation and surplus positional args always fail."""
    class DefaultInit(ValueObject):
        id = Int()
        name = Unicode()

    obj = DefaultInit(1, "Dale")
    obj = DefaultInit(name="Dale", id=1)
    # Strict class: omitting any field is an error.
    with nt.assert_raises(InvalidInitInvocation):
        obj = DefaultInit()

    class Loose(ValueObject):
        # Opt out of the all-fields-required rule.
        __require_all__ = False
        id = Int()
        name = Unicode()

    obj = Loose(1)
    nt.assert_equal(obj.id, 1)
    # The omitted field defaults to None.
    nt.assert_is_none(obj.name)
    # still strict mutation
    with nt.assert_raises(IllegalMutation):
        obj.id = 3
    # still won't accept extra args
    with nt.assert_raises(InvalidInitInvocation):
        Loose(1, "DALE", 123)
    # no error
    Loose()
def test_Row_forward_compatibility():
    """A Row must accept and retain constructor kwargs it does not yet
    know about (forward compatibility with newer servers)."""
    row = Row("2, 3, 4", rowId=1, versionNumber=1, etag=None, new_field="new")
    # Known fields round-trip...
    assert_equals("2, 3, 4", row.get("values"))
    assert_equals(1, row.get("rowId"))
    assert_equals(1, row.get("versionNumber"))
    assert_is_none(row.get("etag"))
    # ...and so does the unknown field.
    assert_equals("new", row.get("new_field"))
def test_get_credentials__provider_not_return_credentials(self):
    """The chain yields None when its sole provider returns no
    credentials, after consulting that provider exactly once."""
    self.cred_provider.get_synapse_credentials.return_value = None
    creds = self.credential_provider_chain.get_credentials(syn, self.user_login_args)
    assert_is_none(creds)
    # The provider must have been asked exactly once, with the same args.
    self.cred_provider.get_synapse_credentials.assert_called_once_with(syn, self.user_login_args)
def DoRenderPages(pdf_path_or_url_in, jpg_path_out): fh = filehandler.FileHandler(pdf_path_or_url_in) #print fh.files #print fh.data options = {'outputFormat': 'jpg', 'printPreview': True} api_request = api_client.make_request('RenderPages') if len(fh.data) > 0: api_response = api_request(fh.files, inputURL=fh.data['inputURL'], inputName='BadIntentions RenderPages', options=options) elif len(fh.files) > 0: api_response = api_request(fh.files, inputName='BadIntentions RenderPages', options=options) else: print 'data set not suitable for WebAPI' exit print 'HTTP Response:', api_response.http_code if api_response.ok: assert_equal(api_response.http_code, requests.codes.ok) # api_response.output is the requested document or image. assert_is_none(api_response.error_code) assert_is_none(api_response.error_message) rh = responsehandler.ResponseHandler(api_response, jpg_path_out) rh.save_output() #print '---------- BadIntentions RenderPages ----------' #print rh #print '----------------------------------------------' else: print responsehandler.ResponseHandler(api_response, jpg_path_out) exit return jpg_path_out
def test_from_config(self):
    """from_config honours the 'file_cache' key, whether null or a path."""
    # A null file_cache stays None on the instance.
    index = MemoryCodeIndex.from_config({'file_cache': None})
    ntools.assert_is_none(index._file_cache)
    # A path is stored verbatim; it need not exist yet.
    cache_path = '/doesnt/exist/yet'
    index = MemoryCodeIndex.from_config({'file_cache': cache_path})
    ntools.assert_equal(index._file_cache, cache_path)
def test_get_single_board(self, urlread_): cafe = Cafe('loveclimb') # url board = cafe.find_board(url=CLUBALBUM_BOARD_URL) nt.eq_(board.url, CLUBALBUM_BOARD_URL) # name board = cafe.find_board(name=u"클럽앨범") nt.eq_(board.name, u"클럽앨범") # url and name board = cafe.find_board(url=CLUBALBUM_BOARD_URL, name=u"클럽앨범") nt.eq_(board.name, u"클럽앨범") nt.eq_(board.url, CLUBALBUM_BOARD_URL) # lambda board = cafe.find_board(lambda b: b.name == u"클럽앨범") nt.eq_(board.name, u"클럽앨범") # None if none board = cafe.find_board(name=u"NO SUCH BOARD NAME") nt.assert_is_none(board) # error if more than one with nt.assert_raises(Exception): board = cafe.find_board(lambda b: True)
def DoDecorateDocument(pdf_path_or_url_in, xml_path_in, pdf_path_out): fh = filehandler.FileHandler(pdf_path_or_url_in) #print fh.files #print fh.data api_request = api_client.make_request('DecorateDocument') if len(fh.files) > 0: xml_file_handle = open(xml_path_in, 'r') fh.files['decorationData']=xml_file_handle #print fh.files api_response = api_request(fh.files, inputName='BadIntentions DecorateDocument') else: print 'data set not suitable for WebAPI' exit print 'HTTP Response:', api_response.http_code if api_response.ok: assert_equal(api_response.http_code, requests.codes.ok) # api_response.output is the requested document or image. assert_is_none(api_response.error_code) assert_is_none(api_response.error_message) rh = responsehandler.ResponseHandler(api_response, pdf_path_out) rh.save_output() #print '---------- BadIntentions DecorateDocument ----------' #print rh #print '----------------------------------------------------' else: print responsehandler.ResponseHandler(api_response, pdf_path_out) exit return pdf_path_out
def test_reset_password(self, PasswordResetTokenGeneratorMock):
    """End-to-end password reset: requesting a reset emails a token, and
    completing with that token changes the password and clears the
    cached token."""
    data = {
        'email': self.user.email,
    }
    # Pin the generated token so the completion step can present it.
    PasswordResetTokenGeneratorMock.return_value = 'abc'
    response = self.client.post(
        path=self.reset_password_url,
        data=json.dumps(data),
        content_type='application/json',
    )
    assert_equals(response.status_code, status.HTTP_201_CREATED)
    # Exactly one reset email goes out.
    assert_equals(len(mail.outbox), 1)
    assert_equals(mail.outbox[0].subject, 'Reset Your Password')
    data = {
        'reset_token': 'abc',
        'new_password': self.new_password,
        'password_confirmation': self.new_password,
    }
    response = self.client.post(
        path=self.reset_password_complete_url,
        data=json.dumps(data),
        content_type='application/json'
    )
    assert_equals(response.status_code, status.HTTP_200_OK)
    # The new password is active on the stored user.
    assert_true(User.objects.get(pk=self.user.id).check_password(self.new_password))
    # The one-time token must be purged from the cache after use.
    assert_is_none(cache.get(self.user.email))
def test_LockingQueue_put_get(qbcli, app1, item1, item2): nt.assert_false(qbcli.exists(app1)) # instantiating LockingQueue does not create any objects in backend queue = qbcli.LockingQueue(app1) nt.assert_equal(queue.size(), 0) # fail if consuming before you've gotten anything with nt.assert_raises(UserWarning): queue.consume() # get nothing from an empty queue (and don't fail!) nt.assert_is_none(queue.get()) # put item in queue nt.assert_equal(queue.size(), 0) queue.put(item1) queue.put(item2) queue.put(item1) nt.assert_equal(queue.size(), 3) nt.assert_equal(queue.get(0), item1) nt.assert_equal(queue.get(), item1) # Multiple LockingQueue instances can address the same path queue2 = qbcli.LockingQueue(app1) nt.assert_equal(queue2.size(), 3) nt.assert_equal(queue2.get(), item2) nt.assert_equal(queue.get(), item1) # ensure not somehow mutable or linked # cleanup queue.consume() queue2.consume()
def test_get_site_notification_impressions_over(self, request, response, SiteNotification):
    """No banner (and no cookie write) once the cookie's impression
    count exceeds the notification's allowance."""
    note = SiteNotification.current.return_value
    note._id = 'deadbeef'
    note.impressions = 2
    # Cookie already records 3 impressions -- over the limit of 2.
    request.cookies = {'site-notification': 'deadbeef-3-false'}
    assert_is_none(ThemeProvider().get_site_notification())
    assert not response.set_cookie.called
def test_init(self):
    """BayesianOptimizer initialization: defaults, then an explicit
    argument dict."""
    # test default parameters
    exp = Experiment("test", {"x": MinMaxNumericParamDef(0, 1)})
    opt = BayesianOptimizer(exp)
    assert_equal(opt.initial_random_runs, 10)
    # No acquisition hyperparams are set unless provided.
    assert_is_none(opt.acquisition_hyperparams)
    assert_equal(opt.num_gp_restarts, 10)
    assert_true(isinstance(opt.acquisition_function, ExpectedImprovement))
    assert_dict_equal(opt.kernel_params, {})
    assert_equal(opt.kernel, "matern52")
    # test correct initialization
    opt_arguments = {
        "initial_random_runs": 5,
        "acquisition_hyperparams": {},
        "num_gp_restarts": 5,
        "acquisition": ProbabilityOfImprovement,
        "kernel_params": {},
        "kernel": "matern52",
        "mcmc": True,
    }
    opt = BayesianOptimizer(exp, opt_arguments)
    assert_equal(opt.initial_random_runs, 5)
    assert_dict_equal(opt.acquisition_hyperparams, {})
    assert_equal(opt.num_gp_restarts, 5)
    assert_true(isinstance(opt.acquisition_function, ProbabilityOfImprovement))
    assert_dict_equal(opt.kernel_params, {})
    assert_equal(opt.kernel, "matern52")
def test_after_remove_authorized_user_not_self(self):
    """Removing the authorizing contributor clears the node's
    user_settings and returns a message inviting re-authentication."""
    message = self.node_settings.after_remove_contributor(
        self.node, self.user_settings.owner)
    self.node_settings.save()
    # The link to the removed user's settings must be severed.
    assert_is_none(self.node_settings.user_settings)
    assert_true(message)
    assert_in('You can re-authenticate', message)
def test_array_init(self):
    """Array construction: root vs path initialization, and KeyError
    when the store is uninitialized or occupied by a group."""
    # normal initialization
    store = dict()
    init_array(store, shape=100, chunks=10)
    a = Array(store)
    assert_is_instance(a, Array)
    eq((100,), a.shape)
    eq((10,), a.chunks)
    eq('', a.path)
    # A root array has no name.
    assert_is_none(a.name)
    assert_is(store, a.store)
    # initialize at path
    store = dict()
    init_array(store, shape=100, chunks=10, path='foo/bar')
    a = Array(store, path='foo/bar')
    assert_is_instance(a, Array)
    eq((100,), a.shape)
    eq((10,), a.chunks)
    eq('foo/bar', a.path)
    eq('/foo/bar', a.name)
    assert_is(store, a.store)
    # store not initialized
    store = dict()
    with assert_raises(KeyError):
        Array(store)
    # group is in the way
    store = dict()
    init_group(store, path='baz')
    with assert_raises(KeyError):
        Array(store, path='baz')
def test_mandates_list():
    """Listing mandates must mirror the stubbed fixture: correct types,
    cursors, no idempotency key, and field-for-field equality."""
    fixture = helpers.load_fixture('mandates')['list']
    helpers.stub_response(fixture)
    response = helpers.client.mandates.list(*fixture['url_params'])
    body = fixture['body']['mandates']

    assert_is_instance(response, list_response.ListResponse)
    assert_is_instance(response.records[0], resources.Mandate)

    # Pagination cursors come straight from the fixture metadata.
    assert_equal(response.before, fixture['body']['meta']['cursors']['before'])
    assert_equal(response.after, fixture['body']['meta']['cursors']['after'])
    # GET requests must not carry an Idempotency-Key header.
    assert_is_none(responses.calls[-1].request.headers.get('Idempotency-Key'))

    # Every exposed attribute matches the corresponding fixture field.
    for field in ('created_at', 'id', 'metadata', 'next_possible_charge_date',
                  'payments_require_approval', 'reference', 'scheme', 'status'):
        assert_equal([getattr(record, field) for record in response.records],
                     [item.get(field) for item in body])
def test_footer(self):
    """navigation.footer links previous/next by position in the table
    of contents, with None at either end."""
    toc = [
        navigation.NavItem(
            url='/preamble/2016_02749/cfr_changes/478',
            title=navigation.Title('Authority', '27 CFR 478', 'Authority'),
            markup_id='2016_02749-cfr-478',
            category='27 CFR 478'),
        navigation.NavItem(
            url='/preamble/2016_02749/cfr_changes/478-99',
            title=navigation.Title(
                '§ 478.99 Certain', '§ 478.99', 'Certain'),
            markup_id='2016_02749-cfr-478-99',
            category='27 CFR 478'),
        navigation.NavItem(
            url='/preamble/2016_02749/cfr_changes/478-120',
            title=navigation.Title(
                '§ 478.120 Firearms', '§ 478.120', 'Firearms'),
            markup_id='2016_02749-cfr-478-120',
            category='27 CFR 478')
    ]
    # First entry: no previous neighbour.
    nav = navigation.footer([], toc, '2016_02749-cfr-478')
    assert_is_none(nav['previous'])
    assert_equal(nav['next'].section_id, '2016_02749-cfr-478-99')
    # Middle entry: both neighbours present.
    nav = navigation.footer([], toc, '2016_02749-cfr-478-99')
    assert_equal(nav['previous'].section_id, '2016_02749-cfr-478')
    assert_equal(nav['next'].section_id, '2016_02749-cfr-478-120')
    # Last entry: no next neighbour.
    nav = navigation.footer([], toc, '2016_02749-cfr-478-120')
    assert_equal(nav['previous'].section_id, '2016_02749-cfr-478-99')
    assert_is_none(nav['next'])
def test_create_debug_session():
    """A debug GPS client starts with default endpoint settings, the
    debug flag raised, and no live session attached."""
    gps_client = GPS(debug=1)
    # Default gpsd endpoint.
    assert_equal(gps_client.host, 'localhost')
    assert_equal(gps_client.port, '2947')
    # Debug on, not exiting, no session yet.
    assert_true(gps_client.debug)
    assert_false(gps_client.exit_flag)
    assert_is_none(gps_client.session)
def check_convert_date(raw, expectation):
    """Assert that convert_date(raw, year=2014) matches expectation.

    :param raw: raw date text to parse.
    :param expectation: None when no date should be parsed, otherwise a
        (month, day) tuple for the expected 2014 date.
    """
    result = convert_date(raw, year=2014)
    # Idiom fix: compare to None by identity, not equality (`== None`
    # can be hijacked by __eq__ and is non-idiomatic).
    if expectation is None:
        n.assert_is_none(result)
    else:
        expected_month, expected_day = expectation
        n.assert_equal(result, datetime.date(2014, expected_month, expected_day))
def test_n_components_and_max_pca_components_none(method):
    """Test n_components and max_pca_components=None."""
    _skip_check_picard(method)
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    events = read_events(event_name)
    picks = pick_types(raw.info, eeg=True, meg=False)
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    max_pca_components = None
    n_components = None
    random_state = 12345

    tempdir = _TempDir()
    output_fname = op.join(tempdir, 'test_ica-ica.fif')

    ica = ICA(max_pca_components=max_pca_components, method=method,
              n_components=n_components, random_state=random_state)
    with warnings.catch_warnings(record=True):  # convergence
        ica.fit(epochs)
    # Round-trip through disk to check what fit() persisted.
    ica.save(output_fname)
    ica = read_ica(output_fname)

    # ICA.fit() replaced max_pca_components, which was previously None,
    # with the appropriate integer value.
    assert_equal(ica.max_pca_components, epochs.info['nchan'])
    # n_components stays None through fit/save/load.
    assert_is_none(ica.n_components)
def test_get_next_candidate(self):
    """
    Tests the get next candidate function.
    Tests:
        - The candidate's parameters are acceptable
        - After pausing a candidate, it is handed out again
    """
    # Poll for up to ~2 seconds for the first candidate.
    cand = None
    counter = 0
    while cand is None and counter < 20:
        cand = self.EAss.get_next_candidate()
        time.sleep(0.1)
        counter += 1
    # BUG FIX: the original raised whenever counter reached 20, even if
    # the final poll succeeded; only time out when nothing was received.
    if cand is None:
        raise Exception("Received no result in the first 2 seconds.")
    assert_is_none(cand.result)
    params = cand.params
    assert_less_equal(params["x"], 1)
    assert_greater_equal(params["x"], 0)
    assert_in(params["name"], self.param_defs["name"].values)
    # Pause the candidate; it should be handed out again.
    self.EAss.update(cand, "pausing")
    time.sleep(1)
    new_cand = None
    # BUG FIX: the original reused the exhausted counter here, so the
    # second wait could expire immediately; reset it for a fresh window.
    counter = 0
    while new_cand is None and counter < 20:
        new_cand = self.EAss.get_next_candidate()
        time.sleep(0.1)
        counter += 1
    # BUG FIX: the original never raised on a second-wait timeout.
    if new_cand is None:
        raise Exception("Received no result in the first 2 seconds.")
    assert_equal(new_cand, cand)
def DoFillForm(xfdf_path_in, pdf_url_in): fh = filehandler.FileHandler(xfdf_path_in) #print fh.files #print fh.data # should be {} options = {'disableCalculation': False, 'disableGeneration': False, 'flatten' : False} api_request = api_client.make_request('FillForm') api_response = api_request(fh.files, inputURL=pdf_url_in, inputName='BadIntentions DoFillForm', options=options) print 'HTTP Response:', api_response.http_code if api_response.ok: assert_equal(api_response.http_code, requests.codes.ok) # api_response.output is the requested document or image. assert_is_none(api_response.error_code) assert_is_none(api_response.error_message) # intermediate file (returned from WebAPI and saved to local storage) pdf_path_out = 'tmp.pdf' # todo: random token rh = responsehandler.ResponseHandler(api_response, pdf_path_out) rh.save_output() #print '---------- BadIntentions DoFillForm ----------' #print rh #print '----------------------------------------------' else: print responsehandler.ResponseHandler(api_response, pdf_path_out) exit return pdf_path_out
def test_pipeline(self):
    """Batched set/hset writes take effect only after execute(); then
    every key reads back and delete/clear succeed."""
    batch = self.client.batch()
    batch.set('test_set_a', 'a1')
    batch.set('test_set_b', 'b2')
    batch.set('test_set_c', 'c3')
    batch.set('test_set_d', 'd4')
    batch.hset('hset_a', 'a', 'a1')
    batch.hset('hset_a', 'b', 'b1')
    batch.hset('hset_a', 'c', 'c1')
    batch.hset('hset_a', 'd', 'd1')
    # Nothing is written before execute().
    a1 = self.client.get('test_set_a')
    assert_is_none(a1)
    batch.execute()
    # All plain keys are now visible.
    a1 = self.client.get('test_set_a')
    assert_equals(a1, 'a1')
    b2 = self.client.get('test_set_b')
    assert_equals(b2, 'b2')
    c3 = self.client.get('test_set_c')
    assert_equals(c3, 'c3')
    d4 = self.client.get('test_set_d')
    assert_equals(d4, 'd4')
    # A key that was never written stays absent.
    e5 = self.client.get('test_set_e')
    assert_is_none(e5)
    # All hash fields are now visible.
    a1 = self.client.hget('hset_a', 'a')
    assert_equals(a1, 'a1')
    b1 = self.client.hget('hset_a', 'b')
    assert_equals(b1, 'b1')
    c1 = self.client.hget('hset_a', 'c')
    assert_equals(c1, 'c1')
    d1 = self.client.hget('hset_a', 'd')
    assert_equals(d1, 'd1')
    # Cleanup: bulk-delete the plain keys and clear the hash.
    d = self.client.multi_del('test_set_a', 'test_set_b', 'test_set_c', 'test_set_d')
    d = self.client.hclear('hset_a')
    assert_true(d)
def test_snapshot():
    """Snapshots are immutable views: they keep pre-snapshot state,
    ignore later writes, and iterate like the DB itself."""
    with tmp_db('snapshot') as db:
        db.put(b'a', b'a')
        db.put(b'b', b'b')

        # Snapshot should have existing values, but not changed values
        snapshot = db.snapshot()
        assert_equal(b'a', snapshot.get(b'a'))
        assert_list_equal(
            [b'a', b'b'],
            list(snapshot.iterator(include_value=False)))
        assert_is_none(snapshot.get(b'c'))
        db.delete(b'a')
        db.put(b'c', b'c')
        # Post-snapshot writes stay invisible to the old snapshot.
        assert_is_none(snapshot.get(b'c'))
        assert_list_equal(
            [b'a', b'b'],
            list(snapshot.iterator(include_value=False)))

        # New snapshot should reflect latest state
        snapshot = db.snapshot()
        assert_equal(b'c', snapshot.get(b'c'))
        assert_list_equal(
            [b'b', b'c'],
            list(snapshot.iterator(include_value=False)))

        # Snapshots are directly iterable, just like DB
        assert_list_equal(
            [b'b', b'c'],
            list(k for k, v in snapshot))
def test_authenticate(self):
    """authenticate() returns None with missing or wrong credentials
    and a user object for correct ones."""
    # No auth
    request = RequestFactory().post('/api/v1/user/authenticate/')
    user = utils.authenticate(request)
    assert_is_none(user)
    my_credentials = {'username': '******', 'password': '******'}
    # Correct credentials
    request = RequestFactory().post(
        '/api/v1/user/authenticate/',
        my_credentials,
    )
    user = utils.authenticate(request)
    assert_is_not_none(user)
    # Wrong credentials
    my_credentials['password'] = '******'
    request = RequestFactory().post(
        '/api/v1/user/authenticate/',
        my_credentials,
    )
    user = utils.authenticate(request)
    assert_is_none(user)
def test_set_folder(self):
    """set_config records the chosen folder on node settings, grants
    oauth access on user settings, and logs a folder_selected action."""
    folder_id = 'fake-folder-id'
    folder_name = 'fake-folder-name'
    self.node_settings.clear_settings()
    self.node_settings.save()
    # Start from a clean slate: no folder selected.
    assert_is_none(self.node_settings.list_id)
    provider = self.ProviderClass()
    provider.set_config(
        self.node_settings,
        self.user,
        folder_id,
        folder_name,
        auth=Auth(user=self.user),
    )
    # instance was updated
    assert_equal(
        self.node_settings.list_id,
        'fake-folder-id',
    )
    # user_settings was updated
    # TODO: the call to grant_oauth_access should be mocked
    assert_true(
        self.user_settings.verify_oauth_access(
            node=self.node,
            external_account=self.external_account,
            metadata={'folder': 'fake-folder-id'}))
    # A log entry records the selection with the folder details.
    log = self.node.logs.latest()
    assert_equal(log.action, '{}_folder_selected'.format(self.short_name))
    assert_equal(log.params['folder_id'], folder_id)
    assert_equal(log.params['folder_name'], folder_name)
def test_events_list():
    """Listing events must mirror the stubbed fixture: correct types,
    cursors, no idempotency key, and field-for-field equality."""
    fixture = helpers.load_fixture('events')['list']
    helpers.stub_response(fixture)
    response = helpers.client.events.list(*fixture['url_params'])
    body = fixture['body']['events']

    assert_is_instance(response, list_response.ListResponse)
    assert_is_instance(response.records[0], resources.Event)

    # Pagination cursors come straight from the fixture metadata.
    assert_equal(response.before, fixture['body']['meta']['cursors']['before'])
    assert_equal(response.after, fixture['body']['meta']['cursors']['after'])
    # GET requests must not carry an Idempotency-Key header.
    assert_is_none(responses.calls[-1].request.headers.get('Idempotency-Key'))

    # Every exposed attribute matches the corresponding fixture field.
    for field in ('action', 'created_at', 'customer_notifications', 'id',
                  'metadata', 'resource_type'):
        assert_equal([getattr(record, field) for record in response.records],
                     [item.get(field) for item in body])
def test_billing_requests_list():
    """Listing billing requests must mirror the stubbed fixture: correct
    types, cursors, no idempotency key, and field-for-field equality."""
    fixture = helpers.load_fixture('billing_requests')['list']
    helpers.stub_response(fixture)
    response = helpers.client.billing_requests.list(*fixture['url_params'])
    body = fixture['body']['billing_requests']

    assert_is_instance(response, list_response.ListResponse)
    assert_is_instance(response.records[0], resources.BillingRequest)

    # Pagination cursors come straight from the fixture metadata.
    assert_equal(response.before, fixture['body']['meta']['cursors']['before'])
    assert_equal(response.after, fixture['body']['meta']['cursors']['after'])
    # GET requests must not carry an Idempotency-Key header.
    assert_is_none(responses.calls[-1].request.headers.get('Idempotency-Key'))

    # Every exposed attribute matches the corresponding fixture field.
    for field in ('actions', 'created_at', 'fallback_enabled', 'id',
                  'metadata', 'status'):
        assert_equal([getattr(record, field) for record in response.records],
                     [item.get(field) for item in body])
def test_msgs():
    """Msg populates command/obj/args/kwargs from its positional
    arguments, defaulting obj to None, args to () and kwargs to {}."""
    # Command with an object and one payload argument.
    msg = Msg('set', motor, {'motor': 5})
    assert_equal(msg.command, 'set')
    assert_is(msg.obj, motor)
    assert_equal(msg.args, ({'motor': 5},))
    assert_equal(msg.kwargs, {})

    # Command with an object but no payload.
    msg = Msg('read', motor)
    assert_equal(msg.command, 'read')
    assert_is(msg.obj, motor)
    assert_equal(msg.args, ())
    assert_equal(msg.kwargs, {})

    # Bare command: obj defaults to None.
    msg = Msg('create')
    assert_equal(msg.command, 'create')
    assert_is_none(msg.obj)
    assert_equal(msg.args, ())
    assert_equal(msg.kwargs, {})

    # Explicit None object with a positional payload.
    msg = Msg('sleep', None, 5)
    assert_equal(msg.command, 'sleep')
    assert_is_none(msg.obj)
    assert_equal(msg.args, (5,))
    assert_equal(msg.kwargs, {})
def test_billing_request_templates_update():
    """Updating a billing request template must return a resource whose
    attributes match the stubbed fixture, without an idempotency key."""
    fixture = helpers.load_fixture('billing_request_templates')['update']
    helpers.stub_response(fixture)
    response = helpers.client.billing_request_templates.update(*fixture['url_params'])
    body = fixture['body']['billing_request_templates']

    assert_is_instance(response, resources.BillingRequestTemplate)
    # Updates in this client are not sent with an Idempotency-Key header.
    assert_is_none(responses.calls[-1].request.headers.get('Idempotency-Key'))

    # Every exposed attribute matches the corresponding fixture field.
    for field in ('authorisation_url', 'created_at', 'id',
                  'mandate_request_currency', 'mandate_request_metadata',
                  'mandate_request_scheme', 'mandate_request_verify',
                  'metadata', 'name', 'payment_request_amount',
                  'payment_request_currency', 'payment_request_description',
                  'payment_request_metadata', 'payment_request_scheme',
                  'redirect_uri', 'updated_at'):
        assert_equal(getattr(response, field), body.get(field))
def test_image_filename_defaults():
    '''test format constraint, and validity of jpeg and png'''
    tpath = ipath.get_ipython_package_dir()
    # Unsupported or missing inputs must be rejected up front.
    nt.assert_raises(ValueError, display.Image,
                     filename=os.path.join(tpath, 'testing/tests/badformat.gif'),
                     embed=True)
    nt.assert_raises(ValueError, display.Image)
    nt.assert_raises(ValueError, display.Image,
                     data='this is not an image', format='badformat', embed=True)
    from IPython.html import DEFAULT_STATIC_FILES_PATH
    # PNG on disk: format is inferred and an inline PNG repr exists.
    imgfile = os.path.join(DEFAULT_STATIC_FILES_PATH, 'base/images/ipynblogo.png')
    img = display.Image(filename=imgfile)
    nt.assert_equal('png', img.format)
    nt.assert_is_not_none(img._repr_png_())
    # Non-embedded JPEG: format inferred, but no inline jpeg repr.
    img = display.Image(filename=os.path.join(tpath, 'testing/tests/logo.jpg'),
                        embed=False)
    nt.assert_equal('jpeg', img.format)
    nt.assert_is_none(img._repr_jpeg_())
def test_array_init(self):
    """Array construction: root vs path initialization (including
    basename), and ValueError when the store is uninitialized or
    occupied by a group."""
    # normal initialization
    store = dict()
    init_array(store, shape=100, chunks=10)
    a = Array(store)
    assert_is_instance(a, Array)
    eq((100,), a.shape)
    eq((10,), a.chunks)
    eq('', a.path)
    # A root array has neither name nor basename.
    assert_is_none(a.name)
    assert_is_none(a.basename)
    assert_is(store, a.store)
    # initialize at path
    store = dict()
    init_array(store, shape=100, chunks=10, path='foo/bar')
    a = Array(store, path='foo/bar')
    assert_is_instance(a, Array)
    eq((100,), a.shape)
    eq((10,), a.chunks)
    eq('foo/bar', a.path)
    eq('/foo/bar', a.name)
    eq('bar', a.basename)
    assert_is(store, a.store)
    # store not initialized
    store = dict()
    with assert_raises(ValueError):
        Array(store)
    # group is in the way
    store = dict()
    init_group(store, path='baz')
    with assert_raises(ValueError):
        Array(store, path='baz')
def test_md5(self):
    """md5()/sha1() compute once, cache on the element, and serve later
    calls from the cache without re-hashing."""
    de = DummyDataElement()
    # Nothing hashed yet.
    ntools.assert_is_none(de._md5_cache)
    md5 = de.md5()
    sha1 = de.sha1()
    # Both digests were computed and cached.
    ntools.assert_is_not_none(de._md5_cache)
    ntools.assert_equal(de._md5_cache, EXPECTED_MD5)
    ntools.assert_equal(md5, EXPECTED_MD5)
    ntools.assert_equal(de._sha1_cache, EXPECTED_SHA1)
    ntools.assert_equal(sha1, EXPECTED_SHA1)
    # When called a second time, should use cache instead of recomputing
    with mock.patch(
            "smqtk.representation.data_element.hashlib") as mock_hashlib:
        md5 = de.md5()
        ntools.assert_false(mock_hashlib.md5.called)
        ntools.assert_equal(md5, EXPECTED_MD5)
        sha1 = de.sha1()
        ntools.assert_false(mock_hashlib.sha1.called)
        ntools.assert_equal(sha1, EXPECTED_SHA1)
def test_download_file_false(): RENAME_SUFFIX = 'blah' # Upload a file filepath = utils.make_bogus_binary_file() schedule_for_cleanup(filepath) schedule_for_cleanup(filepath + RENAME_SUFFIX) file = File(filepath, name='SYNR 619', parent=project) file = syn.store(file) # Now hide the file from the cache and download with downloadFile=False os.rename(filepath, filepath + RENAME_SUFFIX) file = syn.get(file.id, downloadFile=False) # Change something and reupload the file's metadata file.name = "Only change the name, not the file" reupload = syn.store(file) assert_is_none(reupload.path, "Path field should be null: %s" % reupload.path) # This should still get the correct file reupload = syn.get(reupload.id) assert_true(filecmp.cmp(filepath + RENAME_SUFFIX, reupload.path)) assert_equals(reupload.name, file.name)
def test_get_next_candidate(self):
    """
    Tests the get next candidate function.
    Tests:
        - The candidate's parameters are acceptable
    """
    optimizer = "RandomSearch"
    name = "test_init_experiment"
    param_defs = {
        "x": MinMaxNumericParamDef(0, 1),
        "name": NominalParamDef(["A", "B", "C"])
    }
    minimization = True
    EAss = PrettyExperimentAssistant(name, optimizer, param_defs,
                                     minimization=minimization)
    cand = EAss.get_next_candidate()
    # A fresh candidate carries no result yet.
    assert_is_none(cand.result)
    params = cand.params
    # Parameters must respect their definitions.
    assert_less_equal(params["x"], 1)
    assert_greater_equal(params["x"], 0)
    assert_in(params["name"], param_defs["name"].values)
def test_webhooks_get():
    """Fetching a webhook must return a resource whose attributes match
    the stubbed fixture, without an idempotency key."""
    fixture = helpers.load_fixture('webhooks')['get']
    helpers.stub_response(fixture)
    response = helpers.client.webhooks.get(*fixture['url_params'])
    body = fixture['body']['webhooks']

    assert_is_instance(response, resources.Webhook)
    # GET requests must not carry an Idempotency-Key header.
    assert_is_none(responses.calls[-1].request.headers.get('Idempotency-Key'))

    # Every exposed attribute matches the corresponding fixture field.
    for field in ('created_at', 'id', 'is_test', 'request_body',
                  'request_headers', 'response_body',
                  'response_body_truncated', 'response_code',
                  'response_headers', 'response_headers_content_truncated',
                  'response_headers_count_truncated', 'successful', 'url'):
        assert_equal(getattr(response, field), body.get(field))
def test_tax_rates_list():
    """Listing tax rates returns records and cursors mirroring the fixture body."""
    fixture = helpers.load_fixture('tax_rates')['list']
    helpers.stub_response(fixture)
    response = helpers.client.tax_rates.list(*fixture['url_params'])
    body = fixture['body']['tax_rates']

    assert_is_instance(response, list_response.ListResponse)
    assert_is_instance(response.records[0], resources.TaxRate)

    # Pagination cursors come straight from the fixture metadata.
    assert_equal(response.before, fixture['body']['meta']['cursors']['before'])
    assert_equal(response.after, fixture['body']['meta']['cursors']['after'])
    # A plain list call must not send an idempotency key.
    assert_is_none(responses.calls[-1].request.headers.get('Idempotency-Key'))

    # Each record attribute should line up element-wise with the fixture body.
    for field in ('end_date', 'id', 'jurisdiction', 'percentage',
                  'start_date', 'type'):
        assert_equal([getattr(r, field) for r in response.records],
                     [b.get(field) for b in body])
def test_configuration_none_HI(self):
    """A null hash_index type in the config yields an index whose hash_index is None."""
    default_config = LSHNearestNeighborIndex.get_default_config()
    # Default config must be JSON-serializable and survive a round-trip.
    ntools.assert_equal(json.loads(json.dumps(default_config)), default_config)

    # Build a minimal configuration that declares no hash index.
    default_config['lsh_functor']['type'] = 'ItqFunctor'
    default_config['descriptor_index']['type'] = 'MemoryDescriptorIndex'
    default_config['hash2uuids_kvstore']['type'] = 'MemoryKeyValueStore'
    default_config['hash_index']['type'] = None

    nn_index = LSHNearestNeighborIndex.from_config(default_config)
    ntools.assert_is_instance(nn_index.lsh_functor, ItqFunctor)
    ntools.assert_is_instance(nn_index.descriptor_index, MemoryDescriptorIndex)
    # hash_index type was None, so no hash index should have been constructed.
    ntools.assert_is_none(nn_index.hash_index)
    ntools.assert_is_instance(nn_index.hash2uuids_kvstore, MemoryKeyValueStore)

    # The instance's own config must also survive a JSON round-trip.
    ntools.assert_equal(json.loads(json.dumps(nn_index.get_config())),
                        nn_index.get_config())
def test_create_worker_defaults():
    """A worker created with only an identity gets all-default state."""
    w = create_worker('worker')
    nt.assert_equal(w.identity, 'worker')
    # No queues are attached by default.
    for queue in (w.shared_queue, w.result_queue, w.individual_queue):
        nt.assert_is_none(queue)
    # Best-fit bookkeeping starts at its worst possible values.
    np.testing.assert_equal(w.best_AICc, np.inf)
    np.testing.assert_equal(w.best_values, [])
    np.testing.assert_equal(w.best_dof, np.inf)
    np.testing.assert_equal(w.last_time, 1)
def test_load_flann_model_empty_data_elements(self):
    """Loading from empty (but valid) data elements is a no-op apart from recording the PID."""
    # Construct index with valid, but empty, data URIs instances
    empty_data = 'base64://'
    f = FlannNearestNeighborsIndex(empty_data, empty_data, empty_data)
    # Load method should do nothing but set PID since given data was
    # empty.
    f._load_flann_model()
    # No descriptor cache, FLANN index, or build parameters should have
    # been materialized from the empty inputs.
    ntools.assert_is_none(f._descr_cache)
    ntools.assert_is_none(f._flann)
    ntools.assert_is_none(f._flann_build_params)
    # The PID is recorded regardless of the empty inputs.
    ntools.assert_is_not_none(f._pid)
def test_cache_store_get():
    """Exercise Cache add/get/remove, including two cached paths for one file handle."""
    tmp_dir = tempfile.mkdtemp()
    my_cache = cache.Cache(cache_root_dir=tmp_dir)

    # One file cached under its canonical cache directory.
    path1 = utils.touch(
        os.path.join(my_cache.get_cache_dir(101201), "file1.ext"))
    my_cache.add(file_handle_id=101201, path=path1)

    # A second handle with two cached copies: one in its cache directory...
    path2 = utils.touch(
        os.path.join(my_cache.get_cache_dir(101202), "file2.ext"))
    my_cache.add(file_handle_id=101202, path=path2)

    # ...and one elsewhere. set path3's mtime to be later than path2's
    new_time_stamp = cache._get_modified_time(path2) + 2
    path3 = utils.touch(os.path.join(tmp_dir, "foo", "file2.ext"),
                        (new_time_stamp, new_time_stamp))
    my_cache.add(file_handle_id=101202, path=path3)

    # Lookup without a path resolves via the cache directory.
    a_file = my_cache.get(file_handle_id=101201)
    assert utils.equal_paths(a_file, path1)

    # Lookup by exact path and by containing directory both hit.
    a_file = my_cache.get(file_handle_id=101201, path=path1)
    assert utils.equal_paths(a_file, path1)

    a_file = my_cache.get(file_handle_id=101201,
                          path=my_cache.get_cache_dir(101201))
    assert utils.equal_paths(a_file, path1)

    # Directory lookups resolve to whichever cached copy lives in that directory.
    b_file = my_cache.get(file_handle_id=101202, path=os.path.dirname(path2))
    assert utils.equal_paths(b_file, path2)

    b_file = my_cache.get(file_handle_id=101202, path=os.path.dirname(path3))
    assert utils.equal_paths(b_file, path3)

    # An id that was never added is not found.
    not_in_cache_file = my_cache.get(file_handle_id=101203, path=tmp_dir)
    assert_is_none(not_in_cache_file)

    # Removing with delete=True reports exactly the one removed path and
    # leaves nothing retrievable for that handle.
    removed = my_cache.remove(file_handle_id=101201, path=path1, delete=True)
    assert utils.normalize_path(path1) in removed
    assert len(removed) == 1

    assert_is_none(my_cache.get(file_handle_id=101201))

    # Removing one of the two copies for 101202 leaves the other retrievable.
    removed = my_cache.remove(file_handle_id=101202, path=path3, delete=True)
    b_file = my_cache.get(file_handle_id=101202)
    assert utils.normalize_path(path3) in removed
    assert len(removed) == 1
    assert utils.equal_paths(b_file, path2)

    # Removing without a path purges the remaining copy.
    removed = my_cache.remove(file_handle_id=101202, delete=True)
    assert utils.normalize_path(path2) in removed
    assert len(removed) == 1
    assert_is_none(my_cache.get(file_handle_id=101202))
def test_store_reference():
    """store_reference only returns a value when both a reference and the extra argument are given."""
    api_data = dict(reference=None,
                    user_uid=2,
                    host='www.host.com',
                    path='my-path',
                    issue_id='3')

    # With a null reference nothing is stored, regardless of the second argument.
    assert_is_none(store_reference(api_data))
    assert_is_none(store_reference(api_data, 2))

    api_data['reference'] = 'some reference text'
    # A reference alone is still not enough...
    assert_is_none(store_reference(api_data))
    # ...but a reference plus the second argument yields a result.
    assert_is_not_none(store_reference(api_data, 2))
def test_active_for_user(self):
    """
    Make sure we can retrieve a user's active (in progress) verification
    attempt -- always the most recently created attempt that was marked ready.
    """
    user = UserFactory.create()

    # This user has no active at the moment...
    assert_is_none(SoftwareSecurePhotoVerification.active_for_user(user))

    # Create an attempt and mark it ready...
    attempt = SoftwareSecurePhotoVerification(user=user)
    attempt.mark_ready()
    assert_equals(attempt, SoftwareSecurePhotoVerification.active_for_user(user))

    # A new user won't see this...
    user2 = UserFactory.create()
    user2.save()
    assert_is_none(SoftwareSecurePhotoVerification.active_for_user(user2))

    # If it's got a different status, it doesn't count
    for status in ["submitted", "must_retry", "approved", "denied"]:
        attempt.status = status
        attempt.save()
        assert_is_none(
            SoftwareSecurePhotoVerification.active_for_user(user))

    # But if we create yet another one and mark it ready, it passes again.
    attempt_2 = SoftwareSecurePhotoVerification(user=user)
    attempt_2.mark_ready()
    assert_equals(attempt_2,
                  SoftwareSecurePhotoVerification.active_for_user(user))

    # And if we add yet another one with a later created time, we get that
    # one instead. We always want the most recent attempt marked ready()
    attempt_3 = SoftwareSecurePhotoVerification(
        user=user, created_at=attempt_2.created_at + timedelta(days=1))
    attempt_3.save()

    # We haven't marked attempt_3 ready yet, so attempt_2 still wins
    assert_equals(attempt_2,
                  SoftwareSecurePhotoVerification.active_for_user(user))

    # Now we mark attempt_3 ready and expect it to come back
    attempt_3.mark_ready()
    assert_equals(attempt_3,
                  SoftwareSecurePhotoVerification.active_for_user(user))
def test_refresh_record():
    """StreamPayload - Refresh Record"""
    payload = load_stream_payload('s3', 'entity', 'record')

    # Set some values that are different than the defaults
    payload.type = 'unit_type'
    payload.log_source = 'unit_source'
    payload.records = ['rec1']
    payload.valid = True

    payload._refresh_record('new pre_parsed_record')

    # The new pre-parsed record is stored...
    assert_equal(payload.pre_parsed_record, 'new pre_parsed_record')
    # ...and all derived state is reset back to its defaults.
    for attribute in (payload.type, payload.log_source, payload.records):
        assert_is_none(attribute)
    assert_false(payload.valid)
def test_list_item_path():
    '''list path mapping - item level'''
    pm = PathMap()

    # basic set to a list item
    assert_equal(pm.metadata({"level1":[{"level2":"level2-value"}]}, 1), 1)
    # basic get for a list item
    assert_equal(pm.metadata({"level1":[{"level2":"level2-value"}]}), 1)
    # basic get - nothing set at the top list level, we have messed only with
    # elements
    assert_is_none(pm.metadata({"level1":[{}]}))
    # basic inherited get
    # we have set metadata for {"level1":[{"level2":"level2-value"}]} to 1,
    # subelements inherit so a path under this will have 1 until we change it
    # to otherwise
    assert_equal(pm.metadata({"level1":[{"level2":"level2-value",
                                         "content":{}}]}), 1)
    # wrong key failed get
    assert_is_none(pm.metadata({"level1":[{"level2":"level3-value"}]}))
    # wrong key look in depth failed get
    assert_is_none(pm.metadata({"level1":[{"level2":"level3-value",
                                           "content":{}}]}))
def test_successful_update_query(self):
    """An update_one inside an active span yields a traced mongo span with the expected metadata."""
    with tracer.start_active_span("test"):
        self.conn.test.records.update_one({"type": "string"}, {"$set": {
            "type": "int"
        }})

    # Leaving the ``with`` block must close the active span.
    assert_is_none(tracer.active_span)

    spans = self.recorder.queued_spans()
    self.assertEqual(len(spans), 2)

    db_span = spans[0]
    test_span = spans[1]

    # The db span belongs to the same trace and is parented to the test span.
    self.assertEqual(test_span.t, db_span.t)
    self.assertEqual(db_span.p, test_span.s)

    # No error code was recorded on the database span.
    assert_is_none(db_span.ec)

    self.assertEqual(db_span.n, "mongo")
    self.assertEqual(
        db_span.data["mongo"]["service"],
        "%s:%s" % (testenv['mongodb_host'], testenv['mongodb_port']))
    self.assertEqual(db_span.data["mongo"]["namespace"], "test.records")
    self.assertEqual(db_span.data["mongo"]["command"], "update")

    # For updates, the filter/update document travels in the "json" payload
    # rather than the top-level "filter" field.
    assert_is_none(db_span.data["mongo"]["filter"])
    assert_is_not_none(db_span.data["mongo"]["json"])

    payload = json.loads(db_span.data["mongo"]["json"])
    # The recorded payload must contain the exact update operation we issued.
    assert_true({
        "q": {
            "type": "string"
        },
        "u": {
            "$set": {
                "type": "int"
            }
        },
        "multi": False,
        "upsert": False
    } in payload, db_span.data["mongo"]["json"])
def test_list_item_complex_map (): '''list path mapping - item level, multiple items present''' # same as above, just a more complex use case where there are multiple list # items pm = PathMap() # basic set assert_equal(pm.metadata({"level1":[{"level2":"level2-value1"}]}, 1), 1) assert_equal(pm.metadata({"level1":[{"level2":"level2-value2"}]}, 2), 2) # basic get 1 assert_equal(pm.metadata({"level1":[{"level2":"level2-value1"}]}), 1) # basic get 2 assert_equal(pm.metadata({"level1":[{"level2":"level2-value2"}]}), 2) # basic failed get assert_is_none(pm.metadata({"level1":[{}]})) # basic inherited get assert_equal(pm.metadata({"level1":[{"level2":"level2-value1", "content":{}}]}), 1) # basic inherited get assert_equal(pm.metadata({"level1":[{"level2":"level2-value2", "content":{}}]}), 2) # wrong key failed get assert_is_none(pm.metadata({"level1":[{"level2":"level3-value"}]})) # wrong key look in depth failed get assert_is_none(pm.metadata({"level1":[{"level2":"level3-value", "content":{}}]}))
def test_legend_properties_minimal():
    """A Legend built from an empty config gets default styles and null filter/bbox/srs."""
    legend_conf = {}
    # NOTE: the original used the Python-2-only ``print`` statement, which is a
    # syntax error under Python 3; the single-argument function form below is
    # valid in both.
    print('Test Legend instance properties minimal')
    legend = Legend(GeoServer, GS_URL, 'black:magic', legend_conf)

    # The legend keeps a reference to the server it was built against.
    tools.assert_true(hasattr(legend, 'server'))
    tools.assert_is_instance(legend.server, GeoServer)
    tools.assert_equals(legend.server.url, GS_URL)

    # Title is taken verbatim from the constructor argument.
    tools.assert_true(hasattr(legend, 'title'))
    tools.assert_equals(legend.title, 'black:magic')

    # With no styles configured, a single 'default' style is assumed.
    tools.assert_true(hasattr(legend, 'styles'))
    tools.assert_is_instance(legend.styles, list)
    tools.assert_equals(legend.styles, ['default'])

    # Optional rendering parameters default to None when absent from the config.
    for attr in ('filter', 'bbox', 'srs'):
        tools.assert_true(hasattr(legend, attr))
        tools.assert_is_none(getattr(legend, attr))
def test_successful_insert_query(self):
    """An insert_one inside an active span yields a traced mongo span with the expected metadata."""
    with tracer.start_active_span("test"):
        self.conn.test.records.insert_one({"type": "string"})

    # Leaving the ``with`` block must close the active span.
    assert_is_none(tracer.active_span)

    spans = self.recorder.queued_spans()
    self.assertEqual(len(spans), 2)

    db_span = spans[0]
    test_span = spans[1]

    # Same trace; the db span is parented to the test span.
    self.assertEqual(test_span.t, db_span.t)
    self.assertEqual(db_span.p, test_span.s)

    # No error code was recorded on the database span.
    assert_is_none(db_span.ec)

    self.assertEqual(db_span.n, "mongo")
    self.assertEqual(db_span.data["mongo"]["service"],
                     "%s:%s" % (testenv['mongodb_host'],
                                testenv['mongodb_port']))
    self.assertEqual(db_span.data["mongo"]["namespace"], "test.records")
    self.assertEqual(db_span.data["mongo"]["command"], "insert")
    # Inserts carry no filter document.
    assert_is_none(db_span.data["mongo"]["filter"])
def test_parse_garbage(self):
    """Unparseable input must yield None rather than raise."""
    garbage = b"complete and utter garbage"
    assert_is_none(self.handler.parse(garbage))
def test_get_flights_when_response_is_not_ok(mock_post):
    """An unsuccessful HTTP response makes get_flights return None."""
    mock_post.return_value.ok = False
    result = get_flights(departure_airport, arrival_airport, departure_date)
    assert_is_none(result)
def test_get_dispatcher_bad(log_mock):
    """StreamAlertOutput - Get Invalid Dispatcher"""
    # An unknown service name yields no dispatcher and logs the failure.
    assert_is_none(StreamAlertOutput.get_dispatcher('aws-s4'))
    log_mock.assert_called_with('Designated output service [%s] does not exist',
                                'aws-s4')
def test_get_current_request(self):
    """
    Since we are running outside of Django assert that get_current_request
    returns None
    """
    # No request cycle is active here, so there is nothing to return.
    current = get_current_request()
    assert_is_none(current)
def test_not_present(self):
    """get_segment yields None when the store holds no matching segment."""
    store = PathStore(self.path_policy)
    # Populate the store with five mocked candidates exposing only an 'id'.
    store.candidates = [MagicMock(spec_set=['id']) for _ in range(5)]
    ntools.assert_is_none(store.get_segment(2))