def test_incomplete_heaps(self):
    """Two packets for heap 1 cover only part of a 96-byte heap: the heap
    must come back as an IncompleteHeap that keeps its immediate item,
    drops the addressed (payload-relative) item, and reports the byte
    ranges of payload actually received.
    """
    payload = bytearray(64)
    payload[:] = range(64)
    packets = [
        # First packet: 7 payload bytes at offset 5, one immediate item
        # (0x1600) and one addressed item (0x5000 at payload offset 0).
        self.flavour.make_packet(
            [Item(spead2.HEAP_CNT_ID, 1, True),
             Item(spead2.PAYLOAD_OFFSET_ID, 5, True),
             Item(spead2.PAYLOAD_LENGTH_ID, 7, True),
             Item(spead2.HEAP_LENGTH_ID, 96, True),
             Item(0x1600, 12345, True),
             Item(0x5000, 0, False, offset=0)],
            payload[5 : 12]),
        # Second packet: 32 payload bytes at offset 32 of the same heap.
        self.flavour.make_packet(
            [Item(spead2.HEAP_CNT_ID, 1, True),
             Item(spead2.PAYLOAD_OFFSET_ID, 32, True),
             Item(spead2.PAYLOAD_LENGTH_ID, 32, True),
             Item(spead2.HEAP_LENGTH_ID, 96, True)],
            payload[32 : 64])
    ]
    heaps = self.data_to_heaps(b''.join(packets),
                               contiguous_only=False,
                               incomplete_keep_payload_ranges=True)
    assert_equal(1, len(heaps))
    assert_is_instance(heaps[0], recv.IncompleteHeap)
    items = heaps[0].get_items()
    assert_equal(1, len(items))   # Addressed item must be excluded
    assert_equal(0x1600, items[0].id)
    assert_equal(96, heaps[0].heap_length)
    assert_equal(39, heaps[0].received_length)  # 7 + 32 bytes received
    assert_equal([(5, 12), (32, 64)], heaps[0].payload_ranges)
def test_slice_types(self):
    """Row slicing keeps the AlleleCountsArray type; any column slice,
    row/column index, or scalar access must decay to a plain ndarray."""
    ac = AlleleCountsArray(allele_counts_data, dtype='u1')
    # Row slice preserves the wrapper type.
    assert_is_instance(ac[1:], AlleleCountsArray)
    # Column slice, row index, and column index all decay to ndarray.
    for view in (ac[:, 1:], ac[0], ac[:, 0]):
        assert_is_instance(view, np.ndarray)
        assert_not_is_instance(view, AlleleCountsArray)
    # A single item decays to a numpy scalar.
    item = ac[0, 0]
    assert_is_instance(item, np.uint8)
    assert_not_is_instance(item, AlleleCountsArray)
def test_customer_bank_accounts_list():
    """List customer bank accounts and verify cursors, record type, absence
    of an idempotency key, and every per-record field against the fixture."""
    fixture = helpers.load_fixture('customer_bank_accounts')['list']
    helpers.stub_response(fixture)
    response = helpers.client.customer_bank_accounts.list(*fixture['url_params'])
    body = fixture['body']['customer_bank_accounts']
    assert_is_instance(response, list_response.ListResponse)
    assert_is_instance(response.records[0], resources.CustomerBankAccount)
    cursors = fixture['body']['meta']['cursors']
    assert_equal(response.before, cursors['before'])
    assert_equal(response.after, cursors['after'])
    # List (GET) requests must not send an idempotency key.
    assert_is_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
    # Each simple attribute mirrors the fixture field of the same name.
    for field in ('account_holder_name', 'account_number_ending', 'bank_name',
                  'country_code', 'created_at', 'currency', 'enabled', 'id',
                  'metadata'):
        assert_equal([getattr(r, field) for r in response.records],
                     [b.get(field) for b in body])
def assert_frame(s):
    """Assert that *s* is a valid Frame bound to the module-level KB."""
    assert_is_instance(s, Frame)
    # The frame must reference the shared knowledge base `ec`.
    assert s._kb == ec
    assert s.test_frame()
def test_slice_types(self):
    """Row and column slices keep HaplotypeArray; indexing decays to ndarray."""
    h = HaplotypeArray(haplotype_data, dtype='i1')
    # Both row and column slices preserve the wrapper type.
    assert_is_instance(h[1:], HaplotypeArray)
    assert_is_instance(h[:, 1:], HaplotypeArray)
    # Row index and column index decay to a plain ndarray.
    for view in (h[0], h[:, 0]):
        assert_is_instance(view, np.ndarray)
        assert_not_is_instance(view, HaplotypeArray)
    # A single item decays to a numpy scalar.
    item = h[0, 0]
    assert_is_instance(item, np.int8)
    assert_not_is_instance(item, HaplotypeArray)
def test_group(module, EXAMPLE):
    """Exercise the full Group protocol of a component *module*:
    incremental add vs. bulk construction, dump/load round-trip,
    remove/merge, and the scoring/sampling entry points.
    """
    assert_hasattr(module, 'Group')
    assert_is_instance(module.Group, type)
    shared = module.Shared.from_dict(EXAMPLE['shared'])
    values = EXAMPLE['values']
    for value in values:
        shared.add_value(value)
    # Incremental adds must match the bulk from_values constructor.
    group1 = module.Group()
    group1.init(shared)
    for value in values:
        group1.add_value(shared, value)
    group2 = module.Group.from_values(shared, values)
    assert_close(group1.dump(), group2.dump())
    # dump() -> init() -> load() must round-trip to the same state.
    group = module.Group.from_values(shared, values)
    dumped = group.dump()
    group.init(shared)
    group.load(dumped)
    assert_close(group.dump(), dumped)
    # Removing every value empties group2, so it differs from group1 ...
    for value in values:
        group2.remove_value(shared, value)
    assert_not_equal(group1, group2)
    # ... and merging group1 back in must be accepted.
    group2.merge(shared, group1)
    # Smoke-test scoring and sampling (no expected values — just no crash).
    for value in values:
        group1.score_value(shared, value)
    for _ in xrange(10):  # NOTE: xrange — this file targets Python 2
        value = group1.sample_value(shared)
        group1.score_value(shared, value)
    module.sample_group(shared, 10)
    group1.score_data(shared)
    group2.score_data(shared)
def test_retrun():  # NOTE(review): name typo ("retrun"); still matches test_* discovery
    """A 'return <expr>' statement parses into a Return node holding the literal."""
    parse = return_statement()
    node, rest = parse('return 5')
    # The parser must consume the entire input.
    eq_(rest, '')
    assert_is_instance(node, Return)
    eq_(node.result, NumericLiteral('5'))
def test_self_axes(self):
    """Every axes object created by PairGrid must be a matplotlib Axes."""
    grid = ag.PairGrid(self.df)
    for axis in grid.axes.flat:
        nt.assert_is_instance(axis, plt.Axes)
    plt.close("all")
def test_value(module, EXAMPLE):
    """The module must expose a Value type and every example value must be one."""
    assert_hasattr(module, 'Value')
    assert_is_instance(module.Value, type)
    for example_value in EXAMPLE['values']:
        assert_is_instance(example_value, module.Value)
def test_get_context_data(self):
    """The view context must be a dict exposing a serialized draft and IMMEDIATE."""
    self.view.draft = self.dr1
    context = self.view.get_context_data()
    nt.assert_is_instance(context, dict)
    nt.assert_in('draft', context)
    nt.assert_is_instance(context['draft'], dict)
    nt.assert_in('IMMEDIATE', context)
def test_self_axes(self):
    """Every axes object created by FacetGrid must be a matplotlib Axes."""
    grid = ag.FacetGrid(self.df, row="a", col="b", hue="c")
    for axis in grid.axes.flat:
        nt.assert_is_instance(axis, plt.Axes)
    plt.close("all")
def test_sequence_config():
    """A SequenceConfig with a name set is ready and builds a Sequence."""
    conf = SequenceConfig(sequence_class=Sequence)
    conf.sequence_name = 'test'
    assert_true(conf.config_ready)
    built = conf.build_sequence()
    assert_is_instance(built, Sequence)
def check_basic_figure_sanity(self, fig, exp_num_subplots, exp_title,
                              exp_legend_exists, exp_xlabel, exp_ylabel,
                              exp_zlabel):
    """Shared sanity checks for a generated 3-D matplotlib figure.

    Verifies figure type, subplot count, the first axes' title, absence of
    tick labels, legend presence/absence, and the three axis labels.
    """
    # check type
    assert_is_instance(fig, mpl.figure.Figure)
    # check number of subplots
    axes = fig.get_axes()
    npt.assert_equal(len(axes), exp_num_subplots)
    # check title (all checks below apply to the first axes only)
    ax = axes[0]
    npt.assert_equal(ax.get_title(), exp_title)
    # shouldn't have tick labels on any of the three axes
    for tick_label in (ax.get_xticklabels() + ax.get_yticklabels() +
                       ax.get_zticklabels()):
        npt.assert_equal(tick_label.get_text(), '')
    # check if legend is present
    legend = ax.get_legend()
    if exp_legend_exists:
        assert_true(legend is not None)
    else:
        assert_true(legend is None)
    # check axis labels
    npt.assert_equal(ax.get_xlabel(), exp_xlabel)
    npt.assert_equal(ax.get_ylabel(), exp_ylabel)
    npt.assert_equal(ax.get_zlabel(), exp_zlabel)
def test_tuple():
    """A tuple containing a lambda round-trips through (un)serialize_object."""
    original = (lambda x: x, 1)
    bufs = serialize_object(original)
    # The first buffer holds a pickle of the canned container.
    canned = pickle.loads(bufs[0])
    nt.assert_is_instance(canned, tuple)
    restored, remainder = unserialize_object(bufs)
    # The restored callable applied to the restored arg matches the original.
    nt.assert_equal(restored[0](restored[1]), original[0](original[1]))
def test_list():
    """A list containing a lambda round-trips through (un)serialize_object."""
    original = [lambda x: x, 1]
    bufs = serialize_object(original)
    # The first buffer holds a pickle of the canned container.
    canned = pickle.loads(bufs[0])
    nt.assert_is_instance(canned, list)
    restored, remainder = unserialize_object(bufs)
    # The restored callable applied to the restored arg matches the original.
    nt.assert_equal(restored[0](restored[1]), original[0](original[1]))
def test_create(self):
    """L5PC: test creation of l5pc template"""
    import bluepyopt
    cell = self.l5pc_template.create()
    nt.assert_is_instance(
        cell, bluepyopt.electrical.celltemplate.CellTemplate)
def test_families():
    """Each family descriptor is a dict keyed by a string family name."""
    for name, descriptor in table.families().iteritems():
        assert_is_instance(name, basestring)
        assert_is_instance(descriptor, dict)
        # Required descriptor keys.
        assert_in('name', descriptor)
        assert_in('max_versions', descriptor)
def test_multiple_formulas_value_without_params():
    """Joining two formulas with '+' yields a zero float value for each."""
    combined = VALID_FORMULA + '+' + VALID_OTHER_FORMULA
    values = send(formula=combined)['payload']['values']
    for formula_name in (VALID_FORMULA, VALID_OTHER_FORMULA):
        assert_in(formula_name, values)
        assert_is_instance(values[formula_name], float)
        assert_equal(values[formula_name], 0)
def test_csr(self):
    """A generated CSR carries the requested CN and verifies with its key."""
    pkey = ezbakeca.private_key()
    request = ezbakeca.csr(pkey, CN="Test")
    nt.assert_is_instance(request, crypto.X509Req)
    nt.assert_equal(request.get_subject().CN, "Test")
    # The request's signature must verify against the generating key.
    nt.assert_true(request.verify(ezbakeca.openssl_key(pkey)))
def test_create_core_default():
    """Test `create_core` with factory defaults."""
    with temporary_config_file() as cfgfile:
        # Standard factory params: auth auto-enable defaults to on.
        core = create_core(cfgfile.name)
        assert_is_instance(core, Core)
        assert_equal(core.auto_enable_auth, True)
def test_get_initial(self):
    """get_initial() must seed the guid and the user's email choice pairs."""
    self.view.user = self.user
    self.view.get_initial()
    initial = self.view.initial
    nt.assert_is_instance(initial, dict)
    nt.assert_equal(initial['guid'], self.user._id)
    addresses = self.user.emails.values_list('address', flat=True)
    # Emails become (value, label) pairs with identical entries.
    nt.assert_equal(initial['emails'], [(a, a) for a in addresses])
def test_default_data_property_ro(self):
    """The 'data' property returns a Maker and rejects assignment."""
    assert_is_instance(self.p.data, Maker)
    def _assign():
        self.p.data = None
    # Read-only: assignment must raise AttributeError.
    assert_raises(AttributeError, _assign)
def test_mandates_list():
    """List mandates and verify cursors, record type, and per-record fields."""
    fixture = helpers.load_fixture('mandates')['list']
    helpers.stub_response(fixture)
    response = helpers.client.mandates.list(*fixture['url_params'])
    body = fixture['body']['mandates']
    assert_is_instance(response, list_response.ListResponse)
    assert_is_instance(response.records[0], resources.Mandate)
    cursors = fixture['body']['meta']['cursors']
    assert_equal(response.before, cursors['before'])
    assert_equal(response.after, cursors['after'])
    # Each simple attribute mirrors the fixture field of the same name.
    for field in ('created_at', 'id', 'metadata', 'next_possible_charge_date',
                  'reference', 'scheme', 'status'):
        assert_equal([getattr(r, field) for r in response.records],
                     [b.get(field) for b in body])
def test_resource_definition_via_dict():
    """Test programmatic resource definition (as opposed to reading a config file)."""
    cfg = gc3libs.config.Configuration()
    # define resource
    name = 'test'
    cfg.resources[name].update(
        name=name,
        type='shellcmd',
        auth='none',
        transport='local',
        max_cores_per_job=1,
        max_memory_per_core=1*GB,
        max_walltime=8*hours,
        max_cores=2,
        architecture=Run.Arch.X86_64,
    )
    # make it
    resources = cfg.make_resources(ignore_errors=False)
    # check result: the dict must have materialized into a Shellcmd backend
    # with a local transport and every limit preserved verbatim
    resource = resources[name]
    assert_equal(resource.name, name)
    assert_is_instance(resource, gc3libs.backends.shellcmd.ShellcmdLrms)
    assert_is_instance(resource.transport,
                       gc3libs.backends.transport.LocalTransport)
    assert_equal(resource.max_cores_per_job, 1)
    assert_equal(resource.max_memory_per_core, 1*GB)
    assert_equal(resource.max_walltime, 8*hours)
    assert_equal(resource.max_cores, 2)
    assert_equal(resource.architecture, Run.Arch.X86_64)
def test_subscriptions_create():
    """Create a subscription and mirror every fixture field on the resource."""
    fixture = helpers.load_fixture('subscriptions')['create']
    helpers.stub_response(fixture)
    response = helpers.client.subscriptions.create(*fixture['url_params'])
    body = fixture['body']['subscriptions']
    assert_is_instance(response, resources.Subscription)
    # Simple attributes are named identically to their fixture keys.
    for field in ('amount', 'count', 'created_at', 'currency', 'day_of_month',
                  'end_date', 'id', 'interval', 'interval_unit', 'metadata',
                  'month', 'name', 'payment_reference', 'start_date', 'status',
                  'upcoming_payments'):
        assert_equal(getattr(response, field), body.get(field))
    # Nested link object.
    assert_equal(response.links.mandate, body.get('links')['mandate'])
def test_get_item(td, raw):
    """Item access wraps nested containers as JSONMapping / JSONSequence."""
    nt.assert_is_instance(td, JSONMapping)
    nt.assert_equal(td['a'], 1)
    # Nested lists/dicts compare equal to their wrapped forms ...
    nt.assert_equal(td['b'], JSONSequence(raw['b']))
    nt.assert_equal(td['c'], JSONMapping(raw['c']))
    # ... and support chained indexing, including negative indices.
    nt.assert_equal(td['b'][0], 1)
    nt.assert_equal(td['c']['bb'][-1], 3)
def test_additional_backend():
    """Test instanciating a non-std backend."""
    tmpfile = _setup_config_file("""
[resource/test]
type = noop
auth = none
transport = local
max_cores_per_job = 1
max_memory_per_core = 1
max_walltime = 8
max_cores = 2
architecture = x86_64
""")
    try:
        cfg = gc3libs.config.Configuration(tmpfile)
        # Register the extra constructor so resource type 'noop' is known.
        cfg.TYPE_CONSTRUCTOR_MAP['noop'] = ('gc3libs.backends.noop', 'NoOpLrms')
        resources = cfg.make_resources(ignore_errors=False)
        # resources are enabled by default
        assert 'test' in resources
        from gc3libs.backends.noop import NoOpLrms
        assert_is_instance(resources['test'], NoOpLrms)  # test types
    finally:
        # since TYPE_CONSTRUCTOR_MAP is a class-level variable, we
        # need to clean up otherwise other tests will see the No-Op
        # backend
        del cfg.TYPE_CONSTRUCTOR_MAP['noop']
        os.remove(tmpfile)
def test_exif_property(self):
    """The 'exif' property returns an exif object and rejects assignment."""
    assert_is_instance(self.p.exif, exif)
    def _assign():
        self.p.exif = None
    # Read-only: assignment must raise AttributeError.
    assert_raises(AttributeError, _assign)
def test_csr_signing(self):
    """A CSR signed by the CA service yields a cert with the same subject."""
    pkey = ca.private_key()
    req = ca.csr(pkey, CN="App", O="Ezbake", OU="Ezbake Apps", C="US")
    pem = self.get_client(5049).csr(self.get_token(), ca.pem_csr(req))
    cert = crypto.load_certificate(crypto.FILETYPE_PEM, pem)
    nt.assert_is_instance(cert, crypto.X509)
    nt.assert_equal(req.get_subject(), cert.get_subject())
def test_get_class_name(self):
    """get_class_name yields the lowercased name for instances and classes alike."""
    for target in (DummyClass(), DummyClass):
        name = Introspection.get_class_name(target)
        assert_is_instance(name, str)
        assert_equals(name, 'dummyclass')
def test_that_xlsx_is_handled_by_xls_table_set(self):
    """Should emit a DeprecationWarning."""
    handle = horror_fobj('simple.xlsx')
    # The deprecated XLSX class must still be an XLSTableSet.
    assert_is_instance(XLSXTableSet(handle), XLSTableSet)
def test_events_get():
    """Fetch a single event and mirror every fixture field, including the
    nested 'details' and 'links' sub-objects, onto the Event resource."""
    fixture = helpers.load_fixture('events')['get']
    helpers.stub_response(fixture)
    response = helpers.client.events.get(*fixture['url_params'])
    body = fixture['body']['events']
    assert_is_instance(response, resources.Event)
    # GET requests must not send an idempotency key.
    assert_is_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
    assert_equal(response.action, body.get('action'))
    assert_equal(response.created_at, body.get('created_at'))
    assert_equal(response.customer_notifications,
                 body.get('customer_notifications'))
    assert_equal(response.id, body.get('id'))
    assert_equal(response.metadata, body.get('metadata'))
    assert_equal(response.resource_type, body.get('resource_type'))
    # Nested details sub-object.
    assert_equal(response.details.bank_account_id,
                 body.get('details')['bank_account_id'])
    assert_equal(response.details.cause, body.get('details')['cause'])
    assert_equal(response.details.currency, body.get('details')['currency'])
    assert_equal(response.details.description,
                 body.get('details')['description'])
    assert_equal(response.details.not_retried_reason,
                 body.get('details')['not_retried_reason'])
    assert_equal(response.details.origin, body.get('details')['origin'])
    # '_property' presumably avoids shadowing the `property` builtin — the
    # wire field is named 'property'.
    assert_equal(response.details._property, body.get('details')['property'])
    assert_equal(response.details.reason_code,
                 body.get('details')['reason_code'])
    assert_equal(response.details.scheme, body.get('details')['scheme'])
    assert_equal(response.details.will_attempt_retry,
                 body.get('details')['will_attempt_retry'])
    # Nested links sub-object.
    assert_equal(response.links.creditor, body.get('links')['creditor'])
    assert_equal(response.links.customer, body.get('links')['customer'])
    assert_equal(response.links.customer_bank_account,
                 body.get('links')['customer_bank_account'])
    assert_equal(response.links.instalment_schedule,
                 body.get('links')['instalment_schedule'])
    assert_equal(response.links.mandate, body.get('links')['mandate'])
    assert_equal(response.links.new_customer_bank_account,
                 body.get('links')['new_customer_bank_account'])
    assert_equal(response.links.new_mandate, body.get('links')['new_mandate'])
    assert_equal(response.links.organisation,
                 body.get('links')['organisation'])
    assert_equal(response.links.parent_event,
                 body.get('links')['parent_event'])
    assert_equal(response.links.payer_authorisation,
                 body.get('links')['payer_authorisation'])
    assert_equal(response.links.payment, body.get('links')['payment'])
    assert_equal(response.links.payout, body.get('links')['payout'])
    assert_equal(response.links.previous_customer_bank_account,
                 body.get('links')['previous_customer_bank_account'])
    assert_equal(response.links.refund, body.get('links')['refund'])
    assert_equal(response.links.subscription,
                 body.get('links')['subscription'])
# ## Part 1: Exploring files and the system # First, let's start out by listing the contents of the directory `/user/` in HDFS. When you do this, you will pipe the output into a file called temp1.txt so that you may pass the assertion tests below (the easiest way to do this piping is with the `>temp1.txt` statement after your command-line statement). # In[20]: get_ipython().system('$HADOOP_PREFIX/bin/hdfs dfs -ls / >temp1.txt') #get contents and pipe it to temp1 # In[21]: res1 = get_ipython().getoutput('cat temp1.txt') assert_is_instance(res1, list) assert_is_instance(res1[0], str) assert_is_instance(res1[1], str) assert_true(res1[1], "Found 1 items") assert_true(res1[1][:40], "drwxr-xr-x - data_scientist supergroup") # ## Part 2 Free space: # Now, let's issue a Hadoop command that allows us to see the free space available to us, making sure to make it human readable. Like before, you will pipe the output into a file called temp2.txt so that you may pass the assertion tests below (this piping can be done by putting `>temp2.txt` after your command-line statement). # In[22]: get_ipython().system('$HADOOP_PREFIX/bin/hdfs dfs -df -h >temp2.txt') #pipe free space info to temp2
def test_Lazy_exteded():  # NOTE(review): "exteded" is a typo for "extended"
    """The extended Lazy constructor still yields a Lazy instance."""
    from couchpotato.very.lazy import Lazy
    wrapped = Lazy(lambda x: x, (), {})
    assert_is_instance(wrapped, Lazy)
def test_GitRepo_instance_from_existing(path):
    """Constructing GitRepo on an existing repo yields an instance with a .git dir."""
    repo = GitRepo(path)
    assert_is_instance(repo, GitRepo, "GitRepo was not created.")
    assert_true(exists(opj(path, '.git')))
def test_get_object(self):
    """The view must resolve its object to a User instance."""
    resolved = self.view.get_object()
    nt.assert_is_instance(resolved, User)
def test_read_simple_xlsx(self):
    """Reading simple.xlsx yields one table whose header row is text and
    whose data rows carry natively typed cells (datetime / float / text)."""
    fh = horror_fobj('simple.xlsx')
    table_set = XLSTableSet(fh)
    assert_equal(1, len(table_set.tables))
    row_set = table_set.tables[0]
    first_row = list(row_set.sample)[0]
    third_row = list(row_set.sample)[2]
    # Header cells are all text (`unicode`: this file targets Python 2).
    assert_is_instance(first_row[0].value, unicode)
    assert_is_instance(first_row[1].value, unicode)
    assert_is_instance(first_row[2].value, unicode)
    # Data cells keep their native spreadsheet types.
    assert_is_instance(third_row[0].value, datetime.datetime)
    assert_is_instance(third_row[1].value, float)
    assert_is_instance(third_row[2].value, unicode)
    assert_equal(first_row[0].value, 'date')
    assert_equal(first_row[1].value, 'temperature')
    assert_equal(first_row[2].value, 'place')
    assert_equal(third_row[0].value, datetime.datetime(2011, 1, 2, 0, 0))
    assert_equal(third_row[1].value, -1.0)
    assert_equal(third_row[2].value, u'Galway')
    # Every row must have exactly three columns.
    for row in list(row_set):
        assert 3 == len(row), row
def test_connection():
    """The session wraps the configured sqlite DB with a live connection/cursor."""
    session = database.session
    nt.assert_equal(session.dbfile, const.DATABASE)
    # The database file must already exist (not freshly created).
    nt.assert_false(session.is_new_db)
    nt.assert_is_instance(session.connection, sqlite3.Connection)
    nt.assert_is_instance(session.cursor, sqlite3.Cursor)
def check_children(xblock):
    """Every child reference of the xblock must be a UsageKey."""
    for child_key in xblock.children:
        assert_is_instance(child_key, UsageKey)
def test_get_templates():
    """get_templates() must return a jinja2 Environment."""
    templates_env = folium.utilities.get_templates()
    nt.assert_is_instance(templates_env, jinja2.environment.Environment)
def test_put_chunk_noraise(self):
    """A bad chunk put returns a BadChunk result instead of raising."""
    outcome = self.store.put_chunk_noraise("x", (1, 2), [])
    assert_is_instance(outcome, BadChunk)
def check_children(payload):
    """Every child id in the payload definition must be a string."""
    children = payload['definition']['children']
    for child_id in children:
        assert_is_instance(child_id, basestring)
def test_get_parser(self):
    """get_parser() must build an argparse.ArgumentParser."""
    parser = build_utils.get_parser()
    assert_is_instance(parser, argparse.ArgumentParser)
def test_handle_metric_new_metric(self):
    """Registering a new gauge metric returns '' and creates a Gauge."""
    result = wsgi.handle_metric_new(req(dict(type="gauge")), "test")
    assert_equal(result, "")
    created = metrics.metric("test")
    assert_is_instance(created, metrics.simple_metrics.Gauge)
def test_Entity():
    """Create an Entity and exercise property/annotation access, both on a
    fresh Entity (i == 0) and one round-tripped through Entity.create
    (i == 1)."""
    # Test the basics of creating and accessing properties on an entity
    for i in range(2):
        e = Entity(name='Test object',
                   description='I hope this works',
                   annotations=dict(foo=123,
                                    nerds=['chris', 'jen', 'janey'],
                                    annotations='How confusing!'),
                   properties=dict(
                       annotations='/repo/v1/entity/syn1234/annotations',
                       md5='cdef636522577fc8fb2de4d95875b27c',
                       parentId='syn1234'),
                   concreteType='org.sagebionetworks.repo.model.Data')
        # Should be able to create an Entity from an Entity
        if i == 1:
            e = Entity.create(e)
        # Properties are reachable via attribute, item, and .properties access.
        assert_equals(e.parentId, 'syn1234')
        assert_equals(e['parentId'], 'syn1234')
        assert_equals(e.properties['parentId'], 'syn1234')
        assert_equals(e.properties.parentId, 'syn1234')
        # Annotations likewise surface as attributes and items.
        assert_equals(e.foo, 123)
        assert_equals(e['foo'], 123)
        assert_equals(e.annotations['foo'], 123)
        assert_equals(e.annotations.foo, 123)
        assert_true(hasattr(e, 'parentId'))
        assert_true(hasattr(e, 'foo'))
        assert_false(hasattr(e, 'qwerqwer'))
        # Annotations is a bit funny, because there is a property call
        # 'annotations', which will be masked by a member of the object
        # called 'annotations'. Because annotations are open-ended, we
        # might even have an annotations called 'annotations', which gets
        # really confusing.
        # NOTE(review): collections.Mapping is collections.abc.Mapping on
        # Python 3.3+ and was removed from `collections` in 3.10; this file
        # appears to target Python 2 (see `unicode`/`xrange` elsewhere).
        assert_is_instance(e.annotations, collections.Mapping)
        assert_is_instance(e['annotations'], collections.Mapping)
        assert_equals(e.properties['annotations'],
                      '/repo/v1/entity/syn1234/annotations')
        assert_equals(e.properties.annotations,
                      '/repo/v1/entity/syn1234/annotations')
        assert_equals(e.annotations.annotations, 'How confusing!')
        assert_equals(e.annotations['annotations'], 'How confusing!')
        assert_equals(e.nerds, ['chris', 'jen', 'janey'])
        assert_true(all([k in e for k in
                         ['name', 'description', 'foo', 'nerds',
                          'annotations', 'md5', 'parentId']]))
        # Test modifying properties
        e.description = 'Working, so far'
        assert_equals(e['description'], 'Working, so far')
        e['description'] = 'Wiz-bang flapdoodle'
        assert_equals(e.description, 'Wiz-bang flapdoodle')
        # Test modifying annotations
        e.foo = 999
        assert_equals(e.annotations['foo'], 999)
        e['foo'] = 12345
        assert_equals(e.annotations.foo, 12345)
        # Test creating a new annotation
        e['bar'] = 888
        assert_equals(e.annotations['bar'], 888)
        e['bat'] = 7788
        assert_equals(e.annotations['bat'], 7788)
        # Test replacing annotations object
        e.annotations = {
            'splat': 'a totally new set of annotations',
            'foo': 456
        }
        assert_equals(e.foo, 456)
        assert_equals(e['foo'], 456)
        assert_is_instance(e.annotations, collections.Mapping)
        assert_is_instance(e['annotations'], collections.Mapping)
        assert_equals(e.annotations.foo, 456)
        # The 'annotations' *property* (the URI) must survive replacement.
        assert_equals(e.properties['annotations'],
                      '/repo/v1/entity/syn1234/annotations')
        assert_equals(e.properties.annotations,
                      '/repo/v1/entity/syn1234/annotations')
        # test unicode properties
        e.train = '時刻表には記載されない 月への列車が来ると聞いて'
        e.band = "Motörhead"
        e.lunch = "すし"
def test_calculate_with_axes():
    """POST a /api/1/calculate request whose scenario sweeps
    salaire_imposable over a 50-point axis and check that the last
    'impo' value (highest income) is negative."""
    test_case = {
        'output_format': 'variables',
        "scenarios": [{
            "period": {
                "start": 2014,
                "unit": "year"
            },
            # Sweep the main person's taxable income from 0 to 100000
            # over 50 points.
            "axes": [{
                "count": 50,
                "max": 100000,
                "min": 0,
                "name": "salaire_imposable"
            }],
            "test_case": {
                "individus": [
                    {
                        "id": "Personne Principale",
                        "salaire_imposable": 0,
                        "statut_marital": 2
                    },
                    {
                        "id": "Personne Conjoint",
                        "salaire_imposable": 2
                    },
                ],
                "familles": [{
                    "id": "Famille 1",
                    "parents": ["Personne Principale", "Personne Conjoint"],
                    "enfants": []
                }],
                "foyers_fiscaux": [{
                    "id": "Déclaration d'impôt 1",
                    "declarants": ["Personne Principale", "Personne Conjoint"],
                    "personnes_a_charge": []
                }],
                "menages": [{
                    "id": "Logement principal 1",
                    "personne_de_reference": "Personne Principale",
                    "conjoint": "Personne Conjoint",
                    "enfants": []
                }],
            },
        }],
        'variables': ['impo']
    }
    req = Request.blank(
        '/api/1/calculate',
        body=json.dumps(test_case),
        headers=(('Content-Type', 'application/json'), ),
        method='POST',
    )
    res = req.get_response(common.app)
    assert_equal(res.status_code, 200, res.body)
    res_body_json = json.loads(res.body)
    impo_values = res_body_json['value'][0]['impo']['2014']
    assert_is_instance(impo_values, list)
    # At the top income point 'impo' must be negative (sign convention of
    # the API — presumably an amount owed; verify against the formula).
    assert_true(impo_values[-1] < 0)
def test_init(self):
    """Alert Processor - Initialization"""
    # The loaded configuration must be a plain dict.
    config = self.processor.config
    assert_is_instance(config, dict)
def test_is_instance(self):
    """The class-level fixture attribute must be a plain list."""
    fixture = TestToolsBIT.mylist
    assert_is_instance(fixture, list)
def test_AnnexRepo_instance_brand_new(path):
    """Creating an AnnexRepo at *path* initializes a git repository there."""
    repo = AnnexRepo(path)
    assert_is_instance(repo, AnnexRepo, "AnnexRepo was not created.")
    assert_true(os.path.exists(os.path.join(path, '.git')))
def test_create_dispatcher(self):
    """Alert Processor - Create Dispatcher - Success"""
    # An aws-s3 output descriptor must resolve to an OutputDispatcher.
    result = self.processor._create_dispatcher('aws-s3:unit_test_bucket')
    assert_is_instance(result, OutputDispatcher)
def test_put_chunk_noraise(self):
    """put_chunk_noraise on an existing array returns BadChunk for bad input."""
    name = self.array_name('x')
    self.store.create_array(name)
    outcome = self.store.put_chunk_noraise(name, (1, 2), [])
    assert_is_instance(outcome, BadChunk)
def check_table(table):
    """The rendered table is a string containing every required member."""
    assert_is_instance(table, str)
    for expected in (COMMAND, DESCRIPTION, TEST_RUN):
        assert_in(expected, table)
def test_multi_target_init():
    """Build a CPU device with one hidden layer feeding two softmax outputs
    (targets t1 / t2), check parameter bookkeeping, run a forward pass
    against hand-computed cross-entropy values, then run one train step and
    verify the updated parameters against reference arrays.
    """
    config = Config()
    config.update({
        "multiprocessing": False,
        "blocking": True,
        "device": "cpu",
        "num_epochs": 1,
        "num_inputs": 3,
        "num_outputs": {
            "t1": 4,
            "t2": 5
        },
        "learning_rate": 1.0,
    })
    # Topology: identity hidden layer fw0 -> two independent softmax outputs.
    config.network_topology_json = """
    {
    "fw0": {"class": "hidden", "activation": "identity", "n_out": 3},
    "out1": {"class": "softmax", "loss": "ce", "target": "t1", "from": ["fw0"]},
    "out2": {"class": "softmax", "loss": "ce", "target": "t2", "from": ["fw0"]}
    }
    """
    device = Device("cpu", config=config, blocking=True)
    assert_true(device.trainnet, "train network initialized")
    assert_true(device.testnet, "test network initialized")
    param_vars = device.trainnet.get_all_params_vars()
    print("params:", param_vars)
    assert_equal(len(param_vars), 6, "W, b vars for each out, and fw")
    num_params = get_num_params(param_vars)
    assert_equal(num_params, (3 * 3 + 3) + (3 * 4 + 4) + (3 * 5 + 5),
                 "W, b for each out, and fw")
    assert_in("fw0", device.testnet.hidden)
    assert_in("out1", device.testnet.output)
    assert_in("out2", device.testnet.output)
    assert_is(device.testnet.j["t1"], device.testnet.output["out1"].index)
    assert_true(device.updater)
    update_list = device.updater.getUpdateList()
    print("update list:")
    pprint(update_list)
    update_dict = dict(update_list)
    assert_equal(len(update_dict), len(update_list),
                 "all params in update list only once")
    # Each layer exposes exactly its own W and b parameters.
    assert_in("fw0", device.trainnet.hidden)
    assert_equal(len(device.trainnet.hidden), 1)
    assert_in("W_in_data_fw0", device.trainnet.hidden["fw0"].params)
    assert_in("b_fw0", device.trainnet.hidden["fw0"].params)
    assert_equal(len(device.trainnet.hidden["fw0"].params), 2)
    assert_in("out1", device.trainnet.output)
    assert_equal(len(device.trainnet.output), 2)
    assert_in("W_in_fw0_out1", device.trainnet.output["out1"].params)
    assert_in("b_out1", device.trainnet.output["out1"].params)
    assert_equal(len(device.trainnet.output["out1"].params), 2)
    # Every parameter variable must appear in the updater's update list.
    assert_in(device.trainnet.hidden["fw0"].params["W_in_data_fw0"],
              update_dict)
    assert_in(device.trainnet.hidden["fw0"].params["b_fw0"], update_dict)
    assert_in(device.trainnet.output["out1"].params["W_in_fw0_out1"],
              update_dict)
    assert_in(device.trainnet.output["out1"].params["b_out1"], update_dict)
    assert_in(device.trainnet.output["out2"].params["W_in_fw0_out2"],
              update_dict)
    assert_in(device.trainnet.output["out2"].params["b_out2"], update_dict)
    # assert_equal(len(update_dict), 6)  # updater adds other stuff...
    # Set net params.
    net_params = {
        "fw0": {
            "W_in_data_fw0": numpy.identity(3, dtype="float32"),
            "b_fw0": numpy.zeros((3, ), dtype="float32")
        },
        "out1": {
            "W_in_fw0_out1":
                numpy.arange(0.0, 1.2, 0.1, dtype="float32").reshape((3, 4)),
            "b_out1": numpy.arange(0.0, 4, dtype="float32")
        },
        "out2": {
            "W_in_fw0_out2":
                numpy.arange(0.0, 1.5, 0.1, dtype="float32").reshape((3, 5)),
            "b_out2": numpy.arange(0.0, 5, dtype="float32")
        }
    }
    device.trainnet.set_params_by_dict(net_params)
    device.testnet.set_params_by_dict(net_params)
    # Show params.
    for p in param_vars:
        print("init %s:" % p)
        pprint(p.get_value())
    # Init dataset.
    dataset = StaticDataset(data=[{
        "data": numpy.array([[0.1, 0.2, -0.3]], dtype="float32"),
        "t1": numpy.array([2]),
        "t2": numpy.array([4])
    }], output_dim=config.typed_value("num_outputs"))
    dataset.init_seq_order()
    assert_equal(dataset.is_data_sparse("data"), False)
    assert_equal(dataset.is_data_sparse("t1"), True)
    assert_equal(dataset.is_data_sparse("t2"), True)
    # Copy to device allocation.
    success = assign_dev_data_single_seq(device, dataset, 0)
    assert_true(success, "failed to allocate & assign data")
    # Check allocated data.
    assert_equal(device.targets["data"].shape,
                 (1, 1, 3))  # input shape. (time,batch,dim)
    assert_in("t1", device.targets)
    assert_in("t2", device.targets)
    assert_equal(device.targets["t1"].shape, (1, 1))
    assert_equal(device.targets["t2"].shape, (1, 1))
    assert_equal(device.output_index["data"].shape, (1, 1))
    numpy.testing.assert_equal(device.output_index["data"],
                               numpy.array([[1]]))
    assert_equal(device.output_index["t1"].shape, (1, 1))
    numpy.testing.assert_equal(device.output_index["t1"], numpy.array([[1]]))
    # Forward test.
    device.update_data()
    device.testnet.costs["out1"].name = "out1_cost"  # nice in the func graph
    out_i1 = device.testnet.output["out1"].index
    out_i1_nonzero = device.testnet.output["out1"].i
    # Recompute out1's cross-entropy by hand to compare against the net cost.
    nll1, pcx1 = T.nnet.crossentropy_softmax_1hot(
        x=device.testnet.output["out1"].y_m[out_i1_nonzero],
        y_idx=device.testnet.output["out1"].y_data_flat[out_i1_nonzero])
    forward_func = theano.function(
        inputs=[device.block_start, device.block_end],
        outputs=[
            device.testnet.j["t1"], out_i1, out_i1_nonzero[0], nll1, pcx1,
            device.testnet.costs["out1"],
            device.testnet.output["out1"].p_y_given_x,
            device.testnet.costs["out2"],
            device.testnet.output["out2"].p_y_given_x
        ],
        givens=device.make_givens(device.testnet),
        no_default_updates=True,
        on_unused_input='warn',
        name="forward")
    #print "forward func:"
    #theano.printing.debugprint(forward_func)
    net_j1, out_i1_val, out_i1_nz_val, nll1_val, pcx1_val, t1_cost, t1_y, \
        t2_cost, t2_y = forward_func(0, 1)
    print("forward results:")
    pprint(net_j1)
    pprint(out_i1_val)
    pprint(out_i1_nz_val)
    pprint(nll1_val)
    pprint(pcx1_val)
    pprint(t1_cost)
    pprint(t1_y)
    pprint(t2_cost)
    pprint(t2_y)
    assert_equal(net_j1, numpy.array([[1]]))
    assert_equal(out_i1_val, numpy.array([[1]]))
    assert_equal(out_i1_nz_val, numpy.array([0]))
    # The net's cost must equal the hand-built cross-entropy graph.
    assert_almost_equal(nll1_val, numpy.array([t1_cost]))
    numpy.testing.assert_almost_equal(t1_y, pcx1_val[None, ...])
    assert_almost_equal(t1_cost, 1.440189698561195, places=6)
    assert_almost_equal(t2_cost, 0.45191439593759336, places=6)
    numpy.testing.assert_almost_equal(
        t1_y,
        numpy.array([[[0.0320586, 0.08714432, 0.23688282, 0.64391426]]]),
        decimal=6)
    numpy.testing.assert_almost_equal(
        t2_y,
        numpy.array([[[
            0.01165623, 0.03168492, 0.08612854, 0.23412166, 0.63640865
        ]]]),
        decimal=6)
    # One train step.
    device.set_learning_rate(config.typed_value("learning_rate"))
    device.run("train")
    output_list, outputs_format = device.result()
    assert_is_instance(output_list, list)
    assert_true(outputs_format, "for train, we should always get the format")
    outputs = Device.make_result_dict(output_list, outputs_format)
    pprint(outputs)
    assert_in("cost:out1", outputs)
    assert_greater(outputs["cost:out1"], 0)
    # Training cost before the update must match the forward-pass cost.
    assert_almost_equal(outputs["cost:out1"], t1_cost)
    # Get net params.
    params = device.get_net_train_params(device.trainnet)
    # Reference values after one SGD step with learning rate 1.0.
    references_params = {
        "W_in_data_fw0": numpy.array(
            [[1.00055406e+00, 5.54056978e-04, 5.54056978e-04],
             [1.10811396e-03, 1.00110811e+00, 1.10811396e-03],
             [-1.66217093e-03, -1.66217093e-03, 9.98337829e-01]]),
        "b_fw0": numpy.array([0.00554057, 0.00554057, 0.00554057]),
        "W_in_fw0_out1": numpy.array(
            [[-0.00320586, 0.09128557, 0.27631172, 0.23560857],
             [0.39358828, 0.48257114, 0.75262344, 0.57121715],
             [0.80961758, 0.9261433, 0.77106485, 1.29317428]]),
        "b_out1": numpy.array(
            [-0.0320586, 0.91285568, 2.76311718, 2.35608574]),
        "W_in_fw0_out2": numpy.array(
            [[-1.16562310e-03, 9.68315079e-02, 1.91387146e-01,
              2.76587834e-01, 4.36359135e-01],
             [4.97668754e-01, 5.93663016e-01, 6.82774291e-01,
              7.53175669e-01, 9.72718271e-01],
             [1.00349687e+00, 1.10950548e+00, 1.22583856e+00,
              1.37023650e+00, 1.29092259e+00]]),
        "b_out2": numpy.array(
            [-0.01165623, 0.96831508, 1.91387146, 2.76587834, 4.36359135])
    }
    assert_equal(len(param_vars), len(params))
    for p, v in zip(param_vars, params):
        print("%s:" % p)
        pprint(v)
        assert_true(p.name)
        numpy.testing.assert_almost_equal(references_params[p.name], v,
                                          decimal=6)
def test_exception(self):
    """An InventoryException built from a message string is printable and
    is an instance of InventoryException."""
    # Fixed typo: the parameter was misspelled `sefl`. Test runners bind
    # the instance positionally, so renaming it to `self` is safe.
    e = InventoryException("test")
    print(e)
    assert_is_instance(e, InventoryException)
def test_combi_auto_enc():
  """Train a net with a softmax "output" layer plus an "auto-enc" (SSE on the
  input data) layer, then rebuild the net without the auto-enc layer and
  verify that the "output" layer cost is exactly the same in both runs."""
  config = Config()
  config.update({
      "multiprocessing": False,
      "blocking": True,
      "device": "cpu",
      "num_epochs": 1,
      "num_inputs": 3,
      "num_outputs": {"classes": 2},
      "learning_rate": 1.0,
      "network": {
          "output": {"class": "softmax", "loss": "ce", "target": "classes"},
          "auto-enc": {"class": "softmax", "loss": "sse", "dtype": "float32", "target": "data"}
      }
  })
  device = Device("cpu", config=config, blocking=True)

  def get_net_params(with_auto_enc=True):
    # Deterministic initial weights so both runs start from the same point.
    params = {
        "output": {
            "W_in_data_output": numpy.arange(0.1, 0.7, 0.1, dtype="float32").reshape((3, 2)),
            "b_output": numpy.arange(0.0, 2, dtype="float32")
        }
    }
    if with_auto_enc:
      params["auto-enc"] = {
          "W_in_data_auto-enc": numpy.arange(0.1, 1.0, 0.1, dtype="float32").reshape((3, 3)),
          "b_auto-enc": numpy.arange(0.0, 3, dtype="float32")
      }
    return params

  def train_step():
    # Runs one train step on the current `device` and returns its outputs.
    device.set_learning_rate(config.typed_value("learning_rate"))
    device.run("train")
    output_list, outputs_format = device.result()
    assert_is_instance(output_list, list)
    assert_true(outputs_format, "for train, we should always get the format")
    outputs = Device.make_result_dict(output_list, outputs_format)
    pprint(outputs)
    return outputs

  device.trainnet.set_params_by_dict(get_net_params())
  device.testnet.set_params_by_dict(get_net_params())
  # Show params.
  for param in device.trainnet.get_all_params_vars():
    print("init %s:" % param)
    pprint(param.get_value())

  # Single-sequence dataset, copied into the device allocation.
  dataset = StaticDataset(
      data=[{
          "data": numpy.array([[0.1, 0.2, -0.3]], dtype="float32"),
          "classes": numpy.array([1]),
      }],
      output_dim=config.typed_value("num_outputs"))
  dataset.init_seq_order()
  assert_true(assign_dev_data_single_seq(device, dataset, 0),
              "failed to allocate & assign data")

  # First run: one train step with both layers present.
  outputs = train_step()
  assert_in("cost:output", outputs)
  assert_in("cost:auto-enc", outputs)
  expected_cost_output = 0.3132616877555847
  assert_almost_equal(outputs["cost:output"], expected_cost_output, places=6)
  exact_cost_output = outputs["cost:output"]
  assert_almost_equal(outputs["cost:auto-enc"], 1.7544001340866089, places=6)

  # Second run: drop the auto-enc layer from the network and repeat.
  del config.typed_value("network")["auto-enc"]
  device = Device("cpu", config=config, blocking=True)
  device.trainnet.set_params_by_dict(get_net_params(with_auto_enc=False))
  device.testnet.set_params_by_dict(get_net_params(with_auto_enc=False))
  for param in device.trainnet.get_all_params_vars():
    print("second run, init %s:" % param)
    pprint(param.get_value())
  dataset.init_seq_order()  # reset. probably not needed
  assert_true(assign_dev_data_single_seq(device, dataset, 0),
              "failed to allocate & assign data")

  outputs = train_step()
  assert_in("cost:output", outputs)
  assert_not_in("cost:auto-enc", outputs)
  # The output-layer cost must not be influenced by the auto-enc layer.
  assert_almost_equal(outputs["cost:output"], expected_cost_output, places=6)
  assert_equal(outputs["cost:output"], exact_cost_output)
def test_create_dispatcher():
    """StreamAlertOutput - Create Dispatcher"""
    # The 'aws-s3' service name must resolve to the S3 dispatcher class.
    created = StreamAlertOutput.create_dispatcher('aws-s3', REGION, FUNCTION_NAME, CONFIG)
    assert_is_instance(created, S3Output)
def test_get_frame(self):
    """get_frame() returns a (flag, frame) pair: the flag is falsy here and
    the frame is a numpy ndarray."""
    got_frame, frame = self.videoThread.get_frame()
    assert_false(got_frame)
    assert_is_instance(frame, np.ndarray)
def test_combi_auto_enc_longer():
  """Like test_combi_auto_enc, but trains over 10 dummy sequences with
  Adadelta enabled, and compares the summed "output" layer cost between a
  run with and a run without the "auto-enc" layer."""
  config = Config()
  config.update({
      "multiprocessing": False,
      "blocking": True,
      "device": "cpu",
      "num_epochs": 1,
      "num_inputs": 3,
      "num_outputs": {"classes": 2},
      "learning_rate": 1.0,
      "adadelta": True,
      "network": {
          "output": {"class": "softmax", "loss": "ce", "target": "classes"},
          "auto-enc": {"class": "softmax", "loss": "sse", "dtype": "float32", "target": "data"}
      }
  })
  device = Device("cpu", config=config, blocking=True)

  def get_net_params(with_auto_enc=True):
    # Deterministic initial weights so both runs start from the same point.
    params = {
        "output": {
            "W_in_data_output": numpy.arange(0.1, 0.7, 0.1, dtype="float32").reshape((3, 2)),
            "b_output": numpy.arange(0.0, 2, dtype="float32")
        }
    }
    if with_auto_enc:
      params["auto-enc"] = {
          "W_in_data_auto-enc": numpy.arange(0.1, 1.0, 0.1, dtype="float32").reshape((3, 3)),
          "b_auto-enc": numpy.arange(0.0, 3, dtype="float32")
      }
    return params

  def train_seq(dev, seq_idx):
    # Copies one sequence to the device, runs one train step, and returns
    # the result dict.
    assert_true(assign_dev_data_single_seq(dev, dataset, seq_idx),
                "failed to allocate & assign data")
    dev.set_learning_rate(config.typed_value("learning_rate"))
    dev.run("train")
    output_list, outputs_format = dev.result()
    assert_is_instance(output_list, list)
    assert_true(outputs_format, "for train, we should always get the format")
    outputs = Device.make_result_dict(output_list, outputs_format)
    print(("seq %i" % seq_idx))
    pprint(outputs)
    assert_in("cost:output", outputs)
    return outputs

  device.trainnet.set_params_by_dict(get_net_params())
  device.testnet.set_params_by_dict(get_net_params())
  # Show params.
  for param in device.trainnet.get_all_params_vars():
    print("init %s:" % param)
    pprint(param.get_value())

  dataset = DummyDataset(input_dim=config.typed_value("num_inputs"),
                         output_dim=config.typed_value("num_outputs"),
                         num_seqs=10)
  dataset.init_seq_order()

  # First run: both layers present; accumulate the output-layer cost.
  cost_output_sum = 0.0
  for seq_idx in range(dataset.num_seqs):
    outputs = train_seq(device, seq_idx)
    assert_in("cost:auto-enc", outputs)
    cost_output_sum += outputs["cost:output"]

  # Second run: drop the auto-enc layer from the network and repeat.
  del config.typed_value("network")["auto-enc"]
  device = Device("cpu", config=config, blocking=True)
  device.trainnet.set_params_by_dict(get_net_params(with_auto_enc=False))
  device.testnet.set_params_by_dict(get_net_params(with_auto_enc=False))
  for param in device.trainnet.get_all_params_vars():
    print("second run, init %s:" % param)
    pprint(param.get_value())
  dataset.init_seq_order()  # reset

  cost2_output_sum = 0.0
  for seq_idx in range(dataset.num_seqs):
    outputs = train_seq(device, seq_idx)
    assert_not_in("cost:auto-enc", outputs)
    cost2_output_sum += outputs["cost:output"]

  # The output-layer cost must not be influenced by the auto-enc layer.
  assert_equal(cost_output_sum, cost2_output_sum)
  assert_almost_equal(cost_output_sum, 16.028842568397522, places=6)
def test_get_event_camera_chosen(self):
    """The camera-chosen event object is a threading.Event instance."""
    # Fixed: the original checked against `threading._Event`, a private
    # alias that no longer exists in Python 3, where `threading.Event` is
    # itself the class. Check against the public name instead.
    assert_is_instance(self.videoThread.get_event_camera_chosen(), threading.Event)
def test_get_number_of_cameras(self):
    """The reported number of cameras is an integer."""
    camera_count = self.videoThread.get_number_of_cameras()
    assert_is_instance(camera_count, int)