def test_split(self):
    h5_list = to.split_hdf5(self.fpath, self.dir_tmp, tot_floats=(3 * 4 * 2 * 3))
    # hdf5_list = to.split_hdf5(self.fpath, self.dir_tmp, tot_floats=(10*4*2*3))
    # hdf5_list = to.split_hdf5(self.fpath, self.dir_tmp, tot_floats=(1*4*2*3))
    assert_is_instance(h5_list, list)
    assert_equals(len(h5_list), 2)
    name_, ext = os.path.splitext(os.path.basename(self.fpath))
    for p in h5_list:
        assert_in(name_, p)
        assert_true(p.endswith(ext), "Unexpected extension")
    offset = 0
    with h5py.File(self.fpath, 'r') as h_src:
        for p in h5_list:
            with h5py.File(p, 'r') as h:
                assert_list_equal(['x1', 'x2'], h.keys())
                for k in h.keys():
                    min_len = min(len(h[k]), len(h_src[k]))
                    sub_actual = h[k][0:min_len]
                    sub_expected = h_src[k][offset:offset + min_len]
                    assert_true(np.all(sub_actual == sub_expected))
            offset += min_len
def test_to_block_continuous_long(self):
    s = SwirlFrame()
    for i, row in enumerate(s.rows):
        row.set(i, i + 1, 0xff, 0)
    assert_list_equal(
        [1, 0xff, 0,
         1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10,
         11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20,
         21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30,
         31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40,
         41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 50, 50,
         51, 51, 52, 52, 53, 53, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58, 59, 59, 60, 60,
         61, 61, 62, 62, 63, 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68, 69, 69, 70, 70,
         71, 71, 72, 72, 73, 73, 74, 74, 75, 75, 76, 76, 77, 77, 78, 78, 79, 79, 80, 80,
         81, 81, 82, 82, 83, 83, 84, 84, 85, 85, 86, 86, 87, 87, 88, 88, 89, 89, 90, 90,
         91, 91, 92, 92, 93, 93, 94, 94, 95, 95, 96, 96, 97, 97, 98, 98, 99, 99, 100, 100,
         101, 101, 102, 102, 103, 103, 104, 104, 105, 105, 106, 106, 107, 107, 108, 108, 109, 109, 110, 110,
         111, 111, 112, 112, 113, 113, 114, 114, 115, 115, 116, 116, 117, 117, 118, 118, 119, 119, 120, 120,
         121, 121, 122, 122, 123, 123, 124, 124, 125, 125, 126, 126,
         127, 0xe1, 127,
         128, 128, 129, 129, 130, 130, 131, 131, 132, 132, 133, 133, 134, 134, 135, 135, 136, 136, 137, 137,
         138, 138, 139, 139, 140, 140, 141, 141, 142, 142, 143, 143, 144, 144, 145, 145, 146, 146, 147, 147,
         148, 148, 149, 149, 150, 150, 151, 151, 152, 152, 153, 153, 154, 154, 155, 155, 156, 156, 157, 157,
         158, 158, 159, 159, 160, 160, 161, 161, 162, 162, 163, 163, 164, 164, 165, 165, 166, 166, 167, 167,
         168, 168, 169, 169, 170, 170, 171, 171, 172, 172, 173, 173, 174, 174, 175, 175, 176, 176, 177, 177,
         178, 178, 179, 179, 180, 180, 181, 181, 182, 182, 183, 183, 184, 184, 185, 185, 186, 186, 187, 187,
         188, 188, 189, 189, 190, 190, 191, 191, 192, 192, 193, 193, 194, 194, 195, 195, 196, 196, 197, 197,
         198, 198, 199, 199, 200, 200, 201, 201, 202, 202, 203, 203, 204, 204, 205, 205, 206, 206, 207, 207,
         208, 208, 209, 209, 210, 210, 211, 211, 212, 212, 213, 213, 214, 214, 215, 215, 216, 216, 217, 217,
         218, 218, 219, 219, 220, 220, 221, 221, 222, 222, 223, 223,
         224, 0],
        s.block_rep().to_list())
def test_pandas_series_loading(self):
    """Pandas Series objects are correctly loaded"""
    # Test valid series types
    name = ['_x', ' name']
    length = [0, 1, 2]
    index_key = [None, 'ix', 1]
    index_types = ['int', 'char', 'datetime', 'Timestamp']
    value_key = [None, 'x', 1]
    value_types = ['int', 'char', 'datetime', 'Timestamp', 'float',
                   'numpy float', 'numpy int']

    series_info = product(name, length, index_key, index_types,
                          value_key, value_types)
    for n, l, ikey, itype, vkey, vtype in series_info:
        index = sequences[itype](l)
        series = pd.Series(sequences[vtype](l), index=index, name=n)

        vkey = vkey or series.name
        expected = [{'idx': Data.serialize(i), 'col': vkey,
                     'val': Data.serialize(v)}
                    for i, v in zip(index, series)]

        data = Data.from_pandas(series, name=n, series_key=vkey)
        nt.assert_list_equal(expected, data.values)
        nt.assert_equal(n, data.name)
        data.to_json()

    # Missing a name
    series = pd.Series(np.random.randn(10))
    data = Data.from_pandas(series)
    nt.assert_equal(data.name, 'table')
def test_profile_table_for_join_with_profile_attrs(self):
    profile_output = profile_table_for_join(self.table, ['attr'])

    expected_output_attrs = ['Unique values', 'Missing values', 'Comments']
    # verify whether the output dataframe has the necessary attributes.
    assert_list_equal(list(profile_output.columns.values),
                      expected_output_attrs)

    expected_unique_column = ['4 (80.0%)']
    # verify whether correct values are present in 'Unique values' column.
    assert_list_equal(list(profile_output['Unique values']),
                      expected_unique_column)

    expected_missing_column = ['1 (20.0%)']
    # verify whether correct values are present in 'Missing values' column.
    assert_list_equal(list(profile_output['Missing values']),
                      expected_missing_column)

    expected_comments = ['Joining on this attribute will ignore 1 (20.0%) rows.']
    # verify whether correct values are present in 'Comments' column.
    assert_list_equal(list(profile_output['Comments']), expected_comments)

    # verify whether index name is set correctly in the output dataframe.
    assert_equal(profile_output.index.name, 'Attribute')

    expected_index_column = ['attr']
    # verify whether correct values are present in the dataframe index.
    assert_list_equal(list(profile_output.index.values), expected_index_column)
def test_should_create_varnish_api_for_connected_servers(self):
    expected_construct_args = [
        call(['127.0.0.1', '6082', 1.0], 'secret-1'),
        call(['127.0.0.2', '6083', 1.0], 'secret-2'),
        call(['127.0.0.3', '6084', 1.0], 'secret-3')]
    sample_extractor = Mock(servers=servers)
    api_init_side_effect = {
        'secret-1': Exception(),
        'secret-2': None,
        'secret-3': None
    }
    with patch('vaas.cluster.cluster.ServerExtractor', Mock(return_value=sample_extractor)):
        with patch.object(
            VarnishApi,
            '__init__',
            side_effect=lambda host_port_timeout, secret: api_init_side_effect[secret]
        ) as construct_mock:
            with patch('telnetlib.Telnet.close', Mock()):
                varnish_cluster = VarnishApiProvider()
                api_objects = []
                for api in varnish_cluster.get_connected_varnish_api():
                    """
                    Workaround - we cannot mock __del__ method:
                    https://docs.python.org/3/library/unittest.mock.html
                    We inject sock field to eliminate warning raised by
                    cleaning actions in __del__ method
                    """
                    api.sock = None
                    api_objects.append(api)
                assert_equals(2, len(api_objects))
                assert_list_equal(expected_construct_args, construct_mock.call_args_list)
def test_set_config(app1, cli, td):
    dct = {
        '1': '1', 2: 2, 1.1: 1.1, 'a': 1, 3: ['1', 2],
        4: {'a': 1, '1': '1', 2: 2}}

    # verify set_config does not delete old keys
    nt.assert_in('key1', td[app1])
    nt.assert_equal(1, td[app1]['key1'])
    td = RedisMapping()  # resets RedisMapping's cache, from prev line
    set_config(app1, dct, cli=cli)
    nt.assert_in('key1', td[app1])
    nt.assert_equal(1, td[app1]['key1'])

    # verify set_config adds new keys
    nt.assert_equal(
        len(list(td[app1].keys())), len(list(dct.keys()) + ['key1']))
    nt.assert_true(all(
        x in (list(dct.keys()) + ['key1']) for x in td[app1].keys()))
    nt.assert_equal('1', td[app1]['1'])
    nt.assert_equal(2, td[app1][2])
    nt.assert_equal(1.1, td[app1][1.1])
    nt.assert_equal(1, td[app1]['a'])
    nt.assert_is_instance(td[app1][3], JSONSequence)
    nt.assert_is_instance(td[app1][4], JSONMapping)
    nt.assert_list_equal(list(td[app1][3]), dct[3])
    nt.assert_dict_equal(dict(td[app1][4]), dct[4])
def test_join_sets():
    result = join_sets([set([1, 2, 3]), set([3, 4, 5]), set([2, 6])], 2, 4)
    assert_list_equal(result, [set([1, 2, 3, 6]), set([3, 4, 5])])

    result = join_sets([set([1, 2])], 3, 4)
    assert_list_equal(result, [set([1, 2])])
def test_read_4bpp_graphic_from_block():
    source = Block()
    source.from_list([0b01010110, 0b00001011, 0b11001110, 0b10010110,
                      0b01110001, 0b00111011, 0b00001011, 0b10011110,
                      0b00011000, 0b00000011, 0b10000001, 0b11101011,
                      0b00000100, 0b01000101, 0b01010110, 0b10001111,
                      0b00101100, 0b10110000, 0b01010110, 0b10110010,
                      0b01010000, 0b11000000, 0b00111000, 0b10010111,
                      0b00101101, 0b11111100, 0b01111101, 0b11101010,
                      0b10101111, 0b10110111, 0b01100000, 0b11101110])
    target = [[0 for x in range(8)] for y in range(8)]
    assert_equal(32, read_4bpp_graphic_from_block(target=target, source=source,
                                                  offset=0, x=0, y=0, bit_offset=0))
    assert_list_equal(target,
                      [[8, 1, 12, 9, 6, 5, 3, 2],
                       [11, 5, 8, 14, 1, 7, 15, 0],
                       [8, 13, 3, 7, 2, 0, 2, 3],
                       [10, 0, 4, 14, 7, 10, 11, 9],
                       [8, 8, 12, 9, 13, 12, 2, 6],
                       [11, 14, 14, 4, 14, 4, 10, 7],
                       [12, 2, 12, 8, 4, 15, 12, 14],
                       [10, 13, 12, 1, 10, 11, 11, 2]])
def test_from_iter(self):
    """Test data from single iter"""
    test = Data.from_iter([10, 20, 30])
    test1 = Data.from_iter((10, 20, 30))
    values = [{'x': 0, 'y': 10}, {'x': 1, 'y': 20}, {'x': 2, 'y': 30}]
    nt.assert_list_equal(test.values, values)
    nt.assert_list_equal(test1.values, values)
def test_crawler_generation():
    args = docopt(doc, "genconfig trial1 http://www.google.com -t crawler")
    gc = genconfig.GenconfigCommand(args)
    gc.execute_command()
    with open(os.path.join(os.getcwd(), 'trial1.json'), 'r') as f:
        config = json.load(f)
    nextx = [
        {
            "follow_link": "",
            "scraping": {
                "data": [{"field": "", "selector": "", "attr": "",
                          "default": "", "connector": ""}],
                "table": [
                    {
                        "table_type": "",
                        "header": "",
                        "prefix": "",
                        "suffix": "",
                        "selector": "",
                        "attr": "",
                        "default": "",
                        "connector": ""
                    }
                ]
            }
        }
    ]
    assert_list_equal(config['scraping']['next'], nextx)
def test_to_list(self):
    self.color.from_tuple((20, 40, 69))
    test_list = [0] * 5
    self.color.to_list(test_list)
    assert_list_equal(test_list, [20, 40, 69, 0, 0])
    self.color.to_list(test_list, 2)
    assert_list_equal(test_list, [20, 40, 20, 40, 69])
def test_read_labels(self, mock_dat):
    # mock methods and properties of Datum objects
    mock_dat.return_value.ParseFromString.return_value = ""
    type(mock_dat.return_value).label = PropertyMock(side_effect=range(5))
    assert_list_equal(r.read_labels(self.path_lmdb), range(5))
def test_init3(self):
    # partial initialization
    expected_result = TestLine.expected_result
    line = spectro.Line(restwlen=1.282, redshift=1., name='Pa_beta')
    result = [line.restwlen, line.obswlen, line.redshift, line.name]
    assert_list_equal(result, expected_result)
def test_wqstd(self):
    nt.assert_true(isinstance(self.db.wqstd, pandas.DataFrame))
    nt.assert_tuple_equal(self.db.wqstd.shape, (48, 4))
    nt.assert_list_equal(
        ['parameter', 'units', 'lower_limit', 'upper_limit'],
        self.db.wqstd.columns.tolist()
    )
def test_write_2bpp_graphic_to_block_offset_xy():
    source = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
              [0, 0, 2, 1, 2, 3, 2, 1, 2, 1],
              [0, 0, 2, 3, 1, 0, 2, 3, 2, 2],
              [0, 0, 3, 0, 3, 2, 2, 2, 0, 2],
              [0, 0, 1, 3, 3, 0, 2, 0, 2, 3],
              [0, 0, 1, 0, 1, 1, 0, 3, 3, 3],
              [0, 0, 1, 3, 3, 3, 3, 2, 1, 2],
              [0, 0, 2, 2, 3, 1, 2, 2, 1, 0],
              [0, 0, 2, 0, 3, 3, 2, 3, 1, 0],
              [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
    target = Block()
    target.from_list([0xff] * 18)
    assert_equal(16, write_2bpp_graphic_to_block(source=source, target=target,
                                                 offset=1, x=2, y=1, bit_offset=0))
    assert_list_equal(target.to_list(),
                      [0xff,
                       0b01010101, 0b10111010, 0b01100100, 0b11001111,
                       0b10100000, 0b10111101, 0b11100001, 0b01101011,
                       0b10110111, 0b00000111, 0b11111010, 0b01111101,
                       0b00110010, 0b11101100, 0b00110110, 0b10111100,
                       0xff])
def test_info():
    # setup
    g = zarr.group(store=dict(), chunk_store=dict(),
                   synchronizer=zarr.ThreadSynchronizer())
    g.create_group('foo')
    z = g.zeros('bar', shape=10, filters=[numcodecs.Adler32()])

    # test group info
    items = g.info_items()
    keys = sorted([k for k, _ in items])
    expected_keys = sorted([
        'Type', 'Read-only', 'Synchronizer type', 'Store type',
        'Chunk store type', 'No. members', 'No. arrays', 'No. groups',
        'Arrays', 'Groups', 'Name'
    ])
    assert_list_equal(expected_keys, keys)

    # test array info
    items = z.info_items()
    keys = sorted([k for k, _ in items])
    expected_keys = sorted([
        'Type', 'Data type', 'Shape', 'Chunk shape', 'Order', 'Read-only',
        'Filter [0]', 'Compressor', 'Synchronizer type', 'Store type',
        'Chunk store type', 'No. bytes', 'No. bytes stored', 'Storage ratio',
        'Chunks initialized', 'Name'
    ])
    assert_list_equal(expected_keys, keys)
def test_add_records_to_table2(self):
    expected_result = [2, [TestObsTable.obsrecord, TestObsTable.obsrecord]]
    TestObsTable.obstable.add_records_to_table(
        [TestObsTable.obsrecord, TestObsTable.obsrecord])
    result = []
    result.append(TestObsTable.obstable.length)
    result.append(TestObsTable.obstable.records)
    assert_list_equal(result, expected_result)
def _do_test_sparql(self, datasetname, sparql, expected):
    dataset = getattr(self, datasetname)
    result = dataset.query(sparql)
    if isinstance(expected, list):
        assert_list_equal(list(result), expected)
    else:
        assert_set_equal(set(result), expected)
def test_gzipped_files_are_iterable_as_normal():
    agz = _make_temporary_gzip(pybedtools.example_filename('a.bed'))
    agz = pybedtools.BedTool(agz)
    a = pybedtools.example_bedtool('a.bed')
    for i in agz:
        print(i)
    assert_list_equal(list(a), list(agz))
def test_learning_curve_from_dir(self):
    lc = LearningCurveFromPath(os.path.split(self.fpath)[0])
    assert_is_not_none(lc)
    train_keys, test_keys = lc.parse()
    assert_list_equal(train_keys,
                      ['NumIters', 'Seconds', 'LearningRate', 'loss'])
    assert_list_equal(test_keys,
                      ['NumIters', 'Seconds', 'LearningRate', 'accuracy', 'loss'])
def test_read_2bpp_graphic_from_block_offset_xy():
    source = Block()
    source.from_list([0b01010101, 0b10111010, 0b01100100, 0b11001111,
                      0b10100000, 0b10111101, 0b11100001, 0b01101011,
                      0b10110111, 0b00000111, 0b11111010, 0b01111101,
                      0b00110010, 0b11101100, 0b00110110, 0b10111100,
                      5])
    target = [[0 for x in range(10)] for y in range(10)]
    assert_equal(16, read_2bpp_graphic_from_block(target=target, source=source,
                                                  offset=0, x=2, y=1, bit_offset=0))
    assert_list_equal(target,
                      [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                       [0, 0, 2, 1, 2, 3, 2, 1, 2, 1],
                       [0, 0, 2, 3, 1, 0, 2, 3, 2, 2],
                       [0, 0, 3, 0, 3, 2, 2, 2, 0, 2],
                       [0, 0, 1, 3, 3, 0, 2, 0, 2, 3],
                       [0, 0, 1, 0, 1, 1, 0, 3, 3, 3],
                       [0, 0, 1, 3, 3, 3, 3, 2, 1, 2],
                       [0, 0, 2, 2, 3, 1, 2, 2, 1, 0],
                       [0, 0, 2, 0, 3, 3, 2, 3, 1, 0],
                       [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
def test_simple_case():
    a = []
    b = [a]
    c = [a, b]
    for perm in permutations([a, b, c]):
        result = list(toposorted(perm, lambda x: x))
        assert_list_equal([a, b, c], result)
def test_to_block(self):
    block = Block()
    block.from_list(range(1, 6))
    self.pointer.address = 0xabcdef
    self.pointer.to_block(block, 1)
    assert_list_equal(block[0:5].to_list(), [1, 0xef, 0xcd, 0xab, 5])
def test_append_val(self):
    x = FakeClass()
    knownlist = ['item1', 'item2', 'NA']
    testlist = ['item1']
    testlist = station._append_val(x, testlist)
    testlist = station._append_val(None, testlist)
    ntools.assert_list_equal(testlist, knownlist)
def test_write_2bpp_graphic_to_block():
    source = [[2, 1, 2, 3, 2, 1, 2, 1],
              [2, 3, 1, 0, 2, 3, 2, 2],
              [3, 0, 3, 2, 2, 2, 0, 2],
              [1, 3, 3, 0, 2, 0, 2, 3],
              [1, 0, 1, 1, 0, 3, 3, 3],
              [1, 3, 3, 3, 3, 2, 1, 2],
              [2, 2, 3, 1, 2, 2, 1, 0],
              [2, 0, 3, 3, 2, 3, 1, 0]]
    target = Block()
    target.from_list([0] * 16)
    assert_equal(16, write_2bpp_graphic_to_block(source=source, target=target,
                                                 offset=0, x=0, y=0, bit_offset=0))
    assert_list_equal(target.to_list(),
                      [0b01010101, 0b10111010, 0b01100100, 0b11001111,
                       0b10100000, 0b10111101, 0b11100001, 0b01101011,
                       0b10110111, 0b00000111, 0b11111010, 0b01111101,
                       0b00110010, 0b11101100, 0b00110110, 0b10111100])
def test_load():
    """backends.load(): works as expected.

    This is an interesting function to test, because it is just a wrapper
    that returns a TestrunResult object. So most of the testing should be
    happening in the tests for each backend.

    However, we can test this by injecting a fake backend, and ensuring
    that we get back what we expect. What we do is inject list(), which
    means that we should get back [file_path].
    """
    backends.BACKENDS['test_backend'] = backends.register.Registry(
        extensions=['.test_extension'],
        backend=None,
        load=lambda x, y: [x],  # y is for a compression value
        meta=None,
    )
    file_path = 'foo.test_extension'
    with open(file_path, 'w') as f:
        f.write('foo')

    test = backends.load(file_path)
    nt.assert_list_equal([file_path], test)
def test_plain_attachment(self, gpg):
    attachments = [("blabla", None)]
    remaining_attachments = import_public_keys_from_attachments(gpg, attachments)

    assert_list_equal(attachments, remaining_attachments)
    assert not gpg.import_keys.called
def test_getting_uncompleted_todos_when_todos_is_not_none(self):
    todo1 = {
        'userId': 1,
        'id': 1,
        'title': 'Make the bed',
        'completed': False
    }
    todo2 = {
        'userId': 2,
        'id': 2,
        'title': 'Walk the dog',
        'completed': True
    }

    # Configure mock to return a response with a JSON-serialized list of todos.
    self.mock_get_todos.return_value = Mock()
    self.mock_get_todos.return_value.json.return_value = [todo1, todo2]

    # Call the service, which will get a list of todos filtered on completed.
    uncompleted_todos = get_uncompleted_todos()

    # Confirm that the mock was called.
    assert_true(self.mock_get_todos.called)
    # Confirm that the expected filtered list of todos was returned.
    assert_list_equal(uncompleted_todos, [todo1])
def test_comparator():
    def comparator(a, b):
        a = a.lower()
        b = b.lower()
        if a < b:
            return -1
        if a > b:
            return 1
        else:
            return 0

    comparator_name = b"CaseInsensitiveComparator"
    with tmp_db('comparator', create=False) as name:
        db = DB(name, create_if_missing=True, comparator=comparator,
                comparator_name=comparator_name)
        keys = [
            b'aaa',
            b'BBB',
            b'ccc',
        ]
        with db.write_batch() as wb:
            for key in keys:
                wb.put(key, b'')
        assert_list_equal(
            sorted(keys, key=lambda s: s.lower()),
            list(db.iterator(include_value=False)))
def test_empty(self):
    parsed = self.p.parse_args([])
    n.assert_false(parsed.urls)
    n.assert_false(parsed.force_colnames)
    n.assert_false(parsed.silent)
    n.assert_in('.pluplusch', parsed.cache_dir)
    n.assert_list_equal(parsed.catalog, [])
def test_get_labels_lut(self):
    lut = du.get_labels_lut(self.labels, self.subset)
    assert_is_instance(lut, np.ndarray)
    assert_equals(lut.dtype, int)
    assert_equals(lut.ndim, 1)
    assert_equals(lut.shape, (9 + 1 + 1,))
    lut_expected = [0] * (9 + 1 + 1)
    lut_expected[4] = 5
    lut_expected[5] = 2
    lut_expected[7] = 2
    lut_expected[9] = 9
    assert_list_equal(lut.tolist(), lut_expected)
def test_head_tail():
    query = schema.User * schema.Language
    n = 5
    frame = query.head(n, format='frame')
    assert_true(isinstance(frame, pandas.DataFrame))
    array = query.head(n, format='array')
    assert_equal(array.size, n)
    assert_equal(len(frame), n)
    assert_list_equal(query.primary_key, frame.index.names)

    n = 4
    frame = query.tail(n, format='frame')
    array = query.tail(n, format='array')
    assert_equal(array.size, n)
    assert_equal(len(frame), n)
    assert_list_equal(query.primary_key, frame.index.names)
def test_magic_color():
    ip = get_ipython()
    c = ip.Completer

    s, matches = c.complete(None, 'colo')
    nt.assert_in('%colors', matches)
    s, matches = c.complete(None, 'colo')
    nt.assert_not_in('NoColor', matches)
    s, matches = c.complete(None, 'colors ')
    nt.assert_in('NoColor', matches)
    s, matches = c.complete(None, '%colors ')
    nt.assert_in('NoColor', matches)
    s, matches = c.complete(None, 'colors NoCo')
    nt.assert_list_equal(['NoColor'], matches)
    s, matches = c.complete(None, '%colors NoCo')
    nt.assert_list_equal(['NoColor'], matches)
def test_str_identity_holds_after_compile():
    test_input = [
        '+++--->>>.....<',
        '+++---[>.<]...<',
        '+++[-+.][>+.]>....<',
        '[-+.][>+.]>....<',
        '+++[-+.][>+.]',
        '+++[-+[++].][>+.]',
        '[[[[[[[[++]]]]]]]]',
        '[>[>[>[>[>[>[>[++]]]]]]]]'
    ]
    assert_list_equal(
        lift_str(map(compile, test_input)),
        lift_str(test_input),
    )
def test_splitlines_with_spans():
    data = [
        ('1\n1', ['1', '1'], [(0, 2), (2, 3)]),
        ('2\r2', ['2', '2'], [(0, 2), (2, 3)]),
        ('3\n\r3', ['3', '3'], [(0, 3), (3, 4)]),
        ('4\r\n4', ['4', '4'], [(0, 3), (3, 4)]),
        ('5\r\r\n5', ['5', '', '5'], [(0, 2), (2, 4), (4, 5)]),
        ('\r\n\r\n\n\r', ['', '', ''], [(0, 2), (2, 4), (4, 6)]),
    ]
    for text, expected_lines, expected_spans in data:
        actual_lines, actual_spans = splitlines_with_spans(text)
        assert_list_equal(actual_lines, expected_lines,
                          'Actual lines do not match the expected '
                          'lines for text:\n{0}'.format(text))
        assert_list_equal(actual_spans, expected_spans,
                          'Actual spans do not match the expected '
                          'spans for text:\n{0}'.format(text))
def test_names_keyword_multiple_names():
    grid = RasterModelGrid((4, 5), spacing=(2., 2.))
    grid.add_field('node', 'air__temperature', np.arange(20.))
    grid.add_field('node', 'land_surface__elevation', np.arange(20.))

    with cdtemp() as _:
        files = write_esri_ascii(
            'test.asc', grid,
            names=['air__temperature', 'land_surface__elevation'])
        files.sort()
        assert_list_equal(
            files,
            ['test_air__temperature.asc', 'test_land_surface__elevation.asc'])
        for fname in files:
            assert_true(os.path.isfile(fname))
def test_catalog_report_harvest_all(self):
    """catalog_report() will mark every dataset for harvest if harvest='all'."""
    catalog = os.path.join(self.SAMPLES_DIR, "full_data.json")

    actual = self.dj.catalog_report(
        catalog, harvest='all', catalog_id="modernizacion")

    expected = list(self.EXPECTED_REPORT)
    expected[0]["harvest"] = 1
    expected[1]["harvest"] = 1

    # Explicitly check that the 'harvest' value is as expected
    assert_equal(actual[0]["harvest"], expected[0]["harvest"])
    # Check that the whole list is as expected
    assert_list_equal(actual, expected)
def test_contents(self):
    """
    test the ability of tables to self-populate using the contents property
    """
    # test contents
    assert_true(self.user)
    assert_true(len(self.user) == len(self.user.contents))
    u = self.user.fetch(order_by=['username'])
    assert_list_equal(list(u['username']),
                      sorted([s[0] for s in self.user.contents]))

    # test prepare
    assert_true(self.subject)
    assert_true(len(self.subject) == len(self.subject.contents))
    u = self.subject.fetch(order_by=['subject_id'])
    assert_list_equal(list(u['subject_id']),
                      sorted([s[0] for s in self.subject.contents]))
def test_groupby_07(self):
    """
    test_groupby_07: Groupby type 'count'
    """
    random.seed(1)

    groupby_cols = ['f0']
    groupby_lambda = lambda x: x[0]
    agg_list = [['f4', 'count'], ['f5', 'count'], ['f6', 'count']]
    num_rows = 1000

    # -- Data --
    g = self.gen_dataset_count_with_NA(num_rows)
    data = np.fromiter(g, dtype='S1,f8,i8,i4,f8,i8,i4')

    # -- Bcolz --
    print('--> Bcolz')
    self.rootdir = tempfile.mkdtemp(prefix='bcolz-')
    os.rmdir(self.rootdir)  # folder should be empty
    fact_bcolz = bquery.ctable(data, rootdir=self.rootdir)
    fact_bcolz.flush()

    fact_bcolz.cache_factor(groupby_cols, refresh=True)
    result_bcolz = fact_bcolz.groupby(groupby_cols, agg_list)
    print(result_bcolz)

    # Itertools result
    print('--> Itertools')
    result_itt = self.helper_itt_groupby(data, groupby_lambda)
    uniquekeys = result_itt['uniquekeys']
    print(uniquekeys)

    ref = []
    for item in result_itt['groups']:
        f4 = 0
        f5 = 0
        f6 = 0
        for row in item:
            f0 = groupby_lambda(row)
            if row[4] == row[4]:
                f4 += 1
            f5 += 1
            f6 += 1
        ref.append([f0, f4, f5, f6])

    assert_list_equal(
        [list(x) for x in result_bcolz],
        ref)
def test_get_regulations():
    """
    Test default get regulations behavior.
    :return:
    """
    lexnlp_tests.test_extraction_func_on_test_data(
        get_regulations,
        expected_data_converter=lambda d: [
            (reg_type, reg_code) for reg_type, reg_code, _reg_str in d
        ],
        return_source=False, as_dict=False)
    lexnlp_tests.test_extraction_func_on_test_data(
        get_regulations,
        expected_data_converter=lambda d: [
            (reg_type, reg_code, reg_str) for reg_type, reg_code, reg_str in d
        ],
        return_source=True, as_dict=False)

    # TODO Impl test_extraction_func_on_test_data() comparing lists of dicts
    for (_i, text, _input_args, expected) in lexnlp_tests.iter_test_data_text_and_tuple():
        expected_no_source_dict = [{
            'regulation_type': reg_type,
            'regulation_code': reg_code
        } for reg_type, reg_code, _reg_str in expected]
        expected_source_dict = [{
            'regulation_type': reg_type,
            'regulation_code': reg_code,
            'regulation_str': reg_str
        } for reg_type, reg_code, reg_str in expected]
        assert_list_equal(
            list(lexnlp_tests.benchmark_extraction_func(
                get_regulations, text, return_source=False, as_dict=True)),
            expected_no_source_dict)
        assert_list_equal(
            list(lexnlp_tests.benchmark_extraction_func(
                get_regulations, text, return_source=True, as_dict=True)),
            expected_source_dict)
def test_filter_tables(self, tokenizer, overlap_size, comp_op, allow_missing,
                       args, expected_pairs):
    overlap_filter = OverlapFilter(tokenizer, overlap_size, comp_op, allow_missing)
    actual_candset = overlap_filter.filter_tables(*args)

    expected_output_attrs = ['_id']
    l_out_prefix = self.default_l_out_prefix
    r_out_prefix = self.default_r_out_prefix

    # Check for l_out_prefix in args.
    if len(args) > 8:
        l_out_prefix = args[8]
    expected_output_attrs.append(l_out_prefix + args[2])

    # Check for r_out_prefix in args.
    if len(args) > 9:
        r_out_prefix = args[9]
    expected_output_attrs.append(r_out_prefix + args[3])

    # Check for l_out_attrs in args.
    if len(args) > 6:
        if args[6]:
            l_out_attrs = remove_redundant_attrs(args[6], args[2])
            for attr in l_out_attrs:
                expected_output_attrs.append(l_out_prefix + attr)

    # Check for r_out_attrs in args.
    if len(args) > 7:
        if args[7]:
            r_out_attrs = remove_redundant_attrs(args[7], args[3])
            for attr in r_out_attrs:
                expected_output_attrs.append(r_out_prefix + attr)

    # verify whether the output table has the necessary attributes.
    assert_list_equal(list(actual_candset.columns.values),
                      expected_output_attrs)

    actual_pairs = set()
    for idx, row in actual_candset.iterrows():
        actual_pairs.add(','.join((str(row[l_out_prefix + args[2]]),
                                   str(row[r_out_prefix + args[3]]))))

    # verify whether the actual pairs and the expected pairs match.
    assert_equal(len(expected_pairs), len(actual_pairs))
    common_pairs = actual_pairs.intersection(expected_pairs)
    assert_equal(len(common_pairs), len(expected_pairs))
def test_setitem_slice(self):
    self.block.from_file(os.path.join(TEST_DATA_DIR, "binaries", "1kb_rand.bin"))
    assert_list_equal(self.block[0:3].to_list(), [0x25, 0x20, 0x38])
    self.block[0:3] = [0xeb, 0x15, 0x66]
    assert_list_equal(self.block[0:3].to_list(), [0xeb, 0x15, 0x66])
    self.block[0:1024] = [5] * 1024
    assert_equal(self.block[0:1024].to_list(), [5] * 1024)

    assert_raises(InvalidArgumentError, self.block.__setitem__, slice(5, 0), [])
    assert_raises(InvalidArgumentError, self.block.__setitem__, slice(55, 55), [])
    assert_raises(OutOfBoundsError, self.block.__setitem__, slice(-1, 2), [])
    assert_raises(OutOfBoundsError, self.block.__setitem__, slice(1, 1025), [0] * 1024)
    assert_raises(OutOfBoundsError, self.block.__setitem__, slice(1024, 1025), [1])
    assert_raises(InvalidArgumentError, self.block.__setitem__, slice(0, 1), [])
    assert_raises(InvalidArgumentError, self.block.__setitem__, slice(0, 1), [1, 2, 3])
    assert_raises(InvalidArgumentError, self.block.__setitem__, slice(0, 5), [1, 2])
def test_save_passengers_location(mock_validate_token, mock_passengers):
    input = {'lat': 47.2737, 'lon': 28.284}
    header = {'token': '838298'}
    mock_validate_token.return_value = True
    mock_passengers.update_one.return_value = None
    app2 = app.test_client()
    response = app2.put('/passengers/1/location', data=json.dumps(input),
                        headers=header, content_type='application/json')
    response_json = json.loads(response.get_data())
    assert_list_equal([response_json], [200])
def test_read_1bpp_graphic_from_block_offset_target():
    source = Block()
    source.from_list([
        0b00000011,
        0b01110000,
        0b01001001,
        0b11110000,
        0b01001010,
        0b11001000,
        0b01110001,
        0b00000001
    ])
    target = [[0 for x in range(10)] for y in range(10)]
    assert_equal(
        8, read_1bpp_graphic_from_block(source, target, 0, x=2, y=1, height=8))
    assert_list_equal(
        target,
        [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
         [0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
         [0, 0, 0, 1, 0, 0, 1, 0, 0, 1],
         [0, 0, 1, 1, 1, 1, 0, 0, 0, 0],
         [0, 0, 0, 1, 0, 0, 1, 0, 1, 0],
         [0, 0, 1, 1, 0, 0, 1, 0, 0, 0],
         [0, 0, 0, 1, 1, 1, 0, 0, 0, 1],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
def test_find_resources_dir(self, mocked_isdir):
    # arrange
    mocked_isdir.side_effect = [False, False, True]
    start_dir = '/foo/bar/baz'
    expected_result = '/foo/test_resources'

    # act
    actual_result = helpers._find_test_resources_dir(start_dir)

    # assert
    assert_equal(expected_result, actual_result)
    assert_list_equal(mocked_isdir.mock_calls, [
        call('/foo/bar/baz/test_resources'),
        call('/foo/bar/test_resources'),
        call('/foo/test_resources'),
    ])
def test_write_custom_section(self):
    """Test ``write()`` when a custom section is defined"""
    # Create a ``SampleSheet`` with a [Manifests] section
    sample_sheet1 = SampleSheet()
    sample_sheet1.add_section('Manifests')
    sample_sheet1.Manifests['PoolRNA'] = 'RNAMatrix.txt'

    # Write to string and make temporary file
    string_handle = StringIO(newline=None)
    sample_sheet1.write(string_handle)
    string_handle.seek(0)
    filename = string_as_temporary_file(string_handle.read())

    # Read temporary file and confirm the section and its data exist.
    sample_sheet2 = SampleSheet(filename)
    assert_list_equal(list(sample_sheet2.Manifests.keys()), ['PoolRNA'])
    eq_(sample_sheet2.Manifests.PoolRNA, 'RNAMatrix.txt')
def test_merge():
    data_before_merge = [
        {'time': '100', 'station_id': 1, 'temp': 45},
        {'time': '100', 'station_id': 1, 'wind_dir': 200},
        {'time': '200', 'station_id': 1, 'temp': 44},
        {'time': '300', 'station_id': 3, 'temp': 38},
        {'time': '300', 'station_id': 3, 'wind_dir': 300, 'wind_speed': 7},
    ]
    expected = [
        {'time': '100', 'station_id': 1, 'temp': 45, 'wind_dir': 200},
        {'time': '200', 'station_id': 1, 'temp': 44},
        {'time': '300', 'station_id': 3, 'temp': 38, 'wind_dir': 300, 'wind_speed': 7},
    ]
    assert_list_equal(expected, merge(data_before_merge))
def test_filter_candset(self, tokenizer, sim_measure_type, threshold,
                        args, expected_pairs):
    size_filter = SizeFilter(tokenizer, sim_measure_type, threshold)
    actual_output_candset = size_filter.filter_candset(*args)

    # verify whether the output table has the necessary attributes.
    assert_list_equal(list(actual_output_candset.columns.values),
                      list(args[0].columns.values))

    actual_pairs = set()
    for idx, row in actual_output_candset.iterrows():
        actual_pairs.add(','.join((str(row[args[1]]), str(row[args[2]]))))

    # verify whether the actual pairs and the expected pairs match.
    assert_equal(len(expected_pairs), len(actual_pairs))
    common_pairs = actual_pairs.intersection(expected_pairs)
    assert_equal(len(common_pairs), len(expected_pairs))
def test_baseline(self):
    with utils.OverwriteState(True), utils.WorkSpace(self.ws):
        wq, cols = analysis.preprocess_wq(
            monitoring_locations=self.ml,
            subcatchments=self.sc,
            id_col='CID',
            ds_col='DS_CID',
            output_path=self.results,
            value_columns=self.wq_cols
        )

        expected = 'expected.shp'
        pptest.assert_shapefiles_are_close(
            os.path.join(self.ws, expected),
            os.path.join(self.ws, self.results),
        )
        nt.assert_true(isinstance(wq, numpy.ndarray))
        nt.assert_list_equal(cols, self.expected_cols)
def test_merge_with_special_byte():
    factory = bl.BlockFactory()
    block1 = factory.new_block()
    block1.data = [
        filepack.SPECIAL_BYTE, filepack.SPECIAL_BYTE, 2, 1, 3,
        filepack.SPECIAL_BYTE, 1, 0, 0
    ]
    block2 = factory.new_block()
    block2.data = [4, 3, 6, filepack.SPECIAL_BYTE, filepack.EOF_BYTE]
    data = filepack.merge(factory.blocks)
    expected_data = [
        filepack.SPECIAL_BYTE, filepack.SPECIAL_BYTE, 2, 1, 3, 4, 3, 6
    ]
    assert_list_equal(data, expected_data)
def run_sentence_token_test(text, result, lowercase=False, stopword=False):
    """
    Base test method to run against text with given results.
    """
    # Get list from text
    sentence_list = get_sentence_list(text)

    # Check length first
    assert len(sentence_list) == len(result)

    # Check each sentence matches
    for i in range(len(sentence_list)):
        tokens = lexnlp_tests.benchmark_extraction_func(
            get_token_list, sentence_list[i],
            lowercase=lowercase, stopword=stopword)
        assert_list_equal(tokens, result[i])
def test_params_as_list(self):
    params = self.tbx._params_as_list()
    names = [str(p.name) for p in params]
    known_names = [
        'workspace',
        'subcatchments',
        'ID_column',
        'downstream_ID_column',
        'monitoring_locations',
        'ml_type_col',
        'included_ml_types',
        'value_columns',
        'streams',
        'output_layer',
        'add_output_to_map',
    ]
    nt.assert_list_equal(names, known_names)
def test_fname_pairs(self):
    a = ['foo1_a.txt', os.path.join('foo', 'bar_x.txt'), 'foo5.txt']
    b = [os.path.join('oof', 'bar_x.txt'), 'foo5_b.txt', 'foo2_b.txt']
    pairs = fs.fname_pairs(a, b)
    for x, y in pairs:
        assert_in(x, a)
        assert_in(y, b)
    assert_list_equal(pairs, [
        [os.path.join('foo', 'bar_x.txt'), os.path.join('oof', 'bar_x.txt')],
        ['foo5.txt', 'foo5_b.txt'],
    ])
def test_simple_read_write():
    data = [i % 10 for i in range(bl.BLOCK_SIZE * 5 + 17)]
    reader = bl.BlockReader()
    writer = bl.BlockWriter()
    factory = bl.BlockFactory()
    block_ids = writer.write(data, factory)
    blocks = {}
    for i in block_ids:
        blocks[i] = factory.blocks[i]
    recovered_data = reader.read(blocks)
    assert_list_equal(data, recovered_data)
def test_complex_matlab_blobs(self):
    blobs = Blob().fetch('blob', order_by='id')
    assert_equal(blobs[0][0], 'character string')
    assert_true(np.array_equal(blobs[1][0], np.r_[1:180:15]))
    assert_list_equal([r[0] for r in blobs[2]], ['string1', 'string2'])
    assert_list_equal([r[0, 0] for r in blobs[3]['a'][0]], [1, 2])
    assert_tuple_equal(blobs[3]['b'][0, 0]['c'][0, 0].shape, (3, 3))
    assert_true(
        np.array_equal(blobs[4], np.r_[1:25].reshape((2, 3, 4), order='F')))
    assert_true(blobs[4].dtype == 'float64')
    assert_true(
        np.array_equal(blobs[5], np.r_[1:25].reshape((2, 3, 4), order='F')))
    assert_true(blobs[5].dtype == 'uint8')
    assert_tuple_equal(blobs[6].shape, (2, 3, 4))
    assert_true(blobs[6].dtype == 'complex128')
def test_data_type():
    """Test automatic data type importing"""
    puts1 = [10, 20, 30, 40, 50]
    puts2 = {'apples': 10, 'bananas': 20, 'oranges': 30}
    gets1 = [{'col': 'data', 'idx': 0, 'val': 10},
             {'col': 'data', 'idx': 1, 'val': 20},
             {'col': 'data', 'idx': 2, 'val': 30},
             {'col': 'data', 'idx': 3, 'val': 40},
             {'col': 'data', 'idx': 4, 'val': 50}]
    gets2 = [{'col': 'data', 'idx': 'apples', 'val': 10},
             {'col': 'data', 'idx': 'bananas', 'val': 20},
             {'col': 'data', 'idx': 'oranges', 'val': 30}]

    for ins, outs in zip([puts1, puts2], [gets1, gets2]):
        test = data_type(ins)
        nt.assert_list_equal(test.values, outs)

    # From Iters
    puts = {'x': [1, 2, 3], 'y': [10, 20, 30], 'z': [40, 50, 60]}
    gets = [{'col': 'y', 'idx': 1, 'val': 10},
            {'col': 'y', 'idx': 2, 'val': 20},
            {'col': 'y', 'idx': 3, 'val': 30},
            {'col': 'z', 'idx': 1, 'val': 40},
            {'col': 'z', 'idx': 2, 'val': 50},
            {'col': 'z', 'idx': 3, 'val': 60}]
    test = data_type(puts, iter_idx='x')
    nt.assert_list_equal(test.values, gets)

    # Pandas
    df = pd.DataFrame({'one': [1, 2, 3], 'two': [4, 5, 6]})
    series = pd.Series([1, 2, 3], name='test')
    gets1 = [{'col': 'one', 'idx': 0, 'val': 1},
             {'col': 'two', 'idx': 0, 'val': 4},
             {'col': 'one', 'idx': 1, 'val': 2},
             {'col': 'two', 'idx': 1, 'val': 5},
             {'col': 'one', 'idx': 2, 'val': 3},
             {'col': 'two', 'idx': 2, 'val': 6}]
    gets2 = [{'col': 'test', 'idx': 0, 'val': 1},
             {'col': 'test', 'idx': 1, 'val': 2},
             {'col': 'test', 'idx': 2, 'val': 3}]
    test_df = data_type(df)
    test_series = data_type(series)
    nt.assert_list_equal(test_df.values, gets1)
    nt.assert_list_equal(test_series.values, gets2)

    # Bad type
    class BadType(object):
        'Bad data type'
        pass

    test = BadType()
    nt.assert_raises(ValueError, data_type, test)
def test_groupby_09(self):
    """
    test_groupby_09: Groupby's type 'sorted_count_distinct'
    """
    random.seed(1)

    groupby_cols = ['f0']
    groupby_lambda = lambda x: x[0]
    agg_list = [['f4', 'sorted_count_distinct'],
                ['f5', 'sorted_count_distinct'],
                ['f6', 'sorted_count_distinct']]
    num_rows = 2000

    # -- Data --
    g = self.gen_dataset_count_with_NA_09(num_rows)
    sort = sorted([item for item in g], key=lambda x: x[0])
    data = np.fromiter(sort, dtype='S1,f8,i8,i4,f8,i8,i4')
    print('data')
    print(data)

    # -- Bcolz --
    print('--> Bcolz')
    self.rootdir = tempfile.mkdtemp(prefix='bcolz-')
    os.rmdir(self.rootdir)  # folder should be empty
    fact_bcolz = bquery.ctable(data, rootdir=self.rootdir)
    fact_bcolz.flush()

    result_bcolz = fact_bcolz.groupby(groupby_cols, agg_list)
    print(result_bcolz)

    # Itertools result
    print('--> Itertools')
    result_itt = self.helper_itt_groupby(data, groupby_lambda)
    uniquekeys = result_itt['uniquekeys']
    print(uniquekeys)

    ref = []
    for n, (u, item) in enumerate(zip(uniquekeys, result_itt['groups'])):
        f4 = len(self._get_unique([x[4] for x in result_itt['groups'][n]]))
        f5 = len(self._get_unique([x[5] for x in result_itt['groups'][n]]))
        f6 = len(self._get_unique([x[6] for x in result_itt['groups'][n]]))
        ref.append([u, f4, f5, f6])
    print(ref)

    assert_list_equal(
        [list(x) for x in result_bcolz],
        ref)
def test_filter_tables(self, tokenizer, sim_measure_type, threshold,
                       args, expected_pairs):
    position_filter = PositionFilter(tokenizer, sim_measure_type, threshold)
    actual_candset = position_filter.filter_tables(*args)

    expected_output_attrs = ['_id']
    l_out_prefix = self.default_l_out_prefix
    r_out_prefix = self.default_r_out_prefix

    # Check for l_out_prefix in args.
    if len(args) > 8:
        l_out_prefix = args[8]
    expected_output_attrs.append(l_out_prefix + args[2])

    # Check for l_out_attrs in args.
    if len(args) > 6:
        if args[6]:
            for attr in args[6]:
                expected_output_attrs.append(l_out_prefix + attr)

    # Check for r_out_prefix in args.
    if len(args) > 9:
        r_out_prefix = args[9]
    expected_output_attrs.append(r_out_prefix + args[3])

    # Check for r_out_attrs in args.
    if len(args) > 7:
        if args[7]:
            for attr in args[7]:
                expected_output_attrs.append(r_out_prefix + attr)

    # verify whether the output table has the necessary attributes.
    assert_list_equal(list(actual_candset.columns.values),
                      expected_output_attrs)

    actual_pairs = set()
    for idx, row in actual_candset.iterrows():
        actual_pairs.add(','.join((str(row[l_out_prefix + args[2]]),
                                   str(row[r_out_prefix + args[3]]))))

    # verify whether the actual pairs and the expected pairs match.
    assert_equal(len(expected_pairs), len(actual_pairs))
    common_pairs = actual_pairs.intersection(expected_pairs)
    assert_equal(len(common_pairs), len(expected_pairs))
def test_unite_master_parts():
    assert_list_equal(
        unite_master_parts([
            "`s`.`a`",
            "`s`.`a__q`",
            "`s`.`b`",
            "`s`.`c`",
            "`s`.`c__q`",
            "`s`.`b__q`",
            "`s`.`d`",
            "`s`.`a__r`",
        ]),
        [
            "`s`.`a`",
            "`s`.`a__q`",
            "`s`.`a__r`",
            "`s`.`b`",
            "`s`.`b__q`",
            "`s`.`c`",
            "`s`.`c__q`",
            "`s`.`d`",
        ],
    )
    assert_list_equal(
        unite_master_parts([
            "`lab`.`#equipment`",
            "`cells`.`cell_analysis_method`",
            "`cells`.`cell_analysis_method_task_type`",
            "`cells`.`cell_analysis_method_users`",
            "`cells`.`favorite_selection`",
            "`cells`.`cell_analysis_method__cell_selection_params`",
            "`lab`.`#equipment__config`",
            "`cells`.`cell_analysis_method__field_detect_params`",
        ]),
        [
            "`lab`.`#equipment`",
            "`lab`.`#equipment__config`",
            "`cells`.`cell_analysis_method`",
            "`cells`.`cell_analysis_method__cell_selection_params`",
            "`cells`.`cell_analysis_method__field_detect_params`",
            "`cells`.`cell_analysis_method_task_type`",
            "`cells`.`cell_analysis_method_users`",
            "`cells`.`favorite_selection`",
        ],
    )
def test_posting_user_card_when_response_is_ok(mock_post, mock_validate_token):
    card_data = {
        'metadata': {},
        'card': {
            'ccvv': '274827484',
            'expiration_month': '12',
            'expiration_year': '1738',
            'method': 'card',
            'number': '27484277',
            'type': 'visa'
        }
    }
    input = {
        'ccvv': '274827484',
        'expiration_month': '12',
        'expiration_year': '1738',
        'method': 'card',
        'number': '27484277',
        'type': 'visa'
    }
    app_response = {
        'ccvv': '274827484',
        'expiration_month': '12',
        'expiration_year': '1738',
        'method': 'card',
        'number': '27484277',
        'type': 'visa'
    }
    header = {'token': '838298'}
    mock_validate_token.return_value = True
    mock_post.return_value = Mock(ok=True)
    mock_post.return_value.json.return_value = card_data
    app2 = app.test_client()
    response = app2.post('/passengers/84748/card', data=json.dumps(input),
                         headers=header, content_type='application/json')
    response_json = json.loads(response.get_data())
    assert_list_equal([response_json], [app_response])