def test_unique_values_from_query(self):
    """Athena - Unique Values from Query"""
    # The rows include a duplicate ('barfoo') which the helper should
    # collapse so only distinct values remain.
    values = ('foobar', 'barfoo', 'barfoo', 'foobarbaz')
    query = {
        'ResultSet': {
            'Rows': [
                {'Data': [{'VarCharValue': value}]} for value in values
            ]
        }
    }
    result = self.client._unique_values_from_query(query)
    assert_count_equal(result, {'foobar', 'barfoo', 'foobarbaz'})
def test_get_tasks_config():
    """The loaded tasks config exposes every configured app name."""
    tc = api.get_tasks_config()
    nt.assert_is_instance(tc, TasksConfigBaseMapping)
    prefix = 'test_stolos/test_get_tasks_config/'
    nt.assert_count_equal(tc, [prefix + name for name in (
        'depends_on2', 'custom_job_id1', 'app2', 'topological_sort1',
        'fanout1', 'depends_on1', 'bash2', 'app3', 'app1', 'bash1',
        'app4', 'valid1', 'valid2', 'valid3', 'valid3b', 'valid4',
        'all_test1', 'all_test2', 'all_test3', 'all_test4', 'all_test4b',
        'all_test5', 'autofill1', 'autofill2', 'autofill3',
        'autofill_getparents',
    )])
def test_get_table_partitions(self):
    """Athena - Get Table Partitions"""
    # One partition value appears twice; the result should be de-duplicated.
    partitions = [
        'dt=2018-12-10-10',
        'dt=2018-12-09-10',
        'dt=2018-12-09-10',
        'dt=2018-12-11-10',
    ]
    self.client._client.results = [
        {'Data': [{'VarCharValue': partition}]} for partition in partitions
    ]
    result = self.client.get_table_partitions('test_table')
    assert_count_equal(result, set(partitions))
def test_expand_action_with_permission_only_action(self):
    # A "[permission only]" action should still expand to itself rather
    # than being dropped.
    # NOTE(review): the original comment read "There are 17 privileges list
    # as 'logs.CreateLogDelivery [permission only]'" but the code exercises
    # logs:GetLogDelivery -- verify the intended action/count against the
    # AWS IAM service authorization reference.
    expanded_actions = expand_action("logs:GetLogDelivery")
    assert_count_equal(expanded_actions, [{
        "service": "logs",
        "action": "GetLogDelivery"
    }])
def test_tojson(self):
    """Serialising the db to JSON writes one list of row dicts per table."""
    serialise.JSON().write('test', db)
    expect = {
        'Town': [
            {'name': 'down', '_uid': self.t1._uid},
            {'name': 'up', '_uid': self.t2._uid},
        ],
        'Address': [
            {'street': 'easy', 'town': self.t1._uid, '_uid': self.a1._uid},
            {'street': 'some', 'town': self.t2._uid, '_uid': self.a2._uid},
        ],
        'Person': [
            {'custno': 1, 'name': 'matt', 'age': 43,
             'address': self.a1._uid, '_uid': self.p1._uid},
            {'custno': 2, 'name': 'bob', 'age': 3,
             'address': self.a1._uid, '_uid': self.p2._uid},
            {'custno': 3, 'name': 'peter', 'age': 29,
             'address': self.a2._uid, '_uid': self.p3._uid},
        ],
    }
    with open('test', 'rt') as handle:
        got = json.load(handle)
    assert set(got.keys()) == set(expect.keys())
    # Row order within a table is not significant.
    for table, rows in got.items():
        assert_count_equal(rows, expect[table])
def test_get_tasks_config():
    """Every task defined in the test configuration is discoverable."""
    tc = api.get_tasks_config()
    nt.assert_is_instance(tc, TasksConfigBaseMapping)
    expected_names = [
        'test_stolos/test_get_tasks_config/%s' % suffix
        for suffix in [
            'depends_on2', 'custom_job_id1', 'app2', 'topological_sort1',
            'fanout1', 'depends_on1', 'bash2', 'app3', 'app1', 'bash1',
            'app4', 'valid1', 'valid2', 'valid3', 'valid3b', 'valid4',
            'all_test1', 'all_test2', 'all_test3', 'all_test4',
            'all_test4b', 'all_test5', 'autofill1', 'autofill2',
            'autofill3', 'autofill_getparents',
        ]
    ]
    nt.assert_count_equal(tc, expected_names)
def test_load_include():
    """Shared - Config Loading - Include"""
    config = load_config(include={'clusters', 'logs.json'})
    # Only the explicitly included sections should be loaded.
    assert_count_equal(list(config), ['clusters', 'logs'])
    assert_count_equal(list(config['clusters']), ['prod', 'dev'])
def then_compare_tables(context):
    """Assert the result frame matches the expected behave table."""
    expected = context.table
    testing.assert_true(expected, "Please ensure table is provided")
    observed = context.result
    # Column names must match (order-insensitive), and the row counts agree.
    testing.assert_count_equal(
        [str(col) for col in observed.columns], expected.headings)
    testing.assert_equals(len(expected.rows), len(observed))
    # Each row must contain the same values, order-insensitive within a row.
    for idx, expected_row in enumerate(expected.rows):
        testing.assert_count_equal(
            [str(cell) for cell in observed.loc[idx].tolist()],
            list(expected_row))
def test_query_result_paginator(self):
    """Athena - Query Result Paginator"""
    page = {'Data': [{'VarCharValue': 'result'}]}
    self.client._client.results = [page]
    items = list(self.client.query_result_paginator('test query'))
    # The stubbed client replays the same result page; 4 identical pages
    # are expected back (presumably matching the stub's page count).
    assert_count_equal(items, [{'ResultSet': {'Rows': [page]}}] * 4)
def test_iter(self):
    """Iterating an ExecutionContext yields the same names as .keys()."""
    namespace = {
        'd': 13,
        'str': 'string_Test',
    }
    exec_context = ExecutionContext.from_ns(namespace)
    nt.assert_count_equal(list(exec_context), list(exec_context.keys()))
def test_hansle_typelets():
    """
    hansel typelets should be a complete list. this means it will contain
    superclass typelets as well.
    """
    # NOTE(review): function name looks like a typo for "hansel"; renaming
    # would change test discovery, so it is left as-is.
    # _earthdragon_typelets holds only the class's own declarations, while
    # _earthdragon_merged_typelets accumulates ancestors' typelets too.
    nt.assert_count_equal(Parent._earthdragon_typelets, ['id'])
    nt.assert_count_equal(Child._earthdragon_merged_typelets, ['id', 'id2'])
    nt.assert_count_equal(Child._earthdragon_typelets, ['id2'])
    nt.assert_count_equal(GrandChild._earthdragon_merged_typelets,
                          ['id', 'id2', 'id3'])
    nt.assert_count_equal(GrandChild._earthdragon_typelets, ['id3'])
def test_multigraph_with_keys(self):
    """Eulerian circuit over a multigraph reports edge keys when asked."""
    G = nx.MultiGraph()
    nx.add_cycle(G, [0, 1, 2, 3])
    G.add_edge(1, 2)
    G.add_edge(1, 2)
    edges = list(eulerian_circuit(G, source=0, keys=True))
    # Node visitation order is fixed by the circuit.
    assert_equal([u for u, v, k in edges], [0, 3, 2, 1, 2, 1])
    assert_equal(edges[:2], [(0, 3, 0), (3, 2, 0)])
    # The three parallel 1-2 edges may carry their keys in any order.
    assert_count_equal(edges[2:5], [(2, 1, 0), (1, 2, 1), (2, 1, 2)])
    assert_equal(edges[5:], [(1, 0, 0)])
def subs_eq(got, expected, any_order=False):
    """
    Compare Subtitle objects using vars() so that differences are easy
    to identify.
    """
    got_attrs = [vars(sub) for sub in got]
    expected_attrs = [vars(sub) for sub in expected]
    if any_order:
        assert_count_equal(got_attrs, expected_attrs)
    else:
        eq(got_attrs, expected_attrs)
def subs_eq(got, expected, any_order=False):
    '''
    Compare Subtitle objects attribute-by-attribute (via vars()) so a
    mismatch pinpoints the differing field.
    '''
    def as_dicts(subs):
        return [vars(sub) for sub in subs]

    compare = assert_count_equal if any_order else eq
    compare(as_dicts(got), as_dicts(expected))
def test_output_loading():
    """OutputDispatcher - Loading Output Classes"""
    # Add new outputs to this list to make sure they're loaded properly
    expected_outputs = {
        'aws-firehose', 'aws-lambda', 'aws-s3', 'aws-ses', 'aws-sns',
        'aws-sqs', 'aws-cloudwatch-log', 'carbonblack', 'demisto',
        'github', 'jira', 'komand', 'pagerduty', 'pagerduty-v2',
        'pagerduty-incident', 'phantom', 'slack', 'teams',
    }
    assert_count_equal(
        set(StreamAlertOutput.get_all_outputs()), expected_outputs)
def test(self, parse):
    """Zookeeper entries become bracketed "[addr]:port" strings."""
    zk_dict = {
        'zk0': {'Addr': 'zkv4', 'L4Port': 2181},
        'zk1': {'Addr': 'zkv6', 'L4Port': 2182},
    }
    inst = Topology()
    # Address parsing is mocked out as the identity function.
    parse.side_effect = lambda x: x
    # Call
    inst._parse_zk_dicts({"ZookeeperService": zk_dict})
    # Tests
    ntools.assert_count_equal(
        inst.zookeepers, ["[zkv4]:2181", "[zkv6]:2182"])
def test(self, router):
    """Each border router is sorted into the list matching its link type."""
    routers = defaultdict(list)

    def _mk_router(type_):
        mocked = create_mock(["interface"])
        mocked.interface = create_mock(["link_type"])
        mocked.interface.link_type = type_
        routers[type_].append(mocked)
        return mocked

    router_dict = {
        "br-parent": "PARENT",
        "br-child": "CHILD",
        "br-peer": "PEER",
        "br-core0": "CORE",
        "br-core1": "CORE",
    }
    inst = Topology()
    router.side_effect = lambda v, k: _mk_router(v)
    # Call
    inst._parse_router_dicts({"BorderRouters": router_dict})
    # Tests: each instance list matches the routers built for that type.
    for link_type, attr in [
        ("PARENT", "parent_border_routers"),
        ("CHILD", "child_border_routers"),
        ("PEER", "peer_border_routers"),
        ("CORE", "core_border_routers"),
    ]:
        ntools.assert_count_equal(getattr(inst, attr), routers[link_type])
def test(self, router):
    """A parsed border router ends up in inst.border_routers."""
    routers = defaultdict(list)

    def _mk_router(type_):
        mocked = create_mock(["interfaces"])
        mocked.interfaces = {0: create_mock(["link_type"])}
        mocked.interfaces[0].link_type = type_
        routers[type_].append(mocked)
        return mocked

    inst = Topology()
    router.side_effect = lambda v, k: _mk_router(v)
    # Call
    inst._parse_router_dicts({"BorderRouters": {"br-parent": "parent"}})
    # Tests
    ntools.assert_count_equal(inst.border_routers, routers["parent"])
def test_topological_sort(topological_sort1, app1, app2, depends_on1, bash2,
                          depends_on_job_id1, func_name):
    # Topologically sorting the parents of topological_sort1 must yield each
    # (app, job_id, dependency-group) tuple exactly once.  assert_count_equal
    # ignores ordering, so only the multiset of results is checked here.
    nt.assert_count_equal(
        list(dag_tools.topological_sort(dag_tools.get_parents(
            topological_sort1, depends_on_job_id1, True,))),
        [
            (app1, '20140601_101_profile-%s' % func_name, u'dep1'),
            (app1, '20140601_102_profile-%s' % func_name, u'dep1'),
            (app2, '20140601_101_profile-%s' % func_name, u'dep1'),
            (app2, '20140601_102_profile-%s' % func_name, u'dep1'),
            (depends_on1, u'20140601_testID1-%s' % func_name, u'dep1'),
            (bash2, '20140601_101_profile-%s' % func_name, u'dep1'),
            (bash2, '20140601_102_profile-%s' % func_name, u'dep1')
        ]
    )
def test_eval(self):
    """L5PC: test evaluation of l5pc evaluator"""
    result = self.l5pc_evaluator.evaluate_with_dicts(
        param_dict=release_parameters)
    expected_results = load_from_json('expected_results.json')
    # Use two lines below to update expected result
    # expected_results['TestL5PCEvaluator.test_eval'] = result
    # dump_to_json(expected_results, 'expected_results.json')
    try:
        # Modern name for the order-insensitive comparison.
        nt.assert_count_equal(
            result, expected_results['TestL5PCEvaluator.test_eval'])
    except AttributeError:
        # Older nose.tools (Python 2 era) only had assert_items_equal.
        nt.assert_items_equal(
            result, expected_results['TestL5PCEvaluator.test_eval'])
def test_auto_finder(self):
    """Repo lookups by id and by indexed user_id return stored objects."""
    data = self.data
    tr = self.gen_repo(data)
    # test get: every stored id round-trips.
    for obj_id in data:
        self.check_obj_against_data(tr.get(obj_id))
    # check by user
    user_res = tr.by_user_id(22)
    for obj in user_res.values():
        self.check_obj_against_data(obj)
    nt.assert_count_equal(user_res, [1, 2])
    nt.assert_count_equal(tr.by_user_id(25), [4])
def test_merge_required_outputs_dne():
    """Shared - Merge Required Outputs, Does Not Exist"""
    # A simple user config that will be merged with required outputs
    users_config = {
        'aws-s3': {'bucket': 'my.s3.bucket'},
        'aws-sns': {'topic': 'my-sns-topic'},
        'slack': ['slack_output'],
    }
    outputs = resources.merge_required_outputs(users_config, "test")
    # The required aws-firehose output is added to the 3 user outputs.
    assert_equal(len(outputs), 4)
    assert_count_equal(
        outputs['aws-firehose'],
        {'alerts': 'test_streamalert_alert_delivery'})
def test_expanded_multi_nested_partial():
    # we are expecting these to match by execution context
    # Build a chain of manifests, each embedding the previous one in its
    # namespace: parent -> sub -> xy -> leaf.
    ns = {'test1': 0, 'test2': 1}
    leaf = _manifest("(test1 + test2)", ns)
    ns = {'x': 1, 'y': leaf}
    xy = _manifest("(x + y)", ns)
    ns = {'a': 1, 'b': xy}
    sub = _manifest("(a + b)", ns)
    parent_ns = {'e': 3, 'a': sub}
    parent = _manifest("e + a", parent_ns)
    expanded = parent.expand()
    # Expansion flattens the nesting into one context and one expression.
    nt.assert_count_equal(expanded.context.keys(),
                          ['a', 'e', 'x', 'test1', 'test2'])
    nt.assert_equal(expanded.expression.get_source(),
                    "(e + (a + (x + (test1 + test2))))")
    # 3 + (1 + (1 + (0 + 1))) == 6
    nt.assert_equal(expanded.eval(), 6)
async def test(self) -> None:
    """Chunks written strip-by-strip are stored and described correctly."""
    # Write chunks covering the weights arrays in 4-wide column strips,
    # one strip per row index j.
    for i in range(0, 8, 4):
        for j in range(2):
            await self.add_chunks((j, i))
    chunk_info = await self.r.get_chunk_info()
    expected_calls = []
    # Each 4-wide strip is expected to be written as 2-wide chunks.
    for i in range(0, 8, 4):
        for j in range(2):
            for k in range(i, i + 4, 2):
                expected_calls.append(mock.call(
                    'prefix/weights', np.s_[j:j+1, k:k+2, 0:2], mock.ANY))
    for i in range(0, 8, 2):
        expected_calls.append(mock.call(
            'prefix/weights_channel', np.s_[0:2, i:i+2], mock.ANY))
    assert_count_equal(expected_calls, self.chunk_store.put_chunk.mock_calls)
    # Check the array values. assert_count_equal doesn't work well for this
    # because of how equality operators are implemented in numpy.
    for call in self.chunk_store.put_chunk.mock_calls:
        name, slices, value = call[1]
        if name == 'prefix/weights':
            np.testing.assert_array_equal(self.weights[slices], value)
        else:
            np.testing.assert_array_equal(self.weights_channel[slices], value)
    # Reported chunk metadata must match the write layout above.
    assert_equal(
        chunk_info,
        {
            'weights': {
                'prefix': 'prefix',
                'chunks': ((1, 1), (2, 2, 2, 2), (2,)),
                'shape': (2, 8, 2),
                'dtype': '|u1'
            },
            'weights_channel': {
                'prefix': 'prefix',
                'chunks': ((2,), (2, 2, 2, 2)),
                'shape': (2, 8),
                'dtype': np.dtype(np.float32).str
            }
        })
def test_expand_action_with_expansion_for_prefix_used_multiple_times(self):
    """The Describe* wildcard expands to every matching SES action."""
    expanded_actions = expand_action("ses:Describe*")
    expected = [
        {"service": "ses", "action": action_name}
        for action_name in (
            "DescribeActiveReceiptRuleSet",
            "DescribeConfigurationSet",
            "DescribeReceiptRule",
            "DescribeReceiptRuleSet",
        )
    ]
    assert_count_equal(expanded_actions, expected)
def test_expression(self):
    """An Expression built from an extracted assignment RHS equals one
    built directly from the parsed RHS, and reports correct load names."""
    source = """
    arr = np.arange(20)
    res = np.sum(arr)
    """
    source = dedent(source)
    lines = source.strip().split('\n')
    # Names each line loads (reads), as opposed to stores.
    load_names = [['np'], ['np', 'arr']]
    for i, line in enumerate(lines):
        code = ast.parse(line, '<>', 'exec')
        # expression must be evaluable, assignments are not
        with nt.assert_raises(Exception):
            Expression(code.body[0])
        extracted_expr = grab_expression_from_assign(code)
        # skip the assign
        base_expr = ast.parse(line.split('=')[1].strip(), mode='eval')
        exp1 = Expression(extracted_expr)
        exp2 = Expression(base_expr)
        # Equal by value but distinct objects.
        nt.assert_equal(exp1, exp2)
        nt.assert_is_not(exp1, exp2)
        nt.assert_count_equal(exp1.load_names(), load_names[i])
def test_get_children(func_name, app1, app2, app4, depends_on1, depends_on2,
                      bash1, bash2):
    # A dependency-group child: the job_id is translated through depgrp2.
    nt.assert_count_equal(
        list(dag_tools.get_children(
            depends_on2, '20140601_876_profile-%s' % func_name)),
        [(depends_on1, u'20140601_testID2-%s' % func_name, u'depgrp2')])
    # A leaf app has no children.
    nt.assert_count_equal(
        list(dag_tools.get_children(bash2, '20140601_9899_purchase')),
        []
    )
    # Simple parent -> child bash chain keeps the same job_id.
    nt.assert_count_equal(
        list(dag_tools.get_children(bash1, '20140601_9899_purchase')),
        [(bash2, '20140601_9899_purchase', 'default')]
    )
    # app1 fans out to default children plus a dependency-group child.
    nt.assert_count_equal(
        list(dag_tools.get_children(
            app1, '20140601_999_purchase-%s' % func_name)),
        [
            (depends_on1, u'20140601_testID1-%s' % func_name, u'depgrp1'),
            (app2, '20140601_999_purchase-%s' % func_name, 'default'),
            (app4, '20140601_999_purchase-%s' % func_name, 'default'),
        ]
    )
    # Same fan-out shape for a different job_id.
    nt.assert_count_equal(
        list(dag_tools.get_children(
            app1, '20140601_876_purchase-%s' % func_name)),
        [
            (depends_on1, u'20140601_testID1-%s' % func_name, u'depgrp1'),
            (app2, '20140601_876_purchase-%s' % func_name, 'default'),
            (app4, '20140601_876_purchase-%s' % func_name, 'default'),
        ]
    )
def test_save(self):
    """Saving objects populates both the user_id and product_id indexes."""
    indexer = RepoFinder(None, {'user_id': {}, 'product_id': {}})
    indexer.save(Obj(1, 123, 4))
    indexer.save(Obj(3, 123, 5))
    # only saving objects for one user
    user_index = indexer.indexers['user_id']
    nt.assert_count_equal(user_index, [123])
    # get obj for user, check ids
    nt.assert_count_equal(user_index[123], [1, 3])
    # saved obj for two products
    product_index = indexer.indexers['product_id']
    nt.assert_count_equal(product_index, [4, 5])
    nt.assert_count_equal(product_index[4], [1])
def test_merge_required_outputs_exists():
    """Shared - Merge Required Outputs, Has Existing"""
    # A simple user config with an exist aws-firehose output
    # that will be merged with required outputs
    users_config = {
        'aws-firehose': {'notalerts': 'resource_name'},
        'aws-sns': {'topic': 'my-sns-topic'},
        'slack': ['slack_output'],
    }
    outputs = resources.merge_required_outputs(users_config, "test")
    # The existing aws-firehose entry is extended, not duplicated.
    assert_equal(len(outputs), 3)
    assert_count_equal(outputs['aws-firehose'], {
        'notalerts': 'resource_name',
        'alerts': 'test_streamalert_alert_delivery',
    })
def test_fan_out_tasks(app1, app2, app4, fanout1, func_name):
    # test for Many-to-Many relationships between parent and child tasks
    # A job_id that matches no dependency group yields no parents.
    nt.assert_count_equal(
        list(dag_tools.get_parents(
            'test_stolos/test_fan_out_tasks/fanout1', '20140715_8')),
        [])
    # A fan-in job_id maps back to multiple parent job_ids of app1.
    nt.assert_count_equal(
        list(dag_tools.get_parents(
            'test_stolos/test_fan_out_tasks/fanout1',
            '20140715_testID5-%s' % func_name, True)),
        [
            (app1, '20140714_555_profile-%s' % func_name, u'dep2'),
            (app1, '20140715_555_profile-%s' % func_name, u'dep2'),
        ])
    # One app1 job fans out to default children plus fanout1 jobs via dep1.
    nt.assert_count_equal(
        list(dag_tools.get_children(
            'test_stolos/test_fan_out_tasks/app1',
            '20140715_9_profile-%s' % func_name, True,)),
        [(app2, '20140715_9_profile-%s' % func_name, 'default'),
         (app4, '20140715_9_profile-%s' % func_name, 'default'),
         (fanout1, '20140715_testID1-%s' % func_name, u'dep1'),
         (fanout1, '20140715_testID2-%s' % func_name, u'dep1'),
         (fanout1, '20140715_testID3-%s' % func_name, u'dep1'),
         ])
    # A job matching both dep1 and dep2 fans out through both groups.
    nt.assert_count_equal(
        list(dag_tools.get_children(
            app1, '20140715_555_profile-%s' % func_name, True,)),
        [
            (app2, '20140715_555_profile-%s' % func_name, 'default'),
            (app4, '20140715_555_profile-%s' % func_name, 'default'),
            (fanout1, u'20140714_testID5-%s' % func_name, u'dep2'),
            (fanout1, u'20140714_testID6-%s' % func_name, u'dep2'),
            (fanout1, u'20140715_testID1-%s' % func_name, u'dep1'),
            (fanout1, u'20140715_testID2-%s' % func_name, u'dep1'),
            (fanout1, u'20140715_testID3-%s' % func_name, u'dep1'),
            (fanout1, u'20140715_testID5-%s' % func_name, u'dep2'),
            (fanout1, u'20140715_testID6-%s' % func_name, u'dep2'),
        ])
def test_expand_action_with_casing(self):
    """Matching ignores case in both the service and the action pattern."""
    result = expand_action("iAm:li*sTuS*rs")
    assert_count_equal(
        result, [{"service": "iam", "action": "ListUsers"}])
def test_expand_action_with_expansion(self):
    """A trailing wildcard expands to the canonical S3 action name."""
    result = expand_action("s3:listallmybucke*")
    assert_count_equal(
        result, [{"service": "s3", "action": "ListAllMyBuckets"}])
def test_required_auth_info(self):
    """SalesforceApp - Required Auth Info"""
    expected_keys = {
        'client_id',
        'client_secret',
        'username',
        'password',
        'security_token',
    }
    assert_count_equal(
        list(self._app.required_auth_info().keys()), expected_keys)
def test_discover_fields():
    """_discover_fields lists property names, skipping known fields."""
    cases = [
        # can find simple list of properties
        ({}, []),
        ({'f1': 'v1'}, ['f1']),
        ({'f1': 'v1', 'f2': 'v2'}, ['f1', 'f2']),
        ({'f1': 'v1', 'destination': 'd1', 'f2': 'v2'}, ['f1', 'f2']),
        # ignores known fields
        ({'destination': 'v1'}, []),
        ({'destination': 'v1', 'f2': 'v2'}, ['f2']),
    ]
    for properties, expected in cases:
        assert_count_equal(panoptes._discover_fields(properties), expected)
def test_build_dag():
    """The built DAG has exactly one node per configured task."""
    dag = api.build_dag()
    nt.assert_is_instance(dag, MultiDiGraph)
    tc = api.get_tasks_config()
    # NOTE(review): `dag.node` is the pre-2.0 networkx spelling; newer
    # releases use `dag.nodes` -- verify against the pinned version.
    nt.assert_count_equal(tc.keys(), dag.node.keys())
def test_get_parents(app1, app2, depends_on1, depends_on2, bash1, bash2,
                     depends_on_job_id1, func_name):
    # test case with no parents
    nt.assert_equal(
        list(dag_tools.get_parents(app1, '20140101_876_purchase', True)),
        []
    )
    # test the basic inheritance scenario
    nt.assert_count_equal(
        list(dag_tools.get_parents(bash2, '20140501_876_profile', True)),
        [(bash1, '20140501_876_profile', 'default')]
    )
    # test invalid job_id
    nt.assert_count_equal(
        list(dag_tools.get_parents(depends_on1, '20140101_999999', True)),
        []
    )
    # test invalid metadata in job_id
    nt.assert_count_equal(
        list(dag_tools.get_parents(depends_on1, '20140601_999', True)),
        []
    )
    # test depends_on for one of the dependency groups
    nt.assert_count_equal(
        list(dag_tools.get_parents(
            depends_on1, '20140601_testID2-%s' % func_name, True)),
        [
            (depends_on2, '20140601_1011_profile-%s' % func_name, u'depgrp2'),
            (depends_on2, '20140601_9020_profile-%s' % func_name, u'depgrp2'),
            (depends_on2, '20140601_876_profile-%s' % func_name, u'depgrp2')
        ])
    # test depends_on for one of the dependency groups
    # also tests that get_parents returns a stable ordering
    nt.assert_count_equal(
        list(dag_tools.get_parents(depends_on1, depends_on_job_id1, True)),
        [
            (app1, '20140601_1011_profile-%s' % func_name, u'depgrp1'),
            (app1, '20140601_1011_purchase-%s' % func_name, u'depgrp1'),
            (app1, '20140601_9020_profile-%s' % func_name, u'depgrp1'),
            (app1, '20140601_9020_purchase-%s' % func_name, u'depgrp1'),
            (app1, '20140601_876_profile-%s' % func_name, u'depgrp1'),
            (app1, '20140601_876_purchase-%s' % func_name, u'depgrp1'),
            (app1, '20140601_999_purchase-%s' % func_name, u'depgrp1'),
            (app2, '20140601_1011_profile-%s' % func_name, u'depgrp1'),
            (app2, '20140601_1011_purchase-%s' % func_name, u'depgrp1'),
            (app2, '20140601_9020_profile-%s' % func_name, u'depgrp1'),
            (app2, '20140601_9020_purchase-%s' % func_name, u'depgrp1'),
            (app2, '20140601_876_profile-%s' % func_name, u'depgrp1'),
            (app2, '20140601_876_purchase-%s' % func_name, u'depgrp1')
        ]
    )
    # test depends_on when multiple dependency groups map to the same job_id
    # I guess it's okay if they map to the same id?
    nt.assert_count_equal(
        list(dag_tools.get_parents(
            depends_on1, '20140601_testID3-%s' % func_name, True)),
        [(app1, '20140601_444_profile-%s' % func_name, u'depgrp4'),
         (app1, '20140601_876_profile-%s' % func_name, u'depgrp3'),
         ]
    )
    # test the filter_deps option
    nt.assert_count_equal(
        list(dag_tools.get_parents(
            depends_on1, '20140601_testID3-%s' % func_name, True,
            filter_deps=['depgrp4'])),
        [(app1, '20140601_444_profile-%s' % func_name, u'depgrp4')]
    )
    # An unknown dependency-group name raises a configuration error.
    with nt.assert_raises(exceptions.DAGMisconfigured):
        list(dag_tools.get_parents(
            depends_on1, '20140601_testID3-%s' % func_name, True,
            filter_deps=['depgrp99999']))
def test_required_auth_info(self):
    """AliyunApp - Required Auth Info"""
    auth_keys = list(self._app.required_auth_info().keys())
    assert_count_equal(
        auth_keys, {'access_key_id', 'access_key_secret', 'region_id'})
def test_flatten():
    # makes any combination of lists and not-lists into a single flat list
    cases = [
        ([], []),
        ([1], [1]),
        ([1, 2], [1, 2]),
        ([1, [2]], [1, 2]),
        ([[1], 2], [1, 2]),
        ([[1], [2]], [1, 2]),
        ([1, [2], 3], [1, 2, 3]),
        ([1, [2], 2], [1, 2, 2]),
        ([[1], 2, []], [1, 2]),
        ([[1], [], 2], [1, 2]),
    ]
    for nested, expected in cases:
        assert_count_equal(list(panoptes._flatten(nested)), expected)