Example #1
0
 def test_load_include():
     """Shared - Config Loading - Include"""
     # Load only the cluster and log configuration files
     config = load_config(include={'clusters', 'logs.json'})
     # Top-level keys should reflect exactly what was included
     assert_items_equal(config.keys(), ['clusters', 'logs'])
     # Both cluster definitions should be present
     assert_items_equal(config['clusters'].keys(), ['prod', 'dev'])
 def test_aliases_biblio(self):
     """Alias lookup for a biblio item returns the known identifier set."""
     # at the moment this item
     alias = (u'biblio', {u'title': u'Altmetrics in the wild: Using social media to explore scholarly impact', u'first_author': u'Priem', u'journal': u'arXiv preprint arXiv:1203.4745', u'authors': u'Priem, Piwowar, Hemminger', u'number': u'', u'volume': u'', u'first_page': u'', u'year': u'2012'})
     new_aliases = self.provider.aliases([alias])
     # print() is valid in both Python 2 and 3; the bare print statement is not
     print(new_aliases)
     expected = [(u'scopus', u'2-s2.0-84904019573'), (u'arxiv', u'1203.4745'), ('url', u'http://www.mendeley.com/research/altmetrics-wild-using-social-media-explore-scholarly-impact'), ('mendeley_uuid', u'dd1ca434-0c00-3d11-8b1f-0226b1d6938c')]
     assert_items_equal(new_aliases, expected)
Example #3
0
 def test_load_exclude_clusters():
     """Shared - Config Loading - Exclude Clusters"""
     # Everything except the cluster files should be loaded
     config = load_config(exclude={'clusters'})
     assert_items_equal(
         config.keys(),
         ['global', 'lambda', 'logs', 'outputs', 'sources', 'types'])
Example #4
0
 def having_three_trashinfo_in_trashcan(self):
     """Create three .trashinfo entries and verify they all exist on disk."""
     names = ['foo.trashinfo', 'bar.trashinfo', 'baz.trashinfo']
     for name in names:
         self.having_a_trashinfo_in_trashcan(name)
     assert_items_equal(names, os.listdir(self.info_dir_path))
Example #5
0
    def test_embedded_json(self):
        """JSON Parser - Embedded JSON"""
        log_config = self.config['logs']['json:embedded']
        schema = log_config['schema']
        options = log_config['configuration']

        # Record whose 'message' field itself carries an escaped JSON document
        data = json.dumps({
            'env_key_01': 'data',
            'env_key_02': 'time',
            'test_list': [{
                'id': 'foo',
                'message': '{"nested_key_01":"bar","nested_key_02":"baz"}'
            }]
        })

        parsed_result = self.parser_helper(
            data=data, schema=schema, options=options)

        # Parsed record exposes the nested keys plus the envelope-key holder
        assert_items_equal(
            parsed_result[0].keys(),
            {'nested_key_01', 'nested_key_02', 'streamalert:envelope_keys'})
        # The envelope carries the outer record's keys
        assert_items_equal(
            parsed_result[0]['streamalert:envelope_keys'],
            {'env_key_01', 'env_key_02'})
Example #6
0
    def test_process_taxonomy_changes_subjects(self):
        """Posting a custom taxonomy updates the provider's subject set."""
        # Rename subject2 via a custom entry; include/exclude the others
        custom_taxonomy = {
            'include': [self.subject1.text, self.subject3_1.text],
            'exclude': [self.subject1_1.text],
            'custom': {
                'Changed Subject Name': {
                    'parent': self.subject2.text,
                    'bepress': self.subject2_1.text,
                },
                self.subject2.text: {
                    'parent': '',
                    'bepress': self.subject2.text,
                },
            },
        }
        self.request.POST = {
            'custom_taxonomy_json': json.dumps(custom_taxonomy),
            'provider_id': self.preprint_provider.id,
        }

        self.view.post(self.request)

        actual_subjects = (
            self.preprint_provider.subjects.all().values_list('text', flat=True))
        nt.assert_items_equal(actual_subjects, [
            self.subject1.text,
            self.subject2.text,
            self.subject3_1.text,
            'Changed Subject Name',
        ])
        # The renamed subject keeps subject2 as its parent
        renamed = self.preprint_provider.subjects.get(text='Changed Subject Name')
        assert renamed.parent.text == self.subject2.text
Example #7
0
    def test_process_optional_logs(self):
        """Rules Engine - Logs is optional when datatypes are present"""
        # Registering a rule with only `datatypes` (no `logs`) must still match
        @rule(datatypes=['sourceAddress'], outputs=['s3:sample_bucket'])
        def no_logs_has_datatypes(rec):  # pylint: disable=unused-variable
            """Testing rule when logs is not present, datatypes is"""
            results = fetch_values_by_datatype(rec, 'sourceAddress')

            for result in results:
                if result == '1.1.1.2':
                    return True
            return False

        @rule(logs=['cloudwatch:test_match_types'],
              outputs=['s3:sample_bucket'])
        def has_logs_no_datatypes(rec):  # pylint: disable=unused-variable
            """Testing rule when logs is present, datatypes is not"""

            return (rec['source'] == '1.1.1.2'
                    or rec['detail']['sourceIPAddress'] == '1.1.1.2')

        @rule(logs=['cloudwatch:test_match_types'],
              datatypes=['sourceAddress'],
              outputs=['s3:sample_bucket'])
        def has_logs_datatypes(rec):  # pylint: disable=unused-variable
            """Testing rule when logs is present, datatypes is"""
            results = fetch_values_by_datatype(rec, 'sourceAddress')

            for result in results:
                if result == '1.1.1.2':
                    return True
            return False

        # Single record crafted to satisfy all three rules above
        kinesis_data_items = [{
            'account': 123456,
            'region': '123456123456',
            'source': '1.1.1.2',
            'detail': {
                'eventName': 'ConsoleLogin',
                'sourceIPAddress': '1.1.1.2',
                'recipientAccountId': '654321'
            }
        }]

        # Classify each record as a kinesis payload and run the rules engine
        alerts = []
        for data in kinesis_data_items:
            kinesis_data = json.dumps(data)
            service, entity = 'kinesis', 'test_kinesis_stream'
            raw_record = make_kinesis_raw_record(entity, kinesis_data)
            payload = load_and_classify_payload(self.config, service, entity,
                                                raw_record)

            alerts.extend(self.rules_engine.process(payload)[0])

        # Every rule should have fired exactly once
        assert_equal(len(alerts), 3)
        rule_names = [
            'no_logs_has_datatypes', 'has_logs_no_datatypes',
            'has_logs_datatypes'
        ]
        assert_items_equal([alerts[i]['rule_name'] for i in range(3)],
                           rule_names)
 def test_biblio(self):
     """Biblio lookup for a tweet returns the full expected metadata dict."""
     biblio_dict = self.provider.biblio([self.testitem_biblio])
     # print() is valid in both Python 2 and 3; the bare print statement is not
     print(biblio_dict)
     expected = {
         'account': u'@researchremix',
         'repository': 'Twitter',
         'title': u'@researchremix',
         'url': 'https://api.twitter.com/1/statuses/oembed.json?id=400821465828061184&hide_media=1&hide_thread=1&maxwidth=650',
         'year': '2013',
         'tweet_text': u'The FIRST Act Is the Last Open Access Reform We&#39;d Ever Want <a href="https://t.co/CzALjCncyJ">https://t.co/CzALjCncyJ</a> <a href="https://twitter.com/search?q=%23openaccess&amp;src=hash">#openaccess</a>',
         'authors': u'Heather Piwowar',
         'date': '2013-11-14T00:00:00',
         'embed': u'<blockquote class="twitter-tweet" data-cards="hidden" width="550"><p>The FIRST Act Is the Last Open Access Reform We&#39;d Ever Want <a href="https://t.co/CzALjCncyJ">https://t.co/CzALjCncyJ</a> <a href="https://twitter.com/search?q=%23openaccess&amp;src=hash">#openaccess</a></p>&mdash; Heather Piwowar (@researchremix) <a href="https://twitter.com/researchremix/statuses/400821465828061184">November 14, 2013</a></blockquote>\n<script async src="//platform.twitter.com/widgets.js" charset="utf-8"></script>'
     }
     # Same key set, and every value matches exactly
     assert_items_equal(biblio_dict.keys(), expected.keys())
     for key in expected.keys():
         assert_equals(biblio_dict[key], expected[key])
    def test_ordinal_param_def(self):
        """Exercise OrdinalParamDef construction, comparison and distance."""
        # Invalid constructor arguments must raise
        for bad_arg in (False, []):
            with assert_raises(ValueError):
                _ = OrdinalParamDef(bad_arg)

        test_values = ["A", "B", "C"]
        pd = OrdinalParamDef(test_values)

        # Out-of-domain values must raise for both comparison and distance
        for first, second in (("A", "D"), ("D", "A")):
            with assert_raises(ValueError):
                pd.compare_values(first, second)
        for first, second in (("A", "D"), ("D", "A")):
            with assert_raises(ValueError):
                pd.distance(first, second)

        assert_items_equal(pd.values, test_values)
        # warp_in/warp_out must be inverses on any domain value
        choice = random.choice(test_values)
        assert_equal(choice, pd.warp_out(pd.warp_in(choice)))

        assert_true(pd.is_in_parameter_domain("A"))
        assert_false(pd.is_in_parameter_domain(1))
        assert_equal(pd.distance("A", "B"), 1. / 3)
        assert_equal(pd.distance("A", "C"), 2. / 3)
        assert_equal(pd.compare_values("A", "B"), -1)
        assert_equal(pd.compare_values("A", "A"), 0)
    def test_basic_rule_matcher_process(self):
        """Rules Engine - Basic Rule/Matcher"""
        # Matcher: restrict rules to records from the 'prod' environment
        @matcher
        def prod(rec):  # pylint: disable=unused-variable
            return rec['environment'] == 'prod'

        # Rule with no logs/outputs: exercised as an incomplete registration
        @rule()
        def incomplete_rule(_):  # pylint: disable=unused-variable
            return True

        @rule(logs=['test_log_type_json_nested_with_data'],
              outputs=['s3:sample_bucket'])
        def minimal_rule(rec):  # pylint: disable=unused-variable
            return rec['unixtime'] == 1483139547

        # NOTE(review): matcher 'foobar' is never defined — presumably unknown
        # matchers are skipped by the engine; confirm against StreamRules.
        @rule(matchers=['foobar', 'prod'],
              logs=['test_log_type_json_nested_with_data'],
              outputs=['pagerduty:sample_integration'])
        def chef_logs(rec):  # pylint: disable=unused-variable
            return rec['application'] == 'chef'

        @rule(matchers=['foobar', 'prod'],
              logs=['test_log_type_json_nested_with_data'],
              outputs=['pagerduty:sample_integration'])
        def test_nest(rec):  # pylint: disable=unused-variable
            return rec['data']['source'] == 'eu'

        # Record crafted to satisfy minimal_rule, chef_logs and test_nest
        kinesis_data = {
            'date': 'Dec 01 2016',
            'unixtime': '1483139547',
            'host': 'host1.web.prod.net',
            'application': 'chef',
            'environment': 'prod',
            'data': {
                'category': 'web-server',
                'type': '1',
                'source': 'eu'
            }
        }

        # prepare the payloads
        service, entity = 'kinesis', 'test_kinesis_stream'
        raw_record = make_kinesis_raw_record(entity, json.dumps(kinesis_data))
        payload = load_and_classify_payload(self.config, service, entity, raw_record)

        # process payloads
        alerts = StreamRules.process(payload)

        # check alert output
        assert_equal(len(alerts), 3)
        rule_outputs_map = {
            'chef_logs': ['pagerduty:sample_integration'],
            'minimal_rule': ['s3:sample_bucket'],
            'test_nest': ['pagerduty:sample_integration']
        }
        # doing this because after kinesis_data is read in, types are casted per
        # the schema
        for alert in alerts:
            assert_items_equal(alert['record'].keys(), kinesis_data.keys())
            assert_items_equal(alert['outputs'], rule_outputs_map[alert['rule_name']])
 def test_extract_members3(self):
     """Extract member items from a saved ORCID page and compare to a fixture."""
     # NOTE(review): file handle is never closed — acceptable in a test, but
     # a `with` block would be cleaner; confirm before changing.
     f = open(SAMPLE_EXTRACT_MEMBER_ITEMS_PAGE3, "r")
     members = self.provider._extract_members(f.read(), TEST_ORCID_ID3)
     print members
     # Expected fixture: biblio tuples plus one ('doi', ...) alias
     expected = [('biblio', {'isbn': '0702251879', 'title': 'Moving Among Strangers: Randolph Stow and My Family', 'first_author': u'Carey', 'journal': '', 'year': '2013', 'number': '', 'volume': '', 'first_page': '', 'authors': u'Carey', 'genre': 'book', 'full_citation': '@book{RID:1202132010973-11,\ntitle = {Moving Among Strangers: Randolph Stow and My Family},\npublisher = {},\nyear = {2013},\nauthor = {Carey, Gabrielle},\neditor = {}\n}', 'full_citation_type': 'bibtex'}), ('biblio', {'full_citation': '@article{RID:1202132010973-13,\ntitle = {Randolph Stow: An Ambivalent Australian},\njournal = {Kill Your Darlings},\nyear = {2013},\nauthor = {Carey, Gabrielle},\nnumber = {12},\npages = {27}\n}', 'title': 'Randolph Stow: An Ambivalent Australian', 'first_author': u'Carey', 'journal': 'Kill Your Darlings', 'year': '2013', 'number': '12', 'volume': '', 'first_page': '27', 'authors': u'Carey', 'genre': 'journal article', 'full_citation_type': 'bibtex'}), ('biblio', {'isbn': '1742759297', 'title': 'Puberty blues', 'first_author': u'Lette', 'journal': '', 'year': '2012', 'number': '', 'volume': '', 'first_page': '', 'authors': u'Lette, Carey', 'genre': 'book', 'full_citation': '@book{RID:1202132010974-1,\ntitle = {Puberty blues},\npublisher = {},\nyear = {2012},\nauthor = {Lette, Kathy and  Carey, Gabrielle},\neditor = {}\n}', 'full_citation_type': 'bibtex'}), ('biblio', {'full_citation': '@article{RID:1202132010974-15,\ntitle = {Moving Among Strangers, Darkly},\njournal = {},\nyear = {2010},\nauthor = {Gabrielle, Carey}\n}', 'title': 'Moving Among Strangers, Darkly', 'first_author': u'Gabrielle', 'journal': '', 'year': '2010', 'number': '', 'volume': '', 'first_page': '', 'authors': u'Gabrielle', 'genre': 'journal article', 'full_citation_type': 'bibtex'}), ('biblio', {'full_citation': '@article{RID:1202132010974-9,\ntitle = {High-value niche production: what Australian wineries might learn from a Bordeaux first growth},\njournal = {International journal of 
technology, policy and management},\nyear = {2009},\nauthor = {Aylward, David and  Carey, Gabrielle},\nvolume = {9},\nnumber = {4},\npages = {342-357}\n}', 'title': 'High-value niche production: what Australian wineries might learn from a Bordeaux first growth', 'first_author': u'Aylward', 'journal': 'International journal of technology, policy and management', 'year': '2009', 'number': '4', 'volume': '9', 'first_page': '342-357', 'authors': u'Aylward, Carey', 'genre': 'journal article', 'full_citation_type': 'bibtex'}), ('biblio', {'isbn': '1921372621', 'title': 'Waiting Room: A Memoir', 'first_author': u'Carey', 'journal': '', 'year': '2009', 'number': '', 'volume': '', 'first_page': '', 'authors': u'Carey', 'genre': 'book', 'full_citation': '@book{RID:1202132010974-12,\ntitle = {Waiting Room: A Memoir},\npublisher = {},\nyear = {2009},\nauthor = {Carey, Gabrielle},\neditor = {}\n}', 'full_citation_type': 'bibtex'}), ('biblio', {'full_citation': '@article{RID:1202132010974-10,\ntitle = {Literature and Religion as Rivals},\njournal = {Sydney Studies in Religion},\nyear = {2008},\nauthor = {Carey, Gabrielle}\n}', 'title': 'Literature and Religion as Rivals', 'first_author': u'Carey', 'journal': 'Sydney Studies in Religion', 'year': '2008', 'number': '', 'volume': '', 'first_page': '', 'authors': u'Carey', 'genre': 'journal article', 'full_citation_type': 'bibtex'}), ('doi', '10.1162/daed.2006.135.4.60'), ('biblio', {'isbn': '0733305741', 'title': 'Australian Story: Australian Lives', 'first_author': u'Carey', 'journal': '', 'year': '1997', 'number': '', 'volume': '', 'first_page': '', 'authors': u'Carey', 'genre': 'book', 'full_citation': '@book{RID:1202132010974-7,\ntitle = {Australian Story: Australian Lives},\npublisher = {},\nyear = {1997},\nauthor = {Carey, Gabrielle},\neditor = {}\n}', 'full_citation_type': 'bibtex'}), ('biblio', {'isbn': '0140259384', 'title': 'The Penguin Book of Death', 'first_author': u'Carey', 'journal': '', 'year': '1997', 'number': '', 
'volume': '', 'first_page': '', 'authors': u'Carey, Sorensen', 'genre': 'book', 'full_citation': '@book{RID:1202132010975-8,\ntitle = {The Penguin Book of Death},\npublisher = {},\nyear = {1997},\nauthor = {Carey, Gabrielle and  Sorensen, Rosemary Lee},\neditor = {}\n}', 'full_citation_type': 'bibtex'}), ('biblio', {'full_citation': '@article{RID:1202132010975-6,\ntitle = {Prenatal Depression, Postmodern World},\njournal = {Mother love: stories about births, babies & beyond},\nyear = {1996},\nauthor = {Carey, Gabrielle},\npages = {179}\n}', 'title': 'Prenatal Depression, Postmodern World', 'first_author': u'Carey', 'journal': 'Mother love: stories about births, babies & beyond', 'year': '1996', 'number': '', 'volume': '', 'first_page': '179', 'authors': u'Carey', 'genre': 'journal article', 'full_citation_type': 'bibtex'}), ('biblio', {'isbn': '0330355988', 'title': 'The Borrowed Girl', 'first_author': u'Carey', 'journal': '', 'year': '1994', 'number': '', 'volume': '', 'first_page': '', 'authors': u'Carey', 'genre': 'book', 'full_citation': '@book{RID:1202132010975-5,\ntitle = {The Borrowed Girl},\npublisher = {},\nyear = {1994},\nauthor = {Carey, Gabrielle},\neditor = {}\n}', 'full_citation_type': 'bibtex'}), ('biblio', {'isbn': '0330272942', 'title': "In My Father's House", 'first_author': u'Carey', 'journal': '', 'year': '1992', 'number': '', 'volume': '', 'first_page': '', 'authors': u'Carey, Hudson', 'genre': 'book', 'full_citation': "@book{RID:1202132010975-4,\ntitle = {In My Father's House},\npublisher = {},\nyear = {1992},\nauthor = {Carey, Gabrielle and  Hudson, Elaine},\neditor = {}\n}", 'full_citation_type': 'bibtex'}), ('biblio', {'isbn': '0140074252', 'title': 'Just Us', 'first_author': u'Carey', 'journal': '', 'year': '1984', 'number': '', 'volume': '', 'first_page': '', 'authors': u'Carey', 'genre': 'book', 'full_citation': '@book{RID:1202132010975-3,\ntitle = {Just Us},\npublisher = {},\nyear = {1984},\nauthor = {Carey, Gabrielle},\neditor = 
{}\n}', 'full_citation_type': 'bibtex'}), ('biblio', {'isbn': '0872237680', 'title': 'Puberty blues', 'first_author': u'Carey', 'journal': '', 'year': '1982', 'number': '', 'volume': '', 'first_page': '', 'authors': u'Carey, Lette', 'genre': 'book', 'full_citation': '@book{RID:1202132010975-2,\ntitle = {Puberty blues},\npublisher = {},\nyear = {1982},\nauthor = {Carey, Gabrielle and  Lette, Kathy},\neditor = {}\n}', 'full_citation_type': 'bibtex'})]
     print expected
     assert_items_equal(members, expected)
Example #12
0
def test_getting_all_contacts_by_last_update():
    """Contacts stream includes lastmodifieddate and honors the cutoff."""
    with _get_portal_connection() as connection:
        # Force an update so at least one contact has a recent timestamp
        _update_random_contact(connection)

        first_contact = next(get_all_contacts_by_last_update(connection))
        assert_in('lastmodifieddate', first_contact.properties)

        requested_property_names = ('email',)
        contacts = get_all_contacts_by_last_update(
            connection,
            property_names=requested_property_names,
        )
        first_contact = next(contacts)
        # lastmodifieddate is always returned alongside requested properties
        assert_items_equal(
            ('lastmodifieddate',) + requested_property_names,
            first_contact.properties.keys(),
        )

        # A cutoff in the future must yield no contacts at all
        contacts_from_future = get_all_contacts_by_last_update(
            connection,
            property_names=requested_property_names,
            cutoff_datetime=datetime.now() + timedelta(days=100),
        )
        eq_([], list(contacts_from_future))
Example #13
0
 def having_three_trashinfo_in_trashcan(self):
     """Populate the trashcan with three .trashinfo files and verify them."""
     expected = ['foo.trashinfo', 'bar.trashinfo', 'baz.trashinfo']
     for trashinfo_name in expected:
         self.having_a_trashinfo_in_trashcan(trashinfo_name)
     # The info directory must contain exactly the three files we created
     assert_items_equal(expected, os.listdir(self.info_dir_path))
 def test_biblio(self):
     """Biblio lookup for a YouTube video returns the expected metadata."""
     biblio_dict = self.provider.biblio([self.testitem_biblio])
     # print() is valid in both Python 2 and 3; the bare print statement is not
     print(biblio_dict)
     expected = {'channel_title': 'ImpactStory', 'repository': 'YouTube', 'title': 'Y Combinator video outtakes', 'url': 'http://www.youtube.com/watch?v=d39DL4ed754', 'published_date': '2013-10-15T21:48:48.000Z', 'year': '2013'}
     assert_items_equal(biblio_dict.keys(), expected.keys())
     # Only the stable fields are compared by value
     for key in ['year', 'published_date', 'title', 'url']:
         assert_equals(biblio_dict[key], expected[key])
 def test_configuration_setter(self):
     """Setting `configuration` directly is unsupported; test is skipped."""
     raise SkipTest()
     # This should not work anymore
     # NOTE(review): everything below is unreachable dead code kept to document
     # the old API; consider deleting once the deprecation is final.
     self.sim.configuration = Configuration(
         coordinates=np.array([[1, 2, 3]])
     )
     assert_items_equal(self.sim.positions, [1, 2])
    def test_update(self):
        """
        Tests whether update works.
            - candidate exists in the list
            - result is equal
            - the status message incorrect error works
            - the candidate instance check works
        """
        param_defs = {
            "x": MinMaxNumericParamDef(0, 1),
            "name": NominalParamDef(["A", "B", "C"])
        }
        assistant = PrettyExperimentAssistant(
            "test_init_experiment", "RandomSearch", param_defs,
            minimization=True)

        candidate = assistant.get_next_candidate()
        candidate.result = 1
        assistant.update(candidate)
        # The finished candidate is recorded together with its result
        assert_items_equal(assistant.experiment.candidates_finished, [candidate])
        assert_equal(assistant.experiment.candidates_finished[0].result, 1)

        # Legal status transitions succeed
        assistant.update(candidate, "pausing")
        assistant.update(candidate, "working")
        # An unknown status string is rejected
        with assert_raises(ValueError):
            assistant.update(candidate, status="No status.")

        # Non-Candidate arguments are rejected
        with assert_raises(ValueError):
            assistant.update(False)
 def test_load_configuration(self):
     """The simulation's configuration mirrors its current state."""
     cfg = self.sim.configuration
     # Coordinates match the simulation positions
     assert_items_equal(cfg.coordinates[0], self.sim.positions)
     # Stored energy agrees with a fresh PES evaluation
     assert_equal(cfg.potential_energy, self.sim.pes.V(self.sim))
     assert_equal(cfg.box_vectors, None)
Example #18
0
 def test_setitem_itemexists_replica(self):
     """Assigning an equivalent sample by index keeps the set consistent."""
     samples = [self.s0A, self.s1A, self.s2B, self.s2B_]
     testset = SampleSet(samples)
     reference = SampleSet(samples)
     # Overwrite index 2 with a sample sharing the same replica
     testset[2] = self.s2B
     testset.consistency_check()
     assert_equal(len(testset), 4)
     assert_items_equal(testset, reference)
def test_getting_all_contacts_by_last_update():
    """The contact stream exposes lastmodifieddate and respects the cutoff."""
    with _get_portal_connection() as connection:
        first_contact = next(get_all_contacts_by_last_update(connection))
        assert_in('lastmodifieddate', first_contact.properties)

        requested_property_names = ('email',)
        contacts = get_all_contacts_by_last_update(
            connection,
            property_names=requested_property_names,
        )
        # lastmodifieddate is implicitly added to any requested properties
        assert_items_equal(
            ('lastmodifieddate',) + requested_property_names,
            next(contacts).properties.keys(),
        )

        # No contact can have been modified after a cutoff 100 days from now
        future_cutoff = datetime.now() + timedelta(days=100)
        contacts_from_future = get_all_contacts_by_last_update(
            connection,
            property_names=requested_property_names,
            cutoff_datetime=future_cutoff,
        )
        eq_([], list(contacts_from_future))
Example #20
0
 def test_additem(self):
     """Appending a new sample grows the set and keeps it consistent."""
     reference = SampleSet([self.s0A, self.s1A, self.s2B, self.s2B_])
     self.testset.append(self.s2B_)
     # The appended sample is now a member and the count grew by one
     assert_equal(self.s2B_ in self.testset, True)
     assert_equal(len(self.testset), 4)
     self.testset.consistency_check()
     assert_items_equal(self.testset, reference)
    def test_read(self):
        """CSVStreamDataSource.read() yields chunked frames with the expected
        header and rows, and records the chunk size and target column."""
        expected_chunk = 10000
        expected_target = None

        csv_datasource = CSVStreamDataSource(self.in_path, expected_chunk)
        csv_dataset = csv_datasource.read()

        expected_header = ['a1', 'a2', 'a3', 'a4', 'target']
        expected_rows = [[0, 0, 0, 0, 1], [1, 1, 0, 1, 0], [1, 0, 0, 1, 0]]

        # The builtin next() works on Python 2 and 3 iterators alike,
        # unlike the Python-2-only .next() method.
        pandas_df = next(csv_dataset.reader)

        header = list(pandas_df.columns)
        # Materialize every row as a plain list (index is not needed)
        rows = [list(row) for _, row in pandas_df.iterrows()]

        assert_equals(csv_datasource.chunk, expected_chunk)
        assert_equals(csv_datasource.target, expected_target)

        assert_items_equal(header, expected_header)
        assert_items_equal(rows, expected_rows)
Example #22
0
 def test_additem_itemexists(self):
     """Appending an already-present sample leaves the set unchanged."""
     reference = SampleSet([self.s0A, self.s1A, self.s2B])
     # exact sample is already there
     self.testset.append(self.s2B)
     self.testset.consistency_check()
     assert_equal(len(self.testset), 3)
     assert_items_equal(self.testset, reference)
Example #23
0
    def test_get_all_apps(self):
        """StreamAlertApp - Get All Apps"""
        # Every supported app type, listed alphabetically for easy diffing
        expected_apps = {
            'aliyun_actiontrail',
            'box_admin_events',
            'duo_admin',
            'duo_auth',
            'gsuite_admin',
            'gsuite_calendar',
            'gsuite_drive',
            'gsuite_gplus',
            'gsuite_groups',
            'gsuite_login',
            'gsuite_mobile',
            'gsuite_rules',
            'gsuite_saml',
            'gsuite_token',
            'onelogin_events',
            'salesforce_console',
            'salesforce_login',
            'salesforce_loginas',
            'salesforce_report',
            'salesforce_reportexport',
            'slack_access',
            'slack_integration',
        }
        assert_items_equal(expected_apps, StreamAlertApp.get_all_apps())
    def test_has_sequential_ids_bad_data(self):
        """A morphology file with missing ids fails the sequential-id check."""
        data = self.load_data(os.path.join(SWC_PATH, 'Neuron_missing_ids.swc'))
        result = chk.has_sequential_ids(data)
        nt.ok_(not result)
        # The check reports exactly the rows where ids were skipped
        nt.assert_items_equal(result.info, [6, 217, 428, 639])
Example #25
0
def test_dmesg_wrap_partial():
    """update_dmesg() reports only genuinely new lines after a partial wrap.

    DMESG_COMMAND is overridden on a single LinuxDmesg instance so we can
    simulate the kernel ring buffer overflowing: the buffer starts as the
    lines a, b, c and is then replaced by b, c, d. Only 'd' is new, so
    _new_messages must contain exactly ['d'].
    """
    # Override on the instance only, to avoid global side effects on the class
    test = dmesg.LinuxDmesg()
    test.DMESG_COMMAND = ['echo', 'a\nb\nc\n']
    test.update_dmesg()

    # Simulate the wrap: 'a' scrolled off the top, 'd' arrived at the bottom
    test.DMESG_COMMAND = ['echo', 'b\nc\nd\n']
    test.update_dmesg()

    nt.assert_items_equal(
        test._new_messages,
        ['d'],
        msg=("_new_messages should be equal to ['d'], but is"
             " {} instead.".format(test._new_messages)))
Example #26
0
 def test_setitem_itemexists_replica(self):
     """Index assignment of a same-replica sample preserves set invariants."""
     initial = [self.s0A, self.s1A, self.s2B, self.s2B_]
     testset = SampleSet(initial)
     reference = SampleSet(initial)
     # Replace slot 2 with the equivalent sample for the same replica
     testset[2] = self.s2B
     testset.consistency_check()
     assert_equal(len(testset), 4)
     assert_items_equal(testset, reference)
Example #27
0
    def test_get_table_partitions(self):
        """Athena - Get Table Partitions"""
        # Fake query results; note 'dt=2018-12-09-10' appears twice
        partition_values = [
            'dt=2018-12-10-10',
            'dt=2018-12-09-10',
            'dt=2018-12-09-10',
            'dt=2018-12-11-10',
        ]
        self.client._client.results = [
            {'Data': [{'VarCharValue': value}]} for value in partition_values
        ]

        result = self.client.get_table_partitions('test_table')
        # Duplicates should collapse into a unique set of partitions
        assert_items_equal(result, {
            'dt=2018-12-10-10', 'dt=2018-12-09-10', 'dt=2018-12-11-10'
        })
def test_dmesg_wrap_partial():
    """dmesg.Dmesg.update_dmesg(): still works after partial wrap of dmesg.

    The instance's DMESG_COMMAND is swapped out to emulate the ring buffer
    overflowing: contents go from the lines a, b, c to b, c, d. Since 'd' is
    the only line not previously seen, _new_messages must be exactly ["d"].
    """
    # Override per-instance so the class-level DMESG_COMMAND stays untouched
    test = dmesg.LinuxDmesg()
    test.DMESG_COMMAND = ["echo", "a\nb\nc\n"]
    test.update_dmesg()

    # Emulate the wrap: drop "a", append "d"
    test.DMESG_COMMAND = ["echo", "b\nc\nd\n"]
    test.update_dmesg()

    expected = ["d"]
    failure_msg = ("_new_messages should be equal to ['d'], but is"
                   " {} instead.".format(test._new_messages))
    nt.assert_items_equal(test._new_messages, expected, msg=failure_msg)
Example #29
0
def test_tv_1d():
    """1-D total variation compiles to TV-1D + sum-of-squares prox ops."""
    cvx_problem = cvxpy_expr.convert_problem(tv_1d.create(n=10))
    problem = compiler.compile_problem(cvx_problem)
    assert_items_equal(
        prox_ops(problem.objective),
        [Prox.TOTAL_VARIATION_1D, Prox.SUM_SQUARE])
    assert_equal(1, len(problem.constraint))
Example #30
0
 def test_run_forbidden_states(self):
     """Bootstrapping succeeds normally but must raise RuntimeError once
     forbidden_states includes a state the trajectory visits."""
     engine = CalvinistDynamics([-0.5, 0.3, 3.2, -0.1, 0.8, -0.1])
     # first, without setting forbidden_states
     bootstrap1 = FullBootstrapping(
         transition=self.tisAB,
         snapshot=self.snapA,
         engine=engine
     )
     bootstrap1.output_stream = open(os.devnull, "w")
     gs1 = bootstrap1.run()
     assert_equal(len(gs1), 3)
     assert_items_equal(self.cv(gs1[0]), [-0.5, 0.3, 3.2, -0.1])
     # now with setting forbidden_states
     bootstrap2 = FullBootstrapping(
         transition=self.tisAB,
         snapshot=self.snapA,
         engine=engine,
         forbidden_states=[self.stateC]
     )
     bootstrap2.output_stream = open(os.devnull, "w")
     # make sure this is where we get the error
     try:
         bootstrap2.run()
     except RuntimeError:
         pass
     else:
         # Bug fix: previously a missing RuntimeError went unnoticed and the
         # test passed vacuously; now fail explicitly if nothing was raised.
         raise AssertionError("expected RuntimeError from bootstrap2.run()")
Example #31
0
    def _test_membership_update(
        self,
        expected_updated_contacts,
        contacts_to_update,
        contacts_in_list=None,
        contacts_in_hubspot=None,
        ):
        """Run the membership updater and verify which contacts were touched."""
        if contacts_in_list is None:
            contacts_in_list = []
        # Default universe: every contact we know about
        if contacts_in_hubspot is None:
            contacts_in_hubspot = \
                set(contacts_to_update) | set(contacts_in_list)

        updated_contacts = self._calculate_updated_contacts(
            contacts_to_update, contacts_in_list, contacts_in_hubspot)
        connection = self._make_connection(contacts_to_update, updated_contacts)
        with connection:
            added_contact_vids = self._MEMBERSHIP_UPDATER(
                _STUB_CONTACT_LIST, contacts_to_update, connection)

        # The updater must report exactly the vids we expected to change
        assert_items_equal(
            _get_contact_vids(expected_updated_contacts), added_contact_vids)
Example #32
0
def test_least_abs_deviations():
    """Least-abs-deviations compiles to CONSTANT + NORM_1 with 1 constraint."""
    converted = cvxpy_expr.convert_problem(least_abs_dev.create(m=10, n=5))
    compiled = compiler.compile_problem(converted)
    expected_ops = [Prox.CONSTANT, Prox.NORM_1]
    assert_items_equal(prox_ops(compiled.objective), expected_ops)
    assert_equal(len(compiled.constraint), 1)
Example #33
0
    def test_cloudwatch(self):
        """JSON Parser - CloudWatch JSON with envelope keys"""
        log_conf = self.config['logs']['test_cloudwatch']
        schema = log_conf['schema']
        options = log_conf['configuration']

        # Fixture holds one flow-log payload per line; only the first is used.
        with open('tests/unit/fixtures/cloudwatch.json', 'r') as fixture_file:
            data = fixture_file.readline().strip()

        records = self.parser_helper(data=data,
                                     schema=schema,
                                     options=options)

        assert_not_equal(records, False)
        assert_equal(len(records), 80)

        expected_keys = [
            'protocol', 'source', 'destination', 'srcport', 'destport', 'eni',
            'action', 'packets', 'bytes', 'windowstart', 'windowend',
            'version', 'account', 'flowlogstatus', 'streamalert:envelope_keys'
        ]
        expected_envelope_keys = ['logGroup', 'logStream', 'owner']

        # Every parsed record carries the full schema plus the envelope keys.
        for record in records:
            assert_items_equal(record.keys(), expected_keys)
            assert_items_equal(record['streamalert:envelope_keys'].keys(),
                               expected_envelope_keys)
Example #34
0
    def test_json_regex_key_with_envelope(self):
        """JSON Parser - Regex key with envelope"""
        log_conf = self.config['logs']['json:regex_key_with_envelope']
        schema = log_conf['schema']
        options = log_conf['configuration']

        # Syslog-style message with a JSON document embedded after the prefix.
        message = ('<10> auditd[1300] info: '
                   '{"nested_key_1": "nested_info",'
                   '"nested_key_2": "more_nested_info",'
                   '"nested_key_3": "even_more"}')
        data = json.dumps({
            'time': '14:01',
            'date': 'Jan 01, 2017',
            'host': 'host1.test.domain.tld',
            'message': message
        })

        records = self.parser_helper(data=data,
                                     schema=schema,
                                     options=options)

        # Outer fields land in the envelope; the embedded JSON keys become
        # the record itself.
        assert_items_equal(records[0]['streamalert:envelope_keys'].keys(),
                           ['date', 'time', 'host'])
        assert_items_equal(records[0].keys(),
                           ['nested_key_1', 'nested_key_2', 'nested_key_3',
                            'streamalert:envelope_keys'])
    def test_has_increasing_ids_bad_data(self):
        """A fixture with non-increasing IDs must fail the check and
        report the offending IDs in ``info``."""
        fixture = os.path.join(SWC_PATH, 'non_increasing_trunk_off_1_16pt.swc')
        result = chk.has_increasing_ids(self.load_data(fixture))
        nt.ok_(not result)
        nt.assert_items_equal(result.info, [6, 12])
Example #36
0
    def test_collection_post_new_collection(self):
        response = self.client.post(
            '/v1/collection' + "?key=validkey",
            data=json.dumps({"aliases": self.aliases, "title":"My Title"}),
            content_type="application/json")

        print response
        print response.data
        assert_equals(response.status_code, 201)  #Created
        assert_equals(response.mimetype, "application/json")
        response_loaded = json.loads(response.data)
        assert_equals(
                set(response_loaded.keys()),
                set(["collection"])
        )
        coll = response_loaded["collection"]
        assert_equals(len(coll["_id"]), 6)
        assert_equals(
            set(coll["alias_tiids"].keys()),
            set([":".join(alias) for alias in self.aliases])
        )

        collection_object = collection.Collection.query.filter_by(cid=coll["_id"]).first()
        assert_items_equal(collection_object.tiids, coll["alias_tiids"].values())
        assert_items_equal([added_item.alias_tuple for added_item in collection_object.added_items], [(unicode(a), unicode(b)) for (a, b) in self.aliases])
Example #37
0
def test_tv_denoise():
    """TV denoising compiles to 3x SUM_SQUARE + AFFINE + SOC, 2 constraints."""
    converted = cvxpy_expr.convert_problem(tv_denoise.create(n=10, lam=1))
    compiled = compiler.compile_problem(converted)
    expected_ops = ([Prox.SUM_SQUARE] * 3
                    + [Prox.AFFINE]
                    + [Prox.SECOND_ORDER_CONE])
    assert_items_equal(prox_ops(compiled.objective), expected_ops)
    assert_equal(len(compiled.constraint), 2)
Example #38
0
 def test_hlist(self):
     """multi_hset several hashes, then hlist must enumerate them all."""
     params = {
         'hash_a': {'a': 1, 'b': 2, 'c': 3, 'd': 4},
         'hash_b': {'h': 11, 'i': 12, 'j': 13, 'k': 14},
         'hash_c': {'o': 21, 'p': 22, 'q': 23},
         'hash_d': {'r': 31, 's': 32, 't': 33},
     }
     # each multi_hset reports how many fields it wrote
     for name, fields in params.items():
         written = self.client.multi_hset(name, **fields)
         assert_equals(written, len(fields))
     # NOTE: the lower bound deliberately has a trailing space ('hash_ ')
     listed = self.client.hlist('hash_ ', 'hash_z', 10)
     assert_items_equal(listed, params.keys())
     # clean up every hash we created
     for name, fields in params.items():
         assert_true(self.client.hclear(name))
 def test_biblio(self):
     biblio_dict = self.provider.biblio([self.testitem_biblio])
     print biblio_dict
     expected = {'create_date': u'2013-04-28', 'authors': u'Hrynaszkiewicz', 'repository': 'Publons', 'title': u'Data reuse and the open data citation advantage', 'journal': u'PeerJ', 'genre': 'peer review', 'year': u'2013', 'review_type': u'Pre Publication', 'review_url': u'https://peerj.com/articles/175v0.1/reviews/2/'}
     assert_items_equal(biblio_dict.keys(), expected.keys())
     for key in ["authors", "title", "create_date", "year"]:
         assert_equals(biblio_dict[key], expected[key])
Example #40
0
 def test_additem_itemexists(self):
     """Appending a sample that is already present leaves the set unchanged."""
     expected = SampleSet([self.s0A, self.s1A, self.s2B])
     self.testset.append(self.s2B)
     self.testset.consistency_check()
     assert_equal(len(self.testset), 3)
     assert_items_equal(self.testset, expected)
def test_nested_sequence_persistence1():
    # Writing a nested pulse sequence to a ConfigObj flattens the root's
    # items into item_N sections; the inner sequence keeps its own item_0.
    root = RootSequence()
    root.context = BaseContext()
    root.external_vars = {'a': 1.5}

    inner = Sequence(items=[Pulse(def_1='{2_stop} + 0.5', def_2='10',
                                  kind='Analogical', shape=SquareShape())])
    root.items.extend([
        Pulse(def_1='1.0', def_2='{a}'),
        Pulse(def_1='{a} + 1.0', def_2='3.0'),
        Pulse(def_1='{2_stop} + 0.5', def_2='10',
              kind='Analogical', shape=SquareShape()),
        inner,
    ])

    pref = root.preferences_from_members()
    assert_items_equal(pref.keys(),
                       ['name', 'local_vars', 'time_constrained',
                        'enabled', 'item_class', 'sequence_duration',
                        'item_0', 'item_1', 'item_2', 'item_3',
                        'context', 'def_1', 'def_2', 'def_mode',
                        'external_vars'])
    # item_3 is the nested Sequence: it carries its single pulse as item_0.
    assert_items_equal(pref['item_3'].keys(),
                       ['item_class', 'enabled', 'name', 'item_0',
                        'def_1', 'def_2', 'def_mode', 'local_vars',
                        'time_constrained'])
    def test_ordinal_param_def(self):
        """OrdinalParamDef rejects bad constructor args and out-of-domain
        values, and orders/measures its values as fractions of the domain."""
        # constructor requires a non-empty list
        with assert_raises(ValueError):
            _ = OrdinalParamDef(False)
        with assert_raises(ValueError):
            _ = OrdinalParamDef([])

        test_values = ["A", "B", "C"]
        pd = OrdinalParamDef(test_values)

        # any comparison or distance involving a value outside the domain
        # raises, regardless of argument position
        for pair in (("A", "D"), ("D", "A")):
            with assert_raises(ValueError):
                pd.compare_values(*pair)
            with assert_raises(ValueError):
                pd.distance(*pair)

        assert_items_equal(pd.values, test_values)
        # warping in and back out is the identity on domain members
        choice = random.choice(test_values)
        assert_equal(choice, pd.warp_out(pd.warp_in(choice)))

        assert_true(pd.is_in_parameter_domain("A"))
        assert_false(pd.is_in_parameter_domain(1))
        # distance is measured in steps over the domain size
        assert_equal(pd.distance("A", "B"), 1./3)
        assert_equal(pd.distance("A", "C"), 2./3)
        assert_equal(pd.compare_values("A", "B"), -1)
        assert_equal(pd.compare_values("A", "A"), 0)
Example #43
0
def check_items_equal(actual_value, expected_value, msg=""):
    """
    Assert equality of two values, dispatching on type for the most
    readable failure message.

    :param actual_value: value produced by the code under test
    :param expected_value: value the test expects
    :param msg: optional prefix prepended to the failure message
    :return: None; raises AssertionError on mismatch

    """
    # Tailor the message preamble to how the comparison will be rendered:
    # containers get a (-)/(+) diff legend, scalars a plain two-arg note.
    if isinstance(actual_value, (list, dict, tuple)):
        msg = "\n" + msg + "\n\nDiffering items :\nFirst Argument(Usually Actual) marked with (-)," \
                           "Second Argument(Usually Expected) marked with (+)"
    else:
        msg = "\n" + msg + "\nFirst Argument(Usually Actual), Second Argument(Usually Expected)"

    # Empty/falsy on either side: a diff adds nothing, compare directly.
    if not actual_value or not expected_value:
        assert_equal(actual_value, expected_value, u"{}\n{} != {}".format(msg, actual_value, expected_value))

    # Sequences: order-insensitive compare (both sides sorted) with a diff.
    elif isinstance(actual_value, (list, tuple)):
        assert_items_equal(sorted(actual_value), sorted(expected_value),
                           u"{}\n{}".format(msg, unicode(diff(sorted(actual_value),
                                                          sorted(expected_value)))))
    # Dicts: key-by-key compare with a dict diff.
    elif isinstance(actual_value, dict):
        assert_dict_equal(actual_value, expected_value,
                     u"{}\n{}".format(msg, unicode(diff(actual_value, dict(expected_value)))))
    # str/bool: plain equality (NOTE: py2 unicode strings fall through to
    # the final branch, which behaves the same way).
    elif isinstance(actual_value, (str, bool)):
        assert_equal(actual_value, expected_value,
                     u"{}\n{} != {}".format(msg, unicode(actual_value), unicode(expected_value)))
    # Anything else: plain equality.
    else:
        assert_equal(actual_value, expected_value,
                     u"{}\n{} != {}".format(msg, actual_value, expected_value))
Example #44
0
def test_basis_pursuit():
    """Basis pursuit compiles to CONSTANT + NORM_1 with two constraints."""
    converted = cvxpy_expr.convert_problem(basis_pursuit.create(m=10, n=30))
    compiled = compiler.compile_problem(converted)
    assert_items_equal(prox_ops(compiled.objective),
                       [Prox.CONSTANT, Prox.NORM_1])
    assert_equal(len(compiled.constraint), 2)
 def test_shooting_move(self):
     """A forward shot from the first frame is accepted and reproduces the
     expected trajectory coordinates."""
     self.shooter = ForwardShootMover(ensemble=self.ens,
                                      selector=FirstFrameSelector(),
                                      engine=self.dyn)
     change = self.shooter.move(self.gs)
     trials = change.trials
     assert_equal(len(trials), 1)
     assert_equal(change.accepted, True)
     coords = [s.coordinates[0][0] for s in trials[0].trajectory]
     assert_items_equal([-0.5, -0.4, -0.3, -0.2, -0.1], coords)
Example #46
0
def test_tv_denoise():
    # Compiling the TV-denoising problem yields three SUM_SQUARE proxes
    # plus AFFINE and SECOND_ORDER_CONE, with two constraints.
    prob = tv_denoise.create(n=10, lam=1)
    compiled = compiler.compile_problem(cvxpy_expr.convert_problem(prob))
    ops = prox_ops(compiled.objective)
    assert_items_equal(ops,
                       3 * [Prox.SUM_SQUARE] + [Prox.AFFINE]
                       + [Prox.SECOND_ORDER_CONE])
    assert_equal(2, len(compiled.constraint))
Example #47
0
    def test_collection_post_new_collection(self):
        response = self.client.post('/v1/collection' + "?key=validkey",
                                    data=json.dumps({
                                        "aliases": self.aliases,
                                        "title": "My Title"
                                    }),
                                    content_type="application/json")

        print response
        print response.data
        assert_equals(response.status_code, 201)  #Created
        assert_equals(response.mimetype, "application/json")
        response_loaded = json.loads(response.data)
        assert_equals(set(response_loaded.keys()), set(["collection"]))
        coll = response_loaded["collection"]
        assert_equals(len(coll["_id"]), 6)
        assert_equals(set(coll["alias_tiids"].keys()),
                      set([":".join(alias) for alias in self.aliases]))

        collection_object = collection.Collection.query.filter_by(
            cid=coll["_id"]).first()
        assert_items_equal(collection_object.tiids,
                           coll["alias_tiids"].values())
        assert_items_equal([
            added_item.alias_tuple
            for added_item in collection_object.added_items
        ], [(unicode(a), unicode(b)) for (a, b) in self.aliases])
Example #48
0
    def _test_membership_update(
        self,
        expected_updated_contacts,
        contacts_to_update,
        contacts_in_list=None,
        contacts_in_hubspot=None,
    ):
        """Run the membership updater and assert it reports exactly the
        vids of ``expected_updated_contacts``."""
        contacts_in_list = \
            [] if contacts_in_list is None else contacts_in_list
        # by default, HubSpot already knows every contact involved
        if contacts_in_hubspot is None:
            contacts_in_hubspot = (set(contacts_to_update)
                                   | set(contacts_in_list))

        updated_contacts = self._calculate_updated_contacts(
            contacts_to_update,
            contacts_in_list,
            contacts_in_hubspot,
        )
        connection = self._make_connection(contacts_to_update,
                                           updated_contacts)
        with connection:
            added_contact_vids = self._MEMBERSHIP_UPDATER(
                _STUB_CONTACT_LIST,
                contacts_to_update,
                connection,
            )

        assert_items_equal(_get_contact_vids(expected_updated_contacts),
                           added_contact_vids)
Example #49
0
 def test_mul(self):
     """Multiplying a lazy sequence repeats it without consuming the source."""
     seq = self.get_sequence([1, 2])
     doubled = seq * 2
     tools.assert_true(doubled.iterable)
     tools.assert_true(seq.iterable)
     tools.assert_items_equal(doubled, [1, 2, 1, 2])
     # the original sequence is untouched
     tools.assert_items_equal(seq, [1, 2])
Example #50
0
    def test_metrics2(self):
        metrics_dict = self.provider.metrics([
            ("url", "http://researchremix.wordpress.com/2011/08/10/personal")
        ])
        expected = {
            'topsy:influential_tweets':
            (1,
             'http://topsy.com/trackback?url=http%3A//researchremix.wordpress.com/2011/08/10/personal/&window=a'
             ),
            'topsy:tweets':
            (18,
             'http://topsy.com/trackback?url=http%3A//researchremix.wordpress.com/2011/08/10/personal/&window=a'
             )
        }
        print metrics_dict
        for key in expected:
            assert metrics_dict[key][0] >= expected[key][0], [
                key, metrics_dict[key], expected[key]
            ]
            assert metrics_dict[key][1] == expected[key][1], [
                key, metrics_dict[key], expected[key]
            ]

        #now with trailing slawh
        metrics_dict2 = self.provider.metrics([
            ("url", "http://researchremix.wordpress.com/2011/08/10/personal/")
        ])
        assert_items_equal(metrics_dict, metrics_dict2)
Example #51
0
 def test_setslice_lazy(self):
     """Slice assignment consumes only as much of the source as needed."""
     source = iter([1, 2, 3, 4])
     seq = self.get_sequence(source)
     seq[1:3] = (8, 9)
     tools.assert_true(seq.iterable)
     # only the first three source items were pulled; 4 is still pending
     tools.eq_(list(source), [4])
     tools.assert_items_equal(seq, [1, 8, 9])
Example #52
0
    def test_unique_values_from_query(self):
        """Athena - Unique Values from Query"""
        # 'barfoo' appears twice to exercise deduplication.
        values = ['foobar', 'barfoo', 'barfoo', 'foobarbaz']
        query = {
            'ResultSet': {
                'Rows': [{'Data': [{'VarCharValue': value}]}
                         for value in values]
            }
        }
        expected_result = {'foobar', 'barfoo', 'foobarbaz'}

        result = self.client._unique_values_from_query(query)
        assert_items_equal(result, expected_result)
Example #53
0
 def test_getslice_lazy(self):
     """Slicing returns a new lazy sequence and caches only what was read."""
     seq = self.get_sequence([1, 2, 3])
     head = seq[:2]
     tools.assert_is_not(head, seq)
     tools.assert_items_equal(head, [1, 2])
     tools.assert_true(seq.iterable)
     # only the sliced prefix has been materialized so far
     tools.eq_(list(seq._results.__iter__()), [1, 2])
 def test_run_forbidden_states(self):
     """Bootstrapping works without forbidden_states, but raises
     RuntimeError once the forbidden state is set and visited.
     """
     engine = CalvinistDynamics([-0.5, 0.3, 3.2, -0.1, 0.8, -0.1])
     # first, without setting forbidden_states
     bootstrap1 = FullBootstrapping(
         transition=self.tisAB,
         snapshot=self.snapA,
         engine=engine
     )
     bootstrap1.output_stream = open(os.devnull, "w")
     gs1 = bootstrap1.run()
     assert_equal(len(gs1), 3)
     assert_items_equal(self.cv(gs1[0]), [-0.5, 0.3, 3.2, -0.1])
     # now with setting forbidden_states
     bootstrap2 = FullBootstrapping(
         transition=self.tisAB,
         snapshot=self.snapA,
         engine=engine,
         forbidden_states=[self.stateC]
     )
     bootstrap2.output_stream = open(os.devnull, "w")
     # make sure this is where we get the error
     try:
         gs2 = bootstrap2.run()
     except RuntimeError:
         pass
     else:
         # BUG FIX: without this else-branch the test silently passed even
         # when the forbidden state never triggered an error
         raise AssertionError("expected RuntimeError was not raised")
Example #55
0
def test_with_all_filter():
    # The 'all' filter matches a secret when ANY field matches the
    # criteria -- an implicit OR across name, description, and tags.
    conn = boto_client()

    conn.create_secret(Name="foo", SecretString="secret")
    conn.create_secret(Name="bar", SecretString="secret", Description="foo")
    conn.create_secret(Name="baz", SecretString="secret",
                       Tags=[{"Key": "foo", "Value": "1"}])
    conn.create_secret(Name="qux", SecretString="secret",
                       Tags=[{"Key": "1", "Value": "foo"}])
    conn.create_secret(Name="multi", SecretString="secret",
                       Tags=[{"Key": "foo", "Value": "foo"}])
    # control: no field mentions "foo", so this one must not match
    conn.create_secret(Name="none", SecretString="secret")

    listing = conn.list_secrets(Filters=[{"Key": "all", "Values": ["foo"]}])

    found = [entry["Name"] for entry in listing["SecretList"]]
    assert_items_equal(found, ["foo", "bar", "baz", "qux", "multi"])
Example #56
0
 def test_additem(self):
     """Appending a new sample grows the set and keeps it consistent."""
     expected = SampleSet([self.s0A, self.s1A, self.s2B, self.s2B_])
     self.testset.append(self.s2B_)
     assert_equal(self.s2B_ in self.testset, True)
     assert_equal(len(self.testset), 4)
     self.testset.consistency_check()
     assert_items_equal(self.testset, expected)
Example #57
0
def test_with_duplicate_filter_keys():
    # Repeating a filter key combines the filters with an implicit AND:
    # only secrets whose description matches BOTH values survive.
    conn = boto_client()

    conn.create_secret(Name="foo", SecretString="secret",
                       Description="one two")
    conn.create_secret(Name="bar", SecretString="secret", Description="one")
    conn.create_secret(Name="baz", SecretString="secret", Description="two")
    conn.create_secret(Name="qux", SecretString="secret",
                       Description="unrelated")

    listing = conn.list_secrets(Filters=[
        {"Key": "description", "Values": ["one"]},
        {"Key": "description", "Values": ["two"]},
    ])

    found = [entry["Name"] for entry in listing["SecretList"]]
    assert_items_equal(found, ["foo"])
Example #58
0
    def test_update(self):
        """
        Tests whether update works.
            - candidate exists in the list
            - result is equal
            - the status message incorrect error works
            - the candidate instance check works
        """
        param_defs = {
            "x": MinMaxNumericParamDef(0, 1),
            "name": NominalParamDef(["A", "B", "C"]),
        }
        EAss = PrettyExperimentAssistant("test_init_experiment",
                                         "RandomSearch",
                                         param_defs,
                                         minimization=True)

        # finishing a candidate records it with its result
        cand = EAss.get_next_candidate()
        cand.result = 1
        EAss.update(cand)
        assert_items_equal(EAss.experiment.candidates_finished, [cand])
        assert_equal(EAss.experiment.candidates_finished[0].result, 1)

        # valid status transitions are accepted
        EAss.update(cand, "pausing")
        EAss.update(cand, "working")
        # an unknown status string is rejected
        with assert_raises(ValueError):
            EAss.update(cand, status="No status.")
        # a non-Candidate argument is rejected
        with assert_raises(ValueError):
            EAss.update(False)
Example #59
0
    def test_get_param(self):
        """AppIntegrationConfig - Get parameter"""
        # Fetch the single '<function>_config' entry; the second element of
        # the returned tuple is ignored here.
        param, _ = AppConfig._get_parameters(
            ['{}_config'.format(FUNCTION_NAME)])

        # The stored config must expose exactly these keys.
        assert_items_equal(
            param['{}_config'.format(FUNCTION_NAME)].keys(),
            {'cluster', 'app_name', 'type', 'prefix', 'schedule_expression'})