Example #1
0
    def test_submission_success_fail(self, db, c):
        """Verify the collector calls success() on a successful submission
        and fail() on a failed one.

        The module-level events below are expected to be set by the
        success()/fail() callbacks of _custom_submission — defined elsewhere
        in this file; TODO confirm.
        """

        # make sure the collector calls success() on successful submission
        # and fail() on failed submission

        global success_event
        success_event = threading.Event()
        global fail_event
        fail_event = threading.Event()

        class _custom_collector(TestCollector):
            # NOTE: _self is the collector instance; the enclosing test's
            # self stays visible through the closure (file-wide convention).
            def __init__(_self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                # one-shot flags: each scenario is exercised exactly once
                _self.success_tested = False
                _self.fail_tested = False
                # the test body toggles these to release the next submission
                _self.success_signal = False
                _self.fail_signal = False

            def get_next_submission(_self):
                # hand out one submission for the "success" scenario ...
                if not _self.success_tested:
                    if _self.success_signal:
                        _self.success_tested = True
                        return _custom_submission()

                # ... and one for the "fail" scenario
                if not _self.fail_tested:
                    if _self.fail_signal:
                        _self.fail_tested = True
                        return _custom_submission()

                return None

        # start an engine to get a node created
        engine = Engine()
        engine.start()
        wait_for_log_count('updated node', 1, 5)
        engine.controlled_stop()
        engine.wait()

        self.start_api_server()

        collector = _custom_collector()
        tg1 = collector.add_group('test_group_1', 100, False, saq.COMPANY_ID,
                                  'ace')  # 100% coverage, no full delivery
        collector.start()

        # trigger the "success" test (API server up -> submission succeeds)
        collector.success_signal = True
        self.assertTrue(success_event.wait(5))

        # with the API server down the next submission must fail
        self.stop_api_server()

        # trigger the "fail" test
        collector.fail_signal = True
        self.assertTrue(fail_event.wait(5))

        collector.stop()
        collector.wait()
Example #2
0
    def test_fail_submit_no_coverage(self, db, c):
        """With the API server unavailable and full delivery disabled, a
        failed submission is discarded: the collector tables and the engine
        workload all end up empty."""

        class _custom_collector(TestCollector):
            # NOTE: _self is the collector; self (the test case) is reached
            # through the closure -- the work list lives on the test case.
            def __init__(_self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.available_work = [
                    self.create_submission() for _ in range(1)
                ]

            def get_next_submission(_self):
                if not self.available_work:
                    return None

                return self.available_work.pop()

        # start an engine to get a node created
        engine = Engine()
        engine.start()
        wait_for_log_count('updated node', 1, 5)
        engine.controlled_stop()
        engine.wait()

        # we do NOT start the API server making it unavailable
        #self.start_api_server()

        collector = _custom_collector()
        tg1 = collector.add_group('test_group_1', 100, False, saq.COMPANY_ID,
                                  'ace')  # 100% coverage, full_coverage = no
        collector.start()

        # we should see 1 of these
        wait_for_log_count('scheduled test_description mode analysis', 1, 5)

        # watch for the failure
        wait_for_log_count('unable to submit work item', 1, 5)

        # wait for the queue to clear
        wait_for_log_count('completed work item', 1, 5)

        collector.stop()
        collector.wait()

        # everything should be empty at this point since we do not have full coverage
        # (assertEquals is a deprecated alias removed in Python 3.12 -- use assertEqual)
        c.execute("SELECT COUNT(*) FROM work_distribution WHERE group_id = %s",
                  (tg1.group_id, ))
        self.assertEqual(c.fetchone()[0], 0)
        # both the incoming_workload and work_distribution tables should be empty
        c.execute("SELECT COUNT(*) FROM incoming_workload")
        self.assertEqual(c.fetchone()[0], 0)

        # and we should have 0 in the engine workload
        c.execute("SELECT COUNT(*) FROM workload ")
        self.assertEqual(c.fetchone()[0], 0)
Example #3
0
    def test_submit(self, db, c):
        """A single submission with the API server reachable is delivered:
        the collector tables drain and one item lands in the engine
        workload."""

        class _custom_collector(TestCollector):
            # NOTE: _self is the collector; self (the test case) is reached
            # through the closure -- the work list lives on the test case.
            def __init__(_self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.available_work = [
                    self.create_submission() for _ in range(1)
                ]

            def get_next_submission(_self):
                if not self.available_work:
                    return None

                return self.available_work.pop()

        # start an engine to get a node created
        engine = Engine()
        engine.start()
        wait_for_log_count('updated node', 1, 5)
        engine.controlled_stop()
        engine.wait()

        self.start_api_server()

        collector = _custom_collector()
        tg1 = collector.add_group('test_group_1', 100, True, saq.COMPANY_ID,
                                  'ace')  # 100% coverage
        collector.start()

        # we should see 1 of these
        wait_for_log_count('scheduled test_description mode analysis', 1, 5)
        wait_for_log_count('submitting 1 items', 1, 5)
        wait_for_log_count('completed work item', 1, 5)

        collector.stop()
        collector.wait()

        # both the incoming_workload and work_distribution tables should be empty
        # (assertEquals is a deprecated alias removed in Python 3.12 -- use assertEqual)
        c.execute("SELECT COUNT(*) FROM work_distribution WHERE group_id = %s",
                  (tg1.group_id, ))
        self.assertEqual(c.fetchone()[0], 0)
        c.execute("SELECT COUNT(*) FROM incoming_workload")
        self.assertEqual(c.fetchone()[0], 0)

        # and we should have one item in the engine workload
        c.execute("SELECT COUNT(*) FROM workload ")
        self.assertEqual(c.fetchone()[0], 1)
Example #4
0
    def test_cleanup_files(self, db, c):
        """A file attached to a submission is removed from disk after
        delivery when the collector is built with delete_files=True."""

        # create a temp file for the submission to reference
        temp_fd, file_path = tempfile.mkstemp(dir=saq.TEMP_DIR)
        os.write(temp_fd, b'Hello, world!')
        os.close(temp_fd)

        class _custom_collector(TestCollector):
            # NOTE: _self is the collector; self (the test case) is reached
            # through the closure -- the work item lives on the test case.
            def __init__(_self, *args, **kwargs):
                super().__init__(delete_files=True, *args, **kwargs)
                # a single work item carrying the temp file
                self.work = self.create_submission()
                self.work.files = [file_path]

            def get_next_submission(_self):
                # hand the one item out exactly once
                if not self.work:
                    return None

                result, self.work = self.work, None
                return result

        # start an engine to get a node created
        engine = Engine()
        engine.start()
        wait_for_log_count('updated node', 1, 5)
        engine.controlled_stop()
        engine.wait()

        self.start_api_server()

        collector = _custom_collector()
        tg1 = collector.add_group('test_group_1', 100, True, saq.COMPANY_ID,
                                  'ace')  # 100% coverage
        collector.start()

        # wait for the submission to be picked up and delivered
        wait_for_log_count('scheduled test_description mode analysis', 1, 5)
        wait_for_log_count('submitting 1 items', 1, 5)

        collector.stop()
        collector.wait()

        # the file should have been deleted
        self.assertFalse(os.path.exists(file_path))
Example #5
0
    def test_node_invalid_assignment(self, db, c):
        """A submission assigned to an unknown group name still gets
        distributed: the invalid assignment is ignored."""

        class _custom_collector(TestCollector):
            # NOTE: _self is the collector; self (the test case) is reached
            # through the closure -- the work list lives on the test case.
            def __init__(_self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                submission = self.create_submission()
                # we assign to an invalid (unknown) group
                submission.group_assignments = ['test_group_invalid']
                self.available_work = [submission]

            def get_next_submission(_self):
                if not self.available_work:
                    return None

                return self.available_work.pop()

        # start an engine to get a node created
        engine = Engine()
        engine.start()
        wait_for_log_count('updated node', 1, 5)
        engine.controlled_stop()
        engine.wait()

        collector = _custom_collector()
        tg1 = collector.add_group('test_group_1', 100, True, saq.COMPANY_ID,
                                  'ace')  # 100% coverage
        tg2 = collector.add_group('test_group_2', 100, True, saq.COMPANY_ID,
                                  'ace')  # 100% coverage
        collector.execute()

        # both groups end up with an assignment -- the invalid name is not a
        # valid target so normal distribution applies.
        # NOTE(review): the original comment claimed test_group_2 would be
        # skipped, contradicting the assertion below (expects 1); confirm
        # the intended distribution behavior.
        # (assertEquals is a deprecated alias removed in Python 3.12)
        c.execute(
            """SELECT COUNT(*) FROM work_distribution JOIN work_distribution_groups ON work_distribution.group_id = work_distribution_groups.id
                     WHERE work_distribution_groups.name = %s""",
            ('test_group_1', ))
        self.assertEqual(c.fetchone()[0], 1)

        c.execute(
            """SELECT COUNT(*) FROM work_distribution JOIN work_distribution_groups ON work_distribution.group_id = work_distribution_groups.id
                     WHERE work_distribution_groups.name = %s""",
            ('test_group_2', ))
        self.assertEqual(c.fetchone()[0], 1)
Example #6
0
    def test_node_translation(self, db, c):
        """A node_translation config entry remaps a node's location when a
        RemoteNode is constructed."""

        # start an engine to get a node created
        engine = Engine()
        engine.start()
        wait_for_log_count('updated node', 1, 5)
        engine.controlled_stop()
        engine.wait()

        # get the current node settings from the database
        c.execute(
            "SELECT id, name, location, company_id, last_update, is_primary, any_mode, is_local FROM nodes"
        )
        node_id, name, location, _, last_update, _, any_mode, _ = c.fetchone()

        # add a configuration to map this location to a different location
        saq.CONFIG['node_translation']['unittest'] = '{},test:443'.format(
            location)

        remote_node = RemoteNode(node_id, name, location, any_mode,
                                 last_update, ANALYSIS_MODE_ANALYSIS, 0)
        # assertEquals is a deprecated alias removed in Python 3.12
        self.assertEqual(remote_node.location, 'test:443')
Example #7
0
    def test_recovery(self, db, c):
        """Work queued while the API server is down is retained in the
        collector tables and delivered by a later collector once the API
        server comes up."""

        class _custom_collector(TestCollector):
            # NOTE: _self is the collector; self (the test case) is reached
            # through the closure -- the work list lives on the test case.
            def __init__(_self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.available_work = [
                    self.create_submission() for _ in range(10)
                ]

            def get_next_submission(_self):
                if not self.available_work:
                    return None

                return self.available_work.pop()

        class _custom_collector_2(TestCollector):
            # second collector produces no new work of its own
            def get_next_submission(_self):
                return None

        # start an engine to get a node created
        engine = Engine()
        engine.start()
        wait_for_log_count('updated node', 1, 5)
        engine.controlled_stop()
        engine.wait()

        collector = _custom_collector()
        tg1 = collector.add_group('test_group_1', 100, True, saq.COMPANY_ID,
                                  'ace')  # 100% coverage
        collector.start()

        # the API server is not running so these will fail
        wait_for_log_count('scheduled test_description mode analysis', 10, 5)
        wait_for_log_count('unable to submit work item', 10, 5)

        # then we "shut down"
        collector.stop()
        collector.wait()

        # both the incoming_workload and work_distribution tables should have all 10 items
        # (assertEquals is a deprecated alias removed in Python 3.12)
        c.execute("SELECT COUNT(*) FROM work_distribution WHERE group_id = %s",
                  (tg1.group_id, ))
        self.assertEqual(c.fetchone()[0], 10)
        c.execute("SELECT COUNT(*) FROM incoming_workload")
        self.assertEqual(c.fetchone()[0], 10)

        # and we should have no items in the engine workload
        c.execute("SELECT COUNT(*) FROM workload ")
        self.assertEqual(c.fetchone()[0], 0)
        db.commit()

        # NOW start the API server
        self.start_api_server()

        # and then start up the collector
        collector = _custom_collector_2()
        tg1 = collector.add_group('test_group_1', 100, True, saq.COMPANY_ID,
                                  'ace')  # 100% coverage
        collector.start()

        # with the API server running now we should see these go out
        wait_for_log_count('completed work item', 10, 15)

        collector.stop()
        collector.wait()

        # now these should be empty
        c.execute("SELECT COUNT(*) FROM work_distribution WHERE group_id = %s",
                  (tg1.group_id, ))
        self.assertEqual(c.fetchone()[0], 0)
        c.execute("SELECT COUNT(*) FROM incoming_workload")
        self.assertEqual(c.fetchone()[0], 0)

        # and we should have 10 workload entries
        c.execute("SELECT COUNT(*) FROM workload ")
        self.assertEqual(c.fetchone()[0], 10)
Example #8
0
    def test_full_coverage_missing_node(self, db, c):
        """With full coverage required and one group pointing at a node-less
        database, delivery to the reachable group completes while the other
        assignment stays READY and the workload item is retained."""

        class _custom_collector(TestCollector):
            # NOTE: _self is the collector; self (the test case) is reached
            # through the closure -- the work list lives on the test case.
            def __init__(_self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.available_work = [
                    self.create_submission() for _ in range(1)
                ]

            def get_next_submission(_self):
                if not self.available_work:
                    return None

                return self.available_work.pop()

        # enable the second ace database schema built that is entirely empty
        # this is where we look for nodes in the "ace_2" remote node group (see below)
        saq.CONFIG['database_ace_2'] = {
            'hostname': saq.CONFIG['database_ace']['hostname'],
            'unix_socket': saq.CONFIG['database_ace']['unix_socket'],
            'database': 'ace-unittest-2',
            'username': saq.CONFIG['database_ace']['username'],
            'password': saq.CONFIG['database_ace']['password'],
            #'ssl_ca': saq.CONFIG['database_ace']['ssl_ca'],
        }

        # start an engine to get a node created for the "ace" node (but not the ace_2 node)
        engine = Engine()
        engine.start()
        wait_for_log_count('updated node', 1, 5)
        engine.controlled_stop()
        engine.wait()

        self.start_api_server()

        collector = _custom_collector()
        tg1 = collector.add_group('test_group_1', 100, True, saq.COMPANY_ID,
                                  'ace')  # 100% coverage, full_coverage = yes
        tg2 = collector.add_group('test_group_2', 100, True, saq.COMPANY_ID,
                                  'ace_2')  # 100% coverage, full_coverage = yes (node group has no nodes)
        collector.start()

        # we should see 1 of these
        wait_for_log_count('scheduled test_description mode analysis', 1, 5)

        # watch for the failure
        # (the 'avaiable' typo presumably matches the actual log message
        # emitted by the collector -- do not "fix" it here)
        wait_for_log_count(
            'no remote nodes are avaiable for all analysis modes', 1, 5)

        # this should time out
        with self.assertRaises(WaitTimedOutError):
            wait_for_log_count('completed work item', 1, 3)

        collector.stop()
        collector.wait()

        # the first group assignment should have completed
        # (assertEquals is a deprecated alias removed in Python 3.12)
        c.execute(
            "SELECT COUNT(*) FROM work_distribution WHERE group_id = %s AND status = 'COMPLETED'",
            (tg1.group_id, ))
        self.assertEqual(c.fetchone()[0], 1)
        # the second group assignment should still be in ready status
        c.execute(
            "SELECT COUNT(*) FROM work_distribution WHERE group_id = %s AND status = 'READY'",
            (tg2.group_id, ))
        self.assertEqual(c.fetchone()[0], 1)
        # and we should still have our workload item
        c.execute("SELECT COUNT(*) FROM incoming_workload")
        self.assertEqual(c.fetchone()[0], 1)

        # and we should have 1 in the engine workload
        c.execute("SELECT COUNT(*) FROM workload ")
        self.assertEqual(c.fetchone()[0], 1)
Example #9
0
    def test_coverage(self, db, c):
        """Coverage percentages control how many of 10 submissions each
        group receives: 100% -> 10, 50% -> 5, 10% -> 1 (16 deliveries
        total)."""

        class _custom_collector(TestCollector):
            # NOTE: _self is the collector; self (the test case) is reached
            # through the closure -- the work list lives on the test case.
            def __init__(_self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.available_work = [
                    self.create_submission() for _ in range(10)
                ]

            def get_next_submission(_self):
                if not self.available_work:
                    return None

                return self.available_work.pop()

        # start an engine to get a node created
        engine = Engine()
        engine.start()
        wait_for_log_count('updated node', 1, 5)
        engine.controlled_stop()
        engine.wait()

        self.start_api_server()

        collector = _custom_collector()
        tg1 = collector.add_group('test_group_1', 100, True, saq.COMPANY_ID,
                                  'ace')  # 100% coverage
        tg2 = collector.add_group('test_group_2', 50, True, saq.COMPANY_ID,
                                  'ace')  # 50% coverage
        tg3 = collector.add_group('test_group_3', 10, True, saq.COMPANY_ID,
                                  'ace')  # 10% coverage
        collector.start()

        # we should see 10 of these (one per submission; the original waited
        # for only 1, contradicting its own comment -- cf. test_recovery)
        wait_for_log_count('scheduled test_description mode analysis', 10, 5)
        # and then 16 of these
        wait_for_log_count('got submission result', 16, 15)
        # and 10 of these
        wait_for_log_count('completed work item', 10, 15)

        collector.stop()
        collector.wait()

        # both the incoming_workload and work_distribution tables should be empty
        # (assertEquals is a deprecated alias removed in Python 3.12)
        c.execute("SELECT COUNT(*) FROM work_distribution WHERE group_id = %s",
                  (tg1.group_id, ))
        self.assertEqual(c.fetchone()[0], 0)
        c.execute("SELECT COUNT(*) FROM work_distribution WHERE group_id = %s",
                  (tg2.group_id, ))
        self.assertEqual(c.fetchone()[0], 0)
        c.execute("SELECT COUNT(*) FROM work_distribution WHERE group_id = %s",
                  (tg3.group_id, ))
        self.assertEqual(c.fetchone()[0], 0)
        c.execute("SELECT COUNT(*) FROM incoming_workload")
        self.assertEqual(c.fetchone()[0], 0)

        # and we should have 16 in the engine workload
        c.execute("SELECT COUNT(*) FROM workload ")
        self.assertEqual(c.fetchone()[0], 16)

        # there should be 10 of these messages for test_group_1
        self.assertEqual(
            len(
                search_log_condition(lambda r: 'test_group_1' in r.getMessage(
                ) and 'got submission result' in r.getMessage())), 10)
        # and then 5 for this one
        self.assertEqual(
            len(
                search_log_condition(lambda r: 'test_group_2' in r.getMessage(
                ) and 'got submission result' in r.getMessage())), 5)
        # and just 1 for this one
        self.assertEqual(
            len(
                search_log_condition(lambda r: 'test_group_3' in r.getMessage(
                ) and 'got submission result' in r.getMessage())), 1)
Example #10
0
    def test_submission_filter(self, db, c):

        self.tuning_rule_dir = os.path.join(saq.DATA_DIR, 'tuning_rules')
        if os.path.isdir(self.tuning_rule_dir):
            shutil.rmtree(self.tuning_rule_dir)

        os.mkdir(self.tuning_rule_dir)
        saq.CONFIG['collection']['tuning_dir_default'] = self.tuning_rule_dir

        with open(os.path.join(self.tuning_rule_dir, 'filter.yar'), 'w') as fp:
            fp.write("""
rule test_filter {
    meta:
        targets = "submission"
    strings:
        $ = "description = test_description"
    condition:
        all of them
}
""")

        class _custom_collector(TestCollector):
            def __init__(_self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.available_work = [
                    self.create_submission() for _ in range(1)
                ]

            def get_next_submission(_self):
                if not self.available_work:
                    return None

                return self.available_work.pop()

        # start an engine to get a node created
        engine = Engine()
        engine.start()
        wait_for_log_count('updated node', 1, 5)
        engine.controlled_stop()
        engine.wait()

        collector = _custom_collector()
        tg1 = collector.add_group('test_group_1', 100, True, saq.COMPANY_ID,
                                  'ace')  # 100% coverage
        collector.start()

        # we should see 1 of these
        wait_for_log_count(
            'submission test_description matched 1 tuning rules', 1, 5)

        collector.stop()
        collector.wait()

        # everything should be empty
        c.execute("SELECT COUNT(*) FROM work_distribution WHERE group_id = %s",
                  (tg1.group_id, ))
        self.assertEquals(c.fetchone()[0], 0)
        c.execute("SELECT COUNT(*) FROM incoming_workload")
        self.assertEquals(c.fetchone()[0], 0)
        c.execute("SELECT COUNT(*) FROM workload ")
        self.assertEquals(c.fetchone()[0], 0)