Example #1
class CheckerUnittest(TestCase):

    def setUp(self):
        self._checker = Checker(None)
        self.parsed_ltl1 = ["U[1, 2]", "true", "ap1"]
        self.parsed_ltl2 = ["U[2, 2]", "true", "ap1"]
        self.parsed_ltl3 = ["U[3, 3]", "true", "ap1"]
        self.parsed_ltl4 = ["U[0.001, 0.001]", "true", "ap1"]
        self.parsed_ltl5 = ["U[0.002, 0.002]", "true", "ap1"]
        self.parsed_ltl6 = ["U[0.003, 0.003]", "true", "ap1"]

        apsets = [{"a"}, {"ap1"}]
        next_moves = [
            NextMove(0, 1, None, 1.0, 1.0),
            NextMove(1, 1, None, 1.0, 1.0)
        ]
        next_moves1 = [
            NextMove(0.0, 0.001, None, 1.0, 1.0),
            NextMove(0.001, 0.001, None, 1.0, 1.0)
        ]
        self.path = [Step(apset, move) for apset, move in zip(apsets, next_moves)]
        self.path1 = [Step(_apset, _move) for _apset, _move in zip(apsets, next_moves1)]

    def testCheckUntil(self):
        self.assertTrue(self._checker.verify(self.path, self.parsed_ltl1))
        self.assertTrue(self._checker.verify(self.path, self.parsed_ltl2))
        self.assertFalse(self._checker.verify(self.path, self.parsed_ltl3))

        self.assertTrue(self._checker.verify(self.path1, self.parsed_ltl4))
        self.assertTrue(self._checker.verify(self.path1, self.parsed_ltl5))
        self.assertFalse(self._checker.verify(self.path1, self.parsed_ltl6))
Example #2
 def setUp(self):
     filepath = get_prism_model_dir() + get_sep() + "smalltest.prism"
     self.model_constructor = ModelConstructor()
     self.model = self.model_constructor._parse(filepath)
     self.duration = 180
     self.model.duration = self.duration
     ltl = "true U<={} failure".format(self.duration)
     parsed = LTLParser().build_parser().parse_line(ltl)
     self.checker = Checker(model=self.model, ltl=parsed)
Example #3
 def setUp(self):
     self.manager = Manager()
     filepath = get_prism_model_dir() + get_sep() + "DPM.prism"
     self.manager.read_model_file(filepath)
     self.model = self.manager.model
     ltl = "true U<=10 failure"
     parsed_ltl = LTLParser().build_parser().parse_line(ltl)
     self.manager.set_manager_param_simple("duration", 10.0)
     self.checker = Checker(model=self.model, ltl=parsed_ltl, duration=10.0)
     self.manager.set_ltl(parsed_ltl)
     self.prism_x, self.prism_y = parse_csv_cols(get_prism_model_dir() +
                                                 get_sep() +
                                                 "Q_TRIGGER_1_20_1.csv")
Example #4
def testSPS():
    from util.util import interval
    from module.Module import Constant
    config = SPSConfig.SPSConfig()
    moduleFactory = ModuleFactory(config)
    modelfactory = ModelFactory(moduleFactory)
    model = modelfactory.get_built()
    ltl = ['U[1, {0}]'.format(730), 'T', 'failure']  # the system fails within a year
    checker = Checker(model, ltl, duration=730, fb=False)
    wrapper = ExperimentWrapper(checker, samples_per_param=100)
    trainx = interval(1, 10, 0.5)
    testx = interval(1, 10, 0.1)
    thickness_params = [Constant('SCREEN_THICKNESS', st) for st in trainx]
    wrapper.setconstants([thickness_params])
    result = wrapper.do_expe()
    cases = map(lambda pair: pair[0], result)
    labels = map(lambda pair: pair[1], result)

    regressor = BPNeuralNetwork()
    regressor.setup(1, 5, 1)
    regressor.train(cases, labels)

    test_cases = map(lambda c: Constant('SCREEN_THICKNESS', c), testx)
    test_labels = [regressor.predict(test_case) for test_case in testx]
    # run model checking for multiple sets of parameters
    # logger.info("mc begin")
    #
    wrapper.setconstants([test_cases])
    # mcresult = wrapper.modelcheck()
    # mc_labels = map(lambda tuple: tuple[1], mcresult)

    plt.plot(testx, [i[0] for i in test_labels], label='predict')
    # plt.plot(map(lambda const: const.get_value(), test_cases), mc_labels, label='mc')
    plt.show()
Example #5
 def setUp(self):
     ModelTestBase.setUp(self)
     self._parsed_ltl = self._ltl_parser.parse_line(self._get_ltl())
     self._duration = str2num(self._ltl_parser.parse_duration(self._get_ltl()))
     if self._duration is None:
         self._duration = self._get_duration()
     self._checker = Checker(self._model, self._parsed_ltl, self._duration, self._get_sample_size())
Example #6
def do_expr_parsed():
    model = ModelFactory.get_parsed()
    checker = Checker(model=model, ltl=ltl, duration=TIME_LIMIT_IN_DAYS*2)
    # wrapper = ExperimentWrapper(checker, samples_per_param=SAMPLES_PER_PARAM)
    wrapper = ExperimentWrapper(checker)
    wrapper.setconstants([thickness_cnsts,])
    logger.info("Parsed model expr began.")
    result = wrapper.do_expe()
    logger.info("Parsed model expr end.")
    return result
Example #7
    def setUp(self):
        self._checker = Checker(None)
        self.parsed_ltl1 = ["U[1, 2]", "true", "ap1"]
        self.parsed_ltl2 = ["U[2, 2]", "true", "ap1"]
        self.parsed_ltl3 = ["U[3, 3]", "true", "ap1"]
        self.parsed_ltl4 = ["U[0.001, 0.001]", "true", "ap1"]
        self.parsed_ltl5 = ["U[0.002, 0.002]", "true", "ap1"]
        self.parsed_ltl6 = ["U[0.003, 0.003]", "true", "ap1"]

        apsets = [{"a"}, {"ap1"}]
        next_moves = [
            NextMove(0, 1, None, 1.0, 1.0),
            NextMove(1, 1, None, 1.0, 1.0)
        ]
        next_moves1 = [
            NextMove(0.0, 0.001, None, 1.0, 1.0),
            NextMove(0.001, 0.001, None, 1.0, 1.0)
        ]
        self.path = [Step(apset, move) for apset, move in zip(apsets, next_moves)]
        self.path1 = [Step(_apset, _move) for _apset, _move in zip(apsets, next_moves1)]
Example #8
 def stdmode(self):
     IO.writestr('Judger Identifier: {id}'.format(
         id=Config.getValue('configs/global.json', 'identifier')))
     IO.writestr('Simulation Tool: {sim}'.format(sim=self.task['tool']))
     IO.writestr(' - Standard Mode - ')
     runner = self.runner_type(self.task['src'], self.path)
     r = runner.compile()
     if not r:
         IO.writestr('Compile Error')
         return
     for test in self.testcaseSet:
         if not test.display:
             IO.writestr(
                 '# Test Case #<{name}>: Omitted\nComment: Standard Answer not ready.'
                 .format(name=test.name))
             continue
         outstr = 'Test Case #<{name}>: '.format(name=test.name)
         r = runner.run(test, 'out.txt')
         if not r:
             outstr += "{res} \nComment: {comment}".format(
                 res='Runtime Error', comment='Runtime Error')
             IO.writestr(outstr)
             break
         Checker.timetrim(test.path + '/' + test.display,
                          self.path + '/out/std_t.txt')
         Checker.timetrim(self.path + '/out/out.txt',
                          self.path + '/out/out_t.txt')
         # res = Checker.check(test.path + '/' + test.display, self.path + '/out/out.txt')
         res = Checker.check(self.path + '/out/std_t.txt',
                             self.path + '/out/out_t.txt')
         outstr = 'Test Case #<{name}>: '.format(name=test.name)
         if res is None:
             outstr += 'Checker Error'
         else:
             outstr += "{res} \nComment: {comment}".format(res=res[0],
                                                           comment=res[1])
         IO.writestr(outstr)
Example #9
 def patmode(self):
     IO.writestr('Judger Identifier: {id}'.format(
         id=Config.getValue('configs/global.json', 'identifier')))
     IO.writestr('Simulation Tool: {sim}'.format(sim=self.task['tool']))
     IO.writestr(' - Pat Mode - ')
     runner1 = self.runner_type(self.task['src1'], self.path)
     r = runner1.compile()
     if not r:
         IO.writestr('src1 Compile Error')
         return
     runner2 = self.runner_type(self.task['src2'], self.path)
     r = runner2.compile()
     if not r:
         IO.writestr('src2 Compile Error')
         return
     for test in self.testcaseSet:
         outstr = 'Test Case #<{name}>: '.format(name=test.name)
         r = runner1.run(test, 'out1.txt')
         if not r:
             outstr += "{res} \nComment: {comment}".format(
                 res='Runtime Error', comment='src1 Runtime Error')
         r = runner2.run(test, 'out2.txt')
         if not r:
             outstr += "{res} \nComment: {comment}".format(
                 res='Runtime Error', comment='src2 Runtime Error')
         Checker.timetrim(self.path + '/out/out1.txt',
                          self.path + '/out/out1_t.txt')
         Checker.timetrim(self.path + '/out/out2.txt',
                          self.path + '/out/out2_t.txt')
         res = Checker.check(self.path + '/out/out1_t.txt',
                             self.path + '/out/out2_t.txt')
         if res is None:
             outstr += 'Checker Error'
         else:
             outstr += "{res} \nComment: {comment}".format(res=res[0],
                                                           comment=res[1])
         IO.writestr(outstr)
Example #10
    def setUp(self):
        self.model = self.modelConstructor._parse(self.model_path)
        self.parsed_ltl = self.ltl_parser.parse_line(self.ltl)

        if self.prism_data_path:
            self.prism_data_rows = parse_csv_rows(self.prism_data_path)
        if self.prism_data_rows:
            self.prism_data_map = {}
            for row in self.prism_data_rows:
                attr = row[:-1]
                label = row[-1]
                self.prism_data_map[tuple(attr)] = label
        checker = Checker(self.model, self.parsed_ltl, duration=self.duration)
        self.experiment_wrapper = ExperimentWrapper(checker, self.params)
        self.logger = logging.getLogger(__name__)
        self.logger.addHandler(logging.StreamHandler(sys.stdout))
        self.logger.setLevel(logging.DEBUG)
Example #11
class DTMCSyncUnittest(unittest.TestCase):

    def setUp(self):
        filepath = get_prism_model_dir() + get_sep() + "smalltest.prism"
        self.model_constructor = ModelConstructor()
        self.model = self.model_constructor._parse(filepath)
        self.duration = 180
        self.model.duration = self.duration
        ltl = "true U<={} failure".format(self.duration)
        parsed = LTLParser().build_parser().parse_line(ltl)
        self.checker = Checker(model=self.model, ltl=parsed)

    def test_parsing(self):
        self.assertEqual(self.model.model_type, ModelType.DTMC)

    def test_gen_path(self):
        # Test the correctness of generated paths:
        # a generated path either spans the full duration or contains failure
        for _ in range(5000):
            path = self.model.get_random_path_V2()
            for step in path:
                logger.info(step)
            logger.info("----------------")
            if {"failure"} in [step.ap_set for step in path]:
                continue
            passed_time = path[-1].next_move.passed_time + \
                path[-1].next_move.holding_time
            if int(passed_time) < self.duration:
                for step in path:
                    logger.error(step)
                logger.error("-------------")

    def test_checking(self):
        thickness_vals = range(1, 10, 1)
        constant_objs = [Constant("SCREEN_THICKNESS", v)
                         for v in thickness_vals]
        for obj in constant_objs:
            self.model_constructor._parser.vcf_map[obj.get_name()].set_value(
                obj.get_value())
            self.model.commPrepared = False
            logger.info(
                "param={}, checker's result={}".format(
                    obj.get_value(),
                    self.checker.run_checker()))
Example #12
def dpm_regress():
    '''
    Show that the error between the regression results and PRISM is about the
    same as the error between SMC and PRISM, i.e. that regression analysis can
    substitute for SMC.
    :return: None
    '''
    from PathHelper import get_prism_model_dir
    from checker.Checker import Checker
    from compiler.LTLParser import LTLParser
    from util.util import interval
    from experiment.ExperimentWrapper import ExperimentWrapper

    base_dir = get_prism_model_dir()
    model = ModelConstructor(base_dir).parse("smalltest")
    ltl = "true U<=180 failure"
    ltl_parser = LTLParser().build_parser()
    parsed_ltl = ltl_parser.parse_line(ltl)
    checker = Checker(model, parsed_ltl, duration=180)

    expe_executor = ExperimentWrapper(checker, samples_per_param=600)
    train_xs = interval(1, 10, 0.3)
Example #13
def get_checker(model):
    # built = ModelFactory.get_built()
    ltl = ["U[1, {}]".format(int(DURATION)), "T", "failure"]
    checker = Checker(model=model, ltl=ltl, duration=DURATION, c=c, d=d)
    return checker
Example #14
class TestSyncCommands(unittest.TestCase):
    def setUp(self):
        self.manager = Manager()
        filepath = get_prism_model_dir() + get_sep() + "DPM.prism"
        self.manager.read_model_file(filepath)
        self.model = self.manager.model
        ltl = "true U<=10 failure"
        parsed_ltl = LTLParser().build_parser().parse_line(ltl)
        self.manager.set_manager_param_simple("duration", 10.0)
        self.checker = Checker(model=self.model, ltl=parsed_ltl, duration=10.0)
        self.manager.set_ltl(parsed_ltl)
        self.prism_x, self.prism_y = parse_csv_cols(get_prism_model_dir() +
                                                    get_sep() +
                                                    "Q_TRIGGER_1_20_1.csv")

    def test_parsing(self):
        # Verify that the model was parsed successfully
        self.assertEqual(self.model.model_type, ModelType.CTMC)
        logger.info("parsed commands")
        for module in self.model.modules.values():
            for comms in module.commands.values():
                if isinstance(comms, list):
                    for comm in comms:
                        logger.info("comm {} from module {}".format(
                            comm.name, module.name))
                else:
                    logger.info("comm {} from module {}".format(
                        comm.name, module.name))

    def test_gen_path(self):
        # Test the correctness of generated paths:
        # a generated path either spans the full duration or contains failure

        failure_cnt = 0

        for _ in range(5000):
            self.model.duration = 10
            path = self.model.get_random_path_V2()
            for step in path:
                logger.info(step)
            logger.info("----------------")
            passed_time = (path[-1].next_move.passed_time +
                           path[-1].next_move.holding_time)
            if {"failure"} in [step.ap_set for step in path]:
                failure_cnt += 1
                continue
            if int(passed_time) < 10:
                failure = True
                for step in path:
                    logger.error(step)
                logger.error("-------------")
        print "failure_cnt={}".format(failure_cnt)

    def test_checking(self):
        # Test that model checking succeeds, which validates both the model parsing and the SMC algorithm
        logger.info("checker'result is {}".format(self.checker.run_checker()))

    def test_regression(self):
        constants = [("q_trigger", [v for v in range(1, 20, 2)])]
        self.manager.set_train_constants(*constants)
        self.manager.train_network()
        self.manager.set_test_xs([test_x for test_x in product(self.prism_x)])
        self.manager.run_test(prism_data_path=get_prism_model_dir() +
                              get_sep() + "Q_TRIGGER_1_20_1.csv")
Example #15
def get_checker(model, _ltl, duration):
    return Checker(model, _ltl, duration=duration)
Example #16
 def setUp(self):
     ModelTestBase.setUp(self)
     self.duration = 20
     self.ltl = "true U<={} result_4".format(self.duration)
     self.ltl = self._ltl_parser.parse_line(self.ltl)
     self.checker = Checker(model=self._model, ltl=self.ltl)