Example #1
    def __init__(self, config):
        self.config = config
        # model
        self.model_dir = config.get('model', 'model_dir')
        self.model_prefix = config.get('model', 'model_prefix')
        self.model_epoch = config.getint('model', 'model_epoch')
        self.result_dir = config.get('model', 'result_dir')
        #if not os.path.isdir(self.result_dir):
        #   os.mkdir(self.result_dir)
        #if not os.path.isdir(os.path.join(self.result_dir, 'visualization')):
        #   os.mkdir(os.path.join(self.result_dir, 'visualization'))
        if not os.path.isdir(os.path.join(self.result_dir, 'score')):
            os.mkdir(os.path.join(self.result_dir, 'score'))

        # data
        self.image_list = config.get('data', 'image_list')
        self.test_img_dir = config.get('data', 'test_img_dir')
        self.result_shape = [
            int(f) for f in config.get('data', 'result_shape').split(',')
        ]
        self.test_shape = [
            int(f) for f in config.get('data', 'test_shape').split(',')
        ]
        # initialize tester
        self.tester = Tester(self.config, self.test_shape, self.result_shape)
Example #2
def main():

    # Train or Test
    if CONFIG.phase.lower() == "train":
        raise RuntimeError("Training Code Will be Available after Paper is Accepted. \
            If You Don't Want to Use FP16, You Can Just Try the Training Code of GCA-Matting.")

    elif CONFIG.phase.lower() == "test":
        CONFIG.log.logging_path += "_test"
        if CONFIG.test.alpha_path is not None:
            utils.make_dir(CONFIG.test.alpha_path)
        utils.make_dir(CONFIG.log.logging_path)

        # Create a logger
        logger = utils.get_logger(CONFIG.log.logging_path,
                                  logging_level=CONFIG.log.logging_level)

        test_image_file = ImageFileTest(alpha_dir=CONFIG.test.alpha,
                                        merged_dir=CONFIG.test.merged,
                                        trimap_dir=CONFIG.test.trimap)
        test_dataset = DataGenerator(test_image_file, phase='test', test_scale=CONFIG.test.scale)
        test_dataloader = DataLoader(test_dataset,
                                     batch_size=CONFIG.test.batch_size,
                                     shuffle=False,
                                     num_workers=CONFIG.data.workers,
                                     drop_last=False)

        tester = Tester(test_dataloader=test_dataloader)
        tester.test()

    else:
        raise NotImplementedError("Unknown Phase: {}".format(CONFIG.phase))
Example #3
 def __init__(self, vc, opts):
     self.vc = vc
     ret,im = vc.read()
     self.numGestures = opts.num
     self.imHeight,self.imWidth,self.channels = im.shape
     self.trainer = Trainer(numGestures=opts.num, numFramesPerGesture=opts.frames, minDescriptorsPerFrame=opts.desc, numWords=opts.words, descType=opts.type, kernel=opts.kernel, numIter=opts.iter, parent=self)
     self.tester = Tester(numGestures=opts.num, minDescriptorsPerFrame=opts.desc, numWords=opts.words, descType=opts.type, numPredictions=7, parent=self)
Example #4
    def __init__(self, debug=True, path_filename="./Dumps/GA.txt"):
        # genetic algorithm settings
        self.__debug = debug
        self.__path_filename = path_filename  # by default the output of the GA is put in the Dumps directory in GA.txt

        self.__number_of_generations = 0  # number of generations that the genetic algorithm will run through
        self.__number_of_individual_genes = 0  # number of genes that belong to an individual
        self.__number_of_individuals = 0  # number of individuals in each generations

        # information for when the genetic algorithm runs
        self.__current_generation_num = 0  # a counter which tracks the current generation that the GA is running
        self.__old_populations = dict()  # dict holding key-value pairs (population index (int) : list(Neural Network Weights))
        self.__population = list()  # list holding the GA's current population
        self.__scores = list()  # scores of the population; the index of each score corresponds to the index
        # of the scored individual in the population list

        # mutation settings
        self.__mutation_rate_single_point = 50  # the rate at which single point crossover happens in the GA, default is 50%
        self.__mutation_rate_two_point = 75  # the rate at which two point crossover happens in the GA, default is 75%
        self.__mutation_rate = 5  # the rate at which a fixed point mutation occurs in the GA, default is 5%

        # test
        self.__tester = Tester()
Example #5
 def set_super_training_parameters(self,
                                   prefix,
                                   train_root,
                                   test_root,
                                   sample_file_dir,
                                   batch_size,
                                   input_w,
                                   input_h,
                                   test_unseen_root=None,
                                   sample_file_dir_unseen=None,
                                   start_epoch=1,
                                   train_epochs=200,
                                   enable_stop_machanism=True):
     self.prefix = prefix
     self.train_root = train_root
     self.test_root = test_root
     self.sample_file_dir = sample_file_dir
     self.batch_size = batch_size
     self.test_unseen_root = test_unseen_root
     self.sample_file_dir_unseen = sample_file_dir_unseen
     self.start_epoch = start_epoch
     self.train_epochs = train_epochs
     self.enable_stop_machanism = enable_stop_machanism
     self.w = input_w
     self.h = input_h
     self.tester = Tester(model_path=None,
                          model=self.model,
                          sample_file_dir=self.sample_file_dir,
                          test_dir=self.test_root,
                          prefix=self.prefix,
                          input_w=self.w,
                          input_h=self.h)
Example #6
 def schedule_tester(self, cycle=TESTER_CYCLE):
     # Periodically test proxies
     tester = Tester()
     while True:
         print('Tester module is running')
         tester.run()
         time.sleep(cycle)
Example #7
 def __init__(self):
     self.col_names = ["Amplitude", "Loudness", "RMS", "Pitch", "Label"]
     self.features = ["Amplitude", "Loudness", "RMS", "Pitch"]
     self.model_base = "/Users/shivam-dhar/Downloads/CSE-535-EmergencyAlarm-master/backend/models/"
     self.pickle_file = "../models/model_tree.obj"
     self.test_base = "/Users/shivam-dhar/Downloads/CSE-535-EmergencyAlarm-master/backend/test/"
     self.tester = Tester()
Example #8
 def run(self):
     tester = Tester()
     predictions, classnames = tester.test(folder_path, cnnModelClass,
                                           cnnModel)
     print(predictions)
     print(classnames)
     self.signals.result.emit({'p': predictions, 'c': classnames})
Example #9
 def schedule_tester(self, cycle=TESTER_CYCLE):
     """定时检测代理"""
     tester = Tester()
     while True:
         print('Tester is running')
         tester.run()
         time.sleep(cycle)
Example #10
def cmd_test(args, auth, cookies):
	if args.commonwords is not None:
		words = list()
		for word in args.commonwords:
			word = word.rstrip("\n")
			if word:
				words.append(word)
		args.commonwords.close()
	else:
		words = []

	sensitive_data, vectors = [], []
	for sensitive in args.sensitive:
		sensitive = sensitive.rstrip("\n")
		if sensitive:
			sensitive_data.append(sensitive)
	args.sensitive.close()

	for v in args.vectors:
		v = v.rstrip("\n")
		if v:
			vectors.append(v)
	args.vectors.close()

	crawler = Site(words, args.blacklist)
	crawler.crawl(args.url, auth, cookies)

	fuzzer = Tester(crawler, sensitive_data, vectors, args.slow, args.random)
	if args.random:
		fuzzer.run_random()
	else:
		fuzzer.run()
Example #11
def main(config):
    cudnn.enabled = True
    cudnn.benchmark = True
    cudnn.deterministic = False
    torch.cuda.manual_seed(2020)

    if config.train:
        # Create directories if not exist
        make_folder(config.model_save_path, config.arch)
        make_folder(config.sample_path, config.arch) # test results sample
        make_folder(config.test_pred_label_path, config.arch) # test pred results
        make_folder(config.test_color_label_path, config.arch) # colorful test pred results

        # Transform for Data Augment
        transform = Compose([RandomHorizontallyFlip(p=.5), RandomSized(size=config.imsize), \
            AdjustBrightness(bf=0.1), AdjustContrast(cf=0.1), AdjustHue(hue=0.1), \
            AdjustSaturation(saturation=0.1)])
        
        data_loader = CustomDataLoader(config.img_path, config.label_path, config.imsize,
                                       config.batch_size, num_workers=config.num_workers, 
                                       transform=transform, mode=config.train)
        val_loader = CustomDataLoader(config.val_img_path, config.val_label_path, config.imsize,
                                      config.batch_size, num_workers=config.num_workers, 
                                      transform=None, mode=bool(1 - config.train))
        trainer = Trainer(data_loader.loader(), config, val_loader.loader())
        trainer.train()
    else:
        data_loader = CustomDataLoader(config.test_image_path, config.test_label_path, config.imsize,
                                       config.batch_size, num_workers=config.num_workers, mode=config.train)
        tester = Tester(data_loader.loader(), config)
        tester.test()
Example #12
def main(alg_name):
    args = built_parser(alg_name)
    logger.info('begin training agents with parameter {}'.format(str(args)))
    if args.mode == 'training':
        ray.init(object_store_memory=5120*1024*1024)
        os.makedirs(args.result_dir)
        with open(args.result_dir + '/config.json', 'w', encoding='utf-8') as f:
            json.dump(vars(args), f, ensure_ascii=False, indent=4)
        trainer = Trainer(policy_cls=NAME2POLICYCLS[args.policy_type],
                          worker_cls=NAME2WORKERCLS[args.worker_type],
                          learner_cls=NAME2LEARNERCLS[args.alg_name],
                          buffer_cls=NAME2BUFFERCLS[args.buffer_type],
                          optimizer_cls=NAME2OPTIMIZERCLS[args.optimizer_type],
                          evaluator_cls=NAME2EVALUATORCLS[args.evaluator_type],
                          args=args)
        if args.model_load_dir is not None:
            logger.info('loading model')
            trainer.load_weights(args.model_load_dir, args.model_load_ite)
        if args.ppc_load_dir is not None:
            logger.info('loading ppc parameter')
            trainer.load_ppc_params(args.ppc_load_dir)
        trainer.train()

    elif args.mode == 'testing':
        os.makedirs(args.test_log_dir)
        with open(args.test_log_dir + '/test_config.json', 'w', encoding='utf-8') as f:
            json.dump(vars(args), f, ensure_ascii=False, indent=4)
        tester = Tester(policy_cls=NAME2POLICYCLS[args.policy_type],
                        evaluator_cls=NAME2EVALUATORCLS[args.evaluator_type],
                        args=args)
        tester.test()
Example #13
File: arena.py, Project: dnet/wsse-arena
def main(args):
    with file('arena.json', 'rb') as json_f:
        config = json.load(json_f)
    svc_list = config['services']
    cns_list = config['consumers']
    sts_list = config['suites']
    if len(args) < 2:
        exit_errmsg(USAGE.format(progname=args[0],
            services=usage_format(svc_list),
            consumers=usage_format(cns_list),
            suites=usage_format(sts_list)))
    else:
        tst = Tester()
        if args[1] == 'test':
            svc_name, cns_name = args[2:4]
            tst.test_pair(resolve(svc_list, svc_name, 'service'),
                    resolve(cns_list, cns_name, 'consumer'))
        elif args[1] == 'clean':
            tst.clean(config[args[2] + 's'][args[3]])
        elif args[1] == 'measure':
            if len(args) < 3:
                suites = sts_list.itervalues()
            else:
                suites = [resolve(sts_list, args[2], 'suite')]
            if len(args) < 4:
                repeats = config['measurement']['repeats']
            else:
                repeats = [int(args[3])]
            if len(args) < 5:
                runs = config['measurement']['runs']
            else:
                runs = int(args[4])
            measure(suites, repeats, runs, svc_list, cns_list)
        else:
            exit_errmsg('Invalid command: "{0}"'.format(args[1]))
Example #14
def main(config):
    prepare_dirs_and_logger(config)
    save_config(config)

    if config.is_train:
        from trainer import Trainer
        if config.dataset == 'line':
            from data_line import BatchManager
        elif config.dataset == 'ch':
            from data_ch import BatchManager
        elif config.dataset == 'kanji':
            from data_kanji import BatchManager
        elif config.dataset == 'baseball' or\
             config.dataset == 'cat':
            from data_qdraw import BatchManager

        batch_manager = BatchManager(config)
        trainer = Trainer(config, batch_manager)
        trainer.train()
    else:
        from tester import Tester
        if config.dataset == 'line':
            from data_line import BatchManager
        elif config.dataset == 'ch':
            from data_ch import BatchManager
        elif config.dataset == 'kanji':
            from data_kanji import BatchManager
        elif config.dataset == 'baseball' or\
             config.dataset == 'cat':
            from data_qdraw import BatchManager

        batch_manager = BatchManager(config)
        tester = Tester(config, batch_manager)
        tester.test()
Example #15
class Scheduler(object):
    def __init__(self):
        self.getter = Getter()
        self.tester = Tester()

    # Periodically fetch proxies
    def scheduler_getter(self, cycle=GETTER_CYCLE):
        while True:
            self.getter.run()
            time.sleep(cycle)

    # Periodically test proxies
    def schedule_tester(self, cycle=TESTER_CYCLE):
        while True:
            self.tester.run()
            time.sleep(cycle)

    def run(self):
        print('Proxy pool is running!')
        if GETTER_ENABLED:
            getter_process = Process(target = self.scheduler_getter)
            getter_process.start()
        
        if TESTER_ENABLED:
            tester_process = Process(target = self.schedule_tester)
            tester_process.start()
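A minimal way to launch this scheduler, assuming the usual proxy-pool layout in which GETTER_CYCLE, TESTER_CYCLE, GETTER_ENABLED and TESTER_ENABLED come from a settings module; the entry point below is an illustrative sketch, not part of the original example.

if __name__ == '__main__':
    # run() spawns the getter/tester processes, each looping on its own cycle
    Scheduler().run()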
Example #16
        def __init__(self):
            Tester.__init__(self, "Navigation")

            # tests to run:
            #   square with Motion module, minimal.launch
            #   square with Motion module, navigation launch
            # expect all to turn out the same, but need to sanity check
            #self.motion = Motion()

            # flag for a jerky stop
            self.jerky = False

            # I'm a bit concerned about robot safety if we don't slow things down,
            # but I'm also worried it won't be an accurate test if we change the speed
            self.walking_speed = 1  # if not self.jerky else .5

            # linear test
            self.reached_goal = False

            # square test
            self.reached_corner = [False, False, False, False]
            self.cc_square = [(0, 0), (1, 0), (1, 1), (0, 1)]
            self.c_square = [(0, 0), (1, 0), (1, -1), (0, -1)]
            self.corner_counter = 0

            # set up the logger output file
            self.filename = None

            self.navigation = Navigation(self.jerky)
Example #17
def do_all_tests(theIndexes, searchRatio):
    dataSets = [
        DataSet('DATASETS/DATASET1.TXT'),
        DataSet('DATASETS/DATASET2.TXT'),
        DataSet('DATASETS/DATASET3.TXT')
    ]
    allStats = []
    theTester = Tester()
    theModel = ModelWrapper()

    print('[[[[ STARTING THE MOTHER OF ALL TESTS ]]]]')
    for useCNN in [False, True]:
        print('[[[ ONLY CNN LAYERS ' + str(useCNN).upper() + ' ]]]')
        for curIndex in theIndexes:
            print('[[ TESTING MODEL ' + curIndex[0] + ' WITH TEST SET ' +
                  str(curIndex[1] + 1) + ' ]]')
            theModel.load(curIndex[0])
            theTester.set_params(theModel, dataSets[curIndex[1]])
            curStats = theTester.compute_fullstats(useCNN=useCNN,
                                                   searchRatio=searchRatio)
            allStats.append(curStats)
            print('[[ MODEL TESTED ]]')
            with open('ALLSTATS_PCT' + str(int(searchRatio * 100)) + '.pkl',
                      'wb') as outFile:
                dump(allStats, outFile)
        print('[[[ FINISHED ONLY CNN LAYERS ' + str(useCNN).upper() + ' ]]]')
    print('[[[[ FINISHED THE MOTHER OF ALL TESTS ]]]]')
Example #18
 def run_tester(self, cycle=TESTER_CYCLE):
     """定时检测cookie可用情况"""
     tester = Tester()
     while True:
         print('Starting check')
         tester.run()
         time.sleep(cycle)
Example #19
File: main.py, Project: byungsook/vectornet
def main(config):
    prepare_dirs_and_logger(config)
    save_config(config)

    if config.is_train:
        from trainer import Trainer
        if config.dataset == 'line':
            from data_line import BatchManager
        elif config.dataset == 'ch':
            from data_ch import BatchManager
        elif config.dataset == 'kanji':
            from data_kanji import BatchManager
        elif config.dataset == 'baseball' or\
             config.dataset == 'cat':
            from data_qdraw import BatchManager

        batch_manager = BatchManager(config)
        trainer = Trainer(config, batch_manager)
        trainer.train()
    else:
        from tester import Tester
        if config.dataset == 'line':
            from data_line import BatchManager
        elif config.dataset == 'ch':
            from data_ch import BatchManager
        elif config.dataset == 'kanji':
            from data_kanji import BatchManager
        elif config.dataset == 'baseball' or\
             config.dataset == 'cat':
            from data_qdraw import BatchManager
        
        batch_manager = BatchManager(config)
        tester = Tester(config, batch_manager)
        tester.test()
Example #20
def main(config):
    # For fast training
    cudnn.benchmark = True

    # Data loader
    data_loader = Data_Loader(config.train,
                              config.dataset,
                              config.image_path,
                              config.imsize,
                              config.batch_size,
                              shuf=config.train)

    # Create directories if not exist
    make_folder(config.model_save_path, config.version)
    make_folder(config.sample_path, config.version)
    make_folder(config.log_path, config.version)
    make_folder(config.attn_path, config.version)
    make_folder(config.test_path, config.version)

    if config.train:
        if config.model == 'sagan':
            trainer = Trainer(data_loader.loader(), config)
        elif config.model == 'qgan':
            trainer = qgan_trainer(data_loader.loader(), config)
        trainer.train()
    else:
        tester = Tester(data_loader.loader(), config)
        tester.test()
Example #21
def test():
    generator = Generator()
    tester = Tester(generator,
                    batch_size=32,
                    cuda=False,
                    self_dir='./data/test/',
                    self_test=True)
    tester.test()
Example #22
def do(task):
    tester = Tester()
    try:
        return tester.test(task)
    except:
        traceback.print_exc(file=sys.stderr)
        logging.critical('Failed while cleaning for task %s' % (task['ID']))
        return False
Example #23
    def _load_tester(self):
        print('loading tester...')

        from tester import Tester
        self.tester = Tester(self.args, self.writer, self.data_loader,
                             self.model, self.evaluator)

        print('tester load finished!')
Example #24
def run():
    battery = DiffusionBattery()
    program = TesterProgram()
    tester = Tester(battery, program)

    tester.run_program(config.NUM_TIMESTEPS,
                       do_animate=config.DO_ANIMATE,
                       save=config.SAVE_ANIMATION)
Example #25
 def __init__(self):
     self.client = docker.from_env(timeout=86400)
     self.preparer = Preparer()
     self.trainer = Trainer()
     self.tester = Tester()
     self.interactor = Interactor()
     self.generate_save_tag = lambda tag, save_id: hashlib.sha256(
         (tag + save_id).encode()).hexdigest()
Example #26
 def schedule_tester(self, cycle=TESTER_CYCLE):
     """
     Periodically test proxies
     """
     tester = Tester()
     while True:
         tester.run()
         time.sleep(cycle)
Example #27
 def schedule_tester(self, cycle=settings.TESTER_CYCLE):
     '''
     Periodically test proxies
     '''
     tester = Tester()
     while True:
         print('Tester is running')
         tester.run()
         time.sleep(cycle)
Example #28
 def scheduler_tester(self, cycle=TESTER_CYCLE):
     """
     Periodically test proxies
     """
     tester = Tester()
     while True:
         print("测试器开始运行")
         tester.run()
         time.sleep(cycle)
Example #29
 def schedule_tester(self, cycle=TESTER_CYCLE):  # schedule_tester schedules the testing module
     """
     Periodically test proxies
     """
     tester = Tester()   # create a Tester instance
     while True: # loop forever, calling run repeatedly
         print('Tester is running')
         tester.run()    # run one round of proxy testing
         time.sleep(cycle)   # sleep for a while after each round, then run again
Example #30
 def schedule_tester(self, cycle=TESTER_CYCLE):
     """
     Periodically test proxies
     """
     tester = Tester()
     while True:
         print('Tester is running')
         tester.run()
         time.sleep(10)  # if nothing was returned, wait a while
Example #31
 def scheduler_tester(self):
     """
     Check every hour whether the cookies are still valid
     :return:
     """
     while True:
         tester = Tester()
         tester.run()
         time.sleep(self.getter_cycle)
Example #32
 def scheduler_tester(self):
     """
     Periodically test proxies
     :return: None
     """
     tester = Tester()
     while True:
         print('Tester is running...')
         tester.run()
         time.sleep(settings.tester_interval)
Example #33
 def tester_scheduler(self, cycle=TESTER_CYCLE):
     """
     Periodically test proxies
     """
     print('Tester starting!')
     tester = Tester()
     while True:
         tester.run()
         print('Sleeping for', TESTER_CYCLE, 'seconds')
         time.sleep(TESTER_CYCLE)
Example #34
def test_two_A_responses():
    qname = "dualstack.mc-12555-1019789594.us-east-1.elb.amazonaws.com."
    T = Tester()
    T.newtest(testname="py.test")
    response = dbdns.query(T, qname, dns.rdatatype.A)
    count = 0
    for rrset in response.answer:
        for rr in rrset:
            if rr.rdtype == dns.rdatatype.A:
                print("IP address for {} is {}".format(qname, rr.address))
                count += 1
    assert count >= 2
Example #35
def test_a_read():
    qname = "google-public-dns-a.google.com."
    T = Tester()
    T.newtest(testname="py.test")
    response = dbdns.query(T, qname, dns.rdatatype.A)
    count = 0
    for rrset in response.answer:
        for rr in rrset:
            if rr.rdtype == dns.rdatatype.A:
                print("IP addr for {} is {}".format(qname, rr.address))
                assert rr.address == "8.8.8.8"
                count += 1
    assert count > 0
Example #36
def test_read_tlsa():
    """Verify that a TLSA record can be read"""
    qname = "_443._tcp.good.dane.verisignlabs.com"
    T = Tester()
    T.newtest(testname="py.test")
    response = dbdns.query(T, qname, dns.rdatatype.TLSA)
    count = 0
    for rrset in response.answer:
        for rr in rrset:
            if rr.rdtype == dns.rdatatype.TLSA:
                print("{}: {} {} {} {}".format(qname, rr.usage, rr.selector, rr.mtype, hexdump(rr.cert)))
                count += 1
    assert count > 0
Example #37
def test_dnssec_response_notpresent():
    qname = "www.google.com"
    T = Tester()
    T.newtest(testname="py.test")
    response = dbdns.query(T, qname, dns.rdatatype.A)
    count = 0
    for rrset in response.answer:
        for rr in rrset:
            if rr.rdtype == dns.rdatatype.A:
                dnssec = response.flags & dns.flags.AD
                print("IP address for {} is {} DNSSEC: {}".format(qname, rr.address, dnssec))
                if dnssec:
                    count += 1
    assert count == 0
Example #38
def test_cname_read():
    # This test makes use of the fact that a.nitroba.org is set as a cname to b.nitroba.org
    qname = "a.nitroba.org"
    T = Tester()
    T.newtest(testname="py.test")
    response = dbdns.query(T, qname, dns.rdatatype.CNAME)
    count = 0
    for rset in response.answer:
        for rr in rset:
            if rr.rdtype == dns.rdatatype.CNAME:
                print("cname for a.nitroba.org is {}".format(rr.target))
                assert str(rr.target) == "b.nitroba.org."
                count += 1
    assert count > 0  # no response?
Example #39
def train_and_test(image_loader, feature_extractor):
    """
    Simple implementation of train and test function
    :param image_loader:
    :param feature_extractor:
    """
    first_class_train_data, first_class_test_data = get_train_and_test_data(params.first_class_params)
    second_class_train_data, second_class_test_data = get_train_and_test_data(params.second_class_params)

    train_data = list(first_class_train_data) + list(second_class_train_data)
    random.shuffle(train_data)
    trainer = Trainer(image_loader, feature_extractor)
    solve_container = trainer.train(train_data, params.svm_params)

    test_data = list(first_class_test_data) + list(second_class_test_data)
    tester = Tester(image_loader, solve_container)
    return tester.test(test_data)
Example #40
File: meter.py, Project: dnet/wsse-arena
def measure(suites, repeats, runs, svc_list, cns_list):
    filebase = datetime.now().strftime(FILE_FORMAT)
    log = LogFile(filebase)
    csv = path.join(getcwd(), filebase + CSV_SUFFIX)
    with file(csv, 'w') as f:
        f.write('Service;Consumer;Repeats;Suite;Initialization;Invocation')
    for suite, repeat, (svc_name, service), (cns_name, consumer), num in product(
            suites, repeats, svc_list.iteritems(), cns_list.iteritems(), xrange(runs)):
        log.log('{0} using {1} -({2}x)-> {3}, try {4}'.format(
            suite['title'], cns_name, repeat, svc_name, num + 1))
        tst = Tester()
        env = dict(TIMES=str(repeat), CSV_FILE=csv, CSV_PREFIX=';'.join(
            (svc_name, cns_name, str(repeat), suite['title'])))
        for i in suite['env']:
            env[i] = '1'
        tst.extend_env(env)
        tst.test_pair(service, consumer)
Example #41
def run_training(difficulty, algorithm, output_name):
	genotype, haplotype = get_training_file_paths(difficulty)
	if algorithm == 'greedy':
		g = Greedy(genotype)
		t = Tester(g, haplotype, genotype, output_name)
		t.run_analysis()
	elif algorithm == 'optimal':
		o = Optimal(genotype)
		t = Tester(o, haplotype, genotype, output_name)
		t.run_analysis()
	elif algorithm == 'exhaustive':
		e = Exhaustive(genotype)
		t = Tester(e, haplotype, genotype, output_name)
		t.run_analysis()
Example #42
def run_test(difficulty, algorithm, output_name):
	genotype = get_test_data_path(difficulty)
	if algorithm == 'greedy':
		g = Greedy(genotype)
		t = Tester(g, None, genotype, output_name)
		t.run_analysis()
	elif algorithm == 'optimal':
		o = Optimal(genotype)
		t = Tester(o, None, genotype, output_name)
		t.run_analysis()
	elif algorithm == 'exhaustive':
		e = Exhaustive(genotype)
		t = Tester(e, None, genotype, output_name)
		t.run_analysis()
Example #43
	def __init__(self, solution, case):
		self.solution = solution
		self.current_mattes = sum([int(x) for x in solution])
		self.case = case
		self.tester = Tester()
		self.valid_solution = False
		self.steps = 0
		self.METHODS = {
			'random_optimizer': self.random_optimizer,
			'matte_minimizer': self.matte_minimizer
		}
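The METHODS dict maps strategy names to bound methods, so a caller can pick an optimizer by name. A hypothetical dispatch, assuming the enclosing class is named Solver and that both methods take no arguments (neither detail appears in the snippet):

solver = Solver('0110', case)                 # class name and constructor arguments are illustrative
optimize = solver.METHODS['matte_minimizer']  # look up the strategy by name
optimize()                                    # equivalent to solver.matte_minimizer()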
Example #44
def main():
    f = open('try_3.txt','w')
    g = open('accs.txt', 'w')
    g.close()
    task = MarioTask("testbed", initMarioMode = 2)
    task.env.initMarioMode = 2
    task.env.levelDifficulty = 1

    results = [] 
    names = [] 

    
    iterations = 50
    rounds = 15
     
    agent = Supervise(IT,useKMM = False)
    exp = EpisodicExperiment(task, agent) 
    T = Tester(agent,exp)
    sl_data, sup_data, acc = T.test(rounds = rounds, iterations = iterations)

    np.save('./data/sup_data.npy', sup_data)
    np.save('./data/sl_data.npy', sl_data)
    np.save('./data/acc.npy', acc)    
    
    IPython.embed()

    analysis = Analysis()
    analysis.get_perf(sup_data, range(iterations))
    analysis.get_perf(sl_data, range(iterations))
    analysis.plot(names=['Supervisor', 'Supervised Learning'], label='Reward', filename='./results/return_plots.eps')#, ylims=[0, 1600])

    acc_a = Analysis()
    acc_a.get_perf(acc, range(iterations))
    acc_a.plot(names=['Supervised Learning Acc.'], label='Accuracy', filename='./results/acc_plots.eps')

    print "finished"
Example #45
def periodic():
    from tester import Tester

    import argparse

    parser = argparse.ArgumentParser(description="database maintenance")
    parser.add_argument("--debug", action="store_true")
    parser.add_argument("--list", help="List all of the tasks", action="store_true")
    args = parser.parse_args()

    W = Tester()  # get a database connection
    c = W.conn.cursor()

    if args.list:
        c.execute("select workqueueid,testid,created,completed from workqueue")
        for line in c:
            print(line)
        exit(0)

    # Run the queue until there is nothing left to run
    while True:
        c.execute("select workqueueid,testid,task,args from workqueue where isnull(completed)")
        count = 0
        for (workqueueid, testid, task, task_args_str) in c.fetchall():
            count += 1
            T = Tester(testid=testid)
            task_args = json.loads(task_args_str)
            if args.debug or debug:
                print("task_args=", task_args)
                task_args["state"] = "WORKING"
            logging.info("testid={} task={} task_args={}".format(testid, task, task_args))
            if eval(task + "(T,task_args)"):
                c.execute("update workqueue set completed=now() where workqueueid=%s", (workqueueid,))
                W.commit()
        if count == 0:
            break
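Because the queue loop dispatches with eval(task + "(T,task_args)"), each task named in the workqueue table must resolve to a module-level function that accepts a Tester and the parsed args dict and returns a truthy value once the work is done. A hypothetical task that follows this contract (name and body are illustrative):

def noop_task(T, task_args):
    # Do nothing and report success, so the row is marked completed
    # on the next pass of the loop above.
    logging.info("noop_task ran with args=%s", task_args)
    return True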
Example #46
class Recognizer(object):
    def __init__(self, vc, opts):
        self.vc = vc
        ret,im = vc.read()
        self.numGestures = opts.num
        self.imHeight,self.imWidth,self.channels = im.shape
        self.trainer = Trainer(numGestures=opts.num, numFramesPerGesture=opts.frames, minDescriptorsPerFrame=opts.desc, numWords=opts.words, descType=opts.type, kernel=opts.kernel, numIter=opts.iter, parent=self)
        self.tester = Tester(numGestures=opts.num, minDescriptorsPerFrame=opts.desc, numWords=opts.words, descType=opts.type, numPredictions=7, parent=self)

    def train_from_video(self):
        self.trainer.extract_descriptors_from_video()
        variance = self.trainer.kmeans()
        self.trainer.bow()
        score = self.trainer.svm()
        return score

    def train_from_descriptors(self, desList, trainLabels):
        self.trainer.desList = desList
        self.trainer.trainLabels = trainLabels
        #numFramesPerGesture = trainLabels.count(1)
        #self.trainer.desList = desList[:numFramesPerGesture*self.numGestures]
        #self.trainer.trainLabels = trainLabels[:numFramesPerGesture*self.numGestures]

        variance = self.trainer.kmeans()
        self.trainer.bow()
        score = self.trainer.svm()
        return score

    def train_from_images(self, gestureDirList, parentDirPath, trainMask, maskParentDirPath):
        self.trainer.extract_descriptors_from_images(gestureDirList, parentDirPath, trainMask, maskParentDirPath)
        variance = self.trainer.kmeans()
        self.trainer.bow()
        score = self.trainer.svm()
        return score

    def test_on_video(self, clf):
        #print clf.coef_
        self.tester.initialize(clf)
        self.tester.test_on_video()
    
    def test_on_descriptors(self, clf, descList, trueLabels):
        #numFramesPerGesture = trueLabels.count(1)
        #descList = descList[:numFramesPerGesture*self.numGestures]
        #trueLabels = trueLabels[:numFramesPerGesture*self.numGestures]
        self.tester.initialize(clf)
        testLabels = self.tester.test_on_descriptors(descList)
        matchList = [i for i, j in zip(trueLabels, testLabels) if i == j]
        score = float(len(matchList))/len(trueLabels)
        return score
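A minimal, hypothetical driver for the Recognizer above. The attribute names mirror the opts.* fields the constructor reads (num, frames, desc, words, type, kernel, iter), but the concrete values and the use of argparse.Namespace in place of parsed command-line options are illustrative assumptions.

import argparse
import cv2

opts = argparse.Namespace(num=5, frames=30, desc=10, words=100,
                          type='SIFT', kernel='linear', iter=100)
vc = cv2.VideoCapture(0)           # frames from the default camera
recognizer = Recognizer(vc, opts)
score = recognizer.train_from_video()
print(score)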
Example #47
 def __init__(self):
     Tester.__init__(self)
Example #48
# If a different one was specified, change it
if args.groupdir:
    groupDirectory = args.groupdir

# If no group directory is given, this program cannot do anything either
else:
	print("Se requiere un directorio del grupo")
	exit()

# If libraries were specified, build a list by splitting on commas
#if args.libraries:
#    libraries = args.libraries.split(',')

# Build the Tester object
#tester = Tester(args.tests,testDir = testDirectory,libraries = libraries)
tester = Tester(args.tests,testDir = testDirectory)

# If the -f flag is present, enable showing diffs
if args.diff:
    tester.toggleDiff()

# For each directory in the group directory
for folder in os.listdir(groupDirectory):
    # Build the path of the team directory to grade
    path = groupDirectory+folder+'/'
    # Check that the path really is a directory
    if os.path.isdir(path):
        # Separator and a note of who is being evaluated
        print('|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||')
        print("Evaluando a: ",folder)
        #Al objeto tester se le indica que el codigo fuente que tiene que evaluar esta en el path del directorio del equipo
Example #49
def main():
    f = open('try_3.txt','w')
    g = open('accs.txt', 'w')
    g.close()
    task = MarioTask("testbed", initMarioMode = 2)
    task.env.initMarioMode = 2
    task.env.levelDifficulty = 1

    results = [] 
    names = [] 
    
    with open('type.txt', 'w') as f:
        f.write('dt')
    
    iterations = 20
    rounds = 30
    learning_samples = 33
    eval_samples = 10

    # iterations = 5
    # rounds = 2
    # learning_samples = 3
    # eval_samples = 2

    agent = Dagger(IT,useKMM = False)

    if args['linear']:
        agent.learner.linear = True
        prefix = 'svc-dagger-change-'
    else:
        agent.learner.linear = False
        prefix = 'dt-dagger-change-'

    exp = EpisodicExperiment(task, agent) 
    T = Tester(agent,exp)
    dagger_data, _, acc, loss, js, test_acc = T.test(rounds = rounds, iterations = iterations,
         learning_samples = learning_samples, eval_samples = eval_samples, prefix = prefix)

    np.save('./data/' + prefix + 'dagger_data.npy', dagger_data)
    np.save('./data/' + prefix + 'acc.npy', acc)    
    np.save('./data/' + prefix + 'loss.npy', loss)
    np.save('./data/' + prefix + 'js.npy', js)
    np.save('./data/' + prefix + 'test_acc.npy', test_acc)
    
    analysis = Analysis()
    analysis.get_perf(dagger_data, range(iterations))
    analysis.plot(names=['DAgger'], label='Reward', filename='./results/' + prefix + 'return_plots.eps')

    acc_a = Analysis()
    acc_a.get_perf(acc, range(iterations))
    acc_a.plot(names=['DAgger Acc.'], label='Accuracy', filename='./results/' + prefix + 'acc_plots.eps', ylims=[0,1])

    test_acc_a = Analysis()
    test_acc_a.get_perf(test_acc, range(iterations))
    test_acc_a.plot(names=['DAgger Acc.'], label='Test Accuracy', filename='./results/' + prefix + 'test_acc_plots.eps', ylims=[0, 1])

    loss_a = Analysis()
    loss_a.get_perf(loss, range(iterations))
    loss_a.plot(names=['DAgger Loss'], label='Loss', filename='./results/' + prefix + 'loss_plots.eps', ylims=[0, 1])

    js_a = Analysis()
    js_a.get_perf(js, range(iterations))
    js_a.plot(names=['DAgger'], label='J()', filename='./results/' + prefix + '-js_plots.eps')

    
    print "finished"
Example #50
def bench(args):
    config_dir = '{0}/{1}'.format(args.dir, args.bench_name)
    brname = args.bench_name + '-br'

    ip = IPRoute()
    ctn_intfs = flatten((l.get_attr('IFLA_IFNAME') for l in ip.get_links() if l.get_attr('IFLA_MASTER') == br) for br in ip.link_lookup(ifname=brname))

    if not args.repeat:
        # currently ctn name is same as ctn intf
        # TODO support proper mapping between ctn name and intf name
        for ctn in ctn_intfs:
            dckr.remove_container(ctn, force=True) if ctn_exists(ctn) else None

        if os.path.exists(config_dir):
            shutil.rmtree(config_dir)
    else:
        for ctn in ctn_intfs:
            if ctn != 'tester':
                dckr.remove_container(ctn, force=True) if ctn_exists(ctn) else None

    if args.file:
        with open(args.file) as f:
            conf = yaml.load(f)
    else:
        conf = gen_conf(args)
        if not os.path.exists(config_dir):
            os.makedirs(config_dir)
        with open('{0}/scenario.yaml'.format(config_dir), 'w') as f:
            f.write(yaml.dump(conf))

    if len(conf['tester']) > gc_thresh3():
        print 'gc_thresh3({0}) is lower than the number of peer({1})'.format(gc_thresh3(), len(conf['tester']))
        print 'type next to increase the value'
        print '$ echo 16384 | sudo tee /proc/sys/net/ipv4/neigh/default/gc_thresh3'

    if args.target == 'gobgp':
        target = GoBGP
    elif args.target == 'bird':
        target = BIRD
    elif args.target == 'quagga':
        target = Quagga

    is_remote = True if 'remote' in conf['target'] and conf['target']['remote'] else False

    if is_remote:
        r = ip.get_routes(dst=conf['target']['local-address'].split('/')[0], family=AF_INET)
        if len(r) == 0:
            print 'no route to remote target {0}'.format(conf['target']['local-address'])
            sys.exit(1)

        idx = [t[1] for t in r[0]['attrs'] if t[0] == 'RTA_OIF'][0]
        intf = ip.get_links(idx)[0]

        if intf.get_attr('IFLA_MASTER') not in ip.link_lookup(ifname=brname):
            br = ip.link_lookup(ifname=brname)
            if len(br) == 0:
                ip.link_create(ifname=brname, kind='bridge')
                br = ip.link_lookup(ifname=brname)
            br = br[0]
            ip.link('set', index=idx, master=br)
    else:
        print 'run', args.target
        if args.image:
            target = target(args.target, '{0}/{1}'.format(config_dir, args.target), image=args.image)
        else:
            target = target(args.target, '{0}/{1}'.format(config_dir, args.target))
        target.run(conf, brname)

    print 'run monitor'
    m = Monitor('monitor', config_dir+'/monitor')
    m.run(conf, brname)

    time.sleep(1)

    print 'waiting bgp connection between {0} and monitor'.format(args.target)
    m.wait_established(conf['target']['local-address'].split('/')[0])

    if not args.repeat:
        print 'run tester'
        t = Tester('tester', config_dir+'/tester')
        t.run(conf, brname)

    start = datetime.datetime.now()

    q = Queue()

    m.stats(q)
    if not is_remote:
        target.stats(q)

    def mem_human(v):
        if v > 1000 * 1000 * 1000:
            return '{0:.2f}GB'.format(float(v) / (1000 * 1000 * 1000))
        elif v > 1000 * 1000:
            return '{0:.2f}MB'.format(float(v) / (1000 * 1000))
        elif v > 1000:
            return '{0:.2f}KB'.format(float(v) / 1000)
        else:
            return '{0:.2f}B'.format(float(v))

    f = open(args.output, 'w') if args.output else None
    cpu = 0
    mem = 0
    cooling = -1
    while True:
        info = q.get()

        if not is_remote and info['who'] == target.name:
            cpu = info['cpu']
            mem = info['mem']

        if info['who'] == m.name:
            now = datetime.datetime.now()
            elapsed = now - start
            recved = info['info']['accepted'] if 'accepted' in info['info'] else 0
            if elapsed.seconds > 0:
                rm_line()
            print 'elapsed: {0}sec, cpu: {1:>4.2f}%, mem: {2}, recved: {3}'.format(elapsed.seconds, cpu, mem_human(mem), recved)
            f.write('{0}, {1}, {2}, {3}\n'.format(elapsed.seconds, cpu, mem, recved)) if f else None
            f.flush() if f else None

            if cooling == args.cooling:
                f.close() if f else None
                return

            if cooling >= 0:
                cooling += 1

            if info['checked']:
                cooling = 0
Example #51
import subprocess
import sys
sys.path.insert(0, "../")
from tester import Tester
t = Tester()
##

# from task import parseLine

## write tests here --

# part 1

# part 2

## -- end of tests

def askYesNo(question):
	print(question + " [y/n]")
	yes = set(['yes','y', 'ye', ''])
	no = set(['no','n'])	
	while True:
		choice = raw_input().lower()
		if choice in yes:
			return True
		elif choice in no:
			return False
		else:
			print("Answer with 'y' (yes) or 'n' (no).")

ARGUMENTS = [ ]
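askYesNo reads from stdin via raw_input, so this scaffold targets Python 2; a small usage sketch with an illustrative prompt:

if askYesNo("Run the tests now?"):
    print("running tests...")
else:
    print("skipped")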
Example #52
    OUTPUT_FILE = config['output_file']
    with open(INPUT_FILE, 'rb') as csvfile:
        reader = csv.DictReader(csvfile)
        last_candle = None
        for price in reader:
            new_candle = PriceCandle(int(price['Date']), float(price['Close']), last_candle)
            history.append(new_candle)
            last_candle = new_candle

    first_price = history[0].closing_value
    last_price = history[-1].closing_value
    bh_profit = 100*(last_price - first_price*(1+config['fees']/100)) / first_price
    print "Compare to B&H profit [%.2f to %.2f]: %.2f%%" % (first_price, last_price, bh_profit)

    if config['single_test']:
        backtest = Tester(config, history)
        backtest.print_results()
    else:
        with open(OUTPUT_FILE, 'wb') as csvfile:
            header_row = ['long_EMA']
            for index in range(1, MAX_EMA_SIZE):
                header_row.append(index)
            writer = csv.writer(csvfile)
            writer.writerow(header_row)
            for long_EMA in range(1, MAX_EMA_SIZE):
                config['long_EMA'] = long_EMA
                profit_row = [long_EMA]
                for short_EMA in range(1, long_EMA):
                    config['short_EMA'] = short_EMA
                    backtest = Tester(config, history)
                    profit = backtest.print_results()
Example #53
sourceDirectory = "./"
libraries = []
# If a different one was specified, change it
if args.directory:
    testDirectory = args.directory
# If a different one was specified, change it
if args.sourcedir:
    sourceDirectory = args.sourcedir

# If libraries were specified, build a list by splitting on commas
# if args.libraries:
# 	libraries = args.libraries.split(',')

# Build the Tester object
# tester = Tester(args.tests,sourceDirectory,testDirectory,libraries)
tester = Tester(args.tests, sourceDirectory, testDirectory)

# If the -f flag is present, enable showing diffs
if args.diff:
    tester.toggleDiff()

# If specific programs were given
if args.program:
    # Get a list of them, expecting them to be comma-separated
    programs = args.program.split(",")
    # Each program in the given list is tested
    for program in programs:
        tester.testProgram(program)
# If no programs were given, assume all tests should run
else:
    tester.runAllTests()
Example #54
__author__ = 'semyon'

from connector import Connector
from tester import Tester

connector = Connector("pythonroot", "test", "localhost", "mlt", {"spam": "spam", "ham": "ham"})
#connector.put_data("asd", True)
tester = Tester(connector)
tester.test_message("Qwerty asd")


Example #55
def main():
    f = open('try_3.txt','w')
    g = open('accs.txt', 'w')
    g.close()
    task = MarioTask("testbed", initMarioMode = 2)
    task.env.initMarioMode = 2
    task.env.levelDifficulty = 1

    results = [] 
    names = [] 

    with open('type.txt', 'w') as f:
        f.write('ent')
    
    # # #test dagger
    # iterations = 1
    # rounds = 1
    
    iterations = 50
    rounds = 15
    #agent = Dagger(IT,useKMM = False)
    #exp = EpisodicExperiment(task, agent) 
    #T = Tester(agent,exp)
    #dagger_results = T.test(rounds = rounds,iterations = iterations)
    #dagger_data = dagger_results[-1]
    #dagger_results = dagger_results[:-1]
    #results.append(dagger_results)
    #names.append('dagger')
    #pickle.dump(results,open('results.p','wb'))

    #agent = Dagger(IT, useKMM=False)
    #exp = EpisodicExperiment(task, agent)
    #T = Tester(agent, exp)
    #dagger_data, _, acc = T.test(rounds = rounds, iterations = iterations)
    
    agent = Supervise(IT,useKMM = False)
    exp = EpisodicExperiment(task, agent) 
    T = Tester(agent,exp)
    prefix = 'dt-noisy-sup-change-entropy'
    sl_data, sup_data, acc = T.test(rounds = rounds, iterations = iterations, prefix = prefix)

    np.save('./data/' + prefix + '-sup_data.npy', sup_data)
    np.save('./data/' + prefix + '-sl_data.npy', sl_data)
    np.save('./data/' + prefix + '-acc.npy', acc)    
    
    # IPython.embed()

    analysis = Analysis()
    analysis.get_perf(sup_data, range(iterations))
    analysis.get_perf(sl_data, range(iterations))
    analysis.plot(names=['Supervisor', 'Supervised Learning'], label='Reward', filename='./results/' + prefix + '-return_plots.eps')#, ylims=[0, 1600])

    acc_a = Analysis()
    acc_a.get_perf(acc, range(iterations))
    acc_a.plot(names=['Supervised Learning Acc.'], label='Accuracy', filename='./results/' + prefix + '-acc_plots.eps')

    """


    agent = Dagger(IT,useKMM = False)
    exp = EpisodicExperiment(task, agent) 
    T = Tester(agent,exp)
    dagger_data, _, acc = T.test(rounds = rounds, iterations = iterations)

    np.save('./data/dagger_data.npy', dagger_data)
    np.save('./data/acc.npy', acc)    
    
    IPython.embed()

    analysis = Analysis()
    analysis.get_perf(dagger_data, range(iterations))
    analysis.plot(names=['DAgger'], label='Reward', filename='./results/return_plots.eps')

    acc_a = Analysis()
    acc_a.get_perf(acc, range(iterations))
    acc_a.plot(names=['DAgger Acc.'], label='Accuracy', filename='./results/acc_plots.eps')

    """
    
    #agent = Supervise(IT,useKMM = False)
    #exp = EpisodicExperiment(task, agent) 
    #T = Tester(agent,exp)
    #supervise_results = T.test(rounds = rounds, iterations = iterations)
    #supervise_data = supervise_results[-1]
    #supervise_results = supervise_results[:-1]
    #results.append(supervise_results)
    #names.append('supervise')
    #pickle.dump(results,open('results.p','wb'))

    #IPython.embed()

    #analysis = Analysis()
    #analysis.get_perf(supervise_data, results[1][5])
    #analysis.get_perf(dagger_data, results[0][5])
    #analysis.plot(names=['Supervise', 'DAgger'], label='Reward', filename='./return_plot.eps')#, ylims=[-1, 0])




    # agent = Sheath(IT,useKMM = False,sigma = 1.0)
    # exp = EpisodicExperiment(task, agent) 
    # T = Tester(agent,exp)
    # dagger_results = T.test(rounds = 10,iterations = 35)
    # results.append(dagger_results)
    # names.append('sheath_1')
    # pickle.dump(results,open('results.p','wb'))

    # agent = Sheath(IT,useKMM = False,sigma = 1e-1)
    # exp = EpisodicExperiment(task, agent) 
    # T = Tester(agent,exp)
    # dagger_results = T.test(rounds = 10,iterations = 35)
    # results.append(dagger_results)
    # names.append('sheath_1')
    # pickle.dump(results,open('results.p','wb'))


    
    # agent = Sheath(IT,useKMM = False,sigma = 0.5)
    # exp = EpisodicExperiment(task, agent) 
    # T = Tester(agent,exp)
    # dagger_results = T.test(rounds = 10,iterations = 35)
    # results.append(dagger_results)
    # names.append('sheath_1')

    # pickle.dump(results,open('results.p','wb'))
    # agent = Sheath(IT,useKMM = False,sigma = 1e-1)
    # exp = EpisodicExperiment(task, agent) 
    # T = Tester(agent,exp)
    # dagger_results = T.test(rounds = 4,iterations = 35)
    # results.append(dagger_results)
    # names.append('sheath_1')
    

    # agent = Sheath(IT,useKMM = False,sigma = 1e-2)
    # exp = EpisodicExperiment(task, agent) 
    # T = Tester(agent,exp)
    # dagger_results = T.test(rounds = 4,iterations = 35)
    # results.append(dagger_results)
    # names.append('sheath_1')
    # # # # # #test big ahude
    # agent = Ahude(IT,f,gamma = 1e-2,labelState = True, useKMM = True)
    # exp = EpisodicExperiment(task, agent) 
    # T = Tester(agent,exp)
    # ahude_big_results = T.test(rounds = 3)
    # results.append(ahude_big_results)
    # names.append('ahude_1e-1')

    # pickle.dump(results,open('results.p','wb'))


    # # # # # #test med ahude
    # agent = Ahude(IT,f,gamma = 1e-2,labelState = False,useKMM = True)
    # exp = EpisodicExperiment(task, agent) 
    # T = Tester(agent,exp)
    # ahude_med_results = T.test(rounds = 3)
    # results.append(ahude_med_results)
    # names.append('ahude_1e-2')
    
    # # #

    # # # # # # #test small ahude 
    # agent = Ahude(IT,f,gamma = 1e-3)
    # exp = EpisodicExperiment(task, agent) 
    # T = Tester(agent,exp)
    # ahude_small_results = T.test() 
    # results.append(ahude_small_results)
    # names.append('ahude_1e-3')
    
 
    # pickle.dump(results,open('results.p','wb'))

    #plt.figure(1)
    #for i in range(len(results)):
    #    plt.plot(results[i][5],results[i][1])
    
    
    #plt.legend(names,loc='upper left')

    # plt.figure(2)
    # for i in range(len(results)):
    #     plt.plot(results[i][0])

    # plt.legend(names,loc='upper left')

    # plt.figure(3)
    # for i in range(0,len(results)):
    #     plt.plot(results[i][3])

    # plt.legend(names,loc='upper left')


    plt.show()
    
    # IPython.embed()
    f.close()           
       

    #agent.saveModel()
    print "finished"
Example #56
File: scannerc.py, Project: nicovs/odz
    print "-h --help afficher ce message d'aide"


help()

try:
    opts, args = getopt.getopt(
        sys.argv[1:], "hu:t:e:m:vc", ["help", "url=", "type=", "enumerate=", "theme=", "vuln", "vulnt"]
    )
except getopt.GetoptError as err:
    print (err)
    help()
    sys.exit(2)
fp = FingerPrint()
ic = InfoCollector()
ts = Tester()
os = OnlineSearch()
svn = SvnParser()
for o, u in opts:
    if o in ("-h", "--help"):
        help()
    elif o in ("-u", "--url"):
        url = fp.adrstrip(u)
        t = fp.detect_cms(url)
        # tes = fp.check_if_exist("http://www.123algeriasport.com/wp-content/plugins/social-discussions/")
        print "[!] CMS installed is : " + t
        if t == "wordpress":
            print "[x] Searching for plugins or themes in the code (passive search): \n"
            ic.get_info_passive(url, t)
            print "[x] Fingerprinting using readme.html \n"
            print "[!] Wordpress Version is : " + str(fp.wp_fp_rm(url)) + "\n"
Example #57
def email_receiver(cmd,msg):
    T = Tester()
    T.newtest(testname=cmd)

    # Save the received email message in the database
    messageid = T.insert_email_message(tester.EMAIL_TAG_USER_SENT,str(msg))
    args = {"messageid":messageid,"cmd":cmd}

    # Depending on the command, institute the next step...
    if cmd=="bouncer":
        T.insert_task(tester.TASK_COMPOSE_SIMPLE_RESPONSE, args)
        T.commit()
        
    elif cmd=="register":
        T.insert_task(tester.TASK_REGISTER_FROM_EMAIL, args)
        T.commit()

    else:
        # Log invalid command
        logging.info("Invalid command: {}  Message {}".format(cmd, messageid))
Example #58
File: run_test.py, Project: kavinyao/SKBPR
import config
from tester import Tester

if __name__ == '__main__':
    print 'Running tests on database [%s]' % config.DB_NAME

    dbm = config.DatabaseManager(config.DB_HOST, config.DB_USER, config.DB_PASSWORD, config.DB_NAME)
    try:
        # set up recommender
        word_segmenter = config.WordSegmenter()
        recommenders = [R(dbm, word_segmenter, M()) for R, M in config.Recommenders]

        # set up tester
        splitter = config.Splitter(dbm, config.K)
        evaluator = config.Evaluator()
        tester = Tester(config.N, config.REPEAT, dbm, recommenders, splitter, evaluator)

        # fire!
        tester.run()
    finally:
        dbm.close()
Example #59
    analysis = Analysis()
    analysis.get_perf(sup_data, range(iterations))
    analysis.get_perf(sl_data, range(iterations))
    analysis.plot(names=['Supervisor', 'Supervised Learning'], label='Reward', filename='./results/return_plots.eps')#, ylims=[0, 1600])

    acc_a = Analysis()
    acc_a.get_perf(acc, range(iterations))
    acc_a.plot(names=['Supervised Learning Acc.'], label='Accuracy', filename='./results/acc_plots.eps')

    """


    agent = Dagger(IT,useKMM = False)
    exp = EpisodicExperiment(task, agent) 
    T = Tester(agent,exp)
    prefix = 'svc-dagger-change'
    dagger_data, _, acc = T.test(rounds = rounds, iterations = iterations, prefix = prefix)

    np.save('./data/svc-dagger-change-dagger_data.npy', dagger_data)
    np.save('./data/svc-dagger-change-acc.npy', acc)    
    
    # IPython.embed()

    analysis = Analysis()
    analysis.get_perf(dagger_data, range(iterations))
    analysis.plot(names=['DAgger'], label='Reward', filename='./results/svc-dagger-change-return_plots.eps')

    acc_a = Analysis()
    acc_a.get_perf(acc, range(iterations))
    acc_a.plot(names=['DAgger Acc.'], label='Accuracy', filename='./results/svc-dagger-change-acc_plots.eps')