Example #1
def load(config_filename, sess):
    """
	Loads a configuration of compoents from a file

	config_filename - Name of the file to load components from
	sess				 - The tensorflow session to run everything in
	"""

    # TODO: verify that config_filename exists before loading

    # Load the configuration file
    config = ConfigParser.RawConfigParser()
    config.read(config_filename)

    # Parse and construct each part
    environment = load_environment(config)
    network_builder, num_heads = load_network(config)
    memory = load_memory(config, environment, num_heads)

    # Make a counter
    counter_start = load_int(config, 'Counter', 'start', 0)
    counter = Counter(counter_start)

    dqn_agent, agent, eval_agent = load_agent(config, environment,
                                              network_builder, memory, counter,
                                              sess, num_heads)

    # Create a checkpoint object to save the agent and memory
    checkpoint = load_checkpoint(config, dqn_agent.dqn, memory, counter, sess)
    tensorboard = load_tensorboard(config, dqn_agent, sess, counter)

    return environment, agent, eval_agent, counter, checkpoint, tensorboard
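
The helpers load_environment, load_network, load_memory, load_agent, load_checkpoint, load_tensorboard, and load_int are defined elsewhere and are not part of this example. As a rough illustration only, a load_int helper consistent with the call above (its existence comes from the call site; the body is an assumption) could read an option with a fallback:

def load_int(config, section, option, default):
    # config is the ConfigParser.RawConfigParser instance built in load();
    # return the option as an int, or the provided default if it is missing.
    if config.has_option(section, option):
        return config.getint(section, option)
    return default
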
Example #2
    def __init__(self, name, total_time, type_speed, loop, cl, interactive):

        self.name = name
        self.total_time = total_time
        self.loop = loop
        self.cl = cl
        self.interactive = interactive
        # needs to be a string for JSON
        self.icon = "True"
        self.birth = False

        # First create task Object
        self.available_tasks = Tasks()

        # Calls the Task object to return all the available lists
        #self.task_list = dict(self.available_tasks.locate_available_tasks().items())
        self.task_list = self.update_available_tasks()

        # empty task dictionary to hold assigned tasks
        self.tasks = {}

        # Track whether subtasks are being created and which parent task they belong to
        self.creating_subtasks = False
        self.parent_task = ''
        # empty subtask dictionary to hold assigned tasks
        self.subtasks = {}

        # set profile file path to an empty string
        self.profile_path = ''

        # Start counter instance that maps to tasks
        self.counter = Counter()

        # File setup
        self.output_base = "output/"
        self.file_name = name.replace(' ', '_')
        self.file_name = self.file_name.lower() + '.au3'
        self.file_name = self.output_base + self.file_name

        print("[>] Creating the file : {}".format(self.cl.red(self.file_name)))
        self.typing_speed = self._typing_speed(type_speed)

        # Whether the task list loops or runs once
        print("[>] Looping set to : {}".format(self.cl.red(str(self.loop))))

        # AutoIT Include Header List
        self.autoIT_include_statement = ''
        self.autoIT_UDF_includes = [
            '#include <Array.au3>', '#include <WinAPI.au3>'
        ]

        # OCD lines
        print()
        # say hello
        self.say_hello()
        print()
Example #3
    def __init__(self, graph, mcts, idx=-1, parent=None):
        n, _ = graph.shape
        self.graph = graph
        self.parent = parent
        self.children = [None for _ in range(n)]
        self.mcts = mcts
        self.idx = idx

        self.visit_cnt = np.zeros(n, dtype=np.float32)
        if not self.is_end():
            hash = self.mcts.nodehash.hash(self.graph)
            if self.mcts.gnnhash.has(hash):
                self.P, self.Q = self.mcts.gnnhash.get(hash)
            else:
                Timer.start('gnn')
                with torch.no_grad():
                    self.P, self.Q = self.mcts.gnn(self.graph)
                Timer.end('gnn')
                self.P = self.P.detach().numpy()
                self.Q = self.Q.detach().numpy()
                self.mcts.gnnhash.save(hash, self.P, self.Q.copy())

            if self.mcts.nodehash.has(hash):
                self.reward_mean, self.reward_std = self.mcts.nodehash.get(
                    hash)
            else:
                # calculate reward mean and std by random sampling
                NUM = min(max(10, 2 * n), 100)
                if self.mcts.performance:
                    NUM = 10
                rewards = np.empty(NUM)
                ss = make_adj_set(graph)
                Timer.start('sample')
                Counter.count('sample')
                for i in range(NUM):
                    rewards[i] = randomplay(ss)
                Timer.end('sample')
                self.reward_mean = rewards.mean()
                # std should not be 0!
                self.reward_std = rewards.std(ddof=1) + EPS
                assert not np.isnan(self.reward_std)
                self.mcts.nodehash.save(
                    hash, self.reward_mean, self.reward_std)
Example #4
def train(idx):
    np.random.seed()
    torch.manual_seed(idx)
    test_graphs = [
        read_graph("data/random/{}_{}".format(test_graph, i)).adj
        for i in range(5)
    ]

    gnn = GIN3(layer_num=layer_num, feature=feature)
    gnn.to(device)
    trainer = MCTSTrainer(gnn, test_graphs, "{}_{}th".format(file_prefix, idx))

    Timer.start('all')

    for i in range(epoch):
        print("epoch: ", i)
        graph = generate_random_graph(node, edge).adj
        Timer.start('test')
        trainer.test()
        Timer.end('test')

        Timer.start('train')
        tmp = 0.01**(1 / epoch)
        # 10 * tmp^epoch ~= 0.1
        if train_method == "train1":
            trainer.train1(graph, 10 * tmp**i, iter_p=iter_p)
        elif train_method == "train2":
            trainer.train2(graph, 10 * tmp**i, iter_p=iter_p)
        else:
            print("no such method")
            assert False
        Timer.end('train')

    Timer.start('test')
    trainer.test()
    Timer.end('test')

    Timer.end('all')
    Timer.print()
    Counter.print()

    trainer.save_model()
    trainer.save_test_result()
Example #5
class Producer:
    global_running = True
    all_stopped = Counter('Producer')

    @classmethod
    def from_name(cls, name, **kwargs):
        for sub_cls in cls.__subclasses__():
            if name.lower() + "producer" == sub_cls.__name__.lower():
                return sub_cls(**kwargs)

    def __init__(self, output_queue, datasource):
        self.datasource = datasource
        self.output_queue = output_queue
        self.logger = LoggerRouter().getLogger(__name__)
        self.running = True

    def run(self):
        self.all_stopped.increase()
        try:
            while Producer.global_running and self.running:
                # print("PD")
                data = self.datasource.get()
                ret = self.process(data)
                self.output_queue.put(ret)

        except Exception as e:
            self.exception_handler(e)
        finally:
            self._stop()

    @abc.abstractmethod
    def process(self, data):
        return data

    def _stop(self):
        # if all_instance:
        #     Producer.global_running = False
        self.running = False
        self._clean_up()
        self.logger.info('Producer %s stopped' % (self.__class__.__name__))

        self.all_stopped.decrease()

    def stop(self, all_instance=True):
        self.datasource.stop()

    def exception_handler(self, exc):
        self.logger.error("Unexpected exception %s, now stopping..." % exc)

    def _clean_up(self):
        self.datasource.close()
Example #6
 def __init__(self,
              DLFramework,
              DLFramework_Other,
              input_corpus,
              coverage_function,
              dectection_function,
              initcounter,
              powerschedule,
              precision,
              target_interface,
              csvwriter,
              mcmcflag=0,
              GPU_mode=0):
     """
     :param target_interface: 待测接口名
     :param DLFramework: 原深度学习框架
     :param DLFramework_Other: 比较深度学习框架
     :param input_corpus: 语料集
     :param initCounter: 初始化计数器
     :param powerschedule: 能量函数
     :param coverage_function: 覆盖方法
     :param dectection_function: 差分分析方法
     :param precision: 截断精度
     :param csvwriter: csv写入对象,将测试用例相关数据记录在本地
     :param mcmcflag: 是否采用mcmc策略
     :param GPU_mode: GPU模式,0--只是用GPU,1--采用GPU和CPU对比
     """
     self.DLFramework = DLFramework
     self.DLFramework_Other = DLFramework_Other
     self.DLFramework_Computer = self.decide_DLcomputer(self.DLFramework)
     self.DLFramework_Other_Computer = self.decide_DLcomputer(
         self.DLFramework_Other)
     # self.corpus_dir = corpus_dir  (this parameter was merged into input_corpus)
     # self.sample_function = sample_function  (this parameter was merged into input_corpus)
     self.coverage_funcntion = coverage_function
     self.edges = defaultdict(set)
     self.input_corpus = input_corpus
     # print('初始化完成, 当前corpus数量:', len(self.input_corpus.corpus))
     self.counter = Counter(initcounter)  # 计数器,主要用于MCMC过程
     self.power_schedule = powerschedule
     self.mcmcflag = mcmcflag
     self.dectection_function = dectection_function
     self.precision = precision
     self.target_interface = target_interface
     self.csvwriter = csvwriter
     self.crashwriter = CrashWriter(self.csvwriter.getPath())
     self.gpu_mode = GPU_mode
     # Additional counters
     self.crashes = 0  # number of crashes
Example #7
class Consumer:
    global_running = True
    all_stopped = Counter('Consumer')

    @classmethod
    def from_name(cls, name, **kwargs):
        for sub_cls in cls.__subclasses__():
            if name.lower() + "consumer" == sub_cls.__name__.lower():
                return sub_cls(**kwargs)

    def __init__(self, input_queue):
        self.input_queue = input_queue
        self.logger = LoggerRouter().getLogger(__name__)
        self.running = True

    def run(self):
        if not self.all_stopped.finished.is_set():
            self.all_stopped.increase()
            try:
                while Consumer.global_running and self.running:
                    # print("WK")
                    in_data = self.input_queue.get()
                    ret = self.process(in_data)
                    if ret[1] is not None:
                        self.output_queue.put(ret)
            except Exception as e:
                self.exception_handler(e)
            finally:
                # pass
                self.stop()

    def stop(self, all_instance=False):
        if not self.all_stopped.finished.is_set():
            if all_instance:
                Consumer.global_running = False
            self.running = False
            self._clean_up()
            self.logger.info('Worker %s stopped' % (self.__class__.__name__))
            self.all_stopped.decrease()

    @abc.abstractmethod
    def process(self, data):
        return data

    def exception_handler(self, exc):
        self.logger.error("Unexpected exception %s, now stopping..." % exc)

    def _clean_up(self):
        pass
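
Examples #5 and #7 use Counter('Producer') / Counter('Consumer') through increase(), decrease(), and a finished event. That Counter implementation is not shown here; the following is only a minimal sketch consistent with those call sites (the lock and threading.Event details are assumptions, not the original code):

import threading

class Counter:
    """Tracks how many producer/consumer instances are still running.

    `finished` is set once the count drops back to zero.
    """

    def __init__(self, name=''):
        self.name = name
        self._count = 0
        self._lock = threading.Lock()
        self.finished = threading.Event()

    def increase(self):
        with self._lock:
            self._count += 1
            self.finished.clear()

    def decrease(self):
        with self._lock:
            self._count -= 1
            if self._count <= 0:
                self.finished.set()
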
Example #8
from utils.counter import Counter

if __name__ == "__main__":
    test_graphs = [
        read_graph("data/random/100_250_{}".format(i)).adj for i in range(5)
    ]

    gnn = GIN3(layer_num=6)
    gnn.to(device)
    trainer = MCTSTrainer(gnn, test_graphs, "train2_p2_0th")

    Timer.start('all')

    for i in range(100):
        print("epoch: ", i)
        graph = generate_random_graph(100, 250).adj
        Timer.start('train')
        trainer.train2(graph, 10 * 0.96**i, iter_p=2)
        Timer.end('train')

        Timer.start('test')
        trainer.test()
        Timer.end('test')

    Timer.end('all')
    Timer.print()
    Counter.print()

    trainer.save_model()
    trainer.save_test_result()
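
Examples #3, #4, #8, and #11 use Counter from utils.counter as a process-wide instrumentation tool: Counter.count('sample') tallies named events, Counter.print() dumps the totals, and Counter.disable() turns counting off. That module is not shown here; a minimal sketch matching those call sites (the internal dict and the enabled flag are assumptions) could look like:

from collections import defaultdict

class Counter:
    """Class-level event counter used alongside Timer for instrumentation."""
    _counts = defaultdict(int)
    _enabled = True

    @classmethod
    def count(cls, key):
        # Tally one occurrence of `key` unless counting has been disabled.
        if cls._enabled:
            cls._counts[key] += 1

    @classmethod
    def disable(cls):
        cls._enabled = False

    @classmethod
    def print(cls):
        for key, value in cls._counts.items():
            print("{}: {}".format(key, value))
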
Example #9
 def __init__(self, csh, cl):
     super(SubTaskCMD, self).__init__(csh, cl)
     self.subtask_prompt()
     self.subtask_counter = Counter()
Example #10
 def __init__(self):
     self.queue = []
     self.total = Counter()
     self.count = Counter()
Example #11
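    # Tail of the Optuna objective function passed to study.optimize below;
    # `trainer` and `beta` are assumed to be defined earlier in the same function.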
    for i in range(500):
        print("epoch: ", i)
        graph = generate_random_graph(100, 250).adj
        trainer.train2(graph, 10 * beta ** i)
        trainer.test()
    Timer.end('all')
    score = 0
    coef = 1
    for all_rewards in reversed(trainer.test_result):
        coef *= 0.9
        for rewards in all_rewards:
            score += 10 * coef * np.max(rewards)
            score += coef * np.mean(rewards)

    Timer.print()
    trainer.save_model()
    trainer.save_test_result()

    return -score


if __name__ == '__main__':
    Timer.disable()
    Counter.disable()
    os.makedirs("model", exist_ok=True)
    study = optuna.create_study()
    study.optimize(objective, timeout=3600 * 8, n_jobs=-1)

    print("params_{}".format(study.best_params))
    print("value_{}".format(study.best_value))