Example #1
    def eval(self):
        self.env.setup(deepcopy(self.test_g_list))
        t = 0
        while not self.env.isTerminal():
            list_at = self.make_actions(t, greedy=True)
            self.env.step(list_at)
            t += 1
        # evaluate the classifier on the attacked test graphs
        test_loss = loop_dataset(self.env.g_list, self.env.classifier,
                                 list(range(len(self.env.g_list))))
        print('\033[93m average test: loss %.5f acc %.5f\033[0m' %
              (test_loss[0], test_loss[1]))

        # keep the checkpoint with the lowest post-attack accuracy (strongest attack)
        if cmd_args.phase == 'train' and (
                self.best_eval is None or test_loss[1] < self.best_eval):
            print(
                '----saving to best attacker since this is the best attack rate so far.----'
            )
            torch.save(self.net.state_dict(),
                       cmd_args.save_dir + '/epoch-best.model')
            with open(cmd_args.save_dir + '/epoch-best.txt', 'w') as f:
                f.write('%.4f\n' % test_loss[1])
            self.best_eval = test_loss[1]

        reward = np.mean(self.env.rewards)
        print(reward)
        return reward, test_loss[1]
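eval() returns the mean rollout reward together with the classifier's accuracy on the attacked test graphs. A minimal sketch of a caller, assuming `agent` is an instance of the class this method belongs to (the name is not part of the snippet):

# Hypothetical caller; `agent` stands for an instance of the attacking agent class above.
reward, acc = agent.eval()
print('mean attack reward: %.4f' % reward)
print('classifier accuracy after the attack: %.4f' % acc)  # lower = stronger attack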
Example #2
    def _eval_data(self):

        data_dir = 'data_dir'
        if not os.path.exists(data_dir):
            os.mkdir(data_dir)

        dir_name = datetime.now().strftime("%m-%d-%H-%M-%S")
        cur_data_dir = os.path.join(data_dir, dir_name)
        os.mkdir(cur_data_dir)

        print('===== Saving original graphs ... =====')
        with open('%s/original_glist.pkl' % (cur_data_dir, ), 'wb') as f:
            original_graphs = [g.to_networkx() for g in self.test_g_list]
            cp.dump(original_graphs, f)
        print('===== Saved original graphs. =====')

        self.env.setup(deepcopy(self.test_g_list))
        t = 0
        while not self.env.isTerminal():
            list_at = self.make_actions(t, greedy=True)
            self.env.step(list_at)
            t += 1
        test_loss = loop_dataset(self.env.g_list, self.env.classifier, list(range(len(self.env.g_list))))
        print('\033[93m average test: loss %.5f acc %.5f\033[0m' % (test_loss[0], test_loss[1]))

        print('===== Saving new graphs ... =====')
        with open('%s/new_glist.pkl' % (cur_data_dir, ), 'wb') as f:
            new_graphs = [g.to_networkx() for g in self.env.g_list]
            cp.dump(new_graphs, f)
        print('===== Saved new graphs. =====')
        return original_graphs, new_graphs, cur_data_dir
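Both pickle files written by _eval_data hold plain NetworkX graphs in the same order as self.test_g_list, so they can be inspected offline. A small sketch, assuming `cp` in the snippet is the standard pickle module and `cur_data_dir` is the directory returned above, that counts the edges changed per graph:

import os
import pickle

def summarize_changes(cur_data_dir):
    # load the graphs saved before and after the attack
    with open(os.path.join(cur_data_dir, 'original_glist.pkl'), 'rb') as f:
        original = pickle.load(f)
    with open(os.path.join(cur_data_dir, 'new_glist.pkl'), 'rb') as f:
        perturbed = pickle.load(f)
    for i, (g0, g1) in enumerate(zip(original, perturbed)):
        e0 = set(frozenset(e) for e in g0.edges())
        e1 = set(frozenset(e) for e in g1.edges())
        # edges present only after / only before the attack
        print('graph %d: +%d edges, -%d edges' % (i, len(e1 - e0), len(e0 - e1)))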
Example #3
    def eval(self):
        self.env.setup(deepcopy(self.test_g_list))
        t = 0
        while not self.env.isTerminal():
            list_at = self.make_actions(t, greedy=True)
            self.env.step(list_at)
            t += 1
        test_loss = loop_dataset(self.env.g_list, self.env.classifier,
                                 list(range(len(self.env.g_list))))
        print('\033[93m average test: loss %.5f acc %.5f\033[0m' %
              (test_loss[0], test_loss[1]))
        # record, for each test graph, its true label, the (1-based) prediction
        # after the attack, and the endpoints of the added edge
        with open('%s/edge_added.txt' % cmd_args.save_dir, 'w') as f:
            for i in range(len(self.test_g_list)):
                f.write('%d %d ' %
                        (self.test_g_list[i].label, self.env.pred[i] + 1))
                f.write('%d %d\n' % self.env.added_edges[i])
        reward = np.mean(self.env.rewards)
        print(reward)
        return reward, test_loss[1]
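Each line of edge_added.txt written above holds four integers: the graph's true label, the post-attack prediction shifted by one, and the two endpoints of the added edge. A sketch for reading the file back (the helper name is an assumption, not part of the snippet):

def read_edge_log(path):
    # parse lines of the form 'label pred+1 u v' written by eval() above
    records = []
    with open(path) as f:
        for line in f:
            label, pred_plus_one, u, v = map(int, line.split())
            records.append({'label': label, 'pred': pred_plus_one - 1, 'edge': (u, v)})
    return records

# e.g. records = read_edge_log(cmd_args.save_dir + '/edge_added.txt')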
Example #4
    def eval(self):
        self.env.setup(deepcopy(self.test_g_list))
        print('===== type of self.test_g_list is\n', type(self.test_g_list))
        print('===== length of self.test_g_list is\n', len(self.test_g_list))
        print('===== self.test_g_list is\n', self.test_g_list)
        print('===== Saving original graphs ...')
        # TODO
        with open('original_glist.pkl', 'wb') as f:
            cp.dump([g.to_networkx() for g in self.test_g_list], f)
        print('===== Saved original graphs.')
        t = 0
        while not self.env.isTerminal():
            list_at = self.make_actions(t, greedy=True)
            self.env.step(list_at)
            t += 1
        test_loss = loop_dataset(self.env.g_list, self.env.classifier, list(range(len(self.env.g_list))))
        print('===== type of self.env.g_list is\n', type(self.env.g_list))
        print('===== length of self.env.g_list is\n', len(self.env.g_list))
        print('===== self.env.g_list is\n', self.env.g_list)
        # TODO
        print('===== Saving new graphs ...')
        with open('new_glist.pkl', 'wb') as f:
            cp.dump([g.to_networkx() for g in self.env.g_list], f)
        print('===== Saved new graphs.')
        print('\033[93m average test: loss %.5f acc %.5f\033[0m' % (test_loss[0], test_loss[1]))

        if cmd_args.phase == 'train' and (self.best_eval is None or test_loss[1] < self.best_eval):
            print('----saving to best attacker since this is the best attack rate so far.----')
            torch.save(self.net.state_dict(), cmd_args.save_dir + '/epoch-best.model')
            with open(cmd_args.save_dir + '/epoch-best.txt', 'w') as f:
                f.write('%.4f\n' % test_loss[1])
            self.best_eval = test_loss[1]

        reward = np.mean(self.env.rewards)
        print(reward)
        return reward, test_loss[1]
Example #5
    if cmd_args.saved_model is not None and cmd_args.saved_model != '':
        print('loading model from %s' % cmd_args.saved_model)
        with open('%s-args.pkl' % cmd_args.saved_model, 'rb') as f:
            base_args = cp.load(f)
        classifier = GraphClassifier(label_map, **vars(base_args))
        classifier.load_state_dict(torch.load(cmd_args.saved_model + '.model'))
    else:
        classifier = GraphClassifier(label_map, **vars(cmd_args))

    # move classifier to gpu if available
    if cmd_args.ctx == 'gpu':
        classifier = classifier.cuda()

    # if phase is test look at the test accuracy and loss
    if cmd_args.phase == 'test':
        test_loss = loop_dataset(test_glist, classifier,
                                 list(range(len(test_glist))))
        print('\033[93maverage test: loss %.5f acc %.5f\033[0m' %
              (test_loss[0], test_loss[1]))

    # if phase is train then fit parameters to the model
    if cmd_args.phase == 'train':
        optimizer = optim.Adam(classifier.parameters(),
                               lr=cmd_args.learning_rate)

        train_idxes = list(range(len(train_glist)))
        best_loss = None
        for epoch in range(cmd_args.num_epochs):
            random.shuffle(train_idxes)
            avg_loss = loop_dataset(train_glist,
                                    classifier,
                                    train_idxes,
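The model-loading branch near the top of this example expects two files per checkpoint: `<saved_model>-args.pkl` holding the pickled argument namespace and `<saved_model>.model` holding the state dict. A minimal sketch of the matching save step (the helper name and prefix are assumptions, not part of the snippet):

import pickle
import torch

def save_classifier(classifier, args, prefix):
    # write <prefix>-args.pkl and <prefix>.model so the loading code above can restore them
    with open('%s-args.pkl' % prefix, 'wb') as f:
        pickle.dump(args, f)
    torch.save(classifier.state_dict(), '%s.model' % prefix)

# e.g. save_classifier(classifier, cmd_args, cmd_args.save_dir + '/epoch-best')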
Example #6
def test_graphs(classifier, test_glist):
    test_loss = loop_dataset(test_glist, classifier, list(range(len(test_glist))))
    print('\033[93maverage test: loss %.5f acc %.5f\033[0m' % (test_loss[0], test_loss[1]))
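test_graphs only prints the metrics; when they are needed programmatically (for example to log them per epoch), a thin wrapper that also returns the pair is enough. A sketch under the same assumption that loop_dataset returns a (loss, accuracy) pair:

def test_graphs_with_result(classifier, test_glist):
    # same evaluation as test_graphs above, but also return (loss, acc)
    test_loss = loop_dataset(test_glist, classifier, list(range(len(test_glist))))
    print('\033[93maverage test: loss %.5f acc %.5f\033[0m' % (test_loss[0], test_loss[1]))
    return test_loss[0], test_loss[1]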