Example #1
    def train(self, actions=None, format="two"):
        origin_action = actions
        actions = process_action(actions, format, self.args)
        print("train gnn structures:", actions)

        # create model
        model = self.build_gnn(actions)

        try:
            if self.args.cuda:
                model.cuda()
            # use optimizer
            optimizer = torch.optim.Adam(model.parameters(), lr=self.args.lr, weight_decay=self.args.weight_decay)
            model, val_acc = self.run_model(model, optimizer, self.loss_fn, self.data, self.epochs, cuda=self.args.cuda,
                                            half_stop_score=max(self.reward_manager.get_top_average() * 0.7, 0.4)
                                            # , show_info=True
                                            )
        except RuntimeError as e:
            if "cuda" in str(e) or "CUDA" in str(e):
                print(e)
                val_acc = 0
            else:
                raise e
        reward = self.reward_manager.get_reward(val_acc)
        # GNN architecture, reward, val_acc (logging disabled here)
        # self.record_action_info(origin_action, reward, val_acc)

        return reward, val_acc
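
The helper process_action is used in every example but never shown. Below is a minimal sketch of what it appears to do here, inferred from the inline comments in Examples #5 and #6 (the real implementation lives elsewhere in the repository; num_class is an assumed attribute of args):

def process_action(actions, format, args):
    # Hypothetical sketch: copy so the caller's origin_action stays untouched.
    actions = list(actions)
    if format == "two":
        # The last entry is the output dimension of the second GNN layer;
        # replace it with the number of target classes (6 for Citeseer).
        actions[-1] = args.num_class
    return actions

class _Args:  # stand-in for the parsed argparse namespace
    num_class = 6

print(process_action(['gat', 'max', 'tanh', 1, 128, 'cos', 'sum', 'tanh', 4, 16],
                     "two", _Args()))
# -> ['gat', 'max', 'tanh', 1, 128, 'cos', 'sum', 'tanh', 4, 6]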
Example #2
    def evaluate(self, actions=None, format="two"):
        actions = process_action(actions, format, self.args)
        print("train action:", actions)

        # create model
        model = self.build_gnn(actions)

        if self.args.cuda:
            model.cuda()

        # use optimizer
        optimizer = torch.optim.Adam(model.parameters(), lr=self.args.lr, weight_decay=self.args.weight_decay)
        try:
            model, val_acc, test_acc = self.run_model(model, optimizer, self.loss_fn, self.data, self.epochs,
                                                      cuda=self.args.cuda, return_best=True,
                                                      half_stop_score=max(self.reward_manager.get_top_average() * 0.7,
                                                                          0.4))
        except RuntimeError as e:
            if "cuda" in str(e) or "CUDA" in str(e):
                print(e)
                val_acc = 0
                test_acc = 0
            else:
                raise e
        return val_acc, test_acc
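
A hypothetical driver showing how these two entry points fit together in a search loop: train() scores candidates via the reward signal, and evaluate() re-runs the winner (with return_best=True internally) to get its test accuracy. The stub manager and single candidate below are stand-ins to keep the sketch runnable:

class _StubManager:
    # Stand-in for the real model manager, only to make the sketch runnable.
    def train(self, actions, format="two"):
        return 0.1, 0.80   # (reward, val_acc)

    def evaluate(self, actions, format="two"):
        return 0.80, 0.79  # (val_acc, test_acc)

manager = _StubManager()
candidates = [['gat', 'sum', 'relu', 2, 8, 'linear', 'mlp', 'tanh', 2, 4]]

best_actions, best_reward = None, float("-inf")
for actions in candidates:  # normally produced by the search strategy
    reward, val_acc = manager.train(actions, format="two")
    if reward > best_reward:
        best_actions, best_reward = actions, reward

# Final, held-out evaluation of the winning architecture.
val_acc, test_acc = manager.evaluate(best_actions, format="two")
print(best_actions, val_acc, test_acc)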
Example #3
    def train(self):
        print("\n\n===== Random Search =====")
        start_time = time.time()
        self.best_ind_score = 0.0
        self.best_ind = []
        for cycle, gnn in enumerate(self.individuals):
            if self.server_proxy is None:
                # Evaluate locally if not using a server.
                # Ignore the first return value to discard the model.
                _, metrics = \
                    self.submodel_manager.train(gnn, format=self.args.format)
            else:
                # Fix the candidate architecture before passing it to the server.
                candidate_arch = process_action(gnn,
                                                type='two',
                                                args=self.args)
                # Call server-side evaluation.
                lr, in_drop, weight_decay = \
                    self.get_model_params(candidate_arch)
                results = self.server_proxy.evaluate_pipeline(
                    self.server_proxy.build_pipeline(
                        candidate_arch, self.args.search_mode,
                        self.args.dataset, self.args.random_seed,
                        self.args.folds, self.args.batches, lr, in_drop,
                        weight_decay, self.args.layers_of_child_model))
                print('server results for gnn', gnn, results)
                metrics = {metric: 0.0 for metric in optimization_metrics}
                if not results['error']:
                    # On a successful run results['error'] is empty and the
                    # real training metrics are returned.
                    metrics = results['train']
                # Save to the log.
                self.submodel_manager.record_action_info(gnn, metrics)
            # Manage the Hall of Fame.
            self.hof.add(gnn,
                         metrics[self.args.opt_metric],
                         details="Optimization, {:d} individual".format(cycle + 1))
            print("individual:", gnn, " val_score:",
                  metrics[self.args.opt_metric])
            # Keep the best individual seen so far.
            if metrics[self.args.opt_metric] > self.best_ind_score:
                self.best_ind = gnn.copy()
                self.best_ind_score = metrics[self.args.opt_metric]
        end_time = time.time()
        total_time = end_time - start_time
        print('Total elapsed time:', total_time)
        print('[BEST STRUCTURE]', self.best_ind)
        print('[BEST STRUCTURE] Accuracy:', self.best_ind_score)
        print("===== Random Search DONE =====")
Example #4
    def train(self, actions=None, format="two"):
        origin_action = actions
        actions = process_action(actions, format, self.args)
        print("train action:", actions)

        # create model
        model = self.build_gnn(actions)
        val_acc, metrics = 0, {}  # defaults in case training fails below
        try:
            if self.args.cuda:
                model.cuda()
            # use optimizer
            optimizer = torch.optim.Adam(model.parameters(),
                                         lr=self.args.lr,
                                         weight_decay=self.args.weight_decay)
            if self.args.batches > 1:
                model, val_acc, metrics = \
                    self.run_batched_model(model, optimizer,
                                           self.loss_fn, self.data,
                                           self.epochs, cuda=self.args.cuda,
                                           n_splits=self.args.batches)
            else:
                if self.args.folds > 1:
                    model, val_acc, metrics = \
                        self.run_k_fold_model(model, optimizer,
                                              self.loss_fn, self.data,
                                              self.epochs, cuda=self.args.cuda,
                                              seed=self.args.random_seed,
                                              n_splits=self.args.folds)
                else:
                    model, val_acc, metrics = \
                        self.run_model(model,
                                       optimizer,
                                       self.loss_fn,
                                       self.data,
                                       self.epochs,
                                       cuda=self.args.cuda,
                                       # , show_info=True
                                       )
        except RuntimeError as e:
            if "cuda" in str(e) or "CUDA" in str(e):
                # A CUDA failure (e.g. out of memory) scores the architecture
                # as a failed run instead of aborting the whole search.
                val_acc = 0
                metrics = {metric: 0.0 for metric in optimization_metrics}
            # Unlike the other examples, non-CUDA errors are logged here
            # rather than re-raised.
            print(e, repr(e))
            traceback.print_exc(file=sys.stdout)
        # Log the current architecture with metrics
        self.record_action_info(origin_action, metrics)
        return val_acc, metrics
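
Examples #3 and #4 share a convention: a module-level optimization_metrics list names every tracked metric, runs return a dict keyed by those names, and failed runs fall back to all-zero values that remain indexable by args.opt_metric. A small illustration of that convention (the metric names below are invented for the sketch):

optimization_metrics = ["accuracy", "f1", "precision", "recall"]  # assumed names

def empty_metrics():
    # Fallback used when a run fails (e.g. a CUDA out-of-memory error).
    return {metric: 0.0 for metric in optimization_metrics}

metrics = empty_metrics()
opt_metric = "accuracy"        # would come from args.opt_metric
print(metrics[opt_metric])     # 0.0 until a successful run fills it in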
Example #5
    def train(self, actions=None, format="two"):
        origin_action = actions  # ['gat', 'max', 'tanh', 1, 128, 'cos', 'sum', 'tanh', 4, 16]
        actions = process_action(
            actions, format, self.args
        )  # ['gat', 'max', 'tanh', 1, 128, 'cos', 'sum', 'tanh', 4, 6]
        print("train action:", actions)

        # create model
        model = self.build_gnn(
            actions
        )  # -> micro_model_manager.py -> ZengManager(GeoCitationManager)

        try:
            if self.args.cuda:
                model.cuda()
            # use optimizer
            optimizer = torch.optim.Adam(model.parameters(),
                                         lr=self.args.lr,
                                         weight_decay=self.args.weight_decay
                                         )  # lr = 0.2  weight_decay = 0.0005
            model, val_acc = self.run_model(
                model,
                optimizer,
                self.loss_fn,
                self.data,
                self.epochs,
                cuda=self.args.cuda,
                half_stop_score=max(
                    self.reward_manager.get_top_average() * 0.7, 0.4))
        except RuntimeError as e:
            if "cuda" in str(e) or "CUDA" in str(e):
                print(e)
                val_acc = 0
            else:
                raise e
        reward = self.reward_manager.get_reward(val_acc)
        self.save_param(model, update_all=(reward > 0))

        self.record_action_info(origin_action, reward, val_acc)

        return reward, val_acc
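
record_action_info is called but never defined in these excerpts; the comment in Example #6 says it records the GNN architecture, reward, and val_acc in a log file. A minimal sketch of such a logger (the file name and separator are assumptions):

def record_action_info(origin_action, reward, val_acc, path="train_log.txt"):
    # Append one line per evaluated architecture so a search run is auditable.
    with open(path, "a") as f:
        f.write("{};{:.4f};{:.4f}\n".format(origin_action, reward, val_acc))

record_action_info(['gat', 'max', 'tanh', 1, 128], 0.05, 0.82)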
Example #6
    def train(self, actions=None, format="two"):
        # actions = ['gat', 'sum', 'relu', 2, 8, 'linear', 'mlp', 'tanh', 2, 4]
        # format="two"
        origin_action = actions

        # The number of classes in the classification task sets the output
        # dimension of the second GNN layer: Citeseer has 6 classes, so the
        # last output dimension of the second GNN layer becomes 6.
        actions = process_action(actions, format, self.args)
        # actions = ['gat', 'sum', 'relu', 2, 8, 'linear', 'mlp', 'tanh', 2, 6]

        print("train action:", actions)

        # create model
        # build the GNN from the selected architecture description
        model = self.build_gnn(actions)

        try:
            if self.args.cuda:
                model.cuda()
            # use optimizer
            optimizer = torch.optim.Adam(model.parameters(),
                                         lr=self.args.lr,
                                         weight_decay=self.args.weight_decay)

            model, val_acc = self.run_model(
                model,
                optimizer,
                self.loss_fn,
                self.data,
                self.epochs,
                cuda=self.args.cuda,
                half_stop_score=max(
                    self.reward_manager.get_top_average() * 0.7, 0.4))
            """
class TopAverage(object):
    def __init__(self, top_k=10):
        self.scores = []
        self.top_k = top_k

    def get_top_average(self):
        if len(self.scores) > 0:
            return np.mean(self.scores)
        else:
            return 0
            """

        except RuntimeError as e:
            if "cuda" in str(e) or "CUDA" in str(e):
                print(e)
                val_acc = 0
            else:
                raise e

        reward = self.reward_manager.get_reward(val_acc)

        # The reward is positive when val_acc exceeds the mean of the
        # historical top-10 val_acc values; it is clipped to [-0.5, 0.5].
        """
class TopAverage(object):
    def get_average(self, score):
        if len(self.scores) > 0:
            avg = np.mean(self.scores)
        else:
            avg = 0
        # print("Top %d average: %f" % (self.top_k, avg))
        self.scores.append(score)
        self.scores.sort(reverse=True)
        self.scores = self.scores[:self.top_k]
        return avg

    def get_reward(self, score):
        reward = score - self.get_average(score)
        return np.clip(reward, -0.5, 0.5)
        """
        self.save_param(model, update_all=(reward > 0))
        # the models do not share parameters

        self.record_action_info(origin_action, reward, val_acc)
        # log the GNN architecture, reward, and val_acc to the log file

        return reward, val_acc
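
The TopAverage excerpts quoted above pin down the reward rule: the new score minus the running mean of the top-10 scores so far, clipped to [-0.5, 0.5]. A quick runnable check of that behaviour (the class below just restates the quoted code):

import numpy as np

class TopAverage(object):
    def __init__(self, top_k=10):
        self.scores = []
        self.top_k = top_k

    def get_average(self, score):
        # Mean of the scores seen so far, then record the new score
        # and keep only the top_k best.
        avg = np.mean(self.scores) if self.scores else 0
        self.scores.append(score)
        self.scores.sort(reverse=True)
        self.scores = self.scores[:self.top_k]
        return avg

    def get_reward(self, score):
        return np.clip(score - self.get_average(score), -0.5, 0.5)

rm = TopAverage()
print(rm.get_reward(0.80))  # 0.5     (average is still 0, so the reward is clipped)
print(rm.get_reward(0.85))  # ~0.05   (0.85 - 0.80)
print(rm.get_reward(0.70))  # ~-0.125 (0.70 - mean(0.85, 0.80))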