def build_model(self):
    """Creates and initializes the shared and controller models."""
    if self.args.network_type == 'Net':
        self.shared = models.Network(self.args)
    else:
        raise NotImplementedError(f'Network type '
                                  f'`{self.args.network_type}` is not '
                                  f'defined')
    self.controller = models.Controller(self.args)
Example #2
    def build_model(self):
        """Creates and initializes the shared and controller models."""
        self.shared = models.RNN(self.args, self.dataset)
        self.controller = models.Controller(self.args)

        if self.args.num_gpu == 1:
            self.shared.cuda()
            self.controller.cuda()
        elif self.args.num_gpu > 1:
            raise NotImplementedError('`num_gpu > 1` is in progress')
Example #3
    def build_model(self):
        """Creates and initializes the shared and controller models."""
        if self.args.network_type == 'unet':
            self.shared = models.Unet(self.args)
        else:
            raise NotImplementedError(f'Network type '
                                      f'`{self.args.network_type}` is not '
                                      f'defined')
        self.controller = models.Controller(self.args)

        if self.args.num_gpu == 1:
            self.shared.cuda()
            self.controller.cuda()
        elif self.args.num_gpu > 1:
            raise NotImplementedError('`num_gpu > 1` is in progress')
Example #4
    def build_model(self):
        """Creates and initializes the shared and controller models."""
        if self.args.network_type == 'rnn':
            self.shared = models.RNN(self.args, self.dataset)
        elif self.args.network_type == 'cnn':
            self.shared = models.CNN(self.args, self.dataset)
        else:
            raise NotImplementedError(
                'Network type `{0}` is not defined'.format(
                    self.args.network_type))
        self.controller = models.Controller(
            self.args
        )  # Builds a forward pass: Embedding(130, 100) -> LSTM(100, 100) -> a list of 25 decoders

        if self.args.num_gpu == 1:
            self.shared.cuda()
            self.controller.cuda()
        elif self.args.num_gpu > 1:
            raise NotImplementedError('`num_gpu > 1` is in progress')
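
The comment on the Controller line in Example #4 describes the controller as an embedding feeding an LSTM whose hidden state is read out by a list of 25 decoders. As a rough structural sketch of that idea (the sizes 130/100/25 come from the comment; the class name, num_choices, and the step-indexed forward signature are illustrative assumptions, not the actual models.Controller API):

import torch
import torch.nn as nn

class ControllerSketch(nn.Module):
    """Illustrative skeleton: Embedding(130, 100) -> LSTMCell(100, 100) -> 25 decoders."""

    def __init__(self, num_tokens=130, hidden=100, num_decoders=25, num_choices=4):
        super().__init__()
        self.encoder = nn.Embedding(num_tokens, hidden)
        self.lstm = nn.LSTMCell(hidden, hidden)
        # One linear decoder per decision step of the sampled architecture.
        self.decoders = nn.ModuleList(
            nn.Linear(hidden, num_choices) for _ in range(num_decoders))

    def forward(self, token, state, step):
        h, c = self.lstm(self.encoder(token), state)
        return self.decoders[step](h), (h, c)

# Usage: sample one decision at a time, carrying the LSTM state forward.
ctrl = ControllerSketch()
state = (torch.zeros(1, 100), torch.zeros(1, 100))
logits, state = ctrl(torch.zeros(1, dtype=torch.long), state, step=0)
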
Example #5
    def build_model(self):
        """Creates and initializes the shared and controller models."""
        if self.args.network_type == 'rnn':
            self.shared = models.RNN(self.args, self.dataset)
            self.controller = models.Controller(self.args)
        elif self.args.network_type == 'micro_cnn':
            self.shared = models.CNN(self.args, self.dataset)
            self.controller = models.CNNMicroController(self.args)
        else:
            raise NotImplementedError(f'Network type '
                                      f'`{self.args.network_type}` is not '
                                      f'defined')

        if self.args.num_gpu == 1:
            if torch.__version__ == '0.3.1':
                self.shared.cuda()
                self.controller.cuda()
            else:
                self.shared.to(self.device)
                self.controller.to(self.device)

        elif self.args.num_gpu > 1:
            raise NotImplementedError('`num_gpu > 1` is in progress')
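
Example #5 branches on torch.__version__ to choose between the legacy .cuda() calls and the newer .to(device) API. A small, self-contained helper in that spirit, which works on both CPU-only and single-GPU machines (the place helper and the toy module are illustrative, not part of the code above):

import torch
import torch.nn as nn

def place(module, num_gpu):
    # Mirrors the branching used in these examples: one GPU -> move to CUDA,
    # several GPUs -> not implemented, otherwise leave the module on the CPU.
    if num_gpu == 1:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        module.to(device)
    elif num_gpu > 1:
        raise NotImplementedError('`num_gpu > 1` is in progress')
    return module

shared = place(nn.Linear(8, 4), num_gpu=0)
print(next(shared.parameters()).device)  # cpu
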
Example #6
def build_model(self):
    """Creates and initializes the shared and controller models."""
    if self.args.network_type == 'rnn':
        self.shared = models.RNN(self.args, self.dataset)
    elif self.args.network_type == 'cnn':
        print("----- begin to init cnn------")
        self.shared = models.CNN(self.args, self.dataset)
        # self.shared = self.shared.cuda()
    else:
        raise NotImplementedError(f'Network type '
                                  f'`{self.args.network_type}` is not '
                                  f'defined')
    print("---- begin to init controller-----")
    self.controller = models.Controller(self.args)
    # self.controller = self.controller.cuda()
    print("===begin to cuda")
    if self.args.num_gpu == 1:
        print("cuda")
        self.shared.cuda()
        self.controller.cuda()
        print("finish cuda")
    elif self.args.num_gpu > 1:
        raise NotImplementedError('`num_gpu > 1` is in progress')
Example #7
    def train_controller(self):

        avg_reward_base = None
        baseline = None
        adv_history = []
        entropy_history = []
        reward_history = []

        controller = models.Controller(self.n_tranformers, self.n_scalers,
                                       self.n_constructers, self.n_selecters,
                                       self.n_models, self.func_names,
                                       self.lstm_size, self.temperature,
                                       self.tanh_constant, self.save_dir)

        controller_optimizer = _get_optimizer(self.optimizer)
        controller_optim = controller_optimizer(controller.parameters(),
                                                lr=self.controller_lr)

        controller.train()
        total_loss = 0

        results_dag = []
        results_acc = []
        random_history = []
        acc_history = []

        for step in range(self.controller_max_step):
            # sample models
            dags, actions, sample_entropy, sample_log_probs = controller()
            sample_entropy = torch.sum(sample_entropy)
            sample_log_probs = torch.sum(sample_log_probs)
            # print(sample_log_probs)
            print(actions)

            random_actions = self.random_actions()
            with torch.no_grad():
                acc = self.get_reward(actions)
                random_acc = self.get_reward(torch.LongTensor(random_actions))

            random_history.append(random_acc)
            results_acc.append(acc)
            results_dag.append(dags)
            acc_history.append(acc)

            rewards = torch.tensor(acc)

            if self.entropy_weight is not None:
                rewards += self.entropy_weight * sample_entropy

            reward_history.append(rewards)
            entropy_history.append(sample_entropy)

            # moving average baseline
            if baseline is None:
                baseline = rewards
            else:
                decay = self.ema_baseline_decay
                baseline = decay * baseline + (1 - decay) * rewards

            adv = rewards - baseline
            adv_history.append(adv)

            # policy loss
            loss = sample_log_probs * adv

            # update
            controller_optim.zero_grad()
            loss.backward()

            if self.controller_grad_clip > 0:
                torch.nn.utils.clip_grad_norm_(controller.parameters(),
                                               self.controller_grad_clip)
            controller_optim.step()

            total_loss += loss.item()

            if ((step % self.log_step) == 0) and (step > 0):
                self._summarize_controller_train(total_loss, adv_history,
                                                 entropy_history,
                                                 reward_history, acc_history,
                                                 random_history,
                                                 avg_reward_base, dags)

                reward_history, adv_history, entropy_history, acc_history, random_history = [], [], [], [], []
                total_loss = 0
            self.controller_step += 1

        max_acc = np.max(results_acc)
        max_dag = results_dag[np.argmax(results_acc)]
        path = os.path.join(self.model_dir, 'networks', 'best.png')
        utils.draw_network(max_dag[0], path)
        # np.sort(results_acc)[-10:]
        return np.sort(list(set(results_acc)))[-10:]
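
The loop in Example #7 is a REINFORCE update with an exponential-moving-average baseline: the sampled architecture's accuracy (plus an optional entropy bonus) serves as the reward, the baseline is updated as decay * baseline + (1 - decay) * reward, and the log-probability of the sample is scaled by the advantage before the gradient step. A toy, self-contained sketch of just that update rule, with a single categorical policy standing in for the controller (all names and the stand-in reward are illustrative):

import torch
import torch.nn as nn

logits = nn.Parameter(torch.zeros(4))        # toy "controller" policy
optim = torch.optim.Adam([logits], lr=3.5e-4)

baseline, decay = None, 0.95
for step in range(200):
    dist = torch.distributions.Categorical(logits=logits)
    action = dist.sample()
    log_prob = dist.log_prob(action)

    # Stand-in reward: pretend action 2 is the best architecture.
    reward = 1.0 if action.item() == 2 else 0.1

    # Exponential-moving-average baseline, as in the loop above.
    baseline = reward if baseline is None else decay * baseline + (1 - decay) * reward
    adv = reward - baseline

    # REINFORCE: ascend log_prob * adv by minimizing its negative.
    loss = -log_prob * adv
    optim.zero_grad()
    loss.backward()
    torch.nn.utils.clip_grad_norm_([logits], 5.0)
    optim.step()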