Example #1
    def infer(self, train_queue, valid_queue, model, criterion, optimizer, lr, epoch):
        objs = utils.AvgrageMeter()

        # VALIDATION
        preds = []
        targets = []

        model.eval()
        for step, graph_batch in enumerate(valid_queue):
            graph_batch = graph_batch.to(self.device)

            # Classifier variant: the model predicts both a bin distribution and a scalar accuracy
            if self.model_config['model'] == 'gnn_vs_gae_classifier':
                pred_bins, pred = self.model(graph_batch=graph_batch)
                criterion = torch.nn.BCELoss()
                criterion_2 = torch.nn.MSELoss()

                # Bin the ground-truth accuracies (reported in %) into fixed-width buckets
                bins = self.create_bins(lower_bound=0,
                                        width=10,
                                        quantity=9)
                binned_weights = []
                for value in graph_batch.y.cpu().numpy():
                    bin_index = self.find_bin(value, bins)
                    binned_weights.append(bin_index)
                bins = torch.FloatTensor(binned_weights)
                # One-hot encode the bin indices to use as classification targets
                make_one_hot = lambda index: torch.eye(self.model_config['no_bins'])[index.view(-1).long()]
                bins_one_hot = make_one_hot(bins).to(self.device)

                loss_1 = criterion(pred_bins, bins_one_hot)
                loss_2 = criterion_2(pred, self.normalize_data(graph_batch.y))
                alpha = self.model_config['classification_loss']
                beta = self.model_config['regression_loss']

                # Weighted combination of the classification (BCE) and regression (MSE) losses
                loss = alpha * loss_1 + beta * loss_2
            else:
                # Plain regression on normalized accuracies (graph_batch.y is reported in %)
                pred = self.model(graph_batch=graph_batch)
                loss = criterion(self.normalize_data(pred), self.normalize_data(graph_batch.y / 100))

            preds.extend(pred.detach().cpu().numpy() * 100)
            targets.extend(graph_batch.y.detach().cpu().numpy())
            n = graph_batch.num_graphs
            objs.update(loss.data.item(), n)

            if step % self.data_config['report_freq'] == 0:
                logging.info('valid %03d %e ', step, objs.avg)

        fig = utils.scatter_plot(np.array(preds), np.array(targets), xlabel='Predicted', ylabel='True', title='')
        fig.savefig(os.path.join(self.log_dir, 'pred_vs_true_valid_{}.jpg'.format(epoch)))
        plt.close()

        val_results = utils.evaluate_metrics(np.array(targets), np.array(preds), prediction_is_first_arg=False)

        return objs.avg, val_results
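
These examples call a few helpers that are defined elsewhere on the trainer class (`create_bins`, `find_bin`, `normalize_data`) but are not shown here. A minimal sketch of what they plausibly look like, written as free functions for brevity; the fixed-width binning and the log-style `normalize_data` are assumptions, not the original implementations:

    import torch

    def create_bins(lower_bound, width, quantity):
        # `quantity` half-open intervals [low, low + width), starting at `lower_bound`
        return [(lower_bound + i * width, lower_bound + (i + 1) * width)
                for i in range(quantity)]

    def find_bin(value, bins):
        # Index of the interval containing `value`; out-of-range values fall into the last bin
        for i, (low, high) in enumerate(bins):
            if low <= value < high:
                return i
        return len(bins) - 1

    def normalize_data(data):
        # Placeholder log-style transform to compress the accuracy range;
        # the real transform used by the trainer may well differ
        return torch.log(data + 1e-7)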
Example #2
    def infer(self, train_queue, valid_queue, model, criterion, optimizer, lr,
              epoch):
        objs = utils.AvgrageMeter()

        # VALIDATION
        preds = []
        targets = []

        # Ensure the surrogate is in eval mode during validation
        self.model.eval()
        for step, (arch_path_enc, y_true) in enumerate(valid_queue):
            arch_path_enc = arch_path_enc.to(self.device).float()
            y_true = y_true.to(self.device).float()
            pred = self.model(arch_path_enc)
            # Relative error between normalized prediction and normalized target (y_true is in %)
            loss = torch.mean(
                torch.abs((self.normalize_data(pred) /
                           self.normalize_data(y_true / 100)) - 1))
            preds.extend(pred.detach().cpu().numpy() * 100)
            targets.extend(y_true.detach().cpu().numpy())
            objs.update(loss.data.item(), len(arch_path_enc))

            if step % self.data_config['report_freq'] == 0:
                logging.info('valid %03d %e ', step, objs.avg)

        fig = utils.scatter_plot(np.array(preds),
                                 np.array(targets),
                                 xlabel='Predicted',
                                 ylabel='True',
                                 title='')
        fig.savefig(
            os.path.join(self.log_dir,
                         'pred_vs_true_valid_{}.jpg'.format(epoch)))
        plt.close()

        val_results = utils.evaluate_metrics(np.array(targets),
                                             np.array(preds),
                                             prediction_is_first_arg=False)

        return objs.avg, val_results
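
`utils.AvgrageMeter` (spelled this way in the DARTS codebase these examples appear to build on) is a simple running weighted average; a sketch of the usual implementation, shown here only for context:

    class AvgrageMeter(object):
        """Tracks a running average of a scalar, weighted by batch size."""

        def __init__(self):
            self.reset()

        def reset(self):
            self.avg = 0.0
            self.sum = 0.0
            self.cnt = 0

        def update(self, val, n=1):
            # `val` is the per-batch value (e.g. the loss), `n` the number of samples in the batch
            self.sum += val * n
            self.cnt += n
            self.avg = self.sum / self.cnt

This matches the `objs.update(loss.data.item(), n)` calls above: each batch loss is weighted by the batch size, and `objs.avg` is the epoch-level mean.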
Example #3
    def train_epoch(self, train_queue, valid_queue, model, criterion, optimizer, lr, epoch):
        objs = utils.AvgrageMeter()

        # TRAINING
        preds = []
        targets = []

        model.train()

        for step, graph_batch in enumerate(train_queue):
            graph_batch = graph_batch.to(self.device)

            if self.model_config['model'] == 'gnn_vs_gae_classifier':
                pred_bins, pred = self.model(graph_batch=graph_batch)
                criterion = torch.nn.BCELoss()
                criterion_2 = torch.nn.MSELoss()

                bins = self.create_bins(lower_bound=0,
                                        width=10,
                                        quantity=9)
                binned_weights = []
                for value in graph_batch.y.cpu().numpy():
                    bin_index = self.find_bin(value, bins)
                    binned_weights.append(bin_index)
                bins = torch.FloatTensor(binned_weights)
                make_one_hot = lambda index: torch.eye(self.model_config['no_bins'])[index.view(-1).long()]
                bins_one_hot = make_one_hot(bins).to(self.device)
                loss_1 = criterion(pred_bins, bins_one_hot)
                loss_2 = criterion_2(pred, self.normalize_data(graph_batch.y))
                alpha = self.model_config['classification_loss']
                beta = self.model_config['regression_loss']

                loss = alpha * loss_1 + beta * loss_2

            else:
                pred = self.model(graph_batch=graph_batch)
                if self.model_config['loss:loss_log_transform']:
                    loss = criterion(self.normalize_data(pred), self.normalize_data(graph_batch.y / 100))
                else:
                    loss = criterion(pred, graph_batch.y / 100)
                if self.model_config['loss:pairwise_ranking_loss']:
                    m = 0.1
                    '''
                    y = list(map(lambda y_i: 1 if y_i == True else -1, graph_batch.y[0: -1] > graph_batch.y[1:]))
                    pairwise_ranking_loss = torch.nn.HingeEmbeddingLoss(margin=m)(pred[0:-1] - pred[1:],
                                                                                  target=torch.from_numpy(np.array(y)))
                    '''
                    # Hinge over all ordered pairs: if y_i ranks above y_j, penalize whenever
                    # pred_i does not exceed pred_j by at least the margin m
                    pairwise_ranking_loss = []
                    sort_idx = torch.argsort(graph_batch.y, descending=True)
                    for idx, idx_y_i in enumerate(sort_idx):
                        for idx_y_i_p1 in sort_idx[idx + 1:]:
                            pairwise_ranking_loss.append(torch.max(torch.tensor(0.0, dtype=torch.float),
                                                                   m - (pred[idx_y_i] - pred[idx_y_i_p1])))
                    pairwise_ranking_loss = torch.mean(torch.stack(pairwise_ranking_loss))

                    loss += pairwise_ranking_loss
                    if step % self.data_config['report_freq'] == 0:
                        logging.info('Pairwise ranking loss {}'.format(pairwise_ranking_loss))

            preds.extend(pred.detach().cpu().numpy() * 100)
            targets.extend(graph_batch.y.detach().cpu().numpy())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            n = graph_batch.num_graphs
            objs.update(loss.data.item(), n)

            if step % self.data_config['report_freq'] == 0:
                logging.info('train %03d %e', step, objs.avg)

        fig = utils.scatter_plot(np.array(preds), np.array(targets), xlabel='Predicted', ylabel='True', title='')
        fig.savefig(os.path.join(self.log_dir, 'pred_vs_true_train_{}.jpg'.format(epoch)))
        plt.close()
        train_results = utils.evaluate_metrics(np.array(targets), np.array(preds), prediction_is_first_arg=False)

        return objs.avg, train_results
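
The pairwise ranking term above loops over all pairs in Python, which costs O(n^2) small tensor operations per batch. A vectorized sketch of the same margin-based hinge (not the original code; ties in `y` are handled slightly differently here):

    import torch

    def pairwise_ranking_loss(pred, y, margin=0.1):
        # Hinge on all ordered pairs (i, j) with y[i] > y[j]:
        # penalize whenever pred[i] does not exceed pred[j] by at least `margin`.
        pred = pred.view(-1)
        y = y.view(-1)
        diff = pred.unsqueeze(1) - pred.unsqueeze(0)        # diff[i, j] = pred[i] - pred[j]
        mask = (y.unsqueeze(1) > y.unsqueeze(0)).float()    # 1 where y[i] > y[j]
        hinge = torch.clamp(margin - diff, min=0.0)
        return (hinge * mask).sum() / mask.sum().clamp(min=1.0)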
Example #4
    def train_epoch(self, train_queue, valid_queue, model, criterion,
                    optimizer, lr, epoch):
        objs = utils.AvgrageMeter()

        # TRAINING
        preds = []
        targets = []

        model.train()

        for step, (arch_path_enc, y_true) in enumerate(train_queue):
            arch_path_enc = arch_path_enc.to(self.device).float()
            y_true = y_true.to(self.device).float()

            pred = self.model(arch_path_enc)
            if self.model_config['loss:loss_log_transform']:
                loss = torch.mean(
                    torch.abs((self.normalize_data(pred) /
                               self.normalize_data(y_true / 100)) - 1))
            else:
                loss = criterion(1 - pred, 1 - y_true / 100)
            if self.model_config['loss:pairwise_ranking_loss']:
                m = 0.1
                pairwise_ranking_loss = []
                sort_idx = torch.argsort(y_true, descending=True)
                for idx, idx_y_i in enumerate(sort_idx):
                    for idx_y_i_p1 in sort_idx[idx + 1:]:
                        pairwise_ranking_loss.append(
                            torch.max(torch.tensor(0.0, dtype=torch.float),
                                      m - (pred[idx_y_i] - pred[idx_y_i_p1])))
                pairwise_ranking_loss = torch.mean(
                    torch.stack(pairwise_ranking_loss))

                loss += pairwise_ranking_loss
                if step % self.data_config['report_freq'] == 0:
                    logging.info('Pairwise ranking loss {}'.format(
                        pairwise_ranking_loss))

            preds.extend(pred.detach().cpu().numpy() * 100)
            targets.extend(y_true.detach().cpu().numpy())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            objs.update(loss.data.item(), len(arch_path_enc))

            if step % self.data_config['report_freq'] == 0:
                logging.info('train %03d %e', step, objs.avg)

        fig = utils.scatter_plot(np.array(preds),
                                 np.array(targets),
                                 xlabel='Predicted',
                                 ylabel='True',
                                 title='')
        fig.savefig(
            os.path.join(self.log_dir,
                         'pred_vs_true_train_{}.jpg'.format(epoch)))
        plt.close()
        train_results = utils.evaluate_metrics(np.array(targets),
                                               np.array(preds),
                                               prediction_is_first_arg=False)

        return objs.avg, train_results
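
All four methods are presumably driven by an outer loop on the trainer object. A minimal usage sketch; the `trainer` instance, criterion, optimizer, learning rate, and epoch count here are illustrative assumptions, not part of the examples above:

    import logging
    import torch

    # Assumes `trainer`, `train_queue`, and `valid_queue` have already been constructed
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(trainer.model.parameters(), lr=1e-3)

    for epoch in range(100):
        train_obj, train_results = trainer.train_epoch(train_queue, valid_queue, trainer.model,
                                                       criterion, optimizer, lr=1e-3, epoch=epoch)
        valid_obj, val_results = trainer.infer(train_queue, valid_queue, trainer.model,
                                               criterion, optimizer, lr=1e-3, epoch=epoch)
        logging.info('epoch %d | train %e | valid %e', epoch, train_obj, valid_obj)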