Example #1
def evaluate_tsc(filename: str = 'ElectricDevices', datadir: str = '.data'):
    with torch.no_grad():
        # Load model
        path = next(get_path(datadir).glob(f'{filename}*.zip'))
        model = torch.jit.load(str(path))
        model.eval()

        # Fetch the data and prepare it
        hdf = load_hdf(filename=filename, datadir=datadir)

        # Convert the NumPy arrays into PyTorch tensors
        X = torch.FloatTensor(hdf['X_test']).unsqueeze(1)
        y = torch.LongTensor(hdf['y_test']) - 1

        # Convert the NumPy arrays X and y into a PyTorch data set
        dataset = TensorDataset(X, y)

        # Pack the data sets into data loaders, enabling iteration
        dl = DataLoader(dataset, batch_size=32)

        # Set up progress bar
        with tqdm(total=len(dl) * dl.batch_size, desc='Evaluating') as pbar:

            # Calculate mean accuracy
            acc_fn = Accuracy()
            acc = 0.
            for x, y in dl:
                yhat = model(x)
                acc += acc_fn(yhat, y)
                pbar.update(dl.batch_size)
            acc /= len(dl)

        return acc
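A minimal usage sketch for the function above, assuming the project helpers it relies on (get_path, load_hdf, Accuracy) are importable and a trained TorchScript model has already been saved under the data directory:

# Hypothetical call site; file and directory names follow the defaults above.
if __name__ == '__main__':
    test_acc = evaluate_tsc(filename='ElectricDevices', datadir='.data')
    print(f'Mean test accuracy: {test_acc:.4f}')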
Example #2
def eval_minibatch(model, images, labels, mean, std, loss_func):
    """
    Calculates the loss and accuracy of the model over a minibatch.
    :param model: the model object
    :param images: minibatch images, which lie in the range [0, 1]; they are normalized
     with the (mean, std) used during training before being fed into the model
    :param labels: ground-truth labels for the minibatch
    :param mean: mean of each channel of the images, a list of 3 floats
    :param std: standard deviation of each channel of the images, a list of 3 floats
    :param loss_func: the loss function object used to calculate the error
    :return: the minibatch accuracy (count) and the loss value
    """
    model.eval()
    count = 0
    # input images are normalized to [0,1]. After normalization with
    # mean(per channel)=0.5, std(per channel)=0.5, x_norm lies in the range [-1,1]
    x_norm = normalization_function(images, mean, std)
    with torch.no_grad():
        if model.module.type == 'SNN':
            output, _, _ = model(x_norm, 0, False)
            output = output / model.module.timesteps
        elif model.module.type == 'ANN':
            output = model(x_norm)
    count = Accuracy(output, labels)
    loss = loss_func(output, labels)
    return count, loss
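For reference, a minimal sketch of what the normalization_function used above might look like, assuming images shaped (N, C, H, W) in [0, 1] and per-channel mean/std lists (the real helper is not shown here):

import torch

def normalization_function(images, mean, std):
    # Broadcast the per-channel statistics over (N, C, H, W) and normalize.
    mean = torch.tensor(mean, device=images.device).view(1, -1, 1, 1)
    std = torch.tensor(std, device=images.device).view(1, -1, 1, 1)
    return (images - mean) / std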
Example #3
import numpy as np

from utils import load_data, optimizer, Accuracy

np.random.seed(2020)

# Data generation
train_data, test_data = load_data('RedWine')
x_train, y_train = train_data[0], train_data[1]
x_test, y_test = test_data[0], test_data[1]

# Hyper-parameters
_epoch = 1000
_batch_size = 32
_lr = 0.001
_optim = 'SGD'

# Build model (LogisticRegression is assumed to be defined in the surrounding project)
model = LogisticRegression(num_features=x_train.shape[1])
optimizer = optimizer(_optim)

# Solve
print('Train start!')
model.fit(x=x_train, y=y_train, epochs=_epoch, batch_size=_batch_size, lr=_lr, optim=optimizer)
print('Training done.')

# Inference
print('Predict on test data')
inference = model.eval(x_test)

# Assess model
acc = Accuracy(inference, y_test)
print('Accuracy on Test Data : %.4f' % acc)
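In this script Accuracy is called as a plain function on predictions and targets and returns a single score. A minimal sketch of such a helper (an assumption; the real utils.Accuracy is not shown) could be:

import numpy as np

def Accuracy(pred, target):
    # Fraction of predictions that exactly match the targets.
    pred = np.asarray(pred).reshape(-1)
    target = np.asarray(target).reshape(-1)
    return float(np.mean(pred == target))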
Example #4
def train(opt):
    if opt.use_model == 'bert':
        # datasets
        train_set = BERTDGLREDataset(opt.train_set, opt.train_set_save, word2id, ner2id, rel2id, dataset_type='train',
                                     opt=opt)
        dev_set = BERTDGLREDataset(opt.dev_set, opt.dev_set_save, word2id, ner2id, rel2id, dataset_type='dev',
                                   instance_in_train=train_set.instance_in_train, opt=opt)

        # dataloaders
        train_loader = DGLREDataloader(train_set, batch_size=opt.batch_size, shuffle=True,
                                       negativa_alpha=opt.negativa_alpha)
        dev_loader = DGLREDataloader(dev_set, batch_size=opt.test_batch_size, dataset_type='dev')

        model = GAIN_BERT(opt)

    elif opt.use_model == 'bilstm':
        # datasets
        train_set = DGLREDataset(opt.train_set, opt.train_set_save, word2id, ner2id, rel2id, dataset_type='train',
                                 opt=opt)
        dev_set = DGLREDataset(opt.dev_set, opt.dev_set_save, word2id, ner2id, rel2id, dataset_type='dev',
                               instance_in_train=train_set.instance_in_train, opt=opt)

        # dataloaders
        train_loader = DGLREDataloader(train_set, batch_size=opt.batch_size, shuffle=True,
                                       negativa_alpha=opt.negativa_alpha)
        dev_loader = DGLREDataloader(dev_set, batch_size=opt.test_batch_size, dataset_type='dev')

        model = GAIN_GloVe(opt)
    else:
        raise ValueError('please choose a model from [bert, bilstm].')

    print(model.parameters)
    print_params(model)

    start_epoch = 1
    pretrain_model = opt.pretrain_model
    lr = opt.lr
    model_name = opt.model_name

    if pretrain_model != '':
        chkpt = torch.load(pretrain_model, map_location=torch.device('cpu'))
        model.load_state_dict(chkpt['checkpoint'])
        logging('load model from {}'.format(pretrain_model))
        start_epoch = chkpt['epoch'] + 1
        lr = chkpt['lr']
        logging('resume from epoch {} with lr {}'.format(start_epoch, lr))
    else:
        logging('training from scratch with lr {}'.format(lr))

    model = get_cuda(model)

    if opt.use_model == 'bert':
        bert_param_ids = list(map(id, model.bert.parameters()))
        base_params = filter(lambda p: p.requires_grad and id(p) not in bert_param_ids, model.parameters())

        optimizer = optim.AdamW([
            {'params': model.bert.parameters(), 'lr': lr * 0.01},
            {'params': base_params, 'weight_decay': opt.weight_decay}
        ], lr=lr)
    else:
        optimizer = optim.AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=lr,
                                weight_decay=opt.weight_decay)

    BCE = nn.BCEWithLogitsLoss(reduction='none')

    if opt.coslr:
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=(opt.epoch // 4) + 1)

    checkpoint_dir = opt.checkpoint_dir
    if not os.path.exists(checkpoint_dir):
        os.mkdir(checkpoint_dir)
    fig_result_dir = opt.fig_result_dir
    if not os.path.exists(fig_result_dir):
        os.mkdir(fig_result_dir)

    best_ign_auc = 0.0
    best_ign_f1 = 0.0
    best_epoch = 0

    model.train()

    global_step = 0
    total_loss = 0

    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim(0.0, 1.0)
    plt.xlim(0.0, 1.0)
    plt.title('Precision-Recall')
    plt.grid(True)

    acc_NA, acc_not_NA, acc_total = Accuracy(), Accuracy(), Accuracy()
    logging('begin..')

    for epoch in range(start_epoch, opt.epoch + 1):
        start_time = time.time()
        for acc in [acc_NA, acc_not_NA, acc_total]:
            acc.clear()

        for ii, d in enumerate(train_loader):
            relation_multi_label = d['relation_multi_label']
            relation_mask = d['relation_mask']
            relation_label = d['relation_label']

            predictions = model(words=d['context_idxs'],
                                src_lengths=d['context_word_length'],
                                mask=d['context_word_mask'],
                                entity_type=d['context_ner'],
                                entity_id=d['context_pos'],
                                mention_id=d['context_mention'],
                                distance=None,
                                entity2mention_table=d['entity2mention_table'],
                                graphs=d['graphs'],
                                h_t_pairs=d['h_t_pairs'],
                                relation_mask=relation_mask,
                                path_table=d['path_table'],
                                entity_graphs=d['entity_graphs'],
                                ht_pair_distance=d['ht_pair_distance']
                                )
            loss = torch.sum(BCE(predictions, relation_multi_label) * relation_mask.unsqueeze(2)) / (
                    opt.relation_nums * torch.sum(relation_mask))

            optimizer.zero_grad()
            loss.backward()

            if opt.clip != -1:
                nn.utils.clip_grad_value_(model.parameters(), opt.clip)
            optimizer.step()
            if opt.coslr:
                scheduler.step(epoch)

            output = torch.argmax(predictions, dim=-1)
            output = output.data.cpu().numpy()
            relation_label = relation_label.data.cpu().numpy()

            for i in range(output.shape[0]):
                for j in range(output.shape[1]):
                    label = relation_label[i][j]
                    if label < 0:
                        break

                    is_correct = (output[i][j] == label)
                    if label == 0:
                        acc_NA.add(is_correct)
                    else:
                        acc_not_NA.add(is_correct)

                    acc_total.add(is_correct)

            global_step += 1
            total_loss += loss.item()

            log_step = opt.log_step
            if global_step % log_step == 0:
                cur_loss = total_loss / log_step
                elapsed = time.time() - start_time
                logging(
                    '| epoch {:2d} | step {:4d} |  ms/b {:5.2f} | train loss {:5.3f} | NA acc: {:4.2f} | not NA acc: {:4.2f}  | tot acc: {:4.2f} '.format(
                        epoch, global_step, elapsed * 1000 / log_step, cur_loss * 1000, acc_NA.get(), acc_not_NA.get(),
                        acc_total.get()))
                total_loss = 0
                start_time = time.time()

        if epoch % opt.test_epoch == 0:
            logging('-' * 89)
            eval_start_time = time.time()
            model.eval()
            ign_f1, ign_auc, pr_x, pr_y = test(model, dev_loader, model_name, id2rel=id2rel)
            model.train()
            logging('| epoch {:3d} | time: {:5.2f}s'.format(epoch, time.time() - eval_start_time))
            logging('-' * 89)

            if ign_f1 > best_ign_f1:
                best_ign_f1 = ign_f1
                best_ign_auc = ign_auc
                best_epoch = epoch
                path = os.path.join(checkpoint_dir, model_name + '_best.pt')
                torch.save({
                    'epoch': epoch,
                    'checkpoint': model.state_dict(),
                    'lr': lr,
                    'best_ign_f1': ign_f1,
                    'best_ign_auc': ign_auc,
                    'best_epoch': epoch
                }, path)

                plt.plot(pr_x, pr_y, lw=2, label=str(epoch))
                plt.legend(loc="upper right")
                plt.savefig(os.path.join(fig_result_dir, model_name))

        if epoch % opt.save_model_freq == 0:
            path = os.path.join(checkpoint_dir, model_name + '_{}.pt'.format(epoch))
            torch.save({
                'epoch': epoch,
                'lr': lr,
                'checkpoint': model.state_dict()
            }, path)

    print("Finish training")
    print("Best epoch = %d | Best Ign F1 = %f" % (best_epoch, best_ign_f1))
    print("Storing best result...")
    print("Finish storing")
Example #5
def evaluate_by_rel(submission_answer,
                    truth,
                    fact_in_train_annotated,
                    fact_in_train_distant,
                    use_wikidataid=True):
    print("Warning: different definition of Ign")
    rel2id = json.load(open(os.path.join("../src/prepro_data", 'rel2id.json')))
    id2rel = {v: k for k, v in rel2id.items()}

    accu_re = Accuracy()
    accu_re_ign_train_annotated = Accuracy()
    accu_re_ign_train_distant = Accuracy()
    accu_evi = Accuracy()

    accu_re_ign_train_annotated_by_rel = [
        Accuracy() for _ in range(len(rel2id))
    ]
    accu_re_ign_train_distant_by_rel = [Accuracy() for _ in range(len(rel2id))]
    accu_re_by_rel = [Accuracy() for _ in range(len(rel2id))]
    accu_evi_by_rel = [Accuracy() for _ in range(len(rel2id))]

    std = {}
    std_ign_train_annotated = set()
    std_ign_train_distant = set()
    tot_evidences = 0
    titleset = set([])

    title2vectexSet = {}

    for x in truth:
        title = x['title']
        titleset.add(title)

        vertexSet = x['vertexSet']
        title2vectexSet[title] = vertexSet

        for label in x['labels']:
            r = label['r']

            if use_wikidataid:
                rel = r
                r = rel2id[rel]
            else:
                rel = id2rel[r]

            h_idx = label['h']
            t_idx = label['t']
            std[(title, rel, h_idx, t_idx)] = set(label['evidence'])

            accu_evi.inc_labels(len(label['evidence']))
            accu_evi_by_rel[r].inc_labels(len(label['evidence']))

            accu_re_by_rel[r].inc_labels()

            in_train_annotated = False
            in_train_distant = False
            for n1 in vertexSet[h_idx]:
                for n2 in vertexSet[t_idx]:
                    if (n1['name'], n2['name'],
                            rel) in fact_in_train_annotated:
                        in_train_annotated = True
                    if (n1['name'], n2['name'], rel) in fact_in_train_distant:
                        in_train_distant = True

            if not in_train_annotated:
                std_ign_train_annotated.add((title, r, h_idx, t_idx))
                accu_re_ign_train_annotated.inc_labels()
                accu_re_ign_train_annotated_by_rel[r].inc_labels()

            if not in_train_distant:
                std_ign_train_distant.add((title, r, h_idx, t_idx))
                accu_re_ign_train_distant.inc_labels()
                accu_re_ign_train_distant_by_rel[r].inc_labels()
    '''
	for title, r, h_idx, t_idx in std_ign_train_annotated:
		accu_re_ign_train_annotated.inc_labels()
		accu_re_ign_train_annotated_by_rel[r].inc_labels()

	for title, r, h_idx, t_idx in std_ign_train_distant:
		accu_re_ign_train_distant.inc_labels()
		accu_re_ign_train_distant_by_rel[r].inc_labels()
	#'''
    accu_re.inc_labels(len(std))

    correct_in_train_annotated = 0
    correct_in_train_distant = 0
    titleset2 = set([])
    for x in submission_answer:
        if isinstance(x, tuple):
            title, h_idx, t_idx, r = x
        else:
            title = x['title']
            h_idx = x['h_idx']
            t_idx = x['t_idx']
            r = x['r']

        if use_wikidataid:
            rel = r
            r = rel2id[rel]
        else:
            rel = id2rel[r]
        titleset2.add(title)
        if title not in title2vectexSet:
            continue
        vertexSet = title2vectexSet[title]

        if 'evidence' in x:
            evi = set(x['evidence'])
        else:
            evi = set([])

        accu_re.inc_pos()
        accu_re_by_rel[r].inc_pos()
        accu_evi.inc_pos(len(evi))
        accu_evi_by_rel[r].inc_pos(len(evi))

        #'''
        in_train_annotated = in_train_distant = False
        for n1 in vertexSet[h_idx]:
            for n2 in vertexSet[t_idx]:
                if (n1['name'], n2['name'], rel) in fact_in_train_annotated:
                    in_train_annotated = True
                if (n1['name'], n2['name'], rel) in fact_in_train_distant:
                    in_train_distant = True
        #'''

        if not in_train_annotated:
            accu_re_ign_train_annotated.inc_pos()
            accu_re_ign_train_annotated_by_rel[r].inc_pos()
        if not in_train_distant:
            accu_re_ign_train_distant.inc_pos()
            accu_re_ign_train_distant_by_rel[r].inc_pos()

        if (title, rel, h_idx, t_idx) in std:
            accu_re.inc_tp()
            accu_re_by_rel[r].inc_tp()

            stdevi = std[(title, rel, h_idx, t_idx)]
            correct_evidence = len(stdevi & evi)
            accu_evi.inc_tp(correct_evidence)
            accu_evi_by_rel[r].inc_tp(correct_evidence)

            if in_train_annotated:
                correct_in_train_annotated += 1
            if in_train_distant:
                correct_in_train_distant += 1

            if not in_train_annotated:
                accu_re_ign_train_annotated.inc_tp()
                accu_re_ign_train_annotated_by_rel[r].inc_tp()
            if not in_train_distant:
                accu_re_ign_train_distant.inc_tp()
                accu_re_ign_train_distant_by_rel[r].inc_tp()

    re_p, re_r, re_f1 = accu_re.get_result()
    evi_p, evi_r, evi_f1 = accu_evi.get_result()

    return accu_re, accu_re_ign_train_annotated, accu_re_ign_train_distant, accu_evi,\
        accu_re_by_rel, accu_re_ign_train_annotated_by_rel, accu_re_ign_train_distant_by_rel, accu_evi_by_rel
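Here Accuracy behaves as a precision/recall counter rather than a simple ratio: inc_labels() counts gold labels, inc_pos() counts predictions made, inc_tp() counts correct predictions, and get_result() reports precision, recall and F1. A minimal sketch consistent with those calls (an assumption; the real class is not shown):

class Accuracy:
    """Precision/recall/F1 counter matching the calls above (sketch)."""

    def __init__(self):
        self.tp = 0       # correct predictions
        self.pos = 0      # predictions made
        self.labels = 0   # gold labels

    def inc_tp(self, n=1):
        self.tp += n

    def inc_pos(self, n=1):
        self.pos += n

    def inc_labels(self, n=1):
        self.labels += n

    def get_result(self):
        p = self.tp / self.pos if self.pos else 0.0
        r = self.tp / self.labels if self.labels else 0.0
        f1 = 2 * p * r / (p + r) if (p + r) else 0.0
        return p, r, f1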
print('Initial weight: \n', model.W.reshape(-1))
print()

model.fit(x=x_data,
          y=y_data,
          epochs=_epoch,
          batch_size=_batch_size,
          lr=_lr,
          optim=optimizer)
print('Trained weight: \n', model.W.reshape(-1))
print()

# Model evaluation
inference = model.eval(x_data)

# Accuracy calculation
acc = Accuracy(inference, y_data)
print('Accuracy on Check Data : %.4f \n' % acc)
'''
You should get results as:

Initial weight:
 [0. 0. 0. 0.]

Trained weight: 
 [-0.30839267  0.07120854  0.27459075  0.08573039  0.34718609]

Accuracy on Check Data : 0.8000

'''
    def _accuracy(self):
        self.TrainAccuracy = Accuracy(type=self.config.task_type)
        self.ValidAccuracy = Accuracy(type=self.config.task_type)
        self.TestAccuracy = Accuracy(type=self.config.task_type)
    def _accuracy(self):
        self.TrainAccuracy = Accuracy(type="semisupervised")
        self.ValidAccuracy = Accuracy(type="semisupervised")
        self.TestAccuracy = Accuracy(type="semisupervised")
class SemiSupGNNWrapper(GNNWrapper):
    class Config:
        def __init__(self):
            self.device = None
            self.use_cuda = None
            self.dataset_path = None
            self.log_interval = None
            self.tensorboard = None
            self.task_type = None

            # hyperparams
            self.lrw = None
            self.loss_f = None
            self.epochs = None
            self.convergence_threshold = None
            self.max_iterations = None
            self.n_nodes = None
            self.state_dim = None
            self.label_dim = None
            self.output_dim = None
            self.graph_based = False
            self.activation = torch.nn.Tanh()
            self.state_transition_hidden_dims = None
            self.output_function_hidden_dims = None

            # optional
            # self.loss_w = 1.
            # self.energy_weight = 0.
            # self.l2_weight = 0.

    def __init__(self, config: Config):
        super().__init__(config)

    def _data_loader(self, dset):  # handle dataset data and metadata
        self.dset = dset.to(self.config.device)
        self.config.label_dim = self.dset.node_label_dim
        self.config.n_nodes = self.dset.num_nodes
        self.config.output_dim = self.dset.num_classes

    def _accuracy(self):
        self.TrainAccuracy = Accuracy(type="semisupervised")
        self.ValidAccuracy = Accuracy(type="semisupervised")
        self.TestAccuracy = Accuracy(type="semisupervised")

    def train_step(self, epoch):
        self.gnn.train()
        data = self.dset
        self.optimizer.zero_grad()
        self.TrainAccuracy.reset()
        # output computation
        output, iterations = self.gnn(data.edges, data.agg_matrix,
                                      data.node_labels)
        # loss computation - semisupervised
        loss = self.criterion(output[data.idx_train],
                              data.targets[data.idx_train])

        loss.backward()

        # with torch.no_grad():
        #     for name, param in self.gnn.named_parameters():
        #         if "state_transition_function" in name:
        #             #self.writer.add_histogram("gradient " + name, param.grad, epoch)
        #             param.grad = 0*  param.grad

        self.optimizer.step()

        # # updating accuracy
        # batch_acc = self.TrainAccuracy.update((output, target), batch_compute=True)
        with torch.no_grad():  # Accuracy computation
            # accuracy_train = torch.mean(
            #     (torch.argmax(output[data.idx_train], dim=-1) == data.targets[data.idx_train]).float())
            self.TrainAccuracy.update(output, data.targets, idx=data.idx_train)
            accuracy_train = self.TrainAccuracy.compute()

            if epoch % self.config.log_interval == 0:
                print(
                    'Train Epoch: {} \t Mean Loss: {:.6f}\tAccuracy Full Batch: {:.6f} \t  Best Accuracy : {:.6f}  \t Iterations: {}'
                    .format(epoch, loss, accuracy_train,
                            self.TrainAccuracy.get_best(), iterations))

                if self.config.tensorboard:
                    self.writer.add_scalar('Training Accuracy', accuracy_train,
                                           epoch)
                    self.writer.add_scalar('Training Loss', loss, epoch)
                    self.writer.add_scalar('Training Iterations', iterations,
                                           epoch)
                    for name, param in self.gnn.named_parameters():
                        self.writer.add_histogram(name, param, epoch)
                        self.writer.add_histogram("gradient " + name,
                                                  param.grad, epoch)
        # self.TrainAccuracy.reset()
        return output  # used for plotting

    def predict(self, edges, agg_matrix, node_labels):
        return self.gnn(edges, agg_matrix, node_labels)

    def test_step(self, epoch):
        ####  TEST
        self.gnn.eval()
        data = self.dset
        self.TestAccuracy.reset()
        with torch.no_grad():
            output, iterations = self.gnn(data.edges, data.agg_matrix,
                                          data.node_labels)
            test_loss = self.criterion(output[data.idx_test],
                                       data.targets[data.idx_test])

            self.TestAccuracy.update(output, data.targets, idx=data.idx_test)
            acc_test = self.TestAccuracy.compute()
            # acc_test = torch.mean(
            #     (torch.argmax(output[data.idx_test], dim=-1) == data.targets[data.idx_test]).float())

            if epoch % self.config.log_interval == 0:
                print(
                    'Test set: Average loss: {:.4f}, Accuracy:  ({:.4f}%) , Best Accuracy:  ({:.4f}%)'
                    .format(test_loss, acc_test, self.TestAccuracy.get_best()))

                if self.config.tensorboard:
                    self.writer.add_scalar('Test Accuracy', acc_test, epoch)
                    self.writer.add_scalar('Test Loss', test_loss, epoch)
                    self.writer.add_scalar('Test Iterations', iterations,
                                           epoch)

    def valid_step(self, epoch):
        ####  VALID
        self.gnn.eval()
        data = self.dset
        self.ValidAccuracy.reset()
        with torch.no_grad():
            output, iterations = self.gnn(data.edges, data.agg_matrix,
                                          data.node_labels)
            test_loss = self.criterion(output[data.idx_valid],
                                       data.targets[data.idx_valid])

            self.ValidAccuracy.update(output, data.targets, idx=data.idx_valid)
            acc_valid = self.ValidAccuracy.compute()
            # acc_test = torch.mean(
            #     (torch.argmax(output[data.idx_test], dim=-1) == data.targets[data.idx_test]).float())

            if epoch % self.config.log_interval == 0:
                print(
                    'Valid set: Average loss: {:.4f}, Accuracy:  ({:.4f}%) , Best Accuracy:  ({:.4f}%)'
                    .format(test_loss, acc_valid,
                            self.ValidAccuracy.get_best()))

                if self.config.tensorboard:
                    self.writer.add_scalar('Valid Accuracy', acc_valid, epoch)
                    self.writer.add_scalar('Valid Loss', test_loss, epoch)
                    self.writer.add_scalar('Valid Iterations', iterations,
                                           epoch)
class GNNWrapper:
    class Config:
        def __init__(self):
            self.device = None
            self.use_cuda = None
            self.dataset_path = None
            self.log_interval = None
            self.tensorboard = None
            self.task_type = None

            # hyperparams
            self.lrw = None
            self.loss_f = None
            self.epochs = None
            self.convergence_threshold = None
            self.max_iterations = None
            self.n_nodes = None
            self.state_dim = None
            self.label_dim = None
            self.output_dim = None
            self.graph_based = False
            self.activation = torch.nn.Tanh()
            self.state_transition_hidden_dims = None
            self.output_function_hidden_dims = None
            self.task_type = "semisupervised"

            # optional
            # self.loss_w = 1.
            # self.energy_weight = 0.
            # self.l2_weight = 0.

    def __init__(self, config: Config):
        self.config = config

        # to be populated
        self.optimizer = None
        self.criterion = None
        self.train_loader = None
        self.test_loader = None

        if self.config.tensorboard:
            self.writer = SummaryWriter('logs/tensorboard')
        self.first_flag_writer = True

    def __call__(self, dset, state_net=None, out_net=None):
        # handle the dataset info
        self._data_loader(dset)
        self.gnn = GNN(self.config, state_net, out_net).to(self.config.device)
        self._criterion()
        self._optimizer()
        self._accuracy()

    def _data_loader(self, dset):  # handle dataset data and metadata
        self.dset = dset.to(self.config.device)
        self.config.label_dim = self.dset.node_label_dim
        self.config.n_nodes = self.dset.num_nodes
        self.config.output_dim = self.dset.num_classes

    def _optimizer(self):
        # for name, param in self.gnn.named_parameters():
        #     if param.requires_grad:
        #         print(name, param.data)
        # exit()
        self.optimizer = optim.Adam(self.gnn.parameters(), lr=self.config.lrw)
        #self.optimizer = optim.SGD(self.gnn.parameters(), lr=self.config.lrw)

    def _criterion(self):
        self.criterion = nn.CrossEntropyLoss()

    def _accuracy(self):
        self.TrainAccuracy = Accuracy(type=self.config.task_type)
        self.ValidAccuracy = Accuracy(type=self.config.task_type)
        self.TestAccuracy = Accuracy(type=self.config.task_type)

    def train_step(self, epoch):
        self.gnn.train()
        data = self.dset
        self.optimizer.zero_grad()
        self.TrainAccuracy.reset()
        # output computation
        output, iterations = self.gnn(data.edges, data.agg_matrix,
                                      data.node_labels)
        # loss computation - semisupervised
        loss = self.criterion(output, data.targets)

        loss.backward()

        self.optimizer.step()

        # # updating accuracy
        # batch_acc = self.TrainAccuracy.update((output, target), batch_compute=True)
        with torch.no_grad():  # Accuracy computation
            # accuracy_train = torch.mean(
            #     (torch.argmax(output[data.idx_train], dim=-1) == data.targets[data.idx_train]).float())
            self.TrainAccuracy.update(output, data.targets)
            accuracy_train = self.TrainAccuracy.compute()

            if epoch % self.config.log_interval == 0:
                print(
                    'Train Epoch: {} \t Mean Loss: {:.6f}\tAccuracy Full Batch: {:.6f} \t  Best Accuracy : {:.6f}  \t Iterations: {}'
                    .format(epoch, loss, accuracy_train,
                            self.TrainAccuracy.get_best(), iterations))

                if self.config.tensorboard:
                    self.writer.add_scalar('Training Accuracy', accuracy_train,
                                           epoch)
                    self.writer.add_scalar('Training Loss', loss, epoch)
                    self.writer.add_scalar('Training Iterations', iterations,
                                           epoch)

                    for name, param in self.gnn.named_parameters():
                        self.writer.add_histogram(name, param, epoch)
        # self.TrainAccuracy.reset()

    def predict(self, edges, agg_matrix, node_labels):
        return self.gnn(edges, agg_matrix, node_labels)

    def test_step(self, epoch):
        ####  TEST
        self.gnn.eval()
        data = self.dset
        self.TestAccuracy.reset()
        with torch.no_grad():
            output, iterations = self.gnn(data.edges, data.agg_matrix,
                                          data.node_labels)
            test_loss = self.criterion(output, data.targets)

            self.TestAccuracy.update(output, data.targets)
            acc_test = self.TestAccuracy.compute()
            # acc_test = torch.mean(
            #     (torch.argmax(output[data.idx_test], dim=-1) == data.targets[data.idx_test]).float())

            if epoch % self.config.log_interval == 0:
                print(
                    'Test set: Average loss: {:.4f}, Accuracy:  ({:.4f}%) , Best Accuracy:  ({:.4f}%)'
                    .format(test_loss, acc_test, self.TestAccuracy.get_best()))

                if self.config.tensorboard:
                    self.writer.add_scalar('Test Accuracy', acc_test, epoch)
                    self.writer.add_scalar('Test Loss', test_loss, epoch)
                    self.writer.add_scalar('Test Iterations', iterations,
                                           epoch)

    def valid_step(self, epoch):
        ####  VALID
        self.gnn.eval()
        data = self.dset
        self.ValidAccuracy.reset()
        with torch.no_grad():
            output, iterations = self.gnn(data.edges, data.agg_matrix,
                                          data.node_labels)
            test_loss = self.criterion(output, data.targets)

            self.ValidAccuracy.update(output, data.targets)
            acc_valid = self.ValidAccuracy.compute()
            # acc_test = torch.mean(
            #     (torch.argmax(output[data.idx_test], dim=-1) == data.targets[data.idx_test]).float())

            if epoch % self.config.log_interval == 0:
                print(
                    'Valid set: Average loss: {:.4f}, Accuracy:  ({:.4f}%) , Best Accuracy:  ({:.4f}%)'
                    .format(test_loss, acc_valid,
                            self.ValidAccuracy.get_best()))

                if self.config.tensorboard:
                    self.writer.add_scalar('Valid Accuracy', acc_valid, epoch)
                    self.writer.add_scalar('Valid Loss', test_loss, epoch)
                    self.writer.add_scalar('Valid Iterations', iterations,
                                           epoch)
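A minimal sketch of how the wrapper above might be driven, assuming a compatible GNN model class, the Accuracy metric, and a dataset object exposing the attributes read in _data_loader (node_label_dim, num_nodes, num_classes, edges, agg_matrix, node_labels, targets); all concrete values below are placeholders:

import torch

cfg = GNNWrapper.Config()
cfg.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
cfg.lrw = 0.01
cfg.state_dim = 5
cfg.max_iterations = 50
cfg.convergence_threshold = 0.01
cfg.state_transition_hidden_dims = [16]
cfg.output_function_hidden_dims = [16]
cfg.epochs = 100
cfg.log_interval = 10
cfg.tensorboard = False

wrapper = GNNWrapper(cfg)
wrapper(dset)  # dset: a project-specific graph dataset object (assumed)
for epoch in range(1, cfg.epochs + 1):
    wrapper.train_step(epoch)
    if epoch % cfg.log_interval == 0:
        wrapper.test_step(epoch)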
Example #11
        out1_1, out1_2 = model(img)
        optimizer.zero_grad()
        heat_loss = heat_criterion(out1_1, label)
        offx_loss = offset_criterion(out1_2[:, :config.num_kpt] * label,
                                     offset[:, :config.num_kpt])
        offy_loss = offset_criterion(out1_2[:, config.num_kpt:] * label,
                                     offset[:, config.num_kpt:])
        loss = heat_loss * 1000 + offx_loss * 100 + offy_loss * 100
        loss.backward()
        optimizer.step()

        batch_loss += loss.item()
        batch_hloss += heat_loss.item()
        batch_xloss += offx_loss.item()
        batch_yloss += offy_loss.item()
        batch_acc += Accuracy((out1_1.data).cpu().numpy(),
                              (label.data).cpu().numpy(), config.num_kpt) * 100
        batch += 1
        if idx % config.display == 0 and idx:
            print(
                'epo.:{} iter.:{} loss:{:.6f} hloss:{:.6f} xloss:{:.6f} yloss:{:.6f} acc.:{:.2f}%'
                .format(epoch, idx, batch_loss / batch, batch_hloss / batch,
                        batch_xloss / batch, batch_yloss / batch,
                        batch_acc / batch))
            batch_loss, batch_hloss, batch_xloss, batch_yloss, batch_acc, batch = 0., 0., 0., 0., 0., 0.

    if epoch % config.evaluation == 0:
        ave_pck = evaluate(config, model)

        if ave_pck > max_pck:
            max_pck = ave_pck
            torch.save(
Example #12
def call_learnable_index(config):
    def schedule(epoch):
        min_lr = 0.0005
        decay_factor = 2
        times_to_decay = math.log(
            config['lr'] / min_lr) / math.log(decay_factor)
        decay_freq = config['EPOCHS'] // times_to_decay
        lr = config['lr'] / (decay_factor**(epoch // decay_freq))
        return lr

    callbacks = [tf.keras.callbacks.LearningRateScheduler(schedule)]

    if config['no_filters'] > 1 and config['learnable_depth'] != 0:
        epochs_pre_level = config['EPOCHS'] // config['learnable_depth']
    else:
        epochs_pre_level = config['EPOCHS']
    init_temp = 1
    min_temp = 0.1
    decay = 2**((math.log(init_temp / min_temp) / math.log(2)) /
                epochs_pre_level)
    callbacks.append(TempCallback(epochs_pre_level, decay))
    if config['no_filters'] > 1:
        my_model = get_model(config['depth'])
    else:
        my_model = Phi(config['out_dim'], config['in_dim'],
                       config['filter_width1'], config['filter_width2'],
                       config['phi_no_layers'], sine_func, False,
                       config['degree'], config['batch_normalization'], 'base')

    if config['NN'] or config['NN_dist']:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=config['lr'])  #, clipnorm=1)
    else:
        optimizer = tf.keras.optimizers.Adam(learning_rate=config['lr'],
                                             clipnorm=4)

    metrics = []
    cifar = False
    if config['on_hot_encode']:
        my_model.compile(optimizer,
                         loss=tf.keras.losses.BinaryCrossentropy(),
                         metrics=[tf.keras.metrics.BinaryAccuracy()])
    else:
        metrics = [
            Accuracy(config['accuracy_threshold'], name='accuracy'),
            Accuracy(config['accuracy_threshold'] / 10, name='accuracy_tenth'),
            Accuracy(config['accuracy_threshold'] / 100,
                     name='accuracy_hundredth'),
            AccuracyMult(config['accuracy_mult_threshold'],
                         name='rel_accuracy'),
            AvgAccuracyMult(name='avg_rel_accuracy'),
            MaxAccuracyMult(name='max_rel_accuracy')
        ]
        if cifar:
            metrics.append(AccuracyDist(0.7, name='accuracy_dist_low'))
            metrics.append(AccuracyDist(0.3, name='accuracy_dist_mean'))
            metrics.append(AccuracyDist(0.5, name='accuracy_dist_high'))

    if config['NN'] or config['NN_dist']:
        if config['no_filters'] == 1:
            if not config['NN_dist']:
                my_model.compile(optimizer,
                                 loss=NN_loss,
                                 metrics=[AccuracyNN(0.1)])
            else:
                my_model.compile(optimizer,
                                 loss=tf.keras.losses.MeanSquaredError(),
                                 metrics=metrics)
            history = my_model.fit(queries,
                                   res,
                                   epochs=config['EPOCHS'],
                                   batch_size=config['train_size'],
                                   callbacks=callbacks,
                                   validation_data=(test_queries, test_res))
        else:
            hist = []
            hist_indv = []
            my_model.fit_partially_learnable(
                queries, res, test_queries, test_res,
                config['EPOCHS'], hist, hist_indv,
                tf.keras.losses.MeanSquaredError(), metrics, callbacks,
                config['lr'])
        #history = my_model.fit(queries, res, epochs=config['EPOCHS'], batch_size=config['train_size'], callbacks=callbacks, validation_data=(test_queries, test_res))
        #if config['no_filters'] != 1:
        #    hist_indv = []
        #    my_model.fit_base_only(queries, res, test_queries, test_res, config['EPOCHS'], hist_indv, metrics, optimizer)
    else:
        history = my_model.fit(queries,
                               res,
                               epochs=config['EPOCHS'],
                               batch_size=config['train_size'],
                               callbacks=callbacks,
                               shuffle=False,
                               validation_data=(queries, res))
    if config['no_filters'] != 1:
        for i, h in enumerate(hist):
            hist_df = pd.DataFrame(h.history)
            with open(config['NAME'] + str(i) + 'indx_hist.json', 'w') as f:
                hist_df.to_json(f)

        for i, h in enumerate(hist_indv):
            hist_df = pd.DataFrame(h.history)
            with open(config['NAME'] + str(i) + 'base_hist.json', 'w') as f:
                hist_df.to_json(f)
    else:
        hist_df = pd.DataFrame(history.history)

        with open(config['NAME'] + '_hist.json', 'w') as f:
            hist_df.to_json(f)
            #json.dump(hist, f)
    return my_model
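The Accuracy metrics compiled above take an absolute error threshold and a name, which suggests a custom Keras metric that counts predictions falling within that threshold of the target. A minimal sketch of such a metric (an assumption; the real classes are not shown):

import tensorflow as tf

class Accuracy(tf.keras.metrics.Metric):
    """Fraction of predictions within `threshold` of the target (sketch)."""

    def __init__(self, threshold, name='accuracy', **kwargs):
        super().__init__(name=name, **kwargs)
        self.threshold = threshold
        self.hits = self.add_weight(name='hits', initializer='zeros')
        self.count = self.add_weight(name='count', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
        err = tf.abs(tf.cast(y_true, tf.float32) - tf.cast(y_pred, tf.float32))
        self.hits.assign_add(tf.reduce_sum(tf.cast(err <= self.threshold, tf.float32)))
        self.count.assign_add(tf.cast(tf.size(err), tf.float32))

    def result(self):
        return self.hits / self.count

    def reset_state(self):  # reset_states() in older TF 2.x versions
        self.hits.assign(0.0)
        self.count.assign(0.0)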
Example #13
def train_model(model: nn.Module,
                epochs: Optional[int] = None,
                batch_size: int = 32,
                val_size: Optional[float] = 0.1,
                learning_rate: float = 3e-3,
                momentum: float = 0.9,
                second_momentum: float = 0.999,
                weight_decay: float = 0.01,
                lr_reduction_factor: float = 0.3,
                lr_reduction_patience: int = 10,
                patience: Optional[int] = 30,
                datadir: str = '.data',
                filename: str = 'ElectricDevices',
                min_improvement: float = 1e-3):
    ''' Train the model.

    Args:
        model (PyTorch module):
            The model that is to be trained
        epochs (int or None):
            The number of epochs to train for. If None then will train
            indefinitely. Defaults to None.
        batch_size (int):
            The number of time series to use at a time. Defaults to 32.
        val_size (float or None):
            The proportion of the data set used for validation. If set to 0.0
            or None then no validation set will be used. Defaults to 0.1.
        learning_rate (float):
            The learning rate of the AdamW optimiser. Defaults to 3e-3.
        momentum (float):
            The first momentum of the AdamW optimiser. Defaults to 0.9.
        second_momentum (float):
            The second momentum of the AdamW optimiser. Defaults to 0.999.
        weight_decay (float):
            The amount of weight decay used in the AdamW optimiser. Defaults
            to 0.01.
        lr_reduction_factor (float):
            The factor that will be multiplied by the learning rate when
            validation hasn't improved for ``lr_reduction_patience`` many steps.
            Defaults to 0.3.
        lr_reduction_patience (int):
            The amount of epochs to allow no improvement in the validation
            score before reducing the learning rate by ``lr_reduction_factor``.
            Defaults to 10.
        patience (int or None):
            The number of epochs to allow no improvement in the validation
            score before stopping training. If None then no early stopping
            is used. Defaults to 30.
        datadir (str):
            The name of the data directory, from where it fetches the training
            data and where the model will be stored. Defaults to '.data'.
        filename (str):
            The name of the file to which the model will be saved. Defaults
            to 'ElectricDevices'
        min_improvement (float):
            Threshold for measuring a new best loss. A loss counts as being 
            the best if it's below previous_best - min_improvement. Defaults 
            to 1e-3.

    Returns:
        Tuple of two lists of length ``epochs``, the first one containing 
        the training scores and the second one containing the validation
        scores.
    '''

    # Warn the user if a model already exists
    user_response = 'y'
    if list(get_path(datadir).glob(f'{filename}*.zip')) != []:

        message = f'There is already a model stored called "{filename}" '\
                  f'in "{str(get_path(datadir))}", which will be overwritten.'\
                  f'\nDo you want to continue? (y/n)\n>>> '

        user_response = input(message)
        while user_response not in ['y', 'n']:
            user_response = input('Invalid input, please try again.\n>>> ')

    if user_response == 'n':
        return [], []

    elif user_response == 'y':

        #=====================
        # Setting up the data
        #=====================

        # Fetch the data and prepare it
        hdf = load_hdf(filename=filename, datadir=datadir)

        # Convert the NumPy arrays into PyTorch tensors
        X = torch.FloatTensor(hdf['X_train']).unsqueeze(1)
        y = torch.LongTensor(hdf['y_train']) - 1

        # Convert the NumPy arrays X and y into a PyTorch data set
        dataset = TensorDataset(X, y)

        # Split the data set into a training- and testing set
        if val_size is not None and val_size > 0:
            val_size = int(val_size * len(dataset))
            train_size = len(dataset) - val_size
            train, val = random_split(dataset, [train_size, val_size])

            # Pack the data sets into data loaders, enabling iteration
            train_dl = DataLoader(train, batch_size=batch_size, shuffle=True)
            val_dl = DataLoader(val, batch_size=batch_size, shuffle=True)

        else:
            train_dl = DataLoader(dataset, batch_size=batch_size, shuffle=True)

        #======================================
        # Setting up objects used for training
        #======================================

        # Build stationariser and attach it to X
        stat = TimeSeriesStationariser(X)
        model = nn.Sequential(stat, model)

        # Set the optimiser to be AdamW, which is Adam with weight decay
        optimiser = optim.AdamW(model.parameters(),
                                lr=learning_rate,
                                weight_decay=weight_decay,
                                betas=(momentum, second_momentum))

        # Set the learning rate scheduler to reduce the learning rate when
        # the validation performance isn't improving
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            optimiser,
            mode='max',
            factor=lr_reduction_factor,
            patience=lr_reduction_patience,
            threshold=min_improvement,
            threshold_mode='abs')

        # Set the loss function
        criterion = nn.CrossEntropyLoss()

        # Set the metric
        metric = Accuracy()

        # Initialise lists that will store the training scores and
        # the validation scores, for later inspection
        train_scores = []
        val_scores = []

        # Initialise the number of "bad epochs", being epochs with no progress
        bad_epochs = 0

        # Initialise the best validation score, which starts by being the
        # worst possible
        best = 0.

        # Output model data
        params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        print(model)
        print(f'Number of trainable model parameters: {params:,}')

        # Define the epoch iterator and set up progress bar if `epochs` is set
        if epochs is not None:
            epoch_mode = True
            epochs = tqdm(range(epochs), desc='Training model')
        else:
            epoch_mode = False
            epochs = it.count()

        # Main training loop
        for epoch in epochs:

            #==========
            # Training
            #==========

            # Enable training mode for the model, which enables dropout and
            # ensures that batch normalisation layers compute batch-wise means
            # and standard deviations
            model.train()

            # Reset the scores
            train_score = 0.
            val_score = 0.

            # Set progress bar description
            if not epoch_mode:
                pbar = tqdm(total=len(train_dl.dataset), desc=f'Epoch {epoch}')

            # Iterate through the training data and teach the model what
            # not to do
            for idx, (x_train, y_train) in enumerate(train_dl):

                # Reset all the gradients stored in the optimiser
                optimiser.zero_grad()

                # Get the model's predictions for the batch
                y_hat = model.forward(x_train)

                # Compute the loss. This indicates how badly the model
                # performed in its predictions
                loss = criterion(y_hat, y_train)

                # PyTorch automatically calculates the gradients on the fly, so
                # here we backpropagate the gradients for the computed loss
                loss.backward()

                # Using these gradients, the optimiser changes the weights in
                # the model ever so slightly, towards better performance.
                # It's important that we called `zero_grad` before, to make
                # sure that this change is only affected by the gradients from
                # the current batch
                optimiser.step()

                # Compute training score
                train_score += metric(y_hat, y_train)

                # Update the progress bar and its description (the bar only
                # exists when training without a fixed number of epochs)
                if not epoch_mode:
                    pbar.update(x_train.shape[0])
                    pbar.set_description(
                        f'Epoch {epoch} - train acc {train_score / (idx + 1):.4f}')

            # Store the mean training loss for later inspection
            train_score /= len(train_dl)
            train_scores.append(train_score)

            #============
            # Evaluation
            #============

            if val_size is not None and val_size > 0:

                # Set progress bar description
                if not epoch_mode:
                    pbar.set_description(f'Epoch {epoch} - Evaluating...')

                # Enable validation mode for the model, which disables dropout
                # and makes batch normalisation layers use their stored values
                # for mean and standard deviation
                model.eval()

                # Disable PyTorch's automatic calculation of gradients, as we
                # don't need that for evaluation
                with torch.no_grad():

                    # Iterate through the validation data to evaluate the
                    # model's performance on data that it hasn't seen before.
                    for x_val, y_val in val_dl:

                        # Get the model's predictions for the batch
                        y_hat = model.forward(x_val)

                        # Compute the metric
                        val_score += metric(y_hat, y_val) / len(val_dl)

                # Update the learning rate scheduler, which will reduce the
                # learning rate if no progress has been made for a while
                scheduler.step(val_score)

            #=============
            # Bookkeeping
            #=============

            # If score is best so far
            if val_scores == [] or val_score > best:

                # Set new best
                best = val_score + min_improvement

                # Remove previously saved models
                for fname in get_path(datadir).glob(f'{filename}*.zip'):
                    fname.unlink()

                # Reset number of epochs with no progress
                bad_epochs = 0

                # Save scripted version of the model
                path = get_path(datadir) / f'{filename}-{val_score:.4f}.zip'
                scripted_model = torch.jit.script(model)
                scripted_model.save(str(path))
            else:
                bad_epochs += 1

            # Update the progress bars
            pbar_desc = f' - train acc {train_score:.4f}'\
                        f' - val acc {val_score:.4f}'\
                        f' - bad epochs {bad_epochs}'
            if epoch_mode:
                epochs.set_description('Training model' + pbar_desc)
            else:
                pbar.set_description(f'Epoch {epoch}' + pbar_desc)
                pbar.close()

            # Store the score for later inspection
            val_scores.append(val_score)

            # Early stopping after `patience` epochs with no progress
            if patience is not None and bad_epochs > patience: break

        return train_scores, val_scores
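A minimal sketch of driving train_model, assuming a small 1D CNN as the classifier and the ElectricDevices layout used in these examples (the architecture and the class count of 7 are assumptions):

import torch.nn as nn

# Placeholder classifier; any nn.Module mapping (N, 1, T) -> (N, n_classes) works.
cnn = nn.Sequential(
    nn.Conv1d(1, 32, kernel_size=7, padding=3),
    nn.ReLU(),
    nn.AdaptiveAvgPool1d(1),
    nn.Flatten(),
    nn.Linear(32, 7),  # 7 classes assumed for ElectricDevices
)

train_scores, val_scores = train_model(cnn,
                                       epochs=100,
                                       filename='ElectricDevices',
                                       datadir='.data')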
Example #14
basic_model = Basic_Model(input_dim, hidden_size, seq_len, n_class).to(device)
loss = nn.CrossEntropyLoss()

# ur_model is assumed to be constructed earlier in the original script (not shown)
optimizer_ur = optim.Adam(ur_model.parameters(), lr)
optimizer_lstm = optim.Adam(basic_model.parameters(), lr)

losses, losses_test = [], []
epoch_acc_trn, epoch_acc_test = [], []
for epoch in range(max_epoch):
    epoch_loss, acc_trn = [], []
    for iter_, (imgs, labels) in enumerate(dataloader_trn):
        start_t = time.time()
        out_ur = ur_model(imgs.to(device))
        out_lstm = basic_model(imgs.to(device))

        acc_ur = Accuracy(out_ur, labels)
        acc_lstm = Accuracy(out_lstm, labels)
        acc_trn.append([acc_ur, acc_lstm])

        batch_loss = []
        out_optim = zip([out_ur, out_lstm], [optimizer_ur, optimizer_lstm])
        for out, optimizer in out_optim:
            loss_ = loss(out, labels.to(device))

            optimizer.zero_grad()
            loss_.backward()
            optimizer.step()
            batch_loss.append(loss_.cpu().detach().numpy())
        epoch_loss.append(batch_loss)
        iter_t = time.time() - start_t
        print('\rIteration [%4d/%4d] loss(ur): %.3f, loss(lstm): %.3f, time: %.2fs/it' \