Example #1
 def score(self, X, y, tag_inverse_transformer):
     test_pred = self.predict(X)
     pred_labels = pred2label(test_pred, tag_inverse_transformer)
     test_labels = pred2label(y, tag_inverse_transformer)
     
     print(utils.get_accuracy(test_labels, pred_labels))
     print()
     
     return utils.get_classification_score(test_labels, pred_labels)
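Throughout this gallery, `utils.get_accuracy` is a project-local helper rather than a library call, and its signature varies from example to example. For sequence labelling as in Example #1, such a helper typically computes flat token-level accuracy; a minimal hypothetical sketch (not the project's actual code):

def get_accuracy(true_seqs, pred_seqs):
    """Token-level accuracy over aligned lists of label sequences (hypothetical)."""
    correct = total = 0
    for true_seq, pred_seq in zip(true_seqs, pred_seqs):
        for true_tag, pred_tag in zip(true_seq, pred_seq):
            correct += int(true_tag == pred_tag)
            total += 1
    return correct / total if total else 0.0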
Example #2
 def valid_and_test(test_program, test_pyreader, get_valid_examples, process, mode, exe, fetch_list):
     """
     return auc and acc
     """
     # Get Batch Data
     batch_data = fluid.io.batch(get_valid_examples, args.batch_size, drop_last=False)
     test_pyreader.decorate_paddle_reader(batch_data)
     test_pyreader.start()
     pred_list = []
     while True:
         try:
             _pred = exe.run(program=test_program, fetch_list=[pred.name])
             pred_list += list(_pred)
         except fluid.core.EOFException:
             test_pyreader.reset()
             break
     pred_list = np.vstack(pred_list)
     if mode == "test":
         label_list = process.get_test_label()
     elif mode == "valid":
         label_list = process.get_valid_label()
     if args.task_mode == "pairwise":
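         # Pairwise scores arrive in [-1, 1]; rescaling to [0, 1] and stacking
         # (1 - p, p) gives the AUC metric two-class probabilities.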
         pred_list = (pred_list + 1) / 2
         pred_list = np.hstack(
             (np.ones_like(pred_list) - pred_list, pred_list))
     metric.reset()
     metric.update(pred_list, label_list)
     auc = metric.eval()
     if args.compute_accuracy:
         acc = utils.get_accuracy(pred_list, label_list, args.task_mode,
                                  args.lamda)
         return auc, acc
     else:
         return auc
Example #3
    def test(self, loader, model, mask, epoch_or_phase):
        """Tests the model and return the accuracy"""
        criterion = torch.nn.CrossEntropyLoss().to(self.device)
        model.eval()
        losses, batch_time, accuracy = AverageMeter(), AverageMeter(), AverageMeter()
        mask = to_device(mask, self.device)

        with torch.no_grad():
            start = time.time()
            for inputs, labels in loader:
                # Get outputs
                inputs, labels = to_device((inputs, labels), self.device)
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                losses.update(loss.data, inputs.size(0))

                # Measure accuracy
                prob = torch.softmax(outputs, dim=1)
                acc = get_accuracy(prob, labels, mask)
                accuracy.update(acc, labels.size(0))
                batch_time.update(time.time() - start)
                start = time.time()

        self.logger.info(
            f'==> Test [{epoch_or_phase}]:\tTime:{batch_time.sum:.4f}\tLoss:{losses.avg:.4f}\tAcc:{accuracy.avg:.4f}')
        return accuracy.avg
Example #4
 def evaluate(self, x, y, cm=None):
     y_pred = self.predict(x)
     if cm is None:
         return utils.get_accuracy(y_pred, y)
     labels = np.unique(y)
     confusion_matrix = sklearn.metrics.confusion_matrix(
         y, y_pred, labels=labels, normalize='true')
     fig, ax = plt.subplots()
     fig.set_size_inches(6, 6)
     sklearn.metrics.ConfusionMatrixDisplay(
         confusion_matrix, display_labels=labels).plot(
             ax=ax, cmap='YlOrBr', values_format='.2%')
     plt.savefig(cm, dpi=200)
     fig.clf()
     return utils.get_accuracy(y_pred, y)
Example #5
    def add_train_ops(self, num_classes, joint_rep, minibatch):
        """Add ops for training in the computation graph.

    Args:
      num_classes: number of classes to predict in the task.
      joint_rep: the joint sentence representation if the input is sentence
        pairs or the representation for the sentence if the input is a single
        sentence.
      minibatch: a minibatch of sequences of embeddings.
    Returns:
      train_accuracy: the accuracy on the training dataset
      loss: training loss.
      opt_step: training op.
    """
        if self.linear_classifier is None:
            classifier_layers = []
            classifier_layers.append(snt.Linear(num_classes))
            self.linear_classifier = snt.Sequential(classifier_layers)
        logits = self.linear_classifier(joint_rep)

        # Losses and optimizer.
        def get_loss(logits, labels):
            return tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                               logits=logits))

        loss = get_loss(logits, minibatch.sentiment)
        train_accuracy = utils.get_accuracy(logits, minibatch.sentiment)
        opt_step = self._add_optimize_op(loss)
        return train_accuracy, loss, opt_step
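Here `utils.get_accuracy(logits, minibatch.sentiment)` builds a graph op. A sketch of the usual argmax-vs-label accuracy op under that assumption (TF1 style, to match the example; the real helper may differ):

import tensorflow as tf

def get_accuracy(logits, labels):
    # Assumed behaviour: fraction of examples whose argmax matches the label.
    predictions = tf.argmax(logits, axis=1, output_type=tf.int64)
    correct = tf.equal(predictions, tf.cast(labels, tf.int64))
    return tf.reduce_mean(tf.cast(correct, tf.float32))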
Example #6
 def valid_and_test(program, feeder, reader, process, mode="test"):
     """
     return auc and acc
     """
     # Get Batch Data
     batch_data = paddle.batch(reader, args.batch_size, drop_last=False)
     pred_list = []
     for data in batch_data():
         _pred = executor.run(program=program,
                              feed=feeder.feed(data),
                              fetch_list=[pred.name])
         pred_list += list(_pred)
     pred_list = np.vstack(pred_list)
     if mode == "test":
         label_list = process.get_test_label()
     elif mode == "valid":
         label_list = process.get_valid_label()
     if args.task_mode == "pairwise":
         pred_list = (pred_list + 1) / 2
         pred_list = np.hstack(
             (np.ones_like(pred_list) - pred_list, pred_list))
     metric.reset()
     metric.update(pred_list, label_list)
     auc = metric.eval()
     if args.compute_accuracy:
         acc = utils.get_accuracy(pred_list, label_list, args.task_mode,
                                  args.lamda)
         return auc, acc
     else:
         return auc
Example #7
def train(epoch, max_epoch):
    """ train model
    """
    # if epoch % 10 == 0:
    #     for param_group in solver.param_groups:
    #         param_group['lr'] *= 0.1

    loss_meter = AverageMeter()
    acc_meter = AverageMeter()
    ind = 0
    for x, target in tqdm(training_loader, desc="TRAINING", leave=False):
        act_lengths = get_seq_length(x)
        flatten_target, target_lengths = preprocess_target(target)
        x = x.to(Config.device)
        x = Variable(x)
        act_lengths = Variable(act_lengths)
        flatten_target = Variable(flatten_target)
        target_lengths = Variable(target_lengths)

        output = model(x)

        loss = criterion(output, flatten_target, act_lengths, target_lengths)
        solver.zero_grad()
        loss.backward()
        # nn.utils.clip_grad_norm(model.parameters(), 10)
        solver.step()
        loss_meter.update(loss.item())
        acc = get_accuracy(output, target)
        acc_meter.update(acc)
    train_result(epoch, max_epoch, ind, len(training_loader), loss_meter,
                 acc_meter)
    train_result(epoch, max_epoch, ind, len(training_loader), loss_meter,
                 acc_meter, log_file)
    return acc_meter.avg
Example #8
def go_through_by_batch(model, X, y, training):
    ''' Given training examples, pass them forward/backward through the network in batches '''
    # If no examples, return invalid loss and accuracy
    if len(X) == 0:
        return float('inf'), float('inf')

    assert len(X) == len(y)  # Sanity check
    total_loss = total_acc = 0
    num_batches = len(X) // config.BATCH_SIZE

    for idx in range(num_batches):
        # Get mini-batches
        start_idx = config.BATCH_SIZE * idx
        end_idx = start_idx + config.BATCH_SIZE
        x_batch = X[start_idx:end_idx]
        y_batch = y[start_idx:end_idx]

        # Forward pass through the model
        x_batch = forward_pass(model, x_batch, training=training)

        y_batch_pred = sce_gate.softmax(x_batch)
        loss = sce_gate.cross_entropy(y_batch_pred, y_batch)
        grad = sce_gate.backward()

        total_loss += np.mean(loss)
        total_acc += utils.get_accuracy(y_batch_pred, y_batch)

        if training:
            backward_pass(model, grad)
            update_model(model)

    # Get average loss and accuracy
    avg_loss = total_loss / num_batches
    avg_acc = total_acc / num_batches
    return avg_loss, avg_acc
Example #9
def test_SCU(cnn, X_test, y_test):

    # convert NumPy Array to Torch Tensor
    test_input = torch.from_numpy(X_test)
    test_label = torch.from_numpy(y_test)

    # create the data loader for the test set
    testset = torch.utils.data.TensorDataset(test_input, test_label)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=opt.batch_size,
                                             shuffle=False,
                                             num_workers=4)

    cnn.eval()
    test_cumulative_accuracy = 0
    for i, data in enumerate(testloader, 0):
        # format the data from the dataloader
        test_inputs, test_labels = data
        test_inputs, test_labels = test_inputs.to(device), test_labels.to(
            device)
        test_inputs = test_inputs.float()

        test_outputs = cnn(test_inputs)
        _, test_predicted = torch.max(test_outputs, 1)

        test_acc = get_accuracy(test_labels, test_predicted)
        test_cumulative_accuracy += test_acc

    return test_cumulative_accuracy, len(testloader)
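Note that `test_SCU` returns the sum of per-batch accuracies together with the number of batches, leaving the averaging to the caller, presumably along these lines:

cum_acc, n_batches = test_SCU(cnn, X_test, y_test)
print('Test accuracy: %.4f' % (cum_acc / n_batches))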
Example #10
def train(net, loader, crit, optim, lr_adjuster=None, augmentor=None):
    net.train()
    n_batches = len(loader)
    total_loss = 0
    total_acc = 0
    for inputs, targets in loader:
        inputs = Variable(inputs.cuda())
        targets = Variable(targets.cuda())

        if augmentor is not None:
            inputs = augmentor.transform(inputs)

        output = net(inputs)
        loss = crit(output, targets)

        optim.zero_grad()
        loss.backward()
        optim.step()

        preds = utils.get_argmax(output)
        accuracy = utils.get_accuracy(preds, targets.data.cpu().numpy())

        total_loss += loss.item()
        total_acc += accuracy

        if lr_adjuster is not None:
            lr_adjuster.step()
    mean_loss = total_loss / n_batches
    mean_acc = total_acc / n_batches
    return mean_loss, mean_acc
Example #11
File: main.py Project: postBG/GDumb
def test(loader, model, criterion, class_mask, logger, epoch):
    model.eval()
    losses, batch_time = AverageMeter(), AverageMeter()
    accuracy, task_accuracy = AverageMeter(), AverageMeter()

    with torch.no_grad():
        start = time.time()
        for inputs, labels in loader:
            # Get outputs
            inputs, labels = inputs.half().cuda(
                non_blocking=True), labels.cuda(non_blocking=True)
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            losses.update(loss.data, inputs.size(0))

            # Measure accuracy and task accuracy
            prob = torch.nn.functional.softmax(outputs, dim=1)
            acc, task_acc = get_accuracy(prob, labels, class_mask)
            accuracy.update(acc, labels.size(0))
            task_accuracy.update(task_acc, labels.size(0))
            batch_time.update(time.time() - start)
            start = time.time()

    logger.info(
        '==> Test: [{0}]\tTime:{batch_time.sum:.4f}\tLoss:{losses.avg:.4f}\tAcc:{acc.avg:.4f}\tTask Acc:{task_acc.avg:.4f}\t'
        .format(epoch,
                batch_time=batch_time,
                losses=losses,
                acc=accuracy,
                task_acc=task_accuracy))
    return accuracy.avg
Example #12
def train(args):
    logger.warning('This script is an example to showcase the extensions and '
                   'data-loading features of Torchmeta, and as such has been '
                   'very lightly tested.')

    dataset = omniglot(args.folder,
                       shots=args.num_shots,
                       ways=args.num_ways,
                       shuffle=True,
                       test_shots=15,
                       meta_train=True,
                       download=args.download)
    dataloader = BatchMetaDataLoader(dataset,
                                     batch_size=args.batch_size,
                                     shuffle=True,
                                     num_workers=args.num_workers)

    model = PrototypicalNetwork(1,
                                args.embedding_size,
                                hidden_size=args.hidden_size)
    model.to(device=args.device)
    model.train()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

    # Training loop
    with tqdm(dataloader, total=args.num_batches) as pbar:
        for batch_idx, batch in enumerate(pbar):
            model.zero_grad()

            train_inputs, train_targets = batch['train']
            train_inputs = train_inputs.to(device=args.device)
            train_targets = train_targets.to(device=args.device)
            train_embeddings = model(train_inputs)

            test_inputs, test_targets = batch['test']
            test_inputs = test_inputs.to(device=args.device)
            test_targets = test_targets.to(device=args.device)
            test_embeddings = model(test_inputs)

            prototypes = get_prototypes(train_embeddings, train_targets,
                dataset.num_classes_per_task)
            loss = prototypical_loss(prototypes, test_embeddings, test_targets)

            loss.backward()
            optimizer.step()

            with torch.no_grad():
                accuracy = get_accuracy(prototypes, test_embeddings, test_targets)
                pbar.set_postfix(accuracy='{0:.4f}'.format(accuracy.item()))

            if batch_idx >= args.num_batches:
                break

    # Save model
    if args.output_folder is not None:
        filename = os.path.join(args.output_folder, 'protonet_omniglot_'
            '{0}shot_{1}way.pt'.format(args.num_shots, args.num_ways))
        with open(filename, 'wb') as f:
            state_dict = model.state_dict()
            torch.save(state_dict, f)
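The `get_accuracy(prototypes, test_embeddings, test_targets)` call in this Torchmeta example classifies each test embedding by its nearest prototype. A sketch of that logic, assuming squared Euclidean distance as the metric:

import torch

def get_accuracy(prototypes, embeddings, targets):
    # prototypes: (batch, num_classes, dim); embeddings: (batch, num_test, dim)
    sq_distances = torch.sum(
        (prototypes.unsqueeze(1) - embeddings.unsqueeze(2)) ** 2, dim=-1)
    _, predictions = torch.min(sq_distances, dim=-1)
    return torch.mean(predictions.eq(targets).float())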
Example #13
    def evaluate(self, key):
        args = self.param.args
        dm = self.param.volatile.dm

        dm.restart(key, args.batch_size, shuffle=False)

        result_arr = []
        while True:
            incoming = self.get_next_batch(dm, key, restart=False)
            if incoming is None:
                break
            incoming.args = Storage()

            with torch.no_grad():
                self.net.forward(incoming)
            result_arr.append(incoming.result)

        detail_arr = Storage()
        for i in args.show_sample:
            index = [i * args.batch_size + j for j in range(args.batch_size)]
            incoming = self.get_select_batch(dm, key, index)
            incoming.args = Storage()
            with torch.no_grad():
                self.net.forward(incoming)
            detail_arr["show_str%d" % i] = incoming.result.show_str

        detail_arr.update({
            'loss': get_mean(result_arr, 'loss'),
            'accuracy': get_accuracy(result_arr, label_key='label',
                                     prediction_key='prediction')})
        return detail_arr
Example #14
    def outer_loop(self, batch, is_train):

        train_inputs, train_targets, test_inputs, test_targets = self.unpack_batch(
            batch)

        loss_log = 0
        acc_log = 0
        grad_list = []
        loss_list = []

        for (train_input, train_target, test_input,
             test_target) in zip(train_inputs, train_targets, test_inputs,
                                 test_targets):

            with higher.innerloop_ctx(self.network,
                                      self.inner_optimizer,
                                      track_higher_grads=False) as (fmodel,
                                                                    diffopt):

                for step in range(self.args.n_inner):
                    self.inner_loop(fmodel, diffopt, train_input, train_target)

                train_logit = fmodel(train_input)
                in_loss = F.cross_entropy(train_logit, train_target)

                test_logit = fmodel(test_input)
                outer_loss = F.cross_entropy(test_logit, test_target)
                loss_log += outer_loss.item() / self.batch_size

                with torch.no_grad():
                    acc_log += get_accuracy(
                        test_logit, test_target).item() / self.batch_size

                if is_train:
                    params = list(fmodel.parameters(time=-1))
                    in_grad = torch.nn.utils.parameters_to_vector(
                        torch.autograd.grad(in_loss, params,
                                            create_graph=True))
                    outer_grad = torch.nn.utils.parameters_to_vector(
                        torch.autograd.grad(outer_loss, params))
                    implicit_grad = self.neumann_approx(
                        in_grad, outer_grad, params)
                    grad_list.append(implicit_grad)
                    loss_list.append(outer_loss.item())

        if is_train:
            self.outer_optimizer.zero_grad()
            weight = torch.ones(len(grad_list))
            weight = weight / torch.sum(weight)
            grad = mix_grad(grad_list, weight)
            grad_log = apply_grad(self.network, grad)
            self.outer_optimizer.step()

            return loss_log, acc_log, grad_log
        else:
            return loss_log, acc_log
Example #15
def test(epoch, max_epoch):
    acc_meter = AverageMeter()
    for x, target in tqdm(testing_loader, desc="TESTING", leave=False):
        x = Variable(x).to(Config.device)
        output = model(x)
        acc = get_accuracy(output, target)
        acc_meter.update(acc)
    test_result(epoch, max_epoch, acc_meter)
    test_result(epoch, max_epoch, acc_meter, log_file)
    return acc_meter.avg
Example #16
def test(model, dataloader, args):
    model.eval()
    epoch_stats = EpochStats()
    for batch_idx, batch in enumerate(tqdm(dataloader, desc="Batch")):
        _, test_batch, _ = concurrent_multi_task_train_test_split(
            batch, False, tasks=args.tasks)
        test_batch = test_batch[0]
        test_batch = test_batch.to(args.device)
        with torch.no_grad():
            gc_test_logit, nc_test_logit, lp_test_logit = model(test_batch)
            # GC
            if "gc" in args.tasks:
                gc_loss = F.cross_entropy(gc_test_logit, test_batch.y)
                with torch.no_grad():
                    gc_acc = ut.get_accuracy(gc_test_logit, test_batch.y)
                epoch_stats.update("gc", test_batch, gc_loss, gc_acc, False)
            # NC
            if "nc" in args.tasks:
                node_labels = test_batch.node_y.argmax(1)
                train_mask = test_batch.train_mask.squeeze()
                test_mask = (train_mask == 0).float()
                nc_loss = F.cross_entropy(nc_test_logit[test_mask == 1],
                                          node_labels[test_mask == 1])
                with torch.no_grad():
                    nc_acc = ut.get_accuracy(nc_test_logit[test_mask == 1],
                                             node_labels[test_mask == 1])
                epoch_stats.update("nc", test_batch, nc_loss, nc_acc, False)
            # LP
            if "lp" in args.tasks:
                test_link_labels = data_utils.get_link_labels(
                    test_batch.pos_edge_index, test_batch.neg_edge_index)
                lp_loss = F.binary_cross_entropy_with_logits(
                    lp_test_logit.squeeze(), test_link_labels)
                with torch.no_grad():
                    test_labels = test_link_labels.detach().cpu().numpy()
                    test_predictions = lp_test_logit.detach().cpu().numpy()
                    lp_acc = roc_auc_score(test_labels,
                                           test_predictions.squeeze())
                epoch_stats.update("lp", test_batch, lp_loss, lp_acc, False)

    tasks_test_stats = epoch_stats.get_average_stats()
    bl_ut.print_test_stats(tasks_test_stats)
    return tasks_test_stats
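As an aside, the `(train_mask == 0).float()` / `== 1` round trip in the NC branch can be expressed directly with a boolean mask; a minimal equivalent, assuming `train_mask` holds 0/1 entries:

import torch
import torch.nn.functional as F

def masked_nc_loss(logits, node_labels, train_mask):
    # Select the held-out (non-training) nodes with a boolean mask.
    test_mask = ~train_mask.squeeze().bool()
    return F.cross_entropy(logits[test_mask], node_labels[test_mask])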
Example #17
def test(model, k, num_each_class):
    model.eval()  # TODO: notice there is a difference in L0 gate
    base, test, label = utils.get_test_data(num_each_class=num_each_class)
    latent_test = get_latent(test)
    latent_base = get_latent(base)

    pred = kNNClassifer(latent_base, latent_test, k)

    acc = utils.get_accuracy(pred, label).tolist()
    print('test acc of {}-NN is: {}'.format(k, acc))
Example #18
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input_file', help="Input csv file with data.")
    args = parser.parse_args()

    dataset = utils.load_csv(args.input_file)
    train_set, test_set = corpus.split_dataset(dataset, 0.67)
    separated = corpus.separate_by_class(train_set)
    summaries = corpus.summarize_by_class(train_set)
    predictions = predict_set(summaries, test_set)
    accuracy = utils.get_accuracy(test_set, predictions)
    print('Accuracy: {0}%'.format(accuracy))
Example #20
def compare_crf():
    reader = Reader()
    crf_rnn = CRF_RNN(input_shape=[512, 512, 1],
                      batch_size=1,
                      l2_weight=0.001,
                      input_dim=262144,
                      learning_rate=6e-4,
                      pre_train=True)
    show_all_variables()

    iter = 30
    acc_without_crf = 0.0
    acc_with_crf = 0.0
    for _ in range(iter):
        xs, ys = reader.next_batch(1)

        pred = crf_rnn.predict(xs, as_list=False)
        infe = crf_rnn.predict_n_inference(xs, as_list=False)

        tmp_without_crf = get_accuracy(pred, ys)
        tmp_with_crf = get_accuracy(infe, ys)
        print('--Precision without CRF: %g --Precision with CRF: %g' %
              (tmp_without_crf, tmp_with_crf))
        acc_with_crf += tmp_with_crf
        acc_without_crf += tmp_without_crf

        plt.figure()
        plt.subplot(121)
        plt.imshow(pred.reshape((512, 512)), cmap='gray')
        plt.subplot(122)
        plt.imshow(infe.reshape((512, 512)), cmap='gray')
        plt.show()
        plt.close()

    print(
        '\nTotal Precision:\n\t--Precision without CRF: %g\n\t--Precision with CRF: %g'
        % (acc_without_crf / float(iter), acc_with_crf / float(iter)))

    print('\nFinished!!!')
Example #21
def eval_baseline_nn_output_model(output_model, dataloader, output_task, device="cpu"):
    output_model.eval()
    epoch_stats = EpochStats()
    for batch_idx, batch in enumerate(tqdm(dataloader, desc="Eval Batch")):
        batch = prepare_batch_for_task(batch, output_task, train=False)
        batch = batch.to(device)
        with torch.no_grad():
            # Forward pass 
            if output_task == "gc":
                test_logit = output_model(batch.node_embeddings, batch.batch)
            elif output_task == "nc":
                test_logit = output_model(batch.node_embeddings)
            elif output_task == "lp":
                test_logit = output_model(batch.node_embeddings, batch.pos_edge_index, batch.neg_edge_index)                

            # Evaluate Loss and Accuracy
            if output_task == "gc":
                loss = F.cross_entropy(test_logit, batch.y)
                with torch.no_grad():
                    acc = ut.get_accuracy(test_logit, batch.y)
            elif output_task == "nc":
                node_labels = batch.node_y.argmax(1)
                train_mask = batch.train_mask.squeeze()
                test_mask = (train_mask == 0).float()
                loss = F.cross_entropy(test_logit[test_mask == 1], node_labels[test_mask == 1])
                with torch.no_grad():
                    acc = ut.get_accuracy(test_logit[test_mask == 1], node_labels[test_mask == 1])
            elif output_task == "lp":
                test_link_labels = data_utils.get_link_labels(batch.pos_edge_index, batch.neg_edge_index)
                loss = F.binary_cross_entropy_with_logits(test_logit.squeeze(), test_link_labels)
                with torch.no_grad():
                    test_labels = test_link_labels.detach().cpu().numpy()
                    test_predictions = test_logit.detach().cpu().numpy()
                    acc = roc_auc_score(test_labels, test_predictions.squeeze())

            epoch_stats.update(output_task, batch, loss, acc, False)

    task_test_stats = epoch_stats.get_average_stats()
    bl_ut.print_test_stats(task_test_stats)
    return task_test_stats
Example #22
def train_loop(loader, model, epochs = 3, start_epoch = 0, params = None, device = None, loss_func = torch.nn.CrossEntropyLoss, n_tops = [1, 5]):
	L_RATE, DECAY_RATE, DECAY_EPOCHS, WEIGHT_DECAY, SAVE_MODEL, SAVE_MODEL_N, SAVE_MODEL_DIR, MODEL, N_LAYERS = params
	optimizer = optim.Adam(model.parameters(), lr = L_RATE, weight_decay = WEIGHT_DECAY)
	if SAVE_MODEL:
		if MODEL == 'Darknet':
			path = '{}{}'.format(MODEL, N_LAYERS)
		else:
			path = MODEL
		if not os.path.exists('{}/{}'.format(SAVE_MODEL_DIR, path)):
			os.makedirs('{}/{}'.format(SAVE_MODEL_DIR, path))
	losses, accuracies = {'train': [], 'validate': []}, {'train': [], 'validate': []}

	for epoch in range(start_epoch, epochs + start_epoch):
		t = time()
		if (epoch + 1) % DECAY_EPOCHS == 0:
			L_RATE *= (1 - DECAY_RATE)
			optimizer = optim.Adam(model.parameters(), lr=L_RATE, weight_decay=WEIGHT_DECAY)

		# print epoch number
		print_report(part = 'start', epoch = epoch)
		# train loop
		train_epoch(loader['train'], model, optimizer, device, loss_func)
		# print metrics
		val_acc, val_loss = get_accuracy(loader['val'], model, device, dtype, loss_func, n_tops)
		train_acc, train_loss = get_accuracy(loader['train'], model, device, dtype, loss_func, n_tops)
		metrics = train_loss, val_loss, train_acc, val_acc, n_tops
		print_report(part='accuracy', metrics = metrics)
		# collect metrics
		losses['train'].append(train_loss)
		losses['validate'].append(val_loss)
		accuracies['train'].append(train_acc)
		accuracies['validate'].append(val_acc)

		# save models
		if SAVE_MODEL:
			save_checkpoint(model = model, cfg = cfg, epoch = epoch, loss = round(val_loss, 3))
		
		# print time
		print_report(part='end', t = int(time() - t))
Example #23
def test(net, tst_loader, criterion):
    net.eval()
    test_loss = 0
    test_acc = 0
    for data in tst_loader:
        inputs = Variable(data[0].cuda(), volatile=True)
        target = Variable(data[1].cuda())
        output = net(inputs)
        test_loss += criterion(output, target).item()
        pred = utils.get_argmax(output)
        test_acc += utils.get_accuracy(pred, target.data.cpu().numpy())
    test_loss /= len(tst_loader)
    test_acc /= len(tst_loader)
    return test_loss, test_acc
Example #24
def test(model, dataloader, args):
    model.eval()
    epoch_stats = EpochStats()
    for batch_idx, batch in enumerate(tqdm(dataloader, desc="Batch")):
        test_batch = prepare_batch_for_task(batch, args.task, train=False)
        test_batch = test_batch.to(args.device)
        with torch.no_grad():
            test_logit = model(test_batch)
            if args.task == "gc":
                loss = F.cross_entropy(test_logit, test_batch.y)
                with torch.no_grad():
                    acc = ut.get_accuracy(test_logit, test_batch.y)
            elif args.task == "nc":
                node_labels = test_batch.node_y.argmax(1)
                train_mask = test_batch.train_mask.squeeze()
                test_mask = (train_mask == 0).float()
                loss = F.cross_entropy(test_logit[test_mask == 1],
                                       node_labels[test_mask == 1])
                with torch.no_grad():
                    acc = ut.get_accuracy(test_logit[test_mask == 1],
                                          node_labels[test_mask == 1])
            elif args.task == "lp":
                test_link_labels = data_utils.get_link_labels(
                    test_batch.pos_edge_index, test_batch.neg_edge_index)
                loss = F.binary_cross_entropy_with_logits(
                    test_logit.squeeze(), test_link_labels)
                with torch.no_grad():
                    test_labels = test_link_labels.detach().cpu().numpy()
                    test_predictions = test_logit.detach().cpu().numpy()
                    acc = roc_auc_score(test_labels,
                                        test_predictions.squeeze())

            epoch_stats.update(args.task, test_batch, loss, acc, False)

    task_test_stats = epoch_stats.get_average_stats()
    bl_ut.print_test_stats(task_test_stats)
    return task_test_stats
Example #25
def test(test_loader, model_base, model_gaze, model_attn, num_action):
    model_base.eval()
    model_gaze.eval()
    model_attn.eval()

    list_true = []
    list_pred = []

    start_time = time.time()
    with torch.no_grad():
        for i, (rgb, flow, label) in enumerate(test_loader, 1):
            label = label.to(device)
            len_video, height, width = rgb.shape[2:]
            top, left = (height - args.crop) // 2, (width - args.crop) // 2
            jump = args.trange
            if args.test_sparse:
                if len_video > args.trange * 10:
                    jump = len_video // 10

            list_start_idx = list(range(0, len_video - args.trange + 1, jump))

            list_y = []
            for t in list_start_idx:
                t_rgb = rgb[..., t:t + args.trange, top:top + args.crop,
                            left:left + args.crop].cuda()
                t_flow = flow[..., t:t + args.trange, top:top + args.crop,
                              left:left + args.crop].cuda()
                pi, h = model_base(t_rgb, t_flow)
                pi = model_gaze(pi)
                z_hard, pi_g = make_hard_decision(pi, device)
                y = compute_cross_entropy(z_hard, h, model_attn, label)[0]
                list_y.append(y)

            y_avg = torch.cat(list_y, 0).mean(0, keepdim=True)

            list_true.append(label.item())
            list_pred.append(torch.argmax(y_avg, 1).item())

            print('step: %04d, %s' %
                  (i, timedelta(seconds=int(time.time() - start_time))),
                  flush=True)

    mean_class_acc, acc = get_accuracy(
        confusion_matrix(list_true, list_pred, labels=list(range(num_action))))

    print('acc: %.2f, %.2f / %s' %
          (mean_class_acc, acc,
           timedelta(seconds=int(time.time() - start_time))),
          flush=True)
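In this example `get_accuracy` consumes a scikit-learn confusion matrix and returns the mean per-class accuracy and the overall accuracy. A sketch consistent with that call (percentage scaling assumed from the `%.2f` prints; the real helper may differ):

import numpy as np

def get_accuracy(cm):
    # Rows are true classes, columns are predictions.
    cm = np.asarray(cm, dtype=float)
    per_class = np.diag(cm) / np.maximum(cm.sum(axis=1), 1)  # guard empty rows
    mean_class_acc = 100.0 * per_class.mean()
    overall_acc = 100.0 * np.trace(cm) / max(cm.sum(), 1)
    return mean_class_acc, overall_acc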
Example #26
    def outer_loop(self, batch, is_train):

        train_inputs, train_targets, test_inputs, test_targets = self.unpack_batch(
            batch)

        test_losses = [0 for _ in range(self.args.n_inner)]
        test_corrects = [0 for _ in range(self.args.n_inner)]
        test_accs = [0 for _ in range(self.args.n_inner)]

        for (train_input, train_target, test_input,
             test_target) in zip(train_inputs, train_targets, test_inputs,
                                 test_targets):

            fast_weights = OrderedDict(self.network.named_parameters())

            for i in range(self.args.n_inner):
                train_logit = self.network.functional_forward(
                    train_input, fast_weights)
                train_loss = F.cross_entropy(train_logit, train_target)
                train_grad = torch.autograd.grad(train_loss,
                                                 fast_weights.values(),
                                                 create_graph=True)

                # Update weights manually
                fast_weights = OrderedDict(
                    (name, param - self.args.inner_lr * grad)
                    for ((name, param),
                         grad) in zip(fast_weights.items(), train_grad))

                test_logit = self.network.functional_forward(
                    test_input, fast_weights)
                test_loss = F.cross_entropy(test_logit, test_target)
                test_loss.backward(retain_graph=True)

                test_losses[i] += test_loss
                with torch.no_grad():
                    test_acc = get_accuracy(test_logit, test_target).item()
                    test_accs[i] += test_acc

        acc_log = test_accs[-1] / self.batch_size
        loss_log = test_losses[-1] / self.args.batch_size
        if is_train:
            self.outer_optimizer.zero_grad()
            loss_log.backward()
            self.outer_optimizer.step()

            return loss_log.item(), acc_log, loss_log.item()
        else:
            return loss_log.item(), acc_log
Example #27
def train(df, selected_features=utils.SELECTED_FEATURES, alpha=0.1, epsilon=0.0001, reg_param=100, train_size=0.8,
          mode="gradient", batch_size=None, iterations=None):
    train_df = df.sample(frac=train_size, random_state=7)
    test_df = df.drop(train_df.index)

    thetas_dict, cost_list_dict = get_thetas_train(train_df, selected_features=selected_features, alpha=alpha,
                                                   epsilon=epsilon, reg_param=reg_param, mode=mode,
                                                   batch_size=batch_size, iterations=iterations)

    if thetas_dict is None:
        return None, None, None
    prediction = predict(test_df, thetas_dict)
    truth = test_df[["Index", "Hogwarts House"]]
    accuracy = utils.get_accuracy(prediction, truth)
    return thetas_dict, cost_list_dict, accuracy
Example #28
    def __init__(self, input_dim, output_dim, init_noisevar=None):
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.true_outputs = tf.placeholder(tf.float32, [None, output_dim],
                                           name='trueY')

        self.iblayerobj = iblayer.NoisyIBLayer(init_noisevar=init_noisevar,
                                               name='noisy_ib_layer')

        self.layers = []
        self.layers.append(
            tf.placeholder(tf.float32, [None, input_dim], name='X'))
        self.layers.append(
            tf.keras.layers.Dense(cfg['n_wide'],
                                  activation=tf.nn.relu)(self.layers[-1]))
        self.layers.append(
            tf.keras.layers.Dense(cfg['n_wide'],
                                  activation=tf.nn.relu)(self.layers[-1]))
        self.layers.append(
            tf.keras.layers.Dense(cfg['n_hidden'],
                                  activation=None)(self.layers[-1]))
        self.layers.append(self.iblayerobj(self.layers[-1]))
        self.layers.append(
            tf.keras.layers.Dense(cfg['n_wide'],
                                  activation=tf.nn.relu)(self.layers[-1]))
        self.layers.append(
            tf.keras.layers.Dense(output_dim, activation=None,
                                  name='Y')(self.layers[-1]))

        self.inputs = self.layers[0]
        self.predictions = self.layers[-1]

        self.entropyY = tf.placeholder(dtype=tf.float32,
                                       shape=(),
                                       name='entropyY')

        self.cross_entropy = utils.get_error(errtype=data['err'],
                                             y_true=self.true_outputs,
                                             y_pred=self.predictions)
        self.accuracy = utils.get_accuracy(errtype=data['err'],
                                           y_true=self.true_outputs,
                                           y_pred=self.predictions)
Example #29
def evaluate(model, test_dataloader, device, criterior):
    test_loss, accuracy, count = 0, 0, 0
    # treshold = args.trs_hold
    model.eval()
    with torch.no_grad():
        for batch_ind, (input, target) in enumerate(test_dataloader):
            input, target = input.to(device), target.to(device)
            output = model(input)
            output_after_tresh = utils.apply_trashold(output, 0.1)
            tmp, length = utils.get_accuracy(output_after_tresh, target)
            accuracy += tmp
            count += length
            loss = criterior(output, target)
            test_loss += loss.item()
        print(f'Test loss is:{test_loss/len(test_dataloader):.5}')
        print(f'Accuracy is: {accuracy/count:.5}')
Example #30
    def outer_loop(self, batch, is_train):

        self.network.zero_grad()

        train_inputs, train_targets, test_inputs, test_targets = self.unpack_batch(
            batch)

        loss_log = 0
        acc_log = 0
        grad_list = []
        loss_list = []

        for (train_input, train_target, test_input,
             test_target) in zip(train_inputs, train_targets, test_inputs,
                                 test_targets):
            with higher.innerloop_ctx(
                    self.network,
                    self.inner_optimizer,
                    track_higher_grads=is_train) as (fmodel, diffopt):

                for step in range(self.args.n_inner):
                    self.inner_loop(fmodel, diffopt, train_input, train_target)

                test_logit = fmodel(test_input)
                outer_loss = F.cross_entropy(test_logit, test_target)
                loss_log += outer_loss.item() / self.batch_size

                with torch.no_grad():
                    acc_log += get_accuracy(
                        test_logit, test_target).item() / self.batch_size

                if is_train:
                    outer_grad = torch.autograd.grad(outer_loss,
                                                     fmodel.parameters(time=0))
                    grad_list.append(outer_grad)
                    loss_list.append(outer_loss.item())

        if is_train:
            weight = torch.ones(len(grad_list))
            weight = weight / torch.sum(weight)
            grad = mix_grad(grad_list, weight)
            grad_log = apply_grad(self.network, grad)
            self.outer_optimizer.step()

            return loss_log, acc_log, grad_log
        else:
            return loss_log, acc_log
Example #31
def train(model,
          dataloader,
          epochs,
          optimizer,
          criterion,
          save_output_every=10,
          save_model_every=50,
          only_show=False):
    device = utils.get_device()

    for epoch in tqdm(range(1, epochs + 1)):
        losses_per_epoch = []
        accuracies_per_epoch = []
        # go over all batches
        for step, (input, target) in enumerate(dataloader):
            model.train()
            input, target = input.to(device), target.to(device)

            target = utils.preprocess_target(target)
            pred = model(input)

            optimizer.zero_grad()
            loss = criterion(pred, target)
            loss.backward()
            optimizer.step()

            acc = utils.get_accuracy(pred, target)

            losses_per_epoch.append(loss.item())
            accuracies_per_epoch.append(acc)

        mean_loss = np.mean(losses_per_epoch)
        mean_acc = np.mean(accuracies_per_epoch)

        print(f'{"-"*30} Epoch {epoch} {"-"*30}')
        print('Loss: %.3f   Accuracy: %.3f' % (mean_loss, mean_acc))

        if epoch % save_output_every == 0:
            utils.save_result(epoch,
                              input,
                              pred,
                              target,
                              name='epoch',
                              only_show=only_show)

        if epoch % save_model_every == 0:
            utils.save_model(epoch, model)
Example #32
def predict(in_file, out_file):
    xvals, yvals = utils.load_data(in_file)
    network = build_network()
    predictions = utils.predict(xvals, network, 'circle.tflearn')
    print('Accuracy: {}%'.format(utils.get_accuracy(yvals, predictions)))
    utils.write_predictions(xvals, predictions, out_file)