Example 1
def test():
    model.eval()
    mean_loss = []
    for i, dl_test_pose in enumerate(dl_test):
        targets, preds = [], []

        print(
            '-----------------------------------Evaluating POSE {} ------------------------- '.format(poses[i]))
        for iter, (data, target, _) in enumerate(dl_test_pose):
            data, target = data.cuda().float(), target.cuda().float()

            # Variable(..., volatile=True) is long deprecated; torch.no_grad()
            # is the modern way to disable autograd during evaluation.
            with torch.no_grad():
                output = model(data)
                loss = F.binary_cross_entropy(output, target)

            preds.append(output.cpu().numpy())
            targets.append(target.cpu().numpy())

            mean_loss.append(loss.item())

        pred = np.asarray(
            np.clip(np.rint(np.concatenate(preds)), 0, 1), dtype=np.uint8)
        target = np.clip(np.rint(np.concatenate(targets)),
                         0, 1).astype(np.uint8)

        evaluate_model(target, pred)

    print('-----Mean loss in validation : {}-----'.format(np.mean(mean_loss)))
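Every example on this page calls an external evaluate_model helper whose definition is not shown. For reference, here is a minimal sketch consistent with the 5-tuple unpacking used in the later examples, built on scikit-learn; this is an assumption about its shape, not the original implementation:

def evaluate_model(targets, preds, verbose=False):
    # Hypothetical sketch: per-class precision/recall plus macro F1 for
    # binary multi-label arrays. The real helper may compute more.
    from sklearn.metrics import (accuracy_score, f1_score, precision_score,
                                 recall_score)
    precision = precision_score(targets, preds, average=None, zero_division=0)
    recall = recall_score(targets, preds, average=None, zero_division=0)
    f1 = f1_score(targets, preds, average='macro', zero_division=0)
    acc = accuracy_score(targets, preds)
    support = targets.sum(axis=0)
    if verbose:
        print('precision: {}\nrecall: {}\nmacro F1: {:.4f}'.format(
            precision, recall, f1))
    return precision, recall, f1, acc, support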
Example 2
def test():
    model.eval()
    f1s, mus = [], []
    for i, dl_test_pose in enumerate(dl_test):
        targets, preds = [], []
        print(
            '-----------------------------------Evaluating POSE {} ------------------------- '.format(poses[i]))
        for iter, (data, target, _) in enumerate(dl_test_pose):
            target = torch.clamp(target[:, aus], 0, 1)
            data, target = data.cuda().float(), target.cuda().float()

            if iter % args.log_interval == 0:
                print('Iter {}'.format(iter))

            with torch.no_grad():
                pred, mu, logvar = model(data)
                pred = torch.sigmoid(pred)  # F.sigmoid is deprecated

                # Move to CPU/NumPy here; np.concatenate cannot handle
                # CUDA tensors, and mus.append(mus) was a self-append bug.
                preds.append(pred.cpu().numpy())
                mus.append(mu.cpu().numpy())
                targets.append(target.cpu().numpy())

        preds = np.asarray(np.concatenate(preds))
        '''
        print('preds min:{}, max:{}, mean:{}'.format(
            preds.min(), preds.max(), np.mean(preds)))
        '''
        targets = np.clip(np.rint(np.concatenate(targets)),
                          0, 1).astype(np.uint8)

        ''' Evaluate model per pose'''
        f1_pose = []
        for t in eval_thresholds:
            preds_f = np.copy(preds)
            preds_f[np.where(preds_f < t)] = 0
            preds_f[np.where(preds_f >= t)] = 1

            preds_f = np.reshape(preds_f, (-1, n_classes))

            if t == 0.5:
                print('--------EVAL PRED------ t = {}'.format(t))
                _, _, f1, _, _ = evaluate_model(
                    targets, preds_f, verbose=True)
            else:
                _, _, f1, _, _ = evaluate_model(
                    targets, preds_f, verbose=False)

            f1_pose.append(f1)

        f1s.append(f1_pose)
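The per-threshold masking above copies the array and overwrites it in two passes; a compact equivalent (values >= t map to 1, everything else to 0):

preds_f = (preds >= t).astype(np.uint8).reshape(-1, n_classes)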
Example 3
def test(epoch, n_runs):
    model.eval()
    f1s, loss = [], 0
    for i, dl_test_pose in enumerate(dl_test):
        targets, preds = [], []
        print(
            '-----------------------------------Evaluating POSE {} ------------------------- '
            .format(poses[i]))
        for iter, (data, target, _) in enumerate(dl_test_pose):
            data, target = data.cuda().float(), target.cuda().float()

            with torch.no_grad():
                outputs = [model(data) for _ in range(n_runs)]

                # .data[0] no longer works on 0-dim tensors; use .item()
                for pred in outputs:
                    loss += BCE(pred, target).item() / \
                        (len(data) * n_runs * n_iter_test_total)

            # Average predictions over the n_runs passes
            pred = np.mean(np.asarray([p.cpu().numpy() for p in outputs]),
                           axis=0)

            preds.append(pred)
            targets.append(target.cpu().numpy())

        pred = np.asarray(np.clip(np.rint(np.concatenate(preds)), 0, 1),
                          dtype=np.uint8)
        target = np.clip(np.rint(np.concatenate(targets)), 0,
                         1).astype(np.uint8)
        ''' Evaluate model per pose'''
        _, _, pose_f1, _, _ = evaluate_model(target, pred)
        f1s.append(pose_f1)

    f1 = np.mean(f1s)
    print('\n====> Test loss: {:.4f}\n'.format(loss))
    print('\n====> Mean F1: {}\n'.format(f1))

    info = {'loss_val': loss, 'f1_val': f1}

    for tag, value in info.items():
        logger.scalar_summary(tag, value, epoch)

    # Get bool not ByteTensor
    is_best = True
    '''
    is_best = bool(f1 > best_f1)

    # Get greater Tensor to keep track best acc
    best_f1 = max(f1, best_f1)
    '''

    # Save checkpoint if is a new best
    save_checkpoint(
        {
            'epoch': epoch,
            'state_dict': model.state_dict(),
            'f1': f1
        }, is_best, 'models/' + oname + '_epoch_' + str(epoch))
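A caveat on the n_runs averaging above: model.eval() disables dropout, so the repeated forward passes are deterministic and identical. If MC-dropout averaging is the intent (an assumption), dropout modules need to be kept stochastic explicitly, e.g.:

def enable_mc_dropout(model):
    # Keep dropout layers stochastic while batch norm etc. stay frozen;
    # call this after model.eval().
    for m in model.modules():
        if isinstance(m, torch.nn.Dropout):
            m.train()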
Example 4
def test():
    model.eval()
    for i, dl_test_pose in enumerate(dl_test):
        # Reset per pose, so each pose is evaluated on its own predictions
        # (as in the other examples) rather than on a cumulative list.
        targets, preds = [], []
        print(
            '-----------------------------------Evaluating POSE {} ------------------------- '.format(poses[i]))
        for iter, (data, target, _) in enumerate(dl_test_pose):
            data, target = data.cuda().float(), target.cuda().float()

            with torch.no_grad():
                preds.append(model(data).cpu().numpy())
                targets.append(target.cpu().numpy())

        pred = np.asarray(
            np.clip(np.rint(np.concatenate(preds)), 0, 1), dtype=np.uint8)
        target = np.concatenate(targets)

        evaluate_model(target, pred)
Example 5
def test():
    model.eval()
    for i, dl_test_pose in enumerate(dl_test):
        # Reset per pose so the division by n_iter_test[i] below yields a
        # per-pose loss rather than a running total.
        test_loss, targets, preds = 0, [], []
        print(
            '-----------------------------------Evaluating POSE {} ------------------------- '
            .format(poses[i]))
        for iter, (data, target, _) in enumerate(dl_test_pose):
            data, target = data.cuda().float(), target.cuda().float()

            with torch.no_grad():
                recon_data, mu, logvar, pred = model(data)
                test_loss += loss_function(recon_data, data, mu, logvar).item()
                # Append once per batch (the original appended twice,
                # duplicating every batch in the evaluation).
                preds.append(pred.cpu().numpy())
                targets.append(target.cpu().numpy())

            if iter == 0:
                n = min(data.size(0), 8)
                comparison = torch.cat([data[:n], recon_data[:n]])
                save_image(comparison.cpu() / 255.,
                           'results/vae_discr/reconstruction_pose_' + str(i) +
                           '_epoch_' + str(epoch) + '_lt_' +
                           str(args.size_latent) + '.png',
                           nrow=n)

        pred = np.asarray(np.clip(np.rint(np.concatenate(preds)), 0, 1),
                          dtype=np.uint8)
        target = np.clip(np.rint(np.concatenate(targets)), 0,
                         1).astype(np.uint8)

        evaluate_model(target, pred)

        test_loss /= n_iter_test[i]
        print('====> Test loss: {:.4f}'.format(test_loss))
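loss_function is defined outside this snippet; for a VAE it is conventionally the reconstruction term plus the KL divergence, as in the canonical PyTorch VAE example. A sketch under that assumption (the project's actual definition may differ):

def loss_function(recon_x, x, mu, logvar):
    # Reconstruction BCE plus closed-form KL(q(z|x) || N(0, I));
    # assumes inputs are scaled to [0, 1].
    bce = F.binary_cross_entropy(recon_x, x.view_as(recon_x), reduction='sum')
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return bce + kld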
Example 6
def train_model(net: UNet, epochs: int, device: torch.device,
                train_loader: DataLoader, valid_loader: DataLoader,
                loss_function, optimizer, batch_size, scheduler,
                save_path: str) -> tuple:
    """ Training loop """
    SAVE_EACH_EPOCH = True
    VALIDATE_EVERY = max(1, len(train_loader) // 3)  # guard against // 3 == 0
    training_loss_history, dice_history = list(), list()
    best_val_accuracy, best_val_loss = 0, float("inf")
    best_accuracy_epoch = 0
    best_model_weights = copy.deepcopy(net.state_dict())
    print("Training commences. Calculations performed on device:", device)
    for epoch in range(epochs):
        print("-" * 30)
        print(f"{epoch + 1} / {epochs}")
        epoch_loss = 0.0
        net.train()

        for i, batch in enumerate(train_loader, start=1):
            #print("Batch:", i)
            images, masks = batch["image"], batch["mask"]
            assert images.shape[1] == net.n_channels, \
                "Number of channels in loaded images does not match the model"
            images = images.to(device=device, dtype=torch.float32)
            mask_type = torch.float32 if net.n_classes == 1 else torch.long
            masks = masks.to(device=device, dtype=mask_type)

            mask_predictions = net(images)
            loss = loss_function(mask_predictions, masks)
            epoch_loss += loss.item()  # averaged over batches below

            optimizer.zero_grad()
            loss.backward()
            # Clip each gradient element to [-0.1, 0.1] to curb exploding
            # gradients.
            torch.nn.utils.clip_grad_value_(net.parameters(), clip_value=0.1)
            optimizer.step()

            # TODO: Scheduler steps - how often?
            if i % VALIDATE_EVERY == 0:
                valid_score = evaluate_model(net, valid_loader, device)
                dice_history.append(valid_score)
                scheduler.step(valid_score)
                if net.n_classes > 1:
                    print(
                        f"Batch: {i}. Validation cross entropy: {valid_score}")
                else:
                    print(
                        f"Batch: {i}. Validation dice coeff: {round(valid_score, 4)}"
                    )

        training_loss_history.append(epoch_loss / len(train_loader))
        print("Epoch loss: {:.4f}".format(epoch_loss / len(train_loader)))

        if SAVE_EACH_EPOCH:
            try:
                torch.save(net.state_dict(),
                           os.path.join(save_path, f"{epoch}_unet.pth"))
                print("Weights saved to:", save_path)
            except Exception as e:
                print(f"Failed to save weights. Error: {e}")

    return dice_history, training_loss_history
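On the gradient clipping above: clip_grad_value_ caps each gradient element at +/-0.1, which prevents exploding gradients but can distort the gradient direction. Clipping by global norm, which rescales the whole gradient vector instead, is a common alternative:

# Rescale the gradient vector whenever its L2 norm exceeds max_norm.
torch.nn.utils.clip_grad_norm_(net.parameters(), max_norm=1.0)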
Example 7
def test(criterion, metric, regularization, n_runs):
    model.eval()
    f1s, acc_loss, acc_crit, acc_reg = [], [], [], []
    for i, dl_test_pose in enumerate(dl_test):
        targets, preds = [], []
        print(
            '-----------------------------------Evaluating POSE {} ------------------------- '.format(poses[i]))
        for iter, (data, target, _) in enumerate(dl_test_pose):
            target = torch.clamp(target[:, aus], 0, 1)
            data, target = data.cuda().float(), target.cuda().float()

            with torch.no_grad():
                # Note: n_runs is accepted by test() but only a single
                # forward pass is made here.
                pred, mu, logvar = model(data)

                ''' Compute loss '''
                pred = torch.sigmoid(pred)
                crit_val = criterion(pred, target) / len(data)
                reg_val = regularization(mu, logvar) / len(data)
                loss = crit_val + args.beta * reg_val

                acc_crit.append(crit_val.item())
                acc_reg.append(reg_val.item())
                acc_loss.append(loss.item())

                preds.append(pred.cpu().numpy())
                targets.append(target.cpu().numpy())

        preds = np.asarray(np.concatenate(preds))
        print('preds min:{}, max:{}, mean:{}'.format(
            preds.min(), preds.max(), np.mean(preds)))
        targets = np.clip(np.rint(np.concatenate(targets)),
                          0, 1).astype(np.uint8)

        ''' Evaluate model per pose'''
        f1_pose = []
        for t in eval_thresholds:
            preds_f = np.copy(preds)
            preds_f[np.where(preds_f < t)] = 0
            preds_f[np.where(preds_f >= t)] = 1

            preds_f = np.reshape(preds_f, (-1, n_classes))

            if t == 0.5:
                print('--------EVAL PRED------ t = {}'.format(t))
                _, _, f1, _, _ = evaluate_model(
                    targets, preds_f, verbose=True)
            else:
                _, _, f1, _, _ = evaluate_model(
                    targets, preds_f, verbose=False)

            f1_pose.append(f1)

        f1s.append(f1_pose)

    ''' Log validation loss '''
    info = {'loss_test': np.mean(acc_loss),
            'crit_test': np.mean(acc_crit),
            'reg_test': np.mean(acc_reg)}

    for tag, value in info.items():
        logger.scalar_summary(tag, value, epoch)

    ''' Log F1 per threshold'''
    f1s = np.mean(f1s, axis=0)
    for i, t in enumerate(eval_thresholds):
        info = {'f1_val_t_'+str(t): f1s[i]}

        for tag, value in info.items():
            logger.scalar_summary(tag, value, epoch)

    return np.mean(acc_loss), np.mean(acc_crit), np.mean(acc_reg), f1s
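logger.scalar_summary points to a TensorBoard-style logger defined elsewhere in the project. A minimal stand-in built on torch.utils.tensorboard (an assumption; the original likely ships its own Logger class):

from torch.utils.tensorboard import SummaryWriter

class Logger:
    def __init__(self, log_dir):
        self.writer = SummaryWriter(log_dir)

    def scalar_summary(self, tag, value, step):
        # Mirrors the logger.scalar_summary(tag, value, epoch) calls above.
        self.writer.add_scalar(tag, value, step)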
Example 8
def train_n_steps(model,
                  data,
                  hyper,
                  evals,
                  n_steps,
                  batch_size=128,
                  print_every=100,
                  eval_every=1000):
    # TODO: Start epoch timer at last epoch time from evals (but take into
    # account eval time)
    # TODO: Do not use globals, feed paths dictionary or object.
    epoch_loss = 0
    snapshot_count = len(evals["loss"])
    start_step = evals["step"][-1] if snapshot_count > 0 else 0
    start_step += 1

    epoch_timer = Timer()
    step_timer = Timer()
    eval_timer = Timer()

    for step in range(1, n_steps + 1):
        step_timer.start()
        X, Y = create_random_batch(data["xtrain"],
                                   data["ytrain"],
                                   batchsize=batch_size,
                                   maxlen=hyper["SAMPLE_LENGTH"])
        loss = train_step(model, X, Y)
        epoch_loss += loss

        # PRINTOUTS
        if step % print_every == 0:
            progress = 100 * float(step) / n_steps
            print_train_feedback(start_step + step,
                                 loss=loss,
                                 progress=progress,
                                 elapsed_time=epoch_timer.elapsed(),
                                 avg_time_ms=step_timer.elapsed() / batch_size)

        # EVALUATIONS AND SNAPSHOTS
        if step % eval_every == 0:
            epoch_time = epoch_timer.elapsed()

            print("=" * 60)
            snapshot_count += 1
            epoch_loss /= eval_every

            # EVALUATE - on train and validation data
            eval_timer.start()
            train_acc = evaluate_model(model,
                                       data["xtrain"],
                                       data["ytrain"],
                                       seq_maxlen=100)
            eval_time = eval_timer.elapsed()
            valid_acc = evaluate_model(model,
                                       data["xvalid"],
                                       data["yvalid"],
                                       seq_maxlen=100)
            print_epoch_feedback(train_acc, valid_acc, epoch_loss)

            # UPDATE EVALS
            update_evals(evals,
                         loss=epoch_loss,
                         train_acc=train_acc,
                         valid_acc=valid_acc,
                         train_time=epoch_time,
                         eval_time=eval_time,
                         alpha=model.alpha,
                         step=start_step + step)

            # SAVE SNAPSHOTS - of model parameters, and evals dict
            epoch_snapshot(model,
                           snapshot_count,
                           accuracy=valid_acc,
                           name=MODEL_NAME,
                           dir=SNAPSHOTS_DIR)
            obj2pickle(evals, EVALS_FILE)
            save_hyper_params(hyper, HYPERPARAMS_FILE)

            # RESET
            epoch_loss = 0
            epoch_timer.start()
            print("=" * 60)
    print("DONE")
Example 9
def main():
    parser = argparse.ArgumentParser(
        description='Neural Collaborative Filtering')
    parser.add_argument('--path',
                        nargs='?',
                        default='Data/',
                        help='Input data path.')
    parser.add_argument('--dataset',
                        nargs='?',
                        default='ml-1m',
                        help='Choose a dataset.')
    parser.add_argument('--epochs',
                        type=int,
                        default=100,
                        help='Number of epochs.')
    parser.add_argument('--batch_size',
                        type=int,
                        default=256,
                        help='Batch size.')
    parser.add_argument('--num_factors',
                        type=int,
                        default=32,
                        help='Embedding size of MF model.')
    parser.add_argument(
        '--layers',
        nargs='?',
        default='[64,64,32,16]',  # [64,32,16,8] [64,64,32,16]
        help=
        "MLP layers. Note that the first layer is the concatenation of user and item embeddings. So layers[0]/2 is the embedding size."
    )
    parser.add_argument(
        '--num_neg',
        type=int,
        default=4,
        help='Number of negative instances to pair with a positive instance.')
    parser.add_argument('--lr',
                        type=float,
                        default=0.0005,
                        help='Learning rate.')
    parser.add_argument('--verbose',
                        type=int,
                        default=1,
                        help='Show performance per X iterations')
    parser.add_argument('--out',
                        type=int,
                        default=1,
                        help='Whether to save the trained model.')
    parser.add_argument(
        '--pretrain_path',
        nargs='?',
        default='',
        help=
        'Specify the pretrain model file path. If empty, no pretrain will be used'
    )

    args = parser.parse_args()

    num_epochs = args.epochs
    batch_size = args.batch_size
    mf_dim = args.num_factors
    layers = eval(args.layers)  # note: ast.literal_eval would be safer here
    num_negatives = args.num_neg
    learning_rate = args.lr
    verbose = args.verbose
    pretrain_path = args.pretrain_path

    topK = 10
    evaluation_threads = 1
    print("NeuMF arguments: %s " % (args))
    model_out_file = 'ModelWeights/%s_NeuMF_%d_%s_%d.h5' % (
        args.dataset, mf_dim, layers, time())

    # Loading data
    t1 = time()
    data_loader = Dataloader(args.path + args.dataset)
    trainMatrix, testRatings, testNegatives = data_loader.trainMatrix, data_loader.testRatings, data_loader.testNegatives
    num_users, num_items = trainMatrix.shape
    print(
        "Load data done [%.1f s]. #user=%d, #item=%d, #train=%d, #test=%d" %
        (time() - t1, num_users, num_items, trainMatrix.nnz, len(testRatings)))

    # Build model
    model = NeuMF(num_users, num_items, mf_dim, layers)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    criterion = nn.BCELoss()

    # Load pretrain model
    if pretrain_path:
        model.load_state_dict(torch.load(pretrain_path))

    # Init performance
    (hits, ndcgs) = evaluate_model(model, testRatings, testNegatives, topK,
                                   evaluation_threads)
    hr, ndcg = np.array(hits).mean(), np.array(ndcgs).mean()
    print('Init: HR = %.4f, NDCG = %.4f' % (hr, ndcg))
    best_hr, best_ndcg, best_iter = hr, ndcg, -1
    if args.out > 0:
        torch.save(model.state_dict(), model_out_file)

    # Training model
    for epoch in range(num_epochs):
        t1 = time()
        # Generate training instances
        user_input, item_input, labels = get_train_instances(
            trainMatrix, num_negatives)

        # Training
        avg_loss = train(model, optimizer, criterion, user_input, item_input,
                         labels, batch_size)
        t2 = time()

        # Evaluation
        if epoch % verbose == 0:
            (hits, ndcgs) = evaluate_model(model, testRatings, testNegatives,
                                           topK, evaluation_threads)
            hr, ndcg = np.array(hits).mean(), np.array(ndcgs).mean()
            print(
                'Iteration %d [%.1f s]: HR = %.4f, NDCG = %.4f, loss = %.4f [%.1f s]'
                % (epoch, t2 - t1, hr, ndcg, avg_loss, time() - t2))
            if hr > best_hr:
                best_hr, best_ndcg, best_iter = hr, ndcg, epoch
                if args.out > 0:
                    torch.save(model.state_dict(), model_out_file)

    print("End. Best Iteration %d:  HR = %.4f, NDCG = %.4f. " %
          (best_iter, best_hr, best_ndcg))

    if args.out > 0:
        print("The best NeuMF model is saved to %s" % (model_out_file))
Example 10
def test(model, epoch):
    model.eval()
    # Accumulate losses across all poses; only per-pose buffers are reset
    # inside the loop (the original reset these per pose, so the logged
    # test loss covered only the last pose).
    f1s, acc_loss, acc_crit, acc_reg = [], [], [], []
    for i, dl_test_pose in enumerate(dl_test):
        targets, preds, mus = [], [], []
        print(
            '-----------------------------------Evaluating POSE {} ------------------------- '.format(poses[i]))
        for iter, (data, target, _) in enumerate(dl_test_pose):
            target = torch.clamp(target[:, aus], 0, 1)
            data, target = data.cuda().float(), target.cuda().float()

            if iter % args.log_interval == 0:
                print('Iter {}'.format(iter))

            with torch.no_grad():
                pred, mu, logvar = model(data)

                ''' Compute loss '''
                pred = torch.sigmoid(pred)
                crit_val = bce_loss(pred, target)
                reg_val = kld(mu, logvar) / len(data)
                loss = crit_val + args.beta * reg_val

                acc_crit.append(crit_val.item())
                acc_reg.append(reg_val.item())
                acc_loss.append(loss.item())

                preds.append(pred.cpu().numpy())
                targets.append(target.cpu().numpy())
                mus.append(mu.cpu().numpy())

        preds = np.asarray(np.concatenate(preds))
        '''
        print('preds min:{}, max:{}, mean:{}'.format(
            preds.min(), preds.max(), np.mean(preds)))
        '''
        targets = np.clip(np.rint(np.concatenate(targets)),
                          0, 1).astype(np.uint8)
        mus = np.concatenate(mus)

        ''' Evaluate model per pose'''
        f1_pose = []
        for t in eval_thresholds:
            preds_f = np.copy(preds)
            preds_f[np.where(preds_f < t)] = 0
            preds_f[np.where(preds_f >= t)] = 1

            preds_f = np.reshape(preds_f, (-1, n_classes))

            if t == 0.5:
                print('--------EVAL PRED------ t = {}'.format(t))
                _, _, f1, _, _ = evaluate_model(
                    targets, preds_f, verbose=True)
            else:
                _, _, f1, _, _ = evaluate_model(
                    targets, preds_f, verbose=False)

            f1_pose.append(f1)

        f1s.append(f1_pose)

    ''' Log validation loss '''
    info = {'loss_test': np.mean(acc_loss),
            'crit_test': np.mean(acc_crit),
            'reg_test': np.mean(acc_reg)}

    for tag, value in info.items():
        logger.scalar_summary(tag, value, epoch)

    ''' Log F1 per threshold'''
    f1s = np.mean(f1s, axis=0)
    for i, t in enumerate(eval_thresholds):
        info = {'f1_val_t_'+str(t): f1s[i]}

        for tag, value in info.items():
            logger.scalar_summary(tag, value, epoch)

    ''' Save mus '''
    with open('embedding_fera.pkl', 'wb') as f:
        pkl.dump({'mus': mus, 'targets': targets},
                 f, protocol=pkl.HIGHEST_PROTOCOL)

    return np.mean(acc_loss), np.mean(acc_crit), np.mean(acc_reg), f1s
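The pickled embeddings can be read back later for downstream analysis or visualisation:

import pickle as pkl

with open('embedding_fera.pkl', 'rb') as f:
    saved = pkl.load(f)
mus, targets = saved['mus'], saved['targets']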