Example #1
import os


def get_config(return_unparsed=False):
    """Gets config and creates data_dir."""
    config, unparsed = parse_config()

    # If we have unparsed args, print usage and exit
    if len(unparsed) > 0 and not return_unparsed:
        print_usage()
        exit(1)

    def append_data_dir(p):
        return os.path.join(config.data_dir, p)

    # Append data_dir to all filepaths
    config.pre_save_file = append_data_dir(config.pre_save_file)
    config.raw_csv_file = append_data_dir(config.raw_csv_file)
    config.embeddings_model = append_data_dir(config.embeddings_model)
    config.embeddings_file = append_data_dir(config.embeddings_file)

    # Create data_dir if it doesn't exist
    if not os.path.exists(config.data_dir):
        os.makedirs(config.data_dir)

    if return_unparsed:
        return config, unparsed

    return config
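
For context, parse_config is not shown above; a minimal sketch of what it
might look like, assuming an argparse-based setup where parse_known_args()
produces the (config, unparsed) pair used by get_config (the default values
below are hypothetical):

import argparse

def parse_config():
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", type=str, default="./data")
    parser.add_argument("--pre_save_file", type=str, default="pre_save.pkl")
    parser.add_argument("--raw_csv_file", type=str, default="raw.csv")
    parser.add_argument("--embeddings_model", type=str, default="embeddings.model")
    parser.add_argument("--embeddings_file", type=str, default="embeddings.npy")
    # parse_known_args() returns (namespace, list_of_unrecognized_args)
    return parser.parse_known_args()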
Example #2
def main(config):
    pass  # Body omitted in this example.

if __name__ == "__main__":

    # ----------------------------------------
    # Parse configuration
    config, unparsed = get_config()
    # If we have unparsed arguments, print usage and exit
    if len(unparsed) > 0:
        print_usage()
        exit(1)
    print_config(config)
    main(config)
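
The helpers print_usage and print_config are assumed by these examples but
never shown; a minimal sketch of plausible implementations (my own guess at
their behavior):

def print_usage():
    print("Unrecognized command-line arguments were given; "
          "run with --help to list the valid options.")

def print_config(config):
    # Dump every parsed option, one per line
    for key, value in sorted(vars(config).items()):
        print("{}: {}".format(key, value))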
Example #3
def main(cfg):
    # (Earlier part of main is omitted in this example.)
    # Collect per-pair models, inliers, and timings from the results
    timings_list = []
    for i, pair in enumerate(pairs_per_th['0.0']):
        model_dict[pair] = result[i][0]
        inl_dict[pair] = result[i][1]
        timings_list.append(result[i][2])

    # Create the model directory if it does not exist
    if not os.path.exists(get_geom_path(cfg)):
        os.makedirs(get_geom_path(cfg))

    # Finally save packed models
    save_h5(model_dict, get_geom_file(cfg))
    save_h5(inl_dict, get_geom_inl_file(cfg))

    # Save computational cost
    save_h5({'cost': np.mean(timings_list)}, get_geom_cost_file(cfg))
    print('Geometry cost (averaged over image pairs): {:0.2f} sec'.format(
        np.mean(timings_list)))


if __name__ == '__main__':
    cfg, unparsed = get_config()

    # If we have unparsed arguments, print usage and exit
    if len(unparsed) > 0:
        print(unparsed)
        print_usage()
        exit(1)

    main(cfg)
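
save_h5 packs a dict of arrays into an HDF5 file; its implementation is not
part of the example, but a plausible sketch using h5py would be:

import h5py

def save_h5(dict_to_save, filename):
    """Write each dict entry as a dataset in a freshly created HDF5 file."""
    with h5py.File(filename, "w") as f:
        for key, value in dict_to_save.items():
            f.create_dataset(key, data=value)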
Example #4
            # Compute loss and store as numpy
            loss = data_loss(logits, y)
            te_loss += [loss.cpu().numpy()]
            # Compute accuracy and store as numpy
            pred = torch.argmax(logits, dim=1)
            acc = torch.mean(torch.eq(pred, y).float()) * 100.0
            te_acc += [acc.cpu().numpy()]

    # Report Test loss and accuracy
    print("Test Loss = {}".format(np.mean(te_loss)))
    print("Test Accuracy = {}%".format(np.mean(te_acc)))


def main(config):
    if config.mode == "train":
        train(config)
    elif config.mode == "test":
        test(config)
    else:
        raise ValueError("Unknown run mode \"{}\"".format(config.mode))


if __name__ == "__main__":
    config, unparsed = config_args.get()
    # Verify all arguments were parsed before continuing.
    if len(unparsed) == 0:
        main(config)
    else:
        print("Unparsed arguments:", unparsed)
        config_args.print_usage()
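
data_criterion (which produces the data_loss above) and model_criterion are
defined elsewhere; a plausible sketch, assuming a cross-entropy data term and
an L2 weight penalty as the model term (the exact forms, and the
config.l2_reg name, are assumptions):

import torch

def data_criterion(config):
    """Loss on the data: cross-entropy between logits and integer labels."""
    return torch.nn.CrossEntropyLoss()

def model_criterion(config):
    """Loss on the model parameters: L2 penalty on the weights."""
    def model_loss(model):
        loss = 0
        for name, param in model.named_parameters():
            if "weight" in name:
                loss = loss + torch.sum(param ** 2)
        # config.l2_reg is a hypothetical hyperparameter name
        return loss * config.l2_reg
    return model_loss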
Example #5
def train():
    # ----------------------------------------
    # Parse configuration
    config, unparsed = get_config()
    # If we have unparsed arguments, print usage and exit
    if len(unparsed) > 0:
        print_usage()
        exit(1)
    print_config(config)
    print("Number of train samples: ", len(train_data))
    print("Number of test samples: ", len(test_data))
    #print("Detected Classes are: ", train_data.class_to_idx) # classes are detected by folder structure

    # Create log directory and save directory if it does not exist
    if not os.path.exists(config.log_dir):
        os.makedirs(config.log_dir)
    if not os.path.exists(config.save_dir):
        os.makedirs(config.save_dir)

    # Initialize training
    iter_idx = -1  # counter; incremented to 0 on the first batch
    best_loss = -1  # sentinel value: no best loss recorded yet
    # Prepare checkpoint file and model file to save and load from
    checkpoint_file = os.path.join(config.save_dir, "checkpoint.pth")
    bestmodel_file = os.path.join(config.save_dir, "best_model.pth")
    savemodel_file = os.path.join(config.save_dir, "save_model.pth")

    model = sosnet_model.SOSNet32x32().cuda()
    optimizer = optim.SGD(model.parameters(), lr=0.1, weight_decay=0.0001)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=10,
                                                gamma=0.8)
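    # StepLR above multiplies the learning rate by gamma (0.8) once every
    # step_size (10) epochs.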

    seed = 42
    torch.manual_seed(seed)
    np.random.seed(seed)
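    # Note: these seeds fix shuffling and later random ops, but the model
    # above was already initialized before they were set.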
    # Create loss objects
    data_loss = data_criterion(config)
    model_loss = model_criterion(config)
    #print("train_data: ", (train_data))
    print("train_data_loader:", train_data_loader)
    print("test_data_loader:", test_data_loader)

    fpr_per_epoch = []
    # Training loop
    for epoch in range(config.num_epoch):
        # For each iteration
        prefix = "Training Epoch {:3d}: ".format(epoch)
        print("len(train_data_loader):", len(train_data_loader))
        for batch_idx, (data_a, data_p,
                        data_n) in tqdm(enumerate(train_data_loader)):
            print("batch_idx:", batch_idx)
            print("len(train_data_loader):", len(train_data_loader))
            data_a = data_a.unsqueeze(1).float().cuda()
            data_p = data_p.unsqueeze(1).float().cuda()
            data_n = data_n.unsqueeze(1).float().cuda()
            print("data_a.shape:", data_a.shape)
            print("data_p.shape:", data_p.shape)
            print("data_n.shape:", data_n.shape)
            out_a, out_p, out_n = model(data_a), model(data_p), model(data_n)
            print("out_a:", out_a)
            print("out_p:", out_p)
            print("out_n:", out_n)
            loss = F.triplet_margin_loss(out_a,
                                         out_p,
                                         out_n,
                                         margin=2,
                                         swap=True)
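            # Triplet loss: push the anchor-negative distance at least
            # `margin` beyond the anchor-positive distance; swap=True also
            # considers the positive-negative distance and uses the harder one.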
            # Track the best (lowest) loss seen so far; compare Python floats
            # via .item() rather than graph-attached tensors.
            # NOTE: "best" here is judged on a single training batch, which is noisy.
            loss_val = loss.item()
            if best_loss == -1 or loss_val < best_loss:
                best_loss = loss_val
                # Save a full checkpoint for the best model so far
                torch.save(
                    {
                        "iter_idx": iter_idx,
                        "best_loss": best_loss,
                        "model": model.state_dict(),
                        "optimizer": optimizer.state_dict(),
                    }, bestmodel_file)
                # Also save the bare weights
                torch.save(model.state_dict(), savemodel_file)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # Save an end-of-epoch checkpoint
        torch.save(
            {
                "iter_idx": iter_idx,
                "best_loss": best_loss,
                "model": model.state_dict(),
                "optimizer": optimizer.state_dict(),
            }, checkpoint_file)

        # Evaluate the network on the test pairs after each epoch
        model.eval()

        l = np.empty((0, ))
        d = np.empty((0, ))
        with torch.no_grad():
            for batch_idx, (data_l, data_r, lbls) in enumerate(test_data_loader):
                data_l = data_l.unsqueeze(1).float().cuda()
                data_r = data_r.unsqueeze(1).float().cuda()
                out_l, out_r = model(data_l), model(data_r)
                # L2 distance between the two patch descriptors
                dists = torch.norm(out_l - out_r, 2, 1).cpu().numpy()
                l = np.hstack((l, lbls.numpy()))
                d = np.hstack((d, dists))

        # FPR95 code from Yurun Tian
        d = torch.from_numpy(d)
        l = torch.from_numpy(l)
        dist_pos = d[l == 1]
        dist_neg = d[l != 1]
        dist_pos, _ = torch.sort(dist_pos)
        # Distance threshold at 95% recall of the positive pairs,
        # clamped so the index stays in range
        loc_thr = min(int(np.ceil(dist_pos.numel() * 0.95)),
                      dist_pos.numel() - 1)
        thr = dist_pos[loc_thr]
        # FPR95: fraction of negative pairs accepted at that threshold
        fpr95 = float(dist_neg.le(thr).sum()) / dist_neg.numel()
        print("Epoch {}: FPR95 = {:.4f}".format(epoch, fpr95))
        fpr_per_epoch.append([epoch, fpr95])
        scheduler.step()
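        # Overwrite the on-disk FPR95 log at every epoch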
        np.savetxt('fpr.txt', np.array(fpr_per_epoch), delimiter=',')
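
A standalone version of the FPR95 metric computed above is convenient for
unit testing; this is a sketch in plain NumPy (the function name and
signature are my own):

import numpy as np

def fpr95(dists, labels):
    """False positive rate at 95% true positive recall.

    dists: descriptor distance per pair; labels: 1 marks a matching pair.
    """
    pos = np.sort(dists[labels == 1])
    neg = dists[labels != 1]
    # Distance threshold that accepts 95% of the positive pairs
    thr = pos[min(int(np.ceil(pos.size * 0.95)), pos.size - 1)]
    return float((neg <= thr).sum()) / neg.size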