def main(args):
    # model
    logger = get_logger(args.output_folder, "nearest_neighbors")
    #raise NotImplementedError("TODO: build model and load weights snapshot")
    # Device configuration
    data_root = args.data_folder
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('#### This is the device used: ', device, '####')
    model = ResNet18Backbone(pretrained=False).to(device)
    model.load_state_dict(torch.load(args.weights_init, map_location=device), strict=False)
    # dataset
    val_transform = Compose([Resize(args.size), CenterCrop((args.size, args.size)), ToTensor()])
    val_data = DataReaderPlainImg(os.path.join(data_root, str(args.size), "val"), transform=val_transform)
    val_loader = torch.utils.data.DataLoader(val_data, batch_size=1, shuffle=False, num_workers=2,
                                             pin_memory=True, drop_last=True, collate_fn=custom_collate)
    # work with the underlying dataset directly so samples can be looked up by position
    val_loader = val_loader.dataset
    #raise NotImplementedError("Load the validation dataset (crops), use the transform above.")
    print(val_loader)
    # choose/sample which images you want to compute the NNs of.
    # You can try different ones and pick the most interesting ones.

    #query_indices = [7,8,10,11,54,200,300,400,500,600,1000,95]
    query_indices = []
    nns = []
    list_images = val_loader.image_files
    for i in range(len(list_images)):
        if list_images[i] == '41.jpg':
            query_indices.append(i)
            break
    print(list_images[i], i, query_indices)
    
    for idx, img in enumerate(val_loader):
        if idx not in query_indices:
            continue
        print("Computing NNs for sample {}".format(idx))
        closest_idx, closest_dist, images_names = find_nn(model, img, val_loader, 5, device)
        #raise NotImplementedError("TODO: retrieve the original NN images, save them and log the results.")
        logger.info("names of closest images are %s " % (str(images_names)))
        logger.info("distances to the closest images are %s " % (str(closest_dist)))
        image_path = check_dir(os.path.join(args.output_folder, str(idx), 'auto_algorithm'))
        for i in range(1, len(images_names)):
            save_image(val_loader[closest_idx[i]],
                       os.path.join(image_path, 'distance_rank_' + str(i) + ' ' + images_names[i]))

        save_image(img, os.path.join(image_path, 'source_image_' + images_names[0]))
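# Note: find_nn is called by the examples in this listing but its body is not
# included here. The sketch below is only an assumption of how a helper
# compatible with Example 1's call site could look (embed every crop with the
# backbone, rank by Euclidean distance, return indices, distances and file
# names); the actual helper used in the lab may differ.
import numpy as np
import torch


def find_nn(model, query_img, dataset, k, device):
    model.eval()
    with torch.no_grad():
        # embedding of the query crop (add a batch dimension first)
        query_feat = model(query_img.unsqueeze(0).to(device)).flatten(1)

        dists, names = [], []
        for i in range(len(dataset)):
            feat = model(dataset[i].unsqueeze(0).to(device)).flatten(1)
            dists.append(torch.dist(query_feat, feat).item())
            names.append(dataset.image_files[i])

    # keep k+1 entries because the query itself is its own nearest neighbour
    closest_idx = np.argsort(dists)[:k + 1].tolist()
    closest_dist = [dists[i] for i in closest_idx]
    images_names = [names[i] for i in closest_idx]
    return closest_idx, closest_dist, images_names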
Example #2
def main(args):
    # model
    model = ResNet18Backbone(pretrained=False).cuda()
    model.load_state_dict(torch.load("/trained_models/best.pth"))
    #raise NotImplementedError("TODO: build model and load weights snapshot")

    # dataset
    data_root = args.data_folder
    val_transform = Compose(
        [Resize(args.size),
         CenterCrop((args.size, args.size)),
         ToTensor()])
    val_data = DataReaderPlainImg(os.path.join(data_root, str(args.size),
                                               "val"),
                                  transform=val_transform)
    val_loader = torch.utils.data.DataLoader(val_data,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=2,
                                             pin_memory=True,
                                             drop_last=True,
                                             collate_fn=custom_collate)
    #raise NotImplementedError("Load the validation dataset (crops), use the transform above.")

    # choose/sample which images you want to compute the NNs of.
    # You can try different ones and pick the most interesting ones.
    query_indices = [7, 17, 71, 77, 117, 177]
    nns = []
    for idx, img in enumerate(val_loader):
        if idx not in query_indices:
            continue
        print("Computing NNs for sample {}".format(idx))
        closest_idx, closest_dist = find_nn(model, img, val_loader, 5)
        for i in closest_idx:
            # DataLoader objects are not indexable; look the image up in the underlying dataset
            nns.append(val_loader.dataset[i])

        raise NotImplementedError(
            "TODO: retrieve the original NN images, save them and log the results."
        )
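# DataReaderPlainImg is used by every example here but is not defined in this
# listing. Below is a minimal sketch consistent with how it is used (items are
# single transformed images, and Example 1 reads an image_files attribute);
# the real class from the lab code base may differ.
import os
from PIL import Image
from torch.utils.data import Dataset


class DataReaderPlainImg(Dataset):
    def __init__(self, root, transform=None):
        self.root = root
        self.transform = transform
        # sorted file list so that indices are reproducible across runs
        self.image_files = sorted(os.listdir(root))

    def __len__(self):
        return len(self.image_files)

    def __getitem__(self, idx):
        img = Image.open(os.path.join(self.root, self.image_files[idx])).convert("RGB")
        if self.transform is not None:
            img = self.transform(img)
        return img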
Example #3
def main(args):
    # model
    model = ResNet18Backbone(pretrained=False).cuda()
    model.load_state_dict(
        torch.load('epoch_1.pth', map_location=torch.device('cuda')))
    # raise NotImplementedError("TODO: build model and load weights snapshot")

    # dataset
    data_root = "/home/nallapar/workspace/ex1/crops"
    val_transform = Compose(
        [Resize(args.size),
         CenterCrop((args.size, args.size)),
         ToTensor()])
    val_data = DataReaderPlainImg(os.path.join(data_root, str(args.size),
                                               "val"),
                                  transform=val_transform)
    val_loader = torch.utils.data.DataLoader(val_data,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=2,
                                             pin_memory=True,
                                             drop_last=True)
    # raise NotImplementedError("Load the validation dataset (crops), use the transform above.")

    # choose/sample which images you want to compute the NNs of.
    # You can try different ones and pick the most interesting ones.
    query_indices = [25, 49, 88, 103]
    nns = []
    for idx, img in enumerate(val_loader):
        if idx not in query_indices:
            continue
        print("Computing NNs for sample {}".format(idx))
        closest_idx, closest_dist = find_nn(model, img, val_loader, 5, idx)
        _, axes = plt.subplots(1, 2)
        # permute (C, H, W) -> (H, W, C) so imshow receives a proper RGB array
        axes[0].imshow(img[0].permute(1, 2, 0).numpy())
        axes[1].imshow(val_loader.dataset[closest_idx].permute(1, 2, 0).numpy())
        plt.savefig(f"orig_and_nn_{idx}.jpg")
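# ResNet18Backbone is also not shown in this listing. It is presumably a
# torchvision ResNet-18 whose classification head has been removed so that
# forward() returns a 512-d feature vector; the sketch below is an assumption,
# not the lab's actual implementation.
import torch.nn as nn
from torchvision.models import resnet18


class ResNet18Backbone(nn.Module):
    def __init__(self, pretrained=False):
        super().__init__()
        net = resnet18(pretrained=pretrained)
        # keep everything up to and including the global average pooling layer
        self.features = nn.Sequential(*list(net.children())[:-1])

    def forward(self, x):
        return self.features(x).flatten(1)  # (N, 512)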
Example #4
def main(args):
    # Logging to the file and stdout
    logger = get_logger(args.output_folder, args.exp_name)

    # Device configuration
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('#### This is the device used: ', device, '####')
    # build model and load weights
    model = ResNet18Backbone(pretrained=False).to(device)


    model.load_state_dict(torch.load(args.weights_init,
                                     map_location=device)['model'],
                          strict=False)
    #raise NotImplementedError("TODO: load weight initialization")

    # load dataset
    data_root = args.data_folder
    train_transform, val_transform = get_transforms_pretraining(args)
    train_data = DataReaderPlainImg(os.path.join(data_root, str(args.size),
                                                 "train"),
                                    transform=train_transform)
    val_data = DataReaderPlainImg(os.path.join(data_root, str(args.size),
                                               "val"),
                                  transform=val_transform)
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=args.bs,
                                               shuffle=True,
                                               num_workers=2,
                                               pin_memory=True,
                                               drop_last=True,
                                               collate_fn=custom_collate)

    print(train_loader)
    val_loader = torch.utils.data.DataLoader(val_data,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=2,
                                             pin_memory=True,
                                             drop_last=True,
                                             collate_fn=custom_collate)

    # loss function: CrossEntropyLoss combines nn.LogSoftmax() and nn.NLLLoss() in a single class
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=1e-4)

    expdata = "  \n".join(
        ["{} = {}".format(k, v) for k, v in vars(args).items()])
    logger.info(expdata)
    logger.info('train_data {}'.format(train_data.__len__()))
    logger.info('val_data {}'.format(val_data.__len__()))

    best_val_loss = np.inf
    best_val_acc = -np.inf  # accuracy is maximised, so start from -inf
    # Train-validate for one epoch. You don't have to run it for 100 epochs, preferably until it starts overfitting.
    train_loss_list = []
    train_acc_list = []
    val_loss_list = []
    val_acc_list = []
    for epoch in range(40):
        print("Epoch {}".format(epoch))
        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, device)
        val_loss, val_acc = validate(val_loader, model, criterion, device)

        logger.info(
            "Epoch %d  train_loss %.3f train_acc %.3f val_loss: %.3f val_acc: %.3f"
            % (epoch, train_loss, train_acc, val_loss, val_acc))
        train_loss_list.append(train_loss)
        train_acc_list.append(train_acc)
        val_loss_list.append(val_loss)
        val_acc_list.append(val_acc)
        # save for every epoch
        if not os.path.exists(args.model_folder):
            os.makedirs(args.model_folder)
        path_model = os.path.join(args.model_folder,
                                  'checkpoint_' + str(epoch) + '_.pth')
        torch.save(model.state_dict(), path_model)

        # save model
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            path_model = os.path.join(
                args.model_folder,
                'checkpoint_best_val_' + str(epoch) + '_.pth')
            torch.save(model.state_dict(), path_model)
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            path_model = os.path.join(
                args.model_folder,
                'checkpoint_best_acc_' + str(epoch) + '_.pth')
            torch.save(model.state_dict(), path_model)

        save_fig(train_loss_list, 'train_loss')
        save_fig(train_acc_list, 'train_acc')
        save_fig(val_loss_list, 'val_loss')
        save_fig(val_acc_list, 'val_acc')

        pd.DataFrame({
            'train_loss': train_loss_list
        }).to_csv(os.path.join(args.plots_folder, 'train_loss.csv'),
                  index=False)
        pd.DataFrame({
            'train_acc': train_acc_list
        }).to_csv(os.path.join(args.plots_folder, 'train_acc.csv'),
                  index=False)
        pd.DataFrame({
            'val_loss': val_loss_list
        }).to_csv(os.path.join(args.plots_folder, 'val_loss.csv'), index=False)
        pd.DataFrame({
            'val_acc': val_acc_list
        }).to_csv(os.path.join(args.plots_folder, 'val_acc.csv'), index=False)
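# save_fig is called above but not defined in this listing. A minimal sketch is
# given below, assuming it plots one per-epoch curve and writes it to disk; the
# "folder" argument and its default value are hypothetical, since the real
# helper's output location is not shown.
import os
import matplotlib.pyplot as plt


def save_fig(values, name, folder="plots"):
    os.makedirs(folder, exist_ok=True)
    plt.figure()
    plt.plot(range(1, len(values) + 1), values)
    plt.xlabel("epoch")
    plt.ylabel(name)
    plt.title(name)
    plt.savefig(os.path.join(folder, name + ".png"))
    plt.close()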
Example #5
        ApplyAfterRotations(RandomApply([ColorJitter(0.4, 0.4, 0.4, 0.2)], p=0.8)),
        ToTensorAfterRotations(),
        ApplyAfterRotations(Normalize(statistics['mean'], statistics['std']))
    ])
    val_transform = Compose([Resize(size), RandomCrop(size, pad_if_needed=True),
                             ImgRotation(), ToTensorAfterRotations(),
                             ApplyAfterRotations(Normalize(statistics['mean'], statistics['std']))])
    return train_transform, val_transform
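

# The transforms used above (ImgRotation, ApplyAfterRotations,
# ToTensorAfterRotations) are not defined in this listing. The sketch below
# shows how such transforms are commonly written for rotation-prediction
# pretraining (one image becomes its 0/90/180/270 degree rotations with labels
# 0-3); treat the details as assumptions rather than the lab's exact code.
import torchvision.transforms.functional as TF


class ImgRotation:
    """Produce the four rotations of an image together with their labels."""
    def __call__(self, img):
        imgs = [TF.rotate(img, angle) for angle in (0, 90, 180, 270)]
        return imgs, [0, 1, 2, 3]


class ApplyAfterRotations:
    """Apply a per-image transform to each rotated copy."""
    def __init__(self, transform):
        self.transform = transform

    def __call__(self, data):
        imgs, labels = data
        return [self.transform(i) for i in imgs], labels


class ToTensorAfterRotations:
    """Convert each rotated copy to a tensor, keeping the labels."""
    def __call__(self, data):
        imgs, labels = data
        return [TF.to_tensor(i) for i in imgs], labels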


model = ResNet18Backbone(pretrained=False)
criterion = nn.CrossEntropyLoss()

data_root = "/home/mbengt/workspace/dl_lab/crops"
train_transform, val_transform = get_transforms_pretraining()
train_data = DataReaderPlainImg(os.path.join(data_root, str(256), "train"), transform=train_transform)
val_data = DataReaderPlainImg(os.path.join(data_root, str(256), "val"), transform=val_transform)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=16, shuffle=True, num_workers=2,
                                           pin_memory=True, drop_last=True, collate_fn=custom_collate)
val_loader = torch.utils.data.DataLoader(val_data, batch_size=1, shuffle=False, num_workers=2,
                                         pin_memory=True, drop_last=True, collate_fn=custom_collate)

train_losses, val_losses, val_accs = [], [], []
for i in range(10):
    with torch.no_grad():
        losses = []
        model.load_state_dict(torch.load(f'epoch_{i}.pth', map_location=torch.device('cpu')))
        model.eval()  # evaluation mode: freeze batch-norm statistics and disable dropout
        for X_train, y_train in train_loader:
            y_preds = model(X_train)
            loss = criterion(y_preds, y_train)
            _, y_preds = y_preds.max(1)
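# custom_collate, passed to most DataLoaders in these examples, is not shown.
# For the rotation-pretraining loaders each dataset item is a (list of rotated
# crops, list of labels) pair, so a collate function along the lines below
# would flatten a batch into one image tensor and one label tensor (matching
# the `for X_train, y_train in train_loader` loop above). This is an assumption
# about its behaviour, not the lab's actual implementation.
import torch


def custom_collate(batch):
    images, labels = [], []
    for imgs, lbls in batch:
        images.extend(imgs)
        labels.extend(lbls)
    return torch.stack(images), torch.tensor(labels)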
Example #6
def main(args):

    # Logging to the file and stdout
    logger = get_logger(args.output_folder, args.exp_name)

    # build model and load weights
    model = ResNet18Backbone(pretrained=False).cuda()

    model.load_state_dict(torch.load('pretrain_weights_init.pth')['model'])
    #raise NotImplementedError("TODO: load weight initialization")

    # load dataset
    data_root = args.data_folder
    train_transform, val_transform = get_transforms_pretraining(args)
    train_data = DataReaderPlainImg(os.path.join(data_root, str(args.size),
                                                 "train"),
                                    transform=train_transform)
    val_data = DataReaderPlainImg(os.path.join(data_root, str(args.size),
                                               "val"),
                                  transform=val_transform)
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=args.bs,
                                               shuffle=True,
                                               num_workers=2,
                                               pin_memory=True,
                                               drop_last=True,
                                               collate_fn=custom_collate)
    val_loader = torch.utils.data.DataLoader(val_data,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=2,
                                             pin_memory=True,
                                             drop_last=True,
                                             collate_fn=custom_collate)

    # loss function
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=1e-4)

    expdata = "  \n".join(
        ["{} = {}".format(k, v) for k, v in vars(args).items()])
    logger.info(expdata)
    logger.info('train_data {}'.format(train_data.__len__()))
    logger.info('val_data {}'.format(val_data.__len__()))

    best_val_loss = np.inf
    # Train-validate for one epoch. You don't have to run it for 100 epochs, preferably until it starts overfitting.
    for epoch in range(100):
        print("Epoch {}".format(epoch))
        train(train_loader, model, criterion, optimizer)
        val_loss, val_acc = validate(val_loader, model, criterion)

        # save the model whenever a new best validation loss is reached
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            PATH = 'better_model.pth'
            torch.save(model.state_dict(), PATH)
def main(args):
    # Logging to the file and stdout
    logger = get_logger(args.output_folder, args.exp_name)

    # build model and load weights
    model = ResNet18Backbone(pretrained=False).cuda()
    model.load_state_dict(torch.load('pretrain_weights_init.pth', map_location=torch.device('cuda'))['model'])

    # load dataset
    data_root = args.data_folder
    train_transform, val_transform = get_transforms_pretraining(args)
    train_data = DataReaderPlainImg(os.path.join(data_root, str(args.size), "train"), transform=train_transform)
    val_data = DataReaderPlainImg(os.path.join(data_root, str(args.size), "val"), transform=val_transform)
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.bs, shuffle=True, num_workers=2,
                                               pin_memory=True, drop_last=True, collate_fn=custom_collate)
    val_loader = torch.utils.data.DataLoader(val_data, batch_size=1, shuffle=False, num_workers=2,
                                             pin_memory=True, drop_last=True, collate_fn=custom_collate)

    # loss function
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4)

    expdata = "  \n".join(["{} = {}".format(k, v) for k, v in vars(args).items()])
    logger.info(expdata)
    logger.info('train_data {}'.format(train_data.__len__()))
    logger.info('val_data {}'.format(val_data.__len__()))

    best_val_loss = np.inf
    best_model = None
    # Train-validate for one epoch. You don't have to run it for 100 epochs, preferably until it starts overfitting.
    losses, val_losses, val_accs = [], [], []
    for epoch in range(10):
        print("Epoch {}".format(epoch))
        loss = train(train_loader, model, criterion, optimizer)
        losses.append(loss)
        val_loss, val_acc = validate(val_loader, model, criterion)
        val_losses.append(val_loss)
        val_accs.append(val_acc)

        # save model
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model = copy.deepcopy(model)  # requires `import copy`; a plain assignment would keep tracking later updates
            torch.save(model.state_dict(), f'epoch_{epoch}.pth')
#             raise NotImplementedError("TODO: save model if a new best validation error was reached")
    _, axes = plt.subplots(1, 3, figsize=(20, 10))  # three panels: train loss, val loss, val accuracy
    axes[0].plot(range(1, len(losses) + 1), losses)
    axes[0].set_xlabel("Epochs")
    axes[0].set_ylabel("Loss")
    axes[0].set_title("Train")

    axes[1].plot(range(1, len(val_losses) + 1), val_losses)
    axes[1].set_xlabel("Epochs")
    axes[1].set_ylabel("Loss")
    axes[1].set_title("Validation losses")

    axes[2].plot(range(1, len(val_accs) + 1), val_accs)
    axes[2].set_xlabel("Epochs")
    axes[2].set_ylabel("Accuracy")
    axes[2].set_title("Validation accuracies")
    plt.savefig("Results.png")
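

# train and validate are referenced throughout these examples but are not part
# of this listing. A minimal sketch matching the call sites of the last example
# (train returns the mean loss, validate returns mean loss and accuracy, data
# moved to the GPU) is given below; it assumes the model's forward pass yields
# rotation-class logits, and the lab's real functions may differ.
import torch


def train(loader, model, criterion, optimizer):
    model.train()
    losses = []
    for images, labels in loader:
        images, labels = images.cuda(), labels.cuda()
        optimizer.zero_grad()
        loss = criterion(model(images), labels)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
    return sum(losses) / len(losses)


def validate(loader, model, criterion):
    model.eval()
    losses, correct, total = [], 0, 0
    with torch.no_grad():
        for images, labels in loader:
            images, labels = images.cuda(), labels.cuda()
            outputs = model(images)
            losses.append(criterion(outputs, labels).item())
            correct += (outputs.argmax(1) == labels).sum().item()
            total += labels.size(0)
    return sum(losses) / len(losses), correct / total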