def test_train_function():
    """
    Test the ability of the 'train' function to train a simple network (1 epoch).
    """

    from src.run_model import _train

    trainX = np.array([[10, 0], [10, 0.1], [10, -0.1], [9.9, 0], [10.1, 0],
                       [-10, 0], [-10, 0.1], [-10, -0.1], [-9.9, 1],
                       [-10.1, 0]])
    trainY = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])

    train_dataset = MyDataset(trainX, trainY)
    train_loader = DataLoader(train_dataset, batch_size=1, shuffle=False)

    model = Basic_Model(weight_init=0.001)

    # Symmetry breaking to avoid potentially undefined argmax() behavior on equal outputs
    model.out.bias.data[0] = 0.01

    optimizer = optim.SGD(model.parameters(), lr=1e-1)

    _, _est_loss, _est_acc = _train(model, train_loader, optimizer)
    _est_values = np.array([_est_loss, _est_acc])

    _true_values = np.array([0.70831307, 50.0])

    assert np.allclose(_true_values, _est_values)
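
# The fixtures used throughout these tests (`MyDataset`, `Basic_Model`) are defined
# elsewhere in each project and are not shown in these examples. A minimal sketch of
# what such helpers might look like is given below for context; the layer sizes and the
# constant initialization scheme are assumptions, only the `out` attribute is taken
# from the tests themselves.
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset


class MyDataset(Dataset):
    """Wrap numpy feature/label arrays as float32/long tensors."""

    def __init__(self, X, y):
        self.X = torch.as_tensor(np.asarray(X), dtype=torch.float32)
        self.y = torch.as_tensor(np.asarray(y), dtype=torch.long)

    def __len__(self):
        return len(self.y)

    def __getitem__(self, idx):
        return self.X[idx], self.y[idx]


class Basic_Model(nn.Module):
    """Tiny 2-input, 2-class network with constant weight initialization."""

    def __init__(self, weight_init=0.001):
        super().__init__()
        self.hidden = nn.Linear(2, 2)
        self.out = nn.Linear(2, 2)  # the tests nudge `out.bias` to break symmetry
        for layer in (self.hidden, self.out):
            nn.init.constant_(layer.weight, weight_init)
            nn.init.zeros_(layer.bias)

    def forward(self, x):
        return self.out(torch.relu(self.hidden(x)))
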
def test_test_function():
    """
    Test the ability of the 'test' function to compute the loss and accuracy.
    """
    from src.run_model import _test

    testX = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]])
    testY = np.array([1, 0, 0, 0])

    test_dataset = MyDataset(testX, testY)
    test_loader = DataLoader(test_dataset,
                             batch_size=len(test_dataset),
                             shuffle=False)

    model = Basic_Model(weight_init=1)

    # Symmetry breaking to avoid potentially undefined argmax() behavior on equal outputs
    model.out.bias.data[0] = 0.1
    model.out.bias.data[1] = -0.1

    _est_loss, _est_acc = _test(model, test_loader)
    _est_values = np.array([_est_loss, _est_acc])

    _true_values = np.array([0.648139, 75.0])

    assert np.allclose(_true_values, _est_values)
Example no. 3
def test_train_function():
    """
	Test the ability of the 'train' function to train a simple network (1 epoch).
	"""

    from src.run_model import _train

    trainX = np.array([[10, 0], [10, 0.1], [10, -0.1], [9.9, 0], [10.1, 0],
                       [-10, 0], [-10, 0.1], [-10, -0.1], [-9.9, 1],
                       [-10.1, 0]])
    trainY = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])

    train_dataset = MyDataset(trainX, trainY)
    train_loader = DataLoader(train_dataset, batch_size=1, shuffle=False)

    model = Basic_Model(weight_init=0.001)

    optimizer = optim.SGD(model.parameters(), lr=1e-1)

    _, _est_loss, _est_acc = _train(model, train_loader, optimizer)
    _est_values = np.array([_est_loss, _est_acc])

    _true_values = np.array([0.70840007, 50.0])

    assert np.allclose(_true_values, _est_values)
Example no. 4
def test_data(models, oerrors, data, n_shuffle_sets, pred_variable='rating', seed=12345):
    """

    :param data:
    :param n_shuffle_sets:
    :param pred_variable:
    :return:
    """
    shuffle_sets = []
    # Random seed to make results reproducible
    np.random.seed(seed)
    # np.random.shuffle works in place, so operate on a copy of the column
    rating = np.array(data[pred_variable]).copy()
    for i in range(n_shuffle_sets):
        np.random.shuffle(rating)
        shuffled_data = data.copy()
        shuffled_data[pred_variable] = rating
        shuffle_sets.append(MyDataset(shuffled_data))

    # Evaluate shuffle sets
    oerrors = np.array([oerrors]).T
    shuffled_errors = np.empty((len(models), n_shuffle_sets))
    for i, data_set in enumerate(shuffle_sets):
        print("Run shuffle set no. %i" % (i + 1))
        feats = data_set.get_X_oc_()
        targets = data_set.targets
        assert feats.shape[0] == targets.shape[0]
        for j, model in enumerate(models):
            err_test, std_err_test, err_train, std_err_train = k_fold_cv(model, feats, targets)
            shuffled_errors[j, i] = err_test

    n_greater_errors_models = np.sum((shuffled_errors > oerrors), axis=1)
    return n_greater_errors_models
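
# `test_data` is effectively a label-permutation test: for each model it counts in how
# many of the `n_shuffle_sets` shuffled data sets the cross-validated test error exceeds
# the error measured on the unshuffled data. A hypothetical invocation (the model list,
# the `df` data object and the original error values are placeholders, not taken from
# the original project):
#
#   counts = test_data(models=[model_a, model_b],
#                      oerrors=[0.42, 0.37],
#                      data=df,
#                      n_shuffle_sets=100,
#                      pred_variable='rating')
#   # counts[j] close to 100 means model j almost always beats shuffled labels;
#   # (100 - counts[j]) / 100 is an empirical p-value for model j.
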
def test_run_model():
    """
    Test the ability of the 'run_model' function to train a simple network (5 epochs).
    """

    from src.run_model import run_model

    trainX = np.array([[0, 5], [0, 4.9], [0, 5.1], [0.1, 5], [-0.1, 5],
                       [0, -5], [0, -4.9], [0, -5.1], [0.1, -5], [-0.1, -5]])
    trainY = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])

    validX = np.array([[-1, 4.5], [-3, 3], [-2, 6], [1, -0.5]])
    validY = [0, 0, 0, 1]

    train_dataset = MyDataset(trainX, trainY)
    valid_dataset = MyDataset(validX, validY)

    model = Basic_Model(weight_init=0.5)

    # Symmetry breaking to avoid potentially undefined argmax() behavior on equal outputs
    model.out.bias.data[0] = -0.001

    _, _est_loss, _est_acc = run_model(model,
                                       running_mode='train',
                                       train_set=train_dataset,
                                       valid_set=valid_dataset,
                                       batch_size=1,
                                       learning_rate=1e-3,
                                       n_epochs=5,
                                       shuffle=False)

    _est_loss_train = np.mean(_est_loss['train'])
    _est_loss_valid = np.mean(_est_loss['valid'])

    _est_acc_train = np.mean(_est_acc['train'])
    _est_acc_valid = np.mean(_est_acc['valid'])

    _est_values = np.array(
        [_est_loss_train, _est_loss_valid, _est_acc_train, _est_acc_valid])

    _true_values = np.array([0.63102476, 0.63921592, 70., 50.])

    assert np.allclose(_true_values, _est_values)
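
# What this test assumes about run_model's return value (an interpretation of the call
# above, not an authoritative API description): in 'train' mode it returns
# (model, loss, acc), where `loss` and `acc` are dicts keyed by 'train' and 'valid'
# with one entry per epoch, e.g.
#
#   loss = {'train': [0.66, 0.64, ...], 'valid': [0.67, 0.65, ...]}  # length n_epochs
#   acc  = {'train': [60.0, 70.0, ...], 'valid': [50.0, 50.0, ...]}  # percentages
#
# which is why the test averages each list with np.mean before comparing.
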
Example no. 6
def test_run_model():
    """
	Test the ability of the 'run_model' function to train a simple network (5 epochs).
	"""

    from src.run_model import run_model

    trainX = np.array([[0, 5], [0, 4.9], [0, 5.1], [0.1, 5], [-0.1, 5],
                       [0, -5], [0, -4.9], [0, -5.1], [0.1, -5], [-0.1, -5]])
    trainY = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])

    validX = np.array([[-1, 4.5], [-3, 3], [-2, 6], [1, -0.5]])
    validY = [0, 0, 0, 1]

    train_dataset = MyDataset(trainX, trainY)
    valid_dataset = MyDataset(validX, validY)

    model = Basic_Model(weight_init=0.5)

    _, _est_loss, _est_acc = run_model(model,
                                       running_mode='train',
                                       train_set=train_dataset,
                                       valid_set=valid_dataset,
                                       batch_size=1,
                                       learning_rate=1e-3,
                                       n_epochs=5,
                                       shuffle=False)

    _est_loss_train = np.mean(_est_loss['train'])
    _est_loss_valid = np.mean(_est_loss['valid'])

    _est_acc_train = np.mean(_est_acc['train'])
    _est_acc_valid = np.mean(_est_acc['valid'])

    _est_values = np.array(
        [_est_loss_train, _est_loss_valid, _est_acc_train, _est_acc_valid])

    _true_values = np.array([0.63108519, 0.63902633, 60., 50.])

    assert np.allclose(_true_values, _est_values)
Example no. 7
def test_test_function():
    """
	Test the ability of the 'test' function to compute the loss and accuracy.
	"""
    from src.run_model import _test

    testX = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]])
    testY = np.array([1, 0, 0, 0])

    test_dataset = MyDataset(testX, testY)
    test_loader = DataLoader(test_dataset,
                             batch_size=len(test_dataset),
                             shuffle=False)

    model = Basic_Model(weight_init=1)

    _est_loss, _est_acc = _test(model, test_loader)
    _est_values = np.array([_est_loss, _est_acc])

    _true_values = np.array([0.69314718, 25.0])

    assert np.allclose(_true_values, _est_values)
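
# The expected loss in this variant is exactly ln(2): with identical weights and no bias
# offset the two output logits coincide for every input, softmax yields (0.5, 0.5), and
# the cross-entropy of a uniform two-class prediction is ln(2) regardless of the label.
# Quick numerical check:
#
#   >>> import numpy as np
#   >>> np.log(2)
#   0.6931471805599453
#
# The accuracy then depends on how argmax breaks the tie, which is why the other variant
# of this test nudges the output biases apart.
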
Example no. 8
def demo_p(data,
           save,
           metric,
           loss,
           sampler='rand',
           embed_size=16,
           num_cls=196,
           lambda_=0,
           lr=0.001,
           T=0.005,
           model_saved=None,
           result='r_.csv',
           model_name='m_.dat',
           list_file='./cars_train.txt',
           batch_size=100,
           n_epochs=10,
           seed=1):
    '''
    Args:
        # data
        data: root directory where the data are stored
        list_file: text file listing the training images (default './cars_train.txt')

        # logging
        save: directory where logs and models are stored
        model_saved: filename of an already trained model to load, if any
        result: CSV file the training loss is written to
        model_name: filename the newly trained model is saved under

        # training parameters
        embed_size: embedding dimension, e.g. 16, 32 or 64
        num_cls: number of classes (default 196)
        metric: one of 'E', 'rE', 'maha', 'snr', 'rM'
        sampler: one of 'rand', 'dist', 'npair'
        loss: one of 'tripl', 'contras', 'npair', 'lifted'
        lr: learning rate
        lambda_, T: hyperparameters forwarded to train()

        # others
        n_epochs: number of training epochs
        batch_size: mini-batch size
        seed: random seed
    '''
    # seed
    torch.backends.cudnn.deterministic = True
    if seed is not None:
        torch.manual_seed(seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed(seed)
            torch.cuda.manual_seed_all(seed)

    # dataLoader
    train_set = MyDataset(dataroot=data,
                          phase='train',
                          image_list_file=list_file)
    #test_set = MyDataset(dataroot=data, phase='test', image_list_file=list_file)

    #test_loader = DataLoader(dataset=test_set, batch_size=batch_size,shuffle=False, num_workers=4,pin_memory=torch.cuda.is_available())
    train_loader = DataLoader(dataset=train_set,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=4,
                              pin_memory=torch.cuda.is_available())
    # model_setup
    if model_saved:
        model = models.resnet18(pretrained=False)
        #for p in model.parameters():
        #   p.requires_grad=False

        inp_fts = model.fc.in_features
        # alternative for models with a classifier head:
        # inp_fts = model.classifier[6].in_features
        # model.classifier[6] = nn.Linear(inp_fts, embed_size)

        model.fc = nn.Linear(inp_fts, embed_size)
        model.load_state_dict(
            torch.load(os.path.join(save, model_saved), map_location='cpu'))

        #if fine_tune:
        #    for p in model.parameters():
        #       p.requires_grad = not p.requires_grad
    else:
        model = models.resnet18(pretrained=True)
        #for p in model.parameters():
        #    p.requires_grad=False

        inp_fts = model.fc.in_features
        #inp_fts=model.classifier[6].in_features
        model.fc = nn.Linear(inp_fts, embed_size)
        #model.classifier[6] = nn.Linear(inp_fts, embed_size)
        #if fine_tune:
        #   for p in model.parameters():
        #      p.requires_grad = not p.requires_grad

    print(model)
    # Create the save directory for the logging files
    if not os.path.exists(save):
        os.makedirs(save)
    if not os.path.isdir(save):
        raise Exception('%s is not a dir' % save)

    train(
        model=model,
        train_set=train_loader,  # model and data
        batch_size=batch_size,
        n_epochs=n_epochs,
        lambda_=lambda_,
        lr=lr,
        T=T,  # others
        embed_size=embed_size,
        loss=loss,
        metric=metric,
        sampler=sampler,
        num_cls=num_cls,  # train
        save=save,
        result_f=result,
        model_f=model_name  # log
    )
    print('Done')
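
# A hypothetical call of demo_p for the training list referenced above; the paths and
# hyperparameter values below are placeholders, not settings from the original project:
#
#   demo_p(data='./data/cars',
#          save='./logs/exp1',
#          metric='E',
#          loss='tripl',
#          sampler='dist',
#          embed_size=32,
#          batch_size=100,
#          n_epochs=10)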