Example 1
def main(train_db, test_db):

    # Load train dataset
    _, lst_vec_train = utils.data_load(train_db)

    # Get PCA transform matrix from train data
    X_train = np.array(lst_vec_train)
    _, W = utils.whiten(X_train)

    # Load test dataset
    lst_id, lst_vec_test = utils.data_load(test_db)

    # Whiten the test dataset
    X_test = np.array(lst_vec_test)
    X_test_white = np.dot(X_test, W)

    # Calc scores for test dataset
    lst_white_vec = [X_test_white[i, :] for i in range(X_test_white.shape[0])]
    lst_compare_key_result, lst_compare_ivec_result = utils.calc_scores(
        lst_id, lst_white_vec)

    # Plot FR/FA curve
    utils.plot_fr_fa(lst_compare_key_result, lst_compare_ivec_result)

    # Plot score histogram
    utils.plot_hist_scores(lst_compare_key_result, lst_compare_ivec_result)
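
The unpacking `_, W = utils.whiten(X_train)` suggests the helper returns the whitened training data together with the PCA-whitening matrix W. A minimal sketch of a compatible implementation (hypothetical, not the repository's actual code):

import numpy as np

def whiten(X, eps=1e-8):
    # Center the data and eigendecompose its covariance, then build the
    # PCA-whitening matrix W so that np.dot(Xc, W) has identity covariance.
    Xc = X - X.mean(axis=0)
    eigvals, eigvecs = np.linalg.eigh(np.cov(Xc, rowvar=False))
    W = eigvecs / np.sqrt(eigvals + eps)  # scale each eigenvector by 1/sqrt(eigenvalue)
    return np.dot(Xc, W), W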
Example 2
def Transfer():

    image_src = utils.data_load(args.image_dir,
                                'test2',
                                src_transform,
                                1,
                                shuffle=True,
                                drop_last=True)

    with torch.no_grad():
        G.eval()
        for n, (x, _) in enumerate(image_src):
            x = x.to(device)
            G_recon = G(x)
            #result = torch.cat((x[0], G_recon[0]), 2)
            result = G_recon[0]
            path = os.path.join(args.output_image_dir, str(n + 1) + '.png')
            plt.imsave(path, (result.cpu().numpy().transpose(1, 2, 0) + 1) / 2)

    load = Image.open(os.path.join(r"C:\Users\LQF\Desktop\moTest\result", "1.png"))
    imgfile = ImageTk.PhotoImage(load)
    canvas2.image = imgfile  # keep a reference so Tkinter does not garbage-collect the image
    canvas2.create_image(2, 2, anchor='nw', image=imgfile)

    os.remove(os.path.join(r"C:\Users\LQF\Desktop\moTest\result", "1.png"))
    for i in os.listdir(r"C:\Users\LQF\Desktop\moTest\image_dir\test2"):
        path_file = os.path.join(
            r"C:\Users\LQF\Desktop\moTest\image_dir\test2", i)
        if os.path.isfile(path_file):
            os.remove(path_file)
Example 3
def main(args):
    proj_path = os.getcwd()
    data_path = 'data'
    test_path = data_path + '/test/preprocessed'
    model_save_path = 'model'

    save_freq = 10
    max_epoch = 5000
    max_patience = 30
    window_size = 7
    num_features = 264
    batch_size = 16

    net = torch.load(args[1])

    test_x_list, test_y_list = utils.data_load('data/final/preprocessed')

    train_piece_lens = []
    test_piece_lens = []

    for i in range(len(test_x_list)):
        # Add 1 before the log so log(0) never occurs;
        # the offset can be inverted in post-processing.
        test_x_list[i] = utils.standardize(test_x_list[i] + 1, log=True).T
        test_y_list[i] = test_y_list[i].T
        test_piece_lens.append(test_x_list[i].shape[0])

        print('test loaded {}/{}'.format(i + 1, len(test_x_list)))

    test_x = np.vstack(test_x_list)
    del test_x_list
    test_y = np.vstack(test_y_list)
    del test_y_list

    # For GPU computing. Variable and .volatile were removed in PyTorch 0.4;
    # tensors are used directly and inference should run under torch.no_grad().
    dtype = torch.cuda.FloatTensor
    test_x = torch.Tensor(test_x).type(dtype)
    test_y = torch.Tensor(test_y).type(dtype)

    min_valid_loss = float('inf')
    patience = 0

    # criterion = nn.BCEWithLogitsLoss()
    criterion = nn.MSELoss()
    optimizer = optim.Adam(net.parameters())

    print('Preprocessing Completed.')

    # Run the test pass and compute metrics.
    prec, recall, acc = run_test(net, test_x, test_y, criterion,
                                 test_piece_lens, batch_size, window_size)
    f_score = 2 * prec * recall / (prec + recall)

    print('Precision: {}\tRecall: {}\tAccuracy: {}'.format(prec, recall, acc))
    print('F-score: {}'.format(f_score))
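
`utils.standardize(x + 1, log=True)` is called with a +1 offset, which implies a log transform followed by normalization. A minimal sketch consistent with that call (hypothetical; the z-score axis is an assumption):

import numpy as np

def standardize(x, log=False, eps=1e-8):
    # Optional log compression; the caller adds 1 beforehand so log(0) cannot occur.
    if log:
        x = np.log(x)
    # Z-score per feature (assumed axis; the real helper may normalize globally).
    return (x - x.mean(axis=1, keepdims=True)) / (x.std(axis=1, keepdims=True) + eps)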
Example 4
def dataloader_objects(args):
    # data_loader
    transform = transforms.Compose([
        transforms.Resize((args.img_size, args.img_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    ])
    train_loader_A = utils.data_load(os.path.join('data', args.dataset),
                                     'trainA',
                                     transform,
                                     args.batch_size,
                                     shuffle=True,
                                     drop_last=True)
    train_loader_B = utils.data_load(os.path.join('data', args.dataset),
                                     'trainB',
                                     transform,
                                     args.batch_size,
                                     shuffle=True,
                                     drop_last=True)
    test_loader_A = utils.data_load(os.path.join('data', args.dataset),
                                    'testA',
                                    transform,
                                    1,
                                    shuffle=True,
                                    drop_last=True)
    test_loader_B = utils.data_load(os.path.join('data', args.dataset),
                                    'testB',
                                    transform,
                                    1,
                                    shuffle=True,
                                    drop_last=True)

    dataloaders = [
        train_loader_A, train_loader_B, test_loader_A, test_loader_B
    ]
    return dataloaders
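
Across these examples, `data_load(path, subfolder, transform, batch_size, shuffle=..., drop_last=...)` behaves like an ImageFolder-backed DataLoader restricted to one subdirectory. A minimal sketch under that assumption (hypothetical reconstruction, not the repositories' actual helper):

import torch
from torchvision import datasets

def data_load(path, subfolder, transform, batch_size, shuffle=False, drop_last=False):
    # ImageFolder treats each subdirectory of `path` as a class;
    # keep only the samples that belong to `subfolder`.
    dset = datasets.ImageFolder(path, transform)
    keep = dset.class_to_idx[subfolder]
    dset.samples = [s for s in dset.samples if s[1] == keep]
    dset.imgs = dset.samples  # keep the alias attribute consistent
    return torch.utils.data.DataLoader(dset, batch_size=batch_size,
                                       shuffle=shuffle, drop_last=drop_last)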
Example 5
def load_files(path_sc, path_test):
    """ loading scalar object and Test csv.
        Args:
            path(os path)
        Attributes:
            path(os path): path to the pickle object and testdata
        Returns: 
            X_test
            y_test
            X_scalar
            y_scalar
    """
    print("Loading files..")
    scalar = pickle.load(open(path_sc, "rb"))
    X_scalar = scalar[0]
    y_scalar = scalar[1]

    X_test, y_test = data_load(path_test)  # load test data via utils.data_load

    return X_test, y_test, X_scalar, y_scalar
Example 6
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if torch.backends.cudnn.enabled:
    torch.backends.cudnn.benchmark = True

G = networks.generator(args.in_ngc, args.out_ngc, args.ngf, args.nb)
if torch.cuda.is_available():
    G.load_state_dict(torch.load(args.pre_trained_model))
else:
    # cpu mode
    G.load_state_dict(torch.load(args.pre_trained_model, map_location=lambda storage, loc: storage))
G.to(device)

src_transform = transforms.Compose([
        transforms.Resize((args.input_size, args.input_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
# utils.data_load(os.path.join('data', args.src_data), 'test', src_transform, 1, shuffle=True, drop_last=True)
image_src = utils.data_load(args.image_dir, 'test', src_transform, 1, shuffle=True, drop_last=True)

with torch.no_grad():
    G.eval()
    for n, (x, _) in enumerate(image_src):
        x = x.to(device)
        G_recon = G(x)
        result = torch.cat((x[0], G_recon[0]), 2)
        path = os.path.join(args.output_image_dir, str(n + 1) + '.png')
        plt.imsave(path, (result.cpu().numpy().transpose(1, 2, 0) + 1) / 2)
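
The CUDA/CPU branch above can be collapsed: `torch.load` accepts a device via `map_location`, so a single call covers both cases (a minor simplification, assuming the checkpoint holds a plain state_dict):

# Equivalent single-path load: map_location relocates all tensors to `device`.
G.load_state_dict(torch.load(args.pre_trained_model, map_location=device))
G.to(device)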


Example 7
random.seed(opts.manualSeed)
torch.manual_seed(opts.manualSeed)
print('Random Seed: ', opts.manualSeed)

# Transform dataset
tgt_transform = transforms.Compose([
    transforms.Resize((opts.input_size, opts.input_size)),
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])

print('Loading data..')
hparams = load_json('./configs', opts.hparams)
test_cartoon_loader_src = data_load(os.path.join('./../../data/', 'bitmoji/'),
                                    'all_gender_test',
                                    tgt_transform,
                                    batch_size=1,
                                    shuffle=False,
                                    drop_last=True)

model = TravelGAN(hparams['model'], device=device)
if hparams['saved_model']:
    print('saved model : ', hparams['saved_model'])
    model.resume(hparams['saved_model'])

with torch.no_grad():
    model.eval()
    for n, (x_b, _) in enumerate(test_cartoon_loader_src):
        # Loading on device
        x_b = x_b.to(device)

        x_ba = model.transformToReal(x_b)
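
The snippet computes `x_ba` but is cut off before the result is written out. A hypothetical continuation, mirroring the save step of Example 6 (assumes `x_ba` is a single image tensor normalized to [-1, 1] and a `results` output directory):

        # Hypothetical save step: map [-1, 1] back to [0, 1] and write a PNG.
        out = (x_ba[0].cpu().numpy().transpose(1, 2, 0) + 1) / 2
        plt.imsave(os.path.join('results', str(n + 1) + '.png'), out)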
Example 8
    Relabel(255, 21),
])

train_loader_src = DataLoader(VOC12(args.src_data, image_transform,
                                    target_transform),
                              num_workers=4,
                              batch_size=args.batch_size,
                              shuffle=True)
train_loader_tgt = DataLoader(VOC12(args.tgt_data, image_transform,
                                    target_transform),
                              num_workers=4,
                              batch_size=args.batch_size,
                              shuffle=True)
test_loader_src = data_load('./data/test_data/',
                            'test',
                            image_transform,
                            1,
                            shuffle=True,
                            drop_last=True)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

A2BG = net.generator(args.in_ngc, args.out_ngc, args.ngf)
B2AG = net.generator(args.in_ngc, args.out_ngc, args.ngf)
A2Bcolor = net.colorization(args.in_ngc, args.out_ngc, args.ngf)
B2Acolor = net.colorization(args.in_ngc, args.out_ngc, args.ngf)
AD = net.discriminator(args.in_ndc, args.out_ndc, args.ndf)
BD = net.discriminator(args.in_ndc, args.out_ndc, args.ndf)
Segm = net.segmantation(args.in_ngc, args.out_ngc, args.ngf)

print('---------- Networks initialized -------------')
print_network(A2BG)
Example 9
    edge_promoting(os.path.join('./dataset/', 'CelebA/train/'),
                   os.path.join('./dataset/', 'CelebA/trainA_pair/'))
else:
    print('Domain A edge-promoting already done!!')

if not os.path.isdir(os.path.join('./dataset/', 'Bitmoji/trainB_pair/')):
    print('Domain B edge-promoting start!!')
    edge_promoting(os.path.join('./dataset/', 'Bitmoji/'),
                   os.path.join('./dataset/', 'Bitmoji/trainB_pair/'))
else:
    print('Domain B edge-promoting already done!!')

loading = hparams['loading']
train_loader_src = data_load(os.path.join('./dataset/', 'CelebA/'),
                             'trainA_pair',
                             src_transform,
                             batch_size=loading['batch_size'],
                             shuffle=loading['shuffle'],
                             drop_last=True)
train_loader_tgt = data_load(os.path.join('./dataset/', 'Bitmoji/'),
                             'trainB_pair',
                             tgt_transform,
                             batch_size=loading['batch_size'],
                             shuffle=loading['shuffle'],
                             drop_last=True)
test_loader_src = data_load(os.path.join('./dataset/', 'CelebA/'),
                            'test',
                            src_transform,
                            batch_size=loading['batch_size'],
                            shuffle=loading['shuffle'],
                            drop_last=True)
Example 10
    print('edge-promoting start!!')
    edge_promoting(os.path.join('data', args.tgt_data, 'train'), os.path.join('data', args.tgt_data, 'pair'))
else:
    print('edge-promoting already done')

# data_loader
src_transform = transforms.Compose([
        transforms.Resize((args.input_size, args.input_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
tgt_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
train_loader_src = utils.data_load(os.path.join('data', args.src_data), 'train', src_transform, args.batch_size, shuffle=True, drop_last=True)
train_loader_tgt = utils.data_load(os.path.join('data', args.tgt_data), 'pair', tgt_transform, args.batch_size, shuffle=True, drop_last=True)
test_loader_src = utils.data_load(os.path.join('data', args.src_data), 'test', src_transform, 1, shuffle=True, drop_last=True)

# network
G = networks.generator(args.in_ngc, args.out_ngc, args.ngf, args.nb)
if args.latest_generator_model != '':
    if torch.cuda.is_available():
        G.load_state_dict(torch.load(args.latest_generator_model))
    else:
        # cpu mode
        G.load_state_dict(torch.load(args.latest_generator_model, map_location=lambda storage, loc: storage))

D = networks.discriminator(args.in_ndc, args.out_ndc, args.ndf)
if args.latest_discriminator_model != '':
    if torch.cuda.is_available():
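
By symmetry with the generator block above, the discriminator checkpoint presumably loads the same way (hypothetical continuation of the truncated branch):

        # Hypothetical mirror of the generator-loading branch.
        D.load_state_dict(torch.load(args.latest_discriminator_model))
    else:
        # cpu mode
        D.load_state_dict(torch.load(args.latest_discriminator_model,
                                     map_location=lambda storage, loc: storage))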
Example 11
def main():
    proj_path = os.getcwd()
    data_path = 'data'
    train_path = data_path + '/train/preprocessed'
    model_save_path = 'model'

    save_freq = 1
    max_epoch = 5
    max_patience = 5
    window_size = 7
    num_features = 264
    batch_size = 128
    mb_size = 500

    net = pytorch_model.AMT(window_size, num_features).cuda()
    train_x_list, train_y_list = utils.data_load(train_path)
    test_x_list, test_y_list = utils.data_load('data/test/preprocessed')

    train_piece_lens = []
    test_piece_lens = []

    # Standardize.
    for i in range(len(train_x_list)):
        train_x_list[i] = utils.standardize(train_x_list[i] + 1, log=True).T
        train_y_list[i] = train_y_list[i].T
        train_piece_lens.append(train_x_list[i].shape[0])
        print('train loaded {}/{}'.format(i + 1, len(train_x_list)),
              file=sys.stderr)

    for i in range(len(test_x_list)):
        # Add 1 before the log so log(0) never occurs;
        # the offset can be inverted in post-processing.
        test_x_list[i] = utils.standardize(test_x_list[i] + 1, log=True).T
        test_y_list[i] = test_y_list[i].T
        test_piece_lens.append(test_x_list[i].shape[0])

        # test_x_list[i] = np.pad(utils.standardize(test_x_list[i]+1,log=True),
        #                          ((3,3),(0,0)),'constant')
        # test_y_list[i] = np.pad(test_y_list[i],((3,3),(0,0)),'constant')

        print('test loaded {}/{}'.format(i + 1, len(test_x_list)),
              file=sys.stderr)

    train_x = np.vstack(train_x_list)
    del train_x_list
    train_y = np.vstack(train_y_list)
    del train_y_list
    test_x = np.vstack(test_x_list)
    del test_x_list
    test_y = np.vstack(test_y_list)
    del test_y_list

    # train_x = Variable( torch.Tensor( train_x ) )
    # train_y = Variable( torch.Tensor( train_y ) )
    # test_x = Variable( torch.Tensor( test_x ) )
    # test_x.volatile = True
    # test_y = Variable( torch.Tensor( test_y ) )
    # test_y.volatile = True

    min_valid_loss = float('inf')
    patience = 0

    # criterion = nn.BCEWithLogitsLoss()
    criterion = nn.MSELoss()
    optimizer = optim.Adam(net.parameters())

    print('Preprocessing Completed.', file=sys.stderr)
    sys.stderr.flush()

    num_megabatches = train_x.shape[0] // mb_size  # train_x is a NumPy array here
    print('{} megabatches\n'.format(num_megabatches), file=sys.stderr)
    train_megabatches = [(train_x[k * mb_size:(k + 1) * mb_size, :],
                          train_y[k * mb_size:(k + 1) * mb_size, :])
                         for k in range(num_megabatches)]
    # train_megabatches.append((train_x[num_megabatches*mb_size :, :], train_y[num_megabatches*mb_size :, :]))
    # NOTE: this reuses the train megabatch count; if test_x has fewer rows,
    # the later test slices come out short or empty.
    test_megabatches = [(test_x[k * mb_size:(k + 1) * mb_size, :],
                         test_y[k * mb_size:(k + 1) * mb_size, :])
                        for k in range(num_megabatches)]
    # test_megabatches.append((test_x[num_megabatches*mb_size :, :], test_y[num_megabatches*mb_size :, :]))

    del train_x, train_y, test_x, test_y

    for j in range(num_megabatches):
        print('megabatch {}'.format(j + 1))

        for i in range(max_epoch):
            # train_x = Variable( torch.Tensor( train_megabatches[j][0] ) )
            # train_y = Variable( torch.Tensor( train_megabatches[j][1] ) )
            # test_x = Variable( torch.Tensor( test_megabatches[j][0] ) )
            # test_y = Variable( torch.Tensor( test_megabatches[j][1] ) )

            train_x = train_megabatches[j][0]
            train_y = train_megabatches[j][1]
            test_x = test_megabatches[j][0]
            test_y = test_megabatches[j][1]

            # Train and calculate loss value.
            train_loss = pytorch_model.run_train(
                net, train_x, train_y, criterion, optimizer, train_piece_lens,
                batch_size, window_size).cpu().data.numpy()
            valid_loss = pytorch_model.run_loss(
                net, test_x, test_y, criterion, test_piece_lens, batch_size,
                window_size).cpu().data.numpy()
            if (valid_loss < min_valid_loss):
                patience = 0
                min_valid_loss = valid_loss
                # torch.save(net.state_dict(),model_save_path+'_ReLU_whole_log_best')
                torch.save(net, model_save_path + '/' + 'model_best.pt')
                print('\n***Best model is saved.***\n', file=sys.stderr)
            else:
                patience += 1
            if (patience == max_patience or i == max_epoch - 1):
                # torch.save(net.state_dict(),model_save_path+'_ReLU_whole_log'+str(i+1))
                torch.save(net, model_save_path + '/model_' + str(i + 1))
                print('\n***Last model is saved at epoch {}.***\n'.format(i + 1),
                      file=sys.stderr)
                break

            print('------{}th iteration (max:{})-----'.format(
                i + 1, max_epoch))
            print('train_loss : ', train_loss)
            print('valid_loss : ', valid_loss)
            print('patience : ', patience)

            # print(i+1, train_loss[0], valid_loss[0])

            if (i % save_freq == save_freq - 1):
                # torch.save(net.state_dict(),model_save_path+'_ReLU_whole_log'+str(i+1))
                torch.save(net, model_save_path + '/model' + str(i + 1))
                print('\n***{}th model is saved.***\n'.format(i + 1),
                      file=sys.stderr)

            del train_x, train_y, test_x, test_y, train_loss, valid_loss
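
`torch.save(net, ...)` above pickles the whole module, so loading it later (as Example 3's `torch.load(args[1])` does) requires the original class definition to be importable. Saving the state_dict is the more portable pattern; a short sketch:

# Save only the parameters...
torch.save(net.state_dict(), model_save_path + '/model_best.pt')
# ...and restore them into a freshly constructed model.
net = pytorch_model.AMT(window_size, num_features).cuda()
net.load_state_dict(torch.load(model_save_path + '/model_best.pt'))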
Example 12
dim_c = 6  # number of clusters
dim_z = 1  # latent variable z's channel
dim_w = 1  # dimension of prior w's channel
dim_x = imageshape[0] * imageshape[1]  # dimension of image in vectors
c_lambda = 0.5  # weight on the KL term KL[p(c|w,z) || p(c)]
clipstd = [0.0, 1.0]  # bound of std
start_step = 0  # default starting step
training_step = 400 * 4000  # how many steps to train at a time
train_rate = 5e-5

# Load Data

dataset = 'CamCANT2'  # or HCP
datapath = path_make('', 'data', '')
MRtrain = data_load(process='Train', dataset=dataset, datapath=datapath)
MRtest = data_load(process='Test', dataset=dataset, datapath=datapath)
test_image = MRtest[[
    # some random images chosen for reconstruction during training
    73, 107, 185, 199, 382, 419, 443, 472, 509, 540, 554, 713, 746,