def ae_build(_data, encoding, optimizer, loss, _type):
    """Train an autoencoder on a dataset and return the reduced data.

    Builds a single-hidden-layer autoencoder, fits it on the dataset's
    training split (validating on the test split), plots the training
    history, then projects both splits through the encoder.

    Returns:
        (ae_data, ae): a DS container holding the encoded train/test
        features with the original labels, and the fitted autoencoder.
    """
    # MNIST is consumed as-is; all other datasets use standardized splits.
    use_raw = _data.name == 'mnist'
    train = _data.X_train if use_raw else _data.X_train_std
    test = _data.x_test if use_raw else _data.x_test_std

    # Model name records the activation family and the source dataset.
    prefix = 'aelinear_' if _type == 'linear' else 'aenon-linear_'
    name = prefix + _data.name

    n_features = train.shape[1]
    ae = make_ae(n_features, [encoding], encoding, [n_features],
                 [_type], [PAR.af_adec])
    ae.autoencoder.compile(optimizer=optimizer, loss=loss)

    hist = ae.autoencoder.fit(train,
                              train,
                              epochs=PAR.ae_epoch,
                              batch_size=PAR.ae_batch,
                              shuffle=True,
                              validation_data=(test, test))
    plot_ae(name, hist)

    # Project both splits into the learned latent space.
    reduced_train = ae.encoder.predict(train)
    reduced_test = ae.encoder.predict(test)
    ae_data = DS(reduced_train, _data.Y_train, reduced_test, _data.y_test,
                 name)
    return ae_data, ae
def pca_linear(_data, n_components):
    """Reduce a dataset to n_components dimensions via linear PCA.

    Fits PCA on the raw training split, projects both splits with the
    fitted transform, and returns them in a new DS container.
    """
    reducer = PCA(n_components=n_components)
    projected_train = reducer.fit_transform(_data.X_train)
    projected_test = reducer.transform(_data.x_test)
    return DS(projected_train, _data.Y_train, projected_test, _data.y_test,
              'pca_' + _data.name)
# Example #3
    torch.backends.cudnn.benchmark = True

# Ensure the checkpoint directory exists. exist_ok=True also covers the
# case where save_dir itself already exists but the ckpt subdirectory does
# not — the previous `if not os.path.exists(args.save_dir)` guard skipped
# creation entirely in that case (and was racy besides).
os.makedirs('{:s}/ckpt'.format(args.save_dir), exist_ok=True)

# TensorBoard logger using the default log directory.
writer = SummaryWriter()

# Training-time augmentation: square resize, random flip, random grayscale.
size = (args.image_size, args.image_size)
train_tf = transforms.Compose([
    transforms.Resize(size),
    transforms.RandomHorizontalFlip(),
    transforms.RandomGrayscale(),
    transforms.ToTensor(),
])

train_set = DS(args.root, train_tf)
# InfiniteSampler lets the training loop draw batches forever without
# re-creating the DataLoader at epoch boundaries.
iterator_train = iter(data.DataLoader(
    train_set,
    batch_size=args.batch_size,
    sampler=InfiniteSampler(len(train_set)),
    num_workers=args.n_threads))
print(len(train_set))

# Generator, two discriminators, and the reconstruction/consistency losses,
# all moved to the target device.
g_model = InpaintNet().to(device)
fd_model = FeaturePatchDiscriminator().to(device)
pd_model = PatchDiscriminator().to(device)
l1 = nn.L1Loss().to(device)
cons = ConsistencyLoss().to(device)

start_iter = 0
g_optimizer = torch.optim.Adam(
# Example #4
device = torch.device('cuda')

# Ensure the output directories exist. exist_ok=True also covers the case
# where save_dir itself already exists but the subdirectories do not — the
# previous `if not os.path.exists(args.save_dir)` guard skipped creation
# entirely in that case (and was racy besides).
os.makedirs('{:s}/images'.format(args.save_dir), exist_ok=True)
os.makedirs('{:s}/ckpt'.format(args.save_dir), exist_ok=True)

#writer = SummaryWriter(logdir=args.log_dir)

# Training-time augmentation: square resize and random horizontal flip.
size = (args.image_size, args.image_size)
img_tf = transforms.Compose([
    transforms.Resize(size=size),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor()
])

dataset = DS(args.root, img_tf)

# InfiniteSampler lets the training loop draw batches forever without
# re-creating the DataLoader at epoch boundaries.
iterator_train = iter(
    data.DataLoader(dataset,
                    batch_size=args.batch_size,
                    sampler=InfiniteSampler(len(dataset)),
                    num_workers=args.n_threads))
print(len(dataset))

model = DFNet().to(device)

lr = args.lr

start_iter = 0
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
# Decay the learning rate by 10x every 5 scheduler steps.
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
criterion = InpaintingLoss().to(device)
def _read_table(file_parts, delim_whitespace=False):
    """Read one data file under *path* as a headerless DataFrame.

    Malformed rows are silently skipped, matching the original behavior.
    NOTE(review): error_bad_lines/warn_bad_lines were removed in pandas 2.0
    (superseded by on_bad_lines='skip'); this module assumes pandas < 2.0.
    """
    return pd.read_csv(os.path.join(path, *file_parts),
                       delim_whitespace=delim_whitespace,
                       error_bad_lines=False,
                       warn_bad_lines=False,
                       header=None)


def _read_mnist():
    """Load MNIST flattened to 784-dim float32 vectors scaled into [0, 1]."""
    (X_train, Y_train), (x_test, y_test) = mnist.load_data()
    # MLPs do not use the 2D image structure, so flatten 28*28 -> 784,
    # and rescale pixel values from int [0, 255] to float [0, 1].
    tr = X_train.reshape(60000, 784).astype('float32') / 255
    te = x_test.reshape(10000, 784).astype('float32') / 255
    trl = np.asarray(Y_train, dtype=np.int32)
    tel = np.asarray(y_test, dtype=np.int32)
    return DS(tr, trl, te, tel, 'mnist')


def _read_har():
    """Load the UCI HAR dataset from its pre-split train/test text files."""
    tr = _read_table(('UCI HAR Dataset', 'train', 'X_train.txt'),
                     delim_whitespace=True).to_numpy(dtype='float32',
                                                     copy=True)
    trl = _read_table(('UCI HAR Dataset', 'train', 'y_train.txt'))
    te = _read_table(('UCI HAR Dataset', 'test', 'X_test.txt'),
                     delim_whitespace=True).to_numpy(dtype='float32',
                                                     copy=True)
    tel = _read_table(('UCI HAR Dataset', 'test', 'y_test.txt'))
    # Labels arrive as single-column DataFrames; flatten to 1-D arrays.
    return DS(tr, trl.values.T[0], te, tel.values.T[0], 'har')


def _read_pulsar():
    """Load the HTRU2 pulsar dataset and split it 80/20 into train/test."""
    alldata = _read_table(('HTRU2', 'HTRU_2.csv'))
    # Column 8 is the class label; the remaining 8 columns are features.
    labels = pd.DataFrame(alldata[alldata.columns[8]])
    alldata.drop(alldata.columns[8], axis=1, inplace=True)
    tr, te, trl, tel = train_test_split(alldata,
                                        labels,
                                        test_size=0.20,
                                        random_state=42)
    tr = tr.to_numpy(dtype='float32', copy=True)
    te = te.to_numpy(dtype='float32', copy=True)
    return DS(tr, trl.values.T[0], te, tel.values.T[0], 'pulsar')


def read_data():
    """Load the MNIST, UCI HAR, and HTRU2 pulsar datasets.

    Returns:
        (mnist_data, har_data, pulsar_data): three DS containers holding
        float32 feature matrices and integer label vectors.

    Note: leftover debug prints that dumped the raw MNIST arrays to stdout
    were removed.
    """
    return _read_mnist(), _read_har(), _read_pulsar()