Example #1
# Assumed imports for this snippet: torch, visdom and tqdm are standard packages;
# CAE, summary, make_dataloader, argument_report and print_model_parameters
# come from the surrounding project and are not shown here.
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from visdom import Visdom


def main(FG):
    vis = Visdom(port=10001, env=str(FG.vis_env))
    vis.text(argument_report(FG, end='<br>'), win='config')
    FG.global_step=0

    cae = CAE().cuda()

    print_model_parameters(cae)
    #criterion = nn.BCELoss()
    criterion = nn.MSELoss()
    optimizer = optim.Adam(cae.parameters(), lr=FG.lr, betas=(0.5, 0.999))
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, FG.lr_gamma)
    printers = dict(
        loss=summary.Scalar(vis, 'loss', opts=dict(
            showlegend=True, title='loss', ytickmin=0, ytickmax=2.0)),
        lr=summary.Scalar(vis, 'lr', opts=dict(
            showlegend=True, title='lr', ytickmin=0, ytickmax=2.0)),
        input=summary.Image3D(vis, 'input'),
        output=summary.Image3D(vis, 'output'))

    trainloader, validloader = make_dataloader(FG)

    z = 256
    batchSize = FG.batch_size
    imageSize = 64
    input = torch.FloatTensor(batchSize, 1, imageSize, imageSize, imageSize).cuda()
    noise = torch.FloatTensor(batchSize, z).cuda()
    fixed_noise = torch.FloatTensor(batchSize, z).normal_(0, 1).cuda()

    label = torch.FloatTensor(batchSize).cuda()
    real_label = 1
    fake_label = 0

    for epoch in range(FG.num_epoch):
        scheduler.step()
        torch.set_grad_enabled(True)
        pbar = tqdm(total=len(trainloader), desc='Epoch {:>3}'.format(epoch))
        for i, data in enumerate(trainloader):
            real = data[0][0].cuda()

            optimizer.zero_grad()
            output = cae(real)
            loss = criterion(output, real)
            loss.backward()
            optimizer.step()

            FG.global_step += 1
            printers['loss']('loss', FG.global_step/len(trainloader), loss)
            printers['input']('input', real)
            printers['output']('output', output/output.max())
            pbar.update()
        pbar.close()
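
A note on the FG argument: main() expects a pre-parsed argument namespace. The fields it reads are vis_env, lr, lr_gamma, batch_size and num_epoch (global_step is set inside the function). A minimal sketch of how such a namespace might be built with argparse is shown below; the flag names come from the snippet above, but all default values are illustrative assumptions.

import argparse

def parse_args():
    # Only the field names are taken from main() above; the defaults are hypothetical.
    parser = argparse.ArgumentParser()
    parser.add_argument('--vis_env', default='cae', help='visdom environment name')
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--lr_gamma', type=float, default=0.99, help='ExponentialLR decay factor')
    parser.add_argument('--batch_size', type=int, default=8)
    parser.add_argument('--num_epoch', type=int, default=100)
    return parser.parse_args()

if __name__ == '__main__':
    main(parse_args())
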
Example #2
depth_csv = False
file_end_inp = '.jpg'
file_end_outp = '_mask.gif'
input_channels = 1
use_tresh_hold = False
save_path = None  # uncommented so the CAE(...) call below does not raise a NameError
use_sync_data = False

if use_sync_data:
    x_sz = y_sz = 64
else:
    x_sz = y_sz = 128

imget = imageGetter(inp_path, depth_csv, out_path, x_sz, y_sz, file_end_inp)
autoEnc = CAE(x_sz, y_sz, save_path, on_home, input_channels)

batch_size = 50
range_start = 0
epochs = 100
#
# for i in range(100):
#     images_inp, images_outp = imget.getImageSubset(999+i, 1000+i)
#     autoEnc.test(images_inp[0], images_outp[0])
iteration = 0
for i in range(epochs):
    print('running epoch', i)
    for range_end in range(batch_size, len(imget.filelist), batch_size):
        # TODO get random image set (see the sketch after this example)
        images_inp, images_outp = imget.getImageSubset(range_start, batch_size, use_sync_data, file_end_outp)
        # images_inp, images_outp = imget.create_test_data(range_start, range_end)
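
The loop body above is truncated, and the TODO asks for a random image subset instead of the fixed range_start. A minimal sketch of one way to do that, reusing only the getImageSubset call already shown above (the helper name is hypothetical):

import random

def get_random_subset(imget, batch_size, use_sync_data, file_end_outp):
    # Pick a random window of batch_size files instead of always starting at range_start
    start = random.randrange(0, len(imget.filelist) - batch_size)
    return imget.getImageSubset(start, batch_size, use_sync_data, file_end_outp)
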
Example #3

# Read the numpy array file of a trained contractive autoencoder
# (imports assumed, as in the other examples)
import numpy
from cae import CAE

data = numpy.load(numpy_array_filename)
W = data['W']
c = data['c']
b = data['b']
CAE_params = data['CAE_params']
[n_hidden, learning_rate, jacobi_penalty, batch_size, epochs, schatten_p, loss] = CAE_params
X = data['X'] # The examples used during cae.fit

# Load the data into a CAE
ae = CAE( n_hiddens=n_hidden,
          W=W,
          c=c,
          b=b,
          learning_rate=learning_rate,
          jacobi_penalty=jacobi_penalty,
          batch_size=batch_size,
          epochs=epochs,
          schatten_p=schatten_p )

# Pick a random point in the data
pt_index = 0 #random.randint(0, len(X)-1)
pt = X[pt_index]


## Output the pt in a graph and its reconstruction by the CAE
#target = numpy.reshape(pt, (28,-1))
#reconstruction = numpy.reshape(ae.reconstruct(pt), (28,-1))
#num_cols = 4
#fig = plt.figure(2, (1,num_cols))
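
The plotting code above is commented out and cut off. A minimal sketch of how the point and its reconstruction could be displayed, following the ImageGrid pattern used in Example #4 (the 28x28 reshape assumes MNIST-sized inputs, as the commented code does):

import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid

target = numpy.reshape(pt, (28, -1))
reconstruction = numpy.reshape(ae.reconstruct(pt), (28, -1))

fig = plt.figure(2, (1, 2))
grid = ImageGrid(fig, 111, nrows_ncols=(1, 2), axes_pad=0.1)
grid[0].imshow(target)          # original point
grid[1].imshow(reconstruction)  # CAE reconstruction
plt.show()
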
Example #4
# Assumed imports for this snippet: read_amat_file lives in helper_functions
# (per the comment below) and CAE in cae, as in the other examples
import datetime
import numpy
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
from cae import CAE
from helper_functions import read_amat_file

# A plot of the results will be created and saved to this file
show_result = True
# If save_result_filename == None or "" then we don't save an image
save_result_filename = "result-1-1024-10000"
# Append the datetime to save_result_filename
d = datetime.datetime.now()
save_result_filename += "-" + str(d.year) + "-" + str(d.month) + "-" + str(d.day)
save_result_filename += "-" + str(d.hour) + str(d.minute)
print("Saving results to", save_result_filename)

# read_amat_file is in helper_functions
[X, Y] = read_amat_file(training_file_name, sample_size)
print("Training File Read, starting fit")

ae = CAE(epochs=num_epochs, n_hiddens=num_hidden_units, schatten_p=schatten_p_value, save_results_file=save_result_filename)
ae.fit(X, True)

r_X = ae.reconstruct(X[0])

# Show the first image and the reconstructed image
fig = plt.figure(1, (1,2))
grid = ImageGrid(fig, 111, nrows_ncols = (1, 2), axes_pad=0.1)
grid[0].imshow(numpy.reshape(X[0], (28,-1)))
grid[1].imshow(numpy.reshape(r_X, (28,-1)))

if save_result_filename is not None and save_result_filename != "":
  plt.savefig(save_result_filename + ".png")
if show_result:
  plt.show()
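
The save_results_file argument above makes the CAE write its trained parameters to disk. Assuming the file it produces is the same numpy archive that Examples #3 and #5 load (keys 'W', 'c', 'b', 'CAE_params', 'X'), it can be reloaded later as sketched here; the ".npz" extension is an assumption, not taken from the original code:

# Hypothetical reload of the archive written via save_results_file
data = numpy.load(save_result_filename + ".npz")
W, c, b = data['W'], data['c'], data['b']
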
Example #5
# Assumed imports, as in the other examples
import numpy
from cae import CAE
from helper_functions import read_amat_file

# 1) Load a Numpy array file of a trained contractive autoencoder
data = numpy.load(numpy_array_filename)
W = data['W']
c = data['c']
b = data['b']
CAE_params = data['CAE_params']
[n_hidden, learning_rate, jacobi_penalty, batch_size, epochs, schatten_p, loss] = CAE_params
X = data['X'] # The examples used during cae.fit
print "loss:", loss
# Load the data into a CAE
ae = CAE( n_hiddens=n_hidden,
          W=W,
          c=c,
          b=b,
          learning_rate=learning_rate,
          jacobi_penalty=jacobi_penalty,
          batch_size=batch_size,
          epochs=epochs,
          schatten_p=schatten_p )

# 2) Load the datasets (such as MNIST)
# read_amat_file is in helper_functions
[rX, rY] = read_amat_file(training_file_name, training_sample_size)
[tX, tY] = read_amat_file(testing_file_name, testing_sample_size)

# For each training point, encode
encoded_rX = [ae.encode(x) for x in rX]
#for x in rX:
#  encoded_rX.append(ae.encode(x))
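
The test set loaded above can be encoded the same way, and stacking the results into plain numpy arrays makes the learned features easier to hand to whatever downstream model consumes them. A short sketch using only ae.encode, as above:

encoded_tX = [ae.encode(x) for x in tX]

# Stack into numpy arrays for downstream use
encoded_rX = numpy.asarray(encoded_rX)
encoded_tX = numpy.asarray(encoded_tX)
print("encoded shapes:", encoded_rX.shape, encoded_tX.shape)
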
Example #6
import numpy as np
import matplotlib.pyplot as plt
from cae import CAE
from train_cae import fit_adagrad, fit_sgd

def generate_data(n=10000):
    t = 12 * np.random.rand(n) + 3
    x = 0.04 * t * np.sin(t)
    y = 0.04 * t * np.cos(t)
    X = np.vstack((x, y)).T
    return X



X = generate_data()
cae = CAE(n_hiddens=1000, W=None, c=None, b=None, jacobi_penalty=0.0)
cae.init_weights(X.shape[1], dtype=np.float64)
theta_sgd = fit_sgd(cae, X, epochs=30, verbose=True, learning_rate=0.1)


lim = 0.5
lims = np.arange(-lim, lim, 0.1)
x, y = np.meshgrid(lims, lims)
gridX = np.vstack((x.flatten(), y.flatten())).T
rX = cae.reconstruct(gridX)
dX = rX - gridX

plt.close('all')
plt.scatter(X[:, 0], X[:, 1])
plt.quiver(gridX[:, 0], gridX[:, 1], dX[:, 0], dX[:, 1])
plt.show()
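
Example #6 trains with jacobi_penalty=0.0, so the quiver plot shows the reconstruction field of an unregularized autoencoder. To see what the contraction penalty does to that field, a second model can be trained on the same data with a nonzero penalty and plotted on the same grid; the penalty value 1.0 is an arbitrary choice for illustration.

# Same setup as above, but with a nonzero contraction penalty (value is illustrative)
cae_pen = CAE(n_hiddens=1000, W=None, c=None, b=None, jacobi_penalty=1.0)
cae_pen.init_weights(X.shape[1], dtype=np.float64)
fit_sgd(cae_pen, X, epochs=30, verbose=True, learning_rate=0.1)

dX_pen = cae_pen.reconstruct(gridX) - gridX
plt.figure()
plt.scatter(X[:, 0], X[:, 1])
plt.quiver(gridX[:, 0], gridX[:, 1], dX_pen[:, 0], dX_pen[:, 1])
plt.show()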