Example #1
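            # transpose the HSV target to channels-last, convert it to RGB, and save a sample image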
            target = torch.transpose(target, 1, 3)
            hsv_image_t = target.data.cpu().numpy() if use_cuda else target.data.numpy()
            rgb_image_t = utils.convert_hsv_to_rgb(hsv_image_t[0])
            utils.save_image(rgb_image_t,
                             "{}/target_epoch_{}.png".format(exp, epoch))

        # Save to checkpoint
        if (epoch % save_epoch == 0) or (epoch == training_epochs - 1):
            torch.save(dae.state_dict(),
                       '{}/dae_epoch_{}.pth'.format(exp, epoch))


data_manager = DataManager()
data_manager.prepare()
dae = DAE()
if opt.load != '':
    print('loading {}'.format(opt.load))
    if use_cuda:
        dae.load_state_dict(torch.load(exp + '/' + opt.load))
    else:
        dae.load_state_dict(
            torch.load(exp + '/' + opt.load,
                       map_location=lambda storage, loc: storage))

if use_cuda: dae = dae.cuda()

if opt.train:
    dae_optimizer = optim.Adam(dae.parameters(), lr=1e-4, eps=1e-8)
    train_dae(dae, data_manager, dae_optimizer)
Example #2
    return out_data


if __name__ == "__main__":
    epochs = 5000
    batch_size = 128

    input_size = 34
    latent_size = 8

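    # build the model, move it to the GPU, and enable cuDNN autotuning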
    model = DAE(input_size, latent_size)
    model.to('cuda')
    torch.backends.cudnn.benchmark = True

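    # MSE reconstruction loss, SGD with momentum, and exponential LR decay (DECAY**t after t scheduler steps)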
    loss_fn = nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    DECAY = 0.95
    scheduler = LambdaLR(optimizer, lr_lambda=lambda t: DECAY**t)

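    # load the tabular data, shuffle it, and split 70/20/10 into train/validation/test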
    data = pd.read_csv("model/training_test_data.csv")
    data = data.sample(frac=1, random_state=200)  # sample() returns a copy; assign it back to actually shuffle
    data = data.to_numpy()
    size = data.shape[0]

    training_data = data[:int(0.7 * size)]
    validation_data = data[int(0.7 * size):int(0.9 * size)]
    test_data = data[int(0.9 * size):]

    # Clean the training weather features: keep only rows without NaNs
    weather_train = training_data[:, 5:-7]
    weather_train = weather_train[~np.isnan(weather_train).any(axis=1)]
Example #3
 # (the opening of this call is truncated in the source; the loader and dataset names are assumed)
 train_loader = AudioDataLoader(train_dataset,
                                num_workers=args.num_workers,
                                batch_sampler=train_sampler_adv)
 test_loader = AudioDataLoader(test_dataset,
                               batch_size=args.batch_size,
                               num_workers=args.num_workers)
 '''
 if (not args.no_shuffle and start_epoch != 0) or args.no_sorta_grad:
     print("Shuffling batches for the following epochs")
     train_sampler.shuffle(start_epoch)
 '''
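 # move both networks to the target device; optionally cast them to half precision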
 model = model.to(device)
 denoiser = denoiser.to(device)
 if args.mixed_precision:
     model = convert_model_to_half(model)
     denoiser = convert_model_to_half(denoiser)
 parameters = denoiser.parameters()
 optimizer = torch.optim.Adam(parameters, lr=args.lr)
 # (leftover SGD kwargs from an earlier setup: momentum=args.momentum, nesterov=True, weight_decay=1e-5)
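 # wrap both networks for multi-GPU training when running distributed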
 if args.distributed:
     model = DistributedDataParallel(model)
     denoiser = DistributedDataParallel(denoiser)
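 # FP16_Optimizer wraps the base optimizer to handle loss scaling in mixed precision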
 if args.mixed_precision:
     optimizer = FP16_Optimizer(optimizer,
                                static_loss_scale=args.static_loss_scale,
                                dynamic_loss_scale=args.dynamic_loss_scale)
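 # restore optimizer state when resuming from a checkpoint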
 if optim_state is not None:
     optimizer.load_state_dict(optim_state)
 print(model)
 print("Number of parameters: %d" % DeepSpeech.get_param_size(model))
 print(denoiser)
 print("Number of parameters: %d" % DeepSpeech.get_param_size(denoiser))
Example #4
# create one-hot labels, one per sample
one_hots = dataloader.generate_one_hots(data_set.shape[0])
# create shuffled indices
perm = dataset.index_generate_random(data_set)
# shuffle data and labels with the same permutation
data_set = data_set[perm]
one_hots = one_hots[perm]

train_data, test_data = dataset.split_train_test(data_set, config.TRAIN_SIZE)
oh_train_data, oh_test_data = dataset.split_train_test(one_hots, config.TRAIN_SIZE)

# TensorBoard writer for logging training curves
writer = SummaryWriter()

DAE_net = DAE(config.DATA_CHANNEL, config.DAE_ENCODER_SIZES, config.DAE_DECODER_SIZES,
              config.DAE_ENCODER_ACTIVATION, config.DAE_DECODER_ACTIVATION,
              config.DAE_BOTTLENECK_SIZE, "bernouilli")
DAE_net.cuda()
optim_dae = torch.optim.Adam(DAE_net.parameters(), lr=config.DAE_lr, eps=config.DAE_eps)

# train from scratch unless a pretrained DAE is provided
if not config.DAE_PRETRAIN:
    train_set = dataloader.DAEdata(train_data)
    dae_training_generator = data.DataLoader(train_set, **config.generator_params)

    test_set = dataloader.DAEdata(test_data)
    dae_test_generator = data.DataLoader(test_set, **config.generator_params)

    train.train_dae(DAE_net, optim_dae, dae_training_generator, dae_test_generator,
                    nn.MSELoss(), config.DAE_CHECKPOINT, config.DAE_TRAIN_EPOCH,
                    writer, config.DAE_LOG)
else:
    utils.load_model(config.DAE_LOAD_PATH, DAE_net, optim_dae)
Example #5
import torch.nn.functional as F
from torch import optim
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST

from model import DAE
from visualize import *

# hyperparameters
num_epochs = 100
batch_size = 128
lr = 1e-3

# get images from MNIST database
dataset = MNIST('../data', transform=transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

# create denoising autoencoder and optimizer for it
dae = DAE()
optimizer = optim.Adam(dae.parameters(), lr=lr)

# start training
for epoch in range(num_epochs):

    # minibatch optimization with Adam
    for data in dataloader:
        img, _ = data

        # change the images to be 1D
        img = img.view(img.size(0), -1)

        # get output from network
        out = dae(img)

        # calculate loss and update network (the source snippet is truncated here;
        # plain MSE against the clean image is an assumed reconstruction criterion)
        loss = F.mse_loss(out, img)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
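
All of the examples above assume a DAE module defined elsewhere. A minimal sketch of such a model (the layer sizes, noise level, and activations here are illustrative assumptions, not taken from any of the examples):

import torch
from torch import nn

class DAE(nn.Module):
    """Minimal denoising autoencoder sketch: corrupt the input with Gaussian
    noise during training, encode it to a small latent vector, decode back."""

    def __init__(self, input_size=784, latent_size=32, noise_std=0.1):
        super().__init__()
        self.noise_std = noise_std
        self.encoder = nn.Sequential(
            nn.Linear(input_size, 256), nn.ReLU(),
            nn.Linear(256, latent_size))
        self.decoder = nn.Sequential(
            nn.Linear(latent_size, 256), nn.ReLU(),
            nn.Linear(256, input_size), nn.Sigmoid())

    def forward(self, x):
        # add noise only while training; reconstruct from the corrupted input
        noisy = (x + self.noise_std * torch.randn_like(x)) if self.training else x
        return self.decoder(self.encoder(noisy))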