Example #1
def set_network(depth, ctx, lr, beta1, ndf, ngf, append=True, solver='adam'):
    # Pixel2pixel networks: encoder/decoder pair plus a discriminator.
    # With append=True the discriminator sees input and output stacked along
    # the channel axis (6 channels); otherwise it sees a single 3-channel image.
    if append:
        netD = models.Discriminator(in_channels=6, n_layers=1, ndf=ndf)
    else:
        netD = models.Discriminator(in_channels=3, n_layers=1, ndf=ndf)
    netEn = models.Encoder(in_channels=3, n_layers=depth, ndf=ngf)
    netDe = models.Decoder(in_channels=3, n_layers=depth, ndf=ngf)

    # Initialize parameters
    models.network_init(netEn, ctx=ctx)
    models.network_init(netDe, ctx=ctx)
    models.network_init(netD, ctx=ctx)

    # Trainers for the encoder, the decoder, and the discriminator
    if solver == 'adam':
        trainerEn = gluon.Trainer(netEn.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
        trainerDe = gluon.Trainer(netDe.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
        trainerD = gluon.Trainer(netD.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
    elif solver == 'sgd':
        print('sgd')
        trainerEn = gluon.Trainer(netEn.collect_params(), 'sgd', {'learning_rate': lr, 'momentum': 0.9})
        trainerDe = gluon.Trainer(netDe.collect_params(), 'sgd', {'learning_rate': lr, 'momentum': 0.9})
        trainerD = gluon.Trainer(netD.collect_params(), 'sgd', {'learning_rate': lr, 'momentum': 0.9})
    return netEn, netDe, netD, trainerEn, trainerDe, trainerD
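A minimal usage sketch (hedged: it assumes MXNet/Gluon and the `models` module above are importable, and the hyperparameter values are illustrative only):

# Hypothetical call: build a depth-4 encoder/decoder GAN with Adam trainers.
import mxnet as mx

ctx = mx.cpu()  # or mx.gpu(0)
netEn, netDe, netD, trainerEn, trainerDe, trainerD = set_network(
    depth=4, ctx=ctx, lr=0.0002, beta1=0.5, ndf=64, ngf=64,
    append=False, solver='adam')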
def set_network(depth, ctx, lr, beta1, ndf, ngf, latent, append=True, solver='adam'):
    # Variant with a second discriminator operating on the latent code.
    if append:
        netD = models.Discriminator(in_channels=6, n_layers=2, ndf=ndf)
        netD2 = models.LatentDiscriminator(in_channels=6, n_layers=2, ndf=ndf)
    else:
        netD = models.Discriminator(in_channels=3, n_layers=2, ndf=ndf)
        netD2 = models.LatentDiscriminator(in_channels=3, n_layers=2, ndf=ndf)
    netEn = models.Encoder(in_channels=3, n_layers=depth, latent=latent, ndf=ngf)
    netDe = models.Decoder(in_channels=3, n_layers=depth, latent=latent, ndf=ngf)

    # Initialize parameters
    models.network_init(netEn, ctx=ctx)
    models.network_init(netDe, ctx=ctx)
    models.network_init(netD, ctx=ctx)
    models.network_init(netD2, ctx=ctx)

    trainerEn = gluon.Trainer(netEn.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
    trainerDe = gluon.Trainer(netDe.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
    trainerD = gluon.Trainer(netD.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
    trainerD2 = gluon.Trainer(netD2.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
    return netEn, netDe, netD, netD2, trainerEn, trainerDe, trainerD, trainerD2
Example #3
def main():
    latent_dim = 16

    datasource = allocate_datasource(args.env)
    num_actions = datasource.binary_input_channels
    num_rewards = datasource.scalar_output_channels
    input_channels = datasource.conv_input_channels
    output_channels = datasource.conv_output_channels

    encoder = models.Encoder(latent_dim, input_channels)
    decoder = models.Decoder(latent_dim, output_channels)
    reward_predictor = models.RewardPredictor(latent_dim, num_rewards)
    discriminator = models.Discriminator()
    transition = models.Transition(latent_dim, num_actions)

    if args.load_from is None:
        print('No --load-from directory specified: initializing new networks')
    elif 'model-encoder.pth' not in os.listdir(args.load_from):
        print('Error: Failed to load saved models from directory {}'.format(
            args.load_from))
        raise ValueError('Failed to load weights from *.pth')
    else:
        print('Loading models from directory {}'.format(args.load_from))
        encoder.load_state_dict(
            torch.load(os.path.join(args.load_from, 'model-encoder.pth')))
        decoder.load_state_dict(
            torch.load(os.path.join(args.load_from, 'model-decoder.pth')))
        transition.load_state_dict(
            torch.load(os.path.join(args.load_from, 'model-transition.pth')))
        discriminator.load_state_dict(
            torch.load(os.path.join(args.load_from,
                                    'model-discriminator.pth')))
        reward_predictor.load_state_dict(
            torch.load(
                os.path.join(args.load_from, 'model-reward_predictor.pth')))

    if args.evaluate:
        print('Running {} evaluation playthroughs'.format(args.evaluations))
        for _ in range(args.evaluations):
            with torch.no_grad():
                play(latent_dim, datasource, num_actions, num_rewards, encoder,
                     decoder, reward_predictor, discriminator, transition)
        print('Finished {} playthroughs'.format(args.evaluations))
        evaluate(datasource,
                 encoder,
                 decoder,
                 transition,
                 discriminator,
                 reward_predictor,
                 latent_dim,
                 use_training_set=True)
    else:
        train(latent_dim, datasource, num_actions, num_rewards, encoder,
              decoder, reward_predictor, discriminator, transition)
    print('Finished execution, terminating')
    def test_size(self):
        """Test that the Decoder produced the correct size output."""
        batch_size = 32
        image_size = 64
        latent_size = 128

        D = models.Decoder(latent_size, image_size)
        tensor_in = Variable(2.0 * (torch.rand(
            (batch_size, latent_size, 1, 1)) - 0.5))
        tensor_out = D.forward(tensor_in)
        self.assertEqual(tensor_out.size(),
                         torch.Size([batch_size, 3, image_size, image_size]))
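The `test_size` and `test_layers` methods in these examples are fragments of a unittest suite; a minimal harness would look like the sketch below (class and module names are assumptions):

# Hypothetical harness for the decoder shape tests above.
import unittest

import torch
from torch.autograd import Variable

import models


class DecoderShapeTest(unittest.TestCase):
    # paste test_size / test_layers from the examples into this class body
    pass


if __name__ == '__main__':
    unittest.main()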
Example #5
def create_models(image_size=64, latent_size=128):
    """Creates encoder and decoder and moves them to the GPU if requested."""

    E = models.Encoder(latent_size, image_size)
    D = models.Decoder(latent_size, image_size)

    if RUN_ON_GPU:
        print('Moving models to GPU.')
        E.cuda()
        D.cuda()
    else:
        print('Keeping models on CPU.')

    return E, D
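A hedged round-trip sketch; the encoder's output shape is an assumption inferred from the decoder shape test above ((N, latent_size, 1, 1) in, (N, 3, image_size, image_size) out):

# Hypothetical autoencoder round trip.
import torch

E, D = create_models(image_size=64, latent_size=128)
x = torch.randn(8, 3, 64, 64)  # a batch of 8 RGB images
if RUN_ON_GPU:
    x = x.cuda()
x_hat = D(E(x))  # expected shape: (8, 3, 64, 64)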
Example #6
def set_network(depth, ctx, ngf):
    # Pixel2pixel networks
    netEn = models.Encoder(in_channels=3, n_layers=depth, ndf=ngf)
    netDe = models.Decoder(in_channels=3, n_layers=depth, ndf=ngf)
    netD = models.Discriminator(in_channels=3, n_layers=depth, ndf=ngf, isthreeway=True)

    # Initialize parameters
    models.network_init(netEn, ctx=ctx)
    models.network_init(netDe, ctx=ctx)
    models.network_init(netD, ctx=ctx)

    return netEn, netDe, netD
Example #7
def set_network(depth, ctx, ndf):
    # Encoder with tanh output, decoder, and two plain discriminators.
    netEn = models.Encoder(in_channels=3, n_layers=depth, ndf=ndf, usetanh=True)
    netDe = models.Decoder(in_channels=3, n_layers=depth, ndf=ndf)
    netD = models.Discriminator(in_channels=3, n_layers=depth, ndf=ndf, isthreeway=False)
    netD2 = models.Discriminator(in_channels=3, n_layers=depth, ndf=ndf, isthreeway=False)

    return netEn, netDe, netD, netD2
Example #8
def set_network(depth, ctx, lr, beta1, ngf):
    # Pixel2pixel networks
    netEn = models.Encoder(in_channels=3, n_layers=depth, ndf=ngf)
    netDe = models.Decoder(in_channels=3, n_layers=depth, ndf=ngf)
    netD = models.Discriminator(in_channels=3, n_layers=depth, ndf=ngf, isthreeway=True)

    # Initialize parameters
    models.network_init(netDe, ctx=ctx)    
    models.network_init(netEn, ctx=ctx)
    models.network_init(netD, ctx=ctx)

    # trainer for the generator and the discriminator
    trainerEn = gluon.Trainer(netEn.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
    trainerDe = gluon.Trainer(netDe.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
    trainerD = gluon.Trainer(netD.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})

    return netEn, netDe, netD, trainerEn, trainerDe, trainerD
    def test_layers(self):
        """Test that the Decoder produced the correct size output for each layer."""
        batch_size = 32
        image_size = 64
        latent_size = 128

        D = models.Decoder(latent_size, image_size)
        tensor = Variable(2.0 * (torch.rand(
            (batch_size, latent_size, 1, 1)) - 0.5))
        tensor = D.layer1.forward(tensor)
        self.assertEqual(tensor.size(), torch.Size([batch_size, 128, 7, 7]))
        tensor = D.layer2.forward(tensor)
        self.assertEqual(tensor.size(), torch.Size([batch_size, 64, 15, 15]))
        tensor = D.layer3.forward(tensor)
        self.assertEqual(tensor.size(), torch.Size([batch_size, 64, 31, 31]))
        tensor = D.layer4.forward(tensor)
        self.assertEqual(tensor.size(), torch.Size([batch_size, 32, 63, 63]))
        tensor = D.layer5.forward(tensor)
        self.assertEqual(tensor.size(),
                         torch.Size([batch_size, 3, image_size, image_size]))
Example #10
def set_network(depth, ctx, lr, beta1, ndf, ngf, append=True):
    if append:
        netD = models.Discriminator(in_channels=6,
                                    n_layers=depth - 1,
                                    istest=True,
                                    ndf=ndf)
    else:
        netD = models.Discriminator(in_channels=3,
                                    n_layers=depth - 1,
                                    istest=True,
                                    ndf=ndf)
    netEn = models.Encoder(in_channels=3, n_layers=depth, istest=True, latent=4096,
                           ndf=ngf)
    netDe = models.Decoder(in_channels=3, n_layers=depth, istest=True, latent=4096,
                           ndf=ngf)
    netD2 = None  # placeholder; a LatentDiscriminator could be attached here

    return netEn, netDe, netD, netD2
Example #11
def main():
    batch_size = 16
    latent_dim = 12
    true_latent_dim = 4
    num_actions = 4
    encoder = models.Encoder(latent_dim)
    decoder = models.Decoder(latent_dim)
    transition = models.Transition(latent_dim, num_actions)
    blur = models.GaussianSmoothing(channels=3, kernel_size=11, sigma=4.)
    higgins_scores = []

    # NOTE: this snippet assumes a module-level `datasource` (as in Example #3)
    # that provides get_trajectories().
    #load_from_dir = '/mnt/nfs/experiments/demo_2018_12_12/scm-gan_81bd12cd'
    load_from_dir = '.'

    print('Loading models from directory {}'.format(load_from_dir))
    encoder.load_state_dict(
        torch.load(os.path.join(load_from_dir, 'model-encoder.pth')))
    decoder.load_state_dict(
        torch.load(os.path.join(load_from_dir, 'model-decoder.pth')))
    transition.load_state_dict(
        torch.load(os.path.join(load_from_dir, 'model-transition.pth')))

    encoder.eval()
    decoder.eval()
    transition.eval()
    # Freeze BatchNorm running statistics for deterministic evaluation.
    for model in (encoder, decoder, transition):
        for child in model.children():
            if isinstance(child, (nn.BatchNorm2d, nn.BatchNorm1d)):
                child.momentum = 0

    states, rewards, dones, actions = datasource.get_trajectories(batch_size,
                                                                  timesteps=1)
    states = torch.Tensor(states).cuda()

    # Reconstruct the first timestep
    reconstructed = decoder(encoder(states[:, 0]))
    imutil.show(reconstructed)
Example #12
def pick_model(args, dicts):
    """
        Use args to initialize the appropriate model
    """
    Y = get_num_labels(args.Y)
    if args.model == "rnn":
        model = models.VanillaRNN(Y, args.embed_file, dicts, args.rnn_dim,
                                  args.cell_type, args.rnn_layers, args.gpu,
                                  args.embed_size, args.bidirectional)
    elif args.model == "cnn_vanilla":
        filter_size = int(args.filter_size)
        model = models.VanillaConv(Y, args.embed_file, filter_size,
                                   args.num_filter_maps, args.gpu, dicts,
                                   args.embed_size, args.dropout)
    elif args.model == "conv_attn":
        filter_size = int(args.filter_size)
        model = models.ConvAttnPool(Y,
                                    args.embed_file,
                                    filter_size,
                                    args.num_filter_maps,
                                    args.lmbda,
                                    args.gpu,
                                    dicts,
                                    embed_size=args.embed_size,
                                    dropout=args.dropout)
    elif args.model == "rnn_attn":
        encoder = models.Encoder(Y,
                                 args.embed_file,
                                 dicts,
                                 embed_size=args.embed_size)
        decoder = models.Decoder(Y, args.embed_file, dicts)
        model = models.Seq2Seq(encoder, decoder, Y, args.embed_file, dicts)
    elif args.model == "saved":
        model = torch.load(args.test_model)
    if args.gpu:
        model.cuda()
    return model
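A hedged invocation sketch; every Namespace field below is an assumption inferred from the branches above, and `dicts` is the vocabulary structure prepared by the caller:

# Hypothetical call for the conv_attn branch.
import argparse

args = argparse.Namespace(
    model="conv_attn", Y="full", embed_file=None, filter_size="4",
    num_filter_maps=50, lmbda=0, gpu=False, embed_size=100, dropout=0.5)
model = pick_model(args, dicts)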
Example #13
import metrics
import utils

if __name__ == '__main__':
    cudnn.benchmark = True

    #load data
    train_data = datasets.Flickr8k
    train_data_loader = data.DataLoader(dataset=train_data,
                                        batch_size=config.BATCH_SIZE,
                                        shuffle=True,
                                        collate_fn=datasets.collate_fn)

    #load model
    encoder = models.Encoder()
    decoder = models.Decoder(train_data.num_word)

    if config.LOAD:
        utils.load_model(encoder,
                         os.path.join('./Model', str(config.EPOCH_START)),
                         'encoder.pth')
        utils.load_model(decoder,
                         os.path.join('./Model', str(config.EPOCH_START)),
                         'decoder.pth')

    #set optimizer
    encoder_optim = optim.Adam(encoder.parameters(), lr=config.LR)
    decoder_optim = optim.Adam(decoder.parameters(), lr=config.LR)

    #set loss and meter
    criterion = losses.MaskLoss()
Example #14
ARTICLE, TITLE, train, test = read_data()

INPUT_DIM = len(ARTICLE.vocab)
OUTPUT_DIM = len(TITLE.vocab)

ENC_EMB_DIM = 512
DEC_EMB_DIM = 512
ENC_HID_DIM = 256
DEC_HID_DIM = 256
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5
PAD_IDX = ARTICLE.vocab.stoi['<pad>']
SOS_IDX = TITLE.vocab.stoi['<sos>']
EOS_IDX = TITLE.vocab.stoi['<eos>']
attn = Attention(ENC_HID_DIM, DEC_HID_DIM)
encoder = models.Encoder(INPUT_DIM, ENC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, ENC_DROPOUT)
decoder = models.Decoder(OUTPUT_DIM, DEC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, DEC_DROPOUT, attn)

model = models.Seq2Seq(encoder, decoder, device, PAD_IDX, SOS_IDX, EOS_IDX).to(device)
weight = th.load("save_model/title_generate.pth")
model.load_state_dict(weight)
article = "7月2日,プライベート写真が流出したことでAKB48としての活動を辞退した米沢瑠美が、新しい事務所が決まったことを自身のツイッターで明かした。米沢は7月1日、「みんなに早く伝えたかった事を、話せる準備が整ってきましたっ☆ まず、所属事務所のご報告。エムズエンタープライズさんに所属することになりました☆」と報告。今年3月いっぱいで所属事務所との契約が満了したため、約2年間続いたブログを閉鎖することとなった米沢だが、今回事務所が決まったことで、新たなオフィシャルブログを製作中。今月中旬頃にはスタートする予定だという。また、「これからは演技のお仕事を中心に頑張っていきたいと思っております(^^)」と今後の方針を示唆。どんどん活動の場を広げると思われる米沢から、今後も目が離せそうにない。"
idx_ = 90  # index of the title in `train` that corresponds to this article
#article = "".join(vars(train.examples[idx_])["article"])
true = "".join(vars(train.examples[idx_])["title"])
pred_title, attention = generate(model, article)
print("".join(article))
print("[predict]", "".join(pred_title))
print("[true]", true)
display_attention(article,pred_title,attention)
def main():
    print("init data folders")

    encoder = {}
    decoder = {}
    encoder_optim = {}
    decoder_optim = {}
    encoder_scheduler = {}
    decoder_scheduler = {}
    for s in ['s1', 's2']:
        encoder[s] = {}
        decoder[s] = {}
        encoder_optim[s] = {}
        decoder_optim[s] = {}
        encoder_scheduler[s] = {}
        decoder_scheduler[s] = {}
        for lv in ['lv1', 'lv2', 'lv3']:
            encoder[s][lv] = models.Encoder()
            decoder[s][lv] = models.Decoder()
            encoder[s][lv].apply(weight_init).cuda(GPU)
            decoder[s][lv].apply(weight_init).cuda(GPU)
            encoder_optim[s][lv] = torch.optim.Adam(encoder[s][lv].parameters(),lr=LEARNING_RATE)
            encoder_scheduler[s][lv] = StepLR(encoder_optim[s][lv],step_size=1000,gamma=0.1)
            decoder_optim[s][lv] = torch.optim.Adam(decoder[s][lv].parameters(),lr=LEARNING_RATE)
            decoder_scheduler[s][lv] = StepLR(decoder_optim[s][lv],step_size=1000,gamma=0.1)
            if os.path.exists(str('./checkpoints/' + METHOD + "/encoder_" + s + "_" + lv + ".pkl")):
                encoder[s][lv].load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/encoder_" + s + "_" + lv + ".pkl")))
                print("load encoder_" + s + "_" + lv + " successfully!")
            if os.path.exists(str('./checkpoints/' + METHOD + "/decoder_" + s + "_" + lv + ".pkl")):
                decoder[s][lv].load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/decoder_" + s + "_" + lv + ".pkl")))
                print("load decoder_" + s + "_" + lv + " successfully!")

    
    if not os.path.exists('./test_results/' + EXPDIR):
        os.makedirs('./test_results/' + EXPDIR)
            
    iteration = 0.0
    test_time = 0.0
    for images_name in os.listdir(SAMPLE_DIR):
        with torch.no_grad():
            images = {}
            feature = {}
            residual = {}
            for s in ['s1', 's2']:
                feature[s] = {}
                residual[s] = {}

            images['lv1'] = transforms.ToTensor()(Image.open(SAMPLE_DIR + '/' + images_name).convert('RGB'))
            images['lv1'] = Variable(images['lv1'] - 0.5).unsqueeze(0).cuda(GPU)
            start = time.time()

            H = images['lv1'].size(2)
            W = images['lv1'].size(3)
            
            images['lv2_1'] = images['lv1'][:,:,0:int(H/2),:]
            images['lv2_2'] = images['lv1'][:,:,int(H/2):H,:]
            images['lv3_1'] = images['lv2_1'][:,:,:,0:int(W/2)]
            images['lv3_2'] = images['lv2_1'][:,:,:,int(W/2):W]
            images['lv3_3'] = images['lv2_2'][:,:,:,0:int(W/2)]
            images['lv3_4'] = images['lv2_2'][:,:,:,int(W/2):W]

            s = 's1'		
            feature[s]['lv3_1'] = encoder[s]['lv3'](images['lv3_1'])
            feature[s]['lv3_2'] = encoder[s]['lv3'](images['lv3_2'])
            feature[s]['lv3_3'] = encoder[s]['lv3'](images['lv3_3'])
            feature[s]['lv3_4'] = encoder[s]['lv3'](images['lv3_4'])
            feature[s]['lv3_top'] = torch.cat((feature[s]['lv3_1'], feature[s]['lv3_2']), 3)
            feature[s]['lv3_bot'] = torch.cat((feature[s]['lv3_3'], feature[s]['lv3_4']), 3)
            residual[s]['lv3_top'] = decoder[s]['lv3'](feature[s]['lv3_top'])
            residual[s]['lv3_bot'] = decoder[s]['lv3'](feature[s]['lv3_bot'])

            feature[s]['lv2_1'] = encoder[s]['lv2'](images['lv2_1'] + residual[s]['lv3_top']) + feature[s]['lv3_top']
            feature[s]['lv2_2'] = encoder[s]['lv2'](images['lv2_2'] + residual[s]['lv3_bot']) + feature[s]['lv3_bot']
            feature[s]['lv2'] = torch.cat((feature[s]['lv2_1'], feature[s]['lv2_2']), 2)
            residual[s]['lv2'] = decoder[s]['lv2'](feature[s]['lv2'])

            feature[s]['lv1'] = encoder[s]['lv1'](images['lv1'] + residual[s]['lv2']) + feature[s]['lv2']
            residual[s]['lv1'] = decoder[s]['lv1'](feature[s]['lv1'])
            
            s = 's2'
            ps = 's1'
            feature[s]['lv3_1'] = encoder[s]['lv3'](residual[ps]['lv1'][:,:,0:int(H/2),0:int(W/2)])
            feature[s]['lv3_2'] = encoder[s]['lv3'](residual[ps]['lv1'][:,:,0:int(H/2),int(W/2):W])
            feature[s]['lv3_3'] = encoder[s]['lv3'](residual[ps]['lv1'][:,:,int(H/2):H,0:int(W/2)])
            feature[s]['lv3_4'] = encoder[s]['lv3'](residual[ps]['lv1'][:,:,int(H/2):H,int(W/2):W])
            feature[s]['lv3_top'] = torch.cat((feature[s]['lv3_1'], feature[s]['lv3_2']), 3) + feature[ps]['lv3_top']
            feature[s]['lv3_bot'] = torch.cat((feature[s]['lv3_3'], feature[s]['lv3_4']), 3) + feature[ps]['lv3_bot']
            residual[s]['lv3_top'] = decoder[s]['lv3'](feature[s]['lv3_top'])
            residual[s]['lv3_bot'] = decoder[s]['lv3'](feature[s]['lv3_bot'])

            feature[s]['lv2_1'] = encoder[s]['lv2'](residual[ps]['lv1'][:,:,0:int(H/2),:] + residual[s]['lv3_top']) + feature[s]['lv3_top'] + feature[ps]['lv2_1']
            feature[s]['lv2_2'] = encoder[s]['lv2'](residual[ps]['lv1'][:,:,int(H/2):H,:] + residual[s]['lv3_bot']) + feature[s]['lv3_bot'] + feature[ps]['lv2_2']
            feature[s]['lv2'] = torch.cat((feature[s]['lv2_1'], feature[s]['lv2_2']), 2)
            residual[s]['lv2'] = decoder[s]['lv2'](feature[s]['lv2']) + residual['s1']['lv1']

            feature[s]['lv1'] = encoder[s]['lv1'](residual[ps]['lv1'] + residual[s]['lv2']) + feature[s]['lv2'] + feature[ps]['lv1']
            residual[s]['lv1'] = decoder[s]['lv1'](feature[s]['lv1'])
            
            deblurred_image = residual[s]['lv1']
            stop = time.time()
            test_time += stop-start
            print('RunTime:%.4f'%(stop-start), '  Average Runtime:%.4f'%(test_time/(iteration+1)))
            save_images(deblurred_image.data + 0.5, images_name) 
            iteration += 1
Example #16
        if train:
            optim_encoder_reg.zero_grad()
            G_loss.backward()
            optim_encoder_reg.step()

        if i % 100 == 0:
            print(
                '\n Step [%d], recon_loss: %.4f, discriminator_loss: %.4f, generator_loss: %.4f'
                % (i, ae_loss.item(), D_loss.item(), G_loss.item()))

    M = len(dataloader.dataset)
    return total_rec_loss / M, total_disc_loss / M, total_gen_loss / M


encoder = models.Encoder().to(device)
decoder = models.Decoder().to(device)
Disc = models.Discriminator().to(device)

#encode/decode optimizers
optim_encoder = torch.optim.Adam(encoder.parameters(), lr=args.lr)
optim_decoder = torch.optim.Adam(decoder.parameters(), lr=args.lr)
optim_D = torch.optim.Adam(Disc.parameters(), lr=args.lr)
optim_encoder_reg = torch.optim.Adam(encoder.parameters(), lr=0.0001)

schedulerDisc = torch.optim.lr_scheduler.ExponentialLR(optim_D, gamma=0.99)
schedulerD = torch.optim.lr_scheduler.ExponentialLR(optim_decoder, gamma=0.99)
schedulerE = torch.optim.lr_scheduler.ExponentialLR(optim_encoder, gamma=0.99)

train_loss = []
val_loss = []
for epoch in range(args.epochs):
Example #17
def main():
    print("init data folders")

    encoder_lv1 = models.Encoder()
    encoder_lv2 = models.Encoder()    
    encoder_lv3 = models.Encoder()

    decoder_lv1 = models.Decoder()
    decoder_lv2 = models.Decoder()    
    decoder_lv3 = models.Decoder()
    
    encoder_lv1.apply(weight_init).cuda(GPU)    
    encoder_lv2.apply(weight_init).cuda(GPU)
    encoder_lv3.apply(weight_init).cuda(GPU)

    decoder_lv1.apply(weight_init).cuda(GPU)    
    decoder_lv2.apply(weight_init).cuda(GPU)
    decoder_lv3.apply(weight_init).cuda(GPU)
    
    encoder_lv1_optim = torch.optim.Adam(encoder_lv1.parameters(),lr=LEARNING_RATE)
    # encoder_lv1_scheduler = StepLR(encoder_lv1_optim,step_size=10,gamma=0.1)
    encoder_lv2_optim = torch.optim.Adam(encoder_lv2.parameters(),lr=LEARNING_RATE)
    # encoder_lv2_scheduler = StepLR(encoder_lv2_optim,step_size=10,gamma=0.1)
    encoder_lv3_optim = torch.optim.Adam(encoder_lv3.parameters(),lr=LEARNING_RATE)
    # encoder_lv3_scheduler = StepLR(encoder_lv3_optim,step_size=10,gamma=0.1)

    decoder_lv1_optim = torch.optim.Adam(decoder_lv1.parameters(),lr=LEARNING_RATE)
    # decoder_lv1_scheduler = StepLR(decoder_lv1_optim,step_size=10,gamma=0.1)
    decoder_lv2_optim = torch.optim.Adam(decoder_lv2.parameters(),lr=LEARNING_RATE)
    # decoder_lv2_scheduler = StepLR(decoder_lv2_optim,step_size=10,gamma=0.1)
    decoder_lv3_optim = torch.optim.Adam(decoder_lv3.parameters(),lr=LEARNING_RATE)
    # decoder_lv3_scheduler = StepLR(decoder_lv3_optim,step_size=10,gamma=0.1)

    # if os.path.exists(str('./checkpoints/' + METHOD + "/encoder_lv1.pkl")):
    #     encoder_lv1.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/encoder_lv1.pkl")))
    #     print("load encoder_lv1 success")
    # if os.path.exists(str('./checkpoints/' + METHOD + "/encoder_lv2.pkl")):
    #     encoder_lv2.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/encoder_lv2.pkl")))
    #     print("load encoder_lv2 success")
    # if os.path.exists(str('./checkpoints/' + METHOD + "/encoder_lv3.pkl")):
    #     encoder_lv3.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/encoder_lv3.pkl")))
    #     print("load encoder_lv3 success")

    # if os.path.exists(str('./checkpoints/' + METHOD + "/decoder_lv1.pkl")):
    #     decoder_lv1.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/decoder_lv1.pkl")))
    #     print("load encoder_lv1 success")
    # if os.path.exists(str('./checkpoints/' + METHOD + "/decoder_lv2.pkl")):
    #     decoder_lv2.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/decoder_lv2.pkl")))
    #     print("load decoder_lv2 success")
    # if os.path.exists(str('./checkpoints/' + METHOD + "/decoder_lv3.pkl")):
    #     decoder_lv3.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/decoder_lv3.pkl")))
    #     print("load decoder_lv3 success")

    # LOAD models here 

    saved_epoch = 99
    start_epoch = saved_epoch + 1  # resume training after the loaded checkpoint
    encoder_lv1.load_state_dict(torch.load(str('./checkpoints2/' + METHOD + "/ep" + str(saved_epoch)+ "_encoder_lv1.pkl")))
    encoder_lv2.load_state_dict(torch.load(str('./checkpoints2/' + METHOD + "/ep" + str(saved_epoch)+ "_encoder_lv2.pkl")))
    encoder_lv3.load_state_dict(torch.load(str('./checkpoints2/' + METHOD + "/ep" + str(saved_epoch)+ "_encoder_lv3.pkl")))

    decoder_lv1.load_state_dict(torch.load(str('./checkpoints2/' + METHOD + "/ep" + str(saved_epoch)+ "_decoder_lv1.pkl")))
    decoder_lv2.load_state_dict(torch.load(str('./checkpoints2/' + METHOD + "/ep" + str(saved_epoch)+ "_decoder_lv2.pkl")))
    decoder_lv3.load_state_dict(torch.load(str('./checkpoints2/' + METHOD + "/ep" + str(saved_epoch)+ "_decoder_lv3.pkl")))
    
    if not os.path.exists('./checkpoints2/' + METHOD):
        os.makedirs('./checkpoints2/' + METHOD)
            
    for epoch in range(start_epoch, EPOCHS):
        # encoder_lv1_scheduler.step(epoch)
        # encoder_lv2_scheduler.step(epoch)
        # encoder_lv3_scheduler.step(epoch)

        # decoder_lv1_scheduler.step(epoch)
        # decoder_lv2_scheduler.step(epoch)
        # decoder_lv3_scheduler.step(epoch)     
        
        print("Training...")
        
        train_dataset = NH_HazeDataset(
            hazed_image_files = 'new_dataset/train_patch_hazy.txt',   # make changes here !
            dehazed_image_files = 'new_dataset/train_patch_gt.txt',
            root_dir = 'new_dataset/',
            crop = False,
            rotation = False,
            crop_size = IMAGE_SIZE,
            transform = transforms.Compose([transforms.Resize((128,160)),
                transforms.ToTensor()
                ]))
        train_dataloader = DataLoader(train_dataset, batch_size = BATCH_SIZE, shuffle=True)
        start = 0
        iLoss = 0

        print('Epoch: ',epoch)

        # torch.save(encoder_lv1.state_dict(),str('./checkpoints2/' + METHOD + "/encoder_lv1.pkl"))
        
        for iteration, images in enumerate(train_dataloader):            
            # mse = nn.MSELoss().cuda(GPU)   
            # mae = nn.L1Loss().cuda(GPU)      
            custom_loss_fn = CustomLoss_function().cuda(GPU) 
            
            gt = Variable(images['dehazed_image'] - 0.5).cuda(GPU)            
            H = gt.size(2)
            W = gt.size(3)

            images_lv1 = Variable(images['hazed_image'] - 0.5).cuda(GPU)
            images_lv2 = F.interpolate(images_lv1, scale_factor = 0.5, mode = 'bilinear')
            images_lv3 = F.interpolate(images_lv2, scale_factor = 0.5, mode = 'bilinear')

            feature_lv3 = encoder_lv3(images_lv3)
            residual_lv3 = decoder_lv3(feature_lv3)


            residual_lv3 = F.interpolate(residual_lv3, scale_factor=2, mode= 'bilinear')
            feature_lv3 = F.interpolate(feature_lv3, scale_factor=2, mode= 'bilinear')
            feature_lv2 = encoder_lv2(images_lv2 + residual_lv3)
            residual_lv2 = decoder_lv2(feature_lv2 + feature_lv3)

            residual_lv2 = F.interpolate(residual_lv2, scale_factor=2, mode= 'bilinear')
            feature_lv2 = F.interpolate(feature_lv2, scale_factor=2, mode= 'bilinear')
            feature_lv1 = encoder_lv1(images_lv1 + residual_lv2)
            dehazed_image = decoder_lv1(feature_lv1 + feature_lv2)

            loss_lv1, loss_recn, loss_perc, loss_tv = custom_loss_fn(dehazed_image,gt)

            loss  = loss_lv1

            iLoss += loss.item()
            
            encoder_lv1.zero_grad()
            encoder_lv2.zero_grad()
            encoder_lv3.zero_grad()

            decoder_lv1.zero_grad()
            decoder_lv2.zero_grad()
            decoder_lv3.zero_grad()

            loss.backward()

            encoder_lv1_optim.step()
            encoder_lv2_optim.step()
            encoder_lv3_optim.step()

            decoder_lv1_optim.step()
            decoder_lv2_optim.step()
            decoder_lv3_optim.step() 

            writer.add_scalar('Loss',loss.item(),epoch*len(train_dataloader)+iteration)
            writer.add_scalar('Loss_recn',loss_recn.item(),epoch*len(train_dataloader)+iteration)
            writer.add_scalar('Loss_perc',loss_perc.item(),epoch*len(train_dataloader)+iteration)
            writer.add_scalar('Loss_tv',loss_tv.item(),epoch*len(train_dataloader)+iteration)
            
            if (iteration+1)%10 == 0:
                stop = time.time()
                print("epoch:", epoch, "iteration:", iteration+1, "loss:%.4f"%loss.item(), 'time:%.4f'%(stop-start))
                start = time.time()
        
                    
        torch.save(encoder_lv1.state_dict(),str('./checkpoints2/' + METHOD + "/ep" + str(epoch)+ "_encoder_lv1.pkl"))
        torch.save(encoder_lv2.state_dict(),str('./checkpoints2/' + METHOD + "/ep" + str(epoch)+"_encoder_lv2.pkl"))
        torch.save(encoder_lv3.state_dict(),str('./checkpoints2/' + METHOD + "/ep" + str(epoch)+"_encoder_lv3.pkl"))

        torch.save(decoder_lv1.state_dict(),str('./checkpoints2/' + METHOD + "/ep" + str(epoch)+"_decoder_lv1.pkl"))
        torch.save(decoder_lv2.state_dict(),str('./checkpoints2/' + METHOD + "/ep" + str(epoch)+"_decoder_lv2.pkl"))
        torch.save(decoder_lv3.state_dict(),str('./checkpoints2/' + METHOD + "/ep" + str(epoch)+"_decoder_lv3.pkl"))

        print('Training Loss:', iLoss / len(train_dataloader))

def main():
    print("init data folders")

    encoder_lv1 = models.Encoder().apply(weight_init).cuda(GPU)
    encoder_lv2 = models.Encoder().apply(weight_init).cuda(GPU)
    encoder_lv3 = models.Encoder().apply(weight_init).cuda(GPU)

    decoder_lv1 = models.Decoder().apply(weight_init).cuda(GPU)
    decoder_lv2 = models.Decoder().apply(weight_init).cuda(GPU)
    decoder_lv3 = models.Decoder().apply(weight_init).cuda(GPU)

    if os.path.exists(str('./checkpoints/' + METHOD + "/encoder_lv1.pkl")):
        encoder_lv1.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/encoder_lv1.pkl")))
        print("load encoder_lv1 success")
    if os.path.exists(str('./checkpoints/' + METHOD + "/encoder_lv2.pkl")):
        encoder_lv2.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/encoder_lv2.pkl")))
        print("load encoder_lv2 success")
    if os.path.exists(str('./checkpoints/' + METHOD + "/encoder_lv3.pkl")):
        encoder_lv3.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/encoder_lv3.pkl")))
        print("load encoder_lv3 success")

    if os.path.exists(str('./checkpoints/' + METHOD + "/decoder_lv1.pkl")):
        decoder_lv1.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/decoder_lv1.pkl")))
        print("load encoder_lv1 success")
    if os.path.exists(str('./checkpoints/' + METHOD + "/decoder_lv2.pkl")):
        decoder_lv2.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/decoder_lv2.pkl")))
        print("load decoder_lv2 success")
    if os.path.exists(str('./checkpoints/' + METHOD + "/decoder_lv3.pkl")):
        decoder_lv3.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/decoder_lv3.pkl")))
        print("load decoder_lv3 success")
    
    if not os.path.exists('./test_results/' + EXPDIR):
        os.makedirs('./test_results/' + EXPDIR)
            
    iteration = 0.0
    test_time = 0.0
    for images_name in os.listdir(SAMPLE_DIR):
        with torch.no_grad():
            images_lv1 = transforms.ToTensor()(Image.open(SAMPLE_DIR + '/' + images_name).convert('RGB'))
            images_lv1 = Variable(images_lv1 - 0.5).unsqueeze(0).cuda(GPU)
            start = time.time()           
            H = images_lv1.size(2)
            W = images_lv1.size(3)

            images_lv2_1 = images_lv1[:,:,0:int(H/2),:]
            images_lv2_2 = images_lv1[:,:,int(H/2):H,:]
            images_lv3_1 = images_lv2_1[:,:,:,0:int(W/2)]
            images_lv3_2 = images_lv2_1[:,:,:,int(W/2):W]
            images_lv3_3 = images_lv2_2[:,:,:,0:int(W/2)]
            images_lv3_4 = images_lv2_2[:,:,:,int(W/2):W]

            feature_lv3_1 = encoder_lv3(images_lv3_1)
            feature_lv3_2 = encoder_lv3(images_lv3_2)
            feature_lv3_3 = encoder_lv3(images_lv3_3)
            feature_lv3_4 = encoder_lv3(images_lv3_4)
            feature_lv3_top = torch.cat((feature_lv3_1, feature_lv3_2), 3)
            feature_lv3_bot = torch.cat((feature_lv3_3, feature_lv3_4), 3)
            feature_lv3 = torch.cat((feature_lv3_top, feature_lv3_bot), 2)
            residual_lv3_top = decoder_lv3(feature_lv3_top)
            residual_lv3_bot = decoder_lv3(feature_lv3_bot)

            feature_lv2_1 = encoder_lv2(images_lv2_1 + residual_lv3_top)
            feature_lv2_2 = encoder_lv2(images_lv2_2 + residual_lv3_bot)
            feature_lv2 = torch.cat((feature_lv2_1, feature_lv2_2), 2) + feature_lv3
            residual_lv2 = decoder_lv2(feature_lv2)

            feature_lv1 = encoder_lv1(images_lv1 + residual_lv2) + feature_lv2
            deblur_image = decoder_lv1(feature_lv1)
            
            stop = time.time()
            test_time += stop-start
            print('RunTime:%.4f'%(stop-start), '  Average Runtime:%.4f'%(test_time/(iteration+1)))
            save_images(deblur_image.data + 0.5, images_name) 
            iteration += 1
Example #19
# Create encoder (mirrors the decoder construction below)
encoder = models.Encoder(
    input_size=input_size,
    hidden_size=hidden_size,
    vocab_size=vocab_size,
    embedding_dict=embeddings,
    num_layers=1,
    dropout=0,
    rnn_type='gru',
)

print("Encoder online")

# Create decoder
decoder = models.Decoder(
    input_size=input_size,
    hidden_size=hidden_size,
    vocab_size=vocab_size,
    embedding_dict=embeddings,
    num_layers=1,
    dropout=0,
    rnn_type='gru',
)

print("Decoder online")

if constants.USE_CUDA:
    encoder = encoder.cuda()
    decoder = decoder.cuda()

print("Synchronized with graphics unit")

# Create Adam optimizers. Decoder has 5* the learning rate of the encoder.
encoder_optimizer = torch.optim.Adam(encoder.parameters(), lr=learning_rate)
Example #20
def build_model(args, vocab):
    """
    Build the model, optimizer, and loss according to experiment args

    Parameters
    ----------
    args : argparse.Namespace
        Experiment arguments
    vocab : dict
        vocab for language model

    Returns
    -------
    model : torch.nn.Module
        The model to be trained
    optimizer_func : () -> torch.optim.Optimizer
        A function that creates an optimizer (done because we may need to re-initialize the optimizer)
    loss : torch.nn.Module
        The loss function
    """
    params_to_optimize = []
    # Build the model according to the given arguments
    if args.backbone == "mlp" and args.dataset == "coco":
        backbone_input_dim = 512
    else:
        backbone_input_dim = None
    encoder = models.ImageEncoder(
        backbone=args.backbone,
        pretrained=args.pretrained_backbone,
        grayscale="mnist" in args.dataset,
        backbone_input_dim=backbone_input_dim,
    )
    encoder.fine_tune(not args.freeze_encoder)
    if not args.freeze_encoder:
        params_to_optimize.append({
            "params": [p for p in encoder.parameters() if p.requires_grad],
            "lr": args.encoder_lr,
        })

    if args.attention:
        decoder = models.DecoderWithAttention(
            embed_dim=args.emb_dim,
            decoder_dim=args.decoder_dim,
            vocab_size=vocab,
            encoder_dim=encoder.output_dim,
            dropout=args.dropout,
            attention_dim=args.attention_dim,
        )
    else:
        decoder = models.Decoder(
            embed_dim=args.emb_dim,
            decoder_dim=args.decoder_dim,
            vocab=vocab,
            encoder_dim=encoder.output_dim,
            dropout=args.dropout,
        )

    params_to_optimize.append({
        "params": [p for p in decoder.parameters() if p.requires_grad],
        "lr": args.decoder_lr,
    })

    loss = nn.CrossEntropyLoss()

    model = models.Captioner(encoder, decoder, encoder_dropout=args.dropout)

    optimizer_func = lambda: optim.Adam(params_to_optimize)

    if args.cuda:
        model = model.cuda()
        loss = loss.cuda()

    return model, optimizer_func, loss
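Because `optimizer_func` is returned as a factory (so the optimizer can be rebuilt if parameter groups change), a hedged usage sketch is:

# Hypothetical training setup; `args` and `vocab` come from the experiment harness.
model, optimizer_func, loss = build_model(args, vocab)
optimizer = optimizer_func()  # instantiate Adam over the collected parameter groups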
Example #21
import metrics
import utils

if __name__ == '__main__':
    cudnn.benchmark = True

    #load data
    train_data = datasets.Cornell
    train_data_loader = data.DataLoader(dataset=train_data,
                                        batch_size=config.BATCH_SIZE,
                                        shuffle=True,
                                        collate_fn=datasets.collate_fn)

    #load model
    encoder = models.Encoder(train_data.num_word, 512, 2, dropout=0.1)
    decoder = models.Decoder(train_data.num_word, 512, 2, 'dot', dropout=0.1)
    if config.LOAD:
        utils.load_model(encoder,
                         os.path.join('./Model', str(config.EPOCH_START)),
                         'encoder.pth')
        utils.load_model(decoder,
                         os.path.join('./Model', str(config.EPOCH_START)),
                         'decoder.pth')

    #set optimizer
    encoder_optim = optim.Adam(encoder.parameters(), lr=config.LR)
    decoder_optim = optim.Adam(decoder.parameters(), lr=config.LR * 5)

    #set loss and meter
    criterion = losses.MaskLoss()
    loss_meter = metrics.LossMeter()
def main():
    train = True
    input_size = 768
    # input_size = 256  # set to none for default cropping
    print("Training with Places365 Dataset")
    max_its = 300000
    max_eps = 20000
    optimizer = 'adam'  # separate optimizers for discriminator and autoencoder
    lr = 0.0002
    batch_size = 1
    step_lr_gamma = 0.1
    step_lr_step = 200000
    discr_success_rate = 0.8
    win_rate = 0.8
    log_interval = int(max_its // 100)
    # log_interval = 100
    if log_interval < 10:
        print("\n WARNING: VERY SMALL LOG INTERVAL\n")

    lam = 0.001
    disc_wt = 1.
    trans_wt = 100.
    style_wt = 100.

    alpha = 0.05

    tblock_kernel = 10
    # Models
    encoder = models.Encoder()
    decoder = models.Decoder()
    tblock = models.TransformerBlock(kernel_size=tblock_kernel)
    discrim = models.Discriminator()

    # init weights
    models.init_weights(encoder)
    models.init_weights(decoder)
    models.init_weights(tblock)
    models.init_weights(discrim)

    if torch.cuda.is_available():
        encoder = encoder.cuda()
        decoder = decoder.cuda()
        tblock = tblock.cuda()
        discrim = discrim.cuda()

    if train:
        # load tmp weights
        if os.path.exists('tmp'):
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
            encoder = torch.load("tmp/encoder.pt", map_location=device)
            decoder = torch.load("tmp/decoder.pt", map_location=device)
            tblock = torch.load("tmp/tblock.pt", map_location=device)
            discrim = torch.load("tmp/discriminator.pt", map_location=device)

        # Losses
        gen_loss = losses.SoftmaxLoss()
        disc_loss = losses.SoftmaxLoss()
        transf_loss = losses.TransformedLoss()
        style_aware_loss = losses.StyleAwareContentLoss()

        # # optimizer for encoder/decoder (and tblock? - think it has no parameters though)
        # params_to_update = []
        # for m in [encoder, decoder, tblock, discrim]:
        #     for param in m.parameters():
        #         param.requires_grad = True
        #         params_to_update.append(param)
        # # optimizer = torch.optim.Adam(params_to_update, lr=lr)

        data_dir = '../Datasets/WikiArt-Sorted/data/vincent-van-gogh_road-with-cypresses-1890'
        # data_dir = '../Datasets/WikiArt-Sorted/data/edvard-munch/'
        style_data = datasets.StyleDataset(data_dir)
        num_workers = 8
        # if mpii:
        #     dataloaders = {'train': DataLoader(datasets.MpiiDataset(train=True, input_size=input_size,
        #                                                             style_dataset=style_data, crop_size=crop_size),
        #                                        batch_size=batch_size, shuffle=True, num_workers=num_workers),
        #                    'test': DataLoader(datasets.MpiiDataset(train=False, style_dataset=style_data, input_size=input_size),
        #                                       batch_size=1, shuffle=False, num_workers=num_workers)}
        # else:
        dataloaders = {
            'train':
            DataLoader(datasets.PlacesDataset(train=True,
                                              input_size=input_size,
                                              style_dataset=style_data),
                       batch_size=batch_size,
                       shuffle=True,
                       num_workers=num_workers),
            'test':
            DataLoader(datasets.TestDataset(),
                       batch_size=1,
                       shuffle=False,
                       num_workers=num_workers)
        }

        # optimizer for encoder/decoder (and tblock? - think it has no parameters though)
        gen_params = []
        for m in [encoder, decoder]:
            for param in m.parameters():
                param.requires_grad = True
                gen_params.append(param)
        g_optimizer = torch.optim.Adam(gen_params, lr=lr)

        # optimizer for disciminator
        disc_params = []
        for param in discrim.parameters():
            param.requires_grad = True
            disc_params.append(param)
        d_optimizer = torch.optim.Adam(disc_params, lr=lr)

        scheduler_g = torch.optim.lr_scheduler.StepLR(g_optimizer,
                                                      step_lr_step,
                                                      gamma=step_lr_gamma,
                                                      last_epoch=-1)
        scheduler_d = torch.optim.lr_scheduler.StepLR(d_optimizer,
                                                      step_lr_step,
                                                      gamma=step_lr_gamma,
                                                      last_epoch=-1)

        its = 0
        print('Begin Training:')
        g_steps = 0
        d_steps = 0
        image_id = 0
        time_per_it = []
        if max_its is None:
            max_its = len(dataloaders['train'])

        # set models to train()
        encoder.train()
        decoder.train()
        tblock.train()
        discrim.train()

        d_loss = 0
        g_loss = 0
        gen_acc = 0
        d_acc = 0
        for epoch in range(max_eps):
            if its > max_its:
                break
            for images, style_images in dataloaders['train']:
                t0 = process_time()
                # utils.export_image(images[0, :, :, :], style_images[0, :, :, :], 'input_images.jpg')

                # zero gradients
                g_optimizer.zero_grad()
                d_optimizer.zero_grad()

                if its > max_its:
                    break

                if torch.cuda.is_available():
                    images = images.cuda()
                    if style_images is not None:
                        style_images = style_images.cuda()

                # autoencoder
                emb = encoder(images)
                stylized_im = decoder(emb)

                # if training do losses etc
                stylized_emb = encoder(stylized_im)
                # add losses

                # tblock
                transformed_inputs, transformed_outputs = tblock(
                    images, stylized_im)
                # add loss

                # # # GENERATOR TRAIN # # # #
                g_optimizer.zero_grad()
                d_out_fake = discrim(
                    stylized_im
                )  # keep attached to generator because grads needed

                # accuracy given the fake output, generator images
                gen_acc = utils.accuracy(
                    d_out_fake,
                    target_label=1)  # accuracy given only the output image

                del g_loss
                g_loss = disc_wt * gen_loss(d_out_fake, target_label=1)
                g_loss += trans_wt * transf_loss(transformed_inputs,
                                                 transformed_outputs)
                g_loss += style_wt * style_aware_loss(emb, stylized_emb)
                g_loss.backward()
                g_optimizer.step()  # update the encoder/decoder (generator) parameters
                discr_success_rate = discr_success_rate * (
                    1. - alpha) + alpha * (1. - gen_acc)
                g_steps += 1

                # # # DISCRIMINATOR TRAIN # # # #
                d_optimizer.zero_grad()
                # detach from generator, so not propagating unnecessary gradients
                d_out_fake = discrim(stylized_im.clone().detach())
                d_out_real_ph = discrim(images)
                d_out_real_style = discrim(style_images)

                # accuracy given all the images
                d_acc_real_ph = utils.accuracy(d_out_real_ph, target_label=0)
                d_acc_fake_style = utils.accuracy(d_out_fake, target_label=0)
                d_acc_real_style = utils.accuracy(d_out_real_style,
                                                  target_label=1)
                gen_acc = 1 - d_acc_fake_style
                d_acc = (d_acc_real_ph + d_acc_fake_style +
                         d_acc_real_style) / 3

                # Loss calculation
                d_loss = disc_loss(d_out_fake, target_label=0)
                d_loss += disc_loss(d_out_real_style, target_label=1)
                d_loss += disc_loss(d_out_real_ph, target_label=0)

                d_loss.backward()
                d_optimizer.step()
                discr_success_rate = discr_success_rate * (
                    1. - alpha) + alpha * d_acc
                d_steps += 1

                # print(g_loss.item(), g_steps, d_loss.item(), d_steps)
                t1 = process_time()
                time_per_it.append((t1 - t0) / 3600)
                if len(time_per_it) > 100:
                    time_per_it.pop(0)

                if not its % log_interval:
                    running_mean_it_time = sum(time_per_it) / len(time_per_it)
                    time_rem = (max_its - its + 1) * running_mean_it_time
                    print(
                        "{}/{} -- {} G Steps -- G Loss {:.2f} -- G Acc {:.2f} -"
                        "- {} D Steps -- D Loss {:.2f} -- D Acc {:.2f} -"
                        "- {:.2f} D Success -- {:.1f} Hours remaing...".format(
                            its, max_its, g_steps, g_loss, gen_acc, d_steps,
                            d_loss, d_acc, discr_success_rate, time_rem))

                    for idx in range(images.size(0)):
                        output_path = 'outputs/training/'
                        if not os.path.exists(output_path):
                            os.makedirs(output_path)

                        output_path += 'iteration_{:06d}_example_{}.jpg'.format(
                            its, idx)
                        utils.export_image([
                            images[idx, :, :, :], style_images[idx, :, :, :],
                            stylized_im[idx, :, :, :]
                        ], output_path)

                its += 1
                scheduler_g.step()
                scheduler_d.step()

                if not its % 10000:
                    if not os.path.exists('tmp'):
                        os.mkdir('tmp')
                    torch.save(encoder, "tmp/encoder.pt")
                    torch.save(decoder, "tmp/decoder.pt")
                    torch.save(tblock, "tmp/tblock.pt")
                    torch.save(discrim, "tmp/discriminator.pt")

        # save the final models (whole modules rather than state_dicts)
        torch.save(encoder, "encoder.pt")
        torch.save(decoder, "decoder.pt")
        torch.save(tblock, "tblock.pt")
        torch.save(discrim, "discriminator.pt")

        evaluate(encoder, decoder, dataloaders['test'])
    else:
        encoder = torch.load('encoder.pt', map_location='cpu')
        decoder = torch.load('decoder.pt', map_location='cpu')

        # encoder.load_state_dict(encoder_dict)
        # decoder.load_state_dict(decoder_dict)

        dataloader = DataLoader(datasets.TestDataset(),
                                batch_size=1,
                                shuffle=False,
                                num_workers=8)
        evaluate(encoder, decoder, dataloader)
        raise NotImplementedError('standalone evaluation path not implemented')
Example #23
def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--loader_workers', type=int, default=64)
    parser.add_argument('--epochs', type=int, default=1200)
    parser.add_argument('--batch_size', type=int, default=512)
    parser.add_argument('--save_interval', type=int, default=10)
    parser.add_argument('--save_dir', type=str, default='logs/')
    parser.add_argument('--resume', type=str, default=None)
    parser.add_argument('--table', type=str, default='data/Cangjie5.txt')
    parser.add_argument('--codemap', type=str, default='data/codemap_cangjie5.txt')
    parser.add_argument('--fonts', nargs='+', default=['data/hanazono/HanaMinA.ttf', 'data/hanazono/HanaMinB.ttf'])
    parser.add_argument('--encoder_lr', type=float, default=1e-3)
    parser.add_argument('--decoder_lr', type=float, default=1e-3)
    parser.add_argument('--alpha_c', type=float, default=1.)
    parser.add_argument('--grad_clip', type=float, default=5.)
    args = parser.parse_args()
    args.save_dir = os.path.join(args.save_dir, datetime.datetime.now().strftime("%m-%d-%Y-%H:%M:%S"))
    os.makedirs(args.save_dir)

    glyph = dset.Glyph(args.fonts)
    dataset = dset.CodeTableDataset(glyph, table=args.table, codemap=args.codemap)
    train_length = int(len(dataset) * 0.7)
    train_set, val_set = torch.utils.data.random_split(dataset, [train_length, len(dataset) - train_length])
    train_loader = torch.utils.data.DataLoader(train_set, args.batch_size, True,
                                               collate_fn=dset.collate_batch,
                                               num_workers=args.loader_workers,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_set, args.batch_size, False,
                                             collate_fn=dset.collate_batch,
                                             num_workers=args.loader_workers,
                                             pin_memory=True)

    encoder = models.Encoder(encode_channels=256).to(device)
    encoder_optim = torch.optim.Adam(encoder.parameters(), lr=args.encoder_lr)
    decoder = models.Decoder(128, 256, 256, 26 + 2, encoder_dim=256, dropout=0.5).to(device)
    decoder_optim = torch.optim.Adam(decoder.parameters(), lr=args.decoder_lr)
    epoch_start = 0
    if args.resume is not None:
        print('loading checkpoint: %s'%args.resume)
        checkpoint = torch.load(args.resume, map_location=device)
        decoder.load_state_dict(checkpoint['decoder'])
        decoder_optim.load_state_dict(checkpoint['decoder_optimizer'])
        encoder.load_state_dict(checkpoint['encoder'])
        encoder_optim.load_state_dict(checkpoint['encoder_optimizer'])
        epoch_start = checkpoint['epoch']

    criterion = nn.CrossEntropyLoss().to(device)
    logger = PredLogger(dataset, args.save_dir, args.codemap)
    writer = SummaryWriter(args.save_dir)

    best_acc = 0

    for epoch in tqdm(range(epoch_start, args.epochs), position=0):
        train(train_loader=train_loader,
              encoder=encoder,
              decoder=decoder,
              criterion=criterion,
              encoder_optimizer=encoder_optim,
              decoder_optimizer=decoder_optim,
              epoch=epoch,
              logger=logger,
              writer=writer,
              args=args)
        acc, imgs, scores, alphas = validate(val_loader=val_loader,
                       encoder=encoder,
                       decoder=decoder,
                       criterion=criterion,
                       epoch=epoch,
                       logger=logger,
                       writer=writer,
                       args=args)
        
        is_best = best_acc < acc # and epoch > 0
        best_acc = max(acc, best_acc)
        
        if epoch % args.save_interval == args.save_interval - 1 or is_best:
            save_checkpoint(epoch, encoder, decoder, encoder_optim, decoder_optim, acc, is_best, args.save_dir)
            vis = visualize_att(T.ToPILImage()(imgs[0].cpu()), scores[0].topk(1, dim=-1).indices.flatten().tolist(), alphas[0].view(-1, 13, 13).cpu(), logger.map_rev)
            vis.savefig(os.path.join(args.save_dir, 'val_visualize_%d.png'%epoch))
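The `save_checkpoint` helper is not shown in this example; below is a sketch consistent with the keys read back in the resume branch above (the file names and layout are assumptions):

import os

import torch


def save_checkpoint(epoch, encoder, decoder, encoder_optim, decoder_optim,
                    acc, is_best, save_dir):
    # Keys mirror those consumed by the resume branch above.
    state = {
        'epoch': epoch + 1,  # resume from the following epoch
        'decoder': decoder.state_dict(),
        'decoder_optimizer': decoder_optim.state_dict(),
        'encoder': encoder.state_dict(),
        'encoder_optimizer': encoder_optim.state_dict(),
        'acc': acc,
    }
    torch.save(state, os.path.join(save_dir, 'checkpoint.pth'))
    if is_best:
        torch.save(state, os.path.join(save_dir, 'best.pth'))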
Example #24
def main():
    print("init data folders")

    encoder_lv1 = models.Encoder()
    encoder_lv2 = models.Encoder()    
    encoder_lv3 = models.Encoder()
    encoder_lv4 = models.Encoder()

    decoder_lv1 = models.Decoder()
    decoder_lv2 = models.Decoder()    
    decoder_lv3 = models.Decoder()
    decoder_lv4 = models.Decoder()
    
    encoder_lv1.apply(weight_init).cuda(GPU)    
    encoder_lv2.apply(weight_init).cuda(GPU)
    encoder_lv3.apply(weight_init).cuda(GPU)
    encoder_lv4.apply(weight_init).cuda(GPU)

    decoder_lv1.apply(weight_init).cuda(GPU)    
    decoder_lv2.apply(weight_init).cuda(GPU)
    decoder_lv3.apply(weight_init).cuda(GPU)
    decoder_lv4.apply(weight_init).cuda(GPU)
    
    encoder_lv1_optim = torch.optim.Adam(encoder_lv1.parameters(), lr=LEARNING_RATE)
    encoder_lv1_scheduler = StepLR(encoder_lv1_optim, step_size=1000, gamma=0.1)
    encoder_lv2_optim = torch.optim.Adam(encoder_lv2.parameters(), lr=LEARNING_RATE)
    encoder_lv2_scheduler = StepLR(encoder_lv2_optim, step_size=1000, gamma=0.1)
    encoder_lv3_optim = torch.optim.Adam(encoder_lv3.parameters(), lr=LEARNING_RATE)
    encoder_lv3_scheduler = StepLR(encoder_lv3_optim, step_size=1000, gamma=0.1)
    encoder_lv4_optim = torch.optim.Adam(encoder_lv4.parameters(), lr=LEARNING_RATE)
    encoder_lv4_scheduler = StepLR(encoder_lv4_optim, step_size=1000, gamma=0.1)

    decoder_lv1_optim = torch.optim.Adam(decoder_lv1.parameters(), lr=LEARNING_RATE)
    decoder_lv1_scheduler = StepLR(decoder_lv1_optim, step_size=1000, gamma=0.1)
    decoder_lv2_optim = torch.optim.Adam(decoder_lv2.parameters(), lr=LEARNING_RATE)
    decoder_lv2_scheduler = StepLR(decoder_lv2_optim, step_size=1000, gamma=0.1)
    decoder_lv3_optim = torch.optim.Adam(decoder_lv3.parameters(), lr=LEARNING_RATE)
    decoder_lv3_scheduler = StepLR(decoder_lv3_optim, step_size=1000, gamma=0.1)
    decoder_lv4_optim = torch.optim.Adam(decoder_lv4.parameters(), lr=LEARNING_RATE)
    decoder_lv4_scheduler = StepLR(decoder_lv4_optim, step_size=1000, gamma=0.1)

    if os.path.exists(str('./checkpoints/' + METHOD + "/encoder_lv1.pkl")):
        encoder_lv1.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/encoder_lv1.pkl")))
        print("load encoder_lv1 success")
    if os.path.exists(str('./checkpoints/' + METHOD + "/encoder_lv2.pkl")):
        encoder_lv2.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/encoder_lv2.pkl")))
        print("load encoder_lv2 success")
    if os.path.exists(str('./checkpoints/' + METHOD + "/encoder_lv3.pkl")):
        encoder_lv3.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/encoder_lv3.pkl")))
        print("load encoder_lv3 success")
    if os.path.exists(str('./checkpoints/' + METHOD + "/encoder_lv4.pkl")):
        encoder_lv4.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/encoder_lv4.pkl")))
        print("load encoder_lv4 success")

    if os.path.exists(str('./checkpoints/' + METHOD + "/decoder_lv1.pkl")):
        decoder_lv1.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/decoder_lv1.pkl")))
        print("load encoder_lv1 success")
    if os.path.exists(str('./checkpoints/' + METHOD + "/decoder_lv2.pkl")):
        decoder_lv2.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/decoder_lv2.pkl")))
        print("load decoder_lv2 success")
    if os.path.exists(str('./checkpoints/' + METHOD + "/decoder_lv3.pkl")):
        decoder_lv3.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/decoder_lv3.pkl")))
        print("load decoder_lv3 success")
    if os.path.exists(str('./checkpoints/' + METHOD + "/decoder_lv4.pkl")):
        decoder_lv4.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/decoder_lv4.pkl")))
        print("load decoder_lv4 success")
    
    if not os.path.exists('./checkpoints/' + METHOD):
        os.makedirs('./checkpoints/' + METHOD)
            
    for epoch in range(args.start_epoch, EPOCHS):
        encoder_lv1_scheduler.step(epoch)
        encoder_lv2_scheduler.step(epoch)
        encoder_lv3_scheduler.step(epoch)
        encoder_lv4_scheduler.step(epoch)

        decoder_lv1_scheduler.step(epoch)
        decoder_lv2_scheduler.step(epoch)
        decoder_lv3_scheduler.step(epoch)
        decoder_lv4_scheduler.step(epoch)      
        
        print("Training...")
        
        train_dataset = GoProDataset(
            blur_image_files = './datas/GoPro/train_blur_file.txt',
            sharp_image_files = './datas/GoPro/train_sharp_file.txt',
            root_dir = './datas/GoPro',
            crop = True,
            crop_size = IMAGE_SIZE,
            transform = transforms.Compose([
                transforms.ToTensor()
                ]))

        train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
        start = time.time()
        
        for iteration, images in enumerate(train_dataloader): # images:{(3,256,256),(3,256,256)}           
            mse = nn.MSELoss().cuda(GPU)            
            
            gt = Variable(images['sharp_image'] - 0.5).cuda(GPU) #(2,3,256,256)
            H = gt.size(2) #256          
            W = gt.size(3) #256
            # Split the image into 1, 2, 4 and 8 patches (levels 1-4)
            images_lv1 = Variable(images['blur_image'] - 0.5).cuda(GPU) #(2,3,256,256)
            images_lv2_1 = images_lv1[:,:,0:int(H/2),:]
            images_lv2_2 = images_lv1[:,:,int(H/2):H,:]
            images_lv3_1 = images_lv2_1[:,:,:,0:int(W/2)]
            images_lv3_2 = images_lv2_1[:,:,:,int(W/2):W]
            images_lv3_3 = images_lv2_2[:,:,:,0:int(W/2)]
            images_lv3_4 = images_lv2_2[:,:,:,int(W/2):W]
            images_lv4_1 = images_lv3_1[:,:,0:int(H/4),:]
            images_lv4_2 = images_lv3_1[:,:,int(H/4):int(H/2),:]
            images_lv4_3 = images_lv3_2[:,:,0:int(H/4),:]
            images_lv4_4 = images_lv3_2[:,:,int(H/4):int(H/2),:]
            images_lv4_5 = images_lv3_3[:,:,0:int(H/4),:]
            images_lv4_6 = images_lv3_3[:,:,int(H/4):int(H/2),:]
            images_lv4_7 = images_lv3_4[:,:,0:int(H/4),:]
            images_lv4_8 = images_lv3_4[:,:,int(H/4):int(H/2),:]
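            # Patch pyramid built above: lv1 = the full H x W image; lv2 = two
            # H/2 x W halves; lv3 = four H/2 x W/2 quadrants; lv4 = eight
            # H/4 x W/2 blocks (each quadrant split along its height).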

            feature_lv4_1 = encoder_lv4(images_lv4_1)
            feature_lv4_2 = encoder_lv4(images_lv4_2)
            feature_lv4_3 = encoder_lv4(images_lv4_3)
            feature_lv4_4 = encoder_lv4(images_lv4_4)
            feature_lv4_5 = encoder_lv4(images_lv4_5)
            feature_lv4_6 = encoder_lv4(images_lv4_6)
            feature_lv4_7 = encoder_lv4(images_lv4_7)
            feature_lv4_8 = encoder_lv4(images_lv4_8)
            feature_lv4_top_left = torch.cat((feature_lv4_1, feature_lv4_2), 2)
            feature_lv4_top_right = torch.cat((feature_lv4_3, feature_lv4_4), 2)
            feature_lv4_bot_left = torch.cat((feature_lv4_5, feature_lv4_6), 2)
            feature_lv4_bot_right = torch.cat((feature_lv4_7, feature_lv4_8), 2)
            feature_lv4_top = torch.cat((feature_lv4_top_left, feature_lv4_top_right), 3)
            feature_lv4_bot = torch.cat((feature_lv4_bot_left, feature_lv4_bot_right), 3)
            feature_lv4 = torch.cat((feature_lv4_top, feature_lv4_bot), 2)
            residual_lv4_top_left = decoder_lv4(feature_lv4_top_left)
            residual_lv4_top_right = decoder_lv4(feature_lv4_top_right)
            residual_lv4_bot_left = decoder_lv4(feature_lv4_bot_left)
            residual_lv4_bot_right = decoder_lv4(feature_lv4_bot_right)

            feature_lv3_1 = encoder_lv3(images_lv3_1 + residual_lv4_top_left)
            feature_lv3_2 = encoder_lv3(images_lv3_2 + residual_lv4_top_right)
            feature_lv3_3 = encoder_lv3(images_lv3_3 + residual_lv4_bot_left)
            feature_lv3_4 = encoder_lv3(images_lv3_4 + residual_lv4_bot_right)
            feature_lv3_top = torch.cat((feature_lv3_1, feature_lv3_2), 3) + feature_lv4_top
            feature_lv3_bot = torch.cat((feature_lv3_3, feature_lv3_4), 3) + feature_lv4_bot
            feature_lv3 = torch.cat((feature_lv3_top, feature_lv3_bot), 2)
            residual_lv3_top = decoder_lv3(feature_lv3_top)
            residual_lv3_bot = decoder_lv3(feature_lv3_bot)

            feature_lv2_1 = encoder_lv2(images_lv2_1 + residual_lv3_top)
            feature_lv2_2 = encoder_lv2(images_lv2_2 + residual_lv3_bot)
            feature_lv2 = torch.cat((feature_lv2_1, feature_lv2_2), 2) + feature_lv3
            residual_lv2 = decoder_lv2(feature_lv2)

            feature_lv1 = encoder_lv1(images_lv1 + residual_lv2) + feature_lv2
            deblur_image = decoder_lv1(feature_lv1)

            loss = mse(deblur_image, gt)
            
            encoder_lv1.zero_grad()
            encoder_lv2.zero_grad()
            encoder_lv3.zero_grad()
            encoder_lv4.zero_grad()

            decoder_lv1.zero_grad()
            decoder_lv2.zero_grad()
            decoder_lv3.zero_grad()
            decoder_lv4.zero_grad()
            
            loss.backward()
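            # The single backward() above fills gradients for all eight modules,
            # since the loss depends on every level of the pyramid; each
            # optimizer below then updates only its own module's parameters.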

            encoder_lv1_optim.step()
            encoder_lv2_optim.step()
            encoder_lv3_optim.step()
            encoder_lv4_optim.step()

            decoder_lv1_optim.step()
            decoder_lv2_optim.step()
            decoder_lv3_optim.step()
            decoder_lv4_optim.step()
            
            if (iteration+1)%10 == 0:
                stop = time.time()
                print("epoch:", epoch, "iteration:", iteration+1, "loss:%.4f"%loss.item(), 'time:%.4f'%(stop-start))
                start = time.time()
                
        if epoch % 100 == 0:
            if not os.path.exists('./checkpoints/' + METHOD + '/epoch' + str(epoch)):
                os.makedirs('./checkpoints/' + METHOD + '/epoch' + str(epoch))
            
            print("Testing...")
            test_dataset = GoProDataset(
                blur_image_files = './datas/GoPro/test_blur_file.txt',
                sharp_image_files = './datas/GoPro/test_sharp_file.txt',
                root_dir = './datas/GoPro',
                transform = transforms.Compose([
                    transforms.ToTensor()
                ]))
            test_dataloader = DataLoader(test_dataset, batch_size = 1, shuffle=False)
            test_time = 0       
            for iteration, images in enumerate(test_dataloader):
                with torch.no_grad():
                    start = time.time()                 
                    images_lv1 = Variable(images['blur_image'] - 0.5).cuda(GPU)  
                    H = images_lv1.size(2)
                    W = images_lv1.size(3)          
                    images_lv2_1 = images_lv1[:,:,0:int(H/2),:]
                    images_lv2_2 = images_lv1[:,:,int(H/2):H,:]
                    images_lv3_1 = images_lv2_1[:,:,:,0:int(W/2)]
                    images_lv3_2 = images_lv2_1[:,:,:,int(W/2):W]
                    images_lv3_3 = images_lv2_2[:,:,:,0:int(W/2)]
                    images_lv3_4 = images_lv2_2[:,:,:,int(W/2):W]
                    images_lv4_1 = images_lv3_1[:,:,0:int(H/4),:]
                    images_lv4_2 = images_lv3_1[:,:,int(H/4):int(H/2),:]
                    images_lv4_3 = images_lv3_2[:,:,0:int(H/4),:]
                    images_lv4_4 = images_lv3_2[:,:,int(H/4):int(H/2),:]
                    images_lv4_5 = images_lv3_3[:,:,0:int(H/4),:]
                    images_lv4_6 = images_lv3_3[:,:,int(H/4):int(H/2),:]
                    images_lv4_7 = images_lv3_4[:,:,0:int(H/4),:]
                    images_lv4_8 = images_lv3_4[:,:,int(H/4):int(H/2),:]
                    
                    feature_lv4_1 = encoder_lv4(images_lv4_1)
                    feature_lv4_2 = encoder_lv4(images_lv4_2)
                    feature_lv4_3 = encoder_lv4(images_lv4_3)
                    feature_lv4_4 = encoder_lv4(images_lv4_4)
                    feature_lv4_5 = encoder_lv4(images_lv4_5)
                    feature_lv4_6 = encoder_lv4(images_lv4_6)
                    feature_lv4_7 = encoder_lv4(images_lv4_7)
                    feature_lv4_8 = encoder_lv4(images_lv4_8)
                    
                    feature_lv4_top_left = torch.cat((feature_lv4_1, feature_lv4_2), 2)
                    feature_lv4_top_right = torch.cat((feature_lv4_3, feature_lv4_4), 2)
                    feature_lv4_bot_left = torch.cat((feature_lv4_5, feature_lv4_6), 2)
                    feature_lv4_bot_right = torch.cat((feature_lv4_7, feature_lv4_8), 2)
                    
                    feature_lv4_top = torch.cat((feature_lv4_top_left, feature_lv4_top_right), 3)
                    feature_lv4_bot = torch.cat((feature_lv4_bot_left, feature_lv4_bot_right), 3)
                    
                    residual_lv4_top_left = decoder_lv4(feature_lv4_top_left)
                    residual_lv4_top_right = decoder_lv4(feature_lv4_top_right)
                    residual_lv4_bot_left = decoder_lv4(feature_lv4_bot_left)
                    residual_lv4_bot_right = decoder_lv4(feature_lv4_bot_right)
            
                    feature_lv3_1 = encoder_lv3(images_lv3_1 + residual_lv4_top_left)
                    feature_lv3_2 = encoder_lv3(images_lv3_2 + residual_lv4_top_right)
                    feature_lv3_3 = encoder_lv3(images_lv3_3 + residual_lv4_bot_left)
                    feature_lv3_4 = encoder_lv3(images_lv3_4 + residual_lv4_bot_right)
                    
                    feature_lv3_top = torch.cat((feature_lv3_1, feature_lv3_2), 3) + feature_lv4_top
                    feature_lv3_bot = torch.cat((feature_lv3_3, feature_lv3_4), 3) + feature_lv4_bot
                    residual_lv3_top = decoder_lv3(feature_lv3_top)
                    residual_lv3_bot = decoder_lv3(feature_lv3_bot)
                
                    feature_lv2_1 = encoder_lv2(images_lv2_1 + residual_lv3_top)
                    feature_lv2_2 = encoder_lv2(images_lv2_2 + residual_lv3_bot)
                    feature_lv2 = torch.cat((feature_lv2_1, feature_lv2_2), 2) + torch.cat((feature_lv3_top, feature_lv3_bot), 2)
                    residual_lv2 = decoder_lv2(feature_lv2)
            
                    feature_lv1 = encoder_lv1(images_lv1 + residual_lv2) + feature_lv2
                    deblur_image = decoder_lv1(feature_lv1)
                    stop = time.time()
                    test_time += stop - start
                    print('RunTime:%.4f'%(stop-start), '  Average Runtime:%.4f'%(test_time/(iteration+1)))
                    save_deblur_images(deblur_image.data + 0.5, iteration, epoch)
                
        torch.save(encoder_lv1.state_dict(), str('./checkpoints/' + METHOD + "/encoder_lv1.pkl"))
        torch.save(encoder_lv2.state_dict(), str('./checkpoints/' + METHOD + "/encoder_lv2.pkl"))
        torch.save(encoder_lv3.state_dict(), str('./checkpoints/' + METHOD + "/encoder_lv3.pkl"))
        torch.save(encoder_lv4.state_dict(), str('./checkpoints/' + METHOD + "/encoder_lv4.pkl"))
        torch.save(decoder_lv1.state_dict(), str('./checkpoints/' + METHOD + "/decoder_lv1.pkl"))
        torch.save(decoder_lv2.state_dict(), str('./checkpoints/' + METHOD + "/decoder_lv2.pkl"))
        torch.save(decoder_lv3.state_dict(), str('./checkpoints/' + METHOD + "/decoder_lv3.pkl"))
        torch.save(decoder_lv4.state_dict(), str('./checkpoints/' + METHOD + "/decoder_lv4.pkl"))
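
Note that scheduler.step(epoch) above is the pre-1.1 PyTorch calling convention. Since PyTorch 1.1 the epoch argument is deprecated and each scheduler should be stepped once per epoch after its optimizer has updated. A sketch of the modern ordering for a single optimizer/scheduler pair (training_step is a hypothetical helper standing in for the loop body above):

for epoch in range(EPOCHS):
    for images in train_dataloader:
        loss = training_step(images)  # forward pass + loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    scheduler.step()  # once per epoch, after the optimizer updates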
Exemple #25
0
    images = [
        np.asarray(Image.open(image_path)) for image_path in image_paths
    ][:1]  # keep only the first image
    for i in range(len(images)):
        image = images[i]
        dimensions = image.shape[:2]
        ratio = min([min(d, 480) / d for d in dimensions])  # cap each side at 480 px
        scaled_dimensions = [int(ratio * d) for d in dimensions]
        image = resize(image, (*scaled_dimensions, 3), anti_aliasing=True)
        image = image[:, :, [2, 1, 0]]  # RGB -> BGR
        image = np.transpose(image, (2, 0, 1))  # HWC -> CHW
        images[i] = image

    encoder = models.Encoder(embedding_size=img_embedding_size).to(device)
    decoder = models.Decoder(img_embedding_size=img_embedding_size,
                             pos_embedding_size=pos_embedding_size,
                             hidden_size=decoder_hidden_size).to(device)

    encoder_opt = torch.optim.Adam(encoder.parameters(), lr=0.004)
    decoder_opt = torch.optim.Adam(decoder.parameters(), lr=0.004)
    encoder_scheduler = torch.optim.lr_scheduler.ExponentialLR(encoder_opt,
                                                               gamma=0.99999)
    decoder_scheduler = torch.optim.lr_scheduler.ExponentialLR(decoder_opt,
                                                               gamma=0.99999)

    for i in range(100000000):
        image_idxs = np.random.randint(low=0,
                                       high=len(images),
                                       size=batch_size)
        image = [images[idx] for idx in image_idxs]
        shape_idx = np.random.randint(low=0, high=batch_size)
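
A side note on the schedulers above: ExponentialLR multiplies the learning rate by gamma at every scheduler.step(), so with gamma=0.99999 the rate halves roughly every ln(2) / 1e-5 ≈ 69,314 steps. A quick check:

import math

gamma = 0.99999
print(round(math.log(0.5) / math.log(gamma)))  # 69314 steps per halving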
Exemple #26
0
def main():
    global device
    parser = argparse.ArgumentParser(description='Neuro Cangjie')

    parser.add_argument('--model',
                        '-m',
                        help='path to model',
                        default='logs/cangjie5.pth.tar')
    parser.add_argument(
        '--fonts',
        '-f',
        nargs='+',
        default=['data/hanazono/HanaMinA.ttf', 'data/hanazono/HanaMinB.ttf'])
    parser.add_argument('--codemap',
                        '-cm',
                        help='path to code map',
                        default='data/codemap_cangjie5.txt')
    parser.add_argument('--beam_size',
                        '-b',
                        default=5,
                        type=int,
                        help='beam size for beam search')
    parser.add_argument('--dont_smooth',
                        dest='smooth',
                        action='store_false',
                        help='do not smooth alpha overlay')
    parser.add_argument('--use_cpu',
                        action='store_true',
                        help='use cpu for model inference')

    args = parser.parse_args()

    if args.use_cpu:
        device = torch.device('cpu')

    # Load model
    checkpoint = torch.load(args.model, map_location=device)
    encoder = models.Encoder(encode_channels=256).to(device)
    decoder = models.Decoder(128,
                             256,
                             256,
                             26 + 2,
                             encoder_dim=256,
                             dropout=0.5).to(device)
    decoder.load_state_dict(checkpoint['decoder'])
    encoder.load_state_dict(checkpoint['encoder'])
    decoder.eval()
    encoder.eval()

    word_map, rev_word_map = utils.load_map(args.codemap)

    glyph = dset.Glyph(args.fonts)
    while True:
        ch = input('>> ')[0]
        img = glyph.draw(ch)
        # img.save('exp.png')

        # Encode, decode with attention and beam search
        seq, alphas, all_seq = beam_search(encoder, decoder, img, word_map,
                                           args.beam_size)
        # seq, alphas, all_seq = first_out(encoder, decoder, img, word_map)
        print(''.join([rev_word_map[ind] for ind in seq[1:-1]]))
        alphas = torch.FloatTensor(alphas)
        # Visualize caption and attention of best sequence
        plt = utils.visualize_att(img, seq, alphas, rev_word_map, args.smooth)
        plt.savefig('result.png')
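
beam_search above keeps the args.beam_size highest-scoring partial code sequences at each decoding step. Its implementation is not shown here; purely to illustrate the idea, here is a generic sketch in which step_fn is an assumed stand-in for one decoder step, returning (token, log_prob) continuations of a prefix:

import heapq

def beam_search_sketch(step_fn, start_token, end_token, beam_size, max_len=20):
    beams = [(0.0, [start_token])]  # (cumulative log-probability, sequence)
    completed = []
    for _ in range(max_len):
        candidates = []
        for score, seq in beams:
            if seq[-1] == end_token:
                completed.append((score, seq))  # finished; stop extending
                continue
            for token, logp in step_fn(seq):
                candidates.append((score + logp, seq + [token]))
        if not candidates:
            break
        beams = heapq.nlargest(beam_size, candidates, key=lambda b: b[0])
    completed.extend(b for b in beams if b[1][-1] == end_token)
    best = max(completed or beams, key=lambda b: b[0])
    return best[1]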
Exemple #27
0
def main():
    print("init data folders")

    encoder = {}
    decoder = {}
    encoder_optim = {}
    decoder_optim = {}
    encoder_scheduler = {}
    decoder_scheduler = {}
    for s in ['s1', 's2', 's3', 's4']:
        encoder[s] = {}
        decoder[s] = {}
        encoder_optim[s] = {}
        decoder_optim[s] = {}
        encoder_scheduler[s] = {}
        decoder_scheduler[s] = {}
        for lv in ['lv1', 'lv2', 'lv3']:
            encoder[s][lv] = models.Encoder()
            decoder[s][lv] = models.Decoder()
            encoder[s][lv].apply(weight_init).cuda(GPU)
            decoder[s][lv].apply(weight_init).cuda(GPU)
            encoder_optim[s][lv] = torch.optim.Adam(
                encoder[s][lv].parameters(), lr=LEARNING_RATE)
            encoder_scheduler[s][lv] = StepLR(encoder_optim[s][lv],
                                              step_size=1000,
                                              gamma=0.1)
            decoder_optim[s][lv] = torch.optim.Adam(
                decoder[s][lv].parameters(), lr=LEARNING_RATE)
            decoder_scheduler[s][lv] = StepLR(decoder_optim[s][lv],
                                              step_size=1000,
                                              gamma=0.1)
            if os.path.exists(
                    str('./checkpoints/' + METHOD + "/encoder_" + s + "_" +
                        lv + ".pkl")):
                encoder[s][lv].load_state_dict(
                    torch.load(
                        str('./checkpoints/' + METHOD + "/encoder_" + s + "_" +
                            lv + ".pkl")))
                print("load encoder_" + s + "_" + lv + " successfully!")
            if os.path.exists(
                    str('./checkpoints/' + METHOD + "/decoder_" + s + "_" +
                        lv + ".pkl")):
                decoder[s][lv].load_state_dict(
                    torch.load(
                        str('./checkpoints/' + METHOD + "/decoder_" + s + "_" +
                            lv + ".pkl")))
                print("load decoder_" + s + "_" + lv + " successfully!")

    if not os.path.exists('./checkpoints/' + METHOD):
        os.makedirs('./checkpoints/' + METHOD)

    for epoch in range(args.start_epoch, EPOCHS):
        for s in ['s1', 's2', 's3', 's4']:
            for lv in ['lv1', 'lv2', 'lv3']:
                encoder_scheduler[s][lv].step(epoch)
                decoder_scheduler[s][lv].step(epoch)

        print("Training...")

        train_dataset = GoProDataset(
            blur_image_files='./datas/GoPro/train_blur_file.txt',
            sharp_image_files='./datas/GoPro/train_sharp_file.txt',
            root_dir='./datas/GoPro/',
            crop=True,
            crop_size=CROP_SIZE,
            rotation=True,
            color_augment=True,
            transform=transforms.Compose([transforms.ToTensor()]))
        train_dataloader = DataLoader(train_dataset,
                                      batch_size=BATCH_SIZE,
                                      shuffle=True)

        for iteration, inputs in enumerate(train_dataloader):
            mse = nn.MSELoss().cuda(GPU)
            images = {}
            feature = {}
            residual = {}
            for s in ['s1', 's2', 's3', 's4']:
                feature[s] = {}
                residual[s] = {}

            images['gt'] = Variable(inputs['sharp_image'] - 0.5).cuda(GPU)
            images['lv1'] = Variable(inputs['blur_image'] - 0.5).cuda(GPU)
            H = images['lv1'].size(2)
            W = images['lv1'].size(3)

            images['lv2_1'] = images['lv1'][:, :, 0:int(H / 2), :]
            images['lv2_2'] = images['lv1'][:, :, int(H / 2):H, :]
            images['lv3_1'] = images['lv2_1'][:, :, :, 0:int(W / 2)]
            images['lv3_2'] = images['lv2_1'][:, :, :, int(W / 2):W]
            images['lv3_3'] = images['lv2_2'][:, :, :, 0:int(W / 2)]
            images['lv3_4'] = images['lv2_2'][:, :, :, int(W / 2):W]

            s = 's1'
            feature[s]['lv3_1'] = encoder[s]['lv3'](images['lv3_1'])
            feature[s]['lv3_2'] = encoder[s]['lv3'](images['lv3_2'])
            feature[s]['lv3_3'] = encoder[s]['lv3'](images['lv3_3'])
            feature[s]['lv3_4'] = encoder[s]['lv3'](images['lv3_4'])
            feature[s]['lv3_top'] = torch.cat(
                (feature[s]['lv3_1'], feature[s]['lv3_2']), 3)
            feature[s]['lv3_bot'] = torch.cat(
                (feature[s]['lv3_3'], feature[s]['lv3_4']), 3)
            residual[s]['lv3_top'] = decoder[s]['lv3'](feature[s]['lv3_top'])
            residual[s]['lv3_bot'] = decoder[s]['lv3'](feature[s]['lv3_bot'])

            feature[s]['lv2_1'] = encoder[s]['lv2'](
                images['lv2_1'] +
                residual[s]['lv3_top']) + feature[s]['lv3_top']
            feature[s]['lv2_2'] = encoder[s]['lv2'](
                images['lv2_2'] +
                residual[s]['lv3_bot']) + feature[s]['lv3_bot']
            feature[s]['lv2'] = torch.cat(
                (feature[s]['lv2_1'], feature[s]['lv2_2']), 2)
            residual[s]['lv2'] = decoder[s]['lv2'](feature[s]['lv2'])

            feature[s]['lv1'] = encoder[s]['lv1'](
                images['lv1'] + residual[s]['lv2']) + feature[s]['lv2']
            residual[s]['lv1'] = decoder[s]['lv1'](feature[s]['lv1'])

            s = 's2'
            ps = 's1'
            feature[s]['lv3_1'] = encoder[s]['lv3'](
                residual[ps]['lv1'][:, :, 0:int(H / 2), 0:int(W / 2)])
            feature[s]['lv3_2'] = encoder[s]['lv3'](
                residual[ps]['lv1'][:, :, 0:int(H / 2),
                                    int(W / 2):W])
            feature[s]['lv3_3'] = encoder[s]['lv3'](
                residual[ps]['lv1'][:, :, int(H / 2):H, 0:int(W / 2)])
            feature[s]['lv3_4'] = encoder[s]['lv3'](
                residual[ps]['lv1'][:, :, int(H / 2):H,
                                    int(W / 2):W])
            feature[s]['lv3_top'] = torch.cat(
                (feature[s]['lv3_1'], feature[s]['lv3_2']),
                3) + feature[ps]['lv3_top']
            feature[s]['lv3_bot'] = torch.cat(
                (feature[s]['lv3_3'], feature[s]['lv3_4']),
                3) + feature[ps]['lv3_bot']
            residual[s]['lv3_top'] = decoder[s]['lv3'](feature[s]['lv3_top'])
            residual[s]['lv3_bot'] = decoder[s]['lv3'](feature[s]['lv3_bot'])

            feature[s]['lv2_1'] = encoder[s]['lv2'](
                residual[ps]['lv1'][:, :, 0:int(H / 2), :] + residual[s]
                ['lv3_top']) + feature[s]['lv3_top'] + feature[ps]['lv2_1']
            feature[s]['lv2_2'] = encoder[s]['lv2'](
                residual[ps]['lv1'][:, :, int(H / 2):H, :] + residual[s]
                ['lv3_bot']) + feature[s]['lv3_bot'] + feature[ps]['lv2_2']
            feature[s]['lv2'] = torch.cat(
                (feature[s]['lv2_1'], feature[s]['lv2_2']), 2)
            residual[s]['lv2'] = decoder[s]['lv2'](
                feature[s]['lv2']) + residual['s1']['lv1']

            feature[s]['lv1'] = encoder[s]['lv1'](
                residual[ps]['lv1'] +
                residual[s]['lv2']) + feature[s]['lv2'] + feature[ps]['lv1']
            residual[s]['lv1'] = decoder[s]['lv1'](feature[s]['lv1'])

            s = 's3'
            ps = 's2'
            feature[s]['lv3_1'] = encoder[s]['lv3'](
                residual[ps]['lv1'][:, :, 0:int(H / 2), 0:int(W / 2)])
            feature[s]['lv3_2'] = encoder[s]['lv3'](
                residual[ps]['lv1'][:, :, 0:int(H / 2),
                                    int(W / 2):W])
            feature[s]['lv3_3'] = encoder[s]['lv3'](
                residual[ps]['lv1'][:, :, int(H / 2):H, 0:int(W / 2)])
            feature[s]['lv3_4'] = encoder[s]['lv3'](
                residual[ps]['lv1'][:, :, int(H / 2):H,
                                    int(W / 2):W])
            feature[s]['lv3_top'] = torch.cat(
                (feature[s]['lv3_1'], feature[s]['lv3_2']),
                3) + feature[ps]['lv3_top']
            feature[s]['lv3_bot'] = torch.cat(
                (feature[s]['lv3_3'], feature[s]['lv3_4']),
                3) + feature[ps]['lv3_bot']
            residual[s]['lv3_top'] = decoder[s]['lv3'](feature[s]['lv3_top'])
            residual[s]['lv3_bot'] = decoder[s]['lv3'](feature[s]['lv3_bot'])

            feature[s]['lv2_1'] = encoder[s]['lv2'](
                residual[ps]['lv1'][:, :, 0:int(H / 2), :] + residual[s]
                ['lv3_top']) + feature[s]['lv3_top'] + feature[ps]['lv2_1']
            feature[s]['lv2_2'] = encoder[s]['lv2'](
                residual[ps]['lv1'][:, :, int(H / 2):H, :] + residual[s]
                ['lv3_bot']) + feature[s]['lv3_bot'] + feature[ps]['lv2_2']
            feature[s]['lv2'] = torch.cat(
                (feature[s]['lv2_1'], feature[s]['lv2_2']), 2)
            residual[s]['lv2'] = decoder[s]['lv2'](
                feature[s]['lv2']) + residual['s1']['lv1']

            feature[s]['lv1'] = encoder[s]['lv1'](
                residual[ps]['lv1'] +
                residual[s]['lv2']) + feature[s]['lv2'] + feature[ps]['lv1']
            residual[s]['lv1'] = decoder[s]['lv1'](feature[s]['lv1'])

            s = 's4'
            ps = 's3'
            feature[s]['lv3_1'] = encoder[s]['lv3'](
                residual[ps]['lv1'][:, :, 0:int(H / 2), 0:int(W / 2)])
            feature[s]['lv3_2'] = encoder[s]['lv3'](
                residual[ps]['lv1'][:, :, 0:int(H / 2),
                                    int(W / 2):W])
            feature[s]['lv3_3'] = encoder[s]['lv3'](
                residual[ps]['lv1'][:, :, int(H / 2):H, 0:int(W / 2)])
            feature[s]['lv3_4'] = encoder[s]['lv3'](
                residual[ps]['lv1'][:, :, int(H / 2):H,
                                    int(W / 2):W])
            feature[s]['lv3_top'] = torch.cat(
                (feature[s]['lv3_1'], feature[s]['lv3_2']),
                3) + feature[ps]['lv3_top']
            feature[s]['lv3_bot'] = torch.cat(
                (feature[s]['lv3_3'], feature[s]['lv3_4']),
                3) + feature[ps]['lv3_bot']
            residual[s]['lv3_top'] = decoder[s]['lv3'](feature[s]['lv3_top'])
            residual[s]['lv3_bot'] = decoder[s]['lv3'](feature[s]['lv3_bot'])

            feature[s]['lv2_1'] = encoder[s]['lv2'](
                residual[ps]['lv1'][:, :, 0:int(H / 2), :] + residual[s]
                ['lv3_top']) + feature[s]['lv3_top'] + feature[ps]['lv2_1']
            feature[s]['lv2_2'] = encoder[s]['lv2'](
                residual[ps]['lv1'][:, :, int(H / 2):H, :] + residual[s]
                ['lv3_bot']) + feature[s]['lv3_bot'] + feature[ps]['lv2_2']
            feature[s]['lv2'] = torch.cat(
                (feature[s]['lv2_1'], feature[s]['lv2_2']), 2)
            residual[s]['lv2'] = decoder[s]['lv2'](
                feature[s]['lv2']) + residual['s1']['lv1']

            feature[s]['lv1'] = encoder[s]['lv1'](
                residual[ps]['lv1'] +
                residual[s]['lv2']) + feature[s]['lv2'] + feature[ps]['lv1']
            residual[s]['lv1'] = decoder[s]['lv1'](feature[s]['lv1'])

            loss = mse(residual['s4']['lv1'], images['gt']) + mse(
                residual['s3']['lv1'], images['gt']) + mse(
                    residual['s2']['lv1'], images['gt']) + mse(
                        residual['s1']['lv1'], images['gt'])

            for s in ['s1', 's2', 's3', 's4']:
                for lv in ['lv1', 'lv2', 'lv3']:
                    encoder[s][lv].zero_grad()
                    decoder[s][lv].zero_grad()

            loss.backward()

            for s in ['s1', 's2', 's3', 's4']:
                for lv in ['lv1', 'lv2', 'lv3']:
                    encoder_optim[s][lv].step()
                    decoder_optim[s][lv].step()

            if (iteration + 1) % 10 == 0:
                print(METHOD + "   epoch:", epoch, "iteration:", iteration + 1,
                      "loss:", loss.item())

        if epoch % 200 == 0:
            if not os.path.exists('./checkpoints/' + METHOD + '/epoch' +
                                  str(epoch)):
                os.makedirs('./checkpoints/' + METHOD + '/epoch' +
                            str(epoch))

            for s in ['s1', 's2', 's3', 's4']:
                for lv in ['lv1', 'lv2', 'lv3']:
                    torch.save(
                        encoder[s][lv].state_dict(),
                        str('./checkpoints/' + METHOD + '/epoch' + str(epoch) +
                            "/encoder_" + s + "_" + lv + ".pkl"))
                    torch.save(
                        decoder[s][lv].state_dict(),
                        str('./checkpoints/' + METHOD + '/epoch' + str(epoch) +
                            "/decoder_" + s + "_" + lv + ".pkl"))

            print("Testing...")
            test_dataset = GoProDataset(
                blur_image_files='./datas/GoPro/test_blur_file.txt',
                sharp_image_files='./datas/GoPro/test_sharp_file.txt',
                root_dir='./datas/GoPro/',
                transform=transforms.Compose([transforms.ToTensor()]))
            test_dataloader = DataLoader(test_dataset,
                                         batch_size=1,
                                         shuffle=False)

            for iteration, inputs in enumerate(test_dataloader):
                with torch.no_grad():
                    images['lv1'] = Variable(inputs['blur_image'] -
                                             0.5).cuda(GPU)
                    H = images['lv1'].size(2)
                    W = images['lv1'].size(3)
                    images['lv2_1'] = images['lv1'][:, :, 0:int(H / 2), :]
                    images['lv2_2'] = images['lv1'][:, :, int(H / 2):H, :]
                    images['lv3_1'] = images['lv2_1'][:, :, :, 0:int(W / 2)]
                    images['lv3_2'] = images['lv2_1'][:, :, :, int(W / 2):W]
                    images['lv3_3'] = images['lv2_2'][:, :, :, 0:int(W / 2)]
                    images['lv3_4'] = images['lv2_2'][:, :, :, int(W / 2):W]

                    s = 's1'
                    feature[s]['lv3_1'] = encoder[s]['lv3'](images['lv3_1'])
                    feature[s]['lv3_2'] = encoder[s]['lv3'](images['lv3_2'])
                    feature[s]['lv3_3'] = encoder[s]['lv3'](images['lv3_3'])
                    feature[s]['lv3_4'] = encoder[s]['lv3'](images['lv3_4'])
                    feature[s]['lv3_top'] = torch.cat(
                        (feature[s]['lv3_1'], feature[s]['lv3_2']), 3)
                    feature[s]['lv3_bot'] = torch.cat(
                        (feature[s]['lv3_3'], feature[s]['lv3_4']), 3)
                    residual[s]['lv3_top'] = decoder[s]['lv3'](
                        feature[s]['lv3_top'])
                    residual[s]['lv3_bot'] = decoder[s]['lv3'](
                        feature[s]['lv3_bot'])

                    feature[s]['lv2_1'] = encoder[s]['lv2'](
                        images['lv2_1'] +
                        residual[s]['lv3_top']) + feature[s]['lv3_top']
                    feature[s]['lv2_2'] = encoder[s]['lv2'](
                        images['lv2_2'] +
                        residual[s]['lv3_bot']) + feature[s]['lv3_bot']
                    feature[s]['lv2'] = torch.cat(
                        (feature[s]['lv2_1'], feature[s]['lv2_2']), 2)
                    residual[s]['lv2'] = decoder[s]['lv2'](feature[s]['lv2'])

                    feature[s]['lv1'] = encoder[s]['lv1'](
                        images['lv1'] + residual[s]['lv2']) + feature[s]['lv2']
                    residual[s]['lv1'] = decoder[s]['lv1'](feature[s]['lv1'])

                    s = 's2'
                    ps = 's1'
                    feature[s]['lv3_1'] = encoder[s]['lv3'](
                        residual[ps]['lv1'][:, :, 0:int(H / 2), 0:int(W / 2)])
                    feature[s]['lv3_2'] = encoder[s]['lv3'](
                        residual[ps]['lv1'][:, :, 0:int(H / 2),
                                            int(W / 2):W])
                    feature[s]['lv3_3'] = encoder[s]['lv3'](
                        residual[ps]['lv1'][:, :,
                                            int(H / 2):H, 0:int(W / 2)])
                    feature[s]['lv3_4'] = encoder[s]['lv3'](
                        residual[ps]['lv1'][:, :,
                                            int(H / 2):H,
                                            int(W / 2):W])
                    feature[s]['lv3_top'] = torch.cat(
                        (feature[s]['lv3_1'], feature[s]['lv3_2']),
                        3) + feature[ps]['lv3_top']
                    feature[s]['lv3_bot'] = torch.cat(
                        (feature[s]['lv3_3'], feature[s]['lv3_4']),
                        3) + feature[ps]['lv3_bot']
                    residual[s]['lv3_top'] = decoder[s]['lv3'](
                        feature[s]['lv3_top'])
                    residual[s]['lv3_bot'] = decoder[s]['lv3'](
                        feature[s]['lv3_bot'])

                    feature[s]['lv2_1'] = encoder[s]['lv2'](
                        residual[ps]['lv1'][:, :, 0:int(H / 2), :] +
                        residual[s]['lv3_top']
                    ) + feature[s]['lv3_top'] + feature[ps]['lv2_1']
                    feature[s]['lv2_2'] = encoder[s]['lv2'](
                        residual[ps]['lv1'][:, :, int(H / 2):H, :] +
                        residual[s]['lv3_bot']
                    ) + feature[s]['lv3_bot'] + feature[ps]['lv2_2']
                    feature[s]['lv2'] = torch.cat(
                        (feature[s]['lv2_1'], feature[s]['lv2_2']), 2)
                    residual[s]['lv2'] = decoder[s]['lv2'](feature[s]['lv2'])

                    feature[s]['lv1'] = encoder[s]['lv1'](
                        residual[ps]['lv1'] + residual[s]['lv2']
                    ) + feature[s]['lv2'] + feature[ps]['lv1']
                    residual[s]['lv1'] = decoder[s]['lv1'](feature[s]['lv1'])

                    s = 's3'
                    ps = 's2'
                    feature[s]['lv3_1'] = encoder[s]['lv3'](
                        residual[ps]['lv1'][:, :, 0:int(H / 2), 0:int(W / 2)])
                    feature[s]['lv3_2'] = encoder[s]['lv3'](
                        residual[ps]['lv1'][:, :, 0:int(H / 2),
                                            int(W / 2):W])
                    feature[s]['lv3_3'] = encoder[s]['lv3'](
                        residual[ps]['lv1'][:, :,
                                            int(H / 2):H, 0:int(W / 2)])
                    feature[s]['lv3_4'] = encoder[s]['lv3'](
                        residual[ps]['lv1'][:, :,
                                            int(H / 2):H,
                                            int(W / 2):W])
                    feature[s]['lv3_top'] = torch.cat(
                        (feature[s]['lv3_1'], feature[s]['lv3_2']),
                        3) + feature[ps]['lv3_top']
                    feature[s]['lv3_bot'] = torch.cat(
                        (feature[s]['lv3_3'], feature[s]['lv3_4']),
                        3) + feature[ps]['lv3_bot']
                    residual[s]['lv3_top'] = decoder[s]['lv3'](
                        feature[s]['lv3_top'])
                    residual[s]['lv3_bot'] = decoder[s]['lv3'](
                        feature[s]['lv3_bot'])

                    feature[s]['lv2_1'] = encoder[s]['lv2'](
                        residual[ps]['lv1'][:, :, 0:int(H / 2), :] +
                        residual[s]['lv3_top']
                    ) + feature[s]['lv3_top'] + feature[ps]['lv2_1']
                    feature[s]['lv2_2'] = encoder[s]['lv2'](
                        residual[ps]['lv1'][:, :, int(H / 2):H, :] +
                        residual[s]['lv3_bot']
                    ) + feature[s]['lv3_bot'] + feature[ps]['lv2_2']
                    feature[s]['lv2'] = torch.cat(
                        (feature[s]['lv2_1'], feature[s]['lv2_2']), 2)
                    residual[s]['lv2'] = decoder[s]['lv2'](feature[s]['lv2'])

                    feature[s]['lv1'] = encoder[s]['lv1'](
                        residual[ps]['lv1'] + residual[s]['lv2']
                    ) + feature[s]['lv2'] + feature[ps]['lv1']
                    residual[s]['lv1'] = decoder[s]['lv1'](feature[s]['lv1'])

                    s = 's4'
                    ps = 's3'
                    feature[s]['lv3_1'] = encoder[s]['lv3'](
                        residual[ps]['lv1'][:, :, 0:int(H / 2), 0:int(W / 2)])
                    feature[s]['lv3_2'] = encoder[s]['lv3'](
                        residual[ps]['lv1'][:, :, 0:int(H / 2),
                                            int(W / 2):W])
                    feature[s]['lv3_3'] = encoder[s]['lv3'](
                        residual[ps]['lv1'][:, :,
                                            int(H / 2):H, 0:int(W / 2)])
                    feature[s]['lv3_4'] = encoder[s]['lv3'](
                        residual[ps]['lv1'][:, :,
                                            int(H / 2):H,
                                            int(W / 2):W])
                    feature[s]['lv3_top'] = torch.cat(
                        (feature[s]['lv3_1'], feature[s]['lv3_2']),
                        3) + feature[ps]['lv3_top']
                    feature[s]['lv3_bot'] = torch.cat(
                        (feature[s]['lv3_3'], feature[s]['lv3_4']),
                        3) + feature[ps]['lv3_bot']
                    residual[s]['lv3_top'] = decoder[s]['lv3'](
                        feature[s]['lv3_top'])
                    residual[s]['lv3_bot'] = decoder[s]['lv3'](
                        feature[s]['lv3_bot'])

                    feature[s]['lv2_1'] = encoder[s]['lv2'](
                        residual[ps]['lv1'][:, :, 0:int(H / 2), :] +
                        residual[s]['lv3_top']
                    ) + feature[s]['lv3_top'] + feature[ps]['lv2_1']
                    feature[s]['lv2_2'] = encoder[s]['lv2'](
                        residual[ps]['lv1'][:, :, int(H / 2):H, :] +
                        residual[s]['lv3_bot']
                    ) + feature[s]['lv3_bot'] + feature[ps]['lv2_2']
                    feature[s]['lv2'] = torch.cat(
                        (feature[s]['lv2_1'], feature[s]['lv2_2']), 2)
                    residual[s]['lv2'] = decoder[s]['lv2'](feature[s]['lv2'])

                    feature[s]['lv1'] = encoder[s]['lv1'](
                        residual[ps]['lv1'] + residual[s]['lv2']
                    ) + feature[s]['lv2'] + feature[ps]['lv1']
                    residual[s]['lv1'] = decoder[s]['lv1'](feature[s]['lv1'])

                    deblurred_image = residual[s]['lv1']

                    save_deblur_images(deblurred_image.data + 0.5, 4,
                                       iteration, epoch)

        for s in ['s1', 's2', 's3', 's4']:
            for lv in ['lv1', 'lv2', 'lv3']:
                torch.save(
                    encoder[s][lv].state_dict(),
                    str('./checkpoints/' + METHOD + "/encoder_" + s + "_" +
                        lv + ".pkl"))
                torch.save(
                    decoder[s][lv].state_dict(),
                    str('./checkpoints/' + METHOD + "/decoder_" + s + "_" +
                        lv + ".pkl"))
Exemple #28
0
num_epochs1 = 110
num_epochs2 = 300
num_epochs3 = 150


use_cuda = torch.cuda.is_available()
device = torch.device('cuda:0') if use_cuda else torch.device('cpu')


#encoder = models.Encoder()
encoder = tmodels.resnet18(pretrained=True)
#encoder = tmodels.inception_v3(pretrained=True)
#encoder.aux_logits=False
encoder.fc = nn.Sequential()
#discriminator = models.DCDPro(input_features=128)
decoder = models.Decoder()
encoder.to(device)
decoder = decoder.to(device)

loss_fn = torch.nn.MSELoss()
# -----------------------------------------------------------------------------
## stage 1: train g and h
print("||||| Stage 1 |||||")
optimizer = torch.optim.Adam(decoder.parameters(), lr=0.0001)
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[100], gamma=0.1)

data_transforms = {
    'train': transforms.Compose([
        transforms.CenterCrop((img_size, img_size)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
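
One detail worth noting in the snippet above: assigning an empty nn.Sequential() to resnet18's fc turns the backbone into a 512-dimensional feature extractor, because an empty Sequential acts as the identity. A quick check:

import torch
import torch.nn as nn
import torchvision.models as tmodels

encoder = tmodels.resnet18(pretrained=True)
encoder.fc = nn.Sequential()  # identity: the pooled 512-d features pass through
with torch.no_grad():
    print(encoder(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 512])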
Exemple #29
0
    print(
        "WARNING: You have a CUDA device, so you should probably run with --cuda"
    )

#################################
# initialize tensors
imInputBatch = Variable(
    torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize))
imInputMaskBatch = Variable(
    torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize))
# need a variable size placeholder to handle variable number of views per fyuse...

# initialize models
encoderInit = nn.DataParallel(models.Encoder(), device_ids=opt.deviceIds)
decoderInit = nn.DataParallel(
    models.Decoder(numVertices=642 + 1),
    device_ids=opt.deviceIds)  # Center to be predicted too
colorInit = nn.DataParallel(models.Color(numVertices=642),
                            device_ids=opt.deviceIds)

#####################################
# Send things into GPU
if opt.cuda:
    imInputBatch = imInputBatch.cuda(opt.gpuId)
    imInputMaskBatch = imInputMaskBatch.cuda(opt.gpuId)

    encoderInit = encoderInit.cuda(opt.gpuId)
    decoderInit = decoderInit.cuda(opt.gpuId)
    colorInit = colorInit.cuda(opt.gpuId)
####################################
def main():
    train_chairs = [
        'chair_0001', 'chair_0005', 'chair_0101', 'chair_0084', 'chair_0497',
        'chair_0724', 'chair_0878'
    ]
    test_chairs = ['chair_0957']
    features = []
    np.random.seed(0)
    torch.manual_seed(0)

    logger.info('Loading data...')
    train_loader, val_loader, classes = custom_dataset.load_data(args)

    # override autodetect if n_classes is given
    if args.n_classes > 0:
        classes = np.arange(args.n_classes)

    model = load_model(classes)

    logger.info('Loaded model; params={}'.format(util.count_parameters(model)))
    if not args.cpu:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    else:
        device = "cpu"

    model.to(device)
    cudnn.benchmark = True
    logger.info('Running on ' + str(device))

    summary_writer = Logger(args.logdir)

    # Loss and Optimizer
    n_epochs = args.epochs
    if args.label_smoothing > 0:
        criterion = nn.BCEWithLogitsLoss()
    else:
        criterion = nn.CrossEntropyLoss()

    train_state = init_train_state()
    # freeze layers
    for l in args.freeze_layers:
        for p in getattr(model, l).parameters():
            p.requires_grad = False
    if args.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=train_state['lr'],
                                     weight_decay=args.weight_decay)
    elif args.optimizer == 'nesterov':
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=train_state['lr'],
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay,
                                    nesterov=True)
    # this is used to warm-start
    if args.warm_start_from:
        logger.info('Warm-starting from {}'.format(args.warm_start_from))
        assert os.path.isfile(args.warm_start_from)
        train_state = load_checkpoint(args.warm_start_from, model, optimizer)
        logger.info('Params loaded.')
        # do not override train_state when warm starting
        train_state = init_train_state()

    ckptfile = str(Path(args.logdir) / args.latest_fname)
    if os.path.isfile(ckptfile):
        logger.info('Loading checkpoint: {}'.format(ckptfile))
        train_state = load_checkpoint(ckptfile, model, optimizer)
        logger.info('Params loaded.')
    else:
        logger.info('Checkpoint {} not found; ignoring.'.format(ckptfile))

    # Training / Eval loop
    epoch_time = []  # store time per epoch
    # we save epoch+1 to checkpoints; but for eval we should repeat prev. epoch

    if args.skip_train:
        train_state['start_epoch'] -= 1
    for epoch in range(0, n_epochs):

        logger.info('Epoch: [%d/%d]' % (epoch + 1, n_epochs))
        start = time.time()

        if not args.skip_train:
            model.train()

            if epoch == n_epochs - 1:
                features = train(train_loader,
                                 device,
                                 model,
                                 criterion,
                                 optimizer,
                                 summary_writer,
                                 train_state,
                                 1,
                                 train_chairs,
                                 n_classes=len(classes))

                PIK = "descriptors.dat"
                with open(PIK, "wb") as f:
                    pickle.dump(train_desc, f)

            else:

                train(train_loader,
                      device,
                      model,
                      criterion,
                      optimizer,
                      summary_writer,
                      train_state,
                      0,
                      train_chairs,
                      n_classes=len(classes))

            logger.info('Time taken: %.2f sec...' % (time.time() - start))
            if epoch == 0:
                train_state['steps_epoch'] = train_state['step']
        # always eval on last epoch
        if not args.skip_eval or epoch == n_epochs - 1:
            #print("-------------SAVING MODEL----------------");
            #torch.save(model,"saved.pth")
            logger.info('\n Starting evaluation...')
            model.eval()
            eval_shrec = (epoch == n_epochs - 1 and bool(args.retrieval_dir))
            metrics, inputs = eval(val_loader, device, model, criterion,
                                   eval_shrec, 0, test_chairs, features)

            logger.info('\tcombined: %.2f, Acc: %.2f, mAP: %.2f, Loss: %.4f' %
                        (metrics['combined'], metrics['acc_inst'],
                         metrics.get('mAP_inst', 0.), metrics['loss']))

            # Log epoch to tensorboard
            # See log using: tensorboard --logdir='logs' --port=6006
            ims = get_summary_ims(inputs)
            if not args.nolog:
                util.logEpoch(summary_writer, model, epoch + 1, metrics, ims)
        else:
            metrics = None

        # Decaying Learning Rate
        if args.lr_decay_mode == 'step':
            if (epoch + 1) % args.lr_decay_freq == 0:
                train_state['lr'] *= args.lr_decay
                for param_group in optimizer.param_groups:
                    param_group['lr'] = train_state['lr']

        # Save model
        if not args.skip_train:
            logger.info('\tSaving latest model')
            util.save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'step': train_state['step'],
                    'steps_epoch': train_state['steps_epoch'],
                    'state_dict': model.state_dict(),
                    'metrics': metrics,
                    'optimizer': optimizer.state_dict(),
                    'lr': train_state['lr'],
                }, str(Path(args.logdir) / args.latest_fname))

        total_epoch_time = time.time() - start
        epoch_time.append(total_epoch_time)
        logger.info('Total time for this epoch: {} s'.format(total_epoch_time))

        if args.skip_train:
            # if evaluating, run it once
            break

        if time.perf_counter() + np.max(
                epoch_time) > start_time + args.exit_after:
            logger.info(
                'Next epoch will likely exceed allotted time; exiting...')
            break

    print("Encoder training done")
    print("Now training the Decoder")

    ###############################Decoder ###########################################

    decoder = models.Decoder()
    print(decoder)
    decoder.to(device)

    train_state = init_train_state()

    crit = nn.MSELoss()

    optim = torch.optim.SGD(decoder.parameters(),
                            lr=train_state['lr'],
                            momentum=args.momentum,
                            weight_decay=args.weight_decay,
                            nesterov=True)

    path = str("/home/smjadhav/Research/emvn/decoder_model/latest.pth.tar")
    if os.path.isfile(path):
        logger.info('Loading decoder checkpoint: {}'.format(path))
        train_state = load_checkpoint(path, decoder, optim)
        logger.info('Params loaded.')
    else:
        print("Decoder model not found")

    train_size = len(train_loader)
    metrics = {}

    for epoch in range(0, 50):
        print("Epoch ", epoch + 1)
        decoder.train()

        PIK = "D1.dat"

        with open(PIK, "rb") as f:
            try:
                i = 0

                while True:

                    data = pickle.load(f)

                    inputs = torch.from_numpy(data[1]).to(device)
                    target_img = torch.from_numpy(data[0]).to(device)
                    outputs = decoder(inputs)

                    optim.zero_grad()
                    loss = crit(outputs, target_img)
                    loss.backward()
                    optim.step()

                    if args.lr_decay_mode == 'cos':
                        # estimate steps_epoch from first epoch (we may have dropped entries)
                        steps_epoch = (train_state['steps_epoch']
                                       if train_state['steps_epoch'] > 0 else
                                       len(train_loader))
                        # TODO: there will be a jump here if many entries are dropped
                        #       and we only figure out # of steps after first epoch

                        if train_state['step'] < steps_epoch:
                            train_state['lr'] = args.lr * train_state[
                                'step'] / steps_epoch
                        else:
                            nsteps = steps_epoch * args.epochs
                            train_state['lr'] = (0.5 * args.lr * (1 + np.cos(
                                train_state['step'] * np.pi / nsteps)))
                        for param_group in optim.param_groups:
                            param_group['lr'] = train_state['lr']
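                    # Recap of the cosine schedule above: the lr ramps linearly
                    # from 0 to args.lr over the first epoch, then decays as
                    # 0.5 * args.lr * (1 + cos(pi * step / total_steps)).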

                    if (i + 1) % args.print_freq == 0:
                        print("\tIter [%d/%d] Loss: %.4f" %
                              (i + 1, train_size, loss.item()))

                    if args.max_steps > 0 and i > args.max_steps:
                        break
                    i = i + 1

            except EOFError:
                # pickle.load raises EOFError once the stream is exhausted
                pass

        if (epoch + 1) % 5 == 0:
            print("Saving Decoder model")
            util.save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'step': train_state['step'],
                    'steps_epoch': train_state['steps_epoch'],
                    'state_dict': decoder.state_dict(),
                    'metrics': metrics,
                    'optimizer': optim.state_dict(),
                    'lr': train_state['lr'],
                }, path)
            PIK = "images.dat"
            with open(PIK, "wb") as f:
                pickle.dump(outputs, f)
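
The while/try loop above reads pickled batches one at a time until the stream runs out; pickle.load raises EOFError at end of file, which is what the except clause catches. The same pattern reads more cleanly as a small generator (a sketch, not the original code):

import pickle

def read_pickles(path):
    # Yield every object that was pickled sequentially into one file.
    with open(path, 'rb') as f:
        while True:
            try:
                yield pickle.load(f)
            except EOFError:
                return

# Usage, mirroring the loop above:
# for data in read_pickles('D1.dat'):
#     inputs, target_img = data[1], data[0]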