Example #1
    def _init_model(self):
        self._enc_model = models.Encoder(density=8, size=self._image_size,
                                         latent_size=self._latent_size)
        self._gen_model = models.Generator(density=8, size=self._image_size,
                                           latent_size=self._latent_size)
        self._dis_model = models.Discriminator(density=8, size=self._image_size)

        self._enc_dis_model = models.Encoder(density=8, size=self._image_size,
                                             latent_size=self._latent_size)
        self._gen_dis_model = models.Generator(density=8, size=self._image_size,
                                               latent_size=self._latent_size)

        self._optimizer_enc = optimizers.Adam(alpha=0.0001, beta1=0.5)
        self._optimizer_enc.setup(self._enc_model)
        self._optimizer_enc.add_hook(chainer.optimizer.WeightDecay(0.00001))
        self._optimizer_gen = optimizers.Adam(alpha=0.0001, beta1=0.5)
        self._optimizer_gen.setup(self._gen_model)
        self._optimizer_gen.add_hook(chainer.optimizer.WeightDecay(0.00001))

        self._optimizer_enc_dis = optimizers.Adam(alpha=0.0001, beta1=0.5)
        self._optimizer_enc_dis.setup(self._enc_dis_model)
        self._optimizer_enc_dis.add_hook(chainer.optimizer.WeightDecay(0.00001))
        self._optimizer_gen_dis = optimizers.Adam(alpha=0.0001, beta1=0.5)
        self._optimizer_gen_dis.setup(self._gen_dis_model)
        self._optimizer_gen_dis.add_hook(chainer.optimizer.WeightDecay(0.00001))

        self._optimizer_dis = optimizers.Adam(alpha=0.0001, beta1=0.5)
        self._optimizer_dis.setup(self._dis_model)
        self._optimizer_dis.add_hook(chainer.optimizer.WeightDecay(0.00001))

        self._enc_model.to_gpu(self._gpu)  # send to main GPU
        self._gen_model.to_gpu(self._gpu)
        self._dis_model.to_gpu(self._gpu)
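A minimal sketch of how these models and optimizers are typically driven in a Chainer training step; the method name and the reconstruction loss are illustrative assumptions, not part of the original snippet:

    def _update_autoencoder(self, x):
        # Assumes: import chainer.functions as F
        x_rec = self._gen_model(self._enc_model(x))  # encode, then reconstruct
        loss = F.mean_squared_error(x_rec, x)        # hypothetical loss choice

        # Standard Chainer update: clear grads, backprop, apply Adam.
        self._enc_model.cleargrads()
        self._gen_model.cleargrads()
        loss.backward()
        self._optimizer_enc.update()
        self._optimizer_gen.update()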
Example #2
def set_network(depth, ctx, lr, beta1, ndf, ngf, append=True, solver='adam'):
    # Pixel2pixel networks
    if append:
        netD = models.Discriminator(in_channels=6, n_layers=1, ndf=ndf)
    else:
        netD = models.Discriminator(in_channels=3, n_layers=1, ndf=ndf)
    netEn = models.Encoder(in_channels=3, n_layers=depth, ndf=ngf)
    netDe = models.Decoder(in_channels=3, n_layers=depth, ndf=ngf)

    # Initialize parameters
    models.network_init(netEn, ctx=ctx)
    models.network_init(netDe, ctx=ctx)
    models.network_init(netD, ctx=ctx)

    # Trainers for the encoder, decoder, and discriminator
    if solver == 'adam':
        trainerEn = gluon.Trainer(netEn.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
        trainerDe = gluon.Trainer(netDe.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
        trainerD = gluon.Trainer(netD.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
    elif solver == 'sgd':
        trainerEn = gluon.Trainer(netEn.collect_params(), 'sgd', {'learning_rate': lr, 'momentum': 0.9})
        trainerDe = gluon.Trainer(netDe.collect_params(), 'sgd', {'learning_rate': lr, 'momentum': 0.9})
        trainerD = gluon.Trainer(netD.collect_params(), 'sgd', {'learning_rate': lr, 'momentum': 0.9})
    return netEn, netDe, netD, trainerEn, trainerDe, trainerD
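A hedged usage sketch for the trainers returned above, following the standard Gluon update pattern; the toy batch, loss choice, and hyperparameter values are assumptions for illustration:

import mxnet as mx
from mxnet import autograd, gluon, nd

netEn, netDe, netD, trainerEn, trainerDe, trainerD = set_network(
    depth=4, ctx=mx.cpu(), lr=2e-4, beta1=0.5, ndf=64, ngf=64)

loss_fn = gluon.loss.L1Loss()
real = nd.random.normal(shape=(8, 3, 64, 64))  # toy batch
with autograd.record():
    fake = netDe(netEn(real))  # encode/decode round trip
    loss = loss_fn(fake, real)
loss.backward()
trainerEn.step(real.shape[0])  # batch-size-normalized update
trainerDe.step(real.shape[0])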
Example #3
def set_network(depth, ctx, lr, beta1, ndf, ngf, latent, append=True, solver='adam'):
    # Pixel2pixel networks
    if append:
        netD = models.Discriminator(in_channels=6, n_layers=2, ndf=ndf)
        netD2 = models.LatentDiscriminator(in_channels=6, n_layers=2, ndf=ndf)
    else:
        netD = models.Discriminator(in_channels=3, n_layers=2, ndf=ndf)
        netD2 = models.LatentDiscriminator(in_channels=3, n_layers=2, ndf=ndf)
    netEn = models.Encoder(in_channels=3, n_layers=depth, latent=latent, ndf=ngf)
    netDe = models.Decoder(in_channels=3, n_layers=depth, latent=latent, ndf=ngf)

    # Initialize parameters
    models.network_init(netEn, ctx=ctx)
    models.network_init(netDe, ctx=ctx)
    models.network_init(netD, ctx=ctx)
    models.network_init(netD2, ctx=ctx)
    
    trainerEn = gluon.Trainer(netEn.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
    trainerDe = gluon.Trainer(netDe.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
    trainerD = gluon.Trainer(netD.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
    trainerD2 = gluon.Trainer(netD2.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
    return netEn, netDe, netD, netD2, trainerEn, trainerDe, trainerD, trainerD2
Example #4
    def __init__(self, options):
        self.device = options.device
        self.enc_path = options.enc_path
        self.content_size = options.content_size
        self.enc_iter = options.enc_iter

        if not os.path.exists(os.path.join(self.enc_path, 'codes')):
            os.makedirs(os.path.join(self.enc_path, 'codes'))

        transforms = []
        if options.crop_size is not None:
            transforms.append(T.CenterCrop(options.crop_size))
        transforms.append(T.Resize(options.image_size))
        transforms.append(T.ToTensor())
        transforms.append(T.Normalize((0.5, 0.5, 0.5, 0), (0.5, 0.5, 0.5, 1)))

        self.dataset = ImageFolder(options.data_root,
                                   transform=T.Compose(transforms))
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=options.batch_size,
            num_workers=options.nloader)
        self.data_iter = iter(self.dataloader)

        self.enc = models.Encoder(options.image_size, options.image_size,
                                  options.enc_features, options.enc_blocks,
                                  options.enc_adain_features,
                                  options.enc_adain_blocks,
                                  options.content_size)
        self.enc.to(self.device)
        self.enc.load_state_dict(
            torch.load(os.path.join(self.enc_path, 'models',
                                    '{0}_enc.pt'.format(self.enc_iter)),
                       map_location=self.device))
Example #5
    def __call__(self) -> float:
        # initial data
        if self.args.GEOmetrics:
            self.adj_info, initial_positions = utils.load_mesh_vision(
                self.args, '../data/sphere.obj')
        else:
            self.adj_info, initial_positions = utils.load_mesh_vision(
                self.args, '../data/vision_sheets.obj')
        self.encoder = models.Encoder(self.adj_info,
                                      Variable(initial_positions.cuda()),
                                      self.args)
        self.encoder.cuda()
        params = list(self.encoder.parameters())
        self.optimizer = optim.Adam(params, lr=self.args.lr, weight_decay=0)

        writer = SummaryWriter(
            os.path.join('experiments/tensorboard/', self.args.exp_type))
        train_loader, valid_loaders = self.get_loaders()

        if self.args.eval:
            if self.args.pretrained != 'no':
                self.load_pretrained()
            else:
                self.load('')
            with torch.no_grad():
                self.validate(valid_loaders, writer)
            exit()
        # training loop
        for epoch in range(3000):
            self.epoch = epoch
            self.train(train_loader, writer)
            with torch.no_grad():
                self.validate(valid_loaders, writer)
            self.check_values()
Example #6
def main():
    latent_dim = 16

    datasource = allocate_datasource(args.env)
    num_actions = datasource.binary_input_channels
    num_rewards = datasource.scalar_output_channels
    input_channels = datasource.conv_input_channels
    output_channels = datasource.conv_output_channels

    encoder = models.Encoder(latent_dim, input_channels)
    decoder = models.Decoder(latent_dim, output_channels)
    reward_predictor = models.RewardPredictor(latent_dim, num_rewards)
    discriminator = models.Discriminator()
    transition = models.Transition(latent_dim, num_actions)

    if args.load_from is None:
        print('No --load-from directory specified: initializing new networks')
    elif 'model-encoder.pth' not in os.listdir(args.load_from):
        print('Error: Failed to load saved models from directory {}'.format(
            args.load_from))
        raise ValueError('Failed to load weights from *.pth')
    else:
        print('Loading models from directory {}'.format(args.load_from))
        encoder.load_state_dict(
            torch.load(os.path.join(args.load_from, 'model-encoder.pth')))
        decoder.load_state_dict(
            torch.load(os.path.join(args.load_from, 'model-decoder.pth')))
        transition.load_state_dict(
            torch.load(os.path.join(args.load_from, 'model-transition.pth')))
        discriminator.load_state_dict(
            torch.load(os.path.join(args.load_from,
                                    'model-discriminator.pth')))
        reward_predictor.load_state_dict(
            torch.load(
                os.path.join(args.load_from, 'model-reward_predictor.pth')))

    if args.evaluate:
        for _ in range(args.evaluations):
            with torch.no_grad():
                play(latent_dim, datasource, num_actions, num_rewards, encoder,
                     decoder, reward_predictor, discriminator, transition)
        print('Finished {} playthroughs'.format(args.evaluations))
        evaluate(datasource,
                 encoder,
                 decoder,
                 transition,
                 discriminator,
                 reward_predictor,
                 latent_dim,
                 use_training_set=True)
    else:
        train(latent_dim, datasource, num_actions, num_rewards, encoder,
              decoder, reward_predictor, discriminator, transition)
    print('Finished execution, terminating')
Example #7
    def test_size(self):
        """Test that the Encoder produces the correct size output."""
        batch_size = 32
        image_size = 64
        latent_size = 128

        E = models.Encoder(latent_size, image_size)
        tensor_in = Variable(2.0 * (torch.rand(
            (batch_size, 3, image_size, image_size)) - 0.5))
        tensor_out = E(tensor_in)
        self.assertEqual(tensor_out.size(),
                         torch.Size([batch_size, latent_size]))
Example #8
def create_models(image_size=64, latent_size=128):
    """Creates encoder and decoder and moves them to the GPU if requested."""

    E = models.Encoder(latent_size, image_size)
    D = models.Decoder(latent_size, image_size)

    if RUN_ON_GPU:
        print('Moving models to GPU.')
        E.cuda()
        D.cuda()
    else:
        print('Keeping models on CPU.')

    return E, D
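A possible round trip through the pair returned by create_models; the (N, latent_size) code shape follows the encoder size test elsewhere in this listing, and the batch values are placeholders:

import torch

E, D = create_models(image_size=64, latent_size=128)
images = torch.rand(16, 3, 64, 64)
if RUN_ON_GPU:
    images = images.cuda()
codes = E(images)           # expected shape: (16, 128)
reconstructions = D(codes)  # expected shape: (16, 3, 64, 64)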
Example #9
    def test_layers(self):
        """Test that the Encoder produces the correct size output for each layer."""
        batch_size = 32
        image_size = 64
        latent_size = 128

        E = models.Encoder(latent_size, image_size)
        tensor = Variable(2.0 * (torch.rand(
            (batch_size, 3, image_size, image_size)) - 0.5))
        tensor = tensor.view(tensor.size()[0], -1)
        tensor = E.hidden_layer(tensor)
        self.assertEqual(tensor.size(), torch.Size([batch_size, latent_size]))
        tensor = E.output_layer(tensor)
        self.assertEqual(tensor.size(), torch.Size([batch_size, latent_size]))
Example #10
def set_network(depth, ctx, ngf):
    # Pixel2pixel networks
    netEn = models.Encoder(in_channels=3, n_layers=depth, ndf=ngf)
    netDe = models.Decoder(in_channels=3, n_layers=depth, ndf=ngf)
    netD = models.Discriminator(in_channels=3, n_layers=depth, ndf=ngf, isthreeway=True)

    # Initialize parameters
    models.network_init(netDe, ctx=ctx)    
    models.network_init(netEn, ctx=ctx)
    models.network_init(netD, ctx=ctx)

    return netEn, netDe, netD
Example #11
    def __init__(self,
                 device,
                 model,
                 model_num_labels,
                 image_nc,
                 box_min,
                 box_max,
                 eps,
                 pgd_iter,
                 models_path,
                 out_path,
                 model_name,
                 writer,
                 E_lr,
                 defG_lr):
        output_nc = image_nc
        self.device = device
        self.model_num_labels = model_num_labels
        self.model = model
        self.input_nc = image_nc
        self.output_nc = output_nc
        self.box_min = box_min
        self.box_max = box_max
        self.eps = eps
        self.pgd_iter = pgd_iter
        self.models_path = models_path
        self.out_path = out_path
        self.model_name = model_name
        self.writer = writer
        self.E_lr = E_lr
        self.defG_lr = defG_lr

        self.en_input_nc = image_nc
        self.E = models.Encoder(image_nc).to(device)
        self.defG = models.Generator(adv=False).to(device)
        self.pgd = PGD(self.model, self.E, self.defG, self.device, self.eps)

        # initialize all weights
        self.E.apply(weights_init)
        self.defG.apply(weights_init)

        # initialize optimizers
        self.optimizer_E = torch.optim.Adam(self.E.parameters(),
                                            lr=self.E_lr)
        self.optimizer_defG = torch.optim.Adam(self.defG.parameters(),
                                               lr=self.defG_lr)
Example #12
def set_network(depth, ctx, ndf):
    netEn = models.Encoder(in_channels=3, n_layers=depth, ndf=ndf,
                           usetanh=True)
    netDe = models.Decoder(in_channels=3, n_layers=depth, ndf=ndf)
    netD = models.Discriminator(in_channels=3,
                                n_layers=depth,
                                ndf=ndf,
                                isthreeway=False)
    netD2 = models.Discriminator(in_channels=3,
                                 n_layers=depth,
                                 ndf=ndf,
                                 isthreeway=False)

    return netEn, netDe, netD, netD2
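Unlike the other set_network variants in this listing, this one never calls models.network_init, so the caller presumably initializes (or loads) the parameters itself; a sketch under that assumption:

import mxnet as mx

ctx = mx.cpu()
netEn, netDe, netD, netD2 = set_network(depth=4, ctx=ctx, ndf=64)
for net in (netEn, netDe, netD, netD2):
    models.network_init(net, ctx=ctx)  # assumed, mirroring the other variants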
Example #13
def set_network(depth, ctx, lr, beta1, ngf):
    # Pixel2pixel networks
    netEn = models.Encoder(in_channels=3, n_layers=depth, ndf=ngf)
    netDe = models.Decoder(in_channels=3, n_layers=depth, ndf=ngf)
    netD = models.Discriminator(in_channels=3, n_layers=depth, ndf=ngf, isthreeway=True)

    # Initialize parameters
    models.network_init(netDe, ctx=ctx)    
    models.network_init(netEn, ctx=ctx)
    models.network_init(netD, ctx=ctx)

    # trainer for the generator and the discriminator
    trainerEn = gluon.Trainer(netEn.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
    trainerDe = gluon.Trainer(netDe.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
    trainerD = gluon.Trainer(netD.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})

    return netEn, netDe, netD, trainerEn, trainerDe, trainerD
Example #14
def _build_parser(loader, **kwargs):
    parser = models.BiaffineParser(
        n_rels=len(loader.rel_map),
        encoder=models.Encoder(
            loader.get_embeddings('word'),
            loader.get_embeddings('pre',
                                  normalize=lambda W: W / np.std(W)
                                  if np.std(W) > 0. else W),
            loader.get_embeddings('pos'),
            n_lstm_layers=kwargs.get('n_lstm_layers', 3),
            lstm_hidden_size=kwargs.get('lstm_hidden_size', 400),
            embeddings_dropout=kwargs.get('dropout_ratio', 0.33),
            lstm_dropout=kwargs.get('dropout_ratio', 0.33)),
        encoder_dropout=kwargs.get('dropout_ratio', 0.33),
        arc_mlp_units=kwargs.get('arc_mlp_units', 500),
        rel_mlp_units=kwargs.get('rel_mlp_units', 100),
        arc_mlp_dropout=kwargs.get('dropout_ratio', 0.33),
        rel_mlp_dropout=kwargs.get('dropout_ratio', 0.33))
    return parser
Example #15
def set_network(depth, ctx, lr, beta1, ndf, ngf, append=True):
    if append:
        netD = models.Discriminator(in_channels=6,
                                    n_layers=depth - 1,
                                    istest=True,
                                    ndf=ndf)
    else:
        netD = models.Discriminator(in_channels=3,
                                    n_layers=depth - 1,
                                    istest=True,
                                    ndf=ndf)
    netEn = models.Encoder(in_channels=3, n_layers=depth, istest=True,
                           latent=4096, ndf=ngf)
    netDe = models.Decoder(in_channels=3, n_layers=depth, istest=True,
                           latent=4096, ndf=ngf)
    netD2 = None  # no latent discriminator in this variant

    return netEn, netDe, netD, netD2
Example #16
    def __call__(self) -> float:
        self.encoder = models.Encoder(self.args)
        self.encoder.cuda()
        params = list(self.encoder.parameters())
        self.optimizer = optim.Adam(params, lr=self.args.lr, weight_decay=0)
        writer = SummaryWriter(
            os.path.join('experiments/tensorboard/', self.args.exp_type))

        train_loader, valid_loaders = self.get_loaders()

        if self.args.eval:
            self.load('')
            with torch.no_grad():
                self.validate(valid_loaders, writer)
            exit()
        for epoch in range(self.args.epochs):
            self.epoch = epoch
            self.train(train_loader, writer)
            with torch.no_grad():
                self.validate(valid_loaders, writer)
            self.check_values()
Example #17
def pick_model(args, dicts):
    """
        Use args to initialize the appropriate model
    """
    Y = get_num_labels(args.Y)
    if args.model == "rnn":
        model = models.VanillaRNN(Y, args.embed_file, dicts, args.rnn_dim,
                                  args.cell_type, args.rnn_layers, args.gpu,
                                  args.embed_size, args.bidirectional)
    elif args.model == "cnn_vanilla":
        filter_size = int(args.filter_size)
        model = models.VanillaConv(Y, args.embed_file, filter_size,
                                   args.num_filter_maps, args.gpu, dicts,
                                   args.embed_size, args.dropout)
    elif args.model == "conv_attn":
        filter_size = int(args.filter_size)
        model = models.ConvAttnPool(Y,
                                    args.embed_file,
                                    filter_size,
                                    args.num_filter_maps,
                                    args.lmbda,
                                    args.gpu,
                                    dicts,
                                    embed_size=args.embed_size,
                                    dropout=args.dropout)
    elif args.model == "rnn_attn":
        encoder = models.Encoder(Y,
                                 args.embed_file,
                                 dicts,
                                 embed_size=args.embed_size)
        decoder = models.Decoder(Y, args.embed_file, dicts)
        model = models.Seq2Seq(encoder, decoder, Y, args.embed_file, dicts)
    elif args.model == "saved":
        model = torch.load(args.test_model)
    if args.gpu:
        model.cuda()
    return model
Example #18
def main():
    batch_size = 16
    latent_dim = 12
    true_latent_dim = 4
    num_actions = 4
    encoder = models.Encoder(latent_dim)
    decoder = models.Decoder(latent_dim)
    transition = models.Transition(latent_dim, num_actions)
    blur = models.GaussianSmoothing(channels=3, kernel_size=11, sigma=4.)
    higgins_scores = []

    #load_from_dir = '/mnt/nfs/experiments/demo_2018_12_12/scm-gan_81bd12cd'
    load_from_dir = '.'

    print('Loading models from directory {}'.format(load_from_dir))
    encoder.load_state_dict(
        torch.load(os.path.join(load_from_dir, 'model-encoder.pth')))
    decoder.load_state_dict(
        torch.load(os.path.join(load_from_dir, 'model-decoder.pth')))
    transition.load_state_dict(
        torch.load(os.path.join(load_from_dir, 'model-transition.pth')))

    encoder.eval()
    decoder.eval()
    transition.eval()
    for model in (encoder, decoder, transition):
        for child in model.children():
            if type(child) == nn.BatchNorm2d or type(child) == nn.BatchNorm1d:
                child.momentum = 0

    states, rewards, dones, actions = datasource.get_trajectories(batch_size,
                                                                  timesteps=1)
    states = torch.Tensor(states).cuda()

    # Reconstruct the first timestep
    reconstructed = decoder(encoder(states[:, 0]))
    imutil.show(reconstructed)
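Note that `datasource` is used above without being defined in the excerpt; by analogy with Example #6 it was presumably created earlier along these lines:

datasource = allocate_datasource(args.env)  # assumption, mirroring Example #6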
Example #19
# image size 3, 32, 32
# batch size must be an even number
# shuffle must be True
cifar_10_train_dt = CIFAR10(r'c:\data\tv', download=True, transform=ToTensor())
#dev = Subset(cifar_10_train_dt, range(128))
cifar_10_train_l = DataLoader(cifar_10_train_dt,
                              batch_size=batch_size,
                              shuffle=True,
                              drop_last=True,
                              pin_memory=torch.cuda.is_available())

epoch = 9
model_path = Path(r'c:\data\deepinfomax\models\run1\encoder' + str(epoch))

encoder = models.Encoder()
encoder.load_state_dict(torch.load(str(model_path)))
encoder.to(device)

# compute the latent space for each image and store in (latent, image)
minibatches = []
batch = tqdm(cifar_10_train_l, total=len(cifar_10_train_dt) // batch_size)
for images, target in batch:
    images = images.to(device)
    encoded, features = encoder(images)
    i = images.detach().cpu().unbind(0)
    e = encoded.detach().cpu().unbind(0)
    sublist = list(zip(e, i))
    minibatches.append(sublist)

# flatten the minibatches to a single list
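One plausible completion of the flattening step named in the final comment (an assumption, since the original snippet is truncated here):

latent_image_pairs = [pair for minibatch in minibatches for pair in minibatch]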
Example #20
def create_model(args):
    print("> Create model.")

    ## Gensim
    # word_model = Word2Vec.load("Word2Vec_V1.h5")
    # vectors = word_model.wv
    # all_words = vectors.index2word
    # mean_vector = vectors.vectors.mean(axis=0)
    # wei = torch.tensor(vectors.vectors, dtype=torch.float)
    ## Gensim

    with open(os.path.join(args.data_path, "dict&vectors.pkl"), "rb") as f:
        [word2idx, vectors] = pickle.load(f)

    global model
    if args.attn == 1:
        hidden = args.hidden_size
        encoder1 = models.Encoder(hidden_size=hidden, nlayers=1)
        encoder2 = models.Encoder(input_size=hidden*2*4, hidden_size=hidden, nlayers=1)

        attention_dim = 128
        attention = models.Attention(attention_dim, attention_dim, attention_dim)

        model = models.Classifier(encoder1, encoder2, attention,
                                  hidden_size=hidden,
                                  rec_len=rec_len,
                                  rep_len=rep_len,
                                  num_of_words=len(word2idx),
                                  drop_p=args.drop_p)

    elif args.attn == 2:
        model = models.BiDAF(window_size=args.max_length,
                             hidden_size=args.hidden_size,
                             drop_p=args.drop_p,
                             num_of_words=len(word2idx))
    elif args.attn == 3:
        model = models.RNNatt(window_size=args.max_length,
                              hidden_size=args.hidden_size,
                              drop_p=args.drop_p,
                              num_of_words=len(word2idx),
                              rec_len=rec_len,
                              rep_len=rep_len)
    elif args.attn == 4:
        model = models.RNNatt_weight(window_size=args.max_length,
                                     hidden_size=args.hidden_size,
                                     drop_p=args.drop_p,
                                     num_of_words=len(word2idx),
                                     rec_len=rec_len,
                                     rep_len=rep_len)
    else:  # args.attn == 0
        model = models.RNNbase(window_size=args.max_length,
                               hidden_size=args.hidden_size,
                               drop_p=args.drop_p,
                               num_of_words=len(word2idx))

    model.word_embedding.load_state_dict({'weight': vectors.to(torch.float32)})
    model.word_embedding.weight.requires_grad = False

    model = model.to(device)
    print(model)

    global optimizer
    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr_rate) # , betas=(0.9, 0.999), weight_decay=1e-3)

    return word2idx, vectors
Example #21
    parser.add_argument('--batch_size', dest='batch_size', type=int, default=1)
    parser.add_argument('--experiment_name',
                        dest='experiment_name',
                        default='stgan_128')

    args = parser.parse_args()
    # model
    atts = args.atts
    n_att = len(atts)
    img_size = args.img_size
    batch_size = args.batch_size
    experiment_name = args.experiment_name

    Gen = models.Generator()
    Dis = models.Discriminator(n_att)
    Enc = models.Encoder()
    Stu = models.Stu()

    x = tf.ones(shape=[2, 128, 128, 3], dtype=tf.float32)
    a = tf.ones(shape=[2, 13], dtype=tf.float32)

    z = Enc(x)
    z_stu = Stu(z, a)
    x_fake = Gen(z_stu, a - a)
    d, att = Dis(x)

    lr = tf.Variable(initial_value=0., trainable=False)
    g_opt = tf.optimizers.Adam(lr, beta_1=0., beta_2=0.99)
    d_opt = tf.optimizers.Adam(lr, beta_1=0., beta_2=0.99)
    params = tf.Variable(initial_value=[5, 0], trainable=False, dtype=tf.int64)
Example #22
if torch.cuda.is_available() and not opt.cuda:
    print(
        "WARNING: You have a CUDA device, so you should probably run with --cuda"
    )

#################################
# initialize tensors
imInputBatch = Variable(
    torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize))
imInputMaskBatch = Variable(
    torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize))
# need a variable size placeholder to handle variable number of views per fyuse...

# initialize models
encoderInit = nn.DataParallel(models.Encoder(), device_ids=opt.deviceIds)
decoderInit = nn.DataParallel(
    models.Decoder(numVertices=642 + 1),
    device_ids=opt.deviceIds)  # Center to be predicted too
colorInit = nn.DataParallel(models.Color(numVertices=642),
                            device_ids=opt.deviceIds)

#################################
# Send things into GPU
if opt.cuda:
    imInputBatch = imInputBatch.cuda(opt.gpuId)
    imInputMaskBatch = imInputMaskBatch.cuda(opt.gpuId)

    encoderInit = encoderInit.cuda(opt.gpuId)
    decoderInit = decoderInit.cuda(opt.gpuId)
    colorInit = colorInit.cuda(opt.gpuId)
Example #23
    transforms.Resize(64),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
dataset = datasets.ImageFolder(args.dataset_dir, transform=transform)
loader = torch.utils.data.DataLoader(dataset,
                                     batch_size=args.batch_size,
                                     shuffle=True,
                                     num_workers=2)

Z_dim = 64
disc_iters = 5

discriminator = models.Discriminator().cuda()
generator = models.Generator(Z_dim * 2).cuda()
encoder_c = models.Encoder(size_num=Z_dim).cuda()
encoder_t = models.Encoder(size_num=Z_dim).cuda()

v2 = models.vgg().cuda()

optim_disc = optim.Adam(filter(lambda p: p.requires_grad,
                               discriminator.parameters()),
                        lr=args.lr,
                        betas=(0.5, 0.9))
optim_gen = optim.Adam(generator.parameters(), lr=args.lr, betas=(0.5, 0.999))
optim_enc_c = optim.Adam([{
    'params': encoder_c.parameters()
}],
                         lr=args.lr,
                         betas=(0.9, 0.999))
optim_enc_t = optim.Adam([{
Example #24
def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--loader_workers', type=int, default=64)
    parser.add_argument('--epochs', type=int, default=1200)
    parser.add_argument('--batch_size', type=int, default=512)
    parser.add_argument('--save_interval', type=int, default=10)
    parser.add_argument('--save_dir', type=str, default='logs/')
    parser.add_argument('--resume', type=str, default=None)
    parser.add_argument('--table', type=str, default='data/Cangjie5.txt')
    parser.add_argument('--codemap', type=str, default='data/codemap_cangjie5.txt')
    parser.add_argument('--fonts', nargs='+', default=['data/hanazono/HanaMinA.ttf', 'data/hanazono/HanaMinB.ttf'])
    parser.add_argument('--encoder_lr', type=float, default=1e-3)
    parser.add_argument('--decoder_lr', type=float, default=1e-3)
    parser.add_argument('--alpha_c', type=float, default=1.)
    parser.add_argument('--grad_clip', type=float, default=5.)
    args = parser.parse_args()
    args.save_dir = os.path.join(args.save_dir, datetime.datetime.now().strftime("%m-%d-%Y-%H:%M:%S"))
    os.makedirs(args.save_dir)

    glyph = dset.Glyph(args.fonts)
    dataset = dset.CodeTableDataset(glyph, table=args.table, codemap=args.codemap)
    train_length = int(len(dataset) * 0.7)
    train_set, val_set = torch.utils.data.random_split(dataset, [train_length, len(dataset) - train_length])
    train_loader = torch.utils.data.DataLoader(train_set, args.batch_size, True,
                                               collate_fn=dset.collate_batch,
                                               num_workers=args.loader_workers,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_set, args.batch_size, False,
                                             collate_fn=dset.collate_batch,
                                             num_workers=args.loader_workers,
                                             pin_memory=True)

    encoder = models.Encoder(encode_channels=256).to(device)
    encoder_optim = torch.optim.Adam(encoder.parameters(), lr=args.encoder_lr)
    decoder = models.Decoder(128, 256, 256, 26 + 2, encoder_dim=256, dropout=0.5).to(device)
    decoder_optim = torch.optim.Adam(decoder.parameters(), lr=args.decoder_lr)
    epoch_start = 0
    if args.resume is not None:
        print('loading checkpoint: %s' % args.resume)
        checkpoint = torch.load(args.resume, map_location=device)
        decoder.load_state_dict(checkpoint['decoder'])
        decoder_optim.load_state_dict(checkpoint['decoder_optimizer'])
        encoder.load_state_dict(checkpoint['encoder'])
        encoder_optim.load_state_dict(checkpoint['encoder_optimizer'])
        epoch_start = checkpoint['epoch']

    criterion = nn.CrossEntropyLoss().to(device)
    logger = PredLogger(dataset, args.save_dir, args.codemap)
    writer = SummaryWriter(args.save_dir)

    best_acc = 0

    for epoch in tqdm(range(epoch_start, args.epochs), position=0):
        train(train_loader=train_loader,
              encoder=encoder,
              decoder=decoder,
              criterion=criterion,
              encoder_optimizer=encoder_optim,
              decoder_optimizer=decoder_optim,
              epoch=epoch,
              logger=logger,
              writer=writer,
              args=args)
        acc, imgs, scores, alphas = validate(val_loader=val_loader,
                       encoder=encoder,
                       decoder=decoder,
                       criterion=criterion,
                       epoch=epoch,
                       logger=logger,
                       writer=writer,
                       args=args)
        
        is_best = best_acc < acc # and epoch > 0
        best_acc = max(acc, best_acc)
        
        if epoch % args.save_interval == args.save_interval - 1 or is_best:
            save_checkpoint(epoch, encoder, decoder, encoder_optim, decoder_optim, acc, is_best, args.save_dir)
            vis = visualize_att(T.ToPILImage()(imgs[0].cpu()), scores[0].topk(1, dim=-1).indices.flatten().tolist(), alphas[0].view(-1, 13, 13).cpu(), logger.map_rev)
            vis.savefig(os.path.join(args.save_dir, 'val_visualize_%d.png'%epoch))
Example #25
import dataset
import helpers
import models
from config import *
assert file_name == 'vae-gan'
writer, save_dir = helpers.init(gpu, file_name, experiment_name)

use_ganloss = True  # TODO can I use that here?
use_vaeloss = True
use_instancenoise = False
iter_decay = 1000.  # iterations after which instance noise is at 1/e

# models  # TODO to one model?
netD = models.Discriminator().cuda()
netG = models.Generator().cuda()
netE = models.Encoder().cuda()

# weight initialization
netD.apply(models.init_weights)  # xavier init
netE.apply(models.init_weights)  # xavier init
netG.apply(models.init_weights)  # xavier init

criterion = nn.BCELoss().cuda()
optD = torch.optim.Adam(netD.parameters(), lr=lr)
optG = torch.optim.Adam(netG.parameters(), lr=lr)
optE = torch.optim.Adam(netE.parameters(), lr=lr)


def sample(n_samples):
    noise = Variable(torch.randn(n_samples, nz, 1,
                                 1)).cuda()  # fake_images = generator(noise)
Example #26
input_size = len(embeddings[1])

print("Loaded %d words" % vocab_size)

# Load/numberize messages
messages = preprocessing.load_messages(messages_file)
numberized_messages = preprocessing.numberize_messages(messages, w2i)

print("Loaded %d messages" % len(messages))

# Create encoder
encoder = models.Encoder(
    input_size=input_size,
    hidden_size=hidden_size,
    vocab_size=vocab_size,
    embedding_dict=embeddings,
    num_layers=1,
    dropout=0,
    rnn_type='gru',
)

print("Encoder online")

# Create decoder
decoder = models.Decoder(
    input_size=input_size,
    hidden_size=hidden_size,
    vocab_size=vocab_size,
    embedding_dict=embeddings,
    num_layers=1,
    dropout=0,
Example #27
def main(args):
    utils.seedme(args.seed)
    cudnn.benchmark = True
    device = torch.device(
        'cuda' if torch.cuda.is_available() and not args.nocuda else 'cpu')

    os.system('mkdir -p {}'.format(args.outf))

    dataloader_train = utils.get_patchloader(args.image_train,
                                             resize=args.resize_train,
                                             patch_size=args.patch_size,
                                             batch_size=args.batch_size_train,
                                             fliplr=args.fliplr,
                                             flipud=args.flipud,
                                             rot90=args.rot90,
                                             smooth=args.smooth)
    if args.image_valid:
        dataloader_valid = utils.get_patchloader(
            args.image_valid,
            resize=args.resize_valid,
            patch_size=args.patch_size,
            batch_size=args.batch_size_valid,
            fliplr=args.fliplr,
            flipud=args.flipud,
            rot90=args.rot90,
            smooth=args.smooth)

    netG = models.DCGAN_G(image_size=args.patch_size,
                          nc=args.nc,
                          nz=args.ncode,
                          ngf=args.ngf).to(device)
    netE = models.Encoder(patch_size=args.patch_size,
                          nc=args.nc,
                          ncode=args.ncode,
                          ndf=args.ndf).to(device)

    print(netG)
    print(netE)

    optimizer = optim.Adam(list(netG.parameters()) + list(netE.parameters()),
                           lr=args.lr,
                           amsgrad=True)
    loss_func = nn.MSELoss()

    losses = []
    losses_valid = []
    best_loss = 1e16
    for i in range(args.niter):
        optimizer.zero_grad()
        x = next(dataloader_train).to(device)
        if args.sigma:
            x = utils.add_noise(x, args.sigma)
        y = netG(netE(x))
        loss = loss_func(y, x)
        loss.backward()
        optimizer.step()

        if args.image_valid:
            with torch.no_grad():
                netG.eval()
                netE.eval()
                x_ = next(dataloader_valid).to(device)
                if args.sigma:
                    x_ = utils.add_noise(x_, args.sigma)
                y_ = netG(netE(x_))
                loss_valid = loss_func(y_, x_)
                netG.train()
                netE.train()
                losses_valid.append(loss_valid.item())

        _loss = loss_valid.item() if args.image_valid else loss.item()
        if _loss + 1e-3 < best_loss:
            best_loss = _loss
            print "[{}/{}] best loss: {}".format(i + 1, args.niter, best_loss)
            if args.save_best:
                torch.save(netE.state_dict(),
                           '{}/netD_best.pth'.format(args.outf))

        losses.append(loss.item())
        if (i + 1) % args.nprint == 0:
            if args.image_valid:
                print('[{}/{}] train: {}, test: {}, best: {}'.format(
                    i + 1, args.niter, loss.item(), loss_valid.item(),
                    best_loss))
            else:
                print('[{}/{}] train: {}, best: {}'.format(
                    i + 1, args.niter, loss.item(), best_loss))
            logger.vutils.save_image(torch.cat([x, y], dim=0),
                                     '{}/train_{}.png'.format(
                                         args.outf, i + 1),
                                     normalize=True)
            fig, ax = plt.subplots()
            ax.semilogy(scipy.signal.medfilt(losses, 11)[5:-5], label='train')
            if args.image_valid:
                logger.vutils.save_image(torch.cat([x_, y_], dim=0),
                                         '{}/test_{}.png'.format(
                                             args.outf, i + 1),
                                         normalize=True,
                                         nrow=32)
                ax.semilogy(scipy.signal.medfilt(losses_valid, 11)[5:-5],
                            label='valid')
            fig.legend()
            fig.savefig('{}/loss.png'.format(args.outf))
            plt.close(fig)
            torch.save(netE.state_dict(),
                       '{}/netD_iter_{}.pth'.format(args.outf, i + 1))
Example #28
    image_paths = glob.glob("images/*.jfif")
    images = [
        np.asarray(Image.open(image_path)) for image_path in image_paths
    ][:1]
    for i in range(len(images)):
        image = images[i]
        dimensions = image.shape[:2]
        ratio = min([min(d, 480) / d for d in dimensions])
        scaled_dimensions = [int(ratio * d) for d in dimensions]
        image = resize(image, (*scaled_dimensions, 3), anti_aliasing=True)
        image = image[:, :, [2, 1, 0]]
        image = np.transpose(image, (2, 0, 1))
        images[i] = image

    encoder = models.Encoder(embedding_size=img_embedding_size).to(device)
    decoder = models.Decoder(img_embedding_size=img_embedding_size,
                             pos_embedding_size=pos_embedding_size,
                             hidden_size=decoder_hidden_size).to(device)

    encoder_opt = torch.optim.Adam(encoder.parameters(), lr=0.004)
    decoder_opt = torch.optim.Adam(decoder.parameters(), lr=0.004)
    encoder_scheduler = torch.optim.lr_scheduler.ExponentialLR(encoder_opt,
                                                               gamma=0.99999)
    decoder_scheduler = torch.optim.lr_scheduler.ExponentialLR(decoder_opt,
                                                               gamma=0.99999)

    for i in range(100000000):
        image_idxs = np.random.randint(low=0,
                                       high=len(images),
                                       size=batch_size)
Example #29
def main():
    print("init data folders")

    encoder_lv1 = models.Encoder().apply(weight_init).cuda(GPU)
    encoder_lv2 = models.Encoder().apply(weight_init).cuda(GPU)
    encoder_lv3 = models.Encoder().apply(weight_init).cuda(GPU)

    decoder_lv1 = models.Decoder().apply(weight_init).cuda(GPU)
    decoder_lv2 = models.Decoder().apply(weight_init).cuda(GPU)
    decoder_lv3 = models.Decoder().apply(weight_init).cuda(GPU)

    if os.path.exists(str('./checkpoints/' + METHOD + "/encoder_lv1.pkl")):
        encoder_lv1.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/encoder_lv1.pkl")))
        print("load encoder_lv1 success")
    if os.path.exists(str('./checkpoints/' + METHOD + "/encoder_lv2.pkl")):
        encoder_lv2.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/encoder_lv2.pkl")))
        print("load encoder_lv2 success")
    if os.path.exists(str('./checkpoints/' + METHOD + "/encoder_lv3.pkl")):
        encoder_lv3.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/encoder_lv3.pkl")))
        print("load encoder_lv3 success")

    if os.path.exists(str('./checkpoints/' + METHOD + "/decoder_lv1.pkl")):
        decoder_lv1.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/decoder_lv1.pkl")))
        print("load encoder_lv1 success")
    if os.path.exists(str('./checkpoints/' + METHOD + "/decoder_lv2.pkl")):
        decoder_lv2.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/decoder_lv2.pkl")))
        print("load decoder_lv2 success")
    if os.path.exists(str('./checkpoints/' + METHOD + "/decoder_lv3.pkl")):
        decoder_lv3.load_state_dict(torch.load(str('./checkpoints/' + METHOD + "/decoder_lv3.pkl")))
        print("load decoder_lv3 success")
    
    if not os.path.exists('./test_results/' + EXPDIR):
        os.makedirs('./test_results/' + EXPDIR)

    iteration = 0
    test_time = 0.0
    for images_name in os.listdir(SAMPLE_DIR):
        with torch.no_grad():
            images_lv1 = transforms.ToTensor()(Image.open(SAMPLE_DIR + '/' + images_name).convert('RGB'))
            images_lv1 = Variable(images_lv1 - 0.5).unsqueeze(0).cuda(GPU)
            start = time.time()           
            H = images_lv1.size(2)
            W = images_lv1.size(3)

            images_lv2_1 = images_lv1[:,:,0:int(H/2),:]
            images_lv2_2 = images_lv1[:,:,int(H/2):H,:]
            images_lv3_1 = images_lv2_1[:,:,:,0:int(W/2)]
            images_lv3_2 = images_lv2_1[:,:,:,int(W/2):W]
            images_lv3_3 = images_lv2_2[:,:,:,0:int(W/2)]
            images_lv3_4 = images_lv2_2[:,:,:,int(W/2):W]

            feature_lv3_1 = encoder_lv3(images_lv3_1)
            feature_lv3_2 = encoder_lv3(images_lv3_2)
            feature_lv3_3 = encoder_lv3(images_lv3_3)
            feature_lv3_4 = encoder_lv3(images_lv3_4)
            feature_lv3_top = torch.cat((feature_lv3_1, feature_lv3_2), 3)
            feature_lv3_bot = torch.cat((feature_lv3_3, feature_lv3_4), 3)
            feature_lv3 = torch.cat((feature_lv3_top, feature_lv3_bot), 2)
            residual_lv3_top = decoder_lv3(feature_lv3_top)
            residual_lv3_bot = decoder_lv3(feature_lv3_bot)

            feature_lv2_1 = encoder_lv2(images_lv2_1 + residual_lv3_top)
            feature_lv2_2 = encoder_lv2(images_lv2_2 + residual_lv3_bot)
            feature_lv2 = torch.cat((feature_lv2_1, feature_lv2_2), 2) + feature_lv3
            residual_lv2 = decoder_lv2(feature_lv2)

            feature_lv1 = encoder_lv1(images_lv1 + residual_lv2) + feature_lv2
            deblur_image = decoder_lv1(feature_lv1)
            
            stop = time.time()
            test_time += stop-start
            print('RunTime:%.4f'%(stop-start), '  Average Runtime:%.4f'%(test_time/(iteration+1)))
            save_images(deblur_image.data + 0.5, images_name) 
            iteration += 1
    print()
Example #30
    # load the pretrained model
    pretrained_model = "./model_best.pth.tar"
    target_model = WideResNet().to(device)
    checkpoint = torch.load(pretrained_model, map_location=device)
    target_model.load_state_dict(checkpoint['state_dict'])
    if args.parameters_count:
        print('number of parameters(model):', parameters_count(target_model))
    target_model.eval()

    epoch = args.epoch

    # load encoder & generators
    E_path = models_path + model_name + 'E_epoch_{}.pth'.format(epoch)
    E = models.Encoder().to(device)
    E.load_state_dict(torch.load(E_path, map_location=device))
    if args.parameters_count:
        print('number of parameters(E):', parameters_count(E))
    E.eval()

    advG_path = models_path + model_name + 'advG_epoch_{}.pth'.format(epoch)
    advG = models.Generator(y_dim=10, adv=True).to(device)
    advG.load_state_dict(torch.load(advG_path, map_location=device))
    if args.parameters_count:
        print('number of parameters(advG):', parameters_count(advG))
    advG.eval()

    defG_path = models_path + model_name + 'defG_epoch_{}.pth'.format(epoch)
    defG = models.Generator(adv=False).to(device)
    defG.load_state_dict(torch.load(defG_path, map_location=device))