Exemplo n.º 1
0
    def init_models(self):
        """Create the four CycleGAN networks, then either restore saved
        weights or freshly initialize them, and move them to the device.

        Reads: self.load_models, self.output_path, self.device.
        Writes: self.G_X, self.G_Y, self.D_X, self.D_Y.
        """
        self.G_X = Generator(3, 3, nn.InstanceNorm2d)
        self.D_X = Discriminator(3)
        self.G_Y = Generator(3, 3, nn.InstanceNorm2d)
        self.D_Y = Discriminator(3)

        # name -> network; the name doubles as the checkpoint filename.
        models = {'G_X': self.G_X, 'G_Y': self.G_Y,
                  'D_X': self.D_X, 'D_Y': self.D_Y}

        if self.load_models:
            # NOTE(review): self.output_path is assumed to end with '/'
            # (the original concatenated paths the same way) — confirm.
            for name, net in models.items():
                net.load_state_dict(
                    torch.load(self.output_path + "models/" + name,
                               map_location='cpu'))
        else:
            for net in models.values():
                net.apply(init_func)

        for net in models.values():
            net.to(self.device)
def index(year, month, day):
    """Return a JSON payload with each generator's power readings for the
    given date.

    Falls back to random demo data when generator 1 has no records for the
    date (the other generators are not checked — this matches the original
    behavior).
    """
    generators = [Generator.get_by_id(i) for i in (1, 2, 3)]

    date = dt.datetime(year, month, day).strftime('%Y-%m-%d')

    if len(generators[0].daily_record(year, month, day)) > 0:
        gen_record = [{
            "name": gen.name,
            "power": [value.kWH
                      for value in gen.daily_record(year, month, day)],
        } for gen in generators]
    else:
        # No data for this date: serve 25 random readings per generator so
        # the chart still renders.
        gen_record = [{
            "name": gen.name,
            "power": [randint(50, 100) for _ in range(0, 25)],
        } for gen in generators]

    data = {"date": date, "gen_record": gen_record}
    return jsonify(data)
def addData():
    """Seed the database with three named generators if none exist yet.

    Idempotent: inserts only when generator id 1 is absent.
    """
    # `is None` instead of `== None` (identity check; also avoids peewee
    # expression-overloading surprises).
    if Generator.get_or_none(Generator.id == 1) is None:
        for i in (1, 2, 3):
            # Produces the same names as before: 'Generator 1' .. 'Generator 3'.
            Generator(name='Generator {}'.format(i)).save()
Exemplo n.º 4
0
def build_model(args):
    """Create the guiding net, discriminator and generator (plus EMA
    copies), place them on the requested device(s), and build optimizers.

    Returns:
        (networks, opts): dicts keyed by 'C', 'C_EMA', 'D', 'G', 'G_EMA'
        and 'C', 'D', 'G' respectively.
    """
    args.to_train = 'CDG'

    networks = {}
    opts = {}
    if 'C' in args.to_train:
        networks['C'] = GuidingNet(args.img_size, {'cont': args.sty_dim, 'disc': args.output_k})
        networks['C_EMA'] = GuidingNet(args.img_size, {'cont': args.sty_dim, 'disc': args.output_k})
    if 'D' in args.to_train:
        networks['D'] = Discriminator(args.img_size, num_domains=args.output_k)
    if 'G' in args.to_train:
        networks['G'] = Generator(args.img_size, args.sty_dim, use_sn=False)
        networks['G_EMA'] = Generator(args.img_size, args.sty_dim, use_sn=False)

    # --- device placement ---------------------------------------------
    if args.distributed:
        if args.gpu is None:
            # DDP without an explicit device assignment.
            for name in list(networks):
                networks[name] = torch.nn.parallel.DistributedDataParallel(
                    networks[name].cuda())
        else:
            print('Distributed to', args.gpu)
            torch.cuda.set_device(args.gpu)
            # Split batch/workers across the processes of this node.
            args.batch_size = int(args.batch_size / args.ngpus_per_node)
            args.workers = int(args.workers / args.ngpus_per_node)
            for name in list(networks):
                if name == 'inceptionNet':
                    continue
                networks[name] = torch.nn.parallel.DistributedDataParallel(
                    networks[name].cuda(args.gpu),
                    device_ids=[args.gpu], output_device=args.gpu)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        for name in list(networks):
            networks[name] = networks[name].cuda(args.gpu)
    else:
        for name in list(networks):
            networks[name] = torch.nn.DataParallel(networks[name]).cuda()

    # --- optimizers ----------------------------------------------------
    def _params(key):
        # Under DDP the real module lives at .module.
        net = networks[key]
        return net.module.parameters() if args.distributed else net.parameters()

    if 'C' in args.to_train:
        opts['C'] = torch.optim.Adam(_params('C'), 1e-4, weight_decay=0.001)
        # Start the EMA copy from the live network's weights.
        if args.distributed:
            networks['C_EMA'].module.load_state_dict(
                networks['C'].module.state_dict())
        else:
            networks['C_EMA'].load_state_dict(networks['C'].state_dict())
    if 'D' in args.to_train:
        opts['D'] = torch.optim.RMSprop(_params('D'), 1e-4, weight_decay=0.0001)
    if 'G' in args.to_train:
        opts['G'] = torch.optim.RMSprop(_params('G'), 1e-4, weight_decay=0.0001)

    return networks, opts
Exemplo n.º 5
0
def create_model(args):
    """Build the GAN pair: a conditioned generator and a discriminator.

    Returns:
        (generator, discriminator)
    """
    gen = Generator(args.local_condition_dim, args.z_dim)
    # A multi-random-window discriminator variant exists in the project but
    # is not used here.
    disc = Discriminator()
    return gen, disc
def power_generated(year, month, day):
    """Return JSON listing each generator's daily power totals for the
    seven days ending on (year, month, day), oldest day first.

    Fixes vs. original: removed the unused `date_select` local, the
    redundant `dict({...})` wrappers, and the repeated `.date()` calls.
    """
    generators = Generator.select().order_by(Generator.created_at.asc())
    # The selected day and the six days before it (index 0 = newest).
    seven_day = [
        dt.datetime(year, month, day) - timedelta(days=i) for i in range(7)
    ]
    data = []
    for gen in generators:
        data_list = []
        # Iterate oldest -> newest so the series is chronological.
        for i in reversed(range(7)):
            day_i = seven_day[i].date()
            data_list.append({
                'date': day_i.strftime('%Y-%m-%d'),
                'power': gen.power_generated(day_i.year, day_i.month,
                                             day_i.day),
            })
        data.append({
            'generator_id': gen.id,
            'generator_name': gen.name,
            'power_generated': data_list,
        })
    return jsonify(data)
Exemplo n.º 7
0
def main():
    """Load the logP SMILES dataset and train a sequence generator on it."""
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    gen_type = 'LSTM'
    data_path = "../data/processed/logp_data.csv"
    input_col = 'smiles'
    batch = 128

    (training_data, testing_data, num_chars, max_len,
     class_weights, tokenizer) = load_generator_data(csv_file=data_path,
                                                     input=input_col,
                                                     minibatch_size=batch,
                                                     device=device,
                                                     train_percentage=.95)

    # Invert the tokenizer: token index -> character, for sampling.
    vocab = dict((idx, ch) for ch, idx in tokenizer.word_index.items())

    if gen_type == 'LSTM':
        model = LSTMGenerator(vocabulary=vocab, vocab_size=num_chars,
                              embedding_dim=32).to(device)
    else:
        model = Generator(vocabulary=vocab, vocab_size=num_chars,
                          embedding_dim=32, max_len=max_len).to(device)

    train(model=model,
          train_loader=training_data,
          device=device,
          tqdm=tqdm.tqdm,
          max_len=max_len,
          class_weights=class_weights,
          tokenizer=tokenizer,
          iter_max=1000000,
          iter_save=100000,
          loss_save=1000,
          save_name="LSTM_E32_H512")
Exemplo n.º 8
0
    def __init__(self, args, text_data):
        """Build the embedding layer (ELMo, pre-trained, or fresh), then
        the rationale generator and the encoder."""
        super(ModelGumbel, self).__init__()
        self.args = args
        self.text_data = text_data

        # Pick the embedding layer. ELMo takes priority; otherwise use
        # pre-trained vectors if requested; otherwise train from scratch.
        if self.args.elmo:
            self.embedding_layer = Elmo(options_file,
                                        weight_file,
                                        1,
                                        dropout=1.0 - self.args.drop_out,
                                        requires_grad=self.args.train_elmo)
        elif self.args.pre_embedding:
            # Pre-trained embeddings, fine-tuned during training.
            weights = torch.Tensor(self.text_data.pre_trained_embedding)
            self.embedding_layer = nn.Embedding.from_pretrained(weights,
                                                                freeze=False)
        else:
            self.embedding_layer = nn.Embedding(
                num_embeddings=self.text_data.getVocabularySize(),
                embedding_dim=self.args.embedding_size)

        # Generator selects rationales; encoder classifies from them.
        self.generator = Generator(args=self.args)
        self.encoder = Encoder(args=self.args)
Exemplo n.º 9
0
def main(model_filename, pitch_model_filename, output_dir, batch_size):
    """Synthesize audio for every item in the test set and write .wav files.

    Args:
        model_filename: checkpoint for the encoder+generator module.
        pitch_model_filename: optional pitch-model checkpoint; only used
            when the file exists on disk.
        output_dir: destination directory for the generated .wav files.
        batch_size: number of test items processed per chunk.
    """
    # Container module holding the encoder and generator submodules.
    model = torch.nn.Module()
    model.add_module('encoder', Encoder(**encoder_config))
    model.add_module('generator',
                     Generator(sum(encoder_config['n_out_channels'])))
    model = load_checkpoint(model_filename, model).cuda()
    model.eval()

    # Optionally restore a pitch model into module-level globals that other
    # code reads.
    if os.path.isfile(pitch_model_filename):
        global pitch_model, use_predicted_pitch
        use_predicted_pitch = True
        pitch_model = PitchModel(**pitch_config)
        pitch_model = load_checkpoint(pitch_model_filename, pitch_model).cuda()
        pitch_model.eval()

    testset = TestSet(**(data_config))
    # NOTE(review): `cond` and `name` are never used below — possibly a
    # leftover smoke test of the dataset; confirm before removing.
    cond, name = testset[0]
    for files in chunker(testset, batch_size):
        # Transpose [(cond..., path), ...] into parallel sequences; the
        # last sequence is the file paths.
        files = list(zip(*files))
        cond_input, file_paths = files[:-1], files[-1]
        cond_input = [
            utils.to_gpu(torch.from_numpy(np.stack(x))).float()
            for x in cond_input
        ]

        #cond_input = model.encoder(cond_input.transpose(1, 2)).transpose(1, 2)
        cond_input = model.encoder(cond_input[0])
        audio = model.generator(cond_input)

        # Scale to 16-bit PCM range and write one wav per input file.
        for i, file_path in enumerate(file_paths):
            print("writing {}".format(file_path))
            wav = audio[i].cpu().squeeze().detach().numpy() * 32768.0
            write("{}/{}.wav".format(output_dir, file_path),
                  data_config['sampling_rate'], wav.astype(np.int16))
Exemplo n.º 10
0
    def _build_network(self):
        """Instantiate all sub-modules (embedding, encoder/decoder pair,
        regularizer, generators, discriminator) according to self.cfg."""
        cfg = self.cfg

        # Dispatch tables for the configurable encoder/decoder families.
        encoders = {'cnn': EncoderCNN, 'rnn': EncoderRNN}
        decoders = {'cnn': DecoderCNN, 'rnn': DecoderRNN}

        Encoder = encoders.get(cfg.enc_type)
        if Encoder is None:
            raise ValueError('Unknown encoder type!')

        Decoder = decoders.get(cfg.dec_type)
        if Decoder is None:
            raise ValueError('Unknown decoder type!')

        # Construction order kept as-is (parameter init may consume the
        # global RNG, so order affects reproducibility).
        # NOTE: flagged for later removal in the original.
        self.embed_w = Embedding(cfg, self.vocab_w)  # word embedding
        self.enc = Encoder(cfg)
        # A CodeSmoothingRegularizer variant existed; the variational one
        # is in use.
        self.reg = VariationalRegularizer(cfg)
        self.dec = Decoder(cfg, self.embed_w)
        self.dec2 = Decoder(cfg, self.embed_w)
        self.gen = Generator(cfg)
        self.rev = ReversedGenerator(cfg)
        self.disc = CodeDiscriminator(cfg, cfg.hidden_size_w)

        self._print_modules_info()
        if cfg.cuda:
            self._upload_modules_to_gpu()
def build_model_and_trainer(config, data_loader):
    """Build a model and its trainer according to config.model.type.

    Returns:
        (model, trainer) — for 'dcgan', `model` is the combined GAN model.
    """
    model_type = config.model.type

    if model_type == 'classifier':
        builder = Classifier(config)
        model, parallel_model = WithLoadWeights(
            builder, model_name='classifier').build_model(
                model_name='classifier')
        trainer = ClassifierTrainer(model, parallel_model, data_loader,
                                    config)
        return model, trainer

    if model_type == 'dcgan':
        # Generator is defined directly; discriminator gets the parallel
        # wrapper from its builder.
        g = Generator(config).define_model('generator')
        d, parallel_d = Discriminator(config).build_model('discriminator')

        # Combined GAN model, with weights restored when available.
        combined, parallel_combined = WithLoadWeights(
            GANCombined(config), model_name='combined').build_model(
                g=g, d=d, model_name='combined')

        trainer = GANTrainer(data_loader, config, g, d, parallel_d, combined,
                             parallel_combined)
        return combined, trainer
Exemplo n.º 12
0
def get_base(args, input_dims, device):
    """Build the base/generator network selected by ``args.generator``.

    Supported names: 'convolutional', 'gaussian', 'made', 'nvp', 'maf',
    'mogmaf', 'toy'.

    Raises:
        ValueError: for an unrecognized name. (The original silently fell
            through and crashed at ``return net`` with UnboundLocalError.)
    """
    kind = args.generator
    if kind == 'convolutional':
        net = Generator(nz=args.Z_dim, nn_type=args.g_model).to(device)
    elif kind == 'gaussian':
        net = energy_model.GaussianGenerator([input_dims]).to(device)
    elif kind == 'made':
        net = energy_model.MADEGenerator([input_dims],
                                         mode='generator').to(device)
    elif kind == 'nvp':
        net = energy_model.NVP([input_dims],
                               device,
                               args.num_blocks,
                               mode='generator',
                               with_bn=args.gen_bn).to(device)
    elif kind in ('maf', 'mogmaf'):
        # The two flows differ only in the flow-type string passed through.
        net = energy_model.FlowGenerator([input_dims],
                                         device,
                                         args.num_blocks,
                                         kind,
                                         mode='generator',
                                         with_bn=args.gen_bn).to(device)
    elif kind == 'toy':
        # NOTE(review): the toy generator is intentionally not moved to
        # `device` (matches original behavior) — confirm.
        net = tm.Generator(3)
    else:
        raise ValueError('Unknown generator: {!r}'.format(kind))
    return net
    def __init__(self, train_loader, test_loader, valid_loader, general_args,
                 trainer_args):
        """Set up the generator, its optimizer/scheduler, an optional state
        restore, and the training losses."""
        super(GeneratorTrainer, self).__init__(train_loader, test_loader,
                                               valid_loader, general_args)
        # Checkpoint paths.
        self.loadpath = trainer_args.loadpath
        self.savepath = trainer_args.savepath

        # Generator network on the trainer's device.
        self.generator = Generator(general_args).to(self.device)

        # Adam with a step-decay learning-rate schedule.
        self.optimizer = torch.optim.Adam(self.generator.parameters(),
                                          lr=trainer_args.lr)
        self.scheduler = lr_scheduler.StepLR(
            self.optimizer,
            step_size=trainer_args.scheduler_step,
            gamma=trainer_args.scheduler_gamma)

        # Resume from a checkpoint when one exists at loadpath.
        if os.path.exists(trainer_args.loadpath):
            self.load()

        # Time-domain MSE plus an optional frequency-domain MSE weighted by
        # lambda_freq.
        self.time_criterion = nn.MSELoss()
        self.use_freq_criterion = trainer_args.use_freq_criterion
        self.lambda_freq = trainer_args.lambda_freq
        self.frequency_criterion = nn.MSELoss()
Exemplo n.º 14
0
def make_model(args, dataset):
    """Assemble generator/discriminator/text-encoder networks, one replica
    per GPU, exposed as attributes netG_i / netD_i / netRNN_i."""
    class Model:
        pass

    model = Model()
    num_gpus = args.gpu_num

    netG = Generator(args)
    netD = Discriminator(args)
    netRNN = DS_SJE(args, dataset.embed_mat)

    if num_gpus == 0:
        # No GPUs: keep the single (CPU) copies under index 0.
        model.netG_0 = netG
        model.netD_0 = netD
        model.netRNN_0 = netRNN
    else:
        for gpu_id in range(num_gpus):
            for prefix, net in (("netG", netG), ("netD", netD),
                                ("netRNN", netRNN)):
                replica = net.copy()
                replica.gpu_id = gpu_id
                setattr(model, "{}_{}".format(prefix, gpu_id),
                        replica.to_gpu(gpu_id))

    return model
    def __init__(self, batch_size, img_height, img_width, num_channels,
                 warm_up_generator, config: main_config.MainConfig,
                 output_paths: constants.OutputPaths):
        """Build the GMCNN GAN: a generator plus local and global
        discriminators, and compile the three training models with the
        correct sub-networks frozen for each.

        Args:
            batch_size: training batch size.
            img_height: image height in pixels.
            img_width: image width in pixels.
            num_channels: number of image channels.
            warm_up_generator: stored flag — presumably switches to
                reconstruction-only (warm-up) training; confirm in the
                training loop.
            config: main configuration (training + model sections).
            output_paths: output/checkpoint locations passed to all
                sub-networks.
        """
        super(GMCNNGan,
              self).__init__(img_height, img_width, num_channels, batch_size,
                             config.training.wgan_training_ratio, output_paths)

        # Copy the configuration values used during training onto self.
        self.img_height = img_height
        self.img_width = img_width
        self.num_channels = num_channels
        self.warm_up_generator = warm_up_generator
        self.learning_rate = config.training.learning_rate
        self.num_gaussian_steps = config.model.num_gaussian_steps
        self.gradient_penalty_loss_weight = config.model.gradient_penalty_loss_weight
        self.id_mrf_loss_weight = config.model.id_mrf_loss_weight
        self.adversarial_loss_weight = config.model.adversarial_loss_weight
        self.nn_stretch_sigma = config.model.nn_stretch_sigma
        self.vgg_16_layers = config.model.vgg_16_layers
        self.id_mrf_style_weight = config.model.id_mrf_style_weight
        self.id_mrf_content_weight = config.model.id_mrf_content_weight
        self.gaussian_kernel_size = config.model.gaussian_kernel_size
        self.gaussian_kernel_std = config.model.gaussian_kernel_std
        self.add_mask_as_generator_input = config.model.add_mask_as_generator_input

        # Separate Adam optimizers for the generator and discriminators
        # (WGAN-style betas 0.5/0.9).
        self.generator_optimizer = Adam(lr=self.learning_rate,
                                        beta_1=0.5,
                                        beta_2=0.9)
        self.discriminator_optimizer = Adam(lr=self.learning_rate,
                                            beta_1=0.5,
                                            beta_2=0.9)

        # Raw (uncompiled) sub-networks.
        self.local_discriminator_raw = LocalDiscriminator(
            self.img_height, self.img_width, self.num_channels, output_paths)
        self.global_discriminator_raw = GlobalDiscriminator(
            self.img_height, self.img_width, self.num_channels, output_paths)
        self.generator_raw = Generator(self.img_height, self.img_width,
                                       self.num_channels,
                                       self.add_mask_as_generator_input,
                                       output_paths)

        # Define the generator training model with both discriminators
        # frozen. The enable/disable calls below are order-critical: each
        # define_* call must see exactly one trainable sub-network.
        self.global_discriminator_raw.disable()
        self.local_discriminator_raw.disable()
        self.generator_model = self.define_generator_model(
            self.generator_raw, self.local_discriminator_raw,
            self.global_discriminator_raw)

        # Define the global-discriminator model (generator frozen).
        self.global_discriminator_raw.enable()
        self.generator_raw.disable()
        self.global_discriminator_model = self.define_global_discriminator(
            self.generator_raw, self.global_discriminator_raw)

        # Define the local-discriminator model (global discriminator
        # frozen again; generator remains disabled from the step above).
        self.local_discriminator_raw.enable()
        self.global_discriminator_raw.disable()
        self.local_discriminator_model = self.define_local_discriminator(
            self.generator_raw, self.local_discriminator_raw)
Exemplo n.º 16
0
    def __init__(self,
                 num_fakes=16,
                 rand_dim=128,
                 size=(32, 32),
                 use_gpu=False):
        """GAN wrapper holding a generator/discriminator pair.

        Args:
            num_fakes: presumably the number of fake samples the generator
                produces per call — confirm in Generator.
            rand_dim: dimensionality of the latent noise vector.
            size: (height, width) of the generated images.
            use_gpu: whether the generator runs on GPU.
        """
        super(GAN, self).__init__()
        self.generator = Generator(num_fakes, rand_dim, size, use_gpu)
        # The discriminator receives single-channel images: shape (1, H, W).
        self.discriminator = Discriminator((1, ) + size)
Exemplo n.º 17
0
def main():
    """Predict calorimeter energy deposits for the validation and test sets.

    Reads data_{val,test}.npz from argv[1] and writes
    data_{val,test}_prediction.npz to argv[2]. Returns 0 on success.

    Fixes vs. original: `del calo_dataset_val; calo_dataloader_val` left
    the dataloader alive — the second statement was a bare expression, not
    a `del` (same on the test path). The duplicated val/test pipeline is
    factored into one helper whose locals are released on return.
    """
    input_dir, output_dir = sys.argv[1:]

    print('Pytorch version:', torch.__version__)

    # Restore the generator checkpoint stored next to this script.
    generator_cpu = Generator(NOISE_DIM)
    generator_cpu.load_state_dict(torch.load(
        os.path.dirname(os.path.abspath(__file__)) + '/generator.pt'))
    generator_cpu.eval()

    def predict_and_save(data, path_out):
        # Conditioning vector: particle momentum + first two point coords.
        momentum = torch.tensor(data['ParticleMomentum']).float()
        point = torch.tensor(data['ParticlePoint'][:, :2]).float()
        cond = torch.cat([momentum, point], dim=1)
        loader = torch.utils.data.DataLoader(
            utils.TensorDataset(cond), batch_size=BATCH_SIZE, shuffle=False)

        with torch.no_grad():
            deposits = []
            for batch in tqdm(loader):
                noise = torch.randn(len(batch[0]), NOISE_DIM)
                deposits.append(
                    generator_cpu(noise, batch[0]).detach().numpy())
            # Each prediction is reshaped to a 30x30 deposit image.
            np.savez_compressed(
                path_out,
                EnergyDeposit=np.concatenate(deposits,
                                             axis=0).reshape(-1, 30, 30))

    predict_and_save(np.load(input_dir + '/data_val.npz', allow_pickle=True),
                     output_dir + '/data_val_prediction.npz')
    predict_and_save(np.load(input_dir + '/data_test.npz', allow_pickle=True),
                     output_dir + '/data_test_prediction.npz')

    return 0
Exemplo n.º 18
0
def main(game_name, game_length):
    """Train a GAN level generator alongside an A2C agent (base config:
    base reward, single process, squared-mean generator loss)."""
    # Environment: base reward, no elite sampling.
    env = Env(game_name, game_length, {
        'reward_mode': 'base',
        'reward_scale': 1.0,
        'elite_prob': 0,
    })

    # Generator: 512-d latent, nearest-neighbour upsampling, no dropout,
    # lr 1e-4.
    gen = Generator((512, ), env, 'nearest', 0, .0001)

    # Agent: 1 process, base policy with dropout .3, no reconstruction.
    num_processes = 1
    experiment = "Experiments"
    rl_lr = .00025
    Agent.num_steps = 5
    Agent.entropy_coef = .01
    Agent.value_loss_coef = .1
    agent = Agent(env, num_processes, experiment, 0, rl_lr, 'base', .3,
                  None, .05)

    # Training schedule.
    gen_updates = 1e4
    gen_batch = 32
    rl_batch = 1e4
    agent.writer.add_hparams(
        {
            'Experiment': experiment,
            'RL_LR': rl_lr,
            'Minibatch': gen_batch,
            'RL_Steps': rl_batch,
            'Notes': ''
        }, {})

    # Trainer: version 0, mean elite mode, elites not persisted.
    trainer = Trainer(gen, agent, experiment, 0, 'mean', False)
    # Override the generator loss with a squared-mean objective.
    trainer.loss = lambda x, y: x.mean().pow(2)
    # train(gen_updates, gen_batch, gen_batches=1, diversity_batches=0,
    #       rl_batch, pretrain=0)
    trainer.train(gen_updates, gen_batch, 1, 0, rl_batch, 0)
Exemplo n.º 19
0
def main(game_name, game_length):
    """Train a GAN level generator alongside an A2C agent using the
    configuration that matches the paper's results."""
    # Environment: time-based reward, elites sampled half the time.
    env = Env(game_name, game_length, {
        'reward_mode': 'time',
        'reward_scale': 1.0,
        'elite_prob': .5,
    })

    # Generator: 512-d latent, pixel upsampling, dropout .2, lr 1e-4.
    gen = Generator((512, ), env, 'pixel', .2, .0001)

    # Agent: 16 workers, resnet policy that also reconstructs via `gen`.
    num_processes = 16
    experiment = "Experiment_Paper"
    rl_lr = .00025
    Agent.num_steps = 5
    Agent.entropy_coef = .01
    Agent.value_loss_coef = .1
    agent = Agent(env, num_processes, experiment, 0, rl_lr, 'resnet', 0,
                  gen, .05)

    # Training schedule (paper settings).
    gen_batch = 128
    rl_batch = 1e6
    notes = 'Configured to match paper results'
    agent.writer.add_hparams(
        {
            'Experiment': experiment,
            'RL_LR': rl_lr,
            'Minibatch': gen_batch,
            'RL_Steps': rl_batch,
            'Notes': notes
        }, {})

    # Trainer: version 0, max elite mode, elites persisted across updates.
    trainer = Trainer(gen, agent, experiment, 0, 'max', True)
    # train(gen_updates=100, gen_batch, gen_batches=10,
    #       diversity_batches=90, rl_batch, pretrain=2e7)
    trainer.train(100, gen_batch, 10, 90, rl_batch, 2e7)
Exemplo n.º 20
0
    def build_model(self):
        """Create generator/discriminator and their Adam optimizers, and
        move both networks to the GPU when one is available."""
        self.G = Generator(self.noise_n, self.G_last_act)
        # The BEGAN discriminator is built with noise_n // 2 — presumably
        # its hidden dimension; confirm against BEGAN_Discriminator.
        self.D = BEGAN_Discriminator(self.noise_n // 2, self.D_last_act)

        betas = [self.beta1, self.beta2]
        self.G_optimizer = torch.optim.Adam(self.G.parameters(), self.G_lr,
                                            betas)
        self.D_optimizer = torch.optim.Adam(self.D.parameters(), self.D_lr,
                                            betas)

        if torch.cuda.is_available():
            self.G.cuda()
            self.D.cuda()
Exemplo n.º 21
0
 def __init__(self, sent_len, vocab_size):
     """Build a sentence Generator graph and restore its trained weights.

     Args:
         sent_len: sentence length the generator was trained for; also
             selects the checkpoint directory "model_50_<sent_len>".
         vocab_size: vocabulary size for the generator's embedding.
     """
     # Isolate the generator in its own TF graph and session.
     graph = tf.Graph()
     with graph.as_default():
         self.G = Generator(sent_len=sent_len,
                            vocab_size=vocab_size,
                            embedding_size=300,
                            init_embedding=None,
                            rnn_h_dim=150,
                            batch_size=50)
         ckpt = CheckpointManager("model_50_" + str(sent_len))
         self.sess = tf.Session(graph=graph)
         ckpt.restore(self.sess)
Exemplo n.º 22
0
    def __init__(self, embeddings, args):
        """Add the rationale generator and highlight/exploration settings
        on top of the base 3-player classification model.

        Args:
            embeddings: word embeddings forwarded to the base class.
            args: config providing highlight_percentage, highlight_count,
                exploration_rate, and an optional margin.
        """
        super(HardRationale3PlayerClassificationModel,
              self).__init__(embeddings, args)
        self.generator = Generator(args, self.input_dim)
        self.highlight_percentage = args.highlight_percentage
        self.highlight_count = args.highlight_count
        self.exploration_rate = args.exploration_rate

        # Per-element loss. `reduce=False` is deprecated in modern PyTorch;
        # reduction='none' is its documented equivalent.
        self.loss_func = nn.CrossEntropyLoss(reduction='none')

        # NOTE(review): margin is only set when provided — downstream code
        # presumably checks hasattr; confirm.
        if args.margin is not None:
            self.margin = args.margin
Exemplo n.º 23
0
def main(args):
    """Run an IPNet variant on a voxelized point cloud and write the
    reconstructed mesh(es) under args.out_path."""
    # Voxelize the input point cloud at the requested resolution.
    pc = trimesh.load(args.pc)
    pc_vox, scale, cent = pc2vox(pc.vertices, args.res)
    pc_vox = np.reshape(pc_vox, (args.res,) * 3).astype('float32')

    # Persist scale/centering so results can be mapped back to the scan.
    from utils.preprocess_scan import SCALE, new_cent
    np.save(join(args.out_path, 'cent.npy'),
            [scale / SCALE, (cent - new_cent)])

    # Model selection table: model name -> (net class, generator class,
    # number of parts).
    builders = {
        'IPNet': (model.IPNet, GeneratorIPNet, 14),
        'IPNetMano': (model.IPNetMano, GeneratorIPNetMano, 7),
        'IPNetSingleSurface': (model.IPNetSingleSurface, Generator, 14),
    }
    if args.model not in builders:
        print('Wow watch where u goin\' with that model')
        exit()

    net_cls, gen_cls, num_parts = builders[args.model]
    net = net_cls(hidden_dim=args.decoder_hidden_dim, num_parts=num_parts)
    gen = gen_cls(net, 0.5, exp_name=None, resolution=args.retrieval_res,
                  batch_points=args.batch_points)

    # Restore trained weights.
    print('Loading weights from,', args.weights)
    checkpoint_ = torch.load(args.weights)
    net.load_state_dict(checkpoint_['model_state_dict'])

    if not os.path.exists(args.out_path):
        os.makedirs(args.out_path)

    # Add a batch dimension for the network input.
    data = {'inputs': torch.tensor(pc_vox[np.newaxis])}

    if args.model == 'IPNet':
        full, body, parts = gen.generate_meshs_all_parts(data)
        body.set_vertex_colors_from_weights(parts)
        body.write_ply(args.out_path + '/body.ply')
        body.write_obj(args.out_path + '/body_nosmpl.obj')
        np.save(args.out_path + '/parts.npy', parts)
    elif args.model == 'IPNetMano':
        full, parts = gen.generate_meshs_all_parts(data)
        np.save(args.out_path + '/parts.npy', parts)
    elif args.model == 'IPNetSingleSurface':
        full = gen.generate_mesh_all(data)

    full.write_ply(args.out_path + '/full.ply')
    full.write_obj(args.out_path + '/full_nosmpl.obj')
Exemplo n.º 24
0
def pretrain(img_data_loader, num_epochs=100, decay_factor=0.1, initial_lr=0.0001, checkpoint=None, save=True):
    """Pretrain the SR generator with a pixel-wise MSE loss.

    Args:
        img_data_loader: yields (hr_imgs, lr_imgs) batches.
        num_epochs: total epochs, including those already completed in
            `checkpoint`.
        decay_factor: lr multiplier applied once at the halfway epoch.
        initial_lr: Adam lr for a freshly created generator.
        checkpoint: optional torch checkpoint path containing 'generator',
            'epoch', and 'generator_optimizer'.
        save: whether to save the final model at the end.

    Fix vs. original: `epoch` was undefined at save time when the training
    loop never ran (starting_epoch >= num_epochs); it now defaults to the
    last completed epoch.
    """
    if checkpoint is not None:
        imported_checkpoint = torch.load(checkpoint)
        generator = imported_checkpoint['generator']
        starting_epoch = imported_checkpoint['epoch'] + 1
        generator_optimizer = imported_checkpoint['generator_optimizer']
    else:
        generator = Generator()
        generator_optimizer = torch.optim.Adam(
            params=filter(lambda p: p.requires_grad, generator.parameters()),
            lr=initial_lr)
        starting_epoch = 0

    pretrain_criterion = F.mse_loss
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Push generator to gpu if it's available
    generator.to(device)

    generator.train()
    epoch = starting_epoch - 1  # last completed epoch if the loop is skipped
    for epoch in range(starting_epoch, num_epochs):
        # If we're halfway through, reduce the learning rate once.
        if epoch == num_epochs // 2:
            for group in generator_optimizer.param_groups:
                group['lr'] = group['lr'] * decay_factor

        running_loss = 0.0
        for ii, (hr_imgs, lr_imgs) in enumerate(tqdm(img_data_loader)):
            hr_imgs, lr_imgs = hr_imgs.to(device), lr_imgs.to(device)

            # Super-resolve the low-resolution images.
            sr_imgs = generator(lr_imgs)

            # Compute loss, backpropagate, and update the generator.
            loss = pretrain_criterion(sr_imgs, hr_imgs)
            generator_optimizer.zero_grad()
            loss.backward()
            generator_optimizer.step()

            running_loss += loss.item()
            del hr_imgs, lr_imgs, sr_imgs

        print("Pretraining epoch {}, Average loss: {}".format(
            epoch, running_loss / len(img_data_loader)))

    if save:
        # NOTE: saves whole objects rather than state_dicts; loading needs
        # the original class definitions available at unpickle time.
        torch.save({'epoch': epoch,
                    'generator': generator,
                    'generator_optimizer': generator_optimizer},
                   'pretrained_celebahq_generator.pth.tar')
    del generator
Exemplo n.º 25
0
def main():
    """Train a Gumbel-Softmax GAN for character sequence generation.

    Pretrains the generator LSTM with maximum likelihood, then alternates
    generator/discriminator updates for ``total_epochs`` epochs, samples
    from the trained generator, saves checkpoints, and plots losses.

    Relies on module-level configuration globals (``data_path``,
    ``batch_size``, ``vocab_size``, ``total_epochs``, ``opt``, etc.).
    """
    data_loader = DataLoader(data_path, batch_size)
    generator = Generator(vocab_size, g_emb_dim, g_hidden_dim)
    discriminator = Discriminator(vocab_size, d_hidden_dim)

    gen_optimizer = optim.Adam(generator.parameters(), lr=0.0001)
    disc_optimizer = optim.Adam(discriminator.parameters(), lr=0.0001)

    bce_criterion = nn.BCELoss()
    # NOTE: `size_average=False` was deprecated and later removed from
    # torch loss constructors; `reduction='sum'` is the equivalent.
    gen_criterion = nn.NLLLoss(reduction='sum')

    if opt.cuda:
        generator.cuda()

    # Maximum-likelihood pretraining of the generator LSTM (10 epochs).
    pretrain_lstm(generator, data_loader, gen_optimizer, gen_criterion, 10)

    all_G_losses = []
    all_D_losses = []
    for i in tqdm(range(total_epochs)):
        g_losses, d_losses = train_gan_epoch(discriminator, generator,
                                             data_loader, gen_optimizer,
                                             disc_optimizer, bce_criterion)
        all_G_losses += g_losses
        all_D_losses += d_losses

    # Draw a batch of sequences from the trained generator.
    sample = generator.sample(batch_size, g_seq_length)

    print(generator)

    # Decode the sampled token ids back to characters and dump to disk.
    with open('./data/gumbel_softmax_gan_gen.txt', 'w') as f:
        for each_str in data_loader.convert_to_char(sample):
            f.write(each_str + '\n')

    gen_file_name = 'gen_gumbel_softmax_' + str(total_epochs) + '.pth'
    disc_file_name = 'disc_gumbel_softmax_' + str(total_epochs) + '.pth'

    Utils.save_checkpoints(checkpoint_dir, gen_file_name, generator)
    Utils.save_checkpoints(checkpoint_dir, disc_file_name, discriminator)

    plt.plot(list(range(len(all_G_losses))),
             all_G_losses,
             'g-',
             label='gen loss')
    plt.plot(list(range(len(all_D_losses))),
             all_D_losses,
             'b-',
             label='disc loss')
    plt.legend()
    plt.show()
Exemplo n.º 26
0
def get_modules(opt):
    """Instantiate the three model components and return them keyed by role.

    :param opt: options namespace; ``opt.cuda`` selects GPU placement.
    :return: dict mapping 'Discriminator', 'Generator', 'Classifier' to modules.
    """
    discriminator = Discriminator()
    generator = Generator()
    classifier = Classifier()

    # Move every module to the GPU when requested.
    if opt.cuda:
        discriminator = discriminator.cuda()
        generator = generator.cuda()
        classifier = classifier.cuda()

    return {
        'Discriminator': discriminator,
        'Generator': generator,
        'Classifier': classifier,
    }
Exemplo n.º 27
0
    def __init__(self, opt, training, device):
        """
        :param opt: options for creating the generator and discriminator
        :param training: True to also build the discriminator, losses and
            optimizers; False for inference (generator only).
        :param device: torch device to place the networks on.
        :raises ValueError: if ``opt.netD_name`` is not a known discriminator.
        """
        super(Pix2Pix, self).__init__()

        self.opt = opt
        self.training = training
        self.device = device

        # The generator is needed in both training and inference modes;
        # build it once instead of duplicating the code in each branch.
        self.netG = Generator(self.opt.netG_in, self.opt.netG_out)
        self.netG.init_weight()
        self.netG.to(self.device)

        if self.training:
            if self.opt.netD_name == 'PixelGAN':
                self.netD = PixelGAN(self.opt.netD_in, self.opt.netD_out)
            elif self.opt.netD_name == 'PatchGAN':
                self.netD = PatchGAN(self.opt.netD_in, self.opt.netD_out,
                                     self.opt.netD_layers)
            else:
                # Fail loudly here rather than with a NameError below.
                raise ValueError(
                    'Unknown netD_name: {}'.format(self.opt.netD_name))
            self.netD.init_weight()
            self.netD.to(self.device)

            # GAN adversarial loss + L1 reconstruction loss.
            self.lossGAN = BCEWithLogitsLoss().to(self.device)
            self.lossL1 = L1Loss().to(self.device)

            self.optimizer_G = Adam(self.netG.parameters(),
                                    lr=self.opt.lr,
                                    betas=(self.opt.beta1, self.opt.beta2))
            self.optimizer_D = Adam(self.netD.parameters(),
                                    lr=self.opt.lr,
                                    betas=(self.opt.beta1, self.opt.beta2))
Exemplo n.º 28
0
def get_generator(loadpath, device, general_args):
    """
    Returns a pre-trained generator in evaluation mode.
    :param loadpath: location of the generator trainer (string).
    :param device: either 'cpu' or 'cuda' depending on hardware availability (string).
    :param general_args: argument parser that contains the arguments that are independent to the script being executed.
    :return: pre-trained generator (nn.Module).
    """
    # Build a fresh generator with the same architecture, then restore
    # the saved weights from the checkpoint on the target device.
    state = torch.load(loadpath, map_location=device)
    net = Generator(general_args=general_args).to(device)
    net.load_state_dict(state['generator_state_dict'])

    # Switch to inference mode before handing the model back.
    net.eval()
    return net
Exemplo n.º 29
0
def build_model(args):
    """Construct the GAN networks plus EMA shadow copies.

    :param args: configuration passed through to each network constructor.
    :return: (nets, nets_ema) — Munch bundles of the live networks and of
        deep-copied EMA versions (generator and mapping network only).
    """
    gen = Generator(args)
    disc = Discriminator(args)
    mapper = MappingNetwork(args)

    nets = Munch(generator=gen,
                 discriminator=disc,
                 mapping_network=mapper)

    # EMA copies track the generator/mapping weights; the discriminator
    # has no EMA counterpart.
    nets_ema = Munch(generator=copy.deepcopy(gen),
                     mapping_network=copy.deepcopy(mapper))

    return nets, nets_ema
Exemplo n.º 30
0
    def __init__(self, config, debug=False):
        """Build the AdaIN generator and its domain-embedding network.

        :param config: configuration object providing color_dim, image_size,
            style_dim and c_dim attributes.
        :param debug: when True, calls ``self.debug()`` after construction
            and is forwarded to the DE module.
        """
        super(AdaInGEN, self).__init__()

        # Cache the relevant config dimensions on the instance.
        self.config = config
        self.color_dim = config.color_dim
        self.image_size = config.image_size
        self.style_dim = config.style_dim
        self.c_dim = config.c_dim
        # NOTE(review): Generator is built with debug=False even when the
        # `debug` parameter is True — looks deliberate (only DE/self.debug()
        # honor the flag), but worth confirming.
        self.generator = Generator(config, debug=False)
        # Input to the domain embedding: concatenated style + domain codes.
        in_dim = self.style_dim + self.c_dim

        # DE produces the AdaIN parameters sized for this generator.
        de_params = self.get_num_de_params(self.generator)
        self.Domain_Embedding = DE(
            config, in_dim, de_params, train=False, debug=debug)
        if debug:
            self.debug()