def __init__(self,
                 model,
                 pruning_rate,
                 previous_masks,
                 current_dataset_idx,
                 previous_samples=None,
                 unrelated_tasks=None):

        self.model = model
        self.pruning_rate = pruning_rate
        self.pruner = pruner.Pruner(self.model, self.pruning_rate,
                                    previous_masks, current_dataset_idx)

        self.unrelated_tasks = unrelated_tasks
        self.discriminator = discriminator.Discriminator(
            previous_samples, unrelated_tasks)

        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=1e-3,
                                   momentum=0.9)
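        # MultiStepLR with milestones=[10] and gamma=0.2 cuts the learning rate
        # to a fifth after epoch 10.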
        self.decay = [10]
        self.scheduler = MultiStepLR(self.optimizer,
                                     milestones=self.decay,
                                     gamma=0.2)
Code example #2
File: gan.py  Project: VirtualNonsense/Project_CGAN
    def __init__(self,
                 channels,
                 width,
                 height,
                 num_classes: int,
                 embed_size: int,
                 latent_dim: int = 100,
                 lr: float = 0.002,
                 b1: float = 0.5,
                 b2: float = 0.999,
                 batch_size: int = 1024,
                 **kwargs):
        super().__init__()
        self.save_hyperparameters()

        # networks
        data_shape = (channels, width, height)
        self.generator = generator.Generator(
            num_classes=num_classes,
            embed_size=embed_size,
            latent_dim=self.hparams.latent_dim,
            img_shape=data_shape,
            output_dim=int(np.prod(data_shape)))
        self.discriminator = discriminator.Discriminator(
            img_shape=data_shape,
            output_dim=int(np.prod(data_shape)),
            num_classes=num_classes,
        )

        self.validation_z = torch.rand(batch_size, self.hparams.latent_dim)

        self.example_input_array = torch.zeros(2, self.hparams.latent_dim)
Code example #3
File: he_gan.py  Project: zjl130345/HeGAN
    def build_discriminator(self):
        # with tf.variable_scope("discriminator"):
        self.discriminator = discriminator.Discriminator(
            n_node=self.n_node,
            n_relation=self.n_relation,
            node_emd_init=self.node_embed_init_d,
            relation_emd_init=None)
Code example #4
File: anvae2.py  Project: oadonca/ANVAE
    def __init__(self, latent_spaces, batch_size):
        super(ANVAE, self).__init__()

        self.batch_size = batch_size
        self.latent_spaces = 3
        self.level_sizes = [1, 1, 1]
        self.input_s = [32, 32, 1]
        self.latent_channels = 20
        self.h_dim = 1000

        self.encoder = encoder.Encoder(self.latent_spaces, self.input_s)
        self.decoder = decoder.Decoder(self.encoder(
            tf.zeros([self.batch_size, 32, 32, 1]), False),
                                       latent_channels=self.latent_channels,
                                       level_sizes=self.level_sizes)
        self.discriminator = discriminator.Discriminator(
            self.latent_spaces, self.input_s, self.h_dim)

        self.lr_ae = .0001
        self.lr_dc = .0001
        self.lr_gen = .0001

        self.ae_optimizer = tf.keras.optimizers.Adamax(self.lr_ae, clipnorm=2)
        self.gen_optimizer = tf.keras.optimizers.Adamax(self.lr_gen,
                                                        clipnorm=2)
        self.dc_optimizer = tf.keras.optimizers.Adamax(self.lr_dc, clipnorm=2)

        self.ae_loss_weight = 1.
        self.gen_loss_weight = 6.
        self.dc_loss_weight = 6.

        self.lastEncVars = []
        self.lastDecVars = []
        self.lastDiscVars = []

        self.debugCount = 0
        self.counter = 1

        self.log_writer = tf.summary.create_file_writer(logdir='./tf_summary')
        self.step_count = 0

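        # Collect every Conv2D / DepthwiseConv2D layer from the encoder, decoder
        # and discriminator; the sr_u / sr_v dicts and num_power_iter suggest
        # they feed a spectral-norm power iteration (an assumption based on the
        # names, not shown in this snippet).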
        self.conv_layers = []
        self.sr_u = {}
        self.sr_v = {}
        self.num_power_iter = 4

        for layer in self.encoder.layers:
            if isinstance(layer, tf.keras.layers.Conv2D) or isinstance(
                    layer, tf.keras.layers.DepthwiseConv2D):
                self.conv_layers.append(layer)

        for layer in self.decoder.layers:
            if isinstance(layer, tf.keras.layers.Conv2D) or isinstance(
                    layer, tf.keras.layers.DepthwiseConv2D):
                self.conv_layers.append(layer)

        for layer in self.discriminator.layers:
            if isinstance(layer, tf.keras.layers.Conv2D) or isinstance(
                    layer, tf.keras.layers.DepthwiseConv2D):
                self.conv_layers.append(layer)
Code example #5
File: solver.py  Project: cnglen/SeqGAN-Pytorch
    def __init__(self, vocab_size, batch_size, pre_gen_epochs, pre_dis_epochs,
                 gan_epochs, generate_sum, sequence_len, lr, real_file,
                 fake_file, eval_file, update_rate):
        super(Solver, self).__init__()
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.pre_gen_epochs = pre_gen_epochs
        self.pre_dis_epochs = pre_dis_epochs
        self.gan_epochs = gan_epochs
        self.generate_sum = generate_sum
        self.sequence_len = sequence_len
        self.lr = lr
        self.real_file = real_file
        self.fake_file = fake_file
        self.eval_file = eval_file
        self.update_rate = update_rate

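        # SeqGAN components: a CNN discriminator over token sequences, an LSTM
        # generator, and a target LSTM (in SeqGAN experiments this usually acts
        # as the oracle that produces the "real" sequences).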
        self.discriminator = discriminator.Discriminator(
            sequence_len, vocab_size, DisParams.emb_dim,
            DisParams.filter_sizes, DisParams.num_filters, DisParams.dropout)
        self.generator = generator.Generator(vocab_size, GenParams.emb_dim,
                                             GenParams.hidden_dim,
                                             GenParams.num_layers)
        self.target_lstm = target_lstm.TargetLSTM(vocab_size,
                                                  GenParams.emb_dim,
                                                  GenParams.hidden_dim,
                                                  GenParams.num_layers)

        self.discriminator = util.to_cuda(self.discriminator)
        self.generator = util.to_cuda(self.generator)
        self.target_lstm = util.to_cuda(self.target_lstm)
Code example #6
    def build_discriminator(self):
        """initialize the discriminator"""

        with tf.variable_scope("discriminator"):
            self.discriminator = discriminator.Discriminator(
                n_node=self.n_node,
                node_emd_init=self.node_embed_init_d,
                node_features=self.node_feature_init)
Code example #7
File: channel.py  Project: zhufengGNSS/NavLab-DPE-SDR
    def __init__(self, prn, receiver, param_list=_M_ARRAY_DATA_NAMES):
        """
        Constructs a Channel object with specified prn and measurement length.
        """

        self.prn = prn
        self.receiver = receiver
        self.rawfile = self.receiver.rawfile

        self._measurement_logs_on = False
        self.init_measurement_logs(param_list=param_list)

        self._cpcount = 0  # internal code period counter
        self.cp[0] = 0

        # Initialize correlator.
        self.correlator = correlator.Correlator(self.prn, channel=self)

        # Initialize discriminators and loopfilters.
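        # The 'DLL' discriminator tracks the code phase and the 'PLL'
        # discriminator tracks the carrier phase; Bnp is the loop noise
        # bandwidth in Hz (assumed from standard GNSS tracking-loop usage).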
        self.cdiscriminator = discriminator.Discriminator(flavor='DLL',
                                                          channel=self)
        self.idiscriminator = discriminator.Discriminator(flavor='PLL',
                                                          channel=self)
        self.cloopfilter = loopfilter.LoopFilter(self.rawfile.T,
                                                 Bnp=3.0,
                                                 channel=self)
        self.iloopfilter = loopfilter.LoopFilter(self.rawfile.T,
                                                 Bnp=40.0,
                                                 channel=self)

        # Initialize lockdetector and snrmeter.
        self.lockdetector = lockdetector.LockDetector(N=20,
                                                      k=1.5,
                                                      lossthreshold=50,
                                                      lockthreshold=240)
        self.snrmeter = snrmeter.SignalNoiseMeter(N=20, T=self.rawfile.T)
Code example #8
def trainer(model_params):
    datasets = load_dataset('dataset', model_params)
    train_set = datasets[0][0] + datasets[0][1]
    valid_set = datasets[1][0] + datasets[1][1]
    test_set = datasets[2][0] + datasets[2][1]
    labels = {dataset: i for (i, dataset) in enumerate(model_params.data_set)}
    train_generator = DataGenerator(train_set,
                                    labels,
                                    batch_size=model_params.batch_size,
                                    shuffle=True)
    valid_generator = DataGenerator(valid_set,
                                    labels,
                                    batch_size=model_params.batch_size,
                                    shuffle=True)
    model = discriminator.Discriminator(model_params)
    model.train(train_generator, valid_generator)
    model.save()
    print('Done!')
Code example #9
File: utils.py  Project: CopperWasp/DisentanglingGANs
def init_GAN():
    # models
    generator = g.Generator()
    discriminator = d.Discriminator()
    # Loss functions
    adversarial_loss = torch.nn.MSELoss()
    categorical_loss = torch.nn.CrossEntropyLoss()
    continuous_loss = torch.nn.MSELoss()

    if cuda:
        generator.cuda()
        discriminator.cuda()
        adversarial_loss.cuda()
        categorical_loss.cuda()
        continuous_loss.cuda()

    generator.apply(weights_init_normal)
    discriminator.apply(weights_init_normal)
    return generator, discriminator, adversarial_loss, categorical_loss, continuous_loss
Code example #10
def create_model(actions, sampling=False):

    policy_net = seq2seq_model.Seq2SeqModel(
        FLAGS.architecture,
        FLAGS.seq_length_in if not sampling else 50,
        FLAGS.seq_length_out if not sampling else 25,
        FLAGS.size, # hidden layer size
        FLAGS.num_layers,
        FLAGS.batch_size,
        summaries_dir,
        FLAGS.loss_to_use if not sampling else "sampling_based",
        len( actions ),
        device,
        not FLAGS.omit_one_hot,
        FLAGS.residual_velocities,
        stochastic = True,
        dtype=torch.float32)

    discrim_net = discriminator.Discriminator(
        not FLAGS.omit_one_hot,
        FLAGS.discrim_hidden_size,
        FLAGS.batch_size,
        FLAGS.discrim_num_layers,
        len( actions )
    )

    # Initialize a new model
    if FLAGS.load <= 0 and FLAGS.discrim_load <= 0:
        print("Creating model with fresh parameters.")
        # TODO: initialize parameters here
        return policy_net, discrim_net
    # Load the models from a saved checkpoint iteration
    if os.path.isfile(os.path.join(train_dir, 'pretrain-policy-checkpoint-{0}.pt'.format(FLAGS.load))):
        policy_net.load_state_dict(torch.load(os.path.join(train_dir, 'pretrain-policy-checkpoint-{0}.pt'.format(FLAGS.load))))
    elif FLAGS.load > 0:
        raise ValueError("Asked to load pretrain policy checkpoint {0}, but it does not seem to exist".format(FLAGS.load))

    if os.path.isfile(os.path.join(train_dir, 'pretrain-discrim-checkpoint-{0}.pt'.format(FLAGS.discrim_load))):
        discrim_net.load_state_dict(torch.load(os.path.join(train_dir, 'pretrain-discrim-checkpoint-{0}.pt'.format(FLAGS.discrim_load))))
    elif FLAGS.discrim_load > 0:
        raise ValueError("Asked to load pretrain discrim checkpoint {0}, but it does not seem to exist".format(FLAGS.discrim_load))

    return policy_net, discrim_net
Code example #11
File: trainer.py  Project: rsberrocal/minecraft_irl
def train(train_dataset, test_dataset, epochs, tr_urls):
    gen = generator.Generator()
    generator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
    discr = discriminator.Discriminator()
    discriminator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
    checkpoint = tf.train.Checkpoint(
        generator_optimizer=generator_optimizer,
        discriminator_optimizer=discriminator_optimizer,
        generator=gen,
        discriminator=discr)

    summary_writer = tf.summary.create_file_writer(
        variables.LOG_DIR + '/' +
        datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))

    for epoch in range(epochs):
        start = time.time()

        imgi = 0
        for input_image, target in train_dataset:
            print('epoch ' + str(epoch) + ' - train: ' + str(imgi) + '/' +
                  str(len(tr_urls)))
            imgi += 1
            train_step(input_image, target, gen, generator_optimizer, discr,
                       discriminator_optimizer, summary_writer, epoch)

        if (epoch + 1) % 3 == 0:
            checkpoint.save(file_prefix=variables.CHECK_DIR + 'ckpt')

        print('Time taken for epoch {} is {} sec\n'.format(
            epoch + 1,
            time.time() - start))

    checkpoint.save(file_prefix=variables.CHECK_DIR + 'ckpt')

    checkpoint.restore(tf.train.latest_checkpoint(variables.CHECK_DIR))

    # clear
    imgi = 0
    for inp, tar in test_dataset.take(5):
        generate_images(gen, inp, tar)
        imgi += 1
Code example #12
File: train.py  Project: Gylleus/icon-gen
def main(config: str):
    config = load_config(config)
    parameters = config['parameters']
    train_data = load_data(
        data_path=config['data']['directory'],
        img_size=parameters['img_size'],
        batch_size=parameters['batch_size'],
        use_rgb=parameters["rgb"],
    )

    gen = generator.Generator(
        generator.GeneratorConfig.from_config_dict(parameters))
    disc = discriminator.Discriminator(
        discriminator.DiscriminatorConfig.from_config_dict(parameters))
    trainer = Trainer(gen,
                      disc,
                      noise_dim=parameters['generator']['noise_dim'],
                      rgb=parameters["rgb"])
    trainer.train(dataset=train_data,
                  epochs=config['train']['epochs'],
                  save_interval=config['train']['save_interval'])
    gen.save("saved_model")
    tfjs.converters.save_keras_model(gen, "saved_js_model")
Code example #13
File: main.py  Project: Jasonlee1995/StyleGAN
    def __init__(self,
                 z_dim=512,
                 resolution=1024,
                 randomize_noise=True,
                 gpu_id=0):
        self.z_dim = z_dim
        self.gpu = gpu_id

        torch.cuda.set_device(self.gpu)

        # loss function : non-saturating loss with R1 regularization
        self.loss_function_G = loss.G_logistic_nonsaturating()
        self.loss_function_D = loss.D_logistic()

        self.G = generator.Generator(self.z_dim,
                                     resolution=resolution,
                                     randomize_noise=randomize_noise).cuda(
                                         self.gpu)
        self.D = discriminator.Discriminator(resolution=resolution).cuda(
            self.gpu)

        self.train_G_losses = []
        self.train_D_losses = []
Code example #14
File: main.py  Project: skyerhxx/SeqGAN
                                 VOCAB_SIZE,
                                 MAX_SEQ_LEN,
                                 gpu=CUDA)
    oracle.load_state_dict(torch.load(oracle_state_dict_path))
    oracle_samples = torch.load(oracle_samples_path).type(torch.LongTensor)
    # a new oracle can be generated by passing oracle_init=True in the generator constructor
    # samples for the new oracle can be generated using helpers.batchwise_sample()

    gen = generator.Generator(GEN_EMBEDDING_DIM,
                              GEN_HIDDEN_DIM,
                              VOCAB_SIZE,
                              MAX_SEQ_LEN,
                              gpu=CUDA)
    dis = discriminator.Discriminator(DIS_EMBEDDING_DIM,
                                      DIS_HIDDEN_DIM,
                                      VOCAB_SIZE,
                                      MAX_SEQ_LEN,
                                      gpu=CUDA)

    if CUDA:
        oracle = oracle.cuda()
        gen = gen.cuda()
        dis = dis.cuda()
        oracle_samples = oracle_samples.cuda()

    # Train the generator with MLE
    print('Starting Generator MLE Training...')
    gen_optimizer = optim.Adam(gen.parameters(), lr=1e-2)
    train_generator_MLE(gen, gen_optimizer, oracle, oracle_samples,
                        MLE_TRAIN_EPOCHS)
Code example #15
File: train.py  Project: zoharli/SRGAN-tensorflow
    return minibatch, rescaled


with tf.device('/cpu:0'):
    minibatch, rescaled = read(filenames)
resnet = srResNet.srResNet(rescaled)
result = (resnet.conv5 + 1) * 127.5
gen_var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)

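# Perceptual content loss: pass the real and generated batches through VGG19 and
# compare the conv5_4 feature maps with MSE; the squared-error adversarial terms
# below amount to a least-squares GAN objective.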
dbatch = tf.concat([tf.cast(minibatch, tf.float32), result], 0)
vgg = vgg19.Vgg19()
vgg.build(dbatch)
fmap = tf.split(vgg.conv5_4, 2)
content_loss = tf.losses.mean_squared_error(fmap[0], fmap[1])

disc = discriminator.Discriminator(dbatch)
D_x, D_G_z = tf.split(tf.squeeze(disc.dense2), 2)
adv_loss = tf.reduce_mean(tf.square(D_G_z - 1.0))
gen_loss = (adv_loss + content_loss)
disc_loss = (tf.reduce_mean(tf.square(D_x - 1.0) + tf.square(D_G_z)))
disc_var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
for x in gen_var_list:
    disc_var_list.remove(x)

global_step = tf.Variable(0, trainable=0, name='global_step')
gen_train_step1 = tf.train.AdamOptimizer(learn_rate1).minimize(
    gen_loss, global_step, gen_var_list)
#gen_train_step2=tf.train.AdamOptimizer(learn_rate2).minimize(gen_loss,global_step)
disc_train_step1 = tf.train.AdamOptimizer(learn_rate1).minimize(
    disc_loss, global_step, disc_var_list)
#disc_train_step2=tf.train.AdamOptimizer(learn_rate2).minimize(disc_loss,global_step)
Code example #16
    def build_discriminator(self):
        """initializing the discriminator"""

        with tf.compat.v1.variable_scope("discriminator"):
            self.discriminator = discriminator.Discriminator(
                self.graph.n_node, self.node_embed_init_d, config)
Code example #17

def parse_args():
    """
    Creates command line arguments with the same name and default values as those in the global opts variable
    Then updates opts using their respective argument values
    """

    # Parse command line arguments to assign to the global opts variable
    parser = argparse.ArgumentParser(
        description=
        'colorize images using conditional generative adversarial networks')
    for opt_name, value in opts.items():
        parser.add_argument("--%s" % opt_name, default=value)

    # Update global opts variable using flag values
    args = parser.parse_args()
    for opt_name, _ in opts.items():
        opts[opt_name] = getattr(args, opt_name)


parse_args()
with tf.Session() as sess:
    # Initialize networks
    gen = generator.Generator()
    disc = discriminator.Discriminator()

    # Train them
    t = Trainer(sess, gen, disc, opts)
    t.train()
Code example #18
                                 args.vocab_size,
                                 args.max_seq_len,
                                 gpu=args.cuda,
                                 oracle_init=True)

gen = generator.Generator(
    args.g_emb_dim,
    args.g_hid_dim,
    args.vocab_size,
    args.max_seq_len,
    gpu=args.cuda,
)

dis = discriminator.Discriminator(args.d_emb_dim,
                                  args.d_hid_dim,
                                  args.vocab_size,
                                  args.max_seq_len,
                                  gpu=args.cuda)

if args.cuda:
    oracle = oracle.cuda()
    gen = gen.cuda()
    dis = dis.cuda()

oracle_samples = helpers.batchwise_sample(oracle, args.num_data)
if args.oracle_save is not None:
    torch.save(oracle.state_dict(), args.oracle_save)

logger = logger.Logger(args.log_dir)
# GENERATOR MLE TRAINING
gen_optimizer = optim.Adam(gen.parameters(), lr=1e-2)
Code example #19
File: train.py  Project: Shrini97/SeeInTheDark
	def __init__(self):
		
		# self.IsTraining = tf.placeholder(tf.bool)

		"""
		Placeholder for the Generator input; it is a 4-channel input.
		"""
		self.GeneratorInput=tf.placeholder(tf.float32, [None, None, None, 4])
		
		"""
		DiscriminatorLabelsFake is always an array of zeros, expanded to the
		same size as the batch size.

		DiscriminatorLabelsReal is an array of ones, expanded to the same size
		as the batch size.

		GeneratorLabels takes the same labels as DiscriminatorLabelsReal.

		TargetImagePlaceholder is used when we need to feed the target image to
		the discriminator.
		"""
		self.DiscriminatorLabelsFake=tf.placeholder(tf.float32, [None, 1])
		self.DiscriminatorLabelsReal=tf.placeholder(tf.float32, [None, 1])
		self.GeneratorLabels=tf.placeholder(tf.float32, [None, 1])

		self.TargetImagePlaceholder=tf.placeholder(tf.float32, [None, None, None, 3])
		self.GeneratedImage=generator.network(self.GeneratorInput)

		"""
		The discriminator returns two outputs: the sigmoid output and the raw
		logits, where sigmoid(logits) = P(input is the real target). Both are
		computed when the fake image is fed (label expected to be 0) and when
		the real image is fed (label expected to be 1).

		"""

		self.DiscriminatorOutReal,self.DiscriminatorLogitsReal=discriminator.Discriminator(Target=self.TargetImagePlaceholder,DarkInput=self.GeneratorInput)
		self.DiscriminatorOutFake,self.DiscriminatorLogitsFake=discriminator.Discriminator(Target=self.GeneratedImage,DarkInput=self.GeneratorInput,reuse=True)

		"""
		Because the computation graph differs depending on whether the generated
		image or the true target is fed, we compute two sigmoid cross-entropy
		losses; their sum is the final loss for the discriminator.

		DiscriminatorLoss = SigLoss(D(true target), label=1) + SigLoss(D(generated image), label=0)
		The first term is the RealLoss and the second term is the FakeLoss.

		"""
		
		self.DiscriminatorRealLoss=tf.reduce_mean(self.cross_entropy(logits=self.DiscriminatorLogitsReal, labels=self.DiscriminatorLabelsReal))
		self.DiscriminatorFakeLoss=tf.reduce_mean(self.cross_entropy(logits=self.DiscriminatorLogitsFake, labels=self.DiscriminatorLabelsFake))
		self.DiscriminatorLoss=tf.reduce_mean(self.DiscriminatorRealLoss+self.DiscriminatorFakeLoss)
		
		"""
		GeneratorLoss = SigLoss(D(generated image), label=1)

		"""
		self.GeneratorLoss=tf.reduce_mean(self.cross_entropy(logits=self.DiscriminatorLogitsFake, labels=self.GeneratorLabels))
		

		"""
		Lets the user observe, over time, how well the model generates images
		similar to the target.
		"""
		self.GeneratorABS=tf.losses.absolute_difference(self.GeneratedImage,self.TargetImagePlaceholder)

		self.TrainableVars=tf.trainable_variables()

		"""
		d_vars are the discriminator weights, since all of them live under the
		"Discriminator" scope. This lets the optimizer minimize the loss with
		respect to the discriminator weights only.

		g_vars are the generator weights, since all of them live under the
		"Generator" scope. This lets the optimizer minimize the loss with
		respect to the generator weights only.

		"""
		self.d_vars=[var for var in self.TrainableVars if 'Discriminator' in var.name]
		self.g_vars=[var for var in self.TrainableVars if 'Generator' in var.name] 
		

		"""
		Soumith Chintala's GAN hacks tutorial suggests feeding generated and real
		targets in exclusive batches; the following two optimizers let us do that.
		They optimize the discriminator weights only.
		"""
		self.DiscriminatorOptimizerReal=tf.train.AdamOptimizer(0.00001).minimize(self.DiscriminatorRealLoss,var_list=self.d_vars)
		self.DiscriminatorOptimizerFake=tf.train.AdamOptimizer(0.00001).minimize(self.DiscriminatorFakeLoss,var_list=self.d_vars)
		
		"""
		The following optimizers minimize the generator and discriminator losses
		with respect to their own weights.
		"""
		self.gen_loss_lambda1 = 1.0
		self.GeneratorOptimizer=tf.train.AdamOptimizer(0.00001).minimize(self.GeneratorLoss+self.gen_loss_lambda1*self.GeneratorABS,var_list=self.g_vars)
		self.GeneratorGradients=tf.train.AdamOptimizer(0.00001).compute_gradients(self.GeneratorLoss,var_list=self.g_vars)
		self.DiscriminatorOptimizer=tf.train.AdamOptimizer(0.00001).minimize(self.DiscriminatorLoss,var_list=self.d_vars)
		
		self.Session = tf.Session()
		self.saver=tf.train.Saver()
		
		self.init_op= tf.initialize_all_variables()
		self.Session.run(self.init_op)
		self.BatchReplayBool=True
		self.BatchSize=1
		self.TrainSize=10
		self.HmEpochs=10
		self.PatchSize=64
		self.save_path="./models/"

		with open('DoDtrain_exposures.json') as f:
			self.TrainDict = json.load(f)
		with open('DoDval_exposures.json') as f:
			self.ValidDict = json.load(f)
		if tf.train.latest_checkpoint(self.save_path) is not None:
			self.checkpoint = tf.train.latest_checkpoint(self.save_path)
			self.saver.restore(self.Session,self.checkpoint)	
Code example #20
File: train.py  Project: yoyoshiny/CycleGAN
def main(_argv):
    weight_decay = FLAGS.weight_decay
    learning_rate = FLAGS.learning_rate
    batch_size = FLAGS.batch_size
    epoch = FLAGS.epoch
    log_dir = FLAGS.logdir	
    use_lsgan = FLAGS.use_lsgan 
    norm = FLAGS.norm
    lambda1 = FLAGS.lambda1
    lambda2 = FLAGS.lambda2    
    beta1 = FLAGS.beta1
    ngf = FLAGS.ngf
    G = generator.Generator('G',ngf,weight_decay,norm=norm,more=True)   
    F = generator.Generator('F',ngf,weight_decay,norm=norm,more=True)   
    D_Y = discriminator.Discriminator('D_Y',reg=weight_decay,norm=norm)
    D_X = discriminator.Discriminator('D_X',reg=weight_decay,norm=norm)	
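    # Run each network once on a dummy batch so the Keras models build their
    # variables before the checkpoint is restored.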
    forbuild=np.random.rand(1,768,768,3).astype(np.float32)
    built=G(forbuild)
    built=F(forbuild)
    built=D_Y(forbuild)
    built=D_X(forbuild)  
	
    source_data=data.source_data(batch_size)
    target_data=data.target_data(batch_size)

    train_loss_G = tf.keras.metrics.Mean('train_loss_G', dtype=tf.float32)
    train_loss_F = tf.keras.metrics.Mean('train_loss_F', dtype=tf.float32)
    train_loss_DX = tf.keras.metrics.Mean('train_loss_DX', dtype=tf.float32)	
    train_loss_DY = tf.keras.metrics.Mean('train_loss_DY', dtype=tf.float32)		
    train_loss = [train_loss_G,train_loss_F,train_loss_DX,train_loss_DY]
    generator_g_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
    generator_f_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)

    discriminator_x_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
    discriminator_y_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)

    train_summary_writer = tf.summary.create_file_writer(log_dir)

    ckpt = tf.train.Checkpoint(G=G,F=F,D_X=D_X,D_Y=D_Y,generator_g_optimizer=generator_g_optimizer,generator_f_optimizer=generator_f_optimizer,discriminator_x_optimizer=discriminator_x_optimizer,discriminator_y_optimizer=discriminator_y_optimizer)
    ckpt_manager = tf.train.CheckpointManager(ckpt, log_dir, max_to_keep=10)
    start =0
    lr = lr_sch(start)
    if ckpt_manager.latest_checkpoint:
        ckpt.restore(ckpt_manager.latest_checkpoint)
        start=int(ckpt_manager.latest_checkpoint.split('-')[-1])
        lr = lr_sch(start)
        print ('Latest checkpoint restored!!')
    for ep in range(start,epoch,1):
        print('Epoch:'+str(ep+1))	
        for step,  source in enumerate(source_data):
            target = next(iter(target_data))		
            fake_y, cycled_x,fake_x,cycled_y,same_x,same_y, total_gen_g_loss,total_gen_f_loss,disc_x_loss,disc_y_loss,steps = train_step(G,F,D_Y,D_X,source,target,generator_g_optimizer,generator_f_optimizer,discriminator_x_optimizer,discriminator_y_optimizer,train_loss,lambda1,lambda2)
            print('Step: '+str(steps.numpy())+' , G loss: '+str(total_gen_g_loss.numpy())+' , F loss: '+str(total_gen_f_loss.numpy())+' , D_X loss: '+str(disc_x_loss.numpy())+' , D_Y loss: '+str(disc_y_loss.numpy()))
            if (steps.numpy()-1)%10==0:
                source=tf.image.convert_image_dtype((source+1)/2,dtype = tf.uint8)
                target=tf.image.convert_image_dtype((target+1)/2,dtype = tf.uint8)			
                fake_y=tf.image.convert_image_dtype((fake_y+1)/2,dtype = tf.uint8)
                cycled_x=tf.image.convert_image_dtype((cycled_x+1)/2,dtype = tf.uint8)
                same_x=tf.image.convert_image_dtype((same_x+1)/2,dtype = tf.uint8)
                fake_x=tf.image.convert_image_dtype((fake_x+1)/2,dtype = tf.uint8)
                cycled_y=tf.image.convert_image_dtype((cycled_y+1)/2,dtype = tf.uint8)
                same_y=tf.image.convert_image_dtype((same_y+1)/2,dtype = tf.uint8)
                with train_summary_writer.as_default():
                    tf.summary.scalar('Learning_rate', lr, step=steps)
                    tf.summary.scalar('Loss/G_loss', train_loss[0].result(), step=steps)
                    tf.summary.scalar('Loss/F_loss', train_loss[1].result(), step=steps)
                    tf.summary.scalar('Loss/DX_loss', train_loss[2].result(), step=steps)
                    tf.summary.scalar('Loss/DY_loss', train_loss[3].result(), step=steps)
                    tf.summary.image('image_source/source',source,max_outputs=1,step=steps)
                    tf.summary.image('image_source/fake_target',fake_y,max_outputs=1, step=steps)
                    tf.summary.image('image_source/cycle_source',cycled_x,max_outputs=1,step=steps)
                    tf.summary.image('image_source/same_source',same_x,max_outputs=1, step=steps)
                    tf.summary.image('image_target/target',target,max_outputs=1,step=steps)
                    tf.summary.image('image_target/fake_source',fake_x,max_outputs=1, step=steps)
                    tf.summary.image('image_target/cycle_target',cycled_y,max_outputs=1,step=steps)
                    tf.summary.image('image_target/same_target',same_y,max_outputs=1, step=steps)
                train_loss[0].reset_states()
                train_loss[1].reset_states()
                train_loss[2].reset_states()
                train_loss[3].reset_states()
        lr = lr_sch(ep)
        generator_g_optimizer.learning_rate = lr
        generator_f_optimizer.learning_rate = lr
        discriminator_x_optimizer.learning_rate = lr
        discriminator_y_optimizer.learning_rate = lr
        ckpt_save_path = ckpt_manager.save()	
Code example #21
        if classname.find('Conv') != -1:
            nn.init.normal_(m.weight.data, 0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0)

    # =============== GENERATOR =============>>>
    n_gen = generator.Generator(args.ngpu).to(device)

    if (device.type == 'cuda') and (args.ngpu > 1):
        n_gen = nn.DataParallel(n_gen, list(range(args.ngpu)))

    n_gen.apply(weights_init)

    # ============ DISCRIMINATOR =====>>>
    n_disc = discriminator.Discriminator(args.ngpu).to(device)

    if (device.type == 'cuda') and (args.ngpu > 1):
        n_disc = nn.DataParallel(n_disc, list(range(args.ngpu)))

    n_disc.apply(weights_init)

    # ====== LOSS ==========>>>
    loss = nn.BCELoss()

    # Establish convention for real and fake labels during training
    real_label = 1
    fake_label = 0

    # ============== OPTIMISERS ===============>>>
    optimizerD = optim.Adam(n_disc.parameters(),
Code example #22
def GAN(data_name = "circle"):

    # hyperparameters
    G_learning_rate = 2e-4
    D_learning_rate = 2e-4
    batch_size = 1000
    epoc_num = 20000
    d_steps = 1
    g_steps = 1

    dim = 0
    ori_z = None
    if data_name == "circle":
        dim = 2
        z_size = dim
        input_size = dim
        output_size = input_size
        img_dir = "imgs_circle"
        sample_true_data_func = sample_true_data_circle
        plot_true_data_and_noise(img_dir, sample_true_data_func, dim)
        g_dim_list = [z_size, 128, 128, output_size]
        g_act_list = [tf.nn.leaky_relu, tf.nn.leaky_relu, tf.nn.tanh]
    elif data_name == "Gaussian":
        dim = 2
        z_size = dim
        input_size = dim
        output_size = input_size
        img_dir = "imgs_Gaussian"
        sample_true_data_func = sample_true_data_Gaussian
        plot_true_data_and_noise(img_dir, sample_true_data_func, dim)
        g_dim_list = [z_size, 128, 128, output_size]
        g_act_list = [tf.nn.leaky_relu, tf.nn.leaky_relu, None]
    elif data_name == "mnist":

        # increase epoc_num for mnist
        batch_size = 250
        epoc_num = 120000

        img_height = 28
        img_width = 28
        dim = img_height * img_width
        z_size = dim
        input_size = dim
        output_size = input_size
        img_dir = "imgs_mnist"
        global mnist
        mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
        sample_true_data_func = sample_true_data_mnist
        g_dim_list = [z_size, 128, 128, output_size]
        g_act_list = [tf.nn.leaky_relu, tf.nn.leaky_relu, tf.sigmoid]
        ori_z = sample_noise(batch_size, dim)

    d_dim_list = [input_size, 128, 128, 1]
    d_act_list = [tf.nn.leaky_relu, tf.nn.leaky_relu, tf.nn.sigmoid]

    # build graph
    Generator = generator.Generator(batch_size, g_dim_list, g_act_list, scope_name = "Generator")
    Discriminator = discriminator.Discriminator(batch_size, d_dim_list, d_act_list, scope_name = "Discriminator")

    generator_output = Generator.build()
    data_preds, gen_preds = Discriminator.build(generator_output)

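    # Standard GAN losses: non-saturating generator loss -E[log D(G(z))] and
    # discriminator loss -E[log D(x) + log(1 - D(G(z)))].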
    G_loss = -tf.reduce_mean(tf.log(gen_preds))
    D_loss = -tf.reduce_mean((tf.log(data_preds) + tf.log(1 - gen_preds)))

    # G trainer
    G_trainer = tf.train.AdamOptimizer(G_learning_rate)
    G_step = G_trainer.minimize(G_loss, var_list = Generator.get_var_list())

    # D trainer
    D_trainer = tf.train.AdamOptimizer(D_learning_rate)
    D_step = D_trainer.minimize(D_loss, var_list = Discriminator.get_var_list())

    G_loss_list = []
    D_loss_list = []

    with tf.Session() as sess:
        #graph_writer = tf.summary.FileWriter("logs/", sess.graph)
        sess.run(tf.global_variables_initializer())
        m = batch_size
        # training process
        for i in range(epoc_num):
            # 1. d steps for optimize D
            for d in range(d_steps):
                # sample m noise samples {z_1, z_2, ..., z_m} from p_z(z)
                z = sample_noise(m, dim)
                # sample m examples {x_1, x_2, ..., x_m} from p_data(x) (true_data)
                true_samples = sample_true_data_func(num = m)

                # update D
                d_loss, _, d_probs, g_probs = sess.run([D_loss, D_step, data_preds, gen_preds],
                        feed_dict = {
                        Generator.get_input_layer_tensor() : z, # p_z
                        Discriminator.get_input_layer_tensor() : true_samples, # true data
                        })
                D_loss_list.append(d_loss)

            # 2. g steps for optimize G
            for g in range(g_steps): 
                # sample m noise samples {z_1, z_2, ..., z_m} from p_z(z)
                z = sample_noise(m, dim)
                # sample m examples {x_1, x_2, ..., x_m} from p_data(x) (true_data)
                true_samples = sample_true_data_func(num = m)
                zero_data_for_G = np.zeros((m, dim))

                # update G
                g_loss, _, g_data = sess.run([G_loss, G_step, generator_output],
                        feed_dict = {
                        Generator.get_input_layer_tensor() : z, # p_z
                        Discriminator.get_input_layer_tensor() : zero_data_for_G, # zero data or true data?
                        })
                G_loss_list.append(g_loss)

            if i % 100 == 0:
                print(str(i) + " of " + str(epoc_num) + ", " + "{:.2f}".format(100.0 * i / epoc_num) + "%")

            if (i + 1) % 1000 == 0 or i == 0:
                z = sample_noise(m, dim)
                if data_name == "mnist":
                    z = ori_z
                gen_data = sess.run([generator_output],
                        feed_dict = {
                        Generator.get_input_layer_tensor() : z, # p_z
                        })
                if data_name == "mnist":
                    save_gen_img_data(img_dir, gen_data, i + 1, img_height, img_width)
                else:
                    plot_gen_data(img_dir, gen_data, i + 1)

    plot_loss_fig(img_dir, D_loss_list, G_loss_list)
Code example #23
                        batch_size=config.batch_size,
                        shuffle=True,
                        num_workers=workers,
                        drop_last=True)

device = torch.device("cuda" if (
    torch.cuda.is_available() and config.gpus > 0) else "cpu")

# Create discriminator and generator

netG = generator.Generator(config.channels_noise, config.channels_img,
                           config.features_g, config.gpus,
                           config.classes).to(device)
netG.apply(weights_initialize)

netD = discriminator.Discriminator(config.channels_img, config.features_d,
                                   config.gpus, config.classes).to(device)
netD.apply(weights_initialize)
# Setup Optimizer for G and D
optimizerD = optim.Adam(netD.parameters(),
                        lr=config.lrD,
                        betas=(config.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(),
                        lr=config.lrG,
                        betas=(config.beta1, 0.999))

netG.train()
netD.train()

criterion = nn.BCELoss()

real_label = 1
Code example #24
import discriminator

from tensorboardX import SummaryWriter

data = np.load('expert_data.npy')
expert_data = collections.deque()
for x in data:
    expert_data.append(x)

sess = tf.Session()

state_size = 4
action_size = 2
n_step = 128
agent = ppo.PPO(sess, state_size, action_size)
dis = discriminator.Discriminator(sess, state_size, action_size)
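# GAIL-style setup (assumed from the `gail` flag below): the PPO agent is trained
# alongside a discriminator that separates expert (state, action) pairs from the
# agent's own transitions.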

env = gym.make('CartPole-v0')
score = 0
episode = 0
p = 0
gail = True

writer = SummaryWriter()

state = env.reset()

while True:
    values_list, states_list, actions_list, dones_list, logp_ts_list, rewards_list = \
                [], [], [], [], [], []
Code example #25
    def __init__(self,
                 in_dim=512,
                 embed_dim=16,
                 latent_std=1.0,
                 strt_dim=3,
                 n_heads=4,
                 init_knl=3,
                 max_pos=1000,
                 pos_conv_knl=3,
                 chstl_fc_layers=4,
                 chstl_activ=tf.keras.layers.LeakyReLU(alpha=0.1),
                 chsyn_fc_activation=tf.keras.layers.LeakyReLU(alpha=0.1),
                 chsyn_encoder_layers=3,
                 chsyn_decoder_layers=3,
                 chsyn_fc_layers=3,
                 chsyn_norm_epsilon=1e-6,
                 chsyn_transformer_dropout_rate=0.2,
                 chsyn_noise_std=0.5,
                 time_features=3,
                 tmstl_fc_layers=4,
                 tmstl_activ=tf.keras.layers.LeakyReLU(alpha=0.1),
                 tmsyn_encoder_layers=3,
                 tmsyn_decoder_layers=3,
                 tmsyn_fc_layers=3,
                 tmsyn_norm_epsilon=1e-6,
                 tmsyn_transformer_dropout_rate=0.2,
                 tmsyn_fc_activation=tf.keras.layers.LeakyReLU(alpha=0.1),
                 tmsyn_noise_std=0.5,
                 d_kernel_size=3,
                 d_encoder_layers=1,
                 d_decoder_layers=1,
                 d_fc_layers=3,
                 d_norm_epsilon=1e-6,
                 d_transformer_dropout_rate=0.2,
                 d_fc_activation=tf.keras.activations.tanh,
                 d_out_dropout=0.3,
                 d_recycle_fc_activ=tf.keras.activations.elu,
                 mode_='comb'):
        super(WGAN, self).__init__()
        if mode_ in ['chords', 'comb']:
            assert embed_dim % n_heads == 0, 'make sure: embed_dim % chsyn_n_heads == 0'

        # ---------------------------------- settings ----------------------------------
        self.mode_ = mode_
        self.embed_dim = embed_dim
        self.time_features = time_features  # 3 for [velocity, time since last start, chords duration]
        self.in_dim = in_dim
        self.strt_dim = strt_dim
        self.latent_std = latent_std
        # callback settings
        self.ckpts = dict()
        self.ckpt_managers = dict()
        # optimisers
        self.optimizer_gen = tf.keras.optimizers.Adam(0.01,
                                                      beta_1=0.9,
                                                      beta_2=0.98,
                                                      epsilon=1e-9)
        self.optimizer_disc = tf.keras.optimizers.Adam(0.0001,
                                                       beta_1=0.9,
                                                       beta_2=0.98,
                                                       epsilon=1e-9)
        # losses
        self.train_loss_gen = tf.keras.metrics.Mean(name='train_loss_gen')
        self.train_loss_disc = tf.keras.metrics.Mean(name='train_loss_disc')

        # ---------------------------------- layers ----------------------------------
        # generators
        if mode_ != 'time':
            self.chords_style = generator.Mapping(fc_layers=chstl_fc_layers,
                                                  activ=chstl_activ)
            self.chords_syn = generator.ChordsSynthesis(
                embed_dim=embed_dim,
                init_knl=init_knl,
                strt_dim=strt_dim,
                n_heads=n_heads,
                fc_activation=chsyn_fc_activation,
                encoder_layers=chsyn_encoder_layers,
                decoder_layers=chsyn_decoder_layers,
                fc_layers=chsyn_fc_layers,
                norm_epsilon=chsyn_norm_epsilon,
                transformer_dropout_rate=chsyn_transformer_dropout_rate,
                noise_std=chsyn_noise_std)
        if mode_ != 'chords':
            self.time_style = generator.Mapping(fc_layers=tmstl_fc_layers,
                                                activ=tmstl_activ)
            self.time_syn = generator.TimeSynthesis(
                time_features=time_features,
                init_knl=init_knl,
                strt_dim=strt_dim,
                fc_activation=tmsyn_fc_activation,
                encoder_layers=tmsyn_encoder_layers,
                decoder_layers=tmsyn_decoder_layers,
                fc_layers=tmsyn_fc_layers,
                norm_epsilon=tmsyn_norm_epsilon,
                transformer_dropout_rate=tmsyn_transformer_dropout_rate,
                noise_std=tmsyn_noise_std,
                max_pos=max_pos,
                pos_conv_knl=pos_conv_knl)

        # discriminator
        if mode_ == 'chords':
            self.disc = discriminator.ChordsDiscriminator(
                embed_dim=embed_dim,
                n_heads=n_heads,
                fc_activation=d_fc_activation,
                encoder_layers=d_encoder_layers,
                decoder_layers=d_decoder_layers,
                fc_layers=d_fc_layers,
                norm_epsilon=d_norm_epsilon,
                transformer_dropout_rate=d_transformer_dropout_rate,
                pre_out_dim=in_dim,
                out_dropout=d_out_dropout,
                recycle_fc_activ=d_recycle_fc_activ)
        elif mode_ == 'time':
            self.disc = discriminator.TimeDiscriminator(
                time_features=time_features,
                fc_activation=d_fc_activation,
                encoder_layers=d_encoder_layers,
                decoder_layers=d_decoder_layers,
                fc_layers=d_fc_layers,
                norm_epsilon=d_norm_epsilon,
                transformer_dropout_rate=d_transformer_dropout_rate,
                pre_out_dim=in_dim,
                out_dropout=d_out_dropout,
                recycle_fc_activ=d_recycle_fc_activ)
        else:  # mode_ == 'comb'
            self.disc = discriminator.Discriminator(
                embed_dim=embed_dim,
                n_heads=n_heads,
                kernel_size=d_kernel_size,
                fc_activation=d_fc_activation,
                encoder_layers=d_encoder_layers,
                decoder_layers=d_decoder_layers,
                fc_layers=d_fc_layers,
                norm_epsilon=d_norm_epsilon,
                transformer_dropout_rate=d_transformer_dropout_rate,
                pre_out_dim=in_dim,
                out_dropout=d_out_dropout,
                recycle_fc_activ=d_recycle_fc_activ)
Code example #26
    def build_discriminator(self):
        self.discriminator = discriminator.Discriminator(
            n_node=self.n_node, node_emd_init=self.node_embed_init_d)
Code example #27
#Define Models
AEncoder = ABot_Encoder.ABotEncoder(params)
ADecoder = ABot_Decoder.ABotDecoder(params)
QEncoder = QBot_Encoder.QBotEncoder(params)
QDecoder = QBot_Decoder.QBotDecoder(params)
embedding_weights = np.random.random(
    (params['vocab_size'], params['embed_size']))
embedding_weights[0, :] = np.zeros((1, params['embed_size']))
ABot_embedding_layer = ABot.EmbeddingLayer(embedding_weights)
QBot_embedding_layer = QBot.EmbeddingLayer(embedding_weights)
sampler = ABot.GumbelSampler()
embedding_weights_discr = np.random.random(
    (params['vocab_size'], params['embed_size']))
embedding_weights_discr[0, :] = np.zeros((1, params['embed_size']))
print(embedding_weights_discr)
discriminator = Discriminator.Discriminator(params, embedding_weights_discr)

#Criterion
criterion = {}
criterion['CrossEntropyLoss'] = nn.CrossEntropyLoss(reduce=False)
criterion['HingeEmbeddingLoss'] = nn.HingeEmbeddingLoss(margin=0.0,
                                                        size_average=False)
criterion['MSELoss'] = nn.MSELoss(size_average=False)
criterion['BCELoss'] = nn.BCELoss(size_average=False)

#Optimizer
ABot_optimizer = torch.optim.Adam([{
    'params': AEncoder.parameters()
}, {
    'params': ADecoder.parameters()
}, {
Code example #28
                              GEN_HIDDEN_DIM,
                              GEN_EMBEDDING_DIM,
                              MAX_SEQ_LEN,
                              device=DEVICE)
    gen_optimizer = optim.Adam(gen.parameters(), lr=1e-2)

    if DISCRIMINATOR_LM:
        dis = discriminator_LM.Discriminator(DIS_EMBEDDING_DIM,
                                             DIS_HIDDEN_DIM,
                                             VOCAB_SIZE,
                                             MAX_SEQ_LEN,
                                             device=DEVICE)
    else:
        dis = discriminator.Discriminator(DIS_EMBEDDING_DIM,
                                          DIS_HIDDEN_DIM,
                                          VOCAB_SIZE,
                                          MAX_SEQ_LEN,
                                          device=DEVICE)
    dis_optimizer = optim.Adagrad(dis.parameters())  ## ADAGRAD ??

    if CUDA:
        dis = dis.cuda()

    # OPTIONAL: Pretrain generator
    # checkpoint = torch.load('generator_checkpoint.pth.tar')
    print('Starting Generator MLE Training...')
    train_generator_MLE(gen, gen_optimizer, train_data_loader,
                        MLE_TRAIN_EPOCHS)

    # #  OPTIONAL: Pretrain discriminator
    # print('\nStarting Discriminator Training...')
Code example #29
	gen_optimizer = Ranger(itertools.chain(state_encoder.parameters(),reason_decoder.parameters()), lr=1e-5,weight_decay= 1e-5)
	# train_generator_MLE(state_encoder, reason_decoder, gen_optimizer,  PRE_TRAIN_GEN)
	print('\nStarting Generator Evaluating...')
	best_epoch = dev_generator(state_encoder, reason_decoder, gen_optimizer, name="pretrain_gen")
	pretrained_gen_path = "./model/pretrain_gen"+str(best_epoch)+".pth"
	state_encoder.load_state_dict(torch.load(pretrained_gen_path)["state_encoder"])
	reason_decoder.load_state_dict(torch.load(pretrained_gen_path)["reason_decoder"])
	embedding.load_state_dict(torch.load(pretrained_gen_path)["embed"])
	state = {"state_encoder": state_encoder.state_dict(), "reason_decoder": reason_decoder.state_dict(),
	         'embed': embedding.state_dict()}
	pretrained_gen_path = "./model/pretrain_gen.pth"
	torch.save(state, pretrained_gen_path)


	# PRETRAIN DISCRIMINATOR
	dis = discriminator.Discriminator(hidden_size,DIS_HIDDEN_DIM).to(
		device)  # use directly as classifier

	dis_option_encoder = EncoderRNN(VOCAB_SIZE, EMBED_DIM, hidden_size=int(hidden_size), embedding=embedding, bidirectional= False).to(device)
	print('\nStarting Discriminator Training...')
	dis_optimizer = Ranger(itertools.chain(dis_option_encoder.parameters(),dis.parameters()), lr=1e-5, weight_decay= 1e-4)
	# train_discriminator(dis, dis_option_encoder, dis_optimizer,  PRE_TRAIN_DIS)
	print('\nStarting Discriminator Evaluating...')
	best_epoch = dev_discriminator(dis,  dis_option_encoder, name="pretrain_dis")
	pretrained_dis_path = "./model/pretrain_dis"+str(best_epoch)+".pth"
	dis_option_encoder.load_state_dict(torch.load(pretrained_dis_path)["dis_option_encoder"])
	dis.load_state_dict(torch.load(pretrained_dis_path)["dis"])
	embedding.load_state_dict(torch.load(pretrained_dis_path)["embed"])
	state = {"dis_option_encoder": dis_option_encoder.state_dict(), "dis": dis.state_dict(),'embed': embedding.state_dict()}
	pretrained_dis_path = "./model/pretrain_dis.pth"
	torch.save(state, pretrained_dis_path)
Code example #30
File: train.py  Project: moon8877/dcgan
def train(device=device):
    batch_size = 1024
    image_size = 64
    G_out_D_in = 3
    G_in = 100
    G_hidden = 64
    D_hidden = 64
    epochs = 5
    lr = 0.001
    beta1 = 0.5

    dataset = torchvision.datasets.ImageFolder(root='./dcgan/pic',transform=transforms.Compose(
        [
            transforms.Resize(image_size),
            transforms.CenterCrop(image_size),
            transforms.ToTensor(),
            transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))
        ]
    ))

    dataloader = torch.utils.data.DataLoader(dataset=dataset,batch_size=batch_size,shuffle=True)
    netg = generator.Generator(G_in,G_hidden,G_out_D_in).to(device)
    print(netg)

    netd = discriminator.Discriminator(G_out_D_in,D_hidden).to(device)
    print(netd)

    criterion = nn.BCELoss()
    fixed_noise = torch.randn(64,G_in,1,1,device=device)

    real_label = 1
    fake_label = 0

    optimizerd = optim.Adam(netd.parameters(), lr=lr, betas=(beta1, 0.999))
    optimizerg = optim.Adam(netg.parameters(), lr=lr, betas=(beta1, 0.999))
    img_list = []
    g_losses = []
    d_losses = []
    iters = 0
    print('start')
    for epoch in range(epochs):
        for i,data in enumerate(dataloader,0):
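            # --- Discriminator update: a real batch then a fake batch,
            #     maximizing log D(x) + log(1 - D(G(z))) ---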
            netd.zero_grad()
            real_cpu = data[0].to(device)
            b_size =real_cpu.size(0)
            label = torch.full((b_size,),real_label,device=device)
            output = netd(real_cpu).view(-1)
            
            errD_real = criterion(output,label)
            errD_real.backward()
            D_x = output.mean().item()

            noise = torch.randn(b_size,G_in,1,1,device=device)
            fake = netg(noise)
            label.fill_(fake_label)
            output = netd(fake.detach()).view(-1)

            errd_fake = criterion(output,label)
            errd_fake.backward()

            d_g_z1 = output.mean().item()
            errd = errD_real+errd_fake
            optimizerd.step()

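            # --- Generator update: label the fakes as real so the generator
            #     maximizes log D(G(z)) ---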
            netg.zero_grad()
            label.fill_(real_label)
            output = netd(fake).view(-1)
            errg = criterion(output,label)
            errg.backward()
            d_g_z2 = output.mean().item()
            optimizerg.step()
            if(i%50 == 0):
                print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f' % (epoch, epochs, i, len(dataloader), errd.item(), errg.item(), D_x, d_g_z1, d_g_z2))
            g_losses.append(errg.item())
            d_losses.append(errd.item())
            if (iters%500 == 0) or((epoch == epochs-1)and(i == len(dataloader)-1)):
                with torch.no_grad():
                    fake = netg(fixed_noise).detach().cpu()
                img_list.append(utils.make_grid(fake,padding=2,normalize=True))
            iters = iters+1

    torch.save(netd,'netd.pkl')
    torch.save(netd.state_dict(),'netd_parameters.pkl')
    torch.save(netg,'net.pkl')
    torch.save(netg.state_dict(),'net_parameters.pkl')
    return g_losses,d_losses,img_list