Example #1
class DIAYN:
    def __init__(self, s_dim, a_num, skill_num, hidden, lr, gamma, tau,
                 log_prob_reg, alpha, capacity, batch_size, device):
        self.s_dim = s_dim
        self.a_num = a_num
        self.skill_num = skill_num
        self.hidden = hidden
        self.lr = lr
        self.gamma = gamma
        self.tau = tau
        self.log_prob_reg = log_prob_reg
        self.alpha = alpha
        self.capacity = capacity
        self.batch_size = batch_size
        self.device = device
        self.log_pz = torch.log(
            torch.tensor(1 / skill_num, dtype=torch.float, device=device))

        # network initialization
        self.policy = Policy(s_dim, skill_num, hidden, a_num).to(device)
        self.opt_policy = torch.optim.Adam(self.policy.parameters(), lr=lr)

        self.q_net = QNet(s_dim, skill_num, hidden, a_num).to(device)
        self.opt_q_net = torch.optim.Adam(self.q_net.parameters(), lr=lr)

        self.v_net = VNet(s_dim, skill_num, hidden).to(device)
        self.v_net_target = VNet(s_dim, skill_num, hidden).to(device)
        self.v_net_target.load_state_dict(self.v_net.state_dict())
        self.opt_v_net = torch.optim.Adam(self.v_net.parameters(), lr=lr)

        self.discriminator = Discriminator(s_dim, skill_num, hidden).to(device)
        self.opt_discriminator = torch.optim.Adam(
            self.discriminator.parameters(), lr=lr)

        # replay buffer, memory
        self.memory = ReplayBuffer(capacity, batch_size, device)
Example #2
def train(epochs, batch_size):
    data = []  # rows of (epoch, d_loss_real, d_loss_fake, gan_loss) for the CSV log
    batch_count = int(images_hr_train.shape[0] / batch_size)
    shape = image_shape_LR
 
    generator = Generator(shape).generator()
    discriminator = Discriminator(image_shape_HR).discriminator()

    adam = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    generator.compile(loss=vgg_loss, optimizer=adam)
    discriminator.compile(loss="binary_crossentropy", optimizer=adam)
    

    gan = get_gan_network(discriminator, shape, generator, adam)

    for e in range(1, epochs+1):
        print ('-'*15, 'Epoch %d' % e, '-'*15)
        for _ in range(batch_count):
            
            rand_nums = np.random.randint(0, images_hr_train.shape[0], size=batch_size)
            
            image_batch_hr = images_hr_train[rand_nums]
            image_batch_lr = images_lr_train[rand_nums]

            generated_images_sr = generator.predict(image_batch_lr)

            real_data_Y = np.ones(batch_size) - np.random.random_sample(batch_size)*0.2
            fake_data_Y = np.random.random_sample(batch_size)*0.2
            
            discriminator.trainable = True
            
            d_loss_real = discriminator.train_on_batch(image_batch_hr, real_data_Y)
            d_loss_fake = discriminator.train_on_batch(generated_images_sr, fake_data_Y)
            #d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)
          
            rand_nums = np.random.randint(0, images_hr_train.shape[0], size=batch_size)
            image_batch_hr = images_hr_train[rand_nums]
            image_batch_lr = images_lr_train[rand_nums]

            gan_Y = np.ones(batch_size) - np.random.random_sample(batch_size)*0.2
            discriminator.trainable = False
            loss_gan = gan.train_on_batch(image_batch_lr, [image_batch_hr,gan_Y])
            print("Loss HR , Loss LR, Loss GAN")
            print(d_loss_real, d_loss_fake, loss_gan) 
            data.append((e, d_loss_real, d_loss_fake, loss_gan))
        if e == 1 or e % 10 == 0:
            plot_generated_images(e, generator)
        if e % 100 == 0:
            generator.save('./output/gen_model%d.h5' % e)
            discriminator.save('./output/dis_model%d.h5' % e)
            gan.save('./output/gan_model%d.h5' % e)
        # Rewrite the full loss log every epoch so progress survives interruption.
        with open('LossData.csv', 'w') as out:
            csv_out = csv.writer(out)
            csv_out.writerow(['epoch','Loss HR' , 'Loss LR', 'Loss GAN'])
            for row in data:
                csv_out.writerow(row)
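
The real/fake targets in these training loops use noisy one-sided labels rather than hard 1/0, a common GAN stabilization trick. A minimal, self-contained sketch of just that labeling step (the generator seed and batch size here are illustrative, not from the examples):

import numpy as np

rng = np.random.default_rng(0)  # the examples above use np.random directly
batch_size = 4

real_data_Y = np.ones(batch_size) - rng.random(batch_size) * 0.2  # values in (0.8, 1.0]
fake_data_Y = rng.random(batch_size) * 0.2                        # values in [0.0, 0.2)
print(real_data_Y, fake_data_Y)
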
Example #3
def train(epochs, batch_size):

    downscale_factor = 4

    batch_count = int(x_train_hr.shape[0] / batch_size)
    shape = (image_shape[0] // downscale_factor,
             image_shape[1] // downscale_factor, image_shape[2])

    generator = Generator(shape).generator()
    discriminator = Discriminator(image_shape).discriminator()

    adam = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    generator.compile(loss=vgg_loss, optimizer=adam)
    discriminator.compile(loss="binary_crossentropy", optimizer=adam)

    shape = (image_shape[0] // downscale_factor,
             image_shape[1] // downscale_factor, 3)
    gan = get_gan_network(discriminator, shape, generator, adam)

    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in range(batch_count):

            rand_nums = np.random.randint(0,
                                          x_train_hr.shape[0],
                                          size=batch_size)

            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]
            generated_images_sr = generator.predict(image_batch_lr)

            real_data_Y = np.ones(
                batch_size) - np.random.random_sample(batch_size) * 0.2
            fake_data_Y = np.random.random_sample(batch_size) * 0.2

            discriminator.trainable = True

            d_loss_real = discriminator.train_on_batch(image_batch_hr,
                                                       real_data_Y)
            d_loss_fake = discriminator.train_on_batch(generated_images_sr,
                                                       fake_data_Y)
            d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)

            rand_nums = np.random.randint(0,
                                          x_train_hr.shape[0],
                                          size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]

            gan_Y = np.ones(
                batch_size) - np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = False
            loss_gan = gan.train_on_batch(image_batch_lr,
                                          [image_batch_hr, gan_Y])

        print("Loss HR , Loss LR, Loss GAN")
        print(d_loss_real, d_loss_fake, loss_gan)

        if e % 300 == 0:
            generator.save('./output/gen_model%d.h5' % e)
Example #4
def train(epochs, batch_size):
    psnr_v = []
    epochs_v = []

    downscale_factor = 4
    
    batch_count = int(x_train_hr.shape[0] / batch_size)
    shape = (image_shape[0]//downscale_factor, image_shape[1]//downscale_factor, image_shape[2])
    
    generator = Generator(shape).generator()
    discriminator = Discriminator(image_shape).discriminator()

    adam = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    generator.compile(loss=vgg_loss, optimizer=adam)
    discriminator.compile(loss="binary_crossentropy", optimizer=adam)
    
    shape = (image_shape[0]//downscale_factor, image_shape[1]//downscale_factor, 3)
    gan = get_gan_network(discriminator, shape, generator, adam)

    for e in range(1, epochs+1):
        print ('-'*15, 'Epoch %d' % e, '-'*15)
        for _ in range(batch_count):
            
            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
            
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]
            generated_images_sr = generator.predict(image_batch_lr)

            real_data_Y = np.ones(batch_size) - np.random.random_sample(batch_size)*0.2
            fake_data_Y = np.random.random_sample(batch_size)*0.2
            
            discriminator.trainable = True
            
            d_loss_real = discriminator.train_on_batch(image_batch_hr, real_data_Y)
            d_loss_fake = discriminator.train_on_batch(generated_images_sr, fake_data_Y)
            d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)

            
            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]

            gan_Y = np.ones(batch_size) - np.random.random_sample(batch_size)*0.2
            discriminator.trainable = False
            loss_gan = gan.train_on_batch(image_batch_lr, [image_batch_hr,gan_Y])
            
        print("Loss HR , Loss LR, Loss GAN")
        print(d_loss_real, d_loss_fake, loss_gan)

        if e % 300 == 0:
            generator.save('./model/gen_model%d.h5' % e)
            model = load_model('./model/gen_model%d.h5' % e,
                               custom_objects={'vgg_loss': loss.vgg_loss})
            # predict expects a batch axis; evaluate on the last sampled LR image
            gen_img = model.predict(np.expand_dims(x_train_lr[rand_nums[0]], axis=0))
            generated_image = denormalize(gen_img[0])
            psnr_v.append(sewar.psnr(x_train_hr[rand_nums[0]], generated_image, MAX=None))
            epochs_v.append(e)
            plt.plot(epochs_v, psnr_v)
            plt.savefig('./model/progress%d.png' % e)
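
Example #4 tracks progress with sewar.psnr (with MAX=None, sewar infers the peak value from the image dtype). For reference, the metric is 20·log10(MAX) − 10·log10(MSE); a minimal NumPy sketch, assuming 8-bit images when max_val is not supplied:

import numpy as np

def psnr(reference, estimate, max_val=255.0):
    # Peak signal-to-noise ratio in dB.
    mse = np.mean((reference.astype(np.float64) - estimate.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')  # identical images
    return 20 * np.log10(max_val) - 10 * np.log10(mse)
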
Example #5
def train(epochs, batch_size, input_dir, output_dir, model_save_dir,
          number_of_images, train_test_ratio):
    x_train_lr, x_train_hr, x_test_lr, x_test_hr = Utils.load_training_data(
        input_dir, '.png', number_of_images, train_test_ratio)
    loss = VGG_LOSS(image_shape)

    batch_count = int(x_train_hr.shape[0] / batch_size)
    shape = (image_shape[0] // downscale_factor,
             image_shape[1] // downscale_factor, image_shape[2])  # Not good

    generator, _ = Generator(shape).generator()
    discriminator = Discriminator(image_shape).discriminator()

    optimizer = Utils_model.get_optimizer()
    generator.compile(loss=loss.vgg_loss, optimizer=optimizer)
    discriminator.compile(loss="binary_crossentropy", optimizer=optimizer)

    gan = get_gan_network(discriminator, shape, generator, optimizer,
                          loss.vgg_loss)

    loss_file = open(model_save_dir + 'losses.txt', 'w+')
    loss_file.close()

    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in tqdm(range(batch_count)):
            rand_nums = np.random.randint(0,
                                          x_train_hr.shape[0],
                                          size=batch_size)

            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]
            generated_images_sr = generator.predict(image_batch_lr)

            real_data_Y = np.ones(
                batch_size) - np.random.random_sample(batch_size) * 0.2
            fake_data_Y = np.random.random_sample(batch_size) * 0.2

            discriminator.trainable = True

            d_loss_real = discriminator.train_on_batch(image_batch_hr,
                                                       real_data_Y)
            d_loss_fake = discriminator.train_on_batch(generated_images_sr,
                                                       fake_data_Y)
            discriminator_loss = 0.5 * np.add(d_loss_fake, d_loss_real)

            rand_nums = np.random.randint(0,
                                          x_train_hr.shape[0],
                                          size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]

            gan_Y = np.ones(
                batch_size) - np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = False
            gan_loss = gan.train_on_batch(image_batch_lr,
                                          [image_batch_hr, gan_Y])

        print("discriminator_loss : %f" % discriminator_loss)
        print("gan_loss :", gan_loss)
        gan_loss = str(gan_loss)

        loss_file = open(model_save_dir + 'losses.txt', 'a')
        loss_file.write('epoch%d : gan_loss = %s ; discriminator_loss = %f\n' %
                        (e, gan_loss, discriminator_loss))
        loss_file.close()

        if e == 1 or e % 5 == 0:
            Utils.plot_generated_images(output_dir, e, generator, x_test_hr,
                                        x_test_lr)
            # generator.save(model_save_dir + 'gen_model%d.h5' % e)
            # generator.save_weights(model_save_dir + 'gen_w%d.h5' % e)
            # exit()
        if e % 500 == 0:
            # generator.save(model_save_dir + 'gen_model%d.h5' % e)
            generator.save_weights(model_save_dir + 'gen_w%d.h5' % e)
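
None of the SRGAN examples include get_gan_network itself. Below is a minimal sketch consistent with how it is called here, i.e. a two-output combined model trained against [image_batch_hr, gan_Y]; the 1e-3 adversarial loss weight follows the SRGAN paper and is an assumption, not code from these repos:

from keras.layers import Input
from keras.models import Model

def get_gan_network(discriminator, lr_shape, generator, optimizer, vgg_loss):
    # Freeze the discriminator inside the combined model; it is trained
    # separately with trainable=True before each GAN update.
    discriminator.trainable = False
    gan_input = Input(shape=lr_shape)
    sr_output = generator(gan_input)
    validity = discriminator(sr_output)
    gan = Model(inputs=gan_input, outputs=[sr_output, validity])
    gan.compile(loss=[vgg_loss, 'binary_crossentropy'],
                loss_weights=[1.0, 1e-3],  # SRGAN-style weighting (assumption)
                optimizer=optimizer)
    return gan
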
Example #6
def train(epochs, batch_size, input_dir, output_dir, model_save_dir, number_of_images, train_test_ratio, image_extension):
    # Loading images
    x_train_lr, x_train_hr, x_test_lr, x_test_hr = \
        Utils.load_training_data(input_dir, image_extension, image_shape, number_of_images, train_test_ratio)

    print('======= Loading VGG_loss ========')
    # Loading VGG loss
    loss = VGG_LOSS(image_shape)
    loss2 = VGG_LOSS(image_shape)
    print('====== VGG_LOSS =======', loss)

    batch_count = int(x_train_hr.shape[0] / batch_size)
    print('====== Batch_count =======', batch_count)

    shape = (image_shape[0] // downscale_factor, image_shape[1] // downscale_factor, image_shape[2])
    print('====== Shape =======', shape)

    # Generator description
    generator = Generator(shape).generator()
    complex_generator = complex_Generator(shape).generator()
    # Discriminator description
    discriminator = Discriminator(image_shape).discriminator()
    discriminator2 = Discriminator(image_shape).discriminator()

    optimizer = Utils_model.get_optimizer()

    generator.compile(loss=loss.vgg_loss, optimizer=optimizer)
    complex_generator.compile(loss=loss2.vgg_loss, optimizer=optimizer)

    discriminator.compile(loss="binary_crossentropy", optimizer=optimizer)
    discriminator2.compile(loss="binary_crossentropy", optimizer=optimizer)

    gan = get_gan_network(discriminator, shape, generator, optimizer, loss.vgg_loss)
    complex_gan = get_gan_network(discriminator2, shape, complex_generator, optimizer, loss2.vgg_loss)

    loss_file = open(model_save_dir + 'losses.txt', 'w+')

    loss_file.close()

    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in tqdm(range(batch_count)):
            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)

            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]
            generated_images_sr = generator.predict(image_batch_lr)
            generated_images_csr = complex_generator.predict(image_batch_lr)
            real_data_Y = np.ones(batch_size) - np.random.random_sample(batch_size) * 0.2
            fake_data_Y = np.random.random_sample(batch_size) * 0.2

            discriminator.trainable = True
            discriminator2.trainable = True

            d_loss_real = discriminator.train_on_batch(image_batch_hr, real_data_Y)
            d_loss_fake = discriminator.train_on_batch(generated_images_sr, fake_data_Y)
            discriminator_loss = 0.5 * np.add(d_loss_fake, d_loss_real)

            d_loss_creal = discriminator2.train_on_batch(image_batch_hr, real_data_Y)
            d_loss_cfake = discriminator2.train_on_batch(generated_images_csr, fake_data_Y)
            discriminator_c_loss = 0.5 * np.add(d_loss_cfake, d_loss_creal)
            ########
            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]

            gan_Y = np.ones(batch_size) - np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = False
            discriminator2.trainable = False
            gan_loss = gan.train_on_batch(image_batch_lr, [image_batch_hr, gan_Y])
            gan_c_loss = complex_gan.train_on_batch(image_batch_lr, [image_batch_hr, gan_Y])

        print("discriminator_loss : %f" % discriminator_loss)
        print("gan_loss :", gan_loss)
        print("gan_c_loss :", gan_c_loss)
        gan_loss = str(gan_loss)

        loss_file = open(model_save_dir + 'losses.txt', 'a')
        loss_file.write('epoch%d : gan_loss = %s ; discriminator_loss = %f\n' % (e, gan_loss, discriminator_loss))
        loss_file.close()

        if e % 1 == 0:
            Utils.plot_generated_images(output_dir, e, generator,complex_generator, x_test_hr, x_test_lr)
        if e % 50 == 0:
            generator.save(model_save_dir + 'gen_model%d.h5' % e)
            discriminator.save(model_save_dir + 'dis_model%d.h5' % e)
Example #7
def train(epochs, batch_size, input_dir, output_dir, model_save_dir,
          number_of_images, train_test_ratio, image_extension):

    # Loading images

    x_train_lr, x_train_hr, x_test_lr, x_test_hr = \
        Utils.load_training_data(input_dir, image_extension, image_shape, number_of_images, train_test_ratio)

    # convert to loading PATCHES
    #num_samples = dataset_info['num_samples'][1]

    print('======= Loading VGG_loss ========')
    # Loading VGG loss

    # convert to 3 channels
    #img_input = Input(shape=original_image_shape)
    #image_shape_gray = Concatenate()([img_input, img_input, img_input])
    #image_shape_gray = Concatenate()([original_image_shape, original_image_shape])
    #image_shape_gray = Concatenate()([image_shape_gray,original_image_shape])
    #image_shape = patch_shape
    #experimental_run_tf_function=False

    loss = VGG_LOSS(image_shape)  # was image_shape

    print('====== VGG_LOSS =======', loss)

    # 1 channel
    #image_shape= original_image_shape
    batch_count = int(x_train_hr.shape[0] / batch_size)
    #batch_count = int(x_train_hr_patch.shape[0] / batch_size) # for patch

    print('====== Batch_count =======', batch_count)

    shape = (image_shape[0] // downscale_factor,
             image_shape[1] // downscale_factor, image_shape[2]
             )  # commented by Furat
    #shape = (image_shape[0] // downscale_factor, image_shape[1] // downscale_factor)
    print('====== Shape =======', shape)

    # Generator description
    generator = Generator(shape).generator()

    # Discriminator description
    discriminator = Discriminator(image_shape).discriminator()

    optimizer = Utils_model.get_optimizer()

    generator.compile(loss=loss.vgg_loss, optimizer=optimizer)

    discriminator.compile(loss="binary_crossentropy", optimizer=optimizer)

    gan = get_gan_network(discriminator, shape, generator, optimizer,
                          loss.vgg_loss)

    loss_file = open(model_save_dir + 'losses.txt', 'w+')

    loss_file.close()

    ## restore the patches into 1 image:
    # x_train_hr should have a whole image insted of patches?

    ######
    # input_data= x_train_hr
    # patch_shape = train_conf['patch_shape']
    # output_shape = train_conf['output_shape']
    # num_chs = num_modalities * dataset_info['RGBch'] # number of total channels

    # if input_data.ndim == 6: # augmentation case
    #     num_samples = dataset_info['num_samples'][1]

    #num_samples = 3
    ######

    #lr_data= x_train_lr
    #hr_data= x_train_hr

    #for patch_idx in range (num_patches):
    #this_input_data = np.reshape(input_data[:,:,patch_idx], input_data.shape[:2]+input_data.shape[3:])
    #this_hr_patch, this_lr_patch = overlap_patching(gen_conf, train_conf, x_train_hr)
    #this_output_patch, out = overlap_patching(gen_conf, train_conf, output_data)

    # take patches:
    this_hr_patch = extract_2d(x_train_hr, (32, 32))
    this_lr_patch = extract_2d(x_train_lr, (32, 32))

    x_train_lr = this_lr_patch
    x_train_hr = this_hr_patch

    #convert to grayscale
    #x_train_hr= tf.image.rgb_to_grayscale(x_train_hr)
    #x_train_hr= rgb2gray(x_train_hr)
    #x_train_hr= np.concatenate(x_train_hr,1)
    #x_train_hr= np.array(x_train_hr)
    #x_train_lr= tf.image.rgb_to_grayscale(x_train_lr)
    #x_train_lr= np.array(x_train_lr)
    #x_train_lr= rgb2gray(x_train_lr)
    #x_train_lr= np.concatenate(x_train_lr,1)

    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in tqdm(range(batch_count)):
            rand_nums = np.random.randint(0,
                                          x_train_hr.shape[0],
                                          size=batch_size)

            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]
            generated_images_sr = generator.predict(image_batch_lr)

            real_data_Y = np.ones(
                batch_size) - np.random.random_sample(batch_size) * 0.2
            fake_data_Y = np.random.random_sample(batch_size) * 0.2

            discriminator.trainable = True

            d_loss_real = discriminator.train_on_batch(image_batch_hr,
                                                       real_data_Y)
            d_loss_fake = discriminator.train_on_batch(generated_images_sr,
                                                       fake_data_Y)
            discriminator_loss = 0.5 * np.add(d_loss_fake, d_loss_real)

            rand_nums = np.random.randint(0,
                                          x_train_hr.shape[0],
                                          size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]

            gan_Y = np.ones(
                batch_size) - np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = False
            gan_loss = gan.train_on_batch(image_batch_lr,
                                          [image_batch_hr, gan_Y])

        print("discriminator_loss : %f" % discriminator_loss)
        print("gan_loss :", gan_loss)
        gan_loss = str(gan_loss)

        loss_file = open(model_save_dir + 'losses.txt', 'a')
        loss_file.write('epoch%d : gan_loss = %s ; discriminator_loss = %f\n' %
                        (e, gan_loss, discriminator_loss))
        loss_file.close()

        if e == 1 or e % 5 == 0:
            Utils.plot_generated_images(output_dir, e, generator, x_test_hr,
                                        x_test_lr)
        if e % 200 == 0:
            generator.save(model_save_dir + 'gen_model%d.h5' % e)
            discriminator.save(model_save_dir + 'dis_model%d.h5' % e)
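
Example #7 calls extract_2d without defining it. A minimal sketch of a non-overlapping patch extractor matching the call shape above (32×32 patches from a batch of HWC images); the real helper may well use overlapping patches and different semantics:

import numpy as np

def extract_2d(images, patch_size):
    # Non-overlapping patches; edge remainders that don't fill a full
    # patch are dropped. Works on a batch of HWC (or HW) images.
    ph, pw = patch_size
    patches = []
    for img in images:
        for y in range(0, img.shape[0] - ph + 1, ph):
            for x in range(0, img.shape[1] - pw + 1, pw):
                patches.append(img[y:y + ph, x:x + pw])
    return np.asarray(patches)
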
Example #8
    
    plt.tight_layout()
    plt.savefig('output/gan_generated_image_epoch_%d.png' % epoch)
    

def train(epochs=1, batch_size=128):

    losses_to_file = []

    downscale_factor = 4
    
    batch_count = int(x_train_hr.shape[0] / batch_size)
    shape = (image_shape[0]//downscale_factor, image_shape[1]//downscale_factor, image_shape[2])
    
    generator = Generator(shape).generator()
    discriminator = Discriminator(image_shape).discriminator()

    adam = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    generator.compile(loss=vgg_loss, optimizer=adam)
    discriminator.compile(loss="binary_crossentropy", optimizer=adam)
    
    shape = (image_shape[0]//downscale_factor, image_shape[1]//downscale_factor, 3)
    gan = get_gan_network(discriminator, shape, generator, adam)

    for e in range(1, epochs+1):
        print ('-'*15, 'Epoch %d' % e, '-'*15)
        for _ in range(batch_count):
            
            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
            
            image_batch_hr = x_train_hr[rand_nums]
Example #9
class DIAYN:
    def __init__(self, s_dim, a_num, skill_num, hidden, lr, gamma, tau,
                 log_prob_reg, alpha, capacity, batch_size, device):
        self.s_dim = s_dim
        self.a_num = a_num
        self.skill_num = skill_num
        self.hidden = hidden
        self.lr = lr
        self.gamma = gamma
        self.tau = tau
        self.log_prob_reg = log_prob_reg
        self.alpha = alpha
        self.capacity = capacity
        self.batch_size = batch_size
        self.device = device
        self.log_pz = torch.log(
            torch.tensor(1 / skill_num, dtype=torch.float, device=device))

        # network initialization
        self.policy = Policy(s_dim, skill_num, hidden, a_num).to(device)
        self.opt_policy = torch.optim.Adam(self.policy.parameters(), lr=lr)

        self.q_net = QNet(s_dim, skill_num, hidden, a_num).to(device)
        self.opt_q_net = torch.optim.Adam(self.q_net.parameters(), lr=lr)

        self.v_net = VNet(s_dim, skill_num, hidden).to(device)
        self.v_net_target = VNet(s_dim, skill_num, hidden).to(device)
        self.v_net_target.load_state_dict(self.v_net.state_dict())
        self.opt_v_net = torch.optim.Adam(self.v_net.parameters(), lr=lr)

        self.discriminator = Discriminator(s_dim, skill_num, hidden).to(device)
        self.opt_discriminator = torch.optim.Adam(
            self.discriminator.parameters(), lr=lr)

        # replay buffer, memory
        self.memory = ReplayBuffer(capacity, batch_size, device)

    def get_action(self, s, z):
        s = torch.tensor(s, dtype=torch.float, device=self.device)
        z = torch.tensor(z, dtype=torch.float, device=self.device)
        prob = self.policy(s, z)
        dist = Categorical(prob)
        a = dist.sample()
        return a.item()

    def get_pseudo_reward(self, s, z, a, s_):
        s = torch.tensor(s, dtype=torch.float, device=self.device)
        z = torch.tensor(z, dtype=torch.float, device=self.device)
        a = torch.tensor(a, dtype=torch.long, device=self.device)
        s_ = torch.tensor(s_, dtype=torch.float, device=self.device)

        pseudo_reward = self.discriminator(s_,log=True)[z.argmax(dim=-1)] - \
                        self.log_pz + \
                        self.alpha*self.policy(s,z)[a]

        return pseudo_reward.detach().item()

    def learn(self):
        index = torch.tensor(range(self.batch_size),
                             dtype=torch.long,
                             device=self.device)
        s, z, a, s_, r, done = self.memory.get_sample()
        # soft-actor-critic update
        # update q net
        q = self.q_net(s, z)[index, a].unsqueeze(dim=-1)
        v_ = self.v_net_target(s_, z)
        q_target = r + (1 - done) * self.gamma * v_
        q_loss = F.mse_loss(q, q_target.detach())

        self.opt_q_net.zero_grad()
        q_loss.backward()
        self.opt_q_net.step()

        # update v net
        v = self.v_net(s, z)
        log_prob = self.policy(s, z, log=True)[index, a].unsqueeze(dim=-1)
        q_new = self.q_net(s, z)[index, a].unsqueeze(dim=-1)
        v_target = q_new - log_prob
        v_loss = F.mse_loss(v, v_target.detach())

        self.opt_v_net.zero_grad()
        v_loss.backward()
        self.opt_v_net.step()

        # update policy net
        policy_loss = F.mse_loss(log_prob, q_new.detach())
        self.opt_policy.zero_grad()
        policy_loss.backward()
        self.opt_policy.step()

        # update target net
        self.soft_update(self.v_net_target, self.v_net)

        # update discriminator
        log_q_zs = self.discriminator(s, log=True)
        discriminator_loss = F.nll_loss(log_q_zs, z.argmax(dim=-1))
        self.opt_discriminator.zero_grad()
        discriminator_loss.backward()
        self.opt_discriminator.step()

    def soft_update(self, target, source):
        for target_param, param in zip(target.parameters(),
                                       source.parameters()):
            target_param.data.copy_(target_param.data * (1.0 - self.tau) +
                                    param.data * self.tau)
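
A driver sketch for the DIAYN agent above, under stated assumptions: a Gym-style discrete-action environment with the pre-0.26 reset/step API, and a ReplayBuffer exposing store(s, z, a, s_, r, done) and __len__ — neither is shown in the snippet, so treat both as hypothetical:

import gym
import numpy as np

env = gym.make('CartPole-v1')  # illustrative: s_dim=4, a_num=2
agent = DIAYN(s_dim=4, a_num=2, skill_num=8, hidden=128, lr=3e-4,
              gamma=0.99, tau=0.005, log_prob_reg=1e-6, alpha=0.1,
              capacity=100000, batch_size=128, device='cpu')

for episode in range(200):
    z = np.eye(agent.skill_num)[np.random.randint(agent.skill_num)]  # one-hot skill
    s, done = env.reset(), False
    while not done:
        a = agent.get_action(s, z)
        s_, _, done, _ = env.step(a)              # environment reward is ignored
        r = agent.get_pseudo_reward(s, z, a, s_)  # diversity reward instead
        agent.memory.store(s, z, a, s_, r, done)  # assumed buffer API
        s = s_
        if len(agent.memory) >= agent.batch_size:
            agent.learn()
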
Example #10
def train(epochs, batch_size, input_dir, output_dir, model_save_dir,
          number_of_images, train_test_ratio, resume_train, downscale_factor,
          arch):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    set_session(sess)

    hr_images, hr_label, lr_images, lr_label = Utils.load_training_data(
        input_dir, number_of_images)

    print(hr_images)
    loss = VGG_LOSS(IMAGE_SHAPE)
    lr_shape = (IMAGE_SHAPE[0] // downscale_factor,
                IMAGE_SHAPE[1] // downscale_factor, IMAGE_SHAPE[2])
    print(lr_shape)
    generator = Generator(lr_shape, downscale_factor, arch).generator()
    discriminator = Discriminator(IMAGE_SHAPE).discriminator()

    optimizer = Utils_model.get_optimizer()

    if resume_train:
        last_epoch_number = Utils.get_last_epoch_number(model_save_dir +
                                                        'last_model_epoch.txt')

        gen_model = model_save_dir + arch + "_gen_model" + str(
            last_epoch_number) + ".h5"
        dis_model = model_save_dir + arch + "_dis_model" + str(
            last_epoch_number) + ".h5"
        generator.load_weights(gen_model)
        discriminator.load_weights(dis_model)

    else:
        last_epoch_number = 1

    generator.compile(loss=loss.vgg_loss, optimizer=optimizer)
    discriminator.compile(loss="binary_crossentropy", optimizer=optimizer)

    gan = gan_network(discriminator, lr_shape, generator, optimizer,
                      loss.vgg_loss)

    for e in range(last_epoch_number, last_epoch_number + epochs):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in tqdm(range(1)):

            rand_nums = np.random.randint(0,
                                          hr_images.shape[0],
                                          size=batch_size)
            image_batch_hr = hr_images[rand_nums]
            image_batch_lr = lr_images[rand_nums]
            # video_images = lr_images[0]
            generated_images = generator.predict(
                image_batch_lr)  #array of generated images

            real_data = np.ones(
                batch_size) - np.random.random_sample(batch_size) * 0.2
            fake_data = np.random.random_sample(batch_size) * 0.2

            discriminator.trainable = True

            discriminator_loss_real = discriminator.train_on_batch(
                image_batch_hr, real_data)
            discriminator_loss_fake = discriminator.train_on_batch(
                generated_images, fake_data)
            discriminator_loss = 0.5 * np.add(
                discriminator_loss_fake,
                discriminator_loss_real)  #Mean Of Discriminator Loss

            rand_nums = np.random.randint(0,
                                          hr_images.shape[0],
                                          size=batch_size)
            # re-index a fresh batch with the new indices before the GAN update
            image_batch_hr = hr_images[rand_nums]
            image_batch_lr = lr_images[rand_nums]

            discriminator.trainable = False
            gan_loss = gan.train_on_batch(image_batch_lr,
                                          [image_batch_hr, real_data])

        print("discriminator_loss : %f" % discriminator_loss)
        print("gan_loss : ", gan_loss)
        gan_loss = str(gan_loss)
        # generated_video_image = generator.predict(np.expand_dims(video_images,axis=0))
        Utils.save_losses_file(model_save_dir, e, gan_loss, discriminator_loss,
                               arch + '_losses.txt')
        # image_array.append(cv2.cvtColor(denormalize(generated_video_image[0]),cv2.COLOR_BGR2RGB))

        if e % EPOCHS_CHECKPOINT == 0:
            Utils.save_losses_file(model_save_dir, e, gan_loss,
                                   discriminator_loss, 'last_model_epoch.txt')
            generator.save(model_save_dir + arch + '_gen_model%d.h5' % e)
            discriminator.save(model_save_dir + arch + '_dis_model%d.h5' % e)
Example #11
def train(img_shape, epochs, batch_size, rescaling_factor, input_dirs,
          output_dir, model_save_dir, train_test_ratio):

    lr_shape = (img_shape[0] // rescaling_factor,
                img_shape[1] // rescaling_factor, img_shape[2])

    img_train_gen, img_test_gen = create_data_generator(
        input_dirs[1],
        input_dirs[0],
        target_size_lr=(lr_shape[0], lr_shape[1]),
        target_size_hr=(img_shape[0], img_shape[1]),
        preproc_lr=rescale_imgs_to_neg1_1,
        preproc_hr=rescale_imgs_to_neg1_1,
        validation_split=train_test_ratio,
        batch_size=batch_size)
    loss = VGG_LOSS(image_shape)

    batch_count = int(
        (len(os.listdir(os.path.join(input_dirs[1], 'ignore'))) / batch_size) *
        (1 - train_test_ratio))

    test_image = []
    for img in sorted(os.listdir(os.path.join(input_dirs[1], 'ignore'))):
        if 'niklas_city_0009' in img:
            test_image.append(
                rescale_imgs_to_neg1_1(
                    cv2.imread(os.path.join(input_dirs[1], 'ignore', img))))

    print("test length: ", len(test_image))

    generator = Generator(lr_shape, rescaling_factor).generator()
    discriminator = Discriminator(img_shape).discriminator()

    print('memory usage generator: ',
          get_model_memory_usage(batch_size, generator))
    print('memory usage discriminator: ',
          get_model_memory_usage(batch_size, discriminator))

    optimizer = Utils_model.get_optimizer()

    try:
        print("multi_gpu_model generator")
        par_generator = multi_gpu_model(generator, gpus=2)
    except:
        par_generator = generator
        print("single_gpu_model generator")

    try:
        print("multi_gpu_model discriminator")
        par_discriminator = multi_gpu_model(discriminator, gpus=2)
    except:
        par_discriminator = discriminator
        print("single_gpu_model discriminator")

    par_generator.compile(loss=loss.loss, optimizer=optimizer)
    par_discriminator.compile(loss="binary_crossentropy", optimizer=optimizer)

    gan, par_gan = get_gan_network(par_discriminator, lr_shape, par_generator,
                                   optimizer, loss.loss, batch_size)

    par_discriminator.summary()
    par_generator.summary()
    par_gan.summary()

    loss_file = open(model_save_dir + 'losses.txt', 'w+')
    loss_file.close()

    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)

        if e == 100:
            optimizer.lr = 1e-5

        for i in tqdm(range(batch_count)):

            batch = next(img_train_gen)
            image_batch_hr = batch[1]
            image_batch_lr = batch[0]
            generated_images_sr = generator.predict(image_batch_lr)

            real_data_Y = np.ones(batch_size) - \
                np.random.random_sample(batch_size)*0.2
            fake_data_Y = np.random.random_sample(batch_size) * 0.2

            par_discriminator.trainable = True

            if image_batch_hr.shape[0] == batch_size and image_batch_lr.shape[
                    0] == batch_size:
                d_loss_real = par_discriminator.train_on_batch(
                    image_batch_hr, real_data_Y)
                d_loss_fake = par_discriminator.train_on_batch(
                    generated_images_sr, fake_data_Y)
                discriminator_loss = 0.5 * np.add(d_loss_fake, d_loss_real)
            else:
                print("weird multi_gpu_model batch error dis: ")
                print("hr batch shape: ", image_batch_hr.shape)
                print("lr batch shape: ", image_batch_lr.shape)
                print("real y shape: ", real_data_Y.shape)

            batch = next(img_train_gen)
            image_batch_hr = batch[1]
            image_batch_lr = batch[0]

            gan_Y = np.ones(batch_size) - \
                np.random.random_sample(batch_size)*0.2
            par_discriminator.trainable = False

            if image_batch_hr.shape[0] == batch_size and image_batch_lr.shape[
                    0] == batch_size:
                gan_loss = par_gan.train_on_batch(image_batch_lr,
                                                  [image_batch_hr, gan_Y])
            else:
                print("weird multi_gpu_model batch error gan: ")
                print("hr batch shape: ", image_batch_hr.shape)
                print("lr batch shape: ", image_batch_lr.shape)
                print("gan y shape: ", gan_Y.shape)

        print("discriminator_loss : %f" % discriminator_loss)
        print("gan_loss :", gan_loss)
        gan_loss = str(gan_loss)

        loss_file = open(model_save_dir + '_losses.txt', 'a')
        loss_file.write('epoch%d : gan_loss = %s ; discriminator_loss = %f\n' %
                        (e, gan_loss, discriminator_loss))
        loss_file.close()

        if e == 1 or e % 5 == 0:
            Utils.generate_test_image(output_dir, e, generator, test_image)
        if e % 5 == 0:
            generator.save(os.path.join(model_save_dir, 'gen_model%d.h5' % e))
            discriminator.save(
                os.path.join(model_save_dir, 'dis_model%d.h5' % e))
Example #12
        shuffle=True,
        num_workers=2,
        pin_memory=True)
    testloader = torch.utils.data.DataLoader(testSet,
                                             batch_size=1,
                                             shuffle=True,
                                             num_workers=2,
                                             pin_memory=True)

    print('Dataset initialized')

    device = torch.device(settings['device'])

    # compare the setting string itself: torch.device('cpu') == "cpu" is not
    # reliable across PyTorch versions
    if settings['device'] == "cpu":
        G = Generator(noise_dim=64, num_classes=100)
        D = Discriminator()
    else:
        G = torch.nn.DataParallel(Generator(noise_dim=64,
                                            num_classes=100)).cuda()
        D = torch.nn.DataParallel(Discriminator()).cuda()

    print('Network created')

    optimizer_G = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                          G.parameters()),
                                   lr=1e-4)
    optimizer_D = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                          D.parameters()),
                                   lr=1e-4)

    print('Optimizer created')
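
The snippet ends after creating the two Adam optimizers. A generic single-step sketch of how such a pair is typically driven, assuming G(noise, labels) and D(images) signatures with logit outputs of shape (batch, 1) and BCE losses; the repo's actual update code is not shown, so this is an assumption throughout:

import torch
import torch.nn.functional as F

def gan_step(G, D, optimizer_G, optimizer_D, real_images, labels, noise_dim=64):
    device = real_images.device
    batch = real_images.size(0)
    ones = torch.ones(batch, 1, device=device)
    zeros = torch.zeros(batch, 1, device=device)
    noise = torch.randn(batch, noise_dim, device=device)

    # Discriminator update: real -> 1, fake -> 0 (fakes detached from G's graph).
    optimizer_D.zero_grad()
    fake_images = G(noise, labels)
    d_loss = F.binary_cross_entropy_with_logits(D(real_images), ones) + \
             F.binary_cross_entropy_with_logits(D(fake_images.detach()), zeros)
    d_loss.backward()
    optimizer_D.step()

    # Generator update: push D's score on the fakes toward 1.
    optimizer_G.zero_grad()
    g_loss = F.binary_cross_entropy_with_logits(D(fake_images), ones)
    g_loss.backward()
    optimizer_G.step()
    return d_loss.item(), g_loss.item()
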