Exemplo n.º 1
0
    def __init__(self, input_dimension_size, num_train_images, data_dir, device):
        """Set up the GAN trainer state.

        Builds the generator/discriminator pair on *device*, applies the
        shared normal weight initialization, and prepares a fixed latent
        batch used for consistent progress sampling.

        Args:
            input_dimension_size: base dimension used by both networks and
                by the latent vector size (25 * input_dimension_size**2).
            num_train_images: number of training images available.
            data_dir: directory holding the training data.
            device: torch device that models and the fixed noise live on.
        """
        self.device = device
        self.input_dimension_size = input_dimension_size
        self.data_dir = data_dir
        self.num_train_images = num_train_images

        # Instantiate both networks on the target device, then apply the
        # shared normal-distribution weight initialization to each.
        self.generator = Generator(self.input_dimension_size).to(self.device)
        self.discriminator = Discriminator(self.input_dimension_size).to(self.device)
        self.generator.apply(init_weights_normal)
        self.discriminator.apply(init_weights_normal)

        self.level = 1
        self.samples = {}

        # Better to sample from a gaussian distribution
        self.fixed_z = torch.randn(16, 25 * self.input_dimension_size ** 2).to(self.device)
Exemplo n.º 2
0
        src_path=os.path.join(data_PATH, "cartoon/1"),
        dst_path=os.path.join(data_PATH, "edge_smoothing/0"),
    )

# Datasets: real photos, raw cartoon frames, edge-smoothed cartoons, and a
# held-out validation set — each read from a subfolder of data_PATH with the
# matching transform pipeline.
data_real = ImageFolder(os.path.join(data_PATH, "real"), real_transform)
data_cartoon = ImageFolder(os.path.join(data_PATH, "cartoon"), cartoon_transform)
data_no_edge = ImageFolder(os.path.join(data_PATH, "edge_smoothing"), cartoon_transform)
data_validation = ImageFolder(os.path.join(data_PATH, "validation"), real_transform)

# Training loaders shuffle and drop the last partial batch so every batch
# has exactly batch_size samples; validation runs one image at a time, in
# order, keeping every sample.
loader_real = DataLoader(data_real, batch_size=batch_size, shuffle=True, drop_last=True)
loader_cartoon = DataLoader(data_cartoon, batch_size=batch_size, shuffle=True, drop_last=True)
loader_no_edge = DataLoader(data_no_edge, batch_size=batch_size, shuffle=True, drop_last=True)
loader_validation = DataLoader(data_validation, batch_size=1, shuffle=False, drop_last=False)

# Models ======================================================================
G = Generator()
D = Discriminator()
# Pretrained VGG19 used as a fixed perceptual-feature extractor.
P_VGG19 = VGG19(init_weights_path=vgg_PATH)
# P_Inception_v3 = Inception_v3(init_weights_path=inception_PATH)

G.to(device)
D.to(device)
P_VGG19.to(device)
# P_Inception_v3.to(device)

# G and D are trained; the perceptual network stays frozen in eval mode.
G.train()
D.train()
P_VGG19.eval()
# P_Inception_v3.eval()

# Loss ========================================================================
Exemplo n.º 3
0
# rolls = load_data('test2')
# fig = piano.plot(rolls[1])
# plt.show()
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

# data = load_data("test6")
#
# np.save('datatest',data)
# print(np.asarray(data).shape)
# print(sum(data))
# Load dataset 'C', persist one sample for inspection, and label everything
# as real (1) for discriminator training.
data = load_data('C')
np.save("train_sample", data[2])
print(data.shape)
labels = np.ones(np.asarray(data).shape[0])
generator = Generator()
# plot_model needs the Graphviz `dot` binary; prepend its install dir to PATH
# (Windows-specific path — NOTE(review): breaks silently on other machines).
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
# Build with an explicit input shape so summary()/plot_model can resolve layer
# shapes before any data is seen.
generator.build((256, 100))
print(generator.summary())
plot_model(generator.noise_decoder,
           to_file='gen_plot.png',
           show_shapes=True,
           show_layer_names=True,
           dpi=1000)
discriminator = Discriminator()
discriminator.build((256, 5, 4, 96, 84))
print(discriminator.summary())

plot_model(discriminator.image_encoder,
           to_file='dis_plot.png',
           show_shapes=True,
Exemplo n.º 4
0
print('Batch size - {}'.format(batch_size))

generator_loss_hyperparams = config.generator_stabilizing_hyperparams
discriminator_loss_hyperparams = config.discriminator_stabilizing_hyperparams

# wgan setting: WGAN must be enabled for both networks or for neither — a
# mixed configuration is unsupported.
if (generator_loss_hyperparams['loss'] == 'wgan' and discriminator_loss_hyperparams['loss'] != 'wgan') or \
    (generator_loss_hyperparams['loss'] != 'wgan' and discriminator_loss_hyperparams['loss'] == 'wgan'):
    raise NotImplementedError

wgan_clip_size = discriminator_loss_hyperparams['wgan_clip_size']
# BUG FIX: the original compared the discriminator hyperparameter *dict*
# itself to 'wgan' (always False), so is_wgan could never be True. Compare
# its 'loss' entry instead, mirroring the generator-side check.
is_wgan = generator_loss_hyperparams[
    'loss'] == 'wgan' and discriminator_loss_hyperparams['loss'] == 'wgan'

# Initialize generator, discriminator and RoI generator
generator = Generator(**config.gen_hyperparams)
discriminator = Discriminator(dis_n_features, is_wgan, noise_hyperparams)

# locals()[roi_function] resolves the RoI sampling function by its configured
# name — NOTE(review): assumes the function was defined earlier in this scope.
roi = RoI(image_size, locals()[roi_function], len(train_data))
roi_loader = DataLoader(roi,
                        batch_size=batch_size,
                        shuffle=False,
                        num_workers=5)

from utils.functions import count_parameters
if print_summary:
    print('Generator:')
    count_parameters(generator)
    #summary(generator, [(gen_n_input, 1, 1), (1, 64, 64)], device='cpu')
    print('Discriminator')
    count_parameters(discriminator)
Exemplo n.º 5
0
def get_generator(device, num_gpu, num_input):
    """Construct a Generator on *device* and finish setup via init_model.

    Args:
        device: target torch device for the model.
        num_gpu: GPU count forwarded to init_model.
        num_input: input size passed to the Generator constructor.

    Returns:
        The model as returned by init_model.
    """
    net = Generator(num_input=num_input)
    net = net.to(device)
    return init_model(net, device, num_gpu)
Exemplo n.º 6
0
# Evaluation driver: load generator checkpoints produced by several training
# methods and score them across epochs. (The innermost loop body continues
# beyond this chunk.)
if __name__ == '__main__':
    # Number of repeated experiment runs per method/epoch.
    exp_num = 3
    metaCollector = MetaFeaturesCollector(16, 64)
    metaCollector.train(f"../processed_data/processed_16_64_2/")
    lambdas = LambdaFeaturesCollector(16, 64)
    loader = get_loader(f"../processed_data/test/",
                        16,
                        64,
                        2,
                        metaCollector,
                        lambdas,
                        100,
                        5,
                        train_meta=False)
    generator = Generator(16, 64, 2, metaCollector.getLength(), 100)
    # Checkpoint directory prefixes, one per training method variant.
    methods = [
        'models_base', 'models_diag', 'models_corp', 'models_cors',
        'models_tspg', 'models_tsph'
    ]
    methods_results = []
    for w in range(len(methods)):
        print("Method " + methods[w])
        epoch_results = []
        # Checkpoints were saved every 5 epochs, from epoch 5 through 50.
        for j in range(5, 55, 5):
            meta_results = []
            for i in range(exp_num):
                # Load run i's weights for epoch j and switch to eval mode.
                generator.load_state_dict(
                    torch.load(f'./{methods[w]}{i}/generator-16_64_2-{j}.pkl'))
                generator.eval()