Example #1
    def __init__(self,
                 _,
                 gan=None,
                 config=None,
                 g_optimizer=None,
                 d_optimizer=None,
                 name="GANOptimizer"):
        super().__init__(config.learn_rate, name=name)
        self.gan = gan
        self.config = config

        def create_optimizer(klass, options):
            options['gan'] = self.gan
            options['config'] = options
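            # Keep only the kwargs that klass's constructor actually accepts.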
            defn = {
                k: v
                for k, v in options.items()
                if k in inspect.getargspec(klass).args
            }
            return klass(options.learn_rate, **defn)

        d_optimizer = hc.lookup_functions(d_optimizer)
        g_optimizer = hc.lookup_functions(g_optimizer)

        self.d_optimizer = create_optimizer(d_optimizer["class"], d_optimizer)
        self.g_optimizer = create_optimizer(g_optimizer["class"], g_optimizer)
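All of these examples lean on `hc.lookup_functions` from hyperchamber to turn the string values of a JSON-style config into live callables before a component class is instantiated. A minimal sketch of the idea, assuming values of the form `"function:module.attr"` or `"class:module.Attr"` (the prefixes and traversal details here are assumptions, not taken from the snippets):

import importlib

def lookup_functions_sketch(config):
    # Resolve "function:pkg.mod.attr" / "class:pkg.mod.Attr" strings to objects.
    resolved = dict(config)
    for key, value in resolved.items():
        if isinstance(value, str) and value.split(":", 1)[0] in ("function", "class"):
            module_name, attr = value.split(":", 1)[1].rsplit(".", 1)
            resolved[key] = getattr(importlib.import_module(module_name), attr)
    return resolved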
Example #2
  def __init__(self, learning_rate=0.001, p=0.1, gan=None, config=None, use_locking=False, name="GigaWolfOptimizer", optimizer=None, optimizer2=None):
    super().__init__(use_locking, name)
    self.gan = gan
    self.config = config
    self._lr_t = learning_rate

    optimizer = hc.lookup_functions(optimizer)
    self.optimizer = self.gan.create_optimizer(optimizer)
    optimizer2 = hc.lookup_functions(optimizer2)
    self.optimizer2 = self.gan.create_optimizer(optimizer2)
Example #4
    def create(self):
        config = self.config
        g_lr = config.g_learn_rate
        d_lr = config.d_learn_rate
        self.create_called = True
        self.global_step = tf.train.get_global_step()
        self.d_lr = d_lr
        self.g_lr = g_lr
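        # Instantiate each configured train hook, passing only the kwargs its
        # constructor accepts, and fold the hook's losses into the GAN loss.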
        for hook_config in (config.hooks or []):
            hook_config = hc.lookup_functions(hook_config.copy())
            defn = {
                k: v
                for k, v in hook_config.items()
                if k in inspect.getargspec(hook_config['class']).args
            }
            defn['gan'] = self.gan
            defn['config'] = hook_config
            defn['trainer'] = self
            hook = hook_config["class"](**defn)
            self.gan.components += [hook]
            losses = hook.losses()
            if losses[0] is not None:
                self.gan.loss.sample[0] += losses[0]
            if losses[1] is not None:
                self.gan.loss.sample[1] += losses[1]
            self.train_hooks.append(hook)

        result = self._create()

        for hook in self.train_hooks:
            hook.after_create()
Example #5
    def __init__(self,
                 gan,
                 config,
                 d_vars=None,
                 g_vars=None,
                 loss=None,
                 name="BaseTrainer"):
        self.current_step = 0
        self.g_vars = g_vars
        self.d_vars = d_vars
        self.loss = loss
        self.d_shake = None
        self.g_shake = None
        self.train_hooks = []
        for hook_config in (config.hooks or []):
            hook_config = hc.lookup_functions(hook_config.copy())
            defn = {
                k: v
                for k, v in hook_config.items()
                if k in inspect.getargspec(hook_config['class']).args
            }
            defn['gan'] = gan
            defn['config'] = hook_config
            defn['trainer'] = self
            hook = hook_config["class"](**defn)
            losses = hook.losses()
            if losses[0] is not None:
                self.loss.sample[0] += losses[0]
            if losses[1] is not None:
                self.loss.sample[1] += losses[1]
            self.train_hooks.append(hook)

        GANComponent.__init__(self, gan, config, name=name)
Example #6
    def __init__(self,
                 learning_rate=0.001,
                 p=0.1,
                 gan=None,
                 config=None,
                 use_locking=False,
                 name="SOSOptimizer",
                 optimizer=None,
                 alpha=1):
        super().__init__(use_locking, name)
        self._alpha = alpha
        self.gan = gan
        self.config = config
        self._lr_t = learning_rate

        def create_optimizer(klass, options):
            options['gan'] = self.gan
            options['config'] = options
            defn = {
                k: v
                for k, v in options.items()
                if k in inspect.getargspec(klass).args
            }
            learn_rate = options.learn_rate or options.learning_rate
            if 'learning_rate' in defn:
                del defn['learning_rate']
            return klass(learn_rate, **defn)

        optimizer = hc.lookup_functions(optimizer)
        self.optimizer = create_optimizer(optimizer['class'], optimizer)
Example #7
    def create(self):
        config = self.config
        g_lr = config.g_learn_rate
        d_lr = config.d_learn_rate
        self.create_called = True
        self.global_step = tf.train.get_global_step()
        self.d_lr = d_lr
        self.g_lr = g_lr
        for hook_config in (config.hooks or []):
            hook_config = hc.lookup_functions(hook_config.copy())
            defn = {
                k: v
                for k, v in hook_config.items()
                if k in inspect.getargspec(hook_config['class']).args
            }
            defn['gan'] = self.gan
            defn['config'] = hook_config
            defn['trainer'] = self
            hook = hook_config["class"](**defn)
            self.gan.components += [hook]
            losses = hook.losses()
            if losses[0] is not None:
                self.gan.loss.sample[0] += losses[0]
            if losses[1] is not None:
                self.gan.loss.sample[1] += losses[1]
            self.train_hooks.append(hook)
 
        result = self._create()

        for hook in self.train_hooks:
            hook.after_create()
Example #8
    def __init__(self,
                 learning_rate=0.001,
                 decay=0.9,
                 gan=None,
                 config=None,
                 use_locking=False,
                 name="EmaOptimizer",
                 optimizer=None):
        super().__init__(use_locking, name)
        self._decay = decay
        self.gan = gan
        self.config = config
        self.name = name

        def create_optimizer(klass, options):
            options['gan'] = self.gan
            options['config'] = options
            defn = {
                k: v
                for k, v in options.items()
                if k in inspect.getargspec(klass).args
            }
            return klass(options.learn_rate, **defn)

        optimizer = hc.lookup_functions(optimizer)
        self.optimizer = create_optimizer(optimizer['class'], optimizer)
Example #9
    def discriminator(self, x, f, z, g, gz):
        config = self.gan.config
        batch_size = config['batch_size']*2
        single_batch_size = config['batch_size']
        channels = (config['channels'])
        # combine to one batch, per Ian's "Improved GAN"
        xs = [x]
        gs = g
        set_tensor("xs", xs)
        set_tensor("gs", gs)
        g = g[-1]
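        # Downsample x repeatedly so there is one real image per generator scale.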
        for i in gs:
            resized = tf.image.resize_images(xs[-1],[int(xs[-1].get_shape()[1]//2),int(xs[-1].get_shape()[2]//2)], 1)
            xs.append(resized)
        xs.pop()
        gs.reverse()

        # careful on order.  See https://arxiv.org/pdf/1606.00704v1.pdf
        z = tf.concat(axis=0, values=[z, gz])

        discriminators = []
        for i, discriminator in enumerate(config['discriminators']):
            discriminator = hc.lookup_functions(discriminator)
            discriminators.append(discriminator['create'](self.gan, discriminator, x, g, xs, gs,prefix="d_"+str(i)))
        net = tf.concat(axis=1, values=discriminators)

        last_layer = net
        last_layer = tf.reshape(last_layer, [batch_size, -1])
        last_layer = tf.slice(last_layer, [single_batch_size, 0], [single_batch_size, -1])


        d_real = tf.reshape(net, [batch_size, -1])
        d_real = tf.slice(net, [0, 0], [single_batch_size, -1])
        d_fake = tf.reshape(net, [batch_size, -1])
        d_fake = tf.slice(net, [single_batch_size, 0], [single_batch_size, -1])
        if config['y_dims'] == 1:
            dr_class=None
            dr_logits=None
            df_class=None
            df_logits=None

        else:
            num_classes = config['y_dims']+1
            net = linear(net, num_classes, scope="d_fc_end", stddev=0.003)
            net = layer_norm_1(batch_size*2, name='d_bn_end')(net)
            class_logits = tf.slice(net, [0,1], [single_batch_size*2,num_classes-1])
            gan_logits = tf.squeeze(tf.slice(net, [0,0], [single_batch_size*2,1]))
            dr_class=tf.slice(class_logits, [0, 0], [single_batch_size, num_classes-1])
            dr_logits=tf.slice(gan_logits, [0], [single_batch_size])
            df_class=tf.slice(class_logits, [single_batch_size, 0], [single_batch_size, num_classes-1])
            df_logits=tf.slice(gan_logits, [single_batch_size], [single_batch_size])

        return [dr_class,dr_logits,df_class,df_logits, last_layer, d_real, d_fake]
Example #10
    def create_z_encoding(self):
        self.gan.graph.z = []
        encoders = []
        for i, encoder in enumerate(self.gan.config.encoders):
            encoder = hc.Config(hc.lookup_functions(encoder))
            zs, z_base = encoder.create(encoder, self.gan)
            encoders.append(zs)
            self.gan.graph.z.append(z_base)

        z_encoded = tf.concat(axis=1, values=encoders)
        self.gan.graph.z_encoded = z_encoded

        return z_encoded
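The encoder entries resolved above follow the same convention as every other component here: a dict whose `create` value becomes a callable taking the resolved config and the gan. A hypothetical raw entry, with an illustrative module path that is not from the snippets:

encoder_config = {
    "create": "function:myproject.encoders.uniform.create",  # illustrative path only
    "z": 100,  # hypothetical encoder parameter
}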
Example #11
 def create_optimizer(self, options):
     options = hc.lookup_functions(options)
     klass = options['class']
     newopts = options.copy()
     newopts['gan'] = self.gan
     newopts['config'] = options
     defn = {k: v for k, v in newopts.items() if k in inspect.getargspec(klass).args}
     # The learning rate is passed positionally, so drop it from the kwargs.
     learn_rate = options.learn_rate or options.learning_rate
     if 'learning_rate' in defn:
         del defn['learning_rate']
     gan_component = klass(learn_rate, **defn)
     self.components.append(gan_component)
     return gan_component
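A hypothetical call site for `create_optimizer`, showing what the argspec filter keeps and drops; the config keys and the TF1 optimizer class are chosen for illustration:

options = hc.Config({
    "class": tf.train.AdamOptimizer,  # normally a "class:..." string that hc.lookup_functions resolves
    "learn_rate": 1e-4,               # passed positionally below
    "beta1": 0.5,                     # in AdamOptimizer's argspec, so it survives the filter
    "description": "adam",            # not in the argspec, so it is dropped from defn
})
adam = gan.create_optimizer(options)  # ~ tf.train.AdamOptimizer(1e-4, beta1=0.5)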
Example #13
    def generator(self, z, reuse=False):
        config = self.gan.config
        x_dims = config.x_dims
        output_channels = config.channels
        batch_size = config.batch_size

        with (tf.variable_scope("generator", reuse=reuse)):

            if 'y' in self.gan.graph:
                z = tf.concat(axis=1, values=[z, self.gan.graph.y])

            generator = hc.Config(hc.lookup_functions(config.generator))
            nets = generator.create(generator, self.gan, z)

            return nets
Example #14
    def discriminator(self, x, f, z, g, gz):
        config = self.gan.config
        graph = self.gan.graph
        batch_size = config.batch_size * 2
        single_batch_size = config.batch_size
        channels = config.channels
        # combine to one batch, per Ian's "Improved GAN"
        xs = [x]
        gs = g
        graph.xs = xs
        graph.gs = gs
        g = g[-1]
        if len(gs) > 1:
            for i in gs:
                resized = tf.image.resize_images(xs[-1], [
                    int(xs[-1].get_shape()[1] // 2),
                    int(xs[-1].get_shape()[2] // 2)
                ], 1)
                xs.append(resized)
            xs.pop()
            gs.reverse()

        discriminators = []
        for i, discriminator in enumerate(config.discriminators):
            discriminator = hc.Config(hc.lookup_functions(discriminator))
            with (tf.variable_scope("discriminator")):
                discriminators.append(
                    discriminator.create(self.gan,
                                         discriminator,
                                         x,
                                         g,
                                         xs,
                                         gs,
                                         prefix="d_" + str(i)))

        def split_d(net, i):
            net = tf.slice(net, [single_batch_size * i, 0],
                           [single_batch_size, -1])
            return net

        d_reals = [split_d(x, 0) for x in discriminators]
        d_fakes = [split_d(x, 1) for x in discriminators]
        net = tf.concat(axis=1, values=discriminators)

        d_real = split_d(net, 0)
        d_fake = split_d(net, 1)

        return [d_real, d_fake, d_reals, d_fakes]
Example #15
    def step(self):
        trainer = hc.Config(hc.lookup_functions(self.config['trainer']))
        d_loss, g_loss = trainer.run(self.gan)

        if (self.steps > 1 and (self.steps % self.args.sample_every == 0)):
            sample_file = "samples/%06d.png" % (self.sampled)
            self.create_path(sample_file)
            print(str(self.steps) + ":", "Sample created " + sample_file)
            sample_list = self.sample(sample_file)
            if self.args.use_hc_io:
                hc.io.sample(self.config, sample_list)

            self.sampled += 1

        self.steps += 1
        return True
Example #16
def discriminator(config, x, f, z, g, gz):
    batch_size = config['batch_size']*2
    single_batch_size = config['batch_size']
    channels = (config['channels'])
    # combine to one batch, per Ian's "Improved GAN"
    xs = [x]
    gs = g
    set_tensor("xs", xs)
    set_tensor("gs", gs)
    g = g[-1]
    for i in gs:
        resized = tf.image.resize_images(xs[-1],[int(xs[-1].get_shape()[1]//2),int(xs[-1].get_shape()[2]//2)], 1)
        xs.append(resized)
    xs.pop()
    gs.reverse()

    # careful on order.  See https://arxiv.org/pdf/1606.00704v1.pdf
    z = tf.concat(axis=0, values=[z, gz])

    discriminators = []
    for i, discriminator in enumerate(config['discriminators']):
        discriminator = hc.lookup_functions(discriminator)
        discriminators.append(discriminator['create'](config, discriminator, x, g, xs, gs,prefix="d_"+str(i)))
    net = tf.concat(axis=1, values=discriminators)

    last_layer = net
    last_layer = tf.reshape(last_layer, [batch_size, -1])
    last_layer = tf.slice(last_layer, [single_batch_size, 0], [single_batch_size, -1])


    num_classes = config['y_dims']+1
    if config['y_dims'] == 1:
        net = linear(net, 1, scope="d_fc_end", stddev=0.003)
        class_logits = net
        gan_logits = tf.squeeze(net)

    else:
        net = linear(net, num_classes, scope="d_fc_end", stddev=0.003)
        class_logits = tf.slice(net, [0,1], [single_batch_size*2,num_classes-1])
        gan_logits = tf.squeeze(tf.slice(net, [0,0], [single_batch_size*2,1]))

    return [tf.slice(class_logits, [0, 0], [single_batch_size, num_classes-1]),
                tf.slice(gan_logits, [0], [single_batch_size]),
                tf.slice(class_logits, [single_batch_size, 0], [single_batch_size, num_classes-1]),
                tf.slice(gan_logits, [single_batch_size], [single_batch_size]), 
                last_layer]
Example #17
    def __init__(self,
                 learning_rate=0.001,
                 p=0.1,
                 gan=None,
                 config=None,
                 use_locking=False,
                 name="CurlOptimizer",
                 optimizer=None,
                 rho=1,
                 beta=1,
                 gamma=1):
        super().__init__(use_locking, name)
        self._beta = beta
        self._rho = rho
        self._gamma = gamma
        self.gan = gan
        self.config = config
        self._lr_t = learning_rate
        self.g_rho = gan.configurable_param(self.config.g_rho)
        self.d_rho = gan.configurable_param(self.config.d_rho)
        if tf.contrib.framework.is_tensor(self.g_rho):
            self.gan.add_metric("g_rho", self.g_rho)
        if tf.contrib.framework.is_tensor(self.d_rho):
            self.gan.add_metric("d_rho", self.d_rho)

        def create_optimizer(klass, options):
            options['gan'] = self.gan
            options['config'] = options
            defn = {
                k: v
                for k, v in options.items()
                if k in inspect.getargspec(klass).args
            }
            learn_rate = options.learn_rate or options.learning_rate
            if 'learning_rate' in defn:
                del defn['learning_rate']
            return klass(learn_rate, **defn)

        optimizer = hc.lookup_functions(optimizer)
        self.optimizer = create_optimizer(optimizer['class'], optimizer)
Example #18
 def get_config_value(self, symbol):
     if symbol in self.config:
         config = hc.Config(hc.lookup_functions(self.config[symbol]))
         return config
     return None
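A hypothetical use of this helper, following the `create(config, gan)` convention the other examples use; the `"loss"` key and the surrounding names are assumptions:

loss = component.get_config_value("loss")  # component: any object with this helper
if loss is not None:
    d_loss, g_loss = loss.create(loss, gan)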
Example #19
    def _create(self):
        gan = self.gan
        config = self.config

        d_vars = gan.d_vars()
        g_vars = gan.g_vars()

        d_vars = list(set(d_vars).intersection(tf.trainable_variables()))
        g_vars = list(set(g_vars).intersection(tf.trainable_variables()))

        loss = self.gan.loss
        d_loss, g_loss = loss.sample

        allloss = d_loss + g_loss

        allvars = d_vars + g_vars

        d_grads = tf.gradients(d_loss, d_vars)
        g_grads = tf.gradients(g_loss, g_vars)


        grads = d_grads + g_grads

        self.d_log = -tf.log(tf.abs(d_loss+TINY))
        for g, v in zip(grads, allvars):
            if g is None:
                print("!!missing gradient")
                print(v)
                return
        apply_vec = []
        apply_vec_d = []
        apply_vec_g = []
        for (i, grad, v) in zip(range(len(grads)), grads, allvars): 

            if grad is None:
                print("WARNING: grad none", grad, v)
            else:
                apply_vec.append((grad, v))
                if v in d_vars:
                    apply_vec_d.append((grad, v))
                else:
                    apply_vec_g.append((grad, v))

        optimizer = hc.lookup_functions(config.optimizer)
        optimizer['gan'] = self.gan
        optimizer['config'] = optimizer
        defn = {k: v for k, v in optimizer.items() if k in inspect.getargspec(optimizer['class']).args}
        lr = optimizer.learn_rate or optimizer.learning_rate
        if 'learning_rate' in defn:
            del defn['learning_rate']
        tr = optimizer['class'](lr, **defn)
        self.optimizer = tr

        self.gan.trainer = self
        self.g_loss = g_loss
        self.d_loss = d_loss

        self.gan.optimizer = tr

        optimize_t = tr.apply_gradients(apply_vec, global_step=self.global_step)
        d_optimize_t = tr.apply_gradients(apply_vec_d, global_step=self.global_step)

        self.past_weights = []

        self.g_loss = g_loss
        self.d_loss = d_loss
        self.slot_vars = tr.variables()

            
        def _slot_var(x, g_vars):
            for g in g_vars:
                if x.name.startswith(g.name.split(":")[0]):
                    return True
            return False
        self.slot_vars_g = [x for x in self.slot_vars if _slot_var(x, g_vars)]
        self.slot_vars_d = [x for x in self.slot_vars if _slot_var(x, d_vars)]

        self.optimize_t = optimize_t
        self.d_optimize_t = d_optimize_t
        self.min_fitness=None
        
        print("CONFIG ", config)
        if config.fitness_type is not None:
            mean = tf.zeros([1])
            used_grads = d_grads
            if config.grad_type == "sum":
                for g in used_grads:
                    mean += tf.reduce_sum(tf.abs(g))
            else:
                for g in used_grads:
                    mean += tf.reduce_mean(tf.abs(g))
                mean/=len(used_grads)
            self.mean=mean
            #self.mean=mean*100
            if config.fitness_type == 'g_loss':
                self.g_fitness = g_loss - (config.diversity_importance or 1) * tf.log(tf.abs(self.mean + d_loss - g_loss))
            elif(config.fitness_type == 'gradient-only'):
                self.g_fitness = -tf.log(reg)  # NOTE: `reg` is not defined in this snippet
            elif(config.fitness_type == 'grads'):
                self.g_fitness = mean
            elif(config.fitness_type == 'point'):
                self.g_fitness = mean - 1000*d_loss + 1000*g_loss
            elif(config.fitness_type == 'fail'):
                self.g_fitness = -mean
            elif(config.fitness_type == 'fail2'):
                self.g_fitness = -loss.d_fake
            elif(config.fitness_type == 'fail3'):
                self.g_fitness = -g_loss
            elif(config.fitness_type == 'fail2-reverse'):
                self.g_fitness = loss.d_fake
            elif(config.fitness_type == 'ls'):
                a,b,c = loss.config.labels
                self.g_fitness = tf.square(loss.d_fake-a)
            elif(config.fitness_type == 'ls-r'):
                a,b,c = loss.config.labels
                self.g_fitness = -tf.square(loss.d_fake-a)
            elif(config.fitness_type == 'ls2'):
                a,b,c = loss.config.labels
                self.g_fitness = tf.square(loss.d_fake-c)
            elif(config.fitness_type == 'ls2-r'):
                a,b,c = loss.config.labels
                self.g_fitness = -tf.square(loss.d_fake-c)
            elif(config.fitness_type == 'std'):
                self.g_fitness = -tf.nn.sigmoid(loss.d_fake)
            elif(config.fitness_type == 'ls3'):
                self.g_fitness = 1-loss.d_fake
            elif(config.fitness_type == 'ls4'):
                self.g_fitness = loss.d_real-loss.d_fake
            elif(config.fitness_type == 'ls5'):
                self.g_fitness = tf.square(loss.d_real)-tf.square(loss.d_fake)
            elif(config.fitness_type == 'fq1'):
                lam = 0.1
                self.g_fitness = -loss.d_fake-lam*mean
            elif(config.fitness_type == 'fq2'):
                lam = 0.1
                self.g_fitness = loss.d_real-loss.d_fake-lam*mean
            elif(config.fitness_type == 'fq3'):
                lam = 1
                self.g_fitness = loss.d_real-loss.d_fake+lam*mean
            elif(config.fitness_type == 'fq4'):
                lam = 1
                self.g_fitness = -loss.d_fake+lam*mean
            elif(config.fitness_type == 'fq5'):
                lam = 1
                self.g_fitness = -loss.d_fake-lam*tf.norm(mean)
            elif(config.fitness_type == 'fq6'):
                lam = 0.1
                self.g_fitness = -loss.d_fake-lam*tf.norm(mean+d_loss)
            elif(config.fitness_type == 'fq7'):
                lam = 0.1
                self.g_fitness = -loss.d_fake-lam*tf.norm(-mean-d_loss)
            elif(config.fitness_type == 'fq8'):
                lam = 0.1
                self.g_fitness = -tf.norm(mean+d_loss)
            elif(config.fitness_type == 'fq9'):
                lam = 0.1
                self.g_fitness = lam*mean
            elif(config.fitness_type == 'fq10'):
                lam = 0.1
                self.g_fitness = tf.norm(mean+d_loss)
            elif(config.fitness_type == 'fq11'):
                lam = 100.00
                self.fq = -loss.d_fake
                self.fd = lam * mean
                self.g_fitness = -loss.d_fake + lam * mean
            elif(config.fitness_type == 'ls3-fail'):
                self.g_fitness = -(1-loss.d_fake)
            elif(config.fitness_type == 'gldl'):
                self.g_fitness = -d_loss + g_loss
            elif(config.fitness_type == 'df'):
                self.g_fitness = tf.abs(loss.d_fake) - tf.abs(loss.d_real)
            elif(config.fitness_type == 'standard'):
                self.g_fitness = tf.reduce_mean(g_loss) - (config.diversity_importance or 1)* tf.log(tf.abs(self.mean - tf.log(TINY+tf.sigmoid(d_loss)) - \
                        tf.log(1.0-tf.sigmoid(g_loss)+TINY)))
            else:
                self.g_fitness = tf.reduce_mean(loss.d_fake) - (config.diversity_importance or 1)* tf.log(tf.abs(self.mean + tf.reduce_mean(loss.d_real) - tf.reduce_mean(loss.d_fake)))
            self.g_fitness = tf.reduce_mean(self.g_fitness)

        return optimize_t, optimize_t
Example #20
 def train(self):
     trainer = hc.Config(hc.lookup_functions(self.config.trainer))
     return trainer.run(self)
Example #22
    def create(self, graph):
        x = graph.x
        f = graph.f
        config = self.gan.config
        # This is a hack to set dtype across ops.py, since each tensorflow instruction needs a dtype argument
        # TODO refactor
        set_ops_globals(config.dtype, config.batch_size)

        batch_size = config.batch_size

        g_losses = []
        extra_g_loss = []
        d_losses = []

        z = self.create_z_encoding()
        # create generator
        g = self.generator(z)

        g_sample = g

        d_real, d_fake, d_reals, d_fakes = self.discriminator(x, f, None, g, z)

        self.gan.graph.d_real = d_real
        self.gan.graph.d_fake = d_fake
        self.gan.graph.d_reals = d_reals
        self.gan.graph.d_fakes = d_fakes

        for i, loss in enumerate(config.losses):
            loss = hc.Config(hc.lookup_functions(loss))
            d_loss, g_loss = loss.create(loss, self.gan)
            if (d_loss is not None):
                d_losses.append(tf.squeeze(d_loss))
            if (g_loss is not None):
                g_losses.append(tf.squeeze(g_loss))

        g_reg_losses = [
            var
            for var in tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
            if 'g_' in var.name
        ]

        d_reg_losses = [
            var
            for var in tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
            if 'd_' in var.name
        ]

        extra_g_loss += g_reg_losses

        g_loss = tf.reduce_mean(tf.add_n(g_losses))
        for extra in extra_g_loss:
            g_loss += extra

        d_loss = tf.reduce_mean(tf.add_n(d_losses))
        #for extra in d_reg_losses:
        #    d_loss += extra
        joint_loss = tf.reduce_mean(tf.add_n(g_losses + d_losses))

        summary = tf.global_variables()

        def summary_reduce(s):
            if (len(s.get_shape()) == 0):
                return s
            while (len(s.get_shape()) > 1):
                s = tf.reduce_mean(s, 1)
                #s=tf.squeeze(s)
            return tf.reduce_mean(s, 0)

        summary = [(s.get_shape(), s.name, s.dtype, summary_reduce(s))
                   for s in summary]

        graph.d_loss = d_loss
        graph.d_log = -tf.log(tf.abs(d_loss + TINY))
        graph.f = f
        graph.g = g_sample
        graph.g_loss = g_loss
        graph.hc_summary = summary
        graph.joint_loss = joint_loss

        g_vars = [var for var in tf.trainable_variables() if 'g_' in var.name]
        d_vars = [var for var in tf.trainable_variables() if 'd_' in var.name]

        v_vars = [var for var in tf.trainable_variables() if 'v_' in var.name]
        g_vars += v_vars
        trainer = hc.Config(hc.lookup_functions(config.trainer))
        g_optimizer, d_optimizer = trainer.create(trainer, self.gan, d_vars,
                                                  g_vars)
        graph.d_optimizer = d_optimizer
        graph.g_optimizer = g_optimizer