    def build(self):
        """Build the model."""
        self.global_step = tf.Variable(0, trainable=False, name='global_step')

        # Create placeholders
        self.z = tf.placeholder(
            tf.float32,
            (self.config['batch_size'], self.config['net_g']['z_dim']), 'z')
        data_shape = (self.config['batch_size'], self.config['num_bar'],
                      self.config['num_timestep'], self.config['num_pitch'],
                      self.config['num_track'])
        # print('[*]', data_shape)
        self.x = tf.placeholder(tf.bool, data_shape, 'x')
        self.x_ = tf.cast(self.x, tf.float32, 'x_')

        # Components
        self.G = Generator(self.z, self.config, name='G')
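        # Two test-time binarization strategies: hard thresholding at 0.5 and
        # Bernoulli sampling against uniform noise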
        self.test_round = self.G.tensor_out > 0.5
        self.test_bernoulli = self.G.tensor_out > tf.random_uniform(data_shape)

        self.D_fake = Discriminator(self.G.tensor_out, self.config, name='D')
        self.D_real = Discriminator(self.x_, self.config, name='D', reuse=True)
        self.components = (self.G, self.D_fake)

        # Losses
        self.g_loss, self.d_loss = self.get_adversarial_loss()

        # Optimizers
        with tf.variable_scope('Optimizer'):
            self.g_optimizer = self.get_optimizer()
            self.g_step = self.g_optimizer.minimize(self.g_loss,
                                                    self.global_step,
                                                    self.G.vars)

            self.d_optimizer = self.get_optimizer()
            self.d_step = self.d_optimizer.minimize(self.d_loss,
                                                    self.global_step,
                                                    self.D_fake.vars)

            # Apply weight clipping
            if self.config['gan']['type'] == 'wgan':
                with tf.control_dependencies([self.d_step]):
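                    # Group the clipping ops so that running `d_step` applies
                    # the optimizer update first, then clips the critic weights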
                    self.d_step = tf.group(*(tf.assign(
                        var,
                        tf.clip_by_value(var,
                                         -self.config['gan']['clip_value'],
                                         self.config['gan']['clip_value']))
                                             for var in self.D_fake.vars))

        # Metrics
        self.metrics = Metrics(self.config)

        # Saver
        self.saver = tf.train.Saver()

        # Print and save model information
        self.print_statistics()
        self.save_statistics()
        self.print_summary()
        self.save_summary()
Example #2
    def build(self):
        """Build the model."""
        # Create global step variable
        self.global_step = tf.Variable(0, trainable=False, name='global_step')

        # Create placeholders
        self.z = tf.placeholder(
            tf.float32,
            (self.config['batch_size'], self.config['net_g']['z_dim']), 'z')
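        # NOTE: `data_shape` is assumed to be defined elsewhere in this class
        # (e.g. a mapping from resolution to placeholder shape); it is not
        # created in this snippet.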
        self.x = tf.placeholder(tf.bool, data_shape[self.resolution], 'x')
        self.x_ = tf.cast(self.x, tf.float32, 'x_')

        # Components
        self.G = Generator(self.z, self.resolution, self.config, name='G')
        self.test_round = self.G.tensor_out > 0.5
        self.test_bernoulli = self.G.tensor_out > tf.random_uniform(
            data_shape[self.resolution])

        # Slope tensor for applying slope annealing trick to stochastic neurons
        self.slope_tensor = tf.Variable(1.0)

        # Refiner stacked on top of the generator output
        self.R = Refiner(self.G.tensor_out,
                         self.config,
                         slope_tensor=self.slope_tensor,
                         name='R')

        self.D_fake = Discriminator(self.R.tensor_out, self.config, name='D')
        self.D_real = Discriminator(self.x_, self.config, name='D', reuse=True)
        self.components = (self.G, self.R, self.D_fake)

        # Losses
        self.g_loss, self.d_loss = self.get_adversarial_loss()

        # Optimizers
        with tf.variable_scope('Optimizer'):
            self.g_optimizer = self.get_optimizer()
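            # Joint training updates the refiner and the generator together;
            # otherwise only the refiner variables are trained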
            if self.config['joint_training']:
                self.g_step = self.g_optimizer.minimize(
                    self.g_loss, self.global_step, (self.R.vars + self.G.vars))
            else:
                self.g_step = self.g_optimizer.minimize(
                    self.g_loss, self.global_step, self.R.vars)
            self.d_optimizer = self.get_optimizer()
            self.d_step = self.d_optimizer.minimize(self.d_loss,
                                                    self.global_step,
                                                    self.D_fake.vars)

            # Apply weight clipping
            if self.config['gan']['type'] == 'wgan':
                with tf.control_dependencies([self.d_step]):
                    self.d_step = tf.group(*(tf.assign(
                        var,
                        tf.clip_by_value(var,
                                         -self.config['gan']['clip_value'],
                                         self.config['gan']['clip_value']))
                                             for var in self.D_fake.vars))

        # Metrics
        self.metrics = Metrics(self.config)

        # Saver
        self.saver = tf.train.Saver()

        # Print and save model information
        self.print_statistics()
        self.save_statistics()
        self.print_summary()
        self.save_summary()
Example #3
    def build(self):
        """Build the model."""
        # Create global step variable
        self.global_step = tf.Variable(0, trainable=False, name='global_step')

        # Get tensors from the pretrained model
        self.z = self.pretrained.z
        self.x = self.pretrained.x
        self.x_ = self.pretrained.x_

        # Slope tensor for applying slope annealing trick to stochastic neurons
        self.slope_tensor = tf.Variable(1.0)

        # Components
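        # Note: the refiner network is stored as `self.G` in this variant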
        self.G = Refiner(self.pretrained.G.tensor_out,
                         self.config,
                         slope_tensor=self.slope_tensor,
                         name='R')
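        # Reuse the pretrained discriminator: its real branch is taken as is,
        # and the fake branch is rebuilt on the refiner output under the same
        # variable scope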
        self.D_real = self.pretrained.D_real
        with tf.variable_scope(self.pretrained.scope, reuse=True):
            self.D_fake = Discriminator(self.G.tensor_out,
                                        self.config,
                                        name='D')
        self.components = (self.pretrained.G, self.G, self.D_fake)

        # Losses
        self.g_loss, self.d_loss = self.get_adversarial_loss(
            self.pretrained.scope)

        # Optimizers
        with tf.variable_scope('Optimizer'):
            self.g_optimizer = self.get_optimizer()
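            # Joint training also fine-tunes the pretrained generator;
            # otherwise only the refiner variables are updated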
            if self.config['joint_training']:
                self.g_step = self.g_optimizer.minimize(
                    self.g_loss, self.global_step,
                    (self.G.vars + self.pretrained.G.vars))
            else:
                self.g_step = self.g_optimizer.minimize(
                    self.g_loss, self.global_step, self.G.vars)
            self.d_optimizer = self.get_optimizer()
            self.d_step = self.d_optimizer.minimize(self.d_loss,
                                                    self.global_step,
                                                    self.D_fake.vars)

            # Apply weight clipping
            if self.config['gan']['type'] == 'wgan':
                with tf.control_dependencies([self.d_step]):
                    self.d_step = tf.group(*(tf.assign(
                        var,
                        tf.clip_by_value(var,
                                         -self.config['gan']['clip_value'],
                                         self.config['gan']['clip_value']))
                                             for var in self.D_fake.vars))

        # Metrics
        self.metrics = Metrics(self.config)

        # Saver
        self.saver = tf.train.Saver()

        # Print and save model information
        self.print_statistics()
        self.save_statistics()
        self.print_summary()
        self.save_summary()
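
Below is a minimal, hypothetical sketch of how the ops created by these build() methods might be driven during training. It assumes `model` is an instance of one of the classes above after build() has been called (exposing `z`, `x`, `g_step`, and `d_step`), `sess` is a tf.Session, and `config` is the same dict used in the constructors; the latent sampling and the critic-to-generator update ratio are illustrative choices, not taken from the original code.

import numpy as np

def train_step(sess, model, config, real_batch, n_critic=5):
    """Run `n_critic` discriminator updates, then one generator update.

    `real_batch` is a boolean array matching the shape of `model.x`.
    All names here are assumptions made for illustration.
    """
    def sample_z():
        # Draw a batch of latent vectors for the `z` placeholder
        return np.random.normal(
            size=(config['batch_size'], config['net_g']['z_dim']))

    for _ in range(n_critic):
        sess.run(model.d_step,
                 feed_dict={model.z: sample_z(), model.x: real_batch})
    sess.run(model.g_step,
             feed_dict={model.z: sample_z(), model.x: real_batch})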