def __init__(self, shape, sess, batch_size, variance_coef, data_info):
        """Autoencoder initializer.

        Builds the TCN-based denoising autoencoder graph inside the given
        session's graph: an optional multi-layer LSTM cell, a placeholder mask
        for simulating missing markers, and the train/validation outputs and
        reconstruction losses.

        Args:
          shape:          list of ints specifying
                          num input, hidden1 units,...hidden_n units, num outputs
          sess:           tensorflow session object to use
          batch_size:     batch size
          variance_coef:  multiplicative factor for the variance of noise wrt the variance of data
          data_info:      key information about the dataset
        """

        AutoEncoder.__init__(self, len(shape) - 2, batch_size, FLAGS.chunk_length, sess, data_info)

        self.__shape = shape  # [input_dim,hidden1_dim,...,hidden_n_dim,output_dim]

        # Layer-size configurations for the generator/discriminator parts,
        # taken from command-line flags (only the generator is used below).
        self._gen_shape = FLAGS.gen_layers
        self._dis_shape = FLAGS.dis_layers

        self.__variables = {}

        with sess.graph.as_default():

            with tf.variable_scope("AE_Variables"):

                ##############        SETUP VARIABLES       #####################################

                if FLAGS.reccurent:

                    # Define LSTM cell: one layer per hidden/output size
                    lstm_sizes = self.__shape[1:]

                    def lstm_cell(size):
                        basic_cell = tf.contrib.rnn.BasicLSTMCell(
                            size, forget_bias=1.0, state_is_tuple=True, reuse=tf.AUTO_REUSE)
                        # Apply dropout on the hidden layers only, never on the output layer
                        if size != self.__shape[-1]:
                            return tf.contrib.rnn.DropoutWrapper(
                                cell=basic_cell, output_keep_prob=FLAGS.dropout)
                        return basic_cell

                    self._gen_lstm_cell = tf.contrib.rnn.MultiRNNCell(
                        [lstm_cell(sz) for sz in lstm_sizes], state_is_tuple=True)

                ##############        DEFINE THE NETWORK     ###################################

                # Declare a mask for simulating missing_values
                self._mask = tf.placeholder(dtype=tf.float32,
                                            shape=[FLAGS.batch_size, FLAGS.chunk_length,
                                                   FLAGS.frame_size *
                                                   FLAGS.amount_of_frames_as_input],
                                            name='Mask_of_mis_markers')
                self._mask_generator = self.binary_random_matrix_generator(FLAGS.missing_rate)

                # Reminder: we use Denoising AE
                # (http://www.jmlr.org/papers/volume11/vincent10a/vincent10a.pdf)

                ''' 1 - Setup network for TRAINing '''
                # Input noisy data and reconstruct the original one
                self._input_ = add_noise(self._train_batch, variance_coef, data_info._data_sigma)
                self._target_ = self._train_batch

                # Define output and loss for the training data
                self._output, self._tcn_params_ = self.construct_tcn_graph(self._input_, FLAGS.dropout)
                self._reconstruction_loss = loss_reconstruction(self._output, self._target_, self.max_val)

                # Total loss aggregates everything registered in the 'losses' collection
                tf.add_to_collection('losses', self._reconstruction_loss)
                self._loss = tf.add_n(tf.get_collection('losses'), name='total_loss')

                ''' 2 - Setup network for TESTing '''
                self._valid_input_ = self._valid_batch
                self._valid_target_ = self._valid_batch

                # Define output (keep_prob=1: no dropout at validation time)
                self._valid_output, self._valid_tcn_params_ = self.construct_tcn_graph(self._valid_input_, 1)

                # Define loss
                self._valid_loss = loss_reconstruction(self._valid_output, self._valid_target_,
                                                       self.max_val)
Example #2
0
# Build the experiment output directory tree; create_dir presumably creates
# each directory if missing and returns its path — TODO confirm.
trans_out_dir = create_dir(osp.join(top_out_dir, experiment_name))
train_dir = create_dir(osp.join(trans_out_dir, 'train'))
test_dir = create_dir(osp.join(trans_out_dir, 'test'))
samples_dir = create_dir(osp.join(trans_out_dir, 'samples'))

# Echo the resolved paths for the log
print(trans_out_dir)
print(train_dir)
print(test_dir)
print(samples_dir)

## Load pre-trained AE
reset_tf_graph()
ae_conf_AB = Configuration.load(ae_configuration_AB)
print(ae_conf_AB.__str__())

# Restore the autoencoder weights saved at epoch FLAGS.ae_epochs
ae_AB = AutoEncoder(ae_conf_AB.experiment_name, ae_conf_AB)
ae_AB.restore_model(ae_conf_AB.train_dir, FLAGS.ae_epochs, verbose=True)

# The same AE instance is shared for both domains A and B
ae_A = ae_AB
ae_B = ae_AB

# data folders: layout is <top_in_dir>/<A>-<B>/<class>_{train,test}
datafolder = top_in_dir + class_name_A + '-' + class_name_B + '/'
train_dir_A = datafolder + class_name_A + '_train'
train_dir_B = datafolder + class_name_B + '_train'
test_dir_A = datafolder + class_name_A + '_test'
test_dir_B = datafolder + class_name_B + '_test'

## Load point-clouds
training_pc_data_A = load_point_clouds_under_folder(train_dir_A,
                                                    n_threads=8,
Example #3
0
    # dataloader setting
    loader = get_loader(args.datapath, args.batch_size, args.num_workers)

    # load config (JSON file whose path comes from the CLI)
    with open(args.config) as f:
        config = json.loads(f.read())

    # print configuration, then every parsed CLI argument
    print(json.dumps(config, indent=4, sort_keys=True))
    for arg in vars(args):
        print("{}:{}".format(arg, getattr(args, arg)))

    # set Networks
    # d: autoencoder acting as the discriminator-side network — presumably;
    # g: decoder acting as the generator — TODO confirm roles against trainer
    d = AutoEncoder(channel=config['model']['channels'],
                    n=config['model']['N'],
                    h=config['model']['h'])

    g = Decoder(channel=config['model']['channels'],
                n=config['model']['N'],
                h=config['model']['h'])
    # load Network weights from checkpoints when paths are given
    if args.netD != '':
        d.load_state_dict(torch.load(args.netD))
    if args.netG != '':
        g.load_state_dict(torch.load(args.netG))

    # Move both networks to the GPU (assumes CUDA is available)
    d.cuda()
    g.cuda()
    # hparams
    lambda_ = config['train']['lambda']
Example #4
0
            train_dir = train_dir,
            loss_display_step = train_params['loss_display_step'],
            saver_step = train_params['saver_step'],
            saver_max_to_keep = train_params['saver_max_to_keep'],
            encoder = encoder,
            decoder = decoder,
            encoder_args = enc_args,
            decoder_args = dec_args,
            experiment_name = experiment_name
           )
# Persist the configuration next to the training artifacts
conf.save(osp.join(train_dir, 'configuration'))

# Build AE Model.
reset_tf_graph()

ae = AutoEncoder(name=conf.experiment_name,  configuration=conf)


# load pretrained model: reload the saved configuration, rebuild the graph
# from scratch, and restore weights from FLAGS.restore_epoch
if FLAGS.load_pre_trained_ae:
    conf = Configuration.load(train_dir + '/configuration')
    reset_tf_graph()
    ae = AutoEncoder(conf.experiment_name, conf)
    ae.restore_model(conf.train_dir, epoch=FLAGS.restore_epoch)


batch_size =  train_params['batch_size'] 

if FLAGS.mode == 'train' :

        
    def __init__(self, shape, sess, batch_size, variance_coef, data_info):
        """Build the denoising-autoencoder graph.

        Creates per-layer weight/bias variables, an optional multi-layer LSTM
        cell, a placeholder mask for missing markers, and the training and
        validation outputs with their reconstruction losses.

        Args:
          shape:          layer sizes
                          [input_dim, hidden1_dim, ..., hidden_n_dim, output_dim]
          sess:           tensorflow session whose graph is populated
          batch_size:     batch size
          variance_coef:  multiplicative factor for the variance of noise wrt the variance of data
          data_info:      key information about the dataset
        """

        AutoEncoder.__init__(self,
                             len(shape) - 2, batch_size, FLAGS.chunk_length,
                             sess, data_info)

        # [input_dim, hidden1_dim, ..., hidden_n_dim, output_dim]
        self.__shape = shape

        self.__variables = {}

        with sess.graph.as_default():

            with tf.variable_scope("AE_Variables"):

                ##############        SETUP VARIABLES       #####################################

                # Weight matrices and biases for every layer
                for layer_idx in range(self.num_hidden_layers + 1):
                    self._create_variables(layer_idx, FLAGS.Weight_decay)

                if FLAGS.reccurent:

                    # One LSTM cell per hidden/output layer size
                    cell_sizes = self.__shape[1:]
                    output_size = self.__shape[-1]

                    def make_cell(n_units):
                        cell = tf.contrib.rnn.BasicLSTMCell(
                            n_units, forget_bias=1.0, state_is_tuple=True)
                        if n_units == output_size:
                            # No dropout on the output layer
                            return cell
                        # Dropout on the hidden layers
                        return tf.contrib.rnn.DropoutWrapper(
                            cell=cell, output_keep_prob=FLAGS.dropout)

                    self._RNN_cell = tf.contrib.rnn.MultiRNNCell(
                        [make_cell(n) for n in cell_sizes],
                        state_is_tuple=True)

                ##############        DEFINE THE NETWORK     ###################################

                # Placeholder mask used to simulate missing marker values
                self._mask = tf.placeholder(
                    dtype=tf.float32,
                    shape=[
                        FLAGS.batch_size, FLAGS.chunk_length,
                        FLAGS.frame_size * FLAGS.amount_of_frames_as_input
                    ],
                    name='Mask_of_mis_markers')
                self._mask_generator = self.binary_random_matrix_generator(
                    FLAGS.missing_rate)

                # Reminder: we use Denoising AE
                # (http://www.jmlr.org/papers/volume11/vincent10a/vincent10a.pdf)

                # 1 - Setup network for TRAINing:
                # noisy input, reconstruct the clean original
                self._input_ = add_noise(self._train_batch, variance_coef,
                                         data_info._data_sigma)
                self._target_ = self._train_batch

                # Output and loss for the training data
                self._output = self.construct_graph(self._input_,
                                                    FLAGS.dropout)
                self._reconstruction_loss = loss_reconstruction(
                    self._output, self._target_, self.max_val)
                tf.add_to_collection('losses', self._reconstruction_loss)
                self._loss = tf.add_n(tf.get_collection('losses'),
                                      name='total_loss')

                # 2 - Setup network for TESTing:
                # clean input, keep_prob=1 (no dropout)
                self._valid_input_ = self._valid_batch
                self._valid_target_ = self._valid_batch

                self._valid_output = self.construct_graph(
                    self._valid_input_, 1)

                self._valid_loss = loss_reconstruction(self._valid_output,
                                                       self._valid_target_,
                                                       self.max_val)
Example #6
0
def main():
    """Entry point: instantiate the requested model, then train or evaluate.

    Reads CLI options via parse_args(), builds the model named by
    opts.gan_type, optionally restores a checkpoint, and either trains or
    runs one of the evaluation modes selected by opts.eval.
    """
    # parse arguments
    opts = parse_args()
    if opts is None:
        exit()

    # declare instance for GAN: constructor lookup table replaces the
    # long if/elif chain; every entry takes (opts) only
    model_classes = {
        'GAN': GAN,
        'CGAN': CGAN,
        'ACGAN': ACGAN,
        'EBGAN': EBGAN,
        'WGAN': WGAN,
        'WGAN_GP': WGAN_GP,
        'DRAGAN': DRAGAN,
        'LSGAN': LSGAN,
        'BEGAN': BEGAN,
        'DRGAN': DRGAN,
        'AE': AutoEncoder,
        'GAN3D': GAN3D,
        'VAEGAN3D': VAEGAN3D,
        'DRGAN3D': DRGAN3D,
        'Recog3D': Recog3D,
        'Recog2D': Recog2D,
        'VAEDRGAN3D': VAEDRGAN3D,
        'DRcycleGAN3D': DRcycleGAN3D,
        'CycleGAN3D': CycleGAN3D,
        'AE3D': AutoEncoder3D,
        'DRGAN2D': DRGAN2D,
        'DRecon3DGAN': DRecon3DGAN,
        'DRecon2DGAN': DRecon2DGAN,
        'DReconVAEGAN': DReconVAEGAN,
    }

    if opts.gan_type == 'infoGAN':
        # infoGAN is the only model needing an extra constructor argument
        gan = infoGAN(opts, SUPERVISED=True)
    elif opts.gan_type in model_classes:
        gan = model_classes[opts.gan_type](opts)
    else:
        raise Exception("[!] There is no option for " + opts.gan_type)

    # Restore a checkpoint when resuming training or evaluating
    if opts.resume or len(opts.eval) > 0:
        print(" [*] Loading saved model...")
        gan.load()
        print(" [*] Loading finished!")

    # launch the graph in a session: train only when no eval mode is given
    if len(opts.eval) == 0:
        gan.train()
        print(" [*] Training finished!")
    else:
        print(" [*] Training skipped!")

    # visualize learned generator / run the selected evaluation mode
    if len(opts.eval) == 0:
        print(" [*] eval mode is not specified!")
    else:
        if opts.eval == 'generate':
            gan.visualize_results(opts.epoch)
        elif opts.eval == 'interp_z':
            gan.interpolate_z(opts)
        elif opts.eval == 'interp_id':
            gan.interpolate_id(opts)
        elif opts.eval == 'interp_expr':
            gan.interpolate_expr(opts)
        elif opts.eval == 'recon':
            gan.reconstruct()
        elif opts.eval == 'control_expr':
            gan.control_expr()
        else:
            # unrecognized eval strings fall through to manual inference
            gan.manual_inference(opts)
        print(" [*] Testing finished!")