# NOTE(review): scraped fragment — begins mid-statement inside a training
# loop; `session`, the cost tensors, the placeholders, `plotter`, `saver`
# and the *_data batches are defined in code not visible here.
_dev_disc_cost, _dev_gen_cost = session.run(
                [disc_cost, gen_cost],
                feed_dict={
                    real_data: _real_data,
                    condition_data: _cond_data,
                    time_data: _time_data
                })
            # Log both losses for this evaluation step
            print("Step %d: D: loss = %.7f G: loss=%.7f " %
                  (iteration, _dev_disc_cost, _dev_gen_cost))
            dev_disc_costs.append(_dev_disc_cost)
            # Plot the running mean of the dev critic cost
            plotter.plot('dev disc cost', np.mean(dev_disc_costs))
            generate_image(iteration, False)

        # Save logs early on (first 5 iterations) and then every 100 iters
        if (iteration < 5) or (iteration % 100 == 99):
            plotter.flush()

        plotter.tick()
    # Final evaluation pass; the True flag presumably enables the full
    # ssim/mse/accuracy computation — verify against generate_image()
    generate_image(
        ITERS,
        True)  # outputs ssim and mse vals for all samples, calculate accuracy
    save_path = saver.save(session,
                           restore_path)  # Save the variables to disk.
    print("Model saved in path: %s" % save_path)

summary_writer.close()  # flushes the outputwriter to disk

# Report total wall-clock time for the whole run
overall_end_time = time.time()  # time analysis
overall_time = (overall_end_time - overall_start_time)
print("From ", START_ITER, "to ", ITERS, " the GAN took ", overall_time,
      "sec to run")
# --- Example 2 (scraped snippet separator; score: 0) ---
    def train(self, config):
        """Train the GAN with alternating generator / critic (WGAN-style) updates.

        Parameters
        ----------
        config : object
            Run configuration. Fields read here: learning_rate, beta1, beta2,
            critic_iters, batch_size, num_iter, dataset, num_neurons,
            num_bins, sample_dir, data_instance.

        Side effects: runs optimizer ops in self.sess, periodically saves
        checkpoints via self.save(), and writes loss/time curves plus a
        2x2 fitting-errors figure to self.sample_dir.
        """
        # Define one Adam optimizer per player; variables are selected by
        # scope-name prefix ('Discriminator.' — the trailing dot narrows the
        # match to that exact scope).
        self.g_optim = tf.train.AdamOptimizer(
            learning_rate=config.learning_rate,
            beta1=config.beta1,
            beta2=config.beta2).minimize(
                self.gen_cost,
                var_list=params_with_name('Generator'),
                colocate_gradients_with_ops=True)
        self.d_optim = tf.train.AdamOptimizer(
            learning_rate=config.learning_rate,
            beta1=config.beta1,
            beta2=config.beta2).minimize(
                self.disc_cost,
                var_list=params_with_name('Discriminator.'),
                colocate_gradients_with_ops=True)

        # Initialize every TF variable in the default session
        tf.global_variables_initializer().run()

        # Try to restore a previously trained model; ckpt_name is used below
        # as an iteration offset, so it is presumably the iteration number of
        # the restored checkpoint — TODO confirm against self.load()
        print('-------------')
        existing_gan, ckpt_name = self.load()

        # Count the trainable parameters (product of each variable's shape
        # dimensions)
        total_parameters = 0
        for variable in tf.trainable_variables():
            # shape is an array of tf.Dimension
            shape = variable.get_shape()
            variable_parameters = 1
            for dim in shape:
                variable_parameters *= dim.value
            total_parameters += variable_parameters
        print('-------------')
        print('number of variables: ' + str(total_parameters))
        print('-------------')
        # Start training: counter_batch indexes minibatches within the
        # column-wise sample matrix; epoch counts full passes over it
        counter_batch = 0
        epoch = 0
        # 2x2 figure that accumulates the fitting errors over training
        f, sbplt = plt.subplots(2, 2, figsize=(8, 8), dpi=250)
        matplotlib.rcParams.update({'font.size': 8})
        # left/bottom/right/top/wspace/hspace are layout constants defined
        # outside this method (module level) — verify before reuse
        plt.subplots_adjust(left=left,
                            bottom=bottom,
                            right=right,
                            top=top,
                            wspace=wspace,
                            hspace=hspace)
        for iteration in range(config.num_iter):
            start_time = time.time()
            # Train generator (only after the critic has been trained, at
            # least once; ckpt_name shifts the schedule when resuming)
            if iteration + ckpt_name > 0:
                _ = self.sess.run(self.g_optim)

            # Train critic for critic_iters minibatches per generator step
            disc_iters = config.critic_iters
            for i in range(disc_iters):
                # Slice the next minibatch; samples are stored column-wise,
                # hence the transpose to (batch, features)
                _data = self.training_samples[:, counter_batch *
                                              config.batch_size:
                                              (counter_batch + 1) *
                                              config.batch_size].T
                _disc_cost, _ = self.sess.run([self.disc_cost, self.d_optim],
                                              feed_dict={self.inputs: _data})
                # If we have reached the last full batch of the real-samples
                # set, start over and increment the number of epochs
                if counter_batch == int(
                        self.training_samples.shape[1] / self.batch_size) - 1:
                    counter_batch = 0
                    epoch += 1
                else:
                    counter_batch += 1
            aux = time.time() - start_time
            # Plot the critic loss (negated: the WGAN critic cost is
            # minimized, so -cost trends upward as the critic improves) and
            # the per-iteration wall time
            plot.plot(self.sample_dir, 'train disc cost', -_disc_cost)
            plot.plot(self.sample_dir, 'time', aux)

            # Periodic evaluation + checkpoint: once at effective iteration
            # 500, every 20000 raw iterations, and during the final 10
            if (iteration + ckpt_name
                    == 500) or iteration % 20000 == 19999 or (
                        iteration + ckpt_name >= config.num_iter - 10):
                print('epoch ' + str(epoch))
                if config.dataset == 'uniform' or config.dataset == 'packets':
                    # Score the critic on held-out samples to evaluate
                    # whether the discriminator has overfit
                    dev_disc_costs = []
                    for ind_dev in range(
                            int(self.dev_samples.shape[1] / self.batch_size)):
                        images = self.dev_samples[:, ind_dev *
                                                  config.batch_size:(ind_dev +
                                                                     1) *
                                                  config.batch_size].T
                        _dev_disc_cost = self.sess.run(
                            self.disc_cost, feed_dict={self.inputs: images})
                        dev_disc_costs.append(_dev_disc_cost)
                    # Plot the (negated) mean dev loss
                    plot.plot(self.sample_dir, 'dev disc cost',
                              -np.mean(dev_disc_costs))

                # Save the network parameters at the effective iteration
                self.save(iteration + ckpt_name)

                # Get simulated samples, calculate their statistics and
                # compare them with the original ones
                fake_samples = self.sess.run([self.ex_samples])[0]
                acf_error, mean_error, corr_error, time_course_error,_ = analysis.get_stats(X=fake_samples.T, num_neurons=config.num_neurons,\
                    num_bins=config.num_bins, folder=config.sample_dir, name='fake'+str(iteration+ckpt_name), critic_cost=-_disc_cost,instance=config.data_instance)
                # Scatter each fitting-error metric against the iteration
                sbplt[0][0].plot(iteration + ckpt_name, mean_error, '+b')
                sbplt[0][0].set_title('spk-count mean error')
                sbplt[0][0].set_xlabel('iterations')
                sbplt[0][0].set_ylabel('L1 error')
                sbplt[0][0].set_xlim([
                    0 - config.num_iter / 4,
                    config.num_iter + config.num_iter / 4
                ])
                sbplt[0][1].plot(iteration + ckpt_name, time_course_error,
                                 '+b')
                sbplt[0][1].set_title('time course error')
                sbplt[0][1].set_xlabel('iterations')
                sbplt[0][1].set_ylabel('L1 error')
                sbplt[0][1].set_xlim([
                    0 - config.num_iter / 4,
                    config.num_iter + config.num_iter / 4
                ])
                sbplt[1][0].plot(iteration + ckpt_name, acf_error, '+b')
                sbplt[1][0].set_title('AC error')
                sbplt[1][0].set_xlabel('iterations')
                sbplt[1][0].set_ylabel('L1 error')
                sbplt[1][0].set_xlim([
                    0 - config.num_iter / 4,
                    config.num_iter + config.num_iter / 4
                ])
                sbplt[1][1].plot(iteration + ckpt_name, corr_error, '+b')
                sbplt[1][1].set_title('corr error')
                sbplt[1][1].set_xlabel('iterations')
                sbplt[1][1].set_ylabel('L1 error')
                sbplt[1][1].set_xlim([
                    0 - config.num_iter / 4,
                    config.num_iter + config.num_iter / 4
                ])
                # Overwrite the cumulative fitting-errors figure on disk
                f.savefig(self.sample_dir + 'fitting_errors.svg',
                          dpi=600,
                          bbox_inches='tight')
                plt.close(f)
                plot.flush(self.sample_dir)

            plot.tick()
# --- Example 3 (scraped snippet separator; score: 0) ---
        # NOTE(review): scraped fragment — begins mid-loop; `session`, `gen`,
        # `dev_gen`, the cost/train ops and FOLDER are defined in code not
        # visible here.
        # Critic updates: disc_iters minibatches per outer iteration
        for i in range(disc_iters):
            _data = next(gen)
            _disc_cost, _ = session.run([disc_cost, disc_train_op],
                                        feed_dict={real_data: _data})

        plot.plot(FOLDER, 'train disc cost', _disc_cost)
        plot.plot(FOLDER, 'time', time.time() - start_time)

        # Evaluate on the dev set early on and then every 200 iterations
        if (iteration < 5) or iteration % 200 == 199:
            t = time.time()
            dev_disc_costs = []
            for (images, ) in dev_gen():
                _dev_disc_cost = session.run(disc_cost,
                                             feed_dict={real_data: images})
                dev_disc_costs.append(_dev_disc_cost)
            plot.plot(FOLDER, 'dev disc cost', np.mean(dev_disc_costs))

            generate_image(iteration)
            save(iteration)

        # Flush logs on the same schedule as the dev evaluation
        if (iteration < 5) or (iteration % 200 == 199):
            plot.flush(FOLDER)

        plot.tick()

    # Sample from a fixed noise tensor so generated samples are comparable
    # across evaluations; binarize() presumably thresholds the generator
    # output to {0, 1} — verify against its definition
    fixed_noise = tf.constant(
        np.random.normal(size=(1000, 128)).astype('float32'))
    n_samples = BATCH_SIZE
    all_fixed_noise_samples = FCGenerator(n_samples, noise=fixed_noise)
    samples = session.run(all_fixed_noise_samples)
    samples = binarize(samples)
# --- Example 4 (scraped snippet separator; score: 0) ---
            # NOTE(review): scraped fragment — begins mid-branch; `session`,
            # the cost/summary ops, the dev generators and SAVE_* constants
            # are defined in code not visible here.
            # Dev evaluation of the auxiliary discriminator d1.
            # NOTE(review): in the TRAIN_DETECTOR branch dev_d1_cost is
            # computed but never appended to dev_d1_costs, so the np.mean
            # below would not reflect these batches — verify upstream whether
            # this is intentional.
            if TRAIN_DETECTOR:
                for data, labels in dev_gen_adv():
                    dev_d1_cost, summary = session.run([d1_cost, summary_op],
                                                       feed_dict={
                                                           real_data_int: data,
                                                           y: labels
                                                       })
            else:
                for data, labels in dev_gen2():
                    dev_d1_cost, summary = session.run([d1_cost, summary_op],
                                                       feed_dict={
                                                           real_data_int: data,
                                                           y: labels
                                                       })
                    dev_d1_costs.append(dev_d1_cost)
            plot.plot('dev d1 cost', np.mean(dev_d1_costs))
            # plot.plot('dev d2 cost', np.mean(dev_d2_costs))
            # Write one deterministic and one random sample grid
            generate_image(iteration, data, args.save_dir)
            generate_image(iteration,
                           data,
                           args.save_dir + '/random',
                           random=True)

        # Checkpoint at the explicitly listed iterations
        if iteration in SAVE_ITERS:
            saver.save(session,
                       'models/' + SAVE_NAME + '_' + str(iteration) + '_steps')
        # Save logs every 100 iters
        if (iteration < 5) or (iteration % 100 == 99):
            plot.flush()
        plot.tick()
# --- Example 5 (scraped snippet separator; score: 0) ---
    def train(self, config):
        """Train the GAN with alternating generator / critic (WGAN-style) updates.

        Parameters
        ----------
        config : object
            Run configuration. Fields read here: learning_rate, beta1, beta2,
            critic_iters, batch_size, num_iter, num_samples, dataset,
            firing_rate, correlation, group_size, ref_period, num_neurons,
            num_bins, sample_dir, data_instance.

        Side effects: builds (or loads) the real/dev sample sets, runs
        optimizer ops in self.sess, periodically saves checkpoints via
        self.save(), and writes loss/time curves plus a 2x2 fitting-errors
        figure to self.sample_dir.
        """
        # Define one Adam optimizer per player; variables are selected by
        # scope-name prefix ('Discriminator.' — the trailing dot narrows the
        # match to that exact scope).
        self.g_optim = tf.train.AdamOptimizer(
            learning_rate=config.learning_rate,
            beta1=config.beta1,
            beta2=config.beta2).minimize(
                self.gen_cost,
                var_list=params_with_name('Generator'),
                colocate_gradients_with_ops=True)
        self.d_optim = tf.train.AdamOptimizer(
            learning_rate=config.learning_rate,
            beta1=config.beta1,
            beta2=config.beta2).minimize(
                self.disc_cost,
                var_list=params_with_name('Discriminator.'),
                colocate_gradients_with_ops=True)

        # Initialize variables; old TF versions lack
        # global_variables_initializer, so fall back to the deprecated API
        # only on AttributeError (a bare except here would also swallow real
        # session/run errors).
        try:
            tf.global_variables_initializer().run()
        except AttributeError:
            tf.initialize_all_variables().run()

        # Try to restore previously trained parameters
        self.load()

        # Build the real (and dev) sample sets for the chosen dataset
        if config.dataset == 'uniform':
            # Per-group firing rates and correlations jittered +/-50% around
            # the configured values
            firing_rates_mat = config.firing_rate + 2 * (
                np.random.random(int(self.num_neurons / config.group_size), ) -
                0.5) * config.firing_rate / 2
            correlations_mat = config.correlation + 2 * (
                np.random.random(int(self.num_neurons / config.group_size), ) -
                0.5) * config.correlation / 2
            # One activity peak per group, spread uniformly across the bins
            aux = np.arange(int(self.num_neurons / config.group_size))
            activity_peaks = [
                [x] * config.group_size for x in aux
            ]  #np.random.randint(0,high=self.num_bins,size=(1,self.num_neurons)).reshape(self.num_neurons,1)
            activity_peaks = np.asarray(activity_peaks)
            activity_peaks = activity_peaks.flatten()
            activity_peaks = activity_peaks * config.group_size * self.num_bins / self.num_neurons
            activity_peaks = activity_peaks.reshape(self.num_neurons, 1)
            #activity_peaks = np.zeros((self.num_neurons,1))+self.num_bins/4
            self.real_samples = sim_pop_activity.get_samples(num_samples=config.num_samples, num_bins=self.num_bins,\
            num_neurons=self.num_neurons, correlations_mat=correlations_mat, group_size=config.group_size, refr_per=config.ref_period,firing_rates_mat=firing_rates_mat, activity_peaks=activity_peaks)
            # Held-out dev set (one quarter of the training size)
            dev_samples = sim_pop_activity.get_samples(num_samples=int(config.num_samples/4), num_bins=self.num_bins,\
            num_neurons=self.num_neurons, correlations_mat=correlations_mat, group_size=config.group_size, refr_per=config.ref_period,firing_rates_mat=firing_rates_mat, activity_peaks=activity_peaks)
            # Save the ground-truth statistics for later comparison
            analysis.get_stats(X=self.real_samples,
                               num_neurons=self.num_neurons,
                               num_bins=self.num_bins,
                               folder=self.sample_dir,
                               name='real',
                               firing_rate_mat=firing_rates_mat,
                               correlation_mat=correlations_mat,
                               activity_peaks=activity_peaks)
        elif config.dataset == 'retina':
            self.real_samples = retinal_data.get_samples(
                num_bins=self.num_bins,
                num_neurons=self.num_neurons,
                instance=config.data_instance)
            # Save the ground-truth statistics for later comparison
            analysis.get_stats(X=self.real_samples,
                               num_neurons=self.num_neurons,
                               num_bins=self.num_bins,
                               folder=self.sample_dir,
                               name='real',
                               instance=config.data_instance)

        # Count the trainable parameters (product of each variable's shape
        # dimensions)
        total_parameters = 0
        for variable in tf.trainable_variables():
            # shape is an array of tf.Dimension
            shape = variable.get_shape()
            variable_parameters = 1
            for dim in shape:
                variable_parameters *= dim.value
            total_parameters += variable_parameters
        # Typo fix: 'varaibles' -> 'variables' (matches the sibling train()
        # method's message)
        print('number of variables: ' + str(total_parameters))
        # Start training: counter_batch indexes minibatches within the
        # column-wise sample matrix; epoch counts full passes over it
        counter_batch = 0
        epoch = 0
        # 2x2 figure that accumulates the fitting errors over training
        f, sbplt = plt.subplots(2, 2, figsize=(8, 8), dpi=250)
        matplotlib.rcParams.update({'font.size': 8})
        # left/bottom/right/top/wspace/hspace are layout constants defined
        # outside this method (module level)
        plt.subplots_adjust(left=left,
                            bottom=bottom,
                            right=right,
                            top=top,
                            wspace=wspace,
                            hspace=hspace)
        for iteration in range(config.num_iter):
            start_time = time.time()
            # Train generator (only after the critic has been trained, at least once)
            if iteration > 0:
                _ = self.sess.run(self.g_optim)

            # Train critic for critic_iters minibatches per generator step
            disc_iters = config.critic_iters
            for i in range(disc_iters):
                # Slice the next minibatch; samples are stored column-wise,
                # hence the transpose to (batch, features)
                _data = self.real_samples[:, counter_batch *
                                          config.batch_size:(counter_batch +
                                                             1) *
                                          config.batch_size].T
                _disc_cost, _ = self.sess.run([self.disc_cost, self.d_optim],
                                              feed_dict={self.inputs: _data})
                # If we have reached the last full batch of the real-samples
                # set, start over and increment the number of epochs
                if counter_batch == int(
                        self.real_samples.shape[1] / self.batch_size) - 1:
                    counter_batch = 0
                    epoch += 1
                else:
                    counter_batch += 1
            aux = time.time() - start_time
            # Plot the critic loss (negated: the WGAN critic cost is
            # minimized, so -cost trends upward as the critic improves) and
            # the per-iteration wall time
            plot.plot(self.sample_dir, 'train disc cost', -_disc_cost)
            plot.plot(self.sample_dir, 'time', aux)

            # Periodic evaluation + checkpoint: once at iteration 500, every
            # 20000 iterations, and during the final 10
            if (
                    iteration == 500
            ) or iteration % 20000 == 19999 or iteration > config.num_iter - 10:
                print('epoch ' + str(epoch))
                if config.dataset == 'uniform':
                    # Score the critic on held-out samples to evaluate
                    # whether the discriminator has overfit
                    dev_disc_costs = []
                    for ind_dev in range(
                            int(dev_samples.shape[1] / self.batch_size)):
                        images = dev_samples[:, ind_dev *
                                             config.batch_size:(ind_dev + 1) *
                                             config.batch_size].T
                        _dev_disc_cost = self.sess.run(
                            self.disc_cost, feed_dict={self.inputs: images})
                        dev_disc_costs.append(_dev_disc_cost)
                    # Plot the (negated) mean dev loss
                    plot.plot(self.sample_dir, 'dev disc cost',
                              -np.mean(dev_disc_costs))

                # Save the network parameters
                self.save(iteration)

                # Get simulated samples, calculate their statistics and
                # compare them with the original ones
                fake_samples = self.get_samples(num_samples=2**13)
                fake_samples = fake_samples.eval(session=self.sess)
                fake_samples = self.binarize(samples=fake_samples)
                acf_error, mean_error, corr_error, time_course_error,_ = analysis.get_stats(X=fake_samples.T, num_neurons=config.num_neurons,\
                    num_bins=config.num_bins, folder=config.sample_dir, name='fake'+str(iteration), critic_cost=-_disc_cost,instance=config.data_instance)
                # Scatter each fitting-error metric against the iteration
                sbplt[0][0].plot(iteration, mean_error, '+b')
                sbplt[0][0].set_title('spk-count mean error')
                sbplt[0][0].set_xlabel('iterations')
                sbplt[0][0].set_ylabel('L1 error')
                sbplt[0][0].set_xlim([
                    0 - config.num_iter / 4,
                    config.num_iter + config.num_iter / 4
                ])
                sbplt[0][1].plot(iteration, time_course_error, '+b')
                sbplt[0][1].set_title('time course error')
                sbplt[0][1].set_xlabel('iterations')
                sbplt[0][1].set_ylabel('L1 error')
                sbplt[0][1].set_xlim([
                    0 - config.num_iter / 4,
                    config.num_iter + config.num_iter / 4
                ])
                sbplt[1][0].plot(iteration, acf_error, '+b')
                sbplt[1][0].set_title('AC error')
                sbplt[1][0].set_xlabel('iterations')
                sbplt[1][0].set_ylabel('L1 error')
                sbplt[1][0].set_xlim([
                    0 - config.num_iter / 4,
                    config.num_iter + config.num_iter / 4
                ])
                sbplt[1][1].plot(iteration, corr_error, '+b')
                sbplt[1][1].set_title('corr error')
                sbplt[1][1].set_xlabel('iterations')
                sbplt[1][1].set_ylabel('L1 error')
                sbplt[1][1].set_xlim([
                    0 - config.num_iter / 4,
                    config.num_iter + config.num_iter / 4
                ])
                # Overwrite the cumulative fitting-errors figure on disk
                f.savefig(self.sample_dir + 'fitting_errors.svg',
                          dpi=600,
                          bbox_inches='tight')
                plt.close(f)
                plot.flush(self.sample_dir)

            plot.tick()