Code example #1 (score: 0)
File: experiment.py — Project: statX/RGAN
        # NOTE(review): fragment starts mid-`try` — the matching `try:` and the
        # enclosing epoch loop header are above this view.
        # Per-epoch console report; the %f/%.0f formats require numeric
        # mmd2 / that_np / ll_sample / ll_real values.
        print('%d\t%.2f\t%.4f\t%.4f\t%.5f\t%.0f\t %.2f\t %.2f' %
              (epoch, t, D_loss_curr, G_loss_curr, mmd2, that_np, ll_sample,
               ll_real))
    except TypeError:  # mmd, ll are missing (strings)
        # Fallback report: print mmd2/ll_* verbatim with %s when they are
        # placeholder strings rather than numbers.
        print('%d\t%.2f\t%.4f\t%.4f\t%s\t %s\t %s' %
              (epoch, t, D_loss_curr, G_loss_curr, mmd2, ll_sample, ll_real))

    ## save trace
    # Append one space-separated record for this epoch to the trace file.
    trace.write(' '.join(
        map(str, [
            epoch, t, D_loss_curr, G_loss_curr, mmd2, that_np, ll_sample,
            ll_real
        ])) + '\n')
    if epoch % 10 == 0:
        # Every 10 epochs: flush the trace to disk and refresh the trace plot.
        trace.flush()
        plotting.plot_trace(identifier, xmax=num_epochs)

    if shuffle:  # shuffle the training data
        # Apply the same permutation to samples and (if present) labels so
        # they remain aligned row-for-row.
        perm = np.random.permutation(samples['train'].shape[0])
        samples['train'] = samples['train'][perm]
        if labels['train'] is not None:
            labels['train'] = labels['train'][perm]

    if epoch % 50 == 0:
        # Periodic checkpoint of model parameters, tagged with the epoch number.
        model.dump_parameters(identifier + '_' + str(epoch), sess)

# End of the training loop: final trace flush, plot, and parameter dump
# (`epoch` here is the last value taken by the loop variable).
trace.flush()
plotting.plot_trace(identifier, xmax=num_epochs)
model.dump_parameters(identifier + '_' + str(epoch), sess)

## after-the-fact evaluation
Code example #2 (score: 0)
File: experiment.py — Project: archmaester/RCGAN
# NOTE(review): the trace-writing block below is commented out in this
# (RCGAN) variant; only shuffling, checkpointing and the final dump remain
# active in the loop body.
#     trace.write(' '.join(map(str, [epoch, t, D_loss_curr, G_loss_curr])) + '\n')
#     if epoch % 10 == 0:
#         trace.flush()
#         plotting.plot_trace(identifier, xmax=num_epochs, dp=False)

    if shuffle:  # shuffle the training data
        # The same permutation is applied to samples and labels to keep
        # them aligned row-for-row.
        perm = np.random.permutation(samples['train'].shape[0])
        samples['train'] = samples['train'][perm]
        if labels['train'] is not None:
            labels['train'] = labels['train'][perm]

    if epoch % 50 == 0:
        # Checkpoint model parameters every 50 epochs, tagged with the epoch.
        model.dump_parameters(identifier + '_' + str(epoch), sess)

# After the epoch loop: final flush, trace plot (dp=False marks the
# non-differentially-private variant) and a last parameter dump.
trace.flush()
plotting.plot_trace(identifier, xmax=num_epochs, dp=False)
model.dump_parameters(identifier + '_' + str(epoch), sess)

# NOTE(review): dead code below — MMD-based test-set evaluation left
# commented out by the original authors; kept verbatim for reference.
# # ## after-the-fact evaluation
# # #n_test = vali.shape[0]      # using validation set for now TODO
# # #n_batches_for_test = floor(n_test/batch_size)
# # #n_test_eval = n_batches_for_test*batch_size
# # #test_sample = np.empty(shape=(n_test_eval, seq_length, num_signals))
# # #test_Z = model.sample_Z(n_test_eval, seq_length, latent_dim, use_time)
# # #for i in range(n_batches_for_test):
# # #    test_sample[i*batch_size:(i+1)*batch_size, :, :] = sess.run(G_sample, feed_dict={Z: test_Z[i*batch_size:(i+1)*batch_size]})
# # #test_sample = np.float32(test_sample)
# # #test_real = np.float32(vali[np.random.choice(n_test, n_test_eval, replace=False), :, :])
# # ## we can only get samples in the size of the batch...
# # #heuristic_sigma = median_pairwise_distance(test_real, test_sample)
# # #test_mmd2, that = sess.run(mix_rbf_mmd2_and_ratio(test_real, test_sample, sigmas=heuristic_sigma, biased=False))
Code example #3 (score: 0)
        # Landmark counts as visible iff it lies strictly inside the agent's
        # receptive-field radius. NOTE(review): the enclosing method's `def`
        # line is outside this view.
        landmark_visible = distance_to_landmark < self.rf_radius
        return landmark_visible

    @staticmethod
    def neural_activation(x):
        return np.tanh(x)

    def log_positions(self):
        """Record the current (x, y) position and trial number.

        Appends one row ``[x, y, trial]`` to ``self.log``.
        """
        row = np.array([[self.x, self.y, self.trial]])
        self.log = np.concatenate((self.log, row), axis=0)

    def save_log(self):
        """Package the position log as a pandas DataFrame.

        :return: DataFrame with columns 'X position', 'Y position' and
            'Trial', indexed by time bin.
        """
        frame = pd.DataFrame(
            data=self.log,
            columns=['X position', 'Y position', 'Trial'],
        )
        frame.index.name = 'Time bin'
        return frame


# Script entry point: run the blocking experiment and plot the trace.
# Fix: the import was buried mid-script; imports belong at the top of the
# unit (PEP 8) so failures surface before any work is done.
import plotting as pl  # project-local plotting module

# NOTE(review): n_trials=30 is hard-coded — confirm the intended run length.
a = Agent(n_trials=30)
a.run_blocking_experiment()
pl.plot_trace(a, range(a.n_trials))