Example #1
    def agent_init(self, taskSpecification):
        """
        This function is called once at the beginning of an experiment.

        :param taskSpecification: A string defining the task.  This string
        is decoded using TaskSpecVRLGLUE3.TaskSpecParser
        :return:
        """

        # DO SOME SANITY CHECKING ON THE TASKSPEC
        TaskSpec = TaskSpecVRLGLUE3.TaskSpecParser(taskSpecification)

        if TaskSpec.valid:

            assert ((len(TaskSpec.getIntObservations()) == 0) !=
                    (len(TaskSpec.getDoubleObservations()) == 0)), \
                "expecting continous or discrete observations.  Not both."
            assert not TaskSpec.isSpecial(TaskSpec.getDoubleActions()[0][0]), \
                " expecting min action to be a number not a special value"
            assert not TaskSpec.isSpecial(TaskSpec.getDoubleActions()[0][1]), \
                " expecting max action to be a number not a special value"
            #self.num_actions = TaskSpec.getIntActions()[0][1]+1
        else:
            print "INVALID TASK SPEC"

        # TODO: take care of int observations
        observations = TaskSpec.getDoubleObservations()
        self.observation_size = len(observations)

        actions = TaskSpec.getDoubleActions()
        self.action_size = len(actions)

        self.testing = False
        self.batch_size = 32
        self.episode_counter = 0
        self.step_counter = 0

        if self.nn_file is None:
            self.action_network = self._init_action_network(
                len(observations), len(actions))
            self.value_network = self._init_value_network(len(observations), 1)
        else:
            with open(self.nn_file, 'rb') as handle:
                self.network = cPickle.load(handle)

        self.action_stdev = 0.01
        self.gamma = 0.9  # TaskSpec.getDiscountFactor()

        self.data_set = data_set.DataSet(
            len(observations),
            len(actions),
            observation_dtype='float32',
            action_dtype='float32',
        )
        # just needs to be big enough to create phi's
        self.test_data_set = data_set.DataSet(len(observations),
                                              len(actions),
                                              observation_dtype='float32',
                                              action_dtype='float32')
Example #2
    def __init__(self):
        data_set_holder = data_set.DataSet()
        df = data_set_holder.copy_df()
        df = data_set_holder.clean(df,
                                   sex=True,
                                   age=True,
                                   nationality=True,
                                   average_age=True)
        self.data_subsetter = DataSubset(df)
        self.visualizer = Visualizer()
Example #3
def ttrees_to_internal(ttrees, branches, binary=False):
    """Converts the TTrees to the main data structure."""
    data = concat_ttrees_to_array(ttrees, branches)
    if binary:
        labels = ttrees_to_binary(ttrees[0], ttrees[1])
    else:
        labels = ttrees_to_one_hot(ttrees)
    classes = len(ttrees)

    combined = data_set.DataSet(data, labels, classes)
    return combined
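The helpers ttrees_to_one_hot and ttrees_to_binary are not part of the excerpt. A rough sketch of the two label encodings the branch above distinguishes, using plain numpy; the function bodies are assumptions, not the project's code.

import numpy as np


def one_hot_labels(samples_per_class):
    # e.g. samples_per_class = [3, 2] gives rows [1, 0] and [0, 1].
    n_classes = len(samples_per_class)
    class_ids = np.concatenate(
        [np.full(n, c, dtype=int) for c, n in enumerate(samples_per_class)])
    return np.eye(n_classes)[class_ids]


def binary_labels(n_signal, n_background):
    # Signal samples labelled 1, background samples labelled 0.
    return np.concatenate([np.ones(n_signal), np.zeros(n_background)])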
Example #4
def run(session):
    ds = data_set.DataSet()
    run_info = RunInfo(
        session=session,
        graph=graph.build_graph(ds.vocab_size()),
        data_set=ds,
        saver=tf.train.Saver(),
        summary_file_writer=tf.summary.FileWriter(config.TBOARD_NAME),
    )

    run_info.summary_file_writer.add_graph(session.graph)

    # Just some basic sanity check information.
    print(f"# Training xs: {len(ds.dataset('train')[0])}")
    print(f"# Training ys: {len(ds.dataset('train')[1])}")
    print(f"Vocab size: {ds.vocab_size()}")
    print(f"Label counts: {ds.label_counts()}")

    session.run(tf.global_variables_initializer())
    for epoch_idx in range(config.NUM_EPOCHS):
        run_epoch(run_info, epoch_idx)
        run_info.saver.save(session, config.CHECKPOINT_NAME)
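RunInfo is constructed with keyword arguments above but not defined in the excerpt; a namedtuple with the same field names is one plausible definition (an assumption, not the project's actual code).

from collections import namedtuple

# Assumed container for the objects threaded through run_epoch above.
RunInfo = namedtuple(
    'RunInfo',
    ['session', 'graph', 'data_set', 'saver', 'summary_file_writer'])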
Example #5
def main(_):
    log_epoch_interval = config_h.log_epoch_interval
    n_classes = len(config_h.classes)
    loss_weights = tf.constant(config_h.loss_weights, dtype=tf.float32)
    learning_rates = config_h.learning_rates
    restore_file = config_h.restore_file
    restart_epoch_i = config_h.restart_epoch_i
    loop_epoch_nums = config_h.loop_epoch_nums
    persist_checkpoint_interval = config_h.persist_checkpoint_interval
    persist_checkpoint_file = config_h.persist_checkpoint_file

    train_data = np.load(config_h.train_d_npy)
    train_ls = np.load(config_h.train_l_npy)
    mfcc_train = data_set.DataSet(train_data, train_ls)
    vali_data = np.load(config_h.vali_d_npy)
    vali_ls = np.load(config_h.vali_l_npy)
    mfcc_vali = data_set.DataSet(vali_data, vali_ls)
    test_data = np.load(config_h.test_d_npy)
    test_ls = np.load(config_h.test_l_npy)
    mfcc_test = data_set.DataSet(test_data, test_ls)

    x = tf.placeholder(tf.float32, [None, config_h.mfcc_n, 39])
    y_ = tf.placeholder(tf.float32, [None, n_classes])
    k_prob = tf.placeholder(tf.float32)
    y_conv = deep_nn(x, k_prob)
    learning_rate_ph = tf.placeholder(tf.float32)

    with tf.name_scope('loss'):
        # tmp1 = y_ * tf.log(y_conv)
        # print('tmp1 shape', tmp1.shape)
        # tmp2 = tmp1 * loss_weights
        # print('tmp2 shape', tmp2.shape)
        # cross_entroys = -tf.reduce_mean(y_ * tf.log(y_conv) * loss_weights, reduction_indices=[1])
        weights = tf.reduce_sum(loss_weights * y_, axis=1)
        unweighted_losses = tf.nn.softmax_cross_entropy_with_logits(
            labels=y_, logits=y_conv)
        weight_losses = unweighted_losses * weights
    loss = tf.reduce_mean(weight_losses)

    with tf.name_scope('adadelta_optimizer'):
        train_step = tf.train.AdadeltaOptimizer(learning_rate_ph).minimize(
            loss)

    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction)

    # graph_location = tempfile.mkdtemp()
    # print('Saving graph to: %s' % graph_location)
    # train_writer = tf.summary.FileWriter(graph_location)
    # train_writer.add_graph(tf.get_default_graph())

    saver = tf.train.Saver()

    with tf.Session() as sess:
        if config_h.is_train:
            start_i = 0
            end_i = sum(loop_epoch_nums)
            if config_h.is_restore:
                saver.restore(sess, restore_file)
                start_i = restart_epoch_i
            else:
                init = tf.global_variables_initializer()
                sess.run(init)
            for i in range(start_i, end_i):
                if i % log_epoch_interval == 0:
                    train_acc, train_loss = acc_loss_epoch(
                        x, y_, k_prob, accuracy, loss, mfcc_train, sess)
                    vali_acc, vali_loss = acc_loss_epoch(
                        x, y_, k_prob, accuracy, loss, mfcc_vali, sess)
                    print(
                        'epoch %d , train_acc %g , train_loss %g , vali_acc %g, vali_loss %g'
                        % (i, train_acc, train_loss, vali_acc, vali_loss))
                if i % persist_checkpoint_interval == 0 and i >= persist_checkpoint_interval:
                    saver.save(sess, persist_checkpoint_file + str(i))
                # train_step = get_train_step(train_steps, loop_epoch_nums, i)
                lr = get_lr(learning_rates, loop_epoch_nums, i)
                # print('learning rate', lr)
                train_epoch(x, y_, k_prob, train_step, learning_rate_ph, lr,
                            mfcc_train, sess)
        else:
            saver.restore(sess, restore_file)
        test_acc, test_loss = acc_loss_epoch(x, y_, k_prob, accuracy, loss,
                                             mfcc_test, sess)
        print('test_acc %g , test_loss %g' % (test_acc, test_loss))
        save_result(x, y_, k_prob, y_conv, mfcc_test, sess)
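The 'loss' scope above scales each example's softmax cross-entropy by a per-class weight selected through its one-hot label. The same pattern in isolation, TF1-style; the class count and weight values below are made up for illustration.

import tensorflow as tf

n_classes = 3
class_weights = tf.constant([1.0, 2.0, 0.5], dtype=tf.float32)

logits = tf.placeholder(tf.float32, [None, n_classes])
labels = tf.placeholder(tf.float32, [None, n_classes])  # one-hot rows

# Each example picks up the weight of its own class via the one-hot row.
example_weights = tf.reduce_sum(class_weights * labels, axis=1)
unweighted = tf.nn.softmax_cross_entropy_with_logits(labels=labels,
                                                     logits=logits)
weighted_loss = tf.reduce_mean(unweighted * example_weights)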
Example #6
    mask_steps = ([2, 10], [2, 5], [2, 2], [2, 1])
    state_n = 8

    # Check to see if the model data exists
    if os.path.isfile(rnet_id + '/model.dat'):
        rn_model = regnet_model.RegNetModel.load(rnet_id)
    else:
        rn_model = regnet_model.RegNetModel(rnet_id, net_prop, action_n,
                                            state_n, feature_n)

    # Check to see if the training data exists
    file_name = rnet_id + '/data'
    if os.path.isfile(file_name + '.h5'):
        train_data = data_set.DataSet.load(file_name)
    else:
        train_data = data_set.DataSet(file_name, mask_steps, action_n,
                                      feature_n, instance_count, buffer_size)

    # If the model has not yet been trained, do so now
    if not rn_model.trained:
        rn_model.run_training(train_data, max_steps=85000, restore=False)
        rn_model.save()

    if seed_model:
        # Seed the training data: match performance of linear reg. model
        error_count = 1
        while error_count > 0:
            error_count = learn_bbox(rn_model,
                                     train_data,
                                     update_inc=update_n,
                                     seed_data=True)
            print("train data stats:")
Example #7
    for i in range(k):
        im_average[:, i] /= im_count[i]

        plt.subplot(2, 5, i + 1)
        plt.title(kmeans.labels[i])
        plt.imshow(im_average[:, i].reshape(
            (kmeans.data.size_row, kmeans.data.size_col)),
                   cmap='Greys',
                   interpolation='None')

        frame = plt.gca()
        frame.axes.get_xaxis().set_visible(False)
        frame.axes.get_yaxis().set_visible(False)

    plt.show()


if __name__ == '__main__':
    data = data_set.DataSet()
    kmeans = KMeans(data, 10)
    kmeans.initialCluster()
    plotImages(kmeans)

    kmeans.run()
    print('After')
    print('energy history:')
    print(kmeans.getEnergyHistory())
    print('\naccuracy history: ')
    print(kmeans.getAccuracyHistory())
    plotImages(kmeans)
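im_average and im_count are filled in before the excerpt shown above; one way they could be accumulated from the cluster assignments, consistent with the column indexing used there (names inferred from usage, not the original code).

import numpy as np


def cluster_image_sums(images, assignments, k):
    # images: (n_pixels, n_samples); assignments: cluster index per sample.
    im_average = np.zeros((images.shape[0], k))
    im_count = np.zeros(k)
    for j, cluster in enumerate(assignments):
        im_average[:, cluster] += images[:, j]
        im_count[cluster] += 1
    return im_average, im_count  # caller divides each column by its count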