Example #1
    def run_rnn_grucell2(self):
        train_input, train_output = self.generate_data()

        DLogger.logger().debug("test and training data loaded")

        data = tf.placeholder(Const.FLOAT, [None, 20, 3])  # number of examples, sequence length, dimension of each input
        target = tf.placeholder(Const.FLOAT, [None, 21])
        num_hidden = 24
        n_samples = 5

        cell = GRUCell2(num_hidden, n_samples)
        n_batches = len(train_input)

        dW1, db1, dW2, db2 = cell.get_weight_dims(3, num_hidden)
        W1 = tf.get_variable(name='w1', shape=[n_samples, n_batches] + dW1,
                             initializer=tf.constant_initializer(0.1), dtype=Const.FLOAT)
        b1 = tf.get_variable(name='b1', shape=[n_samples, n_batches] + db1,
                             initializer=tf.constant_initializer(0.1), dtype=Const.FLOAT)
        W2 = tf.get_variable(name='w2', shape=[n_samples, n_batches] + dW2,
                             initializer=tf.constant_initializer(0.1), dtype=Const.FLOAT)
        b2 = tf.get_variable(name='b2', shape=[n_samples, n_batches] + db2,
                             initializer=tf.constant_initializer(0.1), dtype=Const.FLOAT)

        cell.set_weights(W1, b1, W2, b2)

        step_size = tf.ones([tf.shape(data)[0]], dtype=tf.int32) * tf.shape(data)[1]

        state_track, last_state = cell.dynamic_rnn(data, step_size)
        state_track = tf.transpose(state_track[0], [1, 0, 2])

        init_op = tf.global_variables_initializer()
        with tf.Session() as sess:
            sess.run(init_op)
            entropy = sess.run(state_track, {data: train_input, target: train_output})
            DLogger.logger().debug('Entropy:' + str(entropy))
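The generate_data helper called above is not shown. A minimal standalone sketch, assuming it only has to produce arrays matching the placeholder shapes ([None, 20, 3] inputs, [None, 21] targets), might look like this; generate_data_sketch and its sizes are hypothetical:

import numpy as np

def generate_data_sketch(n_examples=100):
    # Hypothetical stand-in for the unshown generate_data helper: random
    # sequences of length 20 with 3 features per step, and a 21-dimensional
    # target per example, matching the placeholder shapes above.
    train_input = np.random.rand(n_examples, 20, 3).astype(np.float32)
    train_output = np.random.rand(n_examples, 21).astype(np.float32)
    return train_input, train_output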
Example #2
    def opt_model_train_test(cls, model_path):
        output = '../nongit/local/BD/opt/'
        with LogFile(output, 'run.log'):
            action, reward, state, ids, seq_lengths = BD.get_data()

            tf.reset_default_graph()

            with tf.device('/device:GPU:0'):
                model = HYPMMD(enc_cells=20,
                               dec_cells=3,
                               a_size=2,
                               s_size=0,
                               latent_size=2,
                               n_T=action.shape[1],
                               static_loops=False,
                               mmd_coef=50)

            ensure_dir(output)

            actions_train, actions_test, rewards_train, rewards_test, seq_train, seq_test, id_train, id_test = \
                stratified_train_test_split(action, reward, state, ids, seq_lengths)

            DLogger.logger().debug("test points: " +
                                   str(actions_test.shape[0]))
            DLogger.logger().debug("train points: " +
                                   str(actions_train.shape[0]))

            def lr_schedule(t):
                if t < 2000:
                    _lr = 0.001
                elif t < 5000:
                    _lr = 0.0001
                else:
                    _lr = 0.00001
                return _lr

            BD.opt_model_mddae(model,
                               actions_train,
                               rewards_train,
                               None,
                               seq_train,
                               actions_test,
                               rewards_test,
                               None,
                               seq_test,
                               output + '/model/',
                               model_path,
                               hessian_term=False,
                               lr_schedule=lr_schedule)
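opt_model_mddae is not shown, so how it consumes lr_schedule is an assumption. One common TF1 pattern that would fit this signature is to feed the scheduled value into a learning-rate placeholder on every step; loss, sess, and the optimizer choice below are illustrative placeholders only:

# Hypothetical sketch of consuming a step-dependent schedule in a TF1 loop.
lr = tf.placeholder(Const.FLOAT, shape=[])
train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss)

for t in range(10000):
    feed = {lr: lr_schedule(t)}      # plus the data tensors for this batch
    sess.run(train_op, feed_dict=feed)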
Example #3
    def test_gru(self):
        DLogger.remove_handlers()
        self.test_output_buf = DLogger.get_string_logger()

        self.run_rnn_grucell2()
        self.test_output_buf.seek(0)
        output1 = self.test_output_buf.read()
        self.test_output_buf.seek(0)

        tf.reset_default_graph()
        self.run_rnn_grucell()
        self.test_output_buf.seek(0)
        output2 = self.test_output_buf.read()
        self.assertEqual(output1, output2)
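DLogger.get_string_logger and DLogger.remove_handlers belong to util.logger and are not shown here; the test only relies on them returning a readable buffer that captures everything logged during each run. A rough sketch of such a helper, assuming it is built on io.StringIO and the standard logging module (this is not the library's actual implementation):

import io
import logging

def get_string_logger_sketch():
    # Hypothetical: route log records into an in-memory buffer so the two
    # runs' outputs can be read back and compared with assertEqual.
    buf = io.StringIO()
    logging.getLogger().addHandler(logging.StreamHandler(buf))
    return buf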
Example #4
    def predict_z(cls, model_path):
        output = '../nongit/local/BD/opt/'
        with LogFile(output, 'run.log'):
            action, reward, state, ids, seq_lengths = BD.get_data()
            DLogger.logger().debug("data points: " + str(action.shape[0]))
            tf.reset_default_graph()
            ensure_dir(output)
            model = HYPMMD(enc_cells=20,
                           dec_cells=3,
                           a_size=2,
                           s_size=0,
                           latent_size=2,
                           n_T=action.shape[1],
                           static_loops=False)
            BD.predict(model, action, reward, state, ids, seq_lengths,
                       '../nongit/local/BD/', model_path)
Example #5
    def get_data(cls):
        data = DataReader.read_synth_normal()
        # data = data.loc[data.diag == 'Healthy']
        ids = data['id'].unique().tolist()
        dftr = pd.DataFrame({'id': ids, 'train': 'train'})
        tdftr = pd.DataFrame({'id': ids, 'train': 'test'})
        train, test = DataProcess.train_test_between_subject(
            data, pd.concat((dftr, tdftr)), [1], 2)
        DLogger.logger().debug("total points: " + str(get_total_pionts(train)))
        train = DataProcess.merge_data(train)

        action = train['merged'][0]['action']
        reward = train['merged'][0]['reward']
        state = train['merged'][0]['state']
        ids = train['merged'][0]['id']
        seq_lengths = train['merged'][0]['seq_lengths']

        return action, reward, state, ids, seq_lengths
Example #6
    def align_model(cls, model_path):
        output = '../nongit/local/BD/align/'
        with LogFile(output, 'run.log'):
            action, reward, state, ids, seq_lengths = BD.get_data()

            tf.reset_default_graph()

            with tf.device('/device:GPU:0'):
                model = HYPMMD(enc_cells=20,
                               dec_cells=3,
                               a_size=2,
                               s_size=0,
                               latent_size=2,
                               n_T=action.shape[1],
                               static_loops=True,
                               mmd_coef=2)

            ensure_dir(output)

            actions_train, actions_test, rewards_train, rewards_test, seq_train, seq_test, id_train, id_test = \
                stratified_train_test_split(action, reward, state, ids, seq_lengths)

            DLogger.logger().debug("test points: " +
                                   str(actions_test.shape[0]))
            DLogger.logger().debug("train points: " +
                                   str(actions_train.shape[0]))

            BD.opt_model_align(model,
                               actions_train,
                               rewards_train,
                               None,
                               seq_train,
                               actions_test,
                               rewards_test,
                               None,
                               seq_test,
                               output + '/model/',
                               model_path,
                               hessian_term=True,
                               _beta=0.5,
                               hessian_lr=0.0001,
                               _h=0.1)
Example #7
    def run_rnn_grucell(self):
        train_input, train_output = self.generate_data()

        DLogger.logger().debug("test and training data loaded")

        data = tf.placeholder(Const.FLOAT,
                              [None, 20, 3])  # number of examples, sequence length, dimension of each input
        target = tf.placeholder(Const.FLOAT, [None, 21])
        num_hidden = 24

        cell = tf.nn.rnn_cell.GRUCell(num_hidden,
                                      kernel_initializer=tf.constant_initializer(0.1),
                                      bias_initializer=tf.constant_initializer(0.1))

        val, _ = tf.nn.dynamic_rnn(cell, data, dtype=Const.FLOAT)
        val = tf.transpose(val, [1, 0, 2])

        init_op = tf.global_variables_initializer()
        with tf.Session() as sess:
            sess.run(init_op)
            entropy = sess.run(val, {data: train_input, target: train_output})
            DLogger.logger().debug('Entropy:' + str(entropy))

Example #8
import logging

try:
    # Capirca uses Google's abseil-py library, which uses a Google-specific
    # wrapper for logging. That wrapper will write a warning to sys.stderr if
    # the Google command-line flags library has not been initialized.
    #
    # https://github.com/abseil/abseil-py/blob/pypi-v0.7.1/absl/logging/__init__.py#L819-L825
    #
    # This is not right behavior for Python code that is invoked outside of a
    # Google-authored main program. Use knowledge of abseil-py to disable that
    # warning; ignore and continue if something goes wrong.
    import absl.logging

    # https://github.com/abseil/abseil-py/issues/99
    logging.root.removeHandler(absl.logging._absl_handler)
    # https://github.com/abseil/abseil-py/issues/102
    absl.logging._warn_preinit_stderr = False
except Exception:
    pass

from util.logger import DLogger

DLogger.set_up_logger()
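With the abseil handler removed and set_up_logger() called at import time, modules that import this file can log through DLogger as the other examples do, without the pre-init warning on stderr. For example:

from util.logger import DLogger

DLogger.logger().debug("logging configured without abseil's pre-init warning")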