Example #1
def detect(array, gesture_id):
    # Collect candidate gesture segments from a 1-D signal by counting
    # samples that fall below the gesture's threshold.
    peaks = list()
    count = 0
    start = 0
    for i, v in enumerate(array):
        if v < CONFIG.get_threshold(gesture_id):
            print("peak found")
            start = start + count
            count += 1
        if count == CONFIG.get_size_dim(gesture_id) - 1:
            # Enough samples accumulated: record the segment and reset the counters.
            peaks.append(Gesture(start, count, array[start:count]))
            count = 0
            start = 0
    return peaks
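A minimal sketch of how detect might be driven, assuming hypothetical stand-ins for the project's CONFIG and Gesture (the 0.5 threshold and 150-sample window are made-up values, not the project's configuration):

import numpy as np
from collections import namedtuple

# Hypothetical stand-ins, only so the snippet runs on its own.
Gesture = namedtuple("Gesture", ["start", "length", "samples"])

class _StubConfig:
    def get_threshold(self, gesture_id):
        return 0.5      # assumed detection threshold
    def get_size_dim(self, gesture_id):
        return 150      # assumed gesture window length

CONFIG = _StubConfig()

signal = np.random.rand(1000)              # synthetic 1-D sensor stream
candidates = detect(signal, gesture_id=1)
print(len(candidates), "candidate segments")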
Example #2
def get_results(series, gesture_id):
    # TODO: read current_end from the configuration (from the trained model).
    current_end = 0
    window_len = CONFIG.get_size_dim(gesture_id)
    window = np.full((1, window_len), np.nan)
    results = list()
    # Slide the window across the series and keep every intermediate window.
    while current_end <= len(series) - 1:
        window, current_end, current_start = Launcher.update_window(
            series, window, current_end)
        results.append(window)
    return results
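Launcher.update_window is not shown in this listing; a minimal sketch of a one-step sliding-window update with the same (window, current_end, current_start) return shape, assuming the window advances by one sample per call (an assumption, not the project's actual implementation):

import numpy as np

def update_window(series, window, current_end):
    # Hypothetical helper: shift the window contents left by one, append the
    # next sample from the series, then advance the end index.
    window = np.roll(window, -1, axis=1)
    window[0, -1] = series[current_end]
    current_end += 1
    current_start = max(0, current_end - window.shape[1])
    return window, current_end, current_start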
Example #3
def evaluate_model(gesture_id):

    outer_list = list()
    for data_set in enumerate(CONFIG.UNI_GE_DATA_SETS):
        inner_list = list()

        for i in range(CONFIG.evaluation_runs):
            train_set, test_set, validation = FilesUtil.split_data_set(
                CONFIG.get_path(data_set[1]), CONFIG.train_space)
            x_new, y_pre = evaluate_rnn_model(
                FilesUtil.get_random_file(test_set), str(gesture_id),
                CONFIG.get_neurons_dim(gesture_id))
            # One-step-ahead error per axis: the prediction at step t
            # (y_pre[:-1]) is compared with the input at step t + 1 (x_new[1:]).
            mse_x = get_mse(x_new[0][1:, 0], y_pre[0][:-1, 0])
            mse_y = get_mse(x_new[0][1:, 1], y_pre[0][:-1, 1])
            mse_z = get_mse(x_new[0][1:, 2], y_pre[0][:-1, 2])
            # Combine the three per-axis errors into a single Euclidean norm.
            inner_list.append(
                np.sqrt(mse_x * mse_x + mse_y * mse_y + mse_z * mse_z))
            # main_index = data_set[1]
            # result_dictionary[main_index][i] =
        outer_list.append(inner_list)

    return DataFrame(outer_list, index=[1, 2, 5, 6, 7, 8])
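get_mse itself is not part of this listing; a plausible per-axis helper, together with the combined error used above, assuming both arguments are equally sized 1-D NumPy arrays:

import numpy as np

def get_mse(actual, predicted):
    # Assumed helper: plain mean squared error between two 1-D arrays.
    actual = np.asarray(actual, dtype=float)
    predicted = np.asarray(predicted, dtype=float)
    return float(np.mean((actual - predicted) ** 2))

def combined_error(mse_x, mse_y, mse_z):
    # Euclidean norm of the three per-axis MSE values, as computed inline above.
    return float(np.sqrt(mse_x ** 2 + mse_y ** 2 + mse_z ** 2))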
Example #4
def online_test(path, gesture_id):

    results_folder_name = os.path.basename(os.path.splitext(path)[0])
    full_path = 'online_results/' + results_folder_name

    if not os.path.exists(full_path):
        os.makedirs(full_path)

    data_set = FilesUtil.generate_data_set_from_file(path)
    # Slide a fixed 150-sample window across the recording and score each window.
    start = 0
    end = 149
    errors = list()
    values = list()
    predictions = list()
    for i in range(len(data_set)):

        if end < len(data_set):
            current = data_set[start:end]
            x_new, y_pre = evaluate_rnn_model(
                current.values, str(gesture_id),
                CONFIG.get_neurons_dim(gesture_id))
            # One-step-ahead error per axis, combined into a single Euclidean norm.
            mse_x = get_mse(x_new[0][1:, 0], y_pre[0][:-1, 0])
            mse_y = get_mse(x_new[0][1:, 1], y_pre[0][:-1, 1])
            mse_z = get_mse(x_new[0][1:, 2], y_pre[0][:-1, 2])
            errors.append(
                np.sqrt(mse_x * mse_x + mse_y * mse_y + mse_z * mse_z))
            values.append(x_new)
            predictions.append(y_pre)
            current, start, end = FilesUtil.get_next_window(current,
                                                            dimension=150,
                                                            start=start,
                                                            end=end)
            print("done " + str(i) + " of " + str(len(data_set)) +
                  " error " + str(errors[-1]) + " gesture " + str(gesture_id))

    FilesUtil.save_results_to_file(errors, results_folder_name, gesture_id)
    return errors, values, predictions
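A hypothetical invocation (the recording path below is a placeholder, not a file from the project):

errors, values, predictions = online_test('recordings/session_01.csv', gesture_id=1)
print("windows evaluated:", len(errors))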
Example #5
def evaluate_rnn_model(single_series, gesture_id, neurons_dim):
    tf.reset_default_graph()
    # TODO: move this parameter into the configuration class.
    # cut = CONFIG.max_steps
    x = tf.placeholder(tf.float32, [None, None, CONFIG.input_dim])
    # seq_length = tf.placeholder(tf.int32, [None])
    # Rebuild the same LSTM architecture that was used for training.
    cell = tf.contrib.rnn.OutputProjectionWrapper(
        tf.contrib.rnn.LSTMCell(
            num_units=neurons_dim,
            activation=CONFIG.get_activation_function(gesture_id)),
        output_size=CONFIG.output_dim)
    outputs, states = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        # x_new = FilesUtil.cut_and_reshape(single_series, cut)
        x_new = FilesUtil.reshape(single_series)
        # Restore the checkpoint for this gesture and run a forward pass only.
        model_path = os.path.join(os.getcwd(), 'model', 'model' + gesture_id,
                                  'rnn_' + gesture_id + '.ckpt')
        saver.restore(sess, model_path)
        y_pre = sess.run(outputs, feed_dict={x: x_new})
    return x_new, y_pre
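A minimal usage sketch, assuming a trained checkpoint already exists under model/model1/ and mirroring the slicing convention used in the other examples (the prediction at step t is compared with the input at step t + 1):

# Hypothetical smoke test for the gesture-1 model.
train_set, test_set, validation = FilesUtil.split_data_set(
    CONFIG.get_path(1), CONFIG.train_space)
x_new, y_pre = evaluate_rnn_model(
    FilesUtil.get_random_file(test_set), str(1), CONFIG.get_neurons_dim(1))

# One-step-ahead error on the x axis.
mse_x = get_mse(x_new[0][1:, 0], y_pre[0][:-1, 0])
print("x-axis one-step-ahead MSE:", mse_x)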
Example #6
def detect_old(array, gesture_id):
    starting_point = CONFIG.get_size_dim(gesture_id) - 1
    # Note: i is relative to the sliced array, i.e. offset by starting_point.
    for i, v in enumerate(array[starting_point:]):
        if all(j < CONFIG.get_threshold(gesture_id) for j in v):
            print("maybe found a gesture from index " + str(i) +
                  " to index " + str(i + 150))
Example #7
def test():
    # Evaluate the stored gesture-1 model against the data sets for
    # gestures 1, 2, 5, 6, 7 and 8, printing the combined error of each run.
    for path_id in (1, 2, 5, 6, 7, 8):
        for i in range(CONFIG.evaluation_runs):
            train_set, test_set, valid = FilesUtil.split_data_set(
                CONFIG.get_path(path_id), CONFIG.train_space)
            x_new, y_pre = evaluate_rnn_model(
                FilesUtil.get_random_file(test_set), str(1),
                CONFIG.get_neurons_dim(1))
            mse_x = get_mse(x_new[0][1:, 0], y_pre[0][:-1, 0])
            mse_y = get_mse(x_new[0][1:, 1], y_pre[0][:-1, 1])
            mse_z = get_mse(x_new[0][1:, 2], y_pre[0][:-1, 2])
            print(np.sqrt(mse_x * mse_x + mse_y * mse_y + mse_z * mse_z))
        if path_id != 8:
            print("----------------------------------------------------")
Example #8
def create_rnn_model(train_data_sets, validation_data_sets, gesture_id,
                     batch_size, neurons_dim, n_epochs):
    tf.reset_default_graph()
    x = tf.placeholder(tf.float32, [None, None, CONFIG.input_dim])
    y = tf.placeholder(tf.float32, [None, None, CONFIG.output_dim])
    seq_length = tf.placeholder(tf.int32, [None])

    cell = tf.contrib.rnn.OutputProjectionWrapper(
        tf.contrib.rnn.LSTMCell(
            num_units=neurons_dim,
            activation=CONFIG.get_activation_function(gesture_id)),
        output_size=CONFIG.output_dim)
    outputs, states = tf.nn.dynamic_rnn(cell,
                                        x,
                                        dtype=tf.float32,
                                        sequence_length=seq_length)

    loss = tf.reduce_mean(tf.square(outputs - y))  # MSE
    optimizer = tf.train.AdamOptimizer(learning_rate=CONFIG.learning_rate)
    training_op = optimizer.minimize(loss)

    init = tf.global_variables_initializer()

    saver = tf.train.Saver()

    with tf.Session() as sess:
        init.run()
        # The epoch/iteration loop is delegated to loop_epochs, which returns the session.
        sess = loop_epochs(x=x,
                           y=y,
                           n_epochs=n_epochs,
                           training_op=training_op,
                           loss=loss,
                           batch_size=batch_size,
                           train_data_sets=train_data_sets,
                           validation_data_sets=validation_data_sets,
                           seq_length=seq_length,
                           gesture_id=gesture_id,
                           sess=sess)
        # validation_check = 0
        # for epoch in range(n_epochs):
        #     for iteration in range(CONFIG.train_space // batch_size):
        #         bx, by, s = FilesUtil.feed_next_batch(train_data_sets, batch_size, CONFIG.max_steps)
        #         vx, vy, s = FilesUtil.feed_next_batch(validation_data_sets, batch_size, CONFIG.max_steps)
        #         sess.run(training_op, feed_dict={x: np.asarray(bx), y: np.asarray(by), seq_length: np.asarray(s)})
        #         mse_train = loss.eval(feed_dict={x: np.asarray(bx), y: np.asarray(by), seq_length: np.asarray(s)})
        #         mse_val = loss.eval(feed_dict={x: np.asarray(vx), y: np.asarray(vy), seq_length: np.asarray(s)})
        #         perc_diff = FilesUtil.get_percent_diff(mse_train, mse_val)
        #         print("EPOCH=", epoch, "MSE_train", mse_train, "MSE_validate", mse_val, "Percentage Diff",
        #               perc_diff, "Gesture ", gesture_id)
        #         if mse_train < 1.0 and mse_val < 1.0 and perc_diff > 10.0:
        #             validation_check += 1
        #         elif validation_check == 3:
        #             print("exiting....")
        #             break
        #     if validation_check == 3:
        #         print("exiting..")
        #         break
        #         # print("EPOCH=", epoch, "MSE_validate", mse)
        #     if epoch % 100 == 0:
        #         print("epoch", epoch, "completed")
        # Persist the trained weights under model/model<gesture_id>/rnn_<gesture_id>.ckpt,
        # the same path that evaluate_rnn_model restores from.
        model_path = os.path.join(os.getcwd(), 'model', 'model' + gesture_id,
                                  'rnn_' + gesture_id + '.ckpt')
        saver.save(sess, model_path)
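The commented-out block above sketches a validation-based stopping rule; distilled into a standalone helper it could look like the following (a sketch only, not the project's loop_epochs, and the percentage-difference formula is an assumption standing in for FilesUtil.get_percent_diff):

def should_stop(mse_train, mse_val, validation_check, patience=3):
    # Count epochs where both errors are already small (< 1.0) but still differ
    # by more than 10 percent, and stop once 'patience' such epochs accumulate.
    # 'patience' is an assumed name for the hard-coded count of 3 above.
    perc_diff = abs(mse_train - mse_val) / max(mse_train, mse_val, 1e-12) * 100.0
    if mse_train < 1.0 and mse_val < 1.0 and perc_diff > 10.0:
        validation_check += 1
    return validation_check >= patience, validation_check

Inside the epoch loop this would be called once per iteration, carrying validation_check across calls and breaking out of training when the first return value is True.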