def eval_entropy(model, sess, dataset_name):
    # mean of the model's entropy op over all batches of the named split
    entropy = 0.0
    batch_count, data_size = get_batch_count(dataset[dataset_name], args.batch_size)
    for batch_idx in range(0, batch_count):
        begin_idx = batch_idx * args.batch_size
        end_idx = min(begin_idx + args.batch_size, data_size)
        entropy = entropy + model.entropy.eval(session=sess, feed_dict={
            model.xs: dataset[dataset_name][begin_idx: end_idx],
        })
    return entropy / batch_count
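# The mean above weights every batch equally, but the min() on end_idx implies
# the last batch can be shorter than args.batch_size, which skews the average
# slightly.  A size-weighted variant (a sketch; `eval_entropy_weighted` is a
# hypothetical name, and it assumes model.entropy is a per-example mean and
# reuses the same `dataset` / `args` / `get_batch_count` globals):
def eval_entropy_weighted(model, sess, dataset_name):
    total, seen = 0.0, 0
    batch_count, data_size = get_batch_count(dataset[dataset_name], args.batch_size)
    for batch_idx in range(batch_count):
        begin_idx = batch_idx * args.batch_size
        end_idx = min(begin_idx + args.batch_size, data_size)
        batch = dataset[dataset_name][begin_idx:end_idx]
        # weight each batch mean by the number of examples it actually contains
        batch_entropy = model.entropy.eval(session=sess, feed_dict={model.xs: batch})
        total += batch_entropy * len(batch)
        seen += len(batch)
    return total / seen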
def eval_mse(model, sess, dataset_name):
    # mean reconstruction MSE over the split, with the decoder fed its own
    # previous outputs (feed_previous=True)
    mse = 0.0
    batch_count, data_size = get_batch_count(dataset[dataset_name], args.batch_size)
    for batch_idx in range(0, batch_count):
        begin_idx = batch_idx * args.batch_size
        end_idx = min(begin_idx + args.batch_size, data_size)
        mse = mse + model.mse_error.eval(session=sess, feed_dict={
            model.xs: dataset[dataset_name][begin_idx: end_idx],
            model.ys: dataset[dataset_name][begin_idx: end_idx],
            model.feed_previous: True,
        })
    return mse / batch_count
def eval_acc(model, sess, dataset_name):
    # mean accuracy over the split, with the decoder fed its own previous outputs
    accuracy = 0.0
    batch_count, data_size = get_batch_count(dataset[dataset_name],
                                             args.batch_size)
    for batch_idx in range(0, batch_count):
        begin_idx = batch_idx * args.batch_size
        end_idx = min(begin_idx + args.batch_size, data_size)
        accuracy = accuracy + model.accuracy.eval(
            session=sess,
            feed_dict={
                model.xs: dataset[dataset_name][begin_idx:end_idx],
                model.ys: dataset[dataset_name][begin_idx:end_idx],
                model.feed_previous: True,
            })
    return accuracy / batch_count
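# The three helpers above repeat the same batching loop; a generic variant
# (a sketch -- `eval_scalar` is a hypothetical name, assuming the same
# `dataset` / `args` / `get_batch_count` globals) can average any scalar tensor
# of the model:
def eval_scalar(model, sess, tensor, dataset_name, feed_previous=False):
    total = 0.0
    batch_count, data_size = get_batch_count(dataset[dataset_name], args.batch_size)
    for batch_idx in range(batch_count):
        begin_idx = batch_idx * args.batch_size
        end_idx = min(begin_idx + args.batch_size, data_size)
        batch = dataset[dataset_name][begin_idx:end_idx]
        feed = {model.xs: batch}
        if feed_previous:
            # decoder reconstructs the same sequence from its own previous outputs
            feed[model.ys] = batch
            feed[model.feed_previous] = True
        total += sess.run(tensor, feed_dict=feed)
    return total / batch_count
# e.g. eval_scalar(model, sess, model.mse_error, 'validate', feed_previous=True)
# ('validate' is an assumed split name)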
def eval_metric(model, sess, dataset_name):
    # mean accuracy and entropy over the '<name>_feature' / '<name>_label' split
    acc = 0.0
    entropy = 0.0
    batch_count, data_size = get_batch_count(dataset[dataset_name + '_feature'], args.batch_size)
    for batch_idx in range(0, batch_count):
        begin_idx = batch_idx * args.batch_size
        end_idx = min(begin_idx + args.batch_size, data_size)
        _acc, _entropy = sess.run([model.accuracy, model.entropy], feed_dict={
            model.xs: dataset[dataset_name + '_feature'][begin_idx: end_idx],
            model.ys: dataset[dataset_name + '_label'][begin_idx: end_idx],
        })
        acc = acc + _acc
        entropy = entropy + _entropy
    return acc / batch_count, entropy / batch_count
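# A possible call site for eval_metric (a sketch -- the 'train' / 'validate'
# split names are assumptions; only the '_feature' / '_label' suffixes are
# used above):
# train_acc, train_entropy = eval_metric(model, sess, 'train')
# validate_acc, validate_entropy = eval_metric(model, sess, 'validate')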
    # restore a previously trained model if a source checkpoint was given,
    # otherwise start from freshly initialized variables
    if args.src:
        importSaver = tf.train.Saver()
        importSaver.restore(sess, args.src)
    else:
        # initialize variables
        sess.run(tf.global_variables_initializer())

    if args.dest:
        exportSaver = tf.train.Saver()
        prepare_directory(os.path.dirname(args.dest))
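    # exportSaver would presumably persist the trained weights later on, along
    # the lines of: exportSaver.save(sess, args.dest)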

    filename = args.log or os.path.join(
        prepare_directory(os.path.join('../build/plots', args.scope,
                                       args.name)), 'log.csv')
    min_validate_mse = 999999
    batch_count, data_size = get_batch_count(dataset['train'], args.batch_size)
    with open(filename, 'w') as fd_log:
        start_time = time.time()

        # before training
        validate_mse = visualize_dataset(model, sess, 0, 'validate')
        anomalous_mse = visualize_dataset(model, sess, 0, 'anomalous')
        print(
            'Epoch\t%d, Batch\t%d, Elapsed time\t%.1fs, Validate MSE\t%s, Anomalous MSE\t%s, Min Validate MSE\t%s'
            % (0, 0, 0, validate_mse, anomalous_mse, min_validate_mse))

        learning_rates_schedules = np.reshape(args.learning_rates, (-1, 3))
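        # args.learning_rates is a flat list regrouped into rows of three; only
        # the third column is read as the learning rate below.  A plausible
        # layout (the meaning of the first two columns is an assumption, e.g.
        # an epoch range):
        #   args.learning_rates = [1, 50, 0.001, 51, 100, 0.0001]
        #   -> [[1, 50, 0.001], [51, 100, 0.0001]]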
        for schedule in learning_rates_schedules:
            learning_rate = schedule[2]

            # loop epochs
Example #6
        # start session
        sess = tf.InteractiveSession(
            # config=tf.ConfigProto(intra_op_parallelism_threads=N_THREADS)
            # config=tf.ConfigProto(intra_op_parallelism_threads=16)
        )

        # prepare model import or export
        # importSaver = tf.train.Saver()
        # importSaver.restore(sess, args.src)
        importSaver = tf.train.import_meta_graph(
            os.path.join(args.src, '../model.meta'))
        importSaver.restore(sess, args.src)
        # graph = tf.get_default_graph()
        # restored_prediction = graph.get_operation_by_name('restored_prediction').outputs[0]
        print('Load Model \t%.5fs' % (time.time() - start_time, ))
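        # To pull a tensor back out of the imported meta-graph by name (as the
        # commented lines above suggest), something along these lines is
        # typical; the tensor name is an assumption taken from that comment:
        # graph = tf.get_default_graph()
        # restored_prediction = graph.get_tensor_by_name('restored_prediction:0')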

        for context in ['sampled', 'ordered']:
            batch_count, data_size = get_batch_count(dataset[context],
                                                     args.batch_size)
            mses = np.array([])
            for batch_idx in range(0, batch_count, args.batch_step):
                start_time = time.time()

                if context == 'ordered' and batch_idx % 50 == 0:
                    print('{0} / {1} batches'.format(batch_idx, batch_count))
                begin_idx = batch_idx * args.batch_size
                end_idx = min(begin_idx + args.batch_size, data_size)
                ground_truth = dataset[context][begin_idx:end_idx]
                # restored_predictions = model.restored_prediction.eval(
                #     session=sess,
                #     feed_dict={
                #         model.xs: ground_truth,
                #         model.ys: ground_truth,
                #         model.feed_previous: True,
Example #7
if __name__ == '__main__':
    read_dataset()
    model = Model(args.step_size, args.hidden_size, args.embedding_size,
                  args.symbol_size, args.layer_depth, args.batch_size,
                  args.dropout_rate)

    # start session
    sess = tf.InteractiveSession(
        # config=tf.ConfigProto(intra_op_parallelism_threads=N_THREADS)
    )

    # prepare model import or export
    importSaver = tf.train.Saver()
    importSaver.restore(sess, args.src)

    batch_count, data_size = get_batch_count(dataset['ordered'],
                                             args.batch_size)
    plot_xs = []
    plot_ys = []

    start_time = time.time()
    for batch_idx in range(0, batch_count, args.batch_step):
        begin_idx = batch_idx * args.batch_size
        end_idx = min(begin_idx + args.batch_size, data_size)
        ground_truth = dataset['ordered'][begin_idx:end_idx]
        predictions = model.prediction.eval(session=sess,
                                            feed_dict={
                                                model.xs: ground_truth,
                                                model.ys: ground_truth,
                                                model.feed_previous: True,
                                            })
        # element-wise comparison: True where the predicted symbol matches the
        # ground truth (np.equal marks matches, not mismatches)
        errors = np.equal(predictions, ground_truth)
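        # A plausible continuation (an assumption, not from the original):
        # collect a per-batch match rate against the batch index for plotting.
        # match_rate = errors.mean()
        # plot_xs.append(batch_idx)
        # plot_ys.append(match_rate)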