viz.text(json.dumps(obj=vars(args), sort_keys=True, indent=4))

# build network
if args.dataset == 'sinusoid':
    net_init, f = mlp(n_output=1,
                      n_hidden_layer=args.n_hidden_layer,
                      n_hidden_unit=args.n_hidden_unit,
                      bias_coef=args.bias_coef,
                      activation=args.activation,
                      norm=args.norm)
    # net_init returns (output_shape, params); only the params are needed
    _, params = net_init(rng=random.PRNGKey(42), input_shape=(-1, 1))

elif args.dataset == 'omniglot':
    net_init, f = conv_net(n_output=args.n_way,
                           n_conv_layer=args.n_hidden_layer,
                           n_filter=args.n_hidden_unit,
                           bias_coef=args.bias_coef,
                           activation='relu',
                           norm='None')
    _, params = net_init(rng=random.PRNGKey(42), input_shape=(-1, 28, 28, 1))

elif args.dataset == 'circle':
    net_init, f = mlp(n_output=args.n_way,
                      n_hidden_layer=args.n_hidden_layer,
                      n_hidden_unit=args.n_hidden_unit,
                      bias_coef=args.bias_coef,
                      activation=args.activation,
                      norm=args.norm)
    _, params = net_init(rng=random.PRNGKey(42), input_shape=(-1, 2))

else:
    raise ValueError(f"unknown dataset: {args.dataset}")
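
# Hedged smoke test (not part of the original script): sanity-check the
# freshly initialized network with a dummy batch. This assumes f follows
# the stax apply convention f(params, inputs) -> outputs, consistent with
# how net_init is called above.
if args.dataset == 'sinusoid':
    dummy = random.normal(random.PRNGKey(0), (8, 1))  # fake batch of 8 scalar inputs
    print('forward-pass output shape:', f(params, dummy).shape)  # expect (8, 1)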

def test_model():
    """Run the network over all remaining test images and report accuracy."""
    global image_number
    correct = 0
    while image_number > 0:
        batch_x, batch_y = sess.run([images, labels])
        batch_x = np.reshape(batch_x, [network.batch_size, network.input_size])
        acc = sess.run([correct_pred], feed_dict={network.X: batch_x, network.Y: batch_y, keep_prob: 1})
        image_number = image_number - network.batch_size
        correct = correct + np.sum(acc)
        print("Predicted %d out of %d; partial accuracy %.4f" % (correct, total_images - image_number, correct / (total_images - image_number)))
    print("Final accuracy: %.4f" % (correct / total_images))


logits = network.conv_net(network.X, network.weights, network.biases, keep_prob)
prediction = tf.nn.softmax(logits)

loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                                        labels=network.Y))
optimizer = tf.train.AdamOptimizer(learning_rate=network.learning_rate)
train_op = optimizer.minimize(loss=loss_op)

correct_pred = tf.equal(tf.argmax(prediction, 1), network.Y)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

init = tf.global_variables_initializer()


saver = tf.train.Saver()
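
# Hedged sketch of the training loop this graph implies. It is not in the
# original snippet: num_steps, the dropout keep probability, and the
# checkpoint path are assumptions.
with tf.Session() as sess:
    sess.run(init)
    for step in range(num_steps):
        batch_x, batch_y = sess.run([images, labels])
        batch_x = np.reshape(batch_x, [network.batch_size, network.input_size])
        _, loss = sess.run([train_op, loss_op],
                           feed_dict={network.X: batch_x,
                                      network.Y: batch_y,
                                      keep_prob: 0.75})
        if step % 100 == 0:
            print("step %d, loss %.4f" % (step, loss))
    saver.save(sess, "./model.ckpt")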
Example #3
import sys

from params import *
from network import conv_net, weights, biases
from loadTable import convTable, poolTable
from loadData import *

np.set_printoptions(threshold=sys.maxsize)  # np.nan is no longer a valid threshold in NumPy >= 1.14
os.environ["CUDA_VISIBLE_DEVICES"] = "3"

TRAIN_MODE = False

X = tf.placeholder(tf.float32, [None, 1, inWidth, inChannel])
Y = tf.placeholder(tf.float32, [None, num_classes])


logits = conv_net(X, weights, biases, convTable, poolTable, div)
prediction = tf.nn.softmax(logits)

loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)

# Evaluate model
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

start_time = time.time()
saver = tf.train.Saver()
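
# Hedged sketch of the inference path suggested by TRAIN_MODE = False.
# It is not in the original snippet: the checkpoint path and the
# testX/testY arrays (presumably from loadData) are assumptions.
with tf.Session() as sess:
    sess.run(init)
    if not TRAIN_MODE:
        saver.restore(sess, "./model.ckpt")
        acc = sess.run(accuracy, feed_dict={X: testX, Y: testY})
        print("test accuracy: %.4f (%.1fs elapsed)" % (acc, time.time() - start_time))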
Example #4
def example():
    # get face count of the group
    face_count = get_face_count(group_similarity_src)

    # select from database
    print("\n\n\n############ Training Session ############")

    datas = []
    for start_time, end_time in cnn_build_time_range_set:
        datas.extend(get_data(start_time, end_time))
    results.clear()

    # print the imported data's result as a benchmark
    print("\n\n\n------ Original Result ------")
    find_rate(datas, 'person_first_id')

    # build automaton
    root_c = build_model(datas)

    # apply decision model
    matched_face_table, unmatched_face_table, unmatched_validate_table, unmatched_face_objects = find_match(
        root_c.next, face_count)

    # convert data to vectors for the neural network
    unmatched_face_table, unmatched_validate_table, unmatched_face_objects = convert_to_vector(
        matched_face_table, unmatched_face_table, unmatched_validate_table,
        unmatched_face_objects)

    # separate training and test data
    division = int(unmatched_face_table.shape[0] * 0.7)
    X = unmatched_face_table[:division]
    y = unmatched_validate_table[:division]
    X_test = unmatched_face_table[division:]
    y_test = unmatched_validate_table[division:]

    # build the CNN and save the model
    model = conv_net(X, y, X_test, y_test)

    # use the model to make predictions on the construction session
    print("\n\n\n------ Unmatched Face (Corrected by CNN) ------")
    cnn_result = make_prediction(model, unmatched_face_table,
                                 unmatched_face_objects)

    print("\n\n\n------ Overall Result (Corrected by CNN) ------")
    all_result = list(results) + list(cnn_result)
    find_rate(all_result, 'result_id')

    print(predict_time_range_set)
    for predict_time_range in predict_time_range_set:
        print(predict_time_range)

        # get data from a different session for testing
        print("\n\n\n############ Independent Testing Session ############")
        start_time = predict_time_range[0]
        end_time = predict_time_range[1]
        datas = get_data(start_time, end_time)
        results.clear()

        # print the imported data's result as a benchmark
        print("\n\n\n------ Original Result ------")
        find_rate(datas, 'person_first_id')

        # build automaton
        root_c = build_model(datas)

        # apply decision model
        matched_face_table, unmatched_face_table, unmatched_validate_table, unmatched_face_objects = find_match(
            root_c.next, face_count)

        # convert data to vectors for the neural network
        unmatched_face_table, unmatched_validate_table, unmatched_face_objects = convert_to_vector(
            matched_face_table, unmatched_face_table, unmatched_validate_table,
            unmatched_face_objects)

        # evaluate the model using data from the independent test session
        score = model.evaluate(unmatched_face_table, unmatched_validate_table)
        print("\nCross Session Accuracy: ", score[-1])

        # use the model to make predictions on the independent test session
        print("\n\n\n------ Unmatched Face ------")
        find_rate(unmatched_face_objects, 'person_first_id')

        print("\n\n\n------ Unmatched Face (Corrected by CNN) ------")
        cnn_result = make_prediction(model, unmatched_face_table,
                                     unmatched_face_objects)

        print("\n\n\n------ Overall Result (Corrected by CNN) ------")
        all_result = list(results) + list(cnn_result)
        find_rate(all_result, 'result_id')
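

# Hypothetical entry point (not part of the original snippet):
if __name__ == "__main__":
    example()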