Example #1
# Note: TF1-style code. `networks`, `load`, `dataset`, IMAGE_PATH and
# IMAGE_LABLE (spelled as in the source) are assumed to come from the
# surrounding project.
import numpy as np
import tensorflow as tf

def eval_imagenet_q(net_name, param_pickle_path):
    netparams = load.load_netparams_tf_q(param_pickle_path)
    data_spec = networks.get_data_spec(net_name)
    input_node = tf.placeholder(tf.float32,
                                shape=(None, data_spec.crop_size,
                                       data_spec.crop_size,
                                       data_spec.channels))
    label_node = tf.placeholder(tf.int32, shape=(None,))  # integer class indices
    if net_name == 'alexnet':
        logits_ = networks.alexnet(input_node, netparams)
    elif net_name == 'googlenet':
        logits_ = networks.googlenet(input_node, netparams)
    elif net_name == 'nin':
        logits_ = networks.nin(input_node, netparams)
    elif net_name == 'resnet18':
        logits_ = networks.resnet18(input_node, netparams)
    elif net_name == 'resnet50':
        logits_ = networks.resnet50(input_node, netparams)
    elif net_name == 'squeezenet':
        logits_ = networks.squeezenet(input_node, netparams)
    elif net_name == 'vgg16net':
        logits_ = networks.vgg16net_noisy(input_node, netparams)
    else:
        raise ValueError('unknown network: {}'.format(net_name))
    # label_node holds integer class indices, so use the sparse cross-entropy.
    loss_op = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_,
                                                       labels=label_node))
    probs = tf.nn.softmax(logits_)
    top_k_op = tf.nn.in_top_k(probs, label_node, 5)
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001, epsilon=0.1)  # unused in this evaluation pass
    correct_pred = tf.equal(tf.argmax(probs, 1), tf.cast(label_node, tf.int64))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    count = 0
    correct = 0
    cur_accuracy = 0
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        image_producer = dataset.ImageNetProducer(val_path=IMAGE_LABLE,
                                                  data_path=IMAGE_PATH,
                                                  data_spec=data_spec)
        total = len(image_producer)
        coordinator = tf.train.Coordinator()
        threads = image_producer.start(session=sess, coordinator=coordinator)
        for (labels, images) in image_producer.batches(sess):
            correct += np.sum(
                sess.run(top_k_op,
                         feed_dict={
                             input_node: images,
                             label_node: labels
                         }))
            count += len(labels)
            cur_accuracy = float(correct) * 100 / count
            #print('{:>6}/{:<6} {:>6.2f}%'.format(count, total, cur_accuracy))
        print('top-5 accuracy: {:.2f}%'.format(cur_accuracy))
        coordinator.request_stop()
        coordinator.join(threads, stop_grace_period_secs=2)
    return cur_accuracy
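A minimal driver for the function above might look like the sketch below; the
network name and the pickle path here are illustrative assumptions, not values
from the source project.

if __name__ == '__main__':
    # Hypothetical parameter file chosen for illustration only.
    top5 = eval_imagenet_q('resnet18', './params/resnet18_quantized.pickle')
    print('final top-5 accuracy: {:.2f}%'.format(top5))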
Example #2
# Note: `googlenet` and BATCH_SIZE are assumed to be defined by the
# surrounding project.
import numpy as np
import scipy.io as sio
import tensorflow as tf

def get_predict_labels():
    inputs = tf.placeholder("float", [None, 64, 64, 1])
    is_training = tf.placeholder("bool")
    prediction, _ = googlenet(inputs, is_training)
    predict_labels = tf.argmax(prediction, 1)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    data = sio.loadmat("../data/dataset.mat")
    testdata = data["test"] / 127.5 - 1.0
    testlabel = data["testlabels"]
    saver.restore(sess, "../save_para/model.ckpt")
    nums_test = testlabel.shape[1]
    PREDICT_LABELS = np.zeros([nums_test])
    # Run the full batches, then handle any remainder in one extra pass.
    for i in range(nums_test // BATCH_SIZE):
        batch = testdata[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]
        PREDICT_LABELS[i * BATCH_SIZE:(i + 1) * BATCH_SIZE] = sess.run(
            predict_labels, feed_dict={inputs: batch, is_training: False})
    tail_start = (nums_test // BATCH_SIZE) * BATCH_SIZE
    if tail_start < nums_test:
        PREDICT_LABELS[tail_start:] = sess.run(
            predict_labels,
            feed_dict={inputs: testdata[tail_start:], is_training: False})
    np.savetxt("../data/predict_labels.txt", PREDICT_LABELS)
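The saved predictions can be checked against the ground truth directly. A
minimal sketch, assuming `testlabels` is a 1xN row vector as the function
above implies:

import numpy as np
import scipy.io as sio

predicted = np.loadtxt("../data/predict_labels.txt")
true = sio.loadmat("../data/dataset.mat")["testlabels"].flatten()
print("accuracy: {:.2f}%".format(100.0 * np.mean(predicted == true)))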
Example #3
# Same imports and project assumptions as Example #2.
def get_feature():
    inputs = tf.placeholder("float", [None, 64, 64, 1])
    is_training = tf.placeholder("bool")
    _, feature = googlenet(inputs, is_training)
    feature = tf.squeeze(feature, [1, 2])  # drop the 1x1 spatial dims -> [batch, 1024]
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    data = sio.loadmat("../data/dataset.mat")
    testdata = data["test"] / 127.5 - 1.0
    testlabels = data["testlabels"]
    saver.restore(sess, "../save_para/model.ckpt")
    nums_test = testdata.shape[0]
    FEATURE = np.zeros([nums_test, 1024])
    # Run the full batches, then handle any remainder in one extra pass.
    for i in range(nums_test // BATCH_SIZE):
        batch = testdata[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]
        FEATURE[i * BATCH_SIZE:(i + 1) * BATCH_SIZE] = sess.run(
            feature, feed_dict={inputs: batch, is_training: False})
    tail_start = (nums_test // BATCH_SIZE) * BATCH_SIZE
    if tail_start < nums_test:
        FEATURE[tail_start:] = sess.run(
            feature, feed_dict={inputs: testdata[tail_start:], is_training: False})
    sio.savemat("../data/feature.mat", {"feature": FEATURE, "testlabels": testlabels})
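A common use of the dumped 1024-d features is nearest-neighbour retrieval.
A minimal sketch, assuming feature.mat was written by the function above;
the normalization and retrieval choices here are illustrative, not from the
source project:

import numpy as np
import scipy.io as sio

mat = sio.loadmat("../data/feature.mat")
feats = mat["feature"].astype(np.float64)   # shape [nums_test, 1024]
feats /= np.linalg.norm(feats, axis=1, keepdims=True) + 1e-12
sims = feats @ feats.T                      # cosine similarity matrix
np.fill_diagonal(sims, -np.inf)             # exclude self-matches
nearest = sims.argmax(axis=1)               # closest other sample per row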