Example #1
import numpy as np
import tensorflow as tf
from PIL import Image

import retrain  # tensorflow/examples/image_retraining/retrain.py


def evaluate_graph(graph_file_name):
    """Average accuracy and cross-entropy of a retrained graph on the test split."""
    with load_graph(graph_file_name).as_default() as graph:
        ground_truth_input = tf.placeholder(tf.float32, [None, 5],
                                            name='GroundTruthInput')

        image_buffer_input = graph.get_tensor_by_name('input:0')
        final_tensor = graph.get_tensor_by_name('final_result:0')
        accuracy, _ = retrain.add_evaluation_step(final_tensor,
                                                  ground_truth_input)

        logits = graph.get_tensor_by_name("final_training_ops/Wx_plus_b/add:0")
        xent = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=ground_truth_input,
                                                    logits=logits))

    image_dir = 'tf_files/flower_photos'
    testing_percentage = 10
    validation_percentage = 10
    validation_batch_size = 100
    category = 'testing'

    image_lists = retrain.create_image_lists(image_dir, testing_percentage,
                                             validation_percentage)
    class_count = len(image_lists.keys())

    ground_truths = []
    filenames = []

    for label_index, label_name in enumerate(image_lists.keys()):
        for image_index, image_name in enumerate(
                image_lists[label_name][category]):
            # Resolve the base name to the image's full path on disk.
            image_name = retrain.get_image_path(image_lists, label_name,
                                                image_index, image_dir,
                                                category)
            ground_truth = np.zeros([1, class_count], dtype=np.float32)
            ground_truth[0, label_index] = 1.0
            ground_truths.append(ground_truth)
            filenames.append(image_name)

    accuracies = []
    xents = []
    with tf.Session(graph=graph) as sess:
        # Score each test image individually; average the results afterwards.
        for filename, ground_truth in zip(filenames, ground_truths):
            # Resize (LANCZOS replaces Pillow's removed ANTIALIAS alias) and
            # scale pixels to [-1, 1].
            image = Image.open(filename).resize((224, 224), Image.LANCZOS)
            image = np.array(image, dtype=np.float32)[None, ...]
            image = (image - 128) / 128.0

            feed_dict = {
                image_buffer_input: image,
                ground_truth_input: ground_truth
            }

            eval_accuracy, eval_xent = sess.run([accuracy, xent], feed_dict)

            accuracies.append(eval_accuracy)
            xents.append(eval_xent)

    return np.mean(accuracies), np.mean(xents)
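
All of these examples lean on a load_graph helper that the listing does not show. A minimal sketch, assuming the standard TF1 frozen-graph loader with an empty import name scope (Examples #1 and #4 fetch tensors like 'input:0' with no prefix; Example #3 instead expects the default scope, which prepends 'import/'):

import tensorflow as tf


def load_graph(model_file):
    # Parse the serialized GraphDef out of the frozen .pb file.
    graph = tf.Graph()
    graph_def = tf.GraphDef()
    with open(model_file, "rb") as f:
        graph_def.ParseFromString(f.read())
    # name='' keeps node names unprefixed ('input:0' rather than 'import/input:0');
    # dropping it restores the 'import/' prefix Example #3 expects.
    with graph.as_default():
        tf.import_graph_def(graph_def, name='')
    return graph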
Example #2
def get_graph(path):
    """Load every frozen model file under `path`, one graph each, in name order."""
    clss = sorted(os.listdir(path))
    print(clss)
    graphs = []
    for cls in clss:
        graph_classifier = load_graph(os.path.join(path, cls))
        graphs.append(graph_classifier)

    return graphs
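
A brief usage sketch for get_graph. The directory layout is an assumption (one frozen .pb per ensemble member), as is how the loaded graphs are consumed:

import tensorflow as tf

# Hypothetical layout: ./frozen_pb/ holds one frozen .pb per classifier.
graphs = get_graph('./frozen_pb')
for g in graphs:
    with tf.Session(graph=g) as sess:
        pass  # run the same batch through every member, then average the outputs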
Example #3
        # Override the defaults with whichever command-line flags were given
        # (model_file, file_name, and the defaults are set earlier in the script).
        if args.labels:
            label_file = args.labels
        if args.input_height:
            input_height = args.input_height
        if args.input_width:
            input_width = args.input_width
        if args.input_mean:
            input_mean = args.input_mean
        if args.input_std:
            input_std = args.input_std
        if args.input_layer:
            input_layer = args.input_layer
        if args.output_layer:
            output_layer = args.output_layer

        graph = load_graph(model_file)
        t = read_tensor_from_image_file(file_name,
                                        input_height=input_height,
                                        input_width=input_width,
                                        input_mean=input_mean,
                                        input_std=input_std)

        # tf.import_graph_def ran with its default name scope here, so every
        # node name carries the "import/" prefix.
        input_name = "import/" + input_layer
        output_name = "import/" + output_layer
        input_operation = graph.get_operation_by_name(input_name)
        output_operation = graph.get_operation_by_name(output_name)

        with tf.Session(graph=graph) as sess:
            start = time.time()
            results = sess.run(output_operation.outputs[0],
                               {input_operation.outputs[0]: t})
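
Example #3 also assumes a read_tensor_from_image_file helper. A minimal sketch modeled on the helper in TensorFlow's label_image demo (decode, resize, then normalize with the given mean and std); the real demo additionally branches on the file extension for PNG/GIF/BMP:

def read_tensor_from_image_file(file_name, input_height=299, input_width=299,
                                input_mean=0, input_std=255):
    # Decode the file into a float batch of shape [1, height, width, 3].
    file_reader = tf.read_file(file_name, "file_reader")
    image_reader = tf.image.decode_jpeg(file_reader, channels=3,
                                        name="jpeg_reader")
    float_caster = tf.cast(image_reader, tf.float32)
    dims_expander = tf.expand_dims(float_caster, 0)
    # Resize to the model's input size and normalize with the given mean/std.
    resized = tf.image.resize_bilinear(dims_expander,
                                       [input_height, input_width])
    normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
    with tf.Session() as sess:
        return sess.run(normalized)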
Example #4
import os
import time

import numpy as np
import pandas as pd
import tensorflow as tf

import retrain  # tensorflow/examples/image_retraining/retrain.py


def extract():
    """Run a frozen MobileNetV2 over a test set and report its accuracy."""
    input_layer = "input"
    output_layer = "MobilenetV2/Predictions/Reshape_1"
    graph = load_graph('./frozen_pb/frozen_0-0-0.pb')
    input_operation = graph.get_operation_by_name(input_layer)
    output_operation = graph.get_operation_by_name(output_layer)

    #image_dir = '/home/xxxx/PHICOMM/ai-share/dataset/imagenet/raw-data/imagenet-data/validation'
    image_dir = "/home/deepl/PHICOMM/dataset/cifar10_tf/cifar-10/test"
    testing_percentage = 100
    validation_percentage = 0
    category = 'testing'

    image_lists = retrain.create_image_lists(image_dir, testing_percentage,
                                             validation_percentage)
    class_count = len(image_lists.keys())
    print(class_count)

    total_start = time.time()

    ground_truths = []
    filenames = []

    all_files_df = pd.DataFrame(
        columns=['image_name', 'ground_truth', 'predicted_label'])

    for label_index, label_name in enumerate(image_lists.keys()):
        for image_index, image_name in enumerate(
                image_lists[label_name][category]):
            image_name = retrain.get_image_path(image_lists, label_name,
                                                image_index, image_dir,
                                                category)
            ground_truth = np.zeros([1, class_count], dtype=np.float32)
            ground_truth[0, label_index] = 1.0
            ground_truths.append(ground_truth)
            filenames.append(image_name)

            ground_truth_argmax = np.argmax(ground_truth, axis=1)
            ground_truth_argmax = np.squeeze(ground_truth_argmax)

            # DataFrame.append was removed in pandas 2.0; build the row and concat.
            all_files_df = pd.concat(
                [all_files_df,
                 pd.DataFrame([{
                     'image_name': image_name,
                     'ground_truth': ground_truth_argmax
                 }])],
                ignore_index=True)

    if os.path.exists("./data"):
        print("./data already exists, please delete it first!")
        exit()

    cf = 0.875  # central-crop fraction; unused in this snippet
    predictions = []
    start = time.time()
    with graph.as_default():
        # Build the JPEG decode/preprocess op inside the same graph the session
        # runs; otherwise sess.run() could not find it.
        read_tensor_from_jpg_image_file_op = read_tensor_from_jpg_image_file(
            input_height=224, input_width=224)
    with tf.Session(graph=graph) as sess:
        for filename in filenames:
            t = sess.run(read_tensor_from_jpg_image_file_op,
                         feed_dict={"fnamejpg:0": filename})
            t = np.expand_dims(t, axis=0)
            pre = sess.run(output_operation.outputs[0],
                           {input_operation.outputs[0]: t})
            predictions.append(pre)
    predictions = np.array(predictions)
    predictions = np.squeeze(predictions)
    ground_truths = np.array(ground_truths)
    ground_truths = np.squeeze(ground_truths)

    print(predictions.shape)
    print(ground_truths.shape)

    with tf.Session() as sess:
        # These evaluation ops are new, so build them in the session's own
        # default graph; the frozen inference graph is no longer needed here.
        ground_truth_input = tf.placeholder(tf.float32, [None, 10],
                                            name='GroundTruthInput')
        fts = tf.placeholder(tf.float32, [None, 10], name='fts')
        accuracy, _ = retrain.add_evaluation_step(fts, ground_truth_input)
        feed_dict = {fts: predictions, ground_truth_input: ground_truths}
        ret = accuracy.eval(feed_dict, sess)

    # iterrows() yields copies, so write predictions through the frame itself.
    for index in range(len(all_files_df)):
        all_files_df.at[index, 'predicted_label'] = np.argmax(predictions[index, :])

    all_files_df.to_csv("ground_truth.csv")

    print('Ensemble Accuracy: %g' % ret)

    stop = time.time()
    print(str((stop - start) / len(filenames)) + ' seconds per image.')
    total_stop = time.time()
    print("total time is " + str(total_stop - total_start) + ' seconds.')