Example #1
import time

import numpy as np

# Model, InferenceResult, and InferenceWrapper are defined elsewhere in the project.
def infer_from_model(model: Model, image: np.ndarray) -> InferenceResult:
    # Add a batch dimension so the single image matches the model's expected input.
    image = np.expand_dims(image, axis=0)
    start = time.time()
    result = model.predict_on_batch(image)
    # Drop the batch dimension and take the per-pixel argmax over class scores.
    result = np.argmax(np.squeeze(result, axis=0), axis=-1)
    inference_time = time.time() - start
    # Map the predicted class indices to display colors for visualization.
    result_colors = InferenceWrapper.map_color(class_map=result)
    return InferenceResult(
        result=result,
        result_colors=result_colors,
        inference_time=inference_time,
    )
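
A minimal usage sketch, assuming the model is a Keras network saved as "model.h5" (hypothetical path) and the input image is already preprocessed to the network's expected size; only infer_from_model above comes from the example:

from keras.models import load_model

seg_model = load_model("model.h5")                       # hypothetical checkpoint path
frame = np.random.rand(256, 256, 3).astype(np.float32)   # placeholder preprocessed image
output = infer_from_model(seg_model, frame)
print("inference took %.3fs, result shape %s" % (output.inference_time, output.result.shape))
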
train_loader, test_loader = data_loader.run()
"""
Evaluate and check
"""
if evaluate:
    progress = tqdm.tqdm(test_loader, total=len(test_loader))
    # Wrap the final layer of the restored motion model as a standalone model
    # that maps 2048-d feature vectors to class predictions.
    inp = Input(shape=(2048,), name="dense")
    dense_layer = Model(inp, motion_model_restored.layers[-1].layers[-1](inp))

    # One row of class scores per video (num_actions classes, 101 in this example)
    # plus one ground-truth label per video.
    video_level_preds_np = np.zeros((len(progress), num_actions))
    video_level_labels_np = np.zeros((len(progress), 1))

    # Frame-level labels are not needed here; only the video-level label is used.
    for index, (video_frames, video_label) in enumerate(progress):
        # The two-output model returns per-frame features and per-frame class scores.
        feature_field, frame_preds = motion_model_with_2_outputs.predict_on_batch(
            video_frames)
        # Sanity check: applying the rebuilt dense layer to the features should
        # reproduce the model's own frame-level predictions.
        assert np.allclose(frame_preds, dense_layer.predict(feature_field))

        # Average the frame-level scores into one video-level prediction per video.
        video_level_preds_np[index, :] = np.mean(frame_preds, axis=0)
        video_level_labels_np[index, 0] = video_label

    # Evaluate loss and top-1/top-5 accuracy over the accumulated video-level results.
    session = keras.backend.get_session()
    video_level_loss, video_level_accuracy_1, video_level_accuracy_5 = session.run(
        [val_loss_op, acc_top_1_op, acc_top_5_op],
        feed_dict={
            video_level_labels_k: video_level_labels_np,
            video_level_preds_k: video_level_preds_np
        })

    print("Motion Model validation", "prec@1", video_level_accuracy_1,
          "prec@5", video_level_accuracy_5, "loss", video_level_loss)
    save_model(model, 'keras-logos-gen-xception-ep%d.model' % EPOCHS)
else:
    model = load_model('keras-logos-gen-xception-ep%d.model' % EPOCHS)

all_predictions = []
with open("keras-logos-gen-xception-ep%d-thresh%0.2f-predictions.txt" %
          (EPOCHS, THRESHOLD), 'w') as out:
    idx = 0
    for batch in test_dir_iterator:
        # Keras directory iterators cycle indefinitely; track idx so iteration
        # stops once every test file has been seen.
        filenames = test_dir_iterator.filenames[idx:idx + test_dir_iterator.batch_size]
        if len(filenames) == 0:
            break
        idx += test_dir_iterator.batch_size
        predictions = model.predict_on_batch(batch[0])
        for i in range(len(predictions)):
            truth = test_class_map[np.argmax(batch[1][i])]
            conf = np.max(predictions[i])
            # Fall back to the "no-logo" class when the top score is below THRESHOLD.
            if conf >= THRESHOLD:
                predicted = train_class_map[np.argmax(predictions[i])]
            else:
                predicted = "no-logo"
            all_predictions.append(test_dir_iterator.class_indices[predicted])
            # File names look like ".../<id>.jpg"; keep the numeric id for the report.
            m = re.match(r".*/(\d+)\.jpg", filenames[i])
            fid = m.group(1)
            print("file = %s\ttruth = %s\tpredicted = %s\tconfidence = %f" %
                  (filenames[i], truth, predicted, conf))
            out.write("%s, %s, %f\n" % (fid, predicted, conf))

# generate confusion matrix
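
A minimal sketch of the confusion-matrix step hinted at above, assuming scikit-learn is available and that test_dir_iterator was created with shuffle=False, so test_dir_iterator.classes lists the ground-truth class index of each file in the same order as all_predictions:

from sklearn.metrics import confusion_matrix

true_labels = test_dir_iterator.classes[:len(all_predictions)]
cm = confusion_matrix(true_labels, all_predictions)
print(cm)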