コード例 #1
0
ファイル: eval.py プロジェクト: wishgale/garbage_classify
def test_single_h5(FLAGS, h5_weights_path):
    """Evaluate a single .h5 weights file on the test set.

    Writes a '<weights>_accuracy.txt' report (misclassified files plus
    overall accuracy) next to the weights file.

    Args:
        FLAGS: config object providing learning_rate and the test-data paths
            consumed by load_test_data.
        h5_weights_path: path of the .h5 weights file to evaluate.
    """
    if not os.path.isfile(h5_weights_path):
        print('%s is not a h5 weights file path' % h5_weights_path)
        return
    optimizer = adam(lr=FLAGS.learning_rate, clipnorm=0.001)
    objective = 'categorical_crossentropy'
    metrics = ['accuracy']
    model = model_fn(FLAGS, objective, optimizer, metrics)
    # Bug fix: load the weights file this function was asked to evaluate,
    # not FLAGS.eval_weights_path — the path validated above was otherwise
    # never used (test_batch_h5 loads the per-file path the same way).
    load_weights(model, h5_weights_path)
    img_names, test_data, test_labels = load_test_data(FLAGS)
    predictions = model.predict(test_data, verbose=0)

    right_count = 0
    error_infos = []
    for index, pred in enumerate(predictions):
        # pred is a 1-D probability vector; argmax gives the class index.
        pred_label = np.argmax(pred, axis=0)
        test_label = test_labels[index]
        if pred_label == test_label:
            right_count += 1
        else:
            error_infos.append('%s, %s, %s\n' % (img_names[index], test_label, pred_label))

    # float() avoids integer division under Python 2 (matches test_batch_h5).
    accuracy = float(right_count) / len(img_names)
    print('accuracy: %s' % accuracy)
    result_file_name = os.path.join(os.path.dirname(h5_weights_path),
                                    '%s_accuracy.txt' % os.path.basename(h5_weights_path))
    with open(result_file_name, 'w') as f:
        f.write('# predict error files\n')
        f.write('####################################\n')
        f.write('file_name, true_label, pred_label\n')
        f.writelines(error_infos)
        f.write('####################################\n')
        f.write('accuracy: %s\n' % accuracy)
    print('end')
コード例 #2
0
def load_weights_save_pb(FLAGS):
    """Build the model, restore weights from FLAGS.freeze_weights_file_path,
    and export the result as a frozen .pb model."""
    model = model_fn(
        FLAGS,
        'categorical_crossentropy',
        adam(lr=FLAGS.learning_rate, clipnorm=0.001),
        ['accuracy'],
    )
    load_weights(model, FLAGS.freeze_weights_file_path)
    save_pb_model(FLAGS, model)
コード例 #3
0
def load_weights_save_pb(FLAGS):
    """Build the model with a Nadam optimizer, restore weights from
    FLAGS.freeze_weights_file_path, and export as a frozen .pb model."""
    opt = Nadam(lr=FLAGS.learning_rate,
                beta_1=0.9,
                beta_2=0.999,
                epsilon=1e-08,
                schedule_decay=0.004)
    model = model_fn(FLAGS, 'categorical_crossentropy', opt, ['accuracy'])
    load_weights(model, FLAGS.freeze_weights_file_path)
    save_pb_model(FLAGS, model)
コード例 #4
0
def test_single_h5(FLAGS, h5_weights_path):
    """Evaluate a single .h5 weights file on the test set (labels are
    1-based here, hence the +1 on argmax) and write a
    '<weights>_accuracy.txt' report next to the weights file.

    Args:
        FLAGS: config object providing learning_rate and the test-data paths
            consumed by load_test_data.
        h5_weights_path: path of the .h5 weights file to evaluate.
    """
    if not os.path.isfile(h5_weights_path):
        print('%s is not a h5 weights file path' % h5_weights_path)
        return
    optimizer = Nadam(lr=FLAGS.learning_rate,
                      beta_1=0.9,
                      beta_2=0.999,
                      epsilon=1e-08,
                      schedule_decay=0.004)
    objective = 'categorical_crossentropy'
    metrics = ['accuracy']
    model = model_fn(FLAGS, objective, optimizer, metrics)
    # Bug fix: load the weights file this function was asked to evaluate,
    # not FLAGS.eval_weights_path — otherwise the validated argument above
    # is never used (the batch variant loads the per-file path).
    load_weights(model, h5_weights_path)
    img_names, test_data, test_labels = load_test_data(FLAGS)
    predictions = model.predict(test_data, verbose=0)

    right_count = 0
    error_infos = []

    # Debug output: confirm the three parallel lists line up.
    print("img len :")
    print(len(img_names))
    print("test_data len :")
    print(len(test_data))
    print("test_labels len :")
    print(len(test_labels))

    for index, pred in enumerate(predictions):
        # +1 because the dataset's labels are 1-based while argmax is 0-based.
        pred_label = np.argmax(pred, axis=0) + 1
        test_label = test_labels[index]
        if pred_label == test_label:
            print("{},{},{}".format(img_names[index], test_label, pred_label))
            right_count += 1
        else:
            error_infos.append('%s, %s, %s\n' %
                               (img_names[index], test_label, pred_label))

    # float() avoids integer division under Python 2 (matches test_batch_h5).
    accuracy = float(right_count) / len(img_names)
    print('accuracy: %s' % accuracy)
    result_file_name = os.path.join(
        os.path.dirname(h5_weights_path),
        '%s_accuracy.txt' % os.path.basename(h5_weights_path))
    with open(result_file_name, 'w') as f:
        f.write('# predict error files\n')
        f.write('####################################\n')
        f.write('file_name, true_label, pred_label\n')
        f.writelines(error_infos)
        f.write('####################################\n')
        f.write('accuracy: %s\n' % accuracy)
    print('end')
コード例 #5
0
def test_batch_h5(FLAGS):
    """
    test all the h5 weights files in the model_dir
    """
    optimizer = adam(lr=FLAGS.learning_rate, clipnorm=0.001)
    model = model_fn(FLAGS, 'categorical_crossentropy', optimizer, ['accuracy'])
    img_names, test_data, test_labels = load_test_data(FLAGS)

    # Evaluate every checkpoint found in the eval directory, one after another.
    for weights_file in mox.file.glob(os.path.join(FLAGS.eval_weights_path, '*.h5')):
        model = load_weights(model, weights_file)
        preds = model.predict(test_data, verbose=0)

        correct = 0
        mistakes = []
        for name, true_label, pred in zip(img_names, test_labels, preds):
            guessed = np.argmax(pred, axis=0)
            if guessed == true_label:
                correct += 1
            else:
                mistakes.append('%s, %s, %s\n' % (name, true_label, guessed))

        accuracy = float(correct) / len(img_names)
        print('accuracy: %s' % accuracy)
        result_file_name = os.path.join(os.path.dirname(weights_file),
                                        '%s_accuracy.txt' % os.path.basename(weights_file))
        # Write a per-checkpoint report: misclassified files, then accuracy.
        with mox.file.File(result_file_name, 'w') as f:
            f.write('# predict error files\n')
            f.write('####################################\n')
            f.write('file_name, true_label, pred_label\n')
            f.writelines(mistakes)
            f.write('####################################\n')
            f.write('accuracy: %s\n' % accuracy)
        print('accuracy result file saved as %s' % result_file_name)
    print('end')
コード例 #6
0
def test_single_h5(FLAGS, h5_weights_path):
    """Predict labels for the (unlabeled) test set using the given .h5
    weights and write them to result.csv with columns FileName, type.

    Args:
        FLAGS: config object providing learning_rate and the test-data paths
            consumed by load_test_data.
        h5_weights_path: path of the .h5 weights file to use for inference.
    """
    if not os.path.isfile(h5_weights_path):
        print('%s is not a h5 weights file path' % h5_weights_path)
        return
    optimizer = Nadam(lr=FLAGS.learning_rate,
                      beta_1=0.9,
                      beta_2=0.999,
                      epsilon=1e-08,
                      schedule_decay=0.004)
    objective = 'categorical_crossentropy'
    metrics = ['accuracy']
    model = model_fn(FLAGS, objective, optimizer, metrics)
    # Bug fix: load the weights file this function was asked to use, not
    # FLAGS.eval_weights_path — the path validated above was otherwise unused.
    load_weights(model, h5_weights_path)
    img_names, test_data = load_test_data(FLAGS)
    predictions = model.predict(test_data, verbose=0)

    # argmax per row gives the 0-based class; labels are 1-based here.
    test_labels = [int(np.argmax(pred)) + 1 for pred in predictions]
    # Keep only the file name, dropping any directory components.
    base_names = [name.split('/')[-1] for name in img_names]
    df = pd.DataFrame({"FileName": base_names, "type": test_labels})
    df.to_csv('result.csv', index=False)
コード例 #7
0
    # CLI defaults come from SageMaker's standard environment variables,
    # falling back to the conventional /opt/ml container paths.
    # NOTE(review): `parser`, `getenv`, `join`, `preprocess`, `model_fn`,
    # `target_columns`, and `logger` are defined outside this fragment.
    parser.add_argument(
        "--model_dir", type=str, default=getenv("SM_MODEL_DIR", "/opt/ml/models")
    )
    parser.add_argument(
        "--data_dir",
        type=str,
        default=getenv("SM_CHANNEL_TRAINING", "/opt/ml/input/data/training"),
    )
    parser.add_argument(
        "--output_dir", type=str, default=getenv("SM_OUTPUT_DIR", "/opt/ml/output/")
    )
    # parse_known_args ignores extra arguments SageMaker may pass through.
    args, _ = parser.parse_known_args()

    # call preprocessing on the data
    df = preprocess(join(args.data_dir, "public.csv.gz"))

    # load the model
    model = model_fn(args.model_dir)

    logger.info("creating predictions")

    # Features are everything except the target columns.
    inputs = df.drop(columns=target_columns)

    predictions = model.predict(inputs)
    logger.info(f"predictions have shape of {predictions.shape}")

    # save the predictions (no index, no header — raw prediction rows only)
    pd.DataFrame(predictions).to_csv(
        join(args.output_dir, "public.csv.out"), index=False, header=False
    )