Code example #1 (score: 0)
def test(features_path, save_path, test_features, csv_path, name):
    """Run inference over pre-extracted feature batches and write a CSV.

    Restores a trained ``Classifier`` checkpoint, feeds it the horizontally
    concatenated feature sets for every batch, and writes one ``id,label``
    row per image to ``<csv_path>/<name>.csv``.

    Args:
        features_path: Directory holding ``number.npy`` (batch count) and one
            sub-directory per feature set, each with ``batch_<i>`` folders
            containing ``features.npy`` and ``labels.npy``.
        save_path: Root directory of saved checkpoints.
        test_features: Names of the feature-set sub-directories to combine.
        csv_path: Output directory for the results CSV.
        name: Run name; selects the checkpoint folder and the CSV filename.
    """
    model = Classifier(len(test_features))
    # number.npy stores the batch count as a one-element array.
    n_batches = np.load(os.path.join(features_path, 'number.npy'))[0]

    graph = tf.Graph()

    with graph.as_default():
        model.build(is_training=False)
        saver = tf.train.Saver()

    model_path = os.path.join(save_path, name, 'catdog.ckpt')

    results = {}
    count = 0
    with tf.Session(graph=graph) as sess:

        saver.restore(sess, model_path)

        for i in range(n_batches):
            labels = None
            # Collect every feature set for this batch and concatenate once,
            # instead of repeated np.hstack on a growing array (which copies
            # the accumulated data on every iteration).
            feature_blocks = []
            for test_feature in test_features:

                feature_path = os.path.join(features_path, test_feature,
                                            'batch_{}'.format(i))

                feature_blocks.append(
                    np.load(os.path.join(feature_path, 'features.npy')))
                if labels is None:
                    # Labels are assumed identical across feature sets for a
                    # given batch, so load them only from the first one.
                    labels = np.load(os.path.join(feature_path, 'labels.npy'))

            features = np.hstack(feature_blocks)

            prediction = sess.run(model.prediction,
                                  feed_dict={model.inputs: features})

            for j, label in enumerate(labels):
                results[label] = prediction[j]
                count += 1
    print('Processed {} images'.format(count))

    with open(os.path.join(csv_path, name + '.csv'), 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['id', 'label'])
        # Sort by id so the submission file is deterministic.
        for key in sorted(results):
            writer.writerow([key, results[key]])
Code example #2 (score: 0)
File: main.py — Project: vgthengane/andromeda
# Path of a sample validation image (not used by the training branch below).
image_URL = "../data/valid/handwritten/00002.png"

# Maps the model's integer class index to a human-readable label.
class_dict = {0: "handwritten",
              1: "printed"}

model = Classifier()

# NOTE(review): `phase`, `image_size`, `data_dir`, `batch_size`, and
# `get_loaders` are defined elsewhere in this file — confirm before reuse.
if phase == "train":

    # Binary classification setup: sigmoid output expected by
    # binary_crossentropy, optimized with Adam.
    model.compile(
        loss="binary_crossentropy",
        optimizer="adam",
        metrics=["accuracy"]
    )

    # Build with a dynamic batch dimension so summary() can report shapes.
    model.build((None,) + image_size)
    model.summary()


    train_loader, val_loader = get_loaders(data_dir, "train", image_size[:2], batch_size)

    model.fit(
        train_loader,
        # Floor division drops the final partial batch from each epoch.
        steps_per_epoch=train_loader.samples // batch_size,
        epochs=5,
        validation_data=val_loader,
        validation_steps=val_loader.samples // batch_size
    )

    # Persist only the weights (not the architecture) as HDF5.
    model.save_weights("../printed_and_handwritten_classifier.h5")