Example #1
# Inference body for one cross-validation fold (variables such as `test_files`,
# `fold`, `preds`, `ids`, and `ensemble_preds` come from the surrounding loop).
import cv2
import numpy as np
import pandas as pd

counter = 0
padding = 0
aug = do_inference_aug()  # inference-time augmentation pipeline, called as aug(image=...)['image']

for batch in chunker(test_files[0:test_limit], predict_batch_size):
    print("Indexes: %i - %i" % (counter, counter + predict_batch_size))
    counter += predict_batch_size

    X = [aug(image=cv2.resize(cv2.imread(x), (img_size, img_size)))['image'] for x in batch]
    ids_batch = [get_id_from_file_path(x) for x in batch]

    # The batch fed to the model must be divisible by the number of TPU cores
    # in use (predict_batch_size itself already is), so only the final, short
    # batch can need padding; pad it with all-zero dummy images.
    remainder = len(batch) % number_of_tpu_core
    padding = number_of_tpu_core - remainder if remainder > 0 else 0
    for _ in range(padding):
        X.append(np.zeros((img_size, img_size, 3), dtype=np.float32))

    X = np.array(X)

    # Test-time augmentation: geometric mean of the predictions for the
    # original images and their flipped variants (vertical, both, horizontal).
    preds_batch = ((model.predict(X).ravel()
                    * model.predict(X[:, ::-1, :, :]).ravel()
                    * model.predict(X[:, ::-1, ::-1, :]).ravel()
                    * model.predict(X[:, :, ::-1, :]).ravel()) ** 0.25).tolist()

    preds += preds_batch
    ids += ids_batch

# Drop the predictions produced for the dummy padding rows of the last batch.
if padding > 0:
    preds = preds[:-padding]

df = pd.DataFrame({'id': ids, 'label': preds})
df.to_csv("results_" + str(fold) + ".csv", index=False)
print(df.head())

# accumulate this fold's predictions into the ensemble sum
ensemble_preds += np.array(preds, dtype=np.float64)
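
The block above leans on two small helpers that aren't shown. A minimal sketch of what they might look like, assuming `chunker` yields fixed-size slices and that a sample id is simply the file name without directory or extension (both assumptions):

import os

def chunker(seq, size):
    # Yield successive slices of `seq` with at most `size` elements each.
    return (seq[pos:pos + size] for pos in range(0, len(seq), size))

def get_id_from_file_path(file_path):
    # Hypothetical: recover the sample id by stripping directory and extension.
    return os.path.splitext(os.path.basename(file_path))[0]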
Example #2
import re

import numpy as np
import pandas as pd
import tensorflow as tf

predictions = []
for i in filenames:
    img = tf.keras.preprocessing.image.load_img(img_path + i,
                                                target_size=(224, 224))
    x = tf.keras.preprocessing.image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)  # preprocessing function of the chosen backbone
    y = new_model.predict(x)
    y_max = float(y.max())
    # Accept the prediction only when the model is at least 85% confident;
    # otherwise fall back to an extra "unknown" class with index 4.
    if y_max > 0.85:
        predict = int(np.argmax(y))
    else:
        predict = 4
    predictions.append(predict)

# Invert the class-name -> index mapping and add the fallback label.
labels = validation_generator.class_indices
labels = dict((v, k) for k, v in labels.items())
labels[4] = 'None'
predicted = [labels[k] for k in predictions]

results = pd.DataFrame({
    "Filename": filenames,
    "Predictions": predicted,
    "Actual": filenames
})
# The ground truth is encoded in the file path ("<class>/<name>.jpg"),
# so strip the class directory and the extension to recover it.
for i in labels:
    results['Actual'] = results['Actual'].str.replace(labels[i] + "/", "", regex=False)
results['Actual'] = results['Actual'].str.replace(".jpg", "", regex=False)

name = []
for x in results['Actual']:
    e_n = re.sub(r"\d+", "", x)
    name.append(e_n)
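
For context, `filenames`, `validation_generator`, and `preprocess_input` are not defined in the snippet; they would typically come from a Keras ImageDataGenerator pipeline. A minimal sketch, assuming a MobileNetV2 backbone and a validation directory with one subfolder per class (both assumptions):

import tensorflow as tf
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input  # assumed backbone

datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    preprocessing_function=preprocess_input)
validation_generator = datagen.flow_from_directory(
    'data/validation',        # assumed layout: data/validation/<class>/<name>.jpg
    target_size=(224, 224),
    class_mode='categorical',
    shuffle=False)
filenames = validation_generator.filenames  # relative paths like "<class>/<name>.jpg"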
Example #3
import numpy as np
import pandas as pd
from tensorflow.keras.preprocessing import image

model.fit(x=train_images, y=train_category, epochs=15, batch_size=5, verbose=2)

dataset_test = pd.read_csv('dataset/test.csv')

# Run the model over the test images and map predicted indices back to labels.
prediction = []
for i in range(len(dataset_test)):
    img = image.load_img('dataset/test/' + dataset_test['Image'][i],
                         target_size=(224, 224))
    img = image.img_to_array(img)
    img = preprocess_input(img)  # preprocessing function of the chosen backbone
    img = np.expand_dims(img, axis=0)
    pred = model.predict(img)
    category = np.argmax(pred)
    # LabelEncoder.inverse_transform expects a 1-D array of class indices.
    prediction.append(labelEncoder.inverse_transform([category])[0])
dataset_test['target'] = prediction
dataset_test.to_csv('dataset/test.csv', index=False)  # overwrites the input CSV with predictions added

# read the ground-truth labels
actual_predictions = pd.read_excel('solution.xlsx')
actual = pd.DataFrame([sub.split(",") for sub in actual_predictions['data']],
                      columns=('Image', 'target'))

data_frame = pd.merge(dataset_test, actual, on='Image', how='inner')
correct = data_frame[data_frame['target_x'] == data_frame['target_y']]
print("number of correct predications", len(correct))
print("accuracy", len(correct) / len(dataset_test))