# (assumes numpy as np, the project's Dataset, Model, LABELS, csv_writer,
# image_saver and load_P1_options helpers are imported earlier in the file)
test_labels = np.argmax(dataset.test_labels, axis=1)
test_features = dataset.test_features
# Write the CSV header row.
csv_writer.append_to_file(['#', 'Image', 'Predicted class', 'True class'])
# Save the first 30 test images together with their predicted and true classes.
for index in range(30):
    csv_writer.append_to_file([
        index + 1, '',
        LABELS[prediction_labels[index]],
        LABELS[test_labels[index]]
    ])
    image_saver.plt.imshow(test_features[index])
    image_saver.save_image(index)


if __name__ == '__main__':
    dataset = Dataset(data_folder='./data')
    # Split the data into 70% training, 20% validation and 10% test sets.
    dataset.load_data(data_parts=[0.7, 0.2, 0.1])
    print(dataset.get_data_summary())

    l_rate, momentum, n_epoch, batch_size, verbose, optimizer, loss_func = load_P1_options()

    model = Model(l_rate=l_rate, momentum=momentum,
                  optimizer=optimizer, loss=loss_func)

    # train_scenario()
    load_from_file_scenario()

    loss, accuracy, predictions = model.evaluate(
        test_data=dataset.test_features,
        test_labels=dataset.test_labels)  # closing argument assumed; the listing is cut off here
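# A minimal sketch of the csv_writer helper used above, assuming it simply
# appends rows to a results file; the real implementation is not shown in
# this listing, so the CsvWriter class below is hypothetical.
import csv

class CsvWriter:
    def __init__(self, path='results.csv'):
        self.path = path

    def append_to_file(self, row):
        # Append a single row (a list of cell values) to the CSV file.
        with open(self.path, 'a', newline='', encoding='utf-8') as f:
            csv.writer(f).writerow(row)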
plt.ylabel('True label')
plt.xlabel('Predicted label')

# (assumes argparse, matplotlib.pyplot as plt, skimage's resize and rgb2gray,
# and the local Dataset and HOG helpers are imported earlier in the file)
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", default="svm.pickle",
                help="path to where the model will be stored")
args = vars(ap.parse_args())

print("Collecting annotations ...")
# CHANGE 'inflamed aorta' to the disease you are working to diagnose
d = Dataset(myDirectory, myDirectory, ['inflamed aorta'])
labels, images = d.load_data()
print("Gathered {} image slices".format(len(images)))

data = []
labels_new = []
hog = HOG(orientations=19, pixelsPerCell=(8, 8),
          cellsPerBlock=(3, 3), transform=True)

for i, image in enumerate(images):
    if i % 100 == 0:
        print("Gathering features, {} of {}".format(i, len(images)))
    # Skip degenerate slices with an empty dimension.
    if 0 not in image.shape:
        image_resized = resize(image, (291, 218), anti_aliasing=True)
        hist = hog.describe(rgb2gray(image_resized))
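# A minimal sketch of the training step implied by the "--model" argument
# above, assuming the loop appends each hist to data and the matching label
# to labels_new, and assuming scikit-learn's LinearSVC; the original
# continuation is not shown in this listing.
import pickle
from sklearn.svm import LinearSVC

model = LinearSVC(random_state=42)
model.fit(data, labels_new)           # train on the HOG feature vectors
with open(args["model"], "wb") as f:  # persist to the path from --model
    pickle.dump(model, f)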