# build model
model, optimizer = nn.build_gabor_model(gabor_filters, input_shape=(1 if grayscale else 3, resolution[0], resolution[1]), learningrate=learningrate, decay=decay, mode=mode, add_conv2=add_conv2)

# Load status
dataset_io.load_status(model, optimizer, weight_load_path + "/" + str(load_epoch))

# Print from where the images are loaded, to which resolution they are scaled and whether they are normalized
if normalize == 1:
	print('Loading data from {0} and rescaling it to {1}x{2}. Input images are normalized to [0,1]'.format(data_path, resolution[0], resolution[1]))
elif normalize == 2:
	print('Loading data from {0} and rescaling it to {1}x{2}. Input images are normalized to [-1,1]'.format(data_path, resolution[0], resolution[1]))
else:
	print('Loading data from {0} and rescaling it to {1}x{2}. Images are not normalized!'.format(data_path, resolution[0], resolution[1]))

# Load data
x_test, y_test, image_list, original_resolution = dataset_io.read_data(data_path, resolution, normalize=normalize, grayscale=grayscale, return_original_resolution=True, return_image_properties=True)
nb_labels = 30
max_dim = np.max(original_resolution)
expanded_x_test = [x_test for i in range(10)]  # feed the same input to each of the model's (presumably 10) parallel input branches

# normalize output
if normalize_output:
	y_test /= max_dim

#model.fit(expanded_x_test, y_test, nb_epoch=1, batch_size=batchsize, shuffle=True, verbose=True)

# test model
score = model.evaluate(expanded_x_test, y_test, batch_size=1, verbose=True)
print('Test score:', score)
print('Test score in pixels:', helpers.loss_to_pixel(score, np.max(resolution)))
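
A minimal sketch of what a helpers.loss_to_pixel conversion presumably does, assuming an MSE loss on coordinates normalized by the maximum resolution (both assumptions; the source does not show the helper):

import math

def loss_to_pixel(loss, max_dim):
	# Undo the normalization: the square root turns the mean squared error back
	# into an average per-coordinate error, and max_dim rescales it to pixels.
	return math.sqrt(loss) * max_dim
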
Example #2
layout = nn.load_layout(layout_path)

# Get input shape and resolution
input_shape = layout[0][1]['input_shape']
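# input_shape uses the channels-first layout (channels, height, width), so the spatial resolution is its last two entries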
resolution = input_shape[1:]

# Print from where the images are loaded, to which resolution they are scaled and whether they are normalized
if normalize == 1:
	print('Loading data from {0} and rescaling it to {1}x{2}. Input images are normalized to [0,1]'.format(data_path, resolution[0], resolution[1]))
elif normalize == 2:
	print('Loading data from {0} and rescaling it to {1}x{2}. Input images are normalized to [-1,1]'.format(data_path, resolution[0], resolution[1]))
else:
	print('Loading data from {0} and rescaling it to {1}x{2}. Images are not normalized!'.format(data_path, resolution[0], resolution[1]))

# Load data
x_train, y_train, original_resolution = dataset_io.read_data(data_path, resolution, normalize=normalize, grayscale=grayscale, return_original_resolution=True) # change
nb_labels = layout[-1][1]['output_dim']
max_dim = np.max(original_resolution)
x_test, y_test = dataset_io.read_data(test_data_path, resolution, normalize=normalize, grayscale=grayscale) # change

if normalize_output:
	y_train /= max_dim
	y_test /= max_dim

loss_callbacks = []  # presumably collects the loss history of each grid-search run

# do grid search
for m_idx, m in enumerate(momentums):
#for w_idx, w in enumerate(weightscales):
	for l_idx, l in enumerate(learningrates):	
		print("Learning rate: ", l)
Example #3
args = parser.parse_args()

# Load model
print('Loading model from {0}'.format(args.layout))
layout = nn.load_layout(args.layout)
model, optimizer = nn.build_model_to_layout(layout)

# Load weights
print('Loading weights from {0}'.format(args.weights))
model.load_weights('{0}.w'.format(args.weights))

# Load data
input_shape = layout[0][1]['input_shape']
resolution = input_shape[1:]
print('Loading data from {0} and rescaling it to {1}x{2}'.format(args.path, resolution[0], resolution[1]))
x_pred, y_true, image_list = dataset_io.read_data(args.path, resolution, args.datalimit, labels=True, return_image_properties=True)

# Predict
print('Predicting on {0} samples at resolution {1}x{2} in batches of size {3}'.format(x_pred.shape[0], resolution[0], resolution[1], args.batchsize))
predictions = model.predict(x_pred, batch_size=args.batchsize, verbose=args.verbosity) * 640  # rescale normalized outputs to pixel coordinates (640 is presumably the original image width)

# Concatenate true labels and predictions
y_pred_and_true = np.concatenate((y_true, predictions), axis=1)

# Save and/or display predictions
if args.predsave is not None and args.predshow:
	# Save and show predictions
	print('Saving images with drawn predicted landmarks to \'{0}\'. The images will also be displayed.'.format(args.predsave))
	visualize.visualize_predictions(image_list, predictions, y_true, args.crosssize, args.predsave, args.predshow)
elif args.predshow:
	# Only show predictions (no save path); presumably the same helper is called without one
	visualize.visualize_predictions(image_list, predictions, y_true, args.crosssize, None, args.predshow)
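
A self-contained sketch of what a visualize_predictions helper with this signature might do; the flat (x, y, x, y, ...) label layout, the PIL-based drawing, and image_list holding file paths are all assumptions:

from PIL import Image, ImageDraw

def visualize_predictions(image_list, predictions, y_true, cross_size, save_path=None, show=False):
	for image_path, pred, true in zip(image_list, predictions, y_true):
		img = Image.open(image_path).convert('RGB')
		draw = ImageDraw.Draw(img)
		# Draw a cross at every true (green) and predicted (red) landmark.
		for points, color in ((true, 'green'), (pred, 'red')):
			for x, y in zip(points[0::2], points[1::2]):
				draw.line((x - cross_size, y, x + cross_size, y), fill=color)
				draw.line((x, y - cross_size, x, y + cross_size), fill=color)
		if save_path is not None:
			img.save('{0}/{1}'.format(save_path, image_path.split('/')[-1]))
		if show:
			img.show()
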
Example #4
parser.add_argument('weights', help='Path to the weights file (xyz.w)')
parser.add_argument('layout', help='Path to the network layout specification')
parser.add_argument('path', help='Path to a CSV file that lists the input images')
parser.add_argument('-b', '--batchsize', help='Size of the batches to be learned on [default 16]', type=int, default=16)
parser.add_argument('-d', '--datalimit', help='Maximum number of data points to read from PATH [if missing, read all]', type=int, default=None)
parser.add_argument('-v', '--verbosity', help='Set the verbosity level of keras (valid values: 0, 1, 2)', type=int, default=1)
args = parser.parse_args()

#~ Load model
print('Loading model from {0}'.format(args.layout))
layout = nn.load_layout(args.layout)
model, optimizer = nn.build_model_to_layout(layout)

#~ Load weights
print('Loading weights from {0}'.format(args.weights))
model.load_weights(args.weights)

#~ Load data
input_shape = layout[0][1]['input_shape']
resolution = input_shape[1:]
print('Loading data from {0} and rescaling it to {1}x{2}'.format(args.path, resolution[0], resolution[1]))
x_test, y_test, num_classes = dataset_io.read_data(args.path, resolution, args.datalimit)
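# Turn the integer class labels into one-hot vectors to match the model's categorical output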
y_test = np_utils.to_categorical(y_test, num_classes)

#~ Test the model
print('Testing on {0} samples at resolution {1}x{2} in batches of size {3}'.format(x_test.shape[0], resolution[0], resolution[1], args.batchsize))
score = model.evaluate(x_test, y_test, batch_size=args.batchsize, show_accuracy=True, verbose=args.verbosity)
print('Test score:', score[0])
print('Test accuracy:', score[1])

print('Done')
Example #5
args = parser.parse_args()

#~ Load model
print('Loading model from {0}'.format(args.layout))
layout = nn.load_layout(args.layout)
model, optimizer = nn.build_model_to_layout(layout)

#~ Load weights
print('Loading weights from {0}'.format(args.weights))
model.load_weights(args.weights)

#~ Load data
input_shape = layout[0][1]['input_shape']
resolution = input_shape[1:]
print('Loading data from {0} and rescaling it to {1}x{2}'.format(args.data, resolution[0], resolution[1]))
x_test, y_test, num_classes, image_properties = dataset_io.read_data(args.data, resolution, args.datalimit, return_image_properties=True)
y_test = np_utils.to_categorical(y_test, num_classes)

#~ Create predictions
print('Predicting labels for {0} samples at resolution {1}x{2} in batches of size {3}'.format(x_test.shape[0], resolution[0], resolution[1], args.batchsize))
predictions = model.predict_proba(x_test, batch_size=args.batchsize)  # per-class probability vectors

#~ If desired, reduce output to the misclassified images
if args.misclassified:
	print('Reducing output to misclassified samples')
	sample_count = len(image_properties)
	image_properties, predictions = extract_misclassified(image_properties, predictions, y_test)
	print('\tMisclassified images: {0} out of {1}'.format(len(image_properties), sample_count))
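
A minimal sketch of what the extract_misclassified helper could look like, assuming one-hot y_test and per-class probability predictions (its actual implementation is not shown here):

import numpy as np

def extract_misclassified(image_properties, predictions, y_test):
	# Keep only the samples whose most probable class differs from the true class.
	wrong = np.argmax(predictions, axis=1) != np.argmax(y_test, axis=1)
	kept = [p for p, w in zip(image_properties, wrong) if w]
	return kept, predictions[wrong]
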

#~ Store predictions on disk
print('Storing samples to {0}'.format(args.path))