Example #1
def get_sample_lists(path):
	# Maps each dataset category to its list of image file names.
	t_images = load_dataset_paths(path, file_type='jpg')
	const.dprint('Loading categories...\t')
	training_list = []
	training_labels = []
	label_ids = []
	for category in list(t_images):
		if (const.USE_FULL_DATASET or (category in const.DATASET_CATEGORIES)) and (category != 'dataset_count'):
			# Take the first NUMBER_OF_TRAINING_IMAGES image paths of this category
			# and label each of them with the category's integer index in label_ids.
			training_list.extend([os.path.join(path, category, i) for i in t_images[category][0:const.NUMBER_OF_TRAINING_IMAGES]])
			training_labels.extend([len(label_ids)] * const.NUMBER_OF_TRAINING_IMAGES)
			label_ids.append(category)
	const.dprint('Done.\n')
	return (training_list, training_labels, label_ids)
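A minimal usage sketch (not from the original module), assuming the same const settings as above; it simply pairs each returned path with its category name:

# Illustrative only: each label value is an index into label_ids.
training_list, training_labels, label_ids = get_sample_lists(const.DATASET_PATH)
for image_path, label in zip(training_list, training_labels):
	print(image_path, label_ids[label])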
Example #2
def get_sample_lists(path):
    t_images = load_dataset_paths(path, file_type='jpg')
    const.dprint('Loading categories...\t')
    training_list = []
    training_labels = []
    label_ids = []
    for category in list(t_images):
        if ((const.USE_FULL_DATASET or category in const.DATASET_CATEGORIES)
                and category != 'dataset_count'):
            training_list.extend([
                os.path.join(path, category, i)
                for i in t_images[category][0:const.NUMBER_OF_TRAINING_IMAGES]
            ])
            training_labels.extend([len(label_ids)] *
                                   const.NUMBER_OF_TRAINING_IMAGES)
            label_ids.append(category)
    const.dprint('Done.\n')
    return (training_list, training_labels, label_ids)
Example #3
				for ii in training_images[category]:
					p = os.path.join(path, category, ii)
					p_data = os.path.join(data_path, category, ii + '.mat')
					mat = image_lib.convert_to_matrix(p)
					# Dump the matrix to a plain-text .mat file, one value per line.
					with open(p_data, 'w') as f:
						for aa in range(0, len(mat)):
							for bb in range(0, len(mat[aa])):
								f.write("{0}\n".format(mat[aa][bb]))
					count += 1
					console_lib.update_progress_bar(count/total_count, 'Resizing ' + category + ' ' + ii + '                            ')


def create_neural_network(n_inputs, n_hidden, n_outputs, activation_function, learning_rate):
	return neural_network.neural_network(n_inputs, n_hidden, n_outputs, activation_function, learning_rate)

def run_training(training_list, training_labels, neu_net):
	# Feed each training image, converted to a matrix, to the network along with its label.
	for ii in range(0, len(training_list)):
		matrix = image_lib.convert_to_matrix(training_list[ii])
		neu_net.add_sample(matrix, const.RESIZE_IMAGE_DIMENSIONS, True, correct_label=training_labels[ii])

if const.PERFORM_PREPROCESSING:
	preprocess_dataset(const.DATASET_PATH, resize_images=const.RESIZE_IMAGES, resize_image_dimensions=const.RESIZE_IMAGE_DIMENSIONS, store_data=const.CREATE_DATA_FILES, data_path=const.DATA_FILE_LOCATIONS)


const.dprint('Script Finished.\n')
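A minimal end-to-end sketch of how these helpers could be wired together (illustration only; const.N_HIDDEN, const.ACTIVATION_FUNCTION and const.LEARNING_RATE are assumed names, and RESIZE_IMAGE_DIMENSIONS is assumed to be a (width, height) pair):

# Illustrative wiring of the helpers above; const.N_HIDDEN, const.ACTIVATION_FUNCTION
# and const.LEARNING_RATE are assumptions, not names from the original module.
training_list, training_labels, label_ids = get_sample_lists(const.DATASET_PATH)
n_inputs = const.RESIZE_IMAGE_DIMENSIONS[0] * const.RESIZE_IMAGE_DIMENSIONS[1]
net = create_neural_network(n_inputs, const.N_HIDDEN, len(label_ids),
                            const.ACTIVATION_FUNCTION, const.LEARNING_RATE)
run_training(training_list, training_labels, net)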
Example #4
                                f.write("{0}\n".format(mat[aa][bb]))
                    count += 1
                    console_lib.update_progress_bar(
                        count / total_count, 'Resizing ' + category + ' ' +
                        ii + '                            ')


def create_neural_network(n_inputs, n_hidden, n_outputs, activation_function,
                          learning_rate):
    return neural_network.neural_network(n_inputs, n_hidden, n_outputs,
                                         activation_function, learning_rate)


def run_training(training_list, training_labels, neu_net):
    for ii in range(0, len(training_list)):
        matrix = image_lib.convert_to_matrix(training_list[ii])
        neu_net.add_sample(matrix,
                           const.RESIZE_IMAGE_DIMENSIONS,
                           True,
                           correct_label=training_labels[ii])


if const.PERFORM_PREPROCESSING:
    preprocess_dataset(const.DATASET_PATH,
                       resize_images=const.RESIZE_IMAGES,
                       resize_image_dimensions=const.RESIZE_IMAGE_DIMENSIONS,
                       store_data=const.CREATE_DATA_FILES,
                       data_path=const.DATA_FILE_LOCATIONS)

const.dprint('Script Finished.\n')
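The .mat files produced above are plain text with one matrix value per line. A minimal sketch of reading one back into a flat list of numbers; the helper name load_mat_file is hypothetical and not part of the original module:

def load_mat_file(p_data):
    # Parse the one-value-per-line text format written by the preprocessing loop above.
    with open(p_data) as f:
        return [float(line) for line in f if line.strip()]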