def predict_command(model, training_cnf, features_file, images_dir,
                    weights_from, tag, sync):
    util.check_required_program_args(
        [model, training_cnf, features_file, images_dir, weights_from])
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf
    weights_from = str(weights_from)
    image_features = np.load(features_file)
    images = data.get_image_files(images_dir)
    predictions = predict_withf(model, cnf, weights_from, image_features)

    # Write results next to the features file, under predictions/<tag>.
    predict_dir = os.path.dirname(features_file)
    prediction_results_dir = os.path.abspath(
        os.path.join(predict_dir, 'predictions', tag))
    if not os.path.exists(prediction_results_dir):
        os.makedirs(prediction_results_dir)

    # Per-class probability scores, one row per image.
    names = data.get_names(images)
    image_prediction_probs = np.column_stack([names, predictions])
    headers = ['score%d' % (i + 1) for i in range(predictions.shape[1])]
    title = np.array(['image'] + headers)
    image_prediction_probs = np.vstack([title, image_prediction_probs])
    prediction_probs_file = os.path.join(prediction_results_dir,
                                         'predictions.csv')
    np.savetxt(prediction_probs_file, image_prediction_probs, delimiter=",",
               fmt="%s")
    print('Predictions saved to: %s' % prediction_probs_file)

    # For classification tasks, also write the argmax class labels.
    if cnf['classification']:
        class_predictions = np.argmax(predictions, axis=1)
        image_class_predictions = np.column_stack([names, class_predictions])
        title = np.array(['image', 'label'])
        image_class_predictions = np.vstack([title, image_class_predictions])
        prediction_class_file = os.path.join(prediction_results_dir,
                                             'predictions_class.csv')
        np.savetxt(prediction_class_file, image_class_predictions,
                   delimiter=",", fmt="%s")
        print('Class predictions saved to: %s' % prediction_class_file)
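# A minimal, self-contained sketch (not part of the original pipeline) of how
# the predictions CSV above is assembled: stack image names against per-class
# scores, prepend a header row, and write every cell as a string. The toy
# `names` and `predictions` arrays and the output path are invented for
# illustration.
import numpy as np

def _demo_predictions_csv(path='demo_predictions.csv'):
    names = np.array(['img_001.jpg', 'img_002.jpg'])
    predictions = np.array([[0.9, 0.1], [0.2, 0.8]])
    headers = ['score%d' % (i + 1) for i in range(predictions.shape[1])]
    rows = np.column_stack([names, predictions])  # names become column 0
    rows = np.vstack([np.array(['image'] + headers), rows])
    # fmt="%s" because the array mixes filenames and stringified floats.
    np.savetxt(path, rows, delimiter=",", fmt="%s")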
def predict(model, training_cnf, predict_dir, weights_from, predict_type):
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf
    weights_from = str(weights_from)
    images = data.get_image_files(predict_dir)
    preprocessor = None
    prediction_iterator = create_prediction_iter(cnf, model_def.crop_size,
                                                 preprocessor, False)
    if predict_type == 'quasi':
        predictor = QuasiCropPredictor(model, cnf, weights_from,
                                       prediction_iterator, 20)
    elif predict_type == '1_crop':
        predictor = OneCropPredictor(model, cnf, weights_from,
                                     prediction_iterator)
    elif predict_type == '10_crop':
        predictor = TenCropPredictor(model, cnf, weights_from,
                                     prediction_iterator,
                                     model_def.crop_size[0],
                                     model_def.image_size[0])
    else:
        raise ValueError('Unknown predict_type: %s' % predict_type)
    predictions = predictor.predict(images)
    # Flatten to (n_images, 1000); the class count is hard-coded for ImageNet.
    predictions = predictions.reshape(-1, 1000)
    names = data.get_names(images)
    for i, name in enumerate(names):
        print("---Predictions for %s:" % name)
        # Top-5 classes by descending score.
        preds = np.argsort(predictions[i])[::-1][:5]
        for p in preds:
            print(class_names[p], predictions[i][p])
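# A self-contained sketch of the top-5 selection used above: argsort gives
# ascending order, reversing yields descending, and the first five indices are
# the best classes. The scores and class names here are invented for
# illustration.
import numpy as np

def _demo_top5():
    scores = np.array([0.01, 0.40, 0.05, 0.30, 0.14, 0.10])
    class_names = ['cat', 'dog', 'bird', 'fish', 'horse', 'frog']
    top5 = np.argsort(scores)[::-1][:5]
    for p in top5:
        print(class_names[p], scores[p])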
def main(directory, convert_directory, test, crop_size, extension):
    try:
        os.mkdir(convert_directory)
    except OSError:
        pass

    supported_extensions = set(['jpg', 'png', 'tiff', 'jpeg', 'tif'])
    # Recursive alternative:
    # filenames = [os.path.join(dp, f) for dp, dn, fn in os.walk(directory)
    #              for f in fn if f.split('.')[-1].lower() in supported_extensions]

    # Only .jpg images are processed here; the matching segmentation masks are
    # derived below by swapping the extension for '_final_mask.png'.
    filenames = [
        each for each in os.listdir(directory) if each.endswith('.jpg')
    ]
    filenames = [
        os.path.join(directory, filename.strip('\n')) for filename in filenames
    ]
    # with open('/home/artelus_server/data/segment_artelus/train.txt', 'r') as f:
    #     filenames = f.readlines()
    # filenames = [os.path.join(directory, filename.strip('\n') + '.jpg')
    #              for filename in filenames]
    filenames = sorted(filenames)

    # In test mode, show each level-1 image and its converted crop for visual
    # inspection, one at a time.
    if test:
        names = data.get_names(filenames)
        y = data.get_labels(names)
        for f, level in zip(filenames, y):
            if level == 1:
                try:
                    img = convert(f, crop_size)
                    img.show()
                    Image.open(f).show()
                    real_raw_input = vars(__builtins__).get('raw_input', input)
                    real_raw_input('enter for next')
                except KeyboardInterrupt:
                    exit(0)

    print("Resizing images in {} to {}, this takes a while."
          "".format(directory, convert_directory))

    n = len(filenames)
    # Process in batches; sometimes weird things happen with Pool on my machine.
    batchsize = 500
    batches = n // batchsize + 1
    pool = Pool(N_PROC)

    args = []
    for f in filenames:
        label_f = f[:-4] + '_final_mask.png'
        args.append((convert, (directory, convert_directory, f, label_f,
                               crop_size, extension)))

    for i in range(batches):
        print("batch {:>2} / {}".format(i + 1, batches))
        pool.map(process, args[i * batchsize:(i + 1) * batchsize])

    pool.close()
    print('done')
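# A self-contained sketch of the batched multiprocessing pattern above: work
# items are chunked so each Pool.map call only sees a bounded slice. The
# worker, batch size, and process count here are placeholders for
# illustration; on spawn-based platforms this should run under a
# `if __name__ == '__main__':` guard.
from multiprocessing import Pool

def _square(x):
    return x * x

def _demo_batched_pool(items=range(23), batchsize=5, n_proc=2):
    items = list(items)
    batches = len(items) // batchsize + 1
    pool = Pool(n_proc)
    results = []
    for i in range(batches):
        results.extend(
            pool.map(_square, items[i * batchsize:(i + 1) * batchsize]))
    pool.close()
    pool.join()
    return results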
def predict(model, training_cnf, predict_dir, weights_from, dataset_name,
            convert, image_size, sync, test_type):
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf
    weights_from = str(weights_from)
    images = data.get_image_files(predict_dir)
    standardizer = cnf.get('standardizer', None)
    preprocessor = convert_preprocessor(image_size) if convert else None
    prediction_iterator = create_prediction_iter(cnf, standardizer,
                                                 model_def.crop_size,
                                                 preprocessor, sync)
    if test_type == 'quasi':
        predictor = QuasiCropPredictor(model, cnf, weights_from,
                                       prediction_iterator, 20)
    else:
        # Only quasi-crop prediction is supported here; fail fast instead of
        # hitting a NameError on an undefined predictor below.
        raise ValueError('Unknown test_type: %s' % test_type)
    predictions = predictor.predict(images)

    results_dir = os.path.join(predict_dir, '..', 'results')
    if not os.path.exists(results_dir):
        os.mkdir(results_dir)
    if not os.path.exists(os.path.join(results_dir, dataset_name)):
        os.mkdir(os.path.join(results_dir, dataset_name))

    names = data.get_names(images)
    image_prediction_prob = np.column_stack([names, predictions])
    headers = ['score%d' % (i + 1) for i in range(predictions.shape[1])]
    title = np.array(['image'] + headers)
    image_prediction_prob = np.vstack([title, image_prediction_prob])
    labels_file_prob = os.path.abspath(
        os.path.join(results_dir, dataset_name, 'predictions.csv'))
    np.savetxt(labels_file_prob, image_prediction_prob, delimiter=",",
               fmt="%s")
def predict(model, training_cnf, predict_dir, weights_from, dataset_name,
            convert, image_size, sync, predict_type):
    images = data.get_image_files(predict_dir)

    # For now, the models, cnfs, and weights are hard coded.
    # They should come from program inputs or an ensembling config file.
    print('Creating predictor 1')
    weights_from1 = 'weights.sa/model-epoch-97.ckpt'
    model1 = 'examples/mnist_model_sa.py'
    training_cnf1 = 'examples/mnist_cnf.py'
    model_def1 = util.load_module(model1)
    model1 = model_def1.model
    cnf1 = util.load_module(training_cnf1).cnf
    standardizer = cnf1.get('standardizer', NoOpStandardizer())
    preprocessor = convert_preprocessor(
        model_def1.image_size[0]) if convert else None
    prediction_iterator1 = create_prediction_iter(cnf1, standardizer,
                                                  model_def1.crop_size,
                                                  preprocessor, sync)
    predictor1 = QuasiCropPredictor(model1, cnf1, weights_from1,
                                    prediction_iterator1, 20)
    # predictor1 = OneCropPredictor(model1, cnf1, weights_from1,
    #                               prediction_iterator1)

    print('Creating predictor 2')
    weights_from2 = 'weights.rv/model-epoch-31.ckpt'
    model2 = 'examples/mnist_model.py'
    training_cnf2 = 'examples/mnist_cnf.py'
    model_def2 = util.load_module(model2)
    model2 = model_def2.model
    cnf2 = util.load_module(training_cnf2).cnf
    standardizer = cnf2.get('standardizer', NoOpStandardizer())
    preprocessor = convert_preprocessor(
        model_def2.image_size[0]) if convert else None
    prediction_iterator2 = create_prediction_iter(cnf2, standardizer,
                                                  model_def2.crop_size,
                                                  preprocessor, sync)
    predictor2 = QuasiCropPredictor(model2, cnf2, weights_from2,
                                    prediction_iterator2, 20)
    # predictor2 = OneCropPredictor(model2, cnf2, weights_from2,
    #                               prediction_iterator2)

    predictor = EnsemblePredictor([predictor1, predictor2])
    predictions = predictor.predict(images)

    results_dir = os.path.join(predict_dir, '..', 'results')
    if not os.path.exists(results_dir):
        os.mkdir(results_dir)
    if not os.path.exists(os.path.join(results_dir, dataset_name)):
        os.mkdir(os.path.join(results_dir, dataset_name))

    names = data.get_names(images)
    image_prediction_probs = np.column_stack([names, predictions])
    headers = ['score%d' % (i + 1) for i in range(predictions.shape[1])]
    title = np.array(['image'] + headers)
    image_prediction_probs = np.vstack([title, image_prediction_probs])
    prediction_probs_file = os.path.abspath(
        os.path.join(results_dir, dataset_name, 'predictions.csv'))
    np.savetxt(prediction_probs_file, image_prediction_probs, delimiter=",",
               fmt="%s")
    print('Predictions saved to: %s' % prediction_probs_file)

    if cnf1['classification']:
        class_predictions = np.argmax(predictions, axis=1)
        image_class_predictions = np.column_stack([names, class_predictions])
        title = np.array(['image', 'label'])
        image_class_predictions = np.vstack([title, image_class_predictions])
        prediction_class_file = os.path.abspath(
            os.path.join(results_dir, dataset_name, 'predictions_class.csv'))
        np.savetxt(prediction_class_file, image_class_predictions,
                   delimiter=",", fmt="%s")
        print('Class predictions saved to: %s' % prediction_class_file)
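# EnsemblePredictor combines the two members' outputs; how it combines them
# (mean, vote, weighted) is not shown in this file, so the plain mean over
# per-model probabilities below is an assumption, given as a minimal
# self-contained sketch.
import numpy as np

def _demo_ensemble_mean(per_model_probs):
    # per_model_probs: list of (n_images, n_classes) arrays, one per model.
    # Stacking to (n_models, n_images, n_classes) and averaging over axis 0
    # yields one (n_images, n_classes) prediction matrix.
    return np.mean(np.stack(per_model_probs, axis=0), axis=0)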
def predict(model, output_layer, training_cnf, predict_dir, weights_from, tag,
            convert, image_size, sync, predict_type):
    util.check_required_program_args(
        [model, training_cnf, predict_dir, weights_from])
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf
    weights_from = str(weights_from)
    images = data.get_image_files(predict_dir)
    preprocessor = convert_preprocessor(image_size) if convert else None
    prediction_iterator = create_prediction_iter(cnf, model_def.crop_size,
                                                 preprocessor, sync)
    if predict_type == 'quasi':
        predictor = QuasiCropPredictor(model, cnf, weights_from,
                                       prediction_iterator, 20, output_layer)
    elif predict_type == '1_crop':
        predictor = OneCropPredictor(model, cnf, weights_from,
                                     prediction_iterator, output_layer)
    elif predict_type == '10_crop':
        predictor = TenCropPredictor(model, cnf, weights_from,
                                     prediction_iterator,
                                     model_def.crop_size[0],
                                     model_def.image_size[0], output_layer)
    else:
        raise ValueError('Unknown predict_type: %s' % predict_type)
    predictions = predictor.predict(images)

    prediction_results_dir = os.path.abspath(
        os.path.join(predict_dir, '..', 'predictions', tag))
    if not os.path.exists(prediction_results_dir):
        os.makedirs(prediction_results_dir)

    if output_layer == 'predictions':
        names = data.get_names(images)
        image_prediction_probs = np.column_stack([names, predictions])
        headers = ['score%d' % (i + 1) for i in range(predictions.shape[1])]
        title = np.array(['image'] + headers)
        image_prediction_probs = np.vstack([title, image_prediction_probs])
        prediction_probs_file = os.path.join(prediction_results_dir,
                                             'predictions.csv')
        np.savetxt(prediction_probs_file, image_prediction_probs,
                   delimiter=",", fmt="%s")
        print('Predictions saved to: %s' % prediction_probs_file)

        if cnf['classification']:
            class_predictions = np.argmax(predictions, axis=1)
            image_class_predictions = np.column_stack(
                [names, class_predictions])
            title = np.array(['image', 'label'])
            image_class_predictions = np.vstack(
                [title, image_class_predictions])
            prediction_class_file = os.path.join(prediction_results_dir,
                                                 'predictions_class.csv')
            np.savetxt(prediction_class_file, image_class_predictions,
                       delimiter=",", fmt="%s")
            print('Class predictions saved to: %s' % prediction_class_file)
    else:
        # Any other output layer is treated as feature extraction.
        features_file = os.path.join(prediction_results_dir, 'features.npy')
        np.save(features_file, predictions)
        print('Features from layer: %s saved to: %s' %
              (output_layer, features_file))
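# The features.npy written above is the kind of file that predict_command at
# the top of this section loads back with np.load. A minimal roundtrip sketch,
# with an invented feature matrix and path:
import numpy as np

def _demo_features_roundtrip(path='features.npy'):
    features = np.random.rand(4, 128)  # e.g. 4 images, 128-d feature layer
    np.save(path, features)
    restored = np.load(path)
    assert restored.shape == features.shape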