def object_modifier(fill_light, edge_light, diffuse_light, output, mask,
                    fill=1, edge=1, diffuse=1, count=-1, verbose=False):
    """ This function calculates the per-object modifier for an image.

    Arguments:
    fill_light -- path to the fill light image
    edge_light -- path to the edge light image
    diffuse_light -- path to the diffuse color light image
    output -- path to write the resulting image to
    mask -- the mask image that identifies the object of interest
    fill -- the weight of fill light in the object
    edge -- the weight of edge light in the object
    diffuse -- the weight of diffuse light in the object
    count -- the number of images to use for this calculation
    verbose -- should we print debug info
    """
    fill_image = utils.read_image(fill_light, normalize=True)
    edge_image = utils.read_image(edge_light, normalize=True)
    diffuse_image = utils.read_image(diffuse_light, normalize=True)
    mask_image = utils.read_image(mask, normalize=True)
    if verbose:
        print(fill_image)

    modifier = modifier_lights.ModifierLights(verbose=verbose)
    res_image = modifier.per_object(fill_image, edge_image, diffuse_image,
                                    mask_image, fill, edge, diffuse)

    cv2.imwrite(output, utils.denormalize_img(res_image))
    cv2.imshow('Object modifier', res_image)
    cv2.waitKey(0)
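# A hypothetical usage sketch for object_modifier; the file names and weights
# below are illustrative only, not from the original project:
if __name__ == '__main__':
    object_modifier('renders/fill.png', 'renders/edge.png',
                    'renders/diffuse.png', 'out/object_mod.png',
                    'masks/object_mask.png', fill=0.8, edge=1.2, diffuse=1.0)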
def run(in_fname, out_fname):
    """ Draw lattice lines between all points. Lines are brighter based on
    the frequency of the occurrence of lines with the same angle.
    """
    img = image_utils.read_image(in_fname)
    data = image_utils.image_to_matrix(img)
    edge_data = image_utils.image_to_matrix(img)
    point_data = image_utils.image_to_matrix(img)

    points = local_maxima.run(data)

    llist = all_point_pairs_as_lines(points)
    llist = filter_segments_with_colinear_points(llist, points)

    # Draw blue edges between neighboring points
    # (line_aa and circle_perimeter_aa come from skimage.draw)
    for line in llist:
        rr, cc, lum = line_aa(line[0][0], line[0][1], line[1][0], line[1][1])
        edge_data[rr, cc] = np.maximum(edge_data[rr, cc], lum * 255)

    # Draw red circles around all points
    for point in points:
        rr, cc, lum = circle_perimeter_aa(point[0], point[1], 4)
        point_data[rr, cc] = np.maximum(point_data[rr, cc], lum * 255)

    # Stack the three channels (red: points, green: input, blue: edges)
    out_data = np.dstack((point_data, data, edge_data))
    out_img = image_utils.matrix_to_image(out_data)
    image_utils.write_image(out_img, out_fname)
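# all_point_pairs_as_lines is project-local and not shown above; a minimal
# sketch of what it could look like, assuming points are (row, col) tuples
# (an illustration, not the original implementation):
from itertools import combinations

def all_point_pairs_as_lines(points):
    # Every unordered pair of points defines one candidate lattice line
    return [(p, q) for p, q in combinations(points, 2)]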
def __init__(self, image_number, database):
    if isinstance(image_number, int):
        # Leading zeros are used in the DRIVE database file names
        image_number = '{:02d}'.format(image_number)
    logging.debug('Reading image, mask, truth %s from database', image_number)
    self.image = image_utils.read_image(
        '{}/image/{}.tif'.format(database, image_number))
    # np.bool is deprecated in recent NumPy; plain bool keeps the masks boolean
    self.truth = image_utils.read_image(
        '{}/truth/{}.tif'.format(database, image_number),
        greyscale=True).astype(bool)
    self.fov_mask = image_utils.read_image(
        '{}/mask/{}.tif'.format(database, image_number),
        greyscale=True).astype(bool)
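# Hypothetical usage, assuming the enclosing class is named e.g. DriveImage
# (the class name is not shown in the original snippet):
#
#   sample = DriveImage(3, 'DRIVE/training')  # reads 03.tif image/truth/mask
#   in_fov = sample.image[sample.fov_mask]    # pixels inside the field of view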
def load_dataset(path, fractions):
    data = fu.create_train_dict(os.path.join(path, 'training-data'))
    train_fraction, validation_fraction, test_fraction = fractions
    samples = []
    global n_targets
    global targets
    # Go over all the categories
    for cat_key, cat in data.items():
        # Go over all the classes per category
        for class_key, klass in cat.items():
            targets.append(class_key)
            # Go over all the images per class
            for i, img_path in enumerate(klass):
                # Process the images
                img = iu.read_image(img_path)
                img = preprocess(img)
                features = getfeatures(img)
                samples.append((features, n_targets))
            # One target id per class, so increment after the image loop
            n_targets = n_targets + 1

    n_samples = len(samples)
    print("# samples: {}".format(n_samples))
    print("# targets: {}".format(n_targets))

    # Shuffle the data so we don't always train/test on the same data
    np.random.shuffle(samples)

    n_train_samples = round(n_samples * train_fraction)
    n_validation_samples = round(n_samples * validation_fraction)
    # The test split takes whatever remains after the train/validation splits
    X_train, y_train = zip(*samples[0:n_train_samples])
    X_val, y_val = zip(*samples[n_train_samples:
                                n_train_samples + n_validation_samples])
    X_test, y_test = zip(*samples[n_train_samples + n_validation_samples:
                                  n_samples])

    X_train = np.array(X_train)
    X_val = np.array(X_val)
    X_test = np.array(X_test)

    print(np.shape(X_train))
    print(np.shape(y_train))
    print(np.shape(X_val))
    print(np.shape(y_val))
    print(np.shape(X_test))
    print(np.shape(y_test))

    return list(X_train), list(y_train), list(X_val), list(y_val), \
        list(X_test), list(y_test)
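# Hypothetical usage sketch; the path and the 70/15/15 split are illustrative,
# and the module-level globals (n_targets = 0, targets = []) are assumed to be
# initialized before the call:
if __name__ == '__main__':
    X_train, y_train, X_val, y_val, X_test, y_test = load_dataset(
        'dataset', (0.7, 0.15, 0.15))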
def load_imgs_from_paths(paths, auto_resize=True):
    # Shuffle paths so that when we do test/train splits
    # we get shuffled distributions
    paths = sampler(paths, len(paths))

    if isinstance(auto_resize, tuple) and len(auto_resize) == 2:
        # Resize to the specified size
        imgs = [imresize(read_image(path), size=auto_resize) for path in paths]
    elif isinstance(auto_resize, bool) and auto_resize:
        # Automatically resize to image_utils.AUTO_RESIZE_DEFAULT
        imgs = [imresize(read_image(path)) for path in paths]
    else:
        # No resize
        imgs = [read_image(path) for path in paths]
    return imgs
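# The three resize modes, sketched with hypothetical inputs:
#
#   imgs = load_imgs_from_paths(paths, auto_resize=(64, 64))  # fixed size
#   imgs = load_imgs_from_paths(paths, auto_resize=True)      # AUTO_RESIZE_DEFAULT
#   imgs = load_imgs_from_paths(paths, auto_resize=False)     # original sizes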
from typing import Tuple

def process_image(task: dict) -> Tuple[str, Image.Image]:
    log("Applying filter '{}'...".format(task["filter"]))
    obj = mongo_storage.get_fs_object(task["file_id"])
    original_image = image_utils.read_image(obj)
    filtered_image = filter_image(original_image, task["filter"])
    # Derive the output name from the source file name and the filter applied
    return ("{}-{}.jpg".format(obj.filename.replace(".jpg", ""),
                               task["filter"]),
            filtered_image)
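# The task dict is expected to carry at least "file_id" and "filter";
# a hypothetical invocation (id and filter name are illustrative):
#
#   name, img = process_image({"file_id": "5f2b...", "filter": "sepia"})
#   img.save(name)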
def read_data():
    X = []
    filenames = []
    # Read the unlabelled test images
    for f in glob.glob(TEST_DATA + '/*.jpg'):
        image = read_image(f, [128, 128, 3])
        X.append(image)
        filenames.append(f)
    return X, filenames
def read_data():
    X = []
    Y = []
    # Read cat and dog images: label 0 for cat, 1 for dog
    for f in glob.glob(TRAIN_DATA + '/cat/*.jpg'):
        X.append(read_image(f, [128, 128, 3]))
        Y.append(0)
    for f in glob.glob(TRAIN_DATA + '/dog/*.jpg'):
        X.append(read_image(f, [128, 128, 3]))
        Y.append(1)
    # Split into training data and a validation set
    X, X_test, y, y_test = train_test_split(X, Y, test_size=0.2,
                                            random_state=42)
    return (X, y), (X_test, y_test)
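# Hypothetical usage; read_data() yields an 80/20 train/validation split:
if __name__ == '__main__':
    (X, y), (X_test, y_test) = read_data()
    print(len(X), len(X_test))  # roughly 4:1 thanks to test_size=0.2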
def regional_modifier(image_path, output, beta, verbose=False):
    """ This function calculates the soft lighting modifier for an image.

    Arguments:
    image_path -- path to the image to apply the modifier to
    output -- path to write the resulting image to
    beta -- determines which areas will be emphasized
    verbose -- should we print debug info
    """
    image = utils.read_image(image_path, normalize=False)
    modifier = modifier_lights.ModifierLights(verbose=verbose)
    res_image = modifier.regional(image, beta)

    cv2.imwrite(output, res_image)
    cv2.imshow('Regional modifier', res_image)
    cv2.waitKey(0)
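# Hypothetical usage (paths and beta value are illustrative):
#
#   regional_modifier('photos/portrait.jpg', 'out/soft_light.jpg', beta=0.5)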
def determine_result(nnmodel, path):
    data = fu.list_all_test_data(os.path.join(path, 'test'))
    test_samples = []
    for img_path in data:
        # Process the images
        img = iu.read_image(img_path)
        img = preprocess(img)
        features = getfeatures(img)
        test_samples.append(features)
    print(np.shape(test_samples))
    test_results = nnmodel(test_samples)
    print(np.shape(test_results))
    return test_results
application_model = model_utils.ApplicationModel(
    model, model_mod, make_linear=True,
    preprocessing_function=config.preprocessing_function,
    last_conv_layer=last_conv_layer,
    custom_objects=config.custom_objects)
print('Complete.')

# Initialize visualizers
print('Initializing Integrated Gradients.')
integrated_vis = visualizers.IntegratedGradientsVisualizer(application_model)
print('Initializing GradCAM.')
grad_cam = visualizers.GradCAMVisualizer(application_model)

''' Entry point here '''
x, img = image_utils.read_image(
    args.image_name,
    im_framework=input_config['im_framework'],
    target_size=input_config['target_size'],
    mode=input_config['color_mode']
)

# TODO: Dynamic colormap
# For now, hardcode the colormap
cmap = plt.cm.copper

print('Running Inference on {}'.format(args.image_name))
class_scores = application_model.predict(x, output='linear')
class_probabilities, predicted_class = model_utils.softmax(class_scores)

# Generate bar graph of the top 5 (or fewer) classes
savename = args.image_name.rpartition('.')[0] + '_prob_graph.png'
image_utils.generate_bargraph(class_probabilities, class_map,
                              os.path.join(args.save_path, savename))
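# model_utils.softmax above returns both the probability vector and the
# predicted class; a minimal NumPy sketch of that contract (an assumption,
# not the original implementation):
import numpy as np

def softmax(scores):
    scores = np.asarray(scores, dtype=np.float64).ravel()
    exp = np.exp(scores - scores.max())  # subtract the max for stability
    probabilities = exp / exp.sum()
    return probabilities, int(np.argmax(probabilities))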