def generatePatches_from_image(imagename, df, patch_size, quant_negative_patches=25,
                               negative_patch_label='background', df_FP=None):
    img = dataset_loaders.load_image(imagename)

    # Extract annotated (positive) patches, randomly sampled background (negative)
    # patches and, optionally, previously detected false-positive patches.
    X_pos, Y_pos = _get_positive_patches_from_image(img, imagename, df, patch_size)
    X_neg, Y_neg = _get_negative_patches_from_image(img, imagename, df, patch_size,
                                                    quant_negative_patches, negative_patch_label)
    X_fp, Y_fp = _get_false_positives_patches_(img, imagename, df_FP, patch_size,
                                               negative_patch_label)
    del img

    # Merge the patch sets. Negative patches are always present, so start from them
    # and prepend the positive and false-positive patches when there are any.
    X, Y = X_neg, Y_neg
    if len(Y_pos) != 0:
        X, Y = np.vstack([X_pos, X]), np.concatenate([Y_pos, Y])
    if len(X_fp) != 0:
        X, Y = np.vstack([X_fp, X]), np.concatenate([Y_fp, Y])

    # Return all gathered patches and their labels.
    return X, Y

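
# Illustrative sketch (not part of the original code): one way the patch extractor
# above could be used to assemble a training set from several images. The names
# `imagenames` and `annotations_df` are hypothetical placeholders; only the call
# signature of generatePatches_from_image() comes from the function above.
def _example_build_training_set(imagenames, annotations_df, patch_size):
    X_all, Y_all = [], []
    for name in imagenames:
        # Gather positive, negative and false-positive patches for one image.
        X, Y = generatePatches_from_image(name, annotations_df, patch_size)
        X_all.append(X)
        Y_all.append(Y)
    # Stack the per-image arrays into a single training set.
    return np.vstack(X_all), np.concatenate(Y_all)
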
def scan_patches(imagename, image_size_nn, patch_size, step_frames, batch_size,
                 square_to_scan=None):
    img = dataset_loaders.load_image(imagename)
    if square_to_scan is None:
        square_to_scan = [0, img.shape[0], 0, img.shape[1]]

    # Clamp the scan region so that no patch falls outside the image.
    x_ini = np.max([square_to_scan[0], int(patch_size / 2)])
    x_end = np.min([square_to_scan[1], int(img.shape[0] - patch_size / 2)])
    y_ini = np.max([square_to_scan[2], int(patch_size / 2)])
    y_end = np.min([square_to_scan[3], int(img.shape[1] - patch_size / 2)])

    patches = []
    wnd = int(patch_size / 2)
    for xi in range(x_ini, x_end, step_frames):
        for yi in range(y_ini, y_end, step_frames):
            # Crop a patch centred on (xi, yi), resize it to the network input size
            # and rescale intensities to [0, 1]. Note that scipy.misc.imresize
            # requires SciPy < 1.3 (it was removed in later releases).
            patches.append(
                scipy.misc.imresize(img[xi - wnd:xi + wnd, yi - wnd:yi + wnd],
                                    [image_size_nn, image_size_nn]) / 255)
            if len(patches) == batch_size:
                # Yield a full batch in NCHW layout.
                yield np.array(patches).transpose([0, 3, 1, 2])
                patches = []
    if len(patches) > 0:
        # Yield the remaining, possibly smaller, final batch.
        yield np.array(patches).transpose([0, 3, 1, 2])

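
# Illustrative sketch (not part of the original code): consuming the scan_patches()
# generator with a trained classifier to score a whole image. The `model` argument
# and its predict(batch) method are hypothetical placeholders; the batch layout
# (NCHW) and the generator arguments match scan_patches() above.
def _example_scan_image(model, imagename, image_size_nn, patch_size, step_frames, batch_size):
    preds = []
    for batch in scan_patches(imagename, image_size_nn, patch_size, step_frames, batch_size):
        preds.append(model.predict(batch))  # hypothetical model API
    # One prediction per scanned patch, in row-major scan order.
    return np.concatenate(preds)
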
def load_predictions(casename, df):
    img = dataset_loaders.load_image(casename)
    # Load the saved predictions for this case.
    pred = np.load("%s/%s_%s_%d.npz" % (annotations_path, annotation_basename,
                                        casename, scan_window))
    # Return the ground-truth annotations only if this case is labelled.
    if casename in df['image'].unique():
        labels = df[df['image'] == casename]
    else:
        labels = None
    return img, pred['preds'], labels

def get_samples_prediction(casename, patch_size, df):
    X, coords = [], []
    img = dataset_loaders.load_image(casename)
    for ind in df.index.values:
        row = df.loc[ind]
        # Bounding box of a patch centred on the predicted (x, y) position.
        x0, x1 = int(row['x'] - patch_size / 2), int(row['x'] + patch_size / 2)
        y0, y1 = int(row['y'] - patch_size / 2), int(row['y'] + patch_size / 2)
        # Keep only patches that lie fully inside the image.
        if x0 >= 0 and y0 >= 0 and x1 < img.shape[0] and y1 < img.shape[1]:
            patch = img[x0:x1, y0:y1, :]
            X.append(patch)
            coords.append([row['x'], row['y']])
    # Predicted samples carry no class label, so a dummy label of 0 is returned per patch.
    return np.array(X), np.array([0 for i in range(len(X))]), np.array(coords)

def get_sample(casename, patch_size):
    X, Y, coords = [], [], []
    img = dataset_loaders.load_image(casename)
    # Iterate over the detections recorded for this case.
    for ind in dataset[dataset.id == casename].index.values:
        row = dataset.loc[ind]
        x0, x1 = int(row['xdet'] - patch_size / 2), int(row['xdet'] + patch_size / 2)
        y0, y1 = int(row['ydet'] - patch_size / 2), int(row['ydet'] + patch_size / 2)
        # Keep only patches that lie fully inside the image.
        if x0 >= 0 and y0 >= 0 and x1 < img.shape[0] and y1 < img.shape[1]:
            patch = img[x0:x1, y0:y1, :]
            X.append(patch)
            Y.append(row['classref'])
            coords.append([row['xdet'], row['ydet']])
    return np.array(X), np.array(Y), np.array(coords)

def get_predictions(casename, return_debug_data=False):
    img = dataset_loaders.load_image(casename)
    # Locate and load the saved prediction file for this case.
    annotations_dir = st.DATAMODEL_PATH + '/patches_single_size_fps/annotations'
    ann_file = [x for x in os.listdir(annotations_dir) if casename in x][0]
    ann_fps = np.load(annotations_dir + '/' + ann_file)['preds']
    # Project the patch-level predictions back onto the original image resolution.
    rec_fps = reconstruct_original(img, ann_fps)
    # Mask out low-scoring pixels before blob detection.
    plot_fps = np.copy(rec_fps)
    plot_fps[plot_fps < 1] = np.nan
    blobs = detect_blobs(np.nan_to_num(plot_fps))
    if return_debug_data:
        return blobs, img, plot_fps
    return blobs

os.system('mkdir -p %s/%s' % (settings.DATAMODEL_PATH, experiment_folder_name))
os.system('mkdir -p %s/%s/logs' % (settings.DATAMODEL_PATH, experiment_folder_name))

# Build the dataset of previously predicted positions, keeping true and false positives.
csv = pd.read_csv('train_predicted_positions_2.csv')
dataset = csv[csv.pointtype.isin(['TP', 'FP'])][['xdet', 'ydet', 'classref', 'id']].fillna('FP')

# Split the cases into training and validation sets.
cases = [x for x in sorted(dataset_loaders.get_casenames()) if x != 'emtpy']
train_cases = cases[N_valid_cases:]
valid_cases = cases[:N_valid_cases]


def get_sample(casename, patch_size):
    X, Y = [], []
    img = dataset_loaders.load_image(casename)
    for ind in dataset[dataset.id == casename].index.values:
        row = dataset.loc[ind]
        x0, x1 = int(row['xdet'] - patch_size / 2), int(row['xdet'] + patch_size / 2)
        y0, y1 = int(row['ydet'] - patch_size / 2), int(row['ydet'] + patch_size / 2)
        # Keep only patches that lie fully inside the image.
        if x0 >= 0 and y0 >= 0 and x1 < img.shape[0] and y1 < img.shape[1]:
            patch = img[x0:x1, y0:y1, :]
            X.append(patch)
            Y.append(row['classref'])
    return np.array(X), np.array(Y)


def get_samples(CASES, patch_size, max_cases=1000000):