def stack_image(im_path1, im_path2):
    """Read two images and stack them vertically onto one canvas.

    The canvas is tall enough for both images and as wide as the wider
    one; each image is left-aligned, and any remaining area stays
    zero-filled (black).

    Parameters
    ----------
    im_path1, im_path2 : str
        Paths passed straight to ``read_image`` (with ``dir=""``).

    Returns
    -------
    numpy.ndarray
        Array of shape ``(h1 + h2, max(w1, w2)) + im1.shape[2:]`` with
        the first image on top, dtype taken from the first image.
    """
    im1 = read_image(im_path1, dir="")
    im2 = read_image(im_path2, dir="")
    # Generalized: trailing (channel) dimensions are taken from the first
    # image instead of being hard-coded to 3, so 2-D grayscale inputs work
    # too; 3-channel inputs produce exactly the original result.
    new_shape = (im1.shape[0] + im2.shape[0],
                 max(im1.shape[1], im2.shape[1])) + im1.shape[2:]
    comb_im = np.zeros(new_shape, dtype=im1.dtype)
    comb_im[0:im1.shape[0], 0:im1.shape[1]] = im1
    comb_im[im1.shape[0]:, 0:im2.shape[1]] = im2
    return comb_im
def ROUTINE_check_head_anno():
    """Validate every head annotation image against the saved ellipse parameters."""
    params_dictionary = read_anno_result(head_anno_path)
    for fname in get_files(comb_head_dir):
        anno = get_anno_image(read_image(fname, dir=comb_head_dir))
        validate_ellipse_anno(anno, fname, params_dictionary[fname])
def export_chosen_data(input_folder, target_folder, config_filepath):
    """Copy the raw version of every image marked as chosen into target_folder.

    The target folder is emptied first; entries whose chosen flag is falsy
    are skipped.  ``input_folder`` is part of the signature but not read
    here (kept for caller compatibility).
    """
    del_folder_content(target_folder)
    for image_filepath, chosen_status in read_chosen_config(config_filepath).items():
        if not chosen_status:
            continue
        raw_image = get_raw_image(read_image(image_filepath, dir=""))
        imsave(target_folder + basename(image_filepath), raw_image)
def ROUTINE_split_head_abdo():
    """Split each combined ellipse image into left (head) and right (abdomen) halves."""
    for fname in ellipse_combined_filenames:
        image = read_image(combined_dir + fname, dir="")
        mid = int(image.shape[1] / 2)
        imsave(comb_head_dir + fname, image[:, :mid])
        imsave(comb_abdo_dir + fname, image[:, mid:])
def ROUTINE_split_femur_humerus():
    """Split each combined line image into femur and humerus halves.

    The per-file flag from ``read_line_config()`` decides the ordering:
    truthy means the right half is the femur, falsy means the left half is.
    """
    config_dictionary = read_line_config()
    for fname in line_combined_filenames:
        image = read_image(combined_line_dir + fname, dir="")
        mid = int(image.shape[1] / 2)
        left_half = image[:, :mid]
        right_half = image[:, mid:]
        if config_dictionary[fname]:
            femur, humerus = right_half, left_half
        else:
            femur, humerus = left_half, right_half
        imsave(comb_femur_dir + fname, femur)
        imsave(comb_humerus_dir + fname, humerus)
def ROUTINE_choose(mode="head"):
    """Run the interactive choose step for one body part.

    Parameters
    ----------
    mode : str
        One of ``"head"``, ``"abdo"``, ``"femur"`` or ``"humerus"``;
        selects which chosen-config file is read and edited.

    Raises
    ------
    ValueError
        If ``mode`` is not one of the four known body parts.  (The
        original silently fell through with an empty path and failed
        later inside ``read_chosen_config``.)
    """
    chosen_paths = {
        "head": head_chosen_path,
        "abdo": abdo_chosen_path,
        "femur": femur_chosen_path,
        "humerus": humerus_chosen_path,
    }
    if mode not in chosen_paths:
        raise ValueError("unknown mode: %r" % (mode,))
    chosen_path = chosen_paths[mode]
    dictionary = read_chosen_config(chosen_path)
    # Snapshot the keys before iterating: choose_file receives the dict and
    # may mutate it while we loop (Py2 .items() also iterated a copy).
    # Counter stays 1-based as in the original.
    for counter, filepath in enumerate(list(dictionary), start=1):
        image = read_image(filepath, dir="")
        choose_file(image, filepath, dictionary, chosen_path, counter)
def SUBROUTINE_annotate_line(directory, output_filename, tag):
    """Run the line-annotation tool on every image file found in directory."""
    for fname in get_files(directory):
        anno = get_anno_image(read_image(fname, dir=directory))
        annotate_line(anno, fname, output_filename, tag)
def ROUTINE_annotate_head():
    """Run the head-annotation tool on every image in the combined head directory."""
    for fname in get_files(comb_head_dir):
        sample = get_anno_image(read_image(fname, dir=comb_head_dir))
        annotate_head(sample, fname)
def extract_feature(scenario):
    """Extract per-pixel superpixel features for every image and save them.

    For each image file in the module-level ``directory_path`` this runs
    SLIC at ``scenario['layer']`` different settings, computes regionprops
    per superpixel, and writes one feature row per pixel (the layers'
    features concatenated, ground-truth label appended) with ``np.save``
    to the file returned by ``get_feature_array_file``.

    scenario keys read here:
      'layer'    -- number of SLIC layers,
      'codename' -- names the output folder and files,
      'settings' -- per-layer dicts with 'compactness', 'segment', 'sigma'.
    """
    n_layer = scenario['layer']
    target_directory = get_feature_array_scenario_path(scenario['codename'])
    create_directory(target_directory)
    array_px_files = get_files(target_directory)
    # Skip re-extraction when the output folder already holds enough arrays.
    if len(array_px_files) >= 50:
        print "feature "+scenario['codename']+" is already existed. Abort mission"
        return
    # Collect every input image file.
    image_filenames = get_files(directory_path)
    counter = 0
    for image_filename in image_filenames:
        # print "Extracting %s:%s"%(counter, position_file)
        counter += 1
        a = read_image(image_filename)
        gt = read_groundtruth_image(image_filename)
        # Binarize the ground truth: intensity > 20 becomes 1, else 0.
        gt = gt > 20
        gt = gt.astype(int)
        image_shape = a.shape  # assumes a 3-D (row, col, channel) image — TODO confirm
        image_row = image_shape[0]
        image_col = image_shape[1]
        image_layer = image_shape[2]  # NOTE(review): never read below
        im_slic = []
        im_disp = []
        im_bound = []
        features = []
        # Extract superpixel features for each layer (one SLIC run per setting).
        for i in range(n_layer):
            im_slic.append(slic(a, compactness=scenario['settings'][i]['compactness'], n_segments=scenario['settings'][i]['segment'], sigma=scenario['settings'][i]['sigma']))
            im_slic[i] = label(im_slic[i], neighbors=8)
            im_disp.append(np.copy(im_slic[i]))  # display copy, mutated by mark() below
            im_bound.append(mark_boundaries(a, im_slic[i]))  # NOTE(review): never read below
            temp_feature = regionprops(im_slic[i], intensity_image=rgb2gray(a))
            features.append(list_to_dict(temp_feature))
        X_indiv = []
        for im_row in range(image_row):
            for im_col in range(image_col):
                # Ground-truth label of this pixel plus its superpixel label
                # at every layer.
                posLabel = gt[im_row, im_col]
                current_labels = []
                # Validate labels: label 0 is not allowed — it has no entry in
                # the features dict and would cause a not-exists error.
                valid_position = True
                for i in range(n_layer):
                    current_level_labels = im_slic[i][im_row, im_col]
                    current_labels.append(current_level_labels)
                    if current_level_labels == 0:
                        valid_position = False
                        break
                if not valid_position:
                    continue
                # Concatenate all layers' properties into one feature row.
                x_entry = []
                for i in range(n_layer):
                    feat = features[i][current_labels[i]]
                    for att in attributes:
                        if att == 'bbox':
                            # bbox expands into four scalar columns.
                            (min_row, min_col, max_row, max_col) = feat['bbox']
                            x_entry.append(min_row)
                            x_entry.append(min_col)
                            x_entry.append(max_row)
                            x_entry.append(max_col)
                        else:
                            x_entry.append(feat[att])
                    if posLabel == 1:
                        # Paint positive superpixels onto the display copy.
                        mark(current_labels[i], 1, im_slic[i], im_disp[i])
                x_entry.append(posLabel)
                X_indiv.append(x_entry)
        # Pixels in the same superpixel stack produce identical rows, so
        # deduplicate before saving one feature array per image.
        f = get_feature_array_file(scenario['codename'], image_filename, mode='w')
        X_indiv = np.array(X_indiv)
        X_indiv_u = unique_rows(X_indiv)
        np.save(f, X_indiv_u)
        f.close()