def extract_road(self, start, end, proposal=False):
    """Extract a road between *start* and *end*.

    When *proposal* is False, real-time proposals are enabled and the
    cached proposal's extraction endpoints both lie within 4.5 px of
    the requested endpoints, the cached 4-tuple
    (smoothed_ext, points, extraction, cost_map) is reused with its
    endpoints snapped to *start*/*end*. Otherwise a fresh extraction is
    computed and the timing statistics are updated.

    Fix: the original duplicated the timed-extraction code verbatim in
    both the ``proposal`` and the fallback branch; the duplication is
    removed by restructuring with a guard clause (behavior unchanged).
    """
    if not proposal and self.rt_proposals and self.proposal is not None:
        smoothed_ext, points, extraction, cost_map = self.proposal
        ds = euclidean_distance(start, extraction[0])
        de = euclidean_distance(end, extraction[-1])
        if ds < 4.5 and de < 4.5:
            # Reuse the cached proposal, snapping its path endpoints
            # exactly onto the requested start/end points.
            points[0] = start
            points[-1] = end
            return smoothed_ext, points, extraction, cost_map
    # No reusable proposal: run a fresh extraction and track timing stats.
    # NOTE: `extract_road(...)` here resolves to the module-level function
    # of the same name, not to this method.
    start_time = time.time()
    extraction = extract_road(self.img, start, end,
                              self.get_all_correct_colors([start, end]))
    self.extraction_time += time.time() - start_time
    self.nb_extractions += 1
    return extraction
def get_segment(path, i_start, max_length, margin=0, after=True):
    """Collect a sub-path of *path* around index *i_start*.

    Walks forward (``after=True``) or backward (``after=False``) from
    ``i_start``, accumulating points until roughly ``margin + max_length``
    of polyline length is covered, then discards the initial stretch of
    roughly ``margin`` length closest to ``i_start``.

    Returns a 4-tuple ``(segment, length, i_min, i_max)``: the trimmed
    list of points, its remaining polyline length, and the indices in
    ``path`` of the segment's first and last points. For an empty *path*
    returns ``([], 0, i_start, i_start)``.
    """
    if len(path) > 0:
        # Total length to walk: the margin plus the usable segment length.
        mpml = margin + max_length
        segment = [path[i_start]]
        length = 0
        i_end_margin = 0   # number of points falling inside the margin stretch
        margin_length = 0  # accumulated length of that margin stretch
        if after:
            max_i = len(path) - 1
            i = i_start
            while length < mpml and i < max_i:
                i += 1
                segment.append(path[i])
                dist = euclidean_distance(path[i - 1], path[i])
                length += dist
                if length < margin:
                    i_end_margin += 1
                    margin_length = length
            # The last step may have overshot mpml: drop the overshooting point.
            if length > mpml:
                segment = segment[:-1]
                length -= dist
                i -= 1
            # Trim the margin points (nearest i_start) off the front.
            return segment[
                i_end_margin:], length - margin_length, i_start + i_end_margin, i
        else:
            i = i_start
            while length < mpml and i > 0:
                i -= 1
                segment.insert(0, path[i])
                dist = euclidean_distance(path[i], path[i + 1])
                length += dist
                if length < margin:
                    # Walking backward: margin points sit at the END of `segment`,
                    # so the margin counter goes negative for slicing below.
                    i_end_margin -= 1
                    margin_length = length
            # Drop the overshooting point at the front, if any.
            if length > mpml:
                segment = segment[1:]
                length -= dist
                i += 1
            if i_end_margin < 0:
                segment = segment[:i_end_margin]
            return segment, length - margin_length, i, i_start + i_end_margin
    else:
        return [], 0, i_start, i_start
def point_on_max_dist_from_p1(self, max_dist):
    """Return the last pixel of this line whose Euclidean distance from
    ``self.p1`` is still at most *max_dist*.

    Fix: the original left ``previous_point`` unbound (raising
    ``UnboundLocalError``) when ``self.pixels()`` was empty or when the
    very first pixel already exceeded *max_dist*; it now falls back to
    ``self.p1`` in those cases.
    """
    # Fallback so the function is well-defined even if no pixel qualifies.
    previous_point = self.p1
    points = self.pixels()
    for point in points:
        dist = euclidean_distance(self.p1, point)
        if dist > max_dist:
            break
        previous_point = point
    return previous_point
def _closest_centroid(self, sample, centroids):
    """Return the index of the centroid nearest to *sample*.

    Returns 0 when *centroids* is empty (the initial best index).
    """
    best_index, best_distance = 0, float("inf")
    for index, centroid in enumerate(centroids):
        candidate = euclidean_distance(sample, centroid)
        if candidate < best_distance:
            best_index, best_distance = index, candidate
    return best_index
def predict(self, x):
    # Classify sample `x`: project it onto the stored eigenvector basis and
    # return the label of the nearest stored principal-component projection.
    # NOTE(review): Python 2 code (`print` statement, `xrange`) — do not run
    # under Python 3 without porting.
    print "[prediction] :: classification based on euclidean_distance.. "
    # min_dist : initialized the maximum number in the range of float.
    min_dist = np.finfo('float').max
    min_class = -1  # returned unchanged if self.principals is empty
    input_data_principal = project(self.eigen_vector, x, self.mean)
    for i in xrange(len(self.principals)):
        dist = euclidean_distance(self.principals[i], input_data_principal)
        if dist < min_dist:
            min_dist = dist
            # self.T holds the training labels, indexed in parallel with
            # self.principals.
            min_class = self.T[i]
    return min_class
def get_closest_correct_point_pair(self, point):
    """Return the (first, last) point pair of the segment closest to
    *point*, or ``False`` when there are no segments.

    With several candidate segments the pair minimising the summed
    distance of both endpoints to *point* wins.

    Fix: the original used ``min_dist = False`` with ``if not min_dist or
    dist < min_dist`` — a best distance of exactly 0 is falsy, so any
    later (worse) pair would overwrite it. An infinite sentinel makes the
    comparison well-defined for all distances.
    """
    segments = self.get_segments()
    if len(segments) == 0:
        return False
    if len(segments) == 1:
        return segments[0][0], segments[0][-1]
    indices = indices_closest_segments(point, segments)
    if len(indices) == 1:
        segment = segments[indices[0]]
        return segment[0], segment[-1]
    pair = False
    min_dist = float('inf')
    for i in indices:
        segment = segments[i]
        cp1 = segment[0]
        cp2 = segment[-1]
        dist = euclidean_distance(cp1, point) + euclidean_distance(cp2, point)
        if dist < min_dist:
            pair = cp1, cp2
            min_dist = dist
    return pair
def on_timeout(self):
    """Timer callback that decides whether to compute a real-time road
    proposal for the current mouse position.

    Skips work while correcting, when proposals are disabled, or when no
    position is known; otherwise plots a proposal either immediately
    (cursor close to the extraction end) or once the cursor has been
    stable for one timeout period.
    """
    if self.correcting or not self.rt_proposals or self.position is None:
        return
    start = self.extraction.get_last()
    if not start:
        return
    # do not compute proposal if we already have a proposal ending in this point with a margin < 2 pixels
    if self.proposal is not None:
        _, points, _, _ = self.proposal
        m = euclidean_distance(self.position, points[-1])
        if m < 2:
            return
    # if distance between start and end is small: compute proposal
    # else: wait until mouse pointer is stable for 1 timeout period
    d = euclidean_distance(start, self.position)
    if d < 200 or self.prev_position is None:
        self.plot_road_proposal(self.position)
    else:
        m = euclidean_distance(self.position, self.prev_position)
        if m < 2:
            self.plot_road_proposal(self.position)
    # Remember the position so the next timeout can detect cursor stability.
    self.prev_position = self.position
def get_cost_path(path, cost_map):
    """Total traversal cost of *path* over *cost_map* (indexed [y][x]).

    Cost is accumulated per edge as half the edge length times the cost
    at each endpoint (trapezoid rule). Coordinates are clamped into the
    map before lookup.

    Fix: the original clamped every point except the first one (and the
    single-point case), so an out-of-bounds ``path[0]`` could raise an
    IndexError or silently wrap with a negative index. Clamping is now
    applied consistently; distances are still measured on the raw,
    unclamped coordinates of the incoming point, matching the original
    behavior for in-bounds paths.
    """
    if len(path) == 0:
        return 0
    max_x = len(cost_map[0]) - 1
    max_y = len(cost_map) - 1

    def _clamp(p):
        # Constrain a point to valid cost_map indices.
        x, y = p
        return min(max(0, x), max_x), min(max(0, y), max_y)

    if len(path) == 1:
        x, y = _clamp(path[0])
        return cost_map[y][x]
    cost = 0
    previous = None
    for current in path:
        if previous is not None:
            # Half the edge length; multiplied by cost at both endpoints.
            dist_db2 = euclidean_distance(current, previous) / 2
            current = _clamp(current)
            x, y = current
            px, py = previous
            cc = cost_map[y][x]
            cp = cost_map[py][px]
            cost += dist_db2 * cc + dist_db2 * cp
        else:
            # Bug fix: the first point is clamped too.
            current = _clamp(current)
        previous = current
    return cost
def main():
    '''
    Compare against the joints of the previous frame and select the one
    with the smallest Euclidean distance as the best joint.
    '''
    args = sys.argv
    # Rotation step in degrees.
    DEG_SPLIT = Settings.DEG_SPLIT
    # Distance threshold (set large here so it never rejects by distance).
    DIST_THRESHOLD = np.inf
    # Set to 1 so that only the time series is taken into account.
    MAX_NUM_IN_THRESHOLD = 1
    # Whether to draw the joints ourselves (instead of saving rotated images).
    IS_SELF_DRAWING = Settings.IS_SELF_DRAWING
    # Smoothing of keypoints between frames.
    IS_SMOOTHED = Settings.IS_SMOOTHED
    W_CNT = Settings.W_CNT if IS_SMOOTHED else 1
    # Base paths.
    BASE_PATH = args[1]
    JSON_PATH = join(BASE_PATH, 'json')
    IMGS_PATH = join(BASE_PATH, 'images')
    FOR_VIDEO_PATH = join(
        BASE_PATH, 'for_video_deg{}_w_cnt{}_dist{}_time'.format(
            DEG_SPLIT, int(W_CNT * 100), DIST_THRESHOLD))
    if not isdir(FOR_VIDEO_PATH):
        mkdir(FOR_VIDEO_PATH)
    # Output file paths.
    LOG_FILE_PATH = join(FOR_VIDEO_PATH, 'log.txt')
    ANGLES_FILE_PATH = join(FOR_VIDEO_PATH, 'angles.txt')
    CONFIDENCE_MEAN_TEXT_PATH = join(FOR_VIDEO_PATH, 'confidence_mean.txt')
    # Open the output files (closed at the end of this function).
    log_f = open(LOG_FILE_PATH, 'w')
    angles_f = open(ANGLES_FILE_PATH, 'w')
    confidence_f = open(CONFIDENCE_MEAN_TEXT_PATH, 'w')
    # Path of the original input images.
    INPUT_IMAGES_PATH = args[2]
    input_imgs_list = make_list_in_dir(INPUT_IMAGES_PATH, expanded='jpg')
    # Lists of directory names holding the OpenPose result json and images:
    # image000001, image000002, ...
    json_dir_list = make_list_in_dir(JSON_PATH)
    imgs_dir_list = make_list_in_dir(IMGS_PATH)
    # Compute the rotation center from the images.
    rot_center_x, rot_center_y = get_rot_center_from_path(
        IMGS_PATH, imgs_dir_list)
    exists_first_keypoints = False
    for json_dir, imgs_dir, input_img in zip(json_dir_list, imgs_dir_list,
                                             input_imgs_list):
        # Directory containing the OpenPose result json and images.
        json_dir_path = join(JSON_PATH, json_dir)
        imgs_dir_path = join(IMGS_PATH, imgs_dir)
        # Lists of json and image file names; one entry per rotation step:
        # image000001_rotate000_keypoints.json, image000001_rotate010_keypoints.json, ...
        json_name_list = make_list_in_dir(json_dir_path)
        imgs_name_list = make_list_in_dir(imgs_dir_path)
        input_img_path = join(INPUT_IMAGES_PATH, input_img)
        output_img_path = join(FOR_VIDEO_PATH, '{}.png'.format(imgs_dir))
        # While the first keypoints are not yet determined: decide by confidence.
        if not exists_first_keypoints:
            log_f.write(
                '{} ====================================\n'.format(json_dir))
            log_f.write(' method: confidence\n')
            # From the maximum confidence and its index, load the keypoint
            # np.array of the most confident rotation.
            max_confidence, max_confidence_idx = get_confidence_and_idx(
                json_dir_path)
            max_confidence_json_path = join(json_dir_path,
                                            json_name_list[max_confidence_idx])
            max_confidence_keypoints = get_keypoints_array_from_json(
                max_confidence_json_path)
            confidence_f.write(str(max_confidence) + '\n')
            if max_confidence_keypoints.any():
                deg = max_confidence_idx * DEG_SPLIT
                exists_first_keypoints = True
                pre_keypoints_array = rotate_keypoints_array(
                    max_confidence_keypoints,
                    deg,
                    rot_center_x=rot_center_x,
                    rot_center_y=rot_center_y)
                # 18 COCO-style joints x (x, y, confidence).
                reshaped_pre_keypoints_array = pre_keypoints_array.reshape(
                    [18, 3])
                if IS_SELF_DRAWING:
                    draw_joints_on_image(input_img_path, output_img_path,
                                         reshaped_pre_keypoints_array)
                else:
                    max_image_name = imgs_name_list[max_confidence_idx]
                    max_image_path = join(imgs_dir_path, max_image_name)
                    save_rotate_image(max_image_path, output_img_path, deg)
            else:
                # No keypoints detected: pass the input image through unchanged.
                img = Image.open(input_img_path)
                img.save(output_img_path)
            log_f.write(' confidence score: {}\n'.format(max_confidence))
            # NOTE(review): if no frame has produced keypoints yet, `deg` is
            # unbound here and this raises NameError — confirm against caller.
            angles_f.write(str(deg) + '\n')
        # Once the first keypoints exist: take the time series into account.
        else:
            euclidean_dist_list = np.array([])
            for i in range(len(json_name_list)):
                deg = i * DEG_SPLIT
                json_name = json_name_list[i]
                json_name_path = join(json_dir_path, json_name)
                keypoints_array = get_keypoints_array_from_json(json_name_path)
                if keypoints_array.any():
                    rotated_keypoints_array = rotate_keypoints_array(
                        keypoints_array,
                        deg,
                        rot_center_x=rot_center_x,
                        rot_center_y=rot_center_y)
                    euclidean_dist = euclidean_distance(
                        pre_keypoints_array, rotated_keypoints_array)
                    euclidean_dist_list = np.append(euclidean_dist_list,
                                                    euclidean_dist)
                else:
                    # No detection for this rotation: mark as infinitely far.
                    euclidean_dist_list = np.append(euclidean_dist_list,
                                                    np.inf)
            # Keep only distances within the threshold, capped at
            # MAX_NUM_IN_THRESHOLD candidates.
            valid_euclidean_dist_list = np.array([
                dist for dist in euclidean_dist_list if dist <= DIST_THRESHOLD
            ])
            valid_dist_len = len(valid_euclidean_dist_list)
            valid_dist_len = valid_dist_len \
                if valid_dist_len <= MAX_NUM_IN_THRESHOLD else MAX_NUM_IN_THRESHOLD
            if valid_dist_len == 0:
                # No candidate within threshold: fall back to confidence.
                max_confidence, max_confidence_idx = get_confidence_and_idx(
                    json_dir_path)
                max_confidence_json_path = join(
                    json_dir_path, json_name_list[max_confidence_idx])
                max_confidence_keypoints = get_keypoints_array_from_json(
                    max_confidence_json_path)
                log_f.write('{} ====================================\n'.format(
                    json_dir))
                log_f.write(' method: confidence\n')
                log_f.write(' dist: {}\n'.format(
                    np.min(euclidean_dist_list)))
                confidence_f.write(str(max_confidence) + '\n')
                if max_confidence_keypoints.any():
                    deg = max_confidence_idx * DEG_SPLIT
                    exists_first_keypoints = True
                    cnt_keypoints_array = rotate_keypoints_array(
                        max_confidence_keypoints,
                        deg,
                        rot_center_x=rot_center_x,
                        rot_center_y=rot_center_y)
                    if IS_SMOOTHED:
                        cnt_keypoints_array = smoothing(
                            cnt_keypoints_array, pre_keypoints_array, W_CNT)
                    reshaped_cnt_keypoints_array = cnt_keypoints_array.reshape(
                        [18, 3])
                    pre_keypoints_array = cnt_keypoints_array
                    if IS_SELF_DRAWING:
                        draw_joints_on_image(input_img_path, output_img_path,
                                             reshaped_cnt_keypoints_array)
                    else:
                        max_image_name = imgs_name_list[max_confidence_idx]
                        max_image_path = join(imgs_dir_path, max_image_name)
                        save_rotate_image(max_image_path, output_img_path, deg)
                else:
                    img = Image.open(input_img_path)
                    img.save(output_img_path)
                log_f.write(
                    ' confidence score: {}\n'.format(max_confidence))
                angles_f.write(str(deg) + '\n')
            else:
                # Candidates exist: among the closest rotations, pick the one
                # with the highest confidence.
                log_f.write('{} ====================================\n'.format(
                    json_dir))
                log_f.write(' method: time series and confidence\n')
                confidence_array = create_confidence_array(json_dir_path)
                argsort_idx_list = np.argsort(euclidean_dist_list)
                argsort_idx_list = argsort_idx_list[:valid_dist_len]
                best_idx = 0
                max_confidence = -1
                for argsort_idx in argsort_idx_list:
                    log_f.write(' idx - {}:\n'.format(argsort_idx))
                    log_f.write(' dist -> {}\n'.format(
                        euclidean_dist_list[argsort_idx]))
                    log_f.write(' confidence -> {}\n'.format(
                        confidence_array[argsort_idx]))
                    if confidence_array[argsort_idx] > max_confidence:
                        best_idx = argsort_idx
                        max_confidence = confidence_array[argsort_idx]
                confidence_f.write(str(max_confidence) + '\n')
                deg = best_idx * DEG_SPLIT
                best_json_path = join(json_dir_path, json_name_list[best_idx])
                best_keypoints_array = get_keypoints_array_from_json(
                    best_json_path)
                cnt_keypoints_array = rotate_keypoints_array(
                    best_keypoints_array,
                    deg,
                    rot_center_x=rot_center_x,
                    rot_center_y=rot_center_y)
                if IS_SMOOTHED:
                    cnt_keypoints_array = smoothing(cnt_keypoints_array,
                                                    pre_keypoints_array, W_CNT)
                reshaped_cnt_keypoints_array = cnt_keypoints_array.reshape(
                    [18, 3])
                pre_keypoints_array = cnt_keypoints_array
                if IS_SELF_DRAWING:
                    draw_joints_on_image(input_img_path, output_img_path,
                                         reshaped_cnt_keypoints_array)
                else:
                    max_image_name = imgs_name_list[best_idx]
                    max_image_path = join(imgs_dir_path, max_image_name)
                    save_rotate_image(max_image_path, output_img_path, deg)
                angles_f.write(str(deg) + '\n')
    log_f.close()
    angles_f.close()
    confidence_f.close()
img.save(image_path) i = 0 for segment in segments: i += 1 image_path = path_images + name + '_' + str(i) + '_z' + str(zoom_level) + '_s' + str(size) + extension if not os.path.exists(image_path): mpp = meter_per_pixel(segment.center().lat, zoom_level) reference = segment.pixels(size, zoom_level=zoom_level) pixel_length_reference = 0 previous_pixel = None for pixel in reference: if previous_pixel is not None: pixel_length_reference += euclidean_distance(previous_pixel, pixel) previous_pixel = pixel if pixel_length_reference * mpp >= min_length: img = image_collector.get_image(segment.center(), zoom_level, size, image_source) if image_source == 'google': # remove alpha channel img_float = img_as_float(img) img_new = np.zeros((len(img_float), len(img_float[0]), 3)) for r in range(0, len(img_float)): for c in range(0, len(img_float[0])): img_new[r][c] = img_float[r][c][:3] img = im.fromarray((img_new * 255).astype(np.uint8)) img.save(image_path)
def correct_sharp_turns(path, img_shape):
    """Detect sharp turns in *path* and rebuild them as crisp corners.

    Candidate turning points come from RDP simplification; at each one,
    lines are fitted to the path before and after the turn, their
    intersection gives the corner position, and the path is re-stitched
    through the corrected corners.

    Returns ``(path_new, tp_indices)``: the corrected path and the
    indices of the inserted turning points within it.
    """
    # Only angles at most theta (i.e. sharper than 135 deg) count as turns.
    # NOTE(review): under Python 2, (3 / 4) is integer division and theta
    # would be 0 — this code assumes Python 3 semantics; confirm.
    theta = (3 / 4) * pi
    min_dist_betw_tps = 5 * road_width
    margin_fit = 3 * road_width
    min_len_fit = 2 * road_width
    max_len_fit = 6 * road_width
    max_l = len(path) - 1
    max_x, max_y = img_shape[1] - 1, img_shape[0] - 1
    # get potential turning points using RDP with a low epsilon
    turning_points = rdp(path, epsilon=1)
    turning_points = turning_points[1:-1]
    tp_indices = []
    for tp in turning_points:
        tuple_tp = tuple(tp)
        tp_indices.append(path.index(tuple_tp))
    # remove turning points to close to the start or the end of the path
    tp_indices_new = []
    for i_tp in tp_indices:
        if 10 <= i_tp < len(path) - 10:
            tp_indices_new.append(i_tp)
    tp_indices = tp_indices_new
    turning_points_new = []
    angles = []
    segment_data_tuples = []
    tp_indices_new = []
    # fit lines at turning points and store all relevant info
    for i in tp_indices:
        segment1, len_s1, s1_i_min, s1_i_max = get_segment(
            path, i, max_len_fit, margin_fit, after=False)
        segment2, len_s2, s2_i_min, s2_i_max = get_segment(
            path, i, max_len_fit, margin_fit, after=True)
        if len_s1 < min_len_fit:
            # Too little path before the turn to fit: use a chord instead.
            line1 = Line(path[0], path[i])
            s1_i_min = 0
        else:
            line1 = fit_line(segment1)
        if len_s2 < min_len_fit:
            line2 = Line(path[i], path[max_l])
            s2_i_max = max_l
        else:
            line2 = fit_line(segment2)
        segment1_data = segment1, len_s1, s1_i_min, s1_i_max
        segment2_data = segment2, len_s2, s2_i_min, s2_i_max
        if line1 and line2:
            angle = line1.angle(line2)
            if angle <= theta:
                ip = line1.intersection(line2)
                if ip:
                    ip = int(round(ip[0])), int(round(ip[1]))
                    # Pull the corner back toward the path: at most
                    # road_width away from the closest path point.
                    cp = closest_point(ip, path[s1_i_max:s2_i_min])
                    line = Line(cp, ip)
                    tp = line.point_on_max_dist_from_p1(road_width)
                    tp = min(tp[0], max_x), min(tp[1], max_y)
                    turning_points_new.append(tp)
                    angles.append(angle)
                    segment_data_tuples.append((segment1_data, segment2_data))
                    tp_indices_new.append(i)
    turning_points = turning_points_new
    tp_indices = tp_indices_new
    # sort turning points by angle and keep only the turning points with the smallest angle in their neighborhood
    tp_indices_sorted_by_angle = sorted(
        range(len(angles)), key=lambda k: angles[k])
    to_keep = [True] * len(turning_points)
    to_remove = set()
    for i in tp_indices_sorted_by_angle:
        if i in to_remove:
            to_keep[i] = False
        else:
            tp_i = turning_points[i]
            for j, tp_j in enumerate(turning_points):
                if i != j and euclidean_distance(tp_i, tp_j) < min_dist_betw_tps:
                    to_remove.add(j)
    turning_points_new = []
    angles_new = []
    segment_data_tuples_new = []
    tp_indices_new = []
    for i, keep in enumerate(to_keep):
        if keep:
            turning_points_new.append(turning_points[i])
            angles_new.append(angles[i])
            segment_data_tuples_new.append(segment_data_tuples[i])
            tp_indices_new.append(tp_indices[i])
    turning_points = turning_points_new
    angles = angles_new
    segment_data_tuples = segment_data_tuples_new
    tp_indices = tp_indices_new
    # compute the distances between remaining adjacent turning points
    distances = []
    for i in range(len(turning_points) - 1):
        dist = euclidean_distance(turning_points[i], turning_points[i + 1])
        distances.append(dist)
    # recompute turning points with lower margins for adjacent turns that are still close to each other
    previous_close = False
    for i in range(len(turning_points)):
        if i < len(turning_points) - 1:
            dist = distances[i]
            if dist < 9 * road_width:
                next_close = True
            else:
                next_close = False
        else:
            next_close = False
        if previous_close or next_close:
            j = tp_indices[i]  # index tp in path
            if previous_close:
                # Shrink the fitting window to half the gap to the previous turn.
                dist_prev = distances[i - 1]
                s1, len_s1, s1_i_min, s1_i_max = get_segment(
                    path, j, dist_prev / 2, dist_prev / 4, after=False)
                segment_data_tuples[i] = (
                    s1, len_s1, s1_i_min, s1_i_max), segment_data_tuples[i][1]
            else:
                s1, len_s1, _, s1_i_max = segment_data_tuples[i][0]
            if next_close:
                dist_next = distances[i]
                s2, len_s2, s2_i_min, s2_i_max = get_segment(
                    path, j, dist_next / 2, dist_next / 4, after=True)
                segment_data_tuples[i] = segment_data_tuples[i][0], (
                    s2, len_s2, s2_i_min, s2_i_max)
                previous_close = True
            else:
                s2, len_s2, s2_i_min, _ = segment_data_tuples[i][1]
                previous_close = False
            line1 = fit_line(s1)
            line2 = fit_line(s2)
            # Debug visualization; disabled by default.
            view = False
            if view:
                plt.figure()
                px = [x for (x, y) in path]
                py = [y for (x, y) in path]
                plt.plot(px, py, c='g', linewidth=1)
                s1x = [x for (x, y) in s1]
                s1y = [y for (x, y) in s1]
                plt.plot(s1x, s1y, c='blue', linewidth=4)
                s2x = [x for (x, y) in s2]
                s2y = [y for (x, y) in s2]
                plt.plot(s2x, s2y, c='red', linewidth=4)
                pixels_line1 = line1.pixels()
                l1x = [x for (x, y) in pixels_line1]
                l1y = [y for (x, y) in pixels_line1]
                plt.plot(l1x, l1y, c='cyan', linewidth=2)
                pixels_line2 = line2.pixels()
                l2x = [x for (x, y) in pixels_line2]
                l2y = [y for (x, y) in pixels_line2]
                plt.plot(l2x, l2y, c='orange', linewidth=2)
                plt.scatter([path[j][0]], [path[j][1]], s=50, c='green')
                plt.scatter([turning_points[i][0]], [turning_points[i][1]],
                            s=50, c='red')
            if line1 and line2:
                ip = line1.intersection(line2)
                if ip:
                    ip = int(round(ip[0])), int(round(ip[1]))
                    cp = closest_point(ip, path[s1_i_max:s2_i_min])
                    line = Line(cp, ip)
                    tp = line.point_on_max_dist_from_p1(1.42 * road_width)
                    tp = min(tp[0], max_x), min(tp[1], max_y)
                    turning_points[i] = tp
                    if view:
                        plt.scatter([tp[0]], [tp[1]], s=50, c='blue')
                        plt.show()
    # insert sharp turns in the path, connect all adjacent turning points that are still close to each other
    path_new = []
    tp_indices = []
    previous_close = False
    n = 0  # index in `path` up to which points have been copied
    for i in range(len(turning_points)):
        tp = turning_points[i]
        _, _, _, s1_i_max = segment_data_tuples[i][0]
        _, _, s2_i_min, _ = segment_data_tuples[i][1]
        if i < len(turning_points) - 1:
            dist = distances[i]
            if dist < 9 * road_width:
                next_close = True
            else:
                next_close = False
        else:
            next_close = False
        if previous_close:
            if next_close:
                # Bridge directly from the previous corner to this one.
                turn = fill([], turning_points[i - 1], tp)[1:]
                previous_close = True
            else:
                turn = fill([tp], turning_points[i - 1], path[s2_i_min])[1:]
                previous_close = False
        else:
            path_new.extend(path[n:s1_i_max])
            if next_close:
                turn = fill([], path[s1_i_max], tp)
                previous_close = True
            else:
                turn = fill([tp], path[s1_i_max], path[s2_i_min])
                previous_close = False
        path_new.extend(turn)
        tp_indices.append(path_new.index(tp))
        n = s2_i_min + 1
    path_new.extend(path[n:])
    return path_new, tp_indices