def create_table(self, table_name, csv_fname, skip_row_num=0, skip_col_num=0, auto_inc_pk_=True, show_=False):
    if show_:
        self.logger.info(" # Create \"{}\" table.".format(table_name))
    utils.file_exists(csv_fname, exit_=True)
    df = pd.read_csv(csv_fname, skiprows=skip_row_num)
    df = df.drop(df.columns[list(range(skip_col_num))], axis=1)
    sql = "CREATE TABLE " + table_name + " ("
    if auto_inc_pk_:
        sql += "id INT AUTO_INCREMENT PRIMARY KEY,"
    sql += " {} {}".format(df.loc[0][0].strip(), df.loc[0][1].strip())
    if isinstance(df.loc[0][2], str) and len(df.loc[0][2]) > 0:
        sql += " " + df.loc[0][2].strip()
    for row in range(1, df.shape[0]):
        if not isinstance(df.loc[row][0], str):
            break
        sql += ", {} {}".format(df.loc[row][0].strip(), df.loc[row][1].strip())
        if isinstance(df.loc[row][2], str) and len(df.loc[row][2]) > 0:
            sql += " " + df.loc[row][2].strip()
    sql += " )"
    cursor = self.con.cursor()
    try:
        cursor.execute(sql)
    except Exception as e:
        self.logger.error(e)
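# A minimal sketch (hypothetical names throughout) of the schema CSV that create_table()
# parses: after the header row, column 0 holds the SQL column name, column 1 the type,
# and column 2 an optional constraint (empty cells are read as NaN and skipped).
# `db` is assumed to be an instance of the handler class this method belongs to.
def run__create_table(db):
    with open("schema.csv", "w") as f:
        f.write("name,type,extra\n"
                "last_name,VARCHAR(64),NOT NULL\n"
                "first_name,VARCHAR(64),NOT NULL\n"
                "age,INT,\n")
    # With auto_inc_pk_=True this builds:
    #   CREATE TABLE people (id INT AUTO_INCREMENT PRIMARY KEY,
    #       last_name VARCHAR(64) NOT NULL, first_name VARCHAR(64) NOT NULL, age INT )
    db.create_table("people", "schema.csv", show_=True)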
def insert_csv_file_into_table(self, table_name, csv_fname, row_num=-1):
    utils.file_exists(csv_fname, exit_=True)
    df = pd.read_csv(csv_fname, dtype=object)
    sql = "INSERT INTO " + table_name + " (" + ', '.join(df.columns) + ") "
    sql += "VALUES (" + ", ".join(["%s"] * len(df.columns)) + ")"
    if row_num < 0:
        row_num = df.shape[0]
    vals = []
    for i in range(row_num):
        if not isinstance(df.loc[i][0], str):
            break
        val = []
        for j in range(len(df.columns)):
            col = df.loc[i][j]
            if not isinstance(col, str):
                col = ""
            val.append(col)
        vals.append(tuple(val))
    try:
        cursor = self.con.cursor()
        cursor.executemany(sql, vals)
        self.con.commit()
        self.print_cursor(cursor)
        self.logger.info("")
        self.logger.info(" # Insert csv file into table: {:d} rows were inserted.".format(cursor.rowcount))
    except Exception as e:
        self.logger.error(e)
    return True
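# Companion sketch for insert_csv_file_into_table() (hypothetical data): the data CSV's
# header row must match the table's column names, since it is spliced directly into the
# INSERT statement; non-string cells are inserted as empty strings.
def run__insert_csv(db):
    with open("people.csv", "w") as f:
        f.write("last_name,first_name,age\n"
                "Doe,Jane,33\n"
                "Roe,Richard,41\n")
    db.insert_csv_file_into_table("people", "people.csv")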
def detect_text_area(img, area_ratio_thresh=0.25, char_min_pxl=10, box_img_=False, debug_=False):
    # Convert to gray scale.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Smooth the image to suppress noise.
    filtered = cv2.medianBlur(gray, 3)
    # Binarize (inverted so text becomes white).
    # thresh = cv2.adaptiveThreshold(gray, 255, 1, 1, 11, 2)
    _, bin_img = cv2.threshold(filtered, thresh=128, maxval=255,
                               type=cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    kernel = np.ones((5, 5), np.uint8)
    """
    kernel[0:2, 0:2] = 0
    kernel[0:2, 3:5] = 0
    kernel[3:5, 3:5] = 0
    kernel[3:5, 0:2] = 0
    kernel[1, 1] = 1
    kernel[1, 3] = 1
    kernel[3, 1] = 1
    kernel[3, 3] = 1
    """
    morph = np.copy(bin_img)
    for _ in range(5):
        morph = cv2.dilate(morph, kernel, iterations=3)
        morph = cv2.erode(morph, kernel, iterations=3)
    # Find the contours (OpenCV 3.x API: findContours returns three values).
    image, contours, hierarchy = cv2.findContours(morph, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # For each contour, keep the bounding rectangles that look like text blocks.
    boxes = []
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        contour_area = cv2.contourArea(contour)
        rect_area = w * h
        if (contour_area / rect_area > area_ratio_thresh) and w > char_min_pxl and h > char_min_pxl:
            boxes.append([x, y, x + w, y + h])
    boxes_img = []
    if debug_:
        boxes_img = hp.draw_boxes_on_img(np.copy(img), boxes, color=hp.RED, thickness=4)
        hp.imshow(morph)
        hp.imshow(boxes_img)
    return boxes, boxes_img
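# A minimal usage sketch for detect_text_area(), following the run__* convention used
# elsewhere in this repo. The image path is hypothetical; the function expects a BGR image.
def run__detect_text_area():
    img = cv2.imread("test_videos/census-1.jpg")
    boxes, _ = detect_text_area(img, area_ratio_thresh=0.25, char_min_pxl=10, debug_=False)
    for x1, y1, x2, y2 in boxes:
        print(" # text block: ({:d}, {:d}) - ({:d}, {:d})".format(x1, y1, x2, y2))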
def run__erase_lines_in_image():
    img_path = 'test_videos/census-1.jpg'
    img_prefix = ''
    imgs, filenames = hp.hp_imread_all_images(img_path, fname_prefix=img_prefix)
    for img in imgs:
        # erase_lines_in_image() returns (img_erase, img_bw_erase); show the first.
        erase_img, _ = erase_lines_in_image(img, pause_img_sec=0)
        hp.hp_imshow(np.hstack((cv2.cvtColor(img, cv2.COLOR_RGB2GRAY), erase_img)),
                     desc="erase lines")
def parse_arguments(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument("--source", default=None, help="source video filename")
    parser.add_argument("--target", default=None, help="target video filename")
    args = parser.parse_args(argv)
    args.source = utils.unicode_normalize(args.source)
    args.target = utils.unicode_normalize(args.target)
    return args
def __init__(self, camera_info, sim_save_dir, edge_uid, logger):
    self.pd_info_dict = dict()
    self.camera_info = camera_info
    self.sim_save_dir = sim_save_dir
    # edge_uid was referenced but never defined; it is assumed to be passed in by the caller.
    self.sim_out_dir = self.get_sim_out_dir(edge_uid, sim_save_dir)
    if os.path.isdir(self.sim_out_dir):
        shutil.rmtree(self.sim_out_dir)
    utils.folder_exists(self.sim_out_dir, create_=True)
    self.logger = logger
def run__derotate():
    img_path = 'test_videos/census-rotate-2.jpeg'
    imgs, filenames = hp.hp_imread_all_images(img_path, fname_prefix='census-rotate-')
    for img in imgs:
        rot_img = derotate_image(img, rot_img_fname=None, check_time_=False, pause_img_sec=0)
        hp.hp_imshow(np.hstack((img, rot_img)), desc="de-rotation")
def make_window_masked_image(self, img, win_pos, color=utils.BLACK, show_sec=-1):
    img = utils.draw_box_on_img(img, win_pos, color=color, thickness=-1, alpha=0)
    utils.imshow(img, desc="make window masked image", pause_sec=show_sec)
    return img
def make_face_boxed_image(img, feat, color=utils.RED, thickness=4, alpha=0.5, show_conf_=True):
    if feat.face_pos_arr is None or feat.face_conf_arr is None:
        return img
    thickness = thickness if show_conf_ else -1
    for pos, conf in zip(feat.face_pos_arr, feat.face_conf_arr):
        img = utils.draw_box_on_img(img, pos, color=color, thickness=thickness, alpha=alpha)
        if show_conf_:
            img = cv2.putText(img, "{:3d}".format(conf), (pos[0] + 4, pos[3] - 4),
                              cv2.FONT_HERSHEY_SIMPLEX, 1, utils.WHITE, 5)
            img = cv2.putText(img, "{:3d}".format(conf), (pos[0] + 4, pos[3] - 4),
                              cv2.FONT_HERSHEY_SIMPLEX, 1, color, 2)
    return img
def init_shm_files(self, shm_ini, shm_shape=None):
    self.shm_idx = 0
    self.shm_file_num = int(shm_ini['shm_file_num'])
    self.prefix = shm_ini['shm_file_prefix']
    if shm_shape:
        self.shm_shape = shm_shape
    else:
        shm_width = int(shm_ini['shm_width'])
        shm_height = int(shm_ini['shm_height'])
        self.shm_shape = (shm_height, shm_width, 3)
    self.shm_arr = []
    for i in range(self.shm_file_num):
        shm_name = self.prefix + str(i)
        shm_path = os.path.join('/dev/shm/', shm_name)
        if utils.file_exists(shm_path):
            os.remove(shm_path)
        shm_size = self.shm_shape[0] * self.shm_shape[1] * self.shm_shape[2]
        shm = shared_memory.SharedMemory(name=shm_name, create=True, size=shm_size)
        # shm = np.ndarray(self.shm_shape, dtype=np.uint8, buffer=shm.buf)
        self.shm_arr.append(shm)
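# A minimal sketch of the shared-memory round trip behind init_shm_files(), using only
# the standard library. The segment name and shape are hypothetical; in the class above
# they come from the INI (shm_file_prefix, shm_width, shm_height).
def run__shm_round_trip():
    shape = (720, 1280, 3)
    size = shape[0] * shape[1] * shape[2]
    shm = shared_memory.SharedMemory(name="shm_demo0", create=True, size=size)
    writer_view = np.ndarray(shape, dtype=np.uint8, buffer=shm.buf)
    writer_view[:] = 128  # a writer fills the buffer in place, e.g. writer_view[:] = frame
    # A reader (typically another process) attaches by name instead of creating.
    reader = shared_memory.SharedMemory(name="shm_demo0")
    reader_view = np.ndarray(shape, dtype=np.uint8, buffer=reader.buf)
    print(reader_view[0, 0])  # -> [128 128 128]
    reader.close()
    shm.close()
    shm.unlink()  # only the owning process should unlink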
def draw_line_from_rho_and_theta(img, rho, theta, pause_sec=-1):
    img_sz = img.shape[1::-1]
    a, b = np.cos(theta), np.sin(theta)
    x0, y0 = a * rho, b * rho
    x = []
    y = []
    if b != 0:
        slope = -a / b
        # Intersections with the left and right image borders.
        y1 = slope * (-x0) + y0
        if 0 <= y1 < img_sz[1]:
            x.append(0)
            y.append(y1)
        y1 = slope * (img_sz[0] - 1 - x0) + y0
        if 0 <= y1 < img_sz[1]:
            x.append(img_sz[0] - 1)
            y.append(y1)
        # Intersections with the top and bottom image borders.
        x1 = (-y0) / slope + x0
        if 0 <= x1 < img_sz[0]:
            x.append(x1)
            y.append(0)
        x1 = (img_sz[1] - 1 - y0) / slope + x0
        if 0 <= x1 < img_sz[0]:
            x.append(x1)
            y.append(img_sz[1] - 1)
    else:
        x = [x0, x0]
        y = [0, img_sz[1] - 1]
    angle = (90 - (theta * 180 / np.pi))
    if pause_sec >= 0:
        print(" # rotated angle = {:f} <- ({:.3f}, {:.3f})".format(angle, theta, rho))
    if len(x) == 2:
        pts = [[int(x[0] + 0.5), int(y[0] + 0.5)], [int(x[1] + 0.5), int(y[1] + 0.5)]]
    else:
        if pause_sec >= 0:
            print(" @ Warning: rho is zero.\n")
        pts = [[0, 0], [0, 0]]
    line_img = np.copy(img)
    cv2.line(line_img, (pts[0][0], pts[0][1]), (pts[1][0], pts[1][1]), hp.RED, 4)
    hp.hp_imshow(line_img, pause_sec=pause_sec)
    return pts
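# A worked example of the (rho, theta) parameterization handled above, on a synthetic
# image: with theta = 0 the line normal points along +x, so the line is vertical at
# x = rho and its endpoints land on the top and bottom borders. Display relies on the
# repo's hp helpers.
def run__draw_line_from_rho_and_theta():
    canvas = np.full((200, 300, 3), 255, dtype=np.uint8)
    pts = draw_line_from_rho_and_theta(canvas, 150.0, 0.0, pause_sec=0)
    print(pts)  # expected: [[150, 0], [150, 199]]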
def make_window_boxed_image(img, veh_feats, color=None, show_sec=-1):
    for veh_idx, veh_feat in enumerate(veh_feats):
        if veh_feat.win_conf_arr:
            color = utils.get_color(i=veh_idx, primary_=False) if color is None else color
            for win_idx, win_pos in enumerate(veh_feat.win_pos_arr):
                img = utils.draw_quadrilateral_on_image(img, win_pos, color=color, thickness=4)
                text = "{}: {:2d}".format("Window", veh_feat.win_conf_arr[win_idx])
                img = cv2.putText(img, text, (win_pos[0], win_pos[1] - 10),
                                  cv2.FONT_HERSHEY_SIMPLEX, 2, color, 4)
    utils.imshow(img, desc="make window boxed image", pause_sec=show_sec)
    return img
def get_roi(img, roi, imshow_sec=-1, clockwise_=True):
    roi = np.array(roi)
    # If the ROI is given in normalized [0, 1] coordinates, scale it to pixels.
    roi = roi * np.array(img.shape[1::-1]) if not sum(sum(roi > 1)) else roi
    if clockwise_:
        roi[[2, 3]] = roi[[3, 2]]
    roi_corners = np.array([[tuple(x) for x in roi]], dtype=np.int32)
    ignore_mask_color = (255, ) * img.shape[2]
    mask = cv2.fillPoly(np.zeros(img.shape, dtype=np.uint8), roi_corners, color=ignore_mask_color)
    roi_img = cv2.bitwise_and(img, mask)
    utils.imshow(roi_img, desc="roi image", pause_sec=imshow_sec)
    # gray_img = cv2.cvtColor(roi_img, code=cv2.COLOR_RGB2GRAY)
    offset = [[0, 0], list(img.shape[1::-1])]
    return roi_img, offset
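# A minimal usage sketch for get_roi() (hypothetical values), assuming the repo's utils
# helpers. With the default clockwise_=True the four corners are expected in row-major
# order (TL, TR, BL, BR); the swap above converts them into fillPoly polygon order.
def run__get_roi():
    img = np.full((480, 640, 3), 128, dtype=np.uint8)
    roi = [[0.1, 0.1], [0.9, 0.1], [0.1, 0.9], [0.9, 0.9]]  # TL, TR, BL, BR, normalized
    roi_img, offset = get_roi(img, roi, imshow_sec=-1)
    print(roi_img.shape, offset)  # pixels outside the quadrilateral are zeroed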
def parse_arguments(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument("--info_path", required=True, help="info file path")
    parser.add_argument("--save_dir", required=True, help="save directory")
    args = parser.parse_args(argv)
    args.info_path = utils.unicode_normalize(args.info_path)
    return args
def get_images_from_video(vid_fname, out_path, frame_interval, logger=Logger.get_stdout_logger()):
    utils.file_exists(vid_fname, exit_=True)
    utils.folder_exists(out_path, exit_=False, create_=True, print_=True)
    logger.info(" # Extract image from video, {}".format(vid_fname))
    vid = mpy.VideoFileClip(vid_fname)
    base_fname = os.path.splitext(os.path.basename(vid_fname))[0]
    i_digit = int(np.log10(vid.duration / frame_interval)) + 1
    n_digit = int(np.log10(vid.duration)) + 3
    for i, s in enumerate(itools.numeric_range(0, vid.duration, frame_interval)):
        frame = vid.get_frame(s)
        time_info = "__" + \
                    "{:0{width}d}".format(i, width=i_digit) + \
                    "__" + \
                    "{:0{width}.1f}sec".format(s, width=n_digit)
        out_fname = os.path.join(out_path, base_fname + time_info + IMAGE_FILE_EXT)
        utils.imwrite(frame, out_fname)
        logger.info(" # save image, {}".format(out_fname))
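# A minimal usage sketch (hypothetical paths), extracting one frame every two seconds;
# output names follow the pattern <base>__<index>__<seconds>sec + IMAGE_FILE_EXT.
def run__get_images_from_video():
    get_images_from_video("test_videos/demo.mp4", "test_videos/frames/", frame_interval=2.0)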
def find_black_rects(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    retval, thresh_gray = cv2.threshold(gray, thresh=100, maxval=255, type=cv2.THRESH_BINARY)
    # rsz_img = cv2.resize(thresh_gray, (0, 0), fx=0.3, fy=0.3)
    # cv2.imshow("result", rsz_img)
    # cv2.waitKey(0)
    cv2.bitwise_not(thresh_gray, thresh_gray)
    _, contours, hierarchy = cv2.findContours(thresh_gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    i = 0
    for cont in contours:
        x1, y1, w, h = cv2.boundingRect(cont)
        k = float(h) / w
        if 0.9 < k < 1.1 and 1000 < w * h < 1500:
            cv2.drawContours(img, contours, i, (0, 250, 0), thickness=10)
        i += 1
    hp.hp_imshow(img)
def run__crop():
    paper_size = 'A4'
    ref_dim = [1280, None]
    org_img = hp.hp_imread("test_videos/DB_1.jpg")
    ret, det_vertices, box_img = detect_four_corners_based_on_ref_squares(
        org_img, (0, 0), search_margin=0.1, square_width=50. / 2480, debug_=False)
    if paper_size == 'A4':
        ref_dim[1] = int(ref_dim[0] * np.sqrt(2.))
    tar_vertices = [[0, 0], [ref_dim[0], 0], [0, ref_dim[1]], [ref_dim[0], ref_dim[1]]]
    mtx = cv2.getPerspectiveTransform(np.float32(det_vertices), np.float32(tar_vertices))
    warp_img = cv2.warpPerspective(org_img, mtx, dsize=tuple(ref_dim), flags=cv2.INTER_LINEAR)
    hp.hp_imshow(warp_img, "output")
    hp.hp_imwrite(warp_img, "output.png")
def make_face_masked_image(img, face_pos_arr, color=utils.RED, margin=0, show_sec=-1):
    if face_pos_arr is None:
        return img
    for face_pos in face_pos_arr:
        pos = [face_pos[0] - margin, face_pos[1] - margin,
               face_pos[2] + margin, face_pos[3] + margin]
        pos = utils.check_box_boundary(pos, img.shape[1::-1])
        img = utils.draw_box_on_img(img, pos, color=color, thickness=-1, alpha=0)
    utils.imshow(img, desc="make face masked image", pause_sec=show_sec)
    return img
def create_multi_svr_ini(_this_basename_, ini_fname, proc_offset, port_inc=10):
    ini = utils.get_ini_parameters(ini_fname)
    if proc_offset >= 0:
        logger_name = ini['LOGGER']['name'] + '_{}'.format(proc_offset + 1)
        ini['LOGGER']['name'] = logger_name
        ini['LOGGER']['prefix'] = logger_name + '.'
        ini['LOGGER']['folder'] = 'Log/{}/'.format(proc_offset + 1)
        ini['SERVER_MODE']['name'] = logger_name
        ini['SERVER_MODE']['port'] = str(int(ini['SERVER_MODE']['port']) + proc_offset * port_inc)
        ini['SERVER_MODE']['acronym'] = ini['SERVER_MODE']['acronym'] + '{}'.format(proc_offset + 1)
    return ini
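# A minimal usage sketch for create_multi_svr_ini() (hypothetical INI file): derive
# per-process sections for three server instances. With port_inc=10 and a base port of
# 8000 in [SERVER_MODE], the instances get ports 8000, 8010, and 8020.
def run__create_multi_svr_ini():
    for offset in range(3):
        ini = create_multi_svr_ini("demo", "server.ini", proc_offset=offset)
        print(ini['SERVER_MODE']['name'], ini['SERVER_MODE']['port'])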
def __init__(self, write_video: str = None, limit_time: int = None, input_video_path: str = None):
    # Init ini
    self.ini = utils.get_ini_parameters(INI_FNAME)
    # Init logger
    self.logger = setup_logger_with_ini(self.ini['LOGGER'], logging_=True, console_=True)
    # Image size
    self.img_height = int(self.ini['OUT_VIDEO']['height'])
    self.img_width = int(self.ini['OUT_VIDEO']['width'])
    # Init video capture updater
    status = pyinotify.Stats()
    self.video_capture_updater = VideoCaptureUpdater(status, input_video_path=input_video_path)
    # Change process name
    setproctitle("python StreamAdaptor.py in StreamAdaptor")
    # Last send socket time
    self.last_send_socket_time = time.time()
    # Write video
    self.write_video = write_video
    self.video_writer = None
    self.limit_time = limit_time
    self.start_time = time.time()
    if self.write_video is not None:
        self.logger.info('WRITE VIDEO MODE : ON')
        codec = cv2.VideoWriter_fourcc(*'XVID')
        self.video_writer = cv2.VideoWriter(self.write_video,
                                            fourcc=codec,
                                            fps=float(self.video_capture_updater.frame_fps),
                                            frameSize=(self.img_width, self.img_height))
def __init__(self, username, passwd, hostname=DEFAULT_HOST, port=DEFAULT_PORT,
             database=None, table=None, logger=None, show_=False):
    self.con = None
    self.db_name = database
    self.table_name = table
    self.logger = logger
    if self.logger is None:
        self.logger = utils.setup_logger(None, None, logger_=False)
    if show_:
        self.logger.info(" # connect db : {}:{}".format(hostname, port))
        self.logger.info(" # Create the instance of MariaDB handler, \"{}.{}\"".format(database, table))
    try:
        self.con = mysql.connector.connect(host=hostname, port=port, user=username,
                                           passwd=passwd, database=database)
    except Exception as e:
        self.logger.error(e)
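# A minimal connection sketch tying the pieces above together. The class name
# (MariaDbHandler), credentials, and database are assumptions, not taken from this file.
def run__connect_db():
    db = MariaDbHandler("user", "secret", hostname="127.0.0.1", port=3306,
                        database="testdb", table="people", show_=True)
    run__create_table(db)
    run__insert_csv(db)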
def get(self):
    frame_no = self.frame_no
    self.vid_cap.set(cv2.CAP_PROP_POS_FRAMES, frame_no)
    ret, frame = self.vid_cap.read()
    if ret:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        if self.mmap is None:
            frame_info = frame
        else:
            if self.mmap.mmap_shape[0] != frame.shape[0] or self.mmap.mmap_shape[1] != frame.shape[1]:
                # mmap_shape is (height, width, ...), so map the axes accordingly.
                frame = utils.imresize(frame, width=self.mmap.mmap_shape[1],
                                       height=self.mmap.mmap_shape[0])
            frame_info = self.mmap.write_mmap(frame)
        self.frame_no += self.move_frame
    else:
        frame_info = None
    return frame_no, frame_info
def check_lines_in_img(img, algorithm='HoughLineTransform'):
    if len(img.shape) == 3:
        img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        img_rgb = np.copy(img)
    else:
        img_gray = np.copy(img)
        img_rgb = cv2.cvtColor(img_gray, cv2.COLOR_GRAY2RGB)
    ret, img_bw = cv2.threshold(img_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    img_edge = cv2.Canny(img_bw, 50, 150, apertureSize=3)
    if algorithm == 'HoughLineTransform':
        lines = cv2.HoughLines(img_edge, 1, np.pi / 180, 100)
        print(" # Total lines: {:d}".format(len(lines)))
        for line in lines:
            img_lines = np.copy(img_rgb)
            dim = img_lines.shape
            rho = line[0][0]
            theta = line[0][1]
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a * rho
            y0 = b * rho
            x = []
            y = []
            if b != 0:
                slope = -a / b
                y1 = slope * (-x0) + y0
                if 0 <= y1 < dim[0]:
                    x.append(0)
                    y.append(y1)
                y1 = slope * (dim[1] - 1 - x0) + y0
                if 0 <= y1 < dim[0]:
                    x.append(dim[1] - 1)
                    y.append(y1)
                x1 = (-y0) / slope + x0
                if 0 <= x1 < dim[1]:
                    x.append(x1)
                    y.append(0)
                x1 = (dim[0] - 1 - y0) / slope + x0
                if 0 <= x1 < dim[1]:
                    x.append(x1)
                    y.append(dim[0] - 1)
            else:
                x = [x0, x0]
                y = [0, dim[0] - 1]
            angle = (90 - (theta * 180 / np.pi))
            print(" # rotated angle = {:.1f} <- ({:f}, {:f})".format(angle, theta, rho))
            if len(x) == 2:
                img_lines = cv2.line(img_rgb, (int(x[0]), int(y[0])), (int(x[1]), int(y[1])), hp.RED, 4)
                hp.plt_imshow(img_lines)
                # if -5 < angle < 0 or 0 < angle < 5:
                #     plt_imshow(img_line)
            else:
                print(" @ Warning: something wrong.\n")
    elif algorithm == 'ProbabilisticHoughTransform':
        end_pts_list = cv2.HoughLinesP(img_edge, 1, np.pi / 180, threshold=100,
                                       minLineLength=100, maxLineGap=50)
        img_lines = np.copy(img_rgb)
        print(" # Total lines: {:d}".format(len(end_pts_list)))
        for end_pts in end_pts_list:
            cv2.line(img_lines, tuple(end_pts[0][0:2]), tuple(end_pts[0][2:]), hp.RED, 10)
            angle = np.arctan2(end_pts[0][3] - end_pts[0][1],
                               end_pts[0][2] - end_pts[0][0]) * 180. / np.pi
            print(" # rotated angle = {:.1f}".format(angle))
        hp.hp_imshow(img_lines)
        # if -5 < angle < 0 or 0 < angle < 5:
        #     plt_imshow(img_line)
    return True
def process_frame(self, frame_no, in_frame):
    ret_img = in_frame
    ret_img = self.draw_text(ret_img, self.frame_no_text_pos,
                             "Frame : {}".format(frame_no), font=self.font_frame)
    # TODO: keep the previous plate detection and gradually thin its box,
    # removing it after the final detection. But how to handle two plates
    # detected at the same time?
    plt_info_list = []
    if frame_no in self.pd_info_dict:
        plt_pos_arr, plt_list = self.pd_info_dict[frame_no]
        for plt_pos in plt_pos_arr:
            plt_pts = utils.transform_quadrilateral_to_rectangle(plt_pos, algo='max', margin=0)
            crop_plt_img = ret_img[plt_pts[0][1]:plt_pts[1][1], plt_pts[0][0]:plt_pts[1][0]]
            plt_info_list.append((plt_pts, crop_plt_img.copy()))
    ret_img = utils.draw_quadrilateral_on_image(ret_img, self.camera_roi, color=utils.GREEN,
                                                clockwise_=False, thickness=4)
    if len(plt_info_list) > 0:
        # Redraw the plate crops since the ROI lines may cross over the plates.
        for plt_info in plt_info_list:
            pts, crop_plt_img = plt_info
            ret_img[pts[0][1]:pts[1][1], pts[0][0]:pts[1][0]] = crop_plt_img
        # Draw the plate boxes.
        ret_img = utils.draw_quadrilateral_on_image(ret_img, plt_pos_arr, color=utils.RED,
                                                    clockwise_=True, thickness=10)
        if plt_list is not None:
            for idx in range(len(plt_pos_arr)):
                if idx < len(plt_list):
                    plt_num = plt_list[idx]['plt_num']
                else:
                    plt_num = "not found"
                plt_pos = plt_pos_arr[idx]
                text_box2 = (min(plt_pos[0][0] + 600, ret_img.shape[1]), plt_pos[0][1] - 10)
                text_box1 = (text_box2[0] - 600, plt_pos[0][1] - (self.font_height + 20))
                ret_img = cv2.rectangle(ret_img, text_box1, text_box2, utils.BLACK, -1)
                ret_img = self.draw_text(ret_img, (text_box1[0], text_box1[1]), plt_num)
        self.last_plt_list = (plt_info_list, plt_list)
    if self.last_plt_list:
        plt_info_list, plt_list = self.last_plt_list
        box_l = self.last_box_tl[0]
        box_r = self.last_box_br[0]
        text_pos_x = self.last_text_pos[0]
        for idx in range(len(plt_info_list)):
            if idx < len(plt_list):
                if self.is_paint_black_box:
                    ret_img = cv2.rectangle(ret_img, (box_l, self.last_box_tl[1]),
                                            (box_r, self.last_box_br[1]), utils.BLACK, -1)
                plt_num = plt_list[idx]['plt_num']
                plt_url = plt_list[idx]['plt_uri']
                ret_img = self.draw_text(ret_img, (text_pos_x, self.last_text_pos[1]), plt_num)
                plt_img = utils.imread(plt_url)
                if plt_img is not None:
                    plt_img = utils.imresize(plt_img, height=self.plt_height)
                    plt_height = plt_img.shape[0]
                    plt_width = plt_img.shape[1]
                    ret_img[self.last_plt_pos_y:self.last_plt_pos_y + plt_height,
                            text_pos_x:text_pos_x + plt_width] = plt_img
            box_l += 700
            box_r += 700
            text_pos_x += 700
    return ret_img
def make(self):
    if not self.sim_save_dir:
        return
    video_fname = self.camera_id + '.'
    video_fname += utils.get_datetime().replace(":", "-") + '.mp4'
    video_path = os.path.join(self.sim_save_dir, video_fname)
    if os.path.isfile(video_path):
        os.remove(video_path)  # a file must be removed with os.remove, not shutil.rmtree
    self.logger.info("start making video from ImgExtract and ImgAVR. {}".format(video_path))
    if self.total_frame:
        process1 = (ffmpeg
                    .input(self.video_url)
                    .output('pipe:', vframes=self.total_frame, format='rawvideo', pix_fmt='rgb24')
                    .run_async(pipe_stdout=True))
    else:
        process1 = (ffmpeg
                    .input(self.video_url)
                    .output('pipe:', format='rawvideo', pix_fmt='rgb24')
                    .run_async(pipe_stdout=True))
    process2 = (ffmpeg
                .input('pipe:', format='rawvideo', pix_fmt='rgb24',
                       # frames are written after the optional resize below, so declare the output size
                       s='{}x{}'.format(self.img_width, self.img_height))
                .output(video_path, pix_fmt='yuv420p')
                .overwrite_output()
                .run_async(pipe_stdin=True))
    read_size = self.video_width * self.video_height * 3
    frame_no = 0
    while True:
        in_bytes = process1.stdout.read(read_size)
        if not in_bytes:
            break
        in_frame = np.frombuffer(in_bytes, np.uint8).reshape([self.video_height, self.video_width, 3])
        if self.img_width != self.video_width or self.img_height != self.video_height:
            in_frame = utils.imresize(in_frame, width=self.img_width, height=self.img_height)
        out_frame = self.process_frame(frame_no, in_frame)
        process2.stdin.write(out_frame.astype(np.uint8).tobytes())
        if frame_no != 0 and (frame_no % (30 * 60)) == 0:
            self.logger.info("making video. {} th frame writing".format(frame_no))
        frame_no += 1
    process2.stdin.close()
    process1.wait()
    process2.wait()
    self.logger.info("end making video. total:{} frame".format(frame_no))
def line_removal(img):
    """
    Remove horizontal and vertical lines via morphological filtering,
    then smooth the result along the extracted edges.
    :param img: input BGR image
    :return:
    """
    if img is None:
        print("Image is empty!")
        return
    hp.hp_imshow(img, desc="Original image")
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    hp.hp_imshow(gray, desc="Gray image")
    bw = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, -2)
    hp.hp_imshow(bw, desc="Binary image")
    hz = np.copy(bw)
    vt = np.copy(bw)
    # Extract horizontal lines with a wide, 1-pixel-high structuring element.
    hz_size = int(hz.shape[1] / 30)
    hz_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (hz_size, 1))
    hz = cv2.erode(hz, hz_structure, iterations=1)
    hz = cv2.dilate(hz, hz_structure, iterations=1)
    hp.hp_imshow(hz, desc="horizontal")
    # Extract vertical lines with a tall, 1-pixel-wide structuring element.
    vt_size = int(vt.shape[0] / 30)
    vt_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (1, vt_size))
    vt = cv2.erode(vt, vt_structure, iterations=1)
    vt = cv2.dilate(vt, vt_structure, iterations=1)
    hp.hp_imshow(vt, desc="vertical")
    # bitwise_not
    vt = cv2.bitwise_not(vt)
    hp.hp_imshow(vt, desc="vertical bit")
    """
    Extract edges and smooth image according to the logic
    1. extract edges
    2. dilate(edges)
    3. src.copyTo(smooth)
    4. blur smooth img
    5. smooth.copyTo(src, edges)
    """
    edges = cv2.adaptiveThreshold(vt, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, -2)
    hp.hp_imshow(edges, desc="edges")
    kernel = np.ones((2, 2), dtype="uint8")
    edges = cv2.dilate(edges, kernel)
    hp.hp_imshow(edges, desc="dilated edges")
    smooth = np.copy(vt)
    smooth = cv2.blur(smooth, (2, 2))
    # smooth.copyTo(src, edges): copy the blurred pixels back where the edge mask is set.
    vt[edges != 0] = smooth[edges != 0]
    hp.hp_imshow(vt, desc="smooth")
def derotate_image(img, max_angle=30, max_angle_candidates=50, angle_resolution=0.5,
                   inside_margin_ratio=0.1, rot_img_fname=None, check_time_=False,
                   pause_img_sec=-1):
    """
    Derotate image.

    :param img:
    :param max_angle: Maximum rotated angle. The angles above this should be ignored.
    :param max_angle_candidates:
    :param angle_resolution:
    :param inside_margin_ratio:
    :param rot_img_fname:
    :param check_time_:
    :param pause_img_sec:
    :return:
    """
    start_time = None
    if check_time_:
        start_time = time.time()
    if len(img.shape) == 3:
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # img_gray = np.amin(img, axis=2)
    else:
        img_gray = np.copy(img)
    inside_margin = [int(x * inside_margin_ratio) for x in img.shape[1::-1]]
    img_gray[:inside_margin[1], :] = 255
    img_gray[-inside_margin[1]:, :] = 255
    img_gray[:, :inside_margin[0]] = 255
    img_gray[:, -inside_margin[0]:] = 255
    if False:
        check_lines_in_img(img, algorithm='HoughLineTransform')
        check_lines_in_img(img, algorithm='ProbabilisticHoughTransform')
    ret, img_bw = cv2.threshold(img_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    """
    kernel = np.ones((5, 5), np.uint8)   # note this is a horizontal kernel
    bw = np.copy(img_bw)
    for i in range(9):
        bw = cv2.erode(bw, kernel, iterations=1)
        bw = cv2.dilate(bw, kernel, iterations=1)
    hp.hp_imshow(hp.hstack_images((img_bw, bw)))
    """
    img_edge = cv2.Canny(img_bw, 50, 150, apertureSize=3)
    if False:
        hp.hp_imshow(img_edge)
        # hp.plt_imshow(edges)
    lines = cv2.HoughLines(img_edge, 1, np.pi / 360, int(min(img_edge.shape) / 8.))
    angles = []
    if lines is not None:
        for cnt, line in enumerate(lines):
            angle = int((90 - line[0][1] * 180 / np.pi) / float(angle_resolution)) * angle_resolution
            draw_line_from_rho_and_theta(img, line[0][0], line[0][1], pause_sec=-1)
            if abs(angle) < max_angle:
                angles.append(angle)
            if max_angle_candidates < cnt:
                break
    # rot_angle = max(set(angles), key=angles.count)
    sorted_angles = sorted({x: angles.count(x) for x in angles}.items(),
                           key=operator.itemgetter(1), reverse=True)
    if len(sorted_angles) == 0:
        rot_angle = 0
    elif len(sorted_angles) == 1:
        rot_angle = sorted_angles[0][0]
    elif sorted_angles[0][0] == 0 and (sorted_angles[0][1] < 2 * sorted_angles[1][1]):
        rot_angle = sorted_angles[1][0]
    elif (sorted_angles[0][1] / sorted_angles[1][1]) < 3 and \
            abs(sorted_angles[0][0] - sorted_angles[1][0]) <= 1.0:
        rot_angle = (sorted_angles[0][0] + sorted_angles[1][0]) / 2.
    else:
        rot_angle = sorted_angles[0][0]
    """
    if rot_angle != 0:
        rot_angle += 0.5
    """
    if pause_img_sec >= 0:
        print("# Rotated angle is {:5.1f} degree.".format(rot_angle))
    sz = img_bw.shape[1::-1]
    rot_img = ~imutils.rotate(~img, angle=-rot_angle,
                              center=(int(sz[0] / 2), int(sz[1] / 2)), scale=1)
    if check_time_:
        print(" # Time for rotation detection and de-rotation if any : {:.2f} sec"
              .format(float(time.time() - start_time)))
    if 0 <= pause_img_sec:
        hp.imshow(np.concatenate((img, rot_img), axis=1),
                  pause_sec=pause_img_sec, desc="de-rotation")
    if rot_img_fname:
        hp.hp_imwrite(rot_img_fname, rot_img, 'RGB')
    return rot_img
def erase_lines_in_image(img, check_time_=False, pause_img_sec=-1):
    """
    Erase lines.

    :param img:
    :param check_time_:
    :param pause_img_sec:
    :return:
    """
    erase_window_sz = 9
    line_thresh = 32
    start_time = None
    if check_time_:
        start_time = time.time()
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if len(img.shape) == 3 else np.copy(img)
    ret, img_bw = cv2.threshold(img_gray, 128, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    """
    kernel = np.ones((1, 5), np.uint8)   # note this is a horizontal kernel
    bw = np.copy(img_bw)
    for i in range(9):
        bw = cv2.dilate(bw, kernel, iterations=1)
        bw = cv2.erode(bw, kernel, iterations=1)
    hp.hp_imshow(hp.hstack_images((img_bw, bw)))
    """
    img_edge = cv2.Canny(img_bw, 50, 150, apertureSize=3)
    lines = cv2.HoughLines(img_edge, 1, np.pi / 360, 200)
    print(" # Total number of lines detected is {:d}.".format(len(lines)))
    hp.hp_imshow(img_bw, pause_sec=pause_img_sec)
    img_erase = np.ones((img_bw.shape[:2]), dtype=np.uint8) * 255
    for cnt, line in enumerate(lines):
        line_pts = draw_line_from_rho_and_theta(img, line[0][0], line[0][1],
                                                pause_sec=pause_img_sec)
        x0, y0, x1, y1 = line_pts[0][0], line_pts[0][1], line_pts[1][0], line_pts[1][1]
        pnts = []
        if (x1 - x0) > (y1 - y0):
            for x in range(x0, x1):
                y = (y1 - y0) / (x1 - x0) * (x - x0) + y0
                pnts.append([int(x), int(y), img_bw[int(y), int(x)]])
        else:
            for y in range(y0, y1):
                x = (x1 - x0) / (y1 - y0) * (y - y0) + x0
                pnts.append([int(x), int(y), img_bw[int(y), int(x)]])
        cnt = 0
        stt_pnt = 0
        for i in range(len(pnts)):
            if pnts[i][2] == 255:
                if cnt == 0:
                    stt_pnt = i
                cnt += 1
            else:
                if cnt > line_thresh:
                    # print(" > {:d}-th line: {:d} + {:d} = {:d}".format(cnt, stt_pnt, cnt, stt_pnt + cnt))
                    for j in range(cnt):
                        pos = pnts[stt_pnt + j][:2]
                        if (x1 - x0) > (y1 - y0):
                            img_erase[pos[1] - erase_window_sz:pos[1] + erase_window_sz + 1, pos[0]] = 0
                        else:
                            img_erase[pos[1], pos[0] - erase_window_sz:pos[0] + erase_window_sz + 1] = 0
                cnt = 0
    hp.hp_imshow(img_erase, pause_sec=pause_img_sec)
    if check_time_:
        print(" # The processing time of erasing line function is {:.3f} sec"
              .format(float(time.time() - start_time)))
    img_bw_erase = ((img_erase == 0) * 0 + (img_erase != 0) * img_bw).astype(np.uint8)
    return img_erase, img_bw_erase
def detect_four_corners_based_on_ref_squares(img, ref_vertex, search_margin=0.1,
                                             square_width=50. / 2480, debug_=False):
    """
    Detect four quadrilateral vertices based on reference black squares.
    It is assumed that four black squares are located near the four corners
    based on reference vertices.

    :param img:
    :param ref_vertex:
    :param search_margin:
    :param square_width:
    :param debug_:
    :return: status, detected vertices, output image
    """
    square_ratio_range = [0.8, 1.2]
    square_width_margin = 0.5
    square_fill_thresh = 0.8
    debug_in_ = False
    dim = img.shape[1::-1]
    square_width = square_width * dim[0]
    real_vertices = hp.generate_four_vertices_from_ref_vertex(ref_vertex, dim)
    crop_boxes = []
    offsets = [int(x * search_margin) for x in dim]
    crop_boxes.append([real_vertices[0][0], real_vertices[0][1],
                       real_vertices[0][0] + offsets[0], real_vertices[0][1] + offsets[1]])
    crop_boxes.append([real_vertices[1][0] - offsets[0], real_vertices[1][1],
                       real_vertices[1][0], real_vertices[1][1] + offsets[1]])
    crop_boxes.append([real_vertices[2][0], real_vertices[2][1] - offsets[1],
                       real_vertices[2][0] + offsets[0], real_vertices[2][1]])
    crop_boxes.append([real_vertices[3][0] - offsets[0], real_vertices[3][1] - offsets[1],
                       real_vertices[3][0], real_vertices[3][1]])
    detected_vertices = []
    kernel = np.ones((5, 5), np.uint8)
    for idx in range(4):
        crop_img = img[crop_boxes[idx][1]:crop_boxes[idx][3], crop_boxes[idx][0]:crop_boxes[idx][2]]
        gray_img = cv2.cvtColor(crop_img, cv2.COLOR_RGB2GRAY)
        ret, thresh_gray = cv2.threshold(gray_img, thresh=200, maxval=255,
                                         # type=cv2.THRESH_BINARY)
                                         type=cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        for _ in range(3):
            thresh_gray = cv2.morphologyEx(thresh_gray, cv2.MORPH_CLOSE, kernel)
        cv2.bitwise_not(thresh_gray, thresh_gray)
        thresh_color = cv2.cvtColor(thresh_gray, cv2.COLOR_GRAY2RGB)
        ret, contours, hierarchy = cv2.findContours(thresh_gray, cv2.RETR_EXTERNAL,
                                                    cv2.CHAIN_APPROX_SIMPLE)
        min_width_ratio = 1
        det_vertex = []
        for i, cont in enumerate(contours):
            x1, y1, w, h = cv2.boundingRect(cont)
            ratio = h / w
            width_ratio = abs(square_width - w) / square_width
            if debug_in_:
                if w > 10 and h > 10:
                    # cv2.drawContours(crop_img, contours, i, hp.GREEN, thickness=4)
                    cv2.drawContours(thresh_color, contours, i, hp.GREEN, thickness=4)
                    print("-------")
                    print(i)
                    print(x1, y1, w, h, width_ratio)
                    print(ratio, cv2.contourArea(cont) / (w * h))
            if square_ratio_range[0] < ratio < square_ratio_range[1] and \
                    width_ratio < square_width_margin and \
                    cv2.contourArea(cont) / (w * h) > square_fill_thresh:
                moments = cv2.moments(cont)
                cx = int(moments['m10'] / moments['m00'])
                cy = int(moments['m01'] / moments['m00'])
                if width_ratio < min_width_ratio:
                    det_vertex = [cx + crop_boxes[idx][0], cy + crop_boxes[idx][1]]
                    min_width_ratio = width_ratio
                    # print("****")
                if debug_:
                    disp_img = np.copy(crop_img)
                    cv2.drawContours(disp_img, contours, i, hp.RED, thickness=4)
                    cv2.circle(disp_img, (cx, cy), 8, hp.GREEN, -1)
                    hp.hp_imshow(disp_img)
        if debug_in_:
            hp.hp_imshow(thresh_color, desc="thresh_color")
        if det_vertex:
            detected_vertices.append(det_vertex)
    box_img = np.copy(img)
    if len(detected_vertices) != 4:
        # print(" @ Error: 4 corners are NOT detected!")
        return False, detected_vertices, img
    cv2.line(box_img, tuple(detected_vertices[0]), tuple(detected_vertices[1]), hp.RED, 10)
    cv2.line(box_img, tuple(detected_vertices[0]), tuple(detected_vertices[2]), hp.RED, 10)
    cv2.line(box_img, tuple(detected_vertices[1]), tuple(detected_vertices[3]), hp.RED, 10)
    cv2.line(box_img, tuple(detected_vertices[2]), tuple(detected_vertices[3]), hp.RED, 10)
    if debug_:
        hp.hp_imshow(box_img, desc="four corners")
    # Return the annotated image, which run__crop() above unpacks as box_img.
    return True, detected_vertices, box_img