class Parse(object):
    """Parser for a tiny turtle-style drawing language.

    Each input line holds one command:
      P <n>          select pen style n (index into the style list)
      D / U          pen down / pen up
      W/N/E/S <d>    draw a line left/up/right/down by distance <d>
    Text after '#' is a comment; blank lines are ignored.
    """

    # Direction commands mapped to the Drawing method that executes them.
    _MOVES = {
        'W': 'line_left',
        'N': 'line_up',
        'E': 'line_right',
        'S': 'line_down',
    }

    def __init__(self):
        # Matplotlib-style pen specs, selected by 'P <index>'.
        self._pen_style_list = ['b-', 'r-o', 'g-*']
        self._draw = Drawing()

    def get_cur_x(self):
        """Return the drawing cursor's current x coordinate."""
        return self._draw.get_cur_x()

    def get_cur_y(self):
        """Return the drawing cursor's current y coordinate."""
        return self._draw.get_cur_y()

    def get_style(self):
        """Return the current pen style."""
        # NOTE(review): 'get_stype' looks like a typo for 'get_style' —
        # confirm against the Drawing class API before renaming.
        return self._draw.get_stype()

    def get_status(self):
        """Return the pen status from the underlying Drawing."""
        return self._draw.get_status()

    def show(self):
        """Render the drawing."""
        return self._draw.show()

    def parse_one_line(self, line):
        """Parse and execute a single command line.

        Comments ('#' to end of line) and blank lines are ignored.

        Raises:
            ValueError: if the command letter is not recognized.
            AssertionError: if a command has the wrong argument count.
        """
        line = line.strip()
        code = line.split('#', 1)[0]  # remove comments
        words = code.split()
        if not words:
            return

        cmd = words[0]
        if cmd == 'P':
            assert len(words) == 2
            self._draw.set_style(self._pen_style_list[int(words[1])])
        elif cmd == 'D':
            assert len(words) == 1
            self._draw.pen_down()
        elif cmd == 'U':
            assert len(words) == 1
            self._draw.pen_up()
        elif cmd in self._MOVES:
            assert len(words) == 2
            getattr(self._draw, self._MOVES[cmd])(int(words[1]))
        else:
            # Was a Python-2 print statement followed by assert(1), which
            # never fires — fail loudly on unsupported commands instead.
            raise ValueError(
                "Error: Un-supported Action!, in line \"" + line + "\"")
def main(cam_idx):
    """Camera-tracking plotter control loop.

    Captures frames from camera `cam_idx`, locates colored markers, maps the
    red marker into drawing coordinates, streams smoothed pen paths to the
    plotter via a background thread, and serves keyboard / websocket controls
    for template recording and playback.

    Relies on module-level state defined elsewhere in the file (DEBUG,
    DRY_RUN, intrinsics, target/client dimensions, `server`, helpers such as
    find_marker_for_id, path_smooth, point_add, CatmullRomSpline, ...).
    """
    if DEBUG:
        # Static calibration shots used to tune per-color marker detection.
        frame1 = cv2.imread("data/color-calib-1.png", cv2.IMREAD_COLOR)
        frame2 = cv2.imread("data/color-calib-2.png", cv2.IMREAD_COLOR)
        frame3 = cv2.imread("data/color-calib-3.png", cv2.IMREAD_COLOR)
        frame4 = cv2.imread("data/color-calib-4.png", cv2.IMREAD_COLOR)  # red, blue, yellow
        frame5 = cv2.imread("data/color-calib-5.png", cv2.IMREAD_COLOR)  # red, blue, yellow, green
        frame6 = cv2.imread("data/color-calib-6.png", cv2.IMREAD_COLOR)  # all + pens
        # find_marker_for_id(frame1, COLOR_BLUE)
        # find_marker_for_id(frame1, COLOR_RED)
        # find_marker_for_id(frame2, COLOR_BLUE)
        # find_marker_for_id(frame2, COLOR_RED)
        # find_marker_for_id(frame3, COLOR_BLUE)
        # find_marker_for_id(frame3, COLOR_RED)
        # find_marker_for_id(frame4, COLOR_BLUE)
        # find_marker_for_id(frame4, COLOR_RED)
        # find_marker_for_id(frame4, COLOR_YELLOW)
        # find_marker_for_id(frame5, COLOR_BLUE)
        # find_marker_for_id(frame5, COLOR_RED)
        # find_marker_for_id(frame5, COLOR_YELLOW)
        # find_marker_for_id(frame5, COLOR_GREEN)
        find_marker_for_id(frame6, COLOR_BLUE)
        find_marker_for_id(frame6, COLOR_RED)
        find_marker_for_id(frame6, COLOR_YELLOW)
        find_marker_for_id(frame6, COLOR_GREEN)
        if cv2.waitKey(1) == 27:  # ESC closes the calibration windows
            cv2.destroyAllWindows()

    cam = cv2.VideoCapture(cam_idx)
    result, frame = cam.read()
    cv2.imwrite('data/sample-frame.png', frame)
    assert result
    shape = list(frame.shape)
    [frame_height, frame_width] = shape[:2]
    # The debug canvas is a 2x3 grid of frame-sized panels.
    shape[0] *= 2
    shape[1] *= 3
    canvas = np.zeros(shape, dtype=np.uint8)
    global newcameramtx
    newcameramtx, _ = \
        cv2.getOptimalNewCameraMatrix(
            intrinsics['mtx'], intrinsics['dist'],
            (frame_width, frame_height), 1, (frame_width, frame_height))
    raw_history = History()
    draw_history = History()
    styled_history = History()
    pen_history = History()
    path_buffer = Queue()
    device_lock = Lock()

    def project_draw_to_bot(draw_point):
        # Small skew correction from camera "draw" space to plotter space;
        # constants were presumably hand-tuned — TODO confirm.
        return (
            draw_point[0] - (target_width - draw_point[0]) * 0.02,
            draw_point[1] - (target_height - draw_point[1]) * .1
        )

    # initialize plotter
    p = None
    if not DRY_RUN:
        p = PlotterAxi(w=target_width, h=target_height - 0.5, border=border)

        def disable_device():
            # Park the pen and power the motors off at process exit.
            with device_lock:
                p.device.wait()
                p.up()
                if not actions['motors'].device_reset:
                    p.move(0, 0)
                    p.device.wait()
                sp.check_call(['axi', 'off'])

        atexit.register(disable_device)

        def send_commands():
            # Consumer thread: drain queued draw points and stream them to
            # the plotter in small batches; decimate 3:1 when backed up.
            first_command = True
            while True:
                if path_buffer.qsize() > 48:
                    print('Queue is too large: {}'.format(path_buffer.qsize()))
                    path = [path_buffer.get() for _ in range(48)][::3]
                else:
                    path = [path_buffer.get() for _ in range(12)]
                path = [project_draw_to_bot(p) for p in path]
                with device_lock:
                    if first_command:
                        p.move(*path[0])
                        p.down()
                        first_command = False
                    p.path(path)

        start_thread(send_commands)

    drawing = Drawing(p, pen_history)
    drawing.set_style(1)
    drawing.set_client(client_width, client_height)
    drawing.set_target(target_width, target_height, border)

    class RecordTemplateAction:
        # Records a stroke (rebased relative to its start point) as a
        # reusable template. First trigger starts recording, second stops.

        def __init__(self):
            self.template = []       # recorded points, later start-relative
            self.start_point = None  # draw point where recording began
            self.recording = False

        def trigger(self):
            if not self.recording:
                print('Recording template')
                self.template = []
                self.start_point = draw_history.last()
                self.recording = True
            else:
                print('Finish template')
                self.recording = False
                (lx, ly) = self.start_point
                # Rebase recorded points relative to the start point.
                self.template = [
                    (x - lx, y - ly)
                    for x, y in self.template
                ]
            return self.template

        def on_draw(self, point):
            if self.recording:
                self.template.append(point)

    class ApplyTemplateAction:
        # Stamps the recorded template once at the plotter's current
        # position, with websocket-controlled rotation/scale.

        def __init__(self):
            self.path = []
            self.recording = False
            self.rotation = 0  # radians, set via websocket
            self.scale = 1     # set via websocket

        def apply_template(self, p, translate, rotate, scale):
            transformed_path = drawing.apply_transform(
                actions['record_template'].template,
                translate, rotate, scale)
            template_path = path_smooth(transformed_path)
            # NOTE(review): this overwrites the transformed/smoothed path
            # with the raw template — looks like leftover debugging; confirm.
            template_path = actions['record_template'].template
            print('Applying template')
            origin = (p.x, p.y)
            print(origin)
            with device_lock:
                p.move(*point_add(template_path[0], origin))
                p.down()
                p.path([
                    point_add(p, origin)
                    for p in template_path
                ])
                p.up()
            # update template with transform
            actions['record_template'].template = transformed_path
            transformed_path = drawing.apply_transform(
                actions['record_template'].template,
                translate, rotate, scale)
            template_path = path_smooth(transformed_path)
            # NOTE(review): the two assignments above are dead — the values
            # are never used after this point.

        def trigger(self):
            translate = (0, 0)  # (p.x, p.y)
            rotate = actions['apply_template'].rotation  # math.pi/3#0
            scale = actions['apply_template'].scale  # .8
            self.apply_template(p, translate, rotate, scale)

        def on_draw(self, point):
            self.path.append(point)

    class ApplyTemplatePathAction:
        # Records a guide path, then stamps the template at points spaced
        # roughly half a template-diameter apart along it.

        def __init__(self):
            self.path = []
            self.recording = False
            self.rotation = 0  # radians, set via websocket
            self.scale = 1     # set via websocket

        def apply_template(self, p, translate, rotate, scale):
            transformed_path = drawing.apply_transform(
                actions['record_template'].template,
                translate, rotate, scale)
            template_path = path_smooth(transformed_path)
            # Template footprint: largest pairwise point distance.
            diameter = max([
                point_dist(p1, p2)
                for p1 in template_path
                for p2 in template_path
            ])
            # Thin the guide path so stamps don't overlap more than ~50%.
            draw_points = self.path[:1]
            i = 1
            while i < len(self.path):
                if point_dist(self.path[i], draw_points[-1]) >= diameter / 2:
                    draw_points.append(self.path[i])
                i += 1
            print('Applying template path')
            for origin in draw_points:
                with device_lock:
                    p.move(*point_add(template_path[0], origin))
                    p.down()
                    p.path([
                        point_add(p, origin)
                        for p in template_path
                    ])
                    p.up()
            # update template with transform
            actions['record_template'].template = transformed_path
            transformed_path = drawing.apply_transform(
                actions['record_template'].template,
                translate, rotate, scale)
            template_path = path_smooth(transformed_path)
            # NOTE(review): the two assignments above are dead code.

        def trigger(self):
            if not self.recording:
                print('Recording application path')
                self.path = []
                self.recording = True
            else:
                print('Finish application path')
                translate = (0, 0)  # (p.x, p.y)
                rotate = actions['apply_template_path'].rotation  # math.pi/3#0
                scale = actions['apply_template_path'].scale  # .8
                self.apply_template(p, translate, rotate, scale)

        def on_draw(self, point):
            self.path.append(point)

    class MotorAction:
        # Toggles plotter motors off (to reposition the carriage by hand)
        # and back on, re-seeding the plotter position from the tracked pen.

        def __init__(self):
            self.device_reset = False  # True once position was re-seeded
            self.disabled = False

        def trigger(self):
            if not self.disabled:
                # print('predicted',
                #       pen_history.last(),
                #       'actual',
                #       p.device.read_position())
                with device_lock:
                    p.up()
                    p.device.disable_motors()
                print('Disabled motors')
                self.disabled = True
            else:
                with device_lock:
                    p.device.enable_motors()
                    p.down()
                    # Re-seed the plotter's believed position from the
                    # camera-tracked pen marker.
                    x, y = pen_history.last()
                    p.x = x
                    p.y = y
                print('Enabled motors at ({}, {})'.format(x, y))
                self.device_reset = True
                self.disabled = False

        def on_draw(self, point):
            pass

    actions = {
        'record_template': RecordTemplateAction(),
        'apply_template': ApplyTemplateAction(),
        'apply_template_path': ApplyTemplatePathAction(),
        'motors': MotorAction(),
    }

    def recv_keyboard():
        # Console control thread: 'i'/'o' lift/lower the pen, digits switch
        # the drawing style; keys are also forwarded to the actions.
        while True:
            inp = input().strip()
            if inp == 'i':
                with device_lock:
                    p.up()
            elif inp == 'o':
                with device_lock:
                    p.down()
            else:
                try:
                    print(inp)
                    n = int(inp)
                    drawing.set_style(n)
                    print('Set style to {}'.format(n))
                except ValueError:
                    pass
            for a in actions.values():
                # NOTE(review): none of the action classes define
                # on_keypress — this raises AttributeError; confirm intent.
                a.on_keypress(inp)

    start_thread(recv_keyboard)

    def recv_websocket(client, server, message):
        # Websocket handler: applies rotation/scale settings and triggers
        # named actions, echoing the trigger result back to the client.
        data = json.loads(message)
        if 'rotation' in data:
            rotation = int(data['rotation'])
            print(rotation)
            rotation = math.radians(rotation)
            # print(rotation)
            actions['apply_template'].rotation = rotation
            actions['apply_template_path'].rotation = rotation
        if 'scale' in data:
            scale = float(data['scale'])
            print(scale)
            actions['apply_template'].scale = scale
            actions['apply_template_path'].scale = scale
        if 'type' in data:
            server.send_message(
                client,
                json.dumps(actions[data['type']].trigger()))

    def websocket_server():
        # NOTE(review): `server` is not defined in this function or in main —
        # presumably a module-level websocket server object; verify.
        server.set_fn_message_received(recv_websocket)
        server.run_forever()

    start_thread(websocket_server)

    def draw_debug_info(frame, undistorted_frame, sat_img, raw_point,
                        tall_draw_point, draw_point, pen_point):
        # Compose the 2x3 debug canvas: camera frame, undistorted frame,
        # saturation masks, plus marker/draw/pen overlays, then display it.
        frame_canvas = canvas[:frame_height, frame_width:(frame_width*2), :]
        frame_canvas[:, :, :] = frame[::-1, ::-1, :]  # rotated 180 for viewing
        fixed_canvas = canvas[:frame_height, (frame_width*2):, :]
        fixed_canvas[:, :, :] = undistorted_frame[::-1, ::-1, :]
        masked_canvas = canvas[frame_height:, frame_width:(frame_width*2), :]
        masked_canvas[:, :, :] = sat_img[::-1, ::-1, :]
        camera_canvas = canvas[:, frame_width:(frame_width*2), :]
        # Outline the client (drawable) region on the camera panel.
        cv2.line(camera_canvas,
                 (client_x, 0),
                 (client_x, frame_height),
                 (255, 255, 0), 3)
        cv2.line(camera_canvas,
                 (client_x + client_width, 0),
                 (client_x + client_width, frame_height),
                 (255, 255, 0), 3)
        cv2.line(camera_canvas,
                 (0, client_y),
                 (frame_width, client_y),
                 (255, 255, 0), 3)
        cv2.line(camera_canvas,
                 (0, client_y + client_height),
                 (frame_width, client_y + client_height),
                 (255, 255, 0), 3)

        def paper_to_pixel(p):
            # Paper (target) coordinates -> pixel position within a panel.
            return (int(p[0] / target_width * client_width + client_x),
                    int(p[1] / target_height * client_height + client_y))

        def draw_paper_point(canvas, paper_point, color):
            # Draw a labelled dot for a paper-space point.
            px, py = paper_to_pixel(paper_point)
            cv2.circle(canvas, (px, py), 20, color, -1)
            cv2.putText(canvas,
                        "{:.2f}, {:.2f}".format(*paper_point),
                        (px - 80, py + 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.5, color, 5)

        if raw_point is not None and draw_point is not None:
            def raw_to_pixel(p):
                # Raw client-box coordinates -> pixel position.
                return (int(p[0] * frame_width / client_width),
                        int(p[1] * frame_height / client_height))

            # Draw raw coordinates
            raw_point = raw_to_pixel(raw_point)
            if raw_history.last() is not None:
                cv2.line(canvas,
                         raw_to_pixel(raw_history.last()),
                         raw_point,
                         (255, 255, 255), 3)
            cv2.circle(canvas, raw_point, 10, (255, 255, 255), -1)

            # Draw draw coordinates
            draw_canvas = canvas[frame_height:, :frame_width, :]
            # Green while a template is recording, cyan otherwise.
            draw_color = (0, 255, 0) if actions['record_template'].recording \
                else (255, 255, 0)
            if draw_history.last() is not None:
                cv2.line(
                    draw_canvas,
                    paper_to_pixel(draw_history.last()),
                    paper_to_pixel(draw_point),
                    draw_color, 3)
            cv2.circle(
                draw_canvas,
                paper_to_pixel(draw_point),
                10, draw_color, -1)
            draw_paper_point(fixed_canvas, tall_draw_point, (0, 255, 255))
            draw_paper_point(fixed_canvas, draw_point, (0, 255, 0))

        if pen_point is not None:
            draw_paper_point(fixed_canvas, pen_point, (0, 255, 255))
            bot_point = project_draw_to_bot(pen_point)
            draw_paper_point(fixed_canvas, bot_point, (0, 255, 0))

        canvas_resize = cv2.resize(
            canvas,
            (1920, int(canvas.shape[0] * 1920 / canvas.shape[1])))
        cv2.imshow('frame', canvas_resize)
        cv2.waitKey(30)

    last_sample = now()
    # Main capture loop: track markers, queue styled points, show debug view.
    while True:
        # print('Total: {:.4f}'.format(now() - last_sample))
        last_sample = now()
        # if now() - last_sample < camera_sample_rate:
        #     continue
        result, frame = cam.read()
        assert result
        start = now()
        markers = {
            col: find_marker_for_id(frame.copy(), col)
            for col in [COLOR_BLUE, COLOR_RED]
        }
        # print('Finding markers: {:.4f}'.format(now() - start))

        def project_raw(point):
            px, py = point
            # Put coordinates within the client box
            px -= client_x
            py -= client_y
            # Mirror x because we're behind the camera
            raw_point = (client_width - px, client_height - py)
            return raw_point

        for k, (pt, _1, _2) in markers.items():
            if pt is not None:
                markers[k] = (project_raw(pt), _1, _2)

        # Blue marker tracks the plotter pen; red tracks the user's marker.
        axi_raw_point, undistorted_frame, _ = markers[COLOR_BLUE]
        sat_img = np.zeros(frame.shape)
        for _1, _2, sat_img_col in markers.values():
            sat_img[:, :, :] += sat_img_col

        pen_point = None
        if axi_raw_point is not None:
            axi_draw_point = drawing.compute_draw_coordinates(*axi_raw_point)
            pen_point = transform_height_perspective(axi_draw_point, pen_height)
            pen_history.shift(pen_point)

        raw_point, _, _ = markers[COLOR_RED]
        tall_draw_point = None
        draw_point = None
        styled_point = None
        if raw_point is not None:
            tall_draw_point = drawing.compute_draw_coordinates(*raw_point)
            draw_point = transform_height_perspective(tall_draw_point,
                                                      marker_height,
                                                      pen=False)
            # Only act on points inside the target (paper) rectangle.
            if 0 <= draw_point[0] and draw_point[0] < target_width and \
               0 <= draw_point[1] and draw_point[1] < target_height:
                last_draw_point = draw_history.last()
                styled_point = drawing.apply_style(
                    draw_point,
                    (draw_point[0] - last_draw_point[0],
                     draw_point[1] - last_draw_point[1]) \
                    if last_draw_point is not None else draw_point)
                for a in actions.values():
                    a.on_draw(draw_point)
                if len(draw_history.pts) < 4:
                    # Not enough history for spline smoothing yet.
                    path_buffer.put(styled_point)
                    pass  # NOTE(review): stray no-op
                else:
                    # Smooth through the last 3 styled points + the new one.
                    interped = CatmullRomSpline(
                        *(styled_history.pts[-3:] + [styled_point]),
                        nPoints=3)
                    if np.any(np.isnan(interped)):
                        path_buffer.put(styled_history.pts[-1])
                    else:
                        for pt in interped:
                            path_buffer.put(pt)

        start = now()
        draw_debug_info(
            frame, undistorted_frame, sat_img,
            raw_point, tall_draw_point, draw_point, pen_point)
        # print('Drawing debug info: {:.3f}'.format(now() - start))
        if raw_point is not None and draw_point is not None:
            raw_history.shift(raw_point)
            draw_history.shift(draw_point)
            # NOTE(review): styled_point can still be None here when
            # draw_point fell outside the target bounds — confirm History
            # tolerates None entries.
            styled_history.shift(styled_point)