Example #1
def main():
    global mode, pt1_x, pt1_y, layer, layers

    video_capture = camera_setup()
    cv2.namedWindow('test draw')
    cv2.setMouseCallback('test draw', click_and_crop)
    while True:
        orig_img = next_frame2(video_capture)
        key = cv2.waitKey(5) & 0xFF
        if process_key(key) == -1:
            break
        check_layers(orig_img)

        final_pic = updateImage(orig_img)
        common.draw_fps(final_pic)
        cv2.imshow('test draw', final_pic)
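
Note: the loop above registers a click_and_crop mouse handler that is not shown in this example. As a rough sketch only (the project's actual handler also manages drawing modes and layers, which are reduced here to a single assumed drawing flag), an OpenCV mouse callback with the required signature looks like this:

import cv2

# Sketch of a mouse callback; the real click_and_crop keeps its state in the
# mode/pt1_x/pt1_y/layer globals used by main() above.
drawing = False
pt1_x, pt1_y = None, None

def click_and_crop(event, x, y, flags, param):
    global drawing, pt1_x, pt1_y
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        pt1_x, pt1_y = x, y            # stroke starts here
    elif event == cv2.EVENT_MOUSEMOVE and drawing:
        pass                           # e.g. preview a line from (pt1_x, pt1_y) to (x, y)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False                # stroke ends at (x, y)

Registering it with cv2.setMouseCallback('test draw', click_and_crop), as in the example, is enough to receive these events for the named window.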
Example #2
def get_measurement(video_capture):
    if not get_measurement.standalone:
        get_measurement.record = False
        get_measurement.save = False

    if not get_measurement.pause_updates:
        image0 = next_frame2(video_capture)
        get_measurement.last_image0 = image0
    else:
        image0 = get_measurement.last_image0

    if not get_measurement.pause_updates:
        if get_measurement.warp_m is not None:
            h, w = image0.shape[:2]
            warped = cv2.warpPerspective(image0, get_measurement.warp_m,
                                         (w, h))
            image1 = warped
        else:
            image1 = image0.copy()
        get_measurement.last_image1 = image1.copy()
    else:
        image1 = get_measurement.last_image1.copy()

    if not get_measurement.lock_path:
        circles = find_holes(image1)
        circles = organize_circles(circles, get_measurement.start_mpt,
                                   get_measurement.end_mpt)
        get_measurement.last_circles = circles
        image_b = image1.copy()
        get_measurement.last_image_b = image_b.copy()
    else:
        circles = get_measurement.last_circles
        image_b = get_measurement.last_image_b.copy()

    draw_table(image_b, circles)
    draw_circles(image_b, circles)
    draw_path(image_b, circles, get_measurement.start_mpt,
              get_measurement.end_mpt, get_measurement.cur_mpt,
              get_measurement.lock_path)

    global in_alignment

    global c_crop_rect
    if c_crop_rect:
        c = c_incomplete_color if in_alignment else c_line_color
        cv2.rectangle(image_b, round_pt(c_crop_rect[0]),
                      round_pt(c_crop_rect[1]), c, c_line_s)

    # Build and display composite image
    final_img = None
    if get_measurement.c_view == 0:
        img_all = np.hstack([image0, image1, image_b])
        img_all_resized = cv2.resize(img_all,
                                     None,
                                     fx=c_final_image_scale_factor,
                                     fy=c_final_image_scale_factor)
        final_img = img_all_resized
    elif get_measurement.c_view == 1:
        final_img = image0
    elif get_measurement.c_view == 2:
        final_img = image1
    elif get_measurement.c_view == 3:
        final_img = image_b

    global mouse_sqr_pts_done, mouse_sqr_pts
    if not mouse_sqr_pts_done and get_measurement.mouse_op == 'alignment':
        draw_selected_points(final_img, mouse_sqr_pts)

    if get_measurement.standalone:
        common.draw_error(final_img)

        if get_measurement.record:
            fn1 = 'mov_raw_z_{:06}.ppm'.format(get_measurement.record_ind)
            cv2.imwrite(fn1, image0)
            # fn2 = 'mov_all_z_{:06}.ppm'.format(get_measurement.record_ind)
            # cv2.imwrite(fn2, img_all)
            fn3 = 'mov_fin_z_{:06}.ppm'.format(get_measurement.record_ind)
            cv2.imwrite(fn3, final_img)
            get_measurement.record_ind += 1
            print('Recorded {} {}'.format(fn1, fn3))

        if get_measurement.save:
            get_measurement.save = False

            for i in range(100):
                # fn1 = f'raw_z_{i:03}.png'
                fn1 = 'raw_z_{:03}.png'.format(i)
                if not os.path.exists(fn1):
                    cv2.imwrite(fn1, image0)
                    # fn2 = f'all_z_{i:03}.png'
                    fn2 = 'all_z_{:03}.png'.format(i)
                    cv2.imwrite(fn2, final_img)
                    # print(f'Wrote images {fn1} and {fn2}')
                    print('Wrote images {} and {}'.format(fn1, fn2))
                    break

    if not get_measurement.pause_updates:
        if get_measurement.mouse_op == 'alignment' and get_measurement.c_view == 1:
            if mouse_sqr_pts_done:
                # draw_selected_points(final_img, mouse_sqr_pts)

                rct = np.array(mouse_sqr_pts, dtype=np.float32)
                w1 = line_length(mouse_sqr_pts[0], mouse_sqr_pts[1])
                w2 = line_length(mouse_sqr_pts[2], mouse_sqr_pts[3])
                h1 = line_length(mouse_sqr_pts[0], mouse_sqr_pts[3])
                h2 = line_length(mouse_sqr_pts[1], mouse_sqr_pts[2])
                w = max(w1, w2)
                h = max(h1, h2)

                pt1 = mouse_sqr_pts[0]
                dst0 = [
                    pt1, [pt1[0] + w, pt1[1]], [pt1[0] + w, pt1[1] + h],
                    [pt1[0], pt1[1] + h]
                ]
                dst = np.array(dst0, dtype=np.float32)

                get_measurement.warp_m = cv2.getPerspectiveTransform(rct, dst)

                pt1, pt2 = dst0[0], dst0[2]
                c_crop_rect = [pt1, pt2]

                mouse_sqr_pts = []
                mouse_sqr_pts_done = False

                get_measurement.c_view = 3
                get_measurement.mouse_op = ''

                in_alignment = True

        if get_measurement.c_view not in [1, 2]:
            global mouse_pts, mouse_moving
            mouse_pts = []
            mouse_moving = False

    if in_alignment:
        ss2 = ''
        if c_machine_rect[1]:
            ss2 = ' [{:.3f} x {:.3f}]'.format(*c_machine_rect[1])
        ss = 'Enter plate dimensions (W,H){}: {}'.format(
            ss2, process_key.plate_size_str)
        cv2.putText(final_img, ss, (20, 30), c_label_font, c_label_s,
                    c_label_color)
    elif c_machine_rect[1]:
        cv2.putText(final_img,
                    'Size (WxH): {:.3f} x {:.3f}'.format(*c_machine_rect[1]),
                    (20, 30), c_label_font, c_label_s, c_label_color)

    if get_measurement.standalone:
        common.draw_fps(final_img)

    return circles, final_img
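
Note: get_measurement keeps all of its persistent state (pause_updates, warp_m, lock_path, the cached images and circle list, the current view and mouse operation) as attributes on the function object itself, so the caller must set them before the first call. A minimal initialization sketch using the attribute names from the example; the default values are assumed, not taken from the project:

def init_get_measurement():
    # Assumed defaults; the project may initialize these differently.
    get_measurement.standalone = True
    get_measurement.record = False
    get_measurement.record_ind = 0
    get_measurement.save = False
    get_measurement.pause_updates = False
    get_measurement.lock_path = False
    get_measurement.mouse_op = ''
    get_measurement.c_view = 0
    get_measurement.warp_m = None
    get_measurement.start_mpt = None
    get_measurement.end_mpt = None
    get_measurement.cur_mpt = None
    get_measurement.last_image0 = None
    get_measurement.last_image1 = None
    get_measurement.last_image_b = None
    get_measurement.last_circles = []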
Example #3
def update_view(video_capture, video_capture2):
    cmds = {
        ord('0'): touch_off_center_of_hole,
        ord('4'): touch_off_right_edge,
        ord('6'): touch_off_left_edge,
        ord('8'): touch_off_forward_edge,
        ord('2'): touch_off_aft_edge,
        ord('5'): touch_off_top_edge,
        ord('1'): touch_off_ur_corner,
        ord('3'): touch_off_ul_corner,
        ord('7'): touch_off_lr_corner,
        ord('9'): touch_off_ll_corner
    }

    lst = machine_to_part_cs()
    if not z_camera.get_measurement.lock_path:
        z_camera.get_measurement.start_mpt = lst
    z_camera.get_measurement.cur_mpt = lst

    mm_final, haimer_img = haimer_camera.get_measurement(video_capture)
    circles, z_img = z_camera.get_measurement(video_capture2)
    target_sz = (720, 640)
    if haimer_img.shape[:2] != target_sz:
        # The debug view is (1008, 1344), so the aspect ratio of the resized image differs from the source image.
        haimer_img = cv2.resize(haimer_img, (target_sz[1], target_sz[0]))
    final_img = np.hstack([z_img, haimer_img])
    common.draw_fps(final_img)

    common.draw_error(final_img)

    if update_view.do_touchoff:
        s = 'Touch off using numeric keypad'
        thickness = 1
        text_size, baseline = cv2.getTextSize(s, c_label_font, c_label_s,
                                              thickness)
        text_pos = ((final_img.shape[1] - text_size[0]) // 2,
                    (final_img.shape[0] + text_size[1]) // 2)
        cv2.putText(final_img, s, text_pos, c_label_font, c_label_s,
                    c_label_color, thickness)

    c_final_image_scale_factor = 1280 / 1920.
    final_img_resized = cv2.resize(final_img,
                                   None,
                                   fx=c_final_image_scale_factor,
                                   fy=c_final_image_scale_factor)

    cv2.imshow(c_camera_name, final_img_resized)
    key = cv2.waitKey(5)

    if update_view.record:
        fn1 = 'mov_all_l_{:06}.ppm'.format(update_view.record_ind)
        cv2.imwrite(fn1, final_img)
        update_view.record_ind += 1
        print('Recorded {}'.format(fn1))

    if update_view.save:
        update_view.save = False

        for i in range(100):
            fn1 = 'all_l_{:03}.png'.format(i)
            if not os.path.exists(fn1):
                cv2.imwrite(fn1, final_img)
                print('Wrote image {}'.format(fn1))
                break

    # accepted = False
    if update_view.do_touchoff:
        try:
            res = cmds[key](video_capture)
            update_view.do_touchoff = False
            print(res)
            # accepted = True
        except KeyError:
            pass

    accepted = True
    if key == ord('r'):
        update_view.record = not update_view.record
    elif key == ord('s'):
        update_view.save = not update_view.save
    elif key == ord('t'):
        update_view.do_touchoff = not update_view.do_touchoff
    elif key == ord('g'):
        header, results = re_holes(video_capture, circles)

        rows = [header]
        for res in results:
            rows += [[res[0]] + ['{:.4f}'.format(x) for x in res[1:]]]

        fn = 'circles.csv'
        with open(fn, 'w', newline='') as f:
            c = csv.writer(f)
            c.writerows(rows)
        print('\nProbing results saved to {}\n'.format(fn))
    elif key in [27, ord('q')]:  # Escape or q
        raise common.QuitException
    else:
        accepted = False

    if not accepted:
        accepted = haimer_camera.process_key(key)
    if not accepted:
        z_camera.process_key(key)

    return mm_final, circles
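
Note: update_view routes numeric-keypad presses through the cmds dictionary and ignores any other key via the KeyError handler. The same dispatch-table pattern in isolation (the handler names below are placeholders, not the project's touch-off functions):

# Standalone sketch of the key-dispatch pattern used above.
def probe_left(cap):
    return 'probed left edge'

def probe_right(cap):
    return 'probed right edge'

cmds = {ord('4'): probe_right, ord('6'): probe_left}

def handle_key(key, cap):
    handler = cmds.get(key)        # dict lookup instead of an if/elif chain
    if handler is None:
        return None                # unmapped key: do nothing
    return handler(cap)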
Example #4
def get_measurement(video_capture):
    mm_final, mm_b, mm_r = None, None, None

    if not get_measurement.standalone:
        get_measurement.record = False
        get_measurement.save = False

    build_all = get_measurement.record or get_measurement.save or get_measurement.debug_view

    image0 = next_frame2(video_capture)
    h, w = image0.shape[:2]
    image_center = c_image_center(w, h)

    m = cv2.getRotationMatrix2D(image_center,
                                c_initial_image_rot / math.pi * 180., 1.0)
    image1 = cv2.warpAffine(image0, m, (w, h))
    image2 = image1.copy()

    theta_b, image_b, seg_b, skel_b = black_arrow(image1, image_center)
    seg_b = cv2.cvtColor(seg_b, cv2.COLOR_GRAY2BGR)
    skel_b = cv2.cvtColor(skel_b, cv2.COLOR_GRAY2BGR)

    theta_r, image_r, seg_r, skel_r = red_arrow(image1, image_center)
    seg_r = cv2.cvtColor(seg_r, cv2.COLOR_GRAY2BGR)
    skel_r = cv2.cvtColor(skel_r, cv2.COLOR_GRAY2BGR)

    # Maintain a list of valid thetas for times when no measurements are
    # available, such as when the black hand passes over the red hand, and
    # to use for noise reduction.
    common.append_v(get_measurement.theta_b_l, theta_b)
    common.append_v(get_measurement.theta_r_l, theta_r)

    if get_measurement.theta_b_l and get_measurement.theta_r_l:
        theta_b = mean_angles(get_measurement.theta_b_l)
        theta_r = mean_angles(get_measurement.theta_r_l)

        mm_final, mm_b, mm_r = calc_mm(theta_b, theta_r)

    if build_all:
        # Draw outer circle dial and crosshairs on dial pivot.
        cv2.circle(image1, image_center, c_dial_outer_mask_r, c_line_color,
                   c_line_s)
        cv2.line(image1, (image_center[0] - c_inner_mask_r,
                          image_center[1] - c_inner_mask_r),
                 (image_center[0] + c_inner_mask_r,
                  image_center[1] + c_inner_mask_r), c_line_color, 1)
        cv2.line(image1, (image_center[0] - c_inner_mask_r,
                          image_center[1] + c_inner_mask_r),
                 (image_center[0] + c_inner_mask_r,
                  image_center[1] - c_inner_mask_r), c_line_color, 1)

        # Draw black arrow mask
        cv2.circle(image1, image_center, c_black_outer_mask_r, c_line_color,
                   c_line_s)
        cv2.ellipse(image1, image_center, c_black_outer_mask_e, 0, 0, 360,
                    c_line_color, c_line_s)
        cv2.circle(image1, image_center, c_inner_mask_r, c_line_color,
                   c_line_s)

        # Draw red arrow mask
        cv2.circle(image1, image_center, c_red_outer_mask_r, c_line_color,
                   c_line_s)
        cv2.circle(image1, image_center, c_inner_mask_r, c_line_color,
                   c_line_s)

    # Draw final marked up image
    mask = np.zeros(image2.shape, dtype=image2.dtype)
    cv2.circle(mask, image_center, c_dial_outer_mask_r, (255, 255, 255), -1)
    image2 = cv2.bitwise_and(image2, mask)

    # Draw calculated red and black arrows
    if get_measurement.theta_b_l and get_measurement.theta_r_l:
        plot_lines(None, theta_b, c_black_drawn_line_length, image2,
                   image_center)
        plot_lines(None, theta_r, c_red_drawn_line_length, image2,
                   image_center)

        draw_labels(image2, image_b, image_r, theta_b, theta_r, mm_b, mm_r,
                    mm_final)

    img_all, img_all_resized = None, None

    if build_all:
        # Build and display composite image
        img_all0 = np.vstack([image0, image1, image2])
        img_all1 = np.vstack([seg_b, skel_b, image_b])
        img_all2 = np.vstack([seg_r, skel_r, image_r])
        img_all = np.hstack([img_all0, img_all1, img_all2])

    img_b = cv2.resize(image_b, None, fx=0.5, fy=0.5)
    img_r = cv2.resize(
        image_r,
        (image2.shape[1] - img_b.shape[1], image2.shape[0] - img_b.shape[0]))
    img_simple = np.vstack([image2, np.hstack([img_b, img_r])])

    if get_measurement.standalone:
        common.draw_fps(img_simple)
        if build_all:
            common.draw_fps(img_all, append_t=False)

    if build_all:
        img_all_resized = cv2.resize(img_all,
                                     None,
                                     fx=c_final_image_scale_factor,
                                     fy=c_final_image_scale_factor)

    if get_measurement.debug_view:
        final_img = img_all_resized
    else:
        final_img = img_simple

    if get_measurement.standalone:
        common.draw_error(final_img)

        if get_measurement.record:
            fn1 = 'mov_raw_h_{:06}.ppm'.format(get_measurement.record_ind)
            cv2.imwrite(fn1, image0)
            fn2 = 'mov_all_h_{:06}.ppm'.format(get_measurement.record_ind)
            cv2.imwrite(fn2, img_all)
            fn3 = 'mov_fin_h_{:06}.ppm'.format(get_measurement.record_ind)
            cv2.imwrite(fn3, image2)
            fn4 = 'mov_sim_h_{:06}.ppm'.format(get_measurement.record_ind)
            cv2.imwrite(fn4, img_simple)
            get_measurement.record_ind += 1
            print('Recorded {} {} {} {}'.format(fn1, fn2, fn3, fn4))

        if get_measurement.save:
            get_measurement.save = False

            for i in range(100):
                # fn1 = f'raw_h_{i:03}.png'
                fn1 = 'raw_h_{:03}.png'.format(i)
                if not os.path.exists(fn1):
                    cv2.imwrite(fn1, image0)
                    # fn2 = f'all_h_{i:03}.png'
                    fn2 = 'all_h_{:03}.png'.format(i)
                    cv2.imwrite(fn2, img_all)
                    fn3 = 'sim_h_{:03}.png'.format(i)
                    cv2.imwrite(fn3, img_simple)
                    # print(f'Wrote images {fn1} and {fn2}')
                    print('Wrote images {} {} {}'.format(fn1, fn2, fn3))
                    break

    return mm_final, final_img
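
Note: mean_angles is used above to smooth the noisy dial-hand angles; a plain arithmetic mean breaks for values that wrap around 0/2*pi. One common way such a helper can be written (a sketch, not necessarily the project's implementation):

import math

def mean_angles(thetas):
    # Average unit vectors so wrapped angles combine correctly; inputs in radians.
    s = sum(math.sin(t) for t in thetas)
    c = sum(math.cos(t) for t in thetas)
    return math.atan2(s, c)

# Example: angles just on either side of zero average to ~0, not ~pi.
# mean_angles([6.27, 0.01]) -> approximately -0.0016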
Example #5
File: cmm_main.py  Project: prochj51/CMM
    def run(self):
        video_capture = imageHandler.camera_setup()

        while True:

            if not imageHandler.updateImage.pause_updates:
                orig_img = cv2.cvtColor(
                    imageHandler.next_frame2(video_capture), cv2.COLOR_BGR2RGB)
                imageHandler.updateImage.last_image0 = orig_img
            else:
                orig_img = imageHandler.updateImage.last_image0

            if self.win is None:
                continue

            #self.win.update()

            if True:
                # https://stackoverflow.com/a/55468544/6622587
                if self.key != 255:
                    self.win.process_key(self.key)
                    self.key = 255

                # Check whether any masks from drawing have been added
                imageHandler.check_layers(orig_img)

                if self.win.state == State.CalibrateCamera:
                    self.win.ui.instructionLabel.setText(
                        "Select base plate points")
                    if imageHandler.warpImage():
                        self.win.state = State.GetParams
                        self.win.ui.instructionLabel.setText(
                            "Enter dimensions of base plate")

                elif self.win.state == State.SelectOrigin:
                    self.win.ui.instructionLabel.setText("Select [0,0] origin")
                    if len(imageHandler.points_struct) != 0:
                        imageHandler.setOrigin()
                        self.win.set_camera_scale()
                        self.win.state = State.SelectPoints
                        self.win.ui.instructionLabel.setText(
                            "Machine is ready")


                elif self.win.state == State.SelectPoints or \
                self.win.state == State.Draw:
                    pix_x, pix_y = self.win.driver.cnc_to_camera(
                        self.win.x_act, self.win.y_act)
                    imageHandler.actual_position = [pix_x, pix_y]

                final_pic = imageHandler.updateImage(orig_img)
                # mask = imageHandler.plate_mask(final_pic)
                # gray = cv2.cvtColor(cv2.bitwise_and(mask, final_pic), cv2.COLOR_BGR2GRAY)
                # final_pic /= 2
                # final_pic[cv2.bitwise_not(mask) == 255] /= 2
                common.draw_fps(final_pic)
                h, w, ch = final_pic.shape
                #print(rgbImage.shape)
                bytesPerLine = ch * w
                convertToQtFormat = QImage(final_pic.data, w, h, bytesPerLine,
                                           QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.changePixmap.emit(p)
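
Note: the changePixmap.emit(p) call at the end of run() only reaches the GUI if the thread declares the signal and the main window connects it to a slot. A minimal PyQt5 wiring sketch (class and slot names are assumed, not taken from cmm_main.py):

from PyQt5.QtCore import QThread, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QLabel, QMainWindow

class CameraThread(QThread):
    # Signal carrying the converted frame; run() would be the loop shown above.
    changePixmap = pyqtSignal(QImage)

class MainWindow(QMainWindow):
    def __init__(self):
        super().__init__()
        self.label = QLabel(self)
        self.thread = CameraThread(self)
        self.thread.changePixmap.connect(self.set_image)  # delivered on the GUI thread
        self.thread.start()

    @pyqtSlot(QImage)
    def set_image(self, image):
        self.label.setPixmap(QPixmap.fromImage(image))    # display the frame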