예제 #1
0
                time_stamps, profiles = frame_handler.get_results()

                flag = False
                for i in range(0, len(profiles)):
                    # Adds full path to instruction handler
                    instruction_handler.add(time_stamps[i], profiles[i])
                    flag = True
                if flag:
                    instruction_handler.add(
                        0, 0
                    )  # Ending flag. This is how the handler knows the command is over
        ################################

        #### Just for visualization ####
        # else:
        frame = bounding_box.scale(frame)
        frame = cv2.copyMakeBorder(frame,
                                   0,
                                   300,
                                   300,
                                   300,
                                   cv2.BORDER_CONSTANT,
                                   value=0)

        global_parameters['PICKUP_POINT'].draw(frame)
        cv2.imshow("Temp", frame)

        k = cv2.waitKey(
            max(
                global_parameters['FRAME_RATE'] -
                round((time.time() - read_time) * 1000 + 1), 1)) & 0xFF
def main(data_path=DATA_PATH):
    """Run the full pipeline: stream frames, detect meat pieces crossing a
    trigger line, plan robot motion profiles, and optionally display an
    annotated view with keyboard controls.

    Relies on module-level objects (streamer, grapher, profile_model,
    drawing_model, path_runner, bounding_box, global_parameters) and the
    DISPLAY_TOGGLE / PROFILER_TOGGLE flags.

    NOTE(review): data_path is accepted but never read inside this function —
    presumably the streamer was already opened on it at module level; confirm.

    Keys (display mode): q quit, p pause, o/i regenerate graphs,
    s save model state, r restore model state.
    """
    global streamer, grapher, profile_model, drawing_model, current_graph, DISPLAY_TOGGLE
    # out = cv2.VideoWriter(r'C:\Users\User\Documents\Hylife 2020\Loin Feeder\output.mp4', 0x7634706d, 30, (1680,830))
    # out = cv2.VideoWriter(r'C:\Users\User\Documents\Hylife 2020\Loin Feeder\output.avi', cv2.VideoWriter_fourcc(*'XVID'), 30, (1680,830))

    if DISPLAY_TOGGLE:
        win = "Window"
        cv2.namedWindow(win)
        cv2.setMouseCallback(win, on_mouse)

    delay = 0  # frames since the last detection; acts as a re-trigger debounce
    flip_flop = False  # alternates Left/Right side assignment for new meats
    flip_flop2 = False  # True while a drawn move awaits its refreshed graph

    meats = [0]  # dummy head element; real meats occupy indices >= 1
    queue1 = []  # pending [P1 index, P2 index] pairs for the profiler model
    queue2 = []  # pending [P1 index, P2 index] pairs for the drawing model
    times = []  # per-frame wall-clock durations for the final average
    saved_state = []  # drawing-model snapshot taken with 's', restored with 'r'

    while (streamer.running):
        start = time.time()
        ################################################
        ### Video Processing and Meat Identification ###
        ################################################

        force_timer = time.time()  # reference point for frame-rate pacing below

        # Keeps streamer queue size within a lag free range (>0 and <128)
        qsize = streamer.Q.qsize()
        if qsize < 40:
            streamer.sleep_time = 0
        elif qsize > 88:
            streamer.sleep_time = 0.005

        # frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)

        # Scale the raw frame and pad it so overlays can be drawn outside
        # the original image area.
        temp = streamer.read()
        frame = bounding_box.scale(temp)
        frame = cv2.copyMakeBorder(frame,
                                   0,
                                   300,
                                   300,
                                   300,
                                   cv2.BORDER_CONSTANT,
                                   value=0)

        iH, iW, iD = frame.shape
        box, _, _ = bounding_box.get_bbox(frame)

        # for i in range(0, len(box)):
        #     cv2.drawContours(frame, [box[i][0]], 0, (255, 255, 255), 3)

        # Artificially simulate camera trigger
        if (box != 0):
            for i in range(0, len(box)):
                if delay > 50:  # debounce: ignore boxes shortly after a trigger
                    # Centroid from the pre-computed moments stored in box[i][1]
                    cX = int(box[i][1]["m10"] / box[i][1]["m00"])
                    cY = int(box[i][1]["m01"] / box[i][1]["m00"])

                    # Trigger band: a 10 px horizontal strip at one third of
                    # the frame height.
                    if iH / 3 - 5 < cY and iH / 3 + 5 > cY:
                        if flip_flop:
                            meats += [
                                meat.Meat(box[i],
                                          side="Right",
                                          center=[cX, cY])
                            ]
                        else:
                            meats += [
                                meat.Meat(box[i], side="Left", center=[cX, cY])
                            ]
                        flip_flop = not flip_flop
                        delay = 0

        delay += 1

        ########################################
        ### Path planning and Robot Movement ###
        ########################################

        # Fixed drop-off points for the two arms (presumably conveyor
        # coordinates — confirm against Point's coordinate convention).
        ep1 = Point(625, 735, angle=90)
        ep2 = Point(250, 735, angle=90)

        if len(meats) > 3:
            if len(meats) % 2 == 0 and delay == 1:
                # Queue [P1 index, P2 index], so that meat can be accounted for even if robot is currently in motion
                queue1 += [[len(meats) - 1, len(meats) - 2]]
                queue2 += [[len(meats) - 1, len(meats) - 2]]

        # # Profiler model creates motion profiles, it updates as fast as possible in a separate thread
        if PROFILER_TOGGLE:
            # Only start a new move when the model is idle (phase 0), work is
            # queued, and no path is currently being executed.
            if profile_model.phase == 0 and len(
                    queue1) > 0 and not path_runner.running:
                # Vertical distance from the pickup point to the queued meat;
                # positive means the meat has not yet passed the pickup point.
                dist = (global_parameters['PICKUP_POINT'] -
                        meats[queue1[0][0]].get_center_as_point()).y

                if dist > 0:
                    # Project both start points forward by dist so the grab
                    # happens at the pickup line.
                    sp1 = meats[queue1[0][0]].get_center_as_point().copy(
                    ) + Point(0, dist)
                    sp2 = meats[queue1[0][1]].get_center_as_point().copy(
                    ) + Point(0, dist)
                    profile_model.move_meat(sp1, sp2, ep1, ep2, dist // global_parameters['CONVEYOR_SPEED'], \
                        meats[queue1[0][0]].width, meats[queue1[0][1]].width, phase_1_delay=False)
                    queue1 = queue1[1:]

                    # Given the start and end conditions, calculate the profile_model motor profiles
                    path_runner.start()

        # Drawing model is just for drawing purposes, it updates at the frame rate displayed
        if DISPLAY_TOGGLE:
            if drawing_model.phase == 0 and len(queue2) > 0:
                dist = (global_parameters['PICKUP_POINT'] -
                        meats[queue2[0][0]].get_center_as_point()).y

                if dist > 0:
                    sp1 = meats[queue2[0][0]].get_center_as_point().copy(
                    ) + Point(0, dist)
                    sp2 = meats[queue2[0][1]].get_center_as_point().copy(
                    ) + Point(0, dist)
                    # Same move as the profiler, but time-scaled by
                    # RUNTIME_FACTOR so it animates at display speed.
                    drawing_model.move_meat(sp1, sp2, ep1, ep2, dist // (global_parameters['CONVEYOR_SPEED'] * \
                        global_parameters['RUNTIME_FACTOR']), meats[queue2[0][0]].width, meats[queue2[0][1]].width)
                    queue2 = queue2[1:]
                    flip_flop2 = True
                    if PROFILER_TOGGLE:
                        grapher.start(path_runner, (830, 830), 'o')
                else:
                    # Meat already passed the pickup point — drop this entry.
                    print(
                        "ERROR: Conveyor Speed too fast for current settings")
                    queue2 = queue2[1:]
            drawing_model.update()

        # Changes display chart
        if DISPLAY_TOGGLE and PROFILER_TOGGLE:
            # Swap in the freshly rendered graph once both the path and the
            # grapher have finished.
            if flip_flop2 and not path_runner.running and not grapher.running:
                current_graph = grapher.read()
                flip_flop2 = False

        ###############
        ### Display ###
        ###############

        if DISPLAY_TOGGLE:
            if (len(meats) != 1):
                # Skip the dummy element at index 0.
                for i in range(1, len(meats)):
                    meats[i].draw(frame, color=(255, 255, 0))
            drawing_model.draw(frame)

            if PROFILER_TOGGLE:
                # Show the motion graph side-by-side with the camera view.
                frame = np.concatenate((frame, current_graph), axis=1)
            cv2.imshow(win, frame)

        ################
        ### Controls ###
        ################

        # Advance every tracked meat by one simulation step.
        for i in range(1, len(meats)):
            meats[i].step()

        if DISPLAY_TOGGLE:
            # k = cv2.waitKey(1) & 0xFF
            # Wait just long enough to hold the target frame rate, accounting
            # for the time already spent processing this frame.
            k = cv2.waitKey(
                max(
                    global_parameters['FRAME_RATE'] -
                    round((time.time() - force_timer) * 1000 + 1), 1)) & 0xFF
            if k == ord('q'):
                break
            elif k == ord('p'):
                cv2.waitKey(0)
            elif k == ord('o'):
                # Regenerate and display the 'o' graph (busy-waits until done).
                grapher.start(path_runner, (830, 830), 'o')
                while grapher.running:
                    pass
                current_graph = grapher.read()
            elif k == ord('i'):
                grapher.start(path_runner, (830, 830), 'i')
                while grapher.running:
                    pass
                current_graph = grapher.read()
            elif k == ord('s'):
                saved_state = drawing_model.get_model_state()
                cv2.waitKey(0)
                print("State saved.\n")
            elif k == ord('r'):
                drawing_model.set_model_state(saved_state)
                print("State uploaded.")

        times += [time.time() - start]
        # out.write(frame)

        #Artifically slow the program to the desired frame rate

    print("Average frame time:", np.average(times))
    # out.release()
    streamer.stop()
    if PROFILER_TOGGLE:
        path_runner.stop()
    if DISPLAY_TOGGLE:
        grapher.stop()
    cv2.destroyAllWindows()
예제 #3
0
def main(data_path=DATA_PATH):
    """Offline detection demo: read a video file, detect meat pieces as they
    cross a horizontal trigger line, and display one of three debug views.

    Keys: q quit, p pause, 1/2/3 select the display mode
    (1 = annotated frame, 2 = bitwise-AND mask, 3 = processed mask).

    Parameters
    ----------
    data_path : str
        Path to the input video file (module-level DATA_PATH by default).
    """
    cap = cv2.VideoCapture(data_path)
    # out = cv2.VideoWriter(r'C:\Users\User\Documents\Hylife 2020\Loin Feeder\output6.mp4', 0x7634706d, 30, (500,1059))

    delay = 0          # frames since the last detection (re-trigger debounce)
    times = []         # per-frame processing durations for the summary stat
    meats = [0]        # dummy head element; real meats occupy indices >= 1
    flip_flop = False  # alternates Left/Right side assignment
    switch = 1         # current display mode
    # BUGFIX: initialize the overlay text so draw_results() does not hit a
    # NameError (previously masked by a bare except) before the first meat
    # is detected.
    data = ""

    while cap.isOpened():
        start = time.time()

        # BUGFIX: cap.read() returns (False, None) at end of stream; check
        # the return value explicitly instead of relying on the exception
        # raised later inside scale().
        ret, frame = cap.read()
        if not ret or frame is None:
            print("End of video")
            break

        # frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)

        try:
            frame = bounding_box.scale(frame)
            frame = cv2.copyMakeBorder(frame,
                                       0,
                                       300,
                                       300,
                                       300,
                                       cv2.BORDER_CONSTANT,
                                       value=0)
        except Exception:  # narrowed from a bare except
            print("End of video")
            break

        iH, iW, _ = frame.shape

        box, mask, _ = bounding_box.get_bbox(frame)

        if box != 0:
            for i in range(0, len(box)):
                if delay > 50:  # debounce: ignore boxes shortly after a trigger
                    try:
                        M = cv2.moments(box[i])
                        # May raise ZeroDivisionError when m00 == 0 (degenerate
                        # contour); such boxes are skipped.
                        cX = int(M["m10"] / M["m00"])
                        cY = int(M["m01"] / M["m00"])

                        # Trigger band: 10 px strip at iH / 1.8.
                        if iH / 1.8 - 5 < cY < iH / 1.8 + 5:
                            if flip_flop:
                                meats += [
                                    Meat(box[i], side="Right", center=[cX, cY])
                                ]
                            else:
                                meats += [
                                    Meat(box[i], side="Left", center=[cX, cY])
                                ]
                            flip_flop = not flip_flop
                            print("Meat detected")
                            print(meats[-1])

                            data = "Side:" + meats[-1].get_side() + "\n" + str(
                                delay)

                            delay = 0
                    except Exception:  # narrowed from a bare except
                        pass

        try:
            # Before the first detection meats[-1] is the int dummy, so
            # get_bbox() raises and we fall through to the raw-frame view.
            if switch == 1:
                res = bounding_box.draw_results(frame, [meats[-1].get_bbox()],
                                                "Test",
                                                meat=meats[-1],
                                                extra_data=data)
            elif switch == 2:
                filtered = bounding_box.gen_mask(frame,
                                                 bitwise_and=True,
                                                 process=False)
                bounding_box.draw_results(filtered, [meats[-1].get_bbox()],
                                          "Test",
                                          meat=meats[-1],
                                          extra_data=data)
            elif switch == 3:
                # BUGFIX: was `bbox.gen_mask(...)` — an undefined name whose
                # NameError the bare except silently swallowed, so mode 3
                # never worked.
                filtered = bounding_box.gen_mask(frame,
                                                 bitwise_and=False,
                                                 process=True)
                bounding_box.draw_results(filtered, [meats[-1].get_bbox()],
                                          "Test",
                                          meat=meats[-1],
                                          extra_data=data)
        except Exception:  # narrowed from a bare except
            res = frame
            cv2.imshow("Test", frame)

        # Advance every tracked meat by one simulation step (skip the dummy).
        for i in range(1, len(meats)):
            meats[i].step()

        k = cv2.waitKey(1) & 0xFF
        if k == ord('q'):
            break
        elif k == ord('p'):
            cv2.waitKey(0)
        elif k == ord('1'):
            switch = 1
        elif k == ord('2'):
            switch = 2
        elif k == ord('3'):
            switch = 3

        delay += 1
        times += [time.time() - start]

    print("Average frame processing time:", np.average(times))

    cap.release()
    # out.release()
    cv2.destroyAllWindows()
예제 #4
0
def main(data_path=DATA_PATH):
    """Minimal streamer pipeline: pull frames from the background streamer,
    detect meat pieces crossing the trigger line, and optionally show the
    annotated frame.

    Keys (display mode): q quit, p pause.

    NOTE(review): data_path is accepted but unused here — presumably the
    streamer was opened on it at module level; confirm.
    """
    global streamer
    # out = cv2.VideoWriter(r'C:\Users\User\Documents\Hylife 2020\Loin Feeder\output.mp4', 0x7634706d, 30, (1680,830))

    if DISPLAY_TOGGLE:
        win = "Window"
        cv2.namedWindow(win)

    delay = 0           # frames since last detection (re-trigger debounce)
    flip_flop = False   # alternates Left/Right side assignment
    flip_flop2 = False  # retained for parity with the fuller variant; unused here

    meats = [0]   # dummy head element; live meats occupy indices >= 1
    queue1 = []   # retained for parity with the fuller variant; unused here
    queue2 = []
    times = []    # per-frame durations for the closing average

    while streamer.running:
        frame_start = time.time()
        ################################################
        ### Video Processing and Meat Identification ###
        ################################################

        force_timer = time.time()

        # Keeps streamer queue size within a lag free range (>0 and <128)
        backlog = streamer.Q.qsize()
        if backlog < 40:
            streamer.sleep_time = 0
        elif backlog > 88:
            streamer.sleep_time = 0.005

        raw = streamer.read()
        frame = bounding_box.scale(raw)
        frame = cv2.copyMakeBorder(frame, 0, 300, 300, 300,
                                   cv2.BORDER_CONSTANT, value=0)

        iH, iW, iD = frame.shape
        box, _, _ = bounding_box.get_bbox(frame)

        # Artificially simulate camera trigger
        if box != 0:
            for idx in range(len(box)):
                if delay <= 50:
                    # Debounce window still open: skip this box.
                    continue
                moments = box[idx][1]
                cX = int(moments["m10"] / moments["m00"])
                cY = int(moments["m01"] / moments["m00"])

                # Trigger band: 10 px horizontal strip at one third height.
                if iH / 3 - 5 < cY < iH / 3 + 5:
                    side = "Right" if flip_flop else "Left"
                    meats.append(meat.Meat(box[idx], side=side,
                                           center=[cX, cY]))
                    flip_flop = not flip_flop
                    delay = 0

        delay += 1

        ###############
        ### Display ###
        ###############

        if DISPLAY_TOGGLE:
            cv2.imshow(win, frame)

        ################
        ### Controls ###
        ################

        # Advance every tracked meat one simulation step (skip the dummy).
        for piece in meats[1:]:
            piece.step()

        if DISPLAY_TOGGLE:
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                break
            if key == ord('p'):
                cv2.waitKey(0)

        times.append(time.time() - frame_start)
        # out.write(frame)

        #Artifically slow the program to the desired frame rate
        # cv2.waitKey(max(global_parameters['FRAME_RATE'] - round((time.time() - force_timer )*1000 + 1), 1))

    print("Average frame time:", np.average(times))
    # out.release()
    streamer.stop()
    cv2.destroyAllWindows()
예제 #5
0
def main(input_path=DATA_PATH):
    """Interactive HSV mask calibration tool.

    For each image in START_INDEX..END_INDEX, shows six trackbars for the
    lower/upper HSV mask bounds and live-redraws the detected boxes and
    contours.  Press 'n' for the next image, 'q' to stop early.  At the end,
    prints the envelope across all visited images: the smallest lower bound
    and the largest upper bound seen per channel.

    Parameters
    ----------
    input_path : str
        Path prefix for the numbered input images (module-level DATA_PATH
        by default); each file is input_path + str(index) + FILE_TYPE.
    """
    # Envelope accumulators per channel: index [1] tracks the minimum lower
    # bound seen, index [0] the maximum upper bound seen.  They are seeded
    # with the opposite extreme so any slider value updates them on the
    # first image.
    h = [0, 180]
    s = [0, 255]
    v = [0, 255]
    # Current slider positions; carried over so each image starts where the
    # previous one left off.
    hlow = 0
    hhigh = 15
    slow = 51
    shigh = 204
    vlow = 51
    vhigh = 255
    flag = False  # set when the user quits early with 'q'

    for img_index in range(START_INDEX, END_INDEX + 1):
        path = input_path + str(img_index) + FILE_TYPE
        img = cv2.imread(path)
        # Robustness: cv2.imread returns None for missing/unreadable files;
        # skip those instead of crashing inside scale().
        if img is None:
            print("Could not read", path)
            continue
        og = bounding_box.scale(img, width=500)

        src = 'Source'
        cv2.namedWindow(src)

        # `n` is a no-op trackbar callback defined at module level.
        cv2.createTrackbar('H Low', src, hlow, 180, n)
        cv2.createTrackbar('H High', src, hhigh, 180, n)
        cv2.createTrackbar('S Low', src, slow, 255, n)
        cv2.createTrackbar('S High', src, shigh, 255, n)
        cv2.createTrackbar('V Low', src, vlow, 255, n)
        cv2.createTrackbar('V High', src, vhigh, 255, n)

        while True:  # idiom: was `while (1)`
            k = cv2.waitKey(1) & 0xFF
            if k == ord('n'):
                break
            if k == ord('q'):
                flag = True
                break
            hlow = cv2.getTrackbarPos('H Low', src)
            hhigh = cv2.getTrackbarPos('H High', src)
            slow = cv2.getTrackbarPos('S Low', src)
            shigh = cv2.getTrackbarPos('S High', src)
            vlow = cv2.getTrackbarPos('V Low', src)
            vhigh = cv2.getTrackbarPos('V High', src)

            LOWER_MASK = np.array([hlow, slow, vlow])
            UPPER_MASK = np.array([hhigh, shigh, vhigh])

            box, contours, _ = bounding_box.get_bbox(og,
                                                     source=src,
                                                     lower_mask=LOWER_MASK,
                                                     upper_mask=UPPER_MASK)
            preview = og.copy()

            if contours != 0:
                # BUGFIX: the original reused `i` for these inner loops,
                # shadowing the outer image index; use distinct names.
                for b in range(len(box)):
                    cv2.drawContours(preview, [box[b][0]], 0,
                                     (255, 255, 255), 3)
                for c in range(len(contours)):
                    # Pseudo-random per-contour color for visual separation.
                    cv2.drawContours(preview, contours, c,
                                     ((c * 17) % 255, (c * 57) % 255,
                                      (c * 3) % 255), 2)

            cv2.imshow(src, preview)

        cv2.destroyAllWindows()

        # Fold the current slider positions into the envelope.
        h[1] = min(h[1], hlow)
        s[1] = min(s[1], slow)
        v[1] = min(v[1], vlow)
        h[0] = max(h[0], hhigh)
        s[0] = max(s[0], shigh)
        v[0] = max(v[0], vhigh)

        if flag:
            break

    # Report the envelope as (min lower, max upper) per channel — output
    # text is identical to the original concatenation-built string.
    st = ""
    st += "H: (" + str(h[1]) + "," + str(h[0]) + ")\n"
    st += "S: (" + str(s[1]) + "," + str(s[0]) + ")\n"
    st += "V: (" + str(v[1]) + "," + str(v[0]) + ")"
    print(st)