Example #1
def run():
    import blobs
    import aruco_beta
    import numpy as np
    import cv2
    from cv2 import aruco
    import matplotlib.pyplot as plt

    from ar_markers import detect_markers
    control = list(range(100))
    frame = cv2.imread("markers_test.png")

    im = cv2.imread("markers_test - blank.png")
    markers, c_markers = detect_markers(frame), detect_markers(im)
    marks = []
    for i, marker in enumerate(markers):
        marker.highlite_marker(frame)
        if i % 2 == 0:  # each marker is detected twice, so keep every other hit
            marks.append(marker.id)

    missing = []

    xy = []
    for i in control:
        if i not in marks:
            missing.append(i)

    for i2, c_marker in enumerate(c_markers):
        if i2 % 2 == 0:
            if int(c_marker.id) in missing:
                xy.append(c_marker.center)
    x = []
    y = []

    for i in xy:
        #print(i[0],i[1])
        x.append(i[0])
        y.append(i[1])
        #print(xy)

    return (blobs.start(x, y, xy))
Example #2
    def stream(self):
        print('Press "q" to quit')
        counter = 0
        while True:
            unique_markers = {}
            marker_ids = []

            frame = self.getMobileFrame(self.url)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            if (counter == 5):
                print(datetime.datetime.now().time())
                if (self.evaluateMarkerSequence()):
                    print("Major diff detected!")
                    print()
                counter = 0
            markers = detect_markers(frame)
            for marker in markers:
                if (
                        marker.id not in marker_ids
                ):  #This removes duplicate markers. Dunno why every marker is registered twice
                    unique_markers[marker.id] = marker.center
                    #unique_markers[marker.id] =(round(marker.center[0]*cm_to_pixelsX,1),round(marker.center[1]*cm_to_pixelsY,1))
                    marker_ids.append(marker.id)
                    marker.highlite_marker(frame)
            self.d_list.append(unique_markers)
            frame = cv2.resize(frame, (1210, 720))
            cv2.imshow('Detection Frame', frame)
            counter += 1
            if cv2.waitKey(1) & 0xFF == ord('q'):
                self.stopper.set()
                break

        cv2.destroyAllWindows()
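A note on the "registered twice" comment above: Example #7 below collapses duplicates by keying a dict on the marker id, which is terser than tracking a separate id list. A minimal sketch of that idea, assuming frame already holds an image:

# Keep one marker per id; later duplicates simply overwrite earlier ones.
unique_markers = {marker.id: marker for marker in detect_markers(frame)}.values()
for marker in unique_markers:
    marker.highlite_marker(frame)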
Example #3
def main():
    motor_key = 115  # 115 == ord('s'); presumably the initial "stop" command
    while True:
        frame_captured, frame = capture.read()
        frame = cv2.flip(frame, -1)
        frame = cv2.resize(frame, (320, 240))
        #undistort
        undistorted_image = undistort(frame)

        #ar_marker
        markers = detect_markers(undistorted_image)
        for marker in markers:
            marker.highlite_marker(undistorted_image)

        cv2.imshow('Frame', undistorted_image)
        key = cv2.waitKey(1) & 0xFF

        print(key)

        if key == 27:
            break
        elif key == ord("\t"):
            captured(undistorted_image)
        elif key in MOTOR_SPEEDS:
            motor_key = key
            motor(motor_key)
        elif key == 255:  # no key pressed this tick: repeat the last motor command
            motor(motor_key)

        # When everything done, release the capture
    capture.release()
    cv2.destroyAllWindows()
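MOTOR_SPEEDS, motor() and captured() are defined elsewhere in this project. Two details worth noting: motor_key starts at 115, which is ord('s'), and key code 255 means no key was pressed, so the last command is repeated. A plausible sketch of the key table, with illustrative bindings that are assumptions rather than the project's actual values:

# Hypothetical key-to-speed table; keys are ASCII codes from cv2.waitKey.
MOTOR_SPEEDS = {
    ord('w'): 'forward',  # assumed binding
    ord('s'): 'stop',     # assumed binding
    ord('a'): 'left',     # assumed binding
    ord('d'): 'right',    # assumed binding
}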
Example #4
def scan(img,mnum,ccol):
    global card
    global cardz
    markers = detect_markers(img)
    for marker in markers:
        marker.highlite_marker(img)
        #print(marker.id)
        if marker.id>=10:
            colz="black"
            num=int(str(marker.id)[1])
        else:
            colz="white"
            num=int(str(marker.id))
        palo=Pal(marker.id,num,marker.center,colz)
        paloz=Pal(marker.id,0,0,0)
        #if not (palo in card ):
        if not paloz in cardz:
            card.append(palo)
            cardz.append(paloz)
    #cv2.imshow('AR', img)
    card = sorted(card, key=lambda x:x.Cen[0], reverse=False)
    lens=len(cardz)    
    print(lens)
    if lens==4:
        for i in range(len(card)):
            #if not card[i].Num in mnum:
            mnum.append(card[i].Num)
            ccol.append(card[i].colz)
            if len(mnum)==4:
                break
    return mnum,ccol,lens
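Pal is not shown in this example; from the way it is used above (fields Num, Cen, colz, and the `paloz in cardz` membership test), a minimal sketch could look like the following. The __eq__ on the marker id is an assumption, but the duplicate check above only works if Pal compares by value:

class Pal:
    # Minimal sketch of the card record assumed by scan().
    def __init__(self, Id, Num, Cen, colz):
        self.Id = Id
        self.Num = Num
        self.Cen = Cen      # marker center, used for left-to-right sorting
        self.colz = colz    # "black" or "white"

    def __eq__(self, other):
        # Compare by marker id so `paloz in cardz` detects duplicates.
        return isinstance(other, Pal) and self.Id == other.Id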
Example #5
def find_mat_marker():
    capture = cv2.VideoCapture(1)
    # capture = cv2.VideoCapture('markcheck4.mp4')
    p1x, p2x, p3x, p4x, p1y, p2y, p3y, p4y = 0, 0, 0, 0, 0, 0, 0, 0

    if capture.isOpened():
        frame_captured, frame = capture.read()
    else:
        frame_captured = False
    while frame_captured:
        markers = detect_markers(frame)
        for marker in markers:
            # print("ID {}'s position is {}".format(marker.id, marker.center))
            if marker.id == 185:
                p1x, p1y = marker.center[0], marker.center[1]
                print("p1(x,y) : ", p1x, p1y)
            elif marker.id == 992:
                p2x, p2y = marker.center[0], marker.center[1]
                print("p2(x,y) : ", p2x, p2y)
            elif marker.id == 2808:
                p3x, p3y = marker.center[0], marker.center[1]
                print("p3(x,y) : ", p3x, p3y)
            elif marker.id == 3540:
                p4x, p4y = marker.center[0], marker.center[1]
                print("p4(x,y) : ", p4x, p4y)

        cv2.imshow("Test Frame", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        frame_captured, frame = capture.read()
    capture.release()
    cv2.destroyAllWindows()

    return p1x, p2x, p3x, p4x, p1y, p2y, p3y, p4y
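The four centers returned here mark the corners of a mat; a typical next step (not part of this example, so treat it as a sketch that assumes the points are ordered top-left, top-right, bottom-right, bottom-left) is a perspective warp:

import cv2
import numpy as np

def warp_mat(frame, p1, p2, p3, p4, out_w=800, out_h=600):
    # Map the four marker centers onto an out_w x out_h rectangle.
    src = np.float32([p1, p2, p3, p4])
    dst = np.float32([(0, 0), (out_w, 0), (out_w, out_h), (0, out_h)])
    M = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(frame, M, (out_w, out_h))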
Example #6
def main(q):

    #for capture every second
    checktimeBefore = int(time.strftime('%S'))

    for frame in camera.capture_continuous(rawCapture,
                                           format="bgr",
                                           use_video_port=True):
        # grab the raw NumPy array representing the image, then initialize the timestamp
        # and occupied/unoccupied text

        image = frame.array

        #undistort
        undistorted_image = undistort(image)

        #cascade
        cascade(undistorted_image)

        #AR marker
        markers = ar_markers.detect_markers(undistorted_image)
        for marker in markers:
            print('marker :', marker.id)  #, marker.center)
            marker.highlite_marker(undistorted_image)

        # show the frame
        cv2.imshow("Frame", undistorted_image)
        key = cv2.waitKey(1) & 0xFF

        # clear the stream in preparation for the next frame
        rawCapture.truncate(0)

        # set switch to 1 to capture negative images every second
        switch = 0
        checktime = int(time.strftime('%S'))
        if checktime - checktimeBefore >= 1 and switch == 1:
            captured(undistorted_image)
            checktimeBefore = checktime

        # Threading
        evt = threading.Event()
        data = undistorted_image
        q.put((data, evt))

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
        elif key == ord("\t"):
            captured(undistorted_image)
Example #7
File: hsr.py Project: royf/SkillHub
 def apply(hsr, arg):
     while True:
         rospy.sleep(5.)
         img = hsr.subscribers['head_cam'].callback.data
         _, img = cv2.threshold(img, 32, 255, cv2.THRESH_BINARY)
         markers = {marker.id: marker for marker in ar_markers.detect_markers(img)}.values()
         for marker in markers:
             marker.draw_contour(img)
         cv2.imshow('head', img)
         cv2.waitKey(3)
         if len(markers) == 2:
             return sort_markers(sum([list(marker.contours.flat) for marker in markers], []))
         else:
             print('found {} markers'.format(len(markers)))
             rospy.sleep(1.)
Example #8
def main(argv):
    # print the name of the image file to scan for markers
    print("the image file name is " + argv)
    # load the image from the file
    frame = cv2.imread(argv, cv2.IMREAD_UNCHANGED)
    # find the marker positions in the image
    markers = detect_markers(frame)
    # print how many markers were found
    print("{} markers found.".format(len(markers)))
    # print information for each marker
    for marker in markers:
        # print marker.id and the marker's center position (marker.center)
        print("ID {}'s position is {}".format(marker.id, marker.center))
        # print the marker's corners neatly; it is a rectangle, so four (x, y) points
        print("contours are :")
        for pos in marker.contours:
            print("\t {}".format(pos))
Example #9
def main(argv):
    cam_id = 0  #default 0
    fps_view = False
    try:
        opts, args = getopt.getopt(argv, "hc:f", ["camera_id="])
    except getopt.GetoptError:
        print('default setting : cam id = 0, fps indication = disabled')
        opts = []

    for opt, arg in opts:
        if opt == '-h':
            print('test.py -c <camera_id> -f')
            sys.exit()
        elif opt in ("-c", "--camera"):
            cam_id = int(arg)
        elif opt in ("-f", "--fps"):
            fps_view = True

    print('Press "q" to quit')
    capture = cv2.VideoCapture(cam_id)

    if capture.isOpened():  #try to get the first frame
        frame_captured, frame = capture.read()

    else:
        print('Failed to Open Camera %d' % cam_id)
        frame_captured = False

    while frame_captured:
        frame_captured, frame = capture.read()

        markers = detect_markers(frame)

        for marker in markers:
            marker.highlite_marker(frame)
            print('Marker ID:', marker.id)

        if fps_view:
            putFps(frame)

        cv2.imshow('Test Frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    #When everything done, release the capture
    capture.release()
    cv2.destroyAllWindows()
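putFps() is not shown here; a minimal sketch of such a helper, assuming it overlays frames-per-second derived from the wall-clock time between calls:

import time
import cv2

_last_time = [time.time()]

def putFps(frame):
    # Estimate FPS from the time since the previous call and draw it.
    now = time.time()
    fps = 1.0 / max(now - _last_time[0], 1e-6)
    _last_time[0] = now
    cv2.putText(frame, 'FPS: {:.1f}'.format(fps), (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)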
Example #10
    def do_POST(self):
        self.send_response(200)
        self.send_header('X-Server2Client', '123')
        self.end_headers()

        data = self.rfile.read(int(self.headers['Content-Length']))
        data_numpy = np.asarray(bytearray(data), dtype="uint8")
        img = cv2.imdecode(data_numpy, cv2.IMREAD_ANYCOLOR)
        markers = detect_markers(img)
        mark_result = ' '
        if markers:
            for marker in markers:
                print("detected", marker.id)
                marker.highlite_marker(img)
                if marker.id == 114:
                    mark_result = 'left'
                if marker.id == 922:
                    mark_result = 'right'
                if marker.id == 2537:
                    mark_result = 'stop'
        
        cas_result = cascade(img)

        masked_img = select_white(img, 120)

        k = path_decision(masked_img, 0.25)
        y1, x1 = masked_img.shape
        x1 = int(x1/2)
        x2 = int(-k[2] * k[1] + x1)
        y2 = y1-k[2]
        # cv2.line(image,(x1,y1),(x2,y2),(0,0,255),2)  
        cv2.line(masked_img,(x1,y1),(x2,y2),(100),2)
        
        with open('rawvideo.jpg', 'wb') as File:
            File.write(data)

        with open('mask.jpg', 'wb') as File:
            result, masked_data = cv2.imencode('.jpg', masked_img, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
            File.write(masked_data)
            print('m : {}'.format(k[1]))
            print('forward : {}'.format(k[2]))

        send = {"action": k[0], "markers" : mark_result, "cascade" : cas_result}        
        self.wfile.write(
            bytes(json.dumps(send), encoding='utf8'))
        self.wfile.write(b'\n')
Example #11
    def camera_callback(self, data):
        #print("here")

        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
        (row, cols, channels) = cv_image.shape

        im = cv_image

        markers = detect_markers(im)
        for marker in markers:
            marker.highlite_marker(im)
            print("found")
            print(rospy.Time.now())
        print(markers)

        # display the result
        cv2.imshow("img", im)

        cv2.waitKey(100)
Example #12
    def __markers_pos(self,
                      num_of_markers: int,
                      display: bool = False) -> Dict[int, Tuple[int]]:
        cap = self.capture
        # Check if the camera is opened and try to get the first frame
        if cap.isOpened():
            frame_captured, frame = cap.read()
        else:
            raise MarkerRecognitionFailure()

        # Init a dictionary to store positions of AR markers.
        # Format: {marker's ID: marker's location}
        pos = dict()
        print('Vision: start markers recognition.')
        while frame_captured and len(pos) < num_of_markers:  # break when all markers are recognized
            markers = detect_markers(frame)
            for marker in markers:
                if marker.id not in pos:
                    pos[marker.id] = tuple(marker.center)
                marker.highlite_marker(frame)

            # Display the feed with marker highlighting
            # if the display flag is set to true.
            if display:
                cv2.imshow('Test Frame', frame)
                # Break when 'q' is pressed (& 0xFF required for 64-bit architecture)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
            else:
                time.sleep(0.01)
            frame_captured, frame = cap.read()

        if len(pos) < num_of_markers:
            raise MarkerRecognitionFailure()
        return pos
Example #13
 def stream(self):
     print('Press "q" to quit')
     counter = 0
     while not self.stopper.is_set():
         unique_markers = {}
         marker_ids = []
         frame = self.getMobileFrame(self.url)
         if (counter == 5):
             self.evaluateMarkerSequence()
             counter = 0
         markers = detect_markers(frame)
         for marker in markers:
             if (marker.id
                     not in marker_ids):  # This removes duplicate markers.
                 unique_markers[marker.id] = marker.center
                 marker_ids.append(marker.id)
                 marker.highlite_marker(frame)
         self.d_list.append(unique_markers)
         frame = cv2.resize(frame, (1210, 720))
         cv2.imshow('Detection Frame', frame)
         counter += 1
         if cv2.waitKey(1) & 0xFF == ord('q'):
             print("Close the kinect windows to terminate the program")
     cv2.destroyAllWindows()
Example #14
def marker(img):
    # NOTE: the self.wfile writes below assume this code runs inside the
    # HTTP request handler it was lifted from (compare Example #20).
    id = 0
    markers = detect_markers(img)
    for marker in markers:
        id=marker.id
        print('detected',marker.id)
        marker.highlite_marker(img)
        
    if id == 144:
        key = 'a'
        self.wfile.write(bytes(json.dumps(key), encoding='utf8'))
        self.wfile.write(b'\n')
        #cv2.waitKey(1)
        time.sleep(3)  # turn duration
        key = 'w'
        self.wfile.write(bytes(json.dumps(key), encoding='utf8'))
        self.wfile.write(b'\n')
        #cv2.waitKey(1)
        time.sleep(2)  # forward duration
    elif id == 922:
        key = 'd'
        self.wfile.write(bytes(json.dumps(key), encoding='utf8'))
        self.wfile.write(b'\n')
        #cv2.waitKey(1)
        time.sleep(3)  # turn duration
        key = 'w'
        self.wfile.write(bytes(json.dumps(key), encoding='utf8'))
        self.wfile.write(b'\n')
        #cv2.waitKey(1)
        time.sleep(2)  # forward duration
    elif id == 2537:
        key = 's'
    else:
        key = 'blank'
    id=0
    return key
Example #15
    b = bottom_right[0] - bottom_left[0]
    c = bottom_left[1] - top_left[1]
    d = bottom_right[1] - top_right[1]
    print(a, b, c, d)
    x1 = coordinates[0] / a * horizontalDist
    x2 = coordinates[0] / b * horizontalDist
    y1 = coordinates[1] / c * verticalDist
    y2 = coordinates[1] / d * verticalDist
    print(x1, x2, y1, y2)

    return (int(np.mean([x1, x2])), int(np.mean([y1, y2])))


test_image = cv.imread("images/Scanned_Collin.jpg")

markers = ar.detect_markers(test_image)
print(test_image.shape)
print(markers)
for marker in markers:
    marker.highlite_marker(test_image)

print()
test_point = (300, 400)
cv.circle(test_image, test_point, 10, (255, 0, 0))

point = getPaperCoordinates(test_point, markers)
print(point)
cv.circle(test_image, point, 10, (0, 255, 0))

cv.imshow('test_image', test_image)
cv.waitKey(0)
Example #16
    def __init__(self):
        # initialize
        rospy.init_node('GoForward', anonymous=False)

        # tell user how to stop TurtleBot
        rospy.loginfo('To stop TurtleBot press "q"')

        # What function to call when you ctrl + c
        rospy.on_shutdown(self.shutdown)

        # Create a publisher which can "talk" to TurtleBot and tell it to move
        # Tip: You may need to change cmd_vel_mux/input/navi to /cmd_vel if you're not using TurtleBot2
        self.cmd_vel = rospy.Publisher('cmd_vel_mux/input/navi',
                                       Twist,
                                       queue_size=10)

        #TurtleBot will stop if we don't keep telling it to move.  How often should we tell it to move? 10 HZ
        r = rospy.Rate(10)

        # Twist is a datatype for velocity
        move_cmd = Twist()
        # let's go forward at 0.3 m/s
        move_cmd.linear.x = 0.3
        # let's turn at 0 radians/s
        #move_cmd.angular.z = 0

        #let's turn at 90 deg/s
        turn_cmd = Twist()
        turn_cmd.linear.x = 0
        turn_cmd.angular.z = radians(90)
        #90 deg/s in radians/s

        #let's turn at -90 deg/s
        turn_cmd2 = Twist()
        turn_cmd2.linear.x = 0
        turn_cmd2.angular.z = radians(-90)
        #-90 deg/s in radians/s

        #Capture the workstation video input
        capture = cv2.VideoCapture(0)

        if capture.isOpened():  # try to get the first frame
            frame_captured, frame = capture.read()
        else:
            frame_captured = False

        global i
        i = 0
        while frame_captured:

            print("---------------------")
            markers = detect_markers(frame)
            if markers:  # proceed only when at least one marker was found
                #print("detected")

                for marker in markers:
                    print(
                        "111111111111111111111111111111111111111111111111111111111111111111111111111111111111"
                    )
                    marker.highlite_marker(frame)
                    print(marker.id)
                    if (marker.id == 1803):
                        if (i == 1):
                            print("break out")
                            continue
                        print(
                            "***************************up  right**********************************"
                        )
                        i = 1
                        for x in range(0, 50):
                            self.cmd_vel.publish(move_cmd)
                            print("moving forward")
                            # wait for 0.1 seconds (10 HZ) and publish again
                            r.sleep()
                        for x in range(0, 10):
                            self.cmd_vel.publish(turn_cmd2)
                            print("ROUND MOVEEE")
                            r.sleep()
                        for x in range(0, 60):
                            self.cmd_vel.publish(move_cmd)
                            print("moving forward")
                            # wait for 0.1 seconds (10 HZ) and publish again
                            r.sleep()

                    elif (marker.id == 123):
                        print(
                            "***************************round and up***************************************"
                        )
                        if (i == 2):
                            print("break out")
                            continue
                        i = 2
                        for x in range(0, 25):
                            self.cmd_vel.publish(turn_cmd)
                            print("ROUND MOVEEE")
                            r.sleep()
                        for x in range(0, 80):
                            self.cmd_vel.publish(move_cmd)
                            print("moving forward")
                            # wait for 0.1 seconds (10 HZ) and publish again
                            r.sleep()

            cv2.imshow('Test Frame', frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            frame_captured, frame = capture.read()

        # When everything done, release the capture
        capture.release()
        cv2.destroyAllWindows()
        self.shutdown()
Example #17
def main():
        
    # construct the argument parse 
    parser = argparse.ArgumentParser(
        description='Script to run MobileNet-SSD object detection network ')
    parser.add_argument("--video", help="path to video file. If empty, camera's stream will be used")
    parser.add_argument("--prototxt", default="MobileNetSSD_deploy.prototxt",
                                    help='Path to text network file: '
                                        'MobileNetSSD_deploy.prototxt for Caffe model or '
                                        )
    parser.add_argument("--weights", default="MobileNetSSD_deploy.caffemodel",
                                    help='Path to weights: '
                                        'MobileNetSSD_deploy.caffemodel for Caffe model or '
                                        )
    parser.add_argument("--thr", default=0.2, type=float, help="confidence threshold to filter out weak detections")
    args = parser.parse_args()

    # Labels of Network.
    classNames = { 0: 'background',
        1: 'aeroplane', 2: 'bicycle', 3: 'bird', 4: 'boat',
        5: 'bottle', 6: 'bus', 7: 'car', 8: 'cat', 9: 'chair',
        10: 'cow', 11: 'diningtable', 12: 'dog', 13: 'horse',
        14: 'motorbike', 15: 'person', 16: 'pottedplant',
        17: 'sheep', 18: 'sofa', 19: 'train', 20: 'tvmonitor' }

    # Open video file or capture device. 
    if args.video:
        cap = cv2.VideoCapture(args.video)
    else:
        cap = cv2.VideoCapture(0)

    #Load the Caffe model 
    net = cv2.dnn.readNetFromCaffe(args.prototxt, args.weights)

    while True:
        #switch init
        switch = 0

        # Capture frame-by-frame
        ret, frame = cap.read()
        size = (120,90)
        frame = cv2.flip(frame,-1)
        frame_resized = cv2.resize(frame,size) # resize frame for prediction
        frame = undistort(frame)
        # MobileNet expects fixed input dimensions (normally 300x300);
        # this example feeds the smaller `size` defined above instead.
        # A scale factor and a mean subtraction (127.5, 127.5, 127.5)
        # normalize the input; after this call the "blob" has the shape
        # (1, 3, size[1], size[0])
        blob = cv2.dnn.blobFromImage(frame_resized, 0.007843, size, (127.5, 127.5, 127.5), False)
        #Set to network the input blob 
        net.setInput(blob)
        #Prediction of network
        detections = net.forward()

        #Size of the resized frame (cols x rows)
        cols = frame_resized.shape[1] 
        rows = frame_resized.shape[0]

        #For get the class and location of object detected, 
        # There is a fix index for class, location and confidence
        # value in @detections array .
        for i in range(detections.shape[2]):
            confidence = detections[0, 0, i, 2] #Confidence of prediction 
            if confidence > args.thr: # Filter prediction 
                class_id = int(detections[0, 0, i, 1]) # Class label

                # Object location 
                xLeftBottom = int(detections[0, 0, i, 3] * cols) 
                yLeftBottom = int(detections[0, 0, i, 4] * rows)
                xRightTop   = int(detections[0, 0, i, 5] * cols)
                yRightTop   = int(detections[0, 0, i, 6] * rows)

                # Factors to scale detections back to the original frame
                # (rows and cols come from frame_resized.shape above)
                heightFactor = frame.shape[0]/rows
                widthFactor = frame.shape[1]/cols
                # Scale object detection to frame
                xLeftBottom = int(widthFactor * xLeftBottom) 
                yLeftBottom = int(heightFactor * yLeftBottom)
                xRightTop   = int(widthFactor * xRightTop)
                yRightTop   = int(heightFactor * yRightTop)
                # Draw location of object  
                cv2.rectangle(frame, (xLeftBottom, yLeftBottom), (xRightTop, yRightTop),
                            (0, 255, 0))

                # Draw label and confidence of prediction in frame resized
                if class_id in classNames:
                    label = classNames[class_id] + ": " + str(confidence)
                    labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)

                    yLeftBottom = max(yLeftBottom, labelSize[1])
                    cv2.rectangle(frame, (xLeftBottom, yLeftBottom - labelSize[1]),
                                        (xLeftBottom + labelSize[0], yLeftBottom + baseLine),
                                        (255, 255, 255), cv2.FILLED)
                    cv2.putText(frame, label, (xLeftBottom, yLeftBottom),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))

                    print(label) #print class and confidence
                    switch = 1

        #AR marker
        try:
            markers = ar_markers.detect_markers(frame)  # frame was undistorted above
            for marker in markers:
                marker.highlite_marker(frame)
            if markers and marker.id > 0:  # id of the last detected marker
                switch = 2
        except Exception:
            pass

        #Threading                
        evt = threading.Event()
        #mobilenet
        if switch == 1:
            qdata = label
        elif switch == 2:
            qdata = marker.id
Example #18
    frame_captured = False

# CALIBRATION VIA AR MARKERS
timer = 0
found = [False, False, False, False]
markers = [
    HammingMarker(0),
    HammingMarker(0),
    HammingMarker(0),
    HammingMarker(0)
]
centers = [(0, 0), (0, 0), (0, 0), (0, 0)]

while frame_captured:
    frame_orig = frame.copy()  # type: object
    actual_markers = detect_markers(frame)

    for marker in actual_markers:  # type: HammingMarker
        if marker.id == 1:
            markers[0] = marker
            found[0] = True
            centers[0] = marker.center
        if marker.id == 3:
            markers[1] = marker
            found[1] = True
            centers[1] = marker.center
        if marker.id == 5:
            markers[2] = marker
            found[2] = True
            centers[2] = marker.center
        if marker.id == 7:
            markers[3] = marker
            found[3] = True
            centers[3] = marker.center
Example #19
 def detect_marker(self):
     markers = detect_markers(self.img_gray)
     for marker in markers:
         print(marker.id)
         return marker.id
     return 0
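Note the return inside the loop: only the first detected marker's id is reported, and 0 means nothing was found. If every id were wanted instead, a one-line variant (a sketch, not the project's code) would be:

def detect_marker_ids(img_gray):
    # Return the ids of all detected markers instead of just the first.
    return [marker.id for marker in detect_markers(img_gray)]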
Example #20
    def do_POST(self):
        print(self.headers['X-Client2Server'])

        self.send_response(200)
        self.send_header('X-Server2Client', '123')
        self.end_headers()

        data = self.rfile.read(int(self.headers['Content-Length']))
        if DISPLAY:
            data = np.asarray(bytearray(data), dtype="uint8")
            img = cv2.imdecode(data, cv2.IMREAD_ANYCOLOR)
            
            img= undistort(img)
            
            id = 0
            markers = detect_markers(img)
            for marker in markers:
                id=marker.id
                print('detected',marker.id)
                marker.highlite_marker(img)
        
            if id == 144:
                result = 'a'
                self.wfile.write(bytes(json.dumps(result), encoding='utf8'))
                self.wfile.write(b'\n')
                #cv2.waitKey(1)
                sleep(3)  # turn duration
                result = 'w'
                self.wfile.write(bytes(json.dumps(result), encoding='utf8'))
                self.wfile.write(b'\n')
                #cv2.waitKey(1)
                sleep(2)  # forward duration
            elif id == 922:
                result = 'd'
                self.wfile.write(bytes(json.dumps(result), encoding='utf8'))
                self.wfile.write(b'\n')
                #cv2.waitKey(1)
                sleep(3)  # turn duration
                result = 'w'
                self.wfile.write(bytes(json.dumps(result), encoding='utf8'))
                self.wfile.write(b'\n')
                #cv2.waitKey(1)
                sleep(2)  # forward duration
            elif id == 2537:
                result = 's'
            else:
                gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                cascade_obj = objs_cascade.detectMultiScale(gray_image, scaleFactor=1.02, minNeighbors=5, minSize=(16,16))
              
                for (x_pos, y_pos, width, height) in cascade_obj:
            
                    if(width>=40):
                        cv2.rectangle(img, (x_pos, y_pos), (x_pos+width, y_pos+height), (255, 255, 255), 2)
                        cv2.putText(img, 'Stop', (x_pos, y_pos-10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
                        result="s"
                        self.wfile.write(bytes(json.dumps(result), encoding='utf8'))
                        self.wfile.write(b'\n')
                        #cv2.waitKey(1)
                        sleep(6)
                        result ='w'
                    
                    else:
                        white_img = select_white(img, 160)
                        result, a1, a2 = set_path3(white_img,0.25)
                        y1, x1 = white_img.shape  # the white mask is single-channel
                        x1 = int(x1/2)
                        x2 = int(-a2 * a1 + x1)
                        y2 = y1-a2 
                        cv2.line(img,(x1,y1),(x2,y2),(255),2)
    
            
            cv2.imshow("Processed", img)
            key = result
            
            print(key)
                         
            #data = {"action": key}
            print(time(), 'Sending', data)
            self.wfile.write(bytes(json.dumps(key), encoding='utf8'))
            self.wfile.write(b'\n')
            #cv2.imshow('image', img)
            cv2.waitKey(1)

        else:
            with open('uploaded.jpg', 'wb') as File:
                File.write(data)
                print('Written to file')
Example #21
    def stream(self):
        print('Press "q" to quit')
        counter = 0
        d_list = []
        prev_reading = {}
        current_reading = {}
        while True:
            #unique_markers = []
            unique_markers = {}
            marker_ids = []

            frame = self.getMobileFrame(self.url)
            #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            if (counter == 5):
                full_d = {}
                for d in d_list:  #d_list is a list of the 5 most recent readings
                    for d_key in d:
                        if (not d_key in full_d):
                            full_d[d_key] = [
                            ]  #full_d is a single dict holding all info from the 5 most recent readings.
                        full_d[d_key].append(d[d_key])
                for d_key in full_d:
                    c = Counter(full_d[d_key])
                    value = c.most_common()[0][
                        0]  #Get the most frequent value from the 5 most recent readings
                    current_reading[
                        d_key] = value  #current_reading holds the most frequent values from the 5 most recent readings.
                diff_list = []
                major_diff_detected = False
                for diff in list(dictdiffer.diff(prev_reading,
                                                 current_reading)):
                    if (diff[0] == 'change'):
                        if (not isInBoundary(
                                diff[2][0], diff[2][1],
                                4)):  #diff in 4 pixels mean actual movement
                            diff_list.append((diff[1], diff[2]))
                            major_diff_detected = True
                    if (diff[0] == 'add'):
                        print("Added " + str(diff[2]))
                    if (diff[0] == 'remove'):
                        print("Removed " + str(diff[2]))
                if (major_diff_detected):
                    print("Change detected on: " + str(diff_list))
                prev_reading = current_reading
                current_reading = {}
                counter = 0
                d_list = []
            markers = detect_markers(frame)
            for marker in markers:
                if (
                        marker.id not in marker_ids
                ):  #This removes duplicate markers. Dunno why every marker is registered twice
                    #unique_markers.append(marker)
                    unique_markers[marker.id] = marker.center
                    marker_ids.append(marker.id)
                    marker.highlite_marker(frame)
            print()
            d_list.append(unique_markers)
            frame = cv2.resize(frame, (1210, 720))
            cv2.imshow('Detection Frame', frame)
            counter += 1
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cv2.destroyAllWindows()
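The five-reading smoothing above amounts to a per-marker majority vote with collections.Counter; isolated as a sketch:

from collections import Counter

def most_frequent(values):
    # Return the most common value among the recent readings.
    return Counter(values).most_common(1)[0][0]

# e.g. most_frequent([(10, 12), (10, 12), (11, 13)]) -> (10, 12)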
Example #22
        aligned_frames = align.process(frames)

        # Get aligned frames
        # aligned_depth_frame is a 640x480 depth image
        aligned_depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()

        # Validate that both frames are valid
        if not aligned_depth_frame or not color_frame:
            continue

        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        # detect AR markers
        markers = detect_markers(color_image)
        for marker in markers:
            marker.highlite_marker(color_image)
            center = marker.get_center_cood()
            img = cv2.circle(color_image, center, 5, (0, 0, 255), -1)
            distance = aligned_depth_frame.get_distance(
                list(center)[0],
                list(center)[1])
            #print('Distance : {:.3f}m'.format(distance))
            marker.put_distance(color_image, round(distance, 3))

        cv2.imshow('test frame', color_image)

        # Remove background - Set pixels further than clipping_distance to grey
        # grey_color = 153
        # depth_image_3d = np.dstack((depth_image,depth_image,depth_image)) #depth image is 1 channel, color is 3 channels
Example #23
def measure_length(url, user):
    # construct the argument parse and parse the arguments
    # ap = argparse.ArgumentParser()
    # ap.add_argument("-i", "--image", required=True,
    # 	help="path to the input image")
    # ap.add_argument("-w", "--width", type=float, required=True,
    # 	help="width of the left-most object in the image (in inches)")
    # args = vars(ap.parse_args())

    # load the image, convert it to grayscale, and blur it slightly
    flag = True
    img_url = url
    print("image url :", img_url)
    image = cv2.imread('./' + img_url)

    markers = detect_markers(image)
    marker_width = 15

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (7, 7), 0)
    # cv2.imshow("gaussian", gray)

    # ret, gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # cv2.imshow("binary", gray)

    # perform edge detection, then perform a dilation + erosion to
    # close gaps in between object edges
    edged = cv2.Canny(gray, 50, 100)
    edged = cv2.dilate(edged, None, iterations=1)
    edged = cv2.erode(edged, None, iterations=1)

    # find contours in the edge map
    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    print(len(cnts))

    # sort the contours from left-to-right and initialize the
    # 'pixels per metric' calibration variable
    (cnts, _) = contours.sort_contours(cnts, method="top-to-bottom")
    print(len(cnts))

    cnts = list(cnts)
    try:
        cnts.insert(0, markers[0].contours)
    except Exception:
        print("marker detection fail")
        flag = False
        return flag
        # sys.exit()

    cnts = tuple(cnts)
    print(len(cnts))

    pixelsPerMetric = None

    # loop over the contours individually
    result_list = list()
    for idx, c in enumerate(cnts):

        # if the contour is not sufficiently large, ignore it
        if cv2.contourArea(c) < 100:
            continue

        # compute the rotated bounding box of the contour
        orig = image.copy()
        box = cv2.minAreaRect(c)
        box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
        box = np.array(box, dtype="int")

        # order the points in the contour such that they appear
        # in top-left, top-right, bottom-right, and bottom-left
        # order, then draw the outline of the rotated bounding
        # box
        box = perspective.order_points(box)
        cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)

        # loop over the original points and draw them
        for (x, y) in box:
            cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)

        # unpack the ordered bounding box, then compute the midpoint
        # between the top-left and top-right coordinates, followed by
        # the midpoint between bottom-left and bottom-right coordinates
        (tl, tr, br, bl) = box
        (tltrX, tltrY) = midpoint(tl, tr)
        (blbrX, blbrY) = midpoint(bl, br)

        # compute the midpoint between the top-left and bottom-left points,
        # followed by the midpoint between the top-right and bottom-right
        (tlblX, tlblY) = midpoint(tl, bl)
        (trbrX, trbrY) = midpoint(tr, br)

        # draw the midpoints on the image
        cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
        cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
        cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
        cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)

        # draw lines between the midpoints
        cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),
                 (255, 0, 255), 2)
        cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),
                 (255, 0, 255), 2)

        # compute the Euclidean distance between the midpoints
        dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
        dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))

        # if the pixels per metric has not been initialized, then
        # compute it as the ratio of pixels to supplied metric
        # (in this case, inches)
        if pixelsPerMetric is None:
            pixelsPerMetric = dB / marker_width

        # compute the size of the object
        dimA = dA / pixelsPerMetric
        dimB = dB / pixelsPerMetric

        # draw the object sizes on the image

        # show the output image
        if (float(format(dimA)) > 25 or float(format(dimB)) > 25):
            if idx == 0:
                continue

            cv2.putText(orig, "width: {:.1f}cm".format(dimB), (0, 100),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 0, 0), 2)
            cv2.putText(orig, "height: {:.1f}cm".format(dimA), (0, 150),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 0, 0), 2)

            save_path = date_upload_measured()
            print('save path:' + save_path)
            cv2.imwrite('./media/' + save_path, orig)

            img_measured = MeasureHistory.objects.create(user_idx=user,
                                                         image=save_path,
                                                         width=dimB,
                                                         height=dimA)
            img_measured.save()
            img_measured.msg = '성공'  # "success"
            img_measured.code = 100
            result_list.append(img_measured)

    return result_list
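The calibration step deserves a worked number: the reference marker is declared 15 units wide (marker_width = 15, centimeters judging by the labels drawn later), so if its bounding box spans 300 pixels, pixelsPerMetric is 300 / 15 = 20 px/cm, and an object measuring 500 px comes out as 500 / 20 = 25 cm:

marker_width_cm = 15.0   # known physical width of the reference marker
marker_width_px = 300.0  # measured width in the image (illustrative value)
pixels_per_metric = marker_width_px / marker_width_cm  # 20.0 px/cm
object_px = 500.0
print(object_px / pixels_per_metric)  # 25.0 cm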
Example #24
from __future__ import print_function

try:
    import cv2
    from ar_markers import detect_markers
except ImportError:
    raise Exception('Error: OpenCV or ar_markers is not installed')


if __name__ == '__main__':
    print('Press "q" to quit')
    capture = cv2.VideoCapture(0)

    if capture.isOpened():  # try to get the first frame
        frame_captured, frame = capture.read()
    else:
        frame_captured = False
    while frame_captured:
        markers = detect_markers(frame)
        for marker in markers:
            marker.highlite_marker(frame)
            print(marker)
        cv2.imshow('Test Frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        frame_captured, frame = capture.read()
    # When everything done, release the capture
    capture.release()
    cv2.destroyAllWindows()
Example #25
def main(q):
    #for capture every second
    checktimeBefore = int(time.strftime('%S'))

    for frame in camera.capture_continuous(rawCapture,
                                           format="bgr",
                                           use_video_port=True):
        # grab the raw NumPy array representing the image, then initialize the timestamp
        # and occupied/unoccupied text

        image = frame.array

        #undistort
        undistorted_image = undistort(image)

        #--------------motor control--------------

        #decision (action, round(m,4), forward, left_line, right_line, center, direction)
        masked_image = select_white(undistorted_image, 160)
        result = set_path3(masked_image)

        #line marker
        line = []
        #right
        for j in range(result[2]):
            left_coord = (+result[5] + 1 + result[3][j], 239 - j)
            line.append(left_coord)
            undistorted_image = cv2.line(undistorted_image, left_coord,
                                         left_coord, (0, 255, 0), 4)

        #left
        for j in range(result[2]):
            right_coord = (result[5] + 1 - result[4][j], 239 - j)
            line.append(right_coord)
            undistorted_image = cv2.line(undistorted_image, right_coord,
                                         right_coord, (0, 255, 0), 4)

        #slope
        try:
            undistorted_image = cv2.line(undistorted_image, result[6][0],
                                         result[6][1], (0, 0, 255), 4)
        except:
            pass
        #decision motor
        direction = motor(result[0], result[1])
        if result[0] == 'a' or result[0] == 'd':
            time.sleep(0.5)
        print(result[2])

        #e-stop
        if result[2] > 230:
            motor('s', 0)
        #----------------------------

        #ultrasonic
        ultra = ultrasonic()
        if ultra > 0 and ultra < 12:
            #print('stop')
            direction = motor('s', 0)
            print(ultra)

        #cascade
        #cas = len(cascade(undistorted_image))
        #if cas != 0:
        #    direction = motor('s')

        #AR marker
        markers = ar_markers.detect_markers(undistorted_image)
        for marker in markers:
            if marker.id == 114:
                direction = motor('q', result[1])
            elif marker.id == 922:
                direction = motor('e', result[1])
            elif marker.id == 2537:
                direction = motor('s', 0)
            marker.highlite_marker(undistorted_image)
        #----------------------------
        #putText
        try:
            #slope
            cv2.putText(undistorted_image, 'm = ' + str(result[1]), (10, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)
            #direction
            cv2.putText(undistorted_image, direction, (10, 40),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)
        except:
            pass


        #----------------------------
        # show the frame
        cv2.imshow("Frame", undistorted_image)
        key = cv2.waitKey(1) & 0xFF
        rawCapture.truncate(0)

        #Threading
        if switch == 1:
            evt = threading.Event()
            qdata = undistorted_image
            q.put((qdata, evt))

        # q : break, tap : capture
        if key == ord("q"):
            break
        elif key == ord("\t"):
            captured(undistorted_image)
Example #26
    img = cv2.remap(img,
                    map1,
                    map2,
                    interpolation=cv2.INTER_LINEAR,
                    borderMode=cv2.BORDER_CONSTANT)
    return img


while True:
    frame_captured, frame = capture.read()
    frame = cv2.flip(frame, -1)
    frame = cv2.resize(frame, (320, 240))
    #undistort
    undistorted_image = frame  #undistort(frame)

    #ar_marker
    markers = detect_markers(undistorted_image)
    for marker in markers:
        marker.highlite_marker(undistorted_image)

    cv2.imshow('Frame', undistorted_image)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break
    elif key == ord("\t"):
        captured(undistorted_image)

# When everything done, release the capture
capture.release()
cv2.destroyAllWindows()
Example #27
#!/usr/bin/env python3

import cv2
from ar_markers import detect_markers

img = cv2.imread('image-0.png')
markers = detect_markers(img)
print(markers)

for m in markers:
    print(">> Found:", m.id)
Example #28
from __future__ import print_function
import cv2
from ar_markers import detect_markers

if __name__ == "__main__":
    print('Press "q" to quit')
    camera0 = cv2.VideoCapture(0)

    if camera0.isOpened():  #try to get the first frame
        frame_captured, frame = camera0.read()
    else:
        frame_captured = False

    while frame_captured:
        gray_scale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        #markers = detect_markers(frame)
        markers = detect_markers(gray_scale)
        print("markers:\n", markers)
        for marker in markers:
            marker.highlite_marker(gray_scale)
        cv2.imshow('Test Frame', gray_scale)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        frame_captured, frame = camera0.read()

    #When everything done, release the camera0
    camera0.release()
    cv2.destroyAllWindows()
Example #29
    def marker_search(self, args):
        #tracking all global variables necessary
        global whiffcount
        global lastmarker
        global lastmidmarker
        global enemyflag
        global enemybase
        global carryingflag
        global taglockouts
        global tagtimes
        global walldistance
        global zdir
        global ser

        #get the picture
        img = self.vision.get_latest_valid_picture()

        #shouldn't fail, but check anyway
        if (img is not None):
            #initialize counters for 400 and 200 nodes
            fourMarkers = 0
            twoMarkers = 0

            #find the markers - puts them into an array
            markers = detect_markers(img)

            #initialize search for finding our location based on mid markers and other markers
            highestmarker = 0
            highestmidmarker = 0

            #convert array of markers to array of marker IDs
            mids = []
            for marker in markers:
                mids.append(int(marker.id))
                marker.highlite_marker(img)

            #for every marker id...
            for mid in mids:
                #if we found one in the 400's that is a location marker
                if mid > 404 and mid < 500:
                    #increment that count
                    fourMarkers = fourMarkers + 1
                    #if this is the highest # marker we've seen, then save it
                    if mid % 100 > highestmarker % 100:
                        highestmarker = mid
                #if we found one in the 200's that is a location marker
                elif mid > 204 and mid < 300:
                    #increment that count
                    twoMarkers = twoMarkers + 1
                    #if this is the highest # marker we've seen, then save it
                    if mid % 100 > highestmarker % 100:
                        highestmarker = mid
                #if we found a mid-level wall marker
                elif mid < 100:
                    #and this is the new highest mid marker, save it
                    if mid > highestmidmarker:
                        highestmidmarker = mid
                #if we found another drone (WOW)
                elif (mid % 100 == 3 or mid % 100 == 4) and mid < 400:
                    #search for the id in taglockouts
                    ind = -1
                    for i in range(len(taglockouts)):
                        if taglockouts[i] == mid:
                            ind = i
                    #if we found it, check the time stamp
                    if ind >= 0:
                        #if it is too soon, skip this marker
                        if time.time() - tagtimes[ind] < 2:
                            continue
                        #otherwise remove it and keep going
                        else:
                            taglockouts.remove(mid)
                            tagtimes.remove(tagtimes[ind])
                    #add the score to the ledger
                    f = open(scoreledger, 'a+')
                    f.write("found enemy " + str(mid) + ": +1 points " +
                            str(time.time()) + "\n")
                    #and add the marker to the lockouts
                    taglockouts.append(mid)
                    tagtimes.append(time.time())
                #if we found our teammate (EVEN MORE WOW)
                elif (mid == 403):
                    #write that to the ledger too, i guess
                    f = open(scoreledger, 'a+')
                    f.write("tagged teammate\n")
                #if we found the enemy flag (!!!!!!!)
                elif mid % 100 == 2 and mid < 402:
                    #save location, play noise, and grab the flag
                    path = os.path.abspath('\FARTNoises\FART2.wav')
                    os.system('start %s' % path)  # launch the sound with the OS player
                    carryingflag = True
                    #if by some miracle this isnt the first time we grab the flag, we dont need to save the location
                    if enemyflag == -1 and lastmarker != 999:
                        enemyflag = lastmarker
                        #write results to file
                        with open('flagloc.pk', 'wb') as f:
                            pickle.dump(enemyflag, f)
                #if we found our base
                elif mid == 401:
                    #...and we have the flag
                    if carryingflag:
                        #SCORE! JACKPOT BABY
                        carryingflag = False
                        f = open(scoreledger, 'a+')
                        f.write("flag captured: +10 points " +
                                str(time.time()) + "\n")
                        path = os.path.abspath('\FARTNoises\FART3.wav')
                    os.system('start %s' % path)  # launch the sound with the OS player
                #if we found the enemy base
                elif mid % 100 == 1:
                    #search for the id in taglockouts
                    ind = -1
                    for i in range(len(taglockouts)):
                        if taglockouts[i] == mid:
                            ind = i
                    #if we found it, check the time stamp
                    if ind >= 0:
                        #if it is too soon, skip this marker
                        if time.time() - tagtimes[ind] < 30:
                            continue
                        #otherwise remove it and keep going
                        else:
                            taglockouts.remove(mid)
                            tagtimes.remove(tagtimes[ind])
                    #add the score
                    f = open(scoreledger, 'a+')
                    f.write("enemy base tagged: +2 points " +
                            str(time.time()) + "\n")
                    #and add the marker to the lockouts
                    taglockouts.append(mid)
                    tagtimes.append(time.time())

            #so now that we parsed all the markers, we gotta figure out what to do with that info

            #if we found way more 400 markers than 200 markers
            if fourMarkers > twoMarkers + 1:
                #go down some (vertical motion is currently zeroed out)
                zdir = 0
            #if we found way more 200 markers than 400 markers
            elif twoMarkers > fourMarkers + 1:
                #go up some (vertical motion is currently zeroed out)
                zdir = 0
            #otherwise, keep it level (hopefully)
            else:
                zdir = 0

            #if we found a highestmarker, then we have our location
            #this wont happen every time, especially if we find no markers (which is not unlikely)
            if highestmarker != 0:
                #save it
                lastmarker = highestmarker
                #initialize centerpoints
                p1 = 0
                p2 = 0
                #search for the centers of this id and this id-1
                for m in markers:
                    if int(m.id) == lastmarker:
                        p1 = m.center
                    elif int(m.id) == lastmarker - 1:
                        p2 = m.center
                #if we found one, then calculate wall distance
                if p2 != 0:
                    dist = math.sqrt((int(p1[0]) - int(p2[0]))**2 +
                                     (int(p1[1]) - int(p2[1]))**2)
                    walldistance = wallDistance(dist)

            #print location for testing purposes
            print("location: " + str(lastmarker) + "midloc: " +
                  str(lastmidmarker) + "walldist: " + str(walldistance))

            #tell other drone where we are and where enemyflag is
            stng = str(lastmarker) + "," + str(enemyflag) + "\n"
            ser.write(str.encode(stng))

            #read in and parse info from other team
            serin = ser.readline()
            serial_input = serin.decode('utf-8')
            coord = serial_input.split(",")

            # if serial reads correctly and gets enemy flag location from other team
            if (len(coord) == 2):
                print("transmission: " + str(coord[0]) + "," + str(coord[1]))
                #if other team is close to us move forward a bit to avoid crashing
                if ((int(coord[0]) - lastmarker) < 5):
                    mambo.fly_direct(roll=0,
                                     pitch=7,
                                     yaw=0,
                                     vertical_movement=0,
                                     duration=.1)

                #if flag is still not found set it from other team
                if enemyflag == -1:
                    if (coord[1] != '-'):
                        enemyflag = int(coord[1])
                #if flag is found, write to file
                if enemyflag != -1:
                    with open('flagloc.pk', 'wb') as f:
                        pickle.dump(enemyflag, f)

            #if we found a highestmidmarker, then we have our mid marker location
            if highestmidmarker != 0:
                #save it
                lastmidmarker = highestmidmarker

            #if we missed a lot of markers, then increment the whiff counter
            if twoMarkers < 1 and fourMarkers < 1:
                whiffcount = whiffcount + 1
            else:
                whiffcount = 0
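wallDistance() is not shown; it converts the pixel gap between two adjacent wall markers into a range estimate. Under a pinhole-camera model (an assumption here, with illustrative constants) that is real spacing times focal length in pixels over apparent pixel distance:

MARKER_SPACING_M = 0.5   # assumed real-world gap between adjacent markers
FOCAL_LENGTH_PX = 700.0  # assumed focal length of the camera, in pixels

def wallDistance(pixel_dist):
    # Pinhole model: distance = real size * focal length / apparent size.
    return MARKER_SPACING_M * FOCAL_LENGTH_PX / max(pixel_dist, 1e-6)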
Example #30
def setAndGetPos():

    #draw the trackbars
    cv2.namedWindow('dataSet')
    cv2.createTrackbar('threshParam1', 'dataSet', 150, 255, nothing)
    cv2.createTrackbar('threshParam2', 'dataSet', 250, 255, nothing)
    cv2.createTrackbar('cannyParam1', 'dataSet', 150, 255, nothing)
    cv2.createTrackbar('cannyParam2', 'dataSet', 200, 255, nothing)


    x = 0
    y = 0
    w = 0
    h = 0
    found = False  # set True once the marker sits in the expected position
    while True:
        #read the trackbar values
        threshParam1 = cv2.getTrackbarPos('threshParam1', 'dataSet')
        threshParam2 = cv2.getTrackbarPos('threshParam2', 'dataSet')
        cannyParam1 = cv2.getTrackbarPos('cannyParam1', 'dataSet')
        cannyParam2 = cv2.getTrackbarPos('cannyParam2', 'dataSet')

        cropped=""
        #uj kep
        ret, LiveImg = cap.read()
        #elforgatas szoge
        rot=0
        #marker kereses
        try:
            markers = detect_markers(LiveImg)
        except:
            continue
        #if marker 3663 is present
        for marker in markers:
            if (marker.id == 3663):
                #store the angle and position
                rot = Angle(marker.contours[0][0], marker.contours[1][0])
                if(rot>45):
                    rot=rot-90

                x, y, w, h = cv2.boundingRect(marker.contours)  # good
                #crop the image for the preview
                cropped = LiveImg[y:y + h, x:x + w]  # good
                offset = 50
                #position check

                if (marker.contours[0][0][0] - offset < marker.contours[1][0][0] and marker.contours[0][0][0] + offset >
                    marker.contours[1][0][0] and marker.contours[1][0][1] - offset < marker.contours[2][0][1] and
                                marker.contours[1][0][1] + offset > marker.contours[2][0][1]):

                    found = True


        #draw the data view; to be reworked later
        LiveImg = cv2.resize(LiveImg, (640, 480), interpolation=cv2.INTER_AREA)
        ret, thresh1 = cv2.threshold(LiveImg, threshParam1,threshParam2, cv2.THRESH_BINARY)
        edge = cv2.Canny(thresh1, cannyParam1, cannyParam2)
        edge = cv2.resize(edge, (640, 480), interpolation=cv2.INTER_AREA)
        edge = cv2.cvtColor(edge, cv2.COLOR_GRAY2RGB)
        try:
            cropped = cv2.resize(cropped, (640, 480), interpolation=cv2.INTER_AREA)
            cv2.imshow("data", np.vstack((np.hstack((LiveImg,thresh1)),np.hstack((edge, cropped)))))
        except:
            tmp = np.zeros((height, width, 3), np.uint8)
            tmp = cv2.resize(tmp, (640, 480), interpolation=cv2.INTER_AREA)
            cv2.imshow("data", np.vstack((np.hstack((LiveImg, thresh1)), np.hstack((edge, tmp)))))


        if (cv2.waitKey(1) & 0xFF == ord('q') or found):
            #offset
            o=25
            x=x+o
            y=y+o
            w=w-o*2
            h=h-o*2

            return x, y, w, h,rot,threshParam1,threshParam2,cannyParam1,cannyParam2
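Angle() and nothing() are assumed helpers in this example; plausible sketches of both, with the angle computed from two contour points via atan2 (the project's sign convention may differ):

import math

def nothing(_):
    # Placeholder trackbar callback required by cv2.createTrackbar.
    pass

def Angle(p1, p2):
    # Angle in degrees of the segment p1 -> p2, measured from the x-axis.
    return math.degrees(math.atan2(p2[1] - p1[1], p2[0] - p1[0]))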