Example #1
def main():
    parser = ArgumentParser(description='test apriltag Python bindings')
    parser.add_argument('device_or_movie', metavar='INPUT', nargs='?', default=0,
                        help='Movie to load or integer ID of camera device')
    apriltag.add_arguments(parser)
    options = parser.parse_args()

    webcam = cv.VideoCapture(0)
    cv.namedWindow('frame')

    detector = apriltag.Detector(options, searchpath=apriltag._get_demo_searchpath())

    while True:
        _, frame = webcam.read()
        gray = cv.cvtColor(frame, cv.COLOR_RGB2GRAY)

        detections, dimg = detector.detect(gray, return_image=True)
        num_detections = len(detections)
        print('Detected {} tags.\n'.format(num_detections))
        try:
            print(detections[0])
        except IndexError:
            print('not found')

        overlay = frame // 2 + dimg[:, :, None] // 2
        
        cv.imshow('frame', overlay)
        ikey = cv.waitKey(1)
        if ikey == ord('q'):
            break

if __name__ == '__main__':
    main()
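Note: these snippets omit their imports. A minimal preamble matching the names used in the examples might look like this (module aliases are inferred from usage; `apriltag` is whichever Python bindings provide `add_arguments` and `Detector`):

from argparse import ArgumentParser
import cv2 as cv      # Example #1 uses the `cv` alias; most later examples use `cv2`
import numpy as np
import apriltag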
Example #2
def main():
    '''Main function.'''

    parser = ArgumentParser(description='test apriltag Python bindings')

    parser.add_argument('device_or_movie',
                        metavar='INPUT',
                        nargs='?',
                        default=0,
                        help='Movie to load or integer ID of camera device')

    apriltag.add_arguments(parser)

    options = parser.parse_args()

    try:
        cap = cv2.VideoCapture(int(options.device_or_movie))
    except ValueError:
        cap = cv2.VideoCapture(options.device_or_movie)

    window = 'Camera'
    cv2.namedWindow(window)

    # set up a reasonable search path for the apriltag DLL inside the
    # github repo this file lives in;
    #
    # for "real" deployments, either install the DLL in the appropriate
    # system-wide library directory, or specify your own search paths
    # as needed.

    detector = apriltag.Detector(options,
                                 searchpath=apriltag._get_demo_searchpath())

    while True:

        success, frame = cap.read()
        if not success:
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        detections, dimg = detector.detect(gray, return_image=True)

        num_detections = len(detections)
        print('Detected {} tags.\n'.format(num_detections))

        for i, detection in enumerate(detections):
            print('Detection {} of {}:'.format(i + 1, num_detections))
            print()
            print(detection.tostring(indent=2))
            print()

        overlay = frame // 2 + dimg[:, :, None] // 2

        cv2.imshow(window, overlay)
        k = cv2.waitKey(1)

        if k == 27:
            break
Example #3
def main():
    cam = cv2.VideoCapture(0)
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    # sleep(0.2)
    success, frame = cam.read()
    if not success:
        print('failed to read camera')
        return
    display = frame.copy()
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    cv2.imshow('win', frame)
    cv2.waitKey()
    aprilsize = 0.08
    opts = np.array([[aprilsize, -aprilsize, 0.],
                     [-aprilsize, -aprilsize, 0.],
                     [-aprilsize, aprilsize, 0.],
                     [aprilsize, aprilsize, 0.]])

    cmat = np.loadtxt('CameraMatrix.txt')
    dmat = np.loadtxt('Distortion.txt')

    # set up the detector
    parser = ArgumentParser(
        description='test apriltag Python bindings')

    parser.add_argument('device_or_movie', metavar='INPUT', nargs='?', default=0,
                        help='Movie to load or integer ID of camera device')

    apriltag.add_arguments(parser)

    options = parser.parse_args()

    # create an apriltag detector
    detector = apriltag.Detector(options)
    tags = detector.detect(frame, return_image=False)
    print('found {} tags'.format(len(tags)))


    print()
    for tag in tags:
        print('tag has corners \n{} \n'.format(tag.corners))
        print(tag.corners.shape, tag.corners.dtype)
        # line = np.array([[10, 0], [10, 470]])
        # cv2.polylines(display, [line], True, (255, 0, 255), 0, 16)
        cv2.polylines(display, [tag.corners.astype(int)], True, (255, 255, 0), 1, 16)
        # print frame.shape
        # corners_adj = tag.corners + np.array([frame.shape[0]/2, 0])
        # print 'adjusted corners \n{} \n'.format(corners_adj)

        retval, rvec, tvec = cv2.solvePnP(opts, tag.corners, cmat, dmat)

        print('tag has tvec \n{}'.format(tvec))
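If the full pose is needed rather than just the translation, the rvec returned by cv2.solvePnP can be converted to a rotation matrix. A minimal follow-up sketch, reusing the loop variables above:

        # convert the Rodrigues rotation vector into a 3x3 rotation matrix
        R, _ = cv2.Rodrigues(rvec)
        print('tag has rotation matrix \n{}'.format(R))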
Example #4
def apriltag_image(
    input_images=[
        '../media/input/single_tag.jpg', '../media/input/multiple_tags.jpg'
    ],
    output_images=False,
    display_images=True,
    detection_window_name='AprilTag',
):
    '''
    Detect AprilTags from static images.

    Args:   input_images [list(str)]: List of images to run detection algorithm on
            output_images [bool]: Boolean flag to save/not images annotated with detections
            display_images [bool]: Boolean flag to display/not images annotated with detections
            detection_window_name [str]: Title of displayed (output) tag detection window
    '''

    parser = ArgumentParser(description='Detect AprilTags from static images.')
    apriltag.add_arguments(parser)
    options = parser.parse_args()
    '''
    Set up a reasonable search path for the apriltag DLL.
    Either install the DLL in the appropriate system-wide
    location, or specify your own search paths as needed.
    '''

    detector = apriltag.Detector(options, searchpath=apriltag._get_dll_path())

    for image in input_images:

        img = cv2.imread(image)

        print('Reading {}...\n'.format(os.path.split(image)[1]))

        result, overlay = apriltag.detect_tags(
            img,
            detector,
            camera_params=(3156.71852, 3129.52243, 359.097908, 239.736909),
            tag_size=0.0762,
            vizualization=3,
            verbose=3,
            annotation=True)

        if output_images:
            output_path = '../media/output/' + str(os.path.split(image)[1])
            output_path = output_path.replace(str(os.path.splitext(image)[1]),
                                              '.jpg')
            cv2.imwrite(output_path, overlay)

        if display_images:
            cv2.imshow(detection_window_name, overlay)
            while cv2.waitKey(5) < 0:  # Press any key to load subsequent image
                pass
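A usage sketch for the function above (the argument values here are illustrative, not from the original source):

if __name__ == '__main__':
    # run detection on one bundled sample image, saving but not displaying results
    apriltag_image(input_images=['../media/input/single_tag.jpg'],
                   output_images=True,
                   display_images=False)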
Example #5
def main():
    '''Main function.'''

    parser = ArgumentParser(description='test apriltag Python bindings')

    parser.add_argument('device_or_movie',
                        metavar='INPUT',
                        nargs='?',
                        default=0,
                        help='Movie to load or integer ID of camera device')

    apriltag.add_arguments(parser)

    options = parser.parse_args()

    try:
        cap = cv2.VideoCapture(int(options.device_or_movie))
    except ValueError:
        cap = cv2.VideoCapture(options.device_or_movie)

    window = 'Camera'
    cv2.namedWindow(window)

    detector = apriltag.Detector(options)

    while True:

        success, frame = cap.read()
        if not success:
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        detections, dimg = detector.detect(gray, return_image=True)

        num_detections = len(detections)
        print('Detected {} tags.\n'.format(num_detections))

        for i, detection in enumerate(detections):
            print('Detection {} of {}:'.format(i + 1, num_detections))
            print()
            print(detection.tostring(indent=2))
            print()

        overlay = frame // 2 + dimg[:, :, None] // 2

        cv2.imshow(window, overlay)
        k = cv2.waitKey(1)

        if k == 27:
            break
Example #6
    def __init__(self, tag_size):

        # The python bindings for the April Tag library require this strange configuration:
        parser = ArgumentParser(description='test apriltag Python bindings')
        parser.families = 'tag36h11'
        parser.border = 1  # how wide (in bit-sizes) is the black border? (usually 1)
        parser.nthreads = 4  #(int) number of threads to use
        parser.quad_decimate = 1.0  #(float) Decimate input image by this factor
        parser.quad_blur = 0.0  #(float) Apply low-pass blur to input; negative sharpens
        parser.refine_edges = True  #(bool) Set to True to spend more time to align edges of tags
        parser.refine_decode = True  #(bool) Set to True to spend more time to decode tags
        parser.refine_pose = True  #(bool) Set to True to spend more time to precisely localize tags
        parser.debug = False  #(bool)Enable/Disable debugging output (slow when enabled)
        parser.quad_contours = True
        apriltag.add_arguments(parser)

        self.options = parser.parse_args()
        self.options.tag_size = tag_size
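The options built here are presumably consumed the same way as in the other examples; a hedged sketch of how an instance of this class might use them elsewhere (the method and variable names are assumptions):

    def detect(self, gray_image):
        # gray_image: single-channel uint8 image, e.g. from cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        detector = apriltag.Detector(self.options)
        return detector.detect(gray_image)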
Example #7
    def __init__(self):
        rospy.init_node('apriltag_detector')
        # rospy.Subscriber('image_topic', Image, self.image_callback)

        # HACK: rather than subscribing to an image topic and then using that
        # data, just capture the node with cv2.VideoCapture every set length
        # of time.

        # Will let us convert from cv image to ros image and back
        self.bridge = CvBridge()

        # UNHACK: subscribe to the image publisher and create an image_callback
        self.sub = rospy.Subscriber("camera_raw", Image, self.image_callback)
        # set up parameters for the apriltag detector
        aprilsize = 0.08 #  half the side length of the apriltag in meters; changed from 0.05 4/20/17
        self.opts = np.array([[aprilsize, -aprilsize, 0.],
                              [-aprilsize, -aprilsize, 0.],
                              [-aprilsize, aprilsize, 0.],
                              [aprilsize, aprilsize, 0.]])
        # From camera calibration
        self.cmatrix = np.loadtxt("/home/schleppy/project_files/Schlepbot-E90/catkin_ws_new/src/apriltag_detector/CameraMatrix.txt")
        self.dists = np.loadtxt("/home/schleppy/project_files/Schlepbot-E90/catkin_ws_new/src/apriltag_detector/Distortion.txt")

        self.pub = rospy.Publisher('apriltag', Point, queue_size=10)

        # set up the detector
        parser = ArgumentParser(
            description='test apriltag Python bindings')

        parser.add_argument('device_or_movie', metavar='INPUT', nargs='?', default=0,
                            help='Movie to load or integer ID of camera device')

        apriltag.add_arguments(parser)

        options = parser.parse_args(rospy.myargv()[1:])

        # create an apriltag detector
        self.detector = apriltag.Detector(options)

        # Moving average of translation vectors.
        self.tvec_avg = np.array([[0], [0], [0]])
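The image_callback referenced above is not shown; self.tvec_avg suggests it smooths the solvePnP translation over time. A hedged sketch of such an update (the helper name and the alpha smoothing factor are assumptions, not from the original source):

    def _update_tvec_average(self, tvec, alpha=0.2):
        # exponential moving average of the translation vector
        self.tvec_avg = alpha * tvec + (1.0 - alpha) * self.tvec_avg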
Example #8
def main():

    '''Main function.'''

    parser = ArgumentParser(
        description='test apriltag Python bindings')

    apriltag.add_arguments(parser)

    options = parser.parse_args()

    for k in range(20):
        pass  # loop body truncated in the original snippet
Example #9
    def __init__(self):
        rospy.init_node('apriltag_detector')
        rospy.Subscriber('image_topic_raw', Image, self.image_callback)
        self.pub = rospy.Publisher('apriltag', Image, queue_size=10)
        self.bridge = CvBridge()

        # set up the detector
        parser = ArgumentParser(description='test apriltag Python bindings')

        parser.add_argument(
            'device_or_movie',
            metavar='INPUT',
            nargs='?',
            default=0,
            help='Movie to load or integer ID of camera device')

        apriltag.add_arguments(parser)

        options = parser.parse_args()

        # create an apriltag detector
        self.detector = apriltag.Detector(options)
Example #10
def main():

    '''Detect apriltags.'''
    
    if (use_pympler):
      tr = tracker.SummaryTracker()

    from argparse import ArgumentParser

    # for some reason pylint complains about members being undefined :(
    # pylint: disable=E1101

    parser = ArgumentParser(
        description='Detect apriltag in videos and images. Output to tagjson/ directory')
        
    show_default = ' (default %(default)s)'
    
    apriltag.add_arguments(parser)

    parser.add_argument('filenames', metavar='IMAGE', nargs='*',
                        help='files to convert')

    parser.add_argument('-F', '--family-preset', 
                        action=FamilyPresetAction,
                        help='Family preset: detect "inv" at the end of the name to set both "family" and "inverse"')
    parser.add_argument('-V', dest='video_in', default=None,
                        help='Input video')
    parser.add_argument('-f0', dest='f0', default=0, type=int,
                        help='Frame start '+ show_default)
    parser.add_argument('-f1', dest='f1', default=0, type=int,
                        help='Frame end '+ show_default)
    parser.add_argument('-fps', dest='fps', default=20.0, type=float,
                        help='fps '+ show_default)
         
    parser.add_argument('-outdir', dest='outdir', default='.',
                        help='For video, basedir of output '+ show_default)
                        
    parser.add_argument('-tagout', dest='tagout', default=False, 
                        action='store_true',
                        help='Save image with detected tags overlay '+ show_default)
    parser.add_argument('-tagplot', dest='tagplot', default=False, 
                        action='store_true',
                        help='Plot tags overlay using matplotlib '+ show_default)
    parser.add_argument('-m', dest='multiframefile', default=False, 
                        action='store_true',
                        help='Save multiple frames into single JSON file '+ show_default)
    parser.add_argument('-mv', dest='multiframefile_version', default=0, type=int,
                        help='Single JSON file version'+ show_default)
    parser.add_argument('-cvt', dest='cv_nthreads', default=2, type=int,
                        help='Number of threads for OpenCV '+ show_default)
          
    if (True):     
        parser.add_argument('-max_side_length', dest='max_side_length', 
                        default=35, type=int,
                        help='Maximum tag size in pixel '+ show_default)         
        parser.add_argument('-min_side_length', dest='min_side_length',       
                        default=20, type=int,
                        help='Tags smaller than that are discarded '+ show_default)
        parser.add_argument('-min_aspect', dest='min_aspect',
                            default=0.7, type=float,
                            help='Tags smaller than that are discarded '+ show_default)

    parser.add_argument('-rgb_mean', dest='rgb_mean', default=False, 
                        action='store_true',
                        help='Extract extra info rgb_mean '+ show_default)
    parser.add_argument('-tag_img', dest='tag_img', default=False, 
                        action='store_true',
                        help='Extract extra info tag image '+ show_default)
    parser.add_argument('-tag_os', dest='tag_oversampling', 
                        default=1, type=int,
                        help='Oversampling factor to extract tag image or compute rgb_mean '+ show_default)         
    parser.add_argument('-tag_d', dest='tag_d', 
                        default=5, type=int,
                        help='Size of tag (tag25h5 -> d=5) '+ show_default)         

    options = parser.parse_args()
    
    cv2.setNumThreads(options.cv_nthreads)
    print('OpenCV running with {} threads'.format(cv2.getNumThreads()))

    if (not options.multiframefile and options.multiframefile_version > 0):
        options.multiframefile = True
    if (options.multiframefile and options.multiframefile_version == 0):
        options.multiframefile_version = 2

    if (options.f1 < options.f0):
        options.f1 = options.f0
    
    print(options)
    
    det = apriltag.Detector(options)
    
    # For Quad Contour Params Detection (QCP)
    #det.tag_detector.contents.qcp.min_side_length = options.min_side_length;
    #det.tag_detector.contents.qcp.min_aspect = options.min_aspect;
    
# Defaults for Quad Threshold Approach
#     qtp->max_nmaxima = 10;
#     qtp->min_cluster_pixels = 5;
#     qtp->max_line_fit_mse = 1.0;
#     qtp->critical_rad = 10 * M_PI / 180;
#     qtp->deglitch = 0;
#     qtp->min_white_black_diff = 15;
    
    qtp = det.tag_detector.contents.qtp
    qtp.min_cluster_pixels = 200
    qtp.critical_rad = 50.0 / 180 * math.pi
    qtp.min_white_black_diff = 30
    qtp.deglitch = 0

#         ('max_nmaxima', ctypes.c_int),
#        ('max_line_fit_mse', ctypes.c_float),
    
    #init_gui()

    if not (options.video_in is None): # Input is a video
        print('Processing video {}'.format(options.video_in))
        vidcap = cv2.VideoCapture(options.video_in)
        
        if not vidcap.isOpened(): 
            print("Could not open video. Aborting.")
            raise IOError("Could not open video {}. Aborting.".format(options.video_in))
            #sys.exit(1)
        
        vidcap.set(cv2.CAP_PROP_POS_FRAMES, 0)
        nframes = vidcap.get(cv2.CAP_PROP_FRAME_COUNT)
        coderfps = vidcap.get(cv2.CAP_PROP_FPS)

        vidcap.set(cv2.CAP_PROP_POS_FRAMES, nframes - 1)
        duration = vidcap.get(cv2.CAP_PROP_POS_MSEC) / 1000.0
       
        if (False): 
           print("Opened video.\n  nframes={}\n  nframes with cmd line fps={}\n  coder fps={}\n  coder video duration={}s={}min\n  command line fps={}\n  command line duration={}s={}min\n  time at frame {}={}s".format(nframes, nframes*coderfps/options.fps,
                    coderfps,nframes/coderfps, nframes/coderfps/60,
                    options.fps,nframes/options.fps, nframes/options.fps/60,
                    int(nframes), duration))
                 
           print('ffprobe infos:')
           import shlex, subprocess
           cmdline = "ffprobe '{}' -show_streams -loglevel -8 | grep nb_frames=".format(options.video_in)
           subprocess.call(cmdline,shell=True)
           cmdline = "ffprobe '{}' -show_streams -loglevel -8 | grep duration=".format(options.video_in)
           subprocess.call(cmdline,shell=True)
           cmdline = "ffprobe '{}' -show_streams -loglevel -8 | grep frame_rate=".format(options.video_in)
           subprocess.call(cmdline,shell=True)
        

        outdir = options.outdir
        tagjson = outdir+'/tagjson'
        tagout = outdir+'/tagout'
        
        os.makedirs(outdir,exist_ok=True)
        if (not options.multiframefile):                
            os.makedirs(tagjson,exist_ok=True)
        if (options.tagout):
            os.makedirs(tagout,exist_ok=True)
        
        def printfps(t, name):
            print("Time {:10}   = {:5.3f}s   ({:4.1f} fps)".format(name, t, 1.0/t)) 
        
        fps=options.fps
        
        vidcap.set(cv2.CAP_PROP_POS_MSEC, 0)
        status, orig = vidcap.read()
        # Caution: orig in BGR format by default
        if (orig is None):
            print('Warning: could not read first frame')
            print('Aborting...')
            raise IOError("Could not read first frame. Aborting.")
            #sys.exit(1)
        print("Image size: {}".format(orig.shape))
        
        if (options.multiframefile):
            filenameJSON=outdir+"/tags_{:05d}-{:05d}.json".format(options.f0,options.f1)
            singlejson=Multiframejson(filenameJSON)
            singlejson.set_config(options)
            singlejson.open()
            
            print("All writes will be to tmp JSON file {}...".format(singlejson.tmpfile))
        
        for f in range(options.f0,options.f1+1):
            filename=tagout+"/tagout_{:05d}.png".format(f)
            filenameJSON=tagjson+"/tags_{:05d}.json".format(f)
        
            print("Processing frame {}".format(f), flush=True)
        
            tstart = timer()
 
            vidcap.set(cv2.CAP_PROP_POS_MSEC, 1000.0 / fps * f)
            #status,_ = vidcap.read(orig); # Source of segfault?
            status, orig = vidcap.read()  # Caution: BGR format!
            
            # Caution: orig in BGR format by default
            if (orig is None):
                print('Warning: could not read frame {}'.format(f))
                print('Aborting...')
                break
            endread = timer()
            #printfps(endread - tstart, 'read')
            
            #print('dodetect',flush=True,file=sys.stderr)
            detections = do_detect(det, orig)
            
            enddetect = timer()
            #print('dodetect DONE',flush=True,file=sys.stderr)
            #printfps(enddetect-endread, 'detect')
            
            extractextra = options.rgb_mean or options.tag_img
            if (extractextra):
                tag_family_d=options.tag_d  # d=code pixels, d+2=code+inborder, d+4=code+outborder
                tag_oversampling=options.tag_oversampling
                flags = {'tag_img': options.tag_img, 'rgb_mean': options.rgb_mean}
                tagextra = do_extract_extras(detections, orig, 
                                             flags=flags,
                                             n=tag_family_d+4, pixsize=tag_oversampling)
            else:
                tagextra = None
            
            endextra = timer()


            if (options.multiframefile):
                if (options.debug & 1): # DEBUG_LOG
                    print("  Appending to JSON file {}...".format(singlejson.tmpfile))
                singlejson.append(detections, f, tagextra)
            else:
                print("  Saving JSON to {}...".format(filenameJSON))
                savejson(detections, filenameJSON)

            #plot_detections(detections, gax[0], orig)
            
            if (options.tagout):
                print("  Saving tagimg to {}".format(filename))
                tagimg = orig.copy()
                draw_detections(tagimg, detections, draw_sampling=0, fontscale=0.75, options=options)            
                #tagimg = cv2.cvtColor(tagimg, cv2.COLOR_RGB2BGR);
                cv2.imwrite(filename,tagimg)
            
            #print_detections(detections, show_details=False)
            #print('Detected {} tags'.format(len(detections)))
            
            #plt.draw()
            #plt.show(block=False)
            
            #_ = input("Stopped at frame {}. Press Enter to continue".format(f))
            
            endsave = timer()
            #printfps(endsave-enddetect,'save')
            #printfps(endsave-tstart, 'TOTAL')
            if (False):
              print('  frame {:5}'.format(f))
            else:
              print('  DONE frame {:5}, {:3} tags, {:4.1f} fps'.format(f, len(detections), 1.0/(endsave-tstart)))
              #print('  Times: {:5.1f} read, {:5.1f} detect, {:5.1f} extra, {:5.1f} save,  {:5.1f} total'.format(
              #    endread-tstart, enddetect-endread, endextra-enddetect, endsave-endextra, 
              #    endsave-tstart))
              if (use_resource):
                maxrss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
                print('  MEM  frame {:5}, {:10d} bytes'.format(f, maxrss))
            
            if (use_pympler):
                tr.print_diff()
                
        if (options.multiframefile):
            print("  Closing JSON {}".format(singlejson.tmpfile))
            singlejson.close()
            print("  Renamed to {}".format(singlejson.filename))
            
    else: # Input is an image (or several)
        print('Processing image(s) {}'.format(options.filenames))
        for filename in options.filenames:

            filenameJSON="{filename}-tag.json".format(filename=filename)
            filename_tagout="{filename}-out.jpg".format(filename=filename)
            
            if have_cv2:
                orig = cv2.imread(filename)
            else:
                pil_image = Image.open(filename)
                orig = numpy.array(pil_image)
                #gray = numpy.array(pil_image.convert('L'))
                
            if (orig is None):
                print('Warning: could not read image {}'.format(filename))
                print('Aborting...')
                break
                
            detections = do_detect(det, orig)
            
            print("  Detected {} tags".format(len(detections)))
            
            extractextra = options.rgb_mean or options.tag_img
            if (extractextra):
                tag_family_d=options.tag_d
                tag_oversampling=options.tag_oversampling
                flags = {'tag_img': options.tag_img, 'rgb_mean': options.rgb_mean}
                tagextra = do_extract_extras(detections, orig, 
                                             flags=flags,
                                             n=tag_family_d+4, pixsize=tag_oversampling)
#                print(tagextra)
            else:
                tagextra = None
            
            if (options.tagplot):
                ax=plt.gca()
                plot_detections(detections, ax, orig)
            
            if (options.tagout):
                print("  Saving tagimg to {}".format(filename_tagout))
                tagimg = orig.copy()
                draw_detections(tagimg, detections, draw_sampling=0)            
                #tagimg = cv2.cvtColor(tagimg, cv2.COLOR_RGB2BGR);
                cv2.imwrite(filename_tagout,tagimg)
                if (options.tag_img):
                    for k,t in enumerate(tagextra):
                        filename_tagimg = "{filename}-tagimg-{k}-{id}.png".format(filename=filename,k=k,id=detections[k].tag_id)
                        cv2.imwrite(filename_tagimg,t['tag_img'] )
            
            print("  Saving JSON to {}".format(filenameJSON))
            savejson(detections, filenameJSON)
Example #11
def nothing(x):
    # no-op callback required by cv.createTrackbar
    pass


####################################
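# assumed setup (not in the original snippet): the trackbars below need an
# existing window named 'window'
cv.namedWindow('window')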
cv.createTrackbar('smt1', 'window', 0, 10, nothing)
cv.createTrackbar('smt2', 'window', 0, 10, nothing)
cv.createTrackbar('smt3', 'window', 0, 10, nothing)
cv.createTrackbar('smt4', 'window', 0, 10, nothing)
###/################################
parser = ArgumentParser(description='test apriltag Python bindings')
parser.add_argument('device_or_movie',
                    metavar='INPUT',
                    nargs='?',
                    default=0,
                    help='Movie to load or integer ID of camera device')
apriltag.add_arguments(parser)
options = parser.parse_args()
detector = apriltag.Detector(options,
                             searchpath=apriltag._get_demo_searchpath())
###/################################
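# assumed setup (not in the original snippet): `webcam` is used below but never
# defined; a capture consistent with the parsed options would be
webcam = cv.VideoCapture(options.device_or_movie)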
while True:
    _, frame = webcam.read()
    mrkzy = int(len(frame) / 2)
    mrkzx = int(len(frame[0]) / 2)
    xframe = len(frame[0])
    yframe = len(frame)
    smt1 = cv.getTrackbarPos('smt1', 'window')
    smt2 = cv.getTrackbarPos('smt2', 'window')
    smt3 = cv.getTrackbarPos('smt3', 'window')
    smt4 = cv.getTrackbarPos('smt4', 'window')
    gray = cv.cvtColor(frame, cv.COLOR_RGB2GRAY)
Example #12
def main():
    '''Test function for this Python wrapper.'''

    from argparse import ArgumentParser

    # for some reason pylint complains about members being undefined :(
    # pylint: disable=E1101

    parser = ArgumentParser(description='test apriltag Python bindings')

    show_default = ' (default %(default)s)'

    parser.add_argument('filenames',
                        metavar='IMAGE',
                        nargs='*',
                        help='files to convert')

    parser.add_argument('-V',
                        dest='video_in',
                        default=None,
                        help='Input video')
    parser.add_argument('-f0',
                        dest='f0',
                        default=0,
                        type=int,
                        help='Frame start ' + show_default)
    parser.add_argument('-f1',
                        dest='f1',
                        default=0,
                        type=int,
                        help='Frame end ' + show_default)

    parser.add_argument('-min_side_length',
                        dest='min_side_length',
                        default=25,
                        type=int,
                        help='Tags smaller than that are discarded ' +
                        show_default)
    parser.add_argument('-min_aspect',
                        dest='min_aspect',
                        default=0.7,
                        type=float,
                        help='Tags smaller than that are discarded ' +
                        show_default)

    parser.add_argument('-nowait',
                        dest='nowait',
                        default=False,
                        action='store_true',
                        help='Process all input without waiting for user')

    apriltag.add_arguments(parser)

    options = parser.parse_args()

    if (options.f1 < options.f0):
        options.f1 = options.f0

    print(options)

    det = apriltag.Detector(options)

    # For Quad Contour Params Detection (QCP)
    #det.tag_detector.contents.qcp.min_side_length = options.min_side_length;
    #det.tag_detector.contents.qcp.min_aspect = options.min_aspect;

    # Defaults for Quad Threshold Approach
    #     qtp->max_nmaxima = 10;
    #     qtp->min_cluster_pixels = 5;
    #     qtp->max_line_fit_mse = 1.0;
    #     qtp->critical_rad = 10 * M_PI / 180;
    #     qtp->deglitch = 0;
    #     qtp->min_white_black_diff = 15;

    qtp = det.tag_detector.contents.qtp
    qtp.min_cluster_pixels = 200
    qtp.critical_rad = 50.0 / 180 * math.pi
    qtp.min_white_black_diff = 30
    qtp.deglitch = 0

    #         ('max_nmaxima', ctypes.c_int),
    #        ('max_line_fit_mse', ctypes.c_float),

    init_gui()

    if (options.video_in):  # Input is a video
        print('Processing video {}'.format(options.video_in))
        vidcap = cv2.VideoCapture(options.video_in)
        vidcap.set(cv2.CAP_PROP_POS_FRAMES, 0)
        nframes = vidcap.get(cv2.CAP_PROP_FRAME_COUNT)
        options.f1 = int(min(options.f1, nframes))

        win = cv2.namedWindow('tags', cv2.WINDOW_NORMAL)

        os.makedirs('tagout', exist_ok=True)

        for f in range(options.f0, options.f1 + 1):

            filename = "tagout/tagout_{:05d}.png".format(f)

            fps = 22
            vidcap.set(cv2.CAP_PROP_POS_MSEC, 1000.0 / fps * f)
            status, orig = vidcap.read()
            # Caution: orig in BGR format by default
            if (orig is None):
                print('Warning: could not read frame {}'.format(f))
                continue
            print("Detecting on frame {}, saving to {}".format(f, filename))

            detections = do_detect(det, orig)

            plot_detections(detections, gax[0], orig)

            tagimg = orig.copy()
            draw_detections(tagimg, detections, draw_sampling=0)
            #tagimg = cv2.cvtColor(tagimg, cv2.COLOR_RGB2BGR);
            cv2.imwrite(filename, tagimg)

            #print_detections(detections, show_details=False)

            plt.draw()
            plt.show(block=False)

            #_ = input("Stopped at frame {}. Press Enter to continue".format(f))
    else:  # Input is an image (or several)
        print('Processing image(s) {}'.format(options.filenames))
        for filename in options.filenames:

            if have_cv2:
                orig = cv2.imread(filename)
            else:
                pil_image = Image.open(filename)
                orig = numpy.array(pil_image)
                #gray = numpy.array(pil_image.convert('L'))

            detections = do_detect(det, orig)

            plot_detections(detections, gax[0], orig)

            tagimg = orig.copy()
            draw_detections(tagimg, detections, draw_sampling=0)
            #tagimg = cv2.cvtColor(tagimg, cv2.COLOR_RGB2BGR);
            cv2.imwrite(filename, tagimg)

            plt.show(block=False)
Example #13
def Main():
    reference_tags = [0, 1, 2]
    x_distance = 95  # 2.1844
    y_distance = 67.5  # 1.1938
    global odoData
    global ref_x
    global ref_y
    global orig
    global rotation_matrix
    global transform_matrix
    print(odoData)
    odoData1 = {'robot1': 170, 'robot2': 650}
    window = 'Overlay1'

    cv2.namedWindow(window)

    endpt = [0, 0]

    endptFlag = True
    HOST = '192.168.1.78'  # The server's hostname or IP address
    PORT = 12346
    SERVER_SOCKET = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    SERVER_SOCKET.bind((HOST, PORT))
    print('Socket Bound to Port ', PORT)
    SERVER_SOCKET.listen(5)

    # Starts a thread that looks for connections from the swarmbots

    connection_thread = threading.Thread(target=find_connections,
                                         args=(SERVER_SOCKET, ))
    connection_thread.start()

    # Localization Code Here

    parser = ArgumentParser(description='test apriltag Python bindings')
    parser.add_argument('device_or_movie', metavar='INPUT', nargs='?',
                        default=0,
                        help='Movie to load or integer ID of camera device')
    orient = 0
    apriltag.add_arguments(parser)
    options = parser.parse_args()
    try:
        try:
            cap = cv2.VideoCapture(0)
            cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
        except ValueError:
            cap = cv2.VideoCapture(options.device_or_movie)

        detector = apriltag.Detector(options,
                                     searchpath=apriltag._get_demo_searchpath())
        font = cv2.FONT_HERSHEY_SIMPLEX
        fontScale = 0.3
        fontColor = (0, 0, 255)
        lineType = 1
        while True:
            odoData1.clear()
            (success, frame) = cap.read()
            if not success:
                break
            gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
            (detections, dimg) = detector.detect(gray, return_image=True)
            headDir = np.array([0, 0])
            num_detections = len(detections)
            dimg1 = dimg

            try:

                # Gets the x and y positions of the reference tags

                ref_x = detections[reference_tags[1]].center
                ref_y = detections[reference_tags[2]].center
                orig = detections[reference_tags[0]].center
            except IndexError:
                continue

            # Creates the transform matrix for tags (units are inches)

            transform_matrix.append(np.abs(x_distance)
                                    / np.abs(ref_x[0] - orig[0]))
            transform_matrix.append(np.abs(y_distance)
                                    / np.abs(orig[1] - ref_y[1]))

            # Creates the rotation matrix for the reference frame

            rotation_matrix = np.array([[0, 1], [-1, 0]])

            for (i, detection) in enumerate(detections):
                dimg1 = draw(frame, detection.corners)
                center = detection.center

                # Gets the center of the tag in inches and rotated accordingly

                center_meters = transform(center)

                # Labels are the position in inches

                posString = '({x:.2f},{y:.2f})'.format(x=center_meters[0],
                                                       y=center_meters[1])
                if detection.tag_id not in reference_tags:

                    # Gets the forward direction

                    (forwardDir, angle) = headingDir(detection.corners,
                            center)

                    # Converts the forward direction to meters

                    forwardDir_meters = transform(forwardDir)

                    # Only draws arrows on tags that are not references

                    dimg1 = draw1(dimg1, forwardDir, center, (0, 0, 255))
                centerTxt = center.ravel().astype(int).astype(str)
                cv2.putText(dimg1, posString,
                            tuple(center.ravel().astype(int) + 10),
                            font, fontScale, (255, 0, 0), lineType)

                if detection.tag_id == int(endPtID):
                    dimg1 = cv2.putText(dimg1, 'End Point',
                                        tuple(center.ravel().astype(int)),
                                        font, 0.8, fontColor, 2)
                    dimg1 = cv2.circle(dimg1,
                                       tuple(center.ravel().astype(int)),
                                       30, (0, 128, 255), 2)
                else:
                    cv2.putText(dimg1, 'Id:' + str(detection.tag_id),
                                tuple(center.ravel().astype(int)),
                                font, 0.8, (0, 0, 0), 2)

                overlay = dimg1
                if detection.tag_id not in reference_tags:

                    # Gets the center position, heading vector and current angle of all non-reference tags

                    odoData1[str(detection.tag_id)] = \
                        [tuple(center_meters)
                         + tuple(forwardDir_meters), angle]
                else:

                    # Only gets the center of reference tags

                    odoData1[str(detection.tag_id)] = \
                        [tuple(center_meters)]
            if len(detections) == 0 and not odoData1:
                overlay = frame
                odoData = odoData1.copy()
            elif odoData1:

                odoData = odoData1.copy()

            # print('frame')

            cv2.imshow(window, overlay)
            cv2.waitKey(1)
    except KeyboardInterrupt:

    # Change the following line to get back the connection part.

        # sndString=str(strtPt[0])+' '+str(strtPt[1])+' '+str(headDir[0])+' '+str(headDir[1])+' '+str(endptFlag)+' '+str(endpt[0])+' '+str(endpt[1])

        # k = cv2.waitKey(1)

        return
Example #14
def main():

    '''Main function.'''

    parser = ArgumentParser(
        description='test apriltag Python bindings')

    # parser.add_argument('device_or_movie', metavar='INPUT', nargs='?', default=0,
    #                     help='Movie to load or integer ID of camera device')

    parser.add_argument('-k', '--camera-params', type=_camera_params,
                        default=None,
                        help='intrinsic parameters for camera (in the form fx,fy,cx,cy)')

    parser.add_argument('-s', '--tag-size', type=float,
                        default=1.0,
                        help='tag size in user-specified units (default=1.0)')

    apriltag.add_arguments(parser)

    options = parser.parse_args()

    # try:
    #     cap = cv2.VideoCapture(int(options.device_or_movie))
    # except ValueError:
    #     cap = cv2.VideoCapture(options.device_or_movie)

    cap = cv2.VideoCapture(1)

    window = 'Camera'
    cv2.namedWindow(window)

    # set up a reasonable search path for the apriltag DLL inside the
    # github repo this file lives in;
    #
    # for "real" deployments, either install the DLL in the appropriate
    # system-wide library directory, or specify your own search paths
    # as needed.
    
    detector = apriltag.Detector(options,
                                 searchpath=apriltag._get_demo_searchpath())

    while True:

        success, frame = cap.read()
        if not success:
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        detections, dimg = detector.detect(gray, return_image=True)

        # build the overlay up front so _draw_pose can draw onto it
        overlay = frame // 2 + dimg[:, :, None] // 2

        num_detections = len(detections)
        print('Detected {} tags.\n'.format(num_detections))

        for i, detection in enumerate(detections):
            print('Detection {} of {}:'.format(i + 1, num_detections))
            print()
            print(detection.tostring(indent=2))

            if options.camera_params is not None:

                pose, e0, e1 = detector.detection_pose(detection,
                                                       options.camera_params,
                                                       options.tag_size)

                if _HAVE_CV2:
                    _draw_pose(overlay,
                               options.camera_params,
                               options.tag_size,
                               pose)

                print(detection.tostring(
                    collections.OrderedDict([('Pose', pose),
                                             ('InitError', e0),
                                             ('FinalError', e1)]),
                    indent=2))

            print()

        cv2.imshow(window, overlay)
        k = cv2.waitKey(1)

        if k == 27:
            break
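The _camera_params type used for the -k option above is not shown; a plausible implementation, inferred from the 'fx,fy,cx,cy' help string (hypothetical, not the original helper):

def _camera_params(s):
    # parse 'fx,fy,cx,cy' into a 4-tuple of floats for argparse
    params = tuple(float(x) for x in s.split(','))
    if len(params) != 4:
        raise ValueError('expected four comma-separated values: fx,fy,cx,cy')
    return params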
Example #15
def apriltag_video(
    input_streams=[
        '../media/input/single_tag.mp4', '../media/input/multiple_tags.mp4'
    ],  # For default cam use -> [0]
    output_stream=False,
    display_stream=True,
    detection_window_name='AprilTag',
):
    '''
    Detect AprilTags from video stream.

    Args:   input_streams [list(int/str)]: Camera index or movie name to run detection algorithm on
            output_stream [bool]: Boolean flag to save/not stream annotated with detections
            display_stream [bool]: Boolean flag to display/not stream annotated with detections
            detection_window_name [str]: Title of displayed (output) tag detection window
    '''

    parser = ArgumentParser(description='Detect AprilTags from video stream.')
    apriltag.add_arguments(parser)
    options = parser.parse_args()
    '''
    Set up a reasonable search path for the apriltag DLL.
    Either install the DLL in the appropriate system-wide
    location, or specify your own search paths as needed.
    '''

    detector = apriltag.Detector(options, searchpath=apriltag._get_dll_path())

    for stream in input_streams:

        video = cv2.VideoCapture(stream)

        output = None

        if output_stream:
            width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = int(video.get(cv2.CAP_PROP_FPS))
            codec = cv2.VideoWriter_fourcc(*'XVID')
            if type(stream) != int:
                output_path = '../media/output/' + str(
                    os.path.split(stream)[1])
                output_path = output_path.replace(
                    str(os.path.splitext(stream)[1]), '.avi')
            else:
                output_path = '../media/output/' + 'camera_' + str(
                    stream) + '.avi'
            output = cv2.VideoWriter(output_path, codec, fps, (width, height))

        while (video.isOpened()):

            success, frame = video.read()
            if not success:
                break

            result, overlay = apriltag.detect_tags(
                frame,
                detector,
                camera_params=(3156.71852, 3129.52243, 359.097908, 239.736909),
                tag_size=0.0762,
                vizualization=3,
                verbose=3,
                annotation=True)
            if output_stream:
                output.write(overlay)

            if display_stream:
                cv2.imshow(detection_window_name, overlay)
                if cv2.waitKey(1) & 0xFF == ord(
                        ' '):  # Press space bar to terminate
                    break
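A usage sketch for the function above (the stream values are illustrative; [0] selects the default camera, as noted in the signature):

if __name__ == '__main__':
    apriltag_video(input_streams=[0],
                   output_stream=False,
                   display_stream=True)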
Example #16
def main():
    transmitCntr = 0
    hostname = socket.gethostname()
    ip_address = socket.gethostbyname(hostname)
    HOST = '192.168.1.78'  # The server's hostname or IP address # This is the server
    PORT = 65432       # The port used by the server
    print(ip_address)
    serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    serverSocket.bind((HOST, PORT))
    serverSocket.listen()

    robotClient, addr = serverSocket.accept()
    msg = robotClient.recv(1024).decode('ascii')

    if msg == 'RobotConnected':
        print('Starting Localization for 1 robot')

        parser = ArgumentParser(
            description='test apriltag Python bindings')

        parser.add_argument('device_or_movie', metavar='INPUT', nargs='?', default=0,
                            help='Movie to load or integer ID of camera device')
        orient = 0
        apriltag.add_arguments(parser)

        options = parser.parse_args()

        try:
            cap = cv2.VideoCapture(0)
            cap.set(cv2.CAP_PROP_FRAME_WIDTH, 2560)
            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1440)
        except ValueError:
            cap = cv2.VideoCapture(options.device_or_movie)

        window = 'Camera'
        window2 = 'Overlay1'
        cv2.namedWindow(window)
        cv2.namedWindow(window2)

        detector = apriltag.Detector(options,
                                    searchpath=apriltag._get_demo_searchpath())

        font                   = cv2.FONT_HERSHEY_SIMPLEX
        fontScale              = 0.3
        fontColor              = (0,0,255)
        lineType               = 1
        print(type(fontColor))
        while True:

            endpt = [0, 0]
            strtPt = [0, 0]
            strtFlag = False
            endptFlag = False
            success, frame = cap.read()
            if not success:
                break

            gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
            detections, dimg = detector.detect(gray, return_image=True)
            headDir = np.array([0, 0])
            num_detections = len(detections)
            #print('Detected {} tags.\n'.format(num_detections))

            dimg1 = dimg

            for i, detection in enumerate(detections):
                dimg1 = draw(frame, detection.corners)
                center = detection.center
                centerTxt = ((center.ravel()).astype(int)).astype(str)
                if detection.tag_id == 6:
                    strtPt = center
                    strtFlag = True
                    headDir = headingDir(detection.corners, center)
                if detection.tag_id == 9:
                    dimg1 = cv2.putText(dimg1, 'End Point',
                                        tuple((center.ravel()).astype(int)),
                                        font, 0.8, fontColor, 2)
                    dimg1 = cv2.circle(dimg1,
                                       tuple((center.ravel()).astype(int)),
                                       30, (0, 128, 255), 2)
                    endpt = center
                    endptFlag = True
                else:
                    cv2.putText(dimg1, 'Id:' + str(detection.tag_id),
                                tuple((center.ravel()).astype(int)),
                                font, 0.8, (0, 0, 0), 2)
                    botDir = headingDir(detection.corners, center)
                    dimg1 = draw1(dimg1, botDir, center, (0, 0, 255))
                cv2.putText(dimg1, '(' + centerTxt[0] + ',' + centerTxt[1] + ')',
                            tuple((center.ravel()).astype(int) + 10),
                            font, fontScale, (255, 0, 0), lineType)
            if num_detections > 0:
                if endptFlag and strtFlag:
                    orient = getTheta(strtPt, endpt, strtPt, headDir)
                    dimg1 = cv2.putText(dimg1, 'T:' + str(int(orient)),
                                        tuple((strtPt.ravel()).astype(int) + 50),
                                        font, 0.8, (0, 0, 0), 2)
                    dimg1 = draw1ine(dimg1, strtPt, endpt, (255, 0, 255))
                overlay = dimg1
            else:
                overlay = frame

            # Change the following line to get back the connection part.

            transmitCntr = 0
            sndString = (str(strtPt[0]) + ' ' + str(strtPt[1]) + ' '
                         + str(headDir[0]) + ' ' + str(headDir[1]) + ' '
                         + str(endptFlag) + ' ' + str(endpt[0]) + ' '
                         + str(endpt[1]))

            if robotClient.recv(2048).decode('ascii') == 'ok':
                robotClient.send(CalTheta(sndString).encode('ascii'))

            cv2.imshow(window, overlay)
Example #17
def tracker(qlist, ids, goals, loc, obs, ob_ids, debug=False):

    parser = ArgumentParser(description='test apriltag Python')

    parser.add_argument('device_or_movie',
                        metavar='INPUT',
                        nargs='?',
                        default=1)

    apriltag.add_arguments(parser)

    options = parser.parse_args()

    cap = cv2.VideoCapture(2)

    # window = 'Camera'
    win2 = "bigger_image"
    cv2.namedWindow(win2)
    # cv2.namedWindow(window)

    dic = {}
    dic2 = {}
    detector = apriltag.Detector(options,
                                 searchpath=apriltag._get_demo_searchpath())
    got_detect = {}
    got_detect2 = {}
    while True:
        # print("visual man loc", loc)
        success, frame = cap.read()
        if not success:
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        r, c = gray.shape

        detections, dimg = detector.detect(gray, return_image=True)

        num_detections = len(detections)
        if debug:
            print(c, r)
            print('Detected {} tags.\n'.format(num_detections))

        for i in ids:
            got_detect[i] = False

        for i, detection in enumerate(detections):
            # print('Detection {} of {}:'.format(i+1, num_detections))
            # print()
            # print(detection.tostring(indent=2))
            # print(pose)

            p1 = detection.corners[0]
            p2 = detection.corners[1]
            location = detection.center
            tid = detection.tag_id
            # print("detection_id is", tid)
            got_detect[tid] = True
            orient = orientation(p1, p2, r, c)

            p = detection.center

            x, y = p[0], p[1]
            x, y = change_grid((x, y), r, c)
            x, y = x * scale_x, y * scale_y

            if debug:
                print(p1, p2)
                print(detection.corners)
                print("center is", p)
                print("x and y", x, y, "orientation:",
                      degrees(orientation(p1, p2, r, c)), "tid is", tid)

            if tid in ids:
                # print("found id",tid)
                dic[tid] = (x, y, orient, 1)
                # loc.value[tid] = (x, y, orient,1)
                # print("x and y and orient", x,y,orient)
                got_detect[tid] = True
                # print("loc.value", loc.value, tid)
                if debug:
                    print("stored in", tid)

            if tid in ob_ids:
                # print("**************************")
                # print("Found obstacle {}".format(tid))
                # print("**************************")
                p = detection.center
                x, y = p[0], p[1]
                x, y = change_grid((x, y), r, c)
                x, y = x * scale_x, y * scale_y
                dic2[tid] = (x, y)
                got_detect2[tid] = True

            # print(theta_x, theta_y,theta_z)

        for i in ids:
            if not got_detect[i]:
                dic[i] = (goals[i][0], goals[i][1], 0, 0)
                # loc.value[i] = ((goals[i][0],goals[i][1],0))
        default = {
            12: (200, -210),
            19: (-320, 10),
        }
        for i in ob_ids:
            if not got_detect2[i]:
                dic2[i] = default[i]
                # loc.value[i] = ((goals[i][0],goals[i][1],0))

        loc.value = dic
        obs.value = dic2
        # print("loc.value", loc.value)

        overlay = frame // 2 + dimg[:, :, None] // 2

        # cv2.imshow(window, overlay)
        scale_percent_r = 160
        scale_percent_c = 150
        width = int(c * scale_percent_c / 100)
        height = int(r * scale_percent_r / 100)
        dim = (width, height)
        resized = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
        cv2.imshow(win2, resized)
        k = cv2.waitKey(1)

        if k == 27:
            break