Example 1
import time
from queue import Empty

# recognition, cannon and navigate are project modules; SPHEREFILE and TIMEOUT
# are project-level constants, all assumed to be defined elsewhere.

def smallSentry(mag_q):
    # Look for spheres, track them, and fire once one is centered on screen.
    # Runs until a 'Stop' message arrives on mag_q. Used after a magnet is detected.
    camera = recognition.setup()
    cannon.init()
    lastSeenTime = time.time()
    try:
        foundMagnet = None
        while foundMagnet != 'Stop':
            # Poll the magnet queue without blocking the tracking loop.
            try:
                foundMagnet = mag_q.get(timeout=0.001)
                print(foundMagnet)
            except Empty:
                pass

            print("In small sentry.")
            targetObjects = recognition.detect(camera, SPHEREFILE)
            index = recognition.biggestObject(targetObjects)
            if index != -1:
                lastSeenTime = time.time()
                if recognition.isCentered(targetObjects[index]):
                    # Nudge the cannon up and slightly left before firing,
                    # then return it to the tracking position.
                    print("Firing")
                    cannon.moveV(0.1)
                    cannon.moveH(-0.03)
                    navigate.blink()
                    cannon.firex_small(1)
                    cannon.moveH(0.03)
                    cannon.moveV(-0.1)
                else:
                    # Aim at the center of the biggest detected object (x, y, w, h).
                    biggestObject = targetObjects[index]
                    xTarg = biggestObject[0] + biggestObject[2] / 2
                    yTarg = biggestObject[1] + biggestObject[3] / 2
                    print("Found an object, moving to coordinates, x: {} y: {}".format(xTarg, yTarg))
                    # Overshoot xTarg by 10% so the aim leads the target.
                    cannon.moveTo(xTarg * 1.10, yTarg)
            elif time.time() - lastSeenTime > TIMEOUT:
                # Nothing seen for a while: sweep the cannon horizontally to search.
                cannon.sweepH(0.08)
                time.sleep(0.2)
                print("Sweeping")
    finally:
        camera.close()
        print("Ran the smallSentry close")
Example 2
import time
from queue import Empty

import RPi.GPIO as GPIO

# recognition and cannon are project modules; INVERTEDCONEFILE and TIMEOUT are
# project-level constants, all assumed to be defined elsewhere.

def scan(nav_q, scan_q):
    # Search for inverted cones, track them, and report their angle relative to
    # the center of the robot (-180 to 180) on nav_q.
    # Possibly implement a reverse queue to turn the cannon when hitting a navigation error.
    camera = recognition.setup()
    cannon.init()
    lastSeenTime = time.time()
    try:
        while True:
            # If navigation posted a target angle, swing the cannon towards it.
            try:
                targetAngle = scan_q.get(timeout=0.001)
                print("Cannon Target angle:", targetAngle)
                cannon.moveAngle(-targetAngle)
            except Empty:
                pass

            print("detect, check, fire or track")
            targetObjects = recognition.detect(camera, INVERTEDCONEFILE)
            index = recognition.biggestObject(targetObjects)
            if index != -1:
                lastSeenTime = time.time()
                # Aim at the center of the biggest detected object (x, y, w, h).
                biggestObject = targetObjects[index]
                xTarg = biggestObject[0] + biggestObject[2] / 2
                yTarg = biggestObject[1] + biggestObject[3] / 2
                print("Found an object, moving to coordinates, x: {} y: {}".format(xTarg, yTarg))
                # Overshoot xTarg by 5% so the aim leads the target, since the robot is moving.
                cannon.moveTo(xTarg * 1.05, yTarg)
                # Report the cannon's current angle back to the navigation process.
                nav_q.put(cannon.getAngle())
                print("putting value: {}".format(cannon.getAngle()))
            elif time.time() - lastSeenTime > TIMEOUT:
                # Nothing seen for a while: sweep the cannon horizontally to search.
                cannon.sweepH(0.08)
                time.sleep(0.2)
                print("Sweeping")
    finally:
        # Tell the navigation process we are done, then release the hardware.
        nav_q.put("STOP")
        camera.close()
        GPIO.cleanup()
        print("Ran the scan close")
Example 3
import time

# recognition, cannon and navigate are project modules; SPHEREFILE, TIMEOUT and
# the global dart counter DART_COUNT are assumed to be defined elsewhere.

def sentryMode():
    camera = recognition.setup()
    try:
        # Look for a target and fire if found. Used after a magnet is detected.
        global DART_COUNT
        if DART_COUNT > 0:
            cannon.init()
            lastSeenTime = time.time()
        while DART_COUNT > 0:
            print("detect, check, fire or track")
            targetObjects = recognition.detect(camera, SPHEREFILE)
            index = recognition.biggestObject(targetObjects)
            if index != -1:
                lastSeenTime = time.time()
                # Fire if the biggest object is in the middle of the screen.
                if recognition.isCentered(targetObjects[index]):
                    print("Firing")
                    cannon.moveV(0.1)
                    navigate.blink()
                    cannon.firex(1)
                    cannon.moveV(-0.1)
                    DART_COUNT = DART_COUNT - 1
                else:
                    # Aim at the center of the biggest detected object (x, y, w, h).
                    biggestObject = targetObjects[index]
                    xTarg = biggestObject[0] + biggestObject[2] / 2
                    yTarg = biggestObject[1] + biggestObject[3] / 2
                    print("Found an object, moving to coordinates, x: {} y: {}".format(xTarg, yTarg))
                    cannon.moveTo(xTarg, yTarg)
            elif time.time() - lastSeenTime > TIMEOUT:
                # Nothing seen for a while: sweep the cannon horizontally to search.
                cannon.sweepH(0.08)
                time.sleep(0.2)
        print("Out of darts")
    finally:
        # GPIO.cleanup()
        camera.close()
        print("Ran the sentry close")
import os
import os.path as osp

import cv2

# Standalone detection demo: run the detector over a directory of test images
# and count detections per class. Assumes net, CLASSES, this_dir, recognition,
# Timer and delete_box_iou are provided by the surrounding project.

# Results use the predefined data structure: class name -> count.
results = {}

# Use the pictures in the test directory to exercise the program.
test_dir = osp.join(this_dir, 'data', 'demo', 'test')
result_dir = osp.join(this_dir, 'data', 'demo', 'result')
test_imgs = os.listdir(test_dir)
for img_path in test_imgs:
    # Reset the per-class counts for each image (CLASSES[0] is typically background).
    for cls in CLASSES[1:]:
        results[cls] = 0
    image = cv2.imread(osp.join(test_dir, img_path))
    timer = Timer()
    timer.tic()
    detections = recognition.detect(net, image, CLASSES, 0.4)
    timer.toc()
    print('Detection took {:.3f}s'.format(timer.total_time))

    # Debug: drop overlapping boxes above the IoU threshold.
    new_detections = delete_box_iou(detections, 0.4)  # IoU thresh
    # print('old_detections==============', detections)
    print('new_detections==============', new_detections)

    # Output using the predefined data structure.
    for detection in new_detections:
        # detection[0] is the class name; count it and draw the box.
        results[detection[0]] = results[detection[0]] + 1
        cv2.rectangle(image, (int(detection[1]), int(detection[2])),
                      (int(detection[3]), int(detection[4])),
                      (255, 255, 255), 2)
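delete_box_iou is called above but not shown. Below is a plausible sketch, assuming each detection is a (class_name, x1, y1, x2, y2) tuple as the cv2.rectangle call implies, and that the goal is to drop the later of any two boxes whose intersection-over-union exceeds the threshold.

def delete_box_iou(detections, thresh):
    # Keep a detection only if it does not overlap an already-kept box by more
    # than `thresh` IoU. Detections are (class_name, x1, y1, x2, y2) tuples.
    kept = []
    for det in detections:
        _, x1, y1, x2, y2 = det
        area = max(0.0, x2 - x1) * max(0.0, y2 - y1)
        duplicate = False
        for _, kx1, ky1, kx2, ky2 in kept:
            # Intersection rectangle between the candidate and a kept box.
            ix1, iy1 = max(x1, kx1), max(y1, ky1)
            ix2, iy2 = min(x2, kx2), min(y2, ky2)
            inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
            kept_area = max(0.0, kx2 - kx1) * max(0.0, ky2 - ky1)
            union = area + kept_area - inter
            if union > 0 and inter / union > thresh:
                duplicate = True
                break
        if not duplicate:
            kept.append(det)
    return kept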