Example #1
def take():
    id_ = str(int(time.time()))
    path = 'res/img/' + id_ + '.jpg'
    camera.take_photo(path)
    detect = vision.detect(path)
    result = {'id': id_, 'result': detect}
    return jsonify(result)
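take() builds a Flask-style JSON response, so it is presumably registered as a route. A minimal sketch of that wiring, assuming a Flask app and that camera and vision are the project's own modules (the route path and port are also assumptions), might look like this:

# Hypothetical wiring, not taken from the project.
import time
from flask import Flask, jsonify

import camera   # project module (assumed): camera.take_photo(path)
import vision   # project module (assumed): vision.detect(path)

app = Flask(__name__)

@app.route('/take')
def take():
    id_ = str(int(time.time()))
    path = 'res/img/' + id_ + '.jpg'
    camera.take_photo(path)        # capture a frame to disk
    return jsonify({'id': id_, 'result': vision.detect(path)})

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)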
Example #2
def pilonhunt():
    print('Looking for pilons...')
    motor.ctrl(0)
    car_dir.home()
    pilons = []
    #video_dir.maxright()
    #time.sleep(2.0)
    video_dir.tilt_rel(0)
    r = 0.2
    # Sweep the camera pan axis in 0.2 steps, counting detections at two
    # slightly different tilt angles for each pan position.
    while r < 1:
        #r = video_dir.pan(100)
        video_dir.tilt_rel(0)
        video_dir.pan_rel(r)
        r += 0.2
        time.sleep(1.0)
        pilons.append(len(vision.detect()))
        video_dir.tilt_rel(0.05)
        time.sleep(0.5)
        pilons.append(len(vision.detect()))
    video_dir.home_x()
    print(pilons)
Example #3
def distance_from_goal(r_goal, c_goal, t_full):
    t, rowsl, colsl, rowsr, colsr = t_full
    # Even-numbered rows are seen by the left camera, odd rows by the right.
    is_left = (r_goal % 2 == 0)
    savestr1 = '~/jenga/current_pics/left_cam_feedback.JPG'
    savestr2 = '~/jenga/current_pics/right_cam_feedback.JPG'
    command_string1 = "fswebcam -S 40 -d /dev/video4 " + savestr1
    command_string2 = "fswebcam -D 1 -S 40 -d /dev/video3 " + savestr2
    os.system(command_string1)
    os.system(command_string2)
    cascade_side = cv2.CascadeClassifier('./detectors/sides.xml')
    max_size_side = (33, 69)
    cascade_end = cv2.CascadeClassifier('./detectors/ends.xml')
    max_size_end = (75, 45)
    if is_left:
        pic_end = cv2.imread(savestr1, cv2.IMREAD_GRAYSCALE)
        pic_side = cv2.imread(savestr2, cv2.IMREAD_GRAYSCALE)
        side_goal = vision.right_tower[2]
        r_block = rowsl[r_goal // 2]
        c_block = colsl[c_goal]
    else:
        pic_end = cv2.imread(savestr2, cv2.IMREAD_GRAYSCALE)
        pic_side = cv2.imread(savestr1, cv2.IMREAD_GRAYSCALE)
        side_goal = vision.left_tower[0]
        r_block = rowsr[r_goal // 2]
        c_block = colsr[c_goal]
    # Edge-detect both views before running the cascade detectors.
    pic_end = cv2.GaussianBlur(pic_end, (15, 15), 1)
    pic_end = cv2.Canny(pic_end, 70, 175)
    pic_side = cv2.GaussianBlur(pic_side, (15, 15), 1)
    pic_side = cv2.Canny(pic_side, 70, 175)
    bbox_sides = vision.detect(pic_side, cascade_side, max_size_side)
    bbox_ends = vision.detect(pic_end, cascade_end, max_size_end)
    side_error = get_side_error(bbox_sides, r_block, c_block)
    end_error = get_end_error(bbox_ends, r_block, c_block)
    if is_left:
        return (side_error, end_error[0], end_error[1])
    else:
        return (end_error[0], side_error, end_error[1])
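Here vision.detect receives an edge image, a cv2.CascadeClassifier, and a maximum box size. The project's actual helper is not shown anywhere on this page; the following is only a plausible sketch, under the assumption that it wraps OpenCV's detectMultiScale with a size cap:

# Hypothetical helper; the real vision.detect in this project may differ.
import cv2

def detect(image, cascade, max_size):
    # detectMultiScale returns a list of (x, y, w, h) boxes; maxSize filters
    # out detections larger than the expected block face.
    boxes = cascade.detectMultiScale(image, scaleFactor=1.1, minNeighbors=3,
                                     maxSize=max_size)
    return boxes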
Example #4
def imageCallback(self, data):
    try:
        image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)
        return
    cv2.imshow("IM", image)
    cv2.waitKey(1)
    x, y, conf, r = vision.detect(image)
    if conf > self.confidence_thres:
        ori, angle = vision.calculateOrientation(x, image.shape[1], ZED_FOV_H)
        dist = vision.calcDist(2*r, image.shape[1], ZED_FOV_H)
        self.notfound = 0
        # Angle heading
        self.foundCallback(ori, angle, dist)
    else:
        self.notfound += 1
        self.notfoundCallback(self.notfound)
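Example #4 is a ROS image callback (it converts sensor_msgs/Image frames through cv_bridge). A minimal sketch of the node setup that would feed such a callback is below; the class name, topic name, and threshold value are assumptions, not taken from the source, and the callback body is trimmed so the sketch stays self-contained.

# Hypothetical node setup; topic name and threshold are assumed.
import rospy
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError

class TrackerNode(object):
    def __init__(self):
        self.bridge = CvBridge()
        self.confidence_thres = 0.5   # assumed value
        self.notfound = 0
        rospy.Subscriber('/zed/left/image_raw', Image, self.imageCallback)

    def imageCallback(self, data):
        # In the real node this is the full method shown above; here it only
        # converts and displays the frame.
        try:
            image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
            return
        cv2.imshow("IM", image)
        cv2.waitKey(1)

if __name__ == '__main__':
    rospy.init_node('vision_tracker')
    TrackerNode()
    rospy.spin()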
Example #5
for c_str in ["y", "b", "g"]:

    if c_str == "y" : logging.debug("START START CAPTURING YELLOW MANGOS")
    elif c_str == "b" : logging.debug("START START CAPTURING BROWN MANGOS")
    elif c_str == "g" : logging.debug("START START CAPTURING GREEN MANGOS")
    else: break

    i = 0
    while i < n:

        left_raw, right_raw = read_video(cap)
        left_dst = cv2.undistort(left_raw, mtx, dist, None, newcameramtx)
        right_dst = cv2.undistort(right_raw, mtx, dist, None, newcameramtx)
        cv2.imshow('left', left_dst)
        cv2.imshow('right', right_dst)
        left_mango = vision.detect(left_dst, net)
        right_mango = vision.detect(right_dst, net)
        if left_mango is not None and right_mango is not None:
            left_i = 2*i
            right_i = 2*i + 1
            left_path = "mango_color/" + c_str + str(left_i) + ".png"
            cv2.imwrite(left_path, left_dst)
            logging.debug("WRITE AT " + left_path)
            right_path = "mango_color/" + c_str + str(right_i) + ".png"
            cv2.imwrite(right_path, right_dst)
            logging.debug("WRITE AT " + right_path)
            i += 1
        cv2.waitKey(100)

    if c_str != "g": input("PRESS ENTER TO CONTINUE")
Example #6
import glob
import vision
import cv2

for imagePath in glob.glob("test_images/*.JPEG"):
    image = cv2.imread(imagePath)
    x, y, conf, r = vision.detect(image)
    vision.draw(image, x, y, r)
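The loop above draws detections but never displays or stores the result. A small extension, assuming vision.draw annotates the image in place and using a hypothetical test_outputs folder, could save the annotated copies for inspection:

# Sketch only; the output folder name is an assumption.
import glob
import os
import cv2
import vision

out_dir = "test_outputs"
os.makedirs(out_dir, exist_ok=True)

for imagePath in glob.glob("test_images/*.JPEG"):
    image = cv2.imread(imagePath)
    x, y, conf, r = vision.detect(image)
    vision.draw(image, x, y, r)
    # Write the annotated frame next to the originals for later review.
    cv2.imwrite(os.path.join(out_dir, os.path.basename(imagePath)), image)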

Example #7
def react(id_):
    path = 'res/img/' + id_ + '_react.jpg'
    camera.take_photo(path)
    detect = vision.detect(path)
    result = {'id': id_, 'result': detect}
    return jsonify(result)
Example #8
img_counter = 0
while True:

    imgResp = urllib.request.urlopen(url)

    imgNp = np.array(bytearray(imgResp.read()), dtype=np.uint8)
    img = cv2.imdecode(imgNp, -1)

    # put the image on screen

    img = cv2.resize(img, (540, 540))
    cv2.imshow('IPWebcam', img)

    #To give the processor some less stress
    #time.sleep(0.1)

    k = cv2.waitKey(1)
    if k % 256 == 27:
        # ESC pressed
        print("Escape hit, closing...")
        break
    elif k % 256 == 32:
        # SPACE pressed
        img_name = "test{}.png".format(img_counter)
        cv2.imwrite(img_name, img)
        print("{} written!".format(img_name))
        detect(img_counter)
        img_counter += 1

cv2.destroyAllWindows()
Example #9
print('READY TO HARVEST')
input('PRESS ENTER TO START')
print("START HARVESTING")

x = x_min
dir_x = 1
loop_end = False

x = x_min
y = y_min
## drive x as primary axis
for x in range(x_min, x_max, x_short):

    print('position', x, y)
    lframe, rframe = get_frames(vcap)
    raw_mangos = vision.detect(rframe, net)
    mango = most_left(raw_mangos, x, y)
    x_mango, y_mango = approx_position(mango, x, y)
    if abs(x_mango - x) <= x_error:

        color = get_color(rframe)

        #check mango's color
        if color == c.yellow() or color == c.brown():

            ## save point
            saved_x = x

            ## drive y as secondary axis
            x, y = go_to(x, y, x, y_mango)
            arm.forward()