Example 1
def displayRedOrBlue(dev, data, timestamp):
	global keep_running
	global red
	global iter
	if red:
		lower = np.array(boundaries[0], dtype="uint8")
		upper = np.array(boundaries[1], dtype="uint8")
	else:
		lower = np.array(boundaries[2], dtype="uint8")
		upper = np.array(boundaries[3], dtype="uint8")
	iter += 1
	if iter % 10 == 0:
		red = not red

	frame = frame_convert2.video_cv(data)
	mask = cv2.inRange(frame, lower, upper)
	output = cv2.bitwise_and(frame, frame, mask=mask)
	gray = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)
	gray = cv2.GaussianBlur(gray, (7, 7), 0)
 
	# perform edge detection, then a dilation + erosion to
	# close gaps between object edges
	edged = cv2.Canny(gray, 50, 100)
	edged = cv2.dilate(edged, None, iterations=1)
	edged = cv2.erode(edged, None, iterations=1)

	# find contours in the edge map
	cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
		cv2.CHAIN_APPROX_SIMPLE)
	cnts = imutils.grab_contours(cnts)
	cv2.imshow('Red and Blue', output)
	if cv2.waitKey(10) == 27:
		keep_running = False
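The boundaries list and the red/iter/keep_running globals referenced above are defined elsewhere in the original script. A hypothetical setup is sketched below; the BGR ranges are placeholder values, not the original author's.

# Hypothetical globals for the snippet above; the BGR boundary values are
# assumptions and would need tuning for the actual camera and lighting.
boundaries = [
    [0, 0, 100],     # red lower (B, G, R)
    [80, 80, 255],   # red upper
    [100, 0, 0],     # blue lower
    [255, 80, 80],   # blue upper
]
red = True
iter = 0
keep_running = True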
Example 2
def display_rgb(dev, data, timestamp):
    global keep_running
    print(frame_convert2.video_cv(data))
    c = cv2.waitKey(10)
    if 'q' == chr(c & 255):
        keep_running = False

    if 's' == chr(c & 255):
        cv2.imwrite("saved1.jpg", frame_convert2.video_cv(data))
Example 3
def display_blue(dev, data, timestamp):
	global keep_running
	lower = np.array(boundaries[2], dtype = "uint8")
	upper = np.array(boundaries[3], dtype = "uint8")
 
	# find the colors within the specified boundaries and apply
	# the mask
	mask = cv2.inRange(frame_convert2.video_cv(data), lower, upper)
	output = cv2.bitwise_and(frame_convert2.video_cv(data), frame_convert2.video_cv(data), mask = mask)
	cv2.imshow('Blue', output)
	if cv2.waitKey(10) == 27:
		keep_running = False
Example 4
def draw_canny(data):
    gray = frame_convert2.video_cv(data)
    gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 35, 125)

    return edged
Example 5
def collect_rgb(dev, data, timestamp):
    global keep_running
    global rgb_index
    np.save('{0}/{1}/{2}'.format(ROOT_PATH, RGB_PATH, rgb_index), data)
    rgb_index += 1
    cv2.imshow('RGB', frame_convert2.video_cv(data))
    if cv2.waitKey(10) == 27:
        keep_running = False
Example 6
def get_video():
    global cam
    data = frame_convert2.video_cv(freenect.sync_get_video()[0])
    #data,_ = freenect.sync_get_video()
    #data = cv2.cvtColor(data, cv2.COLOR_RGB2BGR)
    img = cv2.imencode('.JPEG', data)[1].tobytes()
    enData = base64.b64encode(img).decode('UTF-8')
    return enData
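The string returned here can be decoded back into an OpenCV image; a minimal sketch (the variable names encoded, buf, and img are illustrative):

# Decode the base64 JPEG produced by get_video() back into a BGR image.
import base64
import cv2
import numpy as np

encoded = get_video()                              # base64 string from the function above
buf = np.frombuffer(base64.b64decode(encoded), dtype=np.uint8)
img = cv2.imdecode(buf, cv2.IMREAD_COLOR)          # BGR ndarray, ready for cv2.imshow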
Example 7
def display_rgb(dev, data, timestamp):
    global keep_running
    global faceCascade
    global light
    global tilt
    global search
    global faces
    global ser
    global last_time
    camera_x_center = 320

    image = frame_convert2.video_cv(data)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Detect faces in the image
    faces = faceCascade.detectMultiScale(
       gray,
       scaleFactor=1.1,
       minNeighbors=5,
       minSize=(30, 30),
       flags = cv2.CASCADE_SCALE_IMAGE
    )

    print("found {0} face(s)".format(len(faces)))
    if len(faces) > 0:
        search = False
        light = 1
        face_x_center = faces[0][0] + faces[0][2] / 2
        face_y_center = faces[0][1] + faces[0][3] / 2
        diff = face_x_center - camera_x_center
        # print(diff)
        if diff > 32:
            print("face on the right side, turn right!")
        elif diff < -32:
            print("face on the left side, turn left!")
        else:
            print("face straight ahead")
    else:
        print("no face")
        diff = 640
        search = True
        light = 3

    print(diff)
    ser.write(str(diff - 641))

    c = cv2.waitKey(10)
    if 'q' == chr(c & 255):
        keep_running = False
    if 's' == chr(c & 255):
        cv2.imwrite('detected.jpg', image)
        image = cv2.imread('detected.jpg')
        for (x, y, w, h) in faces:
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imwrite('detected.jpg', image)
Example 8
def display_rgb(dev, data, timestamp):
    global keep_running
    im = frame_convert2.video_cv(data)
    # im = im_or.copy()
    # pdb.set_trace()
    if len(im) != 0:
        cls, det = CNN.detect(net, im)
        # print(det[0, 0:4])
        print(det.shape)
Example 9
    def display_camera_half(self, dev, data, timestamp):

        faces = self.facial_recog(data)
        for (x, y, w, h) in faces:
            cv2.rectangle(data, (x, y), (x + w, y + h), (0, 255, 0), 2)

        data = cv2.resize(data, (1280, 720))
        cv2.imshow('RGB', frame_convert2.video_cv(data))
        if cv2.waitKey(1) == 27:
            self.keep_running = [False]
Example 10
def display_rgb(dev, data, timestamp):
    global keep_running
    global tilt
    global light
    cv2.imshow('RGB', frame_convert2.video_cv(data))
    c = cv2.waitKey(10)
    if 'q' == chr(c & 255):
        keep_running = False
  
    if 's' == chr(c & 255):
        cv2.imwrite("saved.jpg", frame_convert2.video_cv(data))
    if 'u' == chr(c & 255):
        tilt = 25
    if 'd' == chr(c & 255):
        tilt = 5
    if '1' == chr(c & 255):
        light = 1
    if '2' == chr(c & 255):
        light = 2
    if '3' == chr(c & 255):
        light = 3
Example 11
    def display_camera_full(self, dev, data, timestamp):
        data = cv2.resize(data, (1900, 1050))

        faces = self.facial_recog(data)
        for (x, y, w, h) in faces:
            cv2.rectangle(data, (x, y), (x + w, y + h), (0, 255, 0), 2)

        rgb = data[:, :, ::-1]
        self.recognizer.recognize(rgb)

        cv2.imshow('RGB', frame_convert2.video_cv(data))
        if cv2.waitKey(1) == 27:
            self.keep_running = [False]
Example 12
def display_rgb(delay):
    i = 0
    while 1:
        try:
            data = np.load('{0}/{1}/{2}.npy'.format(ROOT_PATH, RGB_PATH, i))
            cv2.imshow('RGB', frame_convert2.video_cv(data))
            if cv2.waitKey(delay) == 27:
                break
        except Exception:
            break
        if DEBUG:
            print(i)
        i += 1
Example 13
    def display_camera_fourth(self, dev, data, timestamp):
        faces = self.facial_recog(data)
        for (x, y, w, h) in faces:
            cv2.rectangle(data, (x, y), (x + w, y + h), (0, 255, 0), 2)
        print('faces')
        #print(faces)
        rgb = data[:, :, ::-1]
        locations = [[y, x + w, y + h, x] for (x, y, w, h) in faces]
        print(locations)
        if len(locations) > 0:
            print(self.recognizer.recognize_face_loc(rgb, locations))

        cv2.imshow('RGB', frame_convert2.video_cv(data))
        if cv2.waitKey(1) == 27:
            self.keep_running = [False]
Example 14
def display_rgb(dev, data, timestamp):
    t_start = datetime.datetime.now()
    global keep_running


    #result = draw_canny(data)
    data = frame_convert2.video_cv(data)
    result = data.copy()

    t_end = datetime.datetime.now()
    t_delta = t_end - t_start

    cv2.putText(result, str(1000000/t_delta.microseconds), (0, 25), cv2.FONT_HERSHEY_PLAIN, 2, (127, 0, 0), 2)
    cv2.imshow('RGB', result)
    
    if cv2.waitKey(10) == 27:
        keep_running = False
Example 15
def kinect_run():
    # ROS node setup
    pub = rospy.Publisher('Image_Screws', kinect_msg, queue_size=1)
    rospy.init_node('Kinect_Main', anonymous=True)
    rate = rospy.Rate(10)
    msg = kinect_msg()
    status = 3  # Current status = setting up
    try:
        im_screw_detect = ImScrewDetector()
    except Exception:
        print("**Screw Detect Error**")
        status = 0
        traceback.print_exc(file=sys.stdout)

    while not rospy.is_shutdown():
        try:
            image = np.array(
                frame_convert2.video_cv(freenect.sync_get_video()[0]))
            image = scale(image)

            status = 1  # Status ready
        except TypeError as e:
            time.sleep(1)
        except Exception as e:
            print("**Get Image Error**")
            status = 0  # error status
            traceback.print_exc(file=sys.stdout)
            break

        im_screw_states, tally = im_screw_detect.detect_screws(
            image, args.disp)
        im_screw_states = im_screw_states.tolist()

        # For each of 4 points, probability is each of 5 classes
        # ['screw_full', 'screw_empty', 'bolt_full', 'bolt_empty', 'null']
        msg.im_screw_probs_1 = im_screw_states[0]
        msg.im_screw_probs_2 = im_screw_states[1]
        msg.im_screw_probs_3 = im_screw_states[2]
        msg.im_screw_probs_4 = im_screw_states[3]
        msg.tally = tally  # Bolts and Screws Tally
        msg.im_stat = status  # Vision status no.

        msg.safe_move = False  # future use variable for robot
        pub.publish(msg)
        rate.sleep()
Example 16
def doloop(c, conn, insert_snapshot):
    global depth, rgb, counter
    counter = 0
    while True:
        # Get a fresh frame
        (depth, _) = get_depth()
        fn = './images/{}.png'.format(uuid4())
        cv2.imwrite(fn, frame_convert2.video_cv(freenect.sync_get_video()[0]))
        # Build a two panel color image
        # d3 = np.dstack((depth,depth,depth)).astype(np.uint8)
        # da = np.hstack((d3,rgb))
        # print(d3, rgb)
        # time.sleep(3)
        counter += 1
        insert_snapshot(c, fn, depth)
        (ok, ) = c.execute('select count(1) from snapshot;')
        print(ok)
        conn.commit()
        time.sleep(1)
Example 17
def rgb(dev, data, timestamp):
    global keep_running

    frame = frame_convert2.video_cv(data)
    _target = frame.copy()

    # find the horizontal centre pixel
    _center1 = np.uint16(np.round(frame.shape[1] / 2))
    # find the vertical centre pixel
    _center2 = np.uint16(np.round(frame.shape[0] / 2))

    cv2.circle(_target, (_center1, _center2), 2, (0, 0, 255), 3)
    # apply a red dot to the centre of the frame so the user can target the object to find HSV components from.

    cv2.imshow('RGB', _target)
    # show targeting frame in a window

    # Convert to HSV colourspace
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # find the Hue, Saturation, Value components of the frame
    CH_H, CH_S, CH_V = cv2.split(frame)

    if cv2.waitKey(10) == 27:
        # find Hue value of middle pixel
        pxl_H = CH_H[int(frame.shape[0] / 2), int(frame.shape[1] / 2)]
        # find Saturation value of middle pixel
        pxl_S = CH_S[int(frame.shape[0] / 2), int(frame.shape[1] / 2)]
        # find intensity value of middle pixel
        pxl_V = CH_V[int(frame.shape[0] / 2), int(frame.shape[1] / 2)]

        cv2.imwrite("measure_hsv.jpg", _target)

        print("Hue", pxl_H)
        print("Saturation", pxl_S)
        print("Intensity", pxl_V)

        keep_running = False
Example 18
def get_video():
    """ Gets the current video data from Kinect """
    return frame_convert2.video_cv(freenect.sync_get_video()[0])
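A sync-style getter like this is typically consumed from a simple polling loop; a minimal sketch, assuming cv2 is already imported alongside freenect and frame_convert2:

# Minimal polling loop built on get_video(); press Esc to exit.
while True:
    cv2.imshow('Video', get_video())
    if cv2.waitKey(10) == 27:
        break
cv2.destroyAllWindows()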
Example 19
def get_rgb():
    return imutils.rotate(
        frame_convert2.video_cv(freenect.sync_get_video()[0]), -90)
Example 20
else:
    rate = rospy.Rate(1)

if debug:
    Counter = 0
while not rospy.is_shutdown():
    rate.sleep()
    if debug:
        if Counter % 100 == 0:
            print(Counter)
        #if Counter > 30:
        #    break
        Counter += 1
    depth, timestamp = freenect.sync_get_depth()
    #depth,safe = isBlocked(depth)
    video = frame_convert2.video_cv(freenect.sync_get_video()[0])

    pub.publish(True)

    maskArray = k.findObjects(video)
    for mask in maskArray:
        keypointsArray, bluredIm = k.findBlobs(mask)
        # this is added up here because, as part of this, bluredIm goes from a
        # 1-dimensional ndarray to 3 channels, which we need later
        # TODO Figure out how to do this without that.
        bluredIm = cv2.drawKeypoints(
            bluredIm, keypointsArray, np.array([]), (0, 0, 255),
            cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        cannyEdges = k.ourCannyBlob(mask)
        for point in keypointsArray:
            #try:
Example 21
def get_video():
    return frame_convert2.video_cv(
        freenect.sync_get_video_with_res(
            resolution=freenect.RESOLUTION_HIGH)[0])
Example 22
    def display_camera_reg(self, dev, data, timestamp):
        cv2.imshow('RGB', frame_convert2.video_cv(data))
        if cv2.waitKey(1) == 27:
            self.keep_running = [False]
Example 23
def show_video():
    cv2.imshow('Video', frame_convert2.video_cv(freenect.sync_get_video()[0]))
Example 24
        return None

diff = None
lower = 0
upper = 100
max_upper = 1900
cv_prev_cx = 0
cv_prev_cy = 0
cv_prev_radius = 0
depth_area = 0
depth_x = 0
depth_y = 0
#freenect.set_depth_mode(freenect.RESOLUTION_MEDIUM,freenect.DEPTH_REGISTERED)
while 1:
    while upper < max_upper:
        rgb = frame_convert2.video_cv(freenect.sync_get_video()[0])
        rgb = rgb.astype(np.uint8)
        rgb_dil,rgb_mask = masking(rgb)
        depth, timestamp = freenect.sync_get_depth()
        #print('%d < depth < %d' % (lower, upper))
        depth = disp_thresh(lower,upper,depth)
        mblur = cv2.medianBlur(depth, 15)
        apt_thresh = cv2.adaptiveThreshold(mblur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 15, 5)
        canny = cv2.Canny(apt_thresh,100,255)
        dilate = cv2.dilate(canny, kernel=(3, 3), iterations=1)
        comb = cv2.bitwise_and(apt_thresh,rgb_mask)
        _, cnt, hierarchy = cv2.findContours(dilate, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        _,rgb_cnt,rgb_heir = cv2.findContours(rgb_dil,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        if rgb_cnt:
            epsilon = 0.1 * cv2.arcLength(rgb_cnt[0], True)
            approx = cv2.approxPolyDP(rgb_cnt[0], epsilon, True)
Example 25
def display_rgb(dev, data, timestamp):
    global keep_running
    cv2.imshow('RGB', frame_convert2.video_cv(data))
    if cv2.waitKey(10) == 27:
        keep_running = False
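Callbacks with this (dev, data, timestamp) signature are normally registered with the libfreenect Python wrapper's asynchronous run loop; a minimal sketch of how display_rgb might be hooked up (the body helper is illustrative):

# Sketch: register the callback with freenect's run loop and stop it via keep_running.
keep_running = True

def body(*args):
    # raising freenect.Kill is the conventional way to exit the runloop
    if not keep_running:
        raise freenect.Kill

freenect.runloop(video=display_rgb, body=body)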
Example 26
def show_video():
    cv2.imshow('Video', frame_convert2.video_cv(freenect.sync_get_video()[0]))
Example 27
def show_video():
    # 'frame' is assumed to be an image defined elsewhere in the original script
    cv2.imshow('Video', frame_convert2.video_cv(frame))
Example 28
def kinect_get_video():
    # get the RGB image and convert it to the correct format
    return frame_convert2.video_cv(freenect.sync_get_video()[0])
Example 29
def get_circle_pos(data):
    """ Gets the x, y location of circles in the current frame """

    frame = frame_convert2.video_cv(data)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # setup lower threshold masks for filtering (H,S,V)
    lower_red = np.array([165, 220, 50])
    # setup upper threshold masks for filtering (H,S,V)
    upper_red = np.array([185, 255, 200])
    # use higher than 180 hue to wrap around to 0

    # cast to type that the opencv can process consistently
    lower = np.array(lower_red, dtype="uint8")
    upper = np.array(upper_red, dtype="uint8")

    _mask = cv2.inRange(frame, lower, upper)  # find boolean mask for filtering
    # bitwise-and original frame using the mask above to find the filtered frame.
    frame = cv2.bitwise_and(frame, frame, mask=_mask)

    # convert filtered frame back to BGR colourspace (opencv likes BGR rather than RGB)
    frame = cv2.cvtColor(frame, cv2.COLOR_HSV2BGR)

    # CIRCLE DETECTION
    # convert to greyscale colourspace
    cimg = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # (optional: smooth image to reduce detection errors, must be an odd number)
    cimg = cv2.GaussianBlur(cimg, (9, 9), 0)

    circles = cv2.HoughCircles(cimg,
                               cv2.HOUGH_GRADIENT,
                               1,
                               200,
                               param1=40,
                               param2=15,
                               minRadius=1,
                               maxRadius=50)
    # this function will need some tuning and trial & error, see the opencv documentation

    # if(circles != None): # if there are circle detected in the frame

    point = Point(0, 0)

    if circles is not None:
        circles = np.uint16(np.around(circles))

        # loop through all the circles detected
        # for i in range(len(circles[0])):
        i = 0
        point.x = circles[0][i][0]
        point.y = circles[0][i][1]

        # draw the outer circle
        cv2.circle(frame, (circles[0][i][0], circles[0][i][1]),
                   circles[0][i][2], (0, 255, 0), 2)
        # draw the centre of the circle
        cv2.circle(frame, (circles[0][i][0], circles[0][i][1]), 2, (0, 0, 255),
                   3)
        # see documentation for draw functions
    else:
        point.x = -1
        point.y = -1

    # show frame in a separate window
    cv2.imshow('RGB', frame)
    return point
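The Point container returned above is not shown in the snippet; a minimal stand-in (an assumption, the original class may differ) could be:

# Hypothetical minimal Point container for the (x, y) result above.
class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y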
Example 30
def doloop():
    """
    Primary process loop.
    """
    global depth, rgb, current_target

    current_target = None
    positions = []

    create_windows()

    nothing = lambda x: None
    # cv2.createTrackbar('distance min', 'color', 0, 255, nothing)
    # cv2.createTrackbar('distance max', 'color', 0, 255, nothing)
    cv2.createTrackbar('min', 'color', MIN_A, 500, nothing)
    cv2.createTrackbar('max', 'color', MAX_A, 1000, nothing)
    cv2.createTrackbar('trim', 'color', 0, 200, nothing)
    cv2.createTrackbar('user', 'grid', 0, 640, nothing)
    cv2.createTrackbar('difficulty', 'grid', 0, 4, nothing)
    #cv2.createTrackbar('Top Motor', 'grid', MOTOR1, 1023, nothing)
    #cv2.createTrackbar('Bottom Motor', 'grid', MOTOR2, 1023, nothing)
    cv2.createTrackbar('rotation', 'grid', 90, 180, nothing)

    times = []

    while True:
        #time.sleep(1)
        start = time.time()

        # Trackbar updates
        distance_min = cv2.getTrackbarPos('distance min', 'color')
        distance_max = cv2.getTrackbarPos('distance max', 'color')
        min_a = cv2.getTrackbarPos('min', 'color')
        max_a = cv2.getTrackbarPos('max', 'color')
        trim = cv2.getTrackbarPos('trim', 'color')
        difficulty = cv2.getTrackbarPos('difficulty', 'grid')
        userpos = cv2.getTrackbarPos('user', 'grid')
        if distance_min == -1: distance_min = DISTANCE_MIN
        if distance_max == -1: distance_max = DISTANCE_MAX
        if min_a == -1: min_a = MIN_A
        if max_a == -1: max_a = MAX_A
        if trim == -1: trim = EDGE

        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()
        depth = prepare_depth(depth)

        # Trim frame
        if trim:
            depth = depth[:, trim:depth.shape[1] - trim]
            rgb = rgb[:, trim:rgb.shape[1] - trim]

        # Threshold
        ret, thresh1 = cv2.threshold(depth, distance_min, 255,
                                     cv2.THRESH_TOZERO)
        ret, thresh = cv2.threshold(thresh1, distance_max, 255,
                                    cv2.THRESH_TOZERO_INV)
        if 'd' in windows: cv2.imshow('depth', depth)
        if 't' in windows: cv2.imshow('threshold', thresh)

        # Get target point
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)
        target = get_contour2(contours, min_a, max_a)
        if target is not None:
            M = cv2.moments(target)
            if M['m00'] == 0: print("0")
            else:
                target_point = (int(M['m10'] / M['m00']),
                                int(M['m01'] / M['m00']))

                x_val = (640.0 / float(target_point[0]) * 255)
                _, radius = cv2.minEnclosingCircle(target)
                #cv2.putText(rgb, str(radius), target_point, cv2.FONT_HERSHEY_SIMPLEX, \
                #1, (0,255,0), 2, cv2.CV_AA)
                cv2.circle(rgb, target_point, 4, (255, 0, 0), -1)
                cv2.drawContours(rgb, [target], -1, (0, 255, 0), 3)

                if userpos == -1: userpos = target_point[0]

        #Clip userpos
        userpos = max(min(2 * userpos - WIDTH / 2, WIDTH), 0)

        if difficulty:
            current_target = pick_target(userpos, difficulty)
            current_speed = pick_speed(userpos, difficulty)

        # Keep a list of the detected positions
        positions.append(current_target)
        if len(positions) == 6:
            # select and send target
            send_target(select_target(positions, difficulty))
            positions = []

        if 'g' in windows:
            cv2.imshow('grid', drawTable(userpos, current_target))

        if 'c' in windows: cv2.imshow('color', video_cv(rgb))
        char = cv2.waitKey(10)
        if char == 27:
            cv2.destroyAllWindows()
            break
        elif char == 115:
            cv2.imwrite('capture.png', video_cv(rgb))
            cv2.namedWindow('saved')
            cv2.imshow('saved', video_cv(rgb))
        elif char == ord('r'):
            if serial_out:
                serial_out.close()
                serial_out.open()

        end = time.time()
        times.append(end - start)
Example 31
    def kinect_get_image(self):
        # img: a numpy array, shape (480, 640, 3), dtype np.uint8
        # timestamp: int representing the time
        img, timestamp = freenect.sync_get_video()
        return frame_convert2.video_cv(img)
Example 32
def display_rgb(dev, data, timestamp):
    global keep_running
    cv2.imshow('RGB', frame_convert2.video_cv(data))
    if cv2.waitKey(10) == 27:
        keep_running = False
Example 33
def show_depth(warp=False):
    global threshold
    global current_depth
    global last_image
    global client
    global last_note

    depth, timestamp = freenect.sync_get_depth()
    rgb = frame_convert2.video_cv(freenect.sync_get_video()[0])
    depth = 255 * np.logical_and(depth >= current_depth - threshold,
                                 depth <= current_depth + threshold)
    depth = depth.astype(np.uint8)
    depth_rgb = cv2.cvtColor(depth, cv2.COLOR_GRAY2RGB)
    # kernel = np.ones((9,9), np.uint8)

    # depth_rgb = cv2.dilate(depth_rgb, kernel, iterations=1)
    # depth_rgb = cv2.erode(depth_rgb, kernel, iterations=1)

    hSobel = cv2.Sobel(depth_rgb, cv2.CV_32F, 0, 1, -1)
    vSobel = cv2.Sobel(depth_rgb, cv2.CV_32F, 1, 0, -1)

    depth_rgb = cv2.add(hSobel, vSobel)

    depth_rgb = cv2.convertScaleAbs(depth_rgb)
    depth_rgb = cv2.GaussianBlur(depth_rgb, (3, 3), 0)
    ret, depth_rgb = cv2.threshold(depth_rgb, 80, 255, cv2.THRESH_BINARY)
    # depth_rgb_mask = cv2.cvtColor(depth, cv2.COLOR_GRAY2RGB)
    # depth_rgb = cv2.bitwise_and(rgb,rgb,mask = depth)
    h, w = depth.shape[:2]

    # img = cv2.GaussianBlur(depth_rgb,(3,3),0)
    # laplacian = cv2.Laplacian(img,cv2.CV_8U)

    if last_image is not None:
        x_offset = int(w * (scale - 1.0) / 2.0)
        y_offset = int(h * (scale - 1.0) / 2.0)
        scaled = cv2.resize(last_image, None, fx=scale,
                            fy=scale)[y_offset:h + y_offset,
                                      x_offset:w + x_offset]

        color_img = np.zeros((h, w, 3), dtype=np.uint8)
        color_img[:, :] = [
            random.randint(1, 255),
            random.randint(1, 255),
            random.randint(1, 255)
        ]

        color_scale = cv2.bitwise_and(color_img, scaled)
        image = cv2.bitwise_or(color_scale, depth_rgb)
        # image = depth_rgb

        # overlay = cv2.bitwise_xor(rgb, image)
        # cv2.line(image, (0, y_threshold), (w, y_threshold), (0, 255, 0), 3)

        # overlay_with_keypoints = cv2.drawKeypoints(overlay, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

        if False:
            params = cv2.SimpleBlobDetector_Params()
            params.minThreshold = 0
            params.maxThreshold = 255
            params.minArea = 200
            params.maxArea = w * h
            params.filterByArea = True

            params.filterByCircularity = False
            params.filterByConvexity = False
            params.filterByColor = False
            params.filterByInertia = False

            detector = cv2.SimpleBlobDetector_create(params)
            keypoints = detector.detect(depth)
            print("Detected " + str(len(keypoints)))
            for keypoint in keypoints:
                cX = int(keypoint.pt[0])
                cY = int(keypoint.pt[1])

                location = str(cX) + "," + str(cY)
                cv2.circle(image, (cX, cY), 15, (255, 0, 255), -1)
                cv2.putText(image, location, (cX - 25, cY - 25),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)

                # if(last_note >= 0):
                #     # Already playing a note
                #     if(cY > y_threshold):
                #         # Center is below line, stop playing
                #         client.publish("note-off", last_note)
                #         last_note = -1
                #     else:
                #         # Center is still above the line, leave current note
                #         client.publish("note-off", last_note)
                #         note = int(map_range((0, w), (88, 0), cX))
                #         # Only change if we've moved by 2 semitones
                #         if(abs(note - last_note) > 2):
                #             last_note = note
                #             client.publish("note-on", note)
                #         pass
                # else:
                #     # No active note
                #     if(cY < y_threshold):
                #         # Center is above the line, start a new note
                #         note = int(map_range((0, w), (88, 0), cX))
                #         last_note = note
                #         client.publish("note-on", note)

                #     else:
                #         # Center is below the line, don't start anything
                #         pass

        if (warp):
            src = np.array([[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1]],
                           dtype="float32")

            dst = np.array(
                [[150, 50], [w - 1, 0], [w - 1, h - 1], [150, h - 51]],
                dtype="float32")

            M = cv2.getPerspectiveTransform(src, dst)

            # 'overlay' is only defined in the commented-out line above, so warp the composited frame instead
            warped = cv2.warpPerspective(image, M, (w, h))

            cv2.imshow('Depth', warped)
        else:
            # cv2.imshow('Depth', overlay)
            cv2.imshow('Depth', image)

        last_image = image

    else:
        last_image = depth_rgb
Example 34
def get_video():
    f = frame_convert2.video_cv(freenect.sync_get_video()[0])
    return cv2.fastNlMeansDenoisingColored(f, None, 10, 10, 3, 3)
Example 35
def get_video():
    return frame_convert2.video_cv(freenect.sync_get_video()[0])
Example 36
def get_video():
    return frame_convert2.video_cv(freenect.sync_get_video()[0])
Example 37
def get_video():
    frame = frame_convert2.video_cv(freenect.sync_get_video()[0])
    #print('\nframeVideo =\n')
    #print(frame)
    return frame