Example #1
def display_depth(dev, data, timestamp):
    global keep_running
    global conn
    global addr
    buf = buffered_readLine(conn)
    #if not buf:
    #   conn, addr = sock.accept()
    #  return
    #print buf
    auto = int(float(buf))
    shifted = auto >> 7  # top bit of the command byte selects autonomous mode
    # print("auto: " + str(auto))
    # print(" buf: " +str(buf))
    # print(" shifted: " + str(shifted))
    depthimg = frame_convert2.pretty_depth_cv(data)
    kernel = np.ones((7, 7), np.uint8)  # a bare (7, 7) tuple is not a valid kernel
    eroded = cv2.erode(depthimg, kernel, iterations=3)  #filter the image
    dilated = cv2.dilate(eroded, kernel, iterations=3)  #filter the image more
    #cv2.rectangle(dilated, (280, 200), (360, 280), (0), 3) #draw rectangle in the center for analysis
    roi = dilated[200:280, 280:360]  #actual pixels to be analyzed for the mean
    mask = cv2.inRange(dilated, closeVal, farVal)
    topHalf = mask[0:240, 0:640]
    #cv2.imshow('Depth', topHalf)
    contours = cv2.findContours(topHalf.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
    centerX = 320
    if (shifted == 1):
        if len(contours) > 0:
            humanCnt = max(contours,
                           key=cv2.contourArea)  #pick the biggest shape
            x, y, w, h = cv2.boundingRect(humanCnt)
            centerX = (x + x + w) / 2
            #print(centerX)
            cv2.rectangle(topHalf, (x, y), (x + w, y + h), 125, 2)
            meanValue = roi.mean()
            depthft = meanValue * meanValue * .000892 - .292 * meanValue + 26.5
            #print(depthft)
            if depthft < 6:  # stop
                arduino.write(b'\x00')
            elif centerX < 300:  # left
                arduino.write(b'\x4D')
            elif centerX > 340:  # right
                arduino.write(b'\x1D')
            else:  # forward
                arduino.write(b'\x2D')
        else:  # stop: no contour to follow
            arduino.write(b'\x00')
    else:
        # manual mode: pass the received command byte straight through
        arduino.write(bytes([auto & 0xFF]))
    cv2.imshow('Mask', topHalf)
    if cv2.waitKey(10) == 27:
        keep_running = False
        print(dilated.shape)
        print(dilated.dtype)
        arduino.write(b'\x00')
        sock.close()
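The depth-to-feet conversion in Example #1 is an empirical calibration: it maps the mean 8-bit intensity of the centre ROI to a distance in feet with a fitted quadratic. A minimal sketch of just that mapping, pulled out into a hypothetical helper using the example's own coefficients:

def roi_mean_to_feet(mean_value):
    # fitted quadratic from Example #1: feet = 0.000892*m^2 - 0.292*m + 26.5
    return 0.000892 * mean_value ** 2 - 0.292 * mean_value + 26.5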
Example #2
def display_depth(dev, data, timestamp):
    global keep_running
    myImg = frame_convert2.pretty_depth_cv(data)
    myImg = cv2.erode(myImg, None, iterations=5)
    myImg = cv2.dilate(myImg, None, iterations=5)
    ##    cv2.imshow('Depth', myImg)
    depthColorLower = 100
    depthColorHigher = 150
    mask = cv2.inRange(myImg, depthColorLower, depthColorHigher)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None
    ##    bbox = (287, 23, 86, 320)
    ##    if len(cnts) > 0:
    ##        c = max(cnts, key=cv2.contourArea)
    ##        if ok:
    ##            p1 = (int(bbox[0]), int(bbox[1]))
    ##            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
    ##            cv2.rectangle(frame, p1, p2, (255,0,0), 2, 1)
    if len(cnts) > 0:  # guard: max() on an empty list raises ValueError
        c = max(cnts, key=cv2.contourArea)
        x, y, w, h = cv2.boundingRect(c)
        if w > h:
            print("width is bigger")
        else:
            print("height is bigger")
        cv2.rectangle(myImg, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("Tracking", mask)
    cv2.imshow('depth', myImg)

    if cv2.waitKey(10) == 27:
        keep_running = False
Example #3
def set_image_up(data):  # Gives us a depth, binary and contoured image
    img = frame_convert2.pretty_depth_cv(data)
    img = cv2.medianBlur(img, 9)
    thresh, binary = cv2.threshold(img, 50, 255, cv2.THRESH_BINARY_INV)
    contour, hierarchy = cv2.findContours(binary, cv2.RETR_TREE,
                                          cv2.CHAIN_APPROX_SIMPLE)
    return img, binary, contour
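A minimal usage sketch for set_image_up, assuming OpenCV 4's two-value findContours return and a live Kinect supplying the frame:

img, binary, contour = set_image_up(freenect.sync_get_depth()[0])
cv2.drawContours(img, contour, -1, 255, 2)  # overlay every contour in white
cv2.imshow('Contoured depth', img)
cv2.waitKey(0)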
Example #4
def collect_depth(dev, data, timestamp):
    global keep_running
    global depth_index
    np.save('{0}/{1}/{2}'.format(ROOT_PATH, DEPTH_PATH, depth_index), data)
    cv2.imshow('Depth', frame_convert2.pretty_depth_cv(data))
    if cv2.waitKey(10) == 27:
        keep_running = False
    depth_index += 1
Example #5
def get_depth():
    #saving as 11 bit for csv file
    #frame = freenect.sync_get_depth()[0]
    #good for displaying and saving depth image
    frame = frame_convert2.pretty_depth_cv(freenect.sync_get_depth()[0])
    #print('\nframeDepth =\n')
    #print(frame)
    return frame
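This getter pairs with the classic synchronous display loop from the libfreenect demos; a minimal sketch (Esc exits):

while True:
    cv2.imshow('Depth', get_depth())
    if cv2.waitKey(10) == 27:
        break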
Example #6
def show_depth(depth,window):
    """
    passes a raw depth from the kinect and displays it
    ont a image called window
    """
    #depth = depth.astype(np.uint8)
    depth = frame_convert2.pretty_depth_cv(depth)
    cv2.imshow(window, depth)
Example #7
def show_depth(depth, window):
    """
    passes a raw depth from the kinect and displays it
    ont a image called window
    """
    #depth = depth.astype(np.uint8)
    depth = frame_convert2.pretty_depth_cv(depth)
    cv2.imshow(window, depth)
Example #8
def display_depth(dev, data, timestamp):
    global keep_running
    cv2.imshow('Depth', frame_convert2.pretty_depth_cv(data))
    c = cv2.waitKey(10)
    if 'q' == chr(c & 255):
        keep_running = False
  
    if 's' == chr(c & 255):
        # convert to 8-bit first: imwrite cannot encode 11-bit depth as JPEG
        cv2.imwrite("saved1.jpg", frame_convert2.pretty_depth_cv(data))
Example #9
def main():
    all_images_path = "../data/u_turn"
    images_path = all_images_path + "/rgb/"
    cfg_path = "./neural_net/yolo-obj.cfg"
    weights_path = "./neural_net/rover.weights"
    data_path = "./neural_net/obj.data"
    num_images = 10
    image_sim = BoundingBoxSim(images_path=images_path,
                               weights_path=weights_path,
                               data_path=data_path,
                               num_images=num_images,
                               cfg_path=cfg_path)

    image_sim.simulate()

    # Analyzing variables
    global world_coords
    world_coords = []
    xvel = []
    yvel = []
    zvel = []

    while image_sim.step():
        # Get current step and state
        curr_state = image_sim.curr_state
        step_num = image_sim.curr_step
        print "Step number ", step_num
        print "Current state is ", curr_state

        # Compute image rects and coords
        img = rgb_rect(curr_state)
        depth_path = all_images_path + '/depth/' + str(step_num) + '.npy'
        depth, coords = calc_coords(depth_path, curr_state)

        # Analyze obtained coordinates
        analyze_coords(coords)
        print "World Coordinates (cm)", coords
        world_coords.append([100 * coord for coord in coords])
        if len(world_coords) > 1:
            T = 1 / 30  # frame period in seconds at 30 FPS
            xvel.append((world_coords[-1][0] - world_coords[-2][0]) / T)
            yvel.append((world_coords[-1][1] - world_coords[-2][1]) / T)
            zvel.append((world_coords[-1][2] - world_coords[-2][2]) / T)
            print "Velocities (cm / s)", xvel[-1], yvel[-1], zvel[-1]

        # Display images
        cv2.imshow('image', img)
        cv2.imshow('Depth', frame_convert2.pretty_depth_cv(depth))
        cv2.waitKey(10)

    cv2.destroyAllWindows()

    # Plotting functions
    world_coords = np.array(world_coords)
    plot_states(x=world_coords[:, 0],
                y=world_coords[:, 1],
                z=world_coords[:, 2],
                xvel=xvel,
                yvel=yvel,
                zvel=zvel)
Example #10
def display_depth(dev, data, timestamp):
    global keep_running
    t_start = datetime.datetime.now()
    data2 = frame_convert2.pretty_depth_cv(cv2.resize(data, (0, 0), fx=0.25, fy=0.25))
    #data2 = frame_convert2.pretty_depth_cv(data)

    #data2 = cv2.applyColorMap(data.astype(np.uint8), cv2.COLORMAP_JET)
    t_end = datetime.datetime.now()
    t_delta = t_end - t_start
    print(1.0 / t_delta.total_seconds())  # conversions per second; .microseconds alone wraps past 1 s
    cv2.imshow('Depth', cv2.resize(data2, (0, 0), fx=4, fy=4))
    if cv2.waitKey(10) == 27:
        keep_running = False
Example #11
def display_depth(delay):
    i = 0
    while 1:
        try:
            data = np.load('{0}/{1}/{2}.npy'.format(ROOT_PATH, DEPTH_PATH, i))
            cv2.imshow('Depth', frame_convert2.pretty_depth_cv(data))
            if cv2.waitKey(delay) == 27:
                break
        except IOError:  # stop at the first missing frame
            break
        if DEBUG:
            print(i)
        i += 1
Example #12
def get_depth():
    f = frame_convert2.pretty_depth_cv(freenect.sync_get_depth()[0])
    # denoise the 8-bit depth image before returning it
    return cv2.fastNlMeansDenoising(f, None, 10, 7, 21)
Example #13
#OUTDOOR = [20-70,100-255,50-255]
#INDOOR = [30-50,100-255,100-255]
##########################

##########INITIALIZE CAMERA##########
'''
cv_a = cv2.VideoCapture(0)
ret1,cv_b=cv_a.read()
print 'Starting Camera...'
while ret1!=True:
    cv_a=0
    cv_a=cv2.VideoCapture(0)
    ret1,cv_b=cv_a.read()
print 'DONE'
'''
cv_depth = frame_convert2.pretty_depth_cv(freenect.sync_get_depth()[0])
cv_b = frame_convert2.video_cv(freenect.sync_get_video()[0])
#####################################
count = 0
cv_esc = 0
cv_eqn1 = 0
cv_area_m = 0
cv_timeout = 0
cv_cnt2 = 0
cv_cnt1 = 0
cv_rot = 0
cv_prev_radius = 0
cv_prev_cx = 0
cv_prev_cy = 0
cv_test_area = 0
cv_cx = 0
Example #14
def get_depth():
    """ Get's the current depth data from Kinect """
    return frame_convert2.pretty_depth_cv(freenect.sync_get_depth()[0])
Example #15
def get_ir():
    array, _ = freenect.sync_get_video(0, freenect.VIDEO_IR_10BIT)
    return imutils.rotate(
        cv2.cvtColor(frame_convert2.pretty_depth_cv(array),
                     cv2.COLOR_GRAY2RGB), -90)
Example #16
def findCountours():
    cnts = cv2.findContours(frame_convert2.pretty_depth_cv(data),
                            cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)  # normalizes the return across OpenCV versions
    return cnts
Example #17
def get_depth():
    depth = frame_convert2.pretty_depth_cv(freenect.sync_get_depth()[0])
    return np.where(np.array(depth) == 255, 0, 255).astype(np.dtype('uint8'))
Example #18
def get_depth(img):
    return frame_convert2.pretty_depth_cv(img)
Example #19
def get_depth():
    return frame_convert2.pretty_depth_cv(freenect.sync_get_depth()[0])
Example #20
def display_depth(dev, data, timestamp):
    global keep_running
    cv2.imshow('Depth', frame_convert2.pretty_depth_cv(data))
    if cv2.waitKey(10) == 27:
        keep_running = False
Example #21
def get_depth():
    return frame_convert2.pretty_depth_cv(freenect.sync_get_depth(format=freenect.DEPTH_REGISTERED)[0])
Example #22
def display_depth(dev, data, timestamp):
    global keep_running
    cv2.imshow('Depth', frame_convert2.pretty_depth_cv(data))
    if cv2.waitKey(10) == 27:
        keep_running = False
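Callbacks like this are registered with libfreenect's event loop. A minimal sketch of the wiring, following the pattern of the libfreenect async demos, where the body callback polls keep_running so that Esc can end the loop:

keep_running = True

def body(*args):
    if not keep_running:
        raise freenect.Kill  # unwinds freenect.runloop cleanly

freenect.runloop(depth=display_depth, body=body)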
Example #23
def getDepth(dsRate):
    return frame_convert2.pretty_depth_cv(
        freenect.sync_get_depth()[0])[::dsRate, ::dsRate]
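Hypothetical usage: the [::dsRate, ::dsRate] slice keeps every dsRate-th row and column, so a rate of 4 shrinks the 640x480 depth image to 160x120:

small = getDepth(4)
print(small.shape)  # expected: (120, 160)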
Example #24
def main():
    print("Running...")
    flag_track2 = 0
    count = 0
    counttrack = 0
    prev_y_pixel = 0
    prev_x_pixel = 0
    tetaperpixel = 0.994837 / 400.0
    tracker = KCF.kcftracker(True, False, True,
                             False)  # hog, fixed_window, multiscale, lab
    counttrack2 = 0
    prev_distance2 = 0
    # grab one frame at first to compare for background subtraction
    frame, timestamp = freenect.sync_get_video()
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    frame_resized = imutils.resize(frame, width=min(400, frame.shape[1]))
    frame_resized_grayscale = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2GRAY)
    print(frame_resized_grayscale.shape)
    # initialize centroid
    center = [[frame_resized.shape[1] / 2, frame_resized.shape[0] / 2]]
    center_fix = []
    # defining min cutoff area
    #min_area=(480/400)*frame_resized.shape[1]
    min_area = (0.01) * frame_resized.shape[1]
    print(frame_resized.shape)  # (300,400,3)
    boxcolor = (0, 255, 0)
    timeout = 0
    #variable for counting time elapsed
    key = ''
    temp = 1

    # save video
    countsave = 0
    while key != 113:  # for 'q' key
        # start timer
        timer = cv2.getTickCount()
        starttime = time.time()
        previous_frame = frame_resized_grayscale
        # retrieve new RGB frame image
        frame, timestamp = freenect.sync_get_video()
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        frame_resized = imutils.resize(frame, width=min(400, frame.shape[1]))
        frame_resized_grayscale = cv2.cvtColor(frame_resized,
                                               cv2.COLOR_BGR2GRAY)
        #temp=background_subtraction(previous_frame, frame_resized_grayscale, min_area)

        # retrieve depth map
        depth, timestamp = freenect.sync_get_depth()
        depth = imutils.resize(depth, width=min(400, depth.shape[1]))
        print(depth.shape)
        depth2 = np.copy(depth)
        # orig = image.copy()
        if temp == 1:
            if (flag_track2 == 0):
                frame_processed, center_fix, pick2 = detect_people(
                    frame_resized, center, frame_resized, boxcolor)
                if (len(center_fix) > 0):
                    i = 0
                    for b in center_fix:

                        #print(b)
                        #print("Point "+str(i)+": "+str(b[0])+" "+str(b[1]))

                        x_pixel = b[1]
                        y_pixel = b[0]
                        print("x1:" + str(x_pixel) + "y1:" + str(y_pixel))
                        rawDisparity = depth[int(x_pixel), int(y_pixel)]
                        print("raw:" + str(rawDisparity))
                        distance = 1 / (-0.00307 * rawDisparity + 3.33)
                        if (distance < 0):
                            distance = 0.5
                        print("Distance : " + str(distance))
                        cv2.putText(
                            frame_resized, "distance: {:.2f}".format(distance),
                            (10, (frame_resized.shape[0] - (i + 1) * 25) - 50),
                            font, 0.65, (0, 0, 255), 3)
                        cv2.putText(
                            frame_resized, "Point " + str(i) + ": " +
                            str(b[0]) + " " + str(b[1]),
                            (10, frame_resized.shape[0] - (i + 1) * 25), font,
                            0.65, (0, 0, 255), 3)
                        i = i + 1
                    y_pix, x_pix = center_fix[0]

                    endtime = time.time()
                    #nucleo.write(("8,"+str(x_person)+","+str(y_person)).encode()) # send x_person and y_person
                    if ((abs(prev_x_pixel - x_pix)) < 50
                            and (abs(prev_y_pixel - y_pix)) < 50):
                        timeout = timeout + (endtime - starttime)
                        if (timeout > 5):
                            flag_track2 = 1
                            boxcolor = (255, 0, 0)
                    else:
                        timeout = 0
                        boxcolor = (0, 255, 0)

                    prev_y_pixel, prev_x_pixel = y_pix, x_pix
                    # DEBUGGING #
                    #print("Teta: " + str(teta) + "Distance: " + str(distance))
                    print("Timeout: " + str(timeout))
                    #print ("Distance : " + str(distance))
                elif (len(center_fix) <= 0):
                    timeout = 0
                    boxcolor = (0, 255, 0)

            elif (flag_track2 == 1):
                if (counttrack2 == 0):
                    iA, iB, iC, iD = pick2[0]

                    # Draw new bounding box from body to only head figures

                    tracker.init([iA, iB, iC - iA, iD - iB], frame_resized)
                    counttrack2 = counttrack2 + 1
                elif (counttrack2 == 1):
                    print(pick2[0])
                    print("iA:" + str(iA) + "iB:" + str(iB) + "iC:" + str(iC) +
                          "iD:" + str(iD))
                    boundingbox = tracker.update(
                        frame_resized)  #frame had better be contiguous
                    boundingbox = list(map(int, boundingbox))
                    cv2.rectangle(frame_resized,
                                  (boundingbox[0], boundingbox[1]),
                                  (boundingbox[0] + boundingbox[2],
                                   boundingbox[1] + boundingbox[3]),
                                  (255, 0, 0), 3)
                    #GENERAL ASSUMPTION SINGLE PERSON TRACKING
                    # start tracking...

                    x_track = ((boundingbox[2]) / 2.0) + boundingbox[0]
                    y_track = ((boundingbox[3]) / 2.0) + boundingbox[1]
                    print("x:" + str(x_track) + "y:" + str(y_track))
                    x_center = (frame_resized.shape[1] + 1) / 2
                    y_center = (frame_resized.shape[0] + 1) / 2
                    print(x_center, y_center)
                    # compute teta, assuming the distance is straight-line

                    rawDisparity2 = depth2[int(y_track), int(x_track)]
                    print("raw2:" + str(rawDisparity2))
                    distance2 = 1 / (-0.00307 * rawDisparity2 + 3.33)
                    if (distance2 < 0):
                        distance2 = prev_distance2
                    prev_distance2 = distance2

                    #realx = (x_track-x_center)+(distance/30.0)
                    #teta = math.atan(realx/distance) # if distance is tangensial
                    #teta = math.asin((0.026458333*(x_track-x_center)/distance)) # if distance is euclidean
                    teta = (x_track - x_center) * tetaperpixel
                    print("Teta: " + str(teta))
                    print("Distance2 : " + str(distance2))
                    cv2.putText(frame_resized,
                                "distance: {:.2f}".format(distance2),
                                (10,
                                 (frame_resized.shape[0] - (i + 1) * 25) - 50),
                                font, 0.65, (0, 0, 255), 3)
                    cv2.putText(
                        frame_resized, "Point " + str(0) + ": " +
                        str(x_track) + " " + str(y_track),
                        (10, frame_resized.shape[0] - (i + 1) * 25), font,
                        0.65, (0, 0, 255), 3)
                    # send the teta and distance
                    #nucleo.flush()
                    #if(teta<0.0):
                    #flag= nucleo.write(("7,"+format(teta,'1.2f')+","+format(distance2,'1.3f')).encode())
                    #elif(teta>0.0):
                    #flag= nucleo.write(("7,"+format(teta,'1.3f')+","+format(distance2,'1.3f')).encode())
                    #print("WRITEIN1" + str(flag))
                    print("Peak: " + str(tracker.getpeakvalue()))
                    if (tracker.getpeakvalue() < 0.6):
                        counttrack2 = 0
                        flag_track2 = 0
                        #nucleo.flush()
                        #nucleo.write("8,,,,,,,,,,,,".encode())
                        print("WRITEOUT")

            #frame_resized = cv2.flip(frame_resized, 0)
            cv2.imshow("Detected Human", frame_resized)
            cv2.imshow("Depth", frame_convert2.pretty_depth_cv(depth))
            #cv2.imshow("Depth2", frame_convert2.pretty_depth_cv(depth2))
            # cv2.imshow("Original", frame)
        else:
            count = count + 1
            print("Number of frame skipped in the video= " + str(count))

        # compute the fps
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
        print("FPS: " + str(fps))
        #outframe = open("/home/ubuntu/Progress\ TA/Integrasi/rgb640/%d.jpg" % countsave, 'wb+')
        cv2.imwrite('%d.jpg' % countsave, frame_resized)  # Save image...
        countsave = countsave + 1
        key = cv2.waitKey(5)

    cv2.destroyAllWindows()
    freenect.sync_stop()
    nucleo.close()
    print("\nFINISH")
Example #25
def get_depth():
    frame = frame_convert2.pretty_depth_cv(freenect.sync_get_depth()[0])
    cv2.imwrite('depthcapture.jpg', frame)
    print('frameDepth =\n%s' % (frame,))  # %d would fail on a whole array
    return frame
Example #26
def kinect_get_ir():
    # get the IR image and make it correct format
    array, t = freenect.sync_get_video(0, freenect.VIDEO_IR_10BIT)
    array = frame_convert2.pretty_depth_cv(array)
    return array
Example #27
def get_depth():
    return frame_convert2.pretty_depth_cv(freenect.sync_get_depth()[0])
Example #28
def get_depth():
    #good for displaying and saving depth image
    frame = frame_convert2.pretty_depth_cv(freenect.sync_get_depth()[0])
    return frame