Example #1
    def getKinectData(self):

        delay = 1 / self.fps

        #Wait for the ports to be set up before streaming
        time.sleep(10)
        freenect.sync_stop()

        while self.running:

            if self.depth == True:

                #get a frame from depth sensor
                depth = self.get_depth()
                #write depth image to file
                cv.imwrite('/var/www/html/stream/depthImage.jpg', depth)

            if self.image == True:

                #get a frame from the RGB camera
                image = self.get_video()
                #write RGB image to file
                cv.imwrite('/var/www/html/stream/image.jpg', image)

            #frame delay
            time.sleep(delay)

        print "INFO: Shutting down Kinect Data Capture."
        freenect.sync_stop()
Example #2
def testkinect1():
    # test video modes
    [ctx,dev] = initkinect()
    print(freenect.get_video_format(dev))
    freenect.set_video_mode(dev,1,1)
    print(freenect.get_video_format(dev))
    killkinect(ctx,dev)
    old_time = time.time()
    while time.time()-old_time < 10:
        cv.ShowImage('Video', get_video())
        cv2.waitKey(1)
    cv2.destroyAllWindows()
    cv2.waitKey(1000)
    freenect.sync_stop()

    [ctx,dev] = initkinect()
    freenect.set_video_mode(dev,1,2)
    print(freenect.get_video_format(dev))
    killkinect(ctx,dev)
    old_time = time.time()
    while time.time()-old_time < 10:
        cv.ShowImage('Video', get_video())
        cv2.waitKey(1)
    cv2.destroyAllWindows()
    cv2.waitKey(1000)
    freenect.sync_stop()

    [ctx,dev] = initkinect()
    freenect.set_video_mode(dev,1,3)
    print(freenect.get_video_format(dev))
    killkinect(ctx,dev)
    old_time = time.time()
    while time.time()-old_time < 10:
        cv.ShowImage('Video', get_video())
        cv2.waitKey(1)
    cv2.destroyAllWindows()
    freenect.sync_stop()

    [ctx,dev] = initkinect()
    freenect.set_video_mode(dev,1,5)
    print(freenect.get_video_format(dev))
    killkinect(ctx,dev)
    old_time = time.time()
    while time.time()-old_time < 10:
        cv.ShowImage('Video', get_video())
        cv2.waitKey(1)
    cv2.destroyAllWindows()
    freenect.sync_stop()

    [ctx,dev] = initkinect()
    freenect.set_video_mode(dev,1,6)
    print(freenect.get_video_format(dev))
    killkinect(ctx,dev)
    old_time = time.time()
    while time.time()-old_time < 10:
        cv.ShowImage('Video', get_video())
        cv2.waitKey(1)
    cv2.destroyAllWindows()
    freenect.sync_stop()
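
testkinect1() above (and kill() and main_vision() further down) relies on initkinect(), killkinect() and get_video() helpers that are not part of this listing. A minimal sketch of what they might look like, assuming they only wrap the libfreenect calls used elsewhere on this page (freenect.init/open_device/close_device/shutdown and the synchronous sync_get_video):

import freenect
import cv2

def initkinect():
    # Open a raw device handle so per-device settings (video mode, tilt) can be changed.
    ctx = freenect.init()
    dev = freenect.open_device(ctx, 0)
    return [ctx, dev]

def killkinect(ctx, dev):
    # Release the raw handle so the synchronous (sync_*) API can claim the device again.
    freenect.close_device(dev)
    freenect.shutdown(ctx)

def get_video():
    # Grab one RGB frame through the synchronous API and convert it to BGR for OpenCV.
    frame, _ = freenect.sync_get_video()
    return cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)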
Example #3
def capture_emotions(person_name, emotions, duration):
    """Capture each emotion in turn"""
    captured_emotions = {}
    print """I am going to prompt you to pretend to feel the following emotions for %s seconds each: %s""" % (duration, ', '.join(emotions))
    for emotion in emotions:
        capture_emotion(captured_emotions, person_name, emotion, duration)
    freenect.sync_stop()
    save_emotions(captured_emotions, person_name)
Example #4
def capture_emotions(person_name, emotions, duration):
    """Capture each emotion in turn"""
    captured_emotions = {}
    print """I am going to prompt you to pretend to feel the following emotions for %s seconds each: %s""" % (
        duration, ', '.join(emotions))
    for emotion in emotions:
        capture_emotion(captured_emotions, person_name, emotion, duration)
    freenect.sync_stop()
    save_emotions(captured_emotions, person_name)
Example #5
def disp_thresh(lower, upper, depth):
    l1 = map(lower, 0, 1800, 0, 255)
    depth = (255-l1) * np.logical_and(depth > lower, depth < upper)
    depth = depth.astype(np.uint8)
    if cv2.waitKey(1) == 27:
        freenect.sync_stop()
        cv2.destroyAllWindows()
        exit (0)
    return depth
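
disp_thresh() calls map(lower, 0, 1800, 0, 255), which is not Python's built-in map; from the arguments it looks like an Arduino-style linear range remap (value, in_min, in_max, out_min, out_max) used to turn a raw depth value into an 8-bit intensity. A minimal sketch under that assumption:

def map(value, in_min, in_max, out_min, out_max):
    # Linearly remap value from [in_min, in_max] to [out_min, out_max] (Arduino-style).
    return (value - in_min) * (out_max - out_min) / float(in_max - in_min) + out_min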
Example #6
def kill():
    [ctx, dev] = initkinect()
    cv2.destroyAllWindows()
    freenect.sync_stop()
    freenect.stop_video(dev)
    freenect.stop_depth(dev)
    freenect.close_device(dev)
    freenect.shutdown(ctx)
    quit()
Example #7
  def stop(self):
    """
    Safely stop the Kinect Thread, freeing the Kinect properly.

    Set a variable to stop Thread execution by terminating the while loop in
    run().
    """

    freenect.sync_stop()
    self.exit = True
Example #8
 def keyboard_press_event(self, key, x, y):
     
     if key == chr(27):
         #freenect.sync_set_led(1)
         freenect.sync_stop()
     elif key == ' ':
         print "Toggled the RGB/IR image."
         self.show_ir = not self.show_ir
     
     super(PointCloudDemo, self).keyboard_press_event(key, x, y)
Example #9
def keyPressed(key, x: int, y: int):
    global color
    if key == b'C' or key == b'c':
        color = not color
        # break;

    if key == b'Q' or key == b'q' or key == b'\x1b':  # ESC
        glutDestroyWindow(window)
        # device->stopDepth();
        # device->stopVideo();
        freenect.sync_stop()
        exit(0)
Example #10
 def keyboard_press_event(self, key, x, y):
     if key == chr(27):
         #freenect.sync_set_led(1)
         freenect.sync_stop()
         #np.savez("data", vertices=self.vertices_measure.get())
         #np.save("F", self.ffusion.F_gpu.get())
         #np.save("W", self.ffusion.W_gpu.get())
     if key == 'm':
         self.draw_flags['measure'] ^= True
     if key == 'r':
         self.draw_flags['raycast'] ^= True
     if key == 't':
         self.ffusion.active_tracking ^= True
     
     super(DenseDemo, self).keyboard_press_event(key, x, y)
Example #11
    def keyboard_press_event(self, key, x, y):
        if key == chr(27):
            #freenect.sync_set_led(1)
            freenect.sync_stop()
            #np.savez("data", vertices=self.vertices_measure.get())
            #np.save("F", self.ffusion.F_gpu.get())
            #np.save("W", self.ffusion.W_gpu.get())
        if key == 'm':
            self.draw_flags['measure'] ^= True
        if key == 'r':
            self.draw_flags['raycast'] ^= True
        if key == 't':
            self.ffusion.active_tracking ^= True

        super(DenseDemo, self).keyboard_press_event(key, x, y)
Example #12
def temp_test():
    fn_ctx = fn.init()
    fn_dev = fn.open_device(fn_ctx, fn.num_devices(fn_ctx) - 1)
    fn.set_tilt_degs(fn_dev, 0)
    fn.close_device(fn_dev)

    while True:
        dep = get_depth()
        dep *= (dep * 1.3).astype(np.uint8)
        print("{}\t,\t{}".format(np.min(dep), np.max(dep)))

        cv2.imshow('depth', dep)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            fn.sync_stop()
            break
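
temp_test() (and density_plot() in Example #16) uses a get_depth() helper that is not shown in this snippet; Example #22 defines one, and a sketch consistent with how it is used above would be:

import freenect
import numpy as np

def get_depth():
    # Assumed helper: grab one depth frame via the synchronous API and squash it to uint8 for display.
    depth, _ = freenect.sync_get_depth()
    return depth.astype(np.uint8)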
Example #13
def capture_one(emotion, duration):
    """Capture the emotion to tsv files in person_name/emotion"""
    while True:
        print "\aPlease start (pretending that you are) feeling %s" % emotion
        print "I am going to start capturing data in %s seconds" % \
            SECONDS_TO_WAIT_BEFORE_CAPTURING
        time.sleep(SECONDS_TO_WAIT_BEFORE_CAPTURING)
        frames_and_eeg_data = capture_data(duration)
        print "\aDone."
        print "Did you manage to hold the feeling the entire time? [y/n]"
        if raw_input().lower().strip() in ["y", "yes"]:
            break
        else:
            print "Trying again..."
    #TODO: Expose this less :-)
    freenect.sync_stop()
    return frames_and_eeg_data
Example #14
def capture_one(emotion, duration):
    """Capture the emotion to tsv files in person_name/emotion"""
    while True:
        print "\aPlease start (pretending that you are) feeling %s" % emotion
        print "I am going to start capturing data in %s seconds" % \
            SECONDS_TO_WAIT_BEFORE_CAPTURING
        time.sleep(SECONDS_TO_WAIT_BEFORE_CAPTURING)
        frames_and_eeg_data = capture_data(duration)
        print "\aDone."
        print "Did you manage to hold the feeling the entire time? [y/n]"
        if raw_input().lower().strip() in ["y", "yes"]:
            break
        else:
            print "Trying again..."
    #TODO: Expose this less :-)
    freenect.sync_stop()
    return frames_and_eeg_data
Example #15
def masking(rgb):
    hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
    rgb_lthresh = np.array([24, 100, 50])
    rgb_uthresh = np.array([50, 255, 255])
    rgb_mask = cv2.inRange(hsv, rgb_lthresh, rgb_uthresh)
    rgb_f = cv2.bitwise_and(rgb,rgb,mask=rgb_mask)
    rgb_md = cv2.medianBlur(rgb_f, 15)
    rgb_gray = cv2.cvtColor(rgb_md, cv2.COLOR_BGR2GRAY)
    rgb_mask = cv2.adaptiveThreshold(rgb_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 15, 5)
    rgb_edges = cv2.Canny(rgb_mask, 100, 255)
    rgb_dil = cv2.dilate(rgb_edges, kernel=(3, 3), iterations=1)
    cv2.imshow('RGB',rgb_dil)
    if cv2.waitKey(1) == 27:
        freenect.sync_stop()
        cv2.destroyAllWindows()
        exit (0)
    return rgb_dil,rgb_mask
Example #16
def density_plot():
    fn_ctx = fn.init()
    fn_dev = fn.open_device(fn_ctx, fn.num_devices(fn_ctx) - 1)
    fn.set_tilt_degs(fn_dev, 0)
    fn.close_device(fn_dev)

    show_image = cv2.imshow
    waitkey = cv2.waitKey
    ravel = np.ravel
    countbin = np.bincount

    length = 256
    nums = np.arange(0, length, 1)
    zero = np.zeros_like(nums)

    import matplotlib.pyplot as plt
    import matplotlib.animation as animation

    fig, ax = plt.subplots()
    line, = ax.plot(nums, zero)
    ax.set_ylim(0, 10000)
    ax.set_xlim(0, 256)
    set_y_data = line.set_ydata

    def update(data):
        set_y_data(data)
        return line,

    def get_dep():
        dep = get_depth()
        dep = cv2.medianBlur(dep, 3, dep)
        dep = ravel(dep)
        # dep = medfilt(dep, 21).astype(np.uint8)
        return dep

    def data_gen():
        while True:
            yield countbin(get_dep(), minlength=length)

    ani = animation.FuncAnimation(fig, update, data_gen)
    plt.show()

    cv2.destroyAllWindows()

    fn.sync_stop()
Example #17
def set_kinect_angle(angle, device_index=0):
    # Clamp angle to [-30, 30]
    angle = max(-30, min(angle, 30))
    print "Setting Kinect angle to", angle

    # We have to stop the synchronous runloop to interact with the device.
    freenect.sync_stop()

    # Open the device
    ctx = freenect.init()
    dev = freenect.open_device(ctx, device_index)

    # Set angle
    freenect.set_tilt_degs(dev, angle)

    # Shutdown context, allowing synchronous runloop to start
    freenect.shutdown(ctx)

    return angle
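
Because set_kinect_angle() stops the synchronous run loop before opening the device and then shuts the context down again, the next sync_get_* call can transparently restart streaming. A short usage sketch (the tilt value is arbitrary):

import freenect

set_kinect_angle(15)                  # stop the sync loop, tilt the head, release the device
frame, _ = freenect.sync_get_video()  # the first sync_* call afterwards restarts the run loop
freenect.sync_stop()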
Example #18
    def streamKinectImage(self):

        if self.running:
            self.image = True
            self.port = self.port + 1
            port = self.port - 1
            print "INFO: Adding kinect RGB image to stream on port ", str(
                port), "."
            startKinectImageStream = self.serverCommand + " -o 'output_http.so -w ./www/html -p " + str(
                port
            ) + "' -i 'input_file.so -f /var/www/html/stream -n image.jpg -d 0'"
            os.system(startKinectImageStream)

        else:

            print "INFO: Shutting down the kinect RGB image stream"
            self.image = False
            freenect.sync_stop()
            sys.exit()
Example #19
def main():
    print ('[*] Start')
    start = datetime.now()
    folder_name=get_args()
    os.mkdir(folder_name)
    os.chdir(folder_name)
    i=1
    buff=[]
    try:
        while True:
            print '[*] Recording Index %s'%(str(i))
            (depth,_), (rgb,_) = get_depth(), get_video()
            buff.append([depth.copy(),rgb.copy()])
            i+=1
            time.sleep(0.0001)
            if i>=1000000:
                break
    except KeyboardInterrupt:
        sync_stop() # stop the sync_get_video...etc
        print '\n[*] End Buff with following information :'
        duration = str((datetime.now()-start).total_seconds() ).split('.')[0]
        fps = i/((datetime.now()-start).total_seconds())
        print '[*] Duration is { %s }'%(duration)
        print '[*] FPS is { %s }'%(str(fps).split('.')[0])

        print '\n[*] Start Saving IMG from Buff'
        try:
            for j in range(i):
                #list.pop(index)
                depth,rgb = buff.pop(0)
                a=""
                if len(str(j))<frame_length_limit_order:
                    a='0'*(frame_length_limit_order-len(str(j)))
                depth=depth_to_gray(depth)
                # io.imsave to a series of .png
                io.imsave('depth'+a+str(j)+'.png',depth)
                io.imsave('rgb'+a+str(j)+'.png',rgb)
                print '[*] Saving Index %s'%(str(j))
        except:
            print '\n[*] End Saving IMG '
Example #20
#!/usr/bin/env python
import freenect
import cv
import numpy as np

cv.NamedWindow("Depth")
cv.NamedWindow("Video")
ind = 0
while 1:
    print(ind)
    try:
        depth, timestamp = freenect.sync_get_depth(ind)
        rgb, timestamp = freenect.sync_get_video(ind)
    except TypeError:
        ind = 0
        continue
    ind += 1
    cv.ShowImage("Depth", depth.astype(np.uint8))
    cv.ShowImage("Video", rgb[:, :, ::-1].astype(np.uint8))
    cv.WaitKey(10)
    freenect.sync_stop()  # NOTE: May remove if you have good USB bandwidth
Example #21
def get_video():
    array,_ = freenect.sync_get_video()
    array = cv2.cvtColor(array,cv2.COLOR_RGB2BGR)
    freenect.sync_stop()
    return array
Example #22
def get_depth():
    array,_ = freenect.sync_get_depth()
    array = array.astype(np.uint8)
    freenect.sync_stop()
    return array
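
Note that get_video() and get_depth() above call freenect.sync_stop() after every single grab, so the synchronous capture loop is torn down and restarted for each frame; that only pays off when USB bandwidth is tight (the same trade-off Example #20 mentions). A minimal viewer built on these two helpers, assuming both are in scope:

import cv2

while True:
    cv2.imshow('Video', get_video())
    cv2.imshow('Depth', get_depth())
    if cv2.waitKey(10) == 27:  # ESC
        break
cv2.destroyAllWindows()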
Example #23
	panel.display()

	oldDepth = depth
	for event in pygame.event.get() :
		if event.type == pygame.QUIT :
			finish = True
		if event.type == pygame.KEYDOWN  :
			if event.key == pygame.K_ESCAPE :
				finish = True
			if event.key == pygame.K_q :
				finish = True
			if event.key == pygame.K_r :
				backgroundDepth[:] = 0
			if event.key == pygame.K_l :
				flooding = not flooding
			if event.key == pygame.K_f :
				pygame.display.toggle_fullscreen()
			if event.key == pygame.K_v :
				panel.toggle_video_capture()
	fps.tic()
del fps

print "Stopping kinect"
freenect.sync_stop()
print "Stopping pygame"




Example #24
            [slice(None) if i != axis else 0 for i in range(dif.ndim)])
        index = [
            mesh[i][index].ravel() if i != axis else location.ravel()
            for i in range(bins.ndim)
        ]
        counts = bins[tuple(index)].reshape(location.shape)
        index[axis] = indices[tuple(index)]
        modals = srt[tuple(index)].reshape(location.shape)
        return (modals, counts)


date = datetime.date.today().strftime("%B-%d-%Y")
os.mkdir(date)
os.chdir(date)
logging.basicConfig(filename='EventLog.txt', level=logging.DEBUG)
a = KinectTracker()
#z = a.calculate_masked_background()
#y,z = a.identify_valid_masked_background()
# Functionally, a.average_background() is just getting a single snapshot. Write another function that calls it periodically to handle it as a full time thing
camera = PiCamera()
camera.resolution = (1296, 972)
camera.framerate = 30
# allow the camera to warmup
time.sleep(1)
camera.start_recording(date + "_vid.h264", bitrate=7500000)
print "start picam recording"
a.long_term_recording()
camera.stop_recording()
print "stopped recording"
freenect.sync_stop()
Example #25
def main():
    print("Running...")
    flag_track2 = 0
    count = 0
    counttrack = 0
    prev_y_pixel = 0
    prev_x_pixel = 0
    tetaperpixel = 0.994837 / 400.0
    tracker = KCF.kcftracker(True, False, True,
                             False)  # hog, fixed_window, multiscale, lab
    counttrack2 = 0
    prev_distance2 = 0
    # grab one frame at first to compare for background subtraction
    frame, timestamp = freenect.sync_get_video()
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    frame_resized = imutils.resize(frame, width=min(400, frame.shape[1]))
    frame_resized_grayscale = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2GRAY)
    print(frame_resized_grayscale.shape)
    # initialize centroid
    center = [[frame_resized.shape[1] / 2, frame_resized.shape[0] / 2]]
    center_fix = []
    # defining min cutoff area
    #min_area=(480/400)*frame_resized.shape[1]
    min_area = (0.01) * frame_resized.shape[1]
    print(frame_resized.shape)  # (300,400,3)
    boxcolor = (0, 255, 0)
    timeout = 0
    #variable for counting time elapsed
    key = ''
    temp = 1

    # save video
    countsave = 0
    while key != 113:  # for 'q' key
        # start timer
        timer = cv2.getTickCount()
        starttime = time.time()
        previous_frame = frame_resized_grayscale
        # retrieve new RGB frame image
        frame, timestamp = freenect.sync_get_video()
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        frame_resized = imutils.resize(frame, width=min(400, frame.shape[1]))
        frame_resized_grayscale = cv2.cvtColor(frame_resized,
                                               cv2.COLOR_BGR2GRAY)
        #temp=background_subtraction(previous_frame, frame_resized_grayscale, min_area)

        # retrieve depth map
        depth, timestamp = freenect.sync_get_depth()
        depth = imutils.resize(depth, width=min(400, depth.shape[1]))
        print(depth.shape)
        depth2 = np.copy(depth)
        # orig = image.copy()
        if temp == 1:
            if (flag_track2 == 0):
                frame_processed, center_fix, pick2 = detect_people(
                    frame_resized, center, frame_resized, boxcolor)
                if (len(center_fix) > 0):
                    i = 0
                    for b in center_fix:

                        #print(b)
                        #print("Point "+str(i)+": "+str(b[0])+" "+str(b[1]))

                        x_pixel = b[1]
                        y_pixel = b[0]
                        print("x1:" + str(x_pixel) + "y1:" + str(y_pixel))
                        rawDisparity = depth[(int)(x_pixel), (int)(y_pixel)]
                        print("raw:" + str(rawDisparity))
                        distance = 1 / (-0.00307 * rawDisparity + 3.33)
                        if (distance < 0):
                            distance = 0.5
                        print("Distance : " + str(distance))
                        cv2.putText(
                            frame_resized, "distance: {:.2f}".format(distance),
                            (10, (frame_resized.shape[0] - (i + 1) * 25) - 50),
                            font, 0.65, (0, 0, 255), 3)
                        cv2.putText(
                            frame_resized, "Point " + str(i) + ": " +
                            str(b[0]) + " " + str(b[1]),
                            (10, frame_resized.shape[0] - (i + 1) * 25), font,
                            0.65, (0, 0, 255), 3)
                        i = i + 1
                    y_pix, x_pix = center_fix[0]

                    endtime = time.time()
                    #nucleo.write(("8,"+str(x_person)+","+str(y_person)).encode()) # send x_person and y_person
                    if ((abs(prev_x_pixel - x_pix)) < 50
                            and (abs(prev_y_pixel - y_pix)) < 50):
                        timeout = timeout + (endtime - starttime)
                        if (timeout > 5):
                            flag_track2 = 1
                            boxcolor = (255, 0, 0)
                    else:
                        timeout = 0
                        boxcolor = (0, 255, 0)

                    prev_y_pixel, prev_x_pixel = y_pix, x_pix
                    # DEBUGGING #
                    #print("Teta: " + str(teta) + "Distance: " + str(distance))
                    print("Timeout: " + str(timeout))
                    #print ("Distance : " + str(distance))
                elif (len(center_fix) <= 0):
                    timeout = 0
                    boxcolor = (0, 255, 0)

            elif (flag_track2 == 1):
                if (counttrack2 == 0):
                    iA, iB, iC, iD = pick2[0]

                    # Draw new bounding box from body to only head figures

                    tracker.init([iA, iB, iC - iA, iD - iB], frame_resized)
                    counttrack2 = counttrack2 + 1
                elif (counttrack2 == 1):
                    print(pick2[0])
                    print("iA:" + str(iA) + "iB:" + str(iB) + "iC:" + str(iC) +
                          "iD:" + str(iD))
                    boundingbox = tracker.update(
                        frame_resized)  #frame had better be contiguous
                    boundingbox = list(map(int, boundingbox))
                    cv2.rectangle(frame_resized,
                                  (boundingbox[0], boundingbox[1]),
                                  (boundingbox[0] + boundingbox[2],
                                   boundingbox[1] + boundingbox[3]),
                                  (255, 0, 0), 3)
                    #GENERAL ASSUMPTION SINGLE PERSON TRACKING
                    # start tracking...

                    x_track = ((boundingbox[2]) / 2.0) + boundingbox[0]
                    y_track = ((boundingbox[3]) / 2.0) + boundingbox[1]
                    print("x:" + str(x_track) + "y:" + str(y_track))
                    x_center = (frame_resized.shape[1] + 1) / 2
                    y_center = (frame_resized.shape[0] + 1) / 2
                    print(x_center, y_center)
                    # compute teta, assuming straight-line distance

                    rawDisparity2 = depth2[(int)(y_track), (int)(x_track)]
                    print("raw2:" + str(rawDisparity2))
                    distance2 = 1 / (-0.00307 * rawDisparity2 + 3.33)
                    if (distance2 < 0):
                        distance2 = prev_distance2
                    prev_distance2 = distance2

                    #realx = (x_track-x_center)+(distance/30.0)
                    #teta = math.atan(realx/distance) # if distance is tangensial
                    #teta = math.asin((0.026458333*(x_track-x_center)/distance)) # if distance is euclidean
                    teta = (x_track - x_center) * tetaperpixel
                    print("Teta: " + str(teta))
                    print("Distance2 : " + str(distance2))
                    cv2.putText(frame_resized,
                                "distance: {:.2f}".format(distance2),
                                (10,
                                 (frame_resized.shape[0] - (i + 1) * 25) - 50),
                                font, 0.65, (0, 0, 255), 3)
                    cv2.putText(
                        frame_resized, "Point " + str(0) + ": " +
                        str(x_track) + " " + str(y_track),
                        (10, frame_resized.shape[0] - (i + 1) * 25), font,
                        0.65, (0, 0, 255), 3)
                    # send the teta and distance
                    #nucleo.flush()
                    #if(teta<0.0):
                    #flag= nucleo.write(("7,"+format(teta,'1.2f')+","+format(distance2,'1.3f')).encode())
                    #elif(teta>0.0):
                    #flag= nucleo.write(("7,"+format(teta,'1.3f')+","+format(distance2,'1.3f')).encode())
                    #print("WRITEIN1" + str(flag))
                    print("Peak: " + str(tracker.getpeakvalue()))
                    if (tracker.getpeakvalue() < 0.6):
                        counttrack2 = 0
                        flag_track2 = 0
                        #nucleo.flush()
                        #nucleo.write("8,,,,,,,,,,,,".encode())
                        print("WRITEOUT")

            #frame_resized = cv2.flip(frame_resized, 0)
            cv2.imshow("Detected Human", frame_resized)
            cv2.imshow("Depth", frame_convert2.pretty_depth_cv(depth))
            #cv2.imshow("Depth2", frame_convert2.pretty_depth_cv(depth2))
            # cv2.imshow("Original", frame)
        else:
            count = count + 1
            print("Number of frame skipped in the video= " + str(count))

        # compute the fps
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
        print("FPS: " + str(fps))
        #outframe = open("/home/ubuntu/Progress\ TA/Integrasi/rgb640/%d.jpg" % countsave, 'wb+')
        cv2.imwrite('%d.jpg' % countsave, frame_resized)  # Save image...
        countsave = countsave + 1
        key = cv2.waitKey(5)

    cv2.destroyAllWindows()
    freenect.sync_stop()
    nucleo.close()
    print("\nFINISH")
Example #26
def main_vision(load):
    # inits
    fn_ctx = fn.init()
    fn_dev = fn.open_device(fn_ctx, fn.num_devices(fn_ctx) - 1)
    fn.set_tilt_degs(fn_dev, 0)
    fn.close_device(fn_dev)
    key_point = KeyPoints(150)
    predictor = prediction(ModelPath)
    preds = []

    # optimization
    t0 = 0.0
    t1 = 0.0
    fps = 0.0
    total_fps = 0.0
    frames = 0
    kp_speed = key_point._get_kp_speedup()
    draw_speed = key_point._get_draw_speedup()
    proc_speed = key_point._get_process_speedup()
    cvtColor = cv2.cvtColor
    BGR2RGB = cv2.COLOR_BGR2RGB
    get_kp = key_point.get_key_points
    draw_kp = key_point.draw_key_points
    process_image = key_point.__process_image
    show_image = cv2.imshow
    wait_for_key = cv2.waitKey
    copy_thing = copy.copy
    num_features = key_point.get_num_features()
    arr_shape = np.shape
    shape_check = (num_features, 32)
    ravel = np.ravel
    append_pred = preds.append
    get_time = time.time

    current_class = 0
    if load:
        brain = predictor.load_brain()
        pred_speed = predictor.get_pred_speed()
        predict = predictor.predict
    else:
        add_speed = predictor.get_add_speed()
        add_data = predictor.add_data
        get_length = predictor.get_data_length
    if load:
        net = Neural_Net(predictor.brain.getPoint(), np.vstack(predictor.brain.getData()), 4800 * 2, num_features)
        nl_predict = net.predict
        nl_speed = net.get_neural_speed()

    # mainLoop
    while True:
        t0 = get_time()

        # Get a fresh frame
        depth = get_depth()
        frame = get_video()
        show_image('Raw Image', cvtColor(frame, BGR2RGB))

        # Process Depth Image
        # depth = remove_background(depth, 25)
        depth = remove_background_percent(depth, .5, 50)
        depth = convert_to_bw(depth)
        mask = make_mask(depth)

        # Process Image
        frame = cvtColor(frame, BGR2RGB)
        video = copy_thing(frame)
        frame = process_image(frame, proc_speed)
        # Make Masked Frame
        masked_frame = copy_thing(frame)
        masked_frame[mask] = 0

        # Process Key Points
        kp, des = get_kp(masked_frame, kp_speed)
        video = draw_kp(video, kp, True, speedup=draw_speed)

        # Predict current
        if (load) and (des is not None) and (arr_shape(des) == shape_check):
            pred = predict(ravel(des), pred_speed)
            append_pred(pred)
            print(pred)
            print(nl_predict([ravel(des)], nl_speed))
        # Add object description to data set
        if (not load) and (des is not None) and (arr_shape(des) == shape_check):
            add_data(add_speed, np.ravel(des), current_class)
            print('Current Class and Length:\t%i\t%i' % (get_length(), current_class))

        t1 = get_time()
        fps = (1 / (t1 - t0))
        total_fps += fps
        frames += 1
        print('%.2f FPS' % fps)
        show_image('masked image', masked_frame)
        show_image('depth', depth)
        show_image('key points', video)
        # show_image('all', frame, masked_frame, depth, video)
        if wait_for_key(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            if load:
                break
            print('Current Class: %i\nn : Next Class\nr : Continue Current Class\nq : Quit' % (current_class))
            inp = raw_input()
            if inp == 'n':
                current_class += 1
            elif inp == 'q':
                break

    # print(np.mean(preds))
    cv2.destroyAllWindows()
    print('Average FPS: %.2f' % (total_fps / frames))
    fn.sync_stop()
    if not load:
        predictor.create_brain()
        main_vision(True)
Example #27
#!/usr/bin/env python
import freenect
import cv
import numpy as np

cv.NamedWindow('Depth')
cv.NamedWindow('Video')
ind = 0
while 1:
    print(ind)
    try:
        depth, timestamp = freenect.sync_get_depth(ind)
        rgb, timestamp = freenect.sync_get_video(ind)
    except TypeError:
        ind = 0
        continue
    ind += 1
    cv.ShowImage('Depth', depth.astype(np.uint8))
    cv.ShowImage('Video', rgb[:, :, ::-1].astype(np.uint8))
    cv.WaitKey(10)
    freenect.sync_stop()  # NOTE: May remove if you have good USB bandwidth
Example #28
def StopKinect():
    freenect.sync_stop()
Example #29
#cv.NamedWindow('Video')
ind = 0
print('%s\nPress ESC to stop' % __doc__)
  
  
def get_depth(ind):
    #return frame_convert.pretty_depth_cv(freenect.sync_get_depth(ind)[0])
    array = freenect.sync_get_depth(ind)[0]
    ret_array = array.astype(np.uint8)
    return ret_array 
  
  
def get_video(ind):
    return frame_convert.video_cv(freenect.sync_get_video(ind)[0])
  
  
while 1:
    print(ind)
    try:
        depth = get_depth(ind)
       # video = get_video(ind)
    except TypeError:
        ind = 0
        continue
    ind += 1
    cv.ShowImage('Depth', depth)
   # cv.ShowImage('Video', video)
    if cv.WaitKey(10) == 27:
        break
    freenect.sync_stop()  # NOTE: May remove if you have good USB bandwidth
Example #30
def close(event, x, y, flags, param):
    if event == cv.CV_EVENT_LBUTTONDOWN:
        freenect.sync_stop()
        sys.exit()
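
close() has the signature of a legacy cv-module mouse callback; a sketch of how it would presumably be registered, assuming a window named 'Depth' already exists:

import cv

cv.NamedWindow('Depth')
cv.SetMouseCallback('Depth', close)  # a left click in the window stops the sync loop and exits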
Example #31
def main():
	print("Running...");
	flag_track = 0
	count = 0
	counttrack = 0
	prev_y_pixel = 0
	prev_x_pixel = 0
	tetaperpixel = 0.994837/400.0
	# grab one frame at first to compare for background subtraction
	frame,timestamp = freenect.sync_get_video()
	frame = cv2.cvtColor(frame,cv2.COLOR_RGB2BGR)
	frame_resized = imutils.resize(frame, width=min(400, frame.shape[1]))
	frame_resized_grayscale = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2GRAY)

	# initialize centroid
	center = [[frame_resized.shape[1]/2, frame_resized.shape[0]/2]]
	center_fix = []
	# defining min cutoff area
	#min_area=(480/400)*frame_resized.shape[1] 
	min_area=(0.01)*frame_resized.shape[1] 
	print(frame_resized.shape)
	boxcolor=(0,255,0)
	timeout = 0; #variable for counting time elapsed
	key = ''
	temp = 1
	while key != 113:  # for 'q' key
		# start timer
		timer = cv2.getTickCount()
		starttime = time.time()
		previous_frame = frame_resized_grayscale
		# retrieve new RGB frame image
		frame,timestamp = freenect.sync_get_video()
		frame = cv2.cvtColor(frame,cv2.COLOR_RGB2BGR)
		frame_resized = imutils.resize(frame, width=min(400, frame.shape[1]))
		frame_resized_grayscale = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2GRAY)
		#temp=background_subtraction(previous_frame, frame_resized_grayscale, min_area)

		# retrieve depth map
		depth,timestamp = freenect.sync_get_depth()
		depth = imutils.resize(depth, width=min(400, depth.shape[1]))
		# orig = image.copy()
		if temp==1:
			frame_processed,center_fix = detect_people(frame_resized_grayscale,center,frame_resized,boxcolor)
			if (len(center_fix)>0):
				i = 0
				for b in center_fix:		
					#print(b)
					#print("Point "+str(i)+": "+str(b[0])+" "+str(b[1]))
					x_pixel= b[1]
					y_pixel= b[0]
					rawDisparity = depth[(int)(x_pixel),(int)(y_pixel)]
					distance = 1/(-0.00307 * rawDisparity + 3.33)
					if (distance<0):
						distance = 0.5
					print ("Distance : " + str(distance))
					cv2.putText(frame_resized, "distance: {:.2f}".format(distance), (10, (frame_resized.shape[0]-(i+1)*25)-50), font, 0.65, (0, 0, 255), 3)
					cv2.putText(frame_resized, "Point "+str(i)+": "+str(b[0])+" "+str(b[1]), (10, frame_resized.shape[0]-(i+1)*25), font, 0.65, (0, 0, 255), 3)
					i = i + 1
				y_pix,x_pix = center_fix[0]
				
				endtime = time.time()
				if (flag_track == 0):
					#nucleo.write(("8,"+str(x_person)+","+str(y_person)).encode()) # send x_person and y_person
					if ((abs(prev_x_pixel-x_pix))<50 and (abs(prev_y_pixel-y_pix))<50):
						timeout = timeout + (endtime - starttime)
						if (timeout > 5):
							flag_track = 1;
							boxcolor = (255,0,0)
					else:
						timeout = 0
						boxcolor = (0,255,0)
				elif (flag_track == 1):
					#GENERAL ASSUMPTION SINGLE PERSON TRACKING
					# start tracking...
					#nucleo.write("9,,,,,,,,,,,,".encode())
					
					x_track,y_track = x_pix,y_pix
					x_center = (frame_resized.shape[1]+1)/2
					y_center = (frame_resized.shape[0]+1)/2
					print(x_center,y_center)
					# compute teta, assuming straight-line distance
					
					rawDisparity = depth[(int)(x_track),(int)(y_track)]
					distance = 1/(-0.00307 * rawDisparity + 3.33)
					if (distance<0):
						distance = 0.5
					#realx = (x_track-x_center)+(distance/30.0)
					#teta = math.atan(realx/distance) # if distance is tangensial
					#teta = math.asin((0.026458333*(x_track-x_center)/distance)) # if distance is euclidean
					teta = (y_track-x_center)*tetaperpixel
					print("teta="+str(teta)+"x:"+str(x_track)+"y:"+str(y_track))
					
					# send the teta and distance
					nucleo.flush()
					if(teta<0.0):
						flag= nucleo.write(("7,"+format(teta,'1.2f')+","+format(distance,'1.3f')).encode())
					elif(teta>0.0):
						flag= nucleo.write(("7,"+format(teta,'1.3f')+","+format(distance,'1.3f')).encode())
					print("WRITEIN1" + str(flag))

				prev_y_pixel,prev_x_pixel = y_pix,x_pix
				# DEBUGGING #
				#print("Teta: " + str(teta) + "Distance: " + str(distance))
				print("Timeout: " + str(timeout))
				print ("Distance : " + str(distance))
			elif(len(center_fix)<=0):
				# count how many frame skipped without people detected while tracking
				if (flag_track==1):
					counttrack = counttrack + 1
					# continue tracking after a maximum of 50 frame without people taking previous location as target
					if (counttrack<=50):
						# still print the last condition to the output image
						cv2.putText(frame_resized, "distance: {:.2f}".format(distance), (10, (frame_resized.shape[0]-25)-50), font, 0.65, (0, 255, 255), 3)
						cv2.putText(frame_resized, "Point "+str(i)+": "+str(b[0])+" "+str(b[1]), (10, frame_resized.shape[0]-25), font, 0.65, (0, 255, 255), 3)
						# still send the nucleo tracking state
						nucleo.flush()
						if(teta<0.0):
							flag= nucleo.write(("7,"+format(teta,'1.2f')+","+format(distance,'1.3f')).encode())
						elif(teta>0.0):
							flag= nucleo.write(("7,"+format(teta,'1.3f')+","+format(distance,'1.3f')).encode())
						print("WRITEIN" + str(flag))
					elif (counttrack>50):
						counttrack = 0
						flag_track = 0
						# tell nucleo to leave tracking state
						nucleo.flush()
						nucleo.write("8,,,,,,,,,,,,".encode())
						print("WRITEOUT")
				elif (flag_track==0):
					timeout = 0
					boxcolor = (0,255,0)

			
			#frame_resized = cv2.flip(frame_resized, 0)
			cv2.imshow("Detected Human", frame_resized)
			#cv2.imshow("Depth", depth)			
			# cv2.imshow("Original", frame)
		else:
			count=count+1
			print("Number of frame skipped in the video= " + str(count))

		# compute the fps
		fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
		print("FPS: " + str(fps))
		key = cv2.waitKey(5)

	cv2.destroyAllWindows()
	freenect.sync_stop()
	nucleo.close()
	print("\nFINISH")
Example #32
def close(event, x, y, flags, param):
    if event == cv.CV_EVENT_LBUTTONDOWN:
        freenect.sync_stop()
        sys.exit()
Example #33
def kinect_shutdown():
    freenect.sync_stop()
Example #34
def main():
    print("Running...")

    count = 0
    # grab one frame at first to compare for background subtraction
    frame, timestamp = freenect.sync_get_video()
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    frame_resized = imutils.resize(frame, width=min(400, frame.shape[1]))
    frame_resized_grayscale = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2GRAY)

    # initialize centroid
    center = [[frame_resized.shape[1] / 2, frame_resized.shape[0] / 2]]
    center_fix = []
    # defining min cutoff area
    min_area = (480 / 400) * frame_resized.shape[1]
    print(frame_resized.shape[1])
    boxcolor = (0, 255, 0)
    timeout = 0
    #variable for counting time elapsed
    key = ''
    while key != 113:  # for 'q' key
        # start timer
        timer = cv2.getTickCount()
        starttime = time.time()
        previous_frame = frame_resized_grayscale
        # retrieve new RGB frame image
        frame, timestamp = freenect.sync_get_video()
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        frame_resized = imutils.resize(frame, width=min(400, frame.shape[1]))
        frame_resized_grayscale = cv2.cvtColor(frame_resized,
                                               cv2.COLOR_BGR2GRAY)
        temp = background_subtraction(previous_frame, frame_resized_grayscale,
                                      min_area)

        # retrieve depth map
        depth, timestamp = freenect.sync_get_depth()
        depth = imutils.resize(depth, width=min(400, depth.shape[1]))
        # orig = image.copy()
        if temp == 1:
            frame_processed, center_fix = detect_people(
                frame_resized_grayscale, center, frame_resized, boxcolor)
            if (len(center_fix) > 0):
                xnorm = encodex(
                    center_fix[0][0])  # retrieve coded position from image
                prev_xnorm = xnorm
                #nucleo.write(struct.pack('>B',xnorm))
                endtime = time.time()
                if (prev_xnorm == xnorm):
                    timeout = timeout + (endtime - starttime)
                    if (timeout > 10):
                        boxcolor = (255, 0, 0)
                else:
                    timeout = 0
                    boxcolor = (0, 255, 0)
                print("X_NORM: " + str(xnorm))
                print("Timeout: " + str(timeout))
                rawDisparity = depth[(int)(center_fix[0][1]),
                                     (int)(center_fix[0][0])]
                distance = 100 / (-0.00307 * rawDisparity + 3.33)
                cv2.putText(frame_resized, "distance: {:.2f}".format(distance),
                            (10, frame_processed.shape[0]),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 3)
                #print("Distance: " + str(depth.shape) + str(frame_processed.shape))
            else:
                timeout = 0
                boxcolor = (0, 255, 0)
            i = 0
            for b in center_fix:
                cv2.putText(
                    frame_resized,
                    "Point " + str(i) + ": " + str(b[0]) + " " + str(b[1]),
                    (10, frame_resized.shape[0] - (i + 1) * 35), font, 0.65,
                    (0, 0, 255), 3)
                #print(b)
                #print("Point "+str(i)+": "+str(b[0])+" "+str(b[1]))
                i = i + 1
            #frame_resized = cv2.flip(frame_resized, 0)
            cv2.imshow("Detected Human", frame_resized)
            #cv2.imshow("Depth", depth)
            # cv2.imshow("Original", frame)
        else:
            count = count + 1
            print("Number of frame skipped in the video= " + str(count))

        # compute the fps
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
        print("FPS: " + str(fps))
        key = cv2.waitKey(5)

    cv2.destroyAllWindows()
    freenect.sync_stop()
    nucleo.close()
    print("\nFINISH")
Example #35
from misc.demo import frame_convert

cv.NamedWindow('Depth')
cv.NamedWindow('Video')
ind = 0
print('%s\nPress ESC to stop' % __doc__)


def get_depth(ind):
    return frame_convert.pretty_depth_cv(freenect.sync_get_depth(ind)[0])


def get_video(ind):
    return frame_convert.video_cv(freenect.sync_get_video(ind)[0])


while 1:
    print(ind)
    try:
        depth = get_depth(ind)
        video = get_video(ind)
    except TypeError:
        ind = 0
        continue
    ind += 1
    cv.ShowImage('Depth', depth)
    cv.ShowImage('Video', video)
    if cv.WaitKey(10) == 27:
        break
    freenect.sync_stop()  # NOTE: May remove if you have good USB bandwidth
Example #36
def handler(signum, frame):
	video_writer.release()
	freenect.sync_stop()
	exit()
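
handler() uses the (signum, frame) signature of a Python signal handler; it is presumably installed along these lines, assuming video_writer is a cv2.VideoWriter created elsewhere:

import signal

# Flush the recording and release the Kinect on Ctrl+C.
signal.signal(signal.SIGINT, handler)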
Example #37
 def close(self):
   freenect.sync_stop()
   self.sim.stop()