Example #1
def main():
    print("Initializing vid server")
    serv = Roni.RoniServer()
    serv.listen()
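    # getClient() presumably blocks until a client connects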
    cl = serv.getClient()
    cv2.namedWindow('RoniStream', cv2.WINDOW_AUTOSIZE)

    while True:
        data = serv.receiveData(cl, Roni.TYPE_RGB)
        if data:  # guard against an empty or None read
            img = Coppa.decodeColorFrame(data)
            cv2.imshow('RoniStream', img)
            cv2.waitKey(1)
Example #2
def main():
    global keyPress
    global gui
    global showConf
    global clients
    global nindex
    global connHandler
    global serv

    print("Reading in config file")
    config = None
    try:
        with open("config.json", 'r') as f:
            config = json.loads(f.read())
    except (OSError, ValueError):
        print("Unable to open or parse config file")
        exit()
    print("Loaded config file\n", json.dumps(config))

    print("Initializing vid server")
    serv = Roni.RoniServer()
    serv.listen()
    connHandler = NewConnHandler()

    # For averaging framerate
    t0 = time.time()
    t1 = 0
    last10 = [0] * 10
    it = 0
    frames = 0

    # adjust this to point to your downloaded/trained model
    model_path = '/home/nvidia/Documents/SalamiSense/snapshots_1/resnet50_csv_inference.h5'
    #model_path = '/home/nvidia/Documents/SalamiSense/resnet50_csv_inference10.h5'

    print("loading CNN model")
    # load retinanet model
    bThread = BoxThread(model_path)
    bThread.start()

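    # Load regression parameters: theta presumably holds the weights, meanStd the feature normalization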
    theta, meanStd = Reg.loadModel("noisy_sensor_model.csv")

    gui = initGUI()
    thresh = 0
    pdist = 0

    while keyPress != ord('q'):
        connHandler.tick()
        # New client connection established
        if connHandler.getState() == connHandler.STATE_FINISHED:
            newClient = connHandler.getClient()
            if newClient is not None:
                cids = [citer.getID() for citer in clients]
                c = NodeData()
                c.setClient(newClient)
                for i in range(1000):
                    if i not in cids:
                        c.setID(i)
                        break
                clients.append(c)
                nindex = len(clients) - 1
                bThread.addClient(c)
        statusStr, statusColor = connHandler.getStatusStr()
        gui.setButtonText("newnode", statusStr, statusColor)
        gui.update()

        gui.setFrameText("nodeno", "%d/%d" % (nindex + 1, len(clients)))
        if not clients:
            gui.setFrameImage("vid", [])
            gui.setFrameImage("depth", [])
            continue

        # Receive color and depth data from each client
        for c in clients:
            data = serv.receiveData(c.getClient(), Roni.TYPE_RGB)
            depth = serv.receiveData(c.getClient(), Roni.TYPE_DEPTH)
            sense = serv.receiveData(c.getClient(), Roni.TYPE_EDGE)
            vfrac = 0
            vertStr = None
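            # Vertex data is split across 8 channels (TYPE_3D_0..TYPE_3D_7); find the slice sent this frame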
            for i in range(8):
                vertStr = serv.receiveData(c.getClient(), Roni.TYPE_3D_0 + i)
                if vertStr:
                    vertStr = pickle.loads(vertStr)
                    vfrac = i
                    break

            # Decode data to images and sensor readings
            if data:
                img = Coppa.decodeColorFrame(data)
                c.setColor(img.copy())
            if depth:
                depImg = Coppa.decodeDepthFrame(depth)
                c.setDepth(depImg.copy())
            if sense:
                sense = pickle.loads(sense)
                if len(sense) > 3:
                    c.setCO2(sense[0])
                    c.setTVOC(sense[1])
                    c.setTemp(sense[2])
                    c.setHumidity(sense[3])
            if vertStr is not None and len(vertStr) > 0:
                c.setVertex(vertStr, vfrac)

        # Skip first 10 frames, just in case
        frames += 1
        if frames < 10: continue

        newThresh = gui.getEntryValue("thresh")
        try:
            thresh = int(newThresh)
        except ValueError:
            pass

        newPDist = gui.getEntryValue("pdist")
        try:
            pdist = float(newPDist)
        except ValueError:
            pass

        newTrgtTemp = gui.getEntryValue("temp")
        try:
            config["Sensors"]["TargetOccupTemp"] = int(newTrgtTemp)
        except (ValueError, KeyError):
            pass

        # get copies of color and depth frames
        img = clients[nindex].getColor().copy()
        depImg = clients[nindex].getDepth().copy()
        if len(img) < 1 or len(depImg) < 1:
            continue

        fVert = clients[nindex].getVertex()

        # Display calibration points
        refXY = clients[nindex].getRefXY()
        for pt in refXY:
            cv2.circle(img, (pt[0], pt[1]), 4, (0, 255, 255), 1)
            img[pt[1], pt[0]] = (0, 255, 255)

        # Find valid boxes and add to or update people list
        people = []
        for c in clients:
            boxes, scores, labels = c.getResults()
            if len(boxes) > 0:
                for box, score, label in zip(boxes[0], scores[0], labels[0]):
                    # scores are sorted so we can break at first low score found
                    if score < (thresh / 100):
                        break
                    Peeps.AddToPeople(c, box, people, pdist, score)

        totalPeeps = len(people)
        frameBoxes, scores, isDupe = Peeps.GetIDBoxes(nindex, people)
        framePeeps = len(frameBoxes)

        # Visualize People found in frame
        for i in range(framePeeps):
            # color based on if appears in multiple frames
            color = (0, 0, 255)
            if isDupe[i]: color = (0, 255, 0)

            b = frameBoxes[i].astype(int)
            draw_box(img, b, color=color)

            # draw center point of box
            x = int((frameBoxes[i][0] + frameBoxes[i][2]) / 2)
            y = int((frameBoxes[i][1] + frameBoxes[i][3]) / 2)
            cv2.circle(img, (x, y), 3, (255, 255, 0), -1)

            if showConf:
                draw_caption(img, b, "%0.0f%%" % (float(scores[i]) * 100))

        # Calculate frame rate and display on image
        t1 = time.time()
        last10[it] = 1.0 / (t1 - t0)
        it = (it + 1) % 10
        fps = np.average(last10)
        t0 = t1
        cv2.putText(img, "stream FPS: %.2f" % fps, (0, 30),
                    cv2.FONT_HERSHEY_PLAIN, 1.2, (0, 0, 255))

        # Sensor readings
        fCO2 = clients[nindex].getCO2()
        fTVOC = clients[nindex].getTVOC()
        fTemp = clients[nindex].getTemp()
        fHum = clients[nindex].getHumidity()

        # Regression
        # [people, co2, tvoc, tempr, humid]
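        # NOTE: temperature is hard-coded to 70 °F here instead of the measured fTemp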
        regInput = [totalPeeps, fCO2, fTVOC, 70, fHum]
        res = Reg.runThroughModel(regInput, theta, meanStd)

        # Do HVAC stuff
        ctrl = controlHVAC(config, res, 70) + ("(%.2f)" % res)
        gui.setFrameText("HVAC", ctrl)

        # Display sensor readings on GUI
        gui.setFrameText("people",
                         "%d/%d (frame/total)" % (framePeeps, totalPeeps))
        gui.setFrameText("temp", "%.2f ºF" % fTemp)
        gui.setFrameText("humid", "%.2f%%" % fHum)
        gui.setFrameText("co2", "%.0f ppm" % fCO2)
        gui.setFrameText("tvoc", "%.0f ppb" % fTVOC)

        # Display color and depth images (depth picture-in-picture)
        imRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        depRGB = cv2.cvtColor(depImg, cv2.COLOR_BGR2RGB)
        guiImg = ImageTk.PhotoImage(Image.fromarray(imRGB))
        depImg = ImageTk.PhotoImage(
            Image.fromarray(cv2.resize(depRGB, (160, 120))))

        gui.setFrameImage("vid", guiImg)
        gui.setFrameImage("depth", depImg)

    # Close server and display window
    bThread.stop()
    bThread.join()
    serv.close()
    cv2.destroyAllWindows()
Example #3
def main():
    print("Enabling streams and starting camera...")
    pipeline = rs.pipeline()
    pc = rs.pointcloud()

    uartPort = "/dev/ttyS3"
    uartBaud = 115200
    rtscts = True

    cape = Sopressata.Cape(uartPort, uartBaud, rtscts)

    # Enable color and depth streams
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

    # Start streams
    profile = pipeline.start(config)

    # Depth sensor info
    depthSensor = profile.get_device().first_depth_sensor()

    depthProfile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
    #depthSensor.set_option(rs.option.visual_preset, 0)
    depthScale = depthSensor.get_depth_scale()

    # Enable stream alignment to align depth to color
    #alignStream = rs.stream.color
    #align = rs.align(alignStream)

    print("Initializing Client")
    client = Roni.RoniClient()
    print("Connecting...")
    client.connect()
    cape.openPort()
    print("UART connected to cape:", uartPort)
    print("streaming...")

    itr = 0
    while True:
        # Get next set of frames from stream
        frames = pipeline.wait_for_frames()

        depthFrame = frames.get_depth_frame()
        colorFrame = frames.get_color_frame()

        # Map the point cloud to the color frame (rs.align-based alignment is commented out)
        pc.map_to(colorFrame)
        #alignedFrames = align.process(frames)

        pts = pc.calculate(depthFrame)
        v = pts.get_vertices()
        verts = np.asanyarray(v).view(np.float32).reshape(-1, 3)  # xyz
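        # Send the point cloud in 8 round-robin slices so each frame carries 1/8 of the vertices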
        step = int(len(verts) / 8)  #640x480 / 8 = 38,400
        vertSend = verts[itr * step:step * (itr + 1)]
        vertPick = pickle.dumps(vertSend, protocol=pickle.HIGHEST_PROTOCOL)

        # Get aligned depth and color frames
        #depthFrame = alignedFrames.get_depth_frame()
        #colorFrame = alignedFrames.get_color_frame()

        # Read sensor data from Cape UART
        rawRead = cape.readSerial()
        if rawRead:
            co2, tvoc, temp, humid = cape.getAllSensorData(rawRead)
            sensors = [co2, tvoc, temp, humid]
            senseDump = pickle.dumps(sensors, protocol=pickle.HIGHEST_PROTOCOL)
            client.sendData(senseDump, Roni.TYPE_EDGE)

        depthTxt = Coppa.encodeDepthFrame(depthFrame)
        colorTxt = Coppa.encodeColorFrame(colorFrame)

        client.sendData(depthTxt, Roni.TYPE_DEPTH)
        client.sendData(vertPick, (Roni.TYPE_3D_0 + (itr % 8)))
        if not client.sendData(colorTxt, Roni.TYPE_RGB):
            break

        itr = (itr + 1) % 8

    client.close()
    cape.closePort()
Example #4
def main():
    global keyPress
    global gui
    global showConf
    global clients
    global nindex
    global connHandler
    global serv
    print("Initializing vid server")
    serv = Roni.RoniServer()
    serv.listen()
    connHandler = NewConnHandler()
    #connHandler.newClient(serv)
    #while(connHandler.getState() != connHandler.STATE_FINISHED):
    #	connHandler.tick()
    #cl = connHandler.getClient()

    t0 = time.time()
    t1 = 0
    last10 = [0] * 10
    it = 0
    frames = 0

    # adjust this to point to your downloaded/trained model
    model_path = '/home/nvidia/Documents/SalamiSense/snapshots_1/resnet50_csv_inference.h5'
    #model_path = '/home/nvidia/Documents/SalamiSense/resnet50_csv_inference10.h5'

    print("loading model")
    # load retinanet model
    bThread = BoxThread(model_path)
    bThread.start()

    # load label to names mapping for visualization purposes
    labels_to_names = {0: 'person'}
    colorizer = rs.colorizer()

    gui = initGUI()
    thresh = 0

    while keyPress != ord('q'):
        connHandler.tick()
        if connHandler.getState() == connHandler.STATE_FINISHED:
            clients.append(connHandler.getClient())
            nindex += 1
        gui.setButtonText("newnode", connHandler.getStatusStr())
        gui.update()

        if not clients:
            continue

        data = []
        depth = []
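        # Busy-wait until both a color and a depth frame have arrived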
        while not data or not depth:
            if not data:
                data = serv.receiveData(clients[nindex], Roni.TYPE_RGB)
            if not depth:
                depth = serv.receiveData(clients[nindex], Roni.TYPE_DEPTH)

        frames += 1
        if frames < 10: continue

        # get image
        img = Coppa.decodeColorFrame(data)
        depImg = Coppa.decodeDepthFrame(depth)
        if depImg is None: continue

        newThresh = gui.getEntryValue("thresh")
        try:
            thresh = int(newThresh)
        except ValueError:
            pass

        bThread.setData(img)
        boxes, scores, labels = bThread.getBoxes()
        bc = 0
        if len(boxes) > 0:
            for box, score, label in zip(boxes[0], scores[0], labels[0]):
                # scores are sorted so we can break
                if score < (thresh / 100):
                    break
                bc += 1
                color = label_color(label)

                b = box.astype(int)
                draw_box(img, b, color=color)
                if showConf:
                    draw_caption(img, b, "%s" % score)

        # Calculate frame rate and display on image
        t1 = time.time()
        last10[it] = 1.0 / (t1 - t0)
        it = (it + 1) % 10
        fps = np.average(last10)
        t0 = t1
        cv2.putText(img, "stream FPS: %.2f" % fps, (0, 30),
                    cv2.FONT_HERSHEY_PLAIN, 1.2, (0, 0, 255))

        gui.setFrameText("people", "%d" % bc)

        imRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        depRGB = cv2.cvtColor(depImg, cv2.COLOR_BGR2RGB)
        guiImg = ImageTk.PhotoImage(Image.fromarray(imRGB))
        depImg = ImageTk.PhotoImage(
            Image.fromarray(cv2.resize(depRGB, (160, 120))))

        gui.setFrameImage("vid", guiImg)
        gui.setFrameImage("depth", depImg)

    # Close server and display window
    bThread.stop()
    bThread.join()
    serv.close()
    cv2.destroyAllWindows()
Example #5
def main():
    print("Initializing vid server")
    serv = Roni.RoniServer()
    serv.listen()
    cl = serv.getClient()
    cv2.namedWindow('RoniStream', cv2.WINDOW_AUTOSIZE)

    t0 = time.time()
    t1 = 0
    last10 = [0] * 10
    it = 0
    frames = 0
    framerates = []

    keyPressed = None
    while keyPressed != ord('q'):
        data = serv.receiveData(cl, Roni.TYPE_RGB)

        if data:
            frames += 1

            # get image, copy, and edge detect
            img = Coppa.decodeColorFrame(data)
            orig = np.copy(img)

            w = img.shape[1]
            h = img.shape[0]

            # edge detect
            edge = cv2.Canny(img, 100, 400)
            # overlay detected edge pixels in green
            img[edge != 0] = (0, 255, 64)

            # Calculate framerate and display on image
            t1 = time.time()
            last10[it] = 1.0 / (t1 - t0)
            it = (it + 1) % 10
            fps = np.average(last10)
            if 550 > frames >= 50: framerates.append(fps)
            t0 = t1
            cv2.putText(img, "FPS: %.2f" % fps, (0, 30),
                        cv2.FONT_HERSHEY_PLAIN, 1.2, (0, 0, 255))

            # Combine original, edge detect, and overlay images
            edge3 = cv2.cvtColor(edge, cv2.COLOR_GRAY2BGR)
            img = cv2.copyMakeBorder(img, 0, 0, int(w / 2), int(w / 2),
                                     cv2.BORDER_CONSTANT, value=(0, 0, 0))
            combined = np.hstack((orig, edge3))
            combined = np.vstack((combined, img))

            # Display
            cv2.imshow('RoniStream', combined)
            keyPressed = cv2.waitKey(1)

    #with open("last1000frames_cpu.csv", 'w') as fi:
    #	for n in framerates:
    #		fi.write("%f," % n)

    print(np.average(framerates))

    # Close server and display window
    serv.close()
    cv2.destroyAllWindows()
Example #6
def main():
	print("Initializing vid server")
	serv = Roni.RoniServer()
	serv.listen()
	cl = serv.getClient()
	cv2.namedWindow('RoniStream', cv2.WINDOW_AUTOSIZE)
	
	t0 = time.time()
	t1 = 0
	last10 = [0]*10
	it = 0
	frames = 0
	keyPress = None


	# adjust this to point to your downloaded/trained model
	model_path = '/home/nvidia/Documents/SalamiSense/snapshots_1/resnet50_csv_inference.h5'
	#model_path = '/home/nvidia/Documents/SalamiSense/resnet50_csv_inference10.h5'

	print("loading model")
	# load retinanet model
	bThread = BoxThread(model_path)
	bThread.start()

	# load label to names mapping for visualization purposes
	labels_to_names = {0: 'person'}

	while keyPress != ord('q'):
		data = []
		depth = []
		while not data or not depth:
			if not data:
				data = serv.receiveData(cl, Roni.TYPE_RGB)
			if not depth:
				depth = serv.receiveData(cl, Roni.TYPE_DEPTH)

		frames += 1
		if frames < 10: continue

		# get image
		img = Coppa.decodeColorFrame(data)
		depImg = Coppa.decodeDepthFrame(depth)
		if depImg is None: continue
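		# Scale the 16-bit depth image into 8-bit and colorize it for display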
		depImg = np.asanyarray(depImg)
		depColorMap = cv2.applyColorMap(cv2.convertScaleAbs(depImg, alpha=0.03), cv2.COLORMAP_JET)

		bThread.setData(img)
		boxes, scores, labels = bThread.getBoxes()
		bc = 0
		if len(boxes) > 0:
			for box, score, label in zip(boxes[0], scores[0], labels[0]):
				# scores are sorted so we can break
				if score < 0.6:
					break
				bc += 1
				color = label_color(label)
		
				b = box.astype(int)
				draw_box(img, b, color=color)
				draw_caption(img, b, "%s" % score)

		# Calculate frame rate and display on image
		t1 = time.time()
		last10[it] = 1.0 / (t1 - t0)
		it = (it + 1) % 10
		fps = np.average(last10)
		t0 = t1
		cv2.putText(img, "stream FPS: %.2f" % fps, (0, 30),
		cv2.FONT_HERSHEY_PLAIN, 1.2, (0, 0, 255))
	
		cv2.putText(img, "People: %d" % bc, (0, 60),
		cv2.FONT_HERSHEY_PLAIN, 1.2, (0, 0, 255))
	
		combined = np.hstack((img, depColorMap))

		# Display
		cv2.imshow('RoniStream', combined)
		keyPress = cv2.waitKey(1)


	# Close server and display window 
	bThread.stop()
	bThread.join()
	serv.close()
	cv2.destroyAllWindows()
Example #7
def main():
    print("Initializing vid server")
    serv = Roni.RoniServer()
    serv.listen()
    cl = serv.getClient()
    cv2.namedWindow('RoniStream', cv2.WINDOW_AUTOSIZE)

    t0 = time.time()
    t1 = 0
    last10 = [0] * 10
    it = 0
    frames = 0
    keyPress = None

    # set the modified tf session as backend in keras
    keras.backend.tensorflow_backend.set_session(get_session())

    # adjust this to point to your downloaded/trained model
    model_path = '/home/nvidia/Documents/SalamiSense/snapshots_1/resnet50_csv_inference.h5'
    #model_path = '/home/nvidia/Documents/SalamiSense/resnet50_csv_inference.h5'

    print("loading model")
    # load retinanet model
    model = models.load_model(model_path, backbone_name='resnet50')

    # load label to names mapping for visualization purposes
    labels_to_names = {0: 'person'}

    while keyPress != ord('q'):
        data = serv.receiveData(cl, Roni.TYPE_RGB)

        if data:
            frames += 1
            if frames < 50: continue

            # get image
            img = Coppa.decodeColorFrame(data)

            img_copy = img.copy()
            img = preprocess_image(img)

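            # Run detection on a single-image batch; results come back sorted by descending score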
            boxes, scores, labels = model.predict_on_batch(
                np.expand_dims(img, axis=0))

            for box, score, label in zip(boxes[0], scores[0], labels[0]):
                # scores are sorted so we can break
                if score < 0.6:
                    break

                color = label_color(label)

                b = box.astype(int)
                draw_box(img_copy, b, color=color)
                draw_caption(img_copy, b, "%s" % score)

            # Calculate frame rate and display on image
            t1 = time.time()
            last10[it] = 1.0 / (t1 - t0)
            it = (it + 1) % 10
            fps = np.average(last10)
            t0 = t1
            cv2.putText(img_copy, "FPS: %.2f" % fps, (0, 30),
                        cv2.FONT_HERSHEY_PLAIN, 1.2, (0, 0, 255))

            # Display
            cv2.imshow('RoniStream', img_copy)

            keyPress = cv2.waitKey(1)

    # Close server and display window
    serv.close()
    cv2.destroyAllWindows()
Example #8
def main():
    print("Initializing vid server")
    serv = Roni.RoniServer()
    serv.listen()
    cl = serv.getClient()
    cv2.namedWindow('RoniStream', cv2.WINDOW_AUTOSIZE)

    mod = GC.makeGpuFun("func", funcStr)

    t0 = time.time()
    t1 = 0
    last10 = [0] * 10
    it = 0
    framerates = []
    frames = 0
    keyPress = None
    disco = 0

    while keyPress != ord('q'):
        data = serv.receiveData(cl, Roni.TYPE_RGB)
        if keyPress == ord('d'):
            disco = 0 if disco else 1

        if data:
            frames += 1

            # get image, copy, and edge detect
            img = Coppa.decodeColorFrame(data)
            orig = np.copy(img)
            edge = cv2.Canny(img, 100, 400)

            w = img.shape[1]
            h = img.shape[0]

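            # RB.tick() presumably advances a color cycle and returns the current (R, G, B) triple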
            RGB = RB.tick(15)
            # Copy data to GPU
            imgGPU = GC.gpuMemcpy(img, img.nbytes)
            edgeGPU = GC.gpuMemcpy(edge, edge.nbytes)

            # Launch kernel
            grid = (int((w + 15) / 16), int((h + 15) / 16), 1)
            GC.execute(mod,
                       imgGPU,
                       edgeGPU,
                       np.int32(h),
                       np.int32(w),
                       np.int32(RGB[2]),
                       np.int32(RGB[1]),
                       np.int32(RGB[0]),
                       np.int32(disco),
                       grid=grid)

            # Copy memory back from GPU
            GC.gpuMemget(img, imgGPU)

            # Calculate frame rate and display on image
            t1 = time.time()
            last10[it] = 1.0 / (t1 - t0)
            it = (it + 1) % 10
            fps = np.average(last10)
            if 550 > frames >= 50: framerates.append(fps)
            t0 = t1
            cv2.putText(img, "FPS: %.2f" % fps, (0, 30),
                        cv2.FONT_HERSHEY_PLAIN, 1.2, (0, 0, 255))

            # Combine original, edge detect, and overlay images
            edge3 = cv2.cvtColor(edge, cv2.COLOR_GRAY2BGR)
            img = cv2.copyMakeBorder(img, 0, 0, int(w / 2), int(w / 2),
                                     cv2.BORDER_CONSTANT, value=(0, 0, 0))
            combined = np.hstack((orig, edge3))
            combined = np.vstack((combined, img))

            # Display
            cv2.imshow('RoniStream', combined)
            keyPress = cv2.waitKey(1)

    #with open("last1000frames.csv", 'w') as fi:
    #	for n in framerates:
    #		fi.write("%f," % n)

    print(np.average(framerates))

    # Close server and display window
    serv.close()
    cv2.destroyAllWindows()