Example #1
def main():
    global keyPress
    global gui
    global showConf
    global clients
    global nindex
    global connHandler
    global serv
    print("Initializing vid server")
    serv = Roni.RoniServer()
    serv.listen()
    connHandler = NewConnHandler()
    #connHandler.newClient(serv)
    #while(connHandler.getState() != connHandler.STATE_FINISHED):
    #	connHandler.tick()
    #cl = connHandler.getClient()

    t0 = time.time()
    t1 = 0
    last10 = [0] * 10
    it = 0
    frames = 0

    # adjust this to point to your downloaded/trained model
    model_path = '/home/nvidia/Documents/SalamiSense/snapshots_1/resnet50_csv_inference.h5'
    #model_path = '/home/nvidia/Documents/SalamiSense/resnet50_csv_inference10.h5'

    print("loading model")
    # load retinanet model
    bThread = BoxThread(model_path)
    bThread.start()

    # load label to names mapping for visualization purposes
    labels_to_names = {0: 'person'}
    colorizer = rs.colorizer()

    gui = initGUI()
    thresh = 0

    while keyPress != ord('q'):
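        # Poll the connection handler so new clients can attach mid-stream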
        connHandler.tick()
        if connHandler.getState() == connHandler.STATE_FINISHED:
            clients.append(connHandler.getClient())
            nindex += 1
        gui.setButtonText("newnode", connHandler.getStatusStr())
        gui.update()

        if not clients:
            continue

        data = []
        depth = []
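        # Busy-wait until both an RGB and a depth frame have arrived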
        while not data or not depth:
            if not data:
                data = serv.receiveData(clients[nindex], Roni.TYPE_RGB)
            if not depth:
                depth = serv.receiveData(clients[nindex], Roni.TYPE_DEPTH)

        # Skip the first 10 frames, just in case
        frames += 1
        if frames < 10: continue

        # get image
        img = Coppa.decodeColorFrame(data)
        depImg = Coppa.decodeDepthFrame(depth)
        if depImg is None: continue

        newThresh = gui.getEntryValue("thresh")
        try:
            thresh = int(newThresh)
        except ValueError:
            pass  # keep the previous threshold if the entry isn't a number

        bThread.setData(img)
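        # Detection runs asynchronously in BoxThread; getBoxes() returns its latest results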
        boxes, scores, labels = bThread.getBoxes()
        bc = 0
        if len(boxes) > 0:
            for box, score, label in zip(boxes[0], scores[0], labels[0]):
                # scores are sorted so we can break
                if score < (thresh / 100):
                    break
                bc += 1
                color = label_color(label)

                b = box.astype(int)
                draw_box(img, b, color=color)
                if showConf:
                    draw_caption(img, b, "%s" % score)

        # Calculate frame rate and display on image
        t1 = time.time()
        last10[it] = 1.0 / (t1 - t0)
        it = (it + 1) % 10
        fps = np.average(last10)
        t0 = t1
        cv2.putText(img, "stream FPS: %.2f" % fps, (0, 30),
                    cv2.FONT_HERSHEY_PLAIN, 1.2, (0, 0, 255))

        gui.setFrameText("people", "%d" % bc)

        imRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        depRGB = cv2.cvtColor(depImg, cv2.COLOR_BGR2RGB)
        guiImg = ImageTk.PhotoImage(Image.fromarray(imRGB))
        depImg = ImageTk.PhotoImage(
            Image.fromarray(cv2.resize(depRGB, (160, 120))))

        gui.setFrameImage("vid", guiImg)
        gui.setFrameImage("depth", depImg)


    # Close server and display window
    bThread.stop()
    bThread.join()
    serv.close()
    cv2.destroyAllWindows()
Example #2
def main():
    global keyPress
    global gui
    global showConf
    global clients
    global nindex
    global connHandler
    global serv

    print("Reading in config file")
    config = None
    try:
        with open("config.json", 'r') as f:
            config = json.loads(f.read())
    except:
        print("Unable to open config file")
        exit()
    print("Loaded config file\n", json.dumps(config))

    print("Initializing vid server")
    serv = Roni.RoniServer()
    serv.listen()
    connHandler = NewConnHandler()

    # For averaging framerate
    t0 = time.time()
    t1 = 0
    last10 = [0] * 10
    it = 0
    frames = 0

    # adjust this to point to your downloaded/trained model
    model_path = '/home/nvidia/Documents/SalamiSense/snapshots_1/resnet50_csv_inference.h5'
    #model_path = '/home/nvidia/Documents/SalamiSense/resnet50_csv_inference10.h5'

    print("loading CNN model")
    # load retinanet model
    bThread = BoxThread(model_path)
    bThread.start()

    theta, meanStd = Reg.loadModel("noisy_sensor_model.csv")

    gui = initGUI()
    thresh = 0
    pdist = 0

    while keyPress != ord('q'):
        connHandler.tick()
        # New client connection established
        if connHandler.getState() == connHandler.STATE_FINISHED:
            newClient = connHandler.getClient()
            if newClient is not None:
                cids = [citer.getID() for citer in clients]
                c = NodeData()
                c.setClient(newClient)
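                # Assign the lowest ID in 0..999 not already in use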
                for i in range(1000):
                    if i not in cids:
                        c.setID(i)
                        break
                clients.append(c)
                nindex = len(clients) - 1
                bThread.addClient(c)
        statusStr, statusColor = connHandler.getStatusStr()
        gui.setButtonText("newnode", statusStr, statusColor)
        gui.update()

        gui.setFrameText("nodeno", "%d/%d" % (nindex + 1, len(clients)))
        if not clients:
            gui.setFrameImage("vid", [])
            gui.setFrameImage("depth", [])
            continue

        # Receive color and depth data from each client
        for c in clients:
            data = serv.receiveData(c.getClient(), Roni.TYPE_RGB)
            depth = serv.receiveData(c.getClient(), Roni.TYPE_DEPTH)
            sense = serv.receiveData(c.getClient(), Roni.TYPE_EDGE)
            vfrac = 0
            vertStr = None
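            # Scan the eight vertex channels (TYPE_3D_0..TYPE_3D_7) for the first non-empty payload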
            for i in range(8):
                vertStr = serv.receiveData(c.getClient(), Roni.TYPE_3D_0 + i)
                if vertStr:
                    vertStr = pickle.loads(vertStr)
                    vfrac = i
                    break

            # Decode data to images and sensor readings
            if data:
                img = Coppa.decodeColorFrame(data)
                c.setColor(img.copy())
            if depth:
                depImg = Coppa.decodeDepthFrame(depth)
                c.setDepth(depImg.copy())
            if sense:
                sense = pickle.loads(sense)
                if len(sense) > 3:
                    c.setCO2(sense[0])
                    c.setTVOC(sense[1])
                    c.setTemp(sense[2])
                    c.setHumidity(sense[3])
            if vertStr is not None and len(vertStr) > 0:
                c.setVertex(vertStr, vfrac)

        # Skip first 10 frames, just in case
        frames += 1
        if frames < 10: continue

        newThresh = gui.getEntryValue("thresh")
        try:
            thresh = int(newThresh)
        except ValueError:
            pass  # keep the previous threshold on non-numeric input

        newPDist = gui.getEntryValue("pdist")
        try:
            pdist = float(newPDist)
        except ValueError:
            pass

        newTrgtTemp = gui.getEntryValue("temp")
        try:
            config["Sensors"]["TargetOccupTemp"] = int(newTrgtTemp)
        except (ValueError, KeyError):
            pass

        # Get copies of the current node's color and depth frames
        img = clients[nindex].getColor()
        depImg = clients[nindex].getDepth()
        if img is None or depImg is None or len(img) < 1 or len(depImg) < 1:
            continue
        img = img.copy()
        depImg = depImg.copy()

        fVert = clients[nindex].getVertex()

        # Display calibration points
        refXY = clients[nindex].getRefXY()
        for pt in refXY:
            cv2.circle(img, (pt[0], pt[1]), 4, (0, 255, 255), 1)
            img[pt[1], pt[0]] = [0, 255, 255]

        # Find valid boxes and add to or update people list
        people = []
        for c in clients:
            boxes, scores, labels = c.getResults()
            if len(boxes) > 0:
                for box, score, label in zip(boxes[0], scores[0], labels[0]):
                    # scores are sorted so we can break at first low score found
                    if score < (thresh / 100):
                        break
                    Peeps.AddToPeople(c, box, people, pdist, score)

        totalPeeps = len(people)
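        # Boxes visible from the current node; isDupe marks people also seen by other nodes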
        frameBoxes, scores, isDupe = Peeps.GetIDBoxes(nindex, people)
        framePeeps = len(frameBoxes)

        # Visualize People found in frame
        for i in range(framePeeps):
            # color based on if appears in multiple frames
            color = (0, 0, 255)
            if isDupe[i]: color = (0, 255, 0)

            b = frameBoxes[i].astype(int)
            draw_box(img, b, color=color)

            # draw center point of box
            x = int((frameBoxes[i][0] + frameBoxes[i][2]) / 2)
            y = int((frameBoxes[i][1] + frameBoxes[i][3]) / 2)
            cv2.circle(img, (x, y), 3, (255, 255, 0), -1)

            if showConf:
                draw_caption(img, b, "%0.0f%%" % (float(scores[i]) * 100))

        # Calculate frame rate and display on image
        t1 = time.time()
        last10[it] = 1.0 / (t1 - t0)
        it = (it + 1) % 10
        fps = np.average(last10)
        t0 = t1
        cv2.putText(img, "stream FPS: %.2f" % fps, (0, 30),
                    cv2.FONT_HERSHEY_PLAIN, 1.2, (0, 0, 255))

        # Sensor readings
        fCO2 = clients[nindex].getCO2()
        fTVOC = clients[nindex].getTVOC()
        fTemp = clients[nindex].getTemp()
        fHum = clients[nindex].getHumidity()

        # Regression
        # [people, co2, tvoc, tempr, humid]
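        # NOTE: temperature is pinned to 70 here rather than using the measured fTemp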
        regInput = [totalPeeps, fCO2, fTVOC, 70, fHum]
        res = Reg.runThroughModel(regInput, theta, meanStd)

        # Drive the HVAC controller from the regression output (target temp hard-coded to 70 °F)
        ctrl = controlHVAC(config, res, 70) + ("(%.2f)" % res)
        gui.setFrameText("HVAC", ctrl)

        # Display sensor readings on GUI
        gui.setFrameText("people",
                         "%d/%d (frame/total)" % (framePeeps, totalPeeps))
        gui.setFrameText("temp", "%.2f ºF" % fTemp)
        gui.setFrameText("humid", "%.2f%%" % fHum)
        gui.setFrameText("co2", "%.0f ppm" % fCO2)
        gui.setFrameText("tvoc", "%.0f ppb" % fTVOC)

        # Display color and depth images (depth picture-in-picture)
        imRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        depRGB = cv2.cvtColor(depImg, cv2.COLOR_BGR2RGB)
        guiImg = ImageTk.PhotoImage(Image.fromarray(imRGB))
        depImg = ImageTk.PhotoImage(
            Image.fromarray(cv2.resize(depRGB, (160, 120))))

        gui.setFrameImage("vid", guiImg)
        gui.setFrameImage("depth", depImg)

    # Close server and display window
    bThread.stop()
    bThread.join()
    serv.close()
    cv2.destroyAllWindows()
Example #3
def main():
	print("Initializing vid server")
	serv = Roni.RoniServer()
	serv.listen()
	cl = serv.getClient()
	cv2.namedWindow('RoniStream', cv2.WINDOW_AUTOSIZE)
	
	t0 = time.time()
	t1 = 0
	last10 = [0]*10
	it = 0
	frames = 0
	keyPress = None


	# adjust this to point to your downloaded/trained model
	model_path = '/home/nvidia/Documents/SalamiSense/snapshots_1/resnet50_csv_inference.h5'
	#model_path = '/home/nvidia/Documents/SalamiSense/resnet50_csv_inference10.h5'

	print("loading model")
	# load retinanet model
	bThread = BoxThread(model_path)
	bThread.start()

	# load label to names mapping for visualization purposes
	labels_to_names = {0: 'person'}

	while keyPress != ord('q'):
		data = []
		depth = []
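		# Busy-wait until both an RGB and a depth frame have arrived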
		while not data or not depth:
			if not data:
				data = serv.receiveData(cl, Roni.TYPE_RGB)
			if not depth:
				depth = serv.receiveData(cl, Roni.TYPE_DEPTH)

		frames += 1
		if frames < 10: continue

		# get image
		img = Coppa.decodeColorFrame(data)
		depImg = Coppa.decodeDepthFrame(depth)
		if depImg is None: continue
		depImg = np.asanyarray(depImg)
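		# Scale the raw depth values and render them with a JET colormap for display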
		depColorMap = cv2.applyColorMap(cv2.convertScaleAbs(depImg, alpha=0.03), cv2.COLORMAP_JET)

		bThread.setData(img)
		boxes, scores, labels = bThread.getBoxes()
		bc = 0
		if len(boxes) > 0:
			for box, score, label in zip(boxes[0], scores[0], labels[0]):
				# scores are sorted so we can break
				if score < 0.6:
					break
				bc += 1
				color = label_color(label)
		
				b = box.astype(int)
				draw_box(img, b, color=color)
				draw_caption(img, b, "%s" % score)

		# Calculate frame rate and display on image
		t1 = time.time()
		last10[it] = 1.0 / (t1 - t0)
		it = (it + 1) % 10
		fps = np.average(last10)
		t0 = t1
		cv2.putText(img, "stream FPS: %.2f" % fps, (0, 30),
		cv2.FONT_HERSHEY_PLAIN, 1.2, (0, 0, 255))
	
		cv2.putText(img, "People: %d" % bc, (0, 60),
		cv2.FONT_HERSHEY_PLAIN, 1.2, (0, 0, 255))
	
		combined = np.hstack((img, depColorMap))

		# Display
		cv2.imshow('RoniStream', combined)
		keyPress = cv2.waitKey(1)


	# Close server and display window 
	bThread.stop()
	bThread.join()
	serv.close()
	cv2.destroyAllWindows()