def __init__(self):
    """Set up the fullscreen main window, connect mode buttons, and start
    the matrix sync thread.

    Statement order matters here: the servo matrix is created and lowered
    to SERVO_MIN before the sync thread that drives it is started.
    """
    super().__init__()
    self.setupUi(self)
    # Full-screen mode (translated from Korean: "full screen setting")
    self.showFullScreen()
    # Each mode button opens its corresponding modal dialog.
    self.BluetoothButton.clicked.connect(self.BluetoothClicked)
    self.KinectButton.clicked.connect(self.KinectClicked)
    self.SnakeButton.clicked.connect(self.SnakeClicked)
    # Servo matrix: activate sync and drop all servos to the minimum height.
    self.m = Matrix(ROWS, COLS)
    self.m.syncActivate()
    self.m.setHeight(SERVO_MIN)
    self.a = AudioSpectrum()
    self.k = Kinect()
    # mMode selects the active data source (0 idle, 1 Kinect, 2 audio, 3 snake).
    self.mMode = 0
    # NOTE(review): non-daemon thread — the process may not exit cleanly
    # while moduleSyncThread loops forever; confirm intended shutdown path.
    self.moduleThread = threading.Thread(target=self.moduleSyncThread)
    self.moduleThread.start()
    #self.k.threadActivate()
    self.s = Snake()
def initKinect():
    """Kinect capture loop: publish frames through module-level globals.

    Initializes the Kinect runtime, then polls until the global
    ``disconnectStatus`` flag becomes 1. For each synchronized body+color
    frame it updates the globals ``color_img``, ``joint`` and ``jointPoints``
    and sets ``initialization`` to 1 so consumers know data is available.
    ``qImg`` is seeded with a black placeholder image.
    """
    global disconnectStatus
    global kinect
    global initialization
    global color_img
    global joint
    global jointPoints
    global qImg

    kinect = Kv2.initializeKinect()
    color_width, color_height = Kv2.getColorDimension(kinect)

    # BUGFIX: np.zeros defaults to float64, but Format_ARGB32 with
    # bytesPerLine = 4 * width describes a 1-byte-per-channel buffer; the
    # placeholder must be allocated as uint8 so the buffer size matches.
    tempImg = np.zeros((color_height, color_width, 4), dtype=np.uint8)
    bytesPerLine = 4 * color_width
    qImg = QImage(tempImg.data, color_width, color_height, bytesPerLine,
                  QImage.Format_ARGB32)

    ########################
    ### Main Kinect Loop ###
    ########################
    while True:
        if kinect.has_new_body_frame() and kinect.has_new_color_frame():
            # Get images from camera and flag that first data has arrived.
            color_img, joint, jointPoints = Kv2.getKinectFrames(
                kinect, color_height, color_width)
            initialization = 1
        if disconnectStatus == 1:
            break

    kinect.close()
    print("Kinect Shutdown!")
import numpy as np
import Kinect
# NOTE(review): "import test" shadows the stdlib `test` package — consider
# renaming the local module; verify nothing relies on the stdlib name.
import test
import points
import AnimatedPointcloud
import PointSourceTest

#test.create_cube()
#points.create_points()
#AnimatedPointcloud.test()

# Start the Kinect driver and block until it is ready to serve frames.
k = Kinect.Kinect(debug=False)
k.start()
k.wait_for_init()

# The display pulls data itself via the k.get_pointcloud callable.
pc = AnimatedPointcloud.DisplayPointcloud(k.get_pointcloud)
pc.start()
print("Started")

#points = np.zeros((10000, 3))
#for k in range(len(points)):
#    points[k] = 20*(np.random.rand(3)-0.5)
#points = k.get_pointcloud()
#print("max:", np.max(points[:,1]))
#print("min:", np.min(points[:,1]))
#print("Points generated")
#pc.change_data(points)
#print("Points Added")
class mainWindow(QMainWindow, main_form_class):
    """Fullscreen main window.

    Routes one of three data sources to the servo matrix, selected by
    ``self.mMode``: 0 = idle, 1 = Kinect depth, 2 = audio spectrum,
    3 = snake game. A background thread (moduleSyncThread) performs the
    actual forwarding.
    """

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        # Full-screen mode (translated from Korean comment).
        self.showFullScreen()
        self.BluetoothButton.clicked.connect(self.BluetoothClicked)
        self.KinectButton.clicked.connect(self.KinectClicked)
        self.SnakeButton.clicked.connect(self.SnakeClicked)
        # Servo matrix: activate sync and drop all servos to minimum height.
        self.m = Matrix(ROWS, COLS)
        self.m.syncActivate()
        self.m.setHeight(SERVO_MIN)
        self.a = AudioSpectrum()
        self.k = Kinect()
        self.mMode = 0
        # Forwarding thread; runs for the lifetime of the window.
        self.moduleThread = threading.Thread(target=self.moduleSyncThread)
        self.moduleThread.start()
        self.s = Snake()

    def KinectClicked(self):
        """Enter Kinect mode while the Kinect dialog is open."""
        self.mMode = 1
        self.k.threadActivate(True)  # start the Kinect thread
        dlg = kinectDialog()
        dlg.exec_()
        if dlg.isBackClicked:  # was "== True"; truthiness test is the idiom
            print("종료")
            self.mMode = 0
            self.k.threadActivate(False)

    def BluetoothClicked(self):
        """Enter audio-spectrum mode while the audio dialog is open."""
        self.mMode = 2
        self.a.threadActivate(True)
        dlg = audioDialog()
        dlg.exec_()
        if dlg.isBackClicked:
            self.mMode = 0
            self.a.threadActivate(False)

    def SnakeClicked(self):
        """Enter snake-game mode while the snake dialog is open."""
        self.mMode = 3
        dlg = snakeDialog(self)
        dlg.exec_()
        if dlg.isBackClicked:
            self.mMode = 0

    def moduleSyncThread(self):
        """Forever forward the active source's height data to the matrix."""
        while True:
            if self.mMode == 1:
                # 1. Kinect depth map.
                print(self.k.depth)  # debug output left by original author
                self.m.setKinectHeight(self.k.depth)
                time.sleep(0.0001)
            elif self.mMode == 2:
                # 2. Audio spectrum.
                self.m.setKinectHeight(self.a.depth)
                time.sleep(0.0001)
            elif self.mMode == 3:
                # 3. Snake game board.
                self.m.setHeight(self.s.map)
                time.sleep(0.0001)
            time.sleep(0.0001)
def _load_cropped(pattern):
    """Load every image matching *pattern* and strip its background."""
    return [IP.removeClothBG(img)
            for img in (cv2.imread(f) for f in glob.glob(pattern))]


def processKinect():
    """Virtual dressing-room loop: composite shirt/pant overlays on the user.

    Consumes the globals published by the Kinect capture thread
    (``color_img``, ``joint``, ``jointPoints``, ``initialization``) and
    publishes each composited frame as a QImage in the global ``qImg``.
    Runs until ``disconnectStatus`` is set to 1.
    """
    global disconnectStatus
    global kinect
    global initialization
    global color_img
    global joint
    global jointPoints
    global qImg

    # Last known body widths (pixels); retained while no valid skeleton.
    shoulder_distance = 300
    waist_distance = 300

    # 2D joint positions in color-image pixels, zero until first skeleton.
    SJR_x = SJR_y = 0  # right shoulder
    SJL_x = SJL_y = 0  # left shoulder
    Nk_x = Nk_y = 0    # neck
    HPL_x = HPL_y = 0  # left hip
    HPR_x = HPR_y = 0  # right hip
    SPB_x = SPB_y = 0  # spine base

    # Load and background-strip the wardrobe once, before the loop
    # (was two manual append loops over intermediate lists).
    cropped_shirts = _load_cropped('Datasets/Shirts/*.png')
    cropped_pants = _load_cropped('Datasets/Pants/*.png')

    while True:
        if initialization == 1:
            #################################################
            ### Extract and Draw 2D joints on single body ###
            #################################################
            if joint != 0:
                joint2D = Kv2.getBodyJoints(joint, jointPoints)
                # Scale depth-space joints into color-image pixels; offsets
                # are empirical alignment factors from the original code.
                SJR_x = int(joint2D[8, 0] * 3.75) + 30
                SJR_y = int(joint2D[8, 1] * 2.547) - 50
                SJL_x = int(joint2D[4, 0] * 3.75) + 30
                SJL_y = int(joint2D[4, 1] * 2.547) - 50
                Nk_x = int(joint2D[2, 0] * 3.75) + 30
                Nk_y = int(joint2D[2, 1] * 2.547) - 50
                HPL_x = int(joint2D[12, 0] * 3.75)
                HPL_y = int(joint2D[12, 1] * 2.547) - 50
                HPR_x = int(joint2D[16, 0] * 3.75) + 60
                HPR_y = int(joint2D[16, 1] * 2.547) - 50
                SPB_x = int(joint2D[0, 0] * 3.75) + 30
                SPB_y = int(joint2D[0, 1] * 2.547) - 50
                # color_img = Kv2.drawJointsFull(color_img, joint2D)
                # abs() replaces the original sqrt(pow(x, 2)) round-trip.
                distance1 = abs(SJL_x - SJR_x)
                distance2 = abs(HPL_x - HPR_x)
            else:
                distance1 = 300
                distance2 = 300

            # Keep the previous width when the measurement is degenerate
            # (original wrote this as a no-op self-assignment else-branch).
            if distance1 > 0:
                shoulder_distance = distance1
            if distance2 > 0:
                waist_distance = distance2

            ##############################################
            ### Resizing cloth image according to user ###
            ##############################################
            resized_shirt = IP.image_resize(cropped_shirts[1],
                                            width=int(shoulder_distance + 60))
            resized_pant = IP.image_resize(cropped_pants[3],
                                           width=int(waist_distance))
            shirt_y = np.shape(resized_shirt)[0]
            shirt_x = np.shape(resized_shirt)[1]
            pant_y = np.shape(resized_pant)[0]
            pant_x = np.shape(resized_pant)[1]

            #####################################################
            ### Add resized image to background-removed image ###
            #####################################################
            overlay = np.zeros_like(color_img)
            overlay_y = np.shape(overlay)[0]
            overlay_x = np.shape(overlay)[1]

            # Clip the pant if it would extend past the bottom of the frame
            # (original used a no-op if/else; max() is equivalent).
            cutoff_pants = max(0, (SPB_y + pant_y) - overlay_y)
            if (HPL_x - 20) > 0 and SPB_y > 0 and (HPR_x + 70) < overlay_x:
                pant_left = int(SPB_x - (waist_distance / 2))
                overlay[SPB_y:SPB_y + pant_y - cutoff_pants,
                        pant_left:pant_left + pant_x] = \
                    resized_pant[0:pant_y - cutoff_pants, 0:pant_x]

            # Shirt addition (anchored at the neck joint).
            if (SJL_x - 20) > 0 and Nk_y > 0 and (SJR_x + 70) < overlay_x:
                shirt_left = int(Nk_x - (shoulder_distance / 2) - 30)
                overlay[Nk_y:Nk_y + shirt_y,
                        shirt_left:shirt_left + shirt_x] = \
                    resized_shirt[0:shirt_y, 0:shirt_x]

            Output = IP.mergeImages(overlay, color_img)

            ######################################
            ### Publish the frame as a QImage  ###
            ######################################
            height, width, channel = np.shape(Output)
            bytesPerLine = 4 * width
            qImg = QImage(Output.data, width, height, bytesPerLine,
                          QImage.Format_ARGB32)

        if disconnectStatus == 1:
            break
__author__ = 'aniket'

import Kinect

# Busy loop: repeatedly issue the "take right near" command.
# NOTE(review): no sleep or exit condition — presumably take_right_near()
# blocks internally; confirm before relying on this script.
while True:
    Kinect.take_right_near()
# Phillips, King
# Driver script: runs the full pipeline end to end. This will be replaced
# once we move to ROS, but it lets each part be developed independently.

from PIL import Image
from freenect import sync_get_depth as get_depth, sync_get_video as get_video
import numpy as np
import Locator, Classifier, Router, Kinect

# Images will eventually draw from webcam
img = Image.open("Finder1.jpg")

# Locate the robot and its target in the still image.
robotPos = Locator.locateRobot(img)
targetPos = Locator.locateTarget(img)

#(depths,_)=get_depth() #need Kinect hooked up for this to work. Moore, can you create some simulations of the output of this?
# Stand-in depth map: an all-zero grid matching the image dimensions.
depths = [[0] * img.size[0] for _ in range(img.size[1])]

# Height estimation -> drivability classification -> route planning.
knownHeights = Kinect.getHeights(img, depths, robotPos)
drivable = Classifier.getDriveable(img, knownHeights)
(dist, waypoints) = Router.getRoute(drivable, robotPos, targetPos)
import Kinect

# Entry point: delegate everything to the Kinect module.
Kinect.start()
__author__ = 'aniket'

import Kinect
import cv2

# Depth-frame loop: feed thresholded contours to the edge detectors and
# scan for walls, until any key is pressed in the display window.
while True:
    depth_frame = Kinect.get_depth()

    # Both contour sets are extracted up front, in the original order.
    contours_pos = Kinect.contours_return(depth_frame, 10)
    contours_neg = Kinect.contours_return(depth_frame, -10)

    for contour in contours_pos:
        Kinect.potential_leftedge(contour)
    for contour in contours_neg:
        Kinect.potential_rightedge(contour)

    Kinect.search_wall()

    cv2.imshow('final', depth_frame)
    if cv2.waitKey(1) != -1:  # any keypress ends the loop
        break
__author__ = 'aniket'

import Kinect
import cv2

# NOTE(review): this script is functionally identical to the other
# depth-contour edge-detection loop in this collection — consider
# deduplicating into one module.
while True:
    z = Kinect.get_depth()
    cr = Kinect.contours_return(z,10)   # contours at +10 threshold
    cl = Kinect.contours_return(z,-10)  # contours at -10 threshold
    for c in cr:
        Kinect.potential_leftedge(c)
    for c in cl:
        Kinect.potential_rightedge(c)
    Kinect.search_wall()
    cv2.imshow('final', z)
    # Any keypress (waitKey returns != -1) ends the loop.
    if cv2.waitKey(1) != -1:
        break