def run(self):
    """Servo-control thread body.

    Loops until the owning thread clears its running event. Each pass blocks
    on the pause flag, executes the currently requested camera-servo motion
    (if any), and throttles itself to roughly 33 updates per second.
    """
    # Commands whose names match servo-module functions one-to-one.
    valid_commands = ('lookleft', 'lookright', 'up', 'down')
    while self.__running.isSet():
        # Returns immediately while the flag is set; blocks while paused.
        self.__flag.wait()
        # servo_command / servo_speed are module-level state set elsewhere.
        if servo_command in valid_commands:
            getattr(servo, servo_command)(servo_speed)
        time.sleep(0.03)
def run(self):
    """Servo-control thread loop.

    Runs until the owning thread clears its running event; each pass waits
    on the pause flag, performs the currently requested camera-servo motion,
    then sleeps briefly to limit the update rate.
    """
    # self.__running / self.__flag look like threading.Event objects owned by
    # the enclosing thread class (defined elsewhere) -- TODO confirm.
    while self.__running.isSet():
        self.__flag.wait()  # blocks here while the thread is paused
        # servo_command / servo_speed are module-level state set by the caller.
        if servo_command == "lookleft":
            servo.lookleft(servo_speed)
        elif servo_command == "lookright":
            servo.lookright(servo_speed)
        elif servo_command == "up":
            servo.up(servo_speed)
        elif servo_command == "down":
            servo.down(servo_speed)
        time.sleep(0.03)  # ~33 command polls per second
def run(self):
    """Servo-control thread loop: execute the pending servo command each tick."""
    while self.__running.isSet():
        # wait() returns immediately while the flag is set (True); while it is
        # cleared (False) it blocks until the internal flag becomes True again.
        self.__flag.wait()
        # servo_command / servo_speed are module-level state set elsewhere.
        if servo_command == 'lookleft':
            servo.lookleft(servo_speed)
        elif servo_command == 'lookright':
            servo.lookright(servo_speed)
        elif servo_command == 'up':
            servo.up(servo_speed)
        elif servo_command == 'down':
            servo.down(servo_speed)
        elif servo_command == 'lookup':
            servo.lookup(servo_speed)
        elif servo_command == 'lookdown':
            servo.lookdown(servo_speed)
        elif servo_command == 'grab':
            servo.grab(servo_speed)
        elif servo_command == 'loose':
            servo.loose(servo_speed)
        else:
            pass  # idle or unknown command: do nothing this tick
        time.sleep(0.07)  # ~14 command polls per second
def run(self):
    """Function-mode thread loop: servo commands plus autonomous behaviours.

    functionMode selects the active behaviour each tick:
      4 -> line following (findline.run), 5 -> autoDect(50) (presumably
      obstacle avoidance -- TODO confirm against autoDect's definition),
      6 -> MPU-6050 based camera leveling on the accelerometer X axis.
    """
    global goal_pos, servo_command, init_get, functionMode
    while self.__running.isSet():
        self.__flag.wait()  # blocks here while the thread is paused
        # Manual servo commands are suppressed in mode 6; there the up/down
        # commands are handled specially to re-target the leveling goal.
        if functionMode != 6:
            if servo_command == 'lookleft':
                servo.lookleft(servo_speed)
            elif servo_command == 'lookright':
                servo.lookright(servo_speed)
            elif servo_command == 'up':
                servo.up(servo_speed)
            elif servo_command == 'down':
                servo.down(servo_speed)
            else:
                pass
        if functionMode == 4:
            # Line following: camera straight ahead, one tracking step.
            servo.ahead()
            findline.run()
            if not functionMode:
                move.motorStop()
        elif functionMode == 5:
            autoDect(50)
            if not functionMode:
                move.motorStop()
        elif functionMode == 6:
            if MPU_connection:
                accelerometer_data = sensor.get_accel_data()
                X_get = accelerometer_data['x']
                # First pass in this mode: latch the current tilt as the goal.
                if not init_get:
                    goal_pos = X_get
                    init_get = 1
                # An up/down command nudges the servo, waits for the motion to
                # settle, then re-reads the accelerometer as the new goal.
                if servo_command == 'up':
                    servo.up(servo_speed)
                    time.sleep(0.2)
                    accelerometer_data = sensor.get_accel_data()
                    X_get = accelerometer_data['x']
                    goal_pos = X_get
                elif servo_command == 'down':
                    servo.down(servo_speed)
                    time.sleep(0.2)
                    accelerometer_data = sensor.get_accel_data()
                    X_get = accelerometer_data['x']
                    goal_pos = X_get
                # Proportional correction toward the goal, dead-banded by
                # tor_pos; step size scales with the error via mpu_speed.
                if abs(X_get - goal_pos) > tor_pos:
                    if X_get > goal_pos:
                        servo.down(int(mpu_speed * abs(X_get - goal_pos)))
                    elif X_get < goal_pos:
                        servo.up(int(mpu_speed * abs(X_get - goal_pos)))
                time.sleep(0.03)
                continue  # skip the slower tail sleep while leveling
            else:
                # No MPU available: drop out of mode 6 and pause this thread.
                functionMode = 0
                try:
                    self.pause()
                except:
                    pass  # NOTE(review): bare except silently swallows errors
        time.sleep(0.03)
def up():
    """Apply a U (up-face) turn to the software cube model.

    Prints the move letter, commands the physical turn via servo.up(),
    rotates the stored up face (char_cube[5]) a quarter turn, and cycles the
    top rows of the four side faces (left <- front <- right <- back <- left),
    then refreshes the display via update_cube().
    """
    print('U')
    servo.up()
    # Snapshot the three rows of each affected face before overwriting.
    temp_left = [row[:] for row in char_cube[0][:3]]
    temp_front = [row[:] for row in char_cube[1][:3]]
    temp_right = [row[:] for row in char_cube[2][:3]]
    temp_back = [row[:] for row in char_cube[3][:3]]
    temp_up = [row[:] for row in char_cube[5][:3]]
    # Rotate the up face in place: new[i][j] = old[2-j][i].
    for i in range(3):
        char_cube[5][i][:3] = [temp_up[2 - j][i] for j in range(3)]
    # Cycle the top rows of the side faces.
    char_cube[0][0][:3] = temp_front[0][:3]
    char_cube[1][0][:3] = temp_right[0][:3]
    char_cube[2][0][:3] = temp_back[0][:3]
    char_cube[3][0][:3] = temp_left[0][:3]
    update_cube()
def capture_thread(self, IPinver):
    """Camera capture / streaming thread.

    Grabs frames from the Pi camera forever; for each frame it optionally
    runs the colour-tracking pipeline (FindColorMode) and the motion
    watchdog (WatchDogMode), then streams the annotated frame as a
    base64-encoded JPEG over a ZMQ PUB socket to tcp://<IPinver>:5555.

    :param IPinver: IP address of the video receiver.
    Does not return under normal operation.
    """
    # --- OpenCV / camera initialisation -----------------------------------
    ap = argparse.ArgumentParser()
    ap.add_argument("-b", "--buffer", type=int, default=64, help="max buffer size")
    args = vars(ap.parse_args())
    pts = deque(maxlen=args["buffer"])  # NOTE(review): currently unused
    font = cv2.FONT_HERSHEY_SIMPLEX
    camera = picamera.PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 20
    rawCapture = PiRGBArray(camera, size=(640, 480))
    deeppicar = DeepPiCar(camera)  # NOTE(review): unused in this loop

    # --- ZMQ video stream -------------------------------------------------
    context = zmq.Context()
    footage_socket = context.socket(zmq.PUB)
    print(IPinver)
    footage_socket.connect('tcp://%s:5555' % IPinver)

    avg = None                  # running background model for the watchdog
    motionCounter = 0
    lastMotionCaptured = datetime.datetime.now()

    for frame in camera.capture_continuous(rawCapture, format="bgr",
                                           use_video_port=True):
        frame_image = frame.array
        # Crosshair at the frame centre (320, 240).
        cv2.line(frame_image, (300, 240), (340, 240), (128, 255, 128), 1)
        cv2.line(frame_image, (320, 220), (320, 260), (128, 255, 128), 1)
        timestamp = datetime.datetime.now()

        if FindColorMode:
            ####>>>OpenCV Start<<<####
            # Threshold in HSV space, clean the mask up, then track the
            # largest blob of the configured colour.
            hsv = cv2.cvtColor(frame_image, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, self.colorLower, self.colorUpper)
            mask = cv2.erode(mask, None, iterations=2)
            mask = cv2.dilate(mask, None, iterations=2)
            cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[-2]
            center = None
            move.move(80, 'forward')
            if len(cnts) > 0:
                cv2.putText(frame_image, 'Target Detected', (40, 60), font,
                            0.5, (255, 255, 255), 1, cv2.LINE_AA)
                c = max(cnts, key=cv2.contourArea)
                ((x, y), radius) = cv2.minEnclosingCircle(c)
                M = cv2.moments(c)
                center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                X = int(x)
                Y = int(y)
                if radius > 10:
                    cv2.rectangle(frame_image,
                                  (int(x - radius), int(y + radius)),
                                  (int(x + radius), int(y - radius)),
                                  (255, 255, 255), 1)

                # Vertical tracking: tilt the camera toward the target.
                if Y < (240 - tor):
                    error = (240 - Y) / 5
                    outv = int(round((pid.GenOut(error)), 0))
                    servo.up(outv)
                    Y_lock = 0
                elif Y > (240 + tor):
                    error = (Y - 240) / 5
                    outv = int(round((pid.GenOut(error)), 0))
                    servo.down(outv)
                    Y_lock = 0
                else:
                    Y_lock = 1

                # Horizontal tracking: pan the camera and steer the chassis.
                if X < (320 - tor * 3):
                    error = (320 - X) / 5
                    outv = int(round((pid.GenOut(error)), 0))
                    servo.lookleft(outv)
                    servo.turnLeft(coe_Genout(error, 64))
                    X_lock = 0
                # FIX: dead zone was asymmetric (330 vs 320) and the error was
                # measured from X=240 instead of the 320-pixel centreline,
                # inconsistent with the left branch above.
                elif X > (320 + tor * 3):
                    error = (X - 320) / 5
                    outv = int(round((pid.GenOut(error)), 0))
                    servo.lookright(outv)
                    servo.turnRight(coe_Genout(error, 64))
                    X_lock = 0
                else:
                    move.motorStop()
                    X_lock = 1

                # Target centred on both axes -> switch outputs on.
                if X_lock == 1 and Y_lock == 1:
                    switch.switch(1, 1)
                    switch.switch(2, 1)
                    switch.switch(3, 1)
                else:
                    switch.switch(1, 0)
                    switch.switch(2, 0)
                    switch.switch(3, 0)
            else:
                # No target: back off briefly, then creep forward searching.
                cv2.putText(frame_image, 'Target Detecting', (40, 60), font,
                            0.5, (255, 255, 255), 1, cv2.LINE_AA)
                move.move(50, 'backward')
                time.sleep(1)
                move.move(70, 'forward')
            ####>>>OpenCV Ends<<<####

        if WatchDogMode:
            # Background-subtraction motion detector.
            gray = cv2.cvtColor(frame_image, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (21, 21), 0)
            if avg is None:
                # First frame seeds the background model.
                print("[INFO] starting background model...")
                avg = gray.copy().astype("float")
                rawCapture.truncate(0)
                continue
            cv2.accumulateWeighted(gray, avg, 0.5)
            frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
            # threshold the delta image, dilate the thresholded image to fill
            # in holes, then find contours on thresholded image
            thresh = cv2.threshold(frameDelta, 5, 255, cv2.THRESH_BINARY)[1]
            thresh = cv2.dilate(thresh, None, iterations=2)
            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)
            for c in cnts:
                # if the contour is too small, ignore it
                if cv2.contourArea(c) < 5000:
                    continue
                # draw the bounding box, flag the motion and light the LEDs
                (x, y, w, h) = cv2.boundingRect(c)
                cv2.rectangle(frame_image, (x, y), (x + w, y + h),
                              (128, 255, 0), 1)
                motionCounter += 1
                LED.colorWipe(255, 16, 0)
                lastMotionCaptured = timestamp
                switch.switch(1, 1)
                switch.switch(2, 1)
                switch.switch(3, 1)
            # FIX: timedelta.seconds is an int, so the original `>= 0.5`
            # threshold silently behaved as >= 1 s; use total_seconds().
            if (timestamp - lastMotionCaptured).total_seconds() >= 0.5:
                LED.colorWipe(255, 255, 0)
                switch.switch(1, 0)
                switch.switch(2, 0)
                switch.switch(3, 0)

        # Stream the (possibly annotated) frame as a base64 JPEG.
        encoded, buffer = cv2.imencode('.jpg', frame_image)
        jpg_as_text = base64.b64encode(buffer)
        footage_socket.send(jpg_as_text)
        rawCapture.truncate(0)  # reset the capture buffer for the next frame
def capture_thread(self,IPinver):
    """Camera capture / streaming thread (line-follow variant).

    Grabs frames from the (module-level) Pi camera forever; per frame it may
    run line finding (FindLineMode), colour tracking (FindColorMode) and the
    motion watchdog (WatchDogMode), then streams the selected frame as a
    base64 JPEG over a ZMQ PUB socket to tcp://<IPinver>:5555.

    :param IPinver: IP address of the video receiver.
    Does not return under normal operation.
    """
    # camera and frame_image are shared module-level state; rawCapture is
    # also a module-level object created elsewhere -- TODO confirm.
    global frame_image, camera
    font = cv2.FONT_HERSHEY_SIMPLEX
    # ZMQ PUB socket used to publish the video stream.
    context = zmq.Context()
    footage_socket = context.socket(zmq.PUB)
    print(IPinver)
    footage_socket.connect('tcp://%s:5555'%IPinver)
    avg = None                 # running background model for the watchdog
    motionCounter = 0
    #time.sleep(4)
    lastMovtionCaptured = datetime.datetime.now()
    for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
        frame_image = frame.array
        # Crosshair at the frame centre (320, 240).
        cv2.line(frame_image,(300,240),(340,240),(128,255,128),1)
        cv2.line(frame_image,(320,220),(320,260),(128,255,128),1)
        timestamp = datetime.datetime.now()
        if FindLineMode:
            cvFindLine()  # populates frame_findline elsewhere -- TODO confirm
        if FindColorMode:
            ####>>>OpenCV Start<<<####
            # Threshold in HSV space and track the largest matching blob.
            hsv = cv2.cvtColor(frame_image, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, colorLower, colorUpper)
            mask = cv2.erode(mask, None, iterations=2)
            mask = cv2.dilate(mask, None, iterations=2)
            cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)[-2]
            center = None
            if len(cnts) > 0:
                cv2.putText(frame_image,'Target Detected',(40,60), font, 0.5,(255,255,255),1,cv2.LINE_AA)
                c = max(cnts, key=cv2.contourArea)
                ((x, y), radius) = cv2.minEnclosingCircle(c)
                M = cv2.moments(c)
                center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                X = int(x)
                Y = int(y)
                if radius > 10:
                    cv2.rectangle(frame_image,(int(x-radius),int(y+radius)),(int(x+radius),int(y-radius)),(255,255,255),1)
                # Vertical tracking: tilt the camera toward the target.
                if Y < (240-tor):
                    error = (240-Y)/5
                    outv = int(round((pid.GenOut(error)),0))
                    servo.up(outv)
                    Y_lock = 0
                elif Y > (240+tor):
                    error = (Y-240)/5
                    outv = int(round((pid.GenOut(error)),0))
                    servo.down(outv)
                    Y_lock = 0
                else:
                    Y_lock = 1
                # Horizontal tracking: pan the camera toward the target.
                if X < (320-tor*3):
                    error = (320-X)/5
                    outv = int(round((pid.GenOut(error)),0))
                    servo.lookleft(outv)
                    #move.move(70, 'no', 'left', 0.6)
                    X_lock = 0
                # NOTE(review): 330 here vs 320 in the left branch gives an
                # asymmetric dead zone, and the error is measured from X=240
                # rather than the 320-pixel centreline -- likely typos; the
                # same pattern appears in the other capture_thread. Confirm
                # before changing.
                elif X > (330+tor*3):
                    error = (X-240)/5
                    outv = int(round((pid.GenOut(error)),0))
                    servo.lookright(outv)
                    #move.move(70, 'no', 'right', 0.6)
                    X_lock = 0
                else:
                    #move.motorStop()
                    X_lock = 1
                # Target centred on both axes -> switch outputs on.
                if X_lock == 1 and Y_lock == 1:
                    switch.switch(1,1)
                    switch.switch(2,1)
                    switch.switch(3,1)
                else:
                    switch.switch(1,0)
                    switch.switch(2,0)
                    switch.switch(3,0)
                # if UltraData > 0.5:
                #     move.move(70, 'forward', 'no', 0.6)
                # elif UltraData < 0.4:
                #     move.move(70, 'backward', 'no', 0.6)
                #     print(UltraData)
                # else:
                #     move.motorStop()
            else:
                # No target found this frame.
                cv2.putText(frame_image,'Target Detecting',(40,60), font, 0.5,(255,255,255),1,cv2.LINE_AA)
                move.motorStop()
        if WatchDogMode:
            # Background-subtraction motion detector.
            gray = cv2.cvtColor(frame_image, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (21, 21), 0)
            if avg is None:
                # First frame seeds the background model.
                print("[INFO] starting background model...")
                avg = gray.copy().astype("float")
                rawCapture.truncate(0)
                continue
            cv2.accumulateWeighted(gray, avg, 0.5)
            frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
            # threshold the delta image, dilate the thresholded image to fill
            # in holes, then find contours on thresholded image
            thresh = cv2.threshold(frameDelta, 5, 255, cv2.THRESH_BINARY)[1]
            thresh = cv2.dilate(thresh, None, iterations=2)
            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)
            for c in cnts:
                # if the contour is too small, ignore it
                if cv2.contourArea(c) < 5000:
                    continue
                # compute the bounding box for the contour, draw it on the frame,
                # and update the text
                (x, y, w, h) = cv2.boundingRect(c)
                cv2.rectangle(frame_image, (x, y), (x + w, y + h), (128, 255, 0), 1)
                text = "Occupied"
                motionCounter += 1
                LED.colorWipe(Color(255,16,0))  # motion: LEDs to orange
                lastMovtionCaptured = timestamp
                switch.switch(1,1)
                switch.switch(2,1)
                switch.switch(3,1)
            # NOTE(review): timedelta.seconds is an int, so ">= 0.5" behaves
            # as ">= 1 second"; total_seconds() would honour the 0.5 s intent.
            if (timestamp - lastMovtionCaptured).seconds >= 0.5:
                LED.colorWipe(Color(255,255,0))  # quiet again: LEDs to yellow
                switch.switch(1,0)
                switch.switch(2,0)
                switch.switch(3,0)
        # Choose the outgoing frame: raw line-finding view, or the annotated
        # camera frame with the current mode text overlaid.
        if FindLineMode and not frameRender:
            encoded, buffer = cv2.imencode('.jpg', frame_findline)
        else:
            cv2.putText(frame_image, modeText,(40,100), font, 0.5,(255,255,255),1,cv2.LINE_AA)
            encoded, buffer = cv2.imencode('.jpg', frame_image)
        jpg_as_text = base64.b64encode(buffer)
        footage_socket.send(jpg_as_text)
        # NOTE(review): printing the full base64 payload every frame floods
        # stdout; presumably leftover debug output -- confirm before removal.
        print(jpg_as_text)
        rawCapture.truncate(0)  # reset the capture buffer for the next frame
def run(self):
    """Function-mode thread loop: servo commands plus autonomous behaviours.

    functionMode selects the active behaviour each tick:
      4 -> line following (findline.run), 5 -> ultrasonic obstacle avoidance,
      6 -> MPU-6050 based camera leveling on the accelerometer Y axis.
    """
    global goal_pos, servo_command, init_get, functionMode
    while self.__running.isSet():
        # wait() returns immediately while the flag is set (True); while it is
        # cleared (False) it blocks until the internal flag becomes True again.
        self.__flag.wait()
        # Manual servo commands are suppressed in mode 6; there up/down are
        # handled specially to re-target the leveling goal.
        if functionMode != 6:
            if servo_command == 'lookleft':
                servo.lookleft(servo_speed)
            elif servo_command == 'lookright':
                servo.lookright(servo_speed)
            elif servo_command == 'up':
                servo.up(servo_speed)
            elif servo_command == 'down':
                servo.down(servo_speed)
            elif servo_command == 'lookup':
                servo.lookup(servo_speed)
            elif servo_command == 'lookdown':
                servo.lookdown(servo_speed)
            elif servo_command == 'grab':
                servo.grab(servo_speed)
            elif servo_command == 'loose':
                servo.loose(servo_speed)
            else:
                pass
        if functionMode == 4:
            # Line following: camera straight ahead, one tracking step.
            servo.ahead()
            findline.run()
            if not functionMode:
                move.motorStop()
        elif functionMode == 5:
            # Ultrasonic obstacle avoidance: back off and turn left when an
            # obstacle is closer than 0.15 (presumably metres -- confirm
            # against ultra.checkdist), otherwise drive forward.
            servo.ahead()
            dis_get = ultra.checkdist()
            if dis_get < 0.15:
                move.motorStop()
                move.move(100, 'backward', 'no', 1)
                move.motorStop()
                move.move(100, 'no', 'left', 1)
                time.sleep(0.2)
                move.motorStop()
            else:
                move.move(100, 'forward', 'no', 1)
            if not functionMode:
                move.motorStop()
        elif functionMode == 6:
            if MPU_connection:
                accelerometer_data = sensor.get_accel_data()
                Y_get = accelerometer_data['y']
                # First pass in this mode: latch the current tilt as the goal.
                if not init_get:
                    goal_pos = Y_get
                    init_get = 1
                # An up/down command nudges the servo, waits for the motion to
                # settle, then re-reads the accelerometer as the new goal.
                if servo_command == 'up':
                    servo.up(servo_speed)
                    time.sleep(0.2)
                    accelerometer_data = sensor.get_accel_data()
                    Y_get = accelerometer_data['y']
                    goal_pos = Y_get
                elif servo_command == 'down':
                    servo.down(servo_speed)
                    time.sleep(0.2)
                    accelerometer_data = sensor.get_accel_data()
                    Y_get = accelerometer_data['y']
                    goal_pos = Y_get
                # Proportional correction toward the goal, dead-banded by
                # tor_pos; step size scales with the error via mpu_speed.
                if abs(Y_get - goal_pos) > tor_pos:
                    if Y_get < goal_pos:
                        servo.down(int(mpu_speed * abs(Y_get - goal_pos)))
                    elif Y_get > goal_pos:
                        servo.up(int(mpu_speed * abs(Y_get - goal_pos)))
                time.sleep(0.03)
                continue  # skip the slower tail sleep while leveling
            else:
                # No MPU available: drop out of mode 6 and pause this thread.
                functionMode = 0
                try:
                    self.pause()
                except:
                    pass  # NOTE(review): bare except silently swallows errors
        time.sleep(0.07)
def run(self):
    """Function-mode thread loop: servo commands plus autonomous behaviours.

    functionMode selects the active behaviour each tick:
      4 -> stream accelerometer OSD telemetry over tcpCliSock,
      5 -> ultrasonic obstacle avoidance,
      6 -> Kalman-filtered X-axis camera stabilisation via PWM channel 4.
    """
    global goal_pos, servo_command, init_get, functionMode
    while self.__running.isSet():
        # wait() returns immediately while the flag is set (True); while it is
        # cleared (False) it blocks until the internal flag becomes True again.
        self.__flag.wait()
        # Manual servo/arm commands are suppressed while stabilising (mode 6).
        if functionMode != 6:
            if servo_command == 'lookleft':
                servo.lookleft(servo_speed)
            elif servo_command == 'lookright':
                servo.lookright(servo_speed)
            elif servo_command == 'up':
                servo.up(servo_speed)
            elif servo_command == 'down':
                servo.down(servo_speed)
            elif servo_command == 'lookup':
                servo.lookup(servo_speed)
            elif servo_command == 'lookdown':
                servo.lookdown(servo_speed)
            elif servo_command == 'grab':
                servo.grab(servo_speed)
            elif servo_command == 'loose':
                servo.loose(servo_speed)
            elif servo_command == 'handup':
                servo.handUp(servo_speed)
            elif servo_command == 'handdown':
                servo.handDown(servo_speed)
            else:
                pass
        if functionMode == 4:
            # OSD telemetry: send rounded X/Y acceleration to the client.
            if MPU_connection:
                try:
                    accelerometer_data = sensor.get_accel_data()
                    Y_get = accelerometer_data['y']
                    X_get = accelerometer_data['x']
                    tcpCliSock.send(
                        ('OSD %f %f' % (round(X_get, 2), round(Y_get, 2))).encode())
                    #print('OSD %f %f'%(round(Y_get,2),round(X_get,2)))
                    time.sleep(0.1)
                except:
                    # NOTE(review): bare except hides everything (including
                    # socket errors); narrowing it would aid debugging.
                    print('MPU_6050 I/O ERROR')
                    pass
        elif functionMode == 5:
            # Ultrasonic obstacle avoidance: back off and turn left when an
            # obstacle is closer than 0.15 (presumably metres -- confirm
            # against ultra.checkdist), otherwise drive forward.
            servo.ahead()
            dis_get = ultra.checkdist()
            if dis_get < 0.15:
                move.motorStop()
                move.move(100, 'backward', 'no', 1)
                move.motorStop()
                move.move(100, 'no', 'left', 1)
                time.sleep(1)
                move.motorStop()
            else:
                move.move(100, 'forward', 'no', 1)
            if not functionMode:
                move.motorStop()
        elif functionMode == 6:
            # Camera stabilisation: Kalman-filter the X acceleration and
            # offset PWM channel 4 around the servo's neutral position.
            if MPU_connection:
                xGet = sensor.get_accel_data()
                print(xGet)
                xGet = xGet['x']
                xOut = kalman_filter_X.kalman(xGet)
                pwm.set_pwm(4, 0, servo.pwm3_pos + pwmGenOut(xOut * 9))
                # pwm.set_pwm(2, 0, self.steadyGoal+pwmGenOut(xGet*10))
                time.sleep(0.05)
                continue  # skip the slower tail sleep while stabilising
            else:
                # No MPU available: drop out of mode 6 and pause servo thread.
                functionMode = 0
                servo_move.pause()
        time.sleep(0.07)