def main():
    """Forever loop: read temperature/humidity from the TH02 sensor,
    compute the discomfort index (DI) and display it as an LED color,
    from blue (comfortable) through red/white (very uncomfortable)."""
    #app = QtGui.QApplication(sys.argv)
    # Install a Ctrl-C handler so the infinite loop can be stopped cleanly.
    signal.signal(signal.SIGINT, exit_handler)
    #d = display.display()
    while True:
        time.sleep(0.1)
        temp = th02.readTemp()
        time.sleep(0.1)
        # Second read overwrites the first — presumably a settle/refresh
        # read so only the fresher sample is kept; TODO confirm.
        temp = th02.readTemp()
        #temp = 0
        time.sleep(0.1)
        hum = th02.readHum()
        time.sleep(0.1)
        hum = th02.readHum()
        #print('tempureture: %.2f , humidity: %.2f' % (temp, hum))
        di = th02.outputDI(temp, hum)
        # (original comment, translated) print('discomfort index: %.2f \n' % di)
        # Map DI bands to LED colors, coolest to hottest.
        if di < 65:
            led.blue()
        elif di < 70:
            led.cyan()
        elif di < 75:
            led.green()
        elif di < 80:
            led.yellow()
        elif di < 85:
            led.magenta()
        elif di < 100:
            led.red()
        else:
            led.white()
        #d.initUI(temp, hum, di)
        time.sleep(0.1)
def watch_dis():
    """Poll the ultrasonic sensor; when something comes closer than 15 cm,
    record 2 s of audio and switch the lights via cloud speech recognition.

    Runs forever. Per-iteration errors are swallowed so one bad sensor
    read or recognition failure does not kill the watcher loop.
    """
    voice_dir = '/home/pi/files/IoT/voice/'
    count = 0
    while True:
        try:
            distance = Watcher.dis_d()
            # Log the last 2000 sample indices for debugging, then
            # truncate the log file and restart the counter.
            # (fix: the redundant f.close() inside `with` was removed)
            count += 1
            if count < 2000:
                with open(voice_dir + 'data.txt', 'a+') as f:
                    f.write(str(count) + " ")
            else:
                with open(voice_dir + 'data.txt', 'w+') as f:
                    f.write('0 ')
                count = 0
            # Trigger threshold: 15 cm.
            if distance < 15:
                Led.red()
                # Record a short clip from the USB microphone.
                os.system('arecord -D "plughw:1,0" -d 2 -f S16_LE '
                          + voice_dir + 'test.wav')
                words = Listener.use_cloud(voice_dir + 'test.wav')
                if '关' in words:    # Chinese for "off"
                    Light.off_all()
                elif '开' in words:  # Chinese for "on"
                    Light.on_all()
                else:
                    # Recognition failed: blink the LED as a warning.
                    Led.blink1()
                Led.off_red()
        except Exception:
            # fix: the visible `try:` had no handler (syntax error);
            # mirror the sibling face-watcher and keep looping.
            continue
def cycle(self): v_current = (x, y, z) = self._accel_sensor.acceleration() # print('({0:.2f}, {1:.2f}, {2:.2f})'.format(abs(x), abs(y), abs(z))) pairs = zip(v_current, self._v_last) diffs = [abs(i - j) for i, j in pairs] flags = [i > self._v_delta for i in diffs] if any(flags): if not self._v_detected: led.red() self._minutes = 1 self._v_detected = True self._chrono.start() self._magnitudes[:] = [] self._magnitudes.append((0, self._v_last[0], self._v_last[1], self._v_last[2])) self._t_v_last = self._chrono.read() t = (self._chrono.read(), x, y, z) # print('t:{}, x:{}, y:{}, z:{}'.format(t[0], t[1], t[2], t[3])) self._magnitudes.append(t) if (t[0] > 60 * self._minutes): # send magnitudes each 60 seconds to avoid overflow self._queue.push_mangnitudes(self._magnitudes) self._magnitudes[:] = [] self._minutes += 1 if self._t_v_last > self._t_v_min and not self._in_use: self._queue.push_vibration_started() self._in_use = True elif self._v_detected: t = self._chrono.read() diff = t - self._t_v_last # print('last vibration {} sec ago'.format(diff)) if diff > self._t_v_release: self._chrono.stop() self._chrono.reset() self._v_detected = False self._in_use = False led.green() if t - diff > self._t_v_min: print('DETECTED VIBRATION FOR {} SEC (MIN {} SEC)' .format(t - diff, self._t_v_min)) self._queue.push_vibration_stopped(t - diff) self._queue.push_mangnitudes(self._magnitudes) self._v_last = v_current
def display_stock(stock):
    """Refresh a stock's prices, redraw its LCD output and color the LED
    by daily direction: green = up, red = down, blue = flat or error.

    Parameters
    ----------
    stock : object exposing get_prices(), lcd_clear(), output() and
        current_price / open_price attributes.
    """
    stock.get_prices()
    stock.lcd_clear()
    stock.output()
    try:
        if stock.current_price > stock.open_price:
            led.green()
        elif stock.current_price < stock.open_price:
            led.red()
        else:
            led.blue()
    except TypeError:
        # fix: was a bare `except:` (also caught KeyboardInterrupt etc.).
        # The comparison fails when the feed returned non-numeric data;
        # surface the error on the display and fall back to blue.
        stock.current_price = "PRICE ERROR"
        stock.open_price = "PRICE ERROR"
        print("PRICE ERROR")
        led.blue()
    sleep(2.5)
def test_led():
    """Walk the LED module through every color and flashing mode,
    printing each step and pausing so the hardware can be observed."""
    led.setup()
    # (label, action, pause-seconds); a None label runs silently.
    steps = [
        ("both_on()", lambda: led.both_on(), 3),
        ("red()", lambda: led.red(), 3),
        ("both_off()", lambda: led.both_off(), 1),
        ("yellow()", lambda: led.yellow(), 3),
        ("pink()", lambda: led.pink(), 3),
        ("both_off()", lambda: led.both_off(), 1),
        ("cyan()", lambda: led.cyan(), 3),
        # Flashing in 2 cycles
        ("police_on(2)", lambda: led.police_on(2), 0),
        ("both_off()", lambda: led.both_off(), 3),
        # Flashing for 6 seconds
        ("police_on()", lambda: led.police_on(), 6),
        (None, lambda: led.police_off(), 0),
    ]
    for label, action, pause in steps:
        if label is not None:
            print(label)
        action()
        if pause:
            time.sleep(pause)
def watch_dis():
    """Watch the ultrasonic sensor; when someone stays closer than 60 cm,
    take a photo and turn on the light for a recognized face (score > 85).

    Runs forever; transient sensor/camera/recognition errors only skip
    the current iteration.
    """
    result_dic = {}
    count = 0
    path = '/home/pi/files/IoT/face/storage/test.jpg'
    while True:
        try:
            distance = Watcher.dis_d()
            # Log sample indices for debugging; truncate every 2000 samples.
            # (fix: redundant f.close() inside `with` removed)
            count += 1
            if count < 2000:
                with open('/home/pi/files/IoT/face/data.txt', 'a+') as f:
                    f.write(str(count) + " ")
            else:
                with open('/home/pi/files/IoT/face/data.txt', 'w+') as f:
                    f.write('0 ')
                # fix: counter was never reset, so after 2000 samples the
                # log was rewritten to '0 ' on every single iteration.
                count = 0
            if distance < 60:
                # Debounce: require the person to still be close 0.5 s later.
                time.sleep(0.5)
                if Watcher.dis_d() < 60:
                    Led.red()
                    command = 'raspistill -t 2000 -o ' + path + ' -q 5'
                    os.system(command)
                    Led.blink2()
                    result_dic = iden_myface(path)
                    try:
                        if int(float(result_dic['score'])) > 85:
                            print(result_dic['uid'] + ',please come in!')
                            Light.on(Light.pin1)
                    except (KeyError, ValueError, TypeError):
                        # fix: was bare `except:` — only catch a missing or
                        # malformed score in the recognition result.
                        print('Nothing.')
                    time.sleep(3)
                    Led.off_red()
        except Exception:
            # fix: was bare `except:` — keep watching after transient errors
            # without also swallowing KeyboardInterrupt/SystemExit.
            continue
def run():
    """Voice-assistant main loop: wait for the wake word, then handle one
    follow-up command (tell the time, or read a course schedule).

    LED key: green = listening, red = processing/recognizing,
    cyan = awaiting the follow-up utterance. Exits after one handled
    command (via break) or loops on recognition failure.
    """
    with sr.Microphone() as source:
        print("Please wait. Calibrating microphone...")
        # Sample 2 s of background noise so the recognizer can set its
        # energy threshold.
        r.adjust_for_ambient_noise(source, duration=2)
        while (True):
            l.both_off()
            l.green()
            audio = r.listen(source)
            l.both_off()
            l.red()
            try:
                v_command = r.recognize_google(audio)
                print("(recognize_google) Google thinks you said '" +
                      v_command + "'")
                if wake_word in v_command:
                    espeak.synth("hi, I'm listening")
                    l.both_off()
                    l.cyan()
                    # Second capture: the actual command.
                    audio2 = r.listen(source)
                    l.both_off()
                    l.red()
                    try:
                        v_command2 = r.recognize_google(audio2)
                        print("(recognize_google) Google thinks you said '" +
                              v_command2 + "'")
                        if keywords[0] in v_command2:
                            say_time()
                            break
                        if keywords[1] in v_command2:
                            print("Say a course no.")
                            espeak.synth('Say a course number')
                            l.both_off()
                            l.cyan()
                            # Third capture: the course number.
                            audio3 = r.listen(source)
                            l.both_off()
                            l.red()
                            course_id_str = r.recognize_google(audio3)
                            print(course_id_str)
                            try:
                                course_id = int(course_id_str)
                                say_class_schedule(course_id)
                            except ValueError:
                                print(
                                    "I could not recognize any course number.")
                            break
                    except sr.UnknownValueError:
                        print("I could not understand audio")
            except sr.UnknownValueError:
                print("I could not understand audio")
def run():  #Main loop
    """Robot server main loop.

    Phase 1: detect WiFi (UDP "connect" to 1.1.1.1 to learn our own IP);
    if offline, spin up an access-point thread instead. Either way, wait
    for one TCP client and send it the current servo/speed settings.
    Phase 2: start the worker threads (FPV/OpenCV, WS2812, line-follow,
    speech, auto-drive, distance scan) and dispatch client commands
    forever.
    """
    global hoz_mid, vtr_mid, ip_con, led_status, auto_status, opencv_mode, findline_mode, speech_mode, auto_mode, data, addr, footage_socket, ap_status, turn_status, wifi_status
    led.setup()
    while True:  #Connection
        try:
            # No packet is sent: connecting a UDP socket just resolves the
            # local interface address, which doubles as a WiFi check.
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect(("1.1.1.1", 80))
            ipaddr_check = s.getsockname()[0]
            s.close()
            print(ipaddr_check)
            wifi_status = 1
        except:
            # Offline: start the access-point thread once (yellow LED).
            if ap_status == 0:
                ap_threading = threading.Thread(
                    target=ap_thread)  #Define a thread for data receiving
                ap_threading.setDaemon(
                    True
                )  #'True' means it is a front thread,it would close when the mainloop() closes
                ap_threading.start()  #Thread starts
                led.both_off()
                led.yellow()
                time.sleep(5)
            wifi_status = 0
        if wifi_status == 1:
            print('waiting for connection...')
            led.red()
            tcpCliSock, addr = tcpSerSock.accept(
            )  #Determine whether to connect
            led.both_off()
            led.green()
            print('...connected from :', addr)
            #time.sleep(1)
            # Push the current calibration to the client on connect.
            tcpCliSock.send(
                ('SET %s' % vtr_mid + ' %s' % hoz_mid + ' %s' % left_spd +
                 ' %s' % right_spd + ' %s' % look_up_max +
                 ' %s' % look_down_max).encode())
            print('SET %s' % vtr_mid + ' %s' % hoz_mid + ' %s' % left_spd +
                  ' %s' % right_spd + ' %s' % left + ' %s' % right)
            break
        else:
            # AP-hotspot path: same handshake, blue LED while waiting.
            led.both_off()
            led.blue()
            print('waiting for connection...')
            tcpCliSock, addr = tcpSerSock.accept(
            )  #Determine whether to connect
            led.both_off()
            led.green()
            print('...connected from :', addr)
            #time.sleep(1)
            tcpCliSock.send(
                ('SET %s' % vtr_mid + ' %s' % hoz_mid + ' %s' % left_spd +
                 ' %s' % right_spd + ' %s' % look_up_max +
                 ' %s' % look_down_max).encode())
            print('SET %s' % vtr_mid + ' %s' % hoz_mid + ' %s' % left_spd +
                  ' %s' % right_spd + ' %s' % left + ' %s' % right)
            ap_status = 1
            break
    #FPV initialization — publish JPEG frames to the client over ZMQ.
    context = zmq.Context()
    footage_socket = context.socket(zmq.PUB)
    footage_socket.connect('tcp://%s:5555' % addr[0])
    print(addr[0])
    #Threads start
    video_threading = threading.Thread(
        target=opencv_thread)  #Define a thread for FPV and OpenCV
    video_threading.setDaemon(
        True
    )  #'True' means it is a front thread,it would close when the mainloop() closes
    video_threading.start()  #Thread starts
    ws2812_threading = threading.Thread(
        target=ws2812_thread)  #Define a thread for ws_2812 leds
    ws2812_threading.setDaemon(True)
    ws2812_threading.start()  #Thread starts
    findline_threading = threading.Thread(
        target=findline_thread)  #Define a thread for line tracking
    findline_threading.setDaemon(True)
    findline_threading.start()  #Thread starts
    speech_threading = threading.Thread(
        target=speech_thread)  #Define a thread for speech recognition
    speech_threading.setDaemon(True)
    speech_threading.start()  #Thread starts
    auto_threading = threading.Thread(
        target=auto_thread)  #Define a thread for ultrasonic tracking
    auto_threading.setDaemon(True)
    auto_threading.start()  #Thread starts
    scan_threading = threading.Thread(
        target=dis_scan_thread)  #Define a thread for ultrasonic scan
    scan_threading.setDaemon(True)
    scan_threading.start()  #Thread starts
    # Command dispatch loop: each received string is matched by substring.
    while True:
        data = ''
        data = tcpCliSock.recv(BUFSIZ).decode()
        if not data:
            continue
        elif 'exit' in data:
            os.system("sudo shutdown -h now\n")
        elif 'spdset' in data:
            global spd_ad
            spd_ad = float((str(data))[7:])  #Speed Adjustment
        elif 'scan' in data:
            dis_can = scan()  #Start Scanning
            str_list_1 = dis_can  #Divide the list to make it samller to send
            str_index = ' '  #Separate the values by space
            str_send_1 = str_index.join(str_list_1) + ' '
            tcpCliSock.sendall((str(str_send_1)).encode())  #Send Data
            tcpCliSock.send(
                'finished'.encode()
            )  #Sending 'finished' tell the client to stop receiving the list of dis_can
        elif 'EC1set' in data:  #Camera Adjustment
            new_EC1 = int((str(data))[7:])
            turn.camera_turn(new_EC1)
            replace_num('E_C1:', new_EC1)
        elif 'EC2set' in data:  #Ultrasonic Adjustment
            new_EC2 = int((str(data))[7:])
            replace_num('E_C2:', new_EC2)
            turn.ultra_turn(new_EC2)
        elif 'EM1set' in data:  #Motor A Speed Adjustment
            new_EM1 = int((str(data))[7:])
            replace_num('E_M1:', new_EM1)
        elif 'EM2set' in data:  #Motor B Speed Adjustment
            new_EM2 = int((str(data))[7:])
            replace_num('E_M2:', new_EM2)
        elif 'LUMset' in data:  #Camera look-up limit adjustment
            new_ET1 = int((str(data))[7:])
            replace_num('look_up_max:', new_ET1)
            turn.camera_turn(new_ET1)
        elif 'LDMset' in data:  #Camera look-down limit adjustment
            new_ET2 = int((str(data))[7:])
            replace_num('look_down_max:', new_ET2)
            turn.camera_turn(new_ET2)
        elif 'stop' in data:  #When server receive "stop" from client,car stops moving
            tcpCliSock.send('9'.encode())
            setup()
            motor.motorStop()
            setup()
            if led_status == 0:
                led.setup()
                led.both_off()
            colorWipe(strip, Color(0, 0, 0))
            continue
        elif 'lightsON' in data:  #Turn on the LEDs
            led.both_on()
            led_status = 1
            tcpCliSock.send('lightsON'.encode())
        elif 'lightsOFF' in data:  #Turn off the LEDs
            led.both_off()
            led_status = 0
            tcpCliSock.send('lightsOFF'.encode())
        elif 'middle' in data:  #Go straight
            if led_status == 0:
                led.side_color_off(left_R, left_G)
                led.side_color_off(right_R, right_G)
            else:
                led.side_on(left_B)
                led.side_on(right_B)
            turn_status = 0
            turn.middle()
        elif 'Left' in data:  #Turn left
            if led_status == 0:
                led.side_color_on(left_R, left_G)
            else:
                led.side_off(left_B)
            turn.left()
            turn_status = 1
            tcpCliSock.send('3'.encode())
        elif 'Right' in data:  #Turn right
            if led_status == 0:
                led.side_color_on(right_R, right_G)
            else:
                led.side_off(right_B)
            turn.right()
            turn_status = 2
            tcpCliSock.send('4'.encode())
        elif 'backward' in data:  #When server receive "backward" from client,car moves backward
            tcpCliSock.send('2'.encode())
            motor.motor_left(status, backward, left_spd * spd_ad)
            motor.motor_right(status, forward, right_spd * spd_ad)
            colorWipe(strip, Color(255, 0, 0))
        elif 'forward' in data:  #When server receive "forward" from client,car moves forward
            tcpCliSock.send('1'.encode())
            motor.motor_left(status, forward, left_spd * spd_ad)
            motor.motor_right(status, backward, right_spd * spd_ad)
            colorWipe(strip, Color(0, 0, 255))
        elif 'l_up' in data:  #Camera look up
            # Clamp at look_up_max, then apply and acknowledge.
            if vtr_mid < look_up_max:
                vtr_mid += turn_speed
            turn.camera_turn(vtr_mid)
            tcpCliSock.send('5'.encode())
        elif 'l_do' in data:  #Camera look down
            if vtr_mid > look_down_max:
                vtr_mid -= turn_speed
            turn.camera_turn(vtr_mid)
            print(vtr_mid)
            tcpCliSock.send('6'.encode())
        elif 'l_le' in data:  #Camera look left
            if hoz_mid < look_left_max:
                hoz_mid += turn_speed
            turn.ultra_turn(hoz_mid)
            tcpCliSock.send('7'.encode())
        elif 'l_ri' in data:  #Camera look right
            if hoz_mid > look_right_max:
                hoz_mid -= turn_speed
            turn.ultra_turn(hoz_mid)
            tcpCliSock.send('8'.encode())
        elif 'ahead' in data:  #Camera look ahead
            turn.ahead()
        elif 'Stop' in data:  #When server receive "Stop" from client,Auto Mode switches off
            opencv_mode = 0
            findline_mode = 0
            speech_mode = 0
            auto_mode = 0
            auto_status = 0
            # NOTE(review): dis_scan is not in the global list above, so this
            # assignment only creates a local — confirm intended.
            dis_scan = 1
            tcpCliSock.send('auto_status_off'.encode())
            motor.motorStop()
            led.both_off()
            turn.middle()
            time.sleep(0.1)
            motor.motorStop()
            led.both_off()
            turn.middle()
        elif 'auto' in data:  #When server receive "auto" from client,start Auto Mode
            if auto_status == 0:
                tcpCliSock.send('0'.encode())
                auto_status = 1
                auto_mode = 1
                dis_scan = 0
            else:
                pass
            continue
        elif 'opencv' in data:  #OpenCV target-tracking mode start
            if auto_status == 0:
                auto_status = 1
                opencv_mode = 1
                tcpCliSock.send('oncvon'.encode())
            continue
        elif 'findline' in data:  #Find line mode start
            if auto_status == 0:
                tcpCliSock.send('findline'.encode())
                auto_status = 1
                findline_mode = 1
            else:
                pass
            continue
        elif 'voice_3' in data:  #Speech recognition mode start
            if auto_status == 0:
                auto_status = 1
                speech_mode = 1
                tcpCliSock.send('voice_3'.encode())
            else:
                pass
            continue
def opencv_thread():  #OpenCV and FPV video
    """FPV video thread: stream camera frames to the client as base64 JPEG
    over the footage ZMQ socket; when opencv_mode is on, also track the
    largest color blob (colorLower..colorUpper), pan/tilt toward it and
    drive the car to hold distance_stay from the target."""
    global hoz_mid_orig, vtr_mid_orig
    font = cv2.FONT_HERSHEY_SIMPLEX
    for frame in camera.capture_continuous(rawCapture,
                                           format="bgr",
                                           use_video_port=True):
        image = frame.array
        # Draw the aiming cross at the frame center (640x480 assumed
        # from the constants — TODO confirm resolution).
        cv2.line(image, (300, 240), (340, 240), (128, 255, 128), 1)
        cv2.line(image, (320, 220), (320, 260), (128, 255, 128), 1)
        if opencv_mode == 1:
            # Threshold in HSV and clean the mask up.
            hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, colorLower, colorUpper)
            mask = cv2.erode(mask, None, iterations=2)
            mask = cv2.dilate(mask, None, iterations=2)
            cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[-2]
            center = None
            if len(cnts) > 0:
                led.both_off()
                led.green()
                cv2.putText(image, 'Target Detected', (40, 60), font, 0.5,
                            (255, 255, 255), 1, cv2.LINE_AA)
                # Largest contour is assumed to be the target.
                c = max(cnts, key=cv2.contourArea)
                ((x, y), radius) = cv2.minEnclosingCircle(c)
                M = cv2.moments(c)
                center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                X = int(x)
                Y = int(y)
                if radius > 10:
                    cv2.rectangle(image, (int(x - radius), int(y + radius)),
                                  (int(x + radius), int(y - radius)),
                                  (255, 255, 255), 1)
                # Pan toward the target (proportional step, clamped).
                if X < 310:
                    mu1 = int((320 - X) / 3)
                    hoz_mid_orig += mu1
                    if hoz_mid_orig < look_left_max:
                        pass
                    else:
                        hoz_mid_orig = look_left_max
                    ultra_turn(hoz_mid_orig)
                    #print('x=%d'%X)
                elif X > 330:
                    mu1 = int((X - 330) / 3)
                    hoz_mid_orig -= mu1
                    if hoz_mid_orig > look_right_max:
                        pass
                    else:
                        hoz_mid_orig = look_right_max
                    ultra_turn(hoz_mid_orig)
                    #print('x=%d'%X)
                else:
                    turn.middle()
                    pass
                # Steer the wheels toward where the head is pointing.
                mu_t = 390 - (hoz_mid - hoz_mid_orig)
                v_mu_t = 390 + (hoz_mid + hoz_mid_orig)
                turn.turn_ang(mu_t)
                # Hold distance_stay (+/- 0.1) from the target.
                dis = dis_data
                if dis < (distance_stay - 0.1):
                    led.both_off()
                    led.red()
                    turn.turn_ang(mu_t)
                    motor.motor_left(status, backward, left_spd * spd_ad_u)
                    motor.motor_right(status, forward, right_spd * spd_ad_u)
                    cv2.putText(image, 'Too Close', (40, 80), font, 0.5,
                                (128, 128, 255), 1, cv2.LINE_AA)
                elif dis > (distance_stay + 0.1):
                    motor.motor_left(status, forward, left_spd * spd_ad_2)
                    motor.motor_right(status, backward, right_spd * spd_ad_2)
                    cv2.putText(image, 'OpenCV Tracking', (40, 80), font, 0.5,
                                (128, 255, 128), 1, cv2.LINE_AA)
                else:
                    motor.motorStop()
                    led.both_off()
                    led.blue()
                    cv2.putText(image, 'In Position', (40, 80), font, 0.5,
                                (255, 128, 128), 1, cv2.LINE_AA)
                # Overlay the measured distance when it is in range.
                if dis < 8:
                    cv2.putText(image, '%s m' % str(round(dis, 2)), (40, 40),
                                font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
                # Tilt toward the target (same scheme as pan).
                if Y < 230:
                    mu2 = int((240 - Y) / 5)
                    vtr_mid_orig += mu2
                    if vtr_mid_orig < look_up_max:
                        pass
                    else:
                        vtr_mid_orig = look_up_max
                    camera_turn(vtr_mid_orig)
                elif Y > 250:
                    mu2 = int((Y - 240) / 5)
                    vtr_mid_orig -= mu2
                    if vtr_mid_orig > look_down_max:
                        pass
                    else:
                        vtr_mid_orig = look_down_max
                    camera_turn(vtr_mid_orig)
                # Redraw the cross/box in red when the target is centered.
                if X > 280:
                    if X < 350:
                        #print('looked')
                        cv2.line(image, (300, 240), (340, 240), (64, 64, 255),
                                 1)
                        cv2.line(image, (320, 220), (320, 260), (64, 64, 255),
                                 1)
                        cv2.rectangle(image,
                                      (int(x - radius), int(y + radius)),
                                      (int(x + radius), int(y - radius)),
                                      (64, 64, 255), 1)
            else:
                # No contour found: stop and show "searching" state.
                led.both_off()
                led.yellow()
                cv2.putText(image, 'Target Detecting', (40, 60), font, 0.5,
                            (255, 255, 255), 1, cv2.LINE_AA)
                led_y = 1
                motor.motorStop()
            # Draw the motion trail from the global pts deque.
            # NOTE(review): placement of this loop relative to the if/else
            # above is ambiguous in the original layout — confirm.
            for i in range(1, len(pts)):
                if pts[i - 1] is None or pts[i] is None:
                    continue
                thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
                cv2.line(image, pts[i - 1], pts[i], (0, 0, 255), thickness)
        else:
            # Plain FPV mode: just overlay the distance reading.
            dis = dis_data
            if dis < 8:
                cv2.putText(image, '%s m' % str(round(dis, 2)), (40, 40),
                            font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
        # Encode and publish the frame, then recycle the capture buffer.
        encoded, buffer = cv2.imencode('.jpg', image)
        jpg_as_text = base64.b64encode(buffer)
        footage_socket.send(jpg_as_text)
        rawCapture.truncate(0)
def run():
    '''Main loop (review-annotated variant of the robot server): connect
    to a client over WiFi or a self-hosted AP, start the worker threads,
    then dispatch client command strings forever.'''
    #??? Do these globals make sense
    global hoz_mid, vtr_mid, ip_con, led_status, auto_status, opencv_mode, findline_mode, speech_mode, auto_mode, data, addr, footage_socket, ap_status, turn_status, wifi_status
    led.setup()
    while True:
        #??? only exits from breaks, seems wrong
        #??? should this be if else rather than try except
        #??? while not-connected
        try:
            # try to ping to see if there is wifi
            # checking self ip addr
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect(("1.1.1.1", 80))
            ipaddr_check = s.getsockname()[0]
            s.close()
            print(ipaddr_check)
            wifi_status = 1
        except:
            # if not set up hotspot
            if ap_status == 0:  #??? it always is
                #Define a thread for data receiving
                ap_threading = threading.Thread(target=ap_thread)
                ap_threading.setDaemon(True)
                ap_threading.start()
                led.both_off()
                led.yellow()
                time.sleep(5)  #??? why sleep
            wifi_status = 0  #??? seems unecessary
        if wifi_status == 1:
            print('waiting for connection...')
            led.red()
            #??? pause here and wait for connection?
            tcpCliSock, addr = tcpSerSock.accept()
            led.both_off()
            led.green()
            print('...connected from :', addr)
            #time.sleep(1)
            #TODO: change to format
            #??? is this the best way to do this
            tcpCliSock.send(
                ('SET %s' % vtr_mid + ' %s' % hoz_mid + ' %s' % left_spd +
                 ' %s' % right_spd + ' %s' % look_up_max +
                 ' %s' % look_down_max).encode())
            print('SET %s' % vtr_mid + ' %s' % hoz_mid + ' %s' % left_spd +
                  ' %s' % right_spd + ' %s' % left + ' %s' % right)
            break
        else:
            #??? now assuming ap-hotspot, but never explained
            led.both_off()
            led.blue()
            #??? a lot of this is duplicated in the if statement
            print('waiting for connection...')
            # wait for connection
            tcpCliSock, addr = tcpSerSock.accept()
            led.both_off()
            led.green()
            print('...connected from :', addr)
            #time.sleep(1)
            tcpCliSock.send(
                ('SET %s' % vtr_mid + ' %s' % hoz_mid + ' %s' % left_spd +
                 ' %s' % right_spd + ' %s' % look_up_max +
                 ' %s' % look_down_max).encode())
            print('SET %s' % vtr_mid + ' %s' % hoz_mid + ' %s' % left_spd +
                  ' %s' % right_spd + ' %s' % left + ' %s' % right)
            ap_status = 1
            break
    #??? this is outside the while loop now
    #FPV initialization
    context = zmq.Context()
    footage_socket = context.socket(zmq.PUB)
    footage_socket.connect('tcp://%s:5555' % addr[0])
    print(addr[0])
    #??? why are we starting all these threads now
    #Threads start
    #Define a thread for FPV and OpenCV
    video_threading = threading.Thread(target=opencv_thread)
    video_threading.setDaemon(True)
    video_threading.start()
    #Define a thread for ws_2812 leds
    ws2812_threading = threading.Thread(target=ws2812_thread)
    ws2812_threading.setDaemon(True)
    ws2812_threading.start()
    #Define a thread for line tracking
    findline_threading = threading.Thread(target=findline_thread)
    findline_threading.setDaemon(True)
    findline_threading.start()
    #Define a thread for speech recognition
    speech_threading = threading.Thread(target=speech_thread)
    speech_threading.setDaemon(True)
    speech_threading.start()
    #Define a thread for ultrasonic tracking
    auto_threading = threading.Thread(target=auto_thread)
    auto_threading.setDaemon(True)
    auto_threading.start()
    #Define a thread for ultrasonic scan
    scan_threading = threading.Thread(target=dis_scan_thread)
    scan_threading.setDaemon(True)
    scan_threading.start()
    while True:
        #??? infinite loop
        data = ''  #??? why do we have to define early
        data = tcpCliSock.recv(BUFSIZ).decode()
        print(data)
        #??? BUFSIZ is global
        #??? this should be handled by a dictionary or interrupts
        if not data:
            continue
        elif 'exit' in data:
            quit()
            #??? shutting down is a bad idea
            #TODO: Make a shutdown function
            #TODO: Include proper shutdowns
            #os.system("sudo shutdown -h now\n")
        elif 'spdset' in data:
            #Speed Adjustment
            global spd_ad
            spd_ad = float((str(data))[7:])
        elif 'scan' in data:
            #Start Scanning
            # NOTE(review): 'scan' is a substring of 'scan_rev', so this
            # branch also matches scan_rev commands, making the next
            # branch unreachable — confirm and reorder.
            dis_can = scan()
            #Divide the list to make it samller to send
            str_list_1 = dis_can
            #Separate the values by space
            str_index = ' '
            str_send_1 = str_index.join(str_list_1) + ' '
            #??? this send is surprising
            #Send Data
            tcpCliSock.sendall((str(str_send_1)).encode())
            #Sending 'finished'
            #tell the client to stop receiving the list of dis_can
            tcpCliSock.send('finished'.encode())
        elif 'scan_rev' in data:
            #Start Scanning
            #??? what is scan_rev
            #??? a lot of this is repeated
            dis_can = scan_rev()
            #Divide the list to make it samller to send
            str_list_1 = dis_can
            #Separate the values by space
            str_index = ' '
            str_send_1 = str_index.join(str_list_1) + ' '
            #Send Data
            tcpCliSock.sendall((str(str_send_1)).encode())
            #Sending 'finished'
            #tell the client to stop receiving the list of dis_can
            tcpCliSock.send('finished'.encode())
        #??? what are there names
        elif 'EC1set' in data:  #Camera Adjustment
            new_EC1 = int((str(data))[7:])
            turn.camera_turn(new_EC1)
            replace_num('E_C1:', new_EC1)
        elif 'EC2set' in data:  #Ultrasonic Adjustment
            new_EC2 = int((str(data))[7:])
            replace_num('E_C2:', new_EC2)
            turn.ultra_turn(new_EC2)
        #TODO: fix the code for single motor
        elif 'EM1set' in data:  #Motor A Speed Adjustment
            new_EM1 = int((str(data))[7:])
            replace_num('E_M1:', new_EM1)
        elif 'EM2set' in data:  #Motor B Speed Adjustment
            new_EM2 = int((str(data))[7:])
            replace_num('E_M2:', new_EM2)
        #TODO: fix the code for single motor
        elif 'LUMset' in data:  #Camera look-up limit adjustment
            new_ET1 = int((str(data))[7:])
            replace_num('look_up_max:', new_ET1)
            turn.camera_turn(new_ET1)
        elif 'LDMset' in data:  #Camera look-down limit adjustment
            new_ET2 = int((str(data))[7:])
            replace_num('look_down_max:', new_ET2)
            turn.camera_turn(new_ET2)
        elif 'stop' in data:
            #??? what is the goal of stop
            #??? what is this 9
            tcpCliSock.send('9'.encode())
            #??? why are we doing setup stop setup
            setup()
            motor.motorStop()
            setup()
            #??? why are we doing this
            if led_status == 0:
                led.setup()
                led.both_off()
            #??? what is the colorwipe
            colorWipe(strip, Color(0, 0, 0))
            continue
        elif 'lightsON' in data:  # Turn on the LEDs
            led.both_on()
            #TODO statuses should be handled by a dictionary
            led_status = 1
            tcpCliSock.send('lightsON'.encode())
        elif 'lightsOFF' in data:  # Turn off the LEDs
            led.both_off()
            led_status = 0
            tcpCliSock.send('lightsOFF'.encode())
        elif 'middle' in data:  # Go straight
            if led_status == 0:
                led.side_color_off(left_R, left_G)
                led.side_color_off(right_R, right_G)
            else:
                led.side_on(left_B)
                led.side_on(right_B)
            #TODO status handled better
            turn_status = 0
            turn.middle()
        elif 'Left' in data:  # Turn left
            #TODO: fix the capital case
            if led_status == 0:
                led.side_color_on(left_R, left_G)
            else:
                led.side_off(left_B)
            turn.left()
            turn_status = 1
            #??? what do these numbers map too
            #??? maybe use a dictionary
            tcpCliSock.send('3'.encode())
        elif 'Right' in data:  # Turn right
            #TODO: fix the capital case
            if led_status == 0:
                #TODO: clarify variable names
                led.side_color_on(right_R, right_G)
            else:
                led.side_off(right_B)
            turn.right()
            turn_status = 2
            tcpCliSock.send('4'.encode())
        #TODO: fix for single motor
        elif 'backward' in data:  # Go backward
            motor.motor_left(status, backward, left_spd * spd_ad)
            motor.motor_right(status, forward, right_spd * spd_ad)
            colorWipe(strip, Color(255, 0, 0))
            tcpCliSock.send('2'.encode())
        #TODO: fix for single motor
        elif 'forward' in data:  # Go forward
            motor.motor_left(status, forward, left_spd * spd_ad)
            motor.motor_right(status, backward, right_spd * spd_ad)
            colorWipe(strip, Color(0, 0, 255))
            tcpCliSock.send('1'.encode())
        #TODO: make better names
        elif 'l_up' in data:  #Camera look up
            if vtr_mid < look_up_max:
                vtr_mid += turn_speed
            turn.camera_turn(vtr_mid)
            tcpCliSock.send('5'.encode())
        elif 'l_do' in data:  # Camera look down
            if vtr_mid > look_down_max:
                vtr_mid -= turn_speed
            turn.camera_turn(vtr_mid)
            print(vtr_mid)
            tcpCliSock.send('6'.encode())
        elif 'l_le' in data:  # Camera look left
            if hoz_mid < look_left_max:
                hoz_mid += turn_speed
            turn.ultra_turn(hoz_mid)
            tcpCliSock.send('7'.encode())
        elif 'l_ri' in data:  # Camera look right
            if hoz_mid > look_right_max:
                hoz_mid -= turn_speed
            turn.ultra_turn(hoz_mid)
            tcpCliSock.send('8'.encode())
        elif 'ahead' in data:  # Camera look ahead
            turn.ahead()
        elif 'Stop' in data:
            #TODO: fix capitalization
            opencv_mode = 0
            findline_mode = 0
            speech_mode = 0
            auto_mode = 0
            auto_status = 0
            dis_scan = 1  #??? why is this 1
            tcpCliSock.send('auto_status_off'.encode())
            motor.motorStop()
            led.both_off()
            turn.middle()
            time.sleep(0.1)  #??? why
            #??? why do this twice
            motor.motorStop()
            led.both_off()
            turn.middle()
        elif 'auto' in data:  # start Auto Mode
            #??? which automode is this
            if auto_status == 0:
                tcpCliSock.send('0'.encode())
                auto_status = 1
                auto_mode = 1  #?? dif from automode and autostatus
                dis_scan = 0  #??? dis_scan
            else:
                pass
            continue
        elif 'opencv' in data:  # Start opencv mode
            if auto_status == 0:
                auto_status = 1
                opencv_mode = 1
                #??? what is this name
                tcpCliSock.send('oncvon'.encode())
            continue
        elif 'findline' in data:  # Find line mode start
            if auto_status == 0:
                tcpCliSock.send('findline'.encode())
                auto_status = 1
                findline_mode = 1
            else:
                pass
            continue
        #??? why voice 3
        elif 'voice_3' in data:  #Speech recognition mode start
            if auto_status == 0:
                auto_status = 1
                speech_mode = 1
                tcpCliSock.send('voice_3'.encode())
            else:
                pass
            continue
def opencv_thread():
    '''OpenCV and FPV video (review-annotated variant): stream frames to
    the client and, in opencv mode, track the largest color blob, pan/tilt
    toward it and hold distance_stay from it.'''
    #??? why isn't opencv stuff in it's own module
    #??? do we need these globals (maybe)
    global hoz_mid_orig, vtr_mid_orig
    font = cv2.FONT_HERSHEY_SIMPLEX  #??? necessary?
    #??? rawCapture is defined outside function but isn't global
    for frame in camera.capture_continuous(
            rawCapture,
            format="bgr",  #??? why bgr
            use_video_port=True):
        image = frame.array
        # draw cross on image
        cv2.line(image, (300, 240), (340, 240), (128, 255, 128), 1)
        cv2.line(image, (320, 220), (320, 260), (128, 255, 128), 1)
        if opencv_mode == 1:  #???
            #TODO: make this a function
            # searches for largest yellow object
            #??? where are colorLower and colorUpper defined
            hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, colorLower, colorUpper)
            mask = cv2.erode(mask, None, iterations=2)
            mask = cv2.dilate(mask, None, iterations=2)
            #??? how does findCountours work
            cnts = cv2.findContours(
                mask.copy(),  #??? why use copy
                cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)[-2]
            center = None  #??? necessary
            if len(cnts) > 0:
                led.both_off()
                led.green()
                cv2.putText(image, 'Target Detected', (40, 60), font, 0.5,
                            (255, 255, 255), 1, cv2.LINE_AA)  #??? line aa
                #TODO: no single varible names
                # c is the largest area contour
                c = max(cnts, key=cv2.contourArea)
                # location of moments
                #TODO: make a function
                ((x, y), radius) = cv2.minEnclosingCircle(c)
                X = int(x)
                Y = int(y)
                #TODO: make this a function
                #??? what is this second attempt at finding center
                M = cv2.moments(c)
                center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                if radius > 10:  #TODO: make min size a setting
                    cv2.rectangle(
                        image,  #??? why doesn't this pattern right
                        (int(x - radius), int(y + radius)),
                        (int(x + radius), int(y - radius)),
                        (255, 255, 255),
                        1)
                #TODO: pan head is a function
                #TODO: magic numbers in panning block
                if X < 310:
                    mu1 = int((320 - X) / 3)  #??? why 3
                    #??? what is mu1
                    hoz_mid_orig += mu1
                    if hoz_mid_orig < look_left_max:
                        pass  #??? why is this not if > max
                    else:
                        hoz_mid_orig = look_left_max
                    ultra_turn(hoz_mid_orig)
                    #print('x=%d'%X)
                elif X > 330:
                    mu1 = int((X - 330) / 3)
                    hoz_mid_orig -= mu1
                    if hoz_mid_orig > look_right_max:
                        pass  #??? why is this not if > max
                    else:
                        hoz_mid_orig = look_right_max
                    ultra_turn(hoz_mid_orig)
                    #print('x=%d'%X)
                else:
                    turn.middle()
                    pass  #??? why pass
                #TODO: turn wheels as a function
                #TODO: remove magic numbers
                mu_t = 390 - (hoz_mid - hoz_mid_orig)  #??? what is mu_t
                v_mu_t = 390 + (hoz_mid + hoz_mid_orig)  #??? what is v_mu_t
                turn.turn_ang(mu_t)
                #TODO: seperate as a function
                #??? why not use dis directly
                dis = dis_data  #??? dis_data could be a list
                if dis < (distance_stay - 0.1):
                    #TODO: .1 is magic number
                    led.both_off()
                    led.red()
                    turn.turn_ang(mu_t)  #??? this is already done
                    #TODO: fix for 1 motor
                    motor.motor_left(status, backward, left_spd * spd_ad_u)
                    motor.motor_right(status, forward, right_spd * spd_ad_u)
                    cv2.putText(image, 'Too Close', (40, 80), font, 0.5,
                                (128, 128, 255), 1, cv2.LINE_AA)
                elif dis > (distance_stay + 0.1):
                    #TODO: .1 is magic number
                    #TODO: fix for 1 motor
                    motor.motor_left(status, forward, left_spd * spd_ad_2)
                    motor.motor_right(status, backward, right_spd * spd_ad_2)
                    cv2.putText(image, 'OpenCV Tracking', (40, 80), font, 0.5,
                                (128, 255, 128), 1, cv2.LINE_AA)
                else:
                    motor.motorStop()
                    #TODO: include both_off in all color change functions?
                    led.both_off()
                    led.blue()
                    #TODO: adjust colors with non-magic numbers
                    cv2.putText(image, 'In Position', (40, 80), font, 0.5,
                                (255, 128, 128), 1, cv2.LINE_AA)
                if dis < 8:
                    #TODO: 8 is magic number
                    #??? what is this
                    cv2.putText(image, '%s m' % str(round(dis, 2)), (40, 40),
                                font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
                #TODO: put in tilt function
                #TODO: reposition to be with X
                #TODO: fix magic numbers
                if Y < 230:
                    mu2 = int((240 - Y) / 5)  #??? what is mu2
                    vtr_mid_orig += mu2
                    if vtr_mid_orig < look_up_max:
                        pass  #??? why is if like this
                    else:
                        vtr_mid_orig = look_up_max
                    turn.camera_turn(vtr_mid_orig)  #TODO change to tilt
                elif Y > 250:
                    #TODO: see above
                    mu2 = int((Y - 240) / 5)
                    vtr_mid_orig -= mu2
                    if vtr_mid_orig > look_down_max:
                        pass  #??? why is if like this
                    else:
                        vtr_mid_orig = look_down_max
                    turn.camera_turn(vtr_mid_orig)
                if X > 280:
                    #TODO: fix magic numbers
                    if X < 350:
                        #??? what is happening here
                        # if X in some range
                        #print('looked') #??? what
                        cv2.line(image, (300, 240), (340, 240), (64, 64, 255),
                                 1)
                        cv2.line(image, (320, 220), (320, 260), (64, 64, 255),
                                 1)
                        cv2.rectangle(image,
                                      (int(x - radius), int(y + radius)),
                                      (int(x + radius), int(y - radius)),
                                      (64, 64, 255), 1)
            else:
                # no contours found
                led.both_off()
                #TODO: put this into color changes
                led.yellow()
                cv2.putText(image, 'Target Detecting', (40, 60), font, 0.5,
                            (255, 255, 255), 1, cv2.LINE_AA)
                led_y = 1  #??? never used
                #TODO: choose search patterns not just stop and wait
                motor.motorStop()
            #??? no idea what this block is doing
            # NOTE(review): placement of this trail-drawing loop relative to
            # the if/else above is ambiguous in the original layout — confirm.
            for i in range(1, len(pts)):
                #??? pts is global what is it?
                if pts[i - 1] is None or pts[i] is None:
                    continue
                #TODO: magic numbers
                #??? what is args
                thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
                cv2.line(image, pts[i - 1], pts[i], (0, 0, 255), thickness)
        else:
            # not opencv_mode==1
            #??? why not use directly and its a list
            dis = dis_data
            #Place the distance to closest object on screen
            if dis < 8:
                cv2.putText(image, '%s m' % str(round(dis, 2)), (40, 40),
                            font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
        # send image to client
        encoded, buffer = cv2.imencode('.jpg', image)
        jpg_as_text = base64.b64encode(buffer)
        footage_socket.send(jpg_as_text)
        rawCapture.truncate(0)
led_threading = threading.Thread( target=led_thread) #Define a thread for ws_2812 leds led_threading.setDaemon( True ) #'True' means it is a front thread,it would close when the mainloop() closes led_threading.start() #Thread starts tasks = [ remote_control.read_gamepad_input(), remote_control.rumble(), read_gamepad_inputs() ] loop.run_until_complete( asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)) led.red() loop.run_until_complete(removetasks(loop)) motor.destroy() disconnect_sound.play(1.0) except Exception as e: print("Error occured " + str(e)) finally: if remote_control != None: remote_control.power_on = False #remote_control.erase_rumble() if (strip != None): led_strip.colorWipe(strip, Color(0, 0, 0)) print("Closing async loop..") loop.run_until_complete(loop.shutdown_asyncgens())
def run():
    """Record a short voice command and execute the matching drive action.

    Listens on microphone device 2 for 2 s, recognizes one of the keywords
    forward/backward/left/right/stop with PocketSphinx, stores the result in
    the module-level ``v_command``, and drives the motors accordingly.
    LED colors signal state: yellow = listening, blue = recognizing,
    cyan = recognized, red = recognition failed.

    Fix vs. original: on a failed recognition we return immediately instead
    of falling through and re-executing the stale previous ``v_command``.
    """
    global v_command

    def _drive(dir_left, dir_right, scale):
        # Run both motors for 2 s, then stop. The two sides are driven in
        # opposite directions (forward/backward) — presumably because of how
        # the motors are mounted; same convention as the original code.
        motor.motor_left(status, dir_left, left_spd * scale)
        motor.motor_right(status, dir_right, right_spd * scale)
        time.sleep(2)
        motor.motorStop()

    # obtain audio from the microphone
    r = sr.Recognizer()
    with sr.Microphone(device_index=2, sample_rate=48000) as source:
        r.record(source, duration=2)
        #r.adjust_for_ambient_noise(source)
        led.both_off()
        led.yellow()
        print("Command?")
        audio = r.listen(source)
        led.both_off()
        led.blue()
        try:
            # You can add your own command here
            v_command = r.recognize_sphinx(audio,
                                           keyword_entries=[('forward', 1.0),
                                                            ('backward', 1.0),
                                                            ('left', 1.0),
                                                            ('right', 1.0),
                                                            ('stop', 1.0)])
            print(v_command)
            led.both_off()
            led.cyan()
        except sr.UnknownValueError:
            print("say again")
            led.both_off()
            led.red()
            return  # nothing recognized: do not replay the previous command
        except sr.RequestError:
            led.both_off()
            led.red()
            return  # recognizer unavailable: do not replay the previous command

        if 'forward' in v_command:
            _drive(forward, backward, spd_ad_2)
        elif 'backward' in v_command:
            # NOTE(review): original drives backward at full speed (no
            # spd_ad_2 scaling) unlike the other directions — preserved.
            _drive(backward, forward, 1)
        elif 'left' in v_command:
            turn.left()
            _drive(forward, backward, spd_ad_2)
        elif "right" in v_command:
            turn.right()
            _drive(forward, backward, spd_ad_2)
        elif 'stop' in v_command:
            motor.motorStop()
def run():  #Main loop
    """TCP command server: accept one client, start the tracking threads,
    then dispatch the client's text commands to motors, servos and LEDs.

    Commands are matched by substring (``'xxx' in data``), so the order of
    the elif chain matters. Fix vs. original: 'scan_rev' is now tested
    BEFORE 'scan' — 'scan' is a substring of 'scan_rev', so the original
    order made the scan_rev branch unreachable.
    """
    global hoz_mid, vtr_mid, ip_con, led_status, auto_status, opencv_mode, findline_mode, speech_mode, auto_mode, data, addr, footage_socket, ap_status, turn_status, wifi_status
    led.setup()

    while True:  #Connection
        print('waiting for connection...')
        led.red()
        tcpCliSock, addr = tcpSerSock.accept()  #Determine whether to connect
        led.both_off()
        led.green()
        print('...connected from :', addr)
        #time.sleep(1)
        # Send the current servo/speed calibration values to the client.
        tcpCliSock.send(('SET %s' % vtr_mid + ' %s' % hoz_mid +
                         ' %s' % left_spd + ' %s' % right_spd +
                         ' %s' % look_up_max + ' %s' % look_down_max).encode())
        print('SET %s' % vtr_mid + ' %s' % hoz_mid + ' %s' % left_spd +
              ' %s' % right_spd + ' %s' % left + ' %s' % right)
        break

    #Threads start (daemons: they die when the main loop exits)
    findline_threading = threading.Thread(target=findline_thread)  #Thread for line tracking
    findline_threading.setDaemon(True)
    findline_threading.start()
    auto_threading = threading.Thread(target=auto_thread)  #Thread for ultrasonic tracking
    auto_threading.setDaemon(True)
    auto_threading.start()
    time.sleep(0.5)
    tcpCliSock.send('TestVersion'.encode())

    while True:  #Command loop
        data = tcpCliSock.recv(BUFSIZ).decode()
        if not data:
            continue
        elif 'exit' in data:
            pass  # TODO(review): 'exit' is currently a no-op — confirm intent
        elif 'spdset' in data:  #Speed adjustment
            global spd_ad
            spd_ad = float((str(data))[7:])
        elif 'scan_rev' in data:  # must precede 'scan' ('scan' is a substring)
            dis_can = scan_rev()  #Start scanning
            str_list_1 = dis_can  #Divide the list to make it smaller to send
            str_index = ' '  #Separate the values by space
            str_send_1 = str_index.join(str_list_1) + ' '
            tcpCliSock.sendall((str(str_send_1)).encode())  #Send data
            #Sending 'finished' tells the client to stop receiving dis_can
            tcpCliSock.send('finished'.encode())
        elif 'scan' in data:
            dis_can = scan()  #Start scanning
            str_list_1 = dis_can
            str_index = ' '  #Separate the values by space
            str_send_1 = str_index.join(str_list_1) + ' '
            tcpCliSock.sendall((str(str_send_1)).encode())  #Send data
            tcpCliSock.send('finished'.encode())
        elif 'EC1set' in data:  #Camera adjustment
            new_EC1 = int((str(data))[7:])
            turn.camera_turn(new_EC1)
            replace_num('E_C1:', new_EC1)
        elif 'EC2set' in data:  #Ultrasonic adjustment
            new_EC2 = int((str(data))[7:])
            replace_num('E_C2:', new_EC2)
            turn.ultra_turn(new_EC2)
        elif 'EM1set' in data:  #Motor A speed adjustment
            new_EM1 = int((str(data))[7:])
            replace_num('E_M1:', new_EM1)
        elif 'EM2set' in data:  #Motor B speed adjustment
            new_EM2 = int((str(data))[7:])
            replace_num('E_M2:', new_EM2)
        elif 'LUMset' in data:  #Camera look-up limit adjustment
            new_ET1 = int((str(data))[7:])
            replace_num('look_up_max:', new_ET1)
            turn.camera_turn(new_ET1)
        elif 'LDMset' in data:  #Camera look-down limit adjustment
            new_ET2 = int((str(data))[7:])
            replace_num('look_down_max:', new_ET2)
            turn.camera_turn(new_ET2)
        elif 'stop' in data:  #Car stops moving
            tcpCliSock.send('9'.encode())
            setup()
            motor.motorStop()
            setup()
            if led_status == 0:
                led.setup()
                led.both_off()
            continue
        elif 'lightsON' in data:  #Turn on the LEDs
            led.both_on()
            led_status = 1
            tcpCliSock.send('lightsON'.encode())
        elif 'lightsOFF' in data:  #Turn off the LEDs
            led.both_off()
            led_status = 0
            tcpCliSock.send('lightsOFF'.encode())
        elif 'middle' in data:  #Go straight
            if led_status == 0:
                led.side_color_off(left_R, left_G)
                led.side_color_off(right_R, right_G)
            else:
                led.side_on(left_B)
                led.side_on(right_B)
            turn_status = 0
            turn.middle()
        elif 'Left' in data:  #Turn left
            if led_status == 0:
                led.side_color_on(left_R, left_G)
            else:
                led.side_off(left_B)
            turn.left()
            tcpCliSock.send('3'.encode())
        elif 'Right' in data:  #Turn right
            if led_status == 0:
                led.side_color_on(right_R, right_G)
            else:
                led.side_off(right_B)
            turn.right()
            tcpCliSock.send('4'.encode())
        elif 'backward' in data:  #Car moves backward
            tcpCliSock.send('2'.encode())
            motor.motor_left(status, backward, left_spd * spd_ad)
            motor.motor_right(status, forward, right_spd * spd_ad)
        elif 'forward' in data:  #Car moves forward
            tcpCliSock.send('1'.encode())
            motor.motor_left(status, forward, left_spd * spd_ad)
            motor.motor_right(status, backward, right_spd * spd_ad)
        elif 'l_up' in data:  #Camera look up
            if vtr_mid < look_up_max:
                vtr_mid += turn_speed
                turn.camera_turn(vtr_mid)
            tcpCliSock.send('5'.encode())
        elif 'l_do' in data:  #Camera look down
            if vtr_mid > look_down_max:
                vtr_mid -= turn_speed
                turn.camera_turn(vtr_mid)
            tcpCliSock.send('6'.encode())
        elif 'l_le' in data:  #Camera look left
            if hoz_mid < look_left_max:
                hoz_mid += turn_speed
                turn.ultra_turn(hoz_mid)
            tcpCliSock.send('7'.encode())
        elif 'l_ri' in data:  #Camera look right
            if hoz_mid > look_right_max:
                hoz_mid -= turn_speed
                turn.ultra_turn(hoz_mid)
            tcpCliSock.send('8'.encode())
        elif 'ahead' in data:  #Camera look ahead
            turn.ahead()
        elif 'Stop' in data:  #Auto mode switches off
            findline_mode = 0
            auto_mode = 0
            auto_status = 0
            tcpCliSock.send('auto_status_off'.encode())
            # Stop twice with a short delay to make sure the hardware settles.
            motor.motorStop()
            led.both_off()
            turn.middle()
            time.sleep(0.1)
            motor.motorStop()
            led.both_off()
            turn.middle()
        elif 'auto' in data:  #Start auto (obstacle-avoidance) mode
            if auto_status == 0:
                tcpCliSock.send('0'.encode())
                auto_status = 1
                auto_mode = 1
                dis_scan = 0
            continue
        elif 'findline' in data:  #Start line-following mode
            if auto_status == 0:
                tcpCliSock.send('findline'.encode())
                auto_status = 1
                findline_mode = 1
            continue
def start_main():
    """Obstacle-avoidance loop: drive forward while the path is clear,
    otherwise look left, then right, then back up and turn.

    Uses the ultrasonic sensor (``sensor.getDistance()``) against the
    KYORI threshold, a pan servo to look around, and sounds/LEDs for
    feedback. NOTE(review): nesting below is reconstructed from
    whitespace-mangled source — confirm against the original file.
    """
    init()
    print("init")
    while True:
        print("while")
        # Path is clear: drive forward for a short burst.
        if sensor.getDistance() >= KYORI:
            motor.drive(motor.FORWARD)
            print("GO Forward")
            sound.CLOCK()
            led.blue_blink(0.8)
            time.sleep(0.5)
        # Obstacle ahead: stop and look for a way around.
        else:  #sensor.getDistance() <= KYORI:
            sound.NG()
            motor.stop()
            led.red_blink(0.8)
            # Check the left side.
            servo.drive_servo(servo.LEFT)
            # Left is clear: turn that way.
            if sensor.getDistance() >= KYORI:
                sound.OK()
                led.blue_blink(0.8)
                motor.drive(motor.TURN_LEFT)
                print("TURN LEFT")
                led.seq_back(3)
                time.sleep(1)
                servo.drive_servo(servo.FRONT)
            else:
                sound.NG()
                led.red_blink(0.8)
                # Left blocked: check the right side.
                servo.drive_servo(servo.RIGHT)
                # Right is clear: turn that way.
                if sensor.getDistance() >= KYORI:
                    sound.OK()
                    led.blue_blink(0.8)
                    motor.drive(motor.TURN_RIGHT)
                    print("TURN RIGHT")
                    led.seq_forward(3)
                    time.sleep(1)
                    servo.drive_servo(servo.FRONT)
                # Boxed in: back up, then re-check.
                else:
                    sound.NG()
                    led.red()
                    print("led.red")
                    motor.drive(motor.BACKWARD)
                    print("motor.drive")
                    time.sleep(2)
                    servo.drive_servo(servo.FRONT)
                    # End: still blocked after backing up — shut everything
                    # down (sensor, servo sweep, GPIO).
                    if sensor.getDistance() <= KYORI:
                        sound.END()
                        motor.end()
                        led.seq_forward(1.2)
                        led.seq_back(1.2)
                        time.sleep(1)
                        servo.drive_servo(servo.RIGHT)
                        servo.drive_servo(servo.LEFT)
                        servo.drive_servo(servo.RIGHT)
                        servo.drive_servo(servo.FRONT)
                        sensor.end()
                        servo.end()
                        GPIO.cleanup()
                    # Backing up opened space: turn right and carry on.
                    else:
                        sound.OK()
                        motor.drive(motor.TURN_RIGHT)
                        time.sleep(1)