def loadSetting():
    # These flags are read by the module-level main loop, so they must be
    # declared global for a reload to take effect.
    global mic_enable, sound_enable, microphone
    mic_enable = int(ps.get_setting("settings.ini", "Settings", "Mic"))
    sound_enable = int(ps.get_setting("settings.ini", "Settings", "Sound"))
    if int(ps.get_setting("settings.ini", "Settings", "Operation mode")) == 0:
        microphone = microphone_device_0
    else:
        microphone = microphone_device_1
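# The get_setting/update_setting helpers used throughout this listing (imported
# as "ps" and "settings") are not shown. A minimal sketch, assuming they are thin
# wrappers around the standard configparser module, could look like this:
import configparser

def get_setting(path, section, setting):
    # Read a single value from the INI file (returned as a string).
    config = configparser.ConfigParser()
    config.read(path)
    return config[section][setting]

def update_setting(path, section, setting, value):
    # Rewrite the INI file with one value changed.
    config = configparser.ConfigParser()
    config.read(path)
    config[section][setting] = value
    with open(path, "w") as config_file:
        config.write(config_file)

# Assumed shape of settings.ini, inferred from the keys referenced in the code
# (the battery value is illustrative):
# [Settings]
# Mic = 1
# Sound = 1
# Face recognition = 1
# Subject monitoring = 0
# Operation mode = 0
# Battery = 100
# [PIDS]
# Interaction = 0
# Sound = 0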
def send_command_to_interaction(command):
    setting = settings.get_setting("settings.ini", "PIDS", "Interaction")
    if setting != "0":
        # Wake the interaction process so it starts reading from its FIFO.
        os.kill(int(setting), signal.SIGUSR1)
        with open(FIFO_Interaction, "w") as fifo:
            print("FIFO opened")
            fifo.write(command)
def send_command_to_sound(command):
    setting = settings.get_setting("settings.ini", "PIDS", "Sound")
    if setting != "0":
        # Wake the sound process so it starts reading from its FIFO.
        os.kill(int(setting), signal.SIGUSR1)
        with open(FIFO_Sound, "w") as fifo:
            print("FIFO opened")
            fifo.write(command)
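# send_command_to_interaction/send_command_to_sound assume the named pipes
# FIFO_Interaction and FIFO_Sound already exist. A sketch of how they could be
# created once at startup (the actual paths are defined elsewhere in the original
# code and are not shown here):
import os

for fifo_path in (FIFO_Interaction, FIFO_Sound):
    if not os.path.exists(fifo_path):
        os.mkfifo(fifo_path)  # create the named pipe if it is missing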
def optionsStatus():
    status = {}
    status['sound'] = settings.get_setting("settings.ini", "Settings", "Sound")
    status['mic'] = settings.get_setting("settings.ini", "Settings", "Mic")
    status['facerec'] = settings.get_setting("settings.ini", "Settings", "Face recognition")
    status['submon'] = settings.get_setting("settings.ini", "Settings", "Subject monitoring")
    status['opmode'] = settings.get_setting("settings.ini", "Settings", "Operation mode")
    status['battery'] = settings.get_setting("settings.ini", "Settings", "Battery")
    # JSON encode and transmit response
    response = json.dumps(status)
    client_sock.send(response.encode())
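# Example of the JSON string a connected client receives from optionsStatus(),
# assuming the flag settings are stored as "0"/"1" strings (the battery value is
# illustrative, not taken from the original code):
#   {"sound": "1", "mic": "1", "facerec": "1", "submon": "0", "opmode": "0", "battery": "100"}
# A client can decode it with:
#   status = json.loads(client_sock.recv(1024).decode())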
with open(FIFO, "r" ) as fifo: print("FIFO opened") for line in fifo: runCommand(line) return def loadSetting(): mic_enable = int(ps.get_setting("settings.ini","Settings","Mic")) sound_enable = int(ps.get_setting("settings.ini","Settings","Sound")) if int(ps.get_setting("settings.ini","Settings","Operation mode")) == 0: microphone = microphone_device_0 else: microphone = microphone_device_1 ps.update_setting("settings.ini", "PIDS", "Sound", str(os.getpid())) signal.signal(signal.SIGUSR1, receiveSignal) loadSetting() #dictionary #/home/<user>/.local/lib/python3.7/site-packages/speech_recognition/pocketsphinx-data/en-US while True: if find: out = subprocess.Popen(['aplay','-D', microphone_device_0 ,'/home/Robo/Sounds/play.wav'], stderr=subprocess.STDOUT, shell=False) out.wait() continue if sound_enable == 0 or mic_enable == 0:
def loadSetting():
    # These flags are read by the module-level interaction loop, so they must be
    # declared global for a reload to take effect.
    global face_recognition, subject_monitoring, sound_enable, movement_enable
    face_recognition = int(ps.get_setting("settings.ini", "Settings", "Face recognition"))
    subject_monitoring = int(ps.get_setting("settings.ini", "Settings", "Subject monitoring"))
    sound_enable = int(ps.get_setting("settings.ini", "Settings", "Sound"))
    movement_enable = int(ps.get_setting("settings.ini", "Settings", "Operation mode"))
                  FaceRecognitionProcessing.detect.iy,
                  FaceRecognitionProcessing.detect.iw,
                  FaceRecognitionProcessing.detect.ih)
    count = 0
    detected = 0
            detected += 1
        else:
            count += 1
    else:
        count += 1
        print('no face')
    return result


# Register the interaction process PID, install the SIGUSR1 handler, and load settings.
ps.update_setting("settings.ini", "PIDS", "Interaction", str(os.getpid()))
signal.signal(signal.SIGUSR1, receiveSignal)
print('Signal handler set')
loadSetting()
print('Process settings loaded')

MovementProcessing.Initial_State()

while True:
    if not Search():
        print('user not found')
        if movement_enable == 1:
            if sound_enable == 1:
                os.system('mpg321 -a plughw:1,0 ./Sounds/blip2.mp3')
            MovementProcessing.Movement_Interaction()
    time.sleep(1)
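# A hypothetical command handler for the interaction process (in the real file it
# would be defined before receiveSignal is registered; it is shown here only as an
# illustration). It is based on the strings parseCommand writes to FIFO_Interaction:
# "add", "remove <name>", "opmode <0|1>", "submon <0|1>", "facerec <0|1>",
# "mic <0|1>", "sound <0|1>". The original handler is not part of this listing, so
# the body below is an assumption:
def runCommand(line):
    parts = line.strip().split(" ")
    if parts[0] == "add":
        pass  # enrol a new subject (original implementation not shown)
    elif parts[0] == "remove" and len(parts) == 2:
        pass  # remove the stored subject named parts[1] (not shown)
    else:
        # The toggle commands only mirror values already written to settings.ini,
        # so re-reading the settings is enough to pick them up.
        loadSetting()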
def parseCommand(command, parts, count):
    if command == '':
        return
    elif command == '/':
        root()
    elif command == '/status/':
        optionsStatus()
    elif command == '/subjects/':
        count_subjects()
    elif command == '/add/':
        send_command_to_interaction("add")
    elif command[0:8] == '/remove/' and count == 3:
        send_command_to_interaction("remove " + parts[2])
    elif command == '/restart/':
        os.system('reboot')
    elif command == '/find/':
        send_command_to_sound("find")
    elif command == '/wifi_connect/':
        handle_client(client_sock)
    elif command[0:8] == '/opmode/' and count == 3:
        enable = parts[2]
        settings.update_setting("settings.ini", "Settings", "Operation mode", enable)
        soundEn = settings.get_setting("settings.ini", "Settings", "Operation mode")
        send_command_to_interaction("opmode " + soundEn)
    elif command[0:8] == '/submon/' and count == 3:
        enable = parts[2]
        settings.update_setting("settings.ini", "Settings", "Subject monitoring", enable)
        soundEn = settings.get_setting("settings.ini", "Settings", "Subject monitoring")
        send_command_to_interaction("submon " + soundEn)
    elif command[0:9] == '/facerec/' and count == 3:
        enable = parts[2]
        settings.update_setting("settings.ini", "Settings", "Face recognition", enable)
        soundEn = settings.get_setting("settings.ini", "Settings", "Face recognition")
        send_command_to_interaction("facerec " + soundEn)
    elif command[0:5] == '/mic/' and count == 3:
        enable = parts[2]
        settings.update_setting("settings.ini", "Settings", "Mic", enable)
        soundEn = settings.get_setting("settings.ini", "Settings", "Mic")
        send_command_to_interaction("mic " + soundEn)
    elif command[0:7] == '/sound/' and count == 3:
        enable = parts[2]
        settings.update_setting("settings.ini", "Settings", "Sound", enable)
        soundEn = settings.get_setting("settings.ini", "Settings", "Sound")
        send_command_to_interaction("sound " + soundEn)
    elif command[0:7] == '/photo/':
        number_ = 1
        if count == 3:
            number_ = parts[2]
        print("Count = " + str(count) + " part[2] = " + str(parts[2]) + " number = " + str(number_))
        imagePath = '/home/raspberry/data/subject.' + str(number_) + '.1.jpg'
        with open(imagePath, mode='rb') as file:
            fileContent = file.read()
        size = len(fileContent)
        print("{" + str(size) + "}")
        client_sock.settimeout(5.0)
        # Send the image size first, then the raw bytes.
        client_sock.sendall(struct.pack(">L", size) + fileContent)
        client_sock.settimeout(None)
        print("Sent photo done")
    else:
        print('unrecognized command')
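# A minimal sketch of how the server loop could feed parseCommand, assuming each
# request arrives over client_sock as a slash-separated string such as "/remove/3"
# (so that parts[2] carries the argument and count == 3). The accept/receive code
# around parseCommand is not part of the original listing:
while True:
    data = client_sock.recv(1024)
    if not data:
        break
    command = data.decode().strip()
    parts = command.split('/')     # "/remove/3" -> ['', 'remove', '3']
    count = len(parts)
    parseCommand(command, parts, count)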