def test_tilts():
    import nidaqmx  # pylint: disable=import-error
    from nidaqmx.constants import LineGrouping, Edge, AcquisitionType, WAIT_INFINITELY
    from motor_control import MotorControl

    # (tilt command, tilt type number, event channel, description)
    tilt_types = [
        ('a', 1, 9, 'Slow Counter Clockwise'),
        ('b', 2, 11, 'Fast Counter Clockwise'),
        ('c', 3, 12, 'Slow Clockwise'),
        ('d', 4, 14, 'Fast Clockwise'),
    ]

    with ExitStack() as stack:
        motor = MotorControl()
        stack.enter_context(motor)

        for tilt_type in tilt_types:
            print("tilt", tilt_type)
            with nidaqmx.Task() as task:
                sample_rate = 1000
                batch_size = 3000
                read_timeout = 4

                task.ai_channels.add_ai_voltage_chan("Dev6/ai8")
                task.timing.cfg_samp_clk_timing(
                    sample_rate,
                    source='',
                    sample_mode=AcquisitionType.CONTINUOUS,
                    samps_per_chan=batch_size,
                )

                motor.tilt(tilt_type[0])
                time.sleep(1.75)
                motor.tilt('stop')

                data = task.read(batch_size, read_timeout)
                # with a single channel configured, read() returns a flat list of samples
                assert len(data) == batch_size
                strobe = data
                print("strobe max", max(strobe))
                # the strobe should be low before and after the tilt, and pulse high during it
                assert strobe[0] < 4
                assert strobe[-1] < 4
                assert any(x > 4 for x in strobe)
            input('press enter to continue')

        motor.close()
def __init__(self):
    # Source to get pi IP: https://www.raspberrypi.org/forums/viewtopic.php?t=79936, user: mikerr
    # UDP_IP = subprocess.check_output(['hostname', '-I']).decode('ascii').strip()
    # UDP_IP = socket.gethostbyname(socket.gethostname())
    # Get ip source: https://stackoverflow.com/questions/48606440/get-ip-address-from-python, user: An0n
    # UDP_PORT = 0
    UDP_IP = '192.168.43.192'
    UDP_PORT = 38049

    # Code for connecting over wifi source: https://wiki.python.org/moin/UdpCommunication
    self.app_socket = socket.socket(
        socket.AF_INET,     # Internet
        socket.SOCK_DGRAM)  # UDP
    self.app_socket.bind((UDP_IP, UDP_PORT))

    # getsockname() reports the bound port directly; parsing repr(socket) is fragile
    port = self.app_socket.getsockname()[1]
    stats.display(UDP_IP, str(port))
    print("Port: ", port)
    # self.send_network_info(UDP_IP, port)

    self.get_data(MotorControl())
    self.app_socket.close()
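# A minimal client sketch for the server above: the address and port are the
# hard-coded values from __init__, but the payload is an assumption, since
# get_data()'s parsing is not shown in this excerpt.
import socket

client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_socket.sendto(b'FWD', ('192.168.43.192', 38049))  # 'FWD' is a made-up command
client_socket.close()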
Flask-powered web app for remote control of ACROBOTIC's wheeled robot PyPi
'''
from flask import Flask, render_template
# Use the socketio module for using websockets
# for easily handling requests in real-time
from flask_socketio import SocketIO, emit
from motor_control import MotorControl
from time import sleep

# Instantiate Flask class
app = Flask(__name__)
# Use the flask object to instantiate the SocketIO class
socketio = SocketIO(app)
# Create the motor control object
mc = MotorControl()

# Create the route(s) to access the web app
@app.route('/')
def handle_index():
    return render_template('index.html')

# Create the function handlers for the different websocket events
@socketio.on('req')  # 'req' is an arbitrary name for my event
def on_message(msg):
    # I expect the message to be formatted in JSON so I can parse
    # it as a Python dictionary and look for specific keys
    direction = msg['direction']  # this will tell me how to move the motors
    if direction != 'STP':
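# The 'req' handler above is truncated in this excerpt, and MotorControl's
# real method names are not shown, so the completion below is hypothetical:
#
#     if direction != 'STP':
#         mc.move(direction)  # assumed API: drive in the requested direction
#     else:
#         mc.stop()           # assumed API: 'STP' means stop
#
# A Flask-SocketIO app is started through the SocketIO wrapper rather than
# app.run(), e.g.:
#
#     if __name__ == '__main__':
#         socketio.run(app, host='0.0.0.0')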
class Moment(object):
    def __init__(self):
        self.stopped = False  # run-loop flag; named so it does not shadow the stop() method

        logging.basicConfig()
        self.logger = logging.getLogger("MMNT")
        if DEBUG:
            self.logger.setLevel(logging.DEBUG)
        else:
            self.logger.setLevel(logging.INFO)
        self.logger.info("Initializing")

        self.masterSampleTime = time.time()
        self.slaveSampleTime = time.time()
        self.humanSampleTime = time.time()
        self.micSampleTime = time.time()

        self.logger.debug("Initializing motor control")
        self.mc = MotorControl()
        self.mc.resetMotors()

        self.logger.debug("Initializing microphone")
        dev = usb.core.find(idVendor=0x2886, idProduct=0x0018)
        if not dev:
            sys.exit("Could not find ReSpeaker Mic Array through USB")
        self.mic = Tuning(dev)
        self.mic.write("NONSTATNOISEONOFF", 1)
        self.mic.write("STATNOISEONOFF", 1)
        self.mic.write("ECHOONOFF", 1)

        self.logger.debug("Initializing video streams")
        self.topCamStream = VideoStream(1)
        self.botCamStream = VideoStream(2)

        self.logger.debug("Initializing models")
        self.ht_model = ht.get_model()
        self.tfPose = TfPoseEstimator(get_graph_path(TF_MODEL),
                                      target_size=(VideoStream.DEFAULT_WIDTH, VideoStream.DEFAULT_HEIGHT))
        self.logger.info("Initialization complete")

        self.topCamState = State.IDLE
        self.botCamState = State.IDLE

        self.topCamAngle = 0
        self.topAngleUpdated = False
        self.botCamAngle = 180
        self.botAngleUpdated = False

        self.master = Cams.TOP
        self.lastMaster = Cams.TOP

        self.botCamProc = None
        self.topCamProc = None

        self.audioMap = Map(15)
        self.checkMic()

    def stop(self):
        self.stopped = True

    def updateTopAngle(self, angle):
        if abs(angle - self.topCamAngle) > ANGLE_THRESHOLD and abs(angle - self.botCamAngle) > OVERLAP_THRESHOLD:
            self.topCamAngle = angle
            self.topAngleUpdated = True

    def updateBotAngle(self, angle):
        if abs(angle - self.botCamAngle) > ANGLE_THRESHOLD and abs(angle - self.topCamAngle) > OVERLAP_THRESHOLD:
            self.botCamAngle = angle
            self.botAngleUpdated = True

    def updatePositions(self):
        # Send serial commands
        if self.topAngleUpdated and self.botAngleUpdated:
            self.logger.debug("Top Angle: {}".format(self.topCamAngle))
            self.logger.debug("Bot Angle: {}".format(self.botCamAngle))
            self.topAngleUpdated = False
            self.botAngleUpdated = False
            self.mc.runMotors(self.topCamAngle, self.botCamAngle)
        elif self.topAngleUpdated:
            self.logger.debug("Top Angle: {}".format(self.topCamAngle))
            self.topAngleUpdated = False
            self.mc.runTopMotor(self.topCamAngle)
        elif self.botAngleUpdated:
            self.logger.debug("Bot Angle: {}".format(self.botCamAngle))
            self.botAngleUpdated = False
            self.mc.runBotMotor(self.botCamAngle)

    def isWithinNoiseFov(self, angle):
        topDiff = abs(angle - self.topCamAngle)
        botDiff = abs(angle - self.botCamAngle)

        if topDiff < NOISE_ANGLE_THRESHOLD:
            self.topCamState |= State.NOISE
            if self.topCamState == State.BOTH:
                self.master = Cams.TOP
            return True
        else:
            self.topCamState &= ~State.NOISE

        if botDiff < NOISE_ANGLE_THRESHOLD:
            self.botCamState |= State.NOISE
            if self.botCamState == State.BOTH:
                self.master = Cams.BOT
            return True
        else:
            self.botCamState &= ~State.NOISE

        return False

    def checkMic(self):
        speechDetected, micDOA = self.mic.speech_detected(), self.mic.direction
        if not speechDetected:
            # self.audioMap.update_map_with_no_noise()
            self.topCamState &= ~State.NOISE
            self.botCamState &= ~State.NOISE
            return
        self.logger.debug("speech detected from {}".format(micDOA))

        self.audioMap.update_map_with_noise(micDOA)
        primaryMicDOA, secondaryMicDOA = self.audioMap.get_POI_location()
        if DEBUG:
            self.audioMap.print_map()
        if primaryMicDOA == -1:
            self.logger.debug("no good audio source")
            return
        self.logger.debug("mapped audio from {}".format(primaryMicDOA))
        # Check if a camera is already looking at the primary noise source
        if self.isWithinNoiseFov(primaryMicDOA):
            # If a camera is already looking, check the secondary noise source
            if secondaryMicDOA == -1:
                self.logger.debug("no good secondary audio source")
                return
            elif self.isWithinNoiseFov(secondaryMicDOA):
                return
            else:
                micDOA = secondaryMicDOA
        else:
            micDOA = primaryMicDOA

        topDiff = abs(micDOA - self.topCamAngle)
        botDiff = abs(micDOA - self.botCamAngle)

        # Neither camera is looking at the noise source at this point.
        # If neither camera is tracking a human, move the closest camera.
        if self.topCamState < State.HUMAN and self.botCamState < State.HUMAN:
            if botDiff < topDiff:
                self.botCamState |= State.NOISE
                self.updateBotAngle(micDOA)
                if self.botCamState == State.IDLE:
                    self.master = Cams.TOP
            else:
                self.topCamState |= State.NOISE
                self.updateTopAngle(micDOA)
                if self.topCamState == State.IDLE:
                    self.master = Cams.BOT
        # One camera is on a human; if the other camera is not, move it.
        elif self.topCamState < State.HUMAN:
            self.topCamState |= State.NOISE
            self.updateTopAngle(micDOA)
            self.master = Cams.BOT
        elif self.botCamState < State.HUMAN:
            self.botCamState |= State.NOISE
            self.updateBotAngle(micDOA)
            self.master = Cams.TOP
        # Both cameras are on a human.
        else:
            # If both are only on a human, move the one that's not master.
            if self.topCamState == State.HUMAN and self.botCamState == State.HUMAN:
                if self.master != Cams.BOT:
                    self.botCamState |= State.NOISE
                    self.updateBotAngle(micDOA)
                else:
                    self.topCamState |= State.NOISE
                    self.updateTopAngle(micDOA)
            # One camera is on HUMAN+NOISE; move the one that's only on a HUMAN.
            elif self.topCamState == State.HUMAN:
                self.topCamState |= State.NOISE
                self.updateTopAngle(micDOA)
                self.master = Cams.BOT
            elif self.botCamState == State.HUMAN:
                self.botCamState |= State.NOISE
                self.updateBotAngle(micDOA)
                self.master = Cams.TOP

    def getBestFace(self, humans):
        midX = -1
        bestHuman = humans[0]
        maxScore = 0
        for human in humans:
            gotMidX = False
            score = 0
            currMidX = -1
            for part in headParts:
                if part in human.body_parts:
                    score += human.body_parts[part].score
                    if not gotMidX:
                        currMidX = human.body_parts[part].x * VideoStream.DEFAULT_WIDTH
                        gotMidX = True
            if score > maxScore:
                maxScore = score
                midX = currMidX
                bestHuman = human
        return bestHuman, midX

    def checkHumans(self, frame, camera):
        humans = self.tfPose.inference(frame, resize_to_default=True, upsample_size=RESIZE_RATIO)
        if len(humans):
            if camera == Cams.TOP:
                self.topCamState |= State.HUMAN
                if self.topCamState == State.BOTH:
                    self.master = Cams.TOP
            else:
                self.botCamState |= State.HUMAN
                if self.botCamState == State.BOTH:
                    self.master = Cams.BOT

            if DISPLAY_VIDEO and DRAW_ON_FRAME:
                TfPoseEstimator.draw_humans(frame, humans, imgcopy=False)

            human, midX = self.getBestFace(humans)

            if ht.is_hands_above_head(human):
                self.logger.debug("HANDS ABOVE HEAD!!!")

            if midX != -1:
                centerDiff = abs(midX - VideoStream.DEFAULT_WIDTH / 2)
                if centerDiff > FACE_THRESHOLD:
                    if midX < VideoStream.DEFAULT_WIDTH / 2:
                        # rotate CCW
                        if camera == Cams.TOP:
                            self.updateTopAngle((self.topCamAngle + centerDiff * degreePerPixel) % 360)
                        else:
                            self.updateBotAngle((self.botCamAngle + centerDiff * degreePerPixel) % 360)
                    elif midX > VideoStream.DEFAULT_WIDTH / 2:
                        # rotate CW
                        if camera == Cams.TOP:
                            self.updateTopAngle((self.topCamAngle - centerDiff * degreePerPixel) % 360)
                        else:
                            self.updateBotAngle((self.botCamAngle - centerDiff * degreePerPixel) % 360)
        else:
            if camera == Cams.TOP:
                self.topCamState &= ~State.HUMAN
            else:
                self.botCamState &= ~State.HUMAN
        return frame
    def playVideo(self, cam):
        if cam == Cams.TOP:
            # kill the other camera's stream if it is still running
            if self.botCamProc is not None and self.botCamProc.poll() is None:
                self.botCamProc.kill()
            self.topCamProc = subprocess.Popen(
                "ffmpeg -f v4l2 -i /dev/video3 -f v4l2 /dev/video5",
                shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        elif cam == Cams.BOT:
            if self.topCamProc is not None and self.topCamProc.poll() is None:
                self.topCamProc.kill()
            self.botCamProc = subprocess.Popen(
                "ffmpeg -f v4l2 -i /dev/video4 -f v4l2 /dev/video5",
                shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

    def start(self):
        Thread(target=self.run, args=()).start()

    def run(self):
        self.stopped = False
        while not self.stopped:
            try:
                topFrame = self.topCamStream.read()
                botFrame = self.botCamStream.read()

                if time.time() - self.humanSampleTime > HUMAN_SAMPLE_FREQ:
                    if topFrame is not None:
                        topFrame = self.checkHumans(topFrame, Cams.TOP)
                    if botFrame is not None:
                        botFrame = self.checkHumans(botFrame, Cams.BOT)
                    self.humanSampleTime = time.time()

                if time.time() - self.micSampleTime > MIC_SAMPLE_FREQ:
                    self.checkMic()
                    self.micSampleTime = time.time()

                self.updatePositions()

                if DISPLAY_VIDEO and topFrame is not None and botFrame is not None:
                    # show the master camera's frame on the left
                    if self.master == Cams.TOP:
                        top_master = np.concatenate((topFrame, botFrame), axis=1)
                        cv.imshow('Master + Slave', top_master)
                    else:
                        bot_master = np.concatenate((botFrame, topFrame), axis=1)
                        cv.imshow('Master + Slave', bot_master)
                    if cv.waitKey(1) == 27:
                        pass
            except KeyboardInterrupt:
                self.logger.debug("Keyboard interrupt! Terminating.")
                break

        self.mc.resetMotors()
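# A minimal entry point for Moment, assuming construction needs no extra setup
# (the original excerpt does not show how the class is launched); start() runs
# the control loop on a background thread.
if __name__ == '__main__':
    moment = Moment()
    moment.start()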
import random


class TiltPlatform(AbstractContextManager):
    def __init__(self, *, mock: bool = False, delay_range: Tuple[float, float]):
        self.delay_range = delay_range
        self.motor = MotorControl(mock=mock)

    def __exit__(self, *exc):
        self.close()

    def stop(self):
        self.motor.tilt('stop')

    def close(self):
        self.motor.close()

    def tilt(self, tilt_type, water=False):
        water_duration = 0.15
        tilt_duration = 1.75

        try:
            tilt_name = {1: 'a', 2: 'b', 3: 'c', 4: 'd'}[tilt_type]
        except KeyError:
            raise ValueError("Invalid tilt type {}".format(tilt_type))

        self.motor.tilt(tilt_name)
        time.sleep(tilt_duration)
        self.motor.tilt('stop')

        if water:
            self.motor.tilt('wateron')
            time.sleep(water_duration)
            self.motor.tilt('stop')

        # delay = ((randint(1,100))/100)+1.5
        delay = random.uniform(*self.delay_range)
        time.sleep(delay)
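# A minimal usage sketch for TiltPlatform, assuming tilt types 1-4 map to the
# four tilt profiles as in the dictionary above; the delay_range value here is
# illustrative, and mock=True avoids touching real hardware.
with TiltPlatform(mock=True, delay_range=(1.5, 2.0)) as platform:
    for tilt_type in (1, 2, 3, 4):
        platform.tilt(tilt_type, water=False)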
from oled_print import OLEDPrint
from wifi import Wifi
from socket_server import SocketServer
from motor_control import MotorControl
from wheel_encoder import WheelEncoder
import global_params

if __name__ == '__main__':
    global_params.init()
    global_params.set_value("motor_control", MotorControl())
    global_params.set_value("wheel_encoder", WheelEncoder())

    oled = OLEDPrint()
    wifi = Wifi(output_fun=oled)
    flag = wifi.do_connect()
    if flag is False:
        oled.output("Wifi Error")
        exit(0)
    ip = wifi.get_ip()

    socket_server = SocketServer(ip, output_fun=oled)
    socket_server.run()
class PsthTiltPlatform(AbstractContextManager):
    def __init__(
        self, *,
        baseline_recording: bool,
        save_template: bool = True,
        template_output_path,
        template_in_path,
        channel_dict,
        mock: bool = False,
        reward_enabled: bool,
    ):
        self.mock = mock
        self.save_template = save_template
        self.template_output_path = template_output_path
        self.template_in_path = template_in_path
        self.reward_enabled = reward_enabled

        if save_template:
            assert self.template_output_path is not None

        self.motor = MotorControl(mock=mock)
        self.motor.tilt('stop')
        self.motor_interrupt = MotorControl(port=1, mock=mock)
        self.motor_interrupt.tilt('stop')

        if mock:
            self.PL_SingleWFType = 0
            self.PL_ExtEventType = 1
            self.plex_client = None
        else:
            from pyplexclientts import PyPlexClientTSAPI, PL_SingleWFType, PL_ExtEventType

            dll_path = Path(__file__).parent / 'bin'
            client = PyPlexClientTSAPI(plexclient_dll_path=dll_path)
            client.init_client()

            self.PL_SingleWFType = PL_SingleWFType
            self.PL_ExtEventType = PL_ExtEventType
            self.plex_client = client

        # channel_dict = {
        #     1: [1], 2: [1,2], 3: [1,2], 4: [1,2],
        #     6: [1,2], 7: [1,2,3,4], 8: [1,2,3],
        #     9: [1,2,3], 13: [1,2,3,4], 14: [1,2],
        #     20: [1,2,3], 25: [1,2], 26: [1], 27: [1], 28: [1],
        #     31: [1],
        #     55: [1,2,3,4],
        # }
        pre_time = 0.0
        post_time = 0.200
        self._post_time = post_time
        bin_size = 0.020
        self.baseline_recording = baseline_recording

        event_num_mapping = {
            1: 1, 2: 2, 3: 3, 4: 4,
            9: 1, 11: 2, 12: 3, 14: 4,
        }
        psth = Psth(channel_dict, pre_time, post_time, bin_size, event_num_mapping=event_num_mapping)
        if not baseline_recording:
            assert template_in_path is not None
        if template_in_path is not None:
            psth.loadtemplate(template_in_path)
        self.psth = psth

        self._mock_state = {
            '_get_ts': {
                's': 'pending',
                't': time.perf_counter(),
            },
        }

        self.no_spike_wait = False
        # time to wait for spikes after the tilt event is received from plexon
        self.fixed_spike_wait_time = None
        # program fails after this amount of time if the tilt isn't received from plexon
        self.fixed_spike_wait_timeout = None
        self.closed = False
        self.delay_range = (1.5, 2)

    def __exit__(self, *exc):
        self.close()

    def close(self, *, save_template=None):
        if self.closed:
            return
        self.closed = True
        if save_template is None:
            save_template = self.save_template
        self.motor.close()
        self.motor_interrupt.close()
        if not self.mock:
            self.plex_client.close_client()
        if save_template:
            self.psth.psthtemplate()
            self.psth.savetemplate(self.template_output_path)

    def _get_ts(self):
        if self.mock:
            # pick the first channel/unit pair from the channel dict
            for k, v in self.psth.channel_dict.items():
                if v:
                    channel = k
                    unit = v[0]
                    break

            class MockEvent:
                Channel: int
                Type: int
                Unit: int
                TimeStamp: float

            time.sleep(0.050)  # wait 50ms to roughly mimic plexon timing
            s = self._mock_state['_get_ts']
            if s['s'] == 'pending':
                # emit a mock tilt event; the next call emits a mock spike
                e = MockEvent()
                e.Type = self.PL_ExtEventType
                e.Channel = 257
                e.Unit = unit
                e.TimeStamp = time.perf_counter()
                s['s'] = 'tilting'
                return [e]
            elif s['s'] == 'tilting':
                e = MockEvent()
                e.Type = self.PL_SingleWFType
                e.Channel = channel
                e.Unit = unit
                e.TimeStamp = time.perf_counter()
                s['s'] = 'pending'
                return [e]
            else:
                assert False
        else:
            res = self.plex_client.get_ts()
            return res

    def tilt(self, tilt_type, water=False, *, sham_result=None, delay=None):
        tilt_record = {
            'system_time': time.perf_counter(),
            'tilt_type': tilt_type,
            'events': [],
            'warnings': [],
            'got_response': None,
            'delay': None,
            'decoder_result': None,
            'decoder_result_source': None,
            'predicted_tilt_type': None,
        }

        def add_event_to_record(event, *, ignored=None, relevent=None):
            rec = {
                'system_time': time.perf_counter(),
                'type': None,
                'ignored': ignored,
                'relevent': relevent,
                'time': event.TimeStamp,
                'channel': event.Channel,
                'unit': event.Unit,
            }
            if event.Type == self.PL_SingleWFType:
                rec['type'] = 'spike'
            elif event.Type == self.PL_ExtEventType:
                rec['type'] = 'tilt'
            else:
                rec['type'] = event.Type
            tilt_record['events'].append(rec)

        water_duration = 0.15
        punish_duration = 2

        if tilt_type == 1:
            # data = tilt1
            tilt_name = 'a'
        elif tilt_type == 2:
            # data = tilt3
            tilt_name = 'b'
        elif tilt_type == 3:
            # data = tilt4
            tilt_name = 'c'
        elif tilt_type == 4:
            # data = tilt6
            tilt_name = 'd'
        else:
            raise ValueError("Invalid tilt type {}".format(tilt_type))

        # drain any events queued before the tilt command is sent
        res = self._get_ts()
        for event in res:
            add_event_to_record(event, ignored=True)

        self.motor.tilt(tilt_name)
        send_tilt_time = time.time()

        found_event = False  # track if a tilt has started yet
        collected_ts = False
        packets_since_tilt = 0
        tilt_time = None
        tilt_plexon_time = None

        while (not found_event or not collected_ts) or self.fixed_spike_wait_time is not None:
            res = self._get_ts()
            if found_event:
                packets_since_tilt += 1

            for t in res:  # 50ms ?
                is_relevent = None
                if t.Type == self.PL_SingleWFType:
                    is_relevent = self.psth.build_unit(t.Channel, t.Unit, t.TimeStamp)
                    if is_relevent:
                        if self.fixed_spike_wait_time or self.no_spike_wait:
                            collected_ts = True
                        elif tilt_plexon_time is not None and \
                                t.TimeStamp >= tilt_plexon_time + self._post_time:
                            collected_ts = True
                elif t.Type == self.PL_ExtEventType:
                    if t.Channel == 257 and found_event:
                        warn_str = "WARNING: received a second tilt event"
                        print(warn_str)
                        tilt_record['warnings'].append(warn_str)
                        is_relevent = False

                    # tilt started
                    if t.Channel == 257 and not found_event:
                        print('Event Ts: {}s Ch: {} Unit: {}'.format(t.TimeStamp, t.Channel, t.Unit))
                        self.psth.event(t.TimeStamp, t.Unit)
                        found_event = True
                        is_relevent = True
                        tilt_time = time.time()
                        tilt_plexon_time = t.TimeStamp

                add_event_to_record(t, relevent=is_relevent)

            if self.no_spike_wait or \
                    (
                        self.fixed_spike_wait_time is not None
                        and tilt_time is not None
                        and time.time() - tilt_time > self.fixed_spike_wait_time
                    ):
                # don't wait for a spike
                if not found_event or not collected_ts:
                    warn_str = "WARNING: no spike events found for trial. THIS SHOULD NOT HAPPEN. TELL DR MOXON"
TELL DR MOXON" print(warn_str) tilt_record['warnings'].append(warn_str) break if self.fixed_spike_wait_timeout is not None and \ (time.time() - send_tilt_time > self.fixed_spike_wait_timeout): raise SpikeWaitTimeout(tilt_record) print('found event and collected ts') if tilt_time is not None: post_tilt_wait_time = time.time() - tilt_time else: post_tilt_wait_time = None print('post tilt wait time', post_tilt_wait_time, 'send', time.time() - send_tilt_time) # print('post send tilt time', time.time() - send_tilt_time) got_response = found_event and collected_ts tilt_record['got_response'] = got_response if got_response: self.psth.psth(True, self.baseline_recording) if not self.baseline_recording: self.psth.psth(False, self.baseline_recording) # ?if not self.baseline_recording and found_event and collected_ts: if not self.baseline_recording: if sham_result is not None: decoder_result = sham_result d_source = 'sham' predicted_tilt_type = None elif got_response: decoder_result = self.psth.decode() d_source = 'psth' predicted_tilt_type = self.psth.decoder_list[-1] else: print("skipping decode due to no spikes") decoder_result = True d_source = 'no_spikes' predicted_tilt_type = None tilt_record['decoder_result_source'] = d_source tilt_record['decoder_result'] = decoder_result tilt_record['predicted_tilt_type'] = predicted_tilt_type print(f"decode {decoder_result}") if decoder_result: if self.reward_enabled: self.motor_interrupt.tilt('reward') self.motor.tilt('wateron') time.sleep(water_duration) self.motor.tilt('stop') self.motor_interrupt.tilt('stop') else: self.motor_interrupt.tilt('punish') time.sleep(punish_duration) self.motor_interrupt.tilt('stop') # time.sleep(2) if delay is None: delay = random.uniform(*self.delay_range) tilt_record['delay'] = delay self.motor.tilt('stop') print(f'delay {delay:.2f}') time.sleep(delay) return tilt_record
def main():
    logging.basicConfig()
    logger = logging.getLogger("MMNT")
    logger.setLevel(logging.INFO)
    logger.info("Initializing")

    masterSampleTime = time.time()
    slaveSampleTime = time.time()

    logger.debug("Initializing motor control")
    mc = MotorControl()
    mc.resetMotors()

    logger.debug("Initializing microphone")
    dev = usb.core.find(idVendor=0x2886, idProduct=0x0018)
    if not dev:
        sys.exit("Could not find ReSpeaker Mic Array through USB")
    mic = Tuning(dev)
    mic.write("NONSTATNOISEONOFF", 1)
    mic.write("STATNOISEONOFF", 1)

    logger.debug("Initializing models")
    ht_model = ht.get_model()
    tfPose = TfPoseEstimator(get_graph_path(TF_MODEL),
                             target_size=(VideoStream.DEFAULT_WIDTH, VideoStream.DEFAULT_HEIGHT))

    logger.debug("Initializing video streams")
    topCamStream = VideoStream(1)
    botCamStream = VideoStream(2)
    topCamStream.start()
    botCamStream.start()

    masterCamID = TOP_CAM_ID
    masterStream = topCamStream
    slaveCamID = BOT_CAM_ID
    slaveStream = botCamStream

    masterTargetAngle = 0
    slaveTargetAngle = 180
    updateMasterAngle = False
    updateSlaveAngle = False
    masterTracking = False

    logger.info("Initialization complete")

    while True:
        try:
            # MASTER
            masterFrame = masterStream.read()
            if time.time() - masterSampleTime > MASTER_SAMPLE_FREQ:
                humans = tfPose.inference(masterFrame, resize_to_default=True, upsample_size=RESIZE_RATIO)
                if len(humans):
                    logger.debug("Master tracking")
                    masterTracking = True
                    if DISPLAY_VIDEO:
                        TfPoseEstimator.draw_humans(masterFrame, humans, imgcopy=False)
                    human = humans[0]
                    if ht.is_hands_above_head(human):
                        logger.debug("HANDS ABOVE HEAD!!!")
                    midX = -1
                    for part in headParts:
                        if part in human.body_parts:
                            midX = human.body_parts[part].x * VideoStream.DEFAULT_WIDTH
                            break
                    if midX != -1:
                        centerDiff = abs(midX - VideoStream.DEFAULT_WIDTH / 2)
                        if centerDiff > FACE_THRESHOLD:
                            if midX < VideoStream.DEFAULT_WIDTH / 2:
                                # rotate CCW
                                masterTargetAngle += centerDiff * degreePerPixel
                            elif midX > VideoStream.DEFAULT_WIDTH / 2:
                                # rotate CW
                                masterTargetAngle -= centerDiff * degreePerPixel
                            masterTargetAngle = masterTargetAngle % 360
                            updateMasterAngle = True
                    masterSampleTime = time.time()
                else:
                    logger.debug("Master stopped tracking")
                    masterTracking = False

            # If master is not tracking a human, move towards speech
            if not masterTracking:
                speechDetected, micDOA = mic.speech_detected(), mic.direction
                logger.debug("master speech detected: {} diff: {}".format(
                    speechDetected, abs(micDOA - masterTargetAngle)))
                if speechDetected and abs(micDOA - masterTargetAngle) > ANGLE_THRESHOLD:
                    masterTargetAngle = micDOA
                    logger.debug("Update master angle: {}".format(masterTargetAngle))
                    masterSampleTime = time.time()
                    updateMasterAngle = True

            # SLAVE
            slaveFrame = slaveStream.read()
            if time.time() - slaveSampleTime > SLAVE_SAMPLE_FREQ:
                # If master is not tracking a human and the slave sees one, move
                # master to the visible human and move the slave away
                if not masterTracking and time.time() - masterSampleTime > MASTER_SAMPLE_FREQ:
                    humans = tfPose.inference(slaveFrame, resize_to_default=True, upsample_size=RESIZE_RATIO)
                    if len(humans):
                        logger.debug("slave found humans")
                        if DISPLAY_VIDEO:
                            TfPoseEstimator.draw_humans(slaveFrame, humans, imgcopy=False)
                        human = humans[0]
                        if ht.is_hands_above_head(human):
                            logger.debug("HANDS ABOVE HEAD!!!")
                        midX = -1
                        for part in headParts:
                            if part in human.body_parts:
                                midX = human.body_parts[part].x * VideoStream.DEFAULT_WIDTH
                                break
                        if midX != -1:
                            centerDiff = abs(midX - VideoStream.DEFAULT_WIDTH / 2)
                            # if centerDiff > FACE_THRESHOLD:
                            if midX < VideoStream.DEFAULT_WIDTH / 2:
                                # rotate CCW
                                masterTargetAngle = slaveTargetAngle + centerDiff * degreePerPixel
                            elif midX > VideoStream.DEFAULT_WIDTH / 2:
                                # rotate CW
                                masterTargetAngle = slaveTargetAngle - centerDiff * degreePerPixel
                            masterTargetAngle = masterTargetAngle % 360
                            updateMasterAngle = True
                            masterSampleTime = time.time()

                            slaveTargetAngle = (masterTargetAngle + 180) % 360
                            updateSlaveAngle = True
                            logger.debug("Moving master to slave: {}".format(masterTargetAngle))

                speechDetected, micDOA = mic.speech_detected(), mic.direction
                speechMasterDiff = abs(micDOA - masterTargetAngle)
                if speechDetected and speechMasterDiff > SLAVE_MASTER_THRESHOLD and \
                        abs(micDOA - slaveTargetAngle) > ANGLE_THRESHOLD:
                    slaveTargetAngle = micDOA
                    logger.debug("Update slave angle: {}".format(slaveTargetAngle))
                    slaveSampleTime = time.time()
                    updateSlaveAngle = True

            # Send serial commands
            if updateSlaveAngle and updateMasterAngle:
                logger.debug("Slave Angle: {}".format(slaveTargetAngle))
                logger.debug("Master Angle: {}".format(masterTargetAngle))
                updateSlaveAngle = False
                updateMasterAngle = False
                if slaveCamID == BOT_CAM_ID:
                    mc.runMotors(masterTargetAngle, slaveTargetAngle)
                else:
                    mc.runMotors(slaveTargetAngle, masterTargetAngle)
            elif updateSlaveAngle:
                mc.runMotor(slaveCamID, slaveTargetAngle)
                logger.debug("Slave Angle: {}".format(slaveTargetAngle))
                updateSlaveAngle = False
            elif updateMasterAngle:
                mc.runMotor(masterCamID, masterTargetAngle)
                logger.debug("Master Angle: {}".format(masterTargetAngle))
                updateMasterAngle = False

            if DISPLAY_VIDEO:
                cv.imshow('Master Camera', masterFrame)
                cv.imshow('Slave Camera', slaveFrame)
                if cv.waitKey(1) == 27:
                    pass
        except KeyboardInterrupt:
            logger.debug("Keyboard interrupt! Terminating.")
            mc.stopMotors()
            slaveStream.stop()
            masterStream.stop()
            mic.close()
            time.sleep(2)
            break

    cv.destroyAllWindows()
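# A plausible entry point, assuming this main() is the module's entry
# (the original excerpt does not show one):
if __name__ == '__main__':
    main()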