Пример #1
0
    def __init__(self):
        """Initialize the MMNT tracker: logging, motors, mic array, cameras, models.

        Hardware is brought up in a fixed order (motors -> microphone ->
        video streams -> pose models); the process exits if the ReSpeaker
        mic array is not found on USB.
        """
        # Run-loop shutdown flag.
        self.stop = False
        logging.basicConfig()
        self.logger = logging.getLogger("MMNT")
        if DEBUG:
            self.logger.setLevel(logging.DEBUG)
        else:
            self.logger.setLevel(logging.INFO)
        self.logger.info("Initializing")

        # Timestamps used to throttle how often each subsystem is sampled.
        self.masterSampleTime = time.time()
        self.slaveSampleTime = time.time()

        self.humanSampleTime = time.time()
        self.micSampleTime = time.time()

        self.logger.debug("Initializing motor control")
        self.mc = MotorControl()
        self.mc.resetMotors()

        self.logger.debug("Initializing microphone")
        # 0x2886:0x0018 is the ReSpeaker Mic Array v2.0 USB vendor/product id.
        dev = usb.core.find(idVendor=0x2886, idProduct=0x0018)
        if not dev:
            sys.exit("Could not find ReSpeaker Mic Array through USB")
        self.mic = Tuning(dev)
        # Enable on-device noise suppression and echo cancellation.
        self.mic.write("NONSTATNOISEONOFF", 1)
        self.mic.write("STATNOISEONOFF", 1)
        self.mic.write("ECHOONOFF", 1)

        self.logger.debug("Initializing video streams")
        self.topCamStream = VideoStream(1)
        self.botCamStream = VideoStream(2)

        self.logger.debug("Initializing models")
        self.ht_model = ht.get_model()
        self.tfPose = TfPoseEstimator(get_graph_path(TF_MODEL),
                                      target_size=(VideoStream.DEFAULT_WIDTH,
                                                   VideoStream.DEFAULT_HEIGHT))
        self.logger.info("Initialization complete")

        # Per-camera tracking state and target angles (degrees).
        self.topCamState = State.IDLE
        self.botCamState = State.IDLE

        self.topCamAngle = 0
        self.topAngleUpdated = False
        self.botCamAngle = 180
        self.botAngleUpdated = False
        # The "master" camera is the one actively steered toward a target.
        self.master = Cams.TOP
        self.lastMaster = Cams.TOP

        self.botCamProc = None
        self.topCamProc = None

        # Audio direction-of-arrival history; 15 is presumably the window
        # size -- confirm against Map's definition.
        self.audioMap = Map(15)
        self.checkMic()
Пример #2
0
def test_tilts():
    """Exercise each tilt command and verify a strobe pulse on Dev6/ai8.

    For every tilt type, runs the motor for 1.75 s while a DAQ task records
    the strobe line, then asserts the strobe is low at both ends of the
    capture and pulsed high somewhere in between.
    """
    import nidaqmx # pylint: disable=import-error
    # pylint: disable=import-error
    from nidaqmx.constants import LineGrouping, Edge, AcquisitionType, WAIT_INFINITELY
    
    from motor_control import MotorControl
    
    # (key passed to MotorControl.tilt, tilt id, event code, description)
    tilt_types = [
        ('a', 1, 9,  'Slow Counter Clockwise',),
        ('b', 2, 11, 'Fast Counter Clockwise',),
        ('c', 3, 12, 'Slow Clockwise'        ,),
        ('d', 4, 14, 'Fast Clockwise'        ,),
    ]
    
    with ExitStack() as stack:
        motor = MotorControl()
        # ExitStack closes the motor on exit, including on assertion failure,
        # so no explicit close() is needed (the original also called
        # motor.close() at the end, closing it twice).
        stack.enter_context(motor)
        
        for tilt_type in tilt_types:
            print("tilt", tilt_type)
            
            with nidaqmx.Task() as task:
                sample_rate = 1000
                batch_size = 3000
                read_timeout = 4
                # BUG FIX: the channel must be added BEFORE configuring the
                # sample clock; previously cfg_samp_clk_timing was called on
                # an empty task.
                task.ai_channels.add_ai_voltage_chan("Dev6/ai8")
                task.timing.cfg_samp_clk_timing(
                    sample_rate,
                    source='',
                    sample_mode=AcquisitionType.CONTINUOUS,
                    samps_per_chan=batch_size,
                )
                
                motor.tilt(tilt_type[0])
                time.sleep(1.75)
                motor.tilt('stop')
                
                data = task.read(batch_size, read_timeout)
                
                assert len(data) == 1
                strobe = data[0]
                print("strobe max", max(strobe))
                # Strobe should start and end low (< 4 V) and pulse high
                # at least once during the tilt.
                assert strobe[0] < 4
                assert strobe[-1] < 4
                assert any(x > 4 for x in strobe)
            
            input('press enter to continue')
Пример #3
0
    def __init__(self):
        """Bind a UDP socket for the companion app, then run the receive loop.

        Blocks inside get_data() until the session ends, then closes the
        socket.
        """
        # Source to get pi IP: https://www.raspberrypi.org/forums/viewtopic.php?t=79936, user: mikerr
        # UDP_IP = subprocess.check_output(['hostname', '-I']).decode('ascii').strip()
        # UDP_IP = socket.gethostbyname(socket.gethostname()) # Get ip source: https://stackoverflow.com/questions/48606440/get-ip-address-from-python, user: An0n
        # UDP_PORT = 0
        UDP_IP = '192.168.43.192'
        UDP_PORT = 38049
        # Code for connecting over wifi source: https://wiki.python.org/moin/UdpCommunication
        self.app_socket = socket.socket(
            socket.AF_INET,  # Internet
            socket.SOCK_DGRAM)  # UDP
        self.app_socket.bind((UDP_IP, UDP_PORT))
        # BUG FIX: ask the socket for its bound port directly instead of
        # parsing str(socket) repr output, which is fragile and
        # implementation-dependent (and breaks if the repr format changes).
        port = self.app_socket.getsockname()[1]

        stats.display(UDP_IP, str(port))

        print("Port: ", port)
        # self.send_network_info(UDP_IP,port)
        self.get_data(MotorControl())
        self.app_socket.close()
Пример #4
0
Flask-powered web app for remote control
of ACROBOTIC's wheeled robot PyPi
'''
from flask import Flask, render_template
# Use the socketio module for using websockets
# for easily handling requests in real-time
from flask_socketio import SocketIO, emit
from motor_control import MotorControl
from time import sleep

# Instantiate Flask class
app = Flask(__name__)
# Use the flask object to instantiate the SocketIO class
socketio = SocketIO(app)
# Create the motor control object (module-level, shared by all websocket handlers)
mc = MotorControl()


# Create the route(s) to access the web app
# Create the route(s) to access the web app
@app.route('/')
def handle_index():
    """Serve the single-page remote-control UI."""
    return render_template('index.html')


# Create the function handlers for the different websocket events
@socketio.on('req')  # 'req' is an arbitrary name for my event
def on_message(msg):
    # I expect the message to be formatted in JSON so I can parse
    # it as a Python dictionary and look for specific keys
    direction = msg['direction']  # this will tell me how to move the motors
    if direction != 'STP':
 def __init__(self, *, mock: bool = False, delay_range: Tuple[float, float]):
     """Store the event delay range and create the motor controller.

     Args:
         mock: when True, MotorControl runs without real hardware.
         delay_range: (min, max) delay in seconds -- presumably sampled
             between events; confirm with the caller.
     """
     self.delay_range = delay_range
     self.motor = MotorControl(mock=mock)
Пример #6
0
from oled_print import OLEDPrint
from wifi import Wifi
from socket_server import SocketServer
from motor_control import MotorControl
from wheel_encoder import WheelEncoder
import global_params

if __name__ == '__main__':
    # Register shared hardware handles in the global parameter registry so
    # other modules can look them up by name.
    global_params.init()
    global_params.set_value("motor_control", MotorControl())
    global_params.set_value("wheel_encoder", WheelEncoder())

    oled = OLEDPrint()
    wifi = Wifi(output_fun=oled)
    flag = wifi.do_connect()
    if flag is False:
        oled.output("Wifi Error")
        # BUG FIX: exit with a non-zero status on failure (was exit(0),
        # which signalled success to any supervising process).
        exit(1)
    ip = wifi.get_ip()

    socket_server = SocketServer(ip, output_fun=oled)
    socket_server.run()
Пример #7
0
    def __init__(
        self,
        *,
        baseline_recording: bool,
        save_template: bool = True,
        template_output_path,
        template_in_path,
        channel_dict,
        mock: bool = False,
        reward_enabled: bool,
    ):
        """Set up motors, the Plexon client, and the PSTH used for tilt analysis.

        Args:
            baseline_recording: True while recording a baseline/template;
                when False a saved template must be provided via
                ``template_in_path``.
            save_template: save the computed template; requires
                ``template_output_path`` to be non-None.
            template_output_path: path to write the template to, or None.
            template_in_path: existing template to load into the Psth, or None.
            channel_dict: channel -> list-of-units mapping passed to Psth
                (see the commented-out example below).
            mock: run without real hardware or a Plexon connection.
            reward_enabled: whether rewards are enabled (stored only here).
        """

        self.mock = mock
        self.save_template = save_template
        self.template_output_path = template_output_path
        self.template_in_path = template_in_path
        self.reward_enabled = reward_enabled
        if save_template:
            assert self.template_output_path is not None

        # Primary tilt motor; start in the stopped state.
        self.motor = MotorControl(mock=mock)
        self.motor.tilt('stop')

        # Second controller on a separate port -- presumably used to
        # interrupt tilts mid-motion; confirm against MotorControl usage.
        self.motor_interrupt = MotorControl(port=1, mock=mock)
        self.motor_interrupt.tilt('stop')

        if mock:
            # Stand-in event-type constants so downstream comparisons still
            # work without the Plexon client library.
            self.PL_SingleWFType = 0
            self.PL_ExtEventType = 1
            self.plex_client = None
        else:
            from pyplexclientts import PyPlexClientTSAPI, PL_SingleWFType, PL_ExtEventType
            dll_path = Path(__file__).parent / 'bin'
            client = PyPlexClientTSAPI(plexclient_dll_path=dll_path)
            client.init_client()
            self.PL_SingleWFType = PL_SingleWFType
            self.PL_ExtEventType = PL_ExtEventType
            self.plex_client = client

        # channel_dict = {
        #     1: [1], 2: [1,2], 3: [1,2], 4: [1,2],
        #     6: [1,2], 7: [1,2,3,4], 8: [1,2,3],
        #     9: [1,2,3], 13: [1,2,3,4], 14: [1,2],
        #     20: [1,2,3], 25: [1,2], 26: [1], 27: [1], 28: [1],
        #     31: [1],
        #     55: [1,2,3,4],
        # }
        # PSTH window: 0 s before to 200 ms after the event, 20 ms bins.
        pre_time = 0.0
        post_time = 0.200
        self._post_time = post_time
        bin_size = 0.020
        self.baseline_recording = baseline_recording
        # Maps incoming event numbers onto tilt types 1-4; two code ranges
        # (1-4 and 9-14) appear to encode the same four tilts -- confirm
        # against the event source.
        event_num_mapping = {
            1: 1,
            2: 2,
            3: 3,
            4: 4,
            9: 1,
            11: 2,
            12: 3,
            14: 4,
        }
        psth = Psth(channel_dict,
                    pre_time,
                    post_time,
                    bin_size,
                    event_num_mapping=event_num_mapping)
        if not baseline_recording:
            assert template_in_path is not None
        if template_in_path is not None:
            psth.loadtemplate(template_in_path)
        self.psth = psth

        # Bookkeeping used by the mock implementation of _get_ts.
        self._mock_state = {
            '_get_ts': {
                's': 'pending',
                't': time.perf_counter(),
            }
        }

        self.no_spike_wait = False
        # time to wait after tilt event is received from plexon
        self.fixed_spike_wait_time = None
        # program fails after this amount of time if tilt isn't received from plexon
        self.fixed_spike_wait_timeout = None
        self.closed = False
        # Default (min, max) seconds between tilts.
        self.delay_range = (1.5, 2)
Пример #8
0
Файл: main.py Проект: cudnn/mmnt
def main():
    """Run the two-camera person/speech tracking loop.

    Initializes motors, the ReSpeaker mic array, the pose-estimation model
    and two video streams, then continuously steers the "master" camera
    toward a detected human (falling back to speech direction-of-arrival
    when nobody is visible) while keeping the "slave" camera pointed away.
    Runs until KeyboardInterrupt, then shuts all hardware down.
    """
    logging.basicConfig()
    logger = logging.getLogger("MMNT")
    logger.setLevel(logging.INFO)
    logger.info("Initializing")

    # Throttle timestamps for the master/slave update cadence.
    masterSampleTime = time.time()
    slaveSampleTime = time.time()

    logger.debug("Initializing motor control")
    mc = MotorControl()
    mc.resetMotors()
    logger.debug("Initializing microphone")
    # 0x2886:0x0018 is the ReSpeaker Mic Array v2.0 USB vendor/product id.
    dev = usb.core.find(idVendor=0x2886, idProduct=0x0018)
    if not dev:
        sys.exit("Could not find ReSpeaker Mic Array through USB")
    mic = Tuning(dev)
    mic.write("NONSTATNOISEONOFF", 1)
    mic.write("STATNOISEONOFF", 1)

    logger.debug("Initializing models")
    ht_model = ht.get_model()
    tfPose = TfPoseEstimator(get_graph_path(TF_MODEL), target_size=(VideoStream.DEFAULT_WIDTH, VideoStream.DEFAULT_HEIGHT))

    logger.debug("Initializing video streams")
    topCamStream = VideoStream(1)
    botCamStream = VideoStream(2)

    topCamStream.start()
    botCamStream.start()


    # Start with the top camera as master; the master tracks humans/speech,
    # the slave covers the opposite direction.
    masterCamID = TOP_CAM_ID
    masterStream = topCamStream
    slaveCamID = BOT_CAM_ID
    slaveStream = botCamStream

    masterTargetAngle = 0
    slaveTargetAngle = 180

    updateMasterAngle = False
    updateSlaveAngle = False

    masterTracking = False

    logger.info("Initialization complete")
    while True:
        try:
            # MASTER
            masterFrame = masterStream.read()
            if time.time() - masterSampleTime > MASTER_SAMPLE_FREQ:
                humans = tfPose.inference(masterFrame, resize_to_default=True, upsample_size=RESIZE_RATIO)
                if len(humans):
                    logger.debug("Master tracking")
                    masterTracking = True
                    if DISPLAY_VIDEO:
                        TfPoseEstimator.draw_humans(masterFrame, humans, imgcopy=False)
                    human = humans[0]
                    if (ht.is_hands_above_head(human)):
                        logger.debug("HANDS ABOVE HEAD!!!")
                    # Find the first visible head part and use its x position
                    # as the face center; -1 means no head part was found.
                    midX = -1
                    for part in headParts:
                        if part in human.body_parts:
                            midX = human.body_parts[part].x * VideoStream.DEFAULT_WIDTH
                            break
                    if midX != -1:
                        centerDiff = abs(midX - VideoStream.DEFAULT_WIDTH/2)
                        if centerDiff > FACE_THRESHOLD:
                            if midX < VideoStream.DEFAULT_WIDTH/2:
                                # rotate CCW
                                masterTargetAngle += centerDiff * degreePerPixel
                            elif midX > VideoStream.DEFAULT_WIDTH/2:
                                # rotate CW
                                masterTargetAngle -= centerDiff * degreePerPixel

                            masterTargetAngle = masterTargetAngle % 360
                            updateMasterAngle = True
                            masterSampleTime = time.time()
                else:
                    logger.debug("Master stopped tracking")
                    masterTracking = False

                # If master is not tracking a human, move towards speech
                if not masterTracking:
                    speechDetected, micDOA = mic.speech_detected(), mic.direction
                    # BUG FIX: logging does not take print-style argument
                    # lists; extra args are %-format args and raise once
                    # DEBUG logging is enabled. Use lazy %-formatting.
                    logger.debug("master speech detected: %s diff: %s", speechDetected, abs(micDOA - masterTargetAngle))
                    if speechDetected and abs(micDOA - masterTargetAngle) > ANGLE_THRESHOLD:
                        masterTargetAngle = micDOA
                        logger.debug("Update master angle: %s", masterTargetAngle)
                        masterSampleTime = time.time()
                        updateMasterAngle = True

            # SLAVE
            slaveFrame = slaveStream.read()
            if time.time() - slaveSampleTime > SLAVE_SAMPLE_FREQ:
                # If master is not tracking a human and a slave sees a human, move master to the visible human and move slave away
                if not masterTracking and time.time() - masterSampleTime > MASTER_SAMPLE_FREQ:
                    humans = tfPose.inference(slaveFrame, resize_to_default=True, upsample_size=RESIZE_RATIO)
                    if len(humans):
                        logger.debug("slave found mans")
                        if DISPLAY_VIDEO:
                            TfPoseEstimator.draw_humans(slaveFrame, humans, imgcopy=False)
                        human = humans[0]
                        if (ht.is_hands_above_head(human)):
                            logger.debug("HANDS ABOVE HEAD!!!")
                        midX = -1
                        for part in headParts:
                            if part in human.body_parts:
                                midX = human.body_parts[part].x * VideoStream.DEFAULT_WIDTH
                                break
                        if midX != -1:
                            centerDiff = abs(midX - VideoStream.DEFAULT_WIDTH/2)
                            # if centerDiff > FACE_THRESHOLD:
                            if midX < VideoStream.DEFAULT_WIDTH/2:
                                # rotate CCW
                                masterTargetAngle = slaveTargetAngle + centerDiff * degreePerPixel
                            elif midX > VideoStream.DEFAULT_WIDTH/2:
                                # rotate CW
                                masterTargetAngle = slaveTargetAngle - centerDiff * degreePerPixel

                            masterTargetAngle = masterTargetAngle % 360
                            updateMasterAngle = True
                            masterSampleTime = time.time()
                            # Keep the slave pointed opposite the master.
                            slaveTargetAngle = (masterTargetAngle + 180) % 360
                            updateSlaveAngle = True
                            logger.debug("Moving master to slave: %s", masterTargetAngle)

                speechDetected, micDOA = mic.speech_detected(), mic.direction
                speechMasterDiff = abs(micDOA - masterTargetAngle)
                if speechDetected and speechMasterDiff > SLAVE_MASTER_THRESHOLD and abs(micDOA - slaveTargetAngle) > ANGLE_THRESHOLD:
                    slaveTargetAngle = micDOA
                    logger.debug("Update slave angle: %s", slaveTargetAngle)
                    slaveSampleTime = time.time()
                    updateSlaveAngle = True


            # Send Serial Commands
            if updateSlaveAngle and updateMasterAngle:
                logger.debug("Slave Angle: %s", slaveTargetAngle)
                logger.debug("Master Angle: %s", masterTargetAngle)
                updateSlaveAngle = False
                updateMasterAngle = False
                # runMotors takes (top angle, bottom angle); order depends on
                # which physical camera is currently the slave.
                if slaveCamID == BOT_CAM_ID:
                    mc.runMotors(masterTargetAngle, slaveTargetAngle)
                else:
                    mc.runMotors(slaveTargetAngle, masterTargetAngle)
            elif updateSlaveAngle:
                mc.runMotor(slaveCamID, slaveTargetAngle)
                logger.debug("Slave Angle: %s", slaveTargetAngle)
                updateSlaveAngle = False
            elif updateMasterAngle:
                mc.runMotor(masterCamID, masterTargetAngle)
                logger.debug("Master Angle: %s", masterTargetAngle)
                updateMasterAngle = False

            if DISPLAY_VIDEO:
                cv.imshow('Master Camera', masterFrame)
                cv.imshow('Slave Camera', slaveFrame)
                # NOTE(review): ESC (27) currently does nothing; presumably
                # this was meant to break out of the loop -- confirm intent.
                if cv.waitKey(1) == 27:
                    pass

        except KeyboardInterrupt:
            logger.debug("Keyboard interrupt! Terminating.")
            mc.stopMotors()
            slaveStream.stop()
            masterStream.stop()
            mic.close()
            time.sleep(2)
            break

    cv.destroyAllWindows()