Example #1
    def __init__(self, model_path, fs_path, landmark_path, task_path):
        self.load_keras_model(model_path, fs_path)
        self.load_landmarks(landmark_path)
        self.load_task_order(task_path)

        # init node for mm_bridge
        rospy.init_node('ttp_node', anonymous=True)

        # init the publisher for command
        self.cmd_pub = rospy.Publisher('cmd_pred', String, queue_size=10)
        self.item_id = 0
        self.rail_id = 1
        self.thumbtack_id = 1
        self.cmd_request_history = []

        # init the signal processing unit
        self.dsp = DSP()
        self.z_buf = []
        self.z_buf_size = 5  # min 5 timestamps for feature encoding
        self.f_buf = []
        self.f_buf_size = 120  # 20 Hz * 6 seconds

        # record firing time
        self.last_fire_time = time()

        # init the subscriber at the end when all init is finished
        rospy.Subscriber("mm_bridge_output", String, self.mm_msg_callback)
Example #2
def feedback_test():
    tb = DSP(filter_params, True, True)
    cd_dsp4 = ClockDomain("dsp4", reset_less=True)
    tb.clock_domains += cd_dsp4
    tb.comb += tb.offset.eq(tb.output)

    def run(tb):
        dut = tb

        yield dut.data.eq(1)
        yield dut.coeff.eq(2)
        yield
        yield dut.data.eq(2)
        yield
        yield dut.data.eq(0)
        for i in range(20):
            yield
        yield

    run_simulation(
        tb,
        run(tb),
        vcd_name="feedback_test.vcd",
        clocks={
            "sys": (8, 0),
            "dsp4": (2, 0),
        },
    )
Example #3
def dsp_test(double_reg=True):
    tb = DSP(filter_params, double_reg)

    def run(tb):
        dut = tb
        yield dut.data.eq(128)
        yield dut.carryin.eq(20)
        yield dut.coeff.eq(2)
        for i in range(20):
            yield
        yield

    run_simulation(tb, run(tb), vcd_name="dsp_test.vcd")
Example #4
 def __init__(self, ram=None, ipl_ram=None, dspreg_bytes=None):
     self.RAM = bytearray(0x10000) if ram is None else ram
     self.IPLRAM = bytearray(0x40) if ipl_ram is None else ipl_ram
     self.DSP = None if dspreg_bytes is None else DSP(
         self.RAM, dspreg_bytes)
     # timing
     self.clock = Clock(32040 * 768)  ## 24 MHz
     self.clock.add_clock("SPC", 24)  ## 1 MHz
     self.clock.add_clock("DSP", 768)  ## 32 kHz
     self.timer0 = Timer(128)  ## 8 kHz
     self.timer1 = Timer(128)  ## 8 kHz
     self.timer2 = Timer(16)  ## 64 kHz
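     # worked numbers for the clock tree above (derived from the constants
     # in this snippet, not stated in the original source):
     #   32040 * 768 = 24,606,720 Hz master clock
     #   /24  -> ~1.025 MHz for the SPC core
     #   /768 -> 32,040 Hz for the DSP
     #   Timer(128) presumably divides the ~1 MHz SPC clock -> ~8 kHz for
     #   timer0/1, and Timer(16) -> ~64 kHz for timer2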
     #internal variables
     self.iplROMEnable = True
     self.dspaddress = 0
     self.apu0 = 0
     self.apu1 = 0
     self.apu2 = 0
     self.apu3 = 0
     self.aux4 = 0
     self.aux5 = 0
     self.clockcounter = 0
     #ports
     self.ports = {
         0x00F0: Port(0x00F0, "TEST", False, True),
         0x00F1: Port(0x00F1, "CONTROL", False, True),
         0x00F2: Port(0x00F2, "DSPADDR", True, True),
         0x00F3: Port(0x00F3, "DSPDATA", True, True),
         0x00F4: Port(0x00F4, "CPUIO0", True,
                      False),  ## no CPU connected so readonly
         0x00F5: Port(0x00F5, "CPUIO1", True,
                      False),  ## no CPU connected so readonly
         0x00F6: Port(0x00F6, "CPUIO2", True,
                      False),  ## no CPU connected so readonly
         0x00F7: Port(0x00F7, "CPUIO3", True,
                      False),  ## no CPU connected so readonly
         0x00F8: Port(0x00F8, "AUXIO4", True, True),
         0x00F9: Port(0x00F9, "AUXIO5", True, True),
         0x00FA: Port(0x00FA, "T0TARGET", False, True),
         0x00FB: Port(0x00FB, "T1TARGET", False, True),
         0x00FC: Port(0x00FC, "T2TARGET", False, True),
         0x00FD: Port(0x00FD, "T0OUT", True, False),
         0x00FE: Port(0x00FE, "T1OUT", True, False),
         0x00FF: Port(0x00FF, "T2OUT", True, False),
     }
Example #5
# imports needed to run this snippet; the DSP import path below is assumed,
# since DSP is a project-local module
import rospy
from std_msgs.msg import String
import keras
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from time import time
from datetime import datetime
from dsp import DSP  # project-local signal-processing helper (path assumed)


class TTP:
    def __init__(self, model_path, fs_path, landmark_path, task_path):
        self.load_keras_model(model_path, fs_path)
        self.load_landmarks(landmark_path)
        self.load_task_order(task_path)

        # init node for mm_bridge
        rospy.init_node('ttp_node', anonymous=True)

        # init the publisher for command
        self.cmd_pub = rospy.Publisher('cmd_pred', String, queue_size=10)
        self.item_id = 0
        self.rail_id = 1
        self.thumbtack_id = 1
        self.cmd_request_history = []

        # init the signal processing unit
        self.dsp = DSP()
        self.z_buf = []
        self.z_buf_size = 5  # min 5 timestamps for feature encoding
        self.f_buf = []
        self.f_buf_size = 120  # 20 Hz * 6 seconds

        # record firing time
        self.last_fire_time = time()

        # init the subscriber at the end when all init is finished
        rospy.Subscriber("mm_bridge_output", String, self.mm_msg_callback)

    def grace_exit(self, exit_msg, gohome):
        print "!" * 20
        print '(grace?) exit with message:'
        print '---\n', exit_msg, '\n---'
        if gohome:
            self.cmd_pub.publish('0|home|1.00|15:57:42:87|keyboard')
            print "go home robot you are drunk..."
        print "cmd history:"
        for c in self.cmd_request_history:
            print c
        exit(1)

    def load_keras_model(self, model_path, fs_path):
        self.model = keras.models.load_model(model_path)
        print '-' * 30
        print "loaded LSTM model at %s" % model_path
        print(self.model.summary())
        # plot_model(self.model, show_shapes=True)
        self.select_feat_names = list(
            pd.read_csv(fs_path, header=None)[0].values)
        self.feat_dim = len(self.select_feat_names)
        print "loaded Feature Selection at %s" % fs_path
        print "Selected features\n", self.select_feat_names

    def load_landmarks(self, landmark_path):
        self.df_landmark = pd.read_csv(landmark_path)
        print '-' * 30
        print 'loaded landmark_path at: %s' % landmark_path
        print self.df_landmark['name'].to_string(index=False)

    def load_task_order(self, task_path):
        self.df_task = pd.read_csv(task_path)
        self.df_task['status'] = 'to_do'
        print '-' * 30
        print 'loaded task_path at: %s' % task_path
        # print self.df_task.info()
        # print self.df_task.head()
        # exit()

    def mm_msg_callback(self, msg):
        # decode the msg into the format that we want
        df = self.dsp.decode_packet(msg.data)
        if df is None:
            print 'invalid mm msg packet, unequal pac length'
            return

        x = df.loc[0].values.flatten()  # shape (num_raw_columns,)

        self.dsp.clip_outlier(x)
        self.dsp.scale(x, method='standard')
        z = self.dsp.expSmooth(x, self.z_buf[-1],
                               alpha=0.2) if len(self.z_buf) != 0 else x
        self.z_buf.append(z)

        if len(self.z_buf) < self.z_buf_size:
            return
        elif len(self.z_buf) > self.z_buf_size:
            del self.z_buf[0]
        assert len(self.z_buf) == self.z_buf_size

        # encode features (LoG, Gabor etc)
        en, encode_feat_names = self.dsp.encode_features_online(self.z_buf)
        f = self.dsp.select_features(en, encode_feat_names,
                                     self.select_feat_names)
        self.f_buf.append(f)
        if len(self.f_buf) > self.f_buf_size:
            del self.f_buf[0]

    def intent_pred(self):
        if len(self.f_buf) < self.f_buf_size:
            return 0
        # the intent_pred is very fast (takes about 0.01 second)
        x_test = np.array(self.f_buf).reshape(1, self.f_buf_size,
                                              self.feat_dim)
        y_test_pred_prob = self.model.predict(x_test, batch_size=1)[0]
        intent = y_test_pred_prob[1]
        return intent

    def item_pred(self):
        # first, let us fix the order to dictate the steps
        to_do_tasks = self.df_task.loc[self.df_task['status'] == 'to_do']
        if len(to_do_tasks) == 0:
            print "all tasks finished..."
            return 'done'
        item = to_do_tasks.iloc[0]['part_name']
        return item

    def make_command_msg(self, item, intent, comment='NA'):
        # msg has the following format
        # step | task/object name | hand over ready? | timestamp | comment
        # e.g.
        # 1|tape|0.00|19:49:32:98|NA
        # 1|tape|1.00|19:50:03:32|NA
        # 2|marker|1.00|19:53:03:32|This is a comment
        if item not in self.df_landmark['name'].values and item not in [
                'rail', 'thumbtack'
        ]:
            print "invalid command: [%s]" % item
            return ''

        msg = '%s|' % self.item_id
        if item == 'rail':
            # check the limit before building the msg, so a valid rail is
            # never discarded and the counter cannot run past the limit
            if self.rail_id >= 8:
                print "Run out of rails! Cannot pick!"
                return ''
            msg += '%s%i|' % (item, self.rail_id)
            self.rail_id += 1
        elif item == 'thumbtack':
            if self.thumbtack_id >= 18:
                print "Run out of thumbtacks! Cannot pick!"
                return ''
            msg += '%s%i|' % (item, self.thumbtack_id)
            self.thumbtack_id += 1
        else:
            msg += '%s|' % item
        msg += '%.2f|' % intent
        msg += '%s|' % datetime.now().strftime(
            '%H:%M:%S:%f')[:-4]  # %Y_%m_%d_%H_%M_%S_%f
        msg += '%s' % comment
        self.item_id += 1
        self.cmd_request_history.append(msg)
        return msg

    def run_keyboard(self):
        """
        in this case, for every command entered we publish one message,
        and we are sure of the intent (always 1)
        """
        while not rospy.is_shutdown():
            item = raw_input('Enter name of next task object: ')
            if item == 'menu':
                print 'menu: ', self.df_landmark['name'].values
                continue
            if item == 'quit':
                self.grace_exit('quit program', gohome=False)

            # map some shortcut names to full name
            mapping = {}
            mapping['left'] = 'left stile'
            mapping['right'] = 'right stile'
            mapping['notes'] = 'sticky notes'
            mapping['cut'] = 'scissors'
            for i in range(1, 17):
                mapping['tack' + str(i)] = 'thumbtack' + str(i)
            if item in mapping:
                print "translate command [%s] => [%s]" % (item, mapping[item])
                item = mapping[item]
            msg = self.make_command_msg(item, intent=1, comment='keyboard')
            if msg:
                self.cmd_pub.publish(msg)
        self.grace_exit('keyboard control finished...', gohome=False)

    def decode_word(self, word):
        """
        convert to lower case
        remove the beginning and ending spaces
        if the word is repeated, only return one
        e.g. 'seat seat' -> return 'seat'
        """
        item = word.lower().lstrip().rstrip()
        item_split = item.split()
        if len(set(item_split)) == 1:
            item = item_split[0]
        return item

    def run_speech(self):
        """
        in this case, we wait for a speech command and do the necessary
        decoding to link it to our task. Every time we get a command,
        we can be sure that the human needs this object.
        We explicitly ask the participant to utter the name of the object
        and nothing more than that.
        """
        from pocketsphinx import LiveSpeech
        speech = LiveSpeech(
            verbose=False,
            sampling_rate=96000,  # 48000
            buffer_size=4096,  # 4096
            no_search=False,
            full_utt=False,
            hmm=
            '/home/tzhou/Workspace/catkin_ws/src/ttp/model/sphinx/en-us-Win10',  # 'en-us-Win10', 'en-us-dist-packages'
            lm='/home/tzhou/Workspace/catkin_ws/src/ttp/model/sphinx/7600.lm',
            dic='/home/tzhou/Workspace/catkin_ws/src/ttp/model/sphinx/7600.dic'
        )

        print 'listening...'
        thres = 0.1
        low_thres = 0.05
        print "thres: %.2f" % thres
        print "low_thres: %.2f" % low_thres
        for phrase in speech:
            if rospy.is_shutdown():
                break
            confi = phrase.confidence()
            item = self.decode_word(phrase.hypothesis())
            if confi > low_thres and confi < thres:
                print 'low-confi  word [%s] with confi [%.2f]' % (item, confi)
            if confi < thres:
                continue
            print 'recognized word [%s] with confi [%.2f]' % (item, confi)
            msg = self.make_command_msg(item, intent=1, comment='speech')
            if msg:
                # self.cmd_pub.publish(msg)
                print '(no) publish message: [%s]' % msg
                print '========================='
        self.grace_exit('speech control finished...', gohome=False)

    def check_fire(self, intent_history, time_history, confi_thres,
                   active_duration_thres, fire_interval_thres):
        """
        === used, looks good overall ===
        method 1: find the duration where confi exceeds a confidence threshold;
        if this continuous duration is longer than a window threshold, we fire.
            problem: it has to work together with the silencing scheme,
            otherwise we will see multiple firings for the same turn-request
            action.

        === not used ===
        method 2: find a local maximum, and if the value at the local maximum
        exceeds a confidence threshold, we fire and start silencing.
            problem: if the confi keeps increasing and never reaches a local
            maximum, we cannot fire.
        """
        N = len(intent_history)
        hz = 20.0
        for i in range(N - 1, max(-1, N - 100), -1):
            if intent_history[i] > confi_thres:
                if time_history[-1] - time_history[i] >= active_duration_thres:
                    if time_history[
                            -1] - self.last_fire_time > fire_interval_thres:
                        if 0:
                            plt.plot(np.arange(N) / hz,
                                     confi_thres * np.ones(N),
                                     'g',
                                     label='threshold')
                            plt.plot(np.arange(0, i) / hz,
                                     intent_history[:i],
                                     'b',
                                     label='inactive')
                            plt.plot(np.arange(i, N) / hz,
                                     intent_history[i:],
                                     'r',
                                     label='active')
                            plt.xlabel('time (in seconds)')
                            plt.ylabel('intent history')
                            plt.legend(loc='best')
                            plt.show()
                        self.last_fire_time = time()
                        # print "fired at time: ", datetime.now()
                        print "intent: [%.2f], fire!!!" % intent_history[-1]
                        return True
                    else:
                        remain_time = fire_interval_thres - (
                            time() - self.last_fire_time)
                        print "intent: [%.2f], wait for silence period to pass, %.2f sec left" % (
                            intent_history[-1], remain_time)
                        return False
            else:
                break
        print "intent: [%.2f], not high enough..." % intent_history[-1]
        return False
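
    # --- hedged sketch, not part of the original code ---
    # The docstring above mentions an unused "method 2": fire on a local
    # maximum of the intent history if that maximum exceeds the confidence
    # threshold. A minimal illustration could look like the helper below;
    # the method name and the 3-sample window are assumptions, and the
    # silencing bookkeeping (last_fire_time) is omitted for brevity.
    def check_fire_local_max(self, intent_history, confi_thres):
        if len(intent_history) < 3:
            return False
        prev, curr, last = intent_history[-3:]
        # a local maximum: the middle sample is higher than both neighbours
        return curr > prev and curr > last and curr > confi_thres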

    def run_tt(self):
        """
        since the intent and the item prediction run non-stop,
        we need to decide when to publish a new msg:
        we don't always publish, only when necessary
        """
        rate = rospy.Rate(20)
        intent_history = []
        time_history = []
        time_past = []
        print "waiting for feature buffer to be fully inited (need 120 steps)..."
        f = plt.figure()
        ax = f.gca()
        f.show()
        ax.set_xlabel('past time in seconds')
        ax.set_ylabel('intent history')
        ax.set_ylim([0, 1])
        t0 = time()

        # === some parameters ===
        fire_interval_thres = 10  # 12 to be conservative, make it 10 to be faster. For subject 1,2,3 it is 12
        confi_thres = 0.4  # 0.5 to be conservative, notice that 0.5 does not work well with some subjects. Use 0.4!
        active_duration_thres = 0.3  # 0.5 to be conservative
        while not rospy.is_shutdown():
            item = self.item_pred()
            if item == 'done':
                break
            intent = self.intent_pred()
            # rate.sleep()
            intent_history.append(intent)
            time_history.append(time())
            time_past.append(time() - t0)
            if len(intent_history) < 10:
                continue

            # do some plot
            ax.plot(time_past[-2:], intent_history[-2:], 'r-', linewidth=5)
            ax.plot(time_past[-2:], [confi_thres, confi_thres],
                    'b-',
                    linewidth=2)
            xlim_min = 0 if len(
                time_past) < 100 else time_past[-1] - 10  # show 10 seconds
            ax.set_xlim([xlim_min, time_past[-1]])
            f.canvas.draw()
            if self.check_fire(intent_history, time_history, confi_thres,
                               active_duration_thres, fire_interval_thres):
                # 12 is good in practice, 6 is good in debug (no robot motion)
                # confi_thres in range [0.5, 0.99], the larger, the more misses but more accurate
                # active_duration_thres = 0.5 # high value for > 0.5 seconds
                # fire_interval_thres = 12 # 12 seconds: 7 sec for pickup/delivery, and 5 sec for operation
                msg = self.make_command_msg(item,
                                            intent=1,
                                            comment='early tt prediction')
                self.df_task.loc[self.df_task['part_name'] == item,
                                 'status'] = 'done'
                # print self.df_task
                self.cmd_pub.publish(msg)
                print 'publish message: [%s]' % msg
                print '========================='
        self.grace_exit('tt prediction finished...', gohome=False)

    def run(self, mode):
        if mode not in ['tt', 'speech', 'keyboard']:
            rospy.logerr('unrecognized TTP mode %s' % mode)
        if mode == 'keyboard':
            self.run_keyboard()
        elif mode == 'speech':
            self.run_speech()
        elif mode == 'tt':
            self.run_tt()
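
# --- hedged usage sketch, not part of the original example ---
# Shows how the TTP class above might be started; every path below and the
# chosen mode are placeholders, not values taken from the source.
if __name__ == '__main__':
    ttp = TTP(model_path='model/lstm.h5',              # placeholder
              fs_path='model/selected_features.csv',   # placeholder
              landmark_path='config/landmarks.csv',    # placeholder
              task_path='config/task_order.csv')       # placeholder
    ttp.run('tt')  # or 'speech' / 'keyboard'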
Example #6
def runVisualizer(roomDimensions, microphoneSensitivity, signalPos,
                  sigStrength, micGrid, gridOffset, gridHeight, camPos):
    # Room parameters
    width = roomDimensions[0]
    length = roomDimensions[1]
    ceiling = roomDimensions[2]

    # Define room
    room = Room(width, length, ceiling)

    # Create a microphone array
    microphonePositions = helpers.generateMicArray(micGrid[0], micGrid[1],
                                                   gridOffset, gridHeight)
    if not helpers.allInRoom(microphonePositions, room):
        print(
            "Some microphones fell outside the boundaries of the room, please re-enter data."
        )
        return

    # Microphone parameters
    numMicrophones = len(microphonePositions)
    microphoneArray = []
    micSensitivity = microphoneSensitivity

    # Set up microphone objects
    for x in range(0, numMicrophones):
        microphoneArray.append(
            Microphone(microphonePositions[x], micSensitivity))

    # Configure DSP object
    dsp = DSP(99, microphoneArray)

    # Set up camera controller
    cameraPosition = camPos
    if not helpers.allInRoom([cameraPosition], room):
        print(
            "The camera fell outside the boundaries of the room, please re-enter data."
        )
        return
    cameraOrientation = [0, 0]  # pointing along x-axis in degrees
    cameraController = CameraController(cameraPosition, cameraOrientation, dsp,
                                        room)
    cameraController.setMicSensitivity(micSensitivity)
    cameraController.setMicPositions(microphonePositions)

    # Define Signal parameters
    signalPosition = signalPos
    if not helpers.allInRoom([signalPosition], room):
        print(
            "The signal fell outside the boundaries of the room, please re-enter data."
        )
        return
    signalStrength = sigStrength

    # Send signal to all microphones
    for microphone in microphoneArray:
        microphone.sendSignal(signalPosition, signalStrength)
    print(
        "-> Audio signal at position x: {0}, y: {1}, z: {2} with strength {3} broadcast to all microphones"
        .format(signalPosition[0], signalPosition[1], signalPosition[2],
                signalStrength))

    # Predict signal location using sphere trilateration
    predictedSignalLocation = cameraController.getSignalPosition(
        signalStrength)
    print("->>> Predicted Signal Location: {0}, Actual Signal Location: {1}".
          format(predictedSignalLocation, signalPosition))

    cameraController.rePositionCamera(predictedSignalLocation)

    visualize(cameraController, signalStrength, predictedSignalLocation)
Example #7
RFduinoConn = BleConn(inputArgs.sourceMac, inputArgs.MAC, inputArgs.interface)

def getValue():
    if inputArgs.uuid:
        return RFduinoConn.readValueFromUUID(inputArgs.uuid)
    else:
        return RFduinoConn.readValueFromHandle(inputArgs.handle)

def unpackFloat(hex):
    return struct.unpack("f", binascii.unhexlify(hex))[0]

def unpackInt(hex):
    return struct.unpack("h", binascii.unhexlify(hex))[0]

c = Calibration()
dsp = DSP(9)

value = getValue()
if inputArgs.debug:
    print value

while inputArgs.continuous:
    value = getValue()

    hexStr = "".join(value.split(" "))
    gx = unpackInt(hexStr[:4])
    gy = unpackInt(hexStr[4:8])
    gz = unpackInt(hexStr[8:12])
    ax = unpackInt(hexStr[12:16])
    ay = unpackInt(hexStr[16:20])
    az = unpackInt(hexStr[20:24])
Example #8
fig, ax = plt.subplots()

ax.set_xlim([0, VISUALIZER_LENGTH])
ax.set_ylim([0, VISUALIZER_HEIGHT])

x_axis = np.arange(VISUALIZER_LENGTH)
y_axis = np.zeros(VISUALIZER_LENGTH)

line, = ax.plot(x_axis, y_axis)

mic = Microphone(FORMAT=FORMAT,
                 CHANNELS=CHANNELS,
                 RATE=RATE,
                 FRAMES_PER_BUFFER=FRAMES_PER_BUFFER,
                 DEVICE_ID=DEVICE_ID)
sig_processor = DSP(ALPHA_SMOOTHING=ALPHA_SMOOTHING)


def init():  # only required for blitting to give a clean slate.
    x_data = np.arange(VISUALIZER_LENGTH)
    y_data = np.zeros(len(x_data))
    line.set_data(x_data, y_data)
    return line,


def animate(i):
    x_data = np.arange(VISUALIZER_LENGTH)
    y_data = np.zeros(len(x_data))
    raw_data = mic.sampleInput()
    if sum(raw_data) > 0:
        processed_data = sig_processor.process_sample(raw_data)