Example #1

This helper builds a stabilised auditory image (SAI) "video" from a cochlear neural activation pattern (NAP): it loads or computes the NAP, trims it to the requested normalized segment, runs it through an SAI in fixed-width chunks, and caches the per-frame results as a .npy file, returning the filename (or False on failure).
import os

import numpy as np

# utils, brain, IO, pysai, CreateSAIParams and sai_rectangles are
# project-specific helpers assumed to be importable in this module.


def _cochlear_trim_sai_marginals(filename_and_indexes):
    try:
        # Work items arrive as a single tuple, which keeps the function easy
        # to fan out over a process pool. audio_id is unpacked but unused here.
        filename, norm_segstart, norm_segend, audio_id, NAP_detail = filename_and_indexes
        sai_video_filename = '{}_sai_video_{}'.format(filename, NAP_detail)
        # Return early if a cached result already exists on disk.
        if os.path.isfile('{}.npy'.format(sai_video_filename)):
            return sai_video_filename

        # Load a cached NAP from CSV if available, otherwise recompute it
        # from the audio file at the requested level of detail.
        if NAP_detail == 'high':
            try:
                NAP = utils.csv_to_array(filename + 'cochlear' + NAP_detail)
            except Exception:
                NAP = brain.cochlear(filename,
                                     stride=1,
                                     rate=44100,
                                     apply_filter=0,
                                     suffix='cochlear' + NAP_detail)
        elif NAP_detail == 'low':
            try:
                NAP = utils.csv_to_array(filename + 'cochlear' + NAP_detail)
            except Exception:
                NAP = brain.cochlear(filename,
                                     stride=IO.NAP_STRIDE,
                                     rate=IO.NAP_RATE,
                                     apply_filter=0,
                                     suffix='cochlear' + NAP_detail)  # Seems to work best, in particular when they are all the same.

        num_channels = NAP.shape[1]
        input_segment_width = 2048
        # The trigger window spans the whole input segment, so each chunk
        # below yields one SAI frame.
        sai_params = CreateSAIParams(num_channels=num_channels,
                                     input_segment_width=input_segment_width,
                                     trigger_window_width=input_segment_width,
                                     sai_width=1024)

        sai = pysai.SAI(sai_params)

        # Slice out the normalized segment of the NAP, then trim trailing
        # low-energy frames.
        seg_start = int(np.rint(NAP.shape[0] * norm_segstart))
        seg_end = int(np.rint(NAP.shape[0] * norm_segend))
        NAP = utils.trim_right(NAP[seg_start:seg_end], threshold=.05)
        # Run the SAI over fixed-width chunks, copying each frame since
        # RunSegment may reuse its output buffer.
        sai_video = [
            np.copy(sai.RunSegment(input_segment.T))
            for input_segment in utils.chunks(NAP, input_segment_width)
        ]
        del NAP  # free the potentially large NAP before saving
        np.save(sai_video_filename,
                np.array([sai_rectangles(frame) for frame in sai_video]))
        return sai_video_filename

    except Exception:
        print utils.print_exception(
            'Calculation of SAI video failed for file {}, NAP detail {}'.format(
                filename, NAP_detail))
        return False
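
The single-tuple argument is the usual shape for fanning work out with multiprocessing.Pool.map. A minimal usage sketch, assuming hypothetical filenames, segment boundaries and ids (none of the values below come from the original project):

from multiprocessing import Pool

# Hypothetical work items: (filename, norm_segstart, norm_segend, audio_id, NAP_detail).
work_items = [
    ('sounds/voice1', 0.0, 0.5, 'id-1', 'low'),
    ('sounds/voice2', 0.25, 1.0, 'id-2', 'high'),
]

pool = Pool(processes=4)
try:
    # Each result is the cached filename (np.save appends .npy), or False
    # if the calculation failed for that item.
    results = pool.map(_cochlear_trim_sai_marginals, work_items)
finally:
    pool.close()
    pool.join()

print results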
Example #2

This parse method is a plain-text command dispatcher for the [self.] system: each space-separated message either flips a flag in self.state (re-published as JSON), forwards an event on self.event, or round-trips a request to the association process.
    def parse(self, message):
        print '[self.] received: {}'.format(message)

        # NOTE: black_list is local, so it is reset on every call to parse();
        # files appended below do not persist between messages.
        black_list = []

        try:
            # if 'learnwav' in message or 'respondwav_single' in message or 'respondwav_sentence' in message:
            #     _, filename = message.split()
            #     if filename in black_list:
            #         print 'SKIPPING BAD FILE {}'.format(filename)
            #         return

            # 'dream' pauses all recording, learning and response activity,
            # broadcasts the updated state, then triggers a dream event.
            if message == 'dream':
                self.state['memoryRecording'] = False
                self.state['autorespond_sentence'] = False
                self.state['ambientSound'] = False
                self.state['autolearn'] = False
                self.state['autorespond_single'] = False
                self.state['_audioLearningStatus'] = False
                self.state['record'] = False
                self.publisher.send_json(self.state)

                self.event.send_json({'dream': True})

            if message == 'reboot':
                utils.reboot()

            if message == 'appendCurrentSettings':
                self.association.send_pyobj([message])
                self.association.recv_pyobj()
            
            if 'getCurrentSettings' in message:
                msg, value = message.split()
                self.association.send_pyobj([msg, value])
                self.association.recv_pyobj()

            if 'i_am_speaking' in message:
                _, value = message.split()
                self.state['i_am_speaking'] = value in ['True', '1']

            if 'enable_say_something' in message:
                _, value = message.split()
                self.state['enable_say_something'] = value in ['True', '1']
            
            if 'last_segment_ids' in message:
                the_ids = message[17:]  # JSON payload after 'last_segment_ids '
                self.event.send_json({'last_segment_ids': loads(the_ids)})  # loads = json.loads
                
            if 'last_most_significant_audio_id' in message:
                audio_id = message[31:]
                self.event.send_json({'last_most_significant_audio_id': audio_id })
            
            if message == 'clear play_events':
                self.event.send_json({'clear play_events' : 'True'})

            if 'calculate_cochlear' in message:
                _, wav_file = message.split()
                t0 = time.time()
                try:
                    brain.cochlear(utils.wait_for_wav(wav_file), stride=IO.NAP_STRIDE, rate=IO.NAP_RATE)
                except:
                    print 'SHOULD {} BE BLACKLISTED?'.format(wav_file)
                    black_list.append(wav_file)
                print 'Calculating cochlear neural activation patterns took {} seconds'.format(time.time() - t0)
            
            if message == 'evolve':
                self.state['memoryRecording'] = False
                self.state['autorespond_sentence'] = False
                self.state['autolearn'] = False
                self.state['autorespond_single'] = False
                self.state['_audioLearningStatus'] = False
                self.state['record'] = False
                self.publisher.send_json(self.state)
                
                self.association.send_pyobj(['evolve'])
                self.association.recv_pyobj()

            if 'register' in message and 'BRAIN' in message:
                _, name, free = message.split()
                self.state['brains'][name] = int(free)

            if 'fullscreen' in message:
                _, value = message.split()
                self.event.send_json({ 'fullscreen': value in ['True', '1'] })

            if 'display2' in message:
                _, value = message.split()
                self.event.send_json({ 'display2': value in ['True', '1'] })

            if message == 'startrec':
                self.state['record'] = True

            if message == 'stoprec':
                self.state['record'] = False

            if 'facerecognition' in message:
                _, value = message.split()
                self.state['facerecognition'] = value in ['True', '1']

            if 'print_me' in message:
                # 'print_me' is 8 characters, so the payload starts at index 9;
                # the original sliced at 7, which kept part of the keyword.
                self.event.send_json({ 'print_me': message[9:] })

            if 'play_id' in message:
                self.event.send_json({ 'play_id': message[8:] })

            if 'testSentence' in message:
                self.event.send_json({ 'testSentence': message[13:] })

            if 'assoc_setPlotting' in message:
                self.event.send_json({ 'assoc_setPlotting': message[18:] in ['True', '1'] })

            if 'assoc_setParam' in message:
                self.event.send_json({ 'assoc_setParam': message[15:] })

            if 'respond_setParam' in message:
                self.event.send_json({ 'respond_setParam': message[17:] })

            if 'memoryRecording' in message:
                self.state['memoryRecording'] = message[16:] in ['True', '1']
               
            if '_audioLearningStatus' in message:
                self.state['_audioLearningStatus'] = message[21:] in ['True', '1']

            if 'roboActive' in message:
                self.state['roboActive'] = int(message[11:])

            if 'ambientSound' in message:
                self.state['ambientSound'] = int(message[13:])

            if 'decrement' in message:
                _, name = message.split()
                self.state['brains'][name] -= 1
                print '{} has now {} available slots'.format(name, self.state['brains'][name])

            if 'learnwav' in message:
                _, filename = message.split()
                self.event.send_json({ 'learn': True, 'filename': filename })

            if 'respondwav_single' in message:
                _, filename = message.split()
                self.event.send_json({ 'respond_single': True, 'filename': filename })

            if 'respondwav_sentence' in message:
                _, filename = message.split()
                self.event.send_json({ 'respond_sentence': True, 'filename': filename })

            if 'play_sentence' in message:
                print 'playSentence', message
                sentence = message[14:]  # payload after 'play_sentence '
                self.event.send_json({ 'play_sentence': True, 'sentence': sentence })

            if 'rhyme' in message:
                _, value = message.split()
                self.event.send_json({'rhyme': value == 'True'})

            if 'urge_to_say_something' in message:
                _, value = message.split()
                self.event.send_json({'urge_to_say_something': value})

            if 'autolearn' in message:
                self.state['autolearn'] = message[10:] in ['True', '1']

            if 'autorespond_single' in message:
                self.state['autorespond_single'] = message[19:] in ['True', '1']

            if 'autorespond_sentence' in message:
                self.state['autorespond_sentence'] = message[21:] in ['True', '1']

            if 'inputLevel' in message:
                self.event.send_json({ 'inputLevel': message[11:] })

            if 'calibrateEq' in message:
                self.event.send_json({ 'calibrateEq': True })

            if 'calibrateAudio' in message:
                latency_ok = False
                try:
                    # Reuse a previously measured roundtrip latency if available.
                    with open('roundtrip_latency.txt', 'r') as lat:
                        latency = float(lat.readline())
                    self.event.send_json({ 'setLatency': latency })
                    latency_ok = True
                except Exception, e:
                    print 'Something went wrong when reading latency from file.', e
                    self.event.send_json({ 'calibrateAudio': True })
                if latency_ok:
                    self.event.send_json({ 'calibrateNoiseFloor': True })
                if 'calibrateAudio memoryRecording' in message:
                    self.state['memoryRecording'] = True

            if 'csinstr' in message:
                self.event.send_json({ 'csinstr': message[8:] })
             
            if 'selfDucking' in message:
                self.event.send_json({ 'selfDucking': message[12:] })

            if 'zerochannels' in message:
                self.event.send_json({ 'zerochannels': message[13:] })

            if 'playfile' in message:
                self.event.send_json({ 'playfile': message[9:] })

            if 'selfvoice' in message:
                self.event.send_json({ 'selfvoice': message[10:] })

            if 'save' in message:
                self.event.send_json({ 'save': utils.brain_name() if len(message) == 4 else message[5:] })

            if 'load' in message:
                if len(message) == 4:
                    brain_name = utils.find_last_valid_brain()
                else:
                    _, brain_name = message.split()
                if brain_name:
                    self.event.send_json({ 'load': brain_name })

            self.publisher.send_json(self.state)

        except Exception, e:
            # The excerpt ends before the original handler for the try above;
            # this minimal catch-all is added so the snippet parses as shown.
            print 'parse() failed: {}'.format(e)
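
The commands are plain, space-separated strings, so driving this parser from another process is straightforward. A hypothetical sender sketch, assuming the messages travel over ZeroMQ (the send_json/send_pyobj calls above suggest pyzmq); the PUSH socket type and the address are illustrative assumptions, not taken from the original project:

import zmq

context = zmq.Context()
sender = context.socket(zmq.PUSH)       # socket type is an assumption
sender.connect('tcp://localhost:5555')  # address is an assumption

# Space-separated text commands, matching the substring checks in parse().
sender.send_string('autolearn True')
sender.send_string('facerecognition 1')
sender.send_string('play_sentence hello there')
sender.send_string('dream')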