Esempio n. 1
0
class recordThread(QThread):
    """QThread that creates a Recorder and starts it on the worker thread."""

    def __init__(self):
        super().__init__()

    def run(self):
        # Instantiate the recorder here so it is owned by this thread.
        recorder = Recorder()
        self.rec = recorder
        recorder.start()
Esempio n. 2
0
    def prepare_basic_settings(self):
        """Initialize the recorder, global seed and sample-index bookkeeping."""
        self.recorder = Recorder()

        # Seed derived from the problem name + configured seed so runs are
        # reproducible per problem.
        self.global_seed = utils.hash_seed(self.config.problem,
                                           self.config.seed)

        # BUG FIX: read from self.config rather than a bare `config` name,
        # consistent with the self.config usage above.
        self.start = self.config.start
        self.end = self.config.start + self.config.n_sample
        self.current_idx = self.start
        self.current_stage = 0

        # How many samples to render before reloading the blender scene.
        self.scene_reload_time = 10
Esempio n. 3
0
    def __init__(self, root):
        """Build the recorder UI state on top of the given Tk root window."""
        super().__init__()
        self.master = root
        # StringVars back the labels/button so their text can be updated live.
        self.log_directory_text = StringVar()
        self.log_directory_text.set("please select log directory ->")
        self.record_button_text = StringVar()
        self.record_button_text.set("Start Recording")
        self.record_count_text = StringVar()
        self.record_count_text.set("0 records")

        self.recorder = Recorder()
        self.init_ui()
        # NOTE(review): is_recording is assigned after init_ui(); confirm that
        # init_ui (and its button callbacks) cannot read it before this point.
        self.is_recording = False
Esempio n. 4
0
class Application(object):
    """Composition root: owns a Recorder and a Reporter over one file."""

    def __init__(self, file_name):
        self._file_name = file_name
        recorder = Recorder(self._file_name)
        self._recorder = recorder
        self._reporter = Reporter(recorder)

    def start(self):
        """Run the recording services, then show the report."""
        self.start_services()
        self.print_report()

    def _make_app(self):
        # Placeholder hook; intentionally does nothing.
        pass

    def start_services(self):
        """Kick off the recorder."""
        self._recorder.start()

    def print_report(self):
        """Ask the reporter to display its report."""
        self._reporter.display_report()
Esempio n. 5
0
    def __init__(self, *pubs, storage='local'):
        """Set up a Recorder and select the news sources to use.

        Args:
            *pubs: publisher keys to keep; when empty, all sources are used.
            storage: storage backend, passed through to Recorder.
        """
        self.recorder = Recorder(storage=storage)

        # Idiomatic emptiness test instead of `len(pubs) == 0`.
        if not pubs:
            self.src = news_publishers.src
        else:
            # Keep only the requested publishers.
            self.src = {
                k: v
                for k, v in news_publishers.src.items() if k in pubs
            }
Esempio n. 6
0
class ButtonRecorderPlayer(object):
    """Toggles between audio playback and recording on a GPIO button press."""

    def __init__(self):
        self.isPlaying = True
        self.p = pyaudio  # pyaudio module handle, passed to Recorder/Player
        self.rec = Recorder(channels=1)
        self.play = None
        self.playback_thread = threading.Thread(name='button_listener',
                                                target=self.button_listener)

    def on_button(self, channel):  # Called by inbuilt threaded interrupt
        """GPIO edge callback: flip between playback and recording.

        BUG FIX: operate on self instead of the module-level `recPlayBtn`
        global so the handler works for whichever instance registered it.
        """
        print('button')
        if self.isPlaying:
            print('stoping playback and starting recording')
            self.stop_playback()
            self.isPlaying = False
            self.start_recording()
        else:
            print('stoping recording and starting playback')
            self.stop_recording()
            self.isPlaying = True
            self.start_playback()

    def button_listener(self):
        """Register the button callback on pin 17.

        BUG FIX: gpio.add_event_detect() returns None, not a context manager,
        so the original `with ... as listener:` raised at runtime.  Once
        registered, the GPIO library invokes the callback on its own thread.
        """
        gpio.add_event_detect(17, gpio.FALLING, callback=self.on_button,
                              bouncetime=100)
        print('listener started')

    def start(self):
        """Start the listener thread and begin playback."""
        self.playback_thread.start()
        self.start_playback()

    def start_recording(self, channel=1):
        """Open a timestamped WAV under recordings/ and start recording."""
        print('Recording, click to stop recording')
        timestr = time.strftime("%Y%m%d-%H%M%S")
        gpio.output(4, True)  # LED on
        self.recfile = self.rec.open('recordings/' + timestr + '.wav', self.p, 'wb')
        self.recfile.start_recording()

    def stop_recording(self, channel=1):
        """Stop and close the current recording file."""
        self.recfile.stop_recording()
        self.recfile.close()
        gpio.output(4, False)  # LED off
        print('Recording Stopped')

    def start_playback(self, channel=1):
        """Start playing everything under recordings/."""
        print('playback starting')
        self.play = Player('recordings', self.p)
        self.play.start()

    def stop_playback(self):
        """Stop the current playback."""
        self.play.stopper()
        print('playback stopped')
Esempio n. 7
0
    def __init__(self, parent=None):
        """Set up the VoiceReminder main window: widgets, workers, signals."""
        super(MyWidget, self).__init__(parent)
        self.setupUi(self)
        self.setWindowTitle("VoiceReminder")
        self.pushButton.setStyleSheet("QPushButton{border-image: url(img/start.png)}")
        self.lcdNumber.setDigitCount(2)
        self.lcdNumber.setVisible(False)

        # Modal dialog used to pick a reminder time.
        self.setting_time_dialog = SettingTimeDialog()
        self.setting_time_dialog.setWindowModality(Qt.ApplicationModal)
        self.record = Recorder()
        self.timer = QTimer()
        self.timing_thread = TimingThread()
        self.play_thread = PlayAudioThread()
        self.is_recording = False   # whether a recording is currently in progress

        # Populate the reminder list, then wire up all signal handlers.
        self.display_all_reminders_list_from_existed_database()
        self.timer.timeout.connect(self.displaying_recording_time)
        self.pushButton.clicked.connect(lambda: self.start_or_stop_recording(self.is_recording))
        self.setting_time_dialog.pushButton.clicked.connect(self.set_time_from_dialog)
        self.listWidget.itemClicked.connect(self.play_corresponding_audio_file)  # itemClicked passes the clicked item as its argument
        self.item_delete_signal.connect(self.display_all_reminders_list_from_existed_database)
        self.timing_thread.time_out_signal.connect(self.display_all_reminders_list_from_existed_database)
Esempio n. 8
0
class ButtonRecorderPlayer(object):
    """Records audio between GPIO button presses, then tweets the file."""

    def __init__(self):
        self.isRecording = False
        self.p = pyaudio  # pyaudio module handle, passed through to Recorder
        self.rec = Recorder(channels=1)
        self.filename = ''
        self.t = TweetThis(filename=self.filename)

    def on_button(self, channel):
        """GPIO edge callback: toggle between recording and tweeting.

        BUG FIX: operate on self instead of the module-level `recPlayBtn`
        global so the handler works for whichever instance registered it.
        """
        print('button')
        if not self.isRecording:
            self.start_recording()
            self.isRecording = True
            time.sleep(1)
        else:
            self.stop_recording()
            self.isRecording = False
            # Detach while tweeting, then re-register the callback.
            gpio.remove_event_detect(17)
            self.listen()
            time.sleep(1)

    def listen(self):
        """(Re-)register the falling-edge button callback on pin 17."""
        gpio.add_event_detect(17,
                              gpio.FALLING,
                              callback=self.on_button,
                              bouncetime=100)
        print('listening')

    def start_recording(self, channel=1):
        """Open a timestamped WAV under twitter_recordings/ and start recording."""
        print('recording, click to stop recording')
        timestr = time.strftime("%Y%m%d-%H%M%S")
        gpio.output(4, True)  # LED on
        self.filename = 'twitter_recordings/' + timestr + '.wav'
        self.recfile = self.rec.open(self.filename, self.p, 'wb')
        self.recfile.start_recording()

    def stop_recording(self, channel=1):
        """Stop and close the recording, then tweet the resulting file."""
        self.recfile.stop_recording()
        self.recfile.close()
        gpio.output(4, False)  # LED off
        print('recording stopped')
        print('tweeting', self.filename)
        self.t.start(self.filename)
        time.sleep(1)
Esempio n. 9
0
def Client(sendBytes):
    """Send recorded voice bytes to the 'voice_service' ROS service and save
    the returned bytes as a WAV file.

    Args:
        sendBytes: raw audio bytes to send to the service.
    """
    rospy.wait_for_service('voice_service')
    try:
        client = rospy.ServiceProxy('voice_service', call)
        resp = client(sendBytes)
        print (type(resp))
        print (type(resp.returnBytes))
        with open("/home/ros/voice/robot_recv.wav", 'wb') as ff:
            ff.write(resp.returnBytes)
    except rospy.ServiceException as e:
        # BUG FIX: report the exception detail instead of swallowing it.
        print ("Service call failed: {}".format(e))

if __name__ == "__main__":
    # Main loop: wait for a keyboard wake-up, record, send, play the reply.
    while True:
        # Keyboard wake-up.
        print ("INPUT WAKEUP KEY:")
        key = input()
        if (key == "w"):
            r = Recorder()
            recordBytes = r.recorder()
            # with open('/home/ros/voice/robot_send.wav', 'rb') as f:
            #     sendBytes = f.read()  # read the entire file contents
            print ("send voice bytes")
            Client(recordBytes)
            # Play the service's reply through mplayer.
            os.system("mplayer /home/ros/voice/robot_recv.wav")
        else:
            print ("wakeup failed")



Esempio n. 10
0
class Renderer:
    """Renders dataset samples (images, segmentations, info json) driven by a
    config object."""

    def __init__(self, config):
        # Pipeline setup: logging/dirs, counters, blender scene, sampler.
        self.config = config

        self.prepare_log_and_dir()
        self.prepare_basic_settings()
        self.load_scene()
        self.prepare_sampler()

    def prepare_log_and_dir(self):
        """Create the log/output directory layout and the logger."""
        # Log dir name encodes the problem and the sample index range.
        self.log_dir = Path(self.config.output_log_dir).expanduser().joinpath(
            '{}_{}_{}'.format(self.config.problem, self.config.start,
                              self.config.start + self.config.n_sample - 1))
        self.logger = utils.get_logger(__name__,
                                       log_dir=self.log_dir,
                                       use_tqdm=True)

        self.output_dir = Path(
            self.config.output_data_dir).expanduser().joinpath(
                self.config.problem)
        self.image_dir = self.output_dir.joinpath('image')
        self.blend_dir = self.output_dir.joinpath('blend')
        self.info_dir = self.output_dir.joinpath('info')
        self.seg_dir = self.output_dir.joinpath('seg')

        directories = [self.image_dir, self.seg_dir, self.info_dir]

        # Blend/log dirs are only created when their outputs are enabled.
        if self.config.output_blend:
            directories.append(self.blend_dir)
        if self.config.output_log:
            directories.append(self.log_dir)
        for directory in directories:
            directory.mkdir(exist_ok=True, parents=True)

    def prepare_basic_settings(self):
        """Initialize the recorder, global seed and sample-index bookkeeping."""
        self.recorder = Recorder()

        # Seed derived from the problem name + configured seed so runs are
        # reproducible per problem.
        self.global_seed = utils.hash_seed(self.config.problem,
                                           self.config.seed)

        # BUG FIX: read from self.config rather than a bare `config` global,
        # consistent with the self.config usage above and in __init__.
        self.start = self.config.start
        self.end = self.config.start + self.config.n_sample
        self.current_idx = self.start
        self.current_stage = 0

        # How many samples to render before reloading the blender scene.
        self.scene_reload_time = 10

    def load_scene(self):
        """Load the base blender scene and configure rendering and cameras."""
        self.logger.info('Loading scene into blender...')
        self.scene = Scene(self.config.base_scene_file, self.config.shape_dir,
                           self.config.material_dir,
                           self.config.properties_json)
        self.scene.set_render_args(
            resolution_x=self.config.width,
            resolution_y=self.config.height,
            tile_x=self.config.render_tile_size,
            tile_y=self.config.render_tile_size,
            gpu=self.config.gpu,
            render_num_samples=self.config.render_num_samples,
            transparent_min_bounces=self.config.transparent_min_bounces,
            transparent_max_bounces=self.config.transparent_max_bounces,
            min_bounces=self.config.min_bounces,
            max_bounces=self.config.max_bounces)

        # Only cameras that exist in the scene AND are requested in config
        # are usable for each state.
        self.valid_cameras = self.scene.cameras.keys()
        self.initial_cameras = list(
            set(self.valid_cameras) & set(self.config.initial_cameras))
        self.final_cameras = list(
            set(self.valid_cameras) & set(self.config.final_cameras))
        self.logger.info('Available cameras for initial state: {}'.format(
            self.initial_cameras))
        self.logger.info('Available cameras for final state: {}'.format(
            self.final_cameras))

    def prepare_sampler(self):
        """Build the SampleManager from config-restricted attribute pairs."""
        # Only keep transformation pairs whose attribute is allowed by config.
        valid_pairs = [
            pair for pair in self.scene.option.pairs
            if pair.startswith(tuple(self.config.valid_attr))
        ]
        # BUG FIX: use self.config rather than a bare `config` global,
        # consistent with self.config.valid_attr above.
        cfg = self.config
        assert cfg.min_init_vis <= cfg.max_init_vis <= cfg.total_obj
        self.sample_manager = SampleManager(
            valid_init_vis=range(cfg.min_init_vis, cfg.max_init_vis + 1),
            total_obj=cfg.total_obj,
            valid_steps=range(cfg.min_trans, cfg.max_trans + 1),
            valid_pairs=valid_pairs,
            valid_move_type=['inner', 'out', 'in'],
            pair_split=self.scene.option.SPLIT)

    @property
    def str_stage(self):
        if self.current_stage == 0:
            return 'initial'
        elif self.current_stage == self.num_step:
            return 'final'
        else:
            return '{:02d}'.format(self.current_stage)

    @property
    def images_path(self):
        """Map camera name -> output image path for the current stage.

        Stage 0 renders use the initial cameras; all later stages use the
        final cameras.
        """
        cameras = self.initial_cameras \
            if self.current_stage == 0 else self.final_cameras
        return {
            cam: os.path.join(
                str(self.image_dir),
                '{}_{}_img_{:06d}-{}.{}.png'.format(self.config.dataset_prefix,
                                                    self.config.problem,
                                                    self.current_idx,
                                                    self.str_stage, cam))
            for cam in cameras
        }

    @property
    def segs_path(self):
        """Map final-camera name -> segmentation image path for this stage."""
        return {
            cam: os.path.join(
                str(self.seg_dir),
                '{}_{}_seg_{:06d}-{}.{}.png'.format(self.config.dataset_prefix,
                                                    self.config.problem,
                                                    self.current_idx,
                                                    self.str_stage, cam))
            for cam in self.final_cameras
        }

    @property
    def blend_path(self):
        """Path of the .blend snapshot for the current sample/stage."""
        return os.path.join(
            str(self.blend_dir),
            '{}_{}_{:06d}-{}.blend'.format(self.config.dataset_prefix,
                                           self.config.problem,
                                           self.current_idx, self.str_stage))

    @property
    def json_path(self):
        """Path of the per-sample info json (one file per sample index)."""
        return os.path.join(
            str(self.info_dir),
            '{}_{}_{:06d}.json'.format(self.config.dataset_prefix,
                                       self.config.problem, self.current_idx))

    @property
    def prefix(self):
        return '#{}-{} '.format(self.current_idx, self.current_stage)

    def info(self, message):
        """Log at INFO level with the '#idx-stage ' prefix."""
        self.logger.info(self.prefix + message)

    def warning(self, message):
        """Log at WARNING level with the '#idx-stage ' prefix."""
        self.logger.warning(self.prefix + message)

    def error(self, message):
        """Log at ERROR level (with traceback) with the '#idx-stage ' prefix."""
        self.logger.error(self.prefix + message, exc_info=True)

    def exclude_output_dir(self, path_dict):
        return {key: os.path.basename(path) for key, path in path_dict.items()}

    def set_seed(self):
        seed = [self.global_seed, self.current_idx]
        random.seed(str(seed))
        np.random.seed(seed)

    def run(self, replace_start=-1):
        """Generate and render samples with indices [self.start, self.end).

        Args:
            replace_start: samples that already have a json and whose index is
                >= this value are re-rendered from the saved json.
        """
        for self.current_idx in tqdm.trange(self.start, self.end, ncols=80):
            # Reload to avoiding the unknown issue of slowing down.
            if self.current_idx % self.scene_reload_time == 0:
                self.load_scene()

            # Skip existing samples.
            if os.path.isfile(self.json_path):
                info = self.sample_manager.record_json(self.json_path)
                if self.current_idx >= replace_start:
                    self.render_from_json(info)
                continue

            self.logger.info('---')
            self.set_seed()
            # Retry until a valid sample is built; a failed attempt removes
            # its partial outputs and re-samples.
            while True:
                try:
                    if self.build_sample():
                        self.sample_manager.sample_success()
                        self.summary()
                        break
                    else:
                        self.sample_manager.sample_fail()
                        self.clear_history()
                except KeyboardInterrupt:
                    self.warning('Keyboard Interrupted.')
                    sys.exit(1)
                except Exception as e:
                    self.error(str(e))

    """
    Rule A: no overlapping.
    Rule B: transformation must be observable
    1. occluded objects can not be transformmed (visible in initial states)
    2. transformed objects can not be moved out (visible in final states)
    3. transformed objects can not be occluded (visible in final states)
    """

    def build_sample(self):
        # Sample the initial state.
        self.current_stage = 0
        if not self.sample_init_state():
            return False
        self.record()

        # Decide the number of steps.
        self.num_step = self.sample_manager.random_step()
        self.logger.info('Number of atomic transformations: {}.'.format(
            self.num_step))

        # Apply randomly sampled atomic transformations one by one.
        for _ in range(self.num_step):
            self.current_stage += 1
            while True:
                pair = self.sample_manager.random_pair()
                if pair is None:
                    return False
                response = self.try_pair(pair)
                if response is None:
                    self.warning(
                        '[Fair] ({}/{}) {} -> no feasible object.'.format(
                            len(self.sample_manager.tried_pairs),
                            self.sample_manager.top_pair, pair))
                    self.sample_manager.stage_fail()
                else:
                    self.record(trans_response=response)
                    self.sample_manager.stage_success()
                    break

        # Show the sequence of transformation.
        for i, trans_info in enumerate(self.recorder.info['transformations']):
            self.logger.info('[{}] {} -> {}{}'.format(
                i, trans_info['pair'], trans_info['obj_idx'],
                ' ({})'.format(trans_info['type'])
                if trans_info['attr'] == 'position' else ''))

        # Save the information about the sample into a json file.
        self.info('Saveing info...')
        if not self.recorder.save(self.json_path, self.config.json_indent):
            return False

        with open(self.json_path, 'r') as f:
            info = json.load(f)
        self.render_from_json(info)

        return True

    def render_from_json(self, info):
        """Re-render a sample from its saved json description.

        Only the first and last stage are rendered unless
        config.render_intermediate is set.  Returns True on success.
        """
        cameras = info['cameras']
        lamps = info['lamps']
        self.num_step = len(info['states']) - 1
        render_stages = list(range(self.num_step + 1))
        if not self.config.render_intermediate:
            render_stages = [render_stages[0], render_stages[-1]]
        for s in render_stages:
            self.current_stage = s
            # Only set up and render the stage when at least one of its
            # images is missing.
            for _, path in self.images_path.items():
                if not os.path.isfile(path):
                    objects = info['states'][s]['objects']
                    self.scene.set_scene(objects, cameras, lamps)
                    if not self.render_main():
                        return False
                    break
        return True

    def sample_init_state(self):
        """Build a random initial scene state.

        Returns True on success, False when object placement runs out of
        space (caller retries).
        """
        self.info('Start to building initial state.')

        # Reset & initialize blender environment.
        self.info('Reset environment.')
        self.scene.reset()
        self.scene.perturb_camera(self.config.camera_jitter)
        self.scene.perturb_lamp(self.config.lamp_jitter)

        # Reset recorder.
        self.info('Reset recorder')
        self.recorder.init(self.scene, self.current_idx)

        # Decide the visibilities of objects.
        self.num_init_visible = self.sample_manager.random_init()
        visible_choices = [True] * self.num_init_visible \
            + [False] * (self.config.total_obj - self.num_init_visible)
        random.shuffle(visible_choices)

        # Randomly create and place objects.
        # BUG FIX: total_obj objects are created (len(visible_choices)), not
        # max_init_vis, so log the correct count.
        self.info('Creating {} random objects, {} visible...'.format(
            self.config.total_obj, self.num_init_visible))
        for i, visible in enumerate(visible_choices):
            obj = self.scene.create_random_object(visible=visible)
            if obj is None:
                self.warning('{} No enough space for {} objects.'.format(
                    i, 'visible' if visible else 'invisible'))
                self.warning('Failed to build initial state. Retry...')
                return False
            else:
                self.info('{} ({:^9s}) Put a {:6s} {:6s} {:6s} {:8s}'
                          ' at ({:3d}, {:3d})'.format(
                              i, 'visible' if visible else 'invisible',
                              obj['size'], obj['color'], obj['material'],
                              obj['shape'], *obj['position']))

        self.info('* Successfully build initial state.')
        return True

    def try_pair(self, pair):
        """Try to apply the transformation `pair` to some feasible object.

        Position pairs first sample a move type; other pairs are tried on all
        valid objects directly.  Returns the transformation response on
        success, or None when no object/move type works.
        """
        # Try sampled atomic transformations on objects.
        while True:
            move_type = self.sample_manager.random_move_type() \
                if pair.startswith('position') else None
            if pair.startswith('position') and move_type is None:
                return None
            objs = self.get_valid_objs(pair, move_type=move_type)
            self.info('Valid objects for {}: {}'.format(pair, objs))
            while len(objs) > 0:
                obj_idx = self.sample_manager.random_obj(valid_objs=objs)
                objs.remove(obj_idx)
                is_success, response = self.transform(
                    self.scene.objects[obj_idx], pair, move_type)
                if is_success:
                    return response
            # Non-position pairs have no other move types to try.
            if not pair.startswith('position'):
                return None
            else:
                self.warning('[Fail] ({}/{}) move {}'.format(
                    len(self.sample_manager.tried_move_type),
                    self.sample_manager.top_move_type, move_type))

    def get_valid_objs(self, pair, move_type=None):
        """Indices of objects eligible for a transformation.

        'in' moves pick from invisible objects; everything else picks from
        non-occluded visible ones.
        NOTE(review): `pair` is currently unused here — only move_type affects
        the choice; confirm that is intentional.
        """
        # Rule 1. Occluded objects can not be transformmed.
        if move_type == 'in':
            choices = self.scene.invisible_objects
        else:
            choices = self.get_non_occluded_objs()

        choices = [self.scene.get_index(obj) for obj in choices]

        return choices

    def transform(self, obj, pair, move_type=None):
        """Dispatch the attribute transformation `pair` to its t_<attr> method.

        Returns (success, response); the response also records the pair and
        the target object's name/index.
        """
        # pair is '<attr><SPLIT><value...>'; split it and pop the attr so
        # `trans` becomes the positional args [obj, value, ...].
        trans = [obj, *pair.split(self.scene.option.SPLIT)]
        attr = trans.pop(1)

        trans_info = {
            'pair': pair,
            'obj_name': self.scene.get_name(obj),
            'obj_idx': self.scene.get_index(obj)
        }
        trans_func = getattr(self, 't_{}'.format(attr))
        # Only position transforms take the extra move_type argument.
        args = trans + [move_type] if move_type else trans
        self.info('{} <- {}'.format(trans_info['obj_name'], pair))
        is_success, response = trans_func(*args)

        response.update(trans_info)

        return is_success, response

    def t_position(self, obj, direction, step, move_type):
        """Try to move obj by `step` cells along `direction`.

        The resulting move must be valid, match the requested move_type
        ('inner'/'out'/'in'), and obey rules A/2/3; on any failure the
        position is reverted.  Returns (success, response).
        """
        step = int(step)
        response = {
            'attr': 'position',
            'old': self.scene.get_position(obj),
            'new': '',
            'target': (direction, step),
            'type': '',
            'options': [],
        }

        x, y = response['old']
        dx, dy = self.scene.option.get_delta_position(direction, step)
        new_x = x + dx
        new_y = y + dy

        response['new'] = (new_x, new_y)

        if not self.scene.option.is_position_valid(new_x, new_y):
            self.warning('[Fail] position ({}, {}) is invalid.'.format(
                new_x, new_y))
            return False, response

        # Classify the move by the visibility change it causes.
        vis_old = self.scene.option.is_visible(x, y)
        vis_new = self.scene.option.is_visible(new_x, new_y)

        if vis_old and vis_new:
            response['type'] = 'inner'
        elif vis_old and (not vis_new):
            response['type'] = 'out'
        elif (not vis_old) and vis_new:
            response['type'] = 'in'
        else:
            self.warning('[Fail] move object inside invisible area.')
            return False, response

        if response['type'] != move_type:
            self.warning('[Fail] move type ({} vs. {}) not match.'.format(
                response['type'], move_type))
            return False, response

        # Rule 2. Transformed objects can not be moved out.
        if response['type'] == 'out' and self.was_transformed(obj):
            self.warning('[Fail] Modified objects can not be moved out.')
            return False, response

        self.scene.set_position(obj, new_x, new_y)

        if self.scene.is_overlapped(obj):
            self.warning('[Fail] Overlap.')
        # Rule 3. Transformed objects can not be occluded.
        elif self.cause_occlusion(obj):
            self.warning('[Fail] Cause occlusion.')
        else:
            # For 'out' moves any invisible destination is equivalent, so
            # record all options; otherwise the single (direction, step).
            if response['type'] == 'out':
                response['options'].extend(
                    self.scene.option.get_move_options(x, y)['invisible'])
            else:
                response['options'].append((direction, step))
            return True, response

        # Revert.
        self.scene.set_position(obj, x, y)
        return False, response

    def t_shape(self, obj, new_shape):
        """Change obj's shape; fails when the shape is unchanged.

        NOTE(review): unlike t_size/t_position, no overlap/occlusion check is
        performed after the change — confirm this is intentional.
        """
        response = {
            'attr': 'shape',
            'old': self.scene.get_shape(obj),
            'new': None,
            'target': None,
        }
        old_shape = response['old']
        if new_shape == old_shape:
            self.warning('[Fail] no change.')
            return False, response
        # set_shape returns the (possibly re-created) object.
        obj = self.scene.set_shape(obj, new_shape)
        response['new'] = response['target'] = new_shape

        return True, response

    def t_size(self, obj, new_size):
        """Change obj's size; fails on no-op, overlap, or caused occlusion.

        On failure the old size is restored.  Returns (success, response).
        """
        response = {
            'attr': 'size',
            'old': self.scene.get_size(obj),
            'new': new_size,
            'target': new_size,
        }
        old_size = response['old']
        if new_size == old_size:
            self.warning('[Fail] no change.')
            return False, response

        self.scene.set_size(obj, new_size)

        # Only growing can introduce overlap, so the check is conditional.
        if self.scene.option.is_bigger(new_size, old_size) and \
                self.scene.is_overlapped(obj):
            self.warning('[Fail] Overlap.')
        # Rule 3. transformed objects can not be occluded (visible in final states)
        elif self.cause_occlusion(obj):
            self.warning('[Fail] Cause occlusion.')
        else:
            return True, response

        # Revert on failure.
        self.scene.set_size(obj, old_size)
        return False, response

    def t_material(self, obj, new_material):
        response = {
            'attr': 'material',
            'old': self.scene.get_material(obj),
            'new': new_material,
            'target': new_material,
        }
        old_material = response['old']
        if new_material == old_material:
            self.warning('[Fail] no change.')
            return False, response

        self.scene.set_material(obj, new_material)
        return True, response

    def t_color(self, obj, new_color):
        response = {
            'attr': 'color',
            'old': self.scene.get_color(obj),
            'new': new_color,
            'target': new_color,
        }
        old_color = response['old']
        if new_color == old_color:
            self.warning('[Fail] no change.')
            return False, response

        self.scene.set_color(obj, new_color)
        return True, response

    def render_main(self):
        """Render any missing main images for the current stage.

        Returns True on success (or when rendering is disabled); False when a
        render produced no file.
        """
        if not self.config.no_render:
            self.info('[Render] main ({})...'.format(self.current_stage))
            for key, image_path in self.images_path.items():
                if not os.path.isfile(image_path):
                    # BUG FIX: use self.config instead of a bare `config`
                    # global, consistent with self.config.no_render above.
                    time_used = self.scene.render(image_path,
                                                  self.scene.cameras[key],
                                                  self.config.width,
                                                  self.config.height)
                    self.info('- {}: {}'.format(key,
                                                utils.time2str(time_used)))
                    if not os.path.isfile(image_path):
                        return False
        return True

    def render_seg(self):
        """Render segmentation maps for the current stage.

        Returns True on success; False when a render produced no file.
        """
        self.info('[Render] seg ({})...'.format(self.current_stage))
        for key, seg_path in self.segs_path.items():
            # BUG FIX: use self.config instead of a bare `config` global,
            # consistent with the rest of the class.
            time_seg_used = self.scene.render_shadeless(
                seg_path, self.scene.cameras[key], self.config.seg_width,
                self.config.seg_height)
            self.info('- {}: {}'.format(key, utils.time2str(time_seg_used)))
            if not os.path.isfile(seg_path):
                return False
        return True

    def record(self, trans_response=None):
        """Record the current scene state (plus an optional transformation)
        into the recorder, and optionally save a .blend snapshot."""
        status = self.get_objs_status()

        self.recorder.record_scene(self.scene, self.current_stage,
                                   self.exclude_output_dir(self.images_path),
                                   self.exclude_output_dir(self.segs_path),
                                   status['n_pixels'], status['status'])
        # A transformation links the previous stage to the current one.
        if trans_response is not None:
            self.recorder.record_trans(self.current_stage - 1,
                                       self.current_stage, trans_response)

        if self.config.output_blend:
            self.info('[Save] Blender file.')
            self.scene.save(self.blend_path)

    def get_objs_status(self, stage=None):
        """Per-camera visibility status and pixel counts for a stage.

        Already-recorded stages return cached values; stage == len(states)
        (the stage not yet recorded) triggers a segmentation render to
        compute them.
        """
        if stage is None:
            stage = self.current_stage
        states = self.recorder.info['states']
        assert -len(states) <= stage <= len(states), (
            'stage {} is out of range'.format(stage))
        if -len(states) <= stage < len(states):
            return {
                'status': states[stage]['status'],
                'n_pixels': states[stage]['n_pixels']
            }
        else:
            # stage == len(states): compute fresh from segmentation renders.
            assert self.render_seg(), '[Error] render seg failed.'
            n_pixels = {}
            cams = list(set(self.initial_cameras) | set(self.final_cameras))
            for cam in cams:
                image_path = self.segs_path[cam]
                n_pixels[cam] = self.count_pixels(image_path)
            status = self.pixels2status(n_pixels)
            return {'status': status, 'n_pixels': n_pixels}

    def count_pixels(self, seg_path):
        """Count rendered pixels per object from a segmentation image.

        Colors that do not map to a known object are ignored.
        """
        assert os.path.isfile(seg_path), '[Error] {} doesn\'t exist.'.format(
            seg_path)
        pixels = {}
        colors = utils.count_color(seg_path)
        for n, color in sorted(colors):
            obj = self.scene.option.get_seg_object(color)
            if obj is not None:
                pixels[obj] = n
        return pixels

    def pixels2status(self, n_pixels):
        """Convert per-camera pixel counts into visibility status labels.

        An object is 'invisible' when placed outside the visible area,
        'occluded' when its pixel count is below the occlusion threshold,
        otherwise 'visible'.
        """
        status = defaultdict(dict)
        min_pixels = self.config.occlusion_threshold
        for cam, val in n_pixels.items():
            for obj in self.scene.objects:
                if obj in self.scene.invisible_objects:
                    status[cam][obj.name] = 'invisible'
                elif obj.name not in val or val[obj.name] < min_pixels:
                    status[cam][obj.name] = 'occluded'
                else:
                    status[cam][obj.name] = 'visible'
        return status

    def cause_occlusion(self, obj):
        """Check Rule 3: no relevant object may be occluded.

        Considers obj itself plus every previously transformed object, across
        all final cameras.  Returns the index of an occluded object, or None.
        """
        status = self.get_objs_status(stage=self.current_stage)['status']
        for x in self.scene.objects:
            if x == obj or self.was_transformed(x):
                x_name = self.scene.get_name(x)
                for cam in self.final_cameras:
                    if status[cam][x_name] == 'occluded':
                        return self.scene.get_index(x)
        return None

    def get_non_occluded_objs(self):
        """Visible objects that are not occluded in any initial camera."""
        status = self.get_objs_status(stage=0)['status']
        # Copy so removals don't mutate the scene's own list.
        objects = self.scene.visible_objects.copy()
        for cam in self.initial_cameras:
            for obj_name, state in status[cam].items():
                obj = self.scene.b_objects[obj_name]
                if state == 'occluded' and obj in objects:
                    objects.remove(obj)
        return objects

    def was_transformed(self, obj, attr=None):
        for trans_info in self.recorder.info['transformations']:
            if obj.name == trans_info['obj_name'] and (
                    attr is None or trans_info['attr'] == attr):
                return True
        return False

    def clear_history(self):
        """Delete the image/segmentation files produced by a failed sample."""
        self.logger.info('Removing files after failed...')
        for scene in self.recorder.info['states']:
            for image in scene['images'].values():
                img_path = self.image_dir / image
                if img_path.exists():
                    img_path.unlink()
            for seg in scene['segs'].values():
                seg_path = self.seg_dir / seg
                if seg_path.exists():
                    seg_path.unlink()

    def summary(self):
        """Log progress, timing statistics and the sampler's balance state."""
        self.logger.info('---')
        self.logger.info('Data dir: {}'.format(self.output_dir.resolve()))
        if self.config.output_log:
            self.logger.info('Log dir: {}'.format(self.log_dir.resolve()))

        self.logger.info('---')
        state = self.sample_manager.state()
        self.logger.info('Progress: {} (load: {})'.format(
            state['sample_success'] + state['sample_load'],
            state['sample_load']))
        self.logger.info('Sample success: {} ({:.4f})'.format(
            state['sample_success'], state['rate_sample_success']))
        self.logger.info('Stage success: {} ({:.4f})'.format(
            state['stage_success'], state['rate_stage_success']))
        self.logger.info('Total time: {}'.format(utils.time2str(
            state['time'])))
        self.logger.info('Average sample time: {}'.format(
            utils.time2str(state['time_avg_sample'])))
        self.logger.info('Average stage time: {}'.format(
            utils.time2str(state['time_avg_stage'])))

        self.logger.info('---')
        self.logger.info('Initial Visible Object:')
        for init_vis, num in sorted(self.sample_manager.n_init_vis.items()):
            self.logger.info('- {}: {}'.format(init_vis, num))

        self.logger.info('---')
        self.logger.info('Steps:')
        for key, num in sorted(self.sample_manager.n_step.items()):
            self.logger.info('- {}: {}'.format(key, num))

        self.logger.info('---')
        self.logger.info('Object:')
        for key, num in sorted(self.sample_manager.n_obj.items()):
            self.logger.info('- {}: {}'.format(key, num))

        self.logger.info('---')
        self.logger.info('Pair:')
        for key, num in sorted(self.sample_manager.n_pair['gram_1'].items()):
            self.logger.info('- {}: {}'.format(key, num))

        self.logger.info('---')
        self.logger.info('Move Type:')
        for key, num in sorted(self.sample_manager.n_move_type.items()):
            self.logger.info('- {}: {}'.format(key, num))

        self.logger.info('---')
        self.logger.info('Balance State')
        for key, state in sorted(self.sample_manager.balance_state.items()):
            self.logger.info('# {}'.format(key))
            for k, v in sorted(state.items()):
                self.logger.info('- {}: {}'.format(k, v))
Esempio n. 11
0
class Example(Frame):
    """Tkinter front-end for the Recorder.

    Lets the user pick a log directory, then start/stop recording while a
    live counter shows how many records have been stored so far.
    """

    def __init__(self, root):
        super().__init__()
        self.master = root

        # Text variables bound to the widgets built in init_ui().
        self.log_directory_text = StringVar()
        self.record_button_text = StringVar()
        self.record_count_text = StringVar()
        self.log_directory_text.set("please select log directory ->")
        self.record_button_text.set("Start Recording")
        self.record_count_text.set("0 records")

        self.recorder = Recorder()
        self.init_ui()
        self.is_recording = False

    def init_ui(self):
        """Build the two-row UI: directory picker on top, record controls below."""
        self.master.title("TA Recorder")
        self.master.resizable(True, False)
        self.pack(fill=BOTH, expand=True)

        # Row 1: caption, directory label, "..." browse button.
        dir_row = Frame(self)
        dir_row.pack(fill=X)

        caption = Label(dir_row, text="LOG DIR", width=10)
        caption.pack(side=LEFT, padx=5, pady=5)

        browse_btn = Button(dir_row, text="...", width=4, command=self.select_dir)
        browse_btn.pack(side=RIGHT, padx=5, pady=5)

        dir_label = Label(dir_row,
                          text="please select log directory ->",
                          textvar=self.log_directory_text,
                          width=10,
                          borderwidth=2,
                          relief="sunken")
        dir_label.pack(fill=X, padx=5, expand=True)
        self.lbl_dir = dir_label

        # Row 2: record counter and start/stop toggle button.
        rec_row = Frame(self)
        rec_row.pack(fill=X)

        toggle_btn = Button(rec_row,
                            text="Start Recording",
                            textvar=self.record_button_text,
                            command=self.toggle_record)
        toggle_btn.pack(side=RIGHT, padx=5, pady=5)
        counter = Label(rec_row,
                        text="0 data records",
                        textvar=self.record_count_text,
                        width=10)
        counter.pack(fill=X, padx=5, pady=5)

    def select_dir(self):
        """Ask the user for a log directory and show its current record id."""
        dir_name = filedialog.askdirectory()
        current_id = self.recorder.set_directory(dir_name)
        self.log_directory_text.set(dir_name)
        self.update_record_count(current_id)

    def toggle_record(self):
        """Flip between recording and idle, updating the button caption."""
        if not self.is_recording:
            self.is_recording = True
            self.record_button_text.set("Stop")
            self.start_rec()
        else:
            self.stop_rec()
            self.is_recording = False
            self.record_button_text.set("Start Recording")

    def start_rec(self):
        """Start the recorder and poll it every millisecond via Tk's after()."""
        self.recorder.init_rec()

        def tick():
            # Store one record, refresh the counter, then reschedule.
            cur_id = self.recorder.store_rec()
            self.update_record_count(cur_id)
            self.nextstep_id = self.master.after(1, tick)

        self.nextstep_id = self.master.after(1, tick)

    def stop_rec(self):
        """Cancel the pending after() callback, halting the poll loop."""
        self.master.after_cancel(self.nextstep_id)
        print("stopped")

    def update_record_count(self, count):
        """Reflect *count* in the counter label."""
        self.record_count_text.set('%i records' % count)
Esempio n. 12
0
 def __init__(self, file_name):
     """Wire up the processing pipeline for *file_name*.

     The Recorder reads the file and the Reporter renders what the
     Recorder collected.
     """
     self._file_name = file_name
     self._recorder = Recorder(self._file_name)
     self._reporter = Reporter(self._recorder)
Esempio n. 13
0
#!/usr/bin/env python

import RPi.GPIO as GPIO
from record import Recorder
from control import Control
from command import Command
import time
import logging

# BCM pin number the push button is wired to; main() treats a reading of 0
# as "pressed" (active-low).
GPIO_PIN = 3

# Module-level singletons shared by main() and button_press().
recorder = Recorder()
control = Control()
logger = None


def main():
    """Entry point: poll the GPIO button and trigger button_press() on press.

    NOTE(review): the first two lines call button_press() once and then
    return, so the GPIO setup and polling loop below are unreachable —
    this looks like debug leftover; confirm before removing.
    """
    button_press()
    return
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(GPIO_PIN, GPIO.IN)
    while True:
        button_state = GPIO.input(GPIO_PIN)
        #print button_state
        if button_state == 0:
            button_press()


def button_press():
    """Wait out contact bounce, then re-read the button state.

    NOTE(review): the re-read value is unused here — the remainder of this
    handler is presumably truncated in this copy of the source; confirm
    against the original file.
    """
    time.sleep(1)
    button_state = GPIO.input(GPIO_PIN)
Esempio n. 14
0
def test_recorder_raises_illegal_record_error():
    """A row with a malformed method field ("PUT1") must raise IllegalRecordError."""
    bad_row = ["132332", "/a/b/c", "PUT1", 10]
    recorder = Recorder("test_file")
    with pytest.raises(IllegalRecordError):
        recorder._create_record(bad_row)
Esempio n. 15
0
                    help="Time interval between each data point in seconds")
parser.add_argument("--directory", "--dir", "-f", type=str, default="",
                    help="Directory to save log files")
parser.add_argument("--server", "--ip", type=str,
                    default="roborio-4774-frc.local", help="ip of the roborio")
# --camera may be passed multiple times; argparse collects them into a list
# (or leaves args.camera as None when the flag is absent).
parser.add_argument("--camera", "--cam", "-c", action="append", type=str,
                    help="ip of camera server")

args = parser.parse_args()

# Default to the two on-robot camera streams when none were given.
if args.camera is None:
    args.camera = ["http://10.47.74.36:1181/stream.mjpg",
                   "http://10.47.74.36:1182/stream.mjpg"]  # [0,1] testing

# One Recorder per camera stream, each writing a timestamped camN-....mp4;
# the refresh rate passed to Recorder is clamped to at least 1.
current_time = time.strftime("%H-%M-%S %d-%m-%y")
streams = [Recorder(args.directory + "cam" + str(num) + "-" + current_time +
                    ".mp4", stream, int(max(args.refresh_rate, 1)))
           for num, stream in enumerate(args.camera, start=1)]

logger = Log(4774)  # team number, matching the default roborio hostname above

if __name__ == "__main__":
    while True:
        start = time.monotonic()

        for stream in streams:
            stream.record()

        logger.log()

        elapsed = time.monotonic() - start
        if 1/args.refresh_rate - elapsed > 0:
Esempio n. 16
0
 def run(self):
     """Thread entry point: create a Recorder and start capturing."""
     self.rec = Recorder()
     self.rec.start()
Esempio n. 17
0
def test_recorder_raises_nofile_exception():
    """Reading a CSV that does not exist must raise FileDoesNotExistError."""
    rec = Recorder("test_file")
    with pytest.raises(FileDoesNotExistError):
        rec._read_csv()
Esempio n. 18
0
 def __init__(self):
     """Set up recording state, the audio backend and the tweeting helper."""
     self.isRecording = False
     self.p = pyaudio
     self.rec = Recorder(channels=1)
     self.filename = ''
     # NOTE(review): TweetThis receives the *current* (empty) filename at
     # construction time — confirm it re-reads self.filename later,
     # otherwise it will try to tweet ''.
     self.t = TweetThis(filename=self.filename)
Esempio n. 19
0
class MyWidget(QWidget, Ui_Form):
    """
    Main window: record voice memos and schedule them as timed reminders.
    """
    item_delete_signal = DeleteItemSignal.my_signal().signal

    def __init__(self, parent=None):
        super(MyWidget, self).__init__(parent)
        self.setupUi(self)
        self.setWindowTitle("VoiceReminder")
        self.pushButton.setStyleSheet("QPushButton{border-image: url(img/start.png)}")
        self.lcdNumber.setDigitCount(2)
        self.lcdNumber.setVisible(False)

        self.setting_time_dialog = SettingTimeDialog()
        self.setting_time_dialog.setWindowModality(Qt.ApplicationModal)
        self.record = Recorder()
        self.timer = QTimer()
        self.timing_thread = TimingThread()
        self.play_thread = PlayAudioThread()
        self.is_recording = False   # whether a recording is currently in progress

        self.display_all_reminders_list_from_existed_database()
        self.timer.timeout.connect(self.displaying_recording_time)
        self.pushButton.clicked.connect(lambda: self.start_or_stop_recording(self.is_recording))
        self.setting_time_dialog.pushButton.clicked.connect(self.set_time_from_dialog)
        self.listWidget.itemClicked.connect(self.play_corresponding_audio_file)  # itemClicked passes the clicked item as its argument
        self.item_delete_signal.connect(self.display_all_reminders_list_from_existed_database)
        self.timing_thread.time_out_signal.connect(self.display_all_reminders_list_from_existed_database)

    def displaying_recording_time(self):
        """
        Fired once per second while recording: bumps the LCD by one so it
        shows the elapsed recording time in seconds.
        """
        self.lcdNumber.display(self.lcdNumber.intValue() + 1)

    def start_or_stop_recording(self, flag):
        """
        Toggle handler for the record button. *flag* is the is_recording
        state at click time: False starts a recording; True stops it, hides
        the LCD and pops the reminder-time dialog centred over this window.
        """
        if not flag:    # start recording
            self.lcdNumber.setVisible(True)
            self.start_recording()
            self.timer.start(1000)
        else:   # stop recording, then show the reminder-time dialog
            self.timer.stop()
            self.lcdNumber.setVisible(False)
            self.lcdNumber.display(0)
            self.stop_recording()
            self.setting_time_dialog.show_in_somewhere(
                self.pos().x() + (self.width() - self.setting_time_dialog.width()) // 2,
                self.pos().y() + (self.height() - self.setting_time_dialog.height()) // 2)
            self.setting_time_dialog.display_time_same_as_current_time()

    def start_recording(self):
        """
        Begin audio capture and switch the button image to the stop icon.
        """
        self.is_recording = True
        self.pushButton.setStyleSheet("QPushButton{border-image: url(img/stop.png)}")
        self.record.start()

    def stop_recording(self):
        """
        End audio capture, save the recording and restore the start icon.
        """
        self.is_recording = False
        self.pushButton.setStyleSheet("QPushButton{border-image: url(img/start.png)}")
        self.record.stop()
        self.record.save()

    def set_time_from_dialog(self):
        """
        After the user picks a time, persist (time string, audio filename)
        to the database file and refresh the reminder list.
        """
        date_string = self.setting_time_dialog.dateEdit.date().toString("yyyy-MM-dd")
        time_string = self.setting_time_dialog.timeEdit.time().toString("HH:mm")
        target_time_string = date_string + "  " + time_string
        self.setting_time_dialog.setVisible(False)
        save_reminder_information_to_database(target_time_string, self.record.filename)
        self.display_all_reminders_list_from_existed_database()

    def display_all_reminders_list_from_existed_database(self, database_file=DATABASE_FILE):
        """
        Reload every reminder from the database file and redraw the list:
        pending reminders first (black), finished ones afterwards in grey.
        """
        self.listWidget.clear()
        try:
            reminder_list = get_data_from_database(database_file)
            if not reminder_list:
                print("无语音待办提醒")
                return
            current_time_string = get_current_time_string()  # "now", used to split expired from pending reminders
            happened, not_happened = split_into_happened_and_not_happened_ones(current_time_string, reminder_list)
            if not not_happened:
                self.timing_thread.set_time_and_audio_filename('', '')
                # no pending reminders: empty arguments make the timing thread stop
                print("所有语音待办提醒均已完成")
            else:
                self.timing_thread.set_time_and_audio_filename(not_happened[0][0], not_happened[0][1])
            self.timing_thread.start()
            not_happened_length = len(not_happened)
            for i, reminder in enumerate(not_happened + happened[::-1]):
                item = MyQListItem(name=reminder[0])
                item.setToolTip(reminder[1])
                if i >= not_happened_length:    # finished reminders are greyed out
                    item.label.setStyleSheet("color:#aaaaaa;")
                else:
                    item.label.setStyleSheet("color:#000000;")
                self.listWidget.addItem(item)
                self.listWidget.setItemWidget(item, item.widget)
        except FileNotFoundError:
            print("您还没有创建任何语音待办提醒")

    def play_corresponding_audio_file(self, item):
        """
        Play the audio file associated with the clicked list item; if the
        file is missing, offer to delete the stale reminder entry.
        """
        print(item.label.text(), "Voice filename:", item.toolTip())
        try:
            self.play_thread.set_wav_filename(item.toolTip())
            self.play_thread.start()
        except FileNotFoundError:
            # matching .wav not found: optionally delete this entry
            msg_box = QMessageBox()
            ret = msg_box.warning(self, "Warning", "Can't find .wav file, will u want to delete it?",
                                  QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
            if ret == QMessageBox.Yes:
                delete_reminder_from_database(item.label.text(), item.toolTip())
                self.display_all_reminders_list_from_existed_database()
Esempio n. 20
0
    def run(self):
        """
        Serve audio classification over a socket: clear the cache
        directory, start a Recorder that fills it, then loop — predict each
        cached audio file, send the JSON result to the client and
        archive/delete the file — until terminated.
        """
        # Clear the cache: delete already-evaluated files, or move them
        # into evaledDir when deleteEvaled is off.
        files = os.listdir(self.cacheDir)
        for fname in files:
            if fname == "evaled":
                continue
            cachedFile = self.cacheDir + fname
            if self.deleteEvaled:
                os.remove(cachedFile)
            else:
                if (os.path.exists(self.evaledDir + fname)):
                    os.remove(self.evaledDir + fname)
                shutil.move(cachedFile, self.evaledDir)

        print("服务已部署,等待连接中")
        handler = easyconn.connectAcpt(self.port)
        recoder = Recorder(self.cacheDir)
        recoder.start()
        self.running = True
        print("连接已建立,正在提供分类服务")

        while (self.terminated is False):

            files = os.listdir(self.cacheDir)
            for fname in files:  # drain the whole cache directory

                # produce one prediction per cached file
                if fname == 'evaled':  # skip the "evaled" subdirectory
                    continue
                inputFile = self.cacheDir + fname
                y = None
                try:  # predict; a failed read leaves y as None
                    y = ac.evaluate.eval_file(model, inputFile)
                except ValueError:
                    print(f"音频{fname}读取失败")
                except FileNotFoundError:
                    print(f"音频{fname}读取失败")

                if y is not None:
                    jstr = toJson(y, os.path.getctime(inputFile))
                    try:
                        handler.send_data(jstr)
                    except ConnectionAbortedError:
                        # client went away: stop serving
                        self.terminated = True
                        break
                    print(f"自{fname}预测: {jstr}")
                if self.deleteEvaled and os.path.exists(inputFile):
                    os.remove(inputFile)
                else:
                    if (os.path.exists(self.evaledDir + fname)):
                        os.remove(self.evaledDir + fname)
                    shutil.move(inputFile, self.evaledDir)

            if len(files) <= 1:  # directory effectively empty (only "evaled"): wait
                time.sleep(0.1)

        #  shutdown: stop the recorder thread and close the connection
        recoder.terminated = True
        recoder.join()
        self.running = False
        handler.con.close()
        del handler
        print("服务终止")
Esempio n. 21
0
 def __init__(self):
     """Initialise playback state, the audio backend, a mono Recorder and
     the (not yet started) button-listener thread."""
     self.isPlaying = True
     self.p = pyaudio
     self.rec = Recorder(channels=1)
     self.play = None
     self.playback_thread = threading.Thread(name='button_listener', target=self.button_listener)
Esempio n. 22
0
    def on_click_play(self):
        """
        Play each selected sound file (optionally shuffled) for the
        requested number of repetitions and record the speaker's response
        to a timestamped .wav file after each sound.
        """
        # Number of trials; non-numeric input falls back to one repetition.
        if self.trial_no.text().isdigit():
            repeditions = int(self.trial_no.text())
        else:
            repeditions = 1

        if not self.soundfiles:
            mb = QMessageBox()
            mb.setLayout  # NOTE(review): bare attribute access — does nothing
            questionstart = mb.information(
                self, "Information",
                "No sound files location selected. Please Open sounds' location first.",
                QMessageBox.No | QMessageBox.Yes, QMessageBox.Yes)
            pass
        else:
            counter = 1
            mb = QMessageBox()
            mb.setLayout  # NOTE(review): bare attribute access — does nothing
            questionstart = mb.information(self, "Information",
                                           "<font size = 10 >Ready?</font>",
                                           QMessageBox.No | QMessageBox.Yes,
                                           QMessageBox.Yes)

            if questionstart == mb.Yes:
                while counter <= repeditions:
                    # Shuffle the playlist when the checkbox is ticked.
                    if self.randomize.isChecked():
                        file_list_order = self.soundfiles
                        file_list_order = random.sample(
                            file_list_order, len(file_list_order))
                    else:
                        file_list_order = self.soundfiles

                    for s in file_list_order:
                        try:
                            if system() == "Windows":
                                os.system("start " + s)
                                time.sleep(.900)
                            else:
                                os.system("play " + s)
                                # NOTE(review): rec is only bound on this
                                # non-Windows branch — on Windows the
                                # "with rec.open(...)" below raises
                                # NameError; confirm intended behavior.
                                rec = Recorder()
                        except KeyboardInterrupt:
                            break
                        currentDT = datetime.datetime.now()
                        if self.speaker_name.text() == "":
                            recordedfile = 'sound-' + \
                                str(counter) + "-" + \
                                str(currentDT.strftime(
                                    "%Y-%m-%d-%H_%M")) + '.wav'
                        else:
                            recordedfile = self.speaker_name.text() + '-repedition-' + \
                                str(counter) + "-" + \
                                str(currentDT.strftime(
                                    "%Y-%m-%d-%H_%M")) + '.wav'
                            # NOTE(review): trial1record is only assigned in
                            # this else-branch; with an empty speaker name
                            # the reference below raises NameError — confirm.
                            trial1record = QMessageBox.question(
                                self, "Repedition " + str(counter),
                                "Repedition " + str(counter) +
                                "\nRecording... Press Yes to move to the next sound. Press No to quit.",
                                QMessageBox.No | QMessageBox.Yes,
                                QMessageBox.Yes)
                        with rec.open(recordedfile, 'wb') as recfile:
                            recfile.start_recording()
                            if trial1record == QMessageBox.Yes:
                                recfile.stop_recording()
                            else:
                                recfile.stop_recording()
                                break
                    counter += 1

                QMessageBox.information(self, "Good job!", "Very good!")
            else:
                pass
Esempio n. 23
0
                        action='store_true',
                        help='show list of audio devices and exit')
    parser.add_argument('-d',
                        '--device',
                        type=int,
                        default=DEVICE,
                        help='numeric id of input deviec')  # NOTE(review): typo "deviec" in user-visible help text

    args = parser.parse_args()
    list_devices = args.list_devices
    device = args.device

    if list_devices:
        # -l: just enumerate audio devices and fall through to exit.
        print(sd.query_devices())
    else:
        # Record until the user presses Enter, then transcribe the file.
        # NOTE(review): raw_input is Python 2 only — under Python 3 this
        # raises NameError; confirm which interpreter this targets.
        recorder = Recorder(device)
        raw_input('Press Enter to start recording')
        record_file = recorder.record()
        print('Recording...')
        raw_input('Press Enter to stop recording')
        recorder.stop()
        print('Recording finished.')
        transcriber = TranscribeClient()
        results = transcriber.transcribe(record_file)

        print('Transcript: {}'.format(results[0]))

        type(results)  # NOTE(review): no-op — the result of type() is discarded

        # Strip surrounding quotes before deleting the temporary recording.
        record_file = record_file.strip("'")
        os.remove(record_file)
Esempio n. 24
0
def test_recorder_creates_record_from_csv():
    """_create_record parses one CSV row and appends exactly one record."""
    recorder = Recorder("test_file")
    row = ["132332", "/a/b/c", "PUT", 10]
    assert len(recorder._records) == 0
    recorder._create_record(row)
    assert len(recorder._records) == 1