 def get_recordings(self):
     """Return a list of all Recordings from Asterisk."""
     result = self._api.call('recordings', http_method='GET')
     # Temporary until method is implemented
     result_list = [Recording(self._api), Recording(self._api)]
     #recordings = [Recording(x) for x in result]
     return result_list
Example #2
 def display(self,
             config,
             onlineDisplay=False,
             saveDisplay=True,
             forChecking=False,
             labelEncoder=None):
     """
     Function displaying all files of the dataset.
     Each file is dealt with as a Recording object, created and displayed.
     See Recording.analyze for more info.
     labelEncoder is only needed if forChecking is set.
     forChecking creates a file per observation and sorts them into
     class-by-class folders that the expert can then review.
     Check README for more information.
     """
     if self._verbatim > 0:
         print('\n\n *** DATASET DISPLAY ***')
     tStartGlobal = time.time()
     for file_path in self.files:
         recording = Recording(file_path, config, verbatim=self._verbatim)
         recording.load(config)
         tStart = time.time()
         recording.display(config,
                           onlineDisplay=onlineDisplay,
                           saveDisplay=saveDisplay,
                           forChecking=forChecking,
                           labelEncoder=labelEncoder)
         tEnd = time.time()
         if self._verbatim > 1:
             print('\tRecording has been loaded and displayed', file_path,
                   tEnd - tStart)
     tEndGlobal = time.time()
     if self._verbatim > 0:
         print('Dataset has been displayed', tEndGlobal - tStartGlobal)
     return
Example #3
 def analyze(self, analyzer, config, save=True):
     """
     Function analyzing all files of the dataset.
     Each file is dealt with as a Recording object, created, analyzed and
     saved.
     See Recording.analyze for more info.
     """
     if self._verbatim > 0:
         print('\n\n *** DATASET ANALYSIS ***')
     tStartGlobal = time.time()
     for file_path in self.files:
         recording = Recording(file_path, config, verbatim=self._verbatim)
         tStart = time.time()
         recording.analyze(analyzer, config)
         tEnd = time.time()
         if self._verbatim > 1:
             print('\tRecording has been analyzed', file_path,
                   tEnd - tStart)
         if save:
             recording.save(config)
             if self._verbatim > 1:
                 print('\tRecording has been saved')
     tEndGlobal = time.time()
     if self._verbatim > 0:
         print('Dataset has been analyzed', tEndGlobal - tStartGlobal)
     return
Example #4
 def makeDecision(self, config, save=True):
     """
     Function making decisions based on the probabilities predicted by the
     analyze method.
     Each file is dealt with as a Recording object, created, loaded,
     decisions are made, and saved again.
     See Recording.makeDecision for more info.
     """
     if self._verbatim > 0:
         print(
             '\n\n *** DATASET ANALYSIS: MAKING DECISION ON PREDICTIONS ***'
         )
     tStartGlobal = time.time()
     for file_path in self.files:
         recording = Recording(file_path, config, verbatim=self._verbatim)
         recording.load(config)
         tStart = time.time()
         recording.makeDecision(config)
         tEnd = time.time()
         if save:
             recording.save(config)
         if self._verbatim > 1:
             print(
                 '\tRecording has been re-analyzed: decisions on predictions have been made',
                 file_path, tEnd - tStart)
     tEndGlobal = time.time()
     if self._verbatim > 0:
         print(
             'Dataset has been re-analyzed: decisions on predictions have been made',
             tEndGlobal - tStartGlobal)
     return
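
A minimal, hypothetical driver for the three Dataset methods above, assuming a Dataset class from the surrounding project that exposes them along with an analyzer and config object; only the call order is illustrated:

# Hypothetical pipeline driver (Dataset, analyzer and config are assumed
# to come from the surrounding project):
dataset = Dataset(config, verbatim=2)
dataset.analyze(analyzer, config, save=True)   # predict probabilities per file
dataset.makeDecision(config, save=True)        # turn predictions into decisions
dataset.display(config, saveDisplay=True)      # render results for review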
Example #5
def delete(id_list, args):
    # TODO: add a confirmation (sans --yyyyassss)
    total = len(id_list)
    if total == 0:
        print(f"Nothing to delete, exiting...")
        return
    elif total == 1:
        print(f"Deleting {total} recording")
    else:
        print(f"Deleting {total} recordings")
    print("-" * 50)

    # Load all the recs
    path = built_ins['db']['recordings']
    rec_db = TinyDB(path)
    shows = Query()
    # shortcut for later
    shows_qry = shows.data

    recs = []
    total = 0
    for obj_id in id_list:
        obj = rec_db.get((shows_qry.object_id == int(obj_id))
                         & (shows_qry.video_details.state != 'recording'))
        if not obj:
            print(f'ERROR: Unable to find recording with '
                  f'object_id == "{obj_id}", skipping...')
            continue

        total += 1
        recs.append({
            'doc_id': obj.doc_id,
            'obj_id': obj_id,
            'rec': Recording(obj['data'])
        })

    # TODO: don't "total" like this
    if total <= 0:
        print(f"No recordings found; {len(id_list)} requested.")
    elif total == 1:
        print(f"Deleting {total} recording...")
    else:
        print(f"Deleting {total} recordings...")

    if total > 0:
        for rec in recs:
            rec = rec['rec']
            print(f" - {rec.get_actual_dur()} | {rec.get_description()} ")

        print("-" * 50)
        if not args.yes:
            print()
            print('\tAdd the "--yes" flag to actually delete things...')
            print()
        else:
            for rec in recs:
                _delete(rec, rec_db)

    print("\nFINISHED")
Example #6
 def get_recording(self, object_id):
     """Return Recording specified by object_id."""
     result = self._api.call('recordings', http_method='GET',
                             object_id=object_id)
     # Temporary until method is implemented
     result = Recording(self._api)
     #recording = Recording(result)
     return result
Example #7
def print_dupes():
    dupes = _find_dupes()
    for key, data in dupes.items():
        if len(data) > 1:
            print(key + " = " + str(len(data)))
            for item in data:
                rec = Recording(item)
                print("\t" + str(rec.object_id) + " | " +
                      rec.get_description() + " - " + rec.get_dur())
Example #8
def print_incomplete(args):
    # weird way I made it work...
    percent = args.incomplete
    if percent == -1:
        percent = 100
    else:
        percent = min(percent, 100)
        percent = max(percent, 0)

    percent = percent / 100
    dupes = _find_dupes()
    proper_dur = 0
    matched = 0
    total_recs = 0
    id_set = []
    for key, data in dupes.items():
        if key.startswith('SH'):
            continue
        if len(data) > 0:
            sum_actual_dur = 0
            recs = []
            for item in data:
                rec = Recording(item)
                actual_dur = rec.video_details['duration']
                proper_dur = rec.airing_details['duration']
                sum_actual_dur += actual_dur
                if proper_dur > actual_dur:
                    recs.append(rec)

            if (proper_dur * percent) > sum_actual_dur:
                matched += 1
                total_recs += len(recs)
                header = None
                for x in recs:
                    if args.id_list:
                        if x.object_id not in id_set:
                            id_set.append(x.object_id)
                    else:
                        if not header:
                            header = x.get_description() + \
                                " - " + x.episode['tms_id']
                            print(header)
                        print("\t" + str(x.object_id) + " | " +
                              x.get_description() + " - " + x.get_dur())
                if not args.id_list:
                    sum_txt = str(timedelta(seconds=sum_actual_dur))
                    total_txt = str(timedelta(seconds=proper_dur))
                    pct = str(round(sum_actual_dur / proper_dur * 100, 2))
                    print(f"\n\t{sum_txt}  /  {total_txt}  ({pct}%)")
                    print()
    if args.id_list:
        print(id_set)
    else:
        print(f"Total incomplete shows less than {percent*100}% - {matched} "
              f"({total_recs} items)")
Example #9
    def tick(self, button_pressed):
        if button_pressed:
            if not self._current_recording:
                self._current_recording = Recording(self._create_recording_file_name(), self.audio, self.device_index)
                self._recordings.append(self._current_recording)

            self._current_recording.write_frames()
        else:
            if self._current_recording:
                self._current_recording.complete()
                self.completion_callback(self._current_recording)
                self._current_recording = None
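
A sketch of how tick() might be driven from a real button, assuming a gpiozero Button and a hypothetical owner class exposing the method above:

# Hypothetical polling loop for the push-to-talk tick() above.
import time
from gpiozero import Button

button = Button(17)                 # pin number as in the other examples
recorder = PushToTalkRecorder()     # hypothetical owner of tick()
while True:
    recorder.tick(button.is_pressed)
    time.sleep(0.05)                # poll at roughly 20 Hz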
Example #10
 def _parseRec(self, data):
     metadataDicts = ['Stims', 'vars', 'Packages', 'Logs', 'nm_folder0']
     recordingsKeys = [
         ddict for ddict in data if ddict not in metadataDicts
     ]
     # filter rather than removing while iterating, which would skip elements
     recordingsKeys = [
         key for key in recordingsKeys if key.split('_')[0][-1] == self.id
     ]
     recordings = OrderedDict()
     for key in recordingsKeys:
         recordings[key] = Recording({key: data[key]}, self)
     return recordings
Example #11
def analyse_recording(file, args, start_frame=0, end_frame=0):
    global recordings

    recording = Recording(file, start_frame, end_frame)
    try:
        recording.open_video()

        logging.debug("Analysing Video ...")
        analyse_video(recording)
        get_dimension(recording)

        if not args.skip_split and start_frame == 0 and end_frame == 0:
            parts = check_for_splits(recording)

            if parts:
                recording.processing_finished()
                logging.info(
                    "Video contains %s recordings, splitting into different files",
                    str(len(parts)))

                for part in parts:
                    logging.debug("Video Part from frame %s to %s",
                                  str(int(part[0])), str(int(part[1])))
                    analyse_recording(file, args, part[0], part[1])

                raise Exception('video is split')

        get_start_frame(recording)
        get_end_frame(recording)
        calculate_duration(recording)

        extract_modelname(recording, args.model)

        render_video(recording,
                     args.output,
                     args.dry_run,
                     quality=args.x265_quality,
                     audio_bitrate=args.audio_bitrate)

        get_recording_size(recording)
    except Exception as e:
        recording.error = str(e)
        logging.debug(e, exc_info=True)

    print_recording_stats(recording)

    recording.processing_finished()

    if not recording.has_errors() or not recording.error == 'video is split':
        recordings.append(recording)
Example #12
    def build_feature_data(self,
                           condition,
                           wav_path='./EmoDB/wav/',
                           laut_path='./EmoDB/lablaut/'):
        assert condition in [1, 2, 3], 'unknown condition {}'.format(condition)

        data_available = sorted([
            s.split('.')[0] for s in os.listdir(wav_path) if s.endswith('.wav')
        ])

        df = pd.DataFrame()
        row_list = []
        for ID in tqdm(data_available):
            rec = Recording(ID)
            if len(rec.df_tags) <= 1:
                print('Excluded {} (empty tag-file)'.format(ID))
                continue

            if condition == 1:
                row_list.append({
                    'ID': ID,
                    'feature_vec': rec.get_features(per_phoneme=False)
                })
            if condition in [2, 3]:
                for i, row in rec.df_tags.iterrows():
                    row_list.append({
                        'phoneme': row.phoneme,
                        'ID': ID,
                        'feature_vec': rec.get_features(row.t_start, row.t_stop)
                    })

        df = df.append(row_list)
        df['sex'] = df.ID.apply(lambda x: self.speaker_info[x[:2]][0])
        df['speaker_id'] = df.ID.apply(lambda x: x[:2])
        df['age'] = df.ID.apply(lambda x: self.speaker_info[x[:2]][1])
        df['emotion_name'] = df.ID.apply(
            lambda x: self.emotion_from_ID(x, num=False))
        df['emotion_label'] = df.ID.apply(lambda x: self.emotion_from_ID(x))
        return df
Example #13
 def recording_add(self, name, channel, priority, start, stop, **info):
     """
     add a new recording
     """
     log.info('recording.add: %s', name)
     r = Recording(name, channel, priority, start, stop, info=info)
     if r in self.recordings:
         r = self.recordings[self.recordings.index(r)]
         if r.status == DELETED:
             r.status = CONFLICT
             r.favorite = False
             # update schedule, this will also send an update to all
             # clients registered.
             self.reschedule()
             return r
         raise AttributeError('Already scheduled')
     self.recordings.append(r)
     self.reschedule()
     return r
Example #14
def check_favorite(fav, recordings):
    """
    Check the given favorite against the db and add recordings
    """
    # Note: we can't use keyword searching here because it won't match
    # some favorite titles when they have short names.
    if fav.substring:
        # unable to do that right now
        listing = kaa.epg.search(keywords=fav.name)
    else:
        # 'like' search
        listing = kaa.epg.search(title=kaa.epg.QExpr('like', fav.name))
    now = time.time()
    for p in listing:
        if not fav.match(p.title, p.channel.name, p.start_timestamp):
            continue
        if p.stop_timestamp < now:
            # do not add old stuff
            continue
        # we found a new recording.
        rec = Recording(p.title,
                        p.channel.name,
                        fav.priority,
                        p.start_timestamp,
                        p.stop_timestamp,
                        info={
                            "episode": p.episode,
                            "subtitle": p.subtitle,
                            "description": p.description
                        })
        if rec in recordings:
        # This not only avoids adding recordings twice, it also
        # prevents a deleted favorite from being re-added as active.
            continue
        fav.update_recording(rec)
        recordings.append(rec)
        log.info('added\n%s', rec)
        signals['changed'].emit(rec)
        if fav.once:
            favorites.remove(fav)
            break
Example #15
def view(args):
    print()
    path = built_ins['db']['recordings']
    rec_db = TinyDB(path)

    id_set = []
    cnt = 0
    for item in rec_db.all():
        cnt += 1
        if args.id_list:
            obj_id = item['data']['object_id']
            if obj_id not in id_set:
                id_set.append(obj_id)

        elif args.full:
            pprint.pprint(item)
        else:
            Recording(item['data']).print()

    if args.id_list:
        print(id_set)
    else:
        print(f'Total recordings found: {cnt}')
Example #16
    def record(self):
        logger.debug("recording")
        cur_time_s = time.time()

        # check if this is the first time we will be recording
        if self.last_record_time_s < 0:
            self.set_last_record_state(cur_time_s)
            return

        # get the amount of time that has passed since the last recording
        delta_time_s = cur_time_s - self.last_record_time_s
        if delta_time_s < config.min_recording_duration_s:
            return

        # create a recording for each of the flow meters
        flow_meter_recordings = []
        for flow_meter in self.flow_meters:
            # get the change in the total number of interrupts for the flow meter
            last_record_flow_meter_num_interrupts = self.last_record_flow_meters_num_interrupts[
                flow_meter.id]
            delta_flow_meter_num_interrupts = flow_meter.num_interrupts - last_record_flow_meter_num_interrupts

            # calculate the volume
            volume_ml = delta_flow_meter_num_interrupts * flow_meter.volume_per_interrupt_ml

            flow_meter_recordings.append(
                FlowMeterRecording(flow_meter, volume_ml))

        # create a recording
        recording = Recording(cur_time_s, delta_time_s, flow_meter_recordings)

        # set the last record state
        self.set_last_record_state(cur_time_s)

        # record to recorders
        for recorder in self.recorders:
            recorder.record(recording)
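
The volume computation in record() is a plain scaling of the interrupt delta; a toy check with assumed numbers:

# Toy check of the flow-meter volume arithmetic above (numbers assumed):
volume_per_interrupt_ml = 2.25           # calibration constant of the meter
delta_flow_meter_num_interrupts = 40     # interrupts since the last record
volume_ml = delta_flow_meter_num_interrupts * volume_per_interrupt_ml
print(volume_ml)                         # 90.0 ml in this interval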
Example #17
def process_recording(recording_id, args):
    """
    Generate features for the given recording.

    Returns a Recording object containing feature Segments for the given
    recording.
    """

    wav_path = get_data_file(recording_id, 'calls', args.ogi_dir)

    decoded_path = decode_call_file(wav_path)
    if args.gain_level is not None:
        decoded_path = normalize_call_file(decoded_path, args.gain_level)

    segment_paths = split_call_file(decoded_path, args.segment_length,
                                    args.drop_short_segments)

    segments = []
    for segment_path in segment_paths:
        features = extract_audio_features(segment_path, args)
        if features is not None:
            segments.append(Segment(segment_path, features))

    return Recording(recording_id, segments)
Example #18
def process_recording(recording_id, args):
    """
    Generate features for the given recording.

    Returns a Recording object containing feature Segments for the given
    recording.
    """

    # get decoded_path directly, since CSLU is already in regular .wav files
    decoded_path = get_data_file(recording_id, 'speech', args.cslu_dir)

    if args.gain_level is not None:
        decoded_path = normalize_call_file(decoded_path, args.gain_level)

    segment_paths = split_call_file(decoded_path, args.segment_length,
                                    args.drop_short_segments)

    segments = []
    for segment_path in segment_paths:
        features = extract_audio_features(segment_path, args)
        if features is not None:
            segments.append(Segment(segment_path, features))

    return Recording(recording_id, segments)
Example #19
def _build_recordings():
    recs_path = built_ins['db']['recordings']
    recshow_path = built_ins['db']['recording_shows']
    if not built_ins['dry_run']:
        try:
            os.unlink(recs_path)
        except Exception:
            pass
        try:
            os.unlink(recshow_path)
        except Exception:
            pass

    recs_db = TinyDB(recs_path)

    programs = Api.recordings.airings.get()
    show_paths = []
    print(f"Total Recordings: {len(programs)}")
    # cnt = 0
    with tqdm(total=len(programs)) as pbar:
        for piece in chunks(programs, MAX_BATCH):
            airings = Api.batch.post(piece)
            # cnt += len(airings)
            # print(f"\tchunk: {cnt}/{len(programs)}")
            for path, data in airings.items():
                airing = Recording(data)

                if airing.showPath not in show_paths:
                    show_paths.append(airing.showPath)

                if not built_ins['dry_run']:
                    recs_db.insert({
                        'id': airing.object_id,
                        'path': airing.path,
                        'show_path': airing.showPath,
                        'data': airing.data,
                        'version': Api.device.version
                    })
                pbar.update(1)

    recshow_db = TinyDB(recshow_path)
    print(f"Total Recorded Shows: {len(show_paths)}")
    my_show = Query()
    with tqdm(total=len(show_paths)) as pbar:
        # this is silly and just to make the progress bar move :/
        for piece in chunks(show_paths, math.ceil(MAX_BATCH / 5)):
            # not caring about progress, we'd use this:
            # for piece in chunks(show_paths, MAX_BATCH):
            airing_shows = Api.batch.post(piece)
            for path, data in airing_shows.items():
                stuff = recshow_db.search(my_show.show_path == path)
                pbar.update(1)
                if not stuff:
                    if not built_ins['dry_run']:
                        recshow_db.insert({
                            'id': data['object_id'],
                            'show_path': path,
                            'data': data,
                            'version': Api.device.version
                        })

    print("Done!")
Example #20
    def load(self):
        treestr = self.level * TAB + self.name + '/'
        # print string to tree hierarchy and screen
        self.writetree(treestr + '\n')
        print(treestr)
        dirnames = [
            name for name in os.listdir(self.path)
            if os.path.isdir(os.path.join(self.path, name))
        ]
        # collect recording names: either the 1st char of each name must be a digit,
        # or the last _ separated field must be an 'e' (for 'experiment') followed by a number:
        rnames = []
        for dirname in dirnames:
            if dirname[0].isdigit():
                rnames.append(dirname)
            else:
                lastfield = dirname.split('_')[-1]
                if lastfield[0] == 'e' and lastfield[1:].isnumeric():
                    rnames.append(dirname)
        rnames.sort()  # alphabetical order
        dt = 0  # calculate total track duration by summing durations of all recordings
        # does this track have any missing sorts, or rely on old impoverished .spk files?:
        missingsort, simplesort = False, False
        for rname in rnames:
            path = os.path.join(self.path, rname)
            recording = Recording(path, track=self)
            recording.load()
            if recording.sort is None:
                missingsort = True
            elif type(recording.sort.header) in [core.SPKHeader, core.MATHeader]:
                simplesort = True
            self.r[recording.id] = recording
            setattr(self, 'r' + str(recording.id), recording)  # add shortcut attrib
            dt += recording.dt
        self.rnames = rnames  # easy way to print out all recording names
        self.dt = dt
        self.dtsec = self.dt / 1e6
        self.dtmin = self.dtsec / 60
        self.dthour = self.dtmin / 60

        if len(rnames) == 0:
            return  # no recordings in this track, nothing else to do

        if missingsort or simplesort:
            return  # skip all below due to missing or impoverished sort files (.mat or .spk)

        # create a TrackSort with TrackNeurons:
        self.sort = TrackSort(self)
        self.sort.load()
        # load RF type for each cell, should be one big dict indexed by nid:
        rftypefname = os.path.join(self.path, self.absname + '.rftype')
        try:
            with open(rftypefname, 'r') as f:
                rftypestr = f.read()
            rftypes = eval(rftypestr)
            for nid, rftype in rftypes.items():
                assert rftype in ['simple', 'complex', 'LGN', None]
                self.alln[nid].rftype = rftype
        except IOError:  # no absname.rftype file denoting RF type of each cell
            pass
        # load spike type for each cell, should be one big dict indexed by nid:
        spiketypefname = os.path.join(self.path, self.absname + '.spiketype')
        try:
            with open(spiketypefname, 'r') as f:
                spiketypestr = f.read()
            spiketypes = eval(spiketypestr)
            for nid, spiketype in spiketypes.items():
                assert spiketype in ['fast', 'slow', 'fastasym', 'slowasym']
                self.alln[nid].spiketype = spiketype
        except IOError:  # no absname.spiketype file denoting spike type of each cell
            pass

        # calculate tranges, representing start and stop times (us) of child recordings
        # relative to start of track:
        rids = sorted(self.r)  # all recording ids in self
        r0 = self.r[rids[0]]
        assert r0.datetime == self.datetime
        tranges = []
        for rid in rids:
            rec = self.r[rid]
            # rec.td is time delta (us) between start of track and start of recording
            trange = rec.td + rec.trange[0], rec.td + rec.trange[1]
            tranges.append(trange)

        self.tranges = np.array(tranges)  # each row is a recording trange
        self.trange = self.tranges[0, 0], self.tranges[-1, 1]

        self.calc_meanrates()

        # pttype better be the same for all member recordings:
        pttype = self.r[rids[0]].pttype  # init to pttype of first recording
        for rid in rids[1:]:
            r = self.r[rid]
            # if recording doesn't have a pttype, it's probably from an old .spk file,
            # so don't bother doing this test:
            if hasattr(r, 'pttype') and pttype != r.pttype:
                raise ValueError(
                    "inconsistent polytrode types %r and %r in track %s" %
                    (pttype, r.pttype, self.id))
Example #21
import pyaudio
import wave
import time
from recording import Recording
from gpiozero import Button

button = Button(17)
button.wait_for_press()

rec = Recording("out.wav")
rec.record(button)
Example #22
class Controller(object):
    """
    Class for the tvserver.
    """
    def __init__(self, datafile):
        epg.init()
        self.locked = False
        self.datafile = datafile
        # load the recordings file
        self.load_schedule()
        # connect to recorder signals
        device.signals['start-recording'].connect(self._recorder_start)
        device.signals['stop-recording'].connect(self._recorder_stop)
        device.signals['changed'].connect(self.reschedule)
        # start by checking the recordings/favorites
        self.check_favorites_and_reschedule()
        # add schedule timer for SCHEDULE_TIMER / 3 seconds
        kaa.Timer(self.check_favorites_and_reschedule).start(SCHEDULE_TIMER / 3)

    @kaa.timed(0.1, kaa.OneShotTimer, policy=kaa.POLICY_ONCE)
    def print_schedule(self):
        """
        Print current schedule (for debug only)
        """
        if self.locked:
            # system busy, call again later
            self.print_schedule()
            return False
        if hasattr(self, 'only_print_current'):
            # print only latest recordings
            all = False
        else:
            # print all recordings in the list
            all = True
            # mark that all are printed once
            self.only_print_current = True
        # print only from the last 24 hours
        maxtime = int(time.time()) - 60 * 60 * 24
        info = 'recordings:\n'
        for r in self.recordings:
            if all or r.stop > maxtime:
                info += '%s\n' % r
        log.info(info)
        info = 'favorites:\n'
        for f in self.favorites:
            info += '%s\n' % f
        log.info(info)
        return True

    @kaa.coroutine()
    def reschedule(self):
        """
        Reschedule all recordings.
        """
        if self.locked:
            # system busy, call again later
            kaa.OneShotTimer(self.reschedule).start(0.1)
            yield False
        self.locked = True
        # get current time (UTC)
        ctime = int(time.time())
        # remove old recordings
        self.recordings = filter(lambda r: r.start > ctime - 60*60*24*7, self.recordings)
        # run the scheduler to attach devices to recordings
        yield scheduler.schedule(self.recordings)
        # sort by start time
        self.recordings.sort(lambda l, o: cmp(l.start,o.start))
        # save schedule
        self.save_schedule()
        self.print_schedule()
        # Schedule recordings on recorder for the next SCHEDULE_TIMER seconds.
        log.info('schedule recordings')
        for r in self.recordings:
            if r.start < ctime + SCHEDULE_TIMER and r.status == SCHEDULED:
                r.schedule()
        self.locked = False
        yield True

    @kaa.coroutine()
    def check_favorites_and_reschedule(self):
        """
        Update recordings based on favorites and epg.
        """
        if self.locked:
            # system busy, call again later
            kaa.OneShotTimer(self.check_favorites_and_reschedule).start(0.1)
            yield False
        self.locked = True
        yield epg.check(self.recordings, self.favorites)
        self.locked = False
        self.reschedule()
        yield True

    #
    # load / save schedule file with recordings and favorites
    #

    def load_schedule(self):
        """
        load the schedule file
        """
        self.recordings = []
        self.favorites = []
        if not os.path.isfile(self.datafile):
            return
        try:
            xml = kaa.xmlutils.create(self.datafile, root='schedule')
        except Exception, e:
            log.exception('tvserver.load: %s corrupt:' % self.datafile)
            sys.exit(1)
        for child in xml:
            if child.nodename == 'recording':
                try:
                    r = Recording(node=child)
                except Exception, e:
                    log.exception('tvserver.load_recording')
                    continue
                if r.status == RECORDING:
                    log.warning('recording in status \'recording\'')
                    # Oops, we were in 'recording' status when this was saved.
                    # That means we were stopped while recording; set the
                    # status to missed
                    r.status = MISSED
                if r.status == SCHEDULED:
                    # everything is a conflict for now
                    r.status = CONFLICT
                self.recordings.append(r)
            if child.nodename == 'favorite':
                try:
                    f = Favorite(node=child)
                except Exception, e:
                    log.exception('tvserver.load_favorite:')
                    continue
                self.favorites.append(f)
Example #23
    async def process_metadata(self, item):
        if item["type"] == "ssnc":
            if item["code"] == "mdst":  # beginning of metadata
                ### Start with fresh metadata, does not matter if we already had some
                self.current_metadata = Metadata(self.aiohttp_session, app)
                ### Add the mdst timing info as the first item in our metadata list
                self.current_metadata.set_field(item["name"], item["value"])
                #self.sync_rtsp(item["value"])

            elif item["code"] == "mden":  # end of metadata
                ### Add the mden as the last item to our metadata list
                self.current_metadata.set_field(item["name"], item["value"])
                persistent_id = self.current_metadata.get_field("persistentid")
                log.debug("Ended metadata {} for persistent_id {}".format(
                    item["value"], persistent_id))
                if self.next_item is None or self.next_item.recording.get_persistent_id(
                ) != persistent_id:
                    if self.next_item is None:
                        log.debug("No next item queued, looking up")
                    else:
                        log.debug(
                            "Next item id changed from {} to {}, looking up".
                            format(
                                self.next_item.recording.get_persistent_id(),
                                persistent_id))
                    ### Set up our recording object
                    recording = Recording(
                        self.current_metadata.get_field("persistentid"))

                    ### Get the recording ID, if possible. Load the recording info
                    ### from either Acousticbrainz or just the metadata we were sent
                    try:
                        recordingId = await self.current_metadata.get_recordingId(
                        )
                        ab_info = await self.acoustic_brainz.lookup_recording(
                            recordingId)
                        recording.load_acousticbrainz(ab_info)
                    except (Metadata.RecordingLookupError, TypeError,
                            FailedRequest):
                        recordingId = 0
                        recording.load_metadata(self.current_metadata)

                    ### Enqueue the item, to start at the frame specified in the mden message
                    await self.queue_item(recording)
                self.current_metadata = None

            elif item["code"] == "prgr":  # progress info
                await self.handle_progress(item)

            elif item["code"] == "pbeg":  # start playing
                pass

            elif item["code"] == "prsm":  # resume playing
                pass

            elif item["code"] == "pend":  # stop playing
                self.playback_state = PlaybackState.STOPPED
                if (self.current_item):
                    self.current_item.cancel()
                if (self.next_item):
                    self.next_item.cancel()

                pass

            elif item["code"] == "pfls":  # flush (pause?)
                pass

            elif item["code"] == "pvol":  # volume
                await handle_volume(item)

        elif item["type"] == "core":
            self.current_metadata.set_field(item["name"], item["value"])
Example #24
 def open_recording(self, path):
     rec = Recording(path)  # init it just to parse its id
     exec_lines = ("r%s = Recording(%r)\n"
                   "r%s.load()" % (rec.id, path, rec.id))
     self.ipw.execute(exec_lines)
Example #25
    def run(self):
        self.merger.start()

        if self.visualization:
            self.visualizer.start()

        recording_id_odas = [0, 0, 0, 0]
        last_recording_id_odas = [0, 0, 0, 0]

        recordings = {}
        # requests to speaker recognition waiting to be answered; key is the id,
        # value is the queue in which the result will be stored
        sr_requests = {}

        # Kevin's ROS changes
        pub = rospy.Publisher('/roboy/cognition/sam/output',
                              String,
                              queue_size=10)
        rospy.Subscriber("/roboy/control/matrix/leds/mode", ControlLeds,
                         self.mode_callback)
        rospy.Subscriber("/roboy/control/matrix/leds/freeze", msg_Empty,
                         self.freeze_callback)
        # s = rospy.Service('/roboy/cognition/speech/recognition', RecognizeSpeech, self.handle_service)
        # self.ledmode_pub = rospy.Publisher("/roboy/control/matrix/leds/mode/simple", Int32, queue_size=3)
        # self.ledoff_pub = rospy.Publisher('/roboy/control/matrix/leds/off', msg_Empty, queue_size=10)
        # self.ledfreeze_pub = rospy.Publisher("/roboy/control/matrix/leds/freeze", msg_Empty, queue_size=1)
        # self.ledpoint_pub = rospy.Publisher("/roboy/control/matrix/leds/point", Int32, queue_size=1)
        rospy.init_node("SAM", anonymous=True)

        # operation average
        angle_list = []

        while self.merger.is_alive() and not rospy.is_shutdown():

            # we do ask for the next data block
            # maybe this is the place where i can insert a call and replace the while loop

            # wait for/get next data
            try:
                next_data = self.merger_to_main_queue.get(block=True,
                                                          timeout=1)
            except q_Empty:
                continue  # restart loop, but check again if we maybe got a stop signal

            cid = next_data['id_info']
            caudio = next_data['audio_data']

            ############################################################################################
            # this part separates the 4 streams and manages the ones where audio is currently being recorded
            #########################################################################################
            # cid[i] = [id, x, y, z, activity]
            for i in range(len(cid)):  # len=4

                recording_id_odas[i] = cid[i][0]

                if recording_id_odas[i] > 0:
                    if recording_id_odas[i] == last_recording_id_odas[i]:
                        # same person continues speaking
                        recordings[recording_id_odas[i]].audio = np.append(
                            recordings[recording_id_odas[i]].audio, caudio[i])
                        recordings[recording_id_odas[i]].currentpos = [
                            cid[i][1], cid[i][2], cid[i][3]
                        ]

                    else:
                        # a person started speaking
                        recordings[recording_id_odas[i]] = Recording(
                            recording_id_odas[i],
                            [cid[i][1], cid[i][2], cid[i][3]])
                        recordings[recording_id_odas[i]].audio = np.append(
                            recordings[recording_id_odas[i]].audio, caudio[i])

                        # if a different person was speaking before, he is now done
                        if last_recording_id_odas[i] > 0:
                            recordings[
                                last_recording_id_odas[i]].stopped = True
                elif recording_id_odas[
                        i] == 0 and last_recording_id_odas[i] > 0:
                    # if a different person was speaking before, he is now done
                    recordings[last_recording_id_odas[i]].stopped = True

                last_recording_id_odas[i] = recording_id_odas[i]

            ##########################################################
            # check if we got any answers from sr (speaker recognition) in the meantime
            #############################################################
            to_delete_req = []
            for rec_id, req in sr_requests.iteritems():
                try:
                    # sr_id: -99 means new speaker
                    # certainty between 0-10
                    certainty = 0
                    preliminary_id, sr_id, certainty = req.get(block=False)

                    # Fuse speaker recognition and localization info together
                    # First the best case: both agree on an existing/new speaker
                    if sr_id == recordings[rec_id].preliminary_speaker_id:
                        # both agree, that's nice
                        recordings[rec_id].final_speaker_id = recordings[
                            rec_id].preliminary_speaker_id
                        recordings[rec_id].send_to_trainer = True
                    elif recordings[
                            rec_id].created_new_speaker and sr_id == -99:
                        # both agree, that this is a new speaker
                        output_string = "both agree that rec %d is new speaker %d" % (
                            rec_id, recordings[rec_id].preliminary_speaker_id)
                        rospy.logdebug(output_string)
                        recordings[rec_id].final_speaker_id = recordings[
                            rec_id].preliminary_speaker_id
                        recordings[rec_id].send_to_trainer = True
                    else:

                        # Now come the harder parts.
                        if certainty < 1:
                            # if speaker recognition is unsure we rely on localization
                            recordings[rec_id].final_speaker_id = recordings[
                                rec_id].preliminary_speaker_id
                        elif certainty > 8:
                            # sr is super sure, we trust it
                            recordings[rec_id].final_speaker_id = sr_id
                            recordings[rec_id].sr_changed_speaker = True
                        else:
                            # check the angle to the speaker sr suggested, and decide depending on the certainty
                            # go through the list of speaker angles and find the one which sr suggests
                            found = False
                            for (oth_id, angl
                                 ) in recordings[rec_id].angles_to_speakers:
                                if oth_id == sr_id:
                                    # the further away it is, the surer sr has to be
                                    if certainty * 20 > angl:
                                        recordings[
                                            rec_id].final_speaker_id = sr_id
                                        recordings[
                                            rec_id].sr_changed_speaker = True
                                    else:
                                        recordings[
                                            rec_id].final_speaker_id = recordings[
                                                rec_id].preliminary_speaker_id
                                    found = True
                                    break
                            if not found:
                                # this shouldn't happen
                                output_string = "Speaker recognition suggestested id {} for recording {}," \
                                                " which doesn't exist".format(sr_id, rec_id)
                                rospy.logerr(output_string)
                                recordings[
                                    rec_id].final_speaker_id = recordings[
                                        rec_id].preliminary_speaker_id

                    output_string = "response for req %d, results is %d, certanty %d" % (
                        rec_id, sr_id, certainty)
                    rospy.logdebug(output_string)
                    recordings[rec_id].is_back_from_sr = True
                    to_delete_req.append(rec_id)

                except q_Empty:
                    if time.time() - recordings[
                            rec_id].time_sent_to_sr > 3:  # no response from sr for 3 sec -> timeout
                        # print("no response for request %d in 3 sec -> timeout" % (rec_id))
                        recordings[rec_id].final_speaker_id = recordings[
                            rec_id].preliminary_speaker_id
                        recordings[rec_id].is_back_from_sr = True
                        to_delete_req.append(rec_id)

            for req in to_delete_req:
                del sr_requests[req]

            ##################################################################################
            # here we go through our recordings and handle them based on their current status
            ####################################################################################
            to_delete = []

            rec_info_to_vis = []

            for rec_id, rec in recordings.iteritems():
                if self.visualization and not rec.stopped:
                    # convert audio to energy and append it to the tuple
                    # Energy is the root mean square of the signal
                    # E = sqrt(sum(s[n]^2)/N)
                    curr_energy = np.sqrt(np.mean(np.square(rec.audio.data)))
                    if not rec.stopped:
                        rec_info_to_vis.append([
                            rec_id, rec.currentpos[0], rec.currentpos[1],
                            rec.currentpos[2], 200, curr_energy
                        ])  # 200 is the size of the blob
                    else:
                        rec_info_to_vis.append([
                            rec_id, rec.currentpos[0], rec.currentpos[1],
                            rec.currentpos[2], 50, curr_energy
                        ])

                if rec.new:
                    output_string = "new recording " + str(rec_id)
                    rospy.loginfo(output_string)
                    # get angles to all known speakers
                    rec.get_angles_to_all_speakers(self.speakers, rec.startpos)

                    # if it is within a certain range of a known speaker, assign it to that speaker
                    if len(
                            self.speakers
                    ) > 0 and rec.angles_to_speakers[0][1] < 35:  # degree
                        output_string = "preliminary assigning recording %d to speaker %d, angle is %d" % (
                            rec_id, rec.angles_to_speakers[0][0],
                            rec.angles_to_speakers[0][1])
                        rospy.loginfo(output_string)
                        rec.preliminary_speaker_id = rec.angles_to_speakers[0][
                            0]
                        rec.final_speaker_id = rec.preliminary_speaker_id  # this will be overwritten later

                    else:
                        # create a new speaker
                        self.num_speakers += 1
                        new_id = self.num_speakers
                        self.speakers[new_id] = Speaker(new_id, rec.startpos)
                        rec.preliminary_speaker_id = new_id
                        rec.final_speaker_id = rec.preliminary_speaker_id  # this will be overwritten later
                        rec.created_new_speaker = True
                        closest_ang = -999
                        if len(rec.angles_to_speakers) > 0:
                            closest_ang = rec.angles_to_speakers[0][1]
                        output_string = "creating new speaker %d for recording %d, closest angle is %d" % (
                            new_id, rec_id, closest_ang)
                        rospy.logdebug(output_string)

                        if self.num_speakers == 1:
                            rec.send_to_trainer = True

                    rec.new = False

                # elif self.speaker_recognition and (not rec.was_sent_sr and rec.audio.shape[
                #     0] > 16000 * 3):  # its longer than 3 sec, time to send it to speaker recognition
                #     sr_requests[rec_id] = Queue(maxsize=1)
                #     self.sr.test(rec.audio, rec.preliminary_speaker_id, sr_requests[rec_id])
                #     rec.was_sent_sr = True
                #     rec.time_sent_to_sr = time.time()

                elif rec.stopped:
                    # speaker finished, handle this
                    if not rec.alldone:
                        if rec.audio.shape[
                                0] < 16000 * 0.4:  # everything shorter than this we simply discard
                            output_string = "recording %d was too short, discarding" % (
                                rec_id)
                            print output_string
                            rospy.loginfo(output_string)
                            if rec.created_new_speaker:
                                del self.speakers[rec.preliminary_speaker_id]
                                output_string = "thus also deleting speaker" + str(
                                    rec.preliminary_speaker_id)
                                rospy.logdebug(output_string)
                            rec.alldone = True
                    if not rec.alldone:
                        if (rec.was_sent_sr and
                                rec.is_back_from_sr) or (not rec.was_sent_sr):
                            if not rec.was_sent_sr:
                                # it seems like this was too short to be sent to sr
                                rec.final_speaker_id = rec.preliminary_speaker_id
                            self.speakers[
                                rec.final_speaker_id].pos = rec.currentpos

                            if rec.created_new_speaker and rec.sr_changed_speaker:
                                try:
                                    del self.speakers[
                                        rec.preliminary_speaker_id]
                                except:
                                    output_string = "Error deleting preliminary speaker " + str(
                                        rec.preliminary_speaker_id)
                                    print output_string
                                    rospy.logerr(output_string)

                            # TODO:
                            # send to speech to text
                            if self.bing_allowed:
                                text = self.stt.get_text(rec.audio)
                                # wavfile.write(text.encode('utf-8') + ".wav", 16000, rec.audio.data)
                            else:
                                text = "bing is not allowed yet"
                            # output_string = "Speaker {}: ".format(rec.final_speaker_id) + text.encode('utf-8')
                            output_string = text.encode('utf-8')
                            rospy.loginfo(output_string)
                            pub.publish(output_string)

                            if self.bing_allowed:
                                self.text_queue.put(output_string)
                                rospy.logdebug("text_queue lenght in main: " +
                                               str(self.text_queue.qsize()))

                            # send this to trainer
                            # if self.speaker_recognition and rec.send_to_trainer:
                            #     self.sr.train(rec.final_speaker_id, rec.audio)
                            #     output_string = "sending recording %d to trainer" % (rec_id)
                            #     rospy.logdebug(output_string)

                            output_string = "succesfully handeld recording " + str(
                                rec_id)
                            rospy.logdebug(output_string)
                            rec.alldone = True
                        else:
                            pass  # wait for the response of sr

                if rec.alldone:
                    to_delete.append(rec_id)

            for rec_id in to_delete:
                del recordings[rec_id]

            if self.visualization:
                try:
                    self.main_to_vis_queue.put(
                        {
                            'speakers': self.speakers,
                            'recordings': rec_info_to_vis
                        },
                        block=False)

                except Full:
                    # print("couldn't put data into visualization queue, its full")
                    pass

            # ---------------------------------------------------------------------------------------------------
            # new doa to led addon
            # print
            # print "------------------------------------"
            # print "speakers: "
            # print self.speakers
            # print "rec_info_to_vis: "
            # operation average
            # if len(rec_info_to_vis) > 0 and not self.bing_allowed:
            #     # print "0 -> ", rec_info_to_vis[0][0]
            #     # print "1 -> ", rec_info_to_vis[0][1]
            #     # print "2 -> ", rec_info_to_vis[0][2]
            #     # print "3 -> ", rec_info_to_vis[0][3]
            #     # print "4 -> ", rec_info_to_vis[0][4]
            #     angle_list.append(rec_info_to_vis[0][1])
            #     if len(angle_list) >= 10:
            #         publish_point_left_right(self.ledpoint_pub, sum(angle_list)/len(angle_list))
            #         angle_list = []
            # else:
            #     print "Empty dude"
            # print "------------------------------------"
            # print
            # publish_point(self.ledpoint_pub, rec_info_to_vis[1])
            # ---------------------------------------------------------------------------------------------------

        output_string = "SAM is done."
        print output_string
        rospy.loginfo(output_string)
        self.merger.stop()
        if self.visualization:
            self.visualizer.stop()
        rospy.signal_shutdown("SAM is done.")
Example #26
def valid_file_name(name):
    return '/' not in name


if __name__ == '__main__':

    recording = None
    path = DEFAULT_RECORD_PATH

    while True:

        user_input = input()

        if user_input == 'r':
            if recording is None:
                recording = Recording()
            track = recording.create_track()
            track.record()

        elif user_input == 'p':
            # toggle playback: pause while playing, resume otherwise
            if recording.playing:
                recording.pause()
            else:
                recording.play()
        elif user_input == 's':
            recording.save()
        elif user_input == 't':
            user_input = input()
            recording = 0.0 if user_input == '' else float(user_input)
        elif user_input == 'o':
            recording = Recording.open()
Example #27
def main(input_file, output_file, speed, debug=False):
    """
    Main control flow for Voice Assistant device.
    """
    GPIO.setmode(GPIO.BOARD)
    button = Button(17)
    button.hold_time = 2
    button.when_held = play_tutorial
    light = led.LED()
    # pull last saved speed from json
    with open('save_state.json', 'r') as saveFile:
        response = json.load(saveFile)
    speed = float(response['savedSpeed'])

    client = avs.connect_to_avs()
    dialog_req_id = [helpers.generate_unique_id()]
    audio_process = Processing(input_file, output_file, speed, 15)
    os.system("mpg321 audio_instrs/startup.mp3")

    # check if should play tutorial, requires holding for 2 sec
    time.sleep(5)

    if IN_TUTORIAL:
        print("hello in tutorial")
        time.sleep(78)

    if speed == 1:
        os.system("mpg321 " + menu_filenames[int(speed) - 1])
        light.flash(led.RED)
    elif speed == 2:
        os.system("mpg321 " + menu_filenames[int(speed) - 1])
        light.flash(led.GRN)
    else:
        os.system("mpg321 " + menu_filenames[int(speed) - 1])
        light.flash(led.BLUE)

    # reset hold time/when_held func to go to menu
    button.hold_time = 5
    button.when_held = partial(launch_menu, button, light, audio_process)

    try:
        while True:
            print("ready for input")
            light.change_color(led.GRN)

            # record from mic
            if input_file == "in.wav":
                button.wait_for_press()

                if button.is_pressed:
                    button.wait_for_release()

                if IN_MENU:
                    while IN_MENU:
                        pass
                    continue

                rec = Recording(input_file)
                light.change_color(led.BLU)
                rec.record(button)

            light.change_color(led.ALL)
            if debug:
                output_file = input_file
            else:
                audio_process.apply()

            # send to avs
            # outfiles = avs.send_rec_to_avs(output_file, client)
            outfiles = avs.send_rec_to_avs(output_file, client, dialog_req_id)

            # play back avs response
            light.change_color(led.PUR)
            if not outfiles:
                light.change_color(led.RED)
                os.system("mpg321 audio_instrs/alexa-noresponse.mp3")
                print("Error, no outfiles")
                time.sleep(1)

            for of in outfiles:
                print("playing: " + of)
                os.system("mpg321 " + of)

            if input_file == 'in.wav':
                print("Command completed! Waiting for new input!")
            else:
                light.interrupt()
                break

    except KeyboardInterrupt:
        light.interrupt()
Example #28
gcp = GCPSpeech()
led = Light(17)

while True:
    interrupted = False
    signal.signal(signal.SIGINT, signal_handler)
    time1 = timer()
    model = 'resources/Murphy.pmdl'
    detector = snowboydecoder.HotwordDetector(model, sensitivity=0.5)
    time2 = timer()
    print 'detector setup time: ' + str(time2 - time1)

    detector.start(detected_callback=process,
                   interrupt_check=interrupt_callback,
                   sleep_time=0.03)
    detector.terminate()
    time3 = timer()
    print 'detecting time: ' + str(time3 - time2)

    print 'record for 5 seconds....'
    rec = Recording()
    rec.write_to_file()
    time4 = timer()
    print 'total recording time: ' + str(time4 - time3)

    print 'transcribing....'
    gcp.transcribe()
    time5 = timer()
    print 'transcribing time: ' + str(time5 - time4)
Example #29
import datetime
import time

import pytz

import utilities
from recording import Recording  # assumed import path for the Recording class

# connect to database
cur = utilities.get_cursor()

# select all future scheduled recordings not yet processed
cur.execute('SELECT id, station_name, show_name, time_start, time_stop, stream_url FROM scheduled_recordings WHERE processed = 0')
scheduled_recordings = [{'id': row[0], 'station_name': row[1], 'show_name': row[2], 'time_start': datetime.datetime.strptime(row[3], '%Y-%m-%d %H:%M %z'), 'time_stop': datetime.datetime.strptime(row[4], '%Y-%m-%d %H:%M %z'), 'stream_url': row[5]} for row in list(cur)]

now = datetime.datetime.now(pytz.utc)

# if any recording scheduled within the next 10 minutes, create Recording object and start record()
recordings=[]
for rec in scheduled_recordings:
    if rec['time_stop'] < now: # if show has already ended
        continue
    if rec['time_start'] < now or rec['time_start'] - now < datetime.timedelta(minutes=10): # if show starts within the next 10 minutes or has already started
        id = rec['id']
        recordings.append(Recording(stream_url=rec['stream_url'], station_name=rec['station_name'], show_name=rec['show_name'], time_start=rec['time_start'], time_stop=rec['time_stop']).record())
        # Update entry with processed = True
        cur.execute (f'UPDATE scheduled_recordings SET processed = 1 WHERE id={id}')
# wait until all recordings are finished to keep the streamripper processes running
while not all([rec.poll() is not None for rec in recordings]):
    time.sleep(1)





Example #30
def search(args):
    path = built_ins['db']['recordings']
    rec_db = TinyDB(path)
    shows = Query()
    # shortcut for later
    shows_qry = shows.data

    # to store all possible search options/segments
    params = []

    # Handle search "term"arg - this checks title and description
    if args.term:
        params.append(
            shows_qry.airing_details.show_title.matches(f'.*{args.term}.*',
                                                        flags=re.IGNORECASE)
            | shows_qry.episode.description.matches(f'.*{args.term}.*',
                                                    flags=re.IGNORECASE)
            # Gah, should work, always bombs. Suspect on non-episodes
            # though episode.description is fine?
            # |
            # shows_qry.episode['title'].matches(
            #     f'.*{args.term}.*', flags=re.IGNORECASE
            # )
        )

    # Handle "after" date arg
    if args.after:
        params.append(
            shows_qry.airing_details.datetime.test(datetime_comp, '>',
                                                   args.after))

    # Handle "before" date arg
    if args.before:
        params.append(
            shows_qry.airing_details.datetime.test(datetime_comp, '<',
                                                   args.before))
    # Handle recording state args
    if args.state:
        state_params = []
        for state in args.state:
            state_params.append(shows_qry.video_details.state == state)
        state_query = None
        for param in state_params:
            if not state_query:
                state_query = param
            else:
                state_query = (state_query) | (param)

        params.append(state_query)

    # Handle recording type args
    if args.type:
        type_params = []
        for rec_type in args.type:
            type_params.append(
                shows.path.matches(f'.*{rec_type}.*', flags=re.IGNORECASE))
        type_query = None
        for param in type_params:
            if not type_query:
                type_query = param
            else:
                type_query = (type_query) | (param)

        params.append(type_query)

    # Handle watched arg
    if args.watched:
        params.append(shows_qry.user_info.watched == True  # noqa: E712
                      )

    # Handle season arg
    if args.season:
        params.append(shows_qry.episode.season_number == args.season)

    # Handle episode arg
    if args.episode:
        params.append(shows_qry.episode.number == args.episode)

    # Handle tms-id arg
    if args.tms_id:
        params.append(shows_qry.episode.tms_id == args.tms_id)
    # Handle duration limit
    if args.duration:
        params.append(shows_qry.video_details.duration < args.duration)
    # Handle tablo object id arg
    if args.id:
        params.append(shows_qry.object_id == int(args.id))

    # Finally, put the all the query params together and do the search
    query = None
    for param in params:
        if not query:
            query = param
        else:
            query = query & param

    if not query:
        # TODO: probably shouldn't let this happen?
        results = rec_db.all()
    else:
        results = rec_db.search(query)

    if not results:
        if args.id_list:
            print([])
        else:
            # TODO: print the criteria we tried to match
            print('No matching records found.')
    else:
        id_set = []
        returned = 0
        for item in results:
            if args.id_list:
                obj_id = item['data']['object_id']
                if obj_id not in id_set:
                    id_set.append(obj_id)
            elif args.full:
                pprint(item)
            else:
                Recording(item['data']).print()

            returned += 1
            if args.limit and returned == args.limit:
                break
        if args.id_list:
            print(id_set)
        else:
            if returned == len(results):
                print(f'Total recordings found: {len(results)}')
            else:
                print(f'{returned}/{len(results)} total recordings displayed')
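
The accumulation loops in search() (state_query, type_query and the final query) each fold a list of TinyDB conditions together with | or &; functools.reduce expresses the same fold more compactly. A sketch assuming the params and state_params lists built above:

# Equivalent folding of TinyDB query fragments with functools.reduce;
# params and state_params are the lists built in search() above.
import operator
from functools import reduce

state_query = reduce(operator.or_, state_params)            # any listed state
query = reduce(operator.and_, params) if params else None   # all criteria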