Example #1
def getVideoEventSyncMessages(session_folder):
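    # Collect the "[...]"-wrapped flash sync messages logged between each
    # RECORDING_STARTED / RECORDING_STOPPED message pair, one list per block.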
    block_flash_msg_times = None
    hubdata = None
    try:
        hubdata = openDataStoreReader(session_folder)
        session_id = hubdata.getSessionMetaData()[0].session_id
        msgeventstable = hubdata.getEventTable(EventConstants.MESSAGE)

        rec_start_msg_idx = list(
            msgeventstable.get_where_list(
                '(session_id == %d) & (text == "RECORDING_STARTED")' %
                (session_id)))
        rec_stop_msg_idx = list(
            msgeventstable.get_where_list(
                '(session_id == %d) & (text == "RECORDING_STOPPED")' %
                (session_id)))

        # Ensure that start records are matched to correct stop records
        rec_blocks_idx = []
        ei = 0
        for si in range(len(rec_start_msg_idx)):
            e1 = rec_stop_msg_idx[ei]
            s1 = rec_start_msg_idx[si]
            if si + 1 < len(rec_start_msg_idx):
                s2 = rec_start_msg_idx[si + 1]
                if s2 > e1:
                    rec_blocks_idx.append((s1, e1))
                    ei += 1
            else:
                rec_blocks_idx.append((s1, e1))

        block_flash_msg_times = []
        for si, ei in rec_blocks_idx:
            cblock = []
            block_flash_msg_times.append(cblock)
            rec_block_msgs = msgeventstable.read(start=si, stop=ei + 1)
            for r in rec_block_msgs:
                msg_time, msg_text = r['time'], r['text']

                if msg_text[0] == '[' and msg_text[-1] == ']':
                    cblock.append((msg_time, msg_text))
    except Exception:
        import traceback

        traceback.print_exc()
        block_flash_msg_times = None
    finally:
        if hubdata:
            hubdata.close()

    return block_flash_msg_times
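
The function returns one list per recording block, each entry a (time, text)
tuple whose text is wrapped in square brackets. A minimal usage sketch
(assumption: the function is importable from this module and './session_data'
is a valid iohub session folder):

# Hedged usage sketch for getVideoEventSyncMessages.
blocks = getVideoEventSyncMessages('./session_data')
if blocks is None:
    print('Could not read sync messages from the data store.')
else:
    for block_num, flash_msgs in enumerate(blocks):
        # Each entry is a (time, "[...]") message tuple.
        print('Recording block %d: %d sync messages' % (block_num,
                                                        len(flash_msgs)))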
Example #4
def getEventDetails(session_path, frame_event_lookup_file, filters,
                    batch_size):
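    # Stream batches of full event records whose table row indices are stored
    # in a saved frame-event lookup file, filtered by the given criteria.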
    session_folder = os.path.normpath(os.path.abspath(session_path))
    datastore_reader = openDataStoreReader(session_folder)
    frame_events_lookup = np.load(
        os.path.join(session_folder, frame_event_lookup_file))['events']
    event_types = filters.get('event_type',
                              np.unique(frame_events_lookup['event_type']))

    # Close the reader even if the caller abandons the generator early.
    try:
        for etype in event_types:
            filters['event_type'] = [etype]
            filtered_table_rows = Picker(frame_events_lookup).pick_data(
                'event_table_row_index', **filters)
            event_table = datastore_reader.getEventTable(etype)
            while len(filtered_table_rows) > 0:
                row_nums = [int(i) for i in filtered_table_rows[:batch_size]]
                yield event_table[row_nums]
                filtered_table_rows = filtered_table_rows[batch_size:]
    finally:
        datastore_reader.close()
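
Because getEventDetails is a generator, batches stream lazily and the data
store reader stays open until iteration completes. A hedged usage sketch (the
lookup file name and the 'video_id' filter key below are illustrative
assumptions, not names confirmed by this listing):

# Iterate event-detail batches for one session.
filters = {'video_id': [0]}
for batch in getEventDetails('./session_data', 'frame_events.npz',
                             filters, batch_size=1000):
    # Each batch is a structured array of at most batch_size event rows.
    print('got a batch of %d events' % len(batch))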
Example #5
def getSamplesPerFrame(session_folder, frame_times_per_session_video):
    """
    Return a numpy ndarray of size (total_event_count,4). Each row represents
    an event that occurred during the given screen capture video, between
    experiment message events defined by the app config settings:

        data_collection:
            recording_period:
                event_period:
                    start_msg: START_EVENT_PERIOD
                    end_msg: END_EVENT_PERIOD

    The elements of a row are video_id, frame_index, event_type, event_id, event_time

    """
    result_dtype = [('video_id', np.uint8), ('frame_number', np.uint32),
                    ('frame_time', np.float32), ('event_time', np.float32),
                    ('gaze_x', np.float32), ('gaze_y', np.float32),
                    ('status', np.int_)]
    hubdata = None
    try:
        hubdata = openDataStoreReader(session_folder)

        evt_blocks_idx = getDataStoreRecordingBlockBounds(hubdata)
        # garyfeng: debug
        if (PRINT_VERBOSE):
            printf("=====================================")
            printf("getSamplesPerFrame\nevt_blocks_idx:", evt_blocks_idx)
        # garyfeng: end
        msgeventstable = hubdata.getEventTable(EventConstants.MESSAGE)
        # garyfeng: the event table does NOT seem to limit the data to the
        # current session_id, so a session_id filter is added to the query
        # condition below.
        session_id = hubdata.getSessionMetaData()[0].session_id
        # garyfeng: end
        sampleeventstable = hubdata.getEventTable(SAMPLE_EVENT_ID)

        # Now match up frame start and end times for each video of the
        # current session, creating the events_by_video_frame np array rows
        # defined by result_dtype.
        video_events = []
        for vi, (si, ei) in enumerate(evt_blocks_idx):
            # garyfeng
            # debug
            if (PRINT_VERBOSE):
                printf("getSamplesPerFrame: si, ei indices")
                printf("si", si)
                printf("ei", ei)
            # end garyfeng

            edge_msgs = msgeventstable[[si, ei]]
            rec_start_msg, rec_end_msg = edge_msgs[:]
            rec_block_start_time = rec_start_msg['time']
            rec_block_end_time = rec_end_msg['time']
            # garyfeng
            # debug
            if (PRINT_VERBOSE):
                printf("getSamplesPerFrame")
                printf("rec_block_start_time", rec_block_start_time)
                printf("rec_block_end_time", rec_block_end_time)
            # end garyfeng

            #cond = "(time >= %f) & (time <= %f) " % (
            # garyfeng: adding condition to use only valid eye gaze
            # also adding the condition to restrict to only the current session_id
            cond = "(time >= %f) & (time <= %f) & (status == 0) & (session_id==%d)" % (
                rec_block_start_time, rec_block_end_time, session_id)
            if EVENT_FILTER_ID >= 0:
                cond = cond + " & (filter_id == %d)" % (EVENT_FILTER_ID)
            # garyfeng: debug
            printf("getSamplesPerFrame: filter cond=", cond)
            # garyfeng: end

            frame_times = frame_times_per_session_video[vi]
            frame_count = frame_times.shape[0]
            video_frame_events = [[] for z in xrange(frame_count)]
            event_count = 0
            filtered_sample_events = sampleeventstable.read_where(cond)
            frame_num = 0
            fstart_time = int(frame_times[0][1] * 1000)
            fend_time = int(frame_times[1][1] * 1000)
            # garyfeng
            if (PRINT_VERBOSE):
                printf("initial fstart_time", fstart_time)
                printf("initial fend_time", fend_time)
            # garyfeng: end

            for e in filtered_sample_events:
                evt_time = int(e['time'] * 1000)
                # garyfeng: debug
                if (PRINT_VERBOSE):
                    printf(e['session_id'], e['event_id'], e['time'],
                           e['gaze_x'], e['gaze_y'])
                # garyfeng

                if evt_time >= fstart_time:
                    if evt_time < fend_time:
                        event_count += 1
                        video_frame_events[frame_num].append(
                            (vi, frame_num, fstart_time / 1000.0, e['time'],
                             e[X_COL], e[Y_COL], e['status']))
                    else:
                        # garyfeng
                        if (PRINT_VERBOSE):
                            if (video_frame_events[frame_num]):
                                printf("        ",
                                       video_frame_events[frame_num])
                        # garyfeng
                        frame_num += 1
                        if frame_num + 1 < frame_count:
                            fstart_time = int(frame_times[frame_num][1] * 1000)
                            fend_time = int(frame_times[frame_num + 1][1] *
                                            1000)
                        elif frame_num + 1 == frame_count:
                            # Final frame: open a synthetic window that
                            # starts where the last real frame ended.
                            fstart_time, fend_time = fend_time, (
                                fend_time + (fend_time - fstart_time))
                        else:
                            break
                        if evt_time < fend_time:
                            event_count += 1
                            video_frame_events[frame_num].append(
                                (vi, frame_num, fstart_time / 1000.0,
                                 e['time'], e[X_COL], e[Y_COL], e['status']))

            # now have all mono samples per frame in the current video.
            # They are not sorted by time in each video, but by event type.
            # Resort events for each video in each frame.
            video_event_list = []
            for frame_events in video_frame_events:
                if frame_events:
                    video_event_list.extend(
                        sorted(frame_events, key=lambda x: x[-4]))

            video_events.extend(video_event_list)
        # Convert to numpy ndarray
        return np.array(video_events, dtype=result_dtype)
    except Exception:
        import traceback
        traceback.print_exc()
    finally:
        if hubdata:
            hubdata.close()
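
A sketch of consuming the returned structured array, assuming
frame_times_per_session_video is available from the frame-timing step that
precedes this function in the pipeline:

# Average gaze position per frame for the first video of the session.
samples = getSamplesPerFrame('./session_data', frame_times_per_session_video)
if samples is not None and len(samples):
    first_video = samples[samples['video_id'] == 0]
    for fnum in np.unique(first_video['frame_number']):
        frame = first_video[first_video['frame_number'] == fnum]
        print('frame %d: mean gaze (%.1f, %.1f)' % (
            fnum, frame['gaze_x'].mean(), frame['gaze_y'].mean()))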
Example #8
def getEventsPerFrame(session_folder, frame_times_per_session_video):
    """
    Return a numpy ndarray of size (total_event_count,4). Each row represents
    an event that occurred during the given screen capture video, between
    experiment message events defined by the app config settings:

        data_collection:
            recording_period:
                event_period:
                    start_msg: START_EVENT_PERIOD
                    end_msg: END_EVENT_PERIOD

    The elements of a row are video_id, frame_index, event_type, event_id, event_time

    """
    result_dtype = [('video_id', np.uint8), ('frame_number', np.uint32),
                    ('frame_time', np.float32), ('event_type', np.uint8),
                    ('event_id', np.uint32), ('filter_id', np.uint8),
                    ('event_table_row_index', np.uint32),
                    ('event_time', np.float32)]
    hubdata = None
    try:
        hubdata = openDataStoreReader(session_folder)

        evt_blocks_idx = getDataStoreRecordingBlockBounds(hubdata)

        msgeventstable = hubdata.getEventTable(EventConstants.MESSAGE)

        # Now match up frame start and end times for each video of the
        # current session, creating the events_by_video_frame np array rows
        # defined by result_dtype.
        video_events = []
        for vi, (si, ei) in enumerate(evt_blocks_idx):
            edge_msgs = msgeventstable[[si, ei]]
            rec_start_msg, rec_end_msg = edge_msgs[:]
            rec_block_start_time = rec_start_msg['time']
            rec_block_end_time = rec_end_msg['time']
            cond = "(time >= %f) & (time <= %f)" % (rec_block_start_time,
                                                    rec_block_end_time)
            if EVENT_FILTER_ID >= 0:
                cond = cond + " & (filter_id == %d)" % (EVENT_FILTER_ID)
            frame_times = frame_times_per_session_video[vi]
            frame_count = frame_times.shape[0]
            video_frame_events = [[] for z in xrange(frame_count)]
            event_count = 0
            for event_id, event_iter in hubdata.getEventsByType(
                    cond).iteritems():
                frame_num = 0
                fstart_time = int(frame_times[0][1] * 1000)
                fend_time = int(frame_times[1][1] * 1000)
                for e in event_iter:
                    evt_time = int(e['time'] * 1000)
                    if evt_time >= fstart_time:
                        if evt_time < fend_time:
                            event_count += 1
                            video_frame_events[frame_num].append(
                                (vi, frame_num, fstart_time / 1000.0,
                                 e['type'], e['event_id'], e['filter_id'],
                                 e.nrow, e['time']))
                        else:
                            frame_num += 1
                            if frame_num + 1 < frame_count:
                                fstart_time = int(frame_times[frame_num][1] *
                                                  1000)
                                fend_time = int(frame_times[frame_num + 1][1] *
                                                1000)
                            elif frame_num + 1 == frame_count:
                                # Final frame: open a synthetic window that
                                # starts where the last real frame ended.
                                fstart_time, fend_time = fend_time, (
                                    fend_time + (fend_time - fstart_time))
                            else:
                                break

                            if evt_time < fend_time:
                                event_count += 1
                                video_frame_events[frame_num].append(
                                    (vi, frame_num, fstart_time / 1000.0,
                                     e['type'], e['event_id'], e['filter_id'],
                                     e.nrow, e['time']))

            # now have all events per frame in the current video.
            # They are not sorted by time in each video, but by event type.
            # Resort events for each video in each frame.
            video_event_list = []
            for frame_events in video_frame_events:
                if frame_events:
                    video_event_list.extend(
                        sorted(frame_events, key=lambda x: x[-1]))

            video_events.extend(video_event_list)
        # Convert to numpy ndarray
        return np.array(video_events, dtype=result_dtype)
    except Exception:
        import traceback
        traceback.print_exc()
    finally:
        if hubdata:
            hubdata.close()
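
The dtype above carries the event_type and event_table_row_index fields that
getEventDetails reads from the 'events' key of its lookup file, so one
plausible way to connect the two steps (an assumption, not confirmed by this
listing) is to save the result with np.savez:

# Hedged sketch: persist the per-frame event lookup for later detail queries.
events = getEventsPerFrame('./session_data', frame_times_per_session_video)
if events is not None:
    np.savez(os.path.join('./session_data', 'frame_events.npz'),
             events=events)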