Example #1
    def indexes_from_files(self,
                           file_path,
                           file_type=Const.typeImage,
                           length_int=Const.length_int,
                           should_index=False):
        """
        This function will load the indexes from predicted image files within a folder, which will then serve as their
        ID.
        :param file_path: Path to directory containing the predicted images. Nothing else should be included in this
        directory!
        :param file_type: The file type we are looking for (only its length matters, e.g. '.png').
        :param length_int: The length of the index in an image name (default: seven digits).
        :param should_index: A boolean deciding whether the numeric indexes should actually be parsed from the file
        names (worth the effort for large projects).
        :return: indexes (a list of the parsed indexes) and the names of the matching files
        """
        # First, a list of names within the folder is fetched.
        names = []
        for (dir_path, dir_names, file_names) in walk(file_path):
            for file_name in file_names:
                if file_name[-len(file_type):] == file_type:
                    names.append(file_name)
                else:
                    debug.cout(file_name, 'Wrong File Type - Ignored.')

        # Now, the list of names is converted into a series of indexes (numbers).
        indexes = []
        if should_index:
            for name in names:
                # The index of a file is defined by the number at the end of its name.
                index = int(
                    name[-(length_int + len(file_type)):-len(file_type)])
                indexes.append(index)
            self.library = self.library.reindex(indexes, self.library.columns)
        return indexes, names
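A minimal usage sketch for the function above, assuming the Librarian class from these examples is available; the folder path is a placeholder:

    lib = Librarian()
    indexes, names = lib.indexes_from_files('predictions/',
                                            file_type='.png',
                                            should_index=True)
    debug.cout(len(names), 'Prediction images found')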
Example #2
    def fetch_librarian(self, ground_path, sample_string, iteration=None):
        """
        This function will fetch the most up-to-date librarian of a project. It can detect whether a matching file
        exists at all, and a specific iteration of the file can be requested.
        :param ground_path: The path to the project (including the project name in it).
        :param sample_string: This string needs to be of the same length as the expected file name. For example:
            sample_string = date_string + project_name + ' 01.xlsx'
        :param iteration: An optional integer to request a specific iteration.
        :return: the current iteration (or None if no matching file is found)
        """
        # Files is a list of the .xlsx files in the folder whose names have the same length as the sample string.
        files = []
        for file_name in os.listdir(ground_path):
            if file_name.endswith(".xlsx"):
                files.append(file_name)
        files.sort()
        files = [fi for fi in files if len(fi) == len(sample_string)]

        # In case the iteration is specified, the correct iteration is chosen.
        if iteration is not None:
            files = [fi for fi in files if int(fi[-7:-5]) == iteration]

        # If, after all these filters, we still have files remaining, we take the most recent one.
        if len(files) == 0:
            # Guard against an empty result: concatenating None into the path would raise a TypeError.
            debug.cout(ground_path, 'No matching librarian found', 'Fetch')
            return None
        recent = files[-1]
        debug.cout(ground_path + recent, 'Librarian Path', 'Fetch')
        self.load_excel_file(ground_path + recent)
        current_iteration = int(recent[-7:-5])
        return current_iteration
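A hypothetical call, reusing the sample_string convention from the docstring and the date format from Example #8; the project folder is a placeholder:

    import datetime

    lib = Librarian()
    date_string = datetime.datetime.today().strftime('%Y_%m_%d') + '_'
    ground_path = 'output/dremel37/'                       # placeholder project folder
    sample_string = date_string + 'dremel37' + ' 01.xlsx'  # same length as the real file names
    current_iteration = lib.fetch_librarian(ground_path, sample_string)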
Example #3
    def video_from_actions(self, librarian, column, video_output_path,
                           project_name, small_stills_path, small_stills_base):
        """
        This function will take the values written in a certain column (usually 'very rigid') and convert the actions
        in this column into video clips.
        :param librarian: The full librarian with its data.
        :param column: A string with the column name which should be analysed (e.g. 'very_rigid').
        :param video_output_path: The folder path where the videos should be saved to.
        :param project_name: A string with the name of the current project (e.g. 'dremel37').
        :param small_stills_path: The folder path where the still images are stored.
        :param small_stills_base: The name base of a still image (e.g. 'small_still' for 'small_still0000189.png').
        :return: -
        """
        actions, pauses = librarian.action_sequences(column)

        # The buffer of frames is added before and after the action for easier evaluation.
        frames_buffer = int(Const.duration_buffer * Const.frame_rate)

        total_frames = len(librarian.library)
        if not os.path.exists(video_output_path):
            os.makedirs(video_output_path)

        # For every action.
        for start, array in zip(actions.library.start, actions.library.array):
            start = int(start)
            original_length = int(len(array))
            first_index = start - frames_buffer
            last_index = start + original_length + frames_buffer

            # Check whether the action is below the first or above the last frame.
            if first_index < 0:
                first_index = 0
            if last_index > total_frames:
                last_index = total_frames
            length = last_index - first_index

            names = []
            current_index = first_index
            for enum in xrange(length):
                # Zero-pad the frame index to the standard index length.
                index = str(current_index).zfill(Const.length_int)
                names.append(small_stills_path + small_stills_base + index +
                             Const.typeImage)
                current_index += 1

            time_stamp = debug.frame_to_time(first_index, Const.frame_rate)
            video_name = video_output_path + project_name + '_' + time_stamp + '.avi'

            height, width, layers = self.just_load(names[0]).shape
            video = cv2.VideoWriter(video_name, Const.video_codec,
                                    Const.frame_rate, (width, height))
            for name in names:
                image = self.just_load(name)
                video.write(image)
            video.release()
            debug.cout(time_stamp, 'Video created')
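A hypothetical call, assuming a handler object that exposes video_from_actions (its class is not shown here) and a librarian whose 'very_rigid' column has already been filled; all paths are placeholders:

    handler.video_from_actions(librarian, 'very_rigid',
                               'output/dremel37/video_clips/', 'dremel37',
                               'output/dremel37/small_stills/', 'small_still')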
Example #4
    def video_from_stills(self,
                          path_video,
                          images_path,
                          frame_rate=Const.frame_rate,
                          start=None,
                          end=None):
        """
        This function will take a folder with images and turn them into a video. One can specify if all the images
        should be used by defining the start and end frames of the video.
        :param path_video: The entire path and name of the video.
        :param images_path: The folder where the images lie.
        :param frame_rate: In frames/second (default: 60)
        :param start: The nth image which should be used to start the video.
        :param end: The nth image which should be used to end the video.
        :return: -
        """
        lib_side_by_sides = Librarian()
        _, images_names = lib_side_by_sides.indexes_from_files(images_path)

        images_names.sort()

        # Instantiate the video writer class.
        height, width, layers = self.just_load(images_path +
                                               images_names[0]).shape
        video = cv2.VideoWriter(path_video, Const.video_codec, frame_rate,
                                (width, height))

        # Check whether special start or end frames are chosen.
        if start is None:
            start = 0
        if end is None:
            end = len(images_names)

        if end > len(images_names):
            end = len(images_names)

        # Add all the frames to the video which were specified.
        for enum in xrange(start, end, 1):
            image = self.just_load(images_path + images_names[enum])
            video.write(image)
            if len(images_names) > 100 and enum % 100 == 0:
                debug.cout(enum, 'index', 'video')
        video.release()
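A hypothetical call, again assuming a handler object with this method and a folder of numbered stills; the path and frame numbers are placeholders:

    handler.video_from_stills('output/dremel37/summary.avi',
                              'output/dremel37/small_stills/',
                              frame_rate=Const.frame_rate,
                              start=0,
                              end=600)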
Example #5
    def create_side_by_sides(self,
                             path_left,
                             path_right,
                             path_side_by_side,
                             aspect_ratio=Const.video_aspect_ratio,
                             file_type_left=Const.typeImage,
                             file_type_right=Const.typeImage):
        """
        This function will take two folder paths (left and right) with equal number of images and create a new image for
        each pair with the two side by side. The images are saved into a new folder given in path_side_by_side.

        The width is hereby set to double the width of the left image. The height is then adjusted so that the overall
        aspect ratio is 16:9.

        :param path_left: The folder for the images in the left half of the screen.
        :param path_right: The folder for the images in the right half of the screen.
        :param path_side_by_side: The destination folder for the 'collages' (will be created if necessary).
        :param aspect_ratio: The overall aspect ratio of the resulting image.
        :param file_type_left: The file type for the left image type.
        :param file_type_right: The file type for the right image type.
        :return: -
        """
        lib_masks = Librarian()
        lib_stills = Librarian()
        _, left_names = lib_stills.indexes_from_files(path_left,
                                                      file_type_left,
                                                      should_index=False)
        _, right_names = lib_masks.indexes_from_files(path_right,
                                                      file_type_right,
                                                      should_index=False)

        left_names.sort()
        right_names.sort()

        if not os.path.exists(path_side_by_side):
            os.makedirs(path_side_by_side)

        for enum in xrange(len(right_names)):
            left = self.just_load(path_left + left_names[enum])
            right = self.just_load(path_right + right_names[enum])

            self.input_image = self.create_side_by_side(
                left, right, aspect_ratio)
            self.save_image(path_side_by_side + 'side_by_side', enum)
            if len(right_names) > 100 and enum % 1000 == 0:
                debug.cout(enum, 'index', 'side by sides')
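A hypothetical call, assuming a handler object with this method and two folders holding the same number of stills and masks; paths are placeholders:

    handler.create_side_by_sides('output/dremel37/small_stills/',
                                 'output/dremel37/masks/',
                                 'output/dremel37/side_by_sides/')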
Example #6
    def fill_begaze_to_library(self):
        """
        Since the eye tracking data is recorded slightly below 60 Hz, the two time scales need to be synchronised. To
        that end, the closest matching row is filled into the video time line. This function will copy the BeGaze
        values for x_gaze and y_gaze for every time point in the list.
        :return: -
        """
        last_row = None
        # Cycle through each time step and fill the closest match into that row.
        for enum, time_point in enumerate(self.library[self.columns[0]]):
            if last_row is None:
                last_row = self.begaze_to_library(
                    time_point, [self.columns[3], self.columns[4]])
            else:
                last_row = self.begaze_to_library(
                    time_point, [self.columns[3], self.columns[4]], last_row)
            if enum % 10000 == 0:
                debug.cout(enum, 'BeGaze Data Import')
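A hypothetical call, assuming the librarian's time line has already been loaded (e.g. via load_excel_file) and the BeGaze data is reachable through begaze_to_library; the file name is a placeholder:

    lib = Librarian()
    lib.load_excel_file('output/dremel37/2020_01_01_dremel37 01.xlsx')
    lib.fill_begaze_to_library()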
Example #7
    def exclude_times(self,
                      cut_times,
                      project_name,
                      new_columns=None,
                      frame_rate=Const.frame_rate):
        """
        This function will 'crop' the librarian and return the cropped version. The rows are not actually removed;
        the excluded time ranges are instead filled with np.nan.
        :param cut_times: This is a Librarian containing the cut_times for each project.
        :type cut_times: Librarian
        :param project_name: The name of the current project (e.g. dremel05e)
        :param new_columns: In case one wants to create completely new column names, this is the place to add them.
        :param frame_rate: The frame rate used to convert the cut times (given in seconds) into frame indices.
        :return: The cropped librarian together with the time_stamp which will be included in the name of the Excel
        file.
        """
        time_stamp = ''

        # Load the cut times depending on the project.
        times = cut_times.library[project_name]
        time_values = np.asarray(times.values[np.isfinite(times.values)],
                                 dtype=int) * frame_rate

        # Only the actual measured data should be cleared, not the entire table (time values etc.). The clear columns
        # tell you which columns should be cleared.
        clear_columns = self.library.drop(self.columns[0:2],
                                          axis=1).columns.values

        global_start = self.library.index[0]
        global_end = self.library.index[-1]

        if new_columns is None:
            new_columns = self.library.columns
        short_librarian = Librarian(columns=new_columns)

        short_librarian.library = self.library
        if len(time_values) % 2 != 0:
            debug.cout(time_values, 'Not an even number of time stamps!')
        elif len(time_values) < 2:
            debug.cout(time_values, 'Too few time values')
        else:
            short_librarian.library.ix[global_start:time_values[0],
                                       clear_columns] = np.nan
            # Go through the time_values and using the even and odd numbers, replace the values with NaN.
            for enum in xrange(len(time_values) / 2 - 1):
                end_of_previous = time_values[enum * 2 + 1]
                start_of_next = time_values[enum * 2 + 2]
                short_librarian.library.ix[end_of_previous:start_of_next,
                                           clear_columns] = np.nan
            short_librarian.library.ix[time_values[-1]:global_end + 1,
                                       clear_columns] = np.nan

            # Add a time stamp with all the cut times to the file name for easy reference and debugging.
            for time_value in time_values:
                minutes, seconds = map(str, divmod(time_value / frame_rate,
                                                   60))
                if len(minutes) < 2:
                    minutes = '0' + minutes
                if len(seconds) < 2:
                    seconds = '0' + seconds
                time_stamp += '_' + minutes + seconds

        return short_librarian, time_stamp
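A hypothetical call, assuming a filled librarian lib as in the previous examples and a cut_times Librarian whose columns are project names and whose cells hold the cut times in seconds; the file name is a placeholder:

    cut_times = Librarian()
    cut_times.load_excel_file('config/cut_times.xlsx')
    short_librarian, time_stamp = lib.exclude_times(cut_times, 'dremel05e')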
Example #8
    # The iteration integer is converted into a string of format xx (e.g. 01 or 14).
    iter_string = str(iteration).zfill(2)

    # The following line checks which projects have already been analysed. In case a participant has been interrupted,
    # it is easiest to delete that folder and let the program run again.
    directories = [directory for directory in os.listdir(project_output_path)
                   if os.path.isdir(os.path.join(project_output_path, directory))]

    # Now we start the "actual" program. main_start is used to print the time required for certain sub-steps to the
    # console.
    main_start = time.time()
    for project_name, video_path, begaze_path in zip(project_names, video_paths, begaze_paths):
        project_start = time.time()
        debug.cout(project_name, 'Project')

        # Creating the path variables for the project.
        ground_path = project_output_path + project_name + '/'
        excel_path = ground_path
        date_string = datetime.datetime.today().strftime('%Y_%m_%d') + '_'
        graphs_path = ground_path + 'graphs/'
        small_stills_path = ground_path + 'small_stills/'
        video_clips_path = ground_path + 'video_clips/'
        very_rigids_base = 'very_rigid'
        small_stills_base = 'small_still'
        bar_plot_path = graphs_path + date_string + project_name + ' Bar Plot.png'
        table_path = graphs_path + date_string + project_name + ' Table.png'
        sample_string = date_string + project_name + ' 01.xlsx'
        last_iteration = iteration
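A hypothetical continuation of this fragment: the sample_string assembled above matches what fetch_librarian from Example #2 expects, so the latest librarian for the project could be fetched like this:

        librarian = Librarian()
        current_iteration = librarian.fetch_librarian(ground_path, sample_string)
        debug.cout(current_iteration, 'Iteration', project_name)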
Example #9
    def measure_distances(self,
                          librarian,
                          small_stills_path,
                          small_stills_base,
                          blue=False):
        """
        This function will go through a video and, for each image:
            - predict where the hands are
            - measure the distance to the gaze point
            - save a smaller version of that image
        In the end it will write the results of this analysis into the librarian and return it.
        :param librarian: The (empty) librarian.
        :param small_stills_path: The folder where the images will be saved.
        :param small_stills_base: The base of the name which the images will carry for the rest of their meager lives.
        :param blue: This allows the user to specify if the hands are covered in blue gloves.
        :return: the filled-up librarian.
        """
        length = len(librarian.library)
        gaze_locations = librarian.library[['x_gaze [px]',
                                            'y_gaze [px]']].values
        arr_n_hands = []
        array_hgd = []

        success, image = self.image_from_video()
        index = 0
        debug.cout('First image loaded, starting measurement.', 'Info')
        while success:
            """
            Here we go through the segmented images and find the number of hands, as well as the centroid
            positions for visual clarification of the image (later).
            """
            # Find the hands.
            self.input_image = image

            # In case the video contains blue gloved hands, one can choose to use the specific predict function.
            if blue:
                self.predict_blue()
                n_hands, contours = self.get_number_of_hands(
                    Const.area_threshold_blue)
            else:
                self.predict()
                n_hands, contours = self.get_number_of_hands(
                    Const.area_threshold_skin)

            # Factor in the gaze position and measure the distance to the hands.
            (x_gaze, y_gaze) = gaze_locations[index]
            min_distance = self.minimum_distance_contours(
                contours, (x_gaze, y_gaze))

            # Append data.
            arr_n_hands.append(n_hands)
            array_hgd.append(min_distance)

            # Save a smaller version (disc capacity) of the image, in order to create a video again in the end.
            small_still = self.resize(image, 0.5, 0.5)
            self.just_save(small_still, small_stills_path + small_stills_base,
                           index)

            index += 1
            # Print progress roughly every 10% (guard against videos shorter than ten frames).
            if length >= 10 and index % np.floor_divide(length, 10) == 0:
                debug.cout(
                    str(int(100 * np.true_divide(index, length)) + 1) + '%',
                    'Points Calculated', length)
            success, image = self.image_from_video()

        librarian.library['n_hands [-]'] = arr_n_hands
        librarian.library['hgd [px]'] = array_hgd
        return librarian
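A hypothetical call, assuming a handler object whose video has already been opened (so image_from_video yields frames) and a librarian that already contains the gaze columns; the path is a placeholder:

    librarian = handler.measure_distances(librarian,
                                          'output/dremel37/small_stills/',
                                          'small_still',
                                          blue=True)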