Example #1
# NOTE: the module-level imports below are assumed by all of the examples on
# this page (they are not part of the original snippets). Helpers such as
# handle_arguments_and_configurations, open_bin_file, open_csv_file,
# open_vec_file, get_grey_frame, float_frame_to_uint8_frame, create_png_image,
# load_png_image, the vh / cb modules and the module-level variable `name`
# come from the surrounding project.
import collections
import math
import os
import warnings

import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import scipy.interpolate  # make sp.interpolate available
import tqdm

def generate(args):

    config = handle_arguments_and_configurations(name, args)

    base_path = config['path']
    if not os.path.isdir(base_path):
        os.makedirs(base_path)
    print("Generation in: {}".format(base_path))

    images_path = os.path.join(base_path, "images")
    if not os.path.isdir(images_path):
        os.makedirs(images_path)

    # Get configuration parameters.
    vh_image_nbs = config['vh_image_nbs']
    eye_diameter = config['eye_diameter']
    mean_luminance = config['mean_luminance']
    std_luminance = config['std_luminance']
    normalized_value_median = config['normalized_value_median']
    normalized_value_mad = config['normalized_value_mad']
    display_rate = config['display_rate']
    adaptation_duration = config['adaptation_duration']
    flash_duration = config['flash_duration']
    inter_flash_duration = config['inter_flash_duration']
    frame_resolution = config['frame']['resolution']
    frame_width = config['frame']['width']
    frame_height = config['frame']['height']
    nb_repetitions = config['nb_repetitions']
    stuttering_vh_image_nbs = config['stuttering_vh_image_nbs']
    nb_stuttering_vh_images = config['nb_stuttering_vh_images']
    nb_stutters = config['nb_stutters']
    seed = config['seed']
    verbose = config['verbose']

    # Fetch van Hateren images.
    vh.fetch(image_nbs=vh_image_nbs,
             download_if_missing=False,
             verbose=verbose)

    # Select unsaturated van Hateren image numbers.
    if vh_image_nbs is None:
        vh_image_nbs = vh.get_image_nbs()
    else:
        vh_image_nbs = np.array(vh_image_nbs)
    are_saturated = vh.get_are_saturated(image_nbs=vh_image_nbs,
                                         verbose=verbose)
    are_unsaturated = np.logical_not(are_saturated)
    assert vh_image_nbs.size == are_unsaturated.size, "{} != {}".format(
        vh_image_nbs.size, are_unsaturated.size)
    unsaturated_vh_image_nbs = vh_image_nbs[are_unsaturated]

    # Select images with a moderate dynamic range (in the log domain).
    # mean_luminances = vh.get_mean_luminances(image_nbs=unsaturated_vh_image_nbs, verbose=verbose)
    # std_luminances = vh.get_std_luminances(image_nbs=unsaturated_vh_image_nbs, verbose=verbose)
    max_luminances = vh.get_max_luminances(image_nbs=unsaturated_vh_image_nbs,
                                           verbose=verbose)
    # TODO remove the following lines?
    # max_centered_luminances = max_luminances / mean_luminances
    # # print(np.min(max_centered_luminances))
    # # print(np.median(max_centered_luminances))
    # # print(np.max(max_centered_luminances))
    # are_good = max_centered_luminances <= 8.3581  # TODO correct?
    # TODO remove the following lines?
    # max_normalized_luminances = (max_luminances / mean_luminances - 1.0) / std_luminances + 1.0
    # are_good = max_normalized_luminances <= 1.02
    # TODO remove the following lines?
    log_mean_luminances = vh.get_log_mean_luminances(
        image_nbs=unsaturated_vh_image_nbs, verbose=verbose)
    # NOTE: the original snippet called vh.get_log_mean_luminances here as
    # well, which looks like a copy-paste slip; a get_log_std_luminances
    # helper is assumed to exist alongside it.
    log_std_luminances = vh.get_log_std_luminances(
        image_nbs=unsaturated_vh_image_nbs, verbose=verbose)
    log_max_luminances = np.log(1.0 + max_luminances)
    log_max_normalized_luminances = (log_max_luminances -
                                     log_mean_luminances) / log_std_luminances
    are_good = log_max_normalized_luminances <= 5.0
    # ...
    good_vh_image_nbs = unsaturated_vh_image_nbs[are_good]
    # ...
    # selected_vh_image_nbs = unsaturated_vh_image_nbs
    selected_vh_image_nbs = good_vh_image_nbs

    # Check stuttering van Hateren image numbers.
    np.random.seed(seed)
    if stuttering_vh_image_nbs is None:
        stuttering_vh_image_nbs = np.array([])
    else:
        assert len(stuttering_vh_image_nbs) <= nb_stuttering_vh_images
        for stuttering_vh_image_nb in stuttering_vh_image_nbs:
            assert stuttering_vh_image_nb in selected_vh_image_nbs, stuttering_vh_image_nb
    potential_stuttering_vh_image_nbs = np.setdiff1d(selected_vh_image_nbs,
                                                     stuttering_vh_image_nbs,
                                                     assume_unique=True)
    nb_missing_stuttering_vh_image_nbs = nb_stuttering_vh_images - len(
        stuttering_vh_image_nbs)
    stuttering_vh_image_nbs = np.concatenate(
        (stuttering_vh_image_nbs,
         np.random.choice(potential_stuttering_vh_image_nbs,
                          nb_missing_stuttering_vh_image_nbs,
                          replace=False)))
    stuttering_vh_image_nbs.sort()

    # Generate grey image.
    image_filename = "image_{i:04d}.png".format(i=0)
    image_path = os.path.join(images_path, image_filename)
    # if not os.path.isfile(image_path):  # TODO uncomment this line.
    frame = get_grey_frame(frame_width, frame_height, luminance=mean_luminance)
    frame = float_frame_to_uint8_frame(frame)
    image = create_png_image(frame)
    image.save(image_path)

    # Extract images from the van Hateren images.
    for vh_image_nb in tqdm.tqdm(selected_vh_image_nbs):
        # Check if image already exists.
        image_filename = "image_{i:04d}.png".format(i=vh_image_nb)
        image_path = os.path.join(images_path, image_filename)
        if os.path.isfile(image_path):
            continue
        # Cut out central sub-region.
        a_x = vh.get_horizontal_angles()
        a_y = vh.get_vertical_angles()
        luminance_data = vh.load_luminance_data(vh_image_nb)
        rbs = sp.interpolate.RectBivariateSpline(a_x,
                                                 a_y,
                                                 luminance_data,
                                                 kx=1,
                                                 ky=1)
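        # One frame pixel subtends atan(frame_resolution / eye_diameter)
        # radians on the retina; convert to degrees to match the angle grids
        # used below.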
        angular_resolution = math.atan(
            frame_resolution / eye_diameter) * (180.0 / math.pi)
        a_x = compute_horizontal_angles(width=frame_width,
                                        angular_resolution=angular_resolution)
        a_y = compute_vertical_angles(height=frame_height,
                                      angular_resolution=angular_resolution)
        a_x, a_y = np.meshgrid(a_x, a_y)
        luminance_data = rbs.ev(a_x, a_y)
        luminance_data = luminance_data.transpose()
        # TODO uncomment the following lines?
        # # Prepare data.
        # luminance_median = np.median(luminance_data)
        # luminance_mad = np.median(np.abs(luminance_data - luminance_median))
        # centered_luminance_data = luminance_data - luminance_median
        # if luminance_mad > 0.0:
        #     centered_n_reduced_luminance_data = centered_luminance_data / luminance_mad
        # else:
        #     centered_n_reduced_luminance_data = centered_luminance_data
        # normalized_luminance_data = centered_n_reduced_luminance_data
        # scaled_data = normalized_value_mad * normalized_luminance_data
        # shifted_n_scaled_data = scaled_data + normalized_value_median
        # data = shifted_n_scaled_data
        # TODO remove the 2 following lines?
        # scaled_luminance_data = luminance_data / np.mean(luminance_data)
        # data = scaled_luminance_data / 8.3581
        # TODO remove the 2 following lines?
        # normalized_luminance_data = (luminance_data / np.mean(luminance_data) - 1.0) / np.std(luminance_data) + 1.0
        # data = (normalized_luminance_data - 1.0) / 0.02 * 0.8 + 0.2
        # TODO remove the following lines?
        log_luminance_data = np.log(1.0 + luminance_data)
        log_mean_luminance = np.mean(log_luminance_data)
        log_std_luminance = np.std(log_luminance_data)
        normalized_log_luminance_data = log_luminance_data - log_mean_luminance
        if log_std_luminance > 1e-13:
            normalized_log_luminance_data = normalized_log_luminance_data / log_std_luminance
        normalized_log_luminance_data = 0.2 * normalized_log_luminance_data
        normalized_luminance_data = np.exp(normalized_log_luminance_data) - 1.0
        normalized_luminance_data = normalized_luminance_data - np.mean(
            normalized_luminance_data)
        if np.std(normalized_luminance_data) > 1e-13:
            normalized_luminance_data = normalized_luminance_data / np.std(
                normalized_luminance_data)
        data = std_luminance * normalized_luminance_data + mean_luminance
        # Prepare image data
        if np.count_nonzero(data < 0.0) > 0:
            s = "some pixels are negative in image {} (consider changing the configuration, 'normalized_value_mad': {})"
            message = s.format(
                vh_image_nb, normalized_value_mad /
                ((normalized_value_median - np.min(data)) /
                 (normalized_value_median - 0.0)))
            warnings.warn(message)
        data[data < 0.0] = 0.0
        if np.count_nonzero(data > 1.0) > 0:
            s = "some pixels saturate in image {} (consider changing the configuration, 'normalized_value_mad': {})"
            message = s.format(
                vh_image_nb, normalized_value_mad /
                ((np.max(data) - normalized_value_median) /
                 (1.0 - normalized_value_median)))
            warnings.warn(message)
        data[data > 1.0] = 1.0
        data = np.array(
            254.0 * data,
            dtype=np.uint8)  # 0.0 -> 0 and 1.0 -> 254 such that 0.5 -> 127
        data = np.transpose(data)
        data = np.flipud(data)
        image = create_png_image(data)
        # Save image.
        image.save(image_path)

    # Set condition numbers and image paths.
    condition_nbs = []
    stuttering_condition_nbs = []
    image_paths = {}
    for k, vh_image_nb in enumerate(selected_vh_image_nbs):
        # condition_nb = k + 1
        condition_nb = k
        image_filename = 'image_{i:04d}.png'.format(i=vh_image_nb)
        image_path = os.path.join(images_path, image_filename)
        assert condition_nb not in condition_nbs
        assert os.path.isfile(image_path)
        condition_nbs.append(condition_nb)
        if vh_image_nb in stuttering_vh_image_nbs:
            stuttering_condition_nbs.append(condition_nb)
        image_paths[condition_nb] = image_path
    condition_nbs = np.array(condition_nbs)
    stuttering_condition_nbs = np.array(stuttering_condition_nbs)
    nb_conditions = len(condition_nbs)
    nb_stuttering_conditions = len(stuttering_condition_nbs)

    # Create conditions .csv file.
    conditions_csv_filename = '{}_conditions.csv'.format(name)
    conditions_csv_path = os.path.join(base_path, conditions_csv_filename)
    print("Start creating conditions .csv file...")
    # Open conditions .csv file.
    conditions_csv_file = open_csv_file(conditions_csv_path, columns=['path'])
    # Add conditions for van Hateren images.
    for condition_nb in condition_nbs:
        image_path = image_paths[condition_nb]
        path = image_path.replace(base_path, '')
        path = path[1:]  # remove separator
        conditions_csv_file.append(path=path)
    # Close conditions .csv file.
    conditions_csv_file.close()
    # ...
    print("End of conditions .csv file creation.")

    # Set sequence of conditions for each repetition.
    repetition_sequences = {}
    np.random.seed(seed)
    normal_condition_nbs = np.setdiff1d(condition_nbs,
                                        stuttering_condition_nbs,
                                        assume_unique=True)
    nb_normal_indices = len(normal_condition_nbs)
    nb_stuttering_indices = nb_stuttering_conditions * nb_stutters
    nb_indices = nb_normal_indices + nb_stuttering_indices
    sequence = np.empty(nb_indices, dtype=int)
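    # Spread the stuttering conditions evenly across the sequence; the
    # remaining slots are refilled, for each repetition, with a fresh shuffle
    # of the normal conditions.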
    stuttering_indices = np.linspace(0,
                                     nb_indices,
                                     num=nb_stuttering_indices,
                                     endpoint=False)
    stuttering_indices = stuttering_indices.astype(int)
    normal_indices = np.setdiff1d(np.arange(0, nb_indices), stuttering_indices)
    sequence[stuttering_indices] = np.concatenate(
        tuple([stuttering_condition_nbs for _ in range(0, nb_stutters)]))
    for repetition_nb in range(0, nb_repetitions):
        repetition_sequence = np.copy(sequence)
        # Normal.
        repetition_normal_sequence = np.copy(normal_condition_nbs)
        np.random.shuffle(repetition_normal_sequence)
        repetition_sequence[normal_indices] = repetition_normal_sequence
        # Stuttering.
        repetition_stuttering_sequence = []
        for _ in range(0, nb_stutters):
            repetition_stuttering_condition_nbs = np.copy(
                stuttering_condition_nbs)
            np.random.shuffle(repetition_stuttering_condition_nbs)
            repetition_stuttering_sequence.append(
                repetition_stuttering_condition_nbs)
        repetition_stuttering_sequence = np.concatenate(
            tuple(repetition_stuttering_sequence))
        repetition_sequence[
            stuttering_indices] = repetition_stuttering_sequence
        # ...
        repetition_sequences[repetition_nb] = repetition_sequence

    # Create .bin file.
    print("Start creating .bin file...")
    bin_filename = '{}.bin'.format(name)
    bin_path = os.path.join(base_path, bin_filename)
    nb_bin_images = 1 + nb_conditions  # i.e. grey image and other conditions
    bin_frame_nbs = {}
    # Open .bin file.
    bin_file = open_bin_file(bin_path,
                             nb_bin_images,
                             frame_width=frame_width,
                             frame_height=frame_height,
                             reverse=False,
                             mode='w')
    # Add grey frame.
    grey_frame = get_grey_frame(frame_width,
                                frame_height,
                                luminance=mean_luminance)
    grey_frame = float_frame_to_uint8_frame(grey_frame)
    bin_file.append(grey_frame)
    bin_frame_nbs[None] = bin_file.get_frame_nb()
    # Add van Hateren frames.
    for condition_nb in tqdm.tqdm(condition_nbs):
        image_path = image_paths[condition_nb]
        image = load_png_image(image_path)
        frame = image.data
        bin_file.append(frame)
        bin_frame_nbs[condition_nb] = bin_file.get_frame_nb()
    # Close .bin file.
    bin_file.close()
    # ...
    print("End of .bin file creation.")

    # Create .vec file and stimulation .csv file.
    print("Start creating .vec file...")
    vec_filename = "{}.vec".format(name)
    vec_path = os.path.join(base_path, vec_filename)
    csv_filename = "{}_trials.csv".format(name)
    csv_path = os.path.join(base_path, csv_filename)
    # ...
    nb_displays_during_adaptation = int(
        np.ceil(adaptation_duration * display_rate))
    nb_displays_per_flash = int(np.ceil(flash_duration * display_rate))
    nb_displays_per_inter_flash = int(
        np.ceil(inter_flash_duration * display_rate))
    nb_flashes_per_repetition = nb_conditions + nb_stuttering_vh_images * (
        nb_stutters - 1)
    nb_displays_per_repetition = nb_flashes_per_repetition * (
        nb_displays_per_flash + nb_displays_per_inter_flash)
    nb_displays = nb_displays_during_adaptation + nb_repetitions * nb_displays_per_repetition
    # Open .vec file.
    vec_file = open_vec_file(vec_path, nb_displays=nb_displays)
    # Open .csv file.
    csv_file = open_csv_file(
        csv_path,
        columns=['condition_nb', 'start_display_nb', 'end_display_nb'])
    # Add adaptation.
    bin_frame_nb = bin_frame_nbs[None]  # i.e. default frame (grey)
    for _ in range(0, nb_displays_during_adaptation):
        vec_file.append(bin_frame_nb)
    # Add repetitions.
    for repetition_nb in tqdm.tqdm(range(0, nb_repetitions)):
        repetition_sequence = repetition_sequences[repetition_nb]
        for condition_nb in repetition_sequence:
            # Add flash.
            start_display_nb = vec_file.get_display_nb() + 1
            bin_frame_nb = bin_frame_nbs[condition_nb]
            for _ in range(0, nb_displays_per_flash):
                vec_file.append(bin_frame_nb)
            end_display_nb = vec_file.get_display_nb()
            csv_file.append(condition_nb=condition_nb,
                            start_display_nb=start_display_nb,
                            end_display_nb=end_display_nb)
            # Add inter flash.
            bin_frame_nb = bin_frame_nbs[None]  # i.e. default frame (grey)
            for _ in range(0, nb_displays_per_inter_flash):
                vec_file.append(bin_frame_nb)
    # Close .csv file.
    csv_file.close()
    # Close .vec file.
    vec_file.close()
    # ...
    print("End of .vec file creation.")

    return
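
A minimal, self-contained sketch of the log-domain normalization used above (the function name and default values are illustrative, not from the original): it standardizes luminances in log space, applies the fixed 0.2 contrast factor, maps back to the linear domain and rescales to the target mean and standard deviation.

def normalize_luminance(luminance_data, mean_luminance=0.5, std_luminance=0.06):
    # Standardize in the log domain to tame the heavy-tailed distribution.
    log_data = np.log(1.0 + luminance_data)
    log_data = log_data - np.mean(log_data)
    if np.std(log_data) > 1e-13:
        log_data = log_data / np.std(log_data)
    log_data = 0.2 * log_data  # fixed contrast factor, as in generate() above
    # Map back to the linear domain and standardize again.
    data = np.exp(log_data) - 1.0
    data = data - np.mean(data)
    if np.std(data) > 1e-13:
        data = data / np.std(data)
    return std_luminance * data + mean_luminance
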
Example #2
def generate(args):

    config = handle_arguments_and_configurations(name, args)

    base_path = config['path']
    if not os.path.isdir(base_path):
        os.makedirs(base_path)
    print("Generation in {}".format(base_path))

    # Create directories (if necessary).
    images_dirname = 'images'
    images_path = os.path.join(base_path, images_dirname)
    if not os.path.isdir(images_path):
        os.makedirs(images_path)
    patterns_dirname = 'patterns'
    patterns_path = os.path.join(base_path, patterns_dirname)
    if not os.path.isdir(patterns_path):
        os.makedirs(patterns_path)
    frames_dirname = 'frames'
    frames_path = os.path.join(base_path, frames_dirname)
    if not os.path.isdir(frames_path):
        os.makedirs(frames_path)

    # Get configuration parameters.
    image_keys = config['images']
    pattern_nbs = config['perturbations']['pattern_nbs']
    amplitude_value = config['perturbations']['amplitude']
    eye_diameter = config['eye_diameter']
    mean_luminance = config['mean_luminance']
    std_luminance = config['std_luminance']
    display_rate = config['display_rate']
    adaptation_duration = config['adaptation_duration']
    flash_duration = config['flash_duration']
    inter_flash_duration = config['inter_flash_duration']
    frame_width = config['frame']['width']
    frame_height = config['frame']['height']
    frame_resolution = config['frame']['resolution']
    nb_repetitions = config['nb_repetitions']
    seed = config['seed']

    # Fetch images.
    image_nbs = np.array(list(image_keys.keys()), dtype=int)
    for image_nb in image_nbs:
        image_key = image_keys[str(image_nb)]
        fetch_image(*image_key)
    nb_images = len(image_nbs)
    _ = nb_images

    # Fetch patterns.
    pattern_nbs = np.array(pattern_nbs)
    fetch_patterns(image_nbs=pattern_nbs)
    nb_patterns = len(pattern_nbs)

    # Prepare image parameters.
    images_params = collections.OrderedDict()
    for image_nb in image_nbs:
        filename = 'image_{nb:01d}_data.npy'.format(nb=image_nb)
        path = os.path.join(images_dirname, filename)
        images_params[image_nb] = collections.OrderedDict([('path', path)])

    # Prepare pattern parameters.
    patterns_params = collections.OrderedDict()
    for pattern_nb in pattern_nbs:
        filename = 'pattern_{nb:01d}_data.npy'.format(nb=pattern_nb)
        path = os.path.join(patterns_dirname, filename)
        patterns_params[pattern_nb] = collections.OrderedDict([('path', path)])

    def get_image_data(image_nb):

        # Load image data.
        image_key = image_keys[str(image_nb)]
        dataset_name = image_key[0]
        dataset = get_dataset(dataset_name)
        data = dataset.load_data(*image_key[1:])
        # Cut out central sub-regions.
        a_x = dataset.get_horizontal_angles()
        a_y = dataset.get_vertical_angles()
        rbs = sp.interpolate.RectBivariateSpline(a_x, a_y, data)
        angular_resolution = math.atan(
            frame_resolution / eye_diameter) * (180.0 / math.pi)
        a_x = compute_horizontal_angles(width=frame_width,
                                        angular_resolution=angular_resolution)
        a_y = compute_vertical_angles(height=frame_height,
                                      angular_resolution=angular_resolution)
        a_x, a_y = np.meshgrid(a_x, a_y)
        data = rbs.ev(a_x, a_y)
        data = data.transpose()
        # # Prepare image data.
        # median = np.median(data)
        # mad = np.median(np.abs(data - median))
        # # mad = np.std(data)
        # centered_data = data - median
        # if mad > 1.e-13:
        #     centered_n_reduced_data = centered_data / mad
        # else:
        #     centered_n_reduced_data = centered_data
        # normalized_data = centered_n_reduced_data
        # scaled_data = 0.1 * normalized_data
        # shifted_n_scaled_data = scaled_data + 0.5
        # TODO keep the following normalization?
        # # Prepare image data.
        # mean = np.mean(data)
        # scaled_data = data / mean if mean > 0.0 else data
        # shifted_n_scaled_data = 0.2 * scaled_data  # TODO correct?
        # TODO keep the following normalization?
        luminance_data = data
        log_luminance_data = np.log(1.0 + luminance_data)
        log_mean_luminance = np.mean(log_luminance_data)
        log_std_luminance = np.std(log_luminance_data)
        normalized_log_luminance_data = log_luminance_data - log_mean_luminance
        if log_std_luminance > 1e-13:
            normalized_log_luminance_data = normalized_log_luminance_data / log_std_luminance
        normalized_log_luminance_data = 0.2 * normalized_log_luminance_data
        normalized_luminance_data = np.exp(normalized_log_luminance_data) - 1.0
        normalized_luminance_data = normalized_luminance_data - np.mean(
            normalized_luminance_data)
        if np.std(normalized_luminance_data) > 1e-13:
            normalized_luminance_data = normalized_luminance_data / np.std(
                normalized_luminance_data)
        luminance_data = std_luminance * normalized_luminance_data + mean_luminance
        # Save image data.
        image_data_path = os.path.join(base_path,
                                       images_params[image_nb]['path'])
        np.save(image_data_path, luminance_data)
        # Prepare image.
        data = np.copy(luminance_data)
        data[data < 0.0] = 0.0
        data[data > 1.0] = 1.0
        data = np.array(
            254.0 * data,
            dtype=np.uint8)  # 0.0 -> 0 and 1.0 -> 254 such that 0.5 -> 127
        data = np.transpose(data)
        data = np.flipud(data)
        image = create_png_image(data)
        # Save image.
        image_image_filename = "image_{nb:01d}_image.png".format(nb=image_nb)
        image_image_path = os.path.join(images_path, image_image_filename)
        image.save(image_image_path)

        return luminance_data

    def get_pattern_data(pattern_nb):

        # Load pattern data.
        data = load_checkerboard_data(pattern_nb, with_borders=0.5)
        data = 2.0 * (data - 0.5) / 254.0
        # Save pattern data.
        pattern_data_path = os.path.join(base_path,
                                         patterns_params[pattern_nb]['path'])
        np.save(pattern_data_path, data[1:-1, 1:-1])  # without borders
        # Project.
        # a_x = cb.get_horizontal_angles()
        # a_y = cb.get_vertical_angles()
        # rbs = sp.interpolate.RectBivariateSpline(a_x, a_y, data, kx=1, ky=1)
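        # NOTE: nearest-neighbour interpolation is used instead of the spline
        # above, presumably to preserve the hard edges of the checks.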
        a_x = cb.get_horizontal_angles(with_borders=True)
        a_y = cb.get_vertical_angles(with_borders=True)
        a_x, a_y = np.meshgrid(a_x, a_y)
        a = np.stack((np.ravel(a_x), np.ravel(a_y)),
                     axis=1)  # i.e. stack along 2nd axis
        ni = sp.interpolate.NearestNDInterpolator(a, np.ravel(data))
        angular_resolution = math.atan(
            frame_resolution / eye_diameter) * (180.0 / math.pi)
        a_x = compute_horizontal_angles(width=frame_width,
                                        angular_resolution=angular_resolution)
        a_y = compute_vertical_angles(height=frame_height,
                                      angular_resolution=angular_resolution)
        a_x, a_y = np.meshgrid(a_x, a_y)
        # data = rbs.ev(a_x, a_y)
        # data = data.transpose()
        a = np.stack((np.ravel(a_x), np.ravel(a_y)),
                     axis=1)  # i.e. stack along 2nd axis
        data = ni(a)
        shape = (frame_width, frame_height)
        data = np.reshape(data, shape)
        data = data.transpose()
        # Create pattern image.
        image_data = data
        image_data = 254.0 * image_data
        image_data = np.array(
            image_data,
            dtype=np.uint8)  # 0.0 -> 0 and 1.0 -> 254 such that 0.5 -> 127
        image_data = np.transpose(image_data)
        image_data = np.flipud(image_data)
        pattern = create_png_image(image_data)
        # Save pattern.
        pattern_image_filename = "pattern_{nb:01d}_image.png".format(
            nb=pattern_nb)
        pattern_image_path = os.path.join(patterns_path,
                                          pattern_image_filename)
        pattern.save(pattern_image_path)

        return data

    def get_frame_path(image_nb, pattern_nb):

        image_index = np.where(image_nbs == image_nb)[0][0]
        pattern_index = np.where(pattern_nbs == pattern_nb)[0][0]
        frame_nb = (image_index * nb_patterns) + pattern_index
        filename = "frame_{nb:04d}.png".format(nb=frame_nb)
        path = os.path.join(frames_path, filename)

        return path

    # Set condition parameters.
    condition_nb = 0
    conditions_params = collections.OrderedDict()
    frame_paths = collections.OrderedDict()
    for image_nb in image_nbs:
        for pattern_nb in pattern_nbs:
            assert condition_nb not in conditions_params
            conditions_params[condition_nb] = collections.OrderedDict([
                ('image_nb', image_nb),
                ('pattern_nb', pattern_nb),
            ])
            frame_paths[condition_nb] = get_frame_path(image_nb, pattern_nb)
            condition_nb += 1
    condition_nbs = np.array(list(conditions_params.keys()))
    nb_conditions = len(condition_nbs)

    # Create frames.
    # # Preload images data.
    images_data = {}
    for image_nb in image_nbs:
        # Get image data.
        image_data = get_image_data(image_nb)
        # Store image data.
        images_data[image_nb] = image_data
    # # Create frames.
    pattern_data = None
    for pattern_nb in tqdm.tqdm(pattern_nbs):
        for image_nb in image_nbs:
            frame_path = get_frame_path(image_nb, pattern_nb)
            if os.path.isfile(frame_path):
                continue
            # Get image data.
            image_data = images_data[image_nb]
            # Get pattern data.
            pattern_data = get_pattern_data(
                pattern_nb) if pattern_data is None else pattern_data
            # Create frame data.
            data = image_data + amplitude_value * pattern_data
            # Create frame image.
            data[data < 0.0] = 0.0
            data[data > 1.0] = 1.0
            data = np.array(
                254.0 * data,
                dtype=np.uint8)  # 0.0 -> 0 and 1.0 -> 254 such that 0.5 -> 127
            data = np.transpose(data)
            data = np.flipud(data)
            image = create_png_image(data)
            # Save frame image.
            image.save(frame_path)
        pattern_data = None

    # Set condition ordering for each repetition.
    repetition_orderings = collections.OrderedDict()
    np.random.seed(seed)
    for repetition_nb in range(0, nb_repetitions):
        ordering = np.copy(condition_nbs)
        np.random.shuffle(ordering)
        repetition_orderings[repetition_nb] = ordering

    # Create conditions .csv file.
    conditions_csv_filename = '{}_conditions.csv'.format(name)
    conditions_csv_path = os.path.join(base_path, conditions_csv_filename)
    print("Start creating conditions .csv file...")
    conditions_csv_file = open_csv_file(conditions_csv_path,
                                        columns=['image_nb', 'pattern_nb'])
    for condition_nb in condition_nbs:
        condition_params = conditions_params[condition_nb]
        conditions_csv_file.append(**condition_params)
    conditions_csv_file.close()
    print("End of conditions .csv file creation.")

    # Create images .csv file.
    images_csv_filename = '{}_images.csv'.format(name)
    images_csv_path = os.path.join(base_path, images_csv_filename)
    print("Start creating images .csv file...")
    images_csv_file = open_csv_file(images_csv_path, columns=['path'])
    for image_nb in image_nbs:
        image_params = images_params[image_nb]
        images_csv_file.append(**image_params)
    images_csv_file.close()
    print("End of images .csv file creation.")

    # Create patterns .csv file.
    patterns_csv_filename = '{}_patterns.csv'.format(name)
    patterns_csv_path = os.path.join(base_path, patterns_csv_filename)
    print("Start creating patterns .csv file...")
    patterns_csv_file = open_csv_file(patterns_csv_path, columns=['path'])
    for pattern_nb in pattern_nbs:
        pattern_params = patterns_params[pattern_nb]
        patterns_csv_file.append(**pattern_params)
    patterns_csv_file.close()
    print("End of patterns .csv file creation.")

    # Create .bin file.
    print("Start creating .bin file...")
    bin_filename = '{}.bin'.format(name)
    bin_path = os.path.join(base_path, bin_filename)
    nb_bin_images = 1 + nb_conditions  # i.e. grey image and other conditions
    bin_frame_nbs = {}
    # Open .bin file.
    bin_file = open_bin_file(bin_path,
                             nb_bin_images,
                             frame_width=frame_width,
                             frame_height=frame_height,
                             reverse=False,
                             mode='w')
    # Add grey frame.
    grey_frame = get_grey_frame(frame_width,
                                frame_height,
                                luminance=mean_luminance)
    grey_frame = float_frame_to_uint8_frame(grey_frame)
    bin_file.append(grey_frame)
    bin_frame_nbs[None] = bin_file.get_frame_nb()
    # Add frames.
    for condition_nb in tqdm.tqdm(condition_nbs):
        frame_path = frame_paths[condition_nb]
        frame = load_png_image(frame_path)
        bin_file.append(frame.data)
        bin_frame_nbs[condition_nb] = bin_file.get_frame_nb()
    # Close .bin file.
    bin_file.close()
    # ...
    print("End of .bin file creation.")

    # Create .vec file.
    print("Start creating .vec file...")
    vec_filename = "{}.vec".format(name)
    vec_path = os.path.join(base_path, vec_filename)
    csv_filename = "{}_trials.csv".format(name)
    csv_path = os.path.join(base_path, csv_filename)
    # ...
    nb_displays_during_adaptation = int(
        np.ceil(adaptation_duration * display_rate))
    nb_displays_per_flash = int(np.ceil(flash_duration * display_rate))
    nb_displays_per_inter_flash = int(
        np.ceil(inter_flash_duration * display_rate))
    nb_displays_per_repetition = nb_conditions * (nb_displays_per_flash +
                                                  nb_displays_per_inter_flash)
    nb_displays = nb_displays_during_adaptation + nb_repetitions * nb_displays_per_repetition
    # Open .vec file.
    vec_file = open_vec_file(vec_path, nb_displays=nb_displays)
    # Open .csv file.
    csv_file = open_csv_file(
        csv_path,
        columns=['condition_nb', 'start_display_nb', 'end_display_nb'])
    # Add adaptation.
    bin_frame_nb = bin_frame_nbs[None]  # i.e. default frame (grey)
    for _ in range(0, nb_displays_during_adaptation):
        vec_file.append(bin_frame_nb)
    # Add repetitions.
    for repetition_nb in tqdm.tqdm(range(0, nb_repetitions)):
        condition_nbs = repetition_orderings[repetition_nb]
        for condition_nb in condition_nbs:
            # Add flash.
            start_display_nb = vec_file.get_display_nb() + 1
            bin_frame_nb = bin_frame_nbs[condition_nb]
            for _ in range(0, nb_displays_per_flash):
                vec_file.append(bin_frame_nb)
            end_display_nb = vec_file.get_display_nb()
            csv_file.append(condition_nb=condition_nb,
                            start_display_nb=start_display_nb,
                            end_display_nb=end_display_nb)
            # Add inter flash.
            bin_frame_nb = bin_frame_nbs[None]  # i.e. default frame (grey)
            for _ in range(0, nb_displays_per_inter_flash):
                vec_file.append(bin_frame_nb)
    # Close .csv file.
    csv_file.close()
    # Close .vec file.
    vec_file.close()
    # ...
    print("End of .vec file creation.")

    # TODO add unperturbed flashed images?

    return
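
A minimal sketch of the frame composition performed in the loop above (compose_frame is a hypothetical helper): a scaled perturbation pattern is added to an image, clipped to [0, 1] and quantized so that 0.0 -> 0, 0.5 -> 127 and 1.0 -> 254.

def compose_frame(image_data, pattern_data, amplitude):
    data = image_data + amplitude * pattern_data
    data = np.clip(data, 0.0, 1.0)
    data = np.array(254.0 * data, dtype=np.uint8)
    data = np.transpose(data)  # (width, height) -> (height, width)
    data = np.flipud(data)     # PNG rows run from top to bottom
    return data
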
Example #3
def generate(args):

    config = handle_arguments_and_configurations(name, args)

    path = config['path']
    if not os.path.isdir(path):
        os.makedirs(path)
    print(path)

    reference_images_path = os.path.join(path, "reference_images")
    if not os.path.isdir(reference_images_path):
        os.makedirs(reference_images_path)

    perturbation_patterns_path = os.path.join(path, "perturbation_patterns")
    if not os.path.isdir(perturbation_patterns_path):
        os.makedirs(perturbation_patterns_path)

    frames_path = os.path.join(path, "frames")
    if not os.path.isdir(frames_path):
        os.makedirs(frames_path)

    # Get configuration parameters.
    reference_images = config['reference_images']
    nb_horizontal_checks = config['perturbations']['nb_horizontal_checks']
    nb_vertical_checks = config['perturbations']['nb_vertical_checks']
    assert nb_horizontal_checks % 2 == 0, "number of checks should be even (horizontally): {}".format(
        nb_horizontal_checks)
    assert nb_vertical_checks % 2 == 0, "number of checks should be even (vertically): {}".format(
        nb_vertical_checks)
    with_random_patterns = config['perturbations']['with_random_patterns']
    perturbation_patterns_indices = config['perturbations']['pattern_indices']
    perturbation_amplitudes = config['perturbations']['amplitudes']
    perturbation_amplitudes_indices = [
        k for k, _ in enumerate(perturbation_amplitudes)
    ]
    display_rate = config['display_rate']
    frame_width_in_px = config['frame']['width']
    frame_height_in_px = config['frame']['height']
    frame_duration = config['frame']['duration']
    nb_repetitions = config['nb_repetitions']

    # Collect reference images.
    reference_indices = [int(key) for key in reference_images.keys()]
    for reference_index in reference_indices:
        dataset, index = reference_images[str(reference_index)]
        collect_reference_image(reference_index,
                                dataset=dataset,
                                index=index,
                                path=reference_images_path,
                                config=config)

    # Create .csv file for reference_image.
    csv_filename = "{}_reference_images.csv".format(name)
    csv_path = os.path.join(path, csv_filename)
    columns = ['reference_image_path']
    csv_file = open_csv_file(csv_path, columns=columns)
    for index in reference_indices:
        reference_image_path = os.path.join(
            "reference_images", "reference_{:05d}.png".format(index))
        csv_file.append(reference_image_path=reference_image_path)
    csv_file.close()

    # Prepare perturbation pattern indices.
    if with_random_patterns:
        # TODO check the following lines!
        # Compute the number of random patterns.
        nb_perturbation_patterns = len(perturbation_patterns_indices)
        nb_perturbation_amplitudes = len(perturbation_amplitudes_indices)
        nb_perturbations = nb_perturbation_patterns * nb_perturbation_amplitudes
        nb_random_patterns = nb_perturbations * nb_repetitions
        print("number of random patterns: {}".format(
            nb_random_patterns))  # TODO remove this line.
        # Choose the indices of the random patterns.
        random_patterns_indices = nb_perturbation_patterns + np.arange(
            0, nb_random_patterns)
        # Define pattern indices.
        all_patterns_indices = np.concatenate(
            (perturbation_patterns_indices, random_patterns_indices))
    else:
        nb_random_patterns = 0
        random_patterns_indices = None
        all_patterns_indices = perturbation_patterns_indices

    print("Start collecting perturbation patterns...")

    # TODO remove the following commented lines?
    # for index in perturbation_patterns_indices:
    #     collect_perturbation_pattern(index, nb_horizontal_checks=nb_horizontal_checks,
    #                                  nb_vertical_checks=nb_vertical_checks, path=perturbation_patterns_path)
    for index in tqdm.tqdm(all_patterns_indices):
        collect_perturbation_pattern(index,
                                     nb_horizontal_checks=nb_horizontal_checks,
                                     nb_vertical_checks=nb_vertical_checks,
                                     path=perturbation_patterns_path)

    print("End collecting perturbation patterns.")

    # Create .csv file for perturbation pattern.
    csv_filename = "{}_perturbation_patterns.csv".format(name)
    csv_path = os.path.join(path, csv_filename)
    columns = ['perturbation_pattern_path']
    csv_file = open_csv_file(csv_path, columns=columns)
    # TODO remove the following commented line?
    # for index in perturbation_patterns_indices:
    for index in all_patterns_indices:
        perturbation_pattern_path = os.path.join(
            "perturbation_patterns", "checkerboard{:05d}.png".format(index))
        csv_file.append(perturbation_pattern_path=perturbation_pattern_path)
    csv_file.close()

    # Create .csv file for perturbation amplitudes.
    csv_filename = "{}_perturbation_amplitudes.csv".format(name)
    csv_path = os.path.join(path, csv_filename)
    columns = ['perturbation_amplitude']
    csv_file = open_csv_file(csv_path, columns=columns)
    for perturbation_amplitude in perturbation_amplitudes:
        csv_file.append(perturbation_amplitude=perturbation_amplitude)
    csv_file.close()

    # Compute the number of images.
    nb_reference_images = len(reference_indices)
    nb_perturbation_patterns = len(perturbation_patterns_indices)
    nb_perturbation_amplitudes = len(perturbation_amplitudes_indices)
    nb_images = 1 + nb_reference_images * (
        1 + nb_perturbation_patterns * nb_perturbation_amplitudes)
    if with_random_patterns:
        nb_images = nb_images + nb_reference_images * nb_random_patterns  # TODO check this line!

    combinations = get_combinations(reference_indices,
                                    perturbation_patterns_indices,
                                    perturbation_amplitudes_indices)

    if with_random_patterns:
        nb_deterministic_combinations = len(
            combinations) + 1  # +1 to take inter-stimulus frame into account
        random_combinations = get_random_combinations(
            reference_indices, random_patterns_indices,
            nb_deterministic_combinations)
    else:
        random_combinations = None

    # Create .csv file.
    csv_filename = "{}_combinations.csv".format(name)
    csv_path = os.path.join(path, csv_filename)
    columns = [
        'reference_id', 'perturbation_pattern_id', 'perturbation_amplitude_id'
    ]
    csv_file = open_csv_file(csv_path, columns=columns, dtype='Int64')
    for combination_index in combinations:
        combination = combinations[combination_index]
        # TODO remove the following commented lines?
        # kwargs = {
        #     'reference_id': reference_indices[combination[0]],
        #     'perturbation_pattern_id': perturbation_patterns_indices[combination[1]],
        #     'perturbation_amplitude_id': perturbation_amplitudes_indices[combination[2]],
        # }
        reference_id, perturbation_pattern_id, perturbation_amplitude_id = combination
        kwargs = {
            'reference_id': reference_id,
            'perturbation_pattern_id': perturbation_pattern_id,
            'perturbation_amplitude_id': perturbation_amplitude_id,
        }
        csv_file.append(**kwargs)
    # TODO check the following lines!
    # Add random pattern (if necessary).
    if with_random_patterns:
        for combination_index in random_combinations:
            combination = random_combinations[combination_index]
            reference_id, pattern_id, amplitude_id = combination
            kwargs = {
                'reference_id': reference_id,
                'perturbation_pattern_id': pattern_id,
                'perturbation_amplitude_id': amplitude_id,
            }
            csv_file.append(**kwargs)
    csv_file.close()

    # TODO fix the permutations?

    nb_combinations = len(combinations)
    nb_frame_displays = int(display_rate * frame_duration)
    assert display_rate * frame_duration == float(nb_frame_displays)
    nb_displays = nb_frame_displays + nb_repetitions * nb_combinations * (
        2 * nb_frame_displays)
    if with_random_patterns:
        nb_random_combinations = len(random_combinations)
        nb_displays = nb_displays + nb_random_combinations * (
            2 * nb_frame_displays)

    display_time = float(nb_displays) / display_rate
    print("display time: {} s ({} min)".format(display_time,
                                               display_time / 60.0))
    # TODO improve feedback.

    combination_indices = list(combinations)
    permutations = get_permutations(combination_indices,
                                    nb_repetitions=nb_repetitions)

    if with_random_patterns:
        random_combination_indices = list(random_combinations)
        random_combination_groups = get_random_combination_groups(
            random_combination_indices, nb_repetitions=nb_repetitions)
    else:
        random_combination_groups = None

    print("Start creating .bin file...")

    # Create .bin file.
    bin_filename = "fipwc.bin"
    bin_path = os.path.join(path, bin_filename)
    bin_file = open_bin_file(bin_path,
                             nb_images,
                             frame_width=frame_width_in_px,
                             frame_height=frame_height_in_px)
    # Save grey frame.
    grey_frame = get_grey_frame(frame_width_in_px,
                                frame_height_in_px,
                                luminance=0.5)
    grey_frame = float_frame_to_uint8_frame(grey_frame)
    # # Save frame in .bin file.
    bin_file.append(grey_frame)
    # # Save frame as .png file.
    grey_frame_filename = "grey.png"
    grey_frame_path = os.path.join(frames_path, grey_frame_filename)
    save_frame(grey_frame_path, grey_frame)
    # Save reference frames.
    for reference_index in reference_indices:
        # Get reference frame.
        reference_image = load_reference_image(reference_index,
                                               reference_images_path)
        reference_frame = float_frame_to_uint8_frame(reference_image)
        # Save frame in .bin file.
        bin_file.append(reference_frame)
        # Save frame as .png file.
        reference_frame_filename = "reference_{:05d}.png".format(
            reference_index)
        reference_frame_path = os.path.join(frames_path,
                                            reference_frame_filename)
        save_frame(reference_frame_path, reference_frame)
    # Save perturbed frames.
    for reference_index in tqdm.tqdm(reference_indices):
        reference_image = load_reference_image(reference_index,
                                               reference_images_path)
        for perturbation_pattern_index in perturbation_patterns_indices:
            perturbation_pattern = load_perturbation_pattern(
                perturbation_pattern_index, perturbation_patterns_path)
            for perturbation_amplitude_index in perturbation_amplitudes_indices:
                # Get perturbed frame.
                perturbation_amplitude = perturbation_amplitudes[
                    perturbation_amplitude_index]
                perturbed_frame = get_perturbed_frame(reference_image,
                                                      perturbation_pattern,
                                                      perturbation_amplitude,
                                                      config)
                perturbed_frame = float_frame_to_uint8_frame(perturbed_frame)
                # Save frame in .bin file.
                bin_file.append(perturbed_frame)
                # Save frame as .png file.
                perturbed_frame_filename = "perturbed_r{:05d}_p{:05d}_a{:05d}.png".format(
                    reference_index, perturbation_pattern_index,
                    perturbation_amplitude_index)
                perturbed_frame_path = os.path.join(frames_path,
                                                    perturbed_frame_filename)
                save_frame(perturbed_frame_path, perturbed_frame)
    # TODO check the following lines!
    # Save randomly perturbed frames (if necessary).
    if with_random_patterns:
        for reference_index in tqdm.tqdm(reference_indices):
            reference_image = load_reference_image(reference_index,
                                                   reference_images_path)
            for perturbation_pattern_index in random_patterns_indices:
                pattern = load_perturbation_pattern(
                    perturbation_pattern_index, perturbation_patterns_path)
                # Get perturbed frame.
                amplitude = float(15) / float(256)  # TODO change this value?
                frame = get_perturbed_frame(reference_image, pattern,
                                            amplitude, config)
                frame = float_frame_to_uint8_frame(frame)
                # Save frame in .bin file.
                bin_file.append(frame)
                # Save frame as .png file (if necessary).
                if perturbation_pattern_index < 100:
                    perturbed_frame_filename = "perturbed_r{:05d}_p{:05d}.png".format(
                        reference_index, perturbation_pattern_index)
                    perturbed_frame_path = os.path.join(
                        frames_path, perturbed_frame_filename)
                    save_frame(perturbed_frame_path, frame)

    bin_file.close()

    print("End creating .bin file.")

    print("Start creating .vec and .csv files...")

    # Create .vec and .csv files.
    vec_filename = "{}.vec".format(name)
    vec_path = os.path.join(path, vec_filename)
    vec_file = open_vec_file(vec_path, nb_displays=nb_displays)
    csv_filename = "{}.csv".format(name)
    csv_path = os.path.join(path, csv_filename)
    csv_file = open_csv_file(csv_path,
                             columns=['k_min', 'k_max', 'combination_id'])
    # Append adaptation.
    grey_frame_id = 0
    for _ in range(0, nb_frame_displays):
        vec_file.append(grey_frame_id)
    # For each repetition...
    for repetition_index in range(0, nb_repetitions):
        # Add frozen patterns.
        combination_indices = permutations[repetition_index]
        for combination_index in combination_indices:
            combination_frame_id = combination_index
            k_min = vec_file.get_display_index() + 1
            # Append trial.
            for _ in range(0, nb_frame_displays):
                vec_file.append(combination_frame_id)
            k_max = vec_file.get_display_index()
            csv_file.append(k_min=k_min,
                            k_max=k_max,
                            combination_id=combination_index)
            # Append intertrial.
            for _ in range(0, nb_frame_displays):
                vec_file.append(grey_frame_id)
        # TODO add a random pattern.
        # Add random patterns (if necessary).
        if with_random_patterns:
            random_combination_indices = random_combination_groups[
                repetition_index]
            for combination_index in random_combination_indices:
                combination_frame_id = combination_index
                k_min = vec_file.get_display_index() + 1
                # Append trial.
                for _ in range(0, nb_frame_displays):
                    vec_file.append(combination_frame_id)
                k_max = vec_file.get_display_index()
                csv_file.append(k_min=k_min,
                                k_max=k_max,
                                combination_id=combination_index)
                # Append intertrial.
                for _ in range(0, nb_frame_displays):
                    vec_file.append(grey_frame_id)

    csv_file.close()
    vec_file.close()

    print("End creating .vec and .csv files.")

    return
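
A worked example of the display bookkeeping above (all values hypothetical, ignoring the optional random patterns): each trial occupies nb_frame_displays flash displays plus the same number of grey intertrial displays, after an initial grey adaptation period.

display_rate = 50.0   # Hz
frame_duration = 0.5  # s
nb_repetitions = 10
nb_combinations = 20
nb_frame_displays = int(display_rate * frame_duration)  # 25 displays
nb_displays = nb_frame_displays \
    + nb_repetitions * nb_combinations * (2 * nb_frame_displays)  # 10025
print("display time: {} s".format(float(nb_displays) / display_rate))  # 200.5 s
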
Example #4
def generate(args):

    config = handle_arguments_and_configurations(name, args)

    pixel_size = 3.5  # µm
    # dmd_width = 1920  # px
    # dmd_height = 1080  # px

    frame_width_in_um = config['frame']['width']
    frame_height_in_um = config['frame']['height']
    frame_rate = config['frame']['rate']
    nb_repetitions = config['nb_repetitions']

    path = config['path']
    if not os.path.isdir(path):
        os.makedirs(path)
    print(path)

    dtype = np.uint8
    nb_grey_levels = np.iinfo(dtype).max - np.iinfo(dtype).min + 1
    nb_images = nb_grey_levels

    frame_height_in_px, frame_width_in_px = shape(pixel_size,
                                                  width=frame_width_in_um,
                                                  height=frame_height_in_um)

    # Create .bin file.
    bin_filename = "{}.bin".format(name)
    bin_path = os.path.join(path, bin_filename)
    bin_file = open_bin_file(bin_path,
                             nb_images,
                             frame_width=frame_width_in_px,
                             frame_height=frame_height_in_px,
                             mode='w')
    for k in range(0, nb_grey_levels):
        grey_level = float(k) / nb_grey_levels
        frame = get_grey_frame(frame_width_in_px,
                               frame_height_in_px,
                               luminance=grey_level)
        frame = float_frame_to_uint8_frame(frame)
        bin_file.append(frame)
        bin_file.flush()
    bin_file.close()

    pattern_config = config['pattern']
    pattern = get_pattern(pattern_config, frame_rate)
    intertrial_duration = config['intertrial_duration']
    initial_adaptation_duration = config['initial_adaptation_duration']
    assert (intertrial_duration * frame_rate).is_integer()

    # Create .csv file for pattern profile.
    csv_filename = "{}_luminance_profile.csv".format(name)
    csv_path = os.path.join(path, csv_filename)
    columns = ['luminance']
    csv_file = open_csv_file(csv_path, columns=columns)
    luminances = digitize(pattern)
    for luminance in luminances:
        csv_file.append(luminance=luminance)
    csv_file.close()

    # Plot pattern profile.
    plot_filename = "{}.pdf".format(name)
    plot_path = os.path.join(path, plot_filename)
    fig, ax = plot_pattern(pattern, frame_rate)
    fig.savefig(plot_path)
    plt.close(fig)

    # nb_displays_per_trial = int(np.round(trial_duration * frame_rate))
    nb_displays_per_trial = pattern.size
    nb_displays_per_intertrial = int(np.round(intertrial_duration *
                                              frame_rate))
    nb_displays_in_initial_adaptation = int(
        np.round(initial_adaptation_duration * frame_rate))

    nb_trials = nb_repetitions
    nb_intertrials = nb_trials
    nb_displays = \
        nb_displays_in_initial_adaptation \
        + nb_trials * nb_displays_per_trial \
        + nb_intertrials * nb_displays_per_intertrial

    frame_indices = digitize(pattern)

    # Create .vec and .csv files.
    vec_filename = "{}.vec".format(name)
    vec_path = os.path.join(path, vec_filename)
    vec_file = open_vec_file(vec_path, nb_displays=nb_displays)
    csv_filename = "{}.csv".format(name)
    csv_path = os.path.join(path, csv_filename)
    csv_file = open_csv_file(csv_path,
                             columns=['k_min', 'k_max', 'combination_id'])
    # Append initial adaptation.
    for _ in range(0, nb_displays_in_initial_adaptation):
        frame_index = 0
        vec_file.append(frame_index)
    # For each repetition...
    for _ in range(0, nb_repetitions):
        k_min = vec_file.get_display_index() + 1
        # Append trial.
        for k in range(0, nb_displays_per_trial):
            frame_id = frame_indices[k]
            vec_file.append(frame_id)
        k_max = vec_file.get_display_index()
        csv_file.append(k_min=k_min, k_max=k_max, combination_id=0)
        # Append intertrial.
        for _ in range(0, nb_displays_per_intertrial):
            frame_id = 0
            vec_file.append(frame_id)
    csv_file.close()
    vec_file.close()

    # Print the total duration.
    duration = float(nb_displays) / frame_rate
    print("duration: {} s".format(duration))
    print("duration: {} min".format(duration / 60.0))

    return
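
A minimal sketch of what digitize() is assumed to do above (its implementation is not shown in the original): map a luminance profile in [0, 1] onto the indices of the 256 grey frames written to the .bin file.

def digitize(pattern, nb_grey_levels=256):
    # Quantize each luminance value to the nearest available grey-frame index.
    indices = np.floor(pattern * nb_grey_levels).astype(int)
    return np.clip(indices, 0, nb_grey_levels - 1)
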
Example #5
def generate(args):

    config = handle_arguments_and_configurations(name, args)

    base_path = config['path']
    if not os.path.isdir(base_path):
        os.makedirs(base_path)
    print("Generation in {}.".format(base_path))

    # Get configuration parameters.
    stimuli_dirnames = config['stimuli']
    mean_luminance = config['mean_luminance']
    display_rate = config['display_rate']
    adaptation_duration = config['adaptation_duration']
    flash_duration = config['flash_duration']
    inter_flash_duration = config['inter_flash_duration']
    frame_width = config['frame']['width']
    frame_height = config['frame']['height']
    # nb_repetitions = config['nb_repetitions']  # TODO remove?
    seed = config['seed']

    # ...
    stimulus_nbs = []
    stimuli_params = {}
    for stimulus_nb, stimulus_dirname in stimuli_dirnames.items():
        stimulus_nb = int(stimulus_nb)  # TODO improve?
        assert os.path.isdir(stimulus_dirname), stimulus_dirname
        stimulus_nbs.append(stimulus_nb)
        stimuli_params[stimulus_nb] = {
            'dirname': stimulus_dirname,
            'name': os.path.split(stimulus_dirname)[-1],
        }
    stimulus_nbs = np.array(stimulus_nbs)

    # Set conditions parameters.
    condition_nb = 0
    condition_params = collections.OrderedDict()
    for stimulus_nb in stimulus_nbs:
        assert condition_nb not in condition_params
        condition_params[condition_nb] = collections.OrderedDict([
            ('stimulus_nb', stimulus_nb)
        ])
        condition_nb += 1
    condition_nbs = np.array(list(condition_params))
    nb_conditions = len(condition_nbs)
    _ = nb_conditions

    # ...
    stimuli_nb_trials = {}
    stimuli_condition_nbs = {}  # stimulus_nb, trial_nb -> condition_nb
    stimuli_bin_frame_nbs = {  # stimulus_nb, condition_nb -> bin_frame_nb
        None: 0,  # i.e. inter-flash frame (grey frame)
    }
    for stimulus_nb in stimulus_nbs:
        stimulus_params = stimuli_params[stimulus_nb]
        # ...
        stimulus_trial_csv_dirname = stimulus_params['dirname']
        stimulus_trial_csv_filename = '{}_trials.csv'.format(stimulus_params['name'])
        stimulus_trial_csv_path = os.path.join(stimulus_trial_csv_dirname, stimulus_trial_csv_filename)
        stimulus_trials = load_csv_file(stimulus_trial_csv_path)
        # ...
        stimulus_condition_nbs = {}  # trial_nb -> condition_nb
        stimulus_start_frame_nbs = {}  # trial_nb -> start_frame_nb
        for trial_nb, stimulus_trial in stimulus_trials.iterrows():
            stimulus_condition_nbs[trial_nb] = stimulus_trial['condition_nb']
            stimulus_start_frame_nbs[trial_nb] = stimulus_trial['start_frame_nb']
        # ...
        stimulus_vec_dirname = stimulus_params['dirname']
        stimulus_vec_filename = '{}.vec'.format(stimulus_params['name'])
        stimulus_vec_path = os.path.join(stimulus_vec_dirname, stimulus_vec_filename)
        stimulus_vec = load_vec_file(stimulus_vec_path)
        # ...
        stimulus_bin_frame_nbs = {}
        nb_trials = len(stimulus_trials)
        for trial_nb in range(0, nb_trials):
            condition_nb = stimulus_condition_nbs[trial_nb]
            # NOTE: the dict is keyed by trial_nb (the original snippet looked
            # this up by condition_nb, which appears to be a slip).
            start_frame_nb = stimulus_start_frame_nbs[trial_nb]
            if condition_nb not in stimulus_bin_frame_nbs:
                stimulus_bin_frame_nbs[condition_nb] = stimulus_vec[start_frame_nb]
            else:
                assert stimulus_bin_frame_nbs[condition_nb] == stimulus_vec[start_frame_nb]
        # ...
        stimuli_nb_trials[stimulus_nb] = len(stimulus_trials)
        stimuli_condition_nbs[stimulus_nb] = stimulus_condition_nbs
        stimuli_bin_frame_nbs[stimulus_nb] = stimulus_bin_frame_nbs

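    # Interleave the trials of all stimuli: build a sequence with one entry
    # per trial, shuffle it, then assign each position a global trial number.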
    stimulus_sequence = np.concatenate([
        np.repeat(stimulus_nb, len(stimuli_condition_nbs[stimulus_nb]))
        for stimulus_nb in stimulus_nbs
    ])
    np.random.seed(seed)
    np.random.shuffle(stimulus_sequence)
    stimuli_indices = {
        stimulus_nb: np.where(stimulus_sequence == stimulus_nb)[0]
        for stimulus_nb in stimulus_nbs
    }
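    # Shuffling the concatenated sequence of stimulus numbers interleaves the
    # trials of all stimuli; `stimuli_indices` then records, for each
    # stimulus, the positions it occupies in the shuffled sequence (in
    # increasing order, which preserves the within-stimulus trial order).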

    trials = {}  # trial_nb -> stimulus_nb, condition_nb
    trial_nb = 0
    ordering = np.empty_like(stimulus_sequence, dtype=int)
    for stimulus_nb in stimulus_nbs:
        stimulus_condition_nbs = stimuli_condition_nbs[stimulus_nb]
        stimulus_indices = stimuli_indices[stimulus_nb]
        for condition_nb, stimulus_index in zip(stimulus_condition_nbs.values(), stimulus_indices):
            trials[trial_nb] = (stimulus_nb, condition_nb)
            ordering[stimulus_index] = trial_nb
            trial_nb += 1
    nb_trials = len(trials)


    # Create conditions .csv file.
    # TODO complete.

    # Get number of images in .bin files.
    stimuli_nb_bin_images = {}
    for stimulus_nb in stimulus_nbs:
        stimulus_params = stimuli_params[stimulus_nb]
        stimulus_bin_dirname = stimulus_params['dirname']
        stimulus_bin_filename = '{}.bin'.format(stimulus_params['name'])
        stimulus_bin_path = os.path.join(stimulus_bin_dirname, stimulus_bin_filename)
        stimulus_bin_file = open_bin_file(stimulus_bin_path, mode='r')
        stimuli_nb_bin_images[stimulus_nb] = stimulus_bin_file.nb_frames
        stimulus_bin_file.close()

    # Map stimulus bin frame numbers to bin frame numbers.
    bin_frame_nbs = {
        None: 0,
    }
    bin_frame_nb_offset = 1
    for stimulus_nb in stimulus_nbs:
        bin_frame_nbs[stimulus_nb] = {}
        stimulus_bin_frame_nbs = stimuli_bin_frame_nbs[stimulus_nb]
        for condition_nb in stimulus_bin_frame_nbs.keys():
            stimulus_bin_frame_nb = stimulus_bin_frame_nbs[condition_nb]
            bin_frame_nbs[stimulus_nb][condition_nb] = stimulus_bin_frame_nb + bin_frame_nb_offset
        bin_frame_nb_offset += stimuli_nb_bin_images[stimulus_nb]
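    # For example (illustrative values only): with the grey frame at bin
    # frame 0 and a first stimulus contributing 10 images, its frames map to
    # bin frames 1 through 10 and the next stimulus starts at offset 11.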

    # Create .bin file.
    bin_filename = '{}.bin'.format(name)
    bin_path = os.path.join(base_path, bin_filename)
    nb_bin_images = 1 + int(sum(stimuli_nb_bin_images.values()))
    # Open .bin file.
    bin_file = open_bin_file(bin_path, nb_bin_images, frame_width=frame_width, frame_height=frame_height, reverse=False, mode='w')
    # Add grey frame.
    grey_frame = get_grey_frame(frame_width, frame_height, luminance=mean_luminance)
    grey_frame = float_frame_to_uint8_frame(grey_frame)
    bin_file.append(grey_frame)
    # Add frames.
    for stimulus_nb in stimulus_nbs:
        stimulus_params = stimuli_params[stimulus_nb]
        stimulus_bin_dirname = stimulus_params['dirname']
        stimulus_bin_filename = '{}.bin'.format(stimulus_params['name'])
        stimulus_bin_path = os.path.join(stimulus_bin_dirname, stimulus_bin_filename)
        stimulus_bin_file = open_bin_file(stimulus_bin_path, mode='r')
        assert stimulus_bin_file.width == frame_width
        assert stimulus_bin_file.height == frame_height
        frame_nbs = stimulus_bin_file.get_frame_nbs()
        for frame_nb in frame_nbs:
            frame_bytes = stimulus_bin_file.read_frame_as_bytes(frame_nb)
            bin_file.append(frame_bytes)
        stimulus_bin_file.close()
    # Close .bin file.
    bin_file.close()
    # ...
    print("End of .bin file creation.")

    # Create .vec file.
    print("Start creating .vec file...")
    vec_filename = "{}.vec".format(name)
    vec_path = os.path.join(base_path, vec_filename)
    csv_filename = "{}_trials.csv".format(name)
    csv_path = os.path.join(base_path, csv_filename)
    # ...
    nb_displays_during_adaptation = int(np.ceil(adaptation_duration * display_rate))
    nb_displays_per_flash = int(np.ceil(flash_duration * display_rate))
    nb_displays_per_inter_flash = int(np.ceil(inter_flash_duration * display_rate))
    nb_displays_per_trial = nb_displays_per_flash + nb_displays_per_inter_flash
    nb_displays = nb_displays_during_adaptation + nb_trials * nb_displays_per_trial
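    # For instance, assuming a 60 Hz display with a 0.3 s flash and a 0.3 s
    # inter-flash (illustrative values only): ceil(0.3 * 60) = 18 displays
    # per flash and per inter-flash, i.e. 36 displays per trial, plus
    # ceil(adaptation_duration * 60) displays of initial adaptation.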
    # Open .vec file.
    vec_file = open_vec_file(vec_path, nb_displays=nb_displays)
    # Open .csv file.
    csv_file = open_csv_file(csv_path, columns=['stimulus_nb', 'start_frame_nb', 'end_frame_nb'])
    # Add adaptation.
    bin_frame_nb = bin_frame_nbs[None]  # i.e. default frame (grey)
    for _ in range(0, nb_displays_during_adaptation):
        vec_file.append(bin_frame_nb)
    for trial_nb in tqdm.tqdm(ordering):
        stimulus_nb, condition_nb = trials[trial_nb]
        # Add flash.
        start_frame_nb = vec_file.get_display_nb() + 1
        bin_frame_nb = bin_frame_nbs[stimulus_nb][condition_nb]
        for _ in range(0, nb_displays_per_flash):
            vec_file.append(bin_frame_nb)
        end_frame_nb = vec_file.get_display_nb()
        csv_file.append(stimulus_nb=stimulus_nb, start_frame_nb=start_frame_nb, end_frame_nb=end_frame_nb)
        # Add inter flash.
        bin_frame_nb = bin_frame_nbs[None]  # i.e. default frame (grey)
        for _ in range(0, nb_displays_per_inter_flash):
            vec_file.append(bin_frame_nb)
    # Close .csv file.
    csv_file.close()
    # Close .vec file.
    vec_file.close()
    # ...
    print("End of .vec file creation.")

    # TODO create conditions .csv file for each stimulus.

    return
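
# The .vec helpers used above (open_vec_file, load_vec_file) come from
# pystim's utilities. As a minimal sketch of the behaviour the example relies
# on -- assuming a plain-text .vec layout with one header line followed by
# one line per display whose second column is the bin frame number (an
# assumption about the format, not pystim's actual implementation):

import numpy as np


class VecFileSketch:
    """Minimal .vec writer sketch (assumed five-column text format)."""

    def __init__(self, path, nb_displays):
        self._file = open(path, mode='w')
        self._file.write('0 {} 0 0 0\n'.format(nb_displays))  # assumed header
        self._display_nb = -1  # no display appended yet

    def append(self, bin_frame_nb):
        self._file.write('0 {} 0 0 0\n'.format(bin_frame_nb))
        self._display_nb += 1

    def get_display_nb(self):
        # Number of the last appended display (0-based).
        return self._display_nb

    def close(self):
        self._file.close()


def load_vec_file_sketch(path):
    # Return the sequence of bin frame numbers, one entry per display.
    data = np.loadtxt(path, dtype=int, skiprows=1, ndmin=2)
    return data[:, 1]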
Example #6
def generate(args):

    config = handle_arguments_and_configurations(name, args)

    frame_width_in_um = config['frame']['width']
    frame_height_in_um = config['frame']['height']
    frame_rate = config['frame']['rate']
    frame_resolution = config['frame']['resolution']
    background_luminance = config['background_luminance']
    size = config['size']
    duration = config['duration']
    nb_repetitions = config['nb_repetitions']
    path = config['path']

    pixel_size = frame_resolution * 1e+6  # µm per pixel (frame_resolution is in m per pixel)

    # Check duration.
    assert (duration * frame_rate).is_integer()
    assert int(duration * frame_rate) % 2 == 0
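    # E.g. duration = 1.0 s at frame_rate = 50.0 Hz (illustrative values
    # only) gives 50 displays, an even integer, so each repetition can be
    # split into equal white and black half-periods below.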

    # Create output directory (if necessary).
    if not os.path.isdir(path):
        os.makedirs(path)
    print("Generation in {}.".format(path))

    # Collect frame parameters.
    frame_height_in_px, frame_width_in_px = shape(pixel_size,
                                                  width=frame_width_in_um,
                                                  height=frame_height_in_um)
    x, y = linspace(pixel_size,
                    width=frame_width_in_um,
                    height=frame_height_in_um)
    xm = np.logical_and(-size / 2.0 <= x, x <= +size / 2.0)
    ym = np.logical_and(-size / 2.0 <= y, y <= +size / 2.0)
    i = np.nonzero(xm)[0]
    j = np.nonzero(ym)[0]
    i_min, i_max = i[0], i[-1]
    j_min, j_max = j[0], j[-1]
    # Create white frame.
    white_frame = get_grey_frame(frame_width_in_px,
                                 frame_height_in_px,
                                 luminance=background_luminance)
    white_frame[j_min:j_max + 1, i_min:i_max + 1] = 1.0
    white_frame = float_frame_to_uint8_frame(white_frame)
    # Create black frame.
    black_frame = get_grey_frame(frame_width_in_px,
                                 frame_height_in_px,
                                 luminance=background_luminance)
    black_frame[j_min:j_max + 1, i_min:i_max + 1] = 0.0
    black_frame = float_frame_to_uint8_frame(black_frame)

    nb_images = 2

    # Create .bin file.
    bin_filename = "{}.bin".format(name)
    bin_path = os.path.join(path, bin_filename)
    bin_file = open_bin_file(bin_path,
                             nb_images,
                             frame_width=frame_width_in_px,
                             frame_height=frame_height_in_px,
                             mode='w')
    bin_file.append(black_frame)
    bin_file.append(white_frame)
    bin_file.close()

    nb_displays_per_repetition = int(duration * frame_rate)
    nb_displays = nb_displays_per_repetition * nb_repetitions
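    # E.g. with the illustrative values above (1.0 s at 50.0 Hz) and
    # nb_repetitions = 10: 50 displays per repetition (25 white then 25
    # black) and 500 displays in total.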

    # Create .vec file.
    vec_filename = "{}.vec".format(name)
    vec_path = os.path.join(path, vec_filename)
    vec_file = open_vec_file(vec_path, nb_displays)
    for _ in range(0, nb_repetitions):
        for _ in range(0, nb_displays_per_repetition // 2):
            frame_index = 1  # i.e. white
            vec_file.append(frame_index)
        for _ in range(0, nb_displays_per_repetition // 2):
            frame_index = 0  # i.e. black
            vec_file.append(frame_index)
    vec_file.close()

    return
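
# This example leans on two small frame helpers from pystim's utilities. A
# minimal sketch of what they plausibly do (assumed semantics: luminances
# are floats in [0, 1] and frames are (height, width) arrays; this is not
# pystim's actual code):

import numpy as np


def get_grey_frame_sketch(width, height, luminance=0.5):
    # Uniform frame filled with the given luminance.
    return np.full((height, width), luminance, dtype=np.float64)


def float_frame_to_uint8_frame_sketch(frame):
    # Map [0.0, 1.0] to [0, 255], clipping out-of-range values.
    return (255.0 * np.clip(frame, 0.0, 1.0)).astype(np.uint8)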
Example #7
def generate(args):

    config = handle_arguments_and_configurations(name, args)

    base_path = config['path']
    if not os.path.isdir(base_path):
        os.makedirs(base_path)
    print("Generation in {}.".format(base_path))

    # Get configuration parameters.
    stimulus_names = config['stimuli']
    mean_luminance = config['mean_luminance']
    display_rate = config['display_rate']
    adaptation_duration = config['adaptation_duration']
    flash_duration = config['flash_duration']
    inter_flash_duration = config['inter_flash_duration']
    frame_width = config['frame']['width']
    frame_height = config['frame']['height']
    seed = config['seed']
    to_interleave = config['to_interleave']

    # TODO for each stimulus, retrieve the condition_nbs.
    # TODO for each stimulus, retrieve the mapping condition_nb -> bin_frame_nb.
    # TODO for each stimulus, retrieve the sequence of condition_nbs.
    # TODO create an interleaved sequence of (stimulus_nb, condition_nb).
    # TODO convert to a sequence of (stimulus_nb, bin_frame_nb).
    # TODO convert to a sequence of bin_frame_nb.

    nb_stimuli = len(stimulus_names)
    stimulus_nbs = np.arange(0, nb_stimuli)
    stimuli_params_dict = {}  # stimulus_nb -> stimulus_params
    for stimulus_nb in stimulus_nbs:
        stimulus_name = stimulus_names[stimulus_nb]
        stimulus_params = {
            'name': stimulus_name,
            'bin_path': os.path.join(
                pystim_path, stimulus_name, '{}.bin'.format(stimulus_name)),
            'vec_path': os.path.join(
                pystim_path, stimulus_name, '{}.vec'.format(stimulus_name)),
            'trials_path': os.path.join(
                pystim_path, stimulus_name, '{}_trials.csv'.format(stimulus_name)),
        }
        stimuli_params_dict[stimulus_nb] = stimulus_params
    # Get number of bin frames for each stimulus.
    for stimulus_nb in stimulus_nbs:
        stimulus_bin_path = stimuli_params_dict[stimulus_nb]['bin_path']
        stimulus_nb_bin_frames = load_nb_bin_frames(stimulus_bin_path)
        stimuli_params_dict[stimulus_nb]['nb_bin_frames'] = stimulus_nb_bin_frames

    # Create .bin file.
    bin_filename = '{}.bin'.format(name)
    bin_path = os.path.join(base_path, bin_filename)
    nb_bin_frames = 1 + int(
        np.sum([
            stimuli_params_dict[stimulus_nb]['nb_bin_frames']
            for stimulus_nb in stimulus_nbs
        ]))
    # Open .bin file.
    bin_file = open_bin_file(bin_path,
                             nb_bin_frames,
                             frame_width=frame_width,
                             frame_height=frame_height,
                             reverse=False,
                             mode='w')
    # ...
    bin_frame_nbs_dict = {}  # stimulus_nb -> stimulus_bin_frame_nb -> bin_frame_nb
    # Add grey frame.
    grey_frame = get_grey_frame(frame_width,
                                frame_height,
                                luminance=mean_luminance)
    grey_frame = float_frame_to_uint8_frame(grey_frame)
    bin_file.append(grey_frame)
    bin_frame_nbs_dict[None] = bin_file.get_frame_nb()
    # Add frames.
    for stimulus_nb in stimulus_nbs:
        stimulus_params = stimuli_params_dict[stimulus_nb]
        # Open stimulus .bin file.
        stimulus_bin_path = stimulus_params['bin_path']
        stimulus_bin_file = open_bin_file(stimulus_bin_path, mode='r')
        # Copy frames from stimulus .bin file to .bin file.
        stimulus_bin_frame_nbs = stimulus_bin_file.get_frame_nbs()
        bin_frame_nbs_dict[stimulus_nb] = {}
        for stimulus_bin_frame_nb in stimulus_bin_frame_nbs:
            frame_bytes = stimulus_bin_file.read_frame_as_bytes(
                stimulus_bin_frame_nb)
            bin_file.append(frame_bytes)
            bin_frame_nbs_dict[stimulus_nb][
                stimulus_bin_frame_nb] = bin_file.get_frame_nb()
        # Close stimulus .bin file.
        stimulus_bin_file.close()
    # ...
    assert bin_file.get_frame_nb() == nb_bin_frames - 1, "{} != {} - 1".format(
        bin_file.get_frame_nb(), nb_bin_frames)
    # Close .bin file.
    bin_file.close()
    # ...
    print("End of .bin file creation.")

    # Get trials for each stimulus.
    for stimulus_nb in stimulus_nbs:
        stimulus_trials_path = stimuli_params_dict[stimulus_nb]['trials_path']
        stimulus_trials = load_csv_file(stimulus_trials_path,
                                        expected_columns=[
                                            'condition_nb', 'start_display_nb',
                                            'end_display_nb'
                                        ])
        stimuli_params_dict[stimulus_nb]['trials'] = stimulus_trials
    # Compute the number of trials for each stimulus.
    for stimulus_nb in stimulus_nbs:
        stimulus_trials = stimuli_params_dict[stimulus_nb]['trials']
        stimulus_nb_trials = len(stimulus_trials)
        stimuli_params_dict[stimulus_nb]['nb_trials'] = stimulus_nb_trials
    # Compute the number of trials after the merger.
    nb_trials = int(
        np.sum([
            stimuli_params_dict[stimulus_nb]['nb_trials']
            for stimulus_nb in stimulus_nbs
        ]))

    # Generate the interleaved sequence of stimulus numbers.
    merged_stimulus_nbs_sequence = []
    for stimulus_nb in stimulus_nbs:
        stimulus_nb_trials = stimuli_params_dict[stimulus_nb]['nb_trials']
        merged_stimulus_nbs_sequence.extend(stimulus_nb_trials * [stimulus_nb])
    merged_stimulus_nbs_sequence = np.array(merged_stimulus_nbs_sequence)
    assert merged_stimulus_nbs_sequence.size == nb_trials
    if to_interleave:
        np.random.seed(seed)
        np.random.shuffle(merged_stimulus_nbs_sequence)
    # Compute the corresponding interleaved sequence of stimulus condition numbers.
    merged_stimulus_condition_nbs_sequence = np.empty(nb_trials, dtype=int)
    for stimulus_nb in stimulus_nbs:
        indices = np.where(merged_stimulus_nbs_sequence == stimulus_nb)[0]
        merged_stimulus_condition_nbs_sequence[indices] = stimuli_params_dict[
            stimulus_nb]['trials']['condition_nb']
    # Compute the corresponding interleaved sequence of stimulus bin frame numbers.
    merged_stimulus_bin_frame_nbs_sequence = np.empty(nb_trials, dtype=int)
    for stimulus_nb in stimulus_nbs:
        stimulus_trials = stimuli_params_dict[stimulus_nb]['trials']
        stimulus_vec_path = stimuli_params_dict[stimulus_nb]['vec_path']
        stimulus_bin_frame_nbs_sequence = load_vec_file(stimulus_vec_path)
        tmp = np.empty(len(stimulus_trials), dtype=int)
        for index, stimulus_trial in stimulus_trials.iterrows():
            start_display_nb = stimulus_trial['start_display_nb']
            end_display_nb = stimulus_trial['end_display_nb']
            try:
                bin_frame_nbs_sequence = stimulus_bin_frame_nbs_sequence[
                    start_display_nb:end_display_nb + 1]
            except TypeError:
                print("start_display_nb: {}".format(start_display_nb))
                print("end_display_nb: {}".format(end_display_nb))
                raise
            assert np.all(bin_frame_nbs_sequence == bin_frame_nbs_sequence[0])
            bin_frame_nb = bin_frame_nbs_sequence[0]
            tmp[index] = bin_frame_nb
        indices = np.where(merged_stimulus_nbs_sequence == stimulus_nb)[0]
        merged_stimulus_bin_frame_nbs_sequence[indices] = tmp
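    # Note: all displays of a given trial point at the same bin frame (the
    # flashed image), which the per-trial assertion above guarantees, so any
    # display in the [start_display_nb, end_display_nb] range identifies the
    # trial's bin frame number.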
    # Summarize everything in a data frame.
    merged_trials_dataframe = pd.DataFrame(
        data={
            'stimulus_nb': merged_stimulus_nbs_sequence,
            'condition_nb': merged_stimulus_condition_nbs_sequence,
            'bin_frame_nb': merged_stimulus_bin_frame_nbs_sequence,
        })

    # Create .vec file.
    print("Start creating .vec file...")
    vec_filename = "{}.vec".format(name)
    vec_path = os.path.join(base_path, vec_filename)
    csv_filename = "{}_trials.csv".format(name)
    csv_path = os.path.join(base_path, csv_filename)
    csv_filenames = {
        stimulus_nb: '{}_{}_trials.csv'.format(name, stimulus_names[stimulus_nb])
        for stimulus_nb in stimulus_nbs
    }
    csv_paths = {
        stimulus_nb: os.path.join(base_path, csv_filenames[stimulus_nb])
        for stimulus_nb in stimulus_nbs
    }
    # ...
    nb_displays_during_adaptation = int(
        np.ceil(adaptation_duration * display_rate))
    nb_displays_per_flash = int(np.ceil(flash_duration * display_rate))
    nb_displays_per_inter_flash = int(
        np.ceil(inter_flash_duration * display_rate))
    nb_displays_per_trial = nb_displays_per_flash + nb_displays_per_inter_flash
    nb_displays = nb_displays_during_adaptation + nb_trials * nb_displays_per_trial
    # Open .vec file.
    vec_file = open_vec_file(vec_path, nb_displays=nb_displays)
    # Open .csv files.
    columns = ['condition_nb', 'start_display_nb', 'end_display_nb']
    csv_file = open_csv_file(csv_path, columns=columns)
    csv_files = {
        stimulus_nb: open_csv_file(csv_paths[stimulus_nb], columns=columns)
        for stimulus_nb in stimulus_nbs
    }
    # Add adaptation.
    bin_frame_nb = bin_frame_nbs_dict[None]  # i.e. default frame (grey)
    for _ in range(0, nb_displays_during_adaptation):
        vec_file.append(bin_frame_nb)
    for _, merged_trial in tqdm.tqdm(merged_trials_dataframe.iterrows(), total=nb_trials):
        stimulus_nb = merged_trial['stimulus_nb']
        stimulus_condition_nb = merged_trial['condition_nb']
        stimulus_bin_frame_nb = merged_trial['bin_frame_nb']
        # Add flash.
        start_display_nb = vec_file.get_display_nb() + 1
        bin_frame_nb = bin_frame_nbs_dict[stimulus_nb][stimulus_bin_frame_nb]
        for _ in range(0, nb_displays_per_flash):
            vec_file.append(bin_frame_nb)
        end_display_nb = vec_file.get_display_nb()
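        # In the merged .csv file, the 'condition_nb' column stores the
        # stimulus number; the per-stimulus .csv files below store the
        # actual condition numbers.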
        csv_file.append(condition_nb=stimulus_nb,
                        start_display_nb=start_display_nb,
                        end_display_nb=end_display_nb)
        csv_files[stimulus_nb].append(condition_nb=stimulus_condition_nb,
                                      start_display_nb=start_display_nb,
                                      end_display_nb=end_display_nb)
        # Add inter flash.
        bin_frame_nb = bin_frame_nbs_dict[None]
        for _ in range(0, nb_displays_per_inter_flash):
            vec_file.append(bin_frame_nb)
    # Close .csv files.
    csv_file.close()
    for stimulus_csv_file in csv_files.values():
        stimulus_csv_file.close()
    # Close .vec file.
    vec_file.close()
    # ...
    print("End of .vec file creation.")

    return
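
# The .bin helpers used throughout (open_bin_file, load_nb_bin_frames, ...)
# are pystim utilities. Below is a minimal reader/writer sketch, assuming a
# raw, header-less concatenation of fixed-size uint8 frames (an assumption
# about the layout, not pystim's actual implementation):

import os

import numpy as np


class BinFileSketch:
    """Raw uint8 frame container (assumed header-less layout)."""

    def __init__(self, path, frame_width, frame_height, mode='w'):
        self.width = frame_width
        self.height = frame_height
        self._frame_size = frame_width * frame_height
        self._file = open(path, mode + 'b')
        self._nb_appended = 0

    def append(self, frame):
        # Accept either a (height, width) uint8 array or raw frame bytes.
        data = frame.tobytes() if isinstance(frame, np.ndarray) else frame
        assert len(data) == self._frame_size
        self._file.write(data)
        self._nb_appended += 1

    def get_frame_nb(self):
        # Number of the last appended frame (0-based).
        return self._nb_appended - 1

    def read_frame_as_bytes(self, frame_nb):
        self._file.seek(frame_nb * self._frame_size)
        return self._file.read(self._frame_size)

    def close(self):
        self._file.close()


def load_nb_bin_frames_sketch(path, frame_width, frame_height):
    # Under the header-less assumption, the frame count follows directly
    # from the file size.
    return os.path.getsize(path) // (frame_width * frame_height)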