Code Example #1
File: io_import_test.py  Project: zhixinshu/menpo
def test_importing_ffmpeg_exact_frame_count_no_ffprobe(is_file,
                                                       video_infos_ffprobe):
    video_infos_ffprobe.side_effect = ValueError
    is_file.return_value = True
    mio.import_video('fake_image_being_mocked.avi',
                     normalize=True,
                     exact_frame_count=True)
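For context outside the mocked test above, a typical real call looks like the sketch below; the path is purely illustrative. normalize=True yields images with float pixels scaled to [0, 1], and exact_frame_count=True asks the importer to determine the true frame count (via ffprobe) rather than trusting the container metadata.

import menpo.io as mio

# Rough sketch only: the video path is hypothetical.
frames = mio.import_video('/data/videos/example.avi',
                          normalize=True,
                          exact_frame_count=True)
print(len(frames), frames.fps)  # a LazyList of Images; frames are decoded on access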
Code Example #2
File: landmarking.py  Project: konatasick/ip-avsr
def process_video(file, dest):
    if is_video(file):
        create_dir(os.path.dirname(dest))
        frames = mio.import_video(file, normalise=False)
        print('{} contains {} frames'.format(file, len(frames)))
        print('writing landmarks to {}...'.format(dest))
        frames = frames.map(fit_image)
        with open(dest, 'w') as outputfile:
            outwriter = csv.writer(outputfile)
            try:
                for i, frame in enumerate(print_progress(frames)):
                    if 'final_shape' not in frame.landmarks:
                        warnings.warn(
                            'no faces detected in the frame {}, '
                            'initializing landmarks to -1s...'.format(i))
                        # dlib does not support fitting from a previous
                        # initial shape, so leave the entire row as -1s
                        # initial_shape = frames[i - 1].landmarks['final_shape'].lms
                        # fitting_result = fit_image.fitter.fit_from_shape(frame, initial_shape)
                        # frame.landmarks['final_shape'] = fitting_result.final_shape
                        landmarks = [-1] * 136
                    else:
                        lmg = frame.landmarks['final_shape']
                        landmarks = lmg['all'].points.reshape(
                            (136, )).tolist()  # flatten 68 2-D points to 136 values
                    fill_row(outwriter, i, landmarks)
            except Exception as e:
                warnings.warn('Runtime Error at frame {}'.format(i))
                print('initializing landmarks to -1s...')
                fill_row(outwriter, i, [-1] * 136)
Code Example #3
File: landmarking.py  Project: lzuwei/ip-avsr
def process_video(file, dest):
    if is_video(file):
        create_dir(os.path.dirname(dest))
        frames = mio.import_video(file, normalise=False)
        print('{} contains {} frames'.format(file, len(frames)))
        print('writing landmarks to {}...'.format(dest))
        frames = frames.map(fit_image)
        with open(dest, 'w') as outputfile:
            outwriter = csv.writer(outputfile)
            try:
                for i, frame in enumerate(print_progress(frames)):
                    if 'final_shape' not in frame.landmarks:
                        warnings.warn('no faces detected in the frame {}, '
                                      'initializing landmarks to -1s...'.format(i))
                        # dlib does not support fitting from a previous
                        # initial shape, so leave the entire row as -1s
                        # initial_shape = frames[i - 1].landmarks['final_shape'].lms
                        # fitting_result = fit_image.fitter.fit_from_shape(frame, initial_shape)
                        # frame.landmarks['final_shape'] = fitting_result.final_shape
                        landmarks = [-1] * 136
                    else:
                        lmg = frame.landmarks['final_shape']
                        landmarks = lmg['all'].points.reshape((136,)).tolist()  # flatten 68 2-D points to 136 values
                    fill_row(outwriter, i, landmarks)
            except Exception as e:
                warnings.warn('Runtime Error at frame {}'.format(i))
                print('initializing landmarks to -1s...')
                fill_row(outwriter, i, [-1] * 136)
Code Example #4
File: io_import_test.py  Project: dkollias/menpo
def test_importing_imageio_ffmpeg_all_bad_frames(is_file, mock_reader):
    def fake_get_data(_):
        raise ValueError()

    mock_reader.return_value.get_length.return_value = 2
    mock_reader.return_value.get_data.side_effect = fake_get_data
    is_file.return_value = True

    ll = mio.import_video('fake_image_being_mocked.avi')
    assert len(ll) == 0
Code Example #5
File: io_import_test.py  Project: dkollias/menpo
def test_importing_imageio_avi_no_normalise(is_file, mock_image):
    mock_image.return_value.get_data.return_value = np.ones((10, 10, 3),
                                                            dtype=np.uint8)
    mock_image.return_value.get_length.return_value = 1
    is_file.return_value = True

    ll = mio.import_video('fake_image_being_mocked.avi', normalise=False)
    assert len(ll) == 1

    im = ll[0]
    assert im.shape == (10, 10)
    assert im.n_channels == 3
    assert im.pixels.dtype == np.uint8
Code Example #6
File: io_import_test.py  Project: dvdm/menpo
def test_register_video_importer(is_file):
    from menpo.image import Image
    from menpo.base import LazyList

    def foo_importer(filepath, **kwargs):
        return LazyList([lambda: Image.init_blank((10, 10))])

    is_file.return_value = True

    with patch.dict(mio.input.extensions.ffmpeg_video_types, {}, clear=True):
        mio.register_video_importer('.foo', foo_importer)
        new_video = mio.import_video('fake.foo')
    assert len(new_video) == 1
Code Example #7
File: dct.py  Project: Mordokkai/pyVSR
    def _detect_write_roi_bounds(self, file):
        from menpo.io import import_video
        frames = import_video(filepath=file, normalize=True, exact_frame_count=True)

        bounds = []
        for frame_idx, frame in enumerate(frames):

            lips_pointcloud, success = _get_roi_pointcloud(frame, self._detect, self._fitter)
            if success is True:
                roi_bounds = _get_pointcloud_bounds(lips_pointcloud, self._boundary_proportion)
            else:
                roi_bounds = [-1, -1, -1, -1]
            bounds.append(roi_bounds)
        self._write_rois_to_file(file, bounds)
Code Example #8
File: io_import_test.py  Project: dvdm/menpo
def test_importing_ffmpeg_avi_normalize(is_file, video_infos_ffprobe, pipe):
    video_infos_ffprobe.return_value = {'duration': 2, 'width': 100,
                                        'height': 150, 'n_frames': 10, 'fps': 5}
    is_file.return_value = True
    empty_frame = np.zeros(150*100*3, dtype=np.uint8).tostring()
    pipe.return_value.stdout.read.return_value = empty_frame
    ll = mio.import_video('fake_image_being_mocked.avi', normalize=True)
    assert ll.path.name == 'fake_image_being_mocked.avi'
    assert ll.fps == 5
    assert len(ll) == 5*2
    image = ll[0]
    assert image.shape == (150, 100)
    assert image.n_channels == 3
    assert image.pixels.dtype == np.float
Code Example #9
File: io_import_test.py  Project: dkollias/menpo
def test_importing_imageio_ffmpeg_bad_frames(is_file, mock_reader):
    def fake_get_data(index):
        if index not in [1, 2]:
            raise ValueError()
        f = np.ones((10, 10, 3)) * -1
        f[0, 0, 0] = index
        return f

    mock_reader.return_value.get_length.return_value = 4
    mock_reader.return_value.get_data.side_effect = fake_get_data
    is_file.return_value = True

    ll = mio.import_video('fake_image_being_mocked.avi', normalise=False)
    assert len(ll) == 2

    im = ll[0]
    assert im.shape == (10, 10)
    assert im.pixels[0, 0, 0] == 1
Code Example #10
File: io_import_test.py  Project: dkollias/menpo
def test_importing_imageio_ffmpeg_many_bad_frames_warning_end(is_file, mock_reader):
    def fake_get_data(index):
        if index > 3:
            raise ValueError()
        f = np.ones((10, 10, 3)) * -1
        f[0, 0, 0] = index
        return f

    mock_reader.return_value.get_length.return_value = 15
    mock_reader.return_value.get_data.side_effect = fake_get_data
    is_file.return_value = True

    with warnings.catch_warnings(record=True) as w:
        ll = mio.import_video('fake_image_being_mocked.avi', normalise=False)
        assert len(w) == 1
    assert len(ll) == 4

    assert ll[0].pixels[0, 0, 0] == 0
    assert ll[-1].pixels[0, 0, 0] == 3
Code Example #11
File: test_io_import.py  Project: eosulliv/menpo
def test_importing_ffmpeg_avi_no_normalize(is_file, video_infos_ffprobe, pipe):
    video_infos_ffprobe.return_value = {
        "duration": 2,
        "width": 100,
        "height": 150,
        "n_frames": 10,
        "fps": 5,
    }
    is_file.return_value = True
    empty_frame = np.zeros(150 * 100 * 3, dtype=np.uint8).tostring()
    pipe.return_value.stdout.read.return_value = empty_frame
    ll = mio.import_video("fake_image_being_mocked.avi", normalize=False)
    assert ll.path.name == "fake_image_being_mocked.avi"
    assert ll.fps == 5
    assert len(ll) == 5 * 2
    image = ll[0]
    assert image.shape == (150, 100)
    assert image.n_channels == 3
    assert image.pixels.dtype == np.uint8
Code Example #12
File: dct.py  Project: Mordokkai/pyVSR
    def _compute_3d_dct_menpo(self, file):
        r"""
        Video import is based on menpo implementation
        (LazyList with ffmpeg backend)
        :param file:
        :return:
        """
        from menpo.io import import_video
        frames = import_video(filepath=file, normalize=True, exact_frame_count=True)
        dct_volume = np.zeros((self._yres, self._xres, len(frames)), dtype=np.float32)

        for frame_idx, frame in enumerate(frames):
            roi = self._get_roi(frame, frame_idx, file)

            dctmat = np.zeros(np.shape(roi))
            cv2.dct(roi, dctmat)
            dct_volume[:, :, frame_idx] = dctmat

        return dct_volume
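The docstring above points out that the import is backed by menpo's LazyList with an ffmpeg reader. As a rough illustration of what that implies (hypothetical path, standard menpo behaviour), frames are only decoded when they are indexed, and map() composes lazily:

import menpo.io as mio

frames = mio.import_video('/data/videos/example.mp4',   # hypothetical path
                          normalize=True, exact_frame_count=True)
grey = frames.map(lambda im: im.as_greyscale())         # nothing is decoded yet
first = grey[0]                                         # frame 0 is decoded and converted here
print(len(grey), first.shape)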
Code Example #13
def process_video(file, dest):
    if is_video(file):
        try:
            frames = mio.import_video(file, normalise=False)
        except IOError:
            warnings.warn('IO error reading video file {}, '.format(file) +
                          'the file may be corrupted or the video format is unsupported, skipping...')
            return
        except ValueError as e:
            warnings.warn('ValueError reading video file {}: {}'.format(file, e))
            return
        # check if directory is non empty
        if os.path.dirname(dest):
            create_dir(os.path.dirname(dest))
        print('{} contains {} frames'.format(file, len(frames)))
        print('writing landmarks to {}...'.format(dest))
        frames = frames.map(fit_image)
        with open(dest, 'w') as outputfile:
            outwriter = csv.writer(outputfile)
            try:
                for i, frame in enumerate(print_progress(frames)):
                    if 'final_shape' not in frame.landmarks:
                        warnings.warn('no faces detected in the frame {}, '
                                      'initializing landmarks to -1s...'.format(i))
                        # dlib does not support fitting from a previous
                        # initial shape, so leave the entire row as -1s
                        # initial_shape = frames[i - 1].landmarks['final_shape'].lms
                        # fitting_result = fit_image.fitter.fit_from_shape(frame, initial_shape)
                        # frame.landmarks['final_shape'] = fitting_result.final_shape
                        landmarks = [-1] * (NO_LANDMARKS * 2)
                    else:
                        lmg = frame.landmarks['final_shape']
                        landmarks = lmg['all'].points.reshape((NO_LANDMARKS * 2,)).tolist()  # flatten to NO_LANDMARKS * 2 values
                    fill_row(outwriter, i, landmarks)
            except Exception as e:
                warnings.warn('Runtime Error at frame {}'.format(i))
                print('initializing landmarks to -1s...')
                fill_row(outwriter, i, [-1] * (NO_LANDMARKS * 2))
Code Example #14
    def extract_save_features(self, files):
        r"""
        Uses the input files to train AAMs and stores the resulting pickle on disk
        Parameters
        ----------
        files

        Returns
        -------

        """

        # 1. fetch all video frames, attach landmarks
        frames = mio.import_video(files[0],
                                  landmark_resolver=self._myresolver,
                                  normalize=True,
                                  exact_frame_count=True)

        # frames = frames.map(AAMFeature._preprocess)
        idx_above_thresh, idx_lip_opening = landmark_filter(
            files[0],
            self._landmarkDir,
            threshold=self._confidence_thresh,
            keep=self._kept_frames)

        frames = frames[idx_above_thresh]
        frames = frames[idx_lip_opening]
        frames = frames.map(attach_semantic_landmarks)

        if self._greyscale is True:
            frames = frames.map(convert_to_grayscale)

        # initial AAM training
        if self._warpType == 'holistic':
            aam = HolisticAAM(frames,
                              group=self._landmarkGroup,
                              holistic_features=self._features,
                              reference_shape=None,
                              diagonal=self._diagonal,
                              scales=self._scales,
                              max_shape_components=self._max_shape_components,
                              max_appearance_components=self._max_appearance_components,
                              verbose=False)
        elif self._warpType == 'patch':
            aam = PatchAAM(frames,
                           group=self._landmarkGroup,
                           holistic_features=self._features,
                           diagonal=self._diagonal,
                           scales=self._scales,
                           max_shape_components=self._max_shape_components,
                           max_appearance_components=self._max_appearance_components,
                           patch_shape=self._extractOpts['patch_shape'],
                           verbose=False)

        else:
            raise Exception('Unknown warp type. Did you mean holistic/patch ?')

        frame_buffer = LazyList.init_from_iterable([])
        buffer_len = 256
        for idx, file in enumerate(files[1:]):
            # useful to check progress
            with open('./run/log_' + self._outModelName + '.txt', 'w') as log:
                log.write(str(idx) + ' ' + file + '\n')

            frames = mio.import_video(file,
                                      landmark_resolver=self._myresolver,
                                      normalize=True,
                                      exact_frame_count=True)
            idx_above_thresh, idx_lip_opening = landmark_filter(
                file,
                landmark_dir=self._landmarkDir,
                threshold=self._confidence_thresh,
                keep=self._kept_frames)

            frames = frames[idx_above_thresh]
            frames = frames[idx_lip_opening]
            frames = frames.map(attach_semantic_landmarks)
            if self._greyscale is True:
                frames = frames.map(convert_to_grayscale)

            frame_buffer += frames
            if len(frame_buffer) > buffer_len:
                # 2. retrain AAM
                aam.increment(frame_buffer,
                              group=self._landmarkGroup,
                              shape_forgetting_factor=1.0,
                              appearance_forgetting_factor=1.0,
                              verbose=False,
                              batch_size=None)
                del frame_buffer
                frame_buffer = LazyList.init_from_iterable([])

        if len(frame_buffer) != 0:
            # deplete remaining frames
            aam.increment(frame_buffer,
                          group=self._landmarkGroup,
                          shape_forgetting_factor=1.0,
                          appearance_forgetting_factor=1.0,
                          verbose=False,
                          batch_size=None)
            del frame_buffer

        mio.export_pickle(obj=aam, fp=self._outDir + self._outModelName, overwrite=True, protocol=4)
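As a hypothetical follow-up to extract_save_features(), the exported pickle could be reloaded and wrapped in a fitter, mirroring the fitter construction used in get_feature() below. The import location of WibergInverseCompositional and the component counts are assumptions, not taken from this code:

import menpo.io as mio
from menpofit.aam import LucasKanadeAAMFitter
from menpofit.aam import WibergInverseCompositional  # assumed import path

aam = mio.import_pickle('/models/my_aam.pkl')        # hypothetical path
fitter = LucasKanadeAAMFitter(aam,
                              lk_algorithm_cls=WibergInverseCompositional,
                              n_shape=[10, 20],       # illustrative values
                              n_appearance=[20, 150])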
Code Example #15
    def get_feature(self, file, process_opts=None):
        r"""
        Computes the AAM features, according to the `process_opts`
        Parameters
        ----------
        file
        process_opts

        Returns
        -------
        A dictionary of five elements, each representing a variation of the computed features
        (shape and appearance alone or concatenated, with or without derivatives)
        """

        self._maybe_start_logging(file)
        self._load_landmark_fitter()

        frames = mio.import_video(file,
                                  landmark_resolver=self._myresolver,
                                  normalize=True,
                                  exact_frame_count=True)

        feat_shape = []
        feat_app = []
        feat_shape_app = []

        for frameIdx, frame in enumerate(frames):

            bounding_boxes = self._face_detect(frame)
            if len(bounding_boxes) > 0:
                initial_bbox = bounding_boxes[0]
                if self._log_errors is True:
                    gt_shape = frame.landmarks['pts_face']
                else:
                    gt_shape = None

                if isinstance(self._landmark_fitter, LucasKanadeAAMFitter):
                    result = self._landmark_fitter.fit_from_bb(
                        frame, initial_bbox,
                        max_iters=self._max_iters,
                        gt_shape=gt_shape)

                elif isinstance(self._landmark_fitter, DlibWrapper):  # DLIB fitter, doesn't have max_iters
                    result = self._landmark_fitter.fit_from_bb(
                        frame,
                        initial_bbox,
                        gt_shape=gt_shape)
                else:
                    raise Exception('incompatible landmark fitter')

                self._maybe_append_to_log(file, frameIdx, result)

                if self._shape == 'face':

                    if self._parameters == 'lk_fitting':
                        # skip the first 4 similarity params, probably not useful for classification
                        shape_param_frame = result.shape_parameters[-1][4:]
                        app_param_frame = result.appearance_parameters[-1]
                    elif self._parameters == 'aam_projection':
                        result_aam = self._projection_fitter.fit_from_shape(
                            frame,
                            result.final_shape,
                            max_iters=[0, 0, 0])

                        # TODO: analyse the case when aam true components are less than max components
                        shape_param_frame = result_aam.shape_parameters[-1][4:]
                        app_param_frame = result_aam.appearance_parameters[-1]

                    feat_shape.append(shape_param_frame)
                    feat_app.append(app_param_frame)
                    feat_shape_app.append(np.concatenate((shape_param_frame, app_param_frame)))

                elif self._shape == 'lips':

                    # extract lips landmarks from the final face fitting to initialize the part model fitting

                    aam_lips = mio.import_pickle(self._part_aam)
                    fitter_lips = LucasKanadeAAMFitter(aam_lips, lk_algorithm_cls=WibergInverseCompositional,
                                                       n_shape=[10, 20], n_appearance=[20, 150])

                    result_lips = fitter_lips.fit_from_shape(
                        image=frame,
                        initial_shape=_pointcloud_subset(result.final_shape, 'lips'),
                        max_iters=[5, 5])

                    shape_param_frame_lips = result_lips.shape_parameters[-1][4:]
                    app_param_frame_lips = result_lips.appearance_parameters[-1]

                    feat_shape.append(shape_param_frame_lips)
                    feat_app.append(app_param_frame_lips)
                    feat_shape_app.append(np.concatenate((shape_param_frame_lips, app_param_frame_lips)))

                elif self._shape == 'chin':

                    # extract chin and lips landmarks from the final face fitting to initialize the part model fitting

                    aam_chin = mio.import_pickle(self._part_aam)
                    fitter_chin = LucasKanadeAAMFitter(aam_chin, lk_algorithm_cls=WibergInverseCompositional,
                                                       n_shape=[10, 20, 25], n_appearance=[20, 50, 150])

                    result_chin = fitter_chin.fit_from_shape(
                        image=frame,
                        initial_shape=_pointcloud_subset(result.final_shape, 'chin'),
                        max_iters=[10, 10, 5])

                    shape_param_frame_mchin = result_chin.shape_parameters[-1][4:]
                    app_param_frame_mchin = result_chin.appearance_parameters[-1]

                    feat_shape.append(shape_param_frame_mchin)
                    feat_app.append(app_param_frame_mchin)
                    feat_shape_app.append(np.concatenate((shape_param_frame_mchin, app_param_frame_mchin)))

                else:
                    raise Exception('Unknown shape model, currently supported are: face, lips, chin')

            else:  # we did not detect any face

                zero_feat_shape = np.zeros(process_opts['shape_components'][-1])
                zero_feat_app = np.zeros(process_opts['appearance_components'][-1])
                zero_feat_shape_app = np.zeros(
                    process_opts['shape_components'][-1] + process_opts['appearance_components'][-1])

                feat_shape.append(zero_feat_shape)
                feat_app.append(zero_feat_app)
                feat_shape_app.append(zero_feat_shape_app)

        npfeat_shape = np.array(feat_shape)
        npfeat_app = np.array(feat_app)
        npfeat_shape_app = np.array(feat_shape_app)

        npfeat_app_delta = vsrmath.accurate_derivative(npfeat_app, 'delta')
        npfeat_shape_app_delta = vsrmath.accurate_derivative(npfeat_shape_app, 'delta')

        return {'shape': npfeat_shape,
                'app': npfeat_app,
                'shape_app': npfeat_shape_app,
                'app_delta': npfeat_app_delta,
                'shape_app_delta': npfeat_shape_app_delta}
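A hypothetical usage sketch for get_feature() above; the instance name, video path, and component counts are illustrative, while the dictionary keys match the return value constructed at the end of the method:

# `extractor` stands in for an instance of the feature class this method belongs to
feats = extractor.get_feature('/data/videos/speaker01.mp4',
                              process_opts={'shape_components': [10, 20],
                                            'appearance_components': [20, 150]})
shape = feats['shape']                  # per-frame shape parameters
app_delta = feats['app_delta']          # temporal derivative of the appearance parameters
both_delta = feats['shape_app_delta']   # derivative of concatenated shape + appearance
print(shape.shape, app_delta.shape, both_delta.shape)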
Code Example #16
File: io_import_test.py  Project: dkollias/menpo
def test_importing_imageio_avi_no_frames(is_file, mock_image):
    mock_image.return_value.get_length.return_value = 0
    is_file.return_value = True

    ll = mio.import_video('fake_image_being_mocked.avi')
    assert len(ll) == 0
Code Example #17
def storeBB(ims, filename):
    p_out = '/vol/bitbucket/ml9915/landmarksLeft/'
    bboxes = {}
    for cnt, im in enumerate(ims):
        if im.n_channels == 3:
            im = im.as_greyscale()
        # ffld2 detector returns bounding_boxes
        lns = ffld2_detector(im)
        if im.landmarks.n_groups == 0:
            # there are no detections
            print('error')
            continue
        name = '{0:06d}'.format(cnt)
        bboxes[name] = lns
    # export the bounding boxes
    mio.export_pickle(bboxes, p_out + filename + '.pkl', overwrite=True)

# decide which detector to use:
ffld2_detector = load_ffld2_frontal_face_detector()
# input path
pbLeft = ('/vol/bitbucket/ml9915/annotateVideosLeft/')
filesLeft = os.listdir(pbLeft)
filesLeft = filter(lambda x: not x.startswith("._"), filesLeft)

for filename in filesLeft:
    if filename == '.DS_Store':
        continue
    print(filename)
    # import the video
    ims = mio.import_video(join(pbLeft, filename))
    storeBB(ims, filename.split('.')[0])