Example #1
def test_openface():
    # For OpenFace data file
    filename = os.path.join(get_test_data_path(), "OpenFace_Test.csv")
    openface = Fex(read_openface(filename), sampling_freq=30)

    # Test KeyError
    with pytest.raises(KeyError):
        Fex(read_openface(filename, features=["NotHere"]), sampling_freq=30)

    # Test length
    assert len(openface) == 100

    # Test loading from filename
    openface = Fex(filename=filename, sampling_freq=30, detector="OpenFace")
    openface = openface.read_file()

    # Test length again after reloading
    assert len(openface) == 100

    # Test landmark methods
    assert openface.landmark().shape[1] == 136
    assert openface.iloc[0].landmark().shape[0] == 136
    assert openface.landmark_x().shape[1] == openface.landmark_y().shape[1]
    assert (openface.iloc[0].landmark_x().shape[0] ==
            openface.iloc[0].landmark_y().shape[0])

    # Test PSPI calculation (computed differently from FACET)
    assert len(openface.calc_pspi()) == len(openface)
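
For reference, calc_pspi() presumably computes the Prkachin-Solomon Pain Intensity (PSPI) score, which combines a small set of action units. Below is a minimal sketch of that computation over OpenFace-style AU intensity columns; the column names, and the use of AU45 as a stand-in for AU43 (eye closure), are assumptions rather than the library's confirmed implementation:

import pandas as pd

def pspi_sketch(df: pd.DataFrame) -> pd.Series:
    # Hedged sketch: PSPI = AU4 + max(AU6, AU7) + max(AU9, AU10) + AU43
    return (df["AU04_r"]
            + df[["AU06_r", "AU07_r"]].max(axis=1)
            + df[["AU09_r", "AU10_r"]].max(axis=1)
            + df["AU45_r"])  # OpenFace has no AU43; AU45 (blink) is a common proxy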
Example #2
File: test_feat.py Project: yanliu1/feat
def test_openface():
    # For OpenFace data file
    filename = join(get_test_data_path(), 'OpenFace_Test.csv')
    openface = Fex(read_openface(filename), sampling_freq=30)

    # Test KeyError
    with pytest.raises(KeyError):
        Fex(read_openface(filename, features=['NotHere']), sampling_freq=30)

    # Test length
    assert len(openface) == 100

    # Test loading from filename
    openface = Openface(filename=filename, sampling_freq=30)
    openface.read_file()

    # Test length again after reloading
    assert len(openface) == 100

    # Test PSPI calculation (computed differently from FACET)
    assert len(openface.calc_pspi()) == len(openface)

    # Test if a method returns subclass.
    openface = openface.downsample(target=10, target_type='hz')
    assert isinstance(openface, Openface)
Example #3
def test_stats():
    filename = os.path.join(get_test_data_path(), "OpenFace_Test.csv")
    openface = Fex(filename=filename, sampling_freq=30, detector="OpenFace")
    openface = openface.read_file()

    aus = openface.aus()
    aus.sessions = range(len(aus))
    y = aus[[i for i in aus.columns if "_r" in i]]
    X = pd.DataFrame(aus.sessions)
    b, t, p, df, res = aus.regress(X, y, mode="ols", fit_intercept=True)
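    # b has 2 rows (intercept + session slope), one column for each of the 17 "_r" AU intensity columns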
    assert b.shape == (2, 17)
    assert res.mean().mean() < 1

    clf = openface.predict(X=["AU02_c"], y="AU04_c")
    assert clf.coef_ < 0

    clf = openface.predict(X=openface[["AU02_c"]], y=openface["AU04_c"])
    assert clf.coef_ < 0

    t, p = openface[["AU02_c"]].ttest_1samp()
    assert t > 0

    a = openface.aus().assign(input="0")
    b = openface.aus().apply(lambda x: x + np.random.rand(100)).assign(
        input="1")
    doubled = pd.concat([a, b])
    doubled.sessions = doubled['input']
    t, p = doubled.ttest_ind(col="AU12_r", sessions=("0", "1"))
    assert (t < 0)

    frame = np.concatenate([
        np.array(range(int(len(doubled) / 2))),
        np.array(range(int(len(doubled) / 2)))
    ])
    assert (doubled.assign(frame=frame).isc(col="AU04_r").iloc[0, 0] == 1)
Example #4
def test_affectiva():
    filename = os.path.join(get_test_data_path(),
                            "sample_affectiva-api-app_output.json")
    affdex = Fex(read_affectiva(filename),
                 sampling_freq=1,
                 detector="Affectiva")
    assert affdex.shape[1] == 32
Example #5
    def detect_image(self, inputFname, outputFname=None):
        """Detects FEX from a video file.

        Args:
            inputFname (str, or list of str): Path to image file or a list of paths to image files.
            outputFname (str, optional): Path to output file. Defaults to None.

        Returns:
            Fex: Prediction results dataframe if outputFname is None. Returns True if outputFname is specified.
        """
        assert (
            type(inputFname) == str or type(inputFname) == list
        ), "inputFname must be a string path to image or list of image paths"
        if type(inputFname) == str:
            inputFname = [inputFname]
        for inputF in inputFname:
            if not os.path.exists(inputF):
                raise FileNotFoundError(f"File {inputF} not found.")
        self.info['inputFname'] = inputFname
        
        init_df = pd.DataFrame(columns=self["output_columns"])
        if outputFname:
            init_df.to_csv(outputFname, index=False, header=True)

        for inputF in inputFname:
            frame = Image.open(inputF)
            df = self.process_frame(np.array(frame))

            if outputFname:
                df.to_csv(outputFname, index=True, header=False, mode='a')
            else:
                init_df = pd.concat([init_df, df], axis=0)

        if outputFname:
            return True
        else:
            return Fex(
                init_df,
                filename=inputFname,
                au_columns=None,
                emotion_columns=FEAT_EMOTION_COLUMNS,
                facebox_columns=FEAT_FACEBOX_COLUMNS,
                landmark_columns=openface_2d_landmark_columns,
                time_columns=FACET_TIME_COLUMNS,
                detector="Feat",
            )
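
A hedged usage sketch for this method; the Detector() construction and the file names are assumptions (model defaults vary across py-feat versions), not the confirmed API:

from feat import Detector

detector = Detector()  # assume default face/landmark/AU models
fex = detector.detect_image("face.jpg")            # Fex dataframe in memory
ok = detector.detect_image(["a.jpg", "b.jpg"],     # or stream rows to disk;
                           outputFname="out.csv")  # returns True when written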
Example #6
    def detect_image(self, inputFname, outputFname=None, verbose=False):
        """Detects FEX from an image file.

        Args:
            inputFname (str, or list of str): Path to image file or a list of paths to image files.
            outputFname (str, optional): Path to output file. Defaults to None.

        Returns:
            Fex: Prediction results dataframe if outputFname is None. Returns True if outputFname is specified.
        """
        assert (
            type(inputFname) == str or type(inputFname) == list
        ), "inputFname must be a string path to image or list of image paths"
        if type(inputFname) == str:
            inputFname = [inputFname]
        for inputF in inputFname:
            if not os.path.exists(inputF):
                raise FileNotFoundError(f"File {inputF} not found.")
        self.info["inputFname"] = inputFname

        init_df = pd.DataFrame(columns=self["output_columns"])
        if outputFname:
            init_df.to_csv(outputFname, index=False, header=True)

        for inputF in inputFname:
            if verbose:
                print(f"processing {inputF}")
            frame = cv2.imread(inputF)
            df = self.process_frame(frame)
            df["input"] = inputF
            if outputFname:
                df[init_df.columns].to_csv(outputFname,
                                           index=False,
                                           header=False,
                                           mode="a")
            else:
                init_df = pd.concat([init_df, df[init_df.columns]], axis=0)

        if outputFname:
            return True
        else:
            return Fex(
                init_df,
                filename=inputFname,
                au_columns=self['au_presence_columns'],
                emotion_columns=FEAT_EMOTION_COLUMNS,
                facebox_columns=FEAT_FACEBOX_COLUMNS,
                landmark_columns=openface_2d_landmark_columns,
                time_columns=FACET_TIME_COLUMNS,
                detector="Feat",
            )
Example #7
def test_stats():
    filename = os.path.join(get_test_data_path(), "OpenFace_Test.csv")
    openface = Fex(filename=filename, sampling_freq=30, detector="OpenFace")
    openface = openface.read_file()

    aus = openface.aus()
    aus.sessions = range(len(aus))
    y = aus[[i for i in aus.columns if "_r" in i]]
    X = pd.DataFrame(aus.sessions)
    b, t, p, df, res = aus.regress(X, y, mode="ols", fit_intercept=True)
    assert b.shape == (2, 17)
    assert res.mean().mean() < 1

    clf = openface.predict(X=["AU02_c"], y="AU04_c")
    assert clf.coef_ < 0

    clf = openface.predict(X=openface[["AU02_c"]], y=openface["AU04_c"])
    assert clf.coef_ < 0

    t, p = openface[["AU02_c"]].ttest()
    assert t > 0
Example #8
def test_feat():
    filename = os.path.join(get_test_data_path(), "output.csv")
    fex = Fex(filename=filename, detector="Feat")
    fex = fex.read_file()
    # test input
    assert fex.input().values[0] == fex.iloc[0].input()
Example #9
def test_fex():
    with pytest.raises(Exception):
        fex = Fex().read_feat()
    with pytest.raises(Exception):
        fex = Fex().read_facet()
    with pytest.raises(Exception):
        fex = Fex().read_openface()
    with pytest.raises(Exception):
        fex = Fex().read_affectiva()

    # For iMotions-FACET data files
    # test reading iMotions file < version 6
    filename = os.path.join(get_test_data_path(), 'iMotions_Test_v5.txt')
    dat = Fex(read_facet(filename), sampling_freq=30)

    # test reading iMotions file > version 6
    filename = os.path.join(get_test_data_path(), 'iMotions_Test_v6.txt')
    df = read_facet(filename)

    # Test slicing functions.
    assert df.aus().shape == (519, 20)

    assert df.emotions().shape == (519, 12)

    assert df.facebox().shape == (519, 4)

    assert df.time().shape[-1] == 4

    assert df.design().shape[-1] == 4

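    # Label every 10 rows as one session: 52 blocks of 10 labels, trimmed by one to match the 519 rows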
    sessions = np.array([[x] * 10
                         for x in range(1 + int(len(df) / 10))]).flatten()[:-1]
    dat = Fex(df, sampling_freq=30, sessions=sessions)
    dat = dat[[
        'Joy', 'Anger', 'Surprise', 'Fear', 'Contempt', 'Disgust', 'Sadness',
        'Confusion', 'Frustration', 'Neutral', 'Positive', 'Negative'
    ]]

    # Test Session ValueError
    with pytest.raises(ValueError):
        Fex(df, sampling_freq=30, sessions=sessions[:10])

    # Test KeyError
    with pytest.raises(KeyError):
        Fex(read_facet(filename, features=['NotHere']), sampling_freq=30)

    # Test length
    assert len(dat) == 519

    # Test sessions generator
    assert len(np.unique(dat.sessions)) == len([x for x in dat.itersessions()])

    # Test metadata propagation
    assert dat[['Joy']].sampling_freq == dat.sampling_freq
    assert dat.iloc[:, 0].sampling_freq == dat.sampling_freq

    # Test Downsample
    assert len(dat.downsample(target=10)) == 52

    # Test upsample
    assert len(dat.upsample(target=60, target_type='hz')) == (len(dat) - 1) * 2

    # Test interpolation
    assert (dat.interpolate(method='linear').isnull().sum()['Positive'] <
            dat.isnull().sum()['Positive'])
    dat = dat.interpolate(method='linear')

    # Test distance
    d = dat[['Positive']].distance()
    assert isinstance(d, Adjacency)
    assert d.square_shape()[0] == len(dat)

    # Test Copy
    assert isinstance(dat.copy(), Fex)
    assert dat.copy().sampling_freq == dat.sampling_freq

    # Test rectification
    rectified = df.rectification()
    assert (df[df.au_columns].isna().sum()[0] <
            rectified[rectified.au_columns].isna().sum()[0])

    # Test pspi
    assert len(df.calc_pspi()) == len(df)

    # Test baseline
    assert isinstance(dat.baseline(baseline='median'), Fex)
    assert isinstance(dat.baseline(baseline='mean'), Fex)
    assert isinstance(dat.baseline(baseline='begin'), Fex)
    assert isinstance(dat.baseline(baseline=dat.mean()), Fex)
    assert isinstance(dat.baseline(baseline='median', ignore_sessions=True),
                      Fex)
    assert isinstance(dat.baseline(baseline='mean', ignore_sessions=True), Fex)
    assert isinstance(dat.baseline(baseline=dat.mean(), ignore_sessions=True),
                      Fex)
    assert isinstance(dat.baseline(baseline='median', normalize='pct'), Fex)
    assert isinstance(dat.baseline(baseline='mean', normalize='pct'), Fex)
    assert isinstance(dat.baseline(baseline=dat.mean(), normalize='pct'), Fex)
    assert isinstance(
        dat.baseline(baseline='median', ignore_sessions=True, normalize='pct'),
        Fex)
    assert isinstance(
        dat.baseline(baseline='mean', ignore_sessions=True, normalize='pct'),
        Fex)
    assert isinstance(
        dat.baseline(baseline=dat.mean(),
                     ignore_sessions=True,
                     normalize='pct'), Fex)
    # Test ValueError
    with pytest.raises(ValueError):
        dat.baseline(baseline='BadValue')

    # Test summary
    dat2 = dat.loc[:, ['Positive', 'Negative']].interpolate()
    out = dat2.extract_summary(min=True, max=True, mean=True)
    assert len(out) == len(np.unique(dat2.sessions))
    assert np.array_equal(out.sessions, np.unique(dat2.sessions))
    assert out.sampling_freq == dat2.sampling_freq
    assert dat2.shape[1] * 3 == out.shape[1]
    out = dat2.extract_summary(min=True,
                               max=True,
                               mean=True,
                               ignore_sessions=True)
    assert len(out) == 1
    assert dat2.shape[1] * 3 == out.shape[1]

    # Check if file is missing columns
    data_bad = dat.iloc[:, 0:10]
    with pytest.raises(Exception):
        _check_if_fex(data_bad, imotions_columns)

    # Check if file has too many columns
    data_bad = dat.copy()
    data_bad['Test'] = 0
    with pytest.raises(Exception):
        _check_if_fex(data_bad, imotions_columns)

    # Test clean
    assert isinstance(dat.clean(), Fex)
    assert dat.clean().columns is dat.columns
    assert dat.clean().sampling_freq == dat.sampling_freq

    # Test Decompose
    n_components = 3
    stats = dat.decompose(algorithm='pca', axis=1, n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]

    stats = dat.decompose(algorithm='ica', axis=1, n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]

    new_dat = dat + 100
    stats = new_dat.decompose(algorithm='nnmf',
                              axis=1,
                              n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]

    stats = dat.decompose(algorithm='fa', axis=1, n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]

    stats = dat.decompose(algorithm='pca', axis=0, n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]

    stats = dat.decompose(algorithm='ica', axis=0, n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]

    new_dat = dat + 100
    stats = new_dat.decompose(algorithm='nnmf',
                              axis=0,
                              n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]

    stats = dat.decompose(algorithm='fa', axis=0, n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]
Example #10
File: test_feat.py Project: yanliu1/feat
def test_fex():
    # For iMotions-FACET data files
    # test reading iMotions file < version 6
    dat = Fex(read_facet(join(get_test_data_path(), 'iMotions_Test_v2.txt')),
              sampling_freq=30)

    # test reading iMotions file > version 6
    filename = join(get_test_data_path(), 'iMotions_Test.txt')
    df = read_facet(filename)
    sessions = np.array([[x] * 10
                         for x in range(1 + int(len(df) / 10))]).flatten()[:-1]
    dat = Fex(df, sampling_freq=30, sessions=sessions)

    # Test Session ValueError
    with pytest.raises(ValueError):
        Fex(df, sampling_freq=30, sessions=sessions[:10])

    # Test KeyError
    with pytest.raises(KeyError):
        Fex(read_facet(filename, features=['NotHere']), sampling_freq=30)

    # Test length
    assert len(dat) == 519

    # Test Info
    assert isinstance(dat.info(), str)

    # Test sessions generator
    assert len(np.unique(dat.sessions)) == len([x for x in dat.itersessions()])

    # Test metadata propagation
    assert dat['Joy'].sampling_freq == dat.sampling_freq
    assert dat.iloc[:, 0].sampling_freq == dat.sampling_freq

    # Test Downsample
    assert len(dat.downsample(target=10)) == 52

    # Test upsample
    assert len(dat.upsample(target=60, target_type='hz')) == (len(dat) - 1) * 2

    # Test interpolation
    assert np.sum(dat.interpolate(method='linear').isnull().sum() == 0) == len(
        dat.columns)
    dat = dat.interpolate(method='linear')

    # Test distance
    d = dat.distance()
    assert isinstance(d, Adjacency)
    assert d.square_shape()[0] == len(dat)

    # Test Copy
    assert isinstance(dat.copy(), Fex)
    assert dat.copy().sampling_freq == dat.sampling_freq

    # Test baseline
    assert isinstance(dat.baseline(baseline='median'), Fex)
    assert isinstance(dat.baseline(baseline='mean'), Fex)
    assert isinstance(dat.baseline(baseline='begin'), Fex)
    assert isinstance(dat.baseline(baseline=dat.mean()), Fex)
    assert isinstance(dat.baseline(baseline='median', ignore_sessions=True),
                      Fex)
    assert isinstance(dat.baseline(baseline='mean', ignore_sessions=True), Fex)
    assert isinstance(dat.baseline(baseline=dat.mean(), ignore_sessions=True),
                      Fex)
    assert isinstance(dat.baseline(baseline='median', normalize='pct'), Fex)
    assert isinstance(dat.baseline(baseline='mean', normalize='pct'), Fex)
    assert isinstance(dat.baseline(baseline=dat.mean(), normalize='pct'), Fex)
    assert isinstance(
        dat.baseline(baseline='median', ignore_sessions=True, normalize='pct'),
        Fex)
    assert isinstance(
        dat.baseline(baseline='mean', ignore_sessions=True, normalize='pct'),
        Fex)
    assert isinstance(
        dat.baseline(baseline=dat.mean(),
                     ignore_sessions=True,
                     normalize='pct'), Fex)
    # Test ValueError
    with pytest.raises(ValueError):
        dat.baseline(baseline='BadValue')

    # Test summary
    dat2 = dat.loc[:, ['Positive', 'Negative']].interpolate()
    out = dat2.extract_summary(min=True, max=True, mean=True)
    assert len(out) == len(np.unique(dat2.sessions))
    assert np.array_equal(out.sessions, np.unique(dat2.sessions))
    assert out.sampling_freq == dat2.sampling_freq
    assert dat2.shape[1] * 3 == out.shape[1]
    out = dat2.extract_summary(min=True,
                               max=True,
                               mean=True,
                               ignore_sessions=True)
    assert len(out) == 1
    assert dat2.shape[1] * 3 == out.shape[1]

    # Check if file is missing columns
    data_bad = dat.iloc[:, 0:10]
    with pytest.raises(Exception):
        _check_if_fex(data_bad, imotions_columns)

    # Check if file has too many columns
    data_bad = dat.copy()
    data_bad['Test'] = 0
    with pytest.raises(Exception):
        _check_if_fex(data_bad, imotions_columns)

    # Test clean
    assert isinstance(dat.clean(), Fex)
    assert dat.clean().columns is dat.columns
    assert dat.clean().sampling_freq == dat.sampling_freq

    # Test Decompose
    n_components = 3
    stats = dat.decompose(algorithm='pca', axis=1, n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]

    stats = dat.decompose(algorithm='ica', axis=1, n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]

    new_dat = dat + 100
    stats = new_dat.decompose(algorithm='nnmf',
                              axis=1,
                              n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]

    stats = dat.decompose(algorithm='fa', axis=1, n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]

    stats = dat.decompose(algorithm='pca', axis=0, n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]

    stats = dat.decompose(algorithm='ica', axis=0, n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]

    new_dat = dat + 100
    stats = new_dat.decompose(algorithm='nnmf',
                              axis=0,
                              n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]

    stats = dat.decompose(algorithm='fa', axis=0, n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]
Example #11
def test_plot_face():
    # test plotting method
    fx = Fex(filename=join(get_test_data_path(), 'iMotions_Test_v6.txt'),
             sampling_freq=30,
             detector='FACET')
    fx = fx.read_file()
    ax = fx.plot_aus(row_n=0)
    assert_plot_shape(ax)
    plt.close()

    fx = Fex(filename=join(get_test_data_path(), 'OpenFace_Test.csv'),
             sampling_freq=30,
             detector='OpenFace')
    fx = fx.read_file()
    ax = fx.plot_aus(row_n=0)
    assert_plot_shape(ax)
    plt.close()

    fx = Fex(filename=join(get_test_data_path(),
                           'sample_affectiva-api-app_output.json'),
             sampling_freq=30,
             detector='Affectiva')
    fx = fx.read_file(orig_cols=False)
    ax = fx.plot_aus(row_n=0)
    assert_plot_shape(ax)
    plt.close()

    # test plot in util
    plot_face()
    assert_plot_shape(plt.gca())
    plt.close()

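    # Note: au, au2, and predict are module-level fixtures in the original test file, not defined in this excerpt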
    plot_face(au=au, vectorfield={'reference': predict(au2)})
    assert_plot_shape(plt.gca())
    plt.close()

    with pytest.raises(ValueError):
        plot_face(model=au, au=au, vectorfield={'reference': predict(au2)})
    with pytest.raises(ValueError):
        plot_face(model=au, au=au, vectorfield=[])
    with pytest.raises(ValueError):
        plot_face(model=au, au=au, vectorfield={'noreference': predict(au2)})
Example #12
def test_fextractor():
    filename = os.path.join(get_test_data_path(), "iMotions_Test_v6.txt")
    df = read_facet(filename)
    sessions = np.array([[x] * 10
                         for x in range(1 + int(len(df) / 10))]).flatten()[:-1]
    dat = Fex(df, sampling_freq=30, sessions=sessions)
    dat = dat[[
        "Joy",
        "Anger",
        "Surprise",
        "Fear",
        "Contempt",
        "Disgust",
        "Sadness",
        "Confusion",
        "Frustration",
        "Neutral",
        "Positive",
        "Negative",
    ]]

    # Test Fextractor class
    extractor = Fextractor()
    dat = dat.interpolate()  # interpolate data to get rid of NAs
    f = 0.5
    num_cyc = 3  # for wavelet extraction
    # Test each extraction method
    extractor.mean(fex_object=dat)
    extractor.max(fex_object=dat)
    extractor.min(fex_object=dat)
    # boft needs a groupby function.
    extractor.multi_wavelet(fex_object=dat)
    extractor.wavelet(fex_object=dat, freq=f, num_cyc=num_cyc)
    # Test ValueError
    with pytest.raises(ValueError):
        extractor.wavelet(fex_object=dat,
                          freq=f,
                          num_cyc=num_cyc,
                          mode="BadValue")
    # Test Fextractor merge method
    newdat = extractor.merge(out_format="long")
    assert newdat["sessions"].nunique() == 52
    assert isinstance(newdat, DataFrame)
    assert len(extractor.merge(out_format="long")) == 7488
    assert len(extractor.merge(out_format="wide")) == 52

    # Test summary method
    extractor = Fextractor()
    dat2 = dat.loc[:, ["Positive", "Negative"]].interpolate()
    extractor.summary(fex_object=dat2, min=True, max=True, mean=True)
    # [Pos, Neg] * [mean, max, min] + ['sessions']
    assert extractor.merge(out_format="wide").shape[1] == dat2.shape[1] * 3 + 1

    # Test wavelet extraction
    extractor = Fextractor()
    extractor.wavelet(fex_object=dat,
                      freq=f,
                      num_cyc=num_cyc,
                      ignore_sessions=False)
    extractor.wavelet(fex_object=dat,
                      freq=f,
                      num_cyc=num_cyc,
                      ignore_sessions=True)
    wavelet = extractor.extracted_features[0]  # ignore_sessions = False
    assert wavelet.sampling_freq == dat.sampling_freq
    assert len(wavelet) == len(dat)
    wavelet = extractor.extracted_features[1]  # ignore_sessions = True
    assert wavelet.sampling_freq == dat.sampling_freq
    assert len(wavelet) == len(dat)
    assert np.array_equal(wavelet.sessions, dat.sessions)
    for i in ["filtered", "phase", "magnitude", "power"]:
        extractor = Fextractor()
        extractor.wavelet(fex_object=dat,
                          freq=f,
                          num_cyc=num_cyc,
                          ignore_sessions=True,
                          mode=i)
        wavelet = extractor.extracted_features[0]
        assert wavelet.sampling_freq == dat.sampling_freq
        assert len(wavelet) == len(dat)

    # Test multi wavelet
    dat2 = dat.loc[:, ["Positive", "Negative"]].interpolate()
    n_bank = 4
    extractor = Fextractor()
    extractor.multi_wavelet(
        fex_object=dat2,
        min_freq=0.1,
        max_freq=2,
        bank=n_bank,
        mode="power",
        ignore_sessions=False,
    )
    out = extractor.extracted_features[0]
    assert n_bank * dat2.shape[1] == out.shape[1]
    assert len(out) == len(dat2)
    assert np.array_equal(out.sessions, dat2.sessions)
    assert out.sampling_freq == dat2.sampling_freq

    # Test Bag Of Temporal Features Extraction
    filename = os.path.join(get_test_data_path(), "iMotions_Test_v6.txt")
    facet = Fex(filename=filename, sampling_freq=30, detector="FACET")
    facet = facet.read_file()
    facet_filled = facet.fillna(0)
    facet_filled = facet_filled[[
        "Joy",
        "Anger",
        "Surprise",
        "Fear",
        "Contempt",
        "Disgust",
        "Sadness",
        "Confusion",
        "Frustration",
        "Neutral",
        "Positive",
        "Negative",
    ]]
    # assert isinstance(facet_filled,Facet)
    extractor = Fextractor()
    extractor.boft(facet_filled)
    assert isinstance(extractor.extracted_features[0], DataFrame)
    filters, histograms = 8, 12
    assert (extractor.extracted_features[0].shape[1] ==
            facet_filled.columns.shape[0] * filters * histograms)
Example #13
def test_fex():
    with pytest.raises(Exception):
        fex = Fex().read_feat()
    with pytest.raises(Exception):
        fex = Fex().read_facet()
    with pytest.raises(Exception):
        fex = Fex().read_openface()
    with pytest.raises(Exception):
        fex = Fex().read_affectiva()

    # For iMotions-FACET data files
    # test reading iMotions file < version 6
    filename = os.path.join(get_test_data_path(), "iMotions_Test_v5.txt")
    dat = Fex(read_facet(filename), sampling_freq=30)

    # test reading iMotions file > version 6
    filename = os.path.join(get_test_data_path(), "iMotions_Test_v6.txt")
    df = read_facet(filename)

    # Test slicing functions.
    assert df.aus().shape == (519, 20)

    assert df.emotions().shape == (519, 12)

    assert df.facebox().shape == (519, 4)

    assert df.time().shape[-1] == 4

    assert df.design().shape[-1] == 4

    # Test metadata propagation to sliced series
    assert df.iloc[0].aus().shape == (20, )
    assert df.iloc[0].emotions().shape == (12, )
    assert df.iloc[0].facebox().shape == (4, )
    assert df.iloc[0].time().shape == (4, )
    assert df.iloc[0].design().shape == (4, )

    sessions = np.array([[x] * 10
                         for x in range(1 + int(len(df) / 10))]).flatten()[:-1]
    dat = Fex(df, sampling_freq=30, sessions=sessions)
    dat = dat[[
        "Joy",
        "Anger",
        "Surprise",
        "Fear",
        "Contempt",
        "Disgust",
        "Sadness",
        "Confusion",
        "Frustration",
        "Neutral",
        "Positive",
        "Negative",
    ]]

    # Test Session ValueError
    with pytest.raises(ValueError):
        Fex(df, sampling_freq=30, sessions=sessions[:10])

    # Test KeyError
    with pytest.raises(KeyError):
        Fex(read_facet(filename, features=["NotHere"]), sampling_freq=30)

    # Test length
    assert len(dat) == 519

    # Test sessions generator
    assert len(np.unique(dat.sessions)) == len([x for x in dat.itersessions()])

    # Test metadata propagation
    assert dat[["Joy"]].sampling_freq == dat.sampling_freq
    assert dat.iloc[:, 0].sampling_freq == dat.sampling_freq
    assert dat.iloc[0, :].sampling_freq == dat.sampling_freq

    assert dat.loc[[0], :].sampling_freq == dat.sampling_freq
    assert dat.loc[:, ["Joy"]].sampling_freq == dat.sampling_freq
    # assert dat.loc[0].sampling_freq == dat.sampling_freq # DOES NOT WORK YET

    # Test Downsample
    assert len(dat.downsample(target=10)) == 52

    # Test upsample
    assert len(dat.upsample(target=60, target_type="hz")) == (len(dat) - 1) * 2

    # Test interpolation
    assert (dat.interpolate(method="linear").isnull().sum()["Positive"] <
            dat.isnull().sum()["Positive"])
    dat = dat.interpolate(method="linear")

    # Test distance
    d = dat[["Positive"]].distance()
    assert isinstance(d, Adjacency)
    assert d.square_shape()[0] == len(dat)

    # Test Copy
    assert isinstance(dat.copy(), Fex)
    assert dat.copy().sampling_freq == dat.sampling_freq

    # Test rectification
    rectified = df.rectification()
    assert (df[df.au_columns].isna().sum()[0] <
            rectified[rectified.au_columns].isna().sum()[0])

    # Test pspi
    assert len(df.calc_pspi()) == len(df)

    # Test baseline
    assert isinstance(dat.baseline(baseline="median"), Fex)
    assert isinstance(dat.baseline(baseline="mean"), Fex)
    assert isinstance(dat.baseline(baseline="begin"), Fex)
    assert isinstance(dat.baseline(baseline=dat.mean()), Fex)
    assert isinstance(dat.baseline(baseline="median", ignore_sessions=True),
                      Fex)
    assert isinstance(dat.baseline(baseline="mean", ignore_sessions=True), Fex)
    assert isinstance(dat.baseline(baseline=dat.mean(), ignore_sessions=True),
                      Fex)
    assert isinstance(dat.baseline(baseline="median", normalize="pct"), Fex)
    assert isinstance(dat.baseline(baseline="mean", normalize="pct"), Fex)
    assert isinstance(dat.baseline(baseline=dat.mean(), normalize="pct"), Fex)
    assert isinstance(
        dat.baseline(baseline="median", ignore_sessions=True, normalize="pct"),
        Fex)
    assert isinstance(
        dat.baseline(baseline="mean", ignore_sessions=True, normalize="pct"),
        Fex)
    assert isinstance(
        dat.baseline(baseline=dat.mean(),
                     ignore_sessions=True,
                     normalize="pct"), Fex)
    # Test ValueError
    with pytest.raises(ValueError):
        dat.baseline(baseline="BadValue")

    # Test summary
    dat2 = dat.loc[:, ["Positive", "Negative"]].interpolate()
    out = dat2.extract_summary(min=True, max=True, mean=True)
    assert len(out) == len(np.unique(dat2.sessions))
    assert np.array_equal(out.sessions, np.unique(dat2.sessions))
    assert out.sampling_freq == dat2.sampling_freq
    assert dat2.shape[1] * 3 == out.shape[1]
    out = dat2.extract_summary(min=True,
                               max=True,
                               mean=True,
                               ignore_sessions=True)
    assert len(out) == 1
    assert dat2.shape[1] * 3 == out.shape[1]

    # Check if file is missing columns
    data_bad = dat.iloc[:, 0:10]
    with pytest.raises(Exception):
        _check_if_fex(data_bad, imotions_columns)

    # Check if file has too many columns
    data_bad = dat.copy()
    data_bad["Test"] = 0
    with pytest.raises(Exception):
        _check_if_fex(data_bad, imotions_columns)

    # Test clean
    assert isinstance(dat.clean(), Fex)
    assert dat.clean().columns is dat.columns
    assert dat.clean().sampling_freq == dat.sampling_freq

    # Test Decompose
    n_components = 3
    stats = dat.decompose(algorithm="pca", axis=1, n_components=n_components)
    assert n_components == stats["components"].shape[1]
    assert n_components == stats["weights"].shape[1]

    stats = dat.decompose(algorithm="ica", axis=1, n_components=n_components)
    assert n_components == stats["components"].shape[1]
    assert n_components == stats["weights"].shape[1]

    new_dat = dat + 100
    stats = new_dat.decompose(algorithm="nnmf",
                              axis=1,
                              n_components=n_components)
    assert n_components == stats["components"].shape[1]
    assert n_components == stats["weights"].shape[1]

    stats = dat.decompose(algorithm="fa", axis=1, n_components=n_components)
    assert n_components == stats["components"].shape[1]
    assert n_components == stats["weights"].shape[1]

    stats = dat.decompose(algorithm="pca", axis=0, n_components=n_components)
    assert n_components == stats["components"].shape[1]
    assert n_components == stats["weights"].shape[1]

    stats = dat.decompose(algorithm="ica", axis=0, n_components=n_components)
    assert n_components == stats["components"].shape[1]
    assert n_components == stats["weights"].shape[1]

    new_dat = dat + 100
    stats = new_dat.decompose(algorithm="nnmf",
                              axis=0,
                              n_components=n_components)
    assert n_components == stats["components"].shape[1]
    assert n_components == stats["weights"].shape[1]

    stats = dat.decompose(algorithm="fa", axis=0, n_components=n_components)
    assert n_components == stats["components"].shape[1]
    assert n_components == stats["weights"].shape[1]
Example #14
File: test_feat.py Project: yanliu1/feat
def test_fextractor():
    filename = join(get_test_data_path(), 'iMotions_Test.txt')
    df = read_facet(filename)
    sessions = np.array([[x] * 10
                         for x in range(1 + int(len(df) / 10))]).flatten()[:-1]
    dat = Fex(df, sampling_freq=30, sessions=sessions)

    # Test Fextractor class
    extractor = Fextractor()
    dat = dat.interpolate()  # interpolate data to get rid of NAs
    f = .5
    num_cyc = 3  # for wavelet extraction
    # Test each extraction method
    extractor.mean(fex_object=dat)
    extractor.max(fex_object=dat)
    extractor.min(fex_object=dat)
    #extractor.boft(fex_object=dat, min_freq=.01, max_freq=.20, bank=1)
    extractor.multi_wavelet(fex_object=dat)
    extractor.wavelet(fex_object=dat, freq=f, num_cyc=num_cyc)
    # Test ValueError
    with pytest.raises(ValueError):
        extractor.wavelet(fex_object=dat,
                          freq=f,
                          num_cyc=num_cyc,
                          mode='BadValue')
    # Test Fextractor merge method
    newdat = extractor.merge(out_format='long')
    assert newdat['sessions'].nunique() == 52
    assert isinstance(newdat, DataFrame)
    assert len(extractor.merge(out_format='long')) == 24960
    assert len(extractor.merge(out_format='wide')) == 52

    # Test summary method
    extractor = Fextractor()
    dat2 = dat.loc[:, ['Positive', 'Negative']].interpolate()
    extractor.summary(fex_object=dat2, min=True, max=True, mean=True)
    # [Pos, Neg] * [mean, max, min] + ['sessions']
    assert extractor.merge(out_format='wide').shape[1] == dat2.shape[1] * 3 + 1

    # Test wavelet extraction
    extractor = Fextractor()
    extractor.wavelet(fex_object=dat,
                      freq=f,
                      num_cyc=num_cyc,
                      ignore_sessions=False)
    extractor.wavelet(fex_object=dat,
                      freq=f,
                      num_cyc=num_cyc,
                      ignore_sessions=True)
    wavelet = extractor.extracted_features[0]  # ignore_sessions = False
    assert wavelet.sampling_freq == dat.sampling_freq
    assert len(wavelet) == len(dat)
    wavelet = extractor.extracted_features[1]  # ignore_sessions = True
    assert wavelet.sampling_freq == dat.sampling_freq
    assert len(wavelet) == len(dat)
    assert np.array_equal(wavelet.sessions, dat.sessions)
    for i in ['filtered', 'phase', 'magnitude', 'power']:
        extractor = Fextractor()
        extractor.wavelet(fex_object=dat,
                          freq=f,
                          num_cyc=num_cyc,
                          ignore_sessions=True,
                          mode=i)
        wavelet = extractor.extracted_features[0]
        assert wavelet.sampling_freq == dat.sampling_freq
        assert len(wavelet) == len(dat)

    # Test multi wavelet
    dat2 = dat.loc[:, ['Positive', 'Negative']].interpolate()
    n_bank = 4
    extractor = Fextractor()
    extractor.multi_wavelet(fex_object=dat2,
                            min_freq=.1,
                            max_freq=2,
                            bank=n_bank,
                            mode='power',
                            ignore_sessions=False)
    out = extractor.extracted_features[0]
    assert n_bank * dat2.shape[1] == out.shape[1]
    assert len(out) == len(dat2)
    assert np.array_equal(out.sessions, dat2.sessions)
    assert out.sampling_freq == dat2.sampling_freq

    # Test Bag Of Temporal Features Extraction
    filename = join(get_test_data_path(), 'iMotions_Test.txt')
    facet = Facet(filename=filename, sampling_freq=30)
    facet.read_file()
    facet_filled = facet.fillna(0)
    assert isinstance(facet_filled, Facet)
    extractor = Fextractor()
    extractor.boft(facet_filled)
    assert isinstance(extractor.extracted_features[0], DataFrame)
    filters, histograms = 8, 12
    assert (extractor.extracted_features[0].shape[1] ==
            facet.columns.shape[0] * filters * histograms)
Example #15
def test_info(capsys):
    importantstring = "ThisStringMustBeIncluded"
    fex = Fex(filename=importantstring)
    fex.info()
    captured = capsys.readouterr()
    assert importantstring in captured.out
Example #16
def test_fex(tmpdir):
    # For iMotions-FACET data files
    # test reading iMotions file < version 6
    dat = Fex(read_facet(join(get_test_data_path(), 'iMotions_Test_v2.txt')),
              sampling_freq=30)

    # test reading iMotions file > version 6
    filename = join(get_test_data_path(), 'iMotions_Test.txt')
    df = read_facet(filename)
    sessions = np.array([[x] * 10
                         for x in range(1 + int(len(df) / 10))]).flatten()[:-1]
    dat = Fex(df, sampling_freq=30, sessions=sessions)

    # Test KeyError
    class MyTestCase(unittest.TestCase):
        def test1(self):
            with self.assertRaises(KeyError):
                Fex(read_facet(filename, features=['NotHere']),
                    sampling_freq=30)

    # Test length
    assert len(dat) == 519

    # Test Info
    assert isinstance(dat.info(), str)

    # Test sessions generator
    assert len(np.unique(dat.sessions)) == len([x for x in dat.itersessions()])

    # Test metadata propagation
    assert dat['Joy'].sampling_freq == dat.sampling_freq
    assert dat.iloc[:, 0].sampling_freq == dat.sampling_freq

    # Test Downsample
    assert len(dat.downsample(target=10)) == 52

    # Test upsample
    assert len(dat.upsample(target=60, target_type='hz')) == (len(dat) - 1) * 2

    # Test interpolation
    assert np.sum(dat.interpolate(method='linear').isnull().sum() == 0) == len(
        dat.columns)
    dat = dat.interpolate(method='linear')

    # Test distance
    d = dat.distance()
    assert isinstance(d, Adjacency)
    assert d.square_shape()[0] == len(dat)

    # Test Copy
    assert isinstance(dat.copy(), Fex)
    assert dat.copy().sampling_freq == dat.sampling_freq

    # Test baseline
    assert isinstance(dat.baseline(baseline='median'), Fex)
    assert isinstance(dat.baseline(baseline='mean'), Fex)
    assert isinstance(dat.baseline(baseline=dat.mean()), Fex)
    assert isinstance(dat.baseline(baseline='median', ignore_sessions=True),
                      Fex)
    assert isinstance(dat.baseline(baseline='mean', ignore_sessions=True), Fex)
    assert isinstance(dat.baseline(baseline=dat.mean(), ignore_sessions=True),
                      Fex)
    assert isinstance(dat.baseline(baseline='median', normalize='pct'), Fex)
    assert isinstance(dat.baseline(baseline='mean', normalize='pct'), Fex)
    assert isinstance(dat.baseline(baseline=dat.mean(), normalize='pct'), Fex)
    assert isinstance(
        dat.baseline(baseline='median', ignore_sessions=True, normalize='pct'),
        Fex)
    assert isinstance(
        dat.baseline(baseline='mean', ignore_sessions=True, normalize='pct'),
        Fex)
    assert isinstance(
        dat.baseline(baseline=dat.mean(),
                     ignore_sessions=True,
                     normalize='pct'), Fex)

    # Test facet subclass
    facet = Facet(filename=filename, sampling_freq=30)
    facet.read_file()
    assert len(facet) == 519

    # Test PSPI calculation
    assert len(facet.calc_pspi()) == len(facet)

    # Test Fextractor class
    extractor = Fextractor()
    dat = dat.interpolate()  # interpolate data to get rid of NAs
    f = .5
    num_cyc = 3  # for wavelet extraction
    # Test each extraction method
    extractor.mean(fex_object=dat)
    extractor.max(fex_object=dat)
    extractor.min(fex_object=dat)
    #extractor.boft(fex_object=dat, min_freq=.01, max_freq=.20, bank=1)
    extractor.multi_wavelet(fex_object=dat)
    extractor.wavelet(fex_object=dat, freq=f, num_cyc=num_cyc)
    # Test Fextractor merge method
    newdat = extractor.merge(out_format='long')
    assert newdat['sessions'].nunique() == 52
    assert isinstance(newdat, DataFrame)
    assert len(extractor.merge(out_format='long')) == 24960
    assert len(extractor.merge(out_format='wide')) == 52

    # Test wavelet extraction
    extractor = Fextractor()
    extractor.wavelet(fex_object=dat,
                      freq=f,
                      num_cyc=num_cyc,
                      ignore_sessions=False)
    extractor.wavelet(fex_object=dat,
                      freq=f,
                      num_cyc=num_cyc,
                      ignore_sessions=True)
    wavelet = extractor.extracted_features[0]  # ignore_sessions = False
    assert wavelet.sampling_freq == dat.sampling_freq
    assert len(wavelet) == len(dat)
    wavelet = extractor.extracted_features[1]  # ignore_sessions = True
    assert wavelet.sampling_freq == dat.sampling_freq
    assert len(wavelet) == len(dat)
    assert np.array_equal(wavelet.sessions, dat.sessions)
    for i in ['filtered', 'phase', 'magnitude', 'power']:
        extractor = Fextractor()
        extractor.wavelet(fex_object=dat,
                          freq=f,
                          num_cyc=num_cyc,
                          ignore_sessions=True,
                          mode=i)
        wavelet = extractor.extracted_features[0]
        assert wavelet.sampling_freq == dat.sampling_freq
        assert len(wavelet) == len(dat)

    # Test multi wavelet
    dat2 = dat.loc[:, ['Positive', 'Negative']].interpolate()
    n_bank = 4
    extractor = Fextractor()
    extractor.multi_wavelet(fex_object=dat2,
                            min_freq=.1,
                            max_freq=2,
                            bank=n_bank,
                            mode='power',
                            ignore_sessions=False)
    out = extractor.extracted_features[0]
    assert n_bank * dat2.shape[1] == out.shape[1]
    assert len(out) == len(dat2)
    assert np.array_equal(out.sessions, dat2.sessions)
    assert out.sampling_freq == dat2.sampling_freq

    # Test Bag Of Temporal Features Extraction
    facet_filled = facet.fillna(0)
    assert isinstance(facet_filled, Facet)
    extractor = Fextractor()
    extractor.boft(facet_filled)
    assert isinstance(extractor.extracted_features[0], DataFrame)
    filters, histograms = 8, 12
    assert (extractor.extracted_features[0].shape[1] ==
            facet.columns.shape[0] * filters * histograms)

    # Test mean, min, and max Features Extraction
    # assert isinstance(facet_filled.extract_mean(), Facet)
    # assert isinstance(facet_filled.extract_min(), Facet)
    # assert isinstance(facet_filled.extract_max(), Facet)

    # Test if a method returns subclass.
    facet = facet.downsample(target=10, target_type='hz')
    assert isinstance(facet, Facet)

    ### Test Openface importer and subclass ###

    # For OpenFace data file
    filename = join(get_test_data_path(), 'OpenFace_Test.csv')
    openface = Fex(read_openface(filename), sampling_freq=30)

    # Test KeyError
    class MyTestCase(unittest.TestCase):
        def test1(self):
            with self.assertRaises(KeyError):
                Fex(read_openface(filename, features=['NotHere']),
                    sampling_freq=30)

    # Test length
    assert len(openface) == 100

    # Test loading from filename
    openface = Openface(filename=filename, sampling_freq=30)
    openface.read_file()

    # Test length again after reloading
    assert len(openface) == 100

    # Test PSPI calculation (computed differently from FACET)
    assert len(openface.calc_pspi()) == len(openface)

    # Test if a method returns subclass.
    openface = openface.downsample(target=10, target_type='hz')
    assert isinstance(openface, Openface)

    # Check if file is missing columns
    data_bad = dat.iloc[:, 0:10]
    with pytest.raises(Exception):
        _check_if_fex(data_bad, imotions_columns)

    # Check if file has too many columns
    data_bad = dat.copy()
    data_bad['Test'] = 0
    with pytest.raises(Exception):
        _check_if_fex(data_bad, imotions_columns)

    # Test clean
    assert isinstance(dat.clean(), Fex)
    assert dat.clean().columns is dat.columns
    assert dat.clean().sampling_freq == dat.sampling_freq

    # Test Decompose
    n_components = 3
    stats = dat.decompose(algorithm='pca', axis=1, n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]

    stats = dat.decompose(algorithm='ica', axis=1, n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]

    new_dat = dat + 100
    stats = new_dat.decompose(algorithm='nnmf',
                              axis=1,
                              n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]

    stats = dat.decompose(algorithm='fa', axis=1, n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]

    stats = dat.decompose(algorithm='pca', axis=0, n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]

    stats = dat.decompose(algorithm='ica', axis=0, n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]

    new_dat = dat + 100
    stats = new_dat.decompose(algorithm='nnmf',
                              axis=0,
                              n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]

    stats = dat.decompose(algorithm='fa', axis=0, n_components=n_components)
    assert n_components == stats['components'].shape[1]
    assert n_components == stats['weights'].shape[1]
Example #17
 def test1(self):
     with self.assertRaises(KeyError):
         Fex(read_facet(filename, features=['NotHere']),
             sampling_freq=30)
Example #18
def test_feat():
    filename = os.path.join(get_test_data_path(), 'output.csv')
    fex = Fex(filename=filename, detector='Feat')
    fex.read_file()
Example #19
File: test_feat.py Project: yanliu1/feat
def test_affectiva():
    filename = join(get_test_data_path(),
                    'sample_affectiva-api-app_output.json')
    affdex = Fex(read_affectiva(filename), sampling_freq=1)
    assert affdex.shape[1] == 32
Example #20
    def detect_video(self,
                     inputFname,
                     outputFname=None,
                     skip_frames=1,
                     verbose=False):
        """Detects FEX from a video file.

        Args:
            inputFname (str): Path to video file
            outputFname (str, optional): Path to output file. Defaults to None.
            skip_frames (int, optional): Process every skip_frames-th frame, for speed or when not all frames need to be processed. Defaults to 1.
            
        Returns:
            dataframe: Prediction results dataframe if outputFname is None. Returns True if outputFname is specified.
        """
        self.info["inputFname"] = inputFname
        self.info["outputFname"] = outputFname
        init_df = pd.DataFrame(columns=self["output_columns"])
        if outputFname:
            init_df.to_csv(outputFname, index=False, header=True)

        cap = cv2.VideoCapture(inputFname)
        length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        frames_to_process = int(np.ceil(length / skip_frames))

        # Determine whether to use multiprocessing.
        n_jobs = self["n_jobs"]
        if n_jobs == -1:
            thread_num = cv2.getNumberOfCPUs()  # get available cpus
        else:
            thread_num = n_jobs
        if verbose:
            print(f"Using {thread_num} cpus")
        pool = ThreadPool(processes=thread_num)
        pending_task = deque()
        counter = 0
        processed_frames = 0
        frame_got = True
        detected_faces = []
        if verbose:
            print("Processing video.")
        #  single-core loop (the thread pool above is created but not used here)
        while True:
            frame_got, frame = cap.read()
            if not frame_got:
                break
            if counter % skip_frames == 0:
                df = self.process_frame(frame, counter=counter)
                df["input"] = inputFname
                if outputFname:
                    df[init_df.columns].to_csv(outputFname,
                                               index=False,
                                               header=False,
                                               mode="a")
                else:
                    init_df = pd.concat([init_df, df[init_df.columns]], axis=0)
            counter = counter + 1
        cap.release()
        if outputFname:
            return True
        else:
            return Fex(
                init_df,
                filename=inputFname,
                au_columns=self["au_presence_columns"],
                emotion_columns=FEAT_EMOTION_COLUMNS,
                facebox_columns=FEAT_FACEBOX_COLUMNS,
                landmark_columns=openface_2d_landmark_columns,
                time_columns=FEAT_TIME_COLUMNS,
                detector="Feat",
            )
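
A hedged usage sketch for the video path; constructor defaults and file names are assumptions rather than the library's confirmed API:

from feat import Detector

detector = Detector()  # constructor defaults assumed
# Process every 30th frame and keep results in memory as a Fex dataframe:
fex = detector.detect_video("clip.mp4", skip_frames=30)
# Or append rows to a CSV as they are produced (returns True on success):
detector.detect_video("clip.mp4", outputFname="clip_fex.csv", skip_frames=30)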
Example #21
    def detect_image(self,
                     inputFname,
                     batch_size=5,
                     outputFname=None,
                     verbose=False,
                     singleframe4error=False):
        """Detects FEX from an image file.

        Args:
            inputFname (str, or list of str): Path to an image file or a list of paths to image files.
            batch_size (int, optional): Number of images to process per batch. Larger values are faster but use more memory. Defaults to 5.
            outputFname (str, optional): Path to output file. Defaults to None.
            singleframe4error (bool, optional): If True, when an exception occurs inside a batch, process each image in the batch individually instead of discarding the whole batch. Defaults to False.

        Returns:
            Fex: Prediction results dataframe if outputFname is None. Returns True if outputFname is specified.
        """
        assert (
            type(inputFname) == str or type(inputFname) == list
        ), "inputFname must be a string path to image or list of image paths"
        if type(inputFname) == str:
            inputFname = [inputFname]
        for inputF in inputFname:
            if not os.path.exists(inputF):
                raise FileNotFoundError(f"File {inputF} not found.")
        self.info["inputFname"] = inputFname

        init_df = pd.DataFrame(columns=self["output_columns"])
        if outputFname:
            init_df.to_csv(outputFname, index=False, header=True)

        counter = 0
        concat_frame = None
        input_names = []
        while counter < len(inputFname):
            #if counter % skip_frames == 0:
            frame = np.expand_dims(cv2.imread(inputFname[counter]), 0)
            if concat_frame is None:
                concat_frame = frame
                tmp_counter = counter
            else:
                concat_frame = np.concatenate([concat_frame, frame], 0)
            input_names.append(inputFname[counter])
            counter = counter + 1

            if (counter % batch_size == 0) and (concat_frame is not None):
                if singleframe4error:
                    try:
                        df, _ = self.process_frame(
                            concat_frame,
                            counter=tmp_counter,
                            singleframe4error=singleframe4error)
                    except FaceDetectionError:
                        df = None
                        for id_fr in range(concat_frame.shape[0]):
                            tmp_df, _ = self.process_frame(
                                concat_frame[id_fr:(id_fr + 1)],
                                counter=tmp_counter,
                                singleframe4error=False)
                            tmp_counter += 1
                            if df is None:
                                df = tmp_df
                            else:
                                df = pd.concat((df, tmp_df), 0)
                else:
                    df, _ = self.process_frame(concat_frame,
                                               counter=tmp_counter)

                df["input"] = input_names
                if outputFname:
                    df[init_df.columns].to_csv(outputFname,
                                               index=False,
                                               header=False,
                                               mode="a")
                else:
                    init_df = pd.concat([init_df, df[init_df.columns]], axis=0)

                concat_frame = None
                tmp_counter = None
                input_names = []

        if len(inputFname) % batch_size != 0:
            # process remaining frames
            if concat_frame is not None:
                if singleframe4error:
                    try:
                        df, _ = self.process_frame(
                            concat_frame,
                            counter=tmp_counter,
                            singleframe4error=singleframe4error)
                    except FaceDetectionError:
                        df = None
                        for id_fr in range(concat_frame.shape[0]):
                            tmp_df, _ = self.process_frame(
                                concat_frame[id_fr:(id_fr + 1)],
                                counter=tmp_counter,
                                singleframe4error=False)
                            tmp_counter += 1
                            if df is None:
                                df = tmp_df
                            else:
                                df = pd.concat((df, tmp_df), 0)
                else:
                    df, _ = self.process_frame(concat_frame,
                                               counter=tmp_counter)
                df["input"] = input_names
                if outputFname:
                    df[init_df.columns].to_csv(outputFname,
                                               index=False,
                                               header=False,
                                               mode="a")
                else:
                    init_df = pd.concat([init_df, df[init_df.columns]], axis=0)

        if outputFname:
            return True
        else:
            return Fex(
                init_df,
                filename=inputFname,
                au_columns=self['au_presence_columns'],
                emotion_columns=FEAT_EMOTION_COLUMNS,
                facebox_columns=FEAT_FACEBOX_COLUMNS,
                landmark_columns=openface_2d_landmark_columns,
                facepose_columns=FACET_FACEPOSE_COLUMNS,
                time_columns=FACET_TIME_COLUMNS,
                detector="Feat",
            )
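
A hedged sketch of the batching and error-recovery options this version adds; file names are hypothetical:

from feat import Detector

detector = Detector()
fex = detector.detect_image(
    ["img_%03d.jpg" % i for i in range(20)],  # hypothetical image list
    batch_size=8,                 # images per forward pass
    singleframe4error=True,       # on a batch failure, retry images one by one
)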
Example #22
    def detect_video(self,
                     inputFname,
                     batch_size=5,
                     outputFname=None,
                     skip_frames=1,
                     verbose=False,
                     singleframe4error=False):
        """Detects FEX from a video file.

        Args:
            inputFname (str): Path to video file
            outputFname (str, optional): Path to output file. Defaults to None.
            batch_size (int, optional): Number of frames to process per batch. Larger values are faster but use more memory. Defaults to 5.
            skip_frames (int, optional): Process every skip_frames-th frame, for speed or when not all frames need to be processed. Defaults to 1.
            singleframe4error (bool, optional): If True, when an exception occurs inside a batch, process each frame in the batch individually instead of discarding the whole batch. Defaults to False.
        Returns:
            dataframe: Prediction results dataframe if outputFname is None. Returns True if outputFname is specified.
        """
        self.info["inputFname"] = inputFname
        self.info["outputFname"] = outputFname
        init_df = pd.DataFrame(columns=self["output_columns"])
        if outputFname:
            init_df.to_csv(outputFname, index=False, header=True)

        cap = cv2.VideoCapture(inputFname)
        length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        frames_to_process = int(np.ceil(length / skip_frames))

        counter = 0
        frame_got = True
        if verbose:
            print("Processing video.")
        #  single core
        concat_frame = None
        while True:
            frame_got, frame = cap.read()
            if frame_got:
                if counter % skip_frames == 0:
                    # accumulate this frame into the current batch
                    if concat_frame is None:
                        concat_frame = np.expand_dims(frame, 0)
                        tmp_counter = counter
                    else:
                        concat_frame = np.concatenate(
                            [concat_frame,
                             np.expand_dims(frame, 0)], 0)
                if (concat_frame is not None) and (counter != 0) and (
                        concat_frame.shape[0] % batch_size
                        == 0):  # a full batch has accumulated; process it
                    if singleframe4error:
                        try:
                            df, _ = self.process_frame(
                                concat_frame,
                                counter=tmp_counter,
                                singleframe4error=singleframe4error,
                                skip_frame_rate=skip_frames)
                        except FaceDetectionError:
                            df = None
                            for id_fr in range(concat_frame.shape[0]):
                                tmp_df, _ = self.process_frame(
                                    concat_frame[id_fr:(id_fr + 1)],
                                    counter=tmp_counter,
                                    singleframe4error=False,
                                    skip_frame_rate=skip_frames)
                                tmp_counter += 1
                                if df is None:
                                    df = tmp_df
                                else:
                                    df = pd.concat((df, tmp_df), 0)
                    else:
                        df, _ = self.process_frame(concat_frame,
                                                   counter=tmp_counter,
                                                   skip_frame_rate=skip_frames)

                    df["input"] = inputFname
                    if outputFname:
                        df[init_df.columns].to_csv(outputFname,
                                                   index=False,
                                                   header=False,
                                                   mode="a")
                    else:
                        init_df = pd.concat([init_df, df[init_df.columns]],
                                            axis=0)
                    concat_frame = None
                    tmp_counter = None
                counter = counter + 1
            else:
                # process remaining frames
                if concat_frame is not None:
                    if singleframe4error:
                        try:
                            df, _ = self.process_frame(
                                concat_frame,
                                counter=tmp_counter,
                                singleframe4error=singleframe4error,
                                skip_frame_rate=skip_frames)
                        except FaceDetectionError:
                            df = None
                            for id_fr in range(concat_frame.shape[0]):
                                tmp_df, _ = self.process_frame(
                                    concat_frame[id_fr:(id_fr + 1)],
                                    counter=tmp_counter,
                                    singleframe4error=False,
                                    skip_frame_rate=skip_frames)
                                tmp_counter += 1
                                if df is None:
                                    df = tmp_df
                                else:
                                    df = pd.concat((df, tmp_df), 0)
                    else:
                        df, _ = self.process_frame(concat_frame,
                                                   counter=tmp_counter,
                                                   skip_frame_rate=skip_frames)
                    df["input"] = inputFname
                    if outputFname:
                        df[init_df.columns].to_csv(outputFname,
                                                   index=False,
                                                   header=False,
                                                   mode="a")
                    else:
                        init_df = pd.concat([init_df, df[init_df.columns]],
                                            axis=0)
                break
        cap.release()
        if outputFname:
            return True
        else:
            return Fex(
                init_df,
                filename=inputFname,
                au_columns=self["au_presence_columns"],
                emotion_columns=FEAT_EMOTION_COLUMNS,
                facebox_columns=FEAT_FACEBOX_COLUMNS,
                landmark_columns=openface_2d_landmark_columns,
                facepose_columns=FACET_FACEPOSE_COLUMNS,
                time_columns=FEAT_TIME_COLUMNS,
                detector="Feat",
            )