def test_fex():
    """Integration test for the Fex class and the iMotions-FACET reader.

    Exercises file parsing (pre- and post-v6 iMotions formats), slicing
    accessors, metadata propagation through indexing, resampling,
    interpolation, baselining, summary extraction, column validation, and
    matrix decomposition against the bundled iMotions test fixtures.
    """
    # Calling any read_* method on an uninitialised Fex must fail.
    with pytest.raises(Exception):
        Fex().read_feat()
    with pytest.raises(Exception):
        Fex().read_facet()
    with pytest.raises(Exception):
        Fex().read_openface()
    with pytest.raises(Exception):
        Fex().read_affectiva()

    # For iMotions-FACET data files:
    # test reading iMotions file < version 6
    filename = os.path.join(get_test_data_path(), "iMotions_Test_v5.txt")
    dat = Fex(read_facet(filename), sampling_freq=30)

    # test reading iMotions file > version 6
    filename = os.path.join(get_test_data_path(), "iMotions_Test_v6.txt")
    df = read_facet(filename)

    # Test slicing functions.
    assert df.aus().shape == (519, 20)
    assert df.emotions().shape == (519, 12)
    assert df.facebox().shape == (519, 4)
    assert df.time().shape[-1] == 4
    assert df.design().shape[-1] == 4

    # Test metadata propagation to sliced series.
    assert df.iloc[0].aus().shape == (20,)
    assert df.iloc[0].emotions().shape == (12,)
    assert df.iloc[0].facebox().shape == (4,)
    assert df.iloc[0].time().shape == (4,)
    assert df.iloc[0].design().shape == (4,)

    # Ten-frame sessions; the trailing frame is trimmed so lengths match.
    sessions = np.array(
        [[x] * 10 for x in range(1 + int(len(df) / 10))]
    ).flatten()[:-1]
    dat = Fex(df, sampling_freq=30, sessions=sessions)
    dat = dat[
        [
            "Joy",
            "Anger",
            "Surprise",
            "Fear",
            "Contempt",
            "Disgust",
            "Sadness",
            "Confusion",
            "Frustration",
            "Neutral",
            "Positive",
            "Negative",
        ]
    ]

    # A sessions vector shorter than the data must raise ValueError.
    with pytest.raises(ValueError):
        Fex(df, sampling_freq=30, sessions=sessions[:10])

    # Requesting a feature column that does not exist must raise KeyError.
    with pytest.raises(KeyError):
        Fex(read_facet(filename, features=["NotHere"]), sampling_freq=30)

    # Test length.
    assert len(dat) == 519

    # The sessions generator yields one chunk per unique session id.
    assert len(np.unique(dat.sessions)) == len([x for x in dat.itersessions()])

    # Metadata (sampling_freq) must survive every indexing path.
    assert dat[["Joy"]].sampling_freq == dat.sampling_freq
    assert dat.iloc[:, 0].sampling_freq == dat.sampling_freq
    assert dat.iloc[0, :].sampling_freq == dat.sampling_freq
    assert dat.loc[[0], :].sampling_freq == dat.sampling_freq
    assert dat.loc[:, ["Joy"]].sampling_freq == dat.sampling_freq
    # assert dat.loc[0].sampling_freq == dat.sampling_freq  # DOES NOT WORK YET

    # Test downsample: 519 frames at 30 Hz -> 52 frames at 10 Hz.
    assert len(dat.downsample(target=10)) == 52

    # Test upsample: doubling the rate doubles the count (minus the edge).
    assert len(dat.upsample(target=60, target_type="hz")) == (len(dat) - 1) * 2

    # Interpolation must strictly reduce the number of missing values.
    assert (
        dat.interpolate(method="linear").isnull().sum()["Positive"]
        < dat.isnull().sum()["Positive"]
    )
    dat = dat.interpolate(method="linear")

    # Test distance.
    d = dat[["Positive"]].distance()
    assert isinstance(d, Adjacency)
    assert d.square_shape()[0] == len(dat)

    # Test copy.
    assert isinstance(dat.copy(), Fex)
    assert dat.copy().sampling_freq == dat.sampling_freq

    # Rectification should introduce additional NaNs in the AU columns.
    rectified = df.rectification()
    assert (
        df[df.au_columns].isna().sum()[0]
        < rectified[rectified.au_columns].isna().sum()[0]
    )

    # Test pspi.
    assert len(df.calc_pspi()) == len(df)

    # Test baseline across every supported reference / option combination.
    assert isinstance(dat.baseline(baseline="median"), Fex)
    assert isinstance(dat.baseline(baseline="mean"), Fex)
    assert isinstance(dat.baseline(baseline="begin"), Fex)
    assert isinstance(dat.baseline(baseline=dat.mean()), Fex)
    assert isinstance(dat.baseline(baseline="median", ignore_sessions=True), Fex)
    assert isinstance(dat.baseline(baseline="mean", ignore_sessions=True), Fex)
    assert isinstance(dat.baseline(baseline=dat.mean(), ignore_sessions=True), Fex)
    assert isinstance(dat.baseline(baseline="median", normalize="pct"), Fex)
    assert isinstance(dat.baseline(baseline="mean", normalize="pct"), Fex)
    assert isinstance(dat.baseline(baseline=dat.mean(), normalize="pct"), Fex)
    assert isinstance(
        dat.baseline(baseline="median", ignore_sessions=True, normalize="pct"), Fex
    )
    assert isinstance(
        dat.baseline(baseline="mean", ignore_sessions=True, normalize="pct"), Fex
    )
    assert isinstance(
        dat.baseline(baseline=dat.mean(), ignore_sessions=True, normalize="pct"), Fex
    )

    # An unknown baseline keyword must raise ValueError.
    with pytest.raises(ValueError):
        dat.baseline(baseline="BadValue")

    # Test summary extraction: per session, then pooled across sessions.
    dat2 = dat.loc[:, ["Positive", "Negative"]].interpolate()
    out = dat2.extract_summary(min=True, max=True, mean=True)
    assert len(out) == len(np.unique(dat2.sessions))
    assert np.array_equal(out.sessions, np.unique(dat2.sessions))
    assert out.sampling_freq == dat2.sampling_freq
    assert dat2.shape[1] * 3 == out.shape[1]
    out = dat2.extract_summary(min=True, max=True, mean=True, ignore_sessions=True)
    assert len(out) == 1
    assert dat2.shape[1] * 3 == out.shape[1]

    # Column validation must reject a file that is missing columns...
    data_bad = dat.iloc[:, 0:10]
    with pytest.raises(Exception):
        _check_if_fex(data_bad, imotions_columns)

    # ...and one that has too many columns.
    data_bad = dat.copy()
    data_bad["Test"] = 0
    with pytest.raises(Exception):
        _check_if_fex(data_bad, imotions_columns)

    # Test clean.
    assert isinstance(dat.clean(), Fex)
    assert dat.clean().columns is dat.columns
    assert dat.clean().sampling_freq == dat.sampling_freq

    # Test decompose: every algorithm along both axes. nnmf needs
    # non-negative input, so it factorizes a shifted copy of the data.
    n_components = 3
    nonnegative = dat + 100
    for axis in (1, 0):
        for algorithm in ("pca", "ica", "nnmf", "fa"):
            source = nonnegative if algorithm == "nnmf" else dat
            stats = source.decompose(
                algorithm=algorithm, axis=axis, n_components=n_components
            )
            assert n_components == stats["components"].shape[1]
            assert n_components == stats["weights"].shape[1]
def test_fex():
    """Smoke-test the Fex class against the bundled iMotions-FACET fixtures."""
    # NOTE(review): this is a second definition of ``test_fex`` in the same
    # module; it shadows the earlier one, so pytest will only collect this
    # version. Confirm whether keeping both definitions is intentional.

    # Reading through an uninitialised Fex must raise.
    with pytest.raises(Exception):
        Fex().read_feat()
    with pytest.raises(Exception):
        Fex().read_facet()
    with pytest.raises(Exception):
        Fex().read_openface()
    with pytest.raises(Exception):
        Fex().read_affectiva()

    # iMotions-FACET fixtures: the pre-v6 format...
    path_v5 = os.path.join(get_test_data_path(), 'iMotions_Test_v5.txt')
    dat = Fex(read_facet(path_v5), sampling_freq=30)

    # ...and the v6+ format.
    path_v6 = os.path.join(get_test_data_path(), 'iMotions_Test_v6.txt')
    df = read_facet(path_v6)

    # Slicing accessors return the expected shapes.
    assert df.aus().shape == (519, 20)
    assert df.emotions().shape == (519, 12)
    assert df.facebox().shape == (519, 4)
    assert df.time().shape[-1] == 4
    assert df.design().shape[-1] == 4

    # Assign ten-frame sessions, trimming the leftover final frame.
    session_ids = np.array(
        [[x] * 10 for x in range(1 + int(len(df) / 10))]
    ).flatten()[:-1]
    dat = Fex(df, sampling_freq=30, sessions=session_ids)
    dat = dat[[
        'Joy', 'Anger', 'Surprise', 'Fear', 'Contempt', 'Disgust',
        'Sadness', 'Confusion', 'Frustration', 'Neutral', 'Positive',
        'Negative',
    ]]

    # A sessions vector shorter than the data raises ValueError.
    with pytest.raises(ValueError):
        Fex(df, sampling_freq=30, sessions=session_ids[:10])

    # An unknown feature column raises KeyError.
    with pytest.raises(KeyError):
        Fex(read_facet(path_v6, features=['NotHere']), sampling_freq=30)

    # Frame count matches the fixture.
    assert len(dat) == 519

    # One iterated chunk per unique session id.
    assert len(np.unique(dat.sessions)) == len(list(dat.itersessions()))

    # sampling_freq survives column and positional indexing.
    assert dat[['Joy']].sampling_freq == dat.sampling_freq
    assert dat.iloc[:, 0].sampling_freq == dat.sampling_freq

    # Downsampling 519 frames from 30 Hz to 10 Hz yields 52 frames.
    assert len(dat.downsample(target=10)) == 52

    # Upsampling to 60 Hz doubles the count (minus the boundary frame).
    assert len(dat.upsample(target=60, target_type='hz')) == (len(dat) - 1) * 2

    # Linear interpolation strictly reduces missing values.
    missing_after = dat.interpolate(method='linear').isnull().sum()['Positive']
    missing_before = dat.isnull().sum()['Positive']
    assert missing_after < missing_before
    dat = dat.interpolate(method='linear')

    # Pairwise distance on a single channel.
    d = dat[['Positive']].distance()
    assert isinstance(d, Adjacency)
    assert d.square_shape()[0] == len(dat)

    # Copying preserves type and metadata.
    assert isinstance(dat.copy(), Fex)
    assert dat.copy().sampling_freq == dat.sampling_freq

    # Rectification adds NaNs to the AU columns.
    rectified = df.rectification()
    assert (df[df.au_columns].isna().sum()[0]
            < rectified[rectified.au_columns].isna().sum()[0])

    # PSPI is computed for every frame.
    assert len(df.calc_pspi()) == len(df)

    # Every baseline reference works, alone and with the optional flags.
    for reference in ('median', 'mean', 'begin', dat.mean()):
        assert isinstance(dat.baseline(baseline=reference), Fex)
    for reference in ('median', 'mean', dat.mean()):
        assert isinstance(
            dat.baseline(baseline=reference, ignore_sessions=True), Fex)
        assert isinstance(
            dat.baseline(baseline=reference, normalize='pct'), Fex)
        assert isinstance(
            dat.baseline(baseline=reference, ignore_sessions=True,
                         normalize='pct'), Fex)

    # A bad baseline keyword raises ValueError.
    with pytest.raises(ValueError):
        dat.baseline(baseline='BadValue')

    # Summary extraction: per session, then pooled across sessions.
    dat2 = dat.loc[:, ['Positive', 'Negative']].interpolate()
    out = dat2.extract_summary(min=True, max=True, mean=True)
    assert len(out) == len(np.unique(dat2.sessions))
    assert np.array_equal(out.sessions, np.unique(dat2.sessions))
    assert out.sampling_freq == dat2.sampling_freq
    assert dat2.shape[1] * 3 == out.shape[1]
    out = dat2.extract_summary(min=True, max=True, mean=True,
                               ignore_sessions=True)
    assert len(out) == 1
    assert dat2.shape[1] * 3 == out.shape[1]

    # Column validation: too few columns...
    data_bad = dat.iloc[:, 0:10]
    with pytest.raises(Exception):
        _check_if_fex(data_bad, imotions_columns)

    # ...and too many.
    data_bad = dat.copy()
    data_bad['Test'] = 0
    with pytest.raises(Exception):
        _check_if_fex(data_bad, imotions_columns)

    # Cleaning keeps the type, columns, and sampling rate.
    assert isinstance(dat.clean(), Fex)
    assert dat.clean().columns is dat.columns
    assert dat.clean().sampling_freq == dat.sampling_freq

    # Decomposition: each algorithm along both axes. nnmf requires
    # non-negative data, so it factorizes a shifted copy.
    n_components = 3
    shifted = dat + 100
    for axis in (1, 0):
        for algorithm in ('pca', 'ica', 'nnmf', 'fa'):
            target = shifted if algorithm == 'nnmf' else dat
            stats = target.decompose(algorithm=algorithm, axis=axis,
                                     n_components=n_components)
            assert stats['components'].shape[1] == n_components
            assert stats['weights'].shape[1] == n_components