Example #1
def test_sfa_parameter_computation(dimension, n_samples):
    current_data = mixed_trigonometric_functions(dimension, n_samples)
    sfa = SFA()
    slow_features = sfa.fit_transform(current_data)
    W, b = sfa.affine_parameters()
    affine_transformed = np.dot(current_data, W.T) + b
    assert np.allclose(slow_features, affine_transformed)
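This test and several later ones (Examples #2, #3, #6, #7 and #8) call a mixed_trigonometric_functions helper and take dimension, n_samples and rank_deficit as arguments; neither the helper nor the parametrization is shown in these excerpts, and the arguments are presumably supplied by pytest fixtures or pytest.mark.parametrize. A minimal sketch of what such a helper could look like, assuming it mixes cosines of different frequencies through a random linear map and that rank_deficit duplicates latent columns to make the data rank-deficient (all names and defaults below are hypothetical):

import numpy as np
import pytest

def mixed_trigonometric_functions(dimension, n_samples=1000, rank_deficit=0):
    # Latent cosines of increasing frequency on a common time axis
    t = np.linspace(0, 2 * np.pi, n_samples).reshape(-1, 1)
    latent = np.cos(t * np.arange(1, dimension + 1))
    # Duplicating columns lowers the rank of the observed data
    if rank_deficit > 0:
        latent[:, -rank_deficit:] = latent[:, [0]]
    mixing = np.random.normal(size=(dimension, dimension))
    return np.dot(latent, mixing)

# Hypothetical parametrization matching the test signatures in these examples
@pytest.mark.parametrize("dimension", [4, 8])
@pytest.mark.parametrize("n_samples", [500, 2000])
def test_helper_shape(dimension, n_samples):
    assert mixed_trigonometric_functions(dimension, n_samples).shape == (n_samples, dimension)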
Example #2
def test_sfa_parameter_computation_rank_deficit_nonzero_fill(
        dimension, rank_deficit):
    current_data = mixed_trigonometric_functions(dimension,
                                                 rank_deficit=rank_deficit)
    sfa = SFA(fill_mode="noise")
    slow_features = sfa.fit_transform(current_data)
    with pytest.raises(RuntimeError):
        W, b = sfa.affine_parameters()
Example #3
def test_sfa_parameter_computation_rank_deficit_zero_fill(
        dimension, rank_deficit):
    current_data = mixed_trigonometric_functions(dimension,
                                                 rank_deficit=rank_deficit)
    sfa = SFA(fill_mode="zero")
    slow_features = sfa.fit_transform(current_data)
    W, b = sfa.affine_parameters()
    affine_transformed = np.dot(current_data, W.T) + b
    assert np.allclose(slow_features, affine_transformed)
Example #4
def initialize_layers(self):
    # Stack all layers except the last
    for build_idx, (field_w, field_h, stride_w, stride_h, n_components,
                    poly_degree) in enumerate(self.layer_configurations):
        if build_idx > 0 and (field_w == field_h == -1):
            # A (-1, -1) field covers the previous layer's full output
            field_w = slicer.reconstruction_shape[0]
            field_h = slicer.reconstruction_shape[1]
        try:
            input_shape = (self.input_shape if build_idx == 0
                           else slicer.reconstruction_shape)
            slicer = ReceptiveSlicer(input_shape=input_shape,
                                     field_size=(field_w, field_h),
                                     strides=(stride_w, stride_h))
        except AssertionError:
            raise ValueError(
                f"Layer {build_idx + 1}: Field ({field_w}, {field_h}) "
                f"with stride ({stride_w}, {stride_h}) does not fit data "
                f"dimension ({input_shape[0]}, {input_shape[1]})")
        self.sequence.append(slicer)
        if poly_degree > 1:
            pre_expansion_sfa = SFA(n_components,
                                    batch_size=self.internal_batch_size,
                                    fill_mode=None)
            self.sequence.append(pre_expansion_sfa)
            expansion = PolynomialFeatures(poly_degree)
            # PolynomialFeatures needs no incremental fitting, so alias
            # fit as partial to match the interface of the other steps
            expansion.partial = expansion.fit
            self.sequence.append(expansion)
        self.sequence.append(AdditiveNoise(self.noise_std))
        post_expansion_sfa = SFA(n_components,
                                 batch_size=self.internal_batch_size,
                                 fill_mode=None)
        self.sequence.append(post_expansion_sfa)
        self.sequence.append(Clipper(-4, 4))
        reconstructor = ReceptiveRebuilder(slicer.reconstruction_shape)
        self.sequence.append(reconstructor)
        self.layer_outputs.append(slicer.reconstruction_shape)
        if self.verbose > 0:
            print(f"WxH output layer {build_idx + 1}: " +
                  str(slicer.reconstruction_shape))
    # Flatten
    self.sequence.append(Flatten())
    # Last layer
    if self.final_degree > 1:
        pre_expansion_sfa = SFA(self.n_components,
                                batch_size=self.internal_batch_size,
                                fill_mode=None)
        self.sequence.append(pre_expansion_sfa)
        expansion = PolynomialFeatures(self.final_degree)
        expansion.partial = expansion.fit
        self.sequence.append(expansion)
    self.sequence.append(AdditiveNoise(self.noise_std))
    post_expansion_sfa = SFA(self.n_components,
                             batch_size=self.internal_batch_size,
                             fill_mode=None)
    self.sequence.append(post_expansion_sfa)
    self.sequence.append(Clipper(-4, 4))
    if self.verbose > 0:
        print("Shape of final output: " + str((self.n_components, )))
Example #5
split_ratio = 0.7
all_sequences = np.load("data/mmnist_data.npy").squeeze()
ground_truth = np.load("data/mmnist_positions.npy").squeeze()
n_points = all_sequences.shape[0]
split_idx = int(split_ratio * n_points)
all_sequences = all_sequences[:, ::, ::]  # full-resolution view of the frames
old_shape = all_sequences.shape[-2:]      # frame height and width before flattening

data = all_sequences.reshape(all_sequences.shape[0], -1)
training_data = data[:split_idx]
training_gt = ground_truth[:split_idx]
test_data = data[split_idx:]
test_gt = ground_truth[split_idx:]

sfa = SFA(2)

sfa.fit(training_data)
output = sfa.transform(test_data)

gt_delta = np.var(test_gt[1:] - test_gt[:-1], axis=0)
gt_order = np.argsort(gt_delta)
gt_labels = ["x", "y"]

fig, ax = plt.subplots(2, 2, sharex=True)
cutoff = 60
ax[0, 0].plot(output[:cutoff, 0])
ax[1, 0].plot(output[:cutoff, 1])
ax[0, 1].plot(test_gt[:cutoff, gt_order[0]])
ax[1, 1].plot(test_gt[:cutoff, gt_order[1]])
ax[0, 0].set_title("Extracted features")
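The excerpt loads moving-MNIST-style frame sequences together with ground-truth positions, fits a two-component SFA on the flattened frames, and orders the ground-truth coordinates by the variance of their one-step differences (gt_delta) so the slowest extracted feature can be compared with the slowest-moving coordinate. gt_labels is defined but never used above, presumably because the plotting code is cut off; a possible, purely illustrative continuation that finishes labelling the figure:

ax[0, 1].set_title(f"Ground truth ({gt_labels[gt_order[0]]}, slowest)")
ax[1, 1].set_title(f"Ground truth ({gt_labels[gt_order[1]]})")
ax[1, 0].set_xlabel("Time t")
ax[1, 1].set_xlabel("Time t")
plt.show()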
Example #6
def test_sfa_detects_rank_deficit(dimension, rank_deficit):
    sfa = SFA(fill_mode=None)
    current_data = mixed_trigonometric_functions(dimension,
                                                 rank_deficit=rank_deficit)
    with pytest.raises(ValueError):
        sfa.fit(current_data)
Example #7
def test_sfa_feature_order(dimension, n_samples):
    current_data = mixed_trigonometric_functions(dimension, n_samples)
    sfa = SFA()
    slow_features = sfa.fit_transform(current_data)
    explicit_delta_values = compute_delta(slow_features)
    assert np.allclose(explicit_delta_values, np.sort(explicit_delta_values))
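compute_delta is not part of this excerpt. SFA sorts its output by delta value, i.e. the time-averaged squared one-step difference of each feature, so a minimal stand-in could look like the following (the normalization by variance is an extra assumption; for unit-variance SFA output it changes nothing):

import numpy as np

def compute_delta(signals):
    # Mean squared one-step difference per column, normalized by variance
    diffs = np.diff(signals, axis=0)
    return np.mean(diffs ** 2, axis=0) / np.var(signals, axis=0)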
Example #8
def test_sfa_constraints(dimension, n_samples):
    current_data = mixed_trigonometric_functions(dimension, n_samples)
    sfa = SFA()
    slow_features = sfa.fit_transform(current_data)
    covariance_matrix = np.cov(slow_features.T)
    assert np.allclose(covariance_matrix, np.eye(dimension))
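The covariance check above covers two of the standard SFA constraints, unit variance and decorrelation. The third is zero mean; a sketch of an analogous test, reusing the same hypothetical helper (not part of the original excerpt):

def test_sfa_zero_mean(dimension, n_samples):
    current_data = mixed_trigonometric_functions(dimension, n_samples)
    slow_features = SFA().fit_transform(current_data)
    assert np.allclose(slow_features.mean(axis=0), np.zeros(dimension))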
Example #9
def test_sfa_is_fitted(data):
    est = SFA()
    est.fit(data)
    assert hasattr(est, 'is_fitted_')
Example #10
# Generate latent cosine signals; three of the four columns are scalar
# multiples of cos(t), so the data is deliberately rank-deficient
x = np.hstack([np.cos(t), 0.5 * np.cos(t), np.cos(2 * t), 1.5 * np.cos(t)])

# Compute random affine mapping of cosines (observed)
A = np.random.normal(0, 1, (dim, dim))
b = np.random.normal(0, 2, (1, dim))
data = np.dot(x, A) + b

# Plot cosines and mapped data, then extract and plot the slow features
# once per fill mode
fig, ax = plt.subplots(2 + len(fill_modes), 1, sharex=True)
fig.set_size_inches(8, 18)
fig.subplots_adjust(hspace=0.5)
for d in range(n_slow_features):
    ax[0].plot(x[:, d])
ax[1].plot(data)
for idx, fill_mode in enumerate(fill_modes):
    sfa = SFA(n_slow_features, fill_mode=fill_mode)
    slow_features = sfa.fit_transform(data)
    ax[2 + idx].plot(slow_features[:, :-1])
    ax[2 + idx].plot(slow_features[:, -1], linestyle=":", color="purple")
    ax[2 + idx].set_title(f"Extracted features, fill_mode='{fill_mode}'")
    ax[2 + idx].set_xlabel("Time t")
ax[0].set_title("x(t)")
ax[1].set_title("A⋅x(t) + b")
for idx in range(2 + len(fill_modes)):
    ax[idx].set_ylabel("Features")
plt.show()
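This excerpt uses t, dim, n_slow_features and fill_modes without defining them, and omits its imports. A plausible setup is sketched below; dim = 4 follows from the four stacked columns above, while the remaining values and the choice of fill modes to compare are assumptions:

import numpy as np
import matplotlib.pyplot as plt
from sksfa import SFA  # assuming the scikit-learn-compatible sksfa package

n_samples = 500
dim = 4                         # four columns are stacked into x above
n_slow_features = 3             # more than the data rank (2), so the last feature must be filled
fill_modes = ["zero", "noise"]  # the fill strategies compared in the plots
t = np.linspace(0, 4 * np.pi, n_samples).reshape(n_samples, 1)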
Example #11
# Generate time axes with different frequencies and random phase shifts
t = np.linspace(0, 2*np.pi, n_samples).reshape(n_samples, 1)
t = t * np.arange(1, dim+1)
t += np.random.uniform(0, 2*np.pi, (1, dim))

# Generate latent cosine signals
x = np.cos(t)

# Compute random affine mapping of cosines (observed)
A = np.random.normal(0, 1, (dim, dim))
b = np.random.normal(0, 2, (1, dim))
data = np.dot(x, A) + b

# Extract slow features from observed data
sfa = SFA(n_slow_features)
slow_features = sfa.fit_transform(data)

# Plot cosines, mapped data, and extracted features
fig, ax = plt.subplots(3, 1, sharex=True)
fig.subplots_adjust(hspace=0.5)
for d in reversed(range(n_slow_features, dim)):
    ax[0].plot(x[:, d], color=(0.2, 0.2, 0.2, 0.25))
for d in range(n_slow_features):
    ax[0].plot(x[:, d])
ax[1].plot(data)
ax[2].plot(slow_features)
ax[0].set_title("x(t), slowest features colored")
ax[1].set_title("A⋅x(t) + b")
ax[2].set_title("Extracted features")
ax[2].set_xlabel("Time t")
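As with the previous example, the constants and imports are not shown; values such as the following would make the excerpt runnable (purely illustrative):

n_samples = 500
dim = 8              # number of latent cosines / observed dimensions
n_slow_features = 3  # how many of the slowest features to extract and highlight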
Example #12
lem = LEM(2, n_neighbors=50)
embedded = lem.fit_transform(data)
A = lem.affinity_matrix_

restart_rate = 500
n_random_samples = 1500
trajectory = randomWalkFromCSC(sp.sparse.csc_matrix(A),
                               n_random_samples,
                               restart_rate=restart_rate)
walk_data = data[trajectory]

visited = np.unique(trajectory)
non_visited = np.setdiff1d(np.arange(0, n_points), visited)

pf = PolynomialFeatures(1)
sfa = SFA(2, batch_size=restart_rate if restart_rate > 0 else None)
sf = sfa.fit_transform(pf.fit_transform(walk_data))
oos = sfa.transform(pf.transform(data[non_visited]))

pca = PCA(2)
pc = pca.fit_transform(data)

fig = plt.figure()
fig.set_size_inches(8, 12)
fig.subplots_adjust(hspace=0.5)

ax_3d = fig.add_subplot(321, projection='3d')
ax_3d.set_title("Wavy circle data")
ax_rw = fig.add_subplot(323, projection='3d')
ax_rw.set_title("Random walk samples")
ax_lem = fig.add_subplot(322)
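randomWalkFromCSC is a project-specific helper that is not shown here, and the excerpt itself breaks off while the subplots are being created. A rough stand-in, assuming the helper walks the graph defined by the sparse affinity matrix, jumps to a fresh random node every restart_rate steps, and returns the sequence of visited node indices:

import numpy as np

def randomWalkFromCSC(affinity_csc, n_steps, restart_rate=0):
    # Random walk over a sparse affinity matrix; transition probabilities
    # are proportional to the affinities of the current node's neighbors.
    rng = np.random.default_rng()
    n_nodes = affinity_csc.shape[0]
    node = rng.integers(n_nodes)
    trajectory = []
    for step in range(n_steps):
        if restart_rate > 0 and step % restart_rate == 0:
            node = rng.integers(n_nodes)  # periodic restart
        column = affinity_csc.getcol(node).toarray().ravel()
        neighbors = np.flatnonzero(column)
        if neighbors.size == 0:
            node = rng.integers(n_nodes)  # isolated node: restart immediately
        else:
            weights = column[neighbors] / column[neighbors].sum()
            node = rng.choice(neighbors, p=weights)
        trajectory.append(node)
    return np.array(trajectory)

Passing batch_size=restart_rate to SFA above presumably keeps each training batch inside a single walk segment, so that temporal differences are never taken across a restart.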