Example #1

# 3) Run the actual ODIN regression by initializing the optimizer, building the
#    model and calling the fit() function

# Set some positivity constraints on the states (not strictly necessary)
state_bounds = np.array([[0.0, 10.0], [0.0, 10.0]])

# ODIN optimizer
odin_optimizer = ODIN(trainable_lotka_volterra,
                      system_obs,
                      t_obs,
                      gp_kernel='RBF',  # For LV we use the RBF kernel
                      optimizer='L-BFGS-B',  # L-BFGS-B optimizer for the bounds
                      initial_gamma=1.0,  # initial gamma value
                      initial_gamma_prime=10.0,  # initial gamma' value
                      use_sec_grads=True,  # we will use second order derivatives
                      train_gamma=True,  # gamma will be trained as well
                      train_gamma_prime=True,  # we will train gamma'
                      state_bounds=state_bounds,  # Pass the state bounds
                      single_gp=False,  # Here we use one GP per state
                      basinhopping=False,  # we don't use the basinhopping here
                      time_normalization=True,  # time normalization on
                      state_normalization=True)  # states normalization on

# Build the model
odin_optimizer.build_model()

# Fit the model
final_theta, final_gamma, final_x = odin_optimizer.fit()
print(final_theta)
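
# For context: a minimal sketch of how noisy Lotka-Volterra observations
# like t_obs / system_obs could be generated. The true parameters, initial
# state, time grid and noise level below are illustrative assumptions, not
# the values used above.
import numpy as np
from scipy.integrate import odeint

def lotka_volterra(x, t, theta):
    # dx1/dt = th1*x1 - th2*x1*x2,  dx2/dt = -th3*x2 + th4*x1*x2
    th1, th2, th3, th4 = theta
    return [th1 * x[0] - th2 * x[0] * x[1],
            -th3 * x[1] + th4 * x[0] * x[1]]

t_obs = np.linspace(0.0, 2.0, 100)
x_true = odeint(lotka_volterra, [5.0, 3.0], t_obs, args=([2.0, 1.0, 4.0, 1.0],))
system_obs = (x_true + 0.1 * np.random.randn(*x_true.shape)).T  # (n_states, n_points)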
Example #2

# Trainable object (n_states, n_points and theta_bounds are assumed to be
# set up as in the other examples)
trainable_protein_transduction = TrainableProteinTransduction(
    n_states, n_points, bounds=theta_bounds)

# 3) Run the actual ODIN regression by initializing the optimizer, building the
#    model and calling the fit() function

# Constraints on states
state_bounds = np.array([[0.0, 2.0], [0.0, 2.0], [0.0, 2.0], [0.0, 2.0],
                         [0.0, 2.0]])

# ODIN optimizer
odin_optimizer = ODIN(
    trainable_protein_transduction,
    system_obs,
    t_obs,
    gp_kernel='Sigmoid',  # For PT we use the Sigmoid kernel
    optimizer='L-BFGS-B',  # L-BFGS-B optimizer for the bounds
    initial_gamma=1e-1,  # initial gamma value
    train_gamma=True,  # gamma will be trained as well
    gamma_bounds=(1e-6, 10.0),  # bounds on gamma
    state_bounds=state_bounds,  # Pass the state bounds
    single_gp=False,  # Here we use one GP per state
    basinhopping=True,  # Here we do use basinhopping
    time_normalization=False,  # empirically, the fit is better with it off
    state_normalization=True)  # states normalization on

# Build the model
odin_optimizer.build_model()

# Fit the model
final_theta, final_gamma, final_x = odin_optimizer.fit()
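
# A quick sanity check on the fit: compare the inferred states final_x with
# the noisy observations. This assumes final_x comes back with the same
# (n_states, n_points) shape as system_obs.
import numpy as np
import matplotlib.pyplot as plt

rmse = np.sqrt(np.mean((final_x - system_obs) ** 2, axis=1))
print('per-state RMSE of the ODIN state estimate:', rmse)

fig, axes = plt.subplots(len(final_x), 1, sharex=True)
for i, ax in enumerate(np.atleast_1d(axes)):
    ax.plot(np.ravel(t_obs), system_obs[i], 'k.', label='observed')
    ax.plot(np.ravel(t_obs), final_x[i], 'r-', label='ODIN estimate')
np.atleast_1d(axes)[0].legend()
plt.show()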
Example #3
# Constraints on parameters
theta_bounds = np.array([[0.0, 100.0], [0.0, 100.0], [0.0, 100.0]])

trainable_fitzhugh_nagumo = TrainableFitzHughNagumo(n_states,
                                                    n_points,
                                                    bounds=theta_bounds)

# 3) Run the actual ODIN regression by initializing the optimizer, building the
#    model and calling the fit() function

# ODIN optimizer
odin_optimizer = ODIN(
    trainable_fitzhugh_nagumo,
    system_obs,
    t_obs,
    gp_kernel='Matern52',  # For FHN we use the Matern kernel
    optimizer='L-BFGS-B',  # L-BFGS-B optimizer for the bounds
    initial_gamma=1.0,  # initial gamma value
    train_gamma=True,  # gamma will be trained as well
    single_gp=False,  # Here we use one GP per state
    basinhopping=True,  # Basinhopping activated
    basinhopping_options={'n_iter': 10},  # Set 10 iterations
    time_normalization=True,  # time normalization on
    state_normalization=True)  # states normalization on

# Build the model
odin_optimizer.build_model()

# Fit the model
final_theta, final_gamma, final_x = odin_optimizer.fit()
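
# For reference, the FitzHugh-Nagumo dynamics that TrainableFitzHughNagumo
# presumably parameterizes; theta = (a, b, c) matches the three parameter
# bounds above, though the exact form used inside the class is an
# assumption.
def fitzhugh_nagumo(x, t, theta):
    a, b, c = theta
    V, R = x
    dV = c * (V - V ** 3 / 3.0 + R)
    dR = -(V - a + b * R) / c
    return [dV, dR]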
Example #4
n_states, n_points = system_obs.shape


# 2) Initialize the provided TrainableLorenz96 class

# Trainable object
trainable_l96 = TrainableLorenz96(n_states, n_points)


# 3) Run the actual ODIN regression by initializing the optimizer, building the
#    model and calling the fit() function

# ODIN optimizer
odin_optimizer = ODIN(trainable_l96,
                      system_obs,
                      t_obs,
                      gp_kernel='Matern32',  # For L96 we use the Matern kernel
                      optimizer='L-BFGS-B',  # L-BFGS-B optimizer for the bounds
                      initial_gamma=1.0,  # initial gamma value
                      train_gamma=True,  # gamma will be trained as well
                      single_gp=False,  # Here we use one GP per state
                      basinhopping=False,  # we don't use the basinhopping here
                      time_normalization=True,  # time normalization on
                      state_normalization=True)  # states normalization on

# Build the model
odin_optimizer.build_model()

# Fit the model
final_theta, final_gamma, final_x = odin_optimizer.fit()
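
# For context: a minimal sketch of how a Lorenz96 data set like system_obs
# could be simulated. The number of states, forcing F, time grid and noise
# level are illustrative assumptions.
import numpy as np
from scipy.integrate import odeint

def lorenz96(x, t, F):
    # dx_i/dt = (x_{i+1} - x_{i-2}) * x_{i-1} - x_i + F, with cyclic indices
    return (np.roll(x, -1) - np.roll(x, 2)) * np.roll(x, 1) - x + F

F = 8.0
x0 = F * np.ones(10)
x0[0] += 0.01  # small perturbation to move off the fixed point
t_obs = np.linspace(0.0, 5.0, 50)
x_true = odeint(lorenz96, x0, t_obs, args=(F,))
system_obs = (x_true + 0.5 * np.random.randn(*x_true.shape)).T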
Example #5

# Generate some inliers: two Gaussian clusters (assumed: 100 base 2-D
# points, which yields the 200 inlier labels used below)
X_inliers = 0.3 * np.random.randn(100, 2)
X_inliers = np.r_[X_inliers + 2, X_inliers - 2]

# Generate some outliers
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
X = np.r_[X_inliers, X_outliers]

n_outliers = len(X_outliers)
ground_truth = np.ones(len(X), dtype=int)
ground_truth[-n_outliers:] = -1

y = np.zeros(200, dtype=int)
y_outlier = np.ones(20, dtype=int)
y = np.append(y, y_outlier)

# use my class
odin = ODIN(t=5)
coef = odin.fit_predict(X)
#print(coef)
'''
probedata = clf.fit_predict(data)
print(clf.threshold_)
'''
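
# The ODIN class used above is not shown in this snippet. As a sketch of
# what it presumably computes (an assumption, not the actual
# implementation): the Outlier Detection using Indegree Number score comes
# from the kNN graph, where points that appear in few other points' kNN
# lists are flagged as outliers.
from sklearn.neighbors import NearestNeighbors

def odin_scores(X, k=5):
    # Build the kNN graph; column 0 of idx is each point itself
    _, idx = NearestNeighbors(n_neighbors=k + 1).fit(X).kneighbors(X)
    indegree = np.zeros(len(X))
    for neighbors in idx[:, 1:]:
        indegree[neighbors] += 1  # count incoming kNN edges
    # Low indegree -> likely outlier; invert so larger means more anomalous
    return 1.0 / (1.0 + indegree)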
color = np.array(['k', 'b'])

plt.title("Outlier Detection using Indegree Number (ODIN)")
plt.scatter(X[:, 0], X[:, 1], color=color[y], s=3., label='Data points')
# plot circles with radius proportional to the outlier scores

plt.scatter(X[:, 0],
            X[:, 1],
            s=200 * coef,