Example #1
def test_call_fit_with_arguments_score_does_not_accept():
    mlflow.sklearn.autolog()

    from sklearn.linear_model import SGDRegressor

    assert "intercept_init" in _get_arg_names(SGDRegressor.fit)
    assert "intercept_init" not in _get_arg_names(SGDRegressor.score)

    mock_obj = mock.Mock()

    def mock_score(self, X, y, sample_weight=None):  # pylint: disable=unused-argument
        mock_obj(X, y, sample_weight)
        return 0

    assert inspect.signature(
        SGDRegressor.score) == inspect.signature(mock_score)

    SGDRegressor.score = mock_score
    model = SGDRegressor()
    X, y = get_iris()

    with mlflow.start_run() as run:
        model.fit(X, y, intercept_init=0)
        mock_obj.assert_called_once_with(X, y, None)

    run_id = run.info.run_id
    params, metrics, tags, artifacts = get_run_data(run_id)
    assert params == truncate_dict(
        stringify_dict_values(model.get_params(deep=True)))
    assert {TRAINING_SCORE: model.score(X, y)}.items() <= metrics.items()
    assert tags == get_expected_class_tags(model)
    assert MODEL_DIR in artifacts
    assert_predict_equal(load_model_by_run_id(run_id), model, X)
from sklearn.linear_model import SGDRegressor


def linear_regression(features, values):
    """
    Perform linear regression given a data set with an arbitrary number of features.
    """
    # n_iter was renamed to max_iter in recent scikit-learn releases
    clf = SGDRegressor(alpha=0.1, max_iter=20)
    clf.fit(features, values)

    return clf.intercept_, clf.coef_
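A minimal usage sketch for linear_regression above (the feature matrix and target values below are made-up illustration data, not from the original source):

import numpy as np

features = np.array([[1.0, 2.0], [2.0, 1.0], [3.0, 4.0]])  # 3 samples, 2 features
values = np.array([1.5, 1.5, 3.5])

intercept, coefficients = linear_regression(features, values)
print(intercept, coefficients)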
Example #4
def test_both_fit_and_score_contain_sample_weight(sample_weight_passed_as):
    mlflow.sklearn.autolog()

    from sklearn.linear_model import SGDRegressor

    # ensure that we use an appropriate model for this test
    assert "sample_weight" in _get_arg_names(SGDRegressor.fit)
    assert "sample_weight" in _get_arg_names(SGDRegressor.score)

    mock_obj = mock.Mock()

    def mock_score(self, X, y, sample_weight=None):  # pylint: disable=unused-argument
        mock_obj(X, y, sample_weight)
        return 0

    assert inspect.signature(
        SGDRegressor.score) == inspect.signature(mock_score)

    SGDRegressor.score = mock_score
    model = SGDRegressor()
    X, y = get_iris()
    sample_weight = abs(np.random.randn(len(X)))

    with mlflow.start_run() as run:
        if sample_weight_passed_as == "positional":
            model.fit(X, y, None, None, sample_weight)
        elif sample_weight_passed_as == "keyword":
            model.fit(X, y, sample_weight=sample_weight)
        mock_obj.assert_called_once_with(X, y, sample_weight)

    run_id = run.info.run_id
    params, metrics, tags, artifacts = get_run_data(run_id)
    assert params == truncate_dict(
        stringify_dict_values(model.get_params(deep=True)))
    assert {TRAINING_SCORE: model.score(X, y)}.items() <= metrics.items()
    assert tags == get_expected_class_tags(model)
    assert MODEL_DIR in artifacts
    assert_predict_equal(load_model_by_run_id(run_id), model, X)
Example #5
class LinearAprxAgent:
    def create_policy(self, func_approximator, epsilon=0):
        # from lab 8
        def policy_fn(state):
            """
            
            Input:
                state: a 2D array with the position and velocity
            Output:
                A,q_values: 
            """
            action_index = np.ones(self.num_of_actions,
                                   dtype=float) * epsilon / self.num_of_actions

            #transform to the same shape as the model was trained on
            state_transformed = self.feature_transformer.transform([state])
            q_values = self.func_approximator.predict(state_transformed)

            best_action = np.argmax(q_values)
            action_index[best_action] += (1.0 - epsilon)

            return action_index, q_values  # return the potentially stochastic policy (which is due to the exploration)

        return policy_fn  # return a handle to the function so we can call it in the future

    def __init__(self, env):

        # RBF / SGD hyper parameters
        self.SGD_learning_rate = "optimal"  # 'constant', 'optimal', 'invscaling', 'adaptive'
        self.tol = 1e-5  # stopping criterion
        self.SGD_max_iter = int(1e4)  # SGDRegressor expects an integer max_iter

        self.func_approximator = SGDRegressor(
            learning_rate=self.SGD_learning_rate,
            tol=self.tol,
            max_iter=self.SGD_max_iter,
            loss='huber')

        self.feature_transformer = sklearn.pipeline.FeatureUnion([
            ("rbf1", RBFSampler(gamma=12.8, n_components=50)),
            ("rbf2", RBFSampler(gamma=6.4, n_components=50)),
            ("rbf3", RBFSampler(gamma=3.2, n_components=50)),
            ("rbf4", RBFSampler(gamma=1.6, n_components=50)),
            ("rbf5", RBFSampler(gamma=0.8, n_components=50)),
            ("rbf6", RBFSampler(gamma=0.4, n_components=50)),
            ("rbf7", RBFSampler(gamma=0.2, n_components=50)),
            ("rbf8", RBFSampler(gamma=0.1, n_components=50))
        ])

        self.num_of_actions = env.action_space.n
        self.env = env
        # epsilon-greedy policy based on the learned function approximator
        self.policy = self.create_policy(self.func_approximator, 1)

        self.episodes = 200
        self.times_exploited = 0

        # hyper parameters for the epsilon-greedy exploration schedule
        self.initial_epsilon = 1  # start fully exploratory
        self.decrease_factor = (1 / self.episodes) / 1.25  # per-episode epsilon decay

    def train(self):
        states, all_rewards, all_total_rewards = self.run_all_episodes(
            "Training")
        state_transformed = self.feature_transformer.transform(states)

        q_values = self.func_approximator.predict(state_transformed)
        return states, all_rewards, all_total_rewards, self.func_approximator, state_transformed, q_values

    def evaluate(self,
                 intercept,
                 coeff,
                 states_transformed,
                 q_values,
                 last_reward,
                 episodes=100):
        self.func_approximator = SGDRegressor(
            learning_rate=self.SGD_learning_rate,
            tol=self.tol,
            max_iter=self.SGD_max_iter,
            loss='huber')
        self.func_approximator.fit(states_transformed,
                                   last_reward,
                                   coef_init=coeff,
                                   intercept_init=intercept)

        self.episodes = episodes
        self.epsilon = -1000
        states, all_rewards, all_total_rewards = self.run_all_episodes(
            "Evaluation", evaluate=True)
        return states, all_rewards, all_total_rewards

    def run_all_episodes(self, title, evaluate=False):
        all_total_rewards = []
        all_rewards = []
        epsilon = 0 if evaluate else self.initial_epsilon  # no exploration during evaluation

        power = 1
        for episode in range(1, self.episodes + 1):
            states, rewards = self.run_episode(epsilon, evaluate)
            total_reward = np.sum(rewards)

            self.times_exploited = 0
            all_rewards.append(rewards)
            all_total_rewards.append(total_reward)
            if not evaluate:
                epsilon = self.decrease_epsilon(epsilon, power)
            power += 0.10

        # plot the total rewards together with a smoothed (rolling mean) curve
        if not evaluate:
            window_size = int(self.episodes / 10)
            smoothed_rewards = pd.Series(all_total_rewards).rolling(
                window_size, min_periods=window_size).mean()
            smooth_plot(all_total_rewards, smoothed_rewards, title)
        return states, all_rewards, all_total_rewards

    #exponential decrease in epsilon
    def decrease_epsilon(self, epsilon, power):
        decrease = 0.005
        return epsilon * ((1 - decrease)**power)

    def run_episode(self, epsilon, evaluate=False):
        rewards = []
        states = []
        actions = []
        done = False

        state = self.env.reset()
        states.append(state)

        while not done:
            random_number = np.random.random()
            if random_number < epsilon:
                #explore
                action = np.random.choice(self.num_of_actions)
            else:
                #exploit
                action = self.get_action(state)
                self.times_exploited += 1

            new_state, reward, done, _info = self.env.step(action)  # classic gym 4-tuple API

            states.append(new_state)
            actions.append(action)
            rewards.append(reward)

            if not evaluate:
                #update policy function
                self.update(states[1:], rewards, epsilon)

            state = new_state

        return states, rewards

    def update(self, states, rewards, epsilon):

        #update the linear function
        self.feature_transformer.fit(states)
        states_transformed = self.feature_transformer.transform(states)

        self.func_approximator.fit(states_transformed, rewards)
        self.policy = self.create_policy(self.func_approximator, 0)

    def get_action(self, state):
        #linear function to get best action
        actions, q_values = self.policy(state)
        return np.argmax(actions)

    def get_action_text(self):
        # assumes an action_text attribute is set elsewhere on the agent
        return self.action_text

    def get_env(self):
        return self.env

    def get_chart_title(self):
        return "Action = " + self.action_text

    def get_weights(self):
        return self.func_approximator.get_params()

    def set_params(self, coef, intercept):
        # assign the learned linear-model parameters directly
        self.func_approximator.coef_ = coef
        self.func_approximator.intercept_ = intercept
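A rough usage sketch for the agent above, assuming the classic OpenAI Gym API (env.reset() returning just the observation and env.step() returning a 4-tuple, i.e. gym < 0.26) plus the imports and the smooth_plot helper the class already relies on; "MountainCar-v0" is chosen here only because its 2D state matches the position/velocity docstring:

import gym
import numpy as np

env = gym.make("MountainCar-v0")  # state = (position, velocity), 3 discrete actions
agent = LinearAprxAgent(env)

# train for the default 200 episodes and look at the overall reward
states, all_rewards, all_total_rewards, model, states_t, q_values = agent.train()
print("mean total reward over training:", np.mean(all_total_rewards))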
Example #6
# svm
from sklearn.svm import SVR
svm_reg = SVR(kernel="poly", degree=2, C=100, epsilon=0.1, gamma="scale")
svm_reg.fit(fires_prepared, fires_labels)

# decision tree
from sklearn.tree import DecisionTreeRegressor
tree_reg = DecisionTreeRegressor(max_depth=2, random_state=42)
tree_reg.fit(fires_prepared, fires_labels)

# random forest
from sklearn.ensemble import RandomForestRegressor
forest_reg = RandomForestRegressor(random_state=42)
forest_reg.fit(fires_prepared, fires_labels)

# 2-1: hyperparameter grid search
from sklearn.model_selection import GridSearchCV
print("sgd_reg.get_params().keys(): ", sgd_reg.get_params().keys())
print("svm_reg.get_params().keys(): ", svm_reg.get_params().keys())
print("tree_reg.get_params().keys(): ", tree_reg.get_params().keys())
print("forest_reg.get_params().keys(): ", forest_reg.get_params().keys())

params_sgd = [
    {
        'alpha': [0.1, 0.5],
        'epsilon': [0.1, 1]
    },
    {
        'alpha': [0.5, 0.6],
        'epsilon': [0.1, 0.7]
    },
]
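The snippet defines params_sgd but stops before running the search; a hedged continuation, assuming the sgd_reg, fires_prepared and fires_labels objects from the surrounding notebook, might look like:

grid_sgd = GridSearchCV(sgd_reg, params_sgd,
                        scoring="neg_mean_squared_error", cv=5)
grid_sgd.fit(fires_prepared, fires_labels)
print(grid_sgd.best_params_)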
Example #7
np.random.seed(0)
print("среднее значение отклика обучающей выборки: %f" %
      np.mean(sgd.predict(X_train)))
print(
    "корень из среднеквадратичной ошибки прогноза средним значением на обучающей выборке, обучение: %f"
    % np.sqrt(mean_squared_error(sgd.predict(X_train), y_train)))
print(
    "корень из среднеквадратичной ошибки прогноза средним значением на обучающей выборке, тест: %f"
    % np.sqrt(mean_squared_error(sgd.predict(X_test), y_test)))
print('коэффициент детерминации: %f' % sgd.score(X_test, y_test))
print('абсолютная ошибка: %f' %
      mean_absolute_error(y_test, sgd.predict(X_test)))

# In[348]:

sgd.get_params().keys()

# In[349]:

parameters_grid = {
    'max_iter': [3, 10, 50, 100],
    'penalty': ['l1', 'l2', 'none'],
    'alpha': [0., 0.01, 0.014, 0.1],
}

# In[350]:

grid_cv = GridSearchCV(sgd,
                       parameters_grid,
                       scoring='neg_mean_absolute_error',  # renamed from 'mean_absolute_error' in scikit-learn >= 0.18
                       cv=4)
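grid_cv is built here but not yet fitted; a typical continuation, assuming the X_train and y_train arrays prepared earlier in the notebook, would be:

grid_cv.fit(X_train, y_train)
print(grid_cv.best_params_)
print(grid_cv.best_score_)  # negated MAE, so values closer to zero are better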