Exemplo n.º 1
0
def init_objects(dt_tsid, dt_mpc, k_max_loop, k_mpc, T_mpc, type_MPC,
                 predefined, h_init, kf_enabled, perfectEstimator):
    """Instantiate the helper objects needed by the control loop.

    Args:
        dt_tsid (float): time step of TSID
        dt_mpc (float): time step of the MPC
        k_max_loop (int): maximum number of iterations of the simulation
        k_mpc (int): number of tsid iterations for one iteration of the mpc
        T_mpc (float): duration of mpc prediction horizon
        type_MPC (bool): which MPC you want to use (PA's or Thomas')
        predefined (bool): predefined reference velocity (True) or joystick (False)
        h_init (float): initial height of the robot base
        kf_enabled (bool): complementary filter (False) or kalman filter (True)
        perfectEstimator (bool): if we use a perfect estimator

    Returns:
        tuple: ``(joystick, logger, estimator)`` ready for the control loop.
    """
    # State estimator (complementary or Kalman filter depending on kf_enabled).
    estimator = Estimator.Estimator(dt_tsid, k_max_loop, h_init,
                                    kf_enabled, perfectEstimator)

    # Data logger sized for the whole simulation.
    logger = Logger.Logger(k_max_loop, dt_tsid, dt_mpc, k_mpc, T_mpc,
                           type_MPC)

    # Reference-velocity source (predefined profile or physical joystick).
    joystick = Joystick.Joystick(predefined)

    return joystick, logger, estimator
Exemplo n.º 2
0
def classifier(model, train, test, scoring, k, n, col):
    """Create, tune, train and evaluate a classification model.

    A grid search over a model-specific parameter grid selects the best
    hyper-parameters, the resulting estimator is fitted on ``train``,
    checked against ``test``, and diagnostic plots are produced.

    Args:
        model (string): the model to use to classify; possible choices are
            "svm" for svm, "mlp" for mlp-classifier, "forest" for
            randomforestclassifier, and "knn" for knearestneighbors.
            (The original docstring said "forests", which the code never
            matched — the dispatch below compares against "forest".)
        train (pandas dataframe): the data to train the model with
        test (pandas dataframe): the data to test the model with
        scoring (string): how to prioritize what to optimize;
            for more options, check http://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter
        k (int): the amount of features to select
        n (int): the number of dimensions to reduce to
        col (string): the column you are trying to predict

    Returns:
        (estimator): the fitted wrapper if it's needed for future purposes,
        or ``None`` when ``model`` is not recognized.
    """
    if model == "svm":
        classify = svm.SVC()
        # Exponential grids for C and gamma (standard RBF-SVM search).
        parameters = {
            'kernel': ['rbf'],
            'C': [2**i for i in range(0, 20)],
            'gamma': [2**i for i in range(-20, 0)]
        }
    elif model == "mlp":
        classify = MLPClassifier(random_state=19654)
        parameters = {
            'hidden_layer_sizes': [60, 70, 80, 90, [80, 30], 100],
            'alpha': [2**i for i in range(-13, 14)]
        }
    elif model == "forest":
        classify = RandomForestClassifier(random_state=154)
        parameters = {'n_estimators': [30, 35, 40, 42, 45]}
    elif model == "knn":
        classify = KNeighborsClassifier()
        parameters = {
            'n_neighbors': range(1, 15),
            'weights': ['uniform', 'distance']
        }
    else:
        # Unknown model name: report and return None explicitly.
        print("no model recognized")
        return None
    GS = GridSearchCV(classify, parameters, verbose=2, scoring=scoring)
    # Project wrapper handles feature selection (k) and reduction (n).
    est = Estimator.estimator(col, GS, k, True, n)
    est.fit(train)
    print(est.estimator.best_estimator_)
    est.check_model(test)
    est.plot()
    return est
Exemplo n.º 3
0
	def step_forward(self):
		"""Advance one estimation step by delegating to ``Estimator.step_forward``.

		NOTE(review): this calls the base implementation explicitly on ``self``;
		presumably a thin wrapper kept for interface uniformity — confirm
		against the Estimator class definition.
		"""
		return Estimator.step_forward(self)
Exemplo n.º 4
0
        # Store the results of the different algorithms
        # (per-attempt training scores for LS / RLS / KRLS).
        ls_results = list()
        rls_results = list()
        krls_results = list()

        # List of testing results, one list per algorithm.
        ls_test_results = list()
        rls_test_results = list()
        krls_test_results = list()

        # Progress log: elapsed seconds since big_ben, wall clock, attempt id.
        print('%.2f - %s - %d - Preparing dataset...' %
              (time.time() - big_ben, datetime.now().strftime("%H:%M:%S"),
               attempt))
        # Obtain 4 different matrices (x-train, x-test, y-train, y-test).
        # NOTE(review): assumes df holds features plus the y_column target and
        # that testing_size is a fraction/size accepted by normalize_set —
        # confirm against Estimator.normalize_set.
        xtr, xts, ytr, yts = Estimator.normalize_set(df,
                                                     y_column,
                                                     ts_size=testing_size)

        # LS algorithm: ordinary least squares via cross-validated training.
        print('%.2f - %s - %d - Training LS...' %
              (time.time() - big_ben, datetime.now().strftime("%H:%M:%S"),
               attempt))
        ls = Estimator.train_estimator(LinearRegression(),
                                       xtr,
                                       ytr,
                                       params={},
                                       folds=folds)
        print('%.2f - %s - %d - Testing LS...' %
              (time.time() - big_ben, datetime.now().strftime("%H:%M:%S"),
               attempt))
        # Two scores returned per evaluation (metric pair left to test_estimator).
        ls_score, ls_score_2 = Estimator.test_estimator(ls, xts, yts)
Exemplo n.º 5
0
from datetime import datetime
from sys import argv
import os
import sys
from Estimator import *

start = datetime.now()


# Command-line interface: input corpus, q/e MLE parameter files, output path.
input_file = argv[1]
q_file = argv[2]
e_file = argv[3]
output_file = argv[4]

data = []
estimator = Estimator()
estimator.load_from_file(q_file, e_file)

# Read the corpus; pad each sentence with two start tokens so every real
# word has a full trigram of context. ``line[:-1]`` strips the newline.
# FIX: the original used the Python-2-only builtin ``file(input_file)``;
# ``open()`` is equivalent on Python 2 and also works on Python 3, and the
# ``with`` block closes the handle deterministically.
with open(input_file) as corpus:
    for line in corpus:
        arr = [['***', 'STR'], ['***', 'STR']] + line[:-1].split()
        data.append(arr)

# Greedy tagging: pick the best tag for each word given the two previous
# entries, replacing the word in place with a [word, tag] pair.
for sentence in data:
    for i in range(2, len(sentence)):
        a, b, c = sentence[i - 2], sentence[i - 1], sentence[i]
        t3 = estimator.get_best_tag(a, b, c)
        sentence[i] = [sentence[i], t3]


output = []
for line in data:
Exemplo n.º 6
0
from datetime import datetime
from sys import argv
import os
import sys
from Estimator import *

# NOTE(review): presumably the count threshold below which words are treated
# as unknown — confirm against where threshold_unk is consumed.
threshold_unk = 1

# Command-line interface: corpus file, q-MLE output path, e-MLE output path.
fname = argv[1]
qMLE = argv[2]
eMLE = argv[3]

estimator = Estimator()
# NOTE(review): ``file`` shadows the (Python 2) builtin of the same name;
# left as-is because unseen code below may reference it, but worth renaming.
file = read_data(fname)
# 90/10 split of the loaded sentences into training and development sets.
train = file[:int(len(file) * 0.9)]
dev = file[len(train):]


def MLETrain():
    for line in train:
        for a, b, c in zip(line, line[1:], line[2:]):
            tag1, tag2, tag3 = a[1], b[1], c[1]
            estimator.addQLine(tag1, tag2, tag3)
            estimator.addELine(a)

        tag2, tag3 = b[1], c[1]
        estimator.addQLine(tag2, tag3)
        estimator.addELine(b)
        estimator.addELine(c)

    for line in dev: