Example #1
import numpy as np

# note: the ROCKETClassifier import path differs across sktime versions
# (older releases expose it under sktime.classification.shapelet_based)
from sktime.classification.kernel_based import ROCKETClassifier
from sktime.datasets import load_italy_power_demand


def test_rocket_on_power_demand():
    # load power demand data
    X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)
    X_test, y_test = load_italy_power_demand(split="test", return_X_y=True)
    indices = np.random.RandomState(0).permutation(100)

    # train ROCKET
    rocket = ROCKETClassifier(num_kernels=1000, random_state=0)
    rocket.fit(X_train, y_train)

    score = rocket.score(X_test.iloc[indices], y_test[indices])
    assert score >= 0.92
Example #2
import numpy as np
from numpy import testing

# note: the ROCKETClassifier import path differs across sktime versions
# (older releases expose it under sktime.classification.shapelet_based)
from sktime.classification.kernel_based import ROCKETClassifier
from sktime.datasets import load_gunpoint


def test_rocket_on_gunpoint():
    # load gunpoint data
    X_train, y_train = load_gunpoint(split="train", return_X_y=True)
    X_test, y_test = load_gunpoint(split="test", return_X_y=True)
    indices = np.random.RandomState(0).permutation(10)

    # train ROCKET
    rocket = ROCKETClassifier(num_kernels=1000, random_state=0)
    rocket.fit(X_train.iloc[indices], y_train[indices])

    # assert probabilities match the pre-computed expected values
    # (rocket_gunpoint_probas is defined alongside this test)
    probas = rocket.predict_proba(X_test.iloc[indices])
    testing.assert_array_equal(probas, rocket_gunpoint_probas)
Example #3
import numpy as np
from numpy import testing

# note: the ROCKETClassifier import path differs across sktime versions
# (older releases expose it under sktime.classification.shapelet_based)
from sktime.classification.kernel_based import ROCKETClassifier
from sktime.datasets import load_gunpoint


# n_jobs and ensemble_config are supplied by pytest parametrization
def test_rocket_ensemble_on_gunpoint(n_jobs, ensemble_config):
    ensemble_size, ensemble, n_estimators = ensemble_config

    # load gunpoint data
    X_train, y_train = load_gunpoint(split="train", return_X_y=True)
    X_test, y_test = load_gunpoint(split="test", return_X_y=True)
    indices = np.random.RandomState(0).permutation(10)

    # train ROCKET ensemble
    rocket_e = ROCKETClassifier(
        num_kernels=1000,
        ensemble_size=ensemble_size,
        ensemble=ensemble,
        n_estimators=n_estimators,
        random_state=0,
        n_jobs=n_jobs,
    )
    rocket_e.fit(X_train.iloc[indices], y_train[indices])

    # assert probabilities match the pre-computed expected values
    # (rocket_e_gunpoint_probas is defined alongside this test)
    probas = rocket_e.predict_proba(X_test.iloc[indices])
    testing.assert_array_equal(probas, rocket_e_gunpoint_probas)
Example #4
def set_classifier(cls, resampleId=None):
    """
    Basic way of creating the classifier to build using the default settings. This
    set up is to help with batch jobs for multiple problems to facilitate easy
    reproducability. You can set up bespoke classifier in many other ways.

    :param cls: String indicating which classifier you want
    :param resampleId: classifier random seed

    :return: A classifier.

    """
    name = cls.lower()
    # Distance based
    if name == "pf" or name == "proximityforest":
        return ProximityForest(random_state=resampleId)
    elif name == "pt" or name == "proximitytree":
        return ProximityTree(random_state=resampleId)
    elif name == "ps" or name == "proximityStump":
        return ProximityStump(random_state=resampleId)
    elif name == "dtwcv" or name == "kneighborstimeseriesclassifier":
        return KNeighborsTimeSeriesClassifier(metric="dtw")
    elif name == "ee" or name == "elasticensemble":
        return ElasticEnsemble()
    elif name == "shapedtw":
        return ShapeDTW()
    # Dictionary based
    elif name == "boss" or name == "bossensemble":
        return BOSSEnsemble(random_state=resampleId)
    elif name == "cboss" or name == "contractableboss":
        return ContractableBOSS(random_state=resampleId)
    elif name == "tde" or name == "temporaldictionaryensemble":
        return TemporalDictionaryEnsemble(random_state=resampleId)
    elif name == "weasel":
        return WEASEL(random_state=resampleId)
    elif name == "muse":
        return MUSE(random_state=resampleId)
    # Interval based
    elif name == "rise" or name == "randomintervalspectralforest":
        return RandomIntervalSpectralForest(random_state=resampleId)
    elif name == "tsf" or name == "timeseriesforest":
        return TimeSeriesForest(random_state=resampleId)
    elif name == "cif" or name == "canonicalintervalforest":
        return CanonicalIntervalForest(random_state=resampleId)
    elif name == "drcif":
        return DrCIF(random_state=resampleId)
    # Shapelet based
    elif name == "stc" or name == "shapelettransformclassifier":
        return ShapeletTransformClassifier(random_state=resampleId,
                                           time_contract_in_mins=1)
    elif name == "mrseql" or name == "mrseqlclassifier":
        return MrSEQLClassifier(seql_mode="fs", symrep=["sax", "sfa"])
    elif name == "rocket":
        return ROCKETClassifier(random_state=resampleId)
    # Hybrid
    elif name == "catch22":
        return Catch22ForestClassifier(random_state=resampleId)
    elif name == "hivecotev1":
        return HIVECOTEV1(random_state=resampleId)
    else:
        raise ValueError(f"Unknown classifier: {cls}")
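
For illustration, a minimal usage sketch of set_classifier follows. The dataset (load_gunpoint), the classifier key "rocket", and the accuracy check are assumptions chosen for the example and are not part of the original script.

# Hypothetical usage sketch for set_classifier (not part of the original script)
from sklearn.metrics import accuracy_score
from sktime.datasets import load_gunpoint

X_train, y_train = load_gunpoint(split="train", return_X_y=True)
X_test, y_test = load_gunpoint(split="test", return_X_y=True)

# build a ROCKET classifier seeded with the resample id
clf = set_classifier("rocket", resampleId=0)
clf.fit(X_train, y_train)
preds = clf.predict(X_test)
print("accuracy:", accuracy_score(y_test, preds))
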
# imports for the snippet below; the from_2d_array_to_nested and ROCKETClassifier
# import paths differ across sktime versions
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import (
    make_scorer,
    precision_score,
    average_precision_score,
    recall_score,
    f1_score,
)
from sktime.utils.data_processing import from_2d_array_to_nested
from sktime.classification.kernel_based import ROCKETClassifier

# data_input, label and pos_label are assumed to be defined earlier in the notebook

# row-wise min-max normalisation of the tabular data
norm_data = data_input.copy()
norm_data = norm_data.apply(lambda x: (x - x.min()) / (x.max() - x.min()), axis=1)
X_norm = norm_data.values

# binary label encoding
lb = LabelBinarizer()
y = lb.fit_transform(label)
y = y.reshape(-1)

# sktime algorithms require nested format, so convert the data from tabular to nested
X_nested = from_2d_array_to_nested(X_norm)

# model and hyper-parameter grid definition
model_params = {
    'ROCKET': {
        'model': ROCKETClassifier(),
        'params': {
            'num_kernels': [10000, 8000, 5000]
        }
    }
}

# scoring metric definitions
scoring = {'acc': 'accuracy',
           'prec': make_scorer(precision_score, pos_label=pos_label),
           'avg_prec': make_scorer(average_precision_score, pos_label=pos_label),
           'recall': make_scorer(recall_score, pos_label=pos_label),
           'f1': make_scorer(f1_score, pos_label=pos_label),
           'bal_acc': 'balanced_accuracy'
           }