import sys


def comparisonExperiments():
    # dataset and results directories are read from the command line
    data_dir = sys.argv[1]
    res_dir = sys.argv[2]

    complete_classifiers = [
        "ShapeletForestClassifier",
    ]

    small_datasets = [
        "Beef",
        "Car",
        "Coffee",
        "CricketX",
        "CricketY",
        "CricketZ",
        "DiatomSizeReduction",
        "Fish",
        "GunPoint",
        "ItalyPowerDemand",
        "MoteStrain",
        "OliveOil",
        "Plane",
        "SonyAIBORobotSurface1",
        "SonyAIBORobotSurface2",
        "SyntheticControl",
        "Trace",
        "TwoLeadECG",
    ]
    # override with a small subset for a quick test run
    small_datasets = [
        "Beef",
        "Coffee",
    ]

    num_folds = 2

    import sktime.contrib.experiments as exp

    for f in range(num_folds):
        for d in small_datasets:
            for c in complete_classifiers:
                print(c, d, f)
                try:
                    exp.run_experiment(data_dir, res_dir, c, d, f)
                except Exception:
                    # report the failure and move on to the next run
                    print('\n\n FAILED: ', sys.exc_info()[0], '\n\n')
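A minimal entry point such as the following could drive this function from the command line; the `__main__` guard and the script name are illustrative additions, not part of the original snippet.

if __name__ == "__main__":
    # e.g. python comparison_experiments.py <data_dir> <results_dir>
    comparisonExperiments()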
Example #2
def comparisonExperiments():

    # hard-coded local paths for testing; the commented-out sys.argv values show
    # how the same function can be driven from the command line
    data_dir = '/home/carl/Downloads/Univariate2018_ts/Univariate_ts/'  # sys.argv[1]
    res_dir = '/home/carl/temp/'  # sys.argv[2]

    complete_classifiers = [
        "catch22ForestClassifier",
    ]

    small_datasets = [
        "Beef",
        "Car",
        "Coffee",
        "CricketX",
        "CricketY",
        "CricketZ",
        "DiatomSizeReduction",
        "Fish",
        "GunPoint",
        "ItalyPowerDemand",
        "MoteStrain",
        "OliveOil",
        "Plane",
        "SonyAIBORobotSurface1",
        "SonyAIBORobotSurface2",
        "SyntheticControl",
        "Trace",
        "TwoLeadECG",
    ]
    # override with a small subset for a quick test run
    small_datasets = [
        "Beef",
        "Coffee",
    ]

    num_folds = 2

    import sktime.contrib.experiments as exp

    for f in range(num_folds):
        for d in small_datasets:
            for c in complete_classifiers:
                print(c, d, f)
                # the try/except guard from Example #1 is left disabled here, so
                # any failure stops the run immediately
                exp.run_experiment(data_dir, res_dir, c, d, f)
Example #3
def tsf_benchmarking():
    # relies on module-level names defined elsewhere in the script:
    # benchmark_datasets, data_dir, results_dir, exp (experiments module) and
    # ib (presumably the interval-based classifiers module), plus the pipeline
    # components used below
    for i, dataset in enumerate(benchmark_datasets):
        print(str(i) + " problem = " + dataset)
        tsf = ib.TimeSeriesForest(n_estimators=100)
        exp.run_experiment(overwrite=False,
                           problem_path=data_dir,
                           results_path=results_dir,
                           cls_name="PythonTSF",
                           classifier=tsf,
                           dataset=dataset,
                           train_file=False)
        steps = [
            ('segment', RandomIntervalSegmenter(n_intervals='sqrt')),
            ('transform',
             FeatureUnion([('mean',
                            RowTransformer(
                                FunctionTransformer(func=np.mean,
                                                    validate=False))),
                           ('std',
                            RowTransformer(
                                FunctionTransformer(func=np.std,
                                                    validate=False))),
                           ('slope',
                            RowTransformer(
                                FunctionTransformer(func=time_series_slope,
                                                    validate=False)))])),
            ('clf', DecisionTreeClassifier())
        ]
        base_estimator = Pipeline(steps)
        tsf = TimeSeriesForestClassifier(estimator=base_estimator,
                                         n_estimators=100)
        exp.run_experiment(overwrite=False,
                           problem_path=data_dir,
                           results_path=results_dir,
                           cls_name="PythonTSFComposite",
                           classifier=tsf,
                           dataset=dataset,
                           train_file=False)
Example #4
def elastic_distance_benchmarking():
    # relies on module-level names defined elsewhere in the script: distance_test
    # (list of dataset names), data_dir, results_dir, exp (experiments module) and
    # dist (distance-based nearest-neighbour classifiers)
    for i, dataset in enumerate(distance_test):
        print(
            str(i) + " problem = " + dataset + " writing to " + results_dir +
            "/DTW/")
        dtw = dist.KNeighborsTimeSeriesClassifier(metric="dtw")
        exp.run_experiment(overwrite=False,
                           problem_path=data_dir,
                           results_path=results_dir + "/DTW/",
                           cls_name="PythonDTW",
                           classifier=dtw,
                           dataset=dataset,
                           train_file=False)
        # the original snippet re-used metric="dtw" and the /DTW/ results path
        # here, presumably a copy-paste slip; switched to TWE to match the
        # PythonTWE classifier name
        twe = dist.KNeighborsTimeSeriesClassifier(metric="twe")
        exp.run_experiment(overwrite=False,
                           problem_path=data_dir,
                           results_path=results_dir + "/TWE/",
                           cls_name="PythonTWE",
                           classifier=twe,
                           dataset=dataset,
                           train_file=False)
Example #5
def dlExperiment(data_dir, res_dir, classifier_name, dset, fold, classifier=None):
    # build the requested network for this resample fold unless a ready-made
    # classifier instance was passed in
    if classifier is None:
        classifier = setNetwork(classifier_name, fold)

    exp.run_experiment(data_dir, res_dir, classifier_name, dset, classifier=classifier, resampleID=fold)
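For illustration only, a call to this helper might look like the following; the paths are placeholders (not taken from the original), and the classifier name and fold mirror the values used in Example #6 below.

dlExperiment(
    "/path/to/Univariate_ts/",   # placeholder dataset directory
    "/path/to/results/",         # placeholder results directory
    "resnet",
    "ItalyPowerDemand",
    0,
)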
Example #6
if __name__ == "__main__":
    """
    Example simple usage, with arguments input via script or hard coded for testing
    """
    print(" Local Run")
    results_dir = "C:/Temp/sktime-dl/"
    classifier = "resnet"
    resample = 0
    # for i in range(0, len(univariate_datasets)):
    #     dataset = univariate_datasets[i]
    #     print(i)
    #     print(" problem = " + dataset)
    problem = "ItalyPowerDemand"
    print("Loading ", problem)
    trX, trY = load_UCR_UEA_dataset(problem, split="train", return_X_y=True)
    teX, teY = load_UCR_UEA_dataset(problem, split="test", return_X_y=True)
    tf = False  # do not write a separate train-fold results file
    run_experiment(
        overwrite=True,
        trainX=trX,
        trainY=trY,
        testX=teX,  # the original snippet passed the train split here as well
        testY=teY,
        results_path=results_dir,
        cls_name=classifier,
        dataset=problem,
        resampleID=resample,
        train_file=tf,
    )