def run_model(classifier_name, data, epoch, window_len, stride, binary, i):
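    """Train one classifier for one iteration and persist its results.

    `data` is expected to be a list of the form
    [all_labels, X_train, y_train, X_val, y_val, X_test, y_test, output_directory]
    (as built by prepare_data_cnn_lstm below), so data[7] is the base output
    directory. Results are written to <data[7]>/<classifier_name>_<i>/ as
    classification_metrics.csv and confusion_matrix.csv. "rocket" is handled
    by run_rocket; every other classifier_name goes through
    run_deep_learning_models.
    """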
    output_directory = data[7] + classifier_name + "_" + str(i) + "/"
    create_directory(output_directory)

    if classifier_name == "rocket":
        metrics, conf_mat = run_rocket(data, epoch, window_len, stride, binary)

    else:
        metrics, conf_mat = run_deep_learning_models(classifier_name, data,
                                                     epoch, output_directory)

    metrics.to_csv(output_directory + 'classification_metrics.csv')
    np.savetxt(output_directory + 'confusion_matrix.csv',
               conf_mat,
               delimiter=",")

    return metrics

if len(sys.argv) >= 6:
    data_path = sys.argv[1]
    output_directory = sys.argv[2]
    problem = sys.argv[3]
    classifier_name = sys.argv[4]
    itr = sys.argv[5]
else:
    cwd = os.getcwd()
    data_path = cwd + "/TS_Segmentation/"
    output_directory = cwd + "/output/"
    problem = "Emotiv266"
    classifier_name = "MHA"
    itr = "itr_0"

output_directory = output_directory + classifier_name + '/' + problem + '/' + itr + '/'
create_directory(output_directory)

run_experiments()
def prepare_data_cnn_lstm(problem, window_len, stride, binary, data_version,
                          upsampled, norm, segmentation_norm):
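    """Load one segmentation problem and window it for two model families.

    Returns two lists of the form
    [all_labels, X_train, y_train, X_val, y_val, X_test, y_test, output_directory]:
    one for the standard deep-learning models and one for the CNN-LSTM models,
    both produced from the same train/test CSVs via prepare_inputs_combined.
    """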
    # Set up output location
    cwd = os.getcwd()
    data_path = cwd + "/TS_Segmentation/"
    output_directory = cwd + "/output/"
    output_directory = output_directory + "compare_models" + '/' + problem + '/'
    create_directory(output_directory)

    print(
        "#########################################################################"
    )
    print("[Compare_Models] Run Compare_Models")
    print(
        "#########################################################################"
    )
    print("[Compare_Models] Data path: {}".format(data_path))
    print("[Compare_Models] Output Dir: {}".format(output_directory))
    print("[Compare_Models] Problem: {}".format(problem))
    print("[Compare_Models] Window Len: {}".format(window_len))
    print("[Compare_Models] Stride: {}".format(stride))
    print(
        "#########################################################################"
    )

    data_folder = data_path + problem + "/"
    train_file = data_folder + problem + "_TRAIN.csv"
    test_file = data_folder + problem + "_TEST.csv"
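    # Each problem lives in <data_path>/<problem>/ as <problem>_TRAIN.csv and
    # <problem>_TEST.csv; load_segmentation_data (from the repo's data_loader
    # module, not shown in this section) is assumed to return one series per
    # recording, so len() below counts recordings.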

    train_data = data_loader.load_segmentation_data(train_file,
                                                    norm=segmentation_norm)
    test_data = data_loader.load_segmentation_data(test_file,
                                                   norm=segmentation_norm)
    print("[Compare_Models] {} train series".format(len(train_data)))
    print("[Compare_Models] {} test series".format(len(test_data)))

    dataset1, dataset2 = prepare_inputs_combined(train_inputs=train_data,
                                                 test_inputs=test_data,
                                                 window_len=window_len,
                                                 stride=stride,
                                                 binary=binary,
                                                 data_version=data_version,
                                                 upsampled=upsampled,
                                                 norm=norm)
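    # prepare_inputs_combined is assumed to return two 6-element sequences of
    # (X_train, y_train, X_val, y_val, X_test, y_test): dataset1 feeds the
    # standard deep-learning models and dataset2 the CNN-LSTM models.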

    X_train, y_train, X_val, y_val, X_test, y_test = dataset1[:6]
    X2_train, y2_train, X2_val, y2_val, X2_test, y2_test = dataset2[:6]

    if y_val is not None:
        all_labels = np.concatenate((y_train, y_val, y_test), axis=0)
    else:
        all_labels = np.concatenate((y_train, y_test), axis=0)
    print("[Compare_Models] All labels: {}".format(np.unique(all_labels)))

    # One-hot encode all labels at once, then split them back into the
    # train/validation/test portions (the validation split may be absent).
    tmp = pd.get_dummies(all_labels).values

    y_train = tmp[:len(y_train)]
    if y_val is not None:
        y_val = tmp[len(y_train):len(y_train) + len(y_val)]
        y_test = tmp[len(y_train) + len(y_val):]
    else:
        y_test = tmp[len(y_train):]

    data_deep_learning = [
        all_labels, X_train, y_train, X_val, y_val, X_test, y_test,
        output_directory
    ]

    if y2_val is not None:
        all_labels2 = np.concatenate((y2_train, y2_val, y2_test), axis=0)
    else:
        all_labels2 = np.concatenate((y2_train, y2_test), axis=0)
    print("[Compare_Models] All labels 2: {}".format(np.unique(all_labels2)))

    tmp2 = pd.get_dummies(all_labels2).values

    y2_train = tmp2[:len(y2_train)]
    if y2_val is not None:
        y2_val = tmp2[len(y2_train):len(y2_train) + len(y2_val)]
        y2_test = tmp2[len(y2_train) + len(y2_val):]
    else:
        y2_test = tmp2[len(y2_train):]

    data_cnn_lstm = [
        all_labels2, X2_train, y2_train, X2_val, y2_val, X2_test, y2_test,
        output_directory
    ]

    return data_deep_learning, data_cnn_lstm
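
# A minimal usage sketch (hypothetical parameter values, shown for
# illustration only; the script's actual entry point is run_experiments()
# above):
#
#   data_dl, data_cnn_lstm = prepare_data_cnn_lstm(
#       problem="Emotiv266", window_len=100, stride=50, binary=True,
#       data_version=None, upsampled=False, norm=False,
#       segmentation_norm=False)
#   run_model("MHA", data_dl, epoch=100, window_len=100, stride=50,
#             binary=True, i=0)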