Example #1
# Imports required by this snippet (inferred from the names used below).
import torch
import sacred
from sacred.observers import MongoObserver

import util_funcs

# The experiment object is not shown in the original excerpt; the name here is a placeholder.
ex = sacred.Experiment(name="simple_nn")


@ex.config  # Sacred config function: every local variable becomes a config entry
def config():
    use_1 = False
    use_both = False
    num_files = util_funcs.TOTAL_NUM_FILES
    clf_step = None
    use_expanded_y = True
    clf_name = "simple_nn.pt"
    num_epochs = 1000
    # Log runs to MongoDB via a Sacred observer.
    ex.observers.append(
        MongoObserver.create(client=util_funcs.get_mongo_client()))
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    test_size = 0.2
    valid_size = 0.25
    batch_size = 50
    batch_print_size = 10
    lr = 0.001
    momentum = 0.9
    step_size = 50
    gamma = 0.8
    dropout = 0.5
    hidden_size_factor = 10
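
The lr, momentum, step_size, and gamma entries above are the usual hyperparameters of torch's SGD optimizer and StepLR scheduler. As a minimal, hypothetical sketch (the training code is not part of this snippet, the model below is a placeholder rather than the real simple_nn, and the ex object is the one defined above), Sacred would inject these config values into a captured main function roughly like this:

import torch.nn as nn
import torch.optim as optim

@ex.automain  # Sacred fills each argument from the matching config() entry
def main(lr, momentum, step_size, gamma, num_epochs, device):
    model = nn.Linear(10, 2).to(device)  # placeholder model, not the original network
    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)
    # StepLR multiplies the learning rate by gamma every step_size epochs.
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
    for epoch in range(num_epochs):
        # ... forward/backward pass and optimizer.step() would go here ...
        scheduler.step()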
Example #2
from os import path
import sys
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.metrics import f1_score, make_scorer, accuracy_score, roc_auc_score, matthews_corrcoef, classification_report, mean_squared_error
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
import pickle as pkl
import sacred
from sacred.observers import MongoObserver  # needed for MongoObserver.create below
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
import xgboost as xgb
ex = sacred.Experiment(name="seizure_predict_baseline_traditional_ml")

import util_funcs
ex.observers.append(MongoObserver.create(client=util_funcs.get_mongo_client()))
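
In Sacred, the rf named config below is applied only when it is explicitly selected, e.g. via "python script.py with rf" on the command line or ex.run(named_configs=["rf"]) from Python. Its rf__-prefixed keys follow scikit-learn's Pipeline convention, where the prefix names a pipeline step. A minimal sketch of how such a grid is typically consumed (the actual search code is not shown in this excerpt, and the grid here is only a subset):

from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline

pipeline = Pipeline([("rf", RandomForestClassifier())])  # step name "rf" matches the rf__ prefix
param_grid = {
    "rf__n_estimators": [400, 600, 1200],
    "rf__max_depth": [None, 2, 8],
}
search = GridSearchCV(pipeline, param_grid, scoring="f1", cv=3)
# search.fit(X, y) would fit every combination and expose the best one as search.best_estimator_.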


@ex.named_config
def rf():
    parameters = {
        'rf__criterion': ["gini", "entropy"],
        'rf__n_estimators': [400, 600, 1200],
        #         'rf__n_estimators': [50,  ],
        'rf__max_features': ['auto', 'log2', 30],
        'rf__max_depth': [None, 2, 8],  # smaller max depth, gradient boosting, more max features
        'rf__min_samples_split': [2, 4, 8],
        'rf__n_jobs': [1],
        'rf__min_weight_fraction_leaf': [0, 0.2, 0.5],
        # 'imb__method': [None, util_funcs.ImbalancedClassResampler.SMOTE, util_funcs.ImbalancedClassResampler.RANDOM_UNDERSAMPLE]