Example #1
def tabtune(Xtrain,
            Xvalid,
            ytrain,
            yvalid,
            verbose=True,
            scorefunc=roc_auc_score,
            predfunc=pred_1dprobs,
            return_extras=False,
            return_score=False,
            **kwargs):
    for k, v in DEFAULT_TAB.items():
        kwargs.setdefault(k, v)
    xg_bestimator = None
    xg_best_score = -float('inf')
    xg_best_params = None
    xg_params_scores = {}
    xg_results = {}

    params = TUNE_TAB.copy()

    xg_keys = params.keys()
    for vals in tqdm(list(itertools.product(*[params[k] for k in xg_keys]))):
        paramdict = {k: v for k, v in zip(xg_keys, vals)}
        n_d_a = paramdict.pop('n_d_a')
        paramdict['n_d'] = paramdict['n_a'] = n_d_a
        xg_results[vals] = {}
        bst = TabNetClassifier(**kwargs, **paramdict)
        bst.fit(Xtrain,
                ytrain,
                eval_set=[(Xtrain, ytrain), (Xvalid, yvalid)],
                eval_metric=['auc'],
                max_epochs=MAX_EPOCHS,
                patience=20,
                batch_size=1024,
                virtual_batch_size=128,
                num_workers=0,
                weights=1,
                drop_last=False)
        cur_score = scorefunc(yvalid, predfunc(bst, Xvalid))
        xg_params_scores[vals] = cur_score
        if cur_score > xg_best_score:
            xg_bestimator = bst
            xg_best_score = cur_score
            xg_best_params = paramdict


    if not return_extras:
        return (xg_best_params,
                xg_best_score) if return_score else xg_best_params
    else:
        return ((xg_best_params, xg_best_score), xg_params_scores)
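For reference, tabtune assumes several module-level names (DEFAULT_TAB, TUNE_TAB, MAX_EPOCHS, pred_1dprobs) that the snippet does not show. A minimal sketch of what they might look like; the grid values here are illustrative assumptions, not the original settings:

import itertools

from pytorch_tabnet.tab_model import TabNetClassifier
from sklearn.metrics import roc_auc_score
from tqdm import tqdm

MAX_EPOCHS = 100  # assumed training budget

# assumed fixed constructor kwargs, applied when the caller does not override them
DEFAULT_TAB = {'seed': 0, 'verbose': 0}

# assumed search grid; 'n_d_a' is popped by tabtune and used for both n_d and n_a
TUNE_TAB = {
    'n_d_a': [8, 16],
    'n_steps': [3, 5],
    'gamma': [1.3, 1.5],
}

def pred_1dprobs(model, X):
    # positive-class probabilities as a 1-D array
    return model.predict_proba(X)[:, 1]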
Example #2
def build_tabnet():
    model_file_name = 'tabnet_model_{}'.format(current_dataset_name)

    df = current_dataset.copy()
    cleaning_text(df)

    X = df['clean_content']
    y = df['emotion']
    # tokenize the text data
    tok = Tokenizer(num_words=1000, oov_token='<UNK>')
    # split the data first, so the tokenizer only ever sees training text
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.33,
                                                        stratify=y,
                                                        random_state=1)
    X_test_save = X_test
    # fit the tokenizer on the training split only, then transform both splits
    # (fitting on the test split, or refitting per split, leaks information and
    # produces inconsistent feature matrices)
    tok.fit_on_texts(X_train)
    X_train = tok.texts_to_matrix(X_train, mode='tfidf')
    X_test = tok.texts_to_matrix(X_test, mode='tfidf')
    # X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.3, stratify=y)
    # build model, fit and predict
    if LOAD_MODEL and pathlib.Path(model_file_name).exists():
        model = pickle.load(open(model_file_name, 'rb'))
    else:
        model = TabNetClassifier()
        model.fit(X_train=X_train,
                  y_train=y_train,
                  eval_set=[(X_train, y_train), (X_test, y_test)],
                  eval_name=['train', 'valid'],
                  eval_metric=['accuracy', 'balanced_accuracy', 'logloss'])

    preds_mapper = {
        idx: class_name
        for idx, class_name in enumerate(model.classes_)
    }
    preds = model.predict_proba(X_test)
    # map the argmax indices back to class names (these are labels, not probabilities)
    y_pred_labels = np.vectorize(preds_mapper.get)(np.argmax(preds, axis=1))
    y_pred = model.predict(X_test)
    test_acc = accuracy_score(y_pred=y_pred, y_true=y_test)
    pickle.dump(model, open(model_file_name, 'wb'))
    # model.save_model(model_file_name)
    return model, y_test, y_pred, test_acc
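One caveat in this example: only the model is pickled, yet scoring new text later also needs the fitted Tokenizer. A small sketch of persisting and reusing both; the tokenizer file name is an illustrative assumption:

# Sketch: persist the fitted tokenizer next to the model so unseen text can be
# scored after reloading. tok_file_name is illustrative.
tok_file_name = 'tabnet_tokenizer_{}'.format(current_dataset_name)
pickle.dump(tok, open(tok_file_name, 'wb'))

# later, at inference time:
tok = pickle.load(open(tok_file_name, 'rb'))
model = pickle.load(open(model_file_name, 'rb'))
X_new = tok.texts_to_matrix(['some new document'], mode='tfidf')
print(model.predict(X_new))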
Example #3
    def fit(self, X, y):
        X, y = check_X_y(X, y)

        self.estimators_ = []
        self.features_ = []
        self.classes_ = np.unique(y)
        self.n_samples_ = int(np.round(self.max_samples * X.shape[0]))
        self.n_features_ = int(np.round(self.max_features * X.shape[1]))

        for _ in range(self.n_estimators):
            samples = np.random.choice(X.shape[0],
                                       size=self.n_samples_,
                                       replace=self.bootstrap)
            features = np.random.choice(X.shape[1],
                                        size=self.n_features_,
                                        replace=False)

            unused_samples = np.setdiff1d(np.arange(X.shape[0]), samples)

            X_train = X[samples][:, features]
            y_train = y[samples]

            estimator = TabNetClassifier(verbose=self.verbose,
                                         device_name=self.device_name)

            if self.oob_score and len(unused_samples) > 0:
                X_val = X[unused_samples][:, features]
                y_val = y[unused_samples]

                estimator.fit(X_train,
                              y_train,
                              eval_set=[(X_val, y_val)],
                              eval_metric=['balanced_accuracy'],
                              patience=self.patience,
                              max_epochs=self.max_epochs)
            else:
                estimator.fit(X_train,
                              y_train,
                              patience=self.patience,
                              max_epochs=self.max_epochs)

            self.estimators_.append(estimator)
            self.features_.append(features)

        return self
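The bagging class above only defines fit; a hypothetical predict sketch that soft-votes across the stored estimators, pairing each one with the feature subset it was trained on:

    def predict(self, X):
        # hypothetical companion to fit(): average predict_proba across the
        # ensemble, restricting each estimator to its own feature subset
        X = np.asarray(X)
        probas = np.mean([est.predict_proba(X[:, feats])
                          for est, feats in zip(self.estimators_, self.features_)],
                         axis=0)
        return self.classes_[np.argmax(probas, axis=1)]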
Example #4
class ModelTabNetClassifier(Model):

    def train(self, tr_x, tr_y, va_x=None, va_y=None, te_x=None):

        categorical_dims = {}
        for col in self.categorical_features:
            tr_x[col] = tr_x[col].fillna("unk")
            va_x[col] = va_x[col].fillna("unk")
            te_x[col] = te_x[col].fillna("unk")
            categorical_dims[col] = len(set(tr_x[col].values) | set(va_x[col].values) | set(te_x[col].values))

        cat_idxs = [i for i, f in enumerate(tr_x.columns) if f in self.categorical_features]
        cat_dims = [categorical_dims[f] for i, f in enumerate(tr_x.columns) if f in self.categorical_features]
        cat_emb_dim = [10 for _ in categorical_dims]

        for col in tr_x.columns:
            tr_x[col] = tr_x[col].fillna(tr_x[col].mean())
            va_x[col] = va_x[col].fillna(tr_x[col].mean())
            te_x[col] = te_x[col].fillna(tr_x[col].mean())

        self.model = TabNetClassifier(cat_dims=cat_dims, cat_emb_dim=cat_emb_dim, cat_idxs=cat_idxs)
        # eval_set replaces the X_valid/y_valid arguments removed in newer
        # pytorch-tabnet releases
        self.model.fit(X_train=tr_x.values, y_train=tr_y.values,
                       eval_set=[(va_x.values, va_y.values)],
                       max_epochs=1000,
                       patience=50,
                       batch_size=1024,
                       virtual_batch_size=128)

    def predict(self, te_x):
        return self.model.predict_proba(te_x.values)[:, 1].reshape(-1, )

    def save_model(self):
        model_path = os.path.join('../output/model', f'{self.run_fold_name}.model')
        os.makedirs(os.path.dirname(model_path), exist_ok=True)
        Data.dump(self.model, model_path)

    def load_model(self):
        model_path = os.path.join('../output/model', f'{self.run_fold_name}.model')
        self.model = Data.load(model_path)
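cat_emb_dim = [10 for _ in categorical_dims] above gives every categorical column the same embedding width; later examples size embeddings from each column's cardinality instead. A sketch of that alternative, with an illustrative half-cardinality heuristic clipped to [1, 50]:

# Sketch: per-column embedding widths derived from cardinality rather than a
# fixed 10; the heuristic mirrors the clip-based sizing used in a later example.
import numpy as np
cat_emb_dim = [int(max(1, min(np.ceil(d / 2), 50))) for d in cat_dims]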
Example #5
        clf = TabNetClassifier(
            optimizer_fn=torch.optim.Adam,
            optimizer_params=dict(lr=2e-1),
            scheduler_params={"step_size": 10, "gamma": 0.9},  # how to use learning rate scheduler
            scheduler_fn=torch.optim.lr_scheduler.StepLR,
            mask_type="sparsemax",  # This will be overwritten if using pretrain model
        )

        clf.fit(
            X_train=X_trn,
            y_train=y_trn,
            eval_set=[(X_trn, y_trn), (X_vld, y_vld)],
            eval_name=["train", "valid"],
            eval_metric=["logloss"],
            max_epochs=max_epochs,
            patience=50,
            batch_size=128,
            virtual_batch_size=128,
            num_workers=0,
            weights=1,
            drop_last=False,
            from_unsupervised=loaded_pretrain,
        )

        fold_preds = clf.predict_proba(X_vld).astype(np.float64)[:, 1]
        _test_preds.append(clf.predict_proba(X_tst)[:, 1])
        oof[vld_index] = fold_preds
        scores.append(log_loss(y_vld, fold_preds))
        importances = pd.concat(
            [
                importances,
Example #6
clf = TabNetClassifier(optimizer_params=dict(lr=2e-2),
                       scheduler_params={"step_size":50, # how to use learning rate scheduler
                                         "gamma":0.9},
                       scheduler_fn=torch.optim.lr_scheduler.StepLR,
                       mask_type='entmax' # "sparsemax"
                      )

X_train = train[features].values[train_indices]
y_train = train[target].values[train_indices]

X_valid = train[features].values[valid_indices]
y_valid = train[target].values[valid_indices]

X_test = train[features].values[test_indices]
y_test = train[target].values[test_indices]

max_epochs = 1000 if not os.getenv("CI", False) else 2

clf.fit(
    X_train=X_train, y_train=y_train,
    eval_set=[(X_train, y_train), (X_valid, y_valid)],
    eval_name=['train', 'valid'],
    eval_metric=['auc'],
    max_epochs=max_epochs , patience=20,
    batch_size=1024, virtual_batch_size=128,
    num_workers=0,
    weights=1,
    drop_last=False
)
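This snippet also carves out a test split (X_test, y_test) that it never scores; a short follow-up sketch using the same predict_proba/AUC pattern as the surrounding examples:

# Sketch: score the held-out test split with the same metric used for early
# stopping above.
from sklearn.metrics import roc_auc_score

test_preds = clf.predict_proba(X_test)[:, 1]
print('test auc:', roc_auc_score(y_test, test_preds))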
Example #7
for col in vars_cat:
  d_all[col] = preprocessing.LabelEncoder().fit_transform(d_all[col])

X_all = d_all[vars_num+vars_cat]
y_all = np.where(d_all["dep_delayed_15min"]=="Y",1,0)

cat_idxs = [ i for i, col in enumerate(X_all.columns) if col in vars_cat]
cat_dims = [ len(np.unique(X_all.iloc[:,i].values)) for i in cat_idxs]

X_train = X_all[0:d_train.shape[0]].to_numpy()
y_train = y_all[0:d_train.shape[0]]
X_test = X_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])].to_numpy()
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]


md = TabNetClassifier(cat_idxs=cat_idxs,
                       cat_dims=cat_dims,
                       cat_emb_dim=1
)

%%time
md.fit( X_train=X_train, y_train=y_train,
    max_epochs=10, patience=0
)


y_pred = md.predict_proba(X_test)[:,1]
print(metrics.roc_auc_score(y_test, y_pred))



Example #8
class MyTabNetClassifierModel(BaseModel):
    """
    Paramters
    ---------
    ref: https://dreamquark-ai.github.io/tabnet/generated_docs/README.html#model-parameters
    model_params:
        n_d:default=8(range 8 to 64)
        n_a:default=8
        n_steps:default=3(range 3 to 10)
        gamma:default=1.3(range 1.0 to 2.0)
        n_independent:default=2(range 1 to 5)
        n_shared:default=2(range 1 to 5)
        lambda_sparse:default=1e3
        optimizer_fn:default=Adam
        optimizer_params:default=(lr=2e2, weight_decay=None),
        mask_type:default=sparsemax or entmax
        scheduler_params:dict(T_0=200, T_mult=1, eta_min=1e-4, last_epoch=-1, verbose=False),
        seed: default=0
        verbose=5,
        cat_dims=cat_dims, cat_idxs=cat_idx, cat_emb_dim=1
        
    fit_params:
        max_epochs:default=200
        patience:default=15
        loss_fn(torch.loss or list of torch.loss):default to mse for regression and cross entropy for classification
        eval_metric(list or str)
        batch_size:default=1024
        virtual_batch_size:default=128
        pretrain_ratio
        
    ### Example use:
        >>>nunique = train_feat_df.nunique()
        >>>types = train_feat_df.dtypes
        >>>categorical_columns = []
        >>>categorical_dims = {}
        >>>train_feat_df["is_train"] = 1
        >>>test_feat_df["is_train"] = 0
        >>>all_df = pd.concat([train_feat_df, test_feat_df])
        >>for col in train_feat_df.drop(["is_train"], axis=1).columns:
            if str(types[col]) == 'category' or nunique[col] < 200:
                l_enc = LabelEncoder()
                all_df[col] = l_enc.fit_transform(all_df[col].values)
                all_df[col] = all_df[col].astype("category")
                categorical_columns.append(col)
                categorical_dims[col] = len(l_enc.classes_ )
                
        >>>cat_idx = [i for i, f in enumerate(train_feat_df.columns.tolist()) if f in categorical_columns]
        >>>cat_dims = [categorical_dims[f] for i, f in enumerate(train_feat_df.columns.tolist()) if f in categorical_columns]
    """
    def __init__(self, model_params, fit_params):
        self.model_params = model_params
        self.fit_params = fit_params

    def build_model(self):
        self.model = TabNetClassifier(**self.model_params)
        return self.model

    def fit(self, train_x, train_y, valid_x=None, valid_y=None):
        train_x = train_x.values
        valid_x = valid_x.values
        self.model = self.build_model()
        self.model.fit(train_x,
                       train_y,
                       eval_set=[(train_x, train_y), (valid_x, valid_y)],
                       eval_name=["train", "valid"],
                       **self.fit_params)
        return self.model

    def predict(self, est, valid_x):
        valid_x = valid_x.values
        preds = est.predict_proba(valid_x)[:, 1]
        return preds

    def get_feature_importance(self,
                               train_x: pd.DataFrame,
                               is_save=False,
                               filepath=None):
        feature_importance_df = pd.DataFrame()
        for i, model in self.models.items():
            _df = pd.DataFrame()
            _df['feature_importance'] = model.feature_importances_
            _df['column'] = train_x.columns
            _df['fold'] = i + 1
            feature_importance_df = pd.concat([feature_importance_df, _df],
                                              axis=0,
                                              ignore_index=True)

        order = feature_importance_df.groupby('column')\
            .sum()[['feature_importance']]\
            .sort_values('feature_importance', ascending=False).index[:50]

        fig, ax = plt.subplots(figsize=(8, max(6, len(order) * .25)))
        sns.boxenplot(data=feature_importance_df,
                      x='feature_importance',
                      y='column',
                      order=order,
                      ax=ax,
                      palette='viridis',
                      orient='h')
        ax.tick_params(axis='x', rotation=90)
        ax.set_title('TabNet Feature Importance')
        ax.grid()
        # save after the plot is drawn (saving before left an empty figure),
        # and persist the importances from every fold, not just the last one
        if is_save:
            fig.savefig(filepath + "tabnet_feature_importance.png")
            feature_importance_df.to_csv(filepath + "tabnet_feature_importance.csv",
                                         index=False)
        plt.show()
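A hypothetical end-to-end use of this wrapper, with parameter values taken from the defaults listed in the docstring; train_x/train_y/valid_x/valid_y are placeholder DataFrames/Series:

# Sketch: wiring the wrapper together. The data objects are placeholders and
# the parameter values follow the docstring defaults.
model_params = dict(n_d=8, n_a=8, n_steps=3, gamma=1.3,
                    lambda_sparse=1e-3, mask_type='sparsemax', seed=0, verbose=5)
fit_params = dict(max_epochs=200, patience=15, batch_size=1024,
                  virtual_batch_size=128, eval_metric=['auc'])

wrapper = MyTabNetClassifierModel(model_params, fit_params)
est = wrapper.fit(train_x, train_y, valid_x, valid_y)
valid_preds = wrapper.predict(est, valid_x)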
Example #9

MAX_EPOCH = 10 
BS = 1024 

md = TabNetClassifier(cat_idxs=cat_idxs,
                       cat_dims=cat_dims,
                       cat_emb_dim=1,
                       ## optimizer_fn=torch.optim.Adam,
                       ## optimizer_params=dict(lr=2e-2),
                       scheduler_fn=torch.optim.lr_scheduler.OneCycleLR,
                       scheduler_params=dict(max_lr=0.05,
                                             steps_per_epoch=int(X_train.shape[0] / BS),
                                             epochs=MAX_EPOCH,
                                             is_batch_level=True),
                       mask_type='entmax' # "sparsemax"
)

%%time
md.fit( X_train=X_train, y_train=y_train,
    max_epochs=MAX_EPOCH, patience=0,
    ## batch_size=1024, virtual_batch_size=128,
    ## weights=0,
    drop_last = True
)


y_pred = md.predict_proba(X_test)[:,1]
print(metrics.roc_auc_score(y_test, y_pred))

def main():
    # Generate Synthetic Data
    data, test_data, cat_col_names, num_col_names = data_load()
    cat_dims = data[cat_col_names].nunique().to_list()
    cat_idxs = [(cat_col_names + num_col_names).index(cat_col)
                for cat_col in cat_col_names]
    # embedding width per categorical column: half the cardinality, clipped to [1, 50]
    # (np.int is removed in recent NumPy; plain int works everywhere)
    cat_emb_dims = np.ceil(np.clip(np.array(cat_dims) / 2, a_min=1,
                                   a_max=50)).astype(int).tolist()
    FEATURES = cat_col_names + num_col_names
    df_sub = pd.read_csv('Data/sample_submission.csv')

    bsize = 2500 * 2

    # ##########Define the Configs############
    N_D = 16
    N_A = 16
    N_INDEP = 2
    N_SHARED = 2
    N_STEPS = 1  # 2
    MASK_TYPE = "sparsemax"
    GAMMA = 1.5
    BS = 512
    MAX_EPOCH = 21  # 20
    PRETRAIN = True

    X = data[FEATURES].values
    y = data["target"].values

    X_test = test_data[FEATURES].values

    if PRETRAIN:
        pretrain_params = dict(
            n_d=N_D,
            n_a=N_A,
            n_steps=N_STEPS,
            n_independent=N_INDEP,
            n_shared=N_SHARED,
            cat_idxs=cat_idxs,
            cat_dims=cat_dims,
            cat_emb_dim=cat_emb_dims,
            gamma=GAMMA,
            lambda_sparse=0.,
            optimizer_fn=torch.optim.Adam,
            optimizer_params=dict(lr=2e-2),
            mask_type=MASK_TYPE,
            scheduler_params=dict(
                mode="min",
                patience=3,
                min_lr=1e-5,
                factor=0.5,
            ),
            scheduler_fn=torch.optim.lr_scheduler.ReduceLROnPlateau,
            verbose=1,
        )

        pretrainer = TabNetPretrainer(**pretrain_params)

        pretrainer.fit(
            X_train=X_test,
            eval_set=[X],
            max_epochs=MAX_EPOCH,
            patience=25,
            batch_size=BS,
            virtual_batch_size=BS,  # 128,
            num_workers=0,
            drop_last=True,
            pretraining_ratio=0.5  # the bigger the pretraining_ratio, the harder the reconstruction task
        )
    # Training the Model
    # tabular_mode.fit(train=train, validation=val)
    # # Evaluating the Model
    # # #Loss and Metrics on New Data¶
    # result = tabular_mode.evaluate(test)

    cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=777)

    BS = 2048
    MAX_EPOCH = 20
    # skf = StratifiedKFold(n_splits=5, random_state=2021, shuffle=True)

    data['oof_preds'] = np.nan

    for fold_nb, (train_index, valid_index) in enumerate(cv.split(X, y)):
        X_train, X_valid = X[train_index], X[valid_index]
        y_train, y_valid = y[train_index], y[valid_index]

        tabnet_params = dict(
            n_d=N_D,
            n_a=N_A,
            n_steps=N_STEPS,
            gamma=GAMMA,
            n_independent=N_INDEP,
            n_shared=N_SHARED,
            lambda_sparse=1e-5,
            seed=0,
            clip_value=2,
            cat_idxs=cat_idxs,
            cat_dims=cat_dims,
            cat_emb_dim=cat_emb_dims,
            mask_type=MASK_TYPE,
            device_name='auto',
            optimizer_fn=torch.optim.Adam,
            optimizer_params=dict(lr=5e-2, weight_decay=1e-5),
            scheduler_params=dict(
                max_lr=5e-2,
                steps_per_epoch=int(X_train.shape[0] / BS),
                epochs=MAX_EPOCH,
                # final_div_factor=100,
                is_batch_level=True),
            scheduler_fn=torch.optim.lr_scheduler.OneCycleLR,
            #                               scheduler_params=dict(mode='max',
            #                                                     factor=0.5,
            #                                                     patience=5,
            #                                                     is_batch_level=False,),
            #                               scheduler_fn=torch.optim.lr_scheduler.ReduceLROnPlateau,
            verbose=1)
        # Defining TabNet model
        model = TabNetClassifier(**tabnet_params)

        model.fit(
            X_train=X_train,
            y_train=y_train,
            from_unsupervised=pretrainer if PRETRAIN else None,
            eval_set=[(X_train, y_train), (X_valid, y_valid)],
            eval_name=["train", "valid"],
            eval_metric=["auc"],
            batch_size=BS,
            virtual_batch_size=256,
            max_epochs=MAX_EPOCH,
            drop_last=True,
            pin_memory=True,
            patience=10,
        )

        val_preds = model.predict_proba(X_valid)[:, -1]
        print('auc:', roc_auc_score(y_true=y_valid, y_score=val_preds))

        # avoid chained-indexing assignment, which may silently write to a copy
        data.loc[data.index[valid_index], 'oof_preds'] = val_preds

        test_preds = model.predict_proba(X_test)[:, -1]
        df_sub[f"fold_{fold_nb+1}"] = test_preds

    df_sub["target"] = df_sub.filter(like="fold_").mean(axis=1).values

    df_sub.to_csv("Analysis/submission_5_tabnet.csv", index=False)

    df_sub = pd.read_csv("Analysis/submission_5_tabnet.csv")

    # df_sub.target = df_sub.target.map(lambda x: 0 if x<=0.5 else 1)
    df_sub.loc[:, ["id", "target"]].to_csv("Analysis/submission_5_2_tabnet.csv",
                                           index=False)
        scaler = StandardScaler().fit(interactions_t)
        interactions_t = scaler.transform(interactions_t)
        interactions_v = scaler.transform(interactions_v)

        torch.manual_seed(0)
        np.random.seed(0)

        clf = TabNetClassifier( scheduler_params={"step_size":5, "gamma":0.8},
                                scheduler_fn=torch.optim.lr_scheduler.StepLR )

        clf.fit(
            interactions_t, y_t,
            eval_set=[(interactions_t, y_t),(interactions_v, y_v)],
            eval_name=['train','val'],
            batch_size = 4096,
            virtual_batch_size = 4096//8,
            patience = 3,
            max_epochs = 100
        )

        # print('importances:',clf.feature_importances_*100)

        selected = np.where(clf.feature_importances_*100 > 8)[0]
        print('multiplying by:',i)
        print('selected:',selected)
        print('percents:',clf.feature_importances_[selected]*100)
        print()

    ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## 
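A plausible follow-up to the importance filter above is retraining on just the surviving columns; a sketch reusing the same configuration:

# Sketch: retrain on the columns whose importance cleared the 8% threshold.
clf_sel = TabNetClassifier(scheduler_params={"step_size": 5, "gamma": 0.8},
                           scheduler_fn=torch.optim.lr_scheduler.StepLR)
clf_sel.fit(
    interactions_t[:, selected], y_t,
    eval_set=[(interactions_v[:, selected], y_v)],
    eval_name=['val'],
    batch_size=4096,
    virtual_batch_size=4096 // 8,
    patience=3,
    max_epochs=100
)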
Example #12
X_test = X_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])].to_numpy()
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]

cat_idxs = [ i for i, col in enumerate(X_all.columns) if col in vars_cat]
cat_dims = [ len(np.unique(X_all.iloc[:,i].values)) for i in cat_idxs]
cat_emb_dim = np.maximum(1, np.floor(np.log(cat_dims))).astype(int)  # floor(log(2)) would be 0, so clamp to at least 1


md = TabNetClassifier(cat_idxs=cat_idxs,
                       cat_dims=cat_dims,
                       cat_emb_dim=cat_emb_dim,
                       ## optimizer_fn=torch.optim.Adam,
                       ## optimizer_params=dict(lr=2e-2),
                       ## mask_type='sparsemax',
                       n_steps=1,
)

%%time
md.fit( X_train=X_train, y_train=y_train,
    eval_set=[(X_train, y_train), (X_valid, y_valid)],
    eval_name=['train', 'valid'],
    eval_metric=['auc'],
    max_epochs=20, patience=20,
    ## batch_size=1024, virtual_batch_size=128,
    ## weights=0,
)

y_pred = md.predict_proba(X_test)[:,1]
print(metrics.roc_auc_score(y_test, y_pred))

Example #13
X_test = X_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])].to_numpy()
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]


cat_idxs = [ i for i, col in enumerate(X_all.columns) if col in vars_cat]
cat_dims = [ len(np.unique(X_all.iloc[:,i].values)) for i in cat_idxs]
cat_emb_dim = np.maximum(1, np.floor(np.log(cat_dims))).astype(int)  # floor(log(2)) would be 0, so clamp to at least 1


md = TabNetClassifier(cat_idxs=cat_idxs,
                       cat_dims=cat_dims,
                       cat_emb_dim=cat_emb_dim,
                       ## optimizer_fn=torch.optim.Adam,
                       ## optimizer_params=dict(lr=2e-2),
                       ## mask_type='sparsemax',
                       n_steps=1,
)

%%time
md.fit( X_train=X_train, y_train=y_train,
    max_epochs=10, patience=0,
    ## batch_size=1024, virtual_batch_size=128,
    ## weights=0,
)


y_pred = md.predict_proba(X_test)[:,1]
print(metrics.roc_auc_score(y_test, y_pred))


Example #14
            clf = TabNetClassifier(
                optimizer_params=dict(lr=2e-2),
                verbose=0,
                scheduler_params={
                    "step_size": 50,  # how to use learning rate scheduler
                    "gamma": 0.9
                },
                scheduler_fn=torch.optim.lr_scheduler.StepLR,
                mask_type='entmax'  # "sparsemax"
            )

            clf.fit(X_train=X_tr,
                    y_train=y_tr,
                    eval_set=[(X_val, y_val)],  # eval_set replaces the old X_valid/y_valid arguments
                    max_epochs=epoch,
                    patience=epoch,
                    batch_size=1024,
                    virtual_batch_size=128,
                    num_workers=0,
                    weights=1,
                    drop_last=False)

            test_pred += clf.predict_proba(X_test)[:, 1]

        test_pred /= 5

        test_auc = roc_auc_score(y_test, test_pred)

        print('test auc:', test_auc)
        res[data_name] = test_auc
Example #15
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]


md = TabNetClassifier(cat_idxs=cat_idxs,
                       cat_dims=cat_dims,
                       cat_emb_dim=1,
                       optimizer_fn=torch.optim.Adam,
                       optimizer_params=dict(lr=2e-2),
                       scheduler_params={"step_size":50, # how to use learning rate scheduler
                                         "gamma":0.9},
                       scheduler_fn=torch.optim.lr_scheduler.StepLR,
                       mask_type='entmax' # "sparsemax"
)

%%time
md.fit( X_train=X_train, y_train=y_train,
    eval_set=[(X_train, y_train)],
    eval_name=['train'],
    eval_metric=['auc'],
    max_epochs=10, patience=0,
    batch_size=1024, virtual_batch_size=128,
    num_workers=0,
    weights=1,
    drop_last=False
)


y_pred = md.predict_proba(X_test)[:,1]
print(metrics.roc_auc_score(y_test, y_pred))

Example #16
y_test = y_all[d_train.shape[0]:(d_train.shape[0]+d_test.shape[0])]


cat_idxs = [ i for i, col in enumerate(X_all.columns) if col in vars_cat]
cat_dims = [ len(np.unique(X_all.iloc[:,i].values)) for i in cat_idxs]
cat_emb_dim = np.maximum(1, np.floor(np.log(cat_dims))).astype(int)  # floor(log(2)) would be 0, so clamp to at least 1


md = TabNetClassifier(cat_idxs=cat_idxs,
                       cat_dims=cat_dims,
                       cat_emb_dim=cat_emb_dim,
                       ## optimizer_fn=torch.optim.Adam,
                       ## optimizer_params=dict(lr=2e-2),
                       ## mask_type='sparsemax',
                       n_steps=1,
)

%%time
md.fit( X_train=X_train, y_train=y_train,
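    # note: early stopping below monitors the held-out test fold ('test_EVIL'),
    # deliberately leaking test data into model selection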
    eval_set=[(X_train, y_train), (X_test, y_test)],
    eval_name=['train', 'test_EVIL'],
    eval_metric=['auc'],
    max_epochs=100, patience=100,
    ## batch_size=1024, virtual_batch_size=128,
    ## weights=0,
)

y_pred = md.predict_proba(X_test)[:,1]
print(metrics.roc_auc_score(y_test, y_pred))

Example #17
import torch
N_D = 16
N_A = 16
N_INDEP = 2
N_SHARED = 2
N_STEPS = 1  #2
MASK_TYPE = "sparsemax"
GAMMA = 1.5
BS = 128  #512
MAX_EPOCH = 20  # 20
PRETRAIN = True

clf = TabNetClassifier()
clf.fit(
    X,
    y,
    #   eval_set=[(X_valid, y_valid)]
)

# if PRETRAIN:
#     pretrain_params = dict(n_d=N_D, n_a=N_A, n_steps=N_STEPS,  #0.2,
#                            n_independent=N_INDEP, n_shared=N_SHARED,
#                            cat_idxs=cat_idxs,
#                            cat_dims=cat_dims,
#                            cat_emb_dim=cat_emb_dims,
#                            gamma=GAMMA,
#                            lambda_sparse=0., optimizer_fn=torch.optim.Adam,
#                            optimizer_params=dict(lr=2e-2),
#                            mask_type=MASK_TYPE,
#                            scheduler_params=dict(mode="min",
#                                                  patience=3,
Example #18
categorical_val.remove('answer_creation_date')
categorical_val.remove('group_creation_date')
categorical_val.remove('request_creation_date')
categorical_val.remove('victim_of_violence_type')
date_columns = [
    'answer_creation_date', 'group_creation_date', 'request_creation_date'
]
# We transform the dataframe into encoded features and target
X, y = preprocess(requests_train, categorical_val, date_columns)

X_train, X_val, y_train, y_val = train_test_split(X,
                                                  y,
                                                  test_size=0.3,
                                                  random_state=42)
X_train, y_train = preprocess_for_tabnet(X_train, y_train)
X_val, y_val = preprocess_for_tabnet(X_val, y_val)
clf = TabNetClassifier(optimizer_fn=torch.optim.Adam,
                       optimizer_params=dict(lr=1e-1),
                       device_name=device)  # pass the device via device_name rather than setting clf.device afterwards
weights = {0: 1, 1: 10, 2: 10**2, 3: 10**3}
clf.fit(
    X_train=X_train,
    y_train=y_train,  # train features and train targets
    eval_set=[(X_val, y_val)],  # validation features and targets (eval_set replaces the old X_valid/y_valid arguments)
    weights=weights,
    max_epochs=20,  # maximum number of epochs during training
    patience=5,  # number of consecutive non-improving epochs before early stopping
    batch_size=16,  # training batch size
)
Example #19
    def fit(self, x_train, y_train, kf_splits=5, tabnet_type=None):
        def _get_tabnet_params(tabnet_type):
            if (tabnet_type is None):
                tabnet_params = dict(
                    verbose=40,
                    optimizer_fn=torch.optim.Adam,
                    optimizer_params=dict(lr=1e-2, weight_decay=1e-5),
                    scheduler_params=dict(max_lr=0.05,
                                          steps_per_epoch=x_train.shape[0] //
                                          128,
                                          epochs=300),
                    scheduler_fn=torch.optim.lr_scheduler.OneCycleLR)
                fit_params = dict(batch_size=1024,
                                  virtual_batch_size=128,
                                  eval_metric='accuracy')
            elif (tabnet_type == 'TabNet-S'):
                tabnet_params = dict(
                    n_d=8,
                    n_a=8,
                    lambda_sparse=0.0001,
                    momentum=0.1,
                    n_steps=3,
                    gamma=1.2,
                    verbose=40,
                    optimizer_fn=torch.optim.Adam,
                    optimizer_params=dict(lr=0.01),
                    scheduler_params=dict(step_size=8000, gamma=0.05),
                    scheduler_fn=torch.optim.lr_scheduler.StepLR)
                fit_params = dict(batch_size=4096,
                                  virtual_batch_size=256,
                                  eval_metric='mse')
            else:
                print('[ERROR] Unknown tabnet_type: {}'.format(tabnet_type))
                quit()

            # --- check problem ---
            if fit_params['eval_metric'] in [
                    'auc', 'accuracy', 'balanced_accuracy', 'logloss'
            ]:
                problem = 'classification'
            elif fit_params['eval_metric'] in ['mse', 'mae', 'rmse', 'rmsle']:
                problem = 'regression'
            else:
                print('[ERROR] Unknown eval_metric: {}'.format(
                    fit_params['eval_metric']))
                quit()

            return tabnet_params, fit_params, problem

        kf = KFold(n_splits=kf_splits, shuffle=False)
        scores = []
        self.tabnet_models = []

        tabnet_params, fit_params, problem = _get_tabnet_params(tabnet_type)

        for i, (train_index,
                val_index) in enumerate(kf.split(x_train, y_train)):
            unsupervised_model = TabNetPretrainer(**tabnet_params)
            if (problem == 'classification'):
                tabnet_model = TabNetClassifier(**tabnet_params)
            elif (problem == 'regression'):
                tabnet_model = TabNetRegressor(**tabnet_params)
            else:
                print('[ERROR] Unknown problem: {}'.format(problem))
                quit()

            x_tr = x_train[train_index]
            x_val = x_train[val_index]
            y_tr = y_train[train_index]
            y_val = y_train[val_index]

            unsupervised_model.fit(x_tr,
                                   eval_set=[x_val],
                                   patience=300,
                                   max_epochs=5000,
                                   pretraining_ratio=0.8)

            tabnet_model.fit(
                x_tr,
                y_tr,
                eval_set=[(x_val, y_val)],
                eval_metric=[fit_params['eval_metric']],
                batch_size=fit_params['batch_size'],
                virtual_batch_size=fit_params['virtual_batch_size'],
                patience=300,
                max_epochs=5000,
                from_unsupervised=unsupervised_model)

            self.tabnet_models.append(tabnet_model)
            prediction = tabnet_model.predict(x_val)
            if (problem == 'classification'):
                scores.append(accuracy_score(y_val, prediction))
            elif (problem == 'regression'):
                scores.append(mean_squared_error(y_val, prediction))
            else:
                print('[ERROR] Unknown problem: {}'.format(problem))
                quit()

            if (i == 0):
                feature_importances = tabnet_model.feature_importances_.copy()
            else:
                feature_importances = np.vstack(
                    (feature_importances, tabnet_model.feature_importances_))

        print(scores)
        print(np.mean(scores))

        return scores, feature_importances
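The class stores one fitted model per fold in self.tabnet_models; a hypothetical companion predict that averages them (classification shown, mirroring the scoring style of the other examples):

    def predict(self, x_test):
        # hypothetical companion to fit(): average positive-class probabilities
        # over the per-fold models trained above
        fold_preds = [m.predict_proba(x_test)[:, 1] for m in self.tabnet_models]
        return np.mean(fold_preds, axis=0)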