def train_two_class_classifier(self, nu=0.1):
        positive_feature_matrix = self.construct_feature_matrix(
            self.positive_examples)
        negative_feature_matrix = self.construct_feature_matrix(
            self.negative_examples)
        positive_labels = [1] * positive_feature_matrix.shape[0]
        negative_labels = [0] * negative_feature_matrix.shape[0]

        X = np.concatenate((positive_feature_matrix, negative_feature_matrix))
        Y = np.concatenate((positive_labels, negative_labels))

        if negative_feature_matrix.shape[
                0] >= 10:  # TODO: Implement gamma="auto" for thundersvm
            kwargs = {
                "kernel": "rbf",
                "gamma": "auto",
                "class_weight": "balanced"
            }
        else:
            kwargs = {"kernel": "rbf", "gamma": "auto"}

        self.optimistic_classifier = SVC(**kwargs)
        self.optimistic_classifier.fit(X, Y)

        training_predictions = self.optimistic_classifier.predict(X)
        positive_training_examples = X[training_predictions == 1]

        if positive_training_examples.shape[0] > 0:
            self.pessimistic_classifier = OneClassSVM(kernel="rbf", nu=nu)
            self.pessimistic_classifier.fit(positive_training_examples)
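# A minimal standalone sketch (not part of the original example) of the
# optimistic/pessimistic pattern above, on synthetic 2-D data; every name here
# is illustrative. A numeric gamma is used in case gamma="auto" is not
# supported by the installed thundersvm build (see the TODO above).
import numpy as np
from thundersvm import SVC, OneClassSVM

rng = np.random.RandomState(0)
X = np.concatenate((rng.normal(0.0, 1.0, (50, 2)),   # positives around 0
                    rng.normal(4.0, 1.0, (50, 2))))  # negatives around 4
Y = np.concatenate(([1] * 50, [0] * 50))

optimistic = SVC(kernel="rbf", gamma=0.5, class_weight="balanced")
optimistic.fit(X, Y)

# Fit the conservative one-class boundary only on points the broad classifier
# already accepts, mirroring train_two_class_classifier above.
pessimistic = OneClassSVM(kernel="rbf", nu=0.1)
pessimistic.fit(X[optimistic.predict(X) == 1])

query = np.array([[0.5, -0.2]])
print(optimistic.predict(query)[0] == 1,    # inside the broad region?
      pessimistic.predict(query)[0] == 1)   # inside the conservative region?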
Example No. 2
    def train_kSVM(X_train, X_test, y_train, y_test, split_ID):
        kSVM = SVC(kernel='rbf',
                   degree=3,
                   gamma='auto',
                   coef0=0.0,
                   C=1.0,
                   tol=0.001,
                   probability=False,
                   class_weight='balanced',
                   shrinking=False,
                   cache_size=None,
                   verbose=False,
                   max_iter=-1,
                   n_jobs=-1,
                   max_mem_size=-1,
                   random_state=None,
                   decision_function_shape='ovo')
        kSVM_model = kSVM.fit(X_train, y_train)

        kSVM_preds = kSVM_model.predict(X_test)
        prec, rec, f_1, supp = prf(y_test, kSVM_preds, average=None)
        class_rep = sklearn.metrics.classification_report(y_test, kSVM_preds)
        exp.log_other('Classification Report' + split_ID, class_rep)
        mcc = sklearn.metrics.matthews_corrcoef(y_test, kSVM_preds)

        # If this is the first split, report the model parameters to Comet
        if split_ID == '0':
            exp.log_parameters(kSVM_model.get_params())
        return prec, rec, f_1, supp, mcc
Example No. 3
    def get_classifier(self, traindata, kf):

        x_tr, x_te, y_tr, y_te = fac.to_kfold(traindata, kf)
        acc_max, bestK, acc = 0, 0, [[] for a in range(kf)]

        for i in range(kf):

            # print('DOAO round', i, 'begin')
            # svm 00
            print('test00')
            clf_svm = SVC()
            clf_svm.fit(x_tr[i], y_tr[i].ravel())
            label_svm = clf_svm.predict(x_te[i])
            acc[i].append(fac.get_acc(y_te[i], label_svm)[0])

            # KNN 01
            print('test01')
            acc_k = []
            aux_k = [3, 5, 7]
            # for k in range(3, 12, 2):
            for k in aux_k:
                clf_knn = KNN_GPU(k=k)
                clf_knn.fit(x_tr[i], y_tr[i])
                label_knn = clf_knn.predict(x_te[i])
                acc_k.append(fac.get_acc(y_te[i], label_knn)[0])
            acc[i].append(max(acc_k))
            bestK = aux_k[acc_k.index(max(acc_k))]

            # LR 02
            print('test02')
            clf_lr = LogisticRegression()
            clf_lr.fit(x_tr[i], y_tr[i])
            label_LR = clf_lr.predict(x_te[i])
            acc[i].append(fac.get_acc(y_te[i], label_LR)[0])

            # CART 03 (a DecisionTreeClassifier; the clf_xgb name is a leftover)
            print('test03')
            clf_xgb = DecisionTreeClassifier()
            clf_xgb.fit(x_tr[i], y_tr[i])
            label_xgb = clf_xgb.predict(x_te[i])
            acc[i].append(fac.get_acc(y_te[i], label_xgb)[0])

            # RF 04 (implemented with TGBMClassifier)
            print('test04')

            clf_rf = TGBMClassifier()
            clf_rf.fit(x_tr[i], y_tr[i])
            label_rf = clf_rf.predict(x_te[i])
            acc[i].append(fac.get_acc(y_te[i], label_rf)[0])

            print('DOAO round', i, 'end')

        acc = np.array(acc)
        acc_mean = acc.mean(axis=0)

        # fun_best = np.where(acc_mean == max(acc_mean))
        fun_best = np.argmax(acc_mean)

        return fun_best, bestK
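# Note (inferred from the append order above): the returned fun_best index maps
# 0 -> SVC, 1 -> KNN_GPU(bestK), 2 -> LogisticRegression,
# 3 -> DecisionTreeClassifier, 4 -> TGBMClassifier, which is the same order
# consumed by the cf[i] branches in fun_predict (Example No. 7 below).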
Example No. 4
def accuracy(trials: ts.SparseRec,
             labels: np.ndarray,
             transform: str = "none",
             repeats: int = 1000,
             train_size: int = 30,
             test_size: int = 20,
             validate: bool = False,
             **kwargs) -> list:
    """Estimate prediction accuracy from a simple SVC.
    Each score is an accuracy_score computed on one of {repeats} resampled
    train/test splits, and the full distribution of scores is returned.
    Args:
        trials: the recording, neuron x trial x sample_points
        labels: ids of the trials belonging to different clusters
        transform: how we get the predictors
            "none": flatten neuron x sample_points and take PCs
            "mean": temporal mean so each neuron has one value per trial, then take PCs
            "corr": pairwise correlations between neurons and take PCs
        repeats: number of repeats of resampling train/test sets
        train_size, test_size: number of trials in each resampled split
        validate: if True, score on the held-out test set; otherwise on the training set
    Returns:
        the distribution of prediction scores
    """
    X, y = trials.values, quantize(labels, groups=1)
    trial_mask = X.min(axis=2).max(axis=0) > 0
    X, y = X[:, trial_mask, :], y[trial_mask]
    X = scale_features(X, (0, 2))
    if transform == "none":
        X = np.swapaxes(X, 0, 1).reshape(X.shape[1], X.shape[0] * X.shape[2])
    elif transform == "corr":  # get inter-neuron correlation for each of the trials
        X = np.array([
            np.corrcoef(x)[np.triu_indices(x.shape[0], 1)]
            for x in np.rollaxis(X, 1)
        ])
    elif transform == "mean":
        X = np.swapaxes(X.mean(-1), 0, 1)
    else:
        raise ValueError(
            "[precision] <transform> must be one of 'none', 'corr', or 'mean'."
        )
    X = PCA(PC_NO).fit_transform(X) if X.shape[1] > PC_NO else X
    params = {"kernel": "linear", "gamma": "auto"}
    params.update(kwargs)
    svc = SVC(**params)
    splitter = split_data(X, y, repeats, train_size, test_size)
    if validate:
        result = [
            accuracy_score(y_te,
                           svc.fit(X_tr, y_tr).predict(X_te))
            for X_tr, y_tr, X_te, y_te in splitter
        ]
    else:
        result = [
            accuracy_score(y_tr,
                           svc.fit(X_tr, y_tr).predict(X_tr))
            for X_tr, y_tr, _, _ in splitter
        ]
    return [x for x in result if (x is not None and x > 0.0)]
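# Note (not part of the original example): keyword arguments forwarded through
# **kwargs override the defaults above, so a hypothetical call such as
# accuracy(rec, labels, transform="mean", kernel="rbf", C=10.0) swaps the
# linear kernel for an RBF one.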
Example No. 5
def predict(args):
    """Loads model from file, makes predictions and computes metrics.

    All created files are saved to args.out_dir directory if provided,
    or to results_<task> otherwise.

    Creates files:
    conf_matrix.png file with confusion matrix,
    report.txt with various metrics,
    preds_{task}.npy with raw predictions.
    """
    if args.task.startswith('4'):
        test_features = np.load('test_features_3b.npy')
        test_labels = get_labels(split='test')
        y_true = np.array([CLASSES.index(l) for l in test_labels])

        out_dir = args.out_dir or f'results_{args.task}'

        for c in [0.001, 0.01, 0.1, 1.0, 10]:
            svc = SVC()
            svc.load_from_file(f'svc_{args.task}_C_{c}')

            y_pred = svc.predict(test_features)

            evaluate(y_true, y_pred, None, CLASSES,
                     os.path.join(out_dir, f'C_{c}'))
    else:
        model: Model = load_model(
            f'model_fc_{args.task}.h5',
            custom_objects={'top_5_accuracy': top_5_accuracy})

        test_generator = create_data_generator(split='test',
                                               target_size=args.target_size,
                                               batch_size=args.batch_size,
                                               shuffle=False)

        # get predictions
        preds = model.predict_generator(test_generator, verbose=1)

        # create output directory
        out_dir = args.out_dir or f'results_{args.task}'
        os.makedirs(out_dir, exist_ok=True)

        # save numpy array with predictions
        save_file = os.path.join(out_dir, f'preds_{args.task}.npy')
        np.save(save_file, preds)
        print(f'Predictions saved to: {save_file}')

        # first, prepare y_pred, y_true and class names
        # y_pred are classes predicted with the highest probability
        y_pred = np.array([np.argmax(x) for x in preds])
        # since we did not shuffle data in data generator,
        # classes attribute of the generator contains true labels for each sample
        y_true = np.array(test_generator.classes)
        # class_names = list(test_generator.class_indices.keys())
        # class_names.sort(key=lambda x: test_generator.class_indices[x])

        evaluate(y_true, y_pred, preds, CLASSES, out_dir)
Example No. 6
    def get_classifier(self, train, kf):

        x_tr, x_te, y_tr, y_te = fac.to_kfold(train, kf)
        acc_max, bestK, acc = 0, 0, [[] for a in range(kf)]

        for i in range(kf):

            # print('DECOC round', i, 'begin')
            # svm 00
            clf_svm = SVC()
            clf_svm.fit(x_tr[i], y_tr[i].ravel())
            label_svm = clf_svm.predict(x_te[i])
            acc[i].append(fac.get_acc(y_te[i], label_svm)[0])

            # KNN 01
            acc_k = []
            aux_k = [3, 5, 7]
            # for k in range(3, 12, 2):
            for k in aux_k:
                clf_knn = KNN_GPU(k=k)
                clf_knn.fit(x_tr[i], y_tr[i])
                label_knn = clf_knn.predict(x_te[i])
                acc_k.append(fac.get_acc(y_te[i], label_knn)[0])
            acc[i].append(max(acc_k))
            bestK = aux_k[acc_k.index(max(acc_k))]

            # # LR 02
            # clf_lr = LR_GPU()
            # clf_lr.fit(x_tr[i], y_tr[i])
            # label_LR = clf_lr.predicted(x_te[i])
            # acc[i].append(fac.get_acc(y_te[i], label_LR)[0])

            # LR 02
            clf_lr = LogisticRegression()
            clf_lr.fit(x_tr[i], y_tr[i])
            label_LR = clf_lr.predict(x_te[i])
            acc[i].append(fac.get_acc(y_te[i], label_LR)[0])

            # CART 03
            clf_cart = DecisionTreeClassifier()
            clf_cart.fit(x_tr[i], y_tr[i])
            label_cart = clf_cart.predict(x_te[i])
            acc[i].append(fac.get_acc(y_te[i], label_cart)[0])

            # RF 04 (implemented with TGBMClassifier)
            clf_rf = TGBMClassifier()
            clf_rf.fit(x_tr[i], y_tr[i].ravel())
            label_rf = clf_rf.predict(x_te[i])
            acc[i].append(fac.get_acc(y_te[i], label_rf)[0])

            print('DECOC round', i, 'end')

        acc = np.array(acc)
        acc_mean = acc.mean(axis=0)
        # fun_best = np.where(acc_mean == max(acc_mean))
        fun_best = np.argmax(acc_mean)

        return fun_best, bestK
Example No. 7
    def fun_predict(self, x_te, C, D, L):
        print('func_predict')

        num = len(D)
        cf = C[0]
        ck = C[1]

        allpre = np.zeros((len(x_te), num))
        for i in range(num):
            train = D[i]
            traindata = train[:, 0:-1]
            trainlabel = train[:, -1]

            if cf[i] == 0:
                # svm
                print('SVM predict')
                clf_svm = SVC()
                clf_svm.fit(traindata, trainlabel.ravel())
                label_svm = clf_svm.predict(x_te)
                allpre[:, i] = label_svm
            elif cf[i] == 1:
                # knn
                clf_knn = KNN_GPU(k=ck[i])
                clf_knn.fit(traindata, trainlabel)
                label_knn = clf_knn.predict(x_te)
                allpre[:, i] = label_knn
            elif cf[i] == 2:
                # LR
                print('LR predict')
                clf_lr = LogisticRegression()
                clf_lr.fit(traindata, trainlabel.ravel())
                label_LR = clf_lr.predict(x_te)
                allpre[:, i] = label_LR
            elif cf[i] == 3:
                # CART
                print('CART predict')
                clf_xgb = DecisionTreeClassifier()
                clf_xgb.fit(traindata, trainlabel)
                label_xgb = clf_xgb.predict(x_te)
                allpre[:, i] = label_xgb
            elif cf[i] == 4:
                # Rf
                print('RF predict')
                clf_rf = TGBMClassifier()
                clf_rf.fit(traindata, trainlabel.ravel())
                label_rf = clf_rf.predict(x_te)
                allpre[:, i] = label_rf
            else:
                print('error !!!! DOAO.fun_predict')

            label = L[i]
            for j in range(len(x_te)):
                allpre[j, i] = label[0] if allpre[j, i] == 0 else label[1]

        # print('predict end for')
        pre = mode(allpre, axis=1)[0]
        return pre
Example No. 8
    def funcPreEDOVO(self, x_test, y_test, C, D):

        numC = np.asarray(C).shape[0]
        num_set = len(y_test)
        allpre = np.zeros([num_set, numC])

        for i in range(numC):

            train = D[i]
            traindata = np.array(train[:, 0:-1])
            trainlabel = np.array(train[:, -1], dtype='int64')
            if C[i, 0] == 0:
                print('test0')
                # svm
                clf_svm = SVC()
                clf_svm.fit(traindata, trainlabel.ravel())
                label_svm = clf_svm.predict(x_test)
                allpre[:, i] = label_svm
            elif C[i, 0] == 1:
                # print('test1')
                # knn
                clf_knn = KNN_GPU(k=C[i][1])
                # clf_knn = KNN_torch(k=C[i][1])
                clf_knn.fit(traindata, trainlabel)
                label_knn = clf_knn.predict(x_test)
                allpre[:, i] = label_knn.ravel()
            elif C[i, 0] == 2:
                print('test2')
                # LR
                clf_lr = LogisticRegression()
                clf_lr.fit(traindata, trainlabel)
                label_LR = clf_lr.predict(x_test)
                allpre[:, i] = label_LR
                # # LR
                # clf_lr = LR_GPU()
                # clf_lr.fit(traindata, trainlabel)
                # label_LR = clf_lr.predicted(x_test)
                # allpre[:, i] = label_LR
            elif C[i, 0] == 3:
                print('test3')
                # CART
                clf_cart = DecisionTreeClassifier()
                clf_cart.fit(traindata, trainlabel)
                label_cart = clf_cart.predict(x_test)
                allpre[:, i] = label_cart
            elif C[i, 0] == 4:
                print('test4')
                # RandomForest
                clf_ada = TGBMClassifier()
                clf_ada.fit(traindata, trainlabel.ravel())
                label_ada = clf_ada.predict(x_test)
                allpre[:, i] = label_ada

            else:
                print('error !!!! DECOC.funcPreEDOVO')

        return allpre
Example No. 9
def construct_linear_classifier(linear_type: LinearType):
    if linear_type == LinearType.LOGISTIC_REGRESSION:
        return LogisticRegression()
    elif linear_type == LinearType.SVM_LINEAR:
        return SVC(kernel='linear', probability=True)
    elif linear_type == LinearType.SVM_QUADRATIC:
        return SVC(kernel='poly', degree=2, probability=True)
    elif linear_type == LinearType.SVM_RBF:
        return SVC(kernel='rbf', probability=True)
    else:
        raise ValueError(f"Unknown linear type: {linear_type}")
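# Usage sketch (not part of the original example; assumes the LinearType enum
# is importable from the surrounding module):
#
#     clf = construct_linear_classifier(LinearType.SVM_RBF)
#     clf.fit(X_train, y_train)            # hypothetical data
#     probs = clf.predict_proba(X_test)    # enabled by probability=True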
Example No. 10
    def check_jthunder(self, clf: thundersvm.SVC):
        self.assertTrue(
            np.allclose(
                clf.decision_function(self.ds_test.xs).flatten(),
                -jthunder.decision_function(clf, self.jtest_xs),
                rtol=1e-4,
                atol=1e-4,
            ))

        self.assertTrue(
            all(
                clf.predict(self.ds_test.xs).astype(int) ==
                jthunder.predict(clf, self.jtest_xs)))

        self.assertTrue(
            np.allclose(jthunder.norm2(clf), jthunder.norm2_naive(clf)))
Example No. 11
def SVM_Model(c=1, ker='rbf', gam=0.5, max_iter=5000, class_weight='balanced'):
    model = SVC(C=c,
                kernel=ker,
                gamma=gam,
                probability=True,
                class_weight=class_weight,
                max_iter=max_iter,
                verbose=True)
    return model
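# Usage sketch (not part of the original example): the factory returns an
# unfitted thundersvm SVC, so training follows the usual sklearn flow. The
# data below is synthetic and purely illustrative.
import numpy as np

X = np.random.RandomState(0).normal(size=(40, 3))
y = (X[:, 0] > 0).astype(int)

model = SVM_Model(c=10, ker='rbf', gam=0.5)
model.fit(X, y)
print(model.predict_proba(X[:2]))  # available because probability=True above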
Example No. 12
    def train_one_class_svm(self,
                            nu=0.1
                            ):  # TODO: Implement gamma="auto" for thundersvm
        positive_feature_matrix = self.construct_feature_matrix(
            self.positive_examples)
        self.pessimistic_classifier = OneClassSVM(kernel="rbf", nu=nu)
        self.pessimistic_classifier.fit(positive_feature_matrix)

        self.optimistic_classifier = OneClassSVM(kernel="rbf", nu=nu / 10.)
        self.optimistic_classifier.fit(positive_feature_matrix)
Example No. 13
    def funClassifier(self, x_train, y_train, type):
        labels = np.unique(y_train)
        nc = len(labels)
        code = self.funECOCim(nc, type)

        train, numn = [], []
        for i in range(nc):
            idi = np.where(y_train == labels[i])[0]
            train.append(x_train[idi])
            numn.append(len(idi))

        num1 = np.size(code, 1)
        ft = []
        for t in range(num1):
            Dt, DtLabel, flagDt, numAp, numAn, numNp, numNn = np.asarray(
                []), [], 0, 0, 0, 0, 0
            for i in range(nc):
                if code[i, t] == 1:
                    if Dt.shape[0] == 0:
                        Dt = train[i]
                    else:
                        Dt = np.append(Dt, train[i], axis=0)
                    DtLabel[flagDt:flagDt + numn[i]] = [1] * numn[i]
                    flagDt += numn[i]
                    numAp += 1
                    numNp += numn[i]
                elif code[i, t] == -1:
                    if Dt.shape[0] == 0:
                        Dt = train[i]
                    else:
                        Dt = np.append(Dt, train[i], axis=0)
                    DtLabel[flagDt:flagDt + numn[i]] = [0] * numn[i]
                    flagDt += numn[i]
                    numAn += 1
                    numNn += numn[i]

            clf_svc = SVC()
            clf_svc.fit(np.array(Dt), np.array(DtLabel).ravel())
            ft.append(clf_svc)

        return code, ft, labels
Example No. 14
def main():
    INPUTS_DIR = os.getenv('VH_INPUTS_DIR', '../data')
    OUTPUTS_DIR = os.getenv('VH_OUTPUTS_DIR', './models')
    data_name = 'sql_trainer_filtered_attempts.csv'
    data_path = os.path.join(INPUTS_DIR, 'filtered-data', data_name)

    X_train, X_val, y_train, y_val = load_data(data_path, 100)

    linear_scores = dict()

    split_amount = 5  # 5 is default
    kf_splits = list(KFold(n_splits=split_amount).split(X_train))

    C_key = 1

    for i in range(0, len(kf_splits)):
        clf = SVC(kernel='linear', C=1)
        res = train_classifier_with_split(kf_splits, i, clf, X_train, y_train,
                                          'linearSVM', upload_tsvm_classifier,
                                          OUTPUTS_DIR)
        linear_scores.setdefault(C_key, []).append(res['score'])

    for key, val in linear_scores.items():
        print('%s: %s' % (key, val))
Example No. 15
 print("Creating model......")
 
 args.model = args.model.lower()
 args.svm = args.svm.lower()
 
 if args.model == 'logistic':
     # Logistic -> 0.9651
     model = LogisticRegression()
 elif args.model == 'lda':
     # LDA -> 0.9656
     model = LinearDiscriminantAnalysis()
 elif args.model == 'svm':
     
     if args.svm == 'linear':
         # SVM-linear -> 0.9658
         model = SVC(kernel='linear')
     elif args.svm == 'rbf':
         # SVM-rbf: gamma = 0.8 -> 0.9764
         model = SVC(kernel='rbf', gamma=0.8)
     elif args.svm == 'poly':
         # SVM-polynomial: degree = 3, gamma = 0.8 -> 0.9778
         model = SVC(kernel='polynomial', degree=3, gamma=0.8)
     else:
         raise NotImplementedError
 elif args.model == 'cnn':
     # CNN -> 0.9869
     model = CNN()
 else:
     raise NotImplementedError
 
 if args.train:
Example No. 16
        vect = TfidfVectorizer(
            stop_words="english",
            strip_accents="unicode",
            binary=False,
            max_df=0.75,
            min_df=1,
            lowercase=True,
            use_idf=False,
            smooth_idf=True,
            sublinear_tf=True,
        )

        sampler = SMOTEENN(random_state=SEED)
        clf = SVC(
            kernel="linear",
            C=0.1,
            probability=True,
            decision_function_shape="ovo",
            random_state=SEED,
        )

        # Create the Pipeline
        pipeline = Pipeline(
            steps=[
                ("vect", vect),
                ("sample", sampler),
                ("clf", clf),
            ],
            verbose=10,
        )

        # Split and Train
Example No. 17
            mesh2.update_cell_ids_and_points()
            mesh2.cell_attributes['Label'] = refine_labels
            mesh2.to_vtp(os.path.join(output_path, '{}_d_predicted_refined.vtp'.format(i_sample[:-4])))

            # upsampling
            print('\tUpsampling...')
            if mesh.cells.shape[0] > 100000:
                target_num = 100000 # set max number of cells
                ratio = 1 - target_num/mesh.cells.shape[0] # calculate ratio
                mesh.mesh_decimation(ratio)
                print('Original mesh contains too many cells; simplified to {} cells'.format(mesh.cells.shape[0]))

            fine_cells = mesh.cells

            if upsampling_method == 'SVM':
                clf = SVC(kernel='rbf', gamma='auto', gpu_id=gpu_id)
                # train SVM
                clf.fit(cells, np.ravel(refine_labels))
                fine_labels = clf.predict(fine_cells)
                fine_labels = fine_labels.reshape([mesh.cells.shape[0], 1])
            elif upsampling_method == 'KNN':
                neigh = KNeighborsClassifier(n_neighbors=3)
                # train KNN
                neigh.fit(cells, np.ravel(refine_labels))
                fine_labels = neigh.predict(fine_cells)
                fine_labels = fine_labels.reshape([mesh.cells.shape[0], 1])

            mesh2 = Easy_Mesh()
            mesh2.cells = mesh.cells
            mesh2.update_cell_ids_and_points()
            mesh2.cell_attributes['Label'] = fine_labels
Example No. 18
def SVM():

    model = SVC()

    return model
Example No. 19
import numpy as np
import torch
import torch.nn as nn
from efficientnet_pytorch import EfficientNet
from sklearn.metrics import classification_report, confusion_matrix
from thundersvm import SVC
from torch.utils.data import DataLoader

from dataset import CustomDataset

# init svm
soft_svm = SVC(kernel="linear", C=0.1)
hard_svm = SVC(kernel="linear", C=100)
gaussian_svm = SVC(kernel="rbf")
sigmoid_svm = SVC(kernel="sigmoid")

svm = {
    "soft_svm": soft_svm,
    "hard_svm": hard_svm,
    "gaussian_svm": gaussian_svm,
    "sigmoid_svm": sigmoid_svm,
}
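# Usage sketch (not part of the original example): each entry follows the
# sklearn fit/predict API, so the dict can be iterated to compare kernels on
# the extracted features. train_x/train_y/test_x/test_y are hypothetical.
#
#     for name, clf in svm.items():
#         clf.fit(train_x, train_y)
#         print(name, (clf.predict(test_x) == test_y).mean())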


def extract_features(data_loader):
    model_name = "efficientnet-b4"

    net = EfficientNet.from_pretrained(model_name)
    print("Network loaded from pretrain")

    net.to(torch.device("cuda"))
Example No. 20
    logger = logging.getLogger(__name__)
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    current_app = DummyFlaskApp(logger)

# instantiate classifier objects
CLASSIFIERS = [
    # MultinomialNB(),
    # DecisionTreeClassifier(),
    # KNeighborsClassifier(3),
    # DecisionTreeClassifier(),
    # SVC(probability=True, kernel="linear", cache_size=1000),
    # XGBClassifier(n_jobs=PROCESSES, objective="binary:logistic"),
    SVC(kernel="linear", C=10, probability=True, decision_function_shape="ovr")
]

VECTORIZER = None


def get_vectorizer():
    global VECTORIZER
    if not VECTORIZER:
        VECTORIZER = build_vectorizer()
    return VECTORIZER


def build_vectorizer():
    return TfidfVectorizer(
        norm="l1",
Example No. 21
def predict(y, z):
    clf = SVC(kernel="linear")
    clf.load_from_file("./model")
    return clf.predict([[y, z]])[0]
    #feature_tensor= tf.concat(0, [first_tensor, feature_var])
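# Note (not part of the original example): load_from_file expects a model that
# was previously persisted with thundersvm's save_to_file, e.g.
#
#     clf = SVC(kernel="linear")
#     clf.fit(X_train, y_train)   # hypothetical training data
#     clf.save_to_file("./model")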

Example No. 22
!wget https://developer.nvidia.com/compute/cuda/9.0/Prod/local_installers/cuda-repo-ubuntu1704-9-0-local_9.0.176-1_amd64-deb
!ls  # Check if required cuda 9.0 amd64-deb file is downloaded
!dpkg -i cuda-repo-ubuntu1704-9-0-local_9.0.176-1_amd64-deb
!ls /var/cuda-repo-9-0-local | grep .pub
!apt-key add /var/cuda-repo-9-0-local/7fa2af80.pub
!apt-get update
!sudo apt-get install cuda-9.0

!pip install thundersvm

#import thundersvm
from thundersvm import SVC

model = SVC(C=100, kernel='rbf', probability=True)  # probability=True is required by predict_proba below
model.fit(first_tensor, final_label)

svm_prediction = model.predict(first_tensor1)

svm_probability = model.predict_proba(first_tensor1)

label_test2 = []
for image , label in testloader:
    label1 = []
    for i in label:
        if i!= 1:
           j= 0
           label1.append(j)
        else:
Example No. 23
def polynomialSVM(whid):
    # Fits an RBF-kernel SVM (despite the function name) that classifies orders
    # for each warehouse, based on the percentage of products the warehouse of
    # focus can supply and the distance to that warehouse

    # Retrieve Dataset
    dataset = _is_order_optimized_('./assets/warehouse_' + str(whid) +
                                   '.csv', )
    X = dataset.drop('classifier', axis=1)
    Y = dataset["classifier"]

    # Splitting data into train and test
    xTrain, xTest, yTrain, yTest = train_test_split(X,
                                                    Y,
                                                    test_size=0.20,
                                                    random_state=0)

    # Feature scaling: fit on the training set only, then apply the same
    # transform to the test set
    sc = StandardScaler()
    xTrain = sc.fit_transform(xTrain)
    xTest = sc.transform(xTest)

    # Fitting Kernel SVM to the Training set.
    svcClassifier = SVC(
        kernel='rbf',
        random_state=0,
        max_mem_size=50000,
        n_jobs=8,
        C=100  # This value can vary for whether the margin is too 'hard' or too 'soft'
    )

    # pickling the files (serializing them and storing them)
    # This way, the model can run the data against other data
    svcClassifier.fit(xTrain, yTrain)
    svc_pickle = './assets/sv_pickle_rbf_' + str(whid) + '.sav'
    pickle.dump(svcClassifier, open(svc_pickle, 'wb'))

    # Predicting the test results
    polyPred = svcClassifier.predict(xTest)
    print(polyPred)

    # Confusion Matrix Print: SVM Classifier polyTest against the Test Labeled Data yTest
    print("Confusion Matrix")
    print(confusion_matrix(yTest, polyPred))
    print("\n")

    # Classification report
    print("Classification Report")
    print(classification_report(yTest, polyPred))
    print("\n")

    # Applying k-fold cross validation for accuracy purposes
    accuracies = cross_val_score(estimator=svcClassifier,
                                 X=xTrain,
                                 y=yTrain,
                                 cv=10)
    print(accuracies.mean())
    print(accuracies.std())

    # Visualising the Test set results
    from matplotlib.colors import ListedColormap
    X_set, y_set = xTest, yTest
    X1, X2 = np.meshgrid(
        np.arange(start=X_set[:, 0].min() - 1,
                  stop=X_set[:, 0].max() + 1,
                  step=0.01),
        np.arange(start=X_set[:, 1].min() - 1,
                  stop=X_set[:, 1].max() + 1,
                  step=0.01))
    plt.contourf(       # Creating the contouring lines
        X1,
        X2,
        svcClassifier.\
            predict(np.array([X1.ravel(), X2.ravel()]).T).\
                reshape(X1.shape),
        alpha = 0.5,
        cmap = ListedColormap(('blue', 'black'))
    )
    plt.xlim(X1.min(), X1.max())
    plt.ylim(X2.min(), X2.max())
    for i, j in enumerate(np.unique(y_set)):  # Creating the scatter plots
        plt.scatter(X_set[y_set == j, 0],
                    X_set[y_set == j, 1],
                    c=ListedColormap(('red', 'green'))(i),
                    label=j)

    # labeling each plot
    plt.title('Kernel SVM (Training Data) Warehouse: ' + str(whid))
    plt.xlabel('Distance From Warehouse')
    plt.ylabel('Percentage of Available Products')
    plt.legend()
    plt.savefig('./assets/RBF' + str(whid) + '_' + str(int(time.time())) +
                '.png')
    plt.close()
    return False
Example No. 24
    cas9 = 'sp'

features = pd.DataFrame(pd.read_hdf('../Experiment Data/deephf_without_gapped_x.h5', key='deephf'))
labels = pd.DataFrame(pd.read_hdf('../Experiment Data/deephf_y_' + cas9 + '.h5', key='deephf'))

data = pd.concat([features, labels], axis=1, ignore_index=True)

data = data.dropna().reset_index(drop=True)

train_data, test_data = train_test_split(data, test_size=0.15, random_state=1, stratify=data.iloc[:, -1])

rf = RandomForestClassifier(n_estimators=500, n_jobs=-1, random_state=1, verbose=2)

steps = [('SFM', SelectFromModel(estimator=rf, max_features=2899, threshold=-np.inf)),
         ('scaler', StandardScaler()),
         ('SVM', SVC(C=1, gamma='auto', kernel='rbf', cache_size=20000, verbose=True,
                                max_mem_size=6000, probability=True))]

train_x = train_data.iloc[:, :-1]
train_y = train_data.iloc[:, -1]
test_x = test_data.iloc[:, :-1]
test_y = test_data.iloc[:, -1]

if train_y.iloc[0] == 0:
    idx = 1
else:
    idx = 0

model = Pipeline(steps)
model.fit(train_x, train_y)

predict = model.predict(test_x)
Example No. 25
    print("acc %s" % score)
"""

if __name__ == "__main__":
    with open("./data/comment_new/vedio_vector_svm", "rb") as f:
        vedio_vector = pickle.load(f)
    with open("./train", "rb") as f:
        train_D = pickle.load(f)
    with open("./test", "rb") as f:
        test_D = pickle.load(f)
    train_X = []
    train_Y = []
    for f, L1 in train_D:
        train_X.append(vedio_vector[f])
        train_Y.append(m[L1])
    test_X = []
    test_Y = []
    for f, L1 in test_D:
        test_X.append(vedio_vector[f])
        test_Y.append(m[L1])

    train_X = np.array(train_X)
    train_Y = np.array(train_Y)
    test_X = np.array(test_X)
    test_Y = np.array(test_Y)

    clf = SVC()
    print(train_X.shape)
    print(train_Y.shape)
    clf.fit(train_X, train_Y)
    print("acc %s" % clf.score(test_X, test_Y))
Example No. 26
    def run(self):

        model = self.exp_data['model']
        base_workspace = self.exp_data['base_workspace']

        if model == 'ridge':
            space = [Real(1e-3, 1e+3, prior='log-uniform')]
            optimize_types = ['alpha']
            minimizer = MagnitudeAxisMinimizer(base_workspace, optimize_types,
                                               gp_minimize)
            minimizer.model = Ridge
            x0 = [1.0]
        elif model == 'kernel_ridge':
            space = [
                Categorical(['poly', 'rbf', 'sigmoid']),
                Real(1e-3, 1e+3, prior='log-uniform'),
                Integer(1, 8),
                Real(1e-6, 1e+1, prior='log-uniform'),
                Real(-10, 10)
            ]
            optimize_types = ['kernel', 'alpha', 'degree', 'gamma', 'coef0']
            minimizer = MagnitudeAxisMinimizer(base_workspace, optimize_types,
                                               gp_minimize)
            minimizer.model = KernelRidge
            x0 = ['poly', 1.0, 3, 1 / 300, 0]
        elif model == 'kernel_ridge_separation':
            space = [Real(1e-3, 1e+3, prior='log-uniform')]
            optimize_types = ['alpha']
            minimizer = MagnitudeAxisMinimizer(base_workspace, optimize_types,
                                               gp_minimize)
            minimizer.model_fixed_params = {
                'kernel': 'poly',
                'degree': 3,
                'gamma': 1 / 300,
                'coef0': 0
            }
            minimizer.model = KernelRidge
            x0 = [1.0]
        elif model == 'kernel_ridge_random':
            space = [Real(1e-3, 1e+3, prior='log-uniform')]
            optimize_types = ['alpha']
            minimizer = MagnitudeAxisMinimizer(base_workspace, optimize_types,
                                               gp_minimize)
            kernel = random.choice(
                ['poly', 'rbf', 'laplacian', 'sigmoid', 'cosine'])
            degree = random.randint(1, 10)
            coef = random.uniform(-5, 5)
            minimizer.model_fixed_params = {
                'kernel': kernel,
                'degree': degree,
                'gamma': None,
                'coef0': coef
            }
            minimizer.model = KernelRidge
            x0 = [1.0]
        elif model == 'nn':
            space = [
                Integer(16, 256),
                Real(1e-5, 1, prior='log-uniform'),
            ]
            optimize_types = ['n_hidden_units', 'lr']
            minimizer = MagnitudeAxisMinimizer(base_workspace, optimize_types,
                                               gp_minimize)
            minimizer.model_fixed_params = {
                'build_fn': self.build_nn,
                'epochs': 20,
                'batch_size': 256
            }
            minimizer.model = KerasRegressor
            x0 = [64, 0.001]
        elif model == 'kernel_svm':
            space = [
                Categorical(['poly', 'rbf', 'sigmoid']),
                Real(1e-3, 1e+3, prior='log-uniform'),
                Integer(1, 8),
                Real(1e-6, 1e+1, prior='log-uniform'),
                Real(-10, 10)
            ]
            optimize_types = ['kernel', 'C', 'degree', 'gamma', 'coef0']
            minimizer = MagnitudeAxisMinimizer(base_workspace, optimize_types,
                                               gp_minimize)
            minimizer.model_fixed_params = {
                'cache_size': 8000,
                'max_iter': 10000
            }
            minimizer.model = SVR
            x0 = ['poly', 1.0, 3, 1 / 300, 0]
        elif model == 'pca':
            if not hasattr(self, 'pca_component'):
                pca = PCA(n_components=1)
                _, X_nums, *_ = prepare_separation_data(
                    self.name.split('_')[0] + '.txt')
                pca.fit(X_nums)
                # pca.fit(np.concatenate([base_workspace['X'],base_workspace['X_test']]))
                self.pca_component = pca.components_  # 1xd
            error = self.evaluate_w(base_workspace['X_test'],
                                    self.pca_component,
                                    base_workspace['y_test'])
            return error
        elif model == 'proj_pca':
            if not hasattr(self, 'proj_pca_component'):
                X, X_nums, y_label, _ = prepare_separation_data(
                    self.name.split('_')[0] + '.txt')
                svc = SVC(kernel='linear',
                          degree=3,
                          gamma=1 / 300,
                          coef0=0,
                          C=1,
                          cache_size=4000,
                          class_weight='balanced',
                          verbose=True)

                svc.fit(X, y_label)
                beta = svc.coef_  # 1xd
                # X_pred = svc.decision_function(X) nx1
                # X_nums = np.concatenate([base_workspace['X'],base_workspace['X_test']])
                X_proj = X_nums - (
                    (svc.decision_function(X_nums) / beta @ beta.T) @ beta
                )  # nxd

                pca = PCA(n_components=1)
                pca.fit(X_proj)
                self.proj_pca_component = pca.components_  # 1xd
            error = self.evaluate_w(base_workspace['X_test'],
                                    self.proj_pca_component,
                                    base_workspace['y_test'])
            return error
        elif model == 'kernel_proj_pca':
            pass
        else:
            assert False

        res = minimizer.minimize(space, n_calls=40, verbose=True, x0=x0)
        if self.save_results:
            skopt.dump(res, self.name + '.pkl', store_objective=False)

        params = {name: v for name, v in zip(minimizer.optimize_types, res.x)}
        if hasattr(minimizer, 'model_fixed_params'):
            params = {**params, **minimizer.model_fixed_params}
        error = self.fit_test_best_model(minimizer.model, base_workspace['X'],
                                         base_workspace['y'],
                                         base_workspace['X_test'],
                                         base_workspace['y_test'], **params)
        return error
Example No. 27
print(X.shape)

X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.1,
                                                    random_state=0)

with open('settings/olivetti.json') as f:
    data = json.load(f)

for s in data['models']:
    print(s)

    m = SVC(kernel=s.get('kernel', 'rbf'),
            C=s.get('C', 10.0),
            coef0=s.get('coef0', 0.0),
            gamma=s.get('gamma', 'auto'),
            degree=int(s.get('degree', 3)),
            verbose=False)

    start = time.time()
    m.fit(X_train, y_train)
    end = time.time()
    print('fit time:           ', end - start)

    t = m.predict(X_train)
    print('training error:     ', 1 - accuracy_score(y_train, t))

    start = time.time()
    p = m.predict(X_test)
    end = time.time()
    print('prediction time:    ', end - start)
Example No. 28
                cells[i][3], cells[i][4], cells[i][
                    5] = mesh._polydata.GetPoint(
                        mesh._polydata.GetCell(i).GetPointId(
                            1))  # don't need to copy
                cells[i][6], cells[i][7], cells[i][
                    8] = mesh._polydata.GetPoint(
                        mesh._polydata.GetCell(i).GetPointId(
                            2))  # don't need to copy

            fine_cells = cells

            barycenters = mesh3.cellCenters()  # don't need to copy
            fine_barycenters = mesh.cellCenters()  # don't need to copy

            if upsampling_method == 'SVM':
                #clf = SVC(kernel='rbf', gamma='auto', probability=True, gpu_id=gpu_id)
                clf = SVC(kernel='rbf', gamma='auto', gpu_id=gpu_id)
                # train SVM
                #clf.fit(mesh2.cells, np.ravel(refine_labels))
                #fine_labels = clf.predict(fine_cells)

                clf.fit(barycenters, np.ravel(refine_labels))
                fine_labels = clf.predict(fine_barycenters)
                fine_labels = fine_labels.reshape(-1, 1)
            elif upsampling_method == 'KNN':
                neigh = KNeighborsClassifier(n_neighbors=3)
                # train KNN
                #neigh.fit(mesh2.cells, np.ravel(refine_labels))
                #fine_labels = neigh.predict(fine_cells)

                neigh.fit(barycenters, np.ravel(refine_labels))
                fine_labels = neigh.predict(fine_barycenters)
Example No. 29
from sklearn.model_selection import PredefinedSplit, GridSearchCV
from thundersvm import SVC
from sklearn.metrics import make_scorer
from profile_tools import average_mcc

DATASET_PATH = '../jpred_train.csv'
RANDOM_STATE = 42
PARAMETERS = {
    'C': [2, 4],
    'kernel': ['rbf', 'linear', 'polynomial', 'sigmoid'],
    'gamma': [0.5, 2],
    'random_state': [RANDOM_STATE]
}

if __name__ == '__main__':
    svm = SVC(random_state=RANDOM_STATE)
    jpred = load('../data/training/jpred.joblib')
    X_train = jpred.iloc[:, :-2].to_numpy()
    y_train = jpred['Class'].to_numpy().ravel()
    ps = PredefinedSplit(jpred['Set']).split()
    matthews_score = make_scorer(average_mcc)
    clf = GridSearchCV(svm,
                       param_grid=PARAMETERS,
                       cv=ps,
                       n_jobs=1,
                       scoring=matthews_score,
                       verbose=10,
                       return_train_score=True,
                       refit=True)

    clf.fit(X_train, y_train)
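
    # Not part of the original example: after the refit, the winning
    # configuration and its cross-validated average MCC are exposed through
    # the standard GridSearchCV attributes.
    print(clf.best_params_)
    print(clf.best_score_)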
Example No. 30
class ModelBasedOption(object):
    def __init__(self,
                 *,
                 name,
                 parent,
                 mdp,
                 global_solver,
                 global_value_learner,
                 buffer_length,
                 global_init,
                 gestation_period,
                 timeout,
                 max_steps,
                 device,
                 use_vf,
                 use_global_vf,
                 use_model,
                 dense_reward,
                 option_idx,
                 lr_c,
                 lr_a,
                 max_num_children=2,
                 target_salient_event=None,
                 path_to_model="",
                 multithread_mpc=False):
        self.mdp = mdp
        self.name = name
        self.lr_c = lr_c
        self.lr_a = lr_a
        self.parent = parent
        self.device = device
        self.use_vf = use_vf
        self.global_solver = global_solver
        self.use_global_vf = use_global_vf
        self.timeout = timeout
        self.use_model = use_model
        self.max_steps = max_steps
        self.global_init = global_init
        self.dense_reward = dense_reward
        self.buffer_length = buffer_length
        self.max_num_children = max_num_children
        self.target_salient_event = target_salient_event
        self.multithread_mpc = multithread_mpc

        # TODO
        self.overall_mdp = mdp
        self.seed = 0
        self.option_idx = option_idx

        self.num_goal_hits = 0
        self.num_executions = 0
        self.gestation_period = gestation_period

        self.positive_examples = []
        self.negative_examples = []
        self.optimistic_classifier = None
        self.pessimistic_classifier = None

        # In the model-free setting, the output norm doesn't seem to work
        # But it seems to stabilize off policy value function learning
        # Therefore, only use output norm if we are using MPC for action selection
        use_output_norm = self.use_model

        if not self.use_global_vf or global_init:
            self.value_learner = TD3(state_dim=self.mdp.state_space_size() + 2,
                                     action_dim=self.mdp.action_space_size(),
                                     max_action=1.,
                                     name=f"{name}-td3-agent",
                                     device=self.device,
                                     lr_c=lr_c,
                                     lr_a=lr_a,
                                     use_output_normalization=use_output_norm)

        self.global_value_learner = global_value_learner if not self.global_init else None  # type: TD3

        if use_model:
            print(f"Using model-based controller for {name}")
            self.solver = self._get_model_based_solver()
        else:
            print(f"Using model-free controller for {name}")
            self.solver = self._get_model_free_solver()

        self.children = []
        self.success_curve = []
        self.effect_set = []

        if path_to_model:
            print(f"Loading model from {path_to_model} for {self.name}")
            self.solver.load_model(path_to_model)

        if self.use_vf and not self.use_global_vf and self.parent is not None:
            self.initialize_value_function_with_global_value_function()

        print(
            f"Created model-based option {self.name} with option_idx={self.option_idx}"
        )

        self.is_last_option = False

    def _get_model_based_solver(self):
        assert self.use_model

        if self.global_init:
            return MPC(mdp=self.mdp,
                       state_size=self.mdp.state_space_size(),
                       action_size=self.mdp.action_space_size(),
                       dense_reward=self.dense_reward,
                       device=self.device,
                       multithread=self.multithread_mpc)

        assert self.global_solver is not None
        return self.global_solver

    def _get_model_free_solver(self):
        assert not self.use_model
        assert self.use_vf

        # Global option creates its own VF solver
        if self.global_init:
            assert self.value_learner is not None
            return self.value_learner

        # Local option either uses the global VF..
        if self.use_global_vf:
            assert self.global_value_learner is not None
            return self.global_value_learner

        # .. or uses its own local VF as solver
        assert self.value_learner is not None
        return self.value_learner

    # ------------------------------------------------------------
    # Learning Phase Methods
    # ------------------------------------------------------------

    def get_training_phase(self):
        if self.num_goal_hits < self.gestation_period:
            return "gestation"
        return "initiation_done"

    def extract_features_for_initiation_classifier(self, state):
        features = state if isinstance(state, np.ndarray) else state.features()
        if "push" in self.mdp.env_name:
            return features[:4]
        return features[:2]

    def is_init_true(self, state):
        if self.global_init or self.get_training_phase() == "gestation":
            return True

        if self.is_last_option and self.mdp.get_start_state_salient_event()(
                state):
            return True

        features = self.extract_features_for_initiation_classifier(state)
        return self.optimistic_classifier.predict(
            [features])[0] == 1 or self.pessimistic_is_init_true(state)

    def is_term_true(self, state):
        if self.parent is None:
            return self.target_salient_event(state)

        # TODO change
        return self.parent.pessimistic_is_init_true(state)

    def pessimistic_is_init_true(self, state):
        if self.global_init or self.get_training_phase() == "gestation":
            return True

        features = self.extract_features_for_initiation_classifier(state)
        return self.pessimistic_classifier.predict([features])[0] == 1

    def is_at_local_goal(self, state, goal):
        """ Goal-conditioned termination condition. """

        reached_goal = self.mdp.sparse_gc_reward_function(state, goal, {})[1]
        reached_term = self.is_term_true(state) or state.is_terminal()
        return reached_goal and reached_term

    # ------------------------------------------------------------
    # Control Loop Methods
    # ------------------------------------------------------------

    def _get_epsilon(self):
        if self.use_model:
            return 0.1
        if not self.dense_reward and self.num_goal_hits <= 3:
            return 0.8
        return 0.2

    def act(self, state, goal):
        """ Epsilon-greedy action selection. """

        if random.random() < self._get_epsilon():
            return self.mdp.sample_random_action()

        if self.use_model:
            assert isinstance(self.solver, MPC), f"{type(self.solver)}"
            vf = self.value_function if self.use_vf else None
            return self.solver.act(state, goal, vf=vf)

        assert isinstance(self.solver, TD3), f"{type(self.solver)}"
        augmented_state = self.get_augmented_state(state, goal)
        return self.solver.act(augmented_state, evaluation_mode=False)

    def update_model(self, state, action, reward, next_state):
        """ Learning update for option model/actor/critic. """

        self.solver.step(state.features(), action, reward,
                         next_state.features(), next_state.is_terminal())

    def get_goal_for_rollout(self):
        """ Sample goal to pursue for option rollout. """

        if self.parent is None and self.target_salient_event is not None:
            return self.target_salient_event.get_target_position()

        sampled_goal = self.parent.sample_from_initiation_region_fast_and_epsilon(
        )
        assert sampled_goal is not None

        if isinstance(sampled_goal, np.ndarray):
            return sampled_goal.squeeze()

        return self.extract_goal_dimensions(sampled_goal)

    def rollout(self, step_number, rollout_goal=None, eval_mode=False):
        """ Main option control loop. """

        start_state = deepcopy(self.mdp.cur_state)
        assert self.is_init_true(start_state)

        num_steps = 0
        total_reward = 0
        visited_states = []
        option_transitions = []

        state = deepcopy(self.mdp.cur_state)
        goal = self.get_goal_for_rollout(
        ) if rollout_goal is None else rollout_goal

        print(
            f"[Step: {step_number}] Rolling out {self.name}, from {state.position} targeting {goal}"
        )

        self.num_executions += 1

        while not self.is_at_local_goal(
                state, goal
        ) and step_number < self.max_steps and num_steps < self.timeout:

            # Control
            action = self.act(state, goal)
            reward, next_state = self.mdp.execute_agent_action(action)

            if self.use_model:
                self.update_model(state, action, reward, next_state)

            # Logging
            num_steps += 1
            step_number += 1
            total_reward += reward
            visited_states.append(state)
            option_transitions.append((state, action, reward, next_state))
            state = deepcopy(self.mdp.cur_state)

        visited_states.append(state)
        self.success_curve.append(self.is_term_true(state))
        self.effect_set.append(state.features())

        if self.is_term_true(state):
            self.num_goal_hits += 1

        if self.use_vf and not eval_mode:
            self.update_value_function(
                option_transitions,
                pursued_goal=goal,
                reached_goal=self.extract_goal_dimensions(state))

        self.derive_positive_and_negative_examples(visited_states)

        # Always be refining your initiation classifier
        if not self.global_init and not eval_mode:
            self.fit_initiation_classifier()

        return option_transitions, total_reward

    # ------------------------------------------------------------
    # Hindsight Experience Replay
    # ------------------------------------------------------------

    def update_value_function(self, option_transitions, reached_goal,
                              pursued_goal):
        """ Update the goal-conditioned option value function. """

        self.experience_replay(option_transitions, pursued_goal)
        self.experience_replay(option_transitions, reached_goal)

    def initialize_value_function_with_global_value_function(self):
        self.value_learner.actor.load_state_dict(
            self.global_value_learner.actor.state_dict())
        self.value_learner.critic.load_state_dict(
            self.global_value_learner.critic.state_dict())
        self.value_learner.target_actor.load_state_dict(
            self.global_value_learner.target_actor.state_dict())
        self.value_learner.target_critic.load_state_dict(
            self.global_value_learner.target_critic.state_dict())

    def extract_goal_dimensions(self, goal):
        goal_features = goal if isinstance(goal,
                                           np.ndarray) else goal.features()
        if "ant" in self.mdp.env_name:
            return goal_features[:2]
        raise NotImplementedError(f"{self.mdp.env_name}")

    def get_augmented_state(self, state, goal):
        assert goal is not None and isinstance(goal, np.ndarray)

        goal_position = self.extract_goal_dimensions(goal)
        return np.concatenate((state.features(), goal_position))

    def experience_replay(self, trajectory, goal_state):
        for state, action, reward, next_state in trajectory:
            augmented_state = self.get_augmented_state(state, goal=goal_state)
            augmented_next_state = self.get_augmented_state(next_state,
                                                            goal=goal_state)
            done = self.is_at_local_goal(next_state, goal_state)

            reward_func = self.overall_mdp.dense_gc_reward_function if self.dense_reward \
                else self.overall_mdp.sparse_gc_reward_function
            reward, global_done = reward_func(next_state, goal_state, info={})

            if not self.use_global_vf or self.global_init:
                self.value_learner.step(augmented_state, action, reward,
                                        augmented_next_state, done)

            # Off-policy updates to the global option value function
            if not self.global_init:
                assert self.global_value_learner is not None
                self.global_value_learner.step(augmented_state, action, reward,
                                               augmented_next_state,
                                               global_done)

    def value_function(self, states, goals):
        assert isinstance(states, np.ndarray)
        assert isinstance(goals, np.ndarray)

        if len(states.shape) == 1:
            states = states[None, ...]
        if len(goals.shape) == 1:
            goals = goals[None, ...]

        goal_positions = goals[:, :2]
        augmented_states = np.concatenate((states, goal_positions), axis=1)
        augmented_states = torch.as_tensor(augmented_states).float().to(
            self.device)

        if self.use_global_vf and not self.global_init:
            values = self.global_value_learner.get_values(augmented_states)
        else:
            values = self.value_learner.get_values(augmented_states)

        return values

    # ------------------------------------------------------------
    # Learning Initiation Classifiers
    # ------------------------------------------------------------

    def get_first_state_in_classifier(self,
                                      trajectory,
                                      classifier_type="pessimistic"):
        """ Extract the first state in the trajectory that is inside the initiation classifier. """

        assert classifier_type in ("pessimistic",
                                   "optimistic"), classifier_type
        classifier = self.pessimistic_is_init_true if classifier_type == "pessimistic" else self.is_init_true
        for state in trajectory:
            if classifier(state):
                return state
        return None

    def sample_from_initiation_region_fast(self):
        """ Sample from the pessimistic initiation classifier. """
        num_tries = 0
        sampled_state = None
        while sampled_state is None and num_tries < 200:
            num_tries = num_tries + 1
            sampled_trajectory_idx = random.choice(
                range(len(self.positive_examples)))
            sampled_trajectory = self.positive_examples[sampled_trajectory_idx]
            sampled_state = self.get_first_state_in_classifier(
                sampled_trajectory)
        return sampled_state

    def sample_from_initiation_region_fast_and_epsilon(self):
        """ Sample from the pessimistic initiation classifier. """
        def compile_states(s):
            pos0 = self.mdp.get_position(s)
            pos1 = np.copy(pos0)
            pos1[0] -= self.target_salient_event.tolerance
            pos2 = np.copy(pos0)
            pos2[0] += self.target_salient_event.tolerance
            pos3 = np.copy(pos0)
            pos3[1] -= self.target_salient_event.tolerance
            pos4 = np.copy(pos0)
            pos4[1] += self.target_salient_event.tolerance
            return pos0, pos1, pos2, pos3, pos4

        idxs = [i for i in range(len(self.positive_examples))]
        random.shuffle(idxs)

        for idx in idxs:
            sampled_trajectory = self.positive_examples[idx]
            states = []
            for s in sampled_trajectory:
                states.extend(compile_states(s))

            position_matrix = np.vstack(states)
            # optimistic_predictions = self.optimistic_classifier.predict(position_matrix) == 1
            # pessimistic_predictions = self.pessimistic_classifier.predict(position_matrix) == 1
            # predictions = np.logical_or(optimistic_predictions, pessimistic_predictions)
            predictions = self.pessimistic_classifier.predict(
                position_matrix) == 1
            predictions = np.reshape(predictions, (-1, 5))
            valid = np.all(predictions, axis=1)
            indices = np.argwhere(valid)
            if len(indices) > 0:
                return sampled_trajectory[indices[0][0]]

        return self.sample_from_initiation_region_fast()

    def derive_positive_and_negative_examples(self, visited_states):
        start_state = visited_states[0]
        final_state = visited_states[-1]

        if self.is_term_true(final_state):
            positive_states = [start_state
                               ] + visited_states[-self.buffer_length:]
            self.positive_examples.append(positive_states)
        else:
            negative_examples = [start_state]
            self.negative_examples.append(negative_examples)

    def should_change_negative_examples(self):
        should_change = []
        for negative_example in self.negative_examples:
            should_change += [
                self.does_model_rollout_reach_goal(negative_example[0])
            ]
        return should_change

    def does_model_rollout_reach_goal(self, state):
        sampled_goal = self.get_goal_for_rollout()
        final_states, actions, costs = self.solver.simulate(
            state, sampled_goal, num_rollouts=14000, num_steps=self.timeout)
        farthest_position = final_states[:, :2].max(axis=0)
        return self.is_term_true(farthest_position)

    def fit_initiation_classifier(self):
        if len(self.negative_examples) > 0 and len(self.positive_examples) > 0:
            self.train_two_class_classifier()
        elif len(self.positive_examples) > 0:
            self.train_one_class_svm()

    def construct_feature_matrix(self, examples):
        states = list(itertools.chain.from_iterable(examples))
        positions = [
            self.extract_features_for_initiation_classifier(state)
            for state in states
        ]
        return np.array(positions)

    def train_one_class_svm(self,
                            nu=0.1
                            ):  # TODO: Implement gamma="auto" for thundersvm
        positive_feature_matrix = self.construct_feature_matrix(
            self.positive_examples)
        self.pessimistic_classifier = OneClassSVM(kernel="rbf", nu=nu)
        self.pessimistic_classifier.fit(positive_feature_matrix)

        self.optimistic_classifier = OneClassSVM(kernel="rbf", nu=nu / 10.)
        self.optimistic_classifier.fit(positive_feature_matrix)

    def train_two_class_classifier(self, nu=0.1):
        positive_feature_matrix = self.construct_feature_matrix(
            self.positive_examples)
        negative_feature_matrix = self.construct_feature_matrix(
            self.negative_examples)
        positive_labels = [1] * positive_feature_matrix.shape[0]
        negative_labels = [0] * negative_feature_matrix.shape[0]

        X = np.concatenate((positive_feature_matrix, negative_feature_matrix))
        Y = np.concatenate((positive_labels, negative_labels))

        if negative_feature_matrix.shape[
                0] >= 10:  # TODO: Implement gamma="auto" for thundersvm
            kwargs = {
                "kernel": "rbf",
                "gamma": "auto",
                "class_weight": "balanced"
            }
        else:
            kwargs = {"kernel": "rbf", "gamma": "auto"}

        self.optimistic_classifier = SVC(**kwargs)
        self.optimistic_classifier.fit(X, Y)

        training_predictions = self.optimistic_classifier.predict(X)
        positive_training_examples = X[training_predictions == 1]

        if positive_training_examples.shape[0] > 0:
            self.pessimistic_classifier = OneClassSVM(kernel="rbf", nu=nu)
            self.pessimistic_classifier.fit(positive_training_examples)

    # ------------------------------------------------------------
    # Distance functions
    # ------------------------------------------------------------

    def get_states_inside_pessimistic_classifier_region(self):
        point_array = self.construct_feature_matrix(self.positive_examples)
        point_array_predictions = self.pessimistic_classifier.predict(
            point_array)
        positive_point_array = point_array[point_array_predictions == 1]
        return positive_point_array

    def distance_to_state(self, state, metric="euclidean"):
        """ Compute the distance between the current option and the input `state`. """

        assert metric in ("euclidean", "value"), metric
        if metric == "euclidean":
            return self._euclidean_distance_to_state(state)
        return self._value_distance_to_state(state)

    def _euclidean_distance_to_state(self, state):
        point = self.mdp.get_position(state)

        assert isinstance(point, np.ndarray)
        assert point.shape == (2, ), point.shape

        positive_point_array = self.get_states_inside_pessimistic_classifier_region(
        )

        distances = distance.cdist(point[None, :], positive_point_array)
        return np.median(distances)

    def _value_distance_to_state(self, state):
        features = state.features() if not isinstance(state,
                                                      np.ndarray) else state
        goals = self.get_states_inside_pessimistic_classifier_region()

        distances = self.value_function(features, goals)
        distances[distances > 0] = 0.
        return np.median(np.abs(distances))

    # ------------------------------------------------------------
    # Convenience functions
    # ------------------------------------------------------------

    def get_option_success_rate(self):
        if self.num_executions > 0:
            return self.num_goal_hits / self.num_executions
        return 1.

    def get_success_rate(self):
        if len(self.success_curve) == 0:
            return 0.
        return np.mean(self.success_curve)

    def __str__(self):
        return self.name

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        if isinstance(other, ModelBasedOption):
            return self.name == other.name
        return False