Example no. 1
def accuracy(trials: ts.SparseRec,
             labels: np.ndarray,
             transform: str = "none",
             repeats: int = 1000,
             train_size: int = 30,
             test_size: int = 20,
             validate: bool = False,
             **kwargs) -> np.ndarray:
    """Give precision estimation on the estimate from a simple SVC.
    The score is calculated as tp / (tp + fp)
    where tp is true positivie and fp is false positivie.
    A 1000 time shuffle is made on the score population from different train test splits and
    the mean score is output.
    Args:
        record_file: recording file
        labels: ids of the trials belonging to different clusters
        transform: how to we get the predictors
            "none": platten neuron x sample_points and take PCs
            "mean": temporal mean so each neuron has one value per trial, then take PCs
            "corr": pairwise correlations between neurons and take PCs
        repeats: number of repeats of resampling train/test sets
    Returns:
        the distribution of mean prediction scores
    """
    X, y = trials.values, quantize(labels, groups=1)
    trial_mask = X.min(axis=2).max(axis=0) > 0
    X, y = X[:, trial_mask, :], y[trial_mask]
    X = scale_features(X, (0, 2))
    if transform == "none":
        X = np.swapaxes(X, 0, 1).reshape(X.shape[1], X.shape[0] * X.shape[2])
    elif transform == "corr":  # get inter-neuron correlation for each of the trials
        X = np.array([
            np.corrcoef(x)[np.triu_indices(x.shape[0], 1)]
            for x in np.rollaxis(X, 1)
        ])
    elif transform == "mean":
        X = np.swapaxes(X.mean(-1), 0, 1)
    else:
        raise ValueError(
            "[precision] <transform> must be one of 'none', 'corr', or 'mean'."
        )
    X = PCA(PC_NO).fit_transform(X) if X.shape[1] > PC_NO else X
    params = {"kernel": "linear", "gamma": "auto"}
    params.update(kwargs)
    svc = SVC(**params)
    splitter = split_data(X, y, repeats, train_size, test_size)
    if validate:
        result = [
            accuracy_score(y_te,
                           svc.fit(X_tr, y_tr).predict(X_te))
            for X_tr, y_tr, X_te, y_te in splitter
        ]
    else:
        result = [
            accuracy_score(y_tr,
                           svc.fit(X_tr, y_tr).predict(X_tr))
            for X_tr, y_tr, _, _ in splitter
        ]
    return np.array([x for x in result if (x is not None and x > 0.0)])
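
The function relies on a split_data helper that is not shown. A minimal sketch of what such a resampling generator could look like, assuming it simply redraws stratified train/test splits with scikit-learn (the name, arguments, and stratification are assumptions, not the project's actual helper):

import numpy as np
from sklearn.model_selection import train_test_split

def split_data(X, y, repeats, train_size, test_size, seed=0):
    """Yield `repeats` independent train/test resamples of (X, y).

    Hypothetical stand-in for the helper used above; the real project may
    subsample or stratify differently.
    """
    rng = np.random.RandomState(seed)
    for _ in range(repeats):
        X_tr, X_te, y_tr, y_te = train_test_split(
            X, y,
            train_size=train_size,
            test_size=test_size,
            stratify=y,
            random_state=rng.randint(2 ** 31 - 1),
        )
        yield X_tr, y_tr, X_te, y_te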
Example no. 2
    def get_classifier(self, traindata, kf):

        x_tr, x_te, y_tr, y_te = fac.to_kfold(traindata, kf)
        acc_max, bestK, acc = 0, 0, [[] for a in range(kf)]

        for i in range(kf):

            # print('DOAO round', i, 'begin')
            # svm 00
            print('test00')
            clf_svm = SVC()
            clf_svm.fit(x_tr[i], y_tr[i].ravel())
            label_svm = clf_svm.predict(x_te[i])
            acc[i].append(fac.get_acc(y_te[i], label_svm)[0])

            # KNN 01
            print('test01')
            acc_k = []
            aux_k = [3, 5, 7]
            # for k in range(3, 12, 2):
            for k in aux_k:
                clf_knn = KNN_GPU(k=k)
                clf_knn.fit(x_tr[i], y_tr[i])
                label_knn = clf_knn.predict(x_te[i])
                acc_k.append(fac.get_acc(y_te[i], label_knn)[0])
            acc[i].append(max(acc_k))
            bestK = aux_k[acc_k.index(max(acc_k))]

            # LR 02
            print('test02')
            clf_lr = LogisticRegression()
            clf_lr.fit(x_tr[i], y_tr[i])
            label_LR = clf_lr.predict(x_te[i])
            acc[i].append(fac.get_acc(y_te[i], label_LR)[0])

            # CART 03 (DecisionTreeClassifier)
            print('test03')
            clf_xgb = DecisionTreeClassifier()
            clf_xgb.fit(x_tr[i], y_tr[i])
            label_xgb = clf_xgb.predict(x_te[i])
            acc[i].append(fac.get_acc(y_te[i], label_xgb)[0])

            # RF 04 (TGBMClassifier stands in for the random forest)
            print('test04')
            clf_rf = TGBMClassifier()
            clf_rf.fit(x_tr[i], y_tr[i])
            label_rf = clf_rf.predict(x_te[i])
            acc[i].append(fac.get_acc(y_te[i], label_rf)[0])

            print('DOAO round', i, 'end')

        acc = np.array(acc)
        acc_mean = acc.mean(axis=0)

        # fun_best = np.where(acc_mean == max(acc_mean))
        fun_best = np.argmax(acc_mean)

        return fun_best, bestK
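
get_classifier returns an integer index into the candidate families (0=SVM, 1=KNN, 2=LR, 3=CART, 4=TGBM) plus the best KNN k. A hedged sketch of how a caller might turn that pair back into a fresh estimator; KNN_GPU and TGBMClassifier are the same classes imported in the module above, and this helper itself is illustrative, not part of the project:

def build_chosen_classifier(fun_best, bestK):
    """Illustrative factory mapping get_classifier's index back to an estimator."""
    if fun_best == 0:
        return SVC()
    if fun_best == 1:
        return KNN_GPU(k=bestK)
    if fun_best == 2:
        return LogisticRegression()
    if fun_best == 3:
        return DecisionTreeClassifier()
    if fun_best == 4:
        return TGBMClassifier()
    raise ValueError(f"unknown classifier index: {fun_best}")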
Example no. 3
    def get_classifier(self, train, kf):

        x_tr, x_te, y_tr, y_te = fac.to_kfold(train, kf)
        acc_max, bestK, acc = 0, 0, [[] for a in range(kf)]

        for i in range(kf):

            # print('DECOC round', i, 'begin')
            # svm 00
            clf_svm = SVC()
            clf_svm.fit(x_tr[i], y_tr[i].ravel())
            label_svm = clf_svm.predict(x_te[i])
            acc[i].append(fac.get_acc(y_te[i], label_svm)[0])

            # KNN 01
            acc_k = []
            aux_k = [3, 5, 7]
            # for k in range(3, 12, 2):
            for k in aux_k:
                clf_knn = KNN_GPU(k=k)
                clf_knn.fit(x_tr[i], y_tr[i])
                label_knn = clf_knn.predict(x_te[i])
                acc_k.append(fac.get_acc(y_te[i], label_knn)[0])
            acc[i].append(max(acc_k))
            bestK = aux_k[acc_k.index(max(acc_k))]

            # # LR 02
            # clf_lr = LR_GPU()
            # clf_lr.fit(x_tr[i], y_tr[i])
            # label_LR = clf_lr.predicted(x_te[i])
            # acc[i].append(fac.get_acc(y_te[i], label_LR)[0])

            # LR 02
            clf_lr = LogisticRegression()
            clf_lr.fit(x_tr[i], y_tr[i])
            label_LR = clf_lr.predict(x_te[i])
            acc[i].append(fac.get_acc(y_te[i], label_LR)[0])

            # CART 03
            clf_cart = DecisionTreeClassifier()
            clf_cart.fit(x_tr[i], y_tr[i])
            label_cart = clf_cart.predict(x_te[i])
            acc[i].append(fac.get_acc(y_te[i], label_cart)[0])

            # RF 04 (TGBMClassifier stands in for the random forest)
            clf_rf = TGBMClassifier()
            clf_rf.fit(x_tr[i], y_tr[i].ravel())
            label_rf = clf_rf.predict(x_te[i])
            acc[i].append(fac.get_acc(y_te[i], label_rf)[0])

            print('DECOC round', i, 'end')

        acc = np.array(acc)
        acc_mean = acc.mean(axis=0)
        # fun_best = np.where(acc_mean == max(acc_mean))
        fun_best = np.argmax(acc_mean)

        return fun_best, bestK
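
Both the DOAO and DECOC versions score each fold through fac.get_acc, which is not shown. A minimal stand-in, assuming it returns the accuracy first followed by additional statistics (the confusion matrix as the second element is a guess):

import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix

def get_acc(y_true, y_pred):
    """Hypothetical stand-in for fac.get_acc: accuracy first, then the confusion matrix."""
    y_true, y_pred = np.ravel(y_true), np.ravel(y_pred)
    return accuracy_score(y_true, y_pred), confusion_matrix(y_true, y_pred)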
Example no. 4
    def fun_predict(self, x_te, C, D, L):
        print('func_predict')

        num = len(D)
        cf = C[0]
        ck = C[1]

        allpre = np.zeros((len(x_te), num))
        for i in range(num):
            train = D[i]
            traindata = train[:, 0:-1]
            trainlabel = train[:, -1]

            if cf[i] == 0:
                # svm
                print('SVM predict')
                clf_svm = SVC()
                clf_svm.fit(traindata, trainlabel.ravel())
                label_svm = clf_svm.predict(x_te)
                allpre[:, i] = label_svm
            elif cf[i] == 1:
                # knn
                clf_knn = KNN_GPU(k=ck[i])
                clf_knn.fit(traindata, trainlabel)
                label_knn = clf_knn.predict(x_te)
                allpre[:, i] = label_knn
            elif cf[i] == 2:
                # LR
                print('LR predict')
                clf_lr = LogisticRegression()
                clf_lr.fit(traindata, trainlabel.ravel())
                label_LR = clf_lr.predict(x_te)
                allpre[:, i] = label_LR
            elif cf[i] == 3:
                # CART
                print('CART predict')
                clf_cart = DecisionTreeClassifier()
                clf_cart.fit(traindata, trainlabel)
                label_cart = clf_cart.predict(x_te)
                allpre[:, i] = label_cart
            elif cf[i] == 4:
                # Rf
                print('RF predict')
                clf_rf = TGBMClassifier()
                clf_rf.fit(traindata, trainlabel.ravel())
                label_rf = clf_rf.predict(x_te)
                allpre[:, i] = label_rf
            else:
                print('error !!!! DOAO.fun_predict')

            label = L[i]
            for j in range(len(x_te)):
                allpre[j, i] = label[0] if allpre[j, i] == 0 else label[1]

        # print('predict end for')
        pre = mode(allpre, axis=1)[0]
        return pre
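
The final line decodes the per-classifier votes by majority with scipy.stats.mode. A tiny self-contained illustration of that voting step (depending on the SciPy version the raw result has shape (n, 1) or (n,), which is why it is flattened here):

import numpy as np
from scipy.stats import mode

# Three test samples, four binary classifiers that have already voted with real class labels.
allpre = np.array([
    [2, 2, 3, 2],
    [1, 3, 3, 3],
    [1, 1, 2, 2],
])
votes = np.ravel(mode(allpre, axis=1)[0])  # majority label per row (ties resolve to the smallest label)
print(votes)  # [2 3 1]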
Example no. 5
    def funcPreEDOVO(self, x_test, y_test, C, D):

        numC = np.asarray(C).shape[0]
        num_set = len(y_test)
        allpre = np.zeros([num_set, numC])

        for i in range(numC):

            train = D[i]
            traindata = np.array(train[:, 0:-1])
            trainlabel = np.array(train[:, -1], dtype='int64')
            if C[i, 0] == 0:
                print('test0')
                # svm
                clf_svm = SVC()
                clf_svm.fit(traindata, trainlabel.ravel())
                label_svm = clf_svm.predict(x_test)
                allpre[:, i] = label_svm
            elif C[i, 0] == 1:
                # print('test1')
                # knn
                clf_knn = KNN_GPU(k=C[i][1])
                # clf_knn = KNN_torch(k=C[i][1])
                clf_knn.fit(traindata, trainlabel)
                label_knn = clf_knn.predict(x_test)
                allpre[:, i] = label_knn.ravel()
            elif C[i, 0] == 2:
                print('test2')
                # LR
                clf_lr = LogisticRegression()
                clf_lr.fit(traindata, trainlabel)
                label_LR = clf_lr.predict(x_test)
                allpre[:, i] = label_LR
                # # LR
                # clf_lr = LR_GPU()
                # clf_lr.fit(traindata, trainlabel)
                # label_LR = clf_lr.predicted(x_test)
                # allpre[:, i] = label_LR
            elif C[i, 0] == 3:
                print('test3')
                # CART
                clf_cart = DecisionTreeClassifier()
                clf_cart.fit(traindata, trainlabel)
                label_cart = clf_cart.predict(x_test)
                allpre[:, i] = label_cart
            elif C[i, 0] == 4:
                print('test4')
                # RF (TGBMClassifier stands in for the random forest)
                clf_rf = TGBMClassifier()
                clf_rf.fit(traindata, trainlabel.ravel())
                label_rf = clf_rf.predict(x_test)
                allpre[:, i] = label_rf

            else:
                print('error !!!! DECOC.funcPreEDOVO')

        return allpre
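
funcPreEDOVO expects C as an integer array whose first column picks the model family (0=SVM, 1=KNN, 2=LR, 3=CART, 4=TGBM) and whose second column carries the KNN k, with D holding one training block per column (features plus the label in the last column). A hedged sketch of assembling such inputs with synthetic data; the final call is commented out because it needs a DECOC instance and the GPU classifiers:

import numpy as np

rng = np.random.default_rng(0)
X1, y1 = rng.normal(size=(20, 4)), rng.integers(0, 2, size=20)
X2, y2 = rng.normal(size=(20, 4)), rng.integers(0, 2, size=20)

C = np.array([[0, 0],    # binary subproblem 0 -> SVM
              [3, 0]])   # binary subproblem 1 -> CART
D = [np.hstack([X1, y1[:, None]]),
     np.hstack([X2, y2[:, None]])]
x_test = rng.normal(size=(5, 4))
y_test = rng.integers(0, 2, size=5)
# allpre = decoc.funcPreEDOVO(x_test, y_test, C, D)  # -> (5, 2) matrix, one column of votes per subproblem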
Example no. 6
    def train_kSVM(X_train, X_test, y_train, y_test, split_ID):
        kSVM = SVC(kernel='rbf',
                   degree=3,
                   gamma='auto',
                   coef0=0.0,
                   C=1.0,
                   tol=0.001,
                   probability=False,
                   class_weight='balanced',
                   shrinking=False,
                   cache_size=None,
                   verbose=False,
                   max_iter=-1,
                   n_jobs=-1,
                   max_mem_size=-1,
                   random_state=None,
                   decision_function_shape='ovo')
        kSVM_model = kSVM.fit(X_train, y_train)

        kSVM_preds = kSVM_model.predict(X_test)
        prec, rec, f_1, supp = prf(y_test, kSVM_preds, average=None)
        class_rep = sklearn.metrics.classification_report(y_test, kSVM_preds)
        exp.log_other('Classification Report' + split_ID, class_rep)
        mcc = sklearn.metrics.matthews_corrcoef(y_test, kSVM_preds)

        # If this is the first split, report the model parameters to Comet
        if split_ID == '0':
            exp.log_parameters(kSVM_model.get_params())
        return prec, rec, f_1, supp, mcc
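
Note that n_jobs and max_mem_size are thundersvm-specific SVC arguments; scikit-learn's SVC does not accept them. A hedged sketch of a driver loop that could feed this function, with synthetic data; it assumes train_kSVM, the Comet exp object, and the imports above are in scope, so it is illustrative rather than drop-in:

import numpy as np
from sklearn.model_selection import StratifiedKFold

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 16))
y = rng.integers(0, 3, size=200)

skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
for split_id, (tr_idx, te_idx) in enumerate(skf.split(X, y)):
    prec, rec, f_1, supp, mcc = train_kSVM(
        X[tr_idx], X[te_idx], y[tr_idx], y[te_idx], split_ID=str(split_id))
    print(split_id, mcc)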
Example no. 7
    def funClassifier(self, x_train, y_train, type):
        labels = np.unique(y_train)
        nc = len(labels)
        code = self.funECOCim(nc, type)

        train, numn = [], []
        for i in range(nc):
            idi = np.where(y_train == labels[i])[0]
            train.append(x_train[idi])
            numn.append(len(idi))

        num1 = np.size(code, 1)
        ft = []
        for t in range(num1):
            Dt = np.asarray([])
            DtLabel, flagDt = [], 0
            # counts of classes / samples assigned to the positive and negative side of this column
            numAp, numAn, numNp, numNn = 0, 0, 0, 0
            for i in range(nc):
                if code[i, t] == 1:
                    if Dt.shape[0] == 0:
                        Dt = train[i]
                    else:
                        Dt = np.append(Dt, train[i], axis=0)
                    DtLabel[flagDt:flagDt + numn[i]] = [1] * numn[i]
                    flagDt += numn[i]
                    numAp += 1
                    numNp += numn[i]
                elif code[i, t] == -1:
                    if Dt.shape[0] == 0:
                        Dt = train[i]
                    else:
                        Dt = np.append(Dt, train[i], axis=0)
                    DtLabel[flagDt:flagDt + numn[i]] = [0] * numn[i]
                    flagDt += numn[i]
                    numAn += 1
                    numNn += numn[i]

            clf_svc = SVC()
            clf_svc.fit(np.array(Dt), np.array(DtLabel).ravel())
            ft.append(clf_svc)

        return code, ft, labels
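
funECOCim (not shown) produces the ECOC coding matrix with entries in {+1, -1, 0}, one row per class and one column per binary problem, and `type` selects the design. A minimal sketch of the simplest one-vs-all variant, shown only to document the convention consumed above:

import numpy as np

def one_vs_all_code(nc):
    """Illustrative ECOC coding matrix: class i is +1 in column i, -1 elsewhere."""
    code = -np.ones((nc, nc), dtype=int)
    np.fill_diagonal(code, 1)
    return code

print(one_vs_all_code(3))
# [[ 1 -1 -1]
#  [-1  1 -1]
#  [-1 -1  1]]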
Example no. 8
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.1,
                                                    random_state=0)

with open('settings/olivetti.json') as f:
    data = json.load(f)

for s in data['models']:
    print(s)

    m = SVC(kernel=s.get('kernel', 'rbf'),
            C=s.get('C', 10.0),
            coef0=s.get('coef0', 0.0),
            gamma=s.get('gamma', 'auto'),
            degree=int(s.get('degree', 3)),
            verbose=False)

    start = time.time()
    m.fit(X_train, y_train)
    end = time.time()
    print('fit time:           ', end - start)

    t = m.predict(X_train)
    print('training error:     ', 1 - accuracy_score(y_train, t))

    start = time.time()
    p = m.predict(X_test)
    end = time.time()
    print('prediction time:    ', end - start)
    print('accuracy:           ', accuracy_score(y_test, p))
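
The loop reads model configurations from settings/olivetti.json through s.get(...) with per-key defaults. A hedged illustration of the structure that file is assumed to have; the keys are inferred from the calls, the values are invented:

# Illustrative shape of settings/olivetti.json as the loop reads it;
# the real file is not shown in the snippet.
data = {
    "models": [
        {"kernel": "rbf", "C": 10.0, "gamma": "auto"},
        {"kernel": "poly", "C": 1.0, "degree": 4, "coef0": 1.0},
    ]
}

Example no. 9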
            mesh2.to_vtp(os.path.join(output_path, '{}_d_predicted_refined.vtp'.format(i_sample[:-4])))

            # upsampling
            print('\tUpsampling...')
            if mesh.cells.shape[0] > 100000:
                target_num = 100000 # set max number of cells
                ratio = 1 - target_num/mesh.cells.shape[0] # calculate ratio
                mesh.mesh_decimation(ratio)
                print('Original mesh contains too many cells; simplified to {} cells'.format(mesh.cells.shape[0]))

            fine_cells = mesh.cells

            if upsampling_method == 'SVM':
                clf = SVC(kernel='rbf', gamma='auto', gpu_id=gpu_id)
                # train SVM
                clf.fit(cells, np.ravel(refine_labels))
                fine_labels = clf.predict(fine_cells)
                fine_labels = fine_labels.reshape([mesh.cells.shape[0], 1])
            elif upsampling_method == 'KNN':
                neigh = KNeighborsClassifier(n_neighbors=3)
                # train KNN
                neigh.fit(cells, np.ravel(refine_labels))
                fine_labels = neigh.predict(fine_cells)
                fine_labels = fine_labels.reshape([mesh.cells.shape[0], 1])

            mesh2 = Easy_Mesh()
            mesh2.cells = mesh.cells
            mesh2.update_cell_ids_and_points()
            mesh2.cell_attributes['Label'] = fine_labels
            mesh2.to_vtp(os.path.join(output_path, '{}_predicted_refined.vtp'.format(i_sample[:-4])))
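
The upsampling step simply trains a classifier on the decimated cells and predicts a label for every fine cell; gpu_id is a thundersvm-specific argument that plain scikit-learn SVC would omit. A tiny self-contained illustration of the same coarse-to-fine pattern with synthetic data:

import numpy as np
from sklearn.neighbors import KNeighborsClassifier

rng = np.random.default_rng(0)
coarse_cells = rng.normal(size=(100, 9))      # e.g. flattened triangle coordinates of the decimated mesh
coarse_labels = rng.integers(0, 3, size=100)  # labels already predicted on the decimated mesh
fine_cells = rng.normal(size=(1000, 9))       # cells of the full-resolution mesh

neigh = KNeighborsClassifier(n_neighbors=3)
neigh.fit(coarse_cells, coarse_labels)
fine_labels = neigh.predict(fine_cells).reshape(-1, 1)  # one label per fine cell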
Example no. 10
    print("acc %s" % score)
"""

if __name__ == "__main__":
    with open("./data/comment_new/vedio_vector_svm", "rb") as f:
        vedio_vector = pickle.load(f)
    with open("./train", "rb") as f:
        train_D = pickle.load(f)
    with open("./test", "rb") as f:
        test_D = pickle.load(f)
    train_X = []
    train_Y = []
    for f, L1 in train_D:
        train_X.append(vedio_vector[f])
        train_Y.append(m[L1])  # m maps the raw label to an integer class id (defined earlier in the script, not shown here)
    test_X = []
    test_Y = []
    for f, L1 in test_D:
        test_X.append(vedio_vector[f])
        test_Y.append(m[L1])

    train_X = np.array(train_X)
    train_Y = np.array(train_Y)
    test_X = np.array(test_X)
    test_Y = np.array(test_Y)

    clf = SVC()
    print(train_X.shape)
    print(train_Y.shape)
    clf.fit(train_X, train_Y)
    print("acc %s" % clf.score(test_X, test_Y))
class ModelBasedOption(object):
    def __init__(self,
                 *,
                 name,
                 parent,
                 mdp,
                 global_solver,
                 global_value_learner,
                 buffer_length,
                 global_init,
                 gestation_period,
                 timeout,
                 max_steps,
                 device,
                 use_vf,
                 use_global_vf,
                 use_model,
                 dense_reward,
                 option_idx,
                 lr_c,
                 lr_a,
                 max_num_children=2,
                 target_salient_event=None,
                 path_to_model="",
                 multithread_mpc=False):
        self.mdp = mdp
        self.name = name
        self.lr_c = lr_c
        self.lr_a = lr_a
        self.parent = parent
        self.device = device
        self.use_vf = use_vf
        self.global_solver = global_solver
        self.use_global_vf = use_global_vf
        self.timeout = timeout
        self.use_model = use_model
        self.max_steps = max_steps
        self.global_init = global_init
        self.dense_reward = dense_reward
        self.buffer_length = buffer_length
        self.max_num_children = max_num_children
        self.target_salient_event = target_salient_event
        self.multithread_mpc = multithread_mpc

        # TODO
        self.overall_mdp = mdp
        self.seed = 0
        self.option_idx = option_idx

        self.num_goal_hits = 0
        self.num_executions = 0
        self.gestation_period = gestation_period

        self.positive_examples = []
        self.negative_examples = []
        self.optimistic_classifier = None
        self.pessimistic_classifier = None

        # In the model-free setting, output normalization doesn't seem to help,
        # but it does stabilize off-policy value-function learning, so only use
        # output normalization when MPC is doing the action selection.
        use_output_norm = self.use_model

        if not self.use_global_vf or global_init:
            self.value_learner = TD3(state_dim=self.mdp.state_space_size() + 2,
                                     action_dim=self.mdp.action_space_size(),
                                     max_action=1.,
                                     name=f"{name}-td3-agent",
                                     device=self.device,
                                     lr_c=lr_c,
                                     lr_a=lr_a,
                                     use_output_normalization=use_output_norm)

        self.global_value_learner = global_value_learner if not self.global_init else None  # type: TD3

        if use_model:
            print(f"Using model-based controller for {name}")
            self.solver = self._get_model_based_solver()
        else:
            print(f"Using model-free controller for {name}")
            self.solver = self._get_model_free_solver()

        self.children = []
        self.success_curve = []
        self.effect_set = []

        if path_to_model:
            print(f"Loading model from {path_to_model} for {self.name}")
            self.solver.load_model(path_to_model)

        if self.use_vf and not self.use_global_vf and self.parent is not None:
            self.initialize_value_function_with_global_value_function()

        print(
            f"Created model-based option {self.name} with option_idx={self.option_idx}"
        )

        self.is_last_option = False

    def _get_model_based_solver(self):
        assert self.use_model

        if self.global_init:
            return MPC(mdp=self.mdp,
                       state_size=self.mdp.state_space_size(),
                       action_size=self.mdp.action_space_size(),
                       dense_reward=self.dense_reward,
                       device=self.device,
                       multithread=self.multithread_mpc)

        assert self.global_solver is not None
        return self.global_solver

    def _get_model_free_solver(self):
        assert not self.use_model
        assert self.use_vf

        # Global option creates its own VF solver
        if self.global_init:
            assert self.value_learner is not None
            return self.value_learner

        # Local option either uses the global VF..
        if self.use_global_vf:
            assert self.global_value_learner is not None
            return self.global_value_learner

        # .. or uses its own local VF as solver
        assert self.value_learner is not None
        return self.value_learner

    # ------------------------------------------------------------
    # Learning Phase Methods
    # ------------------------------------------------------------

    def get_training_phase(self):
        if self.num_goal_hits < self.gestation_period:
            return "gestation"
        return "initiation_done"

    def extract_features_for_initiation_classifier(self, state):
        features = state if isinstance(state, np.ndarray) else state.features()
        if "push" in self.mdp.env_name:
            return features[:4]
        return features[:2]

    def is_init_true(self, state):
        if self.global_init or self.get_training_phase() == "gestation":
            return True

        if self.is_last_option and self.mdp.get_start_state_salient_event()(
                state):
            return True

        features = self.extract_features_for_initiation_classifier(state)
        return self.optimistic_classifier.predict(
            [features])[0] == 1 or self.pessimistic_is_init_true(state)

    def is_term_true(self, state):
        if self.parent is None:
            return self.target_salient_event(state)

        # TODO change
        return self.parent.pessimistic_is_init_true(state)

    def pessimistic_is_init_true(self, state):
        if self.global_init or self.get_training_phase() == "gestation":
            return True

        features = self.extract_features_for_initiation_classifier(state)
        return self.pessimistic_classifier.predict([features])[0] == 1

    def is_at_local_goal(self, state, goal):
        """ Goal-conditioned termination condition. """

        reached_goal = self.mdp.sparse_gc_reward_function(state, goal, {})[1]
        reached_term = self.is_term_true(state) or state.is_terminal()
        return reached_goal and reached_term

    # ------------------------------------------------------------
    # Control Loop Methods
    # ------------------------------------------------------------

    def _get_epsilon(self):
        if self.use_model:
            return 0.1
        if not self.dense_reward and self.num_goal_hits <= 3:
            return 0.8
        return 0.2

    def act(self, state, goal):
        """ Epsilon-greedy action selection. """

        if random.random() < self._get_epsilon():
            return self.mdp.sample_random_action()

        if self.use_model:
            assert isinstance(self.solver, MPC), f"{type(self.solver)}"
            vf = self.value_function if self.use_vf else None
            return self.solver.act(state, goal, vf=vf)

        assert isinstance(self.solver, TD3), f"{type(self.solver)}"
        augmented_state = self.get_augmented_state(state, goal)
        return self.solver.act(augmented_state, evaluation_mode=False)

    def update_model(self, state, action, reward, next_state):
        """ Learning update for option model/actor/critic. """

        self.solver.step(state.features(), action, reward,
                         next_state.features(), next_state.is_terminal())

    def get_goal_for_rollout(self):
        """ Sample goal to pursue for option rollout. """

        if self.parent is None and self.target_salient_event is not None:
            return self.target_salient_event.get_target_position()

        sampled_goal = self.parent.sample_from_initiation_region_fast_and_epsilon(
        )
        assert sampled_goal is not None

        if isinstance(sampled_goal, np.ndarray):
            return sampled_goal.squeeze()

        return self.extract_goal_dimensions(sampled_goal)

    def rollout(self, step_number, rollout_goal=None, eval_mode=False):
        """ Main option control loop. """

        start_state = deepcopy(self.mdp.cur_state)
        assert self.is_init_true(start_state)

        num_steps = 0
        total_reward = 0
        visited_states = []
        option_transitions = []

        state = deepcopy(self.mdp.cur_state)
        goal = self.get_goal_for_rollout(
        ) if rollout_goal is None else rollout_goal

        print(
            f"[Step: {step_number}] Rolling out {self.name}, from {state.position} targeting {goal}"
        )

        self.num_executions += 1

        while not self.is_at_local_goal(
                state, goal
        ) and step_number < self.max_steps and num_steps < self.timeout:

            # Control
            action = self.act(state, goal)
            reward, next_state = self.mdp.execute_agent_action(action)

            if self.use_model:
                self.update_model(state, action, reward, next_state)

            # Logging
            num_steps += 1
            step_number += 1
            total_reward += reward
            visited_states.append(state)
            option_transitions.append((state, action, reward, next_state))
            state = deepcopy(self.mdp.cur_state)

        visited_states.append(state)
        self.success_curve.append(self.is_term_true(state))
        self.effect_set.append(state.features())

        if self.is_term_true(state):
            self.num_goal_hits += 1

        if self.use_vf and not eval_mode:
            self.update_value_function(
                option_transitions,
                pursued_goal=goal,
                reached_goal=self.extract_goal_dimensions(state))

        self.derive_positive_and_negative_examples(visited_states)

        # Always be refining your initiation classifier
        if not self.global_init and not eval_mode:
            self.fit_initiation_classifier()

        return option_transitions, total_reward

    # ------------------------------------------------------------
    # Hindsight Experience Replay
    # ------------------------------------------------------------

    def update_value_function(self, option_transitions, reached_goal,
                              pursued_goal):
        """ Update the goal-conditioned option value function. """

        self.experience_replay(option_transitions, pursued_goal)
        self.experience_replay(option_transitions, reached_goal)

    def initialize_value_function_with_global_value_function(self):
        self.value_learner.actor.load_state_dict(
            self.global_value_learner.actor.state_dict())
        self.value_learner.critic.load_state_dict(
            self.global_value_learner.critic.state_dict())
        self.value_learner.target_actor.load_state_dict(
            self.global_value_learner.target_actor.state_dict())
        self.value_learner.target_critic.load_state_dict(
            self.global_value_learner.target_critic.state_dict())

    def extract_goal_dimensions(self, goal):
        goal_features = goal if isinstance(goal,
                                           np.ndarray) else goal.features()
        if "ant" in self.mdp.env_name:
            return goal_features[:2]
        raise NotImplementedError(f"{self.mdp.env_name}")

    def get_augmented_state(self, state, goal):
        assert goal is not None and isinstance(goal, np.ndarray)

        goal_position = self.extract_goal_dimensions(goal)
        return np.concatenate((state.features(), goal_position))

    def experience_replay(self, trajectory, goal_state):
        for state, action, reward, next_state in trajectory:
            augmented_state = self.get_augmented_state(state, goal=goal_state)
            augmented_next_state = self.get_augmented_state(next_state,
                                                            goal=goal_state)
            done = self.is_at_local_goal(next_state, goal_state)

            reward_func = self.overall_mdp.dense_gc_reward_function if self.dense_reward \
                else self.overall_mdp.sparse_gc_reward_function
            reward, global_done = reward_func(next_state, goal_state, info={})

            if not self.use_global_vf or self.global_init:
                self.value_learner.step(augmented_state, action, reward,
                                        augmented_next_state, done)

            # Off-policy updates to the global option value function
            if not self.global_init:
                assert self.global_value_learner is not None
                self.global_value_learner.step(augmented_state, action, reward,
                                               augmented_next_state,
                                               global_done)

    def value_function(self, states, goals):
        assert isinstance(states, np.ndarray)
        assert isinstance(goals, np.ndarray)

        if len(states.shape) == 1:
            states = states[None, ...]
        if len(goals.shape) == 1:
            goals = goals[None, ...]

        goal_positions = goals[:, :2]
        augmented_states = np.concatenate((states, goal_positions), axis=1)
        augmented_states = torch.as_tensor(augmented_states).float().to(
            self.device)

        if self.use_global_vf and not self.global_init:
            values = self.global_value_learner.get_values(augmented_states)
        else:
            values = self.value_learner.get_values(augmented_states)

        return values

    # ------------------------------------------------------------
    # Learning Initiation Classifiers
    # ------------------------------------------------------------

    def get_first_state_in_classifier(self,
                                      trajectory,
                                      classifier_type="pessimistic"):
        """ Extract the first state in the trajectory that is inside the initiation classifier. """

        assert classifier_type in ("pessimistic",
                                   "optimistic"), classifier_type
        classifier = self.pessimistic_is_init_true if classifier_type == "pessimistic" else self.is_init_true
        for state in trajectory:
            if classifier(state):
                return state
        return None

    def sample_from_initiation_region_fast(self):
        """ Sample from the pessimistic initiation classifier. """
        num_tries = 0
        sampled_state = None
        while sampled_state is None and num_tries < 200:
            num_tries = num_tries + 1
            sampled_trajectory_idx = random.choice(
                range(len(self.positive_examples)))
            sampled_trajectory = self.positive_examples[sampled_trajectory_idx]
            sampled_state = self.get_first_state_in_classifier(
                sampled_trajectory)
        return sampled_state

    def sample_from_initiation_region_fast_and_epsilon(self):
        """ Sample from the pessimistic initiation classifier. """
        def compile_states(s):
            pos0 = self.mdp.get_position(s)
            pos1 = np.copy(pos0)
            pos1[0] -= self.target_salient_event.tolerance
            pos2 = np.copy(pos0)
            pos2[0] += self.target_salient_event.tolerance
            pos3 = np.copy(pos0)
            pos3[1] -= self.target_salient_event.tolerance
            pos4 = np.copy(pos0)
            pos4[1] += self.target_salient_event.tolerance
            return pos0, pos1, pos2, pos3, pos4

        idxs = [i for i in range(len(self.positive_examples))]
        random.shuffle(idxs)

        for idx in idxs:
            sampled_trajectory = self.positive_examples[idx]
            states = []
            for s in sampled_trajectory:
                states.extend(compile_states(s))

            position_matrix = np.vstack(states)
            # optimistic_predictions = self.optimistic_classifier.predict(position_matrix) == 1
            # pessimistic_predictions = self.pessimistic_classifier.predict(position_matrix) == 1
            # predictions = np.logical_or(optimistic_predictions, pessimistic_predictions)
            predictions = self.pessimistic_classifier.predict(
                position_matrix) == 1
            predictions = np.reshape(predictions, (-1, 5))
            valid = np.all(predictions, axis=1)
            indices = np.argwhere(valid)
            if len(indices) > 0:
                return sampled_trajectory[indices[0][0]]

        return self.sample_from_initiation_region_fast()

    def derive_positive_and_negative_examples(self, visited_states):
        start_state = visited_states[0]
        final_state = visited_states[-1]

        if self.is_term_true(final_state):
            positive_states = [start_state
                               ] + visited_states[-self.buffer_length:]
            self.positive_examples.append(positive_states)
        else:
            negative_examples = [start_state]
            self.negative_examples.append(negative_examples)

    def should_change_negative_examples(self):
        should_change = []
        for negative_example in self.negative_examples:
            should_change += [
                self.does_model_rollout_reach_goal(negative_example[0])
            ]
        return should_change

    def does_model_rollout_reach_goal(self, state):
        sampled_goal = self.get_goal_for_rollout()
        final_states, actions, costs = self.solver.simulate(
            state, sampled_goal, num_rollouts=14000, num_steps=self.timeout)
        farthest_position = final_states[:, :2].max(axis=0)
        return self.is_term_true(farthest_position)

    def fit_initiation_classifier(self):
        if len(self.negative_examples) > 0 and len(self.positive_examples) > 0:
            self.train_two_class_classifier()
        elif len(self.positive_examples) > 0:
            self.train_one_class_svm()

    def construct_feature_matrix(self, examples):
        states = list(itertools.chain.from_iterable(examples))
        positions = [
            self.extract_features_for_initiation_classifier(state)
            for state in states
        ]
        return np.array(positions)

    def train_one_class_svm(self,
                            nu=0.1
                            ):  # TODO: Implement gamma="auto" for thundersvm
        positive_feature_matrix = self.construct_feature_matrix(
            self.positive_examples)
        self.pessimistic_classifier = OneClassSVM(kernel="rbf", nu=nu)
        self.pessimistic_classifier.fit(positive_feature_matrix)

        self.optimistic_classifier = OneClassSVM(kernel="rbf", nu=nu / 10.)
        self.optimistic_classifier.fit(positive_feature_matrix)

    def train_two_class_classifier(self, nu=0.1):
        positive_feature_matrix = self.construct_feature_matrix(
            self.positive_examples)
        negative_feature_matrix = self.construct_feature_matrix(
            self.negative_examples)
        positive_labels = [1] * positive_feature_matrix.shape[0]
        negative_labels = [0] * negative_feature_matrix.shape[0]

        X = np.concatenate((positive_feature_matrix, negative_feature_matrix))
        Y = np.concatenate((positive_labels, negative_labels))

        if negative_feature_matrix.shape[
                0] >= 10:  # TODO: Implement gamma="auto" for thundersvm
            kwargs = {
                "kernel": "rbf",
                "gamma": "auto",
                "class_weight": "balanced"
            }
        else:
            kwargs = {"kernel": "rbf", "gamma": "auto"}

        self.optimistic_classifier = SVC(**kwargs)
        self.optimistic_classifier.fit(X, Y)

        training_predictions = self.optimistic_classifier.predict(X)
        positive_training_examples = X[training_predictions == 1]

        if positive_training_examples.shape[0] > 0:
            self.pessimistic_classifier = OneClassSVM(kernel="rbf", nu=nu)
            self.pessimistic_classifier.fit(positive_training_examples)

    # ------------------------------------------------------------
    # Distance functions
    # ------------------------------------------------------------

    def get_states_inside_pessimistic_classifier_region(self):
        point_array = self.construct_feature_matrix(self.positive_examples)
        point_array_predictions = self.pessimistic_classifier.predict(
            point_array)
        positive_point_array = point_array[point_array_predictions == 1]
        return positive_point_array

    def distance_to_state(self, state, metric="euclidean"):
        """ Compute the distance between the current option and the input `state`. """

        assert metric in ("euclidean", "value"), metric
        if metric == "euclidean":
            return self._euclidean_distance_to_state(state)
        return self._value_distance_to_state(state)

    def _euclidean_distance_to_state(self, state):
        point = self.mdp.get_position(state)

        assert isinstance(point, np.ndarray)
        assert point.shape == (2, ), point.shape

        positive_point_array = self.get_states_inside_pessimistic_classifier_region(
        )

        distances = distance.cdist(point[None, :], positive_point_array)
        return np.median(distances)

    def _value_distance_to_state(self, state):
        features = state.features() if not isinstance(state,
                                                      np.ndarray) else state
        goals = self.get_states_inside_pessimistic_classifier_region()

        distances = self.value_function(features, goals)
        distances[distances > 0] = 0.
        return np.median(np.abs(distances))

    # ------------------------------------------------------------
    # Convenience functions
    # ------------------------------------------------------------

    def get_option_success_rate(self):
        if self.num_executions > 0:
            return self.num_goal_hits / self.num_executions
        return 1.

    def get_success_rate(self):
        if len(self.success_curve) == 0:
            return 0.
        return np.mean(self.success_curve)

    def __str__(self):
        return self.name

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        if isinstance(other, ModelBasedOption):
            return self.name == other.name
        return False
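
The initiation-set learning above combines a two-class SVC (optimistic classifier) with a OneClassSVM fit on the points the SVC accepts (pessimistic classifier). A small self-contained sketch of that pattern on synthetic 2-D positions, mirroring train_two_class_classifier; the data and hyperparameters here are illustrative only:

import numpy as np
from sklearn.svm import SVC, OneClassSVM

rng = np.random.default_rng(0)
positives = rng.normal(loc=[2.0, 2.0], scale=0.5, size=(60, 2))    # states from successful rollouts
negatives = rng.normal(loc=[-1.0, -1.0], scale=0.5, size=(15, 2))  # start states of failed rollouts

X = np.concatenate((positives, negatives))
Y = np.concatenate((np.ones(len(positives)), np.zeros(len(negatives))))

# Optimistic classifier: ordinary two-class SVC.
optimistic = SVC(kernel="rbf", gamma="auto", class_weight="balanced")
optimistic.fit(X, Y)

# Pessimistic classifier: one-class SVM fit only on points the optimistic classifier accepts.
accepted = X[optimistic.predict(X) == 1]
pessimistic = OneClassSVM(kernel="rbf", nu=0.1)
pessimistic.fit(accepted)

print(optimistic.predict([[2.0, 2.0]]), pessimistic.predict([[2.0, 2.0]]))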
Example no. 12
    def run(self):

        model = self.exp_data['model']
        base_workspace = self.exp_data['base_workspace']

        if model == 'ridge':
            space = [Real(1e-3, 1e+3, prior='log-uniform')]
            optimize_types = ['alpha']
            minimizer = MagnitudeAxisMinimizer(base_workspace, optimize_types,
                                               gp_minimize)
            minimizer.model = Ridge
            x0 = [1.0]
        elif model == 'kernel_ridge':
            space = [
                Categorical(['poly', 'rbf', 'sigmoid']),
                Real(1e-3, 1e+3, prior='log-uniform'),
                Integer(1, 8),
                Real(1e-6, 1e+1, prior='log-uniform'),
                Real(-10, 10)
            ]
            optimize_types = ['kernel', 'alpha', 'degree', 'gamma', 'coef0']
            minimizer = MagnitudeAxisMinimizer(base_workspace, optimize_types,
                                               gp_minimize)
            minimizer.model = KernelRidge
            x0 = ['poly', 1.0, 3, 1 / 300, 0]
        elif model == 'kernel_ridge_separation':
            space = [Real(1e-3, 1e+3, prior='log-uniform')]
            optimize_types = ['alpha']
            minimizer = MagnitudeAxisMinimizer(base_workspace, optimize_types,
                                               gp_minimize)
            minimizer.model_fixed_params = {
                'kernel': 'poly',
                'degree': 3,
                'gamma': 1 / 300,
                'coef0': 0
            }
            minimizer.model = KernelRidge
            x0 = [1.0]
        elif model == 'kernel_ridge_random':
            space = [Real(1e-3, 1e+3, prior='log-uniform')]
            optimize_types = ['alpha']
            minimizer = MagnitudeAxisMinimizer(base_workspace, optimize_types,
                                               gp_minimize)
            kernel = random.choice(
                ['poly', 'rbf', 'laplacian', 'sigmoid', 'cosine'])
            degree = random.randint(1, 10)
            coef = random.uniform(-5, 5)
            minimizer.model_fixed_params = {
                'kernel': kernel,
                'degree': degree,
                'gamma': None,
                'coef0': coef
            }
            minimizer.model = KernelRidge
            x0 = [1.0]
        elif model == 'nn':
            space = [
                Integer(16, 256),
                Real(1e-5, 1, prior='log-uniform'),
            ]
            optimize_types = ['n_hidden_units', 'lr']
            minimizer = MagnitudeAxisMinimizer(base_workspace, optimize_types,
                                               gp_minimize)
            minimizer.model_fixed_params = {
                'build_fn': self.build_nn,
                'epochs': 20,
                'batch_size': 256
            }
            minimizer.model = KerasRegressor
            x0 = [64, 0.001]
        elif model == 'kernel_svm':
            space = [
                Categorical(['poly', 'rbf', 'sigmoid']),
                Real(1e-3, 1e+3, prior='log-uniform'),
                Integer(1, 8),
                Real(1e-6, 1e+1, prior='log-uniform'),
                Real(-10, 10)
            ]
            optimize_types = ['kernel', 'C', 'degree', 'gamma', 'coef0']
            minimizer = MagnitudeAxisMinimizer(base_workspace, optimize_types,
                                               gp_minimize)
            minimizer.model_fixed_params = {
                'cache_size': 8000,
                'max_iter': 10000
            }
            minimizer.model = SVR
            x0 = ['poly', 1.0, 3, 1 / 300, 0]
        elif model == 'pca':
            if not hasattr(self, 'pca_component'):
                pca = PCA(n_components=1)
                _, X_nums, *_ = prepare_separation_data(
                    self.name.split('_')[0] + '.txt')
                pca.fit(X_nums)
                # pca.fit(np.concatenate([base_workspace['X'],base_workspace['X_test']]))
                self.pca_component = pca.components_  # 1xd
            error = self.evaluate_w(base_workspace['X_test'],
                                    self.pca_component,
                                    base_workspace['y_test'])
            return error
        elif model == 'proj_pca':
            if not hasattr(self, 'proj_pca_component'):
                X, X_nums, y_label, _ = prepare_separation_data(
                    self.name.split('_')[0] + '.txt')
                svc = SVC(kernel='linear',
                          degree=3,
                          gamma=1 / 300,
                          coef0=0,
                          C=1,
                          cache_size=4000,
                          class_weight='balanced',
                          verbose=True)

                svc.fit(X, y_label)
                beta = svc.coef_  # 1xd
                # X_pred = svc.decision_function(X) nx1
                # X_nums = np.concatenate([base_workspace['X'],base_workspace['X_test']])
                X_proj = X_nums - (
                    (svc.decision_function(X_nums) / beta @ beta.T) @ beta
                )  # nxd

                pca = PCA(n_components=1)
                pca.fit(X_proj)
                self.proj_pca_component = pca.components_  # 1xd
            error = self.evaluate_w(base_workspace['X_test'],
                                    self.proj_pca_component,
                                    base_workspace['y_test'])
            return error
        elif model == 'kernel_proj_pca':
            pass
        else:
            raise ValueError(f"unknown model: {model}")

        res = minimizer.minimize(space, n_calls=40, verbose=True, x0=x0)
        if self.save_results:
            skopt.dump(res, self.name + '.pkl', store_objective=False)

        params = {name: v for name, v in zip(minimizer.optimize_types, res.x)}
        if hasattr(minimizer, 'model_fixed_params'):
            params = {**params, **minimizer.model_fixed_params}
        error = self.fit_test_best_model(minimizer.model, base_workspace['X'],
                                         base_workspace['y'],
                                         base_workspace['X_test'],
                                         base_workspace['y_test'], **params)
        return error
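
MagnitudeAxisMinimizer is a project-specific wrapper around skopt; a minimal, self-contained sketch of the underlying gp_minimize pattern that the space/optimize_types/x0 triples above feed into. The objective below is a stand-in, not the project's evaluation function:

import numpy as np
from skopt import gp_minimize
from skopt.space import Real, Categorical

space = [Categorical(['poly', 'rbf', 'sigmoid'], name='kernel'),
         Real(1e-3, 1e+3, prior='log-uniform', name='C')]

def objective(params):
    kernel, C = params
    # Stand-in objective; the real code cross-validates a model built from these params.
    return (np.log10(C) - 1.0) ** 2 + (0.0 if kernel == 'rbf' else 0.1)

res = gp_minimize(objective, space, n_calls=20, x0=['poly', 1.0], random_state=0)
print(res.x, res.fun)

Example no. 13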
!wget https://developer.nvidia.com/compute/cuda/9.0/Prod/local_installers/cuda-repo-ubuntu1704-9-0-local_9.0.176-1_amd64-deb
!ls  # Check if required cuda 9.0 amd64-deb file is downloaded
!dpkg -i cuda-repo-ubuntu1704-9-0-local_9.0.176-1_amd64-deb
!ls /var/cuda-repo-9-0-local | grep .pub
!apt-key add /var/cuda-repo-9-0-local/7fa2af80.pub
!apt-get update
!sudo apt-get install cuda-9.0

!pip install thundersvm

#import thundersvm
from thundersvm import SVC

model = SVC(C=100, kernel='rbf')
model.fit(first_tensor, final_label)

svm_prediction = model.predict(first_tensor1)

svm_probability = model.predict_proba(first_tensor1)

label_test2 = []
for image, label in testloader:
    label1 = []
    for i in label:
        # Binarize the labels: keep class 1, map every other class to 0.
        if i != 1:
            label1.append(0)
        else:
            label1.append(i)
    label_test2.extend(label1)  # accumulate the binarized labels for the whole test set
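
A hedged follow-up comparing the thundersvm predictions with the binarized labels collected above, assuming first_tensor1 holds the features of the same test set in the same order (that correspondence is not shown in the snippet; note also that predict_proba normally requires the SVC to be built with probability=True):

import numpy as np
from sklearn.metrics import accuracy_score

y_true = np.asarray(label_test2, dtype=int)
y_pred = np.asarray(svm_prediction, dtype=int)
print("test accuracy:", accuracy_score(y_true, y_pred))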
Example no. 14
def polynomialSVM(whid):
    # Fits a kernel SVM that relates each order (the features) to the warehouse of focus,
    # based on the percentage of products that warehouse can supply and the distance to it.
    # Note: despite the function name, the kernel used below is RBF.

    # Retrieve Dataset
    dataset = _is_order_optimized_('./assets/warehouse_' + str(whid) +
                                   '.csv', )
    X = dataset.drop('classifier', axis=1)
    Y = dataset["classifier"]

    # Splitting data into train and test
    xTrain, xTest, yTrain, yTest = train_test_split(X,
                                                    Y,
                                                    test_size=0.20,
                                                    random_state=0)

    # Feature scaling
    sc = StandardScaler()
    xTrain = sc.fit_transform(xTrain)
    xTest = sc.transform(xTest)  # reuse the scaler fitted on the training data; refitting on the test set would leak

    # Fitting Kernel SVM to the Training set.
    svcClassifier = SVC(
        kernel='rbf',
        random_state=0,
        max_mem_size=50000,
        n_jobs=8,
        C=100  # This value can vary for whether the margin is too 'hard' or too 'soft'
    )

    # pickling the files (serializing them and storing them)
    # This way, the model can run the data against other data
    svcClassifier.fit(xTrain, yTrain)
    svc_pickle = './assets/sv_pickle_rbf_' + str(whid) + '.sav'
    pickle.dump(svcClassifier, open(svc_pickle, 'wb'))

    # Predicting the test results
    polyPred = svcClassifier.predict(xTest)
    print(polyPred)

    # Confusion Matrix Print: SVM Classifier polyTest against the Test Labeled Data yTest
    print("Confusion Matrix")
    print(confusion_matrix(yTest, polyPred))
    print("\n")

    # Classification report
    print("Classification Report")
    print(classification_report(yTest, polyPred))
    print("\n")

    # Applying k-fold cross validation for accuracy purposes
    accuracies = cross_val_score(estimator=svcClassifier,
                                 X=xTrain,
                                 y=yTrain,
                                 cv=10)
    print(accuracies.mean())
    print(accuracies.std())

    # Visualising the Test set results
    from matplotlib.colors import ListedColormap
    X_set, y_set = xTest, yTest
    X1, X2 = np.meshgrid(
        np.arange(start=X_set[:, 0].min() - 1,
                  stop=X_set[:, 0].max() + 1,
                  step=0.01),
        np.arange(start=X_set[:, 1].min() - 1,
                  stop=X_set[:, 1].max() + 1,
                  step=0.01))
    plt.contourf(       # Creating the contouring lines
        X1,
        X2,
        svcClassifier.\
            predict(np.array([X1.ravel(), X2.ravel()]).T).\
                reshape(X1.shape),
        alpha = 0.5,
        cmap = ListedColormap(('blue', 'black'))
    )
    plt.xlim(X1.min(), X1.max())
    plt.ylim(X2.min(), X2.max())
    for i, j in enumerate(np.unique(y_set)):  # Creating the scatter plots
        plt.scatter(X_set[y_set == j, 0],
                    X_set[y_set == j, 1],
                    c=ListedColormap(('red', 'green'))(i),
                    label=j)

    # labeling each plot
    plt.title('Kernel SVM (Test Data) Warehouse: ' + str(whid))
    plt.xlabel('Distance From Warehouse')
    plt.ylabel('Percentage of Available Products')
    plt.legend()
    plt.savefig('./assets/RBF' + str(whid) + '_' + str(int(time.time())) +
                '.png')
    plt.close()
    return False
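
Since the fitted classifier is needed again at prediction time, a short hedged sketch of reloading the pickled model for a new order. The scaler would also have to be persisted and applied for the features to match; that step, the warehouse id, and the feature values below are assumptions, not part of the original function:

import pickle
import numpy as np

whid = 3  # hypothetical warehouse id
with open('./assets/sv_pickle_rbf_' + str(whid) + '.sav', 'rb') as fh:
    svcClassifier = pickle.load(fh)

# New order described by [distance_from_warehouse, pct_available_products],
# already scaled the same way as the training data.
new_order = np.array([[12.5, 0.83]])
print(svcClassifier.predict(new_order))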
Example no. 15
                        mesh._polydata.GetCell(i).GetPointId(
                            2))  # don't need to copy

            fine_cells = cells

            barycenters = mesh3.cellCenters()  # don't need to copy
            fine_barycenters = mesh.cellCenters()  # don't need to copy

            if upsampling_method == 'SVM':
                #clf = SVC(kernel='rbf', gamma='auto', probability=True, gpu_id=gpu_id)
                clf = SVC(kernel='rbf', gamma='auto', gpu_id=gpu_id)
                # train SVM
                #clf.fit(mesh2.cells, np.ravel(refine_labels))
                #fine_labels = clf.predict(fine_cells)

                clf.fit(barycenters, np.ravel(refine_labels))
                fine_labels = clf.predict(fine_barycenters)
                fine_labels = fine_labels.reshape(-1, 1)
            elif upsampling_method == 'KNN':
                neigh = KNeighborsClassifier(n_neighbors=3)
                # train KNN
                #neigh.fit(mesh2.cells, np.ravel(refine_labels))
                #fine_labels = neigh.predict(fine_cells)

                neigh.fit(barycenters, np.ravel(refine_labels))
                fine_labels = neigh.predict(fine_barycenters)
                fine_labels = fine_labels.reshape(-1, 1)

            mesh.addCellArray(fine_labels, 'Label')
            vedo.write(
                mesh,