def get_classifier(self, traindata, kf):
    x_tr, x_te, y_tr, y_te = fac.to_kfold(traindata, kf)
    acc_max, bestK, acc = 0, 0, [[] for a in range(kf)]
    for i in range(kf):
        # print('DOAO round', i, 'begin')
        # SVM 00
        print('test00')
        clf_svm = SVC()
        clf_svm.fit(x_tr[i], y_tr[i].ravel())
        label_svm = clf_svm.predict(x_te[i])
        acc[i].append(fac.get_acc(y_te[i], label_svm)[0])
        # KNN 01
        print('test01')
        acc_k = []
        aux_k = [3, 5, 7]
        # for k in range(3, 12, 2):
        for k in aux_k:
            clf_knn = KNN_GPU(k=k)
            clf_knn.fit(x_tr[i], y_tr[i])
            label_knn = clf_knn.predict(x_te[i])
            acc_k.append(fac.get_acc(y_te[i], label_knn)[0])
        acc[i].append(max(acc_k))
        bestK = aux_k[acc_k.index(max(acc_k))]
        # LR 02
        print('test02')
        clf_lr = LogisticRegression()
        clf_lr.fit(x_tr[i], y_tr[i])
        label_LR = clf_lr.predict(x_te[i])
        acc[i].append(fac.get_acc(y_te[i], label_LR)[0])
        # CART 03 (decision tree; the earlier "XgBoost" comment was misleading)
        print('test03')
        clf_cart = DecisionTreeClassifier()
        clf_cart.fit(x_tr[i], y_tr[i])
        label_cart = clf_cart.predict(x_te[i])
        acc[i].append(fac.get_acc(y_te[i], label_cart)[0])
        # GBM 04 (ThunderGBM stands in for the original random forest)
        print('test04')
        clf_rf = TGBMClassifier()
        clf_rf.fit(x_tr[i], y_tr[i])
        label_rf = clf_rf.predict(x_te[i])
        acc[i].append(fac.get_acc(y_te[i], label_rf)[0])
        print('DOAO round', i, 'end')
    acc = np.array(acc)
    acc_mean = acc.mean(axis=0)
    # fun_best = np.where(acc_mean == max(acc_mean))
    fun_best = np.argmax(acc_mean)
    return fun_best, bestK
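# `fac.to_kfold` and `fac.get_acc` are project helpers that are not shown in this
# snippet. Below is a minimal sketch of what such helpers might look like, assuming
# `to_kfold` returns per-fold train/test arrays for data whose last column is the
# label, and `get_acc` returns accuracy as its first element. These are assumptions
# for illustration, not the project's actual implementation.
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score

def to_kfold(data, kf):
    # data: ndarray with feature columns first and the class label in the last column
    x, y = data[:, :-1], data[:, -1]
    x_tr, x_te, y_tr, y_te = [], [], [], []
    for train_idx, test_idx in KFold(n_splits=kf, shuffle=True).split(x):
        x_tr.append(x[train_idx]); x_te.append(x[test_idx])
        y_tr.append(y[train_idx]); y_te.append(y[test_idx])
    return x_tr, x_te, y_tr, y_te

def get_acc(y_true, y_pred):
    # returned as a tuple so callers can index [0] for plain accuracy
    return (accuracy_score(y_true, y_pred),)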
def predict(args):
    """Loads the model from file, makes predictions, and computes metrics.

    All created files are saved to the args.out_dir directory if provided,
    or to results_<task> otherwise. Creates the following files:
    conf_matrix.png with the confusion matrix, report.txt with various
    metrics, and preds_{task}.npy with the raw predictions.
    """
    if args.task.startswith('4'):
        test_features = np.load('test_features_3b.npy')
        test_labels = get_labels(split='test')
        y_true = np.array([CLASSES.index(l) for l in test_labels])
        out_dir = args.out_dir or f'results_{args.task}'
        for c in [0.001, 0.01, 0.1, 1.0, 10]:
            svc = SVC()
            svc.load_from_file(f'svc_{args.task}_C_{c}')
            y_pred = svc.predict(test_features)
            evaluate(y_true, y_pred, None, CLASSES, os.path.join(out_dir, f'C_{c}'))
    else:
        model: Model = load_model(
            f'model_fc_{args.task}.h5',
            custom_objects={'top_5_accuracy': top_5_accuracy})
        test_generator = create_data_generator(split='test',
                                               target_size=args.target_size,
                                               batch_size=args.batch_size,
                                               shuffle=False)

        # get predictions
        preds = model.predict_generator(test_generator, verbose=1)

        # create output directory
        out_dir = args.out_dir or f'results_{args.task}'
        os.makedirs(out_dir, exist_ok=True)

        # save numpy array with raw predictions
        save_file = os.path.join(out_dir, f'preds_{args.task}.npy')
        np.save(save_file, preds)
        print(f'Predictions saved to: {save_file}')

        # prepare y_pred, y_true and class names:
        # y_pred are the classes predicted with the highest probability
        y_pred = np.array([np.argmax(x) for x in preds])
        # since data was not shuffled in the data generator, the generator's
        # `classes` attribute contains the true label for each sample
        y_true = np.array(test_generator.classes)
        # class_names = list(test_generator.class_indices.keys())
        # class_names.sort(key=lambda x: test_generator.class_indices[x])
        evaluate(y_true, y_pred, preds, CLASSES, out_dir)
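# `top_5_accuracy` is only referenced above via `custom_objects`. A plausible
# definition is a thin wrapper around Keras' built-in top-k metric; this is an
# assumption for illustration (the project may define it differently, and the
# import may need to come from tensorflow.keras instead of standalone keras).
from keras.metrics import top_k_categorical_accuracy

def top_5_accuracy(y_true, y_pred):
    # fraction of samples whose true class is among the 5 highest-probability predictions
    return top_k_categorical_accuracy(y_true, y_pred, k=5)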
def get_classifier(self, train, kf):
    x_tr, x_te, y_tr, y_te = fac.to_kfold(train, kf)
    acc_max, bestK, acc = 0, 0, [[] for a in range(kf)]
    for i in range(kf):
        # print('DECOC round', i, 'begin')
        # SVM 00
        clf_svm = SVC()
        clf_svm.fit(x_tr[i], y_tr[i].ravel())
        label_svm = clf_svm.predict(x_te[i])
        acc[i].append(fac.get_acc(y_te[i], label_svm)[0])
        # KNN 01
        acc_k = []
        aux_k = [3, 5, 7]
        # for k in range(3, 12, 2):
        for k in aux_k:
            clf_knn = KNN_GPU(k=k)
            clf_knn.fit(x_tr[i], y_tr[i])
            label_knn = clf_knn.predict(x_te[i])
            acc_k.append(fac.get_acc(y_te[i], label_knn)[0])
        acc[i].append(max(acc_k))
        bestK = aux_k[acc_k.index(max(acc_k))]
        # # LR 02 (GPU variant kept for reference)
        # clf_lr = LR_GPU()
        # clf_lr.fit(x_tr[i], y_tr[i])
        # label_LR = clf_lr.predicted(x_te[i])
        # acc[i].append(fac.get_acc(y_te[i], label_LR)[0])
        # LR 02
        clf_lr = LogisticRegression()
        clf_lr.fit(x_tr[i], y_tr[i])
        label_LR = clf_lr.predict(x_te[i])
        acc[i].append(fac.get_acc(y_te[i], label_LR)[0])
        # CART 03
        clf_cart = DecisionTreeClassifier()
        clf_cart.fit(x_tr[i], y_tr[i])
        label_cart = clf_cart.predict(x_te[i])
        acc[i].append(fac.get_acc(y_te[i], label_cart)[0])
        # GBM 04 (ThunderGBM stands in for the original random forest)
        clf_rf = TGBMClassifier()
        clf_rf.fit(x_tr[i], y_tr[i].ravel())
        label_rf = clf_rf.predict(x_te[i])
        acc[i].append(fac.get_acc(y_te[i], label_rf)[0])
        print('DECOC round', i, 'end')
    acc = np.array(acc)
    acc_mean = acc.mean(axis=0)
    # fun_best = np.where(acc_mean == max(acc_mean))
    fun_best = np.argmax(acc_mean)
    return fun_best, bestK
def fun_predict(self, x_te, C, D, L):
    print('func_predict')
    num = len(D)
    cf = C[0]
    ck = C[1]
    allpre = np.zeros((len(x_te), num))
    for i in range(num):
        train = D[i]
        traindata = train[:, 0:-1]
        trainlabel = train[:, -1]
        if cf[i] == 0:
            # SVM
            print('SVM predict')
            clf_svm = SVC()
            clf_svm.fit(traindata, trainlabel.ravel())
            label_svm = clf_svm.predict(x_te)
            allpre[:, i] = label_svm
        elif cf[i] == 1:
            # KNN
            clf_knn = KNN_GPU(k=ck[i])
            clf_knn.fit(traindata, trainlabel)
            label_knn = clf_knn.predict(x_te)
            allpre[:, i] = label_knn
        elif cf[i] == 2:
            # LR
            print('LR predict')
            clf_lr = LogisticRegression()
            clf_lr.fit(traindata, trainlabel.ravel())
            label_LR = clf_lr.predict(x_te)
            allpre[:, i] = label_LR
        elif cf[i] == 3:
            # CART
            print('CART predict')
            clf_cart = DecisionTreeClassifier()
            clf_cart.fit(traindata, trainlabel)
            label_cart = clf_cart.predict(x_te)
            allpre[:, i] = label_cart
        elif cf[i] == 4:
            # RF (ThunderGBM stands in for the original random forest)
            print('RF predict')
            clf_rf = TGBMClassifier()
            clf_rf.fit(traindata, trainlabel.ravel())
            label_rf = clf_rf.predict(x_te)
            allpre[:, i] = label_rf
        else:
            print('error !!!! DOAO.fun_predict')
        # map the 0/1 output of this pairwise classifier back to the actual
        # pair of class labels stored in L[i]
        label = L[i]
        for j in range(len(x_te)):
            allpre[j, i] = label[0] if allpre[j, i] == 0 else label[1]
        # print('predict end for')
    pre = mode(allpre, axis=1)[0]
    return pre
def funcPreEDOVO(self, x_test, y_test, C, D):
    numC = np.asarray(C).shape[0]
    num_set = len(y_test)
    allpre = np.zeros([num_set, numC])
    for i in range(numC):
        train = D[i]
        traindata = np.array(train[:, 0:-1])
        trainlabel = np.array(train[:, -1], dtype='int64')
        if C[i, 0] == 0:
            print('test0')
            # SVM
            clf_svm = SVC()
            clf_svm.fit(traindata, trainlabel.ravel())
            label_svm = clf_svm.predict(x_test)
            allpre[:, i] = label_svm
        elif C[i, 0] == 1:
            # print('test1')
            # KNN
            clf_knn = KNN_GPU(k=C[i][1])
            # clf_knn = KNN_torch(k=C[i][1])
            clf_knn.fit(traindata, trainlabel)
            label_knn = clf_knn.predict(x_test)
            allpre[:, i] = label_knn.ravel()
        elif C[i, 0] == 2:
            print('test2')
            # LR
            clf_lr = LogisticRegression()
            clf_lr.fit(traindata, trainlabel)
            label_LR = clf_lr.predict(x_test)
            allpre[:, i] = label_LR
            # # LR (GPU variant kept for reference)
            # clf_lr = LR_GPU()
            # clf_lr.fit(traindata, trainlabel)
            # label_LR = clf_lr.predicted(x_test)
            # allpre[:, i] = label_LR
        elif C[i, 0] == 3:
            print('test3')
            # CART
            clf_cart = DecisionTreeClassifier()
            clf_cart.fit(traindata, trainlabel)
            label_cart = clf_cart.predict(x_test)
            allpre[:, i] = label_cart
        elif C[i, 0] == 4:
            print('test4')
            # ThunderGBM stands in for the original random forest here
            clf_gbm = TGBMClassifier()
            clf_gbm.fit(traindata, trainlabel.ravel())
            label_gbm = clf_gbm.predict(x_test)
            allpre[:, i] = label_gbm
        else:
            print('error !!!! DECOC.funcPreEDOVO')
    return allpre
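# funcPreEDOVO returns one column of predictions per base classifier. How those
# columns are combined is outside this snippet; a minimal majority-vote sketch is
# shown below as an assumption (the actual DECOC decoding step may use codeword
# distances instead of a plain vote).
from scipy.stats import mode

def majority_vote(allpre):
    # most frequent predicted label per test sample across all base classifiers
    return mode(allpre, axis=1)[0].ravel()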
def check_jthunder(self, clf: thundersvm.SVC):
    self.assertTrue(
        np.allclose(
            clf.decision_function(self.ds_test.xs).flatten(),
            -jthunder.decision_function(clf, self.jtest_xs),
            rtol=1e-4,
            atol=1e-4,
        ))
    self.assertTrue(
        all(
            clf.predict(self.ds_test.xs).astype(int) ==
            jthunder.predict(clf, self.jtest_xs)))
    self.assertTrue(
        np.allclose(jthunder.norm2(clf), jthunder.norm2_naive(clf)))
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.1, random_state=0)

with open('settings/olivetti.json') as f:
    data = json.load(f)

for s in data['models']:
    print(s)
    m = SVC(kernel=s.get('kernel', 'rbf'),
            C=s.get('C', 10.0),
            coef0=s.get('coef0', 0.0),
            gamma=s.get('gamma', 'auto'),
            degree=int(s.get('degree', 3)),
            verbose=False)

    start = time.time()
    m.fit(X_train, y_train)
    end = time.time()
    print('fit time: ', end - start)

    t = m.predict(X_train)
    print('training error: ', 1 - accuracy_score(y_train, t))

    start = time.time()
    p = m.predict(X_test)
    end = time.time()
    print('prediction time: ', end - start)
    print('accuracy: ', accuracy_score(y_test, p))
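# The loop above expects settings/olivetti.json to contain a "models" list whose
# entries carry any of the keys read via s.get(...). A hypothetical example entry,
# illustrative only and not the actual file shipped with the project:
#
# {
#     "models": [
#         {"kernel": "rbf", "C": 10.0, "gamma": "auto"},
#         {"kernel": "linear", "C": 1.0}
#     ]
# }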
# upsampling
print('\tUpsampling...')
if mesh.cells.shape[0] > 100000:
    target_num = 100000  # set max number of cells
    ratio = 1 - target_num / mesh.cells.shape[0]  # calculate decimation ratio
    mesh.mesh_decimation(ratio)
    print('Original contains too many cells, simplify to {} cells'.format(mesh.cells.shape[0]))

fine_cells = mesh.cells

if upsampling_method == 'SVM':
    clf = SVC(kernel='rbf', gamma='auto', gpu_id=gpu_id)  # train SVM
    clf.fit(cells, np.ravel(refine_labels))
    fine_labels = clf.predict(fine_cells)
    fine_labels = fine_labels.reshape([mesh.cells.shape[0], 1])
elif upsampling_method == 'KNN':
    neigh = KNeighborsClassifier(n_neighbors=3)  # train KNN
    neigh.fit(cells, np.ravel(refine_labels))
    fine_labels = neigh.predict(fine_cells)
    fine_labels = fine_labels.reshape([mesh.cells.shape[0], 1])

mesh2 = Easy_Mesh()
mesh2.cells = mesh.cells
mesh2.update_cell_ids_and_points()
mesh2.cell_attributes['Label'] = fine_labels
mesh2.to_vtp(os.path.join(output_path, '{}_predicted_refined.vtp'.format(i_sample[:-4])))

# remove tmp folder
class ModelBasedOption(object):
    def __init__(self, *, name, parent, mdp, global_solver, global_value_learner,
                 buffer_length, global_init, gestation_period, timeout, max_steps,
                 device, use_vf, use_global_vf, use_model, dense_reward, option_idx,
                 lr_c, lr_a, max_num_children=2, target_salient_event=None,
                 path_to_model="", multithread_mpc=False):
        self.mdp = mdp
        self.name = name
        self.lr_c = lr_c
        self.lr_a = lr_a
        self.parent = parent
        self.device = device
        self.use_vf = use_vf
        self.global_solver = global_solver
        self.use_global_vf = use_global_vf
        self.timeout = timeout
        self.use_model = use_model
        self.max_steps = max_steps
        self.global_init = global_init
        self.dense_reward = dense_reward
        self.buffer_length = buffer_length
        self.max_num_children = max_num_children
        self.target_salient_event = target_salient_event
        self.multithread_mpc = multithread_mpc

        # TODO
        self.overall_mdp = mdp
        self.seed = 0
        self.option_idx = option_idx

        self.num_goal_hits = 0
        self.num_executions = 0
        self.gestation_period = gestation_period

        self.positive_examples = []
        self.negative_examples = []
        self.optimistic_classifier = None
        self.pessimistic_classifier = None

        # In the model-free setting, output normalization doesn't seem to work,
        # but it does seem to stabilize off-policy value function learning.
        # Therefore, only use output normalization when MPC does action selection.
        use_output_norm = self.use_model

        if not self.use_global_vf or global_init:
            self.value_learner = TD3(state_dim=self.mdp.state_space_size() + 2,
                                     action_dim=self.mdp.action_space_size(),
                                     max_action=1.,
                                     name=f"{name}-td3-agent",
                                     device=self.device,
                                     lr_c=lr_c, lr_a=lr_a,
                                     use_output_normalization=use_output_norm)

        self.global_value_learner = global_value_learner if not self.global_init else None  # type: TD3

        if use_model:
            print(f"Using model-based controller for {name}")
            self.solver = self._get_model_based_solver()
        else:
            print(f"Using model-free controller for {name}")
            self.solver = self._get_model_free_solver()

        self.children = []
        self.success_curve = []
        self.effect_set = []

        if path_to_model:
            print(f"Loading model from {path_to_model} for {self.name}")
            self.solver.load_model(path_to_model)

        if self.use_vf and not self.use_global_vf and self.parent is not None:
            self.initialize_value_function_with_global_value_function()

        print(f"Created model-based option {self.name} with option_idx={self.option_idx}")

        self.is_last_option = False

    def _get_model_based_solver(self):
        assert self.use_model

        if self.global_init:
            return MPC(mdp=self.mdp,
                       state_size=self.mdp.state_space_size(),
                       action_size=self.mdp.action_space_size(),
                       dense_reward=self.dense_reward,
                       device=self.device,
                       multithread=self.multithread_mpc)

        assert self.global_solver is not None
        return self.global_solver

    def _get_model_free_solver(self):
        assert not self.use_model
        assert self.use_vf

        # Global option creates its own VF solver
        if self.global_init:
            assert self.value_learner is not None
            return self.value_learner

        # Local option either uses the global VF...
        if self.use_global_vf:
            assert self.global_value_learner is not None
            return self.global_value_learner
        # ...or uses its own local VF as solver
        assert self.value_learner is not None
        return self.value_learner

    # ------------------------------------------------------------
    # Learning Phase Methods
    # ------------------------------------------------------------

    def get_training_phase(self):
        if self.num_goal_hits < self.gestation_period:
            return "gestation"
        return "initiation_done"

    def extract_features_for_initiation_classifier(self, state):
        features = state if isinstance(state, np.ndarray) else state.features()
        if "push" in self.mdp.env_name:
            return features[:4]
        return features[:2]

    def is_init_true(self, state):
        if self.global_init or self.get_training_phase() == "gestation":
            return True

        if self.is_last_option and self.mdp.get_start_state_salient_event()(state):
            return True

        features = self.extract_features_for_initiation_classifier(state)
        return self.optimistic_classifier.predict([features])[0] == 1 \
            or self.pessimistic_is_init_true(state)

    def is_term_true(self, state):
        if self.parent is None:
            return self.target_salient_event(state)

        # TODO change
        return self.parent.pessimistic_is_init_true(state)

    def pessimistic_is_init_true(self, state):
        if self.global_init or self.get_training_phase() == "gestation":
            return True

        features = self.extract_features_for_initiation_classifier(state)
        return self.pessimistic_classifier.predict([features])[0] == 1

    def is_at_local_goal(self, state, goal):
        """ Goal-conditioned termination condition. """
        reached_goal = self.mdp.sparse_gc_reward_function(state, goal, {})[1]
        reached_term = self.is_term_true(state) or state.is_terminal()
        return reached_goal and reached_term

    # ------------------------------------------------------------
    # Control Loop Methods
    # ------------------------------------------------------------

    def _get_epsilon(self):
        if self.use_model:
            return 0.1
        if not self.dense_reward and self.num_goal_hits <= 3:
            return 0.8
        return 0.2

    def act(self, state, goal):
        """ Epsilon-greedy action selection. """
        if random.random() < self._get_epsilon():
            return self.mdp.sample_random_action()

        if self.use_model:
            assert isinstance(self.solver, MPC), f"{type(self.solver)}"
            vf = self.value_function if self.use_vf else None
            return self.solver.act(state, goal, vf=vf)

        assert isinstance(self.solver, TD3), f"{type(self.solver)}"
        augmented_state = self.get_augmented_state(state, goal)
        return self.solver.act(augmented_state, evaluation_mode=False)

    def update_model(self, state, action, reward, next_state):
        """ Learning update for option model/actor/critic. """
        self.solver.step(state.features(), action, reward,
                         next_state.features(), next_state.is_terminal())

    def get_goal_for_rollout(self):
        """ Sample goal to pursue for option rollout. """
        if self.parent is None and self.target_salient_event is not None:
            return self.target_salient_event.get_target_position()

        sampled_goal = self.parent.sample_from_initiation_region_fast_and_epsilon()
        assert sampled_goal is not None

        if isinstance(sampled_goal, np.ndarray):
            return sampled_goal.squeeze()

        return self.extract_goal_dimensions(sampled_goal)

    def rollout(self, step_number, rollout_goal=None, eval_mode=False):
        """ Main option control loop. """
""" start_state = deepcopy(self.mdp.cur_state) assert self.is_init_true(start_state) num_steps = 0 total_reward = 0 visited_states = [] option_transitions = [] state = deepcopy(self.mdp.cur_state) goal = self.get_goal_for_rollout( ) if rollout_goal is None else rollout_goal print( f"[Step: {step_number}] Rolling out {self.name}, from {state.position} targeting {goal}" ) self.num_executions += 1 while not self.is_at_local_goal( state, goal ) and step_number < self.max_steps and num_steps < self.timeout: # Control action = self.act(state, goal) reward, next_state = self.mdp.execute_agent_action(action) if self.use_model: self.update_model(state, action, reward, next_state) # Logging num_steps += 1 step_number += 1 total_reward += reward visited_states.append(state) option_transitions.append((state, action, reward, next_state)) state = deepcopy(self.mdp.cur_state) visited_states.append(state) self.success_curve.append(self.is_term_true(state)) self.effect_set.append(state.features()) if self.is_term_true(state): self.num_goal_hits += 1 if self.use_vf and not eval_mode: self.update_value_function( option_transitions, pursued_goal=goal, reached_goal=self.extract_goal_dimensions(state)) self.derive_positive_and_negative_examples(visited_states) # Always be refining your initiation classifier if not self.global_init and not eval_mode: self.fit_initiation_classifier() return option_transitions, total_reward # ------------------------------------------------------------ # Hindsight Experience Replay # ------------------------------------------------------------ def update_value_function(self, option_transitions, reached_goal, pursued_goal): """ Update the goal-conditioned option value function. """ self.experience_replay(option_transitions, pursued_goal) self.experience_replay(option_transitions, reached_goal) def initialize_value_function_with_global_value_function(self): self.value_learner.actor.load_state_dict( self.global_value_learner.actor.state_dict()) self.value_learner.critic.load_state_dict( self.global_value_learner.critic.state_dict()) self.value_learner.target_actor.load_state_dict( self.global_value_learner.target_actor.state_dict()) self.value_learner.target_critic.load_state_dict( self.global_value_learner.target_critic.state_dict()) def extract_goal_dimensions(self, goal): goal_features = goal if isinstance(goal, np.ndarray) else goal.features() if "ant" in self.mdp.env_name: return goal_features[:2] raise NotImplementedError(f"{self.mdp.env_name}") def get_augmented_state(self, state, goal): assert goal is not None and isinstance(goal, np.ndarray) goal_position = self.extract_goal_dimensions(goal) return np.concatenate((state.features(), goal_position)) def experience_replay(self, trajectory, goal_state): for state, action, reward, next_state in trajectory: augmented_state = self.get_augmented_state(state, goal=goal_state) augmented_next_state = self.get_augmented_state(next_state, goal=goal_state) done = self.is_at_local_goal(next_state, goal_state) reward_func = self.overall_mdp.dense_gc_reward_function if self.dense_reward \ else self.overall_mdp.sparse_gc_reward_function reward, global_done = reward_func(next_state, goal_state, info={}) if not self.use_global_vf or self.global_init: self.value_learner.step(augmented_state, action, reward, augmented_next_state, done) # Off-policy updates to the global option value function if not self.global_init: assert self.global_value_learner is not None self.global_value_learner.step(augmented_state, action, reward, augmented_next_state, 
    def value_function(self, states, goals):
        assert isinstance(states, np.ndarray)
        assert isinstance(goals, np.ndarray)

        if len(states.shape) == 1:
            states = states[None, ...]
        if len(goals.shape) == 1:
            goals = goals[None, ...]

        goal_positions = goals[:, :2]
        augmented_states = np.concatenate((states, goal_positions), axis=1)
        augmented_states = torch.as_tensor(augmented_states).float().to(self.device)

        if self.use_global_vf and not self.global_init:
            values = self.global_value_learner.get_values(augmented_states)
        else:
            values = self.value_learner.get_values(augmented_states)

        return values

    # ------------------------------------------------------------
    # Learning Initiation Classifiers
    # ------------------------------------------------------------

    def get_first_state_in_classifier(self, trajectory, classifier_type="pessimistic"):
        """ Extract the first state in the trajectory that is inside the initiation classifier. """
        assert classifier_type in ("pessimistic", "optimistic"), classifier_type

        classifier = self.pessimistic_is_init_true if classifier_type == "pessimistic" else self.is_init_true
        for state in trajectory:
            if classifier(state):
                return state
        return None

    def sample_from_initiation_region_fast(self):
        """ Sample from the pessimistic initiation classifier. """
        num_tries = 0
        sampled_state = None
        while sampled_state is None and num_tries < 200:
            num_tries = num_tries + 1
            sampled_trajectory_idx = random.choice(range(len(self.positive_examples)))
            sampled_trajectory = self.positive_examples[sampled_trajectory_idx]
            sampled_state = self.get_first_state_in_classifier(sampled_trajectory)
        return sampled_state

    def sample_from_initiation_region_fast_and_epsilon(self):
        """ Sample from the pessimistic initiation classifier. """
        def compile_states(s):
            pos0 = self.mdp.get_position(s)
            pos1 = np.copy(pos0)
            pos1[0] -= self.target_salient_event.tolerance
            pos2 = np.copy(pos0)
            pos2[0] += self.target_salient_event.tolerance
            pos3 = np.copy(pos0)
            pos3[1] -= self.target_salient_event.tolerance
            pos4 = np.copy(pos0)
            pos4[1] += self.target_salient_event.tolerance
            return pos0, pos1, pos2, pos3, pos4

        idxs = [i for i in range(len(self.positive_examples))]
        random.shuffle(idxs)

        for idx in idxs:
            sampled_trajectory = self.positive_examples[idx]

            states = []
            for s in sampled_trajectory:
                states.extend(compile_states(s))
            position_matrix = np.vstack(states)

            # optimistic_predictions = self.optimistic_classifier.predict(position_matrix) == 1
            # pessimistic_predictions = self.pessimistic_classifier.predict(position_matrix) == 1
            # predictions = np.logical_or(optimistic_predictions, pessimistic_predictions)
            predictions = self.pessimistic_classifier.predict(position_matrix) == 1
            predictions = np.reshape(predictions, (-1, 5))

            valid = np.all(predictions, axis=1)
            indices = np.argwhere(valid == True)

            if len(indices) > 0:
                return sampled_trajectory[indices[0][0]]

        return self.sample_from_initiation_region_fast()

    def derive_positive_and_negative_examples(self, visited_states):
        start_state = visited_states[0]
        final_state = visited_states[-1]

        if self.is_term_true(final_state):
            positive_states = [start_state] + visited_states[-self.buffer_length:]
            self.positive_examples.append(positive_states)
        else:
            negative_examples = [start_state]
            self.negative_examples.append(negative_examples)

    def should_change_negative_examples(self):
        should_change = []
        for negative_example in self.negative_examples:
            should_change += [self.does_model_rollout_reach_goal(negative_example[0])]
        return should_change

    def does_model_rollout_reach_goal(self, state):
        sampled_goal = self.get_goal_for_rollout()
        final_states, actions, costs = self.solver.simulate(state, sampled_goal,
                                                            num_rollouts=14000,
                                                            num_steps=self.timeout)
        farthest_position = final_states[:, :2].max(axis=0)
        return self.is_term_true(farthest_position)

    def fit_initiation_classifier(self):
        if len(self.negative_examples) > 0 and len(self.positive_examples) > 0:
            self.train_two_class_classifier()
        elif len(self.positive_examples) > 0:
            self.train_one_class_svm()

    def construct_feature_matrix(self, examples):
        states = list(itertools.chain.from_iterable(examples))
        positions = [self.extract_features_for_initiation_classifier(state) for state in states]
        return np.array(positions)

    def train_one_class_svm(self, nu=0.1):  # TODO: Implement gamma="auto" for thundersvm
        positive_feature_matrix = self.construct_feature_matrix(self.positive_examples)
        self.pessimistic_classifier = OneClassSVM(kernel="rbf", nu=nu)
        self.pessimistic_classifier.fit(positive_feature_matrix)

        self.optimistic_classifier = OneClassSVM(kernel="rbf", nu=nu / 10.)
        self.optimistic_classifier.fit(positive_feature_matrix)

    def train_two_class_classifier(self, nu=0.1):
        positive_feature_matrix = self.construct_feature_matrix(self.positive_examples)
        negative_feature_matrix = self.construct_feature_matrix(self.negative_examples)

        positive_labels = [1] * positive_feature_matrix.shape[0]
        negative_labels = [0] * negative_feature_matrix.shape[0]

        X = np.concatenate((positive_feature_matrix, negative_feature_matrix))
        Y = np.concatenate((positive_labels, negative_labels))

        if negative_feature_matrix.shape[0] >= 10:  # TODO: Implement gamma="auto" for thundersvm
            kwargs = {"kernel": "rbf", "gamma": "auto", "class_weight": "balanced"}
        else:
            kwargs = {"kernel": "rbf", "gamma": "auto"}

        self.optimistic_classifier = SVC(**kwargs)
        self.optimistic_classifier.fit(X, Y)

        training_predictions = self.optimistic_classifier.predict(X)
        positive_training_examples = X[training_predictions == 1]

        if positive_training_examples.shape[0] > 0:
            self.pessimistic_classifier = OneClassSVM(kernel="rbf", nu=nu)
            self.pessimistic_classifier.fit(positive_training_examples)

    # ------------------------------------------------------------
    # Distance functions
    # ------------------------------------------------------------

    def get_states_inside_pessimistic_classifier_region(self):
        point_array = self.construct_feature_matrix(self.positive_examples)
        point_array_predictions = self.pessimistic_classifier.predict(point_array)
        positive_point_array = point_array[point_array_predictions == 1]
        return positive_point_array

    def distance_to_state(self, state, metric="euclidean"):
        """ Compute the distance between the current option and the input `state`. """
        assert metric in ("euclidean", "value"), metric
        if metric == "euclidean":
            return self._euclidean_distance_to_state(state)
        return self._value_distance_to_state(state)

    def _euclidean_distance_to_state(self, state):
        point = self.mdp.get_position(state)

        assert isinstance(point, np.ndarray)
        assert point.shape == (2,), point.shape

        positive_point_array = self.get_states_inside_pessimistic_classifier_region()

        distances = distance.cdist(point[None, :], positive_point_array)
        return np.median(distances)

    def _value_distance_to_state(self, state):
        features = state.features() if not isinstance(state, np.ndarray) else state
        goals = self.get_states_inside_pessimistic_classifier_region()

        distances = self.value_function(features, goals)
        distances[distances > 0] = 0.
        return np.median(np.abs(distances))

    # ------------------------------------------------------------
    # Convenience functions
    # ------------------------------------------------------------

    def get_option_success_rate(self):
        if self.num_executions > 0:
            return self.num_goal_hits / self.num_executions
        return 1.

    def get_success_rate(self):
        if len(self.success_curve) == 0:
            return 0.
        return np.mean(self.success_curve)

    def __str__(self):
        return self.name

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        if isinstance(other, ModelBasedOption):
            return self.name == other.name
        return False
def predict(y, z):
    clf = SVC(kernel="linear")
    clf.load_from_file("./model")
    return clf.predict([[y, z]])[0]
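# Reloading the model from disk on every call is costly. Below is a minimal sketch of
# a module-level cache; it is an optimization suggestion, not part of the original
# code, and `predict_cached` is a hypothetical name.
_clf = None

def predict_cached(y, z):
    global _clf
    if _clf is None:
        _clf = SVC(kernel="linear")
        _clf.load_from_file("./model")
    return _clf.predict([[y, z]])[0]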
!ls  # Check if required cuda 9.0 amd64-deb file is downloaded
!dpkg -i cuda-repo-ubuntu1704-9-0-local_9.0.176-1_amd64-deb
!ls /var/cuda-repo-9-0-local | grep .pub
!apt-key add /var/cuda-repo-9-0-local/7fa2af80.pub
!apt-get update
!sudo apt-get install cuda-9.0
!pip install thundersvm

# import thundersvm
from thundersvm import SVC

model = SVC(C=100, kernel='rbf', probability=True)  # probability=True is needed for predict_proba below
model.fit(first_tensor, final_label)
svm_prediction = model.predict(first_tensor1)
svm_probability = model.predict_proba(first_tensor1)

label_test2 = []
for image, label in testloader:
    label1 = []
    for i in label:
        if i != 1:
            j = 0
            label1.append(j)
        else:
            label1.append(i)
    label_test2 = label_test2 + label1
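# A plausible follow-up step, comparing the SVM predictions with the remapped test
# labels built above. This assumes label_test2 is ordered the same way as the samples
# in first_tensor1, which is not confirmed by the snippet.
import numpy as np
from sklearn.metrics import accuracy_score

y_true = np.array([int(v) for v in label_test2])
print('SVM accuracy:', accuracy_score(y_true, svm_prediction))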
def polynomialSVM(whid):
    # Fits an RBF-kernel SVM that relates each order (the features) to the warehouse of
    # focus, based on the percentage of products that warehouse can supply and the
    # distance to it.

    # Retrieve dataset
    dataset = _is_order_optimized_('./assets/warehouse_' + str(whid) + '.csv')
    X = dataset.drop('classifier', axis=1)
    Y = dataset["classifier"]

    # Split data into training and test sets
    xTrain, xTest, yTrain, yTest = train_test_split(X, Y, test_size=0.20, random_state=0)

    # Feature scaling (fit the scaler on the training set only)
    sc = StandardScaler()
    xTrain = sc.fit_transform(xTrain)
    xTest = sc.transform(xTest)

    # Fit the kernel SVM to the training set
    svcClassifier = SVC(
        kernel='rbf',
        random_state=0,
        max_mem_size=50000,
        n_jobs=8,
        C=100  # tune this value depending on how 'hard' or 'soft' the margin should be
    )

    # Pickle the fitted model (serialize and store it) so it can later be run
    # against other data.
    svcClassifier.fit(xTrain, yTrain)
    svc_pickle = './assets/sv_pickle_rbf_' + str(whid) + '.sav'
    pickle.dump(svcClassifier, open(svc_pickle, 'wb'))

    # Predict the test results
    polyPred = svcClassifier.predict(xTest)
    print(polyPred)

    # Confusion matrix: predictions polyPred against the labeled test data yTest
    print("Confusion Matrix")
    print(confusion_matrix(yTest, polyPred))
    print("\n")

    # Classification report
    print("Classification Report")
    print(classification_report(yTest, polyPred))
    print("\n")

    # k-fold cross-validation for an accuracy estimate
    accuracies = cross_val_score(estimator=svcClassifier, X=xTrain, y=yTrain, cv=10)
    print(accuracies.mean())
    print(accuracies.std())

    # Visualise the test set results
    from matplotlib.colors import ListedColormap
    X_set, y_set = xTest, yTest
    X1, X2 = np.meshgrid(
        np.arange(start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01),
        np.arange(start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01))
    plt.contourf(  # create the contour regions
        X1, X2,
        svcClassifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
        alpha=0.5,
        cmap=ListedColormap(('blue', 'black'))
    )
    plt.xlim(X1.min(), X1.max())
    plt.ylim(X2.min(), X2.max())
    for i, j in enumerate(np.unique(y_set)):  # create the scatter plots
        plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                    c=ListedColormap(('red', 'green'))(i),
                    label=j)  # label each class
    plt.title('Kernel SVM (Test Data) Warehouse: ' + str(whid))
    plt.xlabel('Distance From Warehouse')
    plt.ylabel('Percentage of Available Products')
    plt.legend()
    plt.savefig('./assets/RBF' + str(whid) + '_' + str(int(time.time())) + '.png')
    plt.close()

    return False
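# Note that the StandardScaler is not persisted above, so reloading the pickled
# classifier alone cannot reproduce the same preprocessing on new orders. A minimal
# sketch of reloading the pickled model (the helper name is illustrative, not part of
# the original project; the scaler would need to be pickled alongside it).
def load_warehouse_model(whid):
    with open('./assets/sv_pickle_rbf_' + str(whid) + '.sav', 'rb') as f:
        return pickle.load(f)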
    2))  # don't need to copy
fine_cells = cells

barycenters = mesh3.cellCenters()  # don't need to copy
fine_barycenters = mesh.cellCenters()  # don't need to copy

if upsampling_method == 'SVM':
    # clf = SVC(kernel='rbf', gamma='auto', probability=True, gpu_id=gpu_id)
    clf = SVC(kernel='rbf', gamma='auto', gpu_id=gpu_id)  # train SVM
    # clf.fit(mesh2.cells, np.ravel(refine_labels))
    # fine_labels = clf.predict(fine_cells)
    clf.fit(barycenters, np.ravel(refine_labels))
    fine_labels = clf.predict(fine_barycenters)
    fine_labels = fine_labels.reshape(-1, 1)
elif upsampling_method == 'KNN':
    neigh = KNeighborsClassifier(n_neighbors=3)  # train KNN
    # neigh.fit(mesh2.cells, np.ravel(refine_labels))
    # fine_labels = neigh.predict(fine_cells)
    neigh.fit(barycenters, np.ravel(refine_labels))
    fine_labels = neigh.predict(fine_barycenters)
    fine_labels = fine_labels.reshape(-1, 1)

mesh.addCellArray(fine_labels, 'Label')
vedo.write(
    mesh,
    os.path.join(output_path,
class FaceIdentification:
    def __init__(self, model=None, batch_size=32, dataset_name='vggface2_test', data_dir=None):
        super().__init__()
        logging.info('Face Identification for VGGFace2.')
        self.data_dir = data_dir
        self.dataset_name = dataset_name
        self.image_size = 112
        self.batch_size = batch_size
        self.model = model

    def load_model(self, feature_dim, model_dir=None):
        logging.info('Model Loading')
        net = cbam.CBAMResNet(100, feature_dim=feature_dim, mode='ir')
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        net.load_state_dict(torch.load(model_dir)['net_state_dict'])
        net = DataParallel(net).to(self.device)
        self.model = net.eval()
        return self.model

    def get_batch_img(self, batch_list):
        batch = []
        for img_dir in batch_list:
            img_path = Path(self.data_dir) / img_dir
            img = cv2.imread(str(img_path))
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            img = img - 127.5
            img = img * 0.0078125
            img = np.transpose(img, axes=(2, 0, 1))
            batch.append(img)
        return batch

    def get_features(self, image_list, model=None):
        self.image_list = image_list.files
        if Path('features/{}_features.npy'.format(self.dataset_name)).exists():
            logging.info('Feature files are found.')
            self.load_features()
        else:
            logging.info('Feature Extraction, It may take a while...')
            if model is not None:
                self.model = model
            embeddings = np.zeros((len(self.image_list), 512))
            for idx in tqdm.tqdm(range(0, len(self.image_list), self.batch_size)):
                batch_list = self.image_list[idx:idx + self.batch_size]
                batch_data = self.get_batch_img(batch_list)
                batch_data = torch.FloatTensor(batch_data).to(self.device)
                embed = self.model(batch_data)
                embeddings[idx:idx + self.batch_size] = embed.detach().cpu().numpy()
                embed = None
            self.features = embeddings
            self.save_feature()
        labels = image_list.Class_ID
        le = preprocessing.LabelEncoder()
        self.labels = le.fit_transform(labels)

    def train_svm(self, train_ixs):
        logging.info('SVM Training...')
        X_train = self.features[train_ixs]
        y_train = self.labels[train_ixs]
        self.svm_model = SVC(kernel="linear", C=1).fit(X_train, y_train)

    def test_svm(self, test_ixs):
        TEST_BATCH_SIZE = 1000
        preds = []
        X_test = self.features[test_ixs]
        y_test = self.labels[test_ixs]
        with tqdm.tqdm(total=len(test_ixs), file=sys.stdout) as pbar:
            for i in range(0, len(test_ixs), TEST_BATCH_SIZE):
                X_test_batch = X_test[i:i + TEST_BATCH_SIZE]
                pred = self.svm_model.predict(X_test_batch)
                preds.append(pred)
                # update tqdm
                pbar.set_description("Processed: %d" % (1 + i))
                pbar.update(TEST_BATCH_SIZE)
        y_pred = np.array(list(chain(*preds)))
        logging.info("Overall Accuracy: {}".format(accuracy_score(y_test, y_pred)))
        return y_test, y_pred

    def load_features(self):
        self.features = np.load('features/{}_features.npy'.format(self.dataset_name),
                                allow_pickle=True)
        self.image_list = np.load('features/{}_paths.npy'.format(self.dataset_name),
                                  allow_pickle=True)

    def save_feature(self):
        np.save('features/{}_features.npy'.format(self.dataset_name), self.features)
        np.save('features/{}_paths.npy'.format(self.dataset_name), self.image_list)
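# A hedged end-to-end usage sketch. The file names, checkpoint path, and the layout of
# the image-list DataFrame (it must expose 'files' and 'Class_ID' columns, inferred from
# the attributes accessed above) are assumptions, not confirmed by the source.
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split

image_list = pd.read_csv('vggface2_test_list.csv')  # hypothetical listing file
face_id = FaceIdentification(batch_size=32,
                             dataset_name='vggface2_test',
                             data_dir='data/vggface2_test')  # hypothetical data dir
model = face_id.load_model(feature_dim=512,
                           model_dir='checkpoints/cbam_resnet100.pth')  # hypothetical checkpoint
face_id.get_features(image_list, model=model)

train_ixs, test_ixs = train_test_split(np.arange(len(face_id.labels)),
                                       test_size=0.2, random_state=0)
face_id.train_svm(train_ixs)
y_test, y_pred = face_id.test_svm(test_ixs)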