def Simulate(self, simulation_loop=1, state_value_report=True):
    # Run Monte Carlo episodes: each episode queries features until the
    # remaining hypothesis set collapses, then updates the state-action values.
    for looptime in range(simulation_loop):
        R = 0
        is_end = False
        next_feature = False
        current_feature = -1
        current_label = -1
        self.Reset()
        while True:
            if is_end:
                # Episode finished: Monte Carlo update of the state-action values
                Update.MonteCarlo_Update(R, self.state_list,
                                         self.state_action_label_value_map)
                break
            else:
                # Select the next feature to observe (epsilon exploration)
                next_feature = Select.MonteCarlo_Epsilon_Select(
                    self.feature_remaining, current_feature, current_label,
                    self.state_action_label_value_map)
                Select.Erase_Feature(self.feature_remaining, next_feature)
                self.hypo_remaining_set = Observe.Observe_Subset(
                    self.true_hypothesis, self.hypo_remaining_set, next_feature)
                Observe.Clear_Overlap(self.feature_remaining, self.hypo_remaining_set)
                is_end = Observe.Check_End(self.hypo_remaining_set)
                self.state_list.append(
                    (current_feature, next_feature, current_label))
                # Reward of -1 for every feature queried
                R += -1
                current_label = self.true_hypothesis[next_feature]
                current_feature = next_feature
    if state_value_report:
        Report.Report_State_Value_Map(self.state_action_label_value_map)
def Apply_Policy_To_Random_Hypo(hypo_subset, number_features, state_action_label_value_map):
    # Apply the learned state-action values to a randomly drawn hypothesis
    # and return the order in which features are queried.
    R = 0
    is_end = False
    next_feature = 0
    true_hypothesis = Generate.Get_Hypo(hypo_subset)
    hypo_remaining_set = hypo_subset
    feature_remaining_set = []
    feature_trajectory = []
    current_feature = -1
    current_label = -1
    for i in range(number_features):
        feature_remaining_set.append(i)
    while True:
        if is_end:
            break
        else:
            # Follow the learned state-action values to pick the next feature
            next_feature = Select.MonteCarlo_Select(
                feature_remaining_set, current_feature, current_label,
                state_action_label_value_map)
            Select.Erase_Feature(feature_remaining_set, next_feature)
            hypo_remaining_set = Observe.Observe_Subset(
                true_hypothesis, hypo_remaining_set, next_feature)
            Observe.Clear_Overlap(feature_remaining_set, hypo_remaining_set)
            is_end = Observe.Check_End(hypo_remaining_set)
            feature_trajectory.append(next_feature)
            current_label = true_hypothesis[next_feature]
            current_feature = next_feature
    return feature_trajectory
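# --- Usage sketch (illustrative only, not part of the original module) ---
# Assuming `agent` is an instance of the class that owns Simulate() above and
# exposes the attributes used there (feature_remaining, hypo_remaining_set,
# state_action_label_value_map); the variable names here are hypothetical.
#
#   agent.Simulate(simulation_loop=1000, state_value_report=False)
#   trajectory = Apply_Policy_To_Random_Hypo(
#       hypo_subset, number_features, agent.state_action_label_value_map)
#   # `trajectory` is the order in which the learned policy queries features
#   # for a hypothesis drawn from `hypo_subset`.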
def NKnowledgeability_Task(hypo_table, number_hypo, number_feature, number_label, knowledgeability, iter=200):
    feature_set = []

    # New knowledgeability table
    # Axis 1: number of observations made so far
    # Axes 2-3: the delta knowledgeability table (true hypothesis x hypothesis)
    new_knowledgeability_delta_table = numpy.zeros(
        (number_feature + 1, number_hypo, number_hypo), dtype=float)

    # Assume there is a true hypo = hypo
    # Get all possible hypotheses in the hypo map
    for hypo_idx in range(len(hypo_table)):
        # Get the observable feature set
        for f in range(number_feature):
            feature_set.append(f)
        obs = 0

        # Set the environment
        num_hypo, num_feature, num_label, p_teacher_x_h, p_teacher_xy_h, p_learner_h_xy, p_y_xh, delta_g_h, phx = Set.Set(
            hypo_table, None, knowledgeability=knowledgeability)

        while True:
            # Record the learner's current belief before the next observation
            for h in range(number_hypo):
                new_knowledgeability_delta_table[obs][hypo_idx][h] = phx[h]

            # Get the PT
            p_learner_h_xy = Knowledgeability_Task(num_hypo, num_feature, num_label,
                                                   p_teacher_xy_h, p_teacher_x_h,
                                                   p_y_xh, delta_g_h, phx, iter)

            # Choose a feature
            feature = Observe.Get_Feature(feature_set, hypo_idx, p_teacher_x_h)
            obs += 1
            prob_find, true_label = Observe.Observe(hypo_table, hypo_idx, feature, p_learner_h_xy)

            # Assign the p_learner_h_xy to phx
            for h in range(number_hypo):
                phx[h] = p_learner_h_xy[h][feature][true_label]

            # Remove the feature from the feature set so the same feature
            # is only observed once
            feature_set.remove(feature)
            if len(feature_set) == 0:
                for h in range(number_hypo):
                    new_knowledgeability_delta_table[obs][hypo_idx][h] = phx[h]
                break

    return new_knowledgeability_delta_table
def Probability_Task(hypo_table, number_hypo, number_feature, number_label, p_teacher_x_h, knowledgeability, iter=100):
    prob_map = {}
    feature_set = []

    # Assume there is a true hypo = hypo
    # Get all possible hypotheses in the hypo map
    for hypo_idx in range(len(hypo_table)):
        # Get the observable feature set
        for f in range(number_feature):
            feature_set.append(f)
        obs = 0
        prob = []
        select = []

        # Make a copy of the whole hypo table, and transform it into a hypo_map
        hypo_map_copy = copy.deepcopy(hypo_table)

        while True:
            # Pass the hypo copy to the Set function
            num_hypo, num_feature, num_label, p_teacher_x_h, p_teacher_xy_h, p_learner_h_xy, p_y_xh, delta_g_h = Init.Set(
                hypo_map_copy, knowledgeability=knowledgeability)

            # Get the PT
            p_learner_h_xy = Init.Initstep(num_hypo, num_feature, num_label, p_y_xh)
            Knowledgeability_Task(num_hypo, num_feature, num_label, p_teacher_xy_h,
                                  p_teacher_x_h, p_learner_h_xy, p_y_xh, delta_g_h, iter)

            # Choose a feature
            new_hypo_idx = Observe.Get_Index(hypo_table, hypo_map_copy, hypo_idx)
            feature = Observe.Get_Feature(feature_set, new_hypo_idx, p_teacher_x_h)
            obs += 1
            prob_find, hypo_map_copy = Observe.Observe(hypo_map_copy, new_hypo_idx,
                                                       hypo_table[hypo_idx], feature,
                                                       p_learner_h_xy)
            prob.append(prob_find)
            select.append(feature)

            # Remove the feature from the feature set
            feature_set.remove(feature)
            if len(feature_set) == 0:
                prob_map[hypo_idx] = (prob, select)
                break

    return prob_map
def Probability_Task(hypo_table, number_hypo, number_feature, number_label, k_table, iter):
    prob_map = {}
    select_map = {}
    const_feature_set = []

    # Append all observable features to the feature set
    for f in range(number_feature):
        const_feature_set.append(f)

    # Assume there is a true hypo = hypo
    # Get all possible hypotheses in the hypo map
    for hypo_idx in range(len(hypo_table)):
        # Make a copy to save time
        feature_set = copy.deepcopy(const_feature_set)
        prob = []
        select = []

        # Set the environment
        # Since we have the knowledgeability table, the knowledgeability argument is ignored
        num_hypo, num_feature, num_label, p_teacher_x_h, p_teacher_xy_h, p_learner_h_xy, p_y_xh, delta_g_h, phx = Set.Set(
            hypo_table, k_table, knowledgeability=1)

        while True:
            # Get the PT
            p_learner_h_xy = Knowledgeability_Task(num_hypo, num_feature, num_label,
                                                   p_teacher_xy_h, p_teacher_x_h,
                                                   p_y_xh, k_table, phx, iter)

            # Pick the feature with the highest PT
            feature = Observe.Get_Feature(feature_set, hypo_idx, p_teacher_x_h)
            prob_find, true_label = Observe.Observe(hypo_table, hypo_idx, feature, p_learner_h_xy)
            prob.append(prob_find)
            select.append(feature)

            # Assign the p_learner_h_xy to phx
            for h in range(number_hypo):
                phx[h] = p_learner_h_xy[h][feature][true_label]

            # Remove the feature from the feature set so the same feature
            # is only observed once
            feature_set.remove(feature)
            if len(feature_set) == 0:
                prob_map[hypo_idx] = prob
                select_map[hypo_idx] = select
                break

    return prob_map, select_map
def Get_Prob_Table(number_obs, p_teacher_x_h, prob_list):
    # Again get the feature list
    feature_list = Observe.Get_Target_Feature_Set([0, 1, 2], number_obs)

    # The new probability map with a length = number of hypotheses
    new_prob_list = numpy.zeros(len(prob_list))

    for hypo in range(len(prob_list)):
        total = 0
        for feature in range(len(feature_list)):
            # Probability that this feature set is selected for this hypothesis
            prob_select = Observe.Get_Probability(p_teacher_x_h, hypo, feature_list[feature])
            total += prob_list[hypo, feature] * prob_select
        new_prob_list[hypo] = total

    return new_prob_list
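# --- Note (illustrative only) ---
# Get_Prob_Table computes, for each hypothesis h, the expected probability of
# identification over the candidate observation sets x:
#     new_prob_list[h] = sum_x prob_list[h, x] * P_select(x | h)
# If the selection probabilities were precomputed into a matrix `select_prob`
# with the same shape as `prob_list` (a hypothetical name), the double loop
# would reduce to a single vectorized line:
#
#   new_prob_list = (prob_list * select_prob).sum(axis=1)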
def Sample_P(hypo_table, number_hypo, number_feature, number_label, k_matrix, iter):
    hypo_table_size = len(hypo_table)
    prob_list = []
    const_feature_set = []

    # Append all observable features to the feature set
    for f in range(number_feature):
        const_feature_set.append(f)

    # Assume there is a true hypo = hypo
    # Get all possible hypotheses in the hypo map
    for hypo_idx in range(hypo_table_size):
        # Make a copy to save time
        feature_set = copy.deepcopy(const_feature_set)

        # Set the environment
        # Since we have the knowledgeability table, the knowledgeability argument is ignored
        num_hypo, num_feature, num_label, p_teacher_x_h, p_teacher_xy_h, p_learner_h_xy, p_y_xh, delta_g_h, phx = Set.Set(
            hypo_table, k_matrix, knowledgeability=1)

        # Get the PT
        p_learner_h_xy = Knowledgeability_Task(num_hypo, num_feature, num_label,
                                               p_teacher_xy_h, p_teacher_x_h,
                                               p_y_xh, k_matrix, phx, iter)

        # Pick the feature with the highest PT
        feature = Observe.Get_Feature(feature_set, hypo_idx, p_teacher_x_h)

        # Get the p and the corresponding label
        prob_find, true_label = Observe.Observe(hypo_table, hypo_idx, feature, p_learner_h_xy)

        # Append the p to the prob list
        prob_list.append(prob_find)

    # Average the value
    p = 0
    for item in prob_list:
        p += item
    p /= number_hypo
    return p
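# --- Note (illustrative only) ---
# Unlike Probability_Task above, Sample_P performs only the first teaching
# observation for each hypothesis and returns the mean of prob_find across
# hypotheses. When len(hypo_table) == number_hypo, the averaging loop is
# equivalent to:
#
#   p = float(numpy.mean(prob_list))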
def Probability_Task(hypo_map, number_obs, number_hypo, number_feature, number_label, p_teacher_x_h):
    prob_list = []
    feature_list = Observe.Get_Target_Feature_Set([0, 1, 2], number_obs)
    print(feature_list)

    # Assume there is a true hypo
    # Get all possible hypotheses in the hypo map
    for hypo in range(number_hypo):
        F = []
        # Choose a feature to observe
        for feature_set in feature_list:
            # Get the probability that L will select this feature / these features
            # prob = Observe.Get_Probability_Map(p_teacher_x_h, hypo, feature_set)

            # Does the learner find the true hypo?
            prob_find = Observe.Observe(hypo_map, hypo, feature_set)
            F.append(prob_find)
        prob_list.append(F)

    return numpy.array(prob_list)
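# --- Note (illustrative only) ---
# The returned array has shape (number_hypo, len(feature_list)); it appears to
# be the `prob_list` consumed by Get_Prob_Table above, where entry [h, x] is
# the probability that the learner identifies hypothesis h after observing the
# feature set feature_list[x].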