def test_action_value_is_gaussian(self):
    """
    Verify that an action's value is drawn from N(mu, sigma):
    - mu: the mean, here 0
    - sigma: the standard deviation, here 1
    """
    results = [Action(self.bandit).value for x in range(5000)]
    law_1 = get_percent([x for x in results if x > 0], results)
    self.assertAlmostEqual(law_1, 50, delta=2,
                           msg="%s%% of results are above mu instead of "
                               "around 50%%" % law_1)
    law_2 = get_percent([x for x in results if -1 < x < 1], results)
    self.assertAlmostEqual(law_2, 68, delta=2,
                           msg="%s%% of results are within one sigma of mu "
                               "instead of around 68%%" % law_2)
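The assertions lean on a get_percent helper that is not shown in this snippet. A minimal sketch, assuming it returns the size of a sub-population as a percentage of the whole (the name matches the call sites; the body is inferred, not the project's confirmed code):

def get_percent(subset, population):
    """Hypothetical helper: share of `subset` within `population`, in percent.
    Inferred from the assertions above (e.g. ~50% of N(0, 1) draws are > 0)."""
    if not population:
        return 0.0
    return 100.0 * len(subset) / len(population)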
def main():
    print("Image:")
    image = input()
    # Extract facial landmarks; stop if no face was detected.
    features = finding_face_landmark.finding_face_landmark(image)
    if len(features) == 0:
        exit(0)

    # Load the training data and scale it with its own min/max bounds.
    data_file_name = "features.csv"
    X, Y, Q = utils.get_data(data_file_name, 2000)
    x_min, x_max = utils.get_min_max(X)
    X = utils.normalize_features(x_min, x_max, X)

    test_file_name = "test.csv"
    T, P, L = utils.get_data_test(test_file_name, x_min, x_max, len(X), Q, Y)

    model_file_name = './my_test_model.ckpt'
    neural_network = n.Neural_Network(X, Y, model_file_name)
    # neural_network.training()
    # neural_network.test(T, P)

    # Scale the query features with the same bounds, then classify.
    features = utils.normalize_features(x_min, x_max, features)
    predict = neural_network.predict([features])

    # Look up the matched image, the person's name and the similarity score.
    image_path = Q[predict][0].strip()
    metadata = 'C:\\ProjekatSoft\\wiki_crop\\wiki.mat'
    name = utils.get_name(image_path, metadata)
    percent = utils.get_percent(features, X[predict:predict + 1, :15][0])
    utils.show_image('C:\\ProjekatSoft\\wiki_crop\\' + image_path, name, percent)
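The normalization helpers are not shown; a minimal sketch, assuming utils.normalize_features does standard min-max scaling with bounds precomputed on the training set (the body and the NumPy usage are assumptions, not the project's confirmed code):

import numpy as np

def normalize_features(x_min, x_max, X):
    """Hypothetical min-max scaler: map X into [0, 1] column by column,
    using bounds precomputed on the training data so that training rows
    and new query features end up on the same scale."""
    X = np.asarray(X, dtype=np.float64)
    span = np.where(x_max - x_min == 0, 1, x_max - x_min)  # guard constant columns
    return (X - x_min) / span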
def test_action_can_play(self):
    """Let's play and check that the action got rewards."""
    action = self.action  # shortcut
    for _ in range(1000):
        action.play()
    lim = [action.mu - 3 * action.sigma, action.mu + 3 * action.sigma]
    rewards_in_range = [r for r in action.rewards if lim[0] <= r <= lim[1]]
    percent = get_percent(rewards_in_range, action.rewards)
    self.assertAlmostEqual(percent, 85.0, delta=15,
                           msg="%s%% instead of 99.7%% for a normal "
                               "distribution" % percent)
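Both tests assume an Action class whose value and rewards are drawn from N(mu, sigma) and whose play() records each reward. A minimal sketch consistent with the assertions (the constructor signature and defaults are inferred, not the project's confirmed implementation):

import random

class Action:
    """Hypothetical arm of a multi-armed bandit, as implied by the tests."""

    def __init__(self, bandit, mu=0.0, sigma=1.0):
        self.bandit = bandit
        self.mu = mu          # mean of the reward distribution
        self.sigma = sigma    # standard deviation of the reward distribution
        self.value = random.gauss(mu, sigma)  # one Gaussian draw per action
        self.rewards = []

    def play(self):
        """Draw a reward from N(mu, sigma) and record it."""
        reward = random.gauss(self.mu, self.sigma)
        self.rewards.append(reward)
        return reward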
def calc_d_a_result_type_2(dataset, optional_list):
    sDict = calc_d_a_result_type_0(dataset, optional_list)
    if sDict is None:
        return None
    sDict['TYPE'] = "2"
    if P in sDict:
        # Replace the raw list of values with a {value: percent} mapping.
        vList = sorted(sDict[P])
        print(dataset)
        v_dict = {}
        for val in vList:
            print('val', val)
            v_dict[float(val)] = utils.get_Decimal(
                utils.get_percent(dataset, float(val)))
        sDict[P] = v_dict
        print("sDict[P] =====>", sDict[P])
    return sDict
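Here utils.get_percent(dataset, value) takes a dataset and a single value, unlike the two-collection variant in the tests above, which suggests a percentile-style helper. A minimal sketch of that reading (the semantics are an assumption based on this call site alone):

def get_percent(dataset, value):
    """Hypothetical percentile helper: percentage of entries in `dataset`
    that are less than or equal to `value`. The project's actual
    definition may differ."""
    values = [float(v) for v in dataset]
    if not values:
        return 0.0
    return 100.0 * sum(1 for v in values if v <= value) / len(values)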
def get_officer_info(conn, year, is_until_year=True):
    cur = conn.cursor()
    cur.execute("drop table if exists tmp_officer_info")
    cur.execute("""
        create temp table tmp_officer_info as
        select dao.id, dao.gender, dao.race,
               extract(year from dao.appointed_date) as appointed_year,
               dao.birth_year,
               extract(year from dao.resignation_date) as resign_year,
               count(doa.id)
        from data_officer as dao
        left join data_officerallegation as doa
            on dao.id = doa.officer_id
            and extract(year from doa.start_date) = {}
        where extract(year from dao.appointed_date) <= {}
        group by dao.id;
    """.format(year + 1, year))
    cur.execute("select * from tmp_officer_info")
    raw_officer_info = cur.fetchall()

    officer_info = [
        utils.get_onehot_encoding(tables.gender_table, roi[1]) +
        utils.get_onehot_encoding(tables.race_table, roi[2]) +
        (utils.get_normalized_year(roi[3], roi[5] or year),
         utils.get_normalized_year(roi[4], year))
        for roi in raw_officer_info
    ]
    officer_id = [roi[0] for roi in raw_officer_info]
    target = [roi[6] for roi in raw_officer_info]
    officer_index = {roi[0]: idx for idx, roi in enumerate(raw_officer_info)}

    cur.execute("""
        select dao.id,
               json_agg(extract(year from start_date)) as start_year,
               json_agg(final_finding) as final_finding,
               json_agg(final_outcome) as final_outcome,
               json_agg(coalesce(category, 'Unknown')) as allegation_category,
               json_agg(disciplined) as disciplined,
               json_agg(is_officer_complaint) as is_officer_complaint,
               json_agg(dv.gender) as victim_gender,
               coalesce(avg(extract(year from start_date) - dv.birth_year), 0)
                   as victim_age
        from data_officer as dao
        left join data_officerallegation as doa on dao.id = doa.officer_id
        left join data_allegation as da on doa.allegation_id = da.crid
        left join data_allegationcategory as dac
            on doa.allegation_category_id = dac.id
        left join data_victim as dv on doa.allegation_id = dv.allegation_id
        where extract(year from start_date) {} {}
            and extract(year from dao.appointed_date) <= {}
        group by dao.id
    """.format('<=' if is_until_year else '=', year, year))
    raw_total_allegation_info = cur.fetchall()

    o_idx = None
    for rai in raw_total_allegation_info:
        o_idx = officer_index[rai[0]]
        officer_info[o_idx] += (
            (
                len(rai[1]),  # number of total allegations till year
                utils.get_percent(lambda x: x == "SU", rai[2]),  # total sustained rate
            )
            + utils.get_percent_histogram(tables.final_outcome_table, rai[3])
            + utils.get_percent_histogram(tables.allegation_table, rai[4])
            + (
                utils.get_percent(lambda x: x, rai[5]),  # disciplined rate
                utils.get_percent(lambda x: x, rai[6]),  # officer complainant rate
            )
            + utils.get_percent_histogram(tables.race_table, rai[7])  # victim race
            + (utils.get_normalized_year(0, rai[8]), ))  # victim age

    # pad 0 to other officers so every feature vector has the same length
    length = len(officer_info[o_idx])
    for idx, oi in enumerate(officer_info):
        if len(oi) != length:
            officer_info[idx] = oi + (0.0, ) * (length - len(oi))

    cur.execute("""
        select doa1.officer_id as officer1, doa2.officer_id as officer2,
               count(doa1.id)
        from data_officerallegation as doa1
        join data_officerallegation as doa2
            on doa1.allegation_id = doa2.allegation_id
            and doa1.officer_id != doa2.officer_id
        where extract(year from doa1.start_date) {} {}
            and doa1.officer_id in (select id from tmp_officer_info)
            and doa2.officer_id in (select id from tmp_officer_info)
        group by doa1.officer_id, doa2.officer_id
    """.format('<=' if is_until_year else '=', year, year))
    officer_relation = cur.fetchall()
    officer_relation = [(officer_index[ori[0]], officer_index[ori[1]], ori[2])
                        for ori in officer_relation]

    # create tensors for features and adjacency matrix
    features = torch.tensor(officer_info, dtype=torch.float32)
    adjacency = torch.tensor(officer_relation, dtype=torch.long)[:, 0:2]
    adjacency_weight = torch.tensor(officer_relation, dtype=torch.float32)[:, 2] \
        .view(adjacency.shape[0], 1)
    target = torch.tensor(target, dtype=torch.float32) \
        .view(features.shape[0], 1)
    features = utils.column_normalize(features)
    adjacency_weight = utils.column_normalize(adjacency_weight)
    return officer_id, features, adjacency, adjacency_weight, target
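get_officer_info calls utils.get_percent with a predicate and utils.get_percent_histogram with a category table. Minimal sketches consistent with those call sites, assuming `table` is an iterable of category labels and rates are returned as fractions (both helpers are inferred, not the repository's confirmed code):

def get_percent(predicate, items):
    """Hypothetical rate helper: fraction of non-null items for which
    `predicate` is truthy, e.g. get_percent(lambda x: x == "SU", findings)
    for the sustained rate. The real helper may scale to 0-100 instead."""
    values = [x for x in items if x is not None]
    if not values:
        return 0.0
    return sum(1 for x in values if predicate(x)) / len(values)

def get_percent_histogram(table, items):
    """Hypothetical histogram helper: one fraction per category in `table`,
    returned as a tuple so it can be concatenated onto the feature vector."""
    values = [x for x in items if x is not None]
    total = len(values) or 1
    return tuple(sum(1 for x in values if x == cat) / total for cat in table)

Note that the adjacency tensor returned above is an E x 2 edge list of officer index pairs; depending on the downstream graph model it may need transposing into a 2 x E edge index before use.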