def parse_ms(s):
    """Write class-probability files for dataset ``s``.

    Produces one probabilities file per period model M^s_j and one for the
    naive model. Each output line is ``id;time;p_1;...;p_k`` with one
    probability per label in ``all_labels`` (0 for labels unknown to the
    model). A data point is only scored by models whose training period
    ended before the point's timestamp.
    """
    fn_data = Name_functions.DS_file(s)
    x, y, time, case_id = Di(fn_data).get_data(return_identifiers=True,
                                               return_split_values=True)

    def _write_probabilities(model, end_time, fn_out):
        # Shared writer for period and naive models (the two code paths
        # were previously duplicated verbatim).
        model_labels = model.classes_.tolist()
        with open(fn_out, 'w+') as wf:
            for dx, t, idn in zip(x, time, case_id):
                if t < end_time:
                    # Only test if the model existed before the data point
                    continue
                probs = model.predict_proba(dx.reshape(1, -1))[0]
                # Probability per global label; 0 when the model never saw it.
                row = [0 if label not in model_labels
                       else probs[model_labels.index(label)]
                       for label in all_labels]
                # NOTE(review): '{:4f}' is width-4, not 4 decimals — if
                # '{:.4f}' was intended, downstream parsers must be checked
                # before changing it; kept as-is.
                wf.write('{};{};{}\n'.format(
                    idn, t, ';'.join('{:4f}'.format(p) for p in row)))

    print('\tM^{}_j ... '.format(s), end='', flush=True)
    # S predictions: newest period first; skip files that already exist.
    for j in sorted([int(i) for i in Name_functions.S_J_values(s)],
                    reverse=True):
        if Filefunctions.exists(Name_functions.DSJ_probabilities(s, j)):
            continue
        model_j = Model_Functions.loadModel(Name_functions.model_SJ(s, j))
        _write_probabilities(model_j,
                             Name_functions.SJ_period_end_time(s, j),
                             Name_functions.DSJ_probabilities(s, j))
    print('Done')

    # Naive predictions
    print('\tM^{}_naive ... '.format(s), end='', flush=True)
    if Filefunctions.exists(Name_functions.DS_probabilities_naive(s)):
        print('Already done')
        return
    model_naive = Model_Functions.loadModel(Name_functions.model_S_naive(s))
    _write_probabilities(model_naive,
                         Parameters.train_time_naive_stop,
                         Name_functions.DS_probabilities_naive(s))
    print('Done')
def parse_ms(s):
    """Split dataset ``s`` into train/test case-id files.

    Either chronologically (everything after a time cut-off becomes test
    data) or as a label-stratified random sample, depending on
    ``Parameters.take_test_split_chronological``.
    """
    print('D^{} ... '.format(s), end='', flush=True)
    if (Filefunctions.exists(Name_functions.DS_train_ids(s))
            and Filefunctions.exists(Name_functions.DS_test_ids(s))):
        print('Already done')
        return

    np.random.seed(0)  # reproducible random split
    X, y, times, ids = DI(Name_functions.DS_file(s)).get_data(
        Name_functions.DS_reduced_ids_DSJ(s), True, True)

    if Parameters.take_test_split_chronological:
        # Chronological: the last `assessment_test_split` fraction of the
        # post-warm-up timeline is the test set.
        post_warm_up = sorted(t for t in times
                              if t > Parameters.test_time_start)
        cut_index = int((1 - Parameters.assessment_test_split)
                        * len(post_warm_up))
        cut_time = post_warm_up[cut_index]
        train_case_ids, test_case_ids = [], []
        for start_time, cid in zip(times, ids):
            if start_time <= Parameters.test_time_start:
                continue
            (train_case_ids if start_time < cut_time
             else test_case_ids).append(cid)
    else:
        # Stratified: sample the test fraction per label at random.
        eligible = [i for i in range(len(ids))
                    if times[i] > Parameters.test_time_start]
        train_indices, test_indices = [], []
        labels, counts = np.unique(y[eligible], return_counts=True)
        for label, label_count in zip(labels, counts):
            of_label = [i for i in eligible if y[i] == label]
            chosen = np.random.choice(
                of_label,
                int(label_count * Parameters.assessment_test_split),
                replace=False)
            test_indices.extend(chosen.tolist())
            train_indices.extend(i for i in of_label if i not in chosen)
        test_case_ids = ids[test_indices]
        train_case_ids = ids[train_indices]

    with open(Name_functions.DS_train_ids(s), 'w+') as wf:
        for cid in train_case_ids:
            wf.write('{}\n'.format(cid))
    with open(Name_functions.DS_test_ids(s), 'w+') as wf:
        for cid in test_case_ids:
            wf.write('{}\n'.format(cid))
    print('Done')
def parse_naive(s):
    """Reduce the naive-training-period cases of dataset ``s`` to medoid ids."""
    fn_target = Name_functions.DS_reduced_ids_naive(s)
    print('\tD^S_naive ... ', end='', flush=True)
    if Filefunctions.exists(fn_target):
        print('Already done')
        return

    x, y, timestamps, ids = DataImporter(Name_functions.DS_file(s)).get_data(
        return_identifiers=True, return_split_values=True)

    # Keep only data points that fall before the naive training cut-off.
    early = [i for i, t in enumerate(timestamps)
             if t < Parameters.train_time_naive_stop]
    x, y, ids = x[early], y[early], ids[early]

    # Reduce to medoid representatives and persist their case ids.
    x, y, medoid_indices = KMedoids.reduce_to_medoids(
        x, y, return_indices=True, factor=Parameters.LargeSmallFactor)
    with open(fn_target, 'w+') as wf:
        for case_id in (ids[i] for i in medoid_indices):
            wf.write('{}\n'.format(case_id))
    print('Done')
def _eval_previous(self):
    """Score the PreviousClassifier per day for every S and write a metric CSV."""
    print('Parsing Previous ... ', end='', flush=True)
    fn_out = Name_functions.parameter_evaluation_evaluation_metric_file('Previous')
    if Filefunctions.exists(fn_out):
        print('Already done')
        return

    with open(fn_out, 'w+') as wf:
        wf.write('S;Day;NumEntries;accuracy;f1\n')
        for S in self.Multi['S']:
            predictor = Classifiers.PreviousClassifier(S)
            _, labels, times, ids = Di(Name_functions.DS_file(S)).get_data(
                fn_subset_ids=self.test_ids_fn,
                return_split_values=True,
                return_identifiers=True)
            frame = pd.DataFrame(index=ids)
            frame['time'] = times
            frame['y_true'] = [label[0] for label in labels]
            frame['Day'] = np.floor(frame['time'])

            # One accuracy/F1 row per distinct day of test data.
            for day in frame['Day'].unique():
                day_rows = frame[frame['Day'] == day]
                acc, f1 = self.get_scores(predictor=predictor,
                                          true_labels=day_rows['y_true'],
                                          times=day_rows['time'],
                                          ids=day_rows.index)
                if acc is not None and f1 is not None:
                    wf.write('{};{};{};{};{}\n'.format(
                        S, day, len(day_rows), acc, f1))
    print('Done')
def load_dict_from_csv(filename):
    """Load a ``key;value`` file into a dict.

    Lines are split on the first ';' only, so values may themselves contain
    semicolons. Later duplicate keys overwrite earlier ones.

    Args:
        filename: path to the file to read.

    Returns:
        dict mapping each key (str) to its value (str).

    Raises:
        OSError: if the file cannot be opened (the original ``assert``
            existence check disappears under ``python -O``; EAFP via
            ``open`` is reliable).
    """
    ret = dict()
    with open(filename, 'r') as rf:
        for line in rf:
            # rstrip('\n') instead of line[:-1]: the old slice chopped the
            # last character of the value when the file had no trailing
            # newline.
            key, value = line.rstrip('\n').split(';', 1)
            ret[key] = value
    return ret
def __init__(self, filename):
    """Build the event log from a ``case_id;timestamp;activity`` CSV file.

    Args:
        filename: path of the event-log CSV, one event per line.

    Raises:
        FileNotFoundError: if ``filename`` does not exist (was an
            ``assert``, which is stripped under ``python -O``).
    """
    if not Filefunctions.exists(filename):
        raise FileNotFoundError(filename)
    # We implement the event log as a dict: this allows easier reference
    # when adding an event to a case, since each event carries a case_id,
    # not a reference to the case itself.
    self.cases = dict()
    with open(filename, 'r') as rf:
        for line in rf:
            # rstrip('\n') instead of line[:-1], which would eat the last
            # character of the activity on a file without trailing newline.
            case_id, timestamp, act = line.rstrip('\n').split(';')
            event = Event(case_id=case_id, time=float(timestamp), act=act)
            # Append to the corresponding case, creating it on first sight.
            self.cases.setdefault(case_id,
                                  Case(case_id=case_id)).add_event(event)
def split_data(self, interval, fn_subset_ids=None, return_split_values=False, return_identifiers=False):
    """Bucket the data by time interval over the split values.

    Each row is assigned to group ``floor(split_value / interval)``.

    Args:
        interval: bucket width over ``self.split_values``.
        fn_subset_ids: optional file with one case id per line; when given,
            only rows whose id occurs in the file are used.
        return_split_values: also return a per-group dict of split values.
        return_identifiers: also return a per-group dict of identifiers.

    Returns:
        Tuple of per-group dicts: (x, y[, split_values][, ids]).

    Raises:
        Exception: if ``fn_subset_ids`` is given but does not exist.
    """
    keep_idx = None
    if fn_subset_ids is not None:  # single check (was duplicated)
        if not Filefunctions.exists(fn_subset_ids):
            raise Exception(
                'File does not exist:\n{}'.format(fn_subset_ids))
        with open(fn_subset_ids, 'r') as rf:
            # set: O(1) membership instead of O(len) list scans per row
            keep_ids = {line[:-1] for line in rf}
        keep_idx = {i for i in range(len(self.IDS))
                    if self.IDS[i] in keep_ids}

    index_list = dict()
    for (i, s) in enumerate(self.split_values):
        if keep_idx is not None and i not in keep_idx:
            continue
        group = math.floor(float(s) / interval)
        index_list.setdefault(group, []).append(i)

    # Densify once: calling self.X.toarray() inside the loop converted the
    # entire sparse matrix to dense once per group.
    dense_x = self.X.toarray()
    return_x = dict()
    return_y = dict()
    return_split = dict()
    return_ids = dict()
    for (group, indices) in index_list.items():
        return_x[group] = dense_x[indices]
        return_y[group] = self.y[indices]
        if return_split_values:
            return_split[group] = self.split_values[indices]
        if return_identifiers:
            return_ids[group] = self.IDS[indices]

    ret = (
        return_x,
        return_y,
    )
    ret += ((return_split, ) if return_split_values else ())
    ret += ((return_ids, ) if return_identifiers else ())
    return ret
def _eval_param(self, evaluated_parameter):
    """Grid-evaluate BPTS scoring parameters and write per-day metrics to CSV."""
    print('Parsing parameter {} ... '.format(evaluated_parameter), end='', flush=True)
    fn_out = Name_functions.parameter_evaluation_evaluation_metric_file(evaluated_parameter)
    if Filefunctions.exists(fn_out):
        print('Already done')
        return

    with open(fn_out, 'w+') as wf:
        wf.write('S;Beta;Tau;P;Day;NumEntries;accuracy;f1\n')
        for S in self.values(evaluated_parameter, 'S'):
            predictor = Classifiers.BPTSClassifier(s=S, score_function=None)
            _, labels, times, ids = Di(Name_functions.DS_file(S)).get_data(
                fn_subset_ids=self.test_ids_fn,
                return_split_values=True,
                return_identifiers=True)
            frame = pd.DataFrame(index=ids)
            frame['time'] = times
            frame['y_true'] = [label[0] for label in labels]
            frame['Day'] = np.floor(frame['time'])

            # Full grid over (Beta, P, Tau); the predictor instance is
            # reused and only its scoring function is swapped per combo.
            for beta in self.values(evaluated_parameter, 'Beta'):
                for p in self.values(evaluated_parameter, 'P'):
                    for tau in self.values(evaluated_parameter, 'Tau'):
                        predictor.set_scoring_function(
                            PeriodScoring(s=S, beta=beta, tau=tau, p=p))
                        for day in frame['Day'].unique():
                            day_rows = frame[frame['Day'] == day]
                            acc, f1 = self.get_scores(
                                predictor=predictor,
                                ids=day_rows.index,
                                times=day_rows['time'],
                                true_labels=day_rows['y_true'])
                            if acc is not None and f1 is not None:
                                wf.write('{};{};{};{};{};{};{};{}\n'.format(
                                    S, beta, tau, p, day,
                                    len(day_rows), acc, f1))
    print('Done')
def parse_ms(s):
    """Reduce each time split of dataset ``s`` to its medoid case ids."""
    fn_target = Name_functions.DS_reduced_ids_DSJ(s)
    print('\tD^S_j ... ', end='', flush=True)
    if Filefunctions.exists(fn_target):
        print('Already done')
        return

    x, y, ids = DataImporter(Name_functions.DS_file(s)).split_data(
        int(s), return_identifiers=True)

    # Collect the medoid representatives of every split group, in group order.
    ids_keep = []
    for group in sorted(x):
        _, _, medoid_indices = KMedoids.reduce_to_medoids(
            x[group], y[group], return_indices=True)
        ids_keep.extend(ids[group][j] for j in medoid_indices)

    with open(fn_target, 'w+') as wf:
        wf.writelines('{}\n'.format(case_id) for case_id in ids_keep)
    print('Done')
def get_data(self, fn_subset_ids=None, return_split_values=False, return_identifiers=False):
    """Return the data, optionally filtered by a case-id subset file.

    Args:
        fn_subset_ids: optional file with one case id per line; when given,
            only rows whose id occurs in the file are returned.
        return_split_values: append the split values to the result tuple.
        return_identifiers: append the identifiers to the result tuple.

    Returns:
        Tuple (x, y[, split_values][, ids]).

    Raises:
        Exception: if ``fn_subset_ids`` is given but does not exist.
    """
    if fn_subset_ids is None:
        ret = (self.X.toarray(), self.y)
        ret += (self.split_values, ) if return_split_values else ()
        ret += (self.IDS, ) if return_identifiers else ()
    else:
        if not Filefunctions.exists(fn_subset_ids):
            raise Exception(
                'File does not exist:\n{}'.format(fn_subset_ids))
        with open(fn_subset_ids, 'r') as rf:
            # set: O(1) membership instead of scanning a list for every row
            keep_ids = {line[:-1] for line in rf}
        keep_idx = [
            i for i in range(len(self.IDS)) if self.IDS[i] in keep_ids
        ]
        ret = (self.X.toarray()[keep_idx], self.y[keep_idx])
        ret += (
            self.split_values[keep_idx], ) if return_split_values else ()
        ret += (self.IDS[keep_idx], ) if return_identifiers else ()
    return ret
def parse_ms(s):
    """Evaluate the GRAEC parameter grid plus naive/previous baselines for dataset s.

    Writes, per (Beta, Tau, P) combination, a train- and a test-prediction
    file keyed by a running enumeration number, then saves the mapping
    enumeration -> 'B;T;P' to CSV. Finally writes test predictions of the
    naive and previous classifiers. Skips everything when the enumeration
    dictionary already exists.
    """
    print('\tGRAEC ... ', end='', flush=True)
    if Filefunctions.exists(Name_functions.S_GRAEC_enumeration_dictionary(s)):
        print('Already done')
        return
    # Maps enumeration number -> parameter combination string 'B;T;P'.
    enumeration_encoder = dict()
    fn_data = Name_functions.DS_file(s)
    fn_train_ids = Name_functions.DS_train_ids(s)
    fn_test_ids = Name_functions.DS_test_ids(s)
    x_train, labels_train, times_train, ids_train = DI(fn_data).get_data(
        fn_subset_ids=fn_train_ids,
        return_split_values=True,
        return_identifiers=True)
    x_test, labels_test, times_test, ids_test = DI(fn_data).get_data(
        fn_subset_ids=fn_test_ids,
        return_split_values=True,
        return_identifiers=True)
    enumeration = 0
    # One predictor is reused; only its scoring function changes per combo.
    predictor = Classifiers.BPTSClassifier(s=s, score_function=None)
    for B in Parameters.GRAEC_beta:
        for T in Parameters.GRAEC_tau:
            for P in Parameters.GRAEC_p if not T == 0 else [
                    0
            ]:  # P has no use for T == 0
                enumeration_encoder[enumeration] = '{};{};{}'.format(B, T, P)
                predictor.set_scoring_function(
                    score_function=PeriodScoring(beta=B, p=P, tau=T, s=s))
                # Train-set predictions for this parameter combination.
                # NOTE(review): header says 'SOID' here but 'Case_id' in the
                # test file below — looks inconsistent; confirm which one
                # downstream parsers expect before unifying.
                with open(
                        Name_functions.S_GRAEC_train_predictions(
                            s, enumeration), 'w+') as wf:
                    wf.write('SOID;time;True_label;Predicted_label\n')
                    for case_id, t, true_label in zip(ids_train, times_train,
                                                      labels_train):
                        predicted_label = predictor.predict(case_id=case_id,
                                                            time=t)
                        wf.write('{};{};{};{}\n'.format(
                            case_id, t, true_label[0], predicted_label))
                # Test-set predictions for the same combination.
                with open(
                        Name_functions.S_GRAEC_test_predictions(
                            s, enumeration), 'w+') as wf:
                    wf.write('Case_id;time;True_label;Predicted_label\n')
                    for case_id, t, true_label in zip(ids_test, times_test,
                                                      labels_test):
                        predicted_label = predictor.predict(case_id=case_id,
                                                            time=t)
                        wf.write('{};{};{};{}\n'.format(
                            case_id, t, true_label[0], predicted_label))
                enumeration += 1
    Human_Functions.save_dict_to_csv(
        enumeration_encoder, Name_functions.S_GRAEC_enumeration_dictionary(s))
    # Reload the test subset; these variables feed the baseline section below.
    fn_data = Name_functions.DS_file(s)
    fn_ids = Name_functions.DS_test_ids(s)
    x, labels, times, ids = DI(fn_data).get_data(fn_subset_ids=fn_ids,
                                                 return_split_values=True,
                                                 return_identifiers=True)
    print('Done')
    # Baselines: naive and previous classifiers on the test set. Rows where
    # a baseline returns None are silently skipped.
    print('\tNaive and Previous ... ', end='', flush=True)
    naive_predictor = Classifiers.NaiveClassifier(s)
    previous_predictor = Classifiers.PreviousClassifier(s)
    with open(Name_functions.S_naive_test_predictions(s), 'w+') as wf_naive:
        with open(Name_functions.S_recent_test_predictions(s),
                  'w+') as wf_previous:
            wf_naive.write('{};{};{};{}\n'.format('Case_id', 'time',
                                                  'True_label',
                                                  'Predicted_label'))
            wf_previous.write('{};{};{};{}\n'.format('Case_id', 'time',
                                                     'True_label',
                                                     'Predicted_label'))
            for case_id, t, true_label in zip(ids, times, labels):
                predicted_label_naive = naive_predictor.predict(
                    case_id=case_id, time=t)
                if predicted_label_naive is not None:
                    wf_naive.write('{};{};{};{}\n'.format(
                        case_id, t, true_label[0], predicted_label_naive))
                predicted_label_previous = previous_predictor.predict(
                    case_id=case_id, time=t)
                if predicted_label_previous is not None:
                    wf_previous.write('{};{};{};{}\n'.format(
                        case_id, t, true_label[0], predicted_label_previous))
    print('Done')
def S_J_values(S):
    """Return the split names available for S, or [] when no splits folder exists."""
    folder = S_folder(S) + '/Splits'
    return os.listdir(folder) if Filefunctions.exists(folder) else []