def parse_naive(s):
    fn_target = Name_functions.DS_reduced_ids_naive(s)
    print('\tD^S_naive ... ', end='', flush=True)

    # Check existence
    if Filefunctions.exists(fn_target):
        print('Already done')
        return

    fn_input = Name_functions.DS_file(s)
    x, y, timestamps, ids = DataImporter(fn_input).get_data(
        return_identifiers=True, return_split_values=True)
    first_year_indices = [
        i for i in range(len(timestamps))
        if timestamps[i] < Parameters.train_time_naive_stop
    ]
    x = x[first_year_indices]
    y = y[first_year_indices]
    ids = ids[first_year_indices]
    x, y, medoid_indices = KMedoids.reduce_to_medoids(
        x, y, return_indices=True, factor=Parameters.LargeSmallFactor)
    ids_keep = [ids[i] for i in medoid_indices]

    with open(fn_target, 'w+') as wf:
        for case_id in ids_keep:
            wf.write('{}\n'.format(case_id))
    print('Done')
def parse_s(s):
    fn_data = Name_functions.DS_file(s)
    fn_subset = Name_functions.DS_reduced_ids_DSJ(s)
    x, y = Di(fn_data).split_data(int(s), fn_subset_ids=fn_subset)
    print('\tM^{}_j ... '.format(s), end='', flush=True)

    good_splits = 0
    for i in sorted(x):
        fn_model = Name_functions.model_SJ(s, i)
        c, cc = np.unique(y[i], return_counts=True)
        if min(cc) < cv * 2:
            # The rarest class has too few samples for this split
            continue
        if len(c) <= 1:
            # Only one class present, nothing to learn
            continue
        if os.path.exists(fn_model):
            good_splits += 1
            continue
        generate_model(x[i], y[i], s, i)
        good_splits += 1

    print('Done ({}/{} D^{}_j met requirements)'.format(good_splits, len(x), s))
    return good_splits, len(x), 100 * good_splits / len(x)
def _eval_previous(self):
    print('Parsing Previous ... ', end='', flush=True)
    fn_recent = Name_functions.parameter_evaluation_evaluation_metric_file('Previous')
    if Filefunctions.exists(fn_recent):
        print('Already done')
        return

    with open(fn_recent, 'w+') as wf:
        wf.write('S;Day;NumEntries;accuracy;f1\n')
        for S in self.Multi['S']:
            predictor = Classifiers.PreviousClassifier(S)
            fn = Name_functions.DS_file(S)
            _, labels, times, ids = Di(fn).get_data(fn_subset_ids=self.test_ids_fn,
                                                    return_split_values=True,
                                                    return_identifiers=True)
            data = pd.DataFrame(index=ids)
            data['time'] = times
            data['y_true'] = [label[0] for label in labels]
            data['Day'] = np.floor(data['time'])

            # Calculate the accuracy and F1 scores for each day
            for day in data['Day'].unique():
                subset = data[data['Day'] == day]
                acc_score, f1_score = self.get_scores(predictor=predictor,
                                                      true_labels=subset['y_true'],
                                                      times=subset['time'],
                                                      ids=subset.index)
                if not (acc_score is None or f1_score is None):
                    wf.write('{};{};{};{};{}\n'.format(S, day, len(subset), acc_score, f1_score))
    print('Done')
def parse_ms(s):
    fn_data = Name_functions.DS_file(s)
    x, y, time, case_id = Di(fn_data).get_data(return_identifiers=True,
                                               return_split_values=True)
    print('\tM^{}_j ... '.format(s), end='', flush=True)

    # S predictions
    for i in sorted([int(j) for j in Name_functions.S_J_values(s)], reverse=True):
        if Filefunctions.exists(Name_functions.DSJ_probabilities(s, i)):
            continue
        model_i = Model_Functions.loadModel(Name_functions.model_SJ(s, i))
        model_labels = model_i.classes_.tolist()
        model_end_time = Name_functions.SJ_period_end_time(s, i)
        with open(Name_functions.DSJ_probabilities(s, i), 'w+') as wf:
            for dx, t, idn in zip(x, time, case_id):
                if t < model_end_time:
                    # Only test if the model existed before the data point
                    continue
                model_predictions = model_i.predict_proba(dx.reshape(1, -1))[0]
                actual_predictions = [
                    0 if label not in model_labels
                    else model_predictions[model_labels.index(label)]
                    for label in all_labels
                ]
                wf.write('{};{};{}\n'.format(
                    idn, t, ';'.join('{:4f}'.format(p) for p in actual_predictions)))
    print('Done')

    # Naive predictions
    print('\tM^{}_naive ... '.format(s), end='', flush=True)
    if Filefunctions.exists(Name_functions.DS_probabilities_naive(s)):
        print('Already done')
        return
    model_naive = Model_Functions.loadModel(Name_functions.model_S_naive(s))
    model_naive_labels = model_naive.classes_.tolist()
    model_naive_end_time = Parameters.train_time_naive_stop
    with open(Name_functions.DS_probabilities_naive(s), 'w+') as wf:
        for dx, t, idn in zip(x, time, case_id):
            if t < model_naive_end_time:
                # Only test if the model existed before the data point
                continue
            model_predictions = model_naive.predict_proba(dx.reshape(1, -1))[0]
            actual_predictions = [
                0 if label not in model_naive_labels
                else model_predictions[model_naive_labels.index(label)]
                for label in all_labels
            ]
            wf.write('{};{};{}\n'.format(
                idn, t, ';'.join('{:4f}'.format(p) for p in actual_predictions)))
    print('Done')
def parse_ms(s):
    print('D^{} ... '.format(s), end='', flush=True)
    if Filefunctions.exists(Name_functions.DS_train_ids(s)):
        if Filefunctions.exists(Name_functions.DS_test_ids(s)):
            print('Already done')
            return

    np.random.seed(0)
    X, y, times, ids = DI(Name_functions.DS_file(s)).get_data(
        Name_functions.DS_reduced_ids_DSJ(s), True, True)

    if Parameters.take_test_split_chronological:
        # Chronological split: the last fraction of the post-warm-up cases becomes the test set
        test_case_ids = []
        train_case_ids = []
        times_post_warm_up = [t for t in times if t > Parameters.test_time_start]
        times_post_warm_up.sort()
        train_end_index = int((1 - Parameters.assessment_test_split) * len(times_post_warm_up))
        train_time_end = times_post_warm_up[train_end_index]
        for case_start_time, case_id in zip(times, ids):
            if case_start_time <= Parameters.test_time_start:
                continue
            if case_start_time < train_time_end:
                train_case_ids.append(case_id)
            else:
                test_case_ids.append(case_id)
    else:
        # Stratified random split: sample the test fraction separately per label
        indices = [i for i in range(len(ids)) if times[i] > Parameters.test_time_start]
        test_indices = []
        train_indices = []
        c, cc = np.unique(y[indices], return_counts=True)
        for label, label_count in zip(c, cc):
            num_test = int(label_count * Parameters.assessment_test_split)
            indices_c = [i for i in indices if y[i] == label]
            indices_c_test = np.random.choice(indices_c, num_test, replace=False)
            test_indices.extend(indices_c_test.tolist())
            train_indices.extend([i for i in indices_c if i not in indices_c_test])
        test_case_ids = ids[test_indices]
        train_case_ids = ids[train_indices]

    with open(Name_functions.DS_train_ids(s), 'w+') as wf:
        for case_id in train_case_ids:
            wf.write('{}\n'.format(case_id))
    with open(Name_functions.DS_test_ids(s), 'w+') as wf:
        for case_id in test_case_ids:
            wf.write('{}\n'.format(case_id))
    print('Done')
def _eval_param(self, evaluated_parameter):
    print('Parsing parameter {} ... '.format(evaluated_parameter), end='', flush=True)
    fn = Name_functions.parameter_evaluation_evaluation_metric_file(evaluated_parameter)
    if Filefunctions.exists(fn):
        print('Already done')
        return

    with open(fn, 'w+') as wf:
        wf.write('S;Beta;Tau;P;Day;NumEntries;accuracy;f1\n')
        for S in self.values(evaluated_parameter, 'S'):
            predictor = Classifiers.BPTSClassifier(s=S, score_function=None)
            fn_data = Name_functions.DS_file(S)
            _, labels, times, ids = Di(fn_data).get_data(fn_subset_ids=self.test_ids_fn,
                                                         return_split_values=True,
                                                         return_identifiers=True)
            data = pd.DataFrame(index=ids)
            data['time'] = times
            data['y_true'] = [label[0] for label in labels]
            data['Day'] = np.floor(data['time'])

            # Evaluate every (beta, tau, p) combination per day
            for beta in self.values(evaluated_parameter, 'Beta'):
                for p in self.values(evaluated_parameter, 'P'):
                    for tau in self.values(evaluated_parameter, 'Tau'):
                        scoring_function = PeriodScoring(s=S, beta=beta, tau=tau, p=p)
                        predictor.set_scoring_function(scoring_function)
                        for day in data['Day'].unique():
                            subset = data[data['Day'] == day]
                            acc_score, f1_score = self.get_scores(predictor=predictor,
                                                                  ids=subset.index,
                                                                  times=subset['time'],
                                                                  true_labels=subset['y_true'])
                            if not (acc_score is None or f1_score is None):
                                wf.write('{};{};{};{};{};{};{};{}\n'.format(
                                    S, beta, tau, p, day, len(subset), acc_score, f1_score))
    print('Done')
def parse_naive(s):
    print('\tM^{}_naive ... '.format(s), end='', flush=True)
    fn_model = Name_functions.model_S_naive(s)
    if os.path.exists(fn_model):
        print('Already done')
        return 1.0, 1.0, 100

    fn_data = Name_functions.DS_file(s)
    fn_subset = Name_functions.DS_reduced_ids_DSJ(s)
    x, y, t = Di(fn_data).get_data(fn_subset, True, False)
    y = y.ravel()

    # Only take data that is in the first year
    x = [x[i] for i in range(len(t)) if t[i] < Parameters.train_time_naive_stop]
    y = [y[i] for i in range(len(t)) if t[i] < Parameters.train_time_naive_stop]

    x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0, test_size=0.2)

    # Get the best model
    best_model = None
    best_score = -1
    for c in used_models:
        score, model = train_classifier(c, x_train, x_test, y_train, y_test)
        if score > best_score:
            best_score = score
            best_model = model

    # Save the model
    Model_Functions.saveModel(best_model, fn_model)
    print('Done')
    return 1.0, 1.0, 100
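# Note: `used_models` and `train_classifier` are defined elsewhere in the project and are not
# shown in this module. As a rough, hypothetical illustration of the assumed contract (fit one
# candidate classifier on the training split, score it on the held-out split, and return both
# the score and the fitted model), a scikit-learn based sketch could look like the following.
# The estimator classes and the scoring metric here are assumptions, not the project's actual
# choices, and the `_example` names are used to avoid implying these are the real definitions.
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

used_models_example = [RandomForestClassifier, LogisticRegression]  # hypothetical candidate list


def train_classifier_example(model_class, x_train, x_test, y_train, y_test):
    model = model_class()                                    # instantiate one candidate
    model.fit(x_train, y_train)                              # fit on the training split
    score = accuracy_score(y_test, model.predict(x_test))   # evaluate on the held-out split
    return score, model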
def parse_ms(s):
    fn_target = Name_functions.DS_reduced_ids_DSJ(s)

    # Check existence
    print('\tD^S_j ... ', end='', flush=True)
    if Filefunctions.exists(fn_target):
        print('Already done')
        return

    fn_input = Name_functions.DS_file(s)
    x, y, ids = DataImporter(fn_input).split_data(int(s), return_identifiers=True)
    ids_keep = []
    for i in sorted(x):
        xi, yi, indices = KMedoids.reduce_to_medoids(x[i], y[i], return_indices=True)
        ids_keep.extend([ids[i][j] for j in indices])

    with open(fn_target, 'w+') as wf:
        for case_id in ids_keep:
            wf.write('{}\n'.format(case_id))
    print('Done')
def parse_ms(s):
    print('\tGRAEC ... ', end='', flush=True)
    if Filefunctions.exists(Name_functions.S_GRAEC_enumeration_dictionary(s)):
        print('Already done')
        return

    enumeration_encoder = dict()
    fn_data = Name_functions.DS_file(s)
    fn_train_ids = Name_functions.DS_train_ids(s)
    fn_test_ids = Name_functions.DS_test_ids(s)
    x_train, labels_train, times_train, ids_train = DI(fn_data).get_data(
        fn_subset_ids=fn_train_ids, return_split_values=True, return_identifiers=True)
    x_test, labels_test, times_test, ids_test = DI(fn_data).get_data(
        fn_subset_ids=fn_test_ids, return_split_values=True, return_identifiers=True)

    # Write train and test predictions for every (beta, tau, p) combination
    enumeration = 0
    predictor = Classifiers.BPTSClassifier(s=s, score_function=None)
    for B in Parameters.GRAEC_beta:
        for T in Parameters.GRAEC_tau:
            for P in (Parameters.GRAEC_p if T != 0 else [0]):  # P has no use for T == 0
                enumeration_encoder[enumeration] = '{};{};{}'.format(B, T, P)
                predictor.set_scoring_function(
                    score_function=PeriodScoring(beta=B, p=P, tau=T, s=s))
                with open(Name_functions.S_GRAEC_train_predictions(s, enumeration), 'w+') as wf:
                    wf.write('Case_id;time;True_label;Predicted_label\n')
                    for case_id, t, true_label in zip(ids_train, times_train, labels_train):
                        predicted_label = predictor.predict(case_id=case_id, time=t)
                        wf.write('{};{};{};{}\n'.format(case_id, t, true_label[0], predicted_label))
                with open(Name_functions.S_GRAEC_test_predictions(s, enumeration), 'w+') as wf:
                    wf.write('Case_id;time;True_label;Predicted_label\n')
                    for case_id, t, true_label in zip(ids_test, times_test, labels_test):
                        predicted_label = predictor.predict(case_id=case_id, time=t)
                        wf.write('{};{};{};{}\n'.format(case_id, t, true_label[0], predicted_label))
                enumeration += 1
    Human_Functions.save_dict_to_csv(enumeration_encoder,
                                     Name_functions.S_GRAEC_enumeration_dictionary(s))
    print('Done')

    # Baseline predictions on the test set
    print('\tNaive and Previous ... ', end='', flush=True)
    fn_ids = Name_functions.DS_test_ids(s)
    x, labels, times, ids = DI(fn_data).get_data(fn_subset_ids=fn_ids,
                                                 return_split_values=True,
                                                 return_identifiers=True)
    naive_predictor = Classifiers.NaiveClassifier(s)
    previous_predictor = Classifiers.PreviousClassifier(s)
    with open(Name_functions.S_naive_test_predictions(s), 'w+') as wf_naive, \
            open(Name_functions.S_recent_test_predictions(s), 'w+') as wf_previous:
        wf_naive.write('Case_id;time;True_label;Predicted_label\n')
        wf_previous.write('Case_id;time;True_label;Predicted_label\n')
        for case_id, t, true_label in zip(ids, times, labels):
            predicted_label_naive = naive_predictor.predict(case_id=case_id, time=t)
            if predicted_label_naive is not None:
                wf_naive.write('{};{};{};{}\n'.format(
                    case_id, t, true_label[0], predicted_label_naive))
            predicted_label_previous = previous_predictor.predict(case_id=case_id, time=t)
            if predicted_label_previous is not None:
                wf_previous.write('{};{};{};{}\n'.format(
                    case_id, t, true_label[0], predicted_label_previous))
    print('Done')
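# The enumeration dictionary written above maps an integer index to the string 'beta;tau;p'.
# A small, hypothetical helper to turn one such entry back into numeric values (assuming the
# ';'-separated format produced by '{};{};{}'.format(B, T, P) above, and that all three values
# are numeric) could look like this:
def decode_graec_entry_example(entry):
    beta, tau, p = (float(v) for v in entry.split(';'))
    return beta, tau, p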
def run():
    for s in Parameters.S_values:
        create_labelled_dataset(event_log=Name_functions.event_log(),
                                s=s,
                                feature_filename=Name_functions.cases_info(),
                                output_file=Name_functions.DS_file(s))
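# Hypothetical entry point: the driver that calls run() is not shown in this file, so this
# __main__ guard is an assumption added only for illustration of how the step could be invoked.
if __name__ == '__main__':
    run()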