Example #1
0
 def get_list_tf_on_tran(self, csv_name):
     """Write one CSV row per test file: its change points followed by its file name.

     For each file in ``self.test_files_list``, look up its transition index,
     take that file's change-point list, append the file name as the last
     column, and dump all rows to *csv_name* via ``ut.list_to_csv``.

     Args:
         csv_name: Destination CSV path passed through to ``ut.list_to_csv``.
     """
     rows = []
     for file_name in self.test_files_list:
         index_file = self.get_tran_index_fromFileName(file_name=file_name)
         # Copy the change-point list before appending: the original code
         # aliased self.change_points[index_file] and mutated it in place,
         # corrupting the shared state on every call.
         row = list(self.change_points[index_file])
         row.append(file_name)
         rows.append(row)
     ut.list_to_csv(rows=rows, csv_name=csv_name)
Example #2
0
    resultRow.append(false_alerts)
    resultRow.append(total_alert)
    resultRow.append(pos)
    resultRow.append(neg)
    tp_rate = float(true_alerts) / pos
    resultRow.append(tp_rate)
    fp_rate = float(false_alerts) / neg
    resultRow.append(fp_rate)

    resultRow.append(sum(found))
    return resultRow



if __name__ == '__main__':
    ut.list_to_csv(head=head)
    result = []
    df_list = []
    name_list = ut_light.gen_list_mix(main_path=dataset_path,data_type='bgs',mix_size="*sq_L3_I5_*")
    for index, name in enumerate(name_list):
        for threshold in thresholds:
            algo_name = "SEA_{}_{}".format(bin_sizes,block_type)
            ut.checkFolderandCreate("{}\\ROC\\{}".format(main_path, algo_name))
            pickle_ROC_file = '{}\\ROC\\{}\\_{}_threshold{}.pkl'.format(main_path, algo_name, name, threshold)
            if not os.path.exists(pickle_ROC_file):
                result = []
                data = ut_light.load_light_select_dataset(input_path=dataset_path, file_name=name, is_mix=is_mix)
                print("##### cal name : {} , threshold = {}  #####".format(name, threshold))
                ROC_row = genROCFile(name=name, data=data, threshold=threshold)
                result.append(ROC_row)
                df = pd.DataFrame(result, columns=ROC_head)
Example #3
0
 for pattern in patterns:
     for I in IS:
         # path = 'D:\\git_project\\data stream\\lightcurve_benchmark\\{}'.format(data_type)
         path = 'C:\\git\\data_stream\\lightcurve_benchmark\\{}'.format(data_type)
         # path = 'C:\\git\\data_stream\\gaia\\{}'.format(data_type)
         data = data_stream(path=path, type=type,pattern=pattern, len=L,interval=I)
         data.load_data_fromfile()
         for bin_period in bin_periods:
             file_format_name = '{}_{}_bin{}'.format(type, data_type, bin_period)
             for k in ks:
                 data_sk, result_text = cal_sk_bin(data=data,
                                                   input_bin=bin_period,input_k=k,
                                                   file_format="{}_k{}".format(file_format_name,k), L=L, I=I)
                 print("{}_k{} cheb sk".format(file_format_name,k))
                 head, rows = data_sk.result_for_ROC(data_type, Algo='SK',K=k, Bin=bin_period, cheb_size=Cheb_win)
                 ut.list_to_csv(rows=rows, csv_name="ks_bin//roc_bin.csv", is_append=True)
                 head, rows = data_sk.result_for_ROC(data_type, Algo='SK', K=k,
                                                     Bin=bin_period,is_normalization=True,cheb_size=Cheb_win)
                 ut.list_to_csv(rows=rows, csv_name="ks_bin//roc_nor.csv", is_append=True)
             #
             #     # ut.list_to_txt(rows=result_text, csv_name="ks_bin//{}".format(txt_file_name),
             #     #                is_append=True)
                 data_bin_k,result_text = cal_CANDAR_bin(data=data,
                                           input_bin=bin_period,input_k=k,
                                           file_format="{}_k{}".format(file_format_name,k)
                                           , L=L,I=I)
                 print("{}_k{} cheb k".format(file_format_name,k))
                 head, rows = data_bin_k.result_for_ROC(data_type, Algo='CHEB_K', K=k, Bin=bin_period, cheb_size=Cheb_win)
                 ut.list_to_csv(rows=rows, csv_name="CANDAR_bin//roc_bin.csv", is_append=True)
                 head, rows = data_bin_k.result_for_ROC(data_type, Algo='CHEB_K', K=k, Bin=bin_period,
                                                        is_normalization=True,cheb_size=Cheb_win)
    types = ["Ligthcurve"]
    patterns = ["sq"]
    LS = [5]
    IS = [30]

    for data_type in data_types:
        for k in ks:
            for type in types:
                for pattern in patterns:
                    for L in LS:
                        for I in IS:
                            data = data_stream(
                                # path='D:\\git_project\\data stream\\lightcurve_benchmark\\{}'.format(data_type), type=type,
                                # pattern=pattern, len=L,
                                # interval=I)
                                path='C:\\git\\data_stream\\lightcurve_benchmark\\{}'.format(data_type), type=type,
                                pattern=pattern, len=L,
                                interval=I)
                            data.load_data_fromfile()

                            data = cal_k_tran(data=data, input_k=k)
                            # # Cheb

                            print("### end change detection##")

                            head,rows,csv_path = data.result_for_ROC_old(filter_type=data_type, Algorithm='Kalman', K=k)
                            ut.list_to_csv(rows=rows,csv_name=csv_path,is_append=True)
                            # data.save_f1_to_csv( L=L, I=I,
                            #                         Algorithm="Base Cheb",
                            #                         Dataset_type=type, pattern=pattern,K=k,Bin="None")