示例#1
0
def estimate_model(model_path, model_type, inputs, y_label, key=''):
    """Load every saved model of the requested type(s) and evaluate it.

    For each matching model file under *model_path*, runs a forward pass on
    *inputs*, logs the prediction time, computes the ROC/AUC against
    *y_label* and saves the ROC plot next to the model.

    Args:
        model_path: directory containing the saved models (trailing separator
            expected, consistent with the file's path-concat convention).
        model_type: string/collection tested for 'cnn' and/or 'lstm'.
        inputs: 3-D tensor; only the first (batch) dimension is used here.
        y_label: ground-truth labels passed to the ROC analysis.
        key: optional substring filter on model file names.

    NOTE(review): `roc_type` is not defined in this function — it is
    presumably a module-level global; confirm before reuse.
    """
    batch, _, _ = inputs.size()
    files = get_file_list(model_path)
    model_names = []
    if 'cnn' in model_type:
        model_names += [f for f in files if (f.endswith('.cnn') and key in f)]
    if 'lstm' in model_type:
        model_names += [f for f in files if (f.endswith('.lstm') and key in f)]
    for name in model_names:
        roc_name = 'ROC' + name
        d = torch.load(model_path + name)
        d.eval()

        # time.clock() was removed in Python 3.8; perf_counter() is the
        # recommended monotonic timer for measuring short durations.
        bg = time.perf_counter()  # time start
        outputs, _, _ = d(inputs)
        ed = time.perf_counter()  # time end
        logging.info('{}, predict time={} for a {} batch'.format(name, ed-bg, batch))
        print('{}, predict time={} for a {} batch'.format(name, ed-bg, batch))
        y_score = outputs.detach().numpy()

        # statistics
        roc = plotROC()
        roc.analyse(y_label, y_score)
        auc = roc.auc(roc_type)
        logging.info('{}, {} auc = {}'.format(name, roc_type, auc))
        print('{}, {}, auc = {}'.format(name, roc_type, auc))
        roc.plot(roc_type, view=False, file=model_path+roc_name)
示例#2
0
 def load_normal_data(self, file_path, fault_time=None, snr=None):
     """
     Load normal data where all data files are in file_path.
     If fault_time=None, all data are normal,
     Else data before fault_time is normal.
     fault_time belongs to (0, 1)
     """
     # Collect all segments first and concatenate once: calling
     # np.concatenate inside the loop re-copies the accumulated array on
     # every iteration (quadratic in total data size).
     segments = [] if self._data is None else [self._data]
     for file in get_file_list(file_path):
         segments.append(read_normal_data(file_path + file, fault_time, snr))
     if segments:
         # Single segment: keep the array as-is (matches the original
         # behavior of assigning `sig` directly without a copy).
         self._data = np.concatenate(segments) if len(segments) > 1 else segments[0]
     self._data = discrete_data(self._data, self._dn)
示例#3
0
                        type=str,
                        choices=['cnn', 'lstm'],
                        help="choose the network")
    parser.add_argument("-b", "--batch", type=int, help="set batch size")
    args = parser.parse_args()

    system = ['bpsk', 'mt'] if args.system is None else [args.system]
    network = ['cnn', 'lstm'] if args.network is None else [args.network]
    test_batch = 20000 if args.batch is None else args.batch
    var_list = ['m', 'p', 'c', 's0', 's1']
    mode_list = ['N', 'TMA', 'PCR', 'CAR', 'MPL', 'AMP', 'TMB']
    # BPSK
    if 'bpsk' in system:
        data_path = parentdir + '\\bpsk_navigate\\data\\train\\'
        mana = BpskDataTank()
        list_files = get_file_list(data_path)
        for file in list_files:
            mana.read_data(data_path + file, step_len=128, snr=20)
        inputs, _, _, _ = mana.random_batch(test_batch,
                                            normal=1 / 7,
                                            single_fault=10,
                                            two_fault=0)
        # CNN
        if 'cnn' in network:
            ann = 'bpsk_cnn_distill_(8, 16, 32, 64).cnn'
            ann = parentdir + '\\ann_diagnoser\\bpsk\\train\\20db\\{}\\'.format(
                args.index) + ann
            important_vars = heat_map_feature_input(
                ann,
                inputs,
                figname=
# Data-size switch: True selects the small data set (and a smaller batch below).
small_data = True

# Path settings (Windows-style separators, matching the rest of the project).
PATH = parentdir
DATA_PATH = PATH + "\\bpsk_navigate\\data\\" + ("big_data\\" if not small_data
                                                else "small_data\\")
ANN_PATH = PATH + "\\ddd\\ann_model\\" + ("big_data\\" if not small_data else
                                          "small_data\\")
step_len = 100  # window length (samples) fed to the network
criterion = CrossEntropy  # NOTE(review): presumably a loss defined elsewhere — confirm
hdia_name = "HFE.pkl"  # file name for the trained diagnoser

# Prepare data: load every file under DATA_PATH into the data tank,
# normalized, at 20 dB SNR.
mana = BpskDataTank()
list_files = get_file_list(DATA_PATH)
for file in list_files:
    mana.read_data(DATA_PATH + file, step_len=step_len, snr=20, norm=True)

diagnoser = HBlockScanFE()
print(diagnoser)
# Adam with L2 regularization via weight_decay.
optimizer = optim.Adam(diagnoser.parameters(), lr=0.001, weight_decay=8e-3)

# Training hyper-parameters and loss bookkeeping.
epoch = 2000
batch = 2000 if not small_data else 1000
train_loss = []  # collected loss history (filled by the training loop below)
running_loss = 0.0
for i in range(epoch):
    inputs, labels, _, res = mana.random_batch(batch,
                                               normal=0.4,
示例#5
0
    predict_vector = []
    for p in predict:
        vector = [0]*(fault_num+1)
        vector[p] = 1
        predict_vector.append(vector)
    predict_vector = np.array(predict_vector)
    roc = plotROC()
    roc.analyse(labels, predict_vector)
    auc = roc.auc('micro')
    return auc


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--system", type=str, choices=['bpsk', 'mt'], help="choose the system")
    parser.add_argument("-l", "--length", type=int, help="step length")
    parser.add_argument("-t", "--strategy", type=str, help="strategy")
    parser.add_argument("-i", "--index", type=int, help="index")
    args = parser.parse_args()

    path = parentdir + '\\utilities\\{}\\{}\\'.format(args.system, args.index)
    files = get_file_list(path)
    files = [f for f in files if f.endswith('.txt')]
    
    for f in files:
        if f.startswith('CNN') or f.startswith('LSTM'):
            auc = weka_estimator(path + f)
        else:
            auc = weka_slide_window_estimator(path+f, args.length, args.strategy)
        print('AUC of {} is: {}.'.format(f, auc))
示例#6
0
                        type=str,
                        choices=['bpsk', 'mt'],
                        help="choose the system")
    parser.add_argument("-b", "--batch", type=int, help="set batch size")
    args = parser.parse_args()

    snr = 20
    batch = 8000 if args.batch is None else args.batch
    if args.system == 'bpsk':
        mode_list = ['N', 'TMA', 'PCR', 'CAR', 'MPL', 'AMP', 'TMB']
        step_len = 128
        pca_selection = PCA_feature_selection(0.95)
        # train
        train_path = parentdir + '\\bpsk_navigate\\data\\train\\'
        mana_train = BpskDataTank()
        list_files = get_file_list(train_path)
        for file in list_files:
            mana_train.read_data(train_path + file, step_len=step_len, snr=snr)
        inputs, labels, _, _ = mana_train.random_batch(batch,
                                                       normal=0.4,
                                                       single_fault=10,
                                                       two_fault=0)
        inputs = inputs.detach().numpy()
        labels = torch.sum(labels * torch.Tensor([1, 2, 3, 4, 5, 6]), 1).long()
        labels = labels.detach().numpy()
        batch, variable, step = inputs.shape
        inputs = inputs.transpose((0, 2, 1))
        inputs = inputs.reshape((batch * step, variable))
        inputs = pca_selection.learn_from(inputs)
        labels = np.repeat(labels, step)
        _, fe_num = inputs.shape
示例#7
0
def load_bpsk_data(data_path, snr):
    """Read every BPSK data file under *data_path* into a fresh data tank.

    Each file is loaded with a fixed window length of 128 samples at the
    requested signal-to-noise ratio; the populated BpskDataTank is returned.
    """
    tank = BpskDataTank()
    for fname in get_file_list(data_path):
        tank.read_data(data_path + fname, step_len=128, snr=snr)
    return tank
        model_name = "dcnn.pkl"
    elif dia == "dscnn":
        model_name = "dscnn.pkl"
    elif dia == "rdscnn":
        model_name = "rdscnn.pkl"
        norm = True
    elif dia == "rdsecnn":
        model_name = "rdsecnn.pkl"
        norm = True
    else:
        print("unkown object!")
        exit(0)
    dia_name.append(model_name)

# Load the test data set into the data tank.
mana = BpskDataTank()
list_files = get_file_list(TEST_DATA_PATH)
for file in list_files:
    mana.read_data(TEST_DATA_PATH + file,
                   step_len=step_len,
                   snr=snr,
                   norm=norm)
# Load each trained diagnoser and switch it to eval mode
# (freezes dropout/batch-norm behavior) before inference.
diagnoser = []
for name in dia_name:
    d = torch.load(ANN_PATH + name)
    d.eval()
    diagnoser.append(d)
batch = 10000  # number of samples drawn for evaluation

inputs, labels, _, res = mana.random_batch(batch,
                                           normal=0.4,