Example #1
    args = parser.parse_args()

    system = ['bpsk', 'mt'] if args.system is None else [args.system]
    network = ['cnn', 'lstm'] if args.network is None else [args.network]
    test_batch = 20000 if args.batch is None else args.batch
    var_list = ['m', 'p', 'c', 's0', 's1']
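    # five monitored signal variables; seven operating modes: 'N' (normal) plus six fault types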
    mode_list = ['N', 'TMA', 'PCR', 'CAR', 'MPL', 'AMP', 'TMB']
    # BPSK
    if 'bpsk' in system:
        data_path = parentdir + '\\bpsk_navigate\\data\\train\\'
        mana = BpskDataTank()
        list_files = get_file_list(data_path)
        for file in list_files:
            mana.read_data(data_path + file, step_len=128, snr=20)
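        # draw a test batch in which normal samples make up 1/7 (one of the seven modes)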
        inputs, _, _, _ = mana.random_batch(test_batch,
                                            normal=1 / 7,
                                            single_fault=10,
                                            two_fault=0)
        # CNN
        if 'cnn' in network:
            ann = 'bpsk_cnn_distill_(8, 16, 32, 64).cnn'
            ann = parentdir + '\\ann_diagnoser\\bpsk\\train\\20db\\{}\\'.format(
                args.index) + ann
            important_vars = heat_map_feature_input(
                ann,
                inputs,
                figname='bpsk\\importance_heat_map_between_variable_feature_of_CNN',
                isCNN=True)
            # figname/isCNN below are assumed by analogy with the
            # heat_map_feature_input call above; the snippet is cut off here
            important_features = heat_map_fault_feature(
                ann,
                inputs,
                figname='bpsk\\importance_heat_map_between_fault_feature_of_CNN',
                isCNN=True)
mana = BpskDataTank()
list_files = get_file_list(DATA_PATH)
for file in list_files:
    mana.read_data(DATA_PATH + file, step_len=step_len, snr=20, norm=True)

diagnoser = HBlockScanFE()
print(diagnoser)
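# Adam with a small L2 penalty (weight_decay=8e-3) on the diagnoser parameters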
optimizer = optim.Adam(diagnoser.parameters(), lr=0.001, weight_decay=8e-3)

# train
epoch = 2000
batch = 2000 if not small_data else 1000
train_loss = []
running_loss = 0.0
for i in range(epoch):
    inputs, labels, _, res = mana.random_batch(batch,
                                               normal=0.4,
                                               single_fault=10,
                                               two_fault=0)
    sen_res = organise_tensor_data(inputs, res)
    optimizer.zero_grad()
    outputs = diagnoser(sen_res)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()

    loss_i = loss.item()
    running_loss += loss_i
    train_loss.append(loss_i)
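    # report the mean loss over the last ten iterations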
    if i % 10 == 9:
        print('%d loss: %.5f' % (i + 1, running_loss / 10))
        running_loss = 0.0
print('Finished Training')
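
# a minimal sketch, not in the original example: persist the trained diagnoser
# for later reuse with torch.load (the file name here is hypothetical)
torch.save(diagnoser, 'hblockscan_diagnoser.pkl')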
t_nn0 = torch.load(ANN_PATH + "nn0.pkl")
t_nn1 = torch.load(ANN_PATH + "nn1.pkl")
t_nn2 = torch.load(ANN_PATH + "nn2.pkl")
t_nn3 = torch.load(ANN_PATH + "nn3.pkl")
t_nn4 = torch.load(ANN_PATH + "nn4.pkl")
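# switch all five nets to inference mode (disables dropout and batch-norm updates)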
t_nn0.eval()
t_nn1.eval()
t_nn2.eval()
t_nn3.eval()
t_nn4.eval()
batch2 = 1000
epoch2 = 100
eval_acc = np.zeros((5, 6))
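# rows: the five per-variable nets (t_nn0-t_nn4); columns: presumably one per fault class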
print("Start evaluation!")
for i in range(epoch2):
    inputs, labels, _, _ = mana.random_batch(batch2,
                                             normal=0,
                                             single_fault=10,
                                             two_fault=0)
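    # index with a list ([k]) so each single-variable slice keeps its channel
    # axis: shape (batch, 1, step_len)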
    x0 = inputs[:, [0], :]
    x1 = inputs[:, [1], :]
    x2 = inputs[:, [2], :]
    x3 = inputs[:, [3], :]
    x4 = inputs[:, [4], :]

    out0 = t_nn0(x0)
    eval_acc[0, :] = eval_acc[0, :] + accuracy(out0, labels) / epoch2

    out1 = t_nn1(x1)
    eval_acc[1, :] = eval_acc[1, :] + accuracy(out1, labels) / epoch2

    out2 = t_nn2(x2)
    eval_acc[2, :] = eval_acc[2, :] + accuracy(out2, labels) / epoch2
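    # tail of the pr0 helper used below: concatenates the p and r0 channels into one input tensor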
    inputs = torch.cat((p, r0), 1)
    return inputs

# embedded part only
optimizer1      = optim.Adam(diagnoser.parameters1(), lr=lr, weight_decay=weight_decay)
while True:
    parameters = sample_parameters(N, fault_type, grids, rang[0], rang[1], pref, para_set)
    file_list  = sample_data(FAULT_TIME, TIME, parameters, generated_path)
    print("file_list=",file_list)
    if len(file_list) == 0:
        break
    # a temporary data manager
    mana_t = BpskDataTank()
    add_files(generated_path, file_list, mana_t, step_len, snr=snr)
    # sample a batch
    signals1, labels1, _, res1 = mana_t.random_batch(batch, normal=0.25, single_fault=10, two_fault=1)
    input1  = pr0(signals1, res1)
    labels1 = labels1[:, [0, 1, 5]]
    output1 = diagnoser.forward1(input1)
    loss1   = criterion(output1, labels1)
    print("evaluation loss=", loss1.item())
    if loss1.item() > loss:
        add_files(generated_path, file_list, mana1, step_len, snr=snr)
        while loss1.item() > loss:
            signals1, labels1, _, res1 = mana1.random_batch(batch, normal=0.25, single_fault=10, two_fault=1)
            input1  = pr0(signals1, res1)
            labels1  = labels1[:, [0, 1, 5]]
            optimizer1.zero_grad()
            output1 = diagnoser.forward1(input1)
            loss1   = criterion(output1, labels1)
            loss1.backward()
            optimizer1.step()  # completes the usual zero_grad/backward/step cycle
Example #5
    args = parser.parse_args()

    snr = 20
    batch = 8000 if args.batch is None else args.batch
    if args.system == 'bpsk':
        mode_list = ['N', 'TMA', 'PCR', 'CAR', 'MPL', 'AMP', 'TMB']
        step_len = 128
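        # presumably keeps enough principal components to explain 95% of the variance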
        pca_selection = PCA_feature_selection(0.95)
        # train
        train_path = parentdir + '\\bpsk_navigate\\data\\train\\'
        mana_train = BpskDataTank()
        list_files = get_file_list(train_path)
        for file in list_files:
            mana_train.read_data(train_path + file, step_len=step_len, snr=snr)
        inputs, labels, _, _ = mana_train.random_batch(batch,
                                                       normal=0.4,
                                                       single_fault=10,
                                                       two_fault=0)
        inputs = inputs.detach().numpy()
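        # collapse one-hot fault labels into integer class ids (0 = normal, 1-6 = fault modes)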
        labels = torch.sum(labels * torch.Tensor([1, 2, 3, 4, 5, 6]), 1).long()
        labels = labels.detach().numpy()
        batch, variable, step = inputs.shape
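        # reorder to (batch, step, variable) and flatten so each time step is one PCA sample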
        inputs = inputs.transpose((0, 2, 1))
        inputs = inputs.reshape((batch * step, variable))
        inputs = pca_selection.learn_from(inputs)
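        # each time step inherits the label of its parent sequence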
        labels = np.repeat(labels, step)
        _, fe_num = inputs.shape
        var_list = ['fe' + str(i) for i in range(fe_num)]
        numpy2arff(inputs, labels, 'pca_train.arff', var_list, mode_list)
        # test
        test_path = parentdir + '\\bpsk_navigate\\data\\test\\'
        mana_test = BpskDataTank()
Example #6
autoencoder = cnn_encoder_decoder(input_size=[5, step_len],
                                  feature_sizes=[20, 10],
                                  kernel_sizes=kernel_sizes)
print(autoencoder)
MSE = nn.MSELoss()
optimizer = optim.Adam(autoencoder.parameters(), lr=0.01, weight_decay=8e-3)

# train
epoch = 1000
batch = 1000
train_loss = []
running_loss = 0.0
bg_time = time.time()
for i in range(epoch):
    input_data, _, _, _ = mana.random_batch(batch,
                                            normal=0.4,
                                            single_fault=10,
                                            two_fault=0)
    _, variable, time_step = input_data.size()
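    # an autoencoder reconstructs its own input, so the target is a detached copy of it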
    output_data = input_data.detach()
    optimizer.zero_grad()

    output = autoencoder(input_data)

    mse_loss = MSE(output, output_data)
    l1_loss = L1(autoencoder, reg=5e-4)  # presumably an L1 penalty on the weights, defined elsewhere
    loss = mse_loss + l1_loss
    loss.backward()
    optimizer.step()

    loss_i = loss.item()
    running_loss += loss_i
    train_loss.append(loss_i)
    if i % 10 == 9: