Example 1
                        help="choose the network")
    parser.add_argument("-b", "--batch", type=int, help="set batch size")
    args = parser.parse_args()

    system = ['bpsk', 'mt'] if args.system is None else [args.system]
    network = ['cnn', 'lstm'] if args.network is None else [args.network]
    test_batch = 20000 if args.batch is None else args.batch
    var_list = ['m', 'p', 'c', 's0', 's1']
    mode_list = ['N', 'TMA', 'PCR', 'CAR', 'MPL', 'AMP', 'TMB']
    # BPSK
    if 'bpsk' in system:
        data_path = parentdir + '\\bpsk_navigate\\data\\train\\'
        mana = BpskDataTank()
        list_files = get_file_list(data_path)
        for file in list_files:
            mana.read_data(data_path + file, step_len=128, snr=20)
        inputs, _, _, _ = mana.random_batch(test_batch,
                                            normal=1 / 7,
                                            single_fault=10,
                                            two_fault=0)
        # CNN
        if 'cnn' in network:
            ann = 'bpsk_cnn_distill_(8, 16, 32, 64).cnn'
            ann = parentdir + '\\ann_diagnoser\\bpsk\\train\\20db\\{}\\'.format(
                args.index) + ann
            important_vars = heat_map_feature_input(
                ann,
                inputs,
                figname=
                'bpsk\\importance_heat_map_between_varialbe_feature_of_CNN',
                isCNN=True)
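
Example 1 is cut off before its parser is created. A hypothetical reconstruction of the missing argparse head, inferring -s/--system and -i/--index from the later uses of args.system and args.index (only the -n help string and the -b argument are visible above):

# Hypothetical reconstruction of the truncated argparse setup; -s/--system
# and -i/--index are inferred from args.system and args.index below and are
# not shown in the original snippet.
import argparse

parser = argparse.ArgumentParser(description="BPSK fault diagnosis")
parser.add_argument("-s", "--system", type=str, help="choose the system")
parser.add_argument("-n", "--network", type=str, help="choose the network")
parser.add_argument("-b", "--batch", type=int, help="set batch size")
parser.add_argument("-i", "--index", type=int, help="choose the model index")
args = parser.parse_args()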
Example 2
prefix = 'bpsk_cnn_student_'
#   log
log_path = parentdir + '\\log\\bpsk\\train\\{}db\\'.format(snr)
if not os.path.isdir(log_path):
    os.makedirs(log_path)
log_name = prefix + 'training_' + time.asctime(time.localtime(
    time.time())).replace(" ", "_").replace(":", "-") + '.txt'
logfile = log_path + log_name
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
logging.basicConfig(filename=logfile, level=logging.DEBUG, format=LOG_FORMAT)
#prepare data
data_path = parentdir + '\\bpsk_navigate\\data\\train\\'
mana = BpskDataTank()
list_files = get_file_list(data_path)
for file in list_files:
    mana.read_data(data_path + file, step_len=step_len, snr=snr)

# cumbersome models
cum_models = []
for t in range(times):
    model_path = parentdir + '\\ann_diagnoser\\bpsk\\train\\{}db\\{}\\'.format(
        snr, t)
    model_name = 'bpsk_cnn_distill_(8, 16, 32, 64).cnn'
    m = torch.load(model_path + model_name)
    m.eval()
    cum_models.append(m)


# define features
def sparse_features(models, indexes, x):
    m = cum_models[0]
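
As a side note, the timestamped-log setup at the top of Example 2 can be written without the asctime/replace string surgery and the hard-coded '\\' separators; a portable sketch, assuming the same parentdir and snr as in the snippet (placeholders here so it runs on its own):

import logging
import os
from datetime import datetime

parentdir = '.'  # placeholder for the snippet's parentdir
snr = 20         # placeholder for the snippet's snr

# Same log layout as above, but portable across OSes.
log_path = os.path.join(parentdir, 'log', 'bpsk', 'train', '{}db'.format(snr))
os.makedirs(log_path, exist_ok=True)
stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
logging.basicConfig(
    filename=os.path.join(log_path, 'bpsk_cnn_student_training_' + stamp + '.txt'),
    level=logging.DEBUG,
    format="%(asctime)s - %(levelname)s - %(message)s")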
Example 3
small_data = True
#settings
PATH = parentdir
DATA_PATH = PATH + "\\bpsk_navigate\\data\\" + ("big_data\\" if not small_data
                                                else "small_data\\")
ANN_PATH = PATH + "\\ddd\\ann_model\\" + ("big_data\\" if not small_data else
                                          "small_data\\")
step_len = 100
criterion = CrossEntropy
hdia_name = "HFE.pkl"

#prepare data
mana = BpskDataTank()
list_files = get_file_list(DATA_PATH)
for file in list_files:
    mana.read_data(DATA_PATH + file, step_len=step_len, snr=20, norm=True)

diagnoser = HBlockScanFE()
print(diagnoser)
optimizer = optim.Adam(diagnoser.parameters(), lr=0.001, weight_decay=8e-3)

#train
epoch = 2000
batch = 2000 if not small_data else 1000
train_loss = []
running_loss = 0.0
for i in range(epoch):
    inputs, labels, _, res = mana.random_batch(batch,
                                               normal=0.4,
                                               single_fault=10,
                                               two_fault=0)
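
The training loop above stops right after the batch is drawn. Judging from the other examples in this listing, each iteration follows the usual zero_grad → forward → loss → backward → step pattern; a self-contained sketch of that pattern, with toy tensors standing in for BpskDataTank batches and a stand-in model for HBlockScanFE:

import torch
import torch.nn as nn
import torch.optim as optim

# Stand-ins: the real snippet uses HBlockScanFE, a custom CrossEntropy, and
# mana.random_batch(...); none of their definitions appear in this listing.
model = nn.Sequential(nn.Linear(100, 32), nn.ReLU(), nn.Linear(32, 7))
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=8e-3)

for i in range(10):  # epoch = 2000 in the snippet; shortened here
    inputs = torch.randn(64, 100)        # stand-in for mana.random_batch(...)
    labels = torch.randint(0, 7, (64,))  # one of 7 modes: N, TMA, PCR, ...
    optimizer.zero_grad()
    loss = criterion(model(inputs), labels)
    loss.backward()
    optimizer.step()
    print("training loss", i, "=", loss.item())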
Example 4
#data amount
small_data = True
#prepare data
snr = 20
PATH = parentdir
DATA_PATH = parentdir + "\\bpsk_navigate\\data\\" + (
    "big_data\\" if not small_data else "small_data\\")
ANN_PATH = parentdir + "\\ddd\\ann_model\\" + (
    "big_data\\" if not small_data else "small_data\\") + str(snr) + "db\\"
step_len = 100
criterion = CrossEntropy

mana = BpskDataTank()
list_files = get_file_list(DATA_PATH)
for file in list_files:
    mana.read_data(DATA_PATH + file, step_len=step_len, snr=snr)

# nn0 = SimpleDiagnoer()
# nn1 = SimpleDiagnoer()
# nn2 = SimpleDiagnoer()
# nn3 = SimpleDiagnoer()
# nn4 = SimpleDiagnoer()

# opt0 = optim.Adam(nn0.parameters(), lr=0.001, weight_decay=5e-3)
# opt1 = optim.Adam(nn1.parameters(), lr=0.001, weight_decay=5e-3)
# opt2 = optim.Adam(nn2.parameters(), lr=0.001, weight_decay=5e-3)
# opt3 = optim.Adam(nn3.parameters(), lr=0.001, weight_decay=5e-3)
# opt4 = optim.Adam(nn4.parameters(), lr=0.001, weight_decay=5e-3)

# #train
# epoch = 1000
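
The commented-out block above builds five identical SimpleDiagnoer networks and their Adam optimizers one variable at a time; the same setup collapses into two list comprehensions. A sketch with a stand-in module, since SimpleDiagnoer's definition is not part of this listing:

import torch.nn as nn
import torch.optim as optim

def make_diagnoser():
    # Stand-in for SimpleDiagnoer, whose definition is not shown here.
    return nn.Sequential(nn.Linear(100, 32), nn.ReLU(), nn.Linear(32, 7))

models = [make_diagnoser() for _ in range(5)]
optimizers = [optim.Adam(m.parameters(), lr=0.001, weight_decay=5e-3)
              for m in models]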
Example 5
        break
#sub 1
for i in range(epoch1):
    signals1, labels1, _, res1 = mana1.random_batch(batch, normal=0.25, single_fault=10, two_fault=1)
    input1  = pr0(signals1, res1)
    labels1  = labels1[:, [0, 1, 5]]
    optimizer1.zero_grad()
    outputs1 = diagnoser.forward1(input1)
    loss1   = criterion(outputs1, labels1)
    loss1.backward()
    optimizer1.step()
    print("generated training loss ", i, "=", loss1.item())
# #sub 0
list_files     = get_file_list(data_path)
for file in list_files:
    mana0.read_data(data_path+file, step_len=step_len, snr=snr, norm=True)
diagnoser.freeze_sub1()
optimizer0     = optim.Adam(filter(lambda p: p.requires_grad, diagnoser.parameters0()), lr=lr, weight_decay=weight_decay)
for i in range(epoch0):
    #historical data
    inputs0, labels0, _, res0 = mana0.random_batch(batch, normal=0.4, single_fault=10, two_fault=0)
    sen_res = organise_tensor_data(inputs0, res0)
    #optimization
    optimizer0.zero_grad()
    outputs0 = diagnoser.forward0(sen_res)
    loss0    = criterion(outputs0, labels0)
    loss0.backward()
    optimizer0.step()
    print("historical training loss", i, "=", loss0.item())
# sub 0 + sub 1
diagnoser.unfreeze_sub1()
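
freeze_sub1() and unfreeze_sub1() are methods of the diagnoser and are not defined in this example. The usual PyTorch idiom behind such methods is toggling requires_grad on one submodule, which is what lets the filter(lambda p: p.requires_grad, ...) expression above hand the optimizer only the trainable branch; a minimal sketch of that assumed implementation:

import torch.nn as nn

class TwoBranchDiagnoser(nn.Module):
    # Hypothetical stand-in mirroring the freeze_sub1/unfreeze_sub1 calls
    # above; the real diagnoser's architecture is not shown in this listing.
    def __init__(self):
        super().__init__()
        self.sub0 = nn.Linear(8, 4)
        self.sub1 = nn.Linear(8, 4)

    def freeze_sub1(self):
        for p in self.sub1.parameters():
            p.requires_grad = False

    def unfreeze_sub1(self):
        for p in self.sub1.parameters():
            p.requires_grad = True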
Example 6
                        help="choose the system")
    parser.add_argument("-b", "--batch", type=int, help="set batch size")
    args = parser.parse_args()

    snr = 20
    batch = 8000 if args.batch is None else args.batch
    if args.system == 'bpsk':
        mode_list = ['N', 'TMA', 'PCR', 'CAR', 'MPL', 'AMP', 'TMB']
        step_len = 128
        pca_selection = PCA_feature_selection(0.95)
        # train
        train_path = parentdir + '\\bpsk_navigate\\data\\train\\'
        mana_train = BpskDataTank()
        list_files = get_file_list(train_path)
        for file in list_files:
            mana_train.read_data(train_path + file, step_len=step_len, snr=snr)
        inputs, labels, _, _ = mana_train.random_batch(batch,
                                                       normal=0.4,
                                                       single_fault=10,
                                                       two_fault=0)
        inputs = inputs.detach().numpy()
        labels = torch.sum(labels * torch.Tensor([1, 2, 3, 4, 5, 6]), 1).long()
        labels = labels.detach().numpy()
        batch, variable, step = inputs.shape
        inputs = inputs.transpose((0, 2, 1))
        inputs = inputs.reshape((batch * step, variable))
        inputs = pca_selection.learn_from(inputs)
        labels = np.repeat(labels, step)
        _, fe_num = inputs.shape
        var_list = ['fe' + str(i) for i in range(fe_num)]
        numpy2arff(inputs, labels, 'pca_train.arff', var_list, mode_list)
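
The reshaping in Example 6 flattens (batch, variable, step) windows into one row per time step before fitting PCA, then repeats each window's label once per step so rows and labels stay aligned. A sketch of the same pipeline with scikit-learn's PCA standing in for the custom PCA_feature_selection (an assumption about its behaviour):

import numpy as np
from sklearn.decomposition import PCA

batch, variable, step = 8, 5, 128
inputs = np.random.randn(batch, variable, step)   # stand-in for random_batch output
labels = np.random.randint(0, 7, batch)           # one mode index per window

rows = inputs.transpose((0, 2, 1)).reshape((batch * step, variable))
reduced = PCA(n_components=0.95).fit_transform(rows)  # keep 95% of the variance
labels_per_row = np.repeat(labels, step)              # one label per step row
print(reduced.shape, labels_per_row.shape)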
Example 7
def load_bpsk_data(data_path, snr):
    mana = BpskDataTank()
    list_files = get_file_list(data_path)
    for file in list_files:
        mana.read_data(data_path+file, step_len=128, snr=snr)
    return mana
    elif dia == "rdscnn":
        model_name = "rdscnn.pkl"
        norm = True
    elif dia == "rdsecnn":
        model_name = "rdsecnn.pkl"
        norm = True
    else:
        print("unkown object!")
        exit(0)
    dia_name.append(model_name)

mana = BpskDataTank()
list_files = get_file_list(TEST_DATA_PATH)
for file in list_files:
    mana.read_data(TEST_DATA_PATH + file,
                   step_len=step_len,
                   snr=snr,
                   norm=norm)
#load diagnoser
diagnoser = []
for name in dia_name:
    d = torch.load(ANN_PATH + name)
    d.eval()
    diagnoser.append(d)
batch = 10000

inputs, labels, _, res = mana.random_batch(batch,
                                           normal=0.4,
                                           single_fault=10,
                                           two_fault=0)
sen_res = organise_tensor_data(inputs, res)
for k, d in zip(range(len(dia_name)), diagnoser):
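
Example 7 is cut off at the evaluation loop. A hypothetical continuation that scores each loaded diagnoser on the batch; toy stand-ins replace the snippet's loaded models and data so the sketch runs on its own, and with the real objects only the final loop is needed:

import torch
import torch.nn as nn

dia_name = ["rdscnn.pkl", "rdsecnn.pkl"]              # as assembled above
diagnoser = [nn.Linear(100, 7) for _ in dia_name]     # stand-in models
sen_res = torch.randn(10000, 100)                     # stand-in for organise_tensor_data output
labels = torch.eye(7)[torch.randint(0, 7, (10000,))]  # one-hot labels over the 7 modes

with torch.no_grad():
    for k, d in zip(range(len(dia_name)), diagnoser):
        predicted = torch.argmax(d(sen_res), dim=1)
        truth = torch.argmax(labels, dim=1)
        acc = (predicted == truth).float().mean().item()
        print("{}: accuracy = {:.4f}".format(dia_name[k], acc))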