from ddd.utilities import organise_tensor_data

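# optim, BpskDataTank, get_file_list, HBlockScanFE, CrossEntropy and
# parentdir are assumed to be imported/defined above this fragment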
# data amount
small_data = True
# settings
PATH = parentdir
DATA_PATH = PATH + "\\bpsk_navigate\\data\\" + ("big_data\\" if not small_data
                                                else "small_data\\")
ANN_PATH = PATH + "\\ddd\\ann_model\\" + ("big_data\\" if not small_data else
                                          "small_data\\")
step_len = 100
criterion = CrossEntropy
hdia_name = "HFE.pkl"

# prepare data
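# read every file into the data tank (step_len-sample windows, 20 dB SNR,
# normalized)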
mana = BpskDataTank()
list_files = get_file_list(DATA_PATH)
for file in list_files:
    mana.read_data(DATA_PATH + file, step_len=step_len, snr=20, norm=True)

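# diagnoser network and Adam optimizer (weight_decay adds L2 regularization)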
diagnoser = HBlockScanFE()
print(diagnoser)
optimizer = optim.Adam(diagnoser.parameters(), lr=0.001, weight_decay=8e-3)

# train
epoch = 2000
batch = 2000 if not small_data else 1000
train_loss = []
running_loss = 0.0
for i in range(epoch):
    # the keyword arguments were truncated in the original listing; the values
    # below follow the sampling pattern used in the other examples here
    inputs, labels, _, res = mana.random_batch(batch, normal=0.4,
                                               single_fault=10, two_fault=0)
Example #2
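# fragment of an evaluation script whose command-line options select the
# system (bpsk/mt) and the network type (cnn/lstm)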
                        "--network",
                        type=str,
                        choices=['cnn', 'lstm'],
                        help="choose the network")
    parser.add_argument("-b", "--batch", type=int, help="set batch size")
    args = parser.parse_args()

    system = ['bpsk', 'mt'] if args.system is None else [args.system]
    network = ['cnn', 'lstm'] if args.network is None else [args.network]
    test_batch = 20000 if args.batch is None else args.batch
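    # monitored signal variables and fault modes ('N' is the normal mode)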
    var_list = ['m', 'p', 'c', 's0', 's1']
    mode_list = ['N', 'TMA', 'PCR', 'CAR', 'MPL', 'AMP', 'TMB']
    # BPSK
    if 'bpsk' in system:
        data_path = parentdir + '\\bpsk_navigate\\data\\train\\'
        mana = BpskDataTank()
        list_files = get_file_list(data_path)
        for file in list_files:
            mana.read_data(data_path + file, step_len=128, snr=20)
        inputs, _, _, _ = mana.random_batch(test_batch,
                                            normal=1 / 7,
                                            single_fault=10,
                                            two_fault=0)
        # CNN
        if 'cnn' in network:
            ann = 'bpsk_cnn_distill_(8, 16, 32, 64).cnn'
            ann = parentdir + '\\ann_diagnoser\\bpsk\\train\\20db\\{}\\'.format(
                args.index) + ann
            # the remaining arguments to heat_map_feature_input were truncated
            # in the original listing
            important_vars = heat_map_feature_input(
                ann,
                inputs)
Example #3
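# fragment of a knowledge-distillation setup: a student CNN is trained against
# several previously trained "cumbersome" (teacher) models; the imports and
# the snr/times/step_len settings are truncated above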
T = 20  # distillation temperature (inferred from the student/teacher setup)
kernel_sizes = (8, 4)
indexes = [9, 16, 4, 58, 18, 52, 27, 46, 31, 32]
prefix = 'bpsk_cnn_student_'
# log
log_path = parentdir + '\\log\\bpsk\\train\\{}db\\'.format(snr)
if not os.path.isdir(log_path):
    os.makedirs(log_path)
log_name = prefix + 'training_' + time.asctime(time.localtime(
    time.time())).replace(" ", "_").replace(":", "-") + '.txt'
logfile = log_path + log_name
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
logging.basicConfig(filename=logfile, level=logging.DEBUG, format=LOG_FORMAT)
# prepare data
data_path = parentdir + '\\bpsk_navigate\\data\\train\\'
mana = BpskDataTank()
list_files = get_file_list(data_path)
for file in list_files:
    mana.read_data(data_path + file, step_len=step_len, snr=snr)

# cumbersome (teacher) models used for distillation
cum_models = []
for t in range(times):
    model_path = parentdir + '\\ann_diagnoser\\bpsk\\train\\{}db\\{}\\'.format(
        snr, t)
    model_name = 'bpsk_cnn_distill_(8, 16, 32, 64).cnn'
    m = torch.load(model_path + model_name)
    m.eval()
    cum_models.append(m)

Example #4
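    # fragment of a data-export script: batches from the data tank are written
    # out as ARFF; the parser's --system/--batch/--model/--output options are
    # defined in the truncated part above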
    parser.add_argument("-p",
                        "--purpose",
                        type=str,
                        choices=['train', 'test', 'test2'],
                        help="purpose")
    args = parser.parse_args()

    snr = 20
    batch = 8000 if args.batch is None else args.batch
    if args.system == 'bpsk':
        var_list = ['m', 'p', 'c', 's0', 's1']
        mode_list = ['N', 'TMA', 'PCR', 'CAR', 'MPL', 'AMP', 'TMB']
        step_len = 128
        data_path = parentdir + '\\bpsk_navigate\\data\\{}\\'.format(
            args.purpose)
        mana = BpskDataTank()
        list_files = get_file_list(data_path)
        for file in list_files:
            mana.read_data(data_path + file, step_len=step_len, snr=snr)
        inputs, labels, _, _ = mana.random_batch(batch,
                                                 normal=0.4,
                                                 single_fault=10,
                                                 two_fault=0)
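        # collapse the six-column fault indicator into one integer class label
        # (0 = normal, 1-6 = the six fault modes)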
        labels = torch.sum(labels * torch.Tensor([1, 2, 3, 4, 5, 6]), 1).long()
        labels = labels.detach().numpy()
        if args.model is None:
            inputs = inputs.detach().numpy()
            numpy2arff(inputs, labels, args.output, var_list, mode_list)
        elif 'encoder' in args.model:
            encoder2arff(inputs, labels, args.model, args.output, mode_list)
        else:
            # this branch was truncated in the original listing
            pass

Example #5
import torch.optim as optim
import matplotlib.pyplot as pl
import numpy as np
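# BpskDataTank, get_file_list, CrossEntropy, SimpleDiagnoer and parentdir are
# assumed to be imported/defined in the omitted part of this script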
# data amount
small_data = True
# settings
snr = 20
PATH = parentdir
DATA_PATH = parentdir + "\\bpsk_navigate\\data\\" + (
    "big_data\\" if not small_data else "small_data\\")
ANN_PATH = parentdir + "\\ddd\\ann_model\\" + (
    "big_data\\" if not small_data else "small_data\\") + str(snr) + "db\\"
step_len = 100
criterion = CrossEntropy

# prepare data
mana = BpskDataTank()
list_files = get_file_list(DATA_PATH)
for file in list_files:
    mana.read_data(DATA_PATH + file, step_len=step_len, snr=snr)

# nn0 = SimpleDiagnoer()
# nn1 = SimpleDiagnoer()
# nn2 = SimpleDiagnoer()
# nn3 = SimpleDiagnoer()
# nn4 = SimpleDiagnoer()

# opt0 = optim.Adam(nn0.parameters(), lr=0.001, weight_decay=5e-3)
# opt1 = optim.Adam(nn1.parameters(), lr=0.001, weight_decay=5e-3)
# opt2 = optim.Adam(nn2.parameters(), lr=0.001, weight_decay=5e-3)
# opt3 = optim.Adam(nn3.parameters(), lr=0.001, weight_decay=5e-3)
# opt4 = optim.Adam(nn4.parameters(), lr=0.001, weight_decay=5e-3)
Example #6
from ddd.utilities import organise_tensor_data

# settings: fault-injection parameters for data generation plus training
# hyper-parameters for the rdsecnn diagnoser
snr             = 20
TIME            = 0.0001
FAULT_TIME      = TIME / 2
N               = 3
fault_type      = ["tma", "tmb", "pseudo_rate"]
rang            = [[0.2, (0.8 * 10**6, 7.3 * 10**6), -0.05], [0.9, (8.8 * 10**6, 13 * 10**6), 0.05]]
pref            = 3 #1->single-fault, 2->two-fault, 3->single-,two-fault
loss            = 0.05
grids           = [0.1, 1.41421*10**6, 0.01]
diagnoser       = rdsecnn_diagnoser()
lr              = 1e-3
weight_decay    = 8e-3
training_data   = BpskDataTank()
para_set        = {}
data_path       = parentdir + "\\bpsk_navigate\\data\\small_data\\"
generated_path  = parentdir + "\\ddd\\data\\" + str(snr) + "db\\"
ann_path        = parentdir + "\\ddd\\ann_model\\small_data\\"  + str(snr) + "db\\"
mana0           = BpskDataTank()                       #historical data
mana1           = BpskDataTank()                       #generated data
step_len        = 100
batch           = 1000
criterion       = CrossEntropy
epoch           = 100
epoch1          = 500
epoch0          = 1000
file_name       = "rdsecnn.pkl"

Example #7
def load_bpsk_data(data_path, snr):
    """Read every BPSK data file under data_path into a BpskDataTank."""
    mana = BpskDataTank()
    list_files = get_file_list(data_path)
    for file in list_files:
        mana.read_data(data_path + file, step_len=128, snr=snr)
    return mana
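
This helper factors out the data-loading loop repeated in the examples above.
A minimal usage sketch (the train-data path and 20 dB SNR mirror the earlier
snippets; the sampling keywords follow Example #4):

# hypothetical usage, assuming parentdir points at the project root as above
data_path = parentdir + '\\bpsk_navigate\\data\\train\\'
mana = load_bpsk_data(data_path, snr=20)
inputs, labels, _, _ = mana.random_batch(1000, normal=0.4,
                                         single_fault=10, two_fault=0)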