Example #1
0
def train(opt):
    """Fine-tune the global ``core`` network on signals received in ``opt.rec_tmp``.

    Each sub-directory of ``opt.rec_tmp`` is one category. Every recording in a
    category is cut into random 2000-point windows, normalized, split 80/20 into
    train/eval, merged with the original dataset (module-level
    ``ori_signals_train`` / ``ori_labels_train`` / ``ori_signals_eval`` /
    ``ori_labels_eval``), and the network is trained for ``opt.epochs`` epochs.

    Side effects: re-initializes and trains the module-level ``core`` network,
    writes a log line, draws the final confusion-matrix heatmap, and saves the
    traced network.

    :param opt: parsed option namespace (uses rec_tmp, input_nc, loadsize, epochs)
    """
    core.network_init(printflag=True)

    # One sub-directory per category; sorted so label indices are deterministic.
    categorys = os.listdir(opt.rec_tmp)
    categorys.sort()
    print('categorys:', categorys)
    category_num = len(categorys)

    received_signals = []
    received_labels = []

    sample_num = 1000  # target number of windows to draw per category
    for i, category in enumerate(categorys):
        category_dir = os.path.join(opt.rec_tmp, category)
        samples = os.listdir(category_dir)
        random.shuffle(samples)
        for sample in samples:
            txt = util.loadtxt(os.path.join(category_dir, sample))
            # Vectorized parse of the whitespace-separated recording —
            # equivalent to the per-point float() loop, but done in one call.
            signal_ori = np.asarray(txt.split(), dtype=np.float64)

            # Draw roughly sample_num windows per category, spread evenly
            # over this category's files.
            for _ in range(sample_num // len(samples)):
                # Random 2000-point window, skipping the first 1000 points and
                # the last ~2000. NOTE(review): random.randint's bounds are
                # inclusive; this raises ValueError for recordings shorter
                # than 3001 points — confirm recorded signals are long enough.
                ran = random.randint(1000, len(signal_ori) - 2000 - 1)
                this_signal = signal_ori[ran:ran + 2000]
                this_signal = arr.normliaze(this_signal, '5_95', truncated=4)

                received_signals.append(this_signal)
                received_labels.append(i)  # label = category index

    received_signals = np.array(received_signals).reshape(
        -1, opt.input_nc, opt.loadsize)
    received_labels = np.array(received_labels).reshape(-1, 1)
    # Ordered (random=False) 80/20 train/eval split of the received data.
    received_signals_train, received_labels_train, received_signals_eval, received_labels_eval = \
        dataloader.segment_dataset(received_signals, received_labels, 0.8, random=False)
    print(received_signals_train.shape, received_signals_eval.shape)

    '''merge data'''
    # Keep all original categories, then append the newly received samples.
    # np.arange(n, dtype=np.int64) is the exact, idiomatic replacement for
    # np.linspace(0, n - 1, n, dtype=np.int64).
    keep_labels = np.arange(category_num, dtype=np.int64)
    signals_train, labels_train = dataloader.del_labels(
        ori_signals_train, ori_labels_train, keep_labels)
    signals_eval, labels_eval = dataloader.del_labels(
        ori_signals_eval, ori_labels_eval, keep_labels)

    signals_train = np.concatenate((signals_train, received_signals_train))
    labels_train = np.concatenate((labels_train, received_labels_train))
    signals_eval = np.concatenate((signals_eval, received_signals_eval))
    labels_eval = np.concatenate((labels_eval, received_labels_eval))

    # Recompute label statistics on the merged training set and refresh the
    # auto-derived options (class weights etc.) before training.
    label_cnt, label_cnt_per, label_num = statistics.label_statistics(
        labels_train)
    opt = options.get_auto_options(opt, label_cnt_per, label_num,
                                   signals_train)
    train_sequences = np.arange(len(labels_train), dtype=np.int64)
    eval_sequences = np.arange(len(labels_eval), dtype=np.int64)

    for epoch in range(opt.epochs):
        t1 = time.time()

        core.train(signals_train, labels_train, train_sequences)
        core.eval(signals_eval, labels_eval, eval_sequences)

        t2 = time.time()
        if epoch == 0:  # log the wall-clock cost of the first epoch only
            util.writelog(
                '>>> per epoch cost time:' + str(round((t2 - t1), 2)) + 's',
                opt, True)
    plot.draw_heatmap(core.confusion_mats[-1], opt, name='final')
    core.save_traced_net()
Example #2
0
# Directory where received (server-uploaded) recordings are stored, one
# sub-directory per category.
opt.parser.add_argument('--rec_tmp',
                        type=str,
                        default='./server_data/rec_data',
                        help='')
opt = opt.getparse()
opt.k_fold = 0  # no k-fold cross validation in this server/retrain workflow
opt.save_dir = './checkpoints'
util.makedirs(opt.save_dir)
util.makedirs(opt.rec_tmp)

# -----------------------------Load original data-----------------------------
signals, labels = dataloader.loaddataset(opt)
# Split at fold_index[0]: everything before it is train, the rest is eval.
# .copy() so later concatenations/mutations cannot alias the loaded arrays.
ori_signals_train,ori_labels_train,ori_signals_eval,ori_labels_eval = \
signals[:opt.fold_index[0]].copy(),labels[:opt.fold_index[0]].copy(),signals[opt.fold_index[0]:].copy(),labels[opt.fold_index[0]:].copy()
label_cnt, label_cnt_per, label_num = statistics.label_statistics(labels)
opt = options.get_auto_options(opt, label_cnt_per, label_num,
                               ori_signals_train)

# -----------------------------def network-----------------------------
core = core.Core(opt)  # NB: rebinds the module name `core` to the Core instance
core.network_init(printflag=True)


# -----------------------------train-----------------------------
def train(opt):
    """Retrain the global ``core`` network on categories found in ``opt.rec_tmp``.

    NOTE(review): this excerpt is truncated — only the opening lines of the
    function are shown here.
    """
    core.network_init(printflag=True)

    # One sub-directory of opt.rec_tmp per category; sorted so that the
    # category -> label-index mapping is deterministic.
    categorys = os.listdir(opt.rec_tmp)
    categorys.sort()
    print('categorys:', categorys)
    category_num = len(categorys)
Example #3
0
# 2.shape  signals:[num,ch,length]    labels:[num]
# num:samples_num, ch :channel_num,  length:length of each sample
# for example:
signals = np.zeros((10,1,10),dtype=np.float64)
labels = np.array([0,0,0,0,0,1,1,1,1,1])      #0->class0    1->class1
* step2: input  ```--dataset_dir your_dataset_dir``` when running code.
"""

#----------------------------Load Data----------------------------
t1 = time.time()
signals, labels = dataloader.loaddataset(opt)
# Optionally augment the training split with DCGAN-generated signals.
if opt.gan:
    signals, labels = augmenter.dcgan(opt, signals, labels)
label_cnt, label_cnt_per, label_num = statistics.label_statistics(labels)
util.writelog('label statistics: ' + str(label_cnt), opt, True)
opt = options.get_auto_options(opt, signals, labels)
# Index sequences for each of the opt.k_fold train/eval splits.
train_sequences, eval_sequences = transforms.k_fold_generator(
    len(labels), opt.k_fold, opt.fold_index)
t2 = time.time()
print('Cost time: %.2f' % (t2 - t1), 's')

core = core.Core(opt)  # NB: rebinds the module name `core` to the Core instance
core.network_init(printflag=True)

print('Begin to train ...')
# Confusion matrix accumulated over all folds; opt.label is the class count.
final_confusion_mat = np.zeros((opt.label, opt.label), dtype=int)
final_results = {}
for fold in range(opt.k_fold):
    if opt.k_fold != 1:
        # NOTE(review): this call is truncated in this excerpt — the closing
        # arguments of util.writelog (and the rest of the loop) are not shown.
        util.writelog(
            '------------------------------ k-fold:' + str(fold + 1) +
Example #4
0
* step1: Generate signals.npy and labels.npy in the following format.
# 1.type:numpydata   signals:np.float64   labels:np.int64
# 2.shape  signals:[num,ch,length]    labels:[num]
# num:samples_num, ch :channel_num,  length:length of each sample
# for example:
signals = np.zeros((10,1,10),dtype=np.float64)
labels = np.array([0,0,0,0,0,1,1,1,1,1])      #0->class0    1->class1
* step2: input  ```--dataset_dir your_dataset_dir``` when running code.
"""

#----------------------------Load Data----------------------------
t1 = time.time()
signals, labels = dataloader.loaddataset(opt)
label_cnt, label_cnt_per, label_num = statistics.label_statistics(labels)
util.writelog('label statistics: ' + str(label_cnt), opt, True)
opt = options.get_auto_options(opt, label_cnt_per, label_num, signals)
# Index sequences for each of the opt.k_fold train/eval splits.
train_sequences, eval_sequences = transformer.k_fold_generator(
    len(labels), opt.k_fold, opt.fold_index)
t2 = time.time()
print('Cost time: %.2f' % (t2 - t1), 's')

core = core.Core(opt)  # NB: rebinds the module name `core` to the Core instance
core.network_init(printflag=True)

print('Begin to train ...')
# Confusion matrix accumulated over all folds; opt.label is the class count.
fold_final_confusion_mat = np.zeros((opt.label, opt.label), dtype=int)
for fold in range(opt.k_fold):
    if opt.k_fold != 1:
        # NOTE(review): the loop body continues past this excerpt.
        util.writelog(
            '------------------------------ k-fold:' + str(fold + 1) +
            ' ------------------------------', opt, True)
Example #5
0
def dcgan(opt,signals,labels):
    """Augment the training split of a dataset with DCGAN-generated signals.

    For every label listed in ``opt.gan_labels``, trains a fresh DCGAN on that
    label's training samples and appends generated samples (about
    ``gan_augment_times - 1`` times the originals) to the training set. The
    eval split (everything from ``opt.fold_index[0]`` on) is passed through
    untouched, and ``opt.fold_index`` is updated to the new train/eval boundary.

    :param opt: option namespace (gpu_id, batchsize, loadsize, input_nc,
        gan_lr, gan_epochs, gan_labels, gan_latent_dim, gan_augment_times, ...)
    :param signals: array of signals; assumes shape [num, ch, length]
        compatible with reshape(batchsize, input_nc, loadsize) — TODO confirm
    :param labels: integer label array aligned with ``signals``
    :return: (out_signals, out_labels) — augmented train split followed by the
        original eval split
    """
    print('Augment dataset using gan...')
    if opt.gpu_id != '-1':
        os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpu_id)
    if not opt.no_cudnn:
        torch.backends.cudnn.benchmark = True

    # Train/eval boundary: fold_index[0] samples go to training.
    signals_train = signals[:opt.fold_index[0]]
    labels_train  = labels[:opt.fold_index[0]]
    signals_eval = signals[opt.fold_index[0]:]
    labels_eval  = labels[opt.fold_index[0]:]


    # Sort training samples by label so each class occupies a contiguous
    # run, which index_cnt walks over below.
    signals_train = signals_train[labels_train.argsort()]
    labels_train = labels_train[labels_train.argsort()]
    out_signals = signals_train.copy()
    out_labels = labels_train.copy()
    label_cnt,label_cnt_per,label_num = statistics.label_statistics(labels_train)
    opt = options.get_auto_options(opt, signals_train, labels_train)


    generator = Generator(opt.loadsize,opt.input_nc,opt.gan_latent_dim)
    discriminator = Discriminator(opt.loadsize,opt.input_nc)
    show_paramsnumber(generator, opt)
    show_paramsnumber(discriminator, opt)

    ganloss = GANloss(opt.gpu_id,opt.batchsize)

    if opt.gpu_id != '-1':
        generator.cuda()
        discriminator.cuda()
        ganloss.cuda()

    # Optimizers
    optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.gan_lr, betas=(0.5, 0.999))
    optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.gan_lr, betas=(0.5, 0.999))

    index_cnt = 0  # start offset of the current label's contiguous run
    for which_label in range(len(label_cnt)):

        if which_label in opt.gan_labels:
            # Slice out this label's samples from the label-sorted arrays.
            sub_signals = signals_train[index_cnt:index_cnt+label_cnt[which_label]]
            sub_labels = labels_train[index_cnt:index_cnt+label_cnt[which_label]]

            # Re-initialize weights so each label gets a freshly-trained GAN.
            generator.apply(weights_init_normal)
            discriminator.apply(weights_init_normal)
            generator.train()
            discriminator.train()

            for epoch in range(opt.gan_epochs):
                epoch_g_loss = 0
                epoch_d_loss = 0
                # Leftover samples (< batchsize) are dropped each epoch.
                iter_pre_epoch = len(sub_labels)//opt.batchsize
                transformer.shuffledata(sub_signals, sub_labels)
                t1 = time.time()
                for i in range(iter_pre_epoch):
                    real_signal = sub_signals[i*opt.batchsize:(i+1)*opt.batchsize].reshape(opt.batchsize,opt.input_nc,opt.loadsize)
                    real_signal = transformer.ToTensor(real_signal,gpu_id=opt.gpu_id)

                    #  Train Generator: push D(G(z)) towards "real".
                    optimizer_G.zero_grad()
                    z = transformer.ToTensor(np.random.normal(0, 1, (opt.batchsize, opt.gan_latent_dim)),gpu_id = opt.gpu_id)
                    gen_signal = generator(z)
                    g_loss = ganloss(discriminator(gen_signal),True)
                    epoch_g_loss += g_loss.item()
                    g_loss.backward()
                    optimizer_G.step()

                    #  Train Discriminator: real -> True, detached fake -> False
                    #  (.detach() keeps D's step from backpropagating into G).
                    optimizer_D.zero_grad()
                    d_real = ganloss(discriminator(real_signal), True)
                    d_fake = ganloss(discriminator(gen_signal.detach()), False)
                    d_loss = (d_real + d_fake) / 2
                    epoch_d_loss += d_loss.item()
                    d_loss.backward()
                    optimizer_D.step()
                t2 = time.time()
                print(
                    "[Label %d] [Epoch %d/%d] [D loss: %.4f] [G loss: %.4f] [time: %.2f]"
                    % (sub_labels[0], epoch+1, opt.gan_epochs, epoch_g_loss/iter_pre_epoch, epoch_d_loss/iter_pre_epoch, t2-t1)
                )

            # Plot the last batch of this label's training.
            # NOTE(review): real_signal/gen_signal are loop variables — this
            # raises NameError if gan_epochs == 0 or a label has fewer than
            # batchsize samples (iter_pre_epoch == 0).
            plot.draw_gan_result(real_signal.data.cpu().numpy(), gen_signal.data.cpu().numpy(),opt)

            # Generation phase: append (gan_augment_times - 1) x the original
            # sample count, in whole batches, all tagged with which_label.
            generator.eval()
            for i in range(int(len(sub_labels)*(opt.gan_augment_times-1))//opt.batchsize):
                z = transformer.ToTensor(np.random.normal(0, 1, (opt.batchsize, opt.gan_latent_dim)),gpu_id = opt.gpu_id)
                gen_signal = generator(z)
                out_signals = np.concatenate((out_signals, gen_signal.data.cpu().numpy()))
                #print(np.ones((opt.batchsize),dtype=np.int64)*which_label)
                out_labels = np.concatenate((out_labels,np.ones((opt.batchsize),dtype=np.int64)*which_label))

        index_cnt += label_cnt[which_label]
    # New train/eval boundary: everything generated so far is training data.
    opt.fold_index = [len(out_labels)]
    out_signals = np.concatenate((out_signals, signals_eval))
    out_labels = np.concatenate((out_labels, labels_eval))
    # return signals,labels
    return out_signals,out_labels
Example #6
0
# Directory where received (server-uploaded) recordings are stored, one
# sub-directory per category.
opt.parser.add_argument('--rec_tmp',
                        type=str,
                        default='./server_data/rec_data',
                        help='')
opt = opt.getparse()
opt.k_fold = 0  # no k-fold cross validation in this server/retrain workflow
opt.save_dir = './checkpoints'
util.makedirs(opt.save_dir)
util.makedirs(opt.rec_tmp)

# -----------------------------Load original data-----------------------------
signals, labels = dataloader.loaddataset(opt)
# Split at fold_index[0]: everything before it is train, the rest is eval.
# .copy() so later concatenations/mutations cannot alias the loaded arrays.
ori_signals_train,ori_labels_train,ori_signals_eval,ori_labels_eval = \
signals[:opt.fold_index[0]].copy(),labels[:opt.fold_index[0]].copy(),signals[opt.fold_index[0]:].copy(),labels[opt.fold_index[0]:].copy()
label_cnt, label_cnt_per, label_num = statistics.label_statistics(labels)
opt = options.get_auto_options(opt, ori_signals_train, ori_labels_train)
categorys = []  # filled by train() below via `global categorys`
# -----------------------------def network-----------------------------
core = core.Core(opt)  # NB: rebinds the module name `core` to the Core instance
core.network_init(printflag=True)


# -----------------------------train-----------------------------
def train(opt):
    """Retrain the global ``core`` network on categories found in ``opt.rec_tmp``.

    NOTE(review): this excerpt is truncated — only the opening lines of the
    function are shown here.
    """
    core.network_init(printflag=True)
    # Publish the category list at module level so other code can map
    # predicted label indices back to category names.
    global categorys
    categorys = os.listdir(opt.rec_tmp)
    categorys.sort()
    print('categorys:', categorys)
    category_num = len(categorys)