Example #1
def stagefrommat(mat):
    # mat: confusion matrix with rows = true classes; summing row i gives
    # the number of samples whose true label is class i
    wide = mat.shape[0]
    stage_num = np.zeros(wide, dtype='int')
    for i in range(wide):
        stage_num[i] = np.sum(mat[i])
    util.writelog('statistics of dataset [S3 S2 S1 R W]:\n' + str(stage_num),
                  True)
Example #2
def stage(stages):
    # sleep stage mapping: N3->0  N2->1  N1->2  REM->3  W->4
    stage_cnt = np.array([0, 0, 0, 0, 0])
    for i in range(len(stages)):
        stage_cnt[stages[i]] += 1
    stage_cnt_per = stage_cnt / len(stages)
    util.writelog('     dataset statistics [S3 S2 S1 R W]: ' + str(stage_cnt), True)
    return stage_cnt, stage_cnt_per
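A quick usage sketch (the toy array and values below are illustrative; `np` and the project's `util.writelog` are assumed to be set up as in the other examples):

stages_demo = np.array([0, 0, 0, 3, 4, 4])        # 3x N3, 1x REM, 2x W
stage_cnt, stage_cnt_per = stage(stages_demo)
# stage_cnt     -> [3 0 0 1 2]                    counts in the order [S3 S2 S1 R W]
# stage_cnt_per -> [0.5  0.  0.  0.167  0.333]    fraction of all epochs per class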
Example #3
def stage(opt, stages):
    # sleep stage mapping: N3->0  N2->1  N1->2  REM->3  W->4
    stage_cnt = np.zeros(opt.label, dtype=np.int64)
    for i in range(len(stages)):
        stage_cnt[stages[i]] += 1
    stage_cnt_per = stage_cnt / len(stages)
    util.writelog(str(stage_cnt), True)
    return stage_cnt, stage_cnt_per
Example #4
    def getparse(self):
        if not self.initialized:
            self.initialize()
        self.opt = self.parser.parse_args()

        if self.opt.dataset_name == 'sleep-edf':
            self.opt.sample_num = 8
        if self.opt.dataset_name not in ['sleep-edf', 'sleep-edfx', 'cc2018']:
            self.opt.BID = 'not-supported'
            self.opt.select_sleep_time = 'not-supported'
            self.opt.signal_name = 'not-supported'
            self.opt.sample_num = 'not-supported'

        if self.opt.no_cuda:
            self.opt.no_cudnn = True

        if self.opt.k_fold == 0:
            self.opt.k_fold = 1

        if self.opt.label_name == 'auto':
            if self.opt.dataset_name in ['sleep-edf', 'sleep-edfx', 'cc2018']:
                self.opt.label_name = ["N3", "N2", "N1", "REM", "W"]
            else:
                names = []
                for i in range(self.opt.label):
                    names.append(str(i))
                self.opt.label_name = names
        else:
            self.opt.label_name = self.opt.label_name.replace(" ",
                                                              "").split(",")

        self.opt.mergelabel = eval(self.opt.mergelabel)
        if self.opt.mergelabel_name != 'None':
            self.opt.mergelabel_name = self.opt.mergelabel_name.replace(
                " ", "").split(",")
        """Print and save options
        It will print both current options and default values(if different).
        It will save options into a text file / [checkpoints_dir] / opt.txt
        """
        message = ''
        message += '----------------- Options ---------------\n'
        for k, v in sorted(vars(self.opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>20}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += '----------------- End -------------------'
        localtime = time.asctime(time.localtime(time.time()))
        util.makedirs(self.opt.save_dir)
        util.writelog(str(localtime) + '\n' + message, self.opt, True)

        return self.opt
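The loop above relies on `argparse`'s `get_default()` to flag only the options that differ from their defaults; a minimal self-contained sketch of the same pattern (hypothetical parser and flags, not the project's real option set):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--batchsize', type=int, default=64)
parser.add_argument('--k_fold', type=int, default=0)
opt = parser.parse_args(['--k_fold', '5'])

for k, v in sorted(vars(opt).items()):
    comment = ''
    default = parser.get_default(k)
    if v != default:
        comment = '\t[default: %s]' % str(default)   # only overridden options get a note
    print('{:>20}: {:<30}{}'.format(str(k), str(v), comment))
# here only the k_fold line is annotated with "[default: 0]", since it was overridden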
Example #5
def statistics(mat, opt, logname, heatmapname):
    util.writelog(
        '------------------------------ ' + logname +
        ' result ------------------------------', opt, True)
    util.writelog(
        logname + ' -> macro-prec,reca,F1,err,kappa: ' + str(report(mat)), opt,
        True)
    util.writelog('confusion_mat:\n' + str(mat) + '\n', opt, True)
    heatmap.draw(mat, opt, name=heatmapname)
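For reference, a generic sketch of how macro precision/recall/F1, error rate, and Cohen's kappa can be derived from a confusion matrix; the project's `report()` may differ in details:

import numpy as np

def report_sketch(mat):
    mat = np.asarray(mat, dtype=np.float64)
    tp = np.diag(mat)                                    # correct predictions per class
    prec = tp / np.maximum(mat.sum(axis=0), 1e-12)       # column sums = predicted counts
    reca = tp / np.maximum(mat.sum(axis=1), 1e-12)       # row sums = true counts
    f1 = 2 * prec * reca / np.maximum(prec + reca, 1e-12)
    n = mat.sum()
    err = 1.0 - tp.sum() / n                             # overall error rate
    po = tp.sum() / n                                    # observed agreement
    pe = (mat.sum(axis=0) * mat.sum(axis=1)).sum() / (n * n)   # chance agreement
    kappa = (po - pe) / (1.0 - pe)                       # Cohen's kappa
    return prec.mean(), reca.mean(), f1.mean(), err, kappa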
Example #6
import time
import transformer
import dataloader
import models
import torch
from torch import nn, optim
import statistics
import torch.backends.cudnn as cudnn
import heatmap
import util
from options import Options
import warnings
warnings.filterwarnings("ignore")

opt = Options().getparse()
localtime = time.asctime(time.localtime(time.time()))
util.writelog('\n\n' + str(localtime) + '\n' + str(opt))

t1 = time.time()
signals, stages = dataloader.loaddataset(opt,
                                         opt.dataset_dir,
                                         opt.dataset_name,
                                         opt.signal_name,
                                         opt.sample_num,
                                         shuffle=True,
                                         BID=None)
stage_cnt, stage_cnt_per = statistics.stage(stages)
signals, stages = transformer.batch_generator(signals,
                                              stages,
                                              opt.batchsize,
                                              shuffle=True)
batch_length = len(stages)
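`transformer.batch_generator` is the project's helper; conceptually it shuffles the samples and groups them into fixed-size batches, roughly like the sketch below (illustrative only, the real implementation may differ):

import numpy as np

def batch_generator_sketch(signals, stages, batchsize, shuffle=True):
    idx = np.arange(len(stages))
    if shuffle:
        np.random.shuffle(idx)                     # random sample order
    signals, stages = signals[idx], stages[idx]
    n_batch = len(stages) // batchsize             # drop the incomplete last batch
    signals = signals[:n_batch * batchsize].reshape(n_batch, batchsize, *signals.shape[1:])
    stages = stages[:n_batch * batchsize].reshape(n_batch, batchsize)
    return signals, stages                         # len(stages) is now the batch count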
Example #7
# num: number of samples, ch: number of channels, size: length of each sample
# for example:
signals = np.zeros((10, 1, 10), dtype=np.float64)
labels = np.array([0,0,0,0,0,1,1,1,1,1])      # 0 -> class0    1 -> class1
* step2: pass  ```--dataset_dir your_dataset_dir``` when running the code.
'''

signals, labels = dataloader.loaddataset(opt)
label_cnt, label_cnt_per, _ = statistics.label_statistics(labels)
train_sequences, test_sequences = transformer.k_fold_generator(
    len(labels), opt.k_fold)
t2 = time.time()
print('load data cost time: %.2f' % (t2 - t1), 's')

net = CreatNet(opt)
util.writelog('network:\n' + str(net), opt, True)

util.show_paramsnumber(net, opt)
weight = np.ones(opt.label)
if opt.weight_mod == 'auto':
    weight = 1 / label_cnt_per
    weight = weight / np.min(weight)
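# Example (hypothetical numbers): with label_cnt_per = [0.5, 0.3, 0.2],
# 1 / label_cnt_per = [2.0, 3.33, 5.0]; dividing by its minimum gives
# weight = [1.0, 1.67, 2.5], so rarer classes contribute more to the loss.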
util.writelog('label statistics: ' + str(label_cnt), opt, True)
util.writelog('Loss_weight:' + str(weight), opt, True)
weight = torch.from_numpy(weight).float()
# print(net)

if not opt.no_cuda:
    net.cuda()
    weight = weight.cuda()
if not opt.no_cudnn:
Example #8
File: train.py  Project: ljyljy/candock
import time

import torch
from torch import nn, optim
import warnings
warnings.filterwarnings("ignore")

import util
import transformer
import dataloader
import statistics
import heatmap
from creatnet import CreatNet
from options import Options

opt = Options().getparse()
localtime = time.asctime(time.localtime(time.time()))
util.writelog('\n\n' + str(localtime) + '\n' + str(opt))
t1 = time.time()
'''
To train on your own data, it needs to meet the following conditions:
1. type   numpy arrays   signals: np.float16   stages: np.int16
2. shape                 signals: [?, 3000]    stages: [?]
3. fs = 100 Hz
4. the input signal data should be normalized!
   we recommend normalizing the signals with the 5_95_th method per subject,
   example: signals_normalized = transformer.Balance_individualized_differences(signals_origin, '5_95_th')
'''
signals_train, labels_train, signals_test, labels_test = dataloader.loaddataset(
    opt.dataset_dir, opt.dataset_name, opt.signal_name, opt.sample_num,
    opt.BID, opt.select_sleep_time)
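A minimal sketch of preparing data that satisfies the conditions listed in the docstring above (the array contents are dummy placeholders; `Balance_individualized_differences` with the '5_95_th' mode is the transformer helper the docstring itself recommends):

import numpy as np
import transformer

# 1000 epochs of 30 s at fs = 100 Hz -> 3000 points per epoch
signals_origin = np.random.randn(1000, 3000).astype(np.float16)
stages_origin = np.random.randint(0, 5, size=1000).astype(np.int16)

# per-subject 5th/95th percentile normalization, as recommended above
signals_normalized = transformer.Balance_individualized_differences(signals_origin, '5_95_th')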