def dataload(Path,
             inputsrecord,
             targets,
             location,
             leadnum=12,
             downsample=2,
             buf_size=100):
    """Load the records selected by `location`, resample each signal to
    `target_len` samples, and stack them into a single float32 array of
    shape (n_samples, target_len, leadnum)."""
    target_len = int(72000 / downsample)
    samplename = inputsrecord[location]
    samplelabel = targets[location]
    # Both buffers start with one all-zero dummy row that is stripped
    # before every flush and again before returning.
    SEG_buf = np.zeros([1, target_len, leadnum], dtype=np.float32)
    SEGs = np.zeros([1, target_len, leadnum], dtype=np.float32)
    for i in range(len(samplename)):
        sig = np.load(Path + samplename[i])
        SEGt = np.float32(utils.sig_process(sig, target_length=target_len))
        SEG_buf = np.concatenate((SEG_buf, SEGt))
        del SEGt
        # Flush the small buffer into the result array once it fills up,
        # so the large array is reallocated less often.
        if SEG_buf.shape[0] >= buf_size:
            SEGs = np.concatenate((SEGs, SEG_buf[1:]))
            del SEG_buf
            SEG_buf = np.zeros([1, target_len, leadnum], dtype=np.float32)
    if SEG_buf.shape[0] > 1:
        SEGs = np.concatenate((SEGs, SEG_buf[1:]))
    del SEG_buf
    return SEGs[1:], samplelabel
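A minimal usage sketch (the variable names and the .npy layout are assumptions, not shown in the source):

x, y = dataload(config.DATA_PATH,
                inputsrecord,   # assumed: list of arrays of .npy filenames
                targets,        # assumed: matching list of label arrays
                location=0)
print(x.shape)  # (n_samples, 36000, 12) with the default downsample=2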
Example #2
def datarecord2(input_directory,
                target_lead,
                buf_size=100,
                segnum=24,
                seg_length=750,
                full_seg=True,
                stt=0):
    # Collect the visible .mat record files from the input directory.
    input_files = []
    for f in os.listdir(input_directory):
        if os.path.isfile(os.path.join(input_directory, f)) \
                and not f.lower().startswith('.') and f.lower().endswith('mat'):
            input_files.append(f)

    classes = get_classes(input_directory, input_files)
    num_files = len(input_files)
    datalabel = []
    SEG_buf = np.zeros([1, seg_length, segnum], dtype=np.float32)
    SEGs = np.zeros([1, seg_length, segnum], dtype=np.float32)
    for i, f in enumerate(input_files):
        print('    {}/{}...'.format(i + 1, num_files))
        tmp_input_file = os.path.join(input_directory, f)
        data, header_data = load_challenge_data(tmp_input_file)
        datalabel.append(getdata_class(header_data))
        # Keep only the target lead and cut it into stacked fixed-length segments.
        datalead = data[target_lead, :]
        SEGt = utils.Stack_Segs_generate2(datalead,
                                          seg_num=segnum,
                                          seg_length=seg_length,
                                          full_seg=full_seg,
                                          stt=stt)
        del data, datalead
        SEG_buf = np.concatenate((SEG_buf, SEGt))
        del SEGt
        if SEG_buf.shape[0] >= buf_size:
            SEGs = np.concatenate((SEGs, SEG_buf[1:]))
            del SEG_buf
            SEG_buf = np.zeros([1, seg_length, segnum], dtype=np.float32)
    if SEG_buf.shape[0] > 1:
        SEGs = np.concatenate((SEGs, SEG_buf[1:]))
    del SEG_buf
    datalabel = np.array(datalabel)
    return SEGs[1:], datalabel, len(classes)
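The buffer-and-flush pattern above keeps the frequent np.concatenate calls on the small SEG_buf and only occasionally touches the growing SEGs array. A simpler equivalent (a sketch, not the author's code) collects chunks in a Python list and concatenates once at the end:

segs_list = []
for f in input_files:
    data, header_data = load_challenge_data(os.path.join(input_directory, f))
    segs_list.append(utils.Stack_Segs_generate2(data[target_lead, :],
                                                seg_num=24, seg_length=750,
                                                full_seg=True, stt=0))
SEGs = np.concatenate(segs_list)  # single allocation at the end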
Example #3
def minibatches(Path,
                inputsrecord,
                targets,
                batch_size,
                leadnum=12,
                downsample=2):
    i = 0
    target_len = int(72000 / downsample)
    # Create the index array once; resetting it inside the loop would undo
    # the shuffle on every batch.
    indices = np.arange(len(inputsrecord))
    while 1:  # loop forever: Keras-style generators never terminate
        labels = []
        SEG_buf = np.zeros([1, target_len, leadnum], dtype=np.float32)

        for b in range(batch_size):
            if i == len(inputsrecord):
                i = 0
                np.random.shuffle(indices)
            samplename = inputsrecord[indices[i]]
            # Index the labels with the same shuffled index as the signals.
            samplelabel = targets[indices[i]]
            labels.append(samplelabel)
            i += 1
            sig = np.load(Path + samplename)
            SEGt = np.float32(utils.sig_process(sig, target_length=target_len))
            SEG_buf = np.concatenate((SEG_buf, SEGt))
            del SEGt

        yield SEG_buf[1:], np.array(labels)
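A hedged usage sketch with Keras (the model and hyperparameters are illustrative; older Keras versions use model.fit_generator instead of model.fit):

gen = minibatches(config.DATA_PATH, inputsrecord, targets, batch_size=32)
model.fit(gen,
          steps_per_epoch=len(inputsrecord) // 32,
          epochs=10)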
Example #4
def datarecord(input_directory, downsample, buf_size=100, leadnum=12):
    input_files = []
    for f in os.listdir(input_directory):
        if os.path.isfile(
                os.path.join(input_directory, f)
        ) and not f.lower().startswith('.') and f.lower().endswith('mat'):
            input_files.append(f)

    classes = get_classes(input_directory, input_files)
    num_files = len(input_files)
    datalabel = []
    target_len = int(72000 / downsample)
    SEG_buf = np.zeros([1, target_len, leadnum], dtype=np.float32)
    SEGs = np.zeros([1, target_len, leadnum], dtype=np.float32)
    for i, f in enumerate(input_files):
        print('    {}/{}...'.format(i + 1, num_files))
        tmp_input_file = os.path.join(input_directory, f)
        data, header_data = load_challenge_data(tmp_input_file)
        datalabel.append(getdata_class(header_data))
        # Resample every lead of the record to target_len samples.
        SEGt = np.float32(utils.sig_process(data, target_length=target_len))
        del data
        SEG_buf = np.concatenate((SEG_buf, SEGt))
        del SEGt
        if SEG_buf.shape[0] >= buf_size:
            SEGs = np.concatenate((SEGs, SEG_buf[1:]))
            del SEG_buf
            SEG_buf = np.zeros([1, target_len, leadnum], dtype=np.float32)
    if SEG_buf.shape[0] > 1:
        SEGs = np.concatenate((SEGs, SEG_buf[1:]))
    del SEG_buf
    datalabel = np.array(datalabel)
    return SEGs[1:], datalabel, len(classes)
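The labels returned here can be one-hot encoded for training, as the scripts later on this page do; a sketch (the path and split size are illustrative, and it assumes getdata_class yields integer class indices in this variant):

SEGs, datalabel, class_num = datarecord('./Training_WFDB', downsample=2)
x_train, x_val, y_train, y_val = train_test_split(
    SEGs,
    to_categorical(datalabel, num_classes=class_num),
    test_size=0.2)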
Example #5
def __init__(self, sig, fs=250.0):
    assert len(sig.shape) == 1, 'The signal must be 1-dimensional.'
    assert sig.shape[0] >= fs * 6, 'The signal must be at least 6 seconds long.'
    self.sig = utils.WTfilt_1d(sig)  # wavelet-filtered copy of the raw signal
    self.fs = fs
    # Detect R peaks with the Hamilton segmenter (biosppy), then refine them.
    self.rpeaks, = ecg.hamilton_segmenter(signal=self.sig,
                                          sampling_rate=self.fs)
    self.rpeaks, = ecg.correct_rpeaks(signal=self.sig,
                                      rpeaks=self.rpeaks,
                                      sampling_rate=self.fs)
    self.RR_intervals = np.diff(self.rpeaks)  # RR intervals in samples
    self.dRR = np.diff(self.RR_intervals)     # successive RR differences
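This __init__ belongs to a feature-extractor class whose name is not shown; the R-peak detection relies on biosppy. A usage sketch under those assumptions (the class name QRSFeatures is hypothetical):

from biosppy.signals import ecg  # provides hamilton_segmenter, correct_rpeaks

feat = QRSFeatures(sig_1d, fs=500.0)  # hypothetical enclosing class
print(len(feat.rpeaks), feat.RR_intervals.mean(), feat.dRR.std())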
Example #6

def datafeatrecord(input_directory,
                   records,
                   downsample,
                   buf_size=100,
                   leadnum=12,
                   featurenum=25):
    classes = get_classes(input_directory, records)
    num_files = len(records)
    datalabel = np.zeros([1, 9])  # one-hot labels (9 classes); dummy first row
    target_len = int(72000 / downsample)
    # The extra channel (leadnum + 1) carries the hand-crafted features.
    SEG_buf = np.zeros([1, target_len, leadnum + 1], dtype=np.float32)
    SEGs = np.zeros([1, target_len, leadnum + 1], dtype=np.float32)
    featurezero = np.zeros([target_len, 1])
    for i, f in enumerate(records):
        print('    {}/{}...'.format(i + 1, num_files))
        tmp_input_file = os.path.join(input_directory, f)
        data, header_data = load_challenge_data(tmp_input_file)
        labelonhot, label0 = getdata_class(header_data)
        datalabel = np.concatenate((datalabel, labelonhot), axis=0)
        # Zero-pad the hand-crafted feature vector to target_len so it can be
        # appended to the resampled signal as one extra channel.
        features = np.asarray(get_12ECG_features(data, header_data))
        featurezero[0:featurenum, 0] = features[0:featurenum]
        feats_reshape = featurezero.reshape(
            [1, featurezero.shape[0], featurezero.shape[1]])

        SEGt = np.float32(utils.sig_process(data, target_length=target_len))
        SEGt = np.concatenate((SEGt, feats_reshape), axis=2)
        del data
        SEG_buf = np.concatenate((SEG_buf, SEGt))
        del SEGt
        if SEG_buf.shape[0] >= buf_size:
            SEGs = np.concatenate((SEGs, SEG_buf[1:]))
            del SEG_buf
            SEG_buf = np.zeros([1, target_len, leadnum + 1], dtype=np.float32)
    if SEG_buf.shape[0] > 1:
        SEGs = np.concatenate((SEGs, SEG_buf[1:]))
    del SEG_buf
    return SEGs[1:], datalabel[1:]
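Because the hand-crafted features travel as an extra channel, a consumer can split them back out of the returned array; a sketch (the path is illustrative):

SEGs, labels = datafeatrecord('./Training_WFDB', records, downsample=2)
signals = SEGs[:, :, :12]     # the 12 ECG leads
features = SEGs[:, :25, 12]   # first featurenum samples of the 13th channel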
Example #7
records_label = np.load(config.REVISED_LABEL) - 1
class_num = len(np.unique(records_label))

train_val_records, _, train_val_labels, test_labels = train_test_split(
    records_name,
    records_label,
    test_size=0.2,
    random_state=config.RANDOM_STATE)

train_records, val_records, train_labels, val_labels = train_test_split(
    train_val_records,
    train_val_labels,
    test_size=0.2,
    random_state=config.RANDOM_STATE)

_, train_labels = utils.oversample_balance(train_records, train_labels,
                                           config.RANDOM_STATE)
_, val_labels = utils.oversample_balance(val_records, val_labels,
                                         config.RANDOM_STATE)

# Load the previously saved network output probabilities and hand-crafted features ---------------------------------------
pred_nnet_r = np.load(config.MODEL_PATH + 'pred_nnet_r.npy')
pred_nnet_v = np.load(config.MODEL_PATH + 'pred_nnet_v.npy')
pred_nnet_t = np.load(config.MODEL_PATH + 'pred_nnet_t.npy')

man_features_r = np.load(config.MAN_FEATURE_PATH + 'man_features_r.npy')
man_features_v = np.load(config.MAN_FEATURE_PATH + 'man_features_v.npy')
man_features_t = np.load(config.MAN_FEATURE_PATH + 'man_features_t.npy')

pred_r = np.concatenate((pred_nnet_r, man_features_r), axis=1)
pred_v = np.concatenate((pred_nnet_v, man_features_v), axis=1)
pred_t = np.concatenate((pred_nnet_t, man_features_t), axis=1)
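The stacked arrays (network probabilities plus hand-crafted features) are the usual input to a second-stage classifier; a minimal sketch, assuming the rows of pred_r and pred_v align with the oversampled train/val labels (LogisticRegression is illustrative, not necessarily the author's model):

from sklearn.linear_model import LogisticRegression

meta = LogisticRegression(max_iter=1000, random_state=config.RANDOM_STATE)
meta.fit(pred_r, train_labels)
print(meta.score(pred_v, val_labels))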
Example #8
train_val_records, test_records, train_val_labels, test_labels = train_test_split(
    records_name,
    records_label,
    test_size=0.2,
    random_state=config.RANDOM_STATE)
del test_records, test_labels

train_records, val_records, train_labels, val_labels = train_test_split(
    train_val_records,
    train_val_labels,
    test_size=0.2,
    random_state=config.RANDOM_STATE)

# Oversample so the training and validation sets have balanced class distributions ---------------------------------------
train_records, train_labels = utils.oversample_balance(train_records,
                                                       train_labels,
                                                       config.RANDOM_STATE)
val_records, val_labels = utils.oversample_balance(val_records, val_labels,
                                                   config.RANDOM_STATE)

# Fetch the target-lead signal of each training/validation patient, then slice it into segments and z-score normalize ----
print('Fetching data ...-----------------\n')
TARGET_LEAD = 1
train_x = utils.Fetch_Pats_Lbs_sLead(train_records,
                                     Path=config.DATA_PATH,
                                     target_lead=TARGET_LEAD,
                                     seg_num=config.SEG_NUM,
                                     seg_length=config.SEG_LENGTH)
train_y = to_categorical(train_labels, num_classes=class_num)
val_x = utils.Fetch_Pats_Lbs_sLead(val_records,
                                   Path=config.DATA_PATH,