import os

from keras.optimizers import Adam  # assuming standalone Keras; adjust for tensorflow.keras

# proposed_model, generator_train_batch, generator_val_batch, Step, plot_history and
# save_history are assumed to be provided by the project's own modules.


def main():
    # Training on the 7-class "special" split of MIT-BIH_AD_v3 with 128x128 inputs.
    outputdir = 'result/special_v3/'
    if os.path.isdir(outputdir):
        print('save in :' + outputdir)
    else:
        os.makedirs(outputdir)

    train_img_path = '/data/MIT-BIH_AD_v3/'
    test_img_path = '/data/MIT-BIH_AD_v3/'
    train_file = './MIT-BIH_AD_sp_train.txt'
    test_file = './MIT-BIH_AD_sp_val.txt'
    num_classes = 7

    # The list files hold one "path label" entry per sample, so their length gives the
    # number of training / validation samples.
    with open(train_file, 'r') as f1:
        train_samples = len(f1.readlines())
    with open(test_file, 'r') as f2:
        val_samples = len(f2.readlines())

    batch_size = 32
    epochs = 120
    input_h = 128
    input_w = 128

    model = proposed_model(nb_classes=num_classes)
    lr = 0.0001
    adam = Adam(lr=lr)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    model.summary()

    history = model.fit_generator(
        generator_train_batch(train_file, batch_size, num_classes, train_img_path, input_h, input_w),
        steps_per_epoch=train_samples // batch_size,
        epochs=epochs,
        callbacks=[Step()],
        validation_data=generator_val_batch(test_file, batch_size, num_classes, test_img_path, input_h, input_w),
        validation_steps=val_samples // batch_size,
        verbose=1)

    plot_history(history, outputdir)
    save_history(history, outputdir)
    model.save_weights(outputdir + 'proposed_model')
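
# The generator helpers above (generator_train_batch / generator_val_batch) belong to the
# project and are not shown in this file. As a point of reference only, the sketch below
# shows what such a generator is assumed to do: read "path label" lines from the list file,
# load and resize the beat images, and yield one-hot encoded batches indefinitely. The name
# sketch_train_generator and every detail of the body are illustrative assumptions, not the
# repository's actual implementation.
import os
import random

import cv2
import numpy as np


def sketch_train_generator(list_file, batch_size, num_classes, img_path, input_h, input_w):
    with open(list_file, 'r') as f:
        lines = [line for line in f.readlines() if line.strip()]
    while True:  # Keras fit_generator expects the generator to yield forever
        random.shuffle(lines)
        for start in range(0, len(lines) - batch_size + 1, batch_size):
            batch = lines[start:start + batch_size]
            images = np.zeros((batch_size, input_h, input_w, 3), dtype='float32')
            labels = np.zeros((batch_size,), dtype='int32')
            for i, line in enumerate(batch):
                path = line.split(' ')[0]
                labels[i] = int(line.strip().split(' ')[-1])
                img = cv2.imread(os.path.join(img_path, path))
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                images[i] = cv2.resize(img, (input_w, input_h))
            # One-hot encode the integer labels for categorical_crossentropy.
            yield images, np.eye(num_classes, dtype='float32')[labels]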

import os
import random

from keras.optimizers import Adam


def main():
    # When proposed is True the training list is shuffled and split into k folds and the
    # *_proposed generators are used; otherwise training runs on fixed train/val lists.
    proposed = False
    if proposed:
        outputdir = 'result/NoAugment_{}/'.format(proposed)
        if os.path.isdir(outputdir):
            print('save in :' + outputdir)
        else:
            os.makedirs(outputdir)

        train_img_path = '/data/MIT-BIH_AD/'
        train_file = '/home/ccl/Documents/ECG-Arrhythmia-classification-in-2D-CNN/MIT-BIH_AD_train_paper.txt'
        num_classes = 8
        k = 10

        with open(train_file, 'r') as f1:
            lines = f1.readlines()
        train_samples = len(lines)
        val_samples = len(lines) // k

        # Shuffle the samples via a random index permutation, then cut the list into chunks
        # of val_samples lines. Note: when len(lines) is not divisible by k this produces
        # k + 1 chunks, with the remainder in the last one.
        num = len(lines)
        index = [n for n in range(num)]
        random.shuffle(index)
        lines = [lines[i] for i in index]
        new_lines = []
        temp = []
        for i in range(num):
            if i % val_samples == 0:
                temp = []
                new_lines.append(temp)
            temp.append(lines[i])

        batch_size = 32
        epochs = 40
        input_h = 96
        input_w = 96
        augmentation = False

        model = proposed_model()
        lr = 0.0001
        adam = Adam(lr=lr)
        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
        model.summary()

        history = model.fit_generator(
            generator_train_batch_proposed(new_lines, k, batch_size, num_classes, train_img_path,
                                           input_h, input_w, augmentation=augmentation),
            steps_per_epoch=train_samples // batch_size,
            epochs=epochs,
            callbacks=[Step()],
            validation_data=generator_val_batch_proposed(new_lines, k, batch_size, num_classes, train_img_path,
                                                         input_h, input_w, augmentation=augmentation),
            validation_steps=val_samples // batch_size,
            verbose=1)

        plot_history(history, outputdir)
        save_history(history, outputdir)
        model.save_weights(outputdir + 'proposed_model_{}.h5'.format(proposed))
    else:
        outputdir = 'result/NoAugment_{}/'.format(proposed)
        if os.path.isdir(outputdir):
            print('save in :' + outputdir)
        else:
            os.makedirs(outputdir)

        train_img_path = '/data/MIT-BIH_AD/'
        test_img_path = '/data/MIT-BIH_AD/'
        train_file = '/home/ccl/Documents/ECG-Arrhythmia-classification-in-2D-CNN/MIT-BIH_AD_train.txt'
        test_file = '/home/ccl/Documents/ECG-Arrhythmia-classification-in-2D-CNN/MIT-BIH_AD_val.txt'
        num_classes = 8

        with open(train_file, 'r') as f1:
            train_samples = len(f1.readlines())
        with open(test_file, 'r') as f2:
            val_samples = len(f2.readlines())

        batch_size = 32
        epochs = 40
        input_h = 96
        input_w = 96

        model = proposed_model()
        lr = 0.0001
        adam = Adam(lr=lr)
        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
        model.summary()

        history = model.fit_generator(
            generator_train_batch(train_file, batch_size, num_classes, train_img_path, input_h, input_w),
            steps_per_epoch=train_samples // batch_size,
            epochs=epochs,
            callbacks=[Step()],
            validation_data=generator_val_batch(test_file, batch_size, num_classes, test_img_path, input_h, input_w),
            validation_steps=val_samples // batch_size,
            verbose=1)

        plot_history(history, outputdir)
        save_history(history, outputdir)
        model.save_weights(outputdir + 'proposed_model_{}.h5'.format(proposed))
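
# Sanity check for the fold-splitting logic in the proposed branch above: after shuffling,
# the list is cut every len(lines) // k entries, which gives k folds of equal size plus a
# smaller remainder fold whenever the sample count is not divisible by k. The snippet below
# only mirrors that splitting code on dummy data; how the *_proposed generators pick the
# training and validation folds is part of the project and not reproduced here.
import random


def split_into_folds(lines, k):
    random.shuffle(lines)
    fold_size = len(lines) // k
    folds, fold = [], []
    for i, line in enumerate(lines):
        if i % fold_size == 0:
            fold = []
            folds.append(fold)
        fold.append(line)
    return folds


if __name__ == '__main__':
    dummy = ['sample_{}'.format(n) for n in range(103)]
    print([len(fold) for fold in split_into_folds(dummy, k=10)])  # ten folds of 10 plus a fold of 3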

import os
import random

import cv2
import numpy as np
from tqdm import tqdm
from keras.optimizers import Adam


def main():
    # Initialise the model, load trained weights and run single-image inference over the test list.
    class_names = ['Normal', 'LBBB', 'RBBB', 'APC', 'PVC', 'PAB', 'VEB', 'VFW']  # PE is PAB
    imageh = 128
    imagew = 128
    inputH = 96
    inputW = 96

    # ---------------------------change file models & weights--------------------
    model = proposed_model()
    lr = 0.0001
    adm = Adam(lr=lr)
    model.compile(loss='categorical_crossentropy', optimizer=adm, metrics=['accuracy'])
    model.summary()
    model.load_weights('result/first_attempt_False/proposed_model_False.h5', by_name=True)
    # ---------------------------change models & weights--------------------

    test_file = './MIT-BIH_AD_test.txt'
    test_img_path = '/home/cc_lee/Dataset/MIT-BIH_AD'
    augmentation = False
    output_img = False
    outputdir = os.path.join('./inference/', str(augmentation))
    os.makedirs(outputdir, exist_ok=True)
    os.makedirs(outputdir + '/False', exist_ok=True)  # wrongly classified images
    os.makedirs(outputdir + '/True', exist_ok=True)   # correctly classified images

    with open(test_file, 'r') as f:
        lines = f.readlines()
    random.shuffle(lines)

    TP = 0
    count = 0
    total = len(lines)
    # counter tracks how often each class is predicted, tp_counter how often that prediction
    # is correct, so tp_counter / counter below is per-class precision.
    counter = {name: 0 for name in class_names}
    tp_counter = {name: 0 for name in class_names}

    for line in tqdm(lines):
        path = line.split(' ')[0]
        label = line.split(' ')[-1].strip('\n')
        answer = int(label)

        img = os.path.join(test_img_path, path)
        image = cv2.imread(img)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if augmentation:
            # Centre-crop to inputH x inputW, then resize back to the network input size.
            Hshmean = int(np.round(np.max([0, np.round((imageh - inputH) / 2)])))
            Wshmean = int(np.round(np.max([0, np.round((imagew - inputW) / 2)])))
            image = image[Hshmean:Hshmean + inputH, Wshmean:Wshmean + inputW, :]
            image = cv2.resize(image, (imagew, imageh))

        input_data = np.zeros((1, imagew, imageh, 3), dtype='float32')
        input_data[0] = image
        pred = model.predict(input_data)
        label = np.argmax(pred[0])

        if label == answer:
            TP += 1
            tp_counter[class_names[label]] += 1
        count += 1
        counter[class_names[label]] += 1

        if output_img:
            # Annotate the beat image with ground truth, prediction and probability, then
            # save it under True/ or False/ depending on correctness.
            color_t = (0, 255, 255) if np.argmax(pred[0]) == 1 else (0, 255, 0)
            image = cv2.resize(image, (128 * 3, 128 * 3))
            cv2.putText(image, class_names[answer], (10, 30), cv2.FONT_ITALIC, 1, color_t, 1)
            cv2.putText(image, class_names[label], (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 1, color_t, 1)
            cv2.putText(image, "prob: %.4f" % pred[0][label], (10, 110), cv2.FONT_HERSHEY_SIMPLEX, 1, color_t, 1)
            cv2.imwrite(os.path.join(outputdir, str(answer == label),
                                     '{}_{}'.format(class_names[answer],
                                                    os.path.split(path)[1][:-4] + '_result.jpg')),
                        image)

        print('{}/{} Acc: {} Pred: {} Answer: {}'.format(count, total, TP / count,
                                                         class_names[label], class_names[answer]))

    # Per-class precision: correct predictions / total predictions for that class
    # (guarded against classes that were never predicted).
    for name in class_names:
        ratio = tp_counter[name] / counter[name] if counter[name] else float('nan')
        print('{}: {}/{} = {}'.format(name, tp_counter[name], counter[name], ratio))
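
# Alternative bookkeeping for the evaluation loop above: collecting the (answer, prediction)
# pairs and passing them to scikit-learn yields a confusion matrix plus per-class precision,
# recall and F1 in one call, instead of maintaining the counter dictionaries by hand. This is
# only a sketch and assumes scikit-learn is installed; report_results is a hypothetical helper
# that would be fed the lists accumulated inside the loop.
from sklearn.metrics import classification_report, confusion_matrix


def report_results(y_true, y_pred, class_names):
    labels = list(range(len(class_names)))
    # Rows are true classes, columns are predicted classes.
    print(confusion_matrix(y_true, y_pred, labels=labels))
    print(classification_report(y_true, y_pred, labels=labels,
                                target_names=class_names, digits=4))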

import os

from keras.optimizers import Adam


def main():
    # 8-class training on MIT-BIH_AD_v3 with 128x128 inputs, 120 epochs and class weighting.
    outputdir = 'result/192_128_class_weight_v3_120eps/'
    if os.path.isdir(outputdir):
        print('save in :' + outputdir)
    else:
        os.makedirs(outputdir)

    train_img_path = '/data/MIT-BIH_AD_v3/'
    test_img_path = '/data/MIT-BIH_AD_v3/'
    train_file = './MIT-BIH_AD_train.txt'
    test_file = './MIT-BIH_AD_val.txt'
    num_classes = 8

    with open(train_file, 'r') as f1:
        train_samples = len(f1.readlines())
    with open(test_file, 'r') as f2:
        val_samples = len(f2.readlines())

    batch_size = 32
    epochs = 120
    input_h = 128
    input_w = 128

    # Class weights of the form (1 - class_count / 107620) * num_classes, built from the
    # per-class beat counts of the training set (the counts sum to 107620), so
    # under-represented classes contribute more to the loss.
    class_weight = {
        0: (1 - (75016 / 107620)) * 8,
        1: (1 - (8072 / 107620)) * 8,
        2: (1 - (7256 / 107620)) * 8,
        3: (1 - (2544 / 107620)) * 8,
        4: (1 - (7130 / 107620)) * 8,
        5: (1 - (7024 / 107620)) * 8,
        6: (1 - (106 / 107620)) * 8,
        7: (1 - (472 / 107620)) * 8
    }

    model = proposed_model(nb_classes=num_classes)
    lr = 0.0001
    adam = Adam(lr=lr)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    model.summary()

    history = model.fit_generator(
        generator_train_batch(train_file, batch_size, num_classes, train_img_path, input_h, input_w),
        steps_per_epoch=train_samples // batch_size,
        epochs=epochs,
        callbacks=[Step()],
        validation_data=generator_val_batch(test_file, batch_size, num_classes, test_img_path, input_h, input_w),
        validation_steps=val_samples // batch_size,
        verbose=1,
        class_weight=class_weight)

    plot_history(history, outputdir)
    save_history(history, outputdir)
    model.save_weights(outputdir + 'proposed_model.h5')
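
# The class_weight dictionary above hard-codes the per-class beat counts of the training list
# (they sum to the 107620 total used in the formula). As a hedged alternative, the helper below
# derives the same (1 - count / total) * num_classes weights directly from the list file, so the
# weights stay in sync if the training split changes; class_weights_from_list_file is a
# hypothetical name, not part of the repository.
from collections import Counter


def class_weights_from_list_file(list_file, num_classes):
    with open(list_file, 'r') as f:
        labels = [int(line.strip().split(' ')[-1]) for line in f if line.strip()]
    counts = Counter(labels)
    total = len(labels)
    return {c: (1 - counts.get(c, 0) / total) * num_classes for c in range(num_classes)}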