def out_test_run(self, **kwargs):
    dl_test_id = kwargs['dl_test_id']
    test_type = kwargs['test_type']
    tr_model_id = kwargs['tr_model_id']
    file_path = kwargs['file_path']
    # e.g. /public_open/analysis/PCB64CAED/upload
    local_path = file_path.replace('/public_open', '/medimg/public_open')
    open_save_path = file_path.replace('/upload', '/download/x')
    ckpt_path = '/home/user01/Javis_dl_system/models/I66/'

    # Fetch the saved configuration of the tr_model to restore.
    cur = OraDB.prepareCursor()
    cur.execute(
        "select layer_cnt, loss_info, activation_info from training where tr_model_id=:tr_model_id",
        {'tr_model_id': tr_model_id})
    for row in cur:
        layer_n, cost_name, act_func = list(row)
    OraDB.releaseConn()

    print('file path', file_path)
    # Public (open) data path: stage the files locally and run DICOM conversion.
    print('local path', local_path)
    temp_path = local_path.replace(
        '/medimg/public_open', '/home/user01/Javis_dl_system/data_temp/public_open')
    print('temp path', temp_path)
    if not os.path.exists(temp_path):
        os.makedirs(temp_path)
    down_path = temp_path.replace('/upload', '/download') + '/x'
    nas_down_path = down_path.replace(
        '/home/user01/Javis_dl_system/data_temp/public_open', '/medimg/public_open')
    print('nas down path', nas_down_path)
    if not os.path.exists(nas_down_path):
        os.makedirs(nas_down_path)
    data_mover.nas_to_dlserver(local_path, temp_path)
    dcm_converting = Open_Dicom_converter.Converter(temp_path)
    dcm_converting._convert()
    data_loader = TestDataLoader.DataLoader(temp_path, test_type)
    tester = out_Tester(file_path=temp_path, ckpt_path=ckpt_path,
                        data_loader=data_loader, act_func=act_func,
                        cost_name=cost_name, layer_n=layer_n,
                        tr_model_id=tr_model_id, dl_test_id=dl_test_id,
                        save_path=open_save_path)
    tester.infer3(1, 1)
def in_test_run(self, **kwargs):
    dl_test_id = kwargs['dl_test_id']
    test_type = kwargs['test_type']
    tr_model_id = kwargs['tr_model_id']
    studylist_id = kwargs['studylist_id']
    file_path = kwargs['file_path']
    local_path = file_path
    ckpt_path = '/home/user01/Javis_dl_system/models/I66/'

    # Fetch the saved configuration of the tr_model to restore.
    cur = OraDB.prepareCursor()
    cur.execute(
        "select layer_cnt, loss_info, activation_info from training where tr_model_id=:tr_model_id",
        {'tr_model_id': tr_model_id})
    for row in cur:
        layer_n, cost_name, act_func = list(row)
    OraDB.releaseConn()

    # Internal data path: DICOM conversion only runs for the public path above.
    temp_path = local_path.replace(
        '/medimg/', '/home/user01/Javis_dl_system/data_temp/')
    if not os.path.exists(temp_path):
        os.makedirs(temp_path)
    data_mover.nas_to_dlserver(local_path, temp_path)
    data_loader = TestDataLoader.DataLoader(temp_path, test_type)
    tester = in_Tester(file_path=temp_path, ckpt_path=ckpt_path,
                       data_loader=data_loader, act_func=act_func,
                       cost_name=cost_name, layer_n=layer_n,
                       tr_model_id=tr_model_id, dl_test_id=dl_test_id,
                       studylist_id=studylist_id)
    # Pick the inference routine depending on whether labels exist,
    # then clean up the staged download directory either way.
    if data_loader._existence_label() == 0:
        tester.infer2(1, 1)
    else:
        tester.infer1(1, 1)
    shutil.rmtree(temp_path + '/img/DownLoad', ignore_errors=True)
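# The NAS <-> local staging mapping above is repeated in several places in
# this module. A small pair of helpers could centralize it; this is only a
# sketch, and nas_to_temp / temp_to_nas are hypothetical names, not part of
# the existing codebase. The path constants are copied from the code above.
def nas_to_temp(path):
    """Map a NAS path under /medimg/ to the local staging directory (sketch)."""
    return path.replace('/medimg/', '/home/user01/Javis_dl_system/data_temp/')

def temp_to_nas(path):
    """Inverse mapping from the local staging directory back to NAS (sketch)."""
    return path.replace('/home/user01/Javis_dl_system/data_temp/', '/medimg/')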
def __init__(self, data_loader, cost_name, act_func, layer_n, ckpt_path,
             file_path, tr_model_id, dl_test_id, save_path, opt_kwargs={}):
    self.data_loader = data_loader
    if tr_model_id == 1:
        self.ckpt_path = '/home/user01/Javis_dl_system/models/I66/1/Unet.cpkt'
    else:
        # e.g. '/home/obsk/Javis_dl_system/models/I66/'
        self.ckpt_path = ckpt_path + str(tr_model_id) + '/' + str(tr_model_id) + '.ckpt'
    self.file_path = file_path
    self.dl_test_id = dl_test_id
    self.save_path = save_path

    # model loader. model id 1 : default model
    if tr_model_id == 1:
        self.net = default_Unet.Unet(
            cost="mfc", cost_kwargs={"class_weights": [1e-6, 1 - 1e-6]})
    else:
        self.net = Unet.Unet(
            cost=cost_name, act_func=act_func, layer_n=layer_n,
            cost_kwargs={"class_weights": [1e-6, 1 - 1e-6]})
    self.opt_kwargs = opt_kwargs

    # default ai result
    self.status = 'N'

    # oracle db information (replaces the earlier MongoDB lookup).
    # cursor.execute() returns the cursor itself, so the value must be fetched.
    self.cursor = OraDB.prepareCursor()
    self.cursor.execute(
        "select ruserid from dl_test where dl_test_id=:dl_test_id",
        {'dl_test_id': self.dl_test_id})
    self.ruserid = self.cursor.fetchone()[0]
    print('tester initialized')
def __init__(self, data_loader, cost_name, act_func, layer_n, ckpt_path,
             file_path, tr_model_id, dl_test_id, studylist_id, opt_kwargs={}):
    self.data_loader = data_loader
    if tr_model_id == 1:
        self.ckpt_path = '/home/user01/Javis_dl_system/models/I66/1/Unet.cpkt'
    else:
        self.ckpt_path = ckpt_path + str(tr_model_id) + '/' + str(tr_model_id) + '.ckpt'
    self.file_path = file_path
    self.dl_test_id = dl_test_id
    self.studylist_id = studylist_id

    # model loader. model id 1 : default model
    if tr_model_id == 1:
        self.net = default_Unet.Unet(
            cost="mfc", cost_kwargs={"class_weights": [1e-6, 1 - 1e-6]})
    else:
        self.net = Unet.Unet(
            cost=cost_name, act_func=act_func, layer_n=layer_n,
            cost_kwargs={"class_weights": [1e-6, 1 - 1e-6]})
    self.opt_kwargs = opt_kwargs

    # default ai result
    self.status = 'N'

    # oracle db information
    # cursor.execute() returns the cursor, not the value, so fetch explicitly.
    self.cursor = OraDB.prepareCursor()
    self.cursor.execute(
        "select series_uid from studylist where studylist_id=:studylist_id",
        {'studylist_id': self.studylist_id})
    self.series_uid = self.cursor.fetchone()[0]
    self.cursor.execute(
        "select ruserid from dl_test where dl_test_id=:dl_test_id",
        {'dl_test_id': self.dl_test_id})
    self.ruserid = self.cursor.fetchone()[0]
    print('tester initialized')
def __init__(self, path):
    # e.g. /medimg/data/I66/DICOM/train/abnormal/I66_mri_AB_00000039
    self.path = path
    # The patient directory name (e.g. I66_mri_AB_00000007) follows the
    # /abnormal or /normal segment of the path.
    if '/abnormal' in self.path:
        start_n = self.path.find('/abnormal')
        leng = len('/abnormal')
    else:
        start_n = self.path.find('/normal')
        leng = len('/normal')
    self.patientName = self.path[start_n + leng + 1:]
    print('DataSaver Initialized')
    self.conn = OraDB.createConn(OraDB.INFO)
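# Note: for the directory layouts shown in the comments above, the patient
# name is simply the last path component, so the find/slice logic could be
# replaced by a one-liner. A sketch under that assumption (hypothetical
# helper, equivalent for paths like .../abnormal/I66_mri_AB_00000039):
def _patient_name_from_path(path):
    return os.path.basename(path.rstrip('/'))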
# -*- coding: utf-8 -*-
"""
Main Processor Module, Made by BJH, CGM, JYJ
OBS Korea
"""
import subprocess
import time

from Oracle_connector import OraDB

OraDB.createConn(OraDB.INFO)

if __name__ == "__main__":
    while True:
        time.sleep(5)
        cur = OraDB.prepareCursor()
        cur.execute("select tr_model_id, tr_status, stop_yn from training")  # training
        for row in cur:
            tr_model_id, tr_status, stop_yn = list(row)
            if tr_status == 'running':
                train_proc = subprocess.Popen(['python', 'train_process.py'])
                train_proc.wait()
        OraDB.releaseConn()
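# The Popen-then-wait pair above blocks until train_process.py exits. On
# Python 3.5+ the same behavior can be written with subprocess.run, which
# waits by default and exposes the exit code. Sketch only (the current
# ignore-failures behavior is preserved by not passing check=True):
#
#     result = subprocess.run(['python', 'train_process.py'])
#     print('train_process exited with', result.returncode)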
def infer1(self, n_t_iters, b_size, keep_prob=1.0):
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(sess, self.ckpt_path)
        print('model restored')
        t_accuracy = 0
        positions = []
        for _ in range(n_t_iters):
            for up_down in range(2):
                for crop_number in range(4):
                    x, y, files_list, files_list_down, he, wi = \
                        self.data_loader.test_load_batch_1(b_size, up_down, crop_number)
                    predictions, dice = sess.run(
                        (self.net.predict, self.net.dice),
                        feed_dict={self.net.x: x,
                                   self.net.y: y,
                                   self.net.training: False,
                                   self.net.keep_prob: keep_prob})
                    # Each crop covers a 256x256 window; crops 1-3 are shifted
                    # by 44 px along x and/or y.
                    if crop_number == 0:
                        x_offset, y_offset = 0, 0
                    elif crop_number == 1:
                        x_offset, y_offset = 44, 0
                    elif crop_number == 2:
                        x_offset, y_offset = 0, 44
                    else:
                        x_offset, y_offset = 44, 44
                    batch_size, nz, ny, nx, _ = np.shape(predictions)
                    for index in range(batch_size):
                        files, offset = files_list[index]
                        for z_index in range(nz):
                            if offset == 0 or 128 <= offset + z_index:
                                if offset == 0:
                                    file_path = files[z_index]
                                    file_down_path = files_list_down[z_index]
                                else:
                                    file_path = files[offset + z_index]
                                    file_down_path = files_list_down[offset + z_index]
                                image = cv2.imread(file_down_path)
                                if image is None:
                                    image = np.zeros([300, 300, 3])
                                roi = image[y_offset:y_offset + 256,
                                            x_offset:x_offset + 256]
                                b_prediction = predictions[index, z_index, ..., 0] > 0.5
                                # Black out predicted pixels, then overlay them in red.
                                b_image = cv2.bitwise_and(
                                    roi, roi,
                                    mask=(~b_prediction * 255).astype(np.uint8))
                                f_image = np.stack(
                                    [np.zeros([ny, nx]),
                                     np.zeros([ny, nx]),
                                     b_prediction * 255], axis=-1)
                                merge = b_image + f_image
                                image[y_offset:y_offset + 256,
                                      x_offset:x_offset + 256] = merge
                                cv2.imwrite(file_down_path, image)
                                if crop_number == 3:
                                    # After the last crop, extract lesion bounding
                                    # boxes from the accumulated red channel.
                                    position_t = cv2.imread(file_down_path)
                                    position = position_t[:, :, 2]
                                    position = cv2.resize(
                                        position, (wi, he),
                                        interpolation=cv2.INTER_AREA)
                                    _, position = cv2.threshold(
                                        position, 127, 255, cv2.THRESH_BINARY)
                                    num_labels, markers, state, cent = \
                                        cv2.connectedComponentsWithStats(position)
                                    if num_labels != 1:
                                        self.status = 'Y'
                                        for idx in range(1, num_labels):
                                            x, y, w, h, size = state[idx]
                                            positions.append([z_index, w, h, x, y])
                                    roi = cv2.imread(file_path)
                                    b_image = cv2.bitwise_and(
                                        roi, roi,
                                        mask=~position.astype(np.uint8))
                                    f_image = np.stack(
                                        [np.zeros([he, wi]),
                                         np.zeros([he, wi]),
                                         position], axis=-1)
                                    merge = b_image + f_image
                                    cv2.imwrite(file_down_path, merge)
                    t_accuracy += dice
        print(positions)
        # Save the AI result flag.
        self.cursor.execute(
            "update dl_test set ai_result=:ai_result where dl_test_id = :dl_test_id",
            {'ai_result': self.status, 'dl_test_id': self.dl_test_id})
        OraDB.dbCommit()
        print(self.status)
        # Store the label info in test_labelinfo.
        for imgs in positions:
            print(imgs)
            imgindex, width, height, x_left_up, y_left_up = imgs
            x_right_down = x_left_up + width
            y_right_down = y_left_up + height
            handles = ('{"start":{"x":' + str(x_left_up) + ',"y":' + str(y_left_up)
                       + ',"highlight":true,"active":true},"end":{"x":'
                       + str(x_right_down) + ',"y":' + str(y_right_down)
                       + ',"highlight":true,"active":false},"textBox":'
                         '{"active":false,"hasMoved":false,"movesIndependently":false,'
                         '"allowedOutsideImage":true,"hasBoundingBox":true,"x":'
                       + str(x_right_down) + ',"y":' + str(y_right_down)
                       + ',"boundingBox":{"width":' + str(width)
                       + ',"height":' + str(height) + ',"left":0,"top":0}}}')
            self.cursor.execute(
                "insert into test_labelinfo(test_lb_id, dl_test_id, studylist_id, file_name, label_info, image_index_number, series_uid, tooltype, del_yn, createdate, ruserid) "
                "values(test_labelinfo_test_lb_id_seq.nextval, :dl_test_id, :studylist_id, :file_name, :label_info, :image_index_number, :series_uid, :tooltype, :del_yn, :createdate, :ruserid)",
                [int(self.dl_test_id), self.studylist_id, None, handles,
                 imgindex, self.series_uid, 'rectangleRoi', 'N',
                 datetime.today().strftime("%Y%m%d%H%M%S"), self.ruserid])
            OraDB.dbCommit()
        print('data stored to test_labelinfo')
        # Save the dice score.
        a_accuracy = t_accuracy / n_t_iters / 8
        self.cursor.execute(
            "update dl_test set dice_score=:a_accuracy where dl_test_id=:dl_test_id",
            [a_accuracy, self.dl_test_id])
        OraDB.dbCommit()
        print("test accuracy {:}".format(a_accuracy))
        OraDB.releaseConn()
    del_path = self.file_path + 'DownLoad'
    shutil.rmtree(del_path, ignore_errors=True)
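# The handles payload above is assembled by string concatenation, which is
# easy to break (quoting, commas, booleans). A sketch of the same structure
# built with the standard json module instead; the field names are copied
# from the string above, and build_handles is a hypothetical helper name:
import json

def build_handles(x1, y1, x2, y2, width, height):
    return json.dumps({
        "start": {"x": x1, "y": y1, "highlight": True, "active": True},
        "end": {"x": x2, "y": y2, "highlight": True, "active": False},
        "textBox": {
            "active": False, "hasMoved": False, "movesIndependently": False,
            "allowedOutsideImage": True, "hasBoundingBox": True,
            "x": x2, "y": y2,
            "boundingBox": {"width": width, "height": height, "left": 0, "top": 0},
        },
    })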
def infer3(self, n_t_iters, b_size, keep_prob=1.0):
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(sess, self.ckpt_path)
        print('model restored')
        target_img_number_set = ''
        for _ in range(n_t_iters):
            positions = []
            for up_down in range(2):
                for crop_number in range(4):
                    x, files_list, files_list_down, he, wi = \
                        self.data_loader.test_load_batch_2(b_size, up_down, crop_number)
                    predictions = sess.run(
                        self.net.predict,
                        feed_dict={self.net.x: x,
                                   self.net.training: False,
                                   self.net.keep_prob: keep_prob})
                    if crop_number == 0:
                        x_offset, y_offset = 0, 0
                    elif crop_number == 1:
                        x_offset, y_offset = 44, 0
                    elif crop_number == 2:
                        x_offset, y_offset = 0, 44
                    else:
                        x_offset, y_offset = 44, 44
                    batch_size, nz, ny, nx, _ = np.shape(predictions)
                    for index in range(batch_size):
                        files, offset = files_list[index]
                        for z_index in range(nz):
                            if offset == 0 or 128 <= offset + z_index:
                                if offset == 0:
                                    file_path = files[z_index]
                                    file_down_path = files_list_down[z_index]
                                else:
                                    file_path = files[offset + z_index]
                                    file_down_path = files_list_down[offset + z_index]
                                image = cv2.imread(file_down_path)
                                if image is None:
                                    image = np.zeros([300, 300, 3])
                                roi = image[y_offset:y_offset + 256,
                                            x_offset:x_offset + 256]
                                b_prediction = predictions[index, z_index, ..., 0] > 0.5
                                b_image = cv2.bitwise_and(
                                    roi, roi,
                                    mask=(~b_prediction * 255).astype(np.uint8))
                                f_image = np.stack(
                                    [np.zeros([ny, nx]),
                                     np.zeros([ny, nx]),
                                     b_prediction * 255], axis=-1)
                                merge = b_image + f_image
                                image[y_offset:y_offset + 256,
                                      x_offset:x_offset + 256] = merge
                                cv2.imwrite(file_down_path, image)
                                if crop_number == 3:
                                    position_t = cv2.imread(file_down_path)
                                    position = position_t[:, :, 2]
                                    position = cv2.resize(
                                        position, (wi, he),
                                        interpolation=cv2.INTER_AREA)
                                    _, position = cv2.threshold(
                                        position, 127, 255, cv2.THRESH_BINARY)
                                    num_labels, markers, state, cent = \
                                        cv2.connectedComponentsWithStats(position)
                                    if num_labels != 1:
                                        self.status = 'Y'
                                        for idx in range(1, num_labels):
                                            x, y, w, h, size = state[idx]
                                            positions.append([z_index, w, h, x, y])
                                    roi = cv2.imread(file_path)
                                    b_image = cv2.bitwise_and(
                                        roi, roi,
                                        mask=~position.astype(np.uint8))
                                    f_image = np.stack(
                                        [np.zeros([he, wi]),
                                         np.zeros([he, wi]),
                                         position], axis=-1)
                                    merge = b_image + f_image
                                    cv2.imwrite(file_down_path, merge)
            # positions = [[image_index, width, height, x_start, y_start]]
            print(positions)
            for img_info in positions:
                target_img_number_set += str(img_info[0] + 1) + '_'
        # ai result
        self.cursor.execute(
            "update dl_test set ai_result=:ai_result where dl_test_id = :dl_test_id",
            {'ai_result': self.status, 'dl_test_id': self.dl_test_id})
        OraDB.dbCommit()
        self.cursor.execute(
            "update op_test set file_path=:file_path where dl_test_id=:dl_test_id",
            {'file_path': self.save_path, 'dl_test_id': self.dl_test_id})
        OraDB.dbCommit()
        self.cursor.execute(
            "update op_test set valx1=:valx1 where dl_test_id=:dl_test_id",
            {'valx1': target_img_number_set[:-1], 'dl_test_id': self.dl_test_id})
        OraDB.dbCommit()
        print(self.status)
        OraDB.releaseConn()
    print('data copying from local to nas')
    down_path = self.file_path.replace('/upload', '/download') + '/x'
    nas_down_path = down_path.replace(
        '/home/user01/Javis_dl_system/data_temp/', '/medimg/')
    chmod_path = nas_down_path[:-2]
    data_mover.on_copytree(down_path, nas_down_path, chmod_path)
    print('data copy finished')
    print('data deleting from local')
    del_path = self.file_path[:self.file_path.find('analysis') + 8]
    shutil.rmtree(del_path, ignore_errors=True)
    print('local data deleting finished')
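# The crop_number -> (x_offset, y_offset) if/elif chains in infer1 and infer3
# encode a fixed 2x2 grid of 256x256 windows shifted by 44 px. A lookup table
# is a compact equivalent; sketch only, with the values copied verbatim from
# the branches above so behavior is unchanged:
CROP_OFFSETS = [(0, 0), (44, 0), (0, 44), (44, 44)]
# usage: x_offset, y_offset = CROP_OFFSETS[crop_number]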
def saveYdata_labeled(self):
    cur = OraDB.prepareCursor()
    # cursor.execute() returns the cursor, so fetch the study_uid explicitly.
    cur.execute(
        'select study_uid from ADMIN.studylist where patient_uid=:patient_uid',
        {'patient_uid': self.patientName})
    study_uid = cur.fetchone()[0]
    cur.execute(
        'select * from ADMIN.ohif_labels where studyinstanceuid=:studyinstanceuid',
        {'studyinstanceuid': study_uid})
    labels = cur.fetchall()
    global w, h
    w = 0
    h = 0
    for row in labels:
        id, measurementdata, userid, studyinstanceuid, tooltype, seriesinstanceuid, \
            currentimageidindex, patientid, patientname, studyindexnumber, \
            imagewidth, imageheight, seriesdescription = list(row)
        # Skip rows whose image index came back as a float.
        if isinstance(currentimageidindex, float):
            continue
        try:
            # self.path contains only the data directory, e.g.
            # /opt/home/data/I66/NonDICOM/train/abnormal/I66_mri_AB_00000007/x
            # /home/bjh/obsk/v_nas2/I66/DICOM/train/abnormal/I66_mri_AB_00000039
            labelPath = self.path + '/img/y/' + str(currentimageidindex) + '.jpg'
            imageLoc = self.path + '/img/y/'
            if not os.path.exists(imageLoc):
                os.makedirs(imageLoc)
            w, h = imagewidth, imageheight
            img = cv2.imread(labelPath)
            if img is None:
                img = np.zeros((h, w))
            m_data = json.loads(measurementdata)
            handles = m_data['handles']
            if tooltype == 'polygonRoi':
                pts = []
                for pt in handles.values():
                    pts.append((int(pt['x']), int(pt['y'])))
                labeled = cv2.fillConvexPoly(img, np.array(pts), (255, 255, 255))
                cv2.imwrite(labelPath, labeled)
        except Exception:
            continue
    OraDB.releaseConn()

    # self.path = /home/bjh/obsk/v_nas2/I66/DICOM/train/abnormal/I66_mri_AB_00000039
    x_data_path = self.path + '/img/x/'
    x_data_cnt = len(os.listdir(x_data_path))
    x_data = self._sort_by_number(os.listdir(x_data_path))
    for i in range(x_data_cnt):
        if os.path.isfile(self.path + '/img/y/' + str(i + 1) + '.jpg'):
            os.rename(self.path + '/img/y/' + str(i + 1) + '.jpg',
                      x_data_path.replace('/x/', '/y/') + x_data[i])
        else:
            # No label drawn for this slice: write an all-black mask.
            label = np.zeros((h, w))
            file_name = x_data_path.replace('/x/', '/y/') + x_data[i]
            cv2.imwrite(file_name, label)
def ts_main_process(self):
    # Check the DB, then fetch the process info.
    time.sleep(3)
    try:
        cur = OraDB.prepareCursor()
        if self.test_type == 'in':
            # Start the test.
            print('test start')
            cur.execute(
                "update dl_test set dl_status = 'running' where dl_test_id = :dl_test_id",
                {'dl_test_id': int(self.dl_test_id)})
            OraDB.dbCommit()
            self.in_test_run(dl_test_id=self.dl_test_id,
                             test_type=self.test_type,
                             tr_model_id=self.tr_model_id,
                             studylist_id=self.studylist_id,
                             file_path=self.file_path)
            # When the test finishes, set dl_status to end.
            cur.execute(
                "update dl_test set dl_status = 'end' where dl_test_id = :dl_test_id",
                {'dl_test_id': int(self.dl_test_id)})
            OraDB.dbCommit()
            OraDB.releaseConn()
        elif self.test_type == 'out':
            # Start the test.
            print('test start')
            cur.execute(
                "update dl_test set dl_status = 'running' where dl_test_id = :dl_test_id",
                {'dl_test_id': int(self.dl_test_id)})
            OraDB.dbCommit()
            self.out_test_run(dl_test_id=int(self.dl_test_id),
                              test_type=self.test_type,
                              tr_model_id=self.tr_model_id,
                              file_path=self.file_path)
            # When the test finishes, set dl_status to end.
            cur.execute(
                "update dl_test set dl_status = 'end' where dl_test_id = :dl_test_id",
                {'dl_test_id': int(self.dl_test_id)})
            OraDB.dbCommit()
            OraDB.releaseConn()
    except Exception:
        cur = OraDB.prepareCursor()
        # On error, set dl_status to fail.
        cur.execute(
            "update dl_test set dl_status = 'fail' where dl_test_id = :dl_test_id",
            {'dl_test_id': int(self.dl_test_id)})
        OraDB.dbCommit()
        OraDB.releaseConn()
        print('Test Process Error Occurred')
# process runner
if __name__ == "__main__":
    time.sleep(3)
    try:
        cur = OraDB.prepareCursor()
        cur.execute(
            "select dl_test_id, test_type, tr_model_id, studylist_id, file_path, dl_status from dl_test")
        for row in cur:
            dl_test_id, test_type, tr_model_id, studylist_id, file_path, dl_status = list(row)
            if dl_status == 'run':
                test = test_proc(dl_test_id=dl_test_id, test_type=test_type,
                                 tr_model_id=tr_model_id,
                                 studylist_id=studylist_id,
                                 file_path=file_path)
                test.ts_main_process()
                break
    except Exception:
        print('Test Runner Error Occurred')
    finally:
        OraDB.releaseConn()
def train(self, n_epochs, n_t_iters, n_v_iters, b_size=1, keep_prob=1.0):
    global_step = tf.Variable(0, trainable=False)
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        self._get_optimizer(global_step, n_t_iters)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        ckpt_st = tf.train.get_checkpoint_state(self.net_path)
        if ckpt_st is not None:
            saver.restore(sess, ckpt_st.model_checkpoint_path)
        for epoch in range(n_epochs):
            # Track per-epoch loss and accuracy.
            total_acc = 0
            total_loss = 0
            for up_down in range(2):
                for crop_number in range(4):
                    offset = (crop_number + (up_down + epoch * 2) * 4) * \
                             (n_t_iters // 8 * 3 + n_v_iters // 8)
                    for t_index in range(n_t_iters // 8):
                        for augment_number in range(3):
                            step = offset + t_index * 3 + augment_number + 1
                            total_step = offset + t_index * 3 + 3 + 1
                            x, y = self.data_loader.load_batch(
                                b_size=1, up_down=up_down,
                                crop_number=crop_number,
                                augment_number=augment_number,
                                training=True, testing=False)
                            loss, _ = sess.run(
                                (self.net.cost, self.optimizer),
                                feed_dict={self.net.x: x, self.net.y: y,
                                           self.net.training: True,
                                           self.net.keep_prob: keep_prob})
                            total_loss += loss / total_step
                            print("step {:} : loss {:.4f}".format(step, loss), 'Training')
                    for v_index in range(n_v_iters // 8):
                        step = offset + n_t_iters // 8 * 3 + v_index + 1
                        total_step = offset + n_t_iters // 8 * 3 + v_index + 1
                        x, y, files_list = self.data_loader.load_batch(
                            b_size=1, up_down=up_down, crop_number=crop_number,
                            augment_number=0, training=False, testing=True)
                        predictions, accuracy, dice = sess.run(
                            (self.net.predict, self.net.accuracy, self.net.dice),
                            feed_dict={self.net.x: x, self.net.y: y,
                                       self.net.training: False,
                                       self.net.keep_prob: keep_prob})
                        total_acc += accuracy / total_step
                        print('validation')
                        print("step {:} : accuracy {:.4f} dice {:.4f}".format(
                            step, accuracy, dice), 'Validation')
            # When an epoch finishes, write its mean loss/accuracy to the DB.
            # (epoch + 1) must be parenthesized for the intended percentage, and
            # Oracle has no LIMIT clause; tr_model_id alone identifies the row
            # (assumed to be the model_id passed to the trainer at construction).
            progress = (epoch + 1) / n_epochs * 100
            cur = OraDB.prepareCursor()
            cur.execute(
                "update training set epoch_cost=:epoch_cost, epoch_accuracy=:epoch_accuracy, "
                "epoch_num=:epoch_num, tr_progress=:tr_progress, ed_runtime=:ed_runtime "
                "where tr_model_id=:tr_model_id",
                [round(total_loss, 4), round(total_acc, 4), epoch + 1, progress,
                 datetime.today().strftime("%Y%m%d%H%M%S"), self.model_id])
            print('epoch_cost', round(total_loss, 4),
                  'epoch_accuracy', round(total_acc, 4),
                  'epoch_num', epoch + 1, 'tr_progress', progress,
                  'ed_runtime', datetime.today().strftime("%Y%m%d%H%M%S"))
            OraDB.dbCommit()
            OraDB.releaseConn()
            print('db writing finished')
            saver.save(sess, self.net_path)
            print('model saved')
    tf.reset_default_graph()
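# Quick check of the operator-precedence fix above (illustration only): the
# original expression epoch + 1 // n_epochs * 100 evaluates as
# epoch + ((1 // n_epochs) * 100), so epoch 0 of 10 reported 0 instead of 10.
#
#     >>> epoch, n_epochs = 0, 10
#     >>> epoch + 1 // n_epochs * 100
#     0
#     >>> (epoch + 1) / n_epochs * 100
#     10.0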
def tr_main_process():
    # Check the DB, then fetch the process info.
    time.sleep(5)
    cur = OraDB.prepareCursor()
    cur.execute(
        "select tr_status, tr_model_info, layer_cnt, tr_validation, loss_info, optimizer_info, learning_rate, drop_out_rate, activation_info, tr_model_id, normal_data, abnormal_data from training")
    for row in cur:
        tr_status, tr_model_info, layer_cnt, tr_validation, loss_info, \
            optimizer_info, learning_rate, drop_out_rate, activation_info, \
            tr_model_id, normal_data, abnormal_data = list(row)
        if tr_status == 'running':
            try:
                # Start training.
                cur.execute(
                    "update training set tr_status='running_now' where tr_model_id=:tr_model_id",
                    {'tr_model_id': tr_model_id})
                OraDB.dbCommit()
                print('Train Started')
                train_start_time = time.time()
                train_run(tr_model_info=tr_model_info, layer_cnt=layer_cnt,
                          tr_validation=tr_validation, loss_info=loss_info,
                          optimizer_info=optimizer_info,
                          learning_rate=learning_rate,
                          drop_out_rate=drop_out_rate,
                          act_func=activation_info, tr_model_id=tr_model_id,
                          normal_data=normal_data, abnormal_data=abnormal_data)
                # When training finishes, set tr_status to end.
                print('Training Finished')
                train_end_time = time.time()
                duration = (train_end_time - train_start_time) // 60
                cur.execute(
                    "update training set tr_status='end', duration=:duration where tr_model_id=:tr_model_id",
                    {'duration': str(duration), 'tr_model_id': tr_model_id})
                OraDB.dbCommit()
                OraDB.releaseConn()
            except Exception:
                # On error, set tr_status to fail.
                cur.execute(
                    "update training set tr_status='fail' where tr_model_id=:tr_model_id",
                    {'tr_model_id': tr_model_id})
                OraDB.dbCommit()
                OraDB.releaseConn()
                print('Error Occurred')
def train_run(**kwargs):
    cost = kwargs['loss_info']
    optimizer = kwargs['optimizer_info']
    learning_rate = kwargs['learning_rate']
    drop_out_rate = kwargs['drop_out_rate']
    act_func = kwargs['act_func']
    layer_cnt = int(kwargs['layer_cnt'])
    model_id = int(kwargs['tr_model_id'])
    normal_data = kwargs['normal_data']
    abnormal_data = kwargs['abnormal_data']
    validation = kwargs['tr_validation']

    # Map the requested validation percentage to a k-fold count
    # (see the lookup-table sketch after this function).
    k_fold_list = []
    if int(validation) == 2:
        k_fold_list.append(50)
    elif int(validation) == 5:
        k_fold_list.append(20)
    elif int(validation) == 10:
        k_fold_list.append(10)
    elif int(validation) == 15:
        k_fold_list.append(18)
    elif int(validation) == 20:
        k_fold_list.append(5)
    k_fold = k_fold_list[0]

    def _return_loop_number(string):
        # Parse a range string such as '3_12' into (3, 12).
        if string != '_':
            under_Bar = string.find('_')
            first_data_n = int(string[:under_Bar])
            last_data_n = int(string[under_Bar + 1:])
            return first_data_n, last_data_n

    # normal data get
    normal_file_path_list = []
    normal_data_chklist = []
    if normal_data != '_':
        normal_start, normal_fin = _return_loop_number(normal_data)
        cur = OraDB.prepareCursor()
        cur.execute(
            "select del_yn, dataid, file_path, normtrain_id from study_normtraining order by normtrain_id asc")
        for row in cur:
            del_yn, dataid, file_path, normtrain_id = list(row)
            if del_yn != 'y' and 'I66' in dataid:
                normal_data_chklist.append(file_path)
        print(normal_data_chklist)
        for idx in range(normal_start - 1, normal_fin):
            normal_file_path_list.append(normal_data_chklist[idx])
        print(normal_file_path_list)
        OraDB.releaseConn()

    # abnormal data get
    abnormal_file_path_list = []
    abnormal_data_chklist = []
    if abnormal_data != '_':
        abnormal_start, abnormal_fin = _return_loop_number(abnormal_data)
        cur = OraDB.prepareCursor()
        cur.execute(
            "select del_yn, dataid, file_path, abnormtrain_id from study_abnormtraining order by abnormtrain_id asc")
        for row in cur:
            del_yn, dataid, file_path, abnormtrain_id = list(row)
            if del_yn != 'y' and 'I66' in dataid:
                abnormal_data_chklist.append(file_path)
        print(abnormal_data_chklist)
        for idx in range(abnormal_start - 1, abnormal_fin):
            abnormal_file_path_list.append(abnormal_data_chklist[idx])
        print(abnormal_file_path_list)
        OraDB.releaseConn()

    # copy nas normal data to local temp directory
    local_n_datapath_list = []
    for nas_path in normal_file_path_list:
        temp_path = nas_path.replace(
            '/medimg/', '/home/user01/Javis_dl_system/data_temp/')
        if not os.path.exists(temp_path):
            os.makedirs(temp_path)
        data_mover.nas_to_dlserver(nas_path, temp_path)
        local_n_datapath_list.append(temp_path)

    # copy nas abnormal data to local temp directory
    local_ab_datapath_list = []
    for nas_path in abnormal_file_path_list:
        temp_path = nas_path.replace(
            '/medimg/', '/home/user01/Javis_dl_system/data_temp/')
        if not os.path.exists(temp_path):
            os.makedirs(temp_path)
        data_mover.nas_to_dlserver(nas_path, temp_path)
        local_ab_datapath_list.append(temp_path)

    tot_datapath_list = local_n_datapath_list + local_ab_datapath_list
    if len(tot_datapath_list) <= 1:
        print('CANNOT RUN WITH 0, 1 DATA SET')
        raise FileNotFoundError
    shuffle(tot_datapath_list)

    fin_datapath_list = []
    # path : /home/bjh/obsk/v_nas2/I66/DICOM/train/abnormal/I66_mri_AB_00000039
    for path in tot_datapath_list:
        datasaver = LabelDataSaver.DataSaver(path)
        datasaver.saveYdata_labeled()
        x_path = path + '/img/x'
        y_path = path + '/img/y'
        # Keep only datasets whose label count matches the image count.
        if os.path.isdir(y_path) and len(os.listdir(y_path)) == len(os.listdir(x_path)):
            fin_datapath_list.append(path)

    dataset_cnt = len(fin_datapath_list)
    data_loader = TrainDataLoader.DataLoader(data_path_list=fin_datapath_list,
                                             k_fold=k_fold, c_size=256,
                                             i_channel=1, n_class=2)
    trainer = Trainer(data_loader=data_loader, model_id=model_id,
                      optimizer=optimizer, learning_rate=learning_rate,
                      cost_name=cost, act_func=act_func, layer_n=layer_cnt)
    trainer.train(n_epochs=1,
                  n_t_iters=(math.ceil(dataset_cnt / k_fold * (k_fold - 1)) - 1) * 8,
                  n_v_iters=math.ceil(dataset_cnt / k_fold) * 8,
                  b_size=1, keep_prob=drop_out_rate)
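# The validation -> k_fold chain in train_run can be expressed as a dict
# lookup; sketch only, with the values copied verbatim from the branches
# above (including the unusual 15 -> 18 pairing), so behavior is unchanged:
K_FOLD_BY_VALIDATION = {2: 50, 5: 20, 10: 10, 15: 18, 20: 5}
# usage: k_fold = K_FOLD_BY_VALIDATION[int(validation)]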