def train():
    args = parse_args().parse_args(args=[])
    path_to_save = os.path.join(args.save_folder, args.dataset, args.version)
    os.makedirs(path_to_save, exist_ok=True)

    # use hi-res backbone
    if args.high_resolution:
        print('use hi-res backbone')
        hr = True
    else:
        hr = False

    # cuda
    if args.cuda:
        print('use cuda')
        cudnn.benchmark = True
        device = torch.device("cuda", 0)
    else:
        device = torch.device("cpu")

    # multi-scale (note: both branches currently use the same fixed size;
    # the flag only changes the message and enables rescaling in the loop below)
    if args.multi_scale:
        print('use the multi-scale trick ...')
        train_size = config_0['NIH_pancreas_data_aimshape']
        val_size = config_0['NIH_pancreas_data_aimshape']
    else:
        train_size = config_0['NIH_pancreas_data_aimshape']
        val_size = config_0['NIH_pancreas_data_aimshape']

    cfg = train_cfg

    # dataset and evaluator
    print("Setting Arguments.. : ", args)
    print("----------------------------------------------------------")
    print('Loading the dataset...')

    if args.dataset == 'pnens':
        num_classes = 1
        # dataset_pkl = load_from_pkl(r'/data/liyi219/pnens_3D_data/after_dealing/pre_order0_128_128_64_new.pkl')
        dataset_pkl = load_from_pkl(
            r'E:\ly\pnens_data\nas_data\v1_data\NIH\pre_order0_128_128_64_new.pkl')
        dataset = datanih(dataset_pkl)
        evaluator = myEvaluator(dataset=dataset,
                                data_root="/data/data4T/ly/data/pnens_3D",
                                img_size=val_size,
                                device=device,
                                transform=BaseTransform(val_size),
                                labelmap=('pnens',))  # one-class label map (tuple, not bare string)
    else:
        print('unknown dataset !! Only the pnens dataset is supported !!')
        exit(0)

    print('Training model on: yolov3_3D')
    print('The dataset size:', len(dataset))
    print("----------------------------------------------------------")

    # dataloader
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             collate_fn=detection_collate,
                                             num_workers=args.num_workers,
                                             pin_memory=True)

    if args.version == 'yolo_v3':
        from models.yolo_v3 import myYOLOv3
        anchor_size = anchor_size_3D_try
        yolo_net = myYOLOv3(device,
                            input_size=train_size,
                            num_classes=num_classes,
                            trainable=True,
                            anchor_size=anchor_size,
                            hr=hr)
        print('Let us train yolo_v3 on the %s dataset ......' % (args.dataset))
    else:
        print('Unknown version !!!')
        exit()

    model = yolo_net
    model.to(device).train()

    # use tfboard
    if args.tfboard:
        print('use tensorboard')
        from torch.utils.tensorboard import SummaryWriter
        c_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        log_path = os.path.join('log/coco/', args.version, c_time)
        os.makedirs(log_path, exist_ok=True)
        writer = SummaryWriter(log_path)

    # keep training from a checkpoint
    if args.resume is not None:
        print('keep training model: %s' % (args.resume))
        model.load_state_dict(torch.load(args.resume, map_location=device))

    # optimizer setup
    base_lr = args.lr
    tmp_lr = base_lr
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    max_epoch = cfg['max_epoch']
    epoch_size = len(dataset) // args.batch_size

    # start training loop
    t0 = time.time()
    for epoch in range(args.start_epoch, max_epoch):
        # cosine lr schedule with a flat tail over the last 20 epochs
        if args.cos and epoch > 20 and epoch <= max_epoch - 20:
            tmp_lr = 0.00001 + 0.5 * (base_lr - 0.00001) * (
                1 + math.cos(math.pi * (epoch - 20) * 1. / (max_epoch - 20)))
            set_lr(optimizer, tmp_lr)
        elif args.cos and epoch > max_epoch - 20:
            tmp_lr = 0.00001
            set_lr(optimizer, tmp_lr)
        # step lr schedule
        else:
            if epoch in cfg['lr_epoch']:
                tmp_lr = tmp_lr * 0.1
                set_lr(optimizer, tmp_lr)

        for iter_i, (images, targets) in enumerate(dataloader):
            """
            images: [1, 1, 128, 128, 64]
            targets: [y1, y2, x1, x2, z1, z2, 0], relative
            """
            # targets: [x1, x2, y1, y2, z1, z2, object present (0 or 1)],
            # relative coordinates in [0, 1]

            # warm-up
            if not args.no_warm_up:
                if epoch < args.wp_epoch:
                    tmp_lr = base_lr * pow(
                        (iter_i + epoch * epoch_size) * 1. /
                        (args.wp_epoch * epoch_size), 4)
                    set_lr(optimizer, tmp_lr)
                elif epoch == args.wp_epoch and iter_i == 0:
                    tmp_lr = base_lr
                    set_lr(optimizer, tmp_lr)

            # to device
            images = images.to(device)

            # multi-scale trick
            if iter_i % 10 == 0 and iter_i > 0 and args.multi_scale:
                # randomly choose a new size; the volumes are 3-D, so all three
                # extents are needed (the original 2-D [size, size] would break)
                size = random.randint(10, 19) * 32
                train_size = [size, size, size]
                model.set_grid(train_size)
            if args.multi_scale:
                # interpolate (upsample) to the new size; inputs are 5-D
                # [N, C, H, W, D], so 'trilinear' is required here
                # (the original 'bilinear' mode fails on 5-D input)
                images = torch.nn.functional.interpolate(images,
                                                         size=train_size,
                                                         mode='trilinear',
                                                         align_corners=False)

            # make labels
            targets = [label.tolist() for label in targets]
            targets = tools.multi_gt_creator3D(input_size=train_size,
                                               strides=model.stride,
                                               label_lists=targets,
                                               anchor_size=anchor_size)
            # targets: [batch_index, featuremap_index,
            #           grid_x * grid_y * grid_z * ab_ind, params(15)]
            # params (15): [obj, class, tx, ty, tz, th, tw, td, weight,
            #               (xmin, ymin, zmin, ymax, xmax, zmax) (absolute)]
            targets = torch.tensor(targets).float().to(device)

            # forward and loss
            conf_loss, cls_loss, txtytwth_loss, total_loss, dice_loss = model(
                images, target=targets)

            # backprop
            total_loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            # display
            if iter_i % 10 == 0:
                if args.tfboard:
                    # viz loss
                    writer.add_scalar('object loss', conf_loss.item(),
                                      iter_i + epoch * epoch_size)
                    writer.add_scalar('class loss', cls_loss.item(),
                                      iter_i + epoch * epoch_size)
                    writer.add_scalar('local loss', txtytwth_loss.item(),
                                      iter_i + epoch * epoch_size)
                t1 = time.time()
                print(
                    '[Epoch %d/%d][Iter %d/%d][lr %.6f]'
                    '[Loss: obj %.2f || dice_loss %.2f || cls %.2f || bbox %.2f || total %.2f || size %d || time: %.2f]'
                    % (epoch + 1, max_epoch, iter_i, epoch_size, tmp_lr,
                       conf_loss.item(), dice_loss.item(), cls_loss.item(),
                       txtytwth_loss.item(), total_loss.item(), train_size[0],
                       t1 - t0),
                    flush=True)
                t0 = time.time()

        # evaluation
        if (epoch + 1) % args.eval_epoch == 0:
            model.trainable = False
            model.set_grid(val_size)
            model.eval()

            # evaluate
            # evaluator.evaluate(model)

            # convert back to training mode
            model.trainable = True
            model.set_grid(train_size)
            model.train()

        # save model
        if (epoch + 1) % 5 == 0:
            print('Saving state, saving in {} epoch {}:'.format(
                os.path.join(path_to_save, args.version), epoch + 1))
            torch.save(
                model.state_dict(),
                os.path.join(path_to_save,
                             args.version + '_' + repr(epoch + 1) + '.pth'))
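# `set_lr` is called throughout train() but defined elsewhere in this repo.
# The sketch below shows the conventional PyTorch idiom it presumably wraps;
# the name `_set_lr_sketch` is hypothetical and only here for illustration.
def _set_lr_sketch(optimizer, lr):
    # Overwrite the learning rate of every parameter group in place.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr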
def obj_test():
    args = parse_args().parse_args(args=[])
    device = torch.device("cpu")
    input_size = [128, 128, 64]

    # dataset
    class_names = ('pnens',)  # one-class tuple (a bare string was a bug)
    # dataset2 = load_from_pkl(r'/data/liyi219/pnens_3D_data/after_dealing/pre_order0_128_128_64_new.pkl')
    dataset2 = load_from_pkl(r'E:\data\pre_order0_128_128_64_new.pkl')
    dataset = datanih(dataset2)

    # load net
    from models.yolo_v3 import myYOLOv3
    anchor_size = anchor_size_3D_try
    net = myYOLOv3(device,
                   input_size=input_size,
                   num_classes=1,
                   conf_thresh=args.conf_thresh,
                   nms_thresh=args.nms_thresh,
                   anchor_size=anchor_size)
    net.load_state_dict(
        torch.load(os.path.join(args.trained_model, 'yolo_v3_200.pth'),
                   map_location=device))
    net.to(device).eval()

    num_images = len(dataset)
    result = []
    images = []
    bbox_gt = []
    for index in range(num_images):
        if index == 4:  # only test the first few images
            break
        print('Testing image {:d}/{:d}....'.format(index + 1, num_images))
        img, _, height, width, depth = dataset.pull_item(index)
        bb = dataset.get_bbox_juedui(index)
        bbox_gt.append(bb)
        scale = np.array([[height, width, depth, height, width, depth]])
        images.append(img)

        # to tensor
        x = img
        x = x.unsqueeze(0).to(device)

        t0 = time.time()
        # forward
        bboxes, scores, _ = net(x)
        bboxes = bboxes * scale
        if not scores.size:
            print('{} has no bbox'.format(index))
            continue
        best_scores = np.argmax(scores, axis=0)  # index of the highest-scoring box
        bboxes = bboxes[best_scores]
        result.append([bboxes, best_scores])
        print("detection {} time used ".format(index), time.time() - t0, "s")

    test_index = 1
    img3D = images[test_index]
    img3D = img3D.squeeze(dim=0)
    img3D = np.array(img3D)
    # show3D(img3D)
    # show3Dslice(img3D)

    # draw bbox -- [x1, y1, z1, x2, y2, z2]
    line_thick = 1
    bbox3D = result[test_index][0]
    bbox3D = np.floor(bbox3D)
    bbox3D = np.array(bbox3D, dtype=int)
    # show3Dbbox_img(img3D, bbox3D, 2)
    bbox_in_img = get_bbox_in_img(img3D, bbox3D, line_thick)

    bb_gt = bbox_gt[test_index]
    x1, x2, y1, y2, z1, z2 = bb_gt
    bb_gt = [x1, y1, z1, x2, y2, z2]
    bb_gt = np.floor(bb_gt)
    bb_gt = np.array(bb_gt, dtype=int)
    print(bbox3D)
    print(bb_gt)
    bbox_in_img = get_bbox_in_img(bbox_in_img, bb_gt, line_thick, line_value=2e3)
    # bbox_in_img = np.hstack((bbox_in_img, img3D))
    show3Dslice(bbox_in_img)
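# obj_test() only prints the predicted and ground-truth boxes side by side.
# A hypothetical helper (not part of this repo) for quantifying their overlap,
# assuming both boxes use absolute [x1, y1, z1, x2, y2, z2] coordinates:
def _iou_3d_sketch(box_a, box_b):
    # Intersection extents along each axis, clamped at zero when disjoint.
    dx = max(0.0, min(box_a[3], box_b[3]) - max(box_a[0], box_b[0]))
    dy = max(0.0, min(box_a[4], box_b[4]) - max(box_a[1], box_b[1]))
    dz = max(0.0, min(box_a[5], box_b[5]) - max(box_a[2], box_b[2]))
    inter = dx * dy * dz
    vol_a = (box_a[3] - box_a[0]) * (box_a[4] - box_a[1]) * (box_a[5] - box_a[2])
    vol_b = (box_b[3] - box_b[0]) * (box_b[4] - box_b[1]) * (box_b[5] - box_b[2])
    union = vol_a + vol_b - inter
    return inter / union if union > 0 else 0.0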
def see_feature_segment():
    args = parse_args().parse_args(args=[])
    device = torch.device("cpu")
    input_size = [128, 128, 64]

    # dataset
    num_classes = 1
    class_names = ('pnens',)
    # dataset2 = load_from_pkl(r'/data/liyi219/pnens_3D_data/after_dealing/pre_order0_128_128_64_new.pkl')
    dataset2 = load_from_pkl(r'E:\data\pre_order0_128_128_64_new.pkl')
    dataset = datanih(dataset2)

    # load net
    from models.yolo_v3 import myYOLOv3
    anchor_size = anchor_size_3D_try
    net = myYOLOv3(device,
                   input_size=input_size,
                   num_classes=1,
                   conf_thresh=args.conf_thresh,
                   nms_thresh=args.nms_thresh,
                   anchor_size=anchor_size)
    net.load_state_dict(
        torch.load(os.path.join(args.trained_model, 'yolo_v3_200.pth'),
                   map_location=device))
    net.to(device).eval()

    num_images = len(dataset)
    result = []
    images = []
    for index in range(num_images):
        if index == 1:  # only test the first image
            break
        print('Testing image {:d}/{:d}....'.format(index + 1, num_images))
        img, _, height, width, depth = dataset.pull_item(index)
        scale = np.array([[height, width, depth, height, width, depth]])
        images.append(img)

        # to tensor
        x = img
        x = x.unsqueeze(0).to(device)

        t0 = time.time()
        # forward
        bboxes, scores, _ = net(x)
        pred_1, pred_2, pred_3 = net.get_feature_map(x)
        bboxes = bboxes * scale
        if not scores.size:
            print('{} has no bbox'.format(index))
            continue
        best_scores = np.argmax(scores, axis=0)  # index of the highest-scoring box
        bboxes = bboxes[best_scores]
        result.append([bboxes, scores])
        print("detection {} time used ".format(index), time.time() - t0, "s")

    img3D = images[0]
    img3D = img3D.squeeze(dim=0)
    img3D = np.array(img3D)

    # visualize the first feature map next to the input volume
    pred_1 = torch.sigmoid(pred_1)
    pred_1 = pred_1.data.cpu().numpy()[0, 0]
    show3D(pred_1)
    img_resize = resize3D(pred_1, [128, 128, 64])
    # show3Dslice(img_resize)
    img_and_featuremap = np.hstack((img_resize, img3D))
    show3Dslice(img_and_featuremap)
def see_bbox():
    args = parse_args().parse_args(args=[])
    device = torch.device("cpu")
    input_size = [128, 128, 64]

    # dataset2 = load_from_pkl(r'/data/liyi219/pnens_3D_data/after_dealing/pre_order0_128_128_64_new.pkl')
    dataset2 = load_from_pkl(
        r'E:\ly\pnens_data\nas_data\v1_data\NIH\pre_order0_128_128_64_new.pkl')
    dataset = datanih(dataset2)

    # load net
    from models.yolo_v3 import myYOLOv3
    anchor_size = anchor_size_3D_try
    net = myYOLOv3(device,
                   input_size=input_size,
                   num_classes=1,
                   conf_thresh=args.conf_thresh,
                   nms_thresh=args.nms_thresh,
                   anchor_size=anchor_size)
    net.load_state_dict(
        torch.load(os.path.join(args.trained_model, 'yolo_v3_200.pth'),
                   map_location=device))
    net.to(device).eval()

    num_images = len(dataset)
    result = []
    images = []
    masks = []
    bbox_gt = []
    for index in range(num_images):
        if index == 1:  # only test the first image
            break

        # target
        im, gt = dataset.__getitem__(index)
        targets = get_targets(gt)
        # gt: [[obj, class, tx, ty, tz, tw, th, td, weight,
        #       (xmin, ymin, zmin, xmax, ymax, zmax) (absolute)]]
        gt = targets[np.where(targets == 1)[0:2]]
        txtytztwthtd = gt[0, 2:8]
        gt_obj = targets[:, :, 0]
        map_obj = obj_map_get(gt_obj)

        print('Testing image {:d}/{:d}....'.format(index + 1, num_images))
        img, _, mask, height, width, depth = dataset.pull_item(index)
        bb = dataset.get_bbox_juedui(index)  # [y1, y2, x1, x2, z1, z2], absolute
        bbox_gt.append(bb)
        scale = np.array([[width, height, depth, width, height, depth]])
        images.append(img)
        masks.append(mask)

        # to tensor: add channel and batch dims -> [1, 1, H, W, D]
        x = torch.tensor(img)
        x = x.unsqueeze(0).to(device)
        x = x.unsqueeze(0).to(device)

        t0 = time.time()
        # forward
        bboxes, scores, _ = net(x)  # bboxes: [x1, y1, z1, x2, y2, z2], relative
        bboxes = bboxes * scale  # to absolute coordinates
        if not scores.size:
            print('{} has no bbox'.format(index))
            continue
        best_scores = np.argmax(scores, axis=0)  # index of the highest-scoring box
        bboxes = bboxes[best_scores]
        result.append([bboxes, best_scores])
        print("detection {} time used ".format(index), time.time() - t0, "s")

    test_index = 0
    # img3D[y, x, z]
    img3D = images[test_index]
    mask3D = masks[test_index]
    # show3D(img3D)
    # show3Dslice(img3D)

    # draw bbox -- [x1, y1, z1, x2, y2, z2], absolute
    line_thick = 1
    bbox3D = result[test_index][0]
    x1, y1, z1, x2, y2, z2 = bbox3D
    bbox3D = [y1, x1, z1, y2, x2, z2]
    bbox3D = np.floor(bbox3D)
    bbox3D = np.array(bbox3D, dtype=int)
    # show3Dbbox_img(img3D, bbox3D, 2)
    bboxInimg = bbox_in_img_for_slice(img3D, bbox3D, line_thick, -2e3)
    # show3Dslice(bboxInimg)

    bb_gt = bbox_gt[test_index]  # [y1, y2, x1, x2, z1, z2], absolute
    y1, y2, x1, x2, z1, z2 = bb_gt
    bb_gt = [y1, x1, z1, y2, x2, z2]
    bb_gt = np.floor(bb_gt)
    bb_gt = np.array(bb_gt, dtype=int)
    print(bbox3D)
    print(bb_gt)
    bboxInimg = bbox_in_img_for_slice(bboxInimg, bb_gt, line_thick, 1e3)
    bboxInimg = bboxInimg + mask3D * 2e3
    # bbox_in_img = np.hstack((bbox_in_img, img3D))
    show3Dslice(bboxInimg)
    # show3D(bboxInimg)

    # np.int was removed in NumPy >= 1.24; use the builtin int instead
    img_for_3D = np.zeros([128, 128, 64], dtype=int)
    img_for_3D = bbox_in_img_for_3D(img_for_3D, bb_gt, line_thick, 100)
    img_for_3D = bbox_in_img_for_3D(img_for_3D, bbox3D, line_thick, 200)
    show3D(img_for_3D)
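# Entry-point sketch (an assumption: this script may be driven from another
# module instead). parse_args() supplies defaults, so each routine runs
# without extra arguments.
if __name__ == '__main__':
    train()
    # obj_test()             # visualize predicted vs. ground-truth boxes
    # see_feature_segment()  # overlay a sigmoid feature map on the volume
    # see_bbox()             # draw predicted/GT boxes plus the mask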