Example #1
def main():
    import h5py
    import numpy as np  # used below via np.asarray / np.expand_dims
    import torch        # used below via torch.unsqueeze

    from lib.dataset.data_augmentation import CT_Data_Augmentation, Xray_Data_Augmentation
    from lib.config.config import cfg, merge_dict_and_yaml

    test_file = r'D:\Data\LIDC-HDF5-128\LIDC-IDRI-0001.20000101.3000566.1\ct_xray_data.h5'

    opt = merge_dict_and_yaml(dict(), cfg)

    hdf = h5py.File(test_file, 'r')
    ct = np.asarray(hdf['ct'])
    xray = np.asarray(hdf['xray1'])
    xray = np.expand_dims(xray, 0)
    print(xray.shape)
    trans_CT = CT_Data_Augmentation(opt)
    trans_Xray = Xray_Data_Augmentation(opt)
    ct_trans = trans_CT(ct)
    xray_trans = trans_Xray(xray)

    visual_dict = {
        'ct': torch.unsqueeze(ct_trans, 0),
        'xray': torch.unsqueeze(xray_trans, 0)
    }
    visual = Visualizer(log_dir='../../demo/log')
    visual.add_image('a', visual_dict, 1)
    visual.add_scalar('b', 1, 1)
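
The snippet above relies on a project-specific Visualizer whose import is not shown. As a rough sketch only, and assuming it is a thin wrapper around torch.utils.tensorboard.SummaryWriter, such a logger could look like the class below (SimpleVisualizer and its middle-slice convention for 3-D volumes are illustrative stand-ins, not the project's actual implementation):

from torch.utils.tensorboard import SummaryWriter

class SimpleVisualizer:
    """Minimal TensorBoard-style logger sketch (hypothetical stand-in)."""

    def __init__(self, log_dir):
        self.writer = SummaryWriter(log_dir=log_dir)

    def add_scalar(self, tag, value, step):
        self.writer.add_scalar(tag, value, step)

    def add_image(self, tag, visual_dict, step):
        # Log every tensor in the dict; a 3-D CT volume is reduced to its
        # middle slice here, which is only one plausible convention.
        for name, tensor in visual_dict.items():
            img = tensor[0]  # drop the batch dimension
            if img.dim() == 3 and img.shape[0] > 3:
                img = img[img.shape[0] // 2].unsqueeze(0)  # 1 x H x W
            self.writer.add_image('{}/{}'.format(tag, name), img, step)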
Example #2
def main():
    import h5py
    import numpy as np  # used below via np.asarray / np.expand_dims
    import matplotlib.pyplot as plt

    from lib.config.config import cfg, merge_dict_and_yaml
    # CT_XRAY_Data_Augmentation, CT_XRAY_Data_Test, tensor_backto_unnormalization_image
    # and Normalization_to_range are project utilities; their imports are not shown in
    # this snippet.

    test_file = r'D:\Data\LIDC-HDF5-256\LIDC-IDRI-0001.20000101.3000566.1\ct_xray_data.h5'

    opt = merge_dict_and_yaml(dict(), cfg)

    hdf = h5py.File(test_file, 'r')
    ct = np.asarray(hdf['ct'])
    xray = np.asarray(hdf['xray1'])
    xray = np.expand_dims(xray, 0)
    print(xray.shape)
    # Augmentation transform vs. the deterministic test-time transform.
    transform_aug = CT_XRAY_Data_Augmentation(opt)
    transform_normal = CT_XRAY_Data_Test(opt)
    ct_normal, xray_normal = transform_normal([ct, xray])
    ct_trans, xray_trans = transform_aug([ct, xray])
    ct_trans = tensor_backto_unnormalization_image(ct_trans,
                                                   opt.CT_MEAN_STD[0],
                                                   opt.CT_MEAN_STD[1])
    xray_trans = tensor_backto_unnormalization_image(xray_trans,
                                                     opt.XRAY1_MEAN_STD[0],
                                                     opt.XRAY1_MEAN_STD[1])
    ct_normal = tensor_backto_unnormalization_image(ct_normal,
                                                    opt.CT_MEAN_STD[0],
                                                    opt.CT_MEAN_STD[1])
    xray_normal = tensor_backto_unnormalization_image(xray_normal,
                                                      opt.XRAY1_MEAN_STD[0],
                                                      opt.XRAY1_MEAN_STD[1])
    to_display = Normalization_to_range()  # rescale to a displayable intensity range
    ct_trans = to_display(ct_trans)
    xray_trans = to_display(xray_trans)
    # trans_CT = CT_Data_Augmentation(opt)
    # trans_Xray = Xray_Data_Augmentation(opt)
    # ct_trans = trans_CT(ct).numpy()
    # xray_trans = trans_Xray(xray).numpy()
    import cv2
    print(ct_trans.shape, ct_normal.shape)
    # Windows 1/2 show the augmented X-ray and CT slice; 1-1/2-1 show the test-transform versions.
    cv2.imshow('1', xray_trans[0].astype(np.uint8))
    cv2.imshow('2', ct_trans[80, :, :].astype(np.uint8))
    cv2.imshow('1-1', to_display(xray_normal)[0].astype(np.uint8))
    cv2.imshow('2-1', to_display(ct_normal)[80, :, :].astype(np.uint8))
    cv2.waitKey(0)
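
Example #2 uses unnormalization helpers whose implementations are not shown. Below is a minimal sketch under the assumption that tensor_backto_unnormalization_image simply reverses a mean/std normalization and Normalization_to_range linearly rescales an array to a displayable range; the names are hypothetical stand-ins:

import numpy as np

def unnormalize_mean_std(data, mean, std):
    # Reverse z-score normalization: x = x_norm * std + mean.
    return data * std + mean

class NormalizeToRange:
    # Linearly rescale an array to [low, high]; 0-255 suits cv2.imshow display.
    def __init__(self, low=0.0, high=255.0):
        self.low, self.high = low, high

    def __call__(self, data):
        d_min, d_max = float(data.min()), float(data.max())
        scale = (self.high - self.low) / max(d_max - d_min, 1e-8)
        return (data - d_min) * scale + self.low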
Example #3
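# NOTE: this snippet assumes module-level imports that are not shown here
# (os, copy, numpy as np, torch, tqdm, plus the project's config, dataset,
# model and metric helpers).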
def evaluate(args):
  # check gpu
  if args.gpuid == '':
    args.gpu_ids = []
  else:
    if torch.cuda.is_available():
      split_gpu = str(args.gpuid).split(',')
      args.gpu_ids = [int(i) for i in split_gpu]
    else:
      print('There is no gpu!')
      exit(0)

  # check point
  if args.check_point is None:
    args.epoch_count = 1
  else:
    args.epoch_count = int(args.check_point)

  # merge config with yaml
  if args.ymlpath is not None:
    cfg_from_yaml(args.ymlpath)
  # merge config with argparse
  opt = copy.deepcopy(cfg)
  opt = merge_dict_and_yaml(args.__dict__, opt)
  print_easy_dict(opt)

  opt.serial_batches = True

  # add data_augmentation
  datasetClass, _, dataTestClass, collateClass = get_dataset(opt.dataset_class)
  opt.data_augmentation = dataTestClass

  # get dataset
  dataset = datasetClass(opt)
  print('DataSet is {}'.format(dataset.name))
  dataloader = torch.utils.data.DataLoader(
    dataset,
    batch_size=1,
    shuffle=False,
    num_workers=int(opt.nThreads),
    collate_fn=collateClass)

  dataset_size = len(dataloader)
  print('#Test images = %d' % dataset_size)

  # get model
  gan_model = get_model(opt.model_class)()
  print('Model --{}-- will be Used'.format(gan_model.name))

  # set to test
  gan_model.eval()

  gan_model.init_process(opt)
  total_steps, epoch_count = gan_model.setup(opt)

  # Must switch to test mode again: the mode setting is not propagated to all network
  # layers, so model.training can report eval while BatchNorm layers stay in training mode.
  if opt.verbose:
    print('## Model Mode: {}'.format('Training' if gan_model.training else 'Testing'))
    for i, v in gan_model.named_modules():
      print(i, v.training)

  if 'batch' in opt.norm_G:
    gan_model.eval()
  elif 'instance' in opt.norm_G:
    gan_model.eval()
    # instance norm in training mode is better
    for name, m in gan_model.named_modules():
      if m.__class__.__name__.startswith('InstanceNorm'):
        m.train()
  else:
    raise NotImplementedError()

  if opt.verbose:
    print('## Change to Model Mode: {}'.format('Training' if gan_model.training else 'Testing'))
    for i, v in gan_model.named_modules():
      print(i, v.training)

  result_dir = os.path.join(opt.resultdir, opt.data, '%s_%s' % (opt.dataset, opt.check_point))
  if not os.path.exists(result_dir):
    os.makedirs(result_dir)

  avg_dict = dict()
  for epoch_i, data in tqdm.tqdm(enumerate(dataloader)):

    gan_model.set_input(data)
    gan_model.test()

    visuals = gan_model.get_current_visuals()
    img_path = gan_model.get_image_paths()

    #
    # Evaluate Part
    #
    generate_CT = visuals['G_fake'].data.clone().cpu().numpy()
    real_CT = visuals['G_real'].data.clone().cpu().numpy()
    # Re-order to NDHW (when needed) and map intensities back to the [0, 1] range.
    if 'std' in opt.dataset_class or 'baseline' in opt.dataset_class:
      generate_CT_transpose = generate_CT
      real_CT_transpose = real_CT
    else:
      generate_CT_transpose = np.transpose(generate_CT, (0, 2, 1, 3))
      real_CT_transpose = np.transpose(real_CT, (0, 2, 1, 3))
    generate_CT_transpose = tensor_back_to_unnormalization(generate_CT_transpose, opt.CT_MEAN_STD[0],
                                                           opt.CT_MEAN_STD[1])
    real_CT_transpose = tensor_back_to_unnormalization(real_CT_transpose, opt.CT_MEAN_STD[0], opt.CT_MEAN_STD[1])
    # clip generate_CT
    generate_CT_transpose = np.clip(generate_CT_transpose, 0, 1)

    # CT range 0-1
    mae0 = MAE(real_CT_transpose, generate_CT_transpose, size_average=False)
    mse0 = MSE(real_CT_transpose, generate_CT_transpose, size_average=False)
    cosinesimilarity = Cosine_Similarity(real_CT_transpose, generate_CT_transpose, size_average=False)
    ssim = Structural_Similarity(real_CT_transpose, generate_CT_transpose, size_average=False, PIXEL_MAX=1.0)
    # CT range 0-4096
    generate_CT_transpose = tensor_back_to_unMinMax(generate_CT_transpose, opt.CT_MIN_MAX[0], opt.CT_MIN_MAX[1]).astype(
      np.int32)
    real_CT_transpose = tensor_back_to_unMinMax(real_CT_transpose, opt.CT_MIN_MAX[0], opt.CT_MIN_MAX[1]).astype(
      np.int32)
    psnr_3d = Peak_Signal_to_Noise_Rate_3D(real_CT_transpose, generate_CT_transpose, size_average=False, PIXEL_MAX=4095)
    psnr = Peak_Signal_to_Noise_Rate(real_CT_transpose, generate_CT_transpose, size_average=False, PIXEL_MAX=4095)
    mae = MAE(real_CT_transpose, generate_CT_transpose, size_average=False)
    mse = MSE(real_CT_transpose, generate_CT_transpose, size_average=False)

    name1 = os.path.splitext(os.path.basename(img_path[0][0]))[0]
    name2 = os.path.split(os.path.dirname(img_path[0][0]))[-1]
    name = name2 + '_' + name1
    print(cosinesimilarity, name)
    # Skip degenerate samples; `x is np.nan` never matches a computed NaN, so use np.isnan.
    if np.any(np.isnan(cosinesimilarity)) or np.any(cosinesimilarity > 1):
      print(os.path.splitext(os.path.basename(img_path[0][0]))[0])
      continue

    metrics_list = [('MAE0', mae0), ('MSE0', mse0), ('MAE', mae), ('MSE', mse), ('CosineSimilarity', cosinesimilarity),
                    ('psnr-3d', psnr_3d), ('PSNR-1', psnr[0]),
                    ('PSNR-2', psnr[1]), ('PSNR-3', psnr[2]), ('PSNR-avg', psnr[3]),
                    ('SSIM-1', ssim[0]), ('SSIM-2', ssim[1]), ('SSIM-3', ssim[2]), ('SSIM-avg', ssim[3])]

    # Accumulate per-sample metric values across the whole test set.
    for key, value in metrics_list:
      avg_dict.setdefault(key, []).extend(value.tolist())

    del visuals, img_path

  for key, value in avg_dict.items():
    print('### --{}-- total: {}; avg: {} '.format(key, len(value), np.round(np.mean(value), 7)))
    avg_dict[key] = np.mean(value)

  return avg_dict
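
The metric helpers used in evaluate (MAE, MSE, Peak_Signal_to_Noise_Rate_3D, Cosine_Similarity, Structural_Similarity) come from the project's utility code. The sketch below shows the standard formulas they are presumed to implement for batches of NDHW volumes with per-sample reduction (size_average=False); function names are illustrative and SSIM is omitted:

import numpy as np

def mae(real, fake):
    # Per-sample mean absolute error over flattened volumes.
    return np.abs(real - fake).reshape(real.shape[0], -1).mean(axis=1)

def mse(real, fake):
    # Per-sample mean squared error over flattened volumes.
    return ((real - fake) ** 2).reshape(real.shape[0], -1).mean(axis=1)

def psnr_3d(real, fake, pixel_max=4095.0):
    # PSNR = 20*log10(MAX) - 10*log10(MSE), computed over the whole volume.
    err = mse(real.astype(np.float64), fake.astype(np.float64))
    return 20.0 * np.log10(pixel_max) - 10.0 * np.log10(np.maximum(err, 1e-12))

def cosine_similarity(real, fake):
    # Cosine similarity between the flattened real and generated volumes.
    r = real.reshape(real.shape[0], -1).astype(np.float64)
    f = fake.reshape(fake.shape[0], -1).astype(np.float64)
    num = (r * f).sum(axis=1)
    den = np.linalg.norm(r, axis=1) * np.linalg.norm(f, axis=1)
    return num / np.maximum(den, 1e-12)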