Example #1
class ImageProcessing:
    def __init__(self):
        self.model = UNet(init_features=32)
        self.model.load_state_dict(
            torch.load('/home/zhenyuli/workspace/us_robot/unet_usseg.pth'))
        self.model.to(device)
        self.model.eval()  # inference-only model
        self.sub_image = rospy.Subscriber("/us_image", Image,
                                          self.cb_unet_inference)
        self.pub_mask = rospy.Publisher("/us_segment", Image, queue_size=10)

    def cb_unet_inference(self, msg):
        rospy.loginfo("liuchang of course i still love you")
        msg.data  # no-op; the inference body is not shown in this excerpt
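
A minimal way to run this node (not part of the original excerpt; the node name is an assumption):

if __name__ == '__main__':
    rospy.init_node('us_image_segmentation')  # hypothetical node name
    ImageProcessing()
    rospy.spin()  # process incoming /us_image messages until shutdown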
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--batchsize', '-b', type=int, default=50)
    parser.add_argument('--epoch', '-e', type=int, default=1000)
    parser.add_argument('--gpu', '-g', type=int, default=-1)
    parser.add_argument('--out', '-o', default='')
    parser.add_argument('--resume', '-r', default='')
    parser.add_argument('--n_hidden', '-n', type=int, default=100)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--snapshot_interval', type=int, default=100000)
    parser.add_argument('--display_interval', type=int, default=100)
    args = parser.parse_args()

    out_dir = 'result'
    if args.out != '':
        out_dir = '{}/{}'.format(out_dir, args.out)
    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# n_hidden: {}'.format(args.n_hidden))
    print('# epoch: {}'.format(args.epoch))
    print('# out: {}'.format(out_dir))
    print('')

    bottom_ch = 512
    unet = UNet([
        DownBlock(None, bottom_ch // 8),
        DownBlock(None, bottom_ch // 4),
        DownBlock(None, bottom_ch // 2)
    ], BottomBlock(None, bottom_ch), [
        UpBlock(None, bottom_ch // 2),
        UpBlock(None, bottom_ch // 4),
        UpBlock(None, bottom_ch // 8)
    ], L.Convolution2D(None, 12, 3, 1, 1))

    model = L.Classifier(unet)

    if args.gpu >= 0:
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    print('Loading Data...')
    images, labels = get_facade()
    print('Transforming Images...')
    images = transform_images(images)
    print('Transforming Labels...')
    labels = transform_labels(labels)

    train, test = (labels[:300], images[:300]), (labels[300:], images[300:])
    train, test = FacadeDataset(train[1],
                                train[0]), FacadeDataset(test[1], test[0])
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test,
                                                 args.batchsize,
                                                 repeat=False,
                                                 shuffle=False)

    snapshot_interval = (args.snapshot_interval, 'iteration')
    generateimage_interval = (args.snapshot_interval // 100, 'iteration')
    display_interval = (args.display_interval, 'iteration')

    print('Setting trainer...')
    updater = training.updaters.StandardUpdater(train_iter,
                                                optimizer,
                                                device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=out_dir)
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'iteration', 'main/loss', 'main/accuracy']),
                   trigger=display_interval)
    trainer.extend(extensions.LogReport())
    if extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(['main/loss', 'validation/main/loss'],
                                  'epoch',
                                  file_name='loss.png'))
        trainer.extend(
            extensions.PlotReport(
                ['main/accuracy', 'validation/main/accuracy'],
                'epoch',
                file_name='accuracy.png'))
    trainer.extend(extensions.ProgressBar(update_interval=20))
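
    # 'snapshot_interval' is computed above but never used in this excerpt,
    # and '--resume' is parsed but never consumed; a conventional Chainer
    # completion (an assumption, not in the original) would be:
    trainer.extend(extensions.snapshot(), trigger=snapshot_interval)
    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)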

    print('RUN')
    trainer.run()
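
The excerpt defines main() but never calls it; the usual entry-point guard, presumably present in the full script:

if __name__ == '__main__':
    main()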
Example #3
    denoise_status_image = denoise_status_image * 0.80 + (
        1 - 0.80) * static_mask * noiseFrame
    denoise_status_image = denoise_status_image * static_mask + dynmic_mask * denoisedFrame
    # convert to a PIL Image
    out = denoise_status_image.astype(np.uint8)
    print("do_combine_fast-------->finish")
    return tvF.to_pil_image(out)


if __name__ == '__main__':
    # Parse test parameters
    params = parse_args()

    # Initialize model and test
    md_simple = MdSimpleNet()
    n2n_model = UNet()
    if use_cuda:
        md_simple = md_simple.cuda()
        n2n_model = n2n_model.cuda()
        n2n_model.load_state_dict(torch.load(params.n2n_ckpt))
    else:
        n2n_model.load_state_dict(
            torch.load(params.n2n_ckpt, map_location='cpu'))
    n2n_model.train(False)

    # process every image
    save_path = os.path.dirname(params.result)
    if not os.path.isdir(save_path):
        os.mkdir(save_path)
    input_path = params.data
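
The excerpt ends before the per-image loop. A minimal continuation sketch (an assumption, not in the original; it assumes `from PIL import Image`, and tvF is torchvision.transforms.functional as used above):

    # hypothetical continuation: denoise each image and save the result
    for fname in sorted(os.listdir(input_path)):
        noisy = tvF.to_tensor(Image.open(os.path.join(input_path, fname)))
        if use_cuda:
            noisy = noisy.cuda()
        with torch.no_grad():
            denoised = n2n_model(noisy.unsqueeze(0)).squeeze(0).cpu()
        tvF.to_pil_image(denoised.clamp(0, 1)).save(
            os.path.join(save_path, fname))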
Example #4
def main():
    args = parse_args()

    # create model
    print("=> creating model %s" %args.arch)
    model = UNet.UNet3d(in_channels=1, n_classes=2, n_channels=32)
    model = torch.nn.DataParallel(model).cuda()

    model.load_state_dict(torch.load('models/LITS_UNet_lym/2021-05-04-22-53-45/epoch143-0.9682-0.8594_model.pth'))
    model.eval()
    #model = tta.SegmentationTTAWrapper(model, tta.aliases.d4_transform(), merge_mode='mean')

    warnings.simplefilter('ignore')  # suppress warnings during prediction

    for file_index, file in enumerate(os.listdir(test_ct_path)):
        start_time = time()
        # read in the CT volume to be predicted
        ct = sitk.ReadImage(os.path.join(test_ct_path, file), sitk.sitkInt16)
        ct_array = sitk.GetArrayFromImage(ct)

        pred_name = file.replace('volume', 'segmentation').replace('nii', 'nii.gz')
        seg = sitk.ReadImage(os.path.join(test_seg_path, pred_name), sitk.sitkUInt8)
        seg_array = sitk.GetArrayFromImage(seg)

        if pred_name in os.listdir(pred_path):
            print('already predicted {}'.format(file))
            continue

        print('start predict file:', file, ct_array.shape)

        origin = ct.GetOrigin()
        direction = ct.GetDirection()
        spacing = np.array(list(ct.GetSpacing()))
        print('-------', file, '-------')
        print('original spacing:', np.array(ct.GetSpacing()))
        print('original shape and spacing:', ct_array.shape, spacing)

        # step1: spacing interpolation

        real_resize_factor, rezoom_factor = get_realfactor(spacing, new_spacing, ct_array)
        # set the new size according to the output spacing
        ct_array_zoom = ndimage.zoom(ct_array, real_resize_factor, order=3)
        seg_array_zoom = ndimage.zoom(seg_array, real_resize_factor, order=0)
        # size after zooming, before the liver region is cropped
        slice_predictions = np.zeros(ct_array_zoom.shape, dtype=np.int16)
        # do not use high-order interpolation on the ground-truth labels; it
        # destroys the boundary voxels. Always check the interpolated output!
        
        # step2: get the effective mask range (startposition:endposition)
        pred_liver = seg_array_zoom.copy()
        pred_liver[pred_liver > 0] = 1
        bb = find_bb(pred_liver)
        ct_array_zoom = ct_array_zoom[bb[0]:bb[1], bb[2]:bb[3], bb[4]:bb[5]]
        print('effective shape before padding:', ct_array_zoom.shape)

        # step3: normalization
        ct_array_nor = normalize(ct_array_zoom)
        w, h, d = ct_array_nor.shape
        tmp = [w, h, d]  # liver-region size before padding, i.e. the bounding-box extent
        # pad the test volume so its shape is a multiple of the patch size,
        # which makes sliding-window prediction possible
        ct_array_nor, pad_p = make_patch(ct_array_nor, patch_size=[64, 128, 160])
        print('after pad shape', ct_array_nor.shape)

        # run prediction
        pred_seg = infer_tumorandliver(model, ct_array_nor, pad_p, tmp,
                                       cube_shape=(64, 128, 160),
                                       pred_threshold=0.6)
        print('after infer shape', pred_seg.shape)  # shape of the cropped liver region
        slice_predictions[bb[0]:bb[1], bb[2]:bb[3], bb[4]:bb[5]] = pred_seg

        # restore the original size
        slice_predictions = ndimage.zoom(slice_predictions, rezoom_factor, order=0)
        slice_predictions = slice_predictions.astype(np.uint8)
        print('slice_predictions shape', slice_predictions.shape)

        predict_seg = sitk.GetImageFromArray(slice_predictions)
        predict_seg.SetDirection(ct.GetDirection())
        predict_seg.SetOrigin(ct.GetOrigin())
        predict_seg.SetSpacing(ct.GetSpacing())

        sitk.WriteImage(predict_seg, os.path.join(pred_path, pred_name))

        speed = time() - start_time

        print(file, 'this case use {:.3f} s'.format(speed))
        print('-----------------------')

        torch.cuda.empty_cache()
Example #5
    if not os.path.exists(args.mask_dir):
        raise ValueError(f'{args.mask_dir} does not exist.')
    if not os.path.exists(args.dataset_dir):
        raise ValueError(f'{args.dataset_dir} does not exist.')

    if torch.cuda.device_count() == 0:
        device = torch.device("cpu")
        print('[Training] Running on CPU.')
    else:
        device = torch.device("cuda:0")
        print('[Training] Running on GPU.')

    image_files = read_instances_with_box(args.mask_dir, args.dataset_dir,
                                          args.box_file)

    print('Instantiating neural network...')
    cf = Configuration()
    net = UNet(has_sigmoid=True, multiplier=cf.multiplier).float()

    net.load_state_dict(torch.load(args.checkpoint, map_location=device))
    net.to(device)

    dataset = COCOTextSegmentationDataset(image_files, cf.im_size, None, None, False)
    dataloader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0)

    os.makedirs(args.output_dir, exist_ok=True)

    # 'metrics' is saved below, so evaluate() presumably returns it
    metrics = evaluate(dataloader, net, device, has_sigmoid=True,
                       pos_threshold=cf.pos_threshold, visualize=args.visualize,
                       output_dir=args.output_dir, img_files=image_files,
                       verbose=True)

    try:
        np.save('metrics.npy', metrics)
    except OSError as e:  # the original excerpt is cut off inside this handler
        print('Could not save metrics.npy:', e)
Example #6
    if torch.cuda.device_count() == 0:
        device = torch.device("cpu")
        print('[Training] Running on CPU.')
    else:
        device = torch.device("cuda:0")
        print('[Training] Running on GPU.')

    image_files = read_instances_with_box(args.mask_dir, args.dataset_dir,
                                          args.box_file)

    print('Instantiating neural network...')
    cf = Configuration()
    if args.test_run:
        cf.batch_size = 2
        cf.batches_per_print = 1
        cf.epoches_per_save = 1

    net = UNet(has_sigmoid=cf.has_sigmoid, multiplier=cf.multiplier).float()

    # criterion = torch.nn.BCELoss()
    # criterion = torch.nn.BCEWithLogitsLoss(pos_weight=torch.tensor([cf.pos_weight]))
    criterion = torch.nn.BCEWithLogitsLoss(
        pos_weight=torch.tensor([cf.pos_weight]).to(device))
    if cf.optimizer == 'adam':
        print('Using Adam optimizer.')
        optimizer = optim.Adam(net.parameters(), lr=cf.learning_rate)
    else:
        print('Using SGD optimizer.')
        optimizer = optim.SGD(net.parameters(),
                              lr=cf.learning_rate,
                              momentum=cf.momentum)

    # the excerpt is cut off inside this call; MultiplicativeLR requires an
    # lr_lambda, so the constant 0.95 per-epoch decay below is a placeholder
    scheduler = optim.lr_scheduler.MultiplicativeLR(
        optimizer, lr_lambda=lambda epoch: 0.95)
Example #7
parser.add_argument('--task',        type=str, default='', help='TASK')
parser.add_argument('--img_path',    type=str, default='/userhome/dped/validation/input/', help='IMG_PATH')
parser.add_argument('--img_list',    type=str, default='/userhome/MYDGF/run/dataset.txt', help='IMG_LIST')
parser.add_argument('--save_folder', type=str, default='/userhome/MYDGF/run/output/', help='SAVE_FOLDER')
parser.add_argument('--gpu',         type=int, default=0, help='GPU')
parser.add_argument('--gray',        default=False, action='store_true', help='GRAY')
parser.add_argument('--model_path',  type=str, default='/userhome/MYDGF/checkpoints_dped/snapshots/', help='MODEL_FOLDER')
parser.add_argument('--model_id',    type=str, default='net_epoch_11.pth', help='MODEL_ID')
args = parser.parse_args()


# Save Folder
if not os.path.isdir(args.save_folder):
    os.makedirs(args.save_folder)

model = UNet()

model_path = os.path.join(args.model_path, args.task, args.model_id)
print("load...",model_path)

model.load_state_dict(torch.load(model_path, map_location='cpu'))  # moved to GPU below if requested

# data set
input_path = args.img_path
input_images = os.listdir(input_path)

# GPU
if args.gpu >= 0:
    with torch.cuda.device(args.gpu):
        model.cuda()
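
The excerpt ends after moving the model to the GPU; input_images is never consumed. A minimal continuation sketch (an assumption, not in the original; the torchvision and PIL helpers below are imported explicitly since the original imports are not shown):

# hypothetical continuation: enhance every listed image and save the output
from PIL import Image as PILImage
from torchvision.transforms.functional import to_tensor, to_pil_image

model.eval()
with torch.no_grad():
    for name in input_images:
        img = to_tensor(PILImage.open(os.path.join(input_path, name)))
        if args.gpu >= 0:
            img = img.cuda(args.gpu)
        out = model(img.unsqueeze(0)).squeeze(0).clamp(0, 1).cpu()
        to_pil_image(out).save(os.path.join(args.save_folder, name))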
Example #8
def main():
    args = parse_args()
    #args.dataset = "datasets"

    if args.name is None:
        args.name = '%s_%s_lym' % (args.dataset, args.arch)
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    if not os.path.exists('models/{}/{}'.format(args.name, timestamp)):
        os.makedirs('models/{}/{}'.format(args.name, timestamp))

    print('Config -----')
    for arg in vars(args):
        print('%s: %s' % (arg, getattr(args, arg)))
    print('------------')

    with open('models/{}/{}/args.txt'.format(args.name, timestamp), 'w') as f:
        for arg in vars(args):
            print('%s: %s' % (arg, getattr(args, arg)), file=f)

    joblib.dump(args, 'models/{}/{}/args.pkl'.format(args.name, timestamp))

    # define loss function (criterion)
    if args.loss == 'BCEWithLogitsLoss':
        criterion = nn.BCEWithLogitsLoss().cuda()
    else:
        criterion = losses.BCEDiceLoss().cuda()

    cudnn.benchmark = True

    # Data loading code
    img_paths = glob('./data/train_image/*')
    mask_paths = glob('./data/train_mask/*')

    train_img_paths, val_img_paths, train_mask_paths, val_mask_paths = \
        train_test_split(img_paths, mask_paths, test_size=0.3, random_state=39)
    print("train_num:%s" % str(len(train_img_paths)))
    print("val_num:%s" % str(len(val_img_paths)))

    # create model
    print("=> creating model %s" % args.arch)
    model = UNet.UNet3d(in_channels=1, n_classes=2, n_channels=32)
    model = torch.nn.DataParallel(model).cuda()
    #model._initialize_weights()
    #model.load_state_dict(torch.load('model.pth'))

    print(count_params(model))

    if args.optimizer == 'Adam':
        optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                      model.parameters()),
                               lr=args.lr)
    elif args.optimizer == 'SGD':
        optimizer = optim.SGD(filter(lambda p: p.requires_grad,
                                     model.parameters()),
                              lr=args.lr,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay,
                              nesterov=args.nesterov)
    else:
        raise ValueError('unsupported optimizer: %s' % args.optimizer)

    train_dataset = Dataset(args, train_img_paths, train_mask_paths, args.aug)
    val_dataset = Dataset(args, val_img_paths, val_mask_paths)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               pin_memory=True,
                                               drop_last=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             pin_memory=True,
                                             drop_last=False)

    log = pd.DataFrame(index=[],
                       columns=[
                           'epoch', 'lr', 'loss', 'iou', 'dice_1', 'dice_2',
                           'val_loss', 'val_iou', 'val_dice_1', 'val_dice_2'
                       ])

    best_loss = float('inf')
    # best_iou = 0
    trigger = 0
    first_time = time.time()
    for epoch in range(args.epochs):
        print('Epoch [%d/%d]' % (epoch, args.epochs))

        # train for one epoch
        train_log = train(args, train_loader, model, criterion, optimizer,
                          epoch)
        # evaluate on validation set
        val_log = validate(args, val_loader, model, criterion)

        print(
            'loss %.4f - iou %.4f - dice_1 %.4f - dice_2 %.4f - val_loss %.4f - val_iou %.4f - val_dice_1 %.4f - val_dice_2 %.4f'
            % (train_log['loss'], train_log['iou'], train_log['dice_1'],
               train_log['dice_2'], val_log['loss'], val_log['iou'],
               val_log['dice_1'], val_log['dice_2']))

        end_time = time.time()
        print("time:", (end_time - first_time) / 60)

        tmp = pd.Series([
            epoch,
            args.lr,
            train_log['loss'],
            train_log['iou'],
            train_log['dice_1'],
            train_log['dice_2'],
            val_log['loss'],
            val_log['iou'],
            val_log['dice_1'],
            val_log['dice_2'],
        ],
                        index=[
                            'epoch', 'lr', 'loss', 'iou', 'dice_1', 'dice_2',
                            'val_loss', 'val_iou', 'val_dice_1', 'val_dice_2'
                        ])

        log = log.append(tmp, ignore_index=True)
        log.to_csv('models/{}/{}/log.csv'.format(args.name, timestamp),
                   index=False)

        trigger += 1

        val_loss = val_log['loss']
        if val_loss < best_loss:
            torch.save(
                model.state_dict(),
                'models/{}/{}/epoch{}-{:.4f}-{:.4f}_model.pth'.format(
                    args.name, timestamp, epoch, val_log['dice_1'],
                    val_log['dice_2']))
            best_loss = val_loss
            print("=> saved best model")
            trigger = 0

        # early stopping
        if args.early_stop is not None:
            if trigger >= args.early_stop:
                print("=> early stopping")
                break

        torch.cuda.empty_cache()