Example #1
def main(raw_args=None):
    """example:  python predict_batch.py --model '/home/zhaojin/data/TacomaBridge/segdata/train/checkpoint/weight_logloss_softmax/CP30.pth' --input '/home/zhaojin/data/TacomaBridge/capture/high-reso-clip2_rename' --output '/home/zhaojin/data/TacomaBridge/segdata/predict/high-reso-clip2_rename'"""
    args = get_args(raw_args)
    # args.model = '/home/zhaojin/data/TacomaBridge/segdata/train/checkpoint/logloss_softmax/CP12.pth'
    # in_files = ['/home/zhaojin/data/TacomaBridge/segdata/train/img/00034.png' ]
    # out_files = ['/home/zhaojin/my_path/dir/segdata/predict/00025.png']
    imgpath = args.input
    lblpath = args.output
    net = UNet(n_channels=1, n_classes=4)

    print("Loading model {}".format(args.model))

    if not args.cpu:
        print("Using CUDA version of the net, prepare your GPU !")
        net.cuda()
        net.load_state_dict(torch.load(args.model))
    else:
        net.cpu()
        net.load_state_dict(torch.load(args.model, map_location='cpu'))
        print("Using CPU version of the net, this may be very slow")

    print("Model loaded !")


    predict_img_batch(net=net,
                      imgpath=imgpath,
                      lblpath=lblpath,
                      scale_factor=args.scale,
                      out_threshold=args.mask_threshold,
                      use_dense_crf=not args.no_crf,
                      use_gpu=not args.cpu)
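The CUDA/CPU branching above is repeated in nearly every example on this page. As a reference, here is a minimal device-agnostic sketch of the same load pattern using torch.device; UNet and the channel/class counts are taken from the snippet above, and the helper name is an illustration, not part of the source:

import torch

def load_net(model_path, force_cpu=False):
    """Sketch: load UNet weights onto the GPU when available, otherwise the CPU."""
    device = torch.device('cuda' if torch.cuda.is_available() and not force_cpu else 'cpu')
    net = UNet(n_channels=1, n_classes=4)  # assumes UNet is importable as in the example
    net.to(device)
    net.load_state_dict(torch.load(model_path, map_location=device))
    net.eval()  # inference mode for prediction scripts
    return net, device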
Example #2
def main(args):
    in_files = args.input
    out_files = get_output_filenames(args)

    n_channels = np.load(in_files[0]).shape[2]
    ## NPY 1 channel Uint16 medical images
    net = UNet(n_channels=n_channels, n_classes=2)

    print("Loading model {}".format(args.model))

    if not args.cpu:
        print("Using CUDA version of the net, prepare your GPU !")
        net.cuda()
        net.load_state_dict(torch.load(args.model))
    else:
        net.cpu()
        net.load_state_dict(torch.load(args.model, map_location='cpu'))
        print("Using CPU version of the net, this may be very slow")

    print("Model loaded !")

    for i, fn in enumerate(in_files):
        print("\nPredicting image {} ...".format(fn))

        img = np.load(fn)

        mask = predict_img(net=net,
                           img=img,
                           scale_factor=args.scale,
                           out_threshold=args.mask_threshold,
                           use_gpu=not args.cpu)

        if args.viz:
            print("Visualizing results for image {}, close to continue ...".
                  format(fn))
            ## save this plt
            fn = out_files[i][:-3] + 'jpg'

            save = bool(args.vizsave)  # fix: `save` was previously undefined when --vizsave was not set

            plot_img_and_mask(img, mask, save=save, fn=fn)

        if args.save:
            # Save NPY file
            out_fn = out_files[i]

            np.save(out_fn, mask)

            print("Mask saved to {}".format(out_files[i]))
Example #3
def main(raw_args):
    args = get_args(raw_args)
    args.model = '/home/zhaojin/data/TacomaBridge/segdata/train/checkpoint/logloss_softmax/CP12.pth'
    in_files = ['/home/zhaojin/data/TacomaBridge/segdata/train/img/00034.png']
    out_files = ['/home/zhaojin/my_path/dir/segdata/predict/00025.png']

    net = UNet(n_channels=1, n_classes=4)

    print("Loading model {}".format(args.model))

    if not args.cpu:
        print("Using CUDA version of the net, prepare your GPU !")
        net.cuda()
        net.load_state_dict(torch.load(args.model))
    else:
        net.cpu()
        net.load_state_dict(torch.load(args.model, map_location='cpu'))
        print("Using CPU version of the net, this may be very slow")

    print("Model loaded !")

    for i, fn in enumerate(in_files):
        print("\nPredicting image {} ...".format(fn))

        img = Image.open(fn)
        if img.size[0] < img.size[1]:
            print("Error: image height larger than the width")

        mask = predict_img(net=net,
                           full_img=img,
                           scale_factor=args.scale,
                           out_threshold=args.mask_threshold,
                           use_dense_crf=not args.no_crf,
                           use_gpu=not args.cpu)

        if args.viz:
            print("Visualizing results for image {}, close to continue ...".
                  format(fn))
            plot_img_and_mask(img, mask)
        # if not args.no_save:
        #     out_fn = out_files[i]
        #     print('mask', mask)
        #     result = mask_to_image(mask)
        #
        #     result.save(out_files[i])
        #
        #     print("Mask saved to {}".format(out_files[i]))

    return mask
Example #4
def prediction(args):
    in_files = args.input
    out_files = get_output_filenames(args)

    net = UNet(n_channels=3, n_classes=1)

    print("Loading model {}".format(args.model))

    if not args.cpu:
        print("Using CUDA version of the net, prepare your GPU !")
        net.cuda()
        net.load_state_dict(torch.load(args.model))
    else:
        net.cpu()
        net.load_state_dict(torch.load(args.model, map_location='cpu'))
        print("Using CPU version of the net, this may be very slow")

    print("Model loaded !")

    for i, fn in enumerate(in_files):
        print("\nPredicting image {} ...".format(fn))

        img = Image.open(fn)
        if img.size[0] < img.size[1]:
            print("Error: image height larger than the width")

        mask = predict_img(net=net,
                           full_img=img,
                           scale_factor=args.scale,
                           out_threshold=args.mask_threshold,
                           use_dense_crf=not args.no_crf,
                           use_gpu=not args.cpu)

        if args.viz:
            print("Visualizing results for image {}, close to continue ...".
                  format(fn))
            plot_img_and_mask(img, mask)

        if not args.no_save:
            out_fn = out_files[i]
            result = mask_to_image(mask)
            result.save(out_files[i])

            print("Mask saved to {}".format(out_files[i]))
Example #5
def main(raw_args=None):
    """example:  python predict.py --model '/home/zhaojin/data/TacomaBridge/segdata/train/checkpoint/weight_logloss_softmax/CP30.pth' --input '/home/zhaojin/data/TacomaBridge/segdata/train/img/00034.png' --viz"""
    args = get_args(raw_args)
    print('args', args)
    # args.model = '/home/zhaojin/data/TacomaBridge/segdata/train/checkpoint/logloss_softmax/CP12.pth'
    # in_files = ['/home/zhaojin/data/TacomaBridge/segdata/train/img/00034.png' ]
    # out_files = ['/home/zhaojin/my_path/dir/segdata/predict/00025.png']
    in_files = args.input
    net = UNet(n_channels=1, n_classes=4)

    print("Loading model {}".format(args.model))

    if not args.cpu:
        print("Using CUDA version of the net, prepare your GPU !")
        net.cuda()
        net.load_state_dict(torch.load(args.model))
    else:
        net.cpu()
        net.load_state_dict(torch.load(args.model, map_location='cpu'))
        print("Using CPU version of the net, this may be very slow")

    print("Model loaded !")

    for i, fn in enumerate(in_files):
        print("\nPredicting image {} ...".format(fn))

        img = Image.open(fn)
        if img.size[0] < img.size[1]:
            print("Error: image height larger than the width")

        mask = predict_img(net=net,
                           full_img=img,
                           scale_factor=args.scale,
                           out_threshold=args.mask_threshold,
                           use_dense_crf=not args.no_crf,
                           use_gpu=not args.cpu)

        if args.viz:
            print("Visualizing results for image {}, close to continue ...".
                  format(fn))
            mask = np.transpose(mask, axes=[1, 2, 0])
            plot_img_and_mask(img, mask)
Example #6
if __name__ == "__main__":

    args = get_args()
    in_files = args.input
    out_files = get_output_filenames(args)

    net = UNet(n_channels=1, n_classes=1)

    print("Loading model {}".format(args.model))

    if not args.cpu:
        print("Using CUDA version of the net, prepare your GPU !")
        net.cuda()
        net.load_state_dict(torch.load(args.model))
    else:
        net.cpu()
        net.load_state_dict(torch.load(args.model, map_location='cpu'))
        print("Using CPU version of the net, this may be very slow")

    print("Model loaded !")

    for i, fn in enumerate(in_files):
        print("\nPredicting image {} ...".format(fn))

        images, patient_number, frame_indices, rotated, true_masks = load_patient_images(fn)

        # if img.size[0] < img.size[1]:
        #     print("Error: image height larger than the width")
        predictions = []
        for i, image in enumerate(images):  # note: this reuses (shadows) the outer loop's `i`
            image = torch.FloatTensor(image)
Example #7
def main():
    # Arguments
    parser = argparse.ArgumentParser(
        description=
        'High Quality Monocular Depth Estimation via Transfer Learning')
    parser.add_argument('--epochs',
                        default=20,
                        type=int,
                        help='number of total epochs to run')
    parser.add_argument('--lr',
                        '--learning-rate',
                        default=0.0001,
                        type=float,
                        help='initial learning rate')
    parser.add_argument('--bs', default=4, type=int, help='batch size')
    parser.add_argument('--o',
                        dest='optimizer',
                        help='training optimizer',
                        default="adam",
                        type=str)
    parser.add_argument('--lr_decay_step',
                        dest='lr_decay_step',
                        help='step to do learning rate decay, unit is epoch',
                        default=5,
                        type=int)
    parser.add_argument('--lr_decay_gamma',
                        dest='lr_decay_gamma',
                        help='learning rate decay ratio',
                        default=0.1,
                        type=float)
    args = parser.parse_args()

    # Create model
    # model = Model().cuda()
    model = UNet(n_channels=3, n_classes=1, bilinear=True).cuda()
    writer_train = SummaryWriter('runs/train_0')
    # model.load_state_dict(torch.load(r'models\06-05-2020_15-04-20-n15000-e10-bs4-lr0.0001\weights.epoch9_model.pth'))
    print('Model created.')
    logger = Logger('./logs')

    # hyperparams
    lr = args.lr
    bs = args.bs
    lr_decay_step = args.lr_decay_step
    lr_decay_gamma = args.lr_decay_gamma
    DOUBLE_BIAS = True
    WEIGHT_DECAY = 0.0001

    # params
    params = []
    for key, value in dict(model.named_parameters()).items():
        if value.requires_grad:
            if 'bias' in key:
                # `4e-5 and WEIGHT_DECAY or 0` was the old and/or ternary idiom;
                # 4e-5 is truthy, so it always resolves to WEIGHT_DECAY
                params += [{'params': [value], 'lr': lr * (DOUBLE_BIAS + 1),
                            'weight_decay': WEIGHT_DECAY}]
            else:
                params += [{'params': [value], 'lr': lr, 'weight_decay': 4e-5}]

    # optimizer
    if args.optimizer == "adam":
        optimizer = torch.optim.Adam(params,
                                     lr=lr,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=4e-5)

    # Training parameters
    # NOTE: this re-creates the optimizer from model.parameters(), discarding
    # the per-parameter groups assembled above
    optimizer = torch.optim.Adam(model.parameters(), args.lr)
    batch_size = args.bs
    prefix = 'unet_' + str(batch_size)

    # Load data
    train_loader, test_loader = getTrainingTestingData(batch_size=batch_size,
                                                       trainCount=68730)

    # # Logging
    # writer = SummaryWriter(comment='{}-lr{}-e{}-bs{}'.format(prefix, args.lr, args.epochs, args.bs), flush_secs=30)

    # Loss
    # l1_criterion = nn.L1Loss()
    # rmse = RMSE()
    l1_criterion = nn.L1Loss()
    grad_criterion = GradLoss()
    normal_criterion = NormalLoss()
    # eval_metric = RMSE_log()

    now = datetime.datetime.now()  # current date and time
    runID = now.strftime("%m-%d-%Y_%H-%M-%S") + '-n' + str(
        len(train_loader)) + '-e' + str(
            args.epochs) + '-bs' + str(batch_size) + '-lr' + str(args.lr)
    outputPath = './models/'
    runPath = outputPath + runID
    pathlib.Path(runPath).mkdir(parents=True, exist_ok=True)

    # constants
    # grad_factor = 10.
    # normal_factor = 10.

    # Start training...
    for epoch in range(args.epochs):
        batch_time = AverageMeter()
        losses = AverageMeter()
        N = len(train_loader)

        # Switch to train mode
        model.train()

        end = time.time()

        for i, sample_batched in enumerate(train_loader):
            optimizer.zero_grad()

            # Prepare sample and target (torch.autograd.Variable is a no-op
            # since PyTorch 0.4; plain tensors work directly)
            image = torch.autograd.Variable(sample_batched['image'].cuda())
            depth = torch.autograd.Variable(
                sample_batched['depth'].cuda(non_blocking=True))

            # Normalize depth
            depth_n = DepthNorm(depth)

            # Predict
            output = model(image)

            # Compute the loss
            l_depth = l1_criterion(output, depth_n)
            # l_ssim = torch.clamp((1 - ssim(output, depth_n, val_range=1000.0 / 10.0)) * 0.5, 0, 1)
            l_ssim = torch.clamp(
                (1 - ssim(output, depth_n, val_range=5.0)) * 0.5, 0,
                1)  # sbasak01

            # sbasak01 loss modification
            grad_real, grad_fake = imgrad_yx(depth_n), imgrad_yx(output)
            grad_loss = grad_criterion(
                grad_fake, grad_real)  # * grad_factor  # * (epoch > 3)
            normal_loss = normal_criterion(
                grad_fake, grad_real)  # * normal_factor  # * (epoch > 7)

            # loss = (1.0 * l_ssim) + (0.1 * l_depth)

            loss = (l_ssim) + (l_depth) + (grad_loss) + (normal_loss)

            # Update step
            losses.update(loss.data.item(), image.size(0))
            loss.backward()
            optimizer.step()
            # add training loss to SummaryWriter; use a global step so each
            # epoch does not overwrite the previous epoch's curve
            writer_train.add_scalar('LOSS', loss.data.item(), epoch * N + i)

            # Measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            eta = str(datetime.timedelta(seconds=int(batch_time.val *
                                                     (N - i))))

            # Log progress
            niter = epoch * N + i
            if i % 5 == 0:
                # Print to console
                print(
                    'Epoch: [{0}][{1}/{2}]\t'
                    'Time {batch_time.val:.3f} ({batch_time.sum:.3f})\t'
                    'ETA {eta}\t'
                    'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                    'L1_Loss: {l1_loss:.4f} SSIM_Loss: {ssim_loss:.4f} grad_loss: {gradloss:.4f} normal_loss: {'
                    'normalloss:.4f} '.format(
                        epoch,
                        i,
                        N,
                        batch_time=batch_time,
                        loss=losses,
                        eta=eta,
                        l1_loss=l_depth,
                        ssim_loss=l_ssim,
                        gradloss=grad_loss,  # sbasak01 loss modification
                        normalloss=normal_loss  # sbasak01 loss modification
                    ))

                # # 1. Log scalar values (scalar summary)
                # info = {'loss': loss.item()}
                #
                # for tag, value in info.items():
                #     logger.scalar_summary(tag, value, i + 1)
                #
                # # 2. Log values and gradients of the parameters (histogram summary)
                # for tag, value in model.named_parameters():
                #     tag = tag.replace('.', '/')
                #     logger.histo_summary(tag, value.data.cpu().numpy(), i + 1)
                #     logger.histo_summary(tag + '/grad', value.grad.data.cpu().numpy(), i + 1)

            if i % 100 == 0:
                sys.stdout.write("Iterations: %d   \r" % (i))
                sys.stdout.flush()
        # save Model intermediate
        path = runPath + '/weights.epoch{0}_model.pth'.format(epoch)
        torch.save(model.cpu().state_dict(), path)  # saving model
        model.cuda()
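Note that in this example the per-parameter groups built into the params list are discarded when the optimizer is re-created from model.parameters(). A minimal sketch of how such groups can actually drive the optimizer (DOUBLE_BIAS and WEIGHT_DECAY follow the snippet; this is an illustration, not the author's final setup):

import torch
import torch.nn as nn

def build_param_groups(model: nn.Module, lr, double_bias=True, weight_decay=1e-4):
    """Give biases a doubled learning rate and exempt them from weight decay."""
    groups = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        if 'bias' in name:
            groups.append({'params': [param], 'lr': lr * (2 if double_bias else 1),
                           'weight_decay': 0.0})
        else:
            groups.append({'params': [param], 'lr': lr, 'weight_decay': weight_decay})
    return groups

# usage: optimizer = torch.optim.Adam(build_param_groups(model, lr=1e-4), lr=1e-4)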
Example #8
def train(train_loader, val_loader, train_param, data_param, loc_param, _log, _run):
    writer = SummaryWriter()
    model_dir, _ = create_dir(writer.file_writer.get_logdir())
    sig = nn.Sigmoid()

    if train_param['model'] == "FCN":
        train_model = model.FCN(data_param['feature_num']).cuda()
    elif train_param['model'] == 'FCNwPool':
        train_model = model.FCNwPool(data_param['feature_num'], data_param['pix_res']).cuda()
    elif train_param['model'] == 'UNet':
        train_model = UNet(data_param['feature_num'], 1).cuda()
    elif train_param['model'] == 'FCNwBottleneck':
        train_model = model.FCNwBottleneck(data_param['feature_num'], data_param['pix_res']).cuda()
    elif train_param['model'] == 'SimplerFCNwBottleneck':
        train_model = model.SimplerFCNwBottleneck(data_param['feature_num']).cuda()
    elif train_param['model'] == 'Logistic':
        train_model = model.Logistic(data_param['feature_num']).cuda()
    elif train_param['model'] == 'PolyLogistic':
        train_model = model.PolyLogistic(data_param['feature_num']).cuda()
    
    if th.cuda.device_count() > 1:
        train_model = nn.DataParallel(train_model)
    
    if loc_param['load_model']:
        train_model.load_state_dict(th.load(loc_param['load_model']))
    _log.info('[{}] model is initialized ...'.format(ctime()))
    
    if train_param['optim'] == 'Adam':
        optimizer = to.Adam(train_model.parameters(), lr=train_param['lr'], weight_decay=train_param['decay'])
    else:
        optimizer = to.SGD(train_model.parameters(), lr=train_param['lr'], weight_decay=train_param['decay'])
    
    scheduler = ReduceLROnPlateau(optimizer, mode='min', patience=train_param['patience'], verbose=True, factor=0.5)
    criterion = nn.BCEWithLogitsLoss(pos_weight=th.Tensor([train_param['pos_weight']]).cuda())

    valatZero = validate(train_model, val_loader, data_param, train_param, _log)
    _log.info('[{}] validation loss before training: {}'.format(ctime(), valatZero))
    _run.log_scalar('training.val_loss', valatZero, 0)
    trainatZero = validate(train_model, train_loader, data_param, train_param, _log)
    _log.info('[{}] train loss before training: {}'.format(ctime(), trainatZero))
    _run.log_scalar('training.loss_epoch', trainatZero, 0)
    
    loss_ = 0
    prune = data_param['prune']
    for epoch in range(train_param['n_epochs']):
        running_loss = 0
        train_iter = iter(train_loader)
        for iter_ in range(len(train_iter)):
            optimizer.zero_grad()

            batch_sample = next(train_iter)  # iterator.next() is Python 2 only
            data, gt = batch_sample['data'].cuda(), batch_sample['gt'].cuda()
            if train_param['model'] == 'UNet':  # was 'UNET', which never matched the name checked above
                prds = train_model(data)[:, :, prune:-prune, prune:-prune]
            else:
                prds = train_model.forward(data)[:, :, prune:-prune, prune:-prune]
            indices = gt >= 0
            loss = criterion(prds[indices], gt[indices])
            running_loss += loss.item()
            loss_ += loss.item()
            loss.backward()
            optimizer.step()

            _run.log_scalar("training.loss_iter", loss.item(), epoch*len(train_iter)+iter_+1)
            _run.log_scalar("training.max_prob", th.max(sig(prds)).item(), epoch*len(train_iter)+iter_+1)
            _run.log_scalar("training.min_prob", th.min(sig(prds)).item(), epoch*len(train_iter)+iter_+1)

            writer.add_scalar("loss/train_iter", loss.item(), epoch*len(train_iter)+iter_+1)
            writer.add_scalars(
                "probRange",
                {'min': th.min(sig(prds)), 'max': th.max(sig(prds))},
                epoch*len(train_iter)+iter_+1
            )
            if (epoch*len(train_iter)+iter_+1) % 20 == 0:
                _run.log_scalar("training.loss_20", loss_/20, epoch*len(train_iter)+iter_+1)
                writer.add_scalar("loss/train_20", loss_/20, epoch*len(train_iter)+iter_+1)
                _log.info(
                    '[{}] loss at [{}/{}]: {}'.format(
                        ctime(),
                        epoch*len(train_iter)+iter_+1,
                        train_param['n_epochs']*len(train_iter),
                        loss_/20
                    )
                )
                loss_ = 0
        
        v_loss = validate(train_model, val_loader, data_param, train_param, _log)
        scheduler.step(v_loss)
        _log.info('[{}] validation loss at [{}/{}]: {}'.format(ctime(), epoch+1, train_param['n_epochs'], v_loss))
        _run.log_scalar('training.val_loss', v_loss, epoch+1)
        _run.log_scalar('training.loss_epoch', running_loss/len(train_iter), epoch+1)
        writer.add_scalars(
            "loss/grouped",
            {'test': v_loss, 'train': running_loss/len(train_iter)},
            epoch+1
        )
        del data, gt, prds, indices
        if (epoch+1) % loc_param['save'] == 0:
            th.save(train_model.cpu().state_dict(), model_dir+'model_{}.pt'.format(str(epoch+1)))
            train_model = train_model.cuda()
    
    writer.export_scalars_to_json(model_dir+'loss.json')
    th.save(train_model.cpu().state_dict(), model_dir+'trained_model.pt')
    save_config(writer.file_writer.get_logdir()+'/config.txt', train_param, data_param)
    _log.info('[{}] model has been trained and config file has been saved.'.format(ctime()))
    
    return v_loss
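The inner loop above drives the DataLoader by hand through iter()/next(). A minimal equivalent sketch using direct iteration, which sidesteps the Python 2/3 .next() issue entirely (train_model, criterion, optimizer, and prune as in the snippet):

for iter_, batch_sample in enumerate(train_loader):
    optimizer.zero_grad()
    data, gt = batch_sample['data'].cuda(), batch_sample['gt'].cuda()
    prds = train_model(data)[:, :, prune:-prune, prune:-prune]
    indices = gt >= 0  # only pixels with valid (non-negative) labels contribute
    loss = criterion(prds[indices], gt[indices])
    loss.backward()
    optimizer.step()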
Example #9
def main(args):
  # parse args
  best_acc1 = 0.0
  # tensorboard writer
  writer = SummaryWriter(args.experiment + "/logs")

  if args.gpu >= 0:
    print("Use GPU: {}".format(args.gpu))
  else:
    print('Using CPU for computing!')

  fixed_random_seed = 2019
  torch.manual_seed(fixed_random_seed)
  np.random.seed(fixed_random_seed)
  random.seed(fixed_random_seed)


  # set up transforms for data augmentation
  mn = [float(x) for x in args.mean] if(args.mean) else [0.485, 0.456, 0.406] 
  st = [float(x) for x in args.std] if(args.std) else [0.229, 0.224, 0.225] 

  normalize = transforms.Normalize(mean=mn, std=st)
  train_transforms = get_train_transforms(normalize)
  val_transforms = get_val_transforms(normalize)
  if(args.train_denoiser):
    normalize = transforms.Normalize(mean=mn, std=st)
    train_transforms = get_denoiser_train_transforms(normalize)
    val_transforms = get_denoiser_val_transforms(normalize)
  elif(args.cub_training):
    networks.CLASSES=200
    normalize = transforms.Normalize(mean=mn, std=st)
    train_transforms = get_cub_train_transforms(normalize)
    val_transforms = get_cub_val_transforms(normalize)
  if(args.spad_training):
    networks.CLASSES=122
    normalize = transforms.Normalize(mean=mn, std=st)
    train_transforms = get_spad_train_transforms(normalize)
    val_transforms = get_spad_val_transforms(normalize)
  elif(args.cars_training):
    networks.CLASSES=196
    normalize = transforms.Normalize(mean=mn, std=st)
    train_transforms = get_cub_train_transforms(normalize)
    val_transforms = get_cub_val_transforms(normalize)
  elif(args.imagenet_training):
    networks.CLASSES=1000
    normalize = transforms.Normalize(mean=mn, std=st)
    train_transforms = get_imagenet_train_transforms(normalize)
    val_transforms = get_imagenet_val_transforms(normalize)
  if (not args.evaluate):
    print("Training time data augmentations:")
    print(train_transforms)


  model_clean=None
  model_teacher=None
  if args.use_resnet18:
    model = torchvision.models.resnet18(pretrained=False)
    model.fc = nn.Linear(512, networks.CLASSES)
    if(args.use_resnet18!="random"):
        model.load_state_dict(torch.load(args.use_resnet18)['state_dict'])
  elif args.use_resnet34:
    model = torchvision.models.resnet34(pretrained=False)
    model.fc = nn.Linear(512, networks.CLASSES)
  elif args.use_resnet50:
    model = torchvision.models.resnet50(pretrained=False)
    model.fc = nn.Linear(2048, networks.CLASSES)
  elif args.use_inception_v3:
    model = torchvision.models.inception_v3(pretrained=False, aux_logits=False)
    model.fc = nn.Linear(2048, networks.CLASSES)
  elif args.use_photon_net:
    model = networks.ResNetContrast(BasicBlock, [2, 2, 2, 2], networks.CLASSES)
    if(args.use_photon_net!="random"):
        model.load_state_dict(torch.load(args.use_photon_net)['state_dict'])
#  elif args.use_contrastive_allfeats:
#    model = networks.ResNetContrast2(BasicBlock, [2, 2, 2, 2], networks.CLASSES)
#    if(args.use_contrastive_allfeats!="random"):
#        model.load_state_dict(torch.load(args.use_contrastive_allfeats)['state_dict'])
  elif args.train_denoiser:
    model = UNet(3,3)
  elif args.use_dirty_pixel:
    model = torchvision.models.resnet18(pretrained=False)
    model.fc = nn.Linear(512, networks.CLASSES)
    model_clean = UNet(3,3)
    if(args.evaluate==False):
        model_clean.load_state_dict(torch.load(args.use_dirty_pixel)['state_dict'])
        model_clean = model_clean.cuda(args.gpu)
  elif args.use_student_teacher:
    model = networks.ResNetPerceptual(BasicBlock, [2, 2, 2, 2], networks.CLASSES)
    model_teacher = networks.ResNetPerceptual(BasicBlock, [2, 2, 2, 2], networks.CLASSES, teacher_model=True)
    model_teacher.load_state_dict(torch.load(args.use_student_teacher)['state_dict'])
    model_teacher = model_teacher.cuda(args.gpu)
    model_teacher.eval()
    for param in model_teacher.parameters():
      param.requires_grad = False
  else:
    print("select correct model")
    exit(0)

  criterion1 = nn.CrossEntropyLoss()
  if(args.use_student_teacher or args.train_denoiser or args.use_dirty_pixel):
    criterion2 = nn.MSELoss()
  else:
    ps = AllPositivePairSelector(balance=False)
    criterion2 = OnlineContrastiveLoss(1., ps)
  # put everthing to gpu
  if args.gpu >= 0:
    model = model.cuda(args.gpu)
    criterion1 = criterion1.cuda(args.gpu)
    #criterion3 = criterion3.cuda(args.gpu)
    criterion2 = criterion2.cuda(args.gpu)
  criterion = [criterion1, criterion2]
  #criterion = [criterion1]
  # setup the optimizer
  opt_params = model.parameters()
  if(args.use_dirty_pixel):
    opt_params = list(model.parameters()) + list(model_clean.parameters())
  optimizer = torch.optim.SGD(opt_params, args.lr,
                momentum=args.momentum,
                weight_decay=args.weight_decay)

  # resume from a checkpoint?
  if args.resume:
    if os.path.isfile(args.resume):
      print("=> loading checkpoint '{}'".format(args.resume))
      if(args.gpu>=0):
        checkpoint = torch.load(args.resume)
      else:
        checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage)
      #best_acc1 = checkpoint['best_acc1']


      #new_state_dict = OrderedDict()
      #model_dict = model.state_dict()
      #for k, v in checkpoint['state_dict'].items():
      #  name = k[7:] # remove `module.`
      #  if(name.startswith('fc')):
      #      continue
      #  new_state_dict[name] = v
      #model_dict.update(new_state_dict)
      #model.load_state_dict(model_dict)
      model.load_state_dict(checkpoint['state_dict'])
      if args.gpu < 0:
        model = model.cpu()
      else:
        model = model.cuda(args.gpu)
      if(args.use_dirty_pixel):
        model_clean.load_state_dict(checkpoint['model_clean_state_dict'])
        model_clean = model_clean.cuda(args.gpu)
#      # only load the optimizer if necessary
#      if (not args.evaluate):
#        args.start_epoch = checkpoint['epoch']
#        optimizer.load_state_dict(checkpoint['optimizer'])
      print("=> loaded checkpoint '{}' (epoch {}, acc1 {})"
          .format(args.resume, checkpoint['epoch'], best_acc1))
    else:
      print("=> no checkpoint found at '{}'".format(args.resume))


  # setup dataset and dataloader
  val_dataset = IMMetricLoader(args.data_folder,
                  split='val', transforms=val_transforms, image_id=False, pil_loader=args.pil_loader, clean_image=args.train_denoiser, label_file=args.val_label_file)

  print('Validation Set Size: ', len(val_dataset))

  val_batch_size = 1 if args.train_denoiser else 50
  val_loader = torch.utils.data.DataLoader(
    val_dataset, batch_size=val_batch_size, shuffle=False,
    num_workers=args.workers, pin_memory=True, sampler=None, drop_last=False)
  val_dataset.reset_seed()
  # evaluation
  if args.evaluate:
    print("Testing the model ...")
    cudnn.deterministic = True
    validate_model(val_loader, model, -1, args, writer, model_clean)
    return
  load_clean_image = args.use_student_teacher or args.train_denoiser or args.use_dirty_pixel
  train_dataset = IMMetricLoader(args.data_folder,
                  split='train', transforms=train_transforms, label_file=args.label_file, pil_loader=args.pil_loader, clean_image=load_clean_image)
  print('Training Set Size: ', len(train_dataset))
  if(args.num_instances):
    train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=args.batch_size,
    num_workers=args.workers, pin_memory=True, sampler=RandomIdentitySampler(train_dataset, num_instances=args.num_instances), drop_last=True)
  else:
    train_loader = torch.utils.data.DataLoader(
      train_dataset, batch_size=args.batch_size, shuffle=True,
      num_workers=args.workers, pin_memory=True, sampler=None, drop_last=True)

  # enable cudnn benchmark
  cudnn.enabled = True
  cudnn.benchmark = True


  if(args.train_denoiser):
    
    print("Training denoiser ...")
    for epoch in range(args.start_epoch, args.epochs):
      train_dataset.reset_seed()
      train_denoiser(train_loader, val_loader, model, criterion, optimizer, epoch, args, writer)
    return

  model.eval()
  top1 = AverageMeter()
  top5 = AverageMeter()
  val_acc1 = validate_model(val_loader, model, 0, args, writer, model_clean)
  # note: top1/top5 are never updated before logging, so these curves start at 0.0
  writer.add_scalars('data/top1_accuracy',
     {"train" : top1.avg}, 0)
  writer.add_scalars('data/top5_accuracy',
     {"train" : top5.avg}, 0)
  model.train()

  # warmup the training
  if (args.start_epoch == 0) and (args.warmup_epochs > 0):
    print("Warmup the training ...")
    for epoch in range(0, args.warmup_epochs):
      acc1 = train_model(train_loader, val_loader, model, criterion, optimizer, epoch, "warmup", best_acc1, args, writer, model_clean, model_teacher)

  # start the training
  print("Training the model ...")
  for epoch in range(args.start_epoch, args.epochs):
    train_dataset.reset_seed()
    # train for one epoch
    acc1 = train_model(train_loader, val_loader, model, criterion, optimizer, epoch, "train", best_acc1, args, writer, model_clean, model_teacher)


    # save checkpoint
    best_acc1 = max(acc1, best_acc1)
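The epoch loop updates best_acc1 under a "# save checkpoint" comment, but the snippet ends before anything is written to disk. A minimal save step consistent with the keys the resume branch reads ('state_dict', 'epoch', 'optimizer'); the output path and is_best handling are assumptions:

import os
import torch

def save_checkpoint(model, optimizer, epoch, best_acc1, is_best,
                    out_dir='checkpoints'):
    """Sketch: write the dict the resume branch above expects to load."""
    os.makedirs(out_dir, exist_ok=True)
    state = {
        'epoch': epoch + 1,
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'best_acc1': best_acc1,
    }
    torch.save(state, os.path.join(out_dir, 'checkpoint.pth.tar'))
    if is_best:  # keep a separate copy of the best-scoring weights
        torch.save(state, os.path.join(out_dir, 'model_best.pth.tar'))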
Example #10
                       '-l',
                       type=str,
                       dest='load',
                       required=True,
                       help='the pre trained model path')
    return parse.parse_args()


if __name__ == "__main__":
    args = get_args()

    image_path = "image.jpg"
    output_path = "output.jpg"
    image_size = 512

    net = UNet(in_channel=1, num_classes=1)

    if args.gpu:
        print("Use CUDA!")
        net = net.cuda()
        net.load_state_dict(torch.load(args.load))
        print("Load model from ", args.load)
    else:
        print("Use CPU!")
        net = net.cpu()
        net.load_state_dict(torch.load(args.load, map_location='cpu'))
        print("Load model from ", args.load)

    #predict_net(net, image_path, output_path, image_size, args.gpu)
    predict_mem_net(net, image_path, output_path, image_size, args.gpu)
Example #11
def load_model(filepath):
    net = UNet(n_channels=3, n_classes=1)
    net.cpu()
    net.load_state_dict(torch.load(filepath, map_location='cpu'))
    return net
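A typical way to use load_model for single-image inference; the checkpoint path, preprocessing, and sigmoid readout are assumptions for illustration, not part of the source:

import numpy as np
import torch
from PIL import Image

net = load_model('checkpoints/CP50.pth')  # hypothetical checkpoint path
net.eval()

img = Image.open('input.png').convert('RGB')           # n_channels=3
x = torch.from_numpy(np.asarray(img, dtype=np.float32) / 255.0)
x = x.permute(2, 0, 1).unsqueeze(0)                    # HWC -> NCHW

with torch.no_grad():                                  # inference only
    prob = torch.sigmoid(net(x))                       # n_classes=1 -> probability map
    mask = (prob > 0.5).squeeze().numpy()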
Example #12
                        help="path to save directory for output masks",
                        default="/data/unagi0/kanayama/dataset/nuclei_images/answer/")

    args = parser.parse_args()
    print("Using model file : {}".format(args.model))
    net = UNet(3, 1)
    net_gray = UNet(3, 1)
    net_color = UNet(3, 1)

    if args.gpu:
        print("Using CUDA version of the net, prepare your GPU !")
        net.cuda()
        net_gray.cuda()
        net_color.cuda()
    else:
        net.cpu()
        net_gray.cpu()
        net_color.cpu()
        print("Using CPU version of the net, this may be very slow")

    print("Loading model ...")
    net.load_state_dict(torch.load(args.model))
    #net_gray.load_state_dict(torch.load('/data/unagi0/kanayama/dataset/nuclei_images/checkpoints/gray4_CP150.pth'))
    #net_color.load_state_dict(torch.load('/data/unagi0/kanayama/dataset/nuclei_images/checkpoints/color4_CP300.pth'))

    print("Model loaded !")

    for file_name in os.listdir(args.test):
        # assumes args.test and args.save end with '/'; os.path.join would be safer
        in_file = args.test + file_name + "/images/" + file_name + ".png"
        out_file = args.save + file_name + ".png"