예제 #1
0
def main(cfg, gpu):
    """Run single-GPU evaluation over both the validation and reconstruction loaders."""
    torch.cuda.set_device(gpu)

    # Encoder/decoder pair from the config; softmax output for inference.
    encoder = ModelBuilder.build_encoder(
        arch=cfg.MODEL.arch_encoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        weights=cfg.MODEL.weights_encoder)
    decoder = ModelBuilder.build_decoder(
        arch=cfg.MODEL.arch_decoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        num_class=cfg.DATASET.num_class,
        weights=cfg.MODEL.weights_decoder,
        use_softmax=True)
    seg_module = SegmentationModule(
        encoder, decoder, nn.NLLLoss(ignore_index=-1))

    # Plain validation loader.
    val_set = ValDataset(cfg.DATASET.root_dataset, cfg.DATASET.list_val,
                         cfg.DATASET)
    loader_val = torch.utils.data.DataLoader(
        val_set,
        batch_size=cfg.VAL.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    # Loader over the reconstruction-dataset variant of the same split.
    rec_set = ValDataset(cfg.DATASET.root_dataset,
                         cfg.DATASET.list_val,
                         cfg.DATASET,
                         rec_dataset=cfg.DATASET.rec_dataset)
    loader_rec = torch.utils.data.DataLoader(
        rec_set,
        batch_size=cfg.VAL.batch_size,  # we have modified data_parallel
        shuffle=False,  # we do not use this param
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    seg_module.cuda()

    # Main loop
    evaluate(seg_module, loader_val, loader_rec, cfg, gpu)

    print('Evaluation Done!')
예제 #2
0
def worker(args, dev_id, start_idx, end_idx, result_queue):
    """Evaluate records [start_idx, end_idx) of the validation set on one GPU."""
    torch.cuda.set_device(dev_id)

    # This worker's slice of the validation records.
    val_set = ValDataset(
        broden_dataset.record_list['validation'], args,
        max_sample=args.num_val, start_idx=start_idx,
        end_idx=end_idx)
    val_loader = torchdata.DataLoader(
        val_set,
        batch_size=args.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=2)

    # Inference-mode network: softmax enabled, no training criterion.
    model_builder = ModelBuilder()
    encoder = model_builder.build_encoder(
        arch=args.arch_encoder,
        fc_dim=args.fc_dim,
        weights=args.weights_encoder)
    decoder = model_builder.build_decoder(
        arch=args.arch_decoder,
        fc_dim=args.fc_dim,
        nr_classes=args.nr_classes,
        weights=args.weights_decoder,
        use_softmax=True)
    seg_module = SegmentationModule(encoder, decoder)
    seg_module.cuda()

    # Main loop
    evaluate(seg_module, val_loader, args, dev_id, result_queue)
예제 #3
0
def main(args):
    """Build the network, optionally restore a checkpoint, and run testing.

    Raises:
        FileNotFoundError: if ``args.resume`` is set but no checkpoint
            file exists at that path.
    """
    # import network architecture
    builder = ModelBuilder()
    model = builder.build_net(arch=args.id,
                              num_input=args.num_input,
                              num_classes=args.num_classes,
                              num_branches=args.num_branches,
                              padding_list=args.padding_list,
                              dilation_list=args.dilation_list)
    model = torch.nn.DataParallel(model, device_ids=list(range(
        args.num_gpus))).cuda()
    cudnn.benchmark = True

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> Loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            state_dict = checkpoint['state_dict']
            model.load_state_dict(state_dict)
            print("=> Loaded checkpoint (epoch {})".format(
                checkpoint['epoch']))
        else:
            # FileNotFoundError is more precise than a bare Exception and is
            # still caught by any caller handling Exception.
            raise FileNotFoundError("=> No checkpoint found at '{}'".format(
                args.resume))

    # NOTE(review): `test_dir` is a module-level global here — confirm it is
    # defined before main() runs.
    tf = ValDataset(test_dir, args)
    test_loader = DataLoader(tf,
                             batch_size=args.batch_size,
                             shuffle=args.shuffle,
                             num_workers=args.num_workers,
                             pin_memory=False)
    test(test_loader, model, args)
예제 #4
0
def main(args):
    """Load one image (random val sample or a file) and print decoded predictions."""
    if args.random:
        print("Randomly select image from ValDataset")
        val_dataset = ValDataset()
        rand_idx = random.randint(0, len(val_dataset) - 1)
        img, label = val_dataset[rand_idx]
    else:
        print("Loading image from {}".format(args.image))
        img = Image.open(args.image)
        img = transforms.functional.resize(img, (HEIGHT, WIDTH))
        img = transforms.functional.to_tensor(img)
    img = img.unsqueeze(0).to(device)  # add batch dimension

    # Checkpoint is loaded onto CPU first; the model moves it to `device`.
    ckpt = torch.load(args.checkpoint,
                      map_location=lambda storage, loc: storage)
    model = LPRModel().to(device)
    model.load_state_dict(ckpt)
    model.eval()
    print('weight has been loaded')

    # Inference needs no autograd graph; no_grad saves memory and time.
    with torch.no_grad():
        predict = model(img)
    predict = torch.argmax(predict, dim=-1).cpu()  # dim= is torch's native kwarg

    if args.random:
        print('label:', decode(label.numpy()))
    print('predictions:', decode(predict[0].numpy()))
def worker(args, gpu_id, start_idx, end_idx, result_queue):
    """Evaluate the [start_idx, end_idx) slice of the validation list on one GPU."""
    torch.cuda.set_device(gpu_id)

    # Validation slice assigned to this worker.
    val_set = ValDataset(args.list_val,
                         args,
                         max_sample=args.num_val,
                         start_idx=start_idx,
                         end_idx=end_idx)
    val_loader = torchdata.DataLoader(val_set,
                                      batch_size=args.batch_size,
                                      shuffle=False,
                                      collate_fn=user_scattered_collate,
                                      num_workers=2)

    # Encoder/decoder pair; softmax enabled because we run inference only.
    model_builder = ModelBuilder()
    encoder = model_builder.build_encoder(arch=args.arch_encoder,
                                          fc_dim=args.fc_dim,
                                          weights=args.weights_encoder)
    decoder = model_builder.build_decoder(arch=args.arch_decoder,
                                          fc_dim=args.fc_dim,
                                          num_class=args.num_class,
                                          weights=args.weights_decoder,
                                          use_softmax=True)

    seg_module = SegmentationModule(
        encoder, decoder, nn.NLLLoss(ignore_index=-1))
    seg_module.cuda()

    # Main loop
    evaluate(seg_module, val_loader, args, gpu_id, result_queue)
예제 #6
0
def main(args):
    """Single-GPU evaluation entry point."""
    torch.cuda.set_device(args.gpu_id)

    # Assemble the segmentation model (inference mode -> softmax output).
    model_builder = ModelBuilder()
    encoder = model_builder.build_encoder(arch=args.arch_encoder,
                                          fc_dim=args.fc_dim,
                                          weights=args.weights_encoder)
    decoder = model_builder.build_decoder(arch=args.arch_decoder,
                                          fc_dim=args.fc_dim,
                                          weights=args.weights_decoder,
                                          use_softmax=True)
    seg_module = SegmentationModule(
        encoder, decoder, nn.NLLLoss(ignore_index=-1))

    # Validation data; un-collated sample lists per batch.
    val_set = ValDataset(args.list_val, args, max_sample=args.num_val)
    val_loader = torchdata.DataLoader(val_set,
                                      batch_size=args.batch_size,
                                      shuffle=False,
                                      collate_fn=user_scattered_collate,
                                      num_workers=5,
                                      drop_last=True)

    seg_module.cuda()

    # Main loop
    evaluate(seg_module, val_loader, args)

    print('Evaluation Done!')
def worker(cfg, gpu_id, start_idx, end_idx, result_queue):
    """Evaluate a [start_idx, end_idx) validation slice on the given GPU."""
    torch.cuda.set_device(gpu_id)

    # This worker's validation slice.
    val_set = ValDataset(cfg.DATASET.root_dataset,
                         cfg.DATASET.list_val,
                         cfg.DATASET,
                         start_idx=start_idx,
                         end_idx=end_idx)
    val_loader = torch.utils.data.DataLoader(
        val_set,
        batch_size=cfg.VAL.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=2)

    # Encoder/decoder in inference mode (softmax output).
    encoder = ModelBuilder.build_encoder(
        arch=cfg.MODEL.arch_encoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        weights=cfg.MODEL.weights_encoder)
    decoder = ModelBuilder.build_decoder(
        arch=cfg.MODEL.arch_decoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        num_class=cfg.DATASET.num_class,
        weights=cfg.MODEL.weights_decoder,
        use_softmax=True)

    seg_module = SegmentationModule(
        encoder, decoder, nn.NLLLoss(ignore_index=-1))
    seg_module.cuda()

    # Main loop
    evaluate(seg_module, val_loader, cfg, gpu_id, result_queue)
예제 #8
0
    def __init__(self, split='trainaug', dtype=None, val=True):
        """Set up logging, data loaders, network, and optimizer for training.

        Args:
            split: name of the training split (default 'trainaug').
            dtype: unused in this constructor — TODO confirm whether it is
                consumed elsewhere.
            val: unused in this constructor — TODO confirm whether it is
                consumed elsewhere.
        """
        # Fixed seeds for reproducible runs.
        torch.manual_seed(66)
        torch.cuda.manual_seed_all(66)

        # Per-experiment log/model directories.
        self.log_dir = osp.join(settings.LOG_DIR, settings.EXP_NAME)
        self.model_dir = osp.join(settings.MODEL_DIR, settings.EXP_NAME)

        ensure_dir(self.log_dir)
        ensure_dir(self.model_dir)

        logger.info('set log dir as %s' % self.log_dir)
        logger.info('set model dir as %s' % self.model_dir)

        # Training progress state.
        self.step = 1
        self.best_mIoU = 0
        self.writer = SummaryWriter(self.log_dir)

        self.split = split

        train_set = TrainDataset(split=split)
        self.train_loader = DataLoader(train_set,
                                       batch_size=settings.TRAIN_BATCH_SIZE,
                                       pin_memory=True,
                                       num_workers=settings.NUM_WORKERS,
                                       shuffle=True,
                                       drop_last=True)

        # Validation always uses the 'val' split with batch size 1.
        val_set = ValDataset(split='val')
        self.val_loader = DataLoader(val_set,
                                     batch_size=1,
                                     shuffle=False,
                                     num_workers=settings.NUM_WORKERS,
                                     drop_last=False)

        self.net = HamNet(settings.N_CLASSES, settings.N_LAYERS).cuda()
        params_count(self.net)

        # Three parameter groups: '1x' with weight decay, '1y' without,
        # and '2x' at twice the base learning rate without decay.
        self.opt = SGD(params=[{
            'params': get_params(self.net, key='1x'),
            'lr': 1 * settings.LR,
            'weight_decay': settings.WEIGHT_DECAY,
        }, {
            'params': get_params(self.net, key='1y'),
            'lr': 1 * settings.LR,
            'weight_decay': 0,
        }, {
            'params': get_params(self.net, key='2x'),
            'lr': 2 * settings.LR,
            'weight_decay': 0.0,
        }],
                       momentum=settings.LR_MOM)

        self.net = DataParallel(self.net)
        # NOTE(review): presumably keeps synchronized batch-norm replicas in
        # sync under DataParallel — confirm against the project helper.
        patch_replication_callback(self.net)
예제 #9
0
    def __init__(self, dt_split):
        """Set up the network and validation loader; zero the metric accumulator."""
        torch.cuda.set_device(settings.DEVICE)

        self.log_dir = settings.LOG_DIR
        self.model_dir = settings.MODEL_DIR

        net = EANet(settings.N_CLASSES, settings.N_LAYERS).cuda()
        self.net = DataParallel(net, device_ids=[settings.DEVICE])

        self.dataloader = DataLoader(ValDataset(split=dt_split),
                                     batch_size=1,
                                     shuffle=False,
                                     num_workers=2,
                                     drop_last=False)
        # Accumulator, presumably a confusion histogram — confirm in caller.
        self.hist = 0
예제 #10
0
    def __init__(self, dt_split):
        """Build the network and validation loader; zero the metric accumulator."""
        self.model_dir = osp.join(settings.MODEL_DIR, settings.EXP_NAME)

        net = HamNet(settings.N_CLASSES, settings.N_LAYERS).cuda()
        self.net = DataParallel(net)

        self.dataloader = DataLoader(ValDataset(split=dt_split),
                                     batch_size=1,
                                     shuffle=False,
                                     num_workers=2,
                                     drop_last=False)
        # Accumulator, presumably a confusion histogram — confirm in caller.
        self.hist = 0
예제 #11
0
    def prepare_data(self):
        """Load the train/validation datasets and log the model graph.

        Builds ``self.train_ds`` and ``self.val_ds`` from the fixed JSON
        files, then traces one batched training sample through the model
        so the graph shows up in the experiment logger.
        """
        data_cfg = self.hparams['data']
        self.train_ds = Dataset(
            './data/articles.json', './data/users_list.json', self.w2v,
            maxlen=data_cfg['maxlen'], pos_num=data_cfg['pos_k'],
            neg_k=data_cfg['neg_k'])
        self.val_ds = ValDataset(
            50, './data/articles.json', './data/users_list.json', self.w2v)
        # One sample with a batch dimension added, for graph tracing.
        sample = [t.unsqueeze(0) for t in self.train_ds[0]]
        self.logger.experiment.add_graph(self.model, sample)
def main(cfg, gpu):
    """Evaluate the configured model on the validation split (single GPU)."""
    torch.cuda.set_device(gpu)

    # Encoder/decoder pair; softmax output for inference.
    encoder = ModelBuilder.build_encoder(
        arch=cfg.MODEL.arch_encoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        weights=cfg.MODEL.weights_encoder)
    decoder = ModelBuilder.build_decoder(
        arch=cfg.MODEL.arch_decoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        num_class=cfg.DATASET.num_class,
        weights=cfg.MODEL.weights_decoder,
        use_softmax=True)

    criterion = nn.NLLLoss(ignore_index=-1)

    # Deep-supervision decoders take an extra scale argument.
    if cfg.MODEL.arch_decoder.endswith('deepsup'):
        seg_module = SegmentationModule(
            encoder, decoder, criterion, cfg.TRAIN.deep_sup_scale)
    else:
        seg_module = SegmentationModule(encoder, decoder, criterion)

    # Validation data and loader.
    val_set = ValDataset(cfg.DATASET.root_dataset, cfg.DATASET.list_val,
                         cfg.DATASET)
    val_loader = torch.utils.data.DataLoader(
        val_set,
        batch_size=cfg.VAL.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    seg_module.cuda()

    # Main loop
    evaluate(seg_module, val_loader, cfg, gpu)

    print('Evaluation Done!')
예제 #13
0
def main(args):
    """Run segmentation inference over the test images on a single GPU."""
    torch.cuda.set_device(args.gpu)

    # Build the model in inference mode (softmax output).
    model_builder = ModelBuilder()
    encoder = model_builder.build_encoder(arch=args.arch_encoder,
                                          fc_dim=args.fc_dim,
                                          weights=args.weights_encoder)
    decoder = model_builder.build_decoder(arch=args.arch_decoder,
                                          fc_dim=args.fc_dim,
                                          num_class=args.num_class,
                                          weights=args.weights_decoder,
                                          use_softmax=True)
    seg_module = SegmentationModule(
        encoder, decoder, nn.NLLLoss(ignore_index=-1))

    # Test data fed straight from the image list in args.
    test_set = ValDataset(args.test_imgs, args, max_sample=args.num_val)
    test_loader = torchdata.DataLoader(test_set,
                                       batch_size=args.batch_size,
                                       shuffle=False,
                                       num_workers=1,
                                       drop_last=True)

    seg_module.cuda()

    # Main loop
    test(seg_module, test_loader, args)

    print('Inference done!')
예제 #14
0
    def prepare_data(self):
        """Load the training and validation datasets.

        Builds ``self.train_ds`` (negative-sampled training data) and
        ``self.val_ds`` (fixed positive/negative validation candidates)
        from the hyper-parameter dictionary.
        """
        # Fixed user-facing typo: "Prepating" -> "Preparing".
        print('Preparing train data...', end=' ')
        self.train_ds = Dataset(self.hparams['all_items'],
                                self.hparams['train_data'],
                                self.w2v,
                                maxlen=self.hparams['title_len'],
                                npratio=self.hparams['train']['npratio'],
                                his_len=self.hparams['his_len'])

        self.val_ds = ValDataset(self.hparams['all_items'],
                                 self.hparams['val_data'],
                                 self.hparams['val_items'],
                                 self.w2v,
                                 maxlen=self.hparams['title_len'],
                                 his_len=self.hparams['his_len'],
                                 pos=self.hparams['val']['pos'],
                                 neg=self.hparams['val']['neg'],
                                 total=self.hparams['val']['total'])
        print('Done')
예제 #15
0
def main(**args):
    r"""Performs the main training loop.

    ``args`` carries the whole configuration: dataset directories,
    patch/batch sizes, the training noise interval, learning-rate
    schedule, AMP options, and logging settings (see the keys read
    below).
    """
    gray_mode = args['gray']  # gray mode indicator
    C = 1 if gray_mode else 3  # number of color channels

    # Load dataset
    print('> Loading datasets ...')
    dataset_val = ValDataset(valsetdir=args['valset_dir'],
                             gray_mode=gray_mode)  # for grayscale/color video
    loader_train = train_dali_loader(batch_size=args['batch_size'],\
                                    file_root=args['trainset_dir'],\
                                    sequence_length=args['temp_patch_size'],\
                                    crop_size=args['patch_size'],\
                                    epoch_size=args['max_number_patches'],\
                                    random_shuffle=True,\
                                    temp_stride=3,\
                                    gray_mode=gray_mode)

    num_minibatches = int(args['max_number_patches'] // args['batch_size'])
    # Index of the central (ground-truth) frame within a temporal patch.
    ctrl_fr_idx = (args['temp_patch_size'] - 1) // 2
    print("\t# of training samples: %d\n" % int(args['max_number_patches']))

    # Init loggers
    writer, logger = init_logging(args)

    # Define GPU devices
    device_ids = [0]
    torch.backends.cudnn.benchmark = True  # CUDNN optimization

    # Create model
    model = FastDVDnet(num_color_channels=C)

    model = model.cuda()

    # Define loss (sum-reduced MSE; normalized per-batch further below)
    criterion = nn.MSELoss(reduction='sum')
    criterion.cuda()

    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=args['lr'])

    # [AMP initialization] automated half-precision training
    if args['fp16']:
        model, optimizer = amp.initialize(model,
                                          optimizer,
                                          opt_level=args['amp_opt_level'])

    model = nn.DataParallel(model)

    # Resume training or start anew
    start_epoch, training_params = resume_training(args, model, optimizer)

    # Training
    start_time = time.time()
    for epoch in range(start_epoch, args['epochs']):
        # Set learning rate; reset_orthog permanently disables the
        # orthogonalization regularizer once triggered.
        current_lr, reset_orthog = lr_scheduler(epoch, args)
        if reset_orthog:
            training_params['no_orthog'] = True

        # set learning rate in optimizer
        for param_group in optimizer.param_groups:
            param_group["lr"] = current_lr
        print('\nlearning rate %f' % current_lr)

        # train

        for i, data in enumerate(loader_train, 0):

            # Pre-training step
            model.train()

            # When optimizer = optim.Optimizer(net.parameters()) we only zero the optim's grads
            optimizer.zero_grad()

            # convert inp to [N, num_frames*C. H, W] in  [0., 1.] from [N, num_frames, C. H, W] in [0., 255.]
            # extract ground truth (central frame)
            img_train, gt_train = normalize_augment(data[0]['data'],
                                                    ctrl_fr_idx, gray_mode)
            N, _, H, W = img_train.size()

            # One noise std-dev per sequence, drawn uniformly from noise_ival.
            stdn = torch.empty(
                (N, 1, 1, 1)).cuda().uniform_(args['noise_ival'][0],
                                              to=args['noise_ival'][1])
            # draw noise samples from std dev tensor
            noise = torch.zeros_like(img_train)
            noise = torch.normal(mean=noise, std=stdn.expand_as(noise))

            # define noisy input
            imgn_train = img_train + noise

            # Send tensors to GPU
            gt_train = gt_train.cuda(non_blocking=True)
            imgn_train = imgn_train.cuda(non_blocking=True)
            noise = noise.cuda(non_blocking=True)
            noise_map = stdn.expand(
                (N, 1, H, W)).cuda(non_blocking=True)  # one channel per image

            # Evaluate model and optimize it
            out_train = model(imgn_train, noise_map)

            # Compute loss (argument order is irrelevant for MSE)
            loss = criterion(gt_train, out_train) / (N * 2)

            # [AMP scale loss to avoid overflow of float16] automated mixed precision training
            if args['fp16']:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            optimizer.step()

            # Results
            if training_params['step'] % args['save_every'] == 0:
                # Apply regularization by orthogonalizing filters
                if not training_params['no_orthog']:
                    model.apply(svd_orthogonalization)

                # Compute training PSNR
                log_train_psnr(out_train, \
                                gt_train, \
                                loss, \
                                writer, \
                                epoch, \
                                i, \
                                num_minibatches, \
                                training_params)
            # update step counter
            training_params['step'] += 1

        # save model and checkpoint
        training_params['start_epoch'] = epoch + 1
        save_model_checkpoint(model, args, optimizer, training_params, epoch)

        # Call to model.eval() to correctly set the BN layers before inference
        model.eval()

        # Validation and log images
        validate_and_log(
                        model_temp=model, \
                        dataset_val=dataset_val, \
                        valnoisestd=args['val_noiseL'], \
                        temp_psz=args['temp_patch_size'], \
                        writer=writer, \
                        epoch=epoch, \
                        lr=current_lr, \
                        logger=logger, \
                        trainimg=img_train, \
                        gray_mode=gray_mode
                        )

    # Print elapsed time
    elapsed_time = time.time() - start_time
    print('Elapsed time {}'.format(
        time.strftime("%H:%M:%S", time.gmtime(elapsed_time))))

    # Close logger file
    close_logger(logger)
예제 #16
0
File: train.py  Project: w86763777/hcc-ml
def main(args):
    """Train LPRModel, logging per-epoch loss/accuracy and saving checkpoints.

    Accuracy is per-character: mean over the label sequence, summed over
    the batch, then divided by the dataset size at epoch end.
    """
    train_loader = DataLoader(dataset=TrainDataset(),
                              batch_size=args.batch_size,
                              shuffle=True)
    val_loader = DataLoader(dataset=ValDataset(), batch_size=args.batch_size)

    model = LPRModel()
    model = model.to(device)
    loss_fn = torch.nn.CrossEntropyLoss()
    # NOTE: local name `optim` shadows a torch.optim module import, if any.
    optim = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

    train_writer = SummaryWriter(logdir=os.path.join(args.log_dir, 'train'))
    val_writer = SummaryWriter(os.path.join(args.log_dir, 'val'))
    for epoch in range(1, args.epochs + 1):
        avg_loss = 0
        avg_acc = 0
        model.train()
        with tqdm(train_loader, ncols=0, leave=False) as pbar:
            pbar.set_description('Epoch %3d/%3d' % (epoch, args.epochs))
            for image, label in pbar:
                image = image.to(device)
                label = label.to(device)
                predict = model(image)
                # transpose puts the class dimension where CrossEntropyLoss
                # expects it (dim 1).
                loss = loss_fn(predict.transpose(1, 2), label)
                optim.zero_grad()
                loss.backward()
                optim.step()

                predict = torch.argmax(predict, dim=-1)
                acc = (label == predict).float().mean(dim=1).sum().cpu()
                avg_loss += loss.item()
                avg_acc += acc.item()
                pbar.set_postfix(loss='%.4f' % loss)
            avg_loss = avg_loss / len(train_loader)
            avg_acc = avg_acc / len(train_loader.dataset)
            train_writer.add_scalar('loss', avg_loss, epoch)
            train_writer.add_scalar('acc', avg_acc, epoch)

        # Validation pass: same metrics, no gradient tracking.
        avg_val_loss = 0
        avg_val_acc = 0
        model.eval()
        with torch.no_grad():
            for image, label in val_loader:
                image = image.to(device)
                label = label.to(device)
                predict = model(image)
                loss = loss_fn(predict.transpose(1, 2), label)
                predict = torch.argmax(predict, dim=-1)
                acc = (label == predict).float().mean(dim=1).sum().cpu()
                avg_val_loss += loss.item()
                avg_val_acc += acc.item()
        avg_val_loss = avg_val_loss / len(val_loader)
        avg_val_acc = avg_val_acc / len(val_loader.dataset)
        val_writer.add_scalar('loss', avg_val_loss, epoch)
        val_writer.add_scalar('acc', avg_val_acc, epoch)
        print(
            'Epoch %3d/%3d train_loss: %.4f, val_loss: %.4f, '
            'train_acc: %.4f, val_acc: %.4f' %
            (epoch, args.epochs, avg_loss, avg_val_loss, avg_acc, avg_val_acc))
        if epoch % args.checkpoint_period == 0:
            path = os.path.join(args.checkpoint_dir, 'ckpt%d.pt' % epoch)
            os.makedirs(args.checkpoint_dir, exist_ok=True)
            torch.save(model.state_dict(), path)
예제 #17
0
def main(args):
    """Evaluate the trained segmentation network on whole test volumes.

    For each test volume: load images/label/mask, split the volume into
    segments, score each z-segment with the network, stitch per-segment
    predictions back into a full-volume prediction, and accumulate the
    Enhance/Core/Whole dice scores, reporting mean and std at the end.
    """
    # import network architecture
    builder = ModelBuilder()
    model = builder.build_net(arch=args.id,
                              num_input=args.num_input,
                              num_classes=args.num_classes,
                              num_branches=args.num_branches,
                              padding_list=args.padding_list,
                              dilation_list=args.dilation_list)
    # NOTE(review): GPU ids are hard-coded instead of derived from
    # args.num_gpus — confirm this matches the target machine.
    device_ids = [0, 2]
    model = torch.nn.DataParallel(model, device_ids=device_ids).cuda()
    cudnn.benchmark = True

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> Loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            state_dict = checkpoint['state_dict']
            model.load_state_dict(state_dict)
            print("=> Loaded checkpoint (epoch {})".format(
                checkpoint['epoch']))
        else:
            raise Exception("=> No checkpoint found at '{}'".format(
                args.resume))

    # initialization
    num_ignore = 0  # volumes skipped (a score row summed to zero)
    # Margin between crop size and center size along each axis.
    margin = [args.crop_size[k] - args.center_size[k] for k in range(3)]
    # Every args.num_input consecutive entries of test_dir form one volume.
    num_images = int(len(test_dir) / args.num_input)
    dice_score = np.zeros([num_images, 3]).astype(float)

    for i in range(num_images):
        # load the images, label and mask
        im = []
        for j in range(args.num_input):
            direct, _ = test_dir[args.num_input * i + j].split("\n")
            name = direct
            if j < args.num_input - 1:
                # NOTE(review): nibabel's get_data() is deprecated in favor of
                # get_fdata() — behavior kept as-is here.
                image = nib.load(args.root_path + direct + '.gz').get_data()
                image = np.expand_dims(image, axis=0)
                im.append(image)
                if j == 0:
                    mask = nib.load(args.root_path + direct +
                                    "/mask.nii.gz").get_data()
            else:
                # The last entry of each group is the label volume.
                labels = nib.load(args.root_path + direct + '.gz').get_data()

        images = np.concatenate(im, axis=0).astype(float)

        # divide the input images input small image segments
        # return the padding input images which can be divided exactly
        image_pad, mask_pad, label_pad, num_segments, padding_index, index = segment(
            images, mask, labels, args)

        # initialize prediction for the whole image as background
        labels_shape = list(labels.shape)
        labels_shape.append(args.num_classes)
        pred = np.zeros(labels_shape)
        pred[:, :, :, 0] = 1

        # initialize the prediction for a small segmentation as background
        pad_shape = [
            int(num_segments[k] * args.center_size[k]) for k in range(3)
        ]
        pad_shape.append(args.num_classes)
        pred_pad = np.zeros(pad_shape)
        pred_pad[:, :, :, 0] = 1

        # score_per_image stores the sum of each image
        score_per_image = np.zeros([3, 3])
        # iterate over the z dimension
        for idz in range(num_segments[2]):
            tf = ValDataset(image_pad, label_pad, mask_pad, num_segments, idz,
                            args)
            test_loader = DataLoader(tf,
                                     batch_size=args.batch_size,
                                     shuffle=args.shuffle,
                                     num_workers=args.num_workers,
                                     pin_memory=False)
            score_seg, pred_seg = test(test_loader, model, num_segments, args)
            # Write this z-slab's prediction into the padded volume.
            pred_pad[:, :, idz * args.center_size[2]:(idz + 1) *
                     args.center_size[2], :] = pred_seg
            score_per_image += score_seg

        # decide the start and end point in the original image
        for k in range(3):
            if index[0][k] == 0:
                index[0][k] = int(margin[k] / 2 - padding_index[0][k])
            else:
                index[0][k] = int(margin[k] / 2 + index[0][k])

            index[1][k] = int(
                min(index[0][k] + num_segments[k] * args.center_size[k],
                    labels.shape[k]))

        dist = [index[1][k] - index[0][k] for k in range(3)]
        pred[index[0][0]:index[1][0], index[0][1]:index[1][1],
             index[0][2]:index[1][2]] = pred_pad[:dist[0], :dist[1], :dist[2]]

        # Skip the volume when any of the three score rows sums to zero
        # (the dice ratio below would divide by zero).
        if np.sum(score_per_image[0, :]) == 0 or np.sum(
                score_per_image[1, :]) == 0 or np.sum(
                    score_per_image[2, :]) == 0:
            num_ignore += 1
            continue
        # compute the Enhance, Core and Whole dice score
        dice_score_per = [
            2 * np.sum(score_per_image[k, 2]) /
            (np.sum(score_per_image[k, 0]) + np.sum(score_per_image[k, 1]))
            for k in range(3)
        ]
        print(
            'Image: %d, Enhance score: %.4f, Core score: %.4f, Whole score: %.4f'
            % (i, dice_score_per[0], dice_score_per[1], dice_score_per[2]))

        dice_score[i, :] = dice_score_per

        if args.visualize:
            vis = np.argmax(pred, axis=3)
            vis = np.swapaxes(vis, 0, 2).astype(dtype=np.uint8)
            visualize_result(name, vis, args)

    # Aggregate statistics over the volumes that were not skipped.
    count_image = num_images - num_ignore
    dice_score = dice_score[:count_image, :]
    mean_dice = np.mean(dice_score, axis=0)
    std_dice = np.std(dice_score, axis=0)
    print('Evalution Done!')
    print(
        'Enhance score: %.4f, Core score: %.4f, Whole score: %.4f, Mean Dice score: %.4f'
        % (mean_dice[0], mean_dice[1], mean_dice[2], np.mean(mean_dice)))
    print(
        'Enhance std: %.4f, Core std: %.4f, Whole std: %.4f, Mean Std: %.4f' %
        (std_dice[0], std_dice[1], std_dice[2], np.mean(std_dice)))
예제 #18
0
import torch
import torchvision
from torch.utils import data as Data
from torch.utils.data import DataLoader
from torchsummary import summary
from tqdm import tqdm

from dataset import MyDataset, ValDataset, FewDataset
from modules import Siamese, Siamese_Ultra

if __name__ == '__main__':
    # Pick the GPU when one is available, otherwise fall back to the CPU.
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # Training hyper-parameters.
    # NOTE(review): BATCH_SIZE=150 is declared but the train loader below uses
    # batch_size=200 — confirm which value is intended.
    EPOCH = 2000
    PATIENCE = 100
    BATCH_SIZE = 150

    # Datasets: full training set and the held-out validation set.
    train_data = MyDataset()
    #train_data = FewDataset(k=10,seed=1127)
    val_data = ValDataset()

    # Loaders: only the training stream is shuffled.
    train_loader = DataLoader(train_data,
                              num_workers=0,
                              batch_size=200,
                              shuffle=True)
    val_loader = DataLoader(val_data,
                            num_workers=0,
                            batch_size=1000,
                            shuffle=False)

    # ===== Loading pre_trained model =====
예제 #19
0
def main(**args):
    """Run the main FastDVDnet training loop.

    Builds the validation dataset and the DALI training loader, creates a
    FastDVDnet model wrapped in DataParallel, and trains it with Adam under a
    summed-MSE loss, logging PSNR periodically and validating/checkpointing
    once per epoch.

    Args:
        **args: configuration dictionary. Keys used here include
            'valset_dir', 'gt_valset_dir', 'trainset_dir', 'gt_trainset_dir',
            'temp_patch_size', 'patch_size', 'max_number_patches',
            'batch_size', 'device_id', 'lr', 'epochs', 'save_every'.
    """
    # Load dataset
    print('> Loading datasets ...')
    dataset_val = ValDataset(valsetdir=args['valset_dir'],
                             gtvalsetdir=args['gt_valset_dir'],
                             gray_mode=False)
    loader_train = train_dali_loader(batch_size=args['batch_size'],\
            file_root=args['trainset_dir'],\
            gt_file_root=args['gt_trainset_dir'],\
            sequence_length=args['temp_patch_size'],\
            crop_size=args['patch_size'],\
            epoch_size=args['max_number_patches'],\
            device_id=args['device_id'][0],\
            random_shuffle=True,\
            temp_stride=3)

    num_minibatches = int(args['max_number_patches'] // args['batch_size'])
    # Index of the central frame in a temporal patch — used as ground truth.
    ctrl_fr_idx = (args['temp_patch_size'] - 1) // 2
    print("\t# of training samples: %d\n" % int(args['max_number_patches']))

    # Init loggers
    writer, logger = init_logging(args)

    # Define GPU devices
    device_ids = args['device_id']
    torch.backends.cudnn.benchmark = True  # CUDNN optimization

    # Create model
    model = FastDVDnet()
    model = nn.DataParallel(model, device_ids=device_ids).cuda()

    # Define loss; summed MSE is normalized manually below.
    criterion = nn.MSELoss(reduction='sum')
    criterion.cuda()

    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=args['lr'])

    # Resume training or start anew
    start_epoch, training_params = resume_training(args, model, optimizer)

    # Training
    start_time = time.time()
    for epoch in range(start_epoch, args['epochs']):
        # Set learning rate; reset_orthog disables further orthogonalization.
        current_lr, reset_orthog = lr_scheduler(epoch, args)
        if reset_orthog:
            training_params['no_orthog'] = True

        # set learning rate in optimizer
        for param_group in optimizer.param_groups:
            param_group["lr"] = current_lr
        print('\nlearning rate %f' % current_lr)

        # train
        for i, data in enumerate(loader_train, 0):
            # Pre-training step
            model.train()

            # When optimizer = optim.Optimizer(net.parameters()) we only zero the optim's grads
            optimizer.zero_grad()

            # convert inp to [N, num_frames*C, H, W] in [0., 1.] from
            # [N, num_frames, C, H, W] in [0., 255.];
            # extract ground truth (central frame)
            img_train, gt_train = normalize_augment(data[0], ctrl_fr_idx)

            N, _, H, W = img_train.size()
            # Send tensors to GPU
            gt_train = gt_train.cuda(non_blocking=True)
            img_train = img_train.cuda(non_blocking=True)

            # Evaluate model and optimize it
            out_train = model(img_train)

            # Summed MSE normalized by 2N (the 1/2-MSE convention).
            loss = criterion(gt_train, out_train) / (N * 2)
            loss.backward()
            optimizer.step()

            # Results
            if training_params['step'] % args['save_every'] == 0:
                # Apply regularization by orthogonalizing filters
                if not training_params['no_orthog']:
                    model.apply(svd_orthogonalization)

                # Compute training PSNR
                log_train_psnr(out_train, \
                    gt_train, \
                    loss, \
                    writer, \
                    epoch, \
                    i, \
                    num_minibatches, \
                    training_params)
            # update step counter
            training_params['step'] += 1

        # Call to model.eval() to correctly set the BN layers before inference
        model.eval()

        # Validation and log images
        # NOTE(review): img_train is undefined if loader_train yielded no
        # batches this epoch — assumes a non-empty training loader.
        validate_and_log(
            model_temp=model, \
            dataset_val=dataset_val, \
            valnoisestd=0, \
            temp_psz=args['temp_patch_size'], \
            writer=writer, \
            epoch=epoch, \
            lr=current_lr, \
            logger=logger, \
            trainimg=img_train
            )

        # save model and checkpoint
        training_params['start_epoch'] = epoch + 1
        save_model_checkpoint(model, args, optimizer, training_params, epoch)

    # Print elapsed time
    elapsed_time = time.time() - start_time
    print('Elapsed time {}'.format(
        time.strftime("%H:%M:%S", time.gmtime(elapsed_time))))

    # Close logger file
    close_logger(logger)
def main(cfg, gpus):
    """Train a segmentation network, optionally evaluating every few epochs.

    Builds the encoder/decoder, the training (and optional validation)
    loaders and optimizers, then runs the epoch loop, checkpointing after
    every epoch and tracking the best (IoU + accuracy) / 2 score.

    Args:
        cfg: experiment config (MODEL / DATASET / TRAIN / VAL sections).
        gpus: list of GPU ids; more than one enables scattered data
            parallelism with synchronized batch norm.
    """
    torch.backends.cudnn.enabled = False
    # Network Builders
    net_encoder = ModelBuilder.build_encoder(
        arch=cfg.MODEL.arch_encoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        weights=cfg.MODEL.weights_encoder)
    net_decoder = ModelBuilder.build_decoder(
        arch=cfg.MODEL.arch_decoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        num_class=cfg.DATASET.num_class,
        weights=cfg.MODEL.weights_decoder)

    # OCR decoders train with plain cross entropy; the others expect
    # log-probabilities and use NLL (-1 marks ignored pixels in both).
    if cfg.MODEL.arch_decoder == 'ocr':
        print('Using cross entropy loss')
        crit = CrossEntropy(ignore_label=-1)
    else:
        crit = nn.NLLLoss(ignore_index=-1)

    # Deep-supervision decoders additionally need the auxiliary loss scale.
    if cfg.MODEL.arch_decoder.endswith('deepsup'):
        segmentation_module = SegmentationModule(net_encoder, net_decoder,
                                                 crit,
                                                 cfg.TRAIN.deep_sup_scale)
    else:
        segmentation_module = SegmentationModule(net_encoder, net_decoder,
                                                 crit)

    # Dataset and Loader
    dataset_train = TrainDataset(cfg.DATASET.root_dataset,
                                 cfg.DATASET.list_train,
                                 cfg.DATASET,
                                 batch_per_gpu=cfg.TRAIN.batch_size_per_gpu)

    loader_train = torch.utils.data.DataLoader(
        dataset_train,
        batch_size=len(gpus),  # one pre-batched sample per GPU
        shuffle=False,  # parameter is not used
        collate_fn=user_scattered_collate,
        num_workers=cfg.TRAIN.workers,
        drop_last=True,
        pin_memory=True)
    # create loader iterator
    iterator_train = iter(loader_train)
    print('1 Epoch = {} iters'.format(cfg.TRAIN.epoch_iters))

    if cfg.TRAIN.eval:
        # Dataset and Loader for validation data
        dataset_val = ValDataset(cfg.DATASET.root_dataset,
                                 cfg.DATASET.list_val, cfg.DATASET)
        loader_val = torch.utils.data.DataLoader(
            dataset_val,
            batch_size=cfg.VAL.batch_size,
            shuffle=False,
            collate_fn=user_scattered_collate,
            num_workers=5,
            drop_last=True)
        # NOTE(review): a single iterator over loader_val is shared across
        # all evaluations — confirm evaluate() re-iterates safely.
        iterator_val = iter(loader_val)

    # load nets into gpu
    if len(gpus) > 1:
        segmentation_module = UserScatteredDataParallel(segmentation_module,
                                                        device_ids=gpus)
        # For sync bn
        patch_replication_callback(segmentation_module)
    segmentation_module.cuda()

    # Set up optimizers
    nets = (net_encoder, net_decoder, crit)
    optimizers = create_optimizers(nets, cfg)

    # Main loop
    history = {
        'train': {
            'epoch': [],
            'loss': [],
            'acc': [],
            'last_score': 0,
            'best_score': cfg.TRAIN.best_score
        }
    }
    for epoch in range(cfg.TRAIN.start_epoch, cfg.TRAIN.num_epoch):
        train(segmentation_module, iterator_train, optimizers, history,
              epoch + 1, cfg)
        # Calculate segmentation score every eval_step-th epoch.
        # BUG FIX: range() accepts no keyword arguments — the original
        # `range(start, stop, step=...)` raised TypeError at runtime; the
        # step must be passed positionally.
        if cfg.TRAIN.eval and epoch in range(cfg.TRAIN.start_epoch,
                                             cfg.TRAIN.num_epoch,
                                             cfg.TRAIN.eval_step):
            iou, acc = evaluate(segmentation_module, iterator_val, cfg, gpus)
            history['train']['last_score'] = (iou + acc) / 2
            if history['train']['last_score'] > history['train']['best_score']:
                history['train']['best_score'] = history['train']['last_score']
                checkpoint(nets, history, cfg, 'best_score')
        # checkpointing
        checkpoint(nets, history, cfg, epoch + 1)
    print('Training Done!')
예제 #21
0
def main(cfg, gpu):
    """Adversarial examples for semantic segmentation models evaluated over all images in a list file."""

    # Save the command line args used to run the program.
    # `with` guarantees the file is closed even if a write fails.
    cmdline_path = os.path.join(
        cfg.ATTACK.output_dir,
        'cmdline' + str(datetime.datetime.now()).replace(' ', '_') + '.txt')
    with open(cmdline_path, 'w') as f_cmdline:
        for arg in sys.argv:
            f_cmdline.write(arg + " ")

    # BUG FIX: the original `torch.cuda.device(gpu)` only constructs a
    # context-manager object without entering it, so the current device was
    # never changed; set_device() actually selects the GPU (and matches the
    # other drivers in this codebase).
    torch.cuda.set_device(gpu)

    cfg.MODEL.model_name = os.path.basename(cfg.MODEL.arch_encoder + '_' +
                                            cfg.MODEL.arch_decoder)

    cfg.MODEL.arch_encoder = cfg.MODEL.arch_encoder.lower()
    cfg.MODEL.arch_decoder = cfg.MODEL.arch_decoder.lower()

    net_encoder = ModelBuilder.build_encoder(arch=cfg.MODEL.arch_encoder,
                                             fc_dim=cfg.MODEL.fc_dim,
                                             weights=cfg.MODEL.weights_encoder)

    net_decoder = ModelBuilder.build_decoder(arch=cfg.MODEL.arch_decoder,
                                             fc_dim=cfg.MODEL.fc_dim,
                                             num_class=cfg.DATASET.num_class,
                                             weights=cfg.MODEL.weights_decoder,
                                             use_softmax=True)

    # NLL over log-probabilities; -1 marks unlabeled pixels.
    criterion = nn.NLLLoss(ignore_index=-1)

    segmentation_module = SegmentationModule(net_encoder, net_decoder,
                                             criterion)

    # Dataset and Loader for the clean (unperturbed) images
    orig_dataset = ValDataset(cfg.DATASET.root_dataset, cfg.ATTACK.list_attack,
                              cfg.DATASET)

    orig_loader = torch.utils.data.DataLoader(
        orig_dataset,
        batch_size=cfg.ATTACK.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    segmentation_module.cuda()

    # Baseline predictions on the clean images
    orig_pred, _, orig_results = evaluate(segmentation_module, orig_loader,
                                          cfg, 'orig', gpu)

    # Generate adversarial images with the configured attack method;
    # the attack returns the list file of perturbed images.
    attack_dataset = AttackDataset(cfg.DATASET.root_dataset,
                                   cfg.ATTACK.list_attack, cfg.DATASET)

    adv_func_args = get_adv_func_args(cfg, segmentation_module, attack_dataset,
                                      gpu)
    list_adv, _ = adv_attacks[cfg.ATTACK.method](**adv_func_args)

    # Re-evaluate on the perturbed images
    pert_dataset = ValDataset(cfg.DATASET.root_dataset, list_adv, cfg.DATASET)

    pert_loader = torch.utils.data.DataLoader(
        pert_dataset,
        batch_size=cfg.ATTACK.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    adv_pred, _, adv_results = evaluate(segmentation_module, pert_loader, cfg,
                                        'pert', gpu)

    print('Segmentation attack processing complete!')
예제 #22
0
def main(args):
    """Evaluate a segmentation model (encoder/decoder pair or U-Net).

    Builds either an encoder/decoder pair or a U-Net depending on
    ``args.unet``, wraps it in a SegmentationModule with an NLL loss, then
    runs evaluation over the validation list.

    Args:
        args: parsed command-line namespace; fields used here include gpu,
            unet, arch_*/weights_*/fc_dim, num_class, list_val, num_val and
            batch_size.
    """
    torch.cuda.set_device(args.gpu)

    # Network Builders
    builder = ModelBuilder()

    net_encoder = None
    net_decoder = None
    unet = None

    # Truthiness test instead of `== False` comparison (PEP 8 idiom).
    if not args.unet:
        net_encoder = builder.build_encoder(arch=args.arch_encoder,
                                            fc_dim=args.fc_dim,
                                            weights=args.weights_encoder)
        net_decoder = builder.build_decoder(arch=args.arch_decoder,
                                            fc_dim=args.fc_dim,
                                            num_class=args.num_class,
                                            weights=args.weights_decoder,
                                            use_softmax=True)
    else:
        unet = builder.build_unet(num_class=args.num_class,
                                  arch=args.arch_unet,
                                  weights=args.weights_unet,
                                  use_softmax=True)

    crit = nn.NLLLoss()

    if not args.unet:
        segmentation_module = SegmentationModule(net_encoder, net_decoder,
                                                 crit)
    else:
        # net_encoder/net_decoder remain None here; the module runs `unet`.
        segmentation_module = SegmentationModule(net_encoder,
                                                 net_decoder,
                                                 crit,
                                                 is_unet=args.unet,
                                                 unet=unet)

    # Dataset and Loader
    dataset_val = ValDataset(args.list_val, args, max_sample=args.num_val)

    loader_val = torchdata.DataLoader(dataset_val,
                                      batch_size=args.batch_size,
                                      shuffle=False,
                                      collate_fn=user_scattered_collate,
                                      num_workers=5,
                                      drop_last=True)

    segmentation_module.cuda()

    # Main loop
    evaluate(segmentation_module, loader_val, args)

    print('Evaluation Done!')
        arch=cfg.MODEL.arch_encoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        weights=cfg.MODEL.weights_encoder)
    net_decoder = ModelBuilder.build_decoder(
        arch=cfg.MODEL.arch_decoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        num_class=cfg.DATASET.num_class,
        weights=cfg.MODEL.weights_decoder,
        use_softmax=True)

    crit = nn.NLLLoss(ignore_index=-1)

    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)

    # Dataset and Loader
    dataset_val = ValDataset(cfg.DATASET.root_dataset, cfg.DATASET.list_val,
                             cfg.DATASET)
    loader_val = torch.utils.data.DataLoader(dataset_val,
                                             batch_size=BATCH_SIZE,
                                             shuffle=False,
                                             collate_fn=user_scattered_collate,
                                             num_workers=5,
                                             drop_last=True)

    segmentation_module.cuda()

    # Main loop
    evaluate(segmentation_module, loader_val, cfg, gpu, model['model_name'],
             model['arxiv_id'])

    print('Evaluation Done!')
예제 #24
0
def main(cfg, gpu, eval=False, interactive=False):
    """Train or evaluate a Bayesian segmentation model.

    Behavior depends on the two flags:
      * eval=False, interactive=False: run the training loop and checkpoint
        the decoder after every epoch.
      * eval=True: load the epoch-10 decoder checkpoint and run evaluation.
      * interactive=True: build model/datasets and load the checkpoint, then
        return immediately so the module-level globals can be inspected.

    NOTE(review): the parameter name `eval` shadows the builtin; it is kept
    unchanged for caller compatibility.
    """
    # State is exposed as module globals for interactive (REPL) inspection.
    global net_encoder, net_decoder, segmentation_module, \
        dataset_train, loader_train, iterator_train, history, epoch, \
        dataset_val, loader_val
    # Network Builders
    net_encoder = ModelBuilder.build_encoder(
        arch=cfg.MODEL.arch_encoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        weights=cfg.MODEL.weights_encoder).cuda()
    # Base decoder wrapped in BayesianC1_2 (the Bayesian head).
    net_decoder = BayesianC1_2(
        ModelBuilder.build_decoder(arch=cfg.MODEL.arch_decoder.lower(),
                                   fc_dim=cfg.MODEL.fc_dim,
                                   num_class=cfg.DATASET.num_class,
                                   weights=cfg.MODEL.weights_decoder).cuda())

    segmentation_module = BayesianSegmentationModule(net_encoder, net_decoder)

    # Evaluation/interactive modes start from the saved epoch-10 decoder.
    if eval or interactive:
        load_checkpoint(net_decoder, cfg, 10)

    # Dataset and Loader
    if not eval or interactive:
        dataset_train = TrainDataset(
            cfg.DATASET.root_dataset,
            cfg.DATASET.list_train,
            cfg.DATASET,
            batch_per_gpu=cfg.TRAIN.batch_size_per_gpu)

        loader_train = torch.utils.data.DataLoader(
            dataset_train,
            batch_size=1,
            shuffle=False,
            collate_fn=lambda batch: batch[0],  # each item is already a batch
            num_workers=cfg.TRAIN.workers,
            drop_last=True,
            pin_memory=True)
        print('1 Epoch = {} iters'.format(cfg.TRAIN.epoch_iters))

        iterator_train = iter(loader_train)

    if eval or interactive:
        dataset_val = ValDataset(cfg.DATASET.root_dataset,
                                 cfg.DATASET.list_val, cfg.DATASET)
        loader_val = torch.utils.data.DataLoader(
            dataset_val,
            batch_size=cfg.VAL.batch_size,
            shuffle=False,
            collate_fn=user_scattered_collate,
            num_workers=5,
            drop_last=True)

    # Interactive mode: everything needed now lives in globals; stop here.
    if interactive:
        return

    if eval:
        evaluate(segmentation_module, loader_val, cfg, gpu)
        print('Evaluation Done!')
    else:
        # Main loop
        history = {'train': {'epoch': [], 'loss': [], 'acc': []}}

        for epoch in range(cfg.BAYESIAN.start_epoch, cfg.BAYESIAN.num_epoch):
            train(segmentation_module, iterator_train, history, epoch + 1, cfg,
                  gpu)

            # checkpointing
            checkpoint(net_decoder, history, cfg, epoch + 1)

        print('Training Done!')