コード例 #1
0
def get_numpy_dataset(task, root_dir):
    """Build the dataset loader and return the train/test splits as numpy arrays.

    :param task: the string task to use
    :param root_dir: the directory to save and load MNIST from
    :returns: two numpy arrays, training and test
    :rtype: (np.array, np.array)

    """
    # celeba variants have dedicated preprocessing entry points
    celeba_handlers = {'celeba': preprocess_celeba,
                       'celeba140': preprocess_celeba140}
    if task in celeba_handlers:
        return celeba_handlers[task](root_dir)

    # all other datasets are routed through our standard dataset loader;
    # the loader expects both an args-namespace and the same values as kwargs
    loader_config = {'batch_size': 1, 'data_dir': root_dir,
                     'task': task, 'cuda': False}
    Args = namedtuple("Args", "batch_size data_dir task cuda")
    loader = get_loader(Args(**loader_config), **loader_config)

    # gather the training and test datasets in numpy
    return (data_loader_to_np(loader.train_loader),
            data_loader_to_np(loader.test_loader))
コード例 #2
0
def main(config):
    """Entry point: dump the config, seed RNGs, build the loader and train/test."""
    # pretty-print the run configuration for the log
    pprint.PrettyPrinter(indent=4).pprint(config)

    # ensure directories are setup
    prepare_dirs(config)

    # ensure reproducibility
    torch.manual_seed(config.random_seed)
    kwargs = {}
    if config.cuda:
        torch.cuda.manual_seed(config.random_seed)
        # kwargs = {'num_workers': 1, 'pin_memory': True}

    # resize every sample to the configured (height, width)
    resize = torchvision.transforms.Resize(size=(config.height,
                                                 config.width))
    dl = get_loader(config, transform=[resize], **vars(config))

    # instantiate trainer
    trainer = Trainer(config, dl)

    if config.is_train:
        # train from scratch, persisting the config first
        save_config(config)
        trainer.train()
    else:
        # or load a pretrained model and test
        trainer.test()
コード例 #3
0
ファイル: supervised_main.py プロジェクト: jramapuram/ml_base
def build_loader_model_grapher(args):
    """builds a model, a dataloader and a grapher

    :param args: argparse namespace; mutated in place with dataset sizing info
    :returns: a dataloader, a model and a grapher (grapher is None off rank 0)
    :rtype: tuple

    """
    train_transform, test_transform = build_train_and_test_transforms()
    loader_dict = {'train_transform': train_transform,
                   'test_transform': test_transform,
                   **vars(args)}
    loader = get_loader(**loader_dict)

    # set the input tensor shape (ignoring batch dimension) and related dataset sizing
    args.input_shape = loader.input_shape
    args.num_train_samples = loader.num_train_samples // args.num_replicas
    args.num_test_samples = loader.num_test_samples  # Test isn't currently split across devices
    args.num_valid_samples = loader.num_valid_samples // args.num_replicas
    args.steps_per_train_epoch = args.num_train_samples // args.batch_size  # drop-remainder
    args.total_train_steps = args.epochs * args.steps_per_train_epoch

    # build the network by name from the model-zoo style dict
    network = models.__dict__[args.arch](pretrained=args.pretrained, num_classes=loader.output_size)
    network = nn.SyncBatchNorm.convert_sync_batchnorm(network) if args.convert_to_sync_bn else network
    network = torch.jit.script(network) if args.jit else network
    network = network.cuda() if args.cuda else network
    # presumably runs a dummy forward pass over one batch to materialize
    # lazily-constructed modules before weight init -- TODO confirm
    lazy_generate_modules(network, loader.train_loader)
    network = layers.init_weights(network, init=args.weight_initialization)

    if args.num_replicas > 1:
        print("wrapping model with DDP...")
        network = layers.DistributedDataParallelPassthrough(network,
                                                            device_ids=[0],   # set w/cuda environ var
                                                            output_device=0,  # set w/cuda environ var
                                                            find_unused_parameters=True)

    # Get some info about the structure and number of params.
    print(network)
    print("model has {} million parameters.".format(
        utils.number_of_parameters(network) / 1e6
    ))

    # build the grapher object; only the rank-0 worker gets one, all
    # other ranks return grapher=None
    grapher = None
    if args.visdom_url is not None and args.distributed_rank == 0:
        grapher = Grapher('visdom', env=utils.get_name(args),
                          server=args.visdom_url,
                          port=args.visdom_port,
                          log_folder=args.log_dir)
    elif args.distributed_rank == 0:
        grapher = Grapher(
            'tensorboard', logdir=os.path.join(args.log_dir, utils.get_name(args)))

    return loader, network, grapher
コード例 #4
0
def get_model_and_loader():
    ''' helper to return the model and the loader

    Reads configuration from the module-level ``args`` namespace.

    :returns: [saccader, loader, grapher]
    :rtype: list
    '''
    aux_transform = None
    if args.synthetic_upsample_size > 0 and args.task == "multi_image_folder":
        # bilinearly upsample each PIL image to the synthetic size; the
        # unsqueeze/squeeze pair adds and drops the batch dim F.interpolate needs
        aux_transform = lambda x: F.interpolate(
            torchvision.transforms.ToTensor()(x).unsqueeze(0),
            size=(args.synthetic_upsample_size, args.synthetic_upsample_size),
            mode='bilinear',
            align_corners=True).squeeze(0)

    # resizer = torchvision.transforms.Resize(size=(args.synthetic_upsample_size,
    #                                               args.synthetic_upsample_size))
    loader = get_loader(
        args,
        transform=None,  #transform=[resizer],
        sequentially_merge_test=False,
        aux_transform=aux_transform,
        postfix="_large",
        **vars(args))

    # append the image shape to the config & build the VAE
    args.img_shp = loader.img_shp
    vae = VRNN(
        loader.img_shp,
        n_layers=2,  # XXX: hard coded
        #bidirectional=True,    # XXX: hard coded
        bidirectional=False,  # XXX: hard coded
        kwargs=vars(args))

    # build the Variational Saccading module
    # and lazy generate the non-constructed modules
    saccader = Saccader(vae, loader.output_size, kwargs=vars(args))
    lazy_generate_modules(saccader, loader.train_loader)

    # FP16-ize, cuda-ize and parallelize (if requested)
    saccader = saccader.fp16() if args.half is True else saccader
    saccader = saccader.cuda() if args.cuda is True else saccader
    # NOTE(review): unlike the two lines above, the result is discarded here;
    # relies on parallel() mutating the model in place -- TODO confirm
    saccader.parallel() if args.ngpu > 1 else saccader

    # build the grapher object (tensorboard or visdom)
    # and plot config json to visdom
    if args.visdom_url is not None:
        grapher = Grapher('visdom',
                          env=saccader.get_name(),
                          server=args.visdom_url,
                          port=args.visdom_port)
    else:
        grapher = Grapher('tensorboard', comment=saccader.get_name())

    # log the full config as text so the run is self-describing
    grapher.add_text('config',
                     pprint.PrettyPrinter(indent=4).pformat(saccader.config),
                     0)

    # register_nan_checks(saccader)
    return [saccader, loader, grapher]
コード例 #5
0
ファイル: validate.py プロジェクト: xiaoyufenfei/EDDSNet
def validate(args):
    """Run a trained Net over the requested split and print score metrics.

    :param args: argparse namespace with ``dataset``, ``split`` and ``model_path``
    :returns: None (prints per-metric scores and per-class IoU)
    """
    # Setup Dataloader
    data_loader = get_loader(args.dataset)
    data_path = get_data_path(args.dataset)
    loader = data_loader(data_path, split=args.split, is_transform=True)
    n_classes = loader.n_classes
    valloader = data.DataLoader(loader, batch_size=1)

    # Setup Model
    model = Net(n_classes)
    print(get_n_params(model))
    model.load_state_dict(torch.load(args.model_path))
    # print(model)
    model.eval()
    if torch.cuda.is_available():
        model.cuda(0)

    gts, preds = [], []
    for i, (images, labels) in enumerate(valloader):
        if torch.cuda.is_available():
            images = Variable(images.cuda(0))
            labels = Variable(labels.cuda(0))
        else:
            images = Variable(images)
            labels = Variable(labels)
        outputs = model(images)
        # BUGFIX: np.int was removed in NumPy 1.24; it was an alias for the
        # builtin int, so astype(int) is the exact drop-in replacement.
        pred = outputs.data.max(1)[1].cpu().numpy().astype(int)
        gt = labels.data.cpu().numpy().astype(int)
        for gt_, pred_ in zip(gt, pred):
            gts.append(gt_)
            preds.append(pred_)
        # pred = pred.reshape(360, 480)
        # pred = decode_segmap(pred)
        # m.imsave('./images/{}.png'.format(i), pred)

    score, class_iou = scores(gts, preds, n_class=n_classes)
    for k, v in score.items():
        print(k, v)

    for i in range(n_classes):
        print(i, class_iou[i])
コード例 #6
0
def train_model_pool(args):
    """Train (or load) a pool of meta-models, one early-stopper each.

    :param args: argparse namespace (num_meta_models, epochs, batch_size, cuda, ...)
    :returns: the trained (or previously saved) ModelPool
    """
    if args.disable_sequential: # vanilla batch training
        loaders = get_loader(args, sequentially_merge_test=True)
        loaders = [loaders] if not isinstance(loaders, list) else loaders
    else: # classes split
        loaders = get_split_data_loaders(args, num_classes=10, sequentially_merge_test=True)

    # only operate over the number of meta models
    loaders = loaders[0:args.num_meta_models]
    for l in loaders:
        print("pool-train = ", len(l.train_loader.dataset),
              "pool-test = ", len(l.test_loader.dataset))

    model = ModelPool(input_shape=loaders[0].img_shp,
                      output_size=2,#output_size,
                      num_models=args.num_meta_models,
                      kwargs=vars(args))

    if isinstance(loaders, list): # has a sequential loader
        # collapse the per-class loaders into one merged loader
        loaders = simple_merger(loaders, args.batch_size, args.cuda)

    if not model.model_exists:
        # one optimizer per (model, class-projection) pair, one stopper per model
        optimizer = [build_optimizer(list(m_i.parameters()) + list(pr_i.parameters()), args)
                     for m_i, pr_i in zip(model.models, model.project_to_class_models)]
        early_stop = [EarlyStopping(model, max_steps=10)
                      for _ in range(args.num_meta_models)]

        for i in range(args.num_meta_models):
            for epoch in range(1, args.epochs + 1):
                train(i, epoch, model, optimizer[i], loaders, args)
                loss, _ = test(i, epoch, model, loaders, args)
                if early_stop[i](loss):
                    # roll back to the best weights seen before stopping
                    early_stop[i].restore()
                    break

            # save the model
            model.save()

    # if we loaded test again and exit
    #_ = [test(i, -1, model, loaders, args) for i in range(args.num_meta_models)]
    del loaders
    return model
コード例 #7
0
def build_loader_model_grapher(args):
    """Construct the dataloader, the requested VAE network and the grapher.

    :param args: argparse namespace; ``input_shape`` is recorded on it
    :returns: a dataloader, a model and a grapher
    :rtype: tuple

    """
    # optionally resize every image to the override size
    resize_shape = (args.image_size_override, args.image_size_override)
    transform = [torchvision.transforms.Resize(resize_shape)] if args.image_size_override else None
    loader = get_loader(args, transform=transform, **vars(args))

    # record the (possibly resized) input size on the args namespace
    if args.image_size_override is None:
        args.input_shape = loader.img_shp
    else:
        args.input_shape = [loader.img_shp[0], *resize_shape]

    # map the requested vae type onto its class and instantiate it
    network_cls = {
        'simple': SimpleVAE,
        'msg': MSGVAE,
        'parallel': ParallellyReparameterizedVAE,
        'sequential': SequentiallyReparameterizedVAE,
        'vrnn': VRNN,
    }[args.vae_type]
    network = network_cls(loader.img_shp, kwargs=deepcopy(vars(args)))
    lazy_generate_modules(network, loader.train_loader)
    if args.cuda:
        network = network.cuda()
    network = append_save_and_load_fns(network, prefix="VAE_")
    if args.ngpu > 1:
        print("data-paralleling...")
        network.parallel()

    # build the grapher object (visdom when a url is set, else tensorboard)
    if args.visdom_url:
        grapher = Grapher('visdom', env=get_name(args),
                          server=args.visdom_url,
                          port=args.visdom_port)
    else:
        grapher = Grapher('tensorboard', comment=get_name(args))

    return loader, network, grapher
コード例 #8
0
ファイル: validate.py プロジェクト: ndhung/Linknet_Pytorch
def validate(args):
    """Run a trained LinkNet over the validation split, timing each forward
    pass, and print the score/IoU metrics.

    :param args: argparse namespace with ``dataset``, ``split``,
        ``batch_size`` and ``model_path``
    :returns: None (prints per-metric scores and per-class IoU)
    """
    # Setup Dataloader
    data_loader = get_loader(args.dataset)
    data_path = get_data_path(args.dataset)
    loader = data_loader(data_path, split=args.split, is_transform=True)
    n_classes = loader.n_classes
    valloader = data.DataLoader(loader, batch_size=args.batch_size)

    # Setup Model
    model = LinkNet(n_classes)
    model.load_state_dict(torch.load(args.model_path))
    model.eval()

    if torch.cuda.is_available():
        model.cuda(0)

    gts, preds = [], []
    for i, (images, labels) in enumerate(valloader):
        if torch.cuda.is_available():
            images = Variable(images.cuda(0))
            labels = Variable(labels.cuda(0))
        else:
            images = Variable(images)
            labels = Variable(labels)
        t1 = time.time()
        outputs = model(images)
        t2 = time.time()
        print(t2 - t1)  # per-batch forward latency in seconds
        pred = outputs.data.max(1)[1].cpu().numpy()
        gt = labels.data.cpu().numpy()

        for gt_, pred_ in zip(gt, pred):
            gts.append(gt_)
            preds.append(pred_)
    score, class_iou = scores(gts, preds, n_class=n_classes)
    # BUGFIX: these two loops used Python 2 `print k, v` statements, which
    # are a SyntaxError under Python 3 (the print() call above shows this
    # module targets Python 3).
    for k, v in score.items():
        print(k, v)

    for i in range(n_classes):
        print(i, class_iou[i])
コード例 #9
0
def train_fid_model(args, fid_type='conv', batch_size=32):
    """Build (or load from disk) and train the FID classifier.

    :param args: argparse namespace (epochs, cuda, ...)
    :param fid_type: 'conv' also runs a final sanity test; other values
                     (e.g. a pretrained inceptionv3) skip it
    :param batch_size: minibatch size for the FID model
    :returns: the trained (or previously saved) FID model
    """
    loader = get_loader(args)
    # sequential tasks hand back a list of loaders; merge into a single one
    if isinstance(loader, list):
        loader = simple_merger(loader, batch_size, args.cuda)

    # debug prints
    print("[FID] train = ",
          num_samples_in_loader(loader.train_loader), " | test = ",
          num_samples_in_loader(loader.test_loader), " | output_classes = ",
          loader.output_size)

    fid_model = FID(loader.img_shp,
                    loader.output_size,
                    batch_size=batch_size,
                    fid_type=fid_type,
                    kwargs=vars(args))

    if not fid_model.model_exists:
        optimizer = build_optimizer(fid_model, args)
        stopper = EarlyStopping(fid_model, max_steps=50)

        for epoch in range(1, args.epochs + 1):
            train(epoch, fid_model, optimizer, loader, args)
            val_loss, _ = test(epoch, fid_model, loader, args)
            if stopper(val_loss):
                stopper.restore()
                break

        # persist the freshly trained weights
        fid_model.save()

    # test one final time to validate loaded models' accuracy;
    # doesn't make sense for the pretrained inceptionv3 variant
    if fid_type == 'conv':
        test(epoch=-1, model=fid_model, data_loader=loader, args=args)

    del loader  # force cleanup
    return fid_model
コード例 #10
0
ファイル: main.py プロジェクト: mo666666/LifelongVAE_pytorch
def get_model_and_loader():
    ''' helper to return the model and the loader

    Reads configuration from the module-level ``args`` namespace.

    :returns: [student_teacher, loaders, grapher]
    :rtype: list
    '''
    if args.disable_sequential:  # vanilla batch training
        loaders = get_loader(args)
        loaders = [loaders] if not isinstance(loaders, list) else loaders
    else:  # classes split
        loaders = get_split_data_loaders(args, num_classes=10)

    for l in loaders:
        print("train = ", num_samples_in_loader(l.train_loader), " | test = ",
              num_samples_in_loader(l.test_loader))

    # append the image shape to the config & build the VAE
    # BUGFIX: a stray trailing comma previously stored the 1-tuple
    # (img_shp,) instead of the shape itself.
    args.img_shp = loaders[0].img_shp
    if args.vae_type == 'sequential':
        # Sequential : P(y|x) --> P(z|y, x) --> P(x|z)
        # Keep a separate VAE spawn here in case we want
        # to parameterize the sequence of reparameterizers
        vae = SequentiallyReparameterizedVAE(loaders[0].img_shp,
                                             kwargs=vars(args))
    elif args.vae_type == 'parallel':
        # Ours: [P(y|x), P(z|x)] --> P(x | z)
        vae = ParallellyReparameterizedVAE(loaders[0].img_shp,
                                           kwargs=vars(args))
    else:
        raise Exception("unknown VAE type requested")

    # build the combiner which takes in the VAE as a parameter
    # and projects the latent representation to the output space
    student_teacher = StudentTeacher(vae, kwargs=vars(args))
    #student_teacher = init_weights(student_teacher)

    # build the grapher object
    grapher = Grapher(env=student_teacher.get_name(),
                      server=args.visdom_url,
                      port=args.visdom_port)

    return [student_teacher, loaders, grapher]
コード例 #11
0
def build_loader_model_grapher(args):
    """Construct the dataloader, a resnet18 classifier and the grapher.

    :param args: argparse namespace; ``input_shape`` is recorded on it
    :returns: a dataloader, a model and a grapher
    :rtype: tuple

    """
    # optionally resize every image to the override size
    resize_shape = (args.image_size_override, args.image_size_override)
    transform = [transforms.Resize(resize_shape)] if args.image_size_override else None
    loader = get_loader(args, transform=transform,
                        **vars(args))

    # record the (possibly resized) input size on the args namespace
    if args.image_size_override is None:
        args.input_shape = loader.img_shp
    else:
        args.input_shape = [loader.img_shp[0], *resize_shape]

    # build the network; to use your own model import and construct it here
    network = resnet18(num_classes=loader.output_size)
    lazy_generate_modules(network, loader.train_loader)
    if args.cuda:
        network = network.cuda()
    network = append_save_and_load_fns(network, prefix="VAE_")
    if args.ngpu > 1:
        print("data-paralleling...")
        network.parallel()

    # build the grapher object (visdom when a url is set, else tensorboard)
    if args.visdom_url:
        grapher = Grapher('visdom',
                          env=get_name(args),
                          server=args.visdom_url,
                          port=args.visdom_port)
    else:
        grapher = Grapher('tensorboard', comment=get_name(args))

    return loader, network, grapher
コード例 #12
0
def build_loader_model_grapher(args):
    """builds a model, a dataloader and a grapher

    :param args: argparse namespace; mutated in place with dataset sizing info
    :returns: a dataloader, a model and a grapher (grapher is None off rank 0)
    :rtype: tuple

    """
    train_transform, test_transform = build_train_and_test_transforms()
    loader_dict = {'train_transform': train_transform,
                   'test_transform': test_transform, **vars(args)}
    loader = get_loader(**loader_dict)

    # set the input tensor shape (ignoring batch dimension) and related dataset sizing
    args.input_shape = loader.input_shape
    args.output_size = loader.output_size
    args.num_train_samples = loader.num_train_samples // args.num_replicas
    args.num_test_samples = loader.num_test_samples  # Test isn't currently split across devices
    args.num_valid_samples = loader.num_valid_samples // args.num_replicas
    args.steps_per_train_epoch = args.num_train_samples // args.batch_size  # drop-remainder
    args.total_train_steps = args.epochs * args.steps_per_train_epoch

    # build the network for the requested VAE type
    network = build_vae(args.vae_type)(loader.input_shape, kwargs=deepcopy(vars(args)))
    network = network.cuda() if args.cuda else network
    # presumably runs a dummy forward pass over one batch to materialize
    # lazily-constructed modules before weight init -- TODO confirm
    lazy_generate_modules(network, loader.train_loader)
    network = layers.init_weights(network, init=args.weight_initialization)

    if args.num_replicas > 1:
        print("wrapping model with DDP...")
        network = layers.DistributedDataParallelPassthrough(network,
                                                            device_ids=[0],   # set w/cuda environ var
                                                            output_device=0,  # set w/cuda environ var
                                                            find_unused_parameters=True)

    # Get some info about the structure and number of params.
    print(network)
    print("model has {} million parameters.".format(
        utils.number_of_parameters(network) / 1e6
    ))

    # add the test set as a np array for metrics calc
    if args.metrics_server is not None:
        network.test_images = get_numpy_dataset(task=args.task,
                                                data_dir=args.data_dir,
                                                test_transform=test_transform,
                                                split='test',
                                                image_size=args.image_size_override,
                                                cuda=args.cuda)
        print("Metrics test images: ", network.test_images.shape)

    # build the grapher object; only the rank-0 worker gets one, all
    # other ranks return grapher=None
    grapher = None
    if args.visdom_url is not None and args.distributed_rank == 0:
        grapher = Grapher('visdom', env=utils.get_name(args),
                          server=args.visdom_url,
                          port=args.visdom_port,
                          log_folder=args.log_dir)
    elif args.distributed_rank == 0:
        grapher = Grapher(
            'tensorboard', logdir=os.path.join(args.log_dir, utils.get_name(args)))

    return loader, network, grapher
コード例 #13
0
def train(args, out, net_name):
    """Train Bilinear_Res on the requested dataset, validating every 10 epochs.

    :param args: argparse namespace (dataset, batch_size, lr_rate, w_decay,
        lr_decay, epochs)
    :param out: output directory for per-epoch log files and checkpoints
    :param net_name: name prefix used in log/checkpoint filenames
    """
    data_path = get_data_path(args.dataset)
    data_loader = get_loader(args.dataset)
    loader = data_loader(data_path, is_transform=True)
    n_classes = loader.n_classes
    print(n_classes)
    # NOTE(review): kwargs is built but never passed to either DataLoader below
    kwargs = {'num_workers': 8, 'pin_memory': True}

    trainloader = data.DataLoader(loader,
                                  batch_size=args.batch_size,
                                  shuffle=True)

    another_loader = data_loader(data_path, split='val', is_transform=True)

    valloader = data.DataLoader(another_loader,
                                batch_size=args.batch_size,
                                shuffle=True)

    # compute weight for cross_entropy2d
    # NOTE(review): `hist` is not defined in this function -- presumably a
    # module-level class-frequency histogram; verify it exists at call time
    norm_hist = hist / np.max(hist)
    weight = 1 / np.log(norm_hist + 1.02)
    weight[-1] = 0  # zero weight for the last class (presumably void/ignore) -- TODO confirm
    weight = torch.FloatTensor(weight)
    model = Bilinear_Res(n_classes)

    if torch.cuda.is_available():
        model.cuda(0)
        weight = weight.cuda(0)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr_rate,
                                 weight_decay=args.w_decay)
    # optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr_rate)
    scheduler = StepLR(optimizer, step_size=100, gamma=args.lr_decay)

    for epoch in tqdm.tqdm(range(args.epochs),
                           desc='Training',
                           ncols=80,
                           leave=False):
        scheduler.step()
        model.train()
        loss_list = []
        # one log file per epoch; `file` shadows the builtin of the same name
        file = open(out + '/{}_epoch_{}.txt'.format(net_name, epoch), 'w')
        for i, (images, labels) in tqdm.tqdm(enumerate(trainloader),
                                             total=len(trainloader),
                                             desc='Iteration',
                                             ncols=80,
                                             leave=False):
            if torch.cuda.is_available():
                images = Variable(images.cuda(0))
                labels = Variable(labels.cuda(0))
            else:
                images = Variable(images)
                labels = Variable(labels)
            optimizer.zero_grad()
            outputs = model(images)
            loss = cross_entropy2d(outputs, labels, weight=weight)
            # NOTE: loss.data[0] indexing requires PyTorch < 0.4
            loss_list.append(loss.data[0])
            loss.backward()
            optimizer.step()

        # file.write(str(np.average(loss_list)))
        print(np.average(loss_list))
        file.write(str(np.average(loss_list)) + '\n')
        model.eval()
        gts, preds = [], []
        # run validation + checkpointing only every 10th epoch
        if (epoch % 10 == 0):
            for i, (images, labels) in tqdm.tqdm(enumerate(valloader),
                                                 total=len(valloader),
                                                 desc='Valid Iteration',
                                                 ncols=80,
                                                 leave=False):
                if torch.cuda.is_available():
                    images = Variable(images.cuda(0))
                    labels = Variable(labels.cuda(0))
                else:
                    images = Variable(images)
                    labels = Variable(labels)
                outputs = model(images)
                pred = outputs.data.max(1)[1].cpu().numpy()
                gt = labels.data.cpu().numpy()
                for gt_, pred_ in zip(gt, pred):
                    gts.append(gt_)
                    preds.append(pred_)
            score, class_iou = scores(gts, preds, n_class=n_classes)
            for k, v in score.items():
                file.write('{} {}\n'.format(k, v))

            for i in range(n_classes):
                file.write('{} {}\n'.format(i, class_iou[i]))
            torch.save(
                model.state_dict(),
                out + "/{}_{}_{}.pkl".format(net_name, args.dataset, epoch))
        file.close()
コード例 #14
0
def get_model_and_loader():
    ''' helper to return the model and the loader

    Reads configuration from the module-level ``args`` namespace.

    :returns: [model, loader, grapher]
    :rtype: list
    '''
    aux_transform = None
    if args.synthetic_upsample_size > 0:  #and args.task == "multi_image_folder":
        to_pil = torchvision.transforms.ToPILImage()
        to_tensor = torchvision.transforms.ToTensor()
        resizer = torchvision.transforms.Resize(
            size=(args.synthetic_upsample_size, args.synthetic_upsample_size),
            interpolation=2)

        def extract_patches_2D(img, size):
            """Tile an NCHW tensor into non-overlapping patches of `size`.

            Dimensions that do not divide evenly get one extra patch flush
            against the bottom/right edge so the whole image is covered.
            """
            patch_H, patch_W = min(img.size(2),
                                   size[0]), min(img.size(3), size[1])
            patches_fold_H = img.unfold(2, patch_H, patch_H)
            if (img.size(2) % patch_H != 0):
                # cover the leftover rows with a patch anchored at the bottom
                patches_fold_H = torch.cat(
                    (patches_fold_H, img[:, :, -patch_H:, ].permute(
                        0, 1, 3, 2).unsqueeze(2)),
                    dim=2)

            # BUGFIX: the unfold below and the final permute/reshape were
            # previously nested inside the remainder `if` branches, raising
            # UnboundLocalError whenever a dimension divided evenly; they
            # must run unconditionally.
            patches_fold_HW = patches_fold_H.unfold(3, patch_W, patch_W)
            if (img.size(3) % patch_W != 0):
                # cover the leftover cols with a patch anchored at the right
                patches_fold_HW = torch.cat(
                    (patches_fold_HW,
                     patches_fold_H[:, :, :, -patch_W:, :].permute(
                         0, 1, 2, 4, 3).unsqueeze(3)),
                    dim=3)

            patches = patches_fold_HW.permute(0, 2, 3, 1, 4, 5).reshape(
                -1, img.size(1), patch_H, patch_W)
            return patches

        def patch_extractor_lambda(crop):
            # ensure a batch dimension before tiling into 224x224 patches
            crop = crop.unsqueeze(0) if len(crop.shape) < 4 else crop
            return extract_patches_2D(crop, [224, 224])

        aux_transform = lambda x: patch_extractor_lambda(
            to_tensor(resizer(to_pil(to_tensor(x)))))

    loader = get_loader(args,
                        transform=None,
                        sequentially_merge_test=False,
                        aux_transform=aux_transform,
                        postfix="_large",
                        **vars(args))

    # append the image shape to the config & build the model
    args.img_shp = loader.img_shp
    model = MultiBatchModule(loader.output_size, checkpoint=args.checkpoint)

    # FP16-ize, cuda-ize and parallelize (if requested)
    model = model.half() if args.half is True else model
    model = model.cuda() if args.cuda is True else model
    model = nn.DataParallel(model) if args.ngpu > 1 else model

    # build the grapher object (tensorboard or visdom)
    # and plot config json to visdom
    if args.visdom_url is not None:
        grapher = Grapher('visdom',
                          env=get_name(),
                          server=args.visdom_url,
                          port=args.visdom_port)
    else:
        grapher = Grapher('tensorboard', comment=get_name())

    grapher.add_text('config',
                     pprint.PrettyPrinter(indent=4).pformat(vars(args)), 0)
    return [model, loader, grapher]
コード例 #15
0
ファイル: main.py プロジェクト: jramapuram/dataset_to_zarr
# --- CLI for converting a dataset (via get_loader) into zarr arrays ---
# NOTE(review): `parser` is constructed above this chunk (argparse parser);
# also the --task help text says "(default: mnist)" while the actual
# default is "fashion" -- confirm which is intended.
parser.add_argument('--task', type=str, default="fashion",
                    help="""task to work on (can specify multiple) [mnist / cifar10 /
                    fashion / svhn_centered / svhn / clutter / permuted] (default: mnist)""")
parser.add_argument('--batch-size', type=int, default=1, metavar='N',
                    help='input batch size for training (default: 1)')
parser.add_argument('--chunk-size', type=int, default=64,
                    help='chunk size for zarr; set this to a typical minibatch size (default: 64)')
parser.add_argument('--data-dir', type=str, default='./.datasets', metavar='DD',
                    help='directory which contains input data')
parser.add_argument('--output-data-dir', type=str, default='./processed',
                    help='directory which contains zarr data')
args = parser.parse_args()


# Read the full dataset with train / test / val
dataset = get_loader(task=args.task, data_dir=args.data_dir, batch_size=args.batch_size, cuda=False)


def loader_to_zarr(loader, loader_size, feature_shape, output_zarr_prefix):
    """Takes a train/test/val loader and creates a zarr array"""
    if not os.path.isdir(args.output_data_dir):
        os.makedirs(args.output_data_dir)

    z_dataset = zarr.open(os.path.join(args.output_data_dir, '{}_{}_data.zarr'.format(args.task, output_zarr_prefix)), mode='w',
                          shape=(loader_size, *feature_shape), chunks=(args.chunk_size, *feature_shape), dtype='float32')
    z_labels = zarr.open(os.path.join(args.output_data_dir, '{}_{}_labels.zarr'.format(args.task, output_zarr_prefix)), mode='w',
                         shape=(loader_size), chunks=(args.chunk_size), dtype='int64')

    for idx, (sample, label) in tqdm(enumerate(loader)):
        if len(sample.shape) == 4:
            sample = sample.squeeze(0)
コード例 #16
0
ファイル: train.py プロジェクト: xiaoyufenfei/EDDSNet
def train(args):
    """Train Net on pascal (SBD) or a get_loader dataset, validating each epoch.

    :param args: argparse namespace (dataset, model, batch_size, lr_rate,
        w_decay, epochs, folder)
    """
    if (args.dataset == 'pascal'):
        # pascal: SBD split for training, VOC2011 class-seg for validation
        another_loader = VOC2011ClassSeg(root='/home/vietdv', transform=True)
        loader = SBDClassSeg(root='/home/vietdv', transform=True, augment=True)
    else:
        data_path = get_data_path(args.dataset)
        label_scale = False
        if (args.model == 'encoder'):
            label_scale = True
        data_loader = get_loader(args.dataset)
        loader = data_loader(data_path,
                             is_transform=True,
                             augment=True,
                             label_scale=label_scale)
        another_loader = data_loader(data_path,
                                     split='val',
                                     is_transform=True,
                                     label_scale=label_scale)

    n_classes = loader.n_classes
    trainloader = data.DataLoader(loader, batch_size=args.batch_size)

    valloader = data.DataLoader(another_loader, batch_size=1)
    # get weight for cross_entropy2d
    weight = loader.weight
    model = Net(n_classes)
    if torch.cuda.is_available():
        model.cuda(0)
        weight = weight.cuda(0)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr_rate,
                                 weight_decay=args.w_decay)
    criterion = CrossEntropyLoss2d(weight, False)
    # alpha = 0.5
    # poly learning-rate decay: lr is scaled by (1 - epoch/epochs)^0.9
    lambda1 = lambda epoch: pow((1 -
                                 (epoch / args.epochs)), 0.9)  ## scheduler 2
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1)
    for epoch in range(args.epochs):
        model.train()
        loss_list = []
        # one log file per epoch; `file` shadows the builtin of the same name
        file = open(args.folder + '/{}_{}.txt'.format('hnet', epoch), 'w')
        scheduler.step(epoch)
        for i, (images, labels) in enumerate(trainloader):
            if torch.cuda.is_available():
                images = Variable(images.cuda(0))
                labels = Variable(labels.cuda(0))
            else:
                images = Variable(images)
                labels = Variable(labels)
            optimizer.zero_grad()
            outputs = model(images)
            # loss = alpha * criterion(outputs, labels) / len(images) + (1 - alpha) * lovasz_softmax(outputs, labels, ignore=n_classes-1)
            loss = criterion(outputs, labels) / len(images)
            # NOTE: loss.data[0] indexing requires PyTorch < 0.4
            print(loss.data[0])
            loss_list.append(loss.data[0])
            loss.backward()
            optimizer.step()

        file.write(str(np.average(loss_list)) + '\n')
        model.eval()
        gts, preds = [], []
        # run validation over the whole val split every epoch
        for i, (images, labels) in enumerate(valloader):
            if torch.cuda.is_available():
                images = Variable(images.cuda(0))
                labels = Variable(labels.cuda(0))
            else:
                images = Variable(images)
                labels = Variable(labels)
            outputs = model(images)
            pred = outputs.data.max(1)[1].cpu().numpy()
            gt = labels.data.cpu().numpy()
            for gt_, pred_ in zip(gt, pred):
                gts.append(gt_)
                preds.append(pred_)
        score, class_iou = scores(gts, preds, n_class=n_classes)
        # scheduler.step(score['Mean IoU : \t'])
        for k, v in score.items():
            file.write('{} {}\n'.format(k, v))

        # last class is skipped (presumably void/ignore) -- TODO confirm
        for i in range(n_classes - 1):
            file.write('{} {}\n'.format(i, class_iou[i]))
        torch.save(
            model.state_dict(),
            args.folder + "/{}_{}_{}.pkl".format('hnet', args.dataset, epoch))
        file.close()