def main(args):
    # chainer.set_debug(True)
    # Initialize the model to train
    model = models.archs[args.arch]()
    outputdir = os.path.join(args.out, args.arch, 'extract', args.layer)
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)
    # Load the datasets and mean file
    mean = None
    if args.indices is None:
        args.indices = os.path.join(args.out, args.arch, 'extract',
                                    'top_' + args.layer + '.txt')
    if args.bounds is None:
        args.bounds = os.path.join(args.out, args.arch, 'extract',
                                   'maxbounds_' + args.layer + '.txt')
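    # Assumption inferred from the file names and how they are read below:
    # 'top_<layer>.txt' lists the dataset indices of the top-activating images
    # per feature, and 'maxbounds_<layer>.txt' holds the matching
    # receptive-field bounds as tab-separated integers, both written by an
    # earlier extraction run.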

    val = ppds.PreprocessedDataset(args.val, args.root, mean, model.insize,
                                   False, args.indices)
    val_iter = chainer.iterators.SerialIterator(val,
                                                args.val_batchsize,
                                                repeat=False,
                                                shuffle=False)

    bounds = np.loadtxt(args.bounds, delimiter="\t", dtype='i')
    cols = val.cols
    idx = 0
    for batch in val_iter:
        indices = np.arange(idx, idx + len(batch))
        batch_bounds = [bounds[i] for i in indices]
        idx += len(batch)
        for (i, ba, bo) in zip(indices, batch, batch_bounds):
            # Crop the receptive-field patch from the CHW image; the bounds
            # row is laid out as (x0, x1, y0, y1).
            patch = ba[0][:, int(bo[2]):int(bo[3]), int(bo[0]):int(bo[1])]
            # Reverse the channel order and transpose CHW -> HWC for PIL.
            patchimg = Image.fromarray(np.uint8(patch[::-1].transpose(1, 2, 0)))
            # The filename encodes the grid position: column i % cols,
            # row i // cols.
            patchimg.save(os.path.join(
                outputdir, "{0:0>4}_{1:0>2}.png".format(i % cols, i // cols)))
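
For reference, a minimal argparse sketch matching the attributes this example reads (the flag names are assumptions inferred from the args.* accesses above, not confirmed by the source):

def parse_args():
    import argparse
    parser = argparse.ArgumentParser(description='Save top-activation patches')
    parser.add_argument('val', help='Path to the validation image-label list')
    parser.add_argument('--arch', default='alex', help='Key into models.archs')
    parser.add_argument('--root', default='.', help='Root directory of the images')
    parser.add_argument('--out', default='result', help='Output root directory')
    parser.add_argument('--layer', required=True, help='Target layer name')
    parser.add_argument('--indices', default=None, help='top_<layer>.txt (defaulted as above)')
    parser.add_argument('--bounds', default=None, help='maxbounds_<layer>.txt (defaulted as above)')
    parser.add_argument('--val_batchsize', type=int, default=32, help='Validation minibatch size')
    return parser.parse_args()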
Example #2
def main(args):
    chainer.set_debug(True)
    # Initialize the model to train
    model = models.archs[args.arch]()
    if args.finetune and hasattr(model, 'finetuned_model_path'):
        utils.finetuning.load_param(model.finetuned_model_path, model,
                                    args.ignore)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make the GPU current
        model.to_gpu()

    outputdir = os.path.join(args.out, args.arch, 'extract')
    if args.initmodel is not None:
        outputdir = os.path.dirname(args.initmodel)
        if args.indices is None:
            args.indices = os.path.join(outputdir, 'features',
                                        'top_' + args.layer + '.txt')
    # Load the datasets and mean file
    mean = None
    if hasattr(model, 'mean_value'):
        mean = makeMeanImage(model.mean_value)
    else:
        mean = np.load(args.mean)
    assert mean is not None

    if args.indices is None:
        args.indices = os.path.join(args.out, args.arch, 'extract',
                                    'top_' + args.layer + '.txt')

    val = ppds.PreprocessedDataset(args.val, args.root, mean, model.insize,
                                   False, args.indices)
    val_iter = chainer.iterators.SerialIterator(val,
                                                args.val_batchsize,
                                                repeat=False,
                                                shuffle=False)

    # Set up an optimizer (required by StandardUpdater; the trainer is never
    # run here, so no parameters are actually updated)
    optimizer = chainer.optimizers.MomentumSGD()
    optimizer.setup(model)

    # Set up a trainer
    updater = training.StandardUpdater(val_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (1, 'epoch'), outputdir)

    val_interval = (1, 'iteration')
    log_interval = (10, 'iteration')

    # Copy the chain with shared parameters so the 'train' flag can be
    # flipped on the copy for evaluation only
    eval_model = model.copy()
    eval_model.train = False
    val_acquirer = utils.DeconvAcquirer(val_iter, eval_model, device=args.gpu)
    val_acquirer.mean = mean
    val_acquirer.layer_rank = eval_model.layer_rank[args.layer]
    val_acquirer.layer_name = args.layer
    val_acquirer.operation = args.operation
    val_acquirer.fixed_RMS = args.rms
    val_acquirer.top = args.top
    val_acquirer.n_features = val.cols
    if 'googlenet' in args.arch:
        val_acquirer.lastname = 'validation/main/loss3'
    trainer.extend(val_acquirer, trigger=val_interval)
    trainer.extend(extensions.PrintReport([
        'epoch',
        'iteration',
        'main/loss',
        'validation/main/loss',
        'main/accuracy',
        'validation/main/accuracy',
    ]),
                   trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    # The trainer is never run; instead, the acquirer extension is invoked
    # directly to sweep the validation set once.
    results = val_acquirer(trainer)
    results['outputdir'] = outputdir

    return results
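
makeMeanImage is called above but never shown. A minimal sketch consistent with how it is used, expanding a per-channel mean_value into a full mean image; the (3, insize, insize) shape and channel order are assumptions:

import numpy as np

def makeMeanImage(mean_value, insize=224):
    # Tile each per-channel mean (e.g. [104.0, 117.0, 123.0]) across a
    # full insize x insize plane.
    mean = np.empty((3, insize, insize), dtype=np.float32)
    for ch, v in enumerate(mean_value):
        mean[ch].fill(v)
    return mean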
Example #3
def main(args):
    chainer.set_debug(True)
    # Initialize the model to train
    model = models.archs[args.arch]()
    if args.finetune and hasattr(model, 'finetuned_model_path'):
        finetuning.load_param(model.finetuned_model_path, model, args.ignore)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make the GPU current
        model.to_gpu()

    nowt = datetime.datetime.today()
    outputdir = os.path.join(
        args.out, args.arch,
        nowt.strftime("%Y%m%d-%H%M") + '_bs' + str(args.batchsize))
    if args.test and args.initmodel is not None:
        outputdir = os.path.dirname(args.initmodel)
    # Load the datasets and mean file
    mean = None
    if hasattr(model, 'mean_value'):
        mean = makeMeanImage(model.mean_value)
    else:
        mean = np.load(args.mean)
    assert mean is not None

    train = ppds.PreprocessedDataset(args.train, args.root, mean, model.insize)
    val = ppds.PreprocessedDataset(args.val, args.root, mean, model.insize,
                                   False)
    # The training iterator loads images with subprocesses running in parallel
    # to the training; validation uses a serial iterator.
    train_iter = chainer.iterators.MultiprocessIterator(
        train, args.batchsize, shuffle=False, n_processes=args.loaderjob)
    val_iter = chainer.iterators.SerialIterator(val,
                                                args.val_batchsize,
                                                repeat=False,
                                                shuffle=False)

    # Set up an optimizer
    optimizer = chainer.optimizers.MomentumSGD(lr=args.baselr, momentum=0.9)
    optimizer.setup(model)

    # Set up a trainer
    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), outputdir)

    val_interval = (10, 'iteration') if args.test else (1, 'epoch')
    snapshot_interval = (10, 'iteration') if args.test else (2, 'epoch')
    log_interval = (10 if args.test else 200), 'iteration'

    # Copy the chain with shared parameters so the 'train' flag can be
    # flipped on the copy for evaluation only
    eval_model = model.copy()
    eval_model.train = False
    if not args.test:
        val_evaluator = extensions.Evaluator(val_iter,
                                             eval_model,
                                             device=args.gpu)
    else:
        val_evaluator = evaluator_plus.EvaluatorPlus(val_iter,
                                                     eval_model,
                                                     device=args.gpu)
        if 'googlenet' in args.arch:
            val_evaluator.lastname = 'validation/main/loss3'
    trainer.extend(val_evaluator, trigger=val_interval)
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'),
                   trigger=snapshot_interval)
    # Be careful to pass the interval directly to LogReport
    # (it determines when to emit log rather than when to read observations)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch',
        'iteration',
        'main/loss',
        'validation/main/loss',
        'main/accuracy',
        'validation/main/accuracy',
    ]),
                   trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(extensions.ExponentialShift('lr', args.gamma),
                   trigger=(1, 'epoch'))

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    if not args.test:
        trainer.run()
        chainer.serializers.save_npz(os.path.join(outputdir, 'model'), model)

    results = val_evaluator(trainer)
    results['outputdir'] = outputdir

    if args.test:
        print(val_evaluator.confmat)
        categories = dataio.load_categories(args.categories)
        confmat_csv_name = args.initmodel + '.csv'
        confmat_fig_name = args.initmodel + '.eps'
        dataio.save_confmat_csv(confmat_csv_name, val_evaluator.confmat,
                                categories)
        dataio.save_confmat_fig(confmat_fig_name,
                                val_evaluator.confmat,
                                categories,
                                mode="rate",
                                saveFormat="eps")
    return results
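
Example #3 is the only variant that actually runs training. ExponentialShift('lr', args.gamma) with a (1, 'epoch') trigger multiplies the optimizer's learning rate by args.gamma at each epoch boundary, so the rate decays geometrically from args.baselr. A quick standalone check of that schedule:

def lr_schedule(baselr, gamma, epochs):
    # Learning rate in effect during each epoch under ExponentialShift.
    return [baselr * gamma ** k for k in range(epochs)]

print(lr_schedule(0.1, 0.5, 4))  # [0.1, 0.05, 0.025, 0.0125]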
Example #4
def main(args):
    chainer.set_debug(True)
    # Initialize the model to train
    model = models.archs[args.arch]()
    if args.finetune and hasattr(model, 'finetuned_model_path'):
        utils.finetuning.load_param(model.finetuned_model_path, model,
                                    args.ignore)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make the GPU current
        model.to_gpu()

    outputdir = os.path.join(args.out, args.arch, 'extract')
    if args.initmodel is not None:
        outputdir = os.path.dirname(args.initmodel)
    # Load the datasets and mean file
    mean = None
    if hasattr(model, 'mean_value'):
        mean = makeMeanImage(model.mean_value)
    else:
        mean = np.load(args.mean)
    assert mean is not None

    val = ppds.PreprocessedDataset(args.val, args.root, mean, model.insize,
                                   False)
    val_iter = chainer.iterators.SerialIterator(val,
                                                args.val_batchsize,
                                                repeat=False,
                                                shuffle=False)

    # Set up an optimizer (required by StandardUpdater; the trainer is never
    # run here)
    optimizer = chainer.optimizers.MomentumSGD()
    optimizer.setup(model)

    # Set up a trainer
    updater = training.StandardUpdater(val_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (1, 'epoch'), outputdir)

    val_interval = (1, 'iteration')

    # Copy the chain with shared parameters so the 'train' flag can be
    # flipped on the copy for evaluation only
    eval_model = model.copy()
    eval_model.train = False
    val_extractor = utils.Extractor(val_iter, eval_model, device=args.gpu)
    val_extractor.layer_rank = eval_model.layer_rank[args.layer]
    val_extractor.layer_name = args.layer
    val_extractor.operation = args.operation
    val_extractor.save_features = args.savefeatures
    val_extractor.top = args.top
    if 'googlenet' in args.arch:
        val_extractor.lastname = 'validation/main/loss3'
    trainer.extend(val_extractor, trigger=val_interval)

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    results = val_extractor(trainer)
    results['outputdir'] = outputdir

    return results
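
ppds.PreprocessedDataset is external to all four snippets. A minimal sketch of a compatible dataset, with the constructor signature inferred from the calls above (path, root, mean, crop_size, random, indices); the indices handling and the rows/cols attributes used by Example #1 are omitted, and the cropping and mean-subtraction details are assumptions:

import numpy as np
import chainer

class PreprocessedDataset(chainer.dataset.DatasetMixin):
    def __init__(self, path, root, mean, crop_size, random=True, indices=None):
        self.base = chainer.datasets.LabeledImageDataset(path, root)
        self.mean = None if mean is None else mean.astype(np.float32)
        self.crop_size = crop_size
        self.random = random

    def __len__(self):
        return len(self.base)

    def get_example(self, i):
        image, label = self.base[i]  # float32 CHW image
        _, h, w = image.shape
        cs = self.crop_size
        if self.random:
            # Random crop for training.
            top = np.random.randint(0, h - cs + 1)
            left = np.random.randint(0, w - cs + 1)
        else:
            # Center crop for evaluation.
            top = (h - cs) // 2
            left = (w - cs) // 2
        image = image[:, top:top + cs, left:left + cs]
        if self.mean is not None:
            image -= self.mean  # assumes a crop-sized mean image
        return image, label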