Example no. 1
def pipeline():
    """ Creates a pipeline configured to use a given model with a specified configuration.

    Notes
    -----
    The pipeline can be executed only if its config contains the following parameters:

    model_class : TFModel
        Architecture of the model. The list of available models is defined in 'AVAILABLE_MODELS'.

    model_config : Config
        Model parameters.

    Returns
    -------
    Pipeline
        A pipeline that contains model initialization and training with a given config.
    """

    test_pipeline = (Pipeline()
                     .init_variable('current_loss')
                     .init_model('dynamic', C('model_class'), 'model', C('model_config'))
                     .to_array()
                     .train_model('model',
                                  fetches='loss',
                                  images=B('images'),
                                  labels=B('labels'),
                                  save_to=V('current_loss')))
    return test_pipeline
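
A hedged usage sketch of the template above (SomeModel, some_model_config and some_dataset are hypothetical placeholders, not part of the example): the pipeline only becomes runnable once `<<` supplies a dataset and a config with 'model_class' and 'model_config', as required by the Notes in the docstring.

# Hypothetical driver: SomeModel, some_model_config and some_dataset are placeholders.
config = {'model_class': SomeModel, 'model_config': some_model_config}

# `<<` binds the template to a dataset and a config, so C('model_class') and
# C('model_config') can be resolved when the pipeline runs.
train_pipeline = (pipeline() << some_dataset) << config
train_pipeline.run(16, n_epochs=1)

# the loss of the last trained batch is stored in the 'current_loss' variable
print(train_pipeline.v('current_loss'))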
Example no. 2
        def _pipelines(model_class):
            """ Build `save_pipeline` and `load_pipeline` prediction pipelines
            configured for the given `model_class`. """
            config = {}
            data_format = predict_args = predict_kwargs = None
            if issubclass(model_class, TFModel):
                data_format = 'channels_last'
                config.update({'dtype': None})
                predict_args = ()
                predict_kwargs = dict(images=B('images'))
            elif issubclass(model_class, TorchModel):
                data_format = 'channels_first'
                config.update({'dtype': 'float32'})
                predict_args = (B('images'),)
                predict_kwargs = dict()

            dataset, model_config = model_setup_images_clf(data_format)
            config.update({'model_class': model_class, 'model_config': model_config})

            save_pipeline = (Pipeline()
                             .init_variable('predictions', default=[])
                             .init_model('dynamic', C('model_class'), 'model', C('model_config'))
                             .to_array(dtype=C('dtype'))
                             .predict_model('model', *predict_args,
                                            fetches='predictions', save_to=V('predictions', mode='a'),
                                            **predict_kwargs))
            load_pipeline = (Pipeline()
                             .init_variable('predictions', default=[])
                             .to_array(dtype=C('dtype'))
                             .predict_model('model', *predict_args,
                                            fetches='predictions', save_to=V('predictions', mode='a'),
                                            **predict_kwargs))

            save_pipeline = (save_pipeline << dataset) << config
            load_pipeline = (load_pipeline << dataset) << config
            return save_pipeline, load_pipeline
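
A possible way to drive the pair returned above (hedged sketch; TorchModel is used only for illustration, and the actual model save/load step is outside this snippet):

# Hypothetical driver for the snippet above.
save_pipeline, load_pipeline = _pipelines(TorchModel)

# Running save_pipeline creates the model via init_model and appends the
# per-batch predictions to the 'predictions' variable (mode='a').
save_pipeline.run(16, n_epochs=1)
saved_predictions = save_pipeline.v('predictions')

# load_pipeline deliberately has no init_model: a previously saved model has to
# be put into it before its predict_model action can run.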
Example no. 3
def model_pipeline():
    """ Creates instance of Pipeline that is configured to use given model
    with passed parameters.

    Parameters
    ----------

    model_class : subclass of TFModel
        Architecture of the model. The list of available models is defined in 'AVAILABLE_MODELS'.

    model_config : dict
        Dictionary with the model's parameters.

    Returns
    -------
    Pipeline
        Test pipeline that consists of model initialization and
        preparation for training with the given config.
    """

    test_pipeline = (Pipeline()
                     .init_variable('current_loss')
                     .init_model('dynamic', C('model_class'), 'TestModel', C('model_config'))
                     .to_array()
                     .train_model('TestModel',
                                  fetches='loss',
                                  images=B('images'),
                                  labels=B('labels'),
                                  save_to=V('current_loss')))
    return test_pipeline
Example no. 4
    def train_args(model_class):
        """
        Make args and kwargs for `.train_model` that are compatible with `model_class`.
        """
        args = kwargs = None
        if issubclass(model_class, TFModel):
            args = ()
            kwargs = dict(images=B('images'), labels=B('labels'))
        elif issubclass(model_class, TorchModel):
            args = (B('images'), B('labels'))
            kwargs = dict(fetches='loss')

        return args, kwargs
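
A hedged sketch of how the returned pair is typically unpacked into `.train_model` (the surrounding pipeline template is illustrative and mirrors the earlier examples; it is not part of this snippet):

# Hypothetical usage: unpack the args/kwargs produced by train_args.
args, kwargs = train_args(TorchModel)

train_template = (Pipeline()
                  .init_variable('loss')
                  .init_model('dynamic', C('model_class'), 'model', C('model_config'))
                  .to_array()
                  .train_model('model', *args, save_to=V('loss'), **kwargs))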
Example no. 5
def test_d(size, n_splits):
    """Test checks for behaviour of D expression in `set_dataset` action.

    size
        size of the dataset.
    n_splits
        the number if cv folds.
    """
    dataset = Dataset(size)
    dataset.cv_split(n_splits=n_splits)

    pipeline = (Pipeline()
                .init_variable('indices', default=[])
                .update(V('indices', mode='a'), B('indices')[0])) << dataset.CV(C('fold')).train

    result = list(range(size))

    for fold in range(n_splits):
        pipeline.set_config({'fold': fold})
        start = fold * (size // n_splits)
        end = (fold + 1) * (size // n_splits)

        for _ in range(2):
            pipeline.reset('vars')
            pipeline.run(1)

            assert pipeline.v('indices') == result[:start] + result[end:]
Example no. 6
import sys

import pytest
import numpy as np

sys.path.append('..')
from batchflow import B, C, D, F, L, V, R, P, I, Dataset, Pipeline, Batch, apply_parallel, inbatch_parallel, action

#--------------------
#      COMMON
#--------------------


@pytest.mark.parametrize('named_expr', [
    C('option'),
    C('not defined', default=10),
    B('size'),
    D('size'),
    V('var'),
    R('normal', 0, 1),
    R('normal', 0, 1, size=B.size),
    F(lambda batch: 0),
    L(lambda: 0),
])
def test_general_get(named_expr):
    pipeline = (Dataset(10).pipeline({'option': 0})
                .init_variable('var')
                .do_nothing(named_expr)
                .run(2, lazy=True))

    failed = False
    try:
        _ = pipeline.next_batch()
    except KeyError:
        # assumed completion of the truncated snippet: a KeyError here means
        # the named expression could not be resolved
        failed = True
    assert not failed
Example no. 7
        'name': 'targets'
    },
    'initial_block/inputs': 'images',
    'body/block/layout': 'cna',
    'device': 'gpu:2'
}

mnist = MNIST()

train_ppl = (mnist.train.p
             .init_variable('loss', init_on_each_run=list)
             .init_variable('accuracy', init_on_each_run=list)
             .init_model('dynamic', VGG16, 'conv', config=model_config)
             .to_array(channels='first', dtype='float32')
             .train_model('conv',
                          B('images'),
                          B('labels'),
                          fetches='loss',
                          save_to=V('loss', mode='w'))
             .run(BATCH_SIZE, shuffle=True, n_epochs=1, lazy=True))

test_ppl = (mnist.test.p
            .init_variable('predictions')
            .init_variable('metrics', init_on_each_run=None)
            .import_model('conv', train_ppl)
            .to_array(channels='first', dtype='float32')
            .predict_model('conv',
                           B('images'),
                           targets=B('labels'),
                           fetches='predictions',
                           save_to=V('predictions'))
            .gather_metrics(
Example no. 8
    'device': C('device')  # it's a technical parameter for TFModel
}

mnist = MNIST()
train_root = mnist.train.p.run(BATCH_SIZE,
                               shuffle=True,
                               n_epochs=None,
                               lazy=True)
test_root = mnist.test.p.run(BATCH_SIZE, shuffle=True, n_epochs=1, lazy=True)

train_template = (Pipeline()
                  .init_variable('loss', init_on_each_run=list)
                  .init_variable('accuracy', init_on_each_run=list)
                  .init_model('dynamic', VGG16, 'conv', config=model_config)
                  .to_array()
                  .train_model('conv',
                               images=B('images'),
                               labels=B('labels'),
                               fetches='loss',
                               save_to=V('loss', mode='w')))

test_template = (Pipeline().init_variable('predictions').init_variable(
    'metrics', init_on_each_run=None).import_model(
        'conv', C('import_from')).to_array().predict_model(
            'conv',
            images=B('images'),
            fetches='predictions',
            save_to=V('predictions')).gather_metrics(
                'class',
                targets=B('labels'),
                predictions=V('predictions'),