import sys

import pytest
import numpy as np

sys.path.append('..')
from batchflow import B, C, D, F, L, V, R, P, I, Dataset, Pipeline, Batch, apply_parallel, inbatch_parallel, action


#--------------------
# COMMON
#--------------------
@pytest.mark.parametrize('named_expr', [
    C('option'),
    C('not defined', default=10),
    B('size'),
    D('size'),
    V('var'),
    R('normal', 0, 1),
    R('normal', 0, 1, size=B.size),
    F(lambda batch: 0),
    L(lambda: 0),
])
def test_general_get(named_expr):
    # every supported named expression should be resolvable inside a lazy pipeline
    pipeline = (Dataset(10).pipeline({'option': 0})
                .init_variable('var')
                .do_nothing(named_expr)
                .run(2, lazy=True))

    failed = False
    try:
        _ = pipeline.next_batch()
    except KeyError:
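        # The test body is truncated at this point; a minimal completion (an assumption
        # based on the `failed` flag set up above, not the verbatim original) would mark
        # the KeyError and report it as a test failure:
        failed = True
    if failed:
        pytest.fail("Name does not exist")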
""" Test research with tf model """ import sys sys.path.append("../../..") from batchflow import Pipeline, B, C, V, D from batchflow.opensets import MNIST from batchflow.models.tf import VGG16 from batchflow.research import Research BATCH_SIZE = 64 model_config = { 'inputs/images/shape': (28, 28, 1), 'inputs/labels/classes': D('num_classes'), 'initial_block/inputs': 'images', 'body/block/layout': 'cna', 'device': C('device') # it's technical parameter for TFModel } mnist = MNIST() train_root = mnist.train.p.run(BATCH_SIZE, shuffle=True, n_epochs=None, lazy=True) test_root = mnist.test.p.run(BATCH_SIZE, shuffle=True, n_epochs=1, lazy=True) train_template = (Pipeline().init_variable( 'loss', init_on_each_run=list).init_variable( 'accuracy', init_on_each_run=list).init_model( 'dynamic', VGG16, 'conv',