Example #1
def test_profile():

    config_file = "tests/fixtures/configs/for_profile.py"
    expriment_id = "test_profile"
    train_run(None, None, config_file, expriment_id, recreate=True)

    setup_test_environment()

    run(experiment_id, None, None, 2, [])
Example #2
def test_export():

    config_file = "tests/fixtures/configs/for_export.py"
    expriment_id = "test_export"
    train_run(None, None, config_file, expriment_id, recreate=True)

    setup_test_environment()

    run(experiment_id, None, (None, None), [], None)
Example #3
def test_build_tfds_classification():
    environment.setup_test_environment()

    # Build TFDS Dataset
    config_file = "tests/fixtures/configs/for_build_tfds_classification.py"
    run(config_file, overwrite=True)

    # Check if the built dataset can be loaded with the same config file
    experiment_id = "tfds_classification"
    train_run(None, None, config_file, experiment_id, recreate=True)

    # Check if the dataset was built correctly
    train_data_num = 3
    validation_data_num = 2
    config = config_util.load(config_file)

    train_dataset = setup_dataset(TFDSClassification,
                                  subset="train",
                                  batch_size=config.BATCH_SIZE,
                                  pre_processor=config.PRE_PROCESSOR,
                                  **config.DATASET.TFDS_KWARGS)

    validation_dataset = setup_dataset(TFDSClassification,
                                       subset="validation",
                                       batch_size=config.BATCH_SIZE,
                                       pre_processor=config.PRE_PROCESSOR,
                                       **config.DATASET.TFDS_KWARGS)

    assert train_dataset.num_per_epoch == train_data_num
    assert validation_dataset.num_per_epoch == validation_data_num

    for _ in range(train_data_num):
        images, labels = train_dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == config.BATCH_SIZE
        assert images.shape[1] == config.IMAGE_SIZE[0]
        assert images.shape[2] == config.IMAGE_SIZE[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == config.BATCH_SIZE
        assert labels.shape[1] == train_dataset.num_classes

    for _ in range(validation_data_num):
        images, labels = validation_dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == config.BATCH_SIZE
        assert images.shape[1] == config.IMAGE_SIZE[0]
        assert images.shape[2] == config.IMAGE_SIZE[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == config.BATCH_SIZE
        assert labels.shape[1] == validation_dataset.num_classes
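
For reference, here is a minimal sketch of what the fixture config read by this test might contain, covering only the attributes the test touches (BATCH_SIZE, IMAGE_SIZE, PRE_PROCESSOR, and DATASET.TFDS_KWARGS). The dataset name, data directory, and placeholder preprocessor below are hypothetical stand-ins, not the contents of the real for_build_tfds_classification.py:

# Hypothetical sketch of the fixture config this test reads; only the
# attributes accessed by the test are shown, with made-up values.
from easydict import EasyDict

IMAGE_SIZE = [32, 32]
BATCH_SIZE = 1

# The real fixture defines a preprocessing pipeline; None is a placeholder.
PRE_PROCESSOR = None

DATASET = EasyDict({
    # Forwarded to the TFDSClassification constructor via **TFDS_KWARGS.
    "TFDS_KWARGS": EasyDict({
        "name": "tfds_classification",     # hypothetical TFDS dataset name
        "data_dir": "tmp/tests/datasets",  # hypothetical build directory
    }),
})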
Example #4
def test_predict_object_detection():

    config_file = "tests/fixtures/configs/for_predict_object_detection.py"
    expriment_id = "test_predict_object_detection"
    train_run(None, None, config_file, expriment_id, recreate=True)

    setup_test_environment()

    run("tests/fixtures/sample_images",
        "outputs",
        experiment_id,
        None,
        None,
        save_images=True)
Example #5
import pytest

from lmnet import environment


@pytest.fixture
def set_test_environment():
    """Set up the test environment, and tear it down after the test."""
    print("set test environment")

    yield environment.setup_test_environment()

    # By using a yield statement instead of return, all the code after the yield statement serves as the teardown code:
    # See also: https://docs.pytest.org/en/latest/fixture.html#fixture-finalization-executing-teardown-code
    environment.teardown_test_environment()
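
As a self-contained illustration of the yield-fixture pattern described in the comment above, here is a generic pytest fixture with teardown; the names below are invented for the example and are not part of lmnet:

import pytest


@pytest.fixture
def tmp_resource():
    resource = {"ready": True}  # setup: runs before the test body
    yield resource              # the test executes at this point
    resource.clear()            # teardown: runs after the test, even on failure


def test_uses_resource(tmp_resource):
    assert tmp_resource["ready"]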
Example #6
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import pytest

from executor.profile_model import run
from executor.train import run as train_run
from lmnet.environment import setup_test_environment

# Apply the reset_default_graph() and set_test_environment fixtures from
# conftest.py to all tests in this file.
pytestmark = pytest.mark.usefixtures("reset_default_graph", "set_test_environment")


def test_profile():

    config_file = "tests/fixtures/configs/for_profile.py"
    expriment_id = "test_profile"
    train_run(None, None, config_file, expriment_id, recreate=True)

    setup_test_environment()

    run(experiment_id, None, None, 2, [])


if __name__ == '__main__':
    setup_test_environment()
    test_profile()
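
The pytestmark line above assumes fixtures named reset_default_graph and set_test_environment exist in conftest.py. Example #5 shows one plausible definition of the latter; a hedged sketch of the graph-reset counterpart, assuming the TF1-style API this codebase's imports suggest, might look like this:

# Hypothetical conftest.py sketch; the real fixture definitions are not
# shown in this listing.
import pytest
import tensorflow as tf


@pytest.fixture
def reset_default_graph():
    # Give each test a fresh TensorFlow graph (TF1-style API).
    tf.reset_default_graph()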
Example #7
def test_dataset_iterator_batch_size():
    """Assert that the iterator yields batches of the configured batch size."""

    batch_size = 8
    dataset = Dummy(subset="train", batch_size=batch_size)
    dataset_iterator = DatasetIterator(dataset, seed=10, enable_prefetch=False)

    for i in range(0, 10):
        images, labels = next(dataset_iterator)
        assert images.shape[0] == batch_size
        assert labels.shape[0] == batch_size


def test_dataset_iterator_batch_order():
    """Assert that data given by iterator is same whether enabele_prefetch ture or false."""

    batch_size = 8
    dataset = Dummy(subset="train", batch_size=batch_size)
    dataset_iterator = DatasetIterator(dataset, seed=10, enable_prefetch=False)
    prefetch_dataset_iterator = DatasetIterator(dataset,
                                                seed=10,
                                                enable_prefetch=True)

    for i in range(0, 30):
        images, labels = next(dataset_iterator)
        prefetch_images, prefetch_labels = next(prefetch_dataset_iterator)

        assert np.all(images == prefetch_images)
        assert np.all(labels == prefetch_labels)


if __name__ == '__main__':
    from lmnet import environment
    environment.setup_test_environment()
    test_dataset_iterator_batch_size()
    test_dataset_iterator_batch_order()
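
To make the ordering guarantee exercised by test_dataset_iterator_batch_order concrete, here is a minimal sketch of the single-producer prefetching technique: one background thread fills a FIFO queue, so prefetching cannot reorder batches. This illustrates the idea only and is not lmnet's actual DatasetIterator implementation:

import queue
import threading


class PrefetchIterator:
    """Wraps an iterator and prefetches items on a background thread.

    A single producer thread feeding a FIFO queue preserves the underlying
    iteration order, which is why enabling prefetch should not change the
    batches a test observes.
    """

    _SENTINEL = object()

    def __init__(self, iterator, buffer_size=4):
        self._queue = queue.Queue(maxsize=buffer_size)
        self._thread = threading.Thread(
            target=self._producer, args=(iterator,), daemon=True)
        self._thread.start()

    def _producer(self, iterator):
        for item in iterator:
            self._queue.put(item)        # blocks while the buffer is full
        self._queue.put(self._SENTINEL)  # signal exhaustion to the consumer

    def __iter__(self):
        return self

    def __next__(self):
        item = self._queue.get()
        if item is self._SENTINEL:
            raise StopIteration
        return item


# Usage: both iterators yield the same values in the same order.
plain = iter(range(10))
prefetched = PrefetchIterator(iter(range(10)))
assert list(plain) == list(prefetched)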