Example #1
def test_shuffle_tensor():
    # Make a numerical input tensor
    numerical_tensor = np.random.uniform(size=(2, 5, 5, 2))

    # Case 1: Known ndim, axis = -1
    possible_numerical_outputs = [
        numerical_tensor, numerical_tensor[..., ::-1]
    ]
    # Make tensor and op
    tensor = A.placeholder(shape=[None] * 4)
    shuffled_tensor = A.shuffle_tensor(tensor, axis=-1)
    # Evaluate
    output = A.run(shuffled_tensor, {tensor: numerical_tensor})
    # Check that the output matches one of the possible results
    assert any(
        np.allclose(numerical_output, output)
        for numerical_output in possible_numerical_outputs
    )

    # Case 2: unknown ndim, axis = 0
    possible_numerical_outputs = [
        numerical_tensor, numerical_tensor[::-1, ...]
    ]
    # Make tensor and op
    tensor = A.placeholder()
    shuffled_tensor = A.shuffle_tensor(tensor)
    # Evaluate
    output = A.run(shuffled_tensor, {tensor: numerical_tensor})
    # Check that the output matches one of the possible results
    assert any(
        np.allclose(numerical_output, output)
        for numerical_output in possible_numerical_outputs
    )

    # Check whether the gradients can be computed
    tensor = A.placeholder()
    shuffled_tensor = A.shuffle_tensor(tensor, differentiable=True)
    grad_tensor = A.gradients(objective=A.reduce_(shuffled_tensor, 'mean'),
                              with_respect_to=tensor)
    numerical_grad_tensor = A.run(grad_tensor, {tensor: numerical_tensor})
    assert numerical_grad_tensor.values.shape == numerical_tensor.shape

    # Check whether the lookup error is raised otherwise
    with pytest.raises(LookupError):
        tensor = A.placeholder()
        shuffled_tensor = A.shuffle_tensor(tensor, differentiable=False)
        grad_tensor = A.gradients(objective=A.reduce_(shuffled_tensor, 'mean'),
                                  with_respect_to=tensor)

    # Case 3: Unknown ndim, axis = -1
    with pytest.raises(AssertionError):
        # Make tensor and op
        tensor = A.placeholder()
        shuffled_tensor = A.shuffle_tensor(tensor, axis=-1)

    # Case 4: Known ndim, axis >= ndim
    with pytest.raises(AssertionError):
        # Make tensor and op
        tensor = A.placeholder(shape=[None] * 4)
        shuffled_tensor = A.shuffle_tensor(tensor, axis=4)
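
For reference, a plain NumPy sketch of the behaviour the test relies on (assuming shuffle_tensor permutes the slices of the tensor along the given axis): with an axis of length 2, the only possible permutations are the identity and the reversal, which is why the assertions compare against exactly those two candidates.

import numpy as np

def numpy_shuffle_reference(x, axis=0):
    # Permute the slices of x along the given axis with a random permutation.
    perm = np.random.permutation(x.shape[axis])
    return np.take(x, perm, axis=axis)

x = np.random.uniform(size=(2, 5, 5, 2))
y = numpy_shuffle_reference(x, axis=-1)
# Only two slices along the last axis, so y is either x or x[..., ::-1].
assert any(np.allclose(y, candidate) for candidate in (x, x[..., ::-1]))
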
Example #2
def test_tversky_distance():
    _y = np.zeros(shape=(5, ))
    _yt = np.zeros(shape=(5, ))
    _y[0:2] = 1.
    _yt[0:2] = 1.
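    # With identical inputs, the Tversky distance should evaluate to zero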
    y = A.placeholder()
    yt = A.placeholder()
    d = A.tversky_distance(y, yt, with_logits=False)
    _d = A.run(d, {y: _y, yt: _yt})
    assert _d == 0.
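
For context, a minimal NumPy sketch of the Tversky distance this test relies on, assuming the common definition distance = 1 - TP / (TP + alpha*FP + beta*FN), with hypothetical default weights alpha = beta = 0.5 and a small smoothing epsilon (the actual defaults of A.tversky_distance may differ). Identical binary masks produce no false positives or negatives, so the distance is numerically zero, which is what the test asserts.

import numpy as np

def tversky_distance_reference(y, yt, alpha=0.5, beta=0.5, eps=1e-8):
    # Tversky distance between a prediction y and a target yt, both in [0, 1].
    tp = np.sum(y * yt)
    fp = np.sum(y * (1. - yt))
    fn = np.sum((1. - y) * yt)
    return 1. - tp / (tp + alpha * fp + beta * fn + eps)

y = np.zeros(5)
yt = np.zeros(5)
y[:2] = 1.
yt[:2] = 1.
# Identical masks: no false positives or negatives, so the distance is ~0.
assert np.isclose(tversky_distance_reference(y, yt), 0., atol=1e-6)
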
Example #3
def test_normalize():
    # Case 1: No mean and standard deviation known in advance
    # (other test cases should be covered in TensorFlow)
    tensor = A.placeholder(shape=[None] * 2)
    normalized_tensor = A.normalize(tensor)

    rng = np.random.RandomState(42)
    numerical_tensor = rng.uniform(size=(10, 10), low=-1., high=5.)
    numerical_normalized_tensor = A.run(normalized_tensor,
                                        {tensor: numerical_tensor})

    assert np.allclose(np.mean(numerical_normalized_tensor), 0., atol=1e-7)
    assert np.allclose(np.std(numerical_normalized_tensor), 1., atol=1e-3)
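
As a point of comparison, a minimal NumPy sketch of the standardization the assertions check for, assuming A.normalize subtracts the mean and divides by the standard deviation of the whole tensor (a small epsilon is added here only for numerical safety):

import numpy as np

def normalize_reference(x, eps=1e-7):
    # Standardize x to zero mean and unit standard deviation.
    return (x - np.mean(x)) / (np.std(x) + eps)

rng = np.random.RandomState(42)
x = rng.uniform(size=(10, 10), low=-1., high=5.)
z = normalize_reference(x)
assert np.allclose(np.mean(z), 0., atol=1e-7)
assert np.allclose(np.std(z), 1., atol=1e-3)
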
Example #4
def test_feeder_runner():
    # ---- BASIC TEST
    # Define a random generator to mimic mnist
    def random_generator_mnist(batch_size=10):
        yield np.random.uniform(size=(batch_size, 28, 28, 1)).astype('float32'), \
              np.random.uniform(size=(batch_size, 1, 1, 1)).astype('float32')

    # Build preprocessing function
    def preprocessor(inputs):
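        # Simulate an expensive preprocessing step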
        time.sleep(5)
        return inputs

    # Build the feeder runner with multiple threads. Note that the batch_size of the
    # generator need not match that of the feeder_runner
    feeder_runner = runners.FeederRunner(feeder=random_generator_mnist(),
                                         preprocessor=preprocessor,
                                         dtypes=['float32', 'float32'],
                                         input_shape=[[None, 28, 28, 1],
                                                      [None, 1, 1, 1]],
                                         num_threads=2,
                                         batch_size=5)
    images, labels = feeder_runner.dq()

    # We need to create the session right now (in this thread), because TensorFlow
    # sets a different default session for every thread
    # sess = A.Session.session

    # Start generator threads
    feeder_runner.start_runner()

    # Dequeue
    np_images, np_labels = A.run([images, labels])

    # Stop generator threads
    feeder_runner.stop_runner()

    # Check shapes
    assert np_images.shape == (5, 28, 28, 1)
    assert np_labels.shape == (5, 1, 1, 1)
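
For orientation, a minimal pure-Python sketch of the producer/consumer pattern a feeder runner like this implements: worker threads pull samples from the generator, run the preprocessor and push the results onto a queue from which the consumer dequeues. This only illustrates the threading model; it is not the FeederRunner implementation.

import queue
import threading
import numpy as np

def start_feeder_threads(feeder, preprocessor, out_queue, num_threads=2):
    # Each worker repeatedly pulls a sample from the shared generator,
    # preprocesses it and enqueues the result for the consumer.
    lock = threading.Lock()

    def worker():
        while True:
            with lock:
                try:
                    sample = next(feeder)
                except StopIteration:
                    return
            out_queue.put(preprocessor(sample))

    threads = [threading.Thread(target=worker, daemon=True)
               for _ in range(num_threads)]
    for thread in threads:
        thread.start()
    return threads

def toy_feeder():
    for _ in range(4):
        yield np.random.uniform(size=(10, 28, 28, 1)).astype('float32')

batch_queue = queue.Queue(maxsize=8)
start_feeder_threads(toy_feeder(), preprocessor=lambda x: x, out_queue=batch_queue)
batch = batch_queue.get()
assert batch.shape == (10, 28, 28, 1)
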