Example #1
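A unit test for `batching_functions.random_batching`: it checks that successive batches drawn from a 10-point dataset are disjoint and that, taken together, they cover every index exactly once per epoch.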
import numpy as np

import batching_functions  # project-local module under test; import path assumed


def test_random_batching():
    points = np.random.uniform(size=(10, 2))
    batching_function = batching_functions.random_batching(points)

    a, b = batching_function(5, points, {}), batching_function(5, points, {})
    # check that this is the whole dataset
    epoch_indices = np.concatenate((a, b)).tolist()
    assert len(epoch_indices) == 10
    assert len(epoch_indices) == len(set(epoch_indices))
    assert set(epoch_indices) == set(range(10))

    # five batches of two should again cover the whole dataset exactly once
    batches = [batching_function(2, points, {}) for _ in range(5)]
    assert all(len(batch) == 2 for batch in batches)
    epoch_indices2 = np.concatenate(batches).tolist()

    assert set(epoch_indices2) == set(epoch_indices)
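The test only pins down the interface, not the implementation: `random_batching(points)` returns a callable taking `(batch_size, points, extra)` that yields index batches partitioning the dataset within each epoch. A minimal sketch consistent with that contract (an illustration under those assumptions, not the project's actual code) could look like this:

import numpy as np


def random_batching(points):
    """Batching function that draws indices without replacement per epoch."""
    state = {'order': np.random.permutation(len(points)), 'cursor': 0}

    def batching_function(batch_size, points, _extra):
        # reshuffle and start a new epoch once the permutation is exhausted
        if state['cursor'] + batch_size > len(state['order']):
            state['order'] = np.random.permutation(len(points))
            state['cursor'] = 0
        batch = state['order'][state['cursor']:state['cursor'] + batch_size]
        state['cursor'] += batch_size
        return batch

    return batching_function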
Example #2
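A training-script snippet built on the same modules: inputs are 100-dimensional uniform noise vectors, targets are sampled from a heatmap via `noise_as_targets.sample_from_heatmap`, batches are drawn with `random_batching`, and the model is an MLP with two 512-unit tanh hidden layers trained with batch size 128.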
import numpy as np
import tensorflow as tf

# project-local modules; import paths assumed
import batching_functions
import models
import noise_as_targets

train_size = 64_000 * 2

# data_points = np.random.normal(size=(train_size, 3))
# # l2 normalize the points
# data_points /= np.linalg.norm(data_points, axis=1, ord=2).reshape((-1, 1))
input_noise_fn = lambda size: np.random.uniform(size=(size, 100))  # NOQA
data_points = input_noise_fn(train_size)
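
# `heatmap` is not defined in this snippet; as a hypothetical stand-in, assume
# a normalized 2-D Gaussian density over a 100x100 grid:
ys, xs = np.mgrid[0:100, 0:100]
heatmap = np.exp(-((xs - 50) ** 2 + (ys - 50) ** 2) / (2 * 15.0 ** 2))
heatmap /= heatmap.sum()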

targets = noise_as_targets.sample_from_heatmap(
    heatmap,
    train_size,
    sampling_method='even',
)

# batching_function = batching_functions.progressive_local_search(targets)
batching_function = batching_functions.random_batching(targets)

config = {
    'dataset_fn': lambda: (data_points, targets),
    'model_fn': lambda input_t, output_size: models.multi_layer_mlp(
        input_t, output_size, hidden_dims=[512, 512], activation_fn=tf.tanh),
    'batch_size': 128,
    'batching_fn': batching_function,
    # 'eval_steps': 500
    'eval_steps': 10_000,
    'input_noise_fn':