# Exemplo n.º 1
# 0
def construct_model(lbann):
    """Construct LBANN model.

    Exercises the Concatenation layer along axes 0, 1, and 2 (and with a
    model-parallel data layout), comparing each result against a NumPy
    reference via metric-checking callbacks, then runs gradient checking.

    Args:
        lbann (module): Module for LBANN Python frontend

    Returns:
        lbann.Model: Model with metric- and gradient-checking callbacks.

    """

    # Input data
    # Note: Sum with a weights layer so that gradient checking will
    # verify that error signals are correct.
    x_weights = lbann.Weights(optimizer=lbann.SGD(),
                              initializer=lbann.ConstantInitializer(value=0.0),
                              name='input_weights')
    x = lbann.Sum(
        lbann.Input(),
        lbann.WeightsLayer(weights=x_weights,
                           dims=tools.str_list(_sample_size)))
    x_lbann = x

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    # --------------------------
    # Concatenate along axis 0
    # --------------------------

    # LBANN implementation
    # Each Identity layer consumes one partition of the Slice output
    # (presumably in slice-point order -- mirrors the NumPy reference below).
    x = x_lbann
    x = lbann.Reshape(x, dims=tools.str_list([5, 3, 4]))
    x_slice = lbann.Slice(x, axis=0, slice_points=tools.str_list([0, 1, 3, 5]))
    x1 = lbann.Identity(x_slice)
    x2 = lbann.Identity(x_slice)
    x3 = lbann.Identity(x_slice)
    y = lbann.Concatenation(x3, x2, x1, axis=0)
    # Multiply by the original tensor so the metric is sensitive to the
    # order in which the slices were concatenated.
    z = lbann.L2Norm2(lbann.Multiply(x, y))
    obj.append(z)
    metrics.append(lbann.Metric(z, name='axis0'))

    # NumPy implementation
    vals = []
    for i in range(num_samples()):
        x = get_sample(i).reshape([5, 3, 4]).astype(np.float64)
        x1 = x[0:1, :, :]
        x2 = x[1:3, :, :]
        x3 = x[3:5, :, :]
        y = np.concatenate((x3, x2, x1), axis=0)
        z = tools.numpy_l2norm2(x * y)
        vals.append(z)
    val = np.mean(vals)
    # Loose tolerance to accommodate single-precision LBANN arithmetic.
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val - tol,
                                  upper_bound=val + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # --------------------------
    # Slice along axis 1
    # --------------------------

    # LBANN implementation
    x = x_lbann
    x = lbann.Reshape(x, dims=tools.str_list([3, 4, 5]))
    x_slice = lbann.Slice(x, axis=1, slice_points=tools.str_list([0, 1, 3, 4]))
    x1 = lbann.Identity(x_slice)
    x2 = lbann.Identity(x_slice)
    x3 = lbann.Identity(x_slice)
    y = lbann.Concatenation(x2, x1, x3, axis=1)
    z = lbann.L2Norm2(lbann.Multiply(x, y))
    obj.append(z)
    metrics.append(lbann.Metric(z, name='axis1'))

    # NumPy implementation
    vals = []
    for i in range(num_samples()):
        x = get_sample(i).reshape([3, 4, 5]).astype(np.float64)
        x1 = x[:, 0:1, :]
        x2 = x[:, 1:3, :]
        x3 = x[:, 3:4, :]
        y = np.concatenate((x2, x1, x3), axis=1)
        z = tools.numpy_l2norm2(x * y)
        vals.append(z)
    val = np.mean(vals)
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val - tol,
                                  upper_bound=val + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # --------------------------
    # Slice along axis 2
    # --------------------------

    # LBANN implementation
    x = x_lbann
    x = lbann.Reshape(x, dims=tools.str_list([3, 4, 5]))
    x_slice = lbann.Slice(x,
                          axis=2,
                          slice_points=tools.str_list([0, 1, 2, 3, 5]))
    x1 = lbann.Identity(x_slice)
    x2 = lbann.Identity(x_slice)
    x3 = lbann.Identity(x_slice)
    x4 = lbann.Identity(x_slice)
    y = lbann.Concatenation(x2, x4, x1, x3, axis=2)
    z = lbann.L2Norm2(lbann.Multiply(x, y))
    obj.append(z)
    metrics.append(lbann.Metric(z, name='axis2'))

    # NumPy implementation
    vals = []
    for i in range(num_samples()):
        x = get_sample(i).reshape([3, 4, 5]).astype(np.float64)
        x1 = x[:, :, 0:1]
        x2 = x[:, :, 1:2]
        x3 = x[:, :, 2:3]
        x4 = x[:, :, 3:5]
        y = np.concatenate((x2, x4, x1, x3), axis=2)
        z = tools.numpy_l2norm2(x * y)
        vals.append(z)
    val = np.mean(vals)
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val - tol,
                                  upper_bound=val + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # --------------------------
    # Model-parallel
    # --------------------------

    # LBANN implementation
    # Flat (1-D) tensor; no axis argument, so slicing/concatenation act on
    # the single dimension.
    x = x_lbann
    x = lbann.Reshape(x, dims=tools.str_list([60]))
    x_slice = lbann.Slice(x, slice_points=tools.str_list([0, 22, 23, 60]))
    x1 = lbann.Identity(x_slice)
    x2 = lbann.Identity(x_slice)
    x3 = lbann.Identity(x_slice)
    y = lbann.Concatenation(x3, x1, x2, data_layout='model_parallel')
    z = lbann.L2Norm2(lbann.Multiply(x, y))
    obj.append(z)
    metrics.append(lbann.Metric(z, name='model-parallel'))

    # NumPy implementation
    vals = []
    for i in range(num_samples()):
        x = get_sample(i).reshape([60]).astype(np.float64)
        x1 = x[0:22]
        x2 = x[22:23]
        x3 = x[23:60]
        y = np.concatenate((x3, x1, x2))
        z = tools.numpy_l2norm2(x * y)
        vals.append(z)
    val = np.mean(vals)
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val - tol,
                                  upper_bound=val + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # --------------------------
    # Gradient checking
    # --------------------------

    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # --------------------------
    # Construct model
    # --------------------------

    # num_epochs=0: the test only runs evaluation/gradient checking.
    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph(x_lbann),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
# Exemplo n.º 2
# 0
def construct_model(lbann):
    """Construct LBANN model.

    Exercises the MatMul layer in all four transpose modes (NN, TN, NT,
    TT), checking each against ``np.matmul`` via metric callbacks, then
    runs gradient checking.

    Args:
        lbann (module): Module for LBANN Python frontend

    Returns:
        lbann.Model: Model with metric- and gradient-checking callbacks.

    """

    # Input data
    # Note: Sum with weights layers so that gradient checking will
    # verify that error signals are correct.
    x0_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ConstantInitializer(value=0.0),
        name='input0_weights')
    x1_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ConstantInitializer(value=0.0),
        name='input1_weights')
    # Split the flat sample into the two matrix operands:
    # first _m*_k values for x0, next _k*_n values for x1.
    x_slice = lbann.Slice(lbann.Input(data_field='samples'),
                          slice_points=tools.str_list(
                              [0, _m * _k, _m * _k + _k * _n]))
    x0 = lbann.Sum(x_slice,
                   lbann.WeightsLayer(weights=x0_weights, dims=str(_m * _k)))
    x1 = lbann.Sum(x_slice,
                   lbann.WeightsLayer(weights=x1_weights, dims=str(_k * _n)))
    x0_lbann = x0
    x1_lbann = x1

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    # ------------------------------------------
    # NN GEMM
    # ------------------------------------------

    # LBANN implementation
    x0 = lbann.Reshape(x0_lbann, dims=tools.str_list([_m, _k]))
    x1 = lbann.Reshape(x1_lbann, dims=tools.str_list([_k, _n]))
    y = lbann.MatMul(x0, x1, data_layout='data_parallel')
    z = lbann.L2Norm2(y)
    obj.append(z)
    metrics.append(lbann.Metric(z, name='NN GEMM'))

    # NumPy implementation
    vals = []
    for i in range(num_samples()):
        x = get_sample(i).astype(np.float64)
        x0 = x[:_m * _k].reshape([_m, _k])
        x1 = x[_m * _k:].reshape([_k, _n])
        y = np.matmul(x0, x1)
        z = tools.numpy_l2norm2(y)
        vals.append(z)
    val = np.mean(vals)
    # Loose tolerance to accommodate single-precision LBANN arithmetic.
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val - tol,
                                  upper_bound=val + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # ------------------------------------------
    # TN GEMM
    # ------------------------------------------

    # LBANN implementation
    # x0 is reshaped to [_k, _m] so transpose_a=True yields an [_m, _k]
    # effective operand.
    x0 = lbann.Reshape(x0_lbann, dims=tools.str_list([_k, _m]))
    x1 = lbann.Reshape(x1_lbann, dims=tools.str_list([_k, _n]))
    y = lbann.MatMul(x0, x1, transpose_a=True, data_layout='data_parallel')
    z = lbann.L2Norm2(y)
    obj.append(z)
    metrics.append(lbann.Metric(z, name='TN GEMM'))

    # NumPy implementation
    vals = []
    for i in range(num_samples()):
        x = get_sample(i).astype(np.float64)
        x0 = x[:_m * _k].reshape([_k, _m])
        x1 = x[_m * _k:].reshape([_k, _n])
        y = np.matmul(x0.transpose(), x1)
        z = tools.numpy_l2norm2(y)
        vals.append(z)
    val = np.mean(vals)
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val - tol,
                                  upper_bound=val + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # ------------------------------------------
    # NT GEMM
    # ------------------------------------------

    # LBANN implementation
    # x1 is reshaped to [_n, _k] so transpose_b=True yields a [_k, _n]
    # effective operand.
    x0 = lbann.Reshape(x0_lbann, dims=tools.str_list([_m, _k]))
    x1 = lbann.Reshape(x1_lbann, dims=tools.str_list([_n, _k]))
    y = lbann.MatMul(x0, x1, transpose_b=True, data_layout='data_parallel')
    z = lbann.L2Norm2(y)
    obj.append(z)
    metrics.append(lbann.Metric(z, name='NT GEMM'))

    # NumPy implementation
    vals = []
    for i in range(num_samples()):
        x = get_sample(i).astype(np.float64)
        x0 = x[:_m * _k].reshape([_m, _k])
        x1 = x[_m * _k:].reshape([_n, _k])
        y = np.matmul(x0, x1.transpose())
        z = tools.numpy_l2norm2(y)
        vals.append(z)
    val = np.mean(vals)
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val - tol,
                                  upper_bound=val + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # ------------------------------------------
    # TT GEMM
    # ------------------------------------------

    # LBANN implementation
    x0 = lbann.Reshape(x0_lbann, dims=tools.str_list([_k, _m]))
    x1 = lbann.Reshape(x1_lbann, dims=tools.str_list([_n, _k]))
    y = lbann.MatMul(x0,
                     x1,
                     transpose_a=True,
                     transpose_b=True,
                     data_layout='data_parallel')
    z = lbann.L2Norm2(y)
    obj.append(z)
    metrics.append(lbann.Metric(z, name='TT GEMM'))

    # NumPy implementation
    vals = []
    for i in range(num_samples()):
        x = get_sample(i).astype(np.float64)
        x0 = x[:_m * _k].reshape([_k, _m])
        x1 = x[_m * _k:].reshape([_n, _k])
        y = np.matmul(x0.transpose(), x1.transpose())
        z = tools.numpy_l2norm2(y)
        vals.append(z)
    val = np.mean(vals)
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val - tol,
                                  upper_bound=val + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # ------------------------------------------
    # Gradient checking
    # ------------------------------------------

    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # ------------------------------------------
    # Construct model
    # ------------------------------------------

    # num_epochs=0: the test only runs evaluation/gradient checking.
    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph(x0_lbann),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
# Exemplo n.º 3
# 0
def construct_model(lbann):
    """Construct LBANN model.

    Evaluates the ErfInv layer against ``scipy.special.erfinv`` with a
    metric-checking callback, then runs gradient checking.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Input data, summed with a zero-initialized weights layer so that
    # gradient checking will verify that input error signals are correct.
    input_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ConstantInitializer(value=0.0),
        name='input_weights')
    x_lbann = lbann.Sum(
        lbann.Reshape(lbann.Input(), dims=tools.str_list(_sample_size)),
        lbann.WeightsLayer(weights=input_weights,
                           dims=tools.str_list(_sample_size)))

    # Containers for model components
    objectives = []
    model_metrics = []
    model_callbacks = []

    # ------------------------------------------
    # Basic case
    # ------------------------------------------

    # LBANN implementation
    z = lbann.L2Norm2(lbann.ErfInv(x_lbann))
    objectives.append(z)
    model_metrics.append(lbann.Metric(z, name='erf'))

    # NumPy reference: squared L2 norm of erfinv applied to each sample,
    # averaged over the data set.
    sample_vals = [
        tools.numpy_l2norm2(
            scipy.special.erfinv(get_sample(i).astype(np.float64)))
        for i in range(num_samples())
    ]
    expected = np.mean(sample_vals)
    # Loose tolerance to accommodate single-precision LBANN arithmetic.
    tolerance = 8 * expected * np.finfo(np.float32).eps
    model_callbacks.append(
        lbann.CallbackCheckMetric(metric=model_metrics[-1].name,
                                  lower_bound=expected - tolerance,
                                  upper_bound=expected + tolerance,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # ------------------------------------------
    # Gradient checking
    # ------------------------------------------

    model_callbacks.append(
        lbann.CallbackCheckGradients(error_on_failure=True))

    # ------------------------------------------
    # Construct model
    # ------------------------------------------

    # Zero epochs: the test only runs evaluation/gradient checking.
    return lbann.Model(0,
                       layers=lbann.traverse_layer_graph(x_lbann),
                       objective_function=objectives,
                       metrics=model_metrics,
                       callbacks=model_callbacks)
# Exemplo n.º 4
# 0
def construct_model(lbann):
    """Construct LBANN model.

    Evaluates a channelwise GRU cell against a NumPy reference
    implementation (``numpy_gru_cell``) with a metric-checking callback,
    then runs gradient checking.

    Args:
        lbann (module): Module for LBANN Python frontend

    Returns:
        lbann.Model: Model with metric- and gradient-checking callbacks.

    """
    from lbann.modules.rnn import ChannelwiseGRU

    # Input data
    # Note: Sum with a weights layer so that gradient checking will
    # verify that error signals are correct.
    x_weights = lbann.Weights(initializer=lbann.ConstantInitializer(value=0.0),
                              name='input')
    # NOTE(review): 'inital_hidden' is misspelled, but it is a runtime
    # weights name, so it is left unchanged.
    h_weights = lbann.Weights(initializer=lbann.ConstantInitializer(value=0.0),
                              name='inital_hidden')
    input_ = lbann.Input(data_field='samples')
    # Split the flat sample into the GRU input (first partition) and the
    # initial hidden state (second partition).
    input_slice = lbann.Slice(
        input_,
        slice_points=tools.str_list(
            [0, _num_channels * _input_size, _sample_size]),
    )
    x = lbann.Reshape(input_slice,
                      dims=tools.str_list([_num_channels, _input_size]),
                      name="input_reshape")
    x = lbann.Sum(x,
                  lbann.WeightsLayer(weights=x_weights,
                                     dims=tools.str_list(
                                         [_num_channels, _input_size])),
                  name="input_sum")

    # Presumably consumes the second Slice partition (size
    # _sample_size - _num_channels*_input_size == _num_channels*_hidden_size)
    # -- mirrors the NumPy reference below.
    h = lbann.Reshape(input_slice,
                      dims=tools.str_list([_num_channels, _hidden_size]),
                      name="hidden_reshape")
    h = lbann.Sum(h,
                  lbann.WeightsLayer(weights=h_weights,
                                     dims=tools.str_list(
                                         [_num_channels, _hidden_size])),
                  name="input_hidden_sum")

    x_lbann = x
    h_lbann = h

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    # Weights
    # Random input-to-hidden and hidden-to-hidden matrices/biases; the
    # factor 3 covers the GRU's reset/update/new gates.
    rnn_weights_numpy = []
    ih_matrix = np.random.uniform(
        low=-1,
        high=1,
        size=(3 * _hidden_size, _input_size),
    )
    hh_matrix = np.random.uniform(
        low=-1,
        high=1,
        size=(3 * _hidden_size, _hidden_size),
    )
    ih_bias = np.random.uniform(low=-1, high=1, size=(3 * _hidden_size, ))
    hh_bias = np.random.uniform(low=-1, high=1, size=(3 * _hidden_size, ))
    rnn_weights_numpy.extend([ih_matrix, ih_bias, hh_matrix, hh_bias])

    # Flatten in column-major (Fortran) order -- assumed to match the
    # layout ChannelwiseGRU expects; TODO confirm against the module.
    rnn_weights_lbann = [
        lbann.Weights(initializer=lbann.ValueInitializer(
            values=tools.str_list(np.nditer(w, order='F'))))
        for w in rnn_weights_numpy
    ]

    # LBANN implementation
    x = x_lbann
    h = h_lbann
    channelwise_GRU_cell = ChannelwiseGRU(num_channels=_num_channels,
                                          size=_hidden_size,
                                          weights=rnn_weights_lbann)
    y = channelwise_GRU_cell(x, h)
    z = lbann.L2Norm2(y)
    obj.append(z)
    metrics.append(
        lbann.Metric(z, name="Multi-channel, Unidirectional, GRU Cell"))

    # NumPy implementation
    vals = []
    for i in range(num_samples()):
        input_ = get_sample(i).astype(np.float64)
        x = input_[:_num_channels * _input_size].reshape(
            (_num_channels, _input_size))
        h = input_[_num_channels * _input_size:].reshape(
            (_num_channels, _hidden_size))
        y = numpy_gru_cell(x, h, rnn_weights_numpy)
        z = tools.numpy_l2norm2(y)
        vals.append(z)
    val = np.mean(vals)
    # Loose tolerance to accommodate single-precision LBANN arithmetic.
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(lbann.CallbackPrintModelDescription())
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val - tol,
                                  upper_bound=val + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # ------------------------------------------
    # Gradient checking
    # ------------------------------------------

    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # ------------------------------------------
    # Construct model
    # ------------------------------------------

    # num_epochs=0: the test only runs evaluation/gradient checking.
    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph(x_lbann),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
# Exemplo n.º 5
# 0
def construct_model(lbann):
    """Construct LBANN model.

    Evaluates the Embedding layer against a NumPy reference, both
    without a padding index and with padding index 0, then runs
    gradient checking.

    Args:
        lbann (module): Module for LBANN Python frontend

    Returns:
        lbann.Model: Model with metric- and gradient-checking callbacks.

    """

    # Input data (embedding indices)
    x = lbann.Identity(lbann.Input())
    x_lbann = x

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    # ------------------------------------------
    # No padding index
    # ------------------------------------------

    # Embeddings
    np.random.seed(20191015)
    embedding_dim = 5
    embeddings = np.random.normal(size=(_num_embeddings, embedding_dim))

    # LBANN implementation
    embedding_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ValueInitializer(
            values=tools.str_list(np.nditer(embeddings))))
    x = x_lbann
    y = lbann.Embedding(x,
                        weights=embedding_weights,
                        num_embeddings=_num_embeddings,
                        embedding_dim=embedding_dim)
    z = lbann.L2Norm2(y)
    obj.append(z)
    metrics.append(lbann.Metric(z, name='no padding index'))

    # NumPy implementation
    vals = []
    for i in range(num_samples()):
        x = get_sample(i)
        y = embeddings[x, :]
        z = tools.numpy_l2norm2(y)
        vals.append(z)
    val = np.mean(vals)
    # Loose tolerance to accommodate single-precision LBANN arithmetic.
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val - tol,
                                  upper_bound=val + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # ------------------------------------------
    # Padding index 0
    # ------------------------------------------

    # Embeddings
    np.random.seed(201910152)
    embedding_dim = 7
    padding_idx = 0
    embeddings = np.random.normal(size=(_num_embeddings, embedding_dim))

    # LBANN implementation
    # Note: Embedding layer gradients are not exact if a padding index
    # is set. Avoid gradient checking by not using an optimizer.
    embedding_weights = lbann.Weights(
        optimizer=None,
        initializer=lbann.ValueInitializer(
            values=tools.str_list(np.nditer(embeddings))))
    x = x_lbann
    y = lbann.Embedding(x,
                        weights=embedding_weights,
                        num_embeddings=_num_embeddings,
                        embedding_dim=embedding_dim,
                        padding_idx=padding_idx)
    z = lbann.L2Norm2(y)
    # Fix: append to the objective like the other sections. Gradient
    # checking is already avoided for these weights via optimizer=None,
    # so omitting z from the objective appears accidental.
    obj.append(z)
    metrics.append(lbann.Metric(z, name='padding index = 0'))

    # NumPy implementation
    # Entries at the padding index map to zero vectors.
    vals = []
    for i in range(num_samples()):
        x = get_sample(i)
        y = np.where((x == padding_idx).reshape((-1, 1)), 0, embeddings[x, :])
        z = tools.numpy_l2norm2(y)
        vals.append(z)
    val = np.mean(vals)
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val - tol,
                                  upper_bound=val + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # ------------------------------------------
    # Gradient checking
    # ------------------------------------------

    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # ------------------------------------------
    # Construct model
    # ------------------------------------------

    # num_epochs=0: the test only runs evaluation/gradient checking.
    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph(x_lbann),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
def construct_model(lbann):
    """Construct LBANN model.

    Evaluates 2-D and 3-D Pooling layers (average and max, several
    kernel/stride configurations) against a PyTorch reference when
    available, falling back to precomputed values, then runs gradient
    checking. Requires GPUs (skips otherwise).

    Args:
        lbann (module): Module for LBANN Python frontend

    Returns:
        lbann.Model: Model with metric- and gradient-checking callbacks.

    """

    # Input data
    # Note: Sum with a weights layer so that gradient checking will
    # verify that error signals are correct.
    x_weights = lbann.Weights(optimizer=lbann.SGD(),
                              initializer=lbann.ConstantInitializer(value=0.0),
                              name='input_weights')
    x = lbann.Sum(
        lbann.Reshape(lbann.Input(data_field='samples'),
                      dims=tools.str_list(_sample_dims)),
        lbann.WeightsLayer(weights=x_weights,
                           dims=tools.str_list(_sample_dims)))
    x_lbann = x

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    # ------------------------------------------
    # Pooling
    # ------------------------------------------

    num_height_groups = tools.gpus_per_node(lbann)
    if num_height_groups == 0:
        e = 'this test requires GPUs.'
        print('Skip - ' + e)
        pytest.skip(e)

    pool_configs = []

    # 3x3 pooling with same padding
    for mode, val in [
        ("average", 700.1066377082393),  # _num_samples=8
        ("max", 1255.4813455546334),  # _num_samples=8
            # ("average", 830.2573008820838), # _num_samples=23
            # ("max", 1167.667676299899), # _num_samples=23
    ]:
        pool_configs.append({
            "name": "3x3 {} pooling".format(mode),
            "kernel_dims": (3, 3),
            "strides": (1, 1),
            "pads": (0, 0),
            "pool_mode": mode,
            "val": val,
        })

    # 2x2 strided pooling
    for mode, val in [
        ("average", 263.76437243059104),  # _num_samples=23
        ("max", 358.66104389177207),  # _num_samples=23
            # ("average", 293.61402789516825), # _num_samples=23
            # ("max", 351.4916288366334), # _num_samples=23
    ]:
        pool_configs.append({
            "name": "2x2 {} pooling".format(mode),
            "kernel_dims": (2, 2),
            "strides": (2, 2),
            "pads": (0, 0),
            "pool_mode": mode,
            "val": val,
        })

    # 2x2x2 3D pooling
    for mode, val in [
        ("average", 59.3851451701403),  # _num_samples=8
        ("max", 216.75871475407558),  # _num_samples=8
            # ("average", 89.61246528381926), # _num_samples=23
            # ("max", 198.65624293856985), # _num_samples=23
    ]:
        pool_configs.append({
            "name": "2x2x2 {} pooling".format(mode),
            "kernel_dims": (2, 2, 2),
            "strides": (2, 2, 2),
            "pads": (0, 0, 0),
            "pool_mode": mode,
            "val": val,
        })

    for p in pool_configs:
        # Apply pooling
        x = x_lbann
        # 3-D kernels need the sample viewed with its 3-D dimensions.
        if len(p["kernel_dims"]) == 3:
            x = lbann.Reshape(x, dims=tools.str_list(_sample_dims_3d))

        y = lbann.Pooling(
            x,
            num_dims=len(p["kernel_dims"]),
            has_vectors=True,
            pool_dims=tools.str_list(p["kernel_dims"]),
            pool_strides=tools.str_list(p["strides"]),
            pool_pads=tools.str_list(p["pads"]),
            pool_mode=p["pool_mode"],
            parallel_strategy=create_parallel_strategy(num_height_groups))
        z = lbann.L2Norm2(y)

        # Since max pooling is not differentiable, we only use average pooling.
        if p["pool_mode"] == "average":
            obj.append(z)

        metrics.append(lbann.Metric(z, name=p["name"]))

        # PyTorch implementation
        # Best-effort reference: if the PyTorch-based computation fails
        # (e.g. torch unavailable), fall back to the precomputed value.
        try:
            x = _samples
            if len(p["kernel_dims"]) == 3:
                x = np.reshape(x, [_num_samples] + _sample_dims_3d)

            y = pytorch_pooling(
                x,
                p["kernel_dims"],
                p["pool_mode"],
                stride=p["strides"],
                padding=p["pads"],
            )
            z = tools.numpy_l2norm2(y) / _num_samples
            val = z
        except Exception:  # narrowed from bare `except:`
            # Precomputed value
            val = p["val"]
        # Loose tolerance to accommodate single-precision LBANN arithmetic.
        tol = 8 * val * np.finfo(np.float32).eps

        callbacks.append(
            lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                      lower_bound=val - tol,
                                      upper_bound=val + tol,
                                      error_on_failure=True,
                                      execution_modes='test'))

    # ------------------------------------------
    # Gradient checking
    # ------------------------------------------

    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # ------------------------------------------
    # Construct model
    # ------------------------------------------

    # num_epochs=0: the test only runs evaluation/gradient checking.
    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph(x_lbann),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
# Exemplo n.º 7
# 0
def construct_model(lbann):
    """Construct LBANN model.

    Evaluates the Tessellate layer in data-parallel and model-parallel
    layouts against a NumPy ``np.tile`` reference, then runs gradient
    checking.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Input data, summed with a zero-initialized weights layer so that
    # gradient checking will verify that input error signals are correct.
    input_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ConstantInitializer(value=0.0),
        name='input_weights')
    x_lbann = lbann.Sum(
        lbann.Reshape(lbann.Input(), dims=tools.str_list(_sample_dims)),
        lbann.WeightsLayer(weights=input_weights,
                           dims=tools.str_list(_sample_dims)))

    # Containers for model components
    obj = []
    metrics = []
    callbacks = []

    # One entry per tested configuration:
    # (data layout, output dims, NumPy tile repetitions, metric name)
    configs = [
        ('data_parallel', (7, 4, 3), (3, 4, 1), 'data-parallel layout'),
        ('model_parallel', (2, 1, 9), (1, 1, 3), 'model-parallel layout'),
    ]
    for layout, output_dims, reps, metric_name in configs:

        # LBANN implementation
        tessellated = lbann.Tessellate(x_lbann,
                                       dims=tools.str_list(output_dims),
                                       data_layout=layout)
        z = lbann.L2Norm2(tessellated)
        obj.append(z)
        metrics.append(lbann.Metric(z, name=metric_name))

        # NumPy reference: tile the sample and crop to the output dims.
        vals = []
        for i in range(num_samples()):
            sample = get_sample(i).reshape(_sample_dims).astype(np.float64)
            ref = np.tile(sample, reps)[:output_dims[0],
                                        :output_dims[1],
                                        :output_dims[2]]
            vals.append(tools.numpy_l2norm2(ref))
        val = np.mean(vals)
        # Loose tolerance to accommodate single-precision LBANN arithmetic.
        tol = 8 * val * np.finfo(np.float32).eps
        callbacks.append(
            lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                      lower_bound=val - tol,
                                      upper_bound=val + tol,
                                      error_on_failure=True,
                                      execution_modes='test'))

    # --------------------------
    # Gradient checking
    # --------------------------

    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # --------------------------
    # Construct model
    # --------------------------

    # Zero epochs: the test only runs evaluation/gradient checking.
    return lbann.Model(0,
                       layers=lbann.traverse_layer_graph(x_lbann),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
def construct_model(lbann):
    """Construct LBANN model.

    Writes random weights to ``.npy`` files, loads them back through
    ``lbann.NumpyInitializer`` into a weights layer and into
    data-parallel / model-parallel fully-connected layers, and checks
    each output's L2 norm against a NumPy reference.  This variant has
    no objective function or gradient checking; it validates metrics
    only.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Input data
    x = lbann.Input(data_field='samples')
    x_lbann = x

    # Objects for LBANN model
    metrics = []
    callbacks = []

    # ------------------------------------------
    # Data-parallel weights layer
    # ------------------------------------------
    # Note: Weights are stored in one column of (STAR,STAR)
    # distributed matrix

    # Weights
    # Random values are saved to a .npy file so that LBANN reads them
    # back through NumpyInitializer.
    weights_values = np.random.normal(size=_sample_dims).astype(np.float32)
    weights_file = os.path.join(weights_dir, 'dataparallel_weights.npy')
    np.save(weights_file, weights_values)

    # LBANN implementation
    # Element-wise multiply of the input with the file-initialized weights.
    x = lbann.Reshape(x_lbann, dims=tools.str_list(_sample_dims))
    weights = lbann.Weights(
        initializer=lbann.NumpyInitializer(file=weights_file),
    )
    weights = lbann.WeightsLayer(
        weights=weights,
        dims=tools.str_list(_sample_dims),
    )
    y = lbann.Multiply(x, weights)
    z = lbann.L2Norm2(y)
    metrics.append(lbann.Metric(z, name='data-parallel weights layer'))

    # NumPy implementation
    vals = []
    for i in range(num_samples()):
        x = get_sample(i).reshape(_sample_dims).astype(np.float64)
        y = x * weights_values
        z = tools.numpy_l2norm2(y)
        vals.append(z)
    val = np.mean(vals)
    # Tolerance scaled to single-precision round-off.
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(lbann.CallbackCheckMetric(
        metric=metrics[-1].name,
        lower_bound=val-tol,
        upper_bound=val+tol,
        error_on_failure=True,
        execution_modes='test'))

    # ------------------------------------------
    # Data-parallel FC layer
    # ------------------------------------------
    # Note: Weights are stored in (STAR,STAR) distributed matrix

    # Weights
    output_size = 7
    linearity = np.random.normal(size=(output_size, _sample_size)).astype(np.float32)
    # NOTE(review): the linearity is re-cast and saved as float64 while
    # the bias stays float32 -- presumably to exercise 64-bit weight
    # files; confirm this asymmetry is intentional.
    linearity = linearity.astype(np.float64)
    bias = np.random.normal(size=output_size).astype(np.float32)
    linearity_file = os.path.join(weights_dir, 'dataparallel_fc_linearity.npy')
    bias_file = os.path.join(weights_dir, 'dataparallel_fc_bias.npy')
    np.save(linearity_file, linearity)
    np.save(bias_file, bias)

    # LBANN implementation
    x = x_lbann
    linearity_weights \
        = lbann.Weights(initializer=lbann.NumpyInitializer(file=linearity_file))
    bias_weights \
        = lbann.Weights(initializer=lbann.NumpyInitializer(file=bias_file))
    y = lbann.FullyConnected(
        x,
        weights=(linearity_weights, bias_weights),
        data_layout='data_parallel',
        num_neurons=output_size,
        has_bias=True,
        transpose=False)
    z = lbann.L2Norm2(y)
    metrics.append(lbann.Metric(z, name='data-parallel FC layer'))

    # NumPy implementation
    # Reference: y = W x + b with everything promoted to float64.
    vals = []
    for i in range(num_samples()):
        x = get_sample(i).astype(np.float64)
        y = np.matmul(linearity, x) + bias
        z = tools.numpy_l2norm2(y)
        vals.append(z)
    val = np.mean(vals)
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(lbann.CallbackCheckMetric(
        metric=metrics[-1].name,
        lower_bound=val-tol,
        upper_bound=val+tol,
        error_on_failure=True,
        execution_modes='test'))

    # ------------------------------------------
    # Model-parallel FC layer
    # ------------------------------------------
    # Note: Weights are stored in (MC,MR) distributed matrix

    # Weights
    output_size = 9
    linearity = np.random.normal(size=(output_size, _sample_size)).astype(np.float32)
    bias = np.random.normal(size=output_size).astype(np.float32)
    # NOTE(review): here the bias (not the linearity) is saved as
    # float64 -- the mirror image of the data-parallel case above;
    # confirm intentional.
    bias = bias.astype(np.float64)
    linearity_file = os.path.join(weights_dir, 'modelparallel_fc_linearity.npy')
    bias_file = os.path.join(weights_dir, 'modelparallel_fc_bias.npy')
    np.save(linearity_file, linearity)
    np.save(bias_file, bias)

    # LBANN implementation
    x = x_lbann
    linearity_weights \
        = lbann.Weights(initializer=lbann.NumpyInitializer(file=linearity_file))
    bias_weights \
        = lbann.Weights(initializer=lbann.NumpyInitializer(file=bias_file))
    y = lbann.FullyConnected(
        x,
        weights=(linearity_weights, bias_weights),
        data_layout='model_parallel',
        num_neurons=output_size,
        has_bias=True,
        transpose=False)
    z = lbann.L2Norm2(y)
    metrics.append(lbann.Metric(z, name='model-parallel FC layer'))

    # NumPy implementation
    vals = []
    for i in range(num_samples()):
        x = get_sample(i).astype(np.float64)
        y = np.matmul(linearity, x) + bias
        z = tools.numpy_l2norm2(y)
        vals.append(z)
    val = np.mean(vals)
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(lbann.CallbackCheckMetric(
        metric=metrics[-1].name,
        lower_bound=val-tol,
        upper_bound=val+tol,
        error_on_failure=True,
        execution_modes='test'))

    # ------------------------------------------
    # Construct model
    # ------------------------------------------

    # No objective function: this model is evaluated for metrics only.
    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph(x_lbann),
                       metrics=metrics,
                       callbacks=callbacks)
def construct_model(lbann):
    """Construct LBANN model.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Input data
    # Each half of the input is summed with a zero-initialized weights
    # layer so that gradient checking exercises the error signals.
    slice_size = _samples.shape[-1]
    w0 = lbann.Weights(optimizer=lbann.SGD(),
                       initializer=lbann.ConstantInitializer(value=0.0),
                       name='input0_weights')
    w1 = lbann.Weights(optimizer=lbann.SGD(),
                       initializer=lbann.ConstantInitializer(value=0.0),
                       name='input1_weights')
    halves = lbann.Slice(
        lbann.Input(data_field='samples'),
        slice_points=tools.str_list([0, slice_size, 2 * slice_size]))
    x0_lbann = lbann.Sum(
        halves, lbann.WeightsLayer(weights=w0, dims=str(slice_size)))
    x1_lbann = lbann.Sum(
        halves, lbann.WeightsLayer(weights=w1, dims=str(slice_size)))

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    # Check sigmoid binary cross-entropy under both layouts; the NumPy
    # reference is layout-independent.
    for layout in ('data_parallel', 'model_parallel'):

        # LBANN implementation
        y = lbann.SigmoidBinaryCrossEntropy(x0_lbann, x1_lbann,
                                            data_layout=layout)
        z = lbann.L2Norm2(y)
        obj.append(z)
        metrics.append(
            lbann.Metric(z, name=layout.replace('_', '-') + ' layout'))

        # NumPy implementation
        # First half of each sample is the logits, second half the targets.
        vals = []
        for i in range(num_samples()):
            sample = get_sample(i).astype(np.float64)
            logits = sample[:slice_size]
            targets = sample[slice_size:]
            loss = (-targets * np.log1p(np.exp(-logits))
                    - (1 - targets) * np.log1p(np.exp(logits)))
            vals.append(tools.numpy_l2norm2(loss))
        val = np.mean(vals)
        tol = 8 * val * np.finfo(np.float32).eps
        callbacks.append(lbann.CallbackCheckMetric(
            metric=metrics[-1].name,
            lower_bound=val - tol,
            upper_bound=val + tol,
            error_on_failure=True,
            execution_modes='test'))

    # ------------------------------------------
    # Gradient checking
    # ------------------------------------------

    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # ------------------------------------------
    # Construct model
    # ------------------------------------------

    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph(x0_lbann),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
def construct_model(lbann):
    """Construct LBANN model.

    Exercises LBANN's sub-graph parallelism: the input is sliced into
    two sub-branches (tagged via ``parallel_strategy``), reduced across
    grids with ``Cross_Grid_Sum``, and the result is compared against a
    NumPy reference.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Input data
    # Note: Sum with a weights layer so that gradient checking will
    # verify that error signals are correct.
    x_weights = lbann.Weights(optimizer=lbann.SGD(),
                              initializer=lbann.ConstantInitializer(value=0.0),
                              name='input_weights')
    x = lbann.Sum(lbann.Reshape(lbann.Input(data_field='samples'),
                                dims=tools.str_list(_sample_dims)),
                  lbann.WeightsLayer(weights=x_weights,
                                     dims=tools.str_list(_sample_dims)))
    x_lbann = x

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    # ------------------------------------------
    # Data-parallel layout
    # ------------------------------------------

    # LBANN implementation
    x = x_lbann
    # NOTE(review): `y` is not referenced below -- presumably kept so the
    # identity layer stays in the traversed graph; confirm it is needed.
    y = lbann.Identity(x, data_layout='data_parallel')

    # Split along axis 2; each slice becomes a sub-branch that runs on
    # its own sub-grid (selected by 'sub_branch_tag').
    slice_points = (0, 4, 8)
    x_slice = lbann.Slice(x, axis=2, slice_points=tools.str_list(slice_points),parallel_strategy = {'sub_branch_tag':0,'enable_subgraph':True})

    branch1 = lbann.Identity(x_slice, data_layout='data_parallel',parallel_strategy = {'sub_branch_tag':1,'enable_subgraph':True})
    branch2 = lbann.Identity(x_slice, data_layout='data_parallel',parallel_strategy = {'sub_branch_tag':2,'enable_subgraph':True})

    # Sum the two branches across sub-grids.
    grid_sum = lbann.Cross_Grid_Sum([branch1,branch2],parallel_strategy = {'sub_branch_tag':0,'enable_subgraph':True})

    branch1 = lbann.Identity(grid_sum)
    branch2 = lbann.Identity(grid_sum)

    sum_branch = lbann.Sum([branch1,branch2],parallel_strategy = {'sub_branch_tag':0,'enable_subgraph':True})
    z = lbann.L2Norm2(sum_branch)
    obj.append(z)
    metrics.append(lbann.Metric(z, name='data-parallel layout'))

    # NumPy implementation
    vals = []
    for i in range(num_samples()):
        x = get_sample(i).reshape(_sample_dims).astype(np.float64)
        # NOTE(review): `y` is never used in this loop; looks like a
        # leftover from an earlier version.
        y = []

        # Accumulate the axis-2 slices, mirroring Cross_Grid_Sum.
        cross_sum = 0
        for j in range(len(slice_points)-1):
            x_slice = x[:,:,slice_points[j]:slice_points[j+1]]

            if(j==0):
                cross_sum = x_slice
            else:
                cross_sum += x_slice

        # Factor 2 mirrors the final Sum of the two identical identity
        # branches of grid_sum.
        z = 2*cross_sum
        z = tools.numpy_l2norm2(z)
        vals.append(z)

    val = np.mean(vals)
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(lbann.CallbackCheckMetric(
        metric=metrics[-1].name,
        lower_bound=val-tol,
        upper_bound=val+tol,
        error_on_failure=True,
        execution_modes='test'))



    # ------------------------------------------
    # Gradient checking
    # ------------------------------------------

    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # ------------------------------------------
    # Construct model
    # ------------------------------------------

    num_epochs = 0
    return lbann.Model(num_epochs,subgraph_communication=lbann.SubgraphCommunication.COLL_OPT,
                       layers=lbann.traverse_layer_graph(x_lbann),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
# Exemplo n.º 11
def construct_model(lbann):
    """Construct LBANN model.

    Applies 2D and 3D 3^n convolutions (stride=1, pad=1, dilation=1, no
    bias) with a distconv-style parallel strategy and checks each L2
    norm against a PyTorch reference, falling back to precomputed
    values when the reference cannot be computed.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Input data
    # Note: Sum with a weights layer so that gradient checking will
    # verify that error signals are correct.
    x_weights = lbann.Weights(optimizer=lbann.SGD(),
                              initializer=lbann.ConstantInitializer(value=0.0),
                              name='input_weights')
    x = lbann.Sum(
        lbann.Reshape(lbann.Input(data_field='samples'),
                      dims=tools.str_list(_sample_dims)),
        lbann.WeightsLayer(weights=x_weights,
                           dims=tools.str_list(_sample_dims)))
    x_lbann = x

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    # ------------------------------------------
    # Basic 3^n convolution
    # ------------------------------------------
    # 3^n conv, stride=1, pad=1, dilation=1, no bias

    # The distconv-style parallel strategy requires GPUs.
    num_height_groups = tools.gpus_per_node(lbann)
    if num_height_groups == 0:
        e = 'this test requires GPUs.'
        print('Skip - ' + e)
        pytest.skip(e)

    for num_dims, reference_val in [(2, 11913.852660080756),
                                    (3, 9952.365297083174)]:
        # Convolution settings
        # Kernel shape: (output channels, input channels, 3, ..., 3).
        kernel_dims = [
            5,
            _sample_dims[0] if num_dims == 2 else _sample_dims_3d[0],
        ] + [3] * num_dims
        strides = [1] * num_dims
        pads = [1] * num_dims
        dilations = [1] * num_dims
        kernel = make_random_array(kernel_dims, 11)

        # Apply convolution
        kernel_weights = lbann.Weights(
            optimizer=lbann.SGD(),
            initializer=lbann.ValueInitializer(
                values=tools.str_list(np.nditer(kernel))),
            name='kernel1_{}d'.format(num_dims))
        x = x_lbann
        if num_dims == 3:
            # Reinterpret the sample as a 3D tensor for the 3D case.
            x = lbann.Reshape(x, dims=tools.str_list(_sample_dims_3d))

        y = lbann.Convolution(
            x,
            weights=(kernel_weights, ),
            num_dims=num_dims,
            num_output_channels=kernel_dims[0],
            has_vectors=True,
            conv_dims=tools.str_list(kernel_dims[2:]),
            conv_strides=tools.str_list(strides),
            conv_pads=tools.str_list(pads),
            conv_dilations=tools.str_list(dilations),
            has_bias=False,
            parallel_strategy=create_parallel_strategy(num_height_groups))
        z = lbann.L2Norm2(y)
        obj.append(z)
        metrics.append(
            lbann.Metric(z, name='basic {}D 3^n convolution'.format(num_dims)))

        # PyTorch implementation
        try:
            x = _samples
            if num_dims == 3:
                x = np.reshape(x, [_num_samples] + _sample_dims_3d)

            y = pytorch_convolution(x,
                                    kernel,
                                    stride=strides,
                                    padding=pads,
                                    dilation=dilations)
            z = tools.numpy_l2norm2(y) / _num_samples
            val = z
        except Exception:
            # PyTorch may be unavailable or the reference computation may
            # fail; fall back to a precomputed value.  (Was a bare
            # `except:`, which also swallowed KeyboardInterrupt/SystemExit.)
            val = reference_val
            # val = 398.6956458317758 # _num_samples=8, 6 channels
            # val = 381.7401227915947 # _num_samples=23, 6 channels
        tol = 8 * val * np.finfo(np.float32).eps

        callbacks.append(
            lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                      lower_bound=val - tol,
                                      upper_bound=val + tol,
                                      error_on_failure=True,
                                      execution_modes='test'))

    # ------------------------------------------
    # Gradient checking
    # ------------------------------------------

    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # ------------------------------------------
    # Construct model
    # ------------------------------------------

    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph(x_lbann),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
def construct_model(lbann):
    """Construct LBANN model.

    Checks ``ChannelwiseFullyConnected`` in all four combinations of
    transpose x bias against NumPy reference values.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Input data
    # Note: Sum with a weights layer so that gradient checking will
    # verify that error signals are correct.
    x_weights = lbann.Weights(optimizer=lbann.SGD(),
                              initializer=lbann.ConstantInitializer(value=0.0),
                              name='input_weights')
    x0 = lbann.WeightsLayer(weights=x_weights,
                            dims=tools.str_list(_sample_dims))
    x1 = lbann.Reshape(lbann.Input(data_field='samples'),
                       dims=tools.str_list(_sample_dims),
                       name="Input_layer")
    x = lbann.Sum(x0, x1, name="Adding_weight_layer")
    x_lbann = x

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    # Channel-wise parallelism requires GPUs.
    num_channel_groups = tools.gpus_per_node(lbann)
    if num_channel_groups == 0:
        e = 'this test requires GPUs.'
        print('Skip - ' + e)
        pytest.skip(e)
    # ------------------------------------------
    # Compute expected metric values with NumPy
    # ------------------------------------------

    # Input and output dimensions
    input_channel_dims = _sample_dims[1:]
    output_channel_dims = (1, 3)
    input_channel_size = functools.reduce(operator.mul, input_channel_dims)
    output_channel_size = functools.reduce(operator.mul, output_channel_dims)

    # Weight values
    linearity = np.random.normal(size=(output_channel_size,
                                       input_channel_size)).astype(np.float32)
    bias = np.random.normal(size=(output_channel_size, 1)).astype(np.float32)

    # With bias
    x = (_samples.reshape(
        (-1, input_channel_size)).transpose().astype(np.float64))

    y = np.matmul(linearity.astype(np.float64), x) + bias.astype(np.float64)

    z = tools.numpy_l2norm2(y) / _num_samples
    val_with_bias = z

    # Without bias
    x = (_samples.reshape(
        (-1, input_channel_size)).transpose().astype(np.float64))
    y = np.matmul(linearity.astype(np.float64), x)
    z = tools.numpy_l2norm2(y) / _num_samples
    val_without_bias = z

    # ------------------------------------------
    # Non-transpose, bias
    # ------------------------------------------

    # LBANN implementation
    # Non-transposed linearity is stored column-major (order='F').
    linearity_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ValueInitializer(
            values=tools.str_list(np.nditer(linearity, order='F'))))
    bias_weights = lbann.Weights(optimizer=lbann.SGD(),
                                 initializer=lbann.ValueInitializer(
                                     values=tools.str_list(np.nditer(bias))))
    x = x_lbann
    y = lbann.ChannelwiseFullyConnected(
        x,
        weights=(linearity_weights, bias_weights),
        output_channel_dims=output_channel_dims,
        parallel_strategy=create_parallel_strategy(num_channel_groups),
        name="bias")
    z = lbann.L2Norm2(y, name="L2Norm")
    obj.append(z)
    metrics.append(lbann.Metric(z, name='non-transpose, bias'))

    # Check against the NumPy reference value
    tol = 8 * val_with_bias * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val_with_bias - tol,
                                  upper_bound=val_with_bias + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # ------------------------------------------
    # Non-transpose, no bias
    # ------------------------------------------

    # LBANN implementation
    linearity_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ValueInitializer(
            values=tools.str_list(np.nditer(linearity, order='F'))))

    # NOTE(review): unlike the transpose case below, `bias=False` is not
    # passed here, so LBANN presumably creates a default (zero-filled)
    # bias -- numerically equivalent for this check; confirm intentional.
    x = x_lbann
    y = lbann.ChannelwiseFullyConnected(
        x,
        weights=(linearity_weights,),  # was (linearity_weights): not a tuple
        output_channel_dims=output_channel_dims,
        parallel_strategy=create_parallel_strategy(num_channel_groups),
        name="no_bias")
    z = lbann.L2Norm2(y)
    obj.append(z)
    metrics.append(lbann.Metric(z, name='non-transpose, no bias'))

    # Check against the NumPy reference value
    tol = 8 * val_without_bias * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val_without_bias - tol,
                                  upper_bound=val_without_bias + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))
    # ------------------------------------------
    # Transpose, bias
    # ------------------------------------------

    # LBANN implementation
    # Transposed linearity is stored row-major (order='C').
    linearity_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ValueInitializer(
            values=tools.str_list(np.nditer(linearity, order='C'))))
    bias_weights = lbann.Weights(optimizer=lbann.SGD(),
                                 initializer=lbann.ValueInitializer(
                                     values=tools.str_list(np.nditer(bias))))
    x = x_lbann
    y = lbann.ChannelwiseFullyConnected(
        x,
        weights=(linearity_weights, bias_weights),
        output_channel_dims=output_channel_dims,
        parallel_strategy=create_parallel_strategy(num_channel_groups),
        transpose=True,
    )
    z = lbann.L2Norm2(y)
    obj.append(z)
    metrics.append(lbann.Metric(z, name='transpose, bias'))

    # Check against the NumPy reference value
    tol = 8 * val_with_bias * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val_with_bias - tol,
                                  upper_bound=val_with_bias + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # ------------------------------------------
    # Transpose, no bias
    # ------------------------------------------

    # LBANN implementation
    linearity_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ValueInitializer(
            values=tools.str_list(np.nditer(linearity, order='C'))))
    x = x_lbann
    y = lbann.ChannelwiseFullyConnected(
        x,
        weights=(linearity_weights,),  # was (linearity_weights): not a tuple
        output_channel_dims=output_channel_dims,
        parallel_strategy=create_parallel_strategy(num_channel_groups),
        bias=False,
        transpose=True)
    z = lbann.L2Norm2(y)
    obj.append(z)
    # Fixed copy-paste bug: this metric was mislabeled
    # 'Non-transpose, no bias'.
    metrics.append(lbann.Metric(z, name='transpose, no bias'))

    # Check against the NumPy reference value
    tol = 8 * val_without_bias * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val_without_bias - tol,
                                  upper_bound=val_without_bias + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # ------------------------------------------
    # Gradient checking
    # ------------------------------------------

    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # ------------------------------------------
    # Construct model
    # ------------------------------------------

    # NOTE(review): sibling tests in this file use num_epochs = 0;
    # confirm 1 is intentional here.
    num_epochs = 1
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph(x_lbann),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
def construct_model(lbann):
    """Construct LBANN model.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Input data
    # Summing with a zero-valued weights layer lets gradient checking
    # verify the error signals through the input.
    input_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ConstantInitializer(value=0.0),
        name='input_weights')
    weights_branch = lbann.WeightsLayer(weights=input_weights,
                                        dims=tools.str_list(_sample_dims))
    data_branch = lbann.Reshape(lbann.Input(data_field='samples'),
                                dims=tools.str_list(_sample_dims))
    x = lbann.Sum(weights_branch, data_branch)

    # Apply channel-wise scale/bias
    # Scale and bias values are packed into one weights object.
    scale_str = tools.str_list(np.nditer(_scale))
    bias_str = tools.str_list(np.nditer(_bias))
    scalebias_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ValueInitializer(
            values='{} {}'.format(scale_str, bias_str)),
        name='scalebias_weights'
    )
    y = lbann.ChannelwiseScaleBias(x, weights=scalebias_weights)
    z = lbann.L2Norm2(y)

    # Objects for LBANN model
    obj = z
    metric = lbann.Metric(z, name='obj')
    layers = list(lbann.traverse_layer_graph(z))
    callbacks = []

    # Expected metric value from a NumPy reference implementation
    vals = [
        tools.numpy_l2norm2(
            _scale.astype(np.float64)
            * get_sample(i).reshape(_sample_dims).astype(np.float64)
            + _bias.astype(np.float64))
        for i in range(num_samples())
    ]
    val = np.mean(vals)
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(lbann.CallbackCheckMetric(
        metric=metric.name,
        lower_bound=val - tol,
        upper_bound=val + tol,
        error_on_failure=True,
        execution_modes='test'))

    # Gradient checking
    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # Construct model
    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=layers,
                       objective_function=obj,
                       metrics=metric,
                       callbacks=callbacks)
def construct_model(lbann):
    """Construct LBANN model.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Input data
    # Summing with a zero-valued weights layer lets gradient checking
    # verify the error signals through the input.
    x_weights = lbann.Weights(optimizer=lbann.SGD(),
                              initializer=lbann.ConstantInitializer(value=0.0),
                              name='input_weights')
    x_lbann = lbann.Sum(
        lbann.Reshape(lbann.Input(), dims=tools.str_list(_sample_size)),
        lbann.WeightsLayer(weights=x_weights,
                           dims=tools.str_list(_sample_size)))

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    # The distconv-style parallel strategy requires GPUs.
    num_height_groups = tools.gpus_per_node(lbann)
    if num_height_groups == 0:
        e = 'this test requires GPUs.'
        print('Skip - ' + e)
        pytest.skip(e)

    # Check LeakyRelu under both layouts; each case uses a different
    # negative slope.
    for layout, slope in (('data_parallel', 0.01),
                          ('model_parallel', 2)):

        # LBANN implementation
        reshaped = lbann.Reshape(x_lbann, dims="4 2 6")
        y = lbann.LeakyRelu(reshaped, negative_slope=slope,
                            data_layout=layout,
                            parallel_strategy=create_parallel_strategy(
                                num_height_groups))
        y = lbann.Reshape(y, dims=str(sample_dims()))
        z = lbann.L2Norm2(y)
        obj.append(z)
        metrics.append(
            lbann.Metric(z, name=layout.replace('_', '-') + ' layout'))

        # NumPy implementation
        vals = []
        for i in range(num_samples()):
            sample = get_sample(i).astype(np.float64)
            ref = np.where(sample > 0, sample, slope * sample)
            vals.append(tools.numpy_l2norm2(ref))
        val = np.mean(vals)
        tol = 8 * val * np.finfo(np.float32).eps
        callbacks.append(lbann.CallbackCheckMetric(
            metric=metrics[-1].name,
            lower_bound=val - tol,
            upper_bound=val + tol,
            error_on_failure=True,
            execution_modes='test'))

    # ------------------------------------------
    # Gradient checking
    # ------------------------------------------

    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # ------------------------------------------
    # Construct model
    # ------------------------------------------

    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph(x_lbann),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
def construct_model(lbann):
    """Construct LBANN model.

    Builds two convolution test cases — a basic 3x3 convolution with
    bias and a 2x4 strided convolution without bias — each checked
    against a reference value (computed by ``pytorch_convolution`` when
    available, otherwise precomputed) via a metric-bound callback, and
    enables gradient checking.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Input data
    # Note: Sum with a weights layer so that gradient checking will
    # verify that error signals are correct.
    x_weights = lbann.Weights(optimizer=lbann.SGD(),
                              initializer=lbann.ConstantInitializer(value=0.0),
                              name='input_weights')
    x = lbann.Sum(lbann.Reshape(lbann.Input(),
                                dims=tools.str_list(_sample_dims)),
                  lbann.WeightsLayer(weights=x_weights,
                                     dims=tools.str_list(_sample_dims)))
    x_lbann = x

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    # ------------------------------------------
    # Basic 3x3 convolution
    # ------------------------------------------
    # 3x3 conv, stride=1, pad=1, dilation=1, bias

    # Convolution settings
    kernel_dims = (5, _sample_dims[0], 3, 3)
    strides = (1, 1)
    pads = (1, 1)
    dilations = (1, 1)
    kernel = make_random_array(kernel_dims, 11)
    bias = make_random_array([kernel_dims[0]], 123)

    # Apply convolution
    kernel_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ValueInitializer(values=tools.str_list(np.nditer(kernel))),
        name='kernel1'
    )
    bias_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ValueInitializer(values=tools.str_list(np.nditer(bias))),
        name='bias1'
    )
    x = x_lbann
    y = lbann.Convolution(x,
                          weights=(kernel_weights, bias_weights),
                          num_dims=3,
                          num_output_channels=kernel_dims[0],
                          has_vectors=True,
                          conv_dims=tools.str_list(kernel_dims[2:]),
                          conv_strides=tools.str_list(strides),
                          conv_pads=tools.str_list(pads),
                          conv_dilations=tools.str_list(dilations),
                          has_bias=True,
                          parallel_strategy=create_parallel_strategy(4))
    z = lbann.L2Norm2(y)
    obj.append(z)
    metrics.append(lbann.Metric(z, name='basic 3x3 convolution'))

    # PyTorch implementation
    try:
        x = _samples
        y = pytorch_convolution(
            x, kernel, bias=bias,
            stride=strides, padding=pads, dilation=dilations
        )
        z = tools.numpy_l2norm2(y) / _num_samples
        val = z
    except Exception:
        # Fall back to the precomputed value when the reference
        # implementation fails (presumably PyTorch not installed —
        # confirm against pytorch_convolution).
        val = 153.84937996554953
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(lbann.CallbackCheckMetric(
        metric=metrics[-1].name,
        lower_bound=val-tol,
        upper_bound=val+tol,
        error_on_failure=True,
        execution_modes='test'))

    # ------------------------------------------
    # 2x4 strided convolution
    # ------------------------------------------

    # Convolution settings
    kernel_dims = (3, _sample_dims[0], 2, 4)
    strides = (3, 1)
    pads = (3, 0)
    dilations = (1, 1)
    num_groups = 1
    kernel = make_random_array(kernel_dims, 19)

    # Apply convolution (no bias weights for this case)
    kernel_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ValueInitializer(values=tools.str_list(np.nditer(kernel))),
        name='kernel2'
    )
    x = x_lbann
    y = lbann.Convolution(x,
                          weights=(kernel_weights,),
                          num_dims=3,
                          num_output_channels=kernel_dims[0],
                          has_vectors=True,
                          conv_dims=tools.str_list(kernel_dims[2:]),
                          conv_strides=tools.str_list(strides),
                          conv_pads=tools.str_list(pads),
                          conv_dilations=tools.str_list(dilations),
                          num_groups=num_groups,
                          has_bias=False,
                          parallel_strategy=create_parallel_strategy(4))
    z = lbann.L2Norm2(y)
    obj.append(z)
    metrics.append(lbann.Metric(z, name='2x4 convolution'))

    # PyTorch implementation
    try:
        x = _samples
        y = pytorch_convolution(
            x, kernel, bias=None,
            stride=strides, padding=pads,
            dilation=dilations, groups=num_groups
        )
        z = tools.numpy_l2norm2(y) / _num_samples
        val = z
    except Exception:
        # Fall back to the precomputed value when the reference
        # implementation fails (presumably PyTorch not installed —
        # confirm against pytorch_convolution).
        val = 19.24587403346207
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(lbann.CallbackCheckMetric(
        metric=metrics[-1].name,
        lower_bound=val-tol,
        upper_bound=val+tol,
        error_on_failure=True,
        execution_modes='test'))

    # ------------------------------------------
    # Gradient checking
    # ------------------------------------------

    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # ------------------------------------------
    # Construct model
    # ------------------------------------------

    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph(x_lbann),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
Exemplo n.º 16
0
def construct_model(lbann):
    """Construct LBANN model.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Input data
    # Note: Sum with a weights layer so that gradient checking will
    # verify that error signals are correct.
    x_weights = lbann.Weights(optimizer=lbann.SGD(),
                              initializer=lbann.ConstantInitializer(value=0.0),
                              name='input_weights')
    x_lbann = lbann.Sum(
        lbann.Reshape(lbann.Input(data_field='samples'),
                      dims=tools.str_list(_sample_dims)),
        lbann.WeightsLayer(weights=x_weights,
                           dims=tools.str_list(_sample_dims)))

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    # --------------------------
    # Slice along axis 0
    # --------------------------

    # LBANN implementation
    points = (2, 3, 6, 7)
    split = lbann.Slice(x_lbann, axis=0, slice_points=tools.str_list(points))
    # Each L2Norm2 consumes one output of the Slice layer, in order
    norms = [lbann.L2Norm2(split) for _ in range(len(points) - 1)]
    z = lbann.Add(norms[0], norms[2])
    obj.append(z)
    metrics.append(lbann.Metric(z, name='axis0'))

    # NumPy implementation
    vals = []
    for i in range(num_samples()):
        sample = get_sample(i).reshape(_sample_dims).astype(np.float64)
        ref = [tools.numpy_l2norm2(sample[points[j]:points[j+1], :, :])
               for j in range(len(points) - 1)]
        vals.append(ref[0] + ref[2])
    val = np.mean(vals)
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(lbann.CallbackCheckMetric(
        metric=metrics[-1].name,
        lower_bound=val - tol,
        upper_bound=val + tol,
        error_on_failure=True,
        execution_modes='test'))

    # --------------------------
    # Slice along axis 1
    # --------------------------

    # LBANN implementation
    points = (0, 2, 3, 4)
    split = lbann.Slice(x_lbann, axis=1, slice_points=tools.str_list(points))
    norms = [lbann.L2Norm2(split) for _ in range(len(points) - 1)]
    z = lbann.Add(norms[0], norms[2])
    obj.append(z)
    metrics.append(lbann.Metric(z, name='axis1'))

    # NumPy implementation
    vals = []
    for i in range(num_samples()):
        sample = get_sample(i).reshape(_sample_dims).astype(np.float64)
        ref = [tools.numpy_l2norm2(sample[:, points[j]:points[j+1], :])
               for j in range(len(points) - 1)]
        vals.append(ref[0] + ref[2])
    val = np.mean(vals)
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(lbann.CallbackCheckMetric(
        metric=metrics[-1].name,
        lower_bound=val - tol,
        upper_bound=val + tol,
        error_on_failure=True,
        execution_modes='test'))

    # --------------------------
    # Slice along axis 2
    # --------------------------

    # LBANN implementation (single slice, so the metric is its norm)
    points = (1, 3)
    split = lbann.Slice(x_lbann, axis=2, slice_points=tools.str_list(points))
    norms = [lbann.L2Norm2(split) for _ in range(len(points) - 1)]
    z = norms[0]
    obj.append(z)
    metrics.append(lbann.Metric(z, name='axis2'))

    # NumPy implementation
    vals = []
    for i in range(num_samples()):
        sample = get_sample(i).reshape(_sample_dims).astype(np.float64)
        ref = [tools.numpy_l2norm2(sample[:, :, points[j]:points[j+1]])
               for j in range(len(points) - 1)]
        vals.append(ref[0])
    val = np.mean(vals)
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(lbann.CallbackCheckMetric(
        metric=metrics[-1].name,
        lower_bound=val - tol,
        upper_bound=val + tol,
        error_on_failure=True,
        execution_modes='test'))

    # --------------------------
    # Model-parallel
    # --------------------------

    # LBANN implementation (flatten, then slice along the default axis)
    points = (31, 54, 56, 57)
    flat = lbann.Reshape(x_lbann, dims=tools.str_list([105]))
    split = lbann.Slice(flat, slice_points=tools.str_list(points),
                        data_layout='model_parallel')
    norms = [lbann.L2Norm2(split) for _ in range(len(points) - 1)]
    z = lbann.Add(norms[0], norms[2])
    obj.append(z)
    metrics.append(lbann.Metric(z, name='model-parallel'))

    # NumPy implementation
    vals = []
    for i in range(num_samples()):
        sample = get_sample(i).reshape(-1).astype(np.float64)
        ref = [tools.numpy_l2norm2(sample[points[j]:points[j+1]])
               for j in range(len(points) - 1)]
        vals.append(ref[0] + ref[2])
    val = np.mean(vals)
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(lbann.CallbackCheckMetric(
        metric=metrics[-1].name,
        lower_bound=val - tol,
        upper_bound=val + tol,
        error_on_failure=True,
        execution_modes='test'))

    # --------------------------
    # Gradient checking
    # --------------------------

    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # --------------------------
    # Construct model
    # --------------------------

    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph(x_lbann),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
Exemplo n.º 17
0
def construct_model(lbann):
    """Construct LBANN model.

    Tests the FullyConnected layer in four configurations
    (data-/model-parallel layout x non-transpose-with-bias /
    transpose-without-bias), checking each metric against a NumPy
    reference and enabling gradient checking.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Input data
    # Note: Sum with a weights layer so that gradient checking will
    # verify that error signals are correct.
    x_weights = lbann.Weights(optimizer=lbann.SGD(),
                              initializer=lbann.ConstantInitializer(value=0.0),
                              name='input_weights')
    x = lbann.Sum(
        lbann.Reshape(lbann.Input(data_field='samples'),
                      dims=tools.str_list(_input_size)),
        lbann.WeightsLayer(weights=x_weights,
                           dims=tools.str_list(_input_size)))
    x_lbann = x

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    # ------------------------------------------
    # Compute expected metric values with NumPy
    # ------------------------------------------

    # Weight values
    # Note: unseeded RNG, but the same arrays are used both to
    # initialize the LBANN weights and to compute the reference
    # values, so the comparison is self-consistent.
    linearity = np.random.normal(size=(_output_size,
                                       _input_size)).astype(np.float32)
    bias = np.random.normal(size=(_output_size, 1)).astype(np.float32)

    # With bias
    x = _samples.transpose().astype(np.float64)
    y = np.matmul(linearity.astype(np.float64), x) + bias.astype(np.float64)
    z = tools.numpy_l2norm2(y) / _num_samples
    val_with_bias = z

    # Without bias
    x = _samples.transpose().astype(np.float64)
    y = np.matmul(linearity.astype(np.float64), x)
    z = tools.numpy_l2norm2(y) / _num_samples
    val_without_bias = z

    # ------------------------------------------
    # Data-parallel layout, non-transpose, bias
    # ------------------------------------------

    # LBANN implementation
    # Note: column-major (Fortran) order to match LBANN's weight layout
    linearity_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ValueInitializer(
            values=tools.str_list(np.nditer(linearity, order='F'))))
    bias_weights = lbann.Weights(optimizer=lbann.SGD(),
                                 initializer=lbann.ValueInitializer(
                                     values=tools.str_list(np.nditer(bias))))
    x = x_lbann
    y = lbann.FullyConnected(x,
                             weights=(linearity_weights, bias_weights),
                             data_layout='data_parallel',
                             num_neurons=_output_size,
                             has_bias=True,
                             transpose=False)
    z = lbann.L2Norm2(y)
    obj.append(z)
    metrics.append(
        lbann.Metric(z, name='data-parallel layout, non-transpose, bias'))

    # NumPy implementation
    val = val_with_bias
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val - tol,
                                  upper_bound=val + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # ------------------------------------------
    # Model-parallel layout, non-transpose, bias
    # ------------------------------------------

    # LBANN implementation
    linearity_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ValueInitializer(
            values=tools.str_list(np.nditer(linearity, order='F'))))
    bias_weights = lbann.Weights(optimizer=lbann.SGD(),
                                 initializer=lbann.ValueInitializer(
                                     values=tools.str_list(np.nditer(bias))))
    x = x_lbann
    y = lbann.FullyConnected(x,
                             weights=(linearity_weights, bias_weights),
                             data_layout='model_parallel',
                             num_neurons=_output_size,
                             has_bias=True,
                             transpose=False)
    z = lbann.L2Norm2(y)
    obj.append(z)
    metrics.append(
        lbann.Metric(z, name='model-parallel layout, non-transpose, bias'))

    # NumPy implementation
    val = val_with_bias
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val - tol,
                                  upper_bound=val + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # ------------------------------------------
    # Data-parallel layout, transpose, no bias
    # ------------------------------------------

    # LBANN implementation
    # Note: row-major (C) order since the layer transposes the weights
    linearity_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ValueInitializer(
            values=tools.str_list(np.nditer(linearity, order='C'))))
    x = x_lbann
    y = lbann.FullyConnected(x,
                             weights=linearity_weights,
                             data_layout='data_parallel',
                             num_neurons=_output_size,
                             has_bias=False,
                             transpose=True)
    z = lbann.L2Norm2(y)
    obj.append(z)
    metrics.append(
        lbann.Metric(z, name='data-parallel layout, transpose, no bias'))

    # NumPy implementation
    val = val_without_bias
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val - tol,
                                  upper_bound=val + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # ------------------------------------------
    # Model-parallel layout, transpose, no bias
    # ------------------------------------------

    # LBANN implementation
    linearity_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ValueInitializer(
            values=tools.str_list(np.nditer(linearity, order='C'))))
    x = x_lbann
    y = lbann.FullyConnected(x,
                             weights=linearity_weights,
                             data_layout='model_parallel',
                             num_neurons=_output_size,
                             has_bias=False,
                             transpose=True)
    z = lbann.L2Norm2(y)
    obj.append(z)
    # Fixed copy-paste bug: this metric was previously named
    # 'data-parallel ...', duplicating the previous section's metric.
    metrics.append(
        lbann.Metric(z, name='model-parallel layout, transpose, no bias'))

    # NumPy implementation
    val = val_without_bias
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val - tol,
                                  upper_bound=val + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # ------------------------------------------
    # Gradient checking
    # ------------------------------------------

    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # ------------------------------------------
    # Construct model
    # ------------------------------------------

    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph(x_lbann),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)