Example #1
def setup_experiment(lbann):
    """Construct LBANN experiment.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Skip test on non-GPU systems
    if not tools.gpus_per_node(lbann):
        message = f'{os.path.basename(__file__)} requires GPUs'
        print('Skip - ' + message)
        pytest.skip(message)

    mini_batch_size = num_samples() // 2
    trainer = lbann.Trainer(mini_batch_size)
    model = construct_model(lbann)
    data_reader = construct_data_reader(lbann)
    optimizer = lbann.NoOptimizer()
    return trainer, model, data_reader, optimizer
Example #2
def setup_experiment(lbann):
    """Construct LBANN experiment.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Skip test on non-GPU systems
    # Note: Test requires cuDNN (on GPU) or oneDNN (on CPU).
    ### @todo Assume LBANN has been built with oneDNN?
    if not tools.gpus_per_node(lbann):
        message = f'{os.path.basename(__file__)} requires cuDNN or oneDNN'
        print('Skip - ' + message)
        pytest.skip(message)

    mini_batch_size = num_samples() // 2
    trainer = lbann.Trainer(mini_batch_size)
    model = construct_model(lbann)
    data_reader = construct_data_reader(lbann)
    optimizer = lbann.SGD()
    return trainer, model, data_reader, optimizer
Example #3
def construct_model(lbann):
    """Construct LBANN model.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Input data
    # Note: Sum with a weights layer so that gradient checking will
    # verify that error signals are correct.
    x_weights = lbann.Weights(optimizer=lbann.SGD(),
                              initializer=lbann.ConstantInitializer(value=0.0),
                              name='input_weights')
    x = lbann.Sum(
        lbann.Reshape(lbann.Input(data_field='samples'),
                      dims=tools.str_list(_sample_size)),
        lbann.WeightsLayer(weights=x_weights,
                           dims=tools.str_list(_sample_size)))
    x_lbann = x

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    # ------------------------------------------
    # Data-parallel layout with distconv
    # ------------------------------------------

    num_height_groups = tools.gpus_per_node(lbann)
    if num_height_groups == 0:
        e = 'this test requires GPUs.'
        print('Skip - ' + e)
        pytest.skip(e)

    # LBANN implementation
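    # The identity layer is given a distconv parallel strategy that splits
    # the height dimension across num_height_groups GPUs.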
    x = x_lbann
    x = lbann.Reshape(x, dims="4 4 3")
    y = lbann.Identity(
        x,
        data_layout='data_parallel',
        parallel_strategy=create_parallel_strategy(num_height_groups))
    x = lbann.Reshape(x, dims="48")
    z = lbann.L2Norm2(y)
    obj.append(z)
    metrics.append(lbann.Metric(z, name='data-parallel layout'))

    # NumPy implementation
    vals = []
    for i in range(num_samples()):
        x = get_sample(i).astype(np.float64)
        y = x
        z = tools.numpy_l2norm2(y)
        vals.append(z)
    val = np.mean(vals)
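    # Allow 8 ulps (in float32) of relative error around the expected value.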
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val - tol,
                                  upper_bound=val + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # ------------------------------------------
    # Model-parallel layout
    # ------------------------------------------

    # LBANN implementation
    x = x_lbann
    y = lbann.Identity(x, data_layout='model_parallel')
    z = lbann.L2Norm2(y)
    obj.append(z)
    metrics.append(lbann.Metric(z, name='model-parallel layout'))

    # NumPy implementation
    vals = []
    for i in range(num_samples()):
        x = get_sample(i).astype(np.float64)
        y = x
        z = tools.numpy_l2norm2(y)
        vals.append(z)
    val = np.mean(vals)
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val - tol,
                                  upper_bound=val + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # ------------------------------------------
    # Gradient checking
    # ------------------------------------------

    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # ------------------------------------------
    # Construct model
    # ------------------------------------------

    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph(x_lbann),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
Example #4
def construct_model(lbann):
    """Construct LBANN model.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Input data
    # Note: Sum with a weights layer so that gradient checking will
    # verify that error signals are correct.
    x_weights = lbann.Weights(optimizer=lbann.SGD(),
                              initializer=lbann.ConstantInitializer(value=0.0),
                              name='input_weights')
    x = lbann.Sum(
        lbann.Reshape(lbann.Input(), dims=tools.str_list(_sample_dims)),
        lbann.WeightsLayer(weights=x_weights,
                           dims=tools.str_list(_sample_dims)))
    x_lbann = x

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    # ------------------------------------------
    # Basic 3^n convolution
    # ------------------------------------------
    # 3^n conv, stride=1, pad=1, dilation=1, bias

    num_height_groups = tools.gpus_per_node(lbann)
    if num_height_groups == 0:
        e = 'this test requires GPUs.'
        print('Skip - ' + e)
        pytest.skip(e)

    for num_dims, reference_val in [(2, 11913.852660080756),
                                    (3, 9952.365297083174)]:
        # Convolution settings
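        # Kernel dims are (num output channels, num input channels) followed
        # by num_dims spatial dims of size 3.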
        kernel_dims = [
            5,
            _sample_dims[0] if num_dims == 2 else _sample_dims_3d[0],
        ] + [3] * num_dims
        strides = [1] * num_dims
        pads = [1] * num_dims
        dilations = [1] * num_dims
        kernel = make_random_array(kernel_dims, 11)

        # Apply convolution
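        # ValueInitializer consumes a flat list of values; np.nditer iterates
        # over the kernel in row-major order.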
        kernel_weights = lbann.Weights(
            optimizer=lbann.SGD(),
            initializer=lbann.ValueInitializer(
                values=tools.str_list(np.nditer(kernel))),
            name='kernel1_{}d'.format(num_dims))
        x = x_lbann
        if num_dims == 3:
            x = lbann.Reshape(x, dims=tools.str_list(_sample_dims_3d))

        y = lbann.Convolution(
            x,
            weights=(kernel_weights, ),
            num_dims=num_dims,
            num_output_channels=kernel_dims[0],
            has_vectors=True,
            conv_dims=tools.str_list(kernel_dims[2:]),
            conv_strides=tools.str_list(strides),
            conv_pads=tools.str_list(pads),
            conv_dilations=tools.str_list(dilations),
            has_bias=False,
            parallel_strategy=create_parallel_strategy(num_height_groups))
        z = lbann.L2Norm2(y)
        obj.append(z)
        metrics.append(
            lbann.Metric(z, name='basic {}D 3^n convolution'.format(num_dims)))

        # PyTorch implementation
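        # Compute the expected value with PyTorch if it is available;
        # otherwise fall back to the precomputed reference value.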
        try:
            x = _samples
            if num_dims == 3:
                x = np.reshape(x, [_num_samples] + _sample_dims_3d)

            y = pytorch_convolution(x,
                                    kernel,
                                    stride=strides,
                                    padding=pads,
                                    dilation=dilations)
            z = tools.numpy_l2norm2(y) / _num_samples
            val = z
        except Exception:
            # Precomputed value
            val = reference_val
            # val = 398.6956458317758 # _num_samples=8, 6 channels
            # val = 381.7401227915947 # _num_samples=23, 6 channels
        tol = 8 * val * np.finfo(np.float32).eps

        callbacks.append(
            lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                      lower_bound=val - tol,
                                      upper_bound=val + tol,
                                      error_on_failure=True,
                                      execution_modes='test'))

    # ------------------------------------------
    # Gradient checking
    # ------------------------------------------

    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # ------------------------------------------
    # Construct model
    # ------------------------------------------

    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph(x_lbann),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
Example #5
def construct_model(lbann):
    """Construct LBANN model.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    gpus_per_node = tools.gpus_per_node(lbann)
    if gpus_per_node == 0:
        e = 'this test requires GPUs.'
        print('Skip - ' + e)
        pytest.skip(e)

    # Input data
    # Note: Sum with a weights layer so that gradient checking will
    # verify that error signals are correct.
    x_weights = lbann.Weights(optimizer=lbann.SGD(),
                              initializer=lbann.ConstantInitializer(value=0.0),
                              name='input_weights')
    x = lbann.Sum(
        lbann.Reshape(lbann.Input(data_field='samples'),
                      dims=tools.str_list(_sample_dims)),
        lbann.WeightsLayer(weights=x_weights,
                           dims=tools.str_list(_sample_dims)))

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    # Convolution/FC settings
    kernel_dims = (5, _sample_dims[0], 3, 3)
    strides = (1, 1)
    pads = (1, 1)
    dilations = (1, 1)
    kernel = make_random_array(kernel_dims, 11)
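    # With stride 1, pad 1, dilation 1, and a 3x3 kernel, the convolution
    # preserves the spatial dims, so the FC input size is
    # out_channels * prod(spatial dims).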
    fc_input_size = kernel_dims[0] * np.prod(_sample_dims[1:])
    linearity1 = make_random_array((_output_size, fc_input_size), 13)
    linearity2 = make_random_array((_output_size, _output_size), 17)
    linearity_no_opt = make_random_array((_output_size, _output_size), 19)
    biases = make_random_array((_output_size, ), 19)

    # Weight values
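    # Note: Fully-connected linearities are stored in column-major order,
    # hence the order='F' flattening below.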
    kernel_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ValueInitializer(
            values=tools.str_list(np.nditer(kernel))),
        name='kernel1')
    linearity1_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ValueInitializer(
            values=tools.str_list(np.nditer(linearity1, order='F'))))
    linearity2_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ValueInitializer(
            values=tools.str_list(np.nditer(linearity2, order='F'))))
    linearity_no_opt_weights = lbann.Weights(
        optimizer=lbann.NoOptimizer(),
        initializer=lbann.ValueInitializer(
            values=tools.str_list(np.nditer(linearity_no_opt, order='F'))))
    biases_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ValueInitializer(
            values=tools.str_list(np.nditer(biases, order='F'))))

    def create_bn_weights(layer_name, num_channels):
        weights_ary = []
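            # Batch normalization has four weights: scale, bias,
            # running mean, and running variance.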
        for i in range(4):
            val = make_random_array((num_channels, ), 15 + i)
            weights_ary.append(
                lbann.Weights(optimizer=lbann.SGD(),
                              initializer=lbann.ValueInitializer(
                                  values=tools.str_list(np.nditer(val))),
                              name='{}_{}'.format(layer_name, i)))

        return weights_ary

    y = lbann.Convolution(x,
                          weights=(kernel_weights, ),
                          num_dims=2,  # 2D convolution: kernel dims are (out, in, 3, 3)
                          num_output_channels=kernel_dims[0],
                          has_vectors=True,
                          conv_dims=tools.str_list(kernel_dims[2:]),
                          conv_strides=tools.str_list(strides),
                          conv_pads=tools.str_list(pads),
                          conv_dilations=tools.str_list(dilations),
                          has_bias=False)
    # y = lbann.BatchNormalization(
    #     y,
    #     weights=create_bn_weights("bn1", kernel_dims[0]))
    y = lbann.Relu(y)
    y = lbann.FullyConnected(y,
                             weights=(linearity1_weights, ),
                             data_layout='data_parallel',
                             num_neurons=_output_size,
                             has_bias=False)
    # y = lbann.BatchNormalization(
    #     y,
    #     weights=create_bn_weights("bn2", _output_size))
    y = lbann.Relu(y)
    y = lbann.FullyConnected(y,
                             weights=(linearity2_weights, biases_weights),
                             data_layout='data_parallel',
                             num_neurons=_output_size,
                             has_bias=True)
    # y = lbann.BatchNormalization(
    #     y,
    #     weights=create_bn_weights("bn3", _output_size))
    y = lbann.Relu(y)
    y = lbann.FullyConnected(y,
                             weights=(linearity_no_opt_weights, ),
                             data_layout='data_parallel',
                             num_neurons=_output_size,
                             has_bias=False)
    z = lbann.L2Norm2(y)
    obj.append(z)

    # ------------------------------------------
    # Construct model
    # ------------------------------------------

    num_epochs = 1  ### @todo Remove
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph(x),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
Example #6
def construct_model(lbann):
    """Construct LBANN model.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Input data
    # Note: Sum with a weights layer so that gradient checking will
    # verify that error signals are correct.
    x_weights = lbann.Weights(optimizer=lbann.SGD(),
                              initializer=lbann.ConstantInitializer(value=0.0),
                              name='input_weights')
    x = lbann.Sum(
        lbann.Reshape(lbann.Input(), dims=tools.str_list(_sample_dims)),
        lbann.WeightsLayer(weights=x_weights,
                           dims=tools.str_list(_sample_dims)))
    x_lbann = x

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    # ------------------------------------------
    # Pooling
    # ------------------------------------------

    num_height_groups = tools.gpus_per_node(lbann)
    if num_height_groups == 0:
        e = 'this test requires GPUs.'
        print('Skip - ' + e)
        pytest.skip(e)

    pool_configs = []

    # 3x3 pooling, stride 1, no padding
    for mode, val in [
        ("average", 700.1066377082393),  # _num_samples=8
        ("max", 1255.4813455546334),  # _num_samples=8
            # ("average", 830.2573008820838), # _num_samples=23
            # ("max", 1167.667676299899), # _num_samples=23
    ]:
        pool_configs.append({
            "name": "3x3 {} pooling".format(mode),
            "kernel_dims": (3, 3),
            "strides": (1, 1),
            "pads": (0, 0),
            "pool_mode": mode,
            "val": val,
        })

    # 2x2 strided pooling
    for mode, val in [
        ("average", 263.76437243059104),  # _num_samples=23
        ("max", 358.66104389177207),  # _num_samples=23
            # ("average", 293.61402789516825), # _num_samples=23
            # ("max", 351.4916288366334), # _num_samples=23
    ]:
        pool_configs.append({
            "name": "2x2 {} pooling".format(mode),
            "kernel_dims": (2, 2),
            "strides": (2, 2),
            "pads": (0, 0),
            "pool_mode": mode,
            "val": val,
        })

    # 2x2x2 3D pooling
    for mode, val in [
        ("average", 59.3851451701403),  # _num_samples=8
        ("max", 216.75871475407558),  # _num_samples=8
            # ("average", 89.61246528381926), # _num_samples=23
            # ("max", 198.65624293856985), # _num_samples=23
    ]:
        pool_configs.append({
            "name": "2x2x2 {} pooling".format(mode),
            "kernel_dims": (2, 2, 2),
            "strides": (2, 2, 2),
            "pads": (0, 0, 0),
            "pool_mode": mode,
            "val": val,
        })

    for p in pool_configs:
        # Apply pooling
        x = x_lbann
        if len(p["kernel_dims"]) == 3:
            x = lbann.Reshape(x, dims=tools.str_list(_sample_dims_3d))

        y = lbann.Pooling(
            x,
            num_dims=len(p["kernel_dims"]),
            has_vectors=True,
            pool_dims=tools.str_list(p["kernel_dims"]),
            pool_strides=tools.str_list(p["strides"]),
            pool_pads=tools.str_list(p["pads"]),
            pool_mode=p["pool_mode"],
            parallel_strategy=create_parallel_strategy(num_height_groups))
        z = lbann.L2Norm2(y)

        # Note: Max pooling is not smoothly differentiable, so only average
        # pooling contributes to the objective used for gradient checking.
        if p["pool_mode"] == "average":
            obj.append(z)

        metrics.append(lbann.Metric(z, name=p["name"]))

        # PyTorch implementation
        try:
            x = _samples
            if len(p["kernel_dims"]) == 3:
                x = np.reshape(x, [_num_samples] + _sample_dims_3d)

            y = pytorch_pooling(
                x,
                p["kernel_dims"],
                p["pool_mode"],
                stride=p["strides"],
                padding=p["pads"],
            )
            z = tools.numpy_l2norm2(y) / _num_samples
            val = z
        except Exception:
            # Precomputed value
            val = p["val"]
        tol = 8 * val * np.finfo(np.float32).eps

        callbacks.append(
            lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                      lower_bound=val - tol,
                                      upper_bound=val + tol,
                                      error_on_failure=True,
                                      execution_modes='test'))

    # ------------------------------------------
    # Gradient checking
    # ------------------------------------------

    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # ------------------------------------------
    # Construct model
    # ------------------------------------------

    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph(x_lbann),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
Example #7
def construct_model(lbann):
    """Construct LBANN model.

    Args:
        lbann (module): Module for LBANN Python frontend

    """

    # Input data
    # Note: Sum with a weights layer so that gradient checking will
    # verify that error signals are correct.
    x_weights = lbann.Weights(optimizer=lbann.SGD(),
                              initializer=lbann.ConstantInitializer(value=0.0),
                              name='input_weights')
    x0 = lbann.WeightsLayer(weights=x_weights,
                            dims=tools.str_list(_sample_dims))
    x1 = lbann.Reshape(lbann.Input(data_field='samples'),
                       dims=tools.str_list(_sample_dims),
                       name="Input_layer")
    x = lbann.Sum(x0, x1, name="Adding_weight_layer")
    x_lbann = x

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    num_channel_groups = tools.gpus_per_node(lbann)
    if num_channel_groups == 0:
        e = 'this test requires GPUs.'
        print('Skip - ' + e)
        pytest.skip(e)
    # ------------------------------------------
    # Compute expected metric values with NumPy
    # ------------------------------------------

    # Input and output dimensions
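    # ChannelwiseFullyConnected applies the same linear map independently
    # to every channel of the input tensor.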
    input_channel_dims = _sample_dims[1:]
    output_channel_dims = (1, 3)
    input_channel_size = functools.reduce(operator.mul, input_channel_dims)
    output_channel_size = functools.reduce(operator.mul, output_channel_dims)

    # Weight values
    linearity = np.random.normal(size=(output_channel_size,
                                       input_channel_size)).astype(np.float32)
    bias = np.random.normal(size=(output_channel_size, 1)).astype(np.float32)
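    # No fixed seed is needed: the expected metric values below are
    # computed directly from these same arrays.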

    # With bias
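    # Stack every channel of every sample as a column and apply the
    # linearity to all of them at once.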

    x = (_samples.reshape(
        (-1, input_channel_size)).transpose().astype(np.float64))

    y = np.matmul(linearity.astype(np.float64), x) + bias.astype(np.float64)

    z = tools.numpy_l2norm2(y) / _num_samples
    val_with_bias = z

    # Without bias
    x = (_samples.reshape(
        (-1, input_channel_size)).transpose().astype(np.float64))
    y = np.matmul(linearity.astype(np.float64), x)
    z = tools.numpy_l2norm2(y) / _num_samples
    val_without_bias = z

    # ------------------------------------------
    # Non-transpose, bias
    # ------------------------------------------

    # LBANN implementation
    linearity_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ValueInitializer(
            values=tools.str_list(np.nditer(linearity, order='F'))))
    bias_weights = lbann.Weights(optimizer=lbann.SGD(),
                                 initializer=lbann.ValueInitializer(
                                     values=tools.str_list(np.nditer(bias))))
    x = x_lbann
    y = lbann.ChannelwiseFullyConnected(
        x,
        weights=(linearity_weights, bias_weights),
        output_channel_dims=output_channel_dims,
        parallel_strategy=create_parallel_strategy(num_channel_groups),
        name="bias")
    z = lbann.L2Norm2(y, name="L2Norm")
    obj.append(z)
    metrics.append(lbann.Metric(z, name='non-transpose, bias'))

    # NumPy implementation
    tol = 8 * val_with_bias * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val_with_bias - tol,
                                  upper_bound=val_with_bias + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # ------------------------------------------
    # Non-transpose, no bias
    # ------------------------------------------

    # LBANN implementation
    linearity_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ValueInitializer(
            values=tools.str_list(np.nditer(linearity, order='F'))))

    x = x_lbann
    y = lbann.ChannelwiseFullyConnected(
        x,
        weights=(linearity_weights, ),
        output_channel_dims=output_channel_dims,
        parallel_strategy=create_parallel_strategy(num_channel_groups),
        name="no_bias")
    z = lbann.L2Norm2(y)
    obj.append(z)
    metrics.append(lbann.Metric(z, name='non-transpose, no bias'))

    # NumPy implementation
    tol = 8 * val_without_bias * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val_without_bias - tol,
                                  upper_bound=val_without_bias + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))
    # ------------------------------------------
    # Transpose, bias
    # ------------------------------------------

    # LBANN implementation
    linearity_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ValueInitializer(
            values=tools.str_list(np.nditer(linearity, order='C'))))
    bias_weights = lbann.Weights(optimizer=lbann.SGD(),
                                 initializer=lbann.ValueInitializer(
                                     values=tools.str_list(np.nditer(bias))))
    x = x_lbann
    y = lbann.ChannelwiseFullyConnected(
        x,
        weights=(linearity_weights, bias_weights),
        output_channel_dims=output_channel_dims,
        parallel_strategy=create_parallel_strategy(num_channel_groups),
        transpose=True,
    )
    z = lbann.L2Norm2(y)
    obj.append(z)
    metrics.append(lbann.Metric(z, name='transpose, bias'))

    # NumPy implementation
    tol = 8 * val_with_bias * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val_with_bias - tol,
                                  upper_bound=val_with_bias + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # ------------------------------------------
    # Transpose, no bias
    # ------------------------------------------

    # LBANN implementation
    linearity_weights = lbann.Weights(
        optimizer=lbann.SGD(),
        initializer=lbann.ValueInitializer(
            values=tools.str_list(np.nditer(linearity, order='C'))))
    x = x_lbann
    y = lbann.ChannelwiseFullyConnected(
        x,
        weights=(linearity_weights, ),
        output_channel_dims=output_channel_dims,
        parallel_strategy=create_parallel_strategy(num_channel_groups),
        bias=False,
        transpose=True)
    z = lbann.L2Norm2(y)
    obj.append(z)
    metrics.append(lbann.Metric(z, name='transpose, no bias'))

    # NumPy implementation
    tol = 8 * val_without_bias * np.finfo(np.float32).eps
    callbacks.append(
        lbann.CallbackCheckMetric(metric=metrics[-1].name,
                                  lower_bound=val_without_bias - tol,
                                  upper_bound=val_without_bias + tol,
                                  error_on_failure=True,
                                  execution_modes='test'))

    # ------------------------------------------
    # Gradient checking
    # ------------------------------------------

    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # ------------------------------------------
    # Construct model
    # ------------------------------------------

    num_epochs = 1
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph(x_lbann),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)