Example #1
def main():
    args = get_args()

    # Setup
    from nnabla.ext_utils import get_extension_context
    if args.context is None:
        extension_module = "cudnn"  # TODO: Hard coded!!!
    else:
        extension_module = args.context
    ctx = get_extension_context(extension_module,
                                device_id=args.device_id,
                                type_config=args.type_config)
    nn.set_default_context(ctx)

    # Load parameters
    channel_last, channels = load_parameters_and_config(
        args.weights, args.type_config)
    logger.info('Parameter configuration is deduced as:')
    logger.info(f'* channel_last={channel_last}')
    logger.info(f'* channels={channels}')

    # Read image
    image = read_image_with_preprocess(args.input_image,
                                       args.norm_config,
                                       channel_last=channel_last,
                                       channels=channels,
                                       spatial_size=args.spatial_size)
    img = nn.NdArray.from_numpy_array(image)

    # Perform inference
    from models import build_network
    num_classes = args.num_classes
    pred, _ = build_network(img,
                            num_classes,
                            args.arch,
                            test=True,
                            channel_last=channel_last)
    prob = F.softmax(pred)
    top5_index = F.sort(prob, reverse=True, only_index=True)[:, :5]

    # Get and print result
    labels = read_labels(args.labels)
    logger.info('Top-5 prediction:')
    for i in top5_index.data[0]:
        logger.info(
            f'* {int(i)} {labels[int(i)]}: {prob.data[0, int(i)] * 100:.2f}')
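
A minimal standalone sketch of the top-k pattern used above (descending sort with only_index=True, then a slice), assuming an nnabla installation with auto-forward mode; the scores are toy values made up for illustration:

import numpy as np
import nnabla as nn
import nnabla.functions as F

nn.set_auto_forward(True)  # execute eagerly so results are populated

# Toy logits for one sample over 5 classes (illustrative values only).
scores = nn.NdArray.from_numpy_array(
    np.array([[0.1, 2.5, 0.3, 1.7, 0.9]], dtype=np.float32))
prob = F.softmax(scores)
# Sort descending, keep only the argsort indices, and slice off the top 3.
top3_index = F.sort(prob, reverse=True, only_index=True)[:, :3]
print(top3_index.data)  # -> [[1. 3. 4.]]
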
Example #2
def infer():
    """
    Main script.
    """

    # Get args.
    args = get_args()

    # Get context.
    from nnabla.ext_utils import get_extension_context
    extension_module = args.context
    if args.context is None:
        extension_module = 'cpu'
    logger.info("Running in %s" % extension_module)
    ctx = get_extension_context(extension_module,
                                device_id=args.device_id,
                                type_config=args.type_config)
    nn.set_default_context(ctx)
    nn.clear_parameters()  # Clear existing parameters before loading weights for inference.

    # Get data from args.
    im = imread(args.input_file, num_channels=3)
    vdata = resize_and_crop_center(im)

    # Get a model.
    num_classes = 1000  # The number of classes.
    v_model = get_model(args, num_classes)
    v_model.pred.persistent = True  # Keep pred's buffer from being cleared in forward.

    # Get parameters from parameter file.
    nn.load_parameters(args.weight_file)

    # Perform inference.
    v_model.image.d = vdata
    v_model.image.data.cast(np.uint8, ctx)
    v_model.pred.forward(clear_buffer=True)
    values, labels = F.sort(-v_model.pred.data, with_index=True)
    ratios = F.softmax(-values)
    print_result(labels.data, ratios.data)
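
Example #2 obtains a descending order by sorting the negated scores: an ascending sort of -pred is a descending sort of pred, and with_index=True also returns the original class positions. A small self-contained sketch of just that trick, with made-up logits:

import numpy as np
import nnabla as nn
import nnabla.functions as F

nn.set_auto_forward(True)

# Toy logits for one sample over 3 classes (illustrative values only).
pred = nn.NdArray.from_numpy_array(
    np.array([[1.0, 3.0, 2.0]], dtype=np.float32))
values, labels = F.sort(-pred, with_index=True)  # ascending on -pred
ratios = F.softmax(-values)                      # undo the negation
print(labels.data)  # -> [[1. 2. 0.]]  best class first
print(ratios.data)  # -> probabilities in the same descending order
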
Example #3
def forward_pass(ray_directions,
                 ray_origins,
                 near_plane,
                 far_plane,
                 app_emb,
                 trans_emb,
                 encode_position_function,
                 encode_direction_function,
                 config,
                 use_transient,
                 hwf=None,
                 image=None):

    if encode_direction_function is not None:
        view_directions = ray_directions
        view_directions = view_directions / \
            F.norm(view_directions, p=2, axis=-1, keepdims=True)
    else:
        view_directions = None

    # For forward-facing datasets like LLFF, scene sampling is done in the NDC system.
    if config.train.use_ndc:
        ray_origins, ray_directions = ndc_rays(hwf[0], hwf[1], hwf[2], 1,
                                               ray_origins, ray_directions)

    if isinstance(ray_directions, nn.Variable):
        randomize = True
    else:  # Inference
        randomize = False

    sample_points, depth_values = compute_sample_points_from_rays(
        ray_origins,
        ray_directions,
        near_plane,
        far_plane,
        config.train.num_samples_course,
        randomize=randomize)

    radiance_field = get_radiance_field(sample_points,
                                        view_directions,
                                        app_emb,
                                        trans_emb,
                                        encode_position_function,
                                        encode_direction_function,
                                        config.train.chunksize_course,
                                        'nerf_coarse',
                                        use_transient=use_transient)

    if use_transient:
        rgb_map_course, weights_course = volume_rendering_transient(
            radiance_field,
            ray_origins,
            depth_values,
            return_weights=True,
            white_bkgd=config.train.white_bkgd,
            raw_noise_std=config.train.raw_noise_std)

    else:
        (rgb_map_course, depth_map_course, acc_map_course, disp_map_course, weights_course) = \
            volumetric_rendering(radiance_field, ray_origins, depth_values,
                                 return_weights=True, white_bkgd=config.train.white_bkgd, raw_noise_std=config.train.raw_noise_std)

    # Get fine depth values
    num_additional_points = config.train.num_samples_fine - \
        config.train.num_samples_course
    if randomize is False:
        depth_values = F.broadcast(
            depth_values, (ray_origins.shape[0], depth_values.shape[-1]))

    depth_values_mid = 0.5 * (depth_values[..., 1:] + depth_values[..., :-1])
    depth_samples = sample_pdf(depth_values_mid,
                               weights_course[..., 1:-1],
                               num_additional_points,
                               det=not randomize)

    if isinstance(depth_samples, nn.Variable):
        depth_samples = depth_samples.get_unlinked_variable(need_grad=False)
    elif isinstance(depth_samples, nn.NdArray):
        pass
    elif isinstance(depth_samples, np.ndarray):
        if isinstance(radiance_field, nn.Variable):
            depth_samples = nn.Variable.from_numpy_array(depth_samples)
        else:
            depth_samples = nn.NdArray.from_numpy_array(depth_samples)
    else:
        raise NotImplementedError

    depth_values = F.sort(F.concatenate(depth_values,
                                        depth_samples,
                                        axis=depth_samples.ndim - 1),
                          axis=depth_values.ndim - 1)

    sample_points = ray_origins[..., None, :] + \
        ray_directions[..., None, :]*depth_values[..., :, None]
    radiance_field = get_radiance_field(sample_points,
                                        view_directions,
                                        app_emb,
                                        trans_emb,
                                        encode_position_function,
                                        encode_direction_function,
                                        config.train.chunksize_fine,
                                        'nerf_fine',
                                        use_transient=use_transient)

    if use_transient:
        rgb_map_fine, weights_fine, static_rgb_map_fine, transient_rgb_map_fine, beta = \
            volume_rendering_transient(radiance_field, ray_origins, depth_values,
                                       return_weights=False, white_bkgd=config.train.white_bkgd, raw_noise_std=config.train.raw_noise_std)
    else:
        rgb_map_fine, depth_map_fine, acc_map_fine, disp_map_fine, weights_fine = \
            volumetric_rendering(radiance_field, ray_origins, depth_values,
                                 return_weights=True, white_bkgd=config.train.white_bkgd, raw_noise_std=config.train.raw_noise_std)

    if use_transient:
        static_sigma = radiance_field[..., 3]
        transient_sigma = radiance_field[..., 7]

        return rgb_map_course, rgb_map_fine, static_rgb_map_fine, transient_rgb_map_fine, beta, static_sigma, transient_sigma

    else:
        return rgb_map_course, depth_map_course, disp_map_course, acc_map_course, rgb_map_fine, depth_map_fine, disp_map_fine, acc_map_fine
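
The F.sort call in the middle of forward_pass is the merge step of hierarchical sampling: coarse and fine depth samples are concatenated per ray and re-sorted so that they remain monotonically increasing along the ray. A toy sketch of that step in isolation, with one ray and made-up depths:

import numpy as np
import nnabla as nn
import nnabla.functions as F

nn.set_auto_forward(True)

# One ray: 3 coarse samples and 2 fine samples (illustrative depths).
coarse = nn.NdArray.from_numpy_array(
    np.array([[0.1, 0.5, 0.9]], dtype=np.float32))
fine = nn.NdArray.from_numpy_array(
    np.array([[0.3, 0.7]], dtype=np.float32))
merged = F.sort(F.concatenate(coarse, fine, axis=1), axis=1)
print(merged.data)  # -> [[0.1 0.3 0.5 0.7 0.9]]
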
Example #4
def test_equal_values(ctx, fname, reverse):
    with nn.context_scope(ctx), nn.auto_forward(True):
        x = nn.Variable.from_numpy_array([2, 3, 3, 4, 2])
        y, i = F.sort(x, reverse=reverse, with_index=True)
        assert all(y.d == ([4, 3, 3, 2, 2] if reverse else [2, 2, 3, 3, 4]))
        assert all(i.d == ([3, 1, 2, 0, 4] if reverse else [0, 4, 1, 2, 3]))
Example #5
def pack_padded_sequence(padded_sequence,
                         lengths,
                         batch_first=False,
                         enforce_sorted=True):
    r"""Pack a padded variable-length sequences.

    This method packs padded variable-length sequences.

    :math:`T` is the max length over the lengths of sequences.
    :math:`B` is the batch size, equal to the number of sequences.
    :math:`*` is the remaining dimensions including none.

    .. note::
      This function **must** be used in the dynamic computation mode.


    Example:

    .. code-block:: python

      import numpy as np
      import nnabla as nn
      import nnabla.functions as F
      import nnabla.utils.rnn as rnn_utils

      nn.set_auto_forward(True)

      l2v = lambda ldata: nn.Variable.from_numpy_array(np.asarray(ldata))
      a = l2v([1, 1, 1, 1])
      b = l2v([2, 2, 2])
      c = l2v([2, 2, 2])
      d = l2v([3, 3])
      e = l2v([3, 3])
      sequences = [a, b, c, d, e]
      lengths = l2v([seq.shape[0] for seq in sequences])

      padded_sequence = rnn_utils.pad_sequence(sequences)
      print(padded_sequence.d)

      packed_sequence = rnn_utils.pack_padded_sequence(padded_sequence, lengths)
      print(packed_sequence.data.d)
      print(packed_sequence.batch_sizes.d)

    Args: 
      padded_sequence (:obj:`nnabla.Variable`): Padded sequence of (:math:`T \times B \times *`)
                                                or (:math:`B \times T \times *`) shape.
      lengths (:obj:`nnabla.Variable`): Sequence length for each batch element; always resides on the CPU.
      batch_first (bool): `padded_sequence` is of (:math:`T`, :math:`B`, :math:`*`) shape if False,
                          otherwise (:math:`B`, :math:`T`, :math:`*`).
      enforce_sorted (bool): Sequences are sorted by length in decreasing order if True. Default is True.

    Returns: 
        :obj:`PackedSequence`
    """
    if enforce_sorted:
        sorted_indices = None
        unsorted_indices = None
    else:
        # TODO: use the CUDA context once the bug in sort is fixed.
        with nn.context_scope(nn.Context()):
            lengths, sorted_indices = F.sort(lengths,
                                             axis=0,
                                             reverse=True,
                                             with_index=True)

        B = sorted_indices.shape[0]
        unsorted_indices = F.scatter_nd(F.arange(0, B),
                                        sorted_indices.reshape((1, B)),
                                        shape=(B, ))
        axis = 0 if batch_first else 1
        padded_sequence = F.gather(padded_sequence, sorted_indices, axis)

    packed_sequence, batch_sizes = F.pack_padded_sequence(
        padded_sequence, lengths, batch_first)
    packed_sequence0 = PackedSequence()
    packed_sequence0.data = packed_sequence
    packed_sequence0.batch_sizes = batch_sizes
    packed_sequence0.sorted_indices = sorted_indices
    packed_sequence0.unsorted_indices = unsorted_indices

    return packed_sequence0
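
The F.scatter_nd line above builds the inverse permutation of sorted_indices: scattering arange(B) to the positions named by sorted_indices yields the indices that undo the sort. A toy check of that identity, with made-up lengths:

import numpy as np
import nnabla as nn
import nnabla.functions as F

nn.set_auto_forward(True)

lengths = nn.Variable.from_numpy_array(np.array([3, 5, 4]))
lengths_sorted, sorted_indices = F.sort(lengths, axis=0, reverse=True,
                                        with_index=True)
B = sorted_indices.shape[0]
unsorted_indices = F.scatter_nd(F.arange(0, B),
                                sorted_indices.reshape((1, B)),
                                shape=(B,))
print(lengths_sorted.d)    # -> [5. 4. 3.]
print(sorted_indices.d)    # -> [1. 2. 0.]
print(unsorted_indices.d)  # -> [2. 0. 1.]  inverse of sorted_indices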