Example #1
def temporary_init_weights(data,
                           name,
                           train=True,
                           fp16_on=False,
                           load_global_first=True):
    """Create a weight tensor from `data`, optionally overwriting it with the
    value stored in the global initializer, and register it in GLOBAL_V."""
    # Trainable weights become initialized input tensors; frozen ones constants.
    local_func = bF.addInitializedInputTensor if train else bF.constant
    global_initializer = bF.get_global_initializer()
    local_name = bF.get_builder().getNameScope(name)
    if global_initializer.get(local_name,
                              None) is not None and load_global_first:
        # overwrite data
        if local_name not in bF.get_exclude_weights():
            new_data = bF.get_global_initializer()[local_name]
            assert np.all(
                np.asarray(new_data.shape) == np.asarray(
                    data.shape)), '{}: {} vs {}'.format(
                        name, new_data.shape, data.shape)
            data = new_data
    else:
        assert not bF.load_strict(
        ), 'The global load_strict has been set, but the missing weight is detected: {}'.format(
            name)
    data = data.astype(np.float16 if fp16_on else np.float32)
    weights = local_func(data, name)
    if train:
        GLOBAL_V['all_trainable_weights'].append(weights)
    GLOBAL_V['all_weights'].append(weights)
    return weights
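
A minimal usage sketch, assuming bF, GLOBAL_V and the enclosing name scope are already set up as in the surrounding project; the weight name and shape below are illustrative:

import numpy as np

# Hypothetical call: register a trainable 256x256 weight under the current
# name scope, falling back to the global initializer value if one is present.
w = temporary_init_weights(np.random.randn(256, 256).astype(np.float32),
                           'fc1/weight',
                           train=True,
                           fp16_on=False)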
Example #2
def nms(input_scores,
        input_boxes,
        threshold=0.7,
        numDetections=300,
        score_threshold=None,
        debugContext=''):
    """Non-maximum suppression via the ai.graphcore "nms" custom op.

    Returns the kept boxes, the keep indices and the number of valid
    detections; only batch size 1 is supported."""
    load_lib()
    input_scores = input_scores.cast('FLOAT')
    input_boxes = input_boxes.cast('FLOAT')
    valid_area_mask = bF.transpose(get_valid_area_mask(input_boxes),
                                   [1, 0])  # 1,n
    input_scores = input_scores + 1e-6  # if score==0, proposals will be ignored
    local_input_scores = bF.identity(input_scores * valid_area_mask,
                                     debugContext=debugContext).detach()
    local_input_boxes = bF.identity(input_boxes,
                                    debugContext=debugContext).detach()

    if local_input_scores.shape.ndims == 1:
        local_input_scores = local_input_scores.unsqueeze(0)
    if local_input_boxes.shape.ndims == 2:
        local_input_boxes = local_input_boxes.unsqueeze(0)
    assert local_input_boxes.pureShape[0] == 1, 'only implemented batch=1'
    if score_threshold is not None:
        assert isinstance(score_threshold, float)
        local_mask = bF.greater(
            local_input_scores,
            bF.to_tensor(score_threshold, dtype=local_input_scores.dtype))
        local_mask = bF.cast(local_mask, target_type=local_input_scores.dtype)
        local_input_scores = local_input_scores * local_mask
    with bF.name_scope("nms"):
        out = bF.get_builder().customOp(opName="nms",
                                        opVersion=1,
                                        domain="ai.graphcore",
                                        inputs=[
                                            local_input_scores.getIpuIndex(),
                                            local_input_boxes.getIpuIndex()
                                        ],
                                        attributes={
                                            "threshold": threshold,
                                            "numDetections": numDetections
                                        },
                                        numOutputs=3,
                                        name="nmsCustomOp")
        _, output_boxes, output_keep = (out[0], bF.TTensor(out[1]),
                                        bF.TTensor(out[2]))
        targetType = input_scores.dtype
        roiKeeps_flag = bF.cast(bF.greater(
            output_keep, bF.constant(np.asarray(-1, dtype=np.int32))),
            target_type='INT32')
        num_valids = bF.reduceSum(roiKeeps_flag, axes=[1])
        roiKeeps_flag = bF.cast(roiKeeps_flag, target_type=targetType)
        roiKeeps_flag = bF.unsqueeze(roiKeeps_flag, [-1])
        output_boxes = bF.mul([output_boxes, roiKeeps_flag])
    return output_boxes, output_keep, num_valids
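
A hedged usage sketch, assuming scores and boxes are tensors already built in the graph with shapes (n,) and (n, 4); the variable names and thresholds are illustrative:

# Hypothetical call: keep at most 100 detections above a 0.05 score cut-off.
output_boxes, output_keep, num_valids = nms(scores,
                                            boxes,
                                            threshold=0.5,
                                            numDetections=100,
                                            score_threshold=0.05)
# Entries of output_keep greater than -1 are valid; num_valids counts them.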
Example #3
def roi_align(bottom_data,
              bottom_rois,
              spatial_scale=1 / 16.0,
              num_rois=300,
              aligned_height=7,
              aligned_width=7,
              fp16_on=None):
    """roi_align implements."""

    load_lib()
    assert isinstance(aligned_height, int) and isinstance(aligned_width, int), \
        'aligned_height/aligned_width must be int, otherwise "IndexError: map::at" is raised'
    cast_flag, bottom_data, fp16_on = bF.deduce_half(bottom_data, fp16_on)
    if fp16_on:
        bottom_rois = bottom_rois.cast('FLOAT16')
    else:
        bottom_rois = bottom_rois.cast('FLOAT')

    if fp16_on:
        raise NotImplementedError('maybe not implemented')

    # same as detectron2 roi_align version2(aligned=True and sampling_ratio=1)
    batch_size, channels, height, width = bottom_data.pureShape
    with bF.name_scope("roiAlign"):
        out = bF.get_builder().customOp(
            opName="roiAlign",
            opVersion=1,
            domain="ai.graphcore",
            inputs=[bottom_data.getIpuIndex(),
                    bottom_rois.getIpuIndex()],
            attributes={
                "spatial_scale": spatial_scale,
                "batch_size": batch_size,
                "num_rois": num_rois,
                "height": height,
                "width": width,
                "channels": channels,
                "aligned_height": aligned_height,
                "aligned_width": aligned_width
            },
            numOutputs=1)
    result = bF.TTensor(out[0])

    if cast_flag:
        result = result.cast(cast_flag)

    return result
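
A usage sketch under the same assumptions (the feature map and ROI tensors already exist in the graph; shapes and names are illustrative, and the ROI layout must match what the custom op expects):

# Hypothetical call: pool 300 ROIs from a (1, C, H, W) feature map into 7x7 bins.
pooled = roi_align(feature_map,
                   rois,
                   spatial_scale=1 / 16.0,
                   num_rois=300,
                   aligned_height=7,
                   aligned_width=7)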
Example #4
def __init__(self, pattern):
    self.builder = bF.get_builder()
    self.pattern = pattern
    self.stack = ExitStack()  # contextlib.ExitStack
Example #5
def init_proto(self):
    self.proto = bF.get_builder().getModelProto()