Example #1
    def __init__(self, net: core.Net, clip_anomalies: bool) -> None:
        self.clip_anomalies = clip_anomalies

        self._net = net
        self.ONE = self._net.NextBlob('ONE')
        self.ZERO = self._net.NextBlob('ZERO')
        self.MISSING = self._net.NextBlob('MISSING_VALUE')
        self.MISSING_U = self._net.NextBlob('MISSING_VALUE_U')
        self.MISSING_L = self._net.NextBlob('MISSING_VALUE_L')
        workspace.FeedBlob(self.ONE, np.array([1], dtype=np.float32))
        workspace.FeedBlob(self.ZERO, np.array([0], dtype=np.float32))
        workspace.FeedBlob(
            self.MISSING, np.array([MISSING_VALUE], dtype=np.float32)
        )
        workspace.FeedBlob(
            self.MISSING_U, np.array([MISSING_VALUE + 1e-4], dtype=np.float32)
        )
        workspace.FeedBlob(
            self.MISSING_L, np.array([MISSING_VALUE - 1e-4], dtype=np.float32)
        )
        self.MISSING_SCALAR = net.NextBlob('MISSING_SCALAR')
        workspace.FeedBlob(
            self.MISSING_SCALAR, np.array([MISSING_VALUE], dtype=np.float32)
        )
        net.GivenTensorFill(
            [], [self.MISSING_SCALAR], shape=[], values=[MISSING_VALUE]
        )
        self.parameters = [
            self.ZERO,
            self.ONE,
            self.MISSING,
            self.MISSING_L,
            self.MISSING_U,
            self.MISSING_SCALAR,
        ]
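
A minimal construction sketch, assuming this `__init__` belongs to the PreprocessorNet class that Example #5 instantiates, and that MISSING_VALUE is the sentinel constant defined in the same module:

from caffe2.python import core

norm_net = core.Net("preprocessing")
preprocessor = PreprocessorNet(norm_net, clip_anomalies=False)
# The constant blobs (ONE, ZERO, MISSING_*) are now fed into the workspace and
# their names are collected in preprocessor.parameters.
print(preprocessor.parameters)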
Example #2
    def gen_additional_blobs(
        predict_net: core.Net,
        probability_out,
        model_out: torch.Tensor,
        output_name: str,
        label_names: List[str],
    ) -> List[core.BlobReference]:
        """
        Utility method to generate additional blobs with human-readable results
        for models that use explicit labels.
        """
        res = []
        tmp_out_score = predict_net.Log(probability_out)
        label_scores = predict_net.Split(tmp_out_score,
                                         label_names,
                                         axis=model_out.dim() - 1)

        # Make sure label_scores is iterable
        if not isinstance(label_scores, tuple):
            label_scores = (label_scores, )
        for name, label_score in zip(label_names, label_scores):
            res.append(
                predict_net.Copy(label_score,
                                 "{}:{}".format(output_name, name)))
        return res
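
A hedged usage sketch, assuming this is a static method on the OutputLayerUtils class referenced in the later examples; the blob name and label names are illustrative:

import torch
from caffe2.python import core

predict_net = core.Net("predict")
model_out = torch.zeros(1, 3)  # batch x num_labels logits traced during export
probability_out = predict_net.Softmax("scores", axis=model_out.dim() - 1)
blobs = OutputLayerUtils.gen_additional_blobs(
    predict_net, probability_out, model_out, "scores",
    ["negative", "neutral", "positive"],
)
# blobs now reference "scores:negative", "scores:neutral" and "scores:positive"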
Example #3
    def test_net_multi_use(self):
        with Task() as task:
            total = ops.Const(0)
            net = Net('my_net')
            net.Add([total, net.Const(1)], [total])
            ops.net(net)
            ops.net(net)
            result = final_output(total)
        with LocalSession() as session:
            session.run(task)
            self.assertEqual(2, result.fetch())
Example #4
def debug_net(net):
    """
    Given a Net, produce another net that logs info about the operator call
    before each operator execution. Use for debugging purposes.
    """
    assert isinstance(net, Net)
    debug_net = Net(str(net))
    for op in net.Proto().op:
        text = Text()
        print_op(op, text)
        debug_net.LogInfo(str(text))
        debug_net.Proto().op.extend([op])
    return debug_net
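
A usage sketch, assuming `Net` above is `caffe2.python.core.Net` and that the input blobs are fed before running:

import numpy as np
from caffe2.python import core, workspace

net = core.Net("adder")
net.Add(["x", "y"], "z")

workspace.FeedBlob("x", np.ones(3, dtype=np.float32))
workspace.FeedBlob("y", np.ones(3, dtype=np.float32))
# Each op in the instrumented net is preceded by a LogInfo describing it.
workspace.RunNetOnce(debug_net(net))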
Example #5
def prepare_normalization(
    norm_net: core.Net,
    normalization_params: Dict[str, NormalizationParameters],
    features: List[str],
    blobname_template: str,
    clip_anomalies: bool,
) -> Dict[int, str]:
    """
    Sets up operators for the normalization net and returns a mapping from
    feature index to input blob name for the net.

    Note that the Caffe2 BatchBoxCox operator isn't implemented on CUDA GPUs, so
    we need to use a CPU context.

    :param norm_net: Caffe2 net for normalization.
    :param normalization_params: Mapping from feature names to
        NormalizationParameters.
    :param features: List of feature names.
    :param blobname_template: String template for input blobs to norm_net.
    :param clip_anomalies: Whether the preprocessor should clip anomalous
        feature values.
    """
    norm_blob_map = {}
    with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
        preprocessor = PreprocessorNet(norm_net, clip_anomalies)
        for idx, feature in enumerate(features):
            input_blob = blobname_template.format(idx)
            reshaped_input_blob = input_blob + '_reshaped'
            original_shape = input_blob + '_original_shape'

            workspace.FeedBlob(input_blob, np.zeros(1, dtype=np.float32))
            norm_net.Reshape(
                [input_blob], [reshaped_input_blob, original_shape],
                shape=[-1, 1]
            )
            normalization_param = normalization_params[feature]
            normalized_input_blob, _ = preprocessor.preprocess_blob(
                reshaped_input_blob, normalization_param
            )
            norm_net.ReplaceNaN(normalized_input_blob, normalized_input_blob)
            if normalization_param.feature_type != identify_types.ENUM:
                norm_net.Reshape(
                    [normalized_input_blob],
                    [normalized_input_blob, original_shape],
                    shape=[1, -1]
                )
            norm_blob_map[idx] = normalized_input_blob
        workspace.CreateNet(norm_net)
    return norm_blob_map
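
A hedged sketch of how this might be called; `normalization_params` is assumed to come from the repo's own feature-identification utilities, and the template string is illustrative:

from caffe2.python import core

norm_net = core.Net("normalization")
features = sorted(normalization_params.keys())
norm_blob_map = prepare_normalization(
    norm_net,
    normalization_params,   # Dict[str, NormalizationParameters]
    features,
    "input_feature_{}",     # blobname_template
    clip_anomalies=False,
)
# norm_blob_map[idx] names the blob holding the normalized value of features[idx]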
Example #6
    def export_to_caffe2(
        self,
        workspace: core.workspace,
        init_net: core.Net,
        predict_net: core.Net,
        model_out: torch.Tensor,
        output_name: str,
    ) -> List[core.BlobReference]:
        """
        Exports the doc classification layer to Caffe2.
        See `OutputLayerBase.export_to_caffe2()` for details.
        """
        if isinstance(self.loss_fn, BinaryCrossEntropyLoss):
            probability_out = predict_net.Sigmoid(output_name)
        else:
            probability_out = predict_net.Softmax(output_name, axis=model_out.dim() - 1)

        return OutputLayerUtils.gen_additional_blobs(
            predict_net, probability_out, model_out, output_name, self.target_names
        )
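
These `export_to_caffe2` overrides are normally invoked by an exporter with a live caffe2 workspace and the traced model output. A heavily hedged sketch, where `output_layer` stands in for an already-constructed output layer with its `target_names` set, and the `caffe2.python.workspace` module stands in for whatever workspace handle the exporter passes:

import torch
from caffe2.python import core, workspace

init_net = core.Net("init")
predict_net = core.Net("predict")
model_out = torch.zeros(1, len(output_layer.target_names))  # traced logits

extra_blobs = output_layer.export_to_caffe2(
    workspace, init_net, predict_net, model_out, "scores"
)
# extra_blobs holds one human-readable score blob per target name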
Example #7
    def export_to_caffe2(
        self,
        workspace: core.workspace,
        init_net: core.Net,
        predict_net: core.Net,
        model_out: torch.Tensor,
        output_name: str,
    ) -> List[core.BlobReference]:
        """See `OutputLayerBase.export_to_caffe2()`."""
        probability_out = predict_net.Softmax(output_name, axis=model_out.dim() - 1)
        return OutputLayerUtils.gen_additional_blobs(
            predict_net, probability_out, model_out, output_name, self.target_names
        )
Example #8
    def export_to_caffe2(
        self,
        workspace: core.workspace,
        init_net: core.Net,
        predict_net: core.Net,
        model_out: torch.Tensor,
        output_name: str,
    ) -> List[core.BlobReference]:
        prob_out = predict_net.Softmax(output_name, axis=model_out.dim() - 1)
        # prepend an underscore to target_names to avoid conflicts between
        # existing cell names and target names
        edited_target_names = [f"_{name}" for name in self.target_names]
        return OutputLayerUtils.gen_additional_blobs(
            predict_net, prob_out, model_out, output_name, edited_target_names
        )
Example #9
def _prepare_gradient_while_ops(
        fwd_op, input_names, output_names, loop_grad_net, workspace_blob,
        init_grad_map, loop_grad_map):
    gradient_while_def = caffe2_pb2.OperatorDef()
    gradient_while_def.CopyFrom(fwd_op)
    if gradient_while_def.name:
        gradient_while_def.name += "_grad"

    loop_net_arg = caffe2_pb2.Argument()
    loop_net_arg.name = "loop_net"
    loop_net_arg.n.CopyFrom(loop_grad_net)

    cond_net_arg = caffe2_pb2.Argument()
    cond_net_arg.name = "cond_net"
    from caffe2.python.core import Net, BlobReference
    # Construct condition net - check that there are still forward workspaces
    # left, using the HasScope op
    cond_net = Net('gradient_loop_cond_net')
    cond_init_net = Net('gradient_loop_cond_net_init')
    cond_blob = cond_net.NextScopedBlob(cond_net.Name() + '/cond')
    cond_init_net.HasScope(workspace_blob, cond_blob)
    cond_net.HasScope(workspace_blob, cond_blob)
    for blob, init_grad_blob in init_grad_map.items():
        blob_name = str(blob)
        init_grad_blob_name = str(init_grad_blob)
        if blob_name in loop_grad_map and \
                loop_grad_map[blob_name] != init_grad_blob_name:
            cond_net.Copy(
                BlobReference(loop_grad_map[blob_name]), init_grad_blob)
            cond_init_net.Copy(
                init_grad_blob, BlobReference(loop_grad_map[blob_name]))
    cond_net_arg.n.CopyFrom(cond_net.Proto())

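    # Rewrite the copied forward op in place: the gradient loop and condition
    # nets become its arguments, the condition blob plus the gradient inputs
    # become its inputs, and the op is flagged as a gradient op.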
    del gradient_while_def.arg[:]
    gradient_while_def.arg.extend([loop_net_arg, cond_net_arg])

    del gradient_while_def.control_input[:]
    del gradient_while_def.input[:]
    gradient_while_def.input.extend(
        [str(cond_blob).encode('utf-8')] + list(input_names))
    del gradient_while_def.output[:]
    gradient_while_def.output.extend(output_names)
    gradient_while_def.is_gradient_op = True
    return [o for o in cond_init_net.Proto().op] + [gradient_while_def]
Example #10
def PyTorchModule(helper,
                  model,
                  sample_arguments,
                  caffe2_inputs,
                  prefix_name=None):
    """
    Embed an ONNX-exportable PyTorch Model into a Caffe2 model being built.

    Arguments:
        helper (caffe2.python.model_helper.ModelHelper): the model helper where
            this imported network should be inserted
        model (torch.nn.Module): the model to be exported
        sample_arguments (tuple of arguments): the inputs to
            the model, e.g., such that ``model(*args)`` is a valid
            invocation of the model.  Any non-Variable arguments will
            be hard-coded into the exported model; any Variable arguments
            will become inputs of the exported model, in the order they
            occur in args.  If args is a Variable, this is equivalent
            to having called it with a 1-ary tuple of that Variable.
            (Note: passing keyword arguments to the model is not currently
            supported.  Give us a shout if you need it.)
        caffe2_inputs (list of str or caffe2.python.core.BlobReference): the
           caffe2 Blobs that should be inputs to this network. Must be
           the same length as sample_arguments
        prefix_name: prefix to add to each blob name; if None then
           a fresh prefix ``pytorch_import_N/`` is used
    Returns:
        A tuple of caffe2.python.core.BlobReference objects referring to the
        model's outputs (a 1-tuple when the model returns a single value).
    """
    if prefix_name is None:
        global _next_idx
        prefix_name = 'pytorch_import_' + str(_next_idx) + '/'
        _next_idx += 1

    # TODO: handle the case where model cannot be exported
    # and embed as a Python op in Caffe2
    f = io.BytesIO()
    torch.onnx.export(model, sample_arguments, f, export_params=True)
    onnx_model = onnx.load(io.BytesIO(f.getvalue()))
    init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)

    initialized = set([x.name for x in onnx_model.graph.initializer])
    uninitialized_inputs = {
        x.name: i
        for i, x in enumerate(onnx_model.graph.input)
        if x.name not in initialized
    }

    if (len(uninitialized_inputs) != len(caffe2_inputs)):
        raise ValueError('Expected {} inputs but found {}'.format(
            len(uninitialized_inputs), len(caffe2_inputs)))

    def remap_blob_name(name):
        if name in uninitialized_inputs:
            idx = uninitialized_inputs[name]
            return str(caffe2_inputs[idx])
        return prefix_name + name

    predict_net = Net(predict_net).Clone('anon', _FakeDict(remap_blob_name))
    helper.net.AppendNet(predict_net)

    init_net = Net(init_net).Clone('anon', _FakeDict(remap_blob_name))
    helper.param_init_net.AppendNet(init_net)

    results = tuple([
        BlobReference(remap_blob_name(x.name), helper.net)
        for x in onnx_model.graph.output
    ])
    return results
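
A minimal usage sketch, assuming an ONNX-exportable module and that the onnx/Caffe2 backend used by this helper is installed; the model and blob names are illustrative:

import numpy as np
import torch
from caffe2.python import model_helper, workspace

helper = model_helper.ModelHelper(name="wrapper")
model = torch.nn.Linear(4, 2)

workspace.FeedBlob("features", np.zeros((1, 4), dtype=np.float32))
(scores,) = PyTorchModule(helper, model, (torch.zeros(1, 4),), ["features"])

workspace.RunNetOnce(helper.param_init_net)  # runs the appended ONNX init net
workspace.RunNetOnce(helper.net)             # runs the appended ONNX predict net
print(workspace.FetchBlob(scores))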