Example #1
    def __call__(self, *inputs, **kwargs):
        if (len(inputs) > self._schema.MaxNumInput()
                or len(inputs) < self._schema.MinNumInput()):
            raise ValueError("Operator {} expects [{}, " +
                             "{}] inputs, but received {}".format(
                                 type(self).__name__, self._schema.MinNumInput(
                                 ), self._schema.MaxNumInput(), len(inputs)))

        # Bind the inputs to a new operator instance and prepare the output map
        op_instance = _OperatorInstance(inputs, self, **kwargs)
        outputs = {}
        feature_names = []
        features = []
        # Register one output tensor per requested TFRecord feature
        for i, (feature_name, feature) in enumerate(self._features.items()):
            t_name = "_TFRecordReader_id_{}_output_{}".format(op_instance.id, i)
            t = TensorReference(t_name, self._device, op_instance)
            op_instance.spec.AddOutput(t.name, t.device)
            op_instance.append_output(t)
            outputs[feature_name] = t
            feature_names.append(feature_name)
            features.append(feature)

        op_instance.spec.AddArg("feature_names", feature_names)
        op_instance.spec.AddArg("features", features)
        return outputs
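
Both examples build a TensorReference from a generated name, an output device, and the producing operator instance, then register it on the operator spec via AddOutput(t.name, t.device). A minimal sketch of what such a reference class could look like, inferred purely from these call sites; attribute names beyond name and device are assumptions, not the library's actual definition:

# Minimal sketch of a tensor-reference class, inferred from the call sites above.
# The real class in the library likely carries more state; "source" is an assumed name.
class TensorReference:
    def __init__(self, name, device, source):
        self.name = name        # unique name, e.g. "_TFRecordReader_id_0_output_1"
        self.device = device    # "cpu" or "gpu"
        self.source = source    # the operator instance that produces this tensor
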
Example #2
    def generate_outputs(self):
        # Add outputs: "gpu" and "mixed" operators produce GPU outputs, everything else stays on the CPU
        if self._op.device in ("gpu", "mixed"):
            output_device = "gpu"
        else:
            output_device = "cpu"

        num_output = (self._op.schema.CalculateOutputs(self._spec)
                      + self._op.schema.CalculateAdditionalOutputs(self._spec))

        # Create and register a TensorReference for every output slot
        for i in range(num_output):
            t_name = "{}_id_{}_output_{}".format(type(self._op).__name__, self.id, i)
            t = TensorReference(t_name, output_device, self)
            self._spec.AddOutput(t.name, t.device)
            self.append_output(t)
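
The naming scheme here mirrors Example #1: each output is called <OperatorName>_id_<instance id>_output_<index>, and its device follows the operator's placement. A self-contained illustration of the resulting names, using made-up values ("Crop", instance id 3, two outputs) rather than anything taken from the examples:

# Hypothetical illustration of the output-naming scheme used above;
# the operator name "Crop", instance id 3, and output count 2 are invented values.
op_name, instance_id, num_output = "Crop", 3, 2
names = ["{}_id_{}_output_{}".format(op_name, instance_id, i)
         for i in range(num_output)]
print(names)  # ['Crop_id_3_output_0', 'Crop_id_3_output_1']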