Example #1
    def add_ops(self, net):
        """
            Both the predict net and the eval net will call this function.

            For the bootstrapping approach, the goal is to pass the cur_layer
            feature inputs through all the bootstrapped FCs stored under
            self.bootstrapped_FCs, and to return the predictions in the same
            output_schema with dummy indices (which are not needed).
        """

        version_info = get_current_scope().get(
            get_fc_predictor_version.__name__, {"fc_version": "fp32"})
        predictor_fc_fp_version = version_info["fc_version"]

        for i in range(self.num_bootstrap):
            # these are dummy indices, not to be used anywhere
            indices = self._generate_bootstrapped_indices(
                net=net,
                copied_cur_layer=self.input_record.field_blobs()[0],
                iteration=i,
            )

            params = self.bootstrapped_FCs[i * 2:(i * 2) + 2]

            self._add_ops(
                net=net,
                features=self.input_record,
                params=params,
                iteration=i,
                version=predictor_fc_fp_version,
            )
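
All of these add_ops variants resolve their floating-point version the same way: they look up a dict in the current scope, keyed by a version function's __name__, and fall back to fp32 when nothing was registered. The sketch below is a minimal, self-contained reimplementation of that mechanism for illustration; the thread-local storage and the version_scope context manager are assumptions, not the actual Caffe2 scope API.

import threading
from contextlib import contextmanager

_scope = threading.local()  # hypothetical stand-in for the real scope storage


def get_current_scope():
    # Innermost scope dict, or an empty one when no scope is active.
    return getattr(_scope, "current", {})


@contextmanager
def version_scope(**overrides):
    # Push a scope mapping version-function names to their argument dicts.
    previous = get_current_scope()
    _scope.current = {**previous, **overrides}
    try:
        yield
    finally:
        _scope.current = previous


def get_fc_predictor_version(fc_version):
    assert fc_version in {"fp32", "fp16"}
    return fc_version


# Default: nothing registered in scope, so the layer falls back to fp32.
info = get_current_scope().get(
    get_fc_predictor_version.__name__, {"fc_version": "fp32"})
assert info["fc_version"] == "fp32"

# Override: ops added inside the scope see fp16 instead.
with version_scope(get_fc_predictor_version={"fc_version": "fp16"}):
    info = get_current_scope().get(
        get_fc_predictor_version.__name__, {"fc_version": "fp32"})
    assert info["fc_version"] == "fp16"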
Example #2
    def _get_default_init_op(self):
        scale = math.sqrt(1.0 / self.input_dim)

        cur_scope = get_current_scope()
        trainer_version = get_sparse_lookup_trainer_version(**cur_scope.get(
            get_sparse_lookup_trainer_version.__name__, {'version': 'fp32'}))

        if trainer_version == 'fp32':
            default_weight_init = ('UniformFill', {
                'min': -scale,
                'max': scale
            })
        elif trainer_version == 'fp16':
            default_weight_init = ("Float16UniformFill", {
                'min': -scale,
                'max': scale
            })
        else:
            raise NotImplementedError(
                "Train version {} is not currently supported".format(
                    trainer_version))

        self.trainer_version = trainer_version

        return default_weight_init
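
The scale here is the classic fan-in rule: weights drawn from Uniform(-scale, scale) with scale = sqrt(1/input_dim) have variance scale**2 / 3 = 1/(3*input_dim), so larger tables start with proportionally smaller weights. A quick numpy check of what the ('UniformFill', {...}) init op is expected to produce (pure numpy here for illustration; the actual fill runs as a Caffe2 op):

import math
import numpy as np

input_dim, embedding_dim = 1000, 64
scale = math.sqrt(1.0 / input_dim)

# What ('UniformFill', {'min': -scale, 'max': scale}) should produce.
rng = np.random.default_rng(0)
w = rng.uniform(-scale, scale, size=(input_dim, embedding_dim)).astype(np.float32)

# Var(U(-a, a)) = a**2 / 3 = 1 / (3 * input_dim)
print(float(w.var()), 1.0 / (3 * input_dim))  # both ~= 3.3e-4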
    def add_ops(self, net):
        version_info = get_current_scope().get(
            get_sparse_lookup_predictor_version.__name__, {'version': 'fp32'}
        )
        lookup_table_blob_size = self.shape[0] * self.shape[1]
        version = get_sparse_lookup_predictor_version(
            version_info['version'],
            blob_size=lookup_table_blob_size,
            min_blob_size_4bits=version_info.get('min_blob_size_4bits'),
            embedding_dim=self.shape[1],
            sparse_feature_name=self.sparse_key,
        )

        # TODO(amalevich): Layer should not be responsible for decision about
        # quantization.
        if not self.support_8bit() and version in {'uint8rowwise',
                                                   'fused_uint8rowwise',
                                                   'fused_uint4rowwise'}:
            version = 'fp16'

        self._add_ops(net, version, is_train=False)
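
The fallback at the end of this add_ops is a pure function of the requested version and whether the layer can serve quantized lookups, so it can be restated standalone. The function name below is illustrative, not Caffe2 API; note also that the older variants in Examples #7, #10, and #11 fall back to fp32 rather than fp16 and do not handle the 4-bit case.

QUANTIZED_VERSIONS = {"uint8rowwise", "fused_uint8rowwise", "fused_uint4rowwise"}


def resolve_predictor_version(requested, supports_8bit):
    # A rowwise-quantized version is only honored when the layer supports
    # 8-bit (and 4-bit) lookups; otherwise degrade to fp16.
    if not supports_8bit and requested in QUANTIZED_VERSIONS:
        return "fp16"
    return requested


assert resolve_predictor_version("fused_uint4rowwise", supports_8bit=False) == "fp16"
assert resolve_predictor_version("fused_uint8rowwise", supports_8bit=True) == "fused_uint8rowwise"
assert resolve_predictor_version("fp32", supports_8bit=False) == "fp32"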
Example #4
    def add_ops(self, net):
        """Both the predict net and the eval net will call this function."""
        version_info = get_current_scope().get(
            get_fc_predictor_version.__name__, {'fc_version': 'fp32'})
        predictor_fc_fp_version = version_info['fc_version']
        self._add_ops(net, self.param_blobs, predictor_fc_fp_version)
Example #5
    def add_ops(self, net):
        cur_scope = get_current_scope()
        version = get_sparse_lookup_predictor_version(**cur_scope.get(
            get_sparse_lookup_predictor_version.__name__, {'version': 'fp32'}))

        if _is_id_list(self.input_record):
            self._add_ops_id_list(net, version=version)
        elif _is_id_score_list(self.input_record):
            self._add_ops_id_score_list(net, version=version)
        else:
            raise "Unsupported input type {0}".format(self.input_record)
Example #6
    def add_ops(self, net):
        cur_scope = get_current_scope()
        version = get_sparse_lookup_predictor_version(
            **cur_scope.get(get_sparse_lookup_predictor_version.__name__,
                            {'version': 'fp32'}))

        if _is_id_list(self.input_record):
            self._add_ops_id_list(net, version=version)
        elif _is_id_score_list(self.input_record):
            self._add_ops_id_score_list(net, version=version)
        else:
            raise "Unsupported input type {0}".format(self.input_record)
Example #7
    def add_ops(self, net):
        cur_scope = get_current_scope()
        version = get_sparse_lookup_predictor_version(**cur_scope.get(
            get_sparse_lookup_predictor_version.__name__, {'version': 'fp32'}))

        # TODO(amalevich): Layer should not be responsible for decision about
        # quantization.
        if not self.support_8bit() and version in {
                'uint8rowwise', 'fused_uint8rowwise'
        }:
            version = 'fp32'

        self._add_ops(net, version)
Example #8
    def add_ops(self, net):
        cur_scope = get_current_scope()
        version = get_sparse_lookup_predictor_version(**cur_scope.get(
            get_sparse_lookup_predictor_version.__name__, {'version': 'fp32'}))

        if schema.equal_schemas(self.input_record, IdList):
            self._add_ops_id_list(net, version=version)
        elif schema.equal_schemas(self.input_record,
                                  IdScoreList,
                                  check_field_types=False):
            self._add_ops_id_score_list(net, version=version)
        else:
            raise "Unsupported input type {0}".format(self.input_record)
Example #9
    def add_ops(self, net):
        cur_scope = get_current_scope()
        version = get_sparse_lookup_predictor_version(
            **cur_scope.get(get_sparse_lookup_predictor_version.__name__,
                            {'version': 'fp32'}))

        if schema.equal_schemas(self.input_record, IdList):
            self._add_ops_id_list(net, version=version)
        elif schema.equal_schemas(self.input_record,
                                  IdScoreList,
                                  check_field_types=False):
            self._add_ops_id_score_list(net, version=version)
        else:
            raise "Unsupported input type {0}".format(self.input_record)
Example #10
    def add_ops(self, net):
        cur_scope = get_current_scope()
        version = get_sparse_lookup_predictor_version(**cur_scope.get(
            get_sparse_lookup_predictor_version.__name__, {'version': 'fp32'}))

        # TODO(amalevich): Layer should not be responsible for decision about
        # quantization.
        if not self.support_8bit() and version in {
                'uint8rowwise', 'fused_uint8rowwise'
        }:
            version = 'fp32'

        if _is_id_list(self.input_record):
            self._add_ops_id_list(net, version=version)
        elif _is_id_score_list(self.input_record):
            self._add_ops_id_score_list(net, version=version)
        else:
            raise "Unsupported input type {0}".format(self.input_record)
Example #11
    def add_ops(self, net):
        cur_scope = get_current_scope()
        version = get_sparse_lookup_predictor_version(
            **cur_scope.get(get_sparse_lookup_predictor_version.__name__,
                            {'version': 'fp32'}))

        # TODO(amalevich): Layer should not be responsible for decision about
        # quantization.
        if not self.support_8bit() and version in {'uint8rowwise',
                                                   'fused_uint8rowwise'}:
            version = 'fp32'

        if _is_id_list(self.input_record):
            self._add_ops_id_list(net, version=version)
        elif _is_id_score_list(self.input_record):
            self._add_ops_id_score_list(net, version=version)
        else:
            raise "Unsupported input type {0}".format(self.input_record)
Example #12
    def __init__(self,
                 model,
                 input_record,
                 inner_shape,
                 reducer,
                 weight_init=None,
                 weight_optim=None,
                 name='sparse_lookup',
                 regularizer=None,
                 **kwargs):

        super(SparseLookup, self).__init__(model, name, input_record, **kwargs)

        # TODO Add some asserts about input type
        if isinstance(inner_shape, int):
            inner_shape = [inner_shape]
        assert isinstance(inner_shape, (list, tuple)), \
            "Unexpected type for inner_shape, expected list or tuple, got {0}".format(
                type(inner_shape))

        if reducer == "PositionWeighted":
            assert _is_id_score_list(self.input_record), (
                "PositionWeighted only supports IdScoreList, but got {}; "
                "please use the PositionWeighted layer to convert IdList "
                "to IdScoreList").format(repr(self.input_record))
            self.external_weights = input_record.values()

        elif reducer == "RecencyWeighted":
            assert _is_id_score_list(self.input_record), (
                "RecencyWeighted only supports IdScoreList.")
            self.external_weights = input_record.values()
        self.reducer = reducer

        input_dim = get_categorical_limit(input_record)
        assert input_dim > 0, (
            "{} should have categorical limit > 0, but got {}".format(
                get_key(input_record)(), input_dim))

        self.input_dim = input_dim
        self.shape = [input_dim] + list(inner_shape)

        cur_scope = get_current_scope()
        trainer_version = get_sparse_lookup_trainer_version(**cur_scope.get(
            get_sparse_lookup_trainer_version.__name__, {'version': 'fp32'}))

        self.trainer_version = trainer_version

        default_init_op = self._get_default_init_op()

        self.weight_init = weight_init or default_init_op

        if self.trainer_version == "fp16":
            assert self.weight_init[0] in self._fp16_compatible_init_op_types, \
                "Fp16 training is enabled. Init op for weight parameter must be " \
                "fp16-compatible. Got {}. Supported ops: {}".format(
                    self.weight_init[0],
                    self._fp16_compatible_init_op_types)

        if _is_id_list(self.input_record):
            sparse_key = self.input_record.items()
        elif _is_id_score_list(self.input_record):
            sparse_key = self.input_record.keys()
        else:
            raise NotImplementedError()

        if self.input_record.lengths.metadata:
            avg_length = self.input_record.lengths.metadata.expected_value
        else:
            avg_length = None

        self.w = self.create_param(param_name='w',
                                   shape=self.shape,
                                   initializer=self.weight_init,
                                   optimizer=weight_optim,
                                   ps_param=LayerPsParam(
                                       sparse_key=sparse_key,
                                       average_length=avg_length),
                                   regularizer=regularizer)

        self.scale_bias_init = ('ConstantFill', {'value': 0.0})

        self.scale_bias = self.create_param(
            param_name='scale_bias',
            shape=[],
            initializer=self.scale_bias_init,
            optimizer=model.NoOptim,
        )

        self.output_schema = schema.Scalar(
            (np.float32, inner_shape),
            self.get_next_blob_reference('output'),
        )
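
Putting the constructor together: inner_shape is normalized so the embedding table shape is always [input_dim, *dims], the trainer version chooses the init-op precision, and the table itself is created as the 'w' param with a per-row sparse key for the parameter server. A small sketch of just the shape normalization (the helper name is hypothetical; note that list(inner_shape) is what lets a tuple inner_shape concatenate cleanly):

def embedding_shape(input_dim, inner_shape):
    # Mirrors the constructor: a bare int becomes a one-element list,
    # so the table shape is always [num_ids, *embedding_dims].
    if isinstance(inner_shape, int):
        inner_shape = [inner_shape]
    assert isinstance(inner_shape, (list, tuple)), type(inner_shape)
    return [input_dim] + list(inner_shape)


print(embedding_shape(10000, 64))      # [10000, 64]
print(embedding_shape(10000, (8, 8)))  # [10000, 8, 8]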