    def __init__(
            self,
            model,
            input_record,
            output_dims,
            sigma,  # bandwidth
            w_init=None,
            b_init=None,
            name='random_fourier_features',
            **kwargs):

        super(RandomFourierFeatures, self).__init__(model, name, input_record,
                                                    **kwargs)
        assert isinstance(input_record, schema.Scalar), "Incorrect input type"

        input_dims = input_record.field_type().shape[0]
        assert input_dims >= 1, "Expected input dimensions >= 1, got %s" \
                                % input_dims
        self.output_dims = output_dims
        assert self.output_dims >= 1, "Expected output dimensions >= 1, got %s" \
                                      % self.output_dims

        self.output_schema = schema.Scalar(
            (np.float32, (self.output_dims, )),
            model.net.NextScopedBlob(name + '_output')
        )

        assert sigma > 0.0, "Expected bandwidth > 0, got %s" % sigma

        # Initialize train_init_net parameters
        w_init = w_init if w_init else (
            'GaussianFill', {'mean': 0.0, 'std': 1.0 / sigma}
        )

        b_init = b_init if b_init else (
            'UniformFill', {'min': 0.0, 'max': 2 * np.pi}
        )

        self.w = model.net.NextScopedBlob(name + "_w")
        self.b = model.net.NextScopedBlob(name + "_b")
        self.params.append(
            LayerParameter(
                parameter=self.w,
                initializer=core.CreateOperator(w_init[0],
                                                [],
                                                self.w,
                                                shape=(self.output_dims, input_dims),
                                                **w_init[1]
                                                ),
                optimizer=model.NoOptim))
        self.params.append(
            LayerParameter(
                parameter=self.b,
                initializer=core.CreateOperator(b_init[0],
                                                [],
                                                self.b,
                                                shape=[self.output_dims],
                                                **b_init[1]
                                                ),
                optimizer=model.NoOptim))
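
For intuition, here is a minimal NumPy sketch of the map this layer parameterizes, the random Fourier feature transform of Rahimi and Recht. The sqrt(2/D) scaling and the helper name are illustrative assumptions, not taken from this code:

import numpy as np

def random_fourier_features_sketch(x, output_dims, sigma, rng=np.random):
    # W ~ N(0, (1/sigma)^2), matching the GaussianFill std of 1/sigma above
    w = rng.normal(0.0, 1.0 / sigma, size=(output_dims, x.shape[-1]))
    # b ~ Uniform(0, 2*pi), matching the UniformFill above
    b = rng.uniform(0.0, 2 * np.pi, size=output_dims)
    # cos(Wx + b); the sqrt(2/D) factor (an assumption here) makes the
    # implied Gaussian-kernel estimate unbiased
    return np.sqrt(2.0 / output_dims) * np.cos(x @ w.T + b)
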
Example 2
    def __init__(
        self,
        model,
        input_record,
        num_samples,
        num_elements,
        name='uniform_sampling',
        **kwargs
    ):
        super(UniformSampling, self).__init__(
            model, name, input_record, **kwargs
        )

        assert num_elements > 0
        assert isinstance(input_record, schema.Scalar)

        self.num_elements = num_elements

        self.num_samples = model.net.NextScopedBlob(name + "_num_samples")
        self.params.append(
            LayerParameter(
                parameter=self.num_samples,
                initializer=core.CreateOperator(
                    "GivenTensorInt64Fill",
                    [],
                    self.num_samples,
                    shape=(1, ),
                    values=[num_samples],
                ),
                optimizer=model.NoOptim,
            )
        )

        self.sampling_prob = model.net.NextScopedBlob(name + "_prob")
        self.params.append(
            LayerParameter(
                parameter=self.sampling_prob,
                initializer=core.CreateOperator(
                    "ConstantFill",
                    [],
                    self.sampling_prob,
                    shape=(num_samples, ),
                    value=float(num_samples) / num_elements,
                    dtype=core.DataType.FLOAT
                ),
                optimizer=model.NoOptim,
            )
        )

        self.output_schema = schema.Struct(
            (
                'samples', schema.Scalar(
                    np.int32, model.net.NextScopedBlob(name + "_samples")
                )
            ),
            ('sampling_prob', schema.Scalar(np.float32, self.sampling_prob)),
        )
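
The two parameters above encode a simple scheme: draw num_samples ids uniformly from [0, num_elements) and record the constant inclusion probability num_samples / num_elements. A rough NumPy sketch (sampling with replacement here; the real operator's replacement and de-duplication semantics may differ):

import numpy as np

def uniform_sampling_sketch(num_samples, num_elements, rng=np.random):
    samples = rng.randint(0, num_elements, size=num_samples).astype(np.int32)
    # every sampled id carries the same probability, as initialized above
    prob = np.full(num_samples, float(num_samples) / num_elements,
                   dtype=np.float32)
    return samples, prob
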
Example 3
    def __init__(self,
                 model,
                 input_record,
                 output_dims,
                 weight_init=None,
                 bias_init=None,
                 weight_optim=None,
                 bias_optim=None,
                 name='fc',
                 **kwargs):
        super(FC, self).__init__(model, name, input_record, **kwargs)
        assert isinstance(input_record, schema.Scalar), "Incorrect input type"
        assert len(input_record.field_types()[0].shape) > 0, (
            "FC expects limited dimensions of the input tensor")

        input_dims = input_record.field_types()[0].shape[0]
        assert input_dims > 0, (
            "FC expects input dimensions > 0, got {}".format(input_dims))

        self.output_schema = schema.Scalar(
            (np.float32, (output_dims, )),
            model.net.NextScopedBlob(name + '_output'))

        scale = math.sqrt(1.0 / input_dims)
        weight_init = weight_init if weight_init else ('UniformFill', {
            'min': -scale,
            'max': scale
        })
        bias_init = bias_init if bias_init else ('UniformFill', {
            'min': -scale,
            'max': scale
        })

        self.w = model.net.NextScopedBlob(name + "_w")
        self.b = model.net.NextScopedBlob(name + "_b")

        self.params.append(
            LayerParameter(parameter=self.w,
                           initializer=core.CreateOperator(
                               weight_init[0], [],
                               self.w,
                               shape=[output_dims, input_dims],
                               **weight_init[1]),
                           optimizer=weight_optim))
        self.params.append(
            LayerParameter(parameter=self.b,
                           initializer=core.CreateOperator(bias_init[0], [],
                                                           self.b,
                                                           shape=[
                                                               output_dims,
                                                           ],
                                                           **bias_init[1]),
                           optimizer=bias_optim))
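
The forward computation this sets up is the usual affine map, with both parameters drawn from Uniform(-sqrt(1/input_dims), sqrt(1/input_dims)). A minimal NumPy sketch under those assumptions (helper name and sizes are illustrative):

import math
import numpy as np

def fc_forward_sketch(x, w, b):
    # w has shape (output_dims, input_dims), matching the initializer above
    return x @ w.T + b

input_dims, output_dims = 16, 8
scale = math.sqrt(1.0 / input_dims)
w = np.random.uniform(-scale, scale, size=(output_dims, input_dims))
b = np.random.uniform(-scale, scale, size=output_dims)
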
Example 4
    def __init__(self,
                 model,
                 input_record,
                 weight_optim=None,
                 name="position_weights"):
        super(PositionWeighted, self).__init__(model, name, input_record)

        # TODO: Replace this with correct estimation after we compute
        # cardinality from run_meta
        self.shape = 1000

        self.pos_w = model.net.NextScopedBlob(name + "_pos_w")
        self.params.append(
            LayerParameter(parameter=self.pos_w,
                           initializer=core.CreateOperator('ConstantFill', [],
                                                           self.pos_w,
                                                           shape=[
                                                               self.shape,
                                                           ],
                                                           value=1.0),
                           optimizer=weight_optim))

        self.output_schema = schema.Struct(
            ('position_weights',
             schema.Scalar((np.float32, self.shape),
                           model.net.NextScopedBlob(name + "_pos_w_gather"))))

        self.tags.update({Tags.HANDLE_AS_SPARSE_LAYER})
        self.tags.update({Tags.GRADIENT_FROM_PS})
Example 5
    def __init__(self,
                 model,
                 input_record,
                 max_index,
                 name='map_to_range',
                 **kwargs):
        super(MapToRange, self).__init__(model, name, input_record, **kwargs)

        assert max_index > 0
        assert isinstance(input_record, schema.Scalar)

        self.max_index = max_index
        self.handler = model.net.NextScopedBlob(name + "_handler")

        self.params.append(
            LayerParameter(
                parameter=self.handler,
                initializer=core.CreateOperator(
                    "LongIndexCreate",
                    [],
                    self.handler,
                    max_elements=self.max_index,
                ),
                optimizer=model.NoOptim,
            ))

        self.output_schema = schema.Scalar(
            np.int64, model.net.NextScopedBlob(name + "_indices"))
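
LongIndexCreate builds a stateful index that assigns each distinct int64 id a dense slot in [0, max_elements). A dict-based stand-in for that behavior (the overflow handling here, mapping ids past capacity to 0, is an assumption; the real operator may behave differently):

class MapToRangeSketch(object):
    def __init__(self, max_index):
        self.max_index = max_index
        self.mapping = {}

    def get(self, raw_ids):
        out = []
        for raw_id in raw_ids:
            # assign the next free slot to unseen ids while capacity remains
            if raw_id not in self.mapping and len(self.mapping) < self.max_index:
                self.mapping[raw_id] = len(self.mapping)
            out.append(self.mapping.get(raw_id, 0))
        return out
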
Example 6
    def _initialize_params(self,
                           w_blob,
                           b_blob,
                           w_init=None,
                           b_init=None,
                           w_optim=None,
                           b_optim=None):
        """
        Initializes the Layer Parameters for weight and bias terms for features

        Inputs :
            w_blob -- blob to contain w values
            b_blob -- blob to contain b values
            w_init -- initialization distribution for weight parameter
            b_init -- initialization distribution for bias parameter
            w_optim -- optimizer to use for w; if None, then will use no optimizer
            b_optim -- optimizer to use for b; if None, then will use no optimizer
        """

        w_init = w_init if w_init else ('GaussianFill', {
            'mean': 0.0,
            'std': self.stddev
        })
        w_optim = w_optim if w_optim else self.model.NoOptim

        b_init = b_init if b_init else ('UniformFill', {
            'min': -0.5 * self.stddev,
            'max': 0.5 * self.stddev
        })
        b_optim = b_optim if b_optim else self.model.NoOptim

        w_param = LayerParameter(parameter=w_blob,
                                 initializer=core.CreateOperator(
                                     w_init[0], [],
                                     w_blob,
                                     shape=(self.output_dims, self.input_dims),
                                     **w_init[1]),
                                 optimizer=w_optim)
        b_param = LayerParameter(parameter=b_blob,
                                 initializer=core.CreateOperator(
                                     b_init[0], [],
                                     b_blob,
                                     shape=[self.output_dims],
                                     **b_init[1]),
                                 optimizer=b_optim)

        return [w_param, b_param]
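
A hypothetical call site, assuming the enclosing layer defines self.stddev, self.input_dims, and self.output_dims as the docstring implies (the blob and optimizer names below are illustrative, not from this code):

        # Hypothetical usage inside a layer's __init__; weight_optim and
        # bias_optim are assumed arguments of that layer.
        w_blob = self.model.net.NextScopedBlob(self.name + "_w")
        b_blob = self.model.net.NextScopedBlob(self.name + "_b")
        self.params.extend(
            self._initialize_params(w_blob, b_blob,
                                    w_optim=weight_optim,
                                    b_optim=bias_optim))
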
Example 7
    def __init__(self,
                 model,
                 input_record,
                 num_to_collect,
                 name='last_n_window_collector',
                 **kwargs):
        super(LastNWindowCollector, self).__init__(model, name, input_record,
                                                   **kwargs)
        assert num_to_collect > 0
        self.num_to_collect = num_to_collect
        assert isinstance(input_record, schema.Scalar), \
            "Got {!r}".format(input_record)

        self.last_n = model.net.NextScopedBlob(self.name + "_last_n")
        self.next_blob = model.net.NextScopedBlob(self.name + "_next")

        self.params.append(
            LayerParameter(
                parameter=self.last_n,
                initializer=core.CreateOperator('ConstantFill', [],
                                                self.last_n,
                                                shape=[0]),
                optimizer=model.NoOptim,
            ))
        self.params.append(
            LayerParameter(
                parameter=self.next_blob,
                initializer=core.CreateOperator(
                    'ConstantFill',
                    [],
                    self.next_blob,
                    shape=[],
                    value=0,
                    dtype=core.DataType.INT32,
                ),
                optimizer=model.NoOptim,
            ))

        self.output_schema = schema.from_blob_list(
            input_record, [model.net.NextScopedBlob(name + "_output")])
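
The two blobs above hold a bounded buffer and a write cursor; behaviorally the layer keeps only the most recent num_to_collect rows it has seen. A plain-Python analogue using a deque:

from collections import deque

class LastNWindowSketch(object):
    def __init__(self, num_to_collect):
        self.window = deque(maxlen=num_to_collect)

    def collect(self, rows):
        # older rows fall off the front once the window is full
        self.window.extend(rows)
        return list(self.window)
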
Example 8
    def __init__(self,
                 model,
                 input_record,
                 weight_optim=None,
                 name="position_weights"):
        super(PositionWeighted, self).__init__(model, name, input_record)

        assert isinstance(input_record, schema.List), "Incorrect input type"
        length_metadata = input_record.lengths.metadata
        max_length = (length_metadata.categorical_limit
                      if length_metadata is not None else None)
        if max_length is not None:
            self.shape = max_length
        else:
            self.shape = get_categorical_limit(input_record)
            logger.warning(
                '{}: categorical_limit of lengths is not available, using '
                'categorical_limit of the keys: {}'.format(
                    str(input_record.lengths()), self.shape))

        self.pos_w = model.net.NextScopedBlob(name + "_pos_w")
        self.params.append(
            LayerParameter(parameter=self.pos_w,
                           initializer=core.CreateOperator('ConstantFill', [],
                                                           self.pos_w,
                                                           shape=[
                                                               self.shape,
                                                           ],
                                                           value=1.0),
                           optimizer=weight_optim))

        self.output_schema = schema.Struct(
            ('position_weights',
             schema.Scalar((np.float32, self.shape),
                           model.net.NextScopedBlob(name + "_pos_w_gather"))))

        self.tags.update({Tags.HANDLE_AS_SPARSE_LAYER})
        self.tags.update({Tags.GRADIENT_FROM_PS})
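
The learned pos_w vector is gathered by position within each list, so every id contributes a weight determined by where it appears. A NumPy sketch of that gather (the helper name is illustrative):

import numpy as np

def position_weights_sketch(lengths, pos_w):
    # for each list of length n, emit pos_w[0], ..., pos_w[n - 1]
    return np.concatenate([pos_w[:n] for n in lengths])
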
Example 9
    def __init__(self,
                 model,
                 input_record,
                 bias_init=None,
                 bias_optim=None,
                 name='add_bias'):
        super(AddBias, self).__init__(model, name, input_record)
        assert isinstance(input_record, schema.Scalar), "Incorrect input type"
        assert len(input_record.field_type().shape) > 0, (
            "AddBias expects limited dimensions of the input tensor")

        input_dims = input_record.field_type().shape[0]
        assert input_dims > 0, (
            "AddBias expects input dimensions > 0, got {}".format(input_dims))

        self.output_schema = schema.Scalar(
            (input_record.field_type().base, (input_dims, )),
            model.net.NextScopedBlob(name + '_output'))

        scale = math.sqrt(1.0 / input_dims)
        bias_init = bias_init if bias_init else ('UniformFill', {
            'min': -scale,
            'max': scale
        })

        self.b = model.net.NextScopedBlob(name + "_b")

        self.params.append(
            LayerParameter(parameter=self.b,
                           initializer=core.CreateOperator(bias_init[0], [],
                                                           self.b,
                                                           shape=[
                                                               input_dims,
                                                           ],
                                                           **bias_init[1]),
                           optimizer=bias_optim))
Example 10
    def __init__(self,
                 model,
                 input_record,
                 name='batch_normalization',
                 scale_optim=None,
                 bias_optim=None,
                 momentum=0.9,
                 order='NCHW',
                 **kwargs):
        super(BatchNormalization, self).__init__(model, name, input_record,
                                                 **kwargs)

        assert isinstance(input_record, schema.Scalar), "Incorrect input type"

        self.input_shape = input_record.field_type().shape

        if len(self.input_shape) == 3:
            if order == "NCHW":
                input_dims = self.input_shape[0]
            elif order == "NHWC":
                input_dims = self.input_shape[2]
            else:
                raise ValueError("Please specify a correct order")
        else:
            assert len(self.input_shape) == 1, (
                "This layer supports only 4D or 2D tesnors")
            input_dims = self.input_shape[0]

        self.output_schema = schema.Scalar(
            (np.float32, self.input_shape),
            model.net.NextScopedBlob(name + '_output'))

        self.momentum = momentum
        self.order = order

        self.scale = model.net.NextScopedBlob(name + "_scale")
        self.bias = model.net.NextScopedBlob(name + "_bias")
        self.rm = model.net.NextScopedBlob(name + "_running_mean")
        self.riv = model.net.NextScopedBlob(name + "_running_inv_var")

        self.params.append(
            LayerParameter(parameter=self.scale,
                           initializer=core.CreateOperator(
                               'ConstantFill',
                               [],
                               self.scale,
                               shape=[input_dims],
                               value=1.0,
                           ),
                           optimizer=scale_optim))
        self.params.append(
            LayerParameter(parameter=self.bias,
                           initializer=core.CreateOperator(
                               'ConstantFill',
                               [],
                               self.bias,
                               shape=[input_dims],
                               value=0.0,
                           ),
                           optimizer=bias_optim))
        self.params.append(
            LayerParameter(parameter=self.rm,
                           initializer=core.CreateOperator(
                               'ConstantFill',
                               [],
                               self.rm,
                               shape=[input_dims],
                               value=0.0,
                           ),
                           optimizer=model.NoOptim))
        self.params.append(
            LayerParameter(parameter=self.riv,
                           initializer=core.CreateOperator(
                               'ConstantFill',
                               [],
                               self.riv,
                               shape=[input_dims],
                               value=1.0,
                           ),
                           optimizer=model.NoOptim))
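
At inference time these four parameters combine in the standard batch-norm formula. A NumPy sketch for 4D NCHW input, assuming the running blob stores the inverse variance 1/(var + eps) as its name suggests:

import numpy as np

def batch_norm_inference_sketch(x, scale, bias, running_mean, running_inv_var):
    shape = (1, -1, 1, 1)  # broadcast per-channel stats over N, H, W
    x_hat = (x - running_mean.reshape(shape)) * np.sqrt(
        running_inv_var.reshape(shape))
    return scale.reshape(shape) * x_hat + bias.reshape(shape)
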
Example 11
    def __init__(self,
                 model,
                 input_record,
                 inner_shape,
                 reducer,
                 weight_init=None,
                 weight_optim=None,
                 name='sparse_lookup',
                 **kwargs):
        super(SparseLookup, self).__init__(model, name, input_record, **kwargs)

        if isinstance(inner_shape, int):
            inner_shape = [inner_shape]
        assert isinstance(inner_shape, list) or isinstance(inner_shape, tuple),\
            "Unexpected type for inner_shape, expected list or tuple, got {0}".\
            format(type(inner_shape))

        # TODO Add some asserts about input type
        assert reducer in self._supported_reducers, "Unsupported reducer: {}".\
            format(reducer)
        self.reducer = reducer

        assert input_record.items.metadata is not None,\
            "Features without metadata are not supported"
        input_dim = input_record.items.metadata.categorical_limit
        assert input_dim is not None, "Unbounded features are not supported"

        self.output_schema = schema.Scalar(
            (np.float32, inner_shape),
            model.net.NextScopedBlob(name + '_output'),
        )

        if self.request_only:
            schema.attach_metadata_to_scalars(
                self.output_schema,
                schema.Metadata(categorical_limit=None,
                                expected_value=None,
                                feature_specs=schema.FeatureSpec(
                                    feature_is_request_only=True)))
        scale = math.sqrt(1.0 / input_dim)
        self.shape = [input_dim] + inner_shape
        self.weight_init = weight_init if weight_init else ('UniformFill', {
            'min': -scale,
            'max': scale
        })

        self.w = model.net.NextScopedBlob(name + "_w")
        self.params.append(
            LayerParameter(parameter=self.w,
                           initializer=core.CreateOperator(
                               self.weight_init[0], [],
                               self.w,
                               shape=self.shape,
                               **self.weight_init[1]),
                           optimizer=weight_optim))

        if reducer == 'PositionWeighted':
            self.pos_w = model.net.NextScopedBlob(name + "_pos_w")
            self.params.append(
                LayerParameter(parameter=self.pos_w,
                               initializer=core.CreateOperator('ConstantFill',
                                                               [],
                                                               self.pos_w,
                                                               shape=[
                                                                   input_dim,
                                                               ],
                                                               value=1.0),
                               optimizer=weight_optim))
Example 12
    def __init__(self,
                 model,
                 input_record,
                 inner_shape,
                 reducer,
                 weight_init=None,
                 weight_optim=None,
                 name='sparse_lookup',
                 **kwargs):

        super(SparseLookup, self).__init__(model, name, input_record, **kwargs)

        if reducer == "PositionWeighted":
            self.external_weights = input_record.values()

        if isinstance(inner_shape, int):
            inner_shape = [inner_shape]
        assert isinstance(inner_shape, list) or isinstance(inner_shape, tuple),\
            "Unexpected type for inner_shape, expected list or tuple, got {0}".\
            format(type(inner_shape))

        # TODO Add some asserts about input type
        assert reducer in self._supported_reducers, "Unsupported reducer: {}".\
            format(reducer)
        self.reducer = reducer

        input_dim = get_categorical_limit(input_record)

        assert input_dim is not None, "Unbounded features are not supported"

        self.output_schema = schema.Scalar(
            (np.float32, inner_shape),
            model.net.NextScopedBlob(name + '_output'),
        )

        scale = math.sqrt(1.0 / input_dim)
        self.shape = [input_dim] + inner_shape
        self.weight_init = weight_init if weight_init else ('UniformFill', {
            'min': -scale,
            'max': scale
        })

        self.w = model.net.NextScopedBlob(name + "_w")
        if schema.equal_schemas(self.input_record, IdList):
            sparse_key = self.input_record.items()
        elif schema.equal_schemas(self.input_record,
                                  IdScoreList,
                                  check_field_types=False):
            sparse_key = self.input_record.keys()
        else:
            raise NotImplementedError()

        if self.input_record.lengths.metadata:
            avg_length = self.input_record.lengths.metadata.expected_value
        else:
            avg_length = None
        self.params.append(
            LayerParameter(parameter=self.w,
                           initializer=core.CreateOperator(
                               self.weight_init[0], [],
                               self.w,
                               shape=self.shape,
                               **self.weight_init[1]),
                           optimizer=weight_optim,
                           ps_param=LayerPsParam(
                               sparse_key=sparse_key,
                               average_length=avg_length,
                           )))
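
Behaviorally, the layer gathers rows of w by id and pools them per list according to the reducer. A NumPy sketch covering the Sum and Mean cases (PositionWeighted would additionally scale each gathered row by a per-position weight before summing):

import numpy as np

def sparse_lookup_sketch(w, ids, lengths, reducer='Sum'):
    out, offset = [], 0
    for n in lengths:
        rows = w[ids[offset:offset + n]]  # gather embeddings for one list
        out.append(rows.sum(axis=0) if reducer == 'Sum'
                   else rows.mean(axis=0))
        offset += n
    return np.stack(out)
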
Example 13
    def __init__(self,
                 model,
                 input_record,
                 num_to_collect,
                 name='reservoir_sampling',
                 **kwargs):
        super(ReservoirSampling, self).__init__(model, name, input_record,
                                                **kwargs)
        assert num_to_collect > 0
        self.num_to_collect = num_to_collect

        self.reservoir = model.net.NextScopedBlob(name + "_reservoir")
        self.num_visited_blob = model.net.NextScopedBlob(name + "_num_visited")
        self.mutex = model.net.NextScopedBlob(name + "_mutex")

        self.params.append(
            LayerParameter(
                parameter=self.reservoir,
                initializer=core.CreateOperator('ConstantFill', [],
                                                self.reservoir,
                                                shape=[0]),
                optimizer=model.NoOptim,
            ))
        self.params.append(
            LayerParameter(
                parameter=self.num_visited_blob,
                initializer=core.CreateOperator(
                    'ConstantFill',
                    [],
                    self.num_visited_blob,
                    shape=[],
                    value=0,
                    dtype=core.DataType.INT64,
                ),
                optimizer=model.NoOptim,
            ))
        self.params.append(
            LayerParameter(
                parameter=self.mutex,
                initializer=core.CreateOperator("CreateMutex", [], self.mutex),
                optimizer=model.NoOptim,
            ))

        self.extra_input_blobs = []
        self.extra_output_blobs = []
        if 'object_id' in input_record:
            self.extra_input_blobs.append(input_record.object_id())
            object_to_pos = model.net.NextScopedBlob(name + "_object_to_pos")
            pos_to_object = model.net.NextScopedBlob(name + "_pos_to_object")
            self.extra_input_blobs.extend([object_to_pos, pos_to_object])
            self.extra_output_blobs.extend([object_to_pos, pos_to_object])
            self.params.append(
                LayerParameter(
                    parameter=object_to_pos,
                    initializer=core.CreateOperator(
                        'CreateMap',
                        [],
                        object_to_pos,
                        key_dtype=core.DataType.INT64,
                        valued_dtype=core.DataType.INT32,
                    ),
                    optimizer=model.NoOptim,
                ))
            self.params.append(
                LayerParameter(
                    parameter=pos_to_object,
                    initializer=core.CreateOperator(
                        'ConstantFill',
                        [],
                        pos_to_object,
                        shape=[0],
                        value=0,
                        dtype=core.DataType.INT64,
                    ),
                    optimizer=model.NoOptim,
                ))

        self.output_schema = schema.Struct(
            ('reservoir',
             schema.from_blob_list(input_record.data, [self.reservoir])),
            ('num_visited', schema.Scalar(blob=self.num_visited_blob)),
            ('mutex', schema.Scalar(blob=self.mutex)),
        )
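
The reservoir, counter, and mutex blobs implement the classic single-pass reservoir sampling scheme (algorithm R): after n rows have been visited, each is in the reservoir with equal probability num_to_collect / n. A plain-Python sketch of that logic:

import random

class ReservoirSketch(object):
    def __init__(self, num_to_collect):
        self.k = num_to_collect
        self.reservoir = []
        self.num_visited = 0

    def collect(self, rows):
        for row in rows:
            if len(self.reservoir) < self.k:
                self.reservoir.append(row)
            else:
                # replace a random slot with probability k / (num_visited + 1)
                j = random.randint(0, self.num_visited)  # inclusive bounds
                if j < self.k:
                    self.reservoir[j] = row
            self.num_visited += 1
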