Code Example #1
    def testRandomFourierFeatures(self, batch_size, input_dims, output_dims,
                                  bandwidth):
        X = np.random.random((batch_size, input_dims)).astype(np.float32)
        scale = np.sqrt(2.0 / output_dims)
        input_record = self.new_record(
            schema.Scalar((np.float32, (input_dims, ))))
        schema.FeedRecord(input_record, [X])
        input_blob = input_record.field_blobs()[0]
        rff_output = self.model.RandomFourierFeatures(input_record,
                                                      output_dims, bandwidth)
        self.model.output_schema = schema.Struct()

        self.assertEqual(schema.Scalar((np.float32, (output_dims, ))),
                         rff_output)

        train_init_net, train_net = self.get_training_nets()

        # Init net assertions
        init_ops = self.assertNetContainOps(train_init_net, [
            OpSpec("GaussianFill", None, None),
            OpSpec("UniformFill", None, None),
        ])

        # Operation specifications
        mat_mul_spec = OpSpec("MatMul", [input_blob, init_ops[0].output[0]],
                              None)
        add_spec = OpSpec("Add", [None, init_ops[1].output[0]], None, {
            'broadcast': 1,
            'axis': 1
        })
        cosine_spec = OpSpec("Cos", None, None)
        scale_spec = OpSpec("Scale", None, rff_output.field_blobs(),
                            {'scale': scale})

        # Train net assertions
        self.assertNetContainOps(
            train_net, [mat_mul_spec, add_spec, cosine_spec, scale_spec])

        workspace.RunNetOnce(train_init_net)
        W = workspace.FetchBlob(self.model.layers[0].w)
        b = workspace.FetchBlob(self.model.layers[0].b)

        workspace.RunNetOnce(train_net)
        train_output = workspace.FetchBlob(rff_output())
        train_ref = scale * np.cos(np.dot(X, W) + b)
        npt.assert_almost_equal(train_output, train_ref)

        # Eval net assertions
        eval_net = self.get_eval_net()
        self.assertNetContainOps(
            eval_net, [mat_mul_spec, add_spec, cosine_spec, scale_spec])
        schema.FeedRecord(input_record, [X])
        workspace.RunNetOnce(eval_net)

        eval_output = workspace.FetchBlob(rff_output())
        eval_ref = scale * np.cos(np.dot(X, W) + b)
        npt.assert_almost_equal(eval_output, eval_ref)

        # Predict net assertions
        predict_net = self.get_predict_net()
        self.assertNetContainOps(
            predict_net, [mat_mul_spec, add_spec, cosine_spec, scale_spec])

        schema.FeedRecord(input_record, [X])
        workspace.RunNetOnce(predict_net)

        predict_output = workspace.FetchBlob(rff_output())
        predict_ref = scale * np.cos(np.dot(X, W) + b)
        npt.assert_almost_equal(predict_output, predict_ref)
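
The reference computation in this test, scale * np.cos(np.dot(X, W) + b) with scale = sqrt(2 / output_dims), is the classic random Fourier feature map, whose inner products approximate a Gaussian (RBF) kernel. A minimal NumPy sketch of that property, independent of Caffe2 (the sampling of W and b below is an assumption chosen to mirror the GaussianFill/UniformFill initializers asserted above):

    import numpy as np

    def rff_map(X, W, b, output_dims):
        # z(x) = sqrt(2/D) * cos(x W + b), the random Fourier feature map
        return np.sqrt(2.0 / output_dims) * np.cos(X @ W + b)

    rng = np.random.default_rng(0)
    d, D, bandwidth = 4, 4096, 1.0
    # Assumed sampling: W ~ N(0, 1/bandwidth^2) entrywise, b ~ Uniform[0, 2*pi)
    W = rng.normal(scale=1.0 / bandwidth, size=(d, D))
    b = rng.uniform(0.0, 2.0 * np.pi, size=D)

    x = rng.normal(size=(1, d))
    y = x + 0.1 * rng.normal(size=(1, d))
    approx = (rff_map(x, W, b, D) @ rff_map(y, W, b, D).T).item()
    exact = np.exp(-np.sum((x - y) ** 2) / (2.0 * bandwidth ** 2))
    print(approx, exact)  # the two values agree closely for large D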
Code Example #2
    def testGatherRecord(self):
        indices = np.array([1, 3, 4], dtype=np.int32)
        dense = np.array(list(range(20)), dtype=np.float32).reshape(10, 2)
        lengths = np.array(list(range(10)), dtype=np.int32)
        items = np.array(list(range(lengths.sum())), dtype=np.int64)
        items_lengths = np.array(list(range(lengths.sum())), dtype=np.int32)
        items_items = np.array(list(range(items_lengths.sum())), dtype=np.int64)
        record = self.new_record(schema.Struct(
            ('dense', schema.Scalar(np.float32)),
            ('sparse', schema.Struct(
                ('list', schema.List(np.int64)),
                ('list_of_list', schema.List(schema.List(np.int64))),
            )),
            ('empty_struct', schema.Struct())
        ))
        indices_record = self.new_record(schema.Scalar(np.int32))
        input_record = schema.Struct(
            ('indices', indices_record),
            ('record', record),
        )
        schema.FeedRecord(
            input_record,
            [indices, dense, lengths, items, lengths, items_lengths,
             items_items])
        gathered_record = self.model.GatherRecord(input_record)
        self.assertTrue(schema.equal_schemas(gathered_record, record))

        self.run_train_net_forward_only()
        gathered_dense = workspace.FetchBlob(gathered_record.dense())
        np.testing.assert_array_equal(
            np.concatenate([dense[i:i + 1] for i in indices]), gathered_dense)
        gathered_lengths = workspace.FetchBlob(
            gathered_record.sparse.list.lengths())
        np.testing.assert_array_equal(
            np.concatenate([lengths[i:i + 1] for i in indices]),
            gathered_lengths)
        gathered_items = workspace.FetchBlob(
            gathered_record.sparse.list.items())
        offsets = lengths.cumsum() - lengths
        np.testing.assert_array_equal(
            np.concatenate([
                items[offsets[i]: offsets[i] + lengths[i]]
                for i in indices
            ]), gathered_items)

        gathered_items_lengths = workspace.FetchBlob(
            gathered_record.sparse.list_of_list.items.lengths())
        np.testing.assert_array_equal(
            np.concatenate([
                items_lengths[offsets[i]: offsets[i] + lengths[i]]
                for i in indices
            ]),
            gathered_items_lengths
        )

        nested_offsets = []
        nested_lengths = []
        nested_offset = 0
        j = 0
        for l in lengths:
            nested_offsets.append(nested_offset)
            nested_length = 0
            for _i in range(l):
                nested_offset += items_lengths[j]
                nested_length += items_lengths[j]
                j += 1
            nested_lengths.append(nested_length)

        gathered_items_items = workspace.FetchBlob(
            gathered_record.sparse.list_of_list.items.items())
        np.testing.assert_array_equal(
            np.concatenate([
                items_items[nested_offsets[i]:
                            nested_offsets[i] + nested_lengths[i]]
                for i in indices
            ]),
            gathered_items_items
        )
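
The slicing pattern in this test relies on offsets = lengths.cumsum() - lengths, the exclusive prefix sum that locates each row of a lengths-encoded ragged list inside its flattened items array. A self-contained NumPy sketch of the same gather, outside Caffe2:

    import numpy as np

    lengths = np.array([2, 0, 3, 1], dtype=np.int32)  # sizes of 4 ragged rows
    items = np.arange(lengths.sum(), dtype=np.int64)  # flattened row contents
    indices = np.array([2, 0], dtype=np.int32)        # rows to gather

    # offsets[i] is where row i starts inside `items`.
    offsets = lengths.cumsum() - lengths

    gathered_lengths = lengths[indices]
    gathered_items = np.concatenate(
        [items[offsets[i]:offsets[i] + lengths[i]] for i in indices]
    )
    print(gathered_lengths)  # [3 2]
    print(gathered_items)    # row 2 then row 0: [2 3 4 0 1]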
Code Example #3
    def testRandomFourierFeatures(self, batch_size, input_dims, output_dims, bandwidth):

        def _rff_hypothesis_test(rff_output, X, W, b, scale):
            """
            Runs hypothesis test for the Random Fourier Features layer.

            Inputs:
                rff_output -- output of net after running random fourier features layer
                X -- input data
                W -- weight parameter from train_init_net
                b -- bias parameter from train_init_net
                scale -- value by which to scale the output vector
            """
            output = workspace.FetchBlob(rff_output)
            output_ref = scale * np.cos(np.dot(X, np.transpose(W)) + b)
            npt.assert_allclose(output, output_ref, rtol=1e-4)

        X = np.random.random((batch_size, input_dims)).astype(np.float32)
        scale = np.sqrt(2.0 / output_dims)
        input_record = self.new_record(schema.Scalar((np.float32, (input_dims,))))
        schema.FeedRecord(input_record, [X])
        input_blob = input_record.field_blobs()[0]
        rff_output = self.model.RandomFourierFeatures(input_record,
                                                      output_dims,
                                                      bandwidth)
        self.model.output_schema = schema.Struct()

        self.assertEqual(
            schema.Scalar((np.float32, (output_dims, ))),
            rff_output
        )

        train_init_net, train_net = self.get_training_nets()

        # Init net assertions
        init_ops_list = [
            OpSpec("GaussianFill", None, None),
            OpSpec("UniformFill", None, None),
        ]
        init_ops = self._test_net(train_init_net, init_ops_list)
        W = workspace.FetchBlob(self.model.layers[0].w)
        b = workspace.FetchBlob(self.model.layers[0].b)

        # Operation specifications
        fc_spec = OpSpec("FC", [input_blob, init_ops[0].output[0],
                         init_ops[1].output[0]], None)
        cosine_spec = OpSpec("Cos", None, None)
        scale_spec = OpSpec("Scale", None, rff_output.field_blobs(),
                            {'scale': scale})
        ops_list = [
            fc_spec,
            cosine_spec,
            scale_spec
        ]

        # Train net assertions
        self._test_net(train_net, ops_list)
        _rff_hypothesis_test(rff_output(), X, W, b, scale)

        # Eval net assertions
        eval_net = self.get_eval_net()
        self._test_net(eval_net, ops_list)
        _rff_hypothesis_test(rff_output(), X, W, b, scale)

        # Predict net assertions
        predict_net = self.get_predict_net()
        self._test_net(predict_net, ops_list)
        _rff_hypothesis_test(rff_output(), X, W, b, scale)
Code Example #4
    def testSemiRandomFeatures(self, batch_size, input_dims, output_dims, s, scale,
                               set_weight_as_global_constant, use_struct_input):

        def _semi_random_hypothesis_test(srf_output, X_full, X_random, rand_w,
                                         rand_b, s):
            """
            Runs hypothesis test for Semi Random Features layer.

            Inputs:
                srf_output -- output of net after running semi random features layer
                X_full -- full input data
                X_random -- random-output input data
                rand_w -- random-initialized weight parameter from train_init_net
                rand_b -- random-initialized bias parameter from train_init_net
                s -- degree parameter

            """
            # Get output from net
            net_output = workspace.FetchBlob(srf_output)

            # Fetch learned parameter blobs
            learned_w = workspace.FetchBlob(self.model.layers[0].learned_w)
            learned_b = workspace.FetchBlob(self.model.layers[0].learned_b)

            # Computing output directly
            x_rand = np.matmul(X_random, np.transpose(rand_w)) + rand_b
            x_learn = np.matmul(X_full, np.transpose(learned_w)) + learned_b
            x_pow = np.power(x_rand, s)
            if s > 0:
                h_rand_features = np.piecewise(x_rand,
                                               [x_rand <= 0, x_rand > 0],
                                               [0, 1])
            else:
                h_rand_features = np.piecewise(x_rand,
                                               [x_rand <= 0, x_rand > 0],
                                               [0, lambda x: x / (1 + x)])
            output_ref = np.multiply(np.multiply(x_pow, h_rand_features), x_learn)

            # Comparing net output and computed output
            npt.assert_allclose(net_output, output_ref, rtol=1e-4)

        X_full = np.random.normal(size=(batch_size, input_dims)).astype(np.float32)
        if use_struct_input:
            X_random = np.random.normal(size=(batch_size, input_dims)).\
                astype(np.float32)
            input_data = [X_full, X_random]
            input_record = self.new_record(schema.Struct(
                ('full', schema.Scalar(
                    (np.float32, (input_dims,))
                )),
                ('random', schema.Scalar(
                    (np.float32, (input_dims,))
                ))
            ))
        else:
            X_random = X_full
            input_data = [X_full]
            input_record = self.new_record(schema.Scalar(
                (np.float32, (input_dims,))
            ))

        schema.FeedRecord(input_record, input_data)
        srf_output = self.model.SemiRandomFeatures(
            input_record,
            output_dims,
            s=s,
            scale=scale,
            set_weight_as_global_constant=set_weight_as_global_constant
        )

        self.model.output_schema = schema.Struct()

        self.assertEqual(
            schema.Struct(
                ('full', schema.Scalar(
                    (np.float32, (output_dims,))
                )),
                ('random', schema.Scalar(
                    (np.float32, (output_dims,))
                ))
            ),
            srf_output
        )

        init_ops_list = [
            OpSpec("GaussianFill", None, None),
            OpSpec("UniformFill", None, None),
            OpSpec("GaussianFill", None, None),
            OpSpec("UniformFill", None, None),
        ]
        train_init_net, train_net = self.get_training_nets()

        # Need to run to initialize the global constants for layer
        workspace.RunNetOnce(self.model.create_init_net(name='init_net'))

        if set_weight_as_global_constant:
            # If weight params are global constants, they won't be in train_init_net
            init_ops = self._test_net(train_init_net, init_ops_list[:2])
            rand_w = workspace.FetchBlob(
                self.model.global_constants['semi_random_features_fixed_rand_W']
            )
            rand_b = workspace.FetchBlob(
                self.model.global_constants['semi_random_features_fixed_rand_b']
            )

            # Operation specifications
            fc_random_spec = OpSpec("FC", [None, None, None], None)
            fc_learned_spec = OpSpec("FC", [None, init_ops[0].output[0],
                                     init_ops[1].output[0]], None)
        else:
            init_ops = self._test_net(train_init_net, init_ops_list)
            rand_w = workspace.FetchBlob(self.model.layers[0].random_w)
            rand_b = workspace.FetchBlob(self.model.layers[0].random_b)

            # Operation specifications
            fc_random_spec = OpSpec("FC", [None, init_ops[0].output[0],
                                    init_ops[1].output[0]], None)
            fc_learned_spec = OpSpec("FC", [None, init_ops[2].output[0],
                                     init_ops[3].output[0]], None)

        softsign_spec = OpSpec("Softsign", None, None)
        relu_spec = OpSpec("Relu", None, None)
        relu_output_spec = OpSpec("Relu", None, srf_output.random.field_blobs())
        pow_spec = OpSpec("Pow", None, None, {'exponent': float(s - 1)})
        mul_interim_spec = OpSpec("Mul", None, srf_output.random.field_blobs())
        mul_spec = OpSpec("Mul", None, srf_output.full.field_blobs())

        if s == 0:
            ops_list = [
                fc_learned_spec,
                fc_random_spec,
                softsign_spec,
                relu_output_spec,
                mul_spec,
            ]
        elif s == 1:
            ops_list = [
                fc_learned_spec,
                fc_random_spec,
                relu_output_spec,
                mul_spec,
            ]
        else:
            ops_list = [
                fc_learned_spec,
                fc_random_spec,
                relu_spec,
                pow_spec,
                mul_interim_spec,
                mul_spec,
            ]

        # Train net assertions
        self._test_net(train_net, ops_list)
        _semi_random_hypothesis_test(srf_output.full(), X_full, X_random,
                                     rand_w, rand_b, s)

        # Eval net assertions
        eval_net = self.get_eval_net()
        self._test_net(eval_net, ops_list)
        _semi_random_hypothesis_test(srf_output.full(), X_full, X_random,
                                     rand_w, rand_b, s)

        # Predict net assertions
        predict_net = self.get_predict_net()
        self._test_net(predict_net, ops_list)
        _semi_random_hypothesis_test(srf_output.full(), X_full, X_random,
                                     rand_w, rand_b, s)
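
The reference formula checked by _semi_random_hypothesis_test is a random gate applied to a learned linear response: output = x_rand^s * h(x_rand) * x_learn, where h is a step function for s >= 1 and a softsign-like x/(1+x) on the positive part for s = 0. A standalone NumPy sketch of that formula (the function and variable names here are illustrative, not the layer's API):

    import numpy as np

    def semi_random_features(X_full, X_random, rand_w, rand_b,
                             learned_w, learned_b, s):
        # Random (frozen) projection and learned projection, as in the test.
        x_rand = X_random @ rand_w.T + rand_b
        x_learn = X_full @ learned_w.T + learned_b
        if s > 0:
            gate = (x_rand > 0).astype(x_rand.dtype)          # Heaviside step
        else:
            gate = np.where(x_rand > 0, x_rand / (1 + x_rand), 0.0)
        return np.power(x_rand, s) * gate * x_learn

    rng = np.random.default_rng(0)
    X = rng.normal(size=(3, 5)).astype(np.float32)
    rand_w, rand_b = rng.normal(size=(4, 5)), rng.normal(size=4)
    learned_w, learned_b = rng.normal(size=(4, 5)), rng.normal(size=4)
    # s=1 reduces to relu(x_rand) * x_learn, matching the Relu/Mul op list.
    print(semi_random_features(X, X, rand_w, rand_b, learned_w, learned_b, s=1))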
Code Example #5
    def testBatchNormalization(self, X):
        input_record = self.new_record(schema.Scalar((np.float32, (5,))))
        schema.FeedRecord(input_record, [X])
        bn_output = self.model.BatchNormalization(input_record)
        self.assertEqual(schema.Scalar((np.float32, (5,))), bn_output)
        self.model.output_schema = schema.Struct()

        train_init_net, train_net = self.get_training_nets()

        init_ops = self.assertNetContainOps(
            train_init_net,
            [
                OpSpec("ConstantFill", None, None),
                OpSpec("ConstantFill", None, None),
                OpSpec("ConstantFill", None, None),
                OpSpec("ConstantFill", None, None),
            ]
        )

        input_blob = input_record.field_blobs()[0]
        output_blob = bn_output.field_blobs()[0]

        expand_dims_spec = OpSpec(
            "ExpandDims",
            [input_blob],
            None,
        )

        train_bn_spec = OpSpec(
            "SpatialBN",
            [None, init_ops[0].output[0], init_ops[1].output[0],
                init_ops[2].output[0], init_ops[3].output[0]],
            [output_blob, init_ops[2].output[0], init_ops[3].output[0], None, None],
            {'is_test': 0, 'order': 'NCHW', 'momentum': 0.9},
        )

        test_bn_spec = OpSpec(
            "SpatialBN",
            [None, init_ops[0].output[0], init_ops[1].output[0],
                init_ops[2].output[0], init_ops[3].output[0]],
            [output_blob],
            {'is_test': 1, 'order': 'NCHW', 'momentum': 0.9},
        )

        squeeze_spec = OpSpec(
            "Squeeze",
            [output_blob],
            [output_blob],
        )

        self.assertNetContainOps(
            train_net,
            [expand_dims_spec, train_bn_spec, squeeze_spec]
        )

        eval_net = self.get_eval_net()

        self.assertNetContainOps(
            eval_net,
            [expand_dims_spec, test_bn_spec, squeeze_spec]
        )

        predict_net = self.get_predict_net()

        self.assertNetContainOps(
            predict_net,
            [expand_dims_spec, test_bn_spec, squeeze_spec]
        )

        workspace.RunNetOnce(train_init_net)
        workspace.RunNetOnce(train_net)

        schema.FeedRecord(input_record, [X])
        workspace.RunNetOnce(eval_net)

        schema.FeedRecord(input_record, [X])
        workspace.RunNetOnce(predict_net)
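
For reference, SpatialBN with is_test: 1 (the eval and predict nets above) applies a per-channel affine normalization using the saved running statistics; the ExpandDims/Squeeze pair only adapts the 2D input to the NCHW layout the op expects. A minimal NumPy sketch of the test-mode formula (the epsilon default of 1e-5 is an assumption):

    import numpy as np

    def spatial_bn_test_mode(x, gamma, beta, running_mean, running_var, eps=1e-5):
        # Normalize each channel with frozen statistics, then scale and shift.
        return gamma * (x - running_mean) / np.sqrt(running_var + eps) + beta

    x = np.random.rand(8, 5).astype(np.float32)  # (batch, channels)
    gamma = np.ones(5, dtype=np.float32)         # scale, ConstantFill'd to 1
    beta = np.zeros(5, dtype=np.float32)         # bias, ConstantFill'd to 0
    mean, var = x.mean(axis=0), x.var(axis=0)
    out = spatial_bn_test_mode(x, gamma, beta, mean, var)
    print(out.mean(axis=0))  # roughly 0 per channel when stats match the batch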
Code Example #6
    def testArcCosineFeatureMap(self, batch_size, input_dims, output_dims, s,
                                scale):
        X = np.random.normal(size=(batch_size, input_dims)).astype(np.float32)
        input_record = self.new_record(
            schema.Scalar((np.float32, (input_dims, ))))
        schema.FeedRecord(input_record, [X])
        input_blob = input_record.field_blobs()[0]

        ac_output = self.model.ArcCosineFeatureMap(input_record,
                                                   output_dims,
                                                   s=s,
                                                   scale=scale)
        self.model.output_schema = schema.Struct()
        self.assertEqual(schema.Scalar((np.float32, (output_dims, ))),
                         ac_output)

        init_ops_list = [
            OpSpec("GaussianFill", None, None),
            OpSpec("UniformFill", None, None),
        ]
        train_init_net, train_net = self.get_training_nets()

        # Init net assertions
        init_ops = self._test_net(train_init_net, init_ops_list)
        workspace.RunNetOnce(self.model.param_init_net)
        W = workspace.FetchBlob(self.model.layers[0].random_w)
        b = workspace.FetchBlob(self.model.layers[0].random_b)

        # Operation specifications
        fc_spec = OpSpec(
            "FC", [input_blob, init_ops[0].output[0], init_ops[1].output[0]],
            None)
        gt_spec = OpSpec("GT", None, None, {'broadcast': 1})
        cast_spec = OpSpec("Cast", None, ac_output.field_blobs())
        relu_spec = OpSpec("Relu", None, None)
        relu_spec_output = OpSpec("Relu", None, ac_output.field_blobs())
        pow_spec = OpSpec("Pow", None, None, {'exponent': float(s - 1)})
        mul_spec = OpSpec("Mul", None, ac_output.field_blobs())

        if s == 0:
            ops_list = [
                fc_spec,
                gt_spec,
                cast_spec,
            ]

        elif s == 1:
            ops_list = [
                fc_spec,
                relu_spec_output,
            ]

        else:
            ops_list = [
                fc_spec,
                relu_spec,
                pow_spec,
                mul_spec,
            ]

        # Train net assertions
        self._test_net(train_net, ops_list)
        self._arc_cosine_hypothesis_test(ac_output(), X, W, b, s)

        # Eval net assertions
        eval_net = self.get_eval_net()
        self._test_net(eval_net, ops_list)
        self._arc_cosine_hypothesis_test(ac_output(), X, W, b, s)

        # Predict net assertions
        predict_net = self.get_predict_net()
        self._test_net(predict_net, ops_list)
        self._arc_cosine_hypothesis_test(ac_output(), X, W, b, s)
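
The three op lists assert the degree-s arc-cosine feature map: a random projection (the FC op) followed by a degree-dependent nonlinearity, i.e. a 0/1 step for s = 0, a ReLU for s = 1, and relu(z) * z^(s-1) = step(z) * z^s for s >= 2. A compact NumPy sketch of the map (a sketch of the math, not the layer's implementation):

    import numpy as np

    def arc_cosine_features(X, W, b, s):
        z = X @ W.T + b                                     # FC: random projection
        if s == 0:
            return (z > 0).astype(np.float32)               # GT + Cast
        elif s == 1:
            return np.maximum(z, 0.0)                       # Relu
        else:
            return np.maximum(z, 0.0) * np.power(z, s - 1)  # Relu, Pow, Mul

    rng = np.random.default_rng(0)
    X = rng.normal(size=(3, 5)).astype(np.float32)
    W = rng.normal(size=(7, 5)).astype(np.float32)
    b = rng.normal(size=7).astype(np.float32)
    for s in (0, 1, 2):
        print(s, arc_cosine_features(X, W, b, s).shape)     # (3, 7) each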
Code Example #7
File: sparse_lookup.py  Project: zlbing/caffe2
    def __init__(self, model, input_record, inner_shape, reducer,
                 weight_init=None, weight_optim=None,
                 name='sparse_lookup', **kwargs):
        super(SparseLookup, self).__init__(model, name, input_record, **kwargs)

        if isinstance(inner_shape, int):
            inner_shape = [inner_shape]
        assert isinstance(inner_shape, list) or isinstance(inner_shape, tuple),\
            "Unexpected type for inner_shape, expected list or tuple, got {0}".\
            format(type(inner_shape))

        # TODO Add some asserts about input type
        assert reducer in self._supported_reducers, "Unsupported reducer: {}".\
            format(reducer)
        self.reducer = reducer

        input_dim = get_categorical_limit(input_record)

        assert input_dim is not None, "Unbounded features are not supported"

        self.output_schema = schema.Scalar(
            (np.float32, inner_shape),
            model.net.NextScopedBlob(name + '_output'),
        )

        scale = math.sqrt(1.0 / input_dim)
        self.shape = [input_dim] + inner_shape
        self.weight_init = weight_init if weight_init else (
            'UniformFill', {'min': -scale, 'max': scale})

        self.w = model.net.NextScopedBlob(name + "_w")
        if schema.equal_schemas(self.input_record, IdList):
            sparse_key = self.input_record.items()
        elif schema.equal_schemas(self.input_record, IdScoreList):
            sparse_key = self.input_record.keys()
        else:
            raise NotImplementedError()

        if self.input_record.lengths.metadata:
            avg_length = self.input_record.lengths.metadata.expected_value
        else:
            avg_length = None
        self.params.append(
            LayerParameter(
                parameter=self.w,
                initializer=core.CreateOperator(self.weight_init[0],
                                                [],
                                                self.w,
                                                shape=self.shape,
                                                **self.weight_init[1]
                                                ),
                optimizer=weight_optim,
                ps_param=LayerPsParam(
                    sparse_key=sparse_key,
                    average_length=avg_length
                )
            ))

        if reducer == 'PositionWeighted':
            self.pos_w = model.net.NextScopedBlob(name + "_pos_w")
            self.params.append(
                LayerParameter(
                    parameter=self.pos_w,
                    initializer=core.CreateOperator('ConstantFill',
                                                    [],
                                                    self.pos_w,
                                                    shape=[input_dim, ],
                                                    value=1.0
                                                    ),
                    optimizer=weight_optim
                ))
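
Functionally, the parameter built here is an embedding table w of shape [input_dim] + inner_shape that gets gathered by sparse ids and pooled per example according to the lengths blob. A NumPy sketch of a Sum reducer over an IdList input (the helper name is hypothetical; the real layer dispatches to Caffe2's SparseLengths* operators):

    import numpy as np

    def sparse_lookup_sum(w, ids, lengths):
        # Gather rows of w by id, then sum-pool each lengths-delimited segment.
        offsets = lengths.cumsum() - lengths
        return np.stack([
            w[ids[o:o + n]].sum(axis=0) if n > 0 else np.zeros(w.shape[1])
            for o, n in zip(offsets, lengths)
        ])

    input_dim, inner_shape = 10, 4
    scale = np.sqrt(1.0 / input_dim)
    # Mirrors the default UniformFill(min=-scale, max=scale) initializer above.
    w = np.random.uniform(-scale, scale, size=(input_dim, inner_shape))
    ids = np.array([3, 7, 7, 1])
    lengths = np.array([2, 0, 2])  # example 0 -> ids[0:2], example 2 -> ids[2:4]
    print(sparse_lookup_sum(w, ids, lengths).shape)  # (3, 4)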
Code Example #8
File: feature_extractor.py  Project: wycharry/Horizon
    def create_net(self):
        net = core.Net("feature_extractor")
        init_net = core.Net("feature_extractor_init")
        missing_scalar = self.create_const(init_net, "MISSING_SCALAR",
                                           MISSING_VALUE)

        action_schema = (
            map_schema() if self.sorted_action_features else schema.Scalar()
        )

        if self.max_q_learning:
            next_action_field = InputColumn.POSSIBLE_NEXT_ACTIONS
            next_action_schema = schema.List(action_schema)
        else:
            next_action_field = InputColumn.NEXT_ACTION
            next_action_schema = action_schema

        input_schema = schema.Struct(
            (InputColumn.STATE_FEATURES, map_schema()),
            (InputColumn.NEXT_STATE_FEATURES, map_schema()),
            (InputColumn.ACTION, action_schema),
            (next_action_field, next_action_schema),
        )

        input_record = net.set_input_record(input_schema)

        state = self.extract_float_features(
            net,
            "state",
            input_record[InputColumn.STATE_FEATURES],
            self.sorted_state_features,
            missing_scalar,
        )
        next_state = self.extract_float_features(
            net,
            "next_state",
            input_record[InputColumn.NEXT_STATE_FEATURES],
            self.sorted_state_features,
            missing_scalar,
        )

        if self.max_q_learning and self.sorted_action_features is not None:
            next_state_field = "tiled_next_state"
            # TODO: this will need to be more complicated to support sparse features
            next_state = net.LengthsTile(
                [next_state,
                 input_record.possible_next_actions.lengths()],
                ["tiled_next_state"],
            )
        else:
            next_state_field = "next_state"

        action = input_record.action
        next_action = input_record[next_action_field]
        if self.max_q_learning:
            next_action = next_action["values"]
        if self.sorted_action_features:
            action = self.extract_float_features(net, "action", action,
                                                 self.sorted_action_features,
                                                 missing_scalar)
            next_action = self.extract_float_features(
                net,
                next_action_field,
                next_action,
                self.sorted_action_features,
                missing_scalar,
            )

        next_action_output = (
            schema.List(
                next_action,
                lengths_blob=input_record.possible_next_actions.lengths,
            )
            if self.max_q_learning else next_action
        )

        net.set_output_record(
            schema.Struct(
                ("state", state),
                ("action", action),
                (next_state_field, next_state),
                (next_action_field, next_action_output),
            ))

        return FeatureExtractorNet(net, init_net)
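
The LengthsTile step above repeats each next_state row once per candidate action so that state rows line up with the flattened possible_next_actions values. In NumPy terms this is a per-row repeat; a small sketch of the idea (not of the op's implementation):

    import numpy as np

    next_state = np.array([[1.0, 2.0],
                           [3.0, 4.0]])  # one row per example
    lengths = np.array([3, 1])           # possible next actions per example

    # Repeat row i lengths[i] times, aligning states with per-action records.
    tiled_next_state = np.repeat(next_state, lengths, axis=0)
    print(tiled_next_state)
    # [[1. 2.]
    #  [1. 2.]
    #  [1. 2.]
    #  [3. 4.]]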
Code Example #9
    def __init__(self,
                 model,
                 input_record,
                 inner_shape,
                 reducer,
                 weight_init=None,
                 weight_optim=None,
                 name='sparse_lookup',
                 **kwargs):

        super(SparseLookup, self).__init__(model, name, input_record, **kwargs)

        # TODO Add some asserts about input type
        if isinstance(inner_shape, int):
            inner_shape = [inner_shape]
        assert isinstance(inner_shape, list) or isinstance(inner_shape, tuple),\
            "Unexpected type for inner_shape, expected list or tuple, got {0}".\
            format(type(inner_shape))

        if reducer == "PositionWeighted":
            self.external_weights = input_record.values()
        self.reducer = reducer

        input_dim = get_categorical_limit(input_record)
        assert input_dim is not None, "Unbounded features are not supported"

        scale = math.sqrt(1.0 / input_dim)
        self.shape = [input_dim] + inner_shape
        self.weight_init = weight_init if weight_init else ('UniformFill', {
            'min': -scale,
            'max': scale
        })

        if schema.equal_schemas(self.input_record, IdList):
            sparse_key = self.input_record.items()
        elif schema.equal_schemas(self.input_record,
                                  IdScoreList,
                                  check_field_types=False):
            sparse_key = self.input_record.keys()
        else:
            raise NotImplementedError()

        if self.input_record.lengths.metadata:
            avg_length = self.input_record.lengths.metadata.expected_value
        else:
            avg_length = None

        self.w = self.create_param(param_name='w',
                                   shape=self.shape,
                                   initializer=self.weight_init,
                                   optimizer=weight_optim,
                                   ps_param=LayerPsParam(
                                       sparse_key=sparse_key,
                                       average_length=avg_length))

        self.scale_bias_init = ('ConstantFill', {'value': 0.0})

        self.scale_bias = self.create_param(param_name='scale_bias',
                                            shape=[],
                                            initializer=self.scale_bias_init,
                                            optimizer=model.NoOptim)

        self.output_schema = schema.Scalar(
            (np.float32, inner_shape),
            self.get_next_blob_reference('output'),
        )
Code Example #10
    def __init__(
            self,
            model,
            input_record,
            output_dims,
            s=1,
            scale=1.0,
            weight_init=None,
            bias_init=None,
            weight_optim=None,
            bias_optim=None,
            set_weight_as_global_constant=False,
            initialize_output_schema=True,
            name='arc_cosine_feature_map',
            **kwargs):

        super(ArcCosineFeatureMap, self).__init__(model, name, input_record,
                                                  **kwargs)
        assert isinstance(input_record, schema.Scalar), "Incorrect input type"
        self.params = []
        self.model = model
        self.set_weight_as_global_constant = set_weight_as_global_constant

        self.input_dims = input_record.field_type().shape[0]
        assert self.input_dims >= 1, "Expected input dimensions >= 1, got %s" \
                                     % self.input_dims

        if initialize_output_schema:
            self.output_schema = schema.Scalar(
                (np.float32, (output_dims, )),
                model.net.NextScopedBlob(name + '_output')
            )

        self.output_dims = output_dims
        assert self.output_dims >= 1, "Expected output dimensions >= 1, got %s" \
                                      % self.output_dims
        self.s = s
        assert (self.s >= 0), "Expected s >= 0, got %s" % self.s
        assert isinstance(self.s, int), "Expected s to be type int, got type %s" \
                                        % type(self.s)

        assert (scale > 0.0), "Expected scale > 0, got %s" % scale
        self.stddev = scale * np.sqrt(1.0 / self.input_dims)

        # Initialize train_init_net parameters
        # Random Parameters
        if set_weight_as_global_constant:
            w_init = np.random.normal(scale=self.stddev,
                                      size=(self.output_dims, self.input_dims))
            b_init = np.random.uniform(low=-0.5 * self.stddev,
                                       high=0.5 * self.stddev,
                                       size=self.output_dims)
            self.random_w = self.model.add_global_constant(
                name=self.name + "_fixed_rand_W",
                array=w_init
            )
            self.random_b = self.model.add_global_constant(
                name=self.name + "_fixed_rand_b",
                array=b_init
            )
        else:
            (self.random_w, self.random_b) = self._initialize_params(
                'random_w',
                'random_b',
                w_init=weight_init,
                b_init=bias_init,
                w_optim=weight_optim,
                b_optim=bias_optim
            )
Code Example #11
    def __init__(self,
                 model,
                 input_record,
                 input_specs,
                 name="feature_sparse_to_dense",
                 **kwargs):
        """
        `input_specs` follows the format of FeatureSpec from schema. To be more
        precise it's a namedtuple that should have:
            'feature_type', 'feature_names', 'feature_ids'
        """
        super(FeatureSparseToDense, self).__init__(model, name, input_record,
                                                   **kwargs)

        self.input_specs = input_specs

        outputs = []
        for field, feature_specs in self.input_specs:
            assert len(feature_specs.feature_names) == len(
                feature_specs.feature_ids)
            if feature_specs.feature_type == "FLOAT":
                outputs.append((
                    field,
                    schema.Scalar(
                        (np.float32, (len(feature_specs.feature_ids), )),
                        self.get_next_blob_reference(field + "_output"),
                    ),
                ))
            elif feature_specs.feature_type == "ID_LIST":
                outputs.append((
                    field,
                    schema.Struct(
                        (
                            "ranges",
                            schema.Scalar(
                                (np.int32,
                                 (len(feature_specs.feature_ids), 2)),
                                self.get_next_blob_reference(field +
                                                             "_ranges"),
                            ),
                        ),
                        (
                            "values",
                            schema.Scalar(
                                np.int64,
                                self.get_next_blob_reference(field +
                                                             "_values"),
                            ),
                        ),
                    ),
                ))
            elif feature_specs.feature_type == "ID_SCORE_LIST":
                outputs.append((
                    field,
                    schema.Struct(
                        (
                            "ranges",
                            schema.Scalar(
                                (np.int32,
                                 (len(feature_specs.feature_ids), 2)),
                                self.get_next_blob_reference(field +
                                                             "_ranges"),
                            ),
                        ),
                        (
                            "ids",
                            schema.Scalar(
                                np.int64,
                                self.get_next_blob_reference(field + "_ids"),
                            ),
                        ),
                        (
                            "scores",
                            schema.Scalar(
                                np.float32,
                                self.get_next_blob_reference(field +
                                                             "_scores"),
                            ),
                        ),
                    ),
                ))
            elif feature_specs.feature_type == "EMBEDDING":
                # We don't know dimensions of embeddings in input data.
                # Even though they should match dimensions from feature config,
                # we keep ranges blob to check input data later.
                outputs.append((
                    field,
                    schema.Struct(
                        (
                            "ranges",
                            schema.Scalar(
                                (np.int32,
                                 (len(feature_specs.feature_ids), 2)),
                                self.get_next_blob_reference(field +
                                                             "_ranges"),
                            ),
                        ),
                        (
                            "values",
                            schema.Scalar(
                                np.float32,
                                self.get_next_blob_reference(field +
                                                             "_values"),
                            ),
                        ),
                    ),
                ))
            elif feature_specs.feature_type == "GENERIC_FEATURE":
                # We don't know dimensions of embeddings in input data.
                # Even though they should match dimensions from feature config,
                # we keep ranges blob to check input data later.
                # Currently this schema with ranges and values is only for
                # generic type enum 1. If new types are implemented, we need to
                # modify the ParseGeneric operator, and this part accordingly.
                outputs.append((
                    field,
                    schema.Struct(
                        (
                            "ranges",
                            schema.Scalar(
                                (np.int32,
                                 (len(feature_specs.feature_ids), 2)),
                                self.get_next_blob_reference(field +
                                                             "_ranges"),
                            ),
                        ),
                        (
                            "values",
                            schema.Scalar(
                                np.float32,
                                self.get_next_blob_reference(field +
                                                             "_values"),
                            ),
                        ),
                    ),
                ))
            else:
                raise TypeError("Unsupported input type: {0}".format(
                    feature_specs.feature_type))

        # TODO(amalevich): This schema is producing ranges, so anything
        # consuming it has to support ranges as well. That might be confusing
        # unless we add better support for ranges or keep this as a first
        # layer.
        self.output_schema = schema.Struct(*outputs)

        # TODO(amalevich): Consider moving this data into the schema instead.
        # Structs don't support attaching metadata and cloning will break
        # things badly, but this is the most elegant way to pass this info
        # around. Should we change it, or is it too much work and not worth
        # it?
        for field, feature_specs in input_specs:
            schema.attach_metadata_to_scalars(
                self.output_schema[field],
                schema.Metadata(feature_specs=feature_specs))
        self.zero = model.global_constants["ZERO"]
        self.zero_range = model.global_constants["ZERO_RANGE"]
Code Example #12
File: layers_test.py  Project: zhijl/caffe2
 def testUniformSamplingWithIncorrectSampleSize(self):
     input_record = self.new_record(schema.Scalar(np.int32))
     num_samples = 200
     num_elements = 100
     with self.assertRaises(AssertionError):
         self.model.UniformSampling(input_record, num_samples, num_elements)
Code Example #13
    def __init__(self,
                 model,
                 input_record,
                 inner_shape,
                 reducer,
                 weight_init=None,
                 weight_optim=None,
                 name='sparse_lookup',
                 regularizer=None,
                 **kwargs):

        super(SparseLookup, self).__init__(model, name, input_record, **kwargs)

        # TODO Add some asserts about input type
        if isinstance(inner_shape, int):
            inner_shape = [inner_shape]
        assert isinstance(inner_shape, list) or isinstance(inner_shape, tuple),\
            "Unexpected type for inner_shape, expected list or tuple, got {0}".\
            format(type(inner_shape))

        if reducer == "PositionWeighted":
            assert _is_id_score_list(self.input_record), (
                "PositionWeighted only supports IdScoreList, but got {}; " +
                "please use the PositionWeighted layer to convert IdList " +
                "to IdScoreList").format(repr(self.input_record))
            self.external_weights = input_record.values()

        elif reducer == "RecencyWeighted":
            assert _is_id_score_list(self.input_record), (
                "RecencyWeighted only supports IdScoreList.")
            self.external_weights = input_record.values()
        self.reducer = reducer

        input_dim = get_categorical_limit(input_record)
        assert input_dim > 0, (
            "{} should have categorical limit > 0, but got {}".format(
                get_key(input_record)(), input_dim))

        self.input_dim = input_dim
        self.shape = [input_dim] + inner_shape

        default_init_op = self._get_default_init_op()

        self.weight_init = weight_init or default_init_op

        if _is_id_list(self.input_record):
            sparse_key = self.input_record.items()
        elif _is_id_score_list(self.input_record):
            sparse_key = self.input_record.keys()
        else:
            raise NotImplementedError()

        if self.input_record.lengths.metadata:
            avg_length = self.input_record.lengths.metadata.expected_value
        else:
            avg_length = None

        self.w = self.create_param(param_name='w',
                                   shape=self.shape,
                                   initializer=self.weight_init,
                                   optimizer=weight_optim,
                                   ps_param=LayerPsParam(
                                       sparse_key=sparse_key,
                                       average_length=avg_length),
                                   regularizer=regularizer)

        self.scale_bias_init = ('ConstantFill', {'value': 0.0})

        self.scale_bias = self.create_param(
            param_name='scale_bias',
            shape=[],
            initializer=self.scale_bias_init,
            optimizer=model.NoOptim,
        )

        self.output_schema = schema.Scalar(
            (np.float32, inner_shape),
            self.get_next_blob_reference('output'),
        )
Code Example #14
 def filter_metrics_schema(self, white_set):
     logger.info("Filter metric schema with white_set {}".format(white_set))
     field_names = self._metrics_schema.field_names()
     for name in field_names:
         if name not in white_set:
             self._metrics_schema = (
                 self._metrics_schema - schema.Struct((name, schema.Scalar()))
             )
Code Example #15
def init_model_with_schemas(model_name,
                            sig_input_dim,
                            tanh_input_dim,
                            pred_dim,
                            train_target=TrainTarget.ADJOINT):
    '''
    output_records have to be filled with existing blobs.
    '''
    workspace.ResetWorkspace()
    print('>>> Training Target: ' + train_target)
    if train_target == TrainTarget.ADJOINT:
        # When training the adjoint network, we also need to forward pass
        # through the origin network
        input_record_schema = schema.Struct(
            ('sig_input', schema.Scalar(
                (np.float32, (sig_input_dim, )))),  # sig
            ('tanh_input', schema.Scalar(
                (np.float32, (tanh_input_dim, )))),  # tanh
            ('adjoint_input', schema.Scalar((np.float32, (pred_dim, )))))
        output_record_schema = schema.Struct(
            ('loss', schema.Scalar((np.float32, (1, )))),
            ('origin_pred', schema.Scalar((np.float32, (pred_dim, )))),
            ('sig_adjoint_pred', schema.Scalar(
                (np.float32, (sig_input_dim, )))),
            ('tanh_adjoint_pred',
             schema.Scalar((np.float32, (tanh_input_dim, )))),
        )
        # use trainer_extra_schema as the loss input record
        trainer_extra_schema = schema.Struct(
            ('sig_loss_record',
             schema.Struct(
                 ('label', schema.Scalar((np.float32, (sig_input_dim, )))),
                 ('prediction', schema.Scalar(
                     (np.float32, (sig_input_dim, )))))),
            ('tanh_loss_record',
             schema.Struct(
                 ('label', schema.Scalar((np.float32, (tanh_input_dim, )))),
                 ('prediction', schema.Scalar(
                     (np.float32, (tanh_input_dim, )))))),
            ('origin_loss_record',
             schema.Struct(
                 ('label', schema.Scalar((np.float32, (pred_dim, )))),
                 ('prediction', schema.Scalar((np.float32, (pred_dim, )))))),
        )
    if train_target == TrainTarget.ORIGIN:
        # When training the origin network, no need of the adjoint network
        input_record_schema = schema.Struct(
            ('sig_input', schema.Scalar(
                (np.float32, (sig_input_dim, )))),  # sig
            ('tanh_input', schema.Scalar(
                (np.float32, (tanh_input_dim, )))),  # tanh
            ('adjoint_input', schema.Scalar((np.float32, (pred_dim, )))))
        output_record_schema = schema.Struct(
            ('loss', schema.Scalar((np.float32, (1, )))),
            ('origin_pred', schema.Scalar((np.float32, (pred_dim, )))),
        )
        # use trainer_extra_schema as the loss input record
        trainer_extra_schema = schema.Struct(
            ('origin_loss_record',
             schema.Struct(
                 ('label', schema.Scalar((np.float32, (pred_dim, )))),
                 ('prediction', schema.Scalar((np.float32, (pred_dim, )))))), )

    model = layer_model_helper.LayerModelHelper(model_name,
                                                input_record_schema,
                                                trainer_extra_schema)
    model.output_schema = output_record_schema
    return model
Code Example #16
 def _test_create_net_max_q_parametric_action(self, normalize):
     extractor = TrainingFeatureExtractor(
         state_normalization_parameters=self.get_state_normalization_parameters(),
         action_normalization_parameters=self.get_action_normalization_parameters(),
         include_possible_actions=True,
         normalize=normalize,
         max_num_actions=2,
     )
     expected_input_record = schema.Struct(
         ("state_features", map_schema()),
         ("next_state_features", map_schema()),
         ("action", map_schema()),
         ("next_action", map_schema()),
         ("not_terminal", schema.Scalar()),
         ("possible_actions", schema.List(map_schema())),
         ("possible_actions_mask", schema.List(schema.Scalar())),
         ("possible_next_actions", schema.List(map_schema())),
         ("possible_next_actions_mask", schema.List(schema.Scalar())),
     )
     expected_output_record = schema.Struct(
         ("state_features", schema.Scalar()),
         ("next_state_features", schema.Scalar()),
         ("action", schema.Scalar()),
         ("next_action", schema.Scalar()),
         ("not_terminal", schema.Scalar()),
         ("possible_actions", schema.Scalar()),
         ("possible_actions_mask", schema.Scalar()),
         ("possible_next_actions", schema.Scalar()),
         ("possible_next_actions_mask", schema.Scalar()),
     )
     self.check_create_net_spec(extractor, expected_input_record,
                                expected_output_record)
Code Example #17
 def testBatchSigmoidCrossEntropyLoss(self):
     input_record = self.new_record(
         schema.Struct(('label', schema.Scalar((np.float32, (32, )))),
                       ('prediction', schema.Scalar((np.float32, (32, ))))))
     loss = self.model.BatchSigmoidCrossEntropyLoss(input_record)
     self.assertEqual(schema.Scalar((np.float32, tuple())), loss)
Code Example #18
 def testDupField(self):
     with self.assertRaises(ValueError):
         schema.Struct(('a', schema.Scalar()), ('a', schema.Scalar()))
Code Example #19
    def __init__(self,
                 model,
                 input_record,
                 output_dims,
                 s=0,
                 scale=None,
                 weight_init=None,
                 bias_init=None,
                 weight_optim=None,
                 bias_optim=None,
                 set_weight_as_global_constant=False,
                 name='semi_random_features',
                 **kwargs):

        if isinstance(input_record, schema.Struct):
            schema.is_schema_subset(
                schema.Struct(
                    ('full', schema.Scalar()),
                    ('random', schema.Scalar()),
                ), input_record)
            self.input_record_full = input_record.full
            self.input_record_random = input_record.random

        elif isinstance(input_record, schema.Scalar):
            self.input_record_full = input_record
            self.input_record_random = input_record

        super(SemiRandomFeatures, self).__init__(
            model,
            self.input_record_full,
            output_dims,
            s=s,
            scale=scale,
            weight_init=weight_init,
            bias_init=bias_init,
            weight_optim=None,
            bias_optim=None,
            set_weight_as_global_constant=set_weight_as_global_constant,
            initialize_output_schema=False,
            name=name,
            **kwargs)

        self.output_schema = schema.Struct(
            (
                'full',
                schema.Scalar((np.float32, output_dims),
                              model.net.NextScopedBlob(name + '_full_output')),
            ),
            (
                'random',
                schema.Scalar(
                    (np.float32, output_dims),
                    model.net.NextScopedBlob(name + '_random_output')),
            ),
        )

        # Learned Parameters
        (self.learned_w,
         self.learned_b) = self._initialize_params('learned_w',
                                                   'learned_b',
                                                   w_init=weight_init,
                                                   b_init=bias_init,
                                                   w_optim=weight_optim,
                                                   b_optim=bias_optim)
Code Example #20
 def testAssignToField(self):
     with self.assertRaises(TypeError):
         s = schema.Struct(('a', schema.Scalar()))
         s.a = schema.Scalar()
Code Example #21
File: dataio_test.py  Project: zjf8888/pytorch
 def __init__(self, name, size, offset):
     self._schema = schema.Struct(('label', schema.Scalar()), )
     self._name = name
     self._size = size
     self._offset = offset
     self._src_ds = None
Code Example #22
 def testNormalizeField(self):
     s = schema.Struct(('field1', np.int32), ('field2', str))
     self.assertEqual(
         s,
         schema.Struct(('field1', schema.Scalar(dtype=np.int32)),
                       ('field2', schema.Scalar(dtype=str))))
Code Example #23
    def testSamplingTrain(self):
        output_dims = 1000

        indices = self.new_record(schema.Scalar((np.int32, (10,))))
        sampling_prob = self.new_record(schema.Scalar((np.float32, (10, ))))

        sampled_fc = self.model.SamplingTrain(
            schema.Struct(
                ('input', self.model.input_feature_schema.float_features),
                ('indices', indices),
                ('sampling_prob', sampling_prob),
            ),
            "FC",
            output_dims,
        )
        self.model.output_schema = sampled_fc

        # Check that we don't add prediction layer into the model
        self.assertEqual(1, len(self.model.layers))

        self.assertEqual(
            schema.Scalar((np.float32, (output_dims, ))),
            sampled_fc
        )

        train_init_net, train_net = self.get_training_nets()

        init_ops = self.assertNetContainOps(
            train_init_net,
            [
                OpSpec("UniformFill", None, None),
                OpSpec("UniformFill", None, None),
            ]
        )

        sampled_fc_layer = self.model.layers[0]

        gather_w_spec = OpSpec(
            "Gather",
            [
                init_ops[0].output[0],
                indices(),
            ],
            [
                sampled_fc_layer._prediction_layer.train_param_blobs[0]
            ]
        )
        gather_b_spec = OpSpec(
            "Gather",
            [
                init_ops[1].output[0],
                indices(),
            ],
            [
                sampled_fc_layer._prediction_layer.train_param_blobs[1]
            ]
        )
        train_fc_spec = OpSpec(
            "FC",
            [
                self.model.input_feature_schema.float_features(),
            ] + sampled_fc_layer._prediction_layer.train_param_blobs,
            sampled_fc.field_blobs()
        )
        log_spec = OpSpec("Log", [sampling_prob()], [None])
        sub_spec = OpSpec(
            "Sub",
            [sampled_fc.field_blobs()[0], None],
            sampled_fc.field_blobs()
        )

        train_ops = self.assertNetContainOps(
            train_net,
            [gather_w_spec, gather_b_spec, train_fc_spec, log_spec, sub_spec])

        self.assertEqual(train_ops[3].output[0], train_ops[4].input[1])

        predict_net = self.get_predict_net()
        self.assertNetContainOps(
            predict_net,
            [
                OpSpec(
                    "FC",
                    [
                        self.model.input_feature_schema.float_features(),
                        init_ops[0].output[0],
                        init_ops[1].output[0],
                    ],
                    sampled_fc.field_blobs()
                )
            ]
        )
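
The Log and Sub ops asserted in the train net implement the usual sampled-softmax correction: each sampled class's logit has the log of its sampling probability subtracted, so classes the sampler picks often are not systematically over-weighted during training. A short NumPy sketch of that correction (array names are illustrative):

    import numpy as np

    # Logits for the sampled class subset, as produced by the train-time FC.
    sampled_logits = np.array([2.0, -1.0, 0.5])
    # Probability with which each sampled class was drawn.
    sampling_prob = np.array([0.5, 0.1, 0.2])

    # Log + Sub from the train net: logit_i <- logit_i - log(q_i)
    corrected = sampled_logits - np.log(sampling_prob)
    print(corrected)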
Code Example #24
    def create_net(self):
        net = core.Net("feature_extractor")
        init_net = core.Net("feature_extractor_init")
        missing_scalar = self.create_const(init_net, "MISSING_SCALAR",
                                           MISSING_VALUE)

        action_schema = (
            map_schema() if self.sorted_action_features else schema.Scalar()
        )

        input_schema = schema.Struct(
            (InputColumn.STATE_FEATURES, map_schema()),
            (InputColumn.NEXT_STATE_FEATURES, map_schema()),
            (InputColumn.ACTION, action_schema),
            (InputColumn.NEXT_ACTION, action_schema),
            (InputColumn.NOT_TERMINAL, schema.Scalar()),
            (InputColumn.TIME_DIFF, schema.Scalar()),
        )
        if self.include_possible_actions:
            input_schema += schema.Struct(
                (InputColumn.POSSIBLE_ACTIONS_MASK, schema.List(
                    schema.Scalar())),
                (InputColumn.POSSIBLE_NEXT_ACTIONS_MASK,
                 schema.List(schema.Scalar())),
            )
            if self.sorted_action_features is not None:
                input_schema += schema.Struct(
                    (InputColumn.POSSIBLE_ACTIONS, schema.List(map_schema())),
                    (InputColumn.POSSIBLE_NEXT_ACTIONS,
                     schema.List(map_schema())),
                )

        input_record = net.set_input_record(input_schema)

        state = self.extract_float_features(
            net,
            "state",
            input_record[InputColumn.STATE_FEATURES],
            self.sorted_state_features,
            missing_scalar,
        )
        next_state = self.extract_float_features(
            net,
            "next_state",
            input_record[InputColumn.NEXT_STATE_FEATURES],
            self.sorted_state_features,
            missing_scalar,
        )

        if self.sorted_action_features:
            action = self.extract_float_features(
                net,
                InputColumn.ACTION,
                input_record[InputColumn.ACTION],
                self.sorted_action_features,
                missing_scalar,
            )
            next_action = self.extract_float_features(
                net,
                InputColumn.NEXT_ACTION,
                input_record[InputColumn.NEXT_ACTION],
                self.sorted_action_features,
                missing_scalar,
            )
            if self.include_possible_actions:
                possible_action_features = self.extract_float_features(
                    net,
                    InputColumn.POSSIBLE_ACTIONS,
                    input_record[InputColumn.POSSIBLE_ACTIONS]["values"],
                    self.sorted_action_features,
                    missing_scalar,
                )
                possible_next_action_features = self.extract_float_features(
                    net,
                    InputColumn.POSSIBLE_NEXT_ACTIONS,
                    input_record[InputColumn.POSSIBLE_NEXT_ACTIONS]["values"],
                    self.sorted_action_features,
                    missing_scalar,
                )
        else:
            action = input_record[InputColumn.ACTION]
            next_action = input_record[InputColumn.NEXT_ACTION]

        if self.normalize:
            C2.set_net_and_init_net(net, init_net)
            state, _ = PreprocessorNet().normalize_dense_matrix(
                state,
                self.sorted_state_features,
                self.state_normalization_parameters,
                blobname_prefix="state",
                split_expensive_feature_groups=True,
            )
            next_state, _ = PreprocessorNet().normalize_dense_matrix(
                next_state,
                self.sorted_state_features,
                self.state_normalization_parameters,
                blobname_prefix="next_state",
                split_expensive_feature_groups=True,
            )
            if self.sorted_action_features is not None:
                action, _ = PreprocessorNet().normalize_dense_matrix(
                    action,
                    self.sorted_action_features,
                    self.action_normalization_parameters,
                    blobname_prefix="action",
                    split_expensive_feature_groups=True,
                )
                next_action, _ = PreprocessorNet().normalize_dense_matrix(
                    next_action,
                    self.sorted_action_features,
                    self.action_normalization_parameters,
                    blobname_prefix="next_action",
                    split_expensive_feature_groups=True,
                )
                if self.include_possible_actions:
                    possible_action_features, _ = \
                        PreprocessorNet().normalize_dense_matrix(
                        possible_action_features,
                        self.sorted_action_features,
                        self.action_normalization_parameters,
                        blobname_prefix="possible_action",
                        split_expensive_feature_groups=True,
                    )
                    possible_next_action_features, _ = \
                        PreprocessorNet().normalize_dense_matrix(
                        possible_next_action_features,
                        self.sorted_action_features,
                        self.action_normalization_parameters,
                        blobname_prefix="possible_next_action",
                        split_expensive_feature_groups=True,
                    )
            C2.set_net_and_init_net(None, None)

        output_schema = schema.Struct(
            (InputColumn.STATE_FEATURES, state),
            (InputColumn.NEXT_STATE_FEATURES, next_state),
            (InputColumn.ACTION, action),
            (InputColumn.NEXT_ACTION, next_action),
            (InputColumn.NOT_TERMINAL, input_record[InputColumn.NOT_TERMINAL]),
            (InputColumn.TIME_DIFF, input_record[InputColumn.TIME_DIFF]),
        )

        if self.include_possible_actions:
            # Drop the "lengths" blob from possible_actions_mask since we know
            # it's just a list of [max_num_actions, max_num_actions, ...]
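            # (e.g. with max_num_actions == 4 it would be [4, 4, 4, ...])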
            output_schema += schema.Struct(
                (
                    InputColumn.POSSIBLE_ACTIONS_MASK,
                    input_record[InputColumn.POSSIBLE_ACTIONS_MASK]["values"],
                ),
                (
                    InputColumn.POSSIBLE_NEXT_ACTIONS_MASK,
                    input_record[InputColumn.POSSIBLE_NEXT_ACTIONS_MASK]
                    ["values"],
                ),
            )
            if self.sorted_action_features is not None:
                output_schema += schema.Struct(
                    (InputColumn.POSSIBLE_ACTIONS, possible_action_features),
                    (InputColumn.POSSIBLE_NEXT_ACTIONS,
                     possible_next_action_features),
                )

        net.set_output_record(output_schema)
        return FeatureExtractorNet(net, init_net)
Code Example #25
    def testAddLoss(self):
        input_record_LR = self.new_record(
            schema.Struct(
                ('label', schema.Scalar((np.float64, (1, )))),
                ('prediction', schema.Scalar((np.float32, (2, )))),
                ('weight', schema.Scalar((np.float64, (1, ))))
            )
        )
        loss_LR = self.model.BatchLRLoss(input_record_LR)

        self.model.add_loss(loss_LR)
        assert 'unnamed' in self.model.loss
        self.assertEqual(
            schema.Scalar((np.float32, tuple())), self.model.loss.unnamed
        )
        self.assertEqual(loss_LR, self.model.loss.unnamed)

        self.model.add_loss(loss_LR, 'addLoss')
        assert 'addLoss' in self.model.loss
        self.assertEqual(
            schema.Scalar((np.float32, tuple())), self.model.loss.addLoss
        )
        self.assertEqual(loss_LR, self.model.loss.addLoss)

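        # Re-using an existing loss name auto-suffixes the new entry:
        # 'addLoss' -> 'addLoss_auto_0', 'addLoss_auto_1', ...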
        self.model.add_loss(
            schema.Scalar(
                dtype=np.float32, blob=core.BlobReference('loss_blob_1')
            ), 'addLoss'
        )
        assert 'addLoss_auto_0' in self.model.loss
        self.assertEqual(
            schema.Scalar((np.float32, tuple())), self.model.loss.addLoss_auto_0
        )
        assert core.BlobReference('loss_blob_1') in self.model.loss.field_blobs()

        self.model.add_loss(
            schema.Struct(
                (
                    'structName', schema.Scalar(
                        dtype=np.float32,
                        blob=core.BlobReference('loss_blob_2')
                    )
                )
            ), 'addLoss'
        )
        assert 'addLoss_auto_1' in self.model.loss
        self.assertEqual(
            schema.Struct(('structName', schema.Scalar((np.float32, tuple())))),
            self.model.loss.addLoss_auto_1
        )
        assert core.BlobReference('loss_blob_2') in self.model.loss.field_blobs()

        loss_in_tuple_0 = schema.Scalar(
            dtype=np.float32, blob=core.BlobReference('loss_blob_in_tuple_0')
        )

        loss_in_tuple_1 = schema.Scalar(
            dtype=np.float32, blob=core.BlobReference('loss_blob_in_tuple_1')
        )

        loss_tuple = schema.NamedTuple(
            'loss_in_tuple', loss_in_tuple_0, loss_in_tuple_1
        )
        self.model.add_loss(loss_tuple, 'addLoss')
        assert 'addLoss_auto_2' in self.model.loss
        self.assertEqual(
            schema.Struct(
                ('loss_in_tuple_0', schema.Scalar((np.float32, tuple()))),
                ('loss_in_tuple_1', schema.Scalar((np.float32, tuple())))
            ), self.model.loss.addLoss_auto_2
        )
        assert (core.BlobReference('loss_blob_in_tuple_0')
                in self.model.loss.field_blobs())
        assert (core.BlobReference('loss_blob_in_tuple_1')
                in self.model.loss.field_blobs())
Code Example #26
def map_schema():
    return schema.Map(schema.Scalar(), schema.Scalar())
Code Example #27
    def testMapToRange(self):
        input_record = self.new_record(schema.Scalar(np.int32))
        indices_blob = self.model.MapToRange(input_record,
                                             max_index=100).indices
        self.model.output_schema = schema.Struct()

        train_init_net, train_net = self.get_training_nets()

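        # During training, each previously unseen id is assigned the next
        # dense index (starting at 1; 0 is reserved for unmapped ids, as the
        # eval runs below show); repeated ids reuse their index, and later
        # runs keep extending the map.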
        schema.FeedRecord(
            input_record,
            [np.array([10, 3, 20, 99, 15, 11, 3, 11], dtype=np.int32)]
        )
        workspace.RunNetOnce(train_init_net)
        workspace.RunNetOnce(train_net)
        indices = workspace.FetchBlob(indices_blob())
        np.testing.assert_array_equal(
            np.array([1, 2, 3, 4, 5, 6, 2, 6], dtype=np.int32),
            indices
        )

        schema.FeedRecord(
            input_record,
            [np.array([10, 3, 23, 35, 60, 15, 10, 15], dtype=np.int32)]
        )
        workspace.RunNetOnce(train_net)
        indices = workspace.FetchBlob(indices_blob())
        np.testing.assert_array_equal(
            np.array([1, 2, 7, 8, 9, 5, 1, 5], dtype=np.int32),
            indices
        )

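        # At eval/predict time the mapping learned during training is reused
        # as-is: ids beyond max_index (here 100) come back as 0.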
        eval_net = self.get_eval_net()

        schema.FeedRecord(
            input_record,
            [np.array([10, 3, 23, 35, 60, 15, 200], dtype=np.int32)]
        )
        workspace.RunNetOnce(eval_net)
        indices = workspace.FetchBlob(indices_blob())
        np.testing.assert_array_equal(
            np.array([1, 2, 7, 8, 9, 5, 0], dtype=np.int32),
            indices
        )

        schema.FeedRecord(
            input_record,
            [np.array([10, 3, 23, 15, 101, 115], dtype=np.int32)]
        )
        workspace.RunNetOnce(eval_net)
        indices = workspace.FetchBlob(indices_blob())
        np.testing.assert_array_equal(
            np.array([1, 2, 7, 5, 0, 0], dtype=np.int32),
            indices
        )

        predict_net = self.get_predict_net()

        schema.FeedRecord(
            input_record,
            [np.array([3, 3, 20, 23, 151, 35, 60, 15, 200], dtype=np.int32)]
        )
        workspace.RunNetOnce(predict_net)
        indices = workspace.FetchBlob(indices_blob())
        np.testing.assert_array_equal(
            np.array([2, 2, 3, 7, 0, 8, 9, 5, 0], dtype=np.int32),
            indices
        )
Code Example #28
    def __init__(self, model, input_record, input_specs,
                 name='sparse_to_dense', **kwargs):
        """
        `input_specs` follows the format of FeatureSpec from schema. To be more
        precise it's a namedtuple that should have:
            'feature_type', 'feature_names', 'feature_ids'
        """
        super(SparseToDense, self).__init__(model, name,
                                            input_record, **kwargs)

        self.input_specs = input_specs

        outputs = []
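        # FLOAT fields become a dense (num_features,) float32 blob; ID_LIST
        # fields become a (num_features, 2) blob of ranges plus the original
        # values blob.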
        for field, feature_specs in self.input_specs:
            assert (len(feature_specs.feature_names) ==
                    len(feature_specs.feature_ids))
            if feature_specs.feature_type == 'FLOAT':
                outputs.append((
                    field,
                    schema.Scalar(
                        (np.float32, (len(feature_specs.feature_ids), )),
                        model.net.NextScopedBlob(name + '_' + field + '_output')
                    )
                ))
            elif feature_specs.feature_type == 'ID_LIST':
                outputs.append((
                    field,
                    schema.Struct(
                        ('ranges',
                            schema.Scalar(
                                (
                                    np.int32,
                                    (len(feature_specs.feature_ids), 2)
                                ),
                                model.net.NextScopedBlob(
                                    name + '_' + field + '_ranges')
                            ),
                         ),
                        ('values', input_record[field].values.items),
                    )
                ))
            else:
                raise TypeError(
                    "Unsupported input type: {0}".format(
                        feature_specs.feature_type))

        # TODO(amalevich): This schema produces ranges, so anything consuming
        # it has to support ranges as well. That might be confusing unless we
        # add better support for ranges or have them as a first layer.
        self.output_schema = schema.Struct(
            *outputs
        )

        # TODO(amalevich): Consider moving this data into the schema instead.
        # Structs don't support attaching metadata, and cloning would break
        # things badly, but this is the most elegant way to pass this info
        # around. Should we change it, or would that be too much work for
        # too little gain?
        for field, feature_specs in input_specs:
            schema.attach_metadata_to_scalars(
                self.output_schema[field],
                schema.Metadata(
                    feature_specs=feature_specs)
            )
        self.zero = model.global_constants['ZERO']
        self.zero_range = model.global_constants['ZERO_RANGE']
Code Example #29
    def testArcCosineFeatureMap(self, batch_size, input_dims, output_dims, s, scale,
                                set_weight_as_global_constant):

        def _arc_cosine_hypothesis_test(ac_output, X, W, b, s):
            """
            Runs hypothesis test for Arc Cosine layer.

            Inputs:
                ac_output -- output of net after running arc cosine layer
                X -- input data
                W -- weight parameter from train_init_net
                b -- bias parameter from train_init_net
                s -- degree parameter
            """
            # Get output from net
            net_output = workspace.FetchBlob(ac_output)

            # Computing output directly
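            # The degree-s arc-cosine map is x_pow * gate, where
            # x_pow = (X.W^T + b)^s and the gate is the Heaviside step for
            # s > 0, or the softsign x / (1 + x) on the positive part when
            # s == 0 (x^0 == 1, so x_pow is a no-op there).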
            x_rand = np.matmul(X, np.transpose(W)) + b
            x_pow = np.power(x_rand, s)
            if s > 0:
                h_rand_features = np.piecewise(x_rand,
                                               [x_rand <= 0, x_rand > 0],
                                               [0, 1])
            else:
                h_rand_features = np.piecewise(x_rand,
                                               [x_rand <= 0, x_rand > 0],
                                               [0, lambda x: x / (1 + x)])
            output_ref = np.multiply(x_pow, h_rand_features)

            # Comparing net output and computed output
            npt.assert_allclose(net_output, output_ref, rtol=1e-4)

        X = np.random.normal(size=(batch_size, input_dims)).astype(np.float32)
        input_record = self.new_record(schema.Scalar((np.float32, (input_dims,))))
        schema.FeedRecord(input_record, [X])
        input_blob = input_record.field_blobs()[0]

        ac_output = self.model.ArcCosineFeatureMap(
            input_record,
            output_dims,
            s=s,
            scale=scale,
            set_weight_as_global_constant=set_weight_as_global_constant
        )
        self.model.output_schema = schema.Struct()
        self.assertEqual(
            schema.Scalar((np.float32, (output_dims, ))),
            ac_output
        )

        train_init_net, train_net = self.get_training_nets()

        # Run create_init_net to initialize the global constants, and W and b
        workspace.RunNetOnce(train_init_net)
        workspace.RunNetOnce(self.model.create_init_net(name='init_net'))

        if set_weight_as_global_constant:
            W = workspace.FetchBlob(
                self.model.global_constants['arc_cosine_feature_map_fixed_rand_W']
            )
            b = workspace.FetchBlob(
                self.model.global_constants['arc_cosine_feature_map_fixed_rand_b']
            )
        else:
            W = workspace.FetchBlob(self.model.layers[0].random_w)
            b = workspace.FetchBlob(self.model.layers[0].random_b)

        # Operation specifications
        fc_spec = OpSpec("FC", [input_blob, None, None], None)
        softsign_spec = OpSpec("Softsign", None, None)
        relu_spec = OpSpec("Relu", None, None)
        relu_spec_output = OpSpec("Relu", None, ac_output.field_blobs())
        pow_spec = OpSpec("Pow", None, None, {'exponent': float(s - 1)})
        mul_spec = OpSpec("Mul", None, ac_output.field_blobs())

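        # Expected op decompositions: for s == 0, FC -> Softsign -> Relu
        # (x^0 == 1, so no Pow is needed); for s == 1, FC -> Relu, since
        # x * step(x) == relu(x); otherwise Relu, Pow(s - 1) and Mul combine
        # to produce x^s * step(x).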
        if s == 0:
            ops_list = [
                fc_spec,
                softsign_spec,
                relu_spec_output,
            ]
        elif s == 1:
            ops_list = [
                fc_spec,
                relu_spec_output,
            ]
        else:
            ops_list = [
                fc_spec,
                relu_spec,
                pow_spec,
                mul_spec,
            ]

        # Train net assertions
        self._test_net(train_net, ops_list)
        _arc_cosine_hypothesis_test(ac_output(), X, W, b, s)

        # Eval net assertions
        eval_net = self.get_eval_net()
        self._test_net(eval_net, ops_list)
        _arc_cosine_hypothesis_test(ac_output(), X, W, b, s)

        # Predict net assertions
        predict_net = self.get_predict_net()
        self._test_net(predict_net, ops_list)
        _arc_cosine_hypothesis_test(ac_output(), X, W, b, s)
Code Example #30
    def __init__(self,
                 model,
                 input_record,
                 inner_shape,
                 reducer,
                 weight_init=None,
                 weight_optim=None,
                 name='sparse_lookup',
                 regularizer=None,
                 **kwargs):

        super(SparseLookup, self).__init__(model, name, input_record, **kwargs)

        # TODO Add some asserts about input type
        if isinstance(inner_shape, int):
            inner_shape = [inner_shape]
        assert isinstance(inner_shape, (list, tuple)), (
            "Unexpected type for inner_shape, expected list or tuple, "
            "got {0}".format(type(inner_shape)))

        if reducer == "PositionWeighted":
            assert _is_id_score_list(self.input_record), (
                "PositionWeighted only supports IdScoreList, but got {}; "
                "please use the PositionWeighted layer to convert IdList "
                "to IdScoreList".format(repr(self.input_record)))
            self.external_weights = input_record.values()

        elif reducer == "RecencyWeighted":
            assert _is_id_score_list(self.input_record), (
                "RecencyWeighted only supports IdScoreList.")
            self.external_weights = input_record.values()
        self.reducer = reducer

        input_dim = get_categorical_limit(input_record)
        assert input_dim > 0, (
            "{} should have categorical limit > 0, but got {}".format(
                get_key(input_record)(), input_dim))

        self.input_dim = input_dim
        self.shape = [input_dim] + inner_shape

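        # The trainer version (e.g. fp32 vs fp16) is read from the current
        # scope; it defaults to 'fp32' when the scope does not override it.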
        cur_scope = get_current_scope()
        trainer_version = get_sparse_lookup_trainer_version(**cur_scope.get(
            get_sparse_lookup_trainer_version.__name__, {'version': 'fp32'}))

        self.trainer_version = trainer_version

        default_init_op = self._get_default_init_op()

        self.weight_init = weight_init or default_init_op

        # If fp16 is used, make sure fp16 init op is used
        if self.trainer_version == "fp16":
            assert self.reducer in self._fp16_compatible_reducers, (
                "Fp16 training is enabled, but the specified reducer is not "
                "supported: got {}. Supported reducers: {}. In general, sum, "
                "mean, and positional pooling are supported; attention is "
                "not. Please check whether any fp16-trained sparse features "
                "use advanced pooling.".format(
                    self.reducer, self._fp16_compatible_reducers))

            # if init op is UniformFill, we replace it directly
            if self.weight_init[0] == "UniformFill":
                self.weight_init = ("Float16UniformFill", self.weight_init[1])
            assert self.weight_init[0] in self._fp16_compatible_init_op_types, (
                "Fp16 training is enabled. The init op for the weight "
                "parameter must be fp16 compatible. Got {}. Supported ops: "
                "{}".format(
                    self.weight_init[0], self._fp16_compatible_init_op_types))

        if _is_id_list(self.input_record):
            sparse_key = self.input_record.items()
        elif _is_id_score_list(self.input_record):
            sparse_key = self.input_record.keys()
        else:
            raise NotImplementedError()

        if self.input_record.lengths.metadata:
            avg_length = self.input_record.lengths.metadata.expected_value
        else:
            avg_length = None

        self.w = self.create_param(param_name='w',
                                   shape=self.shape,
                                   initializer=self.weight_init,
                                   optimizer=weight_optim,
                                   ps_param=LayerPsParam(
                                       sparse_key=sparse_key,
                                       average_length=avg_length),
                                   regularizer=regularizer)

        self.scale_bias_init = ('ConstantFill', {'value': 0.0})

        self.scale_bias = self.create_param(
            param_name='scale_bias',
            shape=[],
            initializer=self.scale_bias_init,
            optimizer=model.NoOptim,
        )

        self.output_schema = schema.Scalar(
            (np.float32, inner_shape),
            self.get_next_blob_reference('output'),
        )