Example #1
    def __init__(self, name, input_feature_schema, trainer_extra_schema,
                 keep_blobs=False,
                 use_attribution=True):
        ''' TODO(amalevich): more documentation on input args

        use_attribution:
            if True, generate the attribution net for feature importance
            calculation; must be set to False when FC is quantized as FP16.
            This attribute is kept consistent with the MTML model.
        '''

        super(LayerModelHelper, self).__init__(name=name)
        self._layer_names = set()
        self._layers = []
        self._param_to_shape = {}

        # seed default
        self._seed = None
        self._sequence_seed = True

        # optimizer bookkeeping
        self.param_to_optim = {}
        self.param_to_reg = {}

        self._default_optimizer = None
        self._loss = None
        self._prediction = []
        self._output_schema = None

        self._post_grad_net_modifiers = []
        self._final_net_modifiers = []

        # breakdown map; breakdown features are categorical (like dense) but
        # are not necessarily used to represent training data
        self._breakdown_map = None

        # Connect the schema to self.net. This particular schema instance will
        # be used to generate the Layers across the network and to connect
        # with Readers.
        self._input_feature_schema = schema.NewRecord(
            self.net,
            input_feature_schema
        ) if not keep_blobs else input_feature_schema.clone()
        self._trainer_extra_schema = schema.NewRecord(
            self.net,
            trainer_extra_schema
        ) if not keep_blobs else trainer_extra_schema.clone()
        self._metrics_schema = schema.Struct()

        self._preproc_output_schema = None

        self._init_global_constants()
        self.param_init_net = self.create_init_net('param_init_net')
        self._initialize_params = True

        # additional (hard-coded) diagnose_options to report based on the model
        # TODO(xlwang): it's a hack!
        self.ad_hoc_diagnose_blobs_and_operations = []
        self.ad_hoc_plot_blobs = []
        self.use_attribution = use_attribution
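
A minimal instantiation sketch may help here; the schemas below are
hypothetical, and only the constructor signature shown above is assumed:

    import numpy as np
    from caffe2.python import schema
    from caffe2.python.layer_model_helper import LayerModelHelper

    # Hypothetical input/trainer schemas; any schema.Struct works.
    input_schema = schema.Struct(
        ('float_features', schema.Scalar((np.float32, (10, )))))
    trainer_schema = schema.Struct(('label', schema.Scalar(np.float32)))

    # keep_blobs=False means NewRecord allocates fresh blobs on model.net.
    model = LayerModelHelper('demo_model', input_schema, trainer_schema,
                             keep_blobs=False)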
Example #2
    def __init__(self,
                 name,
                 input_feature_schema,
                 trainer_extra_schema,
                 keep_blobs=False):
        super(LayerModelHelper, self).__init__(name=name)
        self._layer_names = set()
        self._layers = []
        self._param_to_shape = {}

        # optimizer bookkeeping
        self.param_to_optim = {}

        self._default_optimizer = None
        self._loss = None
        self._output_schema = None

        # Connect the schema to self.net. This particular schema instance will
        # be used to generate the Layers across the network and to connect
        # with Readers.
        self._input_feature_schema = schema.NewRecord(
            self.net, input_feature_schema
        ) if not keep_blobs else input_feature_schema.clone()
        self._trainer_extra_schema = schema.NewRecord(
            self.net, trainer_extra_schema
        ) if not keep_blobs else trainer_extra_schema.clone()
        self._metrics_schema = schema.Struct()

        self._init_global_constants()
        self.param_init_net = self.create_init_net('param_init_net')
        self._initialize_params = True
Example #3
    def testStructGet(self):
        net = core.Net('test_net')
        s1 = schema.NewRecord(net, schema.Scalar(np.float32))
        s2 = schema.NewRecord(net, schema.Scalar(np.float32))
        t = schema.Tuple(s1, s2)
        assert t.get('field_0', None) == s1
        assert t.get('field_1', None) == s2
        assert t.get('field_2', None) is None
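
schema.Tuple auto-names its fields field_0, field_1, ...; a quick sketch of
the equivalent access forms, reusing t, s1 and s2 from the test above:

    assert t.field_0 == s1     # attribute access
    assert t['field_1'] == s2  # item access by field name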
Example #4
    def __init__(self,
                 name,
                 input_feature_schema,
                 trainer_extra_schema,
                 keep_blobs=False):
        ''' TODO(amalevich): more documentation on input args
        '''

        super(LayerModelHelper, self).__init__(name=name)
        self._layer_names = set()
        self._layers = []
        self._param_to_shape = {}

        # seed default
        self._seed = None
        self._sequence_seed = True

        # optimizer bookkeeping
        self.param_to_optim = {}
        self.param_to_reg = {}

        self._default_optimizer = None
        self._loss = None
        self._output_schema = None

        self._post_grad_net_modifiers = []
        self._final_net_modifiers = []

        # breakdown map; breakdown features are categorical (like dense) but
        # are not necessarily used to represent training data
        self._breakdown_map = None

        # Connect the schema to self.net. This particular schema instance will
        # be used to generate the Layers across the network and to connect
        # with Readers.
        self._input_feature_schema = schema.NewRecord(
            self.net, input_feature_schema
        ) if not keep_blobs else input_feature_schema.clone()
        self._trainer_extra_schema = schema.NewRecord(
            self.net, trainer_extra_schema
        ) if not keep_blobs else trainer_extra_schema.clone()
        self._metrics_schema = schema.Struct()

        self._preproc_output_schema = None

        self._init_global_constants()
        self.param_init_net = self.create_init_net('param_init_net')
        self._initialize_params = True
Example #5
    def testSetInputRecordWithBlobs(self):
        net = core.Net("test")
        # np.float64 replaces the removed np.float alias (same dtype)
        record = schema.NewRecord(
            net, schema.Struct(("x", schema.Scalar(np.float64)), ))
        input_record = net.set_input_record(record)
        self.assertTrue(net.BlobIsDefined(input_record.x()))
        self.assertIn(input_record.x(), net.external_inputs)
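
Once the record is registered, each leaf blob can be fed and fetched through
the workspace; a small sketch reusing record from the test above:

    import numpy as np
    from caffe2.python import schema

    # FeedRecord feeds one array per leaf blob of the record, in field order.
    schema.FeedRecord(record, [np.array([1.0, 2.0], dtype=np.float64)])
    fetched = schema.FetchRecord(record)  # round-trips through the workspace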
Example #6
    def test_extract_sarsa_parametric_action(self):
        extractor = TrainingFeatureExtractor(
            state_normalization_parameters=self.get_state_normalization_parameters(),
            action_normalization_parameters=self.get_action_normalization_parameters(),
            max_q_learning=False,
        )
        # Setup
        ws, net = self.create_ws_and_net(extractor)
        input_record = net.input_record() + schema.NewRecord(
            net, schema.Struct(("reward", schema.Scalar()))
        )
        self.setup_state_features(ws, input_record.state_features)
        self.setup_next_state_features(ws, input_record.next_state_features)
        self.setup_action_features(ws, input_record.action)
        self.setup_next_action_features(ws, input_record.next_action)
        reward = self.setup_reward(ws, input_record.reward)
        # Run
        ws.run(net)
        res = extractor.extract(ws, input_record, net.output_record())
        o = res.training_input
        npt.assert_array_equal(reward, o.reward.numpy())
        npt.assert_array_equal(
            self.expected_action_features(), o.action.float_features.numpy()
        )
        npt.assert_array_equal(
            self.expected_next_action_features(), o.next_action.float_features.numpy()
        )
        npt.assert_array_equal(
            self.expected_state_features(), o.state.float_features.numpy()
        )
        npt.assert_array_equal(
            self.expected_next_state_features(), o.next_state.float_features.numpy()
        )
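
The input_record construction above relies on Struct addition; a standalone
sketch of the semantics, with hypothetical field names:

    import numpy as np
    from caffe2.python import schema

    a = schema.Struct(('x', schema.Scalar(np.float32)))
    b = schema.Struct(('reward', schema.Scalar(np.float32)))
    combined = a + b  # a new Struct carrying both 'x' and 'reward'
    assert set(combined.field_names()) == {'x', 'reward'}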
Example #7
    def test_extract_max_q_discrete_action(self):
        extractor = TrainingFeatureExtractor(
            state_normalization_parameters=self.get_state_normalization_parameters(),
            max_q_learning=True,
        )
        # Setup
        ws, net = self.create_ws_and_net(extractor)
        input_record = net.input_record() + schema.NewRecord(
            net, schema.Struct(("reward", schema.Scalar()))
        )
        self.setup_state_features(ws, input_record.state_features)
        self.setup_next_state_features(ws, input_record.next_state_features)
        action = self.setup_action(ws, input_record.action)
        possible_next_actions = self.setup_possible_next_actions(
            ws, input_record.possible_next_actions
        )
        reward = self.setup_reward(ws, input_record.reward)
        # Run
        ws.run(net)
        res = extractor.extract(ws, input_record, net.output_record())
        o = res.training_input
        npt.assert_array_equal(reward, o.reward.numpy())
        npt.assert_array_equal(action, o.action.numpy())
        npt.assert_array_equal(
            possible_next_actions[0], o.possible_next_actions.lengths.numpy()
        )
        npt.assert_array_equal(
            possible_next_actions[1], o.possible_next_actions.actions.numpy()
        )
        npt.assert_array_equal(
            self.expected_state_features(), o.state.float_features.numpy()
        )
        npt.assert_array_equal(
            self.expected_next_state_features(), o.next_state.float_features.numpy()
        )
Example #8
    def __init__(self,
                 model,
                 input_record,
                 name='select_record_by_context',
                 check_field_metas=True,
                 use_copy=False,
                 default_output_record_field=None,
                 **kwargs):
        super(SelectRecordByContext, self).__init__(model, name, input_record,
                                                    **kwargs)

        assert isinstance(input_record, schema.Struct)
        assert len(input_record) > 1

        self.use_copy = use_copy
        self.default_output_record = (
            input_record[default_output_record_field] if
            (default_output_record_field is not None) else None)
        ref_record = input_record[0]
        for record in input_record:
            assert schema.equal_schemas(record,
                                        ref_record,
                                        check_field_metas=check_field_metas)

        self.output_schema = schema.NewRecord(model.net, ref_record)
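
The loop above uses schema.equal_schemas to require that every sub-record
match the first one; a minimal sketch of that check in isolation:

    import numpy as np
    from caffe2.python import schema

    a = schema.Struct(('x', schema.Scalar(np.float32)))
    b = schema.Struct(('x', schema.Scalar(np.float32)))
    # check_field_metas=False ignores per-field Metadata when comparing
    assert schema.equal_schemas(a, b, check_field_metas=False)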
Example #9
    def __init__(self,
                 model,
                 input_record,
                 seed=0,
                 modulo=None,
                 use_hashing=True,
                 name='sparse_feature_hash',
                 **kwargs):
        super(SparseFeatureHash, self).__init__(model, name, input_record,
                                                **kwargs)

        self.seed = seed
        self.use_hashing = use_hashing
        if schema.equal_schemas(input_record, IdList):
            self.modulo = modulo or self.extract_hash_size(
                input_record.items.metadata)
            metadata = schema.Metadata(
                categorical_limit=self.modulo,
                feature_specs=input_record.items.metadata.feature_specs,
                expected_value=input_record.items.metadata.expected_value)
            with core.NameScope(name):
                self.output_schema = schema.NewRecord(model.net, IdList)
            self.output_schema.items.set_metadata(metadata)

        elif schema.equal_schemas(input_record, IdScoreList):
            self.modulo = modulo or self.extract_hash_size(
                input_record.keys.metadata)
            metadata = schema.Metadata(
                categorical_limit=self.modulo,
                feature_specs=input_record.keys.metadata.feature_specs,
                expected_value=input_record.keys.metadata.expected_value)
            with core.NameScope(name):
                self.output_schema = schema.NewRecord(model.net, IdScoreList)
            self.output_schema.keys.set_metadata(metadata)

        else:
            assert False, "Input type must be one of (IdList, IdScoreList)"

        assert self.modulo >= 1, 'Unexpected modulo: {}'.format(self.modulo)
        if input_record.lengths.metadata:
            self.output_schema.lengths.set_metadata(
                input_record.lengths.metadata)

        # operators in this layer do not have CUDA implementation yet.
        # In addition, since the sparse feature keys that we are hashing are
        # typically on CPU originally, it makes sense to have this layer on CPU.
        self.tags.update([Tags.CPU_ONLY])
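
NewRecord is called inside core.NameScope(name) so the output blobs are
prefixed with the layer name; a sketch (the blob name shown is illustrative):

    import numpy as np
    from caffe2.python import core, schema

    net = core.Net('demo')
    with core.NameScope('sparse_feature_hash'):
        rec = schema.NewRecord(net, schema.Scalar(np.int64))
    # the blob now carries the scope prefix, e.g. 'sparse_feature_hash/...'
    print(rec.field_blobs()[0])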
Example #10
def first_field_schema_init(self, model, input_record, *args, **kwargs):
    ModelLayer.__init__(self, model, self.operator, input_record, **kwargs)
    assert self.operator is not None, "Trying to create an invalid operator layer"
    assert isinstance(input_record, schema.Struct),\
        "Operator {0} expects schema.Struct as input, received {1} instead".\
        format(self.operator, input_record)
    self.args = args
    self.output_schema = schema.NewRecord(self.model.net, input_record[0])
Example #11
    def __init__(self,
                 model,
                 input_record,
                 num_outputs,
                 function,
                 name='functional',
                 **kwargs):
        super(Functional, self).__init__(model, name, input_record, **kwargs)
        self._function = function

        with scope.NameScope(self.name):
            self.output_schema = schema.NewRecord(model.net,
                                                  schema.RawTuple(num_outputs))

        # Fake execution of the function to infer shapes and types automatically
        had_issues = False
        try:
            type_net = core.Net('_temp_type_and_shape_inference_net')
            schema.InitEmptyRecord(type_net, input_record, enforce_types=True)

            function(type_net, self.input_record, self.output_schema)
            (shapes, types) = workspace.InferShapesAndTypes([type_net], {})
            for i in range(num_outputs):
                blob = self.output_schema[i]()
                if blob not in types or blob not in shapes:
                    had_issues = True
                    continue
                if shapes[blob] == []:
                    # Scalar type
                    shape = tuple()
                elif shapes[blob][0] == 0:
                    shape = tuple(shapes[blob][1:])
                else:
                    # If batch dimension is not first - give up on shape
                    # inference for that blob
                    had_issues = True
                    continue

                # TODO(amalevich): Move it to some shared library
                dtype = None
                if types[blob] == caffe2_pb2.TensorProto.DOUBLE:
                    dtype = (np.float64, shape)
                elif types[blob] == caffe2_pb2.TensorProto.FLOAT:
                    dtype = (np.float32, shape)
                elif types[blob] == caffe2_pb2.TensorProto.INT32:
                    dtype = (np.int32, shape)
                elif types[blob] == caffe2_pb2.TensorProto.INT64:
                    dtype = (np.int64, shape)

                if dtype is not None:
                    self.output_schema[i].set_type(dtype)
        except TypeError as ex:
            had_issues = True
            logger.warning(str(ex))

        if had_issues:
            logger.warning("Type inference had problems for layer: {}".format(
                self.name))
Example #12
    def create_extra_input_record(self, net):
        return net.input_record() + schema.NewRecord(
            net,
            schema.Struct(
                ("reward", schema.Scalar()),
                ("action_probability", schema.Scalar()),
                ("step", schema.Scalar()),
            ),
        )
Example #13
    def __init__(self, model, input_record, name='gather_record', **kwargs):
        super(GatherRecord, self).__init__(model, name, input_record, **kwargs)

        assert 'indices' in input_record
        assert 'record' in input_record

        self.output_schema = schema.NewRecord(
            model.net, input_record.record.clone_schema())

        self._indices = self.input_record.indices()
Example #14
    def __init__(self, model, input_record, name='select_record_by_context',
                 check_field_metas=True, **kwargs):
        super(SelectRecordByContext, self).__init__(model, name, input_record,
                                                    **kwargs)

        assert isinstance(input_record, schema.Struct)
        assert len(input_record) > 1

        ref_record = input_record[0]
        for record in input_record:
            assert schema.equal_schemas(record, ref_record,
                                        check_field_metas=check_field_metas)

        self.output_schema = schema.NewRecord(model.net, ref_record)
Example #15
    def __init__(self,
                 model,
                 input_record,
                 dropout_prob_train,
                 dropout_prob_eval,
                 dropout_prob_predict,
                 replacement_value,
                 name='sparse_dropout',
                 **kwargs):

        super(SparseDropoutWithReplacement,
              self).__init__(model, name, input_record, **kwargs)
        assert schema.equal_schemas(input_record,
                                    IdList), "Incorrect input type"

        self.dropout_prob_train = float(dropout_prob_train)
        self.dropout_prob_eval = float(dropout_prob_eval)
        self.dropout_prob_predict = float(dropout_prob_predict)
        self.replacement_value = int(replacement_value)
        assert (self.dropout_prob_train >= 0 and
                self.dropout_prob_train <= 1.0), \
            "Expected 0 <= dropout_prob_train <= 1, but got %s" \
            % self.dropout_prob_train
        assert (self.dropout_prob_eval >= 0 and
                self.dropout_prob_eval <= 1.0), \
            "Expected 0 <= dropout_prob_eval <= 1, but got %s" \
            % dropout_prob_eval
        assert (self.dropout_prob_predict >= 0 and
                self.dropout_prob_predict <= 1.0), \
            "Expected 0 <= dropout_prob_predict <= 1, but got %s" \
            % dropout_prob_predict
        assert (self.dropout_prob_train > 0 or
                self.dropout_prob_eval > 0 or
                self.dropout_prob_predict > 0), \
            "At least one of the train/eval/predict dropout ratios must be > 0"

        self.output_schema = schema.NewRecord(model.net, IdList)
        if input_record.lengths.metadata:
            self.output_schema.lengths.set_metadata(
                input_record.lengths.metadata)
        if input_record.items.metadata:
            self.output_schema.items.set_metadata(input_record.items.metadata)
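
The trailing set_metadata calls exist because NewRecord clones only the schema
structure, not per-field metadata; a generic sketch of the pattern, with
hypothetical in_rec/out_rec records:

    # Copy metadata field-by-field from the input to the fresh output record.
    for field in ('lengths', 'items'):
        meta = getattr(in_rec, field).metadata
        if meta:
            getattr(out_rec, field).set_metadata(meta)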
Example #16
    def __init__(self, model, input_record, name='merged'):
        super(MergeIdLists, self).__init__(model, name, input_record)
        assert all(schema.equal_schemas(x, IdList) for x in input_record), \
            "Inputs to MergeIdLists should all be IdLists."

        assert all(record.items.metadata is not None
                   for record in self.input_record), \
            "Features without metadata are not supported"

        merge_dim = max(
            get_categorical_limit(record) for record in self.input_record)
        assert merge_dim is not None, "Unbounded features are not supported"

        self.output_schema = schema.NewRecord(
            model.net,
            schema.List(
                schema.Scalar(
                    np.int64,
                    blob=model.net.NextBlob(name),
                    metadata=schema.Metadata(categorical_limit=merge_dim))))
Example #17
    def testMergeIdListsLayer(self, num_inputs, batch_size):
        inputs = []
        for _ in range(num_inputs):
            lengths = np.random.randint(5, size=batch_size).astype(np.int32)
            size = lengths.sum()
            values = np.random.randint(1, 10, size=size).astype(np.int64)
            inputs.append(lengths)
            inputs.append(values)
        input_schema = schema.Tuple(*[
            schema.List(
                schema.Scalar(dtype=np.int64,
                              metadata=schema.Metadata(categorical_limit=20)))
            for _ in range(num_inputs)
        ])

        input_record = schema.NewRecord(self.model.net, input_schema)
        schema.FeedRecord(input_record, inputs)
        output_schema = self.model.MergeIdLists(input_record)
        assert schema.equal_schemas(output_schema,
                                    IdList,
                                    check_field_names=False)
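
The interleaved lengths/values pairs in inputs above match FeedRecord's
contract: one array per leaf blob, in the record's field order. A minimal
sketch for a single List record:

    import numpy as np
    from caffe2.python import core, schema

    net = core.Net('feed_demo')
    rec = schema.NewRecord(net, schema.List(schema.Scalar(np.int64)))
    lengths = np.array([2, 1], dtype=np.int32)    # leaf 1: lengths
    values = np.array([7, 8, 9], dtype=np.int64)  # leaf 2: values
    schema.FeedRecord(rec, [lengths, values])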
Example #18
    def testSparseLookup(self):
        record = schema.NewRecord(self.model.net, schema.Struct(
            ('sparse', schema.Struct(
                ('sparse_feature_0', schema.List(
                    schema.Scalar(np.int64,
                                  metadata=schema.Metadata(categorical_limit=1000)))),
            )),
        ))
        embedding_dim = 64
        embedding_after_pooling = self.model.SparseLookup(
            record.sparse.sparse_feature_0, [embedding_dim], 'Sum')
        self.model.output_schema = embedding_after_pooling
        self.assertEqual(
            schema.Scalar((np.float32, (embedding_dim, ))),
            embedding_after_pooling
        )

        train_init_net, train_net = self.get_training_nets()

        init_ops = self.assertNetContainOps(
            train_init_net,
            [
                OpSpec("UniformFill", None, None),
                OpSpec("ConstantFill", None, None),
            ]
        )
        sparse_lookup_op_spec = OpSpec(
            'SparseLengthsSum',
            [
                init_ops[0].output[0],
                record.sparse.sparse_feature_0.items(),
                record.sparse.sparse_feature_0.lengths(),
            ],
            [embedding_after_pooling()]
        )
        self.assertNetContainOps(train_net, [sparse_lookup_op_spec])

        predict_net = self.get_predict_net()
        self.assertNetContainOps(predict_net, [sparse_lookup_op_spec])
Example #19
    def __init__(self, model, input_record, output_names_or_num, function,
                 name='functional', output_dtypes=None, **kwargs):

        # allow coercion
        input_record = schema.as_record(input_record)

        super(Functional, self).__init__(model, name, input_record, **kwargs)
        self._function = function
        self._kwargs = kwargs
        return_struct = (
            isinstance(output_names_or_num, list) or
            (isinstance(output_names_or_num, six.integer_types) and
             output_names_or_num != 1)
        )

        with scope.NameScope(self.name, reset=True):
            if isinstance(output_names_or_num, int):
                struct_output_schema = schema.NewRecord(
                    model.net, schema.RawTuple(output_names_or_num))
            elif isinstance(output_names_or_num, schema.Field):
                self.output_schema = output_names_or_num.clone(keep_blobs=True)
                return
            else:
                if not isinstance(output_names_or_num, list):
                    output_names_or_num = [output_names_or_num]
                out_tuple = [(out, np.void) for out in output_names_or_num]
                struct_output_schema = schema.NewRecord(
                    model.net, schema.Struct(*out_tuple))

        num_outputs = len(struct_output_schema.field_blobs())

        # functional layer returns Struct if more than one outputs or output is
        # a list, otherwise Scalar
        if return_struct:
            self.output_schema = struct_output_schema
        else:
            self.output_schema = struct_output_schema[0]

        # If output_dtypes is provided, use it for output schema. Otherwise
        # the shape and type will be inferred.
        if output_dtypes is not None:
            if not isinstance(output_dtypes, list):
                output_dtypes = [output_dtypes] * num_outputs
            assert len(output_dtypes) == num_outputs
            for dtype, scalar in zip(output_dtypes,
                                     self.output_schema.all_scalars()):
                scalar.set_type(dtype)
            return

        # Fake execution of the function to infer shapes and types automatically
        had_issues = False
        try:
            type_net = core.Net('_temp_type_and_shape_inference_net')
            schema.InitEmptyRecord(type_net, input_record, enforce_types=True)

            function(type_net, self.input_record, self.output_schema, **kwargs)
            (shapes, types) = workspace.InferShapesAndTypes([type_net], {})
            for i in range(num_outputs):
                scalar_schema = (self.output_schema[i] if return_struct
                                 else self.output_schema)
                blob = scalar_schema()
                if blob not in types or blob not in shapes:
                    had_issues = True
                    continue
                if shapes[blob] == []:
                    # Scalar type
                    shape = tuple()
                elif shapes[blob][0] == 0:
                    shape = tuple(shapes[blob][1:])
                else:
                    logger.warning("unexpeced shape: {}".format(shapes[blob]))
                    # If batch dimension is not first - give up on shape
                    # inference for that blob
                    had_issues = True
                    continue

                # TODO(amalevich): Move it to some shared library
                dtype = None
                if types[blob] == caffe2_pb2.TensorProto.DOUBLE:
                    dtype = (np.float64, shape)
                elif types[blob] == caffe2_pb2.TensorProto.FLOAT:
                    dtype = (np.float32, shape)
                elif types[blob] == caffe2_pb2.TensorProto.INT32:
                    dtype = (np.int32, shape)
                elif types[blob] == caffe2_pb2.TensorProto.INT64:
                    dtype = (np.int64, shape)

                if dtype is not None:
                    scalar_schema.set_type(dtype)
        except TypeError as ex:
            had_issues = True
            logger.warning(str(ex))

        if had_issues:
            logger.warning(
                "Type inference had problems for layer: {}".format(self.name))
Example #20
    def add_trainer_extra_schema(self, trainer_extra_schema):
        trainer_extra_record = schema.NewRecord(self.net, trainer_extra_schema)
        self._trainer_extra_schema += trainer_extra_record
Example #21
def simple_init(self, model, input_record, *args, **kwargs):
    ModelLayer.__init__(self, model, self.operator, input_record, **kwargs)
    assert self.operator is not None, "Trying to create an invalid operator layer"
    self.args = args
    self.output_schema = schema.NewRecord(self.model.net, input_record)
Example #22
    def new_record(self, schema_obj):
        return schema.NewRecord(self.model.net, schema_obj)
Example #23
    def __init__(self,
                 model,
                 input_record,
                 seed=0,
                 modulo=None,
                 use_hashing=True,
                 use_divide_mod=False,
                 divisor=None,
                 name='sparse_feature_hash',
                 **kwargs):
        super(SparseFeatureHash, self).__init__(model, name, input_record,
                                                **kwargs)

        assert not (use_hashing and use_divide_mod), \
            "use_hashing and use_divide_mod cannot both be True."

        if use_divide_mod:
            assert divisor >= 1, 'Unexpected divisor: {}'.format(divisor)

            self.divisor = self.create_param(
                param_name='divisor',
                shape=[1],
                initializer=('GivenTensorInt64Fill', {
                    'values': np.array([divisor])
                }),
                optimizer=model.NoOptim)

        self.seed = seed
        self.use_hashing = use_hashing
        self.use_divide_mod = use_divide_mod

        if schema.equal_schemas(input_record, IdList):
            self.modulo = modulo or self.extract_hash_size(
                input_record.items.metadata)
            metadata = schema.Metadata(
                categorical_limit=self.modulo,
                feature_specs=input_record.items.metadata.feature_specs,
                expected_value=input_record.items.metadata.expected_value)
            with core.NameScope(name):
                self.output_schema = schema.NewRecord(model.net, IdList)
            self.output_schema.items.set_metadata(metadata)

        elif schema.equal_schemas(input_record, IdScoreList):
            self.modulo = modulo or self.extract_hash_size(
                input_record.keys.metadata)
            metadata = schema.Metadata(
                categorical_limit=self.modulo,
                feature_specs=input_record.keys.metadata.feature_specs,
                expected_value=input_record.keys.metadata.expected_value)
            with core.NameScope(name):
                self.output_schema = schema.NewRecord(model.net, IdScoreList)
            self.output_schema.keys.set_metadata(metadata)

        else:
            assert False, "Input type must be one of (IdList, IdScoreList)"

        assert self.modulo >= 1, 'Unexpected modulo: {}'.format(self.modulo)
        if input_record.lengths.metadata:
            self.output_schema.lengths.set_metadata(
                input_record.lengths.metadata)

        # operators in this layer do not have CUDA implementation yet.
        # In addition, since the sparse feature keys that we are hashing are
        # typically on CPU originally, it makes sense to have this layer on CPU.
        self.tags.update([Tags.CPU_ONLY])
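
A simplified numpy sketch of the two bucketing paths; this is not the exact
caffe2 operator sequence, and the hash below is only a stand-in:

    import numpy as np

    def bucketize(ids, modulo, use_hashing=True, use_divide_mod=False,
                  divisor=None, seed=0):
        ids = np.asarray(ids, dtype=np.int64)
        if use_hashing:
            # stand-in for the seeded hashing op; only the overall shape of
            # the computation (hash, then modulo) is intended to be accurate
            ids = np.array([hash((seed, int(i))) for i in ids])
        elif use_divide_mod:
            ids = ids // divisor
        return np.abs(ids) % modulo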