Code example #1
 def test_parameter_sharing_nested_scopes(self):
     # Test parameter sharing
     with scope.NameScope('global_scope'):
         with ParameterSharing({'model_b': 'model_a'}):
             param_global = parameter_sharing_context.get_parameter_name(
                 'w')
             self.assertEquals(param_global, 'global_scope/w')
             # This scope is overridden to match 'model_a'
             with scope.NameScope('model_b'):
                 with ParameterSharing({'shared_scope': ''}):
                     param_4 = parameter_sharing_context.get_parameter_name(
                         'w')
                     self.assertEquals(param_4, 'global_scope/model_a/w')
                     with scope.NameScope('shared_scope'):
                         param_5 = parameter_sharing_context.\
                             get_parameter_name('w')
                         self.assertEquals(param_5,
                                           'global_scope/model_a/w')
             # This scope is supposed to have no sharing
             with scope.NameScope('model_c'):
                 with ParameterSharing({'shared_scope': ''}):
                     param_4 = parameter_sharing_context.get_parameter_name(
                         'w')
                     self.assertEquals(param_4, 'global_scope/model_c/w')
                     with scope.NameScope('shared_scope'):
                         param_5 = parameter_sharing_context.\
                             get_parameter_name('w')
                         self.assertEquals(param_5,
                                           'global_scope/model_c/w')
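A quick standalone sketch of the same override, not taken from the test file above: it only restates the 'model_b' -> 'model_a' remapping from code example #1, and the import paths are an assumption based on the usual caffe2.python module layout.

from caffe2.python import scope
from caffe2.python.modeling.parameter_sharing import (  # assumed module path
    ParameterSharing,
    parameter_sharing_context,
)

with scope.NameScope('global_scope'):
    with ParameterSharing({'model_b': 'model_a'}):
        with scope.NameScope('model_b'):
            # 'model_b' is remapped to 'model_a', so this is expected to print
            # 'global_scope/model_a/w'
            print(parameter_sharing_context.get_parameter_name('w'))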
Code example #2
    def test_layer_parameter_name(self):
        output_dims = 2
        with scope.NameScope('global_scope'):
            fc1_output = self.model.FC(
                self.model.input_feature_schema.float_features,
                output_dims
            )
            self.assertEquals(self.model.layers[-1].w, 'global_scope/fc/w')
            self.assertEquals(fc1_output(), 'global_scope/fc/output')

            with scope.NameScope('nested_scope'):
                fc2_output = self.model.FC(
                    fc1_output,
                    output_dims
                )
                self.assertEquals(self.model.layers[-1].w,
                                  'global_scope/nested_scope/fc/w')
                self.assertEquals(fc2_output(),
                                  'global_scope/nested_scope/fc/output')

                fc3_output = self.model.FC(
                    fc1_output,
                    output_dims
                )
                self.assertEquals(self.model.layers[-1].w,
                                  'global_scope/nested_scope/fc_auto_0/w')
                self.assertEquals(fc3_output(),
                                  'global_scope/nested_scope/fc_auto_0/output')
Code example #3
 def test_parameter_sharing_default_scopes(self):
     # Test default scopes with no sharing
     param_1 = parameter_sharing_context.get_parameter_name('w')
     self.assertEquals(param_1, 'w')
     with scope.NameScope('scope'):
         param_2 = parameter_sharing_context.get_parameter_name('w')
         self.assertEquals(param_2, 'scope/w')
         with scope.NameScope('scope_2'):
             param_3 = parameter_sharing_context.get_parameter_name('w')
             self.assertEquals(param_3, 'scope/scope_2/w')
Code example #4
 def test_deep_hierarchy(self):
     model = model_helper.ModelHelper(name="test")
     with ParameterSharing({'a': 'b'}):
         with scope.NameScope('a'):
             with ParameterSharing({'c': 'd'}):
                 with scope.NameScope('c'):
                     with ParameterSharing({'e': 'f'}):
                         with scope.NameScope('e'):
                             p = model.create_param(
                                 'w',
                                 shape=[2],
                                 initializer=Initializer("ConstantFill"))
     self.assertNotEqual(model.get_param_info(p), None)
Code example #5
 def test_parameter_sharing_subscopes(self):
     # Sharing only one of the subscopes
     with ParameterSharing({'global_scope/b': 'global_scope/a'}):
         with scope.NameScope('global_scope'):
             param_6 = parameter_sharing_context.get_parameter_name('w')
             self.assertEquals(param_6, 'global_scope/w')
             with scope.NameScope('a'):
                 param_7 = parameter_sharing_context.get_parameter_name('w')
                 self.assertEquals(param_7, 'global_scope/a/w')
             with scope.NameScope('b'):
                 param_8 = parameter_sharing_context.get_parameter_name('w')
                 self.assertEquals(param_8, 'global_scope/a/w')
             with scope.NameScope('c'):
                 param_9 = parameter_sharing_context.get_parameter_name('w')
                 self.assertEquals(param_9, 'global_scope/c/w')
Code example #6
    def test_layer_duplicated_parameter_init(self):
        output_dims = 2
        with scope.NameScope('global_scope'):
            with ParameterSharing({'new_fc': 'shared_fc'}):
                self.model.FC(
                    self.model.input_feature_schema.float_features,
                    output_dims,
                    name='shared_fc'
                )
                self.model.FC(
                    self.model.input_feature_schema.float_features,
                    output_dims,
                    name='new_fc'
                )

        train_init_net = core.Net('train_init_net')
        train_net = core.Net('train_net')
        for layer in self.model.layers:
            layer.add_operators(train_net, train_init_net)
        op_outputs = []
        for op in train_init_net._net.op:
            op_outputs.extend(op.output)

        # only fill these parameter blobs once
        self.assertEquals(
            sorted(op_outputs),
            ['global_scope/shared_fc/b', 'global_scope/shared_fc/w']
        )
Code example #7
    def apply_over_sequence(
        self,
        model,
        inputs,
        seq_lengths,
        initial_states,
        outputs_with_grads=None,
    ):
        inputs = self.cell.prepare_input(model, inputs)

        # Now they are blob references - outputs of splitting the input sequence
        split_inputs = model.net.Split(
            inputs,
            [str(inputs) + "_timestep_{}".format(i) for i in range(self.T)],
            axis=0)
        if self.T == 1:
            split_inputs = [split_inputs]

        states = initial_states
        all_states = []
        for t in range(0, self.T):
            scope_name = "timestep_{}".format(t)
            # Parameters of all timesteps are shared
            with ParameterSharing({scope_name: ''}),\
                 scope.NameScope(scope_name):
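                # Mapping scope_name to '' redirects parameter names back to
                # the enclosing scope, so every timestep resolves to the same
                # parameter blobs (see the ParameterSharing examples above).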
                timestep = model.param_init_net.ConstantFill(
                    [],
                    "timestep",
                    value=t,
                    shape=[1],
                    dtype=core.DataType.INT32,
                    device_option=core.DeviceOption(caffe2_pb2.CPU))
                states = self.cell._apply(
                    model=model,
                    input_t=split_inputs[t],
                    seq_lengths=seq_lengths,
                    states=states,
                    timestep=timestep,
                )
            all_states.append(states)

        all_states = zip(*all_states)
        all_states = [
            model.net.Concat(list(full_output), [
                str(full_output[0])[len("timestep_0/"):] + "_concat",
                str(full_output[0])[len("timestep_0/"):] + "_concat_info"
            ],
                             axis=0)[0] for full_output in all_states
        ]
        outputs = tuple(
            six.next(it)
            for it in itertools.cycle([iter(all_states),
                                       iter(states)]))
        outputs_without_grad = set(range(
            len(outputs))) - set(outputs_with_grads)
        for i in outputs_without_grad:
            model.net.ZeroGradient(outputs[i], [])
        logging.debug("Added 0 gradients for blobs: %s",
                      [outputs[i] for i in outputs_without_grad])
        return None, outputs
Code example #8
File: scope_test.py  Project: GeekLiB/caffe2-master
    def testMultiThreaded(self):
        """
        Test that name/device scope are properly local to the thread
        and don't interfere
        """
        global SUCCESS_COUNT
        self.assertEquals(scope.CurrentNameScope(), "")
        self.assertEquals(scope.CurrentDeviceScope(), None)

        threads = []
        for i in range(4):
            threads.append(
                threading.Thread(
                    target=thread_runner,
                    args=(i, self),
                ))
        for t in threads:
            t.start()

        with scope.NameScope("master"):
            self.assertEquals(scope.CurrentDeviceScope(), None)
            self.assertEquals(scope.CurrentNameScope(), "master/")
            for t in threads:
                t.join()

            self.assertEquals(scope.CurrentNameScope(), "master/")
            self.assertEquals(scope.CurrentDeviceScope(), None)

        # Ensure all threads succeeded
        self.assertEquals(SUCCESS_COUNT, 4)
Code example #9
 def add_operators(self,
                   net,
                   init_net=None,
                   context=InstantiationContext.TRAINING):
     # The namescope below should guarantee that all intermediate blobs are
     # associated with the layer that produces them
     with scope.NameScope(self.name):
         if context not in {
                 InstantiationContext.PREDICTION, InstantiationContext.EVAL,
                 InstantiationContext.ACCUMULATE_PRED
         }:
             assert init_net, (
                 "Only prediction and eval context don't need init_net")
         if init_net:
             for param in self.params:
                 # TODO(amalevich): Either return back to lambdas, that add
                 # all params (looks a bit safer and breaking less
                 # abstractions) or extend Net interface to this type of
                 # operations better
                 init_net._net.op.extend([param.initializer])
         if context == InstantiationContext.TRAINING:
             self.add_train_ops(net)
         elif context == InstantiationContext.EVAL:
             self.add_eval_ops(net)
         elif context == InstantiationContext.ACCUMULATE_PRED:
             self.add_ops_to_accumulate_pred(net)
         else:
             self.add_ops(net)
Code example #10
    def test_get_params(self):
        def param(x):
            return core.ScopedBlobReference(x)

        def to_str_list(x):
            return sorted([str(p) for p in x])

        model = ModelHelper(name="test_model")
        model.AddParameter(param("a"))
        model.AddParameter(param("b"), tags=ParameterTags.COMPUTED_PARAM)
        with scope.NameScope("c"):
            model.AddParameter(param("a"))
            model.AddParameter(param("d"), tags=ParameterTags.COMPUTED_PARAM)
            self.assertEqual(to_str_list(model.GetParams()), ['c/a'])
            self.assertEqual(to_str_list(model.GetComputedParams()), ['c/d'])
            self.assertEqual(to_str_list(model.GetAllParams()), ['c/a', 'c/d'])
            # Get AllParams from the global Scope
            self.assertEqual(to_str_list(model.GetAllParams('')),
                             ['a', 'b', 'c/a', 'c/d'])
        self.assertEqual(to_str_list(model.GetParams()), ['a', 'c/a'])
        self.assertEqual(to_str_list(model.GetComputedParams()), ['b', 'c/d'])
        self.assertEqual(to_str_list(model.GetAllParams()),
                         ['a', 'b', 'c/a', 'c/d'])
        self.assertEqual(to_str_list(model.GetAllParams('')),
                         ['a', 'b', 'c/a', 'c/d'])
        # Get AllParams from the scope 'c'
        self.assertEqual(to_str_list(model.GetAllParams('c')), ['c/a', 'c/d'])
        self.assertEqual(to_str_list(model.GetAllParams('c/')), ['c/a', 'c/d'])
Code example #11
File: gru_test.py  Project: AdityaTewari/caffe2
def _prepare_gru_unit_op(gc,
                         n,
                         d,
                         outputs_with_grads,
                         forward_only=False,
                         drop_states=False,
                         two_d_initial_states=None):
    print("Dims: (n,d) = ({},{})".format(n, d))

    def generate_input_state(n, d):
        if two_d_initial_states:
            return np.random.randn(n, d).astype(np.float32)
        else:
            return np.random.randn(1, n, d).astype(np.float32)

    model = ModelHelper(name='external')

    with scope.NameScope("test_name_scope"):
        hidden_t_prev, gates_t, seq_lengths, timestep = \
            model.net.AddScopedExternalInputs(
                "hidden_t_prev",
                "gates_t",
                'seq_lengths',
                "timestep",
            )
        workspace.FeedBlob(hidden_t_prev,
                           generate_input_state(n, d).astype(np.float32),
                           device_option=gc)
        workspace.FeedBlob(gates_t,
                           generate_input_state(n, 3 * d).astype(np.float32),
                           device_option=gc)

        hidden_t = model.net.GRUUnit(
            [
                hidden_t_prev,
                gates_t,
                seq_lengths,
                timestep,
            ],
            ['hidden_t'],
            forget_bias=0.0,
            drop_states=drop_states,
        )
        model.net.AddExternalOutputs(hidden_t)
        workspace.RunNetOnce(model.param_init_net)

        # 10 is used as a magic number to simulate some reasonable timestep
        # and generate some reasonable seq. lengths
        workspace.FeedBlob(seq_lengths,
                           np.random.randint(1, 10,
                                             size=(n, )).astype(np.int32),
                           device_option=gc)
        workspace.FeedBlob(
            timestep,
            np.random.randint(1, 10, size=(1, )).astype(np.int32),
            device_option=core.DeviceOption(caffe2_pb2.CPU),
        )
        print("Feed {}".format(timestep))

    return hidden_t, model.net
Code example #12
 def add_operators(self,
                   net,
                   init_net=None,
                   context=InstantiationContext.TRAINING):
     '''
     Adds layer training or initialization operators to the passed-in net.
     init_net can be None and can be called independently from add_init_params
     '''
     # The namescope below should guarantee that all intermediate blobs are
     # associated with the layer that produces them
     with scope.NameScope(self.name):
         if context not in {
                 InstantiationContext.PREDICTION, InstantiationContext.EVAL,
                 InstantiationContext.ACCUMULATE_PRED
         }:
             assert init_net, (
                 "Only prediction and eval context don't need init_net")
         if init_net:
             self.add_init_params(init_net)
         if context == InstantiationContext.TRAINING:
             self.add_train_ops(net)
         elif context == InstantiationContext.EVAL:
             self.add_eval_ops(net)
         elif context == InstantiationContext.ACCUMULATE_PRED:
             self.add_ops_to_accumulate_pred(net)
         else:
             self.add_ops(net)
Code example #13
File: scope_test.py  Project: GeekLiB/caffe2-master
    def testNamescopeBasic(self):
        self.assertEquals(scope.CurrentNameScope(), "")

        with scope.NameScope("test_scope"):
            self.assertEquals(scope.CurrentNameScope(), "test_scope/")

        self.assertEquals(scope.CurrentNameScope(), "")
Code example #14
File: functional.py  Project: yalechang/caffe2
    def __init__(self,
                 model,
                 input_record,
                 num_outputs,
                 function,
                 name='functional',
                 **kwargs):
        super(Functional, self).__init__(model, name, input_record, **kwargs)
        self._function = function

        with scope.NameScope(self.name):
            self.output_schema = schema.NewRecord(model.net,
                                                  schema.RawTuple(num_outputs))

        # Fake execution of the function to infer shapes and types automatically
        had_issues = False
        try:
            type_net = core.Net('_temp_type_and_shape_inference_net')
            schema.InitEmptyRecord(type_net, input_record, enforce_types=True)

            function(type_net, self.input_record, self.output_schema)
            (shapes, types) = workspace.InferShapesAndTypes([type_net], {})
            for i in range(num_outputs):
                blob = self.output_schema[i]()
                if blob not in types or blob not in shapes:
                    had_issues = True
                    continue
                if shapes[blob] == []:
                    # Scalar type
                    shape = tuple()
                elif shapes[blob][0] == 0:
                    shape = tuple(shapes[blob][1:])
                else:
                    # If batch dimension is not first - give up on shape
                    # inference for that blob
                    had_issues = True
                    continue

                # TODO(amalevich): Move it to some shared library
                dtype = None
                if types[blob] == caffe2_pb2.TensorProto.DOUBLE:
                    dtype = (np.float64, shape)
                elif types[blob] == caffe2_pb2.TensorProto.FLOAT:
                    dtype = (np.float32, shape)
                elif types[blob] == caffe2_pb2.TensorProto.INT32:
                    dtype = (np.int32, shape)
                elif types[blob] == caffe2_pb2.TensorProto.INT64:
                    dtype = (np.int64, shape)

                if dtype is not None:
                    self.output_schema[i].set_type(dtype)
        except TypeError as ex:
            had_issues = True
            logger.warning(str(ex))

        if had_issues:
            logger.warning("Type inference had problems for layer: {}".format(
                self.name))
Code example #15
File: layers.py  Project: zangcq/caffe2
 def create_param(self, param_name, shape, initializer, optimizer,
                    ps_param=None):
     with scope.NameScope(self.name, reset=True):
         param = self.model.create_param(param_name=param_name,
                                         shape=shape,
                                         initializer=initializer,
                                         optimizer=optimizer,
                                         ps_param=ps_param)
         self.params.append(param)
         return param.parameter
Code example #16
    def test_layer_shared_parameter_name_within_same_namescope(self):
        output_dims = 2
        with scope.NameScope('global_scope'):
            with ParameterSharing({'fc_auto_0': 'fc'}):
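                # 'fc_auto_0' is the name the second FC layer would otherwise
                # be assigned automatically (see code example #2), so mapping
                # it to 'fc' makes both layers reuse the same parameters.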
                self.model.FC(self.model.input_feature_schema.float_features,
                              output_dims)
                self.assertEquals(self.model.layers[-1].w, 'global_scope/fc/w')

                self.model.FC(self.model.input_feature_schema.float_features,
                              output_dims)
                self.assertEquals(self.model.layers[-1].w, 'global_scope/fc/w')
Code example #17
    def testNamescopeAssertion(self):
        self.assertEquals(scope.CurrentNameScope(), "")

        try:
            with scope.NameScope("test_scope"):
                self.assertEquals(scope.CurrentNameScope(), "test_scope/")
                raise Exception()
        except Exception:
            pass

        self.assertEquals(scope.CurrentNameScope(), "")
Code example #18
    def test_layer_shared_parameter_name_different_namescopes(self):
        output_dims = 2
        with scope.NameScope('global_scope'):
            with ParameterSharing({'scope_1': 'scope_0'}):
                with scope.NameScope('scope_0'):
                    fc1_output = self.model.FC(
                        self.model.input_feature_schema.float_features,
                        output_dims)
                    self.assertEquals(self.model.layers[-1].w,
                                      'global_scope/scope_0/fc/w')
                    self.assertEquals(fc1_output(),
                                      'global_scope/scope_0/fc/output')

                with scope.NameScope('scope_1'):
                    fc2_output = self.model.FC(
                        self.model.input_feature_schema.float_features,
                        output_dims)
                    self.assertEquals(self.model.layers[-1].w,
                                      'global_scope/scope_0/fc/w')
                    self.assertEquals(fc2_output(),
                                      'global_scope/scope_1/fc/output')
Code example #19
File: rnn_cell_test.py  Project: zj2089/caffe2
def _prepare_lstm(t, n, dim_in, create_lstm, outputs_with_grads,
                  forget_bias, memory_optim=False,
                  forward_only=False, drop_states=False, T=None,
                  two_d_initial_states=None, dim_out=None):
    if dim_out is None:
        dim_out = [dim_in]
    print("Dims: ", t, n, dim_in, dim_out)

    model = ModelHelper(name='external')

    if two_d_initial_states is None:
        two_d_initial_states = np.random.randint(2)

    def generate_input_state(n, d):
        if two_d_initial_states:
            return np.random.randn(n, d).astype(np.float32)
        return np.random.randn(1, n, d).astype(np.float32)

    states = []
    for layer_id, d in enumerate(dim_out):
        h, c = model.net.AddExternalInputs(
            "hidden_init_{}".format(layer_id),
            "cell_init_{}".format(layer_id),
        )
        states.extend([h, c])
        workspace.FeedBlob(h, generate_input_state(n, d).astype(np.float32))
        workspace.FeedBlob(c, generate_input_state(n, d).astype(np.float32))

    # Due to convoluted RNN scoping logic we make sure that things
    # work from a namescope
    with scope.NameScope("test_name_scope"):
        input_blob, seq_lengths = model.net.AddScopedExternalInputs(
            'input_blob', 'seq_lengths')

        outputs = create_lstm(
            model, input_blob, seq_lengths, states,
            dim_in=dim_in, dim_out=dim_out, scope="external/recurrent",
            outputs_with_grads=outputs_with_grads,
            memory_optimization=memory_optim,
            forget_bias=forget_bias,
            forward_only=forward_only,
            drop_states=drop_states,
            static_rnn_unroll_size=T,
        )

    workspace.RunNetOnce(model.param_init_net)

    workspace.FeedBlob(
        seq_lengths,
        np.random.randint(1, t + 1, size=(n,)).astype(np.int32)
    )
    return outputs, model.net, states + [input_blob]
Code example #20
    def test_layer_shared_parameter_name_different_shapes(self):
        output_dims = 2
        with scope.NameScope('global_scope'):
            with ParameterSharing({'fc_auto_0': 'fc'}):
                self.model.FC(self.model.input_feature_schema.float_features,
                              output_dims)
                self.assertEquals(self.model.layers[-1].w, 'global_scope/fc/w')

                with six.assertRaisesRegex(self, ValueError,
                                           'Got inconsistent shapes .*'):
                    self.model.FC(
                        self.model.input_feature_schema.float_features,
                        output_dims + 1)
Code example #21
    def testGetNonTrainableParams(self):
        m = seq2seq_model_helper.Seq2SeqModelHelper()

        m.AddParam('test_param1', init_value=1, trainable=True)
        p2 = m.AddParam('test_param2', init_value=2, trainable=False)

        self.assertEqual(m.GetNonTrainableParams(), [p2])

        with scope.NameScope('A', reset=True):
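            # reset=True makes the current namescope exactly 'A/', rather than
            # nesting 'A' under whatever scope might already be active.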
            p3 = m.AddParam('test_param3', init_value=3, trainable=False)
            self.assertEqual(m.GetNonTrainableParams(), [p3])

        self.assertEqual(m.GetNonTrainableParams(), [p2, p3])
Code example #22
    def create_param(self, param_name, shape, initializer, optimizer,
                     ps_param=None):
        with scope.NameScope(self.name, reset=True):
            param = self.model.create_param(param_name=param_name,
                                            shape=shape,
                                            initializer=initializer,
                                            optimizer=optimizer,
                                            ps_param=ps_param)

            # make sure we don't share parameters in the same layer
            assert all(param.parameter != p.parameter for p in self.params)

            self.params.append(param)
            return param.parameter
Code example #23
 def test_create_param(self):
     model = model_helper.ModelHelper(name="test")
     # Test default scopes with no sharing
     p1 = model.create_param('w',
                             shape=[2],
                             initializer=Initializer("ConstantFill"))
     with scope.NameScope('some_global_scope'):
         p2 = model.create_param('w',
                                 shape=[2],
                                 initializer=Initializer("ConstantFill"))
     self.assertNotEqual(model.get_param_info(p1), None)
     self.assertNotEqual(model.get_param_info(p2), None)
     self.assertNotEqual(model.get_param_info(p1), model.get_param_info(p2))
     model.Validate()
Code example #24
    def test_layer_shared_parameter_name_within_same_namescope_customized_name(
            self):
        output_dims = 2
        with scope.NameScope('global_scope'):
            with ParameterSharing({'new_fc': 'shared_fc'}):
                self.model.FC(self.model.input_feature_schema.float_features,
                              output_dims,
                              name='shared_fc')
                self.assertEquals(self.model.layers[-1].w,
                                  'global_scope/shared_fc/w')

                self.model.FC(self.model.input_feature_schema.float_features,
                              output_dims,
                              name='new_fc')
                self.assertEquals(self.model.layers[-1].w,
                                  'global_scope/shared_fc/w')
Code example #25
File: scope_test.py  Project: GeekLiB/caffe2-master
def thread_runner(idx, testobj):
    global SUCCESS_COUNT
    testobj.assertEquals(scope.CurrentNameScope(), "")
    testobj.assertEquals(scope.CurrentDeviceScope(), None)
    namescope = "namescope_{}".format(idx)
    dsc = core.DeviceOption(caffe2_pb2.CUDA, idx)
    with scope.DeviceScope(dsc):
        with scope.NameScope(namescope):
            testobj.assertEquals(scope.CurrentNameScope(), namescope + "/")
            testobj.assertEquals(scope.CurrentDeviceScope(), dsc)

            time.sleep(0.01 + idx * 0.01)
            testobj.assertEquals(scope.CurrentNameScope(), namescope + "/")
            testobj.assertEquals(scope.CurrentDeviceScope(), dsc)

    testobj.assertEquals(scope.CurrentNameScope(), "")
    testobj.assertEquals(scope.CurrentDeviceScope(), None)
    SUCCESS_COUNT += 1
Code example #26
File: layers.py  Project: gyh2556406/caffe2
 def add_operators(self, net, init_net=None,
                   context=InstantiationContext.TRAINING):
     # The namescope below should guarantee that all intermediate blobs are
     # associated with the layer that produces them
     with scope.NameScope(self.name):
         if context != InstantiationContext.PREDICTION:
             assert init_net,\
                 "Only prediction context can be used without init_net"
         if init_net:
             for param in self.params:
                 # TODO(amalevich): Either return back to lambdas, that add
                 # all params (looks a bit safer and breaking less
                 # abstractions) or extend Net interface to this type of
                 # operations better
                 init_net._net.op.extend([param.initializer])
         if context == InstantiationContext.TRAINING:
             self.add_train_ops(net)
         else:
             self.add_ops(net)
Code example #27
    def test_parameter_sharing_brew(self):
        # Test default scopes with no sharing
        model = model_helper.ModelHelper(name="test")
        data = model.net.AddExternalInput("data")
        fc1 = brew.fc(model, data, "fc1", dim_in=16, dim_out=16)
        # Shared params are expected to share the same shape and fail if it's
        # not true
        with self.assertRaises(AssertionError):
            _ = brew.fc(model, data, "fc1", dim_in=2, dim_out=2)  # noqa

        output_blobs = set()
        with scope.NameScope('some_global_scope'):
            with scope.NameScope('model_a'):
                output_blobs.add(str(brew.fc(model, fc1, 'output', 16, 16)))
            with ParameterSharing({'model_b': 'model_a'}),\
                    scope.NameScope('model_b'):
                with ParameterSharing({'shared_1': '', 'shared_2': ''}):
                    # All params in DenseLayers from shared_1, shared_2 and
                    # model_a are shared and will be pointing to:
                    # [some_global_scope/model_a/output_W,
                    #  some_global_scope/model_a/output_b]
                    with scope.NameScope('shared_1'):
                        output_blobs.add(
                            str(brew.fc(model, fc1, 'output', 16, 16)))
                    with scope.NameScope('shared_2'):
                        output_blobs.add(
                            str(brew.fc(model, fc1, 'output', 16, 16)))
                    # Params of this layer are not shared with anyone unless
                    # there is some explicit sharing with model_a/unshared (not
                    # in this example).
                    # Names of the blobs are
                    # [some_global_scope/model_a/unshared/output_W,
                    #  some_global_scope/model_a/unshared/output_b]
                    with scope.NameScope('unshared'):
                        output_blobs.add(
                            str(brew.fc(model, fc1, 'output', 16, 16)))

        self.assertEqual(len(model._parameters_info), 6)
        self.assertEqual(len(output_blobs), 4)
        self.assertEqual(sorted(model._parameters_info.keys()), [
            'fc1_b',
            'fc1_w',
            'some_global_scope/model_a/output_b',
            'some_global_scope/model_a/output_w',
            'some_global_scope/model_a/unshared/output_b',
            'some_global_scope/model_a/unshared/output_w',
        ])
        model.Validate()
Code example #28
 def build_decay(self, model, intervals, input_size, output_size,
                 namescope):
     with scope.NameScope(namescope):
         decays = brew.fc(
             model,
             intervals,
             self.scope('intervals_fc'),
             dim_in=input_size,
             dim_out=output_size,
             axis=2,
         )
         ZEROS = model.net.ConstantFill([decays],
                                        self.scope("ZEROS"),
                                        value=0.0)
         # clamp decays at zero via element-wise Max with ZEROS
         decays = model.net.Max([decays, ZEROS],
                                self.scope("max_intervals_fc"))
         decays = model.net.Negative([decays],
                                     self.scope("neg_max_interval_fc"))
         decays = model.net.Exp([decays], self.scope("decays"))
     return decays
Code example #29
    def __init__(self, model, input_record, output_names_or_num, function,
                 name='functional', output_dtypes=None, **kwargs):

        # allow coercion
        input_record = schema.as_record(input_record)

        super(Functional, self).__init__(model, name, input_record, **kwargs)
        self._function = function
        self._kwargs = kwargs
        return_struct = (
            isinstance(output_names_or_num, list) or
            (isinstance(output_names_or_num, six.integer_types) and
             output_names_or_num != 1)
        )

        with scope.NameScope(self.name, reset=True):
            if isinstance(output_names_or_num, int):
                struct_output_schema = schema.NewRecord(
                    model.net, schema.RawTuple(output_names_or_num))
            elif isinstance(output_names_or_num, schema.Field):
                self.output_schema = output_names_or_num.clone(keep_blobs=True)
                return
            else:
                if not isinstance(output_names_or_num, list):
                    output_names_or_num = [output_names_or_num]
                out_tuple = [(out, np.void) for out in output_names_or_num]
                struct_output_schema = schema.NewRecord(
                    model.net, schema.Struct(*out_tuple))

        num_outputs = len(struct_output_schema.field_blobs())

        # The functional layer returns a Struct if there is more than one
        # output or the output is a list; otherwise it returns a Scalar
        if return_struct:
            self.output_schema = struct_output_schema
        else:
            self.output_schema = struct_output_schema[0]

        # If output_dtypes is provided, use it for output schema. Otherwise
        # the shape and type will be inferred.
        if output_dtypes is not None:
            if not isinstance(output_dtypes, list):
                output_dtypes = [output_dtypes] * num_outputs
            assert len(output_dtypes) == num_outputs
            for dtype, scalar in zip(output_dtypes,
                                     self.output_schema.all_scalars()):
                scalar.set_type(dtype)
            return

        # Fake execution of the function to infer shapes and types automatically
        had_issues = False
        try:
            type_net = core.Net('_temp_type_and_shape_inference_net')
            schema.InitEmptyRecord(type_net, input_record, enforce_types=True)

            function(type_net, self.input_record, self.output_schema, **kwargs)
            (shapes, types) = workspace.InferShapesAndTypes([type_net], {})
            for i in range(num_outputs):
                scalar_schema = (self.output_schema[i] if return_struct
                                 else self.output_schema)
                blob = scalar_schema()
                if blob not in types or blob not in shapes:
                    had_issues = True
                    continue
                if shapes[blob] == []:
                    # Scalar type
                    shape = tuple()
                elif shapes[blob][0] == 0:
                    shape = tuple(shapes[blob][1:])
                else:
                    logger.warning("unexpected shape: {}".format(shapes[blob]))
                    # If batch dimension is not first - give up on shape
                    # inference for that blob
                    had_issues = True
                    continue

                # TODO(amalevich): Move it to some shared library
                dtype = None
                if types[blob] == caffe2_pb2.TensorProto.DOUBLE:
                    dtype = (np.float64, shape)
                elif types[blob] == caffe2_pb2.TensorProto.FLOAT:
                    dtype = (np.float32, shape)
                elif types[blob] == caffe2_pb2.TensorProto.INT32:
                    dtype = (np.int32, shape)
                elif types[blob] == caffe2_pb2.TensorProto.INT64:
                    dtype = (np.int64, shape)

                if dtype is not None:
                    scalar_schema.set_type(dtype)
        except TypeError as ex:
            had_issues = True
            logger.warning(str(ex))

        if had_issues:
            logger.warning(
                "Type inference had problems for layer: {}".format(self.name))
Code example #30
 def get_next_blob_reference(self, name):
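     # reset=True scopes the blob under this layer's own name only, instead of
     # appending self.name to the currently active namescope.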
     with scope.NameScope(self.name, reset=True):
         return self.model.net.NextScopedBlob(name)