def add_blobs(queue, num_samples):
    blob = core.BlobReference("blob")
    status = core.BlobReference("blob_status")
    for i in range(num_samples):
        data = self._create_test_tensor_protos(i)
        data = np.array([data], dtype=str)
        self._add_blob_to_queue(queue, data, blob, status)
Example #2

def add_blobs(queue, num_samples):
    blob = core.BlobReference("blob")
    status = core.BlobReference("blob_status")
    for i in range(num_samples):
        self._add_blob_to_queue(queue,
                                self._create_test_tensor_protos(i),
                                blob, status)
Example #3
    def test_checkpoint_params(self):
        def add_input_ops(model):
            pass

        def add_model_ops(model, loss_scale):
            model.NHWC2NCHW("data", "data_nchw")
            model.Conv("data_nchw",
                       'conv1',
                       3,
                       64,
                       weight_init=("MSRAFill", {}),
                       kernel=7,
                       stride=2,
                       pad=3,
                       no_bias=0)
            model.SpatialBN('conv1', 'conv1_spatbn_relu', 64, epsilon=1e-3)
            model.Relu('conv1_spatbn_relu', 'conv1_spatbn_relu')
            model.MaxPool('conv1_spatbn_relu', 'pool1', kernel=3, stride=2)
            model.FC('pool1', 'fc', dim_in=(64 * 56 * 56), dim_out=100)
            model.Sigmoid('fc', 'fc_sigm')
            model.Softmax('fc_sigm', 'softmax')
            model.LabelCrossEntropy(['softmax', 'label'], 'xent')
            loss = model.AveragedLoss('xent', 'loss')

            # Add a duplicate param init to ensure it does not cause issues
            model.param_init_net.ConstantFill([], ["fc_w"],
                                              shape=((64 * 56 * 56), 1000))
            return [loss]

        def add_optimizer(model):
            optimizer.build_sgd(model, 0.1, policy="fixed", momentum=0.9)

        model = cnn.CNNModelHelper(
            order="NHWC",
            name="test",
        )
        data_parallel_model.Parallelize_CPU(
            model,
            input_builder_fun=add_input_ops,
            forward_pass_builder_fun=add_model_ops,
            optimizer_builder_fun=add_optimizer,
            devices=[1, 2, 3],
        )

        # Only cpu_1 params should be returned (cpu_1 is the first device)
        checkpoint_params = data_parallel_model.GetCheckpointParams(model)
        for p in model.GetParams("cpu_1/"):
            self.assertTrue(p in checkpoint_params)
            self.assertTrue(p + "_momentum" in checkpoint_params)
        for p in model.GetParams("cpu_2/"):
            self.assertFalse(p in checkpoint_params)
        self.assertTrue(
            core.BlobReference("cpu_1/fc_w_momentum") in checkpoint_params)
        for c in model.GetComputedParams("cpu_1/"):
            self.assertTrue(c in checkpoint_params)
        for c in model.GetComputedParams("cpu_2/"):
            self.assertFalse(c in checkpoint_params)
        self.assertFalse(core.BlobReference("cpu_1/data") in checkpoint_params)
        self.assertTrue(
            core.BlobReference("optimizer_iteration") in checkpoint_params)
Example #4

def load_from_db(filename, db_type, device_option=None, *args, **kwargs):
    # global_init_net in meta_net_def will load parameters from
    # predictor_constants.PREDICTOR_DBREADER
    create_db = core.CreateOperator(
        'CreateDB', [],
        [core.BlobReference(predictor_constants.PREDICTOR_DBREADER)],
        db=filename, db_type=db_type)
    assert workspace.RunOperatorOnce(create_db), (
        'Failed to create db {}'.format(filename))

    # predictor_constants.META_NET_DEF is always stored before the parameters
    load_meta_net_def = core.CreateOperator(
        'Load',
        [core.BlobReference(predictor_constants.PREDICTOR_DBREADER)],
        [core.BlobReference(predictor_constants.META_NET_DEF)])
    assert workspace.RunOperatorOnce(load_meta_net_def)

    blob = workspace.FetchBlob(predictor_constants.META_NET_DEF)
    meta_net_def = serde.deserialize_protobuf_struct(
        blob if isinstance(blob, bytes)
        else str(blob).encode('utf-8'),
        metanet_pb2.MetaNetDef)

    if device_option is None:
        device_option = scope.CurrentDeviceScope()

    if device_option is not None:
        # Set the device options of all loaded blobs
        for kv in meta_net_def.nets:
            net = kv.value
            for op in net.op:
                op.device_option.CopyFrom(device_option)

    return meta_net_def
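
A hedged usage sketch for the helper above (the minidb path is hypothetical, and pulling the net back out via predictor_py_utils.GetNet is an illustrative assumption, not part of the example):

# Hypothetical usage: load a saved predictor and materialize its parameters.
from caffe2.python import workspace
from caffe2.python.predictor import predictor_py_utils

meta_net_def = load_from_db("/tmp/model.minidb", "minidb")  # hypothetical path
global_init = predictor_py_utils.GetNet(
    meta_net_def, predictor_constants.GLOBAL_INIT_NET_TYPE)
workspace.RunNetOnce(global_init)  # loads the parameters into the workspace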
Example #5
 def testBlobReferenceIsIndependentFromNameScope(self):
     blob_v = core.BlobReference("v")
     with core.NameScope("foo"):
         blob_w = core.BlobReference("w")
         with core.NameScope("bar"):
             blob_x = core.BlobReference("x")
     self.assertEqual(str(blob_v), "v")
     self.assertEqual(str(blob_w), "w")
     self.assertEqual(str(blob_x), "x")
Example #6
    def _enqueue(self, blob_name, queue, data_arr):
        '''
        Enqueue the correctly sized batch arrays to Caffe2's queue.
        '''
        scratch_name = self._namescope + blob_name + \
            "_scratch_" + self._input_source_name
        blob = core.BlobReference(scratch_name)
        status = core.BlobReference(scratch_name + "_status")
        workspace.FeedBlob(blob, data_arr, device_option=self._device_option)

        op = core.CreateOperator("SafeEnqueueBlobs", [queue, blob],
                                 [blob, status],
                                 device_option=self._device_option)
        workspace.RunOperatorOnce(op)
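
For context, SafeEnqueueBlobs reports queue state through its trailing status output: once the queue is closed it no longer blocks and instead sets status to true. A small sketch of how a producer loop might use it (hypothetical control flow, reusing the blob names above):

# Hypothetical producer check: stop feeding once the consumer closes the queue.
workspace.RunOperatorOnce(op)
queue_closed = workspace.FetchBlob(str(status))
if queue_closed:
    pass  # exit the enqueue thread; no more data will be accepted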
Example #7
    def testAddLoss(self):
        input_record_LR = self.new_record(
            schema.Struct(('label', schema.Scalar((np.float64, (1, )))),
                          ('prediction', schema.Scalar((np.float32, (2, )))),
                          ('weight', schema.Scalar((np.float64, (1, ))))))
        loss_LR = self.model.BatchLRLoss(input_record_LR)

        self.model.add_loss(loss_LR)
        assert 'unnamed' in self.model.loss
        self.assertEqual(schema.Scalar((np.float32, tuple())),
                         self.model.loss.unnamed)
        self.assertEqual(loss_LR, self.model.loss.unnamed)

        self.model.add_loss(loss_LR, 'addLoss')
        assert 'addLoss' in self.model.loss
        self.assertEqual(schema.Scalar((np.float32, tuple())),
                         self.model.loss.addLoss)
        self.assertEqual(loss_LR, self.model.loss.addLoss)

        self.model.add_loss(
            schema.Scalar(dtype=np.float32,
                          blob=core.BlobReference('loss_blob_1')), 'addLoss')
        assert 'addLoss_auto_0' in self.model.loss
        self.assertEqual(schema.Scalar((np.float32, tuple())),
                         self.model.loss.addLoss_auto_0)
        assert core.BlobReference(
            'loss_blob_1') in self.model.loss.field_blobs()

        self.model.add_loss(
            schema.Struct(
                ('structName',
                 schema.Scalar(dtype=np.float32,
                               blob=core.BlobReference('loss_blob_2')))),
            'addLoss')
        assert 'addLoss_auto_1' in self.model.loss
        self.assertEqual(
            schema.Struct(('structName', schema.Scalar(
                (np.float32, tuple())))), self.model.loss.addLoss_auto_1)
        assert core.BlobReference(
            'loss_blob_2') in self.model.loss.field_blobs()

        loss_in_tuple_0 = schema.Scalar(
            dtype=np.float32, blob=core.BlobReference('loss_blob_in_tuple_0'))

        loss_in_tuple_1 = schema.Scalar(
            dtype=np.float32, blob=core.BlobReference('loss_blob_in_tuple_1'))

        loss_tuple = schema.NamedTuple('loss_in_tuple',
                                       *[loss_in_tuple_0, loss_in_tuple_1])
        self.model.add_loss(loss_tuple, 'addLoss')
        assert 'addLoss_auto_2' in self.model.loss
        self.assertEqual(
            schema.Struct(
                ('loss_in_tuple_0', schema.Scalar((np.float32, tuple()))),
                ('loss_in_tuple_1', schema.Scalar((np.float32, tuple())))),
            self.model.loss.addLoss_auto_2)
        assert core.BlobReference('loss_blob_in_tuple_0')\
         in self.model.loss.field_blobs()
        assert core.BlobReference('loss_blob_in_tuple_1')\
         in self.model.loss.field_blobs()
Example #8
    def modify_net(self,
                   net,
                   init_net=None,
                   grad_map=None,
                   blob_to_device=None,
                   modify_output_record=False):

        for blob_name in self._blobs:
            blob = core.BlobReference(blob_name)
            assert net.BlobIsDefined(
                blob
            ), 'blob {} is not defined in net {} whose proto is {}'.format(
                blob, net.Name(), net.Proto())

            cast_blob = net.Cast(blob, to=core.DataType.FLOAT)
            stats_name = net.NextScopedBlob(prefix=blob +
                                            self._field_name_suffix)
            stats = net.Summarize(cast_blob, stats_name, to_file=0)
            net.Print(stats, [], every_n=self._logging_frequency)

            if modify_output_record:
                output_field_name = str(blob) + self._field_name_suffix
                output_scalar = schema.Scalar((np.float64, (1, )), stats)

                if net.output_record() is None:
                    net.set_output_record(
                        schema.Struct((output_field_name, output_scalar)))
                else:
                    net.AppendOutputRecordField(output_field_name,
                                                output_scalar)
Example #9
    def __call__(self, rec):
        assert not self._frozen
        prefix = '/worker:%d/' % len(self._blob_maps)
        blob_remap = {}
        for net in self.thread_init_nets:
            new_net, _ = core.clone_and_bind_net(net,
                                                 str(net) + prefix, prefix,
                                                 blob_remap)
            self._cloned_init_nets.append(new_net)

        new_net, remappings = core.clone_and_bind_net(self.net,
                                                      str(self.net) + prefix,
                                                      prefix, blob_remap, rec)

        if self._stop_signal is None:
            stop_signal = None
        elif str(self._stop_signal) in remappings:
            stop_signal = core.BlobReference(remappings[str(
                self._stop_signal)],
                                             net=new_net)
        else:
            stop_signal = self._stop_signal

        self._blob_maps.append(remappings)
        return Output([new_net], new_net.output_record(), stop_signal)
Example #10
    def test_bounded_grad_proj(self, X, left_open, right_open, eps, ub, lb, gc,
                               dc):
        if ub - (eps if right_open else 0.) < lb + (eps if left_open else 0.):
            return
        param = core.BlobReference("X")
        workspace.FeedBlob(param, X)
        train_init_net, train_net = self.get_training_nets()
        reg = regularizer.BoundedGradientProjection(lb=lb,
                                                    ub=ub,
                                                    left_open=left_open,
                                                    right_open=right_open,
                                                    epsilon=eps)
        output = reg(train_net,
                     train_init_net,
                     param,
                     by=RegularizationBy.ON_LOSS)
        reg(
            train_net,
            train_init_net,
            param,
            grad=None,
            by=RegularizationBy.AFTER_OPTIMIZER,
        )
        workspace.RunNetOnce(train_init_net)
        workspace.RunNetOnce(train_net)

        def ref(X):
            return np.clip(X, lb + (eps if left_open else 0.),
                           ub - (eps if right_open else 0.))

        assert output is None
        npt.assert_allclose(workspace.blobs[param], ref(X), atol=1e-7)
Example #11
def _ComputeBlobsToSync(model):
    '''
    We sync all blobs that are generated by the param init net and
    are 'data parallel', i.e., assigned to a GPU.
    '''
    sync_names = set()
    blobs_to_sync = []
    for op in model.param_init_net.Proto().op:
        dp_outputs = [
            o for o in op.output
            if o.startswith("{}_".format(model._device_prefix))
        ]
        sync_names.update([stripParamName(o) for o in dp_outputs])
        blobs_to_sync.extend(dp_outputs)

    # Sanity check
    diff = set(model._param_names) - sync_names
    assert diff == set(), \
       "Some params not instantiated in param init net: {}".format(diff)

    # Remove duplicates and sort
    blobs_to_sync = sorted(list(set(blobs_to_sync)))

    blobs_to_sync = [core.BlobReference(b) for b in blobs_to_sync]
    return (blobs_to_sync, sync_names)
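
A hypothetical illustration of the device-prefix naming convention this depends on, for one FC layer replicated on two GPUs (assuming model._device_prefix == "gpu"):

# param_init_net outputs: "gpu_0/fc_w", "gpu_0/fc_b", "gpu_1/fc_w", "gpu_1/fc_b"
# stripParamName("gpu_1/fc_w") -> "fc_w"   (drops the device scope)
# so sync_names == {"fc_w", "fc_b"}, while blobs_to_sync holds a BlobReference
# for every per-device copy, deduplicated and sorted.
blobs_to_sync, sync_names = _ComputeBlobsToSync(model)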
Example #12
 def testNameScopeWithOp(self):
     global_x = core.BlobReference("x")
     global_y = core.BlobReference("y")
     with core.NameScope("foo"):
         # Raw strings should have namescope prepended.
         op = core.CreateOperator("Relu", "x", "y")
         self.assertEqual(len(op.input), 1)
         self.assertEqual(op.input[0], "foo/x")
         self.assertEqual(len(op.output), 1)
         self.assertEqual(op.output[0], "foo/y")
         # BlobReferences should not.
         op = core.CreateOperator("Relu", global_x, global_y)
         self.assertEqual(len(op.input), 1)
         self.assertEqual(op.input[0], "x")
         self.assertEqual(len(op.output), 1)
         self.assertEqual(op.output[0], "y")
Example #13
    def modify_net(self,
                   net,
                   init_net=None,
                   grad_map=None,
                   blob_to_device=None):

        for blob_name in self._blobs:
            blob = core.BlobReference(blob_name)
            if not net.BlobIsDefined(blob):
                raise Exception('blob {0} is not defined in net {1}'.format(
                    blob, net.Name()))

            cast_blob = net.Cast(blob, to=core.DataType.FLOAT)
            stats_name = net.NextScopedBlob(prefix=blob + '_summary')
            stats = net.Summarize(cast_blob, stats_name, to_file=0)
            net.Print(stats, [], every_n=self._logging_frequency)

            output_field_name = str(blob) + '_summary'
            output_scalar = schema.Scalar((np.float64, (1, )), stats)

            if net.output_record() is None:
                net.set_output_record(
                    schema.Struct((output_field_name, output_scalar)))
            else:
                net.AppendOutputRecordField(output_field_name, output_scalar)
Example #14
    def test_create_net(self):
        action_blob = core.BlobReference("action")

        N = 10
        action_feature_ids = [100, 300, 200]
        serving_max_scale = np.array(action_feature_ids) / 100.0
        serving_min_scale = np.zeros(len(action_feature_ids)) - 5.0
        actions = np.random.randn(N, len(action_feature_ids)).astype(np.float32)
        workspace.FeedBlob(action_blob, actions)
        ot = ActorOutputTransformer(
            action_feature_ids, serving_max_scale, serving_min_scale
        )
        output_record = schema.Struct(("action", schema.Scalar(blob=action_blob)))
        nets = ot.create_net(output_record)
        workspace.RunNetOnce(nets.init_net)
        workspace.RunNetOnce(nets.net)

        external_outputs = {str(b) for b in nets.net.external_outputs}

        def fetch_blob(b):
            self.assertIn(b, external_outputs)
            return workspace.FetchBlob(b)

        lengths = fetch_blob("output/float_features.lengths")
        keys = fetch_blob("output/float_features.keys")
        values = fetch_blob("output/float_features.values")

        scaled_actions = (actions + np.ones(len(action_feature_ids)) - 1e-6) / (
            (1 - 1e-6) * 2
        ) * (serving_max_scale - serving_min_scale) + serving_min_scale

        npt.assert_array_equal([len(action_feature_ids)] * N, lengths)
        npt.assert_array_equal(action_feature_ids * N, keys)
        npt.assert_array_almost_equal(scaled_actions.reshape(-1), values)
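
A hedged numeric spot-check of the rescaling formula above, with scalar stand-in values (assumed, not from the test):

# action = 0.0 with serving range [-5.0, 1.0] should map near the midpoint.
a, lo, hi = 0.0, -5.0, 1.0
scaled = (a + 1.0 - 1e-6) / ((1 - 1e-6) * 2) * (hi - lo) + lo
# scaled ~= -2.0, i.e. halfway between lo and hi, matching the vectorized
# expression applied to `actions` in the test.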
Example #15
    def create_param(self, param_name, shape, initializer, optimizer=None,
                     ps_param=None):
        if isinstance(param_name, core.BlobReference):
            param_name = str(param_name)
        elif isinstance(param_name, six.string_types):
            # Parameter name will be equal to the current Namescope,
            # resolved with respect to the parameter sharing of the scopes.
            param_name = parameter_sharing_context.get_parameter_name(
                param_name)
        else:
            raise "Unsupported type for param_name"

        param_blob = core.BlobReference(param_name)

        if len(initializer) == 1:
            init_op_args = {}
        else:
            assert len(initializer) == 2
            init_op_args = initializer[1]
        if shape is not None:
            init_op_args.update({'shape': shape})

        param = layers.LayerParameter(
            parameter=param_blob,
            initializer=core.CreateOperator(
                initializer[0],
                [],
                param_blob,
                **init_op_args
            ),
            optimizer=optimizer,
            ps_param=ps_param,
        )

        return param
Example #16
def DoUntil(name, condition_blob_or_net, nets_or_steps):
    """
    Similar to DoWhile() but execute nets_or_steps when
    condition_blob_or_net returns false. It will execute
    nets_or_steps before evaluating condition_blob_or_net.

    Special case: if condition_blob_or_net is a blob and is pre-set to
    true, then only the first net/step of nets_or_steps will be executed and
    the loop is exited. So you need to be careful about the initial value of
    the condition blob when using DoUntil(), especially when DoUntil() is
    called twice.
    """
    if not isinstance(condition_blob_or_net, core.Net):
        stop_blob = core.BlobReference(condition_blob_or_net)
        return core.scoped_execution_step(_get_next_step_name('DoUntil', name),
                                          nets_or_steps,
                                          should_stop_blob=stop_blob)

    nets_or_steps = _AppendNets(nets_or_steps, condition_blob_or_net)
    stop_blob = GetConditionBlobFromNet(condition_blob_or_net)

    # If stop_blob is pre-set to True (this may happen when DoWhile() is
    # called twice), the loop will exit after executing the first net/step
    # in nets_or_steps. This is not what we want. So we use BoolNet to
    # set stop_blob to False.
    bool_net = BoolNet((stop_blob, False))
    return Do(
        name + '/DoUntil', bool_net,
        core.scoped_execution_step(
            _get_next_step_name('DoUntil-inner', name),
            nets_or_steps,
            should_stop_blob=stop_blob,
        ))
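
A hedged usage sketch of the condition-net form (all net and blob names here are hypothetical): increment a counter until it exceeds a limit.

from caffe2.python import core, workspace
from caffe2.python.control import DoUntil

init_net = core.Net("init")
counter = init_net.ConstantFill([], "counter", shape=[], value=0.0)
one = init_net.ConstantFill([], "one", shape=[], value=1.0)
limit = init_net.ConstantFill([], "limit", shape=[], value=5.0)

body_net = core.Net("body")
body_net.Add([counter, one], counter)  # counter += 1 each iteration

cond_net = core.Net("cond")
done = cond_net.GT([counter, limit], "done")
cond_net.AddExternalOutput(done)  # the last external output is the condition

plan = core.Plan("count_plan")
plan.AddStep(core.execution_step("init_step", init_net))
plan.AddStep(DoUntil("count_until", cond_net, [body_net]))
workspace.RunPlan(plan)
# workspace.FetchBlob("counter") -> 6.0, the first value strictly above limit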
Example #17
    def modify_net(self,
                   net,
                   init_net=None,
                   grad_map=None,
                   blob_to_device=None):

        p = self._p

        for blob_name in self._blobs:
            blob = core.BlobReference(blob_name)
            if not net.BlobIsDefined(blob):
                raise Exception('blob {0} is not defined in net {1}'.format(
                    blob, net.Name()))

            norm_name = net.NextScopedBlob(prefix=blob + '_l{}_norm'.format(p))
            norm = net.LpNorm(blob, norm_name, p=p)

            if self._logging_frequency >= 1:
                net.Print(norm, [], every_n=self._logging_frequency)

            output_field_name = str(blob) + '_l{}_norm'.format(p)
            output_scalar = schema.Scalar((np.float64, (1, )), norm)

            if net.output_record() is None:
                net.set_output_record(
                    schema.Struct((output_field_name, output_scalar)))
            else:
                net.AppendOutputRecordField(output_field_name, output_scalar)
Example #18
    def test_log_barrier(self, X):
        param = core.BlobReference("X")
        workspace.FeedBlob(param, X)
        train_init_net, train_net = self.get_training_nets()
        reg = regularizer.LogBarrier(1.0)
        output = reg(train_net,
                     train_init_net,
                     param,
                     by=RegularizationBy.ON_LOSS)
        reg(
            train_net,
            train_init_net,
            param,
            grad=None,
            by=RegularizationBy.AFTER_OPTIMIZER,
        )
        workspace.RunNetOnce(train_init_net)
        workspace.RunNetOnce(train_net)

        def ref(X):
            return (
                np.array(np.sum(-np.log(np.clip(X, 1e-9, None))) * 0.5).astype(
                    np.float32),
                np.clip(X, 1e-9, None),
            )

        for x, y in zip(workspace.FetchBlobs([output, param]), ref(X)):
            npt.assert_allclose(x, y, rtol=1e-3)
Example #19

    def testAddFieldByNestedName(self):
        f_a = schema.Scalar(blob=core.BlobReference('blob1'))
        f_b = schema.Struct(
            ('c', schema.Struct(
                ('d', schema.Scalar(blob=core.BlobReference('blob2'))),
            )),
        )
        f_x = schema.Struct(
            ('x', schema.Scalar(blob=core.BlobReference('blob3'))),
        )

        with self.assertRaises(TypeError):
            st = schema.Struct(
                ('a', f_a),
                ('b', f_b),
                ('b:c:d', f_x),
            )
        with self.assertRaises(TypeError):
            st = schema.Struct(
                ('a', f_a),
                ('b', f_b),
                ('b:c:d:e', f_x),
            )

        st = schema.Struct(
            ('a', f_a),
            ('b', f_b),
            ('e:f', f_x),
        )
        self.assertEqual(['a', 'b:c:d', 'e:f:x'], st.field_names())
        self.assertEqual(['blob1', 'blob2', 'blob3'], st.field_blobs())

        st = schema.Struct(
            ('a', f_a),
            ('b:c:e', f_x),
            ('b', f_b),
        )
        self.assertEqual(['a', 'b:c:e:x', 'b:c:d'], st.field_names())
        self.assertEqual(['blob1', 'blob3', 'blob2'], st.field_blobs())

        st = schema.Struct(
            ('a:a1', f_a),
            ('b:b1', f_b),
            ('a', f_x),
        )
        self.assertEqual(['a:a1', 'a:x', 'b:b1:c:d'], st.field_names())
        self.assertEqual(['blob1', 'blob3', 'blob2'], st.field_blobs())
Example #20
    def modify_net(self,
                   net,
                   init_net=None,
                   grad_map=None,
                   blob_to_device=None,
                   modify_output_record=False):

        p = self._p
        compute_averaged_norm = self._compute_averaged_norm
        row_index = self.row_index

        CPU = muji.OnCPU()
        # if given, blob_to_device is a map from blob to device_option
        blob_to_device = blob_to_device or {}
        for blob_name in self._blobs:
            blob = core.BlobReference(blob_name)
            assert net.BlobIsDefined(
                blob
            ), 'blob {} is not defined in net {} whose proto is {}'.format(
                blob, net.Name(), net.Proto())
            if blob in blob_to_device:
                device = blob_to_device[blob]
            else:
                device = CPU

            with core.DeviceScope(device):
                if row_index and row_index >= 0:
                    blob = net.Slice(
                        [blob],
                        net.NextScopedBlob(prefix=blob +
                                           '_row_{0}'.format(row_index)),
                        starts=[row_index, 0],
                        ends=[row_index + 1, -1])

                cast_blob = net.Cast(blob,
                                     net.NextScopedBlob(prefix=blob +
                                                        '_float'),
                                     to=core.DataType.FLOAT)

                norm_name = net.NextScopedBlob(prefix=blob +
                                               self._field_name_suffix)
                norm = net.LpNorm(cast_blob,
                                  norm_name,
                                  p=p,
                                  average=compute_averaged_norm)

                if self._logging_frequency >= 1:
                    net.Print(norm, [], every_n=self._logging_frequency)

                if modify_output_record:
                    output_field_name = str(blob) + self._field_name_suffix
                    output_scalar = schema.Scalar((np.float64, (1, )), norm)

                    if net.output_record() is None:
                        net.set_output_record(
                            schema.Struct((output_field_name, output_scalar)))
                    else:
                        net.AppendOutputRecordField(output_field_name,
                                                    output_scalar)
Example #21
    def _init_scratch(self):
        self._scratch_blob = {}
        self._scratch_status = {}
        for blob_name in self._input_blob_names:
            scratch_name = self._namescope + blob_name + \
                "_scratch_" + self._input_source_name
            self._scratch_blob[blob_name] = core.BlobReference(scratch_name)
            self._scratch_status[blob_name] = core.BlobReference(scratch_name +
                                                                 "_status")

        # Feed empty arrays to the scratch blobs here, so that there won't be
        # race conditions when calling FeedBlob (which calls workspace
        # CreateBlob()) from enqueue threads
        for b in list(self._scratch_blob.values()) + \
                list(self._scratch_status.values()):
            workspace.FeedBlob(
                b,
                np.array([]).astype(np.float32),
                device_option=self._device_option,
            )
Example #22
        def add_parameter_update_ops(model):
            model.Iter("ITER")
            LR = model.param_init_net.ConstantFill([],
                                                   'LR',
                                                   shape=[1],
                                                   value=0.1)
            for param in model.GetParams():
                param_grad = model.param_to_grad[param]
                param_momentum = model.param_init_net.ConstantFill([param],
                                                                   param +
                                                                   '_momentum',
                                                                   value=0.0)
                model.net.MomentumSGDUpdate(
                    [param_grad, param_momentum, LR, param],
                    [param_grad, param_momentum, param],
                )
        model = cnn.CNNModelHelper(
            order="NHWC",
            name="test",
        )
        data_parallel_model.Parallelize_GPU(
            model,
            input_builder_fun=add_input_ops,
            forward_pass_builder_fun=add_model_ops,
            param_update_builder_fun=add_parameter_update_ops,
            devices=[1, 2, 3],
        )

        # Only gpu_1 params should be returned (gpu_1 is the first gpu)
        checkpoint_params = data_parallel_model.GetCheckpointParams(model)
        for p in model.GetParams("gpu_1/"):
            self.assertTrue(p in checkpoint_params)
            self.assertTrue(p + "_momentum" in checkpoint_params)
        for p in model.GetParams("gpu_2/"):
            self.assertFalse(p in checkpoint_params)
        for c in model.GetComputedParams("gpu_1/"):
            self.assertTrue(c in checkpoint_params)
        for c in model.GetComputedParams("gpu_2/"):
            self.assertFalse(c in checkpoint_params)
        self.assertFalse(
            core.BlobReference("gpu_1/data") in checkpoint_params)
        self.assertTrue(
            core.BlobReference("gpu_1/ITER") in checkpoint_params)
Example #23
 def export_q_values(self, net, q_values, action_names, action_name_blob):
     batch_size = self.get_batch_size_blob(net, q_values)
     feature_lengths_blob = core.BlobReference(
         "output/string_weighted_multi_categorical_features.lengths")
     net.ConstantFill(
         batch_size,
         feature_lengths_blob,
         value=1,
         dtype=core.DataType.INT32,
         input_as_shape=1,
     )
     feature_keys_blob = core.BlobReference(
         "output/string_weighted_multi_categorical_features.keys")
     net.ConstantFill(
         batch_size,
         feature_keys_blob,
         value=0,
         dtype=core.DataType.INT64,
         input_as_shape=1,
     )
     values_lengths_blob = core.BlobReference(
         "output/string_weighted_multi_categorical_features.values.lengths")
     net.ConstantFill(
         batch_size,
         values_lengths_blob,
         value=len(action_names),
         dtype=core.DataType.INT32,
         input_as_shape=1,
     )
     values_keys_blob = core.BlobReference(
         "output/string_weighted_multi_categorical_features.values.keys")
     net.Tile([action_name_blob, batch_size], values_keys_blob, axis=0)
     values_values_blob = core.BlobReference(
         "output/string_weighted_multi_categorical_features.values.values")
     net.FlattenToVec(q_values, values_values_blob)
     net.AddExternalOutput(
         feature_lengths_blob,
         feature_keys_blob,
         values_lengths_blob,
         values_keys_blob,
         values_values_blob,
     )
Example #24
 def _update_param_info(self):
     assert len(self._param_info) <= len(self.params)
     for param in self.params[len(self._param_info):]:
         if not isinstance(param, core.BlobReference):
             param = core.BlobReference(str(param), net=self._param_init_net)
         self._param_info.append(ParameterInfo(
             param_id=len(self._param_info),
             param=param,
             shape=self._infer_param_shape(param)))
     for info in self._param_info:
         info.grad = self.param_to_grad.get(info.name)
Example #25
    def __call__(self, net, param_init_net, param, grad=None):
        if grad is None:
            assert isinstance(param, parameter_info.ParameterInfo)
            assert param.grad is not None
        else:
            if isinstance(param, str):
                param = core.BlobReference(param)
            param = parameter_info.ParameterInfo(
                param_id=None, param=param, grad=grad)

        self._run(net, param_init_net, param)
Example #26
    def test_create_blobs_queue_db(self):
        num_samples = 1000
        batch_size = 10
        init_net = core.Net('init_net')
        net = core.Net('test_create_blobs_queue_db')
        queue = init_net.CreateBlobsQueue([], 'queue', capacity=num_samples)
        reader = init_net.CreateBlobsQueueDB(
            [queue],
            'blobs_queue_db_reader',
            value_blob_index=0,
            timeout_secs=0.1,
        )
        workspace.RunNetOnce(init_net)

        blob = core.BlobReference("blob")
        status = core.BlobReference("blob_status")
        for i in range(num_samples):
            self._add_blob_to_queue(queue, self._create_test_tensor_protos(i),
                                    blob, status)
        net.TensorProtosDBInput([reader], ['image', 'label'],
                                batch_size=batch_size)
        workspace.CreateNet(net)

        close_net = core.Net('close_net')
        close_net.CloseBlobsQueue([queue], [])

        for i in range(int(num_samples / batch_size)):
            print("Running net, iteration {}".format(i))
            with timeout_guard.CompleteInTimeOrDie(2.0):
                workspace.RunNet(net)

            images = workspace.FetchBlob('image')
            labels = workspace.FetchBlob('label')
            self.assertEqual(batch_size, len(images))
            self.assertEqual(batch_size, len(labels))
            for idx, item in enumerate(images):
                self.assertEqual(
                    "foo{}".format(i * batch_size + idx).encode('utf-8'), item)
            for item in labels:
                self.assertEqual(0, item)
        workspace.RunNetOnce(close_net)
Example #27
    def create_net(self):
        net = core.Net("feature_extractor")
        init_net = core.Net("feature_extractor_init")
        missing_scalar = self.create_const(init_net, "MISSING_SCALAR",
                                           MISSING_VALUE)

        input_schema = schema.Struct((
            "float_features",
            schema.Map(
                keys=core.BlobReference("input/float_features.keys"),
                values=core.BlobReference("input/float_features.values"),
                lengths_blob=core.BlobReference(
                    "input/float_features.lengths"),
            ),
        ))

        input_record = net.set_input_record(input_schema)

        state = self.extract_float_features(
            net,
            "state",
            input_record.float_features,
            self.sorted_state_features,
            missing_scalar,
        )

        output_record = schema.Struct(("state", state))

        if self.sorted_action_features:
            action = self.extract_float_features(
                net,
                "action",
                input_record.float_features,
                self.sorted_action_features,
                missing_scalar,
            )
            output_record += schema.Struct(("action", action))

        net.set_output_record(output_record)

        return FeatureExtractorNet(net, init_net)
Example #28
def add_fpn_onto_conv_body(model,
                           conv_body_func,
                           fpn_level_info_func,
                           P2only=False):
    """Add the specified conv body to the model and then add FPN levels to it.
    """
    # Note: blobs_conv is in reversed order: [fpn5, fpn4, fpn3, fpn2]
    # similarly for dims_conv: [2048, 1024, 512, 256]
    # similarly for spatial_scales_fpn: [1/32, 1/16, 1/8, 1/4]

    conv_body_func(model)
    blobs_fpn, dim_fpn, spatial_scales_fpn = add_fpn(model,
                                                     fpn_level_info_func())

    if cfg.MODEL.SIBLING_BACKBONE_ON:
        # Resnet stage to fork backbone weights
        fork_at = cfg.SIBLING.FORK_AT
        assert fork_at in [0, 2, 3, 4, 5]
        prefix_len = 0
        # No shared weights
        if fork_at == 0:
            fork_node = core.ScopedName("data".format(fork_at, fork_at))
        # Fork weights at stage `fork_at`
        else:
            fork_node = core.ScopedName("res{}_{}_sum".format(
                fork_at, fork_at))
            ops = model.net.Proto().op
            while (fork_node not in ops[prefix_len].output):
                prefix_len += 1
            prefix_len += 2
        temp_net, _ = c2_utils.SuffixNet('temp_net', model.net, prefix_len,
                                         blobs_fpn)
        # Prefix the sibling backbone
        temp_net_preffixed, _ = c2_utils.RenameNet("temp_net_preffixed",
                                                   temp_net,
                                                   cfg.SIBLING.PREFFIX,
                                                   excluded_nodes=[fork_node])
        model.AddParams([
            core.BlobReference(input_name)
            for op in temp_net_preffixed.Proto().op for input_name in op.input
            if input_name[-2] == "_"
        ])
        # Merge the backbones
        model.net = c2_utils.MergeNets("net", [model.net, temp_net_preffixed])
        del temp_net
        del temp_net_preffixed

    if P2only:
        # use only the finest level
        return blobs_fpn[-1], dim_fpn, spatial_scales_fpn[-1]
    else:
        # use all levels
        return blobs_fpn, dim_fpn, spatial_scales_fpn
Example #29
def GetConditionBlobFromNet(condition_net):
    """
    The condition blob is the last external_output that must
    be a single bool
    """
    assert len(condition_net.Proto().external_output) > 0, (
        "Condition net %s must has at least one external output" %
        condition_net.Proto.name)
    # we need to use a blob reference here instead of a string
    # otherwise, it will add another name_scope to the input later
    # when we create new ops (such as OR of two inputs)
    return core.BlobReference(condition_net.Proto().external_output[-1])
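
The name-scope remark is the reason a BlobReference is returned rather than a string; a small hypothetical illustration:

# A raw string input would be rescoped to "loop/done" here; the
# BlobReference keeps its absolute name.
stop_blob = GetConditionBlobFromNet(cond_net)
with core.NameScope("loop"):
    op = core.CreateOperator("Not", stop_blob, stop_blob)
assert op.input[0] == str(stop_blob)  # no "loop/" prefix was prepended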
Example #30
def load_from_db(filename, db_type):
    # global_init_net in meta_net_def will load parameters from
    # predictor_constants.PREDICTOR_DBREADER
    create_db = core.CreateOperator(
        'CreateDB', [],
        [core.BlobReference(predictor_constants.PREDICTOR_DBREADER)],
        db=filename,
        db_type=db_type)
    assert workspace.RunOperatorOnce(create_db), (
        'Failed to create db {}'.format(filename))

    # predictor_constants.META_NET_DEF is always stored before the parameters
    load_meta_net_def = core.CreateOperator(
        'Load', [core.BlobReference(predictor_constants.PREDICTOR_DBREADER)],
        [core.BlobReference(predictor_constants.META_NET_DEF)])
    assert workspace.RunOperatorOnce(load_meta_net_def)

    meta_net_def = serde.deserialize_protobuf_struct(
        workspace.FetchBlob(predictor_constants.META_NET_DEF),
        metanet_pb2.MetaNetDef)
    return meta_net_def