Example #1
0
  def testCreateWorkspace(self):
    """Exercises workspace creation, switching, and blob visibility.

    Verifies that a blob fed in one workspace is not visible from
    another, and that switching to a non-existing workspace without
    the create flag raises RuntimeError.
    """
    workspaces = workspace.Workspaces()
    self.assertEqual(len(workspaces), 1)
    self.assertEqual(workspaces[0], "default")
    self.net = core.Net("test-net")
    self.net.ConstantFill([], "testblob", shape=[1, 2, 3, 4], value=1.0)
    self.assertTrue(
        workspace.RunNetOnce(self.net.Proto().SerializeToString()))
    self.assertTrue(workspace.HasBlob("testblob"))
    # Switching with the create flag lands us in a fresh, empty
    # workspace; "testblob" only exists in "default".
    self.assertTrue(workspace.SwitchWorkspace("test", True))
    self.assertFalse(workspace.HasBlob("testblob"))
    self.assertTrue(workspace.SwitchWorkspace("default"))
    self.assertTrue(workspace.HasBlob("testblob"))

    # Switching to an unknown workspace without the create flag must
    # raise; assertRaises states that intent directly instead of the
    # try/except + assertEqual(True, False) pattern.
    with self.assertRaises(RuntimeError):
        workspace.SwitchWorkspace("non-existing")

    workspaces = workspace.Workspaces()
    self.assertEqual(len(workspaces), 2)
    workspaces.sort()
    self.assertEqual(workspaces[0], "default")
    self.assertEqual(workspaces[1], "test")
 def CheckNet(self, net, inputs=None, ignore=None):
     """Checks a network by inspecting all of its intermediate results
     across every configured device option, and sees if things match.

     Inputs:
       net: the network to check.
       inputs: optional dict mapping blob names to numpy arrays, fed
           before each run.
       ignore: optional collection of blob names to skip when comparing.
     Outputs:
       boolean: True if all devices produced matching intermediate blobs.
     """
     # Avoid mutable default arguments — a shared {} / set() default
     # persists across calls.
     if inputs is None:
         inputs = {}
     if ignore is None:
         ignore = set()
     old_ws_name = workspace.CurrentWorkspace()
     results = []
     # Every output of every operator is an intermediate blob to verify.
     blobs_to_check = sum([list(op.outputs) for op in net.operators], [])
     blobs_to_check = [b for b in blobs_to_check if b not in ignore]
     # Run inside a scratch workspace so we do not pollute the caller's.
     workspace.SwitchWorkspace("_device_check_", True)
     for device_option in self._device_options:
         for name, arr in inputs.items():
             workspace.FeedBlob(name, arr, device_option)
         for op in net.operators:
             op.device_option.CopyFrom(device_option)
         workspace.RunNetOnce(net)
         results.append(
             [workspace.FetchBlob(name) for name in blobs_to_check])
     # After running on all devices, compare everything against device 0.
     success = True
     for i in range(1, len(results)):
         for j in range(len(blobs_to_check)):
             x = results[i][j]
             y = results[0][j]
             if np.any(np.abs(x - y) > self._threshold):
                 print('Failure in checking device option %d and blob %s. '
                       'The outputs are:' % (i, blobs_to_check[j]))
                 print(x.flatten())
                 print(y.flatten())
                 success = False
     workspace.SwitchWorkspace(old_ws_name)
     return success
    def CheckSimple(self, op, inputs, outputs_to_check):
        """Checks a single operator across all configured device options.

    Inputs:
      op: the operator to be checked.
      inputs: the input data in numpy arrays, one per operator input.
      outputs_to_check: indices of the operator outputs to compare
          between devices.
    Outputs:
      boolean: True if all devices produce matching outputs.
    """
        # Entering the checker workspace
        old_ws_name = workspace.CurrentWorkspace()
        results = []
        workspace.SwitchWorkspace("_device_check_", True)
        for device_option in self._device_options:
            # Dedicated index name — the original reused `i` here,
            # shadowing the enclosing loop variable.
            for input_idx, arr in enumerate(inputs):
                workspace.FeedBlob(op.inputs[input_idx], arr, device_option)
            op.device_option.CopyFrom(device_option)
            workspace.RunOperatorOnce(op)
            results.append([
                workspace.FetchBlob(op.outputs[idx])
                for idx in outputs_to_check
            ])
            # Everything is done, reset the workspace.
            workspace.ResetWorkspace()
        # After running on all devices, check correctness against device 0.
        success = True
        for i in range(1, len(self._device_options)):
            for j in range(len(outputs_to_check)):
                x = results[i][j]
                y = results[0][j]
                if np.any(np.abs(x - y) > self._threshold):
                    # Report the blob that was actually fetched: op.outputs
                    # indexed through outputs_to_check, not by j directly.
                    print('Failure in checking device option %d and output '
                          '%s. The outputs are:' % (
                              i, op.outputs[outputs_to_check[j]]))
                    print(x.flatten())
                    print(y.flatten())
                    success = False
        workspace.SwitchWorkspace(old_ws_name)
        return success
Example #4
0
    def CheckSimple(self, op, inputs, outputs_to_check):
        """Checks the operator with different device implementations.

    Inputs:
      op: the operator to be checked.
      inputs: the input data in numpy arrays, one per operator input.
      outputs_to_check: indices of the operator outputs to compare
          between devices.
    Outputs:
      boolean: True if it passes, False if it does not pass.
    """
        # Entering the checker workspace
        old_ws_name = workspace.CurrentWorkspace()
        results = []
        workspace.SwitchWorkspace("_device_check_", True)
        for device_option in self._device_options:
            # Dedicated index name — the original reused `i` here,
            # shadowing the enclosing loop variable.
            for input_idx, arr in enumerate(inputs):
                workspace.FeedBlob(op.input[input_idx], arr, device_option)
            op.device_option.CopyFrom(device_option)
            workspace.RunOperatorOnce(op)
            results.append([
                workspace.FetchBlob(op.output[idx]) for idx in outputs_to_check
            ])
            # Everything is done, reset the workspace.
            workspace.ResetWorkspace()
        # After running on all devices, check correctness against device 0.
        success = True
        for i in range(1, len(self._device_options)):
            for j in range(len(outputs_to_check)):
                x = results[i][j]
                y = results[0][j]
                if np.any(np.abs(x - y) > self._threshold):
                    # Report the blob that was actually fetched: op.output
                    # indexed through outputs_to_check, not by j directly.
                    print('Failure in checking device option %d and output '
                          '%s. The outputs are:' % (
                              i, op.output[outputs_to_check[j]]))
                    print(x.flatten())
                    print(y.flatten())
                    success = False
        workspace.SwitchWorkspace(old_ws_name)
        return success
Example #5
0
# Load the serialized network parameters from disk into a TensorProtos
# message. NOTE(review): the open() handle is never closed; relies on
# interpreter cleanup.
tensors = caffe2_pb2.TensorProtos()
tensors.ParseFromString(open('inception_tensors.pb').read())

DEVICE_OPTION = caffe2_pb2.DeviceOption()
# Let's use CPU in our example.
DEVICE_OPTION.device_type = caffe2_pb2.CPU

# If you have a GPU and want to run things there, uncomment the below two lines.
# If you have multiple GPUs, you also might want to specify a gpu id.
#DEVICE_OPTION.device_type = caffe2_pb2.CUDA
#DEVICE_OPTION.cuda_gpu_id = 0

# Caffe2 has a concept of "workspace", which is similar to that of Matlab. Each workspace
# is a self-contained set of tensors and networks. In this case, we will just use the default
# workspace so we won't dive too deep into it.
workspace.SwitchWorkspace('default')

# First, we feed all the parameters to the workspace.
# NOTE(review): the whole TensorProto message is fed, not a decoded numpy
# array — presumably FeedBlob accepts protos directly; confirm.
for param in tensors.protos:
    workspace.FeedBlob(param.name, param, DEVICE_OPTION)
# The network expects an input blob called "input", which we create here.
# The content of the input blob is going to be fed when we actually do
# classification.
workspace.CreateBlob("input")
# Specify the device option of the network, and then create it.
# NOTE(review): `net` is defined elsewhere in the file, outside this chunk.
net.device_option.CopyFrom(DEVICE_OPTION)
workspace.CreateNet(net)

########################################
### MY CODE ############################
Example #6
0
 def setUp(self):
   """Runs each test in a clean default workspace.

   Switches back to "default" and clears all blobs and nets in it
   before every test, so tests do not leak state into each other.
   """
   workspace.SwitchWorkspace("default")
   workspace.ResetWorkspace()
    def CheckSimple(self,
                    op,
                    inputs,
                    input_to_check,
                    outputs_with_grads,
                    grad_ops=None):
        """Checks an operator's gradient against a numerical estimate.

    Stacks a sum of squares on top of the checked outputs (via
    GetLossAndGrad), then compares the analytical gradient with a
    central finite-difference estimate, perturbing one element of the
    checked input at a time.

    Inputs:
      op: the operator to be checked.
      inputs: the input data in numpy arrays.
      input_to_check: an index specifying which input blob we should
          check.
      outputs_with_grads: indices specifying which output blobs will we
          need to check gradients with. For these outputs, we will collect a
          squared sum and also feed in their gradients.
      grad_ops: the gradient operators. If not given, we will get them
          from the gradient registry.
    Outputs:
      (boolean, grad, grad_estimate): True if the check passes, plus the
      analytical gradient and its numerical estimate.
    """
        # Entering the checker workspace
        old_ws_name = workspace.CurrentWorkspace()
        if self._workspace_name != old_ws_name:
            workspace.SwitchWorkspace(self._workspace_name, True)

        op.device_option.CopyFrom(self._device_option)
        if grad_ops is None:
            grad_ops = core.GradientRegistry.GetGradient(op)

        # Number of scalar elements to perturb, one at a time.
        dims_to_check = inputs[input_to_check].size
        # First, feed in the input.
        for i, arr in enumerate(inputs):
            workspace.FeedBlob(op.inputs[i], arr, self._device_option)

        # Get the loss and gradient for the original.
        input_name = op.inputs[input_to_check]
        loss, grad = self.GetLossAndGrad(op, grad_ops, inputs[input_to_check],
                                         input_name, outputs_with_grads)
        grad_estimate = np.zeros_like(inputs[input_to_check])
        for current_dim in range(dims_to_check):
            # Positive perturbation: loss at (x + h).
            inputs[input_to_check].flat[current_dim] += self._stepsize
            pos_loss, _ = self.GetLossAndGrad(op, grad_ops,
                                              inputs[input_to_check],
                                              input_name, outputs_with_grads)
            # Negative perturbation: loss at (x - h).
            inputs[input_to_check].flat[current_dim] -= self._stepsize * 2
            neg_loss, _ = self.GetLossAndGrad(op, grad_ops,
                                              inputs[input_to_check],
                                              input_name, outputs_with_grads)
            # Recover the value — the caller's array is mutated in place
            # and restored element by element.
            inputs[input_to_check].flat[current_dim] += self._stepsize
            # Central difference: (f(x+h) - f(x-h)) / (2h).
            grad_estimate.flat[current_dim] = (pos_loss -
                                               neg_loss) / self._stepsize / 2
        # Now, check correctness: relative error, with a floor of 1 so
        # near-zero gradients are effectively compared absolutely.
        scale = np.maximum(np.maximum(np.abs(grad), np.abs(grad_estimate)), 1)
        fail_mat = (np.abs(grad - grad_estimate) > scale * self._threshold)
        if np.any(fail_mat):
            # Flat indices of the failing elements; kept for debugging a
            # failed check (e.g. inspect grad.flat[idx] vs
            # grad_estimate.flat[idx]).
            idx = np.flatnonzero(fail_mat)
            ret = False
        else:
            ret = True
        # After finishing, cleaning up things.
        if self._workspace_name != old_ws_name:
            # We reset the workspace to make sure everything intermediate is cleaned
            # up. Note that there is no need to delete a workspace - when empty it
            # takes a very limited amount of memory.
            workspace.ResetWorkspace()
            workspace.SwitchWorkspace(old_ws_name)
        return ret, grad, grad_estimate