def CheckNet(self, net, inputs={}, ignore=set()):
    """Checks a network by inspecting all of its intermediate results, and
    seeing whether they match.
    """
    old_ws_name = workspace.CurrentWorkspace()
    results = []
    blobs_to_check = sum([list(op.outputs) for op in net.operators], [])
    blobs_to_check = [b for b in blobs_to_check if b not in ignore]
    workspace.SwitchWorkspace("_device_check_", True)
    for i, device_option in enumerate(self._device_options):
        for name, arr in inputs.iteritems():
            workspace.FeedBlob(name, arr, device_option)
        for op in net.operators:
            op.device_option.CopyFrom(device_option)
        workspace.RunNetOnce(net)
        results.append(
            [workspace.FetchBlob(name) for name in blobs_to_check])
    # After running on all devices, check correctness.
    success = True
    for i in range(1, len(results)):
        for j in range(len(blobs_to_check)):
            x = results[i][j]
            y = results[0][j]
            if np.any(np.abs(x - y) > self._threshold):
                print 'Failure in checking device option', i, 'and blob ',
                print blobs_to_check[j], '. The outputs are:'
                print x.flatten()
                print y.flatten()
                success = False
                continue
    workspace.SwitchWorkspace(old_ws_name)
    return success
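# The comparison rule above is a plain elementwise absolute-difference check
# against self._threshold. Below is a minimal, self-contained sketch of that
# rule in pure numpy, independent of any Caffe2 workspace; the helper name
# and the threshold value are illustrative only, not part of the checker.
import numpy as np

def _blobs_match(x, y, threshold=1e-4):
    # Mirrors the np.any(np.abs(x - y) > threshold) test used above:
    # returns True only when every element of x is within `threshold` of y.
    return not np.any(np.abs(np.asarray(x) - np.asarray(y)) > threshold)

# Example: outputs from two device options that agree to within 1e-4.
assert _blobs_match(np.array([1.0, 2.0]), np.array([1.0, 2.00005]))
assert not _blobs_match(np.array([1.0, 2.0]), np.array([1.0, 2.1]))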
def testFetchBlobGPU(self):
    self.assertEqual(
        workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
    fetched = workspace.FetchBlob("testblob")
    # Check if fetched is correct.
    self.assertEqual(fetched.shape, (1, 2, 3, 4))
    np.testing.assert_array_equal(fetched, 1.0)
    fetched[:] = 2.0
    self.assertEqual(workspace.FeedBlob("testblob", fetched), True)
    fetched_again = workspace.FetchBlob("testblob")
    self.assertEqual(fetched_again.shape, (1, 2, 3, 4))
    np.testing.assert_array_equal(fetched_again, 2.0)
def GetLossAndGrad(self, op, grad_ops, x, input_name, outputs_with_grads):
    # First, feed in the current input. Note that we are not changing
    # anything else, so we don't need to feed in others.
    workspace.FeedBlob(input_name, x, self._device_option)
    # Run.
    workspace.RunOperatorOnce(op)
    loss = 0.
    # Get the loss and feed in the gradients, then run the gradient ops.
    for idx in outputs_with_grads:
        name = op.outputs[idx]
        arr = workspace.FetchBlob(name)
        loss += (arr**2).sum()
        workspace.FeedBlob(core.GetGradientName(name), arr,
                           self._device_option)
    loss /= 2.
    # Run gradient ops.
    workspace.RunOperatorsOnce(grad_ops)
    # Get gradients.
    grad = workspace.FetchBlob(core.GetGradientName(input_name))
    return loss, grad
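# Why the fetched output `arr` is fed back in as its own gradient: the pseudo
# loss used here is sum(arr ** 2) / 2, whose derivative with respect to each
# output element is simply that element. A small numpy sketch of that
# identity, with illustrative values only:
import numpy as np

arr = np.array([0.5, -1.0, 2.0])
loss = (arr ** 2).sum() / 2.
dloss_darr = arr  # d/darr of sum(arr**2)/2 is arr itself
# Numerical check for one element.
eps = 1e-6
arr_pert = arr.copy()
arr_pert[0] += eps
numeric = ((arr_pert ** 2).sum() / 2. - loss) / eps
assert abs(numeric - dloss_darr[0]) < 1e-4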
def CheckSimple(self, op, inputs, outputs_to_check):
    """Checks the operator with different device implementations.

    Inputs:
      op: the operator to be checked.
      inputs: the input data in numpy arrays.
      outputs_to_check: indices of the outputs to check between devices.
    Outputs:
      boolean: True if it passes, False if it does not pass.
    """
    # Entering the checker workspace.
    old_ws_name = workspace.CurrentWorkspace()
    results = []
    workspace.SwitchWorkspace("_device_check_", True)
    for i, device_option in enumerate(self._device_options):
        for i, arr in enumerate(inputs):
            workspace.FeedBlob(op.inputs[i], arr, device_option)
        op.device_option.CopyFrom(device_option)
        workspace.RunOperatorOnce(op)
        results.append([
            workspace.FetchBlob(op.outputs[idx]) for idx in outputs_to_check
        ])
        # Everything is done, reset the workspace.
        workspace.ResetWorkspace()
    # After running on all devices, check correctness.
    success = True
    for i in range(1, len(self._device_options)):
        for j in range(len(outputs_to_check)):
            x = results[i][j]
            y = results[0][j]
            if np.any(np.abs(x - y) > self._threshold):
                print 'Failure in checking device option', i, 'and output ',
                print op.outputs[j], '. The outputs are:'
                print x.flatten()
                print y.flatten()
                success = False
                continue
    workspace.SwitchWorkspace(old_ws_name)
    return success
def CheckSimple(self, op, inputs, outputs_to_check):
    """Checks the operator with different device implementations.

    Inputs:
      op: the operator to be checked.
      inputs: the input data in numpy arrays.
      outputs_to_check: the outputs to check between devices.
    Outputs:
      boolean: True if it passes, False if it does not pass.
    """
    # Entering the checker workspace.
    old_ws_name = workspace.CurrentWorkspace()
    results = []
    workspace.SwitchWorkspace("_device_check_", True)
    for i, device_option in enumerate(self._device_options):
        for i, arr in enumerate(inputs):
            workspace.FeedBlob(op.input[i], arr, device_option)
        op.device_option.CopyFrom(device_option)
        workspace.RunOperatorOnce(op)
        results.append([
            workspace.FetchBlob(op.output[idx]) for idx in outputs_to_check
        ])
        # Everything is done, reset the workspace.
        workspace.ResetWorkspace()
    # After running on all devices, check correctness.
    success = True
    for i in range(1, len(self._device_options)):
        for j in range(len(outputs_to_check)):
            x = results[i][j]
            y = results[0][j]
            if np.any(np.abs(x - y) > self._threshold):
                print 'Failure in checking device option', i, 'and output ',
                print op.output[j], '. The outputs are:'
                print x.flatten()
                print y.flatten()
                success = False
            #else:
            #    print ('Passed device pair (0, %d), %s %s' %
            #           (i, outputs_to_check[j], y.shape))
    workspace.SwitchWorkspace(old_ws_name)
    return success
# Let's use CPU in our example.
DEVICE_OPTION.device_type = caffe2_pb2.CPU
# If you have a GPU and want to run things there, uncomment the two lines
# below. If you have multiple GPUs, you also might want to specify a gpu id.
#DEVICE_OPTION.device_type = caffe2_pb2.CUDA
#DEVICE_OPTION.cuda_gpu_id = 0

# Caffe2 has a concept of "workspace", which is similar to that of Matlab.
# Each workspace is a self-contained set of tensors and networks. In this
# case, we will just use the default workspace so we won't dive too deep
# into it.
workspace.SwitchWorkspace('default')

# First, we feed all the parameters to the workspace.
for param in tensors.protos:
    workspace.FeedBlob(param.name, param, DEVICE_OPTION)
# The network expects an input blob called "input", which we create here.
# The content of the input blob is going to be fed when we actually do
# classification.
workspace.CreateBlob("input")
# Specify the device option of the network, and then create it.
net.device_option.CopyFrom(DEVICE_OPTION)
workspace.CreateNet(net)

########################################
### MY CODE ############################
for param in tensors.protos:
    print(param.name)
    filters = workspace.FetchBlob(param.name)

import h5py
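# The snippet above is truncated right after `import h5py`. One plausible
# continuation - purely illustrative; the file name "filters.h5" and the
# one-dataset-per-blob layout are assumptions, not part of the original -
# would be to dump every fetched parameter blob into an HDF5 file:
import h5py
import numpy as np

with h5py.File('filters.h5', 'w') as f:
    for param in tensors.protos:
        blob = workspace.FetchBlob(param.name)
        # Store each parameter blob as a dataset keyed by its blob name.
        f.create_dataset(param.name, data=np.asarray(blob))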
if __name__ == '__main__':
    if len(sys.argv) == 1:
        print(
            'If you do not explicitly ask to run this test, I will not run it. '
            'Pass in any argument to have the test run for you.')
        sys.exit(0)
    if not os.path.exists('data/testdata/caffe_translator'):
        print 'No testdata existing for the caffe translator test. Exiting.'
        sys.exit(0)
    # We will do all the computation stuff in the global space.
    caffenet = caffe_pb2.NetParameter()
    caffenet_pretrained = caffe_pb2.NetParameter()
    text_format.Merge(
        open('data/testdata/caffe_translator/deploy.prototxt').read(),
        caffenet)
    caffenet_pretrained.ParseFromString(
        open(
            'data/testdata/caffe_translator/bvlc_reference_caffenet.caffemodel'
        ).read())
    net, pretrained_params = caffe_translator.TranslateModel(
        caffenet, caffenet_pretrained)
    caffe_translator.DeleteDropout(net)
    for param in pretrained_params.protos:
        workspace.FeedBlob(param.name, utils.Caffe2TensorToNumpyArray(param))
    # Let's also feed in the data from the Caffe test code.
    data = np.load('data/testdata/caffe_translator/data_dump.npy').astype(
        np.float32)
    workspace.FeedBlob('data', data)
    # Actually running the test.
    workspace.RunNetOnce(net.SerializeToString())
    unittest.main()
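# The actual output comparisons live in the unittest test cases picked up by
# unittest.main(), which are not shown here. A hedged sketch of what such a
# check typically looks like - the blob name 'prob' and the reference dump
# file name are assumptions for illustration only:
import numpy as np

caffe2_result = workspace.FetchBlob('prob')
caffe_reference = np.load(
    'data/testdata/caffe_translator/prob_dump.npy').astype(np.float32)
np.testing.assert_allclose(caffe2_result, caffe_reference,
                           rtol=1e-4, atol=1e-6)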
def CheckSimple(self, op, inputs, input_to_check, outputs_with_grads,
                grad_ops=None):
    """Checks the operator in a very simple fashion by stacking a sum of
    squares on the top.

    Inputs:
      op: the operator to be checked.
      inputs: the input data in numpy arrays.
      input_to_check: an index specifying which input blob we should check.
      outputs_with_grads: indices specifying which output blobs we will need
          to check gradients with. For these outputs, we will collect a
          squared sum and also feed in their gradients.
      grad_ops: the gradient operators. If not given, we will get them from
          the gradient registry.
    Outputs:
      boolean: True if it passes, False if it does not pass.
    """
    # Entering the checker workspace.
    old_ws_name = workspace.CurrentWorkspace()
    if self._workspace_name != old_ws_name:
        workspace.SwitchWorkspace(self._workspace_name, True)
    op.device_option.CopyFrom(self._device_option)
    if grad_ops is None:
        grad_ops = core.GradientRegistry.GetGradient(op)
    dims_to_check = inputs[input_to_check].size
    # First, feed in the input.
    for i, arr in enumerate(inputs):
        workspace.FeedBlob(op.inputs[i], arr, self._device_option)
    # Get the loss and gradient for the original.
    input_name = op.inputs[input_to_check]
    loss, grad = self.GetLossAndGrad(op, grad_ops, inputs[input_to_check],
                                     input_name, outputs_with_grads)
    grad_estimate = np.zeros_like(inputs[input_to_check])
    for current_dim in range(dims_to_check):
        # Positive perturbation.
        inputs[input_to_check].flat[current_dim] += self._stepsize
        pos_loss, _ = self.GetLossAndGrad(op, grad_ops,
                                          inputs[input_to_check],
                                          input_name, outputs_with_grads)
        # Negative perturbation.
        inputs[input_to_check].flat[current_dim] -= self._stepsize * 2
        neg_loss, _ = self.GetLossAndGrad(op, grad_ops,
                                          inputs[input_to_check],
                                          input_name, outputs_with_grads)
        # Recover the original value.
        inputs[input_to_check].flat[current_dim] += self._stepsize
        grad_estimate.flat[current_dim] = (
            pos_loss - neg_loss) / self._stepsize / 2
    # Now, check correctness.
    scale = np.maximum(np.maximum(np.abs(grad), np.abs(grad_estimate)), 1)
    fail_mat = (np.abs(grad - grad_estimate) > scale * self._threshold)
    if np.any(fail_mat):
        idx = np.flatnonzero(fail_mat)
        #print 'Failed. [idx, grad, grad_estimate] are:'
        #print np.vstack([idx, grad.flat[idx], grad_estimate.flat[idx]]).T
        ret = False
    else:
        ret = True
    # After finishing, clean things up.
    if self._workspace_name != old_ws_name:
        # We reset the workspace to make sure everything intermediate is
        # cleaned up. Note that there is no need to delete a workspace -
        # when empty it takes a very limited amount of memory.
        workspace.ResetWorkspace()
        workspace.SwitchWorkspace(old_ws_name)
    return ret, grad, grad_estimate
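# A minimal, self-contained numpy sketch of the central-difference estimate
# and the relative-scale comparison used in CheckSimple above. The stepsize
# and threshold values are illustrative, not the checker's defaults, and the
# "operator" is an identity so the analytic gradient of the pseudo loss is
# simply the input itself.
import numpy as np

x = np.array([0.3, -1.2, 2.5])
stepsize, threshold = 1e-2, 1e-4

def loss(v):
    # Same pseudo loss as GetLossAndGrad: half the squared sum of outputs.
    return (v ** 2).sum() / 2.

grad = x.copy()  # analytic gradient of loss(x) with respect to x
grad_estimate = np.zeros_like(x)
for d in range(x.size):
    x.flat[d] += stepsize
    pos = loss(x)
    x.flat[d] -= 2 * stepsize
    neg = loss(x)
    x.flat[d] += stepsize  # restore the original value
    grad_estimate.flat[d] = (pos - neg) / stepsize / 2

# Same relative-scale tolerance rule as the checker above.
scale = np.maximum(np.maximum(np.abs(grad), np.abs(grad_estimate)), 1)
assert not np.any(np.abs(grad - grad_estimate) > scale * threshold)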