def CheckNet(self, net, inputs={}, ignore=set()):
    """Checks a network by inspecting all of its intermediate results, and
    seeing if they match across devices.
    """
    old_ws_name = workspace.CurrentWorkspace()
    results = []
    # Every blob produced by any operator in the net is a candidate for
    # checking, unless it is explicitly listed in `ignore`.
    blobs_to_check = sum([list(op.outputs) for op in net.operators], [])
    blobs_to_check = [b for b in blobs_to_check if b not in ignore]
    workspace.SwitchWorkspace("_device_check_", True)
    for i, device_option in enumerate(self._device_options):
        for name, arr in inputs.iteritems():
            workspace.FeedBlob(name, arr, device_option)
        for op in net.operators:
            op.device_option.CopyFrom(device_option)
        workspace.RunNetOnce(net)
        results.append(
            [workspace.FetchBlob(name) for name in blobs_to_check])
    # After running on all devices, check correctness against the first device.
    success = True
    for i in range(1, len(results)):
        for j in range(len(blobs_to_check)):
            x = results[i][j]
            y = results[0][j]
            if np.any(np.abs(x - y) > self._threshold):
                print 'Failure in checking device option', i, 'and blob',
                print blobs_to_check[j], '. The outputs are:'
                print x.flatten()
                print y.flatten()
                success = False
    workspace.SwitchWorkspace(old_ws_name)
    return success
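# ---------------------------------------------------------------------------
# Minimal usage sketch for CheckNet (illustration only, not part of the
# checker). It assumes the method above lives on Caffe2's DeviceChecker class,
# constructed as DeviceChecker(threshold, device_options), and that the
# caffe2.python / caffe2.proto import paths below match the local layout.
# Note that the checker iterates `net.operators`; in newer NetDef protos the
# repeated operator field is named `op`, so the proto passed in has to match
# the Caffe2 version this code was written against.
import numpy as np

from caffe2.proto import caffe2_pb2
from caffe2.python import core, device_checker

# One CPU and one CUDA device option; CheckNet runs the net under each and
# compares every intermediate blob against the first device's results.
cpu_option = caffe2_pb2.DeviceOption()
cpu_option.device_type = caffe2_pb2.CPU
gpu_option = caffe2_pb2.DeviceOption()
gpu_option.device_type = caffe2_pb2.CUDA

net = core.Net("relu_test")
net.Relu("X", "Y")

checker = device_checker.DeviceChecker(
    threshold=1e-4, device_options=[cpu_option, gpu_option])
success = checker.CheckNet(
    net.Proto(), inputs={"X": np.random.randn(2, 3).astype(np.float32)})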
def CheckSimple(self, op, inputs, outputs_to_check):
    """Checks an operator by running it on every device option and comparing
    the selected outputs against the results from the first device.

    Inputs:
      op: the operator to be checked.
      inputs: the input data in numpy arrays.
      outputs_to_check: indices of the output blobs to compare across devices.
    Outputs:
      boolean: True if it passes, False if it does not pass.
    """
    # Entering the checker workspace
    old_ws_name = workspace.CurrentWorkspace()
    results = []
    workspace.SwitchWorkspace("_device_check_", True)
    for i, device_option in enumerate(self._device_options):
        for j, arr in enumerate(inputs):
            workspace.FeedBlob(op.inputs[j], arr, device_option)
        op.device_option.CopyFrom(device_option)
        workspace.RunOperatorOnce(op)
        results.append([
            workspace.FetchBlob(op.outputs[idx]) for idx in outputs_to_check])
        # Everything is done, reset the workspace.
        workspace.ResetWorkspace()
    # After running on all devices, check correctness
    success = True
    for i in range(1, len(self._device_options)):
        for j in range(len(outputs_to_check)):
            x = results[i][j]
            y = results[0][j]
            if np.any(np.abs(x - y) > self._threshold):
                print 'Failure in checking device option', i, 'and output',
                print op.outputs[outputs_to_check[j]], '. The outputs are:'
                print x.flatten()
                print y.flatten()
                success = False
    workspace.SwitchWorkspace(old_ws_name)
    return success
def CheckSimple(self, op, inputs, outputs_to_check):
    """Checks the operator with different device implementations.

    Inputs:
      op: the operator to be checked.
      inputs: the input data in numpy arrays.
      outputs_to_check: the outputs to check between devices.
    Outputs:
      boolean: True if it passes, False if it does not pass.
    """
    # Entering the checker workspace
    old_ws_name = workspace.CurrentWorkspace()
    results = []
    workspace.SwitchWorkspace("_device_check_", True)
    for i, device_option in enumerate(self._device_options):
        for j, arr in enumerate(inputs):
            workspace.FeedBlob(op.input[j], arr, device_option)
        op.device_option.CopyFrom(device_option)
        workspace.RunOperatorOnce(op)
        results.append([
            workspace.FetchBlob(op.output[idx]) for idx in outputs_to_check])
        # Everything is done, reset the workspace.
        workspace.ResetWorkspace()
    # After running on all devices, check correctness
    success = True
    for i in range(1, len(self._device_options)):
        for j in range(len(outputs_to_check)):
            x = results[i][j]
            y = results[0][j]
            if np.any(np.abs(x - y) > self._threshold):
                print 'Failure in checking device option', i, 'and output',
                print op.output[outputs_to_check[j]], '. The outputs are:'
                print x.flatten()
                print y.flatten()
                success = False
            #else:
            #    print ('Passed device pair (0, %d), %s %s' %
            #           (i, outputs_to_check[j], y.shape))
    workspace.SwitchWorkspace(old_ws_name)
    return success
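# ---------------------------------------------------------------------------
# Minimal usage sketch for the operator-level device check above (illustration
# only). It assumes the same DeviceChecker(threshold, device_options) wrapper
# and the core.CreateOperator helper from caffe2.python; import paths and the
# CUDA device constant may need adjusting for the local Caffe2 version.
import numpy as np

from caffe2.proto import caffe2_pb2
from caffe2.python import core, device_checker

cpu_option = caffe2_pb2.DeviceOption()
cpu_option.device_type = caffe2_pb2.CPU
gpu_option = caffe2_pb2.DeviceOption()
gpu_option.device_type = caffe2_pb2.CUDA

# A single Softmax operator: CheckSimple feeds X on each device, runs the op,
# and compares output index 0 ("Y") against the first device's result.
op = core.CreateOperator("Softmax", ["X"], ["Y"])
X = np.random.rand(4, 10).astype(np.float32)

checker = device_checker.DeviceChecker(0.01, [cpu_option, gpu_option])
success = checker.CheckSimple(op, [X], [0])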
def CheckSimple(self, op, inputs, input_to_check, outputs_with_grads,
                grad_ops=None):
    """Checks the operator in a very simple fashion by stacking a sum of
    squares on the top.

    Inputs:
      op: the operator to be checked.
      inputs: the input data in numpy arrays.
      input_to_check: an index specifying which input blob we should check.
      outputs_with_grads: indices specifying which output blobs we will need
          to check gradients with. For these outputs, we will collect a
          squared sum and also feed in their gradients.
      grad_ops: the gradient operators. If not given, we will get them from
          the gradient registry.
    Outputs:
      boolean: True if it passes, False if it does not pass.
    """
    # Entering the checker workspace
    old_ws_name = workspace.CurrentWorkspace()
    if self._workspace_name != old_ws_name:
        workspace.SwitchWorkspace(self._workspace_name, True)

    op.device_option.CopyFrom(self._device_option)
    if grad_ops is None:
        grad_ops = core.GradientRegistry.GetGradient(op)

    dims_to_check = inputs[input_to_check].size
    # First, feed in the input.
    for i, arr in enumerate(inputs):
        workspace.FeedBlob(op.inputs[i], arr, self._device_option)

    # Get the loss and gradient for the original.
    input_name = op.inputs[input_to_check]
    loss, grad = self.GetLossAndGrad(op, grad_ops, inputs[input_to_check],
                                     input_name, outputs_with_grads)
    grad_estimate = np.zeros_like(inputs[input_to_check])
    for current_dim in range(dims_to_check):
        # Positive gradient
        inputs[input_to_check].flat[current_dim] += self._stepsize
        pos_loss, _ = self.GetLossAndGrad(op, grad_ops,
                                          inputs[input_to_check], input_name,
                                          outputs_with_grads)
        # Negative gradient
        inputs[input_to_check].flat[current_dim] -= self._stepsize * 2
        neg_loss, _ = self.GetLossAndGrad(op, grad_ops,
                                          inputs[input_to_check], input_name,
                                          outputs_with_grads)
        # Recover the value
        inputs[input_to_check].flat[current_dim] += self._stepsize
        # Central-difference estimate of the derivative along this dimension.
        grad_estimate.flat[current_dim] = (
            pos_loss - neg_loss) / self._stepsize / 2
    # Now, check correctness
    scale = np.maximum(np.maximum(np.abs(grad), np.abs(grad_estimate)), 1)
    fail_mat = (np.abs(grad - grad_estimate) > scale * self._threshold)
    if np.any(fail_mat):
        idx = np.flatnonzero(fail_mat)
        #print 'Failed. [idx, grad, grad_estimate] are:'
        #print np.vstack([idx, grad.flat[idx], grad_estimate.flat[idx]]).T
        ret = False
    else:
        ret = True
    # After finishing, cleaning up things.
    if self._workspace_name != old_ws_name:
        # We reset the workspace to make sure everything intermediate is
        # cleaned up. Note that there is no need to delete a workspace - when
        # empty it takes a very limited amount of memory.
        workspace.ResetWorkspace()
        workspace.SwitchWorkspace(old_ws_name)
    return ret, grad, grad_estimate
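# ---------------------------------------------------------------------------
# Minimal usage sketch for the numerical gradient check above (illustration
# only). It assumes the surrounding class is Caffe2's GradientChecker,
# constructed as GradientChecker(stepsize, threshold, device_option,
# workspace_name); constructor arguments and import paths may differ between
# versions.
import numpy as np

from caffe2.proto import caffe2_pb2
from caffe2.python import core, gradient_checker

device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CPU

# Check the analytical Tanh gradient against the central-difference estimate
# computed by CheckSimple, w.r.t. input 0, using the loss built from output 0.
op = core.CreateOperator("Tanh", ["X"], ["Y"])
X = np.random.randn(5, 5).astype(np.float32)

checker = gradient_checker.GradientChecker(
    stepsize=0.05, threshold=0.05, device_option=device_option)
passed, grad, grad_estimate = checker.CheckSimple(
    op, [X], input_to_check=0, outputs_with_grads=[0])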