def _run_compiled(self, compiled):
    """Execute a compiled (plan, output_list, workspace_type) triple."""
    plan, output_list, workspace_type = compiled
    # Expose every output blob in the parent workspace so it can be
    # fetched by callers after the run.
    blob_refs = []
    for blob_name in output_list.names():
        name_str = str(blob_name)
        self._ws.create_blob(name_str)
        blob_refs.append(core.BlobReference(name_str))
    output_list.set_values(blob_refs, _fetch_func=self._fetch_output)
    # PRIVATE workloads run in a fresh child workspace; everything else
    # runs directly in the parent workspace.
    if workspace_type == WorkspaceType.PRIVATE:
        task_ws = workspace.C.Workspace(self._ws)
    else:
        task_ws = self._ws
    with workspace.WorkspaceGuard(task_ws):
        task_ws.run(plan)
def _infer_shape_from_initializer(self):
    """Return the parameter's shape as a list, or None if it cannot be inferred.

    First checks whether the initializer op carries an explicit 'shape'
    argument; failing that, runs the initializer in a scratch workspace
    and reads the shape of the resulting blob.
    """
    # Fast path: an explicit 'shape' argument on the initializer op.
    for op_arg in self.initializer.arg:
        if op_arg.name == 'shape':
            return list(op_arg.ints)
    # Slow path: materialize the blob and query its shape at runtime.
    with workspace.WorkspaceGuard("model_init_by_loading_params"):
        try:
            probe_net = core.Net("shape_checker")
            probe_net._net.op.extend([self.initializer])
            shape_blob = probe_net.NextScopedBlob(self.parameter + "_shape")
            probe_net.Shape([self.parameter], shape_blob)
            workspace.RunNetOnce(probe_net)
            return workspace.FetchBlob(shape_blob).tolist()
        except RuntimeError:
            logger.warning(
                "Cannot infer the shape of blob {} from operator {}".
                format(self.parameter, self.initializer.type))
            # Leave the scratch workspace clean for subsequent probes.
            workspace.ResetWorkspace()
            return None
def _run_task_group(self, task_group):
    """Run a task group, compiling it to a Plan on first use and caching it."""
    # Compile and memoize the plan so repeated runs skip recompilation.
    if task_group not in self._plan_caches:
        compiled_task = task_group.to_task()
        compiled_plan = core.Plan('task_group_plan')
        compiled_plan.AddStep(compiled_task.get_step())
        self._plan_caches[task_group] = (compiled_plan, compiled_task)
    plan, task = self._plan_caches[task_group]
    # make sure the output blobs belong to the parent workspace
    outputs = []
    for out_name in task.output_names():
        out_str = str(out_name)
        self._ws.create_blob(out_str)
        outputs.append(core.BlobReference(out_str))
    task.set_outputs(outputs, _fetch_func=self._fetch_output)
    # PRIVATE tasks get their own child workspace; others share the parent.
    if task.workspace_type == WorkspaceType.PRIVATE:
        task_ws = workspace.C.Workspace(self._ws)
    else:
        task_ws = self._ws
    with workspace.WorkspaceGuard(task_ws):
        task_ws.run(plan)
def test_relu(self, X, engine):
    """Check Functional.Relu against the reference Relu operator output."""
    # Nudge inputs away from exact zero so the Relu result is unambiguous
    # (values at 0.0 could differ between implementations).
    X += 0.02 * np.sign(X)
    X[X == 0.0] += 0.02
    relu_out = Functional.Relu(X)
    # The functional result is addressable both positionally and by name.
    y_by_index = relu_out[0]
    y_by_name = relu_out["output_0"]
    # Compute the reference in an isolated workspace.
    with workspace.WorkspaceGuard("tmp_workspace"):
        ref_op = core.CreateOperator("Relu", ["X"], ["Y"], engine=engine)
        workspace.FeedBlob("X", X)
        workspace.RunOperatorOnce(ref_op)
        Y_ref = workspace.FetchBlob("Y")
    np.testing.assert_array_equal(
        y_by_index, Y_ref, err_msg='Functional Relu result mismatch')
    np.testing.assert_array_equal(
        y_by_name, Y_ref, err_msg='Functional Relu result mismatch')
def run_conv_or_fc(
    test_case,
    init_net,
    net,
    X,
    W,
    b,
    op_type,
    engine,
    order,
    gc,
    outputs,
    scale=None,
    zero_point=None,
):
    """Run a Conv or FC net through both workspace execution paths and
    collect each run's "Y" output into `outputs`.

    Args:
        test_case: test object exposing a `.ws` Caffe2 workspace wrapper.
        init_net: optional initialization net run once before `net`.
        net: the net containing the Conv/FC op under test.
        X, W, b: input, weight, and bias tensors fed as blobs.
        op_type, engine: identifiers recorded alongside each result.
        order: layout string for Conv (truthy); falsy for FC.
        gc: device option used when feeding blobs.
        outputs: list mutated in place; one namedtuple appended per run.
        scale, zero_point: if both given, an Int8 quant-param blob is created.
    """
    if order:
        # Conv results carry the memory layout.
        Output = collections.namedtuple(
            "Output", ["Y", "op_type", "engine", "order"])
    else:
        # FC results have no layout field.
        Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])

    def record(Y):
        # Single place that builds the result record; the original code
        # duplicated this if/else in both execution paths below.
        if order:
            outputs.append(
                Output(Y=Y, op_type=op_type, engine=engine, order=order))
        else:
            outputs.append(Output(Y=Y, op_type=op_type, engine=engine))

    # We run DNNLOWP ops multiple times to test their first runs that
    # do caching so exercises different code paths from the subsequent
    # runs

    # self.ws.run re-creates operator every time so this test covers
    # cases when we have multiple nets sharing the same workspace
    test_case.ws.create_blob("X").feed(X, device_option=gc)
    test_case.ws.create_blob("W").feed(W, device_option=gc)
    test_case.ws.create_blob("b").feed(b, device_option=gc)
    if scale is not None and zero_point is not None:
        with workspace.WorkspaceGuard(test_case.ws):
            dnnlowp_pybind11.CreateInt8QuantParamsBlob(
                "quant_param", float(scale), int(zero_point))
    if init_net:
        test_case.ws.run(init_net)
    # Non-empty engines run twice to exercise first-run caching paths.
    for i in range(1 if engine == "" else 2):
        test_case.ws.run(net)
        Y = test_case.ws.blobs["Y"].fetch()
        record(Y)

    # workspace.CreateNet + workspace.RunNet reuses the same operator
    if engine != "":
        workspace.FeedBlob("X", X)
        workspace.FeedBlob("W", W)
        workspace.FeedBlob("b", b)
        if scale is not None and zero_point is not None:
            dnnlowp_pybind11.CreateInt8QuantParamsBlob(
                "quant_param", float(scale), int(zero_point))
        if init_net:
            workspace.RunNetOnce(init_net)
        workspace.CreateNet(net)
        for i in range(2):
            workspace.RunNet(net)
            Y = workspace.FetchBlob("Y")
            record(Y)