def run_pyskip(self, num_threads=1):
    # Time a single 3D convolution of the large operand against one kernel.
    kernel = pyskip.Tensor.from_numpy(self._numpy_kernel())
    with num_threads_scope(num_threads):
        set_value("accelerated_eval", True)  # TODO: check me
        set_value("flush_tree_size_threshold", 2**30)
        t = Timer()
        with t:
            _ = pyskip.convolve.conv_3d(self.megatensor, kernel).eval()
        return t.duration_ms
def run_pyskip(self, num_threads=1):
    # Time one 2D convolution per kernel over the MNIST operand; tensor
    # conversion happens outside the timed region.
    operand = pyskip.Tensor.from_numpy(self.mnist_np_array)
    kernels = [pyskip.Tensor.from_numpy(e) for e in self._numpy_kernels()]
    with num_threads_scope(num_threads):
        set_value("accelerated_eval", False)  # TODO: check me
        set_value("flush_tree_size_threshold", 2**30)
        t = Timer()
        with t:
            for kernel in kernels:
                _ = pyskip.convolve.conv_2d(operand, kernel).eval()
        return t.duration_ms
def run_pyskip(self, num_threads=1, use_custom_kernel=False):
    # Time an element-wise sum reduction over the pre-built pyskip inputs.
    inputs = self._pyskip_inputs
    gc.collect()
    with num_threads_scope(num_threads):
        set_value("accelerated_eval", False)  # TODO: check me
        set_value("flush_tree_size_threshold", 2**30)
        if use_custom_kernel:
            set_value("custom_eval_kernel", "add_int32")
        t = Timer()
        with t:
            _ = functools.reduce(lambda x, y: x + y, inputs).eval()
        return t.duration_ms
def run_pyskip(self, num_threads=1, use_custom_kernel=False, use_accelerated=False):
    # Same sum reduction, but operands are built via the C++ extension and the
    # thread count can be overridden on the benchmark instance.
    inputs = [_pyskip_cpp_ext.from_numpy(t) for t in self._numpy_inputs]
    gc.collect()
    with num_threads_scope(self.override_pyskip_threads or num_threads):
        set_value("accelerated_eval", use_accelerated)
        set_value("flush_tree_size_threshold", 2**30)
        if use_custom_kernel:
            set_value("custom_eval_kernel", "add_int32")
        t = Timer()
        with t:
            _ = functools.reduce(lambda x, y: x + y, inputs).eval()
        return t.duration_ms
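# Usage sketch (not part of the benchmark classes above): one plausible way to
# sweep the reduction benchmark over thread counts and eval modes. `benchmark`
# is assumed to be an instance of whatever class defines the run_pyskip method
# directly above; only that method's signature is taken from the code.
def sweep_reduce_benchmark(benchmark, thread_counts=(1, 2, 4, 8)):
    results = {}
    for n in thread_counts:
        for accelerated in (False, True):
            ms = benchmark.run_pyskip(num_threads=n, use_accelerated=accelerated)
            results[(n, accelerated)] = ms
    return results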
def run_pyskip_accelerated(self):
    with config_scope():
        set_value("accelerated_eval", True)
        return self._run_pyskip()

def run_pyskip_slow(self):
    with config_scope():
        set_value("accelerated_eval", False)
        return self._run_pyskip()
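# Usage sketch, assuming an instance `bench` of the (unnamed) benchmark class
# that provides run_pyskip_accelerated / run_pyskip_slow above; the reported
# speedup ratio is illustrative only.
def compare_eval_modes(bench):
    fast_ms = bench.run_pyskip_accelerated()
    slow_ms = bench.run_pyskip_slow()
    print(f"accelerated: {fast_ms:.2f} ms, slow: {slow_ms:.2f} ms, "
          f"speedup: {slow_ms / fast_ms:.2f}x")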