def test_subgraph_backend_gluon_ext1(tmpdir):
    def get_net():
        net = nn.HybridSequential()  # Here we use the class HybridSequential.
        net.add(nn.Dense(256, activation='relu'),
                nn.Dense(128, activation='relu'),
                nn.Dense(2))
        return net

    # regular inference
    x = mx.np.random.normal(size=(1, 512), ctx=mx.current_context())
    net = get_net()
    net.initialize(ctx=mx.current_context())
    outputs1 = net(x)
    param_path = os.path.join(str(tmpdir), 'test_subgraph_backend_gluon_ext1.params')
    net.save_parameters(param_path)

    # after partitioning
    net = get_net()
    net.load_parameters(param_path, ctx=mx.current_context())
    subgraph_backend = 'default'
    op_names = ['FullyConnected']
    check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                                   mx_uint(len(op_names)),
                                                   c_str_array(op_names)))
    net.optimize_for(x, backend=subgraph_backend)
    outputs2 = net(x)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    # compare outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal(mx.np.abs(outputs1[i] - outputs2[i]).sum().asnumpy(),
                            onp.zeros(shape=(1,)))

def copyMakeBorder(src, top, bot, left, right, border_type=cv2.BORDER_CONSTANT, value=0):
    """Pad image border

    Wrapper for cv2.copyMakeBorder that uses mx.nd.NDArray

    Parameters
    ----------
    src : NDArray
        Image in (width, height, channels).
        Others are the same with cv2.copyMakeBorder.

    Returns
    -------
    img : NDArray
        padded image
    """
    hdl = NDArrayHandle()
    check_call(_LIB.MXCVcopyMakeBorder(src.handle,
                                       ctypes.c_int(top), ctypes.c_int(bot),
                                       ctypes.c_int(left), ctypes.c_int(right),
                                       ctypes.c_int(border_type), ctypes.c_double(value),
                                       ctypes.byref(hdl)))
    return mx.nd.NDArray(hdl)

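# A minimal usage sketch (added illustration, not from the original source):
# pad a hypothetical 3-channel image with a 10-pixel constant border on every
# side; `img` here is just a placeholder input array.
img = mx.nd.zeros((100, 200, 3), dtype='uint8')  # hypothetical input image
padded = copyMakeBorder(img, 10, 10, 10, 10, cv2.BORDER_CONSTANT, 0)
assert padded.shape == (120, 220, 3)
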
def push_completion_callback(on_complete):
    # Call on_complete directly
    check_call(BYTESCHEDULER_LIB.bytescheduler_mxnet_on_complete(
        c_void_p(on_complete)))
    # Called after push instead of after pull
    self.notify_finish()

def _check_subgraph_exe1(sym, subgraph_backend, op_names):
    """Use the partitioned sym to simple_bind an executor and compare the outputs
    with those of the original executor."""
    out = SymbolHandle()
    check_call(_LIB.MXBuildSubgraphByOpNames(sym.handle, c_str(subgraph_backend),
                                             mx_uint(len(op_names)),
                                             c_str_array(op_names),
                                             ctypes.byref(out)))

    partitioned_sym = Symbol(out)
    assert partitioned_sym.list_inputs() == sym.list_inputs()
    assert partitioned_sym.list_arguments() == sym.list_arguments()
    assert partitioned_sym.list_auxiliary_states() == sym.list_auxiliary_states()
    exe = sym.simple_bind(ctx=mx.current_context(), grad_req='null')
    partitioned_exe = partitioned_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
    input_names = sym.list_inputs()
    for name in input_names:
        if name in exe.arg_dict:
            exe.arg_dict[name][:] = mx.nd.random.uniform(shape=exe.arg_dict[name].shape)
            partitioned_exe.arg_dict[name][:] = exe.arg_dict[name]
        else:
            assert name in exe.aux_dict
            exe.aux_dict[name][:] = mx.nd.random.uniform(shape=exe.aux_dict[name].shape)
            partitioned_exe.aux_dict[name][:] = exe.aux_dict[name]
    exe.forward()
    partitioned_exe.forward()
    assert len(exe.outputs) == len(partitioned_exe.outputs)
    for i in range(len(exe.outputs)):
        assert_almost_equal((exe.outputs[i] - partitioned_exe.outputs[i]).abs().sum().asnumpy(),
                            np.zeros(shape=(1,)))

def _init_symbol_module(root_namespace):
    """List and add all the atomic symbol functions to the current module."""
    plist = ctypes.POINTER(ctypes.c_char_p)()
    size = ctypes.c_uint()

    check_call(_LIB.MXListAllOpNames(ctypes.byref(size), ctypes.byref(plist)))
    op_names = []
    for i in range(size.value):
        op_names.append(py_str(plist[i]))

    module_obj = _sys.modules["%s.symbol" % root_namespace]
    module_sparse = _sys.modules["%s.symbol.sparse" % root_namespace]
    module_internal = _sys.modules["%s.symbol._internal" % root_namespace]
    module_contrib = _sys.modules["%s.contrib.symbol" % root_namespace]
    for name in op_names:
        hdl = OpHandle()
        check_call(_LIB.NNGetOpHandle(c_str(name), ctypes.byref(hdl)))
        function = _make_atomic_symbol_function(hdl, name)
        if function.__name__.startswith('_contrib_'):
            function.__name__ = function.__name__[9:]
            function.__module__ = 'mxnet.contrib.symbol'
            setattr(module_contrib, function.__name__, function)
        elif function.__name__.startswith('_'):
            setattr(module_internal, function.__name__, function)
        else:
            setattr(module_obj, function.__name__, function)

        # register sparse ops under mxnet.symbol.sparse
        if function.__name__.startswith('_sparse_'):
            function.__name__ = function.__name__[8:]
            function.__module__ = 'mxnet.symbol.sparse'
            setattr(module_sparse, function.__name__, function)

def test_subgraph_exe7(sym, subgraph_backend, op_names):
    """Call optimize_for to trigger graph partitioning without inferring shapes/types
    first, then bind and compare results of the partitioned sym and the original sym."""
    # bind
    sym, _, _ = sym
    arg_shapes, _, aux_shapes = sym.infer_shape()
    arg_array = [mx.nd.random.uniform(shape=shape) for shape in arg_shapes]
    aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes]
    exe1 = sym._bind(ctx=mx.current_context(), args=arg_array, aux_states=aux_array, grad_req='null')
    exe1.forward()

    # partition before bind
    check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                                   mx_uint(len(op_names)),
                                                   c_str_array(op_names)))
    part_sym = sym.optimize_for(subgraph_backend)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    exe2 = part_sym._bind(ctx=mx.current_context(), args=arg_array, aux_states=aux_array, grad_req='null')
    exe2.forward()

    # compare outputs
    outputs1 = exe1.outputs
    outputs2 = exe2.outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            onp.zeros(shape=(1,)))

def test_subgraph_exe2(sym, subgraph_backend, op_names):
    def get_executor(sym, subgraph_backend=None, op_names=None, original_exec=None):
        exe = sym._simple_bind(ctx=mx.current_context(), grad_req='null')
        input_names = sym.list_inputs()
        for name in input_names:
            if name in exe.arg_dict:
                exe.arg_dict[name][:] = mx.nd.random.uniform(shape=exe.arg_dict[name].shape) \
                    if original_exec is None else original_exec.arg_dict[name]
            else:
                assert name in exe.aux_dict
                exe.aux_dict[name][:] = mx.nd.random.uniform(shape=exe.aux_dict[name].shape) \
                    if original_exec is None else original_exec.aux_dict[name]
        exe.forward()
        return exe

    sym, _, _ = sym
    original_exec = get_executor(sym)
    check_call(_LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend),
                                                 mx_uint(len(op_names)),
                                                 c_str_array(op_names)))
    partitioned_exec = get_executor(sym, subgraph_backend, op_names, original_exec)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
    outputs1 = original_exec.outputs
    outputs2 = partitioned_exec.outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            onp.zeros(shape=(1,)))

def get_executor(sym, subgraph_backend=None, op_names=None, original_exec=None):
    if subgraph_backend is not None:
        os.environ['MXNET_SUBGRAPH_BACKEND'] = subgraph_backend
        check_call(_LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend),
                                                     mx_uint(len(op_names)),
                                                     c_str_array(op_names)))
    arg_shapes, _, aux_shapes = sym.infer_shape()
    if subgraph_backend is None:
        arg_array = [mx.nd.random.uniform(shape=shape) for shape in arg_shapes]
        aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes]
    else:
        arg_array = None
        aux_array = None
    exe = sym.bind(ctx=mx.current_context(),
                   args=arg_array if subgraph_backend is None else original_exec.arg_arrays,
                   aux_states=aux_array if subgraph_backend is None else original_exec.aux_arrays,
                   grad_req='null')
    exe.forward()
    if subgraph_backend is not None:
        check_call(_LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
        del os.environ['MXNET_SUBGRAPH_BACKEND']
    return exe

def broadcast_(tensor, root_rank, name=None, priority=0):
    """
    A function that broadcasts the input tensor on root rank to the same input
    tensor on all other Horovod processes. The operation is performed in-place.

    The broadcast operation is keyed by the name. If name is not provided, an
    incremented auto-generated name is used. The tensor type and shape must be
    the same on all Horovod processes for a given name. The broadcast will not
    start until all processes are ready to send and receive the tensor.

    Arguments:
        tensor: A tensor to broadcast.
        root_rank: The rank to broadcast the value from.
        name: A name of the broadcast operation.
        priority: The priority of this operation. Higher priority operations
                  are likely to be executed before other operations.

    Returns:
        A tensor of the same shape and type as `tensor`, with the value
        broadcasted from root rank.
    """
    c_in = tensor.handle
    c_out = tensor.handle
    if isinstance(name, string_types):
        check_call(MPI_MXNET_LIB_CTYPES.horovod_mxnet_broadcast_async(
            c_in, c_out, c_str(name), ctypes.c_int(root_rank),
            ctypes.c_int(priority)))
    else:
        check_call(MPI_MXNET_LIB_CTYPES.horovod_mxnet_broadcast_async(
            c_in, c_out, name, ctypes.c_int(root_rank),
            ctypes.c_int(priority)))
    return tensor

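# A minimal usage sketch (added illustration, not from the original source):
# broadcast model parameters from rank 0 so all workers start from identical
# weights. Assumes horovod.mxnet has been initialized and `net` is a
# placeholder for a Gluon block.
params = net.collect_params()
for param_name, param in sorted(params.items()):
    broadcast_(param.data(), root_rank=0, name=param_name)
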
def _prepare(self):
    """Post start barrier OP, start OP, comm OP, end OP and end barrier OP to the
    MXNet engine. The function of each kind of OP is explained below.

    start barrier OP: barriers the start of a parent ByteTask, used to maintain
    the original dependency.

    start OP: notifies Core about task readiness. It is also used to delay the
    start of a child ByteTask.

    comm OP: the OP that does the real communication, e.g., push, pull, allreduce.

    end OP: an OP that runs after a child ByteTask is finished. It notifies Core
    about the task completion.

    end barrier OP: an OP that runs after the parent ByteTask is finished, used
    to maintain the original dependency.
    """
    if self.parent is None:
        real = self._tensor.handle
        avatar = NDArrayHandle()
        check_call(BYTESCHEDULER_LIB.bytescheduler_get_ndarray_avatar(
            real, byref(avatar)))
        self._avatar = NDArray(avatar)
        avatar = self._avatar.handle
    else:
        real = self.parent._tensor.handle
        avatar = self._tensor.handle

    self._post_start_barrier(avatar, real)
    self._post_start_op(avatar)

    # Post real op
    if self.parent is None:
        self._post_communication(self._avatar)
    else:
        self._post_communication(self._tensor)

    self._post_end_op(avatar)
    self._post_end_barrier(avatar, real)

def batchnorm_add_relu(rank, data, addend, io_layout, batchnorm_layout,
                       bn_group, local_gpus, local_comm, **kwargs):
    # Transpose as needed to batchnorm_layout
    transposed_data_as_needed = transform_layout(data, io_layout, batchnorm_layout)
    transposed_addend_as_needed = transform_layout(addend, io_layout, batchnorm_layout)
    bn_axis = 3 if batchnorm_layout == 'NHWC' else 1

    xbuf_ptr = (ctypes.c_void_p * local_gpus)()
    if bn_group > 1:
        sync_depth = bn_group_to_sync_depth(bn_group)
        if local_comm is not None:
            handler = np.zeros(handler_bytes(), dtype=np.byte)
            check_call(_LIB.MXInitXBufSingle(rank, sync_depth, xbuf_ptr,
                                             handler.ctypes.data_as(ctypes.c_void_p)))
            handlers = np.asarray([np.zeros(handler_bytes(), dtype=np.byte)] * local_gpus)
            local_comm.Allgather([handler, handler_bytes(), MPI.BYTE],
                                 [handlers, handler_bytes(), MPI.BYTE])
            check_call(_LIB.MXOpenIpcHandles(rank, local_gpus, sync_depth, xbuf_ptr,
                                             handlers.ctypes.data_as(ctypes.c_void_p)))
        else:
            check_call(_LIB.MXInitXBuf(local_gpus, sync_depth, xbuf_ptr))

    anti_gc.append(xbuf_ptr)
    batchnormed = mx.sym.BatchNormAddRelu(data=transposed_data_as_needed,
                                          addend=transposed_addend_as_needed,
                                          axis=bn_axis, bn_group=bn_group,
                                          xbuf_ptr=ctypes.addressof(xbuf_ptr),
                                          **kwargs)
    # Transpose back to i/o layout as needed
    return transform_layout(batchnormed, batchnorm_layout, io_layout)

def allreduce_(tensor, average=True, name=None, priority=0):
    """
    A function that performs in-place averaging or summation of the input
    tensor over all the Horovod processes.

    The reduction operation is keyed by the name. If name is not provided, an
    incremented auto-generated name is used. The tensor type and shape must be
    the same on all Horovod processes for a given name. The reduction will not
    start until all processes are ready to send and receive the tensor.

    Arguments:
        tensor: A tensor to average and sum.
        average: A flag indicating whether to compute average or summation,
                 defaults to average.
        name: A name of the reduction operation.
        priority: The priority of this operation. Higher priority operations
                  are likely to be executed before other operations.

    Returns:
        A tensor of the same shape and type as `tensor`, averaged or summed
        across all processes.
    """
    c_in = tensor.handle
    c_out = tensor.handle
    if isinstance(name, string_types):
        check_call(MPI_MXNET_LIB_CTYPES.horovod_mxnet_allreduce_async(
            c_in, c_out, c_str(name), ctypes.c_bool(average),
            ctypes.c_int(priority)))
    else:
        check_call(MPI_MXNET_LIB_CTYPES.horovod_mxnet_allreduce_async(
            c_in, c_out, name, ctypes.c_bool(average),
            ctypes.c_int(priority)))
    return tensor

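# A minimal usage sketch (added illustration, not from the original source):
# average a tensor in place across all workers. Assumes hvd.init() has been
# called (with horovod.mxnet imported as hvd) and every rank holds a
# same-shaped NDArray under the same name.
grad = mx.nd.ones((2, 3)) * hvd.rank()
allreduce_(grad, average=True, name="example_grad")
# After the call completes, every rank holds the mean over all ranks.
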
def _post_start_op(self, avatar):
    """The start op is only for notifying the Core about task readiness.
    It does not add any dependency to the original DAG."""
    if self._immediate:
        return

    def start_callback(on_complete):
        if self._immediate:
            # call on_complete directly
            check_call(BYTESCHEDULER_LIB.bytescheduler_mxnet_on_complete(
                c_void_p(on_complete)))
            return
        self._on_complete = on_complete
        self.notify_ready()

    # avoid garbage collection
    self._mxnet_start_callback = callback_t(start_callback)

    # post start op
    if self.op == "push_pull":
        tensor_out = (NDArrayHandle * 1)(*[self._barrier_tensor.handle])
        check_call(BYTESCHEDULER_LIB.bytescheduler_mxnet_op(
            tensor_out, 0, tensor_out, 1,
            self._mxnet_start_callback, 1000000 - self.priority))
    else:
        tensor_out = (NDArrayHandle * len(avatar))(*avatar)
        check_call(BYTESCHEDULER_LIB.bytescheduler_mxnet_op(
            tensor_out, 0, tensor_out, len(avatar),
            self._mxnet_start_callback, 1000000 - self.priority))

def scatter_reduce(tensor, name=None, priority=0):
    """
    A function that performs scatter reduce of the input tensor over all the
    Horovod processes. The input tensor is not modified.

    Arguments:
        tensor: A tensor to average and sum.
        name: A name of the reduction operation.
        priority: The priority of this operation. Higher priority operations
                  are likely to be executed before other operations.

    Returns:
        A tensor of the same shape and type as `tensor`, averaged or summed
        across all processes.
    """
    output = mx.nd.zeros(shape=tensor.shape, ctx=tensor.context, dtype=tensor.dtype)
    c_in = tensor.handle
    c_out = output.handle

    if isinstance(name, string_types):
        check_call(MPI_MXNET_LIB_CTYPES.horovod_mxnet_scatter_reduce_async(
            c_in, c_out, c_str(name), ctypes.c_int(priority)))
    else:
        check_call(MPI_MXNET_LIB_CTYPES.horovod_mxnet_scatter_reduce_async(
            c_in, c_out, name, ctypes.c_int(priority)))
    return output

def scatter_reduce_(tensor, name=None, priority=0):
    """
    A function that performs in-place scatter reduce of the input tensor over
    all the Horovod processes.

    Arguments:
        tensor: A tensor to average and sum.
        name: A name of the reduction operation.
        priority: The priority of this operation. Higher priority operations
                  are likely to be executed before other operations.

    Returns:
        A tensor of the same shape and type as `tensor`, averaged or summed
        across all processes.
    """
    c_in = tensor.handle
    c_out = tensor.handle
    if isinstance(name, string_types):
        check_call(MPI_MXNET_LIB_CTYPES.horovod_mxnet_scatter_reduce_async(
            c_in, c_out, c_str(name), ctypes.c_int(priority)))
    else:
        check_call(MPI_MXNET_LIB_CTYPES.horovod_mxnet_scatter_reduce_async(
            c_in, c_out, name, ctypes.c_int(priority)))
    return tensor

def test_subgraph_exe6(sym, subgraph_backend, op_names):
    """Call optimize_for to trigger graph partitioning with shapes/types, then
    _simple_bind and compare results of the partitioned sym and the original sym."""
    # _simple_bind
    sym, _, _ = sym
    exe1 = sym._simple_bind(ctx=mx.current_context(), grad_req='null')
    input_names = sym.list_inputs()
    set_random_inputs(exe1, input_names)
    exe1.forward()

    # infer shapes/types and partition before calling _simple_bind
    check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                                   mx_uint(len(op_names)),
                                                   c_str_array(op_names)))
    part_sym = sym.optimize_for(subgraph_backend, exe1.arg_dict, exe1.aux_dict)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    exe2 = part_sym._simple_bind(ctx=mx.current_context(), grad_req='null')
    copy_inputs_between_executors(exe1, exe2, input_names)
    exe2.forward()

    # compare outputs
    outputs1 = exe1.outputs
    outputs2 = exe2.outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            onp.zeros(shape=(1,)))

def test_subgraph_exe8(sym, subgraph_backend, op_names):
    """Call optimize_for to infer shapes and types before graph partitioning,
    then bind and compare results of the partitioned sym and the original sym."""
    # bind
    sym, _, _ = sym
    arg_shapes, _, aux_shapes = sym.infer_shape()
    arg_names = sym.list_arguments()
    aux_names = sym.list_auxiliary_states()
    arg_dict = {name: mx.nd.random.uniform(shape=shape)
                for name, shape in zip(arg_names, arg_shapes)}
    aux_dict = {name: mx.nd.random.uniform(shape=shape)
                for name, shape in zip(aux_names, aux_shapes)}
    exe1 = sym.bind(ctx=mx.current_context(), args=arg_dict, aux_states=aux_dict, grad_req='null')
    exe1.forward()

    # infer shapes/types and partition before calling bind
    check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                                   mx_uint(len(op_names)),
                                                   c_str_array(op_names)))
    part_sym = sym.optimize_for(subgraph_backend, arg_dict, aux_dict)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    exe2 = part_sym.bind(ctx=mx.current_context(), args=arg_dict, aux_states=aux_dict, grad_req='null')
    exe2.forward()

    # compare outputs
    outputs1 = exe1.outputs
    outputs2 = exe2.outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            np.zeros(shape=(1,)))

def load(fname):
    """Loads an array from file.

    See more details in ``save``.

    Parameters
    ----------
    fname : str
        The filename.

    Returns
    -------
    list of NDArray or dict of str to NDArray
        Loaded data.
    """
    if not isinstance(fname, string_types):
        raise TypeError('fname required to be a string')
    out_size = mx_uint()
    out_name_size = mx_uint()
    handles = ctypes.POINTER(NDArrayHandle)()
    names = ctypes.POINTER(ctypes.c_char_p)()
    check_call(_LIB.MXNDArrayLoad(c_str(fname),
                                  ctypes.byref(out_size),
                                  ctypes.byref(handles),
                                  ctypes.byref(out_name_size),
                                  ctypes.byref(names)))
    if out_name_size.value == 0:
        return [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(out_size.value)]
    else:
        assert out_name_size.value == out_size.value
        return dict((py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i])))
                    for i in range(out_size.value))

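# A minimal usage sketch (added illustration, not from the original source):
# round-trip a dict of NDArrays through mx.nd.save() and load(); the file path
# is a placeholder.
mx.nd.save('/tmp/example.params', {'w': mx.nd.ones((2, 2)), 'b': mx.nd.zeros((2,))})
loaded = load('/tmp/example.params')
assert set(loaded.keys()) == {'w', 'b'}
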
def _post_end_barrier(self, avatar, real):
    """The end barrier is for keeping the original dependency.
    It does not need any callback."""
    if self.parent is None:
        deps = real + avatar
        barrier_tensors_out = (NDArrayHandle * len(deps))(*deps)
        check_call(BYTESCHEDULER_LIB.bytescheduler_mxnet_barrier(
            barrier_tensors_out, 0, barrier_tensors_out, len(deps),
            10000000 - self.priority))
    else:
        # _children_tensors is a list of avatars, and each avatar itself is also a list
        if not hasattr(self.parent, "_children_tensors"):
            self.parent._children_tensors = [avatar]
        else:
            self.parent._children_tensors.append(avatar)
        if len(self.parent._children_tensors) == len(self.parent.children):
            tensors_in = [_ for sublist in self.parent._children_tensors for _ in sublist]
            barrier_tensors_in = (NDArrayHandle * len(tensors_in))(*tensors_in)
            barrier_tensors_out = (NDArrayHandle * len(real))(*real)
            check_call(BYTESCHEDULER_LIB.bytescheduler_mxnet_barrier(
                barrier_tensors_in, len(tensors_in),
                barrier_tensors_out, len(real),
                10000000 - self.priority))

def _do(self):
    """Let the start OP complete so that the real comm OP can run."""
    if hasattr(self, "_on_complete"):
        check_call(BYTESCHEDULER_LIB.bytescheduler_mxnet_on_complete(
            c_void_p(self._on_complete)))
    return

def _partition_single_tensor(self, tensor, size):
    """Partition a single tensor.

    Arguments:
        size: An integer. After partitioning, each tensor partition size must
              be equal to or smaller than `size`.

    Returns:
        A list of partitioned tensors.
    """
    number = (tensor.size - 1) // size + 1
    if number > tensor.shape[0]:
        self._logger.warning(
            "The number of tensor rows (with shape {}) is smaller than partition number {}."
            .format(tensor.shape, number))
        number = tensor.shape[0]
    num_per_partition = tensor.shape[0] // number
    partitions_with_extra = tensor.shape[0] % number

    partitions = []
    start = 0
    end = num_per_partition
    for i in range(number):
        handle = NDArrayHandle()
        check_call(BYTESCHEDULER_LIB.bytescheduler_get_ndarray_avatar(
            tensor.handle, byref(handle)))
        avatar = NDArray(handle)[start:end]
        partitions.append(avatar)
        start = end
        end += num_per_partition
        if i >= number - partitions_with_extra - 1:
            end += 1
    return partitions

def byteps_push_pull(tensor, version=0, priority=0, name=None, is_average=True):
    """
    A function that performs pushing and pulling tensors.

    The operation is keyed by the name. If name is not provided, an incremented
    auto-generated name is used. The tensor type and shape must be the same on
    all BytePS processes for a given name. The reduction will not start until
    all processes are ready to send and receive the tensor.

    This acts as a thin wrapper around an autograd function. If your input
    tensor requires gradients, then calling this function will allow gradients
    to be computed and backpropagated.

    Arguments:
        tensor: A tensor to average and sum.
        is_average: A flag indicating whether to compute average or summation,
                    defaults to average.
        name: A name of the reduction operation.

    Returns:
        None
    """
    c_in = tensor.handle
    if isinstance(name, string_types):
        check_call(MXNET_LIB_CTYPES.byteps_mxnet_push_pull_async(
            c_in, c_str(name), ctypes.c_int(version), ctypes.c_int(priority),
            ctypes.c_bool(is_average)))
    else:
        check_call(MXNET_LIB_CTYPES.byteps_mxnet_push_pull_async(
            c_in, name, ctypes.c_int(version), ctypes.c_int(priority),
            ctypes.c_bool(is_average)))
    return

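# A minimal usage sketch (added illustration, not from the original source):
# average a gradient across BytePS workers; assumes bps.init() has been called
# and the tensor name is consistent across workers.
grad = mx.nd.ones((4,))
byteps_push_pull(grad, version=0, priority=0, name="example.grad", is_average=True)
grad.wait_to_read()  # push_pull is asynchronous; block until the result is ready
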
def _check_subgraph_exe3(sym, subgraph_backend, op_names):
    """Use the partitioned sym to bind an executor and compare the outputs
    with those of the original executor."""
    out = SymbolHandle()
    check_call(_LIB.MXBuildSubgraphByOpNames(sym.handle, c_str(subgraph_backend),
                                             mx_uint(len(op_names)),
                                             c_str_array(op_names),
                                             ctypes.byref(out)))

    partitioned_sym = Symbol(out)
    input_names = sym.list_inputs()
    arg_names = sym.list_arguments()
    aux_names = sym.list_auxiliary_states()
    assert partitioned_sym.list_inputs() == input_names
    assert partitioned_sym.list_arguments() == arg_names
    assert partitioned_sym.list_auxiliary_states() == aux_names
    arg_shapes, _, aux_shapes = sym.infer_shape()
    arg_array = [mx.nd.random.uniform(shape=shape) for shape in arg_shapes]
    aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes]
    exe = sym.bind(ctx=mx.current_context(), args=arg_array,
                   aux_states=aux_array, grad_req='null')
    partitioned_exe = partitioned_sym.bind(ctx=mx.current_context(), args=arg_array,
                                           aux_states=aux_array, grad_req='null')
    exe.forward()
    partitioned_exe.forward()
    assert len(exe.outputs) == len(partitioned_exe.outputs)
    for i in range(len(exe.outputs)):
        assert_almost_equal((exe.outputs[i] - partitioned_exe.outputs[i]).abs().sum().asnumpy(),
                            np.zeros(shape=(1,)))

def get_executor(sym, subgraph_backend=None, op_names=None, original_exec=None):
    if subgraph_backend is not None:
        os.environ['MXNET_SUBGRAPH_BACKEND'] = subgraph_backend
        check_call(_LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend),
                                                     mx_uint(len(op_names)),
                                                     c_str_array(op_names)))
    exe = sym.simple_bind(ctx=mx.current_context(), grad_req='null')
    input_names = sym.list_inputs()
    for name in input_names:
        if name in exe.arg_dict:
            exe.arg_dict[name][:] = mx.nd.random.uniform(shape=exe.arg_dict[name].shape) \
                if original_exec is None else original_exec.arg_dict[name]
        else:
            assert name in exe.aux_dict
            exe.aux_dict[name][:] = mx.nd.random.uniform(shape=exe.aux_dict[name].shape) \
                if original_exec is None else original_exec.aux_dict[name]
    exe.forward()
    if subgraph_backend is not None:
        check_call(_LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
        del os.environ['MXNET_SUBGRAPH_BACKEND']
    return exe

def check_subgraph_exe9(sym, subgraph_backend, op_names):
    """Call hybridize() to partition the graph, and then compare results of the
    partitioned sym and the original sym. Here we run an inference before
    hybridizing with the subgraph_backend, which means shapes/types are passed
    during partitioning."""
    # create Gluon block for given symbol
    inputs = [mx.sym.var(i, dtype=mx_real_t) for i in sym[1]]
    sym_block = nn.SymbolBlock(sym[0], inputs)
    sym_block.initialize(ctx=mx.current_context())
    x = [mx.nd.random.uniform(shape=s, ctx=mx.current_context()) for s in sym[2]]

    # hybridize and export to get baseline
    sym_block.hybridize()
    outputs1 = sym_block(*x)
    sym_block.export('check_subgraph_exe9')

    # load model and partition
    sym_block = nn.SymbolBlock.imports('check_subgraph_exe9-symbol.json', sym[1],
                                       'check_subgraph_exe9-0000.params',
                                       ctx=mx.current_context())
    check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                                   mx_uint(len(op_names)),
                                                   c_str_array(op_names)))
    sym_block.hybridize(backend=subgraph_backend)
    outputs2 = sym_block(*x)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    # compare outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            np.zeros(shape=(1,)))

def _partition_tensor(self, size):
    """Partition the tensor without copying (zero-copy implementation).

    Note: ndarray works for up to ~4 billion parameters. The two lines below
    are buggy with Horovod -- they cause bad performance:

        tmp = self._tensor.reshape(-1, 1)
        avatar = tmp[start:end]
    """
    number = (self._tensor.size - 1) // size + 1
    if number > self._tensor.shape[0]:
        self._logger.warning(
            "The number of tensor rows (with shape {}) is smaller than partition number {}."
            .format(self._tensor.shape, number))
        number = self._tensor.shape[0]
    num_per_partition = self._tensor.shape[0] // number
    partitions_with_extra = self._tensor.shape[0] % number

    partitions = []
    start = 0
    end = num_per_partition
    for i in range(number):
        handle = NDArrayHandle()
        check_call(BYTESCHEDULER_LIB.bytescheduler_get_ndarray_avatar(
            self._tensor.handle, byref(handle)))
        avatar = NDArray(handle)[start:end]
        partitions.append(avatar)
        start = end
        end += num_per_partition
        if i >= number - partitions_with_extra - 1:
            end += 1
    return partitions

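# A small illustration (added, not from the original source) of the row
# partitioning arithmetic used above: splitting 10 rows into 3 partitions
# yields sizes 3, 3, 4, since the trailing partitions absorb the remainder.
rows, number = 10, 3
base, extra = rows // number, rows % number
sizes = [base + (1 if i >= number - extra else 0) for i in range(number)]
assert sizes == [3, 3, 4] and sum(sizes) == rows
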
def grouped_allreduce_(tensors, average=None, name=None, priority=0,
                       prescale_factor=1.0, postscale_factor=1.0,
                       process_set=global_process_set, op=None):
    """
    A function that performs in-place averaging or summation of the input
    tensors over all the Horovod processes.

    The reduction operations are keyed by the base name. If a base name is not
    provided, an incremented auto-generated base name is used. Reductions are
    performed across tensors in the same list position. The tensor type and
    shape must be the same on all Horovod processes for tensors sharing
    positions in the input tensor list. The reduction will not start until all
    processes are ready to send and receive the tensors.

    Arguments:
        tensors: A list of tensors to average or sum.
        average:
            .. warning:: .. deprecated:: 0.24.0

                Use `op` instead. Will be removed in v1.0.

        op: The reduction operation to combine tensors across different ranks.
            Can be Average (default) or Sum.
        name: A base name to use for the group reduction operation.
        priority: The priority of this operation. Higher priority operations
                  are likely to be executed before other operations.
        prescale_factor: Multiplicative factor to scale tensor before allreduce.
        postscale_factor: Multiplicative factor to scale tensor after allreduce.
        process_set: Process set object to limit this operation to a subset of
                     Horovod processes. Default is the global process set.

    Returns:
        A list containing tensors of the same shape and type as in `tensors`,
        averaged or summed across all processes.
    """
    op = handle_average_backwards_compatibility(op, average)
    assert op in [Average, Sum]

    if not tensors:
        return tensors

    c_in = c_handle_array(tensors)
    c_out = c_handle_array(tensors)
    c_name = c_str(name) if isinstance(name, string_types) else ctypes.c_char_p(None)

    check_call(MPI_MXNET_LIB_CTYPES.horovod_mxnet_allreduce_async(
        c_in, c_out, c_name, ctypes.c_bool(op == Average),
        ctypes.c_int(priority), ctypes.c_double(prescale_factor),
        ctypes.c_double(postscale_factor), ctypes.c_int(len(tensors)),
        ctypes.c_int(process_set.process_set_id)))
    return tensors

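# A minimal usage sketch (added illustration, not from the original source):
# sum a list of differently shaped tensors in place as one fused group
# operation; assumes hvd.init() has been called.
tensors = [mx.nd.ones((2,)), mx.nd.ones((3, 3))]
grouped_allreduce_(tensors, op=Sum, name="example_group")
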
def reducescatter(tensor, op=Average, name=None, priority=0,
                  process_set=global_process_set):
    """
    A function that performs asynchronous averaging or summation of the input
    tensor over all the Horovod processes, then scatters the results across all
    Horovod processes. The input tensor is not modified.

    The reduction operation is keyed by the name. If name is not provided, an
    incremented auto-generated name is used. The tensor type and shape must be
    the same on all Horovod processes for a given name. The reduction will not
    start until all processes are ready to send and receive the tensor.

    This acts as a thin wrapper around an autograd function. If your input
    tensor requires gradients, then calling this function will allow gradients
    to be computed and backpropagated.

    Arguments:
        tensor: A tensor to average/sum and scatter.
        op: The reduction operation to combine tensors across different ranks.
            Can be Average (default) or Sum.
        name: A name of the reduction operation.
        priority: The priority of this operation. Higher priority operations
                  are likely to be executed before other operations.
        process_set: Process set object to limit this operation to a subset of
                     Horovod processes. Default is the global process set.

    Returns:
        A tensor of the same rank and type as `tensor` across all processes.
        The shape is identical to the input shape, except for the first
        dimension, which will be divided across the different Horovod processes.
    """
    assert isinstance(tensor, mx.nd.NDArray)
    assert op in [Average, Sum]

    # Size of output is unknown, create output array that
    # will be resized during Horovod operation
    output = mx.nd.empty(shape=(1,), ctx=tensor.context, dtype=tensor.dtype)
    c_in = tensor.handle
    c_out = output.handle
    if isinstance(name, string_types):
        check_call(MPI_MXNET_LIB_CTYPES.horovod_mxnet_reducescatter_async(
            c_in, c_out, c_str(name), ctypes.c_int(priority),
            ctypes.c_int(process_set.process_set_id)))
    else:
        check_call(MPI_MXNET_LIB_CTYPES.horovod_mxnet_reducescatter_async(
            c_in, c_out, name, ctypes.c_int(priority),
            ctypes.c_int(process_set.process_set_id)))

    # Need to block here so changes to output tensor are visible
    output.wait_to_read()

    if op == Average:
        output /= process_set.size()
    return output

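# A minimal usage sketch (added illustration, not from the original source):
# each rank contributes a (size*2, 4) tensor and receives its own (2, 4) slice
# of the elementwise average; assumes hvd.init() has been called.
x = mx.nd.ones((hvd.size() * 2, 4)) * hvd.rank()
out = reducescatter(x, op=Average, name="example_rs")
assert out.shape == (2, 4)
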
def run_cast_storage_synthetic():
    def dense_to_sparse(m, n, density, ctx, repeat, stype):
        set_default_context(ctx)
        data_shape = (m, n)
        dns_data = rand_ndarray(data_shape, stype, density).tostype('default')
        dns_data.wait_to_read()

        # do one warm up run, verify correctness
        assert same(mx.nd.cast_storage(dns_data, stype).asnumpy(), dns_data.asnumpy())

        # start benchmarking
        cost = measure_cost(repeat, mx.nd.cast_storage, dns_data, stype)
        results = '{:10.1f} {:>10} {:8d} {:8d} {:10.2f}'.format(density * 100, str(ctx),
                                                                m, n, cost * 1000)
        print(results)

    check_call(_LIB.MXSetNumOMPThreads(ctypes.c_int(args.num_omp_threads)))

    # params
    # m           number of rows
    # n           number of columns
    # density     density of the matrix
    # num_repeat  number of benchmark runs to average over
    # contexts    mx.cpu(), mx.gpu()
    #             note: benchmark different contexts separately; to benchmark cpu, compile without CUDA
    # benchmarks  dns_to_csr, dns_to_rsp
    m = [512, 512]
    n = [50000, 100000]
    density = [1.00, 0.80, 0.60, 0.40, 0.20, 0.10, 0.05, 0.02, 0.01]
    num_repeat = 10
    contexts = [mx.gpu()]
    benchmarks = ["dns_to_csr", "dns_to_rsp"]

    # run benchmark
    for b in benchmarks:
        stype = ''
        print("==================================================")
        if b == "dns_to_csr":
            stype = 'csr'
            print(" cast_storage benchmark: dense to csr, size m x n ")
        elif b == "dns_to_rsp":
            stype = 'row_sparse'
            print(" cast_storage benchmark: dense to rsp, size m x n ")
        else:
            print("invalid benchmark: %s" % b)
            continue
        print("==================================================")
        headline = '{:>10} {:>10} {:>8} {:>8} {:>10}'.format('density(%)', 'context',
                                                             'm', 'n', 'time(ms)')
        print(headline)
        for i in range(len(n)):
            for ctx in contexts:
                for den in density:
                    dense_to_sparse(m[i], n[i], den, ctx, num_repeat, stype)
            print("")
        print("")

def main():
    args = parse_args()
    lhs_row_dim = int(args.lhs_row_dim)
    lhs_col_dim = int(args.lhs_col_dim)
    rhs_col_dim = int(args.rhs_col_dim)
    density = float(args.density)
    lhs_stype = args.lhs_stype
    rhs_stype = args.rhs_stype
    if args.rhs_density:
        rhs_density = float(args.rhs_density)
    else:
        rhs_density = density
    dot_func = mx.nd.sparse.dot if lhs_stype == "csr" else mx.nd.dot
    check_call(_LIB.MXSetNumOMPThreads(ctypes.c_int(args.num_omp_threads)))
    bench_dot(lhs_row_dim, lhs_col_dim, rhs_col_dim, density,
              rhs_density, dot_func, False, lhs_stype, rhs_stype, args.only_storage)

def imdecode(str_img, flag=1):
    """Decode image from str buffer.

    Wrapper for cv2.imdecode that uses mx.nd.NDArray

    Parameters
    ----------
    str_img : str
        str buffer read from image file
    flag : int
        same as flag for cv2.imdecode

    Returns
    -------
    img : NDArray
        decoded image in (width, height, channels)
        with BGR color channel order
    """
    hdl = NDArrayHandle()
    check_call(_LIB.MXCVImdecode(ctypes.c_char_p(str_img),
                                 mx_uint(len(str_img)),
                                 flag, ctypes.byref(hdl)))
    return mx.nd.NDArray(hdl)

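# A minimal usage sketch (added illustration, not from the original source):
# decode a JPEG file from its raw byte buffer; the file path is a placeholder.
with open('example.jpg', 'rb') as f:
    buf = f.read()
img = imdecode(buf, flag=1)  # flag=1 loads a 3-channel BGR color image
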
def resize(src, size, interpolation=cv2.INTER_LINEAR):
    """Resize image.

    Wrapper for cv2.resize that uses mx.nd.NDArray

    Parameters
    ----------
    src : NDArray
        image in (width, height, channels)
    size : tuple
        target size in (width, height)
    interpolation : int
        same as interpolation for cv2.resize

    Returns
    -------
    img : NDArray
        resized image
    """
    hdl = NDArrayHandle()
    check_call(_LIB.MXCVResize(src.handle, mx_uint(size[0]), mx_uint(size[1]),
                               interpolation, ctypes.byref(hdl)))
    return mx.nd.NDArray(hdl)

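# A minimal usage sketch (added illustration, not from the original source):
# downscale a decoded image to 224x224 with bilinear interpolation, reusing the
# `img` NDArray from the imdecode() sketch above.
small = resize(img, (224, 224), interpolation=cv2.INTER_LINEAR)
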
def tell(self):
    """Returns the current position of the read head."""
    pos = ctypes.c_size_t()
    check_call(_LIB.MXRecordIOReaderTell(self.handle, ctypes.byref(pos)))
    return pos.value

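# A minimal usage sketch (added illustration, not from the original source):
# track the read head while iterating over a RecordIO file; 'data.rec' is a
# placeholder path.
record = mx.recordio.MXRecordIO('data.rec', 'r')
item = record.read()
pos = record.tell()  # byte offset of the read head after the first record
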
def test_dot_synthetic():
    """benchmark mx.nd.dot(sparse_ndarray, dense_ndarray) with given density.
    `t_sparse` is the time cost of dot(csr, dns), while `t_dense` is the time
    cost of dot(dns, dns), with the same matrix except that it is in default
    storage type.
    """
    def measure_cost_forward_baseline(repeat, dot, lhs, rhs):
        start = time.time()
        for i in range(repeat):
            dot(lhs, rhs)
        end = time.time()
        diff = end - start
        return diff / repeat

    def measure_cost_backward_baseline(repeat, dot, transpose, lhs, rhs):
        start = time.time()
        for i in range(repeat):
            dot(transpose(lhs), rhs)
        end = time.time()
        diff = end - start
        return diff / repeat

    def bench_dot_forward(m, k, n, density, ctx, repeat):
        set_default_context(ctx)
        dns = mx.nd.random.uniform(shape=(k, n)).copyto(ctx)
        data_shape = (m, k)
        csr_data = rand_ndarray(data_shape, 'csr', density)
        dns_data = csr_data.tostype('default')
        rhs_dns_np = dns.asnumpy()
        lhs_csr_sp = sp.csr_matrix(dns_data.asnumpy())  # csr in scipy
        lhs_dns_np = lhs_csr_sp.todense()

        data = [dns_data, csr_data]
        costs = []
        for d in data:
            dns.wait_to_read()
            d.wait_to_read()
            cost = measure_cost(repeat, mx.nd.dot, d, dns)
            costs.append(cost)
        ratio = costs[0] / costs[1]

        costs_baseline = []
        cost = measure_cost_forward_baseline(repeat, np.dot, lhs_dns_np, rhs_dns_np)
        costs_baseline.append(cost)
        cost = measure_cost_forward_baseline(repeat, sp.spmatrix.dot, lhs_csr_sp, rhs_dns_np)
        costs_baseline.append(cost)
        ratio_baseline = costs_baseline[0] / costs_baseline[1]
        fmt = "%0.1f\t\t%s\t%d\t%d\t%d\t%0.2f\t\t\t%0.2f\t%0.5f\t\t%0.2f\t\t\t\t%0.6f\t%0.5f"
        print(fmt % (density * 100, str(ctx), n, m, k, ratio,
                     costs[0], costs[1], ratio_baseline,
                     costs_baseline[0], costs_baseline[1]))

    def bench_dot_backward(m, k, n, density, ctx, repeat):
        set_default_context(ctx)
        dns = mx.nd.random.uniform(shape=(m, n)).copyto(ctx)
        data_shape = (m, k)
        csr_data = rand_ndarray(data_shape, 'csr', density)
        dns_data = csr_data.tostype('default')
        rhs_dns_np = dns.asnumpy()
        lhs_csr_sp = sp.csr_matrix(dns_data.asnumpy())
        lhs_dns_np = lhs_csr_sp.todense()

        data = [dns_data, csr_data]
        costs = []
        for d in data:
            dns.wait_to_read()
            d.wait_to_read()
            cost = measure_cost(repeat, mx.nd.dot, d, dns, transpose_a=True)
            costs.append(cost)
        ratio = costs[0] / costs[1]

        costs_baseline = []
        cost = measure_cost_backward_baseline(repeat, np.dot, np.transpose,
                                              lhs_dns_np, rhs_dns_np)
        costs_baseline.append(cost)
        cost = measure_cost_backward_baseline(repeat, sp.spmatrix.dot, sp.spmatrix.transpose,
                                              lhs_csr_sp, rhs_dns_np)
        costs_baseline.append(cost)
        ratio_baseline = costs_baseline[0] / costs_baseline[1]
        fmt = "%0.1f\t\t%s\t%d\t%d\t%d\t%0.2f\t\t\t%0.2f\t%0.5f\t\t%0.2f\t\t\t\t%0.6f\t%0.5f"
        print(fmt % (density * 100, str(ctx), n, m, k, ratio,
                     costs[0], costs[1], ratio_baseline,
                     costs_baseline[0], costs_baseline[1]))

    print("A = sparse NDArray of shape(m, k)")
    print("B = dense NDArray of shape(k, n)")
    print("dot_forward\tdot(csr, dns)")
    print('density(%)\tcontext\tn\tm\tk\tt_dense/t_sparse\tt_dense\tt_sparse'
          '\tt_scipy_dense/t_scipy_sparse\tt_scipy_dense\tt_scipy_sparse')

    check_call(_LIB.MXSetNumOMPThreads(ctypes.c_int(args.num_omp_threads)))
    # TODO(haibin) make these runtime options
    m = 512
    k = [50000, 100000]
    n = [64, 128]
    density = [1.00, 0.90, 0.70, 0.50, 0.30, 0.20, 0.10, 0.07, 0.05, 0.02, 0.01, 0.005, 0.001]
    num_repeat = 10
    # contexts = [mx.cpu(), mx.gpu(0)]
    contexts = [mx.cpu()]
    for i in range(2):
        for ctx in contexts:
            for den in density:
                bench_dot_forward(m, k[i], n[i], den, ctx, num_repeat)

    print("dot_backward\tdot(csr.T, dns)")
    print('density(%)\tcontext\tn\tm\tk\tt_dense/t_sparse\tt_dense\tt_sparse'
          '\tt_scipy_dense/t_scipy_sparse\tt_scipy_dense\tt_scipy_sparse')
    for i in range(2):
        for ctx in contexts:
            for den in density:
                bench_dot_backward(m, k[i], n[i], den, ctx, num_repeat)

def test_dot_synthetic(data_dict):
    """benchmark sparse mxnet dot and scipy dot operator with matrices of given density.
    `t_sparse` is the runtime of the invoked sparse dot operator in ms, while `t_dense`
    is the runtime of dot(dns, dns), with the same matrices except that they are in
    default storage type.
    """
    # Benchmark MXNet's and SciPy's dot operator
    def bench_dot(lhs_shape, rhs_shape, lhs_stype, rhs_stype,
                  lhs_den, rhs_den, trans_lhs, ctx, num_repeat=10,
                  fw="mxnet", distribution="uniform"):
        set_default_context(ctx)
        assert fw == "mxnet" or fw == "scipy"

        # Set funcs
        dot_func_sparse = mx.nd.sparse.dot if fw == "mxnet" else sp.spmatrix.dot
        dot_func_dense = mx.nd.dot if fw == "mxnet" else np.dot

        # Create matrix instances
        lhs_nd = rand_ndarray(lhs_shape, lhs_stype, density=lhs_den, distribution=distribution)
        # only uniform distribution supported for rhs
        rhs_nd = rand_ndarray(rhs_shape, rhs_stype, density=rhs_den, distribution="uniform")
        lhs_dns = None
        rhs_dns = None
        dense_cost = None
        sparse_cost = None

        if fw == "mxnet":
            lhs_dns = lhs_nd if lhs_stype == 'default' else lhs_nd.tostype('default')
            rhs_dns = rhs_nd if rhs_stype == 'default' else rhs_nd.tostype('default')
            # One warm up run, verify correctness
            out = dot_func_sparse(lhs_nd, rhs_dns, trans_lhs)
            out_expected = dot_func_dense(lhs_dns, rhs_dns, trans_lhs)
            assert_almost_equal(out.asnumpy(), out_expected.asnumpy(), rtol=1e-1, atol=1e-1)
            sparse_cost = measure_cost(num_repeat, False, False, dot_func_sparse,
                                       lhs_nd, rhs_nd, trans_lhs)
            dense_cost = measure_cost(num_repeat, False, False, dot_func_dense,
                                      lhs_dns, rhs_dns, trans_lhs)
        else:
            lhs_dns = lhs_nd.asnumpy()
            rhs_dns = rhs_nd.asnumpy()
            lhs_nd = sp.csr_matrix(lhs_nd.asnumpy())
            rhs_nd = rhs_nd.asnumpy()
            # One warm up run, verify correctness
            lhs_nd_copy = sp.spmatrix.transpose(lhs_nd) if trans_lhs else lhs_nd
            out = dot_func_sparse(lhs_nd_copy, rhs_dns)
            sparse_cost = measure_cost(num_repeat, trans_lhs, False, dot_func_sparse,
                                       lhs_nd, rhs_nd)
            dense_cost = measure_cost(num_repeat, trans_lhs, True, dot_func_dense,
                                      lhs_dns, rhs_dns)

        speedup = dense_cost / sparse_cost
        # Print results
        m = lhs_shape[0]
        k = lhs_shape[1]
        n = rhs_shape[1]
        result_pattern = '{:15.1f} {:15.1f} {:>10} {:8d} {:8d} {:8d} {:13.2f} {:13.2f} {:8.2f}'
        results = result_pattern.format(lhs_den * 100, rhs_den * 100, str(ctx),
                                        m, k, n, sparse_cost * 1000,
                                        dense_cost * 1000, speedup)
        print(results)

    def print_benchmark_info(lhs, rhs, lhs_trans, fw):
        trans_str = "^T" if lhs_trans else ""
        print("========================================================")
        print(" %s sparse dot benchmark: dot(%s, %s) = %s " % (fw, lhs, rhs, rhs))
        print(" (matrix multiplication: (m x k)%s * (k x n) = m x n) " % (trans_str))
        print("========================================================")
        headline_pattern = '{:>15} {:>15} {:>10} {:>8} {:>8} {:>8} {:>13} {:>13} {:>8}'
        headline = headline_pattern.format('lhs_density(%)', 'rhs_density(%)',
                                           'context', 'm', 'k', 'n',
                                           't_sparse(ms)', 't_dense(ms)', 'speedup')
        print(headline)

    def run_benchmark(ctx=None, lhs="csr", lhs_trans=False, rhs="dns", fw="mxnet",
                      rhs_density=1, distribution="uniform"):
        if lhs != "csr":
            raise ValueError("Value other than csr for lhs not supported")
        if rhs_density > 1 or rhs_density < 0:
            raise ValueError("rhs_density has to be between 0 and 1")

        print_benchmark_info(lhs, rhs, lhs_trans, fw)

        lhs_stype = "csr"
        rhs_stype = "row_sparse" if rhs == "rsp" else "default"

        feature_dim_list = data_dict['feature_dim']
        output_dim_list = data_dict['m']
        batch_size_list = data_dict['batch_size']
        density_list = data_dict['density']

        default_output_index = data_dict['default_index']['output_dim']
        default_batch_size_index = data_dict['default_index']['batch_size']
        default_feature_index = data_dict['default_index']['feature_dim']
        default_density_index = data_dict['default_index']['density']
        num_repeat = data_dict['num_repeat']

        for output_dim in output_dim_list:
            if lhs_trans:
                output_row_dim = batch_size_list[default_batch_size_index]
            else:
                output_row_dim = feature_dim_list[default_feature_index]
            bench_dot((batch_size_list[default_batch_size_index],
                       feature_dim_list[default_feature_index]),
                      (output_row_dim, output_dim),
                      lhs_stype, rhs_stype,
                      density_list[default_density_index], rhs_density,
                      lhs_trans, ctx, num_repeat=num_repeat,
                      fw=fw, distribution=distribution)

        for feature_dim in feature_dim_list:
            if lhs_trans:
                output_row_dim = batch_size_list[default_batch_size_index]
            else:
                output_row_dim = feature_dim
            bench_dot((batch_size_list[default_batch_size_index], feature_dim),
                      (output_row_dim, output_dim_list[default_output_index]),
                      lhs_stype, rhs_stype,
                      density_list[default_density_index], rhs_density,
                      lhs_trans, ctx, num_repeat=num_repeat,
                      fw=fw, distribution=distribution)

        for batch_size in batch_size_list:
            if lhs_trans:
                output_row_dim = batch_size
            else:
                output_row_dim = feature_dim_list[default_feature_index]
            bench_dot((batch_size, feature_dim_list[default_feature_index]),
                      (output_row_dim, output_dim_list[default_output_index]),
                      lhs_stype, rhs_stype,
                      density_list[default_density_index], rhs_density,
                      lhs_trans, ctx, num_repeat=num_repeat,
                      fw=fw, distribution=distribution)

        for density in density_list:
            if lhs_trans:
                output_row_dim = batch_size_list[default_batch_size_index]
            else:
                output_row_dim = feature_dim_list[default_feature_index]
            bench_dot((batch_size_list[default_batch_size_index],
                       feature_dim_list[default_feature_index]),
                      (output_row_dim, output_dim_list[default_output_index]),
                      lhs_stype, rhs_stype,
                      density, rhs_density,
                      lhs_trans, ctx, num_repeat=num_repeat,
                      fw=fw, distribution=distribution)

    check_call(_LIB.MXSetNumOMPThreads(ctypes.c_int(ARGS.num_omp_threads)))
    context = mx.gpu() if ARGS.gpu else mx.cpu()
    # TODO(anirudh): make the data dicts a config that can be passed at runtime
    distributions = ["uniform", "powerlaw"]
    for distribution in distributions:
        run_benchmark(context, lhs="csr", rhs="default", lhs_trans=False,
                      fw="mxnet", rhs_density=1, distribution=distribution)
        run_benchmark(context, lhs="csr", rhs="default", lhs_trans=True,
                      fw="mxnet", rhs_density=1, distribution=distribution)
        run_benchmark(context, lhs="csr", rhs="rsp", lhs_trans=False,
                      fw="mxnet", rhs_density=0.05, distribution=distribution)
        if not ARGS.gpu:
            run_benchmark(context, lhs="csr", rhs="default", lhs_trans=False,
                          fw="scipy", rhs_density=1, distribution=distribution)
            run_benchmark(context, lhs="csr", rhs="default", lhs_trans=True,
                          fw="scipy", rhs_density=1, distribution=distribution)