def test_asarray_from_numpy_array_with_zero_copy():
    obj = array_utils.create_dummy_ndarray(
        numpy, (2, 3), 'float32', padding=True)
    obj_refcount_before = sys.getrefcount(obj)

    a = chainerx.asarray(obj, dtype='float32')

    assert sys.getrefcount(obj) == obj_refcount_before + 1
    chainerx.testing.assert_array_equal_ex(obj, a)

    # test buffer is shared (zero copy)
    a += a
    chainerx.testing.assert_array_equal_ex(obj, a)

    # test possibly freed memory
    obj_copy = obj.copy()
    del obj
    chainerx.testing.assert_array_equal_ex(obj_copy, a, strides_check=False)

    # test possibly freed memory (the other way)
    obj = array_utils.create_dummy_ndarray(
        numpy, (2, 3), 'float32', padding=True)
    a = chainerx.asarray(obj, dtype='float32')
    a_copy = a.copy()
    del a
    chainerx.testing.assert_array_equal_ex(a_copy, obj, strides_check=False)
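A minimal standalone sketch (not part of the test suite) of the zero-copy behaviour the test above exercises: when the requested dtype matches, chainerx.asarray is expected to wrap the NumPy buffer rather than copy it, so an in-place update through the ChainerX array is visible from the original NumPy array. The array contents are illustrative only.

import numpy
import chainerx

obj = numpy.arange(6, dtype='float32').reshape(2, 3)
a = chainerx.asarray(obj, dtype='float32')  # same dtype: buffer should be shared

a += a  # in-place update through the ChainerX view
assert numpy.array_equal(obj, chainerx.to_numpy(a))  # update is visible through obj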
def test_asarray_to_numpy_identity(device, slice1, slice2):
    start1, end1, step1 = slice1
    start2, end2, step2 = slice2
    x = numpy.arange(1500).reshape((30, 50))[
        start1:end1:step1, start2:end2:step2]
    y = chainerx.asarray(x)
    z = chainerx.to_numpy(y)
    chainerx.testing.assert_array_equal_ex(x, y)
    chainerx.testing.assert_array_equal_ex(x, z, strides_check=False)
def test_asarray_from_chainerx_array(dtype):
    obj = array_utils.create_dummy_ndarray(chainerx, (2, 3), 'int32')
    a = chainerx.asarray(obj, dtype=dtype)
    if a.dtype == obj.dtype:
        assert a is obj
    else:
        assert a is not obj
    e = chainerx.array(obj, dtype=dtype, copy=False)
    chainerx.testing.assert_array_equal_ex(e, a)
    assert e.device is a.device
def test_asarray_from_numpy_array_with_copy():
    obj = array_utils.create_dummy_ndarray(numpy, (2, 3), 'int32')
    a = chainerx.asarray(obj, dtype='float32')
    e = chainerx.array(obj, dtype='float32', copy=False)
    chainerx.testing.assert_array_equal_ex(e, a)
    assert e.device is a.device

    # test buffer is not shared
    a += a
    assert not numpy.array_equal(obj, chainerx.to_numpy(a))
def asanyarray(a, dtype=None, device=None):
    """Converts an object to an array.

    This is currently equivalent to :func:`~chainerx.asarray`, since there
    are no subclasses of ndarray in ChainerX. Note that the original
    :func:`numpy.asanyarray` returns the input array as is, if it is an
    instance of a subtype of :class:`numpy.ndarray`.

    .. seealso:: :func:`chainerx.asarray`, :func:`numpy.asanyarray`

    """
    return chainerx.asarray(a, dtype, device)
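A minimal usage sketch, assuming the default native device: because ChainerX currently has no ndarray subclasses, asanyarray and asarray are expected to produce equivalent results for the same input.

import numpy
import chainerx

a = chainerx.asanyarray([[0, 1], [2, 3]], dtype='float32')
b = chainerx.asarray([[0, 1], [2, 3]], dtype='float32')
assert numpy.array_equal(chainerx.to_numpy(a), chainerx.to_numpy(b))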
def _array_to_chainerx(array, device=None):
    # If device is None, appropriate device is chosen according to the input
    # arrays.
    assert device is None or isinstance(device, chainerx.Device)

    if array is None:
        return None

    if array.dtype not in chainerx.all_dtypes:
        raise TypeError(
            'Dtype {} is not supported in ChainerX.'.format(array.dtype.name))

    if isinstance(array, chainerx.ndarray):
        if device is None:
            return array
        if device is array.device:
            return array
        return array.to_device(device)
    if isinstance(array, numpy.ndarray):
        if device is None:
            device = chainerx.get_device('native', 0)
        return chainerx.array(array, device=device, copy=False)
    if isinstance(array, cuda.ndarray):
        if device is None:
            device = chainerx.get_device('cuda', array.device.id)
        elif device.backend.name != 'cuda':
            # cupy to non-cuda backend
            # TODO(niboshi): Remove conversion to numpy when both CuPy and
            # ChainerX support the array interface.
            array = _cpu._to_cpu(array)
            return chainerx.array(array, device=device, copy=False)
        elif device.index != array.device.id:
            # cupy to cuda backend but different device
            array = cuda.to_gpu(array, device=device.index)
        # cupy to cuda backend with the same device
        return chainerx._core._fromrawpointer(
            array.data.mem.ptr,
            array.shape,
            array.dtype,
            array.strides,
            device,
            array.data.ptr - array.data.mem.ptr,
            array)
    if isinstance(array, intel64.mdarray):
        return _array_to_chainerx(numpy.array(array), device)
    if numpy.isscalar(array):
        return chainerx.asarray(array)

    raise TypeError(
        'Array cannot be converted into chainerx.ndarray'
        '\nActual type: {0}.'.format(type(array)))
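A minimal sketch of how the helper above might be driven, assuming it is called from the same module and that a native ChainerX device is available: a NumPy input is wrapped on the requested device without a copy, while a ChainerX input already on that device is returned as is.

import numpy
import chainerx

device = chainerx.get_device('native', 0)
x_np = numpy.ones((2, 3), dtype=numpy.float32)

x_chx = _array_to_chainerx(x_np, device)  # numpy.ndarray -> chainerx.ndarray
assert isinstance(x_chx, chainerx.ndarray)
assert x_chx.device is device
assert _array_to_chainerx(x_chx, device) is x_chx  # already on the device: returned as is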
def test_asarray_with_device(device):
    a = chainerx.asarray([0, 1], 'float32', device)
    b = chainerx.asarray([0, 1], 'float32')
    chainerx.testing.assert_array_equal_ex(a, b)
    array_utils.check_device(a, device)
def test_asarray_from_python_tuple_or_list():
    obj = _array_params_list
    a = chainerx.asarray(obj, dtype='float32')
    e = chainerx.array(obj, dtype='float32', copy=False)
    chainerx.testing.assert_array_equal_ex(e, a)
    assert e.device is a.device
def test_serialize_chainerx(self):
    self.check_serialize(chainerx.asarray(self.data), 'w')
def test_deserialize_chainerx(self):
    y = numpy.empty((2, 3), dtype=numpy.float32)
    self.check_deserialize(chainerx.asarray(y), 'y')
def main():
    parser = argparse.ArgumentParser(description='Compare chainer vs chainerx')
    parser.add_argument('--batchsize', '-b', type=int, default=100)
    parser.add_argument('--epoch', '-e', type=int, default=10)
    parser.add_argument('--gpu', '-g', type=int, default=0,
                        choices=[-1, 0, 1, 2, 3])
    parser.add_argument('--chxon', '-c', type=int, default=1)
    args = parser.parse_args()

    # setup
    start = time.time()
    chx_available = args.chxon == 1
    batch_size = args.batchsize

    # get MNIST
    train, test = chainer.datasets.get_mnist()

    if chx_available:
        device_name = 'cuda:{}'.format(args.gpu)
        # data
        with chx.using_device(device_name):
            train_images, train_labels = map(
                lambda d: chx.asarray(d), train._datasets)
            test_images, test_labels = map(
                lambda d: chx.asarray(d), test._datasets)
        # model
        chx.set_default_device(device_name)
        model = MLP(n_units=1000, n_out=10)
        optimizer = SGD(lr=0.01)
    else:
        device_name = args.gpu
        # data
        train_iter = chainer.iterators.SerialIterator(train, batch_size)
        test_iter = chainer.iterators.SerialIterator(
            test, batch_size, repeat=False, shuffle=False)
        # model
        model = MLP_chain(n_units=1000, n_out=10)
        chainer.cuda.get_device_from_id(device_name).use()
        model.to_gpu()
        optimizer = chainer.optimizers.SGD(lr=0.01)
        optimizer.setup(model)

    N_train, N_test = len(train), len(test)
    all_indices_np = np.arange(N_train, dtype=np.int64)  # for chainerx

    epoch = 0
    while epoch < args.epoch:
        epoch += 1

        if chx_available:
            np.random.shuffle(all_indices_np)
            all_indices = chx.array(all_indices_np)

        # training loop
        for i in range(0, N_train, batch_size):
            # time 1
            if chx_available:
                indices = all_indices[i:i + batch_size]
                x = train_images.take(indices, axis=0)
                t = train_labels.take(indices, axis=0)
            else:
                batch = train_iter.next()
                x, t = convert.concat_examples(batch, device=device_name)

            y = model.forward(x)
            # time 2

            # time 3
            if chx_available:
                loss = compute_loss(y, t)
            else:
                loss = F.softmax_cross_entropy(y, t)
                model.cleargrads()

            loss.backward()
            # time 4
            optimizer.update()
            # time 5

        # evaluation on the test set
        if chx_available:
            with chx.no_backprop_mode():
                total_loss = chx.array(0, dtype=chx.float32)
                num_correct = chx.array(0, dtype=chx.int64)
                for i in range(0, N_test, batch_size):
                    x = test_images[i:min(i + batch_size, N_test)]
                    t = test_labels[i:min(i + batch_size, N_test)]
                    y = model.forward(x)
                    total_loss += compute_loss(y, t) * len(t)
                    num_correct += (y.argmax(axis=1).astype(t.dtype)
                                    == t).astype(chx.int32).sum()
        else:
            test_iter.reset()
            with chainer.using_config('enable_backprop', False):
                total_loss = 0
                num_correct = 0
                for batch in test_iter:
                    x, t = convert.concat_examples(batch, device=device_name)
                    y = model.forward(x)
                    total_loss += float(
                        F.softmax_cross_entropy(y, t).array) * len(t)
                    num_correct += float(F.accuracy(y, t).array) * len(t)

        mean_loss = float(total_loss) / N_test
        accuracy = float(num_correct) / N_test
        elapsed_time = time.time() - start
        print('epoch {} ... loss={}, accuracy={}, elapsed_time={}'.format(
            epoch, mean_loss, accuracy, elapsed_time))
def test_deserialize_chainerx_non_native(self):
    y = numpy.empty((2, 3), dtype=numpy.float32)
    self.check_deserialize(chainerx.asarray(y, device='cuda:0'), 'y')