import numpy

import pygpu

# gen_gpuarray and ctx are assumed here to be provided by the test suite's
# shared support module (GPU context setup and random-array generation).


def asfortranarray(shp, dtype, offseted_outer, offseted_inner, sliced, order):
    cpu, gpu = gen_gpuarray(shp, dtype, offseted_outer, offseted_inner,
                            sliced, order, ctx=ctx)
    a = numpy.asfortranarray(cpu)
    b = pygpu.asfortranarray(gpu)
    # numpy.asfortranarray upcasts a 0d input to a 1d view (ndmin=1).
    if gpu.flags['F_CONTIGUOUS']:
        # Already Fortran-contiguous: no copy, so the result must share
        # the same device memory.
        assert b.gpudata == gpu.gpudata
    elif (sliced != 1 or shp == () or
          (offseted_outer and len(shp) > 1) or
          (order != 'f' and len(shp) > 1)):
        # A copy (or reshape) is required, so a new object is returned.
        assert b is not gpu
    else:
        assert b is gpu
    assert a.shape == b.shape
    assert a.dtype == b.dtype
    assert a.flags.f_contiguous
    assert b.flags['F_CONTIGUOUS']
    if not any(s == 1 for s in cpu.shape):
        # NumPy versions older than 1.10 do not set the C/F-contiguous
        # flags as often as we do, which causes an extra copy.
        assert a.strides == b.strides
    assert numpy.allclose(cpu, a)
    assert numpy.allclose(cpu, b)
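# A minimal sketch of a nose-style generator driver for the check above.
# The shapes, dtypes, and flag grids below are illustrative assumptions,
# not the suite's actual parametrization.
def test_asfortranarray():
    for shp in [(), (5,), (6, 7), (4, 8, 9)]:
        for dtype in ['float32', 'float64']:
            for offseted_outer in [True, False]:
                for offseted_inner in [True, False]:
                    for sliced in [1, 2]:
                        for order in ['c', 'f']:
                            yield (asfortranarray, shp, dtype, offseted_outer,
                                   offseted_inner, sliced, order)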
def transfer_fortran(shp, dtype):
    # Round-trip check: host -> device, convert both sides to Fortran
    # order, transfer back, then compare layout and contents.
    a = (numpy.random.rand(*shp) * 10).astype(dtype)
    b = pygpu.array(a, context=ctx)
    a_ = numpy.asfortranarray(a)
    if len(shp) > 1:
        # For ndim > 1 the Fortran-ordered copy must change the strides.
        assert a_.strides != a.strides
    a = a_
    b = pygpu.asfortranarray(b)
    c = numpy.asarray(b)
    assert a.shape == b.shape == c.shape
    assert a.dtype == b.dtype == c.dtype
    assert a.flags.f_contiguous
    assert c.flags.f_contiguous
    assert a.strides == b.strides == c.strides
    assert numpy.allclose(c, a)
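# A matching driver sketch for the transfer check; the shape/dtype grid is
# again an illustrative assumption. 0d shapes are skipped because
# numpy.random.rand() with no arguments returns a plain Python float.
def test_transfer_fortran():
    for shp in [(5,), (6, 7), (4, 8, 9)]:
        for dtype in ['float32', 'float64']:
            yield transfer_fortran, shp, dtype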