Example #1
def test_evaluate_custom_metric_success():
    eval_df = pd.DataFrame({
        "prediction": [1.2, 1.9, 3.2],
        "target": [1, 2, 3]
    })
    metrics = _get_regressor_metrics(eval_df["target"], eval_df["prediction"])

    def example_custom_metric(_, given_metrics):
        return {
            "example_count_times_1_point_5":
            given_metrics["example_count"] * 1.5,
            "sum_on_label_minus_5": given_metrics["sum_on_label"] - 5,
            "example_np_metric_1": np.float32(123.2),
            "example_np_metric_2": np.ulonglong(10000000),
        }

    res_metrics, res_artifacts = _evaluate_custom_metric(
        0, example_custom_metric, eval_df, metrics)
    assert res_metrics == {
        "example_count_times_1_point_5": metrics["example_count"] * 1.5,
        "sum_on_label_minus_5": metrics["sum_on_label"] - 5,
        "example_np_metric_1": np.float32(123.2),
        "example_np_metric_2": np.ulonglong(10000000),
    }
    assert res_artifacts is None

    def example_custom_metric_with_artifacts(given_df, given_metrics):
        return (
            {
                "example_count_times_1_point_5":
                given_metrics["example_count"] * 1.5,
                "sum_on_label_minus_5": given_metrics["sum_on_label"] - 5,
                "example_np_metric_1": np.float32(123.2),
                "example_np_metric_2": np.ulonglong(10000000),
            },
            {
                "pred_target_abs_diff":
                np.abs(given_df["prediction"] - given_df["target"]),
                "example_dictionary_artifact": {
                    "a": 1,
                    "b": 2
                },
            },
        )

    res_metrics_2, res_artifacts_2 = _evaluate_custom_metric(
        0, example_custom_metric_with_artifacts, eval_df, metrics)
    assert res_metrics_2 == {
        "example_count_times_1_point_5": metrics["example_count"] * 1.5,
        "sum_on_label_minus_5": metrics["sum_on_label"] - 5,
        "example_np_metric_1": np.float32(123.2),
        "example_np_metric_2": np.ulonglong(10000000),
    }
    assert "pred_target_abs_diff" in res_artifacts_2
    assert res_artifacts_2["pred_target_abs_diff"].equals(
        np.abs(eval_df["prediction"] - eval_df["target"]))

    assert "example_dictionary_artifact" in res_artifacts_2
    assert res_artifacts_2["example_dictionary_artifact"] == {"a": 1, "b": 2}
Example #2
    def load64(x):
        r = ulonglong(0)

        for i in range(8):
            i = ulonglong(i)
            r |= ulonglong(x[i]) << ulonglong(8) * i

        return r
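
A quick host-side check of the little-endian load above (a minimal sketch, assuming ulonglong here is numpy.ulonglong imported into the module namespace): the result should agree with int.from_bytes.

from numpy import ulonglong

def load64(x):
    r = ulonglong(0)
    for i in range(8):
        i = ulonglong(i)
        r |= ulonglong(x[i]) << ulonglong(8) * i  # byte i lands at bit offset 8*i
    return r

data = bytes(range(1, 9))
assert int(load64(data)) == int.from_bytes(data, "little")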
Example #3
def gen_vert_weight(gen_fun,
                    weight_extra_len: int,
                    weight_dir: str,
                    file: str,
                    variables: list,
                    z_levels: np.ndarray,
                    verbose: bool = False) -> str:
    """
   Generate weights for vertical interpolation routines.
    :param gen_fun: Vertical interpolation routine written in C. Must have same arguments as create_weights
                    found in romspy/interpolation/vertical/linear
    :param weight_extra_len: length of the extra dimension added to weights
    :param weight_dir: Directory to store weights in
    :param file: input file to create weights for
    :param variables: name of the variables used by this weight, used to create weight name
    :param z_levels: 3D array of depth points
    :param verbose: whether to print whenever this function is called
    :return: name of weight file
    """
    if verbose:
        print("Making weight")
    with netCDF4.Dataset(file, mode='r') as my_file:
        # Get Z to interpolate to and its length
        z_levels = np.require(z_levels,
                              requirements=["C", "O", "A"],
                              dtype=np.float32)
        z_slice = np.prod(z_levels.shape[1:], dtype=np.ulonglong)
        z_len = np.ulonglong(z_levels.shape[0])
        # Get weight array which will store results
        weight_arr = np.require(
            np.empty(tuple(z_levels.shape + (weight_extra_len, )),
                     dtype=np.float32),
            requirements=["C", "W", "O", "A"],
        )  # weight_extra_len of 2 means [index, index percentage]; see the C code

        # Get depth and depth length
        depth = np.require(my_file.variables["depth"][:],
                           dtype=np.float32,
                           requirements=["C", "O", "A"])
        depth_len = np.ulonglong(depth.size)
        if verbose:
            print("Calling external C routine for generating weights.")
        # GET WEIGHTS
        gen_fun(depth, depth_len, z_levels, z_slice, z_len, weight_arr)
        if verbose:
            print("C routine has been completed.")
        # Get name to save weights under
        prefix = "vertical_" + "_".join(variables) + "_"
        path = os.path.split(file)[1]
        weight_file_name = os.path.join(weight_dir, prefix + path + ".npy")
        # print(weight_arr)
        # Save weights
        np.save(weight_file_name, weight_arr)
    return weight_file_name
Example #4
 def test_numpy(self):
     """NumPy objects get serialized to readable JSON."""
     l = [
         np.float32(12.5),
         np.float64(2.0),
         np.float16(0.5),
         np.bool(True),
         np.bool(False),
         np.bool_(True),
         np.unicode_("hello"),
         np.byte(12),
         np.short(12),
         np.intc(-13),
         np.int_(0),
         np.longlong(100),
         np.intp(7),
         np.ubyte(12),
         np.ushort(12),
         np.uintc(13),
         np.ulonglong(100),
         np.uintp(7),
         np.int8(1),
         np.int16(3),
         np.int32(4),
         np.int64(5),
         np.uint8(1),
         np.uint16(3),
         np.uint32(4),
         np.uint64(5),
     ]
     l2 = [l, np.array([1, 2, 3])]
     roundtripped = loads(dumps(l2, cls=EliotJSONEncoder))
     self.assertEqual([l, [1, 2, 3]], roundtripped)
Example #5
    def __init__(self):

        self.s = longdouble(0)
        self.m = longdouble(0)
        self.last_m = longdouble(0)
        self.n = ulonglong(0)
        self.is_started = False
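
The field names above (sum of squared deviations s, running mean m, previous mean last_m, sample count n) suggest a Welford-style online statistics accumulator. A hypothetical companion update method under that assumption, not part of the original source:

from numpy import longdouble, ulonglong

class RunningStats:
    # Same fields as the __init__ above; update() and variance() are assumed.
    def __init__(self):
        self.s = longdouble(0)        # sum of squared deviations from the mean
        self.m = longdouble(0)        # running mean
        self.last_m = longdouble(0)   # mean before the latest sample
        self.n = ulonglong(0)         # sample count
        self.is_started = False

    def update(self, x):
        self.n += ulonglong(1)
        if not self.is_started:
            self.m = longdouble(x)
            self.is_started = True
            return
        self.last_m = self.m
        self.m = self.last_m + (longdouble(x) - self.last_m) / longdouble(self.n)
        self.s += (longdouble(x) - self.last_m) * (longdouble(x) - self.m)

    def variance(self):
        # Sample variance from Welford's recurrence (assumes n > 1).
        return self.s / longdouble(self.n - ulonglong(1))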
Example #8
def compute_kmer_device(lmers, pkmers, skmers, kmerBitMask, readLength, readCount):
    module_logger = logging.getLogger('eulercuda.pyencode.compute_kmer_device')
    module_logger.info("started compute_kmer_device.")
    mod = SourceModule("""
    typedef unsigned  long long KEY_T ;
    typedef KEY_T * KEY_PTR ;
    #define LMER_PREFIX(lmer,bitMask) ((lmer & (bitMask<<2))>>2)
    #define LMER_SUFFIX(lmer,bitMask) ((lmer & bitMask))

    __global__ void computeKmerDevice(
            KEY_PTR lmers,
            KEY_PTR pkmers,
            KEY_PTR skmers,
            KEY_T validBitMask,
            unsigned int readCount
        )
    {
        const unsigned int tid = (blockDim.x * blockDim.y * gridDim.x * blockIdx.y) + (blockDim.x * blockDim.y * blockIdx.x) + (blockDim.x * threadIdx.y) + threadIdx.x;

        if (tid < readCount)
        {
            KEY_T lmer;
            //fetch lmer
            lmer = lmers[tid];
            //find prefix
            pkmers[tid] = LMER_PREFIX(lmer,validBitMask);
            //find suffix
            skmers[tid] = LMER_SUFFIX(lmer,validBitMask);
           // __syncthreads();
        }
    }
    """, options=['--compiler-options', '-Wall'])
    compute_kmer = mod.get_function("computeKmerDevice")

    block_dim, grid_dim = getOptimalLaunchConfiguration(readCount, readLength)
    np_pkmers = gpuarray.to_gpu(pkmers)
    np_skmers = gpuarray.to_gpu(skmers)
    if isinstance(lmers, np.ndarray) and isinstance(pkmers, np.ndarray) and isinstance(skmers, np.ndarray):
        module_logger.info("Going to GPU.")
        compute_kmer(
            drv.In(lmers),
            np_pkmers,
            np_skmers,
            np.ulonglong(kmerBitMask),
            np.uintc(readCount),
            block=block_dim, grid=grid_dim
        )
        np_pkmers.get(pkmers)
        np_skmers.get(skmers)
    else:
        module_logger.warning("PROBLEM WITH GPU.")
    devdata = pycuda.tools.DeviceData()
    orec = pycuda.tools.OccupancyRecord(devdata, block_dim[0] * grid_dim[0])
    module_logger.debug("Occupancy = %s" % (orec.occupancy * 100))

    module_logger.info("leaving compute_kmer_device.")
    return pkmers, skmers
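
The LMER_PREFIX/LMER_SUFFIX macros in the kernel are plain mask-and-shift operations; a hedged host-side restatement in NumPy for sanity-checking them (hypothetical helpers, not part of the original module):

import numpy as np

def lmer_prefix(lmer, bit_mask):
    # ((lmer & (bitMask << 2)) >> 2), as in the CUDA macro
    return (lmer & (bit_mask << np.ulonglong(2))) >> np.ulonglong(2)

def lmer_suffix(lmer, bit_mask):
    # (lmer & bitMask), as in the CUDA macro
    return lmer & bit_mask

lmer = np.ulonglong(0b110110)
mask = np.ulonglong(0b001111)
assert lmer_prefix(lmer, mask) == np.ulonglong(0b1101)
assert lmer_suffix(lmer, mask) == np.ulonglong(0b0110)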
Example #9
 def example_custom_metric(_, given_metrics):
     return {
         "example_count_times_1_point_5":
         given_metrics["example_count"] * 1.5,
         "sum_on_label_minus_5": given_metrics["sum_on_label"] - 5,
         "example_np_metric_1": np.float32(123.2),
         "example_np_metric_2": np.ulonglong(10000000),
     }
Example #10
def k_means_plus(img, cluster_centers):
    """
    k-means++ cluster-center initialization.
    :param img: the data set, a 2D array (or single-channel Mat)
    :param cluster_centers: the initial cluster centers, a list of Point instances
    :return: the list holding the k initial points
    """
    # Get the image height and width
    height, width = img.shape
    # Pick one point at random as the first center
    x0 = random.randint(0, width - 1)
    y0 = random.randint(0, height - 1)
    # Assign the random coordinates to the first Point's x and y
    cluster_centers[0].x, cluster_centers[0].y = x0, y0
    cluster_centers[0].value = img[y0][x0]  # give the first center its value
    # print(cluster_centers[0].x, cluster_centers[0].y, cluster_centers[0].value)

    # Array storing each data point's distance to its nearest center
    distance = np.zeros([height, width], np.uint16)  # same size as the image, zero-initialized

    # Choose the remaining centers iteratively
    for i in range(1, len(cluster_centers)):  # k-1 iterations, where k is the number of clusters
        # Compute and store every point's distance to the nearest center
        # Bug note 2: a signed accumulator seemed to overflow once it reached
        # half its maximum size, hence numpy's ulonglong type here
        cc_sum = np.ulonglong(0)  # running total of all the distances

        for h in range(height):  # visit every point
            for w in range(width):
                temporarily_point = Point(w, h, img[h][w])  # the current point's coordinates and value
                distance[h][w] = nearest_center(
                    temporarily_point,
                    cluster_centers[:i])[1]  # distance to the nearest existing center
                cc_sum += distance[h][w]  # accumulate the total distance

        # print(distance)
        # print(cc_sum)

        # Sample proportionally to distance[i]/cc_sum: scale the total by a
        # random number in [0, 1) and walk through the distances
        random_cc_sum = cc_sum * random.random()
        # print("random_cc_sum:%s" % random_cc_sum)

        break_flag = False  # flag for breaking out of the nested loops
        for h2 in range(height):  # visit every point again
            for w2 in range(width):
                random_cc_sum -= distance[h2][w2]  # once this drops below 0, we are inside this point's interval
                if random_cc_sum > 0:
                    continue
                else:
                    cluster_centers[i] = Point(w2, h2,
                                               img[h2][w2])  # store the point as the new center
                    break_flag = True
                    break
            if break_flag:
                break

    return cluster_centers
Example #11
 def part2(self):
     # I borrowed this really cool approach from github.com/alfiejfs
     self.fish_prop_map.clear()
     fish = dict()
     for i in range(7):
         fish[i] = np.sum(self.input == i)
     total = np.ulonglong(0)
     for i in range(7):
         total += fish[i] * self.fish_props(256 - i)
     return total
Example #12
    def fish_props(self, duration):
        if duration in self.fish_prop_map:
            return self.fish_prop_map[duration]

        if duration <= 0:
            self.fish_prop_map[duration] = np.ulonglong(1)
        else:
            self.fish_prop_map[duration] = (self.fish_props(duration - 7) +
                                            self.fish_props(duration - 9))
        return self.fish_prop_map[duration]
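
The same memoised recurrence can be written as a standalone function; a minimal sketch (hypothetical, outside the original class) mirroring fish_props: the parent's count restarts 7 days later and each newborn's 9 days later.

from functools import lru_cache
import numpy as np

@lru_cache(maxsize=None)
def fish_count(duration):
    # Descendants (including the fish itself) with `duration` days remaining.
    if duration <= 0:
        return np.ulonglong(1)
    return fish_count(duration - 7) + fish_count(duration - 9)

print(int(fish_count(256)))  # population seeded by one timer-0 fish over 256 days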
Example #13
 def example_custom_metric_with_artifacts(given_df, given_metrics):
     return (
         {
             "example_count_times_1_point_5":
             given_metrics["example_count"] * 1.5,
             "sum_on_label_minus_5": given_metrics["sum_on_label"] - 5,
             "example_np_metric_1": np.float32(123.2),
             "example_np_metric_2": np.ulonglong(10000000),
         },
         {
             "pred_target_abs_diff":
             np.abs(given_df["prediction"] - given_df["target"]),
             "example_dictionary_artifact": {
                 "a": 1,
                 "b": 2
             },
         },
     )
Example #14
def formfactor(mem, tn, irow, icol):

    gpixel = 16 * 1000 * 1000
    pnum = np.ulonglong(tn * irow * icol)

    mp = int(pnum / (mem * gpixel)) + 1

    # find the smallest i with i * i >= mp, i.e. ceil(sqrt(mp)), capped at 100
    for i in range(101):
        if i * i >= mp:
            break

    if i * i < mp:
        # mp exceeds 100 * 100: no form factor found
        return -1
    else:
        return i
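
The loop above looks for the smallest i with i * i >= mp, i.e. the ceiling of the integer square root; an equivalent closed form using the standard library (a sketch with a hypothetical helper name):

import math

def grid_side(mp):
    # smallest i such that i * i >= mp
    i = math.isqrt(mp)
    return i if i * i == mp else i + 1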
Example #15
    def _init(self, grid=None):
        """This class accepts grids with already filled, values.
        The provided grid is checked for errors. If grid is not
        provided an empty board will be filled."""

        self._filled = 0
        self._state_index = 0

        self._grid = grid

        if grid is None:
            self._grid = numpy.zeros((self.n**4), dtype=numpy.ulonglong)

            self._value_indices[0] = int(
                numpy.random.random_integers(0, self.n**4 - 1, 1))

        self._candidate_values[0, :] = numpy.ulonglong((2**64) - 1)
        self._candidate_nums[0, :] = numpy.uint(self.n**2)
        self._available[0, :] = numpy.short(1)
Example #16
 def __init__(self, RandSeed=None, ncore=1):
     """
     PyTurbSim 'run' objects can be initialized with a specific
     random seed, `RandSeed`, and number of cores, `ncore`.
     """
     # Initialize the random number generator before doing anything else.
     if RandSeed is None:
         self.RandSeed = random.randint(-2147483647, 2147483647)
     else:
         self.RandSeed = RandSeed
     # Seeds for numpy must be positive, but original-TurbSim had
     # negative seeds.  In order to attempt to be consistent, we
     # use the values in the files but make them positive for the
     # numpy random generator.
     self.randgen = random.RandomState(ulonglong(self.RandSeed +
                                                 2147483648))
     self.ncore = ncore
     if dbg:
         self.timer = dbg.timer('Veers84')
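
As the comment explains, adding 2147483648 maps the original-TurbSim seed range [-2147483647, 2147483647] into [1, 2**32 - 1], which numpy's RandomState accepts; a minimal check (a sketch, assuming random and ulonglong are numpy's, as in the class above):

from numpy import random, ulonglong

for seed in (-2147483647, 0, 2147483647):
    shifted = ulonglong(seed + 2147483648)   # 1, 2147483648, 4294967295
    rng = random.RandomState(shifted)        # all within RandomState's 32-bit range
    rng.randint(10)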
Example #17
    def test_scalar_to_int_coerce_does_not_cast(self, dtype):
        """
        Signed integers are currently different in that they do not cast other
        NumPy scalars, but instead use scalar.__int__(). The hardcoded
        exception to this rule is `np.array(scalar, dtype=integer)`.
        """
        dtype = np.dtype(dtype)
        invalid_int = np.ulonglong(-1)

        float_nan = np.float64(np.nan)

        for scalar in [float_nan, invalid_int]:
            # This is a special case using casting logic and thus not failing:
            coerced = np.array(scalar, dtype=dtype)
            cast = np.array(scalar).astype(dtype)
            assert_array_equal(coerced, cast)

            # However these fail:
            with pytest.raises((ValueError, OverflowError)):
                np.array([scalar], dtype=dtype)
            with pytest.raises((ValueError, OverflowError)):
                cast[()] = scalar
Example #18
    def __init__(self):
        NT = namedtuple('NT', tuple('abc'))

        self.values = [
                np.longlong(-1), np.int_(-1), np.intc(-1), np.short(-1), np.byte(-1),
                np.ubyte(1), np.ushort(1), np.uintc(1), np.uint(1), np.ulonglong(1),
                np.half(1.0), np.single(1.0), np.float_(1.0), np.longfloat(1.0),
                np.csingle(1.0j), np.complex_(1.0j), np.clongfloat(1.0j),
                np.bool_(0), np.str_('1'), np.unicode_('1'), np.void(1),
                np.object(), np.datetime64('NaT'), np.timedelta64('NaT'), np.nan,
                12, 12.0, True, None, float('NaN'), object(), (1, 2, 3),
                NT(1, 2, 3), datetime.date(2020, 12, 31), datetime.timedelta(14),
        ]

        # Datetime & Timedelta
        for precision in ['ns', 'us', 'ms', 's', 'm', 'h', 'D', 'M', 'Y']:
            for kind, ctor in (('m', np.timedelta64), ('M', np.datetime64)):
                self.values.append(ctor(12, precision))

        for size in (1, 8, 16, 32, 64, 128, 256, 512):
            self.values.append(bytes(size))
            self.values.append('x' * size)
Example #19
class TestNumpyJSONEncoder(unittest.TestCase):
    @parameterized.expand(
        [(numpy.bool_(1), True), (numpy.bool8(1), True), (numpy.byte(1), 1),
         (numpy.int8(1), 1), (numpy.ubyte(1), 1), (numpy.uint8(1), 1),
         (numpy.short(1), 1), (numpy.int16(1), 1), (numpy.ushort(1), 1),
         (numpy.uint16(1), 1), (numpy.intc(1), 1), (numpy.int32(1), 1),
         (numpy.uintc(1), 1), (numpy.uint32(1), 1), (numpy.int_(1), 1),
         (numpy.int32(1), 1), (numpy.uint(1), 1), (numpy.uint32(1), 1),
         (numpy.longlong(1), 1), (numpy.int64(1), 1), (numpy.ulonglong(1), 1),
         (numpy.uint64(1), 1), (numpy.half(1.0), 1.0),
         (numpy.float16(1.0), 1.0), (numpy.single(1.0), 1.0),
         (numpy.float32(1.0), 1.0), (numpy.double(1.0), 1.0),
         (numpy.float64(1.0), 1.0), (numpy.longdouble(1.0), 1.0)] + ([
             (numpy.float128(1.0), 1.0)  # unavailable on windows
         ] if hasattr(numpy, 'float128') else []))
    def test_numpy_primary_type_encode(self, np_val, py_val):
        self.assertEqual(json.dumps(py_val),
                         json.dumps(np_val, cls=NumpyEncoder))

    @parameterized.expand([
        (numpy.array([1, 2, 3], dtype=numpy.int), [1, 2, 3]),
        (numpy.array([[1], [2], [3]], dtype=numpy.double), [[1.0], [2.0],
                                                            [3.0]]),
        (numpy.zeros((2, 2), dtype=numpy.bool_), [[False, False],
                                                  [False, False]]),
        (numpy.array([('Rex', 9, 81.0), ('Fido', 3, 27.0)],
                     dtype=[('name', 'U10'), ('age', 'i4'),
                            ('weight', 'f4')]), [['Rex', 9, 81.0],
                                                 ['Fido', 3, 27.0]]),
        (numpy.rec.array([(1, 2., 'Hello'), (2, 3., "World")],
                         dtype=[('foo', 'i4'), ('bar', 'f4'),
                                ('baz', 'U10')]), [[1, 2.0, "Hello"],
                                                   [2, 3.0, "World"]])
    ])
    def test_numpy_array_encode(self, np_val, py_val):
        self.assertEqual(json.dumps(py_val),
                         json.dumps(np_val, cls=NumpyEncoder))
Example #20
    def populateConversionTables(self):
        # `red_table[op][c_type]` maps to `charm_reducer_type`, where:
        #     - op is the identifier for an internal reducer (SUM, PRODUCT, MAX, MIN, ...)
        #     - c_type is the identifier for a C type (C_CHAR, C_SHORT, etc.)
        #     - charm_reducer_type is the value of the internal reducer type as it appears in CkReductionTypesExt
        self.red_table = [[]] * 7
        self.red_table[SUM]     = [0] * NUM_C_TYPES
        self.red_table[PRODUCT] = [0] * NUM_C_TYPES
        self.red_table[MAX]     = [0] * NUM_C_TYPES
        self.red_table[MIN]     = [0] * NUM_C_TYPES
        self.red_table[AND]     = [0] * NUM_C_TYPES
        self.red_table[OR]      = [0] * NUM_C_TYPES
        self.red_table[XOR]     = [0] * NUM_C_TYPES

        fields = self.charm.lib.getReductionTypesFields()  # get names of fields in CkReductionTypesExt
        maxFieldVal = max([getattr(self.charm.ReducerType, f) for f in fields])
        # charm_reducer_to_ctype maps the values in CkReductionTypesExt to C type identifier
        self.charm_reducer_to_ctype = [None] * (maxFieldVal + 1)
        for f in fields:
            if f == 'nop':
                continue
            elif f == 'external_py':
                op, c_type_str = None, 'char'
            elif f.startswith('logical'):
                op, c_type_str = f.split('_')[1:]
            else:
                op, c_type_str = f.split('_', 1)        # e.g. from 'sum_long' extracts 'sum' and 'long'
            ctype_code = c_typename_to_id[c_type_str]   # e.g. map 'long' to C_LONG
            f_val = getattr(self.charm.ReducerType, f)  # value of the field in CkReductionTypesExt
            # print(f, "ctype_code", ctype_code, "f_val=", f_val)
            self.charm_reducer_to_ctype[f_val] = ctype_code
            if   op == 'sum':     self.red_table[SUM][ctype_code] = f_val
            elif op == 'product': self.red_table[PRODUCT][ctype_code] = f_val
            elif op == 'max':     self.red_table[MAX][ctype_code] = f_val
            elif op == 'min':     self.red_table[MIN][ctype_code] = f_val
            elif op == 'and':     self.red_table[AND][ctype_code] = f_val
            elif op == 'or':      self.red_table[OR][ctype_code] = f_val
            elif op == 'xor':     self.red_table[XOR][ctype_code] = f_val

        # ------ numpy data types ------
        if haveNumpy:
            # map numpy data types to internal reduction C code identifier
            self.numpy_type_map = {'bool': C_BOOL, 'int8': C_CHAR, 'int16': C_SHORT,
                                   'int32': C_INT, 'int64': C_LONG, 'uint8': C_UCHAR,
                                   'uint16': C_USHORT, 'uint32': C_UINT, 'uint64': C_ULONG,
                                   #'float16': ?
                                   'float32': C_FLOAT, 'float64': C_DOUBLE}
            if np.dtype('int64').itemsize > self.charm.lib.sizeof(C_LONG):
                self.numpy_type_map['int64']  = C_LONG_LONG
                self.numpy_type_map['uint64'] = C_ULONG_LONG

            # verify that mapping is correct
            for dt, c_type in self.numpy_type_map.items():
                assert np.dtype(dt).itemsize == self.charm.lib.sizeof(c_type)

            self.rev_np_array_type_map = [None] * NUM_C_TYPES
            reverse_lookup = {v: k for k, v in self.numpy_type_map.items()}
            for c_type in range(NUM_C_TYPES):
                if c_type in reverse_lookup:
                    self.rev_np_array_type_map[c_type] = reverse_lookup[c_type]
            if self.rev_np_array_type_map[C_LONG] is None:
                self.rev_np_array_type_map[C_LONG] = np.int_().dtype.name
                self.rev_np_array_type_map[C_ULONG] = np.uint().dtype.name
                assert np.dtype('int_').itemsize == self.charm.lib.sizeof(C_LONG)
                assert np.dtype('uint').itemsize == self.charm.lib.sizeof(C_ULONG)
            if self.rev_np_array_type_map[C_LONG_LONG] is None:
                self.rev_np_array_type_map[C_LONG_LONG] = np.longlong().dtype.name
                self.rev_np_array_type_map[C_ULONG_LONG] = np.ulonglong().dtype.name
                assert np.dtype('longlong').itemsize == self.charm.lib.sizeof(C_LONG_LONG)
                assert np.dtype('ulonglong').itemsize == self.charm.lib.sizeof(C_ULONG_LONG)

        # ------ array.array data types ------

        # map array.array data types to internal reduction C code identifier
        self.array_type_map = {'b': C_CHAR, 'B': C_UCHAR, 'h': C_SHORT, 'H': C_USHORT,
                               'i': C_INT, 'I': C_UINT, 'l': C_LONG, 'L': C_ULONG,
                               'f': C_FLOAT, 'd': C_DOUBLE}
        if sys.version_info >= (3, 3, 0):
            self.array_type_map['q'] = C_LONG_LONG
            self.array_type_map['Q'] = C_ULONG_LONG

        # verify that mapping is correct
        for dt, c_type in self.array_type_map.items():
            assert array.array(dt).itemsize == self.charm.lib.sizeof(c_type)

        self.rev_array_type_map = ['b', 'b', 'h', 'i', 'l', 'q', 'B', 'H', 'I', 'L', 'Q', 'f', 'd']
        assert len(self.rev_array_type_map) == NUM_C_TYPES

        # ------ python data types ------

        # map python types to internal reduction C code identifier
        self.python_type_map = {float: C_DOUBLE, bool: C_BOOL}
        if self.charm.lib.sizeof(C_LONG) >= 8:
            self.python_type_map[int] = C_LONG
        else:
            self.python_type_map[int] = C_LONG_LONG
            assert self.charm.lib.sizeof(C_LONG_LONG) >= 8
        if haveNumpy:
            # this is a bit of a hack
            self.python_type_map[np.bool_] = C_BOOL
Example #21
        galois.Poly.Random(2.0)
    with pytest.raises(TypeError):
        galois.Poly.Random(2, field=galois.FieldClass)
    with pytest.raises(ValueError):
        galois.Poly.Random(-1)
    with pytest.raises(ValueError):
        galois.Poly.Random(2, seed=-1)
    with pytest.raises(ValueError):
        galois.Poly.Random(2, seed=3.14)


@pytest.mark.parametrize("field", FIELDS)
@pytest.mark.parametrize("seed", [
    None, 42,
    np.int64(1337),
    np.ulonglong(27182818284),
    np.random.default_rng(123456)
])
def test_random(field, seed):
    p = galois.Poly.Random(2, field=field, seed=seed)
    assert isinstance(p, galois.Poly)
    assert p.field is field
    assert p.degree == 2


def test_integer_exceptions():
    with pytest.raises(TypeError):
        galois.Poly.Integer(5.0)
    with pytest.raises(TypeError):
        galois.Poly.Integer(5, field=galois.FieldClass)
    with pytest.raises(ValueError):
Example #22
def apply_vert_weights(cdo, apply_fun, weight_file: str, file: str,
                       outfile: str, variables: list, options: str,
                       verbose: bool):
    if verbose:
        print("Interpolating vertically with weights: " + " ".join(variables))
    # Get weight
    weight_data = np.load(weight_file)
    # Get a temporary filepath
    split = os.path.split(file)
    temp_out_path = os.path.join(split[0], "temp_vert_" + split[1])
    # Open input file
    with netCDF4.Dataset(file, mode="r+") as in_file:
        # Get the dimension names used in the variables
        dims = in_file.variables[variables[0]].dimensions
        var_dims: list = [x if x != "depth" else "s_rho" for x in dims]
        # Get dimension lengths
        var_dim_lens = weight_data.shape[:-1]
        time_len = len(in_file.dimensions["time"])
        # Create destination array
        var_out_arr = np.require(np.zeros(tuple(var_dim_lens),
                                          dtype=np.float32),
                                 requirements=["C", "W", "O", "A"])
        arr_len = np.ulonglong(var_out_arr.size)

        with netCDF4.Dataset(temp_out_path, mode="w") as dest_file:
            # Create dimensions
            dest_file.createDimension("time", time_len)
            dest_file.createDimension("s_rho", var_dim_lens[0])
            dest_file.createDimension(var_dims[-2], var_dim_lens[1])
            dest_file.createDimension(var_dims[-1], var_dim_lens[2])
            for var in variables:
                # create variable and set attributes
                new_var: netCDF4.Variable = dest_file.createVariable(
                    var, 'f', tuple(var_dims))
                var_obj = in_file.variables[var]
                var_attrs = {
                    x: str(var_obj.getncattr(x))
                    for x in var_obj.ncattrs() if x != "_FillValue"
                }
                new_var.setncatts(var_attrs)
                # Time step wise interpolation
                if verbose:
                    print("Interpolating " + var +
                          " vertically with external C routine timestep wise.")
                for i in range(time_len):
                    var_data = np.ascontiguousarray(var_obj[i],
                                                    dtype=np.float32)
                    apply_fun(weight_data, var_data, var_out_arr, arr_len)
                    new_var[i] = np.require(var_out_arr, dtype=np.float32)
                if verbose:
                    print("Finished interpolating " + var +
                          " vertically with external C routine.")
                in_file.renameVariable(var, "tmp_" + var)
    # Merge into dataset
    # cdo.replace(input=file + " " + temp_out_path, output=outfile, options=options) does not work due to CDO limitations
    temp_merge = cdo.merge(input=file + " " + temp_out_path, options=options)
    if outfile is not None:
        cdo.delname(",".join(["tmp_" + var for var in variables]),
                    input=temp_merge,
                    output=outfile,
                    options=options)
    else:
        outfile = cdo.delname(",".join(["tmp_" + var for var in variables]),
                              input=temp_merge,
                              options=options)
    os.remove(temp_out_path)
    # dest_dir = os.path.split(outfile)
    # temp_file = os.path.join(dest_dir[0], "temp_file.nc")
    # cdo.copy(input=temp_out_path, output = temp_file, options = options)
    return outfile
Example #23
        (np.bytes_("foo"), b"foo"),
        (np.float16(3.14), 3.14),
        (np.float32(3.14159), 3.14159),
        (np.float64(3.14159), 3.14159),
        # Evidently float128 is not available on Windows:
        (getattr(np, "float128", np.float64)(3.14159), 3.14159),
        (np.int8(42), 42),
        (np.int16(42), 42),
        (np.int32(42), 42),
        (np.int64(42), 42),
        (np.longlong(42), 42),
        (np.uint8(42), 42),
        (np.uint16(42), 42),
        (np.uint32(42), 42),
        (np.uint64(42), 42),
        (np.ulonglong(42), 42),
    ])
def test_numpy_scalar(numpy_value, expected_value):
    ctx = asdf.AsdfFile()
    tree = {"value": numpy_value}
    buffer = io.BytesIO()

    yamlutil.dump_tree(tree, buffer, ctx)
    buffer.seek(0)

    if isinstance(expected_value, float) and NUMPY_LT_1_14:
        assert yamlutil.load_tree(buffer)["value"] == pytest.approx(
            expected_value, rel=0.001)
    else:
        assert yamlutil.load_tree(buffer)["value"] == expected_value
Example #24
reveal_type(np.byte())  # E: {byte}
reveal_type(np.short())  # E: {short}
reveal_type(np.intc())  # E: {intc}
reveal_type(np.intp())  # E: {intp}
reveal_type(np.int0())  # E: {intp}
reveal_type(np.int_())  # E: {int_}
reveal_type(np.longlong())  # E: {longlong}

reveal_type(np.ubyte())  # E: {ubyte}
reveal_type(np.ushort())  # E: {ushort}
reveal_type(np.uintc())  # E: {uintc}
reveal_type(np.uintp())  # E: {uintp}
reveal_type(np.uint0())  # E: {uintp}
reveal_type(np.uint())  # E: {uint}
reveal_type(np.ulonglong())  # E: {ulonglong}

reveal_type(np.half())  # E: {half}
reveal_type(np.single())  # E: {single}
reveal_type(np.double())  # E: {double}
reveal_type(np.float_())  # E: {double}
reveal_type(np.longdouble())  # E: {longdouble}
reveal_type(np.longfloat())  # E: {longdouble}

reveal_type(np.csingle())  # E: {csingle}
reveal_type(np.singlecomplex())  # E: {csingle}
reveal_type(np.cdouble())  # E: {cdouble}
reveal_type(np.complex_())  # E: {cdouble}
reveal_type(np.cfloat())  # E: {cdouble}
reveal_type(np.clongdouble())  # E: {clongdouble}
reveal_type(np.clongfloat())  # E: {clongdouble}
Example #25
pmc_to_ulonglong = _PMCIntegers.pmc_to_ulonglong

def ulonglong_to_pmc(*args):
  return _PMCIntegers.ulonglong_to_pmc(*args)
ulonglong_to_pmc = _PMCIntegers.ulonglong_to_pmc
try:
    import numpy

    RegisterPy2PMC(
        is_py = lambda x: type(x) is numpy.ulonglong,
        py2pmc = lambda x: ulonglong_to_pmc(long(x)),
    )

    RegisterPMC2Py(
        is_pmc = pmc_is_ulonglong,
        pmc2py = lambda x: numpy.ulonglong(pmc_to_ulonglong(x)),
    )
except ImportError: pass

import ctypes

RegisterPy2PMC(
    is_py = lambda x: type(x) is ctypes.c_ulonglong,
    py2pmc = lambda x: ulonglong_to_pmc(x.value),)

RegisterPMC2Py(
    is_pmc = pmc_is_ulonglong,
    pmc2py = lambda x: ctypes.c_ulonglong(pmc_to_ulonglong(x)),
)

Example #26
def filter_(pd, vd, w, h, im, values, accurate):
    # Initialize all parameters
    n = w*h
    blurVariance = 0 if accurate else 0.5

    # scaleFactor = (pd + 1) * np.sqrt(1./6 + blurVariance)
    scaleFactor = np.empty((pd,), dtype=np.float32)
    for i in range(pd):
        scaleFactor[i] = (pd+1)*np.sqrt((1.0/6 + blurVariance)/((i+1)*(i+2)))
    # scalef_gpu = cuda.mem_alloc(scaleFactor.nbytes)
    # cuda.memcpy_htod(scalef_gpu, scaleFactor) # scaleFactor.hostToDevice()
    scalef_gpu = gpuarray.to_gpu(scaleFactor)

    values = np.float32(im).ravel() # shape (n,vd) -> (n*vd,)
    # vals_gpu = cuda.mem_alloc(values.nbytes)
    # cuda.memcpy_htod(vals_gpu, values)
    vals_gpu = gpuarray.to_gpu(values)

    positions = np.float32(np.where(~np.isnan(values))).T.ravel() # shape (n,pd) -> (n*pd,)
    # pos_gpu = cuda.mem_alloc(positions.nbytes)
    # cuda.memcpy_htod(pos_gpu, positions)
    pos_gpu = gpuarray.to_gpu(positions)

    # matrixStruct = GPUStruct([(np.int32, 'index', 0),
    #                           (np.float32,'weight', 0.)])

    # allocate matrix structs on the gpu
    matrix_structs = [GPUStruct([(np.int32, 'index', 0),
                                 (np.float32, 'weight', 0.)])
                      for _ in range(n*(pd+1))]
    for struct in matrix_structs:
        struct.copy_to_gpu()

    # get pointer addresses of the structs
    struct_ptrs = np.asarray([struct.get_ptr() for struct in matrix_structs],
                             dtype=np.intp)

    # allocate array for the matrix structs
    matrix_structs_gpu = gpuarray.to_gpu(struct_ptrs)

    # TODO need to sent the following instructions to the device
    # // Populate constant memory for hash helpers
    __host_two32 = np.ulonglong(1) << np.ulonglong(32) # unsigned long long int

    __host_div_c = [2*(n*(pd+1))]
    __host_div_c = np.uint32(__host_div_c)
    __host_div_l = [np.ceil(np.log(np.float32(__host_div_c)) / np.log(2.0))]
    __host_div_l = np.uint32(__host_div_l)

    __host_div_m = (__host_two32 << __host_div_l) // __host_div_c - __host_two32 + 1

    # __div_c = cuda.mem_alloc(__host_div_c.nbytes)
    # __div_l = cuda.mem_alloc(__host_div_l.nbytes)
    # __div_m = cuda.mem_alloc(__host_div_m.nbytes)
    #
    # cuda.memcpy_htod(__div_c, __host_div_c)
    # cuda.memcpy_htod(__div_l, __host_div_l)
    # cuda.memcpy_htod(__div_m, __host_div_m)

    # CUDA_SAFE_CALL(cudaMemcpyToSymbol((char*)&__div_c, &__host_div_c, sizeof(unsigned int)));
    # CUDA_SAFE_CALL(cudaMemcpyToSymbol((char*)&__div_l, &__host_div_l, sizeof(unsigned int)));
    # CUDA_SAFE_CALL(cudaMemcpyToSymbol((char*)&__div_m, &__host_div_m, sizeof(unsigned int)));

    # // Populate constant memory with hash of offset vectors
    hOffset_host = np.empty(pd+1, dtype=np.uint) # unsigned int
    hOffset_host[:-1] = np.ones(pd, dtype=np.uint)
    offset = np.empty(pd+1, dtype=np.short) # signed short

    def hash(kd, key):
        k = 0
        for i in range(kd):
            k += key[i]
            k *= 2531011
        return k

    offset -= pd+1
    for i in range(pd+1):
        hOffset_host[i] = hash(pd, offset) # TODO get hash working
    offset += pd+1

    # hOffset = cuda.mem_alloc(hOffset_host.nbytes)
    # cuda.memcpy_htod(hOffset, hOffset_host)

    # CUDA_SAFE_CALL(cudaMemcpyToSymbol((char*)&hOffset, &hOffset_host, sizeof(unsigned int)*({{ pd }}+1)));

    cuda_dir = '/home/adrian/code/pydensecrf/densecrf/external/permutohedral_cuda'
    cuda_file = os.path.join(cuda_dir, 'permutohedral_pycuda.cu')
    with open(cuda_file) as f:
        f_txt = f.read()

    tpl = Template(f_txt)
    rendered_tpl = tpl.render(pd=pd, vd=vd)

    # cubin_file = compile(rendered_tpl, no_extern_c=True,
    #                      include_dirs=[cuda_dir])
    # print [txt for txt in cubin_file.split('\x00') if 'text' in txt]

    mod = SourceModule(rendered_tpl, no_extern_c=True, include_dirs=[cuda_dir])

    # createHashTable({{ pd }}, {{ vd }}+1, n*({{ pd }}+1));
    def createHashTable(kd, vd, capacity):
        table_capacity_gpu, _ = mod.get_global('table_capacity')
        cuda.memcpy_htod(table_capacity_gpu, np.uint([capacity]))

        # CUDA_SAFE_CALL(cudaMemcpyToSymbol(table_capacity,
        #           &capacity,
        #           sizeof(unsigned int)));

        table_vals_gpu, table_vals_size = mod.get_global('table_values') # pointer-2-pointer
        values_gpu = gpuarray.zeros((capacity*vd,1), dtype=np.float32)
        # values_gpu = gpuarray.zeros((capacity*vd,1), dtype=np.float32)
        # cuda.memset_d32(values_gpu.gpudata, 0, values_gpu.size)
        cuda.memcpy_dtod(table_vals_gpu, values_gpu.gpudata, table_vals_size)

        # float *values;
        # allocateCudaMemory((void**)&values, capacity*vd*sizeof(float));
        # CUDA_SAFE_CALL(cudaMemset((void *)values, 0, capacity*vd*sizeof(float)));
        # CUDA_SAFE_CALL(cudaMemcpyToSymbol(table_values,
        #                   &values,
        #                   sizeof(float *)));

        table_entries, table_entries_size = mod.get_global('table_entries')
        entries_gpu = gpuarray.empty((capacity*2,1), dtype=np.int)
        entries_gpu.fill(-1)
        # cuda.memset_d32(entries_gpu.gpudata, 1, entries_gpu.size)
        cuda.memcpy_dtod(table_entries, entries_gpu.gpudata, table_entries_size)

        # int *entries;
        # allocateCudaMemory((void **)&entries, capacity*2*sizeof(int));
        # CUDA_SAFE_CALL(cudaMemset((void *)entries, -1, capacity*2*sizeof(int)));
        # CUDA_SAFE_CALL(cudaMemcpyToSymbol(table_entries,
        #                   &entries,
        #                   sizeof(unsigned int *)));

        ########################################
        # Assuming LINEAR_D_MEMORY not defined #
        ########################################

        #  #ifdef LINEAR_D_MEMORY
        # char *ranks;
        # allocateCudaMemory((void**)&ranks, capacity*sizeof(char));
        # CUDA_SAFE_CALL(cudaMemcpyToSymbol(table_rank,
        #                   &ranks,
        #                   sizeof(char *)));
        #
        # signed short *zeros;
        # allocateCudaMemory((void**)&zeros, capacity*sizeof(signed short));
        # CUDA_SAFE_CALL(cudaMemcpyToSymbol(table_zeros,
        #                   &zeros,
        #                   sizeof(char *)));
        #
        # #else

        table_keys_gpu, table_keys_size = mod.get_global('table_keys')
        keys_gpu = gpuarray.zeros((capacity*kd,1), dtype=np.short)
        # keys_gpu = gpuarray.empty((capacity*kd,1), dtype=np.short)
        # cuda.memset_d32(keys_gpu.gpudata, 0, keys_gpu.size)
        cuda.memcpy_dtod(table_keys_gpu, keys_gpu.gpudata, table_keys_size)

        # signed short *keys;
        # allocateCudaMemory((void **)&keys, capacity*kd*sizeof(signed short));
        # CUDA_SAFE_CALL(cudaMemset((void *)keys, 0, capacity*kd*sizeof(signed short)));
        # CUDA_SAFE_CALL(cudaMemcpyToSymbol(table_keys,
        #                   &keys,

    createHashTable(pd, vd + 1, n * ( pd + 1))

    t = np.zeros(5)

    # get pointers of the variables on the device and assign them values
    __div_c, _ = mod.get_global('__div_c')
    cuda.memcpy_htod(__div_c, __host_div_c)
    __div_l, _ = mod.get_global('__div_l')
    cuda.memcpy_htod(__div_l, __host_div_l)
    __div_m, _ = mod.get_global('__div_m')
    cuda.memcpy_htod(__div_m, __host_div_m)

    hOffset, _ = mod.get_global('hOffset')
    cuda.memcpy_htod(hOffset, hOffset_host)

    #########################
    # create lattice matrix #
    #########################

    BLOCK_SIZE = (8, 8, 1)
    GRID_SIZE = ((w-1)//8+1, (h-1)//8+1, 1)

    create_mat_fn_name = "_Z12createMatrixiiPKfS0_S0_P11MatrixEntry"
    pycuda_create_mat_fn = mod.get_function(create_mat_fn_name)

    # createMatrix<<<blocks, blockSize>>>(w, h, positions.device,
    #                                     values.device,
    #                                     scaleFactor.device,
    #                                     matrix.device);

    t[0] = time()
    pycuda_create_mat_fn(np.int32(w), np.int32(h), pos_gpu, vals_gpu, scalef_gpu,
                         matrix_structs_gpu.gpudata,
                         block=BLOCK_SIZE, grid=GRID_SIZE)
                         # matrixStruct.get_ptr(), block=BLOCK_SIZE, grid=GRID_SIZE)
    t[0] = time() - t[0]

    ####################################
    # fix duplicate hash table entries #
    ####################################

    CLEAN_BLOCK_SIZE = (32, 1, 1)
    CLEAN_GRID_SIZE = ((n-1)//CLEAN_BLOCK_SIZE[0]+1, 2*(pd+1), 1)

    clean_hash_fn_name = "_Z14cleanHashTableiiP11MatrixEntry"
    pycuda_clean_hash_fn = mod.get_function(clean_hash_fn_name)

    # cleanHashTable<<<cleanBlocks, cleanBlockSize>>>({{ pd }}, 2*n*({{ pd }}+1),
    #                                                 matrix.device);
    # CUT_CHECK_ERROR("clean failed\n");

    t[1] = time()
    pycuda_clean_hash_fn(np.int32(pd), np.int32(2*n*(pd+1)),
                         matrix_structs_gpu.gpudata, # matrixStruct.get_ptr(),
                         block=CLEAN_BLOCK_SIZE, grid=CLEAN_GRID_SIZE)
    t[1] = time() - t[1]

    #########################
    # splat splits by color #
    #########################
    # ... need to extend the y coordinate to our blocks to represent that

    GRID_SIZE = (GRID_SIZE[0], GRID_SIZE[1] * ( pd + 1 )) # blocks.y *= pd+1;

    splat_cache_fn_name = "_Z10splatCacheiiPfP11MatrixEntry"
    pycuda_clean_hash_fn = mod.get_function(splat_cache_fn_name)

    # splatCache<<<blocks, blockSize>>>(w, h, values.device, matrix.device);
    # CUT_CHECK_ERROR("splat failed\n");

    t[2] = time()
    pycuda_clean_hash_fn(np.int32(w), np.int32(h), vals_gpu,
                         matrix_structs_gpu.gpudata, # matrixStruct.get_ptr(),
                         block=BLOCK_SIZE, grid=GRID_SIZE)
    t[2] = time() - t[2]


    if accurate:
        new_vals_size = n*(pd+1)*(vd+1)
        new_vals_gpu = gpuarray.zeros((new_vals_size,1), dtype=np.float32)
        # new_vals_gpu = gpuarray.empty((new_vals_size,1), dtype=np.float32)
        # cuda.memset_d32(new_vals_gpu.gpudata, 0, new_vals_gpu.size)

        # float *newValues;
        # allocateCudaMemory((void**)&(newValues), n*({{ pd }}+1)*({{ vd }}+1)*sizeof(float));
        # CUDA_SAFE_CALL(cudaMemset((void *)newValues, 0, n*({{ pd }}+1)*({{ vd }}+1)*sizeof(float)));

        ########
        # blur #
        ########

        blur_fn_name = "_Z4bluriPfiP11MatrixEntry"
        pycuda_blur_fn = mod.get_function(blur_fn_name)

        def swapHashTableValues(new_vals):
            table_vals, table_vals_size = mod.get_global('table_values') # (device_ptr, size_in_bytes)
            old_vals_gpu = cuda.mem_alloc(table_vals_size)
            # old_vals_gpu = gpuarray.empty((table_vals_size,1), )
            cuda.memcpy_dtod(old_vals_gpu, table_vals, table_vals_size)
            cuda.memcpy_dtod(table_vals, new_vals.gpudata, table_vals_size)
            return old_vals_gpu

        t[3] = time()
        for color in range(pd+1):
            pycuda_blur_fn(np.int32(n*(pd+1)), new_vals_gpu, np.int32(color),
                           matrix_structs_gpu.gpudata, # matrixStruct.get_ptr(),
                           block=CLEAN_BLOCK_SIZE, grid=CLEAN_GRID_SIZE)
            # TODO: newValues = swapHashTableValues(newValues);
            print(color)
            new_vals_gpu.gpudata = swapHashTableValues(new_vals_gpu)

            # blur<<<cleanBlocks, cleanBlockSize>>>(n*({{ pd }}+1), newValues, matrix.device, color);
            # CUT_CHECK_ERROR("blur failed\n");
        t[3] = time() - t[3]

    #########
    # slice #
    #########

    GRID_SIZE = (GRID_SIZE[0], GRID_SIZE[1] // ( pd + 1 ))

    slice_fn_name = "_Z4bluriPfP11MatrixEntryi"
    pycuda_slice_fn = mod.get_function(slice_fn_name)

    t[4] = time()
    pycuda_slice_fn(np.int32(w), np.int32(h), vals_gpu, matrix_structs_gpu.gpudata, # matrixStruct.get_ptr(),
                    block=BLOCK_SIZE, grid=GRID_SIZE)
    t[4] = time() - t[4]

    # slice<<<blocks, blockSize>>>(w, h, values.device, matrix.device);
    # CUT_CHECK_ERROR("slice failed\n");


    total_t = np.sum(t)
    print "Total time: {:3.3f} ms\n".format(total_t)
    # TODO: command (unsigned int) gpu_mem = GPU_MEMORY_ALLOCATION
    # print "Total GPU memory usage: %u bytes\n".format(gpu_mem)

    # cuda.memcpy_dtoh(values, vals_gpu) # values.deviceToHost();
    values = vals_gpu.get()

    def destroyHashTable():
        # assuming LINEAR_D_MEMORY not defined
        table_keys, _ = mod.get_global('table_keys')
        table_keys.free()

        table_vals, _ = mod.get_global('table_values')
        table_vals.free()

        table_ents, _ = mod.get_global('table_entries')
        table_ents.free()
    destroyHashTable() # TODO: command destroyHashTable();

    # Python deinitialises objects as soon as the reference count for them
    # becomes zero. If you need to do it explicitly, I think just "del
    # gpuarray_obj" will be enough.

    return values
Example #27
def ulonglong_to_pmc(*args):
    return _PMCIntegers.ulonglong_to_pmc(*args)


ulonglong_to_pmc = _PMCIntegers.ulonglong_to_pmc
try:
    import numpy

    RegisterPy2PMC(
        is_py=lambda x: type(x) is numpy.ulonglong,
        py2pmc=lambda x: ulonglong_to_pmc(long(x)),
    )

    RegisterPMC2Py(
        is_pmc=pmc_is_ulonglong,
        pmc2py=lambda x: numpy.ulonglong(pmc_to_ulonglong(x)),
    )
except ImportError:
    pass

import ctypes

RegisterPy2PMC(
    is_py=lambda x: type(x) is ctypes.c_ulonglong,
    py2pmc=lambda x: ulonglong_to_pmc(x.value),
)

RegisterPMC2Py(
    is_pmc=pmc_is_ulonglong,
    pmc2py=lambda x: ctypes.c_ulonglong(pmc_to_ulonglong(x)),
)
Example #28
# import the support library for vectors and matrices
import numpy as np

# Primitive data types (the value is converted to the specified type)
np.bool(valor)			# boolean stored as a byte (True or False)
np.byte(valor)			# signed integer stored as a byte (platform-defined)
np.ubyte(valor)			# unsigned integer stored as a byte (platform-defined)
np.short(valor)			# signed short integer (platform-defined)
np.ushort(valor)		# unsigned short integer (platform-defined)
np.intc(valor)			# signed medium-sized integer (platform-defined)
np.uintc(valor)			# unsigned medium-sized integer (platform-defined)
np.int_(valor)			# signed long integer (platform-defined)
np.uint(valor)			# unsigned long integer (platform-defined)
np.longlong(valor)		# signed long long integer (platform-defined)
np.ulonglong(valor)		# unsigned long long integer (platform-defined)
np.half(valor) 			# half-precision float (1-bit sign, 5-bit exponent, 10-bit mantissa)
np.float16(valor)	 	# half-precision float (1-bit sign, 5-bit exponent, 10-bit mantissa)
np.single(valor)		# single-precision float (1-bit sign, 8-bit exponent, 23-bit mantissa)
np.double(valor)		# double-precision float (1-bit sign, 11-bit exponent, 52-bit mantissa)
np.longdouble(valor)	# extended-precision float (platform-defined)
np.csingle(valor)		# complex number backed by two single-precision floats (real and imaginary parts)
np.cdouble(valor)		# complex number backed by two double-precision floats (real and imaginary parts)
np.clongdouble(valor)	# complex number backed by two extended-precision floats (real and imaginary parts)

# Fixed-size alias data types (the value is converted to the specified type)
np.int8(valor)				# 1-byte signed integer (-128 to 127)
np.uint8(valor)				# 1-byte unsigned integer (0 to 255)
np.int16(valor)				# 2-byte signed integer (-32768 to 32767)
np.uint16(valor)			# 2-byte unsigned integer (0 to 65535)
np.int32(valor)				# 4-byte signed integer (-2147483648 to 2147483647)
np.uint32(valor)			# 4-byte unsigned integer (0 to 4294967295)
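
A short illustration of the fixed-width behaviour these conversions imply (a minimal sketch; NumPy also emits a RuntimeWarning on the wrapping addition):

import numpy as np

a = np.ulonglong(2**64 - 1)   # largest value a 64-bit unsigned integer holds
print(a)                      # 18446744073709551615
print(a + np.ulonglong(1))    # wraps around to 0: arithmetic is modulo 2**64
print(np.int16(3.9))          # 3: float-to-integer conversion truncates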
Example #29
        (np.str_("foo"), {"string"}),
        (np.bytes_("foo"), set()),
        (np.float16(3.14), {"number"}),
        (np.float32(3.14159), {"number"}),
        (np.float64(3.14159), {"number"}),
        # Evidently float128 is not available on Windows:
        (getattr(np, "float128", np.float64)(3.14159), {"number"}),
        (np.int8(42), {"number", "integer"}),
        (np.int16(42), {"number", "integer"}),
        (np.int32(42), {"number", "integer"}),
        (np.longlong(42), {"number", "integer"}),
        (np.uint8(42), {"number", "integer"}),
        (np.uint16(42), {"number", "integer"}),
        (np.uint32(42), {"number", "integer"}),
        (np.uint64(42), {"number", "integer"}),
        (np.ulonglong(42), {"number", "integer"}),
    ])
def test_numpy_scalar_type_validation(numpy_value, valid_types):
    def _assert_validation(jsonschema_type, expected_valid):
        validator = schema.get_validator()
        try:
            validator.validate(numpy_value, _schema={"type": jsonschema_type})
        except ValidationError:
            valid = False
        else:
            valid = True

        if valid is not expected_valid:
            if expected_valid:
                description = "valid"
            else:
Example #30
class TestScalarDiscovery:
    def test_void_special_case(self):
        # Void dtypes with structures discover tuples as elements
        arr = np.array((1, 2, 3), dtype="i,i,i")
        assert arr.shape == ()
        arr = np.array([(1, 2, 3)], dtype="i,i,i")
        assert arr.shape == (1, )

    def test_char_special_case(self):
        arr = np.array("string", dtype="c")
        assert arr.shape == (6, )
        assert arr.dtype.char == "c"
        arr = np.array(["string"], dtype="c")
        assert arr.shape == (1, 6)
        assert arr.dtype.char == "c"

    def test_char_special_case_deep(self):
        # Check that the character special case errors correctly if the
        # array is too deep:
        nested = ["string"]  # 2 dimensions (due to string being sequence)
        for i in range(np.MAXDIMS - 2):
            nested = [nested]

        arr = np.array(nested, dtype='c')
        assert arr.shape == (1, ) * (np.MAXDIMS - 1) + (6, )
        with pytest.raises(ValueError):
            np.array([nested], dtype="c")

    def test_unknown_object(self):
        arr = np.array(object())
        assert arr.shape == ()
        assert arr.dtype == np.dtype("O")

    @pytest.mark.parametrize("scalar", scalar_instances())
    def test_scalar(self, scalar):
        arr = np.array(scalar)
        assert arr.shape == ()
        assert arr.dtype == scalar.dtype

        arr = np.array([[scalar, scalar]])
        assert arr.shape == (1, 2)
        assert arr.dtype == scalar.dtype

    # Additionally to string this test also runs into a corner case
    # with datetime promotion (the difference is the promotion order).
    @pytest.mark.filterwarnings("ignore:Promotion of numbers:FutureWarning")
    def test_scalar_promotion(self):
        for sc1, sc2 in product(scalar_instances(), scalar_instances()):
            sc1, sc2 = sc1.values[0], sc2.values[0]
            # test all combinations:
            try:
                arr = np.array([sc1, sc2])
            except (TypeError, ValueError):
                # The promotion between two times can fail
                # XFAIL (ValueError): Some object casts are currently undefined
                continue
            assert arr.shape == (2, )
            try:
                dt1, dt2 = sc1.dtype, sc2.dtype
                expected_dtype = np.promote_types(dt1, dt2)
                assert arr.dtype == expected_dtype
            except TypeError:
                # Will currently always go to object dtype
                assert arr.dtype == np.dtype("O")

    @pytest.mark.parametrize("scalar", scalar_instances())
    def test_scalar_coercion(self, scalar):
        # This tests various scalar coercion paths, mainly for the numerical
        # types.  It includes some paths not directly related to `np.array`
        if isinstance(scalar, np.inexact):
            # Ensure we have a full-precision number if available
            scalar = type(scalar)((scalar * 2)**0.5)

        if type(scalar) is rational:
            # Rational generally fails due to a missing cast. In the future
            # object casts should automatically be defined based on `setitem`.
            pytest.xfail("Rational to object cast is undefined currently.")

        # Use casting from object:
        arr = np.array(scalar, dtype=object).astype(scalar.dtype)

        # Test various ways to create an array containing this scalar:
        arr1 = np.array(scalar).reshape(1)
        arr2 = np.array([scalar])
        arr3 = np.empty(1, dtype=scalar.dtype)
        arr3[0] = scalar
        arr4 = np.empty(1, dtype=scalar.dtype)
        arr4[:] = [scalar]
        # All of these methods should yield the same results
        assert_array_equal(arr, arr1)
        assert_array_equal(arr, arr2)
        assert_array_equal(arr, arr3)
        assert_array_equal(arr, arr4)

    @pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy")
    @pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
    @pytest.mark.parametrize("cast_to", scalar_instances())
    def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to):
        """
        Test that in most cases:
           * `np.array(scalar, dtype=dtype)`
           * `np.empty((), dtype=dtype)[()] = scalar`
           * `np.array(scalar).astype(dtype)`
        should behave the same.  The only exceptions are parametric dtypes
        (mainly datetime/timedelta without unit) and void without fields.
        """
        dtype = cast_to.dtype  # use to parametrize only the target dtype

        for scalar in scalar_instances(times=False):
            scalar = scalar.values[0]

            if dtype.type == np.void:
                if scalar.dtype.fields is not None and dtype.fields is None:
                    # Here, coercion to "V6" works, but the cast fails.
                    # Since the types are identical, SETITEM takes care of
                    # this, but has different rules than the cast.
                    with pytest.raises(TypeError):
                        np.array(scalar).astype(dtype)
                    np.array(scalar, dtype=dtype)
                    np.array([scalar], dtype=dtype)
                    continue

            # The main test, we first try to use casting and if it succeeds
            # continue below testing that things are the same, otherwise
            # test that the alternative paths at least also fail.
            try:
                cast = np.array(scalar).astype(dtype)
            except (TypeError, ValueError, RuntimeError):
                # coercion should also raise (error type may change)
                with pytest.raises(Exception):
                    np.array(scalar, dtype=dtype)

                if (isinstance(scalar, rational)
                        and np.issubdtype(dtype, np.signedinteger)):
                    return

                with pytest.raises(Exception):
                    np.array([scalar], dtype=dtype)
                # assignment should also raise
                res = np.zeros((), dtype=dtype)
                with pytest.raises(Exception):
                    res[()] = scalar

                return

            # Non error path:
            arr = np.array(scalar, dtype=dtype)
            assert_array_equal(arr, cast)
            # assignment behaves the same
            ass = np.zeros((), dtype=dtype)
            ass[()] = scalar
            assert_array_equal(ass, cast)

    @pytest.mark.parametrize("pyscalar", [10, 10.32, 10.14j, 10**100])
    def test_pyscalar_subclasses(self, pyscalar):
        """NumPy arrays are read/write which means that anything but invariant
        behaviour is on thin ice.  However, we currently are happy to discover
        subclasses of Python float, int, complex the same as the base classes.
        This should potentially be deprecated.
        """
        class MyScalar(type(pyscalar)):
            pass

        res = np.array(MyScalar(pyscalar))
        expected = np.array(pyscalar)
        assert_array_equal(res, expected)

    @pytest.mark.parametrize("dtype_char", np.typecodes["All"])
    def test_default_dtype_instance(self, dtype_char):
        if dtype_char in "SU":
            dtype = np.dtype(dtype_char + "1")
        elif dtype_char == "V":
            # Legacy behaviour was to use V8, since float64 is the default
            # dtype and has 8 bytes.
            dtype = np.dtype("V8")
        else:
            dtype = np.dtype(dtype_char)

        discovered_dtype, _ = _discover_array_parameters([], type(dtype))

        assert discovered_dtype == dtype
        assert discovered_dtype.itemsize == dtype.itemsize

    @pytest.mark.parametrize("dtype", np.typecodes["Integer"])
    @pytest.mark.parametrize(["scalar", "error"],
                             [(np.float64(np.nan), ValueError),
                              (np.ulonglong(-1), OverflowError)])
    def test_scalar_to_int_coerce_does_not_cast(self, dtype, scalar, error):
        """
        Signed integers are currently different in that they do not cast other
        NumPy scalars, but instead use scalar.__int__(). The hardcoded
        exception to this rule is `np.array(scalar, dtype=integer)`.
        """
        dtype = np.dtype(dtype)

        # This is a special case using casting logic.  It warns for the NaN
        # but allows the cast (giving undefined behaviour).
        with np.errstate(invalid="ignore"):
            coerced = np.array(scalar, dtype=dtype)
            cast = np.array(scalar).astype(dtype)
        assert_array_equal(coerced, cast)

        # However these fail:
        with pytest.raises(error):
            np.array([scalar], dtype=dtype)
        with pytest.raises(error):
            cast[()] = scalar
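The asymmetry the docstring above describes can be seen directly; a minimal REPL-style sketch (behaviour as observed on recent NumPy versions):

import numpy as np

# Scalar coercion goes through casting logic: the NaN is cast
# (with the warning suppressed here), yielding an undefined integer.
with np.errstate(invalid="ignore"):
    np.array(np.float64(np.nan), dtype=np.int64)  # succeeds

# Sequence coercion and item assignment use scalar.__int__() instead,
# so the very same scalar raises:
try:
    np.array([np.float64(np.nan)], dtype=np.int64)
except ValueError:
    pass  # "cannot convert float NaN to integer"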
Example #31
def calculate_psnr(img_A, img_B):
    n, m, _ = img_A.shape
    # Cast to float first: subtracting uint8 images directly would wrap around.
    diff = img_A.astype(np.float64) - img_B.astype(np.float64)
    # np.ulonglong truncates the fractional part of the MSE.
    MSE = np.ulonglong(np.sum((1 / (m * n * 3)) * np.square(diff)))
    return 10 * np.log10(255 * 255 / MSE)
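A quick usage sketch with synthetic images (hypothetical data, just to exercise the function):

import numpy as np

rng = np.random.default_rng(0)
img = rng.integers(0, 256, size=(64, 64, 3), dtype=np.uint8)
noisy = np.clip(img + rng.integers(-5, 6, size=img.shape), 0, 255).astype(np.uint8)
print(calculate_psnr(img, noisy))  # finite PSNR, roughly 38 dB for this noise level
print(calculate_psnr(img, img))    # MSE == 0: warns and returns inf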
Example #32
np.byte()
np.short()
np.intc()
np.intp()
np.int0()
np.int_()
np.longlong()

np.ubyte()
np.ushort()
np.uintc()
np.uintp()
np.uint0()
np.uint()
np.ulonglong()

np.half()
np.single()
np.double()
np.float_()
np.longdouble()
np.longfloat()

np.csingle()
np.singlecomplex()
np.cdouble()
np.complex_()
np.cfloat()
np.clongdouble()
np.clongfloat()
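The C-named aliases above have platform-dependent widths; a small sanity sketch, assuming a typical 64-bit platform:

import numpy as np

# 'q' / 'Q' are the array-protocol codes for (unsigned) long long.
assert np.dtype(np.longlong) == np.dtype("q")
assert np.dtype(np.ulonglong) == np.dtype("Q")
# long long is at least 64 bits; on common platforms it is exactly 8 bytes.
assert np.dtype(np.ulonglong).itemsize == 8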
Example #33
reveal_type(np.byte())  # E: numpy.signedinteger[numpy.typing._
reveal_type(np.short())  # E: numpy.signedinteger[numpy.typing._
reveal_type(np.intc())  # E: numpy.signedinteger[numpy.typing._
reveal_type(np.intp())  # E: numpy.signedinteger[numpy.typing._
reveal_type(np.int0())  # E: numpy.signedinteger[numpy.typing._
reveal_type(np.int_())  # E: numpy.signedinteger[numpy.typing._
reveal_type(np.longlong())  # E: numpy.signedinteger[numpy.typing._

reveal_type(np.ubyte())  # E: numpy.unsignedinteger[numpy.typing._
reveal_type(np.ushort())  # E: numpy.unsignedinteger[numpy.typing._
reveal_type(np.uintc())  # E: numpy.unsignedinteger[numpy.typing._
reveal_type(np.uintp())  # E: numpy.unsignedinteger[numpy.typing._
reveal_type(np.uint0())  # E: numpy.unsignedinteger[numpy.typing._
reveal_type(np.uint())  # E: numpy.unsignedinteger[numpy.typing._
reveal_type(np.ulonglong())  # E: numpy.unsignedinteger[numpy.typing._

reveal_type(np.half())  # E: numpy.floating[numpy.typing._
reveal_type(np.single())  # E: numpy.floating[numpy.typing._
reveal_type(np.double())  # E: numpy.floating[numpy.typing._
reveal_type(np.float_())  # E: numpy.floating[numpy.typing._
reveal_type(np.longdouble())  # E: numpy.floating[numpy.typing._
reveal_type(np.longfloat())  # E: numpy.floating[numpy.typing._

reveal_type(np.csingle())  # E: numpy.complexfloating[numpy.typing._
reveal_type(np.singlecomplex())  # E: numpy.complexfloating[numpy.typing._
reveal_type(np.cdouble())  # E: numpy.complexfloating[numpy.typing._
reveal_type(np.complex_())  # E: numpy.complexfloating[numpy.typing._
reveal_type(np.cfloat())  # E: numpy.complexfloating[numpy.typing._
reveal_type(np.clongdouble())  # E: numpy.complexfloating[numpy.typing._
reveal_type(np.clongfloat())  # E: numpy.complexfloating[numpy.typing._
Example #34
def test_table_typing_numpy():
    # Pulled from https://numpy.org/devdocs/user/basics.types.html

    # Numerics
    table = wandb.Table(columns=["A"], dtype=[NumberType])
    table.add_data(None)
    table.add_data(42)
    table.add_data(np.byte(1))
    table.add_data(np.short(42))
    table.add_data(np.ushort(42))
    table.add_data(np.intc(42))
    table.add_data(np.uintc(42))
    table.add_data(np.int_(42))
    table.add_data(np.uint(42))
    table.add_data(np.longlong(42))
    table.add_data(np.ulonglong(42))
    table.add_data(np.half(42))
    table.add_data(np.float16(42))
    table.add_data(np.single(42))
    table.add_data(np.double(42))
    table.add_data(np.longdouble(42))
    table.add_data(np.csingle(42))
    table.add_data(np.cdouble(42))
    table.add_data(np.clongdouble(42))
    table.add_data(np.int8(42))
    table.add_data(np.int16(42))
    table.add_data(np.int32(42))
    table.add_data(np.int64(42))
    table.add_data(np.uint8(42))
    table.add_data(np.uint16(42))
    table.add_data(np.uint32(42))
    table.add_data(np.uint64(42))
    table.add_data(np.intp(42))
    table.add_data(np.uintp(42))
    table.add_data(np.float32(42))
    table.add_data(np.float64(42))
    table.add_data(np.float_(42))
    table.add_data(np.complex64(42))
    table.add_data(np.complex128(42))
    table.add_data(np.complex_(42))

    # Booleans
    table = wandb.Table(columns=["A"], dtype=[BooleanType])
    table.add_data(None)
    table.add_data(True)
    table.add_data(False)
    table.add_data(np.bool_(True))

    # Array of Numerics
    table = wandb.Table(columns=["A"], dtype=[[NumberType]])
    table.add_data(None)
    table.add_data([42])
    table.add_data(np.array([1, 0], dtype=np.byte))
    table.add_data(np.array([42, 42], dtype=np.short))
    table.add_data(np.array([42, 42], dtype=np.ushort))
    table.add_data(np.array([42, 42], dtype=np.intc))
    table.add_data(np.array([42, 42], dtype=np.uintc))
    table.add_data(np.array([42, 42], dtype=np.int_))
    table.add_data(np.array([42, 42], dtype=np.uint))
    table.add_data(np.array([42, 42], dtype=np.longlong))
    table.add_data(np.array([42, 42], dtype=np.ulonglong))
    table.add_data(np.array([42, 42], dtype=np.half))
    table.add_data(np.array([42, 42], dtype=np.float16))
    table.add_data(np.array([42, 42], dtype=np.single))
    table.add_data(np.array([42, 42], dtype=np.double))
    table.add_data(np.array([42, 42], dtype=np.longdouble))
    table.add_data(np.array([42, 42], dtype=np.csingle))
    table.add_data(np.array([42, 42], dtype=np.cdouble))
    table.add_data(np.array([42, 42], dtype=np.clongdouble))
    table.add_data(np.array([42, 42], dtype=np.int8))
    table.add_data(np.array([42, 42], dtype=np.int16))
    table.add_data(np.array([42, 42], dtype=np.int32))
    table.add_data(np.array([42, 42], dtype=np.int64))
    table.add_data(np.array([42, 42], dtype=np.uint8))
    table.add_data(np.array([42, 42], dtype=np.uint16))
    table.add_data(np.array([42, 42], dtype=np.uint32))
    table.add_data(np.array([42, 42], dtype=np.uint64))
    table.add_data(np.array([42, 42], dtype=np.intp))
    table.add_data(np.array([42, 42], dtype=np.uintp))
    table.add_data(np.array([42, 42], dtype=np.float32))
    table.add_data(np.array([42, 42], dtype=np.float64))
    table.add_data(np.array([42, 42], dtype=np.float_))
    table.add_data(np.array([42, 42], dtype=np.complex64))
    table.add_data(np.array([42, 42], dtype=np.complex128))
    table.add_data(np.array([42, 42], dtype=np.complex_))

    # Array of Booleans
    table = wandb.Table(columns=["A"], dtype=[[BooleanType]])
    table.add_data(None)
    table.add_data([True])
    table.add_data([False])
    table.add_data(np.array([True, False], dtype=np.bool_))

    # Nested arrays
    table = wandb.Table(columns=["A"])
    table.add_data([[[[1, 2, 3]]]])
    table.add_data(np.array([[[[1, 2, 3]]]]))
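Once populated, a table like this is typically logged to a run; a minimal sketch, assuming a live wandb session and a hypothetical project name:

import numpy as np
import wandb

run = wandb.init(project="numpy-table-demo")  # hypothetical project
table = wandb.Table(columns=["A"])
table.add_data(np.ulonglong(42))
run.log({"numpy_types": table})
run.finish()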
Example #35
class TestNumpy:
    @staticmethod
    def test_get_numpy() -> None:
        """
        Test get_numpy when module is present
        """
        # Arrange

        # Act
        result = Numpy.get_numpy()

        # Assert
        assert result is np

    @staticmethod
    def test_get_numpy_missing(mocker: MockFixture) -> None:
        """
        Test get_numpy when module is missing
        """
        # Arrange
        mocker.patch.dict("sys.modules", {"numpy": None})

        # Act
        result = Numpy.get_numpy()

        # Assert
        assert result is None

    @staticmethod
    def test_get_numpy_missing_error(mocker: MockFixture) -> None:
        """
        Test get_numpy when module is missing raises error
        """
        # Arrange
        mocker.patch.dict("sys.modules", {"numpy": None})

        # Act / assert
        with pytest.raises(ImportError, match="foo"):
            Numpy.get_numpy(raise_error=True, custom_error_message="foo")

    @staticmethod
    @pytest.mark.parametrize("value, expected", [(np.array([1, 2, 3]), True),
                                                 ([1, 2, 3], False)])
    def test_is_numpy_object(value, expected) -> None:
        """
        Test is_numpy_object
        """
        # Arrange

        # Act
        result = Numpy.is_numpy_object(value)

        # Assert
        assert result == expected

    @staticmethod
    def test_get_numpy_primatives() -> None:
        """
        Test _get_numpy_primatives
        """
        # Arrange

        # Act
        result = Numpy._get_numpy_primatives(np)

        # Assert
        assert len(result) == 33  # Expected number of types
        for thing in result:
            assert "numpy" in getattr(thing, "__module__", "").split(
                ".")  # Check that type is from numpy
            assert type(thing) is type  # Check that each type is a type

    @staticmethod
    def test_encode_numpy_error():
        """ Test that the encode_numpy raises an error if no encoding is defined. """
        # Arrange
        value = "not a numpy"

        # Act & Assert
        with pytest.raises(NotImplementedError):
            Numpy.encode_numpy(value)

    @staticmethod
    @pytest.mark.parametrize(
        "value, expected",
        [
            # fmt: off
            (np.array([['balloons'], ['are'], ['awesome']]),
             [['balloons'], ['are'], ['awesome']]),
            (np.bool_(1), True),
            (np.byte(4), 4),
            (np.ubyte(4), 4),
            (np.short(4), 4),
            (np.ushort(4), 4),
            (np.intc(4), 4),
            (np.uintc(4), 4),
            (np.int_(4), 4),
            (np.uint(4), 4),
            (np.longlong(4), 4),
            (np.ulonglong(4), 4),
            (np.float16(4), 4),
            (np.single(4), 4),
            (np.double(4), 4),
            (np.longdouble(4), 4),
            (np.csingle(4), 4),
            (np.cdouble(4), 4),
            (np.clongdouble(4), 4),
            (np.int8(4), 4),
            (np.int16(4), 4),
            (np.int32(4), 4),
            (np.int64(4), 4),
            (np.uint8(4), 4),
            (np.uint16(4), 4),
            (np.uint32(4), 4),
            (np.uint64(4), 4),
            (np.intp(4), 4),
            (np.uintp(4), 4),
            (np.float32(4), 4),
            (np.float64(4), 4),
            (np.complex64(4), 4 + 0j),
            (np.complex128(4), 4 + 0j),
            (np.complex_(4), 4 + 0j),
            # fmt: on
        ],
    )
    def test_encode_numpy(value, expected) -> None:
        """
        Test encode_numpy
        """
        # Arrange

        # Act
        result = Numpy.encode_numpy(value)

        # Assert
        assert result == expected
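Taken together, the cases above pin down the encoder's observable behaviour: arrays become nested lists, scalars become the nearest builtin. A minimal sketch that would satisfy these tests (not the library's actual implementation):

import numpy as np

def encode_numpy_sketch(value):
    """Hypothetical stand-in for Numpy.encode_numpy."""
    if isinstance(value, np.ndarray):
        return value.tolist()  # arrays -> nested Python lists
    if isinstance(value, np.generic):
        return value.item()  # scalars -> nearest builtin (bool/int/float/complex)
    raise NotImplementedError(f"No encoding defined for {type(value)!r}")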