def analysis(pointxy, pointxy1, values, methodOfAnalysis=""):
    global file, singleValues, distances
    t = values.split(" ")
    COL = int(t[1])
    ROW = int(t[0])

    list = []
    for r in range(ROW):
        for c in range(COL):
            list.append(
                (r, c, pointxy[0], pointxy[1], pointxy1[0], pointxy1[1]))

    shared_array_base = Array(c_double, ROW * COL)
    singleValues = as_array(shared_array_base.get_obj())
    singleValues = singleValues.reshape(COL, ROW)

    shared_array = Array(c_double, ROW * COL * 50)
    distances = as_array(shared_array.get_obj())
    distances = distances.reshape(COL, ROW, 50)

    with ProcessPoolExecutor() as executor:
        if methodOfAnalysis == "strain":
            executor.map(multiprocessing_func, list)
        else:
            executor.map(intensity, list)
    entry.delete(0, tk.END)
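The function above (and several later examples) relies on numpy.ctypeslib.as_array to expose a multiprocessing.Array buffer as a writable NumPy view. A minimal, self-contained sketch of just that pattern, with illustrative names rather than anything from the project above:

from ctypes import c_double
from multiprocessing import Array, Process

from numpy import ctypeslib


def fill_row(shared, ncols, row, value):
    # Re-wrap the shared ctypes buffer inside the worker; the view aliases the
    # same memory, so the parent sees the writes after join().
    grid = ctypeslib.as_array(shared.get_obj()).reshape(-1, ncols)
    grid[row, :] = value


if __name__ == "__main__":
    rows, cols = 3, 4
    shared = Array(c_double, rows * cols)                            # process-shared buffer
    grid = ctypeslib.as_array(shared.get_obj()).reshape(rows, cols)  # zero-copy view

    workers = [Process(target=fill_row, args=(shared, cols, r, float(r + 1)))
               for r in range(rows)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print(grid)  # each row was filled by a different process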
Example No. 2
def test_cgeometrylist_from_geometrylist():
    """Tests `from_geometrylist` of the `CGeometryList` class."""

    x_coordinates = np.array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=np.double)
    y_coordinates = np.array([5.0, 6.0, 7.0, 8.0, 9.0], dtype=np.double)
    values = np.array([10.0, 11.0, 12.0, 13.0, 14.0], dtype=np.double)
    geometry_separator = 15.0
    inner_outer_separator = 16.0

    geometry_list = GeometryList(x_coordinates, y_coordinates)
    geometry_list.values = values
    geometry_list.geometry_separator = geometry_separator
    geometry_list.inner_outer_separator = inner_outer_separator

    c_geometry_list = CGeometryList.from_geometrylist(geometry_list)

    # Get the numpy arrays from the ctypes object
    c_geometry_list_x_coordinates = as_array(c_geometry_list.x_coordinates,
                                             (5, ))
    c_geometry_list_y_coordinates = as_array(c_geometry_list.y_coordinates,
                                             (5, ))
    c_geometry_list_values = as_array(c_geometry_list.values, (5, ))

    assert_array_equal(c_geometry_list_x_coordinates, x_coordinates)
    assert_array_equal(c_geometry_list_y_coordinates, y_coordinates)
    assert_array_equal(c_geometry_list_values, values)

    assert c_geometry_list.geometry_separator == geometry_separator
    assert c_geometry_list.inner_outer_separator == inner_outer_separator
    assert c_geometry_list.n_coordinates == x_coordinates.size
Example No. 3
def setup_stage():
	"""docstring for setup_stage
	"""
	import ctypes
	
	e7xx = ctypes.windll.LoadLibrary('E7XX_GCS_DLL.dll')
	try:
		print "Connecting to stage"
		id = e7xx.E7XX_ConnectRS232(1, 57600)

		print "Initializing axes"
		e7xx.E7XX_INI(id, '134')

		print "initializing servos"
		err = e7xx.E7XX_SVO(id, '134', ctl.as_ctypes(ones(4, dtype=int32)))
		if err:
			print "Servos initialized OK"
		else:
			import sys
			sys.exit(e7xx.E7XX_GetError(id))
		svo = ctl.as_ctypes(ones(4, dtype=int32))
		err = e7xx.E7XX_qSVO(id, '134', svo)
		if err:
			print "Read servos OK"
		else:
			print e7xx.E7XX_GetError(id)
			time.sleep(5)
		
		while not(all(ctl.as_array(svo))):
			e7xx.E7XX_qSVO(id, '134', svo)
			print "Servo status: ", ctl.as_array(svo), ctl.as_array(svo).all()
			time.sleep(1)

	finally:
		return e7xx, id
Example No. 4
def test_cmesh1d_from_mesh1d():
    r"""Tests `from_mesh1d` of the `CMesh1D` class with a simple mesh.

      1   3
     / \ /
    0   2
    """

    node_x = np.array([0.0, 1.0, 2.0, 3.0], dtype=np.double)
    node_y = np.array([0.0, 1.0, 0.0, 1.0], dtype=np.double)
    edge_nodes = np.array([0, 1, 1, 2, 2, 3], dtype=np.int32)

    mesh1d = Mesh1d(node_x, node_y, edge_nodes)

    c_mesh1d = CMesh1d.from_mesh1d(mesh1d)

    # Get the numpy arrays from the ctypes object
    c_mesh1d_node_x = as_array(c_mesh1d.node_x, (4, ))
    c_mesh1d_node_y = as_array(c_mesh1d.node_y, (4, ))
    c_mesh1d_edge_nodes = as_array(c_mesh1d.edge_nodes, (6, ))

    # Assert data is correct
    assert_array_equal(c_mesh1d_node_x, node_x)
    assert_array_equal(c_mesh1d_node_y, node_y)
    assert_array_equal(c_mesh1d_edge_nodes, edge_nodes)

    assert c_mesh1d.num_nodes == 4
    assert c_mesh1d.num_edges == 3
Example No. 5
 def run(self):
   print "CameraStreamer.run(): [pid: {}, OS pid: {}]".format(self.pid, os.getpid())
   # * Interpret shared objects properly (NOTE this needs to happen in the child process)
   self.image = ctypeslib.as_array(self.imageObj.get_obj())  # get flattened image array
   self.image.shape = ctypeslib.as_array(self.imageShapeObj.get_obj())  # restore original shape
   
   # * Open camera and set desired capture properties
   self.camera = cv2.VideoCapture(0)
   if self.camera.isOpened():
     result_width = self.camera.set(cv.CV_CAP_PROP_FRAME_WIDTH, camera_frame_width)
     result_height = self.camera.set(cv.CV_CAP_PROP_FRAME_HEIGHT, camera_frame_height)
     print "CameraStreamer.run(): Camera frame size set to {width}x{height} (result: {result_width}, {result_height})".format(width=camera_frame_width, height=camera_frame_height, result_width=result_width, result_height=result_height)
   else:
     print "CameraStreamer.run(): Unable to open camera; aborting..."
     self.stayAliveObj.value = False
     return
   
   # * Keep reading frames into shared image until stopped or read error occurs
   while self.stayAliveObj.value:
     try:
       #print "CameraStreamer.run(): Frame # {}, stay alive? {}".format(self.frameCountObj.value, self.stayAliveObj.value)  # [debug]
       isOkay, frame = self.camera.read()
       if not isOkay:
         self.stayAliveObj.value = False
       self.frameCountObj.value = self.frameCountObj.value + 1
       self.image[:] = frame
     except KeyboardInterrupt:
       self.stayAliveObj.value = False
   
   # * Clean-up
   self.camera.release()
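The run() method above assumes imageObj, imageShapeObj, frameCountObj and stayAliveObj were allocated in the parent before the child process started. A plausible parent-side setup, offered as an assumption rather than the original project's code:

import ctypes
from multiprocessing import Array, Value

camera_frame_height, camera_frame_width, channels = 480, 640, 3  # assumed frame geometry

# Flat shared buffer for the pixels, plus a small shared buffer holding the shape
imageObj = Array(ctypes.c_uint8, camera_frame_height * camera_frame_width * channels)
imageShapeObj = Array(ctypes.c_int32, 3)
imageShapeObj[:] = [camera_frame_height, camera_frame_width, channels]

frameCountObj = Value(ctypes.c_int64, 0)
stayAliveObj = Value(ctypes.c_bool, True)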
Example No. 6
def analysis(pointxy, values, methodOfAnalysis):
    global file, singleValues, distances, Usethisarray, mean_dist
    t = values.split(" ")
    COL = int(t[1])
    ROW = int(t[0])
    list = []
    for r in range(ROW):
        for c in range(COL):
            list.append((r, c, pointxy[0], pointxy[1]))

    shared_array_base = Array(c_double, ROW * COL)
    singleValues = as_array(shared_array_base.get_obj())
    singleValues = singleValues.reshape(COL, ROW)

    shared_array = Array(c_double, ROW * COL * 20)
    distances = as_array(shared_array.get_obj())
    distances = distances.reshape(COL, ROW, 20)

    with ProcessPoolExecutor() as executor:
        if methodOfAnalysis == "strain":
            executor.map(multiprocessing_func, list)
        else:
            executor.map(intensity, list)
    entry.delete(0, tk.END)
    count = 0

    for i in singleValues:
        mean_dist += i
        count = count + 1
    temp = 0
    mean_dist = mean_dist / count
    for i in range(len(singleValues)):
        temp = singleValues[i]
        singleValues[i] = (temp**(-1) - mean_dist**(-1)) / (mean_dist**(-1))
Example No. 7
    def test_struct_array_pointer(self):
        from ctypes import c_int16, Structure, pointer

        class Struct(Structure):
            _fields_ = [('a', c_int16)]

        Struct3 = 3 * Struct

        c_array = (2 * Struct3)(
            Struct3(Struct(a=1), Struct(a=2), Struct(a=3)),
            Struct3(Struct(a=4), Struct(a=5), Struct(a=6))
        )

        expected = np.array([
            [(1,), (2,), (3,)],
            [(4,), (5,), (6,)],
        ], dtype=[('a', np.int16)])

        def check(x):
            assert_equal(x.dtype, expected.dtype)
            assert_equal(x, expected)

        # all of these should be equivalent
        check(as_array(c_array))
        check(as_array(pointer(c_array), shape=()))
        check(as_array(pointer(c_array[0]), shape=(2,)))
        check(as_array(pointer(c_array[0][0]), shape=(2, 3)))
Example No. 8
def test_populate_z_photosphere(clib, formal_integral_model, p):
    '''
    Test the case where p < r[0]
    That means we 'hit' all shells from inside to outside.
    '''
    func = clib.populate_z
    func.restype = ctypes.c_int64
    func.argtypes = [
        ctypes.POINTER(StorageModel),  # storage
        c_double,  # p
        ndpointer(dtype=np.float64),  # oz
        ndpointer(dtype=np.int64)  # oshell_id
    ]

    size = formal_integral_model.no_of_shells_i
    r_inner = as_array(formal_integral_model.r_inner_i, (size, ))
    r_outer = as_array(formal_integral_model.r_outer_i, (size, ))

    p = r_inner[0] * p
    oz = np.zeros_like(r_inner)
    oshell_id = np.zeros_like(oz, dtype=np.int64)

    N = func(formal_integral_model, p, oz, oshell_id)
    assert N == size

    ntest.assert_allclose(oshell_id, np.arange(0, size, 1))

    ntest.assert_allclose(oz, 1 - calculate_z(r_outer, p), atol=1e-5)
Example No. 9
    def K2y(self, X, X2, y):
        res = zeros(X.shape[0])
        if len(X.shape) == 1:
            X = X.reshape((X.shape[0], 1))
        if len(X2.shape) == 1:
            X2 = X2.reshape((X2.shape[0], 1))
        share_base = Array(ctypes.c_double, X.shape[0]*X.shape[1], lock=False)
        share = as_array(share_base)
        share = share.reshape(X.shape)
        share[:, :] = X

        share2_base = Array(ctypes.c_double, X2.shape[0]*X2.shape[1], lock=False)
        share2 = as_array(share2_base)
        share2 = share2.reshape(X2.shape)
        share2[:, :] = X2

        pool = Pool(self.num_proc, maxtasksperchild=50, initializer=initShared2, initargs=[share2, share])
        cs = pool.imap(para_func2, ((i, X2.shape, X.shape, self.d, self.cnum) for i in xrange(self.m)), 10)
        for c, c2 in cs:
            for cls in unique(c):
                if cls > -1:
                    res[c.flatten() == cls] += y[c2.flatten() == cls].sum()
        res /= self.m
        pool.close()
        pool.join()
        return res
Example No. 10
def get_equiv_atom_map(crystal_coordinate, rotation, translation,
                       index_reference_atom):
    """Wrapper for get_equiv_atom_map defined in equiv_map.c
    """
    libsym = ct.CDLL(os.path.dirname(__file__) + os.path.sep + 'libsymm.so',
                     mode=ct.RTLD_GLOBAL)

    libsym.get_equiv_atom_map.argtypes = [
        ct.c_int,
        npct.ndpointer(dtype=float, ndim=2, flags='C_CONTIGUOUS'), ct.c_int,
        npct.ndpointer(dtype=np.int32, ndim=3, flags='C_CONTIGUOUS'),
        npct.ndpointer(dtype=float, ndim=2, flags='C_CONTIGUOUS')
    ]

    libsym.get_equiv_atom_map.restype = ct.POINTER(ct.POINTER(ct.c_int))

    _equiv_atom_map = libsym.get_equiv_atom_map(len(crystal_coordinate),
                                                crystal_coordinate,
                                                len(rotation), rotation,
                                                translation,
                                                index_reference_atom)

    map_count = npct.as_array(_equiv_atom_map[0], (len(crystal_coordinate), ))
    max_count = max(map_count)

    equiv_atom_map = npct.as_array(
        _equiv_atom_map[1],
        (len(crystal_coordinate), len(crystal_coordinate), max_count))

    return equiv_atom_map
Example No. 11
def analysis(pointxy, values, methodOfAnalysis=""):
    global file, singleValues, distances
    t = values.split(" ")
    COL = int(t[1])
    ROW = int(t[0])

    list = []
    for r in range(ROW):
        for c in range(COL):
            list.append((r, c, pointxy[0], pointxy[1]))

    shared_array_base = Array(c_double, ROW * COL)
    singleValues = as_array(shared_array_base.get_obj())
    singleValues = singleValues.reshape(COL, ROW)

    shared_array = Array(c_double, ROW * COL * 50)
    distances = as_array(shared_array.get_obj())
    distances = distances.reshape(COL, ROW, 50)

    with ProcessPoolExecutor() as executor:
        if methodOfAnalysis == "strain":
            executor.map(multiprocessing_func, list)
        else:
            executor.map(intensity, list)
    entry.delete(0, tk.END)
    f = open("Distances", "w")
    w = writer(f)
    for i in distances:
        w.writerow(i)
    f.close()
    label1['text'] = label1['text'] + "File saved.\n"
    entry.delete(0, tk.END)
Example No. 12
    def _get_parameters(self):
        gx = POINTER(c_double)()
        nx = c_int()
        gy = POINTER(c_double)()
        ny = c_int()
        gz = POINTER(c_double)()
        nz = c_int()
        # Call C API to get grid parameters
        _dll.openmc_rectilinear_mesh_get_grid(self._index, gx, nx, gy, ny, gz,
                                              nz)

        # Convert grid parameters to Numpy arrays
        grid_x = as_array(gx, (nx.value, ))
        grid_y = as_array(gy, (ny.value, ))
        grid_z = as_array(gz, (nz.value, ))

        # Calculate lower_left, upper_right, width, and dimension from grid
        lower_left = np.array((grid_x[0], grid_y[0], grid_z[0]))
        upper_right = np.array((grid_x[-1], grid_y[-1], grid_z[-1]))
        dimension = np.array((nx.value - 1, ny.value - 1, nz.value - 1))
        width = np.zeros(list(dimension) + [3])

        for i, diff_x in enumerate(np.diff(grid_x)):
            for j, diff_y in enumerate(np.diff(grid_y)):
                for k, diff_z in enumerate(np.diff(grid_z)):
                    width[i, j, k, :] = diff_x, diff_y, diff_z

        return (lower_left, upper_right, dimension, width)
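As a side note, the triple loop that fills width above is equivalent to one broadcasted assignment; a small self-contained illustration with toy grids (not OpenMC code):

import numpy as np

grid_x = np.array([0.0, 1.0, 3.0])
grid_y = np.array([0.0, 2.0])
grid_z = np.array([0.0, 1.0, 2.0, 4.0])

dx, dy, dz = np.diff(grid_x), np.diff(grid_y), np.diff(grid_z)
# width[i, j, k] == (dx[i], dy[j], dz[k]), same as the nested loops
width = np.stack(np.meshgrid(dx, dy, dz, indexing="ij"), axis=-1)
print(width.shape)  # (2, 1, 3, 3)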
Example No. 13
    def test_struct_array_pointer(self):
        from ctypes import c_int16, Structure, pointer

        class Struct(Structure):
            _fields_ = [('a', c_int16)]

        Struct3 = 3 * Struct

        c_array = (2 * Struct3)(Struct3(Struct(a=1), Struct(a=2), Struct(a=3)),
                                Struct3(Struct(a=4), Struct(a=5), Struct(a=6)))

        expected = np.array([
            [(1, ), (2, ), (3, )],
            [(4, ), (5, ), (6, )],
        ],
                            dtype=[('a', np.int16)])

        def check(x):
            assert_equal(x.dtype, expected.dtype)
            assert_equal(x, expected)

        # all of these should be equivalent
        check(as_array(c_array))
        check(as_array(pointer(c_array), shape=()))
        check(as_array(pointer(c_array[0]), shape=(2, )))
        check(as_array(pointer(c_array[0][0]), shape=(2, 3)))
Example No. 14
def test_populate_z_shells(clib, formal_integral_model, p):
    '''
    Test the case where p > r[0]
    '''
    func = clib.populate_z
    func.restype = ctypes.c_int64
    func.argtypes = [
            ctypes.POINTER(StorageModel),   # storage
            c_double,                       # p
            ndpointer(dtype=np.float64),    # oz
            ndpointer(dtype=np.int64)       # oshell_id
            ]

    size = formal_integral_model.no_of_shells
    r_inner = as_array(formal_integral_model.r_inner, (size,))
    r_outer = as_array(formal_integral_model.r_outer, (size,))

    p = r_inner[0] + (r_outer[-1] - r_inner[0]) * p
    idx = np.searchsorted(r_outer, p, side='right')

    oz = np.zeros(size * 2)
    oshell_id = np.zeros_like(oz, dtype=np.int64)

    offset = size - idx

    expected_N = (offset) * 2
    expected_oz = np.zeros_like(oz)
    expected_oshell_id = np.zeros_like(oshell_id)

    # Calculated way to determine which shells get hit
    expected_oshell_id[:expected_N] = np.abs(
            np.arange(0.5, expected_N, 1) - offset) - 0.5 + idx

    expected_oz[0:offset] = 1 + calculate_z(
            r_outer[np.arange(size, idx, -1) - 1],
            p)
    expected_oz[offset:expected_N] = 1 - calculate_z(
            r_outer[np.arange(idx, size, 1)],
            p)

    N = func(
            formal_integral_model,
            p,
            oz,
            oshell_id
            )

    assert N == expected_N

    ntest.assert_allclose(
            oshell_id,
            expected_oshell_id
            )

    ntest.assert_allclose(
            oz,
            expected_oz,
            atol=1e-5
            )
Example No. 15
def test_populate_z_shells(clib, formal_integral_model, p):
    '''
    Test the case where p > r[0]
    '''
    func = clib.populate_z
    func.restype = ctypes.c_int64
    func.argtypes = [
            ctypes.POINTER(StorageModel),   # storage
            c_double,                       # p
            ndpointer(dtype=np.float64),    # oz
            ndpointer(dtype=np.int64)       # oshell_id
            ]

    size = formal_integral_model.no_of_shells_i
    r_inner = as_array(formal_integral_model.r_inner_i, (size,))
    r_outer = as_array(formal_integral_model.r_outer_i, (size,))

    p = r_inner[0] + (r_outer[-1] - r_inner[0]) * p
    idx = np.searchsorted(r_outer, p, side='right')

    oz = np.zeros(size * 2)
    oshell_id = np.zeros_like(oz, dtype=np.int64)

    offset = size - idx

    expected_N = (offset) * 2
    expected_oz = np.zeros_like(oz)
    expected_oshell_id = np.zeros_like(oshell_id)

    # Calculated way to determine which shells get hit
    expected_oshell_id[:expected_N] = np.abs(
            np.arange(0.5, expected_N, 1) - offset) - 0.5 + idx

    expected_oz[0:offset] = 1 + calculate_z(
            r_outer[np.arange(size, idx, -1) - 1],
            p)
    expected_oz[offset:expected_N] = 1 - calculate_z(
            r_outer[np.arange(idx, size, 1)],
            p)

    N = func(
            formal_integral_model,
            p,
            oz,
            oshell_id
            )

    assert N == expected_N

    ntest.assert_allclose(
            oshell_id,
            expected_oshell_id
            )

    ntest.assert_allclose(
            oz,
            expected_oz,
            atol=1e-5
            )
Example No. 16
def calc_correlation(params):
    global POW_trace, POW_hypo, CORRELATION
    a, b, c = params
    POW_hypo = ctypeslib.as_array(POW_hypo)
    POW_trace = ctypeslib.as_array(POW_trace)
    CORRELATION = ctypeslib.as_array(CORRELATION)
    # Use numpy corrcoef() function to calculate correlation coefficient
    CORRELATION[a][b:c] = corrcoef(POW_hypo[a], POW_trace[b:c])[0][1]
Example No. 17
def getCorrcoef(params):
    global PC_a, PC_h, CC
    (i, j1, j2) = params
    PC_h = ctypeslib.as_array(PC_h)
    PC_a = ctypeslib.as_array(PC_a)
    CC = ctypeslib.as_array(CC)
    cor = corrcoef(PC_h[i], PC_a[j1:j2])[0][1]
    CC[i][j1:j2] = cor
Example No. 18
 def _get_parameters(self):
     ll = POINTER(c_double)()
     ur = POINTER(c_double)()
     w = POINTER(c_double)()
     n = c_int()
     _dll.openmc_regular_mesh_get_params(self._index, ll, ur, w, n)
     return (as_array(ll, (n.value, )), as_array(ur, (n.value, )),
             as_array(w, (n.value, )))
Example No. 19
 def wrapper(n, x, new_x, m, g_ptr, user_data):
     try:
         x_array = as_array(x, (n, ))
         g_array = as_array(g_ptr, (m, ))
         return g(x_array, new_x, g_array)
     except BaseException as e:
         if callable(handler):
             handler(e)
         return 0
Example No. 20
 def wrapper(n, x, new_x, obj_value, user_data):
     try:
         x_array = as_array(x, (n, ))
         obj_value_array = as_array(obj_value, ())
         return f(x_array, new_x, obj_value_array)
     except BaseException as e:
         if callable(handler):
             handler(e)
         return 0
Example No. 21
 def callback_(mappingPtr, centerPtr):
     mapping = ctl.as_array(mappingPtr, (len(points), ))
     nclust = np.max(mapping) + 1
     centers = ctl.as_array(centerPtr, (nclust, numdims))
     if cb is not None:
         r = cb(mapping, centers)
         if r is None:
             return 1
         return r
     return 1
Example No. 22
    def test_array(self):
        from ctypes import c_int

        pair_t = c_int * 2
        a = as_array(pair_t(1, 2))
        assert_equal(a.shape, (2, ))
        assert_array_equal(a, np.array([1, 2]))
        a = as_array((pair_t * 3)(pair_t(1, 2), pair_t(3, 4), pair_t(5, 6)))
        assert_equal(a.shape, (3, 2))
        assert_array_equal(a, np.array([[1, 2], [3, 4], [5, 6]]))
Example No. 23
    def test_array(self):
        from ctypes import c_int

        pair_t = c_int * 2
        a = as_array(pair_t(1, 2))
        assert_equal(a.shape, (2,))
        assert_array_equal(a, np.array([1, 2]))
        a = as_array((pair_t * 3)(pair_t(1, 2), pair_t(3, 4), pair_t(5, 6)))
        assert_equal(a.shape, (3, 2))
        assert_array_equal(a, np.array([[1, 2], [3, 4], [5, 6]]))
Example No. 24
 def wrapper(n, x, new_x, m, nele_jac, iRow, jCol, values, user_data):
     try:
         x_array = as_array(x, (n, )) if x else None
         i_array = as_array(iRow, (nele_jac, )) if iRow else None
         j_array = as_array(jCol, (nele_jac, )) if jCol else None
         values_array = as_array(values, (nele_jac, )) if values else None
         return jac_g(x_array, new_x, i_array, j_array, values_array)
     except BaseException as e:
         if callable(handler):
             handler(e)
         return 0
Example No. 25
        def worker_fit(id_w, num_workers, X_w, y_w, weights_w, shape, indices,
                       counter, start_barrier, params_w):
            assert params_w.regularizer is not None
            # reconstruct numpy shared array
            num_samples, num_features = shape
            weights_w = ctypeslib.as_array(weights_w)
            weights_w.shape = (num_features, )

            if not isspmatrix(X_w):
                X_w = ctypeslib.as_array(X_w)
                X_w.shape = (num_samples, num_features)
                y_w = ctypeslib.as_array(y_w)
                y_w.shape = (num_samples, )

            memory = GradientMemory(take_k=params_w.take_k,
                                    take_top=params_w.take_top,
                                    with_memory=params_w.with_memory)

            start_barrier.wait()
            while True:
                with counter.get_lock():
                    idx = counter.value
                    counter.value += 1

                if idx >= num_samples * params_w.num_epoch:
                    break

                sample_idx = indices[idx]
                epoch = idx // num_samples
                iteration = idx % num_samples
                lr = self.lr(epoch, iteration, num_samples, num_features)

                x = X_w[sample_idx]

                if isspmatrix(x):
                    minus_grad = -1. * params_w.regularizer * weights_w
                    sparse_minus_grad = y[sample_idx] * x * sigmoid(
                        -y[sample_idx] * x.dot(weights_w).squeeze(0))
                    minus_grad[
                        sparse_minus_grad.indices] += sparse_minus_grad.data

                else:
                    minus_grad = y[sample_idx] * x * sigmoid(
                        -y[sample_idx] * x.dot(weights_w))
                    minus_grad -= params_w.regularizer * weights_w

                sparse = params_w.take_k and (params_w.take_k < num_features)
                lr_minus_grad = memory(lr * minus_grad, sparse=sparse)

                if sparse:
                    weights_w[lr_minus_grad[0]] += lr_minus_grad[1]
                else:
                    weights_w += lr_minus_grad
Example No. 26
        def callback_(startA, numspotsA, countsPtr, indicesPtr, numIndices):
            #print(f"num indices: {numIndices}. numspotsA: {numspotsA}")
            counts = ctl.as_array(countsPtr, (numspotsA, ))
            if numIndices == 0:
                indices = np.zeros(0, dtype=np.int32)
            else:
                indices = ctl.as_array(indicesPtr, (numIndices, ))

            r = callback(startA, counts, indices)
            if r is None:
                return 1
            return r
Example No. 27
    def eval_wrapper(instance, x, g, n, step):
        """Wrapper function to handle converting to numpy data structures"""
        x = npct.as_array(x, (n,))
        g = npct.as_array(g, (n,))

        if instance:
            instance = instance.contents
        else:
            instance = None

        fx, gret = evaluate(instance, x, n, step)
        g[:] = gret
        return fx
Example No. 28
 def wrapper(n, x, new_x, obj_factor, m, mult, new_mult, nele_hess, iRow,
             jCol, values, user_data):
     try:
         x_array = as_array(x, (n, )) if x else None
         mult_array = as_array(mult, (m, )) if mult else None
         i_array = as_array(iRow, (nele_hess, )) if iRow else None
         j_array = as_array(jCol, (nele_hess, )) if jCol else None
         values_array = as_array(values, (nele_hess, )) if values else None
         return h(x_array, new_x, obj_factor, mult_array, new_mult, i_array,
                  j_array, values_array)
     except BaseException as e:
         if callable(handler):
             handler(e)
         return 0
Example No. 29
	def dumper(nSamples,nlive,nPar,
			   physLive,posterior,paramConstr,
			   maxLogLike,logZ,logZerr,nullcontext):
		if dump_callback:
			# It's not clear to me what the desired PyMultiNest dumper callback
			# syntax is... but this should pass back the right numpy arrays,
			# without copies. Untested!
			pc =  as_array(paramConstr,shape=(nPar,4))
			
			dump_callback(nSamples,nlive,nPar,
				as_array(physLive,shape=(nPar+1,nlive)).T,
				as_array(posterior,shape=(nPar+2,nSamples)).T, 
				(pc[:,0],pc[:,1],pc[:,2],pc[:,3]), # (mean,std,bestfit,map)
				maxLogLike,logZ,logZerr, 0)
Example No. 30
def attack():
    global SAMPLE_SIZE, KEY_SIZE, TRACE_NUM, POW_hypo, POW, trace, CORRELATION

    key1 = ""
    key2 = ""

    while True:

        # Initialise memory for parallel processing
        hypo_arr = ctypeslib.as_array(
            multiprocessing.Array(ctypes.c_float,
                                  KEY_SIZE * SAMPLE_SIZE).get_obj())
        POW_hypo = hypo_arr.reshape(KEY_SIZE, SAMPLE_SIZE)

        trace_arr = ctypeslib.as_array(
            multiprocessing.Array(ctypes.c_float,
                                  SAMPLE_SIZE * TRACE_NUM).get_obj())
        POW_trace = trace_arr.reshape(TRACE_NUM, SAMPLE_SIZE)

        corrco_arr = ctypeslib.as_array(
            multiprocessing.Array(ctypes.c_float,
                                  KEY_SIZE * TRACE_NUM).get_obj())
        CORRELATION = corrco_arr.reshape(KEY_SIZE, TRACE_NUM)

        # Generate ciphertext inputs
        ciphertexts = gen_ciphers()
        # Acquire oracle decryptions and power consumption traces
        traces, plaintexts = gen_samples(ciphertexts)
        traces = next_trace_set(traces)

        # Attack Key 2
        print "---- KEY 2 ATTACK ----\n"
        key2 = attack_key2(ciphertexts, traces)
        print "KEY 2: " + key2

        encrypt_tweak_vals(ciphertexts, key2)

        # Attack Key 1
        print "---- KEY 1 ATTACK ----\n"
        key1 = attack_key1(plaintexts, traces)
        print "KEY 1: " + key1

        if not XTS_Validate(key1, key2):
            # Double sample size
            print "Invalid key recovered, attempting again with larger sample size"
            SAMPLE_SIZE <<= 1
        else:
            break

    return key1, key2
Example No. 31
    def dumper(nSamples,nlive,nPar,
               physLive,posterior,paramConstr,
               maxLogLike,logZ,logZerr,nullcontext):
        if dump_callback:
            # It's not clear what the desired MultiNest dumper callback
            # syntax is... but this should pass back the right numpy arrays,
            # without copies. Untested!
            pc =  as_array(paramConstr,shape=(nPar,4))

            dump_callback(nSamples,nlive,nPar,
                as_array(physLive,shape=(nPar+1,nlive)).T,
                as_array(posterior,shape=(nPar+2,nSamples)).T,
                (pc[0,:],pc[1,:],pc[2,:],pc[3,:]), # (mean,std,bestfit,map)
                maxLogLike,logZ,logZerr)
Example No. 32
def _initSharedMemory():
    global PC_a, PC_h, CC
    warnings.filterwarnings("ignore")

    PC_h_base = multiprocessing.Array(ctypes.c_float, KEY_RANGE * SAMPLES)
    PC_h = ctypeslib.as_array(PC_h_base.get_obj())
    PC_h = PC_h.reshape(KEY_RANGE, SAMPLES)

    PC_a_base = multiprocessing.Array(ctypes.c_float, SAMPLES * TRACE_NUM)
    PC_a = ctypeslib.as_array(PC_a_base.get_obj())
    PC_a = PC_a.reshape(TRACE_NUM, SAMPLES)

    CC_base = multiprocessing.Array(ctypes.c_float, KEY_RANGE * TRACE_NUM)
    CC = ctypeslib.as_array(CC_base.get_obj())
    CC = CC.reshape(KEY_RANGE, TRACE_NUM)
Example No. 33
    def test_pointer(self):
        from ctypes import c_int, cast, POINTER

        p = cast((c_int * 10)(*range(10)), POINTER(c_int))

        a = as_array(p, shape=(10,))
        assert_equal(a.shape, (10,))
        assert_array_equal(a, np.arange(10))

        a = as_array(p, shape=(2, 5))
        assert_equal(a.shape, (2, 5))
        assert_array_equal(a, np.arange(10).reshape((2, 5)))

        # shape argument is required
        assert_raises(TypeError, as_array, p)
Example No. 34
    def test_pointer(self):
        from ctypes import c_int, cast, POINTER

        p = cast((c_int * 10)(*range(10)), POINTER(c_int))

        a = as_array(p, shape=(10, ))
        assert_equal(a.shape, (10, ))
        assert_array_equal(a, np.arange(10))

        a = as_array(p, shape=(2, 5))
        assert_equal(a.shape, (2, 5))
        assert_array_equal(a, np.arange(10).reshape((2, 5)))

        # shape argument is required
        assert_raises(TypeError, as_array, p)
Example No. 35
def global_tallies():
    """Mean and standard deviation of the mean for each global tally.

    Returns
    -------
    list of tuple
        For each global tally, a tuple of (mean, standard deviation)

    """
    ptr = POINTER(c_double)()
    _dll.openmc_global_tallies(ptr)
    array = as_array(ptr, (4, 3))

    # Get sum, sum-of-squares, and number of realizations
    sum_ = array[:, 1]
    sum_sq = array[:, 2]
    n = num_realizations()

    # Determine mean
    if n > 0:
        mean = sum_ / n
    else:
        mean = sum_.copy()

    # Determine standard deviation
    nonzero = np.abs(mean) > 0
    stdev = np.empty_like(mean)
    stdev.fill(np.inf)
    if n > 1:
        stdev[nonzero] = np.sqrt((sum_sq[nonzero]/n - mean[nonzero]**2)/(n - 1))

    return list(zip(mean, stdev))
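The standard-deviation-of-the-mean formula used above can be sanity-checked against NumPy with illustrative numbers (not OpenMC output):

import numpy as np

x = np.array([1.0, 2.0, 4.0, 7.0])  # pretend per-realization values
n = x.size
sum_, sum_sq = x.sum(), (x ** 2).sum()
mean = sum_ / n
stdev = np.sqrt((sum_sq / n - mean ** 2) / (n - 1))  # same algebra as the snippet
assert np.isclose(stdev, x.std(ddof=1) / np.sqrt(n))  # i.e. the standard error of the mean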
Example No. 36
def source_bank():
    """Return source bank as NumPy array

    Returns
    -------
    numpy.ndarray
        Source sites

    """
    # Get pointer to source bank
    ptr = POINTER(_SourceSite)()
    n = c_int64()
    _dll.openmc_source_bank(ptr, n)

    try:
        # Convert to numpy array with appropriate datatype
        bank_dtype = np.dtype(_SourceSite)
        return as_array(ptr, (n.value,)).view(bank_dtype)

    except ValueError as err:
        # If a known numpy error was raised (github.com/numpy/numpy/issues
        # /14214), re-raise with a more helpful error message.
        if len(err.args) == 0:
            raise err
        if err.args[0].startswith('invalid shape in fixed-type tuple'):
            raise ValueError('The source bank is too large to access via '
                'openmc.lib with this version of numpy.  Use a different '
                'version of numpy or reduce the bank size (fewer particles '
                'per MPI process) so that it is smaller than 2 GB.') from err
        else:
            raise err
Example No. 37
def make_numpy_item((v, shape)):
    try:
        v = ctypeslib.as_array(v)
        v.shape = shape
    except:
        pass
    return v
Example No. 38
def cMatrixToNumpy(x):
    """
    Convert a ctypes 2d array (or matrix) into a numpy array for python use
    :param x: thing to convert
    :return: numpy.ndarray
    """
    return numpc.as_array(x).copy()
Example No. 39
def global_tallies():
    """Mean and standard deviation of the mean for each global tally.

    Returns
    -------
    list of tuple
        For each global tally, a tuple of (mean, standard deviation)

    """
    ptr = POINTER(c_double)()
    _dll.openmc_global_tallies(ptr)
    array = as_array(ptr, (4, 3))

    # Get sum, sum-of-squares, and number of realizations
    sum_ = array[:, 1]
    sum_sq = array[:, 2]
    n = num_realizations()

    # Determine mean
    if n > 0:
        mean = sum_ / n
    else:
        mean = sum_.copy()

    # Determine standard deviation
    nonzero = np.abs(mean) > 0
    stdev = np.empty_like(mean)
    stdev.fill(np.inf)
    if n > 1:
        stdev[nonzero] = np.sqrt(
            (sum_sq[nonzero] / n - mean[nonzero]**2) / (n - 1))

    return list(zip(mean, stdev))
Example No. 40
 def get_dm_list(self):
     ndms = self.get_dm_count()
     func = lib.dedisp_get_dm_list
     c_float_p = C.POINTER(C.c_float)
     func.restype = c_float_p
     array_pointer = C.cast(func(self.plan),c_float_p)
     return as_array(array_pointer,shape=(ndms,)).copy()
Example No. 41
File: piio.py Project: carlodef/iio
def read(filename):
   '''
   IIO: numpyarray = read(filename)
   '''
   from numpy import array, zeros, ctypeslib
   from ctypes import c_int, c_float, c_void_p, POINTER, cast, byref

   iioread = libiio.iio_read_image_float_vec
   
   w=c_int()
   h=c_int()
   nch=c_int()
   
   iioread.restype = c_void_p  # it's like this
   tptr = iioread(str(filename),byref(w),byref(h),byref(nch))
   c_float_p = POINTER(c_float)       # define a new type of pointer
   ptr = cast(tptr, c_float_p)
   #print w,h,nch
   
   #nasty read data into array using buffer copy
   #http://stackoverflow.com/questions/4355524/getting-data-from-ctypes-array-into-numpy
   #http://docs.scipy.org/doc/numpy/reference/generated/numpy.frombuffer.html
   
   # this numpy array uses the memory provided by the c library, which will be freed
   data_tmp = ctypeslib.as_array( ptr, (h.value,w.value,nch.value) )
   # so we copy it to the definitive array before the free
   data = data_tmp.copy()
   
   # free the memory
   iiofreemem = libiio.freemem
   iiofreemem(ptr)
   return data
Example No. 42
def read(filename):
    '''
   IIO: numpyarray = read(filename)
   '''
    from numpy import array, zeros, ctypeslib
    from ctypes import c_int, c_float, c_void_p, POINTER, cast, byref

    iioread = libiio.iio_read_image_float_vec

    w = c_int()
    h = c_int()
    nch = c_int()

    iioread.restype = c_void_p  # it's like this
    tptr = iioread(str(filename), byref(w), byref(h), byref(nch))
    c_float_p = POINTER(c_float)  # define a new type of pointer
    ptr = cast(tptr, c_float_p)
    #print w,h,nch

    #nasty read data into array using buffer copy
    #http://stackoverflow.com/questions/4355524/getting-data-from-ctypes-array-into-numpy
    #http://docs.scipy.org/doc/numpy/reference/generated/numpy.frombuffer.html

    # this numpy array uses the memory provided by the c library, which will be freed
    data_tmp = ctypeslib.as_array(ptr, (h.value, w.value, nch.value))
    # so we copy it to the definitive array before the free
    data = data_tmp.copy()

    # free the memory
    iiofreemem = libiio.freemem
    iiofreemem(ptr)
    return data
Example No. 43
def cMatrixToNumpy(x):
    """
    Convert a ctypes 2d array (or matrix) into a numpy array for python use
    :param x: thing to convert
    :return: numpy.ndarray
    """
    return numpc.as_array(x)
Example No. 44
def borrow_memory(param, memory):
    """
    Spawn different processes with the shared memory
    of your theano model's variables.

    Inputs:
    -------

    param          TensorSharedVariable : the Theano shared variable where
                                          shared memory should be used instead.
    memory multiprocessing.sharedctypes : the memory shared across processes (e.g.
                                          from `wrap_params`)

    Outputs:
    --------

    None

    Usage
    -----

    For each process in the target function run the theano_borrow_memory
    method on the parameters you want to have share memory across processes.

    In this example we have a model called "mymodel" with parameters stored in
    a list called "params". We loop through each theano shared variable and
    call `theano_borrow_memory` on it to share memory across processes.

        def spawn_model(path, wrapped_params):
            # prevent recompilation and arbitrary locks
            theano.config.reoptimize_unpickled_function = False
            theano.gof.compilelock.set_lock_status(False)

            # load your model from its pickled instance (from path)
            mymodel = MyModel.load(path)
            
            # for each parameter in your model
            # apply the borrow memory strategy to replace
            # the internal parameter's memory with the
            # across-process memory
            for param, memory in zip(mymodel.params, wrapped_params):
                borrow_memory(param, memory)
            
            # acquire your dataset (either through some smart shared memory
            # or by reloading it for each process)
            dataset, dataset_labels = acquire_dataset()
            
            # then run your model forward in this process
            epochs = 20
            for epoch in range(epochs):
                model.update_fun(dataset, dataset_labels)

    See `borrow_all_memories` for list usage.

    """

    param_value = ctypeslib.as_array(memory)
    param_value.shape = param.get_value(True,True).shape
    param.set_value(param_value, borrow=True)
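The memory argument is produced elsewhere (the docstring refers to wrap_params, which is not shown here); a hypothetical sketch of such an allocation, an assumption rather than the project's actual helper:

import numpy as np
from numpy import ctypeslib
from multiprocessing import sharedctypes


def wrap_param_sketch(param):
    # Hypothetical helper: allocate unlocked shared memory sized like the
    # parameter and copy its current value in, so child processes can alias it.
    value = np.ascontiguousarray(param.get_value(borrow=True))
    shared = sharedctypes.RawArray("d", int(value.size))
    ctypeslib.as_array(shared)[:] = value.ravel()
    return shared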
Example No. 45
def mat_rowrange_mul(args):

    # a little ugly, but allows running with a Pool
    # which accept only 1 argument
    a_row_domain, a_shape, b_shape, shared_a, shared_b, shared_c = args

    # access shared memory object as numpy array, set dimensions
    nd_c = ctypeslib.as_array(shared_c).reshape((a_shape[0],b_shape[1]))
    nd_a = ctypeslib.as_array(shared_a).reshape(a_shape)
    nd_b = ctypeslib.as_array(shared_b).reshape(b_shape)

    # write answer to shared memory
    # it would be better if numpy.dot could write "in-place"
    nd_c[a_row_domain[0]:a_row_domain[1],:] = \
    numpy.dot(nd_a[a_row_domain[0]:a_row_domain[1],:],nd_b)

    return None
Example No. 46
    def K2(self, X, X2):
        #if X.ndim == 0:
        #    X = X.reshape((1, 1))
        #if X2.ndim == 0:
        #    X2 = X2.reshape((1, 1))
        if X.ndim == 1:
            X = X.reshape((X.shape[0], 1))
        if X2.ndim == 1:
            X2 = X2.reshape((X2.shape[0], 1))
        if X.ndim == 0:
            Xsh = 1
            Xsh2 = 1
        else:
            Xsh = X.shape[0]
            Xsh2 = X.shape[1]
        if X2.ndim == 0:
            X2sh = 1
            X2sh2 = 1
        else:
            X2sh = X2.shape[0]
            X2sh2 = X2.shape[1]
        res = zeros((Xsh, X2sh))
        share_base = Array(ctypes.c_double, Xsh*Xsh2, lock=False)
        share = as_array(share_base)
        share = share.reshape((Xsh, Xsh2))
        share[:, :] = X

        share2_base = Array(ctypes.c_double, X2sh*X2sh2, lock=False)
        share2 = as_array(share2_base)
        share2 = share2.reshape(X2.shape)
        share2[:, :] = X2
        pool = Pool(self.num_proc, maxtasksperchild=50, initializer=initShared2, initargs=[share2, share])
        cs = pool.imap(para_func2, ((i, X2.shape, X.shape, self.d, self.cnum) for i in xrange(self.m)), 10)
        for c, c2 in cs:
            for i, c_v in enumerate(c):
                for j, c_v2 in enumerate(c2):
                    if c_v == c_v2 and c_v != -1:
                        res[i, j] += 1.
        res /= self.m
        pool.close()
        pool.join()
        if X.ndim == 0:
            res = res.flatten()
        return res
Example No. 47
def as_numpy( data ):
    '''maps data content as a numpy array'''

    ptr, shape, typename = data.getValueVoidPtr()

    type = ctypeFromName.get(typename,None)
    if not type: raise Exception("can't map data of type " + typename)

    array = ctypes.cast( ctypes.c_void_p(ptr), ctypes.POINTER(type))
    return ctypeslib.as_array(array, shape )
Example No. 48
File: pool.py Project: jpzk/evopy
def make_numpy_item((v, shape)):
    if shape is not None:
        try:
            v = ctypeslib.as_array(v)
            v.shape = shape
            log_debug('converting common array to numpy array')
        except:
            log_debug('NOT converting common array to numpy array')
            pass
    return v
Example No. 49
def main():
    ra = sharedctypes.RawArray("i", 4)
    arr = ctypeslib.as_array(ra)
    arr.shape = (2, 2)
    p1 = Process(target=fill_arr, args=(arr[:1, :], 1))
    p2 = Process(target=fill_arr, args=(arr[1:, :], 2))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
    print(arr)
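fill_arr is not shown in this snippet; a plausible minimal version (an assumption, not the original helper):

def fill_arr(view, value):
    # Write into the shared-memory slice in place; with a forked child the
    # slice aliases the parent's RawArray, so the result is visible after join().
    view[:] = value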
Example No. 50
 def run(self):
   print "StreamViewer.run(): [pid: {}, OS pid: {}]".format(self.pid, os.getpid())
   # * Interpret shared objects properly (NOTE this needs to happen in the child process)
   self.image = ctypeslib.as_array(self.imageObj.get_obj())  # get flattened image array
   self.image.shape = ctypeslib.as_array(self.imageShapeObj.get_obj())  # restore original shape
   
   print "StreamViewer.run(): Starting display loop [Esc or Q to quit]..."
   while self.stayAliveObj.value:
     try:
       if self.frameCountObj.value != self.lastFrameCount:
         cv2.imshow("Image", self.image)
         self.lastFrameCount = self.frameCountObj.value
       key = cv2.waitKey(frame_delay)
       if key != -1:
         keyCode = key & 0x00007f
         keyChar = chr(keyCode)
         if keyCode == 0x1b or keyChar == 'q':
             self.stayAliveObj.value = False
     except KeyboardInterrupt:
       self.stayAliveObj.value = False
Example No. 51
def move_and_image(mmc, e7xx, id, coords, exptime, image_queue, **kwargs):
    """
    move_and_image moves the stage to the given coordinates, takes an
    exposure for exptime seconds, then adds it to image_queue.
    """

    DEBUG = False

    for key in kwargs:
        if key == "DEBUG":
            DEBUG = True
        else:
            raise TypeError, 'Unknown argument "%s"' % key

    if DEBUG:
        print "Moving to ", coords
    err = e7xx.E7XX_MOV(id, "14", ctl.as_ctypes(array(coords, dtype=float)))

    if err:
        print "Moved OK"
    else:
        err = e7xx.E7XX_GetError(id)
        print err

    time.sleep(0.03)
    if DEBUG:
        res = ctl.as_ctypes(empty(4, dtype=float))
        e7xx.E7XX_qMOV(id, "14", res)
        print "Moved to ", ctl.as_array(res)

    noImage = True

    while noImage:
        try:
            if image_queue.qsize() < 1000:
                if DEBUG:
                    print "Snapping Image"
                mmc.snapImage()
                im1 = mmc.getImage()
                if DEBUG:
                    print "Got image"
                image_queue.put(im1)
                if DEBUG:
                    print "Queueing image"
                noImage = False
                if DEBUG:
                    print "Leaving Loop"
        except MemoryError:
            if DEBUG:
                print "Memory Error.  Going to sleep"
            time.sleep(1)
    if DEBUG:
        print "Done"
Example No. 52
def AsDataStream(arr):
    """
    copy numpy array to Ogre.MemoryDataStream that can be used in Ogre
    @param arr: some numpy array
    """
    size = int(np.prod(arr.shape) * arr.dtype.itemsize)
    ret = Ogre.MemoryDataStream(size)
    tp = ctypes.POINTER(ctypes.c_ubyte)
    np_view = npc.as_array(ctypes.cast(int(ret.getPtr()), tp), (size, ))
    np_view[:] = arr.ravel().view(np.ubyte)
    
    return ret
Example No. 53
def test_populate_z_photosphere(clib, formal_integral_model, p):
    '''
    Test the case where p < r[0]
    That means we 'hit' all shells from inside to outside.
    '''
    func = clib.populate_z
    func.restype = ctypes.c_int64
    func.argtypes = [
            ctypes.POINTER(StorageModel),   # storage
            c_double,                       # p
            ndpointer(dtype=np.float64),    # oz
            ndpointer(dtype=np.int64)       # oshell_id
            ]

    size = formal_integral_model.no_of_shells_i
    r_inner = as_array(formal_integral_model.r_inner_i, (size,))
    r_outer = as_array(formal_integral_model.r_outer_i, (size,))

    p = r_inner[0] * p
    oz = np.zeros_like(r_inner)
    oshell_id = np.zeros_like(oz, dtype=np.int64)

    N = func(
            formal_integral_model,
            p,
            oz,
            oshell_id
            )
    assert N == size

    ntest.assert_allclose(
            oshell_id,
            np.arange(0, size, 1)
            )

    ntest.assert_allclose(
            oz,
            1 - calculate_z(r_outer, p),
            atol=1e-5
            )
Example No. 54
    def __call__(self, *args):
        input_data,component_memberships,loglikelihoods,num_components,num_dimensions,num_events,min_iters, max_iters,cvtype, ret_likelihood = args
        #print input_data
        input_data =input_data.ctypes.data_as(POINTER(c_float))
        component_memberships = component_memberships.ctypes.data_as(POINTER(c_float))
        loglikelihoods = loglikelihoods.ctypes.data_as(POINTER(c_float))

        #return value
        ret_likelihood = c_float()
        ret_means = pointer(c_float())
        ret_covar = pointer(c_float())
        self._c_function(input_data,component_memberships,loglikelihoods,num_components,num_dimensions,num_events,min_iters, max_iters,cvtype, byref(ret_likelihood),byref(ret_means),byref(ret_covar))

        return ret_likelihood.value,as_array(ret_means,shape=(num_components* num_dimensions,)),as_array(ret_covar,shape=(num_components* num_dimensions* num_dimensions,))
Example No. 55
def kernel(A):
    (arr, p) = pack_matrix(A)
    kern = ffpack.kernel
    kern.restype = ctypes.POINTER(ctypes.c_int)
    kern.argtypes = [ndpointer(ctypes.c_int), ctypes.c_size_t,
                    ctypes.c_size_t, ctypes.c_int,
                    ctypes.POINTER(ctypes.c_size_t)]
    kernel_size = ctypes.c_size_t(0)
    K_p = kern(arr, A.row, A.column, p,
            ctypes.byref(kernel_size))
    dim = kernel_size.value
    if dim == 0: return None
    k_arr = as_array(K_p, (A.column, dim))
    K = unpack_matrix((k_arr, p))
    ffpack.free_k(K_p)
    return K
Example No. 56
    def worker(id, job):
        """ worker function for MP """

        S = ctypeslib.as_array(S_ctypes)
        S.shape = shape

        for i in job:
            for j in xrange(n_oxygen):

                N = scan_nitrogen[i]
                O = scan_oxygen[j]
                param = (N, O)

                hb_energies = calculate_hydrogen(param, shb)
                param_energies = shb_pm6 + hb_energies
                param_rmsd = rmsd(shb_lib, param_energies)
                S[i, j] = param_rmsd
Example No. 57
def worker(S, shape, i, j, A, C, conn):
    S = ctypeslib.as_array(S)
    S.shape = shape
    S = S[:, i:j]
    sys.stdout.flush()
    while True:
        try:
            job = conn.recv()
        except EOFError:
            job = None
        if job is None:
            break
        S[:] = dot(A, S)
        if C is not None:
            add(S, C, S)
        conn.send(True)
    conn.close()
Example No. 58
def source_bank():
    """Return source bank as NumPy array

    Returns
    -------
    numpy.ndarray
        Source sites

    """
    # Get pointer to source bank
    ptr = POINTER(_Bank)()
    n = c_int64()
    _dll.openmc_source_bank(ptr, n)

    # Convert to numpy array with appropriate datatype
    bank_dtype = np.dtype(_Bank)
    return as_array(ptr, (n.value,)).view(bank_dtype)
Example No. 59
    def get_matter_transfer_data(self):
        """
        Get matter transfer function data and sigma8 for calculated results.

        :return: :class:`.MatterTransferData` instance holding output arrays (copies, not pointers)
        """
        if not self.Params.WantTransfer:
            raise CAMBError("must have Params.WantTransfer to get matter transfers and power")

        cdata = _MatterTransferData()
        CAMBdata_mattertransferdata(self._key, byref(cdata))
        data = MatterTransferData()
        data.nq = cdata.num_q_trans
        data.q = nplib.as_array(cdata.q_trans, shape=(data.nq,))
        data.sigma_8 = fortran_array(cdata.sigma_8, cdata.sigma_8_size)
        data.sigma2_vdelta_8 = fortran_array(cdata.sigma2_vdelta_8, cdata.sigma2_vdelta_8_size)
        data.transfer_data = fortran_array(cdata.TransferData, cdata.TransferData_size, dtype=np.float32)
        return data