Example #1
    def get_dc_joystick_parameters(self):
        """
        Returns DC joystick parameters.

        Returns
        -------
        out : tuple
            (maximum velocity lo, maximum velocity hi, acceleration lo,
             acceleration hi, direction sense)
            direction sense:
            - DC_JS_DIRSENSE_POS = 1
            - DC_JS_DIRSENSE_NEG = 2
        """
        maximum_velocity_lo = ctypes.c_float()
        maximum_velocity_hi = ctypes.c_float()
        acceleration_lo = ctypes.c_float()
        acceleration_hi = ctypes.c_float()
        direction_sense = ctypes.c_long()
        err_code = _lib.MOT_GetDCJoystickParams(self._serial_number,
                ctypes.byref(maximum_velocity_lo),
                ctypes.byref(maximum_velocity_hi),
                ctypes.byref(acceleration_lo),
                ctypes.byref(acceleration_hi),
                ctypes.byref(direction_sense))
        if (err_code != 0):
            raise Exception("Getting DC joystick parameters failed: %s" %
                    _get_error_text(err_code))
        return (maximum_velocity_lo.value, 
                maximum_velocity_hi.value,
                acceleration_lo.value,
                acceleration_hi.value,
                direction_sense.value
               )
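A minimal usage sketch for the method above (hypothetical: `stage` stands for an already-initialized instance of the motor class this method belongs to):

max_vel_lo, max_vel_hi, accel_lo, accel_hi, dir_sense = stage.get_dc_joystick_parameters()
if dir_sense == 1:    # DC_JS_DIRSENSE_POS
    print("joystick drives the stage in the positive direction")
elif dir_sense == 2:  # DC_JS_DIRSENSE_NEG
    print("joystick drives the stage in the negative direction")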
Example #2
    def load(cls, filename):
        """ @brief Loads a png from a file as a 16-bit heightmap.
            @param filename Name of target .png image
            @returns A 16-bit, 1-channel image.
        """

        # Get various png parameters so that we can allocate the
        # correct amount of storage space for the new image
        dx, dy, dz = ctypes.c_float(), ctypes.c_float(), ctypes.c_float()
        ni, nj = ctypes.c_int(), ctypes.c_int()
        libfab.load_png_stats(filename, ni, nj, dx, dy, dz)

        # Create a python image data structure
        img = cls(ni.value, nj.value, channels=1, depth=16)

        # Add bounds to the image
        if math.isnan(dx.value):
            print 'Assuming 72 dpi for x resolution.'
            img.xmin, img.xmax = 0, 72*img.width/25.4
        else:   img.xmin, img.xmax = 0, dx.value

        if math.isnan(dy.value):
            print 'Assuming 72 dpi for y resolution.'
            img.ymin, img.ymax = 0, 72*img.height/25.4
        else:   img.ymin, img.ymax = 0, dy.value

        if not math.isnan(dz.value):    img.zmin, img.zmax = 0, dz.value

        # Load the image data from the file
        libfab.load_png(filename, img.pixels)
        img.filename = filename

        return img
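A hedged usage sketch (the enclosing class is not shown in this snippet; `Image` is an assumed name for it, and the attributes read below are the ones set by load()):

img = Image.load('heightmap.png')
print img.width, img.height      # pixel dimensions reported by load_png_stats
print img.xmin, img.xmax         # bounds from the png metadata, or the 72 dpi fallback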
Example #3
    def draw_tracked_explo(self, te):
        read_game = self.env.read_game
        frame = self.env.frame

        head_pos = VECTOR(te.pos.x, te.pos.y, te.pos.z + 10)
        feet = read_game.world_to_screen(te.pos)
        head = read_game.world_to_screen(head_pos)
        if feet and head:
            # claymore friend tracking
            if te.model_name == "WEAPON_CLAYMORE":
                if not te.planter.enemy:
                    te.model_name = "WEAPON_CLAYMORE-friend"
            size_y = feet.y - head.y
            if size_y < 12:  size_y = 12.0
            sprite = self.env.sprites.get_sprite(te.model_name)
            if sprite:
                frame.sprite.Begin(0)
                scaling = size_y / float(_EXPLO_SPRITE_SIZE)
                sprite_center = D3DXVECTOR2(0, 0)
                trans = D3DXVECTOR2(feet.x - _EXPLO_SPRITE_SIZE*scaling/2, feet.y - _EXPLO_SPRITE_SIZE*scaling)
                matrix = D3DMATRIX()
                d3dxdll.D3DXMatrixAffineTransformation2D(byref(matrix), #@UndefinedVariable
                                                         c_float(scaling),          # scaling
                                                         byref(sprite_center),      # rotation center
                                                         c_float(0),                # angle
                                                         byref(trans)               # translation
                                                         )
                frame.sprite.SetTransform(matrix)
                frame.sprite.Draw(sprite, None, None, None, COLOR_CLAYMORE_SPRITE)
                frame.sprite.End()
                self.draw_distance_ESP(te.pos, feet.x, feet.y, COLOR_CLAYMORE_DISTANCE)
            else:
                pass
def sgemm(transA, transB, alpha, A, A_offset, lda, B, B_offset, ldb, beta, C,
          C_offset, ldc, m, n, k, _queue=None, wait_for=None):
    if _queue is None:
        _queue = queues[random.randint(0, len(queues) - 1)]
    cblas_row_major = ct.c_int(0)
    transA = ct.c_int(1 if transA else 0)
    transB = ct.c_int(1 if transB else 0)
    lda = ct.c_size_t(int(lda))
    ldb = ct.c_size_t(int(ldb))
    ldc = ct.c_size_t(int(ldc))
    m = ct.c_size_t(int(m))
    n = ct.c_size_t(int(n))
    k = ct.c_size_t(int(k))
    alpha = ct.c_float(alpha)
    beta = ct.c_float(beta)
    if wait_for is None:
        num_wait = 0
    else:
        num_wait, wait_for = make_event_array(wait_for)
    done_evt = cl.cl_event()
    err = _clblaslib.clblasSgemm(cblas_row_major, transA, transB, m, n, k,
                                 alpha, A.ocl_buf, ct.c_size_t(A_offset),
                                 lda, B.ocl_buf, ct.c_size_t(B_offset), ldb,
                                 beta, C.ocl_buf, ct.c_size_t(C_offset), ldc,
                                 ct.c_size_t(1), ct.byref(_queue),
                                 ct.c_size_t(num_wait), wait_for,
                                 ct.byref(done_evt))
    if err:
        raise Exception("clBLAS sgemm returned error code {}".format(err))
    return done_evt
def convOutp3D(images, hidSums, targets, conv_desc, scaleTargets=0, dbias=None):
  _ConvNet.convOutp3DGemm(
    images.p_mat, hidSums.p_mat, targets.p_mat,
    images.p_shape4d, hidSums.p_shape4d, targets.p_shape4d,
    conv_desc, ct.c_float(scaleTargets), ct.c_float(1))
  if dbias is not None:
    AddUpAllLocs3D(hidSums, dbias)
Example #6
    def _init_from_npy2d(self, mat, missing, nthread):
        """
        Initialize data from a 2-D numpy matrix.

        If ``mat`` does not have ``order='C'`` (aka row-major) or is not contiguous,
        a temporary copy will be made.

        If ``mat`` does not have ``dtype=numpy.float32``, a temporary copy will be made.

        So there could be as many as two temporary data copies; be mindful of input layout
        and type if memory use is a concern.
        """
        if len(mat.shape) != 2:
            raise ValueError('Input numpy.ndarray must be 2 dimensional')
        # flatten the array by rows and ensure it is float32.
        # we try to avoid data copies if possible (reshape returns a view when possible
        # and we explicitly tell np.array to try and avoid copying)
        data = np.array(mat.reshape(mat.size), copy=False, dtype=np.float32)
        self.handle = ctypes.c_void_p()
        missing = missing if missing is not None else np.nan
        if nthread is None:
            _check_call(_LIB.XGDMatrixCreateFromMat(
                data.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
                c_bst_ulong(mat.shape[0]),
                c_bst_ulong(mat.shape[1]),
                ctypes.c_float(missing),
                ctypes.byref(self.handle)))
        else:
            _check_call(_LIB.XGDMatrixCreateFromMat_omp(
                data.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
                c_bst_ulong(mat.shape[0]),
                c_bst_ulong(mat.shape[1]),
                ctypes.c_float(missing),
                ctypes.byref(self.handle),
                nthread))
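Per the docstring above, the temporary copies can be avoided by handing in a C-contiguous float32 array in the first place; a small sketch (the DMatrix call in the comment is illustrative only):

import numpy as np
mat = np.ascontiguousarray(np.random.rand(1000, 16), dtype=np.float32)
# dtrain = xgboost.DMatrix(mat, missing=np.nan)   # would take the no-copy path above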
Example #7
File: cublas.py Project: Kiiree/PyFR
    def mul(self, a, b, out, alpha=1.0, beta=0.0):
        w = self._wrappers

        # Ensure the matrices are compatible
        if a.nrow != out.nrow or a.ncol != b.nrow or b.ncol != out.ncol:
            raise ValueError('Incompatible matrices for out = a*b')

        # CUBLAS expects inputs to be column-major (or Fortran order in
        # numpy parlance).  However as C = A*B => C^T = (A*B)^T
        # = (B^T)*(A^T) with a little trickery we can multiply our
        # row-major matrices directly.
        m, n, k = b.ncol, a.nrow, a.ncol
        A, B, C = b, a, out

        # Do not transpose either A or B
        opA = opB = w.CUBLAS_OP_N

        # α and β factors for C = α*(A*op(B)) + β*C
        if a.dtype == np.float64:
            cublasgemm = w.cublasDgemm
            alpha_ct, beta_ct = c_double(alpha), c_double(beta)
        else:
            cublasgemm = w.cublasSgemm
            alpha_ct, beta_ct = c_float(alpha), c_float(beta)

        class MulKernel(ComputeKernel):
            def run(iself, queue):
                w.cublasSetStream(self._handle, queue.cuda_stream_comp.handle)
                cublasgemm(self._handle, opA, opB, m, n, k,
                           alpha_ct, A, A.leaddim, B, B.leaddim,
                           beta_ct, C, C.leaddim)

        return MulKernel()
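The transpose identity the comment above relies on can be checked with plain numpy (a standalone sketch, independent of CUBLAS):

import numpy as np
A = np.random.rand(3, 4)
B = np.random.rand(4, 5)
# Feeding row-major buffers to a column-major GEMM implicitly transposes them,
# so computing B^T * A^T and transposing back gives exactly A * B.
assert np.allclose(np.dot(B.T, A.T).T, np.dot(A, B))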
Example #8
File: libJHTDB.py Project: lowks/pyJHTDB
 def getBoxFilter(self,
         time, point_coords,
         data_set = 'isotropic1024coarse',
         make_modulo = False,
         field = 'velocity',
         filter_width = 7*2*np.pi / 1024):
     if not self.connection_on:
         print('you didn\'t connect to the database')
         sys.exit()
     if not (point_coords.shape[-1] == 3):
         print ('wrong number of values for coordinates in getBoxFilter')
         sys.exit()
         return None
     if not (point_coords.dtype == np.float32):
         print('point coordinates in getBoxFilter must be floats. stopping.')
         sys.exit()
         return None
     npoints = point_coords.shape[0]
     for i in range(1, len(point_coords.shape)-1):
         npoints *= point_coords.shape[i]
     if make_modulo:
         pcoords = np.zeros(point_coords.shape, np.float64)
         pcoords[:] = point_coords
         np.mod(pcoords, 2*np.pi, point_coords)
     result_array = point_coords.copy()
     self.lib.getBoxFilter(self.authToken,
              ctypes.c_char_p(data_set),
              ctypes.c_char_p(field),
              ctypes.c_float(time),
              ctypes.c_float(filter_width),
              ctypes.c_int(npoints),
              point_coords.ctypes.data_as(ctypes.POINTER(ctypes.POINTER(ctypes.c_float))),
              result_array.ctypes.data_as(ctypes.POINTER(ctypes.POINTER(ctypes.c_float))))
     return result_array
Example #9
File: libJHTDB.py Project: lowks/pyJHTDB
 def getBlineAlt(self,
         time, nsteps, ds,
         x0,
         sinterp = 4,
         tinterp = 0,
         data_set = 'mhd1024'):
     if not self.connection_on:
         print('you didn\'t connect to the database')
         sys.exit()
     if not (x0.shape[1] == 3 and len(x0.shape) == 2):
         print ('wrong shape of initial condition in getBlineAlt, ', x0.shape)
         sys.exit()
         return None
     npoints = x0.shape[0]
     result_array = np.empty((nsteps+1, npoints, 3), dtype=np.float32)
     result_array[0] = x0
     self.lib.getBline(
             self.authToken,
             ctypes.c_char_p(data_set),
             ctypes.c_float(time),
             ctypes.c_int(nsteps),
             ctypes.c_float(ds),
             ctypes.c_int(sinterp), ctypes.c_int(tinterp), ctypes.c_int(npoints),
             result_array.ctypes.data_as(ctypes.POINTER(ctypes.POINTER(ctypes.c_float))))
     return result_array
 def ENgetcontrol(self, iCindex):
     """
     retrieves parameters that define a simple control
     
     Arguments:
      * iCindex = control index (position of control statement
                  in the input file, starting from 1)
     
     Returns: tuple( int, int, float, int, float )
      * control type code (see toolkit.optControlTypes)
      * index of controlled link
      * control setting on link
      * index of controlling node (0 for TIMER
        or TIMEOFDAY control)
      * control level (tank level, junction
         pressure, or time (seconds))
     
     """
     iCtype = ctypes.c_int()
     iLindex = ctypes.c_int()
     fSetting = ctypes.c_float()
     iNindex = ctypes.c_int()
     fLevel = ctypes.c_float()
     self.errcode = self.ENlib.ENgetcontrol(iCindex, byref(iCtype),
                                            byref(iLindex), byref(fSetting),
                                            byref(iNindex), byref(fLevel))
     self._error()
      return (iCtype.value, iLindex.value, fSetting.value, iNindex.value,
                     fLevel.value)
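Illustrative call (hypothetical: `en` stands for an instance of the wrapper class that defines ENgetcontrol, with the toolkit library already loaded):

ctype, link_index, setting, node_index, level = en.ENgetcontrol(1)
print(ctype, link_index, setting, node_index, level)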
Example #11
File: APTMotor.py Project: XavierDR/OCT
 def getVelocityParameters(self):
     minimumVelocity = c_float()
     acceleration = c_float()
     maximumVelocity = c_float()
     self.aptdll.MOT_GetVelParams(self.SerialNum, pointer(minimumVelocity), pointer(acceleration), pointer(maximumVelocity))
     velocityParameters = [minimumVelocity.value, acceleration.value, maximumVelocity.value]
     return velocityParameters
Example #12
File: channel.py Project: Oire/TWBlue
 def get_3d_attributes(self):
  """Retrieves the 3D attributes of a sample, stream, or MOD music channel with 3D functionality."""
  answer = dict(mode=c_ulong(), min=c_float(), max=c_float(), iangle=c_ulong(), oangle=c_ulong(), outvol=c_float())
  bass_call(BASS_ChannelGet3DAttributes, self.handle, pointer(answer['mode']), pointer(answer['min']), pointer(answer['max']), pointer(answer['iangle']), pointer(answer['oangle']), pointer(answer['outvol']))
  for k in answer:
   answer[k] = answer[k].value
  return answer
Example #13
    def add_sums(self, mat, axis, mult=1., beta=1.):
        """
        Add a multiple of the sums of the matrix mat along the given dimension
        to self. Self is scaled by beta before adding anything.
        """

        m = _cudamat.get_leading_dimension(mat.p_mat)
        n = _cudamat.get_nonleading_dimension(mat.p_mat)

        if axis == 0:
            # sum along leading dimension
            check_ones_matrix(m)
            left = CUDAMatrix.ones.slice(0, m)
            left.set_trans(True)
            right = mat

        elif axis == 1:
            # sum along non-leading dimension
            left = mat
            check_ones_matrix(n)
            right = CUDAMatrix.ones.slice(0, n)

        err_code = _cudamat.dot(left.p_mat, right.p_mat, self.p_mat,
                                ct.c_float(beta), ct.c_float(mult))
        if err_code:
            raise generate_exception(err_code)

        return self
Example #14
def cameralookat(camera_x, camera_y, camera_z, center_x, center_y, center_z, up_x, up_y, up_z):
    """
    This function sets the view matrix from the position of the camera, the position of the center of focus, and the direction which should point up. It takes effect when the next image is created. Therefore, if you want to take pictures of the same data from different perspectives, you can call gr3.cameralookat(), gr3.getpixmap_(), gr3.cameralookat(), gr3.getpixmap_(), ... without calling gr3_clear() and gr3_drawmesh() again.

    **Parameters:**

        `camera_x` : The x-coordinate of the camera

        `camera_y` : The y-coordinate of the camera

        `camera_z` : The z-coordinate of the camera

        `center_x` : The x-coordinate of the center of focus

        `center_y` : The y-coordinate of the center of focus

        `center_z` : The z-coordinate of the center of focus

        `up_x` : The x-component of the up direction

        `up_y` : The y-component of the up direction

        `up_z` : The z-component of the up direction


    .. note::
        Source: http://www.opengl.org/sdk/docs/man/xhtml/gluLookAt.xml
            (as of 10/24/2011, licensed under SGI Free Software Licence B)

    """
    _gr3.gr3_cameralookat(c_float(camera_x), c_float(camera_y), c_float(camera_z), c_float(center_x), c_float(center_y), c_float(center_z), c_float(up_x), c_float(up_y), c_float(up_z))
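A sketch of the render-from-several-viewpoints pattern described in the docstring. It assumes a scene has already been submitted with gr3.drawmesh(), and it calls gr3.getpixmap_() exactly as the docstring spells it; the actual signature of that call is not shown here, so treat this as an illustration of the call order only:

gr3.cameralookat(3.0, 0.0, 0.0,  0.0, 0.0, 0.0,  0.0, 0.0, 1.0)
pixmap_front = gr3.getpixmap_()
gr3.cameralookat(0.0, 3.0, 0.0,  0.0, 0.0, 0.0,  0.0, 0.0, 1.0)
pixmap_side = gr3.getpixmap_()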
Example #15
def setcameraprojectionparameters(vertical_field_of_view, zNear, zFar):
    """
    This function sets the projection parameters. This function takes effect when the next image is created.

    **Parameters:**

        `vertical_field_of_view` : This parameter is the vertical field of view in degrees. It must be greater than 0 and less than 180.

        `zNear` : The distance to the near clipping plane.

        `zFar` : The distance to the far clipping plane.

    **Raises:**

    `gr3.GR3_Error.GR3_ERROR_EXPORT`: Raises GR3_Exception

        +-------------------------+-------------------------------------------------------+
        | GR3_ERROR_NONE          | on success                                            |
        +-------------------------+-------------------------------------------------------+
        | GR3_ERROR_INVALID_VALUE | if one (or more) of the arguments is out of its range |
        +-------------------------+-------------------------------------------------------+


    .. note::

        The ratio between zFar and zNear influences the precision of the depth buffer: the greater `$ \\\\frac{zFar}{zNear} $`, the more likely depth errors become. So you should try to keep both values as close to each other as possible while making sure everything you want to be visible is visible.

    """
    _gr3.gr3_setcameraprojectionparameters(c_float(vertical_field_of_view), c_float(zNear), c_float(zFar))
Example #16
    def forward_gpu(self, x):
        n, c, h, w = x[0].shape
        out_h = conv.get_conv_outsize(h, self.kh, self.sy, self.ph)
        out_w = conv.get_conv_outsize(w, self.kw, self.sx, self.pw)
        out_c = self.W.shape[0]
        y = cuda.empty((n, out_c, out_h, out_w), dtype=self.dtype)
        if cuda.cudnn_enabled and self.use_cudnn:
            handle = cudnn.get_handle()
            x_desc = cudnn.create_tensor_descriptor(x[0])
            y_desc = cudnn.create_tensor_descriptor(y)

            self.filter_desc = cudnn.create_filter_descriptor(self.W)
            self.conv_desc = cudnn.create_convolution_descriptor(
                (self.ph, self.pw), (self.sy, self.sx))
            if self.b is not None:
                self.bias_desc = cudnn.create_tensor_descriptor(
                    self.b[None, :, None, None])

            algo = libcudnn.getConvolutionForwardAlgorithm(
                handle, x_desc.value, self.filter_desc.value,
                self.conv_desc.value, y_desc.value, _fwd_pref,
                self.max_workspace_size)
            workspace_size = libcudnn.getConvolutionForwardWorkspaceSize(
                handle, x_desc.value, self.filter_desc.value,
                self.conv_desc.value, y_desc.value, algo)
            workspace = cuda.empty(
                (max(workspace_size // 4, 1),), dtype=self.dtype)

            one = ctypes.c_float(1)
            zero = ctypes.c_float(0)
            libcudnn.convolutionForward(
                handle, one, x_desc.value, x[0].data.ptr,
                self.filter_desc.value, self.W.data.ptr, self.conv_desc.value,
                algo, workspace.data.ptr, workspace_size, zero, y_desc.value,
                y.data.ptr)

            # TODO(beam2d): Support unshared bias
            if self.b is not None:
                libcudnn.addTensor(
                    handle, libcudnn.CUDNN_ADD_SAME_C, one,
                    self.bias_desc.value, self.b.data.ptr, one, y_desc.value,
                    y.data.ptr)
        else:
            # Implementation using im2col
            self.col = conv.im2col_gpu(
                x[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw)

            # TODO(beam2d): Use streams
            W_mat = self.W.reshape(out_c, c * self.kh * self.kw)
            col_mats = self.col.reshape(
                n, c * self.kh * self.kw, out_h * out_w)
            y_mats = y.reshape(n, out_c, out_h * out_w)
            for i in moves.range(n):
                y_mats[i] = W_mat.dot(col_mats[i])

            # TODO(beam2d): Support unshared bias
            if self.b is not None:
                y += self.b.reshape((1, out_c, 1, 1))

        return y,
Example #17
def drawimage(xmin, xmax, ymin, ymax, pixel_width, pixel_height, window):
    global _gr3
    err = _gr3.gr3_drawimage(c_float(xmin), c_float(xmax),
                       c_float(ymin), c_float(ymax),
                         c_int(pixel_width), c_int(pixel_height), c_int(window))
    if err:
        raise GR3_Exception(err)
Example #18
File: daq.py Project: u55/PyIOTech
    def CvtLinearSetupConvert(self, nscan, readingsPos, nReadings, signal1, voltage1, signal2, voltage2, avg, scans):
        """Both sets up the linear conversion process and converts the ADC
        readings into floating point numbers."""

        counts = self.dataBuf
        fValues = (ct.c_float * self.dBufSz)()
        nValues = self.dBufSz

        err = daq.daqCvtLinearSetupConvert(
            wt.DWORD(nscan),
            wt.DWORD(readingsPos),
            wt.DWORD(nReadings),
            ct.c_float(signal1),
            ct.c_float(voltage1),
            ct.c_float(signal2),
            ct.c_float(voltage2),
            wt.DWORD(avg),
            ct.pointer(counts),
            wt.DWORD(scans),
            ct.pointer(fValues),
            wt.DWORD(nValues),
            )

        if err != 0:
            raise DaqError(err)
        return fValues
Example #19
	def homeParams(self):
		direction = c_long()
		switch = c_long()
		velocity = c_float()
		zero_offset = c_float()
		self.ctrl.GetHomeParams(self.channel, byref(direction), byref(switch), byref(velocity), byref(zero_offset))
		return direction.value, switch.value, velocity.value, zero_offset.value
 def setVelocityParameters(self, serialNumber, minVel, acc, maxVel):
     HWSerialNum = c_long(serialNumber)
     minimumVelocity = c_float(minVel)
     acceleration = c_float(acc)
     maximumVelocity = c_float(maxVel)
     self.aptdll.MOT_SetVelParams(HWSerialNum, minimumVelocity, acceleration, maximumVelocity)
     return True
Example #21
File: daq.py Project: u55/PyIOTech
    def CvtSetAdcRange(self, Admin, Admax):
        """Sets the ADC range for use by the conversion functions (i.e., all
        functions of the form daqCvt... )."""

        err = daq.daqCvtSetAdcRange(ct.c_float(Admin), ct.c_float(Admax))
        if err != 0:
            raise DaqError(err)
Example #22
def sum(mat, axis, target = None):
    """
    Sum the matrix along the given dimension, where 0 represents the leading
    dimension and 1 represents the non-leading dimension. If a target is
    not provided, a new vector is created for storing the result.
    """

    m = _cudamat.get_leading_dimension(mat.p_mat)
    n = _cudamat.get_nonleading_dimension(mat.p_mat)

    if axis == 0:
        # sum along leading dimension
        left = CUDAMatrix.ones.slice(0, m)
        left.set_trans(True)
        right = mat

        if not target:
            target = empty((1, n))
 
    elif axis == 1:
        # sum along non-leading dimension
        left = mat
        right = CUDAMatrix.ones.slice(0, n)

        if not target:
            target = empty((m, 1))

    err_code = _cudamat.dot(left.p_mat, right.p_mat, target.p_mat, ct.c_float(0.), ct.c_float(1.))
    if err_code:
        raise generate_exception(err_code)

    return target
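A minimal usage sketch for the helper above (assuming `m` is an initialized CUDAMatrix; names are illustrative):

col_sums = sum(m, axis=0)   # 1 x n result: sums along the leading dimension
row_sums = sum(m, axis=1)   # m x 1 result: sums along the non-leading dimension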
Example #23
File: dd.py Project: tardini/pyddww
 def getTimeBaseIndices(self, name, tBegin, tEnd):
     """ Return time indices of name corresponding to tBegin and tEnd """
     if not self.status:
         raise Exception('Shotfile not open!')
     try:
         sigName = ctypes.c_char_p(name)
     except TypeError:
         sigName = ctypes.c_char_p(name.encode())
     error = ctypes.c_int32(0)
     info = self.getTimeBaseInfo(name)
     if tEnd < tBegin:
         temp = tEnd
         tEnd = tBegin
         tBegin = temp
     if tBegin < info.tBegin:
         tBegin = info.tBegin
     if tEnd > info.tEnd:
         tEnd = info.tEnd
     try:
         time1 = ctypes.c_float(tBegin)
     except TypeError:
         time1 = ctypes.c_float(tBegin.value)
     try:
         time2 = ctypes.c_float(tEnd)
     except TypeError:
         time2 = ctypes.c_float(tEnd.value)
     k1 = ctypes.c_uint32(0)
     k2 = ctypes.c_uint32(0)
     lname = ctypes.c_uint64(len(name))
     __libddww__.ddtindex_(ctypes.byref(error), ctypes.byref(self.diaref), sigName, ctypes.byref(time1), 
                           ctypes.byref(time2), ctypes.byref(k1), ctypes.byref(k2), lname)
     getError(error.value)
     return numpy.uint32(k1.value), numpy.uint32(k2.value)
Example #24
def getMaxNumRef(sMax, volume, sMin=0.0, multip=2):
    fn = lib.__cfml_reflections_utilities_MOD_get_maxnumref
    fn.argtypes = [POINTER(c_float), POINTER(c_float), POINTER(c_float),
                   POINTER(c_int)]
    fn.restype = c_int
    numref = fn(c_float(sMax), c_float(volume), c_float(sMin), c_int(multip))
    return numref
Example #25
File: c6d.py Project: eadamsatx/c6dwifi
 def _widget_value(self, pair):
     (root, child) = pair
     w_type = self._widget_type(pair)
     if w_type == 'text' or w_type == 'menu' or w_type == 'radio':
         ptr = ctypes.c_char_p()
         res = gphoto.gp_widget_get_value(child, ctypes.pointer(ptr))
         gphoto_check(res)
         return (w_type, ptr.value)
     elif w_type == 'range':
         top = ctypes.c_float()
         bottom = ctypes.c_float()
         step = ctypes.c_float()
         value = ctypes.c_float()
         res = gphoto.gp_widget_get_range(child, ctypes.pointer(bottom), ctypes.pointer(top), ctypes.pointer(step))
         gphoto_check(res)
         res = gphoto.gp_widget_get_value(child, ctypes.pointer(value))
         gphoto_check(res)
         return (w_type, value.value, bottom.value, top.value, step.value)
     elif w_type == 'toggle' or w_type == 'date':
         value = ctypes.c_int()
         res = gphoto.gp_widget_get_value(child, ctypes.pointer(value))
         gphoto_check(res)
         return (w_type, value.value)
     else:
         return None
Example #26
def call_pipe(app_data):
    rows = app_data['rows']
    cols = app_data['cols']

    app_args = app_data['app_args']
    colour_temp = float(app_args.colour_temp)
    contrast = float(app_args.contrast)
    gamma = float(app_args.gamma)

    img_data = app_data['img_data']
    IN = img_data['IN']
    M3200 = img_data['M3200']
    M7000 = img_data['M7000']
    OUT = img_data['OUT']

    # lib function name
    func_name = 'pipeline_'+app_data['app']
    pipe_func = app_data[func_name]

    # lib function args
    pipe_args = []
    pipe_args += [ctypes.c_int(cols)]
    pipe_args += [ctypes.c_int(rows)]
    pipe_args += [ctypes.c_float(colour_temp)]
    pipe_args += [ctypes.c_float(contrast)]
    pipe_args += [ctypes.c_float(gamma)]
    pipe_args += [ctypes.c_void_p(IN.ctypes.data)]
    pipe_args += [ctypes.c_void_p(M3200.ctypes.data)]
    pipe_args += [ctypes.c_void_p(M7000.ctypes.data)]
    pipe_args += [ctypes.c_void_p(OUT.ctypes.data)]

    # call lib function
    pipe_func(*pipe_args)

    return
Example #27
    def rednoise(self,startwidth=6,endwidth=100,endfreq=1.0):
        """Perform rednoise removal via Presto style method.

        :param startwidth: size of initial array for median calculation
        :type startwidth: int

        :param endwidth: size of largest array for median calculation
        :type endwidth: int

        :param endfreq: remove rednoise up to this frequency
        :type endfreq: float

        :return: whitened fourier series
        :rtype: :class:`~sigpyproc.FourierSeries.FourierSeries`

        """
        out_ar   = np.empty_like(self)
        buf_c1   = np.empty(2*endwidth,dtype="float32")
        buf_c2   = np.empty(2*endwidth,dtype="float32")
        buf_f1   = np.empty(endwidth,dtype="float32")
        lib.rednoise(as_c(self),
                     as_c(out_ar),
                     as_c(buf_c1),
                     as_c(buf_c2),
                     as_c(buf_f1),
                     C.c_int(self.size/2),
                     C.c_float(self.header.tsamp),
                     C.c_int(startwidth),
                     C.c_int(endwidth),
                     C.c_float(endfreq))
        return FourierSeries(out_ar,self.header.newHeader())
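Hedged usage sketch (assuming `fs` is a FourierSeries instance; the keyword values simply restate the documented defaults):

whitened = fs.rednoise(startwidth=6, endwidth=100, endfreq=1.0)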
Example #28
    def detectionOutput_fprop(self, conf_view, loc_view, detection, prior_boxes,
                              proposals, nms_top_k, image_top_k, score_threshold, nms_threshold):
        conf = c_longlong(conf_view._tensor.ctypes.data)
        loc = c_longlong(loc_view._tensor.ctypes.data)
        detection = c_longlong(detection._tensor.ctypes.data)
        prior_boxes = c_longlong(prior_boxes._tensor.ctypes.data)
        L, num_class, bs = conf_view.shape
        proposals = c_longlong(proposals._tensor.ctypes.data)
        result = np.zeros((bs, image_top_k, 6), dtype=np.float32)
        result_ptr = c_longlong(result.ctypes.data)
        result_len = np.zeros(bs, dtype=np.int64)
        result_len_ptr = c_longlong(result_len.ctypes.data)

        self.mklEngine.detection_fprop(conf, loc, result_ptr, prior_boxes,
                                       result_len_ptr, c_longlong(L), c_longlong(num_class),
                                       c_longlong(bs), c_longlong(nms_top_k),
                                       c_longlong(image_top_k),
                                       c_float(score_threshold),
                                       c_float(nms_threshold))
        batch_all_detections = [None] * self.bsz
        for i in range(bs):
            leng = np.long(result_len[i])
            res_batch = np.zeros((leng, 6))
            res_batch[:] = result[i, 0:leng, :]
            batch_all_detections[i] = res_batch
        return batch_all_detections
Example #29
def sgemv(transA, M, N, alpha, bufA, offA, lda, bufX, offX, incx, beta, bufY,
          offY, incy, wait_for=None):
    cblas_row_major = ct.c_int(0)
    transA = ct.c_int(1 if transA else 0)
    lda = ct.c_size_t(int(lda))
    incx = ct.c_size_t(int(incx))
    incy = ct.c_size_t(int(incy))
    M = ct.c_size_t(int(M))
    N = ct.c_size_t(int(N))
    alpha = ct.c_float(alpha)
    beta = ct.c_float(beta)
    if wait_for is None:
        num_wait = 0
    else:
        num_wait = 1
        wait_for = ct.byref(wait_for)
    done_evt = cl.cl_event()
    err = _clblaslib.clblasSgemv(cblas_row_major, transA, M, N,
                                 alpha, bufA.ocl_buf, ct.c_size_t(offA), lda,
                                 bufX.ocl_buf, ct.c_size_t(offX), incx, beta,
                                 bufY.ocl_buf, ct.c_size_t(offY), incy,
                                 ct.c_size_t(1), ct.byref(queues[0]),
                                 ct.c_size_t(num_wait), wait_for,
                                 ct.byref(done_evt))
    if err:
        raise Exception("clBLAS sgemv returned error code {}".format(err))
    return done_evt
 def getVelocityParameterLimits(self, serialNumber):
     HWSerialNum = c_long(serialNumber)
     maximumAcceleration = c_float()
     maximumVelocity = c_float()
     self.aptdll.MOT_GetVelParamLimits(HWSerialNum, pointer(maximumAcceleration), pointer(maximumVelocity))
     velocityParameterLimits = [maximumAcceleration.value, maximumVelocity.value]
     return velocityParameterLimits  
Example #31
    def test_array_types(self):
        # This test need to make sure that the Scala type selected is at least
        # as large as the python's types. This is necessary because python's
        # array types depend on C implementation on the machine. Therefore there
        # is no machine independent correspondence between python's array types
        # and Scala types.
        # See: https://docs.python.org/2/library/array.html

        def assert_collect_success(typecode, value, element_type):
            self.assertEqual(
                element_type,
                str(_infer_type(array.array(typecode, [value])).element_type))

        # supported string types
        #
        # String types in python's array are "u" for Py_UNICODE and "c" for char.
        # "u" will be removed in python 4, and "c" is not supported in python 3.
        supported_string_types = []
        if sys.version_info[0] < 4:
            supported_string_types += ['u']
            # test unicode
            assert_collect_success('u', u'a', 'CHAR')
        if sys.version_info[0] < 3:
            supported_string_types += ['c']
            # test string
            assert_collect_success('c', 'a', 'CHAR')

        # supported float and double
        #
        # Test max, min, and precision for float and double, assuming IEEE 754
        # floating-point format.
        supported_fractional_types = ['f', 'd']
        assert_collect_success('f', ctypes.c_float(1e+38).value, 'FLOAT')
        assert_collect_success('f', ctypes.c_float(1e-38).value, 'FLOAT')
        assert_collect_success('f', ctypes.c_float(1.123456).value, 'FLOAT')
        assert_collect_success('d', sys.float_info.max, 'DOUBLE')
        assert_collect_success('d', sys.float_info.min, 'DOUBLE')
        assert_collect_success('d', sys.float_info.epsilon, 'DOUBLE')

        def get_int_data_type(size):
            if size <= 8:
                return "TINYINT"
            if size <= 16:
                return "SMALLINT"
            if size <= 32:
                return "INT"
            if size <= 64:
                return "BIGINT"

        # supported signed int types
        #
        # The size of C types changes with implementation, we need to make sure
        # that there is no overflow error on the platform running this test.
        supported_signed_int_types = list(
            set(_array_signed_int_typecode_ctype_mappings.keys()).intersection(
                set(_array_type_mappings.keys())))
        for t in supported_signed_int_types:
            ctype = _array_signed_int_typecode_ctype_mappings[t]
            max_val = 2**(ctypes.sizeof(ctype) * 8 - 1)
            assert_collect_success(t, max_val - 1,
                                   get_int_data_type(ctypes.sizeof(ctype) * 8))
            assert_collect_success(t, -max_val,
                                   get_int_data_type(ctypes.sizeof(ctype) * 8))

        # supported unsigned int types
        #
        # JVM does not have unsigned types. We need to be very careful to make
        # sure that there is no overflow error.
        supported_unsigned_int_types = list(
            set(_array_unsigned_int_typecode_ctype_mappings.keys()).
            intersection(set(_array_type_mappings.keys())))
        for t in supported_unsigned_int_types:
            ctype = _array_unsigned_int_typecode_ctype_mappings[t]
            max_val = 2**(ctypes.sizeof(ctype) * 8 - 1)
            assert_collect_success(
                t, max_val, get_int_data_type(ctypes.sizeof(ctype) * 8 + 1))

        # all supported types
        #
        # Make sure the types tested above:
        # 1. are all supported types
        # 2. cover all supported types
        supported_types = (supported_string_types +
                           supported_fractional_types +
                           supported_signed_int_types +
                           supported_unsigned_int_types)
        self.assertEqual(set(supported_types),
                         set(_array_type_mappings.keys()))

        # all unsupported types
        #
        # Keys in _array_type_mappings is a complete list of all supported types,
        # and types not in _array_type_mappings are considered unsupported.
        # `array.typecodes` are not supported in python 2.
        if sys.version_info[0] < 3:
            all_types = {
                'c', 'b', 'B', 'u', 'h', 'H', 'i', 'I', 'l', 'L', 'f', 'd'
            }
        else:
            all_types = set(array.typecodes)
        unsupported_types = all_types - set(supported_types)
        # test unsupported types
        for t in unsupported_types:
            with self.assertRaises(TypeError):
                _infer_schema_from_data([Row(myarray=array.array(t))])
Example #32
File: shapes.py Project: selpp/packagec
 def __call__( self: 'Circle', window: 'Window' ) -> None:
      bdl.drawCircle( window.window, self.pos.to_c( ), ctypes.c_float( self.rad ), self.color.to_c( ), ctypes.c_int( self.outline ), self.outline_color.to_c( ) )
Example #33
 def setKineticCycleTime(self, kinetic_time):
     setCurrentCamera(self.camera_handle)
     self._abortIfAcquiring_()
     andorCheck(andor.SetKineticCycleTime(ctypes.c_float(kinetic_time)), "SetKineticCycleTime")
     self.kinetic_cycle_time = kinetic_time
Example #34
 def setExposureTime(self, exposure_time):
     setCurrentCamera(self.camera_handle)
     self._abortIfAcquiring_()
     andorCheck(andor.SetExposureTime(ctypes.c_float(exposure_time)), "SetExposureTime")
     self.exposure_time = exposure_time
Example #35
    def __init__(self, andor_path, camera_handle):
        self.camera_handle = camera_handle

        # General
        self.pixels = 0

        # Camera properties storage.
        self._props_ = {}

        # Initialize the camera.
        setCurrentCamera(self.camera_handle)
        andorCheck(andor.Initialize(andor_path + "Detector.ini"), "Initialize")

        # Determine camera capabilities (useful??).
        caps = AndorCapabilities(ctypes.sizeof(ctypes.c_ulong)*12,0,0,0,0,0,0,0,0,0,0,0)
        andorCheck(andor.GetCapabilities(ctypes.byref(caps)), "GetCapabilities")
        self._props_['AcqModes'] = caps.ulAcqModes
        self._props_['ReadModes'] = caps.ulReadModes
        self._props_['TriggerModes'] = caps.ulTriggerModes
        self._props_['CameraType'] = caps.ulCameraType
        self._props_['PixelMode'] = caps.ulPixelMode
        self._props_['SetFunctions'] = caps.ulSetFunctions
        self._props_['GetFunctions'] = caps.ulGetFunctions
        self._props_['Features'] = caps.ulFeatures
        self._props_['PCICard'] = caps.ulPCICard
        self._props_['EMGainCapability'] = caps.ulEMGainCapability
        self._props_['FTReadModes'] = caps.ulFTReadModes

        # Determine camera bit depth.

        # FIXME: Use andor.GetBitDepth()
        for i in [[1, 2**8], [2, 2**14], [4, 2**16], [8, 2**32]]:
            if (i[0] & self._props_['PixelMode']):
                self._props_['MaxIntensity'] = i[1]

        # Determine camera pixel size.
        x_pixels = ctypes.c_long()
        y_pixels = ctypes.c_long()
        andorCheck(andor.GetDetector(ctypes.byref(x_pixels), ctypes.byref(y_pixels)), "GetDetector")
        self._props_['XPixels'] = x_pixels.value
        self._props_['YPixels'] = y_pixels.value

        # Determine camera head model.
        head_model = ctypes.create_string_buffer(32)
        andorCheck(andor.GetHeadModel(head_model), "GetHeadModel")
        self._props_['HeadModel'] = head_model.value.decode("utf-8")

        # Determine hardware version.
        plug_in_card_version = ctypes.c_uint()
        flex_10k_file_version = ctypes.c_uint()
        dummy1 = ctypes.c_uint()
        dummy2 = ctypes.c_uint()
        camera_firmware_version = ctypes.c_uint()
        camera_firmware_build = ctypes.c_uint()
        andorCheck(andor.GetHardwareVersion(ctypes.byref(plug_in_card_version),
                                            ctypes.byref(flex_10k_file_version),
                                            ctypes.byref(dummy1),
                                            ctypes.byref(dummy2),
                                            ctypes.byref(camera_firmware_version),
                                            ctypes.byref(camera_firmware_build)),
                   "GetHardwareVersion")
        self._props_["PlugInCardVersion"] = plug_in_card_version.value
        self._props_["Flex10kFileVersion"] = flex_10k_file_version.value
        self._props_["CameraFirmwareVersion"] = camera_firmware_version.value
        self._props_["CameraFirmwareBuild"] = camera_firmware_build.value

        # Determine vertical shift speeds.
        number = ctypes.c_int()
        andorCheck(andor.GetNumberVSSpeeds(ctypes.byref(number)), "GetNumberVSSpeeds")
        self._props_["VSSpeeds"] = list(range(number.value))
        for i in range(number.value):
            index = ctypes.c_int(i)
            speed = ctypes.c_float()
            andorCheck(andor.GetVSSpeed(index, ctypes.byref(speed)), "GetVSSpeed")
            self._props_["VSSpeeds"][i] = round(speed.value, 4)

        # Determine horizontal shift speeds.
        andorCheck(andor.GetNumberADChannels(ctypes.byref(number)), "GetNumberADChannels")
        self._props_["NumberADChannels"] = number.value
        self._props_["HSSpeeds"] = list(range(number.value))
        for i in range(number.value):
            channel = ctypes.c_int(i)
            andorCheck(andor.GetNumberHSSpeeds(channel, 0, ctypes.byref(number)), "GetNumberHSSpeeds")
            self._props_["HSSpeeds"][i] = list(range(number.value))
            for j in range(number.value):
                type = ctypes.c_int(j)
                speed = ctypes.c_float()
                andorCheck(andor.GetHSSpeed(channel, 0, type, ctypes.byref(speed)), "GetHSSpeed")
                self._props_["HSSpeeds"][i][j] = round(speed.value, 4)
        
        # Determine temperature range.
        min_temp = ctypes.c_int()
        max_temp = ctypes.c_int()
        andorCheck(andor.GetTemperatureRange(ctypes.byref(min_temp), ctypes.byref(max_temp)), "GetTemperatureRange")
        self._props_["TemperatureRange"] = [min_temp.value, max_temp.value]

        # Determine preamp gains available.
        number = ctypes.c_int()
        andorCheck(andor.GetNumberPreAmpGains(ctypes.byref(number)), "GetNumberPreAmpGains")
        self._props_["PreAmpGains"] = list(range(number.value))
        for i in range(number.value):
            index = ctypes.c_int(i)
            gain = ctypes.c_float()
            andorCheck(andor.GetPreAmpGain(index, ctypes.byref(gain)), "GetPreAmpGain")
            self._props_["PreAmpGains"][i] = round(gain.value, 2)

        # Determine EM gain range.
        low = ctypes.c_int()
        high = ctypes.c_int()
        andorCheck(andor.GetEMGainRange(ctypes.byref(low), ctypes.byref(high)), "GetEMGainRange")
        self._props_["EMGainRange"] = [low.value, high.value]

        # Determine number of EM gain modes.
        n_modes = 0
        while (self.setEMGainMode(n_modes)):
            n_modes += 1
        self._props_["NumberEMGainModes"] = n_modes - 1
        self.setEMGainMode(0)

        # Determine the maximum binning values.
        max_binning = ctypes.c_int()
        andorCheck(andor.GetMaximumBinning(4, 0, ctypes.byref(max_binning)), "GetMaximumBinning")
        self._props_["MaxBinning"] = [max_binning.value]
        andorCheck(andor.GetMaximumBinning(4, 1, ctypes.byref(max_binning)), "GetMaximumBinning")
        self._props_["MaxBinning"].append(max_binning.value)
        
        # Determine maximum exposure time.
        max_exp = ctypes.c_float()
        andorCheck(andor.GetMaximumExposure(ctypes.byref(max_exp)), "GetMaximumExposure")
        self._props_["MaxExposure"] = max_exp.value
Example #36
def set_damping(p_state, damping, idx_image=-1, idx_chain=-1):
    """Set the Gilbert damping parameter [unitless]."""
    _LLG_Set_Damping(ctypes.c_void_p(p_state), ctypes.c_float(damping),
                     ctypes.c_int(idx_image), ctypes.c_int(idx_chain))
Example #37
def set_timestep(p_state, dt, idx_image=-1, idx_chain=-1):
    """Set the time step [ps] for the calculation."""
    _LLG_Set_Time_Step(p_state, ctypes.c_float(dt), idx_image, idx_chain)
Example #38
    def setReservoirTsetColdSample(SetReservoirTsetColdSampleK):
        """
		AttoDRY_Interface_setReservoirTsetColdSample
		"""
        ADRY.setReservoirTsetColdSample(
            ctypes.c_float(SetReservoirTsetColdSampleK))
Example #39
    def setVTIHeaterPower(VTIHeaterPowerW):
        """
		AttoDRY_Interface_setVTIHeaterPower
		"""
        ADRY.setVTIHeaterPower(ctypes.c_float(VTIHeaterPowerW))
Example #40
    def setReservoirTsetWarmSample(ReservoirTsetWarmSampleW):
        """
		AttoDRY_Interface_setReservoirTsetWarmSample
		"""
        ADRY.setReservoirTsetWarmSample(
            ctypes.c_float(ReservoirTsetWarmSampleW))
Example #41
    def setUserTemperature(Temperature):
        """
		Sets the user temperature. This is the temperature used when temperature 
		control is enabled.
		"""
        ADRY.setUserTemperature(ctypes.c_float(Temperature))
Example #42
    def setReservoirTsetWarmMagnet(ReservoirTsetWarmMagnetW):
        """
		AttoDRY_Interface_setReservoirTsetWarmMagnet
		"""
        ADRY.setReservoirTsetWarmMagnet(
            ctypes.c_float(ReservoirTsetWarmMagnetW))
Example #43
def art(data, theta, center, num_grid, iters, init_matrix):
    """
    Applies Algebraic Reconstruction Technique (ART) 
    to obtain reconstructions.
    
    Parameters
    ----------
    data : ndarray, float32
        3-D tomographic data with dimensions:
        [projections, slices, pixels]
        
    theta : ndarray, float32
        Projection angles in radians.
        
    center : scalar, float32
        Pixel index corresponding to the 
        center of rotation axis.
        
    num_grid : scalar, int32
        Grid size of the reconstructed images.
        
    iters : scalar, int32
        Number of ART iterations.
    
    init_matrix : ndarray
       Initial guess for the reconstruction. Its
       shape is the same as the reconstructed data.
       
    Returns
    -------
    output : ndarray
        Reconstructed data with dimensions:
        [slices, num_grid, num_grid]
        
    References
    ----------
    - `http://en.wikipedia.org/wiki/Algebraic_Reconstruction_Technique \
    <http://en.wikipedia.org/wiki/Algebraic_Reconstruction_Technique>`_
    - `http://en.wikipedia.org/wiki/Kaczmarz_method \
    <http://en.wikipedia.org/wiki/Kaczmarz_method>`_
        
    Examples
    --------
    - Reconstruct using ART:
        
        >>> import tomopy
        >>> 
        >>> # Load data
        >>> myfile = 'demo/data.h5'
        >>> data, white, dark, theta = tomopy.xtomo_reader(myfile, slices_start=0, slices_end=1)
        >>> 
        >>> # Construct tomo object
        >>> d = tomopy.xtomo_dataset(log='error')
        >>> d.dataset(data, white, dark, theta)
        >>> d.normalize()
        >>> d.correct_drift()
        >>> d.center = 662
        >>> 
        >>> # Perform reconstruction
        >>> d.art()
        >>> 
        >>> # Save reconstructed data
        >>> output_file='tmp/recon_'
        >>> tomopy.xtomo_writer(d.data_recon, output_file)
        >>> print "Images are succesfully saved at " + output_file + '...'
    """
    num_projections = np.array(data.shape[0], dtype='int32')
    num_slices = np.array(data.shape[1], dtype='int32')
    num_pixels = np.array(data.shape[2], dtype='int32')

    # Call C function.
    c_float_p = ctypes.POINTER(ctypes.c_float)
    librecon.art.restype = ctypes.POINTER(ctypes.c_void_p)
    librecon.art(data.ctypes.data_as(c_float_p),
                 theta.ctypes.data_as(c_float_p), ctypes.c_float(center),
                 ctypes.c_int(num_projections), ctypes.c_int(num_slices),
                 ctypes.c_int(num_pixels), ctypes.c_int(num_grid),
                 ctypes.c_int(iters), init_matrix.ctypes.data_as(c_float_p))
    return init_matrix
Example #44
    def setSampleHeaterPower(HeaterPowerW):
        """
		Sets the sample heater value to the specified value
		"""
        ADRY.setSampleHeaterPower(ctypes.c_float(HeaterPowerW))
Example #45
import ctypes as C
mate = C.CDLL('./mylib.so')

#=================================
# Sum of two C floats.
#=================================

mate.add_float.restype = C.c_float  # "Declare" that the function's return value is a C float.
mate.add_float.argtypes = [
    C.c_float, C.c_float
]  # "Declare" that the function's arguments are C floats.

x_1 = C.c_float(1.1)
x_2 = C.c_float(10.10)
print 'add_float', mate.add_float(x_1, x_2)

# Sum of two C integers.
mate.add_int.restype = C.c_int  # "Declare" that the function's return value is a C integer.
mate.add_int.argtypes = [
    C.c_int, C.c_int
]  # "Declare" that the function's arguments are C integers.

n_1 = C.c_int(1)
n_2 = C.c_int(10)
print 'add_int', mate.add_int(n_1, n_2)

# Sum of two C floats passed by reference.
x_1 = C.c_float(1.1)
x_2 = C.c_float(-0.10)
res = C.c_float()
mate.add_float_ref(C.byref(x_1), C.byref(x_2), C.byref(res))
print 'add_float_ref', res.value
Example #46
    def setUserMagneticField(MagneticField):
        """
		Sets the user magnetic field. This is used as the set point when field 
		control is active
		"""
        ADRY.setUserMagneticField(ctypes.c_float(MagneticField))
Example #47
    def _unpack_argument(self, ty, val, sycl_queue, kernelargs, device_arrs,
                         access_type):
        """
        Unpacks the arguments that are to be passed to the SYCL kernel from
        Numba types to Ctypes.

        Args:
            ty: The data type of the kernel argument, defined as an instance of
                numba.types.
            val: The value of the kernel argument.
            sycl_queue (dpctl.SyclQueue): A ``dpctl.SyclQueue`` object. The
                queue object will be used whenever USM memory allocation is
                needed during unpacking of a numpy.ndarray argument.
            kernelargs (list): The list of kernel arguments into which the
                current kernel argument will be appended.
            device_arrs (list): A list of tuples that is used to store the
                triples corresponding to the USM memory allocated for a
                ``numpy.ndarray`` argument, a wrapper ``ndarray`` created from
                the USM memory, and the original ``ndarray`` argument.
            access_type : The type of access for an array argument.

        Raises:
            NotImplementedError: If the type of argument is not yet supported,
                then a ``NotImplementedError`` is raised.

        """

        device_arrs.append(None)

        if isinstance(ty, USMNdArrayType):
            self._unpack_device_array_argument(
                val.size,
                val.dtype.itemsize,
                val.usm_data,
                val.shape,
                val.strides,
                val.ndim,
                kernelargs,
            )
        elif isinstance(ty, types.Array):
            packed_val = val
            usm_mem = has_usm_memory(val)
            if usm_mem is None:
                default_behavior = self.check_for_invalid_access_type(
                    access_type)
                usm_mem = as_usm_obj(val, queue=sycl_queue, copy=False)

                orig_val = val
                packed = False
                if not val.flags.c_contiguous:
                    # If the numpy.ndarray is not C-contiguous
                    # we pack the strided array into a packed array.
                    # This allows us to treat the data from here on as C-contiguous.
                    # While packing we treat the data as C-contiguous.
                    # We store the reference of both (strided and packed)
                    # array and during unpacking we use numpy.copyto() to copy
                    # the data back from the packed temporary array to the
                    # original strided array.
                    packed_val = val.flatten(order="C")
                    packed = True

                if (default_behavior or self.valid_access_types[access_type]
                        == _NUMBA_DPPY_READ_ONLY
                        or self.valid_access_types[access_type]
                        == _NUMBA_DPPY_READ_WRITE):
                    copy_from_numpy_to_usm_obj(usm_mem, packed_val)

                device_arrs[-1] = (usm_mem, orig_val, packed_val, packed)

            self._unpack_device_array_argument(
                packed_val.size,
                packed_val.dtype.itemsize,
                usm_mem,
                packed_val.shape,
                packed_val.strides,
                packed_val.ndim,
                kernelargs,
            )
        elif ty == types.int64:
            cval = ctypes.c_longlong(val)
            kernelargs.append(cval)
        elif ty == types.uint64:
            cval = ctypes.c_ulonglong(val)
            kernelargs.append(cval)
        elif ty == types.int32:
            cval = ctypes.c_int(val)
            kernelargs.append(cval)
        elif ty == types.uint32:
            cval = ctypes.c_uint(val)
            kernelargs.append(cval)
        elif ty == types.float64:
            cval = ctypes.c_double(val)
            kernelargs.append(cval)
        elif ty == types.float32:
            cval = ctypes.c_float(val)
            kernelargs.append(cval)
        elif ty == types.boolean:
            cval = ctypes.c_uint8(int(val))
            kernelargs.append(cval)
        elif ty == types.complex64:
            raise NotImplementedError(ty, val)
        elif ty == types.complex128:
            raise NotImplementedError(ty, val)
        else:
            raise NotImplementedError(ty, val)
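The packing/unpacking strategy described in the comments above can be illustrated with plain numpy (a standalone sketch, no SYCL or USM involved):

import numpy as np
strided = np.arange(12).reshape(3, 4)[:, ::2]      # not C-contiguous
packed = strided.flatten(order="C")                # dense copy handed to the kernel
packed += 100                                      # stand-in for the kernel writing results
np.copyto(strided, packed.reshape(strided.shape))  # copy results back to the strided view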
Example #48
# c_int int int/long
# c_uint  unsigned int  int/long
# c_long  long  int/long
# c_ulong unsigned long int/long
# c_longlong  __int64 or longlong int/long
# c_ulonglong unsigned __int64 or unsigned long long  int/long
# c_float float float
# c_double  double  float
# c_longdouble  long double float float
# c_char_p  char *  string or None
# c_wchar_p wchar_t * unicode or None
# c_void_p  void *  int/long or None
import ctypes
gpio = ctypes.CDLL('./jetsongpio.so')
gpio.init_robot_gpio()
gpio.set_speed(ctypes.c_float(0.5))
gpio.go_straight()
gpio.go_straight_with_time(ctypes.c_uint(1))
gpio.go_back_with_time(ctypes.c_uint(1))
gpio.go_swerve_with_time(ctypes.c_uint(1), ctypes.c_uint(45))
# gpio.go_straight()
# gpio.go_back()
# gpio.go_speed(800, 250)

# gpio.go_swerve(800, 250)

gpio.go_stop()

# gpio.release_robot_gpio()
gpio.release_robot_gpio()
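One practical consequence of the c_float row in the type table at the top of this example: values round-trip through single precision, so anything beyond roughly 7 significant digits is lost (quick check):

import ctypes
print(ctypes.c_float(0.1).value)    # 0.10000000149011612 -- rounded to 32-bit precision
print(ctypes.c_double(0.1).value)   # 0.1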
Example #49
      def run(self):
        frameType = c_short * (self.height.value * self.width.value) #used for acquired frame
        frameBuffer = frameType()

        frameType = c_byte * (self.height.value * self.width.value)  #used for streaming frame
        frame8bit = frameType()

        treePtr = c_void_p(0)
        status = ZELOS2150GV.mdsLib.camOpenTree(c_char_p(self.device.getTree().name), c_int(self.device.getTree().shot), byref(treePtr))
        if status == -1:
          Data.execute('DevLogErr($1,$2)', self.device.getNid(), 'Cannot open tree')
          raise mdsExceptions.TclFAILED_ESSENTIAL

        if self.device.frame_sync.data() == 'EXTERNAL':
          isExternal = 1
          timebaseNid=self.device.frame_clock.getNid()
        else:
          isExternal = 0
          timebaseNid=c_int(-1)

        if self.device.streaming.data() == 'Stream and Store':
          isStreaming = 1
          isStorage = 1
        if self.device.streaming.data() == 'Only Stream':
          isStreaming = 1
          isStorage = 0
        if self.device.streaming.data() == 'Only Store':
          isStreaming = 0
          isStorage = 1

        autoScale = self.device.stream_autos.data()                 #autoscaling pixel grey depth for streaming operation
        if autoScale == 'YES':
          autoScale=c_int(1)
        else:
          autoScale=c_int(0)

        lowLim=c_int(self.device.stream_lolim.data())
        highLim=c_int(self.device.stream_hilim.data())
        minLim=c_int(0)
        maxLim=c_int(32767)

        tcpStreamHandle=c_int(-1)
        streamPort=c_int(self.device.stream_port.data())
        #frameTimeInt=0
        prevFrameTime=0
        totFrameTime=0
        framePeriod = int(self.device.frame_period.data()*1000)
        skipFrameStream=int(float(1/self.device.frame_period.data())/25.0)-1
        print('skipFrameStream:',skipFrameStream)
        if(skipFrameStream<0):
          skipFrameStream=0
        frameStreamCounter = skipFrameStream
        frameTotalCounter = 0
        status=c_int(-1)
        self.idx = 0

        while not self.stopReq:

          ZELOS2150GV.kappaLib.kappaGetFrame(self.device.handle, byref(status), frameBuffer)
          if status.value==3:
            print('get frame timeout!')
          else:
            frameStreamCounter = frameStreamCounter + 1   #reset according to Stream decimation
            frameTotalCounter = frameTotalCounter + 1     #never reset

          if isExternal==0:  #internal clock source -> S.O. timestamp
             timestamp=datetime.now()
             frameTime=int(mktime(timestamp.timetuple())*1000)+int(timestamp.microsecond/1000)  #ms
             if frameTotalCounter==1:
               prevFrameTime=int(mktime(timestamp.timetuple())*1000)+int(timestamp.microsecond/1000)
               deltaT=frameTime-prevFrameTime
               totFrameTime=0
             else:
               deltaT=frameTime-prevFrameTime
               prevFrameTime=frameTime
               totFrameTime=totFrameTime+deltaT
             if (deltaT<framePeriod) and (deltaT>5):
               sleep(float(framePeriod-deltaT)/1000.0)


          #if( (isStorage==1) and ((status.value==1) or (status.value==2)) ):    #frame complete or incomplete
          if( (isStorage==1) and (status.value==1)  ):  #frame complete
            ZELOS2150GV.mdsLib.camSaveFrame(frameBuffer, self.width, self.height, c_float(float(totFrameTime)/1000.0), c_int(14), treePtr, self.device.frames.getNid(), timebaseNid, c_int(frameTotalCounter-1), 0, 0, 0)
            self.idx = self.idx + 1
            print('saved frame idx:', self.idx)

          if(isStreaming==1):
            if(tcpStreamHandle.value==-1):
              fede=ZELOS2150GV.streamLib.camOpenTcpConnection(streamPort, byref(tcpStreamHandle), self.width, self.height)
              if(fede!=-1):
                print('\nConnected to FFMPEG on localhost:',streamPort.value)
            if(frameStreamCounter == skipFrameStream+1):
              frameStreamCounter=0
            if(frameStreamCounter == 0 and tcpStreamHandle.value!=-1):
              ZELOS2150GV.streamLib.camFrameTo8bit(frameBuffer, self.width, self.height, frame8bit, autoScale, byref(lowLim), byref(highLim), minLim, maxLim)
              ZELOS2150GV.streamLib.camSendFrameOnTcp(byref(tcpStreamHandle), self.width, self.height, frame8bit)

        #endwhile
        ZELOS2150GV.streamLib.camCloseTcpConnection(byref(tcpStreamHandle))
        #print 'Stream Tcp Connection Closed'

        status = ZELOS2150GV.kappaLib.kappaStopAcquisition(self.device.handle, self.hBuffers)
        if status != 0:
          Data.execute('DevLogErr($1,$2)', self.device.getNid(), 'Cannot stop camera acquisition')

        #close device and remove from info
        ZELOS2150GV.kappaLib.kappaClose(self.device.handle)
        self.device.removeInfo()
        raise mdsExceptions.TclFAILED_ESSENTIAL
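The skipFrameStream arithmetic near the top of run() decimates streaming to roughly 25 fps regardless of the acquisition rate. A standalone sketch of that calculation, assuming frame_period is the acquisition period in seconds:

def stream_decimation(frame_period_s, target_fps=25.0):
    acq_fps = 1.0 / frame_period_s          # frames acquired per second
    skip = int(acq_fps / target_fps) - 1    # frames to skip between streamed frames
    return max(skip, 0)                     # never skip a negative number of frames

print(stream_decimation(0.010))   # 100 fps acquisition -> skip 3, stream every 4th frame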
示例#50
0
 def write_float(self, variable, value):
     self.testlib.escreve_float(self.variable_float[variable],
                                ctypes.c_float(value))
示例#51
0
 def n_crit(self, value):
     self._lib.set_n_crit(byref(c_float(value)))
示例#52
0
    def heatmap(self,
                points,
                dotsize=150,
                opacity=128,
                size=(1024, 1024),
                scheme="classic",
                area=None,
                weighted=0,
                srcepsg=None,
                dstepsg='EPSG:3857'):
        """
        points   -> A representation of the points (x,y values) to process.
                    Can be a flattened array/tuple or any combination of 2-dimensional
                    array or tuple iterables, e.g. [x1,y1,x2,y2], [(x1,y1),(x2,y2)], etc.
                    If weights are being used there are expected to be 3 'columns'
                    in the 2-dimensional iterable or a multiple of 3 values in the
                    flat array/tuple, e.g. (x1,y1,z1,x2,y2,z2), ([x1,y1,z1],[x2,y2,z2]) etc.
                    The third (weight) value can be anything but it is
                    best to have a normalised weight between 0 and 1.
                    For best performance, if convenient use a flattened array 
                    as this is what is used internally and requires no conversion.
        dotsize  -> the size of a single coordinate in the output image in
                    pixels, default is 150px.  Tweak this parameter to adjust
                    the resulting heatmap.
        opacity  -> the strength of a single coordinate in the output image.
                    Tweak this parameter to adjust the resulting heatmap.
        size     -> tuple with the width, height in pixels of the output PNG
        scheme   -> Name of color scheme to use to color the output image.
                    Use schemes() to get list.  (images are in source distro)
        area     -> Specify bounding coordinates of the output image. Tuple of
                    tuples: ((minX, minY), (maxX, maxY)).  If None or unspecified,
                    these values are calculated based on the input data.
        weighted -> Is the data weighted (0 or 1)
        srcepsg  -> epsg code of the source, set to None to ignore.
                    If using KML output make sure this is set, otherwise either the image
                    or the overlay coordinates will be misaligned.
        dstepsg  -> epsg code of the destination, ignored if srcepsg is not set.
                    Defaults to EPSG:3857 (Cylindrical Mercator). 
                    Due to linear interpolation in heatmap.c it only makes sense to use linear 
                    output projections. If outputting to KML for google earth client overlay use 
                    EPSG:4087 (World Equidistant Cylindrical).
        """
        self.dotsize = dotsize
        self.opacity = opacity
        self.size = size
        self.points = points
        self.weighted = weighted
        self.srcepsg = srcepsg
        self.dstepsg = dstepsg

        if self.srcepsg and not use_pyproj:
            raise Exception('srcepsg entered but pyproj is not available')

        if area is not None:
            self.area = area
            self.override = 1
        else:
            self.area = ((0, 0), (0, 0))
            self.override = 0

        #convert area for heatmap.c if required
        ((east, south), (west, north)) = self.area
        if use_pyproj and self.srcepsg is not None and self.srcepsg != self.dstepsg:
            source = pyproj.Proj(init=self.srcepsg)
            dest = pyproj.Proj(init=self.dstepsg)
            (east, south) = pyproj.transform(source, dest, east, south)
            (west, north) = pyproj.transform(source, dest, west, north)

        if scheme not in self.schemes():
            tmp = "Unknown color scheme: %s.  Available schemes: %s" % (
                scheme, self.schemes())
            raise Exception(tmp)

        arrPoints = self._convertPoints()
        arrScheme = self._convertScheme(scheme)
        arrFinalImage = self._allocOutputBuffer()

        ret = self._heatmap.tx(arrPoints, len(arrPoints), size[0], size[1],
                               dotsize, arrScheme, arrFinalImage, opacity,
                               self.override, ctypes.c_float(east),
                               ctypes.c_float(south), ctypes.c_float(west),
                               ctypes.c_float(north), weighted)

        if not ret:
            raise Exception("Unexpected error during processing.")

        self.img = Image.frombuffer('RGBA', (self.size[0], self.size[1]),
                                    arrFinalImage, 'raw', 'RGBA', 0, 1)
        return self.img
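A hedged usage sketch for the method above. Only heatmap() itself appears in the snippet, so the class name, constructor and the sample coordinates here are assumptions:

hm = Heatmap()                                        # assumed class exposing heatmap()
points = [12.49, 41.89, 12.50, 41.90, 12.51, 41.91]   # flattened x1,y1,x2,y2,... (lon/lat)
img = hm.heatmap(points,
                 dotsize=100,
                 size=(512, 512),
                 scheme="classic",
                 srcepsg="EPSG:4326",                 # geographic input
                 dstepsg="EPSG:3857")                 # web-mercator output, as documented above
img.save("heatmap.png")                               # img is the PIL Image returned above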
示例#53
0
 def xtr(self):
     """tuple(float, float): Top and bottom flow trip x/c locations."""
     xtr_top = c_float()
     xtr_bot = c_float()
     self._lib.get_xtr(byref(xtr_top), byref(xtr_bot))
     return xtr_top.value, xtr_bot.value
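Wrappers like this pass scalar results by reference with byref(c_float()). A minimal sketch, assuming a shared library exposing the same get_xtr export, of declaring the pointer signature so ctypes can reject wrongly typed arguments:

from ctypes import CDLL, POINTER, byref, c_float

lib = CDLL('./libxfoil.so')          # assumed path; the real code keeps this on self._lib

lib.get_xtr.argtypes = [POINTER(c_float), POINTER(c_float)]   # float* out-parameters
lib.get_xtr.restype = None

xtr_top, xtr_bot = c_float(), c_float()
lib.get_xtr(byref(xtr_top), byref(xtr_bot))
print(xtr_top.value, xtr_bot.value)  # .value holds the Python floats written by the library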
示例#54
0
    def init(self):
      global kappaLib
      self.restoreInfo()
      self.frames.setCompressOnPut(False)

      status = ZELOS2150GV.kappaLib.kappaSetColorCoding(self.handle, c_int(6))   #Y14
      if status < 0:
        Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot Set Color Coding')
        raise mdsExceptions.TclFAILED_ESSENTIAL

###Exposure Mode
      if self.frame_sync.data() == 'EXTERNAL':
        status = ZELOS2150GV.kappaLib.kappaSetExposureMode(self.handle, c_int(3)) #3 = ZELOS_ENUM_EXPOSUREMODE_RESETRESTART
      else:
        status = ZELOS2150GV.kappaLib.kappaSetExposureMode(self.handle, c_int(2)) #2 = ZELOS_ENUM_EXPOSUREMODE_FREERUNNINGSEQUENTIAL

      if status < 0:
        Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot Set Exposure Mode')
        raise mdsExceptions.TclFAILED_ESSENTIAL

###Exposure
      autoExp = self.auto_exp.data()
      if autoExp == 'YES':
        status = ZELOS2150GV.kappaLib.kappaSetAET(self.handle, c_int(1))
        if status < 0:
          Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot Set AET On')
          raise mdsExceptions.TclFAILED_ESSENTIAL
        status = ZELOS2150GV.kappaLib.kappaSetAutoExposureLevel(self.handle, c_int(self.exp_lev.data()))
        if status < 0:
          Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot Set Auto Exposure Level')
          raise mdsExceptions.TclFAILED_ESSENTIAL
      else:
        status = ZELOS2150GV.kappaLib.kappaSetAET(self.handle, c_int(0))
        if status < 0:
          Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot Set AET Off')
          raise mdsExceptions.TclFAILED_ESSENTIAL
        status = ZELOS2150GV.kappaLib.kappaSetExposure(self.handle, c_float(self.exp_time.data()))
        if status < 0:
          Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot Set Exposure Time')
          raise mdsExceptions.TclFAILED_ESSENTIAL

###Gain
      autoGain = self.auto_gain.data()
      if autoGain == 'YES':
        status = ZELOS2150GV.kappaLib.kappaSetAGC(self.handle, c_int(1))
        if status < 0:
          Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot Set AGC On')
          raise mdsExceptions.TclFAILED_ESSENTIAL
      else:
        status = ZELOS2150GV.kappaLib.kappaSetAGC(self.handle, c_int(0))
        if status < 0:
          Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot Set AGC Off')
          raise mdsExceptions.TclFAILED_ESSENTIAL
        status = ZELOS2150GV.kappaLib.kappaSetGain(self.handle, c_int(self.gain_lev.data()))
        if status < 0:
          Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot Set Gain')
          raise mdsExceptions.TclFAILED_ESSENTIAL

###Slow Scan
      slowScan = self.slow_scan.data()
      if slowScan == 'YES':
       status = ZELOS2150GV.kappaLib.kappaSetSlowScan(self.handle, c_int(1))
      else:
       status = ZELOS2150GV.kappaLib.kappaSetSlowScan(self.handle, c_int(0))
      if status < 0:
        Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot Set Slow Scan')
        raise mdsExceptions.TclFAILED_ESSENTIAL

###Frame Area
      status = ZELOS2150GV.kappaLib.kappaSetReadoutArea(self.handle, c_int(self.frame_x.data()),c_int(self.frame_y.data()),c_int(self.frame_width.data()),c_int(self.frame_height.data()))
      if status < 0:
        Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot Set Readout Area')
        raise mdsExceptions.TclFAILED_ESSENTIAL

###Measure Area
      status = ZELOS2150GV.kappaLib.kappaSetMeasureWindow(self.handle, c_int(self.meas_x.data()),c_int(self.meas_y.data()),c_int(self.meas_width.data()),c_int(self.meas_height.data()))
      if status < 0:
        Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot Set Measure Window')
        raise mdsExceptions.TclFAILED_ESSENTIAL

###Binning
      status = ZELOS2150GV.kappaLib.kappaSetBinning(self.handle, c_int(self.hor_binning.data()), c_int(self.ver_binning.data()))
      if status < 0:
        Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot set horizontal or vertical binning')
        raise mdsExceptions.TclFAILED_ESSENTIAL
      self.saveInfo()
      return
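init() passes several magic numbers whose meanings only live in the comments (color coding 6 = Y14, exposure modes 3/2, AET/AGC 1/0). A small readability sketch that names them once; the values are copied from the comments above and should be checked against the Kappa SDK headers before use:

ZELOS_COLORCODING_Y14 = 6                       # kappaSetColorCoding
ZELOS_EXPOSUREMODE_RESETRESTART = 3             # external frame sync
ZELOS_EXPOSUREMODE_FREERUNNINGSEQUENTIAL = 2    # internal free running
AET_ON, AET_OFF = 1, 0                          # auto exposure (kappaSetAET)
AGC_ON, AGC_OFF = 1, 0                          # auto gain (kappaSetAGC)

# e.g. ZELOS2150GV.kappaLib.kappaSetColorCoding(self.handle, c_int(ZELOS_COLORCODING_Y14))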
示例#55
0
 def Re(self, value):
     self._lib.set_reynolds(byref(c_float(value)))
示例#56
0
 def xtr(self, value):
     self._lib.set_xtr(byref(c_float(value[0])), byref(c_float(value[1])))
"""ctypes single-value "parameter" arguments (e.g. byref)
"""
REGISTRY_NAME = 'ctypeparameter'
import ctypes, _ctypes

from OpenGL import constants, constant
from OpenGL.arrays import formathandler
import operator

c = ctypes.c_float(0)
ParamaterType = ctypes.byref(c).__class__
DIRECT_RETURN_TYPES = (
    ParamaterType,
    # these pointer types are implemented as _SimpleCData
    # despite being pointers...
    ctypes.c_void_p,
    ctypes.c_char_p,
    ctypes.c_wchar_p,
)
try:
    del c
except NameError as err:
    pass


class CtypesParameterHandler(formathandler.FormatHandler):
    """Ctypes Paramater-type-specific data-type handler for OpenGL"""
    isOutput = True
    HANDLED_TYPES = (ParamaterType, _ctypes._SimpleCData)

    def from_param(cls, value, typeCode=None):
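The module above keys its handling on the type returned by ctypes.byref(), which is only reachable by instantiating one (hence the throwaway c there); the snippet is cut off before from_param's body. A standalone check of that idea, not PyOpenGL's actual implementation:

import ctypes

# byref() returns a lightweight CArgObject whose class has no public name,
# so it has to be recovered from an instance, as the module above does.
ByRefType = type(ctypes.byref(ctypes.c_float(0)))

value = ctypes.c_float(1.5)
print(isinstance(ctypes.byref(value), ByRefType))   # True: already a byref "parameter"
print(isinstance(value, ByRefType))                 # False: a plain c_float instance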
示例#58
0
 def M(self, value):
     self._lib.set_mach(byref(c_float(value)))
示例#59
0
def main():
	
	read_mode_top = 4	# readout mode options: 0 Full Vertical binning;	1 Multi-Track;	2 Random-Track;	 3 Single-Track;	4 Image;


	# Load the atmcd64d.dll driver library
	andor_dll = ctypes.cdll.atmcd64d


	# Initialize camera
	aBuffer = ctypes.c_char_p()		# The buffer tells the initialize function where the driver files are. Currently, they're in the same folder as this .py file

	error_value = andor_dll.Initialize(aBuffer)
	check_success(error_value, "Initialize")


	# Determine size (in pixels of camera)
	gblXPixels = ctypes.c_int()		# Total number of horizontal pixels
	gblYPixels = ctypes.c_int()		# Total number of vertical pixels

	error_value = andor_dll.GetDetector(ctypes.byref(gblXPixels),ctypes.byref(gblYPixels))
	check_success(error_value,"GetDetector")


	# Set vertical shift speed to recommended value
	vertical_shift_index = ctypes.c_int()	# the index to access specific vertical shift speeds 
	vertical_speed = ctypes.c_float()	# speed of the vertical speed shift in microseconds per pixel shift

	error_value = andor_dll.GetFastestRecommendedVSSpeed(ctypes.byref(vertical_shift_index),ctypes.byref(vertical_speed))
	check_success(error_value,"Get Fastest Recommended Vertical Shift Speed")
	error_value = andor_dll.SetVSSpeed(vertical_shift_index)
	check_success(error_value,"Set Vertical Shift Speed")


	# Set horizontal shift speed to the maximum
	horizontal_shift_index = ctypes.c_int(0)		# the index to access specific horizontal shift speeds
	AD_converter_index = ctypes.c_int()				# the specific index to access a given A-D converter
	number_AD = ctypes.c_int(0)						# the number of A-D converters in the camera
	number_speeds = ctypes.c_int()					# number of speeds available
	horizontal_speed = ctypes.c_float()				# horizontal shift speed
	max_horizontal_speed = ctypes.c_float(0)		# maximum horizontal speed

	error_value = andor_dll.GetNumberADChannels(ctypes.byref(number_AD))
	check_success(error_value,"Get Number AD Channels")

	for each_AD in range(number_AD.value):
		error_value = andor_dll.GetNumberHSSpeeds(ctypes.c_int(each_AD), ctypes.c_int(0), ctypes.byref(number_speeds))
		check_success(error_value, "Get Number Horizontal Shift Speeds")
		for each_speed_index in range(number_speeds.value):
			error_value = andor_dll.GetHSSpeed(ctypes.c_int(each_AD),ctypes.c_int(0),ctypes.c_int(each_speed_index),ctypes.byref(horizontal_speed))
			check_success(error_value,"Get Horizontal Shift Speed")
			if (horizontal_speed.value > max_horizontal_speed.value):
				max_horizontal_speed.value = horizontal_speed.value
				horizontal_shift_index = ctypes.c_int(each_speed_index)
				AD_converter_index = ctypes.c_int(each_AD)
	
	error_value = andor_dll.SetADChannel(AD_converter_index)
	check_success(error_value,"Set AD Channel")

	error_value = andor_dll.SetHSSpeed(ctypes.c_int(0), horizontal_shift_index)
	check_success(error_value, "Set Horizontal Speed Index")

	# Turn the camera cooler on
	error_value = andor_dll.CoolerON()
	check_success(error_value,"Turn Cooler On")


	# Check to make sure cooler is on
	cooler_on = ctypes.c_int()

	error_value = andor_dll.IsCoolerOn(ctypes.byref(cooler_on))
	check_success(error_value, "Check if cooler is on")
	if (cooler_on.value != 1):
		print("Error: Cooler not on", "Exiting...")
		return False	#TODO: exit code or something


	# Set the readout mode of the camera 
	read_mode = ctypes.c_int(read_mode_top)		# readout mode options: 0 Full Vertical binning;	1 Multi-Track;	2 Random-Track;	 3 Single-Track;	4 Image;
	error_value = andor_dll.SetReadMode(read_mode)
	check_success(error_value,"Set Read Mode")


	# Set the acquisition mode
	acquisition_mode = ctypes.c_int(1)		# acquisition mode options: 1 Single scan;	2 Accumulate;	3 Kinetics;	 4 Fast Kinetics;	5 Run till abort;
	error_value = andor_dll.SetAcquisitionMode(acquisition_mode)
	check_success(error_value,"Set Acquisition Mode")


	# Set exposure time
	exposure_time = ctypes.c_float(0.1)		# time in seconds
	error_value = andor_dll.SetExposureTime(exposure_time)
	check_success(error_value, "Set Exposure Time")

	
	# Set trigger mode
	trigger_mode = ctypes.c_int(0)	# trigger mode options:	0 internal;	1 external;	6 external start;	7 external exposure (bulb);	9 external FVB EM;	10 software trigger;	12 external charge shifting;
	error_value = andor_dll.SetTriggerMode(trigger_mode)
	check_success(error_value, "Set Trigger Mode")

	# TODO Set up accumulation and kinetic capture & probs not video
	"""
	// only needed for accumulation acquisition 

	//float accumulation_cycle_time = .1; // seconds
	//errorValue = SetAccumulationCycleTime(accumulation_cycle_time);
	//if (errorValue != DRV_SUCCESS) {
	//std::cout << "Set accumulation cycle time Error\n";
	//std::cout << "Error: " << errorValue << "\n";
	//}
	
	//Only needed for kinetic capture

	//errorValue = SetBaselineClamp(1);
	//if (errorValue != DRV_SUCCESS) {
	//std::cout << "Set Baseline Clamp Error\n";
	//std::cout << "Error: " << errorValue << "\n";
	//}
	
	"""

	
	# Determine the actual times the camera is using for acquisition
	actual_exposure_time = ctypes.c_float()
	actual_accumulate_time = ctypes.c_float()
	actual_kinetic_time = ctypes.c_float()

	error_value = andor_dll.GetAcquisitionTimings(ctypes.byref(actual_exposure_time),ctypes.byref(actual_accumulate_time),ctypes.byref(actual_kinetic_time))
	check_success(error_value, "Get Acquisition Timings")

	
	# Wait for two seconds to allow the camera to calibrate fully before starting acquisition
	time.sleep(2)	

	
	# Make sure the camera is in an idle state before starting an acquisition
	camera_status = ctypes.c_int()

	error_value = andor_dll.GetStatus(ctypes.byref(camera_status))
	check_success(error_value, "Get Camera Status")
	while (camera_status.value != 20073):	
		error_value = andor_dll.GetStatus(ctypes.byref(camera_status))
		check_success(error_value, "Get Camera Status")
		#print("Camera Status is ", camera_status.value)


	# Set the horizontal and vertical binning and the area of the image to be captured
	horizontal_binning = ctypes.c_int(1)		# Number of pixels to bin horizontally
	vertical_binning = ctypes.c_int(1)			# Number of pixels to bin vertically
	horizontal_start = ctypes.c_int(1)			# Start column of image to be taken (inclusive)
	horizontal_end = gblXPixels					# End column of image to be taken (inclusive)
	vertical_start = ctypes.c_int(1)			# Start row of image to be taken (inclusive)
	vertical_end = gblYPixels					# End row of image to be taken (inclusive)

	error_value = andor_dll.SetImage(horizontal_binning, vertical_binning, horizontal_start, horizontal_end, vertical_start, vertical_end);
	check_success(error_value, "Set Image")


	# Start the acquisition process 
	error_value = andor_dll.StartAcquisition()
	acquiring = check_success(error_value, "Start Acquisition")
	if (acquiring == False):
		andor_dll.AbortAcquisition()
	else:
		print("Starting Acquisition")


		# Wait until the acquisition is complete
		error_value = andor_dll.GetStatus(ctypes.byref(camera_status))
		check_success(error_value, "Get Camera Status")
		while (camera_status.value != 20073): 
			error_value = andor_dll.GetStatus(ctypes.byref(camera_status))
			check_success(error_value, "Get Camera Status")
			#print("Camera Status is ", camera_status.value)

		# Get the image data from the camera
		size = ctypes.c_int(gblXPixels.value*gblYPixels.value)
		image_pointer = ctypes.cast(ctypes.create_string_buffer( size.value*ctypes.sizeof(ctypes.c_long()) ),ctypes.POINTER(ctypes.c_long))

		error_value = andor_dll.GetAcquiredData(image_pointer, size)
		check_success(error_value, "Get Acquired Data")


		# Transfer the image from a pointer to a numpy array
		image = np.zeros((gblYPixels.value,gblXPixels.value))
		for x in range(gblXPixels.value):
			for y in range(gblYPixels.value):
				image[y,x] = image_pointer[x + y*gblXPixels.value]

		plt.imsave('filename.png', image, cmap=cm.gray)

	# Shut down camera
	error_value = andor_dll.ShutDown()
	check_success(error_value, "Shut down")
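main() leans on a check_success() helper that the snippet never defines. A minimal sketch of what it might look like, assuming the Andor SDK convention that DRV_SUCCESS == 20002; the real helper may log differently:

DRV_SUCCESS = 20002   # Andor SDK "operation succeeded" return code

def check_success(error_value, operation):
    # Report failures and return a bool so callers (e.g. StartAcquisition
    # above) can decide whether to abort.
    if error_value != DRV_SUCCESS:
        print("Error:", operation, "returned code", error_value)
        return False
    return True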
示例#60
0
assert_pico_ok(status["trigger"])

# Set number of pre and post trigger samples to be collected
preTriggerSamples = 2500
postTriggerSamples = 2500
maxSamples = preTriggerSamples + postTriggerSamples

# Get timebase information
# handle = chandle
timebase = 8
# noSamples = maxSamples
# pointer to timeIntervalNanoseconds = ctypes.byref(timeIntervalns)
oversample = 1
# pointer to maxSamples = ctypes.byref(returnedMaxSamples)
# segment index = 0
timeIntervalns = ctypes.c_float()
returnedMaxSamples = ctypes.c_int32()
status["getTimebase"] = ps.ps5000GetTimebase(chandle, timebase, maxSamples,
                                             ctypes.byref(timeIntervalns),
                                             oversample,
                                             ctypes.byref(returnedMaxSamples),
                                             0)
assert_pico_ok(status["getTimebase"])

# Run block capture
# handle = chandle
# number of pre-trigger samples = preTriggerSamples
# number of post-trigger samples = postTriggerSamples
# timebase = 8 = 80 ns (see Programmer's guide for more information on timebases)
# oversample = 1
# time indisposed ms = None (not needed in the example)
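The snippet breaks off before the capture is actually started. In the picosdk ps5000 block examples the next step is ps5000RunBlock; a hedged sketch of the continuation, with the argument order taken from that example pattern and worth verifying against the ps5000 programmer's guide:

# Assumed signature: ps5000RunBlock(handle, preTriggerSamples, postTriggerSamples,
#                                   timebase, oversample, timeIndisposedMs,
#                                   segmentIndex, lpReady, pParameter)
status["runBlock"] = ps.ps5000RunBlock(chandle, preTriggerSamples,
                                       postTriggerSamples, timebase, oversample,
                                       None, 0, None, None)
assert_pico_ok(status["runBlock"])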