import time

import numpy as np


def decode(data):
    """
    Decode the binary data into event time (absolute, highest precision),
    channel number and special bit. See HH documentation about the details.
    """

    if len(data) == 0:
        return np.zeros((0, 3), dtype='u8')

    t0 = time.time()
    length = data.shape[0]
    print('* decode {} events... '.format(length), end='')

    event_time = np.bitwise_and(data, 2**25-1)
    channel = np.bitwise_and(np.right_shift(data, 25), 2**6 - 1)
    special = np.bitwise_and(np.right_shift(data, 31), 1)

    # we want to convert time that is supplemented by overflows into absolute
    # time in ps -- this requires 64 bit precision! ('u8')
    ret = np.zeros((length, 3), dtype='u8')
    ret[:,0] = event_time
    ret[:,1] = channel
    ret[:,2] = special

    t1 = time.time() - t0
    print('done ({:.2f} s).'.format(t1))
    return ret
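A quick usage sketch (illustrative values, not part of the original snippet): build two synthetic 32-bit records with the layout decoded above (bits 0-24 time, bits 25-30 channel, bit 31 special) and decode them.

records = np.array([(1 << 31) | (3 << 25) | 12345,
                    (1 << 25) | 678], dtype='u4')
print(decode(records))
# columns: event_time, channel, special
# [[12345     3     1]
#  [  678     1     0]]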
Example #2
File: pywav.py Project: MaximOtto/wavepy
    def writeframesraw(self, data):
        nframes = len(data) // (self._nchannels)

        if self._wFormatTag == WAVE_FORMAT_PCM and 2 == self._sampwidth:
            data_to_write = np.multiply(data, 0x7fff)
            data_to_write.astype(np.int16).tofile(self._file)

        elif self._wFormatTag == WAVE_FORMAT_PCM and 4 == self._sampwidth:
            data_to_write = np.multiply(data, 0x7fffffff)
            data_to_write.astype(np.int32).tofile(self._file)

        elif self._wFormatTag == WAVE_FORMAT_PCM and 3 == self._sampwidth:
            data = np.multiply(data, 0x7fffffff)
            data = data.astype(np.int32)
            out_bytes = np.zeros(data.size * 3, dtype=np.uint8)

            out_bytes[0::3] = np.right_shift(data, 8)
            out_bytes[1::3] = np.right_shift(data, 16)
            out_bytes[2::3] = np.right_shift(data, 24)

            out_bytes.tofile(self._file)

        elif self._wFormatTag == WAVE_FORMAT_IEEE_FLOAT and 4 == self._sampwidth:
            data.tofile(self._file)

        else:
            print('oops: unsupported format/sample width combination')

        self._datawritten = self._datawritten + (len(data) * self._sampwidth)
        self._nframeswritten = self._nframeswritten + nframes
def _split(data, flip=False):
    '''
    Helper function to take the 16-bit integer saved in the neural data file 
    and map it back to the three fields of the message type (see docs on 
    communication protocol for details)

    Parameters
    ----------
    data : np.ndarray 
        Integer data and timestamps as stored in the neural data file when messages were sent during experiment

    Returns
    -------
    np.ndarray
        Raw message data split into the fields (type, aux, "payload")
    '''
    # If the data is a 1D array, extract the timestamps and the raw event codes
    if len(data.shape) < 2:
        data = np.array(data[data['chan'] == 257][['ts', 'unit']].tolist())
    msgs = data[:,1].astype(np.int16)
    
    if not flip:
        msgs = ~msgs # bit-flip the binary messages
    msgtype = np.right_shift(np.bitwise_and(msgs, msgtype_mask), 8).astype(np.uint8)
    auxdata = np.right_shift(np.bitwise_and(msgs, auxdata_mask), 8).astype(np.uint8)
    rawdata = np.bitwise_and(msgs, rawdata_mask)
    return np.vstack([data[:,0], msgtype, auxdata, rawdata]).T
Example #4
def runCode():
    while True:
        global pixdata, json_data, image, multiplicationFactor
        FileName = "output.png"
        with Lepton() as l:
            a, _ = l.capture()
        cv2.normalize(a, a, 0, 65535, cv2.NORM_MINMAX)  # extend contrast

        np.right_shift(a, 8, a)  # fit data into 8 bits
        cv2.imwrite(FileName, np.uint8(a))
        image = Image.open(FileName)
        imageOrigin = Image.open(FileName)
        image = image.convert('RGB')
        pixdata = image.load()
        analyze()

        imageOrigin = imageOrigin.resize((80 * multiplicationFactor, 60 * multiplicationFactor))
        imageOrigin.save(FileName)

        with open(FileName, 'rb') as f:
            imdata = f.read()

        json_blobs = json.dumps(blobs, default=obj_dict)
        outjson = {
            "blobs": json.loads(json_blobs),
            "img": base64.b64encode(imdata).decode('ascii'),  # needs: import base64
            "multiplicationFactor": multiplicationFactor
        }
        json_data = json.dumps(outjson)
Example #5
 def take_still(self, pic_path):
     #TODO push the spi specifics into config parameters
     with pylepton.Lepton("/dev/spidev0.1") as l:
         a,_ = l.capture()
         cv2.normalize(a, a, 0, 65535, cv2.NORM_MINMAX)
         np.right_shift(a, 8, a)
         cv2.imwrite(pic_path, np.uint8(a))
Example #6
def unpack_4byte_IBM(file, count, endian='>'):
    """
    Unpacks 4 byte IBM floating points.
    """
    # Read as 4 byte integer so bit shifting works.
    data = np.frombuffer(file.read(count * 4), dtype='int32')
    # Swap the byteorder if necessary.
    if BYTEORDER != endian:
        data = data.byteswap()
    # See http://mail.scipy.org/pipermail/scipy-user/2009-January/019392.html
    # XXX: Might need check for values out of range:
    # http://bytes.com/topic/c/answers/
    #         221981-c-code-converting-ibm-370-floating-point-ieee-754-a
    sign = np.bitwise_and(np.right_shift(data, 31), 0x01)
    sign = np.require(sign, 'float32')
    exponent = np.bitwise_and(np.right_shift(data, 24), 0x7f)
    mantissa = np.bitwise_and(data, 0x00ffffff)
    # Force single precision.
    mantissa = np.require(mantissa, 'float32')
    mantissa /= 0x1000000
    # Do the following calculation in a weird way to avoid autocasting to
    # float64.
    # data = (1.0 - 2.0 * sign) * mantissa * 16.0 ** (exponent - 64.0)
    sign *= -2.0
    sign += 1.0
    mantissa *= 16.0 ** (exponent - 64)
    mantissa *= sign
    return mantissa
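The decomposition can be sanity-checked against a known bit pattern without touching a file (a hedged sketch; 0x42640000 is the IBM-370 encoding of 100.0: sign 0, exponent 0x42, mantissa 0x640000):

import numpy as np

data = np.array([0x42640000], dtype='int32')
sign = np.bitwise_and(np.right_shift(data, 31), 0x01).astype('float32')
exponent = np.bitwise_and(np.right_shift(data, 24), 0x7f)
mantissa = np.bitwise_and(data, 0x00ffffff).astype('float32') / 0x1000000
print((1.0 - 2.0 * sign) * mantissa * 16.0 ** (exponent - 64))  # [100.]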
Example #7
def load_packed_data_3_32(infile, sample, readlen):
    start = (sample // 3) * 4
    offset = sample % 3

    infile.seek(start)

    # we need another word in case offset != 0
    needed = int(np.ceil(readlen * 3 / 4) * 4) + 4

    inbuf = infile.read(needed)
    indata = np.frombuffer(inbuf, 'uint32', len(inbuf) // 4)

    if len(indata) < needed // 4:
        return None

    unpacked = np.zeros(len(indata) * 3, dtype=np.int16)

    # By using strides the unpacked data can be loaded with no additional copies
    np.bitwise_and(indata, 0x3ff, out = unpacked[0::3])
    # hold the shifted bits in its own array to avoid an allocation
    tmp = np.right_shift(indata, 10)
    np.bitwise_and(tmp, 0x3ff, out = unpacked[1::3])
    np.right_shift(indata, 20, out = tmp)
    np.bitwise_and(tmp, 0x3ff, out = unpacked[2::3])

    return unpacked[offset:offset + readlen]
Example #8
def amagacolor(secret, red, green, blue):
    # clear the two least-significant bits of each carrier channel
    red = np.left_shift(np.right_shift(red, 2), 2)
    green = np.left_shift(np.right_shift(green, 2), 2)
    blue = np.left_shift(np.right_shift(blue, 2), 2)

    # hide two bits of the secret image per channel in the freed LSBs
    secretred, secretgreen, secretblue = separaimatge(secret, 2, 2, 2)
    return red + secretred, green + secretgreen, blue + secretblue
Example #9
def process_t3records(t3records, time_bit=10, dtime_bit=15,
                      ch_bit=6, special_bit=True, ovcfunc=None):
    """Extract the different fields from the raw t3records array (.ht3).

    Returns:
        3 arrays representing detectors, timestamps and nanotimes.
    """
    if special_bit:
        ch_bit += 1
    assert ch_bit <= 8
    assert dtime_bit <= 16

    detectors = np.bitwise_and(
        np.right_shift(t3records, time_bit + dtime_bit), 2**ch_bit - 1).astype('uint8')
    nanotimes = np.bitwise_and(
        np.right_shift(t3records, time_bit), 2**dtime_bit - 1).astype('uint16')

    assert time_bit <= 16
    dt = np.dtype([('low16', 'uint16'), ('high16', 'uint16')])

    t3records_low16 = np.frombuffer(t3records, dt)['low16']     # View
    timestamps = t3records_low16.astype(np.int64)               # Copy
    np.bitwise_and(timestamps, 2**time_bit - 1, out=timestamps)

    overflow_ch = 2**ch_bit - 1
    overflow = 2**time_bit
    if ovcfunc is None:
        ovcfunc = _correct_overflow
    ovcfunc(timestamps, detectors, overflow_ch, overflow)
    return detectors, timestamps, nanotimes
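A minimal decoding sketch under the default bit layout (10-bit timestamp, 15-bit nanotime, 7-bit detector once the special bit is folded in), with the overflow correction stubbed out; the real ovcfunc must have the signature f(timestamps, detectors, overflow_ch, overflow):

rec = np.array([(2 << 25) | (7 << 10) | 100], dtype='uint32')
det, ts, nt = process_t3records(rec, ovcfunc=lambda *args: None)
print(det, ts, nt)  # [2] [100] [7]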
Example #10
File: convert.py Project: metno/mipp
def _dec10216(inbuf):
    inbuf = np.frombuffer(inbuf, dtype=np.uint8)
    arr10 = inbuf.astype(np.uint16)
    arr16 = np.zeros((len(arr10) // 5 * 4,), dtype=np.uint16)
    arr10_len = (len(arr16) * 5) // 4
    arr10 = arr10[:arr10_len]  # adjust size
    """
    /*
     * pack 4 10-bit words in 5 bytes into 4 16-bit words
     * 
     * 0       1       2       3       4       5
     * 01234567890123456789012345678901234567890
     * 0         1         2         3         4
     */      
    ip = &in_buffer[i];
    op = &out_buffer[j];
    op[0] = ip[0]*4 + ip[1]/64;
    op[1] = (ip[1] & 0x3F)*16 + ip[2]/16;
    op[2] = (ip[2] & 0x0F)*64 + ip[3]/4;
    op[3] = (ip[3] & 0x03)*256 +ip[4];
    """
    arr16.flat[::4] = np.left_shift(arr10[::5], 2) + \
        np.right_shift((arr10[1::5]), 6)
    arr16.flat[1::4] = np.left_shift((arr10[1::5] & 63), 4) + \
        np.right_shift((arr10[2::5]), 4)
    arr16.flat[2::4] = np.left_shift(arr10[2::5] & 15, 6) + \
        np.right_shift((arr10[3::5]), 2)
    arr16.flat[3::4] = np.left_shift(arr10[3::5] & 3, 8) + \
        arr10[4::5]    
    return arr16.tobytes()
def cloudMask(tiffFolder):
    """
    The cloudMask includes pixels identified as cloud, shadow, or snow in the Quality Assessment band (BQA).
    Masked pixels have a value of 0 and clear pixels have a value of 1. If there is no BQA, invoke Fmask.
    """
    return_value = True
    inputTiffName = os.path.join(tiffFolder, os.path.basename(tiffFolder)) + "_BQA.TIF"
    print("In cloudMask checking for: " + inputTiffName)
    outputTiffName = os.path.join(tiffFolder, os.path.basename(tiffFolder)) + "_MTLFmask.TIF"
    if os.path.exists(inputTiffName):
        [maskArray, geoTiffAtts]= LSFGeoTIFF.ReadableLSFGeoTIFF.fromFile(inputTiffName).asGeoreferencedArray() 
        # USGS documentation
        # shown here: https://landsat.usgs.gov/collectionqualityband
        # for example, decimal 2800 = binary 0000101011110000, i.e.:
        # bits 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
        #       0  0  0  0  1  0  1  0  1  1  1  1  0  0  0  0
        # high confidence cloud, bits 4, 5, and 6.
        cloud=np.equal(np.right_shift(np.bitwise_and(maskArray, 112), 4), 7)
        # high confidence cloud shadow, bits 7 and 8
        shadow=np.equal(np.right_shift(np.bitwise_and(maskArray, 496), 7), 3)
        # high confidence snow/ice, bits 9 and 10.
        snow=np.equal(np.right_shift(np.bitwise_and(maskArray, 1536), 9), 3)
        # if cloud, shadow, or snow mask is set for a pixel, mask it in newMask
        newMask = np.logical_not(np.logical_or(np.logical_or(cloud,shadow),snow))
        LSFGeoTIFF.Unsigned8BitLSFGeoTIFF.fromArray(newMask, geoTiffAtts).write(outputTiffName)

    else:
        print("Begin Fmask processing " + str(datetime.datetime.now()))
        return_value = runFmask(tiffFolder, fmaskShellCall)
        print("End Fmask processing " + str(datetime.datetime.now()))
    return return_value
Example #12
File: lytro.py Project: imageio/imageio
    def rearrange_bits(array):
        # Do bit rearrangement for the 10-bit lytro raw format
        # Normalize output to 1.0 as float64
        t0 = array[0::5]
        t1 = array[1::5]
        t2 = array[2::5]
        t3 = array[3::5]
        lsb = array[4::5]

        t0 = np.left_shift(t0, 2) + np.bitwise_and(lsb, 3)
        t1 = np.left_shift(t1, 2) + np.right_shift(np.bitwise_and(lsb, 12), 2)
        t2 = np.left_shift(t2, 2) + np.right_shift(np.bitwise_and(lsb, 48), 4)
        t3 = np.left_shift(t3, 2) + np.right_shift(np.bitwise_and(lsb, 192), 6)

        image = np.zeros(LYTRO_ILLUM_IMAGE_SIZE, dtype=np.uint16)
        image[:, 0::4] = t0.reshape(
            (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4)
        )
        image[:, 1::4] = t1.reshape(
            (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4)
        )
        image[:, 2::4] = t2.reshape(
            (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4)
        )
        image[:, 3::4] = t3.reshape(
            (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4)
        )

        # Normalize data to 1.0 as 64-bit float.
        # Division is by 1023 as the Lytro Illum saves 10-bit raw data.
        return np.divide(image, 1023.0).astype(np.float64)
Example #13
def get_mask(modQA, dilate=7):
    """ Return a mask image from an input QA band from M[OY]D09G[AQ]

    Args:
      modQA (ndarray): input QA/QC band image
      dilate (int, optional): pixels around aerosols and clouds to buffer

    Returns:
      mask (ndarray): output mask image with only good observations masked

    Porting note:
        MATLAB's 'bitshift' shifts to the left

    """
    # Identify land from water
    land = (np.mod(np.right_shift(modQA, 3) + 6, 8) // 7).astype(np.uint8)
    # Identify cloud
    cloud = (np.mod(modQA, 8) |  # unsure!
             np.mod(np.right_shift(modQA, 8), 4) |  # cirrus == '00' (none)
             np.mod(np.right_shift(modQA, 10), 2) |  # cloud mask == '0'
             np.mod(np.right_shift(modQA, 13), 2)) > 0  # adjacent to cloud

    cloud_buffer = scipy.ndimage.morphology.binary_dilation(
        cloud, structure=np.ones((dilate, dilate)))

    return ((cloud_buffer == 0) * land)
Example #14
  def testIntOps(self):
    for dtype in self.int_types:
      self._testBinary(
          gen_math_ops._truncate_div,
          np.array([3, 3, -1, -9, -8], dtype=dtype),
          np.array([2, -2, 7, 2, -4], dtype=dtype),
          expected=np.array([1, -1, 0, -4, 2], dtype=dtype))
      self._testSymmetricBinary(
          bitwise_ops.bitwise_and,
          np.array([0b1, 0b101, 0b1000], dtype=dtype),
          np.array([0b0, 0b101, 0b1001], dtype=dtype),
          expected=np.array([0b0, 0b101, 0b1000], dtype=dtype))
      self._testSymmetricBinary(
          bitwise_ops.bitwise_or,
          np.array([0b1, 0b101, 0b1000], dtype=dtype),
          np.array([0b0, 0b101, 0b1001], dtype=dtype),
          expected=np.array([0b1, 0b101, 0b1001], dtype=dtype))

      lhs = np.array([0, 5, 3, 14], dtype=dtype)
      rhs = np.array([5, 0, 7, 11], dtype=dtype)
      self._testBinary(
          bitwise_ops.left_shift, lhs, rhs,
          expected=np.left_shift(lhs, rhs))
      self._testBinary(
          bitwise_ops.right_shift, lhs, rhs,
          expected=np.right_shift(lhs, rhs))

      if dtype in [np.int8, np.int16, np.int32, np.int64]:
        lhs = np.array([-1, -5, -3, -14], dtype=dtype)
        rhs = np.array([5, 0, 1, 11], dtype=dtype)
        self._testBinary(
            bitwise_ops.right_shift, lhs, rhs,
            expected=np.right_shift(lhs, rhs))
Example #15
def countBits(values):
    # bit shifting routines are in numpy 1.4
    from numpy import array, left_shift, right_shift
    
    v = array(values).astype('uint32')
    
    # Bit counting for a 32 bit unsigned integer.
    # there is a fully generic version of this method at
    # http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
    # Binary magic numbers method, also found in pages 187-188 of Software Optimization Guide for AMD Athlon 64 and Opteron Processors.

    # The C version is:
    # v = v - ((v >> 1) & 0x55555555);                    # reuse input as temporary
    # v = (v & 0x33333333) + ((v >> 2) & 0x33333333);     # temp
    # c = ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24; # count

    fives  = 0x55555555
    threes = 0x33333333
    effs   = 0xF0F0F0F
    ones   = 0x1010101
    
    v = v - (right_shift(v, 1) & fives)                         # reuse input as temporary
    v = (v & threes) + (right_shift(v, 2) & threes)             # temp
    c = right_shift((v + right_shift(v, 4) & effs) * ones, 24)  # count

    return c
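A quick self-check of the popcount against Python's own bit counting (a hedged sketch; assumes the function above is in scope):

for v in (0, 1, 0b1011, 0xFFFFFFFF):
    assert countBits([v])[0] == bin(v).count('1')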
def GetImage(raw_values):
    STORED_IMAGE_NAME = "DetectionImage.jpg"
    #img = get_raw_values()                                  # Get raw image values
    cv2.normalize(raw_values, raw_values, 0, 65535, cv2.NORM_MINMAX)      # Normalize image
    np.right_shift(raw_values,8,raw_values)                               # Shift to 8 bit array
    cv2.imwrite(STORED_IMAGE_NAME, np.uint8(raw_values))           # Write the image to file
    simplecv_img = Image(STORED_IMAGE_NAME)                 # Take the image from file as SIMPLECV image
    return simplecv_img
Example #17
File: flir.py Project: DevinMui/FLIR
def capture(flip_v = False, device = "/dev/spidev0.1"):
	with Lepton(device) as l:
		a,_ = l.capture()
	if flip_v:
		cv2.flip(a,0,a)
	cv2.normalize(a, a, 0, 65535, cv2.NORM_MINMAX)
	np.right_shift(a, 8, a)
	return np.uint8(a)
Example #18
def lcls2float(t):
   if isinstance(t, numpy.ndarray):
      t0 = numpy.right_shift(t.astype(numpy.uint64), numpy.uint64(32))
   else:
      t0 = numpy.right_shift(numpy.uint64(t), numpy.uint64(32))
   t1 = numpy.bitwise_and(numpy.uint64(t), numpy.uint64(0xffffffff))  # low 32 bits
   t2 = t0 + t1*1.e-9
   return t2
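Usage sketch (assuming the packing the code implies: seconds in the high 32 bits, nanoseconds in the low 32):

t = numpy.uint64((1234 << 32) | 500000000)
print(lcls2float(t))  # 1234.5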
Example #19
def capture(flip_v = False, device = "/dev/spidev0.0"):
  with Lepton(device) as l:
    lepton_buf,_ = l.capture()
  if flip_v:
    cv2.flip(lepton_buf,0,lepton_buf)
  cv2.normalize(lepton_buf, lepton_buf, 0, 65535, cv2.NORM_MINMAX)
  np.right_shift(lepton_buf, 8, lepton_buf)
  return np.uint8(lepton_buf)
Example #20
def get_maia_ct_flag(ct_flag):
    #bits 4-8, start at 0 counting
    maia_ct_flag = (16*np.bitwise_and(np.right_shift(ct_flag,8),1) + 
                    8*np.bitwise_and(np.right_shift(ct_flag,7),1) +
                    4*np.bitwise_and(np.right_shift(ct_flag,6),1) + 
                    2*np.bitwise_and(np.right_shift(ct_flag,5),1) + 
                    1*np.bitwise_and(np.right_shift(ct_flag,4),1))
    return  maia_ct_flag 
Example #21
    def test_shift(self):
        from numpy import left_shift, right_shift, dtype

        assert (left_shift([5, 1], [2, 13]) == [20, 2 ** 13]).all()
        assert (right_shift(10, range(5)) == [10, 5, 2, 1, 0]).all()
        bool_ = dtype("bool").type
        assert left_shift(bool_(1), 3) == left_shift(1, 3)
        assert right_shift(bool_(1), 3) == right_shift(1, 3)
Example #22
 def _TH_decode(self, data):
     """
     Decode the binary data into event time (absolute, highest precision),
     channel number and special bit. See TH documentation about the details.
     """
     event_time = np.bitwise_and(data, 2**25-1)
     channel = np.bitwise_and(np.right_shift(data, 25), 2**6 - 1)
     special = np.bitwise_and(np.right_shift(data, 31), 1)
     return event_time, channel, special
Example #23
  def extractChannel ( self ):
    """Convert the uint32 back into 4x8 bit channels"""

    zdim, ydim, xdim = self.data.shape
    newcube = np.zeros( (ydim, xdim, 4), dtype=np.uint8 )
    newcube[:,:,0] = np.uint8 ( np.bitwise_and(self.data, 0xff) )
    newcube[:,:,1] = np.uint8 ( np.right_shift( self.data, 8) & 0xff )
    newcube[:,:,2] = np.uint8 ( np.right_shift( self.data, 16) & 0xff )
    newcube[:,:,3] = np.uint8 ( np.right_shift (self.data, 24) )
    self.data = newcube
Example #24
  def RGBAChannel ( self ):
    """Convert the uint64 back into 4x16 bit channels"""

    zdim, ydim, xdim = self.data.shape
    newcube = np.zeros( (4, zdim, ydim, xdim), dtype=np.uint16 )
    newcube[0,:,:,:] = np.bitwise_and(self.data, 0xffff, dtype=np.uint16)
    newcube[1,:,:,:] = np.uint16 ( np.right_shift( self.data, 16) & 0xffff )
    newcube[2,:,:,:] = np.uint16 ( np.right_shift( self.data, 32) & 0xffff )
    newcube[3,:,:,:] = np.uint16 ( np.right_shift (self.data, 48) )
    self.data = newcube
Example #25
def _RGBto3dby8 ( indata ):
  """Convert a numpy array of 32bit RGB to 3d, 8-bit data"""

  _3ddata = np.zeros ( [indata.shape[0],indata.shape[1],indata.shape[2],3], dtype=np.uint8)

  _3ddata[:,:,:,0] = np.uint8(indata&0x000000FF) 
  _3ddata[:,:,:,1] = np.uint8(np.right_shift(indata&0x0000FF00,8)) 
  _3ddata[:,:,:,2] = np.uint8(np.right_shift(indata&0x00FF0000,16)) 
  
  return _3ddata
Example #26
def process_t3records_t3rfile(t3records, reserved=1, valid=1, time_bit=12,
                              dtime_bit=16, ch_bit=2, special_bit=False):
    """ Decode t3records from .T3R files.

    See also :func:`process_t3records`.

    Arguments:
        reserved (int): reserved bit
        valid (int): valid bit. If valid==1 the Data == Channel
            else Data = Overflow[1], Reserved[8], Marker[3]
        time_bit (int): bits for nanotimes
        dtime_bit (int): bits for TimeTag (timestamps)
        ch_bit (int): number of bits encoding channel
        special_bit (bool): True if the record contains the special bit.

    Returns:
        A 3-element tuple containing the following 1D arrays (all of the same
        length):

        - **timestamps** (*array of int64*): the macro-time (or number of sync)
          of each photon after overflow correction. Units are specified in
          the file header.
        - **nanotimes** (*array of uint16*): the micro-time (TCSPC time), i.e.
          the time lag between the photon detection and the previous laser
          sync. Units (i.e. the bin width) are specified in the file header.
        - **detectors** (*arrays of uint8*): detector number. When
          `special_bit = True` the highest bit in `detectors` will be
          the special bit.
    """
    if special_bit:
        ch_bit += 1
    assert ch_bit <= 8
    assert time_bit <= 16
    assert time_bit+reserved+valid+dtime_bit+ch_bit == 32

    detectors = np.bitwise_and(
        np.right_shift(t3records, time_bit+dtime_bit+reserved+valid),
        2**ch_bit - 1).astype('uint8')
    nanotimes = np.bitwise_and(
        np.right_shift(t3records, dtime_bit),
        2**time_bit - 1).astype('uint16')

    valid = np.bitwise_and(
        np.right_shift(t3records, time_bit+dtime_bit+reserved+valid),
        2**valid - 1).astype('uint8')

    dt = np.dtype([('low16', 'uint16'), ('high16', 'uint16')])
    t3records_low16 = np.frombuffer(t3records, dt)['low16']     # View
    timestamps = t3records_low16.astype(np.int64)               # Copy
    np.bitwise_and(timestamps, 2**dtime_bit - 1, out=timestamps)

    overflow = 2**dtime_bit
    _correct_overflow1(timestamps, valid, 0, overflow)
    return detectors, timestamps, nanotimes
Example #27
def pcomp(n):
    p2 = 1
    i = 0
    while p2 < n:
        p2 = np.left_shift(p2, 2)
        i = np.add(i, 2)

    p2 = np.right_shift(p2, 2)
    i = np.subtract(i, 2)
    p2 = np.right_shift(p2, i // 2)
    return p2
Example #28
def get_calipso_clouds_of_type_i_feature_classification_flags_one_layer(cflag, calipso_cloudtype=0):
    #bits 10-12, start at 1 counting
    cal_vert_feature = np.zeros(cflag.shape)-9.0
    feature_array = (4*np.bitwise_and(np.right_shift(cflag,11),1) + 
                     2*np.bitwise_and(np.right_shift(cflag,10),1) + 
                     1*np.bitwise_and(np.right_shift(cflag,9),1))
    cal_vert_feature = np.where(
        np.not_equal(cflag,1),feature_array,cal_vert_feature)

    is_requested_type =  cal_vert_feature == calipso_cloudtype
    return is_requested_type 
def ComputeJerModelDriven(csm, kernelShape):
    """Computes a lookup table of joint encoding relationships (JER) using the
    model driven formulation given in Beatty PJ. Reconstruction methods for
    fast magnetic resonance imaging. PhD thesis, Stanford University, 2006.
    JERs were previously called "correlation values"; the name has been changed
    to avoid confusion with correlation coefficients, used to relate
    two random variables.

    Parameters
    ----------
    
    csm : (Nx, Ny, Nc) array
        coil sensitivity map (can also be weighted coil sensitivity map)
    kernelShape : length 2 vector
        kernel shape on a fully sampled grid, [kx_extent, ky_extent]
        e.g. for acceleration=2, ky_extent=7 would use 4 source points along ky; 
        for acceleration=4, only 2 source points would be used.

    Returns
    -------
    jerLookup : (kx,ky,Nc, kx, ky, Nc) 6-D array
        lookup table of all joint encoding relations between kernel sample locations.
        
    Notes
    -----
    Code made available for the ISMRM 2015 Sunrise Educational Course

    This Source Code Form is subject to the terms of the Mozilla Public
    License, v. 2.0. If a copy of the MPL was not distributed with this
    file, You can obtain one at http://mozilla.org/MPL/2.0/.
        
    Philip J. Beatty ([email protected])
    """
    import Transforms
    
    channelDim = csm.ndim-1
    numChannels = csm.shape[channelDim]

    jerLookup = np.zeros(kernelShape + kernelShape + [numChannels, numChannels], dtype=np.complex128)

    nx = csm.shape[0]
    ny = csm.shape[1]
    nx_2 = np.right_shift(nx, 1)
    ny_2 = np.right_shift(ny, 1)

    for ic2 in range(numChannels):
        for ic1 in range(numChannels):
            lookup = Transforms.TransformImageToKspace(np.conj(csm[:,:,ic1]) * csm[:,:,ic2], scale = [1.0, 1.0])
            for ikyb in range(kernelShape[1]):
                for ikxb in range(kernelShape[0]):
                    for ikya in range(kernelShape[1]):
                        for ikxa in range(kernelShape[0]):
                            jerLookup[ikxa, ikya, ikxb, ikyb, ic1, ic2] = lookup[nx_2 + ikxb - ikxa, ny_2 + ikyb-ikya]
    return jerLookup
Example #30
def get_calipso_aerosol_of_type_i(caObj, atype=0):
    #bits 10-12, start at 1 counting
    cflag = caObj.calipso_aerosol.feature_classification_flags[::,0]
    cal_vert_feature = np.zeros(cflag.shape)-9.0
    feature_array = (4*np.bitwise_and(np.right_shift(cflag,11),1) + 
                     2*np.bitwise_and(np.right_shift(cflag,10),1) + 
                     1*np.bitwise_and(np.right_shift(cflag,9),1))
    cal_vert_feature = np.where(
        np.not_equal(cflag,1),feature_array,cal_vert_feature)

    is_requested_type =  cal_vert_feature == atype
    return is_requested_type 
Example #31
def guess(low, high):
    import numpy
    # integer midpoint of low and high via a right shift
    return numpy.right_shift(low + high, 1)
Example #32
 def get_groups(self, hits):
     # return np.right_shift( np.bitwise_and( hits, group_mask ), 24 ) == 0
     return np.right_shift(hits, 24) == 0

f = File( r"C:\Users\laptop\Google Drive\Google Drive\Shared folder Tasos-VanBoven\Sample_data\Broccoli\35m\Rijweg_stalling1-8-5.las", mode='r')
#f = pclpy.read(r"C:\Users\laptop\Google Drive\Google Drive\Shared folder Tasos-VanBoven\Sample_data\Broccoli\35m\Rijweg_stalling1-8-5.las", "PointXYZRGBA")

#%%
#body

# check las file version
# RGB contains
if f._header.data_format_id in (2, 3, 5):
    red = (f.red)
    green = (f.green)
    blue = (f.blue)
    # convert 16-bit to 8-bit (case: data stored in the high 8 bits)
    red = np.right_shift(red, 8).astype(np.uint8)
    green = np.right_shift(green, 8).astype(np.uint8)
    blue = np.right_shift(blue, 8).astype(np.uint8)
    # (case: data stored in the low 8 bits)
    # red = red.astype(np.uint8)
    # green = green.astype(np.uint8)
    # blue = blue.astype(np.uint8)
# =============================================================================
#     red = red.astype(np.uint32)
#     green = green.astype(np.uint32)
#     blue = blue.astype(np.uint32)
# =============================================================================
    rgb = np.left_shift(red, 16) + np.left_shift(green, 8) + np.left_shift(blue, 0)
    ptcloud = np.vstack((f.x, f.y, f.z, rgb)).transpose()
    
    cloud = pcl.search.KdTree.PointXYZRGBA()
def parse_depth_sunrgbd(dp):
    # undo the 3-bit circular shift used to pack SUN RGB-D depth values
    dp = np.bitwise_or(np.right_shift(dp, 3), np.left_shift(dp, 16 - 3))
    # convert millimeters to meters and clamp the depth at 8 m
    dp = np.array(dp, dtype=np.float32) / 1000
    dp[dp > 8.0] = 8.0
    return dp
Example #35
def test_2():
    # perform language test

    ne = NDexpr()
    ne.set_ae(False)
    x1 = xr.DataArray(np.random.randn(2, 3))
    y1 = xr.DataArray(np.random.randn(2, 3))
    z1 = xr.DataArray(
        np.array([[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
                  [[9, 10, 11], [12, 13, 14], [15, 16, 17]],
                  [[18, 19, 20], [21, 22, 23], [24, 25, 26]]]))
    z2 = z1 * 2
    z3 = np.arange(27)
    mask1 = z1 > 4

    assert ne.test("arccos(z1)", xr.ufuncs.arccos(z1))
    assert ne.test("angle(z1)", xr.ufuncs.angle(z1))
    assert ne.test("arccos(z1)", xr.ufuncs.arccos(z1))
    assert ne.test("arccosh(z1)", xr.ufuncs.arccosh(z1))
    assert ne.test("arcsin(z1)", xr.ufuncs.arcsin(z1))
    assert ne.test("arcsinh(z1)", xr.ufuncs.arcsinh(z1))
    assert ne.test("arctan(z1)", xr.ufuncs.arctan(z1))
    assert ne.test("arctanh(z1)", xr.ufuncs.arctanh(z1))
    assert ne.test("ceil(z1)", xr.ufuncs.ceil(z1))
    assert ne.test("conj(z1)", xr.ufuncs.conj(z1))
    assert ne.test("cos(z1)", xr.ufuncs.cos(z1))
    assert ne.test("cosh(z1)", xr.ufuncs.cosh(z1))
    assert ne.test("deg2rad(z1)", xr.ufuncs.deg2rad(z1))
    assert ne.test("degrees(z1)", xr.ufuncs.degrees(z1))
    assert ne.test("exp(z1)", xr.ufuncs.exp(z1))
    assert ne.test("expm1(z1)", xr.ufuncs.expm1(z1))
    assert ne.test("fabs(z1)", xr.ufuncs.fabs(z1))
    assert ne.test("fix(z1)", xr.ufuncs.fix(z1))
    assert ne.test("floor(z1)", xr.ufuncs.floor(z1))
    assert ne.test("frexp(z3)", xr.ufuncs.frexp(z3))
    assert ne.test("imag(z1)", xr.ufuncs.imag(z1))
    assert ne.test("iscomplex(z1)", xr.ufuncs.iscomplex(z1))
    assert ne.test("isfinite(z1)", xr.ufuncs.isfinite(z1))
    assert ne.test("isinf(z1)", xr.ufuncs.isinf(z1))
    assert ne.test("isnan(z1)", xr.ufuncs.isnan(z1))
    assert ne.test("isreal(z1)", xr.ufuncs.isreal(z1))
    assert ne.test("log(z1)", xr.ufuncs.log(z1))
    assert ne.test("log10(z1)", xr.ufuncs.log10(z1))
    assert ne.test("log1p(z1)", xr.ufuncs.log1p(z1))
    assert ne.test("log2(z1)", xr.ufuncs.log2(z1))
    assert ne.test("pow(z1, 2.0)", np.power(z1, 2.0))
    assert ne.test("pow(z1, 2)", np.power(z1, 2))
    assert ne.test("z1^2", np.power(z1, 2))
    assert ne.test("z1**2", np.power(z1, 2))
    assert ne.test("rad2deg(z1)", xr.ufuncs.rad2deg(z1))
    assert ne.test("radians(z1)", xr.ufuncs.radians(z1))
    assert ne.test("real(z1)", xr.ufuncs.real(z1))
    assert ne.test("rint(z1)", xr.ufuncs.rint(z1))
    assert ne.test("sign(z1)", xr.ufuncs.sign(z1))
    assert ne.test("signbit(z1)", xr.ufuncs.signbit(z1))
    assert ne.test("sin(z1)", xr.ufuncs.sin(z1))
    assert ne.test("sinh(z1)", xr.ufuncs.sinh(z1))
    assert ne.test("sqrt(z1)", xr.ufuncs.sqrt(z1))
    assert ne.test("square(z1)", xr.ufuncs.square(z1))
    assert ne.test("tan(z1)", xr.ufuncs.tan(z1))
    assert ne.test("tanh(z1)", xr.ufuncs.tanh(z1))
    assert ne.test("trunc(z1)", xr.ufuncs.trunc(z1))

    assert ne.test("arctan2(z1, z2)", xr.ufuncs.arctan2(z1, z2))
    assert ne.test("copysign(z1, z2)", xr.ufuncs.copysign(z1, z2))
    assert ne.test("fmax(z1, z2)", xr.ufuncs.fmax(z1, z2))
    assert ne.test("fmin(z1, z2)", xr.ufuncs.fmin(z1, z2))
    assert ne.test("fmod(z1, z2)", xr.ufuncs.fmod(z1, z2))
    assert ne.test("hypot(z1, z2)", xr.ufuncs.hypot(z1, z2))
    assert ne.test("ldexp(z1, z2)", xr.DataArray(xr.ufuncs.ldexp(z1, z2)))
    assert ne.test("logaddexp(z1, z2)", xr.ufuncs.logaddexp(z1, z2))
    assert ne.test("logaddexp2(z1, z2)", xr.ufuncs.logaddexp2(z1, z2))
    assert ne.test("logicaland(z1, z2)", xr.ufuncs.logical_and(z1, z2))
    assert ne.test("logicalnot(z1, z2)", xr.ufuncs.logical_not(z1, z2))
    assert ne.test("logicalor(z1, z2)", xr.ufuncs.logical_or(z1, z2))
    assert ne.test("logicalxor(z1, z2)", xr.ufuncs.logical_xor(z1, z2))
    assert ne.test("maximum(z1, z2)", xr.ufuncs.maximum(z1, z2))
    assert ne.test("minimum(z1, z2)", xr.ufuncs.minimum(z1, z2))
    assert ne.test("nextafter(z1, z2)", xr.ufuncs.nextafter(z1, z2))

    assert ne.test("all(z1)", xr.DataArray.all(z1))
    assert ne.test("all(z1, 0)", xr.DataArray.all(z1, axis=0))
    assert ne.test("all(z1, 0, 1)", xr.DataArray.all(z1, axis=(0, 1)))
    assert ne.test("all(z1, 0, 1, 2)", xr.DataArray.all(z1, axis=(0, 1, 2)))

    assert ne.test("any(z1)", xr.DataArray.any(z1))
    assert ne.test("any(z1, 0)", xr.DataArray.any(z1, axis=0))
    assert ne.test("any(z1, 0, 1)", xr.DataArray.any(z1, axis=(0, 1)))
    assert ne.test("any(z1, 0, 1, 2)", xr.DataArray.any(z1, axis=(0, 1, 2)))

    assert ne.test("argmax(z1)", xr.DataArray.argmax(z1))
    assert ne.test("argmax(z1, 0)", xr.DataArray.argmax(z1, axis=0))
    assert ne.test("argmax(z1, 1)", xr.DataArray.argmax(z1, axis=1))
    assert ne.test("argmax(z1, 2)", xr.DataArray.argmax(z1, axis=2))

    assert ne.test("argmin(z1)", xr.DataArray.argmin(z1))
    assert ne.test("argmin(z1, 0)", xr.DataArray.argmin(z1, axis=0))
    assert ne.test("argmin(z1, 1)", xr.DataArray.argmin(z1, axis=1))
    assert ne.test("argmin(z1, 2)", xr.DataArray.argmin(z1, axis=2))

    assert ne.test("max(z1)", xr.DataArray.max(z1))
    assert ne.test("max(z1, 0)", xr.DataArray.max(z1, axis=0))
    assert ne.test("max(z1, 0, 1)", xr.DataArray.max(z1, axis=(0, 1)))
    assert ne.test("max(z1, 0, 1, 2)", xr.DataArray.max(z1, axis=(0, 1, 2)))

    assert ne.test("mean(z1)", xr.DataArray.mean(z1))
    assert ne.test("mean(z1, 0)", xr.DataArray.mean(z1, axis=0))
    assert ne.test("mean(z1, 0, 1)", xr.DataArray.mean(z1, axis=(0, 1)))
    assert ne.test("mean(z1, 0, 1, 2)", xr.DataArray.mean(z1, axis=(0, 1, 2)))

    assert ne.test("median(z1)", xr.DataArray.median(z1))
    assert ne.test("median(z1, 0)", xr.DataArray.median(z1, axis=0))
    assert ne.test("median(z1, 0, 1)", xr.DataArray.median(z1, axis=(0, 1)))
    assert ne.test("median(z1, 0, 1, 2)",
                   xr.DataArray.median(z1, axis=(0, 1, 2)))

    assert ne.test("min(z1)", xr.DataArray.min(z1))
    assert ne.test("min(z1, 0)", xr.DataArray.min(z1, axis=0))
    assert ne.test("min(z1, 0, 1)", xr.DataArray.min(z1, axis=(0, 1)))
    assert ne.test("min(z1, 0, 1, 2)", xr.DataArray.min(z1, axis=(0, 1, 2)))

    assert ne.test("prod(z1)", xr.DataArray.prod(z1))
    assert ne.test("prod(z1, 0)", xr.DataArray.prod(z1, axis=0))
    assert ne.test("prod(z1, 0, 1)", xr.DataArray.prod(z1, axis=(0, 1)))
    assert ne.test("prod(z1, 0, 1, 2)", xr.DataArray.prod(z1, axis=(0, 1, 2)))

    assert ne.test("sum(z1)", xr.DataArray.sum(z1))
    assert ne.test("sum(z1, 0)", xr.DataArray.sum(z1, axis=0))
    assert ne.test("sum(z1, 0, 1)", xr.DataArray.sum(z1, axis=(0, 1)))
    assert ne.test("sum(z1, 0, 1, 2)", xr.DataArray.sum(z1, axis=(0, 1, 2)))

    assert ne.test("std(z1)", xr.DataArray.std(z1))
    assert ne.test("std(z1, 0)", xr.DataArray.std(z1, axis=0))
    assert ne.test("std(z1, 0, 1)", xr.DataArray.std(z1, axis=(0, 1)))
    assert ne.test("std(z1, 0, 1, 2)", xr.DataArray.std(z1, axis=(0, 1, 2)))

    assert ne.test("var(z1)", xr.DataArray.var(z1))
    assert ne.test("var(z1, 0)", xr.DataArray.var(z1, axis=0))
    assert ne.test("var(z1, 0, 1)", xr.DataArray.var(z1, axis=(0, 1)))
    assert ne.test("var(z1, 0, 1, 2)", xr.DataArray.var(z1, axis=(0, 1, 2)))

    assert ne.test("percentile(z1, 50)", np.percentile(z1, 50))
    assert ne.test("percentile(z1, 50)+percentile(z1, 50)",
                   np.percentile(z1, 50) + np.percentile(z1, 50))
    assert ne.test("percentile(z1, (50))", np.percentile(z1, (50)))
    assert ne.test("percentile(z1, (50, 60))", np.percentile(z1, (50, 60)))
    assert ne.test("percentile(z1, (50, 60, 70))",
                   np.percentile(z1, (50, 60, 70)))
    assert ne.test(
        "percentile(z1, (50, 60, 70)) + percentile(z1, (50, 60, 70))",
        np.percentile(z1, (50, 60, 70)) + np.percentile(z1, (50, 60, 70)))
    assert ne.test("1 + var(z1, 0, 0+1, 2) + 1",
                   1 + xr.DataArray.var(z1, axis=(0, 0 + 1, 2)) + 1)

    assert ne.test("1 + ((z1+z1)*0.0005)**2 + 1", 1 + np.power(
        (z1 + z1) * 0.0005, 2) + 1)

    assert ne.test("z1{mask1}", xr.DataArray.where(z1, mask1))
    assert ne.test("z1{z1>2}", xr.DataArray.where(z1, z1 > 2))
    assert ne.test("z1{z1>=2}", xr.DataArray.where(z1, z1 >= 2))
    assert ne.test("z1{z1<2}", xr.DataArray.where(z1, z1 < 2))
    assert ne.test("z1{z1<=2}", xr.DataArray.where(z1, z1 <= 2))
    assert ne.test("z1{z1==2}", xr.DataArray.where(z1, z1 == 2))
    assert ne.test("z1{z1!=2}", xr.DataArray.where(z1, z1 != 2))

    assert ne.test("z1{z1<2 | z1>5}",
                   xr.DataArray.where(z1, (z1 < 2) | (z1 > 5)))
    assert ne.test("z1{z1>2 & z1<5}",
                   xr.DataArray.where(z1, (z1 > 2) & (z1 < 5)))

    ne.evaluate("m = z1+1")
    assert ne.test("m", z1 + 1)

    assert ne.test("z1{~mask1}", xr.DataArray.where(z1, ~mask1))

    assert ne.test("(1<0?1+1;2+2)", 4)
    assert ne.test("(0<1?1+1;2+2)", 2)
    assert ne.test("z1+mask1", xr.DataArray.where(z1, mask1))

    assert ne.is_number(1) is True

    assert ne.test("-z1", -z1)
    assert ne.test("z1{!(z1<2)}",
                   xr.DataArray.where(z1, xr.ufuncs.logical_not(z1 < 2)))

    assert ne.test("z1>>1", np.right_shift(z1, 1))
    assert ne.test("z1<<1", np.left_shift(z1, 1))

    assert ne.test("(z1+z1)", z1 + z1)
    assert ne.evaluate("(z1, z1)") == (z1, z1)
    assert ne.evaluate('(1, (4, 5))') == (1, (4, 5))
import numpy as np
import cv2
from pylepton import Lepton
import time
import datetime

for i in range(1,301):
    # Image capture and Normalisation
    with Lepton() as l:
        a,_ = l.capture()
    cv2.normalize(a, a, 0, 65535, cv2.NORM_MINMAX) # extend contrast
    np.right_shift(a, 8, a) # fit data into 8 bits

    # Add Date and Timestamp to Image
    timestamp = time.time()
    stamp = datetime.datetime.fromtimestamp(timestamp).strftime('%d-%m-%Y_%H-%M-%S')

    filename = "capture" + str(i) + stamp + ".jpg"
    cv2.imwrite(filename, np.uint8(a)) # write it!

    # time.sleep(0.1) # Delay

Example #37
    print("Binary form of 13 and 17:")
    a, b = 13, 17
    print(bin(a), bin(b), "\n")
    print("Bitwise AND of 13 and 17:")
    print(np.bitwise_and(13, 17))

    # 2. bitwise_or() performs a bitwise OR on the binary form of the array integers
    print("Bitwise OR of 13 and 17:")
    print(np.bitwise_or(13, 17))

    # 3. invert() inverts the bits of the array integers, i.e. 0 becomes 1 and 1 becomes 0
    print("Bitwise NOT of 13, where the ndarray dtype is uint8:")
    print(np.invert(np.array([13], dtype=np.uint8)), "\n")
    # compare the binary representations of 13 and 242 to see the inverted bits
    print("Binary representation of 13:")
    print(np.binary_repr(13, width=8), "\n")
    print("Binary representation of 242:")
    print(np.binary_repr(242, width=8), "\n")

    # 4. left_shift() shifts the bits of the array elements left by the given count, padding with zeros on the right
    print("Shift 10 left by two bits:")
    print(np.left_shift(10, 2), "\n")
    print("Binary representation of 10:")
    print(np.binary_repr(10, width=8), "\n")
    print("Binary representation of 40:")
    print(np.binary_repr(40, width=8), "\n")

    # 5. right_shift() shifts the bits of the array elements right by the given count, padding with zeros on the left
    print("Shift 40 right by two bits:")
    print(np.right_shift(40, 2), "\n")
def raw_to_8bit(data):
    cv2.normalize(data, data, 0, 65535, cv2.NORM_MINMAX)
    np.right_shift(data, 8, data)
    return cv2.cvtColor(np.uint8(data), cv2.COLOR_GRAY2RGB)
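Hedged usage sketch with synthetic sensor data (values and sizes are illustrative; assumes cv2 and numpy as np are imported):

frame = np.linspace(27000, 31000, 60 * 80, dtype=np.uint16).reshape(60, 80)
rgb = raw_to_8bit(frame)
print(rgb.shape, rgb.dtype)  # (60, 80, 3) uint8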
Example #39
    lambda c1, c2: c1.cast(BooleanType()) | c2.cast(BooleanType()),
    "logical_xor":
    lambda c1, c2: (
        # mimics xor by logical operators.
        (c1.cast(BooleanType()) | c2.cast(BooleanType()))
        & (~(c1.cast(BooleanType())) | ~(c2.cast(BooleanType())))),
    "maximum":
    F.greatest,
    "minimum":
    F.least,
    "modf":
    F.pandas_udf(lambda s1, s2: np.modf(s1, s2), DoubleType()),
    "nextafter":
    F.pandas_udf(lambda s1, s2: np.nextafter(s1, s2), DoubleType()),
    "right_shift":
    F.pandas_udf(lambda s1, s2: np.right_shift(s1, s2), LongType()),
})


# Copied from pandas.
# See also https://docs.scipy.org/doc/numpy/reference/arrays.classes.html#standard-array-subclasses
def maybe_dispatch_ufunc_to_dunder_op(ser_or_index, ufunc: Callable,
                                      method: str, *inputs, **kwargs: Any):
    special = {
        "add",
        "sub",
        "mul",
        "pow",
        "mod",
        "floordiv",
        "truediv",
def np_float2np_bf16(arr):
    """Convert a numpy array of float to a numpy array
    of bf16 in uint16"""
    orig = arr.view("<u4")
    # round to nearest even: add 0x7FFF plus the lowest mantissa bit that is kept
    bias = np.bitwise_and(np.right_shift(orig, 16), 1) + 0x7FFF
    return np.right_shift(orig + bias, 16).astype("uint16")
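Round-trip sketch, assuming float32 input on a little-endian platform (bf16 keeps about 3 significant decimal digits):

x = np.array([1.0, -2.5, 3.14159], dtype="float32")
back = np.left_shift(np_float2np_bf16(x).astype("uint32"), 16).view("float32")
print(back)  # ~[ 1.  -2.5  3.140625]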
Example #41
    def _run(env, remote):
        m = 2
        n = 8
        imm_shift = np.random.randint(0, 8)
        imm_scale = np.random.randint(1, 5)
        # compute
        a = te.placeholder((m, n, env.BATCH, env.BLOCK_OUT),
                           name="a",
                           dtype=env.acc_dtype)
        a_buf = te.compute((m, n, env.BATCH, env.BLOCK_OUT), lambda *i: a(*i),
                           "a_buf")  # DRAM->SRAM
        res_shift = te.compute((m, n, env.BATCH, env.BLOCK_OUT),
                               lambda *i: a_buf(*i) + imm_shift,
                               "res_shift")  # compute
        res_scale = te.compute((m, n, env.BATCH, env.BLOCK_OUT),
                               lambda *i: res_shift(*i) >> imm_scale,
                               "res_scale")  # compute
        res = te.compute((m, n, env.BATCH, env.BLOCK_OUT),
                         lambda *i: res_scale(*i).astype(env.inp_dtype),
                         "res")  # SRAM->DRAM
        # schedule
        s = te.create_schedule(res.op)
        s[a_buf].set_scope(env.acc_scope)  # SRAM
        s[res_shift].set_scope(env.acc_scope)  # SRAM
        s[res_scale].set_scope(env.acc_scope)  # SRAM
        s[a_buf].pragma(a_buf.op.axis[0], env.dma_copy)  # DRAM->SRAM
        s[res_shift].pragma(res_shift.op.axis[0], env.alu)  # compute
        s[res_scale].pragma(res_scale.op.axis[0], env.alu)  # compute
        s[res].pragma(res.op.axis[0], env.dma_copy)  # SRAM->DRAM
        # build
        mod = vta.build(s, [a, res],
                        tvm.target.Target("ext_dev", host=env.target_host))
        if not remote:
            return
        temp = utils.tempdir()
        mod.save(temp.relpath("load_act.o"))
        remote.upload(temp.relpath("load_act.o"))
        f = remote.load_module("load_act.o")
        # verify
        dev = remote.ext_dev(0)
        a_np = np.random.randint(-10,
                                 10,
                                 size=(m, n, env.BATCH,
                                       env.BLOCK_OUT)).astype(a.dtype)
        res_np = np.right_shift((a_np + imm_shift), imm_scale)
        res_np = res_np.astype(res.dtype)
        a_nd = tvm.nd.array(a_np, dev)
        res_nd = tvm.nd.array(
            np.zeros((m, n, env.BATCH, env.BLOCK_OUT)).astype(res.dtype), dev)

        if env.TARGET in ["sim", "tsim"]:
            simulator.clear_stats()

        f(a_nd, res_nd)

        np.testing.assert_equal(res_np, res_nd.numpy())

        if env.TARGET in ["sim", "tsim"]:
            sim_stats = simulator.stats()
            print("Shift and scale execution statistics:")
            for k, v in sim_stats.items():
                print("\t{:<16}: {:>16}".format(k, v))
Example #42
    def get_swath_data(self, item, fill=None):
        """Retrieve the item asked for then set it to the specified data type, scale it, and mask it.
        """
        if fill is None:
            fill = self.get_fill_value(item)
        var_info = self.file_type_info.get(item)
        variable = self[var_info.var_name]
        data = variable.get()
        if var_info.index is not None:
            data = data[var_info.index]
        # before or after scaling/offset?
        if var_info.bit_mask is not None:
            bit_mask = var_info.bit_mask
            shift_amount = var_info.right_shift
            offset = var_info.additional_offset
            numpy.bitwise_and(data, bit_mask, data)
            numpy.right_shift(data, shift_amount, data)
            numpy.add(data, offset, data)

        # Convert to the correct data type
        data = data.astype(var_info.data_type)

        # Get the fill value
        if var_info.fill_attr_name and isinstance(var_info.fill_attr_name,
                                                  str):
            fill_value = self[var_info.var_name + "." +
                              var_info.fill_attr_name]
            mask = data == fill_value
        elif var_info.fill_attr_name:
            fill_value = var_info.fill_attr_name
            mask = data >= fill_value
        else:
            fill_value = -999.0
            mask = data == fill_value

        # Get the valid_min and valid_max
        valid_min, valid_max = None, None
        if var_info.range_attr_name:
            if isinstance(var_info.range_attr_name, str):
                valid_min, valid_max = self[var_info.var_name + "." +
                                            var_info.range_attr_name]
            else:
                valid_min, valid_max = var_info.range_attr_name

        # Certain data need to have special values clipped
        if var_info.clip_saturated and valid_max is not None:
            LOG.debug(
                "Setting any saturation or \"can't aggregate\" values to valid maximum"
            )
            data[(data == self.CANT_AGGR_VALUE) |
                 (data == self.SATURATION_VALUE)] = valid_max

        if mask is not None and valid_max is not None:
            mask[(data < valid_min) | (data > valid_max)] = True

        # Get the scaling factors
        scale_value = None
        if var_info.scale_attr_name:
            try:
                scale_value = self[var_info.var_name + "." +
                                   var_info.scale_attr_name]
                if var_info.index is not None:
                    scale_value = scale_value[var_info.index]
                scale_value = float(scale_value)
            except KeyError:
                LOG.debug("No scaling factors for %s", item)
        offset_value = None
        if var_info.offset_attr_name is not None:
            try:
                offset_value = self[var_info.var_name + "." +
                                    var_info.offset_attr_name]
                if var_info.index is not None:
                    offset_value = offset_value[var_info.index]
                offset_value = float(offset_value)
            except KeyError:
                LOG.debug("No offset for %s", item)

        LOG.debug("Variable " + str(var_info.var_name) +
                  " is using scale value " + str(scale_value) +
                  " and offset value " + str(offset_value))

        if offset_value is not None:
            data -= data.dtype.type(offset_value)
        if scale_value is not None:
            data *= data.dtype.type(scale_value)

        # Special case: 250m Resolution
        if var_info.interpolate:
            if mask is not None:
                data[mask] = numpy.nan

            if item in [K_LONGITUDE_250, K_LATITUDE_250]:
                cache_key = "250"
                lon_key = K_LONGITUDE_250
                lat_key = K_LATITUDE_250
                res_factor = 4
            elif item in [K_LONGITUDE_500, K_LATITUDE_500]:
                cache_key = "500"
                lon_key = K_LONGITUDE_500
                lat_key = K_LATITUDE_500
                res_factor = 2
            else:
                raise ValueError("Don't know how to interpolate item '%s'" %
                                 (item, ))

            if self.nav_interpolation[cache_key][
                    0] is not None and self.nav_interpolation[cache_key][
                        1] is not None:
                LOG.debug(
                    "Returning previously interpolated %sm resolution geolocation data",
                    cache_key)
                data = self.nav_interpolation[cache_key][not (item == lon_key)]
                self.nav_interpolation[cache_key] = [None, None]
                return data

            self.nav_interpolation[cache_key][not (item == lon_key)] = data

            if self.nav_interpolation[cache_key][
                    0] is None or self.nav_interpolation[cache_key][1] is None:
                # We don't have the other coordinate data yet
                self.get_swath_data(lon_key if item == lat_key else lat_key,
                                    fill=fill)
            else:
                # We already have the other coordinate variable, the user isn't asking for this item so just return
                LOG.debug(
                    "Returning 'None' because this instance of the function shouldn't have been called by the user"
                )
                return None

            LOG.info("Interpolating to higher resolution: %s" %
                     (var_info.var_name, ))
            lon_data, lat_data = self.nav_interpolation[cache_key]

            new_lon_data, new_lat_data = interpolate_geolocation_cartesian(
                lon_data, lat_data, res_factor=res_factor)

            new_lon_data[numpy.isnan(new_lon_data)] = fill
            new_lat_data[numpy.isnan(new_lat_data)] = fill
            # Cache the results when the user requests the other coordinate
            self.nav_interpolation[cache_key] = [new_lon_data, new_lat_data]
            data = new_lon_data if item == lon_key else new_lat_data
        elif mask is not None:
            data[mask] = fill

        return data
Example #43
def shift(x, s, **kwargs):
    if s < 0:
        return np.right_shift(x, -s, **kwargs)
    else:
        return np.left_shift(x, s, **kwargs)
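Usage sketch, assuming numpy is imported as np: a positive s shifts left, a negative s shifts right.

x = np.array([4, 8], dtype=np.int32)
print(shift(x, 1))   # [ 8 16]
print(shift(x, -2))  # [1 2]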
Example #44
        def run(self):
            import ast
            import re

            def lcm(a, b):
                from math import gcd  # fractions.gcd was removed in Python 3.9
                return (a * b / gcd(int(a), int(b)))

            def lcma(arr):
                ans = 1.
                for e in arr:
                    ans = lcm(ans, e)
                return int(ans)

            if self.dev.debug:
                print("MDSWorker running")

            event_name = self.dev.seg_event.data()

            for card in self.dev.slots:
                # Retrieve the actual value of NACC (samples) already set in the ACQ box
                # nacc_str = uut.s1.get_knob('nacc')
                nacc_str = self.dev.slots[card].nacc

                if nacc_str == '0,0,0':
                    nacc_sample = 1
                else:
                    nacc_tuple = ast.literal_eval(nacc_str)
                    nacc_sample = nacc_tuple[0]

            if self.dev.debug:
                print("The ACQ NACC sample value is {}".format(nacc_sample))

            # nacc_sample values are always between 1 and 32, set in the ACQ box by the device INIT() function
            dt = float(1. / self.dev.freq.data() * nacc_sample)

            if self.dev.debug:
                print("The SR is {} and timebase delta t is {}".format(
                    self.dev.freq.data(), dt))

            decimator = lcma(self.decim)

            if self.seg_length % decimator:
                self.seg_length = (self.seg_length // decimator +
                                   1) * decimator

            self.device_thread.start()

            segment = 0
            running = self.dev.running
            max_segments = self.dev.max_segments.data()

            # If resampling is choosen, i.e. self.resampling=1, then the res_factor is read from the tree node:
            if self.resampling:
                res_factor = self.dev.res_factor.data()

            while running.on and segment < max_segments:
                try:
                    buf = self.full_buffers.get(block=True, timeout=1)
                except Empty:
                    continue

                if self.dev.trig_time.getDataNoRaise() is None:
                    self.dev.trig_time.record = self.device_thread.trig_time - \
                        ((self.device_thread.io_buffer_size / np.int32(0).nbytes) * dt)

                buffer = np.right_shift(np.frombuffer(buf, dtype='int32'), 8)
                i = 0
                for c in self.chans:
                    slength = self.seg_length / self.decim[i]
                    deltat = dt * self.decim[i]
                    #Choice between executing resampling or not:
                    if c.on and self.resampling:
                        resampled = getattr(self.dev, str(c) + ':RESAMPLED')
                        b = buffer[i::self.nchans * self.decim[i]]
                        begin = segment * slength * deltat
                        end = begin + (slength - 1) * deltat
                        dim = MDSplus.Range(begin, end, deltat)
                        c.makeSegmentResampled(begin, end, dim, b, resampled,
                                               res_factor)
                    elif c.on:
                        b = buffer[i::self.nchans * self.decim[i]]
                        begin = segment * slength * deltat
                        end = begin + (slength - 1) * deltat
                        dim = MDSplus.Range(begin, end, deltat)
                        c.makeSegment(begin, end, dim, b)
                    i += 1
                segment += 1
                MDSplus.Event.setevent(event_name)

                self.empty_buffers.put(buf)

            self.device_thread.stop()
Example #45
File: basic.py Project: mfkiwl/gap_sdk
 def _impl(self, *args, **kwargs):
     return np.right_shift(args[0], args[1], dtype=np.int32)
Example #46
    def __rx_non_complex(self):
        if not self.__rxbuf:
            self._rx_init_channels()
        self.__rxbuf.refill()
        data = bytearray()
        for ec in self.rx_enabled_channels:
            chan = self._rxadc.find_channel(self._rx_channel_names[ec])
            data.extend(chan.read(self.__rxbuf))

        if isinstance(self._rx_data_type, list):
            return self.__multi_type_rx(data)

        x = np.frombuffer(data, dtype=self._rx_data_type)
        if self._rx_mask != 0:
            x = np.bitwise_and(x, self._rx_mask)
        if self._rx_shift > 0:
            x = np.right_shift(x, self._rx_shift)
        elif self._rx_shift < 0:
            x = np.left_shift(x, -(self._rx_shift))

        sig = []
        stride = len(self.rx_enabled_channels)

        if self._rx_stack_interleaved:
            # Convert data to sample interleaved from channel interleaved
            sigi = np.empty((x.size, ), dtype=x.dtype)
            for i, _ in enumerate(self.rx_enabled_channels):
                sigi[i::stride] = x[i * self.rx_buffer_size:(i + 1) *
                                    self.rx_buffer_size]
            x = sigi

        if self._rx_output_type == "raw":
            for c in range(stride):
                sig.append(x[c::stride])
        elif self._rx_output_type == "SI":
            rx_scale = []
            rx_offset = []
            for i in self.rx_enabled_channels:
                v = self._rxadc.find_channel(self._rx_channel_names[i])
                if "scale" in v.attrs:
                    scale = self._get_iio_attr(self._rx_channel_names[i],
                                               "scale", False)
                else:
                    scale = 1.0

                if "offset" in v.attrs:
                    offset = self._get_iio_attr(self._rx_channel_names[i],
                                                "offset", False)
                else:
                    offset = 0.0
                rx_scale.append(scale)
                rx_offset.append(offset)

            for c in range(stride):
                raw = x[c::stride]
                sig.append(raw * rx_scale[c] + rx_offset[c])
        else:
            raise Exception("_rx_output_type undefined")

        # Don't return list if a single channel
        if len(self.rx_enabled_channels) == 1:
            return sig[0]
        return sig
Example #47
 def get_channels(self, hits):
     return np.right_shift(np.bitwise_and(hits, channel_mask), 24)
Example #48
File: mmrhist.py Project: NiftyPET/NIPET
def hist(
        datain,
        txLUT,
        axLUT,
        Cnt,
        t0=0,
        t1=0,
        cmass_sig=5,
        frms=None,  # np.array([0], dtype=np.uint16),
        use_stored=False,
        store=False,
        outpath=''):
    '''
    Process list mode data with histogramming and optional bootstrapping:
    Cnt['BTP'] = 0: no bootstrapping [default];
    Cnt['BTP'] = 1: non-parametric bootstrapping;
    Cnt['BTP'] = 2: parametric bootstrapping (using Poisson distribution with mean = 1)
    '''

    if Cnt['SPN'] == 1: nsinos = Cnt['NSN1']
    elif Cnt['SPN'] == 11: nsinos = Cnt['NSN11']
    elif Cnt['SPN'] == 0: nsinos = Cnt['NSEG0']

    log.debug('histogramming with span {}.'.format(Cnt['SPN']))

    if (use_stored is True and 'sinos' in datain and os.path.basename(
            datain['sinos']) == f"sinos_s{Cnt['SPN']}_frm-{t0}-{t1}.npz"):
        hstout = dict(np.load(datain['sinos'], allow_pickle=True))
        nitag = len(hstout['phc'])
        log.debug(
            'acquisition duration by integrating time tags is {} sec.'.format(
                nitag))

    elif os.path.isfile(datain['lm_bf']):
        # gather info about the LM time tags
        nele, ttags, tpos = mmr_lmproc.lminfo(datain['lm_bf'])
        nitag = int((ttags[1] - ttags[0] + 999) / 1000)
        log.debug(
            'acquisition duration by integrating time tags is {} sec.'.format(
                nitag))

        # adjust frame time if outside the limit
        if t1 > nitag: t1 = nitag
        # check if the time point is allowed
        if t0 >= nitag:
            raise ValueError(
                'e> the time frame definition is not allowed! (outside acquisition time)'
            )

        # ---------------------------------------
        # preallocate all the output arrays
        VTIME = 2
        MXNITAG = 5400  # limit to 1hr and 30mins
        if (nitag > MXNITAG):
            tn = int(MXNITAG / (1 << VTIME))
        else:
            tn = int((nitag + (1 << VTIME) - 1) / (1 << VTIME))

        pvs = np.zeros((tn, Cnt['NSEG0'], Cnt['NSBINS']), dtype=np.uint32)
        phc = np.zeros((nitag), dtype=np.uint32)
        dhc = np.zeros((nitag), dtype=np.uint32)
        mss = np.zeros((nitag), dtype=np.float32)

        bck = np.zeros((2, nitag, Cnt['NBCKT']), dtype=np.uint32)
        fan = np.zeros((Cnt['NRNG'], Cnt['NCRS']), dtype=np.uint32)

        # > prompt and delayed sinograms
        psino = np.zeros((nsinos, Cnt['NSANGLES'], Cnt['NSBINS']),
                         dtype=np.uint16)
        dsino = np.zeros((nsinos, Cnt['NSANGLES'], Cnt['NSBINS']),
                         dtype=np.uint16)

        # > single slice rebinned prompts
        ssr = np.zeros((Cnt['NSEG0'], Cnt['NSANGLES'], Cnt['NSBINS']),
                       dtype=np.uint32)

        hstout = {
            'phc': phc,
            'dhc': dhc,
            'mss': mss,
            'pvs': pvs,
            'bck': bck,
            'fan': fan,
            'psn': psino,
            'dsn': dsino,
            'ssr': ssr
        }
        # ---------------------------------------

        # do the histogramming and processing
        mmr_lmproc.hist(hstout, datain['lm_bf'], t0, t1, txLUT, axLUT, Cnt)

        if store:
            if outpath == '':
                fsino = os.path.dirname(datain['lm_bf'])
            else:
                fsino = os.path.join(outpath, 'sino')
                nimpa.create_dir(fsino)
            # complete the path with the file name
            fsino = os.path.join(fsino,
                                 f"sinos_s{Cnt['SPN']}_frm-{t0}-{t1}.npz")
            # store to the above path
            np.savez(fsino, **hstout)

    else:
        log.error('input list-mode data is not defined.')
        return

    # short (interval) projection views
    pvs_sgtl = np.right_shift(hstout['pvs'], 8).astype(np.float32)
    pvs_crnl = np.bitwise_and(hstout['pvs'], 255).astype(np.float32)

    cmass = Cnt['SO_VXZ'] * ndi.filters.gaussian_filter(
        hstout['mss'], cmass_sig, mode='mirror')
    log.debug(
        'centre of mass of axial radiodistribution (filtered with Gaussian of SD = {}):  COMPLETED.'
        .format(cmass_sig))

    # ========================= BUCKET SINGLES =========================
    # > number of single-rate reports for the given second
    # > the two most significant bits encode the number of reports
    nsr = (hstout['bck'][1, :, :] >> 30)

    # > average in a second period
    hstout['bck'][0, nsr > 0] = hstout['bck'][0, nsr > 0] / nsr[nsr > 0]

    # > time indices when single rates are given
    tmsk = np.sum(nsr, axis=1) > 0
    single_rate = np.copy(hstout['bck'][0, tmsk, :])

    # > time
    t = np.arange(nitag)
    t = t[tmsk]

    # > get the average bucket singles:
    buckets = np.int32(np.sum(single_rate, axis=0) / single_rate.shape[0])
    log.debug('dynamic and static buckets single rates:  COMPLETED.')
    # ==================================================================

    # t0 == t1 signals that the full dataset was processed, so report the true duration
    if t0 == t1: t1 = t0 + nitag

    return {
        't0': t0,
        't1': t1,
        'dur': t1 - t0,           # duration
        'phc': hstout['phc'],     # prompts head curve
        'dhc': hstout['dhc'],     # delayeds head curve
        'cmass': cmass,           # centre of mass of the radiodistribution in axial direction
        'pvs_sgtl': pvs_sgtl,     # sagittal projection views in short intervals
        'pvs_crnl': pvs_crnl,     # coronal projection views in short intervals
        'fansums': hstout['fan'], # fan sums of delayeds for variance reduction of randoms
        'sngl_rate': single_rate, # bucket singles over time
        'tsngl': t,               # time points of singles measurements in list-mode data
        'buckets': buckets,       # average bucket singles
        'psino': hstout['psn'].astype(np.uint16), # prompt sinogram
        'dsino': hstout['dsn'].astype(np.uint16), # delayeds sinogram
        'pssr': hstout['ssr']     # single-slice rebinned sinogram of prompts
    }  # yapf: disable
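
The sagittal/coronal unpacking above implies that each 32-bit word of hstout['pvs'] carries the coronal count in its low byte and the sagittal count in the bits above it. A toy round trip of that packing (the counts are made up):

import numpy as np

word = np.uint32((1234 << 8) | 200)  # sagittal=1234, coronal=200

pvs_sgtl = np.right_shift(word, 8).astype(np.float32)
pvs_crnl = np.bitwise_and(word, 255).astype(np.float32)
print(pvs_sgtl, pvs_crnl)  # 1234.0 200.0
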
Example #49
    lambda c1, c2: (
        # mimics xor using logical operators.
        (c1.cast(BooleanType()) | c2.cast(BooleanType()))
        & (~(c1.cast(BooleanType())) | ~(c2.cast(BooleanType())))),
    "maximum":
    F.greatest,
    "minimum":
    F.least,
    "modf":
    pandas_udf(lambda s1, s2: np.modf(s1, s2), DoubleType()),  # type: ignore
    "nextafter":
    pandas_udf(lambda s1, s2: np.nextafter(s1, s2),
               DoubleType()),  # type: ignore
    "right_shift":
    pandas_udf(  # type: ignore
        lambda s1, s2: np.right_shift(s1, s2), LongType()),
})
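
The xor-mimicking entry above leans on the identity a XOR b == (a OR b) AND (NOT a OR NOT b). A quick truth-table check in plain NumPy, independent of the Spark Column types used above:

import numpy as np

a = np.array([False, False, True, True])
b = np.array([False, True, False, True])

mimic = (a | b) & (~a | ~b)
print(mimic)                 # [False  True  True False]
print(np.logical_xor(a, b))  # identical
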


# Copied from pandas.
# See also https://docs.scipy.org/doc/numpy/reference/arrays.classes.html#standard-array-subclasses
def maybe_dispatch_ufunc_to_dunder_op(ser_or_index: IndexOpsMixin,
                                      ufunc: Callable, method: str,
                                      *inputs: Any,
                                      **kwargs: Any) -> IndexOpsMixin:
    special = {
        "add",
        "sub",
        "mul",
        "pow",
        "mod",
Example #50
def image_as_uint(im, bitdepth=None):
    """ Convert the given image to uint (default: uint8)
    
    If the dtype already matches the desired format, it is returned
    as-is. If the image is float, and all values are between 0 and 1,
    the values are multiplied by np.power(2.0, bitdepth). In all other
    situations, the values are scaled such that the minimum value
    becomes 0 and the maximum value becomes np.power(2.0, bitdepth)-1
    (255 for 8-bit and 65535 for 16-bit).
    """
    if not bitdepth:
        bitdepth = 8
    if not isinstance(im, np.ndarray):
        raise ValueError("Image must be a numpy array")
    if bitdepth == 8:
        out_type = np.uint8
    elif bitdepth == 16:
        out_type = np.uint16
    else:
        raise ValueError("Bitdepth must be either 8 or 16")
    dtype_str1 = str(im.dtype)
    dtype_str2 = out_type.__name__
    if (im.dtype == np.uint8 and bitdepth == 8) or (
        im.dtype == np.uint16 and bitdepth == 16
    ):
        # Already the correct format? Return as-is
        return im
    if dtype_str1.startswith("float") and np.nanmin(im) >= 0 and np.nanmax(im) <= 1:
        _precision_warn(dtype_str1, dtype_str2, "Range [0, 1].")
        im = im.astype(np.float64) * (np.power(2.0, bitdepth) - 1) + 0.499999999
    elif im.dtype == np.uint16 and bitdepth == 8:
        _precision_warn(dtype_str1, dtype_str2, "Losing 8 bits of resolution.")
        im = np.right_shift(im, 8)
    elif im.dtype == np.uint32:
        _precision_warn(
            dtype_str1,
            dtype_str2,
            "Losing {} bits of resolution.".format(32 - bitdepth),
        )
        im = np.right_shift(im, 32 - bitdepth)
    elif im.dtype == np.uint64:
        _precision_warn(
            dtype_str1,
            dtype_str2,
            "Losing {} bits of resolution.".format(64 - bitdepth),
        )
        im = np.right_shift(im, 64 - bitdepth)
    else:
        mi = np.nanmin(im)
        ma = np.nanmax(im)
        if not np.isfinite(mi):
            raise ValueError("Minimum image value is not finite")
        if not np.isfinite(ma):
            raise ValueError("Maximum image value is not finite")
        if ma == mi:
            raise ValueError("Max value == min value, ambiguous given dtype")
        _precision_warn(dtype_str1, dtype_str2, "Range [{}, {}].".format(mi, ma))
        # Now make float copy before we scale
        im = im.astype("float64")
        # Scale the values between 0 and 1 then multiply by the max value
        im = (im - mi) / (ma - mi) * (np.power(2.0, bitdepth) - 1) + 0.499999999
    assert np.nanmin(im) >= 0
    assert np.nanmax(im) < np.power(2.0, bitdepth)
    return im.astype(out_type)
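
For reference, a couple of hypothetical calls exercising the branches above. _precision_warn lives elsewhere in the library, so a no-op stub stands in for it to keep the sketch self-contained:

import numpy as np

def _precision_warn(dtype_from, dtype_to, msg):
    pass  # stub for the demo; the real helper issues a warning

im_float = np.random.rand(4, 4)             # floats in [0, 1]
u8 = image_as_uint(im_float)                # scaled by 2**8 - 1
u16 = image_as_uint(im_float, bitdepth=16)  # scaled by 2**16 - 1

im_u16 = (np.arange(16, dtype=np.uint16) * 4096).reshape(4, 4)
u8b = image_as_uint(im_u16, bitdepth=8)     # right-shifted by 8 bits
print(u8.dtype, u16.dtype, u8b.dtype)       # uint8 uint16 uint8
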
Example #51
def merge_results_concat(criteria, ssdag, ssdagA, rsltA, critB, ssdagB, rsltB,
                         merged_err_cut, max_merge, **kw):
    bsfull = [x[0] for x in ssdag.bbspec]
    bspartA = [x[0] for x in ssdagA.bbspec]
    bspartB = [x[0] for x in ssdagB.bbspec]
    assert bsfull[-len(bspartA):] == bspartA
    assert bsfull[:len(bspartB)] == bspartB

    # print('merge_results_concat ssdag.bbspec', ssdag.bbspec)
    # print('merge_results_concat criteria.bbspec', criteria.bbspec)
    rsltB = subset_result(rsltB, slice(max_merge))

    binner = critB.binner
    hash_table = critB.hash_table
    from_seg = criteria.from_seg

    assert len(ssdagB.bbs[-1]) == len(ssdagA.bbs[0])
    assert len(ssdagB.bbs[-1]) == len(ssdag.bbs[from_seg])
    assert len(ssdagB.bbs[-1]) == 1, "did you set merge_bblock?"
    assert ssdagB.bbs[-1][0].filehash == ssdagA.bbs[0][0].filehash
    assert ssdagB.bbs[-1][0].filehash == ssdag.bbs[from_seg][0].filehash
    for _ in range(from_seg):
        f = [bb.filehash for bb in ssdag.bbs[_]]
        assert f == [bb.filehash for bb in ssdagB.bbs[_]]
    for _ in range(len(ssdag.verts) - from_seg):
        f = [bb.filehash for bb in ssdag.bbs[from_seg + _]]
        assert f == [bb.filehash for bb in ssdagA.bbs[_]]

    n = len(rsltB.idx)
    nv = len(ssdag.verts)
    merged = ResultJIT(
        pos=np.empty((n, nv, 4, 4), dtype="f4"),
        idx=np.empty((n, nv), dtype="i4"),
        err=9e9 * np.ones((n, ), dtype="f8"),
        stats=np.empty(n, dtype="i4"),
    )
    ok = np.ones(n, dtype=bool)
    for i_in_rslt in range(n):
        # print(rsltB.pos[i_in_rslt, -1])
        val = _get_hash_val(binner, hash_table, rsltB.pos[i_in_rslt, -1],
                            criteria.nfold)
        # print(
        # 'merge_results_concat', i_in_rslt, val, np.right_shift(val, 32),
        # np.right_shift(val, 16) % 16,
        # np.right_shift(val, 8) % 8, val % 8
        # )
        if val < 0:
            print("val < 0")
            ok[i_in_rslt] = False
            continue
        i_ot_rslt = np.right_shift(val, 32)
        assert i_ot_rslt < len(rsltA.idx)

        # check score asap
        pos = np.concatenate((
            rsltB.pos[i_in_rslt, :-1],
            rsltB.pos[i_in_rslt, -1] @ rsltA.pos[i_ot_rslt, :],
        ))
        assert np.allclose(pos[from_seg], rsltB.pos[i_in_rslt, -1])
        err = criteria.score(pos.reshape(-1, 1, 4, 4))
        merged.err[i_in_rslt] = err
        # print('merge_results_concat', i_in_rslt, pos)
        # print('merge_results_concat', i_in_rslt, err)
        if err > merged_err_cut:
            continue

        i_outer = rsltA.idx[i_ot_rslt, 0]
        i_outer2 = rsltA.idx[i_ot_rslt, -1]
        i_inner = rsltB.idx[i_in_rslt, -1]
        v_inner = ssdagB.verts[-1]
        v_outer = ssdagA.verts[0]
        ibb = v_outer.ibblock[i_outer]
        assert ibb == 0
        ires_in = v_inner.ires[i_inner, 0]
        ires_out = v_outer.ires[i_outer, 1]
        isite_in = v_inner.isite[i_inner, 0]
        isite_out = v_outer.isite[i_outer, 1]
        isite_out2 = ssdagA.verts[-1].isite[i_outer2, 0]
        mrgv = ssdag.verts[from_seg]
        assert max(mrgv.ibblock) == 0
        assert max(ssdagA.verts[-1].ibblock) == 0

        imerge = util.binary_search_pair(mrgv.ires, (ires_in, ires_out))
        if imerge == -1:
            # if imerge < 0:
            ok[i_in_rslt] = False
            continue
        idx = np.concatenate(
            (rsltB.idx[i_in_rslt, :-1], [imerge], rsltA.idx[i_ot_rslt, 1:]))
        assert len(idx) == len(ssdag.verts)
        for ii, v in zip(idx, ssdag.verts):
            if v is not None:
                assert ii < v.len
        assert len(pos) == len(idx) == nv
        merged.pos[i_in_rslt] = pos
        merged.idx[i_in_rslt] = idx
        merged.stats[i_in_rslt] = i_ot_rslt
    # print(merged.err[:100])
    nbad = np.sum(1 - ok)
    if nbad:
        print("bad imerge", nbad, "of", n)
    # print('bad score', np.sum(merged.err > merged_err_cut), 'of', n)
    ok[merged.err > merged_err_cut] = False
    ok = np.where(ok)[0][np.argsort(merged.err[ok])]
    merged = subset_result(merged, ok)
    return merged
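
The i_ot_rslt recovery above relies on the hash table packing the outer-result index into the high 32 bits of the 64-bit value (the layout is inferred from the code, not from project documentation). A toy round trip:

import numpy as np

i_ot_rslt = 1234  # hypothetical outer-result index
val = (np.int64(i_ot_rslt) << np.int64(32)) | np.int64(777)
print(np.right_shift(val, 32))  # 1234
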
Example #52
def get_sfc_qc(qa_data, mask57 = 0b11100000):
    sfc_qa = np.right_shift(np.bitwise_and(qa_data, mask57), 5)
    return sfc_qa
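
Here mask57 keeps bits 5-7 of each QA byte and the shift drops the low five bits, leaving the 3-bit surface-quality code. A quick check with hand-built bytes (the sample values are made up; the bit layout is taken from the mask above):

import numpy as np

qa = np.array([0b10100011, 0b00100000, 0b11100000], dtype=np.uint8)
print(get_sfc_qc(qa))  # [5 1 7]
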
Example #53
    def get_rising(self, hits):
        return np.right_shift(hits, 30) == rising_mask
Example #54
    def get_falling(self, hits):
        return np.right_shift(hits, 30) == falling_mask
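
Both predicates compare the top two bits of each 32-bit hit word against an edge code. rising_mask and falling_mask come from the parent module; the values below are assumptions for the demo:

import numpy as np

rising_mask, falling_mask = 1, 2  # hypothetical edge codes

hits = np.array([(1 << 30) | 42, (2 << 30) | 42], dtype=np.uint32)
print(np.right_shift(hits, 30) == rising_mask)   # [ True False]
print(np.right_shift(hits, 30) == falling_mask)  # [False  True]
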
Example #55
    # settings for pixel register (to input into pixel SR)
    # can be an integer representing the binary number desired,
    # or a bitarray (of the form bitarray("10101100")).

    chip['PIXEL_REG'][:] = bitarray('10' * 64)
    chip['PIXEL_REG'][0] = 0

    print("program pixel register...")
    chip.program_pixel_reg()

    # Get output size in bytes
    print("chip['DATA'].get_FIFO_SIZE() = {}".format(
        chip['DATA'].get_FIFO_SIZE()))

    # Get output in bytes
    print("chip['DATA'].get_data()")
    rxd = chip['DATA'].get_data()  # get data from sram fifo

    # keep only the low byte of each 16-bit FIFO word
    data0 = rxd.astype(np.uint8)
    # shift right by 8 bits to keep the high byte of each word
    data1 = np.right_shift(rxd, 8).astype(np.uint8)
    # interleave high and low bytes into a 1-D array of all bytes read from the FIFO
    data = np.reshape(np.vstack((data1, data0)), -1, order='F')
    bdata = np.unpackbits(data)

    print("data = {}".format(data))
    print("bdata = {}".format(bdata))
Example #56
    def f(value):
        return np.equal(
            np.right_shift(np.bitwise_and(value, 0x70000000), 28), channel)
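
Here channel is captured from the enclosing scope and 0x70000000 keeps bits 28-30, so the predicate selects events from one of eight channels. A self-contained sketch with an assumed channel number:

import numpy as np

channel = 3  # hypothetical channel this predicate is built for

def f(value):
    return np.equal(
        np.right_shift(np.bitwise_and(value, 0x70000000), 28), channel)

words = np.array([(3 << 28) | 0xFFF, (5 << 28) | 0xFFF], dtype=np.uint32)
print(f(words))  # [ True False]
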
Example #57
def numpy_right_shift(x1, x2):
    x1 = dpnp.asnumpy(x1) if isinstance(x1, dparray) else x1
    x2 = dpnp.asnumpy(x2) if isinstance(x2, dparray) else x2
    return numpy.right_shift(x1, x2)
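
This helper produces a NumPy reference result for comparison against device arrays: any dparray input is copied to the host first, everything else passes straight through. Plain ndarrays therefore work as-is (assuming dpnp and dparray are imported as in the snippet above):

import numpy

x1 = numpy.array([16, 32, 64])
x2 = numpy.array([1, 2, 3])

print(numpy_right_shift(x1, x2))  # [8 8 8]
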