Example #1
def add_bucket_entry(uhash, pieces, first_bucket_vector, second_bucket_vector, point_index):
    h_index = np.uint64(first_bucket_vector[0]) + np.uint64(second_bucket_vector[0 + 2])
    if h_index >= const.prime_default:
        h_index -= const.prime_default
    assert(h_index < const.prime_default)
    h_index = np.uint32(h_index)
    h_index = h_index % uhash.table_size
    
    control = np.uint64(first_bucket_vector[1]) + np.uint64(second_bucket_vector[1 + 2])
    if control >= const.prime_default:
        control -= const.prime_default
    assert(control < const.prime_default)
    control = np.uint32(control)

    if uhash.t == 1:
        b = uhash.ll_hash_table[h_index] 
        while b and b.control_value != control:
            b = b.next_bucket_in_chain
        # if bucket does not exist
        if b is None:
            uhash.buckets += 1
            uhash.ll_hash_table[h_index] = lsh_structs.bucket(control, point_index, uhash.ll_hash_table[h_index])
        else:
            bucket_entry = lsh_structs.bucket_entry(point_index, b.first_entry.next_entry)
            b.first_entry.next_entry = bucket_entry
    uhash.points += 1
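The index arithmetic above (it reappears in Example #4 below) reduces the sum of two precomputed hash components modulo a prime before taking it modulo the table size. A minimal standalone check of that reduction, with a made-up prime and table size:

import numpy as np

prime = np.uint64(4294967291)             # stand-in for const.prime_default
h = np.uint64(4294967290) + np.uint64(5)  # sum of two hash components
if h >= prime:
    h -= prime
assert h == np.uint64(4)
h_index = np.uint32(h) % 8                # table_size = 8, also made up
assert h_index == np.uint32(4)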
Example #2
    def test_valid(self):
        prop = bcpp.Int()

        assert prop.is_valid(None)

        assert prop.is_valid(0)
        assert prop.is_valid(1)

        assert prop.is_valid(np.int8(0))
        assert prop.is_valid(np.int8(1))
        assert prop.is_valid(np.int16(0))
        assert prop.is_valid(np.int16(1))
        assert prop.is_valid(np.int32(0))
        assert prop.is_valid(np.int32(1))
        assert prop.is_valid(np.int64(0))
        assert prop.is_valid(np.int64(1))
        assert prop.is_valid(np.uint8(0))
        assert prop.is_valid(np.uint8(1))
        assert prop.is_valid(np.uint16(0))
        assert prop.is_valid(np.uint16(1))
        assert prop.is_valid(np.uint32(0))
        assert prop.is_valid(np.uint32(1))
        assert prop.is_valid(np.uint64(0))
        assert prop.is_valid(np.uint64(1))

        # TODO (bev) should fail
        assert prop.is_valid(False)
        assert prop.is_valid(True)
Example #3
    def testInt(self):
        num = int(2562010)
        self.assertEqual(int(ujson.decode(ujson.encode(num))), num)

        num = np.int8(127)
        self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)

        num = np.int16(2562010)
        self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)

        num = np.int32(2562010)
        self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)

        num = np.int64(2562010)
        self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)

        num = np.uint8(255)
        self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)

        num = np.uint16(2562010)
        self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)

        num = np.uint32(2562010)
        self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)

        num = np.uint64(2562010)
        self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
Example #4
def get_bucket(uhash, pieces, first_bucket_vector, second_bucket_vector):
    h_index = np.uint64(first_bucket_vector[0]) + np.uint64(second_bucket_vector[0 + 2])
    if h_index >= const.prime_default:
        h_index -= const.prime_default
    assert(h_index < const.prime_default)
    h_index = np.uint32(h_index)
    h_index = h_index % uhash.table_size

    control = np.uint64(first_bucket_vector[1]) + np.uint64(second_bucket_vector[1 + 2])
    if control >= const.prime_default:
        control -= const.prime_default
    assert(control < const.prime_default)
    control = np.uint32(control)
    
    if uhash.t == 2:
        index_hybrid = uhash.hybrid_hash_table[h_index]

        while index_hybrid:
            if index_hybrid.control_value == control:
                index_hybrid = C.pointer(index_hybrid)[1]
                return index_hybrid
            else:
                index_hybrid = C.pointer(index_hybrid)[1]
                if index_hybrid.point.is_last_bucket:
                    return None
                l = index_hybrid.point.bucket_length
                index_hybrid = C.pointer(index_hybrid)[l]
        return None
Example #5
def _ints_arr_to_bits(ints_arr, out):
    """
    Convert an array of integers representing the set bits into the
    corresponding integer.

    Compiled as a ufunc by Numba's `@guvectorize`: if the input is a
    2-dim array with shape[0]=K, the function returns a 1-dim array of
    K converted integers.

    Parameters
    ----------
    ints_arr : ndarray(int32, ndim=1)
        Array of distinct integers from 0, ..., 63.

    Returns
    -------
    np.uint64
        Integer with set bits represented by the input integers.

    Examples
    --------
    >>> ints_arr = np.array([0, 1, 2], dtype=np.int32)
    >>> _ints_arr_to_bits(ints_arr)
    7
    >>> ints_arr2d = np.array([[0, 1, 2], [3, 0, 1]], dtype=np.int32)
    >>> _ints_arr_to_bits(ints_arr2d)
    array([ 7, 11], dtype=uint64)

    """
    m = ints_arr.shape[0]
    out[0] = 0
    for i in range(m):
        out[0] |= np.uint64(1) << np.uint64(ints_arr[i])
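The docstring above assumes the function is compiled by Numba. A sketch of what that decoration might look like (the signature string and flags are assumptions, not taken from the original project):

import numpy as np
from numba import guvectorize

@guvectorize(['void(int32[:], uint64[:])'], '(n)->()', nopython=True)
def ints_arr_to_bits(ints_arr, out):
    # `out` arrives as a length-1 array; writing out[0] sets the scalar result
    out[0] = 0
    for i in range(ints_arr.shape[0]):
        out[0] |= np.uint64(1) << np.uint64(ints_arr[i])

With this decoration, a (K, n) input broadcasts over the leading axis and yields K uint64 values, matching the doctest above.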
Example #6
    def testIntMax(self):
        num = int(np.iinfo(np.int_).max)
        self.assertEqual(int(ujson.decode(ujson.encode(num))), num)

        num = np.int8(np.iinfo(np.int8).max)
        self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)

        num = np.int16(np.iinfo(np.int16).max)
        self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)

        num = np.int32(np.iinfo(np.int32).max)
        self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)

        num = np.uint8(np.iinfo(np.uint8).max)
        self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)

        num = np.uint16(np.iinfo(np.uint16).max)
        self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)

        num = np.uint32(np.iinfo(np.uint32).max)
        self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)

        if platform.architecture()[0] != '32bit':
            num = np.int64(np.iinfo(np.int64).max)
            self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)

            # uint64 max will always overflow as it's encoded to signed
            num = np.uint64(np.iinfo(np.int64).max)
            self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
Example #7
 def __init__(self, index, desc):
     opt_lis = desc.split(',')
     self.key = opt_lis[0]
     self.index = index
     self.control = False
     self.event = False
     self.width = None
     self.mult = None
     self.unit = None
     # TODO Add gauge.
     for opt in opt_lis[1:]:
         if len(opt) == 0:
             continue
         elif opt[0] == 'C':
             self.control = True
         elif opt[0] == 'E':
             self.event = True
         elif opt[0:2] == 'W=':
             self.width = int(opt[2:])
         elif opt[0:2] == 'U=':
             i = 2
             while i < len(opt) and opt[i].isdigit():
                 i += 1
             if i > 2:
                 self.mult = numpy.uint64((opt[2:i]))
             if i < len(opt):
                 self.unit = opt[i:]
             if self.unit == "KB":
                 self.mult = numpy.uint64(1024)
                 self.unit = "B"
         else:
             error("unrecognized option `%s' in schema entry spec `%s'\n", opt, desc)
Example #8
 def __init__(self, i, s):
     opt_lis = s.split(',')
     self.key = opt_lis[0]
     self.index = i
     self.is_control = False
     self.is_event = False
     self.width = None
     self.mult = None
     self.unit = None
     for opt in opt_lis[1:]:
         if len(opt) == 0:
             continue
         elif opt[0] == 'C':
             self.is_control = True
         elif opt[0] == 'E':
             self.is_event = True
         elif opt[0:2] == 'W=':
             self.width = int(opt[2:])
         elif opt[0:2] == 'U=':
             j = 2
             while j < len(opt) and opt[j].isdigit():
                 j += 1
             if j > 2:
                 self.mult = numpy.uint64(opt[2:j])
             if j < len(opt):
                 self.unit = opt[j:]
             if self.unit == "KB":
                 self.mult = numpy.uint64(1024)
                 self.unit = "B"
         else:
             # XXX
             raise ValueError("unrecognized option `%s' in schema entry spec `%s'\n", opt, s)
Example #9
def _combine_hash_arrays(arrays, num_items):
    """
    Parameters
    ----------
    arrays : generator
    num_items : int

    Should be the same as CPython's tupleobject.c
    """
    try:
        first = next(arrays)
    except StopIteration:
        return np.array([], dtype=np.uint64)

    arrays = itertools.chain([first], arrays)

    mult = np.uint64(1000003)
    out = np.zeros_like(first) + np.uint64(0x345678)
    for i, a in enumerate(arrays):
        inverse_i = num_items - i
        out ^= a
        out *= mult
        mult += np.uint64(82520 + inverse_i + inverse_i)
    assert i + 1 == num_items, 'Fed in wrong num_items'
    out += np.uint64(97531)
    return out
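A hedged usage sketch with made-up inputs, combining two per-element hash arrays the way a caller hashing two columns might:

import numpy as np

h1 = np.array([1, 2, 3], dtype=np.uint64)
h2 = np.array([4, 5, 6], dtype=np.uint64)
combined = _combine_hash_arrays(iter([h1, h2]), 2)
assert combined.dtype == np.uint64 and combined.shape == (3,)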
Example #10
def find_overlapping_tx_low(src_tx_low, int_tx_low):
    """Finds TX_LOW entries in the source that are overlapped by the TX_LOW entries in other flow.

    Args:
        src_tx_low (Numpy Array):  Source TX_LOW numpy array of entries
        int_tx_low (Numpy Array):  Other TX_LOW numpy array of entries
        phy_sample_rate (int):     Sample rate of the PHY         

    Returns:
        indexes (tuple):
            Tuple containing indexes into the provided arrays indicating which entries overlapped
    """
    import numpy as np

    import wlan_exp.log.coll_util as collision_utility

    src_ts = src_tx_low['timestamp']
    int_ts = int_tx_low['timestamp']

    src_dur = np.uint64(calc_tx_time(src_tx_low['mcs'], src_tx_low['phy_mode'], src_tx_low['length'], src_tx_low['phy_samp_rate']))
    int_dur = np.uint64(calc_tx_time(int_tx_low['mcs'], int_tx_low['phy_mode'], int_tx_low['length'], int_tx_low['phy_samp_rate']))

    src_idx, int_idx = collision_utility._collision_idx_finder(src_ts, src_dur, int_ts, int_dur)

    src_idx = src_idx[src_idx>0]
    int_idx = int_idx[int_idx>0]

    return (src_idx, int_idx)
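The heavy lifting happens in _collision_idx_finder, which is not shown here. A generic numpy sketch of the interval-overlap test it must perform, on made-up start times and durations:

import numpy as np

src_ts  = np.array([0, 100, 200], dtype=np.uint64)
src_dur = np.array([10, 10, 10], dtype=np.uint64)
int_ts  = np.array([105], dtype=np.uint64)
int_dur = np.array([10], dtype=np.uint64)
# intervals [ts, ts + dur) overlap when each starts before the other ends
overlap = (src_ts[:, None] < int_ts + int_dur) & (int_ts < (src_ts + src_dur)[:, None])
src_idx, int_idx = np.nonzero(overlap)   # here: source entry 1 overlaps entry 0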
Example #11
    def makeMovies(self,beginTick, endTick, backgroundFrame, accumulate=False):
        t0 = np.uint64(beginTick)
        t1 = np.uint64(endTick)
        for iRow in range(cosmic.file.nRow):
            for iCol in range(cosmic.file.nCol):
                gtpl = self.getTimedPacketList(iRow,iCol,sec0,1)
        timestamps = gtpl['timestamps']
        timestamps *= cosmic.file.ticksPerSec
        ts32 = timestamps.astype(np.uint32)
        for ts in ts32:
            tindex = ts-t0
            try:
                listOfPixelsToMark[tindex].append((iRow,iCol))
            except IndexError:
                pass
            for tick in range(t0,t1):
                frames.append(frameSum)
                title = makeTitle(tick,t0,t1)
                titles.append(title)

                mfn0 = "m-%s-%s-%s-%s-%010d-%010d-i.gif"%(run,sundownDate,obsDate,seq,t0,t1)
                utils.makeMovie(frames, titles, outName=mfn0, delay=0.1, colormap=mpl.cm.gray,
                                listOfPixelsToMark=listOfPixelsToMark,
                                pixelMarkColor='red')

        for i in range(len(listOfPixelsToMark)-1):
            listOfPixelsToMark[i+1].extend(listOfPixelsToMark[i])

        mfn1 = "m-%s-%s-%s-%s-%010d-%010d-a.gif"%(run,sundownDate,obsDate,seq,t0,t1)
        utils.makeMovie(frames, titles, outName=mfn1, delay=0.1, colormap=mpl.cm.gray,
                        listOfPixelsToMark=listOfPixelsToMark,
                        pixelMarkColor='green')
Example #12
 def check_synapse(self, id):
     """ Get neuron pairs and coordinates
     """
     # Get the resolution and the scale
     res = 0
     sc = 0.5**res
     scales = np.array([sc, sc, 1])
     # If synapse exists
     if self.is_synapse(id):
         # Get the neuron pairs
         parents = self.synapse_parent(id)['parent_neurons']
         # Reverse the dictionary
         parents = {i[1]:i[0] for i in parents.items()}
         # If bidirectional synapse
         if 3 in parents:
             neurons = [parents[3], parents[3]]
         # If two neuron parents
         else:
             neurons = [parents[1], parents[2]]
         # Get the synapse coordinates
         keypoint = self.synapse_keypoint(res, id)['keypoint']
         full_keypoint = np.uint64(keypoint / scales).tolist()
         # Return all neuron ids and coordinates
         return np.uint64(neurons + full_keypoint)
     # Return nothing if non-existent
     return np.uint64([])
Example #13
    def calculateComplexDerefOpAddress(complexDerefOp, registerMap):

        match = re.match("((?:\\-?0x[0-9a-f]+)?)\\(%([a-z0-9]+),%([a-z0-9]+),([0-9]+)\\)", complexDerefOp)
        if match != None:
            offset = 0L
            if len(match.group(1)) > 0:
                offset = long(match.group(1), 16)

            regA = RegisterHelper.getRegisterValue(match.group(2), registerMap)
            regB = RegisterHelper.getRegisterValue(match.group(3), registerMap)

            mult = long(match.group(4), 16)

            # If we're missing any of the two register values, return None
            if regA == None or regB == None:
                if regA == None:
                    return (None, "Missing value for register %s" % match.group(2))
                else:
                    return (None, "Missing value for register %s" % match.group(3))

            if RegisterHelper.getBitWidth(registerMap) == 32:
                val = int32(uint32(regA)) + int32(uint32(offset)) + (int32(uint32(regB)) * int32(uint32(mult)))
            else:
                # Assume 64 bit width
                val = int64(uint64(regA)) + int64(uint64(offset)) + (int64(uint64(regB)) * int64(uint64(mult)))
            return (long(val), None)

        return (None, "Unknown failure.")
Example #14
 def prefixSumUp(self, e, data, ndata, data2, ndata2, events):
     import numpy as np
     import pyopencl as cl
     mf = cl.mem_flags
     
     if not isinstance(data, cl.Buffer):
         data_buf = cl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=data)
     else:
         data_buf = data
     
     if not isinstance(data2, cl.Buffer):
         data2_buf = cl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=data2)
     else:
         data2_buf = data2
             
     kernel = self.prg.prefixSumUp
     kernel.set_args(data_buf, np.uint64(ndata), data2_buf, np.uint64(ndata2))
     
     global_dims = self.get_global(self.get_grid_dims(ndata))
     
     print "prefixSumUp"
      if e is None:
          e  = ( cl.enqueue_nd_range_kernel(self.queue, kernel, global_dims, self.localDims), )
      else:
          e  = ( cl.enqueue_nd_range_kernel(self.queue, kernel, global_dims, self.localDims, wait_for=e), )
     events += e
     
     return (e, data_buf, data2_buf)
Example #15
    def write_mwhite_subsample(self, subsample, output):
        size = self.comm.allreduce(len(subsample))
        offset = sum(self.comm.allgather(len(subsample))[: self.comm.rank])

        if self.comm.rank == 0:
            with open(output, "wb") as ff:
                dtype = numpy.dtype(
                    [
                        ("eflag", "int32"),
                        ("hsize", "int32"),
                        ("npart", "int32"),
                        ("nsph", "int32"),
                        ("nstar", "int32"),
                        ("aa", "float"),
                        ("gravsmooth", "float"),
                    ]
                )
                header = numpy.zeros((), dtype=dtype)
                header["eflag"] = 1
                header["hsize"] = 20
                header["npart"] = size
                header.tofile(ff)

        self.comm.barrier()

        with open(output, "r+b") as ff:
            ff.seek(28 + offset * 12)
            numpy.float32(subsample["Position"]).tofile(ff)
            ff.seek(28 + offset * 12 + size * 12)
            numpy.float32(subsample["Velocity"]).tofile(ff)
            ff.seek(28 + offset * 4 + size * 24)
            numpy.float32(subsample["Density"]).tofile(ff)
            ff.seek(28 + offset * 8 + size * 28)
            numpy.uint64(subsample["ID"]).tofile(ff)
Example #16
 def __init__(self, fpga, comb, f_start, f_stop, logger=logging.getLogger(__name__)):
     """ f_start and f_stop must be in Hz
     """
     self.logger = logger
     snap_name = "snap_{a}x{b}".format(a=comb[0], b=comb[1])
     self.snapshot0 = Snapshot(fpga,
                              "{name}_0".format(name = snap_name),
                              dtype='>i8',
                              cvalue=True,
                              logger=self.logger.getChild("{name}_0".format(name = snap_name)))
     self.snapshot1 = Snapshot(fpga,
                              "{name}_1".format(name = snap_name),
                              dtype='>i8',
                              cvalue=True,
                              logger=self.logger.getChild("{name}_1".format(name = snap_name)))
     self.f_start = np.uint64(f_start)
     self.f_stop = np.uint64(f_stop)
     # this will change from None to an array of phase offsets for each frequency bin 
     # if calibration gets applied at a later stage.
     # this is an array of phases introduced by the system. So if a value is positive, 
     # it means that the system is introducing a phase shift between comb[0] and comb[1]
     # in other words comb1 is artificially delayed. 
     self.calibration_phase_offsets = None
     self.calibration_cable_length_offsets = None
     self.arm()
     self.fetch_signal()
     self.frequency_bins = np.linspace(
         start = self.f_start,
         stop = self.f_stop,
         num = len(self.signal),
         endpoint = False)
Example #17
    def hash_array(vals):
        """Given a 1d array, return an array of deterministic integers."""
        # work with categoricals as ints. (This check is above the complex
        # check so that we don't ask numpy if categorical is a subdtype of
        # complex, as it will choke.)
        if is_categorical_dtype(vals.dtype):
            vals = vals.codes

        # we'll be working with everything as 64-bit values, so handle this
        # 128-bit value early
        if np.issubdtype(vals.dtype, np.complex128):
            return hash_array(vals.real) + 23 * hash_array(vals.imag)

        # MAIN LOGIC:

        # First, turn whatever array this is into unsigned 64-bit ints, if we can
        # manage it.
        if vals.dtype == bool:
            vals = vals.astype('u8')

        elif (np.issubdtype(vals.dtype, np.datetime64) or
              np.issubdtype(vals.dtype, np.timedelta64) or
              np.issubdtype(vals.dtype, np.number)) and vals.dtype.itemsize <= 8:

            vals = vals.view('u{}'.format(vals.dtype.itemsize)).astype('u8')
        else:
            vals = np.array([hash(x) for x in vals], dtype=np.uint64)

        # Then, redistribute these 64-bit ints within the space of 64-bit ints
        vals ^= vals >> 30
        vals *= np.uint64(0xbf58476d1ce4e5b9)
        vals ^= vals >> 27
        vals *= np.uint64(0x94d049bb133111eb)
        vals ^= vals >> 31
        return vals
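The redistribution step at the end is the splitmix64-style finalizer. Isolated on a toy array, it can be checked to keep distinct inputs distinct (xorshifts and odd multipliers are invertible on uint64):

import numpy as np

vals = np.arange(4, dtype=np.uint64)
vals ^= vals >> np.uint64(30)
vals *= np.uint64(0xbf58476d1ce4e5b9)
vals ^= vals >> np.uint64(27)
vals *= np.uint64(0x94d049bb133111eb)
vals ^= vals >> np.uint64(31)
assert len(np.unique(vals)) == 4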
Example #18
  def __compile_kernels(self):
    """ DFS module """
    f = self.forest
    self.find_min_kernel = f.find_min_kernel  
    self.fill_kernel = f.fill_kernel 
    self.scan_reshuffle_tex = f.scan_reshuffle_tex 
    self.comput_total_2d = f.comput_total_2d 
    self.reduce_2d = f.reduce_2d
    self.scan_total_2d = f.scan_total_2d 
    self.scan_reduce = f.scan_reduce 
    
    """ BFS module """
    self.scan_total_bfs = f.scan_total_bfs
    self.comput_bfs_2d = f.comput_bfs_2d
    self.fill_bfs = f.fill_bfs 
    self.reshuffle_bfs = f.reshuffle_bfs 
    self.reduce_bfs_2d = f.reduce_bfs_2d 
    self.get_thresholds = f.get_thresholds 

    """ Other """
    self.predict_kernel = f.predict_kernel 
    self.mark_table = f.mark_table
    const_sorted_indices = f.bfs_module.get_global("sorted_indices_1")[0]
    const_sorted_indices_ = f.bfs_module.get_global("sorted_indices_2")[0]
    cuda.memcpy_htod(const_sorted_indices, np.uint64(self.sorted_indices_gpu.ptr)) 
    cuda.memcpy_htod(const_sorted_indices_, np.uint64(self.sorted_indices_gpu_.ptr)) 
Example #19
def uint64_from_uint63(x):
    out = np.empty(len(x) // 2, dtype=np.uint64)
    for i in range(0, len(x), 2):
        a = x[i] & np.uint64(0xffffffff00000000)
        b = x[i + 1] >> np.uint64(32)
        out[i // 2] = a | b
    return out
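A quick check with made-up inputs: the high half of each even element is kept in place and the high half of the following element fills the low 32 bits:

import numpy as np

x = np.array([0x0123456789abcdef, 0x0fedcba987654321], dtype=np.uint64)
out = uint64_from_uint63(x)
assert out[0] == np.uint64(0x012345670fedcba9)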
Example #20
	def FromByteString(cls,bytestr):
		"""
		Initialize Packet from the given byte string
		"""
		# check correct size packet
		len_bytes = len(bytestr)
		if not len_bytes == cls.BYTES_IN_PACKET:
			raise ValueError("Packet should comprise {0} bytes, but has {1} bytes".format(len_bytes,cls.BYTES_IN_PACKET))
		# unpack header
		hdr = unpack(">{0}Q".format(cls.BYTES_IN_HEADER/8),bytestr[:cls.BYTES_IN_HEADER])
		ut = uint32(hdr[0] & 0xFFFFFFFF)
		pktnum = uint32((hdr[0]>>uint32(32)) & 0xFFFFF)
		did = uint8(hdr[0]>>uint32(52) & 0x3F)
		ifid = uint8(hdr[0]>>uint32(58) & 0x3F)
		ud1 = uint32(hdr[1] & 0xFFFFFFFF)
		ud0 = uint32((hdr[1]>>uint32(32)) & 0xFFFFFFFF)
		res0 = uint64(hdr[2])
		res1 = uint64(hdr[3]&0x7FFFFFFFFFFFFFFF)
		fnt = not (hdr[3]&0x8000000000000000 == 0)
		# unpack data in 64bit mode to correct for byte-order
		data_64bit = array(unpack(">{0}Q".format(cls.BYTES_IN_PAYLOAD/8),bytestr[cls.BYTES_IN_HEADER:]),dtype=uint64)
		data = zeros(cls.BYTES_IN_PAYLOAD,dtype=int8)
		for ii in xrange(len(data_64bit)):
			for jj in xrange(8):
				data[ii*8+jj] = int8((data_64bit[ii]>>uint64(8*jj))&uint64(0xFF))
		return Packet(ut,pktnum,did,ifid,ud0,ud1,res0,res1,fnt,data)
Example #21
def parseSynchData(synch_data, offset=0x8000):
    '''
    This routine takes an array of data from the SIS3316 and returns the averaged values and timestamps
    from the raw dataset. It assumes that the default short unsigned int dataset has no raw samples and
    2 averaged samples - this works out to 10 unsigned short words per event:

    (averages, timestamps) = parseSynchData(synch_data, offset=0x8000)

    Args:
        synch_data:     Array from the sis3316 digitizer from the h5 file. Assumes that no
         raw samples are taken and only two averaged samples are taken.
        offset   :     default=0x8000. Offset value to convert the raw short unsigned int
            to floating voltage values.

    Returns:
        averages:       nx2 array of 2 averaged samples taken by the digitizer. Rescaled by offset to give
                            double voltages.
        timestamps:     Array of timestamp values corresponding to the averaged samples.
    '''
    #Cast as an array just in case it hasn't already been done.
    synch_data = np.array(synch_data)
    t3 = synch_data[1::10]
    t1 = synch_data[2::10]
    t2 = synch_data[3::10]
    #bitshift the second and third chunks leftwise to create the final timestamps.
    timestamps = np.uint64(t1) + (np.uint64(t2) << 16) + (np.uint64(t3) << 32)
    #Now, take care of the data itself:
    avs1 = synch_data[8::10]
    avs2 = synch_data[9::10]
    #Subtract offset, divide by max uint value, and rescale over 5V range:
    avs1 = (avs1.astype(float) - offset) / 0xffff * 5.0
    avs2 = (avs2.astype(float) - offset) / 0xffff * 5.0
    avs = np.vstack((avs1, avs2)).T
    return avs, timestamps
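A hedged check of the timestamp assembly on made-up 16-bit words, mirroring the shift-and-add above:

import numpy as np

t1, t2, t3 = np.uint16(0x1234), np.uint16(0x5678), np.uint16(0x9abc)
ts = np.uint64(t1) + (np.uint64(t2) << np.uint64(16)) + (np.uint64(t3) << np.uint64(32))
assert ts == 0x9abc56781234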
Example #22
    def xtest_trigger_pulse_one_sequence(self):
        cprint('testing usrp trigger with one sequence','red')
        seq = create_testsequence()
        swing = 0
        nSamples_per_pulse =  self.fill_tx_shm_with_one_pulse(seq, swing)

        nSamples_rx = np.uint64(np.round((RFRATE) * (seq.ctrlprm['number_of_samples'] / seq.ctrlprm['baseband_samplerate'])))  # TODO: this has to be changed for the integration period

        cprint('sending setup command', 'blue')
        offset_sample_list = [offset * RFRATE for offset in seq.pulse_offsets_vector]

        cmd = usrp_setup_command([self.serversock], seq.ctrlprm['tfreq'], seq.ctrlprm['rfreq'],RFRATE, RFRATE, seq.npulses, nSamples_rx, nSamples_per_pulse, offset_sample_list, swing)
        cmd.transmit()
        client_returns = cmd.client_return()
        for r in client_returns:
            assert(r == UHD_SETUP)

    
        for i in range(10):
#        while True:
            # grab current usrp time from one usrp_driver
            cmd = usrp_get_time_command(self.serversock)
            cmd.transmit()
            usrp_time = cmd.recv_time(self.serversock)
            cmd.client_return()

            cprint('sending trigger pulse command', 'blue')
            trigger_time = usrp_time +  INTEGRATION_PERIOD_SYNC_TIME
            cmd = usrp_trigger_pulse_command([self.serversock], trigger_time, swing)
            cmd.transmit()
            client_returns = cmd.client_return()
            for r in client_returns:
                assert(r == UHD_TRIGGER_PULSE) 


            cprint('checking trigger pulse data', 'blue')
            # request pulse data
            cmd = usrp_ready_data_command([self.serversock], swing)
            cmd.transmit()
            ret = cmd.recv_metadata(self.serversock)
            print("  recieved READY STATUS: status:{}, ant: {}, nSamples: {}, fault: {}".format(ret['status'], ret['antenna'], ret['nsamples'], ret['fault']))

            client_returns = cmd.client_return()
            for r in client_returns:
                assert(r == UHD_READY_DATA) 

            cprint('finished test trigger pulse', 'green')
            
        # plot data
        num_rx_samples = np.uint64(2*np.round((RFRATE) * (seq.ctrlprm['number_of_samples'] / seq.ctrlprm['baseband_samplerate'])))
        rx_shm = rx_shm_list[SIDEA][swing][0]
        rx_shm.seek(0)
        ar = np.frombuffer(rx_shm, dtype=np.int16, count=num_rx_samples)
        arp = np.sqrt(np.float32(ar[0::2]) ** 2 + np.float32(ar[1::2]) ** 2)
        print('sampled power')
        print(arp[:200000:1000])

        print('sampled phase')
        import matplotlib.pyplot as plt
Example #23
def inject_events(number_of_events, device):
  # Variables
  out_send = [0 for x in range(number_of_events)]
  err_send = [0 for x in range(number_of_events)]
  max_uint64 = np.iinfo(np.uint64).max
  
  # Prepare XML log document
  doc = Document()
  root = doc.createElement('root')
  doc.appendChild(root)

  # Send events
  for x in range(0, number_of_events):
    eid = np.uint64(rnd.randint(0, max_uint64))
    epara = np.uint64(rnd.randint(0, max_uint64))
    process_send = subprocess.Popen(["saft-ctl", device, "inject", str(eid), str(epara), "0", "-v", "-x",], stdout=subprocess.PIPE)
    out_send[x], err_send[x] = process_send.communicate()
    out_send_split = out_send[x].split()
    
    #print "send string:"
    print out_send[x]
        
    # Dump feedback (send stuff) to XML log file
    main = doc.createElement('event')
    root.appendChild(main)
    
    # Log ID
    if eid == int(out_send_split[3], 0): # check if given id was sent
      p = doc.createElement('id')
      text = doc.createTextNode(out_send_split[3])
      p.appendChild(text)
      main.appendChild(p)
    else:
      sys.exit(1)
    
    # Log parameter
    if epara == int(out_send_split[4], 0): # check if given parameter was sent
      p = doc.createElement('parameter')
      text = doc.createTextNode(out_send_split[4])
      p.appendChild(text)
      main.appendChild(p)
    else:
      sys.exit(1)
    
    # Log execution time
    p = doc.createElement('time')
    text = doc.createTextNode(out_send_split[5])
    p.appendChild(text)
    main.appendChild(p)
  
  # Save events to XML file
  f = open("injected_events.xml", "w")
  try:
    f.write(doc.toprettyxml(indent="  "))
  finally:
    f.close()
  
  # Done
  return 0
Example #24
    def __init__(self, init_data, n_generators):

        self.ctx = curr_gpu.make_context()
        self.module = pycuda.compiler.SourceModule(kernels_cuda_src, no_extern_c=True)
        (free, total) = cuda.mem_get_info()
        print(("Global memory occupancy:%f%% free" % (free * 100 / total)))
        print(("Global free memory :%i Mo free" % (free / 10 ** 6)))

        ################################################################################################################

        self.width_mat = np.int32(init_data.shape[0])
        #        self.gpu_init_data = ga.to_gpu(init_data)
        self.gpu_init_data = cuda.mem_alloc(init_data.nbytes)
        cuda.memcpy_htod(self.gpu_init_data, init_data)

        self.cpu_new_data = np.zeros_like(init_data, dtype=np.float32)
        print("size new data = ", self.cpu_new_data.nbytes / 10 ** 6)
        (free, total) = cuda.mem_get_info()
        print(("Global memory occupancy:%f%% free" % (free * 100 / total)))
        print(("Global free memory :%i Mo free" % (free / 10 ** 6)))

        self.gpu_new_data = cuda.mem_alloc(self.cpu_new_data.nbytes)
        cuda.memcpy_htod(self.gpu_new_data, self.cpu_new_data)
        #        self.gpu_new_data = ga.to_gpu(self.cpu_new_data)

        self.cpu_vect_sum = np.zeros((self.width_mat,), dtype=np.float32)
        self.gpu_vect_sum = cuda.mem_alloc(self.cpu_vect_sum.nbytes)
        cuda.memcpy_htod(self.gpu_vect_sum, self.cpu_vect_sum)
        #        self.gpu_vect_sum = ga.to_gpu(self.cpu_vect_sum)
        ################################################################################################################
        self.init_rng = self.module.get_function("init_rng")
        self.gen_rand_mat = self.module.get_function("gen_rand_mat")
        self.sum_along_axis = self.module.get_function("sum_along_axis")
        self.norm_along_axis = self.module.get_function("norm_along_axis")
        self.init_vect_sum = self.module.get_function("init_vect_sum")
        self.copy_mat = self.module.get_function("copy_mat")
        ################################################################################################################
        self.n_generators = n_generators
        seed = 1
        self.rng_states = cuda.mem_alloc(
            n_generators
            * characterize.sizeof("curandStateXORWOW", "#include <curand_kernel.h>")
        )
        self.init_rng(
            np.int32(n_generators),
            self.rng_states,
            np.uint64(seed),
            np.uint64(0),
            block=(64, 1, 1),
            grid=(n_generators // 64 + 1, 1),
        )
        (free, total) = cuda.mem_get_info()

        size_block_x = 32
        size_block_y = 32
        n_blocks_x = int(self.width_mat) // (size_block_x) + 1
        n_blocks_y = int(self.width_mat) // (size_block_y) + 1
        self.grid = (n_blocks_x, n_blocks_y, 1)
        self.block = (size_block_x, size_block_y, 1)
Example #25
 def foo(v1, v2):
     d = dictobject.new_dict(int64, float64)
     c1 = np.uint64(2 ** 61 - 1)
     c2 = np.uint64(0)
     assert hash(c1) == hash(c2)
     d[c1] = v1
     d[c2] = v2
     return (d[c1], d[c2])
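The collision is real because CPython hashes integers modulo the Mersenne prime 2**61 - 1, so both keys land on the same hash:

assert hash(2 ** 61 - 1) == hash(0) == 0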
Example #26
def reverse(bb):            # A naive and really slow reverse
    r = EMPTY
    for c in range(64):
        r |= bb & np.uint64(1)
        if c < 63:
            r <<= np.uint64(1)
        bb >>= np.uint64(1)
    return r
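A hedged sanity check, assuming EMPTY is np.uint64(0) (its definition is not shown above):

import numpy as np

EMPTY = np.uint64(0)   # assumed definition
assert reverse(np.uint64(1)) == np.uint64(1) << np.uint64(63)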
Example #27
 def to_ntp(self):
     """
     Converts the IonTime object into a RFC 5905 (NTPv4) compliant 64bit time stamp
     """
     left = np.uint64(self.seconds << 32)
     right = np.uint64(self.useconds / 1e6 * self.FRAC)
     timestamp = np.uint64(left + right)
     return self.htonll(timestamp)
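A hedged worked example of the packing, assuming self.FRAC is 2**32 (the usual NTPv4 fraction denominator): seconds occupy the upper 32 bits, the scaled microseconds the lower 32.

import numpy as np

FRAC = 2 ** 32                 # assumed value of self.FRAC
seconds, useconds = 1, 500000  # 1.5 s
left = np.uint64(seconds << 32)
right = np.uint64(useconds / 1e6 * FRAC)
timestamp = np.uint64(left + right)
assert timestamp == (1 << 32) + 2 ** 31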
Example #28
def hash_row(row):
    one = np.uint64(1)
    zero = np.uint64(0)
    acc = zero
    for i in row:
        acc = np.left_shift(acc, one)
        acc = np.bitwise_or(acc, one if i else zero)
    return acc
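The accumulator packs the row MSB-first, so a row of three booleans maps to a 3-bit integer:

import numpy as np

row = np.array([True, False, True])
assert hash_row(row) == np.uint64(0b101)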
Example #29
def lcls2float(t):
   if isinstance(t, numpy.ndarray):
      t0 = numpy.right_shift(t.astype(numpy.uint64), numpy.uint64(32))
   else:
      t0 = numpy.right_shift(numpy.uint64(t), numpy.uint64(32))
   t1 = numpy.bitwise_and(numpy.uint64(t), numpy.uint64(0x00000000ffffffff))
   t2 = t0 + t1*1.e-9
   return t2
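A hedged round-trip check with a made-up LCLS2 stamp: 5 seconds in the high word and 250000000 ns in the low word come back as 5.25 s.

import numpy

t = numpy.uint64((5 << 32) | 250000000)
assert abs(lcls2float(t) - 5.25) < 1e-9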
Example #30
def uniform32_from_uint64(x):
    x = np.uint64(x)
    upper = np.array(x >> np.uint64(32), dtype=np.uint32)
    lower = np.uint64(0xffffffff)
    lower = np.array(x & lower, dtype=np.uint32)
    joined = np.column_stack([lower, upper]).ravel()
    out = (joined >> np.uint32(9)) * (1.0 / 2 ** 23)
    return out.astype(np.float32)
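Each 64-bit input yields two float32 values in [0, 1); a quick check at the all-ones input:

import numpy as np

out = uniform32_from_uint64(np.uint64(2 ** 64 - 1))
assert out.shape == (2,) and np.all(out < 1.0)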
Example #31
 def propagate(self, grid, index, collapsed):
     self.reduce_to_allowed(np.unravel_index(index, self.model.world_shape),
                            np.uint64(collapsed), grid)
Example #32
np.int32(4)
np.int64(-1)
np.uint8(B())
np.uint32()
np.int32("1")
np.int64(b"2")

np.float16(A())
np.float32(16)
np.float64(3.0)
np.float64(None)
np.float32("1")
np.float16(b"2.5")

if sys.version_info >= (3, 8):
    np.uint64(D())
    np.float32(D())
    np.complex64(D())

np.bytes_(b"hello")
np.bytes_("hello", 'utf-8')
np.bytes_("hello", encoding='utf-8')
np.str_("hello")
np.str_(b"hello", 'utf-8')
np.str_(b"hello", encoding='utf-8')

# Protocols
float(np.int8(4))
int(np.int16(5))
np.int8(np.float32(6))
Example #33
 def white_P_east_attacks(self):
     # White pawn east attacks are north east (+9) AND NOT the A File
     return (self.white_P_bb << np.uint64(9)) & ~np.uint64(File.hexA)
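A hedged check of the wrap mask, assuming File.hexA is the A-file bitboard 0x0101010101010101 (not shown above): a pawn on h2 shifted north-east lands on a4 and must be masked off.

import numpy as np

hexA = np.uint64(0x0101010101010101)     # assumed value of File.hexA
h2 = np.uint64(1) << np.uint64(15)       # square h2 (a1 = bit 0)
assert ((h2 << np.uint64(9)) & ~hexA) == np.uint64(0)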
Example #34
 def black_pawn_east_attacks(self):
     # Black pawn east attacks are south east (-7) AND NOT the A File
     return (self.black_P_bb >> np.uint64(7)) & ~np.uint64(File.hexA)
Example #35
def ReadVMD(f, varidx, varsStartPosition, varsTotalLength):
    startPosition = f.tell()
    print("  Var {0:5d}".format(varidx))
    print("      Starting offset : {0}".format(startPosition))
    # 4 bytes TAG
    tag = f.read(4)
    if (tag != b"[VMD"):
        print("  Tag: " + str(tag))
        print("ERROR: VAR group does not start with [VMD")
        return False
    print("      Tag             : " + tag.decode('ascii'))

    # 8 bytes VMD Length
    vmdlen = np.fromfile(f, dtype=np.uint64, count=1)[0]
    print("      Var block size  : {0} bytes (+4 for Tag)".format(vmdlen))
    expectedVarBlockLength = vmdlen + 4  # [VMD is not included in vmdlen

    if (startPosition + expectedVarBlockLength >
            varsStartPosition + varsTotalLength):
        print("ERROR: There is not enough bytes inside this PG to read "
              "this Var block")
        print("VarsStartPosition = {0} varsTotalLength = {1}".format(
            varsStartPosition, varsTotalLength))
        print(
            "current var's start position = {0} var block length = {1}".format(
                startPosition, expectedVarBlockLength))
        return False

    # 4 bytes VAR MEMBER ID
    memberID = np.fromfile(f, dtype=np.uint32, count=1)[0]
    print("      Member ID       : {0}".format(memberID))

    # VAR NAME, 2 bytes length + string without \0
    sizeLimit = expectedVarBlockLength - (f.tell() - startPosition)
    status, varname = ReadEncodedString(f, "Var Name", sizeLimit)
    if not status:
        return False
    print("      Var Name        : " + varname)

    # VAR PATH, 2 bytes length + string without \0
    # sizeLimit = expectedVarBlockLength - (f.tell() - startPosition)
    # status, varpath = ReadEncodedString(f, "Var Path", sizeLimit)
    # if not status:
    #     return False
    # print("      Var Path        : " + varpath)

    # 1 byte ORDER (K, C, F)
    order = f.read(1)
    if (order != b'K' and order != b'C' and order != b'F'):
        print("ERROR: Next byte for Order must be 'K', 'C', or 'F' "
              "but it isn't = {0}".format(order))
        return False
    print("        Order           : " + order.decode('ascii'))

    # 1 byte UNUSED
    unused = f.read(1)
    print("        Unused byte     : {0}".format(ord(unused)))

    # 1 byte TYPE
    typeID = np.fromfile(f, dtype=np.uint8, count=1)[0]
    print("      Type            : {0} ({1}) ".format(
        bp4dbg_utils.GetTypeName(typeID), typeID))

    # ISDIMENSIONS 1 byte, 'y' or 'n'
    isDimensionVar = f.read(1)
    if (isDimensionVar != b'y' and isDimensionVar != b'n'):
        print("ERROR: Next byte for isDimensionVar must be 'y' or 'n' "
              "but it isn't = {0}".format(isDimensionVar))
        return False
    print("      isDimensionVar  : " + isDimensionVar.decode('ascii'))

    # 1 byte NDIMENSIONS
    ndims = np.fromfile(f, dtype=np.uint8, count=1)[0]
    print("      # of Dimensions : {0}".format(ndims))

    # DIMLENGTH
    dimsLen = np.fromfile(f, dtype=np.uint16, count=1)[0]
    print("      Dims Length     : {0}".format(dimsLen))

    nElements = np.uint64(1)
    ldims = np.zeros(ndims, dtype=np.uint64)
    isLocalValueArray = False
    for i in range(ndims):
        print("      Dim[{0}]".format(i))
        # Read Local Dimensions (1 byte flag + 8 byte value)
        # Is Dimension a variable ID 1 byte, 'y' or 'n' or '\0'
        isDimensionVarID = f.read(1)
        if (isDimensionVarID != b'y' and isDimensionVarID != b'n'
                and isDimensionVarID != b'\0'):
            print("ERROR: Next byte for isDimensionVarID must be 'y' or 'n' "
                  "but it isn't = {0}".format(isDimensionVarID))
            return False
        if (isDimensionVarID == b'\0'):
            isDimensionVarID = b'n'
        ldims[i] = np.fromfile(f, dtype=np.uint64, count=1)[0]
        print("           local  dim : {0}".format(ldims[i]))
        nElements = nElements * ldims[i]
        # Read Global Dimensions (1 byte flag + 8 byte value)
        # Is Dimension a variable ID 1 byte, 'y' or 'n' or '\0'
        isDimensionVarID = f.read(1)
        if (isDimensionVarID != b'y' and isDimensionVarID != b'n'
                and isDimensionVarID != b'\0'):
            print("ERROR: Next byte for isDimensionVarID must be 'y' or 'n' "
                  "but it isn't = {0}".format(isDimensionVarID))
            return False
        if (isDimensionVarID == b'\0'):
            isDimensionVarID = b'n'
        gdim = np.fromfile(f, dtype=np.uint64, count=1)[0]
        if i == 0 and ldims[i] == 0 and gdim == bp4dbg_utils.LocalValueDim:
            print("           global dim : LocalValueDim ({0})".format(gdim))
            isLocalValueArray = True
        else:
            print("           global dim : {0}".format(gdim))

        # Read Offset Dimensions (1 byte flag + 8 byte value)
        # Is Dimension a variable ID 1 byte, 'y' or 'n' or '\0'
        isDimensionVarID = f.read(1)
        if (isDimensionVarID != b'y' and isDimensionVarID != b'n'
                and isDimensionVarID != b'\0'):
            print("ERROR: Next byte for isDimensionVarID must be 'y' or 'n' "
                  "but it isn't = {0}".format(isDimensionVarID))
            return False
        if (isDimensionVarID == b'\0'):
            isDimensionVarID = b'n'
        offset = np.fromfile(f, dtype=np.uint64, count=1)[0]
        print("           offset dim : {0}".format(offset))

    sizeLimit = expectedVarBlockLength - (f.tell() - startPosition)
    status = ReadCharacteristicsFromData(f, sizeLimit, typeID, ndims)
    if not status:
        return False

    # Padded end TAG
    # 1 byte length of tag
    endTagLen = np.fromfile(f, dtype=np.uint8, count=1)[0]
    tag = f.read(endTagLen)
    if (not tag.endswith(b"VMD]")):
        print("  Tag: " + str(tag))
        print("ERROR: VAR group metadata does not end with VMD]")
        return False
    print("      Tag (pad {0:2d})    : {1}".format(endTagLen - 4,
                                                   tag.decode('ascii')))

    # special case: LocalValueDim: local values turned into 1D global array
    # but it seems there is no data block at all for these variables
    if isLocalValueArray:
        ldims[0] = 1
        nElements = np.uint64(1)
    else:
        expectedVarDataSize = expectedVarBlockLength - \
            (f.tell() - startPosition)
        status = ReadVarData(f, nElements, typeID, ldims, expectedVarDataSize,
                             varsStartPosition, varsTotalLength)
    if not status:
        return False

    return True
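Each dimension record read above is three (1-byte flag, 8-byte uint64) pairs: local, global, offset. A hedged sketch of one such record, using an in-memory buffer in place of the file:

import io
import numpy as np

buf = io.BytesIO(b'n' + np.uint64(4).tobytes() +     # local dim
                 b'n' + np.uint64(16).tobytes() +    # global dim
                 b'n' + np.uint64(8).tobytes())      # offset dim
flag = buf.read(1)                                   # isDimensionVarID
ldim = np.frombuffer(buf.read(8), dtype=np.uint64)[0]
assert flag == b'n' and ldim == 4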
Example #36
def uniform32_from_uint53(x):
    x = np.uint64(x) >> np.uint64(16)
    x = np.uint32(x & np.uint64(0xffffffff))
    out = (x >> np.uint32(9)) * (1.0 / 2 ** 23)
    return out.astype(np.float32)
Example #37
def motif4struct_bin(A):
    '''
    Structural motifs are patterns of local connectivity. Motif frequency
    is the frequency of occurrence of motifs around a node.

    Parameters
    ----------
    A : NxN np.ndarray
        binary directed connection matrix

    Returns
    -------
    F : 199xN np.ndarray
        motif frequency matrix
    f : 199x1 np.ndarray
        motif frequency vector (averaged over all nodes)
    '''
    from scipy import io
    import os
    fname = os.path.join(os.path.dirname(__file__), motiflib)
    mot = io.loadmat(fname)
    m4n = mot['m4n']
    id4 = mot['id4'].squeeze()

    n = len(A)
    f = np.zeros((199,))
    F = np.zeros((199, n))  # frequency

    A = binarize(A, copy=True)  # ensure A is binary
    As = np.logical_or(A, A.T)  # symmetrized adjmat

    for u in range(n - 3):
        # v1: neighbors of u (>u)
        V1 = np.append(np.zeros((u,), dtype=int), As[u, u + 1:n + 1])
        for v1 in np.where(V1)[0]:
            V2 = np.append(np.zeros((u,), dtype=int), As[v1, u + 1:n + 1])
            V2[V1] = 0  # not already in V1
            # and all neighbors of u (>v1)
            V2 = np.logical_or(
                np.append(np.zeros((v1,)), As[u, v1 + 1:n + 1]), V2)
            for v2 in np.where(V2)[0]:
                vz = np.max((v1, v2))  # vz: largest rank node
                # v3: all neighbors of v2 (>u)
                V3 = np.append(np.zeros((u,), dtype=int), As[v2, u + 1:n + 1])
                V3[V2] = 0  # not already in V1 and V2
                # and all neighbors of v1 (>v2)
                V3 = np.logical_or(
                    np.append(np.zeros((v2,)), As[v1, v2 + 1:n + 1]), V3)
                V3[V1] = 0  # not already in V1
                # and all neighbors of u (>vz)
                V3 = np.logical_or(
                    np.append(np.zeros((vz,)), As[u, vz + 1:n + 1]), V3)
                for v3 in np.where(V3)[0]:

                    a = np.array((A[v1, u], A[v2, u], A[v3, u], A[u, v1], A[v2, v1],
                                  A[v3, v1], A[u, v2], A[v1, v2], A[
                                      v3, v2], A[u, v3], A[v1, v3],
                                  A[v2, v3]))

                    s = np.uint64(
                        np.sum(np.power(10, np.arange(11, -1, -1)) * a))
                    ix = id4[np.squeeze(s == m4n)]
                    F[ix, u] += 1
                    F[ix, v1] += 1
                    F[ix, v2] += 1
                    F[ix, v3] += 1
                    f[ix] += 1

    return f, F
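The lookup key s packs the 12 edge indicators into a base-10 integer, most significant digit first, which is then matched against the motif table m4n; a standalone check of the encoding:

import numpy as np

a = np.array([1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1])
s = np.uint64(np.sum(np.power(10, np.arange(11, -1, -1)) * a))
assert s == np.uint64(100100000001)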
Example #38
File: bip.py Project: whigg/sarpy
def read_bip(fid,
             datasize,
             offset=0,
             datatype='float32',
             bands=1,
             swapbytes=False,
             dim1range=None,
             dim2range=None):
    """Generic function for reading data band interleaved by pixel.

    Data is read directly from disk with no transformation.  The most quickly
    increasing dimension on disk will be the most quickly increasing dimension in
    the array in memory.  No assumptions are made as to what the bands
    represent (complex i/q, etc.)

    INPUTS:
       fid: File identifier from open().  Must refer to a file that is open for
          reading as binary.
       datasize: 1x2 tuple/list (number of elements in first dimension, number
          of elements in the second dimension).  In keeping with the Python
          standard, the second dimension is the more quickly increasing as
          written in the file.
       offset: Index (in bytes) from the beginning of the file to the beginning
          of the data.  Default is 0 (beginning of file).
       datatype: Data type specifying binary data precision.  Default is
          dtype('float32').
       bands: Number of bands in data.  Default is 1.
       swapbytes: Whether the "endianness" of the data matches the "endianness"
          of our file reads.  Default is False.
       dim1range: ([start, stop,] step).  Similar syntax as Python range() or
          NumPy arange() functions.  This is the range of data to read in the
          less quickly increasing dimension (as written in the file).  Default
          is entire range.
       dim2range: ([start, stop,] step).  Similar syntax as Python range() or
          NumPy arange() functions.  This is the range of data to read in the
          more quickly increasing dimension (as written in the file).  Default
          is entire range.

    OUTPUT: Array of complex data values read from file.

    """

    # Check input arguments
    datasize, dim1range, dim2range = chipper.check_args(
        datasize, dim1range, dim2range)
    offset = np.array(offset, dtype='uint64')
    if offset.size == 1:  # Second term of offset allows for line prefix/suffix
        offset = np.append(offset, np.array(0, dtype='uint64'))
    # Determine element size
    datatype = np.dtype(datatype)  # Allows caller to pass dtype or string
    elementsize = np.uint64(datatype.itemsize * bands)

    # Read data (region of interest only)
    fid.seek(offset[0] +  # Beginning of data
             (dim1range[0] *
              (datasize[1] * elementsize + offset[1])) +  # Skip to first row
             (dim2range[0] * elementsize))  # Skip to first column
    dim2size = dim2range[1] - dim2range[0]
    lendim1range = len(range(*dim1range))
    dataout = np.zeros((bands, lendim1range, len(range(*dim2range))), datatype)
    # NOTE: MATLAB allows a "skip" parameter in its fread function.  This allows
    # one to do very fast reads when subsample equals 1 using only a single line
    # of code-- no loops!  Not sure of an equivalent way to do this in Python,
    # so we have to use "for" loops-- yuck!
    for i in range(lendim1range):
        single_line = np.fromfile(fid, datatype, np.uint64(bands) * dim2size)
        for j in range(bands):  # Pixel interleaved
            dataout[j, i, :] = single_line[j::dim2range[2] * np.uint64(bands)]
        fid.seek(
            ((datasize[1] * elementsize) + offset[1]) *
            (dim1range[2] - np.uint64(1)) +  # Skip unread rows
            ((datasize[1] - dim2size) * elementsize) + offset[1],
            1)  # Skip to beginning of dim2range
    if swapbytes:
        dataout.byteswap(True)
    return dataout
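A hedged check of the seek arithmetic above for a hypothetical 4-band float32 file with 100 columns, no line prefix or suffix, starting at row 2, column 3:

import numpy as np

elementsize = np.uint64(np.dtype('float32').itemsize * 4)   # 16 bytes per pixel
start = (np.uint64(0) +                                     # offset[0]
         np.uint64(2) * (np.uint64(100) * elementsize + np.uint64(0)) +
         np.uint64(3) * elementsize)
assert start == np.uint64(3248)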
Example #39
def Shp(*values):
    """Convert values to a tuple of numpy unsigned integers."""
    return tuple(np.uint64(value) for value in values)
Example #40
def test_complex_serialization(ray_start_regular):
    def assert_equal(obj1, obj2):
        module_numpy = (type(obj1).__module__ == np.__name__
                        or type(obj2).__module__ == np.__name__)
        if module_numpy:
            empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ())
                           or (hasattr(obj2, "shape") and obj2.shape == ()))
            if empty_shape:
                # This is a special case because currently
                # np.testing.assert_equal fails because we do not properly
                # handle different numerical types.
                assert obj1 == obj2, ("Objects {} and {} are "
                                      "different.".format(obj1, obj2))
            else:
                np.testing.assert_equal(obj1, obj2)
        elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
            special_keys = ["_pytype_"]
            assert (set(list(obj1.__dict__.keys()) + special_keys) == set(
                list(obj2.__dict__.keys()) +
                special_keys)), ("Objects {} and {} are different.".format(
                    obj1, obj2))
            for key in obj1.__dict__.keys():
                if key not in special_keys:
                    assert_equal(obj1.__dict__[key], obj2.__dict__[key])
        elif type(obj1) is dict or type(obj2) is dict:
            assert_equal(obj1.keys(), obj2.keys())
            for key in obj1.keys():
                assert_equal(obj1[key], obj2[key])
        elif type(obj1) is list or type(obj2) is list:
            assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
                                            "different lengths.".format(
                                                obj1, obj2))
            for i in range(len(obj1)):
                assert_equal(obj1[i], obj2[i])
        elif type(obj1) is tuple or type(obj2) is tuple:
            assert len(obj1) == len(obj2), ("Objects {} and {} are tuples "
                                            "with different lengths.".format(
                                                obj1, obj2))
            for i in range(len(obj1)):
                assert_equal(obj1[i], obj2[i])
        elif (is_named_tuple(type(obj1)) or is_named_tuple(type(obj2))):
            assert len(obj1) == len(obj2), (
                "Objects {} and {} are named "
                "tuples with different lengths.".format(obj1, obj2))
            for i in range(len(obj1)):
                assert_equal(obj1[i], obj2[i])
        else:
            assert obj1 == obj2, "Objects {} and {} are different.".format(
                obj1, obj2)

    long_extras = [0, np.array([["hi", u"hi"], [1.3, 1]])]

    PRIMITIVE_OBJECTS = [
        0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], "a",
        string.printable, "\u262F", u"hello world",
        u"\xff\xfe\x9c\x001\x000\x00", None, True, False, [], (), {},
        np.int8(3),
        np.int32(4),
        np.int64(5),
        np.uint8(3),
        np.uint32(4),
        np.uint64(5),
        np.float32(1.9),
        np.float64(1.9),
        np.zeros([100, 100]),
        np.random.normal(size=[100, 100]),
        np.array(["hi", 3]),
        np.array(["hi", 3], dtype=object)
    ] + long_extras

    COMPLEX_OBJECTS = [
        [[[[[[[[[[[[]]]]]]]]]]]],
        {
            "obj{}".format(i): np.random.normal(size=[100, 100])
            for i in range(10)
        },
        # {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
        #      (): {(): {}}}}}}}}}}}}},
        (
            (((((((((), ), ), ), ), ), ), ), ), ),
        {
            "a": {
                "b": {
                    "c": {
                        "d": {}
                    }
                }
            }
        },
    ]

    class Foo:
        def __init__(self, value=0):
            self.value = value

        def __hash__(self):
            return hash(self.value)

        def __eq__(self, other):
            return other.value == self.value

    class Bar:
        def __init__(self):
            for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):
                setattr(self, "field{}".format(i), val)

    class Baz:
        def __init__(self):
            self.foo = Foo()
            self.bar = Bar()

        def method(self, arg):
            pass

    class Qux:
        def __init__(self):
            self.objs = [Foo(), Bar(), Baz()]

    class SubQux(Qux):
        def __init__(self):
            Qux.__init__(self)

    class CustomError(Exception):
        pass

    Point = collections.namedtuple("Point", ["x", "y"])
    NamedTupleExample = collections.namedtuple(
        "Example", "field1, field2, field3, field4, field5")

    CUSTOM_OBJECTS = [
        Exception("Test object."),
        CustomError(),
        Point(11, y=22),
        Foo(),
        Bar(),
        Baz(),  # Qux(), SubQux(),
        NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3]),
    ]

    # Test dataclasses in Python 3.7.
    if sys.version_info >= (3, 7):
        from dataclasses import make_dataclass

        DataClass0 = make_dataclass("DataClass0", [("number", int)])

        CUSTOM_OBJECTS.append(DataClass0(number=3))

        class CustomClass:
            def __init__(self, value):
                self.value = value

        DataClass1 = make_dataclass("DataClass1", [("custom", CustomClass)])

        class DataClass2(DataClass1):
            @classmethod
            def from_custom(cls, data):
                custom = CustomClass(data)
                return cls(custom)

            def __reduce__(self):
                return (self.from_custom, (self.custom.value, ))

        CUSTOM_OBJECTS.append(DataClass2(custom=CustomClass(43)))

    BASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS

    LIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]
    TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]
    # The check that type(obj).__module__ != "numpy" should be unnecessary, but
    # otherwise this seems to fail on Mac OS X on Travis.
    DICT_OBJECTS = ([{
        obj: obj
    } for obj in PRIMITIVE_OBJECTS if (
        obj.__hash__ is not None and type(obj).__module__ != "numpy")] +
                    [{
                        0: obj
                    } for obj in BASE_OBJECTS] + [{
                        Foo(123): Foo(456)
                    }])

    RAY_TEST_OBJECTS = (BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS +
                        DICT_OBJECTS)

    @ray.remote
    def f(x):
        return x

    # Check that we can pass arguments by value to remote functions and
    # that they are uncorrupted.
    for obj in RAY_TEST_OBJECTS:
        assert_equal(obj, ray.get(f.remote(obj)))
        assert_equal(obj, ray.get(ray.put(obj)))

    # Test StringIO serialization
    s = io.StringIO(u"Hello, world!\n")
    s.seek(0)
    line = s.readline()
    s.seek(0)
    assert ray.get(ray.put(s)).readline() == line
Example #41
def test_simple_serialization(ray_start_regular):
    primitive_objects = [
        # Various primitive types.
        0,
        0.0,
        0.9,
        1 << 62,
        1 << 999,
        b"",
        b"a",
        "a",
        string.printable,
        "\u262F",
        u"hello world",
        u"\xff\xfe\x9c\x001\x000\x00",
        None,
        True,
        False,
        [],
        (),
        {},
        type,
        int,
        set(),
        # Collections types.
        collections.Counter([np.random.randint(0, 10) for _ in range(100)]),
        collections.OrderedDict([("hello", 1), ("world", 2)]),
        collections.defaultdict(lambda: 0, [("hello", 1), ("world", 2)]),
        collections.defaultdict(lambda: [], [("hello", 1), ("world", 2)]),
        collections.deque([1, 2, 3, "a", "b", "c", 3.5]),
        # Numpy dtypes.
        np.int8(3),
        np.int32(4),
        np.int64(5),
        np.uint8(3),
        np.uint32(4),
        np.uint64(5),
        np.float32(1.9),
        np.float64(1.9),
    ]

    composite_objects = ([[obj] for obj in primitive_objects] +
                         [(obj, ) for obj in primitive_objects] + [{
                             (): obj
                         } for obj in primitive_objects])

    @ray.remote
    def f(x):
        return x

    # Check that we can pass arguments by value to remote functions and
    # that they are uncorrupted.
    for obj in primitive_objects + composite_objects:
        new_obj_1 = ray.get(f.remote(obj))
        new_obj_2 = ray.get(ray.put(obj))
        assert obj == new_obj_1
        assert obj == new_obj_2
        # TODO(rkn): The numpy dtypes currently come back as regular integers
        # or floats.
        if type(obj).__module__ != "numpy":
            assert type(obj) == type(new_obj_1)
            assert type(obj) == type(new_obj_2)
Example #42
def flip_bit(bit64, index):
    """Set bit index on 64 bit unsigned integer to opposite of what it was."""
    bit64 ^= np.uint64(1) << np.uint64(index)
    return bit64
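Flipping the same bit twice restores the original value, which makes a convenient check:

import numpy as np

bb = np.uint64(0b1010)
assert flip_bit(bb, 0) == np.uint64(0b1011)
assert flip_bit(flip_bit(bb, 0), 0) == bb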
Example #43
 def test_uint64_from_negative(self, level=rlevel):
     assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
Example #44
def set_bit(bit64, index):
    """Set bit index on 64 bit unsigned integer to one."""
    bit64 |= np.uint64(1) << np.uint64(index)
    return bit64
Example #45
def read_str_u64(f):
    return np.uint64(read_str_int(f, 'u64'))
Example #46
np.datetime64(0)  # E: non-matching overload


class A:
    def __float__(self):
        return 1.0


np.int8(A())  # E: incompatible type
np.int16(A())  # E: incompatible type
np.int32(A())  # E: incompatible type
np.int64(A())  # E: incompatible type
np.uint8(A())  # E: incompatible type
np.uint16(A())  # E: incompatible type
np.uint32(A())  # E: incompatible type
np.uint64(A())  # E: incompatible type

np.void("test")  # E: incompatible type

np.generic(1)  # E: Cannot instantiate abstract class
np.number(1)  # E: Cannot instantiate abstract class
np.integer(1)  # E: Cannot instantiate abstract class
np.inexact(1)  # E: Cannot instantiate abstract class
np.character("test")  # E: Cannot instantiate abstract class
np.flexible(b"test")  # E: Cannot instantiate abstract class

np.float64(value=0.0)  # E: Unexpected keyword argument
np.int64(value=0)  # E: Unexpected keyword argument
np.uint64(value=0)  # E: Unexpected keyword argument
np.complex128(value=0.0j)  # E: Unexpected keyword argument
np.str_(value='bob')  # E: No overload variant
Example #47
0
def motif4struct_wei(W):
    '''
    Structural motifs are patterns of local connectivity. Motif frequency
    is the frequency of occurrence of motifs around a node. Motif intensity
    and coherence are weighted generalizations of motif frequency.

    Parameters
    ----------
    W : NxN np.ndarray
        weighted directed connection matrix (all weights between 0 and 1)

    Returns
    -------
    I : 199xN np.ndarray
        motif intensity matrix
    Q : 199xN np.ndarray
        motif coherence matrix
    F : 199xN np.ndarray
        motif frequency matrix

    Notes
    -----
    Average intensity and coherence are given by I./F and Q./F.
    '''
    from scipy import io
    import os
    fname = os.path.join(os.path.dirname(__file__), motiflib)
    mot = io.loadmat(fname)
    m4 = mot['m4']
    m4n = mot['m4n']
    id4 = mot['id4'].squeeze()
    n4 = mot['n4'].squeeze()

    n = len(W)
    I = np.zeros((199, n))  # intensity
    Q = np.zeros((199, n))  # coherence
    F = np.zeros((199, n))  # frequency

    A = binarize(W, copy=True)  # ensure A is binary
    As = np.logical_or(A, A.T)  # symmetrized adjmat

    for u in range(n - 3):
        # v1: neighbors of u (>u)
        V1 = np.append(np.zeros((u,), dtype=int), As[u, u + 1:n + 1])
        for v1 in np.where(V1)[0]:
            V2 = np.append(np.zeros((u,), dtype=int), As[v1, u + 1:n + 1])
            V2[V1] = 0  # not already in V1
            # and all neighbors of u (>v1)
            V2 = np.logical_or(
                np.append(np.zeros((v1,)), As[u, v1 + 1:n + 1]), V2)
            for v2 in np.where(V2)[0]:
                vz = np.max((v1, v2))  # vz: largest rank node
                # v3: all neighbors of v2 (>u)
                V3 = np.append(np.zeros((u,), dtype=int), As[v2, u + 1:n + 1])
                V3[V2] = 0  # not already in V1 and V2
                # and all neighbors of v1 (>v2)
                V3 = np.logical_or(
                    np.append(np.zeros((v2,)), As[v1, v2 + 1:n + 1]), V3)
                V3[V1] = 0  # not already in V1
                # and all neighbors of u (>vz)
                V3 = np.logical_or(
                    np.append(np.zeros((vz,)), As[u, vz + 1:n + 1]), V3)
                for v3 in np.where(V3)[0]:
                    a = np.array((A[v1, u], A[v2, u], A[v3, u], A[u, v1], A[v2, v1],
                                  A[v3, v1], A[u, v2], A[v1, v2], A[
                                      v3, v2], A[u, v3], A[v1, v3],
                                  A[v2, v3]))
                    s = np.uint64(
                        np.sum(np.power(10, np.arange(11, -1, -1)) * a))
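                    # Encode the 12 edge indicators as a 12-digit decimal
                    # number; m4n stores the same encoding per motif class.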
                    # print np.shape(s),np.shape(m4n)
                    ix = np.squeeze(s == m4n)

                    w = np.array((W[v1, u], W[v2, u], W[v3, u], W[u, v1], W[v2, v1],
                                  W[v3, v1], W[u, v2], W[v1, v2], W[
                                      v3, v2], W[u, v3], W[v1, v3],
                                  W[v2, v3]))

                    M = w * m4[ix, :]
                    id = id4[ix] - 1
                    l = n4[ix]
                    x = np.sum(M, axis=1) / l  # arithmetic mean
                    M[M == 0] = 1  # enable geometric mean
                    i = np.prod(M, axis=1)**(1 / l)  # intensity
                    q = i / x  # coherence

                    # then add to cumulative count
                    I[id, u] += i
                    I[id, v1] += i
                    I[id, v2] += i
                    I[id, v3] += i
                    Q[id, u] += q
                    Q[id, v1] += q
                    Q[id, v2] += q
                    Q[id, v3] += q
                    F[id, u] += 1
                    F[id, v1] += 1
                    F[id, v2] += 1
                    F[id, v3] += 1

    return I, Q, F
Example #48
0
    def testNumpyDtypeJSONSerialize(self):
        for serial_type in self._get_serial_types():
            provider = JsonSerializeProvider(
                data_serial_type=serial_type,
                pickle_protocol=TEST_PICKLE_PROTOCOL)

            node9 = Node9(b1=np.int8(-2),
                          b2=np.int16(2000),
                          b3=np.int32(-5000),
                          b4=np.int64(500000),
                          c1=np.uint8(2),
                          c2=np.uint16(2000),
                          c3=np.uint32(5000),
                          c4=np.uint64(500000),
                          d1=np.float16(2.5),
                          d2=np.float32(7.37),
                          d3=np.float64(5.976321),
                          f1=np.int8(3))

            serials = serializes(provider, [node9])
            d_node9, = deserializes(provider, [Node9], serials)

            self.assertIsNot(node9, d_node9)
            self.assertEqual(node9.b1, d_node9.b1)
            self.assertEqual(node9.b2, d_node9.b2)
            self.assertEqual(node9.b3, d_node9.b3)
            self.assertEqual(node9.b4, d_node9.b4)
            self.assertEqual(node9.c1, d_node9.c1)
            self.assertEqual(node9.c2, d_node9.c2)
            self.assertEqual(node9.c3, d_node9.c3)
            self.assertEqual(node9.c4, d_node9.c4)
            self.assertAlmostEqual(node9.d1, d_node9.d1, places=2)
            self.assertAlmostEqual(node9.d2, d_node9.d2, places=4)
            self.assertAlmostEqual(node9.d3, d_node9.d3)
            self.assertEqual(node9.f1, d_node9.f1)

            node_rec1 = Node9(f1=np.dtype([('label', 'int32'), (
                's0', '<U16'), ('s1', 'int32'), ('s2', 'int32'), (
                    'd0', '<U16'), ('d1', 'int32'), ('d2',
                                                     'int32'), ('d3',
                                                                '<U256')]))
            node_rec2 = Node9(f1=np.dtype([('label', 'int32'), (
                's0', '<U16'), ('s1', 'int32'), ('s2', 'int32'), (
                    's3',
                    '<U256'), ('d0',
                               '<U16'), ('d1',
                                         'int32'), ('d2',
                                                    'int32'), ('d3',
                                                               '<U256')]))

            serials = serializes(provider, [node_rec1])
            loads_fun = _loads_with_check if serial_type == dataserializer.SerialType.PICKLE \
                else original_pickle_loads
            with unittest.mock.patch('pickle.loads', new=loads_fun):
                d_node_rec1, = deserializes(provider, [Node9], serials)

            self.assertIsNot(node_rec1, d_node_rec1)
            self.assertEqual(node_rec1.f1, d_node_rec1.f1)

            serials = serializes(provider, [node_rec2])
            loads_fun = _loads_with_check if serial_type == dataserializer.SerialType.PICKLE \
                else original_pickle_loads
            with unittest.mock.patch('pickle.loads', new=loads_fun):
                d_node_rec2, = deserializes(provider, [Node9], serials)

            self.assertIsNot(node_rec2, d_node_rec2)
            self.assertEqual(node_rec2.f1, d_node_rec2.f1)
Example #49
0
def uniform_from_uint64(x):
    return (x >> np.uint64(11)) * (1.0 / 9007199254740992.0)
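Here 9007199254740992 is 2**53: the shift keeps the top 53 bits of x (a double's full mantissa precision) and the multiply maps them onto [0, 1). A sketch of the endpoints, assuming the function as defined above:

assert 9007199254740992 == 2**53
assert uniform_from_uint64(np.uint64(0)) == 0.0
# The all-ones input maps to the largest double strictly below 1.0.
assert uniform_from_uint64(np.uint64(2**64 - 1)) == (2**53 - 1) / 2**53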
Example #50
0
    def preload_source(t_query):
        """load info from example tile (image)

        Then gets three needed values from the given \
path from the :class:`TileQuery` t_query

        Arguments
        -----------
        t_query: :class:`TileQuery`
            Only the file path is needed

        Returns
        --------
        dict
            Will be empty if filename does not give \
a valid json file pointing to the tiff grid.

            * :class:`RUNTIME` ``.IMAGE.BLOCK.NAME``
                (numpy.ndarray) -- 3x1 for any give tile shape
            * :class:`OUTPUT` ``.INFO.TYPE.NAME``
                (str) -- numpy dtype of any given tile
            * :class:`OUTPUT` ``.INFO.SIZE.NAME``
                (numpy.ndarray) -- 3x1 for full volume shape
        """
        # Keyword names
        output = t_query.OUTPUT.INFO
        runtime = t_query.RUNTIME.IMAGE
        boss_field = runtime.SOURCE.BOSS
        info_field = boss_field.INFO
        block_field = info_field.BLOCK
        full_field = info_field.EXTENT
        start_field = info_field.START

        # Get the name and ending of the target file
        filename = t_query.OUTPUT.INFO.PATH.VALUE
        ending = os.path.splitext(filename)[1]

        # Return if the ending is not json
        if ending not in BossGrid._meta_files:
            return {}

        # Return if the path does not exist
        if not os.path.exists(filename):
            return {}

        # Get function to read the metainfo file
        order = BossGrid._meta_files.index(ending)
        reader = BossGrid._read[order]

        # Get information from json file
        with open(filename, 'r') as jd:
            # Get all the filenames
            all_info = reader(jd)
            boss = all_info.get(boss_field.ALL, [])
            info = all_info.get(info_field.NAME, {})
            # Return if no metadata
            if not len(info):
                return {}
            # All the paths
            path_dict = {}
            any_path = None

            # Origin of first tile
            start_info = info.get(start_field.NAME, {})
            start_list = map(start_info.get, start_field.ZYX)
            # Set default first tile origin
            if any([s is None for s in start_list]):
                start_list = start_field.VALUE
            # Extract offset of first tile
            tile_start = np.uint64(start_list)
            any_y, any_x = tile_start[1:]

            # Shape of one tile
            block_info = info.get(block_field.NAME, {})
            block_list = map(block_info.get, block_field.ZYX)
            # Return if no block shape
            if not all(block_list):
                return {}

            # Shape of full volume
            full_info = info.get(full_field.NAME)
            full_extent = map(full_info.get, full_field.ZYX)
            # Return if no full extent shape
            if not all(full_extent):
                return {}

            # Block shape as a numpy array
            block_shape = np.uint64(block_list)
            if block_shape.shape != (3, ):
                return {}
            # Finally, list all the mip levels
            block_shapes = block_shape[np.newaxis]

            # Full shape as a numpy array
            full_bounds = np.uint64(full_extent)
            if full_bounds.shape != (3, 2):
                return {}
            # Finally, get the full shape from extent
            full_shape = np.diff(full_bounds).T[0]

            # All paths in dictionary
            for d in boss:
                path = d.get(boss_field.PATH, '')
                # Update the maximum value
                z, y, x = map(d.get, boss_field.ZYX)
                z_format = x is None or y is None

                # Set any path
                if not any_path:
                    any_path = path
                    if z_format:
                        any_path = path.format(column=any_x, row=any_y)
                    if not os.path.exists(any_path):
                        any_path = None

                # Allow for simple section formats
                if z_format:
                    path_dict[z] = path
                    continue

                # Allow for specific paths per tile
                if z not in path_dict:
                    path_dict[z] = {y: {x: path}}
                    continue
                # Add column to dictionary
                if y not in path_dict[z]:

                    path_dict[z][y] = {x: path}
                    continue
                # Add row to dictionary
                path_dict[z][y][x] = path

            # Return if no paths
            if not any_path:
                return {}

            # Get the tile size from a tile
            any_tile = BossGrid.imread(any_path)
            any_dtype = str(any_tile.dtype)

            # All keys to follow API
            keywords = {
                start_field.NAME: tile_start,
                boss_field.PATHS.NAME: path_dict,
                runtime.BLOCK.NAME: block_shapes,
                output.SIZE.NAME: full_shape,
                output.TYPE.NAME: any_dtype,
            }

            # Combine results with parent method
            common = Datasource.preload_source(t_query)
            return dict(common, **keywords)
Example #51
0
def check_integer(indexer_cls):
    value = indexer_cls((1, np.uint64(2),)).tuple
    assert all(isinstance(v, int) for v in value)
    assert value == (1, 2)
Example #52
0
def test_uint_from():
    with raises(Exception):
        assert get_embedded_from_uint(
            np.uint8, np.uint32(0xFFFFFFFF), [31, 0]
        ) == np.uint32(0xFFFFFFFF)
    assert get_embedded_from_uint(
        np.uint32, np.uint32(0xFFFFFFFF), [31, 0]
    ) == np.uint32(0xFFFFFFFF)
    assert isinstance(
        get_embedded_from_uint(np.uint32, np.uint32(0xFFFFFFFF), [31, 0]), np.uint32
    )
    assert get_embedded_from_uint(np.uint8, np.uint32(0xFFFFFFFF), [7, 0]) == np.uint8(
        0xFF
    )
    assert isinstance(
        get_embedded_from_uint(np.uint8, np.uint32(0xFFFFFFFF), [7, 0]), np.uint8
    )
    assert get_embedded_from_uint(np.uint8, np.uint32(0xFFFFFFFF), [0, 0]) == np.uint8(
        0x1
    )
    assert get_embedded_from_uint(
        np.uint8, np.uint32(0xFFFFFFFF), [31, 31]
    ) == np.uint8(0x1)
    assert get_embedded_from_uint(
        np.uint8, np.uint32(0x0FFFFFFF), [31, 31]
    ) == np.uint8(0x0)
    assert get_embedded_from_uint(
        np.uint8, np.uint32(0xF0FFFFFF), [31 - 4, 31 - 4]
    ) == np.uint8(0x0)
    assert get_embedded_from_uint(
        np.uint8, np.uint32(0xF0FFFFFF), [31 - 3, 31 - 3]
    ) == np.uint8(0x1)

    assert get_embedded_from_uint(bool, np.uint32(0xFFFFFFFF), [31, 31])
    assert not get_embedded_from_uint(bool, np.uint32(0x0FFFFFFF), [31, 31])
    assert isinstance(
        get_embedded_from_uint(bool, np.uint32(0xFFFFFFFF), [31, 31]), bool
    )

    assert get_embedded_from_uint(np.int8, np.uint32(0xFFFFFFFF), [12, 10]) == np.int8(
        -1
    )

    c = set_embedded_in_uint(np.int8(-3), np.uint64(0), [15, 10])
    assert isinstance(c, np.uint64)
    assert get_embedded_from_uint(np.int8, c, [15, 10]) == np.int8(-3)

    c = set_embedded_in_uint(np.int8(-3), np.uint64(0), [12, 10])
    assert isinstance(c, np.uint64)
    assert get_embedded_from_uint(np.int8, c, [12, 10]) == np.int8(-3)

    # 3 bits: 0..7
    c = np.bitwise_not(np.uint64(0x0))
    for x in range(0, 8):
        assert x >= 0 and x < 8
        c = set_embedded_in_uint(np.uint8(x), c, [60, 58])
        assert isinstance(c, np.uint64)
        assert get_embedded_from_uint(np.uint8, c, [60, 58]) == np.uint8(x)
    assert isinstance(c, np.uint64)
    assert c == np.bitwise_not(np.uint64(0x0))

    c = set_embedded_in_uint(np.uint8(8), c, [60, 58])
    assert isinstance(c, np.uint64)
    assert get_embedded_from_uint(np.uint8, c, [60, 58]) != np.uint8(8)

    # 3 bits: -4..3
    c = np.bitwise_not(np.uint64(0x0))
    for x in range(-4, 4):
        assert x >= -4 and x < 4
        c = set_embedded_in_uint(np.int8(x), c, [60, 58])
        assert isinstance(c, np.uint64)
        ret = get_embedded_from_uint(np.int8, c, [60, 58])
        assert ret == np.int8(x)

    c = set_embedded_in_uint(np.int8(-1), c, [60, 58])
    assert c == np.bitwise_not(np.uint64(0x0))

    c = np.uint64(0x0)
    c = set_embedded_in_uint(np.int8(-1), c, [60, 58])
    c = set_embedded_in_uint(np.int8(0), c, [60, 58])
    assert c == np.uint64(0x0)
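The helpers this test exercises are not included in this collection; a hypothetical sketch consistent with the assertions above (fields given as [hi, lo] bit positions, signed targets sign-extended from the field width, out-of-range extractions rejected) might look like:

def get_embedded_from_uint(dtype, value, field):
    hi, lo = field
    width = hi - lo + 1
    bits = (int(value) >> lo) & ((1 << width) - 1)
    if dtype is bool:
        return bool(bits)
    if np.issubdtype(dtype, np.signedinteger) and bits >= (1 << (width - 1)):
        bits -= 1 << width  # sign-extend from the field width
    result = dtype(bits)
    if int(result) != bits:
        raise OverflowError("field value does not fit in the target dtype")
    return result

def set_embedded_in_uint(value, container, field):
    hi, lo = field
    width = hi - lo + 1
    mask = ((1 << width) - 1) << lo
    bits = (int(value) & ((1 << width) - 1)) << lo
    return np.uint64((int(container) & ~mask) | bits)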
Example #53
0
 def black_pawn_west_attacks(self):
     # Black pawn west attacks are south west (-9) AND NOT the H File
     return (self.black_P_bb >> np.uint64(9)) & ~np.uint64(File.hexH)
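The ~File-H mask is what stops a shifted pawn from wrapping across the board edge. A small sketch, assuming the usual little-endian layout (bit 0 = a1 ... bit 63 = h8) and an illustrative stand-in for File.hexH:

FILE_H = np.uint64(0x8080808080808080)  # illustrative stand-in for File.hexH

black_pawn_on_a7 = np.uint64(1) << np.uint64(48)  # a7 has no west attack
# An unmasked -9 shift wraps onto h5 (bit 39); the File-H mask removes it.
assert (black_pawn_on_a7 >> np.uint64(9)) & ~FILE_H == np.uint64(0)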
Example #54
0
import random, copy, struct
import warnings
import numpy as np

from datasketch.hashfunc import sha1_hash32

# The size of a hash value in number of bytes
hashvalue_byte_size = len(bytes(np.int64(42).data))

# http://en.wikipedia.org/wiki/Mersenne_prime
_mersenne_prime = np.uint64((1 << 61) - 1)
_max_hash = np.uint64((1 << 32) - 1)
_hash_range = (1 << 32)

class MinHash(object):
    '''MinHash is a probabilistic data structure for computing
    `Jaccard similarity`_ between sets.

    Args:
        num_perm (int, optional): Number of random permutation functions.
            It will be ignored if `hashvalues` is not None.
        seed (int, optional): The random seed controls the set of random
            permutation functions generated for this MinHash.
        hashfunc (optional): The hash function used by this MinHash.
            It takes the input passed to the `update` method and
            returns an integer that can be encoded with 32 bits.
            The default hash function is based on SHA1 from hashlib_.
        hashobj (**deprecated**): This argument is deprecated since version
            1.4.0. It is a no-op and has been replaced by `hashfunc`.
        hashvalues (`numpy.array` or `list`, optional): The hash values is
            the internal state of the MinHash. It can be specified for faster
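For context on the constants defined above: datasketch permutes each 32-bit hash through a universal hash of the form h(x) = ((a*x + b) mod p) & max_hash, with p = _mersenne_prime. A minimal sketch of that arithmetic in plain Python ints, which sidesteps 64-bit overflow (the a, b, and input values are illustrative, not the library's):

a = 6364136223846793005 % (2**61 - 1)
b = 1442695040888963407 % (2**61 - 1)
hv = 0x9E3779B9  # stand-in for a 32-bit sha1_hash32 output
perm = np.uint64(((a * hv + b) % int(_mersenne_prime)) & int(_max_hash))
assert perm <= _max_hash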
Example #55
0
 def white_P_west_attacks(self):
     # White pawn west attacks are north west (+7) AND NOT the H File
     return (self.white_P_bb << np.uint64(7)) & ~np.uint64(File.hexH)
Example #56
0
def extract_xyz(xyz_dat, timestamp, verbose=False):
    '''Reads a stream of I32s, finds the first timestamp,
       then starts de-interleaving the demodulated data
       from the FPGA'''
    
    if timestamp == 0.0:
        # if no timestamp given, use current time
        # and set the timing threshold for 1 month.
        # This threshold is used to identify the timestamp 
        # in the stream of I32s
        timestamp = time.time()
        diff_thresh = 31.0 * 24.0 * 3600.0
    else:
        timestamp = timestamp * (10.0**(-9))
        diff_thresh = 60.0

    writing_data = False
    xyz_ind = 0

    xyz_time = []
    xyz = [[], [], []]

    for ind, dat in enumerate(xyz_dat):

        # Data in the 'xyz' FIFO comes through as:
        # time_MSB -> time_LSB ->
        # X        -> Y        -> Z   -> 
        # and then repeats. Position  variables are 
        # arbitrarily scaled so thinking of them as 32-bit integers
        # is okay. We just care about the bits anyway
        if writing_data:
            if xyz_ind == 0 and ind != (len(xyz_dat) - 1):
                high = np.uint32(xyz_dat[ind])
                low = np.uint32(xyz_dat[ind+1])
                dattime = (high.astype(np.uint64) << np.uint64(32)) \
                           + low.astype(np.uint64)
                xyz_time.append(dattime)
            elif xyz_ind == 2:
                xyz[0].append(dat)
            elif xyz_ind == 3:
                xyz[1].append(dat)
            elif xyz_ind == 4:
                xyz[2].append(dat)
            
            xyz_ind += 1
            xyz_ind = xyz_ind % 5

        # Check for the timestamp
        if not writing_data and xyz_ind == 0:
            # Assemble time stamp from successive I32s, since
            # it's a 64 bit object
            high = np.int32(xyz_dat[ind])
            low = np.int32(xyz_dat[ind+1])
            dattime = (high.astype(np.uint64) << np.uint64(32)) \
                        + low.astype(np.uint64)

            # Time stamp from FPGA is a U64 with the UNIX epoch 
            # time in nanoseconds, synced to the host's clock
            if (np.abs(timestamp - float(dattime) * 10**(-9)) < diff_thresh):
                if verbose:
                    print "found timestamp  : ", float(dattime) * 10**(-9)
                    print "comparison time  : ", timestamp 
                xyz_time.append(dattime)
                xyz_ind += 1
                writing_data = True

    # Since the FIFO read request is asynchronous, sometimes
    # the timestamp isn't first to come out, but the total amount of data
    # read out is a multiple of 5 (2 time + X + Y + Z) so the Z
    # channel usually ends up with fewer samples.
    # The following is coded very generally

    min_len = 10**9  # Assumes we never have more than 1 billion samples
    for ind in [0,1,2]:
        if len(xyz[ind]) < min_len:
            min_len = len(xyz[ind])

    # Re-size everything by the minimum length and convert to numpy array
    xyz_time = np.array(xyz_time[:min_len])
    for ind in [0,1,2]:
        xyz[ind]   = xyz[ind][:min_len]
    xyz = np.array(xyz)        

    return xyz_time, xyz
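The two-I32 timestamp assembly above is just (high << 32) + low on unsigned 64-bit values; a quick sanity sketch with made-up words:

high, low = np.uint32(0x00000001), np.uint32(0x00000002)
stamp = (high.astype(np.uint64) << np.uint64(32)) + low.astype(np.uint64)
assert stamp == np.uint64(0x0000000100000002)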
Example #57
0
    def update_bitboards(self, piece_map):
        for key, val in piece_map.items():
            # TODO: make more efficient by storing and updating the piece group that changed

            # White Pieces
            if key == Piece.wP:
                self.white_P_bb = np.uint64(0)
                for bit in val:
                    self.white_P_bb |= set_bit(self.white_P_bb, np.uint64(bit))

            elif key == Piece.wR:
                self.white_R_bb = np.uint64(0)
                for bit in val:
                    self.white_R_bb |= set_bit(self.white_R_bb, np.uint64(bit))

            elif key == Piece.wN:
                self.white_N_bb = np.uint64(0)
                for bit in val:
                    self.white_N_bb |= set_bit(self.white_N_bb, np.uint64(bit))

            elif key == Piece.wB:
                self.white_B_bb = np.uint64(0)
                for bit in val:
                    self.white_B_bb |= set_bit(self.white_B_bb, np.uint64(bit))

            elif key == Piece.wQ:
                self.white_Q_bb = np.uint64(0)
                for bit in val:
                    self.white_Q_bb |= set_bit(self.white_Q_bb, np.uint64(bit))

            elif key == Piece.wK:
                self.white_K_bb = np.uint64(0)
                for bit in val:
                    self.white_K_bb |= set_bit(self.white_K_bb, np.uint64(bit))

            # Black Pieces
            if key == Piece.bP:
                self.black_P_bb = np.uint64(0)
                for bit in val:
                    self.black_P_bb |= set_bit(self.black_P_bb, np.uint64(bit))

            elif key == Piece.bR:
                self.black_R_bb = np.uint64(0)
                for bit in val:
                    self.black_R_bb |= set_bit(self.black_R_bb, np.uint64(bit))

            elif key == Piece.bN:
                self.black_N_bb = np.uint64(0)
                for bit in val:
                    self.black_N_bb |= set_bit(self.black_N_bb, np.uint64(bit))

            elif key == Piece.bB:
                self.black_B_bb = np.uint64(0)
                for bit in val:
                    self.black_B_bb |= set_bit(self.black_B_bb, np.uint64(bit))

            elif key == Piece.bQ:
                self.black_Q_bb = np.uint64(0)
                for bit in val:
                    self.black_Q_bb |= set_bit(self.black_Q_bb, np.uint64(bit))

            elif key == Piece.bK:
                self.black_K_bb = np.uint64(0)
                for bit in val:
                    self.black_K_bb |= set_bit(self.black_K_bb, np.uint64(bit))
Example #58
0
class LaxRandomTest(jtu.JaxTestCase):

  def _CheckCollisions(self, samples, nbits):
    fail_prob = 0.01  # conservative bound on statistical fail prob by Chebyshev
    nitems = len(samples)
    nbins = 2 ** nbits
    nexpected = nbins * (1 - ((nbins - 1) / nbins) ** nitems)
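    # Balls-into-bins: each of the nbins bins is occupied with probability
    # 1 - ((nbins - 1)/nbins)**nitems, so nexpected is the expected number
    # of *distinct* sample values.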
    ncollisions = len(np.unique(samples))
    sq_percent_deviation = ((ncollisions - nexpected) / nexpected) ** 2
    self.assertLess(sq_percent_deviation, 1 / np.sqrt(nexpected * fail_prob))

  def _CheckKolmogorovSmirnovCDF(self, samples, cdf):
    fail_prob = 0.01  # conservative bound on statistical fail prob by Kolmo CDF
    self.assertGreater(scipy.stats.kstest(samples, cdf).pvalue, fail_prob)

  def _CheckChiSquared(self, samples, pmf):
    alpha = 0.01  # significance level, threshold for p-value
    values, actual_freq = np.unique(samples, return_counts=True)
    expected_freq = pmf(values) * samples.size
    # per scipy: "A typical rule is that all of the observed and expected
    # frequencies should be at least 5."
    valid = (actual_freq > 5) & (expected_freq > 5)
    self.assertGreater(valid.sum(), 1,
                       msg='not enough valid frequencies for chi-squared test')
    _, p_value = scipy.stats.chisquare(
        actual_freq[valid], expected_freq[valid])
    self.assertGreater(
        p_value, alpha,
        msg=f'Failed chi-squared test with p={p_value}.\n'
            'Expected vs. actual frequencies:\n'
            f'{expected_freq[valid]}\n{actual_freq[valid]}')

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
      for dtype in jtu.dtypes.floating))
  def testNumpyAndXLAAgreeOnFloatEndianness(self, dtype):
    bits_dtype = np.uint32 if jnp.finfo(dtype).bits == 32 else np.uint64
    numpy_bits = np.array(1., dtype).view(bits_dtype)
    xla_bits = api.jit(
        lambda: lax.bitcast_convert_type(np.array(1., dtype), bits_dtype))()
    self.assertEqual(numpy_bits, xla_bits)

  def testThreefry2x32(self):
    # We test the hash by comparing to known values provided in the test code of
    # the original reference implementation of Threefry. For the values, see
    # https://github.com/DEShawResearch/Random123-Boost/blob/65e3d874b67aa7b3e02d5ad8306462f52d2079c0/libs/random/test/test_threefry.cpp#L30-L32
    def result_to_hex(result):
      return tuple([hex(x.copy()).rstrip("L") for x in result])

    expected = ("0x6b200159", "0x99ba4efe")
    result = random.threefry_2x32(np.uint32([0, 0]), np.uint32([0, 0]))

    self.assertEqual(expected, result_to_hex(result))

    expected = ("0x1cb996fc", "0xbb002be7")
    result = random.threefry_2x32(np.uint32([-1, -1]), np.uint32([-1, -1]))
    self.assertEqual(expected, result_to_hex(result))

    expected = ("0xc4923a9c", "0x483df7a0")
    result = random.threefry_2x32(
        np.uint32([0x13198a2e, 0x03707344]),
        np.uint32([0x243f6a88, 0x85a308d3]))
    self.assertEqual(expected, result_to_hex(result))

  def testThreefry2x32Large(self):
    n = 10000000
    result = random.threefry_2x32(
      (np.uint32(0x13198a2e), np.uint32(0x03707344)),
      jnp.concatenate([
        jnp.full((n,), 0x243f6a88, jnp.uint32),
        jnp.full((n,), 0x85a308d3, jnp.uint32)
      ]))
    np.testing.assert_equal(result[:n], np.full((n,), 0xc4923a9c, dtype=np.uint32))
    np.testing.assert_equal(result[n:], np.full((n,), 0x483df7a0, dtype=np.uint32))

  def testThreefry2x32Empty(self):
    # Regression test for an op-by-op crash for empty arrays in CUDA mode.
    with api.disable_jit():
      result = random.threefry_2x32(
        (np.uint32(0x13198a2e), np.uint32(0x03707344)),
        jnp.ones((10, 0,), jnp.uint32))
    np.testing.assert_equal(result, np.zeros((10, 0,), dtype=np.uint32))

  def testRngRandomBitsViewProperty(self):
    # TODO: add 64-bit if it ever supports this property.
    # TODO: will this property hold across endian-ness?
    N = 10
    key = random.PRNGKey(1701)
    nbits = [8, 16, 32]
    rand_bits = [jax._src.random._random_bits(key, n, (N * 64 // n,))
                 for n in nbits]
    rand_bits_32 = np.array([np.array(r).view(np.uint32) for r in rand_bits])
    assert np.all(rand_bits_32 == rand_bits_32[0])

  def testRngRandomBits(self):
    # Test specific outputs to ensure consistent random values between JAX versions.
    key = random.PRNGKey(1701)

    bits8 = jax._src.random._random_bits(key, 8, (3,))
    expected8 = np.array([216, 115,  43], dtype=np.uint8)
    self.assertArraysEqual(bits8, expected8)

    bits16 = jax._src.random._random_bits(key, 16, (3,))
    expected16 = np.array([41682,  1300, 55017], dtype=np.uint16)
    self.assertArraysEqual(bits16, expected16)

    bits32 = jax._src.random._random_bits(key, 32, (3,))
    expected32 = np.array([56197195, 4200222568, 961309823], dtype=np.uint32)
    self.assertArraysEqual(bits32, expected32)

    with jtu.ignore_warning(category=UserWarning, message="Explicitly requested dtype.*"):
      bits64 = jax._src.random._random_bits(key, 64, (3,))
    if config.x64_enabled:
      expected64 = np.array([3982329540505020460, 16822122385914693683,
                             7882654074788531506], dtype=np.uint64)
    else:
      expected64 = np.array([676898860, 3164047411, 4010691890], dtype=np.uint32)
    self.assertArraysEqual(bits64, expected64)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
      for dtype in float_dtypes))
  def testRngUniform(self, dtype):
    key = random.PRNGKey(0)
    rand = lambda key: random.uniform(key, (10000,), dtype)
    crand = api.jit(rand)

    uncompiled_samples = rand(key)
    compiled_samples = crand(key)

    for samples in [uncompiled_samples, compiled_samples]:
      self._CheckCollisions(samples, jnp.finfo(dtype).nmant)
      self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.uniform().cdf)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
      for dtype in int_dtypes + uint_dtypes))
  def testRngRandint(self, dtype):
    lo = 5
    hi = 10

    key = random.PRNGKey(0)
    rand = lambda key: random.randint(key, (10000,), lo, hi, dtype)
    crand = api.jit(rand)

    uncompiled_samples = rand(key)
    compiled_samples = crand(key)

    for samples in [uncompiled_samples, compiled_samples]:
      self.assertTrue(np.all(lo <= samples))
      self.assertTrue(np.all(samples < hi))

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
      for dtype in float_dtypes))
  def testNormal(self, dtype):
    key = random.PRNGKey(0)
    rand = lambda key: random.normal(key, (10000,), dtype)
    crand = api.jit(rand)

    uncompiled_samples = rand(key)
    compiled_samples = crand(key)

    for samples in [uncompiled_samples, compiled_samples]:
      self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.norm().cdf)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
      for dtype in complex_dtypes))
  def testNormalComplex(self, dtype):
    key = random.PRNGKey(0)
    rand = lambda key: random.normal(key, (10000,), dtype)
    crand = api.jit(rand)

    uncompiled_samples = rand(key)
    compiled_samples = crand(key)

    for samples in [uncompiled_samples, compiled_samples]:
      self._CheckKolmogorovSmirnovCDF(jnp.real(samples), scipy.stats.norm(scale=1/np.sqrt(2)).cdf)
      self._CheckKolmogorovSmirnovCDF(jnp.imag(samples), scipy.stats.norm(scale=1/np.sqrt(2)).cdf)
      self.assertEqual(dtype, samples.dtype)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
      for dtype in float_dtypes))
  def testTruncatedNormal(self, dtype):
    key = random.PRNGKey(0)
    rand = lambda key: random.truncated_normal(key, -0.3, 0.3, (10000,), dtype)
    crand = api.jit(rand)

    uncompiled_samples = rand(key)
    compiled_samples = crand(key)

    min_val = np.min(uncompiled_samples)
    max_val = np.max(uncompiled_samples)
    self.assertTrue(min_val > -0.3)
    self.assertTrue(max_val < 0.3)
    for samples in [uncompiled_samples, compiled_samples]:
      self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.truncnorm(-0.3, 0.3).cdf)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
      for dtype in jtu.dtypes.floating + jtu.dtypes.integer))
  def testShuffle(self, dtype):
    key = random.PRNGKey(0)
    x = np.arange(100).astype(dtype)
    rand = lambda key: random.shuffle(key, x)
    crand = api.jit(rand)

    with self.assertWarns(FutureWarning):
      perm1 = rand(key)
    with self.assertWarns(FutureWarning):
      perm2 = crand(key)

    self.assertAllClose(perm1, perm2)
    self.assertFalse(np.all(perm1 == x))  # seems unlikely!
    self.assertAllClose(np.sort(perm1), x, check_dtypes=False)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_shape={}_replace={}_weighted={}_array_input={}".format(
          np.dtype(dtype).name, shape, replace, weighted, array_input),
        "dtype": dtype, "shape": shape, "replace": replace,
        "weighted": weighted, "array_input": array_input}
      for dtype in jtu.dtypes.floating + jtu.dtypes.integer
      for shape in [(), (5,), (4, 5)]
      for replace in [True, False]
      for weighted in [True, False]
      for array_input in [False, 'jnp', 'np']))
  def testChoice(self, dtype, shape, replace, weighted, array_input):
    N = 100
    key = random.PRNGKey(0)
    x = (N if not array_input else
         jnp.arange(N, dtype=dtype) if array_input == 'jnp' else
         np.arange(N, dtype=dtype))
    p = None if not weighted else jnp.arange(N)
    rand = lambda key: random.choice(key, x, shape, p=p, replace=replace)
    crand = api.jit(rand)

    sample1 = rand(key)
    sample2 = crand(key)

    self.assertEqual(shape, sample1.shape)
    if array_input == 'jnp':
      self.assertEqual(x.dtype, sample1.dtype)
    if not replace:
      assert len(np.unique(sample1)) == len(np.ravel(sample1))
    self.assertAllClose(sample1, sample2)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}".format(jtu.format_shape_dtype_string(shape, dtype)),
       "dtype": dtype, "shape": shape}
      for dtype in jtu.dtypes.floating + jtu.dtypes.integer
      for shape in [100, (10, 10), (10, 5, 2)]))
  def testPermutationArray(self, dtype, shape):
    key = random.PRNGKey(0)
    x = jnp.arange(np.prod(shape)).reshape(shape).astype(dtype)
    rand = lambda key: random.permutation(key, x)
    crand = api.jit(rand)

    perm1 = rand(key)
    perm2 = crand(key)

    self.assertAllClose(perm1, perm2)
    self.assertFalse(np.all(perm1 == x))  # seems unlikely!
    self.assertAllClose(np.sort(perm1.ravel()), x.ravel(), check_dtypes=False)
    self.assertArraysAllClose(
      x, jnp.arange(np.prod(shape)).reshape(shape).astype(dtype))

  def testPermutationInteger(self):
    key = random.PRNGKey(0)
    x = 100
    rand = lambda key: random.permutation(key, x)
    crand = api.jit(rand)

    perm1 = rand(key)
    perm2 = crand(key)

    self.assertAllClose(perm1, perm2)
    self.assertEqual(perm1.dtype, perm2.dtype)
    self.assertFalse(np.all(perm1 == np.arange(100)))  # seems unlikely!
    self.assertAllClose(np.sort(perm1), np.arange(100), check_dtypes=False)

  def testPermutationErrors(self):
    key = random.PRNGKey(0)
    with self.assertRaises(TypeError):
      random.permutation(key, 10.)
    with self.assertRaises(core.ConcretizationTypeError):
      api.jit(random.permutation)(key, 10)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_p={}_dtype={}".format(p, np.dtype(dtype).name),
       "p": p, "dtype": dtype}
      for p in [0.1, 0.5, 0.9]
      for dtype in jtu.dtypes.floating))
  def testBernoulli(self, p, dtype):
    key = random.PRNGKey(0)
    p = np.array(p, dtype=dtype)
    rand = lambda key, p: random.bernoulli(key, p, (10000,))
    crand = api.jit(rand)

    uncompiled_samples = rand(key, p)
    compiled_samples = crand(key, p)

    for samples in [uncompiled_samples, compiled_samples]:
      self._CheckChiSquared(samples, scipy.stats.bernoulli(p).pmf)

  @parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_p={}_{}_{}".format(p, np.dtype(dtype).name, sample_shape),
     "p": p, "axis": axis, "dtype": dtype, 'sample_shape': sample_shape}
    for (p, axis) in [
        ([.25] * 4, -1),
        ([.1, .2, .3, .4], -1),
        ([[.5, .5], [.1, .9]], 1),
        ([[.5, .1], [.5, .9]], 0),
    ]
    for sample_shape in [(10000,), (5000, 2)]
    for dtype in jtu.dtypes.floating))
  def testCategorical(self, p, axis, dtype, sample_shape):
    key = random.PRNGKey(0)
    p = np.array(p, dtype=dtype)
    logits = np.log(p) - 42 # test unnormalized
    out_shape = tuple(np.delete(logits.shape, axis))
    shape = sample_shape + out_shape
    rand = partial(random.categorical, shape=shape, axis=axis)
    crand = api.jit(rand)

    uncompiled_samples = rand(key, logits)
    compiled_samples = crand(key, logits)

    if axis < 0:
      axis += len(logits.shape)

    for samples in [uncompiled_samples, compiled_samples]:
      assert samples.shape == shape
      samples = jnp.reshape(samples, (10000,) + out_shape)
      if len(p.shape[:-1]) > 0:
        ps = np.transpose(p, (1, 0)) if axis == 0 else p
        for cat_samples, cat_p in zip(samples.transpose(), ps):
          self._CheckChiSquared(cat_samples, pmf=lambda x: cat_p[x])
      else:
        self._CheckChiSquared(samples, pmf=lambda x: p[x])

  def testBernoulliShape(self):
    key = random.PRNGKey(0)
    x = random.bernoulli(key, np.array([0.2, 0.3]), shape=(3, 2))
    assert x.shape == (3, 2)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_a={}_b={}_dtype={}".format(a, b, np.dtype(dtype).name),
       "a": a, "b": b, "dtype": dtype}
      for a in [0.2, 5.]
      for b in [0.2, 5.]
      for dtype in [np.float64]))  # NOTE: KS test fails with float32
  def testBeta(self, a, b, dtype):
    if not config.x64_enabled:
      raise SkipTest("skip test except on X64")
    key = random.PRNGKey(0)
    rand = lambda key, a, b: random.beta(key, a, b, (10000,), dtype)
    crand = api.jit(rand)

    uncompiled_samples = rand(key, a, b)
    compiled_samples = crand(key, a, b)

    for samples in [uncompiled_samples, compiled_samples]:
      self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.beta(a, b).cdf)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
      for dtype in float_dtypes))
  def testCauchy(self, dtype):
    key = random.PRNGKey(0)
    rand = lambda key: random.cauchy(key, (10000,), dtype)
    crand = api.jit(rand)

    uncompiled_samples = rand(key)
    compiled_samples = crand(key)

    for samples in [uncompiled_samples, compiled_samples]:
      self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.cauchy().cdf)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_alpha={}_dtype={}".format(alpha, np.dtype(dtype).name),
       "alpha": alpha, "dtype": dtype}
      for alpha in [
          np.array([0.2, 1., 5.]),
      ]
      for dtype in jtu.dtypes.floating))
  @jtu.skip_on_devices("tpu")  # TODO(mattjj): slow compilation times
  def testDirichlet(self, alpha, dtype):
    key = random.PRNGKey(0)
    rand = lambda key, alpha: random.dirichlet(key, alpha, (10000,), dtype)
    crand = api.jit(rand)

    uncompiled_samples = rand(key, alpha)
    compiled_samples = crand(key, alpha)

    for samples in [uncompiled_samples, compiled_samples]:
      self.assertAllClose(samples.sum(-1), np.ones(10000, dtype=dtype))
      alpha_sum = sum(alpha)
      for i, a in enumerate(alpha):
        self._CheckKolmogorovSmirnovCDF(samples[..., i], scipy.stats.beta(a, alpha_sum - a).cdf)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
      for dtype in float_dtypes))
  def testExponential(self, dtype):
    key = random.PRNGKey(0)
    rand = lambda key: random.exponential(key, (10000,), dtype)
    crand = api.jit(rand)

    uncompiled_samples = rand(key)
    compiled_samples = crand(key)

    for samples in [uncompiled_samples, compiled_samples]:
      self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.expon().cdf)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_a={}_dtype={}".format(a, np.dtype(dtype).name),
       "a": a, "dtype": dtype}
      for a in [0.1, 1., 10.]
      for dtype in jtu.dtypes.floating))
  def testGamma(self, a, dtype):
    key = random.PRNGKey(0)
    rand = lambda key, a: random.gamma(key, a, (10000,), dtype)
    crand = api.jit(rand)

    uncompiled_samples = rand(key, a)
    compiled_samples = crand(key, a)

    for samples in [uncompiled_samples, compiled_samples]:
      self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.gamma(a).cdf)

  def testGammaShape(self):
    key = random.PRNGKey(0)
    x = random.gamma(key, np.array([0.2, 0.3]), shape=(3, 2))
    assert x.shape == (3, 2)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_a={}".format(alpha), "alpha": alpha}
      for alpha in [1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4]))
  def testGammaGrad(self, alpha):
    rng = random.PRNGKey(0)
    alphas = np.full((100,), alpha)
    z = random.gamma(rng, alphas)
    actual_grad = api.grad(lambda x: random.gamma(rng, x).sum())(alphas)

    eps = 0.01 * alpha / (1.0 + np.sqrt(alpha))
    cdf_dot = (scipy.stats.gamma.cdf(z, alpha + eps)
               - scipy.stats.gamma.cdf(z, alpha - eps)) / (2 * eps)
    pdf = scipy.stats.gamma.pdf(z, alpha)
    expected_grad = -cdf_dot / pdf

    self.assertAllClose(actual_grad, expected_grad, check_dtypes=True,
                        rtol=2e-2 if jtu.device_under_test() == "tpu" else 7e-4)

  def testGammaGradType(self):
    # Regression test for https://github.com/google/jax/issues/2130
    key = random.PRNGKey(0)
    a = jnp.array(1., dtype=jnp.float32)
    b = jnp.array(3., dtype=jnp.float32)
    f = lambda x, y: random.gamma(key=key, a=x, dtype=jnp.float32) / y
    # Should not crash with a type error.
    api.vjp(f, a, b)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_lam={}_dtype={}".format(lam, np.dtype(dtype).name),
       "lam": lam, "dtype": np.dtype(dtype)}
      for lam in [0.5, 3, 9, 11, 50, 500]
      for dtype in [np.int16, np.int32, np.int64]))
  def testPoisson(self, lam, dtype):
    key = random.PRNGKey(0)
    rand = lambda key, lam: random.poisson(key, lam, (10000,), dtype)
    crand = api.jit(rand)

    uncompiled_samples = rand(key, lam)
    compiled_samples = crand(key, lam)

    for samples in [uncompiled_samples, compiled_samples]:
      self._CheckChiSquared(samples, scipy.stats.poisson(lam).pmf)
      # TODO(shoyer): determine error bounds for moments more rigorously (e.g.,
      # based on the central limit theorem).
      self.assertAllClose(samples.mean(), lam, rtol=0.01, check_dtypes=False)
      self.assertAllClose(samples.var(), lam, rtol=0.03, check_dtypes=False)

  def testPoissonBatched(self):
    key = random.PRNGKey(0)
    lam = jnp.concatenate([2 * jnp.ones(10000), 20 * jnp.ones(10000)])
    samples = random.poisson(key, lam, shape=(20000,))
    self._CheckChiSquared(samples[:10000], scipy.stats.poisson(2.0).pmf)
    self._CheckChiSquared(samples[10000:], scipy.stats.poisson(20.0).pmf)

  def testPoissonShape(self):
    key = random.PRNGKey(0)
    x = random.poisson(key, np.array([2.0, 20.0]), shape=(3, 2))
    assert x.shape == (3, 2)

  def testPoissonZeros(self):
    key = random.PRNGKey(0)
    lam = jnp.concatenate([jnp.zeros(10), 20 * jnp.ones(10)])
    samples = random.poisson(key, lam, shape=(2, 20))
    self.assertArraysEqual(samples[:, :10], jnp.zeros_like(samples[:, :10]))

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
      for dtype in jtu.dtypes.floating))
  def testGumbel(self, dtype):
    key = random.PRNGKey(0)
    rand = lambda key: random.gumbel(key, (10000,), dtype)
    crand = api.jit(rand)

    uncompiled_samples = rand(key)
    compiled_samples = crand(key)

    for samples in [uncompiled_samples, compiled_samples]:
      self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.gumbel_r().cdf)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
      for dtype in float_dtypes))
  def testLaplace(self, dtype):
    key = random.PRNGKey(0)
    rand = lambda key: random.laplace(key, (10000,), dtype)
    crand = api.jit(rand)

    uncompiled_samples = rand(key)
    compiled_samples = crand(key)

    for samples in [uncompiled_samples, compiled_samples]:
      self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.laplace().cdf)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
      for dtype in float_dtypes))
  def testLogistic(self, dtype):
    key = random.PRNGKey(0)
    rand = lambda key: random.logistic(key, (10000,), dtype)
    crand = api.jit(rand)

    uncompiled_samples = rand(key)
    compiled_samples = crand(key)

    for samples in [uncompiled_samples, compiled_samples]:
      self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.logistic().cdf)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_b={}_dtype={}".format(b, np.dtype(dtype).name),
       "b": b, "dtype": dtype}
      for b in [0.1, 1., 10.]
      for dtype in jtu.dtypes.floating))
  def testPareto(self, b, dtype):
    key = random.PRNGKey(0)
    rand = lambda key, b: random.pareto(key, b, (10000,), dtype)
    crand = api.jit(rand)

    uncompiled_samples = rand(key, b)
    compiled_samples = crand(key, b)

    for samples in [uncompiled_samples, compiled_samples]:
      self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.pareto(b).cdf)

  def testParetoShape(self):
    key = random.PRNGKey(0)
    x = random.pareto(key, np.array([0.2, 0.3]), shape=(3, 2))
    assert x.shape == (3, 2)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_df={}_dtype={}".format(df, np.dtype(dtype).name),
       "df": df, "dtype": dtype}
      for df in [0.1, 1., 10.]
      for dtype in jtu.dtypes.floating))
  @jtu.skip_on_devices("cpu", "tpu")  # TODO(phawkins): slow compilation times
  def testT(self, df, dtype):
    key = random.PRNGKey(0)
    rand = lambda key, df: random.t(key, df, (10000,), dtype)
    crand = api.jit(rand)

    uncompiled_samples = rand(key, df)
    compiled_samples = crand(key, df)

    for samples in [uncompiled_samples, compiled_samples]:
      self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.t(df).cdf)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_dim={}_dtype={}_method={}".format(
          dim, np.dtype(dtype), method),
       "dim": dim, "dtype": dtype, "method": method}
      for dim in [1, 3, 5]
      for dtype in float_dtypes
      for method in ['svd', 'eigh', 'cholesky']))
  def testMultivariateNormal(self, dim, dtype, method):
    r = np.random.RandomState(dim)
    mean = r.randn(dim)
    cov_factor = r.randn(dim, dim)
    cov = np.dot(cov_factor, cov_factor.T) + dim * np.eye(dim)

    key = random.PRNGKey(0)
    rand = partial(random.multivariate_normal, mean=mean, cov=cov,
                   shape=(10000,), method=method)
    crand = api.jit(rand)

    uncompiled_samples = np.asarray(rand(key), np.float64)
    compiled_samples = np.asarray(crand(key), np.float64)

    inv_scale = scipy.linalg.lapack.dtrtri(np.linalg.cholesky(cov), lower=True)[0]
    for samples in [uncompiled_samples, compiled_samples]:
      centered = samples - mean
      whitened = np.einsum('nj,ij->ni', centered, inv_scale)

      # This is a quick-and-dirty multivariate normality check that tests that a
      # uniform mixture of the marginals along the covariance matrix's
      # eigenvectors follow a standard normal distribution.
      self._CheckKolmogorovSmirnovCDF(whitened.ravel(), scipy.stats.norm().cdf)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_dim={}_mean_batch_size={}_cov_batch_size={}_shape={}"\
       .format(dim, mean_batch_size, cov_batch_size, shape),
       "dim": dim,
       "mean_batch_size": mean_batch_size,
       "cov_batch_size": cov_batch_size,
       "shape": shape}
      for dim in [1, 2, 4]
      for mean_batch_size in [(), (3,), (2, 3)]
      for cov_batch_size in [(), (3,), (2, 3)]
      for shape in [(), (1,), (5,)]))
  def testMultivariateNormalShapes(self, dim, mean_batch_size, cov_batch_size,
                                   shape):
    r = np.random.RandomState(0)
    key = random.PRNGKey(0)
    eff_batch_size = mean_batch_size \
      if len(mean_batch_size) > len(cov_batch_size) else cov_batch_size
    mean = r.randn(*(mean_batch_size + (dim,)))
    cov_factor = r.randn(*(cov_batch_size + (dim, dim)))
    cov = np.einsum('...ij,...kj->...ik', cov_factor, cov_factor)
    cov += 1e-3 * np.eye(dim)
    shape = shape + eff_batch_size
    samples = random.multivariate_normal(key, mean, cov, shape=shape)
    assert samples.shape == shape + (dim,)

  def testMultivariateNormalCovariance(self):
    # test code based on https://github.com/google/jax/issues/1869
    N = 100000
    cov = jnp.array([[ 0.19,  0.00, -0.13,  0.00],
                   [  0.00,  0.29,  0.00, -0.23],
                   [ -0.13,  0.00,  0.39,  0.00],
                   [  0.00, -0.23,  0.00,  0.49]])
    mean = jnp.zeros(4)

    out_np = np.random.RandomState(0).multivariate_normal(mean, cov, N)

    key = random.PRNGKey(0)
    out_jnp = random.multivariate_normal(key, mean=mean, cov=cov, shape=(N,))

    var_np = out_np.var(axis=0)
    var_jnp = out_jnp.var(axis=0)
    self.assertAllClose(var_np, var_jnp, rtol=1e-2, atol=1e-2,
                        check_dtypes=False)

    var_np = np.cov(out_np, rowvar=False)
    var_jnp = np.cov(out_jnp, rowvar=False)
    self.assertAllClose(var_np, var_jnp, rtol=1e-2, atol=1e-2,
                        check_dtypes=False)

  def testIssue222(self):
    x = random.randint(random.PRNGKey(10003), (), 0, 0)
    assert x == 0

  def testFoldIn(self):
    key = random.PRNGKey(0)
    keys = [random.fold_in(key, i) for i in range(10)]
    assert np.unique(np.ravel(keys)).shape == (20,)

  def testStaticShapeErrors(self):
    if config.read("jax_disable_jit"):
      raise SkipTest("test only relevant when jit enabled")

    @api.jit
    def feature_map(n, d, sigma=1.0, seed=123):
      key = random.PRNGKey(seed)
      W = random.normal(key, (d, n)) / sigma
      w = random.normal(key, (d, )) / sigma
      b = 2 * jnp.pi * random.uniform(key, (d, ))

      phi = lambda x, t: jnp.sqrt(2.0 / d) * jnp.cos(jnp.matmul(W, x) + w*t + b)
      return phi

    self.assertRaisesRegex(TypeError, 'Shapes must be 1D.*',
                           lambda: feature_map(5, 3))

  def testIssue756(self):
    key = random.PRNGKey(0)
    w = random.normal(key, ())
    if config.x64_enabled:
      self.assertEqual(np.result_type(w), np.float64)
    else:
      self.assertEqual(np.result_type(w), np.float32)

  def testIssue1789(self):
    def f(x):
      return random.gamma(random.PRNGKey(0), x)

    grad(lambda x: jnp.sum(vmap(f)(x)))(jnp.ones(2))

  def testNoOpByOpUnderHash(self):
    if not config.omnistaging_enabled:
      raise SkipTest("test requires omnistaging")
    def fail(*args, **kwargs): assert False
    apply_primitive, xla.apply_primitive = xla.apply_primitive, fail
    try:
      _ = random.threefry_2x32(np.zeros(2, np.uint32), np.arange(10, dtype=np.uint32))
    finally:
      xla.apply_primitive = apply_primitive

  def testPRNGValues(self):
    # Test to ensure consistent random values between JAX versions
    k = random.PRNGKey(0)

    if config.x64_enabled:
        self.assertAllClose(
            random.randint(k, (3, 3), 0, 8),
            np.array([[7, 2, 6],
                       [2, 1, 0],
                       [6, 7, 7]], dtype='int64'))
    else:
        self.assertAllClose(
            random.randint(k, (3, 3), 0, 8),
            np.array([[2, 1, 3],
                       [6, 1, 5],
                       [6, 3, 4]], dtype='int32'))

    self.assertAllClose(
        random.split(k, 4),
        np.array([[2285895361, 1501764800],
                   [1518642379, 4090693311],
                   [ 433833334, 4221794875],
                   [ 839183663, 3740430601]], dtype='uint32'))

    self.assertAllClose(
        random.fold_in(k, 4),
        np.array([2285895361,  433833334], dtype='uint32'))

  def testDtypeErrorMessage(self):
    with self.assertRaisesRegex(ValueError, r"dtype argument to.*"):
      random.normal(random.PRNGKey(0), (), dtype=jnp.int32)

  def testRandomBroadcast(self):
    """Issue 4033"""
    # test for broadcast issue in https://github.com/google/jax/issues/4033
    key = random.PRNGKey(0)
    shape = (10, 2)
    x = random.uniform(key, shape, minval=jnp.zeros(2), maxval=jnp.ones(2))
    assert x.shape == shape
    x = random.randint(key, shape, jnp.array([0, 1]), jnp.array([1, 2]))
    assert x.shape == shape

  def testMaxwellSample(self):
    num_samples = 10**5
    rng = random.PRNGKey(0)

    rand = lambda x: random.maxwell(x, (num_samples, ))
    crand = api.jit(rand)

    loc = scipy.stats.maxwell.mean()
    std = scipy.stats.maxwell.std()

    uncompiled_samples = rand(rng)
    compiled_samples = crand(rng)

    for samples in [uncompiled_samples, compiled_samples]:
      # Check first and second moments.
      self.assertEqual((num_samples,), samples.shape)
      self.assertAllClose(np.mean(samples), loc, atol=0., rtol=0.1)
      self.assertAllClose(np.std(samples), std, atol=0., rtol=0.1)
      self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.maxwell().cdf)

  @parameterized.named_parameters(
      ('test1', 4.0, 1.0),
      ('test2', 2.0, 3.0))
  def testWeibullSample(self, concentration, scale):
    num_samples = 10**5
    rng = random.PRNGKey(0)

    rand = lambda x: random.weibull_min(x, scale, concentration, (num_samples,))
    crand = api.jit(rand)

    loc = scipy.stats.weibull_min.mean(c=concentration, scale=scale)
    std = scipy.stats.weibull_min.std(c=concentration, scale=scale)

    uncompiled_samples = rand(rng)
    compiled_samples = crand(rng)

    for samples in [uncompiled_samples, compiled_samples]:
      # Check first and second moments.
      self.assertEqual((num_samples,), samples.shape)
      self.assertAllClose(np.mean(samples), loc, atol=0., rtol=0.1)
      self.assertAllClose(np.std(samples), std, atol=0., rtol=0.1)
      self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.weibull_min(
          c=concentration, scale=scale).cdf)

  @parameterized.named_parameters(
      ('test1', 4.0, 1.0),
      ('test2', 2.0, 3.0))
  def testDoublesidedMaxwellSample(self, loc, scale):
    num_samples = 10**5
    rng = random.PRNGKey(0)

    rand = lambda key: random.double_sided_maxwell(
        rng, loc, scale, (num_samples,))
    crand = api.jit(rand)

    mean = loc
    std = np.sqrt(3.) * scale

    uncompiled_samples = rand(rng)
    compiled_samples = crand(rng)

    # Compute the double sided maxwell CDF through the one sided maxwell cdf.
    # This is done as follows:
    # P(DSM <= x) = P (loc + scale * radamacher_sample * one_sided_sample <=x) =
    # P (radamacher_sample * one_sided_sample <= (x - loc) / scale) =
    # 1/2 P(one_sided_sample <= (x - loc) / scale)
    #    + 1/2 P( - one_sided_sample <= (x - loc) / scale) =
    #  1/2 P(one_sided_sample <= (x - loc) / scale)
    #    + 1/2 P(one_sided_sample >= - (x - loc) / scale) =
    # 1/2 CDF_one_maxwell((x - loc) / scale))
    #   + 1/2 (1 - CDF_one_maxwell(- (x - loc) / scale)))
    def double_sided_maxwell_cdf(x, loc, scale):
      pos = scipy.stats.maxwell().cdf((x - loc)/ scale)
      neg = (1 - scipy.stats.maxwell().cdf((-x + loc)/ scale))
      return (pos + neg) / 2

    for samples in [uncompiled_samples, compiled_samples]:
      # Check first and second moments.
      self.assertEqual((num_samples,), samples.shape)
      self.assertAllClose(np.mean(samples), mean, atol=0., rtol=0.1)
      self.assertAllClose(np.std(samples), std, atol=0., rtol=0.1)

      self._CheckKolmogorovSmirnovCDF(
          samples, lambda x: double_sided_maxwell_cdf(x, loc, scale))

  def testRadamacher(self):
    rng = random.PRNGKey(0)
    num_samples = 10**5

    rand = lambda x: random.rademacher(x, (num_samples,))
    crand = api.jit(rand)

    uncompiled_samples = rand(rng)
    compiled_samples = crand(rng)

    for samples in [uncompiled_samples, compiled_samples]:
      unique_values, counts = np.unique(samples, return_counts=True)
      assert len(unique_values) == 2
      assert len(counts) == 2

      self.assertAllClose(
          counts[0] / num_samples, 0.5, rtol=1e-02, atol=1e-02)
      self.assertAllClose(
          counts[1] / num_samples, 0.5, rtol=1e-02, atol=1e-02)

  def testChoiceShapeIsNotSequenceError(self):
    key = random.PRNGKey(0)
    with self.assertRaises(TypeError):
      random.choice(key, 5, 2, replace=False)
    with self.assertRaises(TypeError):
      random.choice(key, 5, 2, replace=True)

  def test_eval_shape_big_random_array(self):
    if not config.omnistaging_enabled:
      raise SkipTest("after deleting lazy constants, requires omnistaging")
    def f(x):
      return random.normal(random.PRNGKey(x), (int(1e12),))
    with core.skipping_checks():  # check_jaxpr will materialize array
      api.eval_shape(f, 0)  # doesn't error

  @parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "seed={seed}_type={type}_jit={jit}".format(**dct), **dct} for dct in [
      {"seed": 0, "type": int, "jit": True, "key": [0, 0]},
      {"seed": 0, "type": int, "jit": False, "key": [0, 0]},
      {"seed": 1, "type": np.int32, "jit": True, "key": [0, 1]},
      {"seed": 1, "type": np.int32, "jit": False, "key": [0, 1]},
      {"seed": 2, "type": np.uint32, "jit": True, "key": [0, 2]},
      {"seed": 2, "type": np.uint32, "jit": False, "key": [0, 2]},
      {"seed": 3, "type": np.int64, "jit": True, "key": [0, 3]},
      {"seed": 3, "type": np.int64, "jit": False, "key": [0, 3]},
      {"seed": -1, "type": int, "jit": True, "key": [4294967295, 4294967295] if config.x64_enabled else [0, 4294967295]},
      {"seed": -1, "type": int, "jit": False, "key": [4294967295, 4294967295] if config.x64_enabled else [0, 4294967295]},
      {"seed": -2, "type": np.int32, "jit": True, "key": [0, 4294967294]},
      {"seed": -2, "type": np.int32, "jit": False, "key": [0, 4294967294]},
      {"seed": -3, "type": np.int64, "jit": True, "key": [4294967295, 4294967293] if config.x64_enabled else [0, 4294967293]},
      {"seed": -3, "type": np.int64, "jit": False, "key": [4294967295, 4294967293] if config.x64_enabled else [0, 4294967293]},
      {"seed": np.iinfo(np.int32).max + 100, "type": int, "jit": True, "key": [0, 2147483747]},
      {"seed": np.iinfo(np.int32).max + 100, "type": int, "jit": False, "key": [0, 2147483747]},
      {"seed": np.iinfo(np.int32).max + 101, "type": np.uint32, "jit": True, "key": [0, 2147483748]},
      {"seed": np.iinfo(np.int32).max + 101, "type": np.uint32, "jit": False, "key": [0, 2147483748]},
      {"seed": np.iinfo(np.int32).min - 100, "type": int, "jit": True, "key": [4294967295, 2147483548] if config.x64_enabled else [0, 2147483548]},
      {"seed": np.iinfo(np.int32).min - 100, "type": int, "jit": False, "key": [4294967295, 2147483548] if config.x64_enabled else [0, 2147483548]},
      {"seed": np.iinfo(np.int32).min - 101, "type": np.int64, "jit": True, "key": [4294967295, 2147483547] if config.x64_enabled else [0, 2147483547]},
      {"seed": np.iinfo(np.int32).min - 101, "type": np.int64, "jit": False, "key": [4294967295, 2147483547] if config.x64_enabled else [0, 2147483547]},
    ]
  ))
  def test_prng_seeds_and_keys(self, seed, type, jit, key):
    seed = type(seed)
    if jit:
      actual = api.jit(random.PRNGKey)(seed)
    else:
      actual = random.PRNGKey(seed)
    expected = jnp.array(key, dtype=jnp.uint32)
    self.assertArraysEqual(actual, expected)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": f"_seed={seed}_type={type}", "seed": seed, "type": type}
      for type in ["int", "np.array", "jnp.array"]
      for seed in [-1, 0, 1, (1 << 32) - 1, (1 << 63) - 1, np.uint64((1 << 64) - 1)]))
  def test_prng_jit_invariance(self, seed, type):
    if type == "int" and seed == (1 << 64) - 1:
      self.skipTest("Expected failure: Python int too large.")
    type = {"int": int, "np.array": np.array, "jnp.array": jnp.array}[type]
    args_maker = lambda: [type(seed)]
    self._CompileAndCheck(random.PRNGKey, args_maker)

  def test_prng_errors(self):
    seed = np.iinfo(np.uint64).max
    with self.assertRaises(OverflowError):
      random.PRNGKey(seed)
    with self.assertRaises(OverflowError):
      api.jit(random.PRNGKey)(seed)

  def test_random_split_doesnt_device_put_during_tracing(self):
    if not config.omnistaging_enabled:
      raise SkipTest("test requires omnistaging")
    key = random.PRNGKey(1).block_until_ready()
    with jtu.count_device_put() as count:
      api.jit(random.split)(key)
    self.assertEqual(count[0], 1)  # 1 for the argument device_put
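
# A minimal sketch (an illustrative assumption, not JAX's actual
# implementation) of the seed -> key rule encoded by the parameterized table
# in test_prng_seeds_and_keys above: the seed is reinterpreted as an unsigned
# integer of its own bit width and split into 32-bit halves; with x64
# disabled the upper half is dropped.
def expected_prng_key(seed, width=64, x64_enabled=True):
    useed = seed % (1 << width)        # two's-complement reinterpretation
    high = (useed >> 32) & 0xFFFFFFFF  # upper 32 bits (always 0 for width=32)
    low = useed & 0xFFFFFFFF           # lower 32 bits
    return [high if x64_enabled else 0, low]

assert expected_prng_key(-1) == [4294967295, 4294967295]
assert expected_prng_key(-2, width=32) == [0, 4294967294]
assert expected_prng_key(2147483647 + 100) == [0, 2147483747]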
Example #59
0
def unset_bit(bit64, index):
    """Set bit index on 64 bit unsigned integer to zero."""
    bit64 &= ~(np.uint64(1) << np.uint64(index))
    return bit64
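
# Hypothetical usage of unset_bit (values assumed for illustration):
x = np.uint64(0b1011)          # bits 0, 1, and 3 set
x = unset_bit(x, 1)            # clear bit 1
assert x == np.uint64(0b1001)  # bits 0 and 3 remain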
def extract_quad(quad_dat, timestamp, verbose=False):
    '''Reads a stream of I32s, finds the first timestamp,
       then starts de-interleaving the demodulated data
       from the FPGA'''
    
    if timestamp == 0.0:
        # If no timestamp is given, use the current time and set the
        # timing threshold to 1 month. This threshold is used to
        # identify the timestamp in the stream of I32s.
        timestamp = time.time()
        diff_thresh = 31.0 * 24.0 * 3600.0
    else:
        # Convert the given timestamp from nanoseconds to seconds
        timestamp = timestamp * (10.0**(-9))
        diff_thresh = 60.0

    writing_data = False
    quad_ind = 0

    quad_time = []
    amp = [[], [], [], [], []]
    phase = [[], [], [], [], []]
    for ind, dat in enumerate(quad_dat):

        # Data in the 'quad' FIFO comes through as:
        # time_MSB -> time_LSB ->
        # amp0     -> amp1     -> amp2   -> amp3   -> amp4   ->
        # phase0   -> phase1   -> phase2 -> phase3 -> phase4 ->
        # and then repeats. Amplitude and phase variables are 
        # arbitrarily scaled so thinking of them as 32-bit integers
        # is okay. We just care about the bits anyway. The amplitude
        # is unsigned, so we get an extra bit of precision there
        if writing_data:
            if quad_ind == 0 and ind != (len(quad_dat) - 1):
                high = np.uint32(quad_dat[ind])
                low = np.uint32(quad_dat[ind+1])
                dattime = (high.astype(np.uint64) << np.uint64(32)) \
                           + low.astype(np.uint64)
                quad_time.append(dattime)
            elif 2 <= quad_ind <= 6:
                # amp0..amp4 are unsigned 32-bit values
                amp[quad_ind - 2].append(dat.astype(np.uint32))
            elif 7 <= quad_ind <= 11:
                # phase0..phase4 are kept as-is
                phase[quad_ind - 7].append(dat)
            
            quad_ind += 1
            quad_ind = quad_ind % 12

        # Check for the timestamp
        if not writing_data and quad_ind == 0:
            # Assemble time stamp from successive I32s, since
            # it's a 64 bit object
            high = np.uint32(quad_dat[ind])
            low = np.uint32(quad_dat[ind+1])
            dattime = (high.astype(np.uint64) << np.uint64(32)) \
                        + low.astype(np.uint64)

            # Time stamp from FPGA is a U64 with the UNIX epoch 
            # time in nanoseconds, synced to the host's clock
            if np.abs(timestamp - float(dattime) * 10**(-9)) < diff_thresh:
                if verbose:
                    print("found timestamp  : ", float(dattime) * 10**(-9))
                    print("comparison time  : ", timestamp)
                quad_time.append(dattime)
                quad_ind += 1
                writing_data = True

    # Since the FIFO read request is asynchronous, the timestamp is
    # sometimes not the first element to come out, but the total amount
    # of data read is a multiple of 12 (2 time + 5 amp + 5 phase), so
    # some amplitude or phase channels can end up with fewer samples
    # than others. The following trims all channels to a common length.

    min_len = 10**9  # Assumes we never have more than 1 billion samples
    for ind in [0, 1, 2, 3, 4]:
        if len(amp[ind]) < min_len:
            min_len = len(amp[ind])
        if len(phase[ind]) < min_len:
            min_len = len(phase[ind])

    # Trim everything to the minimum length and convert to numpy arrays
    quad_time = np.array(quad_time[:min_len])
    for ind in [0, 1, 2, 3, 4]:
        amp[ind] = amp[ind][:min_len]
        phase[ind] = phase[ind][:min_len]
    amp = np.array(amp)
    phase = np.array(phase)

    return quad_time, amp, phase
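
# Standalone illustration of the U64 timestamp assembly used in extract_quad
# above (word values are assumed for illustration): two successive 32-bit
# words off the FIFO are combined as the high and low halves of an unsigned
# 64-bit nanosecond counter.
high = np.uint32(1)          # hypothetical time_MSB word
low = np.uint32(1600000000)  # hypothetical time_LSB word
stamp = (high.astype(np.uint64) << np.uint64(32)) + low.astype(np.uint64)
assert stamp == np.uint64((1 << 32) + 1600000000)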