Example 1
def elucidate_cc_split(parent_id, split_id):
	parent_id_bytes = numpy.frombuffer(parent_id, dtype=numpy.uint8)
	split_id_bytes = numpy.frombuffer(split_id, dtype=numpy.uint8)

	parent_id_bits = numpy.unpackbits(parent_id_bytes)
	split_id_bits = numpy.unpackbits(split_id_bytes)

	n_parent_bits = len(parent_id_bits)
	n_split_bits = len(split_id_bits)

	child1_bits = numpy.zeros(n_parent_bits, dtype = numpy.uint8)
	child2_bits = numpy.zeros(n_parent_bits, dtype = numpy.uint8)

	j = 0
	for i in range(n_parent_bits):
		if parent_id_bits[i] == 1:
			if j < n_split_bits:
				if split_id_bits[j] == 1:
					child1_bits[i] = 1
				else:
					child2_bits[i] = 1
			else:
				child2_bits[i] = 1

			j += 1

	child1_bytes = numpy.packbits(child1_bits)
	child2_bytes = numpy.packbits(child2_bits)

	child1_id = child1_bytes.tobytes().rstrip(b"\x00") # urgh C (null terminated strings)
	child2_id = child2_bytes.tobytes().rstrip(b"\x00") # vs Python (not null terminated) strings

	return child1_id, child2_id
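A quick check of the function above (the one-byte parent and split IDs here are hypothetical): each set bit of the parent mask goes to child 1 when the corresponding split bit is 1, otherwise to child 2.

# parent 0b11000000 has two set bits; split bits 1,0 route them to child 1 and child 2
c1, c2 = elucidate_cc_split(b"\xc0", b"\x80")
assert (c1, c2) == (b"\x80", b"\x40")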
Example 2
File: util.py Project: Brtle/obspy
def _read_bcd(fi, length, left_part):
    """
    Interprets a byte string as binary coded decimals.

    See: https://en.wikipedia.org/wiki/Binary-coded_decimal#Basics

    :param fi: A buffer containing the bytes to read.
    :param length: number of bytes to read.
    :type length: int or float
    :param left_part: If True, start the reading from the first half part
        of the first byte. If False, start the reading from
        the second half part of the first byte.
    :type left_part: bool
    """
    tens = np.power(10, range(12))[::-1]
    nbr_half_bytes = round(2*length)
    if isinstance(length, float):
        length = int(length) + 1
    byte_values = fi.read(length)
    ints = np.frombuffer(byte_values, dtype='<u1', count=length)
    if left_part is True:
        unpack_bits = np.unpackbits(ints).reshape(-1, 4)[0:nbr_half_bytes]
    else:
        unpack_bits = np.unpackbits(ints).reshape(-1, 4)[1:nbr_half_bytes+1]
    bits = np.dot(unpack_bits, np.array([1, 2, 4, 8])[::-1].reshape(4, 1))
    if np.any(bits > 9):
        raise ValueError('invalid bcd values encountered')
    return np.dot(tens[-len(bits):], bits)[0]
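A minimal usage sketch, assuming the module imports numpy as np and feeding the function an in-memory buffer: the two bytes 0x12 0x34 decode to the decimal 1234.

import io
assert _read_bcd(io.BytesIO(b"\x12\x34"), 2, True) == 1234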
Example 3
 def fix(index):
     ep = self.index[index]
     ev = v
     if self._metric == "hamming":
         ep = numpy.unpackbits(ep)
         ev = numpy.unpackbits(ev)
     return (index, pd[self._metric]['distance'](ep, ev))
Example 4
def build_syndrom_table(H):
    """
    >>> H = np.array([[0, 1, 1, 0, 1],\
                      [1, 1, 1, 1, 0],\
                      [1, 0, 0, 1, 0]], np.uint8)
    >>> for a, b in build_syndrom_table(H):\
            print("{}: {}".format(a, b))
    [0 1 1]: [0 0 0 1 0]
    [1 1 0]: [0 0 1 0 0]
    [1 0 1]: [0 0 1 1 0]
    [1 0 0]: [0 0 0 0 1]
    [1 1 1]: [0 0 0 1 1]
    [0 1 0]: [0 0 1 0 1]
    [0 0 1]: [0 0 1 1 1]
    """
    r, n = H.shape
    table = []
    already = set()
    for y in range(0, 2 ** n):
        y = np.unpackbits(np.array([y], np.int64).view(np.uint8).reshape(-1, 1), axis=-1)[:, ::-1].ravel()[:n]
        s = np.dot(y, H.T) % 2
        if np.all(s == 0) or str(s) in already:
            continue
        already.add(str(s))
        best_e = None
        for e in range(0, 2 ** n):
            e = np.unpackbits(np.array([e], np.int64).view(np.uint8).reshape(-1, 1), axis=-1)[:, ::-1].ravel()[:n]
            if not np.all(s == np.dot(e, H.T) % 2):
                continue
            if best_e is None or np.sum(e) <= np.sum(best_e):
                best_e = e
        table.append((s, best_e))
    return table
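The unpackbits idiom used twice above, in isolation: it expands an integer into its n low-order bits, least significant bit first (the int64-to-uint8 view assumes a little-endian host).

y, n = 5, 5
bits = np.unpackbits(np.array([y], np.int64).view(np.uint8)
                     .reshape(-1, 1), axis=-1)[:, ::-1].ravel()[:n]
assert bits.tolist() == [1, 0, 1, 0, 0]  # 5 = 0b101, LSB first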
Example 5
 def decodeStripe(self, off, length, nStripe):
     stripeOff = off
     end = off + length
     x = 8 * nStripe
     y = 0
     while (stripeOff < end):
         count = self.res.data[stripeOff]
         stripeOff += 1
         if (count & 0x80):
             count &= 0x7F
             bits = unpackbits(uint8(self.res.data[stripeOff]))
             stripeOff += 1
             for j in range(0, count):
                 for k in range(0, 8):
                     if bits[k] == 1:
                         self.emptyMask = False
                         self.img.putpixel((x + k, y), 1)
                 y += 1
         else:
             for j in range(0, count):
                 bits = unpackbits(uint8(self.res.data[stripeOff]))
                 stripeOff += 1
                 for k in range(0, 8):
                     if bits[k] == 1:
                         self.emptyMask = False
                         self.img.putpixel((x + k, y), 1)
                 y += 1
Example 6
 def write_tune_mask(self, mask):
         # 1  -> Sign = 1, TDac = 15 1111(lowest)
         # ...
         # 15 -> Sign = 1, TDac = 0  0000
         # 16 -> Sign = 0, TDac = 0  0001
         # ...
         # 31 -> Sign = 0, TDac = 15 1111
         
         mask_out = np.copy(mask)
         mask_bits = np.unpackbits(mask_out)
         mask_bits_array = np.reshape(mask_bits, (64,64,8))
         mask_out[mask_bits_array[:,:,3] == 0] = 16 - mask_out[mask_bits_array[:,:,3] == 0]#15
         #investigate here how to set 0 to 0
         mask_bits = np.unpackbits(mask_out)
         mask_bits_array = np.reshape(mask_bits, (64,64,8)).astype(bool)  # np.bool was removed in NumPy 1.24
         mask_bits_array[:,:,3] = ~mask_bits_array[:,:,3]
          
         for bit in range(4):
             mask_bits_sel = mask_bits_array[:,:,7-bit]
             self.write_pixel(mask_bits_sel)
             self['global_conf']['TDacLd'][bit] = 1
             self.write_global()
             self['global_conf']['TDacLd'][bit] = 0
         
         mask_bits_sel = mask_bits_array[:,:,3]
         self.write_pixel(mask_bits_sel)
         self['global_conf']['SignLd'] = 1
         self.write_global()
         self['global_conf']['SignLd'] = 0
Example 7
def get_batch(batch_size, binary=False):
    """Gets a batch of data.

    Args:
        batch_size (int): how much data to generate.
        binary (Optional[bool]): whether the data should be scalars in (0,1]
            or binary vectors (8 bit, with 16 bit results).

    Returns:
        batch: (a, b, target) where target = ab (elementwise).
    """
    if not binary:
        # easy case: real scalars in (0, 1]
        input_a = np.random.random((batch_size, 1))
        input_b = np.random.random((batch_size, 1))
        target = input_a * input_b
    else:
        input_a = np.random.randint(256, size=(batch_size, 1))
        input_b = np.random.randint(256, size=(batch_size, 1))
        target = input_a * input_b
        input_a = np.unpackbits(input_a.astype(np.uint8)).reshape(batch_size, 8)
        input_b = np.unpackbits(input_b.astype(np.uint8)).reshape(batch_size, 8)
        # now do target
        target_lsb = target & 0xff
        target_lsb = np.unpackbits(target_lsb.astype(np.uint8))
        target_msb = target >> 8
        target_msb = np.unpackbits(target_msb.astype(np.uint8))
        target = np.hstack((target_msb.reshape(batch_size, 8),
                            target_lsb.reshape(batch_size, 8)))
    return input_a, input_b, target
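Worked arithmetic for the 16-bit target split above: 200 * 100 = 20000 = 0x4E20, so the MSB byte is 78 and the LSB byte is 32.

a, b = 200, 100
t = a * b
assert (t >> 8, t & 0xff) == (78, 32)
assert np.unpackbits(np.array([t >> 8], dtype=np.uint8)).tolist() == [0, 1, 0, 0, 1, 1, 1, 0]  # 78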
Example 8
 def set_color(self, led, intensity, color):
     addr_b = np.unpackbits(np.array([led], dtype=np.uint8))[2:]
     intensity_b = np.unpackbits(np.array([intensity], dtype=np.uint8))
     color_b = itertools.chain(*[np.unpackbits(np.array([c>>4], dtype=np.uint8))[4:] for c in reversed(color)])
     self.write_begin()
     for b in itertools.chain(addr_b, intensity_b, color_b):
         self.write_bit(b)
     self.write_end()
Example 9
 def __init__(self, filename="mario_ROM.zip", offset=2049):
     self.offset = offset
     if zipfile.is_zipfile(filename):
         zp = zipfile.ZipFile(filename)
         data = np.unpackbits(np.frombuffer(zp.read(zp.filelist[0]), dtype=np.uint8))
     else:
         data = np.unpackbits(np.fromfile(filename, dtype=np.uint8))
     self.data = data.reshape((-1, 8, 8))
Example 10
def test_unpackbits_large():
    # test all possible numbers via comparison to already tested packbits
    d = np.arange(277, dtype=np.uint8)
    assert_array_equal(np.packbits(np.unpackbits(d)), d)
    assert_array_equal(np.packbits(np.unpackbits(d[::2])), d[::2])
    d = np.tile(d, (3, 1))
    assert_array_equal(np.packbits(np.unpackbits(d, axis=1), axis=1), d)
    d = d.T.copy()
    assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d)
Example 11
def main(args):
    x1 = np.load(args.infile1)
    x2 = np.load(args.infile2)
    assert len(x1.shape) == 2, 'infile1 should be 2d array!'
    assert len(x2.shape) == 2, 'infile2 should be 2d array!'
    assert x1.shape[0] == x2.shape[0], 'two infile should have same rows!'
    x1 = np.unpackbits(x1, axis=1)
    x2 = np.unpackbits(x2, axis=1)
    r1 = x1.shape[1] if args.row1 == 0 else args.row1
    r2 = x2.shape[1] if args.row2 == 0 else args.row2
    N = x1.shape[0]
    print(r1, r2, N)
    x1 = np.packbits(x1[:, :r1].T, axis=1)
    x2 = np.packbits(x2[:, :r2].T, axis=1)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        x1 = cuda.to_gpu(x1)
        x2 = cuda.to_gpu(x2)
        xp = cupy
    else:
        xp = np
    # popcount LUT
    pc = xp.zeros(256, dtype=np.uint8)
    for i in range(256):
        pc[i] = (i & 1) + pc[i // 2]  # integer division; i/2 is a float index in Python 3

    hamm = xp.zeros((r1, r2), dtype=np.int32)
    for i in tqdm(range(r1)):
        x1i = xp.tile(x1[i], (r2, 1))
        if args.operation == 'xor':
            hamm[i] = xp.take(pc, xp.bitwise_xor(x1i, x2).astype(np.int32)).sum(axis=1)
        elif args.operation == 'nand':
            hamm[i] = xp.take(pc, xp.invert(xp.bitwise_and(x1i, x2)).astype(np.int32)).sum(axis=1)
        #for j in range(r2):
            #hamm[i, j] = xp.take(pc, xp.bitwise_xor(x1[i], x2[j])).sum()
    x1non0 = xp.tile((x1.sum(axis=1)>0), (r2, 1)).T.astype(np.int32)
    x2non0 = xp.tile((x2.sum(axis=1)>0), (r1, 1)).astype(np.int32)
    print(x1non0.shape, x2non0.shape)
    non0filter = x1non0 * x2non0
    print(non0filter.max(), non0filter.min())
    hamm = non0filter * hamm + np.iinfo(np.int32).max * (1 - non0filter)
    #non0filter *= np.iinfo(np.int32).max
    #hamm *= non0filter
    if xp == cupy:
        hamm = hamm.get()
    #xp.savetxt(args.out, hamm, delimiter=args.delim)
    np.save(args.out, hamm)

    if args.nearest > 0:
        hamm_s = np.sort(hamm.flatten())
        hamm_as = np.argsort(hamm.flatten())
        x, y = np.unravel_index(hamm_as[:args.nearest], hamm.shape)
        fname, ext = os.path.splitext(args.out)
        np.savetxt(fname + '_top{0}.tsv'.format(args.nearest),
            np.concatenate((x[np.newaxis], y[np.newaxis], hamm_s[np.newaxis,:args.nearest]), axis=0).T,
            fmt='%d', delimiter='\t')
Example 12
    def get_keys(self, key_array=None, offset=0, n_keys=None):
        """ Get the ordered list of keys that the combination allows

        :param key_array: \
            Optional array into which the returned keys will be placed
        :type key_array: array-like of int
        :param offset: \
            Optional offset into the array at which to start placing keys
        :type offset: int
        :param n_keys: \
            Optional limit on the number of keys returned. If less than this\
            number of keys are available, only the keys available will be added
        :type n_keys: int
        :return: A tuple of an array of keys and the number of keys added to\
            the array
        :rtype: tuple(array-like of int, int)
        """
        # Get the position of the zeros in the mask - assume 32-bits
        unwrapped_mask = numpy.unpackbits(
            numpy.asarray([self._mask], dtype=">u4").view(dtype="uint8"))
        zeros = numpy.where(unwrapped_mask == 0)[0]

        # If there are no zeros, there is only one key in the range, so
        # return that
        if len(zeros) == 0:
            if key_array is None:
                key_array = numpy.zeros(1, dtype=">u4")
            key_array[offset] = self._base_key
            return key_array, 1

        # We now know how many values there are - 2^len(zeros)
        max_n_keys = 2 ** len(zeros)
        if key_array is not None and len(key_array) < max_n_keys:
            max_n_keys = len(key_array)
        if n_keys is None or n_keys > max_n_keys:
            n_keys = max_n_keys
        if key_array is None:
            key_array = numpy.zeros(n_keys, dtype=">u4")

        # Create a list of 2^len(zeros) keys
        unwrapped_key = numpy.unpackbits(
            numpy.asarray([self._base_key], dtype=">u4").view(dtype="uint8"))

        # for each key, create its key with the idea of a neuron ID being
        # continuous and live at an offset position from the bottom of
        # the key
        for value in range(n_keys):
            key = numpy.copy(unwrapped_key)
            unwrapped_value = numpy.unpackbits(
                numpy.asarray([value], dtype=">u4")
                     .view(dtype="uint8"))[-len(zeros):]
            key[zeros] = unwrapped_value
            key_array[value + offset] = \
                numpy.packbits(key).view(dtype=">u4")[0].item()
        return key_array, n_keys
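The core idiom of get_keys, stand-alone with a hypothetical mask: the zero bits of the 32-bit mask mark the positions free to vary, so a mask with two zero bits yields 2**2 = 4 keys.

mask = 0xFFFFFFFC
unwrapped_mask = numpy.unpackbits(
    numpy.asarray([mask], dtype=">u4").view(dtype="uint8"))
zeros = numpy.where(unwrapped_mask == 0)[0]
assert zeros.tolist() == [30, 31] and 2 ** len(zeros) == 4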
Example 13
def test_unpackbits_count():
    # test complete invertibility of packbits and unpackbits with count
    x = np.array([
        [1, 0, 1, 0, 0, 1, 0],
        [0, 1, 1, 1, 0, 0, 0],
        [0, 0, 1, 0, 0, 1, 1],
        [1, 1, 0, 0, 0, 1, 1],
        [1, 0, 1, 0, 1, 0, 1],
        [0, 0, 1, 1, 1, 0, 0],
        [0, 1, 0, 1, 0, 1, 0],
    ], dtype=np.uint8)

    padded1 = np.zeros(57, dtype=np.uint8)
    padded1[:49] = x.ravel()

    packed = np.packbits(x)
    for count in range(58):
        unpacked = np.unpackbits(packed, count=count)
        assert_equal(unpacked.dtype, np.uint8)
        assert_array_equal(unpacked, padded1[:count])
    for count in range(-1, -57, -1):
        unpacked = np.unpackbits(packed, count=count)
        assert_equal(unpacked.dtype, np.uint8)
        # count -1 because padded1 has 57 instead of 56 elements
        assert_array_equal(unpacked, padded1[:count-1])
    for kwargs in [{}, {'count': None}]:
        unpacked = np.unpackbits(packed, **kwargs)
        assert_equal(unpacked.dtype, np.uint8)
        assert_array_equal(unpacked, padded1[:-1])
    assert_raises(ValueError, np.unpackbits, packed, count=-57)

    padded2 = np.zeros((9, 9), dtype=np.uint8)
    padded2[:7, :7] = x

    packed0 = np.packbits(x, axis=0)
    packed1 = np.packbits(x, axis=1)
    for count in range(10):
        unpacked0 = np.unpackbits(packed0, axis=0, count=count)
        assert_equal(unpacked0.dtype, np.uint8)
        assert_array_equal(unpacked0, padded2[:count, :x.shape[1]])
        unpacked1 = np.unpackbits(packed1, axis=1, count=count)
        assert_equal(unpacked1.dtype, np.uint8)
        assert_array_equal(unpacked1, padded2[:x.shape[1], :count])
    for count in range(-1, -9, -1):
        unpacked0 = np.unpackbits(packed0, axis=0, count=count)
        assert_equal(unpacked0.dtype, np.uint8)
        # count -1 because one extra zero of padding
        assert_array_equal(unpacked0, padded2[:count-1, :x.shape[1]])
        unpacked1 = np.unpackbits(packed1, axis=1, count=count)
        assert_equal(unpacked1.dtype, np.uint8)
        assert_array_equal(unpacked1, padded2[:x.shape[0], :count-1])
    for kwargs in [{}, {'count': None}]:
        unpacked0 = np.unpackbits(packed0, axis=0, **kwargs)
        assert_equal(unpacked0.dtype, np.uint8)
        assert_array_equal(unpacked0, padded2[:-1, :x.shape[1]])
        unpacked1 = np.unpackbits(packed1, axis=1, **kwargs)
        assert_equal(unpacked1.dtype, np.uint8)
        assert_array_equal(unpacked1, padded2[:x.shape[0], :-1])
    assert_raises(ValueError, np.unpackbits, packed0, axis=0, count=-9)
    assert_raises(ValueError, np.unpackbits, packed1, axis=1, count=-9)
Example 14
def test_pack_unpack_order():
    a = np.array([[2], [7], [23]], dtype=np.uint8)
    b = np.unpackbits(a, axis=1)
    assert_equal(b.dtype, np.uint8)
    b_little = np.unpackbits(a, axis=1, bitorder='little')
    b_big = np.unpackbits(a, axis=1, bitorder='big')
    assert_array_equal(b, b_big)
    assert_array_equal(a, np.packbits(b_little, axis=1, bitorder='little'))
    assert_array_equal(b[:,::-1], b_little)
    assert_array_equal(a, np.packbits(b_big, axis=1, bitorder='big'))
    assert_raises(ValueError, np.unpackbits, a, bitorder='r')
    assert_raises(TypeError, np.unpackbits, a, bitorder=10)
Example 15
def makePGA2311AttenSig(attenLvlRight,attenLvlLeft):
    numpts = 16*2 + 2   # one clock cycle is 2 steps and there are 16 data bits. Plus, an extra bit on either side to raise the CS line
    
    # CS load signal 
    loadSig = np.zeros(numpts, dtype=np.uint8)
    loadSig[0] = 1 
    loadSig[-1] = 1  

    # SCLK clock signal (low on first half, high on second half)    
    clkSig = np.zeros(numpts, dtype=np.uint8)
    bitTracker=np.zeros(numpts, dtype=np.uint8)     
    for n in range(0, 16):
        clkSig[n*2+2] = 1
        bitTracker[n*2+2] = n % 8
        
    # SDI Data signal
    dataSig =  np.zeros(numpts, dtype=np.uint8) 
    # first byte: right side
    Nright=255-(31.5+attenLvlRight)*2
    Nright=np.uint8(np.clip(Nright,0,255)) 
    dataR=np.unpackbits(Nright)
    for n in range(0, 8):
        dataSig[n*2+1]=dataR[n]
        dataSig[n*2+2]=dataR[n]
    
    # second byte: left side 
    Nleft=255-(31.5+attenLvlLeft)*2
    Nleft=np.uint8(np.clip(Nleft,0,255)) 
    dataL=np.unpackbits(Nleft)
    for n in range(0, 8):
        dataSig[n*2+16+1]=dataL[n]
        dataSig[n*2+16+2]=dataL[n]

#    print(loadSig)
#    print(clkSig)
#    print(dataSig)
#    print(bitTracker)
#    print('data',data)
      
    # combine the signals together and then form 8-bit numbers
    # bit 0=CS, 1=SDI, 2=SCLK    
    sig = np.zeros(numpts, dtype=np.uint8)
    combinedData=np.transpose(np.vstack((sig,sig,sig,sig,sig,clkSig,dataSig,loadSig)))
#    combinedData=np.transpose(np.vstack((clkSig,dataSig,loadSig)))    
    sig=np.packbits(combinedData,axis=1)
#    print(combinedData.shape, sig.shape)
#    print(combinedData)
#    print(sig)
        
    return sig
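Worked example of the attenuation byte computed above: 0 dB of attenuation gives N = 255 - (31.5 + 0)*2 = 192, which unpacks to 11000000.

N = np.uint8(np.clip(255 - (31.5 + 0) * 2, 0, 255))
assert N == 192
assert np.unpackbits(np.array([N], dtype=np.uint8)).tolist() == [1, 1, 0, 0, 0, 0, 0, 0]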
Example 16
    def to_bit_sequence(self, event):
        """ Creates an array of bits containing the details in the event
        dictionary. This array is then upsampled and converted to float64 to be
        sent down an analog output. Once created, the array is cached to speed
        up future calls.

        Parameters
        ----------
        event: dict
            A dictionary describing the current component event. It should have
            3 keys: name, action, and metadata.

        Returns
        -------
        The array of bits expressed as analog values
        """

        key = (event["name"], event["action"], event["metadata"])
        # Check if the bit string is already stored
        if key in self.map_to_bit:
            return self.map_to_bit[key]

        trim = lambda ss, l: ss.ljust(l)[:l]
        # Set up uint8 arrays where strings are converted to integers using ord
        name_array = np.array(list(map(ord, trim(event["name"], self.name_bytes))),
                              dtype=np.uint8)
        action_array = np.array(list(map(ord, trim(event["action"],
                                                   self.action_bytes))),
                                dtype=np.uint8)

        # Add the metadata array if a value was passed
        if event["metadata"] is not None:
            metadata_array = np.array(list(map(ord, trim(event["metadata"],
                                                         self.metadata_bytes))),
                                      dtype=np.uint8)
        else:
            metadata_array = np.array([], dtype=np.uint8)

        sequence = ([True] +
                    np.unpackbits(name_array).astype(bool).tolist() +
                    np.unpackbits(action_array).astype(bool).tolist() +
                    np.unpackbits(metadata_array).astype(bool).tolist() +
                    [False])
        sequence = np.repeat(sequence, self.upsample_factor).astype("float64")
        sequence *= self.scaling

        self.map_to_bit[key] = sequence

        return sequence
Example 17
 def text2bits(self, filename):
     # Given a text file, convert to bits
     with open(filename, 'r') as f:
         lines = f.read().split('\n')
     bit_array = np.array([], bool)
     for i, line in enumerate(lines):
         for ch in line:
             int_val = ord(ch)
             char_bits = np.unpackbits(np.array([int_val], dtype=np.uint8))
             bit_array = np.append(bit_array, char_bits) 
         if i != (len(lines) - 1):
             n_int_val = ord('\n')
             n_char_bits = np.unpackbits(np.array([n_int_val], dtype=np.uint8))
             bit_array = np.append(bit_array, n_char_bits)
     return bit_array            
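The per-character idiom in isolation: ord('A') is 65, which unpacks MSB-first to 01000001.

char_bits = np.unpackbits(np.array([ord("A")], dtype=np.uint8))
assert char_bits.tolist() == [0, 1, 0, 0, 0, 0, 0, 1]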
Example 18
def _get_voc_color_map(n=256):
    color_map = np.zeros((n, 3))
    for i in range(n):
        r = b = g = 0
        cid = i
        for j in range(0, 8):
            r = np.bitwise_or(r, np.left_shift(np.unpackbits(np.array([cid], dtype=np.uint8))[-1], 7-j))
            g = np.bitwise_or(g, np.left_shift(np.unpackbits(np.array([cid], dtype=np.uint8))[-2], 7-j))
            b = np.bitwise_or(b, np.left_shift(np.unpackbits(np.array([cid], dtype=np.uint8))[-3], 7-j))
            cid = np.right_shift(cid, 3)

        color_map[i][0] = r
        color_map[i][1] = g
        color_map[i][2] = b
    return color_map
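A sanity check against the standard VOC palette, assuming the function above: class 0 is black, and class 1 maps to (128, 0, 0) because only the LSB of the class id is set and it lands in bit 7 of the red channel.

color_map = _get_voc_color_map()
assert tuple(color_map[0]) == (0, 0, 0)
assert tuple(color_map[1]) == (128, 0, 0)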
Example 19
def unpackbits_axis(x, axis=-1, axissize=None):
    """Inverse of packbits_axis

    Parameters
    ----------
    x : ndarray
        record array of any shape, with multiple data of type uint8
    axissize : integer
        max size of expanded axis. Default is 8 * len(x.dtype)

    Returns
    -------
    X : ndarray
        array of shape x.shape[:axis] + (8 * d,) + x.shape[axis:]
        where d is the number of unsigned ints in each element of the
        record array.
    """
    assert all(x.dtype[i] == np.uint8 for i in range(len(x.dtype)))
    X = np.ndarray(x.shape + (len(x.dtype),), dtype=np.uint8, buffer=x)
    X = np.unpackbits(X, -1)

    if axissize is not None:
        slices = [slice(None) for i in range(X.ndim)]
        slices[-1] = slice(0, axissize)
        X = X[tuple(slices)]  # index with a tuple; list indexing is an error in modern NumPy

    return np.rollaxis(X, -1, axis)
Example 20
 def _read_data(self, fh, byteorder='>'):
     """Return image data from open file as numpy array."""
     fh.seek(len(self.header))
     data = fh.read()
     dtype = 'u1' if self.maxval < 256 else byteorder + 'u2'
     depth = 1 if self.magicnum == b"P7 332" else self.depth
     shape = [-1, self.height, self.width, depth]
     size = functools.reduce(operator.mul, shape[1:], 1)  # prod()
     if self.magicnum in b"P1P2P3":
         data = numpy.array(data.split(None, size)[:size], dtype)
         data = data.reshape(shape)
     elif self.maxval == 1:
         shape[2] = int(math.ceil(self.width / 8))
         data = numpy.frombuffer(data, dtype).reshape(shape)
         data = numpy.unpackbits(data, axis=-2)[:, :, :self.width, :]
     else:
         size *= numpy.dtype(dtype).itemsize
         data = numpy.frombuffer(data[:size], dtype).reshape(shape)
     if data.shape[0] < 2:
         data = data.reshape(data.shape[1:])
     if data.shape[-1] < 2:
         data = data.reshape(data.shape[:-1])
     if self.magicnum == b"P7 332":
         rgb332 = numpy.array(list(numpy.ndindex(8, 8, 4)), numpy.uint8)
         rgb332 *= [36, 36, 85]
         data = numpy.take(rgb332, data, axis=0)
     return data
Example 21
 def _unpack_glyph(
         face, code_height, code_width, req_height, req_width,
         force_double, force_single, carry_col_9, carry_row_9
     ):
     """Convert byte list to glyph pixels, numpy implementation."""
     glyph = numpy.unpackbits(face, axis=0).reshape((code_height, code_width)).astype(bool)
     # repeat last rows (e.g. for 9-bit high chars)
     if req_height > glyph.shape[0]:
         if carry_row_9:
             repeat_row = glyph[-1]
         else:
             repeat_row = numpy.zeros((1, code_width), dtype=numpy.uint8)
         while req_height > glyph.shape[0]:
             glyph = numpy.vstack((glyph, repeat_row))
     if force_double:
         glyph = glyph.repeat(2, axis=1)
     elif force_single:
         glyph = glyph[:, ::2]
     # repeat last cols (e.g. for 9-bit wide chars)
     if req_width > glyph.shape[1]:
         if carry_col_9:
             repeat_col = numpy.atleast_2d(glyph[:,-1]).T
         else:
             repeat_col = numpy.zeros((code_height, 1), dtype=numpy.uint8)
         while req_width > glyph.shape[1]:
             glyph = numpy.hstack((glyph, repeat_col))
     return glyph
Example 22
    def get_all_distance_and_image(self, frac_coords1, frac_coords2):
        """
        Gets distance between two frac_coords and nearest periodic images.

        Args:
            frac_coords1 (3x1 array): Reference fractional coords to get distance from.
            frac_coords2 (3x1 array): fractional coords to get distance to.

        Returns:
            [(distance, jimage)] List of distance and periodic lattice
            translations of the other site for which the distance applies.
            This means that the distance between frac_coords1 and (jimage +
            frac_coords2) is equal to distance.
        """
        #The following code is heavily vectorized to maximize speed.
        #Get the image adjustment necessary to bring coords to unit_cell.
        adj1 = np.floor(frac_coords1)
        adj2 = np.floor(frac_coords2)
        #Shift coords to unitcell
        coord1 = frac_coords1 - adj1
        coord2 = frac_coords2 - adj2
        # Generate set of images required for testing.
        # This is a cheat to create an 8x3 array of all length 3
        # combinations of 0,1
        test_set = np.unpackbits(np.array([5, 57, 119],
                                          dtype=np.uint8)).reshape(8, 3)
        images = np.copysign(test_set, coord1 - coord2)
        # Create tiled cartesian coords for computing distances.
        vec = np.tile(coord2 - coord1, (8, 1)) + images
        vec = self.get_cartesian_coords(vec)
        # Compute distances manually.
        dist = np.sqrt(np.sum(vec ** 2, 1)).tolist()
        return list(zip(dist, adj1 - adj2 + images))
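Why the magic constants [5, 57, 119] work: their 24 unpacked bits, read three at a time, enumerate every 0/1 triple in order.

test_set = np.unpackbits(np.array([5, 57, 119], dtype=np.uint8)).reshape(8, 3)
assert test_set.tolist() == [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],
                             [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]]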
Example 23
    def _read_bucket(self, doc, column_set, column_dtypes, include_symbol, include_images, columns):
        rtn = {}
        if doc[VERSION] != 3:
            raise ArcticException("Unhandled document version: %s" % doc[VERSION])
        rtn[INDEX] = np.cumsum(np.frombuffer(lz4.decompress(doc[INDEX]), dtype='uint64'))
        doc_length = len(rtn[INDEX])
        rtn_length = len(rtn[INDEX])
        if include_symbol:
            rtn['SYMBOL'] = [doc[SYMBOL], ] * rtn_length
        column_set.update(doc[COLUMNS].keys())
        for c in column_set:
            try:
                coldata = doc[COLUMNS][c]
                dtype = np.dtype(coldata[DTYPE])
                values = np.frombuffer(lz4.decompress(coldata[DATA]), dtype=dtype)
                self._set_or_promote_dtype(column_dtypes, c, dtype)
                rtn[c] = self._empty(rtn_length, dtype=column_dtypes[c])
                rowmask = np.unpackbits(np.frombuffer(lz4.decompress(coldata[ROWMASK]),
                                                      dtype='uint8'))[:doc_length].astype('bool')
                rtn[c][rowmask] = values
            except KeyError:
                rtn[c] = None

        if include_images and doc.get(IMAGE_DOC, {}).get(IMAGE, {}):
            rtn = self._prepend_image(rtn, doc[IMAGE_DOC], rtn_length, column_dtypes, column_set, columns)
        return rtn
Example 24
 def test_001_t (self):
     # set up fg
     phr = np.random.randint(0,2,size=(12,))
     data = np.array(np.random.randint(0,256, size=(6*3,)))
     data_bin = np.unpackbits(np.array(data,dtype=np.uint8))
     self.src = blocks.vector_source_b(data, False, 1, [])
     self.s2ts = blocks.stream_to_tagged_stream(gr.sizeof_char, 1, 6, "packet_len")
     self.ts2pdu = blocks.tagged_stream_to_pdu(blocks.byte_t, "packet_len")
     self.pref = ieee802_15_4.phr_prefixer(phr)
     self.pdu2ts = blocks.pdu_to_tagged_stream(blocks.byte_t, "packet_len")
     self.snk = blocks.vector_sink_b(1)
     self.tb.connect(self.src, self.s2ts, self.ts2pdu)
     self.tb.msg_connect(self.ts2pdu, "pdus", self.pref, "in")
     self.tb.msg_connect(self.pref, "out", self.pdu2ts, "pdus")
     self.tb.connect(self.pdu2ts, self.snk)
     self.tb.start()
     time.sleep(1)
     self.tb.stop()
     # check data
     data_out = self.snk.data()
     # print "input:"
     # for i in data:
     # 	print i
     # print "output:"
     # for i in data_out:
     # 	print data_out
     expected_output = np.concatenate((phr,data_bin[0:6*8], phr, data_bin[6*8:12*8], phr, data_bin[12*8:18*8]))
     self.assertFloatTuplesAlmostEqual(data_out, expected_output)
Example 25
    def convert_v2_to_tuple(self, content):
        """
            Convert v2 binary training data to packed tensors

            v2 struct format is
                int32 ver
                float probs[19*19+1]
                byte planes[19*19*16/8]
                byte to_move
                byte winner

            packed tensor formats are
                float32 winner
                float32*362 probs
                uint8*6498 planes
        """
        (ver, probs, planes, to_move, winner) = self.v2_struct.unpack(content)
        # Unpack planes.
        planes = np.unpackbits(np.frombuffer(planes, dtype=np.uint8))
        assert len(planes) == 19*19*16
        # Now we add the two final planes, being the 'color to move' planes.
        stm = to_move
        assert stm == 0 or stm == 1
        # Flatten all planes to a single byte string
        planes = planes.tobytes() + self.flat_planes[stm]
        assert len(planes) == (18 * 19 * 19), len(planes)

        winner = float(winner * 2 - 1)
        assert winner == 1.0 or winner == -1.0, winner
        winner = struct.pack('f', winner)

        return (planes, probs, winner)
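Size arithmetic implied by the v2 layout above: 16 planes of 19*19 bits pack into 722 bytes, and the 362 float32 probabilities take 1448 bytes.

assert 19 * 19 * 16 // 8 == 722
assert (19 * 19 + 1) * 4 == 1448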
Example 26
def dqt(ud16,lL0):
    """ decode quad tree integer to lon Lat

    Parameters
    ----------

    lL : nd.array (2xN)
        longitude Latitude
    lL0 : nd.array (,2)
        lower left corner of the 1degree tile

    """

    N = len(ud16)
    # offset from the lower left corner
    #d = lL-lL0[:,None]
    #dui8 = np.floor(d*256).astype('uint8')
    uh8  = ud16 // 256  # floor division; true division would make ul8 identically zero
    ul8  = ud16-uh8*256
    ud8 =  (np.vstack((uh8,ul8)).T).astype('uint8')
    ud16 = np.unpackbits(ud8).reshape(N,16)
    ndu8 = np.empty((2,N,8)).astype('int')
    ndu8[0,:,:]=ud16[:,1::2]
    ndu8[1,:,:]=ud16[:,0::2]
    du8 = np.packbits(ndu8).reshape(2,N)/256.
    lL = lL0[:,None]+du8

    return(lL)
Example 27
    def convert_v1_to_v2(self, text_item):
        """
            Convert v1 text format to v2 packed binary format

            Converts a set of 19 lines of text into a byte string
            [[plane_1],[plane_2],...],...
            [probabilities],...
            winner,...
        """
        # We start by building a list of 16 planes,
        # each being a 19*19 == 361 element array
        # of type np.uint8
        planes = []
        for plane in range(0, 16):
            # The first 360 bits are 90 hex chars, encoded MSB first
            hex_string = text_item[plane][0:90]
            array = np.unpackbits(np.frombuffer(
                bytearray.fromhex(hex_string), dtype=np.uint8))
            # Remaining bit that didn't fit. Encoded LSB so
            # it needs to be specially handled.
            last_digit = text_item[plane][90]
            if not (last_digit == "0" or last_digit == "1"):
                return False, None
            # Apply symmetry and append
            planes.append(array)
            planes.append(np.array([last_digit], dtype=np.uint8))

        # We flatten to a single array of len 16*19*19, type=np.uint8
        planes = np.concatenate(planes)
        # and then to a byte string
        planes = np.packbits(planes).tobytes()

        # Get the 'side to move'
        stm = text_item[16][0]
        if not(stm == "0" or stm == "1"):
            return False, None
        stm = int(stm)

        # Load the probabilities.
        probabilities = np.array(text_item[17].split()).astype(np.float32)
        if np.any(np.isnan(probabilities)):
            # Work around a bug in leela-zero v0.3, skipping any
            # positions that have a NaN in the probabilities list.
            return False, None
        if not(len(probabilities) == 362):
            return False, None

        probs = probabilities.tobytes()
        if not(len(probs) == 362 * 4):
            return False, None

        # Load the game winner color.
        winner = float(text_item[18])
        if not(winner == 1.0 or winner == -1.0):
            return False, None
        winner = int((winner + 1) / 2)

        version = struct.pack('i', 1)

        return True, self.v2_struct.pack(version, probs, planes, stm, winner)
Example 28
    def test_payload_basics(self):
        assert self.payload.complex_data is False
        assert self.payload.sample_shape == (2,)
        assert self.payload.bps == 8
        assert self.payload.nbytes == 8
        assert self.payload.shape == (4, 2)
        assert self.payload.size == 8
        assert self.payload.ndim == 2
        assert np.all(self.payload.data.ravel() ==
                      self.payload.words.view(np.int8))
        assert np.all(np.array(self.payload).ravel() ==
                      self.payload.words.view(np.int8))
        assert np.all(np.array(self.payload, dtype=np.int8).ravel() ==
                      self.payload.words.view(np.int8))
        payload = self.Payload(self.payload.words, bps=4)
        with pytest.raises(KeyError):
            payload.data
        with pytest.raises(ValueError):
            self.Payload(self.payload.words.astype('>u4'), bps=4)
        payload = self.Payload(self.payload.words, bps=8, complex_data=True)
        assert np.all(payload.data ==
                      self.payload.data[:, 0] + 1j * self.payload.data[:, 1])

        assert self.payload1bit.complex_data is True
        assert self.payload1bit.sample_shape == (5,)
        assert self.payload1bit.bps == 1
        assert self.payload1bit.shape == (16, 5)
        assert self.payload1bit.nbytes == 20
        assert np.all(self.payload1bit.data.ravel() ==
                      np.unpackbits(self.payload1bit.words.view(np.uint8))
                      .astype(np.float32).view(np.complex64))
Example 29
def generate_lines(file):
    pattern = load_pattern()

    line = np.zeros((42,), dtype=np.uint8)

    # constant bytes. can be used for horizontal alignment.
    line[0] = 0x18
    line[1 + pattern_length] = 0x18
    line[41] = 0x18

    offset = 0
    while True:
        # insert pattern slice into line
        line[1:1 + pattern_length] = pattern[offset:offset + pattern_length]

        # encode the offset for maximum readability
        offset_list = [(offset >> n) & 0xff for n in range(0, 24, 8)]
        # add a checksum
        offset_list.append(checksum(offset_list))
        # convert to a list of bits, LSB first
        offset_arr = np.array(offset_list, dtype=np.uint8)
        # repeat each bit 3 times, then convert back in to t42 bytes
        offset_arr = np.packbits(np.repeat(np.unpackbits(offset_arr[::-1])[::-1], 3)[::-1])[::-1]

        # insert encoded offset into line
        line[2 + pattern_length:14 + pattern_length] = offset_arr

        # calculate next offset for maximum distance
        offset += 65521  # greatest prime less than 2097152/32
        offset &= 0x1fffff  # mod 2097152

        # write to stdout
        file.write(line.tobytes())
Example 30
    def v2_apply_symmetry(self, symmetry, content):
        """
            Apply a random symmetry to a v2 record.
        """
        assert symmetry >= 0 and symmetry < 8

        # unpack the record.
        (ver, probs, planes, to_move, winner) = self.v2_struct.unpack(content)

        planes = np.unpackbits(np.frombuffer(planes, dtype=np.uint8))
        # We use the full length reflection tables to apply symmetry
        # to all 16 planes simultaneously
        planes = planes[self.full_reflection_table[symmetry]]
        assert len(planes) == 19*19*16
        planes = np.packbits(planes)
        planes = planes.tobytes()

        probs = np.frombuffer(probs, dtype=np.float32)
        # Apply symmetries to the probabilities.
        probs = probs[self.prob_reflection_table[symmetry]]
        assert len(probs) == 362
        probs = probs.tobytes()

        # repack record.
        return self.v2_struct.pack(ver, probs, planes, to_move, winner)
Example 31
xmax = 6
Nbins = 2
xedges = np.linspace(xmin, xmax, Nbins + 1)

print("INFO: bin edges (%i):" % Nbins)
print(xedges)

# Set here the truth-level distribution
# smallest dtype is uint8, i.e. [0-255]
x = [5, 10]
x = np.array(x, dtype='uint8')  # (2,)
print("INFO: x decimal representation:", x.shape)
print(x)

# convert to bit representation
x_b = np.unpackbits(x)  # (2)*8 = (16)
print("INFO: x binary representation:", x_b.shape)
print(x_b)

# Response matrix
R = [[3, 1], [1, 2]]
R = np.array(R, dtype='uint8')  # (2,2)
print("INFO: Response matrix:", R.shape)
print(R)

R_b = d2b(R)
print("INFO: R binary representation:", R_b.shape)
print(R_b)

y = np.dot(R, x)
#y = np.array(y, dtype='uint8')
Example 32
    with open('mars.txt', 'r') as f:
        text = f.read()
    key = 'INFORMATIKA2018'
    key_ = 'INFORMATIKA2017'
    print(len(text))
    bits = text_to_bits(text)
    print(bits)
    print(bits_to_text(bits))

    # File names
    in_name = 'lena.png'
    out_name = 'lena_out.png'

    # Read data and convert to a list of bits
    in_bytes = np.fromfile(in_name, dtype="uint8")
    in_bits = np.unpackbits(in_bytes)
    data = list(in_bits)
    text = bits_to_text(data)

    print('\nECB:')
    enc = ECB(text, key)
    hex_enc = text_to_hex(enc)
    write_to('enc.txt', hex_enc)
    start = time.time()
    data = ECB(enc, key, True)
    end = time.time()
    # plot_encryption(text_to_bits(text), text_to_bits(enc))
    print(end - start)

    # Convert the list of bits back to bytes and save
    out_bits = np.array(data)
Example 33
import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import pickle

# load data
file_name = "Ising2DFM_reSample_L40_T=All.pkl"  # this file contains 16*10000 samples taken in T=np.arange(0.25,4.0001,0.25)
data = pickle.load(
    open("./" + file_name, 'rb')
)  # pickle reads the file and returns the Python object (1D array, compressed bits)
data = np.unpackbits(data).reshape(
    -1, 1600)  # Decompress array and reshape for convenience
data = data.astype('int')

file_name = "Ising2DFM_reSample_L40_T=All_labels.pkl"  # this file contains 16*10000 samples taken in T=np.arange(0.25,4.0001,0.25)
labels = pickle.load(
    open("./" + file_name, 'rb')
)  # pickle reads the file and returns the Python object (here just a 1D array with the binary labels)

# divide data into ordered, critical and disordered
X_ordered = data[:70000, :]
Y_ordered = labels[:70000]

X_critical = data[70000:100000, :]
Y_critical = labels[70000:100000]

X_disordered = data[100000:, :]
Y_disordered = labels[100000:]

X = np.concatenate((X_ordered, X_disordered))
Example 34
def reverse_expand_bits(plane):
    return np.unpackbits(np.array([plane], dtype=np.uint8))[::-1].astype(
        np.float32).tobytes()
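Example: plane value 1 unpacks MSB-first to 00000001, the [::-1] puts the LSB first, and the result is serialized as eight float32 values.

buf = reverse_expand_bits(1)
assert np.frombuffer(buf, dtype=np.float32).tolist() == [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]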
Example 35
plt.ylabel("Cross correlation @ nIFFT")
plt.plot(cc)
plt.axvline(x=offset,color='g')

plt.figure()
plt.title("Sum of the square of the imaginary parts of the pilots")
plt.xlabel("Relative sample index")
plt.ylabel("Sum(imag(pilots)^2)")
plt.plot(np.arange(-searchRangeForPilotPeak,searchRangeForPilotPeak),sumofimag)
print("Symbol start sample index =",offset)

ofdm.initDecode(complex_signal,offset)
sig_sym = (Npixels-1+nbytes)//nbytes
            
rx_byte = np.empty(0, dtype='uint8')

rx_byte = np.uint8([ofdm.decode()[0] for i in range(sig_sym)]).ravel()

rx_im = rx_byte[0:Npixels].reshape(tx_im.size[1],tx_im.size[0])

plt.figure()
plt.title("Decoded image")
plt.imshow(rx_im, cmap='gray')

tx_bin = np.unpackbits(tx_byte.flatten())
rx_bin = np.unpackbits(rx_byte[0:Npixels])
ber = (rx_bin ^ tx_bin).sum()/tx_bin.size
print('ber= ', ber)

plt.show()
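The BER idiom above in isolation: unpack both byte streams to bits, XOR them, and divide the popcount by the number of bits; one flipped bit in a single byte gives a BER of 1/8.

tx_bits = np.unpackbits(np.array([0b10101010], dtype=np.uint8))
rx_bits = np.unpackbits(np.array([0b10101011], dtype=np.uint8))
assert (tx_bits ^ rx_bits).sum() / tx_bits.size == 0.125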
Example 36
def get_pixeldata(dicom_dataset):
    """If NumPy is available, return an ndarray of the Pixel Data.
    Raises
    ------
    TypeError
        If there is no Pixel Data or not a supported data type.
    ImportError
        If NumPy isn't found
    NotImplementedError
        if the transfer syntax is not supported
    AttributeError
        if the decoded amount of data does not match the expected amount
    Returns
    -------
    numpy.ndarray
       The contents of the Pixel Data element (7FE0,0010) as an ndarray.
    """
    if (dicom_dataset.file_meta.TransferSyntaxUID
            not in NumpySupportedTransferSyntaxes):
        raise NotImplementedError("Pixel Data is compressed in a "
                                  "format pydicom does not yet handle. "
                                  "Cannot return array. Pydicom might "
                                  "be able to convert the pixel data "
                                  "using GDCM if it is installed.")

    if not have_numpy:
        msg = ("The Numpy package is required to use pixel_array, and "
               "numpy could not be imported.")
        raise ImportError(msg)
    if 'PixelData' not in dicom_dataset:
        raise TypeError("No pixel data found in this dataset.")

    # Make NumPy format code, e.g. "uint16", "int32" etc
    # from two pieces of info:
    # dicom_dataset.PixelRepresentation -- 0 for unsigned, 1 for signed;
    # dicom_dataset.BitsAllocated -- 8, 16, or 32
    if dicom_dataset.BitsAllocated == 1:
        # single bits are used for representation of binary data
        format_str = 'uint8'
    elif dicom_dataset.PixelRepresentation == 0:
        format_str = 'uint{}'.format(dicom_dataset.BitsAllocated)
    elif dicom_dataset.PixelRepresentation == 1:
        format_str = 'int{}'.format(dicom_dataset.BitsAllocated)
    else:
        format_str = 'bad_pixel_representation'
    try:
        numpy_dtype = numpy.dtype(format_str)
    except TypeError:
        msg = ("Data type not understood by NumPy: "
               "format='{}', PixelRepresentation={}, "
               "BitsAllocated={}".format(format_str,
                                         dicom_dataset.PixelRepresentation,
                                         dicom_dataset.BitsAllocated))
        raise TypeError(msg)

    if dicom_dataset.is_little_endian != sys_is_little_endian:
        numpy_dtype = numpy_dtype.newbyteorder('S')

    pixel_bytearray = dicom_dataset.PixelData

    if dicom_dataset.BitsAllocated == 1:
        # if single bits are used for binary representation, a uint8 array
        # has to be converted to a binary-valued array (that is 8 times bigger)
        try:
            pixel_array = numpy.unpackbits(
                numpy.frombuffer(pixel_bytearray, dtype='uint8'))
        except NotImplementedError:
            # PyPy2 does not implement numpy.unpackbits
            raise NotImplementedError(
                'Cannot handle BitsAllocated == 1 on this platform')
    else:
        pixel_array = numpy.frombuffer(pixel_bytearray, dtype=numpy_dtype)
    length_of_pixel_array = pixel_array.nbytes
    expected_length = dicom_dataset.Rows * dicom_dataset.Columns
    if ('NumberOfFrames' in dicom_dataset
            and dicom_dataset.NumberOfFrames > 1):
        expected_length *= dicom_dataset.NumberOfFrames
    if ('SamplesPerPixel' in dicom_dataset
            and dicom_dataset.SamplesPerPixel > 1):
        expected_length *= dicom_dataset.SamplesPerPixel
    if dicom_dataset.BitsAllocated > 8:
        expected_length *= (dicom_dataset.BitsAllocated // 8)
    padded_length = expected_length
    if expected_length & 1:
        padded_length += 1
    if length_of_pixel_array != padded_length:
        raise AttributeError("Amount of pixel data %d does not "
                             "match the expected data %d" %
                             (length_of_pixel_array, padded_length))
    if expected_length != padded_length:
        pixel_array = pixel_array[:expected_length]
    if should_change_PhotometricInterpretation_to_RGB(dicom_dataset):
        dicom_dataset.PhotometricInterpretation = "RGB"
    if dicom_dataset.Modality.lower().find('ct') >= 0:  # CT images: convert stored values to CT numbers
        pixel_array = pixel_array * dicom_dataset.RescaleSlope + dicom_dataset.RescaleIntercept  # apply the rescale to get CT values
    pixel_array = pixel_array.reshape(
        dicom_dataset.Rows,
        dicom_dataset.Columns * dicom_dataset.SamplesPerPixel)
    return pixel_array, dicom_dataset.Rows, dicom_dataset.Columns
Example 37
#     root='./data', train=True, transform=data_tf, download=True)
train_data = torchvision.datasets.MNIST(
    root='./mnist/',
    train=True,  # this is training data
    transform=data_tf,  # Converts a PIL.Image or numpy.ndarray to
    # torch.FloatTensor of shape (C x H x W) and normalize in the range [0.0, 1.0]
    download=True,
)


# #plot one example
loaded = train_data.train_data.numpy()  # convert the tensor to a numpy array
trX = loaded.reshape((60000*28*28, 1))
trX = np.array(trX)
# unpack the data into bits
Xp = np.unpackbits(trX, axis=1)     # binary expansion
# ×××××××××××××××××××××××××××××××××××××××××××××××××××××××××
# build the coefficient matrix
beta = np.zeros((60000*784, 8), dtype=np.uint8)
beta = np.array(beta)

for i in range(0, 8):
    beta[:, i] = 2**(8-i-1)
# ××××××××××××××××××××××××××××××××××××××××××××××××××××××××
# elementwise product of the coefficient matrix and the bit matrix
Xp_beta = np.multiply(Xp, beta)
alpha = np.load('/home/alexrich/MNIST/coef7.npy')
trX_recov = np.dot(Xp_beta, alpha)
trX_recov = trX_recov.reshape(60000, 28, 28)
trX_recov = trX_recov.astype(np.uint8)
trX_recov = torch.from_numpy(trX_recov)
Example 38
    def _save_csv(self, trace_csv, start_pos, stop_pos):
        """Parse the input data and generate a `*.csv` file.

        This method can be used along with the DMA. The input data is assumed
        to be 64-bit. The generated `*.csv` file can be then used as the trace
        file.

        This method also returns the wavelanes based on the given positions.
        The data output has a similar format as `analyze()`:

        [{'name': '', 'pin': 'D1', 'wave': '1...0.....'},
         {'name': '', 'pin': 'D2', 'wave': '0.1..01.01'}]

        Note
        ----
        The `trace_csv` file will be put into the specified path, or in the 
        working directory in case the path does not exist.

        Parameters
        ----------
        trace_csv : str
            Name of the output file (`*.csv`) which can be opened in 
            text editor.
        start_pos : int
            Starting sample number, no less than 1.
        stop_pos : int
            Stopping sample number, no more than the maximum number of samples.

        Returns
        -------
        list
            A list of dictionaries, each dictionary consisting of the pin
            number and the waveform pattern in string format.

        """
        if not self.probes:
            raise ValueError("Must set probes before parsing samples.")

        if not 1 <= start_pos <= stop_pos <= MAX_NUM_TRACE_SAMPLES:
            raise ValueError("Start or stop position out of range "
                             "[1, {}].".format(MAX_NUM_TRACE_SAMPLES))

        if os.path.isdir(os.path.dirname(trace_csv)):
            trace_csv_abs = trace_csv
        else:
            trace_csv_abs = os.getcwd() + '/' + trace_csv

        if os.system('rm -rf ' + trace_csv_abs):
            raise RuntimeError("Cannot remove old trace_csv file.")

        tri_state_pins, _, _ = \
            get_tri_state_pins(self.intf_spec['traceable_inputs'],
                               self.intf_spec['traceable_outputs'],
                               self.intf_spec['traceable_tri_states'])
        self.num_decoded_samples = stop_pos - start_pos
        temp_bytes = np.frombuffer(self.samples[start_pos:stop_pos],
                                   dtype=np.uint8)
        bit_array = np.unpackbits(temp_bytes)
        temp_lanes = bit_array.reshape(self.num_decoded_samples,
                                       self.intf_spec['monitor_width']).T[::-1]

        wavelanes = list()
        temp_samples = None
        for index, pin_name in enumerate(self.probes.keys()):
            pin_label = self.probes[pin_name]
            output_lane = temp_lanes[self.intf_spec['traceable_outputs']
                                     [pin_label]]
            input_lane = temp_lanes[self.intf_spec['traceable_inputs']
                                    [pin_label]]
            tri_lane = temp_lanes[self.intf_spec['traceable_tri_states']
                                  [pin_label]]
            cond_list = [tri_lane == 0, tri_lane == 1]
            choice_list = [output_lane, input_lane]
            temp_lane = np.select(cond_list, choice_list)

            bitstring = ''.join(temp_lane.astype(str).tolist())
            wave = bitstring_to_wave(bitstring)
            wavelanes.append({
                'name': pin_name,
                'pin': pin_label,
                'wave': wave
            })

            temp_sample = temp_lane.reshape(-1, 1)
            if index == 0:
                temp_samples = deepcopy(temp_sample)
            else:
                temp_samples = np.concatenate((temp_samples, temp_sample),
                                              axis=1)

        np.savetxt(trace_csv_abs, temp_samples, fmt='%d', delimiter=',')
        self.trace_csv = trace_csv_abs
        self.trace_sr = ''

        return wavelanes
Example 39
    def analyze(self, steps):
        """Analyze the captured pattern.

        This function will process the captured pattern and put the pattern
        into a Wavedrom compatible format.

        Each bit of the 20-bit patterns, from LSB to MSB, corresponds to:
        D0, D1, ..., D18 (A4), D19 (A5), respectively.

        The data output is of format:

        [{'name': '', 'pin': 'D1', 'wave': '1...0.....'},
         {'name': '', 'pin': 'D2', 'wave': '0.1..01.01'}]

        Note that all the lanes should have the same number of samples.

        Note
        ----
        The first sample captured is a dummy sample (for both pattern generator
        and FSM generator), therefore we have to discard the first sample.

        Parameters
        ----------
        steps : int
            Number of samples to analyze, if it is non-zero, it means the 
            generator is working in the `step()` mode.

        Returns
        -------
        list
            A list of dictionaries, each dictionary consisting of the pin
            number and the waveform pattern in string format.

        """
        tri_state_pins, non_tri_inputs, non_tri_outputs = \
            get_tri_state_pins(self.intf_spec['traceable_inputs'],
                               self.intf_spec['traceable_outputs'],
                               self.intf_spec['traceable_tri_states'])
        trace_bit_width = self.intf_spec['monitor_width']
        trace_byte_width = round(trace_bit_width / 8)

        samples = self.logictools_controller.ndarray_from_buffer(
            'trace_buf', (1 + self.num_analyzer_samples) * trace_byte_width,
            dtype=BYTE_WIDTH_TO_NPTYPE[trace_byte_width])

        # Exclude the first dummy sample when not in step()
        if steps == 0:
            num_valid_samples = len(samples) - 1
            self.samples = np.zeros(num_valid_samples, dtype='>i8')
            np.copyto(self.samples, samples[1:])
        else:
            num_valid_samples = 1
            self.samples = np.zeros(num_valid_samples, dtype='>i8')
            np.copyto(self.samples, samples[0])
        temp_bytes = np.frombuffer(self.samples, dtype=np.uint8)
        bit_array = np.unpackbits(temp_bytes)
        temp_lanes = bit_array.reshape(num_valid_samples,
                                       self.intf_spec['monitor_width']).T[::-1]

        wavelanes = list()
        # Adding tri-state captures
        for pin_label in tri_state_pins:
            output_lane = temp_lanes[self.intf_spec['traceable_outputs']
                                     [pin_label]]
            input_lane = temp_lanes[self.intf_spec['traceable_inputs']
                                    [pin_label]]
            tri_lane = temp_lanes[self.intf_spec['traceable_tri_states']
                                  [pin_label]]
            cond_list = [tri_lane == 0, tri_lane == 1]
            choice_list = [output_lane, input_lane]
            temp_lane = np.select(cond_list, choice_list)
            bitstring = ''.join(temp_lane.astype(str).tolist())
            wave = bitstring_to_wave(bitstring)
            wavelanes.append({'name': '', 'pin': pin_label, 'wave': wave})

        # Adding non tri-state captures
        for pin_label in non_tri_inputs:
            temp_lane = temp_lanes[self.intf_spec['traceable_inputs']
                                   [pin_label]]
            bitstring = ''.join(temp_lane.astype(str).tolist())
            wave = bitstring_to_wave(bitstring)
            wavelanes.append({'name': '', 'pin': pin_label, 'wave': wave})

        for pin_label in non_tri_outputs:
            temp_lane = temp_lanes[self.intf_spec['traceable_outputs']
                                   [pin_label]]
            bitstring = ''.join(temp_lane.astype(str).tolist())
            wave = bitstring_to_wave(bitstring)
            wavelanes.append({'name': '', 'pin': pin_label, 'wave': wave})

        return wavelanes
Example 40
 def to_numpy(self, planes):
     return np.unpackbits(np.array(planes, dtype='>u8').view(
         np.uint8)).view(np.float32)
Example 41
def _stack_h5dump(data, hdr_info, saving_path, raw_binary=False):
    """
    Incremental reading of a large stack dask array object and saving it in a h5 file.

    Parameters
    ----------
    data: dask array object
    hdr_info: dict, header info parsed by the parse_hdr function
    saving_path: str, h5 file name and path
    raw_binary: default False - Need to be True for binary RAW data

    Returns
    -------
    None
    """
    stack_num = 100
    hdr_bits = get_hdr_bits(hdr_info)
    width = hdr_info['width']
    height = hdr_info['height']
    width_height = width * height
    if raw_binary is True:
        # RAW 1 bit data: the header bits are written as uint8 but the frames
        # are binary and need to be unpacked as such.
        data = data.reshape(-1, int(width_height / 8 + hdr_bits))
    else:
        data = data.reshape(-1, int(width_height + hdr_bits))

    data = data[:, hdr_bits:]
    iters_num = int(data.shape[0] / stack_num) + 1
    for i in range(iters_num):
        if (i + 1) * stack_num < data.shape[0]:
            if i == 0:
                print(i)
                data_dump0 = data[:(i + 1) * stack_num, :]
                print(data_dump0.shape)
                if raw_binary is True:
                    data_dump1 = np.unpackbits(data_dump0)
                    data_dump1 = data_dump1.reshape(data_dump0.shape[0], data_dump0.shape[1] * 8)  # reshape returns a new array
                    data_dump1 = _untangle_raw(data_dump1, hdr_info, data_dump0.shape[0])
                else:
                    data_dump1 = _untangle_raw(data_dump0, hdr_info, data_dump0.shape[0])

                _h5_chunk_write(data_dump1, saving_path)
                print(data_dump1.shape)
                del data_dump0
                del data_dump1
            else:
                print(i)
                data_dump0 = data[i * stack_num:(i + 1) * stack_num, :]
                print(data_dump0.shape)
                if raw_binary is True:
                    data_dump1 = np.unpackbits(data_dump0)
                    data_dump1 = data_dump1.reshape(data_dump0.shape[0], data_dump0.shape[1] * 8)  # reshape returns a new array
                    data_dump1 = _untangle_raw(data_dump1, hdr_info, data_dump0.shape[0])
                else:
                    data_dump1 = _untangle_raw(data_dump0, hdr_info, data_dump0.shape[0])
                _h5_chunk_write(data_dump1, saving_path)
                print(data_dump1.shape)
                del data_dump0
                del data_dump1
        else:
            print(i)
            data_dump0 = data[i * stack_num:, :]
            print(data_dump0.shape)
            if raw_binary is True:
                data_dump1 = np.unpackbits(data_dump0)
                data_dump1 = data_dump1.reshape(data_dump0.shape[0], data_dump0.shape[1] * 8)  # reshape returns a new array
                data_dump1 = _untangle_raw(data_dump1, hdr_info, data_dump0.shape[0])
            else:
                data_dump1 = _untangle_raw(data_dump0, hdr_info, data_dump0.shape[0])
            _h5_chunk_write(data_dump1, saving_path)
            print(data_dump1.shape)
            del data_dump0
            del data_dump1
            return
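A hedged usage sketch; `parse_hdr` and `mib_to_daskarr` are helpers assumed from the same module (both appear in the mib_dask_reader example later in this listing), and the file names are hypothetical:

hdr_info = parse_hdr('scan.mib')
data = mib_to_daskarr(hdr_info, 'scan.mib')
_stack_h5dump(data, hdr_info, 'scan_stack.h5',
              raw_binary=(hdr_info['Counter Depth (number)'] == 1))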
Example n. 42
0
def decode_error(error_code):
    bits = numpy.unpackbits(numpy.asarray(error_code, dtype=numpy.uint8))
    return tuple(numpy.array(dynamixelErrors)[bits == 1])
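A usage sketch with a hypothetical error table (the real `dynamixelErrors` list comes from the surrounding module):

import numpy

dynamixelErrors = ['Input Voltage', 'Angle Limit', 'Overheating', 'Range',
                   'Checksum', 'Overload', 'Instruction', 'Undefined']  # hypothetical
print(decode_error(0b00100001))  # ('Overheating', 'Undefined') with this table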
Example n. 43
0
    def get_spikes(
            self, label, buffer_manager, region,
            placements, graph_mapper, application_vertex, machine_time_step):

        spike_times = list()
        spike_ids = list()
        ms_per_tick = machine_time_step / 1000.0

        vertices = \
            graph_mapper.get_machine_vertices(application_vertex)

        missing_str = ""

        progress_bar = ProgressBar(len(vertices),
                                   "Getting spikes for {}".format(label))
        for vertex in vertices:

            placement = placements.get_placement_of_vertex(vertex)
            vertex_slice = graph_mapper.get_slice(vertex)

            x = placement.x
            y = placement.y
            p = placement.p
            lo_atom = vertex_slice.lo_atom

            # Read the spikes
            n_words = int(math.ceil(vertex_slice.n_atoms / 32.0))
            n_bytes_per_block = n_words * 4

            # for buffering output, info is taken from the buffer manager
            neuron_param_region_data_pointer, data_missing = \
                buffer_manager.get_data_for_vertex(
                    placement, region)
            if data_missing:
                missing_str += "({}, {}, {}); ".format(x, y, p)
            raw_data = neuron_param_region_data_pointer.read_all()
            offset = 0
            while offset < len(raw_data):
                ((time, n_blocks), offset) = (
                    struct.unpack_from("<II", raw_data, offset), offset + 8)
                (spike_data, offset) = (numpy.frombuffer(
                    raw_data, dtype="uint8",
                    count=n_bytes_per_block * n_blocks, offset=offset),
                    offset + (n_bytes_per_block * n_blocks))
                spikes = spike_data.view("<i4").byteswap().view("uint8")
                bits = numpy.fliplr(numpy.unpackbits(spikes).reshape(
                    (-1, 32))).reshape((-1, n_bytes_per_block * 8))
                indices = numpy.nonzero(bits)[1]
                times = numpy.repeat([time * ms_per_tick], len(indices))
                indices = indices + lo_atom
                spike_ids.append(indices)
                spike_times.append(times)
            progress_bar.update()

        progress_bar.end()
        if len(missing_str) > 0:
            logger.warning(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}".format(label, region, missing_str))

        if len(spike_ids) > 0:
            spike_ids = numpy.hstack(spike_ids)
            spike_times = numpy.hstack(spike_times)
            result = numpy.dstack((spike_ids, spike_times))[0]
            return result[numpy.lexsort((spike_times, spike_ids))]

        return numpy.zeros((0, 2))
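A minimal sketch of the per-block decoding above, with a hypothetical single 32-neuron block in which only bit 31 is set:

import struct
import numpy

# One block at time step 3: an <II header followed by 4 bytes of spike bits.
raw = struct.pack("<II", 3, 1) + b'\x00\x00\x00\x80'
(time, n_blocks) = struct.unpack_from("<II", raw, 0)
spike_data = numpy.frombuffer(raw, dtype="uint8", count=4, offset=8)
spikes = spike_data.view("<i4").byteswap().view("uint8")
bits = numpy.fliplr(numpy.unpackbits(spikes).reshape((-1, 32)))
print(numpy.nonzero(bits)[1])  # [31] -- neuron 31 spiked at this time step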
Example n. 44
0
def bytes_to_bits(input):
    # np.bool is removed in modern NumPy; frombuffer handles bytes input
    return np.unpackbits(np.frombuffer(bytes(input), dtype=np.uint8)).astype(bool)
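For example (a hedged check, assuming a bytes input):

print(bytes_to_bits(b'\xa0'))  # [ True False  True False False False False False]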
Example n. 45
0
 def _normal_to_bitplane(image: np.ndarray) -> np.ndarray:
     image_shape = image.shape
     bit_plane_size = (image_shape[0], image_shape[1], image_shape[2], 1)
     cgc_bit_plane = np.reshape(image, bit_plane_size)
     return np.unpackbits(cgc_bit_plane, axis=3)
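For instance, an 8-bit image of shape (H, W, C) comes back as an (H, W, C, 8) array of bit planes; a quick check with a toy image:

import numpy as np

image = np.arange(24, dtype=np.uint8).reshape(2, 4, 3)  # toy 2x4 RGB image
print(_normal_to_bitplane(image).shape)  # (2, 4, 3, 8)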
Example n. 46
0
                                                    features_fn,
                                                    base_updatepath)
    print(command)
    os.system(command)
    feats = []
    with open(features_fn, "rb") as f_prefeats:
        for i in range(len(list_feats_id)):
            feats.append(
                np.frombuffer(f_prefeats.read(feature_num * 4),
                              dtype=np.float32))
    for i in range(len(list_feats_id)):
        print(feats[i])
        print(np.max(feats[i]))
        print(feats[i].shape)
    # query hashcodes
    command = "./get_precomp_hashcodes {} {} {}".format(
        query_precomp_fn, hashcodes_fn, base_updatepath)
    print(command)
    os.system(command)
    hashcodes = []
    with open(hashcodes_fn, "rb") as f_prehash:
        for i in range(len(list_feats_id)):
            hashcodes.append(
                np.frombuffer(f_prehash.read(bits_num // 8), dtype=np.uint8))
    for i in range(len(list_feats_id)):
        print(hashcodes[i])
        tmp = np.unpackbits(hashcodes[i])
        print(tmp.shape, tmp)
        print(np.max(hashcodes[i]))
        print(hashcodes[i].shape)
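Once the hashcodes are unpacked to bit vectors, a Hamming distance between two codes is a one-liner; a sketch with hypothetical 8-byte codes:

import numpy as np

a = np.random.randint(0, 256, 8, dtype=np.uint8)  # hypothetical 64-bit hashcode
b = np.random.randint(0, 256, 8, dtype=np.uint8)
hamming = np.count_nonzero(np.unpackbits(a) != np.unpackbits(b))
print(hamming)  # number of differing bits out of 64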
Example n. 47
0
# Hyper Parameters
dataDim = 8
alpha = 0.1
inputDim = 2
hiddenDim = 16
outputDim = 1
iterNum = 20000
iterPrintNum = 200
accu = {}
errors = {}

largest_num = int(pow(2, dataDim) / 2)
smallest_num = int(-pow(2, dataDim) / 2)
# Two's-complement bit patterns: reinterpret the int8 values as uint8 bytes,
# since modern NumPy refuses negative Python ints in a uint8 array.
binary = np.unpackbits(np.arange(smallest_num, largest_num,
                                 dtype=np.int8).view(np.uint8)[:, None],
                       axis=1)
num_table = {}
for i in range(smallest_num, largest_num):
    num_table[i + largest_num] = i
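# Worked example: with dataDim = 8, row 127 of `binary` is the pattern for -1
# (the uint8 wrap-around of -1 is 255), so binary[127] == [1 1 1 1 1 1 1 1]
# and num_table[127] == -1.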

U = np.random.normal(0, 1, [inputDim, hiddenDim])
V = np.random.normal(0, 1, [hiddenDim, outputDim])
W = np.random.normal(0, 2, [hiddenDim, hiddenDim])

dU = np.zeros_like(U)
dV = np.zeros_like(V)
dW = np.zeros_like(W)

for i in range(iterNum + 1):
    error = 0
Example n. 48
0
        level=logging.WARNING,
        format=
        "[%(asctime)s] [%(levelname)s] %(message)s (%(funcName)s@%(filename)s:%(lineno)s)"
    )
    logger.setLevel(logging.INFO)
    bpr.index.logger.setLevel(logging.INFO)

    passage_db = PassageDB(args.passage_db_file)
    embedding_data = joblib.load(args.embedding_file, mmap_mode="r")
    ids, embeddings = embedding_data["ids"], embedding_data["embeddings"]
    dim_size = embeddings.shape[1]

    logger.info("Building index...")
    if embeddings.dtype == np.uint8:
        if args.binary_to_float:
            embeddings = np.unpackbits(embeddings).reshape(
                -1, dim_size * 8).astype(np.float32)
            embeddings = embeddings * 2 - 1
            base_index = faiss.IndexFlatIP(dim_size * 8)
            index = FaissIndex.build(ids, embeddings, base_index)

        elif args.use_binary_hash:
            base_index = faiss.IndexBinaryHash(dim_size * 8,
                                               args.hash_num_bits)
            index = FaissBinaryIndex.build(ids, embeddings, base_index)

        else:
            base_index = faiss.IndexBinaryFlat(dim_size * 8)
            index = FaissBinaryIndex.build(ids, embeddings, base_index)

    elif args.use_hnsw:
        base_index = faiss.IndexHNSWFlat(dim_size + 1, args.hnsw_store_n)
Example n. 49
0
def binary_array(number: np.uint8):
    return np.unpackbits(np.array(number, dtype = np.uint8))
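A quick check (hedged: np.unpackbits flattens its input, so the 0-d uint8 array unpacks to 8 bits):

import numpy as np

print(binary_array(np.uint8(6)))  # [0 0 0 0 0 1 1 0]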
Example n. 50
0
    def convert_v6_to_tuple(self, content):
        """
        Unpack a v6 binary record to 5-tuple (state, policy pi, result, q, m)

        v6 struct format is (8356 bytes total):
                                  size         1st byte index
        uint32_t version;                               0
        uint32_t input_format;                          4
        float probabilities[1858];  7432 bytes          8
        uint64_t planes[104];        832 bytes       7440
        uint8_t castling_us_ooo;                     8272
        uint8_t castling_us_oo;                      8273
        uint8_t castling_them_ooo;                   8274
        uint8_t castling_them_oo;                    8275
        uint8_t side_to_move_or_enpassant;           8276
        uint8_t rule50_count;                        8277
        // Bitfield with the following allocation:
        //  bit 7: side to move (input type 3)
        //  bit 6: position marked for deletion by the rescorer (never set by lc0)
        //  bit 5: game adjudicated (v6)
        //  bit 4: max game length exceeded (v6)
        //  bit 3: best_q is for proven best move (v6)
        //  bit 2: transpose transform (input type 3)
        //  bit 1: mirror transform (input type 3)
        //  bit 0: flip transform (input type 3)
        uint8_t invariance_info;                     8278
        uint8_t dep_result;                          8279
        float root_q;                                8280
        float best_q;                                8284
        float root_d;                                8288
        float best_d;                                8292
        float root_m;      // In plies.              8296
        float best_m;      // In plies.              8300
        float plies_left;                            8304
        float result_q;                              8308
        float result_d;                              8312
        float played_q;                              8316
        float played_d;                              8320
        float played_m;                              8324
        // The following may be NaN if not found in cache.
        float orig_q;      // For value repair.      8328
        float orig_d;                                8332
        float orig_m;                                8336
        uint32_t visits;                             8340
        // Indices in the probabilities array.
        uint16_t played_idx;                         8344
        uint16_t best_idx;                           8346
        uint64_t reserved;                           8348
        """
        # unpack the V6 content from the raw byte array; 4 2-byte values were
        # arbitrarily chosen for the 8 "reserved" bytes
        (ver, input_format, probs, planes, us_ooo, us_oo, them_ooo, them_oo,
         stm, rule50_count, invariance_info, dep_result, root_q, best_q,
         root_d, best_d, root_m, best_m, plies_left, result_q, result_d,
         played_q, played_d, played_m, orig_q, orig_d, orig_m, visits,
         played_idx, best_idx, reserved1, reserved2, reserved3,
         reserved4) = self.v6_struct.unpack(content)
        """
        v5 struct format was (8308 bytes total)
            int32 version (4 bytes)
            int32 input_format (4 bytes)
            1858 float32 probabilities (7432 bytes)
            104 (13*8) packed bit planes of 8 bytes each (832 bytes)
            uint8 castling us_ooo (1 byte)
            uint8 castling us_oo (1 byte)
            uint8 castling them_ooo (1 byte)
            uint8 castling them_oo (1 byte)
            uint8 side_to_move (1 byte)
            uint8 rule50_count (1 byte)
            uint8 dep_ply_count (1 byte) (unused)
            int8 result (1 byte)
            float32 root_q (4 bytes)
            float32 best_q (4 bytes)
            float32 root_d (4 bytes)
            float32 best_d (4 bytes)
            float32 root_m (4 bytes)
            float32 best_m (4 bytes)
            float32 plies_left (4 bytes)
        """
        # v3/4 data sometimes has a useful value in dep_ply_count (now invariance_info),
        # so copy that over if the new ply_count is not populated.
        if plies_left == 0:
            plies_left = invariance_info
        plies_left = struct.pack('f', plies_left)

        assert input_format == self.expected_input_format

        # Unpack bit planes and cast to 32 bit float
        planes = np.unpackbits(np.frombuffer(planes, dtype=np.uint8)).astype(
            np.float32)
        rule50_divisor = 99.0
        if input_format > 3:
            rule50_divisor = 100.0
        rule50_plane = struct.pack('f', rule50_count / rule50_divisor) * 64

        if input_format == 1:
            middle_planes = self.flat_planes[us_ooo] + \
                            self.flat_planes[us_oo] + \
                            self.flat_planes[them_ooo] + \
                            self.flat_planes[them_oo] + \
                            self.flat_planes[stm]
        elif input_format == 2:
            # Each inner array has to be reversed as these fields are in opposite endian to the planes data.
            them_ooo_bytes = reverse_expand_bits(them_ooo)
            us_ooo_bytes = reverse_expand_bits(us_ooo)
            them_oo_bytes = reverse_expand_bits(them_oo)
            us_oo_bytes = reverse_expand_bits(us_oo)
            middle_planes = us_ooo_bytes + (6*8*4) * b'\x00' + them_ooo_bytes + \
                            us_oo_bytes + (6*8*4) * b'\x00' + them_oo_bytes + \
                            self.flat_planes[0] + \
                            self.flat_planes[0] + \
                            self.flat_planes[stm]
        elif input_format in (3, 4, 5, 132, 133):
            # Each inner array has to be reversed as these fields are in opposite endian to the planes data.
            them_ooo_bytes = reverse_expand_bits(them_ooo)
            us_ooo_bytes = reverse_expand_bits(us_ooo)
            them_oo_bytes = reverse_expand_bits(them_oo)
            us_oo_bytes = reverse_expand_bits(us_oo)
            enpassant_bytes = reverse_expand_bits(stm)
            middle_planes = us_ooo_bytes + (6*8*4) * b'\x00' + them_ooo_bytes + \
                            us_oo_bytes + (6*8*4) * b'\x00' + them_oo_bytes + \
                            self.flat_planes[0] + \
                            self.flat_planes[0] + \
                            (7*8*4) * b'\x00' + enpassant_bytes

        # Concatenate all byteplanes. Make the last plane all 1's so the NN can
        # detect edges of the board more easily
        aux_plus_6_plane = self.flat_planes[0]
        if (input_format == 132
                or input_format == 133) and invariance_info >= 128:
            aux_plus_6_plane = self.flat_planes[1]
        planes = planes.tobytes() + \
                 middle_planes + \
                 rule50_plane + \
                 aux_plus_6_plane + \
                 self.flat_planes[1]

        assert len(planes) == ((8 * 13 * 1 + 8 * 1 * 1) * 8 * 8 * 4)

        if ver == V6_VERSION:
            winner = struct.pack('fff', 0.5 * (1.0 - result_d + result_q),
                                 result_d, 0.5 * (1.0 - result_d - result_q))
        else:
            dep_result = float(dep_result)
            assert dep_result == 1.0 or dep_result == -1.0 or dep_result == 0.0
            winner = struct.pack('fff', dep_result == 1.0, dep_result == 0.0,
                                 dep_result == -1.0)

        best_q_w = 0.5 * (1.0 - best_d + best_q)
        best_q_l = 0.5 * (1.0 - best_d - best_q)
        assert -1.0 <= best_q <= 1.0 and 0.0 <= best_d <= 1.0
        best_q = struct.pack('fff', best_q_w, best_d, best_q_l)

        return (planes, probs, winner, best_q, plies_left)
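`reverse_expand_bits` is used above but not defined in this listing. A hedged sketch of what it plausibly does, given that each castling/en-passant byte must expand to a bit-reversed row of eight float32 values:

import numpy as np

def reverse_expand_bits(plane):
    # Unpack one byte to 8 bits, reverse the bit order to match the planes'
    # endianness, and widen each bit to a 4-byte float32 before serializing.
    return np.unpackbits(
        np.array([plane], dtype=np.uint8))[::-1].astype(np.float32).tobytes()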
Example n. 51
0
def get_order_list(n=10):
    binary_array = np.unpackbits(np.arange(2**n, dtype=np.uint16).view(
        np.uint8)[:, None],
                                 axis=1)
    binary_array = np.hstack((binary_array[1::2], binary_array[::2]))[:, -n:]
    return np.sum(binary_array, axis=1)
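A quick check (note the little-endian byte `view` trick assumes a little-endian host):

print(get_order_list(3))  # [0 1 1 2 1 2 2 3] -- the popcount of 0..7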
Example n. 52
0
 def __init__(self, jic_filename):
     with open(jic_filename, "rb") as f:
         jic = np.frombuffer(f.read(), dtype=np.uint8)
     self.jic_uint8 = jic
     self.jic = np.unpackbits(jic)
Example n. 53
0
    def convert_v5_to_tuple(self, content):
        """
        Unpack a v5 binary record to 5-tuple (state, policy pi, result, q, m)

        v5 struct format is (8308 bytes total)
            int32 version (4 bytes)
            int32 input_format (4 bytes)
            1858 float32 probabilities (7432 bytes)
            104 (13*8) packed bit planes of 8 bytes each (832 bytes)
            uint8 castling us_ooo (1 byte)
            uint8 castling us_oo (1 byte)
            uint8 castling them_ooo (1 byte)
            uint8 castling them_oo (1 byte)
            uint8 side_to_move (1 byte)
            uint8 rule50_count (1 byte)
            uint8 dep_ply_count (1 byte) (unused)
            int8 result (1 byte)
            float32 root_q (4 bytes)
            float32 best_q (4 bytes)
            float32 root_d (4 bytes)
            float32 best_d (4 bytes)
            float32 root_m (4 bytes)
            float32 best_m (4 bytes)
            float32 plies_left (4 bytes)
        """
        (ver, input_format, probs, planes, us_ooo, us_oo, them_ooo, them_oo,
         stm, rule50_count, dep_ply_count, winner, root_q, best_q, root_d,
         best_d, root_m, best_m, plies_left) = self.v5_struct.unpack(content)
        # v3/4 data sometimes has a useful value in dep_ply_count, so copy that over if the new ply_count is not populated.
        if plies_left == 0:
            plies_left = dep_ply_count
        plies_left = struct.pack('f', plies_left)

        assert input_format == self.expected_input_format

        # Unpack bit planes and cast to 32 bit float
        planes = np.unpackbits(np.frombuffer(planes, dtype=np.uint8)).astype(
            np.float32)
        rule50_divisor = 99.0
        if input_format > 3:
            rule50_divisor = 100.0
        rule50_plane = struct.pack('f', rule50_count / rule50_divisor) * 64

        if input_format == 1:
            middle_planes = self.flat_planes[us_ooo] + \
                            self.flat_planes[us_oo] + \
                            self.flat_planes[them_ooo] + \
                            self.flat_planes[them_oo] + \
                            self.flat_planes[stm]
        elif input_format == 2:
            # Each inner array has to be reversed as these fields are in opposite endian to the planes data.
            them_ooo_bytes = reverse_expand_bits(them_ooo)
            us_ooo_bytes = reverse_expand_bits(us_ooo)
            them_oo_bytes = reverse_expand_bits(them_oo)
            us_oo_bytes = reverse_expand_bits(us_oo)
            middle_planes = us_ooo_bytes + (6*8*4) * b'\x00' + them_ooo_bytes + \
                            us_oo_bytes + (6*8*4) * b'\x00' + them_oo_bytes + \
                            self.flat_planes[0] + \
                            self.flat_planes[0] + \
                            self.flat_planes[stm]
        elif input_format in (3, 4, 132):
            # Each inner array has to be reversed as these fields are in opposite endian to the planes data.
            them_ooo_bytes = reverse_expand_bits(them_ooo)
            us_ooo_bytes = reverse_expand_bits(us_ooo)
            them_oo_bytes = reverse_expand_bits(them_oo)
            us_oo_bytes = reverse_expand_bits(us_oo)
            enpassant_bytes = reverse_expand_bits(stm)
            middle_planes = us_ooo_bytes + (6*8*4) * b'\x00' + them_ooo_bytes + \
                            us_oo_bytes + (6*8*4) * b'\x00' + them_oo_bytes + \
                            self.flat_planes[0] + \
                            self.flat_planes[0] + \
                            (7*8*4) * b'\x00' + enpassant_bytes

        # Concatenate all byteplanes. Make the last plane all 1's so the NN can
        # detect edges of the board more easily
        aux_plus_6_plane = self.flat_planes[0]
        if input_format == 132 and dep_ply_count >= 128:
            aux_plus_6_plane = self.flat_planes[1]
        planes = planes.tobytes() + \
                 middle_planes + \
                 rule50_plane + \
                 aux_plus_6_plane + \
                 self.flat_planes[1]

        assert len(planes) == ((8 * 13 * 1 + 8 * 1 * 1) * 8 * 8 * 4)
        winner = float(winner)
        assert winner == 1.0 or winner == -1.0 or winner == 0.0
        winner = struct.pack('fff', winner == 1.0, winner == 0.0,
                             winner == -1.0)

        best_q_w = 0.5 * (1.0 - best_d + best_q)
        best_q_l = 0.5 * (1.0 - best_d - best_q)
        assert -1.0 <= best_q <= 1.0 and 0.0 <= best_d <= 1.0
        best_q = struct.pack('fff', best_q_w, best_d, best_q_l)

        return (planes, probs, winner, best_q, plies_left)
Example n. 54
0
def sigmoid(x):
    output = 1 / (1 + np.exp(-x))
    return output


# convert output of sigmoid function to its derivative
def sigmoid_output_to_derivative(output):
    return output * (1 - output)


# training dataset generation
int2binary = {}
binary_dim = 8

largest_number = pow(2, binary_dim)
binary = np.unpackbits(np.array([range(largest_number)], dtype=np.uint8).T, axis=1)
for i in range(largest_number):
    int2binary[i] = binary[i]

# input variables
alpha = 0.1
input_dim = 2
hidden_dim = 16
output_dim = 1

# initialize neural network weights, so that they also contain negative values
synapse_0 = 2 * np.random.random((input_dim, hidden_dim)) - 1
synapse_1 = 2 * np.random.random((hidden_dim, output_dim)) - 1
synapse_h = 2 * np.random.random((hidden_dim, hidden_dim)) - 1

synapse_0_update = np.zeros_like(synapse_0)
Example n. 55
0
def read_plain_boolean(raw_bytes, count):
    """Read `count` booleans using the plain encoding."""
    return np.unpackbits(np.frombuffer(raw_bytes, dtype=np.uint8)).reshape(
            (-1, 8))[:, ::-1].ravel().astype(bool)[:count]
Example n. 56
0
            chunk = f.read(chunksize)
            if chunk:
                for b in chunk:
                    yield b
            else:
                break


binary_str = bytes_from_file(data_file)

c = 0
bits = ''
for b in binary_str:
    if c == 1000:
        break
    result = np.unpackbits(np.array([b], dtype='uint8'))
    for r in result:
        bits += str(r)
    c += 1
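Building the string one byte at a time is slow; a vectorized sketch of the same first-1000-bytes dump (assuming `data_file` is a file path, as above):

import numpy as np

with open(data_file, 'rb') as f:
    head = np.frombuffer(f.read(1000), dtype=np.uint8)
bits = ''.join(np.unpackbits(head).astype(str))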
#
## encode binary string
#ct = 0
#sub_ct = 0
#s = ''
#d = {}
#d['0000'] = ' '
#char_ct = 60
#num_str = ''
#for b in binary_str:
#    num_str += str(b)
#    if sub_ct == char_len-1:
Example n. 57
0
    def __getitem__(self, idx):
        ''' Returns an item of the dataset.

        Args:
            idx (int): ID of data point
        '''
        data_path = self.data[idx]['data_path']
        subject = self.data[idx]['subject']
        gender = self.data[idx]['gender']
        data = {}

        aug_rot = self.augm_params().astype(np.float32)

        points_dict = np.load(data_path)

        # 3D models and points
        loc = points_dict['loc'].astype(np.float32)
        trans = points_dict['trans'].astype(np.float32)
        root_loc = points_dict['Jtr'][0].astype(np.float32)
        scale = points_dict['scale'].astype(np.float32)

        # Also get GT SMPL poses
        pose_body = points_dict['pose_body']
        pose_hand = points_dict['pose_hand']
        pose = np.concatenate([pose_body, pose_hand], axis=-1)
        pose = R.from_rotvec(pose.reshape([-1, 3]))

        body_mesh_a_pose = points_dict['a_pose_mesh_points']
        # Break symmetry if given in float16:
        if body_mesh_a_pose.dtype == np.float16:
            body_mesh_a_pose = body_mesh_a_pose.astype(np.float32)
            body_mesh_a_pose += 1e-4 * np.random.randn(*body_mesh_a_pose.shape)
        else:
            body_mesh_a_pose = body_mesh_a_pose.astype(np.float32)

        n_smpl_points = body_mesh_a_pose.shape[0]

        bone_transforms = points_dict['bone_transforms'].astype(np.float32)
        # Apply rotation augmentation to bone transformations
        bone_transforms_aug = np.matmul(np.expand_dims(aug_rot, axis=0), bone_transforms)
        bone_transforms_aug[:, :3, -1] += root_loc - trans - np.dot(aug_rot[:3, :3], root_loc - trans)
        bone_transforms = bone_transforms_aug
        # Get augmented posed-mesh
        skinning_weights = self.skinning_weights[gender]
        if self.use_abs_bone_transforms:
            J_regressor = self.J_regressors[gender]

        T = np.dot(skinning_weights, bone_transforms.reshape([-1, 16])).reshape([-1, 4, 4])

        homogen_coord = np.ones([n_smpl_points, 1], dtype=np.float32)
        a_pose_homo = np.concatenate([body_mesh_a_pose - trans, homogen_coord], axis=-1).reshape([n_smpl_points, 4, 1])
        body_mesh = np.matmul(T, a_pose_homo)[:, :3, 0].astype(np.float32) + trans

        posed_trimesh = trimesh.Trimesh(vertices=body_mesh, faces=self.faces)
        input_pointcloud, _ = get_3DSV(posed_trimesh)
        noise = self.input_pointcloud_noise * np.random.randn(*input_pointcloud.shape)
        input_pointcloud = (input_pointcloud + noise).astype(np.float32)

        # Get extents of model.
        bb_min = np.min(input_pointcloud, axis=0)
        bb_max = np.max(input_pointcloud, axis=0)
        # total_size = np.sqrt(np.square(bb_max - bb_min).sum())
        total_size = (bb_max - bb_min).max()
        # Scales all dimensions equally.
        scale = max(1.6, total_size)    # 1.6 is the magic number from IPNet
        loc = np.array(
            [(bb_min[0] + bb_max[0]) / 2,
             (bb_min[1] + bb_max[1]) / 2,
             (bb_min[2] + bb_max[2]) / 2],
            dtype=np.float32
        )

        if self.input_pointcloud_n <= input_pointcloud.shape[0]:
            rand_inds = np.random.choice(input_pointcloud.shape[0], size=self.input_pointcloud_n, replace=False)
        else:
            rand_inds = np.random.choice(input_pointcloud.shape[0], size=self.input_pointcloud_n, replace=True)

        input_pointcloud = input_pointcloud[rand_inds, :]

        n_points_uniform = int(self.points_size * self.points_uniform_ratio)
        n_points_surface = self.points_size - n_points_uniform

        boxsize = 1 + self.points_padding
        points_uniform = np.random.rand(n_points_uniform, 3)
        points_uniform = boxsize * (points_uniform - 0.5)
        # Scale points in (padded) unit box back to the original space
        points_uniform *= scale
        points_uniform += loc
        # Sample points around posed-mesh surface
        n_points_surface_cloth = n_points_surface // 2 if self.double_layer else n_points_surface
        points_surface = posed_trimesh.sample(n_points_surface_cloth)

        points_surface += np.random.normal(scale=self.points_sigma, size=points_surface.shape)

        if self.double_layer:
            n_points_surface_minimal = n_points_surface // 2

            posedir = self.posedirs[gender]
            minimal_shape_path = os.path.join(self.cape_path, 'cape_release', 'minimal_body_shape', subject, subject + '_minimal.npy')
            minimal_shape = np.load(minimal_shape_path)
            pose_mat = pose.as_matrix()
            ident = np.eye(3)
            pose_feature = (pose_mat - ident).reshape([207, 1])
            pose_offsets = np.dot(posedir.reshape([-1, 207]), pose_feature).reshape([6890, 3])
            minimal_shape += pose_offsets

            if self.use_abs_bone_transforms:
                Jtr_cano = np.dot(J_regressor, minimal_shape)
                Jtr_cano = Jtr_cano[IPNET2SMPL_IDX, :]

            a_pose_homo = np.concatenate([minimal_shape, homogen_coord], axis=-1).reshape([n_smpl_points, 4, 1])
            minimal_body_mesh = np.matmul(T, a_pose_homo)[:, :3, 0].astype(np.float32) + trans
            minimal_posed_trimesh = trimesh.Trimesh(vertices=minimal_body_mesh, faces=self.faces)

            # Sample points around minimally clothed posed-mesh surface
            points_surface_minimal = minimal_posed_trimesh.sample(n_points_surface_minimal)
            points_surface_minimal += np.random.normal(scale=self.points_sigma, size=points_surface_minimal.shape)

            points_surface = np.vstack([points_surface, points_surface_minimal])

        # Check occupancy values for sampled points
        query_points = np.vstack([points_uniform, points_surface]).astype(np.float32)
        if self.double_layer:
            # Double-layer occupancies, as was done in IPNet
            # 0: outside, 1: between body and cloth, 2: inside body mesh
            occupancies_cloth = check_mesh_contains(posed_trimesh, query_points)
            occupancies_minimal = check_mesh_contains(minimal_posed_trimesh, query_points)
            occupancies = occupancies_cloth.astype(np.int64)
            occupancies[occupancies_minimal] = 2
        else:
            occupancies = check_mesh_contains(posed_trimesh, query_points).astype(np.float32)

        # Skinning inds by querying the nearest SMPL vertex on the clothed mesh
        kdtree = KDTree(body_mesh if self.query_on_clothed else minimal_body_mesh)
        _, p_idx = kdtree.query(query_points)
        pts_W = skinning_weights[p_idx, :]
        skinning_inds_ipnet = self.part_labels[p_idx] # skinning inds (14 parts)
        skinning_inds_smpl = pts_W.argmax(1)   # full skinning inds (24 parts)
        if self.num_joints == 14:
            skinning_inds = skinning_inds_ipnet
        else:
            skinning_inds = skinning_inds_smpl

        # Invert LBS to get query points in A-pose space
        T = np.dot(pts_W, bone_transforms.reshape([-1, 16])).reshape([-1, 4, 4])
        T = np.linalg.inv(T)

        homogen_coord = np.ones([self.points_size, 1], dtype=np.float32)
        posed_homo = np.concatenate([query_points - trans, homogen_coord], axis=-1).reshape([self.points_size, 4, 1])
        query_points_a_pose = np.matmul(T, posed_homo)[:, :3, 0].astype(np.float32) + trans

        if self.use_abs_bone_transforms:
            assert (not self.use_v_template and self.num_joints == 24)
            query_points_a_pose -= Jtr_cano[SMPL2IPNET_IDX[skinning_inds], :]

        if self.use_v_template:
            v_template = self.v_templates[gender]
            pose_shape_offsets = v_template - minimal_shape
            query_points_template = query_points_a_pose + pose_shape_offsets[p_idx, :]

        sc_factor = 1.0 / scale * 1.5 if self.normalized_scale else 1.0 # 1.5 is the magic number from IPNet
        offset = loc

        bone_transforms_inv = bone_transforms.copy()
        bone_transforms_inv[:, :3, -1] += trans - loc
        bone_transforms_inv = np.linalg.inv(bone_transforms_inv)
        bone_transforms_inv[:, :3, -1] *= sc_factor

        data = {
            None: (query_points - offset) * sc_factor,
            'occ': occupancies,
            'trans': trans,
            'root_loc': root_loc,
            'pts_a_pose': (query_points_a_pose - (trans if self.use_global_trans else offset)) * sc_factor,
            'skinning_inds': skinning_inds,
            'skinning_inds_ipnet': skinning_inds_ipnet,
            'skinning_inds_smpl': skinning_inds_smpl,
            'loc': loc,
            'scale': scale,
            'bone_transforms': bone_transforms,
            'bone_transforms_inv': bone_transforms_inv,
        }

        if self.use_v_template:
            data.update({'pts_template': (query_points_template - (trans if self.use_global_trans else offset)) * sc_factor})

        if self.mode in ['test']:
            data.update({'smpl_vertices': body_mesh, 'smpl_a_pose_vertices': body_mesh_a_pose})
            if self.double_layer:
                data.update({'minimal_smpl_vertices': minimal_body_mesh})

        data_out = {}
        field_name = 'points' if self.mode in ['train', 'test'] else 'points_iou'
        for k, v in data.items():
            if k is None:
                data_out[field_name] = v
            else:
                data_out['%s.%s' % (field_name, k)] = v

        if self.input_type == 'pointcloud':
            data_out.update(
                {'inputs': (input_pointcloud - offset) * sc_factor,
                 'idx': idx,
                }
            )
        elif self.input_type == 'voxel':
            voxels = np.unpackbits(points_dict['voxels_occ']).astype(np.float32)
            voxels = np.reshape(voxels, [self.voxel_res] * 3)
            data_out.update(
                {'inputs': voxels,
                 'idx': idx,
                }
            )
        else:
            raise ValueError('Unsupported input type: {}'.format(self.input_type))

        return data_out
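The voxel branch assumes `voxels_occ` was stored packed with `np.packbits`; a round-trip sketch at a hypothetical resolution of 32:

import numpy as np

voxel_res = 32                                  # hypothetical resolution
occ = np.random.rand(voxel_res ** 3) > 0.5      # boolean occupancy grid
packed = np.packbits(occ)                       # what 'voxels_occ' would hold
voxels = np.unpackbits(packed).astype(np.float32)[:voxel_res ** 3]
voxels = np.reshape(voxels, [voxel_res] * 3)
assert voxels.sum() == occ.sum()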
Example n. 58
0
 def do(self):
     mask = None if self.data is None else np.unpackbits(self.data).astype(
         bool)[:self.length]
     self.apply_mask(mask)
Example n. 59
0
def mib_dask_reader(mib_filename, h5_stack_path=None):
    """Read a .mib file using dask and return as a lazy pyXem / hyperspy signal.

    Parameters
    ----------
    mib_filename : str
    h5_stack_path : str, default None
        Path of the h5 file to read the data from, used in the case of large
        scan arrays.
    Returns
    -------
    data_hs : reshaped hyperspy.signals.Signal2D
        If the data is detected to be STEM, it is reshaped using one of two
        functions: one based on the exposure times appearing in the header
        and, if no exposure times are available, one based on the summed
        frame intensities and the detected flyback frames. For TEM data, for
        a single frame, or if the STEM reshaping fails, the stack is returned
        unreshaped.
                The metadata adds the following domains:
                General
                │   └── title =
                └── Signal
                    ├── binned = False
                    ├── exposure_time = 0.001
                    ├── flyback_times = [0.066, 0.071, 0.065, 0.017825]
                    ├── frames_number_skipped = 90
                    ├── scan_X = 256
                    └── signal_type = STEM

    """
    hdr_stuff = parse_hdr(mib_filename)
    width = hdr_stuff['width']
    height = hdr_stuff['height']
    width_height = width * height
    if h5_stack_path is None:
        data = mib_to_daskarr(hdr_stuff, mib_filename)
        depth = get_mib_depth(hdr_stuff, mib_filename)
        hdr_bits = get_hdr_bits(hdr_stuff)
        if hdr_stuff['Counter Depth (number)'] == 1:
            # RAW 1 bit data: the header bits are written as uint8 but the frames
            # are binary and need to be unpacked as such.
            data = data.reshape(-1, int(width_height / 8 + hdr_bits))
            data = data[:, hdr_bits:]
            # get the shape axis 1 before unpackbit
            s0 = data.shape[0]
            s1 = data.shape[1]
            data = np.unpackbits(data)
            data = data.reshape(s0, s1 * 8)
        else:
            data = data.reshape(-1, int(width_height + hdr_bits))
            data = data[:, hdr_bits:]
        if hdr_stuff['raw'] == 'R64':
            data = _untangle_raw(data, hdr_stuff, depth)
        elif hdr_stuff['raw'] == 'MIB':
            data = data.reshape(depth, width, height)
    else:
        data = h5stack_to_hs(h5_stack_path, hdr_stuff)
        data = data.data

    exp_times_list = read_exposures(hdr_stuff, mib_filename)
    data_dict = STEM_flag_dict(exp_times_list)

    if hdr_stuff['Assembly Size'] == '2x2':
        # add_crosses expects a dask array object
        data = add_crosses(data)

    data_hs = hs.signals.Signal2D(data).as_lazy()

    # Transferring dict info to metadata
    if data_dict['STEM_flag'] == 1:
        data_hs.metadata.Signal.signal_type = 'STEM'
    else:
        data_hs.metadata.Signal.signal_type = 'TEM'
    data_hs.metadata.Signal.scan_X = data_dict['scan_X']
    data_hs.metadata.Signal.exposure_time = data_dict['exposure time']
    data_hs.metadata.Signal.frames_number_skipped = data_dict['number of frames_to_skip']
    data_hs.metadata.Signal.flyback_times = data_dict['flyback_times']
    print(data_hs)

    # only attempt reshaping if it is not already reshaped!

    if len(data_hs.data.shape) == 3:
        try:
            if data_hs.metadata.Signal.signal_type == 'TEM':
                print('This mib file appears to be TEM data. The stack is returned with no reshaping.')
                return data_hs
            # to catch single frames:
            if data_hs.axes_manager[0].size == 1:
                print('This mib file is a single frame.')
                return data_hs
            # If the exposure time info not appearing in the header bits use reshape_4DSTEM_SumFrames
            # to reshape otherwise use reshape_4DSTEM_FlyBack function
            if data_hs.metadata.Signal.signal_type == 'STEM' and data_hs.metadata.Signal.exposure_time is None:
                print('reshaping using sum frames intensity')
                (data_hs, skip_ind) = reshape_4DSTEM_SumFrames(data_hs)
                data_hs.metadata.Signal.signal_type = 'STEM'
                data_hs.metadata.Signal.frames_number_skipped = skip_ind
            else:
                print('reshaping using flyback pixel')
                data_hs = reshape_4DSTEM_FlyBack(data_hs)
        except (TypeError, ValueError):
            print(
                'Warning: Reshaping did not work, or this is TEM data with no exposure info. Returning the stack with no reshaping!')
            return data_hs
    return data_hs
Example n. 60
0
    def analyze(self, steps):
        """Analyze the captured pattern.

        This function will process the captured pattern and put the pattern
        into a Wavedrom compatible format.

        The data output is of format:

        [{'name': '', 'pin': 'D1', 'wave': '1...0.....'},
         {'name': '', 'pin': 'D2', 'wave': '0.1..01.01'}]

        Note that all the lanes should have the same number of samples.
        All the pins are assumed to be tri-stated and traceable.

        Currently, no `step()` method is supported for the PS-controlled
        trace analyzer.

        Parameters
        ----------
        steps : int
            Number of samples to analyze. A value 0 means to analyze all the
            valid samples.

        Returns
        -------
        list
            A list of dictionaries, each dictionary containing the pin number
            and the waveform pattern in string format.

        """
        tri_state_pins, non_tri_inputs, non_tri_outputs = \
            get_tri_state_pins(self.intf_spec['traceable_inputs'],
                               self.intf_spec['traceable_outputs'],
                               self.intf_spec['traceable_tri_states'])

        if steps == 0:
            num_valid_samples = self.num_analyzer_samples
        else:
            num_valid_samples = steps
        self.samples = np.zeros(num_valid_samples, dtype='>i8')
        np.copyto(self.samples, self._cma_array)
        temp_bytes = np.frombuffer(self.samples, dtype=np.uint8)
        bit_array = np.unpackbits(temp_bytes)
        temp_lanes = bit_array.reshape(num_valid_samples,
                                       self.intf_spec['monitor_width']).T[::-1]

        wavelanes = list()
        for pin_label in tri_state_pins:
            output_lane = temp_lanes[self.intf_spec['traceable_outputs']
                                     [pin_label]]
            input_lane = temp_lanes[self.intf_spec['traceable_inputs']
                                    [pin_label]]
            tri_lane = temp_lanes[self.intf_spec['traceable_tri_states']
                                  [pin_label]]
            cond_list = [tri_lane == 0, tri_lane == 1]
            choice_list = [output_lane, input_lane]
            temp_lane = np.select(cond_list, choice_list)
            bitstring = ''.join(temp_lane.astype(str).tolist())
            wave = bitstring_to_wave(bitstring)
            wavelanes.append({'name': '', 'pin': pin_label, 'wave': wave})

        return wavelanes
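`bitstring_to_wave` is not shown in this listing; given the documented output format ('1...0.....'), a plausible sketch is run-length compression in which repeated samples become dots:

def bitstring_to_wave(bitstring):
    # Keep the first sample of each run; repeats render as '.' so that
    # '1111000000' becomes '1...0.....' in the WaveDrom-compatible format.
    wave, prev = '', None
    for bit in bitstring:
        wave += '.' if bit == prev else bit
        prev = bit
    return wave

print(bitstring_to_wave('1111000000'))  # 1...0.....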