Example #1
def _read_id_struct(fid, tag, shape, rlims):
    """Read ID struct tag."""
    return dict(
        version=int(np.frombuffer(fid.read(4), dtype=">i4")),
        machid=np.frombuffer(fid.read(8), dtype=">i4"),
        secs=int(np.frombuffer(fid.read(4), dtype=">i4")),
        usecs=int(np.frombuffer(fid.read(4), dtype=">i4")))
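A minimal usage sketch, with io.BytesIO standing in for the open file object fid; the tag, shape and rlims arguments are not used by this particular reader:

import io
import numpy as np

# 20 bytes of big-endian int32 data: version, two machid words, secs, usecs
raw = np.array([1, 12345, 67890, 1600000000, 250000], dtype=">i4").tobytes()
info = _read_id_struct(io.BytesIO(raw), tag=None, shape=None, rlims=None)
print(info["version"], info["secs"], info["usecs"])   # 1 1600000000 250000
print(info["machid"])                                 # [12345 67890]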
Example #2
def from_stream(stream, storage, form):
    """Reverses to_stream, returning data"""
    if storage == "pure-plain":
        if isinstance(stream, str):
            txt = stream            
        else:
            assert not stream.startswith(MAGIC_SEAMLESS)
            assert not stream.startswith(MAGIC_NUMPY)
            txt = stream.decode("utf-8")
        result = json.loads(txt)
        return result
    elif storage == "pure-binary":
        b = BytesIO(stream)
        return np.load(b)
    assert stream.startswith(MAGIC_SEAMLESS)
    l = len(MAGIC_SEAMLESS)
    s1 = stream[l:l+8]
    s2 = stream[l+8:l+16]
    len_jsons = np.frombuffer(s1, dtype=np.uint64).tolist()[0]
    buffersize = np.frombuffer(s2, dtype=np.uint64).tolist()[0]
    assert len(stream) == l + 16 + len_jsons + buffersize
    bytes_jsons = stream[l+16:l+16+len_jsons]
    jsons = json.loads(bytes_jsons.decode("utf-8"))
    bytebuffer = stream[l+16+len_jsons:]
    buffer = np.frombuffer(bytebuffer,dtype=np.uint8)
    data = _from_stream(
        None, storage, form,
        jsons, buffer
    )
    return data
    def allocate(values, classes):
        '''This function allocates memory for the variance matrix, error matrix,
        and pivot matrix.  It also moves the variance matrix and error matrix from
        numpy types to a ctypes, shared memory array.'''

        numClass = classes
        numVal = len(values)

        varCtypes = multiprocessing.RawArray(ctypes.c_double, numVal*numVal)
        varMat = numpy.frombuffer(varCtypes)
        varMat.shape = (numVal,numVal)

        for x in range(0,len(values)):
            varMat[x] = values[:]
            varMat[x][0:x] = 0
        print(varMat)
        errCtypes = multiprocessing.RawArray(ctypes.c_double, classes*numVal)
        errorMat = numpy.frombuffer(errCtypes)
        errorMat.shape = (classes, numVal)

        pivotShape = (classes, numVal)
        pivotMat = numpy.ndarray(pivotShape, dtype=numpy.float64)

        #Initialize the arrays as globals.
        initArr(varMat, errorMat)

        return pivotMat, numClass
Example #4
def _read_dig_point_struct(fid, tag, shape, rlims):
    """Read dig point struct tag."""
    return dict(
        kind=int(np.frombuffer(fid.read(4), dtype=">i4")),
        ident=int(np.frombuffer(fid.read(4), dtype=">i4")),
        r=np.frombuffer(fid.read(12), dtype=">f4"),
        coord_frame=FIFF.FIFFV_COORD_UNKNOWN)
Example #5
def get_trace(f, num_points, big):
    """
    Get a trace from an open RNMRTK file.

    Parameters
    -----------
    f : file object
        Open file object to read from.
    num_points : int
        Number of points in trace (R+I)
    big : bool
        True for data that is big-endian, False for little-endian.

    Returns
    -------
    trace : ndarray
        Raw trace of NMR data.

    """
    if big:
        bsize = num_points * np.dtype('>f4').itemsize
        return np.frombuffer(f.read(bsize), dtype='>f4')
    else:
        bsize = num_points * np.dtype('<f4').itemsize
        return np.frombuffer(f.read(bsize), dtype='<f4')
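For a quick check the function can be fed an in-memory buffer instead of a real RNMRTK file; io.BytesIO stands in for the open file object:

import io
import numpy as np

points = np.arange(8, dtype=">f4")               # synthetic big-endian trace
f = io.BytesIO(points.tobytes())
trace = get_trace(f, num_points=8, big=True)
print(trace.dtype, np.allclose(trace, points))   # >f4 True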
Example #6
    def fetch(self,portB=0,portA=0,dataFormat=1,**kwargs):
        if dataFormat == 'all':
            return {'FORM1':self.fetch(portB=portB,portA=portA,dataFormat=1),
                    'FORM4':self.fetch(portB=portB,portA=portA,dataFormat=4)}
        else:
            self.write('FORM{0:d};'.format(dataFormat))
            
            if dataFormat == 4:
                s = numpy.array(self.ask_for_values('OUTPDATA'))
                s.shape=(-1,2)
                sValues =  s[:,0]+1j*s[:,1]
            elif dataFormat == 1:
                #http://www.vnahelp.com/tip23.html
                rawData = self.ask_raw('OUTPDATA')
                assert rawData[0] == '#', 'OUTPDATA should start with "#"'
                lengthFromHeader = 4+numpy.frombuffer(rawData[2:4],numpy.dtype('>i2'))[0]
                assert len(rawData) == lengthFromHeader,'Buffer length {0} does not correspond with length {1} announced in the header field'.format(len(rawData),lengthFromHeader)
                
                tupleData = numpy.frombuffer(rawData[4:],numpy.dtype(">i2,>i2,i1,i1"))
                fieldData = numpy.array(tupleData.tolist())
#                assert (fieldData[:,2] == 0).all(),'The fifth byte was supposed to be zero, but is not always. Maybe there is supplementary precision to exploit...'
                sValues = (1.0*fieldData[:,1] + 1.0j*fieldData[:,0]) * (2.0**(fieldData[:,3]-15))
               
            network = skrf.Network(name='S_{bNatural:d}{aNatural:d}'.format(bNatural=portB+1,aNatural=portA+1))
            network.s = sValues

            network.frequency = self.frequency
            return network
Example #7
def eleventhPass(idxArray, ageArray, disPops, P_Age, length, name, myPipe):
	# collect sample stats
	subObs = {}
	subExp = {}
	sampleN = myPipe.recv()
	idxArray = np.frombuffer(idxArray.get_obj(), dtype=np.int32)
	ageArray = np.frombuffer(ageArray.get_obj(), dtype=np.int8)
	while sampleN != 'END':
		# sample = np.random.choice(idxArray, length, replace=True)
		randomIndexs = np.random.choice(range(idxArray.shape[0]), length, replace=True)
		sample = idxArray[randomIndexs]
		sampleAges = ageArray[randomIndexs]
		counts = Counter(sample)
		ageCounts = Counter(sampleAges)
		for i in range(len(disPops)):
			disPop = np.frombuffer(disPops[i].get_obj(), dtype=np.int32)
			P_Dis_Age = P_Age[i]
			obsSampled = set(disPop) & set(sample)
			expSampled = np.sum([P_Dis_Age[k] * v for k, v in ageCounts.items()])
			try:
				subObs[i].append(sum([counts[s] for s in obsSampled])) 
				subExp[i].append(expSampled)
			except KeyError:
				subObs[i] = [sum([counts[s] for s in obsSampled])]
				subExp[i] = [expSampled]
		del sample
		if sampleN % 50 == 0:
			sys.stdout.write('.')
			sys.stdout.flush()
		sampleN = myPipe.recv()

	myPipe.send([subObs, subExp])
	# print len(subStats[0])
	return 0
Example #8
def get_err_buffers_graph( g ):
    """Get TGraph x and y buffers"""
    npoints = g.GetN()
    if npoints==0: return None, None
    from numpy import frombuffer, double
    return frombuffer( g.GetEX(), double, npoints)\
         , frombuffer( g.GetEY(), double, npoints)
    def prepare_np_frame(self, shape_str, buf_str):
        '''
        Convert raw frame buffer to numpy array and apply warp perspective
        transformation.

        Emits:

            frame-update : New numpy video frame available with perspective
                transform applied.
        '''
        height, width, channels = np.frombuffer(shape_str, count=3,
                                                dtype='uint32')
        im_buf = np.frombuffer(buf_str, dtype='uint8',
                               count=len(buf_str)).reshape(height, width, -1)

        # Warp and scale
        if self.frame_shape != (width, height):
            # Frame shape has changed.
            old_frame_shape = self.frame_shape
            self.frame_shape = width, height
            self.emit('frame-shape-changed', old_frame_shape, self.frame_shape)
            if self.shape is None:
                self.shape = width, height
        np_warped = cv2.warpPerspective(im_buf, self.transform, self.shape)
        self.emit('frame-update', np_warped)
Example #10
    def __set_data(self):
        """
        Convert the wave file to a NumPy array
        and store it in the data property.
        """
        # Rewind the file pointer to the start of the audio stream
        self.wf.rewind()
        # Read the raw audio buffer (a byte string)
        wbuffer = self.wf.readframes(self.wf.getnframes())
        # Rewind the file pointer to the start of the audio stream again
        self.wf.rewind()

        # Convert to a NumPy array.
        # The buffer is raw binary, so group the bytes into integers
        # according to the sample width (e.g. 2 bytes -> -32768..32767).
        bit = self.sampwidth * 8
        if bit == 8:
            self.data_raw = np.frombuffer(wbuffer, dtype="int8")
        elif bit == 16:
            self.data_raw = np.frombuffer(wbuffer, dtype="int16")
        elif bit == 32:
            self.data_raw = np.frombuffer(wbuffer, dtype="int32")
        elif bit == 24:
            # Read the 24-bit samples and convert them to 16-bit by keeping the upper two bytes
            self.data_raw = np.frombuffer(wbuffer, 'b').reshape(-1, 3)[:, 1:].flatten().view('i2')
        else:
            print('Warning: unsupported sample width of %d bits' % bit)
Example #11
 def test_create_with_metadata(self):
     for length in range(0, 1000, 3):
         # Create an object id string.
         object_id = random_object_id()
         # Create a random metadata string.
         metadata = generate_metadata(length)
         # Create a new buffer and write to it.
         memory_buffer = np.frombuffer(self.plasma_client.create(object_id,
                                                                 length,
                                                                 metadata),
                                       dtype="uint8")
         for i in range(length):
             memory_buffer[i] = i % 256
         # Seal the object.
         self.plasma_client.seal(object_id)
         # Get the object.
         memory_buffer = np.frombuffer(
             self.plasma_client.get_buffers([object_id])[0], dtype="uint8")
         for i in range(length):
             assert memory_buffer[i] == i % 256
         # Get the metadata.
         metadata_buffer = np.frombuffer(
             self.plasma_client.get_metadata([object_id])[0], dtype="uint8")
         assert len(metadata) == len(metadata_buffer)
         for i in range(len(metadata)):
             assert metadata[i] == metadata_buffer[i]
 def nodeValues(self):
     start = time.time()
     with open(self.__basename+'.z2', 'rb') as f:
         nb_nodes, nb_th  = numpy.frombuffer(f.read(8), dtype=numpy.int32)
         f.seek(self.date()*4*(1+nb_nodes), 1)
         res = numpy.frombuffer(f.read(4*(1+nb_nodes)), dtype=numpy.float32)[1:]
         return res - self.nodeCoord()[:,2]
Example #13
 def _read_data(self, fh, byteorder='>'):
     """Return image data from open file as numpy array."""
     fh.seek(len(self.header))
     data = fh.read()
     dtype = 'u1' if self.maxval < 256 else byteorder + 'u2'
     depth = 1 if self.magicnum == b"P7 332" else self.depth
     shape = [-1, self.height, self.width, depth]
     size = functools.reduce(operator.mul, shape[1:], 1)  # prod()
     if self.magicnum in b"P1P2P3":
         data = numpy.array(data.split(None, size)[:size], dtype)
         data = data.reshape(shape)
     elif self.maxval == 1:
         shape[2] = int(math.ceil(self.width / 8))
         data = numpy.frombuffer(data, dtype).reshape(shape)
         data = numpy.unpackbits(data, axis=-2)[:, :, :self.width, :]
     else:
         size *= numpy.dtype(dtype).itemsize
         data = numpy.frombuffer(data[:size], dtype).reshape(shape)
     if data.shape[0] < 2:
         data = data.reshape(data.shape[1:])
     if data.shape[-1] < 2:
         data = data.reshape(data.shape[:-1])
     if self.magicnum == b"P7 332":
         rgb332 = numpy.array(list(numpy.ndindex(8, 8, 4)), numpy.uint8)
         rgb332 *= [36, 36, 85]
         data = numpy.take(rgb332, data, axis=0)
     return data
Example #14
def unmake_packet(whitened_payload_with_crc, whitener_offset=0, dewhitening=True):
    """
    Return (payload)

    @param whitened_payload_with_crc: string
    """
    whitener_offset=0
    dewhitening = True

    if dewhitening:
        i = frombuffer(whitened_payload_with_crc,  dtype = byte)
        j = frombuffer(random_mask[0:len(whitened_payload_with_crc)],  dtype = byte)
        try:
            payload = (bitwise_xor(i, j)).tostring()
        except:
            print("Error: receiving arguments do not have equal length!")
            print(len(i))
            print(len(j))
    else:
        payload = (whitened_payload_with_crc)

    if 0:
        print("payload_with_crc =", string_to_hex_list(payload_with_crc))
        print("ok = %r, len(payload) = %d" % (ok, len(payload)))
        print("payload =", string_to_hex_list(payload))

    return payload
Example #15
    def v2_apply_symmetry(self, symmetry, content):
        """
            Apply a random symmetry to a v2 record.
        """
        assert symmetry >= 0 and symmetry < 8

        # unpack the record.
        (ver, probs, planes, to_move, winner) = self.v2_struct.unpack(content)

        planes = np.unpackbits(np.frombuffer(planes, dtype=np.uint8))
        # We use the full length reflection tables to apply symmetry
        # to all 16 planes simultaneously
        planes = planes[self.full_reflection_table[symmetry]]
        assert len(planes) == 19*19*16
        planes = np.packbits(planes)
        planes = planes.tobytes()

        probs = np.frombuffer(probs, dtype=np.float32)
        # Apply symmetries to the probabilities.
        probs = probs[self.prob_reflection_table[symmetry]]
        assert len(probs) == 362
        probs = probs.tobytes()

        # repack record.
        return self.v2_struct.pack(ver, probs, planes, to_move, winner)
Example #16
    def check_entries(self, entries, prefix="TEST", save=None):
        orig_num_files = len(self.image.files)
        filenames = []
        count = 1
        for data in entries:
            filename = "%s%d.BIN" % (prefix, count)
            self.image.write_file(filename, None, data)
            assert len(self.image.files) == orig_num_files + count
            data2 = np.frombuffer(self.image.find_file(filename), dtype=np.uint8)
            assert np.array_equal(data, data2[0:len(data)])
            count += 1

        # loop over them again to make sure data wasn't overwritten
        count = 1
        for data in entries:
            filename = "%s%d.BIN" % (prefix, count)
            data2 = np.frombuffer(self.image.find_file(filename), dtype=np.uint8)
            assert np.array_equal(data, data2[0:len(data)])
            count += 1
            filenames.append(filename)

        if save is not None:
            self.image.save(save)

        return filenames
    def plotdata(self, offset, nsamples=spksamples):
        f = self.datafile
        f.seek(offset*nchannels*4)
        data = np.frombuffer(f.read(4*nsamples*nchannels), dtype=np.float32)
        nsamples = len(data) // nchannels

        t = np.arange(offset, offset+nsamples)/samplingrate
        axis = [t.min(), t.max(), -maxamp, maxamp]
        
        for p in self.p:
            if p: p.remove()
        for p in self.spk:
            p.remove()
        
        for i in range(nchannels):
            ax = self.ax[i]
            self.p[i], = ax.plot(t, data[i::nchannels], 'k-',
                                 scalex=False, scaley=False)
            ax.axis(axis)
            
        f = self.validationdata
        f.seek(offset*4)
        data = np.frombuffer(f.read(4*nsamples), dtype=np.float32)
        data = np.convolve(self.filt, data)[self.filti:-self.filtj]
        ax = self.ax[nchannels]
        self.p[nchannels], = ax.plot(t, data, 'k-',
                                 scalex=False, scaley=False)
        ax.axis([t.min(), t.max(), -self.validationamp, self.validationamp])
            
        return nsamples
Example #18
def unpack_attribute(att):
    """Unpack an embedded attribute into a python or numpy object."""
    if att.unsigned:
        log.warning('Unsupported unsigned attribute!')

    # TDS 5.0 now has a dataType attribute that takes precedence
    if att.len == 0:  # Empty
        val = None
    elif att.dataType == stream.STRING:  # Then look for new datatype string
        val = att.sdata
    elif att.dataType:  # Then a non-zero new data type
        val = np.frombuffer(att.data,
                            dtype='>' + _dtypeLookup[att.dataType], count=att.len)
    elif att.type:  # Then non-zero old data type
        val = np.frombuffer(att.data,
                            dtype=_attrConverters[att.type], count=att.len)
    elif att.sdata:  # This leaves both 0, try old string
        val = att.sdata
    else:  # Assume new datatype is Char (0)
        val = np.array(att.data, dtype=_dtypeLookup[att.dataType])

    if att.len == 1:
        val = val[0]

    return att.name, val
Example #19
def unconvert(values, dtype, compress=None):

    as_is_ext = isinstance(values, ExtType) and values.code == 0

    if as_is_ext:
        values = values.data

    if dtype == np.object_:
        return np.array(values, dtype=object)

    if not as_is_ext:
        values = values.encode('latin1')

    if compress == 'zlib':
        import zlib
        values = zlib.decompress(values)
        return np.frombuffer(values, dtype=dtype)

    elif compress == 'blosc':
        import blosc
        values = blosc.decompress(values)
        return np.frombuffer(values, dtype=dtype)

    # from a string
    return np.fromstring(values, dtype=dtype)
 def decode_measurements(self, metadata, data):
     offset = 0
     ddata = {}
     self.assertEqual(len(metadata), 4)
     for object_name, md in metadata[0]:
         items = {}
         ddata[object_name] = items
         for feature, count in md:
             next_offset = offset + count * 8
             items[feature] = np.frombuffer(
                     data[offset:next_offset], np.float64)
             offset = next_offset
     for object_name, md in metadata[1]:
         if object_name not in ddata:
             items = {}
             ddata[object_name] = items
         else:
             items = ddata[object_name]
         for feature, count in md:
             next_offset = offset + count * 4
             items[feature] = np.frombuffer(
                     data[offset:next_offset], np.float32)
             offset = next_offset
     for object_name, md in metadata[2]:
         if object_name not in ddata:
             items = {}
             ddata[object_name] = items
         else:
             items = ddata[object_name]
         for feature, count in md:
             next_offset = offset + count * 4
             items[feature] = np.frombuffer(
                     data[offset:next_offset], np.int32)
             offset = next_offset
     return ddata
Example #21
    def test_decompression(self):

        with open("test/simple_points_uncompressed.bin", mode="rb") as fin:
            points_ground_truth = np.frombuffer(fin.read(), dtype=POINT_DTYPE)

        with open("test/simple.laz", mode="rb") as fin:
            raw_data = fin.read()

        laszip_vlr_data = raw_data[
            OFFSET_TO_LASZIP_VLR_DATA : OFFSET_TO_LASZIP_VLR_DATA + LASZIP_VLR_DATA_SIZE
        ]

        raw_points = raw_data[OFFSET_TO_POINT_DATA + SIZEOF_CHUNK_TABLE_OFFSET :]

        laszip_vlr_data = np.frombuffer(laszip_vlr_data, dtype=np.uint8)
        compressed_points = np.frombuffer(raw_points, dtype=np.uint8)

        decompressor = VLRDecompressor(
            compressed_points, POINT_DTYPE.itemsize, laszip_vlr_data
        )
        points_decompressed = decompressor.decompress_points(POINT_COUNT)

        points_decompressed = np.frombuffer(points_decompressed, dtype=POINT_DTYPE)

        self.assertTrue(
            all(
                np.allclose(
                    points_ground_truth[dim_name], points_decompressed[dim_name]
                )
                for dim_name in POINT_DTYPE.names
            )
        )
Example #22
 def get_next_batch(self):
     epoch, batchnum = self.curr_epoch, self.curr_batchnum
     self.advance_batch()
     bidx = batchnum - self.batch_range[0]
     input_name = self.get_data_file_name(bidx)
     input_file = open(input_name, 'rb')
     input_file.seek(0,2)
     size = input_file.tell()
     input_file.seek(0,0)
     bytes_per_image = (self.image_size * self.image_size * 3)
     bytes_per_image_plus_label = (bytes_per_image + 4)
     image_count = (size // bytes_per_image_plus_label)
     if (image_count * bytes_per_image_plus_label) != size:
       sys.stderr.write('Bad file size %s for %s - expected %dx%dx%dx3 + %dx4\n' % (size, input_name, image_count, self.image_size, self.image_size, image_count))
       exit(1)
     #mm = mmap.mmap(input_file.fileno(), size, access=mmap.ACCESS_READ)
     mm = input_file.read(size)
     input_file.close()
     entry = {}
     entry['labels'] = n.frombuffer(mm, dtype=n.float32, count = image_count)
     entry['data'] = n.frombuffer(mm,
       dtype=n.uint8,
       offset = (4 * image_count),
       count = (bytes_per_image * image_count)).reshape((bytes_per_image, image_count))
     return epoch, batchnum, entry
Example #23
    def create_vectors(self, verbs):
        """Create vectors with simple frequency."""
        self.logger.info('Creating frequency vectors for %d features with '
                         '%s...', len(self.feats), verbs)

        j_indices = array.array(str('i'))
        indptr = array.array(str('i'))
        indptr.append(0)
        values = array.array(str('i'))

        for verb in verbs:
            verb_ngrams = verb.ngrams()
            for ngram in verb_ngrams:
                try:
                    j_indices.append(self.feats[ngram])
                except KeyError:
                    pass
                else:
                    values.append(verb_ngrams[ngram])
            indptr.append(len(j_indices))

        j_indices = np.frombuffer(j_indices, dtype=np.intc)
        indptr = np.frombuffer(indptr, dtype=np.intc)
        values = np.frombuffer(values, dtype=np.intc)

        dtm = sparse.csr_matrix((values, j_indices, indptr),
                                shape=(len(indptr) - 1, len(self.feats)))

        dtm.sum_duplicates()

        if self.should_normalize:
            normalize(dtm)

        return dtm
def load_data():
    """Loads the Fashion-MNIST dataset.
    # Returns
        Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
    """
    dirname = os.path.join('datasets', 'fashion-mnist')
    base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'
    files = ['train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',
             't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz']

    paths = []
    for file in files:
        paths.append(get_file(file, origin=base + file, cache_subdir=dirname))

    with gzip.open(paths[0], 'rb') as lbpath:
        y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)

    with gzip.open(paths[1], 'rb') as imgpath:
        x_train = np.frombuffer(imgpath.read(), np.uint8,
                                offset=16).reshape(len(y_train), 28, 28)

    with gzip.open(paths[2], 'rb') as lbpath:
        y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)

    with gzip.open(paths[3], 'rb') as imgpath:
        x_test = np.frombuffer(imgpath.read(), np.uint8,
                               offset=16).reshape(len(y_test), 28, 28)

    return (x_train, y_train), (x_test, y_test)
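Typical usage; the shapes below assume the standard Fashion-MNIST split of 60,000 training and 10,000 test images:

(x_train, y_train), (x_test, y_test) = load_data()
print(x_train.shape, y_train.shape)   # (60000, 28, 28) (60000,)
print(x_test.shape, y_test.shape)     # (10000, 28, 28) (10000,)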
Example #25
    def micro_step(self, adve=True, cond=True):
        """ Defining microphysical step """
        libopts = libcl.lgrngn.opts_t()
        libopts.cond = cond
        libopts.adve = adve
        libopts.coal = libopts.sedi = False

        self.micro.step_sync(libopts, self.state_micro["th_d"], self.state_micro["rv"], self.state_micro["rho_d"])
        self.micro.step_async(libopts)
        
        # absolute number of super-droplets per grid cell
        self.micro.diag_sd_conc()
        self.state_micro["sd"][:] = np.frombuffer(self.micro.outbuf())
        
        # number of particles (per kg of dry air) with r_w < .5 um
        self.micro.diag_wet_rng(0, .5e-6)
        self.micro.diag_wet_mom(0)
        self.state_micro["na"][:] = np.frombuffer(self.micro.outbuf())
        
        # number of particles (per kg of dry air) with r_w > .5 um
        self.micro.diag_wet_rng(.5e-6, 1)
        self.micro.diag_wet_mom(0)
        self.state_micro["nc"][:] = np.frombuffer(self.micro.outbuf())
        
        # cloud water mixing ratio [kg/kg] (same size threshold as above)
        self.micro.diag_wet_mom(3)
        rho_H2O = 1e3
        self.state_micro["rc"][:] = 4./3 * math.pi * rho_H2O * np.frombuffer(self.micro.outbuf())
Example #26
    def _term_counts_to_matrix(self, n_doc, i_indices, j_indices, values):
        """Construct COO matrix from indices and values.

        i_indices and j_indices should be constructed with _make_int_array.
        """
        # array("i") corresponds to np.intc, which is also what scipy.sparse
        # wants for indices, so they won't be copied by the coo_matrix ctor.
        # The length check works around a bug in old NumPy versions:
        # http://projects.scipy.org/numpy/ticket/1943
        if len(i_indices) > 0:
            i_indices = np.frombuffer(i_indices, dtype=np.intc)
        if len(j_indices) > 0:
            j_indices = np.frombuffer(j_indices, dtype=np.intc)

        if self.dtype == np.intc and len(values) > 0:
            values = np.frombuffer(values, dtype=np.intc)
        else:
            # In Python 3.2, SciPy 0.10.1, the coo_matrix ctor won't accept an
            # array.array.
            values = np.asarray(values, dtype=self.dtype)

        shape = (n_doc, max(six.itervalues(self.vocabulary_)) + 1)
        spmatrix = sp.coo_matrix((values, (i_indices, j_indices)),
                                 shape=shape, dtype=self.dtype)
        if self.binary:
            spmatrix.data.fill(1)
        return spmatrix
def bits_float(BYTES):
    d0 = np.frombuffer(BYTES[0::3], dtype='u1').astype(float)
    d1 = np.frombuffer(BYTES[1::3], dtype='u1').astype(float)
    d2 = np.frombuffer(BYTES[2::3], dtype='i1').astype(float)
    d0 += 256 * d1
    d0 += 65536 * d2
    return d0
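bits_float decodes packed little-endian signed 24-bit samples (unsigned low and middle bytes, signed high byte) into floats. A small round-trip sketch that builds such a buffer by keeping the lower three bytes of each 32-bit integer:

import numpy as np

values = np.array([1, -1, 300000, -8388608], dtype="<i4")
raw32 = values.tobytes()
# drop the top byte of each little-endian 32-bit word
packed = b"".join(raw32[i:i + 3] for i in range(0, len(raw32), 4))
print(bits_float(packed))   # 1.0, -1.0, 300000.0, -8388608.0 (up to print formatting)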
 def get_cbs(self, gene_id, cb_type):
     if cb_type == 'ub':
         return numpy.frombuffer(self.ubs[gene_id])
     elif cb_type == 'lb':
         return numpy.frombuffer(self.lbs[gene_id])
     else: 
         assert False, "Unrecognized confidence bound type '%s'" % cb_type
Example #29
    def _get_data(self):
        if self._train:
            data, label = self._train_data, self._train_label
        else:
            data, label = self._test_data, self._test_label

        namespace = 'gluon/dataset/'+self._namespace
        data_file = download(_get_repo_file_url(namespace, data[0]),
                             path=self._root,
                             sha1_hash=data[1])
        label_file = download(_get_repo_file_url(namespace, label[0]),
                              path=self._root,
                              sha1_hash=label[1])

        with gzip.open(label_file, 'rb') as fin:
            struct.unpack(">II", fin.read(8))
            label = np.frombuffer(fin.read(), dtype=np.uint8).astype(np.int32)

        with gzip.open(data_file, 'rb') as fin:
            struct.unpack(">IIII", fin.read(16))
            data = np.frombuffer(fin.read(), dtype=np.uint8)
            data = data.reshape(len(label), 28, 28, 1)

        self._data = nd.array(data, dtype=data.dtype)
        self._label = label
Example #30
def decode(obj, chain=None):
    """
    Decoder for deserializing numpy data types.
    """

    try:
        if b'nd' in obj:
            if obj[b'nd'] is True:

                # Check if b'kind' is in obj to enable decoding of data
                # serialized with older versions (#20):
                if b'kind' in obj and obj[b'kind'] == b'V':
                    descr = [tuple(tostr(t) if type(t) is bytes else t for t in d) \
                             for d in obj[b'type']]
                else:
                    descr = obj[b'type']
                return np.frombuffer(obj[b'data'],
                            dtype=np.dtype(descr)).reshape(obj[b'shape'])
            else:
                descr = obj[b'type']
                return np.frombuffer(obj[b'data'],
                            dtype=np.dtype(descr))[0]
        elif b'complex' in obj:
            return complex(tostr(obj[b'data']))
        else:
            return obj if chain is None else chain(obj)
    except KeyError:
        return obj if chain is None else chain(obj)
Example #31
 def read_single_record(self):
     record_bytes = self.f.read(self.record_size)
     passage_len = int.from_bytes(record_bytes[:4], 'big')
     passage = np.frombuffer(record_bytes[4:], dtype=self.dtype)
     return passage_len, passage
Example #32
 def stream_audio(self, audio_connection, stream, CHUNK):
     data = np.frombuffer(stream.read(CHUNK, exception_on_overflow=False),dtype=np.int16)
     print(stream.get_input_latency())
     return data, pa.paContinue
Example #33
    def __call__(self):
        from genomic_data import GenomicData
        import pandas as pd
        import numpy as np

        known = GenomicData(self.known_file, [self.feature])
        y_pred = []
        y_true = []
        names = []
        length = []
        for name, seq, structure, energy in read_rnafold(self.infile):
            names.append(name)
            structure = np.frombuffer(structure, dtype='S1')
            length.append(len(structure))
            y_pred.append((structure != '.').astype('int32'))
            y_true_seq = known.feature(self.feature, name)
            if known.feature(self.feature, name) is None:
                found = np.nonzero([x.startswith(name) for x in known.names])[0]
                if len(found) == 0:
                    raise ValueError('sequence {} could not be found'.format(name))
                elif len(found) == 1:
                    self.logger.warn('partial sequence name match {} => {}'.format(known.names[found[0]], name))
                    y_true_seq = known.feature(self.feature, known.names[found[0]])
                else:
                    raise ValueError('multiple partial matches found for {}'.format(name))
            y_true.append(y_true_seq)
        """
        y_pred = np.concatenate(y_pred)
        y_true = np.concatenate(y_true)

        scores = {}
        for metric in self.metrics:
            # y_pred is an array of continous scores
            scorer = get_scorer(metric)
            scores[metric] = scorer(y_true, y_pred)
            self.logger.info('metric {} = {}'.format(metric, scores[metric]))
        if self.outfile is not None:
            self.logger.info('save file: {}'.format(self.outfile))
            prepare_output_file(self.outfile)
            fout = h5py.File(self.outfile, 'w')
            fout.create_dataset('y_true', data=y_true)
            fout.create_dataset('y_pred', data=y_pred)
            fout.create_dataset('y_pred_labels', data=y_pred)
            grp = fout.create_group('metrics')
            for metric in self.metrics:
                scorer = get_scorer(metric)
                if get_scorer_type(metric) == 'continuous':
                    try:
                        score = scorer(y_true, y_pred)
                    except ValueError:
                        score = np.nan
                else:
                    score = scorer(y_true, y_pred_labels)
                
                grp.create_dataset(metric, data=scores[metric])
            fout.close()"""
        if True:
            self.logger.info('calculate metrics by sequence')
            records = []
            for i in range(len(names)):
                y_true_ = y_true[i]
                y_pred_ = y_pred[i]
                y_pred_labels_ = y_pred_
                scores = []
                for metric in self.metrics:
                    scorer = get_scorer(metric)
                    if get_scorer_type(metric) == 'continuous':
                        try:
                            score = scorer(y_true_, y_pred_)
                        except ValueError:
                            score = np.nan
                    else:
                        score = scorer(y_true_, y_pred_labels_)
                    scores.append(score)
                records.append([names[i], length[i]] + scores)
            records = pd.DataFrame.from_records(records, columns=['name', 'length'] + self.metrics)
            self.logger.info('save metric by sequence file: ' + self.outfile)
            prepare_output_file(self.outfile)
            records.to_csv(self.outfile, sep='\t', index=False, na_rep='nan')
Example #34
    def __call__(self):
        import numpy as np
        import pandas as pd
        import h5py
        from formats import read_rnafold, structure_to_pairs

        self.logger.info('load model: {}'.format(self.model_file))
        model = keras.models.load_model(self.model_file)
        window_size = K.int_shape(model.input)[1]
        self.logger.info('load input data (in %s format): %s'%(self.format, self.infile))
        have_structure = False
        if self.format == 'fasta':
            # list of tuples: (name, seq)
            input_data = list(read_fasta(self.infile))
        elif self.format == 'ct_dir':
            # read all .ct files from the directory
            # list of tuples: (name, seq, pairs)
            input_data = []
            for filename in os.listdir(self.infile):
                title, seq, pairs = read_ct(os.path.join(self.infile, filename))
                title = os.path.splitext(filename)[0]
                input_data.append((title, seq, pairs))
            have_structure = True
        elif self.format == 'ct':
            title, seq, pairs = read_ct(self.infile)
            title = os.path.splitext(os.path.basename(self.infile))[0]
            input_data = [(title, seq, pairs)]
            have_structure = True
        elif self.format == 'rnafold':
            input_data = []
            for name, seq, structure, energy in read_rnafold(self.infile, parse_energy=False):
                pairs = structure_to_pairs(structure)
                input_data.append((name, seq, pairs))
            have_structure = True
        elif self.format == 'genomic_data':
            from genomic_data import GenomicData
            input_data = []
            data = GenomicData(self.infile)
            for name in data.names:
                input_data.append((name,
                    data.feature('sequence', name).tostring(),
                    data.feature('reactivity', name)))
            del data
            have_structure = True

        # combine all structures (base-pairs) into one array in the ct file
        if have_structure:
            structure = []
            for i in range(len(input_data)):
                structure.append(np.asarray(input_data[i][2], dtype='int32'))
            structure = np.concatenate(structure)
        else:
            structure = None

        X = []
        names = []
        # offset default to the center of the window
        if self.offset is None:
            self.offset = (window_size + 1) // 2
        offset = self.offset

        # convert sequences to windows
        windows = []
        length = []
        sequence = []
        for item in input_data:
            name = item[0]
            seq = item[1]
            windows += self.sequence_to_windows(seq, window_size, offset)
            names.append(name)
            length.append(len(seq))
            sequence.append(seq)
        # combine all sequences into one dataset
        sequence = np.frombuffer(''.join(sequence), dtype='S1')
        length = np.asarray(length, dtype='int64')

        n_samples = len(windows)
        windows = np.frombuffer(''.join(windows), dtype='S1').reshape((n_samples, window_size))
        X = onehot_encode(windows, self.alphabet)
        # set one-hot coding of padded sequence to [0.25, 0.25, 0.25, 0.25]
        X[X.sum(axis=2) == 0] = 1.0/len(self.alphabet)

        self.logger.info('run the model')
        y_pred = model.predict(X, batch_size=self.batch_size)
        y_pred = np.squeeze(y_pred)
        if self.swap_labels:
            self.logger.info('swap labels')
            y_pred = 1 - y_pred

        # start/end position of each transcript in the y_pred
        end = np.cumsum(length)
        start = end - length
        if len(y_pred.shape) > 1:
            # average the predictions
            self.logger.info('average windows for dense prediction')
            y_pred_dense = []
            for i in range(len(input_data)):
                y_pred_dense.append(self.predict_dense(y_pred[start[i]:end[i]], offset))

            if self.dense_pred_file:
                self.logger.info('save dense predictions: ' + self.dense_pred_file)
                f = h5py.File(self.dense_pred_file, 'w')
                for i in range(len(names)):
                    g = f.create_group(names[i])
                    g.create_dataset('predicted_values_dense', data=y_pred[start[i]:end[i]])
                    g.create_dataset('predicted_values_average', data=y_pred_dense[i])
                    # 0-based start/end position of each transcript in the array (y_pred, sequence, structure)
                    g.create_dataset('sequence', data=sequence[start[i]:end[i]])
                    if structure is not None:
                        g.create_dataset('structure', data=structure[start[i]:end[i]])
                f.close()

            y_pred = np.concatenate(y_pred_dense)
            y_pred_labels = np.round(y_pred).astype('int32')
        else:
            y_pred_labels = np.round(y_pred).astype('int32')

        if self.restraint_file:
            header = ['name', 'position', 'pred', 'base']
            table = pd.DataFrame()
            table['name'] = np.repeat(np.asarray(names, dtype='S'), length)
            # start position of each transcript relative to the y_pred
            start = np.repeat(end - length, length)
            # position (1-based) relative to the transcript
            position = np.arange(1, length.sum() + 1) - start
            table['position'] = position
            table['pred'] = y_pred_labels
            table['base'] = sequence
            table['true'] = structure
            self.logger.info('write restraint file: ' + self.restraint_file)
            prepare_output_file(self.restraint_file)
            table.to_csv(self.restraint_file, sep='\t', index=False)
        if self.metric_file:
            self.logger.info('save metric file: ' + self.metric_file)
            prepare_output_file(self.metric_file)
            f = h5py.File(self.metric_file, 'w')
            from sklearn.metrics import accuracy_score
            f.create_dataset('y_pred', data=y_pred)
            f.create_dataset('y_pred_labels', data=y_pred_labels)
            if have_structure:
                #print structure
                y_true = (structure > 0).astype('int32')
                f.create_dataset('y_true', data=y_true)
                g = f.create_group('metrics')
                for metric in self.metrics:
                    scorer = get_scorer(metric)
                    if get_scorer_type(metric) == 'continuous':
                        score = scorer(y_true, y_pred)
                    else:
                        score = scorer(y_true, y_pred_labels)
                    self.logger.info('%s: %f'%(metric, score))
                    g.create_dataset(metric, data=score)
            f.close()
        if self.metric_by_sequence_file:
            self.logger.info('calculate metrics by sequence')
            records = []
            for i in range(len(names)):
                y_true_ = (structure[start[i]:end[i]] > 0).astype('int32')
                y_pred_ = y_pred[start[i]:end[i]]
                y_pred_labels_ = y_pred_labels[start[i]:end[i]]
                scores = []
                for metric in self.metrics:
                    scorer = get_scorer(metric)
                    if get_scorer_type(metric) == 'continuous':
                        try:
                            score = scorer(y_true_, y_pred_)
                        except ValueError:
                            score = np.nan
                    else:
                        score = scorer(y_true_, y_pred_labels_)
                    scores.append(score)
                records.append([names[i], length[i]] + scores)
            records = pd.DataFrame.from_records(records, columns=['name', 'length'] + self.metrics)
            self.logger.info('save metric by sequence file: ' + self.metric_by_sequence_file)
            prepare_output_file(self.metric_by_sequence_file)
            records.to_csv(self.metric_by_sequence_file, sep='\t', index=False, na_rep='nan')
        if self.pred_file:
            self.logger.info('save predictions to file: ' + self.pred_file)
            prepare_output_file(self.pred_file)
            f = h5py.File(self.pred_file, 'w')
            for i in range(len(names)):
                y_true_ = (structure[start[i]:end[i]] > 0).astype('int32')
                g = f.create_group(names[i])
                g.create_dataset('sequence', data=sequence[start[i]:end[i]])
                g.create_dataset('predicted_values', data=y_pred[start[i]:end[i]])
                g.create_dataset('predicted_labels', data=y_pred_labels[start[i]:end[i]])
                g.create_dataset('true_labels', data=y_true_)
            f.close()
Example #35
def unpack_bits(data: bytes, repetitions: int) -> np.ndarray:
    """Unpack bits from a byte array into numpy array of bools."""
    byte_arr = np.frombuffer(data, dtype='uint8').reshape((len(data), 1))
    bits = np.unpackbits(byte_arr, axis=1)[:, ::-1].reshape(-1).astype(bool)
    return bits[:repetitions]
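The bits come back least-significant-bit first within each byte, truncated to the requested number of repetitions:

print(unpack_bits(b"\x05", 3))        # [ True False  True]
print(unpack_bits(b"\x01\x02", 16))   # True at indices 0 and 9, False elsewhere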
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder('>')
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
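_read32 reads one big-endian unsigned 32-bit integer from a stream, the usual pattern for IDX/MNIST headers. A tiny check with an in-memory stream:

import io
import struct

assert _read32(io.BytesIO(struct.pack(">I", 2051))) == 2051   # 2051 is the IDX magic for image files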
    def run(self, consumer, msg_count, msg_array, metadata_array):
        try:
            while True:
                msg = consumer.poll(0.5)
                if msg == None:
                    continue
                elif msg.error() == None:

                    # convert image bytes data to numpy array of dtype uint8
                    nparr = np.frombuffer(msg.value(), np.uint8)

                    # decode image
                    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
                    img = cv2.resize(img, (224, 224))
                    msg_array.append(img)

                    # get metadata
                    frame_no = msg.timestamp()[1]
                    video_name = msg.headers()[0][1].decode("utf-8")

                    metadata_array.append((frame_no, video_name))

                    # bulk process
                    msg_count += 1
                    if msg_count % self.batch_size == 0:
                        # predict on batch
                        img_array = np.asarray(msg_array)
                        img_array = preprocess_input(img_array)
                        predictions = self.model.predict(img_array)
                        labels = decode_predictions(predictions)

                        self.videos_map = reset_map(self.videos_map)
                        for metadata, label in zip(metadata_array, labels):
                            top_label = label[0][1]
                            confidence = label[0][2]
                            confidence = confidence.item()
                            frame_no, video_name = metadata
                            doc = {
                                "frame": frame_no,
                                "label": top_label,
                                "confidence": confidence
                            }
                            self.videos_map[video_name].append(doc)

                        # insert bulk results into mongodb
                        insert_data_unique(self.db, self.videos_map)

                        # commit synchronously
                        consumer.commit(asynchronous=False)
                        # reset the parameters
                        msg_count = 0
                        metadata_array = []
                        msg_array = []

                elif msg.error().code() == KafkaError._PARTITION_EOF:
                    print('End of partition reached {0}/{1}'.format(
                        msg.topic(), msg.partition()))
                else:
                    print('Error occurred: {0}'.format(msg.error().str()))

        except KeyboardInterrupt:
            print("Detected Keyboard Interrupt. Quitting...")
            pass

        finally:
            consumer.close()
        print(" {}: with idx {}\n  id of local_nparray_in_process is {} in PID {}"\
            .format(worker_fn.__name__, idx, id(main_nparray), os.getpid()))
    # we can do any work on the array, here we set every item in this row to
    # have the value of the process id for this process
    main_nparray[idx, :] = np.random.random(main_nparray.shape[1])


if __name__ == '__main__':
    DEFAULT_VALUE = 42
    NBR_OF_PROCESSES = 4

    # create a block of bytes, reshape into a local numpy array
    NBR_ITEMS_IN_ARRAY = SIZE_A * SIZE_B
    shared_array_base = multiprocessing.Array(
        ctypes.c_double, NBR_ITEMS_IN_ARRAY, lock=False)
    main_nparray = np.frombuffer(shared_array_base, dtype=ctypes.c_double)
    main_nparray = main_nparray.reshape(SIZE_A, SIZE_B)
    # Assert no copy was made
    assert main_nparray.base.base is shared_array_base
    print("Created shared array with {:,} nbytes".format(main_nparray.nbytes))
    print("Shared array id is {} in PID {}".format(id(main_nparray), os.getpid()))
    print("Starting with an array of 0 values:")
    print(main_nparray)
    print()

    # Modify the data via our local numpy array
    main_nparray.fill(DEFAULT_VALUE)
    print("Original array filled with value {}:".format(DEFAULT_VALUE))
    print(main_nparray)

    input("Press a key to start workers using multiprocessing...")
Example #39
                                                      fallback=math.inf)

    json_file = compose_path(config['OCR']['OCR_JSON'], config_path)
    ocr_engine = PytorchEngineLineOCR(json_file, gpu_id=0)

    env = lmdb.open(lmdb_db)
    txn = env.begin()

    f = open(input_file_path, "r")
    lines = f.readlines()

    batch_img = []
    batch_txt = []
    f = open(output_file_path, "w", encoding='utf-8')
    for e, line in enumerate(lines):
        if e % batch_size == 0:
            batch_img = []
            batch_txt = []
        image_name = line.split(' ')[0]
        batch_txt.append(image_name)
        data = txn.get(image_name.encode())
        img = cv2.imdecode(np.frombuffer(data, dtype=np.uint8), 1)
        batch_img.append(img)
        if e != 0 and (e + 1) % batch_size == 0:
            process_batch(f, batch_img, batch_txt, ocr_engine, decoder)

    if batch_size != len(batch_img) and len(batch_img) != 0:
        process_batch(f, batch_img, batch_txt, ocr_engine, decoder)

    f.close()
Example #40
def canvas2rgb_array(canvas):
    canvas.draw()
    buf = canvas.tostring_rgb()
    ncols, nrows = canvas.get_width_height()
    return np.frombuffer(buf, dtype=np.uint8).reshape(nrows, ncols, 3)
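A usage sketch with Matplotlib's Agg backend; note that tostring_rgb is deprecated in newer Matplotlib releases, so this is illustrative rather than future-proof:

import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 0])
rgb = canvas2rgb_array(fig.canvas)
print(rgb.shape, rgb.dtype)   # e.g. (480, 640, 3) uint8, depending on figure size and DPI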
Example #41
while True:
    # Ingest data
    data = sys.stdin.buffer.read(INGEST_SIZE * 2)
    if not data:
        break
    data = remaining_data + data

    tmbase = time.time()

    # Save odd byte
    if len(data) % 2 == 1:
        print("Odd byte, that's odd", file=sys.stderr)
        remaining_data = data[-1:]
        data = data[:-1]

    # Convert to complex numbers
    iqdata = numpy.frombuffer(data, dtype=numpy.uint8)
    iqdata = iqdata - 127.5
    iqdata = iqdata / 128.0
    iqdata = iqdata.view(complex)

    # Forward I/Q samples to all channels
    for k, d in demodulators.items():
        d.ingest(iqdata)

for k, d in demodulators.items():
    d.close_queue()

for k, d in demodulators.items():
    d.drain_queue()
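The uint8-to-complex conversion used in the loop above can be seen on a tiny synthetic buffer; each consecutive byte pair becomes one complex I/Q sample centred on zero:

import numpy

raw = bytes([128, 128, 255, 0])   # two interleaved I/Q byte pairs
iq = (numpy.frombuffer(raw, dtype=numpy.uint8) - 127.5) / 128.0
print(iq.view(complex))           # [0.00390625+0.00390625j 0.99609375-0.99609375j]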
Example #42
    def _count_vocab(self, raw_documents, fixed_vocab):
        """Create sparse feature matrix, and vocabulary where fixed_vocab=False
        """
        if fixed_vocab:
            vocabulary = self.vocabulary_
        else:
            # Add a new value when a new vocabulary item is seen
            vocabulary = defaultdict()
            vocabulary.default_factory = vocabulary.__len__

        analyze = self.build_analyzer()
        j_indices = []
        indptr = []

        values = make_int_array()
        indptr.append(0)
        for doc in raw_documents:
            feature_counter = {}
            for feature in analyze(doc):
                try:
                    feature_idx = vocabulary[feature]
                    if feature_idx not in feature_counter:
                        feature_counter[feature_idx] = 1
                    else:
                        feature_counter[feature_idx] += 1
                except KeyError:
                    # Ignore out-of-vocabulary items for fixed_vocab=True
                    continue

            j_indices.extend(feature_counter.keys())
            values.extend(feature_counter.values())
            indptr.append(len(j_indices))

        if not fixed_vocab:
            # disable defaultdict behaviour
            vocabulary = dict(vocabulary)
            if not vocabulary:
                raise ValueError("empty vocabulary; perhaps the documents only"
                                 " contain stop words")

        if indptr[-1] > 2147483648:  # = 2**31
            if sp_version >= (0, 14):
                indices_dtype = np.int64
            else:
                raise ValueError(('sparse CSR array has {} non-zero '
                                  'elements and requires 64 bit indexing, '
                                  ' which is unsupported with scipy {}. '
                                  'Please upgrade to scipy >=0.14').format(
                                      indptr[-1], '.'.join(sp_version)))

        else:
            indices_dtype = np.int32
        j_indices = np.asarray(j_indices, dtype=indices_dtype)
        indptr = np.asarray(indptr, dtype=indices_dtype)
        values = np.frombuffer(values, dtype=np.intc)

        X = sp.csr_matrix((values, j_indices, indptr),
                          shape=(len(indptr) - 1, len(vocabulary)),
                          dtype=self.dtype)
        X.sort_indices()
        return vocabulary, X
Example #43
z = np.indices((5, 3))
# multidimensional arrays can also be generated with the mgrid function
x, y = np.mgrid[0:5, 0:5]

# just like in MATLAB, we can create diagonal matrices
mat_diag = np.diag([a for a in range(5)])
# in the example above, the created vector of values is placed on the main diagonal of the matrix
# diag also accepts a second parameter, the index of a diagonal relative to the main diagonal,
# which is then filled with the values of the supplied vector
mat_diag_k = np.diag([a for a in range(5)], -2)

# NumPy can build a one-dimensional array from any iterable object
z = np.fromiter(range(5), dtype='int32')
# an interesting NumPy function is frombuffer, which lets us create e.g. an array of characters
marcin = b'Marcin'
mar = np.frombuffer(marcin, dtype='S1')
mar_2 = np.frombuffer(marcin, dtype='S2')
# under Python 3.x, however, this function requires the character string to be passed explicitly
# as a byte string, which is done by prefixing the literal with 'b'. A similar effect can be
# achieved in other ways; see the examples below
marcin = 'Marcin'
mar_3 = np.array(list(marcin))
mar_3 = np.array(list(marcin), dtype='S1')
mar_3 = np.array(list(b'Marcin'))
mar_3 = np.fromiter(marcin, dtype='S1')
mar_3 = np.fromiter(marcin, dtype='U1')
# NumPy arrays can be added together in a straightforward way
mat = np.ones((2, 2))
mat2 = np.ones((2, 2))
mat = mat + mat2
print(mat)
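One caveat worth adding to the notes above: np.frombuffer returns a read-only view over the underlying bytes object, so the array has to be copied before it can be modified:

import numpy as np

mar_ro = np.frombuffer(b'Marcin', dtype='S1')
# mar_ro[0] = b'X'          # would raise ValueError: assignment destination is read-only
mar_rw = mar_ro.copy()
mar_rw[0] = b'X'
print(mar_rw.tobytes())     # b'Xarcin'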
 def to_array(line):
     x = np.frombuffer(codecs.decode(line.strip(), 'hex_codec'),
                       dtype='<f8')
     return x.reshape((len(x) // 3, 3))
Example #45
 def extract_data(filename, num_data, head_size, data_size):
     with gzip.open(filename) as bytestream:
         bytestream.read(head_size)
         buf = bytestream.read(data_size * num_data)
         data = np.frombuffer(buf, dtype=np.uint8).astype(np.float64)
     return data
def load_mnist_labels(filename):

    with gzip.open(filename, 'rb') as f:
        data = np.frombuffer(f.read(), np.uint8, offset=8)

    return data
Example #47
def np_frombuffer_allocated_dtype(shape):
    arr = np.ones(shape, dtype=np.int32)
    return np.frombuffer(arr, dtype=np.complex64)
Example #48
    def _convert(self, pid, bin_data, mode='print'):
        r"""
        Parses the packet
        Args:
            pid (int): Packet ID
            bin_data: Binary data
            mode: logging mode {'print', None}

        Returns:
            data (

        """
        data = None
        if pid == 13:
            data = np.frombuffer(bin_data, dtype=self.dt_int16)
            data[0:3] *= 0.061  # Unit [mg/LSB]
            data[3:6] *= 8.750  # Unit [mdps/LSB]
            data[6:] *= 1.52  # Unit [mgauss/LSB]
            if mode == 'print':
                print("Accelerometer: ", data)

        elif pid == 19:
            temperature = bin_data[0]
            light = (1000 / 4095) * np.frombuffer(bin_data[1:3], dtype=self.dt_uint16)  # Unit Lux
            battery = (16.8 / 6.8) * (1.8 / 2457) * np.frombuffer(bin_data[3:5], dtype=self.dt_uint16)  # Unit Volt
            if mode == 'print':
                print("Temperature: ", temperature, ", Light: ", light, ", Battery: ", battery)
            data = (temperature, light, battery)

        elif pid == 27:
            pass  # TODO: make sure the timestamp packet doesn't give an error

        elif pid == 144:  # 4 channel device
            data = self._bit24ToInt(bin_data)
            nChan = 5
            vref = 2.4
            nPacket = 33
            data = data.reshape((nPacket, nChan)).astype(np.float64).T
            data[1:, :] = data[1:, :] * vref / ((2 ** 23) - 1) * 6. / 32.
            if mode == 'print':
                print("EEG data: ", data[1:, 32])

        elif pid == 146:  # 8 channel device + status (ADS1298)
            data = self._bit24ToInt(bin_data)
            nChan = 9
            vref = 2.4
            nPacket = -1
            data = data.reshape((nPacket, nChan)).astype(np.float64).T
            data[0:, :] = data[0:, :] * vref / ((2 ** 23) - 1) * 6. / 32.
            if mode == 'print':
                print("EEG data: ", data[0:, -1])

        elif pid == 30:  # 8 channel device + status (ADS1299)
            data = self._bit24ToInt(bin_data)
            nChan = 9
            vref = 4.5
            nPacket = -1
            data = data.reshape((nPacket, nChan)).astype(np.float64).T
            data[1:, :] = data[1:, :] * vref / ((2 ** 23) - 1) * 6. / 32.
            if mode == 'print':
                print("EEG data: ", data[0:, -1])

        elif pid == 62:  # 8 channel device (ADS1298)
            data = self._bit24ToInt(bin_data)
            nChan = 8
            vref = 4.5
            nPacket = -1
            data = data.reshape((nPacket, nChan)).astype(np.float64).T
            data[0:, :] = data[0:, :] * vref / ((2 ** 23) - 1) * 6. / 32.
            if mode == 'print':
                print("EEG data: ", data[0:, -1])

        return data
Example #49
def np_frombuffer_dtype(b):
    return np.frombuffer(b, dtype=np.complex64)
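For instance, four float32 values reinterpret as two complex64 numbers (real/imaginary pairs):

import numpy as np

buf = np.arange(4, dtype=np.float32).tobytes()   # 16 bytes
print(np_frombuffer_dtype(buf))                  # [0.+1.j 2.+3.j]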
 def _read_radolan_composite(self, loaddata=True):
     """Read quantitative radar composite format of the German Weather Service
 
     The quantitative composite format of the DWD (German Weather Service) was
     established in the course of the
     RADOLAN project and includes several file
     types, e.g. RX, RO, RK, RZ, RP, RT, RC, RI, RG, PC, PG and many, many more.
     (see format description on the RADOLAN project homepage :cite:`DWD2009`).
     At the moment, the national RADOLAN composite is a 900 x 900 grid with 1 km
     resolution and in polar-stereographic projection. There are other grid
     resolutions for different composites (eg. PC, PG)
 
     Warning
     -------
     This function already evaluates and applies the so-called
     PR factor which is specified in the header section of the RADOLAN files.
     The raw values in an RY file are in the unit 0.01 mm/5min, while
     read_radolan_composite returns values in mm/5min (i. e. factor 100 higher).
     The factor is also returned as part of attrs dictionary under
     keyword "precision".
 
     Parameters
     ----------
     f : string or file handle
         path to the composite file or file handle
     missing : int
         value assigned to no-data cells
     loaddata : bool
         True | False, If False function returns (None, attrs)
 
     Returns
     -------
     output : tuple
         tuple of two items (data, attrs):
             - data : :func:`numpy:numpy.array` of shape (number of rows,
               number of columns)
             - attrs : dictionary of metadata information from the file header
 
     Examples
     --------
     See :ref:`/notebooks/radolan/radolan_format.ipynb`.
     """
     
     mask = 0xFFF  # mask for the lowest 12 bits (the data value)
 
     # read the RADOLAN header (falling back to the module-level helpers)
     try:
         self._header = self._read_radolan_header()
     except AttributeError:
         f = get_radolan_filehandle(f)
         self._header = read_radolan_header(f)
     
     attrs = self._parse_dwd_composite_header()
 
     if not loaddata:
         self._fobj.close()
         self._meta = attrs
         return None, attrs
     
     attrs["nodataflag"] = self._missing
     
     if attrs["radarid"] != "10000":
         warnings.warn("You are using wradlib.io.read_radolan_composite "
                       "for a non-composite file.\n"
                       "This might work, but please check the validity "
                       "of the results.")
     
     
     all_pixels = attrs['nrow'] * attrs['ncol']
     
     # read the actual data
     indat = self._read_radolan_binary_array(attrs['datasize'])
 
     # flag index arrays default to empty; the run-length products (PG, PC)
     # below do not set them
     nodata = np.array([], dtype=int)
     clu_mask = np.array([], dtype=int)
     
     if attrs['producttype'] in ('RX', 'EX', 'WX'):
         # convert to 8-bit integers
         arr = np.frombuffer(indat, np.uint8).astype(np.uint8)
         # np.where(condition) returns the indices where the condition holds
         nodata = np.where(arr == 250)[0]
         clu_mask = np.where(arr == 249)[0]
         attrs['cluttermask'] = clu_mask
         
         # apply precision factor
         # this promotes arr to float if precision is float
         arr = arr * attrs['precision']
     
     elif attrs['producttype'] in ('PG', 'PC'):
         arr = decode_radolan_runlength_array(indat, attrs)
     else:
         # convert to writable 16-bit unsigned integers (0 to 65535);
         # .astype() copies, because np.frombuffer alone returns a read-only array
         arr = np.frombuffer(indat, np.uint16).astype(np.uint16)
         # evaluate flag bits 13, 14, 15 and 16; np.where(condition) returns
         # the indices where the condition is true
         secondary = np.where(arr & 0x1000)[0]
         attrs['secondary'] = secondary
         nodata   = np.where(arr & 0x2000)[0]
         negative = np.where(arr & 0x4000)[0]
         clu_mask = np.where(arr & 0x8000)[0]
         
          # same approach for the station-flag and clutter fields:
         
         station_flag_field = np.zeros(all_pixels, int)    # empty station flag field
         station_flag_field[secondary] = 1
         
         # mask out the last 4 bits
         arr &= mask
         # consider negative flag if product is RD (differences from adjustment)
         if attrs['producttype'] == 'RD':
             # NOT TESTED, YET
             arr[negative] = -arr[negative]
         
         # apply precision factor
         # this promotes arr to float if precision is float
         arr = arr * attrs['precision']
         
         self._field_station_flag = station_flag_field.reshape((attrs['nrow'], attrs['ncol']))
     # else
     
     # set no-data cells to NaN (rather than self._missing); NaN is more
     # convenient for subsequent mean computations
     arr[nodata] = np.nan
     
     attrs['cluttermask'] = clu_mask
     clutter = np.zeros(all_pixels, int)    # flattened clutter field: 1 where clutter was flagged
     clutter[clu_mask] = 1
     self._field_clutter = clutter.reshape((attrs['nrow'], attrs['ncol']))
     
     # Exclude zeros. Only possible if values are of float type:
     if self._zeroes_to_nan:
         arr[arr==0] = np.nan
     
     # finally, bring the flat array into its (nrow, ncol) shape
     arr = arr.reshape((attrs['nrow'], attrs['ncol']))
     
     return arr, attrs
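For the 16-bit composites decoded above, the upper four bits carry flags and the lower twelve bits carry the value. A small self-contained illustration of that masking follows; the 0.1 precision factor is only an assumed example value.

import numpy as np

raw = np.array([0x0123, 0x2005, 0x9010], dtype=np.uint16)
secondary = np.where(raw & 0x1000)[0]   # bit 13: secondary (station) data
nodata    = np.where(raw & 0x2000)[0]   # bit 14: no-data flag
negative  = np.where(raw & 0x4000)[0]   # bit 15: negative sign (RD products)
clu_mask  = np.where(raw & 0x8000)[0]   # bit 16: clutter flag
values = (raw & 0x0FFF) * 0.1           # strip the flag bits, apply precision
# secondary -> [2], nodata -> [1], clu_mask -> [2]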
Beispiel #51
0
    def Execute(self, request, context):
        """Execute is called on TRITONBACKEND_ModelInstanceExecute. Inference
        happens in this function. This function mainly converts gRPC
        protobufs to the triton_python_backend_utils.InferenceRequest and
        triton_python_backend_utils.InferenceResponse.
        Parameters
        ----------
        request : python_host_pb2.ExecuteRequest
            Contains a `requests` attribute which is a list of python_host_pb2.InferenceRequest
        """

        requests = request.requests
        inference_requests = []
        for request in requests:
            # This object contains a list of tpb_utils.Tensor
            input_tensors = []
            for request_input in request.inputs:
                x = request_input
                numpy_type = tpb_utils.triton_to_numpy_type(x.dtype)

                # We need to deserialize TYPE_STRING
                if numpy_type == np.object_ or numpy_type == np.bytes_:
                    numpy_data = deserialize_bytes_tensor(x.raw_data)
                    tensor = tpb_utils.Tensor(x.name,
                                              numpy_data.reshape(x.dims))
                    input_tensors.append(tensor)
                else:
                    tensor = tpb_utils.Tensor(
                        x.name,
                        np.frombuffer(x.raw_data,
                                      dtype=numpy_type).reshape(x.dims))
                    input_tensors.append(tensor)

            request_id = request.id
            correlation_id = request.correlation_id
            requested_output_names = request.requested_output_names
            inference_request = tpb_utils.InferenceRequest(
                input_tensors, request_id, correlation_id,
                requested_output_names)
            inference_requests.append(inference_request)

        # Execute inference on the Python backend. `responses` contains a list
        # of triton_python_backend_utils.InferenceResponse; each backend must
        # implement an `execute` method.
        if not hasattr(self.backend, 'execute'):
            context.set_code(grpc.StatusCode.INTERNAL)
            context.set_details('Backend does not implement `execute` method')
            return ExecuteResponse()

        try:
            responses = self.backend.execute(inference_requests)
        except Exception:
            context.set_code(grpc.StatusCode.INTERNAL)
            context.set_details(traceback.format_exc())
            return ExecuteResponse()
        # Make sure that number of InferenceResponse and InferenceRequest
        # objects match
        if len(inference_requests) != len(responses):
            context.set_code(grpc.StatusCode.INTERNAL)
            context.set_details(
                'Number of inference responses and requests don\'t match '
                '(requests={} != responses={})'.format(
                    len(inference_requests), len(responses)))
            return ExecuteResponse()

        exec_responses = []
        for response in responses:
            # If there is an error do not look into output_tensors
            if response.has_error():
                error = Error(message=response.error().message())
                inference_response = InferenceResponse(outputs=[],
                                                       error=error,
                                                       failed=True)
                exec_responses.append(inference_response)
                continue

            output_tensors = response.output_tensors()
            response_tensors = []

            for output_tensor in output_tensors:
                output_np_array = output_tensor.as_numpy()
                output_shape = output_np_array.shape

                # We need to serialize TYPE_STRING
                if output_np_array.dtype.type is np.object_ or output_np_array.dtype.type is np.bytes_:
                    output_np_array = serialize_byte_tensor(output_np_array)

                tensor = Tensor(name=output_tensor.name(),
                                dtype=tpb_utils.numpy_to_triton_type(
                                    output_np_array.dtype.type),
                                dims=output_shape,
                                raw_data=output_np_array.tobytes())

                response_tensors.append(tensor)
            exec_responses.append(InferenceResponse(outputs=response_tensors))
        execute_response = ExecuteResponse(responses=exec_responses)

        return execute_response
Beispiel #52
0
def np_frombuffer_allocated(shape):
    """
    np.frombuffer() on a Numba-allocated buffer.
    """
    arr = np.ones(shape, dtype=np.int32)
    return np.frombuffer(arr)
Beispiel #53
0
 def audiodata_to_array(self, data):
     """
     Re-implemented from RecorderParent
     """
     return np.frombuffer(data, dtype=np.int16).reshape(
         (self.chunk_size, self.channels)) / 2**15
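As a quick illustration of the scaling used above, here is a hypothetical two-frame, two-channel chunk; the real recorder delivers interleaved int16 PCM from the audio driver.

import numpy as np

chunk = np.array([0, 16384, -32768, 32767], dtype=np.int16).tobytes()
frames = np.frombuffer(chunk, dtype=np.int16).reshape((2, 2)) / 2**15
# frames -> [[0.0, 0.5], [-1.0, ~0.99997]]  (values normalised to [-1, 1))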
Beispiel #54
0
def np_frombuffer(b):
    """
    np.frombuffer() on a Python-allocated buffer.
    """
    return np.frombuffer(b)
Beispiel #55
0
 def _recv_ndarray(self):
     request_id, response = self._recv()
     arr_info, arr_val = jsonapi.loads(response[1]), response[2]
     X = np.frombuffer(_buffer(arr_val), dtype=str(arr_info['dtype']))
     return Response(request_id,
                     self.formatter(X.reshape(arr_info['shape'])))
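The receiver above expects dtype and shape as JSON metadata alongside the raw bytes. A minimal sketch of a matching sender side; the framing and transport here are assumptions, only the np.frombuffer round trip is the point.

import json
import numpy as np

X = np.arange(6, dtype=np.float32).reshape(2, 3)
arr_info = {'dtype': str(X.dtype), 'shape': X.shape}
payload = [json.dumps(arr_info).encode('utf-8'), X.tobytes()]

# receiver side, mirroring _recv_ndarray
info = json.loads(payload[0])
Y = np.frombuffer(payload[1], dtype=str(info['dtype'])).reshape(info['shape'])
assert np.array_equal(X, Y)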
Beispiel #56
0
    def __init__(self, data_path, image_list, name, use_cache=0, image_size=None,
                 image_format="NHWC", pre_process=None, count=None):
        super(Imagenet, self).__init__()
        if image_size is None:
            self.image_size = [224, 224, 3]
        else:
            self.image_size = image_size
        self.image_list = []
        self.label_list = []
        self.count = count
        self.use_cache = use_cache
        self.cache_dir = os.path.join("/tmp", "preprocessed", name, image_format)
        self.data_path = data_path
        self.pre_process = pre_process
        # input images are in HWC
        self.need_transpose = (image_format == "NCHW")
        not_found = 0
        if image_list is None:
            # by default look for val_map.txt
            image_list = os.path.join(data_path, "val_map.txt")

        os.makedirs(self.cache_dir, exist_ok=True)

        start = time.time()
        with open(image_list, 'r') as f:
            for s in f:
                image_name, label = re.split(r"\s+", s.strip())
                src = os.path.join(data_path, image_name)
                dst = os.path.join(self.cache_dir, os.path.basename(image_name))
                if not os.path.exists(src):
                    # if the image does not exist, ignore it
                    not_found += 1
                    continue
                if not os.path.exists(dst):
                    # cache a preprocessed version of the image
                    # TODO: make this multi threaded ?
                    with Image.open(src) as img_org:
                        img = self.pre_process(img_org, need_transpose=self.need_transpose, dims=self.image_size)
                        with open(dst, "wb") as fimg:
                            img.tofile(fimg)

                if self.use_cache:
                    # if we use cache, preload the image
                    with open(dst, "rb") as fimg:
                        img = fimg.read()
                        img = np.frombuffer(img, dtype=np.float32)
                        if self.need_transpose:
                            # cached images were saved in CHW layout
                            img = img.reshape(self.image_size[2], self.image_size[0], self.image_size[1])
                        else:
                            img = img.reshape(self.image_size)
                        self.image_list.append(img)
                else:
                    # else use the image path and load at inference time
                    self.image_list.append(dst)

                self.label_list.append(int(label))

                # limit the dataset if requested
                if self.count and len(self.image_list) >= self.count:
                    break

        time_taken = time.time() - start
        if not self.image_list:
            log.error("no images in image list found")
            raise ValueError("no images in image list found")
        if not_found > 0:
            log.info("reduced image list, %d images not found", not_found)

        log.info("loaded {} images, cache={}, took={:.1f}sec".format(
            len(self.image_list), use_cache, time_taken))

        self.label_list = np.array(self.label_list)
        if use_cache:
            self.image_list = np.array(self.image_list)
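The cache above stores each preprocessed image with tofile(), which writes raw bytes without any dtype or shape header, so both must be known when reading back. A minimal round trip illustrating that assumption; the path and sizes are just examples.

import numpy as np

img = np.random.rand(224, 224, 3).astype(np.float32)
img.tofile('/tmp/sample.raw')                       # raw bytes, no metadata
with open('/tmp/sample.raw', 'rb') as fimg:
    restored = np.frombuffer(fimg.read(), dtype=np.float32).reshape(224, 224, 3)
assert np.array_equal(img, restored)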
Beispiel #57
0
    def _read_channel_description(self, channel_description):
        '''
        reads channel description (24 bytes) and returns a dict with keys:
            auth            (1 byte)    [bool]  False for off; True for on
            transformation  (1 byte)    [int]   0 = no transformation;
                                                1 = Canadian compression applied before signature
                                                2 = Canadian compression applied after signature
                                                3 = Steim compression applied before signature
                                                4 = Steim compression applied after signature
            sensor_type     (1 byte)    [int]   0 = seismic
                                                1 = hydroacoustic
                                                2 = infrasound
                                                3 = weather
                                                >3 = other
            option_flag     (1 byte)    [int]   0 = unused
                                                1 = calib and calper provided in bytes 17-24
            site_name       (5 bytes)   [str]   site name, left justified, padded with ASCII null bytes as required
            channel_name    (3 bytes)   [str]   channel name, left justified, padded with ASCII null bytes as required
            location_name   (2 bytes)   [str]   location name, left justified, padded with ASCII null bytes as required
            data_format     (2 bytes)   [str]   uncompressed data format (CSS 3.0 data type), set before signature frame is signed
            calib           (4 bytes)   [float] CSS 3.0 calibration factor when byte 4 is 1, IEEE float
            calper          (4 bytes)   [float] CSS 3.0 calibration period when byte 4 is 1, IEEE float
        '''

        #        print(channel_description,'\n')

        dt = np.dtype(np.single)
        dt = dt.newbyteorder('B')

        cd_pos = 0
        cd = {}
        cd['auth'] = bool(
            int.from_bytes(channel_description[cd_pos:cd_pos + 1],
                           byteorder='big'))
        #        print('auth',cd_pos,cd_pos+1,channel_description[cd_pos:cd_pos+1],cd['auth'])
        cd_pos += 1
        cd['transformation'] = int.from_bytes(
            channel_description[cd_pos:cd_pos + 1], byteorder='big')
        #        print('transformation',cd_pos,cd_pos+1,channel_description[cd_pos:cd_pos+1],cd['transformation'])
        cd_pos += 1
        cd['sensor_type'] = int.from_bytes(
            channel_description[cd_pos:cd_pos + 1], byteorder='big')
        #        print('sensor_type',cd_pos,cd_pos+1,channel_description[cd_pos:cd_pos+1],cd['sensor_type'])
        cd_pos += 1
        cd['option_flag'] = int.from_bytes(
            channel_description[cd_pos:cd_pos + 1], byteorder='big')
        #        print('option_flag',cd_pos,cd_pos+1,channel_description[cd_pos:cd_pos+1],cd['option_flag'])
        cd_pos += 1
        cd['site_name'] = channel_description[cd_pos:cd_pos +
                                              5].strip(b'\x00').decode()
        #        print('site_name',cd_pos,cd_pos+5,channel_description[cd_pos:cd_pos+5],cd['site_name'])
        cd_pos += 5
        cd['channel_name'] = channel_description[cd_pos:cd_pos +
                                                 3].strip(b'\x00').decode()
        #        print('channel_name',cd_pos,cd_pos+3,channel_description[cd_pos:cd_pos+3],cd['channel_name'])
        cd_pos += 3
        cd['location_name'] = channel_description[cd_pos:cd_pos +
                                                  2].strip(b'\x00').decode()
        #        print('location_name',cd_pos,cd_pos+2,channel_description[cd_pos:cd_pos+2],cd['location_name'])
        cd_pos += 2
        cd['data_format'] = channel_description[cd_pos:cd_pos +
                                                2].strip(b'\x00').decode()
        #        print('data_format',cd_pos,cd_pos+2,channel_description[cd_pos:cd_pos+2],cd['data_format'])
        cd_pos += 2
        if cd['option_flag'] == 1:
            cd['calib'] = np.frombuffer(
                channel_description[cd_pos:cd_pos + self._sizes['IEEE float']],
                dtype=dt).item()
            #            print('calib',cd_pos,cd_pos+self._sizes['IEEE float'],channel_description[cd_pos:cd_pos+self._sizes['IEEE float']],cd['calib'])
            cd_pos += self._sizes['IEEE float']
            cd['calper'] = np.frombuffer(
                channel_description[cd_pos:cd_pos + self._sizes['IEEE float']],
                dtype=dt).item()
            #            print('calper',cd_pos,cd_pos+self._sizes['IEEE float'],channel_description[cd_pos:cd_pos+self._sizes['IEEE float']],cd['calper'])
            cd_pos += self._sizes['IEEE float']
        else:
            cd['calib'] = 1.0
            cd['calper'] = 1.0
            cd_pos += 2 * self._sizes['IEEE float']

#        print('\n')

        return cd
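To see the 24-byte layout documented above in action, one can assemble a hypothetical channel description with struct.pack; the field values below (site 'STA01', channel 'BHZ', calib 0.95) are made up for illustration only.

import struct

# auth, transformation, sensor_type, option_flag, site, channel, location,
# data_format, calib, calper -- 4 + 5 + 3 + 2 + 2 + 8 = 24 bytes, big-endian
desc = struct.pack('>BBBB5s3s2s2sff',
                   1, 0, 0, 1, b'STA01', b'BHZ', b'00', b's4', 0.95, 1.0)
calib, calper = struct.unpack('>ff', desc[16:24])   # bytes 17-24, since option_flag == 1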
Beispiel #58
0
def unpack(file, legacy=False):
    """
    Unpacks PulsOn 440 radar data from input file
    """
    with open(file, 'rb') as f:

        # Read configuration part of data
        config = read_config_data(f, legacy)

        # Compute range bins in the data
        scan_start_time = float(config['scan_start'])
        start_range = SPEED_OF_LIGHT * (
            (scan_start_time * 1e-12) - DT_0 * 1e-9) / 2

        # Read data
        data = dict()
        data = {
            'scan_data': [],
            'time_stamp': [],
            'packet_ind': [],
            'packet_pulse_ind': [],
            'range_bins': [],
            'config': config
        }
        single_scan_data = []
        packet_count = 0
        pulse_count = 0

        while True:

            # Read a single data packet and break loop if not a complete packet
            # (in terms of size)
            packet = f.read(1452)
            if len(packet) < 1452:
                break

            # Get information from first packet about how scans are stored and
            # range bins collected
            if packet_count == 0:
                num_range_bins = np.frombuffer(packet[44:48], dtype='>u4')[0]
                num_packets_per_scan = np.frombuffer(packet[50:52],
                                                     dtype='>u2')[0]
                drange_bins = SPEED_OF_LIGHT * T_BIN * 1e-9 / 2
                range_bins = start_range + drange_bins * np.arange(
                    0, num_range_bins, 1)
            packet_count += 1

            # Number of samples in current packet and packet index
            num_samples = np.frombuffer(packet[42:44], dtype='>u2')[0]
            data['packet_ind'].append(
                np.frombuffer(packet[48:50], dtype='>u2')[0])

            # Extract radar data samples from current packet; process last
            # packet within a scan separately to get all data
            packet_data = np.frombuffer(packet[52:(52 + 4 * num_samples)],
                                        dtype='>i4')
            single_scan_data.append(packet_data)

            if packet_count % num_packets_per_scan == 0:
                data['scan_data'].append(np.concatenate(single_scan_data))
                data['time_stamp'].append(
                    np.frombuffer(packet[8:12], dtype='>u4')[0])
                single_scan_data = []
                pulse_count += 1

        # Add last partial scan if present
        if single_scan_data:
            single_scan_data = np.concatenate(single_scan_data)
            num_pad = data['scan_data'][0].size - single_scan_data.size
            single_scan_data = np.pad(single_scan_data, (0, num_pad),
                                      'constant',
                                      constant_values=0)
            data['scan_data'].append(single_scan_data)

        # Stack scan data into 2-D array
        # (rows -> pulses, columns -> range bins)
        data['scan_data'] = np.stack(data['scan_data'])

        # Finalize entries in data
        data['time_stamp'] = np.asarray(data['time_stamp'])
        data['range_bins'] = range_bins

        with open('../Raw_Data/data.pkl', 'wb') as o:
            pickle.dump(data, o)
        return data
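The parser above repeatedly pulls big-endian fields from fixed byte offsets of each packet. A reduced, self-contained illustration of that pattern; this toy packet holds a 32-bit sample count followed by the samples, and the offsets of the real format differ.

import numpy as np

packet = np.array([3], dtype='>u4').tobytes() + \
         np.array([-1, 0, 7], dtype='>i4').tobytes()
num_samples = int(np.frombuffer(packet[0:4], dtype='>u4')[0])
samples = np.frombuffer(packet[4:4 + 4 * num_samples], dtype='>i4')
# samples -> [-1, 0, 7] regardless of the host byte order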
Beispiel #59
0
def main():

    MAXCORUN = int(args.maxCoRun)  # max jobs per gpu
    RANDSEED = int(args.seed)
    gpuNum = int(args.gpuNum)

    logger.debug(
        "GPUs(-g)={}\tMaxCoRun(-c)={}\trandseed(-s)={}\tsaveFile(-f)={}".
        format(gpuNum, MAXCORUN, RANDSEED, args.ofile))

    #----------------------------------------------------------------------
    # 1) application status table : 5 columns
    #----------------------------------------------------------------------
    #
    #    jobid      gpu     status      starT       endT
    #       0       0           1       1           2
    #       1       1           1       1.3         2.4
    #       2       0           0       -           -
    #       ...
    #----------------------------------------------------------------------
    maxJobs = 10000
    rows, cols = maxJobs, 5  # note: init with a large prefixed table
    d_arr = mp.Array(ctypes.c_double, rows * cols)
    arr = np.frombuffer(d_arr.get_obj())
    AppStat = arr.reshape((rows, cols))

    id2name = {}

    #----------------------------------------------------------------------
    # 2) gpu node status: 1 columns
    #----------------------------------------------------------------------
    #
    #    GPU_Node(rows)     ActiveJobs
    #       0               0
    #       1               0
    #       2               0
    #       ...
    #----------------------------------------------------------------------
    #GpuStat = manager.dict()
    #for i in xrange(gpuNum):
    #    GpuStat[i] = 0
    gpuStat = [0 for i in xrange(gpuNum)]

    #--------------------------------------------------------------------------
    # input: app, app2dir_dd in app_info.py
    #--------------------------------------------------------------------------
    if len(app) != len(app2dir_dd):
        print "Error: app number wrong, check ../prepare/app_info.py!"
        sys.exit(1)

    #
    # randomize the input sequences
    #
    app_s1 = genRandSeq(app, seed=RANDSEED)

    apps_num = len(app)
    logger.debug("Total GPU Workloads = {}.".format(apps_num))

    #--------------------------------------------------------------------------
    # 3) model for predicting the best gpu to use
    #--------------------------------------------------------------------------
    logger.debug("Loading a Neural Net model to predict best GPU to use.")

    # metrics
    df_app_metrics = pd.read_csv('../07_nn/appmetrics_with_appname.csv'
                                 )  #  [NOTE: change the metrics if needed !!!]
    df_app_metrics = df_app_metrics.drop(df_app_metrics.columns[0],
                                         axis=1)  # drop the 1st column

    # delete rodinia_heartwall
    df_app_metrics.drop(
        df_app_metrics[df_app_metrics.AppName == 'rodinia_heartwall'].index,
        inplace=True)

    df_rows = df_app_metrics.shape[0]
    logger.debug("Total profiling metrics = {}.".format(df_rows))

    #-----------------------
    # find out the mismatch
    #-----------------------
    count_same = 0
    count_diff = 0
    if df_rows > apps_num:
        print "\n[Warning] input metrics has more apps than the input sequence"
        app_in_df = list(df_app_metrics['AppName'])
        for i in app_in_df:
            if i not in app_s1:
                print("[Warning] {} not in app_s1".format(i))
        print "[Warning] Please fix the error before running!"
        sys.exit(1)

    if df_rows < apps_num:
        print "\n[Warning] input metrics has fewer apps than the input sequence"
        app_in_df = list(df_app_metrics['AppName'])
        for i in app_s1:
            if i not in app_in_df:
                print("[Warning] not in input metrics.".format(i))
        print "[Warning] Please fix the error before running!"
        sys.exit(1)

    if df_rows == apps_num:
        if set(app_s1) == set(list(df_app_metrics['AppName'])):
            logger.debug("Great! The app lists match.")
        else:
            logger.debug(
                "Bummer! The app lists between input sequence and metrics are not equal. Please fix the error."
            )
            sys.exit(1)

    #--------------------
    # load trained model
    #--------------------
    nn_model = pickle.load(open('../07_nn/fastdev_NN_featAll.pkl', 'rb'))

    #
    # predict the best device to use  / assign to each GPU work queue
    #

    gpuWorkq = [[] for i in xrange(gpuNum)]
    app_dev = {}
    for appname in app_s1:
        df_current_app = df_app_metrics.loc[df_app_metrics['AppName'] ==
                                            appname]
        df_current_app = df_current_app.drop(
            df_current_app.columns[0],
            axis=1)  # drop the 1st column : "AppName"
        targetdev = nn_model.predict(df_current_app)
        targetdev = int(targetdev[0])
        app_dev[appname] = targetdev
        #print("{0:<40}:\t {1:2d}".format(appname, targetdev[0]))
        gpuWorkq[targetdev].append(appname)

    #print gpuWorkq[0]
    #print "\n----------\n"
    #print gpuWorkq[1]
    #print "\n----------\n"

    #--------------------------------------------------------------------------
    # 4) model for interference analysis
    #--------------------------------------------------------------------------
    logger.debug("Loading model to predict co-running interference.")

    app2metric = np.load('../prepare/app2metric_featAll.npy').item()  # featAll
    bestmodel = joblib.load(
        '../00_classification_interference/featall_bestmodel.pkl'
    )  # load model, predict app class

    app2class_dd = predict_appclass(app2metric, bestmodel)

    #print app2class_dd

    #--------------------------------------------------------------------------
    # 5) prioritize the interference-insensitive workloads
    # 6) rearrange according to the bin size
    #--------------------------------------------------------------------------
    app_binsize_dd = np.load(
        '../00_classification_interference/app_binsize_dd.npy').item()

    gpuWorkq_new = [[] for i in xrange(gpuNum)]
    for gid, que in enumerate(gpuWorkq):
        #print gid, que
        robust_list = []
        sensitive_list = []
        for curapp in que:
            if app2class_dd[curapp] == 0:
                #print("sensitive : {}".format(curapp))
                sensitive_list.append(curapp)
            else:
                #print("in-sensitive : {}".format(curapp))
                robust_list.append(curapp)

        #
        # sort the app order according to the bin size
        #
        robust_bin_dd = {}
        for ap in robust_list:
            robust_bin_dd[ap] = app_binsize_dd[ap]
        robust_bin_sorted = sorted(robust_bin_dd.items(),
                                   key=operator.itemgetter(1),
                                   reverse=True)
        robust_sorted = [ap for (ap, _) in robust_bin_sorted]

        sensitive_bin_dd = {}
        for ap in sensitive_list:
            sensitive_bin_dd[ap] = app_binsize_dd[ap]
        sensitive_bin_sorted = sorted(sensitive_bin_dd.items(),
                                      key=operator.itemgetter(1),
                                      reverse=True)
        sensitive_sorted = [ap for (ap, _) in sensitive_bin_sorted]

        #gpuWorkq_new[gid].extend(robust_list)
        #gpuWorkq_new[gid].extend(sensitive_list)

        gpuWorkq_new[gid].extend(robust_sorted)  # robust first
        gpuWorkq_new[gid].extend(sensitive_sorted)

    # update work queue order
    gpuWorkq = gpuWorkq_new

    #--------------------------------------------------------------------------
    # Run
    #--------------------------------------------------------------------------
    jobID = -1
    workers = []  # for mp processes
    current_jobid_list = []  # keep track of current application

    while hasworkloads(gpuWorkq):
        Dispatch = has_slot(gpuStat, MAXCORUN)
        #print Dispatch

        #Dispatch, targetGPU, workloadName = select_gpu(gpuStat, gpuWorkq, MAXCORUN)
        #print Dispatch, targetGPU, workloadName
        #print gpuWorkq[targetGPU]
        #print len(gpuWorkq[0]) , len(gpuWorkq[1])

        if Dispatch:
            targetGPU, workloadName = select_gpu(gpuStat, gpuWorkq)
            #print targetGPU

            gpuStat[targetGPU] += 1  # increase the active jobs on the target
            jobID += 1
            id2name[jobID] = workloadName
            current_jobid_list.append(jobID)

            process = Process(target=run_work,
                              args=(jobID, AppStat, app2dir_dd[workloadName],
                                    targetGPU))
            process.daemon = False
            workers.append(process)
            process.start()

        else:
            # spinning, waiting for a free spot
            while True:
                break_loop = False

                #current_running_jobs = 0
                jobs2del = []

                # figure out the jobs that have ended
                for jid in current_jobid_list:
                    if AppStat[jid,
                               2] == 1:  # check the status, if one is done
                        jobs2del.append(jid)
                        break_loop = True
                        break

                if break_loop:
                    for id2del in jobs2del:
                        current_jobid_list.remove(id2del)  # del ended jobs
                        gpuInUse = int(AppStat[id2del, 1])
                        gpuStat[gpuInUse] -= 1  # update gpu active jobs

                    # break the spinning
                    break

            #------------------------------------
            # after spinning, schedule the work
            #------------------------------------
            targetGPU, workloadName = select_gpu(gpuStat, gpuWorkq)
            #print targetGPU

            gpuStat[targetGPU] += 1  # increase the active jobs on the target
            jobID += 1
            id2name[jobID] = workloadName
            current_jobid_list.append(jobID)

            process = Process(target=run_work,
                              args=(jobID, AppStat, app2dir_dd[workloadName],
                                    targetGPU))
            process.daemon = False
            workers.append(process)
            process.start()

        #break
        #if jobID == 10: break

    #=========================================================================#
    # end of running all the jobs
    #=========================================================================#
    for p in workers:
        p.join()

    total_jobs = jobID + 1
    if total_jobs != apps_num:
        logger.debug("[Warning] job number doesn't match.")

    # print out / save trace table
    if args.ofile:
        PrintGpuJobTable(AppStat, total_jobs, id2name, saveFile=args.ofile)
    else:
        PrintGpuJobTable(AppStat, total_jobs, id2name)
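The status table above is the usual shared-memory pattern: a flat multiprocessing array viewed through np.frombuffer as a (jobs x columns) matrix that worker processes update in place. A stripped-down sketch of that idea; the column meanings follow the table comment above, and everything else here is illustrative.

import ctypes
import multiprocessing as mp

import numpy as np

rows, cols = 4, 5                                  # jobid, gpu, status, starT, endT
shared = mp.Array(ctypes.c_double, rows * cols)    # lock-protected shared buffer
app_stat = np.frombuffer(shared.get_obj()).reshape((rows, cols))

def mark_done(job_id, gpu_id, start_t, end_t):
    # a worker would fill in its own row when the job finishes
    app_stat[job_id] = [job_id, gpu_id, 1, start_t, end_t]

mark_done(0, 1, 1.0, 2.4)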
Beispiel #60
0
sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock.bind((host,port))
sock.listen(1)
print("Server is listening")
cv2.namedWindow("DebugScreen")
connection,addrClient=sock.accept()
cam=cv2.VideoCapture(0)
k=0
try:
	while(True):

		#connection,addrClient=sock.accept()
		data=connection.recv(1000000)
		#imData=Image.open(BytesIO(data))
		npImage=np.frombuffer(data,np.uint8)
		try:
			im=cv2.imdecode(npImage,cv2.IMREAD_UNCHANGED)
			#blur=cv2.blur(im,(5,5))
			cv2.imshow("DebugScreen",im)
			cv2.waitKey(1)
		except Exception:
			print("Something is wrong with a transmitted packet")
		k=k+1
		sendmessage=str(k)
		connection.send(bytes(sendmessage,'utf-8'))
		#print(type(imData))
		#imData.save("/home/kv/Documents/TEST/clientserver/clientServerCSharpImage/1CsharpTOPythonImage.jpg")
		#connection.send(b"OK")
	connection.close()
except KeyboardInterrupt: