Example #1
 def setUp(self):
     super(TestRing, self).setUp()
     self.testdir = mkdtemp()
     self.testgz = os.path.join(self.testdir, 'whatever.ring.gz')
     self.intended_replica2part2dev_id = [
         array.array('H', [0, 1, 0, 1]),
         array.array('H', [0, 1, 0, 1]),
         array.array('H', [3, 4, 3, 4])]
     self.intended_devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1.0,
                            'ip': '10.1.1.1', 'port': 6000,
                            'replication_ip': '10.1.0.1',
                            'replication_port': 6066},
                           {'id': 1, 'region': 0, 'zone': 0, 'weight': 1.0,
                            'ip': '10.1.1.1', 'port': 6000,
                            'replication_ip': '10.1.0.2',
                            'replication_port': 6066},
                           None,
                           {'id': 3, 'region': 0, 'zone': 2, 'weight': 1.0,
                            'ip': '10.1.2.1', 'port': 6000,
                            'replication_ip': '10.2.0.1',
                            'replication_port': 6066},
                           {'id': 4, 'region': 0, 'zone': 2, 'weight': 1.0,
                            'ip': '10.1.2.2', 'port': 6000,
                            'replication_ip': '10.2.0.1',
                            'replication_port': 6066}]
     self.intended_part_shift = 30
     self.intended_reload_time = 15
     ring.RingData(
         self.intended_replica2part2dev_id,
         self.intended_devs, self.intended_part_shift).save(self.testgz)
     self.ring = ring.Ring(
         self.testdir,
         reload_time=self.intended_reload_time, ring_name='whatever')
Example #2
 def test_bug_782369(self):
     for i in range(10):
         b = array.array('B', range(64))
     rc = sys.getrefcount(10)
     for i in range(10):
         b = array.array('B', range(64))
     self.assertEqual(rc, sys.getrefcount(10))
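The test above uses a warm-up pass, a refcount snapshot, a second pass, and a comparison to detect reference leaks (here, leaked references to the small int 10). A minimal sketch of the same technique as a hypothetical helper:

import sys

def refcount_delta(work, obj):
    # how many references to obj did work() leak?
    before = sys.getrefcount(obj)
    work()
    return sys.getrefcount(obj) - before

# e.g. refcount_delta(lambda: array.array('B', range(64)), 10) should be 0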
Example #3
    def test_mul(self):
        a = 5*array.array(self.typecode, self.example)
        self.assertEqual(
            a,
            array.array(self.typecode, 5*self.example)
        )

        a = array.array(self.typecode, self.example)*5
        self.assertEqual(
            a,
            array.array(self.typecode, self.example*5)
        )

        a = 0*array.array(self.typecode, self.example)
        self.assertEqual(
            a,
            array.array(self.typecode)
        )

        a = (-1)*array.array(self.typecode, self.example)
        self.assertEqual(
            a,
            array.array(self.typecode)
        )

        a = 5 * array.array(self.typecode, self.example[:1])
        self.assertEqual(
            a,
            array.array(self.typecode, [a[0]] * 5)
        )

        self.assertRaises(TypeError, a.__mul__, "bad")
Example #4
    def CreateGraph(self, name):
        x      = array("d")
        y      = array("d")
        x_err  = array("d")
        y_errl = array("d")
        y_errh = array("d")

        for point in self.data:
            x.append(float(point.mass))
            x_err.append(0.)
            if name == "Observed":
                y.append(float(point.observed))
                y_errl.append(0.)
                y_errh.append(0.)
            else:
                y.append(float(point.expected))
                if "1" in name:
                    y_errl.append(float(point.expectedMinus1Sigma))
                    y_errh.append(float(point.expectedPlus1Sigma))
                elif "2" in name:
                    y_errl.append(float(point.expectedMinus2Sigma))
                    y_errh.append(float(point.expectedPlus2Sigma))
                else:
                    y_errl.append(0.)
                    y_errh.append(0.)
        graph = ROOT.TGraphAsymmErrors(len(x), x, y, x_err, x_err, y_errl, y_errh)
        graph.SetName(name)
        return graph
Example #5
    def test_pop(self):
        a = array.array(self.typecode)
        self.assertRaises(IndexError, a.pop)

        a = array.array(self.typecode, 2*self.example)
        self.assertRaises(TypeError, a.pop, 42, 42)
        self.assertRaises(TypeError, a.pop, None)
        self.assertRaises(IndexError, a.pop, len(a))
        self.assertRaises(IndexError, a.pop, -len(a)-1)

        self.assertEntryEqual(a.pop(0), self.example[0])
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[1:]+self.example)
        )
        self.assertEntryEqual(a.pop(1), self.example[2])
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[1:2]+self.example[3:]+self.example)
        )
        self.assertEntryEqual(a.pop(0), self.example[1])
        self.assertEntryEqual(a.pop(), self.example[-1])
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[3:]+self.example[:-1])
        )
Example #6
    def test_len(self):
        a = array.array(self.typecode)
        a.append(self.example[0])
        self.assertEqual(len(a), 1)

        a = array.array(self.typecode, self.example)
        self.assertEqual(len(a), len(self.example))
Example #7
    def test_cmp(self):
        a = array.array(self.typecode, self.example)
        self.assertIs(a == 42, False)
        self.assertIs(a != 42, True)

        self.assertIs(a == a, True)
        self.assertIs(a != a, False)
        self.assertIs(a < a, False)
        self.assertIs(a <= a, True)
        self.assertIs(a > a, False)
        self.assertIs(a >= a, True)

        al = array.array(self.typecode, self.smallerexample)
        ab = array.array(self.typecode, self.biggerexample)

        self.assertIs(a == 2*a, False)
        self.assertIs(a != 2*a, True)
        self.assertIs(a < 2*a, True)
        self.assertIs(a <= 2*a, True)
        self.assertIs(a > 2*a, False)
        self.assertIs(a >= 2*a, False)

        self.assertIs(a == al, False)
        self.assertIs(a != al, True)
        self.assertIs(a < al, False)
        self.assertIs(a <= al, False)
        self.assertIs(a > al, True)
        self.assertIs(a >= al, True)

        self.assertIs(a == ab, False)
        self.assertIs(a != ab, True)
        self.assertIs(a < ab, True)
        self.assertIs(a <= ab, True)
        self.assertIs(a > ab, False)
        self.assertIs(a >= ab, False)
Example #8
    def load_from_file(self, filename):
        print('Loading graph from file: ' + filename)

        with open(filename) as f:
            (n, _nlinks) = (map(int, f.readline().split()))

            self._titles = []  # list of article titles
            self._sizes = array.array('L', [0]*n)  # size of the i-th article
            self._links = array.array('L', [0]*_nlinks)  # flat list of link targets - Compressed Sparse Row layout
            self._redirect = array.array('B', [0]*n)  # redirect flag
            self._offset = array.array('L', [0]*(n+1))  # offset[i] holds the index in self._links where the links of the i-th article start
            current_link = 0
            for i in range(n):
                __title = f.readline()
                self._titles.append(__title.rstrip())
                (size, redirect, amount_links) = (map(int, f.readline().split()))
                self._sizes[i] = size
                self._redirect[i] = redirect
                for j in range(current_link, current_link + amount_links):
                    self._links[j] = int(f.readline())
                current_link += amount_links
                self._offset[i+1] = self._offset[i] + amount_links

        print('Graph loaded')
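With this Compressed Sparse Row layout, the outgoing links of article i are the slice of self._links between self._offset[i] and self._offset[i+1]. A sketch of such an accessor (links_of is a hypothetical helper, not part of the class above):

def links_of(graph, i):
    # slice the flat CSR link array for article i
    return graph._links[graph._offset[i]:graph._offset[i + 1]]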
Example #9
    def load_from_file(self, filename):
        print('Loading graph from file: ' + filename)

        with open(filename) as f:
            s=f.readline()
            s=s.split()
            (n, _nlinks) = (int(s[0]), int(s[1]))  # number of articles and total number of links

            self._titles = []
            self._edges=array.array('L',[0]*_nlinks)
            self._sizes = array.array('L', [0]*n)
            self._links = array.array('L', [0]*_nlinks)
            self._redirect = array.array('B', [0]*n)
            self._offset = array.array('L', [0]*(n+1))

            # read the graph from the file
            for i in range(n):
                k=f.readline()
                self._titles.append(k[:-1])
                k=f.readline()
                k=k.split()
                self._sizes[i]=int(k[0])
                self._redirect[i]=int(k[1])
                self._links[i]=int(k[2])  # number of links in article i
                self._offset[i+1]=self._offset[i]+self._links[i]
                for j in range(int(k[2])):
                    m=f.readline()
                    self._edges[j+self._offset[i]]=int(m)
        print('Graph loaded')
Example #10
File: hist.py Project: vgm64/rootpy
    def __init__(self, *args, **kwargs):

        name = kwargs.get('name', None)
        title = kwargs.get('title', None)

        params = self._parse_args(*args)

        if params[0]['bins'] is None and params[1]['bins'] is None:
            Object.__init__(self, name, title,
                params[0]['nbins'], params[0]['low'], params[0]['high'],
                params[1]['nbins'], params[1]['low'], params[1]['high'])
        elif params[0]['bins'] is None and params[1]['bins'] is not None:
            Object.__init__(self, name, title,
                params[0]['nbins'], params[0]['low'], params[0]['high'],
                params[1]['nbins'], array('d', params[1]['bins']))
        elif params[0]['bins'] is not None and params[1]['bins'] is None:
            Object.__init__(self, name, title,
                params[0]['nbins'], array('d', params[0]['bins']),
                params[1]['nbins'], params[1]['low'], params[1]['high'])
        else:
            Object.__init__(self, name, title,
                params[0]['nbins'], array('d', params[0]['bins']),
                params[1]['nbins'], array('d', params[1]['bins']))

        self._post_init(**kwargs)
Example #11
    def load_from_file(self, filename):
        print('Loading graph from file: ' + filename)

        with open(filename) as f:
            file_data = f.readline().split()
            self._n = int(file_data[0])  # n - number of articles
            self._nlinks = int(file_data[-1])  # nlinks - total number of links over all articles

            self._titles = []  # titles - article titles
            self._sizes = array.array('L', [0]*self._n)  # sizes - article sizes
            self._links = array.array('L', [0]*self._nlinks)  # links - flat list of link targets
            self._redirect = array.array('B', [0]*self._n)  # redirect - redirect flags
            self._offset = array.array('L', [0]*(self._n+1))  # offset[i] - index in _links where the links of article i start

            links_iterator = 0
            for title_number in range(self._n):
                self._titles.append(f.readline().rstrip())
                title_size, redirect_flag, title_links_number = [int(x) for x in f.readline().split()]
                self._sizes[title_number] = title_size
                self._redirect[title_number] = redirect_flag
                for link_number in range(title_links_number):
                    self._links[links_iterator] = int(f.readline())
                    links_iterator += 1
                self._offset[title_number + 1] = links_iterator  # record where the next article's links begin

        print('Graph loaded')
Example #12
 def testGetAccumulate(self):
     group = self.WIN.Get_group()
     size = group.Get_size()
     group.Free()
     for array in arrayimpl.ArrayTypes:
         for typecode in arrayimpl.TypeMap:
             for count in range(self.COUNT_MIN, 10):
                 for rank in range(size):
                     ones = array([1]*count, typecode)
                     sbuf = array(range(count), typecode)
                     rbuf = array(-1, typecode, count+1)
                     gbuf = array(-1, typecode, count+1)
                     for op in (MPI.SUM, MPI.PROD,
                                MPI.MAX, MPI.MIN,
                                MPI.REPLACE, MPI.NO_OP):
                         self.WIN.Lock(rank)
                         self.WIN.Put(ones.as_mpi(), rank)
                         self.WIN.Flush(rank)
                         r = self.WIN.Rget_accumulate(sbuf.as_mpi(),
                                                      rbuf.as_mpi_c(count),
                                                      rank, op=op)
                         r.Wait()
                         self.WIN.Flush(rank)
                         r = self.WIN.Rget(gbuf.as_mpi_c(count), rank)
                         r.Wait()
                         self.WIN.Unlock(rank)
                         #
                         for i in range(count):
                             self.assertEqual(sbuf[i], i)
                             self.assertEqual(rbuf[i], 1)
                             self.assertEqual(gbuf[i], op(1, i))
                         self.assertEqual(rbuf[-1], -1)
                         self.assertEqual(gbuf[-1], -1)
Example #13
  def proc_gentlink(self):
    # get GENL (generic netlink) header info
    cmd='TASKSTATS\0'
    mlen=len(cmd) + 4

    msg_ts=array.array(str('B'))
    msg_ts.fromstring(struct.pack("BBxx", CTRL_CMD_GETFAMILY, 0))
    msg_ts.fromstring(struct.pack("HH", mlen, CTRL_ATTR_FAMILY_NAME))
    msg_ts.fromstring(cmd)
    # pad the family-name attribute to a 4-byte boundary
    msg_ts.fromstring('\0' * ((4 - (len(cmd) % 4)) & 0x3))

    nlmhdr_msg=array.array(str('B'),struct.pack(str('=IHHII'), len(msg_ts) + 16, NETLINK_GENERIC, NLM_F_REQUEST, 0, 0))
    nlmhdr_msg.extend(msg_ts)

    self.socket.send(nlmhdr_msg)

    data = self.socket.recv(65536)#(16384)
    data=self.unpack_nlhdr(data)
    data=self.unpack_genlhdr(data)

    while len(data) > 0:
          data=self.unpack_attr_hdr(data)
    #
    if self.flags & 0x2 == 0:
          if debug>0: print "End of receiving message!"
          return
Example #14
    def vectorizer(self):
        """
        Turns tokens into a word vector in the bag of words format. Also
        builds a vocabulary which contains all of the unique words from the
        tokens.
        """
        vocab = defaultdict(None)
        vocab.default_factory = vocab.__len__

        indices = array.array(str('i'))
        indptr = array.array(str('i'))
        indptr.append(0)

        for tokens in Bow.tokenizer(self):
            for token in tokens:
                indices.append(vocab[token])
            indptr.append(len(indices))

        vocab = dict(vocab)
        values = np.ones(len(indices), dtype=int)

        word_matrix = sp.csr_matrix((values, indices, indptr), shape = (len(indptr) - 1, len(vocab)))

        word_matrix.sum_duplicates()
        
        self.vocab = vocab
        self.word_matrix = word_matrix
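The vocab defaultdict above hands every unseen token the next integer id by using the dict's own length as the default factory. The trick in isolation:

from collections import defaultdict

vocab = defaultdict(None)
vocab.default_factory = vocab.__len__

for token in ['to', 'be', 'or', 'not', 'to', 'be']:
    vocab[token]  # the first lookup of a token assigns the next id

print(dict(vocab))  # {'to': 0, 'be': 1, 'or': 2, 'not': 3}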
Example #15
def doc2bow(tokens, vocab):
    """
    Transforms tokens into bag of word representation from a given vocabulary,
    a port of the gensim function of the same name

    Parameters
    ----------
    tokens: list
        a list of tokens from a body of text

    vocab : dict
        a dictionary of format {word : index}

    Returns
    -------

    word_vector : scipy.sparse.csr_matrix
        a sparse vector bag of word representation of the given tokens

    """
    indices = array.array(str('i'))
    indptr = array.array(str('i'))
    indptr.append(0)
    
    for token in tokens:
        indices.append(vocab[token])
    
    indptr.append(len(indices))
    values = np.ones(len(indices))
    word_vector = sp.csr_matrix((values, indices, indptr), shape = (len(indptr) - 1, len(vocab)))
    
    word_vector.sum_duplicates()
    return word_vector
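Hypothetical usage with a toy vocabulary (the names below are illustrative only); sum_duplicates collapses the repeated 'cat' entry into a count of 2:

vocab = {'cat': 0, 'sat': 1, 'mat': 2}
vec = doc2bow(['cat', 'sat', 'mat', 'cat'], vocab)
print(vec.toarray())  # [[2. 1. 1.]]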
Example #16
    def __init__(self, data):
        self.raw = data
        self.u16s = array.array('H', data)
        assert(self.u16s.itemsize == 2)
        self.u32s = array.array('I', data)
        assert(self.u32s.itemsize == 4)

        stream = Reader(data)

        # parse header
        stream.read(36)
        if stream.u32() != 0x70:
            print('Warning, unexpected header size!')
        if stream.u32() != 0x12345678:
            print('Warning, unexpected endianess tag!')

        self.link = SizeOff(stream)
        self.map_off = stream.u32()
        self.string_ids = SizeOff(stream)
        self.type_ids = SizeOff(stream)
        self.proto_ids = SizeOff(stream)
        self.field_ids = SizeOff(stream)
        self.method_ids = SizeOff(stream)
        self.class_defs = SizeOff(stream)
        self.data = SizeOff(stream)

        defs = self.class_defs
        self.classes = []
        for i in range(defs.size):
            self.classes.append(DexClass(self, defs.off, i))
Example #17
 def __init__(self, width, height):
     self.width = int(width)
     self.height = height
     try:
         self._b = [ array('c', ' '*int(width)) for line in range(height) ]
     except TypeError:
         self._b = [ array('u', ' '*int(width)) for line in range(height) ]
Example #18
 def __create_pie_image(self):
   self.__create_on_disk()
   vals=[]
   colors=[]
   for n,col in zip((self.n_fails,self.n_nulls,self.n_successes,self.n_skiped),(kRed,kYellow,kGreen,kBlue)):
     if n!=0:
       vals.append(n)
       colors.append(col)
   valsa=array('f',vals)
   colorsa=array('i',colors)
   can = TCanvas("cpie","TPie test",100,100);
   try:
     pie = TPie("ThePie",self.name,len(vals),valsa,colorsa);
     label_n=0
     if self.n_fails!=0:
       pie.SetEntryLabel(label_n, "Fail: %.1f(%i)" %(self.get_fail_rate(),self.n_fails) );
       label_n+=1
     if self.n_nulls!=0:
       pie.SetEntryLabel(label_n, "Null: %.1f(%i)" %(self.get_null_rate(),self.n_nulls) );      
       label_n+=1
     if self.n_successes!=0:
       pie.SetEntryLabel(label_n, "Success: %.1f(%i)" %(self.get_success_rate(),self.n_successes) );
       label_n+=1
     if self.n_skiped!=0:
       pie.SetEntryLabel(label_n, "Skipped: %.1f(%i)" %(self.get_skiped_rate(),self.n_skiped));
     pie.SetY(.52);
     pie.SetAngularOffset(0.);    
     pie.SetLabelsOffset(-.3);
     #pie.SetLabelFormat("#splitline{%val (%perc)}{%txt}");
     pie.Draw("3d  nol");
     can.Print(self.get_summary_chart_name());    
   except:
     print("self.name = %s" %self.name)
     print("len(vals) = %s (vals=%s)" %(len(vals),vals))
     print("valsa = %s" %valsa)
     print("colorsa = %s" %colorsa)
Example #19
        def __init__(self, *args, **kwargs):

            params = self._parse_args(args)
            name = kwargs.pop('name', None)
            title = kwargs.pop('title', None)

            if params[0]['bins'] is None and params[1]['bins'] is None:
                super(Hist2D, self).__init__(
                    params[0]['nbins'], params[0]['low'], params[0]['high'],
                    params[1]['nbins'], params[1]['low'], params[1]['high'],
                    name=name, title=title)
            elif params[0]['bins'] is None and params[1]['bins'] is not None:
                super(Hist2D, self).__init__(
                    params[0]['nbins'], params[0]['low'], params[0]['high'],
                    params[1]['nbins'], array('d', params[1]['bins']),
                    name=name, title=title)
            elif params[0]['bins'] is not None and params[1]['bins'] is None:
                super(Hist2D, self).__init__(
                    params[0]['nbins'], array('d', params[0]['bins']),
                    params[1]['nbins'], params[1]['low'], params[1]['high'],
                    name=name, title=title)
            else:
                super(Hist2D, self).__init__(
                    params[0]['nbins'], array('d', params[0]['bins']),
                    params[1]['nbins'], array('d', params[1]['bins']),
                    name=name, title=title)

            self._post_init(**kwargs)
Example #20
    def sample (self, percent):
        """Sample the tags for a given percentage.

        Warning: the current object is changed!
        """
        self.total = 0
        for key in self._locations.keys():
            plus = self._locations[key][0]
            plus_ind = self._indexes[key][0]
            total_plus = len(plus)
            num = int(total_plus*percent)
            ind_tokeep = sorted(random_sample(xrange(total_plus), num))
            self._locations[key][0] = array(BYTE4, (plus[i] for i in ind_tokeep))
            total_unique = 0
            self._indexes[key][0] = array(BYTE4, [])
            pappend = self._indexes[key][0].append
            for i in ind_tokeep:
                pappend(plus_ind[i])
                if plus_ind[i] == 0:
                    total_unique += 1
            
            minus = self._locations[key][1]
            minus_ind = self._indexes[key][1]
            total_minus = len(minus)
            num = int(total_minus*percent)
            ind_tokeep = sorted(random_sample(xrange(total_minus), num))
            self._locations[key][1] = array(BYTE4, (minus[i] for i in ind_tokeep))
            self._indexes[key][1] = array(BYTE4, [])
            mappend = self._indexes[key][1].append
            for i in ind_tokeep:
                mappend(minus_ind[i])
                if minus_ind[i] == 0:
                    total_unique += 1

            self.total += total_unique
Example #21
 def remove_redundant (self):
     """Remove redundant position, keep the highest score.
     
     """
     chrs = set(self._data.keys())
     ndata = {}
     for chrom in chrs:
         ndata[chrom] = [array(BYTE4,[]),array(FBYTE4,[])] # for (pos,value)
         nd_p_append = ndata[chrom][0].append
         nd_s_append = ndata[chrom][1].append
         (p,s) = self._data[chrom]
         prev_p = None
         prev_s = None
         for i in xrange(len(p)):
             pi = p[i]
             si = s[i]
             if prev_p is None:  # explicit None check: position 0 is falsy
                 prev_p = pi
                 prev_s = si
             else:
                 if pi == prev_p:
                     if si>prev_s:
                         prev_s = si
                 else:
                     nd_p_append (prev_p)
                     nd_s_append (prev_s)
                     prev_p = pi  # restart the run at the new position
                     prev_s = si
         nd_p_append (prev_p)
         nd_s_append (prev_s)
     del self._data
     self._data = ndata
Example #22
 def getGenotypesForSubject(self, s, raw=False):
     """ Returns list of genotypes for all m markers
         for subject s.  If raw==True, then an array
         of raw integer gcodes is returned instead
     """
     if self._quick:
         nmarkers = len(self._markers)
         raw_array = array('i', [0]*nmarkers)
         seek_nibble = s % 4
         for m in xrange(nmarkers):
             seek_byte = m * self._bytes_per_marker + s/4 + HEADER_LENGTH
             self._bedf.seek(seek_byte)
             geno = struct.unpack('B', self._bedf.read(1))[0]
             quartet = INT_TO_GCODE[geno]
             gcode = quartet[seek_nibble]
             raw_array[m] = gcode
     else:
         raw_array = array('i', [row[s] for row in self._genotypes])
         
     if raw:
         return raw_array
     else:
         result = []
         for m, gcode in enumerate(raw_array):
             result.append(self._marker_allele_lookup[m][gcode])
         return result
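The quick path relies on the PLINK .bed packing: each byte holds four 2-bit genotype codes, and sample s sits in nibble s % 4. A sketch of how a table like INT_TO_GCODE could be precomputed (the exact table in the source may differ):

def byte_to_gcodes(b):
    # split one packed byte into four 2-bit codes, lowest-order pair first
    return [(b >> (2 * k)) & 0b11 for k in range(4)]

INT_TO_GCODE = [byte_to_gcodes(b) for b in range(256)]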
Example #23
def set_palette(name='default', ncontours=200):
    """Set a color palette from a given RGB list
    stops, red, green and blue should all be lists of the same length
    see set_decent_colors for an example"""

    if name == 'gray' or name == 'grayscale':
        stops = [0.00, 0.34, 0.61, 0.84, 1.00]
        red   = [1.00, 0.84, 0.61, 0.34, 0.00]
        green = [1.00, 0.84, 0.61, 0.34, 0.00]
        blue  = [1.00, 0.84, 0.61, 0.34, 0.00]
    # elif name == "whatever":
        # (define more palettes)
    elif name == 'brett':
        # brett palette
        print 'Setting palette to "Brett"'
        stops = [0.00, 0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80,  0.90, 0.95, 1.00]
        red   = [0.00, 0.00, 0.05, 0.05, 0.10, 0.15, 0.70, 0.90, 1.00, 1.00,  1.00, 1.00, 0.51]
        green = [0.00, 0.00, 0.50, 0.50, 1.00, 0.90, 0.90, 0.90, 0.80, 0.60,  0.60, 0.00, 0.00]
        blue  = [0.51, 1.00, 1.00, 0.60, 0.50, 0.30, 0.20, 0.15, 0.10, 0.05,  0.05, 0.00, 0.00]

    else:
        # default palette, looks cool
        stops = [0.00, 0.34, 0.61, 0.84, 1.00]
        red   = [0.00, 0.00, 0.87, 1.00, 0.51]
        green = [0.00, 0.81, 1.00, 0.20, 0.00]
        blue  = [0.51, 1.00, 0.12, 0.00, 0.00]

    s = array('d', stops)
    r = array('d', red)
    g = array('d', green)
    b = array('d', blue)

    npoints = len(s)
    TColor.CreateGradientColorTable(npoints, s, r, g, b, ncontours)
    gStyle.SetNumberContours(ncontours)
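Assuming ROOT's gStyle and TColor are already imported as above, switching all subsequent plots to grayscale is then a single call:

set_palette('gray', ncontours=100)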
Example #24
def setBrightness(brightness):
    global lastBrightness

    if (abs(brightness - lastBrightness) < 5):
        # if the analog setting drifts a little, don't bother changing the brightness
        return
    lastBrightness = brightness
    
    if brightness > 255:
        brightness = 255
    elif brightness < 0:
        brightness = 0
        
    # for Sparkfun Serial backpack, brightness range is 128 to 157
    if DISPLAY_TYPE == 'SPARKFUN':
        brightness = 128 + (brightness*29)/ 255        
    if Debug:
        print("setting brightness to " + str(brightness))
    if DISPLAY_TYPE == 'MATRIX_ORBITAL':
        cmd = array.array('B', (COMMAND_PREFIX, 153, brightness))
    elif DISPLAY_TYPE == 'SPARKFUN':  
        cmd = array.array('B', (124, brightness))
    else:
        cmd = array.array('B', (128, brightness))
    writeToDisplay(cmd.tostring())
Example #25
def setCursorPos(row, col, clearRow = False):
    """Position the LCD cursor - row and col are 1-based"""
    if clearRow and DISPLAY_TYPE != 'SPARKFUN':
        # clear the row by writing blanks to all columns in the row
        # move to start of row 
        if DISPLAY_TYPE == 'MATRIX_ORBITAL':
            cmd = array.array('B', (COMMAND_PREFIX, 71, 1, row))
        else:
            cmd = array.array('B', (COMMAND_PREFIX, 128, (row - 1) * numCols))        
        writeToDisplay(cmd.tostring())   
        writeToDisplay(' ' * numCols)   
    if DISPLAY_TYPE == 'MATRIX_ORBITAL':
        cmd = array.array('B', (COMMAND_PREFIX, 71, col, row))
    elif DISPLAY_TYPE == 'SPARKFUN':
        offset = 127 + col
        if row == 2:
            offset = 128  + 63 + col
        elif row == 3:
            offset = 128 + 19 + col
        elif row == 4:
            offset = 128 + 83 + col
        if Debug:
            print("setting cursor pos for Sparkfun " + str(offset))
        cmd = array.array('B', (COMMAND_PREFIX, offset))
    else:        
        cmd = array.array('B', (COMMAND_PREFIX, 128, (row - 1) * numCols + col - 1))
    writeToDisplay(cmd.tostring())  
Example #26
 def __init__(self, width, height, texsize=128):
     self.umap = array('f', [0.0 for i in range(width * height)])
     self.vmap = array('f', [0.0 for i in range(width * height)])
     self.mask = array('B', [0 for i in range(width * height)])
     self.width = width
     self.height = height
     self.texsize = texsize
Example #27
def makeAutomat(mapping):
    """automat is array of integers (signed 16bits)
    on position 2*i is stored state transfer from state i if 0 is read      
    on position 2*i+1 is stored transfer if 1 is read
    if transfer & 1  than transfer links to result array
    otherwise transfer>>1 is new state, transfers from new states are in automat on positions transfer and transfer+1
    in results, the lies on positions transfer-1 and transfer
    """
    automat=array('i',[-1,-1])
    results=array('i')
    for bits, (d1,d2) in mapping.iteritems():
        state=0
        #make state  transfer for all but last bit
        for bit in bits[:-1]:
            bit=int(bit)
            if automat[state|bit]==-1:
                automat[state|bit]=len(automat)
                automat.extend([-1,-1])
            state=automat[state|bit]
            assert not state&1
        #make last bit link to endstates table
        bit=int(bits[-1])
        assert automat[state|bit]==-1
        automat[state|bit]=len(results)|1
        results.extend([d1,d2])
    return automat, results
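The matching side (not shown in the source) would walk the automaton bit by bit until it hits an odd transfer, which points into results. A hypothetical lookup sketch:

def lookup(automat, results, bits):
    # follow transfers until a result link (odd transfer) is reached
    state = 0
    for bit in bits:
        transfer = automat[state | int(bit)]
        if transfer & 1:
            return results[transfer - 1], results[transfer]
        state = transfer
    raise KeyError('no result for ' + bits)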
Example #28
 def getGenotypesByIndices(self, s, mlist, format):
     """ needed for grr if lped - deprecated but..
     """
     mlist = dict(zip(mlist,[True,]*len(mlist))) # hash quicker than 'in' ?
     raw_array = array('i', [row[s] for m,row in enumerate(self._genotypes) if mlist.get(m,None)])            
     if format == 'raw':
         return raw_array
     elif format == 'ref':
         result = array('i', [0]*len(mlist))
         for m, gcode in enumerate(raw_array):
             if gcode == HOM0:
                 nref = 3
             elif gcode == HET:
                 nref = 2
             elif gcode == HOM1:
                 nref = 1
             else:
                 nref = 0
             result[m] = nref
         return result
     else:
         result = []
         for m, gcode in enumerate(raw_array):
             result.append(self._marker_allele_lookup[m][gcode])
         return result
Example #29
    def test_pack_into(self):
        test_string = b'Reykjavik rocks, eow!'
        writable_buf = array.array('b', b' '*100)
        fmt = '21s'
        s = struct.Struct(fmt)

        # Test without offset
        s.pack_into(writable_buf, 0, test_string)
        from_buf = writable_buf.tobytes()[:len(test_string)]
        self.assertEqual(from_buf, test_string)

        # Test with offset.
        s.pack_into(writable_buf, 10, test_string)
        from_buf = writable_buf.tobytes()[:len(test_string)+10]
        self.assertEqual(from_buf, test_string[:10] + test_string)

        # Go beyond boundaries.
        small_buf = array.array('b', b' '*10)
        self.assertRaises((ValueError, struct.error), s.pack_into, small_buf, 0,
                          test_string)
        self.assertRaises((ValueError, struct.error), s.pack_into, small_buf, 2,
                          test_string)

        # Test bogus offset (issue 3694)
        sb = small_buf
        self.assertRaises((TypeError, struct.error), struct.pack_into, b'', sb,
                          None)
Example #30
def __load_binary(data):
    """Load binary graph as used by the cpp implementation of this algorithm
    """
    if type(data) == types.StringType :
        data = open(data, "rb")

    reader = array.array("I")
    reader.fromfile(data, 1)
    num_nodes = reader.pop()
    reader = array.array("I")
    reader.fromfile(data, num_nodes)
    cum_deg = reader.tolist()
    num_links = reader.pop()
    reader = array.array("I")
    reader.fromfile(data, num_links)
    links = reader.tolist()
    graph = nx.Graph()
    graph.add_nodes_from(range(num_nodes))
    prec_deg = 0

    for index in range(num_nodes) :
        last_deg = cum_deg[index]
        neighbors = links[prec_deg:last_deg]
        graph.add_edges_from([(index, int(neigh)) for neigh in neighbors])
        prec_deg = last_deg

    return graph
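A sketch of the inverse operation, under the assumption that the binary layout is exactly what __load_binary reads back: a node count, the cumulative degrees, then the flattened neighbor lists, all as unsigned 32-bit ints (save_binary is hypothetical, not part of the source):

import array

def save_binary(graph, path):
    cum_deg, links, total = [], [], 0
    for node in sorted(graph.nodes()):
        neighbors = sorted(graph.neighbors(node))
        total += len(neighbors)
        cum_deg.append(total)  # cumulative degree after this node
        links.extend(neighbors)
    with open(path, 'wb') as out:
        array.array('I', [graph.number_of_nodes()]).tofile(out)
        array.array('I', cum_deg).tofile(out)
        array.array('I', links).tofile(out)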
Example #31
arreglo = [1, 2, 3]  # equivalent to Java's ArrayList
# For the CPython implementation see: https://github.com/python/cpython/blob/master/Objects/listobject.c

print(f"Id arreglo: {id(arreglo)} original")
arreglo.append(1)  # O(1) almost always. If the array is full and needs a realloc it is O(n)
print(f"Id arreglo: {id(arreglo)} after append")
arreglo+=[1]  # does not create a new list
print(f"Id arreglo: {id(arreglo)} after concat")

arr2 = [3, 4]
print(f"Id arr2: {id(arr2)} original")

print(f"Id arreglo+arr2: {id(arreglo+arr2)}")  # creates a new list
print(f"Id arr2+arreglo: {id(arr2+arreglo)}")

arreglo.insert(1, 2)  # shifts every element after index i, i.e. O(n)
arreglo.insert(2, 2)
arreglo.insert(3, 2)

# https://docs.python.org/3/library/array.html
from array import array
arr = array('l', [1, 2, 3, 4, 5])
arr.append(2)
print(arr)
for a in arr:
    print(a)
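To see the space trade-off, compare sys.getsizeof on both containers. Note that the list figure counts only its pointer table, while each referenced int object adds roughly 28 more bytes on CPython, whereas the array stores raw machine words inline (a sketch; exact numbers vary by platform):

import sys
from array import array

nums = list(range(1000))
packed = array('l', nums)
print(sys.getsizeof(nums), sys.getsizeof(packed))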


Example #32
def test_gdal_contour_4():
    if test_cli_utilities.get_gdal_contour_path() is None:
        return 'skip'

    try:
        os.remove('tmp/contour_orientation.shp')
    except:
        pass
    try:
        os.remove('tmp/contour_orientation.dbf')
    except:
        pass
    try:
        os.remove('tmp/contour_orientation.shx')
    except:
        pass

    drv = gdal.GetDriverByName('GTiff')
    wkt = 'GEOGCS[\"WGS 84\",DATUM[\"WGS_1984\",SPHEROID[\"WGS 84\",6378137,298.257223563,AUTHORITY[\"EPSG\",\"7030\"]],AUTHORITY[\"EPSG\",\"6326\"]],PRIMEM[\"Greenwich\",0,AUTHORITY[\"EPSG\",\"8901\"]],UNIT[\"degree\",0.0174532925199433,AUTHORITY[\"EPSG\",\"9108\"]],AUTHORITY[\"EPSG\",\"4326\"]]'

    size = 160
    precision = 1. / size

    ds = drv.Create('tmp/gdal_contour_orientation.tif', size, size, 1)
    ds.SetProjection( wkt )
    ds.SetGeoTransform( [ 1, precision, 0, 50, 0, -precision ] )

# Make the elevation 15 for the whole image
    raw_data = array.array('h',[15 for i in range(int(size))]).tostring()
    for i in range(int(size)):
        ds.WriteRaster( 0, i, int(size), 1, raw_data,
                        buf_type = gdal.GDT_Int16,
                        band_list = [1] )

# Create a hill with elevation 25
    raw_data = array.array('h',[25 for i in range(2)]).tostring()
    for i in range(2):
        ds.WriteRaster( int(size/4)+int(size/8)-1, i+int(size/2)-1, 2, 1, raw_data,
                        buf_type = gdal.GDT_Int16,
                        band_list = [1] )

# Create a depression with elevation 5
    raw_data = array.array('h',[5 for i in range(2)]).tostring()
    for i in range(2):
        ds.WriteRaster( int(size/2)+int(size/8)-1, i+int(size/2)-1, 2, 1, raw_data,
                        buf_type = gdal.GDT_Int16,
                        band_list = [1] )

    ds = None

    gdaltest.runexternal(test_cli_utilities.get_gdal_contour_path() + ' -a elev -i 10 tmp/gdal_contour_orientation.tif tmp/contour_orientation1.shp')

    ds = ogr.Open('tmp/contour_orientation1.shp')

    expected_contours = [ 'LINESTRING (1.621875 49.493749999999999,'+
                                      '1.628125 49.493749999999999,'+
                                      '1.63125 49.496875000000003,'+
                                      '1.63125 49.503124999999997,'+
                                      '1.628125 49.50625,'+
                                      '1.621875 49.50625,'+
                                      '1.61875 49.503124999999997,'+
                                      '1.61875 49.496875000000003,'+
                                      '1.621875 49.493749999999999)',
                          'LINESTRING (1.371875 49.493749999999999,'+
                                      '1.36875 49.496875000000003,'+
                                      '1.36875 49.503124999999997,'+
                                      '1.371875 49.50625,'+
                                      '1.378125 49.50625,'+
                                      '1.38125 49.503124999999997,'+
                                      '1.38125 49.496875000000003,'+
                                      '1.378125 49.493749999999999,'+
                                      '1.371875 49.493749999999999)' ]
    expected_elev = [ 10, 20 ]

    lyr = ds.ExecuteSQL("select * from contour_orientation1 order by elev asc")

    if lyr.GetFeatureCount() != len(expected_contours):
        print('Got %d features. Expected %d' % (lyr.GetFeatureCount(), len(expected_contours)))
        return 'fail'

    i = 0
    test_failed = False
    feat = lyr.GetNextFeature()
    while feat is not None:
        expected_geom = ogr.CreateGeometryFromWkt(expected_contours[i])
        if feat.GetField('elev') != expected_elev[i]:
            print('Got %f. Expected %f' % (feat.GetField('elev'), expected_elev[i]))
            return 'fail'
        if ogrtest.check_feature_geometry(feat, expected_geom) != 0:
            print('Got      %s.\nExpected %s' % (feat.GetGeometryRef().ExportToWkt(),expected_contours[i]))
            test_failed = True
        i = i + 1
        feat = lyr.GetNextFeature()

    ds.ReleaseResultSet(lyr)
    ds.Destroy()

    if test_failed:
        return 'fail'
    else:
        return 'success'
Example #33
        'uncerDn' : [0.0537,0.0667,0.0789,0.0920,0.1049,0.1189,0.133,0.1484],

        'x_values': sqrts[1:],
        'setup' : {'color': kAzure, 'linestyle': kSolid ,'linewidth': 2, 'fillstyle' : 3004, 'draw_opt' : "E2" },
        'legend' : {'text': "Standard model (m_{H} = 125 GeV, N^{3}LO gg#rightarrow H)", 'draw_opt' : 'fl'}
            },

}


# create asymmetric-error graphs from 'graphs_list'
#v_data_hi_allunc = TVectorD(len(data_hi_allunc), array('d',[data_hi_allunc[i] for i in range(len(data_hi_allunc))]))
#for gr in graphs_list.keys():
for gr_key, gr_setup in graphs_list.iteritems():
    #create arrays from lists
    v_x_values = TVectorD(len(gr_setup['x_values']), array('d',[gr_setup['x_values'][i] for i in range(len(gr_setup['x_values']))]))
    if gr_key.startswith('model_dependancy'):
        v_x_valuesUp = v_x_valuesDn = TVectorD(len(gr_setup['x_values']), array('d',[0.2 for i in range(len(gr_setup['x_values']))]))
    else:
        v_x_valuesUp = v_x_valuesDn = TVectorD(len(gr_setup['x_values']), array('d',[0.0 for i in range(len(gr_setup['x_values']))]))

    v_central = TVectorD(len(gr_setup['central']), array('d',[gr_setup['central'][i] for i in range(len(gr_setup['central']))]))
    v_uncerUp = TVectorD(len(gr_setup['uncerUp']), array('d',[gr_setup['uncerUp'][i] for i in range(len(gr_setup['uncerUp']))]))
    v_uncerDn = TVectorD(len(gr_setup['uncerDn']), array('d',[gr_setup['uncerDn'][i] for i in range(len(gr_setup['uncerDn']))]))
    gr_setup['graph'] = TGraphAsymmErrors(v_x_values,v_central,v_x_valuesDn,v_x_valuesUp, v_uncerDn,v_uncerUp)
    if not gr_key.startswith("measured"):
        gr_setup['graph'].SetFillStyle(gr_setup['setup']['fillstyle']);
        gr_setup['graph'].SetFillColor(gr_setup['setup']['color'])
        gr_setup['graph'].SetLineColor(gr_setup['setup']['color'])
        #gr_setup['graph'].SetLineColor(kBlack)
        gr_setup['graph'].SetLineStyle(gr_setup['setup']['linestyle'])
Example #34
    def writeCktFile(self, filePath):
 
        rootObj = dict()
        
        numWires = len(self.wireList)
        nextCtrl = 0xFFFE

        # 'B' for unsigned char, one byte per entry
        wirePulled = array('B', [0] * numWires)

        # 'I' for unsigned int, minimum of 2 bytes
        wireControlFets = array('I')
        wireGates = array('I')
        numNoneWires = 0
        wireNames = []

        for i, wire in enumerate(self.wireList):
            if wire is None:
                wireControlFets.append(0)
                wireControlFets.append(nextCtrl)
                wireGates.append(0)
                wireGates.append(nextCtrl)
                numNoneWires += 1
                wireNames.append('')
                continue

            wirePulled[i] = wire.pulled

            wireControlFets.append(len(wire.ins))
            for transInd in wire.ins:
                wireControlFets.append(transInd)
            wireControlFets.append(nextCtrl)

            wireGates.append(len(wire.outs))
            for transInd in wire.outs:
                wireGates.append(transInd)
            wireGates.append(nextCtrl)

            wireNames.append(wire.name)

        noWire = 0xFFFD
        numFets = len(self.transistorList)
        fetSide1WireInds = array('I', [noWire] * numFets)
        fetSide2WireInds = array('I', [noWire] * numFets)
        fetGateWireInds  = array('I', [noWire] * numFets)

        for i, trans in enumerate(self.transistorList):
            if trans is None:
                continue
            fetSide1WireInds[i] = trans.c1
            fetSide2WireInds[i] = trans.c2
            fetGateWireInds[i] = trans.gate

        rootObj['NUM_WIRES'] = numWires
        rootObj['NEXT_CTRL'] = nextCtrl
        rootObj['NO_WIRE'] = noWire
        rootObj['WIRE_PULLED'] = wirePulled
        rootObj['WIRE_CTRL_FETS'] = wireControlFets
        rootObj['WIRE_GATES'] = wireGates
        rootObj['WIRE_NAMES'] = wireNames
        rootObj['NUM_FETS'] = numFets
        rootObj['FET_SIDE1_WIRE_INDS'] = fetSide1WireInds
        rootObj['FET_SIDE2_WIRE_INDS'] = fetSide2WireInds
        # Extra info to verify the data and connections
        rootObj['FET_GATE_INDS'] = fetGateWireInds
        rootObj['NUM_NULL_WIRES'] = numNoneWires

        of = open(filePath, 'wb')
        pickle.dump(rootObj, of)
        of.close()
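A hedged sketch of the reading side (split_records is hypothetical): each wire's entry was written as a count, the items, then the NEXT_CTRL sentinel, so the flat array can be walked back into per-wire lists.

def split_records(flat, sentinel=0xFFFE):
    # layout: [count, item_0 .. item_{count-1}, sentinel] repeated
    records, i = [], 0
    while i < len(flat):
        count = flat[i]
        records.append(list(flat[i + 1:i + 1 + count]))
        i += count + 2  # skip the count, the items, and the sentinel
    return records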
Example #35
def image_from_rgba_data(size, data_list):
    data = array('B', data_list).tobytes()
    image = Image.frombytes('RGBA', size, data)
    return image
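Hypothetical usage (pixel values are illustrative): four RGBA pixels, two opaque red on one diagonal, make a 2x2 image.

pixels = [255, 0, 0, 255,   0, 0, 0, 0,
          0, 0, 0, 0,   255, 0, 0, 255]
img = image_from_rgba_data((2, 2), pixels)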
Example #36
def image_from_palette_data(size, data_list):
    data = array('B', data_list).tobytes()
    image = Image.frombytes('P', size, data)
    return image
Example #37
def main():

    f = open("baseline.result", "r")
    baselines = f.readlines()
    f.close()
    baselines = [line.split(",") for line in baselines]

    (nvar, tauc_avg, tauc_error) = zip(*baselines)

    nvar = [int(i) for i in nvar]
    tauc_avg = [float(i) for i in tauc_avg]
    tauc_error = [float(i) for i in tauc_error]
    max_n_var = len(nvar)

    leg = ROOT.TLegend(0.5, 0.2, 0.8, 0.35)
    leg.SetBorderSize(0)
    leg.SetFillColor(0)
    leg.SetTextSize(0.025)
    leg.SetTextFont(42)

    x = array.array('f', range(1, max_n_var + 1))
    y = array.array('f', tauc_avg)
    ye = array.array('f', tauc_error)
    xe = array.array('f', [0] * max_n_var)
    gr = ROOT.TGraphErrors(max_n_var, x, y, xe, ye)
    gr.SetMarkerColor(38)
    gr.SetLineColor(38)
    gr.SetMarkerStyle(20)
    gr.SetTitle("ROC Integral vs No. of Variables")
    gr.GetXaxis().SetTitle("No. of Training Variables")
    gr.GetYaxis().SetTitle("Relative Performance (AUROC)")
    leg.AddEntry(
        gr,
        "#splitline{Reference Baseline}{(Randomly selected variable sets as basis of comparison)}",
        "p")

    f = open("roc-tmva.result", "r")
    lines = f.readlines()
    f.close()
    blabel = [float((l.split(",")[0]).split("-")[-1]) for l in lines]
    bdata = [float((l.split(",")[-1])) for l in lines]
    x2 = array.array('f', blabel)
    y2 = array.array('f', bdata)
    d = dict()

    gr4 = plot_file("roc-tmva.result", 813, 813)
    gr9 = plot_file("itrRm.result", 905, 905)

    c1 = ROOT.TCanvas(
        "c1", "Relative Performance (AUROC) vs No. of Training Variables", 950,
        600)
    gr.Draw("APL")

    gr4.Draw("PL")
    leg.AddEntry(
        gr4,
        "#splitline{TMVA out-of-the-box Ranking}{(Performance of top N vars as ranked by TMVA)}",
        "p")
    leg.AddEntry(
        gr9,
        "#splitline{Iterative Removal - vSearch}{(Removing var with least impact on performance iteratively)}",
        "p")

    leg.Draw()

    print "press enter to continue\n"
    raw_input()
Example #38
 def test_hash_array(self):
     a = array.array("b", range(10))
     for cons in self.hash_constructors:
         c = cons(a)
         c.hexdigest()
Example #39
try:
    N = int(sys.argv[1])
except:
    N = 200

try:
    runs = int(sys.argv[2])
except:
    runs = 10
    
#sys.stdout = open("fourpeaks%d.txt" % N, "w")
sys.stdout = open("fourpeaks.csv", "a")  
  
#N=200
T=int((N/5)*4)
fill = [2] * N
ranges = array('i', fill)

iters = 500000

ef = FourPeaksEvaluationFunction(T)
odd = DiscreteUniformDistribution(ranges)
nf = DiscreteChangeOneNeighbor(ranges)
mf = DiscreteChangeOneMutation(ranges)
cf = SingleCrossOver()
df = DiscreteDependencyTree(.1, ranges)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
pop = GenericProbabilisticOptimizationProblem(ef, odd, df)

t0 = time.time()
calls = []
Example #40
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Python 2.x

# a simple binary grep utility

import os, glob, mmap, shutil, sys, array

if len(sys.argv) != 2:
    print "usage: bgrep.py hex_string"
    print "example of hex_string: 001122aabb"
    exit(0)

pattern = array.array('B', sys.argv[1].decode("hex"))

for dir, subdir, files in os.walk(os.getcwd()):
    for file in files:
        try:
            fullfname = dir + "/" + file
            fp = open(fullfname, 'r+')
            if os.stat(fp.name).st_size > 0:
                #print ("processing %s" % fp.name)
                mm = mmap.mmap(fp.fileno(), os.stat(fp.name).st_size)
                if mm.find(pattern.tostring(), 0) != -1:  # mmap.find wants a byte string, not an array
                    sys.stdout.write("[%s] pattern found\n" % fp.name)
                    sys.stdout.flush()
                mm.close()
        except IOError:
            sys.stderr.write("%s: IOError\n" % fullfname)
Example #41
def get_fiducial_histograms(source, obs, prods, add_stat_only=False, scaling={}):
    if '%s/fiducial' % confdir not in sys.path:
        sys.path.append('%s/fiducial' % confdir)

    from nuisances import nuisances
    
    nominals = {}
    for prod in prods:
        phist = source.Get('fiducial/%s/histo_%s' % (obs, prod))
        try:
            phist.Scale(scaling[prod])
        except KeyError:
            pass

        nominals[prod] = phist
        phist.SetDirectory(0)
    
    htotal = nominals[prods[0]].Clone('total_%s' % obs)
    htotal.SetDirectory(0)
    for prod in prods[1:]:
        htotal.Add(nominals[prod])

    if add_stat_only:
        statonly = htotal.Clone()

    uncert = rnp.array(htotal.GetSumw2()) # stat uncert squared

    for nuis in nuisances.itervalues():
        up = np.zeros_like(uncert)
        down = np.zeros_like(uncert)
        
        if nuis['type'] == 'shape':
            for prod in nuis['samples']:
                if prod not in prods:
                    continue

                dup = rnp.hist2array(source.Get('fiducial/%s/histo_%s_%sUp' % (obs, prod, nuis['name'])), include_overflow=True, copy=False)
                ddown = rnp.hist2array(source.Get('fiducial/%s/histo_%s_%sDown' % (obs, prod, nuis['name'])), include_overflow=True, copy=False)
                try:
                    dup *= scaling[prod]
                    ddown *= scaling[prod]
                except KeyError:
                    pass

                up += dup
                down += ddown

        elif nuis['type'] == 'lnN':
            for prod, value in nuis['samples'].iteritems():
                if prod not in prods:
                    continue
                
                nom = rnp.hist2array(nominals[prod], include_overflow=True, copy=False)
                if '/' in value:
                    vdown, vup = map(float, value.split('/'))
                    up += nom * vup
                    down += nom * vdown
                else:
                    value = float(value)
                    up += nom * value
                    down += nom / value

        up -= down
        up *= 0.5
        uncert += np.square(up)

    htotal.GetSumw2().Set(len(uncert), array.array('d', uncert))

    if add_stat_only:
        return nominals, htotal, statonly
    else:
        return nominals, htotal
Example #42
    def _add_rules(self, rules, _legalese=common_license_words, _spdx_tokens=frozenset()):
        """
        Add a list of Rule objects to the index and constructs optimized and
        immutable index structures.

        `_legalese` is a set of common license-specific words aka. legalese
        `_spdx_tokens` is a set of token strings used in SPDX license identifiers
        """
        if self.optimized:
            raise Exception('Index has been optimized and cannot be updated.')

        # initial dictionary mapping for known legalese tokens
        ########################################################################

        # FIXME: we should start at 1, so that ids become valid unichr values

        self.dictionary = dictionary = {
            ts: tid for tid, ts in enumerate(sorted(_legalese))}
        dictionary_get = dictionary.get

        self.len_legalese = len_legalese = len(dictionary)
        highest_tid = len_legalese - 1

        # Add SPDX key tokens to the dictionary
        # these are always treated as non-legalese. This may seem weird
        # but they are detected in expressions alright and some of their
        # tokens exist as rules too (e.g. GPL)
        ########################################################################
        for sts in sorted(_spdx_tokens):
            stid = dictionary_get(sts)
            if stid is None:
                # we have a never yet seen token, so we assign a new tokenid
                highest_tid += 1
                stid = highest_tid
                dictionary[sts] = stid

        self.rules_by_rid = rules_by_rid = list(rules)
        # ensure that rules are sorted
        rules_by_rid.sort()
        len_rules = len(rules_by_rid)

        # create index data structures
        # OPTIMIZATION: bind frequently used methods to the local scope for
        # index structures
        ########################################################################
        tids_by_rid_append = self.tids_by_rid.append

        false_positive_rids_add = self.false_positive_rids.add
        regular_rids_add = self.regular_rids.add
        approx_matchable_rids_add = self.approx_matchable_rids.add

        # since we only use these for regular rules, these lists may be sparse.
        # their index is the rule rid
        self.high_postings_by_rid = high_postings_by_rid = [None] * len_rules
        self.sets_by_rid = sets_by_rid = [None] * len_rules
        self.msets_by_rid = msets_by_rid = [None] * len_rules

        # track all duplicate rules: fail and report dupes at once at the end
        dupe_rules_by_hash = defaultdict(list)

        rules_automaton_add = partial(match_aho.add_sequence,
            automaton=self.rules_automaton, with_duplicates=False)

        if USE_AHO_FRAGMENTS:
            fragments_automaton_add = partial(
                match_aho.add_sequence,
                automaton=self.fragments_automaton,
                with_duplicates=True,
            )

        if USE_RULE_STARTS:
            starts_automaton_add_start = partial(
                match_aho.add_start,
                automaton=self.starts_automaton,
            )

        # OPTIMIZED: bind frequently used objects to local scope
        rid_by_hash = self.rid_by_hash
        match_hash_index_hash = match_hash.index_hash
        match_set_tids_set_counter = match_set.tids_set_counter
        match_set_multiset_counter = match_set.multiset_counter

        len_starts = SMALL_RULE
        min_len_starts = SMALL_RULE * 6

        ngram_len = AHO_FRAGMENTS_NGRAM_LEN

        # Index each rule
        ########################################################################
        for rid, rule in enumerate(rules_by_rid):

            # assign rid
            rule.rid = rid

            rule_token_ids = array('h', [])
            tids_by_rid_append(rule_token_ids)

            # A rule is weak if it does not contain at least one legalese word:
            # we consider all rules to be weak until proven otherwise below.
            # "weak" rules can only be matched with an automaton.
            is_weak = True

            for rts in rule.tokens():
                rtid = dictionary_get(rts)
                if rtid is None:
                    # we have a never yet seen token, so we assign a new tokenid
                    # note: we could use the length of the dictionary instead
                    highest_tid += 1
                    rtid = highest_tid
                    dictionary[rts] = rtid
                if is_weak and rtid < len_legalese:
                    is_weak = False

                rule_token_ids.append(rtid)

            # build hashes index and check for duplicates rule texts
            rule_hash = match_hash_index_hash(rule_token_ids)
            dupe_rules_by_hash[rule_hash].append(rule)

            ####################
            # populate automaton with the whole rule tokens sequence, for all
            # RULEs, be they "standard"/regular, weak, false positive or small
            ####################
            rules_automaton_add(tids=rule_token_ids, rid=rid)

            if rule.is_false_positive:
                # False positive rules do not participate in the set or sequence
                # matching at all: they are used for exact matching and in post-
                # matching filtering
                false_positive_rids_add(rid)
                continue

            # from now on, we have regular rules
            rid_by_hash[rule_hash] = rid
            regular_rids_add(rid)

            # Rules that cannot be matched as a sequence are "weak" rules
            if not is_weak:
                approx_matchable_rids_add(rid)

                ####################
                # update high postings: positions by high tids used to
                # speed up sequence matching
                ####################
                # no postings for rules that cannot be matched as a sequence (too short and weak)
                # TODO: this could be optimized with a group_by
                postings = defaultdict(list)
                for pos, tid in enumerate(rule_token_ids):
                    if tid < len_legalese:
                        postings[tid].append(pos)
                # OPTIMIZED: for speed and memory: convert postings to arrays
                postings = {tid: array('h', value) for tid, value in postings.items()}
                high_postings_by_rid[rid] = postings

                ####################
                # ... and ngram fragments: compute ngrams and populate an automaton with ngrams
                ####################
                if (USE_AHO_FRAGMENTS
                    and rule.minimum_coverage < 100
                    and rule.length > ngram_len
                ):
                    all_ngrams = tokenize.ngrams(rule_token_ids, ngram_length=ngram_len)
                    all_ngrams_with_pos = tokenize.select_ngrams(all_ngrams, with_pos=True)
                    # all_ngrams_with_pos = enumerate(all_ngrams)
                    for pos, ngram in all_ngrams_with_pos:
                        fragments_automaton_add(tids=ngram, rid=rid, start=pos)

                ####################
                # use the start and end of this rule as a break point for query runs
                ####################
                if USE_RULE_STARTS and rule.length > min_len_starts:
                    starts_automaton_add_start(
                        tids=rule_token_ids[:len_starts],
                        rule_identifier=rule.identifier,
                        rule_length=rule.length,
                    )

            ####################
            # build sets and multisets indexes, for all regular rules as we need
            # the thresholds
            ####################
            tids_set, mset = match_set.build_set_and_mset(
                rule_token_ids, _use_bigrams=USE_BIGRAM_MULTISETS)
            sets_by_rid[rid] = tids_set
            msets_by_rid[rid] = mset

            ####################################################################
            ####################################################################
            # FIXME!!!!!!! we should store them: we need them and we recompute
            # them later at match time
            tids_set_high = match_set.high_tids_set_subset(
                tids_set, len_legalese)
            mset_high = match_set.high_multiset_subset(
                mset, len_legalese, _use_bigrams=USE_BIGRAM_MULTISETS)

            # FIXME!!!!!!!
            ####################################################################
            ####################################################################

            ####################
            # update rule thresholds
            ####################
            rule.length_unique = match_set_tids_set_counter(tids_set)
            rule.high_length_unique = match_set_tids_set_counter(tids_set_high)

            rule.high_length = match_set_multiset_counter(mset_high)
            rule.compute_thresholds()

        ########################################################################
        # Finalize index data structures
        ########################################################################

        # Create the tid -> token string lookup structure.
        ########################################################################
        self.tokens_by_tid = tokens_by_tid = [
            ts for ts, _tid in sorted(dictionary.items(), key=itemgetter(1))]
        self.len_tokens = len_tokens = len(tokens_by_tid)

        # some tokens are made entirely of digits and these can create some
        # worst case behavior when there are long runs on these
        # (note: this must come after tokens_by_tid is built)
        ########################################################################
        self.digit_only_tids = intbitset([
            i for i, s in enumerate(tokens_by_tid) if s.isdigit()])

        # Finalize automatons
        ########################################################################
        self.rules_automaton.make_automaton()
        if USE_AHO_FRAGMENTS:
            self.fragments_automaton.make_automaton()
        if USE_RULE_STARTS:
            match_aho.finalize_starts(self.starts_automaton)

        ########################################################################
        # Do some sanity checks
        ########################################################################

        msg = 'Inconsistent structure lengths'
        assert len_tokens == highest_tid + 1 == len(dictionary), msg

        msg = 'Cannot support more than licensedcode.index.MAX_TOKENS: %d' % MAX_TOKENS
        assert len_tokens <= MAX_TOKENS, msg

        dupe_rules = [rules for rules in dupe_rules_by_hash.values() if len(rules) > 1]
        if dupe_rules:
            dupe_rule_paths = [
                '\n'.join(sorted(
                    ('file://' + rule.text_file)
                    if rule.text_file
                    else ('text: ' + rule.stored_text)
                    for rule in rules))
                for rules in dupe_rules
            ]
            msg = 'Duplicate rules: \n' + '\n\n'.join(dupe_rule_paths)
            raise AssertionError(msg)

        self.optimized = True
Example #43
 def __bytes__(self):
     return (bytes([ord(self.typecode)]) +
             bytes(array(self.typecode, self)))
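
For comparison, a plausible decoding counterpart (a sketch assuming a Vector2d-style class as in the classic Fluent Python example; not part of the original snippet):

 @classmethod
 def frombytes(cls, octets):
     # The first byte stores the typecode; the rest is the packed array data.
     typecode = chr(octets[0])
     memv = memoryview(octets[1:]).cast(typecode)
     return cls(*memv)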
Example #44
	def __out(self, string, filt):
		# Drop anything matching the filter, then buffer the remainder as raw bytes
		# (encode is needed on Python 3, where array.array('B', ...) rejects str).
		cleaned = re.sub(r".*(" + filt + r").*\.\s*(\[.*\])?", "", string)
		self._outp += array.array('B', cleaned.encode('utf-8'))
Example #45
import ROOT as R

def TMVAReader(filenameSTRING, sigtreeSTRINGS, bkgtreeSTRING, variablesSTRING, varnames, varweightfiles, xmins, xmaxs):
    for sigtreeSTRING in sigtreeSTRINGS:
        inputfile = R.TFile(filenameSTRING,"read")
        reader = R.TMVA.Reader()
        signalhists = []
        backgroundhists= []
        for varname, xmin, xmax in zip(varnames, xmins, xmaxs):
            signalhist = R.TH1F("SignalHist" + varname + sigtreeSTRING,
                                "Signal" + varname + sigtreeSTRING, 60, xmin, xmax)
            # Include the tree name so ROOT histogram names stay unique per signal tree.
            backgroundhist = R.TH1F("BackgroundHist" + varname + sigtreeSTRING,
                                    "Background" + varname + sigtreeSTRING, 60, xmin, xmax)
            signalhists.append(signalhist)
            backgroundhists.append(backgroundhist)

        sigtree = inputfile.Get(sigtreeSTRING)
        bkgtree = inputfile.Get(bkgtreeSTRING)

        from array import array
        branchdict = {}
        for i in variablesSTRING:
            branchdict[i] = array('f',[0.])
            reader.AddVariable(i,branchdict[i])
            sigtree.SetBranchAddress(i,branchdict[i])
            bkgtree.SetBranchAddress(i,branchdict[i])

        for name,file in zip(varnames,varweightfiles):
            reader.BookMVA(name,file)

        i = 0

        while sigtree.GetEntry(i):
            i += 1
            for name, histo in zip(varnames, signalhists):
                classify = reader.EvaluateMVA(name)
                if i == 1:
                    print(str(classify) + ' <<---Signal for ' + name)

                histo.Fill(classify)
        i = 0

        while bkgtree.GetEntry(i):
            i += 1
            for name, histo in zip(varnames, backgroundhists):
                classify = reader.EvaluateMVA(name)
                if i == 1:
                    print(str(classify) + ' <<-- Background for ' + name)
                histo.Fill(classify)


        for histosig,histobkg,name in zip(signalhists,backgroundhists,varnames):
            canvas = R.TCanvas()
            histosig.SetStats(1)
            # Scale the Y axis to whichever histogram peaks higher.
            ymax = max(histosig.GetBinContent(histosig.GetMaximumBin()),
                       histobkg.GetBinContent(histobkg.GetMaximumBin()))

            histosig.GetYaxis().SetRangeUser(0,ymax*1.5)
            histosig.SetLineColor(R.kSpring-4)
            histosig.SetFillColor(R.kSpring-4)
            histosig.Draw("HIST")
            histobkg.Draw("HIST,same")
            histosig.Draw("Hist,same")
            R.gPad.RedrawAxis()
            canvas.Print(name+"_"+sigtreeSTRING+".png")
Example #46
 def __init__(self):
     self._lock = threading.Lock()
     self._cv = threading.Condition(self._lock)
     self._event = None
     self._buffer = array.array('B')
     self._closed = False
Example #47
def read_ps4ds(jsdev):
    """
    The Sony PlayStation 4 controller has fewer buttons. The throttles are
    analog (see axes) and binary (see buttons). Runs as a thread.

    axis    0: left stick X
            1: left stick Y
            2: left throttle
            3: right stick X
            4: right stick Y
            5: right throttle
            6: dPad X
            7: dPad Y

    button  0: cross            NS B
            1: circle           NS A
            2: triangle         NS X
            3: square           NS Y
            4: left trigger     NS left trigger
            5: right trigger    NS right trigger
            6: left throttle    NS left throttle
            7: right throttle   NS right throttle
            8: share            NS minus
            9: options          NS plus
           10: logo             NS home
           11: left stick button  NS left stick button
           12: right stick button NS right stick button


    share   options

            triangle
    square          circle
            cross
    """

    BUTTON_MAP = array.array('B', [
        DS4Button.SQUARE,
        DS4Button.CROSS,
        DS4Button.CIRCLE,
        DS4Button.TRIANGLE,
        DS4Button.L1,
        DS4Button.R1,
        DS4Button.L2,
        DS4Button.R2,
        DS4Button.SHARE,
        DS4Button.OPTIONS,
        DS4Button.L3,
        DS4Button.R3,
        DS4Button.LOGO,
        DS4Button.TPAD])

    while True:
        try:
            evbuf = jsdev.read(8)
        except OSError:
            # The device disappeared or the read failed; stop this reader.
            jsdev.close()
            break
        if evbuf:
            timestamp, value, type, number = unpack('IhBB', evbuf)
            if type == 0x01: # button event
                button_out = BUTTON_MAP[number]
                if value:
                    DS4G.press(button_out)
                else:
                    DS4G.release(button_out)

            if type == 0x02: # axis event
                axis = ((value + 32768) >> 8)
                # Axes 0,1 left stick X,Y
                if number == 0:
                    DS4G.leftXAxis(axis)
                elif number == 1:
                    DS4G.leftYAxis(axis)
                elif number == 2:
                    DS4G.leftTrigger(axis)
                # Axes 3,4 right stick X,Y
                elif number == 3:
                    DS4G.rightXAxis(axis)
                elif number == 4:
                    DS4G.rightYAxis(axis)
                elif number == 5:
                    DS4G.rightTrigger(axis)
                # Axes 6,7 directional pad X,Y
                elif number == 6:
                    DS4G.dPadXAxis(axis)
                elif number == 7:
                    DS4G.dPadYAxis(axis)
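
Since the docstring says this function runs as a thread, a typical launch might look like the following (an assumed usage sketch, not in the original; jsdev is an already-open joystick device):

import threading

reader = threading.Thread(target=read_ps4ds, args=(jsdev,), daemon=True)
reader.start()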
Example #48
import ROOT
from ROOT import *
from array import array  # needed for the array(...) calls below
gROOT.SetBatch(True)
gStyle.SetOptStat(0)
gStyle.SetOptTitle(0)
ROOT.gInterpreter.Declare(
    "#include \"MyTools/RootUtils/interface/SeabornInterface.h\"")
ROOT.gInterpreter.Declare(
    "#include \"MyTools/RootUtils/interface/CanvasHelpers.h\"")
gSystem.Load(
    "~/Dijets/CMSSW_7_4_15/lib/slc6_amd64_gcc491/libMyToolsRootUtils.so")
seaborn = Root.SeabornInterface()
seaborn.Initialize()
Root.SetCanvasStyle()

acceptance_mass_array = array(
    'd', [1500., 1750., 2000., 2500., 3000., 4000., 5000.])
# acceptance_array = array('d', [0.085, 0.072, 0.035, 0.025, 0.021,]) # My old values, taken from the ATLAS tables
acceptance_array = array('d', [
    0.0993114, 0.0857177, 0.0742023, 0.0398483, 0.0264772, 0.0173586,
    0.00952033
])  # Kirtimaan's values
tg_acceptance = TGraph(len(acceptance_mass_array), acceptance_mass_array,
                       acceptance_array)

mass_array = array('d', [1250., 1500., 1750., 2000., 2500., 3000., 4000.])
# xs*BR*A values taken from digitizing the ATLAS plot
# My values
#xsBRAs = {
#	1250.:0.04739380332427473,
#	1500.:0.03107133633710692,
#	1750.:0.04851860651757168,
Example #49
print('---< about bytearray(python 2) >---')
# assumed setup (not in the original snippet): the bytes object being examined
cafe = bytes('café', encoding='utf_8')
cafe_arr = bytearray(cafe)
print('cafe_arr = {0}'.format(cafe_arr))
print('cafe_arr[0] = {0}'.format(cafe_arr[0]))
print('cafe_arr[:1] = {0}'.format(cafe_arr[:1]))
print()

print('---< fromhex in binary sequence >---')
print('bytes.fromhex("31 4B CE A9") = {0}'.format(
    bytes.fromhex('31 4B CE A9')))
print()

print('---< Initialize bytes from raw array data. >---')

import array  # needed for array.array below

numbers = array.array('h', [-2, -1, 0, 1, 2])
print('number(raw array) = \n{0}'.format(numbers))
octets = bytes(numbers)
print('bytes(number) = {0}'.format(octets))
print()
'''
------------------------------------------------------------------------------------------------------------------------
struct and memory view

The struct module interprets bytes as packed binary data.

------------------------------------------------------------------------------------------------------------------------
'''
print('--------------------------------------------\n'
      '   struct and memory view\n'
      '--------------------------------------------\n')
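
A minimal demo of what this banner announces (an added sketch, not part of the original snippet): packing values into bytes with struct and viewing them without copying via memoryview.

import struct

# Pack two unsigned shorts and a signed int into little-endian bytes.
packed = struct.pack('<HHi', 640, 480, -1)
print('packed = {0}'.format(packed))

# unpack restores the original tuple: (640, 480, -1)
print('unpacked = {0}'.format(struct.unpack('<HHi', packed)))

# A memoryview exposes the same buffer without copying it.
view = memoryview(packed)
print('view[:2] = {0}'.format(bytes(view[:2])))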
Example #50
def read_hori_mini4(jsdev):
    """
    The Hori Mini4 is a licensed PS4 compatible controller. The throttles are
    analog (see axes) and binary (see buttons). Runs as a thread.

    axis    0: left stick X
            1: left stick Y
            2: right stick X
            3: left throttle
            4: right throttle
            5: right stick Y
            6: dPad X
            7: dPad Y

    button  0: cross
            1: circle
            2: triangle
            3: square
            4: L1
            5: R1
            6: L2
            7: R2
            8: share
            9: options
           10: logo
           11: L3
           12: R3


    share   options

            triangle
    square          circle
            cross
    """

    BUTTON_MAP = array.array('B', [
        DS4Button.SQUARE,
        DS4Button.CROSS,
        DS4Button.CIRCLE,
        DS4Button.TRIANGLE,
        DS4Button.L1,
        DS4Button.R1,
        DS4Button.L2,
        DS4Button.R2,
        DS4Button.SHARE,
        DS4Button.OPTIONS,
        DS4Button.L3,
        DS4Button.R3,
        DS4Button.LOGO,
        DS4Button.TPAD])

    while True:
        try:
            evbuf = jsdev.read(8)
        except OSError:
            # The device disappeared or the read failed; stop this reader.
            jsdev.close()
            break
        if evbuf:
            timestamp, value, type, number = unpack('IhBB', evbuf)
            if type == 0x01: # button event
                button_out = BUTTON_MAP[number]
                if value:
                    DS4G.press(button_out)
                else:
                    DS4G.release(button_out)

            if type == 0x02: # axis event
                axis = ((value + 32768) >> 8)
                # Axes 0,1 left stick X,Y
                if number == 0:
                    DS4G.leftXAxis(axis)
                elif number == 1:
                    DS4G.leftYAxis(axis)
                # Axis 2: right stick X
                elif number == 2:
                    DS4G.rightXAxis(axis)
                # Axes 3,4: left and right throttles (see docstring)
                elif number == 3:
                    DS4G.leftTrigger(axis)
                elif number == 4:
                    DS4G.rightTrigger(axis)
                # Axis 5: right stick Y
                elif number == 5:
                    DS4G.rightYAxis(axis)
                # Axes 6,7 directional pad X,Y
                elif number == 6:
                    DS4G.dPadXAxis(axis)
                elif number == 7:
                    DS4G.dPadYAxis(axis)
Example #51
    # weight residuals using errors
    weight = []
    for i in range(len(dmb)):
        w = dmb[i] / sum(dmb)
        weight.append(w)
    #add scriptm to model
    scriptm = sum(residuals*weight)
    muModelData = muModelData + scriptm
    likeResiduals = np.matrix(mb - muModelData)
    #chi^2
    likeValue = likeResiduals * np.linalg.inv(covarianceMatrix) * likeResiduals.getH()

    return likeValue

# empty arrays to store results
omegaMArr = arr.array('d', [])
omegaLArr = arr.array('d', [])
omegaKArr = arr.array('d', [])
wAppend = arr.array('i', [])
likeArray = arr.array('d', [])
#for counting
count = arr.array('i', [])
count.append(0)

def start(omegaMmin,omegaMmax,omegaLmin,omegaLmax,stepsize,iterations):
    c = 0
    omegaM = random.uniform(omegaMmin,omegaMmax)
    omegaL = random.uniform(omegaLmin,omegaLmax)
    omegaK = 1 - omegaL - omegaM
    omegaMArr.append(round(omegaM,5))
    omegaLArr.append(round(omegaL,5))
Example #52
from array import array
from typing import Optional

# Node and inorder() are assumed to be defined elsewhere in the original module.

def construct_bst(preorder: array,
                  start: Optional[int] = None,
                  end: Optional[int] = None) -> Optional[Node]:
    """
    BST: left - smaller than current
         right - bigger than current
    """
    if start is None:
        start = 0
    if end is None:
        end = len(preorder) - 1
    if start > end:
        return None

    node = Node(preorder[start])

    if start < end:
        index = start + 1

        while index < end and preorder[index] < preorder[start]:
            index += 1

        node.left = construct_bst(preorder, start + 1, index - 1)
        node.right = construct_bst(preorder, index, end)

    return node


if __name__ == "__main__":
    inorder(construct_bst(array("I", [10, 5, 1, 7, 40, 50])))
Example #53
def main():
    parser = Parser(
        prog='spr2image',
        description='Default action is to convert a spr file to a gif.',
        epilog='example: spr2image bubble.spr => convert bubble.spr to bubble.gif'
    )

    parser.add_argument(
        'file',
        metavar='file.spr',
        action=ResolvePathAction
    )

    parser.add_argument(
        '-d',
        metavar='file.gif',
        dest='dest',
        default=os.getcwd(),
        action=ResolvePathAction,
        help='image file to create'
    )

    parser.add_argument(
        '-q',
        dest='quiet',
        action='store_true',
        help='quiet mode'
    )

    parser.add_argument(
        '-v', '--version',
        dest='version',
        action='version',
        help=argparse.SUPPRESS,
        version=f'{parser.prog} version {qcli.__version__}'
    )

    args = parser.parse_args()

    # Validate source file
    if not spr.is_sprfile(args.file):
        print(f'{parser.prog}: cannot find or open {args.file}', file=sys.stderr)
        sys.exit(1)

    # Validate or create out file
    if args.dest == os.getcwd():
        image_path = os.path.dirname(args.file)
        image_name = os.path.basename(args.file).split('.')[0] + '.gif'
        args.dest = os.path.join(image_path, image_name)

    dest_dir = os.path.dirname(args.dest) or '.'
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)

    image_filename = os.path.basename(args.dest)
    image_extension = image_filename.split('.')[-1]

    with spr.Spr.open(args.file) as spr_file:
        if not args.quiet:
            print(f'Converting: {os.path.basename(args.file)}')

        # Flatten out palette
        palette = [channel for rgb in vgio.quake.palette for channel in rgb]

        # Default frame duration: 10/60 of a second per frame, in milliseconds
        default_duration = 10 / 60 * 1000

        # Build a sequence of images from spr frames
        images = []
        for frame in spr_file.frames:
            if frame.type == spr.SINGLE:
                size = frame.width, frame.height
                data = array.array('B', frame.pixels)

                img = Image.frombuffer('P', size, data, 'raw', 'P', 0, 1)
                img.putpalette(palette)
                images.append(img)

            else:
                print(f'{parser.prog}: frame groups are not supported', file=sys.stderr)
                sys.exit(1)

    # Save as gif
    if image_extension.upper() == 'GIF':
        first_frame = images[0]
        first_frame.putpalette(palette)
        remaining_frames = images[1:]
        first_frame.save(
            args.dest,
            save_all=True,
            append_images=remaining_frames,
            duration=default_duration,
            loop=0,
            optimize=False,
            #transparency=255,
            palette=palette
        )

    else:
        image_directory = os.path.dirname(args.dest)
        image_name = image_filename.split('.')[0]
        for image_index, image in enumerate(images):
            filename = '{}_{}.{}'.format(image_name, image_index, image_extension)
            image.save(
                os.path.join(image_directory, filename),
                optimize=False,
                #transparency=255,
                palette=palette
            )

    sys.exit(0)
Example #54
            print('Controller connected')

            jsdev = open(fn, 'rb')
            break

# Get the device name.
buf = bytearray(63)
# buf = array.array('u', ['\0'] * 64)
ioctl(jsdev, 0x80006a13 + (0x10000 * len(buf)), buf)  # JSIOCGNAME(len)
# Get rid of random padding
buf = buf.rstrip(b'\0')
js_name = str(buf, encoding='utf-8')
print('Device name: %s' % js_name)

# Get number of axes and buttons.
buf = array.array('B', [0])
ioctl(jsdev, 0x80016a11, buf)  # JSIOCGAXES
num_axes = buf[0]

buf = array.array('B', [0])
ioctl(jsdev, 0x80016a12, buf)  # JSIOCGBUTTONS
num_buttons = buf[0]

# Get the axis map.
buf = array.array('B', [0] * 0x40)
ioctl(jsdev, 0x80406a32, buf)  # JSIOCGAXMAP

for axis in buf[:num_axes]:
    axis_name = axis_names.get(axis, 'unknown(0x%02x)' % axis)
    axis_map.append(axis_name)
    axis_states[axis_name] = 0.0
Example #55
# All values in an array must be of the same type
# (unlike lists, which can mix types)

import array as arr  #Alias

vals = arr.array("i", [1, -2, 3])  # "i" -> typecode for signed integers

# b, B, u, h, H, i, I, l, L, q, Q, f or d
# lower case -> signed data type (negatives allowed)
# upper case -> unsigned data type -> 0 and up, no negatives
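# For example, an unsigned typecode rejects negative values:
# arr.array("B", [-1]) raises OverflowError, while arr.array("b", [-1]) works.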

print(vals)

print(vals.buffer_info())
# (Address , size)

vals.append(4)
print(vals)

print("--------------")

print(vals[0])

print("-------------")

for i in range(len(vals)):
    print(vals[i], end=" ")

print()
print("-------------")
Example #56
import array
import random
import sys

# read filenames from argv
for filename in sys.argv[1:]:
    # create a random value matrix
    l = [random.randint(-100, 100) for i in range(64)]

    a = array.array('i', l)
    with open(filename, 'wb') as f:
        a.tofile(f)

    # print matrix info to stdout
    l = [l[i:i + 8] for i in range(64) if i % 8 == 0]
    print("Write matrix into " + filename + ":")
    for i in range(8):
        print(l[i])
    print()
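
A matching reader (added as an illustration; not in the original) restores each matrix with array.fromfile:

def read_matrix(filename):
    # Read back the 64 ints written above and regroup them into 8 rows of 8.
    a = array.array('i')
    with open(filename, 'rb') as f:
        a.fromfile(f, 64)
    return [a.tolist()[i:i + 8] for i in range(0, 64, 8)]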
Example #57
def main_part2(file_cfg):
    # time logging
    global start_time
    start_time = time.time()

    # param list:
    with open(file_cfg, "rb") as f:
        print("load configs from: " + file_cfg)
        config = pickle.load(f)

    seed_number = config.seed_number
    idx_gender = config.idx_gender
    idx_bg = config.idx_bg
    idx_fshape = config.idx_fshape
    idx_cloth = config.idx_cloth
    disp_bg = config.disp_bg

    # human data source:
    idx = config.idx_seq
    ishape = config.idx_ishape
    stride = config.stride


    from pickle import load
    import argparse
    log_message("input idx: %d" % idx)
    log_message("input ishape: %d" % ishape)
    log_message("input stride: %d" % stride)

    if idx is None:
        exit(1)
    if ishape is None:
        exit(1)
    if stride is None:
        log_message("WARNING: stride not specified, using default value 50")
        stride = 50

    # import idx info (name, split)
    idx_info = load(open("pkl/idx_info.pickle", 'rb'))

    # get runpass
    (runpass, idx) = divmod(idx, len(idx_info))

    log_message("start part 2")

    import hashlib
    import random
    # initialize random seeds with sequence id
    s = "synth_data:%d:%d:%d" % (idx, runpass, ishape)
    log_message("GENERATED SEED %d from string '%s'" % (seed_number, s))
    random.seed(seed_number)
    np.random.seed(seed_number)

    # import configuration
    import config
    params = config.load_file('config', 'SYNTH_DATA')

    smpl_data_folder = params['smpl_data_folder']
    smpl_data_filename = params['smpl_data_filename']
    resy = params['resy']
    resx = params['resx']
    tmp_path = params['tmp_path']
    output_path = params['output_path']
    output_types = params['output_types']
    stepsize = params['stepsize']
    clipsize = params['clipsize']
    openexr_py2_path = params['openexr_py2_path']

    name = idx_info[idx]["name"]

    # compute number of cuts
    nb_ishape = max(1, int(np.ceil((idx_info[idx]['nb_frames'] - (clipsize - stride)) / stride)))
    ishape = ishape % nb_ishape


    output_path = join(output_path, 'run%d' % runpass, name.replace(" ", ""))
    log_message("output path: " + output_path)
    tmp_path = join(tmp_path, 'run%d_%s_c%04d' % (runpass, name.replace(" ", ""), (ishape + 1)))

    # check whether openexr_py2_path is loaded from configuration file
    if 'openexr_py2_path' in locals() or 'openexr_py2_path' in globals():
        for exr_path in openexr_py2_path.split(':'):
            sys.path.insert(1, exr_path)

    # to read exr imgs
    import OpenEXR
    import array
    import Imath

    log_message("Loading SMPL data")
    smpl_data = np.load(join(smpl_data_folder, smpl_data_filename))
    cmu_parms, name = load_body_data(smpl_data, idx)

    res_paths = {k:join(tmp_path, '%05d_%s'%(idx, k)) for k in output_types if output_types[k]}

    data = cmu_parms[name]
    nframes = len(data['poses'][::stepsize])

    # .mat files
    matfile_normal = join(output_path, name.replace(" ", "") + "_c%04d_normal.mat" % (ishape + 1))
    matfile_gtflow = join(output_path, name.replace(" ", "") + "_c%04d_gtflow.mat" % (ishape + 1))
    matfile_depth = join(output_path, name.replace(" ", "") + "_c%04d_depth.mat" % (ishape + 1))
    matfile_segm = join(output_path, name.replace(" ", "") + "_c%04d_segm.mat" % (ishape + 1))
    dict_normal = {}
    dict_gtflow = {}
    dict_depth = {}
    dict_segm = {}
    get_real_frame = lambda ifr: ifr
    FLOAT = Imath.PixelType(Imath.PixelType.FLOAT)

    # overlap determined by stride (# subsampled frames to skip)
    fbegin = ishape*stepsize*stride
    fend = min(ishape*stepsize*stride + stepsize*clipsize, len(data['poses']))
    # LOOP OVER FRAMES
    for seq_frame, (pose, trans) in enumerate(zip(data['poses'][fbegin:fend:stepsize], data['trans'][fbegin:fend:stepsize])):
        iframe = seq_frame

        log_message("Processing frame %d" % iframe)

        for k, folder in res_paths.items():
            if k != 'vblur' and k != 'fg':
                for ii in range(2):
                    path = join(folder, 'Image%04d_%d.exr' % (get_real_frame(seq_frame), ii))
                    exr_file = OpenEXR.InputFile(path)
                    if k == 'normal':
                        mat = np.transpose(np.reshape([array.array('f', exr_file.channel(Chan, FLOAT)).tolist() for Chan in ("R", "G", "B")], (3, resx, resy)), (1, 2, 0))
                        dict_normal['normal_%04d_%01d' % (iframe + 1, ii+1)] = mat.astype(np.float32, copy=False) # +1 for the 1-indexing
                    elif k == 'gtflow':
                        mat = np.transpose(np.reshape([array.array('f', exr_file.channel(Chan, FLOAT)).tolist() for Chan in ("R", "G")], (2, resx, resy)), (1, 2, 0))
                        dict_gtflow['gtflow_%04d_%01d' % (iframe + 1, ii+1)] = mat.astype(np.float32, copy=False)
                    elif k == 'depth':
                        mat = np.reshape([array.array('f', exr_file.channel(Chan, FLOAT)).tolist() for Chan in ("R")], (resx, resy))
                        dict_depth['depth_%04d_%01d' % (iframe + 1, ii+1)] = mat.astype(np.float32, copy=False)
                    elif k == 'segm':
                        mat = np.reshape([array.array('f', exr_file.channel(Chan, FLOAT)).tolist() for Chan in ("R")], (resx, resy))
                        dict_segm['segm_%04d_%01d' % (iframe + 1, ii+1)] = mat.astype(np.uint8, copy=False)
                    # remove(path)

    print("render infos: ")
    print(res_paths)
    print("#normal: %d"%(len(dict_normal.keys())))
    print("#depth: %d"%(len(dict_depth.keys())))
    print("#segm: %d"%(len(dict_segm.keys())))
    import scipy.io
    scipy.io.savemat(matfile_normal, dict_normal, do_compression=True)
    scipy.io.savemat(matfile_gtflow, dict_gtflow, do_compression=True)
    scipy.io.savemat(matfile_depth, dict_depth, do_compression=True)
    scipy.io.savemat(matfile_segm, dict_segm, do_compression=True)

    # cleaning up tmp
    if tmp_path != "" and tmp_path != "/":
        log_message("Cleaning up tmp")
        os.system('rm -rf %s' % tmp_path)

    log_message("Completed batch")
Example #58
def test_gdal_contour_1():
    if test_cli_utilities.get_gdal_contour_path() is None:
        pytest.skip()

    try:
        os.remove('tmp/contour.shp')
    except OSError:
        pass
    try:
        os.remove('tmp/contour.dbf')
    except OSError:
        pass
    try:
        os.remove('tmp/contour.shx')
    except OSError:
        pass

    drv = gdal.GetDriverByName('GTiff')
    sr = osr.SpatialReference()
    sr.ImportFromEPSG(4326)
    wkt = sr.ExportToWkt()

    size = 160
    precision = 1. / size

    ds = drv.Create('tmp/gdal_contour.tif', size, size, 1)
    ds.SetProjection(wkt)
    ds.SetGeoTransform([1, precision, 0, 50, 0, -precision])

    raw_data = array.array('h', [10 for i in range(int(size / 2))]).tobytes()
    for i in range(int(size / 2)):
        ds.WriteRaster(int(size / 4),
                       i + int(size / 4),
                       int(size / 2),
                       1,
                       raw_data,
                       buf_type=gdal.GDT_Int16,
                       band_list=[1])

    raw_data = array.array('h', [20 for i in range(int(size / 2))]).tobytes()
    for i in range(int(size / 4)):
        ds.WriteRaster(int(size / 4) + int(size / 8),
                       i + int(size / 4) + int(size / 8),
                       int(size / 4),
                       1,
                       raw_data,
                       buf_type=gdal.GDT_Int16,
                       band_list=[1])

    raw_data = array.array('h', [25 for i in range(int(size / 4))]).tobytes()
    for i in range(int(size / 8)):
        ds.WriteRaster(int(size / 4) + int(size / 8) + int(size / 16),
                       i + int(size / 4) + int(size / 8) + int(size / 16),
                       int(size / 8),
                       1,
                       raw_data,
                       buf_type=gdal.GDT_Int16,
                       band_list=[1])

    ds = None

    (_, err) = gdaltest.runexternal_out_and_err(
        test_cli_utilities.get_gdal_contour_path() +
        ' -a elev -i 10 tmp/gdal_contour.tif tmp/contour.shp')
    assert (err is None or err == ''), 'got error/warning'

    ds = ogr.Open('tmp/contour.shp')

    expected_envelopes = [[1.25, 1.75, 49.25, 49.75],
                          [
                              1.25 + 0.125, 1.75 - 0.125, 49.25 + 0.125,
                              49.75 - 0.125
                          ]]
    expected_height = [10, 20]

    lyr = ds.ExecuteSQL("select * from contour order by elev asc")

    assert lyr.GetSpatialRef().ExportToWkt(
    ) == wkt, 'Did not get expected spatial ref'

    assert lyr.GetFeatureCount() == len(expected_envelopes)

    i = 0
    feat = lyr.GetNextFeature()
    while feat is not None:
        envelope = feat.GetGeometryRef().GetEnvelope()
        assert feat.GetField('elev') == expected_height[i]
        for j in range(4):
            if expected_envelopes[i][j] != pytest.approx(
                    envelope[j], abs=precision / 2 * 1.001):
                print('i=%d, wkt=%s' %
                      (i, feat.GetGeometryRef().ExportToWkt()))
                print(feat.GetGeometryRef().GetEnvelope())
                pytest.fail(
                    '%f, %f' %
                    (expected_envelopes[i][j] - envelope[j], precision / 2))
        i = i + 1
        feat = lyr.GetNextFeature()

    ds.ReleaseResultSet(lyr)
    ds.Destroy()
Example #59
def string2bits(bit_str: str) -> array.array:
    return array.array("B", map(int, bit_str))
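
A reverse helper (an added illustration, not part of the original) turns the bit array back into a string:

def bits2string(bits: array.array) -> str:
    return ''.join(map(str, bits))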
Example #60
import array
import re
import shutil
import sys

import ROOT

# cardpath and sourcepath are assumed to be defined earlier in the original script
rscale = 1.

with open(cardpath) as datacard:
    lines = datacard.read().strip().split('\n')
    matches = re.match('# R x ([0-9.e+-]+)', lines[-1])
    if matches:
        rscale = float(matches.group(1))

if rscale == 1.:
    sys.exit(0)

shutil.copyfile(sourcepath, 'limittmp.root')

variables = {
    'limit': array.array('d', [0.]),
    'limitErr': array.array('d', [0.]),
    'mh': array.array('d', [0.]),
    'syst': array.array('i', [0]),
    'iToy': array.array('i', [0]),
    'iSeed': array.array('i', [0]),
    'iChannel': array.array('i', [0]),
    't_cpu': array.array('f', [0.]),
    't_real': array.array('f', [0.]),
    'quantileExpected': array.array('f', [0.])
}

source = ROOT.TFile.Open(sourcepath)

output = ROOT.TFile.Open('limittmp.root', 'update')