Example #1
    def DownloadThrottlerThread(self):
        while self.threadRun:
            time.sleep(self.updateTime)

            tot_down_size_KB = float(
                sum(self.DOWNLOAD_RATE_LIMIT_BUFFER) / 1024.0)
            download_speed_limit_KBps = float(Prefs['download_speed_limit'])
            if tot_down_size_KB > 0 and download_speed_limit_KBps > 0:
                tot_down_speed_KBps = round(
                    float(tot_down_size_KB / self.updateTime), 3)
                if Prefs['use_debug']:
                    Log('Download Throttler:---> Timestamp:%s | Total Down Speed: %s KB/s | Speed Limit: %s KB/s'
                        % (time.time(), tot_down_speed_KBps,
                           download_speed_limit_KBps))

                if tot_down_speed_KBps > download_speed_limit_KBps:
                    self.throttle = True
                    self.throttleStateSleepTime = round(
                        tot_down_speed_KBps / download_speed_limit_KBps, 3)
                    if Prefs['use_debug']:
                        Log("Download Throttler:---> Sleep for %s sec." %
                            self.throttleStateSleepTime)
                    time.sleep(self.throttleStateSleepTime)

            self.reset()
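
The back-off above is purely proportional: when the measured rate exceeds the limit, the thread sleeps for measured-speed divided by the limit, in seconds. A standalone sketch of that rule (the function name and signature here are illustrative, not from the plugin):

import time

def throttle_sleep(measured_kbps, limit_kbps):
    # Pause in proportion to the overshoot ratio, e.g. a measured
    # 1500 KB/s against a 500 KB/s limit sleeps for 3 seconds.
    if limit_kbps > 0 and measured_kbps > limit_kbps:
        pause = round(measured_kbps / float(limit_kbps), 3)
        time.sleep(pause)
        return pause
    return 0.0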
Example #2
    def logp_partial_gradient(self, variable, calculation_set=None):
        """
        Calculates the partial gradient of the posterior of self with respect to variable.
        Returns zero if self is not in calculation_set.
        """
        if (calculation_set is None) or (self in calculation_set):

            if not datatypes.is_continuous(variable):
                return zeros(shape(variable.value))

            if variable is self:
                try:
                    gradient_func = self._logp_partial_gradients["value"]

                except KeyError:
                    raise NotImplementedError(repr(self) + " has no gradient function for 'value'")

                gradient = np.reshape(gradient_func.get(), np.shape(variable.value))
            else:
                gradient = __builtin__.sum(
                    [self._pgradient(variable, parameter, value) for parameter, value in self.parents.iteritems()]
                )

            return gradient
        else:
            return 0
Example #4
def doSearch(name, tvdb_id):
    Log.Debug('[ {} ] Searching the show {} among ItalianSubs shows'.format(PLUGIN_NAME,name))
    f = prepare_name(name)
    shows = get_shows()
    priority = []
    id_show = 0
    for name_s, id_s in shows:
        occurrences = sum([1 for el in f if el in name_s])
        priority.append( (occurrences, id_s) )
    if priority:
        #show = sorted(priority, key=lambda x: x[0], reverse=True)[0][1]
        priority = sorted(priority, key=lambda x: x[0], reverse=True)
        for each in priority:
            if tvdb_id:
                tvdb_id_occurrence = XML.ElementFromURL(ITASA_SHOW.format(each[1], ITASA_KEY)).find('.//id_tvdb').text
                if tvdb_id == tvdb_id_occurrence:
                    id_show = each[1]
                    break
            else:
                if each[0] == len(f):
                    id_show = each[1]
                    break
        id_show = id_show or priority[0][1]
        Log.Debug('[ {} ] Match found for {}. ID on ItalianSubs: {}'.format(PLUGIN_NAME,name, id_show))
        return id_show #return id show
    Log.Debug('[ {} ] No matches found for {}'.format(PLUGIN_NAME, name))
    return None
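
The candidate ranking is a plain token-overlap count. A self-contained sketch of the same scoring, with prepare_name and the ItalianSubs XML lookup replaced by simple arguments (names here are illustrative):

def rank_shows(query_tokens, shows):
    # shows: iterable of (name, id); score a show by how many
    # query tokens appear in its name, best match first
    scored = [(sum(1 for t in query_tokens if t in name), show_id)
              for name, show_id in shows]
    return sorted(scored, key=lambda pair: pair[0], reverse=True)

# rank_shows(['doctor', 'who'], [('doctor who 2005', 1), ('house', 2)])
# -> [(2, 1), (0, 2)]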
Example #5
    def test_indexing(self):
        # Vector variables
        p = Problem(Maximize(self.x[0, 0]),
                    [self.x[0, 0] <= 2, self.x[1, 0] == 3])
        result = p.solve()
        self.assertAlmostEqual(result, 2)
        self.assertItemsAlmostEqual(self.x, [2, 3])

        n = 10
        A = matrix(range(n * n), (n, n))
        x = Variable(n, n)
        p = Problem(Minimize(sum(x)), [x == A])
        result = p.solve()
        answer = n * n * (n * n + 1) / 2 - n * n
        self.assertAlmostEqual(result, answer)

        # Matrix variables
        import __builtin__
        p = Problem(
            Maximize(
                __builtin__.sum(self.A[i, i] + self.A[i, 1 - i]
                                for i in range(2))),
            [self.A <= [[1, -2], [-3, 4]]])
        result = p.solve()
        self.assertAlmostEqual(result, 0)
        self.assertItemsAlmostEqual(self.A.value, [1, -2, -3, 4])

        # Indexing arithmetic expressions.
        exp = [[1, 2], [3, 4]] * self.z + self.x
        p = Problem(Minimize(exp[1, 0]), [self.x == self.z, self.z == [1, 2]])
        result = p.solve()
        self.assertAlmostEqual(result, 12)
        self.assertItemsAlmostEqual(self.x.value, self.z.value)
Example #6
def TaylorTwographSRG(q):
    r"""
    Construct a strongly regular graph from Taylor's two-graph for `U_3(q)`, `q` odd.

    This is a strongly regular graph with parameters
    `(v,k,\lambda,\mu)=(q^3+1, q(q^2+1)/2, (q^2+3)(q-1)/4, (q^2+1)(q+1)/4)`
    in the Seidel switching class of
    :func:`Taylor two-graph <sage.combinat.designs.twographs.taylor_twograph>`.
    Details are in §7E of [BvL84]_.

    INPUT:

    - ``q`` -- a power of an odd prime number

    .. SEEALSO::

        * :meth:`~sage.graphs.graph_generators.GraphGenerators.TaylorTwographDescendantSRG`

    EXAMPLES::

        sage: t=graphs.TaylorTwographSRG(3); t
        Taylor two-graph SRG: Graph on 28 vertices
        sage: t.is_strongly_regular(parameters=True)
        (28, 15, 6, 10)

    """
    G, l, v0 = TaylorTwographDescendantSRG(q, clique_partition=True)
    G.add_vertex(v0)
    G.seidel_switching(sum(l[:(q**2+1)/2],[]))
    G.name("Taylor two-graph SRG")
    return G
Example #7
 def logp_gradient_contribution(self, calculation_set = None):
     """
     Calculates the gradient of the joint log posterior with respect to self. 
     Calculation of the log posterior is restricted to the variables in calculation_set. 
     """
     #NEED some sort of check to see if the log p calculation has recently failed, in which case not to continue
         
     return self.logp_partial_gradient(self, calculation_set) + __builtin__.sum([child.logp_partial_gradient(self, calculation_set) for child in self.children] )
Example #8
def sum(xs, y_from_x=lambda x: x):
    '''
    >>> range(10) | sum()
    45
    >>> range(10) | sum(lambda x: -x)
    -45
    '''
    return __builtin__.sum(y_from_x(x) for x in xs)
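
The doctests above rely on pipe syntax the snippet itself does not define; `range(10) | sum()` only works if a decorator turns the function into an object implementing `__ror__`. A minimal hypothetical sketch of such a wrapper (the real library's decorator may differ):

import functools

class _Pipe(object):
    def __init__(self, fn):
        self.fn = fn
    def __ror__(self, xs):                 # evaluated for `xs | pipe_object`
        return self.fn(xs)

def pipe(fn):
    @functools.wraps(fn)
    def make(*args, **kwargs):
        return _Pipe(lambda xs: fn(xs, *args, **kwargs))
    return make

Decorating the sum above with @pipe would make `range(10) | sum()` evaluate to 45.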
Example #9
def get_reverted_revs(revs):
    reverted_revs = []
    diff_sizes = get_diff_sizes(revs)
    for rev_num, (editor, size) in enumerate(diff_sizes):
        rev_range = [x[1] for x in diff_sizes[rev_num:rev_num+REVERT_LOOKAHEAD+1]]
        if any([ r != 0 for r in rev_range ]) and sum(rev_range) == REVERT_THRESHOLD:
            reverted_revs.append(rev_num)
    return reverted_revs
Example #10
 def __mul__(p,q):
     if isinstance(q,matrix):
         qt = q.transpose()
         if len(p)!=len(qt):
             raise ValueError("incompatible")
         return matrix([[__builtin__.sum([i*j for i,j in zip(r,c)])
                  for c in qt] for r in p])
     else: # assume scalar
         return matrix([[i*q for i in r] for r in p])
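
A quick usage sketch of the row-by-column pattern above, using plain nested lists instead of the snippet's matrix class:

def matmul(p, q):
    # transpose q so columns become rows, then take row-by-column dot products
    qt = list(zip(*q))
    return [[sum(i * j for i, j in zip(r, c)) for c in qt] for r in p]

# matmul([[1, 2], [3, 4]], [[5, 6], [7, 8]]) -> [[19, 22], [43, 50]]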
Example #11
def sum(l):
    """
    Override built-in sum to make sure this is never called with a dict instead
    of a list
    :param l:
    :return:
    """
    assert isinstance(l, list)
    return __builtin__.sum(l)
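
The assert exists because the built-in happily iterates a dict and sums its keys, which is almost never the intent. A short demonstration of what the wrapper rejects:

import __builtin__

d = {1: 'a', 2: 'b'}
print(__builtin__.sum(d))   # 3: iterating a dict yields its keys
# sum(d) -> AssertionError from the wrapper above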
Example #12
 def get_value(self, varmap={}):
     values = map(lambda x: x.get_value(varmap), self.children)
     if self.func == "+":
         return __builtin__.sum(values)
     elif self.func == "*":
         return values[0] * values[1]
     elif self.func == "/":
         return values[0] / values[1]
     return self.func(values)
Example #13
 def determinant(m):
     if len(m) == 0:
         return 1
     else:
         if not isinstance(m[0],list) or len(m[0]) != len(m):
             raise ValueError("not square")
         return __builtin__.sum([
                 ((-1)**c)*m[0][c]*m.minor(0,c).determinant()
                     for c in range(len(m))])
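
The recursion is a cofactor (Laplace) expansion along the first row, so for a 2x2 input it reduces to ad - bc. A standalone version of the same idea without the snippet's matrix/minor helpers:

def det(m):
    # expand along the first row; the minor drops row 0 and column c
    if len(m) == 1:
        return m[0][0]
    return sum((-1) ** c * m[0][c] *
               det([row[:c] + row[c + 1:] for row in m[1:]])
               for c in range(len(m)))

# det([[1, 2], [3, 4]]) -> -2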
Example #14
def is_twograph(T):
    r"""
    Checks that the incidence system `T` is a two-graph

    INPUT:

    - ``T`` -- an :class:`incidence structure <sage.combinat.designs.IncidenceStructure>`

    EXAMPLES:

    a two-graph from a graph::

        sage: from sage.combinat.designs.twographs import (is_twograph, TwoGraph)
        sage: p=graphs.PetersenGraph().twograph()
        sage: is_twograph(p)
        True

    a non-regular 2-uniform hypergraph which is a two-graph::

        sage: is_twograph(TwoGraph([[1,2,3],[1,2,4]]))
        True

    TESTS:

    wrong size of blocks::

        sage: is_twograph(designs.projective_plane(3))
        False

    a triple system which is not a two-graph::

        sage: is_twograph(designs.projective_plane(2))
        False
    """
    if not T.is_uniform(3):
        return False

    # A structure for a fast triple existence check
    v_to_blocks = {v: set() for v in range(T.num_points())}
    for B in T._blocks:
        B = frozenset(B)
        for x in B:
            v_to_blocks[x].add(B)

    def has_triple(x_y_z):
        x, y, z = x_y_z
        return bool(v_to_blocks[x] & v_to_blocks[y] & v_to_blocks[z])

    # Check that every quadruple contains an even number of triples
    from __builtin__ import sum
    for quad in combinations(range(T.num_points()), 4):
        if sum(map(has_triple, combinations(quad, 3))) % 2 == 1:
            return False

    return True
Example #16
    def transform_coordinates(self, resolutions, labels, transformation):
        # gather data and transform
        intensity = self.get_masked()
        coords = self.get_grid()
        transcoords = transformation(*coords)

        # get rid of invalids & masked intensities
        valid = ~__builtin__.sum((~numpy.isfinite(t) for t in transcoords), intensity.mask)
        transcoords = tuple(t[valid] for t in transcoords)

        return self.from_image(resolutions, labels, transcoords, intensity[valid])
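
That sum call exploits the start argument to OR-accumulate boolean arrays: for numpy bool arrays `+` is elementwise OR, so every ~isfinite mask gets folded onto the existing intensity mask. A small illustration:

import numpy

coords = [numpy.array([1.0, numpy.nan, 3.0]),
          numpy.array([numpy.inf, 5.0, 6.0])]
mask = numpy.zeros(3, dtype=bool)             # stands in for intensity.mask
invalid = sum((~numpy.isfinite(t) for t in coords), mask)
print(invalid)                                # [ True  True False]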
Example #17
def sum(sequence):
    """
    This is equivalent to calling reduce(op_add, sequence, 0).

        >>> sum([1, 2, 3, 4, 5])
        15

        >>> sum([])
        0
    """
    return __builtin__.sum(sequence)
Example #18
def getlinedata(lineid):
    '''Return an (x0, y0, x1, y1,...) array from the specified line(s).

    lineid   -- integer zero based index or a matplotlib Line2D instance.
                Can be also a list of indices or Line2D objects.

    Return numpy array.
    '''
    import numpy
    linehandles = findlines(lineid)
    rv = numpy.array(sum([h.get_data() for h in linehandles], ()))
    return rv
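
Here sum is used as a concatenator: each get_data() call returns an (x, y) tuple, and summing with a () start chains the pairs into one flat sequence before numpy.array sees it. For instance:

pairs = [([0, 1], [2, 3]), ([4, 5], [6, 7])]
sum(pairs, ())    # -> ([0, 1], [2, 3], [4, 5], [6, 7])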
Example #19
def chi2(wrps, x_min=0, x_max=0):
    """
    Expects two Histowrappers. Returns FloatWrapper.
    """
    wrps = iterableize(wrps)
    wrps = iter(wrps)
    try:
        first, second = next(wrps), next(wrps)
    except StopIteration:
        raise TooFewWrpsError("chi2 needs exactly two HistoWrappers.")
    try:
        next(wrps)
        raise TooManyWrpsError("chi2 needs exactly two HistoWrappers.")
    except StopIteration:
        pass
    for w in (first, second):
        if not isinstance(w, wrappers.HistoWrapper):
            raise WrongInputError(
                "chi2 needs type HistoWrapper. w: "
                + str(w)
            )
    if first.histo.GetNbinsX() != second.histo.GetNbinsX():
        raise WrongInputError(
            "chi2 needs histos with same number of bins."
        )
    if not x_max:
        x_max = int(first.histo.GetNbinsX() - 1)

    def get_weight_for_bin(i):
        val = (first.histo.GetBinContent(i+1)
               - second.histo.GetBinContent(i+1))**2
        err1 = first.histo.GetBinError(i+1)
        err2 = second.histo.GetBinError(i+1)
        if err1 and err2:
            return val / (err1**2 + err2**2)
        else:
            return 0.

    chi2_val = __builtin__.sum(
        get_weight_for_bin(i)
        for i in xrange(x_min, x_max)
    )
    info = second.all_info()
    info.update(first.all_info())
    return wrappers.FloatWrapper(
        chi2_val,
        **info
    )
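
Per bin the weight is the squared difference over the summed variances, and bins missing an error contribute zero. The same arithmetic on plain sequences (a sketch, independent of the ROOT histogram wrappers):

def chi2_from_arrays(a, b, a_err, b_err):
    # (a_i - b_i)^2 / (err_a^2 + err_b^2), zero-error bins skipped
    return sum((x - y) ** 2 / (ex ** 2 + ey ** 2)
               for x, y, ex, ey in zip(a, b, a_err, b_err)
               if ex and ey)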
Example #20
def sum(obj):
    """Sum of vector elements."""
    # matrix behaviour somewhat undefined.
    # jem. possibly needs revision to reduce behaviour it's overriding. Can't
    # use __builtin__.sum because it does too much.
    if is1x1(obj):
        return obj
    elif isinstance(obj, (matrix, spmatrix)):
        return __builtin__.sum(obj)
    else:
        if isinstance(obj, multfunction) and equiv(obj.lhs, -1):
            # jem experimental. may provide unexpected (and thus unhelpful)
            # simplification at times.
            return -1 * sumfunction(-1 * obj)
        else:
            return sumfunction(obj)
Example #22
def search(name=None, uid=None, type=None):
    """
    Return a list of items that match `name`, `uid`, and/or `type` attributes.
    """
    global _index

    if type is None:
       type = Item
    if _index is None:
        _index = _create_index()

    def match(item):
        return (uid is None or item.uid == uid) and isinstance(item, type)
    if name:
        items = _index.get(name) or []
    else:
        items = __builtin__.sum(_index.values(), [])
    items = [item for item in items if match(item)]
    items = sorted(items, key=lambda item: item.name)
    return IndexedList(items)
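
`__builtin__.sum(_index.values(), [])` flattens the per-name item lists into one list. Note this idiom is quadratic in the total number of items, since each + builds a new list; itertools.chain is the usual linear alternative:

import itertools

index = {'a': [1, 2], 'b': [3]}
flat_sum = sum(index.values(), [])                                # [1, 2, 3] (ordering follows the dict)
flat_chain = list(itertools.chain.from_iterable(index.values()))  # same result, linear time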
Example #23
    def logp_partial_gradient(self, variable, calculation_set = None):
        """
        gets the logp gradient of this deterministic with respect to variable
        """
        if self.verbose > 0:
            print '\t' + self.__name__ + ': logp_partial_gradient accessed.'

        if not (datatypes.is_continuous(variable) and datatypes.is_continuous(self)):
            return zeros(shape(variable.value))

        # loop through all the parameters and add up all the gradients of log p with respect to the appropriate variable
        gradient = __builtin__.sum([child.logp_partial_gradient(self, calculation_set) for child in self.children ])

        totalGradient = 0
        for parameter, value in self.parents.iteritems():
            if value is variable:
                    
                totalGradient += self.apply_jacobian(parameter, variable, gradient )

        return np.reshape(totalGradient, shape(variable.value))
Example #24
File: numc.py Project: oschuett/numc
	def __init__(self, arg, slices):
		self.arg = assimilate(arg) #no copy - creates a view
		self.dtype = self.arg.dtype
		if(not hasattr(slices, "__iter__")):
			slices = (slices, )
			
		#replace first Ellipsis
		for (i,s) in enumerate(slices):
			if(s == Ellipsis):
				c = __builtin__.sum(1 for x in slices if x not in (None, numpy.newaxis))
				fill = (slice(None, None, None),) * (self.arg.ndim -c-i+1)
				slices = slices[:i] + fill + slices[i+1:]
				break
				
		#replace remaining Ellipsis
		for (i,s) in enumerate(slices):
			if(s == Ellipsis):
				slices = slices[:i] +(slice(None, None, None),)  + slices[i+1:]
		
		j = 0 #points to dim in self.arg.shape which we need to consume next
		out_shape = []
		for s in slices:
			if(s in (None, numpy.newaxis)):
				out_shape.append(1)  	#not incrementing j
			elif(isinstance(s, int)):
				assert(abs(s) <= self.arg.shape[j])
				j += 1
			elif(isinstance(s, slice)):
				(first, last, step) = s.indices(self.arg.shape[j])
				out_shape.append(max(0, int((last - first)/step))) #TODO: correct?
				j += 1
			else:
				raise(Exception("Strange slice: "+str(s)))
				
		out_shape += self.arg.shape[j:]
		assert(all(s>=0 for s in out_shape))
		self.shape = tuple(out_shape)
		self.slices = tuple(slices)
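
The Ellipsis handling mirrors numpy's own rule: a single ... expands into enough full slices to consume the remaining dimensions. For reference, in plain numpy:

import numpy

a = numpy.zeros((2, 3, 4))
assert a[..., 0].shape == (2, 3)    # ... expanded to [:, :]
assert a[0, ...].shape == (3, 4)    # ... expanded to [:, :]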
Example #25
def findlines(lineid='all'):
    '''Convert lineid to a list of matplotlib Line2D instances.

    lineid   -- integer zero based index or a matplotlib Line2D instance.
                Can be also a slice, list of indices or Line2D objects.
                When 'all', use all line object in the current axis.

    Return a list of Line2D instances.
    '''
    from matplotlib import pyplot
    if _isiterablenotstring(lineid):
        rv = sum(map(findlines, lineid), [])
        return rv
    if isinstance(lineid, pyplot.Line2D):
        return [lineid]
    curaxes = findaxes()
    lines = curaxes and curaxes[0].get_lines()[:]
    if lineid == 'all':
        rv = lines
    elif isinstance(lineid, slice):
        rv = lines[lineid]
    else:
        rv = [lines[int(lineid)]]
    return rv
Example #26
def findtexts(textid='all'):
    '''Convert textid to a list of matplotlib Text instances.

    textid   -- integer zero based index or a matplotlib Text instance.
                Can be also a slice, list of indices or Text objects.
                When 'all', use all text objects in the current axis.

    Return a list of Text instances.
    '''
    from matplotlib import pyplot
    if _isiterablenotstring(textid):
        rv = sum(map(findtexts, textid), [])
        return rv
    if isinstance(textid, pyplot.Text):
        return [textid]
    curaxes = findaxes()
    alltexts = curaxes and curaxes[0].texts[:]
    if textid == 'all':
        rv = alltexts
    elif isinstance(textid, slice):
        rv = alltexts[textid]
    else:
        rv = [alltexts[int(textid)]]
    return rv
Example #27
    def __init__(self, f, pt, eps=None):
        """Return the gradient of f at pt.

        :param f: a differentiable function such that f(*pt) is a scalar
        :param pt: an ndarray, a list of ndarrays or tuple of ndarrays

        This function computes the gradient by a one-sided finite differences of a
        fixed step size (eps).

        It is assumed that f(...) will return a scalar.
        It is assumed that all f's inputs are numpy.ndarray objects.

        :param eps: the stepsize for the finite differencing.  None means input
        dtype-dependent. See `type_eps`.
        """

        def prod(inputs):
            rval = 1
            for i in inputs:
                rval *= i
            return rval

        packed_pt = False
        if not isinstance(pt, (list, tuple)):
            pt = [pt]
            packed_pt = True

        apt = [numpy.array(p) for p in pt]

        shapes = [p.shape for p in apt]
        dtypes = [str(p.dtype) for p in apt]

        # TODO: remove this eventually (why was this here in the first place ?)
        # In the case of CSM, the arguments are a mixture of floats and integers...
        #if not dtypes == [dtypes[0]] * len(apt):
            #raise TypeError('All function arguments must have same dtype')

        total_size = __builtin__.sum(prod(sh) for sh in shapes)

        working_dtype = __builtin__.min((self.type_eps[dt], dt) for dt in dtypes)[1]

        #create un-initialized memory
        x = numpy.ndarray((total_size,), dtype=working_dtype)
        gx = numpy.ndarray((total_size,), dtype=working_dtype)

        if eps is None:
            eps = __builtin__.max(self.type_eps[dt] for dt in dtypes)


        #set up aliases so that apt[i] is backed by memory in x
        # and self.gf is backed by memory in gx
        cur_pos = 0
        self.gf = []
        for i,p in enumerate(apt):
            p_size = prod(p.shape)
            # set up alias
            apt[i] = x[cur_pos:cur_pos+p_size].reshape(p.shape)
            self.gf.append(gx[cur_pos:cur_pos+p_size].reshape(p.shape))
            # initialize with p's value
            apt[i][...] = p
            cur_pos += p_size

        f_x = f(*[p.copy() for p in apt])

        # now iterate over the elements of x, and call f on apt.
        x_copy = x.copy()
        for i in xrange(total_size):
            x[:] = x_copy

            x[i] += eps
            f_eps = f(*apt)

            gx[i] = numpy.asarray((f_eps - f_x)/eps)

        if packed_pt:
            self.gf = self.gf[0]
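
Each gradient entry is the one-sided difference (f(x + eps*e_i) - f(x)) / eps over the flattened inputs. A compact single-array version of the same idea (a sketch, not the class's full multi-input machinery):

import numpy

def numeric_grad(f, x, eps=1e-6):
    # one-sided finite differences; flat and gflat are views into x and g
    x = numpy.array(x, dtype=float)
    g = numpy.empty_like(x)
    f0 = f(x)
    flat, gflat = x.ravel(), g.ravel()
    for i in range(flat.size):
        old = flat[i]
        flat[i] = old + eps
        gflat[i] = (f(x) - f0) / eps
        flat[i] = old
    return g

# numeric_grad(lambda v: (v ** 2).sum(), [1.0, 2.0]) is approximately [2., 4.]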
Example #28
  def test_custom2(self):
    numpy.random.seed(1)
    
    for with_C in [True]:
      for with_H in [True]:
        print "with_C", with_C
        print "with_H", with_H
        
        n = 5
        
        m = 3 if with_C else n
        h = [2,3]
        h = [3,1]

        H_ = DMatrix(numpy.random.random((n,__builtin__.sum(h))))  if with_H else DMatrix()
        P_ = numpy.random.random((n,n))
        R_ = numpy.random.random((n,3))

        A_ = randstable(n)

        C_ = DMatrix(numpy.random.random((n,m))) if with_C else DMatrix()
        V_ = DMatrix(numpy.random.random((m,m)))
        V_ = (V_+V_.T)/2
        
        H = MX.sym("H",H_.sparsity())

        A = MX.sym("A",A_.sparsity())
        C = MX.sym("C",C_.sparsity())

        Vs = V = MX.sym("V",V_.sparsity())
        V = (V+V.T)/2

        N = 100

        D = [C if with_C else DMatrix.eye(n)]
        for i in range(N):
          D.append(mul(A,D[-1]))

        DD = horzcat(D)
        Ls = [0]+list(numpy.cumsum(h))
      
        if with_H:
          Y = [ mul([mul(Li.T,DD),blkdiag([V]*(N+1)),mul(DD.T,Li)]) for Li in horzsplit(H,Ls)]
        else: 
          Y = [ mul([DD,blkdiag([V]*(N+1)),DD.T]) ]
        
        if with_C:
          if with_H:
            f = MXFunction(lrdleIn(a=A,c=C,v=Vs,h=H),[blkdiag(Y)])
          else:
            f = MXFunction(lrdleIn(a=A,c=C,v=Vs),[blkdiag(Y)])
        else:
          if with_H:
            f = MXFunction(lrdleIn(a=A,v=Vs,h=H),[blkdiag(Y)])
          else:
            f = MXFunction(lrdleIn(a=A,v=Vs),[blkdiag(Y)])
        f.init()
        
        
        for Solver, options in lrdlesolvers:
          print Solver
          g = LrDleSolver(Solver,lrdleStruct(a=A.sparsity(),c=C.sparsity(),v=Vs.sparsity(),h=H.sparsity()),h  if with_H else [])
          g.setOption(options)
          g.init()
          
          for i in [f,g]:
            i.setInput(A_,"a")
            i.setInput(V_,"v")
            if with_C:
              i.setInput(C_,"c")
            if with_H:
              i.setInput(H_,"h")
          
          try:
            self.checkfunction(g,f,sens_der=True,hessian=True,evals=2,digits=7)
          except Exception as e:
            if "second order derivatives are not supported" in str(e):
              self.checkfunction(g,f,evals=1,hessian=False,sens_der=False,digits=7)
            else:
              raise e
Example #29
  def test_dple_CH(self):

    for with_C in [True]:
      for with_H in [True]:
        print "with_C", with_C
        print "with_H", with_H
        
        numpy.random.seed(1)
        
        n = 5
        
        m = 3 if with_C else n
        h = [3,1]
        
        Ls = [0]+list(numpy.cumsum(h))
         
        K = 3

        H_ = [DMatrix(numpy.random.random((n,__builtin__.sum(h))))  if with_H else DMatrix() for i in range(K)]

        A_ = [ randstable(n,margin=0.2) for i in range(K) ]
  
        C_ = [ DMatrix(numpy.random.random((n,m))) if with_C else DMatrix() for i in range(K) ]
        V_ = [ DMatrix(numpy.random.random((m,m))) for i in range(K) ]
        V_ = [ (i+i.T)/2 for i in V_ ] 
        
        H = MX.sym("H",horzcat(H_).sparsity())
        Hs_ = horzsplit(H,__builtin__.sum(h))
        Hss_ = [ horzsplit(i,Ls) for i in Hs_]
        
        A = MX.sym("A",horzcat(A_).sparsity())
        As_ = horzsplit(A,n)
        
        C = MX.sym("C",horzcat(C_).sparsity())
        Cs_ = horzsplit(C,m)
        
        Vs = V = MX.sym("V",horzcat(V_).sparsity())
        Vs_ = [(i+i.T)/2 for i in horzsplit(V,m)]
        
        print "Vs_", [i.dimString() for i in Vs_]
        #V = horzcat([ (i+i.T)/2 for i in Vs_])

        N = 100
        
        V = blkdiag([Vs_[-1],blkdiag(Vs_[:-1])])
        
        Hn = blkdiag(Hs_)
        An = blockcat([[MX.sparse(n,(K-1)*n),As_[-1]],[blkdiag(As_[:-1]),MX.sparse((K-1)*n,n)]])
        Cn = blkdiag([Cs_[-1],blkdiag(Cs_[:-1])]) if with_C else None
        Vn = blkdiag([Vs_[-1],blkdiag(Vs_[:-1])])

        D = [Cn if with_C else DMatrix.eye(n*K)]
        for i in range(N):
          D.append(mul(An,D[-1]))

        DD = horzcat(D)
        
        Ls2 = [i*K for i in Ls]
        
        Ls2 = [0]+list(numpy.cumsum(h*K))

      
        if with_H:
          Y = [ blkdiag([ mul([mul(Li.T,DD),blkdiag([Vn]*(N+1)),mul(DD.T,Li)]) for Li in horzsplit(Hnn,Ls)]) for Hnn in horzsplit(Hn,__builtin__.sum(h))]
        else: 
          Y = diagsplit(mul([DD,blkdiag([Vn]*(N+1)),DD.T]),n)
        
        
        if with_C:
          if with_H:
            for i in Y:
              i.sparsity().spy()
            f = MXFunction(lrdpleIn(a=A,c=C,v=Vs,h=H),[horzcat(Y)])
          else:
            f = MXFunction(lrdpleIn(a=A,c=C,v=Vs),[horzcat(Y)])
        else:
          if with_H:
            f = MXFunction(lrdpleIn(a=A,v=Vs,h=H),[horzcat(Y)])
          else:
            f = MXFunction(lrdpleIn(a=A,v=Vs),[horzcat(Y)])
        f.setOption("name","reference")
        f.init()
        
        print "f",f
        
        
        temp = MXFunction([A],[An])
        temp.init()
        Af_ = temp([horzcat(A_)])[0]
        print "Af"
        Af_.printDense()
        E = numpy.linalg.eig(Af_)[0]
        
        assert max(abs(E))<=0.95, str(max(abs(E)))
        
        
        
        print [ numpy.linalg.eig(i)[0] for i in A_ ] 
        
        print numpy.linalg.eig(mul([i.T for i in reversed(A_)]))[0]
        print numpy.linalg.eig(mul([i for i in reversed(A_)]))[0]
        print numpy.linalg.eig(mul([i.T for i in A_]))[0]
        print numpy.linalg.eig(mul([i for i in A_]))[0]
        
        
        for Solver, options in lrdplesolvers:
          print Solver, options
          print "c", [i.sparsity() for i in Cs_]
          g = LrDpleSolver(Solver,lrdpleStruct(a=[i.sparsity() for i in As_],c=[i.sparsity() for i in Cs_],v=[ i.sparsity() for i in Vs_],h=[i.sparsity() for i in Hs_]if with_H else [])  ,[h]*K)
          
          print g.dictionary()
          g.setOption(options)
          g.init()
          
          for i in [f,g]:
            i.setInput(horzcat(A_),"a")
            i.setInput(horzcat(V_),"v")
            if with_C:
              i.setInput(horzcat(C_),"c")
            if with_H:
              i.setInput(horzcat(H_),"h")
              
            i.evaluate()
                 
                    
          try:
            self.checkfunction(g,f,sens_der=True,hessian=True,evals=2)
          except Exception as e:
            if "second order derivatives are not supported" in str(e):
              self.checkfunction(g,f,evals=1,hessian=False,sens_der=False)
            else:
              raise e
Example #30
    def inflectional_entropy(self, smooth=1, verbose=False, use_lemma=True):
        """This function collapses across all relevant lemmas, e.g. the noun
        "build" and the verb "build", or the various "wind" verbs.

        Caution: if there are two ways to express the same inflection, the
        function will treat them as the same cell in the inflection
        distribution (e.g. "hanged" and "hung"). Probably worth adding this
        as an option in a future version.

        This function supports the following three types of inflectional
        entropy, but there are many more ways to carve up the various
        inflections.

        Paradigm 1: separate_bare

        bare forms are separated into nominal and verbal, but the
        verbal bare form is not further differentiated between present
        plural agreeing form and infinitive

        ache (singular), aches (plural), ache (verb -- infinitive,
        present tense except third singular),
        aches (3rd singular present),
        aching (participle), ached (past tense),
        ached (participle -- passive and past_tense)

        Paradigm 2: collapsed_bare

        Same as separate_bare but collapsing across bare forms:

        ache (singular noun and all bare verbal forms --
        so all forms with no overt inflection), aches (plural),
        aches (3rd singular present), aching (participle),
        ached (past tense), ached (participles)

        Paradigm 3: no_bare

        Same as collapsed_bare, only without bare form:

        aches (plural), aches (3rd singular present),
        aching (participle), ached (past tense), ached (participles)
        """

        for record in self._dict:
            if use_lemma:
                item = self._dict[record]['lemma_headword']
            else:
                raise ValueError("use_lemma must be True.")
            clx_lemmas = clx.lemma_lookup(item)
            # Use __builtin__ here in case sum is overshadowed by numpy
            all_wordforms = __builtin__.sum((clx.lemma_to_wordforms(clx_lemma)
                                             for clx_lemma in clx_lemmas), [])

            counter = collections.Counter()

            for wf in all_wordforms:
                infl = wf.FlectType
                freq = wf.Cob
                if (infl[0] == 'present_tense' and infl[1] != '3rd_person_verb'
                    or infl[0] == 'infinitive'):
                    counter['bare_verb'] += freq
                if infl[0] == 'singular':
                    counter['bare_noun'] += freq
                if infl[0] == 'plural':
                    counter['noun_plural'] += freq
                if infl[0] == 'past_tense':
                    counter['past_tense'] += freq
                if infl == ['positive']:
                    counter['positive'] += freq
                if infl == ['comparative']:
                    counter['comparative'] += freq
                if infl == ['superlative']:
                    counter['superlative'] += freq
                if infl == ['headword_form']:
                    counter['headword_form'] += freq
                if infl == ['present_tense', '3rd_person_verb', 'singular']:
                    counter['third_sg'] += freq
                if infl == ['participle', 'present_tense']:
                    counter['part_ing'] += freq
                if infl == ['participle', 'past_tense']:
                    counter['part_ed'] += freq

            common = ['noun_plural', 'third_sg', 'part_ing', 'part_ed',
                      'past_tense', 'comparative', 'superlative']
            bare = ['bare_noun', 'bare_verb', 'positive', 'headword_form']
            common_freqs = [counter[i] for i in common if i in counter]
            bare_freqs = [counter[i] for i in bare if i in counter]

            if verbose:
                print counter

            self._dict[record]['infl_ent_separate_bare'] = self.entropy(bare_freqs + common_freqs, smooth)
            self._dict[record]['infl_ent_collapsed_bare'] = self.entropy([sum(bare_freqs)] + common_freqs, smooth)
            self._dict[record]['infl_ent_no_bare'] = self.entropy(common_freqs, smooth)
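
self.entropy is not shown in this snippet; a plausible sketch, assuming it add-`smooth`s the frequency vector, normalizes, and returns Shannon entropy in bits (the project's actual implementation may differ):

import math

def entropy(freqs, smooth=1):
    # add-`smooth` smoothing, then Shannon entropy of the normalized distribution
    freqs = [f + smooth for f in freqs]
    total = float(sum(freqs))
    return -sum((f / total) * math.log(f / total, 2)
                for f in freqs if f > 0)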
Example #31
    def analyse(self):
        primaries = self.vselect( 'PVs' , ISPRIMARY )
        if primaries.empty() :
            return self.Warning('No primary vertices are found', SUCCESS )

        mcB = self.mcselect(
            'mcB', "[( B+ ==>  ( J/psi(1S) =>  mu+  mu-  )  K+  pi+  pi- )]CC")

        mcB_psi = self.mcselect(
            'mcB_psi', "[( B+ ==> K+ (psi(2S) => ( J/psi(1S) =>  mu+  mu-  ) pi+ pi-))]CC")

        mcB_X = self.mcselect(
            'mcB_X', "[( B+ ==> K+ (X_1(3872) => ( J/psi(1S) =>  mu+  mu-  ) pi+ pi-))]CC")

        mcB_K_Ks = self.mcselect(
            'mcB_K_Ks', "[( B+ ==>  ( J/psi(1S) =>  mu+  mu-  )  (K_1(1270)+  => (K*(892)0 => K+ pi-) pi+) )]CC")

        mcB_K_rho = self.mcselect(
            'mcB_K_rho', "[( B+ ==>  ( J/psi(1S) =>  mu+  mu-  )  (K_1(1270)+  => (rho(770)0 => pi+ pi-) K+) )]CC")

        mcB_K_K0s = self.mcselect(
            'mcB_K_K0s', "[( B+ ==>  ( J/psi(1S) =>  mu+  mu-  )  (K_1(1270)+  => (K*_0(1430)0 => K+ pi-) pi+) )]CC")


        sizes = [
            ("psi", mcB_psi.size()),
            ("X", mcB_X.size()),
            ("K_Ks", mcB_K_Ks.size()),
            ("K_rho", mcB_K_rho.size()),
            ("K_K0s", mcB_K_K0s.size()),
        ]

        nt_sizes = self.nTuple("sizes")

        for name, size in sizes:
            nt_sizes.column('mcB_' + name, size)
        
        nt_sizes.write()


        if mcB.size() != 1 or __builtin__.sum([x[1] for x in sizes]) != 1:
            return self.Warning("Something wrong with MC size " + str(mcB.size()), SUCCESS)


        mcK = self.mcselect(
            "mcK",  "[( B+ ==>  ( J/psi(1S) =>  mu+  mu-  )  ^K+  pi- pi+ )]CC")
        mcPi = self.mcselect(
            "mcPi",  "[( B+ ==>  ( J/psi(1S) =>  mu+  mu-  )  K+  ^pi- ^pi+ )]CC")
        mcMu = self.mcselect(
            "mcMu", "[( B+ ==>  ( J/psi(1S) =>  ^mu+  ^mu-  )  K+  pi- pi+ )]CC")
        mcPsi = self.mcselect(
            "mcPsi", "[( B+ ==>  ^( J/psi(1S) =>  mu+  mu-  )  K+  pi- pi+ )]CC")

        if mcK.empty() or mcMu.empty() or mcPsi.empty() or mcPi.empty():
            return self.Warning('No true MC-decay components are found', SUCCESS )

        match = self.mcTruth()
        trueB = MCTRUTH(match, mcB)

        true_mcB_psi = MCTRUTH(match, mcB_psi)
        true_mcB_X = MCTRUTH(match, mcB_X)
        true_mcB_K_Ks = MCTRUTH(match, mcB_K_Ks)
        true_mcB_K_rho = MCTRUTH(match, mcB_K_rho)
        true_mcB_K_K0s = MCTRUTH(match, mcB_K_K0s)


        trueK = MCTRUTH(match, mcK)
        truePi = MCTRUTH(match, mcPi)
        truePsi = MCTRUTH(match, mcPsi)
        trueMu = MCTRUTH(match, mcMu)

        kaons  = self.select ( 'K'  ,  ('K+'  == ABSID) & trueK )
        pions  = self.select ( 'pi'  ,  ('pi+'  == ABSID) & truePi )
        piplus  = self.select ( 'pi+' ,  pions , Q > 0 )
        piminus = self.select ( 'pi-' ,  pions , Q < 0 )
        psis = self.select( "Jpsi", ('J/psi(1S)'  == ABSID) & truePsi )


        k_counter = self.counter("k_counter")
        pi_counter = self.counter("pi_counter")
        jpsi_counter = self.counter("jpsi_counter")
        b_counter = self.counter("b_counter")

        k_counter += kaons.size()
        pi_counter += pions.size()
        jpsi_counter += psis.size()


        
        if kaons.empty():
            return self.Warning("No reconstructed kaons", SUCCESS)  # RETURN
    

        if pions.empty():
            return self.Warning("No reconstructed pions", SUCCESS)  # RETURN
        
        if piplus.empty() or piminus.empty():
            return self.Warning("Both piplus and piminus are empty", SUCCESS)  # RETURN

        if psis.empty():
            return self.Warning("No reconstructed psis", SUCCESS)  # RETURN


        # myB = self.select('Bu' , '[( B+ ->  J/psi(1S)  K+  pi+  pi-)]CC' )
        myB = self.loop('Jpsi K pi+ pi-', 'B+')



        cut_pi  = ( PT          > 200 * MeV ) & \
                in_range ( 2          , ETA , 5         ) & \
                in_range ( 3.2 * GeV  , P   , 150 * GeV ) & \
                ( MIPCHI2DV()  > 4        )

                # ( PROBNNpi     > 0.1      ) & \
                # ( CLONEDIST   > 5000      ) & \
                # ( TRGHOSTPROB < 0.5       ) & \
                # ( TRCHI2DOF   < 4         ) & \
                # HASRICH                     #& \
        
        cut_k = ( PT          > 200 * MeV ) & \
                ( CLONEDIST   > 5000      ) & \
                ( TRGHOSTPROB < 0.5       ) & \
                ( TRCHI2DOF   < 4         ) & \
                in_range ( 2          , ETA , 5         ) & \
                in_range ( 3.2 * GeV  , P   , 150 * GeV ) & \
                HASRICH                     & \
                ( PROBNNk      > 0.1      ) & \
                ( MIPCHI2DV()  > 4        )

        cut_b = VFASPF(VCHI2PDOF) < 12 #& \
                #(CTAU > (75 * micrometer))

        for b in myB:
            if not 0 < VCHI2(b) < 100:
                continue
            k, pi1, pi2 = b(2), b(3), b(4)
            if Q(k) > 0:
                b.setPID("B+")
            else:
                b.setPID("B-")
            if not trueB(b):
                continue

            # if not (cut_pi(b(3)) and cut_pi(b(4))):
            #     continue

            if not cut_k(k):
                continue
            if not (cut_pi(pi1) and cut_pi(pi2)):
                continue
            # if not cut_b(b):
            #     continue

            b.save("BB")

        bb = self.selected("BB")
        b_counter += bb.size()

        # Constraints
        dtffun_ctau = DTF_CTAU(0, True)
        dtffun_chi2 = DTF_CHI2NDOF(True, "J/psi(1S)")
        dtffun_m = DTF_FUN(M, True, "J/psi(1S)")


        nt = self.nTuple("t")

        for myb in bb:
            if not all([myb(i) for i in xrange(0, 5)]):
                continue


            b, jpsi, k, pi1, pi2 = tuple(myb(i) for i in xrange(5))


            if not dtffun_m(myb) / GeV > 5.0:
                continue

            # add DTF-applied information
            nt.column('DTFm_b', dtffun_m(myb) / GeV)
            nt.column('DTFctau', dtffun_ctau(myb))
            nt.column('DTFchi2ndof', dtffun_chi2(myb))

            MIPCHI2DVfun = MIPCHI2DV()

            self.treatKine(nt, b, '_b')
            self.treatKine(nt, jpsi, '_jpsi')

            # add the information for Pid efficiency correction
            self.treatPions(nt, b)
            self.treatKaons(nt, b)
            self.treatMuons(nt, b)
            self.treatTracks(nt, b)


            # ==========================================
            # Do fake
            # ==========================================
            nt.column('mass', self._mass ( b )  / GeV )

            ## try with pi1->K
            with fakeK ( pi2, pid = LHCb.ParticleID( int(Q(pi2)) * 321 ) ) :
                nt.column ( 'mass_pi2ask' , self._mass ( b ) / GeV )

            self.fillMasses(nt, myb, "c2", True, "J/psi(1S)")


            nt.column('MIPCHI2DV_k', MIPCHI2DVfun(k))
            nt.column('MIPCHI2DV_pi1', MIPCHI2DVfun(pi1))
            nt.column('MIPCHI2DV_pi2', MIPCHI2DVfun(pi2))

            nt.column ( 'mcTrueB'    , trueB(b)          )
            nt.column ( 'mcTrueB_psi'   , true_mcB_psi(myb))
            nt.column ( 'mcTrueB_X'   , true_mcB_X(myb))
            nt.column ( 'mcTrueB_K_Ks'   , true_mcB_K_Ks(myb))
            nt.column ( 'mcTrueB_K_rho'   , true_mcB_K_rho(myb))
            nt.column ( 'mcTrueB_K_K0s'   , true_mcB_K_K0s(myb))
            nt.column ( 'mcTruePsi' , truePsi(jpsi(0)    ))
            nt.column ( 'mcTrueK'    , trueK(myb(2))     )
            nt.column ( 'mcTruePi1'  , truePi(myb(3))    )
            nt.column ( 'mcTruePi2'  , truePi(myb(4))    )

            nt.column ( 'mcTrueMu1'  , trueMu(jpsi(1))    )
            nt.column ( 'mcTrueMu2'  , trueMu(jpsi(2))    )



            # add the information needed for TisTos
            self.tisTos ( jpsi  , nt  , 'psi_' ,
                          self.lines [ 'psi' ] , self.l0tistos , self.l1tistos , self.l2tistos )

            self.tisTos ( jpsi  , nt  , 'psi1_' ,
                          self.lines [ 'psi1' ] , self.l0tistos , self.l1tistos , self.l2tistos )

            self.tisTos ( jpsi  , nt  , 'psi2_' ,
                          self.lines [ 'psi2' ] , self.l0tistos , self.l1tistos , self.l2tistos )

            self.tisTos ( jpsi  , nt  , 'psi3_' ,
                          self.lines [ 'psi3' ] , self.l0tistos , self.l1tistos , self.l2tistos )

            nt.write()

        return SUCCESS
Example #32
    def build_statements(tensor_index_labels,index_join_pairs,result_index_labels):
    #@+at
    # This routine recursively builds a list of statements which performs the full tensor contraction.
    # 
    # First, if there is only one tensor left, then transpose and reshape it to match the result_index_labels.
    #@@c
        if len(tensor_index_labels) == 1:
            if len(result_index_labels) == 0:
                return ["return A"]
            else:
                final_index_labels = tensor_index_labels[0]
                result_indices = [[final_index_labels.index(index) for index in index_group] for index_group in result_index_labels]
                transposed_indices = __builtin__.sum(result_indices,[])
                assert type(transposed_indices) == list
                assert len(final_index_labels) == len(transposed_indices)
                new_shape = ",".join(["(%s)" % "*".join(["shape[%i]"%index for index in index_group]) for index_group in result_indices])     
                return ["shape=A.shape","return A.transpose(%s).reshape(%s)" % (transposed_indices,new_shape)]
    #@+at
    # Second, if all joins have finished, then take outer products to combine all remaining tensors into one.
    #@@c
        elif len(index_join_pairs) == 0:
            if tensor_index_labels[-1] is None:
                return build_statements(tensor_index_labels[:-1],index_join_pairs,result_index_labels)
            elif len(tensor_index_labels[-1]) == 0:
                v = n2l[len(tensor_index_labels)-1]
                return ["A*=%s" % v, "del %s" % v] + build_statements(tensor_index_labels[:-1],index_join_pairs,result_index_labels)
            else:
                v = n2l[len(tensor_index_labels)-1]
                tensor_index_labels[0] += tensor_index_labels[-1]
                return ["A = multiply.outer(A,%s)" % v, "del %s" % v] + build_statements(tensor_index_labels[:-1],index_join_pairs,result_index_labels)
    #@+at
    # Otherwise, do the first join, walking through index_join_pairs to find any other pairs which connect the same two tensors.
    #@@c
        else:
            #@+<< Search for all joins between these tensors >>
            #@+node:gcross.20100923134429.1872: *5* << Search for all joins between these tensors >>
            #@+at
            # This function searches for the tensors which are joined, and reorders the indices in the join so that the index corresponding to the tensor appearing first in the list of tensors appears first in the join.
            #@@c
            def find_tensor_ids(join):
                reordered_join = [None,None]
                tensor_ids = [0,0]
                join = list(join)
                while tensor_ids[0] < len(tensor_index_labels):
                    index_labels = tensor_index_labels[tensor_ids[0]]
                    if index_labels is None:
                        tensor_ids[0] += 1
                    elif join[0] in index_labels:
                        reordered_join[0] = index_labels.index(join[0])
                        del join[0]
                        break
                    elif join[1] in index_labels:
                        reordered_join[0] = index_labels.index(join[1])
                        del join[1]
                        break
                    else:
                        tensor_ids[0] += 1
                assert len(join) == 1 # otherwise index was not found in any tensor
                tensor_ids[1] = tensor_ids[0] + 1
                while tensor_ids[1] < len(tensor_index_labels):
                    index_labels = tensor_index_labels[tensor_ids[1]]
                    if index_labels is None:
                        tensor_ids[1] += 1
                    elif join[0] in index_labels:
                        reordered_join[reordered_join.index(None)] = index_labels.index(join[0])
                        del join[0]
                        break
                    else:
                        tensor_ids[1] += 1
                assert len(join) == 0 # otherwise index was not found in any tensor
                return tensor_ids, reordered_join

            join_indices = [0]
            tensor_ids,reordered_join = find_tensor_ids(index_join_pairs[0])

            indices = [[],[]]

            for j in xrange(2):
                indices[j].append(reordered_join[j])

            # Search for other joins between these tensors
            for i in xrange(1,len(index_join_pairs)):
                tensor_ids_,reordered_join = find_tensor_ids(index_join_pairs[i])
                if tensor_ids == tensor_ids_:
                    join_indices.append(i)
                    for j in xrange(2):
                        indices[j].append(reordered_join[j])

            #@-<< Search for all joins between these tensors >>

            #@+<< Build tensor contraction statements >>
            #@+node:gcross.20100923134429.1873: *5* << Build tensor contraction statements >>
            tensor_vars = [n2l[id] for id in tensor_ids]

            statements = [
                "try:",
                "   %s = tensordot(%s,%s,%s)" % (tensor_vars[0],tensor_vars[0],tensor_vars[1],indices),
                "   del %s" % tensor_vars[1],
                "except ValueError:",
                "   raise ValueError('indices %%s do not match for tensor %%i, shape %%s, and tensor %%i, shape %%s.' %% (%s,%i,%s.shape,%i,%s.shape))" % (indices,tensor_ids[0],tensor_vars[0],tensor_ids[1],tensor_vars[1])
            ]
            #@-<< Build tensor contraction statements >>

            #@+<< Delete joins from list and update tensor specifications >>
            #@+node:gcross.20100923134429.1874: *5* << Delete joins from list and update tensor specifications >>
            join_indices.reverse()
            for join_index in join_indices:
                del index_join_pairs[join_index]

            new_tensor_index_labels_0 = list(tensor_index_labels[tensor_ids[0]])
            indices[0].sort(reverse=True)
            for index in indices[0]:
                del new_tensor_index_labels_0[index]

            new_tensor_index_labels_1 = list(tensor_index_labels[tensor_ids[1]])
            indices[1].sort(reverse=True)
            for index in indices[1]:
                del new_tensor_index_labels_1[index]

            tensor_index_labels[tensor_ids[0]] = new_tensor_index_labels_0+new_tensor_index_labels_1
            tensor_index_labels[tensor_ids[1]] = None
            #@-<< Delete joins from list and update tensor specifications >>

            return statements + build_statements(tensor_index_labels,index_join_pairs,result_index_labels)
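
The generated statements ultimately reduce tensor pairs with numpy.tensordot over the matched index positions; for instance, joining axis 1 of A with axis 0 of B:

import numpy
from numpy import tensordot

A = numpy.ones((2, 3))
B = numpy.ones((3, 4))
C = tensordot(A, B, [[1], [0]])   # the contraction a generated statement performs
assert C.shape == (2, 4)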
Example #33
 def S(x,y):
     return sum(map(lambda j: x[j]*y[2-j]**q, xrange(3)))
Example #34
 def P(x,y):
     return sum(map(lambda j: x[j]*y[m-1-j]**q, xrange(m)))==0
    def analyse(self):
        primaries = self.vselect( 'PVs' , ISPRIMARY )
        if primaries.empty() :
            return self.Warning('No primary vertices are found', SUCCESS )

        mcB = self.mcselect(
            'mcB', "[( B+ ==>  ( J/psi(1S) =>  mu+  mu-  )  K+  pi+  pi- )]CC")

        mcB_psi = self.mcselect(
            'mcB_psi', "[( B+ ==> K+ (psi(2S) => ( J/psi(1S) =>  mu+  mu-  ) pi+ pi-))]CC")

        mcB_X = self.mcselect(
            'mcB_X', "[( B+ ==> K+ (X_1(3872) => ( J/psi(1S) =>  mu+  mu-  ) pi+ pi-))]CC")

        mcB_K_Ks = self.mcselect(
            'mcB_K_Ks', "[( B+ ==>  ( J/psi(1S) =>  mu+  mu-  )  (K_1(1270)+  => (K*(892)0 => K+ pi-) pi+) )]CC")

        mcB_K_rho = self.mcselect(
            'mcB_K_rho', "[( B+ ==>  ( J/psi(1S) =>  mu+  mu-  )  (K_1(1270)+  => (rho(770)0 => pi+ pi-) K+) )]CC")

        mcB_K_K0s = self.mcselect(
            'mcB_K_K0s', "[( B+ ==>  ( J/psi(1S) =>  mu+  mu-  )  (K_1(1270)+  => (K*_0(1430)0 => K+ pi-) pi+) )]CC")


        sizes = [
            ("psi", mcB_psi.size()),
            ("X", mcB_X.size()),
            ("K_Ks", mcB_K_Ks.size()),
            ("K_rho", mcB_K_rho.size()),
            ("K_K0s", mcB_K_K0s.size()),
        ]

        nt_sizes = self.nTuple("sizes")

        for name, size in sizes:
            nt_sizes.column('mcB_' + name, size)
        
        nt_sizes.write()


        if mcB.size() != 1 or __builtin__.sum([x[1] for x in sizes]) != 1:
            return self.Warning("Something wrong with MC size " + str(mcB.size()), SUCCESS)


        mcK = self.mcselect(
            "mcK",  "[( B+ ==>  ( J/psi(1S) =>  mu+  mu-  )  ^K+  pi- pi+ )]CC")
        mcPi = self.mcselect(
            "mcPi",  "[( B+ ==>  ( J/psi(1S) =>  mu+  mu-  )  K+  ^pi- ^pi+ )]CC")
        mcMu = self.mcselect(
            "mcMu", "[( B+ ==>  ( J/psi(1S) =>  ^mu+  ^mu-  )  K+  pi- pi+ )]CC")
        mcPsi = self.mcselect(
            "mcPsi", "[( B+ ==>  ^( J/psi(1S) =>  mu+  mu-  )  K+  pi- pi+ )]CC")

        if mcK.empty() or mcMu.empty() or mcPsi.empty() or mcPi.empty():
            return self.Warning('No true MC-decay components are found', SUCCESS )

        match = self.mcTruth()
        trueB = MCTRUTH(match, mcB)

        true_mcB_psi = MCTRUTH(match, mcB_psi)
        true_mcB_X = MCTRUTH(match, mcB_X)
        true_mcB_K_Ks = MCTRUTH(match, mcB_K_Ks)
        true_mcB_K_rho = MCTRUTH(match, mcB_K_rho)
        true_mcB_K_K0s = MCTRUTH(match, mcB_K_K0s)


        trueK = MCTRUTH(match, mcK)
        truePi = MCTRUTH(match, mcPi)
        truePsi = MCTRUTH(match, mcPsi)
        trueMu = MCTRUTH(match, mcMu)


        myB = self.select('Bu' , '[( B+ ->  J/psi(1S)  K+  pi+  pi-)]CC' )

        # Constraints
        dtffun_ctau = DTF_CTAU(0, True)
        dtffun_chi2 = DTF_CHI2NDOF(True, "J/psi(1S)")
        dtffun_m = DTF_FUN(M, True, "J/psi(1S)")


        nt = self.nTuple("t")

        for myb in myB:
            if not all([myb(i) for i in xrange(0, 5)]):
                continue


            b, jpsi, k, pi1, pi2 = tuple(myb(i) for i in xrange(5))

            # add DTF-applied information
            nt.column('DTFm_b', dtffun_m(myb) / GeV)
            nt.column('DTFctau', dtffun_ctau(myb))
            nt.column('DTFchi2ndof', dtffun_chi2(myb))

            MIPCHI2DVfun = MIPCHI2DV()

            self.treatKine(nt, b, '_b')
            self.treatKine(nt, jpsi, '_jpsi')

            # add the information for Pid efficiency correction
            self.treatPions(nt, b)
            self.treatKaons(nt, b)
            self.treatMuons(nt, b)
            self.treatTracks(nt, b)


            # ==========================================
            # Do fake
            # ==========================================
            nt.column('mass', self._mass ( b )  / GeV )

            ## try with pi1->K
            with fakeK ( pi2, pid = LHCb.ParticleID( int(Q(pi2)) * 321 ) ) :
                nt.column ( 'mass_pi2ask' , self._mass ( b ) / GeV )

            self.fillMasses(nt, myb, "c2", True, "J/psi(1S)")


            nt.column('MIPCHI2DV_k', MIPCHI2DVfun(k))
            nt.column('MIPCHI2DV_pi1', MIPCHI2DVfun(pi1))
            nt.column('MIPCHI2DV_pi2', MIPCHI2DVfun(pi2))

            nt.column ( 'mcTrueB'    , trueB(b)          )
            nt.column ( 'mcTrueB_psi'   , true_mcB_psi(myb))
            nt.column ( 'mcTrueB_X'   , true_mcB_X(myb))
            nt.column ( 'mcTrueB_K_Ks'   , true_mcB_K_Ks(myb))
            nt.column ( 'mcTrueB_K_rho'   , true_mcB_K_rho(myb))
            nt.column ( 'mcTrueB_K_K0s'   , true_mcB_K_K0s(myb))
            nt.column ( 'mcTruePsi' , truePsi(jpsi(0)    ))
            nt.column ( 'mcTrueK'    , trueK(myb(2))     )
            nt.column ( 'mcTruePi1'  , truePi(myb(3))    )
            nt.column ( 'mcTruePi2'  , truePi(myb(4))    )

            nt.column ( 'mcTrueMu1'  , trueMu(jpsi(1))    )
            nt.column ( 'mcTrueMu2'  , trueMu(jpsi(2))    )



            # add the information needed for TisTos
            self.tisTos ( jpsi  , nt  , 'psi_' ,
                          self.lines [ 'psi' ] , self.l0tistos , self.l1tistos , self.l2tistos )

            self.tisTos ( jpsi  , nt  , 'psi1_' ,
                          self.lines [ 'psi1' ] , self.l0tistos , self.l1tistos , self.l2tistos )

            self.tisTos ( jpsi  , nt  , 'psi2_' ,
                          self.lines [ 'psi2' ] , self.l0tistos , self.l1tistos , self.l2tistos )

            self.tisTos ( jpsi  , nt  , 'psi3_' ,
                          self.lines [ 'psi3' ] , self.l0tistos , self.l1tistos , self.l2tistos )

            nt.write()

        return SUCCESS