def extract_blobs(img, img_proc_pipeline=pipeline_otsu, frame_selection=frame_selection,
                  blob_selection=blob_selection, params=params, orig_img=None, debug=False):
    img_processed = reduce(lambda x, func: func(x, params), [img] + img_proc_pipeline) if img_proc_pipeline else img
    blobs, nblobs = ndimage.label(img_processed, structure=np.ones((3, 3)))
    if debug:
        return [((n, n), reduce(lambda x, func: func(x, params), [img] + img_proc_pipeline[:n]))
                for n in range(len(img_proc_pipeline))] + [((0, 0), blobs)]
    if frame_selection and not frame_selection(img_processed, blobs, nblobs, params):
        return []
    fragments = []
    if orig_img is None:
        orig_img = img
    for blob_num in range(1, nblobs + 1):
        if blob_selection and not blob_selection(blobs, blob_num, params):
            continue
        indices = np.argwhere(blobs == blob_num)
        xblocksize, yblocksize = params.get('blocksize', (1, 1))
        # fragment of original image within bounding box
        ((xmin, ymin), (xmax, ymax)) = np.min(indices, axis=0), np.max(indices, axis=0)
        #fragment = img[xmin*xblocksize:(xmax+1)*xblocksize, ymin*yblocksize:(ymax+1)*yblocksize]
        X, Y = np.ogrid[:(xmax - xmin + 1) * xblocksize, :(ymax - ymin + 1) * yblocksize]
        fragment = orig_img[xmin * xblocksize:(xmax + 1) * xblocksize,
                            ymin * yblocksize:(ymax + 1) * yblocksize] * \
            (blobs[xmin:xmax + 1, ymin:ymax + 1][X // xblocksize, Y // yblocksize])
        mask = blobs[xmin:xmax + 1, ymin:ymax + 1][X // xblocksize, Y // yblocksize] == blob_num
        fragment[~mask] = params.get('pixel_mask', np.nan)
        fragment /= blob_num  # workaround: saving to SQL does not cope with the more obvious representation
        fragments.append(((xmin * xblocksize, ymin * yblocksize), fragment))
    return fragments
def get_filter_arguments(filter_target):
    """ Get `Q` object passed to `filter` function. """
    queries = []
    fields = [col.field for col in self.columns if col.searchable]
    value = filter_target  # temporary workaround
    if value:
        if value[0] == '*':
            value = value[1:]
            queries.append(Q(**{"tags__name__contains": value}))
            queries.append(Q(**{"tags__abbreviation__contains": value}))
    for field in fields:
        if field:
            if isinstance(field, set):
                for sub_field in field:
                    key = "__".join(sub_field.split(".") + ["contains"])
                    queries.append(Q(**{key: value}))
            else:
                key = "__".join(field.split(".") + ["contains"])
                queries.append(Q(**{key: value}))
        else:
            raise NameError
    return reduce(lambda x, y: x | y, queries)
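# Hedged sketch of the fold used above: assuming Django is available, reduce
# with operator.or_ collapses a list of Q objects into one OR query. The
# model and field names here are hypothetical.
import operator
from functools import reduce

from django.db.models import Q

def contains_any(value, field_names):
    # one Q that matches when any listed field contains `value`
    return reduce(operator.or_, (Q(**{"%s__contains" % f: value}) for f in field_names))

# usage (hypothetical model): Item.objects.filter(contains_any("foo", ["name", "slug"]))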
def filter_search(self, request_data):
    for key, val in request_data.items():
        if key in ['label', 'tags', 'reason', 'main_file', 'script_arguments']:
            field_list = [x.strip() for x in val.split(',')]
            self.sim_list = self.sim_list.filter(
                reduce(lambda x, y: x | y,
                       [Q(**{"%s__contains" % key: word}) for word in field_list]))  # __icontains (?)
        elif key == 'fulltext_inquiry':  # search without using the search form
            results = []
            field_list = [x.strip() for x in request_data['fulltext_inquiry'].split(',')]
            for item in models.Record.params_search:
                intermediate_res = self.sim_list.filter(
                    reduce(lambda x, y: x | y,
                           [Q(**{"%s__contains" % item: word}) for word in field_list]))
                results = list(set(results).union(set(intermediate_res)))
            self.sim_list = results
            break  # a fulltext inquiry excludes all other filters
        elif isinstance(val, datetime.date):
            self.sim_list = self.sim_list.filter(timestamp__year=val.year,
                                                 timestamp__month=val.month,
                                                 timestamp__day=val.day)
        elif isinstance(val, models.Executable):
            self.sim_list = self.sim_list.filter(executable__path=val.path)
        elif isinstance(val, models.Repository):
            self.sim_list = self.sim_list.filter(repository__url=val.url)
    if hasattr(self, 'date_base') and self.date_base:
        # the user specified "date within" in the search field
        self.date_base = strptime(self.date_base, "%m/%d/%Y")  # from text input in the search form
        base = datetime.date(self.date_base.tm_year, self.date_base.tm_mon, self.date_base.tm_mday)
        nb_days = self.dict_dates[self.date_interval]  # date interval from the search form
        dateIntvl = {'min': base - datetime.timedelta(days=nb_days),
                     'max': base + datetime.timedelta(days=nb_days)}  # interval of accepted dates
        # keep all the records inside the specified interval
        self.sim_list = [x for x in self.sim_list
                         if datetime.datetime.combine(dateIntvl['min'], datetime.time())
                         <= x.timestamp
                         <= datetime.datetime.combine(dateIntvl['max'], datetime.time(23, 59))]
    elif self.tags:
        self.sim_list = self.sim_list.filter(tags__icontains=self.tags.strip())
def reduce(self, function, new_collection, initializer=None, **kwargs):
    """
    Reduces a collection into a new collection with a given function.

    | Arguments:
    | ``function`` -- The function used for reducing.
    | ``new_collection`` -- The name of the collection to insert the new value into.
      Any existing values will be deleted. If ``None``, the current collection is
      replaced with the reduction output.

    | Keyword arguments:
    | ``create_if_missing`` -- When False, a ValueError is raised if the new collection doesn't exist.
    | ``error_if_exists`` -- When True, a ValueError is raised if the new collection already exists.
    """
    if initializer is not None:
        reduced = reduce(function, self.iterator(), initializer)
    else:
        reduced = reduce(function, self.iterator())
    if new_collection in [None, self.name]:
        collection = self
    else:
        collection = self.parent_db.collection(new_collection, reset_collection=True, **kwargs)
    collection.append(reduced)
    return collection
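# Standalone illustration of the two functools.reduce calling conventions the
# method above dispatches between:
from functools import reduce
import operator

values = [1, 2, 3, 4]
assert reduce(operator.add, values) == 10        # no initializer: starts from values[0]
assert reduce(operator.add, values, 100) == 110  # the initializer seeds the accumulator
assert reduce(operator.add, [], 0) == 0          # and guards the empty case
# reduce(operator.add, []) would raise TypeError: empty sequence, no initial value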
def mana_cost(self):
    """
    Calculates the mana cost for this card.

    This cost is the base cost for the card, modified by any tags from the card itself, or
    from other cards (such as :class:`hearthbreaker.cards.minions.neutral.VentureCoMercenary`)

    :return: the actual mana cost of this card
    :rtype: int
    """
    from hearthbreaker.tags.status import ManaChange
    # Mana appears to be calculated in reverse order from other stats (auras first, then buffs)
    mana = reduce(lambda a, b: b.update(self, a),
                  [aura.status for p in self.player.game.players for aura in p.object_auras
                   if aura.match(self) and isinstance(aura.status, ManaChange)],
                  self.mana)
    mana = reduce(lambda a, b: b.update(self, a),
                  [buff.status for buff in self.buffs
                   if isinstance(buff.status, ManaChange)
                   and (not buff.condition or buff.condition.evaluate(self, self))],
                  mana)
    return mana
def add_strings(x, z=0):
    """
    This was in sage.misc.misc but commented out. Needed to add
    lists of strings in the word_problem method below.

    Return the sum of the elements of x. If x is empty, return z.

    INPUT:

    - ``x`` -- iterable

    - ``z`` -- the ``0`` that will be returned if ``x`` is empty

    OUTPUT:

    The sum of the elements of ``x``.

    EXAMPLES::

        sage: from sage.groups.abelian_gps.dual_abelian_group_element import add_strings
        sage: add_strings([], z='empty')
        'empty'
        sage: add_strings(['a', 'b', 'c'])
        'abc'
    """
    if len(x) == 0:
        return z
    if not isinstance(x, list):
        m = iter(x)
        y = next(m)
        return reduce(operator.add, m, y)
    else:
        return reduce(operator.add, x[1:], x[0])
def align_faces(from_obj, to_obj):
    fpolys = from_obj.data.polygons
    tpolys = to_obj.data.polygons
    fpoly = fpolys[fpolys.active]
    tpoly = tpolys[tpolys.active]

    to_obj.rotation_mode = 'QUATERNION'
    tnorm = to_obj.rotation_quaternion * tpoly.normal
    fnorm = fpoly.normal

    axis = fnorm.cross(tnorm)
    dot = fnorm.normalized().dot(tnorm.normalized())
    dot = clamp(dot, -1.0, 1.0)

    # Parallel faces need a new rotation vector
    if axis.length < 1.0e-8:
        axis = Vector(get_ortho(fnorm.x, fnorm.y, fnorm.z))

    from_obj.rotation_mode = 'AXIS_ANGLE'
    from_obj.rotation_axis_angle = [math.acos(dot) + math.pi, axis[0], axis[1], axis[2]]
    bpy.context.scene.update()

    # Move from_obj so that the faces match
    fvertices = [from_obj.data.vertices[i].co for i in fpoly.vertices]
    tvertices = [to_obj.data.vertices[i].co for i in tpoly.vertices]
    fbary = from_obj.matrix_world * (reduce(Vector.__add__, fvertices) / len(fvertices))
    tbary = to_obj.matrix_world * (reduce(Vector.__add__, tvertices) / len(tvertices))
    from_obj.location = tbary - (fbary - from_obj.location)
def _num_elements(grad):
    """The number of elements in the `grad` tensor."""
    if isinstance(grad, ops.Tensor):
        return functools.reduce(operator.mul, grad._shape_tuple(), 1)  # pylint: disable=protected-access
    if isinstance(grad, ops.IndexedSlices):
        return functools.reduce(operator.mul, grad.values._shape_tuple(), 1)  # pylint: disable=protected-access
    raise ValueError("`grad` not a Tensor or IndexedSlices.")
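# What the fold computes: the product of the shape tuple, with 1 as the
# initializer so a scalar (empty shape) counts as one element.
import functools
import operator

assert functools.reduce(operator.mul, (2, 3, 4), 1) == 24  # 2*3*4 elements
assert functools.reduce(operator.mul, (), 1) == 1          # scalar tensor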
def h_op(x):
    x1 = numpy.zeros_like(focka)
    x1[mask] = x
    x1 = x1 - x1.T
    x2 = numpy.zeros_like(focka)
    #: x2[nb:,:na] = numpy.einsum('sp,qs->pq', focka[:na,nb:], x1[:na,:na])
    #: x2[nb:,:na] += numpy.einsum('rq,rp->pq', focka[:na,:na], x1[:na,nb:])
    #: x2[na:,:na] -= numpy.einsum('sp,rp->rs', focka[:na,:na], x1[na:,:na])
    #: x2[na:,:na] -= numpy.einsum('ps,qs->pq', focka[na:], x1[:na]) * 2
    #: x2[nb:na,:nb] += numpy.einsum('qr,pr->pq', focka[:nb], x1[nb:na])
    #: x2[nb:na,:nb] -= numpy.einsum('rq,sq->rs', focka[nb:na], x1[:nb])
    #: x2[nb:,:na] += numpy.einsum('sp,qs->pq', fockb[:nb,nb:], x1[:na,:nb])
    #: x2[nb:,:na] += numpy.einsum('rq,rp->pq', fockb[:nb,:na], x1[:nb,nb:])
    #: x2[nb:,:nb] -= numpy.einsum('sp,rp->rs', fockb[:nb], x1[nb:])
    #: x2[nb:,:nb] -= numpy.einsum('rq,sq->rs', fockb[nb:], x1[:nb]) * 2
    x2[viridxb[:,None],occidxa] = \
        (numpy.einsum('sp,qs->pq', focka[occidxa[:,None],viridxb], x1[occidxa[:,None],occidxa])
        +numpy.einsum('rq,rp->pq', focka[occidxa[:,None],occidxa], x1[occidxa[:,None],viridxb]))
    x2[viridxa[:,None],occidxa] -= \
        (numpy.einsum('sp,rp->rs', focka[occidxa[:,None],occidxa], x1[viridxa[:,None],occidxa])
        +numpy.einsum('ps,qs->pq', focka[viridxa], x1[occidxa]) * 2)
    x2[occidxa[:,None],occidxb] += \
        (numpy.einsum('qr,pr->pq', focka[occidxb], x1[occidxa])
        -numpy.einsum('rq,sq->rs', focka[occidxa], x1[occidxb]))
    x2[viridxb[:,None],occidxa] += \
        (numpy.einsum('sp,qs->pq', fockb[occidxb[:,None],viridxb], x1[occidxa[:,None],occidxb])
        +numpy.einsum('rq,rp->pq', fockb[occidxb[:,None],occidxa], x1[occidxb[:,None],viridxb]))
    x2[viridxb[:,None],occidxb] -= \
        (numpy.einsum('sp,rp->rs', fockb[occidxb], x1[viridxb])
        +numpy.einsum('rq,sq->rs', fockb[viridxb], x1[occidxb]) * 2)
    x2 *= .5

    d1a = reduce(numpy.dot, (mo_coeff[:,viridxa], x1[viridxa[:,None],occidxa], mo_coeff[:,occidxa].T))
    d1b = reduce(numpy.dot, (mo_coeff[:,viridxb], x1[viridxb[:,None],occidxb], mo_coeff[:,occidxb].T))
    if hasattr(mf, 'xc'):
        if APPROX_XC_HESSIAN:
            vj, vk = mf.get_jk(mol, numpy.array((d1a+d1a.T, d1b+d1b.T)))
            if abs(hyb) < 1e-10:
                dvhf = vj[0] + vj[1]
            else:
                dvhf = (vj[0] + vj[1]) - vk * hyb
        else:
            from pyscf.dft import uks
            if save_for_dft[0] is None:
                save_for_dft[0] = mf.make_rdm1(mo_coeff, mo_occ)
                save_for_dft[1] = mf.get_veff(mol, save_for_dft[0])
            dm1 = numpy.array((save_for_dft[0][0]+d1a+d1a.T,
                               save_for_dft[0][1]+d1b+d1b.T))
            vhf1 = uks.get_veff_(mf, mol, dm1, dm_last=save_for_dft[0],
                                 vhf_last=save_for_dft[1])
            dvhf = (vhf1[0]-save_for_dft[1][0], vhf1[1]-save_for_dft[1][1])
            save_for_dft[0] = dm1
            save_for_dft[1] = vhf1
    else:
        dvhf = mf.get_veff(mol, numpy.array((d1a+d1a.T, d1b+d1b.T)))
    x2[viridxa[:,None],occidxa] += reduce(numpy.dot, (mo_coeff[:,viridxa].T, dvhf[0], mo_coeff[:,occidxa]))
    x2[viridxb[:,None],occidxb] += reduce(numpy.dot, (mo_coeff[:,viridxb].T, dvhf[1], mo_coeff[:,occidxb]))
    return x2[mask]
def h_op(x):
    x = x.reshape(nvir, nocc)
    x2 = -numpy.einsum('sq,ps->pq', foo, x) * 2
    x2 += numpy.einsum('pr,rq->pq', fvv, x) * 2

    d1 = reduce(numpy.dot, (mo_coeff[:,viridx], x, mo_coeff[:,occidx].T))
    if hasattr(mf, 'xc'):
        if APPROX_XC_HESSIAN:
            vj, vk = mf.get_jk(mol, d1+d1.T)
            if abs(hyb) < 1e-10:
                dvhf = vj
            else:
                dvhf = vj - vk * hyb * .5
        else:
            if save_for_dft[0] is None:
                save_for_dft[0] = mf.make_rdm1(mo_coeff, mo_occ)
                save_for_dft[1] = mf.get_veff(mol, save_for_dft[0])
            dm1 = save_for_dft[0] + d1 + d1.T
            vhf1 = mf.get_veff(mol, dm1, dm_last=save_for_dft[0],
                               vhf_last=save_for_dft[1])
            dvhf = vhf1 - save_for_dft[1]
            save_for_dft[0] = dm1
            save_for_dft[1] = vhf1
    else:
        dvhf = mf.get_veff(mol, d1+d1.T)
    x2 += reduce(numpy.dot, (mo_coeff[:,viridx].T, dvhf, mo_coeff[:,occidx])) * 4
    return x2.reshape(-1)
def __init__(self, lib, dtype, partitions, N, C, K,
             D=1, H=1, W=1, M=1, P=1, Q=1):
    super(Inception, self).__init__(lib, dtype, N)
    self.partitions = partitions
    self.C = C
    self.K = K
    self.M = M
    self.P = P
    self.Q = Q
    self.NCK = (N, C, K)
    self.DHW = (D, H, W)
    self.MPQ = (M, P, Q)
    self.dimI = (C, D, H, W, N)
    self.dimO = (K, M, P, Q, N)
    self.dimI2 = (C * D * H * W, N)
    self.dimO2 = (K * M * P * Q, N)
    self.sizeI = reduce(mul, self.dimI, 1)
    self.sizeO = reduce(mul, self.dimO, 1)
    self.nOut = reduce(mul, self.MPQ, 1) * K
    self.sizeF = 0
    self.flops = 0
    for part in partitions:
        for layer in part:
            self.flops += layer.flops
            self.sizeF = max(self.sizeF, layer.sizeF)
            if self.sizeF == layer.sizeF:
                self.dimF = layer.dimF
def filter_products_by_attributes(qs, filter_value):
    attributes = Attribute.objects.prefetch_related('values')
    attributes_map = {
        attribute.slug: attribute.pk for attribute in attributes}
    values_map = {
        attr.slug: {value.slug: value.pk for value in attr.values.all()}
        for attr in attributes}
    queries = defaultdict(list)
    # Convert attribute:value pairs into a dictionary where
    # attributes are keys and values are grouped in lists
    for attr_name, val_slug in filter_value:
        if attr_name not in attributes_map:
            raise ValueError('Unknown attribute name: %r' % (attr_name, ))
        attr_pk = attributes_map[attr_name]
        attr_val_pk = values_map[attr_name].get(val_slug, val_slug)
        queries[attr_pk].append(attr_val_pk)
    # Combine filters of the same attribute with OR operator
    # and then combine full query with AND operator.
    combine_and = [
        functools.reduce(
            operator.or_,
            [Q(**{'variants__attributes__%s' % (key, ): v})
             | Q(**{'attributes__%s' % (key, ): v})
             for v in values])
        for key, values in queries.items()]
    query = functools.reduce(operator.and_, combine_and)
    return qs.filter(query).distinct()
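# The same two-level fold on plain sets, so it runs without Django (| acts
# as union for the OR step, & as intersection for the AND step):
import functools
import operator

matches = {
    'color': [{1, 2}, {2, 3}],  # product ids matching each color value
    'size': [{2, 4}],           # product ids matching each size value
}
per_attr = [functools.reduce(operator.or_, hits) for hits in matches.values()]
assert functools.reduce(operator.and_, per_attr) == {2}  # some color AND some size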
def updateXY(self):
    self.maxxval = len(self.datas)
    self.minxval = 0.0
    self.xrange = self.maxxval - self.minxval
    if self.xrange == 0:
        self.xscale = 1.0
    else:
        self.xscale = 1.0 / self.xrange
    if not self.datas.values():
        self.maxyval = 0.0
        self.minyval = 0.0
    else:
        self.maxyval = max([reduce(lambda x, y: max(x, y), x.values()) for x in self.datas.values()])
        self.minyval = min([reduce(lambda x, y: min(x, y), x.values()) for x in self.datas.values()])
        if self.minyval > 0:
            self.minyval = 0.0
    self.yrange = self.maxyval - self.minyval
    if self.yrange == 0:
        self.yscale = 1.0
    else:
        self.yscale = 1.0 / self.yrange
def hop_uhf2ghf(x1):
    x1ab = []
    x1ba = []
    ip = 0
    for k in range(nkpts):
        nv = nvira[k]
        no = noccb[k]
        x1ab.append(x1[ip:ip+nv*no].reshape(nv, no))
        ip += nv * no
    for k in range(nkpts):
        nv = nvirb[k]
        no = nocca[k]
        x1ba.append(x1[ip:ip+nv*no].reshape(nv, no))
        ip += nv * no

    dm1ab = []
    dm1ba = []
    for k in range(nkpts):
        d1ab = reduce(numpy.dot, (orbva[k], x1ab[k], orbob[k].T.conj()))
        d1ba = reduce(numpy.dot, (orbvb[k], x1ba[k], orboa[k].T.conj()))
        dm1ab.append(d1ab + d1ba.T.conj())
        dm1ba.append(d1ba + d1ab.T.conj())

    v1ao = vresp1(lib.asarray([dm1ab, dm1ba]))
    x2ab = [0] * nkpts
    x2ba = [0] * nkpts
    for k in range(nkpts):
        x2ab[k] = numpy.einsum('pr,rq->pq', fvva[k], x1ab[k])
        x2ab[k] -= numpy.einsum('sq,ps->pq', foob[k], x1ab[k])
        x2ba[k] = numpy.einsum('pr,rq->pq', fvvb[k], x1ba[k])
        x2ba[k] -= numpy.einsum('qs,ps->pq', fooa[k], x1ba[k])
        x2ab[k] += reduce(numpy.dot, (orbva[k].T.conj(), v1ao[0][k], orbob[k]))
        x2ba[k] += reduce(numpy.dot, (orbvb[k].T.conj(), v1ao[1][k], orboa[k]))
    return numpy.hstack([x.real.ravel() for x in (x2ab + x2ba)])
def sum(seq):
    """
    | Sums objects in sequence or entries of array-like object.

    :param seq: number, :ref:`scalar object<scalar_ref>`,
                :ref:`multidimensional object<multi_ref>`,
                tuple or list of such objects.
    :return: number, :ref:`scalar object<scalar_ref>`
             or :ref:`multidimensional object<multi_ref>`.
    """
    # Number or scalar object
    if np.isscalar(seq) or type(seq).__name__ in SCALAR_OBJS:
        return seq

    # Matrix or object array
    elif type(seq) is cvxpy_matrix or type(seq).__name__ in ARRAY_OBJS:
        return reduce(lambda x, y: x + y,
                      [seq[i, j]
                       for i in range(0, seq.shape[0])
                       for j in range(0, seq.shape[1])], 0)

    # List or tuple
    elif type(seq) is tuple or type(seq) is list:
        return reduce(lambda x, y: x + y, seq, 0)

    # Other
    else:
        raise TypeError('Invalid argument')
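# The tuple/list branch above is a plain left fold with 0 as the initializer;
# a standalone check (note that the function above shadows the builtin sum):
from functools import reduce

assert reduce(lambda x, y: x + y, [1, 2, 3], 0) == 6
assert reduce(lambda x, y: x + y, (), 0) == 0  # empty input falls back to the initializer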
def __init__(self, cga, *args):
    self.cga = cga
    self.layout = cga.layout  # note: self.layout is the cga layout
    self.einf = self.cga.einf  # we use this a lot

    if len(args) == 0:
        # generate random flat of highest dimension
        nulls = [self.cga.null_vector() for k in range(self.layout.dims - 2)]
        self.mv = reduce(op, nulls + [self.einf])
    elif len(args) == 1:
        # from existing multivector
        if isinstance(args[0], MultiVector):
            self.mv = args[0]
        # generate random flat for given dimension
        elif isinstance(args[0], int):
            dim = args[0]
            points = [self.cga.base_vector() for k in range(dim + 1)]
            points = list(map(self.cga.up, points))
            self.mv = reduce(op, points + [self.einf])
    # from vectors on flat
    else:
        # materialize the map before the membership test, which would
        # otherwise consume the iterator
        nulls = [self.cga.null_vector(a) for a in args]
        if self.einf not in nulls:
            nulls = nulls + [self.einf]
        self.mv = reduce(op, nulls)

    self.mv = self.mv.normal()
def _ERIS(mc, mo, method='incore'):
    nmo = mo.shape[1]
    ncore = mc.ncore
    ncas = mc.ncas

    mem_incore, mem_outcore, mem_basic = mc_ao2mo._mem_usage(ncore, ncas, nmo)
    mem_now = lib.current_memory()[0]
    if (method == 'incore' and mc._scf._eri is not None and
            (mem_incore + mem_now < mc.max_memory * .9) or
            mc.mol.incore_anyway):
        ppaa, papa, pacv, cvcv = trans_e1_incore(mc, mo)
    else:
        max_memory = max(2000, mc.max_memory - mem_now)
        ppaa, papa, pacv, cvcv = \
            trans_e1_outcore(mc, mo, max_memory=max_memory, verbose=mc.verbose)

    dmcore = numpy.dot(mo[:,:ncore], mo[:,:ncore].T)
    vj, vk = mc._scf.get_jk(mc.mol, dmcore)
    vhfcore = reduce(numpy.dot, (mo.T, vj*2-vk, mo))

    eris = {}
    eris['vhf_c'] = vhfcore
    eris['ppaa'] = ppaa
    eris['papa'] = papa
    eris['pacv'] = pacv
    eris['cvcv'] = cvcv
    eris['h1eff'] = reduce(numpy.dot, (mo.T, mc.get_hcore(), mo)) + vhfcore
    return eris
from functools import reduce

def threshold(imageArray):
    balanceAr = []
    newAr = imageArray  # note: this aliases, not copies, the input array
    for eachRow in imageArray:
        for eachPix in eachRow:
            avgNum = reduce(lambda x, y: int(x) + int(y), eachPix[:3]) / len(eachPix[:3])
            balanceAr.append(avgNum)
    balance = reduce(lambda x, y: x + y, balanceAr) / len(balanceAr)
    print('balance: ', balance)
    for eachRow in newAr:
        for eachPix in eachRow:
            # mean of the RGB channels; pixels brighter than the global
            # balance become white, the rest black
            if reduce(lambda x, y: int(x) + int(y), eachPix[:3]) / 3 > balance:
                eachPix[0] = 255
                eachPix[1] = 255
                eachPix[2] = 255
                eachPix[3] = 255
            else:
                eachPix[0] = 0
                eachPix[1] = 0
                eachPix[2] = 0
                eachPix[3] = 255
    return newAr
def combined_rating(self):
    """
    Method that computes the multitag's rating from the ratings of unit
    subtags (the default implementation uses the geometric mean - with a
    special treatment for proper nouns - but this method can be overridden)

    @returns: the rating of the multitag
    """
    # by default, the rating of a multitag is the geometric mean of its
    # unit subtags' ratings
    product = reduce(lambda x, y: x * y, self.subratings, 1.0)
    root = self.size

    # but proper nouns shouldn't be penalized by stopwords
    if product == 0.0 and self.proper:
        nonzero = [r for r in self.subratings if r > 0.0]
        if len(nonzero) == 0:
            return 0.0
        product = reduce(lambda x, y: x * y, nonzero, 1.0)
        root = len(nonzero)

    return product ** (1.0 / root)
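# Worked example of the geometric-mean fold used above, outside the class:
# three subratings whose product is 4.0 combine to 4.0 ** (1/3).
from functools import reduce

subratings = [0.5, 2.0, 4.0]
product = reduce(lambda x, y: x * y, subratings, 1.0)  # 4.0
assert abs(product ** (1.0 / len(subratings)) - 4.0 ** (1.0 / 3)) < 1e-12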
def run(self):
    if self.deep == 0:
        print('level 1 :', 1, '/', 1)
        return
    screen = curses.initscr()  # initialize the curses window for terminal output
    maxFile = [0] * (self.deep + 1)
    while True:
        links = list(self.QLinks.__dict__['queue'])
        # current depth value of every URL in the queue
        deeps = [x[1] for x in links]
        # each element of keys is [depth value, count];
        # deep=0 is the innermost child level, deep=n-1 the parent level
        keys = [[x, 0] for x in range(self.deep + 1)]
        n = len(keys)
        for d in deeps:
            keys[d][1] += 1
        screen.clear()  # clear the screen before the next update
        count = 0
        for d in range(1, n + 1):
            count += 1
            if keys[n - d][1] > maxFile[d - 1]:
                maxFile[d - 1] = keys[n - d][1]
            screen.addstr(count, 0, 'level ' + str(d) + ' : '
                          + str(keys[n - d][1]) + ' / ' + str(maxFile[d - 1]))
        screen.refresh()  # flush the updates to the terminal
        time.sleep(0.2)
        total = functools.reduce(lambda x, y: x + y, [i[1] for i in keys])
        totalMax = functools.reduce(lambda x, y: x + y, maxFile)
        if self.event.is_set():
            curses.endwin()
            logging.info('Done at ' + time.ctime())
            break
def _gen_Max(self, args, ret_type):
    if ret_type == int_type:
        # FIXME better way to do this?
        return reduce(lambda arg1, arg2:
                      self.builder.select(self.builder.icmp_signed('>', arg1, arg2),
                                          arg1, arg2),
                      args)
    elif ret_type == real_type:
        return reduce(lambda arg1, arg2:
                      self.call_fp_intr('llvm.maxnum', [arg1, arg2]),
                      args)
def getMinUpgrade(vulnerableList, unaffectedList, portdbapi, vardbapi, minimize=True):
    """
    Checks if the system state matches an atom in I{vulnerableList} and
    returns a string describing the lowest version for the package that
    matches an atom in I{unaffectedList} and is greater than the currently
    installed version. It will return an empty list if the system is
    affected and no upgrade is possible, or None if the system is not
    affected. Both I{vulnerableList} and I{unaffectedList} should have the
    same base package.

    @type vulnerableList: List of Strings
    @param vulnerableList: atoms matching vulnerable package versions
    @type unaffectedList: List of Strings
    @param unaffectedList: atoms matching unaffected package versions
    @type portdbapi: portage.dbapi.porttree.portdbapi
    @param portdbapi: Ebuild repository
    @type vardbapi: portage.dbapi.vartree.vardbapi
    @param vardbapi: Installed package repository
    @type minimize: Boolean
    @param minimize: True for a least-change upgrade, False for emerge-like algorithm
    @rtype: List | None
    @return: pairs of (vulnerable atom, lowest unaffected version that is
        greater than the installed version)
    """
    v_installed = reduce(operator.add, [match(v, vardbapi) for v in vulnerableList], [])
    u_installed = reduce(operator.add, [match(u, vardbapi) for u in unaffectedList], [])

    # remove all unaffected atoms from the vulnerable list
    v_installed = list(set(v_installed).difference(set(u_installed)))

    if not v_installed:
        return None

    # this list holds all vulnerable atoms, each paired with its upgrade atom
    vuln_update = []
    avail_updates = set()
    for u in unaffectedList:
        # TODO: This had match_type="match-all" before. I don't think it should
        # since we disregarded masked items later anyway (match(=rValue, "porttree"))
        avail_updates.update(match(u, portdbapi))
    # if an atom is already installed, we should not consider it for upgrades
    avail_updates.difference_update(u_installed)

    for vuln in v_installed:
        update = ""
        for c in avail_updates:
            c_pv = portage.catpkgsplit(c)
            # accept c if it is newer than the vulnerable version, is in the
            # same slot, and (depending on `minimize`) is lower (least-change)
            # or higher (emerge-like) than the best candidate so far
            if vercmp(c.version, vuln.version) > 0 \
                    and (update == ""
                         or (minimize ^ (vercmp(c.version, update.version) > 0))) \
                    and portdbapi._pkg_str(c, None).slot == vardbapi._pkg_str(vuln, None).slot:
                update = c_pv[0] + "/" + c_pv[1] + "-" + c_pv[2]
                if c_pv[3] != "r0":  # we don't like -r0 for display
                    update += "-" + c_pv[3]
        vuln_update.append([vuln, update])

    return vuln_update
def run(self, assets, parameters=tuple()):
    args = self.parser.parse_args(parameters)
    # FIXME: move checks out so they can be made before calling 'run'
    if (args.filter_exclude is None) and (args.filter_include is None):
        raise ValueError('No filter specified.')
    cmd = [self._execpath, 'view', '-o', assets.target.bamfile.name]
    if args.filter_exclude is not None:
        # fold the individual flags into one bitmask
        filter_exclude = reduce(lambda x, y: x ^ y, (x for x in args.filter_exclude))
        cmd.extend(('-F', str(filter_exclude)))
    if args.filter_include is not None:
        filter_include = reduce(lambda x, y: x ^ y, (x for x in args.filter_include))
        cmd.extend(('-f', str(filter_include)))
    cmd.extend(('-b', assets.source.bamfile.name))
    with open(os.devnull, 'w') as fnull:
        logging.debug(cmd)
        returncode = subprocess.check_call(cmd, stdout=fnull, stderr=fnull)
    if not os.path.exists(assets.target.bamfile.name):
        # target is missing. Suspected infamous 'bam.bam' issue
        bam_bam = assets.target.bamfile.name + '.bam'
        if os.path.exists(bam_bam):
            # '.bam.bam' issue confirmed
            warnings.warn("'.bam.bam' issue detected. Moving the product to intended target %s."
                          % assets.target.bamfile.name)
            shutil.move(bam_bam, assets.target.bamfile.name)
        else:
            raise Exception('The target %s is mysteriously missing.'
                            % assets.target.bamfile.name)
    return (cmd, returncode)
def test_set_observed_state(self):
    # set up configuration
    config_dir = path.join(path.dirname(__file__), "netcdf")
    cfg = orchestration.YAMLConfig(
        "atnsjoen_simulation.yaml", "atnsjoen",
        config_dir=config_dir, data_dir=shyftdata_dir)

    # get a simulator
    simulator = cfg.get_simulator()
    n_cells = simulator.region_model.size()
    state_repos = DefaultStateRepository(cfg.model_t, n_cells)
    state = state_repos.get_state(0)
    simulator.run(cfg.time_axis, state)

    simulator.region_model.get_states(state)
    obs_discharge = 0.0
    state = simulator.discharge_adjusted_state(obs_discharge, state)
    self.assertAlmostEqual(
        0.0, reduce(operator.add, (state[i].kirchner.q for i in range(state.size()))))

    simulator.region_model.get_states(state)
    obs_discharge = 10.0  # m3/s
    state = simulator.discharge_adjusted_state(obs_discharge, state)
    # Convert from l/h to m3/s by dividing by 3.6e6
    adj_discharge = reduce(
        operator.add,
        (state[i].kirchner.q * cell.geo.area()
         for (i, cell) in enumerate(simulator.region_model.get_cells()))) / (3.6e6)
    self.assertAlmostEqual(obs_discharge, adj_discharge)
def multiply(lhs, rhs):
    """ Perform element-wise multiplication

    Parameters
    ----------
    lhs : Array or float value
        left hand side operand

    rhs : Array or float value
        right hand side operand

    Returns
    -------
    out: Array
        result array
    """
    # pylint: disable= no-member, protected-access
    if isinstance(lhs, numeric_types):
        if isinstance(rhs, numeric_types):
            return lhs * rhs
        else:
            return multiply(rhs, lhs)
    elif isinstance(rhs, numeric_types):
        return NDArray._mul_scalar(lhs, float(rhs))
    elif isinstance(rhs, NDArray):
        lsize = functools.reduce(operator.mul, lhs.shape)
        rsize = functools.reduce(operator.mul, rhs.shape)
        if lsize < rsize:
            lhs = lhs.broadcast_to(rhs.shape)
        elif lsize > rsize:
            rhs = rhs.broadcast_to(lhs.shape)
        return NDArray._mul(lhs, rhs)
    else:
        raise TypeError('type %s not supported' % str(type(rhs)))
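# Standalone check of the element-count comparison that decides which operand
# to broadcast (the shapes here are illustrative):
import functools
import operator

lshape, rshape = (2, 1), (2, 3)
lsize = functools.reduce(operator.mul, lshape)  # 2
rsize = functools.reduce(operator.mul, rshape)  # 6
assert lsize < rsize  # so the left operand gets broadcast to rshape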
def install_dependencies(software, osname=None, fake=False):
    if osname is None:
        osname = dependency.get_platform()
    theOs = dependency.OsInterfaceFactory().create(osname)
    dependencies = dependency.DependencySolver(software, osname)
    vrun = False
    vdev = False
    voth = False
    if len(dependencies.runtime_distribution_packages()):
        print("Do you wish to install the required runtime packages:\n")
        print(reduce(lambda x, y: str(x) + " " + str(y),
                     dependencies.runtime_distribution_packages(), "") + " (y/n)?")
        vrun = input()
    if len(dependencies.development_distribution_packages()):
        print("Do you wish to install the development packages:\n")
        print(reduce(lambda x, y: str(x) + " " + str(y),
                     dependencies.development_distribution_packages(), "") + " (y/n)?")
        vdev = input()
    if len(dependencies.other_packages()):
        print("Do you wish to install the other packages:\n")
        print(reduce(lambda x, y: str(x) + " " + str(y),
                     dependencies.other_packages(), "") + " (y/n)?")
        voth = input()
    if vrun == "y":
        theOs.install_packages(dependencies.runtime_distribution_packages(), fake)
    if vdev == "y":
        theOs.install_packages(dependencies.development_distribution_packages(), fake)
    if voth == "y":
        theOs.install_packages(dependencies.other_packages(), fake)
def grammar():
    g = Grammar()
    g['lbracket'] = terminals([ord('[')])
    g['rbracket'] = terminals([ord(']')])
    g['bslash'] = terminals([ord('\\')])
    g['hyphen'] = terminals([ord('-')])
    g['lparen'] = terminals([ord('(')])
    g['rparen'] = terminals([ord(')')])
    g['pipe'] = terminals([ord('|')])
    g['star'] = terminals([ord('*')])
    g['special'] = reduce(union, map(g.__getitem__, [
        'lbracket', 'rbracket', 'bslash', 'hyphen',
        'lparen', 'rparen', 'pipe', 'star',
    ]))
    g['non_special'] = negate_terminals(g['special'])
    g['escaped'] = concat(g['bslash'], g['special'])
    g['char'] = union(g['escaped'], g['non_special'])
    g['simple_range'] = reduce(concat, [g['char'], g['hyphen'], g['char']])
    g['char_class_atom'] = union(g['char'], g['simple_range'])
    g['char_class'] = reduce(concat, [
        g['lbracket'], plus(g['char_class_atom']), g['rbracket']
    ])
    g['concat_atom'] = union(g['char'], g['char_class'])
    g['concat'] = plus(g['concat_atom'])
    g['union_sequence'] = sequence(g['concat'], g['pipe'])
    g['union'] = reduce(concat, [g['lparen'], g['union_sequence'], g['rparen']])
    g['repeat'] = concat(union(g['concat'], g['union']), g['star'])
    g['regular'] = plus(reduce(union, [g['union'], g['concat'], g['repeat']]))
    return g
from functools import reduce

def checkio(image):
    FontNumbers = [1 if i == 'X' else 0 for i in list(FONT)]
    FontNumbers = [FontNumbers[i * (len(FONT) // 5):(i + 1) * len(FONT) // 5]
                   for i in range(5)]
    FontNumbers = SplitImage(FontNumbers)
    FontNumbers = [FontNumbers[-1]] + FontNumbers[:-1]
    ImageNumbers = SplitImage(image)
    result = []
    for i in ImageNumbers:
        for j in range(len(FontNumbers)):
            NumberDotStream = reduce(lambda x, y: x + y, i)
            FontDotStream = reduce(lambda x, y: x + y, FontNumbers[j])
            # accept the digit if at most one dot differs from the font glyph
            if sum([1 for k0, k1 in enumerate(NumberDotStream)
                    if FontDotStream[k0] != k1]) <= 1:
                result.append(str(j))
                break
    return int(''.join(result))
def send(self, target, nick, msg, msgtype, ignore_length=False, filters=None):
    """Send a message.

    Records the message in the log.
    """
    if not isinstance(msg, str):
        raise Exception("Trying to send a %s to irc, only strings allowed." % type(msg).__name__)
    msgs = []
    if filters is None:
        filters = self.outputfilter[target]
    for i in filters:
        if target != self.config['core']['ctrlchan']:
            msg = i(msg)
    # Avoid spam from commands that produce excessive output.
    MAX_LEN = 650
    msg = [x.encode() for x in msg]
    if functools.reduce(lambda x, y: x + len(y), msg, 0) > MAX_LEN and not ignore_length:
        msg, _ = misc.split_msg(msg, MAX_LEN)
        msg += "..."
        msg = [x.encode() for x in msg]
    max_len = self.get_max_length(target, msgtype)
    # We can't send messages > 512 bytes to irc.
    while functools.reduce(lambda x, y: x + len(y), msg, 0) > max_len:
        split, msg = misc.split_msg(msg, max_len)
        msgs.append(split)
    msgs.append(''.join([x.decode() for x in msg]).strip())
    for i in msgs:
        self.do_log(target, nick, i, msgtype)
        if msgtype == 'action':
            self.connection.action(target, i)
        else:
            self.rate_limited_send(target, i)
def return_charts(close, predictions, actual):
    # plot.plot(close, label="close")
    return_values_ft = [1]
    return_values_pre = [1]

    # calculate simple returns
    simple_returns = []
    simple_returns_pre = []
    for i in range(0, len(close) - 1):
        simple_returns_pre.append((close[i + 1] - close[i]) / close[i])
    simple_returns = [1 + rt for rt in simple_returns_pre]

    for i in range(0, len(predictions)):
        current_return = reduce(mul, simple_returns[:i + 1])
        if predictions[i] == actual[i]:
            simple_returns_pre[i] = 1 + abs(simple_returns_pre[i])
        if predictions[i] != actual[i]:
            simple_returns_pre[i] = 1 - abs(simple_returns_pre[i])
        agg_return = reduce(mul, simple_returns_pre[:i + 1])
        return_values_ft.append(current_return)
        return_values_pre.append(agg_return)

    plot.plot(return_values_ft, label="return of ft", linewidth=2.5, color="blueviolet")
    plot.plot(return_values_pre, '--', label="return of predictions", linewidth=2.5, color="black")
    plot.xlabel("Trading Days")
    plot.ylabel("Return")
    plot.legend(loc="upper left")
    plot.show()
def search_new_transactions(cls, transactions):
    transactions_in_chain = reduce(
        add, list(map(lambda block: block.transactions, cls.blocks)))
    return list(
        filter(lambda tx: tx not in transactions_in_chain, transactions))
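# The reduce(add, ...) above concatenates the per-block transaction lists
# into one flat list; a standalone illustration:
from functools import reduce
from operator import add

blocks = [[1, 2], [3], [4, 5]]
assert reduce(add, blocks) == [1, 2, 3, 4, 5]
# For long chains, itertools.chain.from_iterable avoids the repeated list
# copying this fold performs.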
def _numel(self):
    if self._numel_cache is None:
        self._numel_cache = reduce(lambda total, p: total + p.numel(), self._params, 0)
    return self._numel_cache
from functools import reduce

def multi_merge(*v):
    return reduce(merge, v)
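# Usage sketch with a hypothetical pairwise `merge` (a stand-in for whatever
# two-argument merge this module defines), here a right-biased dict merge:
def merge(a, b):
    out = dict(a)
    out.update(b)
    return out

assert multi_merge({'x': 1}, {'y': 2}, {'x': 3}) == {'x': 3, 'y': 2}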
def sharpen_region(self, region):
    data = self.dataset
    root = self._root
    nbins = self.n_bins

    xvar = yvar = zvar = None
    if 0 <= self.x_var_index < len(self.x_var_model):
        xvar = self.x_var_model[self.x_var_index]
    if 0 <= self.y_var_index < len(self.y_var_model):
        yvar = self.y_var_model[self.y_var_index]
    if 0 <= self.z_var_index < len(self.z_var_model):
        zvar = self.z_var_model[self.z_var_index]

    if data is None or xvar is None or yvar is None or root is None:
        return

    if not QRectF(*root.brect).intersects(region):
        return

    def bin_func(xbins, ybins):
        return grid_bin(data, xvar, yvar, xbins, ybins, zvar)

    def min_depth(node, region):
        if not region.intersects(QRectF(*node.brect)):
            return np.inf
        elif node.is_leaf:
            return 1
        elif node.is_empty:
            return 1
        else:
            xs, xe, ys, ye = bindices(node, region)
            children = node.children[xs:xe, ys:ye].ravel()
            contingency = node.contingencies[xs:xe, ys:ye]
            if contingency.ndim == 3:
                contingency = contingency.reshape(-1, contingency.shape[2])
            if any(ch is None and np.any(val)
                   for ch, val in zip(children, contingency)):
                return 1
            else:
                ch_depth = [min_depth(ch, region) + 1
                            for ch in filter(is_not_none, children.flat)]
                return min(ch_depth if ch_depth else [1])

    depth = min_depth(self._root, region)
    bw = self._sampling_width()
    nodes = self.select_nodes_to_sharpen(self._root, region, bw, depth + 1)

    def update_rects(node):
        scored = score_candidate_rects(node, region)
        ind1 = set(zip(*Node_nonzero(node)))
        ind2 = set(zip(*node.children.nonzero())) \
               if not node.is_leaf else set()
        ind = ind1 - ind2
        return [(score, r) for score, i, j, r in scored if (i, j) in ind]

    scored_rects = reduce(operator.iadd, map(update_rects, nodes), [])
    scored_rects = sorted(scored_rects, reverse=True,
                          key=operator.itemgetter(0))
    root = self._root
    update_time = time.time()

    with self.progressBar(len(scored_rects)) as progress_bar:
        for i, (_, rect) in enumerate(scored_rects):
            root = sharpen_region_recur(root, rect.intersected(region),
                                        nbins, depth + 1, bin_func)
            tick = time.time() - update_time
            if tick > 2.0:
                self.update_map(root)
                update_time = time.time()
            progress_bar.advance()

    self._root = root
    self._cache[xvar, yvar, zvar] = self._root
    self.update_map(self._root)
def readarray(self, size=None, offset=0, dtype=np.uint8, shape=None):
    """
    Similar to file.read(), but returns the contents of the underlying
    file as a numpy array (or mmap'd array if memmap=True) rather than a
    string.

    Usually it's best not to use the `size` argument with this method, but
    it's provided for compatibility.
    """
    if not hasattr(self._file, 'read'):
        raise EOFError

    if not isinstance(dtype, np.dtype):
        dtype = np.dtype(dtype)

    if size and size % dtype.itemsize != 0:
        raise ValueError('size {} not a multiple of {}'.format(
            size, dtype.itemsize))

    if isinstance(shape, int):
        shape = (shape, )

    if not (size or shape):
        warnings.warn(
            'No size or shape given to readarray(); assuming a '
            'shape of (1,)', AstropyUserWarning)
        shape = (1, )

    if size and not shape:
        shape = (size // dtype.itemsize, )

    if size and shape:
        actualsize = np.prod(shape) * dtype.itemsize
        if actualsize > size:
            raise ValueError('size {} is too few bytes for a {} array of '
                             '{}'.format(size, shape, dtype))
        elif actualsize < size:
            raise ValueError('size {} is too many bytes for a {} array of '
                             '{}'.format(size, shape, dtype))

    filepos = self._file.tell()

    try:
        if self.memmap:
            if self._mmap is None:
                # Instantiate Memmap array of the file offset at 0 (so we
                # can return slices of it to offset anywhere else into the
                # file)
                memmap = Memmap(self._file, mode=MEMMAP_MODES[self.mode],
                                dtype=np.uint8)

                # Now we immediately discard the memmap array; we are
                # really just using it as a factory function to instantiate
                # the mmap object in a convenient way (may later do away
                # with this usage)
                self._mmap = memmap.base

                # Prevent dorking with self._memmap._mmap by memmap.__del__
                # in Numpy 1.6 (see
                # https://github.com/numpy/numpy/commit/dcc355a0b179387eeba10c95baf2e1eb21d417c7)
                memmap._mmap = None
                del memmap

            return np.ndarray(shape=shape, dtype=dtype, offset=offset,
                              buffer=self._mmap)
        else:
            count = reduce(operator.mul, shape)
            self._file.seek(offset)
            data = _array_from_file(self._file, dtype, count)
            data.shape = shape
            return data
    finally:
        # Make sure we leave the file in the position we found it; on
        # some platforms (e.g. Windows) mmaping a file handle can also
        # reset its file pointer
        self._file.seek(filepos)
from functools import reduce

def mul_func(a, b):
    # product of the digits num[a:b] of the module-level sequence `num`
    return reduce(lambda x, y: int(x) * int(y), num[a:b])
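# Usage sketch; `num` must be defined at module level (the value below is
# hypothetical):
num = "7316717"
assert mul_func(0, 3) == 7 * 3 * 1  # product of the digits '7', '3', '1'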
from functools import reduce

def dotprod(v1, v2):
    # sum of pairwise products; map with two iterables pairs them up
    return reduce(lambda x, y: x + y, map(lambda x, y: x * y, v1, v2))
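# Example: 1*4 + 2*5 + 3*6 == 32
assert dotprod([1, 2, 3], [4, 5, 6]) == 32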
def predict(self, image, threshold=0.5, warmup=0, repeats=1):
    '''
    Args:
        image (str/np.ndarray): path of image / np.ndarray read by cv2
        threshold (float): threshold of predicted box score
    Returns:
        results (dict): includes 'boxes': np.ndarray: shape: [N, 6],
            N: number of boxes, matrix element: [class, score, x_min, y_min, x_max, y_max]
            MaskRCNN's results include 'masks': np.ndarray:
            shape: [N, class_num, mask_resolution, mask_resolution]
    '''
    inputs, im_info = self.preprocess(image)
    np_boxes, np_masks = None, None
    if self.config.use_python_inference:
        for i in range(warmup):
            outs = self.executor.run(self.program,
                                     feed=inputs,
                                     fetch_list=self.fecth_targets,
                                     return_numpy=False)
        t1 = time.time()
        for i in range(repeats):
            outs = self.executor.run(self.program,
                                     feed=inputs,
                                     fetch_list=self.fecth_targets,
                                     return_numpy=False)
        t2 = time.time()
        ms = (t2 - t1) * 1000.0 / repeats
        print("Inference: {} ms per batch image".format(ms))
        np_boxes = np.array(outs[0])
        if self.config.mask_resolution is not None:
            np_masks = np.array(outs[1])
    else:
        input_names = self.predictor.get_input_names()
        for i in range(len(input_names)):
            input_tensor = self.predictor.get_input_tensor(input_names[i])
            input_tensor.copy_from_cpu(inputs[input_names[i]])
        for i in range(warmup):
            self.predictor.zero_copy_run()
            output_names = self.predictor.get_output_names()
            boxes_tensor = self.predictor.get_output_tensor(output_names[0])
            np_boxes = boxes_tensor.copy_to_cpu()
            if self.config.mask_resolution is not None:
                masks_tensor = self.predictor.get_output_tensor(output_names[1])
                np_masks = masks_tensor.copy_to_cpu()
        t1 = time.time()
        for i in range(repeats):
            self.predictor.zero_copy_run()
            output_names = self.predictor.get_output_names()
            boxes_tensor = self.predictor.get_output_tensor(output_names[0])
            np_boxes = boxes_tensor.copy_to_cpu()
            if self.config.mask_resolution is not None:
                masks_tensor = self.predictor.get_output_tensor(output_names[1])
                np_masks = masks_tensor.copy_to_cpu()
        t2 = time.time()
        ms = (t2 - t1) * 1000.0 / repeats
        print("Inference: {} ms per batch image".format(ms))
    # fewer than 6 values in total means not even one full box row
    if reduce(lambda x, y: x * y, np_boxes.shape) < 6:
        print('[WARNING] No object detected.')
        results = {'boxes': np.array([])}
    else:
        results = self.postprocess(np_boxes, np_masks, im_info, threshold=threshold)
    return results
def filters_join(filters):
    return Orange.data.filter.Values(
        reduce(list.__iadd__, (f.conditions for f in filters), []))