def get_queryset(self):
    result = super(TutorBuscaListView, self).get_queryset()
    query = self.request.GET.get('q')
    form = self.form_class(self.request.GET or None)
    if query:
        query_list = query.split()
        result = result.filter(
            reduce(operator.and_, (Q(_nome__icontains=q) for q in query_list)) |
            reduce(operator.and_, (Q(_email__icontains=q) for q in query_list)) |
            reduce(operator.and_, (Q(_cpf__icontains=q) for q in query_list)))
    elif form.is_valid():
        if form.cleaned_data['_nome']:
            result = result.filter(_nome__icontains=form.cleaned_data['_nome'])
        if form.cleaned_data['_cpf']:
            result = result.filter(_cpf__icontains=form.cleaned_data['_cpf'])
        if form.cleaned_data['_email']:
            result = result.filter(_email__icontains=form.cleaned_data['_email'])
        if form.cleaned_data['_bairro']:
            result = result.filter(_bairro__icontains=form.cleaned_data['_bairro'])
        if form.cleaned_data['_cidade']:
            result = result.filter(_cidade__icontains=form.cleaned_data['_cidade'])
        if form.cleaned_data['_cep']:
            result = result.filter(_cep__icontains=form.cleaned_data['_cep'])
        if form.cleaned_data['_uf']:
            result = result.filter(_uf__icontains=form.cleaned_data['_uf'])
    return result
def get_balance(self, sender=None):
    if sender is None:
        if self.public_key is None:
            return None
        participant = self.public_key
    else:
        participant = sender
    # Amounts this participant sent, per confirmed block, plus open transactions.
    tx_sender = [[tx.amount for tx in block.transactions
                  if tx.sender == participant] for block in self.__chain]
    open_tx_sender = [tx.amount for tx in self.__open_transactions
                      if tx.sender == participant]
    tx_sender.append(open_tx_sender)
    # Amounts this participant received in confirmed blocks only.
    tx_recipient = [[tx.amount for tx in block.transactions
                     if tx.recipient == participant] for block in self.__chain]
    # sum([]) is 0, so no empty-list special case is needed in the fold.
    amount_sent = reduce(
        lambda tx_sum, tx_amt: tx_sum + sum(tx_amt), tx_sender, 0)
    amount_received = reduce(
        lambda tx_sum, tx_amt: tx_sum + sum(tx_amt), tx_recipient, 0)
    return round(amount_received - amount_sent, 13)
def select(table, plus=None, column=None, page=None, nbparpage=None, listFiltre=None):
    # Optionally filter, optionally order, optionally paginate; return [queryset, count].
    if listFiltre is None:
        qs = table.objects.all()
        n = table.objects.count()
    else:
        objets_q = [Q(x) for x in listFiltre]
        qs = table.objects.filter(reduce(operator.and_, objets_q))
        n = qs.count()
    if plus is not None and column is not None:
        qs = qs.order_by(plus + column)
    if page is not None and nbparpage is not None and n > nbparpage:
        qs = qs[(page - 1) * nbparpage:page * nbparpage]
    return [qs, n]
def add(self, *args):
    """
    Add new elements to the list

    :param \*args: List of new elements
    """
    for arg in args:
        _id = getattr(arg, 'id')
        if _id in self.map['id']:
            continue
        self.objects[_id] = arg
        # Resolve the (possibly dotted) sort attributes for this element.
        _main = reduce(self._iter_func, self.main_sort.split('.'), arg)
        _sec = reduce(self._iter_func, self.break_tie_sort.split('.'), arg)
        _pos = bisect_left(self.main, _main)
        main_pos_r = bisect_right(self.main, _main)
        if _pos != main_pos_r:
            # Ties on the main key: break them on the secondary key.
            _pos = bisect_left(self.secondary[_pos:main_pos_r], _sec) + _pos
        self.list.insert(_pos, _id)
        self.main.insert(_pos, _main)
        self.secondary.insert(_pos, _sec)
        self.map_insert(self.map['id'], self.map['pos'], _pos, _id)
from functools import reduce

def reduce_Example():
    _list = [1, 2, 3, 4, 56, 6, 7, 8, 9, 11, 32, 43, 54, 223, 9, 0, 8, 6, 5, 4]
    # Getting the biggest number in the list.
    largest = reduce(lambda a, b: a if a > b else b, _list)
    print("largest element in list is {}".format(largest))
    # Remove it and fold again for the runner-up.
    _list.remove(largest)
    print("second largest element {}".format(
        reduce(lambda a, b: a if a > b else b, _list)))
def getFundRankList(request):
    draw = request.GET.get('draw')
    select_category = request.GET.get("select_category")
    select_category_array = select_category.split(",")

    orderByColumns = [
        'category', 'fund_id', 'fund_name', 'cal_date', 'net_asset_value',
        'accumulative', 'oneday', 'oneweek', 'onemonth', 'threemonth',
        'sixmonth', 'oneyear', 'twoyear', 'threeyear', 'thisyear', 'setup',
        'score'
    ]
    order_column = int(request.GET.get("order[0][column]"))
    order_dir = request.GET.get("order[0][dir]")
    order_column_name = orderByColumns[order_column]
    if order_dir == 'desc':
        order_column_name = "-" + orderByColumns[order_column]

    start = int(request.GET.get('start'))
    length = start + int(request.GET.get('length'))

    minParamsDict = {
        "oneweek": request.GET.get('minWeek'),
        "onemonth": request.GET.get('minMonth'),
        "threemonth": request.GET.get('minThreeMonth'),
        "sixmonth": request.GET.get('minSixMonth'),
        "oneyear": request.GET.get('minOneYear'),
        "twoyear": request.GET.get('minTwoYear'),
        "threeyear": request.GET.get('minThreeYear'),
        "thisyear": request.GET.get('minThisYear'),
        "setup": request.GET.get('minSetUp'),
        "score": request.GET.get('minScore')
    }
    # One __gte filter per minimum, AND-ed together below.
    minParamsList = [Q(**{k + "__gte": v}) for (k, v) in minParamsDict.items()]

    fundRankAllListCount = FundRank.objects.all().count()
    fundRankList = FundRank.objects.filter(
        reduce(operator.and_, minParamsList)).filter(
            category__in=select_category_array)
    fundRankPageList = fundRankList.order_by(order_column_name)[start:length]
    fundRankPageListJSON = serializers.serialize(
        'json', fundRankPageList, ensure_ascii=False)
    recordsFiltered = fundRankList.count()
    return HttpResponse(
        '{"data":' + fundRankPageListJSON + ',"recordsTotal":' +
        str(fundRankAllListCount) + ',"recordsFiltered":' +
        str(recordsFiltered) + ',"draw":' + draw + '}',
        content_type='application/json')
def test_functools_reduce():
    import _functools
    words = ["I", "am", "the", "walrus"]
    combine = lambda s, t: s + " " + t
    Assert(hasattr(_functools, "reduce"))
    AreEqual(_functools.reduce(combine, words), "I am the walrus")
    AreEqual(_functools.reduce(combine, words), reduce(combine, words))
from functools import reduce

def str2float(s):
    """Parse a decimal string such as '123.456' without calling float()."""
    def char2int(c):
        # '.' maps to -2 (ord('.') - 48), which marks the decimal point.
        return ord(c) - 48

    def seq2int(x, y):
        return x * 10 + y

    L = list(map(char2int, s))
    dot = L.index(-2)
    L.pop(dot)
    # Fold the digits into an integer, then divide by the matching power of ten.
    return reduce(seq2int, L) / reduce(seq2int, [1] + [0 for x in range(len(L) - dot)])
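# A quick, hedged check of str2float above (minimal sketch; assumes the function
# and its import are in scope): '123.456' folds its digits to 123456 and divides
# by 1000, built by the same fold over [1, 0, 0, 0].
assert abs(str2float('123.456') - 123.456) < 1e-9
assert str2float('0.5') == 0.5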
def statics_write_out(self, show, save):
    """
    Write the statistic output file

    :param show: True for showing the statistics, False otherwise.
    :param save: True for saving the statistics, False otherwise.
    """
    if not (show or save):
        return
    wtimes = self.mapper.wtimes
    slds = self.mapper.slowdowns
    sim_time_ = 'Simulation time: {0:.2f} secs\n'.format(
        self.end_simulation_time - self.start_simulation_time)
    disp_method_ = 'Dispatching method: {}\n'.format(self.mapper.dispatcher)
    total_jobs_ = 'Total jobs: {}\n'.format(self.loaded_jobs)
    makespan_ = 'Makespan: {}\n'.format(
        self.mapper.last_run_time - self.mapper.first_time_dispatch
        if self.mapper.last_run_time and self.mapper.first_time_dispatch
        else 'NA')
    if wtimes:
        avg_wtimes_ = 'Avg. waiting times: {:.2f}\n'.format(
            reduce(lambda x, y: x + y, wtimes) / float(len(wtimes)))
    else:
        avg_wtimes_ = 'Avg. waiting times: NA\n'
    if slds:
        avg_slowdown_ = 'Avg. slowdown: {:.2f}\n'.format(
            reduce(lambda x, y: x + y, slds) / float(len(slds)))
    else:
        avg_slowdown_ = 'Avg. slowdown: NA\n'
    lines = (sim_time_, disp_method_, total_jobs_, makespan_,
             avg_wtimes_, avg_slowdown_)
    if show:
        for line in lines:
            self._logger.info('\t ' + line[:-1])
    if save:
        _filepath = path.join(
            self.constants.RESULTS_FOLDER_PATH,
            self.constants.STATISTICS_PREFIX + self.constants.WORKLOAD_FILENAME)
        with open(_filepath, 'a') as f:
            f.writelines(lines)
def NFAtoDFA(N):
    """Subset construction: build a DFA whose states are sets of NFA states."""
    q0 = frozenset(N.q0)
    Q = set([q0])
    unprocessedQ = Q.copy()
    delta = {}
    F = []
    Sigma = N.alphabet()
    while len(unprocessedQ) > 0:
        qSet = unprocessedQ.pop()
        delta[qSet] = {}
        for a in Sigma:
            nextStates = reduce(lambda x, y: x | y,
                                [N.getStateBySimbol(q, a) for q in qSet])
            nextStates = frozenset(nextStates)
            # The original tested 'is not frozenset([])', which is always true
            # (identity against a fresh object); emptiness needs equality.
            if nextStates != frozenset():
                delta[qSet][a] = nextStates
                if nextStates not in Q:
                    Q.add(nextStates)
                    unprocessedQ.add(nextStates)
    for qSet in Q:
        if len(qSet & N.F) > 0:
            F.append(qSet)
    return DFA(delta, q0, F)
from math import sqrt
from functools import reduce

def factors(n):
    # Odd n has only odd divisors, so step over the even candidates.
    step = 2 if n % 2 else 1
    return set(
        reduce(list.__add__,
               ([i, n // i] for i in range(1, int(sqrt(n)) + 1, step)
                if n % i == 0)))
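# Hedged usage sketch for factors() above: each divisor i <= sqrt(n) brings its
# cofactor n // i along, and the set removes the duplicate when n is a square.
assert factors(36) == {1, 2, 3, 4, 6, 9, 12, 18, 36}
assert factors(15) == {1, 3, 5, 15}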
def valBigProd(Args):
    p, t = Args[0], Args[1]
    l = [val(dot(t, b)) for b in solutionSet(p)]
    if len(l) == 0:
        return 1
    return reduce(mul, l)
from functools import reduce

def jcp024():
    # Sum the first 20 ratios of the sequence 2/1, 3/2, 5/3, 8/5, ... three ways.
    # Method 1
    a = 2.0
    b = 1.0
    s = 0
    for n in range(1, 21):
        s += a / b
        t = a
        a = a + b
        b = t
    print(s)
    # Method 2 (a and b must be reset here; the original forgot to, so the
    # three methods disagreed)
    a, b = 2.0, 1.0
    s = 0.0
    for n in range(1, 21):
        s += a / b
        b, a = a, a + b
    print(s)
    # Method 3 (append the term before updating, to match methods 1 and 2)
    a, b = 2.0, 1.0
    l = []
    for n in range(1, 21):
        l.append(a / b)
        b, a = a, a + b
    print(reduce(lambda x, y: x + y, l))
def avg(self, a_list):
    '''
    :param a_list: a list of numbers
    :returns: the average of the elements of a_list
    '''
    a_sum = float(reduce(lambda x, y: x + y, a_list))
    return a_sum / len(a_list)
from functools import reduce

def count_if(predicate, seq):
    """Count the number of elements of seq for which the predicate is true.
    >>> count_if(callable, [42, None, max, min])
    2
    """
    f = lambda count, x: count + (not not predicate(x))
    return reduce(f, seq, 0)
import _functools

def max_in_list(lst):
    # Parameter renamed from 'list' to avoid shadowing the builtin.
    def max_of_two(num1, num2):
        return num1 if num1 >= num2 else num2
    return _functools.reduce(max_of_two, lst)
def set_of_supports(node):
    # A leaf contributes its own indices; internal nodes union their children.
    if node.get('children') == []:
        return set(node.get('index'))
    lst = list(map(set_of_supports, node.get('children')))
    return reduce(lambda x, y: x.union(y), lst)
def serialize(txobj):
    # if isinstance(txobj, bytes):
    #     txobj = bytes_to_hex_string(txobj)
    o = []
    if json_is_base(txobj, 16):
        json_changedbase = json_changebase(
            txobj, lambda x: binascii.unhexlify(x))
        hexlified = safe_hexlify(serialize(json_changedbase))
        return hexlified
    o.append(encode(txobj["version"], 256, 4)[::-1])
    o.append(num_to_var_int(len(txobj["ins"])))
    for inp in txobj["ins"]:
        o.append(inp["outpoint"]["hash"][::-1])
        o.append(encode(inp["outpoint"]["index"], 256, 4)[::-1])
        o.append(num_to_var_int(len(inp["script"])) +
                 (inp["script"] if inp["script"] or is_python2 else bytes()))
        o.append(encode(inp["sequence"], 256, 4)[::-1])
    o.append(num_to_var_int(len(txobj["outs"])))
    for out in txobj["outs"]:
        o.append(encode(out["value"], 256, 8)[::-1])
        o.append(num_to_var_int(len(out["script"])) + out["script"])
    o.append(encode(txobj["locktime"], 256, 4)[::-1])
    # Concatenate the pieces: str join on Python 2, bytes fold on Python 3.
    return ''.join(o) if is_python2 else reduce(lambda x, y: x + y, o, bytes())
def clamserialize(txobj):
    # if isinstance(txobj, bytes):
    #     txobj = bytes_to_hex_string(txobj)
    o = []
    if json_is_base(txobj, 16):
        json_changedbase = json_changebase(
            txobj, lambda x: binascii.unhexlify(x))
        hexlified = safe_hexlify(clamserialize(json_changedbase))
        return hexlified
    o.append(encode(txobj["version"], 256, 4)[::-1])
    o.append(encode(txobj["time"], 256, 4)[::-1])
    o.append(num_to_var_int(len(txobj["ins"])))
    for inp in txobj["ins"]:
        o.append(inp["outpoint"]["hash"][::-1])
        o.append(encode(inp["outpoint"]["index"], 256, 4)[::-1])
        o.append(num_to_var_int(len(inp["script"])) +
                 (inp["script"] if inp["script"] or is_python2 else bytes()))
        o.append(encode(inp["sequence"], 256, 4)[::-1])
    o.append(num_to_var_int(len(txobj["outs"])))
    for out in txobj["outs"]:
        o.append(encode(out["value"], 256, 8)[::-1])
        o.append(num_to_var_int(len(out["script"])) + out["script"])
    o.append(encode(txobj["locktime"], 256, 4)[::-1])
    # Version-2 CLAM transactions carry an extra comment field.
    if txobj["version"] == 2:
        o.append(num_to_var_int(len(txobj["comment"])) +
                 (txobj["comment"] if txobj["comment"] or is_python2 else bytes()))
    return ''.join(o) if is_python2 else reduce(lambda x, y: x + y, o, bytes())
def filter_data_nascimento(self, queryset, field_name, value):
    # _where = "date_part('year', age(timestamp '%s', data_nascimento)) != date_part('year', age(timestamp '%s', data_nascimento))"
    # return queryset.extra(where=_where, params=value)
    if not value[0] or not value[1]:
        return queryset
    now = datetime.datetime.strptime(value[0], "%d/%m/%Y").date()
    then = datetime.datetime.strptime(value[1], "%d/%m/%Y").date()
    if now > then:
        now, then = then, now
    # Build the list of month/day tuples covering the range.
    monthdays = []
    while now <= then:
        monthdays.append((now.month, now.day))
        now += timedelta(days=1)
    # Transform each into queryset keyword args.
    monthdays = (dict(zip(("data_nascimento__month", "data_nascimento__day"), t))
                 for t in monthdays)
    # Compose the django.db.models.Q objects together for a single query.
    query = reduce(operator.or_, (Q(**d) for d in monthdays))
    # Run the query.
    return queryset.extra(select={
        'month': 'extract( month from data_nascimento )',
        'day': 'extract( day from data_nascimento )',
    }).order_by('month', 'day', 'nome').filter(query)
from functools import reduce

def main():
    arr = [3, 1, 1, 2, 2, 1]
    # 'total' instead of the original 'sum', which shadowed the builtin.
    total = reduce(lambda x, y: x + y, arr)
    print('sum={0}'.format(total))
    if total % 2 != 0:
        print('Cannot be partitioned!')
        return
    # part[i][j]: can some subset of the first j elements sum to i?
    part = [[None for i in range(len(arr) + 1)] for i in range(total // 2 + 1)]
    soln = [[-1 for i in range(len(arr) + 1)] for i in range(total // 2 + 1)]
    print('Before:')
    print(*part, sep='\n')
    for i in range(len(arr) + 1):
        # A null subset always has 0 sum.
        part[0][i] = True
    for i in range(1, total // 2 + 1):
        # A null subset cannot have any positive sum.
        part[i][0] = False
    for i in range(1, total // 2 + 1):
        for j in range(1, len(arr) + 1):
            part[i][j] = part[i][j - 1]
            if i >= arr[j - 1]:
                part[i][j] = part[i][j] or part[i - arr[j - 1]][j - 1]
                if part[i - arr[j - 1]][j - 1] == True:
                    soln[i - arr[j - 1]][j - 1] = arr[j - 1]
    print('After:')
    print(*part, sep='\n')
def getAliceStrategies(self):
    alice1strategies = [{
        x1: choiceOfOutputs[x1]
        for x1 in range(self.bellScenario.numberOfInputsAlice1())
    } for choiceOfOutputs in product(*[
        range(numberOfOutputs) for numberOfOutputs in
        self.bellScenario.getNumberOfOutputsPerInputAlice1()
    ])]
    # Repeat each Alice-2 output count once per Alice-1 input.
    extendOutputsAlice2 = reduce(lambda x, y: x + y, [
        self.bellScenario.numberOfInputsAlice1() * [x]
        for x in self.bellScenario.getNumberOfOutputsPerInputAlice2()
    ], [])
    alice2strategies = [{
        (x1, x2): choiceOfOutputs[self.bellScenario.numberOfInputsAlice1() * x2 + x1]
        for x1, x2 in product(
            range(self.bellScenario.numberOfInputsAlice1()),
            range(self.bellScenario.numberOfInputsAlice2()))
    } for choiceOfOutputs in product(*[
        range(numberOfOutputs) for numberOfOutputs in extendOutputsAlice2
    ])]
    return [{(x1, x2): (stgAlice1[x1], stgAlice2[(x1, x2)])
             for x1, x2 in product(
                 range(self.bellScenario.numberOfInputsAlice1()),
                 range(self.bellScenario.numberOfInputsAlice2()))}
            for stgAlice1 in alice1strategies
            for stgAlice2 in alice2strategies]
def genWindows(data):
    """Group consecutive samples that share both class and attribute values.

    Returns (start, end, class, attributes) tuples, with end exclusive.
    """
    windows = []
    classes = data[:, 0].astype(int)
    attributes = data[:, 1:].astype(int)
    samples = data.shape[0]
    current_class = classes[0]
    current_attributes = attributes[0].tolist()
    start = 0
    for i in range(1, samples):
        a_and_b = [a == b for a, b in
                   zip(current_attributes, attributes[i].tolist())]
        same_attributes = reduce(lambda a, b: a and b, a_and_b)
        if not ((classes[i] == current_class) and same_attributes):
            end = i
            windows.append((start, end, current_class, current_attributes))
            start = end
            current_class = classes[i]
            current_attributes = attributes[i].tolist()
    end = samples
    windows.append((start, end, current_class, current_attributes))
    return windows
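# A small, hedged demonstration of genWindows (assumes numpy and the reduce
# import are available as in the snippet): rows share one window only while
# class and attributes both repeat.
import numpy as np

demo = np.array([[0, 1, 1],
                 [0, 1, 1],
                 [1, 1, 1],
                 [1, 2, 1]])
# -> [(0, 2, 0, [1, 1]), (2, 3, 1, [1, 1]), (3, 4, 1, [2, 1])]
print(genWindows(demo))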
def main():
    # Adjacency matrix: every node i < N connects to every node j >= N, plus
    # chain edges along each half.
    G = [[0 for i in range(2 * N)] for i in range(2 * N)]
    for i in range(N):
        for j in range(N, 2 * N):
            G[i][j] = 1
            G[j][i] = 1
        if N > 1:
            if i == 0:
                G[i][i + 1] = 1
                G[i + N][i + N + 1] = 1
            elif i == N - 1:
                G[i][i - 1] = 1
                G[i + N][i + N - 1] = 1
            else:
                G[i][i + 1] = G[i][i - 1] = 1
                G[i + N][i + N + 1] = G[i + N][i + N - 1] = 1
    # paths[i]: number of paths of 2*N distinct nodes starting at node i.
    paths = [0 for i in range(2 * N)]
    # traversed[i]: whether node i is already part of the current path.
    traversed = [False for i in range(2 * N)]
    for i in range(2 * N):
        paths[i] = FindPaths(G, 2 * N, i, traversed)
    print('Total num of paths={0}'.format(
        reduce((lambda x, y: (x + y) % MOD), paths)))
def getGeneratorForVertices(self):
    # Local (deterministic) vertices first.
    yield from BellPolytope.getGeneratorForVertices(self)
    # Then distributions with nontrivial use of the communication channel.
    communicationStrgs = [
        format(i, '0' + str(self._numberOfInputsAlice()) + 'b')
        for i in range(1, 2**(self._numberOfInputsAlice() - 1))
    ]
    strgsAlice = [[(stgAlice[i], int(comm[i])) for i in range(0, len(stgAlice))]
                  for stgAlice in self._strategiesGenerator(self.outputsAlice)
                  for comm in communicationStrgs]
    strgsBob = [
        stgBob for stgBob in self._strategiesGenerator(
            reduce(lambda acum, elem: acum + [elem, elem], self.outputsBob, []))
        if stgBob[0::2] != stgBob[1::2]
    ]
    yield from ([
        int(a == stgAlice[x][0]) & (b == stgBob[2 * y + stgAlice[x][1]])
        for x in range(self._numberOfInputsAlice())
        for y in range(self._numberOfInputsBob())
        for a in range(self.outputsAlice[x])
        for b in range(self.outputsBob[y])
    ] for stgAlice in strgsAlice for stgBob in strgsBob)
def number_of_leaves(node):
    if node.get('children') == []:
        return 1
    lst = list(map(number_of_leaves, node.get('children')))
    return reduce(lambda x, y: x + y, lst)
from functools import reduce

def strint2(s):
    def f2(x, y):
        return x * 10 + y

    def simple(c):
        # Digit lookup table; the same idea can be applied to encoding and
        # decoding ciphers.
        digitals = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4,
                    '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}
        return digitals[c]

    return reduce(f2, list(map(simple, s)))
def __init__(self, n, alpha):
    self.n = n
    # Calculate zeta values (cumulative sums of 1/i^alpha) from 1 to n:
    tmp = [1. / (math.pow(float(i), alpha)) for i in range(1, n + 1)]
    zeta = reduce(lambda sums, x: sums + [sums[-1] + x], tmp, [0])
    # Store the translation map (the normalized CDF):
    self.distMap = [x / zeta[-1] for x in zeta]
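# A hedged sketch of how the cumulative distMap above could be sampled;
# `sample_rank` is an illustrative helper, not part of the original class.
import bisect
import random

def sample_rank(dist_map):
    # dist_map runs from 0.0 to 1.0, so a uniform draw lands in exactly one
    # bucket; bisect_right maps it to a Zipf rank in 1..n.
    return bisect.bisect_right(dist_map, random.random())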
def sizeOfGroupingHash(self):
    if self.grouping:
        if self.running:
            return len(self.grouping)
        else:
            # Sum the sizes of all groups in the mapping.
            return reduce(lambda x, y: x + y,
                          [len(v) for v in self.grouping.values()])
def sum_of_supports(node):
    if node.get('children') == []:
        return len(node.get('index'))
    lst = list(map(sum_of_supports, node.get('children')))
    return reduce(lambda x, y: x + y, lst)
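# A tiny, hedged example of the node shape the three tree helpers above
# (set_of_supports, number_of_leaves, sum_of_supports) appear to expect:
# leaves carry an 'index' list and an empty 'children' list. Assumes the
# helpers and a reduce import are in scope.
leaf_a = {'index': [1, 2], 'children': []}
leaf_b = {'index': [2, 3], 'children': []}
root = {'index': [], 'children': [leaf_a, leaf_b]}
print(set_of_supports(root))   # {1, 2, 3}
print(number_of_leaves(root))  # 2
print(sum_of_supports(root))   # 4, i.e. the two leaf index lengths summed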
def analisysUserContent(self):
    contents = session.query(TitleDetail).filter(
        TitleDetail.user_url == self.user_url).all()  # @UndefinedVariable
    if len(contents) == 0:
        return []
    contents = map(lambda c: c.content, contents)
    # Concatenate all contents into one string before keyword extraction.
    sentence = reduce(lambda c1, c2: c1 + c2, contents)
    tags = analyse.extract_tags(sentence=sentence, topK=200,
                                allowPOS=('n', 'ns', 'vn'),
                                withWeight=True, withFlag=True)
    return tags
def calc(s):
    # Compare huge powers by their logarithm: for a '^'-separated string the
    # code takes the LAST token as the base and multiplies all preceding
    # tokens together as the exponent, i.e. it returns (prod of others) * log(last).
    if s.find('^') >= 0:
        l = list(map(int, s.split('^')))[::-1]
        res = reduce(mul, l[1:])
        res *= log(l[0])
    else:
        res = log(int(s))
    return res
def getLoveNumber(sum, data):
    # Sort each candidate's digits into the largest possible arrangement.
    l = list(map(buildMax, getCandidateLoveNumbers(sum, len(data), data)))
    candidateLoveNumbers = []
    for item in l:
        # Fold the digit sequence back into an integer.
        num = reduce(lambda x, y: x * 10 + y, item)
        candidateLoveNumbers.append(num)
    return max(candidateLoveNumbers)
from functools import reduce

def findingPercentage():
    n = int(input())
    student_marks = {}
    for _ in range(n):
        name, *line = input().split()
        scores = list(map(float, line))
        student_marks[name] = scores
    marks = student_marks[input()]
    print('%0.2f' % (reduce(lambda x, y: x + y, marks) / len(marks)))
def create_averaged_vector(vectors):
    if not vectors:
        log.error("No vectors to average!")
        return None
    if len(vectors) == 1:
        return vectors[0]
    vector_sum = reduce(lambda a, b: a + b, vectors)
    return vector_sum / len(vectors)
def prac_19(self):
    # Print perfect numbers below 1000 (numbers equal to the sum of their
    # proper divisors).
    for i in range(2, 1000):
        arr = []
        limN = int(i / 2)
        for j in range(1, limN + 1):
            if i % j == 0:
                arr.append(j)
        if i == reduce(lambda x, y: x + y, arr):
            print(i, arr)
def get_queryset(self):
    result = super(ExameBuscaListView, self).get_queryset()
    query = self.request.GET.get('q')
    if query:
        query_list = query.split()
        result = result.filter(
            reduce(operator.and_, (Q(_data__icontains=q) for q in query_list)) |
            reduce(operator.and_, (Q(_diagnostico__icontains=q) for q in query_list)) |
            reduce(operator.and_, (Q(animal___nome__icontains=q) for q in query_list)) |
            reduce(operator.and_, (Q(tecnico___nome__icontains=q) for q in query_list)) |
            reduce(operator.and_, (Q(veterinario___nome__icontains=q) for q in query_list)))
    return result
def getMagicNumbers(count=10):
    result = [2]
    a, b = divmod(count - 1, 3)
    # Each full block of three contributes the triple (1, 2*i, 1).
    triples = [(1, 2 * i, 1) for i in range(1, a + 1)]
    # The empty tuple as initializer keeps reduce from raising when count <= 3.
    result = result + list(reduce(lambda t1, t2: t1 + t2, triples, ()))
    if b == 1:
        result.append(1)
    elif b == 2:
        result = result + [1, 2 * (a + 1)]
    return result
def freq_counts(self, arrs, lens):
    """
    Calculates frequencies of samples.

    Parameters
    ----------
    arrs
        A sequence of arrays.
    lens
        A sequence of numbers of distinct values in the arrays.

    Returns
    -------
    numpy.ndarray
        A 1D numpy array of frequencies.
    """
    # Keep only rows with no NaN in any array.
    no_nans = reduce(np.logical_and,
                     [~np.isnan(a) if bn.anynan(a)
                      else np.ones(self.m).astype(bool) for a in arrs])
    # Mixed-radix encoding: flatten the joint values into one integer code.
    combined = reduce(add, [arrs[i][no_nans] * reduce(mul, lens[:i])
                            for i in range(1, len(arrs))],
                      arrs[0][no_nans])
    return np.bincount(combined.astype(np.int32, copy=False),
                       minlength=reduce(mul, lens)).astype(float)
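# A standalone, hedged illustration of the mixed-radix trick freq_counts relies
# on: a tuple of values with per-column cardinalities `lens` flattens to
# code = v0 + v1*lens[0] + v2*lens[0]*lens[1] + ..., so a bincount over the
# codes is a joint frequency table. `flatten_code` is illustrative only.
from functools import reduce
from operator import mul

def flatten_code(values, lens):
    # values[j] must lie in range(lens[j]); the fold mirrors positional digits.
    return sum(v * reduce(mul, lens[:j], 1) for j, v in enumerate(values))

print(flatten_code((0, 2), (2, 3)))  # 4, decoded as 0 + 2*2
print(flatten_code((1, 0), (2, 3)))  # 1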
def get_scriptsig(*args, **kwargs):
    """Return scriptSig for 'txid:index'"""
    txid = txh = vout = None
    if len(args) == 1 and ':' in args[0]:
        txid, vout = args[0].split(':')
    elif len(args) == 2 and args[0][:8] == '01000000' and str(args[1]).isdigit():
        # Raw tx hex plus an input index.
        txh, vout = args[0], int(args[1])
    network = kwargs.get('network', 'btc')
    try:
        txo = deserialize(fetchtx(txid, network))
    except Exception:
        # Fall back to the raw hex when no txid was given or the fetch failed.
        txo = deserialize(txh)
    scriptsig = reduce(access, ["ins", vout, "script"], txo)
    return scriptsig
def addRouteByNames(routelst, names):
    def addRoute(rs, line):
        words = line.split()
        # Resolve a symbolic name to its address when one is given.
        if words[2] in names:
            rs[ipaddress.IPv4Network(names[words[2]] + "/" + words[3]).compressed] = words[1]
        else:
            rs[ipaddress.IPv4Network(words[2] + "/" + words[3]).compressed] = words[1]
        return rs

    return reduce(addRoute, routelst, SubnetTree.SubnetTree())
def productExceptSelf(self, nums):
    output = []
    for i, num in enumerate(nums):
        # Temporarily drop the current element, multiply the rest, put it back.
        # list.remove drops the first equal value, which leaves the product
        # unchanged even when the list contains duplicates.
        nums.remove(num)
        res = reduce(lambda x, y: x * y, nums)
        nums.insert(i, num)
        output.append(res)
    return output
def __str__(self):
    return (u'{0:04X} {1:02X} {2:02X} '
            '{3:08X} {4:08X} '
            '{5:02X} '
            '{6} '
            '{7:02X} {8:02X}'
            .format(self.header, self.length, self.index,
                    self.lWheelSpd, self.rWheelSpd,
                    self.ctrlWord.value,
                    reduce((lambda x, y: x + y),
                           ['%02X ' % v for v in tuple(self.reserved)]),
                    self.sum, self.tail))
def get_scriptpubkey(*args, **kwargs):
    """Return scriptPubKey for 'txid:index'"""
    # TODO: can use biteasy to retrieve a Tx's SPK
    txid = txh = vout = None
    if len(args) == 1 and ':' in args[0]:
        txid, vout = args[0].split(':')
    elif len(args) == 2 and args[0][:8] == '01000000' and str(args[1]).isdigit():
        txh, vout = args[0], int(args[1])
    network = kwargs.get('network', 'btc')
    try:
        txo = deserialize(fetchtx(txid, network))
    except Exception:
        # Fall back to the raw hex when no txid was given or the fetch failed.
        txo = deserialize(txh)
    script_pubkey = reduce(access, ["outs", vout, "script"], txo)
    return script_pubkey
def get_queryset(self):
    result = super(AnimalBuscaListView, self).get_queryset()
    query = self.request.GET.get('q')
    form = self.form_class(self.request.GET or None)
    if query:
        query_list = query.split()
        result = result.filter(
            reduce(operator.and_, (Q(_nome__icontains=q) for q in query_list)) |
            reduce(operator.and_, (Q(_rg__icontains=q) for q in query_list)) |
            reduce(operator.and_, (Q(_especie__icontains=q) for q in query_list)) |
            reduce(operator.and_, (Q(_raca__icontains=q) for q in query_list)))
    elif form.is_valid():
        if form.cleaned_data['_animal']:
            result = result.filter(_nome__icontains=form.cleaned_data['_animal'])
        if form.cleaned_data['_rg']:
            result = result.filter(_rg__icontains=form.cleaned_data['_rg'])
        if form.cleaned_data['_raca']:
            result = result.filter(_raca__icontains=form.cleaned_data['_raca'])
        if form.cleaned_data['sexo']:
            result = result.filter(sexo__icontains=form.cleaned_data['sexo'])
        if form.cleaned_data['_idade']:
            result = result.filter(_idade__icontains=form.cleaned_data['_idade'])
        if form.cleaned_data['_especie']:
            result = result.filter(_especie__icontains=form.cleaned_data['_especie'])
        # Filtering by tutor is not implemented yet:
        # if form.cleaned_data['_tutor']:
        #     result = result.filter(tutor=TutorEndTel.objects.filter(
        #         _nome__icontains=form.cleaned_data['_tutor']))
        # result = Animal.objects.filter(_tutor__icontains=tutor for tutor in tutores)
    return result
def bip32_path(*args, **kwargs):
    if len(args) == 2 and isinstance(args[1], list):
        key, path = args
    elif len(args) == 2 and RE_BIP32_PATH.match(str(args[1])):
        key = args[0]
        path = _parse_bip32_path(str(args[1]))
    else:
        key, path = args[0], map(int, args[1:])
    is_public = (str(args[1]).startswith("M/") or
                 str(args[1]).endswith(".pub")) or kwargs.get("public", False)
    # Derive each child key in turn along the path.
    ret = reduce(bip32_ckd, path, key)
    return bip32_privtopub(ret) if is_public else ret
def __str__(self):
    return (u'{0:04X} {1:02X} {2:02X} '
            '{3:04X} {4:04X} {5:04X} {6:04X} '
            '{7:04X} {8:04X} {9:04X} {10:04X} '
            '{11:08X} {12:08X} '
            '{13} '
            '{14:02X} {15:02X}'
            .format(self.header, self.length, self.index,
                    self.lMBrakeP, self.lABrakeP, self.rMBrakeP, self.rABrakeP,
                    self.lMRotateP, self.lARotateP, self.rMRotateP,
                    self.rARotateP, self.lWheelSpd, self.rWheelSpd,
                    reduce((lambda x, y: x + y),
                           ['%02X ' % v for v in tuple(self.reserved)]),
                    self.sum, self.tail))
def handle(self, *args, **options):
    self.stdout.write('options: %s' % (options))
    profile_qs = Profile.objects.all()
    if options['ranges']:
        q_objs = [Q(id__in=r) for r in options['ranges']]
        profile_qs = profile_qs.filter(reduce(operator.or_, q_objs))
    newly_created_count = 0
    if options['force']:
        for profile in profile_qs:
            if self._prepare_consistent_user_profile(profile):
                newly_created_count += 1
    else:
        for profile in profile_qs:
            if self._prepare_user_profile_if_necessary(profile):
                newly_created_count += 1
    self.stdout.write('total profile: %d, newly created: %d' %
                      (profile_qs.count(), newly_created_count))
def test_from_cpython(self):
    from _functools import reduce

    class SequenceClass(object):
        def __init__(self, n):
            self.n = n

        def __getitem__(self, i):
            if 0 <= i < self.n:
                return i
            else:
                raise IndexError

    from operator import add
    assert reduce(add, SequenceClass(5)) == 10
    assert reduce(add, SequenceClass(5), 42) == 52
    raises(TypeError, reduce, add, SequenceClass(0))
    assert reduce(add, SequenceClass(0), 42) == 42
    assert reduce(add, SequenceClass(1)) == 0
    assert reduce(add, SequenceClass(1), 42) == 42

    d = {"one": 1, "two": 2, "three": 3}
    assert reduce(add, d) == "".join(d.keys())
from _functools import reduce

def _gcd(x, y):
    while y != 0:
        x, y = y, x % y
    return x

for _ in range(int(input())):
    n = int(input())
    # LCM(1..n). Floor division keeps every intermediate exact; the original's
    # true division went through float and lost precision for large n.
    print(reduce(lambda x, y: x * y // _gcd(x, y), range(1, n + 1)))
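# Hedged worked example of the fold above (assumes _gcd and the import are in
# scope): reduce chains lcm pairwise, so for n = 10 the running values are
# 2, 6, 12, 60, 60, 420, 840, 2520, 2520.
assert reduce(lambda x, y: x * y // _gcd(x, y), range(1, 11)) == 2520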
from functools import reduce

def factors(n):
    return set(
        reduce(list.__add__,
               ([i, n // i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
from _functools import reduce
from fractions import Fraction

def isTarget(a, b):
    fraction = float(a) / float(b)
    if fraction >= 1:
        return False
    for c in a:
        if c in b:
            # "Digit-cancelling" fraction: removing the shared digit from
            # numerator and denominator must leave the value unchanged.
            return c != "0" and float(a.replace(c, "", 1)) / float(
                b.replace(c, "", 1)) == fraction
    return False

if __name__ == '__main__':
    targets = [(a, b) for b in range(11, 100) for a in range(10, b)
               if a % 10 != 0 and b % 10 != 0 and isTarget(str(a), str(b))]
    print(targets)
    n, d = reduce(lambda t1, t2: (t1[0] * t2[0], t1[1] * t2[1]),
                  targets, (1, 1))
    print(Fraction(n, d))
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23'''

def getMax(s, column, row, ns):
    if row >= len(ns):
        return s
    return max(getMax(s + ns[row][column], column, row + 1, ns),
               getMax(s + ns[row][column + 1], column + 1, row + 1, ns))

def maxsum(summ, line):
    # Working upward from the bottom row: best achievable sum through each slot.
    a = list(map(lambda x: x[0] + x[1], zip(summ, line)))
    b = list(map(lambda x: x[0] + x[1], zip(summ[1:], line)))
    return list(map(max, zip(a, b)))

if __name__ == '__main__':
    with open("p067_triangle.txt") as fil:
        numbs = [[int(i) for i in s.split()] for s in fil.readlines()]
    # print(getMax(75, 0, 1, numbs))
    print(reduce(maxsum, numbs[::-1]))
from functools import reduce

def factReduce(n):
    # The range starts at 1 (a 0 would zero the product) and includes n.
    l = list(range(1, n + 1))
    return reduce(lambda x, y: x * y, l)
# print(df_med_basket_size)
df_med_trans_value = df_by_customer['trans_value'].median()
customer_dataframes.append(df_med_trans_value)
for country in distinct_countries:
    customer_dataframes.append(df_by_customer[country].mean())
customer_dataframes.append(df_by_customer["StockCode"].count())
customer_dataframes.append(df_by_customer["isNight"].mean())
customer_dataframes.append(df_by_customer["isWeekEnd"].mean())

# Merging all the customer features into one frame keyed by CustomerID.
customer_final_df = reduce(
    lambda left, right: pd.merge(left, right, on='CustomerID'),
    customer_dataframes)

np_dataset_array = customer_final_df.values
cust_original_space = [x for x in np_dataset_array]
features_values = np.array([x[1:] for x in np_dataset_array])
# Normalizing the data, as KMeans is distance-based and therefore scale
# sensitive (one un-normalized feature can dominate the distance function).
features_values = MinMaxScaler().fit_transform(features_values)

cluster_model = KMeans(n_clusters=5)
cluster_model.fit(features_values)
# Assigning each customer to the closest centroid.
closest_centroids = cluster_model.predict(features_values)

# Printing the clustering output.
f = open(centroids_output, 'w')
f.write("Cluster_ID," + (",".join(customer_final_df.columns.values)))  # header
f.write("\n")
for cust_index in range(0, len(cust_original_space)):
    f.write(str(closest_centroids[cust_index]))