def find_predictor(user, restaurants, feature_fn):
    """Return a rating predictor (a function from restaurants to ratings),
    for USER by performing least-squares linear regression using FEATURE_FN
    on the items in RESTAURANTS. Also, return the R^2 value of this model.

    Arguments:
    user -- A user
    restaurants -- A sequence of restaurants
    feature_fn -- A function that takes a restaurant and returns a number
    """
    reviews_by_user = {review_restaurant_name(review): review_rating(review)
                       for review in user_reviews(user).values()}
    xs = [feature_fn(r) for r in restaurants]
    ys = [reviews_by_user[restaurant_name(r)] for r in restaurants]

    mean_xs = mean(xs)
    mean_ys = mean(ys)
    s_xx = sum([pow(x - mean_xs, 2) for x in xs])
    s_yy = sum([pow(y - mean_ys, 2) for y in ys])
    s_xy = sum([mul(z[0], z[1]) for z in zip([x - mean_xs for x in xs],
                                             [y - mean_ys for y in ys])])
    b = s_xy / s_xx
    a = mean_ys - b * mean_xs
    r_squared = pow(s_xy, 2) / mul(s_xx, s_yy)

    def predictor(restaurant):
        return b * feature_fn(restaurant) + a

    return predictor, r_squared
def generate_message(cj, cij, incoming):
    '''
    Generate a normalized message.
    '''
    # Compute max over xj for the message to xi.
    if cj.names[0] == cij.names[0]:
        edge = lambda xi, xj: (xj, xi)
        xi_nstates = cij.table.shape[1]
    else:
        edge = lambda xi, xj: (xi, xj)
        xi_nstates = cij.table.shape[0]
    # Compute the total response for the states of xi.
    incoming_tab = np.array([
        mul(cj((xj,)), reduce(mul, (m(xj) for m in incoming), 1.))
        for xj in range(cj.nstates[0])])
    message = lambda xi: max(mul(cij(edge(xi, xj)), incoming_tab[xj])
                             for xj in range(cj.nstates[0]))
    # Combine into a single table over states of xi.
    table = np.fromiter((message((xi,)) for xi in xrange(xi_nstates)),
                        dtype=float)
    np.divide(table, np.sum(table), table)
    return lambda xi: table[xi]
def buy_calc_amts(self, sec, amt, allo, val):
    print 'buy... '
    print 'price of ' + sec + ' is ', self.get_price(sec)
    print 'currently have ', self.get_current_amt(sec), ' share of ', sec
    print 'portfolio value is ', self.portfolio_value
    print 'DESIRED ALLOCATION IS ', allo

    price = self.get_price(sec)
    current = self.get_current_amt(sec)
    current_amt = current*price
    shares = 0
    allocation = 0
    #amount = 0
    #value = 0

    if allo > 0.0:
        print 'GOT HERE YOU STUPID BITCH'
        amt_desired = operator.mul(allo, self.portfolio_value)
        # have enough and don't have the stock --> buy all desired
        if amt_desired < float(self.free_cash) and current == 0:
            shares = int(amt_desired/price)
        # have enough and do have the stock --> buy remaining amount to reach desired allocation
        elif current != 0 and (amt_desired - current_amt) < float(self.free_cash):
            shares = int((amt_desired - current_amt)/price)
        # don't have enough and don't have the stock --> use all remaining cash
        elif amt_desired > self.free_cash and current == 0:
            shares = int(self.free_cash/price)
        # don't have enough and have the stock --> use all remaining cash
        elif current != 0 and (amt_desired - current_amt) > float(self.free_cash):
            shares = int(self.free_cash/price)
        # not 100% sure if this will ever be triggered
        else:
            return 0, 0, price

        print "time to buy ", shares
        print "buying them for ", price
        amt_to_purchase = operator.mul(shares, int(price))
        allocation = amt_to_purchase/float(self.portfolio_value)
        print "allocation is ", allocation

    elif amt > 0.0:
        if current == 0 and amt*price < self.free_cash:
            shares = amt
        elif current < amt and (amt - current)*price < self.free_cash:
            shares = amt - current

    # elif val > 0:
    #     if val < self.free_cash and current == 0:
    #         shares = int(val/price)
    #     elif val > self.free_cash and current != 0:
    #         shares = int(self.free_cash/price)
    #     value = shares*price

    return shares, 100*allocation, price
def train_epoch(self, inputs, targets, optimizer, criterion,
                epoch_no=0, batch_size=64, max_step=50, max_norm=5, eval_step=10):
    hidden = self.model.init_hidden(batch_size)
    counter = 0
    x_generator = get_batch(inputs, batch_size, max_step)
    y_generator = get_batch(targets, batch_size, max_step)
    for x, y in zip(x_generator, y_generator):
        self.model.train()
        x = Variable(torch.from_numpy(np.array(x, dtype=np.float32))).long()
        y = Variable(torch.from_numpy(np.array(y, dtype=np.float32))).long()
        if CUDA_AVAILABLE:
            x = x.cuda()
            y = y.cuda()

        if isinstance(hidden, tuple):
            hidden = tuple([Variable(each.data) for each in hidden])
        else:
            hidden = Variable(hidden.data)

        self.model.zero_grad()  # reset gradients
        output, hidden = self.model.forward(x, hidden)

        # Reshape output:
        # [batch_size, step_size, vocab_size] -> [batch_size * step_size, vocab_size]
        # y can stay 1D
        step_size = x.size(1)  # sequences in a batch may be shorter than max_step
        cross_entropy_loss = criterion(
            output.view(batch_size * step_size, -1),
            y.view(batch_size * step_size).long()
        )
        focal_loss = FocalLoss(gamma=2)(
            output.view(batch_size * step_size, -1),
            y.view(batch_size * step_size).long()
        )
        ploss = pullaway_loss(output.view(batch_size * step_size, -1))
        loss = cross_entropy_loss + focal_loss + 0.1 * ploss

        loss.backward()
        torch.nn.utils.clip_grad_norm(self.model.parameters(), max_norm)
        optimizer.step()

        counter += 1
        if (counter % eval_step) == 0:
            print("Epoch: {}; Step: {}; Loss: {:.4f}".format(
                epoch_no + 1, counter, loss.data[0]
            ))
            # Pick a random snippet from x to seed generation
            pos = np.random.randint(0, mul(*x.size()) - 2)
            length = np.random.randint(1, min(5, mul(*x.size()) - pos - 1))
            start_tokens = x.view(-1)[pos:pos + length].data.numpy()
            start_text = ''.join(self.vectorizer.inverse_transform([start_tokens])[0]).strip()
            if start_text:
                result = self.generate(start_text, max_len=100)
                print("[%s]: %r" % (start_text, result))
def twenty_fourteen():
    """Come up with the most creative expression that evaluates to 2014,
    using only numbers and the functions add(...) and mul(...).

    >>> twenty_fourteen()
    2014
    """
    "*** YOUR CODE HERE ***"
    return add(mul(20, mul(10, 10)), add(mul(5, 2), 4))
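
# A quick check of the expression above (a sketch, assuming operator.add/mul):
# mul(20, mul(10, 10)) -> 2000 and add(mul(5, 2), 4) -> 14, so the sum is 2014.
from operator import add, mul
assert add(mul(20, mul(10, 10)), add(mul(5, 2), 4)) == 2014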
def main():
    most = 0
    best = (0, 0)
    for a, b in product(range(-999, 1000), range(-999, 1000)):
        formula = lambda n: n ** 2 + a * n + b
        num = numPrimes(formula)
        if num > most:
            most = num
            best = (a, b)
    print mul(*best)
def power(a, b):
    if b == 0:
        return 1
    elif b == 1:
        return a
    elif b == 2:
        return operator.mul(a, a)
    elif b % 2 == 0:
        return power(power(a, b / 2), 2)
    else:
        return operator.mul(power(a, b - 1), a)
def fast_gpow(x, n, mul, mul_identity):
    """Raise generalized numeric x to integer power n."""
    m = mul_identity
    while n > 0:
        if n & 1:
            m = mul(m, x)
            n -= 1
        else:
            x = mul(x, x)
            n >>= 1
    return m
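
# A minimal usage sketch for fast_gpow above: any associative "mul" plus its
# identity works, not only numeric multiplication (operator.concat is used
# here purely as an illustration of a non-numeric case).
import operator

assert fast_gpow(3, 5, operator.mul, 1) == 243              # 3 ** 5 by squaring
assert fast_gpow('ab', 3, operator.concat, '') == 'ababab'  # "power" = repetition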
def test_multiplication_with_autoconvert(self, input_tuple, expected):
    self.ureg.autoconvert_offset_to_baseunit = True
    qin1, qin2 = input_tuple
    q1, q2 = self.Q_(*qin1), self.Q_(*qin2)
    input_tuple = q1, q2
    if expected == 'error':
        self.assertRaises(OffsetUnitCalculusError, op.mul, q1, q2)
    else:
        expected = self.Q_(*expected)
        self.assertEqual(op.mul(q1, q2).units, expected.units)
        self.assertQuantityAlmostEqual(op.mul(q1, q2), expected, atol=0.01)
def pout(gen, indi):
    message(name(indi) + "\n")
    cols("", add(5, mul(4, gen)), d(add(gen, 1)) + "-- ")
    outp(indi)
    next = add(1, gen)
    (fam, sp, num, iter) = families(indi)
    while fam:
        cols("", add(5, mul(4, gen)), " sp-")
        outp(sp)
        if lt(next, 15):
            for (no0, child) in children(fam):
                pout(next, child)
        (fam, sp, num) = families(iter)
def all_values_on_form(form, value):
    """
    Returns all lattice points (not necessarily coprime) that produce
    the desired value on the form.

    Given the recurrence for the form, these values can serve to determine
    *all* solutions for the given value due to the repeating nature of the
    infinite river.
    """
    factor_list = factors(value)
    valid_factors = [factor for factor in factor_list
                     if is_power(value/factor, 2)]
    roots = all_positive_roots(form)
    found = set()
    for root in roots:
        candidates = seek_up_to_val(root, value)
        to_add = [candidate for candidate in candidates if candidate[1] in valid_factors] + \
                 [candidate for candidate in root if candidate[1] in valid_factors]
        found.update(to_add)
    found = list(found)
    # We may get some duplicates since, when we include values from the
    # river, we don't check that they come from a different iteration of
    # the river
    x_mult, y_mult, _ = get_recurrence(form)
    checked = found[:]
    for candidate in found:
        coords, val = candidate
        next_x = sum([operator.mul(*pair) for pair in zip(coords, x_mult)])
        next_y = sum([operator.mul(*pair) for pair in zip(coords, y_mult)])
        if ((next_x, next_y), val) in found:
            checked.remove(((next_x, next_y), val))
    # Finally we must scale up factors to account for
    # the reduction by a square multiple
    result = []
    for cell in checked:
        (x, y), val = cell
        if val < value:
            ratio = int(sqrt(value/val))
            x *= ratio
            y *= ratio
        result.append((x, y))
    return result
def test_multiplication_with_scalar(self, input_tuple, expected):
    self.ureg.default_as_delta = False
    in1, in2 = input_tuple
    if type(in1) is tuple:
        in1, in2 = self.Q_(*in1), in2
    else:
        in1, in2 = in1, self.Q_(*in2)
    input_tuple = in1, in2  # update input_tuple for better tracebacks
    if expected == 'error':
        self.assertRaises(OffsetUnitCalculusError, op.mul, in1, in2)
    else:
        expected = self.Q_(*expected)
        self.assertEqual(op.mul(in1, in2).units, expected.units)
        self.assertQuantityAlmostEqual(op.mul(in1, in2), expected, atol=0.01)
def predeal_atom2(range_rule):
    atom_rule = [0] * len(range_rule)
    shadows = list()
    segnum = list()
    base = list()
    for dim in range(SF_DIM_NUM):
        shadows.append(pc.shadow_rules(range_rule, dim))
        segnum.append(range(len(shadows[dim]) >> 1))
    print segnum
    for dim in range(SF_DIM_NUM - 1):
        # reduce(mul, (d[1] - d[0] + 1 for d in self.dims))
        base.append(reduce(mul, (len(segnum[d]) for d in range(dim, SF_DIM_NUM))))
    base.append(1)
    for atom_index in product(*segnum):
        atom_rect = list()
        atom = 0
        for dim in range(SF_DIM_NUM):
            atom_rect.append([shadows[dim][atom_index[dim] << 1],
                              shadows[dim][(atom_index[dim] << 1) + 1]])
            atom += mul(atom_index[dim], base[dim])
        for i in range(len(range_rule)):
            if rule_le(atom_rect, range_rule[i]):
                atom_rule[i] += atom
                break
        # print atom_index
    return atom_rule
def calculate_paper(dimensions):
    if not dimensions:
        return 0
    dimensions = get_clean_dimensions(dimensions)
    l, w, h = dimensions
    extra = mul(*sorted(dimensions)[:2])
    return (2*l*w) + (2*w*h) + (2*h*l) + extra
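
# Worked example for the formula above, assuming get_clean_dimensions returns
# the three side lengths: for a 2 x 3 x 4 box,
#   2*l*w + 2*w*h + 2*h*l = 12 + 24 + 16 = 52
#   extra = mul(*sorted((2, 3, 4))[:2]) = 2 * 3 = 6
# so calculate_paper would return 58.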
def p_expression_binop(self, p):
    """expression : expression PLUS expression
                  | expression MINUS expression
                  | expression TIMES expression
                  | expression DIVIDE expression
                  | expression MODULUS expression
                  | expression EXPONENT expression"""
    op = p[2]
    left = self._sumDiceRolls(p[1])
    right = self._sumDiceRolls(p[3])

    if op == '+':
        p[0] = operator.add(left, right)
    elif op == '-':
        p[0] = operator.sub(left, right)
    elif op == '*':
        p[0] = operator.mul(left, right)
    elif op == '/':
        p[0] = operator.floordiv(left, right)
    elif op == '%':
        p[0] = operator.mod(left, right)
    elif op == '^':
        if -self._MAX_EXPONENT <= left <= self._MAX_EXPONENT and \
                -self._MAX_EXPONENT <= right <= self._MAX_EXPONENT:
            p[0] = operator.pow(left, right)
        else:
            raise InvalidOperandsException(u'operand or exponent is larger than the maximum {}'
                                           .format(self._MAX_EXPONENT))
def doprob():
    op = choice('+-*/')
    nums = [randint(1, 10) for i in range(2)]
    nums.sort(reverse=True)
    if op != '/':
        ans = ops[op](*nums)
        pr = '%d %s %d = ' % (nums[0], op, nums[1])
    else:
        ans = div(nums[0], nums[1])
        if div(nums[0] * 10, nums[1]) == ans * 10:
            # division is exact, keep the division problem
            pr = '%d %s %d = ' % (nums[0], op, nums[1])
        else:
            # otherwise fall back to a multiplication problem
            ans = mul(nums[0], nums[1])
            pr = '%d %s %d = ' % (nums[0], '*', nums[1])
    opps = 0
    while True:
        try:
            if int(raw_input(pr)) == ans:
                print 'correct'
                break
            if opps == MAXTRIES:
                print 'answer\n %s%d' % (pr, ans)
                break
            else:
                print 'incorrect ...try again'
                opps += 1
        except (KeyboardInterrupt, EOFError, ValueError):
            print 'invalid input...try again'
def exp_sqr(x, n):
    if n == 0:
        return 1
    if n % 2 == 1:
        return mul(x, exp_sqr(x, n-1))
    a = exp_sqr(x, n/2)
    return pow(a, 2)
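
# A small sanity check for exp_sqr above; the recursion halves n, so only
# O(log n) multiplications are needed. (Under Python 3, n/2 produces floats;
# n // 2 would keep n integral, though the results below still hold.)
assert exp_sqr(3, 5) == 243
assert exp_sqr(2, 10) == 1024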
def index_view(request):
    print(request.POST)
    try:
        if request.POST:
            first_number_input = float(request.POST['first_number_input'])
            second_number_input = float(request.POST['second_number_input'])
            operation = request.POST["operator"]
            if operation == "+ (add)":
                answer = operator.add(first_number_input, second_number_input)
                operation = "+"
            elif operation == "- (subtract)":
                answer = operator.sub(first_number_input, second_number_input)
                operation = "-"
            elif operation == "x (multiply)":
                answer = operator.mul(first_number_input, second_number_input)
                operation = "x"
            else:
                try:
                    answer = operator.truediv(first_number_input, second_number_input)
                except ZeroDivisionError:
                    answer = "Answer is Undefined (You can't divide by zero)"
                operation = "/"
            return render(request, 'index.html', {'answer': answer,
                                                  'operation': operation,
                                                  'first_number': first_number_input,
                                                  'second_number': second_number_input})
        else:
            return render(request, 'index.html', {})
    except (ValueError, TypeError):
        answer = "INVALID ENTRY"
        return render(request, "index.html", {'answer': answer})
def make_anonymous_factorial():
    """Return the value of an expression that computes factorial.

    >>> make_anonymous_factorial()(5)
    120
    """
    return (lambda f: lambda k: f(f, k))(lambda f, k: k if k == 1 else mul(k, f(f, sub(k, 1))))
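
# How the expression above recurses without naming a function: the outer lambda
# receives the worker as f and calls f(f, k), so the worker can call itself
# through its own first argument. Sketch of the call chain for 3!:
#   f(f, 3) -> mul(3, f(f, 2)) -> mul(3, mul(2, f(f, 1))) -> mul(3, mul(2, 1)) = 6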
def panlindrom(lo, hi):
    global maxi
    for p in product(range(lo, hi), repeat=2):
        prod = str(mul(*p))
        if prod == prod[::-1] and int(prod) > maxi:
            maxi = int(prod)
    print maxi
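
# Usage sketch for panlindrom above (it relies on a module-level maxi and on
# itertools.product / operator.mul being imported):
#   maxi = 0
#   panlindrom(10, 100)   # prints 9009, the largest palindromic product of two 2-digit numbers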
def make_anonymous_factorial():
    """Return the value of an expression that computes factorial.

    >>> make_anonymous_factorial()(5)
    120
    """
    return lambda n: (lambda f, n: f(f, n))(lambda f, n: 1 if n == 1 else mul(n, f(f, sub(n, 1))), n)
def make_anonymous_factorial():
    """Return the value of an expression that computes factorial.

    >>> make_anonymous_factorial()(5)
    120
    """
    return lambda val: (lambda f, v: f(f, v))(lambda f, v: 1 if v == 0 else mul(v, f(f, sub(v, 1))), val)
def evaluate(self):
    result = None
    left = self.left.evaluate()
    right = self.right.evaluate()
    if self.operation == '+':
        result = operator.add(left, right)
    elif self.operation == '-':
        result = operator.sub(left, right)
    elif self.operation == '*':
        result = operator.mul(left, right)
    elif self.operation == '/':
        result = operator.div(left, right)
    elif self.operation == '^':
        result = operator.pow(left, right)
    elif self.operation == 'and':
        result = left and right
    elif self.operation == 'or':
        result = left or right
    elif self.operation == '<':
        result = operator.lt(left, right)
    elif self.operation == '<=':
        result = operator.le(left, right)
    elif self.operation == '==':
        result = operator.eq(left, right)
    elif self.operation == '!=':
        result = operator.ne(left, right)
    elif self.operation == '>':
        result = operator.gt(left, right)
    elif self.operation == '>=':
        result = operator.ge(left, right)
    elif self.operation == 'in':
        result = (left in right)
    return result
def _call(self, arguments, delta_time, computing_context):
    try:
        result = OperatorComputingResult(mul(*arguments), NoneComputingContext())
    except:
        result = NoneComputingResult()
    return result
def train(self, images, statuses):
    numStatuses = len(self.possibleStatuses)
    ds = self.datasetMethod(mul(*self.imageSize), 1, numStatuses)
    [ds.addSample(self._loadToArray(i), e.value) for i, e in izip(images, statuses)]
    # convert to one output per class. Apparently this is a better format?
    # http://pybrain.org/docs/tutorial/fnn.html
    ds._convertToOneOfMany()

    trainer = self.trainMethod(self.net, dataset=ds)
    start = time.clock()
    trainErrors, validationErrors = trainer.trainUntilConvergence(convergence_threshold=4)
    trainTime = time.clock() - start
    iterations = len(trainErrors) + len(validationErrors)
    print "Training took {} iterations".format(iterations)
    if trainErrors:
        print "Errors: {}, {}".format(trainErrors[-1], validationErrors[-1])
    else:
        print "Training unsuccessful. trainErrors is empty."
    self.trainTime = float(trainTime) / iterations
    self.error = validationErrors[-1]
    return trainErrors, validationErrors
def _loadToArray(self, imagePath):
    """
    Creates input array. Applies scale factor to each image.
    """
    try:
        image = PIL.Image.open(imagePath)
    except IOError as e:
        # print("Trying to open by converting to png")
        png = os.path.splitext(imagePath)[0] + '.png'
        wand.image.Image(filename=imagePath).convert('PNG').save(filename=png)
        image = PIL.Image.open(png)
    # resize
    scaleFactor = np.divide(self.imageSize, image.size)
    newSize = tuple(round(x * s) for x, s in zip(image.size, scaleFactor))
    image.thumbnail(newSize)
    # greyscale
    image = image.convert('L')
    # neurolab seems to expect 1d input, so rescale the images in the
    # input array as linear (the network doesn't know about shape anyway)
    imageArray = np.array(image)
    newSize = mul(*imageArray.shape)
    return imageArray.reshape(newSize)
def make_anonymous_factorial():
    """Return the value of an expression that computes factorial.

    >>> make_anonymous_factorial()(5)
    120
    """
    return lambda x: 1 if x <= 1 else mul(x, make_anonymous_factorial()(sub(x, 1)))
def parse_dealer(self, img, coords):
    """Parses to see who is the dealer

    Template will be image with button
    Low MSE is true"""
    # load template
    tpl = self.img['dealer']
    tpl_data = np.array(tpl)
    self.logger.debug('loaded dealer template {}'.format(tpl))
    tpl.save(os.path.join(self.DEBUG_PATH, 'dealer_tpl.png'))

    dealers = {}
    for seat, bb in coords['dealers'].items():
        self.logger.debug('parsing seat {} for dealer within {}'.format(seat, bb))

        # crop out box
        box = img.crop(bb[0:4])
        box_data = np.array(box)
        self.logger.debug('dealer box {} from {}'.format(box, bb[:4]))
        box.save(os.path.join(self.DEBUG_PATH, 'dealer_{}.png'.format(seat)))

        # MSE
        mse = np.sum((box_data.astype(np.float) - tpl_data.astype(np.float)) ** 2)
        mse /= mul(*box.size)
        is_dealer = mse <= bb[-1]
        self.logger.info('{}: {} calculated from MSE = {} (threshold {})'.format(
            seat, is_dealer, int(mse), bb[-1]))
        dealers[seat] = is_dealer

    self.logger.info('dealers {}'.format(dealers))
    return dealers
def euler_27(max_a, max_b):
    a = range(-max_a, max_a + 1)
    # n = 0 means that the equation simplifies to b, so b must be prime.
    b = filter(lambda x: lib.is_prime(abs(x)), range(-max_b, max_b + 1))
    vars = filter(a_filter, itertools.product(a, b))
    solution = max((consec_primes_for_eq(*var), var) for var in vars)[1]
    return operator.mul(*solution)
def ch_to_op(c, x, y):
    if c == '+':
        return operator.add(x, y)
    elif c == '-':
        return operator.sub(x, y)
    elif c == 'x':
        return operator.mul(x, y)
def make_anonymous_factorial():
    """Return the value of an expression that computes factorial.

    >>> make_anonymous_factorial()(5)
    120
    >>> from construct_check import check
    >>> # ban any assignments or recursion
    >>> check(HW_SOURCE_FILE, 'make_anonymous_factorial', ['Assign', 'AugAssign', 'FunctionDef', 'Recursion'])
    True
    """
    """
    return (lambda f: lambda n: f(f, n))(lambda f, n: 1 if not n else mul(n, f(f, sub(n, 1))))
    # plain recursion, done through a self-applied anonymous function
    """
    # tail-recursive variant: the running product is carried in y
    return (lambda f: lambda n: f(f, n))(lambda f, n, y=1: y if not n else f(f, sub(n, 1), mul(n, y)))
def solve1(input, n=256):
    return mul(*hash_round([int(l) for l in input.split(',')], list(range(n)))[0][:2])
def create_transformer_class(row, transformer_mapping):
    """
    Creates a transformer class from the provided mapping overrides.

    :param row: The row to transform
    :param transformer_mapping: The overrides for the transform functions
    :return: The new transformer class
    """
    transformer_mapping = {
        "lookup": lambda r, name: r[name],
        "add": lambda r, lhs, rhs: add(lhs, rhs),
        "subtract": lambda r, lhs, rhs: sub(lhs, rhs),
        "multiply": lambda r, lhs, rhs: mul(lhs, rhs),
        "divide": lambda r, lhs, rhs: div(lhs, rhs),
        "eq": lambda r, lhs, rhs: lhs == rhs,
        "not_eq": lambda r, lhs, rhs: lhs != rhs,
        "is_in": default_in_transformer,
        "not_in": default_not_in_transformer,
        "gt": lambda r, lhs, rhs: lhs > rhs,
        "gte": lambda r, lhs, rhs: lhs >= rhs,
        "lt": lambda r, lhs, rhs: lhs < rhs,
        "lte": lambda r, lhs, rhs: lhs <= rhs,
        "logical_or": lambda r, lhs, rhs: lhs or rhs,
        "logical_and": lambda r, lhs, rhs: lhs and rhs,
        "logical_not": lambda r, v: not v,
        "any": lambda r, v: AnyWrapper(v),
        "all": lambda r, v: AllWrapper(v),
        "str_join": default_join,
        "str_replace": default_replace,
        "str_match": default_match,
        "str_search": default_search,
        **(transformer_mapping or {}),
    }

    def mapped_function(name, *args, **kwargs):
        return transformer_mapping[name](row, *args, **kwargs)

    @v_args(inline=True)
    class TreeTransformer(BaseTreeTransformer):
        lookup = partial(mapped_function, "lookup")
        add = partial(mapped_function, "add")
        subtract = partial(mapped_function, "subtract")
        multiply = partial(mapped_function, "multiply")
        divide = partial(mapped_function, "divide")
        eq = partial(mapped_function, "eq")
        not_eq = partial(mapped_function, "not_eq")
        is_in = partial(mapped_function, "is_in")
        not_in = partial(mapped_function, "not_in")
        gt = partial(mapped_function, "gt")
        gte = partial(mapped_function, "gte")
        lt = partial(mapped_function, "lt")
        lte = partial(mapped_function, "lte")
        logical_not = partial(mapped_function, "logical_not")
        logical_or = partial(mapped_function, "logical_or")
        logical_and = partial(mapped_function, "logical_and")
        any = partial(mapped_function, "any")
        all = partial(mapped_function, "all")
        str_join = partial(mapped_function, "str_join")
        str_replace = partial(mapped_function, "str_replace")
        str_match = partial(mapped_function, "str_match")
        str_search = partial(mapped_function, "str_search")

    return TreeTransformer
def square(x):
    return mul(x, x)
[2, 0, 'one', 'seven']

# Operators
2000 + 17
2001 + 4**2
4034 // 2
-1 + 0 + 1 * 2**3 * 4 // 5 * 6 * 7 * 8 * 9 // 10 + 11 + 12 + 13 * 14 - 1
'Hello ' + 'world!'
[2, 0] + ['one', 'seven']

# Call expressions
from operator import add, mul
add(2000, 17)
mul(1009, 2)
abs(-2017)
pow(2, 100)
from math import sqrt
sqrt(2017)
from math import sin, pi
sin(pi)

# Nested call expressions
add(add(6, mul(4, 6)), mul(3, 5))

# Shakespeare demo
# Note: Download from http://composingprograms.com/shakespeare.txt
def vector_count(self):
    return mul(map(Range.value_count, self))
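
# Note: operator.mul takes exactly two arguments, so the method above presumably
# relies on a product helper being in scope under the name mul. A sketch of such
# a helper using only the standard library:
from functools import reduce
import operator

def product(iterable):
    # multiply all items together, starting from the empty product 1
    return reduce(operator.mul, iterable, 1)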
def multiply(t):
    return mul(*t)
def mul(*args):
    ans = 1
    for arg in args:
        ans = op.mul(ans, arg)
    return ans
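
# Usage sketch for the variadic mul wrapper above:
#   mul()         -> 1   (empty product)
#   mul(7)        -> 7
#   mul(2, 3, 4)  -> 24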
from operator import mul
import random

total = 0
incs = []
mistakes = False
qs = int(input("Number of questions:"))
for num in range(0, qs):
    x = random.randint(6, 8)
    y = random.randint(13, 20)
    print("What is " + str(x) + " x " + str(y) + "?")
    answer = int(input("Your Answer:"))
    if answer == (mul(x, y)):
        print("Correct!")
        total = total + 1
    elif answer == (000):
        exit()
    else:
        print("Wrong, the answer is " + str(x * y))
        mistakes = True
        incs.append("You said " + str(x) + " times " + str(y) + " = " + str(answer) +
                    " but the answer is actually " + str(mul(x, y)))
print("You got " + str(total) + " out of " + str(qs) + " questions correct!")
if mistakes == True:
    print("Here are your mistakes:")
    z = 0
    for num in range(0, len(incs)):
        print(incs[z])
        z += 1
def square(square):
    return mul(square, square)
def solver():
    with open('input.txt', 'r') as f:
        data = f.read().strip()
    print('Part 1:', play_game(data, 100))    # 34952786
    print('Part 2:', mul(*play_game2(data)))  # 505334281774
from itertools import groupby
from operator import itemgetter, add, mul, methodcaller

adict = {'a': 1, 'b': 5, 'c': 1}
# groupby needs its input sorted on the grouping key
dict((i, dict(v)) for i, v in groupby(sorted(adict.items(), key=itemgetter(1)), itemgetter(1)))
# Output: {1: {'a': 1, 'c': 1}, 5: {'b': 5}}

# which is equivalent (but faster) to a lambda function like this:
dict((i, dict(v)) for i, v in groupby(sorted(adict.items(), key=lambda x: x[1]), lambda x: x[1]))

# Or sorting a list of tuples by the second element first, with the first element as secondary key
alist_of_tuples = [(5, 2), (1, 3), (2, 2)]
sorted(alist_of_tuples, key=itemgetter(1, 0))

# Section 48.2: Operators as alternative to an infix operator
print("-----Section 48.2: Operators as alternative to an infix operator---------")
print(add(1, 1))
print(mul('a', 10))
print(mul([3], 3))

# Section 48.3: Methodcaller
print("-------Section 48.3: Methodcaller---------")
alist = ['wolf', 'sheep', 'duck']
list(filter(lambda x: x.startswith('d'), alist))  # Keep only elements that start with 'd'
# Output: ['duck']
# or
list(filter(methodcaller('startswith', 'd'), alist))  # Does the same but is faster
def __mul__(self, other):
    return operator.mul(*self._get_operands(other))
def imp(consequence_crisp):
    return operator.mul(antecedent_out, consequence_membershipf(consequence_crisp))
def multiply(self, a: int, b: int) -> int:
    return mul(a, b)
def test_reformer2_one_step(self):
    d_model = 1024
    vocab_size = 14041
    max_len = 16384
    pos_axial = (128, 128)  # should multiply to max_len
    pos_d_axial_embs = (512, 512)  # sum to d_model

    assert operator.mul(*pos_axial) == max_len
    assert sum(pos_d_axial_embs) == d_model

    d_ff = 4096
    n_heads = 8
    d_attn = d_model // n_heads

    n_buckets = 128
    encoder_chunk_len = (2 * max_len) // n_buckets  # 256
    decoder_chunk_len = 2 * encoder_chunk_len  # 512
    encoder_n_chunks_after = 1  # since it's not causal.
    lsh_self_attention = functools.partial(self._lsh_self_attention_fn(),
                                           n_buckets=n_buckets)
    encoder_lsh_self_attention = functools.partial(
        lsh_self_attention, n_chunks_after=encoder_n_chunks_after,
        chunk_len=encoder_chunk_len)
    decoder_lsh_self_attention = functools.partial(
        lsh_self_attention, n_chunks_after=0,
        chunk_len=decoder_chunk_len)

    model = reformer.Reformer2(
        vocab_size,
        d_model=d_model,
        d_ff=d_ff,
        d_attention_key=d_attn,
        d_attention_value=d_attn,
        n_encoder_layers=1,
        n_decoder_layers=1,
        n_heads=n_heads,
        dropout=0.05,
        max_len=max_len,
        encoder_attention_type=encoder_lsh_self_attention,
        encoder_decoder_attention_type=decoder_lsh_self_attention,
        pos_axial_shape=pos_axial,
        pos_d_axial_embs=pos_d_axial_embs,
        ff_activation=tl.Relu,
        ff_use_sru=0,
        mode='train',
    )

    def random_sentence():
        return np.random.randint(low=1, high=vocab_size - 1, size=(1, max_len),
                                 dtype=np.int32)

    x = [random_sentence(), random_sentence()]
    weights, state = model.init(shapes.signature(x))

    @fastmath.jit
    def mock_training_step(x, weights, state, rng):
        def compute_mock_loss(weights):
            logits_and_dec_toks, new_state = model.pure_fn(x, weights, state, rng)
            # This returns [logits, decoder tokens]
            logits = logits_and_dec_toks[0]
            loss = fastmath.numpy.mean(logits[..., 0])
            return loss, (new_state, logits)
        gradients, (new_state, logits) = fastmath.grad(
            compute_mock_loss, has_aux=True)(weights)
        new_weights = fastmath.nested_map_multiarg(
            lambda w, g: w - 1e-4 * g, weights, gradients)
        return new_weights, new_state, logits

    weights, state, logits = mock_training_step(
        x, weights, state, fastmath.random.get_prng(0))

    self.assertEqual(logits.shape, (1, max_len, vocab_size))
def __hash__(self):
    return mul(map(hash, [self._min, self._max]))
# ext_srcs = filter(lambda block: block not in text, page)

from functools import reduce
from operator import add, mul, neg
from toolz import curry  # assumption: any currying helper with this interface works

sample_list = [
    [1, 2],
    3, 4, 5,
    [6, 7, 8],
    [9, 10]
]

def compose(*funcs):
    return reduce(lambda f, g: lambda x: f(g(x)), funcs, lambda x: x)

add = curry(add)
mul = curry(mul)
add_and_mul = compose(neg, mul(2), add(2))

@curry
def calc(li):
    return reduce(lambda acc, i: acc + add_and_mul(i), li)

@curry
def odd(li):
    return filter(lambda i: i % 2 == 1, li)

def i_range(stop):
    i = 0
    while(i < stop):
        yield i
        i += 1
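
# A quick trace of the composed pipeline above, assuming curry behaves like
# toolz.curry: add_and_mul(i) computes neg(2 * (i + 2)), so
#   add_and_mul(3)   -> -10
#   calc([1, 2, 3])  -> 1 + add_and_mul(2) + add_and_mul(3) = 1 - 8 - 10 = -17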
# Numeric expressions
2019
2000 + 19
0 + 1 // 2 + 3 + 4 * ((5 // 6) + 7 * 8 * 9)

# Call expressions
max(3, 4.5)
pow(100, 2)
pow(2, 100)
max(1, -2, 3, -4)
max(pow(10, 2), pow(2, 10), 1010)

# Importing and arithmetic with call expressions
from operator import add, mul
add(1, 2)
mul(4, 6)
mul(add(4, mul(4, 6)), add(3, 5))
add(3, mul(9, mul(add(4, mul(4, 6)), add(3, 5))))

### DEMO 2: "Functions, Values, Objects, Interpreters, and Data"

# Objects
# Note: Download from http://composingprograms.com/shakespeare.txt
# downloaded
shakes = open('01/shakespeare.txt')
text = shakes.read().split()
len(text)
text[:25]
text.count('the')
text.count('thou')
def test_next_bus(self):
    next = pkg.next_bus(*pkg.load_bus_info('test_input/day13_t1.txt'))
    self.assertEqual(mul(*next), 295)
# Callable: the call operator -> check whether an object can be invoked like a method
# Check which of these are callable
print(callable(str), callable(list), callable(var_func), callable(3.14))

from inspect import signature

sg = signature(var_func)
print(sg)
print(sg.parameters)

print()
print()

# Using partial: freeze arguments -> handy for callback functions
from operator import mul
from functools import partial

print(mul(10, 10))

# Freeze an argument
five = partial(mul, 5)

# Freeze another one on top
six = partial(five, 6)

print(five(10))
print(six())
print([five(i) for i in range(1, 11)])
print(list(map(five, range(1, 11))))
def op(a, b):
    return int64(mul(a, b))
def init_leftmost_tick(self):
    if self.leftmost_tick is None:
        self.leftmost_tick = op.mul(
            self.tick_frequency,
            np.ceil(self.x_min / self.tick_frequency)
        )
# Operator: performing operations on variables
import operator

def soma(x):
    return operator.add(x, 2)

print(soma(3))

# Python code to demonstrate the working of
# add(), sub(), mul()
a = 4
b = 3

# using add() to add two numbers
print("The addition of numbers is :", end="")
print(operator.add(a, b))

# using sub() to subtract two numbers
print("The difference of numbers is :", end="")
print(operator.sub(a, b))

# using mul() to multiply two numbers
print("The product of numbers is :", end="")
print(operator.mul(a, b))
def dot(wi, x):
    from functools import reduce
    from operator import mul, add
    return reduce(add, map(lambda i: mul(wi[i], x[i]), x))
def kinetic_energy_jacobi(self, x, **kwargs):
    r"""Compute the value of the kinetic energy using the Jacobi formula.

    .. math::
        \frac{\Delta (J(R) \Psi(R))}{J(R) \Psi(R)} =
        \frac{\Delta J(R)}{J(R)}
        + 2 \frac{\nabla J(R)}{J(R)} \frac{\nabla \Psi(R)}{\Psi(R)}
        + \frac{\Delta \Psi(R)}{\Psi(R)}

    The laplacian of the determinantal part is computed via

    .. math::
        \Delta_i \Psi(R) = \sum_n c_n
        \left( \frac{\Delta_i D_n^{u}}{D_n^{u}}
        + \frac{\Delta_i D_n^{d}}{D_n^{d}}
        + 2 \frac{\nabla_i D_n^{u}}{D_n^{u}} \frac{\nabla_i D_n^{d}}{D_n^{d}} \right)
        D_n^{u} D_n^{d}

    Since the backflow orbitals are multi-electronic, the laplacian of
    the determinants is obtained as

    .. math::
        \frac{\Delta \det(A)}{\det(A)} = Tr(A^{-1} \Delta A)
        + Tr(A^{-1} \nabla A) Tr(A^{-1} \nabla A)
        + Tr((A^{-1} \nabla A)(A^{-1} \nabla A))

    Args:
        x (torch.tensor): sampling points (Nbatch, 3*Nelec)

    Returns:
        torch.tensor: values of the kinetic energy at each sampling point
    """

    # get ao values
    ao, dao, d2ao = self.ao(x, derivative=[0, 1, 2], sum_grad=False)

    # get the mo values
    mo = self.ao2mo(ao)
    dmo = self.ao2mo(dao)
    d2mo = self.ao2mo(d2ao)

    # compute the value of the slater det
    slater_dets = self.pool(mo)
    sum_slater_dets = self.fc(slater_dets)

    # compute ( tr(A_u^-1 \Delta A_u) + tr(A_d^-1 \Delta A_d) )
    hess = self.pool.operator(mo, d2mo)

    # compute ( tr(A_u^-1 \nabla A_u) and tr(A_d^-1 \nabla A_d) )
    grad = self.pool.operator(mo, dmo, op=None)

    # compute ( tr((A_u^-1 \nabla A_u)^2) + tr((A_d^-1 \nabla A_d)^2) )
    grad2 = self.pool.operator(mo, dmo, op_squared=True)

    # assemble the total second derivative term
    hess = (hess.sum(0)
            + operator.add(*[(g**2).sum(0) for g in grad])
            - grad2.sum(0)
            + 2 * operator.mul(*grad).sum(0))

    hess = self.fc(hess * slater_dets) / sum_slater_dets

    if self.use_jastrow is False:
        return -0.5 * hess

    # compute the Jastrow terms
    jast, djast, d2jast = self.jastrow(x, derivative=[0, 1, 2], sum_grad=False)

    # prepare the second derivative term d2Jast/Jast
    # Nbatch x Nelec
    d2jast = d2jast / jast

    # prepare the first derivative term
    djast = djast / jast.unsqueeze(-1)

    # -> Nelec x Ndim x Nbatch
    djast = djast.permute(2, 1, 0)

    # -> [Nelec*Ndim] x Nbatch
    djast = djast.reshape(-1, djast.shape[-1])

    # prepare the grad of the dets
    # [Nelec*Ndim] x Nbatch x 1
    grad_val = self.fc(operator.add(*grad) * slater_dets) / sum_slater_dets

    # [Nelec*Ndim] x Nbatch
    grad_val = grad_val.squeeze()

    # assemble the derivative terms
    out = d2jast.sum(-1) + 2 * (grad_val * djast).sum(0) + hess.squeeze(-1)

    return -0.5 * out.unsqueeze(-1)
def pi_term(k):
    return 8 / mul(k * 4 - 3, k * 4 - 1)
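
# Usage sketch: summing pi_term over k = 1..n approximates pi, since
# 8 / ((4k - 3)(4k - 1)) telescopes into four times the Leibniz series.
from operator import mul
approx = sum(pi_term(k) for k in range(1, 10001))
# approx is about 3.14154 and approaches pi as more terms are added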
import operator as op

a = 5
b = 3

print('add : ', op.add(a, b))
print('sub : ', op.sub(a, b))
print('mul : ', op.mul(a, b))
print('true div : ', op.truediv(a, b))    # a/b
print('floor div : ', op.floordiv(a, b))  # a//b
print('pow : ', op.pow(a, b))             # a**b
print('MOD : ', op.mod(a, b))

# lt() : less than
# using lt() to check if a is less than b
if op.lt(a, b):
    print(str(a) + " is less than " + str(b))
else:
    print(str(a) + " is not less than " + str(b))

# le() : less than or equal
# using le() to check if a is less than or equal to b
if op.le(a, b):
    print(str(a) + " is less than or equal to " + str(b))
else:
    print(str(a) + " is not less than or equal to " + str(b))

# eq() : equal
# using eq() to check if a is equal to b
if op.eq(a, b):
    print(str(a) + " is equal to " + str(b))
else:
    print(str(a) + " is not equal to " + str(b))
def square(x):         # x is a formal parameter
    return mul(x, x)   # returns x * x using the built-in func mul
def squared(squared):
    return mul(squared, squared)