def similarity(self, x, y):
    total, XSTEP, YSTEP = len(self.matrix), 6 if x < 30 else 3, 6
    # Reference block at the bottom of the matrix and the block ending at
    # row y, both XSTEP columns wide.
    base = self.matrix[total - YSTEP : total, x : x + XSTEP]
    varxy = self.matrix[y - YSTEP : y, x : x + XSTEP]
    ca = base - varxy
    # Total absolute difference over the first five rows of the block.
    sim = sum(sum(abs(ca[0:5])))
    return (y, sim, (x, y), varxy[5:6][0].tolist())

def add_axis(self, heights, primary_axis, log=False):
    """Add a new plot area to the figure.

    The x axis is always shared. To plot two charts on top of each other,
    this method only needs to be called once, even if their y-axis scaling
    differs. It returns an axis identifier that can be used for plotting:
    ax_id.plot(something). For stacked charts, call this method multiple
    times; the first axis will be at the bottom.
    """
    top, bottom, left, right, space = self.margins
    arguments = {"axisbg": "#fafafa"}
    if log:
        arguments["yscale"] = "log"
    if not hasattr(self, "i_axis"):
        self.i_axis = 0
    else:
        self.i_axis += 1
    arguments["sharex"] = primary_axis
    # Vertical space available per unit of requested height, after the
    # margins and the inter-axis spacing are taken out.
    c = (1 - top - bottom - space * (len(heights) - 1)) / sum(heights)
    height = c * heights[self.i_axis]
    bottom += sum(c * h for h in heights[: self.i_axis]) + self.i_axis * space
    width = 1 - left - right
    axis = self.fig.add_axes([left, bottom, width, height], **arguments)
    return axis
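
# A minimal usage sketch for stacked axes. Assumptions: the method lives on a
# host object exposing `fig` and `margins`; the margin values and data below
# are invented; and the "axisbg" keyword means this targets older Matplotlib
# (it was renamed to facecolor in newer releases).
import types

import matplotlib.pyplot as plt

host = types.SimpleNamespace(fig=plt.figure(), margins=(0.05, 0.10, 0.10, 0.05, 0.04))
host.add_axis = types.MethodType(add_axis, host)
heights = [1, 2]  # bottom axis half as tall as the top one
ax_bottom = host.add_axis(heights, None)
ax_top = host.add_axis(heights, ax_bottom, log=True)
ax_bottom.plot([1, 2, 3], [3, 1, 2])
ax_top.plot([1, 2, 3], [10, 100, 1000])
plt.show()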

from math import sqrt


def pearson(data, p1, p2):
    # Items rated by both p1 and p2.
    sim = {}
    for item in data[p1]:
        if item in data[p2]:
            sim[item] = 1
    n = float(len(sim))
    if n == 0:
        return 0
    sum1 = sum(float(data[p1][it]) for it in sim)
    sum2 = sum(float(data[p2][it]) for it in sim)
    sum1Sq = sum(float(data[p1][it]) ** 2 for it in sim)
    sum2Sq = sum(float(data[p2][it]) ** 2 for it in sim)
    pSum = sum(float(data[p1][it]) * float(data[p2][it]) for it in sim)
    num = pSum - (sum1 * sum2 / n)
    den = sqrt((sum1Sq - sum1 ** 2 / n) * (sum2Sq - sum2 ** 2 / n))
    if den == 0:
        return 0
    r = num / den
    return r
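
# Usage sketch: Pearson correlation between two users' ratings over the items
# they have in common (the rating data below is invented for illustration).
ratings = {
    "alice": {"a": 4.0, "b": 3.0, "c": 5.0},
    "bob": {"a": 5.0, "b": 2.0, "c": 5.0, "d": 1.0},
}
print(pearson(ratings, "alice", "bob"))  # ~0.87: similar tastes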

def make_mapping(partition, grouping):
    """Return a mapping from micro-states to macro-states based on the
    partition of elements and grouping of states.

    Args:
        partition (list(list)): A partition of micro-elements into macro
            elements.
        grouping (list(list(list))): For each macro-element, a list of
            micro-states which set it to ON or OFF.

    Returns:
        mapping (``np.ndarray``): A mapping from micro-states to
            macro-states.
    """
    num_macro_nodes = len(grouping)
    num_micro_nodes = sum(len(part) for part in partition)
    num_micro_states = 2 ** num_micro_nodes
    micro_states = [
        convert.loli_index2state(micro_state_index, num_micro_nodes)
        for micro_state_index in range(num_micro_states)
    ]
    mapping = np.zeros(num_micro_states)
    # For every micro-state, find the corresponding macro-state and add it to
    # the mapping.
    for micro_state_index, micro_state in enumerate(micro_states):
        # Count the micro-elements that are ON within each macro-element.
        micro_sum = [
            sum(micro_state[node] for node in partition[i])
            for i in range(num_macro_nodes)
        ]
        # Check whether that count corresponds to the macro-element being
        # ON or OFF.
        macro_state = [
            0 if micro_sum[i] in grouping[i][0] else 1
            for i in range(num_macro_nodes)
        ]
        # Record the mapping.
        mapping[micro_state_index] = convert.state2loli_index(macro_state)
    return mapping

import numpy as np


def gradient_descent(alpha, x, y, ep=0.00001, max_iter=10000):
    converged = False
    n_iter = 0
    m = x.shape[0]
    # Random initial hypothesis h(x) = t0 + t1 * x.
    t0 = np.random.random(x.shape[1])
    t1 = np.random.random(x.shape[1])
    # Mean squared error J(theta) for the initial parameters.
    J = 1.0 / (2 * m) * sum((t0 + t1 * x[i] - y[i]) ** 2 for i in range(m))
    while not converged:
        # Partial derivatives with respect to theta0 and theta1.
        d0 = alpha * 1.0 / m * sum(t0 + t1 * x[i] - y[i] for i in range(m))
        d1 = alpha * 1.0 / m * sum((t0 + t1 * x[i] - y[i]) * x[i] for i in range(m))
        # Simultaneous update.
        t0, t1 = t0 - d0, t1 - d1
        print(t0, t1)
        # Recalculate the mean squared error.
        newJ = 1.0 / (2 * m) * sum((t0 + t1 * x[i] - y[i]) ** 2 for i in range(m))
        if abs(J - newJ) <= ep:
            converged = True
            print("Converged, num iterations:", n_iter)
        J = newJ
        n_iter += 1
        if n_iter == max_iter:
            print("Max iterations exceeded,", n_iter)
            converged = True
    return t0, t1
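
# Usage sketch: recover y ~ 2 + 3x from noisy synthetic data. The shapes match
# what the function expects (x of shape (m, 1)); alpha, ep, and max_iter are
# invented values, and progress is printed every step as in the original.
import numpy as np

rng = np.random.RandomState(0)
x = rng.rand(100, 1)
y = 2 + 3 * x[:, 0] + 0.05 * rng.randn(100)
t0, t1 = gradient_descent(0.1, x, y, ep=1e-6, max_iter=2000)
print("intercept ~", t0, "slope ~", t1)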

def order_view(request):
    shopuser = ShopUser.from_request(request)
    shopinfo = ShopInfo()
    context = {"shopinfo": shopinfo, "shopuser": shopuser}
    if request.method == "POST":
        return order_post(request, shopuser)
    offices = {}
    per_office = {}
    for order in Order.objects.filter(characterid=shopuser.characterid, closed=None):
        offices[order.office.id] = order.office
        per_office.setdefault(order.office.id, []).append(order)
    full_list = sorted(per_office.items())
    context["full_list"] = [
        (offices[office_id], order_list, sum(o.price_total for o in order_list))
        for (office_id, order_list) in full_list
    ]
    context["total"] = sum(
        office_total for (office, order_list, office_total) in context["full_list"]
    )
    old_list = (
        Order.objects.filter(characterid=shopuser.characterid)
        .exclude(closed=None)
        .filter(closed__gt=(datetime.datetime.utcnow() - datetime.timedelta(days=28)))
        .order_by("-closed")
    )
    context["old_list"] = old_list
    return direct_to_template(request, "shop/order.html", extra_context=context)

def discount_apply(self):
    if self.discount_method == "amount":
        account_invoice_tax = self.env["account.invoice.tax"]
        self.global_discount = 0.0
        ctx = dict(self._context)
        for invoice in self:
            amount_tax_without = 0.0
            self._cr.execute(
                "DELETE FROM account_invoice_tax WHERE invoice_id=%s AND manual is False",
                (invoice.id,),
            )
            self.invalidate_cache()
            partner = invoice.partner_id
            if partner.lang:
                ctx["lang"] = partner.lang
            for taxe in account_invoice_tax.compute(invoice.with_context(ctx)).values():
                account_invoice_tax.create(taxe)
            amount_tax = sum(line.amount for line in self.tax_line)
            amount_untaxed = sum(line.price_subtotal for line in self.invoice_line)
            amount_total = self.amount_untaxed + self.amount_tax
            # Taxes with a zero base are excluded from the discount base.
            for line in self.tax_line:
                if line.base == 0.0:
                    amount_tax_without += line.amount
            if amount_total - amount_tax_without == 0:
                self.global_discount = 100
            else:
                self.global_discount = (
                    self.global_discount_amount * 100 / (amount_total - amount_tax_without)
                )
    else:
        self.global_discount_amount = 0.0
        for line in self.invoice_line:
            self.global_discount_amount += (line.price_unit * line.quantity) * (
                self.global_discount / 100
            )
    self.button_reset_taxes()
    self.amount_in_word = amount_to_text(self.amount_total, lang="fr", currency="Dinars")
    return True

def posAngNeighbors(self, neighbors):
    # With no neighbors, keep the current position and angle.
    if len(neighbors) == 0:
        return (self.position[0], self.position[1], self.angle)
    # Tuples of (x, y, angle, weight), where the weight favors neighbors of
    # similar darkness and closer distance.
    colorPos = [
        (
            hammy.position[0],
            hammy.position[1],
            hammy.angle,
            3.0 * (1 - abs(self.darkness - hammy.darkness))
            + 1.5 * (self.neighborRadius - self.distTo(hammy)),
        )
        for hammy in neighbors
    ]
    # Sum of the weights.
    weightSum = sum(hammy[3] for hammy in colorPos)
    # Weighted average position and angle.
    posAng = (
        sum(hammy[0] * hammy[3] for hammy in colorPos) / weightSum,
        sum(hammy[1] * hammy[3] for hammy in colorPos) / weightSum,
        sum(hammy[2] * hammy[3] for hammy in colorPos) / weightSum,
    )
    return posAng

def as_poly(self, mz=None, sub_base=False):
    # TODO: should allow AstonFrames in PeakComponents some day?
    # if type(self.trace) is AstonSeries:
    #     if mz is not None:
    #         assert mz == self.trace.name
    #     trace = self.trace
    # else:
    #     if mz is None:
    #         mz = self.primary_mz
    #     elif mz not in self.trace.columns:
    #         return AstonSeries()
    #     trace = self.trace[mz]
    # if self.baseline is None:
    #     b_trace = None
    # elif type(self.baseline) is AstonSeries:
    #     b_trace = self.baseline
    # else:
    #     b_trace = self.baseline[mz]
    if mz in {"", "x", "tic", None}:
        # sum up all the components
        trace = self.components[0].trace
        b_trace = self.components[0].baseline
        for c in self.components[1:]:
            trace += c.trace
            if c.baseline is not None:
                # TODO: this fails if the first component's baseline is None
                b_trace += c.baseline
    else:
        TOL = 0.5

        def check(name):
            try:
                return np.abs(mz - float(name)) < TOL
            except (TypeError, ValueError):
                return False

        cs = [c for c in self.components if check(c._trace.name)]
        trace = sum(c.trace for c in cs)
        b_trace = sum(c.baseline for c in cs)
    # merge the trace and baseline
    if sub_base and b_trace is not None:
        trace -= np.interp(trace.index, b_trace.index, b_trace.values)
        t, z = trace.index, trace.values
    elif b_trace is None:
        t, z = trace.index, trace.values
    else:
        t = np.hstack([trace.index, b_trace.index[::-1]])
        z = np.hstack([trace.values, b_trace.values[::-1]])
    if hasattr(self, "dbplot"):
        # scale and offset according to parent
        t *= self.dbplot.x_scale
        t += self.dbplot.x_offset
        z *= self.dbplot.y_scale
        z += self.dbplot.y_offset
    return np.vstack([t, z]).T

def main():
    N = 10000
    amic = []
    for i in range(N + 1):
        if is_amicable(i):
            amic.append(i)
    print(sum(amic))
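
# is_amicable is not defined in this snippet; a plausible sketch follows,
# assuming the usual Project Euler definition: with d(n) the sum of proper
# divisors, n is amicable if d(n) != n and d(d(n)) == n.
def is_amicable(n):
    def d(m):
        # Sum of proper divisors of m, found in pairs up to sqrt(m).
        if m < 2:
            return 0
        total = 1
        i = 2
        while i * i <= m:
            if m % i == 0:
                total += i
                if i != m // i:
                    total += m // i
            i += 1
        return total

    partner = d(n)
    return partner != n and d(partner) == n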

import numpy as np


def itrace(a, axes=(0, 1)):
    """General tensor trace, i.e. multiple contractions.

    Parameters
    ----------
    a : np.ndarray
        Tensor to trace.
    axes : (2,) int_like or (2,) array_like
        * (2,) int_like
            Perform the trace on the two indices listed.
        * (2,) array_like
            Trace out the first sequence of indices with the second
            sequence of indices.
    """
    # Single index pair to trace out
    if isinstance(axes[0], int):
        return np.trace(a, axis1=axes[0], axis2=axes[1])
    elif len(axes[0]) == 1:
        return np.trace(a, axis1=axes[0][0], axis2=axes[1][0])
    # Multiple index pairs to trace out
    gone = set()
    for axis1, axis2 in zip(*axes):
        # Shift the indices to account for dimensions already traced out.
        mod1 = sum(x < axis1 for x in gone)
        mod2 = sum(x < axis2 for x in gone)
        gone |= {axis1, axis2}
        a = np.trace(a, axis1=axis1 - mod1, axis2=axis2 - mod2)
    return a
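
# Usage sketch: trace out two index pairs of a rank-4 tensor and check the
# result against an explicit einsum contraction (the array is random data).
import numpy as np

a = np.random.rand(3, 4, 3, 4)
out = itrace(a, axes=((0, 1), (2, 3)))  # contract axis 0 with 2, 1 with 3
assert np.allclose(out, np.einsum("ijij->", a))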

from numpy import array, tile


def _non_dominated_front_arr(iterable, key=lambda x: x, allowequality=True):
    """Return a subset of items from iterable which are not dominated by any
    other item in iterable.

    Faster version, based on boolean matrix manipulations.
    """
    items = list(iterable)
    fits = list(map(key, items))
    l = len(items)
    x = array(fits)
    # a[i, j] = fitness of item j; b[i, j] = fitness of item i.
    a = tile(x, (l, 1, 1))
    b = a.transpose((1, 0, 2))
    if allowequality:
        ndom = (a <= b).sum(axis=2)
    else:
        ndom = (a < b).sum(axis=2)
    ndom = array(ndom, dtype=bool)
    res = set()
    for ii in range(l):
        res.add(ii)
        for ij in list(res):
            if ii == ij:
                continue
            if not ndom[ij, ii]:
                res.remove(ii)
                break
            elif not ndom[ii, ij]:
                res.remove(ij)
    return set(map(lambda i: items[i], res))
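
# Usage sketch (the points are invented). The removal logic implies
# minimization semantics: a point is dropped only if another point is
# strictly smaller in every objective.
pts = [(1, 5), (2, 2), (4, 1), (3, 3), (5, 5)]
front = _non_dominated_front_arr(pts)
print(sorted(front))  # (3, 3) and (5, 5) are dominated by (2, 2) and dropped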

from numpy import linalg, zeros


def fitPolynomial(xdata, ydata, order=2):
    """Fit a polynomial of the given order that matches the given x and y
    coordinates.

    This function takes the x and y coordinates separately, and their
    lengths must match.
    """
    assert len(xdata) == len(ydata)
    # Right-hand side of the normal equations (xWithY presumably returns
    # y * x**xpower elementwise, so X[i] = sum(y * x**i)).
    X = []
    for i in range(order + 1):
        X.append(sum(xWithY(xdata, ydata, xpower=i)))
    # Moment matrix A[i][j] = sum(x**(i + j)); entries on the same
    # anti-diagonal are equal, so previously computed values are reused.
    A = zeros((order + 1, order + 1))
    for i in range(order + 1):
        for j in range(order + 1):
            if (i - 1) >= 0 and (j + 1) <= order:
                A[i][j] = A[i - 1][j + 1]
            else:
                k = i + j
                A[i][j] = sum(x ** k for x in xdata)
    A[0][0] = len(xdata)
    result = linalg.solve(A, X)
    return result
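
# Usage sketch. xWithY is not defined in this snippet; for the normal
# equations above to make sense it should return y * x**xpower elementwise,
# so a plausible stand-in is given here (an assumption, not the original).
def xWithY(xdata, ydata, xpower=0):
    return [y * x ** xpower for x, y in zip(xdata, ydata)]

xs = [0.0, 1.0, 2.0, 3.0, 4.0]
ys = [1.0, 2.0, 5.0, 10.0, 17.0]  # exactly 1 + x**2
print(fitPolynomial(xs, ys, order=2))  # ~ [1, 0, 1]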

def configure_plot(self, axes, shaft_length, a, i):
    """Configure the plot axes, titles, and displayed specifications.

    Parameters:
        axes: The subplot being made.
        shaft_length: The length of the entire shaft.
        a: A value to scale text to the plot.
        i: The iteration number, determining which shaft is being plotted.
    """
    c = self.constants
    sol = self.solution
    xmin = 0
    xmax = shaft_length
    ymin = 3 * -a
    ymax = 3 * a
    axes.axis([xmin, xmax, ymin, ymax])
    if i == 4:
        axes.set_title("Main Shaft")
        mass = sum(sol.mass[1:5])
    else:
        axes.set_title("Crankshaft")
        mass = sum(sol.mass[5:8])
    axes.set_xlabel("Length from Left Edge [m]")
    axes.set_ylabel("Radius [m]")
    # Plot design specifications.
    shaft_info = (
        "Total Mass: %.2f kg\nMinimum Required Safety Factor"
        " Against Yielding, Xo: %.2g\nMinimum Required Safety"
        " Factor Against Fatigue, Xs: %.2g\nMaximum Allowable"
        " Angle of Twist: %.2g rad\nDesired Service Life: %.2g"
        " days" % (mass, c.sf_o, c.sf_s, c.twist_max, c.life)
    )
    axes.text(xmax / 2, a, shaft_info, ha="center", fontsize=11)

import math


def StandardDeviation(q):
    """Return the mean, population standard deviation, and coefficient of
    variation of the sequence q."""
    mean = sum(q) / float(len(q))
    var = [(x - mean) ** 2 for x in q]
    sd = math.sqrt(sum(var) / float(len(q)))
    cv = sd / mean
    return mean, sd, cv
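
# Usage sketch with invented data. Note this is the population (not sample)
# standard deviation, and cv divides by the mean, so a zero-mean input raises.
mean, sd, cv = StandardDeviation([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0])
print(mean, sd, cv)  # 5.0 2.0 0.4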

def estimate_precision_and_recall(self, h, data):
    """Re-implementation: return how accurate h is at predicting the
    adjacent grammar."""
    # TODO: the number of data points is fixed
    # TODO: uses `data` for convenience
    num = 1024.0 / len(self.str_sets)
    output_t = {}
    for k in self.str_sets:
        output_t[self.de_ht(k)] = num
    # h_out = Counter([h() for _ in range(1024)])
    h_out = data
    h_out_t = {}
    for k, v in h_out.items():
        h_out_t[self.de_ht(k)] = v
    # Precision: the mass of h's output that falls inside the target set.
    base = sum(h_out_t.values())
    cnt = 0.0
    for k, v in h_out_t.items():
        if k in output_t:
            cnt += v
    precision = cnt / base
    # Recall: the mass of the target set that h's output covers.
    base = sum(output_t.values())
    cnt = 0.0
    for k, v in output_t.items():
        if k in h_out_t:
            cnt += v
    recall = cnt / base
    return precision, recall

def write_file(self, qresults):
    """Writes to the handle, returns how many QueryResult objects are written."""
    handle = self.handle
    qresult_counter, hit_counter, hsp_counter, frag_counter = 0, 0, 0, 0
    for qresult in qresults:
        if self.has_comments:
            handle.write(self._build_comments(qresult))
        if qresult:
            handle.write(self._build_rows(qresult))
            if not self.has_comments:
                qresult_counter += 1
            hit_counter += len(qresult)
            hsp_counter += sum(len(hit) for hit in qresult)
            frag_counter += sum(len(hit.fragments) for hit in qresult)
        # if it's commented and there are no hits in the qresult, we still
        # increment the counter
        if self.has_comments:
            qresult_counter += 1
    # commented files have a line saying how many queries were processed
    if self.has_comments:
        handle.write("# BLAST processed %i queries" % qresult_counter)
    return qresult_counter, hit_counter, hsp_counter, frag_counter

def update_W_eu(self, W_eu):
    if not self.permute_ambiguous:
        return
    # Initialization on the first call.
    if not hasattr(self, "A_neurons"):
        self.A_neurons = where(W_eu.W[:, self.lookup["A"]] == 1)[0]
        self.B_neurons = where(W_eu.W[:, self.lookup["B"]] == 1)[0]
        self.N_u = int(len(self.A_neurons))
        assert self.N_u == len(self.B_neurons)
        self.N_A = {}
        self.N_B = {}
        for letter in [word[0] for word in self.words]:
            letter_index = self.lookup[letter]
            self.N_A[letter] = sum(W_eu.W[self.A_neurons, letter_index]).round().astype(int)
            self.N_B[letter] = sum(W_eu.W[self.B_neurons, letter_index]).round().astype(int)
    # When a new word is presented, permute its input.
    # Careful! A simple permutation goes crazy when inputs overlap.
    if self.ind == 0:
        letter = self.char()
        if letter in ["A", "B"]:
            return
        letter_index = self.index()
        # First set both to zero so that the second doesn't set
        # units of the first back to zero (overlap case).
        W_eu.W[self.A_neurons, letter_index] *= 0
        W_eu.W[self.B_neurons, letter_index] *= 0
        W_eu.W[self.A_neurons[smpl(range(self.N_u), self.N_A[letter])], letter_index] = 1
        W_eu.W[self.B_neurons[smpl(range(self.N_u), self.N_B[letter])], letter_index] = 1

def mdclassify(observation, tree):
    if tree.results is not None:
        return tree.results
    v = observation[tree.col]
    if v is None:
        # Missing value: follow both branches and weight each result by the
        # fraction of training rows that went that way.
        tr = mdclassify(observation, tree.tb)
        fr = mdclassify(observation, tree.fb)
        tcount = sum(tr.values())
        fcount = sum(fr.values())
        tw = float(tcount) / (tcount + fcount)
        fw = float(fcount) / (tcount + fcount)
        result = {}
        for k, count in tr.items():
            result[k] = count * tw
        for k, count in fr.items():
            # Accumulate rather than overwrite keys present in both branches.
            result[k] = result.get(k, 0) + count * fw
        return result
    if isinstance(v, (int, float)):
        branch = tree.tb if v >= tree.value else tree.fb
    else:
        branch = tree.tb if v == tree.value else tree.fb
    return mdclassify(observation, branch)
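
# Usage sketch. The tree node type is not defined in this snippet; the
# decisionnode below only follows the attribute names the function relies on
# (col, value, results, tb, fb), and the data is invented.
class decisionnode:
    def __init__(self, col=-1, value=None, results=None, tb=None, fb=None):
        self.col = col          # column index tested at this node
        self.value = value      # threshold / match value
        self.results = results  # leaf: dict of class -> count
        self.tb = tb            # "true" branch
        self.fb = fb            # "false" branch

leaf_yes = decisionnode(results={"yes": 3})
leaf_no = decisionnode(results={"no": 2})
tree = decisionnode(col=0, value=18, tb=leaf_yes, fb=leaf_no)
print(mdclassify([None], tree))  # missing value: weighted mix of both leaves
print(mdclassify([25], tree))    # {'yes': 3}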

def _mle(N, M, training_outputs, training_states, pseudo_initial,
         pseudo_transition, pseudo_emission):
    # p_initial is the probability that a sequence of states starts
    # off with a particular one.
    p_initial = numpy.zeros(N)
    if pseudo_initial:
        p_initial = p_initial + pseudo_initial
    for states in training_states:
        p_initial[states[0]] += 1
    p_initial = _normalize(p_initial)
    # p_transition is the probability that a state leads to the next
    # one.  C(i,j)/C(i) where i and j are states.
    p_transition = numpy.zeros((N, N))
    if pseudo_transition:
        p_transition = p_transition + pseudo_transition
    for states in training_states:
        for n in range(len(states) - 1):
            i, j = states[n], states[n + 1]
            p_transition[i, j] += 1
    for i in range(len(p_transition)):
        p_transition[i, :] = p_transition[i, :] / sum(p_transition[i, :])
    # p_emission is the probability of an output given a state.
    # C(s,o)|C(s) where o is an output and s is a state.
    p_emission = numpy.zeros((N, M))
    if pseudo_emission:
        p_emission = p_emission + pseudo_emission
    # Add-one smoothing on top of the pseudo-counts, so that no emission
    # ends up with zero probability.
    p_emission = p_emission + numpy.ones((N, M))
    for outputs, states in zip(training_outputs, training_states):
        for o, s in zip(outputs, states):
            p_emission[s, o] += 1
    for i in range(len(p_emission)):
        p_emission[i, :] = p_emission[i, :] / sum(p_emission[i, :])
    return p_initial, p_transition, p_emission
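
# Usage sketch for the estimator above. _normalize is not defined in this
# snippet; a plausible stand-in (divide by the total) is given here, and the
# two-state / two-output training sequences are invented.
import numpy


def _normalize(v):
    return v / sum(v)


train_states = [[0, 0, 1, 1], [0, 1, 1, 0]]
train_outputs = [[0, 0, 1, 1], [0, 1, 1, 0]]
p_i, p_t, p_e = _mle(2, 2, train_outputs, train_states, None, None, None)
print(p_i)  # both sequences start in state 0
print(p_t)  # row-normalized transition counts
print(p_e)  # smoothed emission probabilities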

def get_value(k):
    p = MixedIntegerLinearProgram(maximization=True)
    a = p.new_variable(real=True)
    p.add_constraint(sum(a[i] * (i - 1) for i in range(1, k + 2)) == 1)
    for j in range(1, k + 1):
        L = []
        for t in range(1, j):
            L.append(-k + t)
        L.append(0)
        for t in range(j + 1, k + 2):
            L.append(t - 1)
        print(L)
        p.add_constraint(sum(L[i - 1] * a[i] for i in range(1, k + 2)) >= 0)
    # Non-negativity of the variables (the loop variable is j here).
    for j in range(1, k + 1):
        p.add_constraint(a[j] >= 0)
    cur = p.get_backend()
    cur.set_verbosity(3)
    # for j in xrange(k):
    #     for i in xrange(j + 1):
    #         p.add_constraint(res[(i, j)] >= alpha[i] - d[j])
    # for i in xrange(k):
    #     p.add_constraint(sum(res[(i, j)] for j in range(i, k)) <= f[0])
    print(p.constraints())
    p.set_objective(sum(a[j] for j in range(1, k + 2)))
    print(p.solve())
    x = p.get_values(a)
    for key in x.keys():
        print(key, x[key])

def get_metric_statistics(self, period, start_time, end_time, metric_name,
                          statistics, dimensions=None):
    metrics = {}
    if dimensions is None:
        dimensions = {}
    instanceid = dimensions.get("InstanceId")
    domainid = dimensions.get("DomainId")
    if instanceid is None and domainid is None:
        index = None
    elif isinstance(instanceid, str):
        index = instanceid
    elif isinstance(domainid, str):
        index = domainid
    else:
        index = instanceid[0]
    try:
        average = sum(self.series_data) / len(self.series_data)
    except ZeroDivisionError:
        average = 0
    metrics[index] = {
        Statistics.SERIES: self.series_data,
        Statistics.AVERAGE: average,
        Statistics.MAXIMUM: max(self.series_data),
        Statistics.MINIMUM: min(self.series_data),
        Statistics.SUM: sum(self.series_data),
        Statistics.SAMPLE_COUNT: len(self.series_data),
    }
    return metrics

def make_macro_tpm(micro_tpm, mapping):
    """Create the macro TPM for a given mapping from micro- to macro-states.

    Args:
        micro_tpm (np.ndarray): The TPM of the micro-system.
        mapping (np.ndarray): A mapping from micro-states to macro-states.

    Returns:
        macro_tpm (``np.ndarray``): The TPM of the macro-system.
    """
    # Validate the TPM
    validate.tpm(micro_tpm)
    # Convert to state-by-state form if needed.
    if (micro_tpm.ndim > 2) or (micro_tpm.shape[0] != micro_tpm.shape[1]):
        micro_tpm = convert.state_by_node2state_by_state(micro_tpm)
    # Indices must be integers.
    mapping = np.asarray(mapping, dtype=int)
    num_macro_states = int(max(mapping)) + 1
    num_micro_states = len(micro_tpm)
    macro_tpm = np.zeros((num_macro_states, num_macro_states))
    # For every possible micro-state transition, get the corresponding past
    # and current macro-state using the mapping, and add that probability to
    # the state-by-state macro TPM.
    micro_state_transitions = itertools.product(
        range(num_micro_states), range(num_micro_states)
    )
    for past_state_index, current_state_index in micro_state_transitions:
        macro_tpm[mapping[past_state_index], mapping[current_state_index]] += micro_tpm[
            past_state_index, current_state_index
        ]
    # Because we're going from a bigger TPM to a smaller TPM, we have to
    # re-normalize each row.
    return np.array(
        [list(row) if sum(row) == 0 else list(row / sum(row)) for row in macro_tpm]
    )
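
# Usage sketch, assuming this function sits in its original module so that
# np, itertools, validate, and convert (pyphi) are available, and that
# validate.tpm accepts a state-by-state TPM as the original module did.
# Micro-states 0/1 map to macro-state 0 and micro-states 2/3 to macro-state 1.
micro = np.array([
    [0.5, 0.5, 0.0, 0.0],
    [0.5, 0.5, 0.0, 0.0],
    [0.0, 0.0, 0.5, 0.5],
    [0.0, 0.0, 0.5, 0.5],
])
mapping = np.array([0, 0, 1, 1])
print(make_macro_tpm(micro, mapping))  # expected: the 2x2 identity matrix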

def graphml2mat(ingraph, outgraph, prune=False):
    ing = Graph.Read_GraphML(ingraph)
    if sum(ing.es()[:]["weight"]) < 500000:
        print("bad graph? ecount=", sum(ing.es()[:]["weight"]))
        print("filename=", ingraph)
        return
    # currently being done in graphgen so don't need to delete vertex 0
    # ing.vs[0].delete()
    if prune:
        # delete zero-degree nodes
        # GK TODO: be smarter
        i = list()
        for n, v in enumerate(ing.vs):
            if v.degree() == 0:
                i.append(n)
        ing.vs[i].delete()
    outg = lil_matrix((ing.vcount(), ing.vcount()))
    for e in ing.es:
        # since edges are undirected, add both ways
        outg[e.source, e.target] = e["weight"]
        outg[e.target, e.source] = e["weight"]
    outg = triu(outg)
    mat_dict = {"graph": outg}
    savemat(outgraph, mat_dict)

def compute_geo(self, rat="all", ldp="all", now=0.0, pe=True):
    """Compute position with the geometric algorithm.

    Returns
    -------
    True if an estimated position has been computed.
    """
    if sum(self.cla.usable) >= 2:
        cpe = self.cla.compute(pe=pe)
        if cpe:
            self.savep(self.cla.pe, name="pe")
            self.savep(self.cla.pe, name="pe_geo")
            self.net.node[self.ID]["PN"].node[self.ID]["te"] = now  # estimated time
            return True
        return False
    # in case of a lack of observables
    elif sum(self.cla.usable) >= 1:
        cpe = self.cla.compute(pe=pe)
        if cpe:
            self.savep(np.array(self.cla.pecluster), now=now, name="pe_clust")
            self.net.node[self.ID]["PN"].node[self.ID]["te"] = now  # estimated time
            return True
        return False
    else:
        return False

from numpy import log


def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    # Log-probability of each class: the elementwise product picks out the
    # log-probabilities of the words present in the input vector.
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)
    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    if p1 > p0:
        return 1
    else:
        return 0
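
# Usage sketch with an invented three-word vocabulary. p0Vec/p1Vec hold
# per-word log-probabilities (as produced by the naive Bayes training step
# this function is normally paired with); vec2Classify counts occurrences.
import numpy as np

p0Vec = np.log(np.array([0.6, 0.3, 0.1]))  # word probabilities under class 0
p1Vec = np.log(np.array([0.1, 0.3, 0.6]))  # word probabilities under class 1
print(classifyNB(np.array([0, 0, 1]), p0Vec, p1Vec, 0.5))  # 1: favors class 1
print(classifyNB(np.array([1, 0, 0]), p0Vec, p1Vec, 0.5))  # 0: favors class 0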

def search_cell(session, data):
    radio = RADIO_TYPE.get(data["radio"], -1)
    cells = []
    for cell in data["cell"]:
        cell = normalized_cell_dict(cell, default_radio=radio)
        if not cell:
            continue
        key = to_cellkey(cell)
        query = (
            session.query(Cell.lat, Cell.lon, Cell.range)
            .filter(*join_cellkey(Cell, key))
            .filter(Cell.lat.isnot(None))
            .filter(Cell.lon.isnot(None))
        )
        result = query.first()
        if result is not None:
            cells.append(Network(key, *result))
    if not cells:
        return
    length = len(cells)
    avg_lat = sum(c.lat for c in cells) / length
    avg_lon = sum(c.lon for c in cells) / length
    return {
        "lat": quantize(avg_lat),
        "lon": quantize(avg_lon),
        "accuracy": estimate_accuracy(avg_lat, avg_lon, cells, CELL_MIN_ACCURACY),
    }

def test_eudx():
    # read bvals, gradients, and data
    fimg, fbvals, fbvecs = get_data("small_64D")
    bvals = np.load(fbvals)
    gradients = np.load(fbvecs)
    img = ni.load(fimg)
    data = img.get_data()
    print(data.shape)
    gqs = GeneralizedQSampling(data, bvals, gradients)
    ten = Tensor(data, bvals, gradients, thresh=50)
    seed_list = np.dot(np.diag(np.arange(10)), np.ones((10, 3)))
    iT = iter(EuDX(gqs.qa(), gqs.ind(), seeds=seed_list))
    T = [t for t in iT]
    iT2 = iter(EuDX(ten.fa(), ten.ind(), seeds=seed_list))
    T2 = [t for t in iT2]
    print("length T ", sum(length(t) for t in T))
    print("length T2", sum(length(t) for t in T2))
    print(gqs.QA[1, 4, 8, 0])
    print(gqs.QA.ravel()[ndarray_offset(np.array([1, 4, 8, 0]), np.array(gqs.QA.strides), 4, 8)])
    assert_almost_equal(
        gqs.QA[1, 4, 8, 0],
        gqs.QA.ravel()[ndarray_offset(np.array([1, 4, 8, 0]), np.array(gqs.QA.strides), 4, 8)],
    )
    assert_almost_equal(sum(length(t) for t in T), 70.999996185302734, places=3)
    assert_almost_equal(sum(length(t) for t in T2), 56.999997615814209, places=3)

def exportSheetToImage(self, fileName):
    """exportSheetToImage() -> None
    Montage all the cell images and export them to a file.

    """
    (rCount, cCount) = self.getDimension()
    if rCount < 1 or cCount < 1:
        return
    cellHeights = [self.getCellRect(r, 0).height() for r in range(rCount)]
    cellWidths = [self.getCellRect(0, c).width() for c in range(cCount)]
    finalImage = QtGui.QImage(sum(cellWidths), sum(cellHeights), QtGui.QImage.Format_ARGB32)
    finalImage.fill(0xFFFFFFFF)
    painter = QtGui.QPainter(finalImage)
    y = 0
    for r in range(rCount):
        x = 0
        for c in range(cCount):
            widget = self.getCell(r, c)
            if widget:
                # Center each cell's pixmap within its cell rectangle.
                pix = widget.grabWindowPixmap()
                cx = (cellWidths[c] - pix.width()) // 2
                cy = (cellHeights[r] - pix.height()) // 2
                painter.drawPixmap(x + cx, y + cy, pix)
            x += cellWidths[c]
        y += cellHeights[r]
    painter.end()
    # forcing png format if no extension was provided
    (_, ext) = os.path.splitext(fileName)
    if ext == "":
        finalImage.save(fileName, "png")
    else:
        # try to guess the format based on the extension
        finalImage.save(fileName)

def solution_statement(self):
    lines = solutions.Lines()
    paths = MarkovChainBinomial.all_paths(
        self._qp["start_state"], self._qp["binomial"]["n_trials"]
    )
    if self._qp["binomial"]["cinema"] == 0:
        solution_paths = [
            i for i in paths if len(i) - sum(i) == self._qp["binomial"]["n_successes"]
        ]
    else:
        solution_paths = [i for i in paths if sum(i) == self._qp["binomial"]["n_successes"]]
    lines += r"""Let $X$ represent the number of times {first_name} goes to {target_cinema_name} over the ${number_of_weeks}$ {day_of_the_week}s""".format(
        first_name=self._qi["first_name"],
        target_cinema_name=self._qp["cinemas"][self._qp["binomial"]["cinema"]],
        number_of_weeks=self._qp["binomial"]["n_trials"],
        day_of_the_week=self._qi["day_of_the_week"],
    )
    path_strings = [
        MarkovChainBinomial.cinema_path(path, self._qp["cinemas"]) for path in solution_paths
    ]
    lines += r"""$Pr(X = {number_of_cinema_trips}) = {possible_paths}$""".format(
        number_of_cinema_trips=self._qp["binomial"]["n_successes"],
        possible_paths=" + ".join(path_strings),
    )
    numeric_strings = [
        MarkovChainBinomial.numeric_path(path, self._qp["transition_matrix"])
        for path in solution_paths
    ]
    lines += r"""$= {intermediate_paths}$""".format(intermediate_paths=" + ".join(numeric_strings))
    answer = sum(
        MarkovChainBinomial.path_value(path, self._qp["transition_matrix"])
        for path in solution_paths
    )
    # not sure how to make decimal.Decimal stop at the endless 0s of a decimal
    lines += r"""$= {answer}$""".format(answer=answer)
    return lines.write()