def check_hessenberg_tie_breaking(g, n_rows, n_cols, values, result, compare):
    if result.get('error_msg'):
        check(result, n_rows, n_cols, values)
        return
    #
    rowp, colp = result['rowp'], result['colp']
    rperm = [-1] * len(rowp)
    for i, r in enumerate(rowp):
        rperm[r] = i
    # Last occupied columns rowwise, empty rows allowed
    n_rows = len(rperm)
    last_elem = [
        max(colp[c - n_rows] for c in g[r]) if g[r] else -1 for r in rperm
    ]
    log('Last elems:', last_elem)
    row_weights = get_row_weights(g, n_rows)
    perm_weights = [row_weights[r] for r in rperm]
    # Checking if we really break ties if the last elements equal
    for i, ((c1, c2), (w1, w2)) in enumerate(
            zip(pairwise(last_elem), pairwise(perm_weights))):
        log('i:', i)
        log('Last elems:', c1, c2)
        log('Weights:', w1, w2)
        #assert (c1, w1) <= (c2, w2)
        assert compare(c1, w1, c2, w2)
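# Note: the snippets in this collection assume a `pairwise` helper that is not
# shown. Most of them iterate over overlapping neighbours, which matches the
# standard itertools recipe; the CONFIRM/MDS snippets instead call
# utils.pairwise(items, fn) and clearly expect a full pairwise matrix.
# Minimal sketches of both, under those assumptions (names and signatures here
# are illustrative, not taken from the original sources):
from itertools import tee

def pairwise(iterable):
    # s -> (s0, s1), (s1, s2), (s2, s3), ...
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)

def pairwise_matrix(items, fn):
    # utils.pairwise(items, fn)-style helper: apply fn to every ordered pair
    # of items and return the full square matrix of results.
    return [[fn(x, y) for y in items] for x in items]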
def compute_name_value_category(self):
    has_single_color = True
    for c1, c2 in pairwise(self.cards):
        if c1.color != c2.color:
            has_single_color = False

    is_suite = True
    for c1, c2 in pairwise(self.cards):
        if c1.number < c2.number - 1 or \
           c1.number == c2.number:
            is_suite = False

    is_set = True
    for c1, c2 in pairwise(self.cards):
        if c1.number != c2.number:
            is_set = False

    if has_single_color:
        if is_suite:
            # Color suite. Strongest combination.
            # The smallest card number gives its strength
            # relative to other color suites.
            self.name = "Color suite"
            self.category = 4
            self.value = self.cards[0].number
        else:
            # Color.
            # The sum of the card numbers gives its
            # strength relative to other colors.
            self.name = "Color"
            self.category = 2
            self.value = self.cards[0].number + \
                self.cards[1].number + \
                self.cards[2].number
    elif is_suite:
        # Suite.
        # The smallest card number gives its strength
        # relative to other suites.
        self.name = "Suite"
        self.category = 1
        self.value = self.cards[0].number
    elif is_set:
        # Set. Second strongest combination.
        # Any card number gives its strength
        # relative to other sets.
        self.name = "Set"
        self.category = 3
        self.value = self.cards[0].number
    else:
        # Sum: weakest combination.
        # The sum of the card numbers gives its
        # strength relative to other sums.
        self.name = "Sum"
        self.category = 0
        self.value = self.cards[0].number + \
            self.cards[1].number + \
            self.cards[2].number
def L_admissible(Gamma):
    # L-admissibility
    iotaGamma = [(-1)**i * g for i, g in enumerate(Gamma)]
    la = ((all(g > 0 for g in iotaGamma) or all(g < 0 for g in iotaGamma))
          and (all(f >= g for f, g in pairwise(iotaGamma))
               or all(f <= g for f, g in pairwise(iotaGamma))))
    if not la:
        print("Vorticities {} are not L-admissible.".format(Gamma))
    return la
def check_nondecreasing_row_weights(bip, rowp, colp, row_weights):
    # Last occupied columns rowwise, empty rows allowed
    c_index = {name: i for i, name in enumerate(colp)}
    last_elem = [max(c_index[c] for c in bip[r]) if bip[r] else -1 for r in rowp]
    log('Last elems:', last_elem)
    perm_weights = [row_weights[r] for r in rowp]
    # Checking if we really break ties if the last elements equal
    for i, ((c1, c2), (w1, w2)) in enumerate(zip(pairwise(last_elem),
                                                 pairwise(perm_weights))):
        log('i:', i)
        log('Last elems:', c1, c2)
        log('Weights:', w1, w2)
        assert (c1, w1) <= (c2, w2)
def __main__():
    with open(sys.argv[1]) as f:
        cfg = parse_dot(f)
    all_nodes = sorted(cfg.nodes(), key=natural_sort_key)
    for n, next_n in pairwise(all_nodes):
        print("%s:" % n)
        print(" nop()")
        succ = cfg.succ(n)
        if len(succ) == 0:
            print(" return")
        elif len(succ) == 1:
            if succ[0] != next_n:
                print(" goto %s" % succ[0])
        else:
            old_len = len(succ)
            succ = [x for x in succ if x != next_n]
            else_node = None
            if len(succ) == old_len:
                # next_n wasn't in succ
                else_node = succ[-1]
                succ = succ[:-1]
            s = ", ".join(["($cond) goto %s" % x for x in succ])
            if else_node:
                s += ", else goto %s" % else_node
            print(" if " + s)
def dump_c(cfg, stream=sys.stdout):
    labels = find_used_labels(cfg)
    func_start = True
    for (addr, info), nxt in pairwise(cfg.iter_rev_postorder()):
        bblock = info["val"]
        if func_start:
            label = cfg.parser.label_from_addr(bblock.addr)
            if label[0].isdigit():
                label = "fun_" + label
            if "estimated_params" in cfg.props:
                print("// Estimated params: %s" % sorted(list(cfg.props["estimated_params"])), file=stream)
            if cfg.props["trailing_jumps"]:
                print("// Trailing jumps not removed, not rendering CFG edges as jumps", file=stream)
            print("void %s()\n{" % label, file=stream)
            func_start = False
        if addr in labels:
            print("\nl%s:" % addr, file=stream)
        bblock.dump(stream, indent=1, printer=print_inst)
        if not cfg.props["trailing_jumps"]:
            for succ in cfg.succ(addr):
                cond = cfg.edge(addr, succ).get("cond")
                if not cond and nxt and succ == nxt[0]:
                    continue
                stream.write(" ")
                if cond:
                    stream.write("if %s " % cond)
                print("goto l%s;" % succ, file=stream)
    print("}", file=stream)
def hierarchyReduction(hierarchy, dim=2, classic=False):
    if hierarchy.representatives == None:
        return
    docs = map(lambda x: x.center, hierarchy.representatives)
    if hierarchy.center != None:
        docs.append(hierarchy.center)
    if hierarchy.simMat == None:
        hierarchy.simMat = utils.pairwise(docs, lambda x, y: x.similarity(y))
    points = []
    if classic:
        points = classicMDS(hierarchy.simMat, dim)
    else:
        points = reduction(hierarchy.simMat, dim)
    if hierarchy.center != None:
        hierarchy.centerPos = points[-1]
        hierarchy.mdsPos = points[:-1]
    else:
        hierarchy.mdsPos = points[:]
    map(lambda x: hierarchyReduction(x, dim), hierarchy.representatives)
def reduce(self, classic=False, dim=2):
    if self.representatives == None:
        return
    docs = map(lambda x: x.center, self.representatives)
    if self.center != None:
        docs.append(self.center)
    if self.simMat == None:
        self.simMat = utils.pairwise(docs, lambda x, y: x.similarity(y))
    points = []
    if classic:
        points = mds.classicMDS(self.simMat, dim)
    else:
        points = mds.reduction(self.simMat, dim)
    if self.center != None:
        self.centerPos = points[-1]
        self.mdsPos = points[:-1]
    else:
        self.mdsPos = points[:]
    map(lambda x: x.reduce(classic, dim), self.representatives)
def main(args):
    if len(args) != 1:
        print "Usage: mds.py C clustering.pkl"
        print "    C is the cluster in clustering.pkl to display"
        sys.exit(0)
    #C = int(args[1])
    #path = args[2]
    print "Loading"
    #clustering = utils.load_obj(path)
    #docs = clustering[C].members
    docs = doc.get_docs_nested(driver.get_data_dir("small"))
    print "Calculating Pairwise Similarities"
    similarities = utils.pairwise(docs, lambda x, y: x.similarity(y))
    #print "INITIAL SIMILARITIES:"
    #utils.print_mat(similarities)
    #similarities = [[0,93,82,133],[93,0,52,60],[82,52,0,111],[133,60,111,0]]
    print "Starting MDS"
    #pos = reduction(similarities)
    pos = classicMDS(similarities)
    print "MDS:"
    utils.print_mat(pos)
def scrape(origin, destination, start_date, end_date):
    # Creates a file with the cheapest outgoing and return flights for a given
    # date range.
    items = int((end_date - start_date).days) - 1
    date_list = daterange(start_date, end_date)
    data = {}
    for index, (depart_date, return_date) in enumerate(pairwise(date_list)):
        try:
            depart_list, return_list = search_flight(destination, depart_date,
                                                     return_date, params)
        except:
            log.error(f"Some error occurred, retrying item {index+1}/{items}")
        data = open_file(f'{destination}.json')
        data[str(depart_date)]['outgoing'] = min_depart
        data[str(return_date)]['return'] = min_return
        with open(f'{destination}.json', 'w') as f:
            json.dump(data, f, sort_keys=True, indent=8, separators=(',', ': '))
        log.info(f"Search {index+1}/{items} complete: {depart_date} to "
                 f"{return_date}. ({min_depart}MYR, {min_return}MYR)")
def get_updated_link_shape(link, probes, get_distance=get_distance):
    [ref_node_shape, *_] = link.shapeInfo.split('|')
    [ref_node_lat, ref_node_lon, slope] = ref_node_shape.split('/')
    ref_latlon = (float(ref_node_lat), float(ref_node_lon))
    get_distance_to_ref_node = lambda p: get_distance(
        (p.matchedLatitude, p.matchedLongitude), ref_latlon)
    unique_probes = dedup(probes, get_distance_to_ref_node)
    sorted_by_distance = sorted(unique_probes, key=get_distance_to_ref_node)
    slopes = [
        compute_slope(p1, p2) for (p1, p2) in pairwise(sorted_by_distance)
    ]
    slopes = [s for s in slopes if s]
    average_slope = mean(slopes) if len(slopes) > 0 else 0.0
    result = []
    for node in link.shapeInfo.split('|'):
        [lat, lon, slope] = node.split('/')
        result += [
            '{}/{}/{}'.format(lat, lon, slope if slope else average_slope)
        ]
    return '|'.join(result)
def max_acceleration_pass(track: Track, car: Car):
    """
    Calculates the maximum acceleration (and corrects maximum velocity if
    necessary) in one backwards pass.
    """
    def calc(this: Point, nxt: Point):
        # Find maximum acceleration.
        this.max_acceleration = (nxt.max_velocity**2 - this.max_velocity**2) / 2
        if this.max_acceleration > car.acceleration:
            # Can't speed up enough, so correct next max velocity.
            this.max_acceleration = car.acceleration
            nxt.max_velocity = calc_velocity(this.max_velocity, this.max_acceleration)
            if nxt.next:
                calc(this.next, nxt.next)
        if this.max_acceleration < -1 * car.braking:
            # Can't slow down enough, so correct previous max velocity.
            this.max_acceleration = -1 * car.braking
            this.max_velocity = calc_velocity(nxt.max_velocity, car.braking)
            nxt.max_velocity = calc_velocity(this.max_velocity, this.max_acceleration)

    track.points[-1].max_acceleration = 0
    for second, first in pairwise(track.points[::-1]):
        calc(first, second)
def plot_one(data_x, data_y, filename=None, show=False):
    print("Producing information plane image")
    cmap = plt.get_cmap('gnuplot')
    for id, e in enumerate(pairwise(zip(data_x, data_y))):
        (x1, y1), (x2, y2) = e
        plt.plot((x1, x2), (y1, y2),
                 linewidth=0.2,
                 alpha=0.9,
                 color=cmap(min(.9, 0.15 + id * 0.1)))
        point_size = 300
        plt.scatter(x1, y1,
                    s=point_size,
                    alpha=0.9,
                    color=cmap(min(.9, 0.15 + id * 0.1)))
        plt.scatter(x2, y2,
                    s=point_size,
                    alpha=0.9,
                    color=cmap(min(.9, 0.15 + id * 0.1)))
    plt.xlabel('I(X,T)')
    plt.ylabel('I(Y,T)')
    if filename is not None:
        print("Saving image to file : ", filename)
        start = time.time()
        plt.savefig(filename, dpi=1000)
        end = time.time()
        print("Time taken to save to file {:.3f}s".format((end - start)))
    if show:
        plt.show()
    plt.cla()
def parse_contributions(text):
    def is_invalid(s):
        tests = [
            s['first_name'][0].isupper(),
            s['last_name'][0].isupper(),
            len(s['first_name']) >= 2,
            len(s['last_name']) >= 2
        ]
        if not all(tests):
            log.debug("Discarding invalid contribution {}".format(s))
        # itertools.filterfalse() yields the items for which the predicate
        # returns false, hence the inverse logic
        return not all(tests)

    end = re.search(r'$', text)
    speakers = itertools.filterfalse(lambda x: is_invalid(x.groupdict()),
                                     Regex.speaker_reg_.finditer(text))

    # TODO: remove/fix
    aw_data = ''
    with open('../data/deputies.json') as f:
        aw_data = f.read()
    aw_data = json.loads(aw_data)

    contributions = []
    for m, m1 in pairwise(itertools.chain(speakers, [end])):
        contrib = {
            'speaker': match_abgeordnetenwatch(m.groupdict(), aw_data),
            'start_idx': m.start(),
            'end_idx': m1.start(),
            'speech': text[m.end():m1.start()]
        }
        contributions.append(contrib)
    return contributions
def __compute_ride_fares(self, ride: Ride):
    slot_fares = self.rides_config.slot_fares
    remaining_duration = ride.duration
    ride_fares = []
    start_time = ride.startTime.time()
    for current_slot_fare, next_slot_fare in cycle(pairwise(slot_fares + [slot_fares[0]])):
        if start_time < next_slot_fare.start or next_slot_fare.start == time(0, 0, 0):
            slot_duration = RidesService.__compute_slot_duration(current_slot_fare, next_slot_fare)
            ride_end_time = (ride.startTime + ride.duration).time()
            is_ride_end_in_slot = (current_slot_fare.start <= ride_end_time
                                   and remaining_duration <= slot_duration)
            if is_ride_end_in_slot:
                ride_fares.append((remaining_duration, current_slot_fare.fare))
                remaining_duration = timedelta(0)
            else:
                slot_fare_end = datetime.combine(date.min, current_slot_fare.start) + slot_duration
                ride_in_slot_duration = slot_fare_end - datetime.combine(date.min, start_time)
                remaining_duration = remaining_duration - ride_in_slot_duration
                ride_fares.append((ride_in_slot_duration, current_slot_fare.fare))
                start_time = slot_fare_end.time()
        if remaining_duration <= timedelta(0):
            break
    return ride_fares
def break_up_audio_file_by_timestamps(audio_file, timestamps):
    audio_file_name, audio_file_ext = os.path.splitext(audio_file)
    audio_segment = AudioSegment.from_file(audio_file)
    intervals = pairwise(itertools.chain((0,), timestamps, (len(audio_segment),)))
    slices = cut_audio_segment(audio_segment, intervals)
    for idx, current in enumerate(slices, 1):
        current.export(f'{audio_file_name}_{idx}{audio_file_ext}',
                       format=audio_file_ext[1:])
def get_cost_score(self) -> float:
    # demand ~ capacity
    # time ~ due_time
    self.initial_port()
    for source, dest in utils.pairwise([0] + self.route + [0]):
        if self.check_capacity(dest):
            # current vehicle has the capacity to go from source to dest
            if not self.check_time_and_go(source, dest):
                # current vehicle hasn't enough time to go to dest -> new vehicle
                # current vehicle should go back from source to depot
                self.move_vehicle(source, 0)
                # current_load = 0
                # new vehicle starts from depot heading to dest
                self.add_vehicle()
                self.move_vehicle(0, dest)
        else:
            # current vehicle hasn't the capacity to go to dest
            # current vehicle should go back from source to depot
            self.move_vehicle(source, 0)
            # head from depot to dest
            distance = self.get_distance(0, dest)  # just for speeding up (caching)
            if not self.check_time_and_go(0, dest, distance):
                # too late to go from depot to dest on current vehicle -> new vehicle
                self.add_vehicle()
            self.move_vehicle(0, dest, distance)

    total_travel_cost = Chromosome.get_travel_cost(self.total_travel_dist)
    total_vehicles_and_deport_working_hours_cost = self.get_vehicle_count_preference_cost(
        vehicles_count=self.vehicles_count,
        deport_working_hours=self.max_elapsed_time)
    return total_travel_cost + total_vehicles_and_deport_working_hours_cost
def translated_operation(a, rng, operator):
    """
    Return an array of corresponding results from operations between
    diametrically opposed translated slices.

    Parameters
    ----------
    a : array-like
        Input array.
    rng : int
        Half of the Chebyshev distance between the two inputs in each pair.
    operator : function
        Binary operator used to compute results.

    Returns
    -------
    out : MaskedArray
        Array of results with an additional dimension corresponding to each
        pair of translated slices.
    """
    # get centered pixels for 2x2 and 3x3 based on rng
    a__ = a[:, 1:-1, 1:-1] if rng != 0 \
        else (a[:, :-1, :-1] + a[:, :-1, 1:] + a[:, 1:, 1:] + a[:, 1:, :-1]) / 4
    # compute angle difference
    out = ma.masked_array(
        [operator(a__, a[ts]) for ts in TRANSLATING_SLICES_[rng]])
    # Rearrange axes:
    for dim1, dim2 in pairwise(range(out.ndim)):
        out = out.swapaxes(dim1, dim2)
    return out, a__
def get_variational_parametrized_qc(qubits, params):
    register = get_quantum_register(len(qubits))
    qc = get_new_qc(register)
    i = 0
    parameters_are_exhausted = False
    while not parameters_are_exhausted:
        for qubit in qubits:
            # rotate qubit with an angle from parameters
            theta = params[i] * 2 * np.pi
            qc.rz(theta, qubit)
            i += 1
        for qubit in qubits:
            # rotate qubit with an angle from parameters
            theta = params[i] * 2 * np.pi
            qc.rx(theta, qubit)
            i += 1
        for qubit_pair in pairwise(qubits):
            # do controlled-Z rotations for neighbouring qubits using angles
            # from the params array
            theta = params[i] * 2 * np.pi
            qc.crz(theta, *list(qubit_pair))
            i += 1
        parameters_are_exhausted = i >= len(params)
    return qc
def build_barkan(ppr, background=None):
    """Build a PSSM based on Barkan coding"""
    if background == None:
        background = (1, 1, 1, 1,)
    code = get_code(ppr)
    #print "Building model for:\n\t{}".format(
    #    string_code(code).replace('\n','\n\t'))
    PSSM = []
    for i, (a, b) in enumerate(pairwise(code)):
        s = a[1] + b[0]
        if a[2] == 'P' and s in Ptype:
            emit = Ptype[s]
        elif a[2] == 'S' and s in Stype:
            emit = Stype[s]
        else:
            emit = equal
        if sum(emit) == 0:
            emit = equal
        #convert to log odds
        tot = sum(emit)
        emit = tuple(log(float(i) / float(tot * b)) for i, b in zip(emit, background))
        #print "{:2}: \"{}\" [{}] -> {}".format(i,s,a[2],emit)
        PSSM.append(emit)
    return PSSM
def information(activation):
    data_t = activation
    if bins == -1:
        data_t = [
            bin_array(t, bins=30, low=t.min(), high=t.max()) for t in data_t
        ]
    else:
        data_t = [
            bin_array(t, bins=bins, low=t.min(), high=t.max()) for t in data_t
        ]
    #data_t = [binarize(t) for t in data_t]
    data_t = [hash_data(t) for t in data_t]
    h_t = np.array([entropy_of_data(t) for t in data_t])
    #h_t_x = np.array([__conditional_entropy(t, data_x) for t in data_t])
    h_t_y = np.array([__conditional_entropy(t, data_y) for t in data_t])
    h_t_t = np.array(
        [__conditional_entropy(t1, t2) for (t1, t2) in pairwise(data_t)])
    i_x_t = h_t  # - h_t_x  # H(T, X) is 0 since every element in X is unique
    i_y_t = h_t - h_t_y
    i_t_t = h_t[:-1] - h_t_t
    return i_x_t, i_y_t, i_t_t
def get_cost_score(self) -> float:
    self.initial_port()
    for source, dest in utils.pairwise([0] + self.route + [0]):
        # check whether there is remaining demand for each fuel type
        if dest != 0:
            if not self.check_demand(dest):
                continue
        if self.check_capacity(dest):
            # the vehicle satisfies the multi-compartment constraints
            if not self.check_time_and_go(source, dest):
                # the vehicle cannot reach the new customer in time -> add a vehicle
                # the current vehicle returns to the depot
                self.move_vehicle(source, 0)
                # a new vehicle starts from the depot
                self.add_vehicle()
                self.move_vehicle(0, dest)
        else:
            # the vehicle does not satisfy the load constraints,
            # so it returns to the depot
            self.move_vehicle(source, 0)
            # compute the distance from the depot to the gas station
            distance = self.get_distance(0, dest)  # just for speeding up (caching)
            self.total_travel_dist += distance
            if not self.check_time_and_go(0, dest, distance):
                # this vehicle cannot make it in time, so a new one departs
                self.add_vehicle()
            self.move_vehicle(0, dest, distance)

    total_travel_cost = Chromosome.get_travel_cost(self.total_travel_dist)
    total_vehicles_and_deport_working_hours_cost = self.get_vehicle_count_preference_cost(
        vehicles_count=self.vehicles_count,
        deport_working_hours=self.max_elapsed_time)
    return total_travel_cost + total_vehicles_and_deport_working_hours_cost
def hist_pdf(x, bin_edges, bin_weights):
    y = np.zeros_like(x)
    for w, (lo, hi) in zip(bin_weights, pairwise(bin_edges)):
        idx = (lo <= x) & (x < hi)
        y[idx] = w
    return y
def hist_err(x, weights, bin_edges, bin_weights):
    var = np.zeros_like(bin_weights)
    for i, (lo, hi) in enumerate(pairwise(bin_edges)):
        idx = (lo <= x) & (x < hi)
        var[i] = np.sum(weights[idx]**2)
    return np.sqrt(var)
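# A hypothetical usage sketch for the two histogram helpers above: build a
# weighted histogram with numpy, then evaluate its step-wise PDF on a grid and
# the per-bin errors. All names below are illustrative assumptions, not taken
# from the original source.
import numpy as np

samples = np.random.normal(size=1000)
weights = np.ones_like(samples)
bin_weights, bin_edges = np.histogram(samples, bins=20, weights=weights,
                                      density=True)
grid = np.linspace(bin_edges[0], bin_edges[-1], 500)
pdf_values = hist_pdf(grid, bin_edges, bin_weights)   # step function over the grid
errors = hist_err(samples, weights, bin_edges, bin_weights)  # sqrt of sum of squared weights per bin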
def _build_layers(self):
    units = self._get_units()
    layers = []
    for in_units, out_units in pairwise(units):
        layers += [self._build_hidden_layer(in_units, out_units)]
    layers += [self._build_final_layer(units[-1])]
    return nn.Sequential(*layers)
def move(self, how_many, where, delay):
    sess = Session.object_session(self)
    already = sess.query(MarchingOrder).filter_by(leader=self).first()
    if already:
        raise InProgressException(already)

    fighting = (sess.query(SkirmishAction).
                filter_by(participant=self).first())
    if fighting:
        raise InProgressException(fighting)

    if how_many > self.loyalists:
        # TODO: Attempt to pick up loyalists
        raise InsufficientException(how_many, self.loyalists, "loyalists")
    # TODO: Drop off loyalists

    where = forcelist(where)
    locations = [self.region] + where
    for src, dest in pairwise(locations):
        if dest not in src.borders:
            raise NonAdjacentException(src, dest)
        if not dest.enterable_by(self.team):
            raise TeamException(dest)

    orders = []
    if delay > 0:
        step = 0
        for src, dest in pairwise(locations):
            step += 1
            mo = MarchingOrder(arrival=time.mktime(time.localtime()) + delay * step,
                               leader=self,
                               source=src,
                               dest=dest)
            orders.append(mo)
            sess.add(mo)
    else:
        self.region = where[-1]
        # TODO: Change number of loyalists
        self.defectable = False
    sess.commit()
    return orders
def get_rejection_count_from_status(self, searched_status):
    '''
    Get number of times an item has transitioned from searched_status
    to "In Progress"
    '''
    return len([
        a for a, b in pairwise(self.get_status_flow())
        if a == searched_status and b == 'In Progress'
    ])
def compute_edges(self, shop_lists: List[Shop]):
    edges = list()
    for shop_list1, shop_list2 in pairwise(shop_lists):
        for shop1, shop2 in product(shop_list1, shop_list2):
            edges.append(
                Edge(shop1.identifier, shop2.identifier,
                     self.compute_distance(shop1, shop2)))
    return edges
def get_parameters():
    parser = argparse.ArgumentParser()
    parameters_data(parser)
    network_parameters(parser)
    parser.add_argument(
        '--bins', '-b',
        dest='bins',
        default=30,
        type=int,
        help="select the number of bins to use for binning, defaults to 30")
    parser.add_argument(
        '--dest',
        dest='dest',
        default="output",
        help="destination folder for output files")
    parser.add_argument(
        '--epoch_list', '-el',
        dest='epoch_list',
        default="1-19,20-90",
        help="list of ranges for which to compute mi, e.g. 1-10,15-30; "
             "ranges are assumed to be disjoint")
    parser.add_argument(
        '--saved_epochs', '-se',
        dest='no_saved_epochs',
        default=100,
        type=int,
        help="number of epochs to consider when calculating mutual information")
    args = parser.parse_args()

    def convert_to_range(rrange):
        split = rrange.split('-')
        return int(split[0]), int(split[1])

    args.epoch_list = args.epoch_list.split(',')
    args.epoch_list = [convert_to_range(e) for e in args.epoch_list]
    for s, e in args.epoch_list:
        if s > e:
            raise ValueError("invalid range {}".format((s, e)))
        if s == e:
            raise ValueError("range is 0, start {}, end {}".format(s, e))
    for a, b in pairwise(args.epoch_list):
        _, e = a
        s, _ = b
        if s < e:
            raise ValueError("ranges {} and {} overlap".format(a, b))
    return args
def get_path_attr_list(self, path, attrs):
    attr_map = defaultdict(list)
    for (n1, n2) in U.pairwise(path):
        v1 = self.vd[n1]
        v2 = self.vd[n2]
        e = self.G.edge(v1, v2, add_missing=False)
        for attr in attrs:
            attr_map[attr].append(self.edge_attr[e][attr])
    return attr_map
def parse_driver(line):
    driver = [int(x) for x in line.split()]
    if len(driver) % 6 != 0:
        raise Exception(
            'All users should have origin ox oy and destination dx dy')
    idxs, points = get_indexes_and_points_separated_lists(driver)
    point_tuples = tuple([(x, y) for x, y in utils.pairwise(points)])
    idxs_and_points = [(idx, point) for idx, point in zip(idxs, point_tuples)]
    return idxs_and_points
def get_yagi_NSRs(ppr):
    """Return a list of 3-tuples referring to the NSRs"""
    o = 3 * 1
    NSR = []
    for a, b in pairwise(ppr.features):
        s1 = ppr.seq[a.location.start + o:o + a.location.start + 1 + 3 * 4].translate()
        s2 = ppr.seq[b.location.start - 3 * 2 + o:o + b.location.start].translate()
        NSR.append((s1[0], s1[3], s2[0],))
    return NSR
def __init__(self, category_factors=(1, 1, 1, 1, 1)):
    if category_factors[0] <= 0:
        raise ValueError("ScoringScheme: category factors must be > 0")
    for f1, f2 in pairwise(category_factors):
        if f1 > f2:
            raise ValueError(
                "ScoringScheme: category factors must be in non-decreasing order"
            )
    self.category_factors = category_factors
def tracks(self):
    _tracks = self.tmx['track']
    segments = []
    for track in _tracks:
        for (x1, y1), (x2, y2) in pairwise(track.points):
            segments.append(
                LineSegment(
                    x1 + track.x, -y1 + track.y,
                    x2 + track.x, -y2 + track.y))
    return segments
def compositions(n, m):
    """
    >>> len(list(compositions(4, 3)))
    15
    >>> list(compositions(2, 1))
    [(2,)]
    >>> list(compositions(2, 2))
    [(0, 2), (1, 1), (2, 0)]
    """
    for c in combinations(range(n + m - 1), m - 1):
        yield tuple(b - a - 1 for a, b in pairwise((-1,) + c + (n + m - 1,)))
def warp(self):
    if not self.convexOnly:
        pairs = [pair for pair in pairwise(self.hullVertices)]
        for p1, p2 in pairs:
            # TODO: implement min distance check?
            mdpt = midpoint(p1, p2)
            angle = random.randint(self.minWarpAngle, self.maxWarpAngle)
            newPoint = endpoint(p1, mdpt, angle, 'd')
            self.addedPoints.append(newPoint)
        # interleave self.hullVertices and newPoints
        self.hullVertices = [point for point in
                             roundrobin(self.hullVertices, self.addedPoints)]
def get_wiki_table(cls, soup_object):
    trs = soup_object.find('table', class_='infobox').find_all('tr')
    table = {}
    for tr in trs:
        # Look at each pair of td tags, since infobox rows that we care about
        # have two columns
        for h_text, v_text in [(replace_xa0(h.text), v.text)
                               for h, v in pairwise(tr.find_all('td'))]:
            if h_text in WikipediaExtractor.headers_to_terms:
                comp_term = WikipediaExtractor.headers_to_terms[h_text]
                # removes blank lines and blank indexes
                table[comp_term] = [x for x in re.split('\n+', v_text) if x != u'']
    return table
def print_cluster_sim_mat(self):
    print "CLUSTER SIM MATRICES:"
    centers = map(lambda cluster: cluster.center, self.clusters)
    feature_set_names = self.docs[0].get_feature_set_names()
    for name in feature_set_names:
        print
        print "Similarity Type: %s" % name
        mat = utils.pairwise(centers,
                             lambda doc1, doc2: doc1.global_sim(doc2, name))
        mat = utils.apply_mat(mat, lambda x: "%3.2f" % x)
        utils.insert_indices(mat)
        utils.print_mat(mat)
    print
    print "Similarity Type: Cluster sim by CONFIRM"
    sub_mat = utils.pairwise(self.clusters,
                             lambda c1, c2: self.confirm.cluster_similarity(c1, c2))
    sub_mat = utils.apply_mat(sub_mat, lambda x: "%3.2f" % x)
    utils.insert_indices(sub_mat)
    utils.print_mat(sub_mat)
    print
    print
def path_to_dict(path):
    """Converts the path data (which is a list of node IDs) from point A to
    point B into a list of edges (i.e. pairs of nodes) corresponding to the
    path. That data is then written to the path_dict observable for consumption
    by the websockets frontend.

    Format: {'path': list of node pairs,
             'endpoints': tuple: (start point ID, end point ID)}
    """
    global path_dict
    path_dict.overwrite({
        'path': [sorted(x) for x in pairwise(path)],
        'endpoints': (path[0], path[-1])
    })
def find_used_labels(cfg):
    labels = set()
    for addr, nxt in pairwise(cfg.iter_rev_postorder()):
        info = cfg[addr]
        bblock = info["val"]
        succs = cfg.sorted_succ(addr)
        if len(succs) > 1 and nxt == succs[0]:
            swap_if_branches(cfg, addr)
            succs = cfg.sorted_succ(addr)
        for succ in succs:
            cond = cfg.edge(addr, succ).get("cond")
            if not cond and nxt and succ == nxt:
                continue
            labels.add(succ)
    return labels
def pitches_to_intervals(chord):
    """
    >>> pitches_to_intervals([12, 16, 31])
    [4, 3, 5]
    """
    # TODO rewrite this
    pcs = sorted(list(set([p % 12 for p in chord])))
    pcs = [p - pcs[0] for p in pcs]
    pcs.reverse()
    pcs.insert(0, 12)
    intervals = []
    for a, b in pairwise(pcs):
        intervals.append(a - b)
    intervals.reverse()
    return intervals
def _init_global_thresh(self):
    sub_docs = self.docs[:20]
    sim_mat = utils.pairwise(sub_docs,
                             lambda x, y: max(self.doc_similarity(x, y),
                                              self.doc_similarity(y, x)))
    for x in xrange(len(sim_mat)):
        del sim_mat[x][x]
    self.global_thresh = .7  #utils.avg(map(max, sim_mat))
    print
    print "INITIAL GLOBAL THRESH: ", self.global_thresh
    tmp = utils.flatten(sim_mat)
    tmp.sort()
    print map(lambda x: "%.2f" % x, tmp)
    print map(lambda x: "%.2f" % x, map(max, sim_mat))
    print
    self.sim_sum = self.global_thresh * self.initial_thresh_weight
    self.num_counted = self.initial_thresh_weight
def generate(self, streams):
    newstreams = []
    output = []
    i = 0
    for s1, s2 in pairwise(streams):
        newstream = self._outstreamprefix
        if i > 0:
            newstream += str(i)
        output.append(
            "[{b}][{o}]{e}[{ns}]".format(
                b=s1,
                o=s2,
                e=self._expressions_accessor.get_expression(i),
                ns=newstream))
        newstreams.append(newstream)
        i += 1
    return output, newstreams
def simulate_entire_trajectories(self, sim_states, T):
    comm = self.communicator
    comm.put_value('initial_continuous_states', sim_states.cont_states)
    comm.put_value('initial_discrete_states', sim_states.discrete_states)
    comm.put_value('initial_pvt_states', sim_states.pvt_states)
    comm.put_value('T', np.array([T]))
    ret_val_str_list = ['data_mat', 'idx_arr']
    arg_str_list = [
        'sim_function',
        'initial_continuous_states',
        'initial_discrete_states',
        'initial_pvt_states',
        'inputs',
        'T',
    ]
    fun_name_str = 'simulate_entire_trajectories'
    if self.parallel:
        [data_arr, idx_arr] = comm.call_function_retVal(ret_val_str_list,
                                                        fun_name_str,
                                                        arg_str_list)
    else:
        logger.warning('non-parallel simulate_entire_trajectories is unimplemented')
        raise NotImplementedError('single threaded implementation missing')

    idx_arr = self.matlab2py_indices(idx_arr)
    list_of_trajs = []

    # quick sanity check
    if len(idx_arr.shape) != 1 or idx_arr[0] != 0:
        print 'idx_arr: ', idx_arr
        raise err.Fatal('sanity check on idx_arr fails!')

    # get a pairwise iterator for the index array
    for (i, j) in utils.pairwise(idx_arr):
        list_of_trajs.append(data_arr[i:j, :])

    # append the last trajectory
    logger.info('Populating trajectories...')
    list_of_trajs.append(data_arr[j:, :])

    # ##!!##logger.debug('===Trajectories===\n%s', str(list_of_trajs))
    return list_of_trajs
def parse_line(line, fields):
    """Parses a line of fixed width data.

    Fields should be a list of tuples of form: (field_start, field_name),
    field_start being the column number at which the field begins.
    """
    data = {}
    field_starts = [field[0] for field in fields]
    # Construct slices ranging from the start of each field to the start of the
    # next field.
    slices = [slice(a, b) for a, b in pairwise(field_starts + [None])]
    for slice_, field in zip(slices, fields):
        field_name = field[1]
        value = line[slice_].strip()
        data[field_name] = value
    return data
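# A small hypothetical example for parse_line above; the field names, column
# offsets, and sample record are made up to show the expected input shape.
fields = [(0, 'name'), (10, 'age'), (14, 'city')]
line = "Ada".ljust(10) + "36".ljust(4) + "London"
record = parse_line(line, fields)
# record == {'name': 'Ada', 'age': '36', 'city': 'London'}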
def _init_clusters(self):
    sub_docs = self.docs[:self.num_instances]
    sim_mat = utils.pairwise(sub_docs,
                             lambda x, y: max(self.doc_similarity(x, y),
                                              self.doc_similarity(y, x)))
    edges = utils.minimum_spanning_tree(sim_mat)
    ccs = utils.get_ccs(range(self.num_instances), edges)
    biggest_cc = max(map(len, ccs))
    while biggest_cc > self.num_init:
        edge_to_remove = random.sample(edges, 1)[0]
        edges.remove(edge_to_remove)
        ccs = utils.get_ccs(range(self.num_instances), edges)
        biggest_cc = max(map(len, ccs))
    cc = ccs[utils.argmax(map(len, ccs))]
    for idx in cc:
        self._add_cluster(self.docs[idx], member=False)
def _init_clusters(self):
    super(MaxCliqueInitCONFIRM, self)._init_clusters()

    sub_docs = self.docs[:self.num_instances]
    sim_mat = utils.pairwise(sub_docs,
                             lambda x, y: max(self.doc_similarity(x, y),
                                              self.doc_similarity(y, x)))
    print
    print "Doc Sim Mat"
    utils.print_mat(utils.apply_mat(sim_mat, lambda x: "%3.2f" % x))

    idxs = utils.find_best_clique(sim_mat, self.num_clust)

    print
    print "Cluster Labels:"
    for idx in idxs:
        self._add_cluster(self.docs[idx], member=False)
        print idx, self.docs[idx].label
def is_good(ornament, biggest_interval=3):
    # Put the target note at the end of the ornament
    # to prevent repeated notes or large intervals
    # between the last note of the ornament and the target note
    ornament = list(ornament)
    ornament.append(0)
    pairs = pairwise(ornament)
    for a, b in pairs:
        # Don't allow repeated pitches
        if a == b:
            return False
        # Don't allow transitions greater than `biggest_interval` steps
        if abs(a - b) > biggest_interval:
            return False
    return True
def maxFlowOpt(reads, maxReadsPerPos, minReadsPerPos):
    """
    reads:
    maxReadsPerPos: maximum coverage (aka k)
    minReadsPerPos: minimum coverage (aka t)
    """
    # flatten read start and end into one list
    positions = sorted(set([x for sublist in reads for x in sublist]))
    backboneCapacity = maxReadsPerPos - minReadsPerPos
    sourceSinkCapacity = maxReadsPerPos
    readIntervalCapacity = 1
    superSource = -1
    superSink = positions[-1] + 1
    g = nx.DiGraph()

    # we remove reads directly from the list
    reads = copy.copy(reads)

    # add backbone
    g.add_edge(superSource, positions[0], {'capacity': sourceSinkCapacity})
    g.add_edge(positions[-1], superSink, {'capacity': sourceSinkCapacity})
    for (p0, p1) in utils.pairwise(positions):
        g.add_edge(p0, p1, {'capacity': backboneCapacity})

    # add intervals
    for read in reads:
        g.add_edge(read[0], read[1], {'capacity': readIntervalCapacity})

    # TODO: implement flow algorithm
    val, mincostFlow = nx.maximum_flow(g, superSource, superSink)

    pruned = []
    # TODO: this is in O(n) -> we can do it in O(log n)
    for key, values in mincostFlow.items():
        for value, weight in values.items():
            if weight == 0:
                toRemove = (key, value)
                if toRemove in reads:
                    pruned.append((key, value))
                    reads.remove((key, value))
    return reads, pruned, mincostFlow
def find_gaps(ppr, mingap=30, maxgap=None, skip_introns=True):
    """Find all the gaps between PPR motifs whose length is between mingap
    and maxgap"""
    loc = []
    feats = sorted(ppr.features, key=lambda p: int(p.location.start))
    for a, b in pairwise(feats):
        # ignore any gaps if a and b aren't in the same frame
        if a.qualifiers['frame'] != b.qualifiers['frame']:
            continue
        # if the size is within the range
        l = int(b.location.start) - int(a.location.end)
        if l >= (mingap or -float('inf')) and l <= (maxgap or float('inf')):
            # We've found a gap
            g = FeatureLocation(int(a.location.end), int(b.location.start), strand=1)
            g.prev = a
            g.next = b
            loc.append(g)
    return loc
def dump_c(cfg, stream=sys.stdout):
    labels = find_used_labels(cfg)
    func_start = True
    for addr, nxt in pairwise(cfg.iter_rev_postorder()):
        info = cfg[addr]
        bblock = info["val"]
        if func_start:
            label = cfg.props["name"]
            if not label:
                label = cfg.parser.label_from_addr(bblock.addr)
            if label[0].isdigit():
                label = "fun_" + label
            if "estimated_params" in cfg.props:
                print("// Estimated params: %s" % sorted(list(cfg.props["estimated_params"])), file=stream)
            if cfg.props["trailing_jumps"]:
                print("// Trailing jumps not removed, not rendering CFG edges as jumps", file=stream)
            func_props = progdb.FUNC_DB.get(label, {})
            params = ""
            if "params" in func_props:
                params = sorted(func_props["params"], key=natural_sort_key)
                params = ", ".join(["u32 " + str(r) + "_0" for r in params])
            print("void %s(%s)\n{" % (label, params), file=stream)
            func_start = False
        if addr in labels:
            print("\nl%s:" % addr, file=stream)
        bblock.dump(stream, indent=1, printer=print_inst)
        if not cfg.props["trailing_jumps"]:
            for succ in cfg.succ(addr):
                cond = cfg.edge(addr, succ).get("cond")
                if not cond and nxt and succ == nxt:
                    continue
                stream.write(" ")
                if cond:
                    stream.write("if %s " % cond)
                print("goto l%s;" % succ, file=stream)
    print("}", file=stream)
def pseudoDistance(clustering):
    # for each cluster, returns the estimated pairwise distances of every point
    # to every point in the cluster
    distances = []
    total = 0.0
    count = 0
    startTime = time.time()
    for cluster in clustering:
        # print "Cluster Size:", len(cluster.members)
        '''
        print "Starting True Similarities"
        startTime = time.time()
        trueSimilarities = map(lambda r: map(lambda c: 1-c, r),
            utils.pairwise(cluster.members, lambda x,y: x.similarity(y)))
        endTime = time.time()
        print "Done. Elapsed Time:", endTime-startTime
        '''
        # Order matters here.
        '''
        namedSims = map(lambda x: cluster.center.similarities_by_name(x).items(),
            [cluster.center] + cluster.members)
        distToCenter = map(lambda x: [1-i[1] for i in x], namedSims)
        euclid = utils.pairwise(distToCenter, lambda x,y: utils.euclideanDistance(x,y))
        distances.append(euclid)
        '''
        vectors = map(lambda x: cluster.center.similarity_vector(x),
                      [cluster.center] + cluster.members)
        distToCenter = map(lambda x: map(lambda i: 1 - i, x), vectors)
        euclid = utils.pairwise(distToCenter,
                                lambda x, y: utils.euclideanDistance(x, y))
        distances.append(euclid)
    endTime = time.time()
    print "Done. Elapsed Time:", endTime - startTime
    return distances
def refine_rel_model_based(
        AA, error_paths, pi_seq_list, sp, sys_sim, opts, sys, prop):
    '''does not handle pi_seq_list yet'''
    # abs_state relations: maps an abs_state to other abs_states
    # reachable in one time step
    abs_relations = defaultdict(set)
    for path in error_paths:
        # abs_state_1 -> abs_state_2
        for a1, a2 in U.pairwise(path):
            abs_relations[a1].add(a2)

    flat_relations = []
    for abs_state, rch_states in abs_relations.iteritems():
        flat_relation = list(itertools.product([abs_state], rch_states))
        flat_relations.extend(flat_relation)

    pwa_model = build_pwa_model(
        AA, flat_relations, sp, opts.max_model_error,
        opts.model_err, 'rel')

    if __debug__:
        sim_n_plot(error_paths, pwa_model, AA, sp, opts)
    check4CE(pwa_model, error_paths, sys.sys_name, 'rel', AA, sp, opts.bmc_engine)
def move(self, how_many, where, delay, sector=0, conf=None):
    sess = Session.object_session(self)
    where = forcelist(where)

    already = sess.query(MarchingOrder).filter_by(leader=self).first()
    if already:
        raise InProgressException(already)

    fighting = (sess.query(SkirmishAction).
                filter_by(participant=self).first())
    if fighting:
        allow = False
        if conf:
            if len(where) == 1 and where[0] == self.region:
                allow = conf["game"].get("allow_sector_retreat", False)
        if not allow:
            raise InProgressException(fighting)

    if how_many > self.loyalists:
        # TODO: Attempt to pick up loyalists
        raise InsufficientException(how_many, self.loyalists, "loyalists")

    # Is that sector even real?
    if conf:
        num_sectors = conf["game"].get("num_sectors", 1)
        if sector < 0 or sector > num_sectors:
            raise NoSuchSectorException(sector, num_sectors)
        elif sector == 0:
            # Assign a random sector
            sector = random.randint(1, num_sectors)

    # TODO: Drop off loyalists
    locations = [self.region] + where
    for src, dest in pairwise(locations):
        if src == dest:
            continue
        if dest not in src.borders:
            raise NonAdjacentException(src, dest)
        traverse_neutrals = False
        if conf:
            traverse_neutrals = conf["game"].get("traversable_neutrals", False)
        if not dest.enterable_by(self.team, traverse_neutrals=traverse_neutrals):
            raise TeamException(dest)

    orders = []
    if delay > 0:
        total_delay = 0
        for src, dest in pairwise(locations):
            travel_mult = max(src.travel_multiplier, dest.travel_multiplier)
            if src != dest:
                total_delay += (delay * travel_mult)
            else:
                if conf:
                    intrasector = conf["game"].get("intrasector_travel", 900)
                    # Travel multiplier doesn't apply to intrasector
                    total_delay += intrasector
            mo = MarchingOrder(arrival=time.mktime(time.localtime()) + total_delay,
                               leader=self,
                               source=src,
                               dest=dest,
                               dest_sector=sector)
            orders.append(mo)
            sess.add(mo)
    else:
        self.region = where[-1]
        self.sector = sector
        # TODO: Change number of loyalists
        self.defectable = False
    sess.commit()
    return orders
def _handle_openflow_PacketIn(self, event):
    # TODO: Refactor this method
    packet = event.parsed
    source = packet.src
    destination = packet.dst

    # self.log.info("SRC: %s" % source)
    # self.log.info("DST: %s" % destination)

    if destination.is_multicast:
        # Note: pick between 2 flooding techniques here
        # Flood the packet

        # ** Manual flooding **
        # TODO: Install new flow instead of crafting new packet (hold down?)
        for mac, entry in core.host_tracker.entryByMAC.iteritems():
            if source == mac:
                continue
            message = of.ofp_packet_out()
            message.actions.append(of.ofp_action_output(port=entry.port))
            message.data = event.data
            # message.data = event.ofp
            core.overseer_topology.graph.node[entry.dpid]['connection'].send(message)
            self.log.debug("FLOODING: MAC %s is connected to switch %s at port %s"
                           % (mac, entry.dpid, entry.port))

        # ** Use OFPP_FLOOD **
        # message = of.ofp_packet_out()
        # message.actions.append(of.ofp_action_output(port=of.OFPP_FLOOD))
        # message.buffer_id = event.ofp.buffer_id
        # # message.data = event.ofp
        # message.in_port = event.port
        # event.connection.send(message)
        return

    entryByMAC = core.host_tracker.entryByMAC
    known_hosts = entryByMAC.keys()

    if (source not in known_hosts) or (destination not in known_hosts):
        # Ignore non-end-to-end packet
        self.log.info("There is no path from host %s to host %s" % (source, destination))
        return

    self.log.info("Finding path from host %s to host %s" % (source, destination))
    from_host = entryByMAC[source]
    to_host = entryByMAC[destination]
    path = self.get_path(from_host.dpid, to_host.dpid, packet)

    match = of.ofp_match.from_packet(packet)
    match.in_port = None
    # match.dl_src = None
    # match.dl_dst = None
    #match.dl_vlan = None
    #match.dl_vlan_pcp = None
    # match.nw_proto = None
    #match.nw_tos = None

    self.log.info("Installing path from host %s to host %s" % (source, destination))

    # Install flows
    # TODO: Handle buffer_id properly
    # first = True
    for from_switch, to_switch in utils.pairwise(path):
        self.log.info("Installing flow from switch %x to switch %x" % (from_switch, to_switch))
        portByDpid = core.overseer_topology.graph.get_edge_data(from_switch, to_switch)["portByDpid"]
        message = of.ofp_flow_mod()
        message.match = match
        message.idle_timeout = self.flow_idle_timeout
        message.hard_timeout = self.flow_hard_timeout
        message.actions.append(of.ofp_action_output(port=portByDpid[from_switch]))
        # self.log.info("DEBUG: %s %d" % (type(portByDpid[from_switch]), portByDpid[from_switch]))
        # if first:
        #     message.buffer_id = event.ofp.buffer_id
        #     first = False
        core.overseer_topology.graph.node[from_switch]['connection'].send(message)

    # Install final flow
    self.log.info("Installing final flow from switch %x to host %s" % (path[-1], destination))
    message = of.ofp_flow_mod()
    message.match = match
    message.idle_timeout = self.flow_idle_timeout
    message.hard_timeout = self.flow_hard_timeout
    message.actions.append(of.ofp_action_output(port=to_host.port))
    core.overseer_topology.graph.node[path[-1]]['connection'].send(message)