def __init__(self, filepath: str = None, text: str = None) -> None:
    if not filepath and text is None:
        raise BufferInitializeException('Buffer must be instantiated with a filepath '
                                        'or a text')
    if filepath:
        with open(filepath, 'r') as f:
            self.textl = blist(f.read())
    else:
        self.textl = blist(text)
    # FIXME: these three must be properties and update together on write
    self._pos = 0
    self._line = 1
    self._column = 1
    self.selections = []  # type: List[Selection]
    self.moveselect_method = {
        Subject.Char: self.ms_char,
        Subject.Word: self.ms_word,
        Subject.Line: self.ms_line,
        Subject.Sentence: self.ms_sentence,
        Subject.Paragraph: self.ms_paragraph,
        Subject.Function: self.ms_function,
        Subject.Class: self.ms_class,
        Subject.FullFile: self.ms_fullfile,
    }
def __init__(self):
    if USE_BLIST:
        self._scores = blist([])
        self._members = blist([])
    else:
        self._scores = []
        self._members = []
def clearCache(self):
    """
    Removes all events from the cache by recreating the events set,
    the delay list and the cache list.
    """
    self.logger.logNotice("EventCache: clearing event cache.")
    self.events = set()
    self.delay_list = blist()
    self.cache_list = blist()
def sort_index(self):
    """
    Sort the Series by the index. The sort modifies the Series in place.

    :return: nothing
    """
    sort = sorted_list_indexes(self._index)
    # sort index
    self._index = blist([self._index[x] for x in sort]) if self._blist else [self._index[x] for x in sort]
    # sort data
    self._data = blist([self._data[x] for x in sort]) if self._blist else [self._data[x] for x in sort]
def __init__(self, config, logger, ticker):
    self.config = config
    self.logger = logger
    self.ticker = ticker
    self.events = set()             #: a set of all events in the cache
    self.delay_list = blist()       #: sorted list with event delay times -> tuple (delay time, event)
    self.cache_list = blist()       #: sorted list with event cache times -> tuple (cache time, event)
    self.dropped_events = 0         #: count of dropped events
    self.compressed_events = 0      #: number of events removed from cache because of compression
    self.new_compressed = 0         #: number of new compressed events
    self.nextcachewarning = 0       #: next time for warning about cache size exceeded
def __init__(self, display, pos, size):
    self.display = display
    self.pos = pos
    self.size = size
    self.left = pos[0]
    self.top = pos[1]
    self.right = pos[0] + size[0]
    self.bottom = pos[1] + size[1]
    self.particles = blist([])
    self.sources = blist([])
    self.gravities = blist([])
    self.obstacles = blist([])
def __init__(self, impl):
    self.impl = impl

    # reasonable defaults
    self.calc_ping = 0.1
    self.calc_bandwidth = 100 * KB
    self.calc_quality = 1

    self._last_bandwidth_scale = time.time()

    # use 64-bit integer to never run out of packet_nums
    self.packet_num = 0
    self.last_packet_num = 0
    self.stat_num = 0
    self.last_packets = blist()
    self._used_bandwidth = 0
    self._stats = blist()
    self._last_req = 0
def test_iterlen_empty(self):
    it = iter(blist.blist())
    if hasattr(it, '__next__'):  # pragma: no cover
        self.assertRaises(StopIteration, it.__next__)
    else:  # pragma: no cover
        self.assertRaises(StopIteration, it.next)
    self.assertEqual(it.__length_hint__(), 0)
def __init__(self, mm, start, stop):
    self.mm = mm
    self.start = start
    self.stop = stop
    self.muts = blist([0])
    self.muts *= self.stop - self.start
def __init__(self, eps=1e-6):
    # [remaining, jobid] queue for the *virtual* scheduler
    self.queue = blist()
    # Jobs that should have finished in the virtual time, but didn't in the
    # real one (happens only in case of estimation errors)
    self.late = set()
    # last time we ran the schedule function
    self.last_t = 0
    # Jobs that are running in real time
    self.running = set()
    # Jobs that have less than eps work to do are considered done
    # (deals with floating point imprecision)
    self.eps = eps
    # queue for late jobs, sorted by attained service
    self.late_queue = sorteddict()
    # {jobid: att} where att is jobid's attained service
    self.attained = {}
    # last result of calling the schedule function
    self.scheduled = {}
def getTitleSet(self):
    dbs = Movie.query.all()
    from blist import blist
    result = blist([])
    for db in dbs:
        striped_string = getTitle(db.titles)
        result.append(striped_string)
    return result
def restore_index(text_han, non_han):
    idx_list = blist(range(len(text_han) + 1))
    for j, char in sorted(non_han, key=lambda x: x[0]):
        idx_list.insert(j, char)
    return idx_list
def filterNullValueForPrimaryKey(self, valList):
    valList = self.word_strip_lower(valList)
    newList = blist([])
    # skip the common null/placeholder spellings
    null_values = ('\\n', '\\na', '\n', '\na', 'nan', '\nan', '\\nan', '', ' ', '-', '_')
    for v in valList:
        if v not in null_values:
            # print(v)
            newList.append(v)
    return newList
def __init__(self, data=None, index=None, data_name='value', index_name='index', use_blist=False, sort=None):
    """
    :param data: (optional) list of values.
    :param index: (optional) list of index values. If None then the index will be integers starting with zero
    :param data_name: (optional) name of the data column, defaults to 'value'
    :param index_name: (optional) name for the index. Default is "index"
    :param use_blist: if True then use blist() as the underlying data structure, if False use standard list()
    :param sort: if True then the Series will keep the index sorted. If True all index values must be of the same type
    """
    super(SeriesBase, self).__init__()

    # standard variable setup
    self._index = None
    self._index_name = index_name
    self._data = None
    self._data_name = data_name
    self._blist = use_blist

    # setup data list
    if data is None:
        self._data = blist() if self._blist else list()
        if index:
            # pad out to the number of rows
            self._pad_data(len(index))
            self.index = index
        else:
            self.index = list()
    elif isinstance(data, (list, blist)):
        self._data = blist([x for x in data]) if self._blist else [x for x in data]
        # setup index
        if index:
            self.index = index
        else:
            self.index = list(range(len(self._data)))
    else:
        raise TypeError('Not valid data type.')

    # setup sort
    self._sort = None
    if sort is not None:
        self.sort = sort
    else:
        if index:
            self.sort = False
        else:
            self.sort = True
def __init__(self, L, el_type=set, overwrite=False):
    self.logger = logging.getLogger("SparseMap")
    self.L = L
    self.overwrite = overwrite
    self.logger.info("initializing empty blist with {0} entries".format(L))
    if L:
        self.data = blist([el_type()]) * L
    self.el_type = el_type
def getListaIDsFavsByUserID(self, user_id):
    queryNeo4j = "MATCH (u:user {id_twitter : {ID}})-[r:FAV]->(a:tweet) return a"
    nodos = self.graph.cypher.execute(queryNeo4j, {"ID": user_id})
    identificadores = blist([])
    for nodo in nodos:
        identificadores.append(long(nodo[0].properties["id_twitter"]))
    return identificadores
def submapping(self, start, stop):
    bounds, values, nothing = self._bounds, self._values, self.nothing
    lindex = bisect_right(bounds, start) if start is not None else 0
    rindex = bisect_left(bounds, stop) if stop is not None else len(bounds)
    res = type(self)()
    res._bounds = blist(bounds[lindex:rindex])
    res._values = blist(values[lindex:rindex + 1])
    if start is not None:
        res[:start] = nothing
    if stop is not None:
        res[stop:] = nothing
    return res
def getUsersFavTweetByID(self, tweet_id):
    queryNeo4j = "MATCH (u:user)-[r:FAV]->(a:tweet {id_twitter : {ID}}) return u"
    nodos = self.graph.cypher.execute(queryNeo4j, {"ID": tweet_id})
    identificadores = blist([])
    for nodo in nodos:
        identificadores.append(long(nodo[0].properties["id_twitter"]))
    return identificadores
def main():
    rule = ""
    players = 0
    marbles = 0
    game = blist()
    player_scores = dict()
    current_player = 2
    game_idx = 2
    current_marble_idx = 1
    high_score = 0

    with open('input.txt') as f:
        for line in f:
            rule = line
    rule = rule.split()
    players = int(rule[0])
    marbles = int(rule[len(rule) - 2])

    game.append(0)
    game.append(1)

    while game_idx != marbles:
        play_index = current_marble_idx
        if (play_index + 2) >= (len(game) + 1):
            play_index = 1
        else:
            play_index = play_index + 2

        if game_idx % 23 == 0:
            if current_marble_idx < 7:
                current_marble_idx = len(game) - (7 - current_marble_idx)
            else:
                current_marble_idx = current_marble_idx - 7
            score = game_idx + game.pop(current_marble_idx)
            if current_player not in player_scores.keys():
                player_scores[current_player] = score
            else:
                player_scores[current_player] = player_scores[current_player] + score
            if player_scores[current_player] > high_score:
                high_score = player_scores[current_player]
        else:
            game.insert(play_index, game_idx)
            current_marble_idx = play_index

        game_idx = game_idx + 1
        current_player = current_player + 1
        if current_player > players:
            current_player = 1

    print(high_score)
def test_encodeBlist(self):
    b = blist(list(range(10)))
    c = ujson.dumps(b)
    d = ujson.loads(c)
    self.assertEqual(10, len(d))
    for x in range(10):
        self.assertEqual(x, d[x])
def set(self, adValues):
    """
    Set all the values given as a list, in the same order given.
    """
    self.m_adValues = blist.blist([0]) * len(adValues)
    for i, d in enumerate(adValues):
        if d:
            self.m_adValues[i] = d
def get_graph_degrees_sym(sym_mat):
    """Computes the degrees of nodes in a network (non-oriented).

    - input: sym_mat = adjacency symmetric matrix (class Sym_mat)
    - output: vector of node degrees (ordered as in sym_mat.matrix)
    """
    deg = blist([])
    for i, _ in enumerate(sym_mat.matrix):
        deg.append(0)
        deg[i] = sym_mat.sum_line(i)
    return deg
def _sort_columns(self, columns_list):
    """
    Given a list of column names, sort the DataFrame columns to match the given order.

    :param columns_list: list of column names. Must include all column names
    :return: nothing
    """
    if not (all([x in columns_list for x in self._columns])
            and all([x in self._columns for x in columns_list])):
        raise ValueError(
            'columns_list must be all in current columns, and all current columns must be in columns_list'
        )
    new_sort = [self._columns.index(x) for x in columns_list]
    self._data = blist([self._data[x] for x in new_sort]) if self._blist \
        else [self._data[x] for x in new_sort]
    self._columns = blist([self._columns[x] for x in new_sort]) if self._blist \
        else [self._columns[x] for x in new_sort]
def part1():
    jmp = 337
    pos = 0
    buff = blist.blist([0])
    for i in range(1, 2018):
        pos = ((pos + jmp) % len(buff)) + 1
        buff.insert(pos, i)
    return buff[pos + 1]
def groupfunc(self, key, pkey, func):
    """Return the output of a function applied to the values grouped by key."""
    rst = DataItem()
    if self.sortedkey != key:
        self.sort(key)
    temp = blist.blist()
    # start from a default instance of the key column's type
    idx_val = type(self[key][0])()
    for idx in self.sortedindex:
        if idx_val != self[key][idx]:
            if len(temp) > 0:
                rst[idx_val] = func(temp)
            temp = blist.blist()
            idx_val = self[key][idx]
        temp.append(self[pkey][idx])
    rst[idx_val] = func(temp)
    return rst
def append(self, item):
    """Add a new data item into the dataset.

    This is just for mocking list().append()
    """
    for key in item.iterkeys():
        if key not in self:
            self[key] = blist.blist(genzero(self._size))
        self[key].append(item[key])
    self._size += 1
def solve(spins, result_is_after, step=354):
    """ Solve the spinlock puzzle. """
    nums = blist([0])
    pos = 0
    for i in range(1, spins):
        pos = (pos + step) % i + 1
        nums.insert(pos, i)
    return nums[nums.index(result_is_after) + 1]
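# Hypothetical driver for the solve() spinlock helper above (not from the original
# source): for part 1 of the 2017 "spinlock" puzzle one would perform 2017
# insertions (spins=2018) and read the value sitting after marble 2017. The step
# of 354 is just this snippet's default; swap in your own puzzle input.
if __name__ == '__main__':
    print(solve(2018, 2017, step=354))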
def __init__(self, in_bytes=b""):
    self.header = None
    self.set_bytes(in_bytes)
    self.lines = []
    try:
        # Faster insertion
        from blist import blist
        self.lines = blist([])
    except ImportError:
        pass
def part2():
    # this is a dumb brute-force solution, there are better solutions.
    jmp = 337
    pos = 0
    buff = blist.blist([0])
    for i in range(1, 50000001):
        pos = ((pos + jmp) % len(buff)) + 1
        buff.insert(pos, i)
    return buff[buff.index(0) + 1]
def __init__(self, number_of_players, last_marble):
    self.position = 0
    self.number_of_players = number_of_players
    # switch implementation to a blist for better performance
    self.sequence = blist([0])
    self.players = [0] * number_of_players
    self.player = 0
    self.last_marble = last_marble
    self.current_marble = 1
    self.counter = 0
def __init__(self, n_players, last_marble):
    self.n_players = n_players
    self.last_marble = last_marble
    self.marbles = blist([0])
    self.cur_marble = 0
    self.cur_pos = 0
    self.cur_player = 1
    self.highscores = defaultdict(int)
    self.df = pd.DataFrame(columns=['marble', 'player', 'highscore'])
    self.data = []
def __eq__(self, sequence):
    """Flexible equality that implicitly converts 'str' and 'blist' types."""
    if isinstance(sequence, DNA):
        return self.blist == sequence.blist
    if isinstance(sequence, blist.blist):
        return self.blist == sequence
    elif isinstance(sequence, str):
        return self.blist == blist.blist(sequence)
    else:
        raise TypeError
def __init__(self, t0, w0: torch.Tensor, window_size=8):
    """Initialize Brownian path.

    Args:
        t0: float or torch.Tensor for initial time.
        w0: torch.Tensor for initial state.
    """
    super(BrownianPath, self).__init__()
    if not utils.is_scalar(t0):
        raise ValueError('Initial time t0 should be a float or 0-d torch.Tensor.')

    t0 = float(t0)
    self._ts = blist.blist()
    self._ws = blist.blist()
    self._ts.append(t0)
    self._ws.append(w0)
    self._last_idx = 0
    self._window_size = window_size
def spinlock(steps):
    list_ = blist([0])
    position = 0
    length = 1
    while True:
        position = (position + steps) % length
        list_.insert(position + 1, length)
        position = position + 1
        length += 1
        yield list_
def solve_eqn_sets(solve_sets, modified_vars, solve_func=solve_eqn_set):
    """ Solve a group of equation sets in which only certain variables have been modified. """
    # track modified and solved vars
    modified_vars = set(modified_vars)  # don't really need to copy here...
    solved_vars = set()

    # `q` represents the equation sets that are ready to be solved now
    # TODO: threading
    # start queue with all sets that don't require any vars to be solved
    q = [eqs for eqs in solve_sets if not eqs.requires]
    q = blist(q)  # use blist instead of list

    while q:
        # get the next set that is ready to solve
        eqn_set = q.pop(0)

        # solve the eqn_set *if necessary*
        if any(var in eqn_set.requires for var in modified_vars) or \
                (any(var in eqn_set.solves for var in modified_vars) and not eqn_set.is_satisfied()):
            if not solve_func(eqn_set):
                print("FAIL")
                return eqn_set  # return the eqn set that failed for reporting
            # add all vars solved by this set to the set of modified vars
            modified_vars |= eqn_set.solves

        solved_vars |= eqn_set.solves

        # create the frontier to add to the queue
        # TODO: much more efficient way to get the frontier
        #  1) don't look at an eqn set if it has already been looked at
        #  2) keep track of a linked list between eqn sets directly
        frontier = set()
        for var in eqn_set.solves:  # for each var solved by this eqn set
            frontier.update(
                eqs for eqs in var.required_by  # all eqn sets required by the var
                if all(v in solved_vars for v in eqs.requires)  # if all required vars have been solved for
            )
        q += frontier

    # at this point, modified_vars contains all vars that were updated
    # also, eqn_set should be the final underconstrained set?
    pass
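# A stripped-down, self-contained sketch (not the original API) of the queue-plus-
# frontier pattern solve_eqn_sets() uses: start with the sets whose requirements are
# empty, and whenever a set's outputs become available, enqueue any dependent set
# whose requirements are now fully satisfied. The function name and the dict-based
# inputs here are illustrative assumptions only.
from blist import blist

def propagation_order(requires, produces):
    """requires/produces: dicts mapping a set name to frozensets of variable names."""
    solved = set()
    order = []
    q = blist(name for name, req in requires.items() if not req)
    while q:
        name = q.pop(0)
        order.append(name)
        solved |= produces[name]
        # frontier: any set whose requirements are now all solved and that is not yet queued or done
        frontier = [n for n, req in requires.items()
                    if n not in order and n not in q and req and req <= solved]
        q.extend(frontier)
    return order

# e.g. propagation_order({'a': frozenset(), 'b': frozenset({'x'})},
#                        {'a': {'x'}, 'b': {'y'}})  ->  ['a', 'b']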
def unwrap_qg(phase, quality_map):
    """
    Quality Guided Path Following unwrapping algorithm.

    This algorithm uses the correlation array as a quality map to guide the
    unwrapping path, avoiding the tricky zones.

    Note: the correlation is also known as the module image.

    Returns the unwrapped phase.
    """
    assert phase.shape == quality_map.shape
    phase = phase.copy()
    shape = phase.shape
    rows, cols = shape
    phase /= tau

    def get_neighbors(pos):
        # integer row/column of the flat index
        row = pos // cols
        col = pos % cols
        if row > 0:
            yield pos - cols
        if row < (rows - 1):
            yield pos + cols
        if col > 0:
            yield pos - 1
        if col < (cols - 1):
            yield pos + 1

    phase = phase.ravel()
    adder = {}
    quality_map = quality_map.ravel()
    first_pixel = quality_map.argmax()
    border = blist()
    for pos in get_neighbors(first_pixel):
        adder[pos] = phase[first_pixel]
        insort(border, (quality_map[pos], pos))

    while border:
        quality, pixel = border.pop()
        phase[pixel] -= round(phase[pixel] - adder[pixel])
        for pos in get_neighbors(pixel):
            if pos not in adder:
                adder[pos] = phase[pixel]
                try:
                    insort(border, (quality_map[pos], pos))
                except IndexError:
                    print(quality_map.shape, pos)
                    raise

    phase = phase.reshape(shape) * tau
    return phase
def getAllFieldValuesbyColumnsTsvToLst(inputFile):
    df = pd.read_csv(inputFile, sep='\t', header=0, index_col=None, quoting=csv.QUOTE_NONE)
    fields = df.columns
    # print('field xxx: ', len(fields))
    fieldValLsts = []
    preproc = preprocess()
    for tbfd in fields:
        valst = preproc.filterNullValue(df[tbfd])
        newLst = blist([tbfd]) + valst
        fieldValLsts.append(newLst)
    return fieldValLsts
def __init__(self, iterable):
    self.q = q = deque(iterable)
    self.l = l = blist(q)
    l.sort()
    if len(q) % 2 == 1:
        self.odd = True
        self.mididx = (len(q) - 1) // 2
    else:
        self.odd = False
        self.mididx1 = len(q) // 2
        self.mididx2 = len(q) // 2 - 1
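# A minimal, self-contained sketch (not from the original source) of the idea the
# __init__ above sets up: keep a blist sorted with bisect.insort so the median of a
# growing stream stays cheap, since blist makes mid-list inserts O(log n) instead of
# the O(n) cost of a plain list. The function name and example values are illustrative.
from bisect import insort
from blist import blist

def running_medians(values):
    sorted_vals = blist()
    for v in values:
        insort(sorted_vals, v)  # cheap mid-list insert thanks to blist
        n = len(sorted_vals)
        if n % 2 == 1:
            yield sorted_vals[(n - 1) // 2]
        else:
            yield (sorted_vals[n // 2] + sorted_vals[n // 2 - 1]) / 2

# e.g. list(running_medians([5, 1, 3, 2])) -> [5, 3.0, 3, 2.5]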
def __init__(self, sym_mat=None):
    """
    input: sym_mat = adjacency matrix of the graph, symmetric, of the class Sym_mat
    """
    if sym_mat is None:
        self.graph = Sym_mat()
        self.node_degrees = blist([])
        self.tot_num_edges = 0
    else:
        self.graph = sym_mat
        self.node_degrees = get_graph_degrees_sym(sym_mat)
        self.tot_num_edges = tot_num_edges(sym_mat.matrix)
def part2():
    # using a more efficient list implementation that allows for O(log n) insert times
    inp = 329
    cur_pos = 0
    lst = blist([0])
    for i in range(1, 50000000 + 1):
        cur_pos = (cur_pos + inp) % len(lst)
        lst.insert(cur_pos + 1, i)
        cur_pos += 1
    for idx, item in enumerate(lst):
        if item == 0:
            pprint(lst[idx + 1])
def question(marbles, num_players):
    scores = blist([0] * num_players)
    current_player = 1
    board = blist([0, 1])
    current_marble_index = 1
    for i in range(2, marbles):
        print(round(i * 100 / marbles, 2))
        board_length = len(board)
        if i % 23 != 0:
            after_index = (current_marble_index + 2) % board_length
            board.insert(after_index, i)
            current_marble_index = after_index
        else:
            scores[current_player - 1] += i
            other_marble_index = (current_marble_index - 7) % board_length
            scores[current_player - 1] += board[other_marble_index]
            del board[other_marble_index]
            current_marble_index = other_marble_index % (board_length - 1)
        current_player = (current_player + 1) % num_players
    return max(scores)
def steal_next(elfcount=3_014_603):
    circle = blist([elf_id for elf_id in range(1, elfcount + 1)])
    target = 0
    elfcount = len(circle)
    while elfcount > 1:
        target = (target + 1) % elfcount
        del circle[target]
        elfcount -= 1
    return circle[0]
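# A quick sanity check for steal_next() above (added here, not from the original
# source): with five elves, the "steal from the next elf" elimination leaves elf 3
# standing, matching the classic Josephus result for k = 2.
assert steal_next(5) == 3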
def _add_column(self, column):
    """
    Add a new column to the DataFrame

    :param column: column name
    :return: nothing
    """
    self._columns.append(column)
    if self._blist:
        self._data.append(blist([None] * len(self._index)))
    else:
        self._data.append([None] * len(self._index))
def to_array(self):
    if self.sentences is None:
        self.sentences = blist()
        for source, prefix in self.sources.items():
            with utils.smart_open(source) as fin:
                for item_no, line in enumerate(fin):
                    line = line.replace("\n", "")
                    self.sentences.append(
                        TaggedDocument(utils.to_unicode(line).split(),
                                       [prefix + "_%s" % item_no]))
    return self.sentences
def test_first_last(self):
    x = EulerTour(None, blist([1, 2, 3, 7, 3, 4, 5, 4, 3, 2, 1]))
    self.assertEqual(x.first_occurrence(1), 0)
    self.assertEqual(x.last_occurrence(1), 10)
    self.assertEqual(x.first_occurrence(7), 3)
    self.assertEqual(x.last_occurrence(7), 3)
    self.assertEqual(x.first_occurrence(5), 6)
    self.assertEqual(x.last_occurrence(5), 6)
    self.assertEqual(x.first_occurrence(3), 2)
    self.assertEqual(x.last_occurrence(3), 8)
    self.assertEqual(x.first_occurrence(8), None)
    self.assertEqual(x.last_occurrence(8), None)
def day17(iterations=3):
    """Day 17."""
    lst = blist([0])
    indx = 0
    lst_insert = 0
    for i in range(1, 2018):
        indx += 1
        indx %= len(lst)
        lst.insert(indx + 1, i)
        lst_insert = indx + 1
        indx += iterations
    return lst[lst_insert + 1]
def ZRANGE(self, zset, start, stop, *flags):
    # TODO: better flags checking
    if len(flags) == 1 and flags[0].upper() == 'WITHSCORES':
        result = blist()
        for key, value in zset.items()[redis_slice(start, stop)]:
            result.append(key)
            astext = '%.17f' % value
            astext = astext.rstrip('0').rstrip('.')
            result.append(astext)
        return result
    else:
        print 'keys'
        return zset.viewkeys()[redis_slice(start, stop)]
def test_encodeBlist(self):
    try:
        from blist import blist
    except ImportError:
        return
    b = blist(range(10))
    c = ujson.dumps(b)
    d = ujson.loads(c)
    self.assertEqual(10, len(d))
    for x in xrange(10):
        self.assertEqual(x, d[x])
def solve(cap, mod=10**18):
    s = 0
    t_arr = blist([])
    inversions = [0]
    nofd = nofd_fill(cap + 2)
    for i in xrange(1, cap + 1):
        if i % (5 * 10**5) == 0:
            print "ON %d" % i
        t = T(i, nofd)
        pos = bisect_right(t_arr, t)
        inversions.append(len(t_arr) - pos)
        t_arr.insert(pos, t)
    print "On Second Stage."
    del t_arr
    t_arr = blist([])
    for j in xrange(cap, 0, -1):
        t = T(j, nofd)
        pos = bisect_left(t_arr, t)
        s = (s + inversions[j] * pos) % mod
        t_arr.insert(pos, t)
        if j % (5 * 10**5) == 0:
            print "ON %d, sum = %d" % (j, s)
    return s
def ZADD(self, zset, *args):
    assert args, 'syntax error, arguments required'
    assert len(args) % 2 == 0
    pairs = blist()
    # check for errors before doing anything
    for index in xrange(0, len(args), 2):
        score = float(args[index])
        assert not math.isnan(score), "ERR not a valid floating point value"
        member = args[index + 1]
        pairs.append((score, member))
    added = 0
    for score, member in pairs:
        if member not in zset:
            added += 1
        zset[member] = score
    return added
def pickle_tests(self, pickler):
    self.pickle_test(pickler, blist.blist())
    self.pickle_test(pickler, blist.blist(list(range(limit))))
    self.pickle_test(pickler, blist.blist(list(range(limit + 1))))
    self.pickle_test(pickler, blist.blist(list(range(n))))

    x = blist.blist([0])
    x *= n
    self.pickle_test(pickler, x)

    y = blist.blist(x)
    y[5] = 'x'
    self.pickle_test(pickler, x)
    self.pickle_test(pickler, y)
def bitsort_blist(filename=TDATA, maxn=MAXN):
    """
    Sort a file named 'filename' which consists of maxn integers,
    where each integer is less than maxn.

    Profile result:
        bitsort_blist: 4.84400010109
        memory consumption: 70 MB
    """
    # Initialize bitmap
    a = blist([0]) * maxn
    # Read from file and fill bitmap
    for line in file(filename):
        n = int(line.strip())
        # Turn bits on for numbers
        if n < maxn:
            a[n] = 1
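# bitsort_blist() above fills the bitmap but stops before emitting the sorted output.
# A minimal companion sketch (not from the original source) showing the read-out
# step; the helper name and the output path are assumptions.
def write_sorted_from_bitmap(bitmap, out_path='sorted.txt'):
    # every index whose bit is set appeared once in the input, so walking the
    # bitmap in order yields the numbers in ascending order
    with open(out_path, 'w') as out:
        for n, bit in enumerate(bitmap):
            if bit:
                out.write('%d\n' % n)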
def to_array(self):
    if self.sentences is None:
        self.sentences = blist()
        for source, prefix in self.sources.items():
            with utils.smart_open(source) as fin:
                for item_no, line in enumerate(fin):
                    line = line.replace("\n", "")
                    self.sentences.append(
                        TaggedDocument(utils.to_unicode(line).split(),
                                       [prefix + "_%s" % item_no]))
    return self.sentences
def solveCodeJam(algo, inFilePath, outFilePath):
    print('Input : \'{}\'\nOutput : \'{}\''.format(inFilePath, outFilePath))
    inFile = open(inFilePath, 'r')
    d = inFile.read().splitlines()
    inFile.close()
    n, *d = d
    l = len(d)
    b = l // int(n)
    splitTask = (d[i:i + b] for i in range(0, l, b))
    modules = __import__(algo)
    solutions = blist([])
    for i, block in enumerate(splitTask):
        print(i + 1, block)
        n, solution = modules.solve(i + 1, block)
        solutions.append('Case #{}: {}'.format(n, solution))
    with open(outFilePath, 'w') as outFile:
        outFile.write('\n'.join(solutions))
    print('Done - {} cases'.format(n))