def data_of_file(file: str) -> Conversacion:
    # Parse each line into a Mensaje, keep those that parse correctly and
    # build a Conversacion together with the stop-word set (palabras huecas).
    ms = (Mensaje.parse(m) for m in lineas_de_fichero(file))
    mensajes = [m for m in ms if m is not None]
    palabras_huecas = {
        p for p in lineas_de_fichero("../../../resources/palabras_huecas.txt")
        if len(p) > 0
    }
    return Conversacion(mensajes, palabras_huecas)
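# Hedged usage sketch (not part of the original code): builds a Conversacion
# from a message log and prints it. The path below is hypothetical; any file
# whose lines Mensaje.parse understands would do.
def _demo_data_of_file() -> None:
    conversacion = data_of_file('../../../resources/mensajes.txt')  # hypothetical log file
    print(conversacion)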
def linea_numero(file: str, n: int) -> str:
    # Return the line at (0-based) index n.
    return find_first(enumerate(lineas_de_fichero(file)),
                      predicate=lambda p: p[0] == n).get()[1]
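# Hedged usage sketch: linea_numero is 0-based, so n=0 yields the first line of
# the file. The datos.txt path is the one used elsewhere in this code; its
# existence is assumed here.
def _demo_linea_numero() -> None:
    print(linea_numero('../../../resources/datos.txt', 0))  # first line of the file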
def palabra_en_lineas(file: str) -> OrderedDict[str, set[int]]:
    # Index mapping each word to the set of line numbers it appears on,
    # sorted alphabetically by word.
    lns = lineas_de_fichero(file, encoding='utf-16')
    palabras = ((i, p) for i, linea in enumerate(lns)
                for p in re.split(sep, linea) if len(p) > 0)
    d = grouping_set(palabras, fkey=lambda e: e[1], fvalue=lambda e: e[0])
    return OrderedDict(sorted(d.items(), key=lambda t: t[0]))
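# Hedged usage sketch: for a UTF-16 file with the two lines "hola mundo" and
# "adios mundo", and assuming `sep` splits on whitespace, the index would be
# {'adios': {1}, 'hola': {0}, 'mundo': {0, 1}}. The path below is hypothetical.
def _demo_palabra_en_lineas() -> None:
    indice = palabra_en_lineas('../../../resources/texto_utf16.txt')  # hypothetical UTF-16 file
    for palabra, lineas in indice.items():
        print(palabra, sorted(lineas))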
def linea_mas_larga(file: str) -> str:
    return max(lineas_de_fichero(file), key=len)
def primera_linea_con_palabra(file: str, palabra: str) -> str:
    return find_first(lineas_de_fichero(file),
                      predicate=lambda ln: palabra in ln).get()
def longitud_media_de_lineas(file: str) -> float:
    return average(len(ln) for ln in lineas_de_fichero(file))
def numero_de_lineas_vacias(file: str) -> int:
    return count(ln for ln in lineas_de_fichero(file, encoding='utf-16')
                 if len(ln) == 0)
def numero_de_lineas(file: str) -> int:
    return len(lineas_de_fichero(file))
def palabras_no_huecas(file: str) -> Iterable[str]:
    # All non-empty words of the file that are not stop words (palabras huecas).
    huecas = palabras_huecas()
    lns = lineas_de_fichero(file, encoding='utf-16')
    pls = flat_map(lns, lambda x: re.split(sep, x))
    return (p for p in pls if len(p) > 0 and p not in huecas)
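# Hedged sketch: combining palabras_no_huecas with collections.Counter (standard
# library) gives the most frequent non-stop words of a file. The path below is
# hypothetical.
def _demo_palabras_frecuentes() -> None:
    from collections import Counter  # stdlib; imported locally to keep the sketch self-contained
    frecuencias = Counter(palabras_no_huecas('../../../resources/texto_utf16.txt'))  # hypothetical UTF-16 file
    print(frecuencias.most_common(5))  # five most frequent non-stop words with their counts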
def palabras_huecas() -> set[str]:
    return set(lineas_de_fichero("../../../resources/palabras_huecas.txt"))
if __name__ == '__main__':
    for i, n in enumerate(numeros):
        print(i, n, sep=': ')
    texto = "Muestrame con puntos"
    for c in texto:
        print(c, end='.')
    # Read the whole file at once and print its contents
    with open('../../../resources/datos_2.txt', encoding='utf-8') as f:
        contenido = f.read()
        print(contenido)
    # Read the same file line by line
    with open('../../../resources/datos_2.txt', encoding='utf-8') as f:
        for linea in f:
            print(linea, end='')
        print(f)  # the file object itself (name, mode and encoding)
    ls = lineas_de_fichero('../../../resources/datos_2.txt')
    print(ls)
    print(math.pi)
    r = montecarlo_list(1000000)
    draw_multiline(r)
    print(r[-1])
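# Hedged sketch (not the original montecarlo_list): a plain standard-library
# Monte Carlo estimate of pi, for comparison with the math.pi value printed above.
def _demo_montecarlo_pi(n: int = 1_000_000) -> float:
    import random  # stdlib
    dentro = sum(1 for _ in range(n)
                 if random.random() ** 2 + random.random() ** 2 <= 1.0)  # point falls inside the quarter circle
    return 4 * dentro / n  # fraction inside the quarter circle, times 4, approximates pi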
# Similar to collections.Counter
def groups_size(iterable: Iterable[E],
                fkey: Callable[[E], K] = identity,
                fsum: Callable[[E], int] = lambda e: 1) -> dict[K, int]:
    return grouping_reduce(iterable, fkey, op=lambda x, y: x + y, fvalue=fsum)


if __name__ == '__main__':
    print(str_iterable(range(0, 100)))
    print(average(range(0, 100)))
    print(str_iterable(flat_map([[0, 1], [2, 3, 4], [5, 6], [9]])))
    print(str_iterable(geometric(2, 100, 5)))
    print(index_bool(x % 29 == 0 for x in aleatorios(10, 1000, 50)))
    print(str_iterable(lineas_de_fichero('../../../resources/datos.txt')))
    print(index_predicate(
        (int(e) for e in lineas_de_fichero('../../../resources/datos.txt')),
        lambda x: x == 7))
    print(first_and_last(arithmetic(3, 500, 29)))
    print(list(zip2([1, 2, 3, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15])))
    g = grouping_reduce(range(0, 10, 2), fkey=lambda x: x % 3,
                        op=lambda x, y: x + y)
    print(g[0])
    cp = Counter(['a', 'b', 'c', 'a', 'b', 'b'])
    print(cp.most_common(1)[0][1])  # count of the most common element: 3
    r = ((1, 2, 3, 4) * 2)[-2:-1]
    print(r)  # (3,)
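# Hedged sketch: groups_size should agree with collections.Counter on a small
# list (it relies on `identity` being the default key function defined in this module).
def _demo_groups_size() -> None:
    datos = ['a', 'b', 'a', 'c', 'a']
    print(groups_size(datos))    # expected counts: a -> 3, b -> 1, c -> 1 (key order may vary)
    print(dict(Counter(datos)))  # same counts via the standard library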