def main():
    """Load a CSV file named on the command line and browse it with tabloo.

    Arguments come from the module-level ``parse_args`` helper.  If the user
    supplied a custom field separator it is forwarded to ``pandas.read_csv``;
    otherwise pandas keeps its default comma separator.
    """
    args = parse_args()
    # Build kwargs conditionally so we never pass sep=None to read_csv,
    # which would be treated as "sniff the delimiter" rather than default.
    loader_kwargs = {}
    if args.sep is not None:
        loader_kwargs["sep"] = args.sep
    df = pd.read_csv(args.file, **loader_kwargs)
    tabloo.show(
        df,
        server_logging=True,
    )
#!/usr/bin/env python
"""Demo: display a DataFrame containing NaN and non-finite values in tabloo."""
import pandas as pd
import numpy as np
import tabloo

NUM_ROWS = 1000

# Random demo data: an integer id column plus two float columns in [-1, 1).
frame = pd.DataFrame({
    "id": np.arange(NUM_ROWS),
    "xs": np.random.uniform(-1, +1, NUM_ROWS),
    "ys": np.random.uniform(-1, +1, NUM_ROWS),
})

# Sprinkle in missing and infinite values so tabloo's handling of
# non-finite floats can be exercised in the browser.
frame.loc[::10, "xs"] = np.nan
frame.loc[::20, "ys"] = np.nan
frame.loc[::47, "xs"] = +np.inf
frame.loc[::83, "xs"] = -np.inf

tabloo.show(frame, open_browser=False, debug=True, server_logging=True)
# ### ┌─────────────────────────────────────────────────┐ # ### │ Dataframe │ # ### └─────────────────────────────────────────────────┘ # + df = pd.DataFrame({ "title": title, "date": dt, "exlink": elink, "comments": comments, "category": category, "user": user, "popular": pop, }) tabloo.show(df) # from operator import itemgetter # df['user'] = df['user'] | p(map, p(itemgetter(0)), px) | p(list) df["user"] = df["user"] | p(chain.from_iterable) | p(list) df["exlink"] = df["exlink"] | px.str.replace(r"\(|\)", "", regex=True) # - # ### ┌─────────────────────────────────────────────────┐ # ### │ Functions for scraping more than one page │ # ### └─────────────────────────────────────────────────┘ # + import time