Piter De Vries,David Dastmalchian,2021
Baron Vladimir Harkonnen,Stellan Skarsgård,2021
Reverend Mother Gaius Helen Mohiam,Charlotte Rampling,2021
Thufir Hawat,Stephen McKinley Henderson,2021
Dr. Yueh,Chen Chang,2021
Dr. Liet-Kynes,Sharon Duncan-Brewster,2021
Jamis,Babs Olusanmokun,2021
Harah,Gloria Obianyo,2021
Padishah Emperor Shaddam IV,...,2021
Shadout Mapes,...,2021
Princess Irulan,...,2021
Reverend Mother Ramallo,...,2021
Feyd-Rautha Harkonnen,...,2021
Alia Atreides,...,2021
Otheym,...,2021
"""

dune_casts = lt.Table("dune_1984").csv_import(dune_casts_csv).create_index("character")

dune_1984 = dune_casts.where(year="1984").add_field("actor (1984)", attrgetter("actor"))
dune_2000 = dune_casts.where(year="2000").add_field("actor (2000)", attrgetter("actor"))
dune_2021 = dune_casts.where(year="2021").add_field("actor (2021)", attrgetter("actor"))

join = dune_1984.join_on("character") + dune_2000 + dune_2021
dune_combined = join()("Dune Casts (combined)")
dune_combined.present(fields=["character", "actor (1984)", "actor (2000)", "actor (2021)"])
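As a small follow-up (added for illustration, using only calls already shown in this listing), a single character's rows across all three imported casts can be pulled back out of the source table:

# list every imported casting row for one character
dune_casts.where(character="Jamis").present()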
# that normalize back to ASCII characters that can be used in Python
# identifiers (A-Z, a-z, 0-9, _, and ·)
_· = "_·"
ident_chars = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
               "abcdefghijklmnopqrstuvwxyz"
               "0123456789" + _·)

accum = {ch: [] for ch in ident_chars}

# build up accumulator by walking Unicode range
unicode_upper_limit = sys.maxunicode+1 if ALL_UNICODE_CHARS else 65536
for i in range(32, unicode_upper_limit):
    ch = chr(i)
    norm = unicodedata.normalize("NFKC", ch)
    if norm in accum:
        accum[norm].append(ch)

# convert accumulator to a littletable Table for presentation
normalizations = lt.Table()
for asc_char, normalizing_chars in accum.items():
    normalizations.insert_many(
        {"ASCII": asc_char,
         "ord": ord(asc_char),
         "Unicode": norm_char,
         "code_point": ord(norm_char),
         "name": unicodedata.name(norm_char),
         }
        for norm_char in normalizing_chars
    )

normalizations.sort("ASCII")
normalizations.present(groupby="ASCII ord")
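As a quick illustration (not part of the original script), one character can be normalized by hand and then looked up in the finished table with calls already used above:

# FULLWIDTH LATIN CAPITAL LETTER A (U+FF21) NFKC-normalizes to plain "A"
print(unicodedata.normalize("NFKC", "\uff21"))
# every character that normalized to "A" during the scan above
normalizations.where(ASCII="A").present()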
title,ingredients
Tuna casserole,tuna noodles cream of mushroom soup
Hawaiian pizza,pizza dough pineapple ham tomato sauce
Margherita pizza,pizza dough cheese pesto artichoke hearts
Pepperoni pizza,pizza dough cheese tomato sauce pepperoni
Grilled cheese sandwich,bread cheese butter
Tuna melt,tuna mayonnaise tomato bread cheese
Chili dog,hot dog chili onion bun
French toast,egg milk vanilla bread maple syrup
BLT,bread bacon lettuce tomato mayonnaise
Reuben sandwich,rye bread sauerkraut corned beef swiss cheese russian dressing thousand island
Hamburger,ground beef bun lettuce ketchup mustard pickle
Cheeseburger,ground beef bun lettuce ketchup mustard pickle cheese
Bacon cheeseburger,ground beef bun lettuce ketchup mustard pickle cheese bacon
""")

recipes = lt.Table().csv_import(recipe_data)

# define search index on "ingredients" attribute
search_attr = "ingredients"
recipes.create_search_index(search_attr)

# run sample queries
queries = """\
tuna
tuna +cheese
pineapple +bacon
lettuce beef -sauerkraut
tomato
pizza dough -pineapple
pizza dough --pineapple
bread bacon
bread ++bacon""".splitlines()
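The listing above stops after defining the queries; a minimal sketch of running them, assuming littletable's search accessor (table.search.<attribute>, created by create_search_index):

# run each query against the "ingredients" search index and report match counts
for query in queries:
    matches = recipes.search.ingredients(query)
    print(f"{query!r}: {len(matches)} match(es)")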
tbl = lt.Table("Academy Awards 1960-1969").csv_import("""\
year,award,movie,recipient
1960,Best Picture,Ben-Hur,
1960,Best Actor,Ben-Hur,Charlton Heston
1960,Best Actress,The Heiress,Simone Signoret
1960,Best Director,Ben-Hur,William Wyler
1961,Best Picture,The Apartment,
1961,Best Actor,Elmer Gantry,Burt Lancaster
1961,Best Actress,Butterfield 8,Elizabeth Taylor
1961,Best Director,The Apartment,Billy Wilder
1962,Best Picture,West Side Story,
1962,Best Actor,Judgment at Nuremberg,Maximilian Schell
1962,Best Actress,Two Women,Sophia Loren
1962,Best Director,West Side Story,Robert Wise/Jerome Robbins
1963,Best Picture,Lawrence of Arabia,
1963,Best Actor,To Kill A Mockingbird,Gregory Peck
1963,Best Actress,The Miracle Worker,Anne Bancroft
1963,Best Director,Lawrence of Arabia,David Lean
1964,Best Picture,Tom Jones,
1964,Best Actor,Lilies of the Field,Sidney Poitier
1964,Best Actress,Hud,Patricia Neal
1964,Best Director,Tom Jones,Tony Richardson
1965,Best Picture,My Fair Lady,
1965,Best Actor,My Fair Lady,Rex Harrison
1965,Best Actress,Mary Poppins,Julie Andrews
1965,Best Director,My Fair Lady,George Cukor
1966,Best Picture,The Sound of Music,
1966,Best Actor,Cat Ballou,Lee Marvin
1966,Best Actress,Darling,Julie Christie
1966,Best Director,The Sound of Music,Robert Wise
1967,Best Picture,A Man for All Seasons,
1967,Best Actor,A Man for All Seasons,Paul Scofield
1967,Best Actress,Who's Afraid of Virginia Woolf,Elizabeth Taylor
1967,Best Director,A Man for All Seasons,Fred Zinnemann
1968,Best Picture,In The Heat of The Night,
1968,Best Actor,In The Heat of The Night,Rod Steiger
1968,Best Actress,Guess Who's Coming to Dinner,Katharine Hepburn
1968,Best Director,The Graduate,Mike Nichols
1969,Best Picture,Oliver!,
1969,Best Actor,Charly,Cliff Robertson
1969,Best Actress,Funny Girl,Barbra Streisand
1969,Best Director,Oliver!,Carol Reed
""")
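A short follow-up query, added here as an illustration using only calls already shown in these examples:

# list just the Best Picture winners, one row per year
best_picture = tbl.where(award="Best Picture")
best_picture.present(fields=["year", "movie"])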
columns = [
    ("id_no", 0,),
    ("name", 4,),
    ("address", 21,),
    ("city", 42,),
    ("state", 56, 58,),
    ("tech_skill_score", 59, None, float),
]

characters_table = lt.Table().insert_many(
    lt.DataObject(**rec) for rec in lt.FixedWidthReader(columns, data)
)

print(len(characters_table))
print(characters_table[0])
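The column spec above appears to follow a (name, start[, end[, transform]]) pattern, where an omitted end runs to the next column's start and an omitted transform leaves the field as a stripped string. That reading is an assumption; the hypothetical helper below only illustrates how such a spec maps onto string slices, and is not littletable's actual FixedWidthReader:

def slice_record(line, spec):
    # hypothetical illustration of a (name, start[, end[, transform]]) column spec
    fields = {}
    for i, col in enumerate(spec):
        name, start = col[0], col[1]
        end = col[2] if len(col) > 2 and col[2] is not None else None
        if end is None and i + 1 < len(spec):
            end = spec[i + 1][1]  # assumed: field runs up to the next column's start
        transform = col[3] if len(col) > 3 else str.strip
        fields[name] = transform(line[start:end])
    return fields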
#
# excel_data_types.py
#
# Demonstrate data type conversions done automatically when importing from Excel
#
import littletable as lt

xl = lt.Table().excel_import("../test/data_types.xlsx")
xl.present()

for row in xl:
    print(row.name, repr(row.value), type(row.value), row.type)
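As a small follow-up (not part of the original script), tallying the imported values by Python type gives a quick check on the automatic conversions:

from collections import Counter

# count how many imported values landed in each Python type
print(Counter(type(row.value).__name__ for row in xl))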
lasts = pathlib.Path('last_names.txt').read_text().splitlines()

NUM_NURSES = 12
NUM_DOCTORS = 8

names = set()
reset_id()
while len(names) < NUM_NURSES:
    names.add(StaffMember(next_id(), random.choice(lasts), random.choice(firsts), "nurse"))
while len(names) < NUM_NURSES + NUM_DOCTORS:
    names.add(StaffMember(next_id(), random.choice(lasts), random.choice(firsts), "doctor"))

staff = lt.Table()
staff.insert_many(sorted(names))
staff.create_index("staff_id", unique=True)
print(len(staff))
print(staff.info())
print(staff[0])
staff.csv_export("staff.csv")

departments = lt.Table()
reset_id()
departments.insert(lt.DataObject(id=next_id(), name="ER"))
departments.insert(lt.DataObject(id=next_id(), name="ICU"))
departments.insert(lt.DataObject(id=next_id(), name="OR"))
print(departments.info())
departments.create_index("id", unique=True)
departments.csv_export("departments.csv")
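A brief usage sketch (added here; the `by` index accessor is assumed from littletable's other indexed-lookup idioms and is not part of the script above):

# retrieve single records through the unique indexes just created
first_staff_id = staff[0].staff_id
print(staff.by.staff_id[first_staff_id])
first_dept_id = departments[0].id
print(departments.by.id[first_dept_id])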
"digit_value", "numeric_value", "mirrored", "unicode_1_name", "iso10646_comment", "uppercase_hex", "lowercase_hex", "titlecase_hex", ] unicode_url = "https://www.unicode.org/Public/3.2-Update/UnicodeData-3.2.0.txt" unicode_file = "unicode_320.txt.zip" unicode = lt.Table().csv_import( unicode_file, delimiter=";", transforms={ "decimal_digit_value": int, "digit_value": int, "numeric_value": int, }, fieldnames=fieldnames, ) unicode.add_field("code_value", lambda r: int(r.code_value_hex, 16)) unicode.add_field("uppercase", lambda r: int(r.uppercase_hex, 16)) unicode.add_field("lowercase", lambda r: int(r.lowercase_hex, 16)) unicode.add_field("titlecase", lambda r: int(r.titlecase_hex, 16)) unicode.add_field("character", lambda r: chr(r.code_value)) unicode.add_field("upper_char", lambda r: chr(r.uppercase)) unicode.add_field("lower_char", lambda r: chr(r.lowercase)) unicode.add_field("title_char", lambda r: chr(r.titlecase)) unicode.add_field("is_identifier", lambda r: r.character.isidentifier()) unicode.create_index("code_value_hex", unique=True)