class TinyDBDatabase(DatabaseBase):
    """TinyDB-backed implementation of DatabaseBase storing one JSON file."""

    def __init__(self, db_file_path: str = "./resources/db.json"):
        super().__init__()
        self._db_file_path = db_file_path
        self._db = TinyDB(self._db_file_path)

    def _insert_record(self, record: Dict):
        """Insert record only when no record with the same ID exists (no upsert)."""
        if self._get_record(record[ID]) is None:
            self._db.insert(record)

    def _get_record(self, id: str) -> Optional[Dict]:
        """Return the first record whose ID field matches, or None."""
        query = Query()
        search_result = self._db.search(query[ID] == id)
        if not search_result:
            return None
        return search_result[0]

    def _list_patient_ids(self) -> List[str]:
        """Return the IDs of every record that has an '_id' field."""
        query = Query()
        # query._id matches records where the '_id' field exists and is truthy.
        # NOTE(review): assumes the ID constant is '_id' — confirm.
        search_result = self._db.search(query._id)
        # Removed dead statement `search_result = [] or search_result`:
        # `[] or x` always evaluates to x, so it was a no-op.
        return [record[ID] for record in search_result]

    def purge(self):
        """Delete every table (and thus every record) in the database."""
        self._db.purge_tables()
class TinyDBEngine(object):
    """Thin CRUD wrapper around one TinyDB table stored under data/<tablename>.json."""

    def __init__(self, tablename="default"):
        """Open (or create) the backing JSON file and prepare a reusable query."""
        self._tdb = TinyDB('data/' + tablename + '.json', default_table=tablename)
        self._query = Query()

    def clear_db(self):
        """Drop every table in the database."""
        self._tdb.purge_tables()

    def add_record(self, idx, values):
        """Insert a new record; returns the TinyDB document id."""
        record = {"idx": idx, "values": values}
        return self._tdb.insert(record)

    def update_record(self, idx, update_attr):
        """Apply the fields in update_attr to the record whose idx matches."""
        self._tdb.update(update_attr, self._query.idx == idx)

    def delete_record(self, idx):
        """Remove the record whose idx matches."""
        self._tdb.remove(where('idx') == idx)

    def get_record_by_id(self, idx):
        """Fetch a single record by idx (None when absent)."""
        return self._tdb.get(self._query.idx == idx)

    def get_records(self):
        """Return every record in the table."""
        return self._tdb.all()
def dump_model(model, target=os.getcwd() + "/db/" + "new_organ_db"):
    """Serialize a model as a JSON file so it can be reopened later.

    Any OSError from opening the target file propagates to the caller
    (the GUI is expected to surface it); the original wrapped this in a
    try/except that only re-raised, which was a no-op and has been removed,
    along with a leftover debug print.
    """
    target_db = TinyDB(target + ".json")
    # Start from a clean slate so stale tables never survive a re-dump.
    target_db.purge_tables()

    target_db.table("GlobalConstants").insert(model.get_global_constants())
    target_db.table("GlobalFunctions").insert(model.get_global_functions())
    target_db.table("GlobalParameters").insert(model.get_global_param_ranges())

    systemic_organs = target_db.table("SystemicOrgans")
    for organ in model.get_organs().values():
        # Flatten the organ into a plain dict for JSON representation.
        systemic_organs.insert({
            'name': organ.get_name(),
            'variables': organ.get_local_ranges(),
            'functions': organ.get_funcs(),
        })
class DBWrapper(object):
    """Persistence layer for scrape targets and their results, backed by TinyDB."""

    def __init__(self, db_path):
        self.db = TinyDB(db_path)

    def insert_target(self, name, url, record_spec, field_specs):
        """Store a target, replacing any previous target of the same name."""
        targets = self.db.table('targets')
        if targets.get(where('name') == name):
            targets.remove(where('name') == name)
        targets.insert({
            'name': name,
            'url': url,
            'record_spec': json.dumps(record_spec),
            'field_specs': json.dumps(field_specs),
        })

    def insert_result(self, target_name, result, stamp=None):
        """Append a scrape result; no-op when target name or result is falsy."""
        if not target_name or not result:
            return
        if not stamp:
            stamp = get_safe_timestamp()
        self.db.table('results').insert({
            'target': target_name,
            'result': json.dumps(result),
            'stamp': stamp,
        })

    def latest_result_json(self, target_name):
        """Return the most recent (by stamp) decoded result, or None."""
        if not target_name:
            return None
        matches = self.db.table('results').search(where('target') == target_name)
        if not matches:
            return None
        ordered = sorted(matches, key=lambda rec: rec['stamp'])
        raw = ordered[-1].get('result')
        if not raw:
            return None
        return json.loads(raw)

    def targets(self):
        """Yield every stored target with its specs decoded from JSON."""
        for row in self.db.table('targets').all():
            yield {
                'name': row['name'],
                'url': row['url'],
                'record_spec': json.loads(row['record_spec']),
                'field_specs': json.loads(row['field_specs']),
            }

    def purge(self):
        """Drop all tables."""
        self.db.purge_tables()
def saveCookie2File():
    """Persist the browser's current cookies to the cookie file (best effort)."""
    cookies = driver.get_cookies()
    try:
        cookie_db = TinyDB(cookieFile)
        cookie_db.purge_tables()  # replace any previously saved cookies
        cookie_db.insert_multiple(cookies)
        cookie_db.close()
    except Exception as err:
        # Deliberately best-effort: report the failure rather than crash.
        print(f'Error in save cookie File: {err}')
class DBC:
    """TinyDB-backed store for per-user deal-bot configuration.

    Keywords and merchants are stored as comma-separated strings; '*' means
    "match anything". The four keyword/merchant getter/setter pairs were
    copy-pasted duplicates and now share private helpers.
    """

    def __init__(self, path=None):
        if path is None:
            self.db = TinyDB(os.path.join('my-config', 'chollos-db.json'))
        else:
            self.db = TinyDB(path)
        # Touch the table so it exists up front.
        self.db.table('UserConfiguration')

    def get_table(self, table_name):
        """Expose an arbitrary TinyDB table."""
        return self.db.table(table_name)

    def purge(self):
        """Drop every table."""
        self.db.purge_tables()

    def insert_user_configuration(self, user_id):
        """Create a default config ('*' wildcards) for user_id; True when created."""
        table = self.db.table('UserConfiguration')
        if not table.search(where('user_id') == user_id):
            table.insert({'user_id': user_id, 'keywords': '*', 'merchants': '*'})
            return True
        return False

    def _get_field(self, user_id, field):
        """Raw string value of one config field; IndexError when user is unknown."""
        return self.db.table('UserConfiguration').search(
            where('user_id') == user_id)[0][field]

    def _get_field_list(self, user_id, field):
        """Field split on commas; '*' or a single value comes back as [value]."""
        value = self._get_field(user_id, field)
        if value == '*' or ',' not in value:
            return [value]
        return value.split(',')

    def _set_field(self, user_id, field, value):
        """Update one config field, stripping double quotes from the value."""
        query = Query()
        self.db.table('UserConfiguration').update(
            {field: value.replace('"', '')}, query.user_id == user_id)

    def get_keywords(self, user_id):
        """User's keywords as a list."""
        return self._get_field_list(user_id, 'keywords')

    def get_merchants(self, user_id):
        """User's merchants as a list."""
        return self._get_field_list(user_id, 'merchants')

    def modify_keywords(self, keywords, user_id):
        """Replace the stored keywords string."""
        self._set_field(user_id, 'keywords', keywords)

    def modify_merchants(self, merchants, user_id):
        """Replace the stored merchants string."""
        self._set_field(user_id, 'merchants', merchants)

    def get_keywords_str(self, user_id):
        """User's keywords as the raw stored string."""
        return self._get_field(user_id, 'keywords')

    def get_merchants_str(self, user_id):
        """User's merchants as the raw stored string."""
        return self._get_field(user_id, 'merchants')
def db_smartcache():
    """Build an in-memory TinyDB default table using SmartCacheTable, pre-seeded."""
    database = TinyDB(storage=MemoryStorage)
    database.purge_tables()
    database.table_class = SmartCacheTable
    table = database.table('_default')
    table.insert_multiple({'int': 1, 'char': letter} for letter in 'abc')
    return table
class ReferenceUpdater:
    """Downloads the EiBi shortwave schedule and builds lookup tables from it."""

    # Class-level defaults; both are overwritten per instance in __init__.
    ebi_file = ''
    db = ''

    def __init__(self):
        # EiBi B19 season schedule CSV (semicolon-separated fields).
        self.ebi_file = 'http://www.eibispace.de/dx/sked-b19.csv'
        self.db = TinyDB('reference.json')

    def build_schedule_table(self):
        """Download the schedule CSV and load it into the 'schedule_table' table.

        Row 0 is the CSV header and is skipped via the i != 0 guard.
        """
        self.db.purge_tables()
        schedule_table = self.db.table('schedule_table')
        with get(self.ebi_file) as reader:
            # with open('sked-b19.csv', encoding='latin-1') as reader:
            i = 0
            for line in reader.text.split('\n'):
                # for line in reader.readlines():
                # line = line.rstrip()
                if (i != 0):
                    sched = line.split(';')
                    # kHz:75;Time(UTC):93;Days:59;ITU:49;Station:201;Lng:49;Target:62;Remarks:135;P:35;Start:60;Stop:60;
                    # 16.4;0000-2400;;NOR;JXN Marine Norway;;NEu;no;1;;
                    # 17.2;0730-0830;24Dec;S;SAQ Grimeton;-CW;Eu;gr;6;1312;2412
                    # 18.2;0000-2400;;IND;VTX Indian Navy;;SAs;v;1;;
                    schedule_table.insert({'frequency': sched[0], 'time': sched[1], 'source': sched[3], 'station': sched[4], 'target': sched[6]})
                    print(str(i) + ' rows inserted')
                i = i + 1

    def build_reference_db(self):
        """Derive de-duplicated 'stations' and 'sourcedestination' tables.

        Walks the schedule, assigning each distinct station / source name a
        sequential integer id (its first-seen position).
        """
        schedule_table = self.db.table('schedule_table')
        self.db.purge_table('stations')  # DO we need to purge this?
        self.db.purge_table('sourcedestination')  # Do we need to purge this?
        station_index = 0
        sourcedestination_index = 0
        station_table = self.db.table('stations')
        sourcedestination_table = self.db.table('sourcedestination')
        # In-memory lists of names already seen; list position == assigned id.
        stations_box = []
        sourcedestination_box = []
        for schedule_row in schedule_table.all():
            current_station_id = -1
            if schedule_row['station'] not in stations_box:
                station_table.insert({'id': station_index, 'name': schedule_row['station']})
                current_station_id = station_index
                station_index = station_index + 1
                stations_box.append(schedule_row['station'])
            else:
                current_station_id = stations_box.index(schedule_row['station'])
            current_source_id = -1
            if schedule_row['source'] not in sourcedestination_box:
                sourcedestination_table.insert({'id': sourcedestination_index, 'name': schedule_row['source']})
                current_source_id = sourcedestination_index
                sourcedestination_index = sourcedestination_index + 1
                sourcedestination_box.append(schedule_row['source'])
            else:
                current_source_id = sourcedestination_box.index(schedule_row['source'])
                # NOTE(review): current_station_id / current_source_id are computed
                # but not used in the visible code — the method may be truncated here.
def get_db(smart_cache=False):
    """Create a fresh in-memory default table, optionally using SmartCacheTable."""
    database = TinyDB(storage=MemoryStorage)
    database.purge_tables()
    if smart_cache:
        database.table_class = SmartCacheTable
    table = database.table('_default')
    table.insert_multiple({'int': 1, 'char': ch} for ch in 'abc')
    return table
def saveCookie2File():
    """Snapshot the driver's cookies into the cookie file, logging any failure."""
    cookies = driver.get_cookies()
    logger.info('cookies:' + str(cookies))
    try:
        cookie_db = TinyDB(cookieFile)
        cookie_db.purge_tables()  # overwrite any stale cookies
        cookie_db.insert_multiple(cookies)
        cookie_db.close()
    except Exception as err:
        # Deliberately best-effort: log the failure rather than crash.
        logger.error(f'Error in save cookie File: {err}')
class AbstractRepository(ABC):
    """Base repository sharing a single TinyDB connection at ./db.json."""

    def __init__(self):
        self.db = TinyDB('./db.json')

    def close(self):
        """Release the underlying storage handle."""
        self.db.close()

    def delete_repo(self):
        """Remove every table in the database.

        purge_tables() already removes the default table, so the extra
        purge() call the original made afterwards was redundant.
        """
        self.db.purge_tables()
def test_non_default_table():
    """A db created with a custom default_table exposes only that table."""
    db = TinyDB(storage=MemoryStorage)
    assert [TinyDB.DEFAULT_TABLE] == list(db.tables())

    db = TinyDB(storage=MemoryStorage, default_table='non-default')
    assert set(['non-default']) == db.tables()

    db.purge_tables()

    # Temporarily override the class-level default and restore it afterwards:
    # the original left TinyDB.DEFAULT_TABLE mutated, leaking state into
    # every test that ran after this one.
    original_default = TinyDB.DEFAULT_TABLE
    try:
        TinyDB.DEFAULT_TABLE = 'non-default'
        db = TinyDB(storage=MemoryStorage)
        assert set(['non-default']) == db.tables()
    finally:
        TinyDB.DEFAULT_TABLE = original_default
class NoteInSpace(AbstractNIS):
    """Note controller over tinydb."""

    def __init__(self, **kwargs):
        """Requires a 'storage_file' keyword pointing at the TinyDB JSON file."""
        if 'storage_file' not in kwargs:
            raise KeyError('storage is not defined')
        self.storage = TinyDB(kwargs['storage_file'])

    def get_all_notes(self):
        """All note documents in insertion order."""
        return self.storage.all()

    def get_note_by_id(self, int_id):
        """Fetch a note by TinyDB doc id; None when absent."""
        if not isinstance(int_id, int):
            raise TypeError('int expected, got {}'.format(type(int_id)))
        return self.storage.get(doc_id=int_id)

    def insert_note(self, note_object):
        """Insert a Note and return its new id (also written to note_object.id)."""
        if not isinstance(note_object, Note):
            raise TypeError('object type is not Note')
        # Reserve a doc id first so the note's own 'id' field matches it.
        iid = self.storage.insert({})
        note_object.id = iid
        if not note_object.id:
            raise AttributeError('failed to asign ID')
        self.storage.update(note_object.dictify(), doc_ids=[iid])
        return iid

    def delete_note_by_id(self, int_id):
        """Remove notes whose 'id' field equals int_id."""
        return self.storage.remove(where('id') == int_id)

    def get_all_spaces(self):
        """Distinct space names, in first-seen order."""
        result = []
        for note in self.get_all_notes():
            if note['space'] not in result:
                result.append(note['space'])
        return result

    def get_notes_in_space(self, str_space):
        """All notes belonging to the given space."""
        return [note for note in self.get_all_notes() if note['space'] == str_space]

    def remove_space(self, str_space):
        """Delete every note in the given space."""
        return self.storage.remove(where('space') == str_space)

    def get_recent_notes(self, int_count):
        """The int_count most recently inserted notes."""
        return self.get_all_notes()[-int_count:]

    def get_last_note(self):
        """The newest note, as a one-element list."""
        return self.get_recent_notes(1)

    def clear_notes(self):
        """Wipe every table."""
        self.storage.purge_tables()
def main():
    """Download every investment of each ORG from Crunchbase into TinyDB.

    Pages through the v3.1 investments endpoint and flattens each item into
    a Data record before inserting it.
    """
    if not exists(DB_ROOT):
        makedirs(DB_ROOT, exist_ok=True)
    db = TinyDB(join(DB_ROOT, 'database.json'))
    db.purge_tables()

    Data = namedtuple('Data', [
        'name', 'description', 'short_description', 'url', 'profile_image_url',
        'email', 'num_employees_max', 'num_employees_min', 'money_raised_usd',
        'series'
    ])

    def record_from_item(item):
        """Flatten one API item into a Data record."""
        round_ = item['relationships']['funding_round']
        org = round_['relationships']['funded_organization']['properties']
        return Data(
            org['name'],
            org['description'],
            org['short_description'],
            f"{CB_URL}{org['web_path']}",
            org['profile_image_url'],
            org['contact_email'],
            org['num_employees_max'],
            org['num_employees_min'],
            round_['properties']['money_raised_usd'],
            round_['properties']['series'],
        )

    for ORG in ORGS:
        res = requests.get(
            f'https://api.crunchbase.com/v3.1/organizations/{ORG}/investments?user_key={USER_KEY}'
        )
        j = loads(res.content)  # first load of the data
        total_pages = j['data']['paging']['number_of_pages']
        current = 1
        while True:
            for item in j['data']['items']:
                db.insert(record_from_item(item)._asdict())
            # BUG FIX: the original `while current < total_pages` fetched the
            # last page but never inserted it — and inserted nothing at all
            # when there was exactly one page.
            if current >= total_pages:
                break
            print(j['data']['paging']['next_page_url'])
            res = requests.get(j['data']['paging']['next_page_url'],
                               params={'user_key': USER_KEY})
            j = loads(res.content)
            current += 1
    print(f'Loaded {len(db)} companies.')
class db_mgm:
    """Playbook/schema database manager plus an 'output' results table."""

    def __init__(self, dbdir):
        self.db = TinyDB(dbdir + 'database/db.json')
        self.output = self.db.table('output')
        self.query = Query()

    def start_db(self, schemaloc):
        """Load schema entries: one dict literal per line, '#'-prefixed lines skipped."""
        with open(schemaloc + 'schema') as schema:
            # TODO: ignore '#' mid-line comments in the schema file too.
            try:
                for line in schema:
                    if '#' not in line[0]:
                        self.db.insert(ast.literal_eval(line))
            except ValueError:
                # Bad schema data is fatal: log, print, and stop.
                logging.error('[X] Malformed Schema data on line \n' + line)
                print('[X] Malformed Schema data on line \n' + line)
                sys.exit()

    def db_lookup(self, playbook):
        """Return the playbook record, or the string 'Error' when absent."""
        if self.db.contains(self.query.name == playbook):
            return self.db.search(self.query.name == playbook)[0]
        return 'Error'

    def db_outputid(self):
        """Next output id as a string (1-based count of stored outputs)."""
        return str(len(self.output) + 1)

    def db_stdoutinput(self, data):
        """Store new output under a freshly allocated id."""
        new_id = self.db_outputid()
        self.output.insert({'id': new_id, 'output': data})

    def db_updateinput(self, data, taskid):
        """Overwrite the output record matching taskid."""
        self.output.update({'id': taskid, 'output': data},
                           self.query.id == taskid)

    def db_completed(self, taskid):
        """Return just the 'output' payload stored for taskid."""
        record = self.output.get(self.query.id == taskid)
        return record['output']

    def db_exit(self):
        """Drop everything on shutdown."""
        self.db.purge_tables()
class Dao:
    """TinyDB access layer for the stored Fibonacci sequence."""

    db = None

    def __init__(self) -> None:
        self.db = TinyDB("./app/bdd/fibonacci_sequence.json")
        super().__init__()

    def init_db(self):
        """Seed the database with 0 and 1 unless it already holds two values."""
        if len(self.db.all()) < 2:
            self.db.purge_tables()
            self.db.insert({'value': 0})
            self.db.insert({'value': 1})

    def get_fibonacci_number(self, number):
        """Document whose 'value' equals number, or None."""
        number_query = Query()
        return self.db.get(number_query.value == number)

    def get_last_two_documents(self):
        """The two most recently inserted documents, oldest first."""
        docs = self.db.all()
        total = len(self.db)
        return [docs[total - 2], docs[total - 1]]

    def persist_fibonacci_number(self, number):
        """Append a new Fibonacci value."""
        self.db.insert({'value': number})

    def find_inferior_fibonacci_number(self, number):
        """Largest stored value strictly below number, or None when none exist."""
        matches = self.db.search(Query().value < number)
        values = [doc['value'] for doc in matches]
        return max(values) if values else None

    def find_superior_fibonacci_number(self, number):
        """Smallest stored value strictly above number, or None when none exist."""
        matches = self.db.search(Query().value > number)
        values = [doc['value'] for doc in matches]
        return min(values) if values else None
class Persistence:
    """Stores Article records (namedtuple-style objects) in a TinyDB file."""

    def __init__(self, fileName):
        self.articleDb = TinyDB(fileName)

    def saveArticle(self, article):
        """Persist an article via its _asdict() representation."""
        self.articleDb.insert(article._asdict())

    def findArticle(self, title):
        """Rebuild the first Article whose title matches (IndexError if none)."""
        matches = self.articleDb.search(Query().title == title)
        return Article(**matches[0])

    def dropTables(self):
        """Wipe all tables and close the file."""
        self.articleDb.purge_tables()
        self.articleDb.close()
def test_1():
    """Smoke test: exercise TinyDB CRUD through a caching JSON store."""
    from tinydb import TinyDB, Query, JSONStorage
    from tinydb.middlewares import CachingMiddleware

    db = TinyDB('cache_db.json', storage=CachingMiddleware(JSONStorage))
    db.purge_tables()  # reset the data

    db.insert({'int': 1, 'char': 'a'})
    db.insert({'int': 2, 'char': 'b'})

    table = db.table('user')
    table.insert({'name': "shawn", "age": 18})
    table.insert({'name': "shelton", "age": 28})
    print(table.all())  # [{'name': 'shawn', 'age': 18}, {'name': 'shelton', 'age': 28}]

    User = Query()
    table.update({'name': 'shawn', 'age': 19}, User.name == 'shawn')
    print(table.search(User.name == 'shawn'))  # [{'name': 'shawn', 'age': 19}]

    table.remove(User.name == 'shawn')
    db.close()
async def hdb(ctx, arg1="", arg2="", arg3=""):
    """Owner-only debug command: echo text, purge litcoin data, or set a balance."""
    # Per-server databases, keyed by the Discord server id.
    litcoinlist = TinyDB("litcoin/LITCOIN" + ctx.message.server.id + ".json")
    # levellist is opened but not used by any branch below.
    levellist = TinyDB("level/LEVEL" + ctx.message.server.id + ".json")
    # Hard-coded owner check: only this member id may run debug commands.
    if ctx.message.author != Server.get_member(ctx.message.server, "166953638961479681"):
        return await hydroBot.say("Debugging is not for plebs. This is serious business.")
    elif arg1 == "say":
        # Echo arg2 back through the bot.
        return await hydroBot.say(arg2)
    elif arg1 == "lc_p":
        # Wipe the whole litcoin database for this server.
        litcoinlist.purge_tables()
    elif arg1 == "lc_s":
        # Set a user's balance: arg2 = user id, arg3 = new balance.
        user = arg2
        balance = arg3
        # NOTE(review): 'query' is not defined in this function — presumably a
        # module-level tinydb Query(); confirm it exists, otherwise this raises.
        litcoinlist.update({"balance": int(balance)}, query["user"] == user)
        return await hydroBot.say(
            "Set the balance of " + Server.get_member(ctx.message.server, user).name + "'s account to `" + str(
                balance) + "`")
    return
def populate_db(sentences):
    """Tokenise sentences and store bigram frequencies in ../data/tiny_db_2.json.

    Replaces the hand-rolled get/test/increment frequency dict with
    collections.Counter; first-seen insertion order is preserved either way.
    """
    from collections import Counter

    db = TinyDB('../data/tiny_db_2.json')
    db.purge_tables()
    # Kept from the original: the table is created but not populated here.
    db.table('sentences')

    parser = Parser(sentences)
    # Count adjacent-word pairs across every tokenised part; tokens are
    # (word, pos) pairs and only the words participate in the bigram.
    bigrams = Counter()
    for tokens in parser.get_tokenised_parts():
        words = [word for word, _pos in tokens]
        bigrams.update(zip(words, words[1:]))

    bigramsTable = db.table('bigrams')
    for (w1, w2), freq in bigrams.items():
        bigramsTable.insert({"word1": w1, "word2": w2, "freq": freq})
    db.close()
class TinyRunDB(BaseDB):
    """Run database backed by TinyDB with a write-caching JSON store."""

    def __init__(self, conn_str):
        self.name = 'TinyRunDB'
        self.conn_str = conn_str
        self.default_table = 'linchpin'

    def _opendb(self):
        """Open the backing file with a 500-write cache."""
        self.middleware = CachingMiddleware(JSONStorage)
        self.middleware.WRITE_CACHE_SIZE = 500
        self.db = TinyDB(self.conn_str,
                         storage=self.middleware,
                         default_table=self.default_table)

    def __str__(self):
        # BUG FIX: "{2}" referenced a third format argument that was never
        # passed, so str() raised IndexError whenever conn_str was truthy.
        if self.conn_str:
            return "{0} at {1}".format(self.name, self.conn_str)
        return "{0} at {1}".format(self.name, 'None')

    @property
    def schema(self):
        return self._schema

    @schema.setter
    def schema(self, schema):
        self._schema = dict()
        self._schema.update(schema)

    @usedb
    def init_table(self, table):
        """Create (if needed) a table and seed it with the current schema."""
        t = self.db.table(name=table)
        return t.insert(self.schema)

    @usedb
    def update_record(self, table, run_id, key, value):
        """Append value to key on the record with id run_id."""
        t = self.db.table(name=table)
        return t.update(add(key, value), eids=[run_id])

    @usedb
    def get_record(self, table, action='up', run_id=None):
        """Newest record (scanning back from run_id) whose action matches."""
        t = self.db.table(name=table)
        if not run_id:
            run_id = len(t.all())
        if not run_id:
            return (None, 0)
        for rid in range(int(run_id), 0, -1):
            record = t.get(eid=int(rid))
            if record['action'] == action:
                return (record, int(rid))
        return (None, 0)

    @usedb
    def get_records(self, table, count=10):
        """Up to `count` most recent records keyed by doc id."""
        records = {}
        if table in self.db.tables():
            t = self.db.table(name=table)
            start = len(t)
            end = start - count
            # range() replaces the Python 2-only xrange() the original used.
            for i in range(start, end, -1):
                records[i] = t.get(doc_id=i)
        return records

    @usedb
    def get_tables(self):
        """All table names except the default one."""
        tables = self.db.tables()
        tables.remove(self.default_table)
        return tables

    def remove_record(self, table, key, value):
        pass

    def search(self, table, key=None):
        """Search a table by key, or return everything when key is None."""
        t = self.db.table(name=table)
        if key:
            return t.search(key)
        return t.all()

    def query(self, table, query):
        pass

    def purge(self, table=None):
        """Drop one table, or every table when none is named."""
        if table:
            return self.db.purge_table(table)
        return self.db.purge_tables()

    def _closedb(self):
        self.db.close()
def main():
    """CLI entry point: build the Gooey parsers, then run Similar/Display/Purge/Test."""
    # Open the databases
    noncore = TinyDB('./dbs/non_Core.json')
    MATs = TinyDB('./dbs/MATS.json')
    core = TinyDB('./dbs/Core.json')
    counties = TinyDB('./dbs/Counties.json')

    parser = GooeyParser(description='Find similar MATs.')
    subparsers = parser.add_subparsers(dest='action')
    similar = subparsers.add_parser('Similar', help='''\
Finds the most similar MAT to the specified MATs''')
    req = similar.add_argument_group('Required')
    can = similar.add_argument_group('Can be left alone')
    whichmats = req.add_mutually_exclusive_group(required=True)
    whichmats.add_argument('-MATs', choices=MATList(), nargs='*', widget='Listbox',
                           help='Specifies MATs (hold cmd/ctrl to select multiple)')
    whichmats.add_argument('-All', action='store_true', help='Analyses all MATs')
    can.add_argument('--algorithm', '-a', default='defaults', nargs='*',
                     help='''\
Configure the selection algorithm (optional)''',
                     gooey_options={'validator': {
                         'test': "(user_input == 'defaults') or (all(b in ('avg', 'rmsd', 'med', 'rng', 'mode', 'size') and (c in ['wgt', 'is', 'isnot'] or c.endswith('gets')) for b, c in zip(user_input.split()[1::4], user_input.split()[2::4])) and len(user_input))",
                         'message': 'That is not a valid algorithm. See documentation.'}})
    subparsers.add_parser('Display', help='''\
Displays datatables currently stored''')
    subparsers.add_parser('Purge', help='''\
Purges the compiled databases.''')
    test = subparsers.add_parser('Test', help='''\
Runs Testing Utililty''')
    can.add_argument('--multi', '-m', type=int, default=50, metavar='x',
                     help='''\
Displays x most similar MATs''')
    test.add_argument('--plural', '-p', type=int, default=20, metavar='x',
                      help='''\
Splits this many of the MATs into 2.''')
    args = parser.parse_args()
    msg.SEP()
    if args.action == 'Purge':
        msg.PURGE()
        for x in [core, noncore, counties, MATs]:
            x.purge()
        MATs.purge_tables()
        silentremove('./dbs/urns.pickle')
        msg.DONE()
    if args.action == 'Display':
        for x in MATs.tables():
            if not x == '_default':
                print(x.replace('|', ' '), flush=True)

    def doit(algorithm, tested, num, testing=False):
        """Run the similarity tester across 'tested', optionally writing a CSV."""
        if algorithm in (None, ['defaults', ], 'defaults'):
            algorithm = defs.algorithm
        if tested is not None:
            flatten = itertools.chain.from_iterable
            importkeys = []
            for a, b in zip(algorithm[:-3:4], algorithm[1:-2:4]):
                importkeys += [a, b]
            table = importer(importkeys, testing=testing)[0]
            retresults = []
            with multiprocess.Pool(processes=defs.threadcount) as pool:
                partthing = partial(tester, table, algorithm=algorithm,
                                    number=num, testing=testing)
                if len(tested) > 1:
                    # BUG FIX: the original read `partthing(tested.pop() go=0)`
                    # — a SyntaxError; the comma before the keyword was missing.
                    first = [partthing(tested.pop(), go=0), ]
                    partthing = partial(tester, table, algorithm=algorithm,
                                        number=num, testing=testing, go=1)
                else:
                    first = []
                # BUG FIX: the original bound `chain = chain.from_iterable` and
                # then called it with two arguments (a TypeError); plain
                # itertools.chain is what concatenating two iterables needs.
                for result in itertools.chain(first,
                                              pool.imap_unordered(partthing, tested)):
                    print('{} is most similar to {}\n'.format(
                        result[2], ' then '.join(result[0])), flush=True)
                    retresults.append((result[2], result[1]))
            if testing:
                return retresults
            else:
                with open('result.csv', 'w', encoding='utf-16') as resultsfile:
                    print('Writing results to file...', flush=True)
                    fields = sorted(list(flatten(
                        ['Average ' + x, 'Subject ' + x]
                        for x in defs.ProgressScoreHeaders)))
                    writer = csv.DictWriter(resultsfile,
                                            fieldnames=['MAT', ] + fields)
                    writer.writeheader()
                    rows = [{**{'MAT': x[0]}, **x[1][0], **x[1][1]}
                            for x in retresults]
                    writer.writerows(rows)
                msg.DONE()
#테이블 데이터 전체 삽입1 with open('c:/section5/data/users.json','r') as infile: r = json.loads(infile.read()) for p in r: users.insert(p) #테이블 데이터 전체 삽입2 with open('c:/section5/data/todos.json','r') as infile: r = json.loads(infile.read()) for p in r: todos.insert(p) #전체 데이터 출력 print(users.all()) print(todos.all()) #테이블 목록 조회 print(db.tables()) #전체 데이터 삭제 users.purge() todos.purge() db.purge_table('users') db.purge_table('todos') db.purge_tables() db.close()
class StarredDB(object):
    """Local cache of starred repos with inverted indexes by language and keyword."""

    def __init__(self, my_stars_home, mode):
        self._db = TinyDB(os.path.join(my_stars_home, 'mystars.db'),
                          storage=CachingMiddleware(JSONStorage))
        if mode == 't':
            # Truncate mode: start from an empty database.
            self._db.purge_tables()
        self._idx = self._db.table('index')
        if not self._idx.contains(Query().name == 'language'):
            self._idx.insert({'name': 'language', 'docs': {}})
        if not self._idx.contains(Query().name == 'keyword'):
            self._idx.insert({'name': 'keyword', 'docs': {}})

    def __enter__(self):
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        self._db.close()

    def _get_index_docs(self, name):
        """The inverted-index mapping {term: [doc_ids]} stored under name."""
        return self._idx.get(Query().name == name).get('docs', {})

    def update(self, repo_list):
        """Store repos (newest first) and refresh both inverted indexes."""
        if repo_list:
            self._db.table('latest_repo').purge()
            self._db.table('latest_repo').insert(repo_list[0])
        language_docs = self._get_index_docs('language')
        keyword_docs = self._get_index_docs('keyword')
        for repo in repo_list:
            # save repo data
            doc_id = self._db.insert(repo)
            # update index
            name = repo.get('name')
            language = repo.get('language')
            description = repo.get('description')
            if language:
                for lang in language.split():
                    update_inverted_index(language_docs, lang.lower(), doc_id)
            keywords = split_repo_name(name)
            if description:
                keywords += split_repo_desc(description)
            for keyword in split_keywords(keywords):
                update_inverted_index(keyword_docs, keyword.lower(), doc_id)
        self._idx.update(operations.set('docs', language_docs), Query().name == 'language')
        self._idx.update(operations.set('docs', keyword_docs), Query().name == 'keyword')

    def get_latest_repo_full_name(self):
        """full_name of the most recently stored repo, or None."""
        latest_repo = self._db.table('latest_repo').all()
        if len(latest_repo) > 0:
            return latest_repo[0].get('full_name')

    def search(self, languages, keywords):
        """Doc-id intersection/union search over the two inverted indexes."""
        language_docs = self._get_index_docs('language')
        keyword_docs = self._get_index_docs('keyword')
        # BUG FIX: the original tested `not language_docs` twice; the second
        # operand must check the keyword index.
        if not language_docs and not keyword_docs:
            raise EmptyIndexWarning('empty index')
        language_results = []
        if languages:
            for search in languages:
                language_results += language_docs.get(search.lower(), [])
        keywords_results = []
        if keywords:
            for keyword in keywords:
                for term in split_repo_name(keyword):
                    results = keyword_docs.get(term.lower(), [])
                    keywords_results.append(results)
        if languages and keywords:
            # python > 2.6
            search_results = list(set(language_results).intersection(*keywords_results))
        else:
            if len(keywords_results) > 1:
                # python > 2.6
                final_keywords_results = list(
                    set(keywords_results[0]).intersection(*keywords_results[1:]))
            else:
                final_keywords_results = []
                for results in keywords_results:
                    for r in results:
                        final_keywords_results.append(r)
            search_results = language_results + final_keywords_results
        # remove duplicates then sort by id
        search_results = sorted(list(set(search_results)), key=int)
        return [self._db.get(doc_id=doc_id) for doc_id in search_results]
class TinyRunDB(BaseDB):
    """Run database backed by TinyDB with a write-caching JSON store."""

    def __init__(self, conn_str):
        self.name = 'TinyRunDB'
        self.conn_str = conn_str
        self.default_table = 'linchpin'

    def _opendb(self):
        """Open the backing file with a 500-write cache."""
        self.middleware = CachingMiddleware(JSONStorage)
        self.middleware.WRITE_CACHE_SIZE = 500
        self.db = TinyDB(self.conn_str, storage=self.middleware,
                         default_table=self.default_table)

    def __str__(self):
        # BUG FIX: "{2}" indexed a third format argument that does not exist,
        # so str() raised IndexError whenever conn_str was truthy.
        if self.conn_str:
            return "{0} at {1}".format(self.name, self.conn_str)
        return "{0} at {1}".format(self.name, 'None')

    @property
    def schema(self):
        return self._schema

    @schema.setter
    def schema(self, schema):
        self._schema = dict()
        self._schema.update(schema)

    @usedb
    def init_table(self, table):
        """Create (if needed) a table and seed it with the current schema."""
        t = self.db.table(name=table)
        return t.insert(self.schema)

    @usedb
    def update_record(self, table, run_id, key, value):
        """Merge value into the record's outputs, folding 'resources' lists together."""
        t = self.db.table(name=table)
        # get transaction record
        tx_rec = t.get(doc_id=run_id).get("outputs", [])
        if len(tx_rec) > 0 and isinstance(value, list):
            # fetch the resources dict, index
            # by filtering them from outputs list
            res_list = [(idx, x) for idx, x in enumerate(tx_rec)
                        if "resources" in x]
            if len(res_list) != 0:
                res_idx = res_list[0][0]
                resources = res_list[0][1]
                if "resources" in list(value[0].keys()):
                    # Append the incoming resources to the existing list and
                    # write the merged entry back in place.
                    de = resources["resources"]
                    for i in value[0]["resources"]:
                        de.append(i)
                    de = {"resources": de}
                    tx_rec[res_idx] = de
                    res = t.update(tinySet(key, [de]), doc_ids=[run_id])
                    return res
        return t.update(add(key, value), doc_ids=[run_id])

    @usedb
    def get_tx_record(self, tx_id):
        """One transaction record from the default 'linchpin' table."""
        t = self.db.table(name='linchpin')
        return t.get(doc_id=tx_id)

    @usedb
    def get_tx_records(self, tx_ids):
        """Transaction records keyed by id."""
        txs = {}
        t = self.db.table(name='linchpin')
        for tx_id in tx_ids:
            txs[tx_id] = t.get(doc_id=tx_id)
        return txs

    @usedb
    def get_record(self, table, action='up', run_id=None):
        """Record matching run_id, or the newest one whose action matches."""
        t = self.db.table(name=table)
        if not run_id:
            run_id = len(t.all())
            if not run_id:
                return (None, 0)
            for rid in range(int(run_id), 0, -1):
                record = t.get(eid=int(rid))
                if record and record['action'] == action:
                    return (record, int(rid))
        else:
            record = t.get(eid=int(run_id))
            if record:
                return (record, int(run_id))
        return (None, 0)

    @usedb
    def get_records(self, table, count=10):
        """Up to `count` most recent records keyed by doc id ('all' for everything)."""
        records = {}
        if table in self.db.tables():
            t = self.db.table(name=table)
            if len(t.all()):
                start = len(t)
                if count == 'all':
                    end = 0
                else:
                    end = start - count
                for i in range(start, end, -1):
                    records[i] = t.get(doc_id=i)
        return records

    @usedb
    def get_tables(self):
        """All table names except the default one."""
        tables = self.db.tables()
        tables.remove(self.default_table)
        return tables

    def remove_record(self, table, key, value):
        pass

    def search(self, table, key=None):
        """Search a table by key, or return everything when key is None."""
        t = self.db.table(name=table)
        if key:
            return t.search(key)
        return t.all()

    def query(self, table, query):
        pass

    def purge(self, table=None):
        """Drop one table, or every table when none is named."""
        if table:
            return self.db.purge_table(table)
        return self.db.purge_tables()

    def _closedb(self):
        self.db.close()
def db():
    """Fresh in-memory TinyDB pre-seeded with three rows."""
    database = TinyDB(storage=MemoryStorage)
    database.purge_tables()
    seed = ({'int': 1, 'char': letter} for letter in 'abc')
    database.insert_multiple(seed)
    return database
class Spot():
    """ See spot run

    Thin wrapper around a Spotify client plus a local TinyDB file
    ('./spot.db') used to cache tracks, albums and artists and to
    maintain a generated recommendations playlist.
    """

    def __init__(self, token, user_id, trace=False):
        """
        :param token:   Spotify OAuth token
        :param user_id: Spotify user id owning the playlists
        :param trace:   enable request tracing on the client
        """
        self.sp = Spotify(auth=token)
        self.user_id = user_id
        self.sp.trace = trace
        self.db = TinyDB('./spot.db')
        self.pprint = pprint.PrettyPrinter(indent=4).pprint
        # terminal size as [rows, columns], read from stty
        # rows, columns =
        self.term = [int(x) for x in os.popen('stty size', 'r').read().split()]

    def purge_db(self):
        """Drop every table from the local database."""
        self.db.purge_tables()
        print('All tables purged')

    def db_info(self):
        """Print each table name with its record count."""
        Q = Query()
        tables = self.db.tables()
        print("==== TABLES ====")
        for table in tables:
            print('{}: {}'.format(table, self.db.table(table).count(Q.id)))
        print("================")

    def display_table(self, table_name, order_by=None, limit=None):
        """Pretty-print a table to the terminal.

        :param order_by: comma-separated field names; prefix with '-'
                         for descending order
        :param limit:    max rows to show (None = all)
        """
        # noisy fields never worth a terminal column
        ignore = ['is_local', 'is_playable', 'available_markets',
                  'disc_number', 'explicit', 'external_ids', 'external_urls',
                  'href', 'uri', 'linked_from', 'preview_url', 'context',
                  'type', 'followers', 'images', 'release_date',
                  'release_date_precision']
        table = self.db.table(table_name)
        records = table.all()
        if order_by:
            reverse = False
            if order_by.startswith('-'):
                order_by = order_by[1:]
                reverse = True
            order_by = order_by.split(',')
            records = sorted(
                records,
                key=lambda x: tuple(x.get(k, '') for k in order_by))
            if reverse:
                records = list(reversed(records))
        window = records[slice(0, limit)]
        if not window:
            print("No records returned")
            return
        _, columns = self.term
        fields = list(set(window[0].keys()) - set(ignore))
        # longest_field = len(sorted(fields, key=len)[-1])
        num_fields = len(fields)
        # split the terminal width evenly across the displayed fields
        col_width = int(columns / num_fields) - 1
        # if col_width < longest_field:
        #     col_width = longest_field
        # print("COL WIDTH: {} LONGEST FIELD: {}".format(col_width, longest_field))
        rows = []
        print("================= {} ==============".format(table.name))
        for rec in window:
            rows.append({k: str(v)[0:col_width]
                         for k, v in rec.items() if k not in ignore})
        print(tabulate(rows, headers='keys'))
        print("======== Returned {} rows =========\n".format(len(window)))

    def pop_albums(self, albums):
        """Fetch full album records (20 per request) and upsert them.

        Returns the list of affected doc ids.
        """
        records = []
        Q = Query()
        albums_table = self.db.table('albums')
        for segment in blocks(albums, 20):
            album_ids = [a.get('id') for a in segment]
            for album in self.sp.albums(album_ids).get('albums', []):
                # store artist ids only, not the embedded artist objects
                album_artists = album.pop('artists', [])
                album.update({
                    'created': niceo(),
                    'artists': [a.get('id') for a in album_artists]
                })
                records.extend(
                    albums_table.upsert(album, Q.id == album.get('id')))
        return records

    def pop_artists(self, artists):
        """Fetch full artist records (50 per request) and upsert them.

        Returns the list of affected doc ids.
        """
        records = []
        Q = Query()
        artists_table = self.db.table('artists')
        for segment in blocks(artists, 50):
            artist_ids = [a.get('id') for a in segment]
            for artist in self.sp.artists(artist_ids).get('artists', []):
                artist.update({
                    'created': niceo()
                })
                records.extend(
                    artists_table.upsert(artist, Q.id == artist.get('id')))
        return records

    def fetch(self):
        """Fetch top tracks and recently-played tracks into the local db.

        Recently-played items arrive wrapped as
        {'track': {...}, 'played_at': <datetime>, 'context': ...};
        top-track items are bare track objects.
        """
        next_query = {'limit': 25}
        total_tracks = 0
        total_artists = 0
        total_albums = 0
        Q = Query()
        tracks_table = self.db.table('tracks')
        # gather records
        records = []
        # all time faves
        for time_range in ['short_term', 'medium_term', 'long_term']:
            next_query = {'limit': 25, 'time_range': time_range}
            while next_query:
                resp = self.sp.current_user_top_tracks(**next_query)
                next_query = parse_qs(urlparse(resp.get('next')).query)
                if next_query:
                    self.pprint(next_query)
                records.extend(resp.get('items'))
        # BUG FIX: after the top-tracks loops, next_query is the exhausted
        # (empty) paging dict, so the recently-played loop below could never
        # run.  Reset the paging query so it executes at least once.
        next_query = {'limit': 25}
        # recently played
        while next_query:
            resp = self.sp.current_user_recently_played(**next_query)
            next_query = parse_qs(urlparse(resp.get('next')).query)
            if next_query:
                self.pprint(next_query)
            records.extend(resp.get('items'))
        # fetch in the above order to recently played comes later,
        # preserving last play-time in the upsert
        artists = []
        albums = []
        # process records
        for item in records:
            if 'track' in item:
                # flatten other info
                track = item.pop('track')
                track.update({k: v for k, v in item.items()})
            else:
                track = item
            album = track.pop('album', None)
            if album is not None:
                artists.extend(album.get('artists', []))
                albums.append(album)
            artists.extend(track.pop('artists', []))
            # NOTE(review): `artists` accumulates across ALL records, so each
            # track is stored with every artist seen so far, not just its
            # own — looks unintended but is preserved here; confirm before
            # changing.
            track.update({
                'created': niceo(),
                'album': album.get('id') if album is not None else None,
                'artists': [a.get('id') for a in artists]
            })
            total_tracks += len(
                tracks_table.upsert(track, Q.id == track.get('id')))
        # fetch full album records for genre info
        total_albums += len(self.pop_albums(albums))
        # fetch full artists for genre info
        total_artists += len(self.pop_artists(artists))
        print(
            "Updated or created {} artists\n".format(total_artists),
            "Updated or created {} albums\n".format(total_albums),
            "Updated or created {} tracks\n".format(total_tracks),
        )

    def recommend(self):
        """Return up to 15 recommended tracks.

        Seeds three recommendation calls (artists, genres, tracks) from
        sampled local records and concatenates the results.

        recommendations(
            seed_artists=None, seed_genres=None, seed_tracks=None,
            limit=20, country=None, **kwargs
        )
        """
        artists = self.db.table('artists')
        albums = self.db.table('albums')
        tracks = self.db.table('tracks')
        Q = Query()
        genre_seeds = self.sp.recommendation_genre_seeds().get('genres', [])
        # artists and albums have genres .. but must be a valid seed
        seed_genres = []
        max_candidates = albums.count(Q) + artists.count(Q)
        candidates_seen = []
        # still a little leaky if there are less
        # non-unique genres than artists and albums
        while len(seed_genres) < 5 and len(candidates_seen) < max_candidates:
            genre_candidates = sample([g for subg in [
                r.get('genres', []) for r in sample(
                    sample(artists.all()) + sample(albums.all()))
            ] for g in subg])
            candidates_seen.extend(genre_candidates)
            for gc in genre_candidates:
                if gc in genre_seeds:
                    seed_genres.append(gc)
                if len(seed_genres) == 5:
                    break
        seed_artists = [a.get('id') for a in sample(artists.all())]
        seed_tracks = [a.get('id') for a in sample(tracks.all())]
        return self.sp.recommendations(
            seed_artists=seed_artists, limit=5
        ).get('tracks', []) + self.sp.recommendations(
            seed_genres=seed_genres, limit=5
        ).get('tracks', []) + self.sp.recommendations(
            seed_tracks=seed_tracks, limit=5
        ).get('tracks', [])

    def update_recommendations(self):
        """Create/refresh the 'spotbrainz' playlist with fresh recommendations."""
        Q = Query()
        # get spotbrainz playlist local meta
        pl_meta = self.db.get(Q.name == '__playlist_meta__')
        # create playlist if we don't have the id
        if not pl_meta:
            pl = self.sp.user_playlist_create(
                self.user_id, 'spotbrainz', public=False,
                description='Shoddy recommendations for spotty brains')
        else:
            pl = self.sp.user_playlist(
                self.user_id, pl_meta.get('record', {}).get('id'))
        self.db.upsert(
            {'name': '__playlist_meta__', 'record': pl},
            Q.name == '__playlist_meta__')
        pl_meta = self.db.get(Q.name == '__playlist_meta__')
        recs = self.recommend()
        self.sp.user_playlist_add_tracks(
            self.user_id,
            pl_meta.get('record', {}).get('id'),
            [r.get('uri') for r in recs]
        )
        print("*~*~*~ Updated recommended playlist ~*~*~*")
class DBC:
    """TinyDB-backed store of per-user Raspberry Pi alert configuration.

    All records live in the 'UserConfiguration' table; each record holds
    a user id, four alert thresholds and an authentication flag.
    """

    # single table used by every method of this class
    _TABLE = 'UserConfiguration'

    def __init__(self, path=None):
        """Open the database at *path* (default: my-config/raspberryPi-db.json)."""
        if path is None:
            self.db = TinyDB(os.path.join('my-config', 'raspberryPi-db.json'))
        else:
            self.db = TinyDB(path)
        # touch the table so it exists before the first insert
        self.db.table(self._TABLE)

    def get_table(self, table_name):
        """Return the TinyDB table named *table_name*."""
        return self.db.table(table_name)

    def purge(self):
        """Drop every table (and all records) from the database."""
        self.db.purge_tables()

    # ---- internal helpers (factored from ten near-identical bodies) ----

    def _user_config(self):
        """Return the UserConfiguration table."""
        return self.db.table(self._TABLE)

    def _get_field(self, user_id, field):
        """Return one field of the first record matching *user_id*.

        Raises IndexError when the user has no configuration record —
        same behaviour as the original per-field getters.
        """
        return self._user_config().search(
            where('user_id') == user_id)[0][field]

    def _set_field(self, user_id, field, value):
        """Update one field on the record matching *user_id*."""
        query = Query()
        self._user_config().update({field: value}, query.user_id == user_id)

    # ---- public API -----------------------------------------------------

    def insert_user_configuration(self, user_id):
        """Create a default configuration record for *user_id*.

        :returns: True when a new record was inserted, False when one
                  already existed.
        """
        result = False
        if len(self._user_config().search(
                where('user_id') == user_id)) == 0:
            self._user_config().insert({
                'user_id': user_id,
                'cpu_alert': None,
                'temp_alert': None,
                'ram_alert': None,
                'disk_alert': None,
                'auth': False
            })
            result = True
        return result

    def get_cpu_alert(self, user_id):
        """Return the CPU alert threshold for *user_id*."""
        return self._get_field(user_id, 'cpu_alert')

    def get_temp_alert(self, user_id):
        """Return the temperature alert threshold for *user_id*."""
        return self._get_field(user_id, 'temp_alert')

    def get_ram_alert(self, user_id):
        """Return the RAM alert threshold for *user_id*."""
        return self._get_field(user_id, 'ram_alert')

    def get_disk_alert(self, user_id):
        """Return the disk alert threshold for *user_id*."""
        return self._get_field(user_id, 'disk_alert')

    def get_user_authenticated(self, user_id):
        """Return whether *user_id* has authenticated."""
        return self._get_field(user_id, 'auth')

    def set_cpu_alert(self, user_id, percentage):
        """Set the CPU alert threshold (percentage) for *user_id*."""
        self._set_field(user_id, 'cpu_alert', percentage)

    def set_temp_alert(self, user_id, degrees):
        """Set the temperature alert threshold (degrees) for *user_id*."""
        self._set_field(user_id, 'temp_alert', degrees)

    def set_ram_alert(self, user_id, percentage):
        """Set the RAM alert threshold (percentage) for *user_id*."""
        self._set_field(user_id, 'ram_alert', percentage)

    def set_disk_alert(self, user_id, percentage):
        """Set the disk alert threshold (percentage) for *user_id*."""
        self._set_field(user_id, 'disk_alert', percentage)

    def set_user_authenticated(self, user_id):
        """Mark *user_id* as authenticated."""
        self._set_field(user_id, 'auth', True)
class DB:
    """ persistence for instances and transition requests """

    def __init__(self):
        """Create (and wipe) the TinyDB file at config/db.json."""
        self.logger = logging.getLogger(__name__)
        try:
            self.db = TinyDB('config/db.json')
            # start from a clean database on every startup
            self.db.purge()
            self.db.purge_tables()
            self.transitionTable = self.db.table('transitions')
            #self.instanceTable=self.db.table('instances')
            self.logger.debug('created db at config/db.json')
        except Exception as ex:
            raise DBException(ex)

    def createNewTransitionRequest(self, transition):
        """ store transition request and return unique id """
        self.logger.debug('create transition db entry called')
        self.logger.debug(transition)
        try:
            with lock:
                id = self.transitionTable.insert(transition)
                self.logger.debug('added transition request with id ' + str(id))
        except Exception as ex:
            raise DBException(ex)
        return id

    def updateTransitionRequest(self, id, transition):
        # update transition status
        self.logger.debug('update transition request')
        self.logger.debug(transition)
        try:
            with lock:
                self.transitionTable.update(transition, eids=[id])
        except Exception as ex:
            raise DBException(ex)

    def removeTransition(self, eid):
        """Delete the transition stored under TinyDB eid *eid*."""
        self.logger.debug('removing transition with eid ' + str(eid))
        try:
            with lock:
                self.transitionTable.remove(eids=[eid])
        except Exception as ex:
            self.logger.error('cannot remove transition with eid ' + str(eid))
            raise DBException(ex)

    def findTransitionByRequestID(self, id):
        # will need to be updated to reflect new transition stuff
        self.logger.debug('find transition db entry for request id ' + str(id))
        Transition = Query()
        try:
            i = self.transitionTable.get(Transition.requestId == int(id))
        except Exception as ex:
            self.logger.error('something bad happened')
            raise DBException(ex)
        self.logger.debug(str(i))
        return i

    def findTransitionsByResourceID(self, id):
        """Return all transitions whose resourceId matches *id*."""
        self.logger.debug('search for transition db entry for resource id '
                          + str(id))
        Transition = Query()
        transitions = []
        try:
            transitions = self.transitionTable.search(
                Transition.resourceId == str(id))
        except Exception as ex:
            self.logger.error('something bad happened')
            raise DBException(ex)
        self.logger.debug(str(transitions))
        return transitions

    def findTransitionByID(self, id):
        # will need to be updated to reflect new transition stuff
        # BUG FIX: the original concatenated a possibly-int id directly into
        # the debug string (TypeError for int ids) and referenced the
        # undefined name `eid` in the error path, raising NameError and
        # masking the real failure.
        self.logger.debug('find transition db entry for id ' + str(id))
        try:
            i = self.transitionTable.get(eid=int(id))
            self.logger.debug(i)
        except Exception as ex:
            self.logger.error('cannot find transition with eid ' + str(id))
            raise DBException(ex)
        return i
class KindleClippingDB(object):
    """TinyDB store for Kindle books and their highlights.

    Two tables: 'books' (title/author) and 'highlights'
    (book_id/content/positions/epoch).
    """

    def __init__(self, db_path):
        """Open (or create) the pretty-printed JSON database at *db_path*."""
        self.db = TinyDB(db_path, sort_keys=True, indent=4,
                         ensure_ascii=False, encoding='utf-8')
        self.books = self.db.table('books')
        self.highlights = self.db.table('highlights')

    def pure_all(self):
        """Drop every table (books and highlights) from the database."""
        self.db.purge_tables()

    def add_book(self, title, author, other_attrs=None):
        """Insert a book record and return its doc id.

        BUG FIX: *other_attrs* previously defaulted to a shared mutable
        dict ({}); it now defaults to None with identical behaviour.
        """
        doc = dict(other_attrs) if other_attrs else {}
        doc['title'] = title
        doc['author'] = author if author else ""
        id = self.books.insert(doc)
        return id

    def _add_highlight(self, book_id, content, epoch, pos_start, pos_end,
                       other_attrs):
        """Insert a highlight unless an identical one exists; return its doc id.

        A highlight is considered a duplicate when the same book already
        has a highlight with the exact same content.
        """
        hQ = Query()
        res = self.highlights.search((hQ.book_id == book_id)
                                     & (hQ.content == content))
        if len(res) > 0:
            return res[0].doc_id
        doc = dict(other_attrs)
        doc['book_id'] = book_id
        doc['content'] = content
        doc['pos_start'] = pos_start if pos_start else 0
        doc['pos_end'] = pos_end if pos_end else 0
        doc['epoch'] = epoch if epoch else 0
        id = self.highlights.insert(doc)
        return id

    def add_highlight(self, content, book_title, book_author, epoch,
                      pos_start=None, pos_end=None, other_attrs=None):
        """Insert a highlight, creating the book record if needed.

        BUG FIX: *other_attrs* previously defaulted to a shared mutable
        dict ({}); it now defaults to None with identical behaviour.
        :returns: doc id of the (new or pre-existing) highlight.
        """
        res = self.get_books(book_title, book_author)
        book_id = None
        if len(res) > 0:
            book_id = res[0].doc_id
        else:
            book_id = self.add_book(book_title, book_author)
        id = self._add_highlight(book_id, content, epoch, pos_start, pos_end,
                                 other_attrs if other_attrs is not None else {})
        return id

    def get_books(self, title, author):
        """Return book records matching *title* and *author*.

        Author matching first requires the same set of name tokens; if
        nothing matches, it falls back to case-insensitive substring
        containment in either direction.
        """
        bookQ = Query()

        def author_name_matcher(db_value, input_value):
            # exact match on the set of whitespace/comma-separated name parts
            db_names = set(
                [x for x in re.split(r'\s|,', db_value) if len(x) > 0])
            input_names = set(
                [x for x in re.split(r'\s|,', input_value) if len(x) > 0])
            return db_names == input_names

        def author_name_matcher_fallback(db_value, input_value):
            # looser match: one author string contains the other
            db_value = db_value.lower()
            input_value = input_value.lower()
            if (db_value.find(input_value) != -1
                    or input_value.find(db_value) != -1):
                return True
            else:
                return False

        res = self.books.search((bookQ.title == title) & (
            bookQ.author.test(author_name_matcher, author)))
        if (len(res) < 1):
            res = self.books.search((bookQ.title == title) & (
                bookQ.author.test(author_name_matcher_fallback, author)))
        return res

    def query_books(self, query_string):
        """Return books whose title or author matches *query_string* (regex).

        An empty/None query returns every book.
        """
        # idiom fix: `is None` instead of `== None`
        if (query_string is None or len(query_string) < 1):
            res = self.books.all()
        else:
            bookQ = Query()
            res = self.books.search(
                (bookQ.title.search(query_string, flags=re.IGNORECASE)) |
                (bookQ.author.search(query_string, flags=re.IGNORECASE)))
        return res

    def get_highligts_by_book(self, book_title, book_author, book_query=None):
        """Return non-hidden highlights of every book matching title/author.

        :param book_query: optional extra TinyDB query AND-ed with the
                           book-id filter.
        """
        books = self.get_books(book_title, book_author)
        if not books:
            return []
        res = []
        for b in books:
            if book_query:
                q = (((Query().book_id == b.doc_id) & book_query)
                     & (~Query().hidden_mark.exists()))
            else:
                q = ((Query().book_id == b.doc_id)
                     & (~Query().hidden_mark.exists()))
            res.extend(self.highlights.search(q))
        return res
resultado = "perderam" emoticon = u"\U0001F629" message = default_party_message.format(party, resultado, end_time, emoticon, duration, kdas) return message else: winning_names_and_heroes = ["\"%s (%s)\"" % (i['personaname'], i['hero']) for i in info] losing_names_and_heroes = ["\"%s (%s)\"" % (i['personaname'], i['hero']) for i in info] kda_list = ["{}/{}/{}".format(i['kills'], i['deaths'], i['assists']) for i in winning_side+losing_side] resultado = ['ganharam' if len(winning_side)>1 else 'ganhou'] message = default_versus_message.format(winning_names_and_heroes,resultado,losing_names_and_heroes,end_time,duration,kda_list) return message #Database Config db = TinyDB(c.db_path) matches_table = db.table('matches') matches_info_table = db.table('matches_info') db.purge_tables() for accountId in c.accounts: matches = match_history.matches(account_id=accountId, matches_requested=10, language="en_us") for m in matches: print "Creating registry for match %s, account %s"% (m.match_id,accountId) currentMatchDetails = match_details.match(m.match_id) matchInfo = fill_match_info(currentMatchDetails,accountId) matches_table.insert( {'account_id': accountId, 'match_id': m.match_id, 'message': create_message([matchInfo]), 'date_added': str(datetime.now())} ) matches_info_table.insert(matchInfo) print "Fim!"
def cleanup():
    """Open the on-disk test database and drop every table it contains."""
    test_db = TinyDB('test_db.json')
    test_db.purge_tables()
class TinyRunDB(BaseDB):
    """TinyDB-backed run database for linchpin transactions (eid-API copy).

    Python 2 era variant: uses `eid`/`eids` keywords and `xrange`.
    Stores run/transaction records behind a write-caching middleware;
    the default table is 'linchpin'.
    """

    def __init__(self, conn_str):
        """
        :param conn_str: path to the TinyDB JSON file
        """
        self.name = 'TinyRunDB'
        self.conn_str = conn_str
        self.default_table = 'linchpin'

    def _opendb(self):
        """Open the database with a caching middleware (flushes every 500 writes)."""
        self.middleware = CachingMiddleware(JSONStorage)
        self.middleware.WRITE_CACHE_SIZE = 500
        self.db = TinyDB(self.conn_str,
                         storage=self.middleware,
                         default_table=self.default_table)

    def __str__(self):
        # BUG FIX: the original used "{0} at {2}" with only two format
        # arguments, which raises IndexError whenever conn_str is set.
        if self.conn_str:
            return "{0} at {1}".format(self.name, self.conn_str)
        return "{0} at {1}".format(self.name, 'None')

    @property
    def schema(self):
        """Schema dict inserted as the first record of each new table."""
        return self._schema

    @schema.setter
    def schema(self, schema):
        self._schema = dict()
        self._schema.update(schema)

    @usedb
    def init_table(self, table):
        """Create *table* and seed it with the schema record; return its eid."""
        t = self.db.table(name=table)
        return t.insert(self.schema)

    @usedb
    def update_record(self, table, run_id, key, value):
        """Update *key* of the record at *run_id*, merging 'resources' dicts.

        When *value* is a list and the record's "outputs" already contain
        a resources entry, new per-key resource lists are extended into
        the existing dict; otherwise *value* is added under *key*.
        """
        t = self.db.table(name=table)
        tx_rec = t.get(eid=run_id).get("outputs", [])
        if len(tx_rec) > 0 and isinstance(value, list):
            # fetch the resources dict, index
            # by filtering them from outputs list
            res_list = [(idx, x) for idx, x in enumerate(tx_rec)
                        if "resources" in x]
            if len(res_list) != 0:
                res_idx = res_list[0][0]
                resources = res_list[0][1]
                if "resources" in value[0]:
                    de = defaultdict(list, resources["resources"])
                    for i, j in value[0]["resources"].items():
                        de[i].extend(j)
                    de = {"resources": de}
                    tx_rec[res_idx] = de
                    return t.update(tinySet(key, [de]), eids=[run_id])
        return t.update(add(key, value), eids=[run_id])

    @usedb
    def get_tx_record(self, tx_id):
        """Return the transaction record with eid *tx_id* (or None)."""
        t = self.db.table(name='linchpin')
        return t.get(eid=tx_id)

    @usedb
    def get_tx_records(self, tx_ids):
        """Return {tx_id: record} for every id in *tx_ids*."""
        txs = {}
        t = self.db.table(name='linchpin')
        for tx_id in tx_ids:
            txs[tx_id] = t.get(eid=tx_id)
        return txs

    @usedb
    def get_record(self, table, action='up', run_id=None):
        """Return (record, run_id) of the newest record matching *action*.

        Scans backwards from *run_id* (default: highest id).  If no record
        matches the action, falls back to the record at *run_id* itself.
        Returns (None, 0) when the table is empty or nothing is found.
        """
        t = self.db.table(name=table)
        if not run_id:
            run_id = len(t.all())
        if not run_id:
            return (None, 0)
        for rid in range(int(run_id), 0, -1):
            record = t.get(eid=int(rid))
            if record and record['action'] == action:
                return (record, int(rid))
        else:
            # for/else: no record matched the action — fall back to run_id
            record = t.get(eid=int(run_id))
            if record:
                return (record, int(run_id))
        return (None, 0)

    @usedb
    def get_records(self, table, count=10):
        """Return up to *count* newest records as {doc_id: record}.

        Pass count='all' to return every record.
        """
        records = {}
        if table in self.db.tables():
            t = self.db.table(name=table)
            if len(t.all()):
                start = len(t)
                if count == 'all':
                    end = 0
                else:
                    end = start - count
                for i in xrange(start, end, -1):
                    records[i] = t.get(doc_id=i)
        return records

    @usedb
    def get_tables(self):
        """Return all table names except the default ('linchpin') table."""
        tables = self.db.tables()
        tables.remove(self.default_table)
        return tables

    def remove_record(self, table, key, value):
        # not implemented for this backend
        pass

    def search(self, table, key=None):
        """Search *table* with query *key*, or return all records when key is None."""
        t = self.db.table(name=table)
        if key:
            return t.search(key)
        return t.all()

    def query(self, table, query):
        # not implemented for this backend
        pass

    def purge(self, table=None):
        """Purge one table, or every table when *table* is None."""
        if table:
            return self.db.purge_table(table)
        return self.db.purge_tables()

    def _closedb(self):
        """Flush the cache middleware and close the database."""
        self.db.close()