import re

from tinydb import TinyDB, Query


class DB:
    def __init__(self, db_path):
        self.db = TinyDB(db_path)

    def add(self, data):
        # Only add the track if it is not already present
        Track = Query()
        if not self.db.get(Track.display_id == data['display_id']):
            return self.db.insert(data)

    def searchById(self, video_id):
        Track = Query()
        return self.db.get(Track.display_id == video_id)

    def search(self, text):
        pattern = re.compile(text, re.IGNORECASE)

        def test(txt):
            return pattern.search(txt)

        Track = Query()
        q = Track.title.test(test) | Track.description.test(test)
        return self.db.search(q)

    def all(self):
        return self.db.all()
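# A minimal usage sketch for the DB wrapper above (not part of the original
# snippet; the file name 'tracks.json' and the record fields are assumed):
if __name__ == '__main__':
    db = DB('tracks.json')
    db.add({'display_id': 'abc123', 'title': 'Some Track',
            'description': 'A demo record'})
    print(db.searchById('abc123'))
    print(db.search('demo'))  # case-insensitive regex over title/description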
def index():
    form = SearchForm()
    query = request.args.get('query', '').strip()
    db = TinyDB(recipyGui.config.get('tinydb'))

    if not query:
        runs = db.all()
    else:
        # Search run inputs, outputs and metadata using the query string
        runs = db.search(
            where('outputs').any(lambda x: listsearch(query, x)) |
            where('inputs').any(lambda x: listsearch(query, x)) |
            where('script').search(query) |
            where('notes').search(query) |
            where('unique_id').search(query))

    # Sort runs by date (newest first), falling back to the record id
    runs = sorted(runs,
                  key=lambda x: parse(x['date'].replace('{TinyDate}:', ''))
                  if x['date'] is not None else x['eid'],
                  reverse=True)

    run_ids = []
    for run in runs:
        if 'notes' in run.keys():
            run['notes'] = str(escape(run['notes']))
        run_ids.append(run.eid)

    db.close()

    return render_template('list.html', runs=runs, query=query, form=form,
                           run_ids=str(run_ids),
                           dbfile=recipyGui.config.get('tinydb'))
def test_serialisation_of_pandas_dataframe(tmpdir):
    import os

    from sacred.observers.tinydb_hashfs import (DataFrameSerializer,
                                                SeriesSerializer)
    from tinydb_serialization import SerializationMiddleware

    import numpy as np
    import pandas as pd

    # Set up serialisation middleware for non list/dict objects
    serialization_store = SerializationMiddleware()
    serialization_store.register_serializer(DataFrameSerializer(),
                                            'TinyDataFrame')
    serialization_store.register_serializer(SeriesSerializer(), 'TinySeries')

    db = TinyDB(os.path.join(tmpdir.strpath, 'metadata.json'),
                storage=serialization_store)

    df = pd.DataFrame(np.eye(3), columns=list('ABC'))
    series = pd.Series(np.ones(5))

    document = {
        'foo': 'bar',
        'some_dataframe': df,
        'nested': {
            'ones': series
        }
    }

    db.insert(document)
    returned_doc = db.all()[0]

    assert returned_doc['foo'] == 'bar'
    assert (returned_doc['some_dataframe'] == df).all().all()
    assert (returned_doc['nested']['ones'] == series).all()
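# For context, a serializer compatible with tinydb_serialization follows this
# shape (a sketch based on the library's documented API, not on the sacred
# implementations registered above): subclass Serializer, name the handled
# class in OBJ_CLASS, and provide encode()/decode() to and from strings.
from datetime import datetime

from tinydb_serialization import Serializer


class ExampleDateTimeSerializer(Serializer):
    OBJ_CLASS = datetime  # the class this serializer handles

    def encode(self, obj):
        # store datetimes as ISO-like strings inside the JSON file
        return obj.strftime('%Y-%m-%dT%H:%M:%S')

    def decode(self, s):
        # rebuild the datetime when the document is read back
        return datetime.strptime(s, '%Y-%m-%dT%H:%M:%S')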
def dragon_greet():
    print("_______________________________________________________________\n")
    time = datetime.datetime.now().time()
    global user_full_name
    global user_prefix
    global config_file

    # Pull the user's full name from the passwd database
    command = "getent passwd $LOGNAME | cut -d: -f5 | cut -d, -f1"
    user_full_name = os.popen(command).read()
    user_full_name = user_full_name[:-1]  # strip the trailing newline

    home = expanduser("~")
    config_file = TinyDB(home + '/.dragonfire_config.json')

    callme_config = config_file.search(Query().datatype == 'callme')
    if callme_config:
        user_prefix = callme_config[0]['title']
    else:
        gender_config = config_file.search(Query().datatype == 'gender')
        if gender_config:
            user_prefix = GENDER_PREFIX[gender_config[0]['gender']]
        else:
            gender = Classifier.gender(user_full_name.split(' ', 1)[0])
            config_file.insert({'datatype': 'gender', 'gender': gender})
            user_prefix = GENDER_PREFIX[gender]

    if time < datetime.time(12):
        time_of_day = "morning"
    elif datetime.time(12) <= time < datetime.time(18):
        time_of_day = "afternoon"
    else:
        time_of_day = "evening"

    userin.execute(["echo"], "To activate say 'Dragonfire!' or 'Wake Up!'")
    userin.say(" ".join(["Good", time_of_day, user_prefix]))
def AddUser(self, username, chatid):
    # Array index matches the Pokemon index (index 0 is unused)
    my_pokemon = [0] * 152
    db = TinyDB('users.json')
    db.insert({'username': username, 'chatid': chatid, 'pokemon': my_pokemon})
    # TODO: return a bool indicating whether the insert succeeded
def check_prediction_cache(
        region_id,
        type_id,
        cache_path=CACHE_PATH,
        db_filename='prophet.json'
):
    """check tinyDB for cached predictions

    Args:
        region_id (int): EVE Online region ID
        type_id (int): EVE Online type ID
        cache_path (str): path to caches
        db_filename (str): name of tinydb

    Returns:
        pandas.DataFrame: cached prediction, or None on a cache miss

    """
    utc_today = datetime.utcnow().strftime('%Y-%m-%d')

    prediction_db = TinyDB(path.join(cache_path, db_filename))

    raw_data = prediction_db.search(
        (Query().cache_date == utc_today) &
        (Query().region_id == region_id) &
        (Query().type_id == type_id)
    )
    prediction_db.close()

    if raw_data:
        panda_data = pd.read_json(raw_data[0]['prediction'])
        return panda_data

    return None
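# A hedged sketch of the matching write side. The real write_prediction_cache
# is not shown in this collection; the field names here simply mirror
# check_prediction_cache above and the keys asserted in test_write_first_cache
# later in this section.
def write_prediction_cache_sketch(
        region_id, type_id, prediction,
        cache_path=CACHE_PATH, db_filename='prophet.json'
):
    prediction_db = TinyDB(path.join(cache_path, db_filename))
    prediction_db.insert({
        'cache_date': datetime.utcnow().strftime('%Y-%m-%d'),
        'region_id': region_id,
        'type_id': type_id,
        'lastWrite': datetime.utcnow().isoformat(),
        # store the DataFrame as JSON text, as the reader expects
        'prediction': prediction.to_json(date_format='iso', orient='records'),
    })
    prediction_db.close()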
def insert_test(db_file='db.json'):
    db = TinyDB(db_file)
    db.insert({
        'name': 'Aman Verma',
        'items': 1,
        'contact': 7890701597
    })
def test_json_readwrite(tmpdir):
    """
    Regression test for issue #1
    """
    path = str(tmpdir.join('test.db'))

    # Create TinyDB instance
    db = TinyDB(path, storage=JSONStorage)

    item = {'name': 'A very long entry'}
    item2 = {'name': 'A short one'}

    get = lambda s: db.get(where('name') == s)

    db.insert(item)
    assert get('A very long entry') == item

    db.remove(where('name') == 'A very long entry')
    assert get('A very long entry') is None

    db.insert(item2)
    assert get('A short one') == item2

    db.remove(where('name') == 'A short one')
    assert get('A short one') is None
def update_statistic():
    db = TinyDB("data/db.json")
    rows = db.all()
    rows.sort(key=lambda x: int(x['rid']), reverse=True)

    # Bucket the rows by difficulty level
    levels = {1: [], 2: [], 3: []}
    for row in rows:
        levels[row['level']].append(row)

    out = []
    header = '|%-35s|%-40s|%-6s|%-10s|%5s|%10s|' % (
        "round", "problem", "solved", "ave_pts", "rate", "submission")
    for k, v in levels.items():
        out.append('# LEVEL %d' % k)
        out.append('-' * len(header))
        out.append(header)
        out.append('-' * len(header))
        v.sort(key=lambda x: difficulty(x), reverse=True)
        for i in v:
            out.append('|%-35s|%-40s|%-6s|%-10.2lf|%-4.3lf|%10s|' % (
                i['round'], i['name'], i['solved'], i['average_pts'],
                i['correct_rate'], i['submissions']))
        out.append('*' * len(header))

    with open("data/statistic.txt", 'w') as f:
        for i in out:
            f.write(i + '\n')
    print('>>>> data/statistic.txt has been updated.')
def __init__(self, path):
    root_dir = os.path.abspath(path)
    if not os.path.exists(root_dir):
        raise IOError('Path does not exist: %s' % path)

    fs = HashFS(os.path.join(root_dir, 'hashfs'), depth=3,
                width=2, algorithm='md5')

    # Set up serialisation for non list/dict objects
    serialization_store = SerializationMiddleware()
    serialization_store.register_serializer(DateTimeSerializer(), 'TinyDate')
    serialization_store.register_serializer(FileSerializer(fs), 'TinyFile')

    if opt.has_numpy:
        serialization_store.register_serializer(NdArraySerializer(),
                                                'TinyArray')
    if opt.has_pandas:
        serialization_store.register_serializer(DataFrameSerializer(),
                                                'TinyDataFrame')
        serialization_store.register_serializer(SeriesSerializer(),
                                                'TinySeries')

    db = TinyDB(os.path.join(root_dir, 'metadata.json'),
                storage=serialization_store)

    self.db = db
    self.runs = db.table('runs')
    self.fs = fs
def test_write_first_cache(self):
    """test write behavior on first pass (cache-buster mode)"""
    self.test_clear_existing_cache()  # blow up the existing cache again

    dummy_data = forecast_utils.parse_emd_data(DEMO_DATA['result'])
    forecast_utils.write_prediction_cache(
        self.region_id,
        self.type_id,
        dummy_data,
        cache_path=self.cache_path
    )
    assert path.isfile(self.cache_filepath)

    tdb = TinyDB(self.cache_filepath)

    data = tdb.all()[0]
    keys_list = [
        'cache_date', 'region_id', 'type_id', 'lastWrite', 'prediction'
    ]
    assert set(keys_list) == set(data.keys())

    dummy_str_data = dummy_data.to_json(
        date_format='iso',
        orient='records'
    )
    # round-trip parse to check the cached JSON is readable
    cached_data = pd.read_json(data['prediction'])
    assert data['prediction'] == dummy_str_data

    tdb.close()
def test_serialisation_of_numpy_ndarray(tmpdir):
    import os

    from sacred.observers.tinydb_hashfs import NdArraySerializer
    from tinydb_serialization import SerializationMiddleware

    import numpy as np

    # Set up serialisation middleware for non list/dict objects
    serialization_store = SerializationMiddleware()
    serialization_store.register_serializer(NdArraySerializer(), 'TinyArray')

    db = TinyDB(os.path.join(tmpdir.strpath, 'metadata.json'),
                storage=serialization_store)

    eye_mat = np.eye(3)
    ones_array = np.ones(5)

    document = {
        'foo': 'bar',
        'some_array': eye_mat,
        'nested': {
            'ones': ones_array
        }
    }

    db.insert(document)
    returned_doc = db.all()[0]

    assert returned_doc['foo'] == 'bar'
    assert (returned_doc['some_array'] == eye_mat).all()
    assert (returned_doc['nested']['ones'] == ones_array).all()
class pcDB:
    def __init__(self, table="default"):
        path = ''  # e.g. '/path/to/db.json'
        self.table = table
        self.db = TinyDB(path).table(table)

    def insert(self, _dict):
        '''Insert a single document, e.g. {'int': 1, 'char': 'a'}.'''
        self.db.insert(_dict)

    def getAll(self):
        '''Return all documents via db.all(); no parameters.'''
        return self.db.all()

# To reduce disk writes, the storage can be wrapped in a caching middleware:
# from tinydb.storages import JSONStorage
# from tinydb.middlewares import CachingMiddleware
# db = TinyDB('/path/to/db.json', storage=CachingMiddleware(JSONStorage))
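# A minimal sketch of the CachingMiddleware setup mentioned in the comment
# above (the file name is assumed): writes are buffered in memory and only
# flushed when the cache fills or the database is closed, so close() matters.
from tinydb import TinyDB
from tinydb.storages import JSONStorage
from tinydb.middlewares import CachingMiddleware

db = TinyDB('cached_db.json', storage=CachingMiddleware(JSONStorage))
db.insert({'int': 1, 'char': 'a'})
db.close()  # flushes buffered writes to disk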
def get_coffees(active):
    name = ''
    name_link = ''
    nav_links = []
    description = ''
    db = TinyDB('db/coffee.json')
    coffee = db.all()
    count = 0
    coffee_id = 0

    for i in coffee:
        if active == -1 and count == 0:
            # No selection yet: the first coffee becomes the active entry
            nav_links.append(("/" + str(i['id']), "active", i['name']))
            name = i['name']
            description = i['description']
            coffee_id = i['id']
            name_link = '/' + str(i['id'])
        elif active == -1 and count > 0:
            nav_links.append(("/" + str(i['id']), "", i['name']))
        elif active == i['id']:
            nav_links.append(("/" + str(i['id']), "active", i['name']))
            name = i['name']
            description = i['description']
            coffee_id = i['id']
            name_link = '/' + str(i['id'])
        else:
            nav_links.append(("/" + str(i['id']), "", i['name']))
        count = count + 1

    for i in nav_links:
        print(i)
    print(name)
    print(name_link)

    return nav_links, name, name_link, description, coffee_id
def test_process(tmpdir, capsys, side_effect, success):
    db_fn = tmpdir.join('database.json')
    log_dir = tmpdir.mkdir('log')
    db = TinyDB(db_fn.strpath)
    process_me = '/my/path/A12345'
    accession_number = op.basename(process_me)
    paths2process = {process_me: 42}

    with patch('subprocess.Popen') as mocked_popen:
        stdout = b"INFO: PROCESSING STARTS: {'just': 'a test'}"
        mocked_popen_instance = mocked_popen.return_value
        mocked_popen_instance.side_effect = side_effect
        # mock communicate to get the expected stdout
        mocked_popen_instance.communicate.return_value = (stdout, )
        # set return value for wait
        mocked_popen_instance.wait.return_value = 1 - success
        process(paths2process, db, wait=-30, logdir=log_dir.strpath)
        out, err = capsys.readouterr()

    log_fn = log_dir.join(accession_number + '.log')
    mocked_popen.assert_called_once()
    assert log_fn.check()
    assert log_fn.read() == stdout.decode('utf-8')
    assert db_fn.check()
    # the paths2process dictionary should be emptied by process()
    assert not paths2process
    assert out == 'Time to process {0}\n'.format(process_me)

    # check what we have in the database
    Path = Query()
    query = db.get(Path.input_path == process_me)
    assert len(db) == 1
    assert query
    assert query['success'] == success
    assert query['accession_number'] == op.basename(process_me)
    assert query['just'] == 'a test'
def setUp(self):
    with open(os.path.join(BASE_DIR, 'tests', 'fixtures', 'test.json')) as f:
        self.j = json.load(f)
    self.database_name = os.path.join(os.getcwd(), 'test.db')
    db = TinyDB(self.database_name)
    db.insert_multiple(self.j)
    db.close()  # note the call: a bare `db.close` would be a no-op
def crawl(sr=0, er=3):
    archive = dict()
    url = ("https://community.topcoder.com/tc?module=ProblemArchive"
           "&sr=%d&er=%d" % (sr, er))
    print("requesting seed page...")
    r = requests.get(url)
    html = h.unescape(r.content.decode('utf-8'))
    doc = pq(html)

    for i in doc('table.paddingTable2').eq(2).children()[3:]:
        round_name = pq(i).children().eq(2).find('a').text()
        sub_url = pq(i).children().eq(2).find('a').attr.href
        if sub_url is not None:
            rid = sub_url.split('rd=')[-1]
            archive[round_name] = {'rid': rid, 'round': round_name}

    db = TinyDB("data/db.json")
    tot = len(archive.values())
    cur = 0
    prob_cnt = 0
    for k in archive.values():
        problems = crawl_round(k['rid'], k['round'])
        print('parse result:')
        for p in problems:
            for pk, pv in p.items():
                print("%-15s: %s" % (pk, pv))
            prob_cnt += 1
            q = Query()
            if not db.search(q.name == p['name']):
                print('>>>>>>> insert problem: %s' % p['name'])
                db.insert(p)
            print('-' * 10)
        cur += 1
        print('*' * 10, 'finish', k['round'], ',tot rounds:', tot,
              'cur round:', cur, 'round problems:', len(problems), '*' * 10)
    print('done, total round: %d, total problems: %d' % (cur, prob_cnt))
def __init__(self):
    self.sentCache = {}
    self.checkingThread = threading.Thread(target=self.startThread)
    # self.checkingThread.daemon = True
    self.keepChecking = True
    self.config = Config()

    if self.config.winning_streak:
        self.winning_streak_messages = self.read_spree_file(
            self.config.winning_streak_file)
    if self.config.losing_streak:
        self.losing_streak_messages = self.read_spree_file(
            self.config.losing_streak_file)

    # Debug purposes
    self.printOutput = False

    # Initializing the API
    key = dotamatch.get_key()  # Steam Dev Key (~/.steamapi)
    try:
        self.match_history = MatchHistory(key)
        self.match_details = MatchDetails(key)
        self.account_details = PlayerSummaries(key)
        self.heroes = Heroes(key).heroes()

        # Actual DB
        db = TinyDB(self.config.db_path)
        # db.purge()
        # db.purge_tables()
        self.matches_table = db.table('matches')
        self.matches_info_table = db.table('matches_info')
    except dotamatch.api.ApiError:
        print("Error connecting to the API.")
def prelimsearchURLs():
    db = TinyDB('db.json')
    prelimtable = db.table('prelimcasedetails')
    cases = getAllSearchResults()
    prelimtable.purge()
    for idx, case in enumerate(cases):
        prelimtable.insert({'caseid': case.caseid,
                            'casename': case.casename,
                            'prelimvideourl': case.videourl,
                            'detailedVideoURL': '0'})
def is_vip(author):
    # db.get() returns None on a miss rather than raising, so the original
    # try/except always returned True; test the result instead.
    vip_db = TinyDB("dbs/vips.json")
    return vip_db.get(where("email") == author) is not None
def save_customer(self, customer):
    if not self.validate(customer):
        return None
    db = TinyDB('db/db.json')
    return db.insert(customer)
def process_and_add_one(pdf_path):
    pdf_name = pdf_path.split("/")
    pdf_name = pdf_name[-1]
    directory = pdf_path[0:-len(pdf_name)]
    stripped_name = pdf_name[0:-4]

    title_path = title_dir + "/" + stripped_name + ".xml"
    extract_title(pdf_path, title_path)

    # check if title extraction worked, otherwise stop with this one
    tf = open(title_path, "r")
    txml = tf.read()
    if txml == "title extraction failed":
        return None

    # build dictionary with the info we've got
    tf = open(title_path, "r")
    txml = tf.read()
    txml = txml.split(">")
    title = "title not found"
    for line in txml:
        if "</title" in line:
            title = line[0:-7]
            print(title)
            break

    # save a plain-text version of the title
    txt_name_path = title_path[0:-4] + ".txt"
    ftxt = open(txt_name_path, "a")
    ftxt.write(title)

    if title == "title not found":
        return None

    # if the title was found, get its DOI
    currDOI = get_DOI_from_title(title)

    # open/create the TinyDB database
    db = TinyDB(db_loc)

    # make sure the paper isn't in the db already
    paper = Query()
    gotit = db.search(paper.ownDOI == currDOI)
    if gotit:
        return currDOI

    text_path_xml = text_dir + "/" + stripped_name + ".xml"
    text_path_txt = text_dir + "/" + stripped_name + ".txt"
    if not extract_text(pdf_path, text_path_xml, text_path_txt):
        print("text extraction failed")
        return None

    # only extract bibtex if you don't have it already,
    # because this is the long part
    # TODO: Return before doing bib extraction
    bib_path = bib_dir + "/" + stripped_name + ".bib"
    if not extract_bibtex(pdf_path, bib_path):
        print("caught in the new code")
        return None

    refDOIs = get_ref_list_DOIs(bib_path)

    new_dict = {"ownDOI": currDOI, "refDOIs": refDOIs,
                "filename": stripped_name}
    db.insert(new_dict)
    return currDOI
def write(message, args):
    # message.reply('I can understand hi or HI!')
    # react with a thumbs-up emoji
    # message.react('+1')
    db = TinyDB('db.json')
    db.insert({'value': args})
    print(args)
    db.close()
def log_output(filename, source):
    filename = os.path.abspath(filename)
    if option(CONFIG, 'general', 'debug'):
        print("Output to %s using %s" % (filename, source))

    # Update the run object in the DB
    db = TinyDB(DBFILE)
    db.update(append("outputs", filename), eids=[RUN_ID])
    db.close()
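# `append` above is a custom update operation (recipy's own helper, not shown
# here). TinyDB's update() accepts any callable that mutates the matched
# element in place, so such an operation can be sketched roughly as follows
# (an illustration, not recipy's actual implementation):
def append_sketch(field, value):
    def transform(element):
        # create the list on first use, then append the new value
        element.setdefault(field, []).append(value)
    return transform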
class EventModel(object):
    def __init__(self, uri):
        self.uri = uri
        self.db = None
        self.reload()

    def reload(self):
        if self.db is not None:
            self.db.close()
        self.db = TinyDB(self.uri, indent=2)
        self.actions = self.db.table('actions')
        self.alarms = self.db.table('alarms')

    def get_action_by_id(self, action_id):
        return self.actions.get(eid=action_id)

    def get_alarm_by_id(self, alarm_id):
        return self.alarms.get(eid=alarm_id)

    def get_actions_by_alarm(self, alarm):
        for action_id in alarm.get('actions', []):
            action = self.get_action_by_id(action_id)
            if action is None:
                continue
            yield action

    def get_all_alarms(self):
        return self.alarms.all()

    def get_all_actions(self):
        return self.actions.all()

    def get_all_alarms_expanded(self):
        for alarm in self.get_all_alarms():
            for action in self.get_actions_by_alarm(alarm):
                yield alarm, action

    def add_event(self, alarm, actions):
        action_ids = [self.add_action(a) for a in actions]
        alarm['actions'] = action_ids
        return self.alarms.insert(alarm)

    def add_action(self, action):
        return self.actions.insert(action)

    def add_alarm(self, alarm):
        return self.add_event(alarm, [])

    def update_alarm(self, alarmid, new_fields={}):
        return self.alarms.update(new_fields, eids=[alarmid])

    def update_action(self, actionid, new_fields={}):
        return self.actions.update(new_fields, eids=[actionid])

    def delete_alarm(self, alarmid):
        return self.alarms.remove(eids=[alarmid])

    def delete_action(self, actionid):
        return self.actions.remove(eids=[actionid])
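# A small usage sketch for EventModel (the file name and the alarm/action
# fields are assumed, not taken from the original project):
model = EventModel('events.json')
alarm_id = model.add_event({'time': '07:30'},
                           [{'type': 'sound', 'file': 'ring.wav'}])
for alarm, action in model.get_all_alarms_expanded():
    print(alarm, action)
model.update_alarm(alarm_id, {'time': '08:00'})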
def get_file(name):
    db = TinyDB(path_db_)
    temp = Query()
    if len(db.search(temp.name == name)) > 0:
        path = get_path_data(db.search(temp.name == name)[0]['file'])
        db.close()
        if not os.path.exists(path):
            download(name)
        return path
    # implicitly returns None if the name is not in the database
def get_genres(database_name):
    """Utility method to get all the genres as a set"""
    db = TinyDB(os.path.join(os.getcwd(), database_name))
    all_genres = {song['genre'] for song in db.all()}
    specific_genres = set()
    for genre in all_genres:
        # split multi-genre strings such as 'Rock/Pop' into their parts
        specific_genres = specific_genres.union(set(genre.strip().split('/')))
    db.close()
    return _strip_spaces(specific_genres)
def test_upgrade(tmpdir):
    db_file = tmpdir.join('db.json')
    db_file.write(v1_0)

    # Run upgrade from the v1.0 file format
    assert migrate(str(db_file)) is True

    db = TinyDB(str(db_file))
    assert db.count(where('key') == 'value') == 1
def test_json_kwargs(tmpdir):
    db_file = tmpdir.join('test.db')
    db = TinyDB(str(db_file), sort_keys=True, indent=4,
                separators=(',', ': '))

    # Write contents
    db.insert({'b': 1})
    db.insert({'a': 1})

    assert db_file.read() == '''{
def api():
    db = TinyDB(DB_FILENAME)
    results = db.search((Query().hidden == False) | (Query().starred == True))
    db.close()

    for result in results:
        result['eid'] = result.eid
        result['url'] = url_for('goto', eid=result.eid)

    results.sort(key=lambda r: r['created_at'])
    return json.dumps(results)
file_list = glob.glob("/home/feng/桌面/to_be_calculated/" + str(patient[i]) +
                      "/score/" + measure + "/*.txt")
op_file = ("Clinical_Note/" + str(patient[i]) + "/" + str(patient[i]) +
           "_scoredb_" + measure + ".txt")

# Aggregate all SMT similarity scores computed for this patient by this
# measure into a single file
print("Combining all similarity scores for", patient[i], ".....")
if True:
    with open(op_file, 'w') as outfile:
        for fname in file_list:
            with open(fname) as infile:
                outfile.write(infile.read())

from tinydb import TinyDB
from tinydb.queries import Query, where

print("Building the score database for", patient[i], ".....")
db = TinyDB("Clinical_Note/" + str(patient[i]) + "/DB/" + str(patient[i]) +
            "_scoredb_path.json")
f = open("Clinical_Note/" + str(patient[i]) + "/" + str(patient[i]) +
         "_scoredb_path.txt")
rec = f.readline()

# # print(rec.split("<>")[0], rec.split("<>")[1],
# #       rec.split("<>")[2].replace('\n', ''))
# q = Query()
# res = db.search((q.cui1 == 'C0748646') & (q.cui2 == 'C0010520'))
# # print(res[0]['score'])
# print(res)

# Database construction stage
if True:
    cnt = 0
    while rec:
        db.insert({