# display existing business segments in dropdown menu
user_bs_select = st.selectbox(
    'Which Business Segment do you belong to?', existing_org)
left_column, right_column = st.beta_columns(2)
add_org_button = left_column.button('Save', key=0)

# save org
if add_org_button:
    for org in org_table:
        # update org value
        if org['text'] == user_bs_select:
            OrgQuery = Query()
            org_table.update({'value': org['value'] + 1},
                             OrgQuery.text == user_bs_select)

existing_org = []
existing_org_value = []
# read existing app names
for org in org_table:
    existing_org.append(org['text'])
    existing_org_value.append(org['value'])

# make dataframe out of DB entries
org_df = pd.json_normalize(org_table.all())
org_fig = px.bar(
    org_df,
def remove_image(file, db):
    db.remove(Query()._id == file)
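# A minimal usage sketch for remove_image (assumed setup): it expects a TinyDB
# handle whose documents carry a custom "_id" field. The database file name and
# document layout below are illustrative, not taken from the original project.
from tinydb import TinyDB, Query

db = TinyDB('images.json')
db.insert({'_id': 'cat-001', 'path': '/tmp/cat.png'})
remove_image('cat-001', db)                        # deletes the matching document
assert db.count(Query()._id == 'cat-001') == 0     # nothing left with that _id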
import time
from datetime import datetime

import httpx
from loguru import logger
from nonebot.adapters.cqhttp import MessageSegment, Message, Bot, Event
from tinydb import Query

from .config import hso_config
from .model import group_config, friend_config, Power, status, db_tmp, history

# -------------------------------------------------------
# Setu class wrapper
# -------------------------------------------------------
Q = Query()


async def set_essence_msg(message_id):
    # mark a message as an "essence" (highlighted) message via the local CQHTTP API
    async with httpx.AsyncClient() as client:
        await client.post("http://127.0.0.1:5700/set_essence_msg",
                          params={"message_id": message_id})


class Setu:
    def __init__(self, bot: Bot, event: Event, state: dict, **requests_kwargs):
        self.bot = bot
        self.event = event
        self.state = state
        self._REQUESTS_KWARGS = requests_kwargs

    # -----------------------------------
def reserve(cls):
    if command := cls.db().get(Query().version == cls.version):
        return command["reserve"]
### DESCRIPTION ###
# This is the "SEARCH" command module.
# Specifies the functionality for searching for all current contents of the fridge,
# finding a specific item, and trying to search for an item not found in the fridge.

# Imports for DB
from tinydb import TinyDB, Query
# Import for Flask response
from flask import jsonify

# Declare DB and Query
fridge_db = TinyDB('fridge_db.json')  # DB to store fridge contents
Food = Query()


def search_fridge(quant):
    # Search and show all contents in fridge
    if quant == "all":
        fridge_list_str = ""
        i = 1
        while i <= len(fridge_db):
            item_list = []
            db_entry = fridge_db.get(doc_id=i)
            if db_entry['quantity'] != 0:  # Search to show non-zero items
                db_entry_item = str(db_entry['item'])
                item_list.append(db_entry_item)
                db_entry_item_quant = str(db_entry['quantity'])
                item_list.append(db_entry_item_quant)
                db_entry_item_expire = str(db_entry['expire_date'])
                item_list.append(db_entry_item_expire)
r = requests.get(url=URL)
while not r:
    r = requests.get(url=URL)
data = r.content
data = data[2:]
data = data[:-1]
data = json.loads(data)
text = data['quoteText']
author = data['quoteAuthor']
print(text)
print(author)

# get text hash (encode to bytes for hashlib)
md5hash = hashlib.md5(text.encode('utf-8'))
print(md5hash.hexdigest())

# keep fetching until we get a quote whose hash is not already stored
while db.search(Query().textMd5 == md5hash.hexdigest()):
    r = requests.get(url=URL)
    while not r:
        r = requests.get(url=URL)
    data = r.content
    data = data[2:]
    data = data[:-1]
    data = json.loads(data)
    text = data['quoteText']
    author = data['quoteAuthor']
    print(text)
    print(author)
    # get text hash
    md5hash = hashlib.md5(text.encode('utf-8'))

db.insert({'textMd5': md5hash.hexdigest()})
def _done_with_learner(db_fname: str, fname: str):
    Entry = Query()
    with TinyDB(db_fname) as db:
        db.update({"job_id": None, "is_done": True}, Entry.fname == fname)
def _get_n_jobs_done(db_fname: str):
    Entry = Query()
    with TinyDB(db_fname) as db:
        return db.count(Entry.is_done == True)  # noqa: E712
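# A hypothetical end-to-end sketch tying the two helpers above together: the
# database file name and the document layout ("fname", "job_id", "is_done")
# are assumptions inferred from the fields these functions touch.
from tinydb import TinyDB

db_fname = "jobs.json"
with TinyDB(db_fname) as db:
    db.insert({"fname": "learner_0.pickle", "job_id": "42", "is_done": False})

_done_with_learner(db_fname, "learner_0.pickle")   # mark the learner finished
print(_get_n_jobs_done(db_fname))                  # -> 1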
from asyncio import coroutine, get_event_loop, ensure_future
from collections import defaultdict
from typing import Union

from tinydb import Query
from tinydb.database import Table

ConfigQuery = Query()
ConfigEntry = Union[str, int, float, bool]


def _has_handlers(cls):
    for method in cls.__dict__.values():
        for key in getattr(method, 'handles', []):
            cls._handlers[key].append(method)
    return cls


def _handles(key: str) -> callable:
    def decorator(func: callable) -> callable:
        if not hasattr(func, 'handles'):
            func.handles = []
        func.handles.append(key)
        return func
    return decorator


def _await(coro: coroutine) -> None:
    get_event_loop().call_soon_threadsafe(ensure_future, coro)
def give_tacos(giving_user, receiv_user, n_tacos, reaction=False, channel=None):
    user = Query()
    init_user(giving_user)
    init_user(receiv_user)
    giving_owned_tacos = db.search(user.user_id == giving_user)[0]['daily_tacos']
    if giving_owned_tacos - n_tacos >= 0:
        db.update(add('owned_tacos', n_tacos), user.user_id == receiv_user)
        db.update(subtract('daily_tacos', n_tacos), user.user_id == giving_user)
        # LOG to DB
        db_logs.insert({
            'giving_user': giving_user,
            'receiver_user': receiv_user,
            'n_tacos': n_tacos,
            'type': 'reaction' if reaction else 'message',
            'date': today
        })
        # notify the giver: "<receiver> received N tacos from you! You have M tacos left to give today."
        slack_client.api_call(
            "chat.postMessage",
            channel=giving_user,
            as_user=True,
            text="¡<@" + receiv_user +
            "> *ha recibido {0:g} x :taco:* de tu parte! Te quedan {1:g} tacos para repartir hoy."
            .format(
                n_tacos,
                db.search(Query()['user_id'] == giving_user)[0]['daily_tacos']))
        owned_tacos = db.search(Query().user_id == receiv_user)[0]['owned_tacos']
        # notify the receiver: "You received N tacos from <giver> in channel <#...>! You now have M tacos."
        slack_client.api_call(
            "chat.postMessage",
            channel=receiv_user,
            as_user=True,
            text=("¡*Has recibido {0:g} x :taco: * de <@" + giving_user +
                  "> en el canal <#" + channel +
                  ">! Ya tienes *{1:g}x :taco: ").format(n_tacos, owned_tacos))
    else:
        # giver has run out of daily tacos: "Not enough tacos! You get N fresh tacos in X hours."
        slack_client.api_call(
            "chat.postMessage",
            channel=giving_user,
            as_user=True,
            text="*¡No tienes suficientes tacos!* Recibirás {0:g} TACOS NUEVOS :taco: recién cocinados en *{1:} horas.*"
            .format(DAILY_TACOS, time_left))
        # To-do: Send giving user private message: No more tacos! You have to wait... {Time}
    return None
def events_show():
    db = TinyDB('db.json')
    resp = db.search(Query().counter.exists())
    return jsonify(resp)
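# A hypothetical way the view above could be wired into a Flask app; the
# "/events" URL rule and the module-level app object are assumptions made
# for illustration, not taken from the original project.
from flask import Flask

app = Flask(__name__)
app.add_url_rule('/events', 'events_show', events_show)

if __name__ == '__main__':
    app.run(debug=True)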
accounts = w3.personal.listAccounts
assert(len(accounts) > 0)
account = accounts[0]
pwd = getpass()
assert(w3.personal.unlockAccount(account, pwd))
del pwd

# Load contract address and abi
with open("RangeProofRegistryABI.json") as f:
    contract_json = json.load(f)

registry_contract = w3.eth.contract(address=contract_json["address"],
                                    abi=contract_json["abi"])

# Open database
db = TinyDB("validator_db.json")
query = db.table("sync").search(Query().blockNumber)
blockNumber = 1
if len(query) == 0:
    db.table("sync").insert({"blockNumber": 1})
else:
    blockNumber = max([0, query[0]['blockNumber'] - 40000])

print("Start block number: " + str(blockNumber))

# Setup filters
rps_filter = registry_contract.events.RangeProofsSubmitted.createFilter(fromBlock=blockNumber)
rpa_filter = registry_contract.events.RangeProofsAccepted.createFilter(fromBlock=blockNumber)
rpr_filter = registry_contract.events.RangeProofsRejected.createFilter(fromBlock=blockNumber)

# Parse rejected and accepted proofs
accepted_proofs = dict()
def remove_message_with_id(self, msg_id):
    self.outbox_db.remove(Query().id == msg_id)
import cv2
from tqdm import tqdm

cred = credentials.Certificate('keys/i-find-521d7-firebase-adminsdk-wemsi-4fca9d0fb8.json')
firebase_admin.initialize_app(cred)
fm = FBManager(firestore.client())
inst = am.getInstance()
if inst.checkConnected() is False:
    inst.connect('ifind', 'IFCollection')

root_path = r'C:\Users\DeepLearning_3\PycharmProjects\ipcgan_clone\preprocess\posts'

db = TinyDBManager(db_file=r'D:\Data\TinyDB\db.json', default_table='ganQueue')
table = db.getTable()
Request = Query()

while True:
    # check firebase:
    # every 5 seconds, fetch the entries with grabbed=False
    # gan_list = fm.readCollection(fm.getCollection(u'ganStack'))
    if table.count(Request.grabbed == False) > 0:
        waiting_list = []
        count = 0
        gan_list = table.search((Request.grabbed == False))
        for i in gan_list:
            if count >= 10:
                break
            waiting_list.append(i)
            count += 1
        if not waiting_list:
async def query(self, ctx):
    user = ctx.author
    userdb = str(user)
    Points = Query()
    tally = db.search(Points.User == userdb)
    await ctx.channel.send(tally)
def _collect_result(self):
    useful = self.db.all()
    total = 0
    duplicate_set = []
    duplicate_status = {}
    for u in useful:
        total += len(u['useful'])
        for d in u['duplicates'].keys():
            duplicate_set.append(u['duplicates'][d] + [d])
    for s in duplicate_set:
        for m in s:
            if m not in duplicate_status.keys():
                duplicate_status[m] = {'duplicate': 0, 'not_duplicate': 0}
            if len(s) > 1:
                duplicate_status[m]['duplicate'] += 1
            else:
                duplicate_status[m]['not_duplicate'] += 1
    totally_useless = 0
    totally_useful = 0
    partially_useless = 0
    operators = {}
    for key in duplicate_status.keys():
        mutant = self.state.db.table('mutants').search(Query().name == key)[0]
        operator = mutant['operator']
        if operator not in operators.keys():
            operators[operator] = {
                'totally_useless': 0,
                'totally_useful': 0,
                'partially_useless': 0,
                'products_useless': 0,
                'products_useful': 0
            }
        operators[operator]['products_useless'] += duplicate_status[key]['duplicate']
        operators[operator]['products_useful'] += duplicate_status[key]['not_duplicate']
        if duplicate_status[key]['not_duplicate'] == 0:
            totally_useless += 1
            operators[operator]['totally_useless'] += 1
        elif duplicate_status[key]['duplicate'] == 0:
            totally_useful += 1
            operators[operator]['totally_useful'] += 1
        else:
            partially_useless += 1
            operators[operator]['partially_useless'] += 1
    return {
        'operators': operators,
        'totally_useless': totally_useless,
        'totally_useful': totally_useful,
        'partially_useless': partially_useless,
        'products_useful': total
    }
async def point(self, ctx, user: discord.User):
    userdb = str(user)
    User = Query()
    db.update(increment('Points'), User.User == userdb)
    print(userdb + " has gained a point!")
    await ctx.channel.send(f"A point has been awarded to {userdb}!")
class ChimpAutoRun:
    '''
    run chimp
    '''

    def __init__(self, arguments):
        '''
        initialize local variables
        '''
        if 'TZ' not in environ:
            os.environ['TZ'] = 'UTC'
            time.tzset()
        if 'FrameworkPath' not in environ:
            self.FrameworkPath = path.join(environ['HOME'], 'Projects', 'AutoBDD')
        else:
            self.FrameworkPath = environ['FrameworkPath']
        os.chdir(self.FrameworkPath)

        self.reportonly = arguments.REPORTONLY
        self.runtime_stamp = arguments.TIMESTAMP if arguments.TIMESTAMP else time.strftime("%Y%m%d_%H%M%S%Z", time.gmtime())
        self.parallel = arguments.PARALLEL
        self.screenshot = arguments.SCREENSHOT
        self.screenremark = arguments.SCREENREMARK
        self.movie = arguments.MOVIE
        self.platform = arguments.PLATFORM
        self.browser = arguments.BROWSER
        self.debugmode = arguments.DEBUGMODE
        self.projectbase = arguments.PROJECTBASE if arguments.PROJECTBASE else 'test-projects'
        self.project = arguments.PROJECT
        self.projecttype = arguments.PROJECTTYPE
        self.reportbase = arguments.REPORTBASE if arguments.REPORTBASE else path.join(
            self.FrameworkPath, self.projectbase, self.project, 'bdd_reports')
        self.reportpath = arguments.REPORTPATH if arguments.REPORTPATH else '_'.join(
            (self.project, self.runtime_stamp))
        self.modulelist = arguments.MODULELIST
        self.argstring = arguments.ARGSTRING
        self.display_size = '1920x1200'
        self.project_full_path = path.join(self.FrameworkPath, self.projectbase, self.project)
        self.report_full_path = path.join(self.reportbase, self.reportpath)
        self.isMaven = self.isMavenProject(arguments.PROJECTTYPE)

        # Each runable module should have a chimp.js
        self.chimp_profile = path.join('chimp.js')

        # Create report directory
        if not path.exists(path.join(self.FrameworkPath, self.reportbase)):
            os.makedirs(path.join(self.FrameworkPath, self.reportbase))
        self.report_dir_base = path.join(self.reportbase, self.reportpath)
        try:
            os.makedirs(self.report_dir_base)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        print('\n*** Report Directory: ***\n {}'.format(self.report_dir_base))

        # remove /tmp/*.lock file
        for item in os.listdir('/tmp/'):
            if item.endswith(".lock"):
                os.remove('/tmp/' + item)

        self.host = []
        self.available_pool_number = 0
        self.end_time = time.strftime("%Y%m%d_%H%M%S%Z", time.gmtime())
        self.get_available_host()
        pprint(vars(self))

    def isMavenProject(self, args):
        result = False
        if (self.projecttype.lower() == "chimp" or self.projecttype.lower() == "chimpy"):
            result = False
        elif (self.projecttype.lower() == "maven" or self.projecttype.lower() == "mvn"):
            result = True
        else:
            # auto-detect
            # print('*** Project Type is set to auto-detect ***')
            for fname in os.listdir(self.project_full_path):
                if "pom.xml" in fname:
                    result = True
                    break
            # print('*** is Maven = {}'.format(result))
        return result

    def create_dryrun_json(self):
        from autorunner_dryrun import ChimpDryRun
        dry_run = ChimpDryRun(self.projectbase, self.project, self.modulelist,
                              self.platform, self.browser, self.argstring,
                              self.report_full_path)
        self.run_json = dry_run.create_run_json()
        return self.run_json

    def update_tinydb(self, tinyrundb_json, run_json, rerunWhat):
        db = TinyDB(tinyrundb_json, sort_keys=True, indent=4, separators=(',', ': '))
        db.drop_table('_default')
        query = Query()
        runcases = json.loads(open(run_json).read(), encoding='utf-8')
        for case in runcases:
            if case['feature'] in db.tables():
                feature_table = db.table(case['feature'])
                if (rerunWhat is not None) and (len(feature_table.search(query.status == rerunWhat)) > 0):
                    feature_table.update({'status': 'rerun'})
            else:
                feature_table = db.table(case['feature'])
                feature_table.insert(case)
        db.close()

    def get_available_host(self):
        '''
        get available host by reading config file
        '''
        config_file = path.join(self.FrameworkPath, 'framework', 'configs', 'chimp_run_host.config')
        assert path.exists(config_file), '{} does not exist'.format(config_file)
        with open(config_file, encoding='utf-8') as fname:
            head = fname.readline()
            while 'SSHHOST' not in head:
                head = fname.readline()
            headarray = head.strip().split()
            for item in fname:
                hostinfo = item.strip().split()
                if len(hostinfo) > 1:
                    hostdict = dict(zip(headarray, hostinfo))
                    if hostdict['Status'] == 'on':  # and hostdict['Platform'] == self.platform:
                        self.available_pool_number += int(hostdict['Thread'])
                        self.host.append(hostdict)
        print(self.host)
        assert len(self.host) > 0, 'No host is available! Check file: chimp_run_host.config'

    def generate_reports(self, dbfile):
        '''
        '''
        # get run duration
        t1 = self.runtime_stamp
        t2 = self.end_time
        stime = datetime(
            int(t1[:4]), int(t1[4:6]), int(t1[6:8]),
            int(t1[9:11]), int(t1[11:13]), int(t1[13:15]))
        etime = datetime(
            int(t2[:4]), int(t2[4:6]), int(t2[6:8]),
            int(t2[9:11]), int(t2[11:13]), int(t2[13:15]))
        run_duration = str(etime - stime)
        print('Run Duration: {}'.format(run_duration))

        # generate cucumber report json file
        db = TinyDB(dbfile, sort_keys=True, indent=4, separators=(',', ': '))
        db.drop_table('_default')
        query = Query()
        cucumber_report_json = []
        for table in db.tables():
            group = db.table(table)
            reportList = group.search(query.status != 'crashed')
            feature_report = None
            for item in reportList:
                element = json.loads(open(item['run_result'], encoding='utf-8').read())[0]
                if not feature_report:
                    feature_report = element
                else:
                    feature_report['elements'].append(element['elements'][0])
            if feature_report is not None:
                cucumber_report_json.append(feature_report)
        db.close()

        report_json_path = os.path.join(self.report_dir_base, 'cucumber-report.json')
        with open(report_json_path, 'w') as fname:
            json.dump(cucumber_report_json, fname, indent=4)

        # generate cucumber HTML report
        report_html_path = report_json_path[:report_json_path.rfind('json')] + 'html'
        if self.browser == 'CH':
            report_browser = 'chrome'
            report_browser_ver = subprocess.run('google-chrome --version'.split(), stdout=subprocess.PIPE) \
                .stdout.decode('utf-8') \
                .replace('Google Chrome', '') \
                .strip()
        elif self.browser == 'FF':
            report_browser = 'firefox'
            report_browser_ver = subprocess.run('firefox --version'.split(), stdout=subprocess.PIPE) \
                .stdout.decode('utf-8') \
                .replace('Mozilla Firefox', '') \
                .strip()
        elif self.browser == 'IE':
            report_browser = 'internet explorer'
            report_browser_ver = 'Unknown'
        else:
            report_browser = self.browser
            report_browser_ver = 'Unknown'
        cmd_generate_html_report = path.join(self.FrameworkPath, 'framework', 'scripts', 'generate-reports.js') + ' ' + \
            '--reportJson=' + report_json_path + ' ' + \
            '--reportName=\'AutoBDD HTML Report\' ' + \
            '--reportTitle=' + self.project + ' ' + \
            '--testPlatform=' + self.platform + ' ' + \
            '--testPlatformVer=\'Ubuntu 18.04\' ' + \
            '--testBrowser=' + report_browser + ' ' + \
            '--testBrowserVer=' + report_browser_ver + ' ' + \
            '--testThreads=' + self.parallel + ' ' + \
            '--testStartTime=' + self.runtime_stamp + ' ' + \
            '--testRunDuration=' + run_duration + ' ' + \
            '--testRunArgs="' + self.argstring + '"'
        print('Generate HTML Report On: {}'.format(report_html_path))
        print(cmd_generate_html_report)
        os.system(cmd_generate_html_report)

        # generate cucumber XML report
        report_xml_path = report_json_path[:report_json_path.rfind('json')] + 'xml'
        cmd_generate_xml_report = 'cat ' + report_json_path + \
            ' | cucumber-junit --strict > ' + \
            report_xml_path
        print('Generate XML Report On: {}'.format(report_xml_path))
        print(cmd_generate_xml_report)
        os.system(cmd_generate_xml_report)

    def run_in_parallel(self, dbfile):
        '''
        run chimp in parallel
        1. determine parallel pool size based on parallel input or CPU count
        2. from db find cases of 'notrun' and 'rerun'
        '''
        # set sub process pool number
        used_pool_number = None
        if self.parallel == 'MAX':
            # using all available rdp hosts in config file
            used_pool_number = int(self.available_pool_number)
        elif self.parallel == 'CPU':
            # using cpu count
            cpu_count = multiprocessing.cpu_count()
            if self.movie == '1':
                used_pool_number = cpu_count / 2
            else:
                used_pool_number = cpu_count
            if used_pool_number < 1:
                used_pool_number = 1
        else:
            used_pool_number = min(int(self.available_pool_number), int(self.parallel))
        used_pool_number = int(used_pool_number)
        self.parallel = str(used_pool_number)
        pool = multiprocessing.Pool(used_pool_number)
        print('USED POOL NUMBER: {}'.format(used_pool_number))

        db = TinyDB(dbfile, sort_keys=True, indent=4, separators=(',', ': '))
        # each feature is a table, scenarios are entries in a table
        # here we identify any feature containing a scenario that is notrun or failed and run the entire feature
        progress = []
        runCount = 0
        for table in db.tables():
            group = db.table(table)
            query = Query()
            case = None
            runList = group.search((query.status == 'notrun') | (query.status == 'rerun'))
            runCount += len(runList)
            if len(runList) > 0:
                case = runList[0]
                if case.doc_id:
                    module_path, module_name, feature_path, feature_name, run_result, run_report, report_dir_relative = definepath(
                        case, self.project, self.report_dir_base)
                    module_full_path = path.join(self.projectbase, self.project, module_path)
                    group.update({'status': 'running', 'run_result': run_result, 'run_report': run_report},
                                 doc_ids=[case.doc_id])
                    r = pool.apply_async(run_test,
                                         args=(self.FrameworkPath, self.host, self.platform, self.browser,
                                               self.projectbase, self.project, module_full_path, feature_path,
                                               self.movie, self.screenshot, self.screenremark, self.debugmode,
                                               self.display_size, self.chimp_profile, self.isMaven,
                                               self.argstring, self.report_dir_base, report_dir_relative,
                                               run_result, run_report))
                    progress.append(r)
                else:
                    break

        print('Expected total: {}'.format(runCount))
        overall = 0
        while overall < runCount:
            scan = 0
            time.sleep(1)
            for r in progress:
                if r.ready():
                    scan += 1
                    done_feature = r.get()
                    for table in db.tables():
                        group = db.table(table)
                        query = Query()
                        runList = group.search(query.status == 'running')
                        for case in runList:
                            if done_feature in case['uri']:
                                if os.path.exists(case['run_result']) and os.path.getsize(case['run_result']) > 0:
                                    resultString = ''
                                    failedString = '"status": "failed"'
                                    with open(case['run_result'], encoding='utf-8') as f:
                                        resultString = f.read()
                                    if (resultString.find(failedString) >= 0):
                                        group.update({'status': 'failed'}, doc_ids=[case.doc_id])
                                    else:
                                        group.update({'status': 'passed'}, doc_ids=[case.doc_id])
                                else:
                                    group.update({'status': 'crashed'}, doc_ids=[case.doc_id])
async def unpoint(self, ctx, user: discord.User):
    userdb = str(user)
    User = Query()
    db.update(decrement('Points'), User.User == userdb)
    print(userdb + " has lost a point!")
    await ctx.channel.send(f"Point taken from {userdb}!")
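# The point/unpoint commands above rely on increment()/decrement() helpers.
# TinyDB ships matching operations in tinydb.operations; whether the original
# bot imports them from there is an assumption. A self-contained sketch:
from tinydb import TinyDB, Query
from tinydb.operations import increment, decrement

db = TinyDB('points.json')
db.insert({'User': 'alice#0001', 'Points': 0})

User = Query()
db.update(increment('Points'), User.User == 'alice#0001')   # award a point
db.update(decrement('Points'), User.User == 'alice#0001')   # take it back
print(db.get(User.User == 'alice#0001')['Points'])          # -> 0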
def get_dj_info(username):
    check_query = Query()
    check_if_user_exists = THE_DB.search(check_query.id == username)
    if len(check_if_user_exists) == 0:
        return
    return check_if_user_exists[0]
def collect_tax(self, tax):
    if self.db().search(Query().version == self.version):
        self.update_value("reserve", tax)
    else:
        self._reserve = tax
        self.save()
def __init__(self, path="db_R.json"):
    db = TinyDB(path)
    self.db = db.table("cpus")
    self.keys = db.table("keys")
    self.intel = Query()
def _in_database(file, db):
    return db.count(Query()._id == file) > 0
def remove_user(email):
    user = Query()
    doc_id = db.get(user.email == email).doc_id
    db.remove(doc_ids=[doc_id])
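# A minimal usage sketch for remove_user; the module-level `db` handle and the
# document shape are assumptions made for illustration. Note that db.get()
# returns None when no document matches, so the helper assumes the email exists.
from tinydb import TinyDB, Query

db = TinyDB('users.json')
db.insert({'email': 'ada@example.com', 'name': 'Ada'})

remove_user('ada@example.com')
print(db.contains(Query().email == 'ada@example.com'))  # -> False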
def respond(self, com):
    forget = "^(?:FORGET|UPDATE) (?:EVERYTHING YOU KNOW ABOUT |ABOUT )?(?P<subject>.*)"
    capture = re.search(forget, com)
    if capture:
        # if there is a record about the subject in the database then remove that record and...
        if self.db.remove(Query().subject == self.pronoun_fixer(capture.group('subject'))):
            return "OK, I FORGOT EVERYTHING I KNOW ABOUT " + self.mirror(capture.group('subject'))
        else:
            return "I DIDN'T EVEN KNOW ANYTHING ABOUT " + self.mirror(capture.group('subject'))

    define = "(?:PLEASE )?(?:DEFINE|EXPLAIN|TELL ME ABOUT|DESCRIBE) (?P<subject>.*)"  # TODO: Extend the context of this regular expression
    capture = re.search(define, com)
    result = None
    if capture:
        return self.db_getter(capture.group('subject'))

    # Command (user's speech) must be decoded from utf-8 to unicode because spaCy only supports
    # unicode strings; self.nlp() handles all parsing
    doc = self.nlp(com.decode('utf-8'))
    subject = []  # subject list (subjects here usually are: I'M, YOU, HE, SHE, IT, etc.)
    prev_type = None  # type of the previous noun phrase
    # Iterate over the noun phrases (chunks)
    # TODO: Cover 'dobj' also; doc = nlp(u'DESCRIBE THE SUN') >>> (u'THE SUN', u'SUN', u'dobj', u'DESCRIBE')
    for np in doc.noun_chunks:
        # Purpose of this if statement is completing the possessive form of nouns
        if np.root.dep_ == 'pobj' and prev_type == 'nsubj':
            # it's an object of a preposition and the previous noun phrase was a nsubj (nominal
            # subject); this captures subjects like MY PLACE OF BIRTH
            # append the parent text from the syntactic relations tree
            # (example: while nsubj is 'MY PLACE', np.root.head.text is 'OF')
            subject.append(np.root.head.text.encode('utf-8'))
            # append the text of this noun phrase (example: while nsubj is 'MY PLACE', np.text is 'BIRTH')
            subject.append(np.text.encode('utf-8'))
            prev_type = None  # make it None on each iteration after it completes its mission
        if np.root.dep_ == 'nsubj' and np.root.tag_ != 'WP':
            # it's a nsubj (nominal subject); "wh-" words are also considered nsubj but they are
            # out of scope, which is why we exclude them
            subject.append(np.text.encode('utf-8'))  # append the text of this noun phrase
            prev_type = 'nsubj'  # assign the previous type as nsubj (nominal subject)

    subject = ' '.join(subject).strip()  # concatenate all noun phrases found
    if subject:  # if the subject is not empty
        wh_found = False
        for word in doc:  # iterate over each word in the given command (user's speech)
            # check if there is a "wh-" question (only questions in "wh-" form are accepted)
            if word.tag_ in ['WDT', 'WP', 'WP$', 'WRB']:
                wh_found = True
        if wh_found:  # if that's a question
            return self.db_getter(subject)  # return the answer from the database
        else:
            verb_found = False
            verbtense = None  # verbtense is the am/is/are of the main sentence
            clause = []  # the information that we need to acknowledge
            for word in doc:
                if verb_found:  # collect all words that come after the first verb; they form the clause
                    clause.append(word.text.encode('utf-8'))
                if word.pos_ == 'VERB' and not verb_found:  # if that's a verb and no verb has been found yet
                    verb_found = True  # verb is found
                    verbtense = word.text.encode('utf-8')  # keep it as the verbtense
            clause = ' '.join(clause).strip()  # concatenate the clause
            return self.db_setter(subject, verbtense, clause, com)  # set the record to the database
def add_to_dropdown(category):
    rules = set([])
    for rule in db.table("Rule").search(Query().category == category):
        if db.table('Finding').search(Query().rule.id == rule.eid):
            rules.add(rule['title'])
    return rules
def upsert(self, workspace):
    query = Query()
    self.db.upsert(workspace.__dict__, query.path == workspace.path)
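# A small sketch of what the upsert above does: insert the workspace document if
# no entry with that path exists, otherwise overwrite it in place. The Workspace
# class and database file below are assumptions made for illustration.
from tinydb import TinyDB, Query

class Workspace:
    def __init__(self, path, name):
        self.path = path
        self.name = name

db = TinyDB('workspaces.json')
ws = Workspace('/home/dev/project', 'project')
db.upsert(ws.__dict__, Query().path == ws.path)   # first call inserts
ws.name = 'project-renamed'
db.upsert(ws.__dict__, Query().path == ws.path)   # second call updates the same document
print(len(db))  # -> 1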
def loadAccount(self):
    db = TinyDB('Database/Player/data.db')
    query = Query()
    user_data = db.search(query.token == str(self.player.token))
    if user_data:
        self.player.name = user_data[0]["info"]["name"]
        self.player.low_id = user_data[0]["info"]["lowID"]
        self.player.IsFacebookLinked = user_data[0]["info"]["isFBLinked"]
        self.player.FacebookID = user_data[0]["info"]["facebookID"]
        self.player.club_low_id = user_data[0]["info"]["clubID"]
        self.player.club_role = user_data[0]["info"]["clubRole"]
        self.player.tutorial = user_data[0]["info"]["tutorial"]
        self.player.trophy_road = user_data[0]["info"]["leagueReward"]
        self.player.player_experience = user_data[0]["info"]["playerExp"]
        self.player.collected_experience = user_data[0]["info"]["cappedExp"]
        self.player.solo_wins = user_data[0]["info"]["soloWins"]
        self.player.duo_wins = user_data[0]["info"]["duoWins"]
        self.player.ThreeVSThree_wins = user_data[0]["info"]["3vs3Wins"]
        self.player.gems = user_data[0]["info"]["gems"]
        self.player.gold = user_data[0]["info"]["gold"]
        self.player.star_points = user_data[0]["info"]["starpoints"]
        self.player.tickets = user_data[0]["info"]["tickets"]
        self.player.tokensdoubler = user_data[0]["info"]["tokensdoubler"]
        self.player.battle_tokens = user_data[0]["info"]["availableTokens"]
        self.player.brawler_id = user_data[0]["info"]["brawlerID"]
        self.player.skin_id = user_data[0]["info"]["skinID"]
        self.player.profile_icon = user_data[0]["info"]["profileIcon"]
        self.player.brawl_boxes = user_data[0]["info"]["brawlBoxes"]
        self.player.big_boxes = user_data[0]["info"]["bigBoxes"]
        self.player.brawlers_skins = user_data[0]["info"]["brawlersSkins"]
        self.player.name_color = user_data[0]["info"]["namecolor"]
        self.player.gadget = user_data[0]["info"]["gadget"]
        self.player.starpower = user_data[0]["info"]["starpower"]
        self.player.DoNotDistrubMessage = user_data[0]["info"]["DoNotDistrub"]
        self.player.room_id = user_data[0]["info"]["roomID"]
        self.player.brawlers_trophies_in_rank = user_data[0]["info"]["brawlersTrophiesForRank"]
        self.player.brawlers_upgradium = user_data[0]["info"]["brawlersUpgradePoints"]
        self.player.Brawler_level = user_data[0]["info"]["brawlerPowerLevel"]
        self.player.Brawler_starPower = user_data[0]["info"]["brawlerStarPower"]
        self.player.Brawler_newTag = user_data[0]["info"]["brawlerNewTag"]
        self.player.brawlers_trophies = user_data[0]["info"]["brawlersTrophies"]
        if self.player.UnlockType == "Off":
            self.player.BrawlersUnlockedState = user_data[0]["info"]["UnlockedBrawlers"]

        player_total_trophies = 0
        for BrawlerID in self.player.brawlers_trophies.keys():
            player_total_trophies += self.player.brawlers_trophies[BrawlerID]
        self.player.trophies = player_total_trophies
        DataBase.replaceValue(self, 'trophies', self.player.trophies)

        if self.player.trophies < user_data[0]["info"]["highesttrophies"]:
            self.player.highest_trophies = user_data[0]["info"]["highesttrophies"]
        else:
            self.player.highest_trophies = self.player.trophies
        DataBase.replaceValue(self, 'highesttrophies', self.player.highest_trophies)