def recent_match_ids(count, initial_conditions=None, persistent=False):
    '''Generate up to `count` recent match IDs as a generator from the explorer API.'''
    # Generate our initial query, and make the first request for N items.
    columns = ['public_matches.match_id']
    table = 'public_matches'
    order = 'public_matches.match_id DESC'
    limit = min(MAX_LIMIT, count)
    sql_query = query(columns, table, conditions=initial_conditions,
                      order=order, limit=limit)
    rows = explorer(sql_query, persistent=persistent)
    count -= len(rows)
    yield from [i['match_id'] for i in rows]

    # Make iterative requests until we fetch `count` items.
    # If we ever get back fewer than `limit` items, we know that we've
    # made our last query and exhausted the data.
    subsequent_conditions = (initial_conditions or []) + ['public_matches.match_id < {match_id}']
    while limit == len(rows) and count > 0:
        limit = min(MAX_LIMIT, count)
        match_id = rows[-1]['match_id']
        sql_query = query(columns, table, conditions=subsequent_conditions,
                          order=order, limit=limit).format(match_id=match_id)
        rows = explorer(sql_query, persistent=persistent)
        count -= len(rows)
        yield from [i['match_id'] for i in rows]
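# Usage sketch for recent_match_ids (illustrative only; assumes MAX_LIMIT,
# query, and explorer from the surrounding module). Because it is a lazy
# generator, a caller can stop early and no further explorer requests fire.
import itertools

def sample_recent_ids(n=100):
    # Pull at most `n` IDs; pagination suspends as soon as `n` are yielded.
    return list(itertools.islice(recent_match_ids(10000), n))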
def analysis(webName, url):
    html = etree.HTML(
        requests.get(url, headers=HEADER).content.decode(rules[webName]['code'],
                                                         errors='ignore'))
    title = html.xpath(rules[webName]['title'])
    urlList = html.xpath(rules[webName]['urlList'])
    book_name = html.xpath(rules[webName]['book_name'])[0].strip()
    # Check whether this novel has been saved before.
    query_all = query.query(book_name, title, urlList, False)
    store_old = []
    if query_all:
        print("This novel has already been saved to (%s); please choose:" % query_all["title"][0])
        print("1. Continue saving to the original file")
        print("2. Save separately from the old chapters")
        queryTF = input("Enter a number to choose: ")
        if queryTF == "1":
            title = query_all["title"]
            urlList = query_all["urlList"]
            store_old = query_all["store"]
        else:
            query_all = query.query(book_name, title, urlList, True)
    print("Novel title: %s" % book_name)
    print("Novel chapters: %d" % (len(title) + len(store_old)))
    print("Chapters already saved: %d" % len(store_old))
    if input("Start saving? (y/n): ") == "n":
        return
    print("----- Saving started -----")
    realm = rules[webName]['realm']
    store_all = []
    for i in range(len(title)):
        store = {"title": title[i].strip(), "urlList": realm + urlList[i]}
        store_all.append(store)
    chapter.classify(book_name, webName, store_all, store_old)
def run(args, outfile_obj=sys.stdout):
    if args.get('import'):
        try:
            infile_obj = open(args['<filename>'], 'r')
        except Exception as exception:
            print('Error opening file [%s] for reading, exception = [%s] [%s]'
                  % (args['<filename>'], type(exception).__name__, exception))
            sys.exit(1)
        num = import_data(infile_obj, delete_first=args['--delete'])
        print('Imported [%s] rows from [%s]' % (num, args['<filename>']))
    elif args.get('query'):
        filters = {}
        if args.get('-f'):
            filters = dict(map(lambda x: tuple(x.split('=')),
                               args.get('-f', '').split(',')))
            # e.g., filters == dict(STB='stb1', REV='4.00')
        order_fields = []
        if args.get('-o'):
            order_fields = args['-o'].split(',')
            # e.g., order_fields == ['TITLE', 'DATE']
        select_fields = []
        if args.get('-s'):
            select_fields = args['-s'].split(',')
            # e.g., select_fields == ['TITLE:count', 'DATE']
        query(filters=filters, group_by=args.get('-g'),
              order_fields=order_fields, select_fields=select_fields,
              outfile_obj=outfile_obj)
def read(self, start=None, stop=None, q=None):
    """Read a meter.

    :param start: Start date and time.
    :type start: datetime
    :param stop: Stop date and time.
    :type stop: datetime
    :param q: List of filters excluding timestamp filters
    :type q: List
    :return: Generator of readings (or an empty list when no samples exist)
    :rtype: generator
    """
    # Default times to month to date
    default_start, default_stop = utils.mtd_range()
    if not start:
        start = default_start
    if not stop:
        stop = default_stop

    logger.info("Start: {}".format(start))
    logger.info("Stop: {}".format(stop))
    logger.info("Meter name: {}".format(self.name))

    if start > stop:
        raise InvalidTimeRangeError(start, stop)

    # Add times to query. Times are +/- the extra time.
    q = q or []
    q.append(query.query('timestamp', 'gt', start - self._extra_time, 'datetime'))
    q.append(query.query('timestamp', 'le', stop + self._extra_time, 'datetime'))

    # Count of samples:
    count = self.count(q)
    logger.debug("{} samples according to statistics.".format(count))
    if not count:
        return []

    # Get samples
    samples = self.client.samples.list(meter_name=self.name, q=q, limit=count)
    logger.debug("{} samples according to sample-list.".format(len(samples)))

    # Convert timestamps from strings to datetime objects
    for s in samples:
        s.timestamp = utils.normalize_time(utils.parse_datetime(s.timestamp))

    # Sort by resource id and then timestamps in ascending order
    samples.sort(cmp=_cmp_sample)

    # Return generator
    return self._reading_generator(samples, start, stop)
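# _cmp_sample is referenced above (and in the variant below) but not shown
# here. A plausible sketch matching the "sort by resource id and then
# timestamp" comment -- a Python 2 cmp-style comparator; the attribute names
# are assumptions, not the project's actual helper:
def _cmp_sample(a, b):
    # Compare resource ids first, then fall back to timestamps.
    return cmp((a.resource_id, a.timestamp), (b.resource_id, b.timestamp))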
def read(self, start=None, stop=None, q=None):
    """Read a meter.

    :param start: Start date and time.
    :type start: datetime
    :param stop: Stop date and time.
    :type stop: datetime
    :param q: List of filters excluding timestamp filters
    :type q: List
    :return: Generator of readings
    :rtype: generator
    """
    # Default times to month to date
    default_start, default_stop = utils.mtd_range()
    if not start:
        start = default_start
    if not stop:
        stop = default_stop

    logger.info("Start: {}".format(start))
    logger.info("Stop: {}".format(stop))
    logger.info("Meter name: {}".format(self.name))

    if start > stop:
        raise InvalidTimeRangeError(start, stop)

    # Add times to query. Times are +/- the extra time.
    q = q or []
    q.append(query.query('timestamp', 'gt', start - self._extra_time, 'datetime'))
    q.append(query.query('timestamp', 'le', stop + self._extra_time, 'datetime'))

    schedule = query.Scheduler(self.client, self.name,
                               start - self._extra_time,
                               stop + self._extra_time,
                               q=[], max_samples=self.max_samples)
    for s_start, s_stop, s_query, s_count in schedule:
        logger.debug("{} - {} - {}".format(s_start, s_stop, s_count))
    logger.debug("Count of scheduled samples {}".format(schedule.count()))

    # Get samples
    samples = schedule.list()
    logger.debug("{} samples according to sample-list.".format(len(samples)))

    # Convert timestamps from strings to datetime objects
    for s in samples:
        s.timestamp = utils.normalize_time(utils.parse_datetime(s.timestamp))

    # Sort by resource id and then timestamps in ascending order
    samples.sort(cmp=_cmp_sample)

    # Return generator
    return self._reading_generator(samples, start, stop)
def attach():
    p = os.listdir('data')
    table = 'nfl.pbp.demo'
    pk = '1,2,3,4,5'
    for c in p:
        q = "select(attach_from_csv('%s', 'nfl-kg/pbp/data/%s', ',', '\n', null, 1, vector(%s)))" % (table, c, pk)
        query(q)
    print("\n\nTest Query:\n\n")
    query('select top 5 * from %s' % (table))
def save(self):
    if self.jobid is None:
        job = []
        job.append(self.jobnum.text())
        machine_qry = query("machine_id")
        if machine_qry.first():
            machine_id = str(machine_qry.value(0))
        else:
            machine_id = "8"
        job.append(machine_id)
        user_qry = query("user_id")
        if user_qry.first():
            user_id = str(user_qry.value(0))
        else:
            user_id = "42"
        job.append(user_id)
        jobqry = query("save_job", job)
        jobid = jobqry.lastInsertId()
        print(jobid)
    else:
        jobid = self.jobid
    parts = []
    for i in range(self.workparts.rowCount()):
        dest = self.workparts.item(i, 2).text()
        dest_qry = query("dest_id", [dest])
        if dest_qry.first():
            dest_id = str(dest_qry.value(0))
        else:
            dest_id = "7"
        part = self.workparts.item(i, 0).text()
        # part_qry = query("part_id", [part])
        # if part_qry.first():
        #     part_id = str(part_qry.value(0))
        part_id = self.parts_d[part]
        if len(self.parts) > 0:
            row = [jobid, part_id, self.workparts.item(i, 1).text(), dest_id,
                   self.workparts.item(i, 3).text(), self.parts[i]['tracking']]
            parts.append(row)
        else:
            row = [jobid, part_id, self.workparts.item(i, 1).text(), dest_id,
                   self.workparts.item(i, 3).text()]
            parts.append(row)
    for part in parts:
        if len(self.parts) > 0:
            query("save_part", part)
        else:
            qry = query("save_new_part", part)
            tid = qry.lastInsertId()
            query("create_status", [tid])
    self.searchline.setText(self.jobnum.text())
    self.search()
def menu():
    try:
        choice = int(input("Please select 1-add record or 2-show record/s: "))
        if choice == 1:
            add_record()
        elif choice == 2:
            query()
        else:
            raise WrongValueException()
    except ValueError:
        print("It must be 1 or 2")
def currency_coefficient(c1, c2, coefStart, coefEnd):
    coefStart = datetime.datetime.strptime(coefStart, '%Y-%m-%d').date()
    coefEnd = datetime.datetime.strptime(coefEnd, '%Y-%m-%d').date()
    if not c1 or not c2:
        raise PreventUpdate
    both_crypto = False
    if c2 in data.CURRENCIES:
        both_crypto = True
    dff = correlationCoef(c1, c2, both_crypto, coefStart, coefEnd)
    result = 'Correlation Coefficient: {}'.format(dff['CorrelationCoefficient'][0])

    dff_1_query = "SELECT DMIX.{0}.DATE_TXT, DMIX.{0}.OPEN FROM DMIX.{0} WHERE TO_CHAR(DMIX.{0}.DATE_TXT, 'HH24') = 1".format(c1)
    dff_1_query += " AND DMIX." + c1 + ".DATE_TXT >= TO_DATE('{0}-{1}-{2}', 'MM-DD-YYYY')".format(
        str(coefStart.month).rjust(2, '0'), str(coefStart.day).rjust(2, '0'),
        str(coefStart.year).rjust(4, '0'))
    dff_1_query += " AND DMIX." + c1 + ".DATE_TXT <= TO_DATE('{0}-{1}-{2}', 'MM-DD-YYYY')".format(
        str(coefEnd.month).rjust(2, '0'), str(coefEnd.day).rjust(2, '0'),
        str(coefEnd.year).rjust(4, '0'))
    dff_1_query += " ORDER BY DMIX.{0}.DATE_TXT ASC".format(c1)
    dff_1 = query(dff_1_query, ['Date', 'Price'])

    if both_crypto:
        dff_2_query = "SELECT DMIX.{0}.DATE_TXT, DMIX.{0}.OPEN FROM DMIX.{0} ".format(c2)
        dff_2_query += "WHERE TO_CHAR(DMIX.{0}.DATE_TXT, 'HH24') = 1".format(c2)
        dff_2_query += " AND DMIX." + c2 + ".DATE_TXT >= TO_DATE('{0}-{1}-{2}', 'MM-DD-YYYY')".format(
            str(coefStart.month).rjust(2, '0'), str(coefStart.day).rjust(2, '0'),
            str(coefStart.year).rjust(4, '0'))
        dff_2_query += " AND DMIX." + c2 + ".DATE_TXT <= TO_DATE('{0}-{1}-{2}', 'MM-DD-YYYY')".format(
            str(coefEnd.month).rjust(2, '0'), str(coefEnd.day).rjust(2, '0'),
            str(coefEnd.year).rjust(4, '0'))
        dff_2_query += " AND TO_CHAR(DMIX.{0}.DATE_TXT, 'HH24') = 1 ORDER BY DMIX.{0}.DATE_TXT ASC".format(c2)
        dff_2 = query(dff_2_query, ['Date', 'Price'])
    else:
        dff_2_query = "SELECT DATE_TXT, {} FROM DMIX.EXCHANGERATES".format(c2)
        dff_2_query += " WHERE DMIX.EXCHANGERATES.DATE_TXT >= TO_DATE('{0}-{1}-{2}', 'MM-DD-YYYY')".format(
            str(coefStart.month).rjust(2, '0'), str(coefStart.day).rjust(2, '0'),
            str(coefStart.year).rjust(4, '0'))
        dff_2_query += " AND DMIX.EXCHANGERATES.DATE_TXT <= TO_DATE('{0}-{1}-{2}', 'MM-DD-YYYY')".format(
            str(coefEnd.month).rjust(2, '0'), str(coefEnd.day).rjust(2, '0'),
            str(coefEnd.year).rjust(4, '0'))
        dff_2_query += " ORDER BY DATE_TXT ASC"
        dff_2 = query(dff_2_query, ['Date', 'Price'])

    fig_1 = go.Figure(go.Scatter(x=dff_1['Date'], y=dff_1['Price'], mode='lines'))
    fig_2 = go.Figure(go.Scatter(x=dff_2['Date'], y=dff_2['Price'], mode='lines'))
    fig_1.update_layout(xaxis={'title': 'Date'},
                        yaxis={'title': 'Price Percent Change'})
    fig_2.update_layout(xaxis={'title': 'Date'},
                        yaxis={'title': 'Price Percent Change'})
    return result, fig_1, fig_2
def stats():
    if cache.get('results') is None and 'refresh' not in session:
        query(session['user'])
    c = cache.get('results')
    return render_template('stats.html', downs=c[0], ups=c[1], pings=c[2],
                           username=session['username'])
def search(self):
    jobnum = self.searchline.text()
    self.parts = []
    self.job.clear()
    self.jobnum.setText("")
    self.dateline.setText("")
    self.machine.setText("")
    self.created.setText("")
    # self.workparts.clearContents()
    for i in range(self.workparts.rowCount()):
        self.workparts.removeRow(0)
    for i in range(self.badparts.rowCount()):
        self.badparts.removeRow(0)
    qry = query("load_work_order", {jobnum})
    if qry:
        self.searchline.setText("")
        if qry.first():
            self.jobid = qry.value(0)
            self.job['jobnum'] = qry.value(1)
            self.job['date'] = qry.value(2)
            self.job['machine'] = qry.value(3)
            self.jobnum.setText(str(qry.value(1)))
            self.dateline.setText(str(qry.value(2)))
            self.machine.setText(str(qry.value(3)))
            self.created.setText(str(qry.value(4)))
            subqry = query("load_wo_parts", {self.jobid})
            while subqry.next():
                self.parts.append({'tracking': str(subqry.value(9)),
                                   'partnum': str(subqry.value(0)),
                                   'qty': str(subqry.value(1)),
                                   'desc': str(subqry.value(5)),
                                   'mat': str(subqry.value(6)),
                                   'rout': str(subqry.value(7)),
                                   'dest': str(subqry.value(2)),
                                   'notes': str(subqry.value(10)),
                                   'print': str(subqry.value(8)),
                                   'order': str(subqry.value(4)),
                                   'status': str(subqry.value(3))})
            for i, part in enumerate(self.parts):
                self.workparts.insertRow(i)
                keys = ["partnum", "qty", "dest", "order", "status", "rout", "mat"]
                for e, key in enumerate(keys):
                    text = QtGui.QTableWidgetItem(part[key])
                    if "QPyNullVariant" in text.text():
                        text.setText("")
                        part.update({key: ""})
                    self.workparts.setItem(i, e, text)
            self.workparts.resizeColumnsToContents()
def seqQuery(self, m, ids):
    if m == "split":
        query(self.training, 'training', self.training_groupings)
        query(self.validation, 'validation', self.validation_groupings)
        query(self.testing, 'testing', self.testing_groupings)
    elif m == "cv":
        query(ids, 'all', self.groupTotals)
def test_query(capsys):
    # Query only outputs the first 10 rows; sort results to avoid randomness.
    query_string = '''#standardSQL
        SELECT corpus
        FROM `publicdata.samples.shakespeare`
        GROUP BY corpus
        ORDER BY corpus
        LIMIT 10;'''
    query.query(query_string)
    out, _ = capsys.readouterr()
    assert 'antonyandcleopatra' in out
def start():
    global input_list
    global response_dict
    global lock
    with open(cleaned_name, "r") as fh:
        job_database = json.load(fh)
    with open(json_name, "r") as fh:
        house_database = json.load(fh)
    print("Thread - Model >> Database starts ready for query.")
    while True:
        lock.acquire()
        if input_list:
            request = input_list.pop(0)
            ip = request.get('IP')
            infos = request.get('infos')
            print("Thread - Database >> Successfully removed request from IP", ip)
            result = query(job_database, house_database, infos)
            response_dict[ip] = result
            print("Thread - Database >> Successfully queried for request from IP", ip)
        lock.release()
def booksCheckedOutByMember():
    member_id = selectMember()
    results = query.query(['member_id'], 'checkout',
                          where=('member_id=%d AND checkin_date IS NULL' % member_id))
    print "This member currently has %d books checked out." % len(results)
def __init__(self):
    self.catalog_dir = "catalog/"
    if not os.path.isdir(self.catalog_dir):
        os.mkdir(self.catalog_dir)
    self.temp_catalog_dir = "catalog/temp/"
    if not os.path.isdir(self.temp_catalog_dir):
        os.mkdir(self.temp_catalog_dir)
    self.raw_catalog_dir = "raw_catalog/"
    self.milliqua = "million_quasar.txt"
    self.ozdes = "OzDES.txt"
    self.DR14Q = "DR14Q_v4_4.fits"
    self.DR7Q = "dr7qso.fit"
    self.dtype = [("ra", float), ("dec", float), ("flag", "|S4"),
                  ("z", float), ("where", "|S6")]
    self.crossmatch_radius = 2.
    self.SN_half_size = 1.1
    self.file_record = open(self.catalog_dir + "record.info", "w")
    self.DES_SN_ra_dec = {
        "C1": (54.2743, -27.1116),
        "C2": (54.2743, -29.0884),
        "C3": (52.6484, -28.1000),
        "E1": (7.8744, -43.0096),
        "E2": (9.5000, -43.9980),
        "S1": (42.8200, 0.0000),
        "S2": (41.1944, -0.9884),
        "X1": (34.4757, -34.4757),
        "X2": (35.6645, -6.4121),
        "X3": (36.4500, -4.6000)
    }
    self.query = query.query()
def __init__(self, logger, args, credentials, query_template, logs_path,
             execution_name, ENV_NAME=None, ENV_DATA=None):
    self._script_path = os.path.dirname(os.path.realpath(__file__))
    self._logger = logger
    self._args = args
    self._credentials = credentials
    self._query_template = query_template
    self._execution_name = execution_name
    self._environment_name = ENV_NAME
    self._environment_data = ENV_DATA
    self._logs_path = logs_path
    self._query = query(logger, args, credentials, query_template,
                        execution_name, ENV_NAME, ENV_DATA)
    if self._environment_data['ssh']['enabled'] == 'True':
        self._query_execution = imp.load_source(
            'query_execution',
            "{0}/logs/{1}/query_execution.py".format(
                self._script_path,
                self._execution_name)).query_execution(self._query)
    else:
        self._query_execution = imp.load_source(
            'query_execution',
            "{}query_execution.py".format(
                self._logs_path)).query_execution(self._query)
def getAvailableCopies(book_id):
    return query.query(
        ['copy_id', 'library_name'],
        'book JOIN copy USING (book_id) JOIN library USING (library_id)',
        where=('book_id = %d AND copy_id != ALL '
               '(SELECT copy_id FROM checkout WHERE checkin_date IS NULL)' % book_id))
def test_query(self):
    """
    Test whether the "query" function extracts the correct rows and attributes.
    """
    data_extract = query(DF, ['subject_id'], ['hadm_id'], ['subject_id'])
    self.assertEqual(data_extract.shape, (3, 2))
    self.assertEqual(list(data_extract.columns.values),
                     ['subject_id', 'hadm_id'])
def retrieval_statistics(gallery_index_path, query_dataset_path,
                         uniform_size=(224, 224, 3)):
    accuracy = {}
    accuracy['first_right'] = 0
    accuracy['other_right'] = 0
    accuracy['wrong'] = 0
    query_name_list = os.listdir(query_dataset_path)
    for query_name in query_name_list:
        query_path = query_dataset_path + "/" + query_name
        results = query.query(query_path, gallery_index_path, uniform_size,
                              limit=5)
        if results[0] == query_name:
            accuracy['first_right'] += 1
        elif query_name in results:
            accuracy['other_right'] += 1
        else:
            accuracy['wrong'] += 1
    return accuracy
def run_emailout():
    for proposal, person in query.query():
        if person is not None:
            email_address = (
                '{} <*****@*****.**>'.format(person.name)
                if trial
                else '{} <{}>'.format(person.name, person.email))
        else:
            print('#### No data of person to send email to.')
            return
        print('Subject:', subject)
        print('Recipient:', email_address)
        if proposal is not None:
            print('Title:', proposal.title)
        message = MIMEText(query.edit_template(str(file_paths[2]), proposal, person),
                           _charset='utf-8')
        message['From'] = 'ACCUConf <*****@*****.**>'
        message['To'] = email_address
        if not trial:
            message['Cc'] = 'ACCUConf <*****@*****.**>'
        message['Subject'] = subject
        message['Date'] = formatdate()  # RFC 2822 format.
        try:
            refusals = server.send_message(message)
            assert len(refusals) == 0
        except SMTPException as e:
            click.echo(click.style('SMTP failed in some way: {}'.format(e), fg='red'))
def get_img_ids_with_img():
    params = flask.request.json
    if not params:
        params = flask.request.args
    if not params:
        return Response("{'error': 'Missing either mod or img_id'}",
                        status=400, mimetype='application/json')
    mod = params.get('mod')
    mod = process_mod(mod)
    img_base64 = params.get('img_base64')
    img_base64 = img_base64[img_base64.find('base64') + 7:]
    print mod
    print img_base64
    try:
        img = Image.open(BytesIO(base64.b64decode(img_base64)))
    except Exception as e:
        print e
        # Bail out here: `img` would be undefined below if decoding failed.
        return Response("{'error': 'Could not decode image'}",
                        status=400, mimetype='application/json')
    img = img.convert('RGB')
    img = testset.transform(img)
    nn_result = query(mod, all_imgs, img, model, None)
    print nn_result
    response_data = {'img_ids': list(nn_result)}
    return jsonify(response_data)
def upload_report_(self):
    """
    Upload a PDF to the database so the user can print it from their
    scheduler.
    """
    # Find the sender
    job_num = self.sender().parent().job
    # Get the last directory that was used
    last_laser = str(functions.read_settings('last_laser').toString())
    # Get the report_file the user wants to upload
    report_file = QtGui.QFileDialog.getOpenFileName(self, caption='Open Print',
                                                    filter='*.pdf',
                                                    directory=last_laser)
    if report_file:
        # Remember the directory of the file that was just chosen.
        functions.write_settings('last_laser', str(report_file).rsplit('/', 1)[0])
        print_bin = open(report_file, 'rb')
        dbw, ok = dbConnection.new_connection('write', 'riverview', 'riverview')
        if ok:
            qry = query("insert_pdf",
                        [job_num, print_bin.read().encode('hex')], dbw)
            if qry:
                QtGui.QMessageBox.information(None, "Successful",
                                              "Paperwork Successfully uploaded")
                self.update_schedule_(None)
                return True
            else:
                return False
def store_annotation(ann_dict):
    ts = calendar.timegm(datetime.utcnow().timetuple())
    auuid = uuid4()
    fid = ann_dict['id']
    attr = ann_dict['attribute']
    with r.pipeline(transaction=True) as p:
        if fid.startswith('tmp'):
            p.sadd('f:tmp', fid)
        p.sadd('f', fid)
        p.hmset('annotations:{}'.format(auuid), ann_dict)
        # p.zadd('annotations', ts, auuid)
        p.zadd('f:annotations:{}'.format(fid), ts, auuid)
        p.sadd('f:attrs:{}'.format(fid), attr)
        orig_value = r.hget('f:orig:{}'.format(fid), attr)
        f_uri = ann_dict.get('uri', None)
        if f_uri is not None:
            p.set('f:uri:{}'.format(fid), f_uri)
            if orig_value is None:
                query_value = query(f_uri, attr)
                if query_value is not None:
                    p.hset('f:orig:{}'.format(fid), attr, str(query_value))
        f_lat = ann_dict.get('latitude', None)
        f_long = ann_dict.get('longitude', None)
        if f_lat is not None and f_long is not None:
            p.set('f:pos:{}'.format(fid), (f_lat, f_long))
        p.execute()
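# Usage sketch for store_annotation (hypothetical values; requires the
# module-level redis client `r` and `query` shown above). All writes are
# queued on the transactional pipeline and applied atomically by p.execute().
store_annotation({
    'id': 'tmp-123',
    'attribute': 'height',
    'uri': 'http://example.org/feature/123',
    'latitude': 51.5,
    'longitude': -0.12,
})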
def start():
    lookup = init_program()
    while True:
        print("\n\n")
        print("Tutorial Room Availability Checker")
        print("----------------------------------")
        duration = -1
        day = -1
        start_time = -1
        while duration == -1:
            duration = ask_duration()
        while day == -1:
            day = ask_day()
        while start_time == -1:
            start_time = ask_start_time()
        results = query.query(lookup, duration, day, start_time)
        display(results)
        quit_program = input("Exit program? (Y - yes): ")
        if quit_program == 'Y' or quit_program == 'y':
            break
    print("See you!")
def sql_to_asp_human():
    all_rels = query.query('human', 0.4)
    all_rels = removedups(all_rels)
    # print(all_rels)
    preddicts = defaultdict(list)
    framedicts = defaultdict(list)
    for record in all_rels:
        rel = (record[1], record[0], record[2])
        coord = (record[3], record[4])
        frame_id = record[5]
        if len(preddicts[rel]) == 0:
            print('added')
            preddicts[rel].append((frame_id, coord))
            # print(type(frame_id))
            framedicts[frame_id].append(coord)
        else:
            temp = copy.deepcopy(preddicts[rel])
            for anypoint in temp:
                # print(anypoint)
                # print eucdist(coord, anypoint[1])
                if eucdist(coord, anypoint[1]) < 2:
                    print('hi')
                else:
                    preddicts[rel].append((frame_id, coord))
                    framedicts[frame_id].append(coord)
    print('before')
    pprint(preddicts)
    append_object_semantic_map.from_sql(preddicts)
def correlationCoef(currency1, currency2, crypto,
                    startDate=datetime.date(2012, 1, 1),
                    endDate=datetime.date.today()):
    queryStr = ""
    if crypto:
        if currency1 == currency2:
            queryStr = "SELECT CORR(DMIX.{0}.OPEN, DMIX.{0}.OPEN) FROM DMIX.{0}".format(currency1)
        else:
            queryStr = ("SELECT CORR(DMIX." + currency1 + ".open, DMIX." + currency2 +
                        ".open) FROM DMIX." + currency1 + " JOIN DMIX." + currency2 +
                        " ON DMIX." + currency1 + ".date_txt = DMIX." + currency2 + ".date_txt")
        queryStr += " WHERE DMIX." + currency1 + ".DATE_TXT >= TO_DATE('{0}-{1}-{2}', 'MM-DD-YYYY')".format(
            str(startDate.month).rjust(2, '0'), str(startDate.day).rjust(2, '0'),
            str(startDate.year).rjust(4, '0'))
        queryStr += " AND DMIX." + currency1 + ".DATE_TXT <= TO_DATE('{0}-{1}-{2}', 'MM-DD-YYYY')".format(
            str(endDate.month).rjust(2, '0'), str(endDate.day).rjust(2, '0'),
            str(endDate.year).rjust(4, '0'))
    else:
        queryStr = ("SELECT CORR(crypto_price, " + currency2 +
                    ") FROM DMIX.EXCHANGERATES NATURAL JOIN (SELECT date_txt, DMIX." +
                    currency1 + ".open AS crypto_price FROM DMIX." + currency1 + ")")
        queryStr += " WHERE date_txt >= TO_DATE('{0}-{1}-{2}', 'MM-DD-YYYY')".format(
            str(startDate.month).rjust(2, '0'), str(startDate.day).rjust(2, '0'),
            str(startDate.year).rjust(4, '0'))
        queryStr += " AND date_txt <= TO_DATE('{0}-{1}-{2}', 'MM-DD-YYYY')".format(
            str(endDate.month).rjust(2, '0'), str(endDate.day).rjust(2, '0'),
            str(endDate.year).rjust(4, '0'))
    title = 'CorrelationCoefficient'
    headers = [title]
    return query(queryStr, headers)
def query_page_post():
    form = queryForm()
    selected_company = request.form['company']
    all_realtime, history_data, less_list = query(selected_company)
    return render_template('query.html', form=form, all_realtime=all_realtime,
                           history_data=history_data, less_list=less_list,
                           selected_company=selected_company)
def queryAnalyzer():
    nlq = str(request.form['query'])
    print("Query: " + nlq)
    results = query.query(nlq)
    print("query done")
    # print(results)
    return json.dumps({'query': nlq, 'data': results})
def build_from(self, node):
    """
    Builds out a path based on user input.
    """
    # TODO: describe generator better
    try:
        while True:
            direct = query(node)
            print()
            print(direct)
            # return -> navigate up
            if direct == ' ':
                yield
            # clarify -> navigate down
            elif direct == '':
                yield from self.build_from(node.down)
            # continue -> navigate right
            elif direct == ' ':
                yield from self.build_from(node.right)
            # quit
            else:
                raise StopIteration
    # TODO: handle quitting so that user doesn't have to manually go up
    except StopIteration:
        return
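# Usage sketch (hypothetical caller; assumes a tree whose nodes expose .down
# and .right, as used above). build_from yields once per navigation step, so
# the caller drives the interactive walk by iterating the generator:
#
#     for _ in self.build_from(root_node):
#         pass  # each iteration consumed one direction from query(node)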
def coinVSInstability(currencies, countries,
                      startDate=datetime.date(2012, 1, 1),
                      endDate=datetime.date.today()):
    queryStr = getWithClause(currencies, startDate, endDate) + " SELECT "
    for country in countries:
        queryStr += "DMIX.ECONOMICINSTABILITY.{0}, ".format(country)
    currencyIt = 0
    while currencyIt < len(currencies):
        if currencyIt == 0:
            queryStr += "{0}AVG.MONTH, ".format(currencies[currencyIt])
        queryStr += "{0}AVG.AVERAGE".format(currencies[currencyIt])
        if currencyIt != len(currencies) - 1:
            queryStr += ","
        queryStr += " "
        currencyIt = currencyIt + 1
    queryStr += "FROM DMIX.ECONOMICINSTABILITY"
    for currency in currencies:
        queryStr += (" INNER JOIN {0}AVG ON DMIX.ECONOMICINSTABILITY.MONTH = "
                     "EXTRACT(MONTH FROM {0}AVG.MONTH) AND "
                     "DMIX.ECONOMICINSTABILITY.YEAR = "
                     "EXTRACT(YEAR FROM {0}AVG.MONTH)").format(currency)
    headers = countries
    headers.append("month")
    for currency in currencies:
        headers.append(currency)
    return query(queryStr, headers)
def get(self):
    self.response.headers['Content-Type'] = 'application/json'
    # Current defaults are based on sample data.
    # TODO: change to intelligent defaults, e.g. today, all routes.
    start = datetime.datetime.strptime(
        self.request.get('start', '2012-10-11T06:30:00'), "%Y-%m-%dT%H:%M:%S")
    end = datetime.datetime.strptime(
        self.request.get('end', '2012-10-11T09:30:00'), "%Y-%m-%dT%H:%M:%S")
    route = self.request.get('route', '10')
    callback = self.request.get('callback', None)
    arrivals = (VehicleArrival.all()
                .filter("arrival > ", start)
                .filter("arrival < ", end)
                .filter("route = ", route))
    if arrivals.count() == 0:
        self.response.out.write("[]")
        return
    processed_arrivals = query.query(arrivals)
    response_data = {
        'route': route,
        'start': start.isoformat(),
        'end': end.isoformat(),
        'route_aggregate': processed_arrivals['route_aggregate'],
        'stop_stats': processed_arrivals['stop_stats']
    }
    response_data = json.dumps(response_data)
    # Wrap as JSONP if a callback is specified.
    if callback:
        response_data = '%s(%s);' % (callback, response_data)
    self.response.out.write(response_data)
def call_query(*args):
    cwd = os.getcwd()
    os.chdir('..')
    ret = query.query(*args)
    os.chdir(cwd)
    return ret
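# A more defensive variant (sketch, not the original code): restore the
# working directory even if query.query raises, by wrapping the call in
# try/finally. Uses the same os and query names as call_query above.
def call_query_safe(*args):
    cwd = os.getcwd()
    os.chdir('..')
    try:
        return query.query(*args)
    finally:
        os.chdir(cwd)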
def get(self, source):
    if source not in ['web', 'image', 'news']:
        self.error(404)
        return
    response = query(self.request.get('q'), sources=source)
    # self.response.write(response)
    # return
    self.response.write(template(source + '.html', {"response": response}))
def update(self):
    self.df = query.query(self.nsid, self.tunoip,
                          data_dir='/home/monitor/data')
    self.time = list(map(lambda x: x.strftime("%Y-%m-%d %H:%M:%S"),
                         self.df.index))
    self.preprocess('over_drop')
    self.preprocess('under_drop')
def get_status(self):
    qry = query("get_status")
    status = []
    if qry:
        while qry.next():
            status.append(qry.value(0).toString())
            self.state.append(qry.value(1).toString())
    return status
def run(self):
    query_queue = self._query_queue
    result_queue = self._result_queue
    while True:
        q = query_queue.get()
        if q is None:
            return
        result_queue.put((q.id, query.query(q.query_dn, q.ns, q.rrtype,
                                            q.timeout)))
def mdx_query(self):
    """ Return a parser for a full MDX query (SELECT ... FROM ... [WHERE ...]) """
    selectToken = Keyword("select", caseless=True).suppress()
    fromToken = Keyword("from", caseless=True).suppress()
    whereToken = Keyword("where", caseless=True).suppress()
    semicolon = Literal(";").suppress()
    mdx = (selectToken + self.mdx_axis_list() + fromToken + self.mdx_cube() +
           Optional(whereToken + self.mdx_slice()) + Optional(semicolon))
    mdx.setParseAction(lambda s, a, toks: query.query(*toks))
    return mdx
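# Minimal self-contained sketch of the same pyparsing pattern used above
# (caseless Keyword suppression plus an Optional trailing semicolon),
# independent of this class. The grammar and sample input are illustrative:
from pyparsing import Keyword, Literal, Optional, Word, alphas

_select = Keyword("select", caseless=True).suppress()
_from = Keyword("from", caseless=True).suppress()
_semi = Literal(";").suppress()
_demo = _select + Word(alphas) + _from + Word(alphas) + Optional(_semi)

print(_demo.parseString("SELECT cols FROM cube;"))  # -> ['cols', 'cube']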
def completer(self):
    qry = query("get_machines")
    if qry:
        status = ['Delivery', 'Lost', 'Shipping']
        while qry.next():
            status.append(qry.value(0).toString())
        comp = QtGui.QCompleter(status)
        comp.setCaseSensitivity(0)
        self.status.setCompleter(comp)
def load_available_orders(self):
    qry = query("active_orders")
    if qry:
        active_mod = QtSql.QSqlQueryModel()
        active_mod.setQuery(qry)
        self.active_table.setModel(active_mod)
        self.active_table.resizeColumnsToContents()
        return True
    else:
        return False
def load_material_orders(self):
    qry = query("material_orders")
    if qry:
        material_mod = QtSql.QSqlQueryModel()
        material_mod.setQuery(qry)
        self.waiting_table.setModel(material_mod)
        self.waiting_table.resizeColumnsToContents()
        return True
    else:
        return False
def get_schedule_data(self, qry=None):
    """
    Retrieve all the required data from the database and send it to the
    display function. Also set up a timer to check for new updates once a
    second.
    """
    # If qry was passed, use its data instead of rerunning the query.
    if qry is None:
        self.schedule_qry = query("work_schedule", [self.schedule])
        if not self.schedule_qry:
            return False
    else:
        self.schedule_qry = qry
        self.schedule_qry.seek(-1)
    # self.schedule_data is used to check whether the schedule needs updating.
    self.schedule_data = []
    while self.schedule_qry.next():
        row = []
        for i in range(10):
            row.append(self.schedule_qry.value(i))
        self.schedule_data.append(row)
        pdf_qry = query("work_order_pdf_check",
                        [self.schedule_qry.value(8).toString()])
        if pdf_qry:
            if pdf_qry.first():
                has_print = True
            else:
                has_print = False
        else:
            has_print = False
        # Send the data to the display function so it can be added
        # to the layout.
        self.new_row(self.schedule_qry.record(), has_print)
    # Push all the rows together at the top of the page.
    self.schedule_frame.layout().insertStretch(-1)
    # Set up the update timer. Currently set to fire once a second.
    self.startTimer(1000)
    return True
def load_in_process(self):
    qry = query("in_process_orders")
    if qry:
        in_process_mod = QtSql.QSqlQueryModel()
        in_process_mod.setQuery(qry)
        self.in_process_table.setModel(in_process_mod)
        self.in_process_table.resizeColumnsToContents()
        return True
    else:
        return False
def __init__(self, task, account, backend, site_file, timestr, clip_duration):
    super(query_runner, self).__init__()
    can_slice = False
    if account.slicing:
        can_slice = clip_duration > account.slice_duration
    self.state = query_state.initial(slicing=account.slicing and can_slice,
                                     hot=backend.hot_user is not None)
    self.query = query(task, account, backend, site_file, timestr)
    self.result = None
def _query():
    q = request.query.q
    q = urllib2.unquote(q)
    try:
        matches = query.query(q, inv)
    except ParseException:
        return {"error": "Query string was malformed"}
    results = [[part.code, part.name, clean_path(part.path)]
               for part in matches]
    return {"results": results}
def load_setup_orders(self):
    qry = query("setup_orders")
    if qry:
        setup_mod = QtSql.QSqlQueryModel()
        setup_mod.setQuery(qry)
        self.setup_table.setModel(setup_mod)
        self.setup_table.resizeColumnsToContents()
        return True
    else:
        return False
def get(self):
    self.response.headers['Content-Type'] = 'text/html'
    start = datetime.datetime.strptime('2012-10-11T11:45:00',
                                       "%Y-%m-%dT%H:%M:%S")
    end = datetime.datetime.strptime('2012-10-11T15:30:00',
                                     "%Y-%m-%dT%H:%M:%S")
    route = '10'
    arrivals = (VehicleArrival.all()
                .filter("arrival > ", start)
                .filter("arrival < ", end)
                .filter("route = ", route))
    processed_arrivals = query.query(arrivals)
    template_values = {
        'worst_arrivals': processed_arrivals['stop_stats'],
    }
    path = os.path.join(os.path.dirname(__file__), 'tabletest.html')
    self.response.out.write(template.render(path, template_values))
def submit_query(testbed_name, query_sentence, use_modified_engine):
    if use_modified_engine:
        parameters.use_blind_relevance_feedback = True
        parameters.remove_stop_words = True
        parameters.normalization = False
    else:
        parameters.use_blind_relevance_feedback = False
        parameters.remove_stop_words = False
        parameters.normalization = True
    result, accum, titles = query.query(testbed_name + "_collection",
                                        query_sentence)
    return result, accum, titles
def _part():
    q = request.query.q
    q = urllib2.unquote(q)
    matches = list(query.query("code:" + q, inv))
    if len(matches) > 1:
        return {"error": "Part code " + q + " apparently has multiple items associated"}
    elif len(matches) == 0:
        return {"error": "Cannot find item with part code " + q}
    item = matches[0]
    return part_json(item)
def fetch(dag, keys, field, initNZ=None):
    d = {}
    nz = Normaliser()
    for k, v in query(dag, keys, field):
        if k not in d:
            d[k] = [v]
        else:
            d[k].append(v)
        if initNZ:
            nz.update(initNZ(v))
    return d, nz
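# The grouping in fetch is the classic dict-of-lists pattern; an equivalent
# standalone sketch with collections.defaultdict (same behavior, fewer
# branches) for illustration:
from collections import defaultdict

def group_pairs(pairs):
    # Collect values under their key, creating the list on first sight.
    d = defaultdict(list)
    for k, v in pairs:
        d[k].append(v)
    return dict(d)

assert group_pairs([('a', 1), ('a', 2), ('b', 3)]) == {'a': [1, 2], 'b': [3]}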
def get_users():
    qry = query("get_users")
    if qry:
        users = [""]
        while qry.next():
            users.append(qry.value(0).toString())
        if not users:
            QtGui.QMessageBox.critical(None, "Users Error",
                                       "User list could not be loaded...")
            return False
    else:
        return False
    return users
def index():
    user_json = request.args.get('user', None)
    # return example_response()
    if user_json:
        print user_json
        result = query(json.loads(user_json))
        response = {'result': result}
        return jsonify(response)
    response = {'error': 'Something is wrong.'}
    return jsonify(response)
def partnum_to_partid(partnum):
    """
    Takes a part number as input and returns the part id for it if it has
    one. Returns None if it doesn't.
    """
    qry = query("part_to_id", [partnum])
    if qry:
        if qry.first():
            return qry.value(0).toString()
    return None
def row_editing_finished(self):
    """
    Saves the current row data to the database.
    """
    # Find the sender
    row = self.sender().parent()
    data = [row.job, row.priority.text(), row.material.text(),
            row.material_qty.text()]
    dbw, ok = dbConnection.new_connection('write', 'riverview', 'riverview')
    if ok:
        qry = query("update_work_order", data, dbw)
        if qry:
            row.setStyleSheet('')
def create_missing_tab(name):
    tab = QtGui.QFrame()
    tab.setWindowTitle(name)
    tab.setLayout(QtGui.QGridLayout())
    tab.table = QtGui.QTableView()
    qry = query("missing_parts")
    if qry:
        mod = colorized_QSqlQueryModel()
        mod.setQuery(qry)
        tab.table.setModel(mod)
        tab.table.max_widths = [50, 150, 50, 300, 125, 100, 100]
        resize_table(tab.table)
    tab.layout().addWidget(tab.table)
    return tab
def update(self):
    QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
    track = self.track.text()
    status = self.status.text()
    fin = '0'
    if status != "" and track != "":
        qry = query("get_qty", [track])
        if qry:
            if qry.first():
                qty = int(qry.value(0).toString())
            else:
                return
        else:
            return
        qty, ok = QtGui.QInputDialog.getInt(
            self, "How many good parts did you make?",
            "How many good parts did you make?", qty)
        if ok:
            if start_transaction("write"):
                qry = query("new_status", [status, track, fin, qty], "write")
                if not qry:
                    rollback_transaction("write")
                    return
                qry = query("get_machine_id", [status])
                if qry:
                    if qry.first():
                        mach = qry.value(0).toString()
                        qry = query("update_schedule", [mach, track])
                        if not qry:
                            rollback_transaction("write")
                else:
                    rollback_transaction("write")
                if not commit_transaction("write"):
                    rollback_transaction("write")
    self.track.setText("")
    self.track.setFocus()
    QtGui.QApplication.restoreOverrideCursor()
def process(q):
    """ Generate parsed queries and perform the search """
    if not q:
        return
    q, filters = query(q)
    print "Filters:"
    print filters
    results = searcher.process_query(q, filters)
    ret = ""
    for r in results:
        ret += json.dumps(r) + "#"
    return ret[:-1]