def load_db_config(cfg):
    supercats = utils.select('''select distinct supercategory as name from sku
        where bin is not null and bin != '0' and active=True and supercategory is not null
        order by id''')
    for supercat in supercats:
        cfg['menu']['categories'].append(supercat)
        supercat['subcategories'] = []
        for cat in utils.select('''select distinct category as name from sku
                where supercategory = %s and bin is not null and bin != '0'
                and active = True and category is not null
                order by bin''', args=(supercat['name'],)):
            supercat['subcategories'].append(cat)
            cat['items'] = []
            for item in utils.select('''select * from sku
                    where supercategory = %s and category = %s
                    and bin is not null and bin != '0' and active=True
                    order by bin, name''', args=(supercat['name'], cat['name'])):
                cat['items'].append(item)
                if winecats.match(cat['name']):
                    item['name'] = item['bin'] + ' ' + item['name']
                    my_logger.info('name: modified ' + item['name'])
                if item['qtprice'] > 0:
                    my_logger.info('qt: added ' + item['name'])
                    qtitem = item.copy()
                    qtitem['fraction'] = .25
                    qtitem['retail_price'] = item['qtprice']
                    qtitem['name'] = 'qt: ' + item['name']
                    cat['items'].append(qtitem)
def parse_entry(self, entry):
    """Scrape each AotD entry for its URL and title."""
    item = super(ArticleOfTheDay, self).parse_entry(entry)
    summary = lxml.html.fromstring(entry.summary)
    try:
        item['summary'] = select(summary, 'p:first-of-type').text_content()
    except IndexError:
        # The featured article feed is created by
        # Extension:FeaturedFeeds from user-generated descriptions
        # in wikitext. Some descriptions, such as
        # [[Wikipedia:Today's featured article/January 16, 2017]]
        # are not formatted to have <p> tags when we need them.
        item['summary'] = summary.text_content()
        # Since we don't have <p> tags for the description, we
        # need to filter out the trailing <div>s.
        item['summary'] = item['summary'].split('Recently featured')[0].strip()
    item['summary'] = item['summary'].replace(u'(Full\xa0article...)', '')
    try:
        read_more = select(summary, 'p:first-of-type a:last-of-type')
    except IndexError:
        # The first bolded link is usually a link to the article.
        read_more = select(summary, 'b a:first-of-type')
    item['url'] = read_more.get('href')
    item['title'] = read_more.get('title')
    return item
def _get_extra_links(self, doc, plink):
    redirect_link = select(
        doc.xpath("/html/head/meta[@http-equiv='refresh']/@content"))
    redirect_link = redirect_link.split("=")[-1].strip()
    if redirect_link:
        link = {}
        url = url_padding(redirect_link, plink["url"])
        link["url"] = url
        link["title"] = plink["title"]
        link["purl"] = plink["url"]
        link["ptitle"] = plink["title"]
        self.links.append(link.copy())
    links = doc.xpath('//script[contains(text(), "gkfs(")]')
    for l in links:
        content = select(l.xpath(".//text()")).replace("gkfs(", "").replace(")", "")
        items = content.split(",")
        items = [item.replace('"', "") for item in items]
        if len(items) == 3:
            link = {}
            _, url_shown, title = items
            url = url_padding(url_shown, plink["url"])
            if not url:
                continue
            link["url"] = url
            link["url_shown"] = url_shown
            link["title"] = title
            link["purl"] = plink["url"]
            link["ptitle"] = plink["title"]
            self.links.append(link.copy())
def populate_pay_stub(temp=True, incursor=None):
    #days_of_tips_calculated = utils.select(
    #    '''select count(distinct date(intime)) from hours
    #       where yearweek(intime) = yearweek(now() - interval '1' week) and tip_pay is not null''',
    #    label=False
    #)[0][0]
    #if days_of_tips_calculated != 7:
    #    return 'Tips have been calculated for only %s days last week. When all days tips are calculated, refresh this page to see and print weekly pay for last week.' % days_of_tips_calculated
    results = utils.select('''
        select
            DATE(intime) - interval (DAYOFWEEK(intime) - 1) DAY as week_of,
            hours_worked.person_id,
            last_name,
            first_name,
            sum(hours_worked) as hours_worked,
            pay_rate,
            COALESCE(allowances, 0) allowances,
            COALESCE(nominal_scale, 0) nominal_scale,
            COALESCE(married, 0) married,
            COALESCE(salary, 0) + COALESCE(round(sum(hours_worked)*pay_rate), 0) as weekly_pay,
            COALESCE(salary, 0) + COALESCE(round(sum(hours_worked)*pay_rate), 0) * COALESCE(nominal_scale, 0) as gross_wages,
            COALESCE(sum(tip_pay), 0) tips,
            COALESCE(sum(tip_pay) / sum(hours_worked) + pay_rate, 0) as total_hourly_pay
        from hours_worked
        LEFT OUTER JOIN employee_tax_info
            ON hours_worked.person_id = employee_tax_info.person_id
        where yearweek(intime) = yearweek(now() - interval '1' week)
            and intime != 0
        group by hours_worked.person_id, yearweek(intime)
        ''',
        incursor=incursor,
        label=True
    )
    if temp:
        utils.execute('''
            create temporary table PAY_STUB_TEMP like PAY_STUB;
            ''', incursor=incursor)
        table_names = ('PAY_STUB_TEMP',)
    else:
        table_names = ('PAY_STUB', 'WEEKLY_PAY_STUB')
    for row in results:
        if not temp and utils.select(
                'select 1 from PAY_STUB where week_of = "%(week_of)s" and person_id = %(person_id)s' % row,
                incursor=incursor):
            continue
        for table_name in table_names:
            if table_name == 'WEEKLY_PAY_STUB':
                row['gross_wages'] = row['weekly_pay'] + float(row['tips'])
                row['pay_rate'] = round(row['total_hourly_pay'])
            tax.add_witholding_fields(row)
            columns = ', '.join(row.keys())
            values = ', '.join("'%s'" % value for value in row.values())
            sqltext = 'INSERT into %s (%s) VALUES (%s);' % (table_name, columns, values)
            my_logger.debug('pay stub: ' + sqltext)
            utils.execute(sqltext, incursor=incursor)
def ask_for_sliced(self, sliced):
    selected = select(sliced, self.slices)
    if not selected:
        name = infer_name(sliced)
        ingredient = self.pantry.gather(name)
        self.slices += self.cutting_board.use(ingredient)
        selected = select(sliced, self.slices)
    return selected
def get_wine_xml():
    winecats = utils.select('''
        select category from active_wine
        where active = true and listorder > 0 and bin is not null
        group by category
        order by min(listorder)''')
    for num, cat in enumerate(winecats):
        cat = cat['category']
        wine_items = utils.select('''
            select * from active_wine
            where category = '%(cat)s' and listorder > 0 and bin != '0'
            order by listorder
            ''' % locals())
        if cat in ('Red Wine', 'Bubbly', 'Bottled Beer', 'House Cocktails') \
                or utils.hostname() == 'plansrv' and cat == 'White Wine':
            style = 'P19'  # this style starts a new page
        else:
            style = 'P20'
        yield '''
        <text:h text:style-name="%s">%s</text:h>
        ''' % (style, escape(cat))
        if cat in ('House Cocktails', 'Bottled Beer'):
            yield '''<text:p/>'''
        current_subcategory = None
        for item in wine_items:
            binnum, name, listprice, byline, grapes, notes, subcategory = (
                clean(escape(unicode(item[key])))
                for key in ['bin', 'name', 'listprice', 'byline', 'grapes', 'notes', 'subcategory']
            )
            # do location heading if location changed
            if current_subcategory != subcategory and subcategory is not None:
                current_subcategory = subcategory
                yield '''<text:p text:style-name="Psubcat"><text:span text:style-name="T1">%s</text:span></text:p>''' % subcategory
            yield '''<text:p text:style-name="P4">%s.<text:tab/>%s<text:s text:c="5"/>%s''' % (binnum, name, listprice)
            if byline:
                yield '''<text:line-break/>%s''' % byline
            if grapes:
                yield '''<text:line-break/>Grapes: %s''' % grapes
            if notes:
                yield '''<text:line-break/>%s''' % notes
            yield '</text:p>'
            yield '''<text:p text:style-name="P18"/>'''
        if cat in ('House Cocktails', 'Bottled Beer'):
            yield '''<text:p/>'''
def parse_entry(self, entry):
    """Scrape each AotD entry for its URL and title."""
    item = super(ArticleOfTheDay, self).parse_entry(entry)
    summary = lxml.html.fromstring(entry.summary)
    item['summary'] = select(summary, 'p:first-of-type').text_content()
    item['summary'] = item['summary'].replace(u'(Full\xa0article...)', '')
    read_more = select(summary, 'p:first-of-type a:last-of-type')
    item['url'] = read_more.get('href')
    item['title'] = read_more.get('title')
    return item
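# A minimal sketch of the `select(node, css)` helper the feed parse_entry
# methods above assume: apply a CSS selector via lxml and return the first
# match, raising IndexError when nothing matches (which is why some callers
# wrap it in try/except IndexError). This is an illustrative assumption,
# not the project's actual implementation.
from lxml.cssselect import CSSSelector

def select(node, css):
    # CSSSelector compiles the CSS expression to XPath; indexing with [0]
    # raises IndexError when the selector matches nothing.
    return CSSSelector(css)(node)[0]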
def run(self, edit):
    preferences = utils.get_preferences()
    if preferences is None:
        return
    if preferences.get("user") is None:
        preferences["user"] = {}
    listKeys = list(preferences["user"].keys())
    print("these are: ", listKeys)
    for key in preferences["project"].keys():
        print(preferences["project"][key])
        listKeys += list(preferences["project"][key].keys())
    utils.select(listKeys)
def weekly_pay(printmode=0, incursor=None):
    if incursor is None:
        incursor = utils.get_cursor()
    for table_name in ('PAY_STUB', 'PAY_STUB_TEMP'):
        utils.execute('''
            create temporary table v_%(table_name)s as
            select week_of, last_name, first_name, hours_worked, pay_rate,
                fed_withholding + nys_withholding + medicare_tax + social_security_tax as weekly_tax,
                round(weekly_pay - fed_withholding - nys_withholding - medicare_tax - social_security_tax) as net_wage,
                tips,
                total_hourly_pay
            from %(table_name)s
            where yearweek(week_of) = yearweek(now() - interval '1' week)
            order by last_name, first_name''' % locals(),
            incursor=incursor,
        )
        if printmode == 1:
            break
    if printmode == 1:
        return utils.select('''select * from v_PAY_STUB''', incursor=incursor)
    else:
        return utils.select('''
            select pst.week_of, pst.last_name, pst.first_name,
                IF(pst.hours_worked = ps.hours_worked or ps.hours_worked is null,
                   pst.hours_worked, concat(pst.hours_worked, ' / ', ps.hours_worked)) hours_worked,
                IF(pst.pay_rate = ps.pay_rate or ps.pay_rate is null,
                   pst.pay_rate, concat(pst.pay_rate, ' / ', ps.pay_rate)) pay_rate,
                IF(pst.weekly_tax = ps.weekly_tax or ps.weekly_tax is null,
                   pst.weekly_tax, concat(pst.weekly_tax, ' / ', ps.weekly_tax)) weekly_tax,
                IF(pst.net_wage = ps.net_wage or ps.net_wage is null,
                   pst.net_wage, concat(pst.net_wage, ' / ', ps.net_wage)) net_wage,
                IF(pst.tips = ps.tips or ps.tips is null,
                   pst.tips, concat(pst.tips, ' / ', ps.tips)) tips,
                IF(pst.total_hourly_pay = ps.total_hourly_pay or ps.total_hourly_pay is null,
                   pst.total_hourly_pay, concat(pst.total_hourly_pay, ' / ', ps.total_hourly_pay)) total_hourly_pay
            from v_PAY_STUB_TEMP pst
            LEFT OUTER JOIN v_PAY_STUB ps
                on pst.week_of = ps.week_of
                and pst.first_name = ps.first_name
                and pst.last_name = ps.last_name
            order by last_name, first_name
            ''',
            incursor=incursor,
            label=False
        )
def get_stub_data(person_id, week_of, table_name, incursor):
    print person_id, week_of
    stub_data = utils.select('''
        select last_name as LAST_NAME,
            first_name as FIRST_NAME,
            "000-00-0000" as SOCIAL,
            week_of + interval '1' week as PERIOD_END,
            concat(week_of, ' - ', week_of + interval '6' day) as PERIOD_SPAN,
            fed_withholding as FED,
            social_security_tax as SOC,
            medicare_tax as MED,
            nys_withholding as STATE,
            gross_wages as GROSS,
            gross_wages - fed_withholding - social_security_tax - medicare_tax - nys_withholding as NET,
            round(gross_wages / pay_rate, 2) as HOURS,
            pay_rate as RATE
        from {table_name}
        where person_id = {person_id} and week_of = "{week_of}"
        '''.format(**locals()),
        incursor
    )
    stub_ytd_data = utils.select('''
        select sum(fed_withholding) as FEDYTD,
            sum(social_security_tax) as SOCYTD,
            sum(medicare_tax) as MEDYTD,
            sum(nys_withholding) as STATEYTD,
            sum(gross_wages) as GYTD,
            sum(gross_wages - fed_withholding - social_security_tax - medicare_tax - nys_withholding) as NETYTD
        from {table_name}
        where person_id = {person_id}
            and year("{week_of}" + interval '1' week) = year(week_of + interval '1' week)
            and week_of <= date("{week_of}")
        '''.format(**locals()),
        incursor
    )
    # Make one dictionary of the two result sets.
    result = stub_data[0]            # start with stub_data
    result.update(stub_ytd_data[0])  # add the YTD stuff to it
    if utils.hostname() == 'salsrv':
        result["BUSS_INFO_LINE"] = "SALUMI dba Ultraviolet Enterprises 5600 Merrick RD, Massapequa, NY 11758 516-620-0057"
    else:
        result["BUSS_INFO_LINE"] = "PLANCHA dba Infrared Enterprises 931 Franklin AVE, GardenCity, NY 516-246-9459"
    return result
def select_data(message):
    data = utils.select(config.database_name, datetime.today().strftime('%Y-%m-%d'))
    df = pd.DataFrame(data)[0].sum()
    bot.send_message(message.chat.id, 'Spending for the week: {} rub.'.format(int(df)))
    print df
def wine_insert():
    winelist = utils.select('''select * from winelist order by id''')
    for item in winelist:
        if item['grapes']:
            item['grapes'] = 'Grapes: %s' % item['grapes']
        dct = {
            'id': item['id'],
            'name': item['name'],
            'supercategory': 'bev',
            'category': item['category'],
            'subcategory': item['subcategory'],
            'retail_price': item['listprice'],
            'qtprice': item['qtprice'],
            'scalable': False,
            'tax': 'standard',
            'wholesale_price': item['frontprice'],
            'supplier': item['supplier'],
            'vendor_code': None,
            'bin': item['bin'],
            'listorder': item['listorder'],
            'upc': None,
            'description': '\n'.join(i for i in (item['byline'], item['grapes'], item['notes']) if i),
            'active': item['active'],
            'units_in_stock': item['units_in_stock'],
            'inventory_date': item['inventory_date'],
            'mynotes': item['mynotes']
        }
        utils.sql_insert('sku', dct)
def index(req):
    weekly = utils.select('''
        select first_name, last_name,
            round(sum(hours_worked), 2) as hours_worked,
            round(sum(hours_worked)*pay_rate - weekly_tax) as weekly_pay
        from hours_worked
        where yearweek(intime) = yearweek(now() - interval '1' week)
        group by yearweek(intime), last_name, first_name
        order by last_name''',
        incursor=None,
        label=False
    )
    output = '_ ' * 40 + '\n' * 3
    for first_name, last_name, hours_worked, weekly_pay in weekly:
        if last_name in ('Kobrin', 'Kanarova'):
            continue
        output += (
            ' ' * 4 + first_name.ljust(12) + ' ' + last_name.ljust(12)
            + '$' + str(int(weekly_pay or 0)).ljust(6)
            + str(hours_worked).rjust(30) + ' hrs' + '\n' * 2
            + '_ ' * 40 + '\n' * 3
        )
    return output
def run_adventure(self, root_block):
    if root_block is None:
        utils.log('[!] Cannot run NONE adventure for player ' + self.id)
        return None
    try:
        # Tag
        message = self.get_tag(True) + ": "
        # Preambles
        if root_block.public:
            category_data = world.get_category(root_block.category)
            if category_data is not None:
                preambles = category_data.preambles
                message += utils.select(preambles) + '\n'
        # Iterate through the blocks!
        message += self.run_adventure_block(root_block)
        # Increment visits
        self.increment_visits(root_block.handle)
        # Calc level up
        message += self.check_level_up()
        # Broadcast message
        return message
    except Exception as e:
        utils.log('[!] Encountered an exception in adventure ' + root_block.handle)
        utils.log_exception(e)
        return None
def extractCanvasSettings(d):
    """Split a dict in canvas settings and other items.

    Returns a tuple of two dicts: the first one contains the items
    that are canvas settings, the second one the rest.
    """
    return utils.select(d, pf.refcfg['canvas']), utils.remove(d, pf.refcfg['canvas'])
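# A rough sketch of the dict-splitting helpers extractCanvasSettings relies
# on, assuming utils.select(d, keys) keeps only the listed keys and
# utils.remove(d, keys) keeps everything else. These bodies are illustrative
# guesses, not the library's actual implementation.
def select(d, keys):
    # Keep only the entries of d whose key is in keys.
    return dict((k, v) for k, v in d.items() if k in keys)

def remove(d, keys):
    # Keep only the entries of d whose key is NOT in keys.
    return dict((k, v) for k, v in d.items() if k not in keys)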
def nightly_sales_by_server(label=False, lag_days=1):
    tax_rate = texttab.TAXRATE
    return utils.select('''
        select sales.*, rbs.cc1, rbs.cc2, rbs.cash1, rbs.cash2, rbs.id as receipts_id
        from (
            SELECT concat(p.last_name, ', ', p.first_name) server,
                p.id as person_id,
                p.ccid,
                sum(oi.price) sales,
                sum(ti.price) taxable_sales,
                sum(oi.price) + COALESCE(round(sum(ti.price) * %(tax_rate)s, 2), 0) receipts,
                count(distinct og.id) tabs_closed,
                convert(date(now() - INTERVAL '%(lag_days)s' DAY), CHAR(10)) as dat
            FROM (order_item oi left outer join taxable_item ti on ti.id = oi.id),
                order_group og,
                person p
            WHERE oi.order_group_id = og.id
                AND oi.is_cancelled = False
                AND oi.is_comped = False
                AND og.closedby = p.id
                AND date(og.updated - interval '6' HOUR) = date(now() - INTERVAL '%(lag_days)s' DAY)
            GROUP BY p.id) sales
        left outer join (
            select * from receipts_by_server
            where dat = date(now() - INTERVAL '%(lag_days)s' DAY)
        ) rbs on sales.person_id = rbs.person_id;
        ''' % locals(),
        incursor=None,
        label=label
    )
def parse_entry(self, entry):
    """Scrape each PotD entry for its description and URL."""
    item = super(PictureOfTheDay, self).parse_entry(entry)
    summary = lxml.html.fromstring(entry.summary)
    image_node = select(summary, 'a.image img')
    file_page_node = select(summary, 'a.image')
    thumb_url = image_node.get('src')
    width = image_node.get('width')  # 300px per MediaWiki:Ffeed-potd-page
    image_url = thumb_url.rsplit('/' + width, 1)[0].replace('thumb/', '')
    desc_node = select(summary, '.description.en')
    # TODO: include authorship for the picture
    item['filename'] = image_node.get('alt')
    item['image_url'] = image_url
    item['filepage_url'] = file_page_node.get('href')
    item['description'] = desc_node.text_content().strip()
    return item
def print_one_week_stubs(week_of):
    for table_name in ('PAY_STUB', 'WEEKLY_PAY_STUB'):
        stub_keys = utils.select('''select person_id from {table_name}
            where week_of = "{week_of}"'''.format(**locals()), label=False)
        for person_id in stub_keys:
            print_stubs(person_id, week_of, table_name)
def generate_text(session, model, config, starting_text='<eos>',
                  next_text='<eos>', stop_length=100, temp=1.0):
    state = model.initial_state.eval()
    list1 = []
    stop_tokens = ['<eos>']
    # Look up the vocabulary index of each word in starting_text.
    tokens = [model.vocab.encode(word) for word in starting_text]
    for i in range(len(starting_text)):
        token_list = np.array(tokens[i]).reshape((1, model.config.num_steps))
        # Feed the most recent result back in as the next input.
        feed = {
            model.input_placeholder: token_list,
            model.initial_state: state,
            model.dropout_placeholder: 1
        }
        # model.predictions[-1] is the model's prediction for the previous step.
        y_pred, state = session.run(
            [model.predictions[-1], model.final_state], feed_dict=feed)
        for j in range(i + 1, len(starting_text)):
            b = tokens[j]
            pro = select(y_pred[0], b, temperature=temp)
            #print pro
            list1.append(pro)
    return list1
def populate_pay_stub():
    results = utils.select('''
        select
            DATE(intime) - interval (DAYOFWEEK(intime) - 1) DAY as week_of,
            employee_tax_info.person_id,
            last_name,
            first_name,
            sum(hours_worked) as hours_worked,
            pay_rate,
            allowances,
            nominal_scale,
            round(sum(hours_worked)*pay_rate) as weekly_pay,
            round(sum(hours_worked)*pay_rate*nominal_scale) as gross_wages,
            married,
            sum(tip_pay) tips,
            round(sum(hours_worked)*pay_rate - weekly_tax) + sum(tip_pay) as total_weekly,
            sum(tip_pay) / sum(hours_worked) + pay_rate as total_hourly_pay
        from hours_worked
        JOIN employee_tax_info ON hours_worked.person_id = employee_tax_info.person_id
        where yearweek(intime) = yearweek(now() - interval '1' week)
            and intime != 0
        group by employee_tax_info.person_id
        ''',
        incursor=None,
        label=True
    )
    for row in results:
        add_witholding_fields(employee_tax_info=row)
        columns = ','.join(row.keys())
        values = ','.join(map(str, row.values()))
        utils.execute('''INSERT into pay_stub (%s) VALUES (%s)''' % (columns, values))
def index(req):
    supercat = req.parsed_uri[7]
    if supercat == 'mkt':
        supercat = 'market'
    resp = ''
    results = utils.select('''
        select * from sku_inv
        where active = true and bin > 0 and supercategory = %s
        order by supplier''',
        args=supercat
    )
    if not results:
        return 'no such category as %s' % supercat
    supplier = ''
    for row in results:
        expand_extra_fields(row)
        next_supplier = row.get('supplier') or '<NONE>'
        if supplier.lower().strip() != next_supplier.lower().strip():
            supplier = next_supplier
            resp += supplier + '\n'
        if (row['estimated_units_remaining'] is not None
                and isinstance(row.get('par'), (int, long, float))
                and row.get('par') > row['estimated_units_remaining']):
            row.setdefault('order_amt',
                           str(int(round(row['par'] - row['estimated_units_remaining'])))
                           + ' ' + row.get('order_unit', 'ct.'))
            resp += "\t{name} - {order_amt} \t(${wholesale_price}, {estimated_units_remaining} on hand, par is {par})\n".format(**row)
    return resp
def update(req, edits, newrows):
    edits = json.loads(edits)
    newrows = json.loads(newrows)
    insert_ids = {}
    cursor = utils.get_cursor()
    for rowid, fields_and_vals in edits.items():
        setlist = ','.join('%s = %s' % (f, sql_representation(v))
                           for f, v in fields_and_vals.items()
                           if f != 'estimated_units_remaining')
        sql = "update sku set " + setlist + " where id = " + rowid + "\n"
        utils.execute(sql, cursor)
    for rowid, fields_and_vals in newrows.items():
        for bad_field in ('uid', 'undefined', 'estimated_units_remaining',
                          'boundindex', 'visibleindex', 'uniqueid'):
            if fields_and_vals.has_key(bad_field):
                fields_and_vals.pop(bad_field)
        fields = fields_and_vals.keys()
        values = fields_and_vals.values()
        field_list = ','.join(fields)
        value_list = ','.join(sql_representation(v) for v in values)
        sql = "insert into sku (" + field_list + ") VALUES (" + value_list + ")"
        utils.execute(sql, cursor)
        insert_ids[rowid] = utils.select("select LAST_INSERT_ID()", cursor, False)[0][0]
    cursor.close()
    wineprint.gen_fodt_and_pdf()
    return json.dumps(insert_ids)
def index(req=None):
    winelist = utils.select('''
        select category, bin as binnum, name,
            round(estimated_units_remaining, 2) est_count
        from winelist_inv
        where bin != 0 and category != 'House Cocktails'
        order by category, bin;
        ''', label=True)
    outfile = tempfile.NamedTemporaryFile(delete=False)
    filename = outfile.name
    catname = None
    for rec in winelist:
        if catname != rec['category']:
            catname = rec['category']
            outfile.write(catname + ':\n')
        outfile.write(
            str(rec['binnum']).ljust(5)
            + rec['name'][:10].encode('latin1', 'replace') + ' '
            + str(rec['est_count']) + '\n'
        )
    outfile.close()
    subprocess.call(['enscript', '--font=Courier-Bold@11/16', '-B', '-MEnv10', filename])
    os.remove(filename)
    return json.dumps(None)
def server_tip_share(serverpin):
    ret = utils.select('''SELECT tip_share FROM hours
        WHERE person_id = %(serverpin)s and tip_share is not null
        order by id desc LIMIT 1''' % locals())
    if ret:
        ts = ret[0]['tip_share']
    else:
        ts = 'null'
    return ts
def print_this_week_stubs():
    for table_name in ('PAY_STUB', 'WEEKLY_PAY_STUB'):
        stub_keys = utils.select('''
            select person_id, week_of from {table_name}
            where yearweek(week_of) = yearweek(now() - interval '1' week)
            '''.format(**locals()), label=False)
        for person_id, week_of in stub_keys:
            print_stubs(person_id, week_of, table_name)
def index(req, pin):
    results = utils.select(
        '''select count(*) as cnt from person where id = %s''',
        args=[pin]
    )
    is_good = results[0]['cnt'] != 0
    return json.dumps(is_good)
def print_2016_stubs():
    for table_name in ('PAY_STUB', 'WEEKLY_PAY_STUB'):
        stub_keys = utils.select('''
            select person_id, week_of from %(table_name)s
            where year(week_of) = 2016
            ''' % locals(), label=False)
        for person_id, week_of in stub_keys:
            print_stubs(person_id, week_of, table_name)
def print_r_stubs():
    for table_name in ('PAY_STUB',):
        stub_keys = utils.select('''
            select person_id, week_of from %(table_name)s
            where last_name = 'Sinitean'
            ''' % locals(), label=False)
        for person_id, week_of in stub_keys:
            print_stubs(person_id, week_of, table_name)
def new_model_exists(scenario, mode):
    """Check if a new model is deployed for a scenario."""
    query = """
        select uuid from apis_modeldetail am2
        where "version" = (select max(version) from apis_modeldetail am
                           where "scenarioID_id" = '{}')
            and "scenarioID_id" = '{}'
        """
    latest_model = select(query.format(scenario, scenario))
    # if table is empty
    if latest_model is None:
        return False
    if mode == "auto":
        query = """
            select scores->'model' as uuid from feed_autowarehouse
            where "scenarioID" = '{}'
            """
    elif mode == "portfolio":
        query = """
            select scores->'model' as uuid from feed_portfoliowarehouse
            where "scenarioID" = '{}'
            """
    existing_model = select(query.format(scenario))
    # if table is empty
    if existing_model is None:
        return False
    logging.info("comparing {} with {}".format(latest_model, existing_model))
    if latest_model[0] != existing_model[0]:
        return True
    return False
def print_recent(person_lastname, numweeks=6, table_name='PAY_STUB'):
    stub_keys = utils.select('''
        select person_id, week_of from %(table_name)s
        where last_name = '%(person_lastname)s'
            and yearweek(week_of) >= yearweek(now() - interval '%(numweeks)s' week)
        ''' % locals(), label=False)
    for person_id, week_of in stub_keys:
        print_stubs(person_id, week_of, table_name)
def populate_staff_tabs(cfg):
    items = utils.select('''
        select concat(first_name, ' ', last_name) as name from person
        ''')
    table_cat_results = [cat for cat in cfg['menu']['categories']
                         if cat['name'] == 'tables']
    if len(table_cat_results) != 1:
        raise Exception('Problem finding tables category')
    table_category = table_cat_results[0]
    staff_subcat = {'name': 'staff_tabs', 'items': items}
    table_category['subcategories'].append(staff_subcat)
def index(req, pin):
    results = utils.select(
        """select count(*) as cnt from person where id = %s""" % pin
    )
    is_good = results[0]["cnt"] != 0
    return json.dumps(is_good)
def populate_wine_category(cfg):
    bev_category_results = [cat for cat in cfg['menu']['categories']
                            if cat['name'] == 'bev']
    if len(bev_category_results) != 1:
        raise Exception('Problem loading bev category')
    bev_category = bev_category_results[0]
    winecats = utils.select('''select distinct category from active_wine''')
    for cat in winecats:
        cat = cat['category']
        items = utils.select('''
            select id, bin, qtprice as price, CONCAT('qt: ', bin, " ", name) as name
            from winelist
            where category = '%(cat)s' and active = true
                and qtprice is not null and qtprice != 0 and bin is not null
            union all
            select id, bin, listprice as price, CONCAT(bin, " ", name) as name
            from winelist
            where category = '%(cat)s' and active = true
                and listprice != 0 and listprice is not null and bin is not null
            order by bin
            ''' % locals())
        bev_subcat = {'name': cat, 'items': items}
        bev_category['subcategories'].append(bev_subcat)
def load():
    global RESULT
    sql = '''select username,age,tel,email from users;'''
    db_user_info, res = utils.select(sql)
    if res:
        for i in db_user_info:
            name = i[0]
            RESULT[name] = dict(zip(TITLE, i))
        return RESULT, True
    else:
        err_msg = "No user exists; load failed."
        return err_msg, False
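# load() above treats utils.select(sql) as returning a (rows, success) pair.
# A minimal sketch of such a wrapper, assuming a DB-API style connection
# factory named get_connection(); both the factory name and the error
# handling are illustrative assumptions, not the project's actual code.
def select(sql):
    try:
        conn = get_connection()
        cur = conn.cursor()
        cur.execute(sql)
        rows = cur.fetchall()
        cur.close()
        conn.close()
        return rows, True
    except Exception as exc:
        # On failure, return the error message in place of the rows.
        return str(exc), False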
def index(req, menu_item_id):
    results = utils.select('''
        SELECT frontprice from winelist
        where id = %(menu_item_id)s''' % locals())
    if len(results) == 1:
        retval = results[0]
    else:
        retval = None
    return json.dumps(retval)
def _get_date(self, doc, link):
    # Return format: xxxx-xx-xx
    sdate = "#"
    if doc:
        sdate = select(
            doc.xpath(
                "//span[contains(text(),'发布日期')]/following-sibling::span/text()"
            ))
    res = DATE_PATTERN.match(link)
    if res and len(res.groups()) > 0:
        sdate = res.groups()[0]
        sdate = "-".join([sdate[:4], sdate[4:6], sdate[6:8]])
    return sdate
def new_record(req, person_id, dat):
    utils.execute('''
        insert into receipts_by_server (id, person_id, dat, cc1, cc2, cash1, cash2)
        values (null, "%(person_id)s", "%(dat)s", null, null, null, null);
        ''' % locals())
    results = utils.select('''
        select id from receipts_by_server
        where person_id = '%(person_id)s' and dat = '%(dat)s';
        ''' % locals())
    return json.dumps(results[0]['id'])
def editgroup(group):
    utils.drawline('EDIT GROUP')
    print '0. Set position'
    print '1. Advance'
    print '2. Retreat'
    sel = utils.select(2)
    if sel == 0:
        #TODO
        utils.printjson(group)
        printgroup(group)
        return 1
    else:
        return sel
def get(req, filtered='yes'):
    cursor = utils.get_cursor()
    if filtered == 'yes':
        recs = utils.select("select * from winelist_inv where bin != '0'", cursor)
    else:
        recs = utils.select('''select * from winelist_inv''', cursor)
    cursor.close()

    class MyEncoder(json.JSONEncoder):
        def default(self, obj):
            if isinstance(obj, datetime.date):
                return obj.isoformat()
            if isinstance(obj, decimal.Decimal):
                # Decimal type has no json encoding
                return str(obj)
            return json.JSONEncoder.default(self, obj)

    return json.dumps(recs, cls=MyEncoder)
def store_to_sql():
    sql = '''select username,age,tel,email from users;'''
    db_user_info, res = utils.select(sql)
    if res:
        INIT_RESULT = {i[0]: dict(zip(TITLE, i)) for i in db_user_info}
        for k, v in RESULT.items():
            if k not in INIT_RESULT:
                sql_new_user = '''insert into users(username, age, tel, email)
                    values('{name}', {age}, '{tel}', '{email}');'''.format(**RESULT[k])
                msg, res = utils.insert(sql_new_user)
                log.info(msg) if res else log.error(msg)
                log.debug(sql_new_user)
            else:
                print("username: {} already exists.".format(k))
def create(self):
    self.level = 1
    self.race = utils.select(['human', 'elf', 'dwarf', 'orc', 'gnome'])
    self.alignment = utils.select(['lawful', 'chaotic'])
    self.horoscope = utils.select([
        'aquarius', 'pisces', 'aries', 'taurus', 'gemini', 'cancer',
        'leo', 'virgo', 'libra', 'scorpio', 'sagittarius', 'capricorn'
    ])
    stats = [1, 2, 3, 4]
    utils.shuffle(stats)
    self.strength = stats[0]
    self.dexterity = stats[1]
    self.intellect = stats[2]
    self.charisma = stats[3]
    affinities = [1, 2, 3, 4]
    utils.shuffle(affinities)
    self.strength_affinity = affinities[0]
    self.dexterity_affinity = affinities[1]
    self.intellect_affinity = affinities[2]
    self.charisma_affinity = affinities[3]
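# The character-creation code above uses utils.select(seq) to pick a random
# element and utils.shuffle(seq) to shuffle in place. A plausible sketch of
# those helpers as thin wrappers over the random module; this is an
# assumption, not the game's actual utils code.
import random

def select(seq):
    # Return one element of seq chosen uniformly at random.
    return random.choice(seq)

def shuffle(seq):
    # Shuffle seq in place.
    random.shuffle(seq)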
def build_graph(self):
    sql = """SELECT SID,left_UID,left_concept,relation_UID,relation,right_UID,right_concept
             FROM statements"""
    statements = utils.select(self.db_file, sql)
    for statement in statements:
        sid = statement[0]
        left_uid = statement[1]
        left_concept = statement[2]
        relation_uid = statement[3]
        relation = statement[4]
        right_uid = statement[5]
        right_concept = statement[6]
        if not self.concepts.has_key(left_uid):
            _FACTORY.new_node(left_uid, left_concept)
        if not self.concepts.has_key(relation_uid):
            _FACTORY.new_node(relation_uid, relation)
        if not self.concepts.has_key(right_uid):
            _FACTORY.new_node(right_uid, right_concept)
        _FACTORY.add_relation(left_uid, relation_uid, right_uid)
def class_entropy(self, results, features, y, name, name_index, is_single=False):
    #print("class", name)
    selector = utils.select(self.names, self.groups, [name_index], self.do_subselection)
    r = 0
    for index in selector:
        feature = features[:, index]
        t = self.calc_MI(y, feature, NUM_BINS)
        #print("for feature with index", index, t)
        r += t
    results.append((r, name, name_index))
def __primal_dual(self):
    m = len(self.X)  # number of points
    n = len(self.F)  # number of subsets
    x = np.zeros(n, dtype=np.bool)
    y = np.zeros(m)
    c = [1 / len(f) for f in self.F]
    U = set()
    while len(U) < len(self.X):
        e = utils.select(self.X - U)
        f_i = [i for i in range(n) if e in self.F[i]]  # find all sets containing e
        min_delta = np.min([
            c[i] - np.sum([y[yi] for yi in self.F[i]]) + y[e]
            for i in f_i
        ])
        y[e] += min_delta  # increase ye
        for i in f_i:
            if np.sum([y[yi] for yi in self.F[i]]) >= c[i]:  # >= rather than == for precision
                x[i] = 1
                U = U.union(self.F[i])
    self.C = [self.F[i] for i in range(n) if x[i] == 1]
def selectgroup():
    utils.drawline('SELECT GROUP')
    grouplist = {}
    # eliminate redundancy
    for group in userinf.teamlist:
        if group['groupid'] not in grouplist:
            grouplist[group['groupid']] = group
    for groupid in grouplist:
        printgroup(grouplist[groupid])
    print 'Select a group (0 to exit)'
    sel = utils.select(5)
    if sel == 0:
        print 'Back to main menu'
        return -1, None
    elif str(sel) not in grouplist:
        print 'Invalid group.'
        return -1, None
    sel_group = grouplist[str(sel)]
    sel2 = editgroup(sel_group)
    if sel2 == 1:
        return sel, sel_group
    else:
        return -1, None
def Combine_Regularity(regular_scale=4, df_se1_stat=None):
    df_se1_stat = pd.read_csv('Demo_stat_totals.csv', index_col=0)
    df_se1_reg = pd.read_csv('Demo_Se1_seq_feature_scale_' + str(regular_scale) + '.csv',
                             index_col=0)
    from utils import str_to_list, select
    for col in df_se1_reg.columns:
        if col.startswith('reg_'):
            df_se1_reg[col] = df_se1_reg[col].apply(str_to_list)
    df_se1_reg = df_se1_reg[[
        i for i in df_se1_reg.columns
        if i.startswith('reg_') or i == 'MASKED_STUDENT_ID'
    ]]
    for col in df_se1_reg.columns:
        if col.startswith('reg_'):
            for i in range(len(df_se1_reg[col][0])):
                df_se1_reg[col + '_' + str(i)] = df_se1_reg[col].apply(
                    lambda x: select(x, i))
    blacklist = [
        'reg_' + str(regular_scale) + '_COURSE_ACCESS',
        'reg_' + str(regular_scale) + '_PAGE_ACCESS',
        'reg_' + str(regular_scale) + '_LOGIN_ATTEMPT',
        'reg_' + str(regular_scale) + '_SESSION_TIMEOUT',
        'reg_' + str(regular_scale) + '_LIB'
    ]
    df_se1_reg = df_se1_reg[[i for i in df_se1_reg if i not in blacklist]]
    df_se1_stat_reg = pd.merge(df_se1_stat, df_se1_reg,
                               on='MASKED_STUDENT_ID', how='left').fillna(0)
    df_se1_stat_reg.to_csv('Demo_baseline_Reg.csv')
def fit_and_score(data, method_name, method_args, propagate_params, verbose, output_dir):
    args = method_args.copy()
    args.update(data['meta'])
    args.update(propagate_params)
    label_name = args['truth'] + '_labels'
    labels = args[label_name]
    weights = get_weights(data['dissim'], propagate_params['bandwidth'])
    propagate_fn = method_table[method_name]['function']
    propagated = propagate_fn(data['pred'], weights, labels=labels,
                              output_dir=output_dir, **args)
    propagated = select(propagated, 'id', data['truth_ids'])
    assert len(propagated['pred']) == len(data['target'])
    score = scoring_table[method_args['scoring']](propagated['pred'], data['target'])
    logger.info('%s: %d propagated samples with ground truth yielded: %.3f',
                str(propagate_params), len(propagated), score)
    cm = sklearn.metrics.confusion_matrix(data['target'], propagated['pred'],
                                          labels=sorted(labels.keys()))
    return score, propagate_params, cm
def Merge_Social_Homophily(STAT_Reg_path='Demo_baseline_Reg.csv',
                           SoH_path='se1_weekly_node_embeddings_30_sec_2s.csv'):
    df_se1_stat_reg = pd.read_csv(STAT_Reg_path, index_col=0)
    df_se1_soho = pd.read_csv(SoH_path, index_col=0)
    df_se1_soho = df_se1_soho[[
        i for i in df_se1_soho.columns if i != 'label_atRist'
    ]]
    from utils import str_to_list, select
    for col in df_se1_soho.columns:
        if col != 'MASKED_STUDENT_ID':
            df_se1_soho[col] = df_se1_soho[col].apply(str_to_list)
    for col in df_se1_soho.columns:
        if col.startswith('week_'):
            for i in range(64):
                df_se1_soho[col + '_' + str(i)] = df_se1_soho[col].apply(
                    lambda x: select(x, i))
    blacklist = [
        'week_1', 'week_2', 'week_3', 'week_4', 'week_5', 'week_6', 'week_7',
        'week_8', 'week_9', 'week_10', 'week_11', 'week_12', 'week_13', 'week_14'
    ]
    df_se1_soho = df_se1_soho[[
        i for i in df_se1_soho.columns if i not in blacklist
    ]]
    print(df_se1_soho.shape)
    df_se1_stat_reg_soho = pd.merge(df_se1_stat_reg, df_se1_soho,
                                    on='MASKED_STUDENT_ID', how='left').fillna(0)
    df_se1_stat_reg_soho.to_csv('Demo_STAT_Reg_SoH.csv')
ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
if ckpt and ckpt.model_checkpoint_path:
    saver.restore(sess, ckpt.model_checkpoint_path)
    print('Restored model!')
with sess.as_default():
    # Tensorboard writer
    writer = tf.summary.FileWriter(hp.logdir, sess.graph)
    for i in range(hp.num_train_epochs):
        print(len(indexs))
        idns = indexs
        indexs = idns[:int(len(idns) * 0.7)]
        test_indexs = idns[int(len(idns) * 0.7):]
        np.random.shuffle(indexs)
        test_input_id_ = select(input_ids, test_indexs)
        test_input_mask_ = select(input_masks, test_indexs)
        test_segment_id_ = select(segment_ids, test_indexs)
        test_label_id_ = select(label_ids, test_indexs)
        print(num_batchs - 1)
        for j in range(num_batchs - 1):
            # Get ids selected
            i1 = indexs[j * hp.batch_size:min((j + 1) * hp.batch_size, num_train_samples)]
            # Get features
            input_id_ = select(input_ids, i1)
            input_mask_ = select(input_masks, i1)
            segment_id_ = select(segment_ids, i1)
            label_id_ = select(label_ids, i1)
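# The training loops above and below index the dataset with select(values, idxs).
# A likely minimal implementation (an assumption, not the original helper)
# simply gathers the elements at the given positions.
def select(values, idxs):
    # Return the elements of values at the positions listed in idxs.
    return [values[i] for i in idxs]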
def questmenu():
    utils.drawline('QUESTMENU')
    print '0. Refresh quest list'
    print '1. Knock quest'
    print '2. Back'
    return utils.select(2)
def eval_accuracy(self, indexes):
    if len(indexes) == 0:
        return RANDOM_ACCURACY, RANDOM_ACCURACY
    selector = utils.select(self.names, self.groups, indexes, self.do_subselection)
    if USE_N_FOLD_CROSS_VALIDATION:
        features = self.cv[:, selector]
        left_out_features = self.left_out[:, selector]
        validation_score = 0
        test_score = 0
        # rs = ShuffleSplit(n_splits = NUM_VALIDATION_ITERATIONS, test_size = 0.33)
        rs = KFold(n_splits=NUM_VALIDATION_ITERATIONS)  #, test_size = 0.33)
        scores = []
        # use balanced weights to account for class imbalance
        # (we're trying to optimize f1 score, not accuracy)
        clf = RandomForestClassifier(n_estimators=NUM_TREES, random_state=0,
                                     class_weight="balanced")
        for train_index, test_index in rs.split(features):
            clf.fit(features[train_index], self.cv_y[train_index])
            # s1 = clf.score(features[test_index], self.cv_y[test_index])
            # s2 = clf.score(left_out_features, self.left_out_y)
            hypothesis = clf.predict(features[test_index])
            s1 = f1_score(self.cv_y[test_index], hypothesis, average="micro")
            hypothesis = clf.predict(left_out_features)
            s2 = f1_score(self.left_out_y, hypothesis, average="micro")
            scores.append("{:2.2f} ({:2.2f})".format(s1, s2))
            validation_score += s1
            test_score += s2
        validation_score /= NUM_VALIDATION_ITERATIONS
        test_score /= NUM_VALIDATION_ITERATIONS
        # names = [self.groups[i] for i in indexes]
        # print(names)
        # print("validation/test:", scores)
    else:
        # simply train and then evaluate
        features_train = self.train[:, selector]
        features_validation = self.validation[:, selector]
        scores = []
        for i in range(NUM_TRIALS):
            # use balanced weights to account for class imbalance
            # (we're trying to optimize f1 score, not accuracy)
            clf = RandomForestClassifier(n_estimators=NUM_TREES, random_state=i,
                                         class_weight="balanced")
            clf.fit(features_train, self.train_y)
            # validation_score = clf.score(features_validation, self.validation_y)
            hypothesis = clf.predict(features_validation)
            # c = (self.validation_y == hypothesis)
            f1 = f1_score(self.validation_y, hypothesis, average="micro")
            scores.append(f1)
        validation_score = np.mean(scores)
        # check also the results on the test set
        features_test = self.test[:, selector]
        hypothesis = clf.predict(features_test)
        f1 = f1_score(self.test_y, hypothesis, average="micro")
        test_score = f1
    # print("validation={:.2f} test={:.2f}".format(validation_score, test_score))
    return validation_score, test_score
ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
if ckpt and ckpt.model_checkpoint_path:
    saver.restore(sess, ckpt.model_checkpoint_path)
    print('Restored model!')
with sess.as_default():
    # Tensorboard writer
    writer = tf.compat.v1.summary.FileWriter(hp.logdir, sess.graph)
    for i in range(hp.num_train_epochs):
        indexs = shuffle_one(arr)
        for j in range(num_batchs - 1):
            i1 = indexs[j * hp.batch_size:min((j + 1) * hp.batch_size, num_train_samples)]
            # Get features
            input_id_ = select(input_ids, i1)
            input_mask_ = select(input_masks, i1)
            segment_id_ = select(segment_ids, i1)
            label_id_ = select(label_ids, i1)
            # Feed dict
            fd = {
                MODEL.input_ids: input_id_,
                MODEL.input_masks: input_mask_,
                MODEL.segment_ids: segment_id_,
                MODEL.label_ids: label_id_
            }
            # Optimizer
            sess.run(MODEL.optimizer, feed_dict=fd)
            # Tensorboard
            # This part raises an error under TF2 and I haven't solved it yet;
            # TensorBoard isn't very useful to me anyway, so I commented it out.
def cardmenu():
    utils.drawline('CARDMENU')
    print '0. View character'
    print '1. Back'
    return utils.select(1)
def get_behavior_text(self):
    # for pets
    return (self.get_tag() + ', *' + self.given_name + '*. *'
            + self.given_name + '* ' + utils.select(self.behaviors))
def on_equip(self):
    if self.slot == 'pet':
        self.given_name = utils.select(self.possible_names)
def roll(self):
    # Randomly rolls this template's or item's random props.
    if self.enchanted:
        self.enchant = utils.select(possible_enchants)