def on_get(self, req, resp, project, ident):
    """Serve JSON definitions/references/doc-comments for *ident*.

    Requires a ``version`` query parameter; an optional ``family``
    (default ``'C'``) selects the symbol family.
    """
    query = build_query(req.env, project)
    try:
        version = req.params['version']
    except KeyError:
        raise falcon.HTTPMissingParam('version')
    if version == 'latest':
        version = query('latest')
    family = req.params.get('family', 'C')
    if family == 'B':
        # DT compatible strings are quoted in the database
        ident = parse.quote(ident)
    symbol_definitions, symbol_references, symbol_doccomments = query(
        'ident', version, ident, family)
    resp.body = json.dumps({
        'definitions': [d.__dict__ for d in symbol_definitions],
        'references': [r.__dict__ for r in symbol_references],
        'documentations': [c.__dict__ for c in symbol_doccomments]
    })
    resp.status = falcon.HTTP_200
def eng_response(message_text, sender_id, recipient_id, comment_id):
    """Store the normalized message and return the AI-generated reply.

    On failure the user is notified and a 200 response is returned so the
    webhook is not retried.
    """
    #message_text = normalization_redis.eng_word_correction(message_text)
    print("Inside Response Generator")
    query(update_query_normalization, (message_text, comment_id))
    try:
        api_response = api_ai_query(message_text, comment_id)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; best-effort fallback behavior is unchanged.
        send_message(sender_id, comment_id,
                     "There was an error with fetching the response from the AI")
        return Response(status=200)
    return api_response
def fb_msg_parse(data):
    # Parse a Facebook Messenger webhook payload: insert every messaging
    # event into the database and return the identifiers of the last one.
    for entry in data["entry"]:
        for messaging_event in entry["messaging"]:
            try:
                message_text = messaging_event["message"]["text"]
            except Exception as e:
                # Non-text events (attachments, delivery receipts, ...) have
                # no "text" key; 'NEW' is used as a sentinel value.
                message_text = 'NEW'
            sender_id = messaging_event["sender"]["id"]
            recipient_id = messaging_event["recipient"]["id"]
            comment_id = messaging_event["message"]["mid"]
            timestamp = get_timestamp(messaging_event['timestamp'])
            query(insert_query,(0,0,0,timestamp,sender_id,comment_id,\
                message_text,'','',''))
            logging.info("facebook message inserted in db")
    # NOTE(review): returns values from the *last* processed event; raises
    # NameError if the payload contained no messaging events — confirm the
    # caller guarantees at least one.
    return sender_id, recipient_id, comment_id, message_text
def without_select(): layer = iface.activeLayer() q = (query(layer).where(qstring).where(qstring2).top(top)) results = q() out = results.next() assert isinstance(out, dict) print out
def search():
    """Handle the search form: time the lookup and render the results."""
    term = request.form["palabra"]
    started = time.time()
    matches = query(term)
    elapsed = time.time() - started
    return render_template("result.html", lista=matches, tiempo=elapsed)
def call_query(query, *args):
    """Invoke *query(*args)* from inside ELIXIR_DIR, restoring the cwd.

    Fix: the original restored the working directory only on success, so an
    exception from query() leaked the chdir to the rest of the process.
    """
    cwd = os.getcwd()
    os.chdir(ELIXIR_DIR)
    try:
        return query(*args)
    finally:
        os.chdir(cwd)
def without_select(): layer = iface.activeLayer() q = query(layer).where(qstring).where(qstring2).top(top) results = q() out = results.next() assert isinstance(out, dict) print out
def api_ai_query(text, comment_id):
    """Send *text* to api.ai, persist the reply keyed by *comment_id*,
    and return it.  Falls back to an apology when the API reports an error.

    Fix: the success and failure branches duplicated the store/log/return
    sequence verbatim; it now appears once.
    """
    logging.info("Api AI entered")
    request = ai.text_request()
    request.query = text
    response = json.loads(request.getresponse().read().decode('utf-8'))
    if response['status']['code'] == 200:
        answer = response['result']['fulfillment']['speech']
    else:
        answer = "Sorry, I couldn't understand that question"
    query(update_query_apiai, (answer, comment_id))
    logging.info("api ai returning")
    return answer
def on_get(self, req, resp, project, ident):
    """Return JSON definitions and references for *ident* at a version."""
    query = build_query(req.env, project)
    version = req.params.get('version')
    if version is None:
        raise falcon.HTTPMissingParam('version')
    if version == 'latest':
        version = query('latest')
    defs, refs = query('ident', version, ident)
    resp.body = json.dumps({
        'definitions': [d.__dict__ for d in defs],
        'references': [r.__dict__ for r in refs]
    })
    resp.status = falcon.HTTP_200
def google_translate(user_text, comment_id):
    '''Translate *user_text* to English via Urdu (hi -> ur -> en), store the
    result for *comment_id*, and return it.  English input passes through
    unchanged.'''
    translator = Translator(service_urls=['translate.google.com.pk'])
    if translator.detect(user_text).lang.find('en') != -1:
        # Passthrough for English input.
        query(update_query_translation, (user_text, comment_id))
        return user_text
    intermediate = translator.translate(user_text, src="hi", dest="ur").text
    english = translator.translate(intermediate, src="ur", dest="en").text
    query(update_query_translation, (english, comment_id))
    logging.info("TEXT Translated")
    return english
def monthly(start_date, end_date):
    # Walk month by month from start_date to end_date and insert the
    # analytics report rows for each table into MySQL.  (Python 2 code:
    # `except mdb.Error, e` syntax.)
    tables = ['AgeGroupGender', 'MonthlyViews', 'VidSharingService']
    print "Ran on: ", now
    con = None
    try:
        con = mdb.connect(**config.mysql_params)
        cur = con.cursor()
        videos = getVideoList()
        while (start_date <= end_date):
            for table in tables:
                if table == 'AgeGroupGender' or table == 'MonthlyViews':
                    if table == 'MonthlyViews':
                        # MonthlyViews is keyed on a single day.
                        end = start_date
                    else:
                        # Last day of the month containing start_date.
                        end = start_date+relativedelta(months = +1)+relativedelta(days = -1)
                    response, columns = query(table, "", start_date, end)
                    insert_statement = table_options[table]['insert']
                    for row in response:
                        cur.execute(insert_statement,
                                    getValues(table, "", start_date, columns, row))
                    con.commit()
                else:
                    # VidSharingService is reported per video.
                    for video in videos:
                        end = start_date+relativedelta(months = +1)+relativedelta(days = -1)
                        response, columns = query(table, video, start_date, end)
                        insert_statement = table_options[table]['insert']
                        for row in response:
                            cur.execute(insert_statement,
                                        getValues(table, video, start_date, columns, row))
                        con.commit()
            start_date = start_date+relativedelta(months = +1)
    except mdb.Error, e:
        # NOTE(review): the connection is never closed (no finally block).
        print "Error %d: %s" % (e.args[0],e.args[1])
        sys.exit(1)
def without(): def checkassessment(feature): return int(feature['assessment']) <> 4315968 layer = iface.activeLayer() q = (query(layer).where(qstring).where(qstring2).where( checkassessment).top(100)) results = q() for f in results: print f['assessment'], f['postcode'], f['subdivided']
def api_ai_query(text, comment_id):
    # Query the Dialogflow v1 REST endpoint with the session id stored in
    # the web session, persist the fulfillment text, and return it.
    logging.info("Api AI entered")
    api_endpoint = "https://api.dialogflow.com/v1/query/?v=20150910"
    logging.info("API AI QUERY CALLED")
    logging.info("This is the session")
    logging.info(session['session_id'])
    # SECURITY NOTE(review): bearer token is hard-coded in source — move it
    # to configuration/environment and rotate the credential.
    headers = {'Authorization' : 'Bearer 3a67ab4afb49424587183ae8b04bf88b',
               'Content-Type' : 'application/json'}
    body = {'query' : text, 'lang' : 'en', 'sessionId' : session['session_id'] }
    body_json = json.dumps(body)
    apiai_result = requests.post(api_endpoint, \
        headers=headers, data=body_json).json()
    logging.info(apiai_result)
    logging.info("Dict, %s", apiai_result['result']['fulfillment']['speech'])
    api_response = apiai_result['result']['fulfillment']['speech']
    query(update_query_apiai, (api_response, comment_id))
    return api_response
def with_select_customfunction(): def MyValue(feature): return "Hello World" layer = iface.activeLayer() q = query(layer).top(1).select(MyValue) results = q() out = results.next() print out assert isinstance(out, dict) assert "MyValue" in out.keys()
def with_select_customfunction(): def MyValue(feature): return "Hello World" layer = iface.activeLayer() q = (query(layer).top(1).select(MyValue)) results = q() out = results.next() print out assert isinstance(out, dict) assert "MyValue" in out.keys()
def without(): def checkassessment(feature): return int(feature['assessment']) <> 4315968 layer = iface.activeLayer() q = (query(layer).where(qstring) .where(qstring2) .where(checkassessment) .top(100)) results = q() for f in results: print f['assessment'], f['postcode'], f['subdivided']
def add_distance():
    # For every street robbery (Straatroven), compute the distance to the
    # nearest police station and write it back to the distance_pol column.
    roofq = query("SELECT voorval_nr, latitude, longitude FROM Straatroven",
                  "sqlite:///Opendata.db")
    db = create_engine('sqlite:///Opendata.db')
    db.echo = True
    metadata = MetaData(db)
    db.connect()
    Straatroof = Table('Straatroven', metadata,
                       Column('voorval_nr', String(20), primary_key=True),
                       Column('distance_pol', Float))
    u = Straatroof.update()
    distancearray = []
    for r in roofq:
        voorval_nr = r['voorval_nr']
        print(voorval_nr)
        rpoint = (r['latitude'], r['longitude'])
        distance = None
        # Station list is re-queried per robbery row.
        # NOTE(review): vincenty() is evaluated twice per comparison —
        # hoisting it into a local would halve the geodesic computations.
        policeq = query("SELECT latitude, longitude FROM police_stations",
                        "sqlite:///Opendata.db")
        for p in policeq:
            ppoint = (p['latitude'], p['longitude'])
            if distance is None:
                distance = vincenty(rpoint, ppoint).meters
            elif distance > vincenty(rpoint, ppoint).meters:
                distance = vincenty(rpoint, ppoint).meters
        # Distances beyond 10 km are treated as "no nearby station".
        # NOTE(review): placement of this cap (after the station loop) is
        # reconstructed from a whitespace-mangled source — confirm.
        if distance > 10000:
            distance = None
        distancearray.append([voorval_nr, distance])
    print(distancearray)
    for i in distancearray:
        u2 = u.where(Straatroof.c.voorval_nr == i[0]).values(distance_pol=i[1])
        u2.execute()
def main(): print centered('Welcome to The Book of Mormon Reader') print centered('Created by Chris Hagmann') time.sleep(.3) print '\n' * 3 book, chapter = '','' while book not in Books: if book in Chapter_list: parsed = book.split() chapter = int(parsed.pop()) book = ' '.join(parsed) else: book = query('In which book of The Book of Mormon would you like to read? ', response=str) book = book.replace('First','1').replace('Second','2').replace('Third','3').replace('Fourth','4') if bom[book] == 1: chapter = 1 else: while reference(book, chapter) not in Chapter_list: chapter = query('Which chapter in {} would you like to read [Last chapter: {}]? '.format(book,bom[book]), response=int) numofchapters = query('How many chapters do you want to read? ', default=1, response=int) current_chapter = reference(book, chapter) start_flag = reference(book, chapter) + '\n' end_flag = Chapter_list[Chapter_list.index(current_chapter) + numofchapters] + '\n' filename = ''.join(str(c) for c in book + str(chapter) + '.txt' if str(c) != ' ') with open('BoM.txt','r') as f, open(filename,'w') as g: line = '' while next(f) != start_flag: pass while line != end_flag: line = next(f) if line != '\n': if len(line) == 1: g.write(line) elif not any(c.isdigit() for c in line.split()[-1]): g.write(line) next speed = query('How fast do you want to read in words per minute? ', default=450, response=int) if speed < 400: weigthed = False else: weighted = query('Do you want to try the weighted words feature', default='Y', response=boolean) columnmode = query('Do you want to try the column mode', default='Y', response=boolean) if columnmode: columnsize = query('What size column? ', default=50, response=int) speed_reader(filename, speed, weighted, columnmode, columnsize)
def post(self):
    # Login handler: look up the stored password for the submitted email
    # and compare it with the submitted one.
    emaila=self.get_argument('email')
    password=self.get_argument('pass1')
    condition={'emaila':emaila}
    getpasswd=query(condition)
    returnpwd=getpasswd.query_one()
    # SECURITY NOTE(review): passwords are stored and compared in
    # plaintext — hash them (e.g. bcrypt) and compare digests instead.
    # NOTE(review): raises TypeError if no row matched (returnpwd is None)
    # — confirm query_one() always returns a mapping.
    if password != returnpwd['password']:
        # Wrong password: bounce back to the login page.
        self.redirect("/")
    else:
        self.request.cookies()
        return True
def with_select_mapview(): layer = iface.activeLayer() q = (query(layer).restict_to(Query.MapView()).top(top).select( 'assessment', 'address', 'lot', geom=lambda f: f.geometry(), mylot=lambda f: int(f['house_numb']) * 100)) results = q() out = results.next() assert isinstance(out, dict) print out
def on_get(self, req, resp, project, ident):
    """Like the plain ident endpoint, but responds 404 when nothing matches."""
    query = build_query(req.env, project)
    if 'version' not in req.params:
        raise falcon.HTTPMissingParam('version')
    version = req.params['version']
    if version == 'latest':
        version = query('latest')
    defs, refs = query('ident', version, ident)
    if not (defs or refs):
        raise falcon.HTTPNotFound({
            'title': 'Requested identifier not found in {} project'.format(project)
        })
    resp.body = json.dumps({
        'definitions': [d.__dict__ for d in defs],
        'references': [r.__dict__ for r in refs]
    })
    resp.status = falcon.HTTP_200
def with_select_mapview(): layer = iface.activeLayer() q = ( query(layer) .restict_to(Query.MapView()) .top(top) .select("assessment", "address", "lot", geom=lambda f: f.geometry(), mylot=lambda f: int(f["house_numb"]) * 100) ) results = q() out = results.next() assert isinstance(out, dict) print out
def with_select_mapview(): def checkassessment(feature): return int(feature['assessment']) <> 4315968 layer = iface.activeLayer() q = (query(layer).restict_to(Query.MapView()).top(10).select( 'assessment', 'address', 'lot', geom=lambda f: f.geometry(), mylot=lambda f: int(f['house_numb']) * 100)) results = q() for f in results: print f
def with_select(): def checkassessment(feature): return int(feature['assessment']) <> 4315968 layer = iface.activeLayer() q = (query(layer).where(qstring).where(qstring2).where( checkassessment).top(10).select( 'assessment', 'address', 'lot', geom=lambda f: f.geometry(), mylot=lambda f: int(f['house_numb']) * 100)) results = q() for f in results: print f
def main(): DEFAULT = query('Use default values for Indices? ', default='y', response=boolean) S = query('Number of stores: ', **q_opts(3, int, DEFAULT)) V = query('Number of vendors: ', **q_opts(3, int, DEFAULT)) P = query('Number of basic products: ', **q_opts(3, int, DEFAULT)) Q = query('Number of fashion products: ', **q_opts(3, int, DEFAULT)) T = query('Number of time periods: ', **q_opts(7, int, DEFAULT)) DEFAULT = query('Use default values for parameters? ', default='y', response=boolean) print rd = instance_generator(S, V, P, Q, T, DEFAULT=DEFAULT) if rd: print "Instance Created!" else: print "Failed to create instance!" return rd
def with_select_mapview(): def checkassessment(feature): return int(feature['assessment']) <> 4315968 layer = iface.activeLayer() q = (query(layer).restict_to(Query.MapView()) .top(10) .select('assessment', 'address', 'lot', geom = lambda f: f.geometry(), mylot = lambda f: int(f['house_numb']) * 100) ) results = q() for f in results: print f
def upload_file():
    # Classify an uploaded hair photo and recommend matching products.
    if request.method == 'POST':
        # Check to see if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # If the user does not select a file
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        # If everything looks good, proceed
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(filepath)
            predicted_class = predict_class(filepath)
            # List of classes not used in this app
            bad_list = ['straight', 'unsure','braids','dreadlocks','nonhair', 'short']
            if predicted_class in bad_list:
                return render_template('complete.html', predicted_class = predicted_class)
            # Query the product database, return products and amazon URLS for products
            prods, urls = query(predicted_class)
            shampoo,conditioner,leavein,gel,deep,protein,cream,serum,clarify = prods
            ushampoo,uconditioner,uleavein,ugel,udeep,uprotein,ucream,userum,uclarify = urls
            # Passes hair class, products, and URLS to the second HTML page
            return render_template('complete.html', predicted_class = predicted_class,
                shampoo = shampoo, conditioner = conditioner, leavein = leavein,
                gel = gel, deep = deep, protein = protein, cream = cream,
                serum = serum, clarify = clarify, ushampoo = ushampoo,
                uconditioner = uconditioner, uleavein = uleavein, ugel = ugel,
                udeep = udeep, ucream = ucream, userum = userum,
                uprotein = uprotein, uclarify = uclarify)
        # Disallowed extension: bounce back to the main page.
        return redirect(url_for('main'))
    # GET request: show the upload form.
    # NOTE(review): structure reconstructed from whitespace-mangled source;
    # confirm the redirect belongs inside the POST branch.
    return render_template('main.html')
def with_select(): def checkassessment(feature): if feature['assessment'] is None: return False return int(feature['assessment']) == 4315968 layer = iface.activeLayer() q = (query(layer).where("postcode = 6164").where("subdivided = 'Y'").where( checkassessment).top(top).select( 'assessment', 'address', 'lot', geom=lambda f: f.geometry(), mylot=lambda f: int(f['house_numb']) * 100)) results = q() out = results.next() assert isinstance(out, dict) print out
def with_select(): def checkassessment(feature): return int(feature['assessment']) <> 4315968 layer = iface.activeLayer() q = (query(layer).where(qstring) .where(qstring2) .where(checkassessment) .top(10) .select('assessment', 'address', 'lot', geom = lambda f: f.geometry(), mylot = lambda f: int(f['house_numb']) * 100) ) results = q() for f in results: print f
def with_select(): def checkassessment(feature): if feature["assessment"] is None: return False return int(feature["assessment"]) == 4315968 layer = iface.activeLayer() q = ( query(layer) .where("postcode = 6164") .where("subdivided = 'Y'") .where(checkassessment) .top(top) .select("assessment", "address", "lot", geom=lambda f: f.geometry(), mylot=lambda f: int(f["house_numb"]) * 100) ) results = q() out = results.next() assert isinstance(out, dict) print out
def parse_xl(file_path, table_name, primary_key=None):
    """Download a workbook and build one insert query per sheet.

    Fixes:
    - The type-sniffing loop read `cell.value` — the *last header cell* left
      over from the comprehension above — instead of the current data cell;
      it now inspects `row[j].value` (mirroring parse_csv's `row[j]`).
    - `urllib.urlretrieve` returns a `(filename, headers)` tuple, so the old
      `f.close()` raised AttributeError on every call; removed.
    - The first data row was used only for type sniffing and silently
      dropped; it is now kept, consistent with parse_csv.
    """
    #f = urllib2.urlopen(file_name)
    file_name = file_path.split('/')[-1]
    urllib.urlretrieve(file_path, file_name)
    wb = load_workbook(file_name, read_only=True)
    sheets = wb.get_sheet_names()
    print(sheets)
    queries = []
    types = {}
    for sheet in sheets:
        col_names = []
        sheet_data = []
        ws = wb[sheet]
        i = 0
        for row in ws.rows:
            if i == 0:
                # Header row supplies the column names.
                col_names = [cell.value for cell in row]
                i += 1
            elif i == 1:
                # Sniff column types from the first data row.
                for j in range(len(row)):
                    value = row[j].value
                    if str(value).isdigit():
                        types[col_names[j]] = 'integer'
                    else:
                        types[col_names[j]] = 'text'
                sheet_data.append([cell.value for cell in row])
                i += 1
            else:
                row_data = [cell.value for cell in row]
                sheet_data.append(row_data)
        q = query(sheet, None, col_names, sheet_data, types)
        queries.append(q)
    return queries
def parse_csv(file_path, table_name, primary_key=None):
    # Fetch a CSV over HTTP and build a single insert query for it.
    # (Python 2: urllib2 plus print statements.)
    # for remote files
    f = urllib2.urlopen(file_path)
    # for local files
    #f = open(filename)
    data = []
    types = {}
    col_names = []
    try:
        i = 0
        col_names = []
        reader = csv.reader(f.read().splitlines())
        for row in reader:
            if i == 0:
                # Header row supplies the column names.
                col_names = row
                i += 1
                print(col_names)
            elif i == 1:
                try:
                    # Sniff column types from the first data row; keep it.
                    for j in range(len(row)):
                        if row[j].isdigit():
                            types[col_names[j]] = 'integer'
                        else:
                            types[col_names[j]] = 'text'
                    data.append(row)
                    assert (len(row) == len(col_names))
                except:
                    # Deliberate best-effort: a ragged first row only warns.
                    print "Data does not have equal number of column as column names"
                i += 1
            else:
                data.append(row)
    except Exception as e:
        print e
    #print(col_names)
    f.close()
    q = query(table_name, primary_key, col_names, data, types)
    return q
def postsearch():
    """Look up the submitted word, applying spell-correction when needed.

    check() returns 0 when the input word itself was found in the database,
    and 1 when it had to be corrected and the corrected form was searched.
    """
    term = request.forms.get("search")
    results = query(term)
    next_step = check()
    if next_step == 0:
        results.append(term)
        return template('format_data', rows=results)
    if next_step == 1:
        corrected = get()
        if not results:
            # Neither the original nor the corrected word matched:
            # serve the static "no results" page.
            f = open("no.html")
            return "".join(f.readlines())
        results.append(corrected)
        return template('format_data_after_spellCheck', rows=results)
def get_stocks_data(stocks_code, date):
    # Fetch the fundamentals needed for a Magic-Formula style screen
    # (return on capital and earnings yield) for the given stock codes.
    q = query(
        valuation.code,  # stock code
        # EBIT (earnings before interest and taxes) = operating profit
        income.operating_profit,  # operating profit (CNY) — core profitability proxy (EBIT)
        # net working capital + net fixed assets = total assets - goodwill - intangibles
        # return on capital = EBIT / (net working capital + net fixed assets)
        balance.total_assets,  # total assets (CNY)
        balance.good_will,  # goodwill (CNY)
        balance.intangible_assets,  # intangible assets (CNY)
        # enterprise value = market cap + total liabilities
        # earnings yield = EBIT / enterprise value
        valuation.market_cap,  # market capitalization (100M CNY)
        balance.total_liability,  # total liabilities (CNY)
    ).filter(
        income.net_profit > 0,
        # valuation.market_cap > 50,
        valuation.code.in_(stocks_code))
    # Missing fundamentals are zero-filled; rows indexed by stock code.
    data = get_fundamentals(q, date=date).fillna(value=0).set_index('code')
    return data
def on_click(self):
    """Query trains between the two entered stations on the chosen date
    and show the results, warning on any invalid input.

    Refactored from a deep if/else pyramid into guard clauses; dropped the
    `== True` comparison. All user-facing strings are unchanged.
    """
    getStart = self.lineEdit_start.text()
    getEnd = self.lineEdit_end.text()
    getDate = self.dateTimeEdit_start.date().getDate()
    # Format the (year, month, day) tuple as "YYYY-MM-DD".
    finalDate = str(getDate[0])
    finalDate += '-' + str(getDate[1]).zfill(2)
    finalDate += '-' + str(getDate[2]).zfill(2)
    if not isStationExist():
        self.messageDialog('警告', '未下载车站查询文件!')
        return
    # NOTE(review): eval() of the cached station file — acceptable only if
    # the file is always locally generated; consider json/ast.literal_eval.
    station = eval(read())
    if getStart == "" or getEnd == "":
        self.messageDialog('警告', '请填写车站名称!')
        return
    if getStart not in station or getEnd not in station:
        self.messageDialog('警告', '输入的站名不存在')
        return
    data = query(station[getStart], station[getEnd], finalDate)
    if len(data) != 0:
        self.displayTable(len(data), 16, data)
    else:
        self.messageDialog('警告', '此阶段没有车辆运行!')
def without():
    """Two-filter query; fetch only the first row (no assertions)."""
    rows = query(iface.activeLayer()).where(qstring).where(qstring2).top(top)()
    out = rows.next()
def start_func():
    # Classify unseen inbox mail into per-class IMAP folders, mirroring each
    # class in local class_newN directories.  (Python 2; indentation
    # reconstructed from a whitespace-mangled source.)
    mail.select("Inbox") # connect to inbox.
    result, data = mail.search(None,'(UNSEEN)')
    ids = data[0] # data is a list.
    id_list = ids.split() # ids is a space separated string
    i=0;
    # Load the stored header summaries for every class folder.
    for fold in range(1,num_folders):
        path, dirs, files = os.walk('class_new'+str(fold)).next()
        count=0
        for i in files:
            if 'inbox' not in i.lower():
                count+=1
        # Each message is stored as a body file plus a header file.
        nfiles=count/2
        #nfiles = len(os.listdir('class_new'+str(fold)))/2
        #print nfiles
        mail_header[folders[fold]]=[]
        for i in range(0,nfiles):
            filename = str(folders[fold])+"_header"+str(i+1)
            try:
                f=open('class_new'+str(fold)+'/'+filename,'r').readlines()
                data_body = []
                for line in f:
                    data_body.append(line)
                mail_header[folders[fold]].append(data_body)
            except:
                pass
    mail_header["Inbox"]=[]
    i=0;
    # Process each unseen message: extract, clean, classify, file away.
    for email_id in id_list:
        i=i+1
        #mail.store(email_id, '+FLAGS', '\\UnSeen') #to tag the read mail as seen..
        result_uid,data_uid = mail.fetch(email_id, "(UID)")
        try:
            msg_uid = parse_uid(data_uid[0])
        except:
            pass
        result_mail,data_mail = mail.fetch(email_id, "(RFC822)");
        raw_email = data_mail[0][1] # here's the body, which is raw text of the whole email
        email_message = email.message_from_string(raw_email)
        receiver = email_message['To']
        subject = email_message['Subject']
        sender = email.utils.parseaddr(email_message['From'])
        payload=email_message.get_payload()
        body = extract_body(payload)
        if email_message.get_content_type() == "multipart/mixed":
            # Attachments are skipped entirely.
            continue
        elif email_message.get_content_type() == "text/html":
            # Strip markup, then truncate at the first residual html/<p> tag.
            body = nltk.clean_html(body)
            indx1 = body.find("html")
            indx2 = body.find("<p>")
            if indx1!=-1 and indx2!=-1:
                if indx1<indx2:
                    body = body[ :indx1]
                else:
                    body = body[ :indx2]
            elif indx1!=-1:
                body = body[ :indx1]
            elif indx2!=-1:
                body = body[ :indx2]
        file1 = "Inbox"+str(i);
        #var="Receiver: "+str(receiver) +"\nSender: "+str(sender)+"\nSubject:"+str(subject)+"\nBody: "+str(body);
        info = str(sender)+" "+str(subject)
        try:
            mail_header["Inbox"].append(info)
        except:
            pass
        f2 = open('class_new0/'+file1,'w')
        f2.write(body)
        f2.close()
        # Write a scratch file and let query() pick the class index.
        file_send = open("check",'w')
        file_send.write(str(info) +"\n"+ str(body))
        file_send.close()
        return_index = query(open("check").read())
        path, dirs, files = os.walk('class_new'+str(return_index)).next()
        count=0
        for j in files:
            if 'inbox' not in j.lower():
                count+=1
        nfiles=count/2+1
        if return_index!=0:
            # NOTE(review): "class_new0/file1" is a literal path — almost
            # certainly meant "class_new0/"+file1.  Also `+nfiles`
            # concatenates an int to a str (TypeError); str(nfiles) was
            # probably intended in all three places below.  Left as-is.
            shutil.move("class_new0/file1","class_new"+str(return_index)+"/"+folders[return_index]+nfiles);
            file_send = open("class_new"+str(return_index)+"/"+folders[return_index]+"_header"+nfiles,'w')
            file_send.write(str(info))
            file_send.close()
            mail_header["Inbox"].pop()
            file_send = open("class_"+str(return_index)+"/"+"file"+nfiles,'w')
            file_send.write(str(info)+"\n"+ str(body))
            file_send.close()
            # Move the message to its class folder on the IMAP server too.
            result = mail.uid('COPY', msg_uid, folders[return_index])
            if result[0] == 'OK':
                mov_del, data_del = mail.uid('STORE', msg_uid , '+FLAGS', '(\Deleted)')
                mail.expunge()
def withbindex():
    """Same where/top query, executed through a spatial bounding index."""
    indexed = query(iface.activeLayer()).where(qstring).top(top).with_index(bindex)
    results = indexed().next()
# Benchmark every query against every dataset: each query runs 31 times,
# the first (cold) timing is recorded separately from the other 30.
# (Indentation reconstructed from a whitespace-mangled source.)
for dataset in listaDataSet:
    queryEliminaCollection(db)
    data = utility.caricaDatiDaJson("../../../dataset_lokomat/" + dataset)
    # Insert the dataset documents
    for i in data:
        queryInserisciDati(db,i['id'],i['name'],i['width'],i['height'],i['l_shank'],i['l_thigh'],i['lokomat_shank'],i['lokomat_thigh']
            ,i['lokomat_recorded'],i['version'],i['legtype'],i['lwalk_training_duration'],i['lwalk_distance']
            ,i['step_datas'])
    # Execute the queries
    contatore = 0
    for query in listaQuery:
        # Same query 31 times
        tempiOtherQuery = []
        for _ in range(0, 31):
            tempo = time.time()
            query(db)
            tempoFinale = time.time() - tempo
            if (_ == 0):
                # First run timed separately (cold caches).
                listaDatiPrimaQuery.append(tempoFinale)
                print str("PRIMA QUERY ") + str(tempoFinale) + str("\n")
            else:
                tempiOtherQuery.append(tempoFinale)
                print str(tempoFinale)
        listaTempiOtherQuery.append(tempiOtherQuery)
        contatore = contatore + 1
    # Charts
    # NOTE(review): this loop body appears truncated in the available
    # source; only the two initialisations below are visible.
    for __ in range(0, 3):
        i = __
        numDataset = 0
def instance_generator(S, V, P, Q, T, AD=None, DEFAULT=True, seed=None):
    # Build a randomized warehouse/vendor/store problem instance, write the
    # Xpress data file, generate and compile the C# heuristic driver, and
    # pickle the instance.  (Python 2: xrange/itervalues/print statements.
    # Indentation reconstructed from a whitespace-mangled source.)
    I, J = 6, 6
    hpt = 8  # hours per time period t
    tpy = 28  # time period per year
    if seed is None:
        seed = query('seed value: ', **q_opts(-1, float, DEFAULT))
    if seed < 0:
        # Negative sentinel: draw a fresh random seed.
        seed = round(1000 * random.random(), 2)
    random.seed(seed)
    # -----------------------------------------------------------------------------
    # SET NUMBER OF EVERYTHING
    # -----------------------------------------------------------------------------
    rd = Struct()
    rd.STORES = list("s" + str(s + 1) for s in xrange(S))
    rd.PRODUCTS = list("p" + str(p + 1) for p in xrange(P))
    rd.FASHION = list("q" + str(q + 1) for q in xrange(Q))
    items = rd.PRODUCTS + rd.FASHION
    rd.PUTAWAY = list("i" + str(i + 1) for i in xrange(I))
    rd.PICKING = list("j" + str(j + 1) for j in xrange(J))
    tech = rd.PUTAWAY + rd.PICKING
    # NOTE(review): the string TIMES list is immediately overwritten by the
    # integer range on the next line.
    rd.TIMES = list("" + str(t + 1) for t in xrange(T))
    rd.TIMES = range(1, T + 1)
    rd.T_minus_One = dict(zip(rd.TIMES[1:] + rd.TIMES[0:-1], rd.TIMES))
    # Split vendors between fashion and basic products proportionally.
    Vq = int(Q * V / (P + Q))
    Vp = V - Vq
    rd.VENDORS_Q = list("vq" + str(v + 1) for v in xrange(Vq))
    rd.VENDORS_P = list("vp" + str(v + 1) for v in xrange(Vp))
    rd.VENDORS = rd.VENDORS_P + rd.VENDORS_Q
    # -----------------------------------------------------------------------------
    # GENERATE MODEL PARAMETERS
    # -----------------------------------------------------------------------------
    # Generate fashion period window
    rd.L_s = {s: 0 for s in rd.STORES}
    maxL = max(rd.L_s.values())
    rd.pt = 1
    tb_range = range(1, T + 1)
    # Walk back up to 8 periods from the end for the default window start;
    # the bare except stops at the list boundary for short horizons.
    try:
        for i in xrange(8):
            tb_default = tb_range[-i]
    except:
        pass
    rd.tb = query('fashion period start: ',
                  **q_opts(tb_default, tb_range, int, DEFAULT, False))
    te_range = range(rd.tb, T - maxL - rd.pt + 1)
    rd.te = query('fashion period end: ',
                  **q_opts(te_range[-1], te_range, int, DEFAULT, False))
    rd.ty = rd.te + rd.pt + maxL
    # Assign products to vendors in equal blocks (integer division).
    P_ratio = P / Vp
    Q_ratio = Q / Vq
    Omega = {}
    v_to_p = []
    if P_ratio > 0:
        PRODUCTS = list(rd.PRODUCTS)
        for v in rd.VENDORS_P:
            p_from_v = [PRODUCTS.pop(0) for _ in xrange(P_ratio)]
            v_to_p += [(v, p) for p in p_from_v]
        else:
            # for/else: after the loop, leftover products go to the last
            # vendor (NOTE(review): reconstructed structure — confirm).
            v_to_p += [(v, p) for p in PRODUCTS]
    else:
        v_to_p += zip(rd.VENDORS_P, rd.PRODUCTS)
    if Q_ratio > 0:
        FASHION = list(rd.FASHION)
        for v in rd.VENDORS_Q:
            q_from_v = [FASHION.pop(0) for _ in xrange(Q_ratio)]
            v_to_p += [(v, q) for q in q_from_v]
        else:
            v_to_p += [(v, q) for q in FASHION]
    else:
        v_to_p += zip(rd.VENDORS_Q, rd.FASHION)
    vqq = list(itertools.product(rd.VENDORS_Q, rd.FASHION))
    vpp = list(itertools.product(rd.VENDORS_P, rd.PRODUCTS))
    # Omega is the vendor->product incidence matrix (1 = vendor supplies it).
    Omega = {(v, p): 1 if (v, p) in v_to_p else 0
             for v, p in chain(vqq, vpp)}
    rd.Omega_q = [(v, q) for v in rd.VENDORS_Q for q in rd.FASHION if Omega[v, q]]
    rd.Omega_p = [(v, p) for v in rd.VENDORS_P for p in rd.PRODUCTS if Omega[v, p]]
    #Generate labor costs for full- and part-time employers
    ft_hourly = query('full time labor costs: ', **q_opts(40, [25], int, DEFAULT))
    pt_hourly = 30 if ft_hourly == 40 else 20
    rd.C_alpha = ft_hourly * hpt * T
    rd.C_beta = pt_hourly * hpt
    # Generate various sensitivity parameters
    rd.gamma = query('gamma: ', **q_opts(.5, [.5, 1, 1.5, 2], float, DEFAULT))
    rd.phi_put = query('phi_1: ', **q_opts(1, [.75, 1], float, DEFAULT))
    rd.phi_pick = rd.phi_put
    # Generate volume of the truck
    rd.script_Q = 15000
    # Generate product characteristics (Volume and Weight)
    rd.V_p = {p: round(random.uniform(.01, 1.0), 2) for p in rd.PRODUCTS}
    rd.V_q = {q: round(random.uniform(.01, 1.0), 2) for q in rd.FASHION}
    rd.W_p = {p: round(random.uniform(0.1, .99), 2) for p in rd.PRODUCTS}
    rd.W_q = {q: round(random.uniform(0.1, .99), 2) for q in rd.FASHION}
    # Generate cost of various technologies: (capacity, cost) pairs.
    putaway_tech_choices = [(600, 0), (1200, 5945), (2400, 19817),
                            (3600, 26849), (4800, 57534), (6000, 76712)]
    picking_tech_choices = [(100, 639), (200, 3388), (300, 7671),
                            (400, 11506), (500, 17900), (1000, 51141)]
    rd.MHE = dict(zip(rd.PUTAWAY, [9, 27, 37, 87, 0, 0]))
    temp = dict(zip(tech, putaway_tech_choices + picking_tech_choices))
    # Amortize technology cost over the planning horizon.
    Ct = {t: int((temp[t][1] / tpy) * T) for t in tech}
    rd.C_put = {i: Ct[i] for i in rd.PUTAWAY}
    rd.C_pick = {j: Ct[j] for j in rd.PICKING}
    Lambda = {t: temp[t][0] * hpt for t in tech}
    rd.lambda_put = {i: Lambda[i] for i in rd.PUTAWAY}
    rd.lambda_pick = {j: Lambda[j] for j in rd.PICKING}
    spt = itertools.product(rd.STORES, rd.PRODUCTS, rd.TIMES)
    sq = itertools.product(rd.STORES, rd.FASHION)
    # Demand ~ Normal(Base_Load, 0.4*Base_Load), truncated at zero.
    Base_Load = 4000. / (P + Q) if AD is None else AD
    random_load = Base_Load, .4 * Base_Load
    daily_load = lambda: int(
        max([0, round(random.normalvariate(*random_load), 0)]))
    rd.Demand = {(s, p, t): daily_load() for s, p, t in spt}
    rd.X_osq = {(s, q): daily_load() * T for s, q in sq}
    rd.X_ivq = {(v, q): sum([rd.X_osq[s, q] for s in rd.STORES]) * Omega[v, q]
                for v, q in vqq}
    rd.BigM = sum(rd.Demand.itervalues()) + sum(rd.X_osq.itervalues())
    rd.M_MHE = 87 * (500 + 500) # Max MHE * (UPPER BOUNDS OF ALPHA AND BETA)
    #-------------------------------------------------------------------
    # GENERATE TRANSPORTATION COSTS
    #-------------------------------------------------------------------
    TransportDataA = [
        25, 50, 75, 100, 150, 200, 250, 300, 350, 400, 500, 600, 700, 800,
        900, 1000, 1100
    ]
    TransportDataB = [
        40.42, 45.59, 52.27, 66.13, 75.28, 85.67, 97.5, 110.96, 126.26,
        143.71, 186.09, 217.05, 253.17, 297.11, 344.43, 401.77, 468.61
    ]
    TransportDataC = [
        427.6, 441.14, 482.83, 492.45, 521.43, 541.39, 570.02, 598.03,
        622.3, 659.96, 727.61, 802.19, 884.41, 975.07, 1075.01, 1185.2,
        1306.68
    ]
    TransportDataD = [
        0.022, 0.027, 0.029, 0.034, 0.036, 0.041, 0.042, 0.045, 0.047,
        0.05, 0.055, 0.061, 0.067, 0.074, 0.081, 0.09, 0.099
    ]
    DistanceInMiles = TransportDataA
    FixedCost = dict(zip(TransportDataA, TransportDataB))
    Variable_Fixed = dict(zip(TransportDataA, TransportDataC))
    Variable = dict(zip(TransportDataA, TransportDataD))
    '''Randomly generate distances'''
    dist_VW, dist_WS, Cf, Cv, Cvf = {}, {}, {}, {}, {}
    # Vendors sit at least 250 miles from the warehouse.
    for v in rd.VENDORS:
        dist_VW[v] = random.choice([x for x in DistanceInMiles if x >= 250])
        Cf[v] = FixedCost[dist_VW[v]]
        Cv[v] = Variable[dist_VW[v]]
        Cvf[v] = Variable_Fixed[dist_VW[v]]
    # Stores: 40% near (<=300 mi), 30% mid (300-800 mi), 30% anywhere.
    for s in rd.STORES:
        rnd_num = random.random()
        if rnd_num <= .4:
            dist_WS[s] = random.choice(
                [x for x in DistanceInMiles if x <= 300])
        elif .4 < rnd_num <= .7:
            dist_WS[s] = random.choice(
                [x for x in DistanceInMiles if 300 <= x <= 800])
        else:
            dist_WS[s] = random.choice(DistanceInMiles)
        Cf[s] = FixedCost[dist_WS[s]]
        Cv[s] = Variable[dist_WS[s]]
        Cvf[s] = Variable_Fixed[dist_WS[s]]
    rd.C_vs = {s: Cv[s] for s in rd.STORES}
    rd.C_fv = {v: Cvf[v] + Cf[v] for v in rd.VENDORS}
    rd.C_fs = {s: Cvf[s] + Cf[s] for s in rd.STORES}
    rd.C_vv = {v: Cv[v] for v in rd.VENDORS}
    #-------------------------------------------------------------------
    # CREATE BHANU'S HEURISTIC FILES
    #-------------------------------------------------------------------
    warehouses = ['w1']
    WarehouseVolume = {w: 100000000 for w in warehouses}
    K = {s: 10000000 for s in rd.STORES}
    key = 'v{}q{}s{}p{}t{}'.format(V, Q, S, P, T)
    filename = 'WITPdataSet_' + key + '.txt'
    with open(filename, 'wb') as g:
        Xpress_data(g, 'WarehouseVolume', WarehouseVolume, warehouses)
        Xpress_data(g, 'StoreVolume', K, rd.STORES)
        Xpress_data(g, 'StoreDemand', rd.Demand,
                    [rd.STORES, rd.PRODUCTS, rd.TIMES])
        Xpress_data(g, 'FixedCostVendorToWarehouse', Cf,
                    [rd.VENDORS_P, warehouses])
        Xpress_data(g, 'FixedCostWarehouseToStore', Cf,
                    [warehouses, rd.STORES])
        Xpress_data(g, 'Variable_FixedCostVendorToWarehouse', Cvf,
                    [rd.VENDORS_P, warehouses])
        Xpress_data(g, 'VariableCostVendorToWarehouse', Cv,
                    [rd.VENDORS_P, warehouses])
        Xpress_data(g, 'Variable_FixedCostWarehouseToStore', Cvf,
                    [warehouses, rd.STORES])
        Xpress_data(g, 'VariableCostWarehouseToStore', Cv,
                    [warehouses, rd.STORES])
        Xpress_data(g, 'ProductVolume', rd.V_p, rd.PRODUCTS)
        Xpress_data(g, 'ProductWeight', rd.W_p, rd.PRODUCTS)
        Xpress_data(g, 'MapVendorToProduct', Omega,
                    [rd.VENDORS_P, rd.PRODUCTS])
        Xpress_data(g, 'LeadTimeWarehouseToStores', rd.L_s,
                    [warehouses, rd.STORES])
        Xpress_data(g, 'StoreDemandForFashionProducts', rd.X_osq,
                    [rd.STORES, rd.FASHION])
        Xpress_data(g, 'FixedCostFashionVendorToWarehouse', Cf,
                    [rd.VENDORS_Q, warehouses])
        Xpress_data(g, 'Variable_FixedCostFashionVendorToWarehouse', Cvf,
                    [rd.VENDORS_Q, warehouses])
        Xpress_data(g, 'VariableCostFashionVendorToWarehouse', Cv,
                    [rd.VENDORS_Q, warehouses])
        Xpress_data(g, 'FashionProductVolume', rd.V_q, rd.FASHION)
        Xpress_data(g, 'FashionProductWeight', rd.W_q, rd.FASHION)
        Xpress_data(g, 'MapFashionVendorToProduct', Omega,
                    [rd.VENDORS_Q, rd.FASHION])
    print "Created {FILE}".format(FILE=filename)
    # Substitution dictionary for the C# template (all values stringified).
    SED = {}
    SED['FILENAMEVAL'] = key
    SED['FULLTIMECOSTVAL'] = rd.C_alpha / T
    SED['PARTTIMECOSTVAL'] = rd.C_beta
    SED['GAMMAVAL'] = rd.gamma
    SED['PHIVAL'] = rd.phi_put
    SED['BEGINTIMEVAL'] = rd.tb - 1
    SED['ENDTIMEVAL'] = rd.te - 1
    SED['DUETIMEVAL'] = rd.ty - 1
    SED['PROCESSINGTIMEVAL'] = rd.pt
    # Large instances get fewer heuristic iterations.
    LARGE_FILE = sum(1 for _ in open(filename)) > 100000
    SED['NOTOTALITERATIONSVAL'] = 200 if LARGE_FILE else 1000
    SED['NOOFITERATIONSVAL'] = 200 if LARGE_FILE else 1000
    SED['NUMSWAPITERATIONSVAL'] = 3 if LARGE_FILE else 5
    SED['STOPITERVAL'] = 3 if LARGE_FILE else 5
    SED = {k: str(v) for k, v in SED.items()}
    # Instantiate the template into TPH.cs (placeholders live in the first
    # 100 lines) and build it with xbuild.
    with open('BhanuCode/WITP-TPH/TEMPLATE.cs', 'r') as f:
        lines = f.readlines()
    for i in range(100):
        lines[i] = multiple_replace(lines[i], SED)
    with open('BhanuCode/WITP-TPH/TPH.cs', 'w') as f:
        for line in lines:
            f.write(line)
    _ = bash_command('xbuild BhanuCode/WITP-TPH.sln')
    _ = bash_command('rm -f WITP-TPH.exe')
    _ = bash_command('cp BhanuCode/WITP-TPH/bin/Debug/WITP-TPH.exe ./')
    _ = bash_command('cp BhanuCode/WITP-TPH/TPH.cs ./')
    print "Compiled WITP-TPH.exe"
    #-------------------------------------------------------------------
    # CREATE PYOMO FILES
    #-------------------------------------------------------------------
    with open('Pickled_Data', 'wb') as f:
        pickle.dump(rd, f, protocol=-1)
    print "Pickled Data"
    # Success is signalled by the compiled heuristic existing on disk.
    return rd if os.path.isfile('WITP-TPH.exe') else False
def get_file(path, version):
    """Return the file record stored under *path* for *version*.

    A *version* of 'latest' is first resolved to a concrete version
    number through the database; the file lookup itself is then
    delegated to the module-level query() helper.
    """
    resolved = query('latest') if version == 'latest' else version
    return query('file', resolved, path)
def instance_generator(S, V, P, Q, T, AD=None, DEFAULT=True, seed=None):
    """Randomly generate one warehouse-inventory test-problem (WITP) instance.

    Builds a Struct ``rd`` holding every index set and parameter for an
    instance with S stores, V vendors, P regular products, Q fashion
    products and T time periods, then:
      1. writes the instance to an Xpress-style text data file,
      2. instantiates Bhanu's C# heuristic template (TPH.cs), compiles it
         with xbuild and copies WITP-TPH.exe into the working directory,
      3. pickles ``rd`` to 'Pickled_Data'.

    Parameters:
        S, V, P, Q, T -- instance dimensions (stores, vendors, products,
                         fashion products, time periods).
        AD      -- average demand override; None uses 4000/(P+Q) per item.
        DEFAULT -- forwarded to q_opts(); presumably makes query() take the
                   default answer instead of prompting -- TODO confirm.
        seed    -- RNG seed; None prompts via query(), a negative value
                   draws a random seed instead.

    Returns ``rd`` on success, or False when WITP-TPH.exe was not produced.
    """
    I, J = 6, 6  # number of putaway / picking technology options
    hpt = 8  # hours per time period t
    tpy = 28  # time period per year
    # Resolve the RNG seed: prompt if not supplied, randomize if negative.
    if seed is None:
        seed = query('seed value: ', **q_opts(-1, float, DEFAULT))
    if seed < 0:
        seed = round(1000 * random.random(), 2)
    random.seed(seed)
    # -----------------------------------------------------------------------------
    # SET NUMBER OF EVERYTHING
    # -----------------------------------------------------------------------------
    rd = Struct()
    rd.STORES = list("s" + str(s + 1) for s in xrange(S))
    rd.PRODUCTS = list("p" + str(p + 1) for p in xrange(P))
    rd.FASHION = list("q" + str(q + 1) for q in xrange(Q))
    items = rd.PRODUCTS + rd.FASHION  # NOTE(review): appears unused below
    rd.PUTAWAY = list("i" + str(i + 1) for i in xrange(I))
    rd.PICKING = list("j" + str(j + 1) for j in xrange(J))
    tech = rd.PUTAWAY + rd.PICKING
    # NOTE(review): the string TIMES list is immediately overwritten by the
    # integer range on the next line.
    rd.TIMES = list("" + str(t + 1) for t in xrange(T))
    rd.TIMES = range(1, T + 1)
    rd.T_minus_One = dict(zip(rd.TIMES[1:] + rd.TIMES[0:-1], rd.TIMES))
    # Split the vendor pool between fashion (Vq) and regular (Vp)
    # proportionally to the product counts.
    Vq = int(Q * V / (P + Q))
    Vp = V - Vq
    rd.VENDORS_Q = list("vq" + str(v + 1) for v in xrange(Vq))
    rd.VENDORS_P = list("vp" + str(v + 1) for v in xrange(Vp))
    rd.VENDORS = rd.VENDORS_P + rd.VENDORS_Q
    # -----------------------------------------------------------------------------
    # GENERATE MODEL PARAMETERS
    # -----------------------------------------------------------------------------
    # Generate fashion period window
    rd.L_s = {s: 0 for s in rd.STORES}  # per-store lead time (all zero here)
    maxL = max(rd.L_s.values())
    rd.pt = 1  # processing time
    tb_range = range(1, T + 1)
    # Pick a default start near the end of the horizon; the bare except
    # swallows the IndexError raised when T < 8.
    # NOTE(review): on the first pass i == 0 so tb_range[-0] is tb_range[0];
    # the net effect is tb_default = tb_range[-7] when T >= 8 -- confirm
    # this is intentional.
    try:
        for i in xrange(8):
            tb_default = tb_range[-i]
    except:
        pass
    rd.tb = query('fashion period start: ',
                  **q_opts(tb_default, tb_range, int, DEFAULT, False))
    te_range = range(rd.tb, T - maxL - rd.pt + 1)
    rd.te = query('fashion period end: ',
                  **q_opts(te_range[-1], te_range, int, DEFAULT, False))
    rd.ty = rd.te + rd.pt + maxL  # due time = end + processing + max lead
    # Products per vendor (Python 2 integer floor division).
    P_ratio = P / Vp
    Q_ratio = Q / Vq
    Omega = {}
    v_to_p = []
    # Assign each regular vendor P_ratio products; the for-else hands any
    # leftover products to the last vendor. When vendors outnumber
    # products (P_ratio == 0), pair them one-to-one instead.
    if P_ratio > 0:
        PRODUCTS = list(rd.PRODUCTS)
        for v in rd.VENDORS_P:
            p_from_v = [PRODUCTS.pop(0) for _ in xrange(P_ratio)]
            v_to_p += [(v, p) for p in p_from_v]
        else:
            v_to_p += [(v, p) for p in PRODUCTS]
    else:
        v_to_p += zip(rd.VENDORS_P, rd.PRODUCTS)
    # Same assignment scheme for fashion vendors/products.
    if Q_ratio > 0:
        FASHION = list(rd.FASHION)
        for v in rd.VENDORS_Q:
            q_from_v = [FASHION.pop(0) for _ in xrange(Q_ratio)]
            v_to_p += [(v, q) for q in q_from_v]
        else:
            v_to_p += [(v, q) for q in FASHION]
    else:
        v_to_p += zip(rd.VENDORS_Q, rd.FASHION)
    # Omega is the 0/1 vendor-to-product incidence map.
    vqq = list(itertools.product(rd.VENDORS_Q, rd.FASHION))
    vpp = list(itertools.product(rd.VENDORS_P, rd.PRODUCTS))
    Omega = {(v, p): 1 if (v, p) in v_to_p else 0 for v, p in chain(vqq, vpp)}
    rd.Omega_q = [(v, q) for v in rd.VENDORS_Q for q in rd.FASHION
                  if Omega[v, q]]
    rd.Omega_p = [(v, p) for v in rd.VENDORS_P for p in rd.PRODUCTS
                  if Omega[v, p]]
    #Generate labor costs for full- and part-time employers
    ft_hourly = query('full time labor costs: ',
                      **q_opts(40, [25], int, DEFAULT))
    pt_hourly = 30 if ft_hourly == 40 else 20
    rd.C_alpha = ft_hourly * hpt * T  # full-time cost over the whole horizon
    rd.C_beta = pt_hourly * hpt       # part-time cost per period
    # Generate various sensitivity parameters
    rd.gamma = query('gamma: ', **q_opts(.5, [.5, 1, 1.5, 2], float, DEFAULT))
    rd.phi_put = query('phi_1: ', **q_opts(1, [.75, 1], float, DEFAULT))
    rd.phi_pick = rd.phi_put
    # Generate volume of the truck
    rd.script_Q = 15000
    # Generate product characteristics (Volume and Weight)
    rd.V_p = {p: round(random.uniform(.01, 1.0), 2) for p in rd.PRODUCTS}
    rd.V_q = {q: round(random.uniform(.01, 1.0), 2) for q in rd.FASHION}
    rd.W_p = {p: round(random.uniform(0.1, .99), 2) for p in rd.PRODUCTS}
    rd.W_q = {q: round(random.uniform(0.1, .99), 2) for q in rd.FASHION}
    # Generate cost of various technologies: (capacity, annual cost) pairs.
    putaway_tech_choices = [(600, 0), (1200, 5945), (2400, 19817),
                            (3600, 26849), (4800, 57534), (6000, 76712)]
    picking_tech_choices = [(100, 639), (200, 3388), (300, 7671),
                            (400, 11506), (500, 17900), (1000, 51141)]
    rd.MHE = dict(zip(rd.PUTAWAY, [9, 27, 37, 87, 0, 0]))
    temp = dict(zip(tech, putaway_tech_choices + picking_tech_choices))
    # Scale annual technology cost to the T-period horizon.
    Ct = {t: int((temp[t][1] / tpy) * T) for t in tech}
    rd.C_put = {i: Ct[i] for i in rd.PUTAWAY}
    rd.C_pick = {j: Ct[j] for j in rd.PICKING}
    Lambda = {t: temp[t][0] * hpt for t in tech}  # capacity per period
    rd.lambda_put = {i: Lambda[i] for i in rd.PUTAWAY}
    rd.lambda_pick = {j: Lambda[j] for j in rd.PICKING}
    # Random demand: normal around Base_Load with 40% std deviation,
    # truncated at zero.
    spt = itertools.product(rd.STORES, rd.PRODUCTS, rd.TIMES)
    sq = itertools.product(rd.STORES, rd.FASHION)
    Base_Load = 4000. / (P + Q) if AD is None else AD
    random_load = Base_Load, .4 * Base_Load
    daily_load = lambda: int(max([0, round(random.normalvariate(*random_load), 0)]))
    rd.Demand = {(s, p, t): daily_load() for s, p, t in spt}
    rd.X_osq = {(s, q): daily_load() * T for s, q in sq}
    rd.X_ivq = {(v, q): sum([rd.X_osq[s, q] for s in rd.STORES]) * Omega[v, q]
                for v, q in vqq}
    rd.BigM = sum(rd.Demand.itervalues()) + sum(rd.X_osq.itervalues())
    rd.M_MHE = 87 * (500 + 500)  # Max MHE * (UPPER BOUNDS OF ALPHA AND BETA)
    #-------------------------------------------------------------------
    # GENERATE TRANSPORTATION COSTS
    #-------------------------------------------------------------------
    # Distance (miles) -> fixed / variable-fixed / variable cost tables.
    TransportDataA = [25, 50, 75, 100, 150, 200, 250, 300, 350, 400, 500,
                      600, 700, 800, 900, 1000, 1100]
    TransportDataB = [40.42, 45.59, 52.27, 66.13, 75.28, 85.67, 97.5,
                      110.96, 126.26, 143.71, 186.09, 217.05, 253.17,
                      297.11, 344.43, 401.77, 468.61]
    TransportDataC = [427.6, 441.14, 482.83, 492.45, 521.43, 541.39, 570.02,
                      598.03, 622.3, 659.96, 727.61, 802.19, 884.41, 975.07,
                      1075.01, 1185.2, 1306.68]
    TransportDataD = [0.022, 0.027, 0.029, 0.034, 0.036, 0.041, 0.042,
                      0.045, 0.047, 0.05, 0.055, 0.061, 0.067, 0.074,
                      0.081, 0.09, 0.099]
    DistanceInMiles = TransportDataA
    FixedCost = dict(zip(TransportDataA, TransportDataB))
    Variable_Fixed = dict(zip(TransportDataA, TransportDataC))
    Variable = dict(zip(TransportDataA, TransportDataD))
    '''Randomly generate distances'''
    dist_VW, dist_WS, Cf, Cv, Cvf = {}, {}, {}, {}, {}
    # Vendors sit at least 250 miles from the warehouse.
    for v in rd.VENDORS:
        dist_VW[v] = random.choice([x for x in DistanceInMiles if x >= 250])
        Cf[v] = FixedCost[dist_VW[v]]
        Cv[v] = Variable[dist_VW[v]]
        Cvf[v] = Variable_Fixed[dist_VW[v]]
    # Stores: ~40% near (<=300 mi), ~30% mid (300-800 mi), rest anywhere.
    for s in rd.STORES:
        rnd_num = random.random()
        if rnd_num <= .4:
            dist_WS[s] = random.choice(
                [x for x in DistanceInMiles if x <= 300])
        elif .4 < rnd_num <= .7:
            dist_WS[s] = random.choice(
                [x for x in DistanceInMiles if 300 <= x <= 800])
        else:
            dist_WS[s] = random.choice(DistanceInMiles)
        Cf[s] = FixedCost[dist_WS[s]]
        Cv[s] = Variable[dist_WS[s]]
        Cvf[s] = Variable_Fixed[dist_WS[s]]
    rd.C_vs = {s: Cv[s] for s in rd.STORES}
    rd.C_fv = {v: Cvf[v] + Cf[v] for v in rd.VENDORS}
    rd.C_fs = {s: Cvf[s] + Cf[s] for s in rd.STORES}
    rd.C_vv = {v: Cv[v] for v in rd.VENDORS}
    #-------------------------------------------------------------------
    # CREATE BHANU'S HEURISTIC FILES
    #-------------------------------------------------------------------
    # Single effectively-uncapacitated warehouse; K bounds store volume.
    warehouses = ['w1']
    WarehouseVolume = {w: 100000000 for w in warehouses}
    K = {s: 10000000 for s in rd.STORES}
    key = 'v{}q{}s{}p{}t{}'.format(V, Q, S, P, T)
    filename = 'WITPdataSet_' + key + '.txt'
    # Dump every table in the Xpress text format expected by the heuristic.
    with open(filename, 'wb') as g:
        Xpress_data(g, 'WarehouseVolume', WarehouseVolume, warehouses)
        Xpress_data(g, 'StoreVolume', K, rd.STORES)
        Xpress_data(g, 'StoreDemand', rd.Demand,
                    [rd.STORES, rd.PRODUCTS, rd.TIMES])
        Xpress_data(g, 'FixedCostVendorToWarehouse', Cf,
                    [rd.VENDORS_P, warehouses])
        Xpress_data(g, 'FixedCostWarehouseToStore', Cf,
                    [warehouses, rd.STORES])
        Xpress_data(g, 'Variable_FixedCostVendorToWarehouse', Cvf,
                    [rd.VENDORS_P, warehouses])
        Xpress_data(g, 'VariableCostVendorToWarehouse', Cv,
                    [rd.VENDORS_P, warehouses])
        Xpress_data(g, 'Variable_FixedCostWarehouseToStore', Cvf,
                    [warehouses, rd.STORES])
        Xpress_data(g, 'VariableCostWarehouseToStore', Cv,
                    [warehouses, rd.STORES])
        Xpress_data(g, 'ProductVolume', rd.V_p, rd.PRODUCTS)
        Xpress_data(g, 'ProductWeight', rd.W_p, rd.PRODUCTS)
        Xpress_data(g, 'MapVendorToProduct', Omega,
                    [rd.VENDORS_P, rd.PRODUCTS])
        Xpress_data(g, 'LeadTimeWarehouseToStores', rd.L_s,
                    [warehouses, rd.STORES])
        Xpress_data(g, 'StoreDemandForFashionProducts', rd.X_osq,
                    [rd.STORES, rd.FASHION])
        Xpress_data(g, 'FixedCostFashionVendorToWarehouse', Cf,
                    [rd.VENDORS_Q, warehouses])
        Xpress_data(g, 'Variable_FixedCostFashionVendorToWarehouse', Cvf,
                    [rd.VENDORS_Q, warehouses])
        Xpress_data(g, 'VariableCostFashionVendorToWarehouse', Cv,
                    [rd.VENDORS_Q, warehouses])
        Xpress_data(g, 'FashionProductVolume', rd.V_q, rd.FASHION)
        Xpress_data(g, 'FashionProductWeight', rd.W_q, rd.FASHION)
        Xpress_data(g, 'MapFashionVendorToProduct', Omega,
                    [rd.VENDORS_Q, rd.FASHION])
    print "Created {FILE}".format(FILE=filename)
    # Substitution map for the C# template; all values become strings.
    SED = {}
    SED['FILENAMEVAL'] = key
    SED['FULLTIMECOSTVAL'] = rd.C_alpha / T
    SED['PARTTIMECOSTVAL'] = rd.C_beta
    SED['GAMMAVAL'] = rd.gamma
    SED['PHIVAL'] = rd.phi_put
    # C# side is 0-indexed, hence the -1 shifts -- TODO confirm.
    SED['BEGINTIMEVAL'] = rd.tb - 1
    SED['ENDTIMEVAL'] = rd.te - 1
    SED['DUETIMEVAL'] = rd.ty - 1
    SED['PROCESSINGTIMEVAL'] = rd.pt
    # Big instances get fewer heuristic iterations.
    # NOTE(review): this open(filename) handle is never explicitly closed.
    LARGE_FILE = sum(1 for _ in open(filename)) > 100000
    SED['NOTOTALITERATIONSVAL'] = 200 if LARGE_FILE else 1000
    SED['NOOFITERATIONSVAL'] = 200 if LARGE_FILE else 1000
    SED['NUMSWAPITERATIONSVAL'] = 3 if LARGE_FILE else 5
    SED['STOPITERVAL'] = 3 if LARGE_FILE else 5
    SED = {k: str(v) for k, v in SED.items()}
    # Instantiate the template (placeholders assumed to live within the
    # first 100 lines -- TODO confirm) and compile the heuristic.
    with open('BhanuCode/WITP-TPH/TEMPLATE.cs', 'r') as f:
        lines = f.readlines()
    for i in range(100):
        lines[i] = multiple_replace(lines[i], SED)
    with open('BhanuCode/WITP-TPH/TPH.cs', 'w') as f:
        for line in lines:
            f.write(line)
    _ = bash_command('xbuild BhanuCode/WITP-TPH.sln')
    _ = bash_command('rm -f WITP-TPH.exe')
    _ = bash_command('cp BhanuCode/WITP-TPH/bin/Debug/WITP-TPH.exe ./')
    _ = bash_command('cp BhanuCode/WITP-TPH/TPH.cs ./')
    print "Compiled WITP-TPH.exe"
    #-------------------------------------------------------------------
    # CREATE PYOMO FILES
    #-------------------------------------------------------------------
    with open('Pickled_Data', 'wb') as f:
        pickle.dump(rd, f, protocol=-1)
    print "Pickled Data"
    return rd if os.path.isfile('WITP-TPH.exe') else False
def main(argv):
    """Interactive BUS-L entry point (hue + optional Arduino variant).

    Parses command-line flags, connects to a Philips hue light, optionally
    attaches an Arduino over a serial port, walks the user through picking
    a nearby bus stop and one or more routes, then hands off to query()
    to watch for buses and drive the light.

    argv -- command-line arguments (typically sys.argv[1:]);
            -s/--serial with value 'Y' enables the serial-port setup.
    """
    serial = False; options_per_page = 5
    # Parse -s/--serial; any value other than 'Y' leaves serial mode off.
    # NOTE(review): 'h' is accepted by getopt but never handled below.
    try:
        opts, args = getopt.getopt(argv,"hs:",["serial="])
    except getopt.GetoptError:
        print "main.py -serial <Y|N>"
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-s', '--serial'):
            if (arg == 'Y'):
                serial = True
    print """ ------------------------- | Welcome to BUS-L! | ------------------------- """
    #SYSTEM SETUP
    #Check for network access
    if internet_on() == False:
        print "No Internet = No BUSL! \n Please Check Your Interwebs"
        return -1
    #HUE SETUP
    #Check for hue
    print """ ------------------------- | Connecting to hue! | ------------------------- """
    # TODO: add in options to do this better (select a specific light, etc)
    lights = hue.hue_connect()
    light = lights[1]  # NOTE(review): hard-coded second light -- confirm index
    hue.ack(light)
    # Optional Arduino hookup over a user-selected serial port.
    if (serial):
        ports = get_ports()
        if len(ports) != 0:
            #Set up GPIO for hardware
            print """ ------------------------- | Select Serial Port: | -------------------------"""
            display_serial_ports(ports, 0)
            index = get_user_int("\nPlease pick a serial port for the Arduino: ", len(ports))
            if test_hardware(ports[index]) == False:
                print "No Microcontroller = No BUSL Lights! \n Please Check Your Ports"
    #USER CONFIG
    print """ ------------------------- | Lets Find Some Stops! | ------------------------- """
    location = get_user_addr()
    #find X closest stops to user
    all_stops = go.get_all_umich_stops()
    # Retry until the maps backend returns something other than ERROR_VAL.
    ordering = ERROR_VAL
    while (ordering == ERROR_VAL):
        ordering = maplib.order_by_distance(location, all_stops)
    #present X stop choices (with estimated distances)
    print """ ------------------------- | Stops closest to you: | -------------------------"""
    page_index = 0
    user_input = "c"
    # Page through stops until the user types a stop number.
    while (user_input == "c"):
        display_results(ordering, page_index, options_per_page)
        page_index += options_per_page
        user_input = raw_input("\nSelect a stop number or press 'c' to see more stops: ")
    #get preferred stop
    # ordering entries appear to be (walk_time, distance, stop) -- TODO confirm
    stop_info = ordering[int(user_input)]
    preferred_stop = stop_info[2]
    preferred_stop_dist = stop_info[1]
    preferred_stop_walk_time = stop_info[0]
    print "\nLooking up route information for " + preferred_stop.names[1] + " (" + preferred_stop.names[0] + ") . . ."
    #present available bus routes
    active_routes = preferred_stop.active_routes
    print """ ---------------------- | Active bus routes: | ----------------------"""
    page_index = 0
    user_input = ""
    # List every active route, then accept a comma-separated selection.
    while (user_input == ""):
        i = 0
        for route in active_routes:
            print "\n [" + str(i) + "]\t" + route
            i += 1
        page_index += options_per_page
        user_input = raw_input("\nSelect one or more route numbers (e.g. 1 or 1, 2, 3): ")
    route_nums = user_input.split(',')
    route_nums = [int(x.strip()) for x in route_nums]
    #get selection(s)
    preferred_routes = [active_routes[x] for x in route_nums]
    print "\nWatching for routes! \n"
    #START BUS LIGHT SERVER
    # Blocking call; walk time is converted from minutes to seconds.
    query(preferred_stop.names[0], preferred_stop_walk_time*60, preferred_routes, light)
# Example driver: build a metadata cache for a test library, then run a
# predicate query over it and print the matches.
from labels import get_label_from_key, get_label_trail_from_key_trail
from query import query

# Log everything (DEBUG and up) to example.log with timestamps.
logging.basicConfig(filename='example.log', level=logging.DEBUG, format='%(asctime)s: %(levelname)s: %(message)s', datefmt='%m/%d/%Y %H:%M:%S')
random.seed()
root_dir = join_and_normalize(os.getcwd(), "test_library")
meta_tree = create_item_meta_data_cache(root_dir)
# print(meta_tree)


def test_agg_predicate(item_file_path, meta_data_dict):
    """Match items whose artist is 'DAISHI DANCE' or starts with 'dj'.

    item_file_path is unused; meta_data_dict must be a dict that contains
    an 'artist' key for the item to match ('dj' prefix is matched
    case-insensitively via casefold).
    """
    return isinstance(meta_data_dict, dict) and "artist" in meta_data_dict and (meta_data_dict["artist"] == "DAISHI DANCE" or str(meta_data_dict["artist"]).casefold().startswith("dj"))


# Print every item in the cache that satisfies the predicate.
for q_result in query(test_agg_predicate, meta_tree):
    print(q_result)

# Earlier experiments, kept for reference:
# # all_key_trails = metatree.generate_all_keytrails(meta_tree)
# # sample_key_trails = random.sample(all_key_trails, 8)
# # specific_key_trail = ["ALBUM_00016", "TRACK_00006.ogg"]
# query_func = query.generate_query_regex_match(r"(?i)\bcl", fields=("artist", "title"))
# #query_func = query.generate_query_contains_string("cl", fields=("artist", "title"))
# result_set = query.run_query(meta_tree, query_func)
# for result in result_set:
#     print(result)
#     print(metatree.get_metadata(meta_tree, *result))
#     print("----")
def get_ident(ident, version):
    """Look up the symbol *ident* for *version*.

    The sentinel 'latest' is resolved to a concrete version number via
    the database before the identifier lookup is performed.
    """
    target_version = version if version != 'latest' else query('latest')
    return query('ident', target_version, ident)
def daily(start_date, end_date):
    """Nightly ETL: refresh video and social-media metrics in MySQL.

    Truncates VideoInfo and VidLifetimeAgeGroupGender, reloads the video
    catalog, pulls each analytics table via query() for the date window,
    then fetches Twitter, YouTube and Facebook totals and inserts today's
    snapshot rows. Exits the process on any MySQL error.

    start_date, end_date -- reporting window; note start_date is
    reassigned inside the loop from the globals lifetime_start /
    daily_start (presumably module-level dates -- TODO confirm).
    """
    tables = ['VidGeneralMetrics', 'VidInsightPlaybackLocationType', 'VidInsightTrafficSourceType', 'VidLifetimeAgeGroupGender']
    con = None
    # `now` is presumably a module-level run timestamp -- TODO confirm.
    print "Ran on: ", now
    print "Start Date: ", str(start_date)
    print "End Date: ", str(end_date)
    try:
        con = mdb.connect(**config.mysql_params)
        cur = con.cursor()
        # Full refresh of the video catalog and lifetime demographics.
        trunc_statement = """TRUNCATE VideoInfo"""
        cur.execute(trunc_statement)
        con.commit()
        trunc_VidLifetimeAgeGroupGender = """TRUNCATE VidLifetimeAgeGroupGender"""
        cur.execute(trunc_VidLifetimeAgeGroupGender)
        con.commit()
        videos = getVideoList()
        # videos appears to map VideoID -> (Name, PublishedDate) -- TODO confirm.
        for video in videos:
            insert_statement = """INSERT INTO VideoInfo (VideoID, Name, PublishedDate) VALUE (%s, %s, %s)"""
            cur.execute(insert_statement, [video, videos[video][0], videos[video][1]])
            con.commit()
        # Per-video analytics; the lifetime table uses its own start date.
        for video in videos:
            for table in tables:
                if table == 'VidLifetimeAgeGroupGender':
                    start_date = lifetime_start
                else:
                    start_date = daily_start
                response, columns = query(table, video, start_date, end_date)
                insert_statement = table_options[table]['insert']
                for row in response:
                    #print row
                    cur.execute(insert_statement, getValues(table, video, start_date, columns, row))
                con.commit()
        #get daily metrics
        start_date = daily_start
        response, columns = query("DailyMetrics", "", start_date, end_date)
        insert_statement = table_options["DailyMetrics"]['insert']
        for row in response:
            cur.execute(insert_statement, getValues("DailyMetrics", "", start_date, columns, row))
        con.commit()
        #Twitter: scrape the follower count off the profile page.
        response = urllib2.urlopen('TWITTERFEED')
        html = response.read()
        soup = BeautifulSoup(html)
        # put Day, NumFollowers in Database
        values = [datetime.today().strftime('%Y-%m-%d'), soup.findAll(href="/USER/followers")[1].find('strong').text.replace(",", "")]
        cur.execute("""INSERT INTO TwitterFollowers VALUES (%s, %s);""", values)
        con.commit()
        #Youtube Cumulative totals from the (legacy) gdata API.
        response = urllib2.urlopen('http://gdata.youtube.com/feeds/api/users/USER?alt=json')
        youtube_data = json.load(response)
        stats = youtube_data['entry']['yt$statistics']
        values = [datetime.today().strftime('%Y-%m-%d'), stats['subscriberCount'], stats['totalUploadViews']]
        cur.execute("""INSERT INTO YoutubeOverall VALUES (%s, %s, %s);""", values)
        con.commit()
        #Facebook page stats via the public Graph API.
        response = urllib2.urlopen('http://graph.facebook.com/USER/')
        facebook_data = json.load(response)
        values = [datetime.today().strftime('%Y-%m-%d'), facebook_data["talking_about_count"], facebook_data["likes"]]
        cur.execute("""INSERT INTO FacebookOverall VALUES (%s, %s, %s);""", values)
        con.commit()
    except mdb.Error, e:
        # NOTE(review): the connection is never closed, and sys.exit kills
        # the whole process on a single DB error.
        print "Error %d: %s" % (e.args[0],e.args[1])
        sys.exit(1)
def without():
    """Run a two-filter, row-limited query against the active layer and
    pull the first result (executed for its side effects; returns None)."""
    active_layer = iface.activeLayer()
    filtered = query(active_layer).where(qstring).where(qstring2)
    limited = filtered.top(top)
    result_iter = limited()
    first_row = result_iter.next()
def main():
    """Interactive BUS-L entry point (no hue, no argument parsing).

    Checks connectivity, optionally wires up an Arduino serial port, walks
    the user through choosing a nearby bus stop and one or more routes,
    then hands off to query() to watch for buses.
    """
    options_per_page = 5
    print """ ------------------------- | Welcome to BUS-L! | ------------------------- """
    #SYSTEM SETUP
    #Check for network access
    if internet_on() == False:
        print "No Internet = No BUSL! \n Please Check Your Interwebs"
        return -1
    ports = get_ports()
    if len(ports) != 0:
        #Set up GPIO for hardware
        print """ ------------------------- | Select Serial Port: | -------------------------"""
        display_serial_ports(ports, 0)
        index = get_user_int("\nPlease pick a serial port for the Arduino: ", len(ports))
        if test_hardware(ports[index]) == False:
            print "No Microcontroller = No BUSL Lights! \n Please Check Your Ports"
    #USER CONFIG
    location = get_user_addr()
    #find X closest stops to user
    all_stops = go.get_all_umich_stops()
    ordering = maplib.order_by_distance(location, all_stops)
    #present X stop choices (with estimated distances)
    print """ ------------------------- | Stops closest to you: | -------------------------"""
    page_index = 0
    user_input = "c"
    # Page through the ordered stops until a stop number is typed.
    while (user_input == "c"):
        display_results(ordering, page_index, options_per_page)
        page_index += options_per_page
        user_input = raw_input("\nSelect a stop number or press 'c' to see more stops: ")
    #get preferred stop
    # ordering entries appear to be (walk_time, distance, stop) -- TODO confirm
    stop_info = ordering[int(user_input)]
    preferred_stop = stop_info[2]
    preferred_stop_dist = stop_info[1]
    preferred_stop_walk_time = stop_info[0]
    print "\nLooking up route information for " + preferred_stop.names[1] + " (" + preferred_stop.names[0] + ") . . ."
    #present available bus routes
    active_routes = preferred_stop.active_routes
    print """ ---------------------- | Active bus routes: | ----------------------"""
    page_index = 0
    user_input = ""
    # List routes and accept a comma-separated selection.
    while (user_input == ""):
        i = 0
        for route in active_routes:
            print "\n [" + str(i) + "]\t" + route
            i += 1
        page_index += options_per_page
        user_input = raw_input("\nSelect one or more route numbers (e.g. 1 or 1, 2, 3): ")
    route_nums = user_input.split(',')
    route_nums = [int(x.strip()) for x in route_nums]
    #get selection(s)
    preferred_routes = [active_routes[x] for x in route_nums]
    print "\nWatching for routes! \n"
    #START BUS LIGHT SERVER
    # Blocking call; walk time converted from minutes to seconds.
    query(preferred_stop.names[0], preferred_stop_walk_time*60, preferred_routes)