def search(start_date, end_date, s):
    """Scrape Expresso's economy feed for every day in [start_date, end_date).

    Args:
        start_date / end_date: bounds handed to daterange() to enumerate days.
        s: seconds to sleep between requests (crude rate limiting).

    Returns:
        list[Hit]: one Hit(date, title, description) per scraped article.
    """
    collected_news = []
    # One session for the whole crawl instead of a fresh one per day.
    session = requests.Session()
    for single_date in daterange(start_date, end_date):
        response = session.get(
            "https://expresso.pt/api/molecule/latest/economia?offset="
            + single_date.strftime("%Y-%m-%d"))
        # Name the parser explicitly; a bare BeautifulSoup(...) emits a warning
        # and can pick a different parser depending on what is installed.
        parsed_html = BeautifulSoup(response.content, "html.parser")
        body = parsed_html.body
        desc = [de.text for de in body.findAll('h2', attrs={'class': 'lead'})]
        dates = [dat.text for dat in body.findAll('p', attrs={'class': 'publishedDate'})]
        titles = [div.find('a').contents[0] for div in body.findAll('h1', attrs={'class': 'title'})]
        # BUG FIX: the original indexed date[i]/titl[i] with range(len(desc)),
        # which raises IndexError whenever the three lists come back with
        # different lengths; zip() stops at the shortest list instead.
        for hit_date, hit_title, hit_desc in zip(dates, titles, desc):
            collected_news.append(Hit(hit_date, hit_title, hit_desc))
        print("current date: " + str(single_date))
        sleep(s)
    return collected_news
def load_historical_prices(db_name, no_of_rows):
    """Load up to no_of_rows rows of crude-index price history from SQLite.

    Args:
        db_name: path to the SQLite database file.
        no_of_rows: maximum number of rows to fetch (SQL LIMIT).

    Returns:
        A list of six parallel lists, indexed as:
        0=date, 1=open, 2=high, 3=low, 4=last, 5=change.
    """
    # Six parallel columns in result order.
    columns = [[] for _ in range(6)]
    conn = sqlite3.connect(db_name)
    try:
        cur = conn.cursor()
        # BUG FIX: LIMIT was built by string concatenation; use a bound
        # parameter instead (safe against injection / malformed input).
        cur.execute(
            'SELECT date, open, high, low, last, change FROM crude_index LIMIT ?',
            (int(no_of_rows),))
        for row in cur:
            for col, value in zip(columns, row):
                col.append(value)
    finally:
        # Previously the connection was never closed.
        conn.close()
    return columns
def get_path():
    """Fetch the product-plan listing page and return [dates, paths].

    Scrapes Zentao's productplan-browse page: element 0 is the list of plan
    dates (normalized to YYYY-MM-DD-style separators), element 1 the list of
    plan-view URL paths. Returns an error string when nothing is found.
    """
    html = s.get(host + "/zentao/productplan-browse-1.html", headers=header).content
    soup1 = BeautifulSoup(html, 'lxml')
    a = []
    try:
        # Dates appear as the text before "发布计划" ("release plan") links.
        riqi = re.findall('.html">(.*?)发布计划</a>', str(soup1))
        date = []
        for i in riqi:
            # BUG FIX: the original guard `if '.' or '/' in i:` is always
            # truthy, so the normalization ran unconditionally anyway;
            # the broken condition is dropped (replace() is a no-op when
            # the separator is absent, so behavior is unchanged).
            i = i.replace('.', '-').replace('/', '-').strip()
            date.append(i)
        a.append(date)
    except Exception:
        # Narrowed from a bare except (was swallowing SystemExit etc.).
        return ("没有获取到计划")
    try:
        path = re.findall(r'<a href="(/zentao/productplan-view-.*?)">', str(soup1))
        a.append(path)
    except Exception:
        return ("没有获取到计划")
    return a
def get_data_by_order(self, sale_order):
    # Look up a sale order by name and report its completed transfers
    # (pickings), including the date each one was marked done.
    # `sale_order` arrives as the order's name string and is rebound to
    # the matching recordset.
    sale_order = request.env['sale.order'].sudo().search(
        [('name', '=', sale_order)])
    data = []
    if sale_order:
        date = []
        picking_data = []
        # NOTE(review): this mail.message search filters on res_id only,
        # with no model filter — messages from other models that happen to
        # share those ids could match; confirm intent.
        mesagge = request.env['mail.message'].sudo().search(
            [('res_id', 'in', sale_order.picking_ids.mapped('id'))])
        stock_picking = request.env['stock.picking'].search(
            [('id', 'in', mesagge.mapped('res_id'))])
        # One entry per completed transfer.
        for item in stock_picking:
            if item.state == 'done':
                picking_data.append({
                    'Picking_id': item.id,
                    'Container': item.container_number,
                })
        # Attach the date of the tracking message that moved each picking
        # to 'Realizado' (done) to the matching picking entry.
        for mes in mesagge:
            if mes.tracking_value_ids.filtered(lambda a: a.new_value_char == 'Realizado'):
                for value in picking_data:
                    if mes.res_id == value['Picking_id']:
                        value.update({'Date': mes.date})
                        date.append(mes.date)
                    else:
                        continue
        data.append({
            'Data': picking_data,
            'DispatchedAt': date,
            'ClientName': sale_order.partner_id.name,
            'ClientEmail': sale_order.partner_id.email
        })
    # Empty list when the order name did not match anything.
    return data
def formatRetain(data, tp, size):
    # Build a 2-D report table (rows of 4-tuples, the writer's cell format)
    # for retention data: row 0 = reference dates, row 1 = assigned-user
    # counts, then one row per retention offset labelled "<n><size>".
    arr = [];
    date = [None];
    user = [(0,0,tp+'用户数',0)];  # header cell: "<tp> user count"
    maxlen = 0;
    # Oldest-first order; NOTE: reverses data['list'] in place (caller beware).
    data['list'].reverse()
    for day in data['list']:
        date.append((0,0,day['refdate'],0));
        user.append((0,0,day['assign_user'],0));
        # Track the longest retention series across all days.
        if len(day['item']) > maxlen :
            maxlen = len(day['item'])
    arr.append(date);
    arr.append(user);
    inx = 0;
    for day in data['list']:
        inx = inx + 1;
        for i in range(0, maxlen):
            # On the first day only, create one labelled row per offset.
            if i <= (len(arr)-2) and inx == 1 :
                arr.append([(0,0,str(i+1)+size,0)]);
            if i < len(day['item']) :
                tmp = day['item'][i]
                # NOTE(review): int/int division — truncates to 0 on
                # Python 2, yields the intended ratio on Python 3;
                # confirm which interpreter runs this.
                arr[i+2].append((0,0,float(int(tmp['percentage'])/int(day['assign_user'])),0));
            else :
                # Pad short series so every row has one cell per day.
                arr[i+2].append((0,0,None,0));
    return arr;
def split_datetime_columns(file):
    """ Splits a file's "_DATETIME" columns into "DATE" and "TIME" columns.

        There are three cases:
        1. datetime           DATE and TIME are populated appropriately.
        2. date               Only DATE is populated.
        3. None               Both DATE and TIME are None

        If there are absolutely no TIMEs in the file the TIME column is not
        kept.

        Arg:
            file - a DataFile object
    """
    dtimecol = file['_DATETIME']
    # Fresh Column objects replace any existing DATE/TIME entries.
    date = file['DATE'] = Column('DATE')
    time = file['TIME'] = Column('TIME')
    for dtime in dtimecol.values:
        if dtime:
            date.append(strftime_woce_date(dtime))
            # Only full datetimes carry a time; plain dates do not.
            # NOTE(review): in the date-only case nothing is appended to
            # TIME, so DATE and TIME can end up with different lengths —
            # confirm Column tolerates that.
            if type(dtime) is datetime:
                time.append(strftime_woce_time(dtime))
        else:
            date.append(None)
            time.append(None)
    # The source column is consumed.
    del file['_DATETIME']
    # No time present anywhere: fill the column with the fill value.
    # NOTE(review): `UNKNONW_TIME_FILL` looks like a typo for
    # "UNKNOWN_TIME_FILL" — confirm the constant's actual spelling at its
    # definition site before renaming.
    if not any(file['TIME'].values):
        file['TIME'].values = [UNKNONW_TIME_FILL] * len(file['TIME'])
def plot_usage_data(file_name):
    """Render usage history (users and GFlops over time) to a one-page PDF.

    :param file_name: destination path for the generated PDF.
    :return: None; the PDF is written as a side effect.
    """
    dates = []
    gflop_values = []
    active_counts = []
    registered_counts = []
    # Pull the usage table in date order and fan it out into plot series.
    for record in connection.execute(select([USAGE]).order_by(USAGE.c.date)):
        dates.append(record[USAGE.c.date])
        # Stored GFlops are scaled down by the 1.3 correction factor.
        gflop_values.append(record[USAGE.c.gflops] / 1.3)
        active_counts.append(record[USAGE.c.active_users])
        registered_counts.append(record[USAGE.c.registered_users])
    pdf_pages = PdfPages(file_name)
    # Top panel: user counts.
    pyplot.subplot(211)
    pyplot.plot(dates, active_counts, 'b-', label='Active')
    pyplot.plot(dates, registered_counts, 'g-', label='Registered')
    pyplot.ylabel('Users')
    pyplot.xticks(rotation=30)
    pyplot.grid(True)
    pyplot.legend(loc=0)
    # Bottom panel: compute throughput.
    pyplot.subplot(212)
    pyplot.plot(dates, gflop_values, 'r-')
    pyplot.xlabel('Date')
    pyplot.ylabel('GFlops')
    pyplot.xticks(rotation=30)
    pyplot.grid(True)
    pyplot.tight_layout()
    pdf_pages.savefig()
    pdf_pages.close()
def download_list():
    """Export the attendance records for the requested date as an .xlsx download.

    Reads `date_att` from the query string, gathers the matching Attendance
    rows, and streams them back as a spreadsheet attachment.
    """
    date_att = request.args.get('date_att')
    att_list = Attendance.query.filter_by(date_now=date_att).all()
    first_names = []
    last_names = []
    date = []
    present = []
    # Get the information from attendance table and populate the lists above.
    for att in att_list:
        first_names.append(att.owner.first_name)
        last_names.append(att.owner.last_name)
        date.append(att.date_now)
        present.append(att.present)
    # Dict keys become the spreadsheet headers; the lists are the columns.
    df = pd.DataFrame({
        'First Name': first_names,
        'Last Name': last_names,
        'Date': date,
        'Present': present
    })
    output = BytesIO()
    writer = pd.ExcelWriter(output, engine='xlsxwriter')
    df.to_excel(writer, 'Sheet1', index=False)
    writer.save()
    output.seek(0)
    # BUG FIX: the filename previously used `att.date_now` — `att` is the
    # loop variable, so this raised NameError whenever att_list was empty.
    # Use the requested date instead.
    return send_file(output,
                     attachment_filename='attendance:' + str(date_att) + '.xlsx',
                     as_attachment=True)
def display():
    """Fetch this user's expenses and income (with category names).

    Returns:
        ('home.html', data1, data2) where data1/data2 are zips of
        (category name, amount, date) for expenses and income respectively.

    NOTE(review): relies on a global `id` (shadowing the builtin) for the
    user id — confirm where it is bound.
    """
    db = pymysql.connect("localhost", "aayush", "deadpool", "Finance")
    try:
        cur = db.cursor()
        # BUG FIX: queries were built with str.format — SQL injection risk;
        # use DB-API parameter binding instead.
        cur.execute("SELECT * FROM Expenses WHERE ID = %s", (id,))
        temp = cur.fetchall()
        cur.execute("SELECT * FROM Income WHERE ID = %s", (id,))
        temp1 = cur.fetchall()
        name, amt, date, name2, amt2, date2 = [], [], [], [], [], []
        # Resolve each expense's category name (one lookup per row).
        for row in temp:
            cur.execute("SELECT * FROM Categories WHERE Cat_ID = %s", (row[1],))
            name.append(cur.fetchall()[0][1])
            amt.append(row[3])
            date.append(row[2])
        # Same for income categories.
        for row in temp1:
            cur.execute("SELECT * FROM Income_category WHERE ICat_ID = %s", (row[1],))
            name2.append(cur.fetchall()[0][1])
            amt2.append(row[3])
            date2.append(row[2])
    finally:
        # Previously the connection was never closed.
        db.close()
    data1 = zip(name, amt, date)
    data2 = zip(name2, amt2, date2)
    return 'home.html', data1, data2
def clean_date(df, col):
    """Normalize df[col] in place.

    Each cell is whitespace-split; the second token is parsed with the
    "%d/%m/%Y" format and the column is replaced with the resulting
    datetime objects. Returns None (mutates df).
    """
    second_tokens = [cell.split()[1] for cell in df[col].to_list()]
    df[col] = [datetime.strptime(token, "%d/%m/%Y") for token in second_tokens]
def user_view_bookings(request):
    # Render all appointments booked by the logged-in user, with the
    # doctor's name and specialization for each one.
    user_detail = user_details.objects.get(login_id=request.user.id)
    doct_name = []
    doct_special = []
    weekday = []
    date = []
    idss = []
    # NOTE(review): weekday_list is never read below and the weekday column
    # is filled with the constant 1 — looks like placeholder logic; confirm.
    weekday_list = [
        'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',
        'Saturday', 'Sunday'
    ]
    user_ref_id = user_details.objects.values_list(
        'id', flat=True).get(login_id=request.user.id)
    appointements = appointment.objects.filter(user_ref_id=user_ref_id)
    # Two doctor lookups per appointment (N+1 query pattern).
    for x in appointements:
        doct_name.append(
            doctor_details.objects.values_list(
                'username', flat=True).get(pk=x.doctor_ref_id))
        doct_special.append(
            doctor_details.objects.values_list(
                'specialization', flat=True).get(pk=x.doctor_ref_id))
        weekday.append(1)
        date.append(x.date)
        idss.append(x.id)
    # 'zippy' yields (name, specialization, weekday, date, id) per booking.
    return render(
        request, 'administration/user_view_bookings.html', {
            'zippy': zip(doct_name, doct_special, weekday, date, idss),
            'user_detail': user_detail
        })
def get_data_from_db(db, given_date):
    """Collect Q&A records whose datetime matches given_date into a DataFrame.

    Joins each matching QuestionAnswer document with its user's email and
    name; returns columns question/answer/email_id/name/date/time.
    """
    columns = {'question': [], 'answer': [], 'email_id': [],
               'name': [], 'date': [], 'time': []}
    cursor = db.QuestionAnswer.find({'project_id': '5dc15779c346052fe774a8d1'})
    for doc in cursor:
        # Substring match against the stringified datetime.
        if given_date not in str(doc['datetime']):
            continue
        # Skip records with no associated user.
        if len(doc['user_id']) > 0:
            user_info = list(
                db.UserInformation.find({"_id": ObjectId(doc['user_id'])}))[0]
            columns['question'].append(doc['question'])
            columns['answer'].append(doc['answer'])
            columns['email_id'].append(user_info['email_id'])
            columns['name'].append(user_info['name'])
            columns['date'].append(str(doc['datetime'].date()))
            columns['time'].append(str(doc['datetime'].time()))
    return pd.DataFrame(columns)
def create_figure():
    """Build a Figure plotting expense and income amounts against their dates."""
    fig = Figure()

    # Expense series (column 1 = date, column 2 = amount).
    expense_db = DB_e()
    exp_cursor = expense_db.conn.cursor()
    exp_cursor.execute('SELECT * FROM expense')
    expense_amounts = []
    expense_dates = []
    for record in exp_cursor.fetchall():
        print(record)
        expense_amounts.append(record[2])
        expense_dates.append(record[1])
    exp_cursor.close()

    # Income series, same column layout.
    income_db = DB()
    inc_cursor = income_db.conn.cursor()
    inc_cursor.execute('SELECT * FROM income')
    income_amounts = []
    income_dates = []
    for record in inc_cursor.fetchall():
        print(record)
        income_amounts.append(record[2])
        income_dates.append(record[1])
    inc_cursor.close()

    # Both series share one axes.
    axis = fig.add_subplot(1, 1, 1)
    axis.plot(expense_dates, expense_amounts, income_dates, income_amounts)
    return fig
def get_report_items(date_from):
    """Collect WB sales rows for the 7 days starting at date_from.

    Logistics and return operations are skipped; each remaining sale is
    mapped to an item via identify_item_code and returned as a report dict.
    """
    report_rows = []
    # The seven consecutive report dates.
    week_dates = [date_from + timedelta(days=offset) for offset in range(7)]
    for rr_dt in week_dates:
        for entry in frappe.get_all('WB Sales by Sales', filters={'rr_dt': rr_dt}):
            doc = frappe.get_doc('WB Sales by Sales', entry.name)
            # Skip logistics ("Логистика") and returns ("Возврат").
            if doc.supplier_oper_name in ['Логистика', 'Возврат']:
                continue
            item_data = identify_item_code(doc)
            report_rows.append({
                "item_code": item_data['item_code'],
                "item_name": item_data['item_name'],
                "qty": doc.quantity,
                "rate": doc.retail_amount,
                "award": doc.retail_commission,
                "uom": _("Nos"),
                "description": item_data['item_code']
            })
    return report_rows
def get_data(**args):
    """Fetch covid19india statewise data, validate it, and upload a CSV to HDFS.

    Raises ValueError when any record fails schema validation.
    """
    with request.urlopen('https://api.covid19india.org/data.json') as response:
        payload = json.loads(response.read())
    statewise = payload['statewise']
    # Every record must carry these string fields.
    v = Validator()
    v.schema = {'active': {'required': True, 'type': 'string'},
                'confirmed': {'required': True, 'type': 'string'},
                'deaths': {'required': True, 'type': 'string'},
                'recovered': {'required': True, 'type': 'string'},
                'deltaconfirmed': {'required': True, 'type': 'string'},
                'deltadeaths': {'required': True, 'type': 'string'},
                'deltarecovered': {'required': True, 'type': 'string'},
                'lastupdatedtime': {'required': True, 'type': 'string'},
                'migratedother': {'required': True, 'type': 'string'},
                'statecode': {'required': True, 'type': 'string'},
                'statenotes': {'required': True, 'type': 'string'},
                'state': {'required': True, 'type': 'string'},
                }
    for record in statewise:
        if not v.validate(record):
            print(v.errors)
            raise ValueError('API Data Not Valid')
    print('API Data is valid')
    df = pd.DataFrame(statewise,
                      columns=['active', 'confirmed', 'deaths', 'recovered', 'state'])
    # Stamp every row with today's date.
    df['date'] = [today] * len(df.index)
    sdf = spark.createDataFrame(df)
    sdf.write.mode("overwrite").csv("hdfs://localhost:9000/user/nineleaps/covid_data.csv")
    print("Covid Statewise Data CSV is uploaded to HDFS")
def getSentiment(Symbol):
    # Pull up to 1000 tweets mentioning $<Symbol> since 2017-01-01, score
    # each with VADER, average the compound scores over 5-minute buckets,
    # and return the result as headerless CSV text (date,sentScore).
    ticker = Symbol
    start = datetime(2017, 1, 1)
    end = datetime.today()
    tweetCriteria = got.manager.TweetCriteria().setQuerySearch(f'${ticker}')\
        .setSince(start.strftime('%Y-%m-%d'))\
        .setUntil(end.strftime('%Y-%m-%d'))\
        .setMaxTweets(1000)
    tweets = got.manager.TweetManager.getTweets(tweetCriteria)
    text = []
    date = []
    favorites = []
    retweets = []
    for item in tweets:
        text.append(item.text)
        date.append(item.date)
        favorites.append(item.favorites)
        retweets.append(item.retweets)
    tweetsdf = pd.DataFrame({
        'text': text,
        # Minute resolution is enough for the 5-minute resample below.
        'date': pd.to_datetime(date, infer_datetime_format=True).strftime("%Y-%m-%d %H:%M"),
        'favs': favorites,
        'retweets': retweets
    })
    # NOTE(review): downloads the lexicon on every call — consider doing
    # this once at import time.
    nltk.downloader.download('vader_lexicon')
    sid = SentimentIntensityAnalyzer()
    tweetsdf['sentiment'] = tweetsdf['text'].apply(sid.polarity_scores)
    # Keep only VADER's aggregate "compound" score.
    tweetsdf['sentScore'] = tweetsdf['sentiment'].apply(
        lambda x: x['compound'])
    sentiments = tweetsdf[['date', 'sentScore']]
    # NOTE(review): assigning into a column selection may trigger pandas'
    # SettingWithCopyWarning — confirm this mutation is intended.
    sentiments['date'] = pd.to_datetime(sentiments['date'],
                                        infer_datetime_format=True)
    # Mean compound score per 5-minute window; empty windows are dropped.
    s = sentiments.resample('5T', on='date').mean()
    s = s.reset_index()
    s = s.dropna()
    s['date'] = s['date'].dt.strftime("%Y-%m-%d %H:%M")
    sentDF = s.to_csv(index=False, header=False)
    return sentDF
def get_date(entry):
    """Parse the 'Date' field of every event dict in `entry`.

    Missing cells arrive as float NaN; for those an empty list is returned.
    Dates are expected in the "Mon DD, YYYY" format (e.g. "Jan 02, 2020").
    """
    if type(entry) is float:
        # NaN placeholder — nothing to parse.
        return []
    return [datetime.strptime(event['Date'], '%b %d, %Y') for event in entry]
def convert_to_df(self, ret_df, keyword):
    """Flatten ret_df's `keyword` column into a two-column DataFrame.

    The source index becomes the 'date' column and the column's cells
    become the 'value' column.
    """
    series_map = ret_df.to_dict()[keyword]
    return pd.DataFrame({'date': list(series_map.keys()),
                         'value': list(series_map.values())})
def regex3_date(line):
    """Build [start, end] datetimes from a (weekday, 'start-end') pair.

    line[0] is the weekday prefix; line[1] holds the two times joined by
    a dash. Each half is concatenated with the weekday and handed to
    get_datetime.
    """
    weekday = line[0]
    parts = line[1].split('-')
    return [get_datetime(weekday + parts[0]),
            get_datetime(weekday + parts[1])]
def get_schedule(self, roster):
    # Scrape a team's schedule page (Python 2 module: note the print
    # statement) and record each game's date, opponent, home/away status,
    # start time, opponent id, and win/loss result.
    date = []
    time = []
    opponent = []
    opp_id = []
    status = []
    win_loss_list = []
    # Loop through all rosters in list
    #for roster in roster_urls:
    page = requests.get(roster)
    tree = html.fromstring(page.content)  # NOTE(review): `tree` is unused below
    soup = BeautifulSoup(page.text, 'lxml')
    # Get schedule block
    schedule_block = soup.find(id="showschedule")
    grid_rows_soup = BeautifulSoup(str(schedule_block), 'lxml')
    grid_rows = grid_rows_soup.find_all("tr")
    print self.team
    for row in grid_rows:
        # Data rows carry neither 'stathead' nor 'colhead' classes.
        # NOTE(review): row['class'] raises KeyError for class-less <tr>
        # rows — confirm the markup always sets a class.
        if 'stathead' not in row['class'] and 'colhead' not in row['class'] :
            cur_date = row.td.string
            cur_date = parse(cur_date).date()
            date.append(cur_date)
            # 'vs' marks a home game, anything else is away.
            game_status = row.find(attrs={"class": "game-status"})
            if game_status.string == 'vs':
                status.append('home')
            else:
                status.append('away')
            opp_name = row.find(attrs={"class": "team-name"})
            opponent.append(opp_name.a.string.replace("'", ''))
            # Opponent id is the second-to-last path segment of the link.
            opp_id_raw = opp_name.a['href']
            opp_id.append(opp_id_raw.split('/')[-2])
            # Finished games show a win/loss cell; upcoming games show a time.
            win_loss = row.find(attrs={"class": re.compile("win|loss")})
            if win_loss:
                if win_loss.string == 'W':
                    win_loss = 'W'
                else:
                    win_loss = 'L'
                time_raw = 'NA'
            else:
                win_loss = 'NA'
                time_raw = row.find_all('td')[2].contents[0].string
            win_loss_list.append(win_loss)
            time.append(time_raw)
    # Record schedule by row
    for count, game in enumerate(opponent):
        self.record_schedule(date[count], opponent[count], status[count],
                             time[count], opp_id[count], win_loss_list[count])
def laloca():
    """Plot the Santa Marta sea-level time series from its CSV export.

    Reads rows of ``year,month,day,level``; keeps only rows whose level is
    positive, and plots level against date with month-resolution tick
    labels. Returns 'done' after the plot window is shown.
    """
    date = []
    level = []
    # BUG FIX: the file handle was previously opened and never closed.
    with open("sealevelsantamartha.csv") as datafile:
        for line in datafile:
            alldata = line.split(",")
            # Skip non-positive readings (gaps / sentinel values).
            if int(alldata[3]) > 0:
                date.append(alldata[0] + "-" + alldata[1] + "-" + alldata[2])
                level.append(int(alldata[3]))
    # Parse the assembled Y-M-D strings into date objects for the x-axis.
    x_date = [datetime.datetime.strptime(i, '%Y-%m-%d').date() for i in date]
    plt.figure(facecolor="white")
    plt.plot(x_date, level, 'r--')
    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
    plt.show()
    return 'done'
def insertInProfile(request, profile):
    # Download and aggregate Instagram posts for the given profile, limited
    # to the years ticked in the submitted form, then redirect back to the
    # profile page. (Original Italian comment translated: "add the date
    # from which to download the posts".)
    p = Profile.objects.get(username=profile)
    #L = instaloader.Instaloader()
    try:
        profile = instaloader.Profile.from_username(L.context, p.username)
    except LoginRequiredException:
        # NOTE(review): hard-coded credentials committed to source — move
        # them to configuration/secret storage.
        L.login("socialanalysiscld", "progettocloud123.")
        profile = instaloader.Profile.from_username(L.context, p.username)
    #
    #L = instaloader.Instaloader()
    #profile = instaloader.Profile.from_username(L.context, p.username)
    if p.isPrivate is False:
        if profile.is_private:
            # The account turned private since the last sync: just flag it.
            p.isPrivate = True
            p.save()
            result = postDates(p)
        else:
            # Years selected via the "checkYear" checkboxes in the request.
            dateREQ = request.POST.getlist('checkYear')
            date=[]
            for dateX in dateREQ:
                date.append(dateX)
            dates = []
            # nDatePostSaved is a ", "-joined string of already-synced years.
            dateDB = p.nDatePostSaved.split(", ")
            # Only fetch years not already recorded for this profile.
            for dateToInsert in date:
                if dateToInsert not in dateDB:
                    p.nDatePostSaved = p.nDatePostSaved + str(dateToInsert) + ", "
                    dates.append(dateToInsert)
            if len(dates) > 0:
                for data in dates:
                    # NOTE(review): dataPost appears to be
                    # (likes, comments, post count) — confirm savePost's
                    # return contract.
                    dataPost = savePost(p.username, data)
                    if dataPost[2] != 0 :
                        p.totalLikes = p.totalLikes + dataPost[0]
                        p.totalComments += dataPost[1]
                        p.postContacts += dataPost[2]
            p.save()
            result = postDates(p)
    elif p.isPrivate is True:
        if profile.is_private:
            result = postDates(p)
        else:
            # The account became public again.
            p.isPrivate=False
            result = postDates(p)
            p.save()
    # NOTE(review): this context dict (averages, engagement) is built but
    # never passed anywhere — the view returns a redirect; confirm whether
    # it is dead code.
    context = {
        'profile' : p,
        'dateMancanti': result,
        'averangeLikes': round(p.totalLikes/p.postContacts,2) if p.postContacts!=0 else 0,
        'averangeComments':round(p.totalComments/p.postContacts, 2) if p.postContacts!=0 else 0,
        'engagementProfile': round(float(p.totalLikes + p.totalComments) / (p.followers * p.postContacts),2) if p.postContacts!=0 else 0,
        'template' : "post.html",
    }
    return response.HttpResponseRedirect(reverse('polls:profile', args=(p.id,)))
def profile(request):
    # Profile page: session-gated; lists the available cities and this
    # user's upcoming (today-or-later) bookings pulled from MongoDB.
    if request.session.has_key('username'):
        client = MongoClient("")
        db=client.movies
        locations=db.location.find()
        cities=[]
        idofcity=[]
        for y in locations:
            cities.append(y['name'])
            idofcity.append(y['location_id'])
        cities=zip(cities,idofcity)
        bookedmovies=db.booking.find({"email":request.session['email']})
        moviesbooked=[]
        showtime=[]
        date=[]
        seats=[]
        amount=[]
        moviename=[]
        src=[]
        bookedcount=0
        present = datetime.now().date()
        for l in bookedmovies:
            # Only keep bookings dated today or later.
            date_time_obj = datetime.strptime(l['DATE'], '%Y-%m-%d')
            if date_time_obj.date()>=present:
                if l['showtime']:
                    showtime.append(l['showtime'])
                    # The screen key is encoded in the last 7 characters
                    # of the showtime string.
                    screen=l['showtime']
                    screen=screen[-7:];
                if l['DATE']:
                    date.append(l['DATE'])
                    bookedcount=bookedcount+1;
                # NOTE(review): l[screen] raises KeyError when the screen
                # key is missing, and the per-field `if`s can desynchronize
                # the parallel lists zipped below — confirm booking
                # documents always carry every field.
                if l[screen]:
                    seats.append(l[screen])
                if l['moviename']:
                    moviename.append(l['moviename'])
                if l['src']:
                    src.append(l['src'])
                if l['amount']:
                    amount.append(l['amount'])
        moviesbooked=zip(showtime,date,seats,amount,moviename,src)
        # NOTE(review): 'loop'/'loopinrow' come from module-level globals
        # `a` and `colum` — confirm where they are defined.
        context={
            'name':request.session['username'],
            'email':request.session['email'],
            'password':request.session['password'],
            'verify':request.session['verify'],
            'mobile':request.session['mobile'],
            'loop':a,
            'loopinrow':colum,
            "locations":cities,
            "wallet":request.session['wallet'],
            "booked":moviesbooked,
            "count":bookedcount
        }
        return render(request, 'Modify/profile.html',context)
    else:
        # Not logged in: bounce to the index page.
        return redirect(index)
def date_crawler(d, text):
    """Feed `text` to parser `d` and return its non-empty date strings.

    Each entry of d.parser_date is stripped and stringified; blank
    results are discarded.
    """
    d.feed(text)
    stripped = (str(raw.strip()) for raw in d.parser_date)
    return [value for value in stripped if value]
def simple():
    # Build a quarantine slot schedule from the first destination record:
    # the people needing rooms are split into batches of Total_rooms, one
    # batch per quarantine period; returns the zipped
    # (date, remaining, slot-label) rows plus the overall end date.
    dests = destination.objects.all()
    dic = {}
    k = 1
    for dest in dests:
        # k is 1 only on the first pass, so only the first destination
        # record is ever processed (k becomes 3 afterwards).
        if (k == 1):
            Total = dest.Total
            my_date = dest.start
            noofquarantine = dest.num_quarantine
            nonq = dest.num_free
            Total_rooms = dest.room
            Quarantine_days = dest.num_quarantine
            # Ceil-divide the roomed population into batches.
            num_days = (Total - noofquarantine - nonq) // Total_rooms
            remaining = (Total - noofquarantine - nonq) % Total_rooms
            if (remaining != 0):
                num_days += 1
            total_days = Quarantine_days * num_days
            date = []
            rem = []
            slot = []
            li = []
            for j in range(num_days):
                # Each slot starts one quarantine period after the previous.
                date1 = my_date + timedelta(days=(Quarantine_days) * j)
                slot1 = 'S' + str(j + 1)
                # People still waiting after this batch is housed.
                Remaining = (Total - noofquarantine - nonq) - (Total_rooms * (j + 1))
                date.append(date1)
                slot.append(slot1)
                if j == num_days - 1:
                    # Last batch: nobody left over by construction.
                    rem.append(0)
                else:
                    rem.append(Remaining)
                li.append(j)
            end_date = my_date + timedelta(days=total_days - Quarantine_days)
            zipped_list = zip(date, rem, slot)
            # dic={'end':end_date,'slots':slot,'rem':rem,'date':date,'num':li}
            k += 2
    # (Commented-out request-driven variant kept for reference.)
    # Total = (request.GET['total'])
    # my_date = (request.GET['startdate'])
    # noofquarantine = (request.GET['noofquarantine'])
    # nonq = (request.GET['nonq'])
    # Total_rooms = (request.GET['room'])
    # Quarantine_days = (request.GET['qt'])
    # num_days=(Total-noofquarantine-nonq)//Total_rooms
    # remaining=Total%Total_rooms
    # if (remaining!=0):
    #     num_days+=1
    # total_days = Quarantine_days * num_days
    # end_date = my_date + timedelta(days=total_days)
    # d = {'enddate': end_date, 'quara': noofquarantine, 'free': nonq}
    # NOTE(review): if there are no destination records, zipped_list and
    # end_date are unbound here and this raises NameError — confirm the
    # table is guaranteed non-empty.
    return zipped_list, end_date
def lambda_handler(event, context):
    """Return the chit-scheme details for the requesting user as an API response.

    Resolves the username (query string) to a phone number, loads every
    chitmast row for it, and returns parallel lists of name/branch/scheme/
    amount/chit_key/date as a JSON API Gateway response.
    """
    # Find the Phone_No from the username.
    username = event['queryStringParameters']['username']
    Phone_No = get_phone_no(username)
    cursor = connection.cursor()
    # branch_key -> branch name.
    cursor.execute('select * from branchmast')
    branch_dict = {row[0]: row[1] for row in cursor.fetchall()}
    # scheme_key -> scheme name.
    cursor.execute('select * from chitgroup')
    scheme_dict = {row[0]: row[2] for row in cursor.fetchall()}
    # All chit records for this phone number.
    query = 'select * from chitmast where Phone_No = %s'
    cursor.execute(query, (Phone_No, ))
    details = cursor.fetchall()
    name, branch, scheme, amount, date, chit_key = [], [], [], [], [], []
    # One entry per scheme, with keys resolved through the lookup dicts.
    for record in details:
        name.append(record[3])
        branch.append(branch_dict[record[12]])
        scheme.append(scheme_dict[record[1]])
        amount.append(record[8])
        chit_key.append(record[0])
        # Next due date: one month after the recorded date.
        date.append(add_x_month(record[6], 1))
    response = {
        'name': name,
        'branch': branch,
        'scheme': scheme,
        'amount': amount,
        'chit_key': chit_key,
        'date': date,
    }
    return {
        'statusCode': 200,
        'headers': {'Content-Type': 'application/json'},
        'body': json.dumps(response),
    }
def last_date(day_num):
    """Return ISO-formatted dates for the next `day_num` days starting today.

    (Original docstring, translated: "compute the dates of the following
    days".)
    """
    base = datetime.datetime.now()
    return [(base + datetime.timedelta(days=offset)).strftime('%Y-%m-%d')
            for offset in range(day_num)]
async def answer_purchase_date(message: types.Message, state: FSMContext):
    # FSM step: store the entered purchase date and prompt for the price.
    temp = message.text
    await state.update_data(purchase_date=temp)
    # NOTE(review): `date` is not defined in this handler — this appends to
    # a module-level list that grows across updates/users; confirm whether
    # that accumulator is still needed or is leftover debugging state.
    date.append(temp)
    # data = await state.get_data()
    # portfolio_name = data.get("portfolio_name")
    # stock_ticker = data.get("stock_ticker")
    # purchase_date = message.text
    # Prompt (Russian): "Enter the purchase price".
    await message.answer("Введите цену покупки")
    await Quotes.next()
def drop_time(data):
    """Collapse "Start date"/"End date" into a single "Date" column, in place.

    Rows whose start and end fall on the same calendar day keep that day in
    the new "Date" column; rows spanning days are dropped entirely.
    Returns the mutated DataFrame.
    """
    kept_days = []
    rows_to_drop = []
    for idx, row in data.iterrows():
        start_day = row["Start date"].split()[0]
        end_day = row["End date"].split()[0]
        if start_day == end_day:
            kept_days.append(start_day)
        else:
            # Spans more than one day — discard the row.
            rows_to_drop.append(idx)
    data.drop(["Start date", "End date"], axis=1, inplace=True)
    data.drop(data.index[rows_to_drop], inplace=True)
    data["Date"] = kept_days
    return data
def getRemInforList(self):
    """Return sorted reminder entries, or None when no data is available.

    Each entry is a tuple of (parsed date, (hour, minute), original index),
    sorted ascending so the earliest reminder comes first.
    """
    self.remInfo = self.remProcessor.getData()
    # Guard: no data at all. (Was `!= None`; identity comparison is the
    # idiomatic and reliable None check.)
    if self.remInfo is None or len(self.remInfo) == 0:
        return None
    date = []
    # SECURITY NOTE(review): eval() executes arbitrary code from the stored
    # 'date' field — if reminder data can ever come from outside this app,
    # switch to ast.literal_eval. Left as-is to preserve behavior.
    for index, info in enumerate(self.remInfo):
        date.append((eval(info['date']),
                     (int(info['time'][:2]), int(info['time'][3:5])),
                     index))
    date.sort()
    return date
def get_date():
    """Return weekly ISO date strings from the latest national-data date
    back to (at or just before) the first, newest first."""
    series = get_data_nazione()["data"].tolist()
    first = series[0]
    current = series[-1]
    dates = [current.strftime("%Y-%m-%d")]
    # Step back one week at a time until we pass the earliest date.
    while current > first:
        current = current - timedelta(days=0, weeks=1)
        dates.append(current.strftime("%Y-%m-%d"))
    return dates
def to_pd(tweetset):
    # Convert an iterable of tab-separated "id<TAB>date<TAB>tweet" lines
    # into a DataFrame. Row 0 is assumed to be a header and is dropped via
    # the [1:] slices. (Python 2 module: print statement below.)
    print 'Converting to Data Frame'
    ids=[]
    date=[]
    tweet = []
    for line in tweetset:
        ids.append(line.split('\t')[0])
        date.append(line.split('\t')[1])
        tweet.append(line.split('\t')[2])
    # Dates are parsed into real timestamps during construction.
    df = pd.DataFrame({'ids':ids[1:],'date':pd.to_datetime(date[1:]),'tweet':tweet[1:]})
    return df
def history(request, pk=None):
    """Render the upload history page for the user identified by `pk`.

    Passes the template a zip of (file name, upload date) pairs and a flag
    indicating whether any uploads exist.
    """
    uploads = Upload.objects.all().filter(user=pk)
    success = False
    file_names = []
    upload_dates = []
    for item in uploads:
        file_names.append(item.FileName)
        # At least one upload found.
        success = True
        print(item.FileName)
        upload_dates.append(item.date)
        # print(item.date)
    history_rows = zip(file_names, upload_dates)
    return render(request, "history.html",
                  {"success": success, "mylist": history_rows})
def scrape_google(self, stock_symbol):
    """Download and persist option-chain data for every expiration of a symbol.

    Calls and puts are saved separately; expirations already scraped today
    (detected via the calls CSV path) are skipped.
    """
    expiration_dates = self.google.get_expiration_dates(stock_symbol)
    for expiration in expiration_dates:
        date = [expiration['month'], expiration['day'], expiration['year']]
        # Expiration date components.
        exp_year = expiration['year']
        exp_month = expiration['month']
        exp_day = expiration['day']
        # Today's date components.
        cur_date = self.get_current_date()
        cur_year = cur_date['year']
        cur_month = cur_date['month']
        cur_day = cur_date['day']
        # Only fetch if today's snapshot for this expiration doesn't exist.
        path = "./option_data/"+str(exp_month)+"-"+str(exp_day)+"-"+str(exp_year)+"/"+str(cur_month)+"-"+str(cur_day)+"-"+str(cur_year)+"/"+stock_symbol+"_calls.csv"
        if not os.path.isfile(path):
            option_data = self.google.get_option_data(stock_symbol, date)
            self.save_option_data(stock_symbol, expiration, option_data['call'], "calls")
            self.save_option_data(stock_symbol, expiration, option_data['put'], "puts")
def parsePubDate(self, element):
    """Return the publication date text from a PubDate XML element.

    Prefers a free-form MedlineDate when present; otherwise assembles
    "Year [Season | Month [Day]]" from the individual child elements,
    joined by single spaces.
    """
    medline = element.find('MedlineDate')
    if medline is not None:
        # Free-form date string wins outright.
        return medline.text.strip()
    parts = [element.find('Year').text.strip()]
    season = element.find('Season')
    month = element.find('Month')
    if season is not None:
        parts.append(season.text.strip())
    elif month is not None:
        parts.append(month.text.strip())
        day = element.find('Day')
        if day is not None:
            parts.append(day.text.strip())
    return ' '.join(parts)
data_list = self.db.collect("mysql315.loopia.se", "kthstud@a68445", "2013IIstud!#", "aktivahuset_com", "meterevents", device, True) start_date = data_list[0][:data_list[0].find(" ")].split("-") start_date = date(int(start_date[0]), int(start_date[1]), int(start_date[2])) last_date = data_list[-1][:data_list[-1].find(" ")].split("-") last_date = date(int(last_date[0]), int(last_date[1]), int(last_date[2])) <<<<<<< HEAD weather_list = self.weather.fetchGroup(start_date, last_date) date_list = list() for condition in conditions: for day in weather_list: if condition in day.cond: date.append(day) ======= print "Fetching weather data.." weather_list = self.weather.fetchGroup(start_date, last_date) date_list = list() for condition in conditions: for day in weather_list: for cond in day.conditions: if condition in cond: if day.date.__str__() not in date_list: date_list.append(day.date.__str__()) print "Filtering.." >>>>>>> new datamanager
def map_fingerprint():
    # Build a standalone HTML page that geocodes and maps every life event
    # (address/event/date triple) of the GEDCOM individuals matching the
    # query criteria, joining the markers with a polyline.
    # (Python 2 module: print statement and string.join below.)
    args = request.args
    (gedcom, criteria, offset) = _get_data(args)
    # Back-link to the plain fingerprint view carrying the same query.
    target = "/fingerprint?first={}&middle={}&last={}&state={}&gedFile={}".format(cgi.escape(args['first'], True), cgi.escape(args['middle'], True), cgi.escape(args['last'], True), args.get('state', False), args['gedFile'])
    address = []
    event = []
    date = []
    try:
        for element in gedcom.element_list():
            # Do they match?
            if element.criteria_match(criteria):
                # A match, fingerprint them
                data = fingerprint_data(gedcom, element, offset)
                locations = data.get('locations')
                for location in locations:
                    where = location[2]
                    if where:
                        what = location[0]
                        when = location[1]
                        address.append(where)
                        event.append(what)
                        date.append(when)
    except Exception as e:
        # NOTE(review): any failure here silently yields a partial map.
        print e
        pass
    # Serialize the three parallel lists as JS array literals.
    # NOTE(review): values are not JS-escaped — a double quote in the data
    # would break the generated page.
    address_field = 'addresses = ["' + string.join(address, '","') + '"]\n'
    event_field = 'events = ["' + string.join(event, '","') + '"]\n'
    date_field = 'dates = ["' + string.join(date, '","') + '"]\n'
    # NOTE(review): the Google Maps API key is committed inside this
    # template — consider injecting it from configuration.
    html = '''
<!DOCTYPE html>
<meta charset="utf-8">
<html>
  <head>
    <title>Fingerprint</title>
    <link rel="stylesheet" href="/static/fingerprint.css">
  </head>
  <body>
    <h1>GEDcom Fingerprint : <a href="/">Home</a>, <a href="{}">Fingerprint</a></h1>
    <div id="map"></div>
    <script type="text/javascript">
      var geocoder;
      var map;
      var bounds;
      var coords;
      var path;
'''.format(target) + address_field + event_field + date_field + '''
      function initMap() {
        geocoder = new google.maps.Geocoder();
        bounds = new google.maps.LatLngBounds()
        map = new google.maps.Map(document.getElementById('map'), {
          center: {lat: -34.397, lng: 150.644},
          zoom: 10
        });
        coords = [];
        for (var i=0; i<addresses.length; ++i) {
          coords.push(new google.maps.LatLng(0,0));
        }
        path = new google.maps.Polyline({
          path: coords,
          geodesic: false,
          strokeColor: '#FF0000',
          strokeOpacity: 1.0,
          strokeWeight: 2
        });
        path.setMap(map);
        for (var i=0; i<addresses.length; ++i) {
          setTimeout(codeAddress, 100*i, i, addresses[i], events[i], dates[i]);
        }
      }

      function codeAddress(idx, address, event, date) {
        geocoder.geocode( {
          'address': address}, function(results, status) {
          if (status == google.maps.GeocoderStatus.OK) {
            // map.setCenter(results[0].geometry.location);
            var wobble = 0.001;
            var angle = (3.1415926 * 2.0 * idx) / 7.0;
            var pos = results[0].geometry.location;
            bounds.extend(pos);
            map.setCenter(bounds.getCenter());
            map.fitBounds(bounds);
            coords[idx] = pos;
            path.setPath(coords);
            var newLat = pos.lat() + (Math.cos(angle) * wobble);
            var newLng = pos.lng() + (Math.sin(angle) * wobble);
            var marker = new google.maps.Marker({
              map: map,
              position: new google.maps.LatLng(newLat, newLng),
              label: event,
              title: event + ', ' + date + ' @ ' + address
            });
          } else {
            alert("Geocode was not successful for the following reason: " + status);
          }
        });
      }
    </script>
    <script async defer
      src="https://maps.googleapis.com/maps/api/js?key=AIzaSyD15Bk3bzpSS7VKGH_MyDOCQU-TgsDUQ90&callback=initMap">
    </script>
  </body>
</html>
'''
    return html