def get_daywrffiles(all_files, day):
    """Return the 25 files of the day from the all_files parameter.

    The 25 files are every file whose name contains the day's
    YYYY-MM-DD date, plus the 00:00 file of the following day.

    Keyword arguments:
    all_files -- list of strings containing all the files found
    day -- int representing the day of interest in format YYYYDDD
    """
    day_str = str(day)
    year = int(day_str[0:4])
    day_year = int(day_str[4:])
    # Resolve the ordinal day-of-year into a calendar date.
    date = datetime(year, 1, 1) + timedelta(day_year - 1)
    next_date = date + timedelta(1)
    # strftime replaces the hand-rolled zero padding of month/day.
    date_str = date.strftime('%Y-%m-%d')
    next_date_str = next_date.strftime('%Y-%m-%d') + '_00'
    day_files = [name for name in all_files if date_str in name]
    day_files.extend(name for name in all_files if next_date_str in name)
    return day_files
def create_scene_name(prefix, path, row, date, sufix):
    """Build a scene name: <prefix><path:3><row:3><year><doy:3><sufix>."""
    parts = date.timetuple()
    path_part = str(path).zfill(3)
    row_part = str(row).zfill(3)
    doy_part = str(parts.tm_yday).zfill(3)
    return '{}{}{}{}{}{}'.format(
        prefix, path_part, row_part, parts.tm_year, doy_part, sufix)
def scrape_house_plenary():
    """Scrape the Tweede Kamer plenary agenda (this week and the coming
    weeks) and upsert each meeting via update_or_create_assembly().

    Every detail URL is also appended to the module-level
    assembly_urls list.
    """
    # --- This week: one <h3> heading per meeting day ---
    url = "http://www.tweedekamer.nl/vergaderingen/plenaire_vergaderingen/deze_week/index.jsp"
    document = retrieve_if_not_exists(url, bypass_cache=bypass_cache)
    for element in document.cssselect("#columntwo")[0].xpath(".//h3"):
        date_string = "".join(element.xpath(".//text()")).strip()
        # NOTE(review): "%A %d %B" parses localized day/month names --
        # presumably a Dutch locale is active; confirm. The heading has
        # no year, so the current year is assumed (can be wrong for
        # agendas spanning New Year).
        date = datetime.strptime(date_string, "%A %d %B").replace(year=datetime.now().year)
        # Raw HTML of the sibling element holding the day's details.
        details_raw = lxml.etree.tostring(list(element.itersiblings())[0])
        assembly_detail_url = "http://www.tweedekamer.nl/vergaderingen/plenaire_vergaderingen/{0}".format(date.strftime("%Y%m%d"))
        update_or_create_assembly({
            "url": assembly_detail_url,
            "date": int(time.mktime(date.timetuple())),  # epoch seconds (local time)
            "house": "house",
            "type": "plenary",
            "details_raw": details_raw,
        })
        assembly_urls.append(assembly_detail_url)
    # --- Next weeks: headings carry a week number instead of a date ---
    url = "http://www.tweedekamer.nl/vergaderingen/plenaire_vergaderingen/volgende_weken/index.jsp"
    document = retrieve_if_not_exists(url, bypass_cache=bypass_cache)
    for element in document.cssselect("#columntwo")[0].xpath(".//h3"):
        date_string = "".join(element.xpath(".//text()")).strip()
        details_raw = lxml.etree.tostring(list(element.itersiblings())[0])
        try:
            week = re.findall("week ([0-9]+)", date_string)[0]
        except IndexError:
            # Heading without a week number: skip it.
            continue
        # Monday ('%w' == 1) of that week in the current year.
        date = datetime.strptime('{0} {1} 1'.format(datetime.now().year, week), '%Y %W %w')
        assembly_detail_url = "http://www.tweedekamer.nl/vergaderingen/plenaire_vergaderingen/week/{0}{1}".format(datetime.now().year, week)
        update_or_create_assembly({
            "url": assembly_detail_url,
            "date": int(time.mktime(date.timetuple())),
            "house": "house",
            "type": "plenary",
            "time_period": "week",  # marks this entry as week-granular
            "details_raw": details_raw,
        })
        assembly_urls.append(assembly_detail_url)
def _getExpenseDay(self, date): if type(date) is str: date = datetime.strptime(date, "%Y-%m-%d") if self.period == 'year': return date.timetuple().tm_yday else: return date.day
def get_item_by_time(self, date):
    """Return the stored item whose timestamp matches *date* exactly.

    Looks up the UTC epoch second of *date* in self._hash, which maps
    seconds to a numpy index array into self._list. Returns None when
    nothing is stored for that second or the candidate's date does not
    match exactly.
    """
    sec = int(timegm(date.timetuple()))
    indices = self._hash.get(sec)
    # Check the type before touching .size (the original accessed
    # .size first, which would raise on non-ndarray values).
    if indices is None or not isinstance(indices, np.ndarray) or indices.size == 0:
        return None
    if indices.size > 1:
        # Several items share this second; take the first one.
        item = self._list[indices[0]]
    else:
        # NOTE(review): indexing a Python list with a 1-d ndarray raises
        # TypeError on modern numpy; this assumes 0-d index arrays are
        # stored here -- confirm.
        item = self._list[indices]
    # The dead try/except around a plain assignment was removed -- an
    # assignment cannot raise, so `retitem = False` was unreachable.
    if item is not None and item.get_date() == date:
        return item
    return None
def time_as_int(date, utc=False):
    """Convert a date or datetime object to an integer Unix timestamp.

    With ``utc=True`` the input is interpreted as a UTC based
    timestamp, otherwise as local time.
    """
    parts = date.timetuple()
    if utc:
        stamp = timegm(parts)
    else:
        stamp = time.mktime(parts)
    return int(stamp)
def previously_dumped(db, date):
    """Return True when a steps_daily row already exists for *date*."""
    stamp = mktime(date.timetuple())
    cur = db.cursor()
    cur.execute('select * from steps_daily where date=?', (stamp,))
    return cur.fetchone() is not None
def get_day_stamp(date):
    """Return the Unix timestamp of midnight (local time) on *date*."""
    parts = date.timetuple()
    midnight = datetime(parts.tm_year, parts.tm_mon, parts.tm_mday, 0, 0, 0)
    return int(time.mktime(midnight.timetuple()))
def count_pomodoros_date(user_id, date, entry_type=1):
    """Count pomodoros for *user_id* on the calendar day starting at *date*.

    NOTE(review): entry_type is accepted but never forwarded to
    count_pomodoros_ts_range -- confirm whether that is intentional.
    """
    start = int(time.mktime(date.timetuple()))
    end = int(time.mktime((date + timedelta(days=1)).timetuple()))
    return count_pomodoros_ts_range(user_id, range_min=start, range_max=end)
def check_revisions(db):
    """Scan every article's latest revision in pages of 100 and queue a
    re-extraction (celery task) for those old enough per calculate_v1().

    Python 2 code (xrange, integer division).
    """
    # Total amount of articles.
    n_articles = db.articles().count()
    # Process in chunks of 100 items per query.
    per_page = 100
    # Total amount of iterations; the leading "1 +" keeps a final
    # partial page reachable via xrange(1, total).
    total = 1 + n_articles/per_page
    for page in xrange(1, total):
        # Get last revision of every article. Paginate on chunks of 100.
        # NOTE(review): $skip/$limit without a $sort stage has no
        # guaranteed ordering, so pages may overlap or miss items.
        pipe = [
            {'$group': {'_id': "$title", 'date': {'$max': "$timestamp"}}},
            {'$skip': (page-1)*per_page},
            {'$limit': per_page},
        ]
        cursor = db.revisions().aggregate(pipeline=pipe)
        # NOTE(review): last_rev is never used below -- confirm whether
        # this call can be removed.
        last_rev = db.find_last_rev()
        for document in cursor:
            # Set revision date to YYYY-MM-DD 00:00:00.
            date = document['date'].replace(hour=0, minute=0, second=0)
            # Convert to integer epoch seconds (local time).
            date_integer = int(time.mktime(date.timetuple()))
            # Verify if the article should be revisited.
            should_revisit = calculate_v1(date_integer)
            if should_revisit:
                # Revisit article: extract revisions via the celery queue.
                extract_article.delay(document['_id'])
def monthly_date(day):
    """Convert a date from YYYYDDD to YYYYMMDD.

    Keyword arguments:
    day -- int of the day in format YYYYDDD
    """
    day_str = str(day)
    year = int(day_str[0:4])
    day_y = int(day_str[4:])
    # Resolve the ordinal day-of-year into a calendar date.
    date = datetime(year, 1, 1) + timedelta(day_y - 1)
    # strftime replaces the hand-rolled zero padding of month/day.
    return date.strftime('%Y%m%d')
def seconds_to_formatted_time(seconds):
    """Format a millisecond-epoch string as 'H:MM' (local time).

    seconds -- numeric string in milliseconds; the trailing three
    digits (the millisecond part) are stripped before conversion.
    """
    parts = datetime.fromtimestamp(float(seconds[:-3])).timetuple()
    # %02d zero-pads the minutes, replacing the duplicated branches.
    return '%d:%02d' % (parts.tm_hour, parts.tm_min)
def createURL(ticker):
    """Build the Yahoo Finance historical-CSV URL for *ticker*.

    Yahoo encodes the end month zero-based, hence the -1; the start
    date (a/b/c parameters) is fixed at 2004-08-19.
    """
    # date.today().timetuple() instead of the unbound-method form
    # date.timetuple(date.today()).
    today = date.today()
    return ("http://ichart.finance.yahoo.com/table.csv?s=" + ticker +
            "&d=" + str(today.month - 1) + "&e=" + str(today.day) +
            "&f=" + str(today.year) + "&g=d&a=7&b=19&c=2004&ignore=.csv")
def tokenize_todays_date(self):
    """Return today's date as a 'YYYYMMDD' string."""
    today = date.today().timetuple()
    month = str(today.tm_mon).zfill(2)
    day = str(today.tm_mday).zfill(2)
    return '{}{}{}'.format(today.tm_year, month, day)
def get_nhits(self):
    """Count today's '/cache' requests from the calling client's IP.

    NOTE(review): the count is assigned but never returned -- confirm
    whether a `return nhits` was lost upstream.
    """
    day_start = time.mktime(date.today().timetuple())
    query = {
        'ip': cherrypy.request.remote.ip,
        'ts': {'$gte': day_start},
        'args.pid': {'$exists': False},  # do not count pid requests
        'path': '/cache',  # requests from das_client calls
    }
    nhits = self.logcol.find(query, count=True)
def handle_starttag(self, tag, attrs):
    """Record the contribution count of each <rect> element's day in C."""
    if tag != "rect":
        return
    data = dict(attrs)
    parsed = dateutil.parser.parse(data["data-date"])
    count = int(data["data-count"])
    # tm_yday is 1-based; C is indexed from 0.
    day = parsed.timetuple().tm_yday - 1
    if count > 0:
        C[day] = count
def dump_to_db(db, data_type, date, data):
    """Insert per-sample rows and one daily total row for *data_type*.

    db -- DB-API connection (sqlite3-style ? placeholders)
    data_type -- table base name; the daily sum goes into
        '<data_type>_daily'. NOTE: interpolated into the SQL, so
        callers must only pass trusted identifiers.
    date -- the day the samples belong to
    data -- iterable of rows; row[0] is a datetime, row[1] the value
    """
    insert_sql = "insert into %s values (?, ?)" % data_type
    total = 0  # renamed from `sum`, which shadowed the builtin
    for row in data:
        db.execute(insert_sql, (mktime(row[0].timetuple()), row[1]))
        total += row[1]
    db.execute("insert into %s_daily values (?, ?)" % data_type,
               (mktime(date.timetuple()), total))
    db.commit()
def project_hours_date(user_id, date):
    """Return the hours spent on type-2 entries for *user_id* on *date*."""
    entries = day_entries(user_id, date, date + timedelta(1))
    day_key = int(time.mktime(date.timetuple()))
    total_seconds = 0
    for entry in entries[day_key]:
        if entry['type_id'] == 2:
            total_seconds += entry['end'] - entry['start']
    return total_seconds / (60.0 * 60.0)
def quote_per_day(
        session: Session, date: date = None) -> Optional[Quote]:
    """Provide a daily quote, relevant to the given day of the year.

    The quote is randomly selected from the set of quotes matching
    that day. When *date* is omitted, today's date is used.
    """
    if date is None:
        # Resolve 'today' at call time. The original default of
        # date.today() was evaluated once at import, so a long-running
        # process would be stuck on its start-up date.
        from datetime import date as _date
        date = _date.today()
    day_num = date.timetuple().tm_yday
    quote = session.query(Quote).filter(
        Quote.id % TOTAL_DAYS == day_num).order_by(func.random()).first()
    return quote
def floored_twelfth_of_a_year(date):
    """Convert a date to a month number by flooring to the nearest
    12th of a year.
    """
    parts = date.timetuple()
    days_in_year = 366.0 if isleap(parts.tm_year) else 365.0
    month0 = floor((parts.tm_yday / days_in_year) * 12)
    return (parts.tm_year - start_year) * 12 + month0 - start_month_0_indexed
def floored_twelfth_of_a_360_day_year(date):
    """Convert a date to a month number by flooring to the nearest
    12th of a 360 day year. Used by PRECIS projection.
    """
    parts = date.timetuple()
    month0 = floor((parts.tm_yday / 360) * 12)
    return (parts.tm_year - start_year) * 12 + month0 - start_month_0_indexed
def formatDateForRequest(date):
    """Return a string with the date formatted as the ORNL server
    expects it: 'A' + year + zero-padded day of year.

    date -- Python datetime/date object
    """
    parts = date.timetuple()
    return "A{}{:03d}".format(parts.tm_year, parts.tm_yday)
def save_daily_count(wordid, date, perday_blog_count):
    """Persist the per-day microblog count for topic *wordid*.

    wordid -- topic id; date -- the day; perday_blog_count -- number
    of posts that day.
    """
    day = ts2datetime(time.mktime(date.timetuple()))
    count = int(perday_blog_count)
    record = PropagateTrend(wordid, day, count)
    db.session.add(record)
    db.session.commit()
def default(self, obj):
    """JSON fallback: datetimes become UTC epoch seconds, other
    iterables become lists; everything else defers to JSONEncoder.
    """
    try:
        if isinstance(obj, datetime):
            # Bug fix: the original called calendar.timegm(date.timetuple())
            # where `date` was an undefined name (NameError); the
            # timestamp must come from the object being encoded.
            return calendar.timegm(obj.timetuple())
        iterable = iter(obj)
    except TypeError:
        pass
    else:
        return list(iterable)
    return JSONEncoder.default(self, obj)
def write_post_data(data, timestamp, monitor):
    """Store one snapshot of a post's statistics in *monitor*.

    data -- 7-tuple (post_id, title, views, favorite, twitter_data,
            facebook_data, vkontakte_data); title is not persisted.
    timestamp -- 5-tuple (year, month, day, hour, minute)
    monitor -- collection-like object exposing insert(dict)
    Returns None; malformed input is skipped (best effort).
    """
    try:
        (post_id, title, views, favorite,
         twitter_data, facebook_data, vkontakte_data) = data
        year, month, day, hour, minute = timestamp
    except (TypeError, ValueError):
        # Narrowed from a bare `except`, which also swallowed
        # KeyboardInterrupt/SystemExit. Unpacking can only raise these.
        return None
    date = datetime(year, month, day, hour, minute)
    # Snapshot time as UTC epoch seconds for easy sorting.
    overall_seconds = calendar.timegm(date.timetuple())
    monitor.insert({
        "post_id": post_id,
        "views": views,
        "favorite": favorite,
        "twitter_data": twitter_data,
        "facebook_data": facebook_data,
        "vkontakte_data": vkontakte_data,
        "year": year,
        "month": month,
        "day": day,
        "hour": hour,
        "minute": minute,
        "overall_seconds": overall_seconds,
    })
def get_current_price(session, p_no, date=None):
    """Return the latest deal price for *p_no* since *date*
    (default: today), falling back to get_last_price() when no deal
    exists yet.
    """
    if date is None:
        # Resolve 'today' at call time. The original default of
        # datetime.date.today() was evaluated once at import time and
        # then frozen for the life of the process.
        date = datetime.date.today()
    current_price = session.scalar(
        "select price from wx_order_deal where p_no=:p_no and deal_time >:deal_time order by id desc limit 1",
        {
            'p_no': p_no,
            'deal_time': time.mktime(date.timetuple())
        })
    if not current_price:
        current_price = get_last_price(session, p_no)
    return current_price
def get_open_price(session, p_no, date=None):
    """Return the first (opening) deal price for *p_no* since *date*
    (default: today), or 0.00 when there is none.
    """
    if date is None:
        # Resolve 'today' at call time. The original default of
        # datetime.date.today() was evaluated once at import time and
        # then frozen for the life of the process.
        date = datetime.date.today()
    open_price = session.scalar(
        "select price from wx_order_deal where p_no=:p_no and deal_time >:deal_time order by id asc limit 1",
        {
            'p_no': p_no,
            'deal_time': time.mktime(date.timetuple())
        })
    if not open_price:
        open_price = 0.00
    return open_price
def create_raster(plant, phenophase, climate_source, region, date, time_rez):
    """Write the current SI-x array for (plant, phenophase) out as a
    16-bit GeoTIFF under Six.save_path.

    plant/phenophase select which precomputed class-level Six array is
    dumped; climate_source, region and time_rez determine the folder
    and file naming plus the day-of-year cutoff applied to the data.
    """
    # Yearly products are named by year only, daily ones by full date.
    if time_rez == 'year':
        date_string = date.strftime("%Y")
    else:
        date_string = date.strftime("%Y%m%d")
    year_string = date.strftime("%Y")
    # Set up the output path from the source/region/resolution combo.
    if climate_source == 'prism' and time_rez == 'day':
        folder_name = "six_" + plant + "_" + phenophase + "_" + climate_source + "_" + year_string + os.sep
    elif climate_source == 'ncep' and time_rez == 'year' and region == 'conus':
        folder_name = "six_" + plant + "_" + phenophase + "_" + climate_source + "_historic" + os.sep
    elif climate_source == 'ncep' and time_rez == 'year' and region == 'alaska':
        folder_name = "six_" + plant + "_" + phenophase + "_" + climate_source + "_alaska_historic" + os.sep
    elif climate_source == 'ncep' and time_rez == 'day' and region == 'alaska':
        folder_name = "six_" + plant + "_" + phenophase + "_" + climate_source + "_alaska" + os.sep
    else:
        # prism yearly goes here
        folder_name = "six_" + plant + "_" + phenophase + "_" + climate_source + os.sep
    file_name = plant + '_' + phenophase + '_' + climate_source + '_' + date_string + '.tif'
    os.makedirs(os.path.dirname(Six.save_path + folder_name), exist_ok=True)
    file_path = Six.save_path + folder_name + file_name
    # Set out_array to the plant/phenophase of interest (np.copy so the
    # masking below does not mutate the shared class-level array).
    if phenophase == 'leaf':
        if plant == 'average':
            out_array = np.copy(Six.leaf_average_array)
        else:
            out_array = np.copy(Six.leaf_array)
    elif phenophase == 'bloom':
        if plant == 'average':
            out_array = np.copy(Six.bloom_average_array)
        else:
            out_array = np.copy(Six.bloom_array)
    else:
        print('Invalid phenophase: ' + phenophase)
        return
    # Remove days beyond the cutoff day-of-year from out_array.
    if climate_source == 'prism' and time_rez == 'year':
        # NOTE(review): hard-coded cutoff at doy 240 (late August) for
        # prism yearly products -- confirm this is intentional.
        day_of_year = 240
    else:
        day_of_year = date.timetuple().tm_yday
    out_array[out_array > day_of_year] = -9999
    out_array[np.isnan(out_array)] = -9999
    # Convert to 16 bit integers.
    # NOTE(review): np.int16 is signed (required for the -9999 nodata
    # sentinel); the original comment said "unsigned".
    out_array = out_array.astype(np.int16, copy=False)
    Six.write_int16_raster(file_path, out_array, Six.no_data_value,
                           out_array.shape[1], out_array.shape[0],
                           Six.projection, Six.geo_transform)
    # Drop the reference so the large array can be collected promptly.
    out_array = None
def rounded_date_to_month_number(date):
    """Convert a date to a month number by rounding to the nearest
    12th of a year.

    See also date_to_month_number(year, month)
    """
    parts = date.timetuple()
    days_in_year = 366.0 if isleap(parts.tm_year) else 365.0
    month0 = floor((parts.tm_yday / days_in_year) * 12 + 0.5)
    return (parts.tm_year - start_year) * 12 + month0 - start_month_0_indexed
def commitsByDate(self):
    """Build one plot series per user of (epoch-millis, commit count)."""
    series_list = []

    def by_date(entry):
        return entry['date'].date()

    for user in self.log.getUserList():
        counts = countLogEntriesByFunc(self.log, by_date, user)
        points = [(int(mktime(day.timetuple())) * 1000, count)
                  for day, count in counts.items()]
        series_list.append(self.getSeries(user, points))
    return series_list
def seconds_to_formatted_date(seconds):
    """Format a millisecond-epoch string as 'YYYY-MM-DD' (local time).

    seconds -- numeric string in milliseconds; the trailing three
    digits (the millisecond part) are stripped before conversion.
    """
    parts = datetime.fromtimestamp(float(seconds[:-3])).timetuple()
    # %02d zero-pads month and day, replacing the four-way branch.
    return '%d-%02d-%02d' % (parts.tm_year, parts.tm_mon, parts.tm_mday)
def get_current_list():
    """Return this year's myrtle rows as a list of dicts."""
    current_year = str(date.today().timetuple()[0])
    cursor.execute('SELECT * from myrtle WHERE year=%s', (current_year,))
    rows = cursor.fetchall()
    return [{'hooker': row[1], 'hookee': row[2], 'why': row[3]}
            for row in rows]
def get_time_limits(date):
    """Return (yesterday, date) as local-time Unix timestamps."""
    log(LOG_FILE, 'Getting time limits...')
    yesterday_linux = time.mktime((date - timedelta(days=1)).timetuple())
    today_linux = time.mktime(date.timetuple())
    log(LOG_FILE, 'Start: {}\nEnd: {}'.format(yesterday_linux, today_linux))
    return yesterday_linux, today_linux
def graph(request, hash):
    """Django view: read the CSV stored as /mnt/vol/csvs/<hash>.csv and
    render it as a time series graph, one line per keyword.

    CSV layout by column position: date ('%Y-%m-%d'), value, keyword.
    """
    filePath = '/mnt/vol/csvs/'
    yValues = []
    xValues = []
    xAxis = ''
    yAxis = ''
    thing = []
    valuesList = []
    keyWords = []
    # Read in csv file and gather information for passing to graph.
    with open(filePath + hash + '.csv', 'r') as csvfile:
        reader = csv.DictReader(csvfile)
        # Axis labels come from the CSV header row.
        xAxis = reader.fieldnames[0]
        yAxis = reader.fieldnames[1]
        keyword = reader.fieldnames[2]
        for row in reader:
            valuesList.append((row[xAxis], row[yAxis], row[keyword]))
    # Collect the distinct keywords in first-seen order.
    for item in valuesList:
        if (item[2] not in keyWords):
            keyWords.append(item[2])
    yValuesList = []
    for word in keyWords:
        yTempValues = []
        for thing in valuesList:
            if (thing[2] == word):
                try:
                    yTempValues.append(float(thing[1]))
                except ValueError:
                    # Non-numeric cell: plot a sentinel value instead.
                    yTempValues.append(-200)
                    pass
                # Convert date to a format that can be sent to
                # javascript: epoch milliseconds (local time).
                date = datetime.strptime(thing[0], '%Y-%m-%d').date()
                timestamp = int(time.mktime(date.timetuple())) * 1000
                if (timestamp not in xValues):
                    xValues.append(timestamp)
        yValuesList.append(yTempValues)
    context = {
        'xAxis': xAxis,
        'yAxis': yAxis,
        'xValues': xValues,
        'keywords': keyWords,
        'yValues': yValuesList,
    }
    return render(request, 'words/graph2.html', context)
def get_trapeze_link(self, date):
    """Return (timetable URL, site name) for this service's source."""
    if self.source.name == 'Y':
        domain, name = 'yorkshiretravel.net', 'Yorkshire Travel'
    else:
        domain, name = 'travelinescotland.com', 'Traveline Scotland'
    # The site expects epoch milliseconds, or an empty value when no
    # date was given.
    stamp = int(time.mktime(date.timetuple()) * 1000) if date else ''
    params = (('timetableId', self.service_code),
              ('direction', 'OUTBOUND'),
              ('queryDate', stamp),
              ('queryTime', stamp))
    return 'http://www.{}/lts/#/timetables?{}'.format(
        domain, urlencode(params)), name
def save_daily_count(mid, date, perday_blog_count):
    """Replace and persist the per-day microblog count for post *mid*."""
    day = ts2datetime(time.mktime(date.timetuple()))
    count = int(perday_blog_count)
    # Drop any existing rows for this (mid, day) pair before inserting.
    existing = db.session.query(PropagateTrendSingle).\
        filter(PropagateTrendSingle.id == mid,
               PropagateTrendSingle.date == day).all()
    for old in existing:
        db.session.delete(old)
        db.session.commit()
    db.session.add(PropagateTrendSingle(mid, day, count))
    db.session.commit()
def get_trapeze_link(self, date):
    """Return (timetable URL, site name) for this region's source."""
    if self.region_id == 'Y':
        domain, name = 'yorkshiretravel.net', 'Yorkshire Travel'
    else:
        domain, name = 'travelinescotland.com', 'Traveline Scotland'
    # The site expects epoch milliseconds, or an empty value when no
    # date was given.
    stamp = int(time.mktime(date.timetuple()) * 1000) if date else ''
    params = (('timetableId', self.service_code),
              ('direction', 'OUTBOUND'),
              ('queryDate', stamp),
              ('queryTime', stamp))
    return 'http://www.{}/lts/#/timetables?{}'.format(
        domain, urlencode(params)), name
def select_date(self, button):
    """Pop up a modal calendar dialog; on OK, relabel *button* with the
    chosen date and return it as a local-time Unix timestamp.

    Returns None when the dialog is cancelled or accepted without a
    date having been clicked.
    """
    d = WindowModalDialog(self, _("Select date"))
    d.setMinimumSize(600, 150)
    # No date selected yet; stays None until the calendar is clicked.
    d.date = None
    vbox = QVBoxLayout()
    def on_date(date):
        # Remember the most recently clicked calendar date.
        d.date = date
    cal = QCalendarWidget()
    cal.setGridVisible(True)
    cal.clicked[QDate].connect(on_date)
    vbox.addWidget(cal)
    vbox.addLayout(Buttons(OkButton(d), CancelButton(d)))
    d.setLayout(vbox)
    if d.exec_():
        if d.date is None:
            return None
        date = d.date.toPyDate()
        button.setText(self.format_date(date))
        return time.mktime(date.timetuple())
def scrape_senate_plenary():
    """Scrape the Eerste Kamer plenary planning page and upsert every
    linked meeting via update_or_create_assembly().

    Python 2 code (urllib2, eager map()). Retrieval failures are
    recorded in error_on_retrieval instead of aborting; each detail URL
    is appended to the module-level assembly_urls list.
    """
    index_url = "http://www.eerstekamer.nl/planning_plenaire_vergaderingen"
    try:
        document = retrieve_if_not_exists(index_url, bypass_cache=bypass_cache)
    except urllib2.HTTPError:
        error_on_retrieval.append(index_url)
        return
    for element in document.xpath("//a[contains(@href, '/plenaire_vergadering/')]"):
        date_string = element.text.strip()
        date, start_date, end_date = date_string_to_datetime(date_string)
        assembly_detail_url = element.get("href")
        try:
            document = retrieve_if_not_exists(assembly_detail_url, bypass_cache=bypass_cache)
        except urllib2.HTTPError:
            error_on_retrieval.append(assembly_detail_url)
            continue
        # Remove the footer and various other irrelevant elements.
        # NOTE(review): relies on Python 2 map() being eager for its
        # side effect -- under Python 3 this would silently do nothing.
        map(lambda element: element.getparent().remove(element), document.cssselect("#footer_menu")[0].getprevious().itersiblings())
        details_raw = "".join([lxml.etree.tostring(element) for element in document.cssselect("h1")[0].itersiblings()])
        # Add to database (times are local epoch seconds, or None when
        # start/end were not parseable from the heading).
        update_or_create_assembly({
            "type": "plenary",
            "url": assembly_detail_url,
            "date": int(time.mktime(date.timetuple())),
            "start_time": int(time.mktime(start_date.timetuple())) if start_date else None,
            "end_time": int(time.mktime(end_date.timetuple())) if end_date else None,
            "parlisnumber": None,
            "house": "senate",
            "status": None,
            "is_public": None,
            "location": None,
            "variety": None,
            "committee": None,
            "summary": None,
            "details_raw": details_raw,
        })
        assembly_urls.append(assembly_detail_url)
def activateuser(request):
    """AJAX endpoint: extend a user's subscription by *days* and report
    the activation event to Intercom.

    Expects a JSON POST body {"user_id": ..., "days": ...}. Returns
    HTTP 200 on success; HTTP 500 on errors and on non-AJAX / non-POST
    access (which falls through to the final response).
    """
    if request.is_ajax():
        if request.method == 'POST':
            try:
                json_data = json.loads(request.body)
                user_id = json_data['user_id']
                days = json_data['days']
                user = User.objects.get(user_id=user_id)
                # Add the given number of days starting from today.
                date = user.enable_for(days)
                # Send the event to Intercom (best effort: failures here
                # must not break the activation itself).
                ep = Setting.get_var('intercom_endpoint')
                token = Setting.get_var('intercom_token')
                try:
                    intercom = Intercom(ep, token)
                    metadata = {
                        'event_description': 'usuario activado por el administrador',
                        'expire_at': str(int(mktime(date.timetuple())))
                    }
                    reply = intercom.submitEvent(user.user_id, user.email,
                                                 'user_activated', metadata)
                except Exception as e:
                    pass
                return JsonResponse({'message': 'activado correctamente'}, status=200)
            except Exception as e:
                # NOTE(review): e.message is Python 2 only -- confirm the
                # runtime, or this error path itself raises on Python 3.
                return JsonResponse(
                    {
                        'message': 'Hubo un error',
                        'data': e.message
                    }, status=500)
    return JsonResponse({
        'message': 'Metodo no permitido',
        'data': ''
    }, status=500)
def move_file(self, filename, destination):
    """Move (or copy in safe mode) *filename* into a folder under
    *destination* derived from the file's creation time.

    In normal mode a failed move (cross-device, permissions, ...)
    falls back to copying, and the moved files are made
    world-writable afterwards.
    """
    # Method-call form instead of the unbound date.timetuple(...) call.
    creationtime = datetime.datetime.fromtimestamp(
        os.path.getctime(filename)).timetuple()
    fname = self.create_folder(creationtime, destination)
    if self.safemode:
        shutil.copy(filename, fname)
        self.log(filename + " kopiert nach " + fname)
    else:
        try:
            # 0o777 replaces the Python-2-only literal 0777 (same value,
            # also valid on Python 3).
            os.chmod(filename, 0o777)
            shutil.move(filename, fname)
            self.log(filename + " verschoben nach " + fname)
        except (OSError, shutil.Error):
            shutil.copy(filename, fname)
            self.log(filename + " kopiert nach " + fname)
        for root, dirs, files in os.walk(fname):
            for name in files:
                try:
                    os.chmod(os.path.join(root, name), 0o777)
                except OSError:
                    # Narrowed from a bare except; chmod is best effort.
                    pass
def sync(self, source, destination):
    """Hand every media file under *source* to move_file(*destination*).

    The directory walk runs twice (as in the original; presumably to
    pick up files appearing mid-run -- TODO confirm). ARW raw files
    optionally go into a RAW/ subfolder next to the regular target.
    """
    # One loop over patterns replaces seven copy-pasted glob blocks;
    # the order matches the original exactly.
    media_patterns = ('*.JPG', '*.jpg', '*.MP4', '*.AVI', '*.WAV',
                      '*.MTS', '*.MP3')
    for _pass in range(0, 2):
        for dirname, dirnames, filenames in os.walk(source):
            for subdirname in dirnames:
                folder = os.path.join(dirname, subdirname)
                for pattern in media_patterns:
                    for filename in glob.glob(os.path.join(folder, pattern)):
                        self.move_file(filename, destination)
                # Sony raw files: optionally placed in a RAW subfolder.
                for filename in glob.glob(os.path.join(folder, '*.ARW')):
                    if self.RAW_subdir:
                        creationtime = datetime.datetime.fromtimestamp(
                            os.path.getctime(filename)).timetuple()
                        fname = self.create_folder(creationtime)
                        rawfolder = fname + "/RAW"
                        if not os.path.exists(rawfolder):
                            os.makedirs(rawfolder)
                            self.log(rawfolder + " erstellt")
                            # 0o777 replaces the Python-2-only 0777.
                            os.chmod(rawfolder, 0o777)
                        if self.safemode:
                            shutil.copy(filename, rawfolder)
                            self.log(filename + " kopiert nach " + rawfolder)
                        else:
                            shutil.move(filename, rawfolder)
                            self.log(filename + " verschoben nach " + rawfolder)
                    else:
                        self.move_file(filename, destination)
def save_profit(c):
    """Scrape the money-flow trend table for stock code *c* and save it
    as ./data/trend_<c>.csv.

    Python 2 code (print statement, unicode literals). Even when
    today's file already exists the function currently re-downloads:
    the early return is commented out.
    """
    f_n = './data/trend_%s.csv' % c
    # Already fetched today? (compares only the day-of-month of the
    # file's mtime with today's)
    if os.path.exists(f_n) and time.localtime(os.path.getmtime(f_n)).tm_mday == date.timetuple(datetime.today()).tm_mday:
        print 'File exists'
        # return
    html = urlopen(BASE_URL+'%s.html' % c).read()
    page = etree.HTML(html)
    # All table cells of the money-flow table: plain text plus <span> text.
    data = page.xpath(u'//div[@id="content_zjlxtable"]/table/tbody/tr/td/text()|//div[@id="content_zjlxtable"]/table/tbody/tr/td/span/text()')
    data = filter(lambda x: x.strip(), data)
    data = map(lambda x: x.strip().strip('%'), data)
    # '-' placeholders become zero.
    data = map(lambda x: x != '-' and x or '0', data)
    def try_normalize(x):
        # Values with magnitude over 100 are assumed to be in raw units;
        # scale them down by 10000 (to "wan", 10k) with two decimals.
        # Non-numeric cells pass through untouched.
        try:
            return unicode('%.2f' % (float(x)/10000)) if abs(float(x)) > 100 else x
        except:
            return x
    data = map(lambda x: try_normalize(x), data)
    # Convert values suffixed with U+4EBF ("yi", 100M) to 10k units, and
    # strip the U+4E07 ("wan", 10k) suffix from already-scaled values.
    data = map(lambda x: x.endswith(u'\u4ebf') and unicode(float(x.strip(u'\u4ebf'))*10000) or x, data)
    data = map(lambda x: x.endswith(u'\u4e07') and x.strip(u'\u4e07') or x, data)
    # Reshape the flat cell list into rows of COL_NUM columns.
    data = list_to_tuple_list(data, len(data)/COL_NUM)
    df = pd.DataFrame(data, columns=['date', 'close_price', 'raise_rate', 'ma_v', 'ma_r', 'ex_v', 'ex_r', 'big_v', 'big_r', 'mid_v', 'mid_r', 'sm_v', 'sm_r'])
    df.to_csv(f_n, encoding='utf-8')
def _generate_events_for_day(date):
    """Generate a reproducible DataFrame of fake events for one day."""
    # Seed both Faker and the RNG from the date so output is stable.
    seed = int(time.mktime(date.timetuple()))
    Faker.seed(seed)
    rng = random.RandomState(seed)
    # Random population sizes for this day.
    n_users = rng.randint(low=50, high=100)
    n_events = rng.randint(low=200, high=2000)
    # A pool of fake user identifiers (IPv4 addresses).
    fake = Faker()
    users = [fake.ipv4() for _ in range(n_users)]
    frame = pd.DataFrame({
        "user": rng.choice(users, size=n_events, replace=True),
        "date": pd.to_datetime(date),
    })
    return frame
def createURL(ticker):
    """Build the Yahoo Finance historical-CSV URL for *ticker*.

    The end month is zero-based (hence the -1); the start date is
    fixed at 2004-08-19 by the a/b/c parameters.
    """
    today = date.today().timetuple()
    month = today.tm_mon - 1
    day = today.tm_mday
    year = today.tm_year
    return ("http://ichart.finance.yahoo.com/table.csv?s=" + ticker +
            "&d=" + str(month) + "&e=" + str(day) + "&f=" + str(year) +
            "&g=d&a=7&b=19&c=2004&ignore=.csv")
def _end_stamp(d): date = datetime(int(d.year),int(d.month),int(d.day),23,59,59,999) return time.mktime(date.timetuple())*1000
def timestamp_from_date(date):
    """Return the local-time Unix timestamp (float) for *date*."""
    broken_down = date.timetuple()
    return mktime(broken_down)
def date_to_timestamp(self, date):
    """Take a datetime object and return the Unix timestamp in
    integer milliseconds (local time).
    """
    seconds = time.mktime(date.timetuple())
    return int(seconds * 1000)
def date_to_seconds(date):
    """Return *date* as seconds since the epoch, interpreted as UTC."""
    broken_down = date.timetuple()
    return calendar.timegm(broken_down)
def timestamp(date):
    """Return the local-time Unix timestamp (float) for *date*."""
    broken_down = date.timetuple()
    return time.mktime(broken_down)