def strftime(datetime, code, format):
    ''' Convert datetime to a string as specified by the format argument.

    For codes present in TIME_LOCALE, localized weekday/month names and
    the AM/PM marker are substituted before delegating to
    datetime_strftime; the format is passed as UTF-8 bytes and the result
    decoded back (Python-2 style).
    '''
    if code in TIME_LOCALE:
        # Substitute localized weekday (%a/%A) and month (%b/%B) names.
        for f, i in (('%a', 6), ('%A', 6), ('%b', 1), ('%B', 1)):
            format = format.replace(f, TIME_LOCALE[code][f][datetime.timetuple()[i]])
        # BUG FIX: `hour < 12 and 0 or 1` always evaluated to 1 (the
        # `and 0` arm is falsy), so AM times were rendered with the PM
        # marker. Use a real conditional expression.
        meridiem = 0 if datetime.timetuple()[3] < 12 else 1
        format = format.replace('%p', TIME_LOCALE[code]['%p'][meridiem]).encode('utf-8')
    else:
        format = format.encode('utf-8')
    return datetime_strftime(datetime, format).decode('utf-8')
def datetime_to_minutes(datetime):
    """Convert *datetime* to absolute minutes via ymd2minute().

    The date is packed numerically as yyyymmdd (the integer form
    ymd2minute expects); the hours and minutes of the day are added on
    top. (Removed dead commented-out string-based packing code.)
    """
    y, m, d, h, M = datetime.timetuple()[:5]
    # e.g. 2020-01-02 -> 20200102
    time = y*10000+m*100+d
    return ymd2minute(time)+h*60+M
def convert_datetime2timestamp(self, datetime):
    """Convert a datetime (e.g. 2016-05-05 20:28:54) into a Unix
    timestamp in whole seconds, interpreted in the local timezone.

    :param datetime: the datetime to convert
    :return: timestamp in seconds (int)
    """
    local_struct = datetime.timetuple()
    return int(time.mktime(local_struct))
def datetime_to_timestamp(datetime):
    """Takes a datetime and returns a unix epoch in milliseconds
    (float; whole seconds from mktime, sub-second from .microsecond)."""
    millis_from_us = int(datetime.microsecond / 1000)
    return time.mktime(datetime.timetuple()) * 1000 + millis_from_us
def strftime(datetime, code, format):
    ''' Convert datetime to a string as specified by the format argument.

    For codes in TIME_LOCALE, localized weekday/month names and the
    AM/PM marker are substituted first; the bytes/str round-trip is only
    needed on Python 2, where strftime works on byte strings.
    '''
    if code in TIME_LOCALE:
        for f, i in (('%a', 6), ('%A', 6), ('%b', 1), ('%B', 1)):
            format = format.replace(f, TIME_LOCALE[code][f][datetime.timetuple()[i]])
        # BUG FIX: `hour < 12 and 0 or 1` always yielded 1 (the `and 0`
        # arm is falsy), so AM times got the PM marker.
        meridiem = 0 if datetime.timetuple()[3] < 12 else 1
        format = format.replace('%p', TIME_LOCALE[code]['%p'][meridiem])
    # Encode and decode under Python2 because strftime use bytes/str.
    if sys.version_info < (3,):
        format = format.encode('utf-8')
    result = datetime_strftime(datetime, format)
    if sys.version_info < (3,):
        result = result.decode('utf-8')
    return result
def show_production_for_datapoint(self, pvstyle_date, pvstyle_time):
    """Print the production of every inverter for one PVOutput-style
    date/time, then the system total."""
    when = self.pvoutput.parse_date_and_time(pvstyle_date, pvstyle_time)
    ts = time.mktime(when.timetuple())
    datapoints = self.db.get_entries(self.system.inverters(), ts)
    total = 0
    for point in datapoints:
        print(("Inverter (%s): %d" % (point[2], point[1])))
        total += point[1]
    print(("System (%s): %d" % (self.system.pvoutput_sid, total)))
def start(self):
    """Scrape the most recently updated file from the ShadowsocksRRShare
    repo's ss/ directory and parse each of its lines via fromURL().

    Returns a list of parsed entries, or None on any network failure.
    (Fixed `== None` / `!= None` comparisons to identity checks.)
    """
    url = "https://github.com/ruanfei/ShadowsocksRRShare/tree/master/ss"
    try:
        r = requests.get(url)
    except requests.exceptions.RequestException as e:
        print(e)
        return
    # print(r.text)
    sel = Selector(r.text)
    xList = sel.xpath("//tr[@class='js-navigation-item']")
    m = 0
    i = 0
    # Pick the row with the newest embedded @datetime stamp.
    for index in range(len(xList)):
        c = xList[index]
        datetimeStr = c.xpath(".//@datetime").get()
        if datetimeStr is None:
            continue
        # print(datetimeStr)
        datetime = dateutil.parser.parse(datetimeStr)
        un_time = time.mktime(datetime.timetuple())
        # print(un_time)
        if un_time > m:
            m = un_time
            i = index
    href = xList[i].xpath(
        ".//a[@class='js-navigation-open ']//@href").get()
    url2 = "https://www.github.com" + href
    print(url2)
    try:
        r2 = requests.get(url2)
    except requests.exceptions.RequestException as e:
        print(e)
        return
    sel2 = Selector(r2.text)
    href2 = sel2.xpath("//a[@id='raw-url']//@href").get()
    url3 = "https://www.github.com" + href2
    print(url3)
    try:
        r3 = requests.get(url3)
    except requests.exceptions.RequestException as e:
        print(e)
        return
    # print(r3.text)
    lineList = r3.text.split("\n")
    ret = []
    for line in lineList:
        data = fromURL(line)
        if data is not None:
            ret.append(data)
    return ret
def cambiar_fecha(self, archivo, datetime):
    """Set the Windows file time of *archivo* to the given date.

    The value is normalized via convertir_a_datetime(), converted to a
    pywintypes.Time and written with SetFileTime.
    """
    datetime = self.convertir_a_datetime(datetime)
    datetime_ready = time.mktime(datetime.timetuple())
    wintime = pywintypes.Time(datetime_ready)
    winfile = win32file.CreateFile(
        archivo, win32con.GENERIC_WRITE,
        win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE
        | win32con.FILE_SHARE_DELETE, None, win32con.OPEN_EXISTING,
        win32con.FILE_ATTRIBUTE_NORMAL, None)
    try:
        win32file.SetFileTime(winfile, wintime, None, None)
    finally:
        # Close the handle even if SetFileTime raises (it was leaked
        # on error before).
        winfile.close()
def _has_been_modified_since(self, datetime):
    """Report whether the resource changed since the client's cache.

    Sets Last-Modified on the response; when it matches the request's
    If-Modified-Since header, answers 304 and returns False. A None
    datetime is treated as "always modified".
    """
    if datetime is None:
        return True
    last_modified = format_date_time(mktime(datetime.timetuple()))
    self.response.headers['Last-Modified'] = last_modified
    cached_stamp = self.request.headers.get('If-Modified-Since')
    if cached_stamp != last_modified:
        return True
    self.response.set_status(304)
    return False
def date2timestamp(date_string, date_format='%Y-%m-%d'):
    """Convert a date string to a whole-second local Unix timestamp.

    :param date_string: the date string to convert
    :param date_format: strptime format of *date_string*
    :return: timestamp in seconds (int)
    :raises ValueError: if *date_string* does not match *date_format*
    """
    # BUG FIX: the old code assigned the result to a local named
    # `datetime`, which shadowed the module on the very line that read
    # `datetime.datetime.strptime`, raising UnboundLocalError on every
    # call. strptime itself raises ValueError on a mismatch, so the
    # extra falsiness check was dead code.
    parsed = datetime.datetime.strptime(date_string, date_format)
    return int(time.mktime(parsed.timetuple()))
def delayed_push(self, datetime, item):
    """Schedule a work `item` to run at or after `datetime`.

    Pushes the encoded item onto a per-timestamp delayed list (RPUSH)
    and records the timestamp in the delayed-queue schedule zset (ZADD).

    :param datetime: when to run the job (datetime.datetime)
    :param item: the work item dict to enqueue
    """
    run_at = int(time.mktime(datetime.timetuple()))
    encoded = ResQ.encode(item)
    self.redis.rpush('resque:delayed:%s' % run_at, encoded)
    self.redis.zadd('resque:delayed_queue_schedule', run_at, run_at)
def wind(self, datetime):
    """Returns wind speed on given day in m/s"""
    #80 Winter Days from Jan 1 to March 20
    #91 Spring Days from March 21 to June 20
    #92 Summer Days from June 21 to Sept 20
    #91 Fall Days from Sept 21 to Dec 2
    #11 Winter Days from Dec 21 to Dec 31
    day_of_year = datetime.timetuple().tm_yday
    hour = datetime.hour
    # Pick seasonal wind parameters by day-of-year band.
    if day_of_year < 80 or day_of_year >= 354:
        seasonal = [6.3, 5., 12.5, 30., hour]
    elif day_of_year < 171:
        seasonal = [7.2, 6., 12., 45., hour]
    elif day_of_year < 263:
        seasonal = [6.1, 4.9, 13., 70., hour]
    else:  # 263 <= day_of_year < 354
        seasonal = [6.4, 5.1, 13.5, 50., hour]
    return self.windFunction(np.array(seasonal))
def wind(self, datetime):
    """Returns wind speed on given day in m/s"""
    #80 Winter Days from Jan 1 to March 20
    #91 Spring Days from March 21 to June 20
    #92 Summer Days from June 21 to Sept 20
    #91 Fall Days from Sept 21 to Dec 2
    #11 Winter Days from Dec 21 to Dec 31
    yday = datetime.timetuple().tm_yday
    # Season lookup: each tuple holds the four seasonal coefficients;
    # the current hour is appended before calling windFunction.
    if yday < 80 or yday >= 354:
        base = (6.3, 5., 12.5, 30.)      # winter
    elif yday < 171:
        base = (7.2, 6., 12., 45.)       # spring
    elif yday < 263:
        base = (6.1, 4.9, 13., 70.)      # summer
    else:
        base = (6.4, 5.1, 13.5, 50.)     # fall
    return self.windFunction(np.array(base + (datetime.hour,)))
def process_alarm_data(self, alarm_data):
    """Parse one alarm record into the field dict InfluxDB expects.

    :param alarm_data: (alarm_id, alarm_name, datetime, state, remark)
    :return: dict of fields, with the datetime flattened to epoch seconds
    """
    alarm_id, alarm_name, datetime, state, remark = alarm_data
    # datetime to timestamp (local-time seconds)
    stamp = time.mktime(datetime.timetuple())
    warning, warning_string = self.handle_warning_state(alarm_name, state)
    return {
        "warning": warning,
        "warning_string": warning_string,
        "alarm_id": alarm_id,
        "alarm_name": alarm_name,
        "date_timestamp": stamp,
        "state": state,
        "remark": remark,
    }
election_urls = [election['url'] + '.csv' for election in requests.get(url_str).json()] pp.pprint(election_urls) # Makes a dictionary of pandas DataFrames keyed on election string. dfs = dict((election.split("/")[-1][:-4], build_frame(election)) for election in election_urls) pp.pprint(dfs["2014-kentucky-senate-mcconnell-vs-grimes"].head()) first_name = re.compile("senate-(.*?)-vs") second_name = re.compile("vs-(.*)") for key in dfs: print key first_candidate = first_name.search(key) second_candidate = second_name.search(key) if not first_candidate or not second_candidate: continue name1 = first_candidate.group(1).title() name2 = second_candidate.group(1).title() dfs[key]["Diff"] = dfs[key][name1] - dfs[key][name2] convert = lambda x: dt.datetime.utcfromtimestamp(cl.timegm(dt.timetuple())) date = dfs[key]["Start Date"].apply(convert) dfs[key]["Start Date"] = date within_twoweeks = dfs[key][dt.datetime.now() - dfs[key]["Start Date"] <= dt.timedelta(days=14)] print within_twoweeks.head() break
def datetime_to_timestamp(datetime):
    """Whole-second local-time Unix timestamp (int) of *datetime*."""
    epoch_seconds = time.mktime(datetime.timetuple())
    return int(epoch_seconds)
def _timestamp_from_datetime(self, datetime):
    """Whole-second local Unix timestamp for *datetime*."""
    # TODO: Adjust for server timezone?
    as_seconds = time.mktime(datetime.timetuple())
    return int(as_seconds)
def datetime2timestampms(datetime):
    """Convert *datetime* (local time) to a Unix timestamp in ms (int).

    Removed the stray trailing semicolon (non-idiomatic Python);
    behaviour is unchanged.
    """
    # mktime yields whole seconds; scale to milliseconds.
    return int(time.mktime(datetime.timetuple()) * 1000)
def set_modified(path, datetime):
    ''' Sets the atime and mtime for the particular path '''
    stamp = time.mktime(datetime.timetuple())
    os.utime(path, (stamp, stamp))
def datetime2timestampms(datetime):
    """Local-time Unix timestamp of *datetime* in milliseconds (int)."""
    seconds = time.mktime(datetime.timetuple())
    return int(seconds * 1000)
def datetime_to_int(self, datetime=None):
    """Convert *datetime* to a whole-second local Unix timestamp.

    The old default of None crashed with AttributeError when omitted;
    now the current time is used instead, which is backward-compatible
    (all existing call sites pass an explicit datetime).
    """
    if datetime is None:
        from datetime import datetime as _dt
        datetime = _dt.now()
    return int(time.mktime(datetime.timetuple()))
def datetime_to_faketime(datetime):
    """Wrap *datetime*'s local-time Unix timestamp in a FakeTime."""
    epoch_seconds = time.mktime(datetime.timetuple())
    return FakeTime(epoch_seconds)
def datetimeToEpoch(datetime): return str(time.mktime(datetime.timetuple()) + float("0.%s" % datetime.microsecond))
def add_votes(self):
    """ add selected legilsator's votes cast"""
    # Fetch every vote by this legislator from the Sunlight Congress API
    # (50 per page), flatten each vote's bill data, annotate with CRP/PAP
    # topic metadata, and append an event dict to self.legis_list.
    # NOTE(review): the API key is hard-coded in the first URL but taken
    # from self.apikey in the paginated URL below -- confirm they match.
    print "adding vote data from Sunlight Congress api"
    votes = []
    #make initial request to get number of cosponsored bills
    votes_url = ('http://congress.api.sunlightfoundation'
                 '.com/votes?&fields=bill,voters.%s,voted_at'
                 '&per_page=50&apikey=7ed8089422bd4022bb9c236062'
                 '377c5b') % self.legislator['id']['bioguide']
    res = requests.get(votes_url)
    total_pages = (res.json()["count"]/50) + 1
    page = 2
    # votes = [ vote for vote in res.json()['results'] ]
    for result in res.json()["results"]:
        votes.append(result)
    # this should probably use generators
    while page <= total_pages:
        # print "adding votes"
        votes_url = ('http://congress.api.sunlightfoundation.com'
                     '/votes?&fields=bill,voters.%s,voted_at&per_page=50'
                     '&page=%s&apikey=%s') % (self.legislator['id']['bioguide'],
                                              page, self.apikey)
        res = requests.get(votes_url)
        for result in res.json()["results"]:
            votes.append(result)
        page += 1
    for v in votes:
        bioguide = self.legislator['id']['bioguide']
        # Only process votes where this member voted and a bill is attached.
        if (bioguide in v['voters'].keys() and 'bill' in v.keys()):
            v['bill']['vote'] = ''
            v['bill']['vote'] = v['voters'][bioguide]['vote']
            del v['voters']
            # Flatten the bill sub-dict and CRP defaults into v (Python-2
            # dict.items() concatenation).
            v = dict(v['bill'].items() + v.items())
            del v['bill']
            v = dict(crp_dict.items() + v.items())
            # if the bill comes from the house
            if v["bill_type"] == "hr":
                #set the pap key
                pap_key = "%s-HR-%s" % (str(v["congress"]), str(v["number"]))
            elif v["bill_type"] == "s":
                pap_key = "%s-S-%s" % (str(v["congress"]), str(v["number"]))
            # else set the pap key to None
            else:
                pap_key = None
            # if the pap key is not none
            if pap_key != None:
                # try to get a major topic
                try:
                    v["major_topic"] = self.bill_topic_dict[pap_key][0]
                    v["minor_topic"] = self.bill_topic_dict[pap_key][1]
                except:
                    # NOTE(review): bare except -- missing keys leave the
                    # topics at the crp_dict defaults; presumably an
                    # intentional best-effort, confirm.
                    pass
            # else set the major topic to nothing
            else:
                v["major_topic"] = ""
                v["minor_topic"] = ""
            # iterate through the crosswalk and look for rows that match
            # bill's major and minor topic
            maj = v['major_topic']
            min = v['minor_topic']
            for row in self.crp_pap_crosswalk:
                if pap_topic_subtopic([maj, min],
                                      [row[3], row[4], row[6], row[7], row[8]]):
                    # print "gots a vote topic with pap code %r %r %r" % ( pap_key, row[3], v['major_topic'])
                    v['crp_catcode'] = row[0]
                    v['crp_catname'] = row[1]
                    v['crp_description'] = row[2]
                    v['pap_major_topic'] = row[3]
                    v['pap_subtopic_code'] = row[4]
                    v['fit'] = row[5]
                    v['pap_subtopic_2'] = row[6]
                    v['pap_subtopic_3'] = row[7]
                    v['pap_subtopic_4'] = row[8]
                    v['notes_chad'] = row[9]
                    v['pa_subtopic_code'] = row[10]
                    v['note'] = row[11]
            dt = dup.parse(v["voted_at"])
            # remove 4 hours to convert from utc to est
            timestamp = str(int(time.mktime(dt.timetuple())) - 14400)
            vote = {
                "time" : timestamp,
                "event" : "vote",
                "event_type" : "vote",
                "info" : v,
                "event_id" : str(uuid.uuid4())
            }
            self.legis_list.append(vote)
    print "adding CRP/PAP metadata to votes"
def timestamp(self, datetime=None):
    ''' return unix timestamp (milliseconds) from datetime; defaults to now (UTC) '''
    # BUG FIX: the old default `datetime.datetime.utcnow()` was evaluated
    # once at import time, so every defaulted call reused that stale
    # moment. Evaluate lazily instead.
    if datetime is None:
        import datetime as _datetime_mod
        datetime = _datetime_mod.datetime.utcnow()
    return int(time.mktime(datetime.timetuple()) * 1000)
def timestamp(datetime):
    """Convert a datetime object into epoch seconds (float, local time)."""
    local_struct = datetime.timetuple()
    return time.mktime(local_struct)
def add_votes(self):
    """ add selected legilsator's votes cast"""
    # Fetches all votes by this legislator from the Sunlight Congress API
    # page by page, flattens each vote's bill data, tags it with CRP/PAP
    # topic metadata from the crosswalk, and appends an event dict to
    # self.legis_list.
    print "adding vote data from Sunlight Congress api"
    votes = []
    #make initial request to get number of cosponsored bills
    votes_url = ('http://congress.api.sunlightfoundation'
                 '.com/votes?&fields=bill,voters.%s,voted_at'
                 '&per_page=50&apikey=7ed8089422bd4022bb9c236062'
                 '377c5b') % self.legislator['id']['bioguide']
    res = requests.get(votes_url)
    total_pages = (res.json()["count"] / 50) + 1
    page = 2
    # votes = [ vote for vote in res.json()['results'] ]
    for result in res.json()["results"]:
        votes.append(result)
    # this should probably use generators
    while page <= total_pages:
        # print "adding votes"
        votes_url = (
            'http://congress.api.sunlightfoundation.com'
            '/votes?&fields=bill,voters.%s,voted_at&per_page=50'
            '&page=%s&apikey=%s') % (self.legislator['id']['bioguide'],
                                     page, self.apikey)
        res = requests.get(votes_url)
        for result in res.json()["results"]:
            votes.append(result)
        page += 1
    for v in votes:
        bioguide = self.legislator['id']['bioguide']
        # Only votes where this member actually voted and a bill exists.
        if (bioguide in v['voters'].keys() and 'bill' in v.keys()):
            v['bill']['vote'] = ''
            v['bill']['vote'] = v['voters'][bioguide]['vote']
            del v['voters']
            # Merge bill fields and CRP defaults into v (Python-2 style
            # dict.items() list concatenation).
            v = dict(v['bill'].items() + v.items())
            del v['bill']
            v = dict(crp_dict.items() + v.items())
            # if the bill comes from the house
            if v["bill_type"] == "hr":
                #set the pap key
                pap_key = "%s-HR-%s" % (str(v["congress"]), str(
                    v["number"]))
            elif v["bill_type"] == "s":
                pap_key = "%s-S-%s" % (str(v["congress"]), str(
                    v["number"]))
            # else set the pap key to None
            else:
                pap_key = None
            # if the pap key is not none
            if pap_key != None:
                # try to get a major topic
                try:
                    v["major_topic"] = self.bill_topic_dict[pap_key][0]
                    v["minor_topic"] = self.bill_topic_dict[pap_key][1]
                except:
                    # NOTE(review): bare except leaves topics at the
                    # crp_dict defaults when the key is missing.
                    pass
            # else set the major topic to nothing
            else:
                v["major_topic"] = ""
                v["minor_topic"] = ""
            # iterate through the crosswalk and look for rows that match
            # bill's major and minor topic
            maj = v['major_topic']
            min = v['minor_topic']
            for row in self.crp_pap_crosswalk:
                if pap_topic_subtopic(
                        [maj, min],
                        [row[3], row[4], row[6], row[7], row[8]]):
                    # print "gots a vote topic with pap code %r %r %r" % ( pap_key, row[3], v['major_topic'])
                    v['crp_catcode'] = row[0]
                    v['crp_catname'] = row[1]
                    v['crp_description'] = row[2]
                    v['pap_major_topic'] = row[3]
                    v['pap_subtopic_code'] = row[4]
                    v['fit'] = row[5]
                    v['pap_subtopic_2'] = row[6]
                    v['pap_subtopic_3'] = row[7]
                    v['pap_subtopic_4'] = row[8]
                    v['notes_chad'] = row[9]
                    v['pa_subtopic_code'] = row[10]
                    v['note'] = row[11]
            dt = dup.parse(v["voted_at"])
            # remove 4 hours to convert from utc to est
            timestamp = str(
                int(time.mktime(dt.timetuple())) - 14400)
            vote = {
                "time": timestamp,
                "event": "vote",
                "event_type": "vote",
                "info": v,
                "event_id": str(uuid.uuid4())
            }
            self.legis_list.append(vote)
    print "adding CRP/PAP metadata to votes"
def parseToTimestamp(datetime):
    """Whole-second local-time Unix timestamp of *datetime* (int)."""
    epoch = time.mktime(datetime.timetuple())
    return int(epoch)
def datetime2struct_time(self, datetime: datetime.datetime) -> time.struct_time:
    """Return *datetime* broken out as a time.struct_time."""
    as_struct = datetime.timetuple()
    return as_struct
def _get_average_ping_from_time(datetime):
    """Return a 24-element list: average ping for each hour-of-day over
    all rows between *datetime* and now; hours with no pings yield 0.

    NOTE(review): the parameter name `datetime` shadows the datetime
    class; `datetime.today()` / `datetime.fromtimestamp(...)` below are
    classmethods reached through the passed-in instance -- this works
    but is easy to misread.
    """
    # Get all ping objects from the database between the given datetime and today
    # (uses the module-level `cursor`; presumably a DB-API cursor -- confirm.)
    cursor.execute("SELECT * FROM pings WHERE datetime BETWEEN ? AND ?",
                   [time.mktime(datetime.timetuple()),
                    time.mktime(datetime.today().timetuple())])
    # Instantiate the two lists used
    hour_summ = [0 for x in range(0, 24)]
    hour_count = [0 for x in range(0, 24)]
    # Sums all the pings in each hour
    for row in cursor:
        # assumes row layout (?, ping_value, timestamp) -- TODO confirm schema
        date = datetime.fromtimestamp(row[2])
        if row[1] is not None:
            hour_summ[date.hour] += row[1]
            hour_count[date.hour] += 1
    # Calculates the average ping for each hour
    for i in range(0, 24):
        try:
            hour_summ[i] = hour_summ[i] / hour_count[i]
        except ZeroDivisionError:
            # If no pings for a given hour, use 0 as the default value
            hour_summ[i] = 0
    return hour_summ
def get_js_timestamp(datetime):
    # Expects a date or datetime object; returns same in milliseconds
    # since the epoch. (That is the date format expected by flot.js.)
    millis = time.mktime(datetime.timetuple()) * 1000
    return int(millis)
def dayth_hourth(datetime):
    """Return (day-of-year, fractional hour) for *datetime*."""
    fields = datetime.timetuple()
    fractional_hour = fields.tm_hour + fields.tm_min / 60
    return fields.tm_yday, fractional_hour
def to_timestamp(datetime):
    """UTC-interpreted Unix timestamp of *datetime*, in milliseconds."""
    utc_seconds = calendar.timegm(datetime.timetuple())
    return int(utc_seconds) * 1000
def convertDatetimeToTime(datetime):
    """Local-time epoch seconds (float) for *datetime*."""
    broken_down = datetime.timetuple()
    return time.mktime(broken_down)
def delayed_push(self, datetime, item):
    """Queue *item* to run at *datetime* via the resque delayed queues."""
    when = int(time.mktime(datetime.timetuple()))
    self.redis.rpush('resque:delayed:%s' % when, ResQ.encode(item))
    self.redis.zadd('resque:delayed_queue_schedule', when, when)
def maketime(year, month, date, hour, minute, second=0):
    """Build a local-time Unix timestamp (int seconds) from components.

    BUG FIX: the old code assigned the constructed value to a local
    named `datetime`, which shadowed the datetime module on the very
    line that still needed it, raising UnboundLocalError on every call.
    """
    dt = datetime.datetime(year, month, date, hour, minute, second)
    return int(time.mktime(dt.timetuple()))
def _to_utc_timestamp(dt):
    """Return the POSIX timestamp (whole seconds) of *dt*, read as UTC.

    Aware datetimes are converted to UTC first; naive ones are assumed
    to already be UTC (calendar.timegm treats the tuple as UTC).
    """
    if dt.tzinfo is not None:
        # stdlib timezone.utc replaces the former pytz.UTC dependency;
        # astimezone() yields the same instant either way.
        from datetime import timezone
        dt = dt.astimezone(timezone.utc)
    return calendar.timegm(dt.timetuple())
def dt2stamp(datetime):
    """Epoch seconds (float, local time) for *datetime*."""
    return time.mktime(datetime.timetuple())
def getTimeStamp(self, datetime):
    """Millisecond local-time Unix timestamp (int) for *datetime*."""
    seconds = time.mktime(datetime.timetuple())
    return int(seconds * 1000)
def main():
    """Flask-style view: build visualisation data about an entity.

    Dispatches on the `value` query parameter (summary / mentions /
    sentiment / sources / network), runs the matching MongoDB aggregation
    on db.articles, and renders content.html with the result plus a
    wikiSearch biography for the entity named by `search`.

    NOTE(review): the 'sources' branch and the no-`value` branch never
    assign `message`, so render_template below would raise NameError on
    those paths -- confirm intended behaviour.
    """
    db = get_db('dev-ethinker')
    output=[]
    if request.args.get('value'):
        value = request.args.get('value')
        if request.args.get('search'):
            query = request.args.get('search')
        else:
            # Default entity when no search term is given.
            query="Elena Valenciano"
        if value =='summary':
            # Aggregations: mentions per source blog, top tags, and
            # per-entity counts with sentiment sums/averages.
            sourcesPipe = [{"$unwind":"$entities"},{"$match":{ "entities":query}},
                           {"$group":{"_id":{"key":"$titleBlog"}, "values":{"$sum":1}}},
                           {"$project":{"key":"$_id.key","values":"$values"}},
                           {"$sort":{"values":-1}}]
            categoriesPipe = [{"$unwind":"$entities"},{"$unwind":"$tags"},
                              {"$match":{ "entities":query}},
                              {"$group":{"_id":{"text":"$tags"}, "size":{"$sum":1}}},
                              {"$project":{"text":"$_id.text","size":"$size"}},
                              {"$sort":{"size":-1}}]
            articlesPipe = [{"$unwind":"$entities"},{"$match":{ "entities":query}},
                            {"$group":{"_id":{"key":"$entities"}, "count":{"$sum":1},
                                       "sentimentAvg": {"$avg":"$sentimentScore"},
                                       "sentimentSum": {"$sum":"$sentimentScore"}}},
                            {"$project":{"key":"$_id.key","count":"$count",
                                         "sentimentAvg":"$sentimentAvg",
                                         "sentimentTotal":"$sentimentSum"}}]
            # NOTE(review): sentimentPipe is built but never executed.
            sentimentPipe = [{"$unwind":"$entities"},{"$match":{ "entities":query}},
                             {"$group":{"_id":{"key":"$entities"}, "values":{"$sum":1}}},
                             {"$project":{"key":"$_id.key","values":"$values"}},
                             {"$sort":{"values":-1}}]
            sourcesData = db.articles.aggregate(sourcesPipe)
            categoriesData = db.articles.aggregate(categoriesPipe)
            articlesData = db.articles.aggregate(articlesPipe)
            output ={"sources": sourcesData['result'],
                     "categories": categoriesData['result'][0:25],
                     "articles": articlesData['result'][0]['count'],
                     "sentimentAvg": int(articlesData['result'][0]['sentimentAvg']),
                     "sentimentTotal": articlesData['result'][0]['sentimentTotal']}
            message="Summary data about the entity, number of mentions, global sentiment, sources and content"
        elif value=='mentions' or value=='sentiment':
            # Group mentions and sentiment per calendar day.
            pipe = [{"$unwind":"$entities"}, {"$match":{ "entities":query}},
                    {"$group":{"_id":{"day": { "$dayOfMonth": "$date" },
                                      "month": { "$month": "$date" },
                                      "year": { "$year": "$date" }},
                               "count":{"$sum":1},
                               "sentimentAvg": {"$avg":"$sentimentScore"},
                               "sentimentSum": {"$sum":"$sentimentScore"}}},
                    {"$project":{"day":"$_id.day", "month":"$_id.month",
                                 "year":"$_id.year",
                                 "sentimentAvg": "$sentimentAvg",
                                 "sentimentSum": "$sentimentSum",
                                 "mentions":"$count"}}]
            mongoData = db.articles.aggregate(pipe)
            dataOutput=[]
            for item in mongoData['result']:
                dic={}
                # Rebuild a date then convert to epoch milliseconds for
                # the charting library.
                date=str(item["_id"]['day'])+"-"+str(item["_id"]['month'])+"-"+str(item["_id"]['year'])
                dic['date']=datetime.strptime(date, '%d-%m-%Y')
                tupleDate=datetime.timetuple(dic['date'])
                milliDate=calendar.timegm(tupleDate)*1000
                dic['milliseconds']= long(milliDate)
                dic['totalMentions']=item['mentions']
                dic['sentimentSum']=item['sentimentSum']
                dic['sentimentAvg']=item['sentimentAvg']
                dataOutput.append(dic)
            sortData=sorted(dataOutput, key=lambda x:x['date'])
            if value=="mentions":
                output=[{'key':"Total Mentions","values":[]}]
                message="Time serie of mentions about an entity"
                for item in sortData:
                    output[0]['values'].append([item['milliseconds'],item['totalMentions']])
            elif value=="sentiment":
                output=[{'key':"Total Sentiment","values":[]},{'key':"Average Sentiment","values":[]}]
                message="Time serie of global and average sentiment about an entity"
                for item in sortData:
                    output[0]['values'].append([item['milliseconds'],item['sentimentSum']])
                    output[1]['values'].append([item['milliseconds'],item['sentimentAvg']])
        elif value =='sources':
            # NOTE(review): these pipelines are built but never run and
            # the entity names are hard-coded -- looks unfinished.
            pipe = [{"$unwind":"$entities"},
                    { "$match" : {"$or":[{ "entities" : "Elena Valenciano"},{ "entities" :"Miguel Arias" }]}},
                    {"$group":{"_id":{"blog":"$entities"}, "count":{"$sum":1}}},
                    {"$sort":{"count":-1}}]
            pipeCount = [{ "$group" : { "_id" : None, "count" : { "$sum" : 1 } } }]
        elif value =='network':
            # Group article counts per (author, source) pair.
            pipeGraph=[{"$unwind":"$entities"},{"$unwind":"$tags"},{"$match":{ "entities":query}},
                       {"$group":{"_id":{"author":"$author","source":"$titleBlog"},"size":{"$sum":1}}},
                       {"$project":{"author":"$_id.author","source":"$_id.source","size":"$size"}}]
            mongoDataGraph = db.articles.aggregate(pipeGraph)
            #dataGraph=sorted(mongoDataGraph,key=lambda x:x)
            message="Mentions network about an entity, main node is the entity, first level are sources and second level influencers"
            # NOTE(review): name_to_node is never used.
            name_to_node = {}
            output = {'name': query, 'children': [],"size":10}
            i=0
            data=sorted(mongoDataGraph['result'],key=lambda x:x['source'])
            # Build one child node per source, with authors as leaves.
            for item in data:
                if i == 0:
                    node = {"name":item['source'], 'children':[],"size":5}
                    if 'author' in item:
                        node["children"].append({'name': item['author'], 'size':item['size']})
                    else:
                        node["children"].append({'name': item['source'], 'size':item['size']})
                else:
                    if item['source'] == node['name']:
                        if 'author' in item:
                            node["children"].append({'name': item['author'], 'size':item['size']})
                        else:
                            node["children"].append({'name': item['source'], 'size':5})
                    else:
                        node = {"name":item['source'], 'children':[],"size":5}
                        if 'author' in item:
                            node["children"].append({'name': item['author'], 'size':item['size']})
                        else:
                            node["children"].append({'name': item['source'], 'size':5})
                        # NOTE(review): only nodes created after the first
                        # source change are appended; the first node appears
                        # never to reach output['children'] -- confirm.
                        output['children'].append(node)
                i+=1
    else:
        query="No Entity"
        value=""
    import wikiSearch as ws
    try:
        response_body=ws.searchWikiPage(query)
    except:
        # Best-effort biography lookup; fall back to an empty bio.
        response_body=[]
    return render_template("content.html", message=message, title = query, value = value, data = output, bio = response_body)
def getTimestamp(datetime):
    """Whole-second local-time Unix timestamp of *datetime*, as a string."""
    whole_seconds = int(time.mktime(datetime.timetuple()))
    return str(whole_seconds)
def epoch(datetime):
    """Millisecond local-time Unix timestamp (int) for *datetime*."""
    in_seconds = time.mktime(datetime.timetuple())
    return int(in_seconds * 1000)
def unixTime(datetime):
    """Whole-second local-time Unix timestamp (int) for *datetime*."""
    as_float = time.mktime(datetime.timetuple())
    return int(as_float)
def delayed_push(cls, datetime, item):
    """Schedule *item* to run at *datetime* on the class-level client's
    resque delayed queue."""
    due = int(time.mktime(datetime.timetuple()))
    cls.client.rpush('resque:delayed:%s' % due, TaskMessage.encode(item))
    cls.client.zadd('resque:delayed_queue_schedule', due, due)
def collect_events(helper, ew):
    """Poll a Statuspage.io page and write new incident / scheduled
    maintenance events to Splunk, tracking a checkpoint per feed.

    :param helper: modular-input helper (args, checkpoints, HTTP, events)
    :param ew: Splunk event writer
    """
    opt_statuspage_url = helper.get_arg('statuspage_url')
    opt_incidents = helper.get_arg('incidents')
    opt_maintenance = helper.get_arg('maintenance')
    # get input type
    helper.get_input_type()
    # get the loglevel from the setup page
    loglevel = helper.get_log_level()
    # get proxy setting configuration
    proxy_settings = helper.get_proxy()
    # Separate checkpoint keys per feed, keyed on the page URL.
    icp_key = opt_statuspage_url + "-incidents"
    mcp_key = opt_statuspage_url + "-maintenance"
    service_name = "Statuspage:" + helper.get_input_stanza_names()
    # Build feed URLs
    iurl = "https://" + opt_statuspage_url + "/api/v2/incidents.json"
    murl = "https://" + opt_statuspage_url + "/api/v2/scheduled-maintenances.json"
    method = "GET"
    # Load saved checkpoints (0 = never indexed). The n*_updated_at
    # variables track the newest timestamp seen this run and become the
    # next checkpoints.
    icp_updated_at = helper.get_check_point(icp_key)
    mcp_updated_at = helper.get_check_point(mcp_key)
    if icp_updated_at is None:
        icp_updated_at = 0
    nicp_updated_at = icp_updated_at
    if mcp_updated_at is None:
        mcp_updated_at = 0
    nmcp_updated_at = mcp_updated_at
    if opt_incidents:
        response = helper.send_http_request(iurl, method, parameters=None, payload=None,
                                            headers=None, cookies=None, verify=True,
                                            cert=None, timeout=None, use_proxy=True)
        # get response body as json. If the body text is not a json string, raise a ValueError
        r_json = response.json()
        # get response status code
        r_status = response.status_code
        # check the response status, if the status is not sucessful, raise requests.HTTPError
        response.raise_for_status()
        for incidents in r_json['incidents']:
            # NOTE(review): fromisoformat() rejects a trailing 'Z' before
            # Python 3.11 -- confirm the API's timestamp format.
            updated_at = time.mktime(datetime.fromisoformat(incidents['updated_at']).timetuple())
            if updated_at > icp_updated_at:
                data = json.dumps(incidents)
                if updated_at > nicp_updated_at:
                    nicp_updated_at = updated_at
                # To create a splunk event
                event = helper.new_event(source=service_name, index=helper.get_output_index(),
                                         sourcetype="statuspage:incidents", data=data)
                ew.write_event(event)
    if opt_maintenance:
        response = helper.send_http_request(murl, method, parameters=None, payload=None,
                                            headers=None, cookies=None, verify=True,
                                            cert=None, timeout=None, use_proxy=True)
        # get response body as json. If the body text is not a json string, raise a ValueError
        r_json = response.json()
        # get response status code
        r_status = response.status_code
        # check the response status, if the status is not sucessful, raise requests.HTTPError
        response.raise_for_status()
        for maintenance in r_json['scheduled_maintenances']:
            updated_at = time.mktime(datetime.fromisoformat(maintenance['updated_at']).timetuple())
            # BUG FIX: this previously compared against icp_updated_at
            # (the *incidents* checkpoint), so maintenance events could
            # be skipped or re-indexed depending on the other feed.
            if updated_at > mcp_updated_at:
                data = json.dumps(maintenance)
                if updated_at > nmcp_updated_at:
                    nmcp_updated_at = updated_at
                # To create a splunk event
                event = helper.new_event(source=service_name, index=helper.get_output_index(),
                                         sourcetype="statuspage:maintenance", data=data)
                ew.write_event(event)
    ## Save Checkpoints
    helper.save_check_point(icp_key, nicp_updated_at)
    helper.save_check_point(mcp_key, nmcp_updated_at)
def write_file_with_date(f, content, datetime):
    """Write *content* (UTF-8) to the open file *f*, rewind it, and stamp
    the file's atime/mtime with *datetime* (local time)."""
    f.write(content.encode('utf-8'))
    f.seek(0)
    stamp = time.mktime(datetime.timetuple())
    os.utime(f.name, (stamp, stamp))
def dt_to_int(dt):
    """Truncate *dt* to a whole-second local-time Unix timestamp."""
    whole = time.mktime(dt.timetuple())
    return int(whole)


def int_to_dt(ts):
    """Inverse of dt_to_int: local datetime for Unix timestamp *ts*."""
    return datetime.datetime.fromtimestamp(ts)
def _to_time(self, datetime):
    # Given a datetime object, return the timestamp (local epoch seconds).
    local_fields = datetime.timetuple()
    return time.mktime(local_fields)
def datetime2timestamp(datetime):
    """Local-time epoch seconds (float) for *datetime*."""
    broken = datetime.timetuple()
    return time.mktime(broken)
def timestamp(datetime):
    """Return the UTC-interpreted Unix timestamp of *datetime*.

    calendar.timegm treats the tuple as UTC; this backfills
    datetime.timestamp-like behaviour on Python 2.
    """
    utc_tuple = datetime.timetuple()
    return calendar.timegm(utc_tuple)
def datetimeToMicroseconds(self, datetime):
    """Convert *datetime* to microseconds since the epoch (local time).

    Uses int() instead of the Python-2-only long() builtin (a NameError
    on Python 3); Python 2 auto-promotes large ints, so the value is
    unchanged there.
    """
    return int(time.mktime(datetime.timetuple()) * 1e6) + datetime.microsecond
def from_datetime(cls, datetime):
    """Format *datetime* as an RFC-1123 / HTTP-date string in GMT."""
    epoch_seconds = mktime(datetime.timetuple())
    return eut.formatdate(timeval=epoch_seconds, localtime=False, usegmt=True)
def _get_timestamp(self, datetime):
    """Return *datetime* as a millisecond local-time Unix timestamp.

    Uses int() rather than the Python-2-only long() builtin (a NameError
    on Python 3); Python 2 promotes large ints automatically, so the
    value is unchanged there.
    """
    return int((time.mktime(datetime.timetuple())) * 1000)
def raw_time(self, time_str):
    # NOTE(review): BROKEN -- `time_str` is never used and `datetime` is
    # not defined in this scope, so calling this raises NameError.
    # Presumably it should first parse `time_str` into a datetime; the
    # intended input format is not visible here -- confirm with callers
    # before fixing.
    return time.mktime(datetime.timetuple())