def getTime(daterange=1):
    """Return the epoch timestamp (int seconds) for the start of a date range.

    daterange: 1 = today, 2 = start of this week (Monday), 3 = start of
    this month, 4 = yesterday, 5 = the day before yesterday.  Any other
    value returns 0.  All results are local-time midnight (00:00:00).
    """
    t = list(time.localtime(time.time()))
    # Zero out hour/minute/second once so mktime yields midnight
    # (the original redundantly did this twice for daterange == 1).
    t[3] = t[4] = t[5] = 0
    if daterange == 1:      # today
        pass
    elif daterange == 2:    # this week (tm_wday: Monday == 0)
        t[2] -= t[6]
    elif daterange == 3:    # this month
        t[2] = 1
    elif daterange == 4:    # yesterday
        t[2] -= 1
    elif daterange == 5:    # the day before yesterday
        t[2] -= 2
    else:
        return 0
    # mktime normalizes out-of-range fields (e.g. day 0 rolls back a month).
    return int(time.mktime(tuple(t)))
def test_sms_receiving(self):
    """An inbound Kannel SMS GET creates exactly one ReceivedSMS and returns
    an empty 200 response (no reply headers, so no automatic reply is sent).

    Uses assertEqual instead of the long-deprecated assertEquals alias.
    """
    self.assertEqual(ReceivedSMS.objects.count(), 0)
    resp = self.client.get(reverse('api:e-scape:sms-receive'), {
        'smsc': 'id of sms center',
        'svc': 'name of the service',
        's': 'originator',
        'r': 'recipient',
        'text': 'actual message'
    })
    self.assertEqual(resp.status_code, 200)
    # make sure we don't send any headers or content. If we do we'll
    # automatically send a reply, currently that's not the
    # desired behaviour.
    self.assertEqual(resp.get('X-Kannel-From', None), None)
    self.assertEqual(resp.get('X-Kannel-To', None), None)
    self.assertEqual(resp.get('X-Kannel-SMSC', None), None)
    self.assertEqual(resp.get('X-Kannel-Service', None), None)
    self.assertEqual(resp.content, '')
    self.assertEqual(ReceivedSMS.objects.count(), 1)
    sms = ReceivedSMS.objects.latest()
    import time  # grmbl messy datetime comparisons
    self.assertAlmostEqual(
        time.mktime(sms.received_at.utctimetuple()),
        time.mktime(datetime.utcnow().utctimetuple()),
        places=1
    )
    self.assertEqual(sms.from_msisdn, 'originator')
    self.assertEqual(sms.to_msisdn, 'recipient')
    self.assertEqual(sms.message, 'actual message')
    self.assertEqual(sms.transport_name, 'E-scape')
def clean_data(df): # select Incidents that are closed df_closed = df[df['incident_state'] == 'Closed'] df1 = df_closed[[ 'incident_state', 'category', 'assignment_group', 'reopen_count', 'made_sla', 'impact', 'urgency', 'priority', 'opened_at', 'closed_at', 'closed_code', 'caused_by' ]] list1 = [] for val in df1['opened_at']: d1 = datetime.datetime.strptime(val, "%d/%m/%Y %H:%M") t1 = time.mktime(d1.timetuple()) list1.append(t1) list2 = [] for val in df1['closed_at']: d2 = datetime.datetime.strptime(val, "%d/%m/%Y %H:%M") t2 = time.mktime(d2.timetuple()) list2.append(t2) list3 = [] for i in range(len(list1)): list3.append(list2[i] - list1[i]) df1['time_to_close'] = np.array(list3) df1['time_to_close'] = df1['time_to_close'] / 3600.0 priority = pd.get_dummies(df1.priority, prefix="priority") df1.drop('priority', inplace=True, axis=1) df1 = df1.join(priority) impact = pd.get_dummies(df1.impact, prefix="impact") df1.drop('impact', inplace=True, axis=1) df1 = df1.join(impact) urgency = pd.get_dummies(df1.urgency, prefix="urgency") df1.drop('urgency', inplace=True, axis=1) df1 = df1.join(urgency) closed_code = pd.get_dummies(df1.closed_code, prefix="closed_code") df1.drop('closed_code', inplace=True, axis=1) df1 = df1.join(closed_code) lb_make = LabelEncoder() df1['category'] = lb_make.fit_transform(df1['category']) df1['assignment_group'] = lb_make.fit_transform(df1['assignment_group']) # df1['active']= df1['active'].astype(int) df1['made_sla'] = df1['made_sla'].astype(int) df1 = df1.dropna() # df = pd.get_dummies(df) #df1['time_to_close'] = time_to_close df1 = df1.drop(['incident_state', 'opened_at', 'closed_at', 'caused_by'], axis=1) X = df1.drop(['time_to_close'], axis=1) y = df1['time_to_close'] return (X, y) '''
def writePeriod(self, sdt, edt, pcapdest):
    """Write packets falling in the [sdt, edt] datetime window into
    pcapdest as pcap.  Returns True if any packets were written.

    BUG FIX: the original created a Popen for ``lsof`` but compared the
    Popen *object* to "" (never its output), and only read the pcap
    inside the ``except`` branch -- so under normal operation no file
    was ever read.  The lsof output is now actually collected; a file
    is skipped only when lsof reports it open.
    """
    self.readFolder()  # in case any new ones since object instantiated
    edtt = time.mktime(edt.timetuple())  # as seconds since epoch
    sdtt = time.mktime(sdt.timetuple())
    try:
        enddt = edt.strftime('%Y-%m-%d-%H:%M:%S')
        startdt = sdt.strftime('%Y-%m-%d-%H:%M:%S')
    except AttributeError:
        logging.warning(
            '##Problem with start and end datetimes in writePeriod - %s and %s - expected datetimes'
            % (sdt, edt))
        return False
    # Locate the span of archive files whose start stamps overlap the window.
    firstfi = bisect.bisect_left(self.pcaptds, int(sdtt))
    lastfi = min(
        bisect.bisect_right(self.pcaptds, int(edtt)) + 1,
        len(self.pcaptds) - 1)
    acted = False
    npkt = 0
    for fnum in range(firstfi, lastfi):
        rdfname = self.pcapfnames[fnum]
        in_use = False
        try:
            lsout = Popen(['lsof', '-t', rdfname], stdout=PIPE, shell=False)
            # lsof -t prints PIDs of processes holding the file open.
            in_use = bool(lsout.communicate()[0].strip())
        except OSError:
            # lsof unavailable on this system - assume the file is closed.
            pass
        if in_use:
            logging.debug('file %s in use so not read' % rdfname)
            continue
        pin = rdpcap(rdfname)
        if len(pin) > 0:
            mint = min([x.time for x in pin])
            maxt = max([x.time for x in pin])
            logging.debug('file %s has min %.2f and max %.2f' %
                          (rdfname, mint, maxt))
            # keep only packets inside the requested window
            pin = [
                x for x in pin
                if int(x.time) >= sdtt and int(x.time) <= edtt
            ]  # gotta love scapy
            if len(pin) > 0:
                npkt += len(pin)
                wrpcap(pcapdest, pin, append=True)  # appends packets to output file
                acted = True
                logging.info('wrote %d packets to %s' % (len(pin), pcapdest))
            else:
                logging.debug(
                    'writePeriod got zero packets filtering by start %s end %s on pcap %s '
                    % (sdtt, edtt, rdfname))
        else:
            logging.debug(
                'writePeriod got an empty pcap file at path %s - this happens...'
                % rdfname)
    logging.info(
        'writePeriod filtered %d packets from %d packet files using window %s - %s to %s'
        % (npkt, lastfi - firstfi + 1, startdt, enddt, pcapdest))
    return acted
def initialize_days(self):
    """Initialise the per-day counters covering start_time .. today.

    day_number counts whole days elapsed since self.start_time plus one;
    the three parallel lists hold per-day tweet counts, term frequency
    distributions and weights.
    """
    elapsed = time.mktime(datetime.today().timetuple()) - time.mktime(self.start_time)
    self.day_number = int(elapsed / 86400) + 1
    self.day_cnt_list = [0] * self.day_number
    self.day_doc_list = [nltk.FreqDist() for _ in xrange(self.day_number)]
    self.day_weight_list = [0] * self.day_number
def get_current_datetime(option):
    """Return the current time as an integer epoch timestamp.

    option == "date": the timestamp of today's local midnight;
    anything else: the current timestamp in whole seconds.
    """
    current_result = int(time.mktime(datetime.datetime.now().timetuple()))
    if option == "date":
        # Truncate to midnight directly instead of the original
        # strftime/strptime round-trip through "%d-%m-%Y" -- same
        # result, without formatting and re-parsing a string.
        midnight = datetime.datetime.now().replace(
            hour=0, minute=0, second=0, microsecond=0)
        current_result = int(time.mktime(midnight.timetuple()))
    return current_result
def strTimeProp(start, end, format, prop):
    """Interpolate between two formatted times.

    start and end are strings in the given strftime-style format and
    define the interval [start, end].  prop is the fraction of the
    interval past start at which to sample.  Returns the sampled time
    rendered with the same format.
    """
    start_secs = time.mktime(time.strptime(start, format))
    end_secs = time.mktime(time.strptime(end, format))
    sampled = start_secs + (end_secs - start_secs) * prop
    return time.strftime(format, time.localtime(sampled))
def __handle_stat(self, stat, id, topic, container_name):
    """Persist one docker stats sample as 'memory' and 'cpu' Metrics.

    stat: one JSON document from the docker stats API.  The read
    timestamp is converted to epoch milliseconds; memory usage is
    usage/limit; CPU usage is the delta of container cycles over the
    delta of system cycles since the previous sample for this id.
    """
    try:
        read_dt = parser.parse(stat["read"])
        timestamp = int((time.mktime(read_dt.timetuple()) +
                         (read_dt.microsecond / 1000000.0)) * 1000)
        memory_usage = float(stat["memory_stats"]["usage"]) / float(stat["memory_stats"]["limit"])
        Metric.create(topic=topic, container=container_name,
                      timestamp=timestamp, name="memory", value=memory_usage)
        # Calculate CPU usage. The docker API returns the number of cycles consumed
        # by the container and the number of cycles consumed by the system. We need
        # to take the difference over time and divide them to retrieve the usage
        # percentage.
        total_usage = float(stat["cpu_stats"]["cpu_usage"]["total_usage"])
        system_usage = float(stat["cpu_stats"]["system_cpu_usage"])
        if id in self.id_to_cpu:
            usage_diff = total_usage - self.id_to_cpu[id]["total"]
            system_diff = system_usage - self.id_to_cpu[id]["system"]
            # Guard against a zero system delta (duplicate sample) as well
            # as counter resets, instead of dividing by zero.
            if usage_diff >= 0 and system_diff > 0:
                usage_pct = usage_diff / system_diff
            else:
                usage_pct = 0.0
            Metric.create(topic=topic, container=container_name,
                          timestamp=timestamp, name="cpu", value=usage_pct)
        self.id_to_cpu[id] = {"total": total_usage, "system": system_usage}
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallows
        # SystemExit/KeyboardInterrupt and can block thread shutdown.
        # We don't want to kill the stat thread, and we don't really mind
        # if some statistics aren't saved properly.
        pass
def increment_func(timestamp):
    """Shift *timestamp* by one month in the direction given by the
    enclosing scope's ``bump_direction`` and return the new epoch value.

    When the original day-of-month does not exist in the target month
    (e.g. Jan 31 -> Feb), the day is decremented until the date is
    valid, giving up (re-raising) after 10 attempts.
    """
    moment = datetime.datetime.fromtimestamp(timestamp)
    year = moment.year
    month = moment.month + bump_direction
    day = moment.day
    # Wrap the month into 1..12, carrying into the year.
    if month > 12:
        year += 1
        month = 1
    if month < 1:
        year -= 1
        month = 12
    remaining = 10
    while True:
        try:
            moment = moment.replace(year=year, month=month, day=day)
        except ValueError:
            # Target month is shorter than the source day; clamp downwards.
            day -= 1
            remaining -= 1
            if remaining == 0:
                raise
        else:
            break
    return time.mktime(moment.timetuple())
def get_sns(self, gld_name):
    """Return, as a JSON string, the mail.message thread attached to the
    syt.oa.gld record named *gld_name*, one dict per message with the
    posting employee's id/name/email, the stripped body and a display time.
    """
    results = []
    gld = request.env['syt.oa.gld'].sudo().search([('name', '=', gld_name)])
    messages = request.env['mail.message'].sudo().search([
        ('res_id', '=', gld.id), ('model', '=', 'syt.oa.gld')
    ])
    if messages:
        for msg in messages:
            author = request.env['hr.employee'].sudo().search([
                ('user_id', '=', int(msg.create_uid))
            ])
            # Shift the stored create_date by +8h for display
            # (presumably UTC -> local time; confirm against server TZ).
            parsed = time.strptime(str(msg.create_date), "%Y-%m-%d %H:%M:%S")
            shifted = int(time.mktime(parsed)) + 8 * 60 * 60
            display_time = time.strftime("%Y-%m-%d %H:%M:%S",
                                         time.localtime(shifted))
            results.append({
                'id': author.id,
                'name': author.name,
                'email': author.work_email,
                'body': str(msg.body).replace("<p>", "").replace("</p>", ""),
                'time': display_time,
            })
    return JSONEncoder().encode(results)
def get_gld_info(self, name):
    """Return, as a JSON string, the syt.oa.gld records matching *name*
    (leading whitespace stripped), flagging whether the current session
    user appears among the record type's copy_users.
    """
    records = request.env['syt.oa.gld'].sudo().search([('name', '=',
                                                        name.lstrip())])
    copy_users = request.env['syt.oa.gld'].sudo().search([
        ('copy_users.user_id', '=', request.session['uid'])
    ])
    info_list = []
    if records:
        for rec in records:
            # Shift create_date by +8h for display (presumably
            # UTC -> local time; confirm against server TZ).
            parsed = time.strptime(str(rec.create_date), "%Y-%m-%d %H:%M:%S")
            shifted = int(time.mktime(parsed)) + 8 * 60 * 60
            display_time = time.strftime("%Y-%m-%d %H:%M:%S",
                                         time.localtime(shifted))
            info_list.append({
                'name': rec.name,
                'company_name': rec.company_id.name,
                'dept': rec.dept,
                'id': rec.create_uid.id,
                'user_name': rec.create_uid.name,
                'write_date': display_time,
                'state': rec.state,
                'subject': rec.subject,
                'content': rec.content,
                # distinguishes CC recipient from approver
                'copy_users': 'yes' if copy_users else 'no',
            })
    return JSONEncoder().encode(info_list)
def readFolder(self):
    """Index complex folders of pcaps by start date, using the fugly
    metadata embedded in each file name (``<prefix>_<FSDTFORMAT>.<ext>``),
    so a time window of packets can be extracted later.

    Populates self.pcapfnames and self.pcaptds sorted by start timestamp.
    """
    pcapinfo = []
    for dirName, _subdirs, files in os.walk(self.pcapsFolder):
        for pfn in files:
            parts = pfn.split('_')  # assume name works this way...
            if len(parts) != 2:
                # silently skip names without exactly one underscore,
                # matching the original behaviour
                continue
            ppath = os.path.join(dirName, pfn)
            if not self.isScapypcap(ppath):
                logging.warning(
                    'File name %s in path %s is NOT a valid pcap file with _%s - ignoring'
                    % (pfn, self.pcapsFolder, FSDTFORMAT))
                continue
            stamp = parts[1].split('.')[0]  # date portion before the extension
            try:
                started = datetime.strptime(stamp, FSDTFORMAT)
                pcapinfo.append([int(time.mktime(started.timetuple())), ppath])
            except:
                logging.warning(
                    'Found pcap file name %s in path %s - expected %s preceded by an underscore - ignoring'
                    % (pfn, self.pcapsFolder, FSDTFORMAT))
    pcapinfo.sort()  # files might turn up in any old order in complex archives
    self.pcapfnames = [entry[1] for entry in pcapinfo]
    self.pcaptds = [entry[0] for entry in pcapinfo]
def calendar(callback_query, bot):
    # Edit the triggering Telegram message into a two-month text calendar:
    # today is marked with ▓▓, upcoming Moodle course events with ░░, and a
    # per-event legend is appended below the calendars.
    msg = callback_query.message
    # Look up the chat's registered e-mail, then pull that user's courses
    # and their events from Moodle.
    user_email = mongodb.get_chat(callback_query.message.chat)["email"]
    courses = moodleAPI.get_courses_by_user_email(user_email)
    events = moodleAPI.get_courses_events(courses)
    markup = types.InlineKeyboardMarkup()
    markup.add(types.InlineKeyboardButton("« Voltar", callback_data="config"))
    # Normalise "today" to midnight (NOTE(review): minute defaults to 0 but
    # is not passed explicitly, unlike hour/second).
    today = datetime.now()
    today = datetime(year=today.year, month=today.month, day=today.day,
                     hour=0, second=0)
    next_month = today + relativedelta(months=1)
    this_month_calendar = cal.TextCalendar().formatmonth(
        today.year, today.month)
    # Highlight today's cell.
    this_month_calendar = replace_helper(this_month_calendar, today.day, "▓▓")
    next_month_calendar = cal.TextCalendar().formatmonth(
        next_month.year, next_month.month)
    events_legend = ""
    if events["events"]:
        # Inject a synthetic "Hoje" (today) entry so it sorts in with the
        # real events by start time.
        events["events"].append({
            "name": "Hoje",
            "timestart": time.mktime(today.timetuple())
        })
        events["events"].sort(key=lambda elem: elem["timestart"])
        for event in events["events"]:
            event_date = datetime.fromtimestamp(event["timestart"])
            if event["name"] == "Hoje":
                nl = "\n"
                events_legend += f"▓▓ {event_date.day}/{event_date.month} - {event['name']}{nl}"
            else:
                # Mark the event's day in whichever month it falls into.
                if event_date.month == today.month:
                    this_month_calendar = replace_helper(
                        this_month_calendar, event_date.day, "░░")
                else:
                    next_month_calendar = replace_helper(
                        next_month_calendar, event_date.day, "░░")
                nl = "\n"
                events_legend += f"░░ {event_date.day}/{event_date.month} - {event['name']}{nl}"
    message = f"""<code>{this_month_calendar}</code>
<code>{next_month_calendar}
{events_legend}
</code>
"""
    bot.edit_message_text(message,
                          chat_id=msg.chat.id,
                          message_id=msg.message_id,
                          reply_markup=markup)
def find_mongo(db, startDate, endDate):
    """Query db.mesowest for documents whose 'timestamp' (epoch millis)
    lies in [startDate, endDate), both given as '%Y%m%d/%H%M' strings.

    Returns the cursor/result (NOTE: it is iterated for printing first,
    which exhausts a real pymongo cursor -- preserved from the original).

    BUG FIX: the prints were Python-2 statements (syntax errors under
    Python 3, which the ``except ... as e`` clause already requires);
    they are now parenthesised calls valid in both.
    """
    try:
        pattern = '%Y%m%d/%H%M'
        startDateTS = int(time.mktime(time.strptime(startDate, pattern))) * 1000
        endDateTS = int(time.mktime(time.strptime(endDate, pattern))) * 1000
        result = db.mesowest.find(
            {"timestamp": {
                "$lt": endDateTS,
                "$gte": startDateTS
            }})
        for document in result:
            print("Result from mongodb")
            print(document)
        return result
    except Exception as e:
        logging.warning(e)
def getTimeOClockOfToday(year, month, day):
    """Return the epoch timestamp (int) of *today's* local midnight.

    NOTE(review): the year/month/day parameters are accepted but never
    used -- the result is always today's midnight.  Kept as-is to
    preserve the existing call contract; confirm with callers before
    wiring the parameters in.
    """
    import time
    now = time.localtime(time.time())
    midnight_text = time.strftime('%Y-%m-%d 00:00:00', now)
    return int(time.mktime(time.strptime(midnight_text, '%Y-%m-%d %H:%M:%S')))
def setWecker(database_update_time, day_shift=False):
    """Return the epoch timestamp (int) of the next alarm.

    database_update_time is an (hour, minute) pair.  The alarm is set
    for today at that time; if that moment has already passed, or
    day_shift is True, it is pushed to tomorrow.
    """
    now = datetime.datetime.today()
    alarm_hour, alarm_minute = database_update_time
    alarm = datetime.datetime(now.year, now.month, now.day,
                              alarm_hour, alarm_minute, 0)
    if day_shift or alarm < now:
        alarm += datetime.timedelta(days=1)
    return int(time.mktime(alarm.timetuple()))
def daybeforehollyday(d):
    """Return whether the day after *d* (a time.struct_time) is a holiday,
    i.e. whether *d* itself is the day before a holiday."""
    # epoch of d, advanced by one day (86400 seconds)
    next_day_epoch = time.mktime(d) + 86400
    # re-expand to a struct_time and delegate the holiday check
    return isHollyday(time.localtime(next_day_epoch))
def update_days(self, tweets):
    """Accumulate per-day counters from an iterable of raw tweets.

    For each tweet, the post time (extracted with self.time_pattern and
    parsed with self.ISOTIMEFORMAT) is mapped to a day index relative to
    self.start_time; that day's tweet count and term frequency
    distribution are updated.
    """
    seconds_per_day = 86400
    for tweet in tweets:
        stamp_text = re.search(self.time_pattern, tweet).group()
        posted = time.strptime(stamp_text, self.ISOTIMEFORMAT)
        day_index = int(
            (time.mktime(posted) - time.mktime(self.start_time)) / seconds_per_day)
        self.day_cnt_list[day_index] += 1
        self.day_doc_list[day_index].update(self.get_terms(tweet))
def friendtime(dt, format='%Y-%m-%d %H:%M'):
    """Humanise an epoch timestamp: '今天 HH:MM' for today, '昨天 HH:MM'
    for yesterday, otherwise the timestamp rendered with *format*."""
    now_struct = time.localtime(time.time())
    midnight = time.mktime(
        time.strptime(time.strftime('%Y-%m-%d 00:00:00', now_struct),
                      '%Y-%m-%d %H:%M:%S'))
    yesterday_midnight = midnight - 3600 * 24
    if dt > midnight:
        return u'今天' + time.strftime('%H:%M', time.localtime(dt))
    if yesterday_midnight < dt < midnight:
        return u'昨天' + time.strftime('%H:%M', time.localtime(dt))
    return time.strftime(format, time.localtime(dt))
def __init__(self):
    """Boot the reactor service: start logging, record the start time,
    open the PostgreSQL-backed server database from config, and preload
    the genre list."""
    # start logging
    MM_Common_Logging.MM_Common_Logging_Start('./log/MetaMan_Subprogram_Reactor_String')
    # set other data
    # NOTE(review): mktime() interprets its tuple as *local* time, so
    # feeding it gmtime() skews the value by the UTC offset -- confirm
    # whether a UTC epoch (time.time()) was intended.
    self.server_start_time = time.mktime(time.gmtime())
    self.users = {}  # maps user names to network instances
    # open the database using the 'DB Connections' section of the config
    self.db = database_base.MM_Server_Database()
    self.db.MM_Server_Database_Open(Config.get('DB Connections', 'PostDBHost').strip(),
                                    Config.get('DB Connections', 'PostDBPort').strip(),
                                    Config.get('DB Connections', 'PostDBName').strip(),
                                    Config.get('DB Connections', 'PostDBUser').strip(),
                                    Config.get('DB Connections', 'PostDBPass').strip())
    # preload some data from database
    self.genre_list = self.db.MM_Server_Database_Metadata_Genre_List()
    logging.info("Ready for connections!")
def GetProjectsLastUpdate(self):
    """Return settings.lastupdate (row id 1) as a datetime, or None when
    the row or value is missing/empty.

    The stored text uses '%Y-%m-%d %H:%M:%S.%f'; the mktime round-trip
    drops the fractional seconds.
    """
    self.Connect()
    self.cursor.execute('SELECT lastupdate FROM settings WHERE id = 1')
    lu = self.cursor.fetchone()
    # BUG FIX: guard against fetchone() returning no row at all, and use
    # identity comparison for None instead of '== None'.
    if lu is None or lu[0] is None or lu[0] == '':
        self.Disconnect()
        return None
    dt = datetime.fromtimestamp(
        time.mktime(time.strptime(lu[0], '%Y-%m-%d %H:%M:%S.%f')))
    self.Disconnect()
    return dt
def GetProjectsLastUpdate(self):
    """Return settings.lastupdate (row id 1) as a datetime, or None when
    the row or value is missing/empty.

    The stored text uses '%Y-%m-%d %H:%M:%S.%f'; the mktime round-trip
    drops the fractional seconds.
    """
    self.Connect()
    self.cursor.execute('SELECT lastupdate FROM settings WHERE id = 1')
    lu = self.cursor.fetchone()
    # BUG FIX: guard against fetchone() returning no row at all, and use
    # identity comparison for None instead of '== None'.
    if lu is None or lu[0] is None or lu[0] == '':
        self.Disconnect()
        return None
    dt = datetime.fromtimestamp(
        time.mktime(time.strptime(lu[0], '%Y-%m-%d %H:%M:%S.%f')))
    self.Disconnect()
    return dt
def __init__(self, month=None, day=None, year=None, epoch=None):
    """Initialise from either an epoch timestamp or year/month/day.

    With *epoch*, the calendar fields are derived from it; otherwise
    missing fields default to today's values from the module-level
    nowdict and the epoch is that date's midnight (mktime).
    """
    import time
    if epoch is not None:
        # localtime yields (year, mon, mday, hour, min, sec, wday, yday, isdst)
        year, month, day = time.localtime(epoch)[:3]
    else:
        if year is None:
            year = nowdict["year"]
        if day is None:
            day = nowdict["day"]
        if month is None:
            month = nowdict["month"]
        epoch = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
    self.year, self.month, self.day, self.epoch = year, month, day, epoch
def __init__(self, month=None, day=None, year=None, epoch=None):
    """Initialise from either an epoch timestamp or year/month/day.

    With *epoch*, the calendar fields are derived from it; otherwise
    missing fields default to today's values from the module-level
    nowdict and the epoch is that date's midnight (mktime).
    """
    import time
    if epoch is not None:
        # localtime yields (year, mon, mday, hour, min, sec, wday, yday, isdst)
        year, month, day = time.localtime(epoch)[:3]
    else:
        if year is None:
            year = nowdict["year"]
        if day is None:
            day = nowdict["day"]
        if month is None:
            month = nowdict["month"]
        epoch = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
    self.year, self.month, self.day, self.epoch = year, month, day, epoch
def getDayOfTheWeek(timestamp, as_day_short=True, my_local="de_DE"):
    """Return the weekday name for *timestamp* (epoch seconds).

    as_day_short selects abbreviated English names ('Sun'..'Sat');
    otherwise full German names are returned.  my_local is kept for
    backward compatibility but is unused (locale handling was already
    commented out in the original).
    """
    # The original round-tripped through localtime()/mktime() (an identity
    # for valid timestamps) and computed strftime('%w') twice; one lookup
    # suffices.  '%w' gives 0 for Sunday .. 6 for Saturday.
    index = int(datetime.datetime.fromtimestamp(int(timestamp)).strftime('%w'))
    if as_day_short:
        names = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
    else:
        names = ['Sonntag', 'Montag', 'Dienstag', 'Mittwoch', 'Donnerstag',
                 'Freitag', 'Samstag']
    return names[index]
def getOrdersArrayResult(year, month, day):
    # Scrape every page of the 'listuseroutgo' listing, keep only the orders
    # that fall on the given calendar day, merge them per order id and export
    # the result to Excel via exculeExcel().
    #
    # NOTE(review): relies on module globals: session, headers, allOrders,
    # getHTML, orderById, exculeExcel, BeautifulSoup -- confirm availability.
    # Day window [startTime, endTime] in local epoch seconds.
    a = "%d-%d-%d 00:00:00" % (year, month, day)
    startTime = time.mktime(time.strptime(a, '%Y-%m-%d %H:%M:%S'))
    endTime = startTime + 60 * 60 * 24 - 1
    i = 1  # NOTE(review): unused
    # Fetch page 1 just to read the total row count.
    url = 'http://www.shanxinhui.com/user/manager/listuseroutgo/p/1.html'
    request_result = session.get(url, headers=headers, allow_redirects=False)
    html = request_result.text
    # Repair duplicated closing </td> tags before parsing.
    prefectHtml = html.replace('</td>\n</td>', '</td>')
    soup = BeautifulSoup(prefectHtml, "html.parser")
    # soup = BeautifulSoup(login_page.text, "html.parser")
    # print(soup.find("table"))
    rowsNumber = soup.find("span", class_="rows")
    print('----')
    print(rowsNumber.string)
    print('----')
    # Strip everything but digits/dots to get the row count; 18 rows/page.
    non_decimal = re.compile(r'[^\d.]+')
    number = non_decimal.sub('', rowsNumber.string)
    pageNumber = int(int(number) / 18) + 1
    print(int(pageNumber) + 1)
    # getHTML() appends each page's parsed rows to the global allOrders.
    n = 1
    while n < pageNumber + 1:
        getHTML(n)
        n += 1
    global allOrders
    gggArray = []
    # Keep only orders whose timestamp (index 3) lies inside the day window.
    for dict in allOrders:  # NOTE(review): 'dict' shadows the builtin
        if dict[3] > startTime and dict[3] < endTime:
            gggArray.append(dict)
            print(dict[3])
    # Merge rows by order id (index 0), summing the two amount columns.
    newArray = []
    for array in gggArray:
        order, newArray = orderById(array[0], newArray)
        order.number1 = str(int(array[1]) + int(order.number1))
        order.number2 = str(int(array[2]) + int(order.number2))
    exculeExcel(newArray)
def getHTML(page):
    # Fetch one paginated listing page of outgo orders, parse its table and
    # append the normalised rows to the global allOrders list.
    # Row layout appended: [account, seed-amount, coin-amount, epoch-seconds].
    url = 'http://www.shanxinhui.com/user/manager/listuseroutgo/p/%d.html' % page
    request_result = session.get(url, headers=headers, allow_redirects=False)
    print('html result -------')
    print(request_result.text)
    print('html result -------end')
    html = request_result.text
    # Repair duplicated closing </td> tags before parsing.
    prefectHtml = html.replace('</td>\n</td>', '</td>')
    soup = BeautifulSoup(prefectHtml, "html.parser")
    rows = soup.find("table").find_all("tr")  # NOTE(review): unused
    dict = {'id': [1, 2]}  # NOTE(review): unused; shadows the builtin
    array = [dict]  # NOTE(review): unused
    table = soup.find("table")
    result = makelist(table)
    result.pop()     # drop the last row
    result.pop(0)    # drop the first (header) row
    newReuslt = []
    for tempArray in result:
        print(tempArray[5])
        newReuslt.append([])
        typeString = tempArray[1]   # order type column
        timeString = tempArray[5]   # timestamp column
        t_obj = time.strptime(timeString, "%Y-%m-%d %H:%M:%S")
        ts = time.mktime(t_obj)
        numberString = tempArray[2]
        accountString = tempArray[3]
        newReuslt[-1].insert(0, accountString)
        # Column 1 holds the amount only for '善种子' rows, else '0'.
        if typeString == '善种子':
            newReuslt[-1].insert(1, numberString)
        else:
            newReuslt[-1].insert(1, '0')
        # Column 2 holds the amount only for '善心币' rows, else '0'.
        if typeString == '善心币':
            newReuslt[-1].insert(2, numberString)
        else:
            newReuslt[-1].insert(2, '0')
        newReuslt[-1].insert(3, ts)
    print(newReuslt)
    global allOrders
    allOrders = allOrders + newReuslt
def insert_bulk_mongo_mesonet(db, data, timestamp):
    '''Insert Mesonet station rows into db.mesowest.

    Mesowest header
    'STN YYMMDD/HHMM MNET SLAT SLON SELV TMPF SKNT DRCT GUST PMSL ALTI DWPF RELH WTHR P24I'
    Mesonet header
    '# id,name,mesonet,lat,lon,elevation,agl,cit,state,country,active'
    Matching:  # id (0) -> STN   lat (3) -> SLAT   lon (4) -> SLON
               elevation (5) -> SELV
    All other Mesowest fields are stored as the literal string "NULL".
    timestamp: '20180316_2145'-style stamp applied to every row.
    '''
    for d in data:
        print(d)
        values = d.split(',')
        # BUG FIX: str.replace() returns a new string; the original threw
        # the result away, so strptime received '20180316_2145' and raised.
        stamp = timestamp.replace('_', '/')
        pattern = '%Y%m%d/%H%M'
        # epoch milliseconds
        time_t = int(time.mktime(time.strptime(stamp, pattern))) * 1000
        print(time_t)
        input_data = {
            "STN": values[0],
            "timestamp": time_t,
            "MNET": "NULL",
            "SLAT": values[3],
            "SLON": values[4],
            "SELV": values[5],
            "TMPF": "NULL",
            "SKNT": "NULL",
            "DRCT": "NULL",
            "GUST": "NULL",
            "PMSL": "NULL",
            "ALTI": "NULL",
            "DWPF": "NULL",
            "RELH": "NULL",
            "WTHR": "NULL",
            "P24I": "NULL"
        }
        result = db.mesowest.insert_one(input_data)
        print('One post: {0}'.format(result.inserted_id))
def parse_datetime(s, rel_to=None, use_utc=False):
    """Parse a natural-language date/time string into an epoch timestamp.

    Tries a US-English parsedatetime calendar first, then the native
    locale, each with and without the rel_to reference timestamp.
    Returns None for empty input; raises ValueError when nothing parses.
    Date-only matches are normalised to midnight with unknown DST.
    """
    import parsedatetime as pdt
    us_cal = pdt.Calendar(pdt.Constants("en_US"))
    native_cal = pdt.Calendar()
    import datetime
    import time
    if not s:
        return None
    reference = None
    if rel_to is not None:
        reference = datetime.datetime.fromtimestamp(rel_to).timetuple()
    # Fall through progressively more lenient parse attempts.
    t_struct, parsed_as = us_cal.parse(s, reference)
    if not parsed_as:
        t_struct, parsed_as = us_cal.parse(s, None)
    if not parsed_as:
        t_struct, parsed_as = native_cal.parse(s, reference)
    if not parsed_as:
        t_struct, parsed_as = native_cal.parse(s, None)
    if not parsed_as:
        raise ValueError("failed to parse date/time '%s'" % s)
    t_struct = list(t_struct)
    if parsed_as == 1:
        # only parsed as date, eliminate time part
        t_struct[3:6] = (0, 0, 0)
        t_struct[8] = -1  # isdst -- we don't know if that is DST
    if use_utc:
        from calendar import timegm
        return timegm(t_struct)
    return time.mktime(t_struct)
def get_list(self, user_id, value, copy_users):
    """Build a summary dict for one gld record *value*.

    copy_users (truthy/falsy) marks whether the viewer is a CC recipient
    ('yes') rather than an approver ('no').  The stored write_date is
    shifted +8h for display (presumably UTC -> local; confirm).
    """
    parsed = time.strptime(str(value.write_date), "%Y-%m-%d %H:%M:%S")
    shifted = int(time.mktime(parsed)) + 8 * 60 * 60
    display_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(shifted))
    summary = {
        'user_id': user_id,
        'id': value.create_uid.id,
        'user_name': value.create_uid.name,
        'name': value.name,
        'company_name': value.company_id.name,
        'dept': value.subject,
        'write_date': display_time,
        'state': value.state,
        'copy_users': 'yes' if copy_users else 'no',
    }
    return summary
def getTweetCountByDate(self, queryargs):
    """Count tweets per calendar day for each requested category.

    :param queryargs: request args; 'categoryID' may repeat.  All other
        args are forwarded unchanged to self.getTweets per category.
    :return: (True, list) where each list entry is
        {"key": category, "values": [[epoch_seconds, count], ...]}.
    """
    categories = queryargs.getlist('categoryID')
    print "categories:", categories
    tweetCatCount = []
    arguments = queryargs.copy()
    for category in categories:
        # Re-run the tweet query restricted to this single category.
        arguments['categoryID'] = category
        (success, result) = self.getTweets(arguments)
        #print 'tweetsresult',result
        catCount = {"key": category, "values": []}
        countDict = {}
        for tweet in result['tweets']:
            # tweet['timestamp'] is in milliseconds; bucket by local day.
            timestamp = tweet['timestamp']
            #date = datetime.datetime.fromtimestamp(timestamp/1000).strftime("%B %d, %Y")
            date = datetime.datetime.fromtimestamp(timestamp/1000).strftime("%d-%b-%y")
            #print "date:", date
            if date in countDict.keys():
                countDict[date] = countDict[date]+1
            else:
                countDict[date] = 1
        for key, value in countDict.items():
            # Convert the day label back to an epoch (midnight) for charting.
            key = key+":00:00:00"
            print "key::", key
            import time
            key = time.strptime(key, "%d-%b-%y:%H:%M:%S")
            key = time.mktime(key)
            pair = [key, value]
            catCount['values'].append(pair)
        tweetCatCount.append(catCount)
    print "TweetCatCount:::", tweetCatCount
    return True, tweetCatCount
def exif_getdate(filename):
    """Return a DATE_FORMAT-formatted timestamp string for *filename*,
    chosen as the earliest of: filesystem created time, modified time,
    and the EXIF DateTimeOriginal tag (when present and parseable).

    BUG FIXES: the original printed the builtin ``file`` instead of
    *filename*, and compared the int ``exif_time`` against a
    ``struct_time`` (meaningless mixed-type ordering); all candidates
    are now compared as epoch seconds.
    """
    created_time = os.path.getctime(filename)
    modify_time = os.path.getmtime(filename)
    try:
        tags = exif.parse(filename)
    except UnboundLocalError:
        print('No EXIF data available for ', filename)
        tags = {}
    exif_time = 0
    try:
        exif_time = int(
            time.mktime(time.strptime(str(tags['DateTimeOriginal']),
                                      "%Y:%m:%d %H:%M:%S")))
    except (KeyError, ValueError):
        print('No EXIF DateTimeOriginal for ', filename)
        exif_time = 0
    # Earliest of the filesystem timestamps, both in epoch seconds.
    best_time = min(created_time, modify_time)
    # Prefer the EXIF capture time when it is present and even earlier.
    if exif_time and exif_time < best_time:
        best_time = exif_time
    return time.strftime(DATE_FORMAT, time.localtime(best_time))
def friendtimeV2(timestamp, isTimestamp=True):
    """Render a timestamp as a relative, human-friendly Chinese string.

    Non-timestamp input ('%Y-%m-%d %H:%M:%S' text) is converted and
    delegated to friendtime().  Timestamps are shifted +8h, then shown
    as 'N 月/周/天/小时/分钟/秒前' when under a year old, otherwise as an
    absolute '%Y-%m-%d %H:%M:%S' string.
    """
    if not isTimestamp:
        timestamp = time.mktime(time.strptime(str(timestamp), '%Y-%m-%d %H:%M:%S'))
        return friendtime(timestamp)
    shifted = int(timestamp) + 8 * 3600
    if (int(time.time()) - shifted) >= 31536000:
        # A year or older: absolute date wins over relative phrasing.
        return datetime.fromtimestamp(shifted).strftime('%Y-%m-%d %H:%M:%S')
    moment = datetime.fromtimestamp(shifted)
    time_diff = (datetime.utcnow() + timedelta(hours=+8)) - moment
    days = time_diff.days
    if days:
        if days > 60:
            return u'%s月前' % (days / 30)
        if days > 30:
            return u'1月前'
        if days > 14:
            return u'%s周前' % (days / 7)
        if days > 7:
            return u'1周前'
        if days > 1:
            return u'%s 天前' % days
        return u'1天前'
    seconds = time_diff.seconds
    if seconds > 7200:
        return u'%s小时前' % (seconds / 3600)
    if seconds > 3600:
        return u'1小时前'
    if seconds > 120:
        return u'%s分钟前' % (seconds / 60)
    if seconds > 60:
        return u'1分钟前'
    # Original returned the same '%s秒前' string for both the >1s and
    # fallback branches; collapsed to a single return.
    return u'%s秒前' % seconds
def isLogin():
    # Check whether the scraping session is logged in by fetching the user's
    # personal pages and printing the status codes/bodies for inspection.
    # (Returns nothing -- purely diagnostic output.)
    print(session.cookies)
    print('----->')
    print(session.cookies)
    print('----->')
    # Baseline request; redirects allowed.
    url = "https://www.baidu.com/"
    login_page = session.get(url, headers=headers, allow_redirects=True)
    print(login_page.status_code)
    print(login_page.text)
    # The real check: this page requires login; allow_redirects=False so a
    # redirect status (rather than page content) indicates "not logged in".
    url = "http://www.shanxinhui.com/user/manager/listuseroutgo/p/1.html"
    login_page = session.get(url, headers=headers, allow_redirects=False)
    print(login_page.status_code)
    print(login_page.text)
    # Scratch demonstration of parsing a timestamp string to epoch seconds.
    text = '2016-11-09 10:09:25'
    t_obj = time.strptime(text, "%Y-%m-%d %H:%M:%S")
    ts = time.mktime(t_obj)
    print(t_obj)
    # y = datetime.strptime(text, '%Y-%m-%d ')
    # z = datetime.now()
    # time_tuple = t_obj.timetuple()
    print(ts)
def get_opinion(self, name, shuzi):
    """Return the approval opinions attached to gld record *name* as a
    JSON string, or the literal string "2" when there are none.

    Each entry carries the approver's id/name/department/company, the
    opinion text and a +8h-shifted display time (presumably UTC -> local;
    confirm).  *shuzi* is accepted but unused.
    """
    records = request.env['syt.oa.gld.opinion'].sudo().search([
        ('gld_id', '=', name)
    ])
    if not records:
        return "2"
    opinions = []
    for rec in records:
        parsed = time.strptime(str(rec.appov_date), "%Y-%m-%d %H:%M:%S")
        shifted = int(time.mktime(parsed)) + 8 * 60 * 60
        opinions.append({
            'id': rec.approver.id,
            'name': rec.approver.name,
            'opinion': rec.opinion,
            'time': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(shifted)),
            'dept': rec.approver.department_id.name,
            'company': rec.approver.company_id.name,
        })
    return JSONEncoder().encode(opinions)
def __handle_stat(self, stat, id, topic, container_name):
    """Persist one docker stats sample as 'memory' and 'cpu' Metrics.

    stat: one JSON document from the docker stats API.  The read
    timestamp is converted to epoch milliseconds; memory usage is
    usage/limit; CPU usage is the delta of container cycles over the
    delta of system cycles since the previous sample for this id.
    """
    try:
        read_dt = parser.parse(stat['read'])
        timestamp = int((time.mktime(read_dt.timetuple()) +
                         (read_dt.microsecond / 1000000.0)) * 1000)
        memory_usage = float(stat['memory_stats']['usage']) / float(
            stat['memory_stats']['limit'])
        Metric.create(topic=topic, container=container_name,
                      timestamp=timestamp, name='memory', value=memory_usage)
        # Calculate CPU usage. The docker API returns the number of cycles consumed
        # by the container and the number of cycles consumed by the system. We need
        # to take the difference over time and divide them to retrieve the usage
        # percentage.
        total_usage = float(stat['cpu_stats']['cpu_usage']['total_usage'])
        system_usage = float(stat['cpu_stats']['system_cpu_usage'])
        if id in self.id_to_cpu:
            usage_diff = total_usage - self.id_to_cpu[id]['total']
            system_diff = system_usage - self.id_to_cpu[id]['system']
            # Guard against a zero system delta (duplicate sample) as well
            # as counter resets, instead of dividing by zero.
            if usage_diff >= 0 and system_diff > 0:
                usage_pct = usage_diff / system_diff
            else:
                usage_pct = 0.0
            Metric.create(topic=topic, container=container_name,
                          timestamp=timestamp, name='cpu', value=usage_pct)
        self.id_to_cpu[id] = {'total': total_usage, 'system': system_usage}
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallows
        # SystemExit/KeyboardInterrupt and can block thread shutdown.
        # We don't want to kill the stat thread, and we don't really mind
        # if some statistics aren't saved properly.
        pass
def write(self, out, obj):
    """Serialise obj's date/time onto *out* as a long epoch value.

    obj must expose timetuple() (datetime/date); the epoch seconds are
    written with out.write_long().  Note: long() is Python-2-only.
    """
    epoch_seconds = time.mktime(obj.timetuple())
    out.write_long(long(epoch_seconds))
def getDateTimeFromTimestamp2(timestamp):
    """Format an epoch timestamp as 'DD.MM.YY HH:MM' in local time.

    The original initialised a dead ``mytime = None`` and round-tripped
    the input through localtime()/mktime() -- an identity for valid
    timestamps (it only truncates fractional seconds, which int() does
    anyway) -- so the value is formatted directly.
    """
    return datetime.datetime.fromtimestamp(int(timestamp)).strftime('%d.%m.%y %H:%M')
def convert_date_to_int(date_convert):
    """Convert a 'DD-MM-YYYY' date string to its local-midnight epoch
    timestamp as an int."""
    parsed = datetime.datetime.strptime(date_convert, "%d-%m-%Y")
    return int(time.mktime(parsed.timetuple()))
def panorama_request():
    """
    Gets the details of anomalies from the database, using the URL arguments
    that are passed in by the :obj:`request.args` to build the MySQL select
    query string and queries the database, parse the results and creates an
    array of the anomalies that matched the query and creates the
    ``panorama.json`` file, then returns the array.  The Webapp needs both the
    array and the JSONP file to serve to the browser for the client side
    ``panorama.js``.

    :param None: determined from :obj:`request.args`
    :return: array
    :rtype: array

    .. note:: And creates ``panorama.js`` for client side javascript
    """
    logger.info('determining request args')

    def get_ids_from_rows(thing, rows):
        # Flatten db rows of ids into a comma-separated string suitable for
        # an SQL "IN (...)" clause, e.g. "[1, 2]" -> "1, 2".
        found_ids = []
        for row in rows:
            found_id = str(row[0])
            found_ids.append(int(found_id))
        ids_first = string.replace(str(found_ids), '[', '')
        in_ids = string.replace(str(ids_first), ']', '')
        return in_ids

    try:
        request_args_len = len(request.args)
    except:
        request_args_len = False

    # No query arguments at all means "show the 10 most recent anomalies".
    latest_anomalies = False
    if request_args_len == 0:
        request_args_len = 'No request arguments passed'
        # return str(request_args_len)
        latest_anomalies = True

    # NOTE(review): dead branch — ``metric`` is hard-coded to False here, so
    # this id lookup never runs.
    metric = False
    if metric:
        logger.info('Getting db id for %s' % metric)
        query = 'select id from metrics WHERE metric=\'%s\'' % metric
        try:
            result = mysql_select(skyline_app, query)
        except:
            logger.error('error :: failed to get id from db: %s' % traceback.format_exc())
            result = 'metric id not found in database'
        return str(result[0][0])

    # search_request: rows are full anomaly records; count_request: rows are
    # (metric_id, count) aggregates.  count_by_metric flips these below.
    search_request = True
    count_request = False

    if latest_anomalies:
        logger.info('Getting latest anomalies')
        query = 'select id, metric_id, anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp from anomalies ORDER BY id DESC LIMIT 10'
        try:
            rows = mysql_select(skyline_app, query)
        except:
            logger.error('error :: failed to get anomalies from db: %s' % traceback.format_exc())
            rows = []

    if not latest_anomalies:
        logger.info('Determining search parameters')
        query_string = 'select id, metric_id, anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp from anomalies'

        # needs_and tracks whether the accreting WHERE clause has started:
        # False -> next filter appends "WHERE ...", True -> "AND ...".
        needs_and = False

        # If we have to '' a string we cannot escape the query it seems...
        # NOTE(review): do_not_escape is never read.
        do_not_escape = False
        if 'metric' in request.args:
            metric = request.args.get('metric', None)
            if metric and metric != 'all':
                query = "select id from metrics WHERE metric='%s'" % (metric)
                try:
                    found_id = mysql_select(skyline_app, query)
                except:
                    logger.error('error :: failed to get app ids from db: %s' % traceback.format_exc())
                    found_id = None
                if found_id:
                    target_id = str(found_id[0][0])
                    if needs_and:
                        new_query_string = '%s AND metric_id=%s' % (query_string, target_id)
                    else:
                        new_query_string = '%s WHERE metric_id=%s' % (query_string, target_id)
                    query_string = new_query_string
                    needs_and = True

        if 'metric_like' in request.args:
            metric_like = request.args.get('metric_like', None)
            if metric_like and metric_like != 'all':
                query = 'select id from metrics WHERE metric LIKE \'%s\'' % (str(metric_like))
                try:
                    rows = mysql_select(skyline_app, query)
                except:
                    logger.error('error :: failed to get metric ids from db: %s' % traceback.format_exc())
                    return False
                ids = get_ids_from_rows('metric', rows)
                # NOTE(review): always uses WHERE here (ignores needs_and), so
                # combining metric_like with a prior metric filter would build
                # invalid SQL.
                new_query_string = '%s WHERE metric_id IN (%s)' % (query_string, str(ids))
                query_string = new_query_string
                needs_and = True

        if 'count_by_metric' in request.args:
            count_by_metric = request.args.get('count_by_metric', None)
            if count_by_metric and count_by_metric != 'false':
                # Switch to aggregate mode: replace the query built so far.
                search_request = False
                count_request = True
                # query_string = 'SELECT metric_id, COUNT(*) FROM anomalies GROUP BY metric_id ORDER BY COUNT(*) DESC'
                query_string = 'SELECT metric_id, COUNT(*) FROM anomalies'
                needs_and = False

        if 'from_timestamp' in request.args:
            from_timestamp = request.args.get('from_timestamp', None)
            if from_timestamp and from_timestamp != 'all':
                # Accepts either an epoch value or a 'YYYYMMDD HH:MM' string
                # (detected by the ':'), converted via local-time mktime.
                if ":" in from_timestamp:
                    import time
                    import datetime
                    new_from_timestamp = time.mktime(datetime.datetime.strptime(from_timestamp, '%Y%m%d %H:%M').timetuple())
                    from_timestamp = str(int(new_from_timestamp))
                if needs_and:
                    new_query_string = '%s AND anomaly_timestamp >= %s' % (query_string, from_timestamp)
                    query_string = new_query_string
                    needs_and = True
                else:
                    new_query_string = '%s WHERE anomaly_timestamp >= %s' % (query_string, from_timestamp)
                    query_string = new_query_string
                    needs_and = True

        if 'until_timestamp' in request.args:
            until_timestamp = request.args.get('until_timestamp', None)
            if until_timestamp and until_timestamp != 'all':
                if ":" in until_timestamp:
                    import time
                    import datetime
                    new_until_timestamp = time.mktime(datetime.datetime.strptime(until_timestamp, '%Y%m%d %H:%M').timetuple())
                    until_timestamp = str(int(new_until_timestamp))
                if needs_and:
                    new_query_string = '%s AND anomaly_timestamp <= %s' % (query_string, until_timestamp)
                    query_string = new_query_string
                    needs_and = True
                else:
                    new_query_string = '%s WHERE anomaly_timestamp <= %s' % (query_string, until_timestamp)
                    query_string = new_query_string
                    needs_and = True

        if 'app' in request.args:
            app = request.args.get('app', None)
            if app and app != 'all':
                query = 'select id from apps WHERE app=\'%s\'' % (str(app))
                try:
                    found_id = mysql_select(skyline_app, query)
                except:
                    logger.error('error :: failed to get app ids from db: %s' % traceback.format_exc())
                    found_id = None
                if found_id:
                    target_id = str(found_id[0][0])
                    if needs_and:
                        new_query_string = '%s AND app_id=%s' % (query_string, target_id)
                    else:
                        new_query_string = '%s WHERE app_id=%s' % (query_string, target_id)
                    query_string = new_query_string
                    needs_and = True

        if 'source' in request.args:
            source = request.args.get('source', None)
            if source and source != 'all':
                query = 'select id from sources WHERE source=\'%s\'' % (str(source))
                try:
                    found_id = mysql_select(skyline_app, query)
                except:
                    logger.error('error :: failed to get source id from db: %s' % traceback.format_exc())
                    found_id = None
                if found_id:
                    target_id = str(found_id[0][0])
                    if needs_and:
                        new_query_string = '%s AND source_id=\'%s\'' % (query_string, target_id)
                    else:
                        new_query_string = '%s WHERE source_id=\'%s\'' % (query_string, target_id)
                    query_string = new_query_string
                    needs_and = True

        if 'algorithm' in request.args:
            algorithm = request.args.get('algorithm', None)

            # DISABLED as it is difficult match algorithm_id in the
            # triggered_algorithms csv list
            algorithm = 'all'
            if algorithm and algorithm != 'all':
                query = 'select id from algorithms WHERE algorithm LIKE \'%s\'' % (str(algorithm))
                try:
                    rows = mysql_select(skyline_app, query)
                except:
                    logger.error('error :: failed to get algorithm ids from db: %s' % traceback.format_exc())
                    rows = []
                ids = get_ids_from_rows('algorithm', rows)
                if needs_and:
                    new_query_string = '%s AND algorithm_id IN (%s)' % (query_string, str(ids))
                else:
                    new_query_string = '%s WHERE algorithm_id IN (%s)' % (query_string, str(ids))
                query_string = new_query_string
                needs_and = True

        if 'host' in request.args:
            host = request.args.get('host', None)
            if host and host != 'all':
                query = 'select id from hosts WHERE host=\'%s\'' % (str(host))
                try:
                    found_id = mysql_select(skyline_app, query)
                except:
                    logger.error('error :: failed to get host id from db: %s' % traceback.format_exc())
                    found_id = None
                if found_id:
                    target_id = str(found_id[0][0])
                    if needs_and:
                        new_query_string = '%s AND host_id=\'%s\'' % (query_string, target_id)
                    else:
                        new_query_string = '%s WHERE host_id=\'%s\'' % (query_string, target_id)
                    query_string = new_query_string
                    needs_and = True

        if 'limit' in request.args:
            limit = request.args.get('limit', '10')
        else:
            limit = '10'

        if 'order' in request.args:
            order = request.args.get('order', 'DESC')
        else:
            order = 'DESC'

        search_query = '%s ORDER BY id %s LIMIT %s' % (
            query_string, order, limit)

        if 'count_by_metric' in request.args:
            count_by_metric = request.args.get('count_by_metric', None)
            if count_by_metric and count_by_metric != 'false':
                # query_string = 'SELECT metric_id, COUNT(*) FROM anomalies GROUP BY metric_id ORDER BY COUNT(*) DESC'
                search_query = '%s GROUP BY metric_id ORDER BY COUNT(*) %s LIMIT %s' % (
                    query_string, order, limit)

        try:
            rows = mysql_select(skyline_app, search_query)
        except:
            logger.error('error :: failed to get anomalies from db: %s' % traceback.format_exc())
            rows = []

    anomalies = []
    anomalous_metrics = []

    if search_request:
        # Derive the panorama.json path from the configured anomalies dump path.
        anomalies_json = path.abspath(path.join(path.dirname(__file__), '..', settings.ANOMALY_DUMP))
        panorama_json = string.replace(str(anomalies_json), 'anomalies.json', 'panorama.json')
        if ENABLE_WEBAPP_DEBUG:
            logger.info('debug :: panorama_json - %s' % str(panorama_json))

    for row in rows:
        # Row layout depends on the mode: full records vs (metric_id, count).
        if search_request:
            anomaly_id = str(row[0])
            metric_id = str(row[1])
        if count_request:
            metric_id = str(row[0])
            anomaly_count = str(row[1])
        # Resolve metric_id back to a metric name for display.
        query = 'select metric from metrics WHERE id=%s' % metric_id
        try:
            result = mysql_select(skyline_app, query)
        except:
            logger.error('error :: failed to get id from db: %s' % traceback.format_exc())
            continue
        metric = str(result[0][0])
        if search_request:
            anomalous_datapoint = str(row[2])
            anomaly_timestamp = str(row[3])
            full_duration = str(row[4])
            created_timestamp = str(row[5])
            # NOTE(review): anomaly_data is assembled but never used.
            anomaly_data = (anomaly_id, metric, anomalous_datapoint,
                            anomaly_timestamp, full_duration,
                            created_timestamp)
            anomalies.append([int(anomaly_id), str(metric),
                              anomalous_datapoint, anomaly_timestamp,
                              full_duration, created_timestamp])
            anomalous_metrics.append(str(metric))
        if count_request:
            # Cap the per-metric limit passed to the client at 200.
            limit_argument = anomaly_count
            if int(anomaly_count) > 200:
                limit_argument = 200
            anomaly_data = (int(anomaly_count), metric, str(limit_argument))
            anomalies.append([int(anomaly_count), str(metric), str(limit_argument)])

    anomalies.sort(key=operator.itemgetter(int(0)))

    if search_request:
        # Truncate the file, then write it out as JSONP for panorama.js.
        with open(panorama_json, 'w') as fh:
            pass
        # Write anomalous_metrics to static webapp directory
        with open(panorama_json, 'a') as fh:
            # Make it JSONP with a handle_data() function
            fh.write('handle_data(%s)' % anomalies)

    if latest_anomalies:
        return anomalies
    else:
        return search_query, anomalies
def get_time_stamp(time_str):
    """Convert a ``'YYYY-MM-DD HH:MM:SS'`` string to an integer Unix timestamp.

    Parsing and conversion both use local time; the result is truncated
    to whole seconds.
    """
    parsed = time.strptime(time_str, '%Y-%m-%d %H:%M:%S')
    return int(time.mktime(parsed))
def command_log(*args):
    """Scan the tail of the HMF syslog for resource accesses and report them.

    Optional positional args: ``[ip] [time period in s]`` — ``ip`` is a
    substring filter on the accessing address, and the period limits how
    old a reported access may be.  Returns EXIT_FAILURE when matching
    recent accesses were found (and prints them), EXIT_SUCCESS otherwise.
    Python 2 code (print statements, ``string.uppercase``).
    """
    if len(args) > 2:
        raise Exception(
            "Too many arguments: [ip] [time period in s] (optional parameter)")
    ip = ''
    max_ago = float('inf')
    if len(args) >= 1:
        ip = args[0]
    if len(args) == 2:
        max_ago = float(args[1])

    from pyparsing import Word, alphas, Suppress, Combine, nums, string, Optional, Regex, ParseException

    # define line in (sys)log
    month = Word(string.uppercase, string.lowercase, exact=3)
    integer = Word(nums)
    serverDateTime = Combine(month + " " + integer + " " + integer + ":" + integer + ":" + integer)
    hostname = Word(alphas + nums + "_" + "-")
    daemon = Word(alphas + nums + "/" + "-" + "_") + Optional(Suppress("[") + integer + Suppress("]")) + Suppress(":")
    message = Regex(".*")
    bnf = serverDateTime + hostname + daemon + message

    from collections import deque
    import re, time

    # Map accessing IP -> struct_time of its most recent access, built from
    # the last tail_n lines of the logfile (module-level ``logfile_path``).
    last_access = {}
    tail_n = 100
    for line in deque(open(logfile_path), tail_n):
        try:
            fields = bnf.parseString(line)
        except ParseException:
            continue
        else:
            m = re.search('requests (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})', fields[-1])
            if m:
                #print fields[0], m.group(1)
                cur = time.localtime()
                # guess year...
                # Syslog timestamps carry no year; assume the current year and
                # fall back to the previous one if that lands in the future.
                st = time.strptime(fields[0] + " %s" % cur.tm_year, "%b %d %H:%M:%S %Y")
                if st > cur:
                    # ok, re-guess
                    st = time.strptime(fields[0] + " %s" % (cur.tm_year - 1), "%b %d %H:%M:%S %Y")
                    if (st > cur):
                        raise Exception("HMF logfile seems too old!?!")
                last_access[m.group(1)] = st

    # NOTE(review): ``cur`` is only bound inside the loop above; these
    # expressions rely on last_access being non-empty whenever they evaluate
    # ``time.mktime(cur)`` (empty dict -> comprehension body never runs).
    ips = [key for key in last_access.keys() if ip in key]
    access_in_period = [((time.mktime(cur) - time.mktime(t)) <= max_ago) for r, t in last_access.items()]
    if ips and any(access_in_period):
        print "Previous accesses:"
        for (resource, timestamp), state in zip(last_access.items(), access_in_period):
            if not state:
                continue
            if not resource in ips:
                continue
            print "\t%s was accessed on %s (%.1fs ago)" % (
                resource, time.asctime(timestamp), time.mktime(cur) - time.mktime(timestamp))
        return EXIT_FAILURE
    return EXIT_SUCCESS
def createTimeStamp(datestr, format="%Y-%m-%d %H:%M:%S"):
    """Parse *datestr* using *format* and return a local-time Unix timestamp.

    Returns the float that ``time.mktime`` produces.  (The parameter name
    ``format`` shadows the builtin but is kept for keyword-caller
    compatibility.)
    """
    parsed = time.strptime(datestr, format)
    return time.mktime(parsed)
def getCurrentTimestamp():
    """Return the current local time as an integer Unix timestamp (seconds)."""
    #now = datetime.datetime.now()
    #return int(time.mktime(now.timetuple()))
    return int(time.mktime(time.localtime()))
def getDateFromTimestamp(timestamp):
    """Format a Unix *timestamp* as ``'DD.MM.YY'`` (local date).

    The previous implementation initialised an unused ``day = None`` and
    round-tripped the value through ``time.localtime``/``time.mktime``,
    which (for non-negative timestamps) only truncates fractional
    seconds — exactly what ``int()`` already does.
    """
    return datetime.datetime.fromtimestamp(int(timestamp)).strftime('%d.%m.%y')
def gtime(x):
    """Parse a ``'YYYYMMDDHHMM'`` + microseconds string into a Unix timestamp.

    The microseconds parsed by ``%f`` are discarded by ``timetuple()``;
    the result is a local-time float from ``time.mktime``.
    """
    parsed = datetime.datetime.strptime(x, '%Y%m%d%H%M%f')
    return time.mktime(parsed.timetuple())
def utcdatetime_to_ts(dt):
    """Convert a (naive) UTC datetime *dt* to a Unix timestamp (float).

    Bug fix: the previous ``time.mktime(dt.utctimetuple()) - time.timezone``
    interpreted the UTC tuple in *local* time and corrected with the
    non-DST offset, so the result was wrong by one hour whenever DST was
    in effect locally.  ``calendar.timegm`` is the exact inverse of
    ``time.gmtime`` and performs the UTC conversion directly.
    """
    import calendar  # local import keeps this fix self-contained
    return float(calendar.timegm(dt.utctimetuple()))
def string_datetime_utc_to_string_timestamp_utc(datetime_utc):
    """Convert datetime object *datetime_utc* to a Unix timestamp (float).

    Bug fix: the original did ``from time import time`` and then called
    ``time.mktime(...)`` — the local name ``time`` was bound to the
    *function* ``time.time``, so every call raised AttributeError.
    Import the module instead.

    NOTE(review): despite the name, ``time.mktime`` interprets the
    timetuple in *local* time, not UTC — confirm callers expect that
    (use ``calendar.timegm`` if true UTC conversion is wanted).
    """
    import time
    # timetuple() converts the datetime obj to a struct_time;
    # time.mktime converts that struct_time to an epoch float.
    return time.mktime(datetime_utc.timetuple())