def display_info():
    string = ''
    string += "Token holder: " + gateway.get_variable("token_holder")
    servers = gateway.get_servers()
    string += '<h1>Servers</h1>'
    for server_name, server in servers.iteritems():
        string += '<div>' + str(server_name) + ':</div>'
        string += '<div style="margin-left:20px;"> host: ' + str(server['host']) + ':</div>'
        string += '<div style="margin-left:20px;"> port: ' + str(server['port']) + ':</div>'
        string += '<div style="margin-left:20px;"> max_user: ' + str(server['max_user']) + ':</div>'
        string += '<div style="margin-left:20px;"> total_user: ' + str(server['total_user']) + ':</div>'
        string += '<div style="margin-left:20px;"> connect_time: ' + str(datetime.fromtimestamp(server['connect_time'])) + ':</div>'
        string += '<div style="margin-left:20px;"> last_heartbeat: ' + str(datetime.fromtimestamp(server['last_heartbeat'])) + ':</div>'
        string += '<div style="margin-left:20px;"> hold_token: ' + str(server['hold_token']) + ':</div>'
    users = gateway.get_users()
    string += '<h1>Users</h1>'
    for user, server in users.iteritems():
        string += '<div style="margin-left:20px;">' + str(user) + ' (' + str(server) + ')</div>'
    return string
def filterLeadsBySource(request, id):
    user_id = request.user.id
    company_id = request.user.company_id
    start_date = request.GET.get('start_date')
    end_date = request.GET.get('end_date')
    source = request.GET.get('source')
    query_type = request.GET.get('query_type')
    page_number = int(request.GET.get('page_number'))
    items_per_page = int(request.GET.get('per_page'))
    offset = (page_number - 1) * items_per_page
    if start_date is not None:
        local_start_date_naive = datetime.fromtimestamp(float(start_date))
        local_start_date = get_current_timezone().localize(local_start_date_naive, is_dst=None)
    if end_date is not None:
        local_end_date_naive = datetime.fromtimestamp(float(end_date))
        local_end_date = get_current_timezone().localize(local_end_date_naive, is_dst=None)
    utc_current_date = datetime.utcnow()
    # print 'filter start is ' + str(local_start_date) + ' and end is ' + str(local_end_date)
    try:
        start_date_field_qry = 'leads__hspt__properties__createdate__gte'
        end_date_field_qry = 'leads__hspt__properties__createdate__lte'
        source_field_qry = 'leads__hspt__properties__hs_analytics_source'
        company_field_qry = 'company_id'
        querydict = {company_field_qry: company_id,
                     source_field_qry: source,
                     start_date_field_qry: local_start_date,
                     end_date_field_qry: local_end_date}
        total = Lead.objects(**querydict).count()
        leads = Lead.objects(**querydict).skip(offset).limit(items_per_page)
        serializer = LeadSerializer(leads, many=True)
        return JsonResponse({'count': total, 'results': serializer.data})
    except Exception as e:
        return JsonResponse({'Error': str(e)})
def assert_node_json(node_json):
    try:
        node = ujson.loads(node_json)
    except:
        assert False
    assert node.has_key('id')
    assert isinstance(node['id'], int)
    assert node.has_key('site_id') or not node['site_id']
    assert isinstance(node['site_id'], int)
    assert node.has_key('alias')
    assert isinstance(node['alias'], unicode)
    assert node.has_key('nodetype_id')
    assert isinstance(node['nodetype_id'], int) or not node['nodetype_id']
    assert node.has_key('sensors')
    assert isinstance(node['sensors'], list)
    assert node.has_key('latitude')
    assert isinstance(node['latitude'], float)
    assert node.has_key('longitude')
    assert isinstance(node['longitude'], float)
    assert node.has_key('type')
    assert node['type'] == 'site'
    assert node.has_key('created')
    assert datetime.fromtimestamp(float(node['created']))
    assert node.has_key('updated')
    assert datetime.fromtimestamp(float(node['updated']))
def results(request):
    """this will generate the view for the result"""
    tag_name = request.session.get('tag_name')
    start_date = datetime.fromtimestamp(int(request.session.get('start_date')))
    end_date = datetime.fromtimestamp(int(request.session.get('end_date')))
    response = Pix.objects.filter(tag=tag_name).filter(date__gte=start_date).filter(date__lte=end_date)
    return render(request, 'result.html', {'pix_list': list(response)})
def render_html(stream):
    stream.write("""<!DOCTYPE html>
<html>
<head><title>Simple CMX Webinterface</title></head>
<body>
<pre>{file} running on {host}. Page generated at {ctime}</pre>
<a href="/json">view in JSON format</a>""".format(
        file=escape(__file__),
        host=escape(hostinfo),
        ctime=datetime.now().ctime()
    ))
    for component in cmx.Registry.list():
        stream.write("""
<table border=2>
<tr><th colspan=4>{name} ({processId})</th></tr>
<tr><th>Name</th><th>Value</th><th>update</th><th>type</th></tr>""".format(
            name=escape(component.name()),
            processId=component.processId()))
        for value in component.list():
            stream.write("""
<tr>
<td>{name}</td>
<td class="value"><textarea rows=2 cols=80>{value}</textarea></td>
<td>{update}<br />{updateRel}</td>
<td>{type}</td>
</tr>""".format(name=escape(value.name()),
                type=escape(value.__class__.__name__),
                value=value.value(),
                update=datetime.fromtimestamp(value.mtime() / 10.0**6),
                updateRel=datetime.now() - datetime.fromtimestamp(value.mtime() / 10.0**6)))
        stream.write("</table>\n<br/>\n")
    stream.write("</body>\n</html>")
def formatTaskForDisplay(self, obj, is_search=False):
    task = []
    if self.verbose:
        task.append('> ' + obj['task'])
        if not obj['completed'] == '':
            dt = datetime.fromtimestamp(int(obj['completed']))
            c = dt.strftime('%Y-%m-%d %H:%M')
            task.append(' Completed: ' + c)
            showLineNumber = False
        else:
            dt = datetime.fromtimestamp(int(obj['date']))
            d = dt.strftime('%Y-%m-%d %H:%M')
            task.append(' Added: ' + d)
            showLineNumber = True
        task.append(' File Path: ' + obj['filePath'])
        if showLineNumber:
            task.append(' Line Number: ' + str(obj['lineNumber']))
    else:
        if obj['completed'] == '' or is_search:
            dt = datetime.fromtimestamp(int(obj['date']))
            d = dt.strftime('%Y-%m-%d %H:%M')
            if obj['completed'] != '':
                completed = '* '
            else:
                completed = ''
            task.append("> %s%s %s, line %s, path: %s" % (
                completed, d, self.bold(obj['task']),
                str(obj['lineNumber']), obj['filePath']))
    return "\n".join(task)
def EPG(url, iconimage):
    url = 'http://www.filmon.com/api/channel/%s?session_key=%s' % (url, ses)
    link = net.http_GET(url).content
    data = json.loads(link)
    tvguide = data['tvguide']
    for field in tvguide:
        programme_id = field["programme"]
        startdate_time = field["startdatetime"]
        enddate_time = field["enddatetime"]
        day = field["date_of_month"]
        cid = field["channel_id"]
        desc = field["programme_description"]
        name = field["programme_name"]
        startdate_time_float = float(startdate_time)
        enddate_time_float = float(enddate_time)
        start = datetime.fromtimestamp(startdate_time_float)
        end = datetime.fromtimestamp(enddate_time_float)
        startdate_time_cleaned = start.strftime('%H:%M')
        enddate_time_cleaned = end.strftime('%H:%M')
        name = '[%s %s] [B]%s[/B]' % (day, startdate_time_cleaned, name)
        iconimage = 'http://static.filmon.com/couch/channels/%s/extra_big_logo.png' % (cid)
        name = name.encode('utf-8')
        description = desc.encode('utf-8')
        url = str(cid)
        addDir(name, url, 2, iconimage, description, '', '', 'record', '', '', programme_id, startdate_time)
    setView('movies', 'epg')
def timesmk(v):
    ts = v.get('-start', None)
    te = v.get('-stop', None)
    if ts is None or te is None:
        return u''
    ts = datetime.fromtimestamp(time.mktime(time.strptime(ts.split()[0], '%Y%m%d%H%M%S'))) + timedelta(minutes=offset)
    te = datetime.fromtimestamp(time.mktime(time.strptime(te.split()[0], '%Y%m%d%H%M%S'))) + timedelta(minutes=offset)
    return u'%s %s' % (ts.strftime("%H:%M:%S"), te.strftime("%H:%M:%S"))
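# A minimal usage sketch for timesmk (not part of the original), assuming the
# module-level `offset` (in minutes) that the function references; the
# XMLTV-style values below are made up.
offset = 0
print(timesmk({'-start': '20190607163010 +0200', '-stop': '20190607174500 +0200'}))
# -> u'16:30:10 17:45:00'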
def interview_post(request):
    interview_form = dict(request.POST)
    del interview_form['csrfmiddlewaretoken']
    interview_type = int(interview_form.pop('interview_type')[0])
    recruiter_id = interview_form.pop('recruiter_id')[0]
    interview_template_name = interview_form.pop('interview_template_name')[0]
    candidate_name = interview_form.pop('candidate_name')
    interviews = map(dict, zip(*[[(k, v) for v in value] for k, value in interview_form.items()]))
    for interview_slot in interviews:
        # datetime.replace() returns a new object, so the result must be assigned back.
        interview_slot['start_time'] = datetime.fromtimestamp(float(interview_slot['start_time']))
        interview_slot['start_time'] = interview_slot['start_time'].replace(tzinfo=pytz.timezone(settings.TIME_ZONE))
        interview_slot['end_time'] = datetime.fromtimestamp(float(interview_slot['end_time']))
        interview_slot['end_time'] = interview_slot['end_time'].replace(tzinfo=pytz.timezone(settings.TIME_ZONE))
        interview_slot['interviewer_id'] = models.Interviewer.objects.get(name=interview_slot['interviewer'].split('@')[0]).id
        interview_slot['room_id'] = models.Room.objects.get(display_name=interview_slot['room']).id
        interview_slot['candidate_name'] = candidate_name[0]
    # Sorting so we can make the content in the right order.
    interviews = sorted(interviews, key=lambda x: x['start_time'])
    body_content = schedule_calculator.create_calendar_body(
        [(interview['start_time'], interview['interviewer']) for interview in interviews],
        models.Recruiter.objects.get(id=recruiter_id),
        request.user,
    )
    start_time = datetime.fromtimestamp(float(interview_form['room_start_time'][0]))
    start_time = start_time.replace(tzinfo=pytz.timezone(settings.TIME_ZONE))
    end_time = datetime.fromtimestamp(float(interview_form['room_end_time'][0]))
    end_time = end_time.replace(tzinfo=pytz.timezone(settings.TIME_ZONE))
    interview_type_string = models.InterviewTypeChoice(interview_type).display_string
    calendar_response = calendar_client.create_event(
        '%(type)s - %(candidate)s (%(interview_template_name)s)' % {
            'type': interview_type_string,
            'candidate': candidate_name[0],
            'interview_template_name': interview_template_name,
        },
        body_content,
        start_time,
        end_time,
        interview_form['external_id'][0],
        interview_form['room'][0],
    )
    schedule_calculator.persist_interview(
        interviews,
        interview_type,
        google_event_id=calendar_response['id'],
        recruiter_id=recruiter_id,
        user_id=request.user.id,
    )
    return redirect('/new_scheduler?success=1')
def generate_corrected_timestamp():
    now = time.time()
    if _time_correction is not None:
        t = now + _time_correction
        log.warn('using corrected timestamp: %r', datetime.fromtimestamp(t).isoformat())
    else:
        t = now
        log.warn('using UNcorrected timestamp: %r', datetime.fromtimestamp(t).isoformat())
    return t
def pretty_date_short(time=False):
    """
    Get a datetime object or an int() Epoch timestamp and return a
    pretty string like 'an hour ago', 'Yesterday', '3 months ago',
    'just now', etc.
    http://stackoverflow.com/questions/1551382/user-friendly-time-format-in-python
    """
    from datetime import datetime
    now = datetime.now()
    if type(time) is int:
        diff = now - datetime.fromtimestamp(time)
    elif isinstance(time, datetime):
        diff = now - time
    elif not time:
        diff = now - now
    else:
        try:
            diff = now - time
        except:
            return ''
    second_diff = diff.seconds
    day_diff = diff.days
    if day_diff < 0 or second_diff < 0:
        now = datetime.now()
        if type(time) is int:
            diff = datetime.fromtimestamp(time) - now
        elif isinstance(time, datetime):
            diff = time - now
        second_diff = diff.seconds
        day_diff = diff.days
    if day_diff == 0:
        if second_diff < 10:
            return "just now"
        if second_diff < 60:
            return str(second_diff) + "s"
        if second_diff < 3600:
            return str(second_diff / 60) + "m"
        if second_diff < 86400:
            return str(second_diff / 3600) + "h"
    if day_diff < 7:
        return str(day_diff) + "d " + str(second_diff / 3600) + "h"
    if day_diff < 365:
        return str(day_diff / 7) + "w " + str(day_diff - (int(day_diff / 7) * 7)) + "d"
    return str(day_diff / 365) + "y"
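# A small usage sketch for pretty_date_short (not in the original); under the
# Python 2 integer division the function relies on, these calls yield the
# abbreviated forms shown.
import time as _time
print(pretty_date_short(int(_time.time()) - 90))    # -> "1m"
print(pretty_date_short(int(_time.time()) - 7200))  # -> "2h"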
def process_gerrit_issue(self, issue):
    ret = {}
    ret['review_url'] = issue['url']
    ret['header'] = issue['subject']
    ret['owner'] = issue['owner']['email']
    ret['author'] = ret['owner']
    ret['created'] = datetime.fromtimestamp(issue['createdOn'])
    ret['modified'] = datetime.fromtimestamp(issue['lastUpdated'])
    if 'comments' in issue:
        ret['replies'] = self.process_gerrit_issue_replies(issue['comments'])
    else:
        ret['replies'] = []
    return ret
def test_age_default(self, fake_datetime):
    """
    age() w/ defaults works properly
    """
    from datetime import datetime
    now = datetime.fromtimestamp(60.0)
    fake_datetime.return_value = now
    fake_datetime.utcnow = Mock(return_value=now)
    tor = FakeTorController()
    circuit = Circuit(tor)
    circuit._time_created = datetime.fromtimestamp(0.0)
    self.assertEquals(circuit.age(), 60)
    self.assertTrue(circuit.time_created is not None)
def rq_time_bounds():
    start = request.args.get('start', None)
    end = request.args.get('end', None)
    if start is not None:
        try:
            start = datetime.fromtimestamp(int(start) / 1000)
        except:
            start = None
    if end is not None:
        try:
            end = datetime.fromtimestamp(int(end) / 1000)
        except:
            end = None
    return start, end
def graph_prices(x, y, gname):
    '''Make a plot of the prices over time for a specific game.
    x is the dates of the bins
    y is the prices
    gname is the name of the game
    '''
    x_list = list(x)
    x_dt = [datetime.fromtimestamp(xx) for xx in x_list]
    fig = Figure(facecolor='white')
    ax = fig.add_subplot(111)
    ax.plot(x_dt, y, 'r-')
    ax.set_ylim([0, np.max(y) + np.max(y) * 0.10])
    #ax.set_title(gname)
    #ax.set_axis_bgcolor('red')
    formatter = FuncFormatter(money_format)
    ax.yaxis.set_major_formatter(formatter)
    #fig.autofmt_xdate()
    #xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
    #ax.xaxis.set_major_formatter(xfmt)
    ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
    fig.autofmt_xdate()
    canvas = FigureCanvas(fig)
    png_output = StringIO.StringIO()
    canvas.print_png(png_output)
    response = make_response(png_output.getvalue())
    response.headers['Content-Type'] = 'image/png'
    return response
def process_feed_item(feed_item, feed):
    # print 'inserting %s' % feed_item.link
    if db.items.find({'url': feed_item.link}).count() > 0:
        # print 'already there, done'
        # TODO handle updates
        return
    i = {
        # DO BETTER STUFF
        '_id': str(uuid.uuid1()),
        'title': feed_item.get('title', ''),
        'feed': feed,
    }
    i['url'] = feed_item.link
    try:
        i['description'] = feed_item.content[0].value
    except:
        i['description'] = feed_item.description
    try:
        i['date'] = datetime.fromtimestamp(calendar.timegm(feed_item.published_parsed))
    except:
        i['date'] = datetime.now()
    db.items.insert(i)
def MAKE_WORKING_DIR(Dir_Name, Time_Stamp=True, Use_Cwd_Asbase=True, **kwargs):
    '''
    Small function that creates a working directory.
    Version 1.0 JPR 7-24-15

    Inputs (*Optional): (Dir_Name, Time_Stamp=True, Use_Cwd_Asbase=True)
    - Dir_Name == Name of new directory.
    - Time_Stamp == will append timestamp of when this function is called to directory.
    - Use_Cwd_Asbase == will use current working directory as the base directory
      (your new directory will be a sub directory of this space).
    - kwarg('base_cwd') == if Use_Cwd_Asbase is False, please send directory in with this kwarg.

    Outputs: Directory_Path
    '''
    import os
    if Time_Stamp:
        from datetime import datetime
        import time
        Start_Time = time.time()
        Time_Stamp = datetime.fromtimestamp(Start_Time).strftime('%Y%m%d_%H%M%S')
    else:
        Time_Stamp = ''
    if Use_Cwd_Asbase:
        Base_Dir = os.getcwd()
    else:
        Base_Dir = kwargs.get('base_cwd', '')
    try:
        D_Name = Dir_Name + Time_Stamp
        Output_Directory = os.path.join(Base_Dir, D_Name)
    except TypeError:
        raise
    # Create Directory Path
    if not os.path.exists(Output_Directory):
        os.makedirs(Output_Directory)
    return Output_Directory
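# An illustrative call to MAKE_WORKING_DIR (not in the original); directory
# names here are examples only.
run_dir = MAKE_WORKING_DIR('run_')  # e.g. <cwd>/run_20150724_153000
tmp_dir = MAKE_WORKING_DIR('data', Time_Stamp=False, Use_Cwd_Asbase=False, base_cwd='/tmp')  # /tmp/data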
def cli():
    parser = optparse.OptionParser(usage="%prog [options...] mmssms.db")
    parser.add_option("-o", "--output",
                      help="Output file name (default: generated from the date of the newest SMS message)")
    opts, args = parser.parse_args()
    if len(args) != 1:
        parser.print_help()
        sys.exit(1)
    messages = read_messages(args[0])
    print ("Read %s messages" % messages.attrib["count"])
    newest = datetime.fromtimestamp(int(messages.getchildren()[0].attrib["date"]) / 1000)
    if opts.output is None:
        opts.output = newest.strftime("sms-%Y%m%d%H%M%S.xml")
    try:
        os.stat(opts.output)
        ans = raw_input("Warning: file %s already exists, overwrite? " % opts.output)
        if ans.lower()[0] != 'y':
            sys.exit(0)
    except OSError:
        pass
    etree.ElementTree(messages).write(opts.output, encoding="utf-8", xml_declaration=True)
    print ("Messages saved to %s." % opts.output)
    print ("Now copy this file to your SD Card into ")
    print ("SMSBackupRestore folder and use SMS Backup & Restore by Ritesh Sahu")
    print ("to restore your messages.")
    print ("Google Play store link to the app (free version): http://goo.gl/ZO5cy")
def bulk():
    data = request.data
    logger.debug("data:%s" % data)
    if not data:
        logger.debug("No data received, dropping")
        return ""
    for row in data:
        inserts = dict()
        inserts['timestamp'] = datetime.fromtimestamp(int(row[0]))
        inserts['site_id'] = g.account['site_id']
        inserts['site_name'] = g.site['site_name']
        node_id = int(row[1])
        # Adding distinction between nodes
        bulk_index_mapping = generate_bulk_index_conf(g.site)
        if bulk_index_mapping.has_key(node_id):
            length_of_indices = len(row)
            sensor_data = bulk_index_mapping[node_id]
            # Generating an insert dictionary representing the mapping of the
            # data to the sensor mapping (bulk_index_mapping)
            for index, item_name in sensor_data.items():
                if length_of_indices > index:
                    # Make sure we have an entry for that index, just in case
                    inserts[item_name] = row[index]
            logger.debug("The data to be written to the database is: %s" % inserts)
            insert_data(inserts)
        else:
            logger.warning("The node_id=%s sent has no mapping for this site. SKIPPING" % node_id)
    return "OK"
def __init__(self):
    super().__init__()
    self.bets = {}
    redis = RedisManager.get()
    self.last_game_start = None
    self.last_game_id = None
    try:
        last_game_start_timestamp = int(redis.get('{streamer}:last_hsbet_game_start'.format(streamer=StreamHelper.get_streamer())))
        self.last_game_start = datetime.fromtimestamp(last_game_start_timestamp)
    except (TypeError, ValueError):
        # Issue with the int-cast
        pass
    except (OverflowError, OSError):
        # Issue with datetime.fromtimestamp
        pass
    try:
        self.last_game_id = int(redis.get('{streamer}:last_hsbet_game_id'.format(streamer=StreamHelper.get_streamer())))
    except (TypeError, ValueError):
        pass
    self.scheduler = BackgroundScheduler()
    self.scheduler.start()
    self.job = self.scheduler.add_job(self.poll_trackobot, 'interval', seconds=15)
    self.job.pause()
    self.reminder_job = self.scheduler.add_job(self.reminder_bet, 'interval', seconds=1)
    self.reminder_job.pause()
def get_all_files_and_timestamp(webdav):
    dump_files_available = webdav.ls()
    webdav.cd("remote.php")
    webdav.cd("webdav")
    webdav.cd(OWNCLOUD_DIRECTORY)
    all_the_files = []
    for f in dump_files_available:
        if not f.name or f.name[-1] == '/':
            continue
        # We try to extract a timestamp to get an idea of the creation date
        # Format: Mon, 14 Mar 2016 03:31:40 GMT
        t = time.strptime(f.mtime, '%a, %d %b %Y %H:%M:%S %Z')
        # We don't take into consideration backups that are too recent.
        # Otherwise they could be half uploaded (=> corrupted)
        dt = datetime.fromtimestamp(mktime(t))
        if abs((datetime.now() - dt).total_seconds()) < 900:
            print "SKIP", f.name, "(too recent)"
            continue
        all_the_files.append((dt, f))
    return all_the_files
def process_response(self, request, response):
    if not self.internal and request.facebook.session_key and request.facebook.uid:
        request.session['facebook_session_key'] = request.facebook.session_key
        request.session['facebook_user_id'] = request.facebook.uid
        if request.facebook.session_key_expires:
            expiry = datetime.fromtimestamp(request.facebook.session_key_expires)
            request.session.set_expiry(expiry)
    try:
        fb = request.facebook
    except:
        return response
    if not fb.is_session_from_cookie:
        # Make sure the browser accepts our session cookies inside an Iframe
        response['P3P'] = 'CP="NOI DSP COR NID ADMa OPTa OUR NOR"'
        fb_cookies = {
            'expires': fb.session_key_expires,
            'session_key': fb.session_key,
            'user': fb.uid,
        }
        expire_time = None
        if fb.session_key_expires:
            expire_time = datetime.utcfromtimestamp(fb.session_key_expires)
        for k in fb_cookies:
            response.set_cookie(self.api_key + '_' + k, fb_cookies[k], expires=expire_time)
        response.set_cookie(self.api_key, fb._hash_args(fb_cookies), expires=expire_time)
    return response
def _create_post(c, title):
    # Wordpress Post
    url = 'http://' + site.domain
    images = c.images()[:15]
    content = render_to_string('blog/post_gallery.html',
                               {'artist': c.artist, 'collection': c, 'images': images, 'url': url})
    post = Post(post_title=title,
                post_name=defaultfilters.slugify(title),
                post_content=content,
                post_author=2)
    gmt_time = datetime.fromtimestamp(time.mktime(time.gmtime()))
    post.post_date_gmt = gmt_time
    post.post_modified_gmt = gmt_time
    post.save()
    relation = Relation(object_id=post.pk, term_taxonomy_id=4)
    relation.save()
    # Post's tags
    post.add_tag(c.artist.__unicode__())
    for art_type in c.artist.art_types.all():
        post.add_tag(art_type.name)
    # Active artist
    c.artist.active = 1
    c.artist.save()
    # Update Mobile Data
    load_mobile_data()
    return len(images)
def get(self):
    arg_parser = reqparse.RequestParser()
    arg_parser.add_argument('offset', type=int, default=0, location='args')
    arg_parser.add_argument('limit', type=int, default=10, location='args')
    arg_parser.add_argument('user_type', type=int, default=2, location='args')
    arg_parser.add_argument('deleted', type=int, default=0, location='args')
    arg_parser.add_argument('order_by', type=str, default='user_since',
                            choices=['user_since', 'name'], location='args')
    arg_parser.add_argument('desc', type=int, default=1, choices=[1, 0], location='args')
    arg_parser.add_argument('since_time', type=int,
                            default=admin_controllers.maketimestamp(datetime(2001, 1, 1)),
                            location='args')
    try:
        args = arg_parser.parse_args()
        return admin_controllers.user_list(offset=args['offset'],
                                           limit=args['limit'],
                                           user_type=bool(args['user_type']),
                                           deleted=bool(args['deleted']),
                                           order_by=args['order_by'],
                                           desc=bool(args['desc']),
                                           since_time=datetime.fromtimestamp(args['since_time']))
    except Exception as e:
        err = sys.exc_info()
        raygun.send(err[0], err[1], err[2])
        print traceback.format_exc(e)
        abort(500, message='Error')
def default(self, *path, **kwargs):
    path = path[:-1]
    path = ('/'.join(path)).replace('//', '/')
    rpath = os.path.join(self.root_dir, path)
    if os.path.isdir(rpath):
        if not self.browse:
            raise HttpNotFoundResponse(path)
        link_prefix = (kwargs['full_url'] + '/').replace('//', '/')
        filelist = sorted(os.listdir(rpath))
        longest_name = reduce(lambda m, c: c if c > m else m,
                              map(lambda n: len(n), filelist), 0)
        longest_name = max(longest_name, len('Parent Directory'))
        result = '<html>\n<head><title>Index of {0}</title></head>\n<body>\n<h1>Index of {0}</h1>'.format(path or '/')
        format_row = lambda n, m, s, l=None: n + ' ' * (longest_name + 4 - (l or len(n))) + m + ' ' * (34 - len(m) - len(s)) + s + '\n'
        result += '<pre>' + format_row('Name', 'Last Modified', 'Size') + '<hr>\n'
        template = '<a href="{}">{}</a>'
        format_time = lambda t: datetime.fromtimestamp(t).strftime('%Y-%m-%d %H:%M:%S')
        format_file = lambda l, p, n: format_row(
            template.format(l, n),
            format_time(os.path.getmtime(p)),
            '-' if os.path.isdir(p) else format_filesize(os.path.getsize(p)),
            len(n))
        if len(path) >= 2:
            result += format_file(os.path.join(link_prefix, '../'), os.path.join(rpath, '../'), 'Parent Directory')
        result = reduce(lambda all, name: all + format_file(os.path.join(link_prefix, name), os.path.join(rpath, name), name),
                        filelist, result)
        result += '</pre>\n{footer}</body>\n</html>'.format(footer=HttpResponse.footer)
        return result
    if os.path.isfile(rpath):
        return HttpFileResponse(rpath)
    raise HttpNotFoundResponse('/' + '/'.join(kwargs['full_path'][:-1]))
def __init__(self):
    super().__init__()
    self.bets = {}
    redis = RedisManager.get()
    self.last_game_start = None
    self.last_game_id = None
    try:
        last_game_start_timestamp = int(redis.get('{streamer}:last_hsbet_game_start'.format(streamer=StreamHelper.get_streamer())))
        self.last_game_start = datetime.fromtimestamp(last_game_start_timestamp)
    except (TypeError, ValueError):
        # Issue with the int-cast
        pass
    except (OverflowError, OSError):
        # Issue with datetime.fromtimestamp
        pass
    try:
        self.last_game_id = int(redis.get('{streamer}:last_hsbet_game_id'.format(streamer=StreamHelper.get_streamer())))
    except (TypeError, ValueError):
        pass
    self.job = ScheduleManager.execute_every(15, self.poll_trackobot)
    self.job.pause()
    self.reminder_job = ScheduleManager.execute_every(1, self.reminder_bet)
    self.reminder_job.pause()
def sync(self, gcal_items):
    self.log("Starting sync")
    response = self._call("tasks/get.php", key=self.key, comp=-1, fields="duedate")
    response = json.loads(response)[1:]
    td_items = {task["title"]: datetime.fromtimestamp(task["duedate"], pytz.utc)
                for task in response}
    self.log("{} items in list".format(len(td_items)))
    not_in_td = filter(lambda key: key not in td_items, gcal_items.keys())
    self.log("{} new items to add".format(len(not_in_td)))
    payload = [{
        "title": item,
        "duedate": timegm(gcal_items[item].timetuple()),
        "folder": self.folder_id
    } for item in not_in_td]
    if any(payload):
        map(lambda item: self.log("\tAdding '{}'".format(item["title"])), payload)
        payload = json.dumps(payload)
        response = self._call("tasks/add.php", key=self.key, tasks=payload)
        self.log("Synchronization complete.")
    else:
        self.log("Nothing synchronized.")
def printComment(comment):
    print "id: " + comment['id']
    print "user id: " + str(comment['fromid'])
    print "text: " + comment['text']
    print "timestamp: " + str(datetime.fromtimestamp(long(comment['time'])))
    print "likes: " + str(comment['likes'])
    print "--------------------------------------------"
def printKeys(searchString):
    setTopBox = r_server.smembers('who' + searchString)
    endOfStream = re.compile(r"RTSP server send a end of stream event")
    for key in setTopBox:
        mac = key
        ip = r_server.hget(searchString + key, "ip")
        fw = r_server.hget(searchString + key, "fw")
        uptime = r_server.hget(searchString + key, "uptSec")
        url = r_server.hget(searchString + key, "url")
        operacrash = r_server.hget(searchString + key, "operacrash")
        mcast = r_server.hget(searchString + key, "mcast")
        decodeErr = r_server.hget(searchString + key, "decodeErr")
        rtsperr = r_server.hget(searchString + key, "rtsperr")
        if r_server.hget(searchString + key, "stime"):
            isoTime = datetime.fromtimestamp(int(r_server.hget(searchString + key, "stime"))).isoformat()
        if rtsperr:
            if endOfStream.search(rtsperr):
                rtsperr = "None"
        if not mcast:
            mcast = 0
        if not operacrash:
            operacrash = 0
        print str(isoTime) + ',' + str(ip) + ',' + str(fw) + ',' + str(mac) + ',' + str(uptime) + ',' + str(url) + ',' + str(mcast) + ',' + str(operacrash) + ',' + str(decodeErr) + ',' + str(rtsperr)
        dstFileTMP = '/mnt/nfs/dump/' + str(isoTime) + '.txt'
        dstFileTMP = dstFileTMP.split(":")
        dstFile = dstFileTMP[0] + dstFileTMP[1] + dstFileTMP[2]
        srcFile = '/tmp/commaExport.txt'
        shutil.copy(srcFile, dstFile)
def get_trivial_attr(self, obj, objdict):
    """Get all trivial attributes from the object.

    This function is used to extract attributes from a pb job. The
    dictionary specifies the type of each attribute in each tko class.

    :param obj: the pb object that is being extracted.
    :param objdict: the dict that specifies the type.
    :return: a dict of each attr name and its corresponding value.
    """
    resultdict = {}
    for field, field_type in objdict.items():
        value = getattr(obj, field)
        if field_type in (str, int, long):
            resultdict[field] = field_type(value)
        elif field_type == datetime:
            resultdict[field] = datetime.fromtimestamp(value / 1000.0)
    return resultdict
class WxPayConf(object):
    """WeChat Pay account configuration."""
    # =============== [Basic account settings] ===================
    AppId = ""
    AppSecret = ""
    MchId = ""
    # Merchant payment secret key
    Merchant_key = ""
    # =============== [Async notification URL] ================
    NOTIFY_URL = "http://"
    # ================ [Certificate paths] ======================
    # Certificate paths; should be absolute paths (only needed for
    # refunds and order cancellation)
    SSLCERT_PATH = "../cert/apiclient_cert.pem"
    SSLKEY_PATH = "../cert/apiclient_key.pem"
    # ================ [curl timeout] ==================
    CURL_TIMEOUT = 30
    # Merchant order number: timestamp + random number.
    # Note: time.timezone is an int, not callable; a tzinfo object is needed
    # here, e.g. pytz.timezone('Asia/Shanghai') (assuming pytz is available).
    now = datetime.fromtimestamp(time.time(), tz=pytz.timezone('Asia/Shanghai'))
    OUT_TRADE_NO = '{0}{1}{2}'.format(MchId, now.strftime('%Y%m%d%H%M%S'), random.randint(1000, 10000))
def __init__(self, loc, name):
    self.loc = loc    # QM_xJobs
    self.name = name  # the job directory name
    # will be used for sorting - myjobfile stronger than url
    self.date = os.stat(self.myjobfile)[8]
    self.nicedate = datetime.fromtimestamp(self.date).strftime("%d %b %Y %H:%M:%S")
    self.timestarted = time.time()
    # the following will be modified by parsexml/parsecfg
    self.nb_proc = 1
    self.e_mail = "unknown"
    self.info = "unknown"
    self.script = "unknown"
    self.priority = 0
    self.size = "undefined"
    keylist = ["nb_proc", "e_mail", "info", "script", "priority", "size"]  # adapt here
    self.keylist = keylist
    if debug:
        print('self.loc ', self.loc)
        print('self.name ', self.name)
        print('self.nb_proc ', self.nb_proc)
        print('self.e_mail ', self.e_mail)
        print('self.info ', self.info)
        print('self.script ', self.script)
        print('self.priority', self.priority)
        print('self.size', self.size)
    # and get them
    if self.job_type == "xml":
        self.parsexml()
    if self.job_type == "cfg":
        self.parsecfg()
    for intkey in ["nb_proc", "priority", "size"]:
        try:
            setattr(self, intkey, int(getattr(self, intkey)))
        except ValueError:
            setattr(self, intkey, "undefined")
def gen_index_string():
    """
    Create and return a string including every file in the ENTRY_DIR as an index.

    Returns:
        string: html-formatted index string
    """
    path_ex = ENTRY_DIR
    content_string = ''
    if path.exists(path_ex):
        name_list = os.listdir(path_ex)
        full_list = [os.path.join(path_ex, i) for i in name_list]
        contents = sorted(full_list, key=os.path.getctime)
        for file in reversed(contents):
            filename = pathlib.PurePath(file)
            purefile = filename
            title = open(filename).readline().rstrip('\n')
            text = open(filename).readlines()[1:]
            filename = filename.name
            if filename[0] != '.':
                filename = filename.split('.', 1)[0]
                content_string += '<div class=\'entry\'>\n'
                content_string += '<h2 id=\'' + filename + '\'>' + title + '</h2>\n'
                content_string += '[<a href="' + '/entry/' + \
                    pathlib.PurePath(file).name + '">' + \
                    'standalone' + '</a>]<br>\n'
                if file.endswith('.html'):
                    for line in text:
                        content_string += line
                    content_string += '<br>'
                if file.endswith('.md'):
                    content_string += gen_md_content(file, 2)
                content_string += '<small>' + \
                    datetime.fromtimestamp(os.path.getctime(file)).strftime('%Y-%m-%d') + '</small>'
                content_string += '</div>'
    return content_string
def parse_time_list(unparsed_list):
    """Parse the time column for information.

    Unfortunately the patterns aren't consistent, and currently we're
    handling that by checking for multiple time patterns."""
    time_list = []
    patterns = [
        '%m/%d/%Y %H:%M:%S',
        '%m/%d/%y %H:%M:%S',
        '%m/%d/%Y %H:%M',
        '%m/%d/%y %H:%M'
    ]
    for timestring in unparsed_list:
        if len(timestring) > 0:
            t = "NONE"
            for p in patterns:
                try:
                    t = time.strptime(timestring[0], p)
                    t = datetime.fromtimestamp(time.mktime(t))
                    break
                except:
                    continue
            # Check for failure before touching attributes of t
            if t == "NONE":
                raise ValueError
            t_year = t.year
            t_month = t.month
            t_day = t.day
            t_hour = t.hour
            t_minute = t.minute
            t_second = t.second
            t_list = ['{:02d}'.format(i) for i in
                      [t_year, t_month, t_day, t_hour, t_minute, t_second]]
            t_string = "".join(t_list)
            time_list.append(t_string)
        else:
            time_list.append("0")
    return time_list
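# An illustrative call to parse_time_list (not in the original), assuming each
# element is a sequence whose first item is the raw time string (the loop
# above indexes timestring[0]); an empty element maps to "0".
print(parse_time_list([['06/07/2019 16:30:10'], ['06/07/19 16:30'], '']))
# -> ['20190607163010', '20190607163000', '0']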
def job_get_file_inventia_symbox(log_file, db, sftp, dir, datetime_max, directory_temp):
    # Switch to a remote directory
    sftp.cwd(dir)
    filename_list_to_get = {}
    filename_list_to_process = {}
    file_to_process = ('.csv')
    if datetime_max is None:
        return
    # Obtain structure of the remote directory
    directory_structure = sftp.listdir_attr()
    now = datetime.now()
    for attr in directory_structure:
        print(attr)
        mod_timestamp = datetime.fromtimestamp(attr.st_mtime)
        diff_1 = datetime_max - mod_timestamp
        diff = mod_timestamp - datetime_max
        if diff.total_seconds() > 0:
            filename_list_to_get.update({attr.filename: mod_timestamp})
    if not filename_list_to_get:
        log_file.info("Nessun file da processare")  # "No files to process"
        return
    log_file.info("Numero file = %i estratti in data = %s"  # "Number of files extracted on date"
                  % (len(filename_list_to_get), now.strftime("%m/%d/%Y, %H:%M:%S")))
    for (filename, time) in filename_list_to_get.items():
        print(filename)
        if filename.endswith(file_to_process):
            filename_list_to_process.update({filename: time})
            #shutil.copy(sftp.get(filename_to_read), os.path.join(directory_temp, filename_to_read))
            sftp.get(filename, os.path.join(directory_temp, filename))
    log_file.info("Numero file = %i da processare in data = %s"  # "Number of files to process on date"
                  % (len(filename_list_to_process), now.strftime("%m/%d/%Y, %H:%M:%S")))
    return filename_list_to_process
def pretty_date(time):
    """
    Get a datetime object or an int() Epoch timestamp and return a
    pretty string like 'an hour ago', 'Yesterday', '3 months ago',
    'just now', etc.
    Based on https://stackoverflow.com/a/1551394/713980
    Adapted by sven1103
    """
    from datetime import datetime
    now = datetime.now()
    if isinstance(time, datetime):
        diff = now - time
    else:
        diff = now - datetime.fromtimestamp(time)
    second_diff = diff.seconds
    day_diff = diff.days
    pretty_msg = {
        0: [(float('inf'), 1, 'from the future')],
        1: [(10, 1, "just now"),
            (60, 1, "{sec} seconds ago"),
            (120, 1, "a minute ago"),
            (3600, 60, "{sec} minutes ago"),
            (7200, 1, "an hour ago"),
            (86400, 3600, "{sec} hours ago")],
        2: [(float('inf'), 1, 'yesterday')],
        7: [(float('inf'), 1, '{days} days ago')],
        31: [(float('inf'), 7, '{days} weeks ago')],
        365: [(float('inf'), 30, '{days} months ago')],
        float('inf'): [(float('inf'), 365, '{days} years ago')]
    }
    for days, seconds in pretty_msg.items():
        if day_diff < days:
            for sec in seconds:
                if second_diff < sec[0]:
                    return sec[2].format(days=round(day_diff / sec[1], 1),
                                         sec=round(second_diff / sec[1], 1))
    return '... time is relative anyway'
def check_token(event):
    data = event['body']
    access_token = data['access_token']
    refresh_token = data['refresh_token']
    renew = True
    now = datetime.now()
    dec_access_token = jwt.get_unverified_claims(access_token)
    un = dec_access_token['username']
    clientID = dec_access_token['client_id']
    if now > datetime.fromtimestamp(dec_access_token['exp']):
        expired = True
        if renew:
            u = renew_access_token(refresh_token, clientID, un)
    else:
        expired = False
        return {
            "body": {
                "id_token": data['id_token'],
                "access_token": data['access_token'],
                "refresh_token": data['refresh_token']
            },
            "headers": {},
            "statusCode": 200,
            "isBase64Encoded": "false",
            "expired": expired
        }
    return {
        "body": {
            "id_token": u['id_token'],
            "access_token": u['access_token'],
            "refresh_token": u['refresh_token']
        },
        "headers": {},
        "statusCode": 200,
        "isBase64Encoded": "false",
        "expired": expired
    }
def pretty_date(time=False):
    now = datetime.now()
    if type(time) is int:
        diff = now - datetime.fromtimestamp(time)
    elif isinstance(time, datetime):
        diff = now - time
    elif not time:
        diff = now - now
    second_diff = diff.seconds
    day_diff = diff.days
    if day_diff < 0:
        return ''
    if day_diff == 0:
        if second_diff < 60:
            return "less than a minute ago"
        if second_diff < 120:
            return "one minute ago"
        if second_diff < 3600:
            return str(second_diff / 60) + " minutes ago"
        if second_diff < 7200:
            return "an hour ago"
        if second_diff < 86400:
            return str(second_diff / 3600) + " hours ago"
    if day_diff == 1:
        return "Yesterday"
    if day_diff < 7:
        return str(day_diff) + " days ago"
    if day_diff < 14:
        return "1 week ago"
    if day_diff < 31:
        return str(day_diff / 7) + " weeks ago"
    if day_diff < 365:
        return str(day_diff / 30) + " months ago"
    return str(day_diff / 365) + " years ago"
def __init__(self, filepath):
    self.regexp = regexp
    self.schema = schema
    super(adeck, self).__init__(filepath)
    self.properties['stormNum'] = int(self.properties['stormNum'])
    self.properties['year'] = int(self.properties['year'])
    checkBasin(self.properties['basinId'], self.properties['filename'])
    self.properties['jtwcId'] = "{}{}".format(
        self.properties['stormNum'], self.properties['basinId'][0].upper())
    if 'createDTG' in self.properties:
        self.properties['createDTG'] = datetime.datetime.strptime(
            self.properties['createDTG'], "%Y%m%d%H%M")
    else:
        # Should replace this time with newest time entry within file,
        # need Robert's python adeck reader
        LOG.info("Filename creation time field doesn't exist, using file modification time")
        # properties is a dict, so index it rather than calling it
        self.properties['createDTG'] = datetime.fromtimestamp(
            os.path.getmtime(self.properties['filename']))
def ts_to_date(self, ts, precision='s'):
    try:
        if isinstance(ts, int):
            # change milliseconds to seconds
            if ts > 16307632000:
                ts = ts // 1000
            if precision == 'ns':
                ts = datetime.utcfromtimestamp(ts)
                # convert to nanosecond representation
                ts = np.datetime64(ts).astype(datetime)
                ts = pd.Timestamp(datetime.date(ts))
            elif precision == 's':
                # convert to ms representation
                ts = datetime.fromtimestamp(ts)
        elif isinstance(ts, datetime):
            return ts
        elif isinstance(ts, str):
            return datetime.strptime(ts, "%Y-%m-%d %H:%M:%S")
        #logger.warning('ts_to_date: %s', ts)
        return ts
    except Exception:
        logger.error('ts_to_date', exc_info=True)
        return ts
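# A standalone sketch (not from the original) of the millisecond-vs-second
# heuristic ts_to_date applies: values above the cutoff are treated as
# milliseconds. _norm_ts is a hypothetical helper; the cutoff mirrors the
# method above.
from datetime import datetime

def _norm_ts(ts, cutoff=16307632000):
    return ts // 1000 if ts > cutoff else ts

print(datetime.fromtimestamp(_norm_ts(1559896210)))     # seconds in
print(datetime.fromtimestamp(_norm_ts(1559896210000)))  # milliseconds in, same instant out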
def readRss(urls):
    print('refresh the news rss==')
    #posts = mongo.db.readings
    for url in urls:
        items = feedparser.parse(url)
        for entry in items.entries:
            if (posts.find_one({"link": entry.link})):
                continue
            str_pubDate = strftime("%Y-%m-%d %H:%M:%S", entry.date_parsed)
            d = pq(url=entry.link)
            content = d(".content").html()
            post = {
                "title": entry.title,
                "link": entry.link,
                "published": str_pubDate,
                "date": datetime.fromtimestamp(mktime(entry.published_parsed)),
                "summary": entry.summary,
                'content': content
            }
            post_id = posts.insert(post)
    posts.create_index([("date", -1)])
    posts.create_index([("link", 1)])
def parsepacket(pkt, timeSent):
    bytes = pkt
    try:
        thepkt = {}
        thepkt['datasrc'] = bytes[0]
        thepkt['sequence'] = bytes[2] + (bytes[3] << 8)
        thepkt['pkttype'] = bytes[1] & 0x7f
        thepkt['timeinvalid'] = bytes[1] >> 7
        thepkt['type'] = packet_types[thepkt['pkttype']]
        thepkt['time_sent'] = timeSent  # email time
        thepkt['timestamp'] = bytes[4] + (bytes[5] << 8) + (bytes[6] << 16) + (bytes[7] << 24)
        thepkt['local_timestamp'] = datetime.fromtimestamp(thepkt['timestamp'])
        tempByte = bytes[8:]
        # additional parsing
        if thepkt['pkttype'] == 1:
            # gps first half
            thepkt['payload'] = parsegps1(bytes[8:])
        if (thepkt['pkttype'] == 2 and len(tempByte) == 12):
            # gps second half
            thepkt['payload'] = parsegps2(bytes[8:])
        if thepkt['pkttype'] == 4:
            # rtstate
            thepkt['payload'] = parsertstate(bytes[8:])
        if (thepkt['pkttype'] == 5 and len(tempByte) == 10):
            # rtpath
            thepkt['payload'] = parsertpath(bytes[8:])
        if (thepkt['pkttype'] == 6 and len(tempByte) == 5):
            # connection
            thepkt['payload'] = parseconn(bytes[8:])
        return thepkt
    except:
        print "error parsing"
        return
def api_get_session(force=0, return_data=False):
    force = int(force)
    profile_settings = load_profile(profile_id=1)
    subscriptionToken = None
    try:
        saved_token = profile_settings['subscriptionToken']
        if saved_token is not None:
            cached_token_expiration_time = datetime.fromtimestamp(pyjwt.decode(saved_token, verify=False)['exp'])
            token_validity_time_remaining = cached_token_expiration_time - datetime.now()
            if token_validity_time_remaining.total_seconds() > 60 * 60 * 24:
                subscriptionToken = saved_token
    except:
        pass
    if subscriptionToken is None:
        login_result = api_login()
        if not login_result['result']:
            if return_data == True:
                return {'result': False, 'data': login_result['data'], 'code': login_result['code']}
            return False
    profile_settings = load_profile(profile_id=1)
    profile_settings['last_login_success'] = 1
    profile_settings['last_login_time'] = int(time.time())
    save_profile(profile_id=1, profile=profile_settings)
    if return_data == True:
        return {'result': True, 'data': login_result['data'], 'code': login_result['code']}
    return True
def get_google_finance_intraday(ticker, period=60, days=3):
    url = 'https://finance.google.com/finance/getprices' \
          '?i={period}&p={days}d&f=d,o,h,l,c,v&df=cpct&q={ticker}'.format(ticker=ticker,
                                                                          period=period,
                                                                          days=days)
    page = requests.get(url)
    reader = csv.reader(page.text.splitlines())
    columns = ['Open', 'High', 'Low', 'Close', 'Volume']
    rows = []
    times = []
    for row in reader:
        #print(row)
        if re.match('^[a\d]', row[0]):
            if row[0].startswith('a'):
                start = datetime.fromtimestamp(int(row[0][1:]))
                times.append(start)
            else:
                times.append(start + timedelta(seconds=period * int(row[0])))
            rows.append(map(float, row[1:]))
    #print(len(rows))
    if len(rows):
        result = pd.DataFrame(rows, index=pd.DatetimeIndex(times, name='Date'), columns=columns)
        result.insert(0, 'Ticker', ticker)
        return result
    else:
        result = pd.DataFrame(rows, index=pd.DatetimeIndex(times, name='Date'))
        result.insert(0, 'Ticker', ticker)
        return result
def evaluate_single_certificate(x509):
    """
    Translate the certificate to its attributes.
    Calculate the days to expiration.

    :param x509: body of x509
    :type x509: string
    :return: Days to Expiration
    :rtype: int
    :return: Certificates Attributes
    :rtype: dict
    """
    try:
        x509_str = "-----BEGIN CERTIFICATE-----\n" + x509 + "\n-----END CERTIFICATE-----\n"
        # Decode the x509 certificate
        x509_obj = crypto.load_certificate(crypto.FILETYPE_PEM, x509_str)
        certData = {
            'Subject': dict(x509_obj.get_subject().get_components()),
            'Issuer': dict(x509_obj.get_issuer().get_components()),
            'serialNumber': x509_obj.get_serial_number(),
            'version': x509_obj.get_version(),
            'not Before': datetime.strptime(x509_obj.get_notBefore().decode(), '%Y%m%d%H%M%SZ'),
            'not After': datetime.strptime(x509_obj.get_notAfter().decode(), '%Y%m%d%H%M%SZ'),
        }
        certData['Subject'] = {y.decode(): certData['Subject'].get(y).decode() for y in certData['Subject'].keys()}
        certData['Issuer'] = {y.decode(): certData['Issuer'].get(y).decode() for y in certData['Issuer'].keys()}
    except Exception as e:
        # Throw the exception back to the main thread to catch
        raise Exception from e
    cert_expire = certData['not After']
    now = datetime.fromtimestamp(mktime(gmtime(time())))
    expiration_days = (cert_expire - now).days
    return expiration_days, certData
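# A hedged usage sketch for evaluate_single_certificate: it expects the bare
# base64 body of a PEM certificate, so the armour lines are stripped first.
# The file name is illustrative.
with open('server.pem') as f:
    body = f.read().replace('-----BEGIN CERTIFICATE-----', '') \
                   .replace('-----END CERTIFICATE-----', '').strip()
days_left, attrs = evaluate_single_certificate(body)
print(days_left, attrs['Subject'])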
def load_reviews(path, **kwargs):
    """
    Loads MovieLens Reviews
    :param path:
    :param kwargs:
    :return:
    """
    options = {
        'fieldnames': ('userid', 'movieid', 'rating', 'timestamp'),
        'delimiter': '\t',
    }
    options.update(kwargs)
    parse_date = lambda r, k: datetime.fromtimestamp(float(r[k]))
    parse_int = lambda r, k: int(r[k])
    with open(path, 'r', encoding='UTF-8') as reviews:
        reader = csv.DictReader(reviews, **options)
        for row in reader:
            # Keys must match the fieldnames declared above.
            row['userid'] = parse_int(row, 'userid')
            row['movieid'] = parse_int(row, 'movieid')
            row['rating'] = parse_int(row, 'rating')
            row['timestamp'] = parse_date(row, 'timestamp')
            yield row
def store_packet(pkt):
    if debug:
        print pkt.getlayer(IP).proto
    wrpcap(MEDIA_ROOT + "temp.pcap", pkt)
    p = Packet(title='Captured Packet',
               timestamp=datetime.fromtimestamp(pkt.time).strftime("%Y-%m-%d %H:%M:%S.%f"),
               src=pkt.getlayer(IP).src,
               dst=pkt.getlayer(IP).dst,
               srcport=pkt.sport,
               dstport=pkt.dport,
               packetdata=hd(str(pkt)))
    p.pcap.name = "temp.pcap"
    try:
        # No trailing comma here: it would turn the value into a tuple.
        p.target = targets[pkt.dport]
    except KeyError:
        p.target = "return traffic"
    #try:
    p.protocol = protos[pkt.getlayer(IP).proto]
    #except KeyError:
    #    p.protocol = "IP"
    p.save()
def test_rate_limit(api, wait=True, buffer=.1):
    """
    Tests whether the rate limit of the last request has been reached.
    :param api: The `tweepy` api instance.
    :param wait: A flag indicating whether to wait for the rate limit reset
                 if the rate limit has been reached.
    :param buffer: A buffer time in seconds that is added on to the waiting
                   time as an extra safety margin.
    :return: True if it is ok to proceed with the next request. False otherwise.
    """
    # Get the number of remaining requests
    remaining = int(api.last_response.getheader('x-rate-limit-remaining'))
    # Check if we have reached the limit
    if remaining == 0:
        limit = int(api.last_response.getheader('x-rate-limit-limit'))
        reset = int(api.last_response.getheader('x-rate-limit-reset'))
        # Parse the UTC time
        reset = datetime.fromtimestamp(reset)
        # Let the user know we have reached the rate limit
        print("0 of {} requests remaining until {}.".format(limit, reset))
        if wait:
            # Determine the delay and sleep
            delay = (reset - datetime.now()).total_seconds() + buffer
            print("Sleeping for {}s...".format(delay))
            time.sleep(delay)
            # We have waited for the rate limit reset. OK to proceed.
            return True
        else:
            # We have reached the rate limit. The user needs to handle
            # the rate limit manually.
            return False
    # We have not reached the rate limit
    return True
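# A hedged usage sketch: call test_rate_limit after each request so the loop
# sleeps through the reset window when needed. `api` and `user_ids` are
# assumed to exist elsewhere; this mirrors the tweepy-2.x-style response
# object the function reads headers from.
for user_id in user_ids:
    followers = api.followers_ids(user_id)
    if not test_rate_limit(api, wait=True):
        break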
def score2stamp(score_in):
    ## Converts score value into a timestamp
    return datetime.fromtimestamp(score_in).strftime('%Y-%m-%d %H:%M:%S')
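# A plausible inverse helper (not in the original), converting the formatted
# string back into an epoch score with the same format string.
import time

def stamp2score(stamp_in):
    return time.mktime(time.strptime(stamp_in, '%Y-%m-%d %H:%M:%S'))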
# print(type(tt))
# <class 'datetime.datetime'>

# (2)
t2 = time.mktime(time.strptime(s, f))
tt2 = time.strptime(s, f)  # this type is a bit unusual
# print(tt2)
# time.struct_time(tm_year=2019, tm_mon=6, tm_mday=7, tm_hour=16, tm_min=30, tm_sec=10,
#                  tm_wday=4, tm_yday=158, tm_isdst=-1)
# print(type(tt2))
# <class 'time.struct_time'>

ut = 1559896210.0
# Convert the timestamp to local time
t3 = time.localtime(t)
# print(t3)
# time.struct_time(tm_year=2019, tm_mon=6, tm_mday=7, tm_hour=16, tm_min=30, tm_sec=10,
#                  tm_wday=4, tm_yday=158, tm_isdst=0)

d = datetime.fromtimestamp(ut)
print(d)
d2 = time.strftime(f, time.localtime(ut))
print(d2)

# Reference:
# https://blog.csdn.net/lsg9012/article/details/86546345

# if __name__ == '__main__':
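# A compact round-trip summary of the conversions exercised above (not in the
# original), assuming the same format string f; results are local-time dependent.
from datetime import datetime
import time

f = '%Y-%m-%d %H:%M:%S'
ut = 1559896210.0
dt = datetime.fromtimestamp(ut)          # timestamp -> datetime
st = time.localtime(ut)                  # timestamp -> struct_time
s2 = time.strftime(f, st)                # struct_time -> string
ut2 = time.mktime(time.strptime(s2, f))  # string -> timestamp
print(ut2 == ut)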
#layoutg = graph.layout_lgl()
bin2 = structure[data['id_str']][TS]  # gettimestamp(data)
delta = bin2 - bin1
stop = (switch > MAX_BINS or n > numTweets)
if stop or delta > MAX_DELTA:
    graph, starttime = buildGraph(structure)
    if graph.ecount() > 0:
        switch += 1
        timetext = datetime.fromtimestamp(starttime / 1000.0).strftime('%Y%m%d-%H%M%S')
        timetext += '-'
        tmp = folder + timetext + "%03d-" % switch
        print ("Get giant component : ", time.time() - start)
        generateGiantComponent(graph, tmp + '%08d-G' % frame)
        print ("Generate gephy for graph(", graph.vcount(), ", ", graph.ecount(), "): ", time.time() - start)
        generateGephyFile(graph, tmp + '%08d-gephi' % frame)
        print ("Print CSV : ", time.time() - start)
        printGraphCSV(graph, structure, tmp + '%08d-all' % frame)
        #plotGraph(graph, tmp + '%08d.png' % frame, elabel=False, vlabel=False)
        #layoutg = graph.layout_fruchterman_reingold()
        #igraph.plot(graph, tmp + '%08d.png' % frame, vertex_size=15, vertex_label=None, edge_label=None, layout=layoutg)
        print ("finished plotting : ", time.time() - start)
print(len(jsondata.items()))
for key, value in jsondata.items():
    print(key, ":", value, "\n")

# Converting EPOCH time to HUMAN readable time
# The Unix epoch is the time 00:00:00 on 1 January 1970.
import time
time.ctime(jsondata["sys"]["sunrise"])

# fromtimestamp lives on the datetime class, not the module, so import the class.
from datetime import datetime
datetime.fromtimestamp(jsondata["sys"]["sunset"])

# Sample code to POST data
import json
import requests
Host = "http://httpbin.org/post"
data = {"firstname": "dev", "language": "English"}
headers = {"Content-Type": "application/json", "Content-Length": len(data), "data": json.dumps(data)}
def modified_time(self, name):
    metadata = self.blob_service.get_blob_metadata(self.container, name)
    modified_time = float(metadata.get('x-ms-meta-modified_time'))
    return datetime.fromtimestamp(modified_time)
def createImgGOME2_BIRA_OMI(fileAbsPath, pixelSize=0.25):
    filename = os.path.basename(fileAbsPath)
    instrument = filename.split('-')[0]
    product = filename.split('-')[1]
    sub_product = filename.split('-')[2]
    date_extracted = (filename.split('-')[3]).split('_')[0]
    level = 'L2'
    outFileList = []
    hdf = h5py.File(fileAbsPath, 'r')
    driver = gdal.GetDriverByName('GTiff')
    lat = np.array(hdf['latitude'])
    lon = np.array(hdf['longitude'])
    time = np.array(hdf['Time'])
    data_2D = np.array(hdf['SO2 vcd'])
    data_3D = np.array(hdf['SO2 averaging kernel'])
    height = np.array(hdf['SO2 altitude grid'])
    time_computed = []
    for i in range(len(time)):
        time_computed.append(tt.mktime(datetime.strptime(date_extracted + 'T' + str(int(time[i])).rjust(9, '0'), '%Y%m%dT%H%M%S%f').timetuple()))
    dataType = GDT_Float32
    if instrument == 'GOME2B':
        stepSize = 1500
    else:
        stepSize = 4000
    no_data = -9999
    fillValue = -9999
    ySize = 1
    workingDir = os.path.dirname(os.path.realpath(__file__)) + '/../'
    processes = []
    for i in range(0, len(lat), stepSize):
        if i + stepSize > len(lat):
            stepSize = len(lat) - i
        timeSlice = time_computed[i:stepSize + i]
        timeAvg = np.average(timeSlice)
        date = datetime.fromtimestamp(timeAvg).strftime('%Y%m%d.%H%M%S')
        timeStart = datetime.fromtimestamp(timeSlice[0]).strftime('%Y-%m-%dT%H:%M:%SZ')
        timeEnd = datetime.fromtimestamp(timeSlice[-1]).strftime('%Y-%m-%dT%H:%M:%SZ')
        filenameCoords = 'GOME_Coords_' + date + '.tif'
        coord_ds = driver.Create(filenameCoords, stepSize, ySize, 2, dataType)
        coord_ds.GetRasterBand(1).WriteArray(np.reshape(lat[i:stepSize + i], (stepSize, 1)).transpose())
        coord_ds.GetRasterBand(2).WriteArray(np.reshape(lon[i:stepSize + i], (stepSize, 1)).transpose())
        coord_ds = None
        tmpOutFile_2D = instrument + '_' + product + '_' + level + '_2D_' + date + '_tmp.tif'
        filenameOutput_2D = tmpOutFile_2D[0:-8] + '.tif'
        data_ds = driver.Create(tmpOutFile_2D, stepSize, ySize, 1, dataType)
        # TODO: To convert from DU to ug/cm3
        band = np.reshape(data_2D[i:stepSize + i], (stepSize, 1)).transpose()
        band[band == fillValue] = no_data
        maxValue = np.max(ma.masked_equal(band, no_data))
        minValue = np.min(ma.masked_equal(band, no_data))
        data_ds.GetRasterBand(1).WriteArray(band)
        data_ds = None
        window = str(stepSize) + 'x' + str(ySize)
        upper_left = []
        lower_right = []
        upper_left.append(np.amax(lat[i:stepSize + i]))
        upper_left.append(np.amin(lon[i:stepSize + i]))
        lower_right.append(np.amin(lat[i:stepSize + i]))
        lower_right.append(np.amax(lon[i:stepSize + i]))
        workingDir = os.path.dirname(os.path.realpath(__file__)) + '/../'
        if instrument == 'GOME2B':
            command_call = [workingDir + 'bin/remap', '-i', tmpOutFile_2D,
                            '-l', str(upper_left[0]), str(upper_left[1]),
                            '-e', str(lower_right[0]) + ',' + str(lower_right[1]),
                            '-a', filenameCoords, '-s', str(pixelSize), '-w', str(window),
                            '-n', str(no_data), '-q', '-o', filenameOutput_2D + '_mask', '-f', '60000']
        else:
            command_call = [workingDir + 'bin/remap', '-i', tmpOutFile_2D,
                            '-l', str(upper_left[0]), str(upper_left[1]),
                            '-e', str(lower_right[0]) + ',' + str(lower_right[1]),
                            '-a', filenameCoords, '-s', '0.1', '-w', str(window),
                            '-n', str(no_data), '-q', '-o', filenameOutput_2D + '_mask', '-f', '50000']
        mask_process = subprocess.Popen(command_call, stdout=open(os.devnull, 'wb'))
        # remove filenameOutput_2D+'_mask', '-f', '60000' from command_call
        command_call.pop()
        command_call.pop()
        command_call.pop()
        command_call.append(filenameOutput_2D + '_remapped')
        command_call.append('-c')
        coord_process = subprocess.Popen(command_call, stdout=open(os.devnull, 'wb'))
        coord_process.wait()
        mask_process.wait()
        remap_ds = gdal.Open(filenameOutput_2D + '_remapped', gdal.GA_ReadOnly)
        transform_i = remap_ds.GetRasterBand(1).ReadAsArray().transpose()
        transform_j = remap_ds.GetRasterBand(2).ReadAsArray().transpose()
        mask_ds = gdal.Open(filenameOutput_2D + '_mask', gdal.GA_ReadOnly)
        mask = mask_ds.GetRasterBand(1).ReadAsArray().transpose()
        dst_ds = driver.Create(filenameOutput_2D, transform_j.shape[0], transform_j.shape[1], 1, gdal.GDT_Float32)
        outData = np.ones([transform_j.shape[0], transform_j.shape[1]]) * no_data
        band = np.reshape(data_2D[i:stepSize + i], (stepSize, 1)).transpose()
        # Use r/c for the pixel loops: reusing i/j here would clobber the
        # chunk index i that is still needed below.
        for r in range(outData.shape[0]):
            for c in range(outData.shape[1]):
                outData[r, c] = band[transform_j[r, c], transform_i[r, c]]
        outData[mask == no_data] = no_data
        dst_ds.GetRasterBand(1).WriteArray(outData.transpose())
        # 2D metadata
        dst_ds.SetGeoTransform([upper_left[1], pixelSize, 0, upper_left[0], 0, -pixelSize])
        srs = osr.SpatialReference()
        srs.SetWellKnownGeogCS('WGS84')
        dst_ds.SetProjection(srs.ExportToWkt())
        dst_ds.SetMetadataItem('GLOBAL_MAX', str(maxValue))
        dst_ds.SetMetadataItem('GLOBAL_MIN', str(minValue))
        dst_ds.SetMetadataItem('TIME_END', timeEnd)
        dst_ds.SetMetadataItem('TIME_START', timeStart)
        dst_ds.GetRasterBand(1).SetNoDataValue(-9999)
        dst_ds.GetRasterBand(1).ComputeStatistics(False)
        dst_ds = None
        # 3D
        heightLevels = []
        numBands = data_3D.shape[0]
        filenameOutput_3D = instrument + '_' + product + '_' + level + '_3D_' + date + '.tif'
        dst_ds = driver.Create(filenameOutput_3D, transform_j.shape[0], transform_j.shape[1], numBands, gdal.GDT_Float32)
        for l in range(numBands):
            outData = np.ones([transform_j.shape[0], transform_j.shape[1]]) * no_data
            band = np.reshape(data_3D[l, i:stepSize + i], (stepSize, 1)).transpose()
            avg = np.average(height[l][i:stepSize + i])
            if avg < 0:
                avg = 0
            heightLevels.append(avg * 1000)
            for r in range(outData.shape[0]):
                for c in range(outData.shape[1]):
                    outData[r, c] = band[transform_j[r, c], transform_i[r, c]]
            outData[mask == no_data] = no_data
            dst_ds.GetRasterBand(l + 1).SetNoDataValue(no_data)
            dst_ds.GetRasterBand(l + 1).WriteArray(outData.transpose())
            dst_ds.GetRasterBand(l + 1).ComputeStatistics(False)
        # 3D metadata
        dst_ds.SetGeoTransform([upper_left[1], pixelSize, 0, upper_left[0], 0, -pixelSize])
        srs = osr.SpatialReference()
        srs.SetWellKnownGeogCS('WGS84')
        dst_ds.SetProjection(srs.ExportToWkt())
        dst_ds.SetMetadataItem('GLOBAL_MAX', str(np.max(band)))
        dst_ds.SetMetadataItem('GLOBAL_MIN', str(np.min(band)))
        dst_ds.SetMetadataItem('TIME_END', timeEnd)
        dst_ds.SetMetadataItem('TIME_START', timeStart)
        dst_ds.SetMetadataItem('VERTICAL_LEVELS_NUMBER', str(len(heightLevels)))
        dst_ds.SetMetadataItem('VERTICAL_LEVELS', str(heightLevels).replace(' ', '')[1:-1])
        dst_ds = None
        outFileList.append(filenameOutput_2D)
        outFileList.append(filenameOutput_3D)
        os.system('rm ' + tmpOutFile_2D)
        os.system('rm ' + filenameOutput_2D + '_mask')
        os.system('rm ' + filenameOutput_2D + '_remapped')
    os.system('rm ' + filenameCoords[0:11] + '*')
    return outFileList
def format(self, record):
    isotime = datetime.fromtimestamp(record.created).strftime("%b %d %H:%M:%S")
    data = '%s %s CEF: %s' % (isotime, gethostname(),
                              super(CEFFormatter, self).format(record))
    return data.encode('ASCII', 'ignore')
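# A minimal wiring sketch for CEFFormatter (handler choice is illustrative);
# note that format() above returns an ASCII-encoded byte string, which suits
# the Python 2 logging stack this snippet appears written for.
import logging
handler = logging.StreamHandler()
handler.setFormatter(CEFFormatter('%(message)s'))
cef_log = logging.getLogger('cef')
cef_log.addHandler(handler)
cef_log.warning('cs1=example')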
def parse_job(self, response): js = json.loads('{}') try: js = json.loads(response.body) except: log.msg(u'职位详情返回非法的json数据,%s' % (response.body, ), level = log.ERROR) return # firstPublishTime = 0 _id = response.meta['_id'] if response.meta.has_key('FirstPublishTime'): firstPublishTime = response.meta['FirstPublishTime'] PublishTime = response.meta['PublishTime'] if js.has_key('refTime'): refTime = int(js['refTime']) else: refTime = int(mktime(datetime.now().timetuple())) pauseTime = 0 if js.has_key('pauseTime'): pauseTime = int(js['pauseTime']) if firstPublishTime == 0: firstPublishTime = refTime if refTime <= PublishTime: if pauseTime == 0: refTime = int(mktime(datetime.now().timetuple())) else: #本次最大发布时间<=上次抓取时间,直接退出 if response.meta['Last']: #第一页的发布结束日期=该职位的发布日期,不是第一条职位的发布日期 str_t0 = strftime('%Y-%m-%d %H:%M:%S', localtime(PublishTime)) str_t1 = strftime('%Y-%m-%d %H:%M:%S', localtime(firstPublishTime)) log.msg(u'0.CityName= %s 结束换词,发布日期:%s->%s' % (response.meta['CityName'], str_t0, str_t1), level = log.INFO) return elif response.meta['FirstFirst']: update_publishtime('CityIndex', _id, firstPublishTime, 'PublishTime') str_t0 = strftime('%Y-%m-%d %H:%M:%S', localtime(PublishTime)) str_t1 = strftime('%Y-%m-%d %H:%M:%S', localtime(firstPublishTime)) log.msg(u'0.CityName= %s 开始换词,发布日期:%s->%s' % (response.meta['CityName'], str_t0, str_t1), level = log.INFO) #下一页处理 nextPage = response.meta['NextPage'] if nextPage: yield Request(url = nextPage, meta = {'use_proxy': True, '_id': _id, 'CityId': response.meta['CityId'], 'CityName': response.meta['CityName'], 'PublishTime': PublishTime, 'FirstPublishTime': firstPublishTime, 'download_timeout': 10 }, dont_filter = True, callback = self.parse_joblist) #由于存在本页与上一页职位存在重复内容(新增职位会导致页数变动),需要进行职位去重(依据职位linkID,refTime去重) if not js.has_key('id'): log.msg(u'该职位没有id,丢弃该职位') return jobName = '' jobCode = '' #查找第3级 for jobType in js['jobType']: if jobType.has_key('jobNameId'): jobName = jobType['jobName'] jobCode = '%s_%s_%s' % (jobType['bigId'], jobType['categoryId'], jobType['jobNameId']) break ''' #查找第2级 if jobCode == '': log.msg(u'职位所属类别不能明确到第3级') for jobType in js['jobType']: if jobType.has_key('categoryId'): jobName = jobType['categoryName'] jobCode = '%s_%s' % (jobType['bigId'], jobType['categoryId']) break #查找第1级 if jobCode == '': for jobType in js['jobType']: if jobType.has_key('bigId'): jobCode = jobType['bigId'] webJob['JobName'] = jobType['bigName'] webJob['JobCode'] = FmtChinahrJobCode('remote_252_1', jobCode) break ''' if jobCode == '': log.msg(u'职位所属类别不明确,丢弃该职位[%s]' % js['jobName']) return LinkID = js['id'] if exist_linkid(7, LinkID, refTime): #log.msg(u'存在相同职位id,重复抓取,LinkID:%s,refTime:%s' % (LinkID, refTime)) return log.msg(u'增加职位:%s' % js['jobName']) webJob = WebJob() webJob['SiteID'] = 7 webJob['JobName'] = jobName webJob['JobCode'] = FmtChinahrJobCode('remote_252_1', jobCode) webJob['JobTitle'] = js['jobName'].replace('/', '').replace(' ','') webJob['Company'] = js['comName'] postTime = datetime.fromtimestamp(refTime) webJob['PublishTime'] = postTime webJob['RefreshTime'] = postTime # if js.has_key('salary'): webJob['Salary'] = js['salary'] else: webJob['Salary'] = u'面议' if js.has_key('minSalary'): webJob['SalaryMin'] = js['minSalary'] else: webJob['SalaryMin'] = '0' if js.has_key('maxSalary'): webJob['SalaryMax'] = js['maxSalary'] else: webJob['SalaryMax'] = '0' webJob['Eduacation'] = js['degName'] webJob['Number'] = js['number'] webJob['Exercise'] = js['experience'] webJob['Sex'] = js['gender'] webJob['SSWelfare'] = '' webJob['SBWelfare'] 
    webJob['OtherWelfare'] = ''
    if 'welfare' in js:
        ret = ''
        for welfare in js['welfare']:
            ret += welfare['name']
            ret += ' '
        # Trim the trailing separator.
        if ret.endswith(' '):
            ret = ret[0:-1]
        webJob['SSWelfare'] = ret

    if 'contact' in js:
        webJob['Relation'] = js['contact']
    else:
        webJob['Relation'] = js['comContact']
    webJob['Mobile'] = ''
    if 'phone' in js:
        webJob['Mobile'] = js['phone']
    elif 'telphone' in js:
        webJob['Mobile'] = js['telphone']
    if 'mobile' in js:
        if webJob['Mobile'] != '':
            webJob['Mobile'] += ','
        webJob['Mobile'] += js['mobile']
    webJob['InsertTime'] = datetime.today()
    if 'jobEmail' in js:  # candidates: comEmail, jobEmail, email
        webJob['Email'] = js['jobEmail']
    elif 'email' in js:
        webJob['Email'] = js['email']
    else:
        webJob['Email'] = js['comEmail']
    # Multiple addresses may be present; keep only the first one.
    emailEndPos = webJob['Email'].find(';')
    if emailEndPos > 0:
        webJob['Email'] = webJob['Email'][0:emailEndPos]
    # Requirements + responsibilities + other benefits.
    if js['condition'] != js['jobDesc']:
        webJob['RequirementsDesc'] = js['condition']
    else:
        webJob['RequirementsDesc'] = ''
    webJob['ResponsibilityDesc'] = js['jobDesc']
    if 'benefits' in js:
        if js['benefits'] != js['jobDesc']:
            webJob['JobDesc'] = js['benefits']
        else:
            webJob['JobDesc'] = ''
    else:
        webJob['JobDesc'] = ''
    # "hiring N | education | experience | gender"
    webJob['Require'] = u'招%s人|学历%s|经验%s|性别%s' % (webJob['Number'], webJob['Eduacation'], webJob['Exercise'], webJob['Sex'])
    webJob['JobRequest'] = ''
    webJob['LinkID'] = LinkID
    webJob['Tag'] = ''
    workPlace = js['workPlace']
    # Work location (single or multiple places).
    place = workPlace[0]
    if len(workPlace) > 1:
        for place in workPlace:
            if 'cityId' in place:
                if '%s_%s' % (place['provId'], place['cityId']) == response.meta['CityId']:
                    break
            # Level-0 or level-1 city code.
            else:
                # Nationwide: fall back to the current search city.
                if place['provId'] == '-1':
                    place['provName'] = ''
                    place['cityId'] = response.meta['CityId']
                    place['cityName'] = response.meta['CityName']
                    break
                # Municipality (Beijing, Tianjin, Shanghai, Chongqing).
                elif place['provId'] == response.meta['CityId']:
                    place['cityId'] = response.meta['CityId']
                    place['cityName'] = place['provName']
                    place['provName'] = ''
                    break
                # Only the province is given; the job is still searchable by city.
                elif place['provId'] == response.meta['CityId'][0:2]:
                    place['cityId'] = response.meta['CityId']
                    place['cityName'] = response.meta['CityName']
                    break
    if 'cityId' in place:
        CityName = place['cityName']
    # Municipality.
    else:
        if place['provId'] == '-1':
            CityName = response.meta['CityName']
        else:
            CityName = place['provName']
    # Strip a trailing '市' ("city") from the name.
    if CityName[-1:] == u'市':
        CityName = CityName[0:-1]
    webJob['ProvinceName'] = ''
    if 'provName' in place:
        webJob['ProvinceName'] = place['provName']
    webJob['CityName'] = CityName
    webJob['WorkArea'] = CityName
    if 'distName' in place:
        webJob['WorkArea1'] = place['distName']
    else:
        webJob['WorkArea1'] = ''
    if 'ivAddr' in js:
        webJob['JobAddress'] = js['ivAddr']
    else:
        webJob['JobAddress'] = webJob['ProvinceName'] + CityName + webJob['WorkArea1']
    webJob['WorkArea2'] = ''
    webJob['AreaCode'] = FmtAreaCodeSimple('remote_252_1', CityName)
    webJob['CompanyLink'] = 'cnc_' + js['comId']
    webJob['JobType'] = 1
    webJob['SyncStatus'] = 0
    webJob['SrcUrl'] = self.create_url('/job/%s.html' % webJob['LinkID'])
    webJob['GisLongitude'] = '0'
    webJob['GisLatitude'] = '0'
    if 'map' in js:
        webJob['GisLatitude'] = js['map']['yCoord']
        webJob['GisLongitude'] = js['map']['xCoord']
    webJob['ClickTimes'] = js['looked']
    webJob['AnFmtID'] = 0
    webJob['KeyValue'] = ''
    webJob['Industry'] = js['industryNameLong']
    if webJob['Industry'] == '':
        webJob['Industry'] = js['industryName']
    webJob['CompanyType'] = js['typeName']
    webJob['CompanyScale'] = js['size']
    webJob['Telphone1'] = ''
    webJob['Telphone2'] = ''
    webJob['Age'] = js['age']
    webJob['ValidDate'] = ''
    webJob['ParentName'] = ''
    webJob['EduacationValue'] = 0
    webJob['SalaryMin'] = 0.0
    webJob['SalaryMax'] = 0.0
    webJob['NumberValue'] = 0
    webJob['SexValue'] = 0
    webJob['OperStatus'] = 0
    webJob['LastModifyTime'] = datetime.today()
    webJob['PropertyTag'] = ''
    webJob['SalaryValue'] = 0
    webJob['ExerciseValue'] = 0
    webJob['Valid'] = 'T'
    webJob['JobWorkTime'] = ''
    webJob['JobComputerSkill'] = ''
    webJob['ForeignLanguage'] = ''
    webJob['JobFunction'] = ''
    webJob['SalaryType'] = 0
    webJob['StartDate'] = ''
    webJob['EndDate'] = ''
    webJob['BusinessCode'] = ''
    yield webJob
    #log.msg(str(webJob['JobName']), level=log.INFO)

    # Emit the company record as well.
    cmp = Company()
    cmp['SiteID'] = 7
    cmp['company_id'] = webJob['CompanyLink']
    cmp['Credibility'] = 0
    cmp['Licensed'] = 0
    cmp['Yan'] = 0
    cmp['FangXin'] = 0
    if 'comAuditVal' in js:
        if js['comAuditVal'] == u'普审':  # basic audit
            cmp['Yan'] = 1
        if js['comAuditVal'] == u'证审':  # license-verified audit
            cmp['Licensed'] = 1
            cmp['Yan'] = 1
    cmp['CompanyName'] = js['comName']
    cmp['CityName'] = ''
    if 'corpLocation' in js:
        if 'cityName' in js['corpLocation']:
            cmp['CityName'] = js['corpLocation']['cityName']
        elif js['corpLocation']['provId'] in ('34', '35', '36', '37'):
            # Province IDs of the four municipalities.
            cmp['CityName'] = js['corpLocation']['provName']
        # Strip a trailing '市' ("city") from the name.
        if cmp['CityName'][-1:] == u'市':
            cmp['CityName'] = cmp['CityName'][0:-1]
    cmp['Industry'] = webJob['Industry']
    cmp['CompanyType'] = webJob['CompanyType']
    cmp['CompanyScale'] = webJob['CompanyScale']
    if 'addr' in js:
        cmp['CompanyAddress'] = FmtSQLCharater(js['addr'])
    else:
        cmp['CompanyAddress'] = ''
    if 'corpLocation' in js:
        if 'provName' in js['corpLocation']:
            if js['corpLocation']['provId'] not in ('34', '35', '36', '37'):
                cmp['CompanyAddress'] = js['corpLocation']['provName']
        if 'cityName' in js['corpLocation']:
            cmp['CompanyAddress'] += js['corpLocation']['cityName']
        if 'distName' in js['corpLocation']:
            cmp['CompanyAddress'] += js['corpLocation']['distName']

    cmpRel = js['comContact']
    cmp['CompanyUrl'] = js['comNameUrl']
    if 'logo_path' in js:
        cmp['CompanyLogoUrl'] = js['logo_path']
    else:
        cmp['CompanyLogoUrl'] = ''
    cmp['Relation'] = FmtSQLCharater(cmpRel)
    cmp['Mobile'] = ''
    if 'comPhone' in js:
        cmp['Mobile'] = js['comPhone']
    if 'comMobile' in js:
        if cmp['Mobile'] != '':
            cmp['Mobile'] += ','
        cmp['Mobile'] += js['comMobile']
    if 'comEmail' in js:
        cmp['Email'] = js['comEmail']
    else:
        cmp['Email'] = ''
    if 'compDesc' in js:
        cmpDesc = js['compDesc']
    else:
        cmpDesc = ''
    cmp['CompanyDesc'] = FmtSQLCharater(cmpDesc)
    cmp['PraiseRate'] = '0'
    cmp['GisLongitude'] = '0'
    cmp['GisLatitude'] = '0'
    cmp['UserId'] = ''
    cmp['UserName'] = ''
    cmp['ProvinceName'] = ''
    cmp['WorkArea1'] = ''
    cmp['AreaCode1'] = ''
    yield cmp
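# The dedup helper exist_linkid(SiteID, LinkID, refTime) used above is defined
# elsewhere. A minimal sketch of the idea, assuming an in-memory cache (the
# real implementation presumably checks persistent storage): per the comment
# in parse_job, jobs are deduplicated on (LinkID, refTime), so a refreshed
# posting with a new refTime is crawled again.
_seen_links = {}

def exist_linkid(site_id, link_id, ref_time):
    """Return True if this (site, link) was already seen at this refresh time."""
    key = (site_id, link_id)
    if _seen_links.get(key) == ref_time:
        return True  # same posting, same refresh time: duplicate
    _seen_links[key] = ref_time  # remember the newest refresh time seen
    return False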
def main():
    """Call to run script."""
    # Assumes module-level imports (os, datetime) and helpers defined
    # elsewhere in the script: _process_command_line, get_filepaths,
    # getvalidtime.
    basedirectory = "/data_store/manual"
    args = _process_command_line()
    searchpath = "{}/{}".format(basedirectory, args.type)
    curtime = datetime.utcnow()
    if args.day is not None:
        if int(args.day) > 31:
            # args.day is MMDD; split it into month and day of month.
            dom = int(args.day) % 100
            mon = int(args.day) // 100
            subDir = '{}{:02}{:02}'.format(curtime.strftime("%Y"), mon, dom)
        else:
            # args.day is a day of the current month.
            subDir = '{}{:02}'.format(curtime.strftime("%Y%m"), int(args.day))
        paths = get_filepaths('{}/{}'.format(searchpath, subDir))
    else:
        subDir = curtime.strftime("%Y%m%d")
        print subDir
        paths = get_filepaths('{}/{}'.format(searchpath, subDir))
    paths.sort(reverse=True)
    #if args.match:
    #    print "Matching {}".format(args.match)

    count = 0
    unknown = 0
    sumlatency = 0
    maxlatency = 0
    minlatency = 2000
    file_times = []
    for path in paths:
        savesecs = os.path.getmtime(path)
        savetime = datetime.fromtimestamp(savesecs)
        validsecs, ddttstr = getvalidtime(path, args.type, args.verbose)
        # Latency in whole minutes between the product's valid time and the
        # time the file was saved.
        latency = int((savesecs - validsecs) / 60)
        #HHMMstr = savetime.strftime("%H%M")
        # Apply the optional substring filter; non-matching paths are skipped.
        if args.match and args.match not in path:
            continue
        if args.latency:
            print "{:d}m {}".format(latency, path)
        elif args.tstamp:
            if ddttstr not in file_times:
                file_times.append(ddttstr)
        else:
            print "{}".format(path)
        if validsecs > 0:
            sumlatency += latency
            if latency > maxlatency:
                maxlatency = latency
            if latency < minlatency:
                minlatency = latency
            count += 1
        else:
            unknown += 1
    if count > 0:
        file_times.sort()
        if args.tstamp:
            if not args.abbrev:
                for ddttstr in file_times:
                    print "{}".format(ddttstr)
        # Use float division so the average is not silently truncated.
        avglatency = float(sumlatency) / count
        print "Products found: {}".format(count)
        print "Avg latency = {:.1f} min".format(avglatency)
        print "Max latency = {:d} min".format(maxlatency)
        print "Min latency = {:d} min".format(minlatency)
    else:
        print "Nothing matches criteria."
    if unknown > 0:
        print "Unknown products: {}".format(unknown)
    return
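# main() relies on _process_command_line(), which is not shown here. A
# minimal argparse sketch that supplies every attribute main() reads; the
# attribute names (type, day, match, latency, tstamp, abbrev, verbose) come
# from the code above, while the option strings and help text are assumptions.
import argparse

def _process_command_line():
    parser = argparse.ArgumentParser(description='Report file latencies for a product directory.')
    parser.add_argument('type', help='product type subdirectory under the base directory')
    parser.add_argument('--day', help='day of month (DD) or month+day (MMDD); defaults to today')
    parser.add_argument('--match', help='only consider paths containing this substring')
    parser.add_argument('--latency', action='store_true', help='print per-file latency')
    parser.add_argument('--tstamp', action='store_true', help='collect distinct product timestamps')
    parser.add_argument('--abbrev', action='store_true', help='suppress the timestamp listing')
    parser.add_argument('--verbose', action='store_true')
    return parser.parse_args()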
import email.utils
import time
from datetime import datetime

def dt_parse(t):
    """Parse an RFC 2822 date string into a naive local datetime."""
    return datetime.fromtimestamp(time.mktime(email.utils.parsedate(t)))
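# Example usage of dt_parse(), e.g. on an HTTP Date header. Note that
# email.utils.parsedate() discards the timezone field, so the string's clock
# time is interpreted as local time by time.mktime().
if __name__ == '__main__':
    print dt_parse('Wed, 02 Oct 2002 13:00:00 GMT')  # -> 2002-10-02 13:00:00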