def time_tensorflow_run(session, target, info_string):
    num_steps_burn_in = 10
    total_duration = 0.0
    total_duration_squared = 0.0
    for i in xrange(FLAGS.num_batches + num_steps_burn_in):
        run_options = None
        run_metadata = None
        if FLAGS.enable_trace and i == num_steps_burn_in - 1:
            # Collect a full trace on the last burn-in step only.
            run_options = config_pb2.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = config_pb2.RunMetadata()
            merged = tf.merge_all_summaries()
            writer = tf.train.SummaryWriter('/home/minjie/tmp/pipeline', session.graph)
        # Run session
        start_time = time.time()
        _ = session.run(target, options=run_options, run_metadata=run_metadata)
        duration = time.time() - start_time
        if FLAGS.enable_trace and i == num_steps_burn_in - 1:
            tl = tf.python.client.timeline.Timeline(run_metadata.step_stats)
            ctf = tl.generate_chrome_trace_format()
            with open('tf_trace.ctf', 'w') as f:
                f.write(ctf)
        # >= so every one of the num_batches timed steps is counted; the mean
        # below divides by FLAGS.num_batches.
        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s: step %d, duration = %.3f speed = %.3f images/sec' %
                      (datetime.now(), i - num_steps_burn_in, duration,
                       FLAGS.batch_size / duration))
            total_duration += duration
            total_duration_squared += duration * duration
    mn = total_duration / FLAGS.num_batches
    vr = total_duration_squared / FLAGS.num_batches - mn * mn
    sd = math.sqrt(vr)
    print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
          (datetime.now(), info_string, FLAGS.num_batches, mn, sd))

def time_tensorflow_run(session, target, info_string):
    """Run the computation to obtain the target tensor and print timing stats.

    Args:
      session: the TensorFlow session to run the computation under.
      target: the target Tensor that is passed to the session's run() function.
      info_string: a string summarizing this run, to be printed with the stats.

    Returns:
      None
    """
    num_steps_burn_in = 10
    total_duration = 0.0
    total_duration_squared = 0.0
    for i in xrange(FLAGS.num_batches + num_steps_burn_in):
        start_time = time.time()
        _ = session.run(target)
        duration = time.time() - start_time
        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s: step %d, duration = %.3f' %
                      (datetime.now(), i - num_steps_burn_in, duration))
            total_duration += duration
            total_duration_squared += duration * duration
    mn = total_duration / FLAGS.num_batches
    vr = total_duration_squared / FLAGS.num_batches - mn * mn
    sd = math.sqrt(vr)
    print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
          (datetime.now(), info_string, FLAGS.num_batches, mn, sd))

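# Aside (not part of the original benchmark): the statistics above rely on the
# one-pass identity Var(x) = E[x^2] - E[x]^2. A self-contained sketch of the
# same mean/stddev computation over a plain list of durations:
import math

durations = [0.21, 0.19, 0.22, 0.20]
n = len(durations)
total = sum(durations)
total_squared = sum(d * d for d in durations)
mn = total / n
sd = math.sqrt(total_squared / n - mn * mn)
print('%.3f +/- %.3f sec / batch' % (mn, sd))  # 0.205 +/- 0.011 sec / batch
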
def _clickedCb(self, unused_actor, event):
    if (event.modifier_state & Clutter.ModifierType.CONTROL_MASK):
        self.remove()
    elif (datetime.now() - self.lastClick).total_seconds() < 0.5:
        self.remove()
    self.lastClick = datetime.now()

def checkTask(tile):
    session = DBSession()
    if tile.username is not None:
        # Release the tile if its lease has expired.
        if datetime.now() > tile.update + EXPIRATION_DURATION:
            tile.username = None
            tile.update = datetime.now()
            session.add(tile)

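# checkTask() above depends on a module-level EXPIRATION_DURATION; a plausible
# definition is sketched here as an assumption (the real value is not shown in
# the snippet):
from datetime import timedelta

EXPIRATION_DURATION = timedelta(hours=2)  # a claimed tile is released 2 hours after its last update
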
def handle_noargs(self, **options):
    articles_of_interest = ArticleOfInterest.objects.all()
    for article in articles_of_interest:
        article_dict = query_text_rendered(article.title,
                                           language=article.title_language)
        # don't import articles we already have
        if SourceArticle.objects.filter(doc_id__exact='%s' % article_dict['revid'],
                                        language=article.title_language):
            continue
        try:
            source_article = SourceArticle(title=article.title,
                                           language=article.title_language,
                                           source_text=article_dict['html'],
                                           timestamp=datetime.now(),
                                           doc_id=article_dict['revid'])
            source_article.save()
            tr = TranslationRequest(article=source_article,
                                    target_language=article.target_language,
                                    date=datetime.now(),
                                    translator=DEFAULT_TRANNY)
            tr.save()
        except Exception as e:
            print type(e)
            print e.args
            try:
                source_article.delete()
                tr.delete()
            except:
                pass

def test_linked_in_add_to_profile_btn_not_appearing_without_config(self):
    # Without LinkedIn config, don't show the "Add Certificate to LinkedIn" button.
    self.client.login(username="******", password="******")
    CourseModeFactory.create(
        course_id=self.course.id,
        mode_slug='verified',
        mode_display_name='verified',
        expiration_datetime=datetime.now(pytz.UTC) - timedelta(days=1)
    )
    CourseEnrollment.enroll(self.user, self.course.id, mode='honor')

    self.course.start = datetime.now(pytz.UTC) - timedelta(days=2)
    self.course.end = datetime.now(pytz.UTC) - timedelta(days=1)
    self.course.display_name = u"Omega"
    self.course = self.update_course(self.course, self.user.id)

    download_url = 'www.edx.org'
    GeneratedCertificateFactory.create(
        user=self.user,
        course_id=self.course.id,
        status=CertificateStatuses.downloadable,
        mode='honor',
        grade='67',
        download_url=download_url
    )
    response = self.client.get(reverse('dashboard'))

    self.assertEquals(response.status_code, 200)
    self.assertNotIn('Add Certificate to LinkedIn', response.content)

    response_url = 'http://www.linkedin.com/profile/add?_ed='
    self.assertNotContains(response, escape(response_url))

def main():
    serverName = 'localhost'
    serverPort = 12000  # port number from server program
    clientSocket = socket(AF_INET, SOCK_DGRAM)  # create client socket
    message = 'ping'  # message to be capitalized by server
    i = 0
    while i <= 10:
        start = datetime.now()  # start time for rtt calc
        clientSocket.sendto(message, (serverName, serverPort))  # send to server
        clientSocket.settimeout(0.01)  # timeout length
        try:
            newMess, serverAddress = clientSocket.recvfrom(2048)
            end = datetime.now()  # end time for rtt calc
            rtt = (end - start)
            # printing of information
            print("{0}: Message:{1}\nRTT:{2}\n ".format(i, newMess, rtt))
        except timeout:  # different printout if timeout occurs
            print("{0}: Timeout\n".format(i))
            start = 0
            rtt = 0
        i += 1  # iteration
    clientSocket.close()  # close socket at end

def make_transfer(self):
    self.sock_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # SO_REUSEADDR: http://stackoverflow.com/questions/3229860/what-is-the-meaning-of-so-reuseaddr-setsockopt-option-linux
    s = self.sock_client
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 50688)  # 49.5 KB
    s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 87380)  # 85.3 KB
    s.bind(self.client_addr)
    s.connect(self.server_addr)

    start = datetime.now()
    ## client -> server
    sent = 0
    while sent < self.filesize:
        sent += s.send(self.randomfile[sent:sent + 4096])
    # We have to wait until the server finishes reading data from its socket
    # and closes the connection.
    rcvd = s.recv(1)
    time_up = total_seconds(datetime.now() - start)

    time.sleep(getattr(self, 'sleep', 0))  # wait for a bucket to fill again

    ## server -> client
    start = datetime.now()
    while len(rcvd) < self.filesize:
        rcvd += s.recv(1024)
    time_down = total_seconds(datetime.now() - start)

    s.close()
    return time_up, time_down

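# make_transfer() above calls a free function total_seconds(); a sketch of the
# Python 2.6-compatible helper it presumably stands for (an assumption, not
# the original definition):
def total_seconds(td):
    # Equivalent of timedelta.total_seconds(), which only exists on 2.7+.
    return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6
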
def action_confirm(self):
    for production in self:
        taskcode = production._get_mfg_wip_taskcode()
        for num in xrange(int(production.product_qty)):
            # auto-create serial number
            vals = {
                'production_id': production.id,
                'product_id': production.product_id.id,
                'base_cost': 0.0,
            }
            lot_id = self.env['stock.production.lot'].create(vals)
            # create quants for production
            vals = {
                'product_id': production.product_id.id,
                'routing_id': taskcode.routing_id.id,
                'routing_line_id': taskcode.routing_line_id.id,
                'routing_subrouting_id': taskcode.id,
                'location_id': taskcode.location_id.id,
                'qty': 1,
                'lot_id': lot_id.id,
                'in_date': datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT),
                'create_date': datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT),
                # 'mfg_order_id': production.id,
                'wip_mfg_order_id': production.id,
            }
            self.env['stock.quant'].sudo().create(vals)
        production.write({'state': 'confirmed'})

def timed(msg, func):
    t0 = datetime.now()
    func()
    dt = datetime.now() - t0
    print("{} Time: {:,.3f} ms".format(msg, dt.total_seconds() * 1000.0), flush=True)

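# A minimal usage sketch for timed() above; do_work is a hypothetical
# zero-argument callable standing in for the real workload.
def do_work():
    sum(x * x for x in range(1000000))

timed("Squares summed.", do_work)  # e.g. "Squares summed. Time: 68.221 ms"
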
def __init__(self, logfile='history.log'):
    # Initialize the scheduler
    SchedulerPolicy.__init__(self)  # super()
    self.t = 0
    # Nodes
    self.nodes = {}
    # History
    self.logfile = logfile
    self.history = History(filename=self.logfile)
    # Job submission
    self.lastJobId = 1
    # Simulation
    self.maxTime = None
    # Id for jobs
    if sys.platform == 'win32':
        self.trackerId = datetime.now().strftime('%Y%m%d%H%M')
    else:
        self.trackerId = datetime.now().strftime('%4Y%2m%2d%2H%2M')
    # Specify if the nodes are sent to sleep when there's no load
    self.nodeManagement = True
    # Outputs
    self.energy = None
    # Step length
    self.STEP = 1

def bootstrap(request):
    # Don't create dummy playlist tracks if playlist tracks already exist!
    pl_tracks = PlaylistTrack.all().fetch(1)
    if len(pl_tracks) > 0:
        return HttpResponse(status=404)

    playlist = ChirpBroadcast()
    minutes = 0
    tracks = Track.all().fetch(100)
    for track in tracks:
        pl_track = PlaylistTrack(
            playlist=playlist,
            selector=request.user,
            established=datetime.now() - timedelta(minutes=minutes),
            artist=track.album.album_artist,
            album=track.album,
            track=track,
        )
        pl_track.put()
        if minutes > 0 and minutes % 25 == 0:
            pl_break = PlaylistBreak(
                playlist=playlist,
                established=datetime.now() - timedelta(minutes=minutes - 1),
            )
            pl_break.put()
        minutes += 5
    return HttpResponseRedirect("/playlists/")

def popula_dados():
    # DEBUG
    count = 0
    x = datetime.now()
    print x
    # /DEBUG
    ultimo_idx = politicos.find_one(sort=[('idx', -1)], limit=1)
    if ultimo_idx:
        ultimo_idx = ultimo_idx.get('idx', 1)
    else:
        ultimo_idx = 1
    total = len(range(ultimo_idx, 100000))
    for i in range(ultimo_idx, 100000):
        # DEBUG
        if count % 1 == 0:
            percent = (float(count) / total) * 100.0
            z = datetime.now()
            delta = z - x
            timestamp_delta = delta.total_seconds()
            if percent > 5:
                # Estimate total runtime from the fraction done, then subtract
                # the elapsed time to get the remaining time.
                eta_secs = (100.0 / percent) * timestamp_delta
                eta = timedelta(seconds=eta_secs - timestamp_delta)
            else:
                eta = '--CALCULANDO--'  # "calculating"
            sys.stdout.write("\r%d%% - %s - ETA %s - %s" % (percent, delta, eta, i))
            sys.stdout.flush()
            count += 1
        # /DEBUG
        politico = politicos.find_one({'idx': i})
        if not politico:
            dados = carrega_dados_politico(i)
            if not dados:
                continue
            salvar_dados(dados)

def make_twitter_connection(context, twitter_pub_addr):
    """ setup twitter and ZeroMQ connections. """
    client = StreamClient(CONSUMER_KEY,
                          CONSUMER_SECRET,
                          ACCESS_TOKEN,
                          ACCESS_TOKEN_SECRET)
    # setup listener for twitter
    follow_syms = gen_follow_symbols()
    response = client.stream.statuses.filter.post(track=follow_syms)
    l_string = str(datetime.now()) + " Thread called twitter agent "
    logging.info(l_string)
    try:
        context = context
        tweet_pub_socket = context.socket(zmq.PUB)
        tweet_pub_socket.connect(twitter_pub_addr)
        sys.stdout.write("Initialized twitter_zmq_push \n")
        sys.stdout.flush()
        l_string = str(datetime.now()) + "init twitter_zmq_push success"
        logging.info(l_string)
    except Exception as error:
        l_string = str(datetime.now()) + "Failed to init twitter_zmq_push error: " + str(error)
        logging.info(l_string)
    process_tweets(response, tweet_pub_socket)
    return

def run(self, command):
    self.set_state(9)
    self.log("Starting... %s\n" % self.id)
    self.log("Command %s\n" % command)
    store = []
    last_send_time = datetime.now()
    command = shlex.split(command)
    proc = subprocess.Popen(command, shell=False,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    while proc.poll() is None:
        output = proc.stdout.readline()
        store.append(output)
        print output
        print store
        # Flush the buffered output to the log at most once per REDIS_FLUSH_TIME.
        if datetime.now() > timedelta(seconds=settings.REDIS_FLUSH_TIME) + last_send_time:
            if len(store) > 0:
                print "SAVING"
                self.log("".join(store))
                store = []
                last_send_time = datetime.now()
    self.log("".join(store))
    result_code = proc.poll()
    return result_code

def button_confirm(self):
    taskcode = self._get_mfg_wip_taskcode()
    if self.order_type == 'Upgrade':
        if not self.result_products:
            raise Warning("Please select resulting products for the upgrade")
        for line in self.result_products:
            # create quants for upgrade
            vals = {
                'product_id': line.product_id.id,
                'routing_id': taskcode.routing_id.id,
                'routing_line_id': taskcode.routing_line_id.id,
                'routing_subrouting_id': taskcode.id,
                'location_id': taskcode.location_id.id,
                'qty': 1,
                'lot_id': line.lot_id.id,
                'in_date': datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT),
                'create_date': datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT),
                'propagated_from_id': line.source_quant.id,
                'wip_mod_order_id': self.id,
            }
            self.env['stock.quant'].sudo().create(vals)
    elif self.order_type == 'Repair':
        self.production_quants = self.source_quants
    else:
        raise Warning("Unknown order type")
    self.write({'state': 'ready', 'date_start': datetime.today()})

def index(request):
    # Request the context of the request.
    # The context contains information such as the client's machine details.
    context = RequestContext(request)

    # Construct a dictionary to pass to the template engine as its context.
    category_list = Category.objects.order_by('-likes')[:5]
    page_list = Page.objects.order_by('-views')[:5]
    context_dict = {'categories': category_list,
                    'pages': page_list,
                    'cat_list': get_category_list()}

    for category in category_list:
        category.url = category.name.replace(' ', '_')

    if request.session.get('last_visit'):
        last_visit = request.session.get('last_visit')
        visits = request.session.get('visits', 0)
        print last_visit
        last_visit_time = datetime.strptime(last_visit[:-7], "%Y-%m-%d %H:%M:%S")
        if (datetime.now() - last_visit_time).days > 0:
            request.session['visits'] = visits + 1
            request.session['last_visit'] = str(datetime.now())
    else:
        request.session['last_visit'] = str(datetime.now())
        request.session['visits'] = 1

    return render_to_response('rango/index.html', context_dict, context)

def _test_file_time_getter_tz_handling_off(self, getter):
    # Django's TZ (and hence the system TZ) is set to Africa/Algiers which
    # is UTC+1 and has no DST change. We can set the Django TZ to something
    # else so that UTC, Django's TIME_ZONE, and the system timezone are all
    # different.
    now_in_algiers = timezone.make_aware(datetime.now())

    with timezone.override(timezone.get_fixed_timezone(-300)):
        # At this point the system TZ is +1 and the Django TZ is -5.
        self.assertFalse(self.storage.exists('test.file.tz.off'))

        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file.tz.off', f)
        self.addCleanup(self.storage.delete, f_name)
        dt = getter(f_name)
        # dt should be naive, in system (+1) TZ
        self.assertTrue(timezone.is_naive(dt))

        # The three timezones are indeed distinct.
        naive_now = datetime.now()
        algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
        django_offset = timezone.get_current_timezone().utcoffset(naive_now)
        utc_offset = timezone.utc.utcoffset(naive_now)
        self.assertGreater(algiers_offset, utc_offset)
        self.assertLess(django_offset, utc_offset)

        # dt and naive_now should be the same effective time.
        self.assertLess(abs(dt - naive_now), timedelta(seconds=2))
        # If we convert dt to an aware object using the Algiers timezone
        # then it should be the same effective time as now_in_algiers.
        _dt = timezone.make_aware(dt, now_in_algiers.tzinfo)
        self.assertLess(abs(_dt - now_in_algiers), timedelta(seconds=2))

def run(self):
    spider_start_time = str(datetime.now()).split('.')[0]
    print spider_start_time, 'time to spider start!'
    proxy_manager = ProxyManager()
    page = get_html(BASE_URL)
    page = unicode(page, 'GBK').encode('UTF-8')
    page_count = self.get_page_count(page)
    page_count_time = str(datetime.now()).split('.')[0]
    print page_count_time, 'get page count:', page_count
    default_ip = get_default_ip()
    if page_count != 0:
        last_proxy = None
        for i in xrange(1, page_count):
            page = get_html(URL_HEADER + str(i) + URL_END, last_proxy)
            proxy_list = filte(page)
            for proxy in proxy_list:
                if proxy.anonymous_type == '高匿':  # '高匿' means "high anonymity"
                    check_result = check_anonymous(proxy, default_ip)
                    spider_time = str(datetime.now()).split('.')[0]
                    if check_result[0]:
                        proxy.delay_time = check_result[1]
                        proxy.created_time = str(datetime.now()).split('.')[0]
                        proxy.is_in_china = 2
                        proxy_manager.add_proxy(proxy, spider_time)
                        last_proxy = proxy
                    else:
                        pass

def wait_for_success_events(request_id, timeout_in_minutes=None,
                            sleep_time=5, stream_events=True, can_abort=False):
    if timeout_in_minutes == 0:
        return
    if timeout_in_minutes is None:
        timeout_in_minutes = 10

    start = datetime.now()
    timediff = timedelta(seconds=timeout_in_minutes * 60)

    last_time = None

    streamer = io.get_event_streamer()
    if can_abort:
        streamer.prompt += strings['events.abortmessage']

    events = []
    try:
        # Get first event in order to get start time
        while not events:
            events = elasticbeanstalk.get_new_events(
                None, None, request_id, last_event_time=None
            )

            if len(events) > 0:
                event = events[-1]
                app_name = event.app_name
                env_name = event.environment_name

                if stream_events:
                    streamer.stream_event(get_event_string(event))
                # Test event message for success string
                if _is_success_string(event.message):
                    return
                last_time = event.event_date
            else:
                time.sleep(sleep_time)

        # Get remaining events without request id
        while (datetime.now() - start) < timediff:
            time.sleep(sleep_time)

            events = elasticbeanstalk.get_new_events(
                app_name, env_name, None, last_event_time=last_time
            )

            for event in reversed(events):
                if stream_events:
                    streamer.stream_event(get_event_string(event))
                    # We don't need to update last_time if we are not printing.
                    # This can solve timing issues.
                    last_time = event.event_date

                # Test event message for success string
                if _is_success_string(event.message):
                    return
    finally:
        streamer.end_stream()

    # We have timed out
    raise TimeoutError('Timed out while waiting for command to Complete. '
                       'The timeout can be set using the --timeout option.')

def index(request):
    category_list = Category.objects.order_by('-likes')[:5]
    page_list = Page.objects.order_by('-views')[:5]
    context_dict = {'categories': category_list, 'pages': page_list}

    visits = request.session.get('visits')
    if not visits:
        visits = 1
    reset_last_visit_time = False

    last_visit = request.session.get('last_visit')
    if last_visit:
        last_visit_time = datetime.strptime(last_visit[:-7], "%Y-%m-%d %H:%M:%S")
        if (datetime.now() - last_visit_time).seconds > 0:
            visits += 1
            reset_last_visit_time = True
    else:
        reset_last_visit_time = True

    if reset_last_visit_time:
        request.session['last_visit'] = str(datetime.now())
        request.session['visits'] = visits
    context_dict['visits'] = visits

    response = render(request, 'rango/index.html', context_dict)
    return response

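# Aside on the check above: timedelta.seconds holds only the seconds component
# of a delta (0-86399), not the total elapsed time, so comparisons like
# "(now - last).seconds > 0" behave surprisingly for deltas spanning days.
# A quick illustration:
from datetime import timedelta

d = timedelta(days=1, seconds=5)
print(d.seconds)          # 5 -- just the seconds field
print(d.total_seconds())  # 86405.0 -- the full duration
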
def run_round(self, round_name, min_doge=5, max_doge=6):
    dogeline = " such appreciation\n\n\n\n+/u/dogetipbot %s doge"
    round_data = {"round_name": round_name, "users": [], "timestamp": datetime.now()}
    submission = self.reddit.get_submission(submission_id=self.current_thread)
    for comment in submission.comments:
        try:
            if comment.author.name in round_data["users"] or comment.author.name in self.blacklist:
                pass  # TODO: maybe respond to user explaining why this comment won't work
            else:
                user = self._get_user_from_comment(comment)
                if user['age'] > 25:
                    tip_ammount = self._get_doge_tip(min_doge, max_doge, user['gifted'])
                    tip_comment = dogeline % (str(tip_ammount)[:8])
                    user['gifts'].append({"thread": self.current_thread,
                                          "ammount": tip_ammount,
                                          "timestamp": datetime.now()})
                    user['gifted'] += tip_ammount
                    round_data["users"].append(comment.author.name)
                    comment.reply(" " + round_name + "\n\n" + tip_comment)
                    comment.upvote()
                    self.mc.dogelympics.users.save(user)
        except AttributeError:
            pass  # user probably deleted their post
    self.mc.dogelympics.rounds.save(round_data)
    print datetime.now()

def test_08_publishing(self):
    self.create_default_page_set()

    page = Page.objects.get(pk=1)
    page2 = Page.objects.get(pk=2)
    self.is_published(page.get_absolute_url(), should_be=False)
    self.is_published(page2.get_absolute_url(), should_be=False)

    page.active = True
    page.save()
    page2.active = True
    page2.save()
    self.is_published(page.get_absolute_url(), should_be=True)
    self.is_published(page2.get_absolute_url(), should_be=True)

    old_publication = page.publication_date
    page.publication_date = datetime.now() + timedelta(days=1)
    page.save()
    self.is_published(page.get_absolute_url(), should_be=False)
    # Should not be accessible because of its parent's inactivity
    self.is_published(page2.get_absolute_url(), should_be=False)

    page.publication_date = old_publication
    page.publication_end_date = datetime.now() - timedelta(days=1)
    page.save()
    self.is_published(page.get_absolute_url(), should_be=False)
    # Should not be accessible because of its parent's inactivity
    self.is_published(page2.get_absolute_url(), should_be=False)

    page.publication_end_date = datetime.now() + timedelta(days=1)
    page.save()
    self.is_published(page.get_absolute_url(), should_be=True)
    self.is_published(page2.get_absolute_url(), should_be=True)

def migrate_instance_task(origCls, orig_creds, migrateCls, migrate_creds,
                          **imaging_args):
    logger.info("migrate_instance_task task started at %s." % datetime.now())
    new_image_id = migrate_instance(origCls, orig_creds,
                                    migrateCls, migrate_creds,
                                    **imaging_args)
    logger.info("migrate_instance_task task finished at %s." % datetime.now())
    return new_image_id

def handle(self, *args, **options):
    scriptStartTime = datetime.now()
    db1 = psycopg2.connect(database="klpmis",
                           user="******",
                           password="******")

    curAcObj = current_academic()
    currentAcademicYearSql = "select id,name from schools_academic_year where id=%s" % (curAcObj.id)
    currentAcademicYearRec = run_query(db1, currentAcademicYearSql)
    currentAcademicId = currentAcademicYearRec[0][0]
    currentAcademicYearName = currentAcademicYearRec[0][1]

    currentYear = int(currentAcademicYearName.split('-')[1])
    nextAcademicYear = str(currentYear) + '-' + str(currentYear + 1)
    print 'Current Academic Year :', currentAcademicYearName
    print 'Next Academic Year : ', nextAcademicYear

    nextAcademicYearSql = "select id,name from schools_academic_year where name='%s'" % (nextAcademicYear)
    print nextAcademicYearSql
    nextAcademicRec = run_query(db1, nextAcademicYearSql)
    nextAcademicId = nextAcademicRec[0][0]

    move_class_sql(currentAcademicId, nextAcademicId)

    totaltime = datetime.now() - scriptStartTime
    print totaltime
    print 'Preschool students have been promoted to', nextAcademicYear

def machine_imaging_task(managerCls, manager_creds, create_img_args):
    logger.info("machine_imaging_task task started at %s." % datetime.now())
    manager = managerCls(**manager_creds)
    manager.hook = create_img_args.pop('machine_request', None)
    new_image_id = manager.create_image(**create_img_args)
    logger.info("machine_imaging_task task finished at %s." % datetime.now())
    return new_image_id

def index(request):
    context = RequestContext(request)

    category_list = Category.objects.all()
    context_dict = {'categories': category_list}

    for category in category_list:
        category.url = encode_url(category.name)

    page_list = Page.objects.order_by('-views')[:5]
    context_dict['pages'] = page_list

    #### NEW CODE ####
    if request.session.get('last_visit'):
        # The session has a value for the last visit
        last_visit_time = request.session.get('last_visit')
        visits = request.session.get('visits', 0)

        if (datetime.now() - datetime.strptime(last_visit_time[:-7], "%Y-%m-%d %H:%M:%S")).days > 0:
            request.session['visits'] = visits + 1
            request.session['last_visit'] = str(datetime.now())
    else:
        # The get returns None, and the session does not have a value for the last visit.
        request.session['last_visit'] = str(datetime.now())
        request.session['visits'] = 1
    #### END NEW CODE ####

    # Render and return the rendered response back to the user.
    return render_to_response('rango/index.html', context_dict, context)

def _run_changed(msgs, chan):
    '''Consume the cloudsearch_changes queue, and print reporting information
    on how long it took and how many remain
    '''
    start = datetime.now(g.tz)

    changed = [pickle.loads(msg.body) for msg in msgs]

    fullnames = set()
    fullnames.update(LinkUploader.desired_fullnames(changed))
    fullnames.update(SubredditUploader.desired_fullnames(changed))
    things = Thing._by_fullname(fullnames, data=True, return_dict=False)

    link_uploader = LinkUploader(g.CLOUDSEARCH_DOC_API, things=things)
    subreddit_uploader = SubredditUploader(g.CLOUDSEARCH_SUBREDDIT_DOC_API,
                                           things=things)

    link_time = link_uploader.inject()
    subreddit_time = subreddit_uploader.inject()
    cloudsearch_time = link_time + subreddit_time

    totaltime = (datetime.now(g.tz) - start).total_seconds()

    print("%s: %d messages in %.2fs seconds (%.2fs secs waiting on "
          "cloudsearch); %d duplicates, %s remaining)" %
          (start, len(changed), totaltime, cloudsearch_time,
           len(changed) - len(things),
           msgs[-1].delivery_info.get('message_count', 'unknown')))

def new_func():
    starttime = datetime.now()
    func()
    print("exectime: ", datetime.now() - starttime)

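# new_func() reads like the inner closure of a timing decorator; a minimal
# sketch of the assumed enclosing definition (the name exectime is
# illustrative, not from the original source):
from datetime import datetime

def exectime(func):
    def new_func():
        starttime = datetime.now()
        func()
        print("exectime: ", datetime.now() - starttime)
    return new_func

@exectime
def greet():
    print("hello")

greet()  # prints "hello", then the elapsed time
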
def poser(self, objet, personnage, qtt=1):
    """Put the object down."""
    Salle = importlib.import_module("primaires.salle.salle").Salle
    if isinstance(objet.contenu, Salle) and \
            (datetime.now() - objet.apparition).seconds < 100:
        objet.apparition = datetime.now()

    BaseType.poser(self, objet, personnage)