def testGettingTime(self):
    cur_time = python_time.time()
    our_time = time.time()
    self.assertApproximates(cur_time, our_time, 0.01)
    time.scale(0.1)
    cur_time = python_time.time()
    our_time = time.time()
    # The real reading divided by the scale factor should match the
    # scaled clock.
    self.assertApproximates(cur_time / time._get_scale(), our_time, 0.01)

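# A minimal sketch of the scalable time wrapper the test above assumes.
# The real module is the project's own; scale(), _get_scale() and time()
# here are illustrative reconstructions, not the actual implementation.
# With scale(0.1) the wrapped clock runs ten times faster than the real
# one, which is why the assertion divides by _get_scale().
import time as python_time

_scale = 1.0

def scale(factor):
    global _scale
    _scale = factor

def _get_scale():
    return _scale

def time():
    # Wrapped clock: real time divided by the scale factor, so smaller
    # scale values make simulated time advance faster.
    return python_time.time() / _scale
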
def explicitly_wait(method, args=tuple(), kwargs=dict(), poll=0.5, timeout=10):
    """Call method(*args, **kwargs) repeatedly until it stops raising
    transient Selenium lookup errors or the timeout expires."""
    end_time = time.time() + timeout
    while True:
        try:
            return method(*args, **kwargs)
        except (exceptions.NoSuchElementException,
                exceptions.StaleElementReferenceException,
                exceptions.InvalidSelectorException):
            if time.time() > end_time:
                raise
            time.sleep(poll)

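# Hypothetical usage with Selenium WebDriver; the driver construction and
# the CSS selector are assumptions for illustration, and any callable that
# raises the caught exceptions works the same way:
from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Firefox()
# Retries every 0.5s for up to 10s until the element can be located;
# after that the last lookup exception propagates to the caller.
button = explicitly_wait(driver.find_element,
                         args=(By.CSS_SELECTOR, "#login-button"))
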
def _publish(self, key, shard, message):
    assert isinstance(message, BaseMessage), "Unexpected message class"
    if message.expiration_time:
        delta = message.expiration_time - time.time()
        if delta < 0:
            self.log("Not sending expired message. msg=%s, shard=%s, "
                     "key=%s, delta=%r", message, shard, key, delta)
            # Return a fired Deferred so every code path yields one.
            return defer.succeed(None)
    serialized = self.serializer.convert(message)
    content = Content(serialized)
    content.properties["delivery mode"] = 1  # non-persistent
    self.log("Publishing msg=%s, shard=%s, key=%s", message, shard, key)
    if shard is None:
        self.error("Tried to send message to exchange=None. This would "
                   "mess up the whole txamqp library state, therefore "
                   "this message is ignored")
        return defer.succeed(None)
    d = self.channel.basic_publish(exchange=shard, content=content,
                                   routing_key=key, immediate=False)
    d.addCallback(defer.drop_param, self.channel.tx_commit)
    d.addCallback(defer.override_result, message)
    return d

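# Hypothetical caller sketch for _publish: expiration_time is an absolute
# UNIX timestamp, so a message meant to live 30 seconds is stamped like
# this (NotificationMessage and the key/shard names are illustrative):
#
#     msg = NotificationMessage(payload="ping")
#     msg.expiration_time = time.time() + 30
#     d = self._publish(key="agent.notifications", shard="shard1",
#                       message=msg)
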
def do_log(self, level, object, category, format, args,
           depth=-1, file_path=None, line_num=None):
    level = int(level)
    if category is None:
        category = 'feat'
    if level > flulog.getCategoryLevel(category):
        return
    if file_path is None and line_num is None:
        file_path, line_num = flulog.getFileLine(where=-depth - 2)
    if args:
        message = format % args
    else:
        message = str(format)
    data = dict(entry_type='log',
                level=level,
                log_name=object,
                category=category,
                file_path=file_path,
                line_num=line_num,
                message=message,
                timestamp=int(time.time()))
    self.insert_entry(**data)
    if self.should_keep_on_logging_to_flulog:
        flulog.doLog(level, object, category, format, args,
                     where=depth, filePath=file_path, line=line_num)

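# Hypothetical call sketch: `args` is the %-formatting tuple, `category`
# falls back to 'feat' when None, and the numeric level follows flulog's
# scheme (the value 2 and the writer name below are purely illustrative):
#
#     writer.do_log(2, 'my-agent', None, "processed %d items", (42, ))
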
def freshen_entries(self, response):
    etag = response.headers.get('etag')
    if response.status == 304 and etag:
        ctime = time.time()
        # 304 Not Modified: every cached entry carrying the same ETag is
        # still current, so only its freshness timestamp needs updating.
        for entry in self.itervalues():
            if entry.etag == etag:
                entry.fresh_at = ctime

def got_response(self, response, ctime=None):
    ctime = ctime or time.time()
    if isinstance(response, failure.Failure):
        self.size = None
        self._parsed = response
        self.state = EntryState.invalid
    elif response.status == 304:
        # Not Modified: the cached body is still valid, only refresh it.
        self.state = EntryState.ready
        self.fresh_at = ctime
    else:
        self._parsed = apply_parsers(response, self._parser, self.tag)
        if isinstance(self._parsed, failure.Failure):
            self.state = EntryState.invalid
        else:
            self.state = EntryState.ready
            self.size = len(response.body)
            if not self.cached_at:
                self.cached_at = ctime
            self.fresh_at = ctime
            if response.headers.get('etag'):
                self.etag = response.headers.get('etag')
            else:
                # Without an ETag the entry cannot be revalidated later.
                self.state = EntryState.invalid
    # trigger waiting Deferreds
    waiting = self._waiting
    self._waiting = list()
    for d in waiting:
        d.callback(self._parsed)

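# The waiting-Deferred idiom used at the end of got_response, shown in
# isolation (a simplified sketch, not the cache's real class): consumers
# obtain a Deferred while the fetch is in flight, and every one of them
# is fired with the same result once it arrives.
from twisted.internet import defer

class PendingResult(object):

    def __init__(self):
        self._waiting = []

    def wait(self):
        d = defer.Deferred()
        self._waiting.append(d)
        return d

    def fire(self, result):
        # Swap the list out first so that callbacks registering new
        # waiters do not interfere with the ones being fired.
        waiting, self._waiting = self._waiting, []
        for d in waiting:
            d.callback(result)
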
def _iterate_on_update(self, _document, _method, args, keywords):
    if IDocument.providedBy(_document):
        doc_id = _document.doc_id
        rev = _document.rev
    else:
        doc_id = _document['_id']
        rev = _document['_rev']
    try:
        result = _method(_document, *args, **keywords)
    except ResignFromModifying:
        return _document
    if result is None:
        d = self.delete_document(_document)
    else:
        d = self.save_document(result)
    if (IDocument.providedBy(_document) and
            _document.conflict_resolution_strategy ==
            ConflictResolutionStrategy.merge):
        update_log = document.UpdateLog(handler=_method,
                                        args=args,
                                        keywords=keywords,
                                        rev_from=rev,
                                        timestamp=time.time())
        d.addCallback(lambda doc: defer.DeferredList([
            defer.succeed(doc),
            self.get_database_tag(),
            self.get_update_seq()]))
        d.addCallback(self._log_update, update_log)
    d.addErrback(self._errback_on_update, doc_id, _method, args, keywords)
    return d

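# A sketch of the update-method contract _iterate_on_update enforces
# (bump_counter is a hypothetical handler operating on a dict-style
# document): return the modified document to save it, return None to
# delete it, or raise ResignFromModifying to leave it untouched.
def bump_counter(doc, amount=1):
    if amount == 0:
        raise ResignFromModifying()
    doc['counter'] = doc.get('counter', 0) + amount
    return doc
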
def _populate_data(self):
    e = self._generate_entry
    l = self._generate_log
    self.now = time.time()
    self.past1 = self.now - 100
    self.past2 = self.past1 - 100
    yield self.writer.insert_entries([
        e(agent_id='other_agent', args='some args', timestamp=self.past1),
        e(agent_id='other_agent'),
        e(),
        e(),
        l(level=2, category='test', log_name='log_name',
          timestamp=self.past2, message='m1'),
        l(level=1, category='test', timestamp=self.past1, message='m2'),
        l(level=1, message='m3'),
        l(level=2, message='m4')])
    yield self.writer2.insert_entries([
        e(agent_id='cool_agent', args='some args'),
        e(agent_id='cool_agent'),
        l(level=2, category='spam', log_name='eggs',
          timestamp=self.past2, message='n1'),
        l(level=1, category='becon', timestamp=self.past1, message='n2'),
        l(level=1, message='n3'),
        l(level=2, message='n4')])

def __init__(self, logger, now=None):
    journal.DummyRecorderNode.__init__(self)
    log.LogProxy.__init__(self, logger)
    log.Logger.__init__(self, logger)
    self.calls = {}
    self.now = now or time.time()
    self.call = None

def _log_request_result(self, result, method, location, started):
    elapsed = time.time() - started
    if isinstance(result, failure.Failure):
        self.debug("%s on %s failed with error: %s. Elapsed: %.2f",
                   method.name, location, result.value, elapsed)
    else:
        self.debug('%s on %s finished with %s status, elapsed: %.2f',
                   method.name, location, int(result.status), elapsed)

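# Hypothetical wiring inside the same class: addBoth routes both success
# and failure into the logger, which is why _log_request_result checks
# for failure.Failure itself (self._request is an assumed helper):
#
#     started = time.time()
#     d = self._request(method, location)
#     d.addBoth(self._log_request_result, method, location, started)
#     return d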