def single_queue_it(self, count, remaining_count=None):
    """
    This method is used for specifying how to queue your rater plugin using a single process
    :param count: Number of events to queue in this bundle
    :return:
    """
    et = self.sample.earliestTime()
    lt = self.sample.latestTime()
    if count < 1 and count != -1:
        logger.info(
            "There is no data to be generated in worker {0} because the count is {1}.".format(
                self.sample.config.generatorWorkers, count))
    else:
        genPlugin = self.generatorPlugin(sample=self.sample)
        # Adjust queue for threading mode
        genPlugin.updateConfig(config=self.config, outqueue=self.outputQueue)
        genPlugin.updateCounts(count=count, start_time=et, end_time=lt)
        try:
            self.generatorQueue.put(genPlugin)
            logger.info(("Put {0} MB of events in queue for sample '{1}' " +
                         "with et '{2}' and lt '{3}'").format(
                round((count / 1024.0 / 1024), 4), self.sample.name, et, lt))
        except Full:
            logger.warning("Generator Queue Full. Skipping current generation.")
def flush(self, q):
    for x in q:
        msg = x['_raw'].rstrip() + '\n'
        self.s.sendto(
            str.encode(msg),
            (self._udpDestinationHost, int(self._udpDestinationPort)))
    logger.info("Flushing in udpout.")
def __init__(self, time, sample=None, config=None, genqueue=None, outputqueue=None, loggingqueue=None):
    # Logger already set up by config, just get an instance
    # set up default options
    self.profiler = config.profiler
    self.config = config
    self.sample = sample
    self.end = getattr(self.sample, "end", -1)
    self.endts = getattr(self.sample, "endts", None)
    self.generatorQueue = genqueue
    self.outputQueue = outputqueue
    self.time = time
    self.stopping = False
    self.countdown = 0
    self.executions = 0
    self.interval = getattr(self.sample, "interval", config.interval)
    logger.debug('Initializing timer for %s' % (sample.name if sample is not None else "None"))
    # load plugins
    if self.sample is not None:
        rater_class = self.config.getPlugin('rater.' + self.sample.rater, self.sample)
        self.rater = rater_class(self.sample)
        self.generatorPlugin = self.config.getPlugin('generator.' + self.sample.generator, self.sample)
        self.outputPlugin = self.config.getPlugin('output.' + self.sample.outputMode, self.sample)
        if self.sample.timeMultiple < 0:
            logger.error("Invalid setting for timeMultiple: {}, value should be positive".format(
                self.sample.timeMultiple))
        elif self.sample.timeMultiple != 1:
            self.interval = self.sample.interval
            logger.debug("Adjusting interval {} with timeMultiple {}, new interval: {}".format(
                self.sample.interval, self.sample.timeMultiple, self.interval))
        logger.info("Starting '%s' generatorWorkers for sample '%s'" %
                    (self.sample.config.generatorWorkers, self.sample.name))
def multi_queue_it(self, count):
    logger.info("Entering multi-processing division of sample")
    numberOfWorkers = self.config.generatorWorkers
    logger.debug("Number of Workers: {0}".format(numberOfWorkers))
    # This is a redundant check, but it prevents a missed call to multi_queue_it without a valid setting.
    if bool(self.sample.splitSample):
        # If splitSample == 1, divide the work across all generator workers; otherwise use splitSample workers.
        if self.sample.splitSample == 1:
            logger.debug("splitSample = 1, using all available workers")
            targetWorkersToUse = numberOfWorkers
        else:
            logger.debug("splitSample != 1, using {0} workers.".format(self.sample.splitSample))
            targetWorkersToUse = self.sample.splitSample
    else:
        logger.debug("splitSample set to disable multithreading for just this sample.")
        self.single_queue_it(count)
        return
    currentWorkerPrepCount = 0
    remainingCount = count
    targetLoopCount = int(count) // targetWorkersToUse
    while currentWorkerPrepCount < targetWorkersToUse:
        currentWorkerPrepCount = currentWorkerPrepCount + 1
        # Check whether this is the last loop; if so, queue the remainder instead of the per-worker share.
        if currentWorkerPrepCount < targetWorkersToUse:
            remainingCount = remainingCount - targetLoopCount
        else:
            targetLoopCount = remainingCount
        self.single_queue_it(targetLoopCount)
def flush(self, q):
    self.s.connect((self._tcpDestinationHost, int(self._tcpDestinationPort)))
    logger.info("Socket connected to {0}:{1}".format(
        self._tcpDestinationHost, self._tcpDestinationPort))
    for x in q:
        # Encode to bytes before sending; socket.send() does not accept str in Python 3.
        self.s.send(str.encode(x['_raw'].rstrip() + '\n'))
    self.s.close()
def flush(self, events):
    if not self.scsEndPoint:
        if getattr(self.config, "scsEndPoint", None):
            self.scsEndPoint = self.config.scsEndPoint
        else:
            raise NoSCSEndPoint("Please specify your REST endpoint for the SCS tenant")

    if not self.scsAccessToken:
        if getattr(self.config, "scsAccessToken", None):
            self.scsAccessToken = self.config.scsAccessToken
        else:
            raise NoSCSAccessToken("Please specify your REST endpoint access token for the SCS tenant")

    if self.scsClientId and self.scsClientSecret:
        logger.info("Both scsClientId and scsClientSecret are supplied. "
                    "We will renew the expired token using these credentials.")
        self.scsRenewToken = True
    else:
        if getattr(self.config, "scsClientId", None) and getattr(self.config, "scsClientSecret", None):
            self.scsClientId = self.config.scsClientId
            self.scsClientSecret = self.config.scsClientSecret
            logger.info("Both scsClientId and scsClientSecret are supplied. "
                        "We will renew the expired token using these credentials.")
            self.scsRenewToken = True
        else:
            self.scsRenewToken = False

    self.header = {
        "Authorization": "Bearer {0}".format(self.scsAccessToken),
        "Content-Type": "application/json",
    }

    self.accessTokenExpired = False
    self.tokenRenewEndPoint = "https://auth.scp.splunk.com/token"
    self.tokenRenewBody = {
        "client_id": self.scsClientId,
        "client_secret": self.scsClientSecret,
        "grant_type": "client_credentials",
    }

    for i in range(self.scsRetryNum + 1):
        logger.debug("Sending data to the scs endpoint. Num:{0}".format(i))
        self._sendHTTPEvents(events)

        if not self.checkResults():
            if self.accessTokenExpired and self.scsRenewToken:
                self.renewAccessToken()
            self.active_sessions = []
        else:
            break
def renewAccessToken(self):
    response = requests.post(self.tokenRenewEndPoint, data=self.tokenRenewBody, timeout=5)
    if response.status_code == 200:
        logger.info("Renewal of the access token successful")
        self.scsAccessToken = response.json()["access_token"]
        setattr(self._sample, "scsAccessToken", self.scsAccessToken)
        self.accessTokenExpired = False
    else:
        logger.error("Renewal of the access token failed")
def flush(self, q):
    self.s.connect((self._tcpDestinationHost, int(self._tcpDestinationPort)))
    logger.info("Socket connected to {0}:{1}".format(
        self._tcpDestinationHost, self._tcpDestinationPort))
    for x in q:
        msg = x["_raw"].rstrip() + "\n"
        self.s.send(str.encode(msg))
    self.s.close()
def single_queue_it(self, count):
    """
    This method is used for specifying how to queue your rater plugin using a single process
    :param count: Number of events to queue in this bundle
    :return:
    """
    et = self.sample.earliestTime()
    lt = self.sample.latestTime()
    if count < 1 and count != -1:
        logger.info(
            "There is no data to be generated in worker {0} because the count is {1}.".format(
                self.sample.config.generatorWorkers, count))
    else:
        genPlugin = self.generatorPlugin(sample=self.sample)
        # Adjust queue for threading mode
        genPlugin.updateCounts(count=count, start_time=et, end_time=lt)
        genPlugin.updateConfig(config=self.config, outqueue=self.outputQueue)
        try:
            logger.info(("Put {0} MB of events in queue for sample '{1}' " +
                         "with et '{2}' and lt '{3}'").format(
                round((count / 1024.0 / 1024), 4), self.sample.name, et, lt))
            if self.sample.generator == "replay":
                # Lock on to replay mode; this keeps the timer aware of when to continue cycles, since
                # replay mode has a dynamic replay time and interval doesn't mean the same thing.
                output_counter = None
                if hasattr(self.config, "outputCounter") and self.config.outputCounter:
                    from splunk_eventgen.lib.outputcounter import OutputCounter
                    output_counter = OutputCounter()
                elif hasattr(self.config, "outputCounter"):
                    output_counter = self.config.outputCounter
                genPlugin.run(output_counter=output_counter)
            else:
                self.generatorQueue.put(genPlugin)
        except Full:
            logger.warning("Generator Queue Full. Skipping current generation.")
def queue_it(self, count):
    count = count + self.previous_count_left
    if 0 < count < self.raweventsize:
        logger.info(
            "current interval size is {}, which is smaller than a raw event size {}. "
            "Wait for the next turn.".format(count, self.raweventsize))
        self.update_options(previous_count_left=count)
    else:
        self.update_options(previous_count_left=0)
        et = self.sample.earliestTime()
        lt = self.sample.latestTime()
        # self.generatorPlugin is only a plugin class reference; we need a real plugin instance here.
        # Make a copy of the sample in case another generator corrupts it.
        genPlugin = self.generatorPlugin(sample=self.sample)
        # Adjust queue for threading mode
        genPlugin.updateConfig(config=self.config, outqueue=self.outputQueue)
        genPlugin.updateCounts(count=count, start_time=et, end_time=lt)
        try:
            self.generatorQueue.put(genPlugin)
        except Full:
            logger.warning("Generator Queue Full. Skipping current generation.")
def multi_queue_it(self, count):
    logger.info("Entering multi-processing division of sample")
    numberOfWorkers = self.config.generatorWorkers
    # This is a redundant check, but it prevents a missed call to multi_queue_it without a valid setting.
    if bool(self.sample.splitSample):
        # If splitSample == 1, divide the work across all generator workers; otherwise use splitSample workers.
        if self.sample.splitSample == 1:
            targetWorkersToUse = numberOfWorkers
        else:
            targetWorkersToUse = self.sample.splitSample
    else:
        self.single_queue_it(count)
        return
    currentWorkerPrepCount = 0
    remainingCount = count
    targetLoopCount = int(count) // targetWorkersToUse
    while currentWorkerPrepCount < targetWorkersToUse:
        currentWorkerPrepCount = currentWorkerPrepCount + 1
        # Check whether this is the last loop; if so, queue the remainder instead of the per-worker share.
        if currentWorkerPrepCount < targetWorkersToUse:
            remainingCount = remainingCount - targetLoopCount
        else:
            targetLoopCount = remainingCount
        self.single_queue_it(targetLoopCount)
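# A minimal, standalone sketch (not part of the plugin; split_count is a hypothetical helper) of the
# division scheme used by multi_queue_it above: each worker gets an equal integer share of `count`,
# and the last worker picks up whatever remainder is left so no events are dropped.
def split_count(count, workers):
    """Return a list of per-worker counts whose sum equals count."""
    share = count // workers
    shares = [share] * (workers - 1)
    shares.append(count - share * (workers - 1))
    return shares

# Example: split_count(10, 3) -> [3, 3, 4]; the remainder lands on the final worker.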
def setupBackfill(self):
    """
    Called by non-queueable plugins or by the timer to set up backfill times
    per the config or based on a Splunk search.
    """
    s = self._sample
    if s.backfill is not None:
        try:
            s.backfillts = timeParser(s.backfill, timezone=s.timezone)
            logger.info("Setting up backfill of %s (%s)" % (s.backfill, s.backfillts))
        except Exception as ex:
            logger.error("Failed to parse backfill '%s': %s" % (s.backfill, ex))
            raise

        if s.backfillSearch is not None:
            if s.backfillSearchUrl is None:
                try:
                    s.backfillSearchUrl = c.getSplunkUrl(s)[0]  # noqa, we update c in the globals() dict
                except ValueError:
                    logger.error(
                        "Backfill Search URL not specified for sample '%s', not running backfill search" % s.name)
            if not s.backfillSearch.startswith('search'):
                s.backfillSearch = 'search ' + s.backfillSearch
            s.backfillSearch += '| head 1 | table _time'

            if s.backfillSearchUrl is not None:
                logger.debug(
                    "Searching Splunk URL '%s/services/search/jobs' with search '%s' with sessionKey '%s'" %
                    (s.backfillSearchUrl, s.backfillSearch, s.sessionKey))
                results = httplib2.Http(disable_ssl_certificate_validation=True).request(
                    s.backfillSearchUrl + '/services/search/jobs', 'POST',
                    headers={'Authorization': 'Splunk %s' % s.sessionKey},
                    body=urllib.parse.urlencode({
                        'search': s.backfillSearch,
                        'earliest_time': s.backfill,
                        'exec_mode': 'oneshot'
                    }))[1]
                try:
                    temptime = minidom.parseString(results).getElementsByTagName('text')[0].childNodes[0].nodeValue
                    # logger.debug("Time returned from backfill search: %s" % temptime)
                    # Results returned look like: 2013-01-16T10:59:15.411-08:00
                    # But the offset in time can also be +, so make sure we strip that out first
                    if len(temptime) > 0:
                        if temptime.find('+') > 0:
                            temptime = temptime.split('+')[0]
                        temptime = '-'.join(temptime.split('-')[0:3])
                    s.backfillts = datetime.datetime.strptime(temptime, '%Y-%m-%dT%H:%M:%S.%f')
                    logger.debug("Backfill search results: '%s' value: '%s' time: '%s'" %
                                 (pprint.pformat(results), temptime, s.backfillts))
                except (ExpatError, IndexError):
                    pass

    if s.end is not None:
        parsed = False
        try:
            s.end = int(s.end)
            s.endts = None
            parsed = True
        except ValueError:
            logger.debug("Failed to parse end '%s' for sample '%s', treating as end time" % (s.end, s.name))

        if not parsed:
            try:
                s.endts = timeParser(s.end, timezone=s.timezone)
                logger.info("Ending generation at %s (%s)" % (s.end, s.endts))
            except Exception as ex:
                logger.error(
                    "Failed to parse end '%s' for sample '%s', treating as number of executions" % (s.end, s.name))
                raise
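# A standalone illustration (not part of setupBackfill; strip_offset is a hypothetical helper) of how
# the backfill search timestamp is trimmed above: the trailing UTC offset is dropped, whether it is
# written with '+' or '-', before parsing with '%Y-%m-%dT%H:%M:%S.%f'.
import datetime

def strip_offset(temptime):
    # '+' offsets split cleanly; '-' offsets are handled by keeping only the first three '-'-separated
    # pieces, i.e. the date portion plus the time that follows the day.
    if temptime.find('+') > 0:
        temptime = temptime.split('+')[0]
    return '-'.join(temptime.split('-')[0:3])

# Example: '2013-01-16T10:59:15.411-08:00' -> '2013-01-16T10:59:15.411'
print(datetime.datetime.strptime(strip_offset('2013-01-16T10:59:15.411-08:00'), '%Y-%m-%dT%H:%M:%S.%f'))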
def real_run(self):
    """
    Worker function of the Timer class. Determine whether a plugin is queueable, and either
    place an item in the generator queue for that plugin or call the plugin's gen method directly.
    """
    if self.sample.delay > 0:
        logger.info("Sample set to delay %s, sleeping." % self.sample.delay)
        time.sleep(self.sample.delay)

    logger.debug("Timer creating plugin for '%s'" % self.sample.name)
    local_time = datetime.datetime.now()
    end = False
    raw_event_size = self.predict_event_size()
    if self.end:
        if int(self.end) == 0:
            logger.info("End = 0, no events will be generated for sample '%s'" % self.sample.name)
            end = True
        elif int(self.end) == -1:
            logger.info("End is set to -1. Will be running without stopping for sample %s" % self.sample.name)

    while not end:
        try:
            # Need to be able to stop threads from the main thread or from this thread. self.config will stop
            # all threads referenced in the config object, while self.stopping will only stop this one.
            if self.config.stopping or self.stopping:
                end = True

            self.rater.update_options(
                config=self.config,
                sample=self.sample,
                generatorQueue=self.generatorQueue,
                outputQueue=self.outputQueue,
                outputPlugin=self.outputPlugin,
                generatorPlugin=self.generatorPlugin,
            )
            count = self.rater.rate()

            # First run of the generator, see if we have any backfill work to do.
            if self.countdown <= 0:
                if self.sample.backfill and not self.sample.backfilldone:
                    self.backrater.update_options(
                        config=self.config,
                        sample=self.sample,
                        generatorQueue=self.generatorQueue,
                        outputQueue=self.outputQueue,
                        outputPlugin=self.outputPlugin,
                        generatorPlugin=self.generatorPlugin,
                        samplerater=self.rater,
                    )
                    self.backrater.queue_it(count)
                else:
                    if self.sample.generator == "perdayvolumegenerator":
                        self.perdayrater.update_options(
                            config=self.config,
                            sample=self.sample,
                            generatorQueue=self.generatorQueue,
                            outputQueue=self.outputQueue,
                            outputPlugin=self.outputPlugin,
                            generatorPlugin=self.generatorPlugin,
                            samplerater=self.rater,
                            raweventsize=raw_event_size,
                        )
                        self.perdayrater.rate()
                        self.perdayrater.queue_it(count)
                    self.rater.queue_it(count)
                self.countdown = self.interval
                self.executions += 1
        except Exception as e:
            logger.exception(str(e))
            if self.stopping:
                end = True
            pass

        # Sleep until we're supposed to wake up and generate more events
        if self.countdown == 0:
            self.countdown = self.interval

        # 8/20/15 CS Adding support for ending generation at a certain time
        if self.end:
            if int(self.end) == -1:
                time.sleep(self.time)
                self.countdown -= self.time
                continue
            # 3/16/16 CS Adding support for ending on a number of executions instead of time
            # Should be fine with storing state in this sample object since each sample has its own unique
            # timer thread
            if not self.endts:
                if self.executions >= int(self.end):
                    logger.info("End executions %d reached, ending generation of sample '%s'" %
                                (int(self.end), self.sample.name))
                    self.stopping = True
                    end = True
            elif local_time >= self.endts:
                logger.info("End Time '%s' reached, ending generation of sample '%s'" %
                            (self.sample.endts, self.sample.name))
                self.stopping = True
                end = True

        time.sleep(self.time)
        self.countdown -= self.time
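# A tiny standalone sketch of the countdown bookkeeping used by real_run above: the timer sleeps in
# increments of self.time and only queues new work once countdown reaches zero, i.e. once a full
# interval has elapsed. The values below are hypothetical stand-ins for self.interval and self.time.
interval, tick = 3, 1
countdown, wakeups = 0, 0
for _ in range(6):
    if countdown <= 0:
        wakeups += 1          # this is where a generator plugin would be queued
        countdown = interval
    countdown -= tick
print(wakeups)  # -> 2 wake-ups over 6 ticks with a 3-tick interval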
def gen(self, count, earliest, latest, samplename=None):
    # TODO: Figure out how to gracefully tell generator plugins to exit when there is an error.
    try:
        from jinja2 import Environment, FileSystemLoader
        self.target_count = count
        # assume that if there is no "count" field, we want to run 1 time, and only one time.
        if self.target_count == -1:
            self.target_count = 1
        self.earliest = earliest
        self.latest = latest
        if hasattr(self._sample, "jinja_count_type"):
            if self._sample.jinja_count_type in ["line_count", "cycles", "perDayVolume"]:
                self.jinja_count_type = self._sample.jinja_count_type
        startTime = datetime.datetime.now()

        # if eventgen is running as a Splunk app, the configfile is None
        sample_dir = self._sample.sampleDir
        if self._sample.splunkEmbedded is True:
            splunk_home = os.environ["SPLUNK_HOME"]
            app_name = getattr(self._sample, 'app', 'SA-Eventgen')
            sample_dir = os.path.join(splunk_home, 'etc', 'apps', app_name, 'samples')

        if not hasattr(self._sample, "jinja_template_dir"):
            template_dir = 'templates'
        else:
            template_dir = self._sample.jinja_template_dir
        if not os.path.isabs(template_dir):
            target_template_dir = os.path.join(sample_dir, template_dir)
        else:
            target_template_dir = template_dir
        logger.info('set jinja template path to %s', target_template_dir)

        if not hasattr(self._sample, "jinja_target_template"):
            raise CantFindTemplate("Template to load not specified in eventgen conf for stanza. Skipping stanza")
        jinja_env = Environment(
            loader=FileSystemLoader([target_template_dir], encoding='utf-8', followlinks=False),
            extensions=['jinja2.ext.do', 'jinja2.ext.with_', 'jinja2.ext.loopcontrols', JinjaTime],
            line_statement_prefix="#",
            line_comment_prefix="##")
        jinja_loaded_template = jinja_env.get_template(str(self._sample.jinja_target_template))
        if hasattr(self._sample, 'jinja_variables'):
            jinja_loaded_vars = json.loads(self._sample.jinja_variables)
        else:
            # default to an empty dict so the eventgen_* defaults below can still be set
            jinja_loaded_vars = {}
        # make the default generator vars accessible to jinja
        jinja_loaded_vars["eventgen_count"] = self.current_count
        jinja_loaded_vars["eventgen_maxcount"] = self.target_count
        jinja_loaded_vars["eventgen_earliest"] = self.earliest
        self.earliest_epoch = (self.earliest - datetime.datetime(1970, 1, 1)).total_seconds()
        jinja_loaded_vars["eventgen_earliest_epoch"] = self.earliest_epoch
        jinja_loaded_vars["eventgen_latest"] = self.latest
        jinja_loaded_vars["eventgen_latest_epoch"] = (self.latest - datetime.datetime(1970, 1, 1)).total_seconds()
        self.latest_epoch = (self.latest - datetime.datetime(1970, 1, 1)).total_seconds()
        while self.current_count < self.target_count:
            self.end_of_cycle = False
            jinja_loaded_vars["eventgen_count"] = self.current_count
            (jinja_loaded_vars["eventgen_target_time_earliest"],
             jinja_loaded_vars["eventgen_target_time_latest"],
             jinja_loaded_vars["eventgen_target_time_slice_size"],
             jinja_loaded_vars["eventgen_target_time_epoch"]) = JinjaTime._get_time_slice(
                self.earliest_epoch, self.latest_epoch, self.target_count, self.current_count,
                slice_type="random")
            self.jinja_stream = jinja_loaded_template.stream(jinja_loaded_vars)
            lines_out = []
            try:
                for raw_line in self.jinja_stream:
                    # trim the newline char for jinja output
                    # it is quite normal to output empty newlines in jinja
                    line = raw_line.strip()
                    if line:
                        # TODO: Time can be supported by self._sample.timestamp, should probably set that up here.
                        try:
                            target_line = json.loads(line)
                        except ValueError as e:
                            logger.error("Unable to parse Jinja's return. Line: {0}".format(line))
                            logger.error("Parse Failure Reason: {0}".format(str(e)))
                            logger.error(
                                "Please note, you must meet the requirements for json.loads in python if you have "
                                "not installed ujson. Native python does not support multi-line events.")
                            continue
                        current_line_keys = list(target_line.keys())
                        if "_time" not in current_line_keys:
                            # TODO: Add a custom exception here
                            raise Exception("No _time field supplied, please add _time to your jinja template.")
                        if "_raw" not in current_line_keys:
                            # TODO: Add a custom exception here
                            raise Exception("No _raw field supplied, please add _raw to your jinja template.")
                        if "host" not in current_line_keys:
                            target_line["host"] = self._sample.host
                        if "hostRegex" not in current_line_keys:
                            target_line["hostRegex"] = self._sample.hostRegex
                        if "source" not in current_line_keys:
                            target_line["source"] = self._sample.source
                        if "sourcetype" not in current_line_keys:
                            target_line["sourcetype"] = self._sample.sourcetype
                        if "index" not in current_line_keys:
                            target_line["index"] = self._sample.index
                        lines_out.append(target_line)
            except TypeError as e:
                logger.exception(str(e))
            self.end_of_cycle = True
            self._increment_count(lines_out)
            self._out.bulksend(lines_out)
        endTime = datetime.datetime.now()
        timeDiff = endTime - startTime
        timeDiffFrac = "%d.%06d" % (timeDiff.seconds, timeDiff.microseconds)
        logger.debug("Interval complete, flushing feed")
        self._out.flush(endOfInterval=True)
        logger.info("Generation of sample '%s' completed in %s seconds." % (self._sample.name, timeDiffFrac))
        return 0
    except Exception as e:
        logger.exception(str(e))
        return 1
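# A minimal sketch (not taken from the repo; the sample values are illustrative) of what each line
# rendered by the Jinja template must look like for gen() above: a single JSON object per line,
# carrying at least "_time" and "_raw"; any of host/hostRegex/source/sourcetype/index that are
# missing are filled in from the sample's defaults.
import json

rendered_line = '{"_time": 1358362755.411, "_raw": "2013-01-16T10:59:15.411 action=login status=success"}'
event = json.loads(rendered_line)
assert "_time" in event and "_raw" in event  # gen() raises if either field is missing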
def real_run(self):
    """
    Worker function of the Timer class. Determine whether a plugin is queueable, and either
    place an item in the generator queue for that plugin or call the plugin's gen method directly.
    """
    if self.sample.delay > 0:
        logger.info("Sample set to delay %s, sleeping." % self.sample.delay)
        time.sleep(self.sample.delay)

    logger.debug("Timer creating plugin for '%s'" % self.sample.name)
    end = False
    previous_count_left = 0
    raw_event_size = self.predict_event_size()
    if self.end:
        if int(self.end) == 0:
            logger.info("End = 0, no events will be generated for sample '%s'" % self.sample.name)
            end = True
        elif int(self.end) == -1:
            logger.info("End is set to -1. Will be running without stopping for sample %s" % self.sample.name)

    while not end:
        # Need to be able to stop threads from the main thread or from this thread. self.config will stop
        # all threads referenced in the config object, while self.stopping will only stop this one.
        if self.config.stopping or self.stopping:
            end = True
            continue
        count = self.rater.rate()
        # First run of the generator, see if we have any backfill work to do.
        if self.countdown <= 0:
            if self.sample.backfill and not self.sample.backfilldone:
                realtime = self.sample.now(realnow=True)
                if "-" in self.sample.backfill[0]:
                    mathsymbol = "-"
                else:
                    mathsymbol = "+"
                backfillnumber = ""
                backfillletter = ""
                for char in self.sample.backfill:
                    if char.isdigit():
                        backfillnumber += char
                    elif char != "-":
                        backfillletter += char
                backfillearliest = timeParserTimeMath(plusminus=mathsymbol, num=backfillnumber,
                                                      unit=backfillletter, ret=realtime)
                while backfillearliest < realtime:
                    if self.end and self.executions == int(self.end):
                        logger.info("End executions %d reached, ending generation of sample '%s'" %
                                    (int(self.end), self.sample.name))
                        break
                    et = backfillearliest
                    lt = timeParserTimeMath(plusminus="+", num=self.interval, unit="s", ret=et)
                    copy_sample = copy.copy(self.sample)
                    tokens = copy.deepcopy(self.sample.tokens)
                    copy_sample.tokens = tokens
                    genPlugin = self.generatorPlugin(sample=copy_sample)
                    # need to make sure we set the queue right if we're using multiprocessing or thread modes
                    genPlugin.updateConfig(config=self.config, outqueue=self.outputQueue)
                    genPlugin.updateCounts(count=count, start_time=et, end_time=lt)
                    try:
                        self.generatorQueue.put(genPlugin, True, 3)
                        self.executions += 1
                        backfillearliest = lt
                    except Full:
                        logger.warning(
                            "Generator Queue Full. Will re-queue the backfill generator task later. "
                            "%d backfill generators have been dispatched.", self.executions)
                        backfillearliest = et
                    realtime = self.sample.now(realnow=True)

                self.sample.backfilldone = True
            else:
                # 12/15/13 CS Moving the rating to a separate plugin architecture
                # Save the previous interval's leftover count so perdayvolumegenerator doesn't drop small tasks
                if self.sample.generator == 'perdayvolumegenerator':
                    count = self.rater.rate() + previous_count_left
                    if 0 < count < raw_event_size:
                        logger.info(
                            "current interval size is {}, which is smaller than a raw event size {}. "
                            "Wait for the next turn.".format(count, raw_event_size))
                        previous_count_left = count
                        self.countdown = self.interval
                        self.executions += 1
                        continue
                    else:
                        previous_count_left = 0
                else:
                    count = self.rater.rate()

                et = self.sample.earliestTime()
                lt = self.sample.latestTime()

                try:
                    if count < 1 and count != -1:
                        logger.info(
                            "There is no data to be generated in worker {0} because the count is {1}.".format(
                                self.sample.config.generatorWorkers, count))
                    else:
                        # Spawn workers at the beginning of the job rather than waiting for the next interval
                        logger.info("Starting '%d' generatorWorkers for sample '%s'" %
                                    (self.sample.config.generatorWorkers, self.sample.name))
                        for worker_id in range(self.config.generatorWorkers):
                            copy_sample = copy.copy(self.sample)
                            tokens = copy.deepcopy(self.sample.tokens)
                            copy_sample.tokens = tokens
                            genPlugin = self.generatorPlugin(sample=copy_sample)
                            # Adjust queue for threading mode
                            genPlugin.updateConfig(config=self.config, outqueue=self.outputQueue)
                            genPlugin.updateCounts(count=count, start_time=et, end_time=lt)
                            try:
                                self.generatorQueue.put(genPlugin)
                                logger.debug(
                                    ("Worker# {0}: Put {1} MB of events in queue for sample '{2}' " +
                                     "with et '{3}' and lt '{4}'").format(
                                        worker_id, round((count / 1024.0 / 1024), 4), self.sample.name, et, lt))
                            except Full:
                                logger.warning("Generator Queue Full. Skipping current generation.")
                        self.executions += 1
                except Exception as e:
                    logger.exception(str(e))
                    if self.stopping:
                        end = True
                    pass

            # Sleep until we're supposed to wake up and generate more events
            self.countdown = self.interval

            # 8/20/15 CS Adding support for ending generation at a certain time
            if self.end:
                if int(self.end) == -1:
                    time.sleep(self.time)
                    self.countdown -= self.time
                    continue
                # 3/16/16 CS Adding support for ending on a number of executions instead of time
                # Should be fine with storing state in this sample object since each sample has its own unique
                # timer thread
                if not self.endts:
                    if self.executions >= int(self.end):
                        logger.info("End executions %d reached, ending generation of sample '%s'" %
                                    (int(self.end), self.sample.name))
                        self.stopping = True
                        end = True
                elif lt >= self.endts:
                    logger.info("End Time '%s' reached, ending generation of sample '%s'" %
                                (self.sample.endts, self.sample.name))
                    self.stopping = True
                    end = True
        else:
            time.sleep(self.time)
            self.countdown -= self.time