def run(self):
    """Main loop of the metric collector.

    Loads metric definitions from ``metrics.conf`` (lines of
    ``name,aggregation``; ``#`` starts a comment line), then runs forever:
    dispatches up to ``worker_threads`` worker processes that each collect
    one metric, and reaps finished workers, recording their runtime.
    """
    self.metrics = {}  # initialize list of metrics
    try:
        with open("metrics.conf", 'r') as fp:
            for line in fp:
                # BUGFIX: the original called line.strip()/line.replace()
                # without assigning the result (strings are immutable), so
                # raw lines with trailing newlines reached addMetric.
                line = line.strip().replace(' ', '')
                # Skip blank lines (line[0] would raise IndexError) and comments.
                if not line or line.startswith('#'):
                    continue
                metr = line.split(',')
                self.addMetric(metr[0], metr[1])
    except Exception:
        # Top-level boundary: log and continue with whatever was loaded.
        # (Narrowed from a bare `except:` which also swallowed SystemExit
        # and KeyboardInterrupt.)
        logger.error("Unexpected error: {}".format(sys.exc_info()[0]))

    # create worker processes
    self.workers = {}  # timeseries id -> running Process
    while True:
        # create a maximum # of workers
        if len(self.workers) < worker_threads:
            # first get all active tenants
            tenants = self.getDTTenants()
            metricstate = self.getNext()
            if metricstate is not None:
                now = time.time()
                w = Process(target=getMultiTenantMetric, args=(metricstate, tenants,))
                self.workers[metricstate.getTimeseries()] = w
                # Record launch time on the Process object so the reaper
                # below can compute the worker's runtime.
                w.starttime = now
                w.daemon = True
                w.start()

        # check for finished worker processes
        done = []
        for ts, w in self.workers.items():
            if not w.is_alive():
                done.append(ts)
                self.updateMetric(ts, round(time.time() - w.starttime, 1), 0.0, 0.0)

        # remove the workers that are finished
        for d in done:
            del self.workers[d]
        time.sleep(1.0)
def open_shell(self, connection):
    """Serve an interactive command shell over *connection*.

    Receives commands (up to ``self._buffer_size`` bytes each) from the
    socket-like *connection*, executes each one in the platform shell, and
    sends the combined stdout/stderr back. Stops when the peer sends
    ``exit`` or closes the connection.

    The original implementation was non-functional: it passed a command
    string as the ``target`` of ``multiprocessing.Process`` (which must be
    a callable, so ``start()`` raised), used the ``None`` return of
    ``Process.start()`` as the command output, and compared received
    ``bytes`` against the str ``'exit'`` (never equal on Python 3).
    """
    # Local import so the fix does not depend on the (unseen) module
    # import block.
    import subprocess

    while True:
        cmd = connection.recv(self._buffer_size)
        # Empty recv means the peer closed the connection.
        if not cmd:
            break
        command = cmd.decode().strip()
        # BUGFIX: decode before comparing; bytes == str is always False.
        if command == 'exit':
            break
        # SECURITY: shell=True executes untrusted remote input by design
        # here (this *is* a remote shell); do not reuse this pattern for
        # anything that should restrict commands.
        result = subprocess.run(command, shell=True,
                                capture_output=True, text=True)
        out = result.stdout + result.stderr
        # Always send at least one byte so the client's recv() returns.
        connection.send(out.encode() or b'\n')