def start(self):
    pid = self.read_pid()
    if pid:
        log.error(u"Server already running, pid %s." % pid)
        sys.exit(-1)
    else:
        self.write_pid(os.getpid())
    server = None
    try:
        time.sleep(0.001)
        server = Server((Config.hostname, self.port))
        log.info(u"Bloops and bleeps at http://%s:%s" % server.server_address)
        server.serve_forever()
        # will never reach this line
    except socket.error as se:
        if se.errno == errno.EACCES:
            log.warn(u"Bad port: %s" % self.port)
            sys.exit(se.errno)
        elif se.errno == errno.ECONNREFUSED:
            log.warn(u"Connection refused: %s" % self.port)
            sys.exit(se.errno)
        else:
            log.exception(se)
def load_or_create(cls, username, password) -> 'tuple':
    if cls.user_exist(username):
        log.info('user {} exists, loading user....'.format(username))
        return cls.load(username, password)
    else:
        log.info('user {} does not exist, creating user....'.format(username))
        return cls.create(username, password)
def sumbit(self, record_time):
    log.info("i did at %s %d" % (util.get_local_time(), record_time))
    coll = Collector()
    requests = threadpool.makeRequests(coll.collect, [record_time], self.print_result)
    for req in requests:
        log.info("get request")
        self.pool.putRequest(req)
def send_session_to_f1laps(self):
    if not self.is_valid_for_f1laps():
        return
    api = F1LapsAPI2021(self.f1laps_api_key, self.game_version)
    success, self.f1_laps_session_id = api.session_create_or_update(
        f1laps_session_id=self.f1_laps_session_id,
        track_id=self.track_id,
        team_id=self.team_id,
        session_uid=self.session_udp_uid,
        conditions=self.map_weather_ids_to_f1laps_token(),
        session_type=self.get_session_type(),
        finish_position=self.finish_position,
        points=self.points,
        result_status=self.result_status,
        lap_times=self.get_f1laps_lap_times_list(),
        setup_data=self.setup,
        is_online_game=self.is_online_game,
        ai_difficulty=self.ai_difficulty or None,
        classifications=self.get_classification_list())
    if success:
        log.info("Session successfully updated in F1Laps")
        return True
    else:
        log.info("Session not updated in F1Laps")
        return False
def sender(self, msg):
    try:
        self.sock.send(msg.encode())
        print('msg sent:', msg)
        log.info(f'{msg} sent')
    except OSError as e:
        self.crit_log(e)
def _get_mail_to():
    session = database.getSession()
    mail_list = session.query(AlarmAssist).filter(
        AlarmAssist.name == "mail_to").first()
    log.info("to mail %s" % mail_list.value)
    session.close()
    return mail_list.value.split(",")
def _send_mail(template_name, context):
    template = env.get_template("%s.html" % template_name)
    html = template.render(**context)
    to_list = _get_mail_to()
    title = u"%s: UHP alarm mail" % config.mail_cluster
    log.info("send mail to " + str(to_list))
    mail.send_mail(to_list, title, html, "html")
def sleep_sumbit_period(self, sleep, record_time, period):
    log.info("sleep for %d " % sleep)
    time.sleep(sleep)
    while not self.stop:
        self.sumbit(record_time)
        record_time += period
        time.sleep(period)
def drop_lap(self, lap_number):
    """ Drop all frames from the current lap, if it exists """
    if self.lap_dict.get(lap_number):
        self.current_lap_number = lap_number
        self.lap_dict[lap_number] = self.TelemetryLapModel(
            lap_number, session_type=self.session_type)
        log.info("Telemetry: dropped telemetry of lap %s" % lap_number)
def mainloop(self):
    self.sock.bind((self.host, self.port))
    self.sock.listen(5)
    self.sock.settimeout(0.1)
    while True:
        try:
            conn, addr = self.sock.accept()
        except OSError:
            pass
        else:
            print("Connection request received from %s" % str(addr))
            log.info(f'Connection request received from {str(addr)}')
            self.clients.append(conn)
            print(self.clients)
        finally:
            wait = 1.0
            r = []
            w = []
            try:
                r, w, e = select.select(self.clients, self.clients, [], wait)
            except Exception:
                pass
            self.echo(r, w)
def do_GET(self):
    # Handle well-behaved bots
    _path = self.path.strip()
    log.info(u"Request path: %s" % _path)
    if _path == u"/robots.txt":
        self.send(u"User-agent: *\nDisallow: /\n")
    elif _path != u"/":
        self.send_error(403, u"Bad request.\n")
    else:  # path is /
        #
        # examine some headers
        # Client candidates:
        """ cmus """
        # GET / HTTP/1.0
        # Host: 0x7be.org
        # User-Agent: cmus/v2.3.2
        # Icy-MetaData: 1
        """ mplayer """
        # GET / HTTP/1.0
        # Host: 0x7be.org:18944
        # User-Agent: MPlayer/SVN-r31347-4.5.0
        # Icy-MetaData: 1
        # Connection: close
        # GET / HTTP/1.0
        # Accept: */*
        # User-Agent: NSPlayer/4.1.0.3856
        # Host: 0x7be.org:18944
        # Pragma: xClientGUID={c77e7400-738a-11d2-9add-0020af0a3278}
        # Pragma: no-cache,rate=1.000000,stream-time=0,stream-offset=0:0,
        #         request-context=1,max-duration=0
        # Connection: Close
        """ squeezebox """
        # Connection: close
        # Cache-Control: no-cache
        # Accept: */*
        # Host: localhost:18944
        # User-Agent: iTunes/4.7.1 (Linux; N; Linux; i686-linux; EN;
        #             utf8) SqueezeCenter, Squeezebox Server/7.4.1/28947
        # Icy-Metadata: 1
        H, icy_client = self.headers, False
        try:
            icy_client = (int(H[u'icy-metadata']) == 1)
        except KeyError as e:
            log.error(u"non-icy client: %s" % e)
            log.error(self.address_string())
        if not icy_client:
            self.send_response(
                400, u"Bad client.\n Try http://cmus.sourceforge.net/\n")
            return False
        user_agent = None
        try:
            user_agent = H[u'user-agent']
        except KeyError as e:
            log.exception(u"Couldn't get user agent.")
def client_disconnect(self, err, sock):
    log.info(
        f'Client {sock.fileno()} {sock.getpeername()} disconnected\n\t\t{err}'
    )
    db_change_user_activity(self.str_pair(sock.getpeername()))
    sock.close()
    self.clients.remove(sock)
def sleep_sumbit_period(self, sleep, record_time, period):
    log.info("sleep for %d " % sleep)
    time.sleep(sleep)
    while not self.stop:
        self.sumbit(record_time)
        record_time += period
        time.sleep(period)
def run(self):
    """
    This method is called automatically when calling .start() on the
    receiver class (in race.py). The caller should call .start(), not .run(),
    to avoid getting stuck in the while loop.

    It's the main packet listening method.
    """
    # Run an endless loop to continuously listen for UDP packets,
    # until the user aborts or the process is terminated
    log.info("Receiver started running")
    while not self.kill_event.is_set():
        try:
            incoming_udp_packet = self.udp_socket.recv(2048)
            # Get game version -- raises if unknown or not found.
            # Do this for every packet so that we can handle game switches in flight.
            game_version = parse_game_version_from_udp_packet(incoming_udp_packet)
            if game_version == "f12020":
                # Only start the processor if it's not set yet or has switched
                if not self.processor or not isinstance(self.processor, F12020Processor):
                    log.info("Detected F1 2020 game version, starting F1 2020 processor.")
                    self.processor = F12020Processor(self.f1laps_api_key, self.telemetry_enabled)
            elif game_version == "f12021":
                # Only start the processor if it's not set yet or has switched
                if not self.processor or not isinstance(self.processor, F12021Processor):
                    log.info("Detected F1 2021 game version, starting F1 2021 processor.")
                    self.processor = F12021Processor(self.f1laps_api_key, self.telemetry_enabled)
            else:
                log.info("Unknown packet or game version.")
            if self.processor:
                self.processor.process(incoming_udp_packet)
        except Exception as ex:
            log.info("Unknown main receiver exception: %s" % ex)
            sentry_sdk.capture_exception(ex)
def judge(self, host, rule, data_set):
    # Cache the parsed expression to avoid re-parsing it on every check
    if rule.exp_func is None or rule.exp_args is None:
        (func_name, func, args) = self._parse_expression(rule.expression)
        rule.exp_func_name = func_name
        rule.exp_func = func
        rule.exp_args = args
    # Prepare the rule as context for the expression
    self.exp_map.host = host
    self.exp_map.rule = rule
    self.exp_map.data_set = data_set
    # If the host is a cluster, array filtering could produce hints here;
    # it seemed useless, so it was removed for now
    #filter_result = self.exp_map.filter_cluster()
    #if filter_result != None:
    #    return filter_result
    real_args, msg = self._get_args_from_ds(data_set, rule.exp_args)
    if real_args is None:
        log.info("rule_name %s host %s func args not get" % (rule.name, host))
        key_word = "%s(%s)" % (host, rule.name + " args not get")
        return [{"key_word": key_word, "msg": msg}]
    begin = time.time()
    exp_result = rule.exp_func(*real_args)
    end = time.time()
    log.info("apply rule_name %s host %s func %s with args %s using %.3lf S" %
             (rule.name, host, rule.exp_func, str(real_args), end - begin))
    return exp_result
def set_user_settings(self, user_settings_dict):
    api_key_valid = user_settings_dict.get("api_key_valid")
    telemetry_enabled = user_settings_dict.get("telemetry_enabled")
    subscription_plan = user_settings_dict.get("subscription_plan")
    subscription_expires = user_settings_dict.get("subscription_expires")
    if (api_key_valid and subscription_plan) or (self.api_key == 'F1LAPS_TESTER'):
        log.info("Valid API key and subscription. Starting session...")
        self.display_subscription_information(subscription_plan, subscription_expires)
        self.start_button.set_running()
        self.status_label.set_running()
        # Actually start the receiver thread
        self.session.start(self.api_key,
                           enable_telemetry=telemetry_enabled,
                           use_udp_broadcast=self.broadcast_mode_enabled,
                           port_value=self.get_port_value())
    else:
        log.info("Not starting Telemetry session (api key %s, subscription %s)" %
                 ("valid" if api_key_valid else "invalid",
                  subscription_plan if subscription_plan else "not set"))
        self.display_subscription_information(subscription_plan, subscription_expires)
        self.start_button.reset()
        self.api_key_field.setDisabled(False)
        self.port_value_field.setReadOnly(False)
        self.udp_broadcast_checkbox.setDisabled(False)
        self.status_label.set_invalid_api_key()
def _send_mail(template_name, context):
    template = env.get_template("%s.html" % template_name)
    html = template.render(**context)
    to_list = _get_mail_to()
    title = u"%s: UHP alarm mail" % config.mail_cluster
    log.info("send mail to " + str(to_list))
    mail.send_mail(to_list, title, html, "html")
def mainloop(self):
    print(self.sock, self.address)
    self.sock.bind(self.address)
    self.sock.listen(5)
    self.sock.settimeout(0.2)
    while True:
        try:
            conn, addr = self.sock.accept()
        except OSError:
            pass
        else:
            print("Connection request received from %s" % str(addr))
            log.info(f'Connection request received from {str(addr)}')
            self.clients.append(conn)
            print(self.clients)
        finally:
            wait = 50
            r = []
            w = []
            try:
                r, w, e = select.select(self.clients, self.clients, [], wait)
            except Exception:
                pass
            requests = self.read_requests(r)
            if requests:
                self.write_responses(requests, w)
def run(self):
    # well this is just fugly. call it "experimental"
    while Config.running:
        try:
            scrobble_item = self.queue.get(0)
            try:
                song = scrobble_item.song
                item_type = scrobble_item.type
                error = scrobble_item.error
                etime = scrobble_item.etime
                try:
                    (tracknumber, artist, album, track) = [escape(item) for item in song.tags]
                except ValueError:
                    log.info("skipping scrobble for {} (bad tags)".format(song.path))
                    continue
                if item_type == NOW_PLAYING:
                    log.debug(u"scrobbling now playing %s %s %s" % (artist, track, album))
                    self.login()
                    scrobbler.now_playing(artist, track)
                    # now_playing auto flushes, apparently. don't call
                    # flush here or it will throw an exception, which is not
                    # what we want.
                elif item_type == PLAYED:
                    # See: http://exhuma.wicked.lu/projects/python/scrobbler/api/public/scrobbler-module.html#login
                    # if mimetype is wrong, length == 0
                    if song.length < 30:
                        log.warn(u"song length %s" % song.length)
                    # wait 60 seconds before re-trying
                    # submission
                    if error:
                        if (time.time() - etime) < 60:
                            break
                    log.debug(u"scrobbling played %s %s %s %s" % (artist, track, album, song.length))
                    self.login()
                    scrobbler.submit(
                        artist, track,
                        int(time.mktime(datetime.datetime.now().timetuple())),
                        source=escape(u'P'),
                        length=song.length,
                        album=album)
                    scrobbler.flush()
            except Exception as e:
                log.exception(u"scrobble error: %s" % e)
                # put it back
                scrobble_item.error = True
                scrobble_item.etime = time.time()
                self.queue.put(scrobble_item)
        except Queue.Empty:
            pass
        # AS API enforced limit -- do not change.
        time.sleep(10)
def complete_lap_v2(self, lap_number):
    """
    This is meant to be a temporary function, as it's now based on the Lap packet again.
    The Session History packet turned out to be too buggy, which is why the original
    complete_lap isn't used anymore.
    """
    log.info("Session (via Lap packet): complete lap %s" % lap_number)
    self.post_process(lap_number)
def collect(self, recordTime):
    try:
        log.info("begin to collect recordTime:%d" % recordTime)
        self.init_all(recordTime)
        self.collectMetrics()
        self.collectApp()
    except:
        log.exception("collect catch exception recordTime:%d" % recordTime)
def sumbit(self, record_time):
    log.info("i did at %s %d" % (util.get_local_time(), record_time))
    coll = Collector()
    requests = threadpool.makeRequests(coll.collect, [record_time], self.print_result)
    for req in requests:
        log.info("get request")
        self.pool.putRequest(req)
def post_process(self, lap_number):
    # Send to F1Laps
    if self.lap_should_be_sent_to_f1laps(lap_number):
        log.info("Session: post process lap %s" % lap_number)
        if self.lap_should_be_sent_as_session():
            self.send_session_to_f1laps()
        else:
            self.send_lap_to_f1laps(lap_number)
def process(self, session):
    # Get lap number and distance
    lap_number = self.get_lap_number()
    if not lap_number:
        # If we can't retrieve the lap number, we can't do anything here
        log.debug("Can't retrieve lap number from lap packet - not processing")
        return session
    lap_distance = self.get_lap_distance()
    # session.lap_distance = lap_distance  # Enable this for motion packet / minimap

    # Handle in- and outlaps - essentially ignore everything, just update the lap before the outlap.
    # Race sessions have 1 outlap at the end
    if session.is_race() or session.is_qualifying_one_shot():
        is_out_or_inlap = self.is_race_inlap(session, lap_number)
    # Quali sessions have inlaps and outlaps
    elif session.is_qualifying_non_one_shot():
        is_out_or_inlap = self.is_quali_out_or_inlap(session, lap_number)
    else:
        # time trial, practice
        is_out_or_inlap = False

    if is_out_or_inlap:
        if not session.current_lap_in_outlap_logging_status:
            log.info("Skipping lap #%s because it's an in-/outlap" % lap_number)
            # In normal quali, the inlap is #n+1; in a race it's #n
            last_valid_lap_number = lap_number if not session.is_qualifying_non_one_shot() else lap_number - 1
            self.update_previous_lap(session, last_valid_lap_number + 1)  # +1 because we're updating the previous lap
            session.complete_lap_v2(last_valid_lap_number)
            # Make sure to not log for this lap anymore
            session.current_lap_in_outlap_logging_status = True
        return session
    else:
        # Perform clean-up of previous in-/outlap data
        if session.current_lap_in_outlap_logging_status:
            # Drop all data of this lap, so that telemetry gets reset
            lap_number_to_drop = lap_number + 1 if not session.is_qualifying_non_one_shot() else lap_number
            session.drop_lap_data(lap_number_to_drop)
            # Reset outlap logger
            session.current_lap_in_outlap_logging_status = False

    # Handle new laps
    if self.is_new_lap(session, lap_number):
        # Update the previous lap with its sector 3 time
        self.update_previous_lap(session, lap_number)
        # Start a new lap, which in turn starts telemetry
        session.start_new_lap(lap_number)
        # Push lap-1 to F1Laps
        session.complete_lap_v2(lap_number - 1)

    # Update current lap and telemetry
    session = self.update_current_lap(session)
    session = self.update_telemetry(session)
    return session
def salt_run_update(self, task):
    encode_update_string = encrypt.base64_encode(
        task['task_id'] + '@' + task['type'] + '@' + task['target'] + '@' +
        str(task['version']) + '@' + task['content'])
    result = sapi.run_script([task['ip']], 'salt://scripts/update.sh',
                             encode_update_string)
    log.info(result)
def pre_check(self):
    '''
    Fetch the data before each check run
    '''
    begin = time.time()
    if os.path.exists(config.ganglia_rrd_dir):
        self.data_set = self.get_last_rrd(config.ganglia_rrd_dir)
    end = time.time()
    log.info("refresh data using %.3f s" % (end - begin))
def start(self, api_key, enable_telemetry, use_udp_broadcast, port_value):
    receiver_thread = RaceReceiver(api_key,
                                   enable_telemetry=enable_telemetry,
                                   use_udp_broadcast=use_udp_broadcast,
                                   host_port=port_value)
    receiver_thread.start()
    self.session = receiver_thread
    self.is_active = True
    log.info("Started telemetry session")
    return True
def submit(self, fn, callback, timeout, *args, **kwargs):
    with _lock:
        try:
            self.__task_queue.put(self._Task(fn, callback, timeout, *args, **kwargs),
                                  block=False)
            log.info(f"task queue: {self.__task_queue.qsize()}")
            self._adjust_thread_count()
        except queue.Full as full:
            log.warning(f"exception : {full}")
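# A hedged usage sketch for the submit() above (not part of the original
# source). It assumes a pool object exposing submit(), a worker function, and
# a result callback; all names here are illustrative only.
#
#     def fetch_metrics(host):
#         return collect_from(host)   # hypothetical worker
#
#     def on_result(result):
#         log.info("task finished: %s" % result)
#
#     # enqueue the task without blocking; a 5-second timeout for the worker
#     pool.submit(fetch_metrics, on_result, 5, "nn01")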
def main():
    address = config.address
    port = config.port
    log.info('run server on %s:%s' % (address, port))
    tornado.options.parse_command_line()
    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(port, address)
    tornado.ioloop.IOLoop.instance().start()
def start_new_lap(self, lap_number):
    """
    As set by the Lap packet, this method is called when the
    currentLap number was increased
    """
    log.info("Session (via Lap packet): start new lap %s" % lap_number)
    # Add the new lap to the lap list
    self.lap_list[lap_number] = {}
    # Update telemetry
    self.telemetry.start_new_lap(lap_number)
def is_active_session(self, session):
    session_exists = bool(session)
    if not session_exists:
        log.info("Starting new session because it doesn't exist")
        return False
    udp_id_changed = bool(session.session_udp_uid != self.header.sessionUID)
    if udp_id_changed:
        log.info("Starting new session because the UDP session UID changed")
        return False
    return True
def is_race_inlap(self, session, lap_number):
    # For race or OSQ inlaps (the lap after the last lap), the lap number
    # doesn't increment. We use the following test to ignore the inlap.
    lap_list = session.lap_list.get(lap_number)
    current_distance = self.get_lap_distance()
    # If we're in the first x meters of a lap and also have all sector data -- it's an inlap
    if (current_distance and current_distance < MAX_DISTANCE_COUNT_AS_NEW_LAP) and \
            (lap_list and lap_list.get("sector_1_ms") and lap_list.get("sector_2_ms")
             and lap_list.get("sector_3_ms")):
        log.info("Skipping lap #%s because it's an inlap" % lap_number)
        return True
    return False
def run(self):
    while not self.stop:
        log.info("BEGIN TO CHECK")
        self.pre_check_manager_list()
        self.judge_host_rule()
        self.post_check_manager_list()
        log.info("END CHECK")
        self.interval()
def load(self):
    file_lines = self._read_config()
    if not file_lines:
        return self
    for line in file_lines:
        try:
            line_name, line_value = line.split("=")
            if line_name in self.supported_config_names:
                self.config[line_name] = line_value.rstrip()
        except Exception as ex:
            log.info("Could not read line from readlines (%s)" % ex)
    return self
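# A hedged illustration (not from the original source) of the config file
# format load() above expects: one "name=value" pair per line, where the name
# must appear in self.supported_config_names. The keys below are made up.
#
#     api_key=abc123
#     telemetry_enabled=True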
def run(self):
    while not self.stop:
        begin = time.time()
        log.info("BEGIN TO CHECK")
        self.pre_check_manager_list()
        self.judge_host_rule()
        self.post_check_manager_list()
        end = time.time()
        log.info("END CHECK using %.3fs" % (end - begin))
        self.interval()
def drop_lap_data(self, lap_number):
    """
    Drop telemetry and lap data for in/out-laps.
    For race or OSQ, the lap data never gets posted in the first place,
    so make sure to only update it if it already exists.
    """
    if self.lap_list.get(lap_number):
        # Drop telemetry
        self.telemetry.drop_lap(lap_number)
        # Remove the data from the lap dict, but keep the key in there
        # so that the lap doesn't need to be started again
        self.lap_list[lap_number] = {}
        log.info("Session (via Lap packet): dropped lap %s" % lap_number)
def run(self):
    while not self.stop:
        begin = time.time()
        log.info("BEGIN TO CHECK")
        self.pre_check_manager_list()
        self.judge_host_rule()
        self.post_check_manager_list()
        end = time.time()
        log.info("END CHECK using %.3fs" % (end - begin))
        self.interval()
def var_(*args):
    (_, nid, cargs) = args
    node = nd.Cluster(nid)
    log.info('var: {0}'.format(str(node)))
    endog = node.readings.dropna()
    if not endog.empty and cargs.lags:
        maxlags = max(cargs.lags)
        try:
            res = vm.VAR(endog=endog).fit(maxlags=maxlags)
            mkplot(node, 'var', res, cargs.output, maxlags)
        except (LinAlgError, ValueError) as err:
            log.error(err)
def ols_(*args):
    (_, nid, cargs) = args
    log.info('ols: {0}'.format(nid))
    node = nd.Cluster(nid)
    for i in cargs.lags:
        node.addlag(i)
    idx = repr(node)
    endog = node.readings[idx]
    exog = node.readings.drop(idx, axis=1)
    try:
        res = sm.OLS(endog=endog, exog=exog, missing='drop').fit()
        mkplot(node, 'ols', res, cargs.output)
    except (LinAlgError, ValueError) as err:
        log.error('{0}: {1},{2}'.format(err, endog.shape, exog.shape))
def enqueue(self, args, extensions=None):
    tracks = streams = 0
    pl = self.data[u'playlist']
    for arg in args:
        log.info(u"+ {}".format(arg.decode('utf-8')))
        elist = None
        if isfile(arg):
            elist = [realpath(arg)]
        elif is_stream(arg):
            raise NotImplementedError  # TODO
        else:
            try:
                assert isdir(arg)
            except AssertionError:
                print "{} is not a directory.".format(arg)
                continue
            elist = self.enqueue_list(arg, extensions)
            elist.sort()
        if elist is not None:
            track_count = int(len(pl))
            if track_count == 0:
                last = 0
            else:
                last = sorted(pl.keys())[-1] + 1
            songs = self.pool.map(crunch, elist)
            for i, song in enumerate(songs):
                if song.corrupt:
                    continue
                pl[i + last] = song
            tracks += int(len(pl)) - track_count
            #try:
            self.data[u'playlist'] = pl
            #except Exception, e:
            #    log.exception(e)
    # reached end of playlist, reset index
    if self.data[u'status'] == u'stopped' and int(self.data[u'index']) == -1:
        self.data[u'index'] = 0
    return u"Enqueued %s tracks in %s directories (%s streams)." % (tracks, len(args), streams)
def __init__(self, addr):
    self.allow_reuse_address = 1
    self.hostname, self.port = addr
    TCPServer.__init__(self, addr, ServerRequestHandler)
    # shared state
    self.data = dict()
    # set server defaults
    self.data = {
        'started_at': datetime.now(),
        'port': self.port,
        'hostname': Config.hostname,
        'running': True
    }
    # create a shared Data object
    self.manager = DataManager(address=('', self.port + 1), authkey=Config.authkey)
    # "Private" methods ('__'-prefixed) are *not* exported out of
    # the manager by default. This includes stuff to make dict work
    # minimally. See
    # http://docs.python.org/library/multiprocessing.html
    #
    # Upshot is we need to explicitly name the exposed functions:
    DataManager.register('get_data',
                         callable=lambda: self.data,
                         exposed=('__str__', '__delitem__', '__getitem__', '__setitem__'))
    self.manager.start()
    self.manager.connect()
    self.data = self.manager.get_data()
    log.info("Bloops and bleeps at http://%s:%s" % self.server_address)
    self.serve_forever()
    self.cleanup()
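# A hedged sketch (not part of the original source) of how a separate process
# could attach to the shared-data manager registered above. The address and
# authkey must match what the server used; the port value here is a placeholder.
#
#     from multiprocessing.managers import BaseManager
#
#     class DataManager(BaseManager):
#         pass
#
#     DataManager.register('get_data')
#     manager = DataManager(address=('', 18945), authkey=Config.authkey)
#     manager.connect()
#     data = manager.get_data()   # proxy to the server's shared dict
#     print data['hostname'], data['port']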
def judge(self, host, rule, data_set):
    # Cache the parsed expression to avoid re-parsing it on every check
    if rule.exp_func is None or rule.exp_args is None:
        (func_name, func, args) = self._parse_expression(rule.expression)
        rule.exp_func_name = func_name
        rule.exp_func = func
        rule.exp_args = args
    # Prepare the rule as context for the expression
    self.exp_map.host = host
    self.exp_map.rule = rule
    self.exp_map.data_set = data_set
    # If the host is a cluster, apply array filtering
    filter_result = self.exp_map.filter_cluster()
    if filter_result is not None:
        return filter_result
    real_args = self._get_args_from_ds(data_set, rule.exp_args)
    exp_result = rule.exp_func(*real_args)
    log.info("apply rule_name %s host %s func %s with args %s" %
             (rule.name, host, rule.exp_func, str(real_args)))
    return exp_result
def start(self):
    pid = self.read_pid()
    if pid:
        log.error(u"Server already running, pid %s." % pid)
        sys.exit(-1)
    else:
        self.write_pid(os.getpid())
    server = None
    try:
        time.sleep(0.001)
        server = Server((Config.hostname, self.port))
        log.info(u"Bloops and bleeps at http://%s:%s" % server.server_address)
        server.serve_forever()
        # will never reach this line
    except socket.error as se:
        if se.errno == errno.EACCES:
            log.warn(u"Bad port: %s" % self.port)
            sys.exit(se.errno)
        elif se.errno == errno.ECONNREFUSED:
            log.warn(u"Connection refused: %s" % self.port)
            sys.exit(se.errno)
        else:
            log.exception(se)
def f(*args):
    (index, nid, (window, oneday, threshold, agg)) = args
    log.info('{0} create'.format(nid))
    node = nd.Node(nid)
    winlen = nd.winsum(window)
    log.info('{0} apply'.format(nid))
    #
    # determine which windows constitute a traffic event
    #
    df = pd.rolling_apply(node.readings.speed, winlen, g,
                          min_periods=winlen, args=[window, threshold])
    df.dropna(inplace=True)
    #
    # aggregate the results
    #
    log.info('{0} aggregate'.format(nid))
    if df.empty:
        log.error('{0}: No observations'.format(nid))
        return []
    # items = zip(range(oneday), [ [] ] * oneday)
    totals = OrderedDict()
    for i in range(oneday):
        totals[i] = [0]
    for i in df.index:
        key = cp.bucket(i)
        totals[key].append(df.ix[i])
    vals = [agg(x) for x in totals.values()]
    vals.append(nid)  # this is important
    return vals
def stream(self, icy_client=False):
    song = None
    while self.playlist.data[u'running']:
        if Config.scrobble and song:
            # just played one . . . scrobble it
            self.scrobble_queue.put(ScrobbleItem(PLAYED, song))
            #log.debug("enqueued played")
            #log.debug(song)
        # new song
        song = self.playlist.get_song()
        song_start_time = time.time()
        self.playlist.data[u"progress"] = 0
        if not song:
            log.warn(u"no playlist, won't stream")
            self.playlist.data[u'status'] = u'stopped'
            self.byte_count = 0
            self.empty_scrobble_queue()
            return
        if Config.scrobble:
            #log.debug("enqueued now playing")
            self.scrobble_queue.put(ScrobbleItem(NOW_PLAYING, song))
        log.info(u'> %s' % unicode(song))
        transcode = None
        try:
            # this loop gets some of its ideas about the shoutcast protocol from Amarok
            buffer = 0
            buffer_size = Config.buffer_size
            metadata_interval = Config.metaint
            try:
                transcode.stdout.close()
            except:
                pass
            # if song.mimetype[0:5] in ["audio", "video"]:
            transcode = subprocess.Popen(
                [u"/usr/bin/ffmpeg", u"-i", song.path, u"-vn",
                 u"-loglevel", u"warning", u"-qscale:a", u"0",
                 u"-f", u"mp3", u"-"],
                stdout=subprocess.PIPE, shell=False)
            self.dirty_meta = True
            skip = False
            while self.playlist.data[u'running'] and transcode:
                bytes_until_meta = (metadata_interval - self.byte_count)
                if bytes_until_meta == 0:
                    if icy_client:
                        metadata = self.get_meta(song)
                        self.request.send(metadata.encode(u'ascii', u'ignore'))
                    self.byte_count = 0
                else:
                    if bytes_until_meta < buffer_size:
                        chunk_bytes = bytes_until_meta
                    else:
                        chunk_bytes = buffer_size
                    buffer = transcode.stdout.read(chunk_bytes)
                    self.request.send(buffer)
                    buflen = len(buffer)
                    self.byte_count += buflen
                    self.playlist.data[u"sum_bytes"] += buflen
                elapsed = time.time() - song_start_time
                self.playlist.data[u'elapsed'] = elapsed
                # set percentage elapsed
                try:
                    self.playlist.data[u"progress"] = float(elapsed * 100) / song.length
                except ZeroDivisionError:
                    self.playlist.data[u"progress"] = 0
                if len(buffer) == 0:
                    break
                if self.playlist.data[u'skip']:
                    log.info(u">>")
                    skip = True
                    song = None  # don't scrobble
                    self.playlist.data[u"elapsed"] = 0
                    self.playlist.data[u"progress"] = 0
                    break
                if self.playlist.data[u'status'] == u'stopped':
                    log.info(u".")
                    skip = True
                    song = None  # don't scrobble
                    self.playlist.data[u"elapsed"] = 0
                    break
            if not skip:
                # increment the counter if we're not ffwding
                self.playlist.next()
            else:
                self.playlist.data[u'skip'] = False
            self.dirty_meta = True
        except error as e:
            if isinstance(e.args, tuple):
                print "errno is %d" % e[0]
                if e[0] == errno.EPIPE:
                    # remote peer disconnected
                    print "Detected remote disconnect"
                elif e.errno == errno.ECONNRESET:
                    self.empty_scrobble_queue()
                    log.info(u"Client disconnected")
                else:
                    log.info(u"Unknown socket error")
                    self.empty_scrobble_queue()
                    log.exception(errno.errorcode[e.errno])
            else:
                print "socket error ", e
            self.request.close()
            self.playlist.data[u'status'] = 'stopped'
            break  # while
        except IOError as e:
            log.info("IO ERROR")
def _get_mail_to():
    session = database.getSession()
    mail_list = session.query(AlarmAssist).filter(
        AlarmAssist.name == "mail_to").first()
    log.info("to mail %s" % mail_list.value)
    session.close()
    return mail_list.value.split(",")
def send_mail(self, state, msg):
    now = time.strftime("%Y-%m-%d %H:%M:%S")
    if state != contants.ALARM_OK:
        mail_center.add_alarm_info({"ts": now, "name": self.rule.name,
                                    "host": self.host, "state": state, "msg": msg})
        log.info("send mail %s %s %s" % (self.rule.name, state, msg))
def sumbit(self, record_time):
    log.info("i did at %s %d" % (util.get_local_time(), record_time))
    coll = Collector()
    requests = threadpool.makeRequests(coll.collect, [record_time], self.print_result)
    for req in requests:
        log.info("get request")
        self.pool.putRequest(req)


os.chdir(config.uhphome)
APP = "collect"

if __name__ == "__main__":
    log.info("start...")
    try:
        pidfile = "pids/%s/%s.pid" % (APP, APP)
        pidfile = lockfile.pidlockfile.PIDLockFile(pidfile)
        files_preserve = [logging.root.handlers[1].stream.fileno()]
        dmn = daemon.DaemonContext(None, os.getcwd(), pidfile=pidfile,
                                   files_preserve=files_preserve)
        dmn.open()
        try:
            # start the collect loop
            main = CollectorMain()
            main.run()
        finally:
            dmn.close()
    except Exception as e:
        log.exception(e)
    log.info("end!")
def print_log(self, state, msg):
    log.info("%s %s %s" % (self.rule.name, state, msg))
def deal_alarm_list(self, alarm_list):
    new_key_word, old_key_word = self.diff_key_word(alarm_list)
    mail_center.push_key_word_map(self.key_word_map)
    session = database.getSession()
    # Update the alarm_now table in the database
    for alarm_now in session.query(AlarmNow):
        session.delete(alarm_now)
    for (key_word, info) in self.key_word_map.items():
        end = key_word.find("(")
        host = key_word[0:end]
        session.add(AlarmNow(key_word, host, info["msg"], "ERROR",
                             info["count"], int(time.time())))
    session.commit()
    # Try repair actions based on the number of consecutive alarms
    try:
        fix_list = []
        for auto_fix in session.query(AlarmAutofix):
            fix_list.append(auto_fix.format())
        for (key_word, info) in self.key_word_map.items():
            count = info["count"]
            for auto_fix in fix_list:
                match = auto_fix["pattern"].match(key_word)
                if match and (count == auto_fix["count"] or count == auto_fix["count"] * 2):
                    end = key_word.find("(")
                    host = key_word[0:end]
                    log.info(" build the auto fix tasks: %s %s %s " %
                             (host, auto_fix["role"], auto_fix["task"]))
                    database.build_task(session, "auto_fix", auto_fix["service"],
                                        host, auto_fix["role"], auto_fix["task"])
    except:
        log.exception("autofix catch exception")
    try:
        # Send mail and record the action
        ignore_key_word = session.query(AlarmAssist).filter(
            AlarmAssist.name == "ignore_key_word").first()
        ignore_list = []
        if ignore_key_word is not None:
            # ignore_list = ignore_key_word.value.split(",")
            for pattern_str in ignore_key_word.value.split(","):
                if len(pattern_str) > 1:
                    ignore_list.append(re.compile(pattern_str))
        # log.info(ignore_list)
        for alarm_state in new_key_word:
            key_word = alarm_state["key_word"]
            session.add(AlarmList(alarm_state["key_word"], "", alarm_state["msg"],
                                  "ERROR", int(time.time())))
            if self.is_match(ignore_list, key_word):
                log.info("ignore %s" % key_word)
            else:
                self._callback(alarm_state)
        for alarm_state in old_key_word:
            key_word = alarm_state["key_word"]
            session.add(AlarmList(alarm_state["key_word"], "", alarm_state["msg"],
                                  "INFO", int(time.time())))
            if self.is_match(ignore_list, key_word):
                log.info("ignore %s" % key_word)
            else:
                self._callback(alarm_state)
        session.commit()
    except:
        log.exception("deal callback catch exception")
    session.close()
def print_result(request, result):
    log.info("the result is %s %r" % (request.requestID, result))
def _send_mail(template_name, context):
    template = env.get_template("%s.html" % template_name)
    html = template.render(**context)
    to_list = _get_mail_to()
    log.info("send mail to " + str(to_list))
    mail.send_mail(to_list, "UHP alarm mail", html, "html")
def do_GET(self):
    log.debug("get")
    # Handle well-behaved bots
    _path = self.path.strip()
    log.info("Request path: %s" % _path)
    if _path == "/robots.txt":
        self.send("User-agent: *\nDisallow: /\n")
    elif _path != "/":
        self.send_error(403, "Bad request.\n")
    else:  # path is /
        #
        # examine some headers
        # Client candidates:
        """ cmus """
        # GET / HTTP/1.0
        # Host: 0x7be.org
        # User-Agent: cmus/v2.3.2
        # Icy-MetaData: 1
        """ mplayer """
        # GET / HTTP/1.0
        # Host: 0x7be.org:18944
        # User-Agent: MPlayer/SVN-r31347-4.5.0
        # Icy-MetaData: 1
        # Connection: close
        # GET / HTTP/1.0
        # Accept: */*
        # User-Agent: NSPlayer/4.1.0.3856
        # Host: 0x7be.org:18944
        # Pragma: xClientGUID={c77e7400-738a-11d2-9add-0020af0a3278}
        # Pragma: no-cache,rate=1.000000,stream-time=0,stream-offset=0:0,
        #         request-context=1,max-duration=0
        # Connection: Close
        """ squeezebox """
        # Connection: close
        # Cache-Control: no-cache
        # Accept: */*
        # Host: localhost:18944
        # User-Agent: iTunes/4.7.1 (Linux; N; Linux; i686-linux; EN;
        #             utf8) SqueezeCenter, Squeezebox Server/7.4.1/28947
        # Icy-Metadata: 1
        H, icy_client = self.headers, False
        try:
            icy_client = (int(H['icy-metadata']) == 1)
        except KeyError as e:
            log.error("non-icy client: %s" % e)
            log.error(self.address_string())
        if not icy_client:
            self.send_response(400, "Bad client.\n Try http://cmus.sourceforge.net/\n")
            return False
        user_agent = None
        try:
            user_agent = H['user-agent']
        except KeyError as e:
            log.exception("Couldn't get user agent.")
        if user_agent:
            log.info("User-Agent: %s" % user_agent)
        self.do_HEAD(icy_client)
        Streamer(self.request, self.server.port).stream(icy_client)
def collectApp(self):
    # Get the list of all apps that finished in the past period
    apps = self.getAppList()
    if not apps or not apps["apps"]:
        log.info("no appid match")
        return
    startCollectTime = time.time()
    # Loop over the app list and fetch detailed info for each app
    for app in apps["apps"]["app"]:
        startTime = time.time()
        appid = app["id"]
        if app['state'] == 'FINISHED':
            try:
                jobid = appidToJobid(appid)
                jobHistory = self.getJobHistory(jobid)
                if jobHistory:
                    jobCounter = self.getJobCounter(jobid)
                    jobTasks = self.getJobAllTask(jobid)
                    self.updateWithAppid(app, jobHistory, jobCounter)
                else:
                    log.info("found an app that ran successfully but has no history file: " + appid)
            except:
                log.exception("got an error while processing app " + appid)
        else:
            self.updateWithNotSuccAppid(app)
        endTime = time.time()
        log.info("getting appid: %s using %d ms" % (appid, (endTime - startTime) * 1000))
    endCollectTime = time.time()
    log.info("using %d ms to collect the data" % ((endCollectTime - startCollectTime) * 1000))
    startFlushTime = time.time()
    # Flush the collected data
    session = database.getSession()
    for (appid, appRecordValue) in self.appList.items():
        session.merge(appRecordValue)
    session.commit()
    log.info("push %d appRecord into table" % (len(self.appList)))
    for (key, NmRecordValue) in self.nmList.items():
        session.merge(NmRecordValue)
    session.commit()
    log.info("push %d NmRecord into table" % (len(self.nmList)))
    for (key, RmRecordValue) in self.rmList.items():
        session.merge(RmRecordValue)
    session.commit()
    log.info("push %d RmRecord into table" % (len(self.rmList)))
    endFlushTime = time.time()
    log.info("using %d ms to push to the db" % ((endFlushTime - startFlushTime) * 1000))