def get(mode=None, username=None, ip=None):
    """Look up ssh-key records.

    mode == 'ip':   rows matching *ip*, as {'username', 'system_user'} dicts.
    mode == 'user': rows matching *username*, as {'ip', 'system_user'} dicts.
    Returns the list of dicts, or False on unknown mode / query error.
    """
    if mode == 'ip':
        condition = (SshKeyInfo.ip == ip)
        wanted = ('username', 'system_user')
    elif mode == 'user':
        condition = (SshKeyInfo.username == username)
        wanted = ('ip', 'system_user')
    else:
        return False
    records = []
    try:
        for row in SshKeyInfo.select().where(condition):
            raw = row.__dict__['_data']
            records.append(dict((name, raw[name]) for name in wanted))
    except Exception:
        log.exception('exception')
        return False
    return records
def save(data):
    """Pickle *data* to Config.datapath.

    Returns True on success, False on failure (the exception is logged).
    """
    try:
        # BUGFIX: "except Exception, e" is Python-3-incompatible; also use
        # plain open() -- pickle output is binary, codecs.open() is meant
        # for text encodings -- and return an explicit False on failure
        # (the original fell through and returned None).
        with open(Config.datapath, 'wb') as picklef:
            pickle.dump(data, picklef)
        return True
    except Exception as e:
        log.exception(e)
        return False
def collectApp(self):
    """Collect finished apps from the app list, enrich them with their
    job-history data, then flush all accumulated records to the DB.
    """
    # Fetch the list of apps completed during the past period.
    apps = self.getAppList()
    if not apps or not apps["apps"]:
        log.info("no appid match")
        return
    startCollectTime = time.time()
    # Walk the app list and fetch each app's details.
    for app in apps["apps"]["app"]:
        startTime = time.time()
        appid = app["id"]
        if app['state'] == 'FINISHED':
            try:
                jobid = appidToJobid(appid)
                jobHistory = self.getJobHistory(jobid)
                if jobHistory:
                    jobCounter = self.getJobCounter(jobid)
                    # NOTE(review): fetched but never used below -- kept in
                    # case the call has side effects; confirm and drop.
                    jobTasks = self.getJobAllTask(jobid)
                    self.updateWithAppid(app, jobHistory, jobCounter)
                else:
                    log.info(
                        "find some app run success but no history file:" +
                        appid)
            except Exception:  # was a bare except
                log.exception("get error while doing app " + appid)
        else:
            self.updateWithNotSuccAppid(app)
        # BUGFIX: endTime was assigned only in the FINISHED branch, so the
        # timing log below raised NameError (or logged a stale value) when
        # an app was not FINISHED.  Stamp it unconditionally here.
        endTime = time.time()
        log.info("getting appid: %s using %d ms" %
                 (appid, (endTime - startTime) * 1000))
    endCollectTime = time.time()
    log.info("using %d ms to collect the data" %
             ((endCollectTime - startCollectTime) * 1000))
    startFlushTime = time.time()
    # Flush the collected records (apps, NMs, RMs) to the database.
    session = database.getSession()
    for (appid, appRecordValue) in self.appList.items():
        session.merge(appRecordValue)
    session.commit()
    log.info("push %d appRecord into table" % (len(self.appList)))
    for (key, NmRecordValue) in self.nmList.items():
        session.merge(NmRecordValue)
    session.commit()
    log.info("push %d NmRecord into table" % (len(self.nmList)))
    for (key, RmRecordValue) in self.rmList.items():
        session.merge(RmRecordValue)
    session.commit()
    log.info("push %d RmRecord into table" % (len(self.rmList)))
    endFlushTime = time.time()
    log.info("using %d ms to push to the db" %
             ((endFlushTime - startFlushTime) * 1000))
def delete(machine_name):
    """Delete the Machine row matching *machine_name*.

    Returns True on success, False when the delete raises -- the original
    returned None on success, unlike the sibling delete() helpers.
    """
    del_data = (Machine.delete().where(Machine.machine_name == machine_name))
    try:
        del_data.execute()
    except Exception:
        log.exception('exception')
        return False
    else:
        # BUGFIX: explicit success value for consistency with the other
        # delete helpers in this project.
        return True
def do_GET(self):
    """Serve a GET: answer robots.txt for bots, reject any path other
    than "/", reject non-icy clients, and capture the User-Agent."""
    # Handle well-behaved bots
    _path = self.path.strip()
    log.info(u"Request path: %s" % _path)
    if _path == u"/robots.txt":
        self.send(u"User-agent: *\nDisallow: /\n")
    elif _path != u"/":
        self.send_error(403, u"Bad request.\n")
    else:
        # path is "/" -- examine some headers.  Known client candidates
        # (observed header dumps): cmus, mplayer, NSPlayer and
        # squeezebox/iTunes; all of them send "Icy-MetaData: 1" when they
        # can handle inline stream metadata.
        H, icy_client = self.headers, False
        try:
            icy_client = (int(H[u'icy-metadata']) == 1)
        except KeyError as e:  # BUGFIX: was "except KeyError, e" (py3-incompatible)
            log.error(u"non-icy client: %s" % e)
            log.error(self.address_string())
        if not icy_client:
            self.send_response(
                400, u"Bad client.\n Try http://cmus.sourceforge.net/\n")
            return False
        user_agent = None
        try:
            user_agent = H[u'user-agent']
        except KeyError:  # the bound exception was unused
            log.exception(u"Couldn't get user agent.")
def get_all_user_task_statistic_by_time(begin_time=0, end_time=0):
    """Count tasks per creator per day within [begin_time, end_time].

    Returns (statistics, usernames) where statistics maps
    create_date -> {creator: task_sum}, or False on bad input or on a
    query error.
    """
    if begin_time == 0 or end_time == 0 or begin_time > end_time:
        return False
    user_list = [row.__dict__['_data']['username']
                 for row in User.select(User.username)]
    statistics = {}
    query = (Task
             .select(Task.creator,
                     fn.COUNT(Task.task_id).alias('task_sum'),
                     fn.FROM_UNIXTIME(Task.create_time,
                                      '%Y%m%d').alias('create_date'))
             .where((Task.create_time >= begin_time) &
                    (Task.create_time <= end_time))
             .group_by(Task.creator, SQL('create_date')))
    try:
        for row in query.execute():
            creator = row.__dict__['_data']['creator']
            task_sum = row.__dict__['task_sum']
            create_date = row.__dict__['create_date']
            statistics.setdefault(create_date, {})[creator] = task_sum
    except Exception:
        log.exception('exception')
        return False
    return statistics, user_list
def start(self):
    """Refuse to start when a pid file already exists; otherwise record
    our pid and serve forever, translating common socket errors into
    clean exits."""
    pid = self.read_pid()
    if pid:
        log.error(u"Server already running, pid %s." % pid)
        sys.exit(-1)
    else:
        self.write_pid(os.getpid())
    server = None
    try:
        time.sleep(0.001)
        server = Server((Config.hostname, self.port))
        log.info(u"Bloops and bleeps at http://%s:%s" % server.server_address)
        server.serve_forever()
        # will never reach this line
    except socket.error as se:  # BUGFIX: was "except socket.error, se" (py3-incompatible)
        if se.errno == errno.EACCES:
            log.warn(u"Bad port: %s" % self.port)
            sys.exit(se.errno)
        elif se.errno == errno.ECONNREFUSED:
            log.warn(u"Connection refused: %s" % self.port)
            sys.exit(se.errno)
        else:
            log.exception(se)
def save(data):
    """Pickle *data* to Config.datapath.

    Returns True on success, False on failure (the exception is logged).
    """
    try:
        # BUGFIX: "except Exception, e" is Python-3-incompatible; also use
        # plain open() -- pickle output is binary, codecs.open() is meant
        # for text encodings -- and return an explicit False on failure
        # (the original fell through and returned None).
        with open(Config.datapath, 'wb') as picklef:
            pickle.dump(data, picklef)
        return True
    except Exception as e:
        log.exception(e)
        return False
# Scrobbler worker loop.  Drains ScrobbleItem objects from self.queue
# (non-blocking get), escapes the song tags, and submits either a
# NOW_PLAYING or a PLAYED notification to the scrobbler, re-queueing the
# item with an error flag on failure so it is retried after a 60s backoff.
# Sleeps 10s per pass (AS API enforced limit).  Left byte-identical: the
# retry/backoff control flow (break vs continue vs re-queue) is too
# order-sensitive to restyle safely from this collapsed py2 source.
def run(self): # well this is just fugly. call it "experimental" while Config.running: try: scrobble_item = self.queue.get(0) try: song = scrobble_item.song type = scrobble_item.type error = scrobble_item.error etime = scrobble_item.etime try: (tracknumber, artist, album, track) = [escape(item) for item in song.tags] except ValueError: log.info("skipping scrobble for {} (bad tags)".format(song.path)) continue if type == NOW_PLAYING: log.debug(u"scrobbling now playing %s %s %s" % (artist, track, album)) self.login() scrobbler.now_playing( artist, track) # now_playing auto flushes, apparently. don't call # flush here or it will throw an exception, which is not # what we want. elif type == PLAYED: # See: http://exhuma.wicked.lu/projects/python/scrobbler/api/public/scrobbler-module.html#login # if mimetype is wrong, length == 0 if song.length < 30: log.warn(u"song length %s" % song.length) # wait 60 seconds before re-trying # submission if error: if (time.time() - etime) < 60: break log.debug(u"scrobbling played %s %s %s %s" % (artist, track, album, song.length)) self.login() scrobbler.submit( artist, track, int(time.mktime(datetime.datetime.now().timetuple())), source=escape(u'P'), length=song.length, album=album) scrobbler.flush() except Exception as e: log.exception(u"scrobble error: %s" % e) # put it back scrobble_item.error = True scrobble_item.etime = time.time() self.queue.put(scrobble_item) except Queue.Empty: pass # AS API enforced limit -- do not change. time.sleep(10)
def judge_specify_rule(self, host, rule, data_set):
    """Evaluate one *rule* against *data_set* for *host* via the
    expression manager.

    Returns whatever exp_manager.judge returns, or [] if it raises
    (the exception is logged).
    """
    try:
        return self.exp_manager.judge(host, rule, data_set)
    except Exception:  # BUGFIX: was a bare except (also trapped SystemExit/KeyboardInterrupt)
        log.exception("get exception with h:%s r:%s" % (host, rule))
        return []
def mail_center():
    """Run the mail dispatch loop forever.

    Each pass sends any due periodic mail and alarm mail, then sleeps
    config.mail_interval seconds.  Failures are logged and the loop
    continues.
    """
    while True:
        try:
            _need_send_period_mail()
            _need_send_alarm_mail()
            time.sleep(config.mail_interval)
        except Exception:
            # BUGFIX: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit and made the loop unstoppable.
            log.exception("mail center loop die")
def collect(self, recordTime):
    """Run one collection cycle for *recordTime*: (re)initialise state,
    collect metrics, then collect app data.  Any failure is logged."""
    try:
        log.info("begin to collect recordTime:%d" % recordTime)
        self.init_all(recordTime)
        self.collectMetrics()
        self.collectApp()
    except Exception:  # BUGFIX: was a bare except
        log.exception("collect catch exception recordTime:%d" % recordTime)
def judge_specify_rule(self, host, rule, data_set):
    """Evaluate one *rule* against *data_set* for *host* via the
    expression manager.

    Returns whatever exp_manager.judge returns, or [] if it raises
    (the exception is logged).
    """
    try:
        return self.exp_manager.judge(host, rule, data_set)
    except Exception:  # BUGFIX: was a bare except (also trapped SystemExit/KeyboardInterrupt)
        log.exception("get exception with h:%s r:%s" % (host, rule))
        return []
def row_count():
    """Return the number of Machine rows; 0 when the count query fails."""
    try:
        return Machine.select().count()
    except Exception:
        log.exception('exception')
        return 0
def row_count():
    """Return the number of Permissions rows; 0 when the count query fails."""
    try:
        return Permissions.select().count()
    except Exception:
        log.exception('exception')
        return 0
def get(access_token):
    """Return the session row data for *access_token* as a dict, or
    False when no row matches / the query fails."""
    try:
        row = Session.select().where(
            Session.access_token == access_token).get()
    except Exception:
        log.exception('exception')
        return False
    return row.__dict__['_data']
def delete(permisson):
    """Delete the Permissions row whose permission field equals
    *permisson*.  Returns True on success, False when the delete raises."""
    query = Permissions.delete().where(Permissions.permission == permisson)
    try:
        query.execute()
    except Exception:
        log.exception('exception')
        return False
    return True
def get_machine_ip_list():
    """Return every machine's inside_ip, ordered by id; False on error."""
    try:
        return [row.__dict__['_data']['inside_ip']
                for row in Machine.select().order_by(Machine.id)]
    except Exception:
        log.exception('exception')
        return False
def get_user_list():
    """Return every username, ordered by User.id; False on query error."""
    try:
        return [row.__dict__['_data']['username']
                for row in User.select().order_by(User.id)]
    except Exception:
        log.exception('exception')
        return False
def get_sub_id_list(parent_id):
    """Return the user_ids of all users whose parent_id is *parent_id*;
    False on query error."""
    try:
        return [row.__dict__['_data']['user_id']
                for row in User.select().where(User.parent_id == parent_id)]
    except Exception:
        log.exception('exception')
        return False
def collectApp(self):
    """Collect finished apps from the app list, enrich them with their
    job-history data, then flush all accumulated records to the DB.
    """
    # Fetch the list of apps completed during the past period.
    apps = self.getAppList()
    if not apps or not apps["apps"]:
        log.info("no appid match")
        return
    startCollectTime = time.time()
    # Walk the app list and fetch each app's details.
    for app in apps["apps"]["app"]:
        startTime = time.time()
        appid = app["id"]
        if app['state'] == 'FINISHED':
            try:
                jobid = appidToJobid(appid)
                jobHistory = self.getJobHistory(jobid)
                if jobHistory:
                    jobCounter = self.getJobCounter(jobid)
                    # NOTE(review): fetched but never used below -- kept in
                    # case the call has side effects; confirm and drop.
                    jobTasks = self.getJobAllTask(jobid)
                    self.updateWithAppid(app, jobHistory, jobCounter)
                else:
                    log.info(
                        "find some app run success but no history file:" +
                        appid)
            except Exception:  # was a bare except
                log.exception("get error while doing app " + appid)
        else:
            self.updateWithNotSuccAppid(app)
        # BUGFIX: endTime was assigned only in the FINISHED branch, so the
        # timing log below raised NameError (or logged a stale value) when
        # an app was not FINISHED.  Stamp it unconditionally here.
        endTime = time.time()
        log.info("getting appid: %s using %d ms" %
                 (appid, (endTime - startTime) * 1000))
    endCollectTime = time.time()
    log.info("using %d ms to collect the data" %
             ((endCollectTime - startCollectTime) * 1000))
    startFlushTime = time.time()
    # Flush the collected records (apps, NMs, RMs) to the database.
    session = database.getSession()
    for (appid, appRecordValue) in self.appList.items():
        session.merge(appRecordValue)
    session.commit()
    log.info("push %d appRecord into table" % (len(self.appList)))
    for (key, NmRecordValue) in self.nmList.items():
        session.merge(NmRecordValue)
    session.commit()
    log.info("push %d NmRecord into table" % (len(self.nmList)))
    for (key, RmRecordValue) in self.rmList.items():
        session.merge(RmRecordValue)
    session.commit()
    log.info("push %d RmRecord into table" % (len(self.rmList)))
    endFlushTime = time.time()
    log.info("using %d ms to push to the db" %
             ((endFlushTime - startFlushTime) * 1000))
def add(permissions_dict):
    """Create a Permissions row from *permissions_dict*; True on success,
    False when save fails."""
    record = Permissions()
    for field, value in permissions_dict.items():
        setattr(record, field, value)
    try:
        record.save()
    except Exception:
        log.exception('exception')
        return False
    return True
def add(machine_info_dict):
    """Create a Machine row from *machine_info_dict*; True on success,
    False when save fails."""
    record = Machine()
    for field, value in machine_info_dict.items():
        setattr(record, field, value)
    try:
        record.save()
    except Exception:
        log.exception('exception')
        return False
    return True
def add(user_dict):
    """Create a User row from *user_dict*; True on success, False when
    save fails."""
    record = User()
    for field, value in user_dict.items():
        setattr(record, field, value)
    try:
        record.save()
    except Exception:
        log.exception('exception')
        return False
    return True
def delete(username):
    """Delete the User row for *username*.

    Returns True only when the row is verifiably gone afterwards, False
    on delete failure or if the user still exists.
    """
    query = User.delete().where(User.username == username)
    try:
        query.execute()
    except Exception:
        log.exception('exception')
        return False
    # Confirm the row no longer exists before reporting success.
    return not get(username)
def login(self):
    """Read credentials from scrobbler.cfg (one directory above this
    module) and log in to the scrobbler.

    A ProtocolError sleeps 49s (presumably a rate-limit backoff -- TODO
    confirm); any other failure is logged.
    """
    parser = ConfigParser.ConfigParser()
    cwd = os.path.realpath(os.path.dirname(__file__) + u'/..')
    parser.read(os.path.join(cwd, u'scrobbler.cfg'))
    username = parser.get(u'scrobbler', u'username')
    password = hashlib.md5(parser.get(u'scrobbler', u'password')).hexdigest()
    try:
        scrobbler.login(user=username, password=password)
    except scrobbler.ProtocolError:
        time.sleep(49)
    except Exception as e:
        log.exception(u"Couldn't login: %s" % e)
def update(user_dict):
    """Update the User identified by user_dict['user_id'] with every
    other key in the dict; True on success, False when save fails."""
    user = User.get(user_id=user_dict['user_id'])
    for field, value in user_dict.items():
        if field == 'user_id':
            continue
        setattr(user, field, value)
    try:
        user.save()
    except Exception:
        log.exception('exception')
        return False
    return True
def delete(username, ip, system_user):
    """Delete the SshKeyInfo row matching (username, ip, system_user).

    Returns True on success, False when the delete raises.
    """
    query = SshKeyInfo.delete().where(
        (SshKeyInfo.username == username) &
        (SshKeyInfo.ip == ip) &
        (SshKeyInfo.system_user == system_user))
    try:
        query.execute()
    except Exception:
        log.exception('exception')
        return False
    return True
def delete(access_token):
    """Delete the Session row for *access_token*.

    Returns True only when the row is verifiably gone afterwards, False
    on delete failure or if the session still exists.
    """
    query = Session.delete().where(Session.access_token == access_token)
    try:
        query.execute()
    except Exception:
        log.exception('exception')
        return False
    # Confirm the row was actually removed.
    return False if get(access_token) else True
def update(machine_dict):
    """Update the Machine identified by machine_dict['machine_name'] with
    every other key in the dict; True on success, False when save fails."""
    machine = Machine.get(machine_name=machine_dict['machine_name'])
    for field, value in machine_dict.items():
        if field == 'machine_name':
            continue
        setattr(machine, field, value)
    try:
        machine.save()
    except Exception:
        log.exception('exception')
        return False
    return True
def update(permissions_dict):
    """Update the Permissions row keyed by permissions_dict['permission']
    with every other key in the dict.

    Returns True on success, False when save fails.
    """
    # BUGFIX: was Permissions.get(permisson=...) -- 'permisson' is not a
    # model field (the delete() helper filters on Permissions.permission),
    # so the lookup could never match the intended row.
    permission = Permissions.get(permission=permissions_dict['permission'])
    # BUGFIX: the loop iterated the undefined name `update_dict`, raising
    # NameError on every call; it must iterate permissions_dict.
    for key in permissions_dict:
        if key != 'permission':
            setattr(permission, key, permissions_dict[key])
    try:
        permission.save()
    except Exception:
        log.exception('exception')
        return False
    else:
        return True
def judge_host_rule(self):
    """Run every rule of every host through judge_specify_rule.

    A pseudo host "cluster" is appended so cluster-wide rules are
    evaluated too.  Any failure in the sweep is logged.
    """
    try:
        host_list = self.get_host_list()
        # Append the cluster-wide pseudo host so cluster rules also run.
        host_list.append("cluster")
        for host in host_list:
            rule_list = self.rule_manager.get_rule_by_host(host)
            data_set = self.data_manager.get_data_by_host(host)
            for rule in rule_list:
                self.judge_specify_rule(host, rule, data_set)
    except Exception:  # BUGFIX: was a bare except
        log.exception("get exeception judge_host_rule")
def run_script(tgt, script_path, script_arg):
    """Run a script on the given salt minions via cmd.script.

    tgt:         list of minion hostnames, e.g. ['host1', 'host2']
                 (must not be ['*'])
    script_path: string, e.g. salt://scripts/test.sh or
                 /srv/run/scripts/test.sh
    script_arg:  argument string for the script; it is interpolated into
                 one quoted argument.  (The original docstring called this
                 "a list", which does not match the usage -- NOTE(review):
                 confirm against callers.)

    Returns the salt result dict, or None when the call raises (the
    exception is logged).
    """
    client = salt.client.LocalClient()
    try:
        result = client.cmd(tgt, 'cmd.script',
                            [script_path, '"%s"' % script_arg],
                            expr_form='list')
    except Exception:
        log.exception('exception')
        # Explicit failure value; the original fell through and returned
        # None implicitly.
        return None
    return result
def process_exception(self, request, exception):
    """Middleware exception hook: translate a BusinessExceptionError into
    its own retcode/message/data payload, anything else into the generic
    server-error payload (after logging it), and return the payload as a
    JSON HttpResponse."""
    if isinstance(exception, BusinessExceptionError):
        payload = {
            "retcode": exception.code,
            "message": exception.msg,
            "data": exception.data,
        }
    else:
        log.exception('process request failed', exc_info=exception)
        payload = {
            "retcode": Resp.RESP_SERVER_ERROR.code,
            "message": Resp.RESP_SERVER_ERROR.msg,
            "data": {},
        }
    return HttpResponse(json.dumps(payload),
                        content_type='application/json; charset=utf-8')
def row_count(id_type):
    """Count User rows, restricted to User.type == id_type when id_type
    is truthy.  Returns 0 when the count query fails (error logged)."""
    query = User.select()
    if id_type:
        query = query.where(User.type == id_type)
    try:
        return query.count()
    except Exception:
        log.exception('exception')
        return 0
def start(self):
    """Refuse to start when a pid file already exists; otherwise record
    our pid and construct the server, translating EACCES into a clean
    exit."""
    pid = self.read_pid()
    if pid:
        log.error("Server already running, pid %s." % pid)
        sys.exit(-1)
    else:
        self.write_pid(os.getpid())
    try:
        time.sleep(0.001)
        Server((Config.hostname, self.port))
        # will never reach this line
    except socket.error as se:  # BUGFIX: was "except socket.error, se" (py3-incompatible)
        if se.errno == errno.EACCES:
            log.warn("Bad port: %s" % self.port)
            sys.exit(se.errno)
        else:
            log.exception(se)
def judge_host_rule(self):
    """Run every rule of every host through judge_specify_rule, collect
    the resulting alarms, and hand them to the callback manager.

    A pseudo host "cluster" is appended so cluster-wide rules are
    evaluated too.  Any failure in the sweep is logged.
    """
    try:
        alarm_list = []
        host_list = self.get_host_list()
        # Append the cluster-wide pseudo host so cluster rules also run.
        host_list.append("cluster")
        for host in host_list:
            rule_list = self.rule_manager.get_rule_by_host(host)
            data_set = self.data_manager.get_data_by_host(host)
            for rule in rule_list:
                temp = self.judge_specify_rule(host, rule, data_set)
                alarm_list.extend(temp)
        self.cb_manager.deal_alarm_list(alarm_list)
    except Exception:  # BUGFIX: was a bare except
        log.exception("get exeception judge_host_rule")
def start(self):
    """Refuse to start when a pid file already exists; otherwise record
    our pid and serve forever, translating common socket errors into
    clean exits."""
    pid = self.read_pid()
    if pid:
        log.error(u"Server already running, pid %s." % pid)
        sys.exit(-1)
    else:
        self.write_pid(os.getpid())
    server = None
    try:
        time.sleep(0.001)
        server = Server((Config.hostname, self.port))
        log.info(u"Bloops and bleeps at http://%s:%s" % server.server_address)
        server.serve_forever()
        # will never reach this line
    except socket.error as se:  # BUGFIX: was "except socket.error, se" (py3-incompatible)
        if se.errno == errno.EACCES:
            log.warn(u"Bad port: %s" % self.port)
            sys.exit(se.errno)
        elif se.errno == errno.ECONNREFUSED:
            log.warn(u"Connection refused: %s" % self.port)
            sys.exit(se.errno)
        else:
            log.exception(se)
def truncate():
    """Truncate the data file at Config.datapath to zero bytes.

    Returns True on success, False on failure (the exception is logged).
    """
    try:
        # BUGFIX: the original opened the file and never closed it (handle
        # leak); a context manager closes it deterministically.  Opening
        # in 'wb' mode is what performs the truncation.
        with codecs.open(Config.datapath, u'wb'):
            pass
        return True
    except Exception as e:  # BUGFIX: was "except Exception, e" (py3-incompatible)
        log.exception(e)
        return False
# Streamer.stream: main shoutcast-style streaming loop.  For each song
# from the playlist it spawns ffmpeg to transcode to mp3 on stdout,
# sends the audio to the client in buffer_size chunks, and -- when
# icy_client is True -- injects ICY metadata every Config.metaint bytes.
# It tracks elapsed/progress/sum_bytes in self.playlist.data, honours the
# 'skip' and 'stopped' flags (clearing song so the aborted track is not
# scrobbled), enqueues PLAYED/NOW_PLAYING scrobbles when Config.scrobble
# is set, and tears down on socket errors (EPIPE/ECONNRESET = client
# went away).  Left byte-identical: this collapsed py2 loop's control
# flow (nested while/try, mid-line assignment continuing onto the next
# physical line) is too order-sensitive to restyle safely here.
def stream(self, icy_client=False): song = None while self.playlist.data[u'running']: if Config.scrobble and song: # just played one . . . scrobble it self.scrobble_queue.put(ScrobbleItem(PLAYED, song)) #log.debug("enqueued played") #log.debug(song) # new song song = self.playlist.get_song() song_start_time = time.time() self.playlist.data[u"progress"] = 0 if not song: log.warn(u"no playlist, won't stream") self.playlist.data[u'status'] = u'stopped' self.byte_count = 0 self.empty_scrobble_queue() return if Config.scrobble: #log.debug("enqueued now playing") self.scrobble_queue.put(ScrobbleItem(NOW_PLAYING, song)) log.info(u'> %s' % unicode(song)) transcode = None try: # this loop gets some of its ideas about the shoutcast protocol from Amarok buffer = 0 buffer_size = Config.buffer_size metadata_interval = Config.metaint try: transcode.stdout.close() except: pass #cif song.mimetype[0:5] in ["audio", "video"]: transcode = subprocess.Popen([u"/usr/bin/ffmpeg", u"-i", song.path, u"-vn", u"-loglevel", u"warning", u"-qscale:a", u"0", u"-f", u"mp3", u"-"], stdout=subprocess.PIPE, shell=False) self.dirty_meta = True skip = False while self.playlist.data[u'running'] and transcode: bytes_until_meta = (metadata_interval - self.byte_count) if bytes_until_meta == 0: if icy_client: metadata = self.get_meta(song) self.request.send(metadata.encode(u'ascii', u'ignore')) self.byte_count = 0 else: if bytes_until_meta < buffer_size: chunk_bytes = bytes_until_meta else: chunk_bytes = buffer_size buffer = transcode.stdout.read(chunk_bytes) self.request.send(buffer) buflen = len(buffer) self.byte_count += buflen self.playlist.data[u"sum_bytes"] += buflen elapsed = time.time() - song_start_time self.playlist.data[u'elapsed'] = elapsed # set percentage elapsed try: self.playlist.data[u"progress"] = float(elapsed * 100) / song.length except ZeroDivisionError: self.playlist.data[u"progress"] = 0 if len(buffer) == 0: break if self.playlist.data[u'skip']: log.info(u">>") skip = True song = 
None # don't scrobble self.playlist.data[u"elapsed"] = 0 self.playlist.data[u"progress"] = 0 break if self.playlist.data[u'status'] == u'stopped': log.info(u".") skip = True song = None # don't scrobble self.playlist.data[u"elapsed"] = 0 break if not skip: # increment the counter if we're not ffwding self.playlist.next() else: self.playlist.data[u'skip'] = False self.dirty_meta = True except error, e: if isinstance(e.args, tuple): print "errno is %d" % e[0] if e[0] == errno.EPIPE: # remote peer disconnected print "Detected remote disconnect" elif e.errno == errno.ECONNRESET: self.empty_scrobble_queue() log.info(u"Client disconnected") else: log.info(u"Unknown socket error") self.empty_scrobble_queue() log.exception(errno.errorcode[e.errno]) else: print "socket error ", e self.request.close() self.playlist.data[u'status'] = 'stopped' break # while except IOError, e: log.info("IO ERROR")
# NOTE(review): this line fuses the tail of a method (its `def` is outside
# this view -- it builds a threadpool request around Collector.collect and
# enqueues it on self.pool) with the module's daemonised entry point: under
# "__main__" it acquires a PID lock file, preserves the log handler's fd,
# opens a DaemonContext, and runs CollectorMain().run() until it exits,
# closing the daemon context in a finally.  Left byte-identical because the
# enclosing method definition is not visible from here.
log.info( "i did at %s %d" % (util.get_local_time(), record_time) ) coll = Collector() requests = threadpool.makeRequests(coll.collect, [(record_time)], self.print_result) for req in requests: log.info("get request") self.pool.putRequest(req) os.chdir(config.uhphome) APP = "collect" if __name__ == "__main__": log.info("start...") try: pidfile = "pids/%s/%s.pid" % (APP, APP) pidfile = lockfile.pidlockfile.PIDLockFile(pidfile) files_preserve=[logging.root.handlers[1].stream.fileno()] dmn = daemon.DaemonContext(None, os.getcwd(), pidfile=pidfile, files_preserve=files_preserve) dmn.open() try: #start collect loop main = CollectorMain() main.run() finally: dmn.close() except Exception as e: log.exception(e) log.info("end!")
def do_GET(self):
    """Serve a GET: answer robots.txt for bots, reject any path other
    than "/", reject non-icy clients, then reply with headers (do_HEAD)
    and hand the socket to a Streamer."""
    log.debug("post")
    # Handle well-behaved bots
    _path = self.path.strip()
    log.info("Request path: %s" % _path)
    if _path == "/robots.txt":
        self.send("User-agent: *\nDisallow: /\n")
    elif _path != "/":
        self.send_error(403, "Bad request.\n")
    else:
        # path is "/" -- examine some headers.  Known client candidates
        # (observed header dumps): cmus, mplayer, NSPlayer and
        # squeezebox/iTunes; all of them send "Icy-MetaData: 1" when they
        # can handle inline stream metadata.
        H, icy_client = self.headers, False
        try:
            icy_client = (int(H['icy-metadata']) == 1)
        except KeyError as e:  # BUGFIX: was "except KeyError, e" (py3-incompatible)
            log.error("non-icy client: %s" % e)
            log.error(self.address_string())
        if not icy_client:
            self.send_response(
                400, "Bad client.\n Try http://cmus.sourceforge.net/\n")
            return False
        user_agent = None
        try:
            user_agent = H['user-agent']
        except KeyError:  # the bound exception was unused
            log.exception("Couldn't get user agent.")
        if user_agent:
            log.info("User-Agent: %s" % user_agent)
        self.do_HEAD(icy_client)
        Streamer(self.request, self.server.port).stream(icy_client)
# deal_alarm_list: process one batch of alarms.  Diffs the incoming list
# against the previous state (diff_key_word), pushes the key-word map to
# the mail center, rewrites the alarm_now table from self.key_word_map,
# triggers auto-fix tasks for key words whose consecutive-alarm count hits
# the configured threshold (or twice it), then records each new/cleared
# alarm in alarm_list and fires the callback unless the key word matches
# an ignore pattern from alarm_assist.  Left byte-identical: the
# diff/commit/ignore ordering in this collapsed py2 method is too
# order-sensitive to restyle safely (note it also shadows the builtin
# `str` in the ignore-pattern loop, and the final bare except on the
# second physical line only logs before closing the session).
def deal_alarm_list(self, alarm_list): new_key_word, old_key_word = self.diff_key_word(alarm_list) mail_center.push_key_word_map(self.key_word_map) session = database.getSession() # 更新数据库的alarm_now表 for alarm_now in session.query(AlarmNow): session.delete(alarm_now) for (key_word, info) in self.key_word_map.items(): end = key_word.find("(") host = key_word[0:end] session.add(AlarmNow(key_word, host, info["msg"], "ERROR", info["count"], int(time.time()))) session.commit() # 根据连续告警次数尝试进行修复动作 try: fix_list = [] for auto_fix in session.query(AlarmAutofix): fix_list.append(auto_fix.format()) for (key_word, count) in self.key_word_map.items(): for auto_fix in fix_list: match = auto_fix["pattern"].match(key_word) if match and (count == auto_fix["count"] or count == auto_fix["count"] * 2): end = key_word.find("(") host = key_word[0:end] log.info(" build the auto fix tasks: %s %s %s " % (host, auto_fix["role"], auto_fix["task"])) database.build_task( session, "auto_fix", auto_fix["service"], host, auto_fix["role"], auto_fix["task"] ) except: log.exception("autofix catch exception") try: # 发出邮件 记录动作 ignore_key_word = session.query(AlarmAssist).filter(AlarmAssist.name == "ignore_key_word").first() ignore_list = [] if ignore_key_word != None: # ignore_list = ignore_key_word.value.split(",") for str in ignore_key_word.value.split(","): if len(str) > 1: ignore_list.append(re.compile(str)) # log.info(ignore_list) for alarm_state in new_key_word: key_word = alarm_state["key_word"] session.add(AlarmList(alarm_state["key_word"], "", alarm_state["msg"], "ERROR", int(time.time()))) if self.is_match(ignore_list, key_word): log.info("ignore %s" % key_word) else: self._callback(alarm_state) for alarm_state in old_key_word: key_word = alarm_state["key_word"] session.add(AlarmList(alarm_state["key_word"], "", alarm_state["msg"], "INFO", int(time.time()))) if self.is_match(ignore_list, key_word): log.info("ignore %s" % key_word) else: self._callback(alarm_state) session.commit() except: 
log.exception("deal callback catch exception") session.close()