def check(self):
    """Check our monitoring target.

    Fetches the target URL, times the request, and validates the body.
    Returns the elapsed time as a formatted string on success, or False
    on a content mismatch or connection error (details are logged).
    """
    result = False
    result_log = "{url}: {result}"
    try:
        started = time()
        contents = self._fetch_contents()
        # time() is taken after the fetch, so this measures the full round trip
        elapsed = "{0:.3f}".format(time() - started)
        if self._check_contents(contents, self.settings):
            status = "GOOD {0} ms response time".format(elapsed)
            log.info(result_log.format(url=self.url, result=status))
            result = elapsed
        else:
            status = "BAD, Content mismatch, {0} ms response time".format(elapsed)
            log.warning(result_log.format(url=self.url, result=status))
    except IOError as err:
        log.error(result_log.format(
            url=self.url,
            result="BAD, Connection error: {0}".format(err)
        ))
    return result
def on_mqtt_connect(self, mosq, obj, flags, result_code):
    """Handle connections (or failures) to the broker.

    Called after the client receives a CONNACK from the broker in
    response to connect(). result_code meanings:
      0: Success
      1: Refused . unacceptable protocol version
      2: Refused . identifier rejected
      3: Refused . server unavailable
      4: Refused . bad user name or password (MQTT v3.1 broker only)
      5: Refused . not authorised (MQTT v3.1 broker only)
    """
    refusal_reasons = {
        1: "Connection refused - unacceptable protocol version",
        2: "Connection refused - identifier rejected",
        3: "Connection refused - server unavailable",
        4: "Connection refused - bad user name or password",
        5: "Connection refused - not authorised",
    }
    if result_code == 0:
        log.info("Connected to {}:{}".format(self.host, self.port))
        # See also the will_set function in connect() below
        self.mqttc.publish(self.lwt, "1", qos=0, retain=True)
    elif result_code in refusal_reasons:
        log.error(refusal_reasons[result_code])
    else:
        log.warning(
            "Connection failed - result code {}".format(result_code))
def check(self):
    """Check our monitoring target.

    Times a fetch of the target URL and validates its contents.
    Returns the formatted elapsed time on success, otherwise False;
    outcomes are logged either way.
    """
    result = False
    result_log = "{url}: {result}"
    try:
        t0 = time()
        body = self._fetch_contents()
        t1 = time()
        elapsed = "{0:.3f}".format(t1 - t0)
        if not self._check_contents(body, self.settings):
            outcome = "BAD, Content mismatch, {0} ms response time".format(elapsed)
            log.warning(result_log.format(url=self.url, result=outcome))
        else:
            outcome = "GOOD {0} ms response time".format(elapsed)
            log.info(result_log.format(url=self.url, result=outcome))
            result = elapsed
    except IOError as err:
        log.error(result_log.format(
            url=self.url,
            result="BAD, Connection error: {0}".format(err)
        ))
    return result
def __new__(cls, **kargs):
    """Return the already-registered task for identical kwargs, else a new one.

    TaskManager deduplicates make-torrent tasks by class name + arguments,
    so repeat requests share a single task instance.
    """
    existing = TaskManager().find_task(cls.__name__, kargs)
    if not existing:
        log.warning("create a new make-torrent task!")
        return super(MakeTorrentTask, cls).__new__(cls)
    log.warning("make-torrent task exists.task_id: %s" % existing)
    return TaskManager().tasks[existing]
def getRegexTypeId(regType):
    """Look up the Mongo `_id` of a regex_type row by its `type` field.

    Returns None (after logging a warning) when the type is not present;
    the row must be added manually before it can be referenced.
    """
    rows = list(db.regex_type.find({'type': regType}))
    if not rows:
        log.warning('regex_type无%s信息,需先手动添加' % regType)
        return None
    return rows[0]['_id']
def getWebsiteId(domain):
    """Look up the Mongo `_id` of a website row by its `domain` field.

    Returns None (after logging a warning) when the domain is unknown;
    the row must be added manually before it can be referenced.
    """
    rows = list(db.website.find({'domain': domain}))
    if not rows:
        log.warning('website表中无%s信息,需先手动添加' % domain)
        return None
    return rows[0]['_id']
def consume_item(self, event):
    """Upload one queued filesystem event to Dropbox.

    Reports 0% progress before the upload and 100% afterwards; an IOError
    from the upload (e.g. the source file vanished) is logged and swallowed
    so the consumer keeps draining its queue.
    """
    log.info('uploading to Dropbox: %s -> %s' %
             (event.source_absolute, event.target_absolute))
    self.send_progress(event.source_absolute, 0.0)
    try:
        self._upload(event, event.target_absolute)
    except IOError as e:
        # file was deleted immediately after being queued
        # fix: original logged 'upload failed' + str(e) with no separator,
        # producing e.g. "upload failed[Errno 2] ..."
        log.warning('upload failed: %s' % e)
    self.send_progress(event.source_absolute, 1.0)
def __key_suit(self):
    """Collect every value of self.__key across the operate lists into
    self.__tmp_date, then warn if the key appeared in no record at all.
    """
    for group in self.__operate_list_dict:
        for record in group:
            self.__tmp_date.add(record.get(self.__key))
    # records lacking the key contribute None; drop that placeholder
    # (discard == remove guarded by KeyError)
    self.__tmp_date.discard(None)
    if not self.__tmp_date:
        log.warning("分类参数不在数据中")
def __new__(cls, torrent_file, save_dir, seed_time=0):
    """One Torrent instance per torrent file.

    Asks the Vtrans session whether this file is already tracked; if so,
    the existing object is returned instead of allocating a new one.
    """
    from vtrans import Vtrans
    cls.vtrans = Vtrans()
    known_id = cls.vtrans.find_torrent_by_file(torrent_file)
    if not known_id:
        log.warning("create a new torrent object!")
        return super(Torrent, cls).__new__(cls)
    log.warning("torrent file exists. return torrent_id: %s, id: %s"
                % (known_id, id(cls.vtrans.torrent_dict[known_id])))
    return cls.vtrans.torrent_dict[known_id]
def consume_item(self, event):
    """Upload one queued filesystem event to Google Drive.

    Reports 0% progress up front and always reports 100% on exit;
    IOErrors from the upload are logged and swallowed.
    """
    log.info('uploading to GoogleDrive: %s -> %s' %
             (event.source_absolute, event.target_absolute))
    self.send_progress(event.source_absolute, 0.0)
    # TODO handle dir/file removal
    try:
        if not event.isdir:
            self._put_file(event.source_absolute, event.target_absolute)
        else:
            # directories: just ensure the remote folder chain exists
            self._path_to_ids(event.target_absolute, create_missing=True)
    except IOError as e:
        # source presumably deleted right after being queued — best effort
        log.warning('upload failed' + str(e))
    finally:
        self.send_progress(event.source_absolute, 1.0)
def __get_swagger_path_data(self, path: str):
    """Resolve a swagger "#/definitions/..." reference to its properties dict.

    Nested occurrences of the same reference inside the result are prefixed
    with "itself" to break infinite expansion loops. Anything that is not a
    definitions reference — or that the backend never described — is
    returned unchanged.
    """
    try:
        # Only "#/definitions/<Name>" paths are expanded; everything else
        # passes straight through.
        if not path.startswith("#/definitions/"):
            return path
        parts = path.split("/")
        props = self.__tmp_swagger_response.get(parts[1]).get(
            parts[2]).get("properties")
        # Round-trip through JSON so the self-reference can be tagged as a
        # plain substring of the serialized form.
        serialized = json.dumps(props)
        return json.loads(serialized.replace(path, "itself" + path))
    except AttributeError:
        # Backend omitted this definition (a .get() returned None);
        # hand the path back untouched.
        log.warning("服务器未告知%s数据结构,原样返回" % path)
        return path
def _loop_torrent_dict(self):
    """One service pass over every tracked torrent.

    For each torrent with a live handle: print a one-line status to stdout,
    switch the handle to super-seeding once the download completes, and
    remove torrents whose seeding time exceeded their configured limit.
    (Python 2 code: uses the print statement.)
    """
    #print "torrent service thread running......"
    from vtrans import Vtrans
    vtrans = Vtrans()
    # Drain pending libtorrent session alerts before inspecting handles.
    vtrans.read_session_alerts()
    torrent_dict = self.torrent_dict
    #print "torrent_dict: ", torrent_dict, id(torrent_dict)
    for torrent in torrent_dict.values():
        #print "serving torrent:", torrent.torrent_file
        # Torrents not yet added to the session have no handle; skip them.
        if not torrent.torrent_handler:
            continue
        s = torrent.torrent_handler.status()
        # Index positions match libtorrent's torrent_status.state values.
        state_str = ('queued', 'checking', 'downloading metadata',\
            'downloading', 'finished', 'seeding', 'allocating',\
            'checking fastresume')
        # '\r' + trailing comma keep the status updating on a single console line.
        print '\ractive_time: %d, %.2f%% complete (down: %.1f kb/s up: %.1f kB/s peers: %d, seeds: %d) %s' % \
            (s.active_time, s.progress * 100, s.download_rate / 1000, s.upload_rate / 1000, \
            s.num_peers, s.num_seeds, state_str[s.state]),
        sys.stdout.flush()
        #print "s.active_time:", s.active_time, type(s.active_time)
        #print "torrent.is_seed:", torrent.is_seed
        #print "handler.is_seed():", torrent.torrent_handler.is_seed()
        # First transition into seeding: enable super-seeding exactly once.
        if not torrent.is_seed and torrent.torrent_handler.is_seed():
            # log.debug("%s will super seed!" % torrent.torrent_file)
            torrent.torrent_handler.super_seeding()
            torrent.is_seed = True
        # state 3 == downloading with no seeds; re-announce logic is disabled.
        if s.state == 3 and s.num_seeds == 0 and s.active_time > 0 and s.active_time % 10 == 0:
            #log.warning("\n %s find no seeds for 10s, let's re-announce!!!!!!" % torrent.torrent_file)
            #torrent.torrent_handler.force_reannounce()
            #torrent.torrent_handler.scrape_tracker()
            #torrent.torrent_handler.force_recheck()
            pass
        # seed_time == 0 means "seed forever": never expire this torrent.
        if torrent.seed_time == 0:
            #log.debug("%s seed for ever..." % torrent.torrent_file)
            continue
        #if torrent.torrent_handler.is_seed() and s.active_time > torrent.seed_time:
        if torrent.torrent_handler.is_seed() and s.seeding_time > torrent.seed_time:
            log.warning("%s seeding timeout! will remove it!!!" % torrent.torrent_file)
            Vtrans().remove_torrent(torrent)
def __init__(self, torrent_file, save_dir, seed_time=0): if hasattr(self, "torrent_id"): return self.torrent_id = gen_task_id() log.warning("Torrent Object init, torrent_id: %s, id: %s" % (self.torrent_id, id(self))) self.torrent_file = torrent_file self.save_dir = save_dir self.seed_time = seed_time self.lt_param = dict() self.lt_param['save_path'] = save_dir print "torrent_file:", torrent_file self.torrent_info = lt.torrent_info(torrent_file) self.lt_param['ti'] = self.torrent_info #if self.torrent_info.num_files() > 1: comment = self.torrent_info.comment() #if comment and json.loads(comment).get('type', 'file') == 'vhd': # log.debug("this is a vhd torrent-file.") # self.remap_files() self.remap_files() self.torrent_handler = None self.is_seed = False
def addDocumentary(websiteId, docName, abstract, url, episodeNum='', playNum='', totalLength='', releaseTime='', institutions='', cyclopediaMsg=''):
    '''
    @summary: Add a documentary record, deduplicated by (website_id, url).
    ---------
    @param websiteId: website id
    @param docName: documentary title
    @param abstract: synopsis
    @param url: documentary url
    @param episodeNum: number of episodes
    @param playNum: play count
    @param totalLength: running time
    @param releaseTime: release time
    @param institutions: broadcasting institution
    @param cyclopediaMsg: encyclopedia info
    ---------
    @result: saves a new row when no match exists; otherwise updates the
             existing row field-by-field, keeping whichever value is longer.
    '''
    record = {
        'website_id': websiteId,
        'doc_name': docName,
        'abstract': abstract,
        'url': url,
        'episode_num': episodeNum,
        'play_num': playNum,
        'total_length': totalLength,
        'release_time': releaseTime,
        'institutions': institutions,
        'cyclopedia_msg': cyclopediaMsg
    }
    key_query = {'website_id': websiteId, 'url': url}
    # Look for an existing documentary with the same website and url; if one
    # exists, keep the more complete (longer) value for every field and log
    # a per-field report of what was updated vs. kept.
    for doc in db.documentary.find(key_query, {'_id': 0}):
        changed = False
        report = '\n' + '-' * 50 + '\n'
        for key in list(doc.keys()):
            old_repr, new_repr = str(doc[key]), str(record[key])
            if len(old_repr) < len(new_repr):
                changed = True
                report = report + '更新 old %s: %s\n new %s: %s\n' % (
                    key, doc[key], key, record[key])
                doc[key] = record[key]
            else:
                report = report + '留守 old %s: %s\n new %s: %s\n' % (
                    key, doc[key], key, record[key])
        if changed:
            report = '已存在:\n' + report + '-' * 50
            log.warning(report)
            db.documentary.update(key_query, {'$set': doc})
        else:
            log.warning('已存在url: ' + url)
        # A match was handled either way — never fall through to save().
        return
    db.documentary.save(record)