Example #1
 def collect_diskinfo(self):
     parts = psutil.disk_partitions()
     setval = []
     devices = {}
     for part in parts:
         if not part.device in devices:
             devices[part.device] = 1
             diskval = {}
             diskval['device'] = part.device
             diskval['mountpoint'] = part.mountpoint
             try:
                 usage = psutil.disk_usage(part.mountpoint)
                 diskval['total'] = usage.total
                 diskval['used'] = usage.used
                 diskval['free'] = usage.free
                 diskval['percent'] = usage.percent
                 if part.mountpoint.startswith('/opt/docklet/local/volume'):
                     names = re.split('/', part.mountpoint)
                     container = names[-1]
                     self.etcdser.setkey('/%s/disk_use' % (container), diskval)
                 setval.append(diskval)
             except Exception as err:
                 logger.warning(traceback.format_exc())
                 logger.warning(err)
     self.etcdser.setkey('/diskinfo', setval)
     #print(output)
     #print(diskparts)
     return
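Every example on this page calls a module-level logger. A minimal setup sketch using the standard library logging module (the logger name and format string are assumptions, not taken from any of the projects above):

import logging

logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s %(levelname)s %(name)s: %(message)s")
logger = logging.getLogger(__name__)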
Example #2
 def run(self):
     global monitor_hosts
     global monitor_vnodes
     while not self.thread_stop:
         for worker in monitor_hosts.keys():
             monitor_hosts[worker]['running'] = False
         workers = self.nodemgr.get_nodeips()
         for worker in workers:
             try:
                 ip = worker
                 workerrpc = self.nodemgr.ip_to_rpc(worker)
                 # fetch data
                 info = list(eval(workerrpc.workerFetchInfo(self.master_ip)))  # the RPC returns a repr() string; eval() parses it (trusted input assumed)
                 #logger.info(info[0])
                 # store data in monitor_hosts and monitor_vnodes
                 monitor_hosts[ip] = info[0]
                 for container in info[1].keys():
                     owner = get_owner(container)
                     if not owner in monitor_vnodes.keys():
                         monitor_vnodes[owner] = {}
                     monitor_vnodes[owner][container] = info[1][container]
                 for user in info[2].keys():
                     if not user in monitor_vnodes.keys():
                         continue
                     else:
                         monitor_vnodes[user]['net_stats'] = info[2][user]
                         self.net_billings(user, info[2][user]['bytes_total'])
             except Exception as err:
                 logger.warning(traceback.format_exc())
                 logger.warning(err)
         time.sleep(2)
         #logger.info(History.query.all())
         #logger.info(VNode.query.all())
     return
Example #3
    def getSFTPResults(self, build):
        try:
            logger.debug("Catalog getSFTPResult: build=%s" % (build))
            tmpPath = tempfile.mkdtemp(prefix="autoport_")
            putdir = tmpPath + "/" + build
            if not os.path.exists(putdir):
                os.makedirs(putdir)
            logger.debug("Catalog getSFTPResult: putdir=%s remoteDir=%s" % (putdir, self.__copyPath + build))
            self.__archiveFtpClient.chdir(self.__copyPath + build)

            # Copy as many files as possible.  Reports use different files
            files = self.__archiveFtpClient.listdir()
            for file in files:
                try:
                    logger.debug("Catalog getSFTPResult: Downloading, sourceFile=%s Destination=%s" % (file, putdir + "/" + file))
                    self.__archiveFtpClient.get(file, putdir + "/" + file)
                except IOError:
                    pass
            self.__tmpdirs.append(tmpPath)
            return putdir
        except AttributeError:
            msg = "Connection error to archive storage.  Use settings menu to configure!"
            logger.warning(msg)
            assert(False), msg
        except IOError as e:
            msg = "getSFTPResults: IOError " + str(e)
            logger.warning(msg)
            return None
        except Exception as e:
            msg = "getSFTPResults: Exception " + str(e)
            logger.debug(msg)
            return None
Example #4
 def __load_cur_pool_index(self):
     [sts, res] = self.__etcd.getkey('netids/cur_pool_index')
     if sts:
         self.cur_pool_index = int(res)
     else:
         self.cur_pool_index = -1
         logger.warning('load_info in NetIdMgr: etcd get cur_pool_index failed, use default')
Example #5
    def buildHtml(self, data):
        logger.info("+" * 10)
        logger.info(data)
        logger.info("-" * 10)

        emails = []
        msg = ""
        subject = ""

        _type = data['type']

        if _type == "createDay":
            self.createDayAction(data)
        elif _type == "createWeek":
            self.createWeekAction(data)
        elif _type == "notifyDay":
            self.notifyDayAction(data)
        elif _type == "notifyWeek":
            self.notifyWeekAction(data)
        elif _type == "summaryDay":
            self.summaryDayAction(data)
        elif _type == "summaryWeek":
            self.summaryWeekAction(data)
        elif _type == "createTask":
            self.createTaskAction(data['data'])
        elif _type == "pubTask":
            self.pubTaskAction(data['data'])
        elif _type == "taskMemberSummary":
            self.taskMemberSummaryAction(data['data'])
        elif _type == "taskTeamSummary":
            self.taskTeamSummaryAction(data['data'])
        else:
            logger.warning("template error")
Example #6
    def __get_alpha(self, alpha):
        # Transparency
        if alpha == "active_opacity":
            # For backwards compatibility
            alpha = "color5"

        for i in range(1, 9):
            if alpha in ("color%s"%i, "opacity%s"%i):
                if self.globals.colors.has_key("color%s_alpha"%i):
                    a = float(self.globals.colors["color%s_alpha"%i])/255
                else:
                    logger.warning("Theme error: The theme has no" + \
                          " opacity option for color%s." % i)
                    a = 1.0
                break
        else:
            try:
                a = float(alpha)/100
                if a > 1.0 or a < 0:
                    raise ValueError()
            except (ValueError, TypeError):
                logger.exception("Theme error: The opacity attribute of a theme " + \
                      "command should be a number between \"0\" " + \
                      "and \"100\" or \"color1\" to \"color8\".")
                a = 1.0
        return a
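Example #6 relies on Python's for ... else: the else branch runs only when the loop finished without hitting break. A minimal self-contained illustration (the values are hypothetical):

alpha = "color7"
for candidate in ("color1", "color2"):
    if candidate == alpha:
        break          # a match skips the else branch
else:
    print("no match")  # runs only because the loop never hit break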
Example #7
        def __init__(self, host, port, http_backend, use_ssl, ca_cert,
                     ssl_key, ssl_cert, hard_ssl_name_check, daemon_thread_pool_size):
            self.port = port
            self.host = host
            self.srv = None
            # Port = 0 means "I don't want HTTP server"
            if self.port == 0:
                return

            self.use_ssl = use_ssl

            self.registered_fun = {}
            self.registered_fun_names = []
            self.registered_fun_defaults = {}

            protocol = 'http'
            if use_ssl:
                protocol = 'https'
            self.uri = '%s://%s:%s' % (protocol, self.host, self.port)
            logger.info("Opening HTTP socket at %s", self.uri)

            # Hack BaseHTTPServer so wsgiref looks up only the IP, not hostnames
            __import__('BaseHTTPServer').BaseHTTPRequestHandler.address_string = \
                lambda x: x.client_address[0]

            if http_backend == 'cherrypy' or (http_backend == 'auto' and cheery_wsgiserver):
                self.srv = CherryPyBackend(host, port, use_ssl, ca_cert, ssl_key,
                                           ssl_cert, hard_ssl_name_check, daemon_thread_pool_size)
            else:
                logger.warning('Loading the old WSGI Backend. CherryPy >= 3 is recommended instead')
                self.srv = WSGIREFBackend(host, port, use_ssl, ca_cert, ssl_key,
                                          ssl_cert, hard_ssl_name_check, daemon_thread_pool_size)

            self.lock = threading.RLock()
Example #8
    def getLocalProjectForGivenBatch(self, batchList, repo):
        # Strip batch name from given batch file path in the list batchList
        batchNames = [str(ntpath.basename(i)) for i in batchList]
        pathsToWalk = []
        projects = {}
        for batchName in batchNames:
            projects[batchName] = []
        # Walk through directories and search for the batch related projects to fetch info.
        if repo == 'local':
            pathsToWalk = [globals.localPathForBatchTestResults]
        else:
            pathsToWalk = [os.path.dirname(batchFile) for batchFile in batchList]

        pathsToWalk = set(pathsToWalk)

        try:
            for path in pathsToWalk:
                for dirname, dirnames, filenames in os.walk(path):
                    for filename in sorted(filenames):
                        actualFilePath = "%s/%s" % (dirname, filename)
                        if filename != ".gitignore" and filename in batchNames:
                            batchFile = open(actualFilePath)
                            if not projects.has_key(filename):
                                projects[filename] = []
                            projects[filename].extend([i.strip() for i in batchFile.readlines()])
                            batchFile.close()
        except Exception as ex:
            logger.warning("getLocalProjectForGivenBatch: Error=%s" % str(ex))

        return projects
Example #9
 def getLocalResults(self, build):
     try:
         logger.debug("In getLocalResult build=%s " % (build))
         localPath = self.__localPath + build + "/"
         tmpPath = tempfile.mkdtemp(prefix="autoport_")
         putdir = tmpPath + "/" + build
         if not os.path.exists(putdir):
             os.makedirs(putdir)
         logger.debug("Catalog getLocalResults: putdir=%s remoteDir=%s"
                      % (putdir, localPath))
         # Copy as many files as possible.  Reports use different files
         files = os.listdir(localPath)
         for file in files:
             try:
                 shutil.copyfile(localPath + file, putdir + "/" + file)
             except IOError:
                 pass
         self.__tmpdirs.append(tmpPath)
         return putdir
     except IOError as e:
         msg = "getLocalResults: " + str(e)
         logger.warning(msg)
         return None
     except Exception as e:
         msg = "getLocalResults: " + str(e)
         logger.debug(msg)
         return None
Example #10
def ip_in_whitelist(ip):
    try:
        logger.debug("client ip request for registry auth is %s" % ip)
        white_ips = [x.strip() for x in REGISTRY_IP_WHITELIST.split(',')]
        networks, ranges, ips = [], [], []
        for ip_str in white_ips:
            if ip_str.find('/') >= 0:
                try:
                    networks.append(ipaddress.ip_network(unicode(ip_str)))
                except Exception as e:
                    logger.warning("format of ip net %s is invalid" % ip_str)
            elif ip_str.find('-') >= 0:
                try:
                    first, last = ip_str.split('-')
                    # summarize_address_range() yields networks; extend (not append)
                    # so the membership tests below check against each network
                    ranges.extend(ipaddress.summarize_address_range(
                        IPv4Address(unicode(first)), IPv4Address(unicode(last))))
                except Exception as e:
                    logger.warning("format of ip range %s is invalid" % ip_str)
            else:
                ips.append(ip_str)
        if ip in ips:
            return True
        for ip_range in ranges:
            if IPv4Address(ip) in ip_range:
                return True
        for network in networks:
            if IPv4Address(ip) in network:
                return True
        return IPAddress(ip) in IPNetwork(NODE_NETWORK)
    except Exception, e:
        logger.error(
            "Exception parse registry whitelist for ip %s : %s" % (ip, str(e)))
        return False
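Example #10 mixes Python 2 idioms (unicode(), except Exception, e) with two address libraries (ipaddress and netaddr). A Python 3 sketch of the same whitelist check using only the stdlib ipaddress module; ip_allowed and the whitelist format are assumptions, not the project's API:

import ipaddress
import logging

logger = logging.getLogger(__name__)

def ip_allowed(ip, whitelist):
    # whitelist entries may be single IPs ("10.0.0.5") or CIDR nets ("10.0.0.0/24")
    addr = ipaddress.ip_address(ip)
    for entry in whitelist:
        try:
            if addr in ipaddress.ip_network(entry, strict=False):
                return True
        except ValueError:
            logger.warning("format of whitelist entry %s is invalid", entry)
    return False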
Example #11
def writejson(filename, data):
    with open(filename, 'w') as f:
        try:
            f.write(json.dumps(data, indent=2, encoding='utf-8'))
            logger.info('packed %d values' %(len(data)))
        except Exception as e:
            logger.warning('could not pack: %s' %(e))
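json.dumps(..., encoding='utf-8') only exists on Python 2; Python 3 removed the encoding argument. A Python 3 sketch of the same helper:

import json
import logging

logger = logging.getLogger(__name__)

def writejson(filename, data):
    with open(filename, 'w', encoding='utf-8') as f:
        try:
            json.dump(data, f, indent=2, ensure_ascii=False)
            logger.info('packed %d values', len(data))
        except (TypeError, ValueError) as e:
            logger.warning('could not pack: %s', e)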
Example #12
 def __load_pool(self, pool_index):
     [sts, res] = self.__etcd.getkey('netids/pools/'+str(pool_index))
     if sts:
         self.cur_pool = list(json.loads(res))
     else:
         self.cur_pool = []
         logger.warning('load_pool in NetIdMgr: etcd get pools/%s failed, using default' % (str(pool_index)))
Example #13
def get_bathroom_usages(start, end):
    usages = []
    using_bathroom = None
    last_start_datetime = None
    for event in smarthome_db.query(SmarthomeEvent).filter(
        SmarthomeEvent.smart == "bathroom_usage_watcher", SmarthomeEvent.attribute.in_(["started", "stopped"]),
        SmarthomeEvent.datetime >= start, SmarthomeEvent.datetime <= end,
    ).order_by(SmarthomeEvent.datetime):
        if using_bathroom is None:
            # First event in the interval: figure out whether the user was in the bathroom at its start
            if event.attribute == "stopped":
                # They left, so they had been using it
                using_bathroom = True
                last_start_datetime = start
            else:
                # They entered, so they had not
                using_bathroom = False

        if not using_bathroom and event.attribute == "started":
            using_bathroom = True
            last_start_datetime = event.datetime
        elif using_bathroom and event.attribute == "stopped":
            usages.append((last_start_datetime, event.datetime))
            using_bathroom = False
        else:
            logger.warning(u"using_bathroom = %d и в %s произошло событие %s" % (using_bathroom, event.datetime.strftime("%Y-%m-%d %H:%M:%S"), event.attribute))
    # If the user is still in the bathroom at the end of the interval, record the last entry
    if using_bathroom:
        usages.append((last_start_datetime, end))

    return usages
Example #14
def bank_card():
    shops = {}
    for content_item in db.query(ContentItem).filter(ContentItem.type == "vtb24_transaction", ContentItem.created_at >= start, ContentItem.created_at <= end):
        m = re.search(re.compile(u"произведена транзакция по (.+) на сумму ([0-9.]+) (.+?)\..+Детали платежа: (.+)\. Код авторизации", re.DOTALL), content_item.data["notification"])
        if m.group(1) == u"оплате":
            amount = float(m.group(2))
            if m.group(3) == "USD":
                amount *= 30
            elif m.group(3) == "RUR":
                pass
            else:
                logger.warning("Unknown currency: %s" % m.group(3))

            shop_name = m.group(4)

            if shop_name not in shops:
                shops[shop_name] = 0
            shops[shop_name] += amount

    if shops:
        text = u"Покупал товары. Топ магазинов:\n"

        text += u"<ul>\n"
        for shop_name, amount in sorted(shops.items(), key=lambda kv: -kv[1])[:5]:
            text += u"<li>%s (%s)</li>\n" % (shop_name, pytils.numeral.get_plural(int(amount), (u"рубль", u"рубля", u"рублей")))
        text += u"</ul>\n"

        return text
    else:
        return None
Example #15
 def _watchnewnode(self):
     while(True):
         time.sleep(0.1)
         [status, runlist] = self.etcd.listdir("machines/runnodes")
         if not status:
             logger.warning ("get runnodes list failed from etcd ")
             continue
         for node in runlist:
             nodeip = node['key'].rsplit('/',1)[1]
             if node['value']=='waiting':
                 logger.info ("%s want to joins, call it to init first" % nodeip)
             elif node['value']=='work':
                 logger.info ("new node %s joins" % nodeip)
                 # setup GRE tunnels for new nodes
                 if self.addr == nodeip:
                     logger.debug ("worker start on master node. not need to setup GRE")
                 else:
                     logger.debug ("setup GRE for %s" % nodeip)
                     if netcontrol.gre_exists('docklet-br', nodeip):
                         logger.debug("GRE for %s already exists, reuse it" % nodeip)
                     else:
                         netcontrol.setup_gre('docklet-br', nodeip)
                 self.etcd.setkey("machines/runnodes/"+nodeip, "ok")
                 if nodeip not in self.runnodes:
                     self.runnodes.append(nodeip)
                     if nodeip not in self.allnodes:
                         self.allnodes.append(nodeip)
                         self.etcd.setkey("machines/allnodes/"+nodeip, "ok")
                     logger.debug ("all nodes are: %s" % self.allnodes)
                     logger.debug ("run nodes are: %s" % self.runnodes)
                     self.rpcs.append(xmlrpc.client.ServerProxy("http://%s:%s"
                         % (nodeip, self.workerport)))
                     logger.info ("add %s:%s in rpc client list" %
                         (nodeip, self.workerport))
Example #16
def get_outs(start, end):
    outs = []
    owner_is_at_home = None
    last_leave_datetime = None
    for event in smarthome_db.query(SmarthomeEvent).filter(
        SmarthomeEvent.smart == "owner_presence_watcher", SmarthomeEvent.attribute.in_(["came", "left"]),
        SmarthomeEvent.datetime >= start, SmarthomeEvent.datetime <= end,
    ).order_by(SmarthomeEvent.datetime):
        if owner_is_at_home is None:
            # First event in the interval: figure out whether the user was at home at its start
            if event.attribute == "came":
                # They came home, so they had been away
                owner_is_at_home = False
                last_leave_datetime = start
            else:
                # They left, so they had been home
                owner_is_at_home = True

        if owner_is_at_home and event.attribute == "left":
            owner_is_at_home = False
            last_leave_datetime = event.datetime
        elif not owner_is_at_home and event.attribute == "came":
            outs.append((last_leave_datetime, event.datetime))
            owner_is_at_home = True
        else:
            logger.warning(u"owner_is_at_home = %d и в %s произошло событие %s" % (owner_is_at_home, event.datetime.strftime("%Y-%m-%d %H:%M:%S"), event.attribute))
    # If the user is not at home at the end of the interval, record the last departure
    if not owner_is_at_home:
        outs.append((last_leave_datetime, end))

    return outs
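Examples #13 and #16 are the same start/stop pairing algorithm with different event names. A generic sketch of the pattern (pair_intervals and its signature are hypothetical, not part of either project):

def pair_intervals(events, start_attr, stop_attr, interval_start, interval_end):
    # events: (datetime, attribute) tuples, already ordered by time
    spans, active, span_start = [], None, None
    for when, attr in events:
        if active is None:
            # a stop as the first event means the state was already active
            active = (attr == stop_attr)
            span_start = interval_start
        if not active and attr == start_attr:
            active, span_start = True, when
        elif active and attr == stop_attr:
            spans.append((span_start, when))
            active = False
    if active:
        # still active at the end of the interval: close the span there
        spans.append((span_start, interval_end))
    return spans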
Example #17
    def handle_read(self):
        #print "Handle read"

        q = self.current
        # get a read but no current query? Not normal!

        if not q:
            #print "WARNING: got LS read while no current query in progress. I return"
            return

        try:
            data = self.do_read(16)
            code = data[0:3]
            q.return_code = code

            length = int(data[4:15])
            data = self.do_read(length)

            if code == "200":
                try:
                    d = eval(data)
                    #print d
                    q.put(d)
                except:
                    q.put(None)
            else:
                q.put(None)
                return None
        except IOError, exp:
            self.alive = False
            logger.warning("SOCKET ERROR: %s" % str(exp))
            return q.put(None)
Example #18
 def on_message(self, message):
     prefix, message = message.split(",", 1)
     id = prefix.split("/", 1)[0]
     message = jsonapi.loads(message)
     logger.debug("SockJSHandler.on_message: %s", message)
     msg_type = message["header"]["msg_type"]
     app = self.session.handler.application
     if id == "complete":
         if msg_type in ("complete_request", "object_info_request"):
             app.completer.registerRequest(self, message)
         return
     try:
         kernel = app.kernel_dealer.kernel(id)
     except KeyError:
         # Ignore messages to nonexistent or killed kernels.
         logger.warning("%s sent to nonexistent kernel %s", msg_type, id)
         return
     if id not in self.channels:
         self.channels[id] = SockJSChannelsHandler(self.send)
         self.channels[id].connect(kernel)
     if msg_type == "execute_request":
         stats_logger.info(StatsMessage(
             kernel_id=id,
             remote_ip=kernel.remote_ip,
             referer=kernel.referer,
             code=message["content"]["code"],
             execute_type="request"))
     self.channels[id].send(message)
Example #19
 def basic_app_remove(self):
     logger.info("remove basic app : %s " % self.appname)
     remove_results = {}
     remove_success_results = {}
     remove_failed_results = {}
     remove_missed_results = {}
     try:
         app_spec = self.app_spec
         for pg_spec in app_spec.PodGroups:
             remove_r = self.podgroup_remove(pg_spec.Name)
             if remove_r.status_code < 400:
                 remove_success_results[pg_spec.Name] = remove_r
             elif remove_r.status_code == 404:
                 remove_missed_results[pg_spec.Name] = remove_r
             else:
                 remove_failed_results[pg_spec.Name] = remove_r
         # use dependency_remove api of Deployd for deleting proc with
         # portal type
         for dp_spec in app_spec.Portals:
             remove_r = self.dependency_remove(dp_spec.Name)
             if remove_r.status_code < 400:
                 remove_success_results[dp_spec.Name] = remove_r
             elif remove_r.status_code == 404:
                 remove_missed_results[dp_spec.Name] = remove_r
             else:
                 remove_failed_results[dp_spec.Name] = remove_r
     except Exception, e:
         logger.warning("failed when trying to remove app %s: %s" %
                        (self.appname, str(e)))
Example #20
 def collect_diskinfo(self):
     global workercinfo
     parts = psutil.disk_partitions()
     setval = []
     devices = {}
     for part in parts:
         # deal with each partition
         if not part.device in devices:
             devices[part.device] = 1
             diskval = {}
             diskval['device'] = part.device
             diskval['mountpoint'] = part.mountpoint
             try:
                 usage = psutil.disk_usage(part.mountpoint)
                 diskval['total'] = usage.total
                 diskval['used'] = usage.used
                 diskval['free'] = usage.free
                 diskval['percent'] = usage.percent
                 if part.mountpoint.startswith('/opt/docklet/local/volume'):
                     # this mountpoint holds the disk-usage data of a container
                     names = re.split('/', part.mountpoint)
                     container = names[-1]
                     if not container in workercinfo.keys():
                         workercinfo[container] = {}
                     workercinfo[container]['disk_use'] = diskval 
                 setval.append(diskval)  # make a list
             except Exception as err:
                 logger.warning(traceback.format_exc())
                 logger.warning(err)
     #print(output)
     #print(diskparts)
     return setval
Example #21
 def run(self):
     global workercinfo
     global workerinfo
     cnt = 0
     while not self.thread_stop:
         containers = self.list_container()
         countR = 0
         conlist = []
         for container in containers:
             # collect data of each container
             if not container == '':
                 conlist.append(container)
                 if not container in workercinfo.keys():
                     workercinfo[container] = {}
                 try:
                     success = self.collect_containerinfo(container)
                     if success:
                         countR += 1
                 except Exception as err:
                     logger.warning(traceback.format_exc())
                     logger.warning(err)
         containers_num = len(containers)-1
         concnt = {}
         concnt['total'] = containers_num
         concnt['running'] = countR
         workerinfo['containers'] = concnt
         time.sleep(self.interval)
         if cnt == 0:
             # refresh the worker's container list every 5 iterations
             workerinfo['containerslist'] = conlist
         cnt = (cnt+1)%5
         if self.test:
             break
     return
Example #22
def fetch_and_parse_feed(url, etag=None, last_modified=None):
    # TODO implement etag & last_modified header
    url = sanitize_url(url)
    feed_parsed = feedparser.parse(url)
    if not hasattr(feed_parsed, 'status'):
        raise FetchingException("Connection error")
    elif feed_parsed.status not in (200, 301, 302):
        raise FetchingException("status_code is %d" % feed_parsed.status)
    if feed_parsed.version == '':
        # it's probably html instead of rss/atom
        resp = fetch_url(url)
        if resp.status_code not in (200, 301, 302):
            raise FetchingException("status_code is %d" % resp.status_code)
        soup = BeautifulSoup(resp.content)
        try:
            url = soup.find_all("link", rel="alternate")[0]['href']
        except (IndexError, KeyError):
            # alternate-link is missing
            raise FetchingException("Neither RSS nor good HTML...")
        if not url.startswith("http"):
            url = concat_urls(resp.url, url)
        feed_parsed = feedparser.parse(url)
        if feed_parsed.status not in (200, 301, 302):
            raise FetchingException("status_code is %d" % feed_parsed.status)
    if feed_parsed.status == 301:  # moved permanently
        logger.warning("/!\\ permanent redirect (301) for %s", url)
        url = feed_parsed.href
    elif feed_parsed.status == 302:
        logger.warning("/!\\ temporary redirect (302) for %s", url)
    return {"feed": feed_parsed, "real_url": url}
Example #23
 def run(self):
     global monitor_hosts
     global monitor_vnodes
     while not self.thread_stop:
         for worker in monitor_hosts.keys():
             monitor_hosts[worker]['running'] = False
         workers = self.nodemgr.get_rpcs()
         for worker in workers:
             try:
                 ip = self.nodemgr.rpc_to_ip(worker)
                 # fetch data
                 info = list(eval(worker.workerFetchInfo(self.master_ip)))
                 #logger.info(info[0])
                 # store data in monitor_hosts and monitor_vnodes
                 monitor_hosts[ip] = info[0]
                 for container in info[1].keys():
                     owner = get_owner(container)
                     if not owner in monitor_vnodes.keys():
                         monitor_vnodes[owner] = {}
                     monitor_vnodes[owner][container] = info[1][container]
             except Exception as err:
                 logger.warning(traceback.format_exc())
                 logger.warning(err)
         time.sleep(2)
         #logger.info(History.query.all())
         #logger.info(VNode.query.all())
     return
Example #24
 def run(self):
     cnt = 0
     while not self.thread_stop:
         containers = self.list_container()
         countR = 0
         conlist = []
         for container in containers:
             if not container == '':
                 conlist.append(container)
                 try:
                     if(self.collect_containerinfo(container)):
                         countR += 1
                 except Exception as err:
                     #pass
                     logger.warning(err)
         containers_num = len(containers)-1
         concnt = {}
         concnt['total'] = containers_num
         concnt['running'] = countR
         self.etcdser.setkey('/hosts/%s/containers'%(self.host), concnt)
         time.sleep(self.interval)
         if cnt == 0:
             self.etcdser.setkey('/hosts/%s/containerslist'%(self.host), conlist)
         cnt = (cnt+1)%5
         if self.test:
             break
     return
Example #25
def readjson(filename):
    if os.path.exists(filename):
        with open(filename, 'r') as f:
            try:
                return json.loads(f.read(), 'utf-8')
            except Exception as e:
                logger.warning('could not unpack: %s' %(e))
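As with Example #11, json.loads(f.read(), 'utf-8') passes the Python 2 encoding argument, which Python 3 rejects. A Python 3 counterpart:

import json
import logging
import os

logger = logging.getLogger(__name__)

def readjson(filename):
    if os.path.exists(filename):
        with open(filename, 'r', encoding='utf-8') as f:
            try:
                return json.load(f)
            except ValueError as e:  # json.JSONDecodeError is a ValueError subclass
                logger.warning('could not unpack: %s', e)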
Example #26
    def get(url, payload=None):
        payload = payload or {}
        logger.debug([url, payload])
        retries = MAX_RETRY
        res = None
        while retries > 0:
            try:
                cache = TwitchAPI.caching
                if cache:
                    res = cache.get(url, payload)
                    if res:
                        return res

                res = requests.get(url, params=payload,
                                   headers=common_headers, verify=False)
                j = res.json()
                if cache:
                    cache.set(url, payload, j)

                if "error" in j and j['error']:
                    raise Exception(j.get("error"))
                return j
            except ValueError as e:
                logger.exception(e)
                if res:
                    logger.warning(res.text)
                retries -= 1
                if retries <= 0:
                    raise
                time.sleep(0.3)
            except Exception as e:
                logger.exception(e)
                raise e
Example #27
    def useless_procs_remove(self, origin_procs):
        remove_results = {}
        remove_success_results = {}
        remove_failed_results = {}
        remove_missed_results = {}

        current_pgs = ["%s.%s.%s" % (self.appname, p.type.name, p.name)
                       for p in self.lain_config.procs.values()]
        try:
            for proc in origin_procs:
                pg_name = "%s.%s.%s" % (
                    self.appname, proc.type.name, proc.name)
                if pg_name in current_pgs:
                    continue

                logger.info("remove useless proc %s of app : %s " %
                            (pg_name, self.appname))
                remove_r = self.podgroup_remove(pg_name) if proc.type != ProcType.portal else \
                    self.dependency_remove(pg_name)
                if remove_r.status_code < 400:
                    remove_success_results[pg_name] = remove_r
                elif remove_r.status_code == 404:
                    remove_missed_results[pg_name] = remove_r
                else:
                    remove_failed_results[pg_name] = remove_r
        except Exception, e:
            logger.warning("failed when trying to remove useless proc of app %s: %s" %
                           (self.appname, str(e)))
Example #28
 def run(self):
     global monitor_hosts
     global monitor_vnodes
     while not self.thread_stop:
         for worker in monitor_hosts.keys():
             monitor_hosts[worker]['running'] = False
         workers = self.nodemgr.get_nodeips()
         for worker in workers:
             try:
                 ip = worker
                 workerrpc = xmlrpc.client.ServerProxy("http://%s:%s" % (worker, env.getenv("WORKER_PORT")))
                 # fetch data
                 info = list(eval(workerrpc.workerFetchInfo(self.master_ip)))
                 #logger.info(info[0])
                 # store data in monitor_hosts and monitor_vnodes
                 monitor_hosts[ip] = info[0]
                 for container in info[1].keys():
                     owner = get_owner(container)
                     if not owner in monitor_vnodes.keys():
                         monitor_vnodes[owner] = {}
                     monitor_vnodes[owner][container] = info[1][container]
             except Exception as err:
                 logger.warning(traceback.format_exc())
                 logger.warning(err)
         time.sleep(2)
         #logger.info(History.query.all())
         #logger.info(VNode.query.all())
     return
Example #29
    def do_work(self, s, returns_queue, c):
        ## restore default signal handler for the workers:
        # but on android, we are a thread, so don't do it
        if not is_android:
            signal.signal(signal.SIGTERM, signal.SIG_DFL)

        self.set_proctitle()

        print "I STOP THE http_daemon", self.http_daemon
        if self.http_daemon:
            self.http_daemon.shutdown()

        timeout = 1.0
        self.checks = []
        self.returns_queue = returns_queue
        self.s = s
        self.t_each_loop = time.time()
        while True:
            begin = time.time()
            msg = None
            cmsg = None

            # If we are dying (big problem!) we do not
            # take new jobs, we just finished the current one
            if not self.i_am_dying:
                # REF: doc/shinken-action-queues.png (3)
                self.get_new_checks()
                # REF: doc/shinken-action-queues.png (4)
                self.launch_new_checks()
            # REF: doc/shinken-action-queues.png (5)
            self.manage_finished_checks()

            # Now get order from master
            try:
                cmsg = c.get(block=False)
                if cmsg.get_type() == 'Die':
                    logger.debug("[%d] Dad say we are dying..." % self.id)
                    break
            except:
                pass

            if self._mortal and self._idletime > 2 * self._timeout:
                logger.warning("[%d] Timeout, Harakiri" % self.id)
                # The master must be dead and we are lonely, we must die
                break

            # Look if we are dying, and if we finish all current checks
            # if so, we really die, our master poller will launch a new
            # worker because we were too weak to manage our job :(
            if len(self.checks) == 0 and self.i_am_dying:
                logger.warning("[%d] I DIE because I cannot do my job as I should (too many open files?)... forgot me please." % self.id)
                break

            # Manage a possible system time change (our reference time will be adjusted by the diff)
            diff = self.check_for_system_time_change()
            begin += diff

            timeout -= time.time() - begin
            if timeout < 0:
                timeout = 1.0
Example #30
 def stop(self):
     #TODO: find out why, but in SSL mode stop() locks up, so bail out before that
     if self.use_ssl:
         return
     try:
         self.srv.stop()
     except Exception, exp:
         logger.warning('Cannot stop the CherryPy backend : %s', exp)
Example #31
def empty_config_path(path):
    assert path.startswith(NGINX_CONF_PATH)
    mkdir_p(path)
    delete_list = os.listdir(path)
    for item in delete_list:
        abs_path = os.path.join(path, item)
        if os.path.isfile(abs_path):
            os.remove(abs_path)
        elif os.path.islink(abs_path):
            os.remove(abs_path)
        elif os.path.isdir(abs_path):
            shutil.rmtree(abs_path)
        else:
            logger.warning('something strange in config path: %s' % abs_path)
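mkdir_p here is presumably a `mkdir -p` helper; on Python 3 it reduces to a one-liner:

import os

def mkdir_p(path):
    # create path and any missing parents; do nothing if it already exists
    os.makedirs(path, exist_ok=True)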
Example #32
        def take_guanhuai_gift(ctx):
            cfg = self.cfg.vip_mentor

            # Determine which character to use
            server_id, roleid = "", ""
            if cfg.guanhuai_dnf_server_id == "":
                logger.warning("server and character for the VIP care gift are not configured; the character bound in DJC will be used")
                logger.warning(
                    color("bold_cyan") +
                    "if your main account plays often, consider creating an alt in another server cluster and never logging in with it again, so future care and card-collecting events can use it to qualify for returning-player rewards"
                )
            else:
                if cfg.guanhuai_dnf_role_id == "":
                    logger.warning(
                        f"the server ID for the VIP care gift is set to {cfg.guanhuai_dnf_server_id}, but no role ID is configured; all characters on that server are printed below, please put the right role ID into the config sheet"
                    )
                    self.djc_helper.query_dnf_rolelist(
                        cfg.guanhuai_dnf_server_id)
                else:
                    logger.info("using the configured server and role info to claim the VIP care gift")
                    server_id, roleid = cfg.guanhuai_dnf_server_id, cfg.guanhuai_dnf_role_id

            if guanhuai_distinctActive == "0":
                logger.warning(
                    color("bold_cyan") +
                    "this VIP care event requires that the qualifying account and the claiming account be the same, so if the current QQ was not judged a lucky player, the claim will not succeed~")

            return _game_award(ctx,
                               guanhuai_gift.act_name,
                               guanhuai_gift.ruleid,
                               area=server_id,
                               partition=server_id,
                               roleid=roleid)
Example #33
    def get(self):
        logger.debug('RootHandler.get')
        args = self.request.arguments
        code = None
        lang = self.get_argument("lang", None)
        interacts = None
        if "c" in args:
            # The code is explicitly specified
            code = self.get_argument("c")
        elif "z" in args:
            # The code is base64-compressed
            def get_decompressed(name):
                a = args[name][-1]
                # We allow the user to strip off the ``=`` padding at the end
                # so that the URL doesn't have to have any escaping.
                # Here we add back the ``=`` padding if we need it.
                a += "=" * ((4 - (len(a) % 4)) % 4)
                return zlib.decompress(
                    base64.urlsafe_b64decode(a)).decode("utf8")

            try:
                code = get_decompressed("z")
                if "interacts" in args:
                    interacts = get_decompressed("interacts")
            except Exception as e:
                self.set_status(400)
                self.finish("Invalid zipped code: %s\n" % (e.message, ))
                return
        elif "q" in args:
            # The code is referenced by a permalink identifier.
            q = self.get_argument("q")
            try:
                code, lang, interacts = (yield tornado.gen.Task(
                    self.application.db.get, q))[0]
            except LookupError:
                logger.warning("ID not found in permalink database %s", q)
                self.set_status(404)
                self.finish("ID not found in permalink database")
                return
        if code is not None:
            code = url_escape(code, plus=False)
        if interacts is not None:
            interacts = url_escape(interacts, plus=False)
        autoeval = self.get_argument("autoeval",
                                     "false" if code is None else "true")
        self.render("root.html",
                    code=code,
                    lang=lang,
                    interacts=interacts,
                    autoeval=autoeval)
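The padding arithmetic in get_decompressed restores the '=' characters that were stripped from the URL-safe base64 string. A self-contained round trip showing why the formula works (the sample code string is arbitrary):

import base64
import zlib

code = "print(42)"
z = base64.urlsafe_b64encode(zlib.compress(code.encode("utf8"))).decode().rstrip("=")
# reverse the stripping exactly as RootHandler.get does:
z += "=" * ((4 - (len(z) % 4)) % 4)
assert zlib.decompress(base64.urlsafe_b64decode(z)).decode("utf8") == code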
Example #34
    def __command_combine(self, surface, pix1, pix2, degrees=None):
        # Combines the left half of surface with the right half of surface2.
        # The transition between the two halves is soft.

        # The degrees keyword is kept for compatibility reasons.
        w = surface.get_width()
        h = surface.get_height()
        if pix1 == "self":
            p1 = surface
        elif pix1 in self.temp:
            p1 = self.temp[pix1]
        elif self.theme.has_surface(pix1):
            w = surface.get_width()
            h = surface.get_height()
            p1 = self.__resize_surface(self.theme.get_surface(pix1), w, h)
        else:
            logger.warning("theme error: pixmap %s not found" % pix1)
        if pix2 == "self":
            p2 = surface
        elif pix2 in self.temp:
            p2 = self.temp[pix2]
        elif self.theme.has_surface(pix2):
            w = surface.get_width()
            h = surface.get_height()
            p2 = self.__resize_surface(self.theme.get_surface(pix2), w, h)
        else:
            logger.warning("theme error: pixmap %s not found" % pix2)

        surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, p1.get_width(),
                                     p1.get_height())
        ctx = cairo.Context(surface)

        linear = cairo.LinearGradient(0, 0, p1.get_width(), 0)
        linear.add_color_stop_rgba(0.4, 0, 0, 0, 0.5)
        linear.add_color_stop_rgba(0.6, 0, 0, 0, 1)
        ctx.set_source_surface(p2, 0, 0)
        #ctx.mask(linear)
        ctx.paint()

        linear = cairo.LinearGradient(0, 0, p1.get_width(), 0)
        linear.add_color_stop_rgba(0.4, 0, 0, 0, 1)
        linear.add_color_stop_rgba(0.6, 0, 0, 0, 0)
        ctx.set_source_surface(p1, 0, 0)
        ctx.mask(linear)
        try:
            # leftovers from an older pixbuf-based implementation
            del pb
            del pbs
        except NameError:
            pass
        return surface
Example #35
 def ping():
     #logger.debug("ping for %s", self.id)
     now = ioloop.time()
     if self._expecting_pong:
         logger.warning("kernel %s died unexpectedly", self.id)
         self.stop()
     elif now > self.hard_deadline:
         logger.info("hard deadline reached for %s", self.id)
         self.stop()
     elif now > self.deadline and self.executing == 0:
         self.stop()
     else:
         hb.send(b'ping')
         self._expecting_pong = True
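A ping like this is typically driven by a timer; one way it might be scheduled, assuming Tornado's event loop (`kernel` and the 3-second interval are assumptions, not taken from the project):

from tornado.ioloop import PeriodicCallback

# `kernel` is hypothetical: any object exposing the ping() above
heartbeat = PeriodicCallback(kernel.ping, 3000)  # invoke ping() every 3000 ms
heartbeat.start()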
Example #36
def update_task():
    session_task_id = session.get('edit_task_id')
    session_title = session.get('edit_title')
    session_content = session.get('edit_content')
    session_token = session.get('edit_csrf_token')
    post_task_id = request.form["task_id"]
    post_title = request.form["title"]
    post_content = request.form["content"]
    edit_csrf_token = request.form["edit_csrf_token"]

    if edit_csrf_token != session_token:
        logger.warning('edit csrf_token is %s ', edit_csrf_token)
        abort(400)

    task = TaskModel.query.get(post_task_id)

    if not task:
        logMsg = "in update task execution:query data is none : data is %s."
        logger.warning(logMsg, task)
        abort(400)

    if post_task_id != str(session_task_id):
        logMsg = "in update task execution:post data is wrong : request post_task_id  is %s."
        logger.warning(logMsg, post_task_id)
        abort(400)

    if session_title != post_title or session_content != post_content:
        logMsg = "in update task execution: input data is wrong : post data is %s."
        logger.warning(logMsg, post_title)
        abort(400)

    try:
        task.title = post_title
        task.content = post_content
        task.date = datetime.today().strftime("%Y-%m-%d")
        db.session.commit()

        delete_edit_session()

        return redirect(url_for('.index'))
    except:
        db.session.rollback()

        logMsg = "in update task execution: update execution is failed. please return index page."
        logger.warning(logMsg)

        delete_edit_session()
        abort(400)
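The check above compares the posted token against one stored in the session; the issuing side might look like this, assuming Flask and the stdlib secrets module (issue_edit_csrf_token is hypothetical, not the project's code):

import secrets
from flask import session

def issue_edit_csrf_token():
    token = secrets.token_hex(16)        # 32 hex chars of CSRF token
    session['edit_csrf_token'] = token   # later compared against the posted field
    return token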
Example #37
    def load_past_jobs(self):
        """
        Look in the jobs directory and load all valid jobs
        """
        loaded_jobs = []
        failed_jobs = []
        for dir_name in sorted(os.listdir(config_value('jobs_dir'))):
            if os.path.isdir(os.path.join(config_value('jobs_dir'), dir_name)):
                # Make sure it hasn't already been loaded
                if dir_name in self.jobs:
                    continue

                try:
                    job = Job.load(dir_name)
                    # The server might have crashed
                    if job.status.is_running():
                        job.status = Status.ABORT
                    for task in job.tasks:
                        if task.status.is_running():
                            task.status = Status.ABORT

                    # We might have changed some attributes here or in __setstate__
                    job.save()
                    loaded_jobs.append(job)
                except Exception as e:
                    failed_jobs.append((dir_name, e))

        # add DatasetJobs
        for job in loaded_jobs:
            if isinstance(job, DatasetJob):
                self.jobs[job.id()] = job

        # add ModelJobs
        for job in loaded_jobs:
            if isinstance(job, ModelJob):
                try:
                    # load the DatasetJob
                    job.load_dataset()
                    self.jobs[job.id()] = job
                except Exception as e:
                    failed_jobs.append((job.id(), e))

        logger.info('Loaded %d jobs.' % len(self.jobs))

        if len(failed_jobs):
            logger.warning('Failed to load %d jobs.' % len(failed_jobs))
            if self.verbose:
                for job_id, e in failed_jobs:
                    logger.debug('%s - %s: %s' % (job_id, type(e).__name__, str(e)))
Example #38
 def create_cluster(self, clustername, username, image, user_info, setting):
     if self.is_cluster(clustername, username):
         return [False, "cluster:%s already exists" % clustername]
     clustersize = int(self.defaultsize)
     logger.info ("starting cluster %s with %d containers for %s" % (clustername, int(clustersize), username))
     workers = self.nodemgr.get_rpcs()
     image_json = json.dumps(image)
     groupname = json.loads(user_info)["data"]["group"]
     if (len(workers) == 0):
         logger.warning ("no workers to start containers, start cluster failed")
         return [False, "no workers are running"]
     # check user IP pool status, should be moved to user init later
     if not self.networkmgr.has_user(username):
         self.networkmgr.add_user(username, cidr=29, isshared = True if str(groupname) == "fundation" else False)
     [status, result] = self.networkmgr.acquire_userips_cidr(username, clustersize)
     gateway = self.networkmgr.get_usergw(username)
     vlanid = self.networkmgr.get_uservlanid(username)
     logger.info ("create cluster with gateway : %s" % gateway)
     self.networkmgr.printpools()
     if not status:
         logger.info ("create cluster failed: %s" % result)
         return [False, result]
     ips = result
     clusterid = self._acquire_id()
     clusterpath = self.fspath+"/global/users/"+username+"/clusters/"+clustername
     hostpath = self.fspath+"/global/users/"+username+"/hosts/"+str(clusterid)+".hosts"
     hosts = "127.0.0.1\tlocalhost\n"
     containers = []
     for i in range(0, clustersize):
         onework = workers[random.randint(0, len(workers)-1)]
         lxc_name = username + "-" + str(clusterid) + "-" + str(i)
         hostname = "host-"+str(i)
         logger.info ("create container with : name-%s, username-%s, clustername-%s, clusterid-%s, hostname-%s, ip-%s, gateway-%s, image-%s" % (lxc_name, username, clustername, str(clusterid), hostname, ips[i], gateway, image_json))
         [success,message] = onework.create_container(lxc_name, username, json.dumps(setting) , clustername, str(clusterid), str(i), hostname, ips[i], gateway, str(vlanid), image_json)
         if success is False:
             logger.info("container create failed, so vcluster create failed")
             return [False, message]
         logger.info("container create success")
         hosts = hosts + ips[i].split("/")[0] + "\t" + hostname + "\t" + hostname + "."+clustername + "\n"
         containers.append({ 'containername':lxc_name, 'hostname':hostname, 'ip':ips[i], 'host':self.nodemgr.rpc_to_ip(onework), 'image':image['name'], 'lastsave':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'setting': setting })
     with open(hostpath, 'w') as hostfile:
         hostfile.write(hosts)
     proxy_url = env.getenv("PORTAL_URL") + "/_web/" + username + "/" + clustername
     info = {'clusterid':clusterid, 'status':'stopped', 'size':clustersize, 'containers':containers, 'nextcid': clustersize, 'create_time':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'start_time':"------" , 'proxy_url':proxy_url}
     with open(clusterpath, 'w') as clusterfile:
         clusterfile.write(json.dumps(info))
     return [True, info]
Example #39
    def find_ap(self, sen):
        try:
            cwl = sen.class_word_list
            for i in range(len(cwl)):
                if cwl[i].pos == 'v':
                    sen.V_ID = i
                    break
            if sen.V_ID < 0:
                logger.debug('after POS tagging, the sentence contains no verb')
                sen.ap_ID = -1
                return sen.ap_ID
            for i in range(len(cwl)):
                if cwl[i].arcs_relation == 'HED':
                    sen.hed_ID = i
                    break
            if sen.hed_ID < 0:
                logger.debug('dependency parsing found no head word (HED)')
                sen.ap_ID = sen.V_ID
                return sen.ap_ID

            if sen.V_ID == sen.hed_ID:
                sen.ap_ID = sen.hed_ID
                cwl[sen.ap_ID].Semantic_markup = 'ap'
                return sen.ap_ID

            if cwl[sen.hed_ID].pos != 'v':
                new_hed = self.find_late(sen.hed_ID, sen.postags_list)
                if new_hed == sen.V_ID:
                    sen.ap_ID = sen.V_ID
                else:
                    if cwl[new_hed].arcs_head == sen.V_ID or cwl[
                            sen.V_ID].arcs_head == new_hed:
                        sen.ap_ID = sen.V_ID
                    else:
                        sen.ap_ID = new_hed
            else:
                if cwl[sen.V_ID].arcs_head == sen.hed_ID + 1:
                    sen.ap_ID = sen.V_ID
                else:
                    sen.ap_ID = sen.hed_ID

            cwl[sen.ap_ID].Semantic_markup = 'ap'
            cwl[sen.hed_ID].arcs_head = cwl[sen.ap_ID].arcs_head
            cwl[sen.hed_ID].arcs_relation = cwl[sen.ap_ID].arcs_relation
            return sen.ap_ID
        except Exception as e:
            s = "确定动作属性发生异常find_ap" + str(e)
            logger.warning(s)
            logger.warning(sys.exc_info())
Example #40
 def insert_one(self, music):
     '''Insert one record'''
     # check whether this song already exists
     count = self.mongo.count_documents({'id': music.id})
     # if it does not exist
     if count == 0:
         # use music.id as the MongoDB primary key: _id
         dic = music.__dict__
         dic['_id'] = music.id
         # insert the document
         self.mongo.insert_one(dic)
         logger.info(f'inserted music: {music.sing_name}')
     # the song is already in the database
     else:
         logger.warning(f'music already exists: {music.sing_name}')
Example #41
def run():
    csv_list = os.listdir('./output/')
    i = 0
    for filename in csv_list:
        logger.info(f'filename={filename}')
        # print(f'filename={filename.format()}')
        i += 1
        df = pd.read_csv(f'./output/{filename}', encoding='utf-8')
        # print(df)
        # print(type(df))
        rows = []
        for index, row in df.iterrows():
            rows.append(row)
        run_pthread(rows)
        logger.warning(f'------------ round {i} finished crawling ------------')
Example #42
def parse_xml(xml):
    tree = ET.fromstring(xml)
    unique_name = tree.findall("./var[@name='id']")
    level = tree.findall("./var[@name='level']")
    if len(unique_name) > 1 or len(level) > 1:
        logger.warning("XML was created unconsistent, id or level is not the one in it")
    objects_elements = tree.findall("./objects/object")
    objects = []
    for obj in objects_elements:
        objects.append(obj.attrib["name"])
    result = {
        "id":unique_name[0].attrib["value"],
        "level":level[0].attrib["value"], "objects":objects
        }
    return result
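A minimal input parse_xml accepts, with the values purely illustrative (requires import xml.etree.ElementTree as ET for the function above):

xml = """<level_data>
    <var name="id" value="level_001"/>
    <var name="level" value="3"/>
    <objects>
        <object name="crate"/>
        <object name="barrel"/>
    </objects>
</level_data>"""
parse_xml(xml)
# => {'id': 'level_001', 'level': '3', 'objects': ['crate', 'barrel']}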
Example #43
 def run(self):
     # if self.db.is_over_maxinum(REDIS_KEY_HTTP) :
     #     return
     for callback_index in range(self.__function_count__):
         callback = self.__functions__[callback_index]
         try:
             for res in getattr(self, callback)():  # call the collector method by name
                 if res['type'].upper() == 'HTTP':
                     print(res['proxy'])
                     # self.db.add(REDIS_KEY_HTTP, res['proxy'])
                 else:
                     print(res['proxy'])
                     # self.db.add(REDIS_KEY_HTTPS, res['proxy'])
         except Exception as e:
             logger.warning("{}爬虫方法报错:{}".format(callback, e))
Example #44
 def findShareFromUrl(self, url, travseSibling = True):
     try:
         r = self.session.get(url)
         if r.status_code == 200:
             self.listFile(r.text, url)
             # traverse the other pages
             if travseSibling:
                 soup = BeautifulSoup(r.text, "html.parser")
                 nextPages = self.pageList(soup)
                 if len(nextPages) > 0:
                     for p in nextPages:
                         logger.info('To find share in page %s' % (url + p))
                         self.findShareFromUrl(url + p, travseSibling = False) #no need to traverse sibling
     except Exception as e:
         logger.warning("Unexpected error: %s"%str(e))
Example #45
def remove_ghost_workers():
    if not OPTIONS.get('remove_ghost_workers', False):
        return

    if not redis_runs_on_same_machine():
        logger.warning(
            'Cannot remove Ghost Workers, because the configured Redis Server is not running on localhost!'
        )
        return

    setup_rq_connection()

    for w in Worker.all():
        if not worker_running(w):
            w.register_death()
Example #46
    def answer(self, k: int) -> bool:
        """
        Answer the question

        :param k: index of the chosen answer
        :return: is correct
        """
        self.answered += 1
        if not (0 <= k < len(self.variants)):
            logger.warning(
                'User answer doesn\'t represent any of available variants\n'
                'User: "%s", Variants: "%s", QID: %d' %
                (k, self.variants, self.id))
            return False
        return k == self.correct
Example #47
    def handle(self):
        while True:
            try:
                self.data = self.request.recv(server_settings.RECV_SIZE)
                if not self.data:
                    break

                cmd_dict = json.loads(self.data.decode())
                action = cmd_dict['action']
                if hasattr(self, action):
                    func = getattr(self, action)
                    func(cmd_dict)
            except ConnectionResetError:
                logger.warning('client disconnect')
                break
Example #48
def check_update_on_start(config):
    try:
        if not config.check_update_on_start and not config.auto_update_on_start:
            logger.warning("启动时检查更新被禁用,若需启用请在config.toml中设置")
            return

        ui = get_update_info(config)

        if config.check_update_on_start:
            try_manaual_update(ui)

        if config.auto_update_on_start:
            show_update_info_on_first_run(ui)
    except Exception as err:
        logger.error("更新版本失败, 错误为{}".format(err))
Example #49
    def download_file(self,
                      fileinfo: FileInFolder,
                      download_dir: str,
                      overwrite=True,
                      show_log=True) -> str:
        """
        下载最新版本压缩包到指定目录,并返回最终压缩包的完整路径
        """
        if not os.path.isdir(download_dir):
            os.mkdir(download_dir)

        download_dir = os.path.realpath(download_dir)
        target_path = os.path.join(download_dir, fileinfo.name)

        def after_downloaded(file_name):
            """下载完成后的回调函数"""
            target_path = file_name
            if show_log: logger.info(f"最终下载文件路径为 {file_name}")

        if show_log: logger.info(f"即将开始下载 {target_path}")
        callback = None
        if show_log: callback = self.show_progress
        retCode = self.lzy.down_file_by_url(
            fileinfo.url,
            "",
            download_dir,
            callback=callback,
            downloaded_handler=after_downloaded,
            overwrite=overwrite)
        if retCode != LanZouCloud.SUCCESS:
            if show_log: logger.error(f"下载失败,retCode={retCode}")
            if retCode == LanZouCloud.NETWORK_ERROR:
                if show_log:
                    logger.warning(
                        color("bold_yellow") +
                        ("蓝奏云api返回网络错误,这很可能是由于dns的问题导致的\n"
                         "分别尝试在浏览器中访问下列两个网页,是否一个打的开一个打不开?\n"
                         "https://fzls.lanzoux.com/s/djc-helper\n"
                         "https://fzls.lanzous.com/s/djc-helper\n"
                         "\n"
                         "如果是这样,请按照下面这个链接,修改本机的dns,使用阿里、腾讯、百度、谷歌dns中的任意一个应该都可以解决。\n"
                         "https://www.ypojie.com/9830.html\n"
                         "\n"
                         "如果两个都打不开,大概率是蓝奏云挂了-。-可选择忽略后面的弹框,继续运行旧版本,或者手动去QQ群或github下载最新版本"
                         ))
            raise Exception("下载失败")

        return target_path
Example #50
    def _upload_to_lanzouyun(self,
                             filepath: str,
                             target_folder: Folder,
                             history_file_prefix="") -> bool:
        filename = os.path.basename(filepath)
        logger.warning(f"开始上传 {filename} 到 {target_folder.name}")
        run_start_time = datetime.now()

        def on_uploaded(fid, is_file):
            if not is_file:
                return

            logger.info(f"上传完成,fid={fid}")

            prefix = history_file_prefix
            if prefix == "":
                prefix = self.history_version_prefix

            folder_history_files = self.folder_history_files
            if target_folder.id == self.folder_online_files.id:
                folder_history_files = self.folder_online_files_history_files

            files = self.lzy.get_file_list(target_folder.id)
            for file in files:
                if file.name.startswith(prefix):
                    self.lzy.move_file(file.id, folder_history_files.id)
                    logger.info(
                        f"Moved {file.name} to the folder ({folder_history_files.name})")

            logger.info(f"Moving the file into the folder ({target_folder.name})")
            self.lzy.move_file(fid, target_folder.id)

        # Upload into the specified folder
        retCode = self.lzy.upload_file(filepath,
                                       -1,
                                       callback=self.show_progress,
                                       uploaded_handler=on_uploaded)
        if retCode != LanZouCloud.SUCCESS:
            logger.error(f"上传失败,retCode={retCode}")
            return False

        filesize = os.path.getsize(filepath)
        logger.warning(
            color("bold_yellow") +
            f"Uploading {filename} ({human_readable_size(filesize)}) took {datetime.now() - run_start_time} in total"
        )

        return True
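A hypothetical call site for the upload helper above, assuming the surrounding object exposes the folders the method references; the file path and prefix are made up for illustration:

# 'uploader' is an instance of the class above
ok = uploader._upload_to_lanzouyun(
    "dist/djc_helper.zip",               # local artifact to publish (assumed name)
    uploader.folder_online_files,        # target folder referenced in the method
    history_file_prefix="djc_helper_v")  # older builds with this prefix get archived
if not ok:
    raise SystemExit("upload failed")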
Beispiel #51
0
 def print_fps(self):
     ''' Frames-per-second measurement, printed to the terminal.
     FIXME: display on screen
     ./glut_viewer.py -i test/data/bunny.obj 2>&1 | grep fps
     '''
     max_value = 20  # we measure 20 frames
     self.fps_counter += 1
     if self.fps_counter == max_value:
         t = time()
         f = (t - self.timer) / 20.  # average seconds per frame over the last 20 frames
         if False:
             logger.warning('frame rendered in %f' % (f))
             logger.warning('fps %f' % (1. / f))
         self.timer = t
         self.fps_counter = 0
         self.fps = '%.2f' % (1. / f)
Beispiel #52
0
    def authorize_registry(cls, request):
        info = authorize.registry.parse_request(request)

        if not authorize.registry.ip_in_whitelist(info.client_ip):
            if not authorize.utils.is_valid_user(info.username, info.password):
                logger.warning("requests from %s, %s not valid" %
                               (info.username, info.password))
                return False, 'the username or password may not be correct.'
            if info.appname:
                succ, role = Group.get_user_role(info.username, info.appname)
                if not succ:
                    logger.warning("requests from %s for %s not valid" %
                                   (info.username, info.appname))
                    return False, 'the user has no access to %s' % info.appname

        return True, authorize.registry.get_jwt_with_request_info(info)
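A sketch of how the check above might be consumed; the view function, class name, and response shapes are assumptions, only the (ok, payload) contract comes from the code:

def registry_token_view(request):
    # On success the payload is the JWT built from the request info;
    # on failure it is a human-readable reason (names here are hypothetical).
    ok, payload = AuthorizeHandlers.authorize_registry(request)
    if ok:
        return {"token": payload}
    return {"error": payload}, 401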
Beispiel #53
0
def set_defaults(opts):
    # If synthetic data is passed together with train_file, prefer the synthetic data
    if opts['generated_data'] and opts['train_file']:
        logger.warning(
            "generated-data flag passed, turning off the train_file flag")
        opts['train_file'] = None
        opts['test_file'] = None
    # If neither generated_data nor train_file is passed, there is nothing to run on, so raise
    if not opts['generated_data'] and not opts['train_file']:
        raise ValueError(
            'Neither generated-data nor train-file was passed, set at least one of them to run the code.'
        )

    # In the case of synthetic-data training we set the duplication factor to 1
    if opts['generated_data']:
        opts['duplication_factor'] = 1
Beispiel #54
0
async def upload_log(log_filename,
                     uploader_path,
                     logs_path='/logs',
                     upload_timeout_sec=60):
    upload_command = (uploader_path, 'upload', log_filename, logs_path)
    if not os.path.isfile(log_filename):
        logger.warning(
            "File to upload {} could not be found".format(log_filename))
    else:
        try:
            await asyncio.wait_for(do_upload(upload_command),
                                   upload_timeout_sec)
            return True
        except asyncio.TimeoutError:
            logger.warning('Upload timeout!')
            return False
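do_upload itself is not shown above; a minimal sketch of what it plausibly does, assuming the uploader is an external binary invoked as a subprocess:

import asyncio

async def do_upload(upload_command):
    """Assumed helper: run the uploader command and wait for it to finish."""
    proc = await asyncio.create_subprocess_exec(
        *upload_command,
        stdout=asyncio.subprocess.DEVNULL,
        stderr=asyncio.subprocess.DEVNULL)
    await proc.wait()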
Beispiel #55
0
 def inCitys():
     try:
         with connection.cursor() as cursor:
             cursor.execute('SELECT city_name FROM City')
             li_citys = [x for y in cursor.fetchall()
                         for x in y]  # flatten the result rows into a list of city names
             '''
             The relation between a city's rental listings and its province should be filtered in SQL, not in Python, so just list all cities that have valid rental information.
             '''
             return li_citys
     except ProgrammingError as p:
         logger.error('ProgrammingError has detected: {0}'.format(p))
     except OperationalError as o:
         logger.error('OperationalError has detected: {0}'.format(o))
     except Warning as w:
         logger.warning('Warning has detected: {0}'.format(w))
Beispiel #56
0
def download(url, title):
    title = re.sub(
        u"([^\u4e00-\u9fa5\u0030-\u0039\u0041-"
        u"\u005a\u0061-\u007a])", " ", title)
    base_ts_path = './m3u8/'
    ts_path = base_ts_path + title + '/'
    # print(ts_path)
    # exit()
    save_path = f'./m3u8_video/{title}.mp4'

    m3u8_test = get_m3u8_test(url)
    ts_url_list = get_ts_url_list(m3u8_test, url)
    write_ts(ts_url_list, ts_path)

    download_m3u8_video(ts_path, save_path, title)
    logger.warning(f'{title} download finished!')
Beispiel #57
0
 def try_lottery_using_cards(self, print_warning=True):
     if self.enable_cost_all_cards_and_do_lottery():
         if print_warning:
             logger.warning(
                 color("fg_bold_cyan") +
                 f"The 'spend all cards on the lottery' option for card activity ({self.zzconfig.actid}) is enabled; if you have not yet redeemed all rewards, enabling it is not recommended"
             )
         card_counts = self.get_card_counts()
         for name, count in card_counts.items():
             self.lottery_using_cards(name, count)
     else:
         if print_warning:
             logger.warning(
                 color("fg_bold_cyan") +
                 f"The 'spend all cards on the lottery' option for card activity ({self.zzconfig.actid}) is not enabled; it is recommended to enable it only after every gift pack has been redeemed, so the cards are fully used."
             )
Beispiel #58
0
 def delete_group_member(cls, access_token, appname, username):
     try:
         response = authorize.utils.delete_group_member(
             access_token, appname, username)
         if response.status_code != 204:
             logger.warning("fail delete group member %s from app %s : %s" %
                            (username, appname, response.text))
             return False, "fail delete group member %s from app %s : %s" % (
                 username, appname, response.text)
         else:
             return True, "delete group member successfully"
     except Exception, e:
         logger.error('Exception delete group member %s from app %s: %s' %
                      (username, appname, str(e)))
         return False, "sso system wrong when deleting group member %s from app %s" % (
             username, appname)
Beispiel #59
0
def short_link(permalink):
    parsed = urlparse(permalink)
    parts = parsed.path.split("/")
    if len(parts) == 7:
        postid = parts[4]
        text = postid
        link = "{}/comments/{}/".format(BASE_URL, postid)
    elif len(parts) == 8:
        postid = parts[4]
        commentid = parts[6]
        text = postid + ":" + commentid
        link = "{}/comments/{}/_/{}/".format(BASE_URL, postid, commentid)
    else:
        logger.warning("unexpected permalink: " + permalink)
        text, link = permalink, permalink
    return text, link
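A usage example for short_link with reddit-style permalinks (7 path parts for a post, 8 for a comment); BASE_URL is whatever the surrounding module defines:

# Post permalink: the path splits into 7 parts, so only the post id is used
text, link = short_link("https://www.reddit.com/r/python/comments/abc123/some_title/")
# text == "abc123", link == BASE_URL + "/comments/abc123/"

# Comment permalink: 8 parts, so post id and comment id are combined
text, link = short_link("https://www.reddit.com/r/python/comments/abc123/some_title/def456/")
# text == "abc123:def456"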
Beispiel #60
0
 def ping():
     #logger.debug("ping for %s", self.id)
     now = ioloop.time()
     if self._expecting_pong:
         logger.warning("kernel %s died unexpectedly", self.id)
         self.stop()
     elif now > self.hard_deadline:
         logger.info("hard deadline reached for %s", self.id)
         self.stop()
     elif (self.timeout > 0 and now > self.deadline
           and self.status == "idle"):
         logger.info("kernel %s timed out", self.id)
         self.stop()
     else:
         hb.send(b'ping')
         self._expecting_pong = True
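The ping closure above is presumably fired on a timer; a minimal wiring sketch assuming tornado, which the ioloop and heartbeat names suggest:

from tornado.ioloop import PeriodicCallback

# Assumed wiring: call ping() every 3 seconds. If no pong has arrived by the
# next tick (_expecting_pong still True), the kernel is declared dead and stopped.
beat = PeriodicCallback(ping, 3000)  # interval is in milliseconds
beat.start()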