Example #1
def run_server(_,
               address=CONFIG.SERVER_ADDR,
               port=CONFIG.SERVER_PORT,
               log_level='INFO'):
    """ Launches 'WG Forge' TCP server.
    """
    log.setLevel(log_level)
    ThreadingTCPServer.allow_reuse_address = True
    server = ThreadingTCPServer((address, port), GameServerRequestHandler)
    log.info('Serving on {}'.format(server.socket.getsockname()))
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        log.warning('Server stopped by keyboard interrupt, shutting down...')
    finally:
        try:
            GameServerRequestHandler.shutdown_all_sockets()
            Game.stop_all_games()
            if log.is_queued:
                log.stop()
        finally:
            server.shutdown()
            server.server_close()
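Note: every example on this page imports a shared log object with "from logger import log" and then adjusts its verbosity with log.setLevel(...). The logger module itself is project-specific (Example #1's log even exposes extras such as is_queued and stop()); the following is only a minimal sketch of what such a module might contain, with the handler and format string chosen here as assumptions:

# logger.py -- minimal sketch of the shared "log" object these examples assume.
# The handler and format below are illustrative, not any project's actual code.
import logging
import sys

log = logging.getLogger('app')
log.setLevel(logging.INFO)  # callers typically override this via log.setLevel(...)

_handler = logging.StreamHandler(sys.stdout)
_handler.setFormatter(logging.Formatter(
    '[%(asctime)s] %(levelname)s %(name)s: %(message)s'))
log.addHandler(_handler)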
Example #2
def main():
    conf = configure()
    log.setLevel(conf['log_level'])

    p = Process(target=run_agent,
                args=(conf['agent_interface'], conf['agent_port'],
                      conf['center_interface'], conf['center_port'])
                )
    p.start()

    print(conf)
    dir_names = conf['directories']
    spaces = []
    create_monitoring_db()

    for dir_name in dir_names:
        spaces.append(MonitoringDir(dir_name))

    while True:
        for space_obj in spaces:
            monitoringProc = Process(target=check_space, args=(space_obj,))
            monitoringProc.start()
        sleep(10)
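One possible refinement of the loop above (an assumption about intent, not the original code): joining each round of monitoring processes before sleeping reaps finished workers and keeps checks from piling up if check_space runs longer than the poll interval.

# Sketch of the same polling loop, waiting for each round of checks to finish.
while True:
    procs = [Process(target=check_space, args=(space_obj,)) for space_obj in spaces]
    for proc in procs:
        proc.start()
    for proc in procs:
        proc.join()
    sleep(10)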
Example #3
import os
import subprocess
import logging
from tqdm import tqdm
import requests
# import tarfile
import shutil

from logger import log
from systemData import HOME

log.setLevel(logging.DEBUG)

HADOOP_NAME = 'hadoop-3.2.1'
HADOOP_ROOT = os.path.join(HOME, 'hadoop')
HADOOP_HOME = os.path.join(HADOOP_ROOT, HADOOP_NAME)


def bar_custom(current, total, width=80):
    print("Downloading: %d%% [%d / %d] bytes" %
          (current / total * 100, current, total))


def downloadHadoop(
    reDownload=True,
    filename='hadoop-3.2.1.tar.gz',
    url='http://apachemirror.wuchna.com/hadoop/common/stable/hadoop-3.2.1.tar.gz'
):

    log.debug("Executing Function downloadHadoop.")
Example #4
            elif req[0] == "sync":
                log.info("Sync reports with server started")
                self._do_sync()
                log.info("Sync reports with server finished")
            else:
                log.error(DEBUG_REPORTMGR, "reportmgr thread, unknown command %s" % req[0])


if __name__ == '__main__':
    """
    Unit test
    """
    import time
    import logging
    from PyQt5.Qt import QApplication
    
    log.setLevel(logging.DEBUG)
    app = createQApplication()

    localdb = util.openLocalDatabase2(":memory:")
    
    reportMgr = ReportMgr(localdb=localdb)
    reportMgr.init()
    reportMgr.sync()
    reportMgr.stop()

    while not reportMgr.toThreadQ.empty():
        QApplication.processEvents()
        time.sleep(0.5)
Example #5
import os
import subprocess
# from logger import log
import logging
from logger import log

log.setLevel(logging.INFO)


# TODO: full version check still remaining
def checkJREVersion(output):
    version = output.split('"')[1]
    if 'OpenJDK' in output:
        log.debug(f"OpenJDK Runtime Environment Version : {version}")
        version = int(version.strip().split('.')[0])
        if version > 7 and version <= 11:
            return True
        log.error("JRE version is not supported.")
        return False

    log.debug(f"Java Version : {version}")
    version = int(version.strip().split('.')[1])
    if version > 7 and version <= 11:
        return True
    log.error("JRE version is not supported.")
    return False


def jreIsInstalled():
    log.info("Checking for JRE.")
    log.debug(
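jreIsInstalled is cut off above. A hedged usage sketch of how checkJREVersion is typically fed the banner of "java -version" (the subprocess call below is an assumption, not the truncated body):

# Illustrative only: java -version prints its banner to stderr.
def jre_is_installed_sketch():
    try:
        proc = subprocess.run(['java', '-version'],
                              capture_output=True, text=True, check=True)
    except (OSError, subprocess.CalledProcessError):
        log.error("JRE not found on PATH.")
        return False
    return checkJREVersion(proc.stderr or proc.stdout)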
Example #6
import logging
import os.path
import sys
"""
How to use this logger:
from logger import log

log.info('your info')
log.debug('heyhey')

"""

LOG_LEVEL = logging.DEBUG  # log output level: one of NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL
LOG_FORMAT = '[%(asctime)s] %(funcName)s(%(filename)s:%(lineno)s) [%(levelname)s]:%(message)s'  # log output format

log = logging.getLogger()
log.setLevel(LOG_LEVEL)


def create_log(log_file='/var/log/apnsagent.log'):
    global log

    formatter = logging.Formatter(LOG_FORMAT)
    filehandler = logging.FileHandler(log_file)
    filehandler.setFormatter(formatter)
    log.addHandler(filehandler)


create_log = create_log


def log_ex(msg=None):
Example #7
# coding=utf-8
import gevent
from gevent import monkey
monkey.patch_all()

import sys
import logging
import socket

from octopus.service import octp_client
from octopus.service.selector import round_selector, random_selector

from logger import log
log.setLevel('DEBUG')
log.addHandler(logging.StreamHandler(sys.stdout))


def main():
    while True:
        service = sel.get_service(5)
        print(service)

        if service:
            sock = socket.socket()
            try:
                sock.connect(tuple(service.addr.values()))
                sock.send(b'ping')
                print(sock.recv(1024))
            except Exception as e:
                print(e)
Example #8
# coding=utf-8
import gevent
from gevent import monkey
monkey.patch_all()

import sys
import logging
import socket

from service import octp_client
from service.selector import round_selector, random_selector

from logger import log
log.setLevel('DEBUG')
log.addHandler(logging.StreamHandler(sys.stdout))


def main():
    while True:
        service = sel.get_service(5)
        print(service)

        if service:
            sock = socket.socket()
            try:
                sock.connect(tuple(service.addr.values()))
                sock.send(b'ping')
                print(sock.recv(1024))
            except Exception as e:
                print(e)
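Both gevent snippets reference sel, which is created in code omitted from the excerpts (presumably via round_selector or random_selector). A purely hypothetical stand-in, showing only the interface the loop relies on rather than the real octopus API:

# Hypothetical stand-in for the omitted selector setup; not the octopus selector API.
import collections

Service = collections.namedtuple('Service', ['addr'])

class RoundRobinSelector:
    """Cycles through a static service list; get_service ignores its timeout here."""
    def __init__(self, services):
        self._services = list(services)
        self._idx = 0

    def get_service(self, timeout):
        if not self._services:
            return None
        service = self._services[self._idx % len(self._services)]
        self._idx += 1
        return service

sel = RoundRobinSelector([Service(addr={'host': '127.0.0.1', 'port': 8000})])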
Example #9
def main() -> None:
    """Main function ran when the script is called directly"""
    # Determine whether we're running in a container or by a user
    args = EnvironmentArgs() if os.getenv("CGX_TOKEN", "") else parse_args()
    if args.verbose:
        log.setLevel("DEBUG")
        log.debug("Log level has been overriden by the --verbose argument.")
    # Initialize the CloudGenix handler
    cgx = CloudGenixHandler(token=args.cloudgenix_token)
    # Collect all sites and filter to spokes
    sites = cgx.get_sites()
    sites = [
        {"name": s["name"], "id": s["id"], "tags": s["tags"]}
        for s in sites if s["element_cluster_role"] == "SPOKE"
        ]
    # Filter on tag if specified
    if args.tag:
        log.info(
            "Filtering CloudGenix sites to those containing tag '%s'.", args.tag
            )
        # Sites with no tags return None so make the map conditional
        sites = [
            s for s in sites if args.tag.casefold() in
            (map(lambda x: x.casefold(), s["tags"]) if s["tags"] else [])
            ]
    log.info("Filtered to %s CloudGenix spoke sites.", len(sites))
    for site in sites:
        log.info(
            "Retrieving WAN interfaces for site %s (ID: %s).",
            site["name"], site["id"]
            )
        # Collect all site WAN interfaces
        wan_ints = cgx.get_wan_ints(site["id"])
        for wan_int in wan_ints:
            # Filter out WAN interfaces containing the ignore tag
            if wan_int["tags"]:
                if "auto_bw:false" in \
                        map(lambda x: x.casefold(), wan_int["tags"]):
                    log.info(
                        "%s WAN interface %s (ID: %s) contains 'auto_bw:false' "
                        "tag. Skipping.",
                        site["name"], wan_int["name"], wan_int["id"]
                        )
                    continue
            log.info("Retrieving PCM metrics for %s WAN interface %s (ID: %s).",
            site["name"], wan_int["name"], wan_int["id"]
            )
            # Query CloudGenix for WAN interface PCM metrics
            metrics_query = format_wan_metrics_query(
                site["id"], wan_int["id"], hours=args.hours)
            metrics_wan = cgx.get_wan_metrics(metrics_query)
            metrics_calced = calc_wan_int_capacity(metrics_wan, args.percentile)
            log.info(
                "Site %s WAN interface %s %s-hour download bandwidth capacity "
                "(%sth) is %sMbps.",
                site["name"], wan_int["name"], args.hours,
                args.percentile, metrics_calced["ingress_mbps"]
                )
            log.info(
                "Site %s WAN interface %s %s-hour upload bandwidth capacity "
                "(%sth) is %sMbps.",
                site["name"], wan_int["name"], args.hours,
                args.percentile, metrics_calced["egress_mbps"]
                )
            # Apply the updated bandwidth calculation
            if not (metrics_calced["ingress_mbps"]
                    or metrics_calced["egress_mbps"]):
                log.info(
                    "Site %s WAN interface %s has no usable metrics. Skipping.",
                    site["name"], wan_int["name"]
                    )
                continue
            # Set bandwidth ceiling if one provided
            if args.max:
                for k, v in metrics_calced.items():
                    metrics_calced[k] = args.max if v > args.max else v
            # Update link bandwidth values
            log.info(
                "Updating site %s WAN interface %s bandwidth capacity "
                "(%sMbps down / %sMbps up).",
                site["name"], wan_int["name"], metrics_calced["ingress_mbps"],
                metrics_calced["egress_mbps"]
                )
            wan_int["link_bw_down"] = metrics_calced["ingress_mbps"]
            wan_int["link_bw_up"] = metrics_calced["egress_mbps"]
            resp = cgx.put_wan_int(site["id"], wan_int["id"], wan_int)
            log.info(
                "CloudGenix API response status %s. Reason: %s",
                resp.status_code, resp.reason
                )
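calc_wan_int_capacity and format_wan_metrics_query are not shown above. As a rough illustration only (the input shape is an assumption, not the CloudGenix response format), a percentile reduction over PCM bandwidth samples could look like this:

# Illustrative only: reduce ingress/egress Mbps samples to an Nth-percentile capacity.
def calc_capacity_sketch(ingress_samples, egress_samples, percentile):
    def pct(samples, p):
        if not samples:
            return 0
        ordered = sorted(samples)
        idx = min(len(ordered) - 1, int(round(p / 100 * (len(ordered) - 1))))
        return round(ordered[idx], 2)
    return {
        "ingress_mbps": pct(ingress_samples, percentile),
        "egress_mbps": pct(egress_samples, percentile),
    }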
Example #10
import logging
import sys

"""
How to use this logger:
from logger import log

log.info('your info')
log.debug('heyhey')

"""

LOG_LEVEL = logging.DEBUG  # log output level: one of NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL
LOG_FORMAT = '[%(asctime)s] %(funcName)s(%(filename)s:%(lineno)s) [%(levelname)s]:%(message)s'  # log output format

log = logging.getLogger()
log.setLevel(LOG_LEVEL)

def create_log(log_file='/var/log/apnsagent.log'):
    global log
    
    formatter = logging.Formatter(LOG_FORMAT)
    filehandler = logging.FileHandler(log_file)
    filehandler.setFormatter(formatter)
    log.addHandler(filehandler)

create_log = create_log

def log_ex(msg=None):
    if msg:
        log.error(msg)
    excinfo = sys.exc_info()
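log_ex is cut off above. A hedged usage sketch (the call pattern is an assumption): create_log attaches the file handler once at startup, and log_ex is meant to be called inside an except block so sys.exc_info() has something to report.

# Illustrative usage, assuming the module above is importable as logger.
create_log('/tmp/apnsagent.log')
try:
    1 / 0
except ZeroDivisionError:
    log_ex('division failed')  # logs the message along with the active exception info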
Example #11
        synch_times.append(average_convergence_time * dt)

    #### Plot the convergence
    axis = couple_strength
    fig_conv = plt.figure()
    fig_conv = plt.gcf()
    fig_conv.canvas.manager.set_window_title('Convergence time vs. Coupling Strength')
    plt.plot(axis, synch_times, 'r')  # plot convergence time against coupling strength
    plt.show()


#########################
#         DRIVER        #
#########################
if __name__ == '__main__':
    log.setLevel('INFO')  # Set the logging level
    # Report basic system info
    log.out.info("Python version: " + sys.version)
    CPUCOUNT = mp.cpu_count()
    log.out.info("Number of CPUs: " + str(CPUCOUNT))

    # Choose the demo

    # Couple weakly in two dimensions
    plotSynch(steps=16000,
              dt=0.001,
              kx=0.01,
              ky=0.01,
              kz=0.00,
              init_timesteps=2000,
              synch_max=4000)