Example #1
    def __init__(self, device_server, stream, address):
        self.recv_msg_cond = Condition()
        self.recv_msg = {}
        self.send_msg_sem = Semaphore(1)
        self.pending_request_cnt = 0
        self.device_server = device_server
        self.stream = stream
        self.address = address
        self.stream.set_nodelay(True)
        self.idle_time = 0
        self.killed = False
        self.sn = ""
        self.private_key = ""
        self.node_id = 0
        self.name = ""
        self.iv = None
        self.cipher = None

        #self.state_waiters = []
        #self.state_happened = []

        self.event_waiters = []
        self.event_happened = []

        self.ota_ing = False
        self.ota_notify_done_future = None
        self.post_ota = False
        self.online_status = True
Example #2
    def __init__(self, device_server, stream, address, conn_pool):
        self.fw_version = 0.0
        self.recv_msg_cond = Condition()
        self.recv_msg = {}
        self.send_msg_sem = Semaphore(1)
        self.pending_request_cnt = 0
        self.device_server = device_server
        self.device_server_conn_pool = conn_pool
        self.stream = stream
        self.address = address
        self.stream.set_nodelay(True)
        self.stream.set_close_callback(self.on_close)
        self.timeout_handler_onlinecheck = None
        self.timeout_handler_offline = None
        self.killed = False
        self.is_junk = False
        self.sn = ""
        self.private_key = ""
        self.node_id = ""
        self.user_id = ""
        self.iv = None
        self.cipher_down = None
        self.cipher_up = None

        self.event_waiters = []
        self.event_happened = []

        self.ota_ing = False
        self.ota_notify_done_future = None
        self.post_ota = False
        self.online_status = True
Example #3
def setup_handler(
    pairs_path,
    nonpairs_path,
    lang_names,
    missing_freqs_path,
    timeout,
    max_pipes_per_pair,
    min_pipes_per_pair,
    max_users_per_pipe,
    max_idle_secs,
    restart_pipe_after,
    max_doc_pipes,
    verbosity=0,
    scale_mt_logs=False,
    memory=1000,
    apy_keys=None,
):

    global missing_freqs_db
    if missing_freqs_path:
        missing_freqs_db = missingdb.MissingDb(missing_freqs_path, memory)

    handler = BaseHandler
    handler.lang_names = lang_names
    handler.timeout = timeout
    handler.max_pipes_per_pair = max_pipes_per_pair
    handler.min_pipes_per_pair = min_pipes_per_pair
    handler.max_users_per_pipe = max_users_per_pipe
    handler.max_idle_secs = max_idle_secs
    handler.restart_pipe_after = restart_pipe_after
    handler.scale_mt_logs = scale_mt_logs
    handler.verbosity = verbosity
    handler.doc_pipe_sem = Semaphore(max_doc_pipes)
    handler.api_keys_conf = apy_keys

    modes = search_path(pairs_path, verbosity=verbosity)
    if nonpairs_path:
        src_modes = search_path(nonpairs_path,
                                include_pairs=False,
                                verbosity=verbosity)
        for mtype in modes:
            modes[mtype] += src_modes[mtype]

    for mtype in modes:
        logging.info('%d %s modes found', len(modes[mtype]), mtype)

    for path, lang_src, lang_trg in modes['pair']:
        handler.pairs['%s-%s' % (lang_src, lang_trg)] = path
    for dirpath, modename, lang_pair in modes['analyzer']:
        handler.analyzers[lang_pair] = (dirpath, modename)
    for dirpath, modename, lang_pair in modes['generator']:
        handler.generators[lang_pair] = (dirpath, modename)
    for dirpath, modename, lang_pair in modes['tagger']:
        handler.taggers[lang_pair] = (dirpath, modename)
    for dirpath, modename, lang_src in modes['spell']:
        if any(lang_src == elem[2] for elem in modes['tokenise']):
            handler.spellers[lang_src] = (dirpath, modename)

    handler.init_pairs_graph()
    handler.init_paths()
Example #4
    def __init__(self, pubnub_instance):

        subscription_manager = self

        self._message_queue = Queue()
        self._consumer_event = Event()
        self._cancellation_event = Event()
        self._subscription_lock = Semaphore(1)
        # self._current_request_key_object = None
        self._heartbeat_periodic_callback = None
        self._reconnection_manager = TornadoReconnectionManager(pubnub_instance)

        super(TornadoSubscriptionManager, self).__init__(pubnub_instance)
        self._start_worker()

        class TornadoReconnectionCallback(ReconnectionCallback):
            def on_reconnect(self):
                subscription_manager.reconnect()

                pn_status = PNStatus()
                pn_status.category = PNStatusCategory.PNReconnectedCategory
                pn_status.error = False

                subscription_manager._subscription_status_announced = True
                subscription_manager._listener_manager.announce_status(pn_status)

        self._reconnection_listener = TornadoReconnectionCallback()
        self._reconnection_manager.set_reconnection_listener(self._reconnection_listener)
Example #5
    def __init__(self, client_id, client_secret, api_key,
            api_uri=HAD_API_URI, auth_uri=HAD_AUTH_URI,
            token_uri=HAD_TOKEN_URI, rqlim_time=RQLIM_TIME,
            client=None, log=None, io_loop=None):

        if log is None:
            log = extdlog.getLogger(self.__class__.__module__)

        if io_loop is None:
            io_loop = IOLoop.current()

        if client is None:
            client = AsyncHTTPClient()

        self._client = client
        self._io_loop = io_loop
        self._log = log
        self._client_id = client_id
        self._client_secret = client_secret
        self._api_key = api_key
        self._api_uri = api_uri
        self._auth_uri = auth_uri
        self._token_uri = token_uri

        # Timestamp of the most recent request (used for rate limiting)
        self._last_rq = 0.0
        self._rqlim_time = rqlim_time

        # Semaphore to limit concurrent access
        self._rq_sem = Semaphore(1)

        # If None, then no "forbidden" status is current.
        # Otherwise, this stores when the "forbidden" flag expires.
        self._forbidden_expiry = None
Example #6
        def __init__(self,
                     max_concurrent_batches=10,
                     block_on_send=False,
                     block_on_response=False,
                     max_batch_size=100,
                     send_frequency=timedelta(seconds=0.25),
                     user_agent_addition=''):
            if not has_tornado:
                raise ImportError(
                    'TornadoTransmission requires tornado, but it was not found.'
                )

            self.block_on_send = block_on_send
            self.block_on_response = block_on_response
            self.max_batch_size = max_batch_size
            self.send_frequency = send_frequency

            user_agent = "libhoney-py/" + VERSION
            if user_agent_addition:
                user_agent += " " + user_agent_addition

            self.http_client = AsyncHTTPClient(
                force_instance=True, defaults=dict(user_agent=user_agent))

            # libhoney adds events to the pending queue for us to send
            self.pending = Queue(maxsize=1000)
            # we hand back responses from the API on the responses queue
            self.responses = Queue(maxsize=2000)

            self.batch_data = {}
            self.sd = statsd.StatsClient(prefix="libhoney")
            self.batch_sem = Semaphore(max_concurrent_batches)
Example #7
File: ipc.py Project: songfj/salt
 def __singleton_init__(self, socket_path, io_loop=None):
     super(IPCMessageSubscriber, self).__singleton_init__(socket_path,
                                                          io_loop=io_loop)
     self._read_sync_future = None
     self._read_stream_future = None
     self._sync_ioloop_running = False
     self.saved_data = []
     self._sync_read_in_progress = Semaphore()
Example #8
    def __init__(self, db_uri, project_id, admin_uid, client_id, client_secret,
                 api_key, api_rq_interval, domain, secure, static_uri,
                 static_path, thread_count, crawler_config, **kwargs):
        self._log = extdlog.getLogger(self.__class__.__name__)
        # Database connection
        self._db = Database(db_uri, log=self._log.getChild('db'))
        # Session management connection
        self._pool = WorkerPool(thread_count)
        self._hasher = ImageHasher(self._log.getChild('hasher'), self._pool)
        AsyncHTTPClient.configure(
            None,
            defaults=dict(
                user_agent=
                "HADSH/0.0.1 (https://hackaday.io/project/29161-hackadayio-spambot-hunter-project)"
            ))
        self._api = HackadayAPI(client_id=client_id,
                                client_secret=client_secret,
                                api_key=api_key,
                                rqlim_time=api_rq_interval,
                                client=AsyncHTTPClient(),
                                log=self._log.getChild('api'))
        self._crawler = Crawler(project_id,
                                admin_uid,
                                self._db,
                                self._api,
                                self._hasher,
                                self._log.getChild('crawler'),
                                config=crawler_config)
        self._resizer = ImageResizer(self._log.getChild('resizer'), self._pool)
        self._domain = domain
        self._secure = secure
        self._classify_sem = Semaphore(1)

        self._crypt_context = CryptContext(['argon2', 'scrypt', 'bcrypt'])

        # Initialise traits
        init_traits(self, self._log.getChild('trait'))

        super(HADSHApp, self).__init__([
            (r"/", RootHandler),
            (r"/login", LoginHandler),
            (r"/avatar/([0-9]+)", AvatarHandler),
            (r"/avatar/(average_hash|dhash|phash|whash|sha512)/([0-9]+)", \
                    AvatarHashHandler),
            (r"/user/([0-9]+)", UserHandler),
            (r"/callback", CallbackHandler),
            (r"/classify/([0-9]+)", ClassifyHandler),
            (r"/data/newcomers.json", NewcomerDataHandler),
            (r"/data/legit.json", LegitUserDataHandler),
            (r"/data/suspect.json", SuspectUserDataHandler),
            (r"/data/admin.json", AdminUserDataHandler),
            (r"/authorize", RedirectHandler, {
                "url": self._api.auth_uri
            }),
        ],
        static_url_prefix=static_uri,
        static_path=static_path,
        **kwargs)
Example #9
 def __init__(self, workers=None, io_loop=None):
     if workers is None:
         workers = cpu_count()
     if io_loop is None:
         io_loop = IOLoop.current()
     self._io_loop = io_loop
     self._sem = Semaphore(workers)
     self._queue = Queue()
     self._active = False
Example #10
 def __init__(self, pubnub_instance):
     self._message_queue = Queue()
     self._consumer_event = Event()
     self._subscription_lock = Semaphore(1)
     # self._current_request_key_object = None
     self._heartbeat_periodic_callback = None
     self._cancellation_event = None
     super(TornadoSubscriptionManager, self).__init__(pubnub_instance)
     self._start_worker()
Example #11
    def __init__(self, trunk_id, name, max_lines, tag, direction):
        self.id        = trunk_id
        self.name      = name
        self.max_lines = max_lines
        self.tag       = tag
        self.direction = direction
        self.lines     = {l: {} for l in range(0, max_lines)}
        self.semaphore = Semaphore(1)
        self.callers   = {}
        self.counters  = {'answered': 0, 'total': 0, 'rejected': 0}  # assumed defaults, mirroring the counters dict in Example #18
Example #12
 def __init__(self, start_link=None):
     self._init_defaults()
     # Now load the config file to override defaults
     self._load_config()
     
     if start_link:
         self.start_link = start_link
     if not self.start_link:
         raise SystemExit("No start link is provided, exiting now...")
     links.put(self.start_link)
     self.semaphore = Semaphore(self.workers_limit)
Example #13
@gen.coroutine
def extract_proxies_async(requests_proxies):
    """
    :rtype: set, e.g. {'http://123.169.238.33:8888', ...}
    """

    SEMA = Semaphore(CONCURRENT_NUM)

    @gen.coroutine
    def worker(instance, idx, item, requests_proxies):
        with (yield SEMA.acquire()):
            proxies = instance.parse_proxies_async(item)
            yield instance.async_http(proxies, idx, requests_proxies)

    instance = ExtractProxies.instance()
    raw_proxies = instance.get_proxies()
    if raw_proxies is not None:
        yield [worker(instance, idx, item, requests_proxies)
               for idx, item in enumerate(raw_proxies)]
Example #14
    def __init__(self, prespawn_count, kernel_manager):
        # Make sure there's at least one kernel as a delegate
        if not prespawn_count:
            prespawn_count = 1

        super(ManagedKernelPool, self).__init__(prespawn_count, kernel_manager)

        self.kernel_clients = {}
        self.on_recv_funcs = {}
        self.kernel_pool = []

        kernel_ids = self.kernel_manager.list_kernel_ids()
        self.kernel_semaphore = Semaphore(len(kernel_ids))

        # Create clients and iopub handlers for prespawned kernels
        for kernel_id in kernel_ids:
            self.kernel_clients[kernel_id] = kernel_manager.get_kernel(
                kernel_id).client()
            self.kernel_pool.append(kernel_id)
            iopub = self.kernel_manager.connect_iopub(kernel_id)
            iopub.on_recv(self.create_on_reply(kernel_id))
Example #15
 def __init__(self, method='ascii', **kwargs):
     super(AsyncModbusGeneratorClient, self).__init__(method=method,
                                                      **kwargs)
     self.sem = Semaphore(1)
Example #16
#!/usr/bin/env python
# coding: utf-8


# Semaphore: a semaphore manages a counter equal to the number of release()
# calls minus the number of acquire() calls, plus an initial value. The
# acquire() method blocks, if necessary, until it can return without making
# the counter negative. A semaphore limits access to a shared resource; to
# allow two workers to hold the resource at the same time, the code is as follows.


from tornado import gen
from tornado.ioloop import IOLoop
from tornado.locks import Semaphore
sem = Semaphore(2)


@gen.coroutine
def worker(worker_id):
    with (yield sem.acquire()):  # acquire() returns a context manager; the semaphore is released on exiting the with-block
        print("Worker %d is working" % worker_id)
    print("Worker %d is done" % worker_id)
    
#     yield sem.acquire()
#     try:
#         print('Worker %d is working' % worker_id)
# #         yield use_some_resource()
#     finally:
#         print('Worker %d is done' % worker_id)
#         sem.release()
        
@gen.coroutine
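# The snippet is cut off after the decorator above. In the canonical Tornado
# Semaphore example that this code mirrors, the decorator precedes a runner
# coroutine; a minimal sketch, assuming the original followed the Tornado docs:
def runner():
    # Join all workers; at most two run at once because of Semaphore(2).
    yield [worker(i) for i in range(3)]

IOLoop.current().run_sync(runner)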
Example #17
class BaseHandler(tornado.web.RequestHandler):
    pairs = {}  # type: Dict[str, str]
    analyzers = {}  # type: Dict[str, Tuple[str, str]]
    generators = {}  # type: Dict[str, Tuple[str, str]]
    taggers = {}  # type: Dict[str, Tuple[str, str]]
    spellers = {}  # type: Dict[str, Tuple[str, str]]
    # (l1, l2): [translation.Pipeline], only contains flushing pairs!
    pipelines = {}  # type: Dict[str, List]
    pipelines_holding = []  # type: List
    callback = None
    timeout = None
    scale_mt_logs = False
    verbosity = 0
    api_keys_conf = None

    # dict representing a graph of translation pairs; keys are source languages
    # e.g. pairs_graph['eng'] = ['fra', 'spa']
    pairs_graph = {}  # type: Dict[str, List[str]]
    # 2-D dict storing the shortest path for a chained translation pair
    # keys are source and target languages
    # e.g. paths['eng']['fra'] = ['eng', 'spa', 'fra']
    paths = {}  # type: Dict[str, Dict[str, List[str]]]

    stats = {
        'startdate': datetime.now(),
        'useCount': {},
        'vmsize': 0,
        'timing': [],
    }

    # (l1, l2): translation.ParsedModes
    pipeline_cmds = {}  # type: Dict
    max_pipes_per_pair = 1
    min_pipes_per_pair = 0
    max_users_per_pipe = 5
    max_idle_secs = 0
    restart_pipe_after = 1000
    doc_pipe_sem = Semaphore(3)
    # Empty the url_cache[pair] when it's this full:
    max_inmemory_url_cache = 1000  # type: int
    url_cache = {}  # type: Dict[Tuple[str, str], Dict[str, str]]
    url_cache_path = None  # type: Optional[str]
    # Keep half a gig free when storing url_cache to disk:
    min_free_space_disk_url_cache = 512 * 1024 * 1024  # type: int

    def initialize(self):
        self.callback = self.get_argument('callback', default=None)

    @classmethod
    def init_pairs_graph(cls):
        for pair in cls.pairs:
            lang1, lang2 = pair.split('-')
            if lang1 in cls.pairs_graph:
                cls.pairs_graph[lang1].append(lang2)
            else:
                cls.pairs_graph[lang1] = [lang2]

    @classmethod
    def calculate_paths(cls, start):
        nodes = set()
        for pair in map(lambda x: x.split('-'), cls.pairs):
            nodes.add(pair[0])
            nodes.add(pair[1])
        dists = {}
        prevs = {}
        dists[start] = 0

        while nodes:
            u = min(nodes, key=lambda u: dists.get(u, sys.maxsize))
            nodes.remove(u)
            for v in cls.pairs_graph.get(u, []):
                if v in nodes:
                    other = dists.get(
                        u, sys.maxsize
                    ) + 1  # TODO: weight(u, v) -- lower weight = better translation
                    if other < dists.get(v, sys.maxsize):
                        dists[v] = other
                        prevs[v] = u

        cls.paths[start] = {}
        for u in prevs:
            prev = prevs[u]
            path = [u]
            while prev:
                path.append(prev)
                prev = prevs.get(prev)
            cls.paths[start][u] = list(reversed(path))

    @classmethod
    def init_paths(cls):
        for lang in cls.pairs_graph:
            cls.calculate_paths(lang)

    def log_vmsize(self):
        if self.verbosity < 1:
            return
        scale = {'kB': 1024, 'mB': 1048576, 'KB': 1024, 'MB': 1048576}
        try:
            for line in open('/proc/%d/status' % os.getpid()):
                if line.startswith('VmSize:'):
                    _, num, unit = line.split()
                    break
            vmsize = int(num) * scale[unit]
            if vmsize > self.stats['vmsize']:
                logging.warning('VmSize of %s from %d to %d', os.getpid(),
                                self.stats['vmsize'], vmsize)
                self.stats['vmsize'] = vmsize
        except Exception as e:
            # Don't fail just because we couldn't log:
            logging.info('Exception in log_vmsize: %s', e)

    def send_response(self, data):
        self.log_vmsize()
        if isinstance(data, dict) or isinstance(data, list):
            data = dump_json(data)
            self.set_header('Content-Type', 'application/json; charset=UTF-8')

        if self.callback:
            self.set_header('Content-Type',
                            'application/javascript; charset=UTF-8')
            self._write_buffer.append(utf8('%s(%s)' % (self.callback, data)))
        else:
            self._write_buffer.append(utf8(data))
        self.finish()

    def write_error(self, status_code, **kwargs):
        http_explanations = {
            400:
            'Request not properly formatted or contains languages that Apertium APy does not support',
            404:
            'Resource requested does not exist. URL may have been mistyped',
            408:
            'Server did not receive a complete request within the time it was prepared to wait. Try again',
            500:
            'Unexpected condition on server. Request could not be fulfilled.',
        }
        explanation = kwargs.get('explanation',
                                 http_explanations.get(status_code, ''))
        if 'exc_info' in kwargs and len(kwargs['exc_info']) > 1:
            exception = kwargs['exc_info'][1]
            if hasattr(exception, 'log_message') and exception.log_message:
                explanation = exception.log_message % exception.args
            elif hasattr(exception, 'reason'):
                explanation = exception.reason or tornado.httputil.responses.get(
                    status_code, 'Unknown')
            else:
                explanation = tornado.httputil.responses.get(
                    status_code, 'Unknown')

        result = {
            'status': 'error',
            'code': status_code,
            'message': tornado.httputil.responses.get(status_code, 'Unknown'),
            'explanation': explanation,
        }

        data = dump_json(result)
        self.set_header('Content-Type', 'application/json; charset=UTF-8')

        if self.callback:
            self.set_header('Content-Type',
                            'application/javascript; charset=UTF-8')
            self._write_buffer.append(utf8('%s(%s)' % (self.callback, data)))
        else:
            self._write_buffer.append(utf8(data))
        self.finish()

    def set_default_headers(self):
        self.set_header('Access-Control-Allow-Origin', '*')
        self.set_header('Access-Control-Allow-Methods', 'GET,POST,OPTIONS')
        self.set_header(
            'Access-Control-Allow-Headers',
            'accept, cache-control, origin, x-requested-with, x-file-name, content-type'
        )

    @tornado.web.asynchronous
    def post(self):
        self.get()

    def options(self):
        self.set_status(204)
        self.finish()
Example #18
@gen.coroutine
def _read_trunks():
    global trunks
    global trunks_groups
    if trunks and trunks_groups:
        raise gen.Return(trunks)

    trunks = OrderedDict()
    trunks_groups = OrderedDict()
    try:
        db = pg2.connect(
            'host=192.168.222.20 port=5432 dbname=freeswitch_trunks user=freeswitch password=freeswitch'
        )
        c = db.cursor()
        SELECT = 'select tr.name as group_trunks, trl.phone as trunk_name, o.name as operator_name, o.icon_name, ' \
        'trl.in_trunk_position, c.id as channel_id, trl.direction, d.id as device_id, ' \
        'c.lines as max_lines, c.sip_gateway_name, d.address, c.port, dcl.name as device_class, tr.screen_position, ' \
        'trl.trunk_id ' \
        'from trunk_lines trl ' \
        'left join operators o on (o.id=trl.operator_id) ' \
        'left join trunks tr on (tr.id=trl.trunk_id) ' \
        'left join channels c on (c.id=trl.channel_id) ' \
        'left join devices d on (d.id=c.device_id) ' \
        'left join device_classes dcl on (dcl.id=d.class_id) ' \
        'where (trl.channel_id > 0) and (c.is_active) ' \
        'order by tr.screen_position, group_trunks, trl.in_trunk_position '
        c.execute(SELECT)
        for r in c.fetchall():
            trunk_name = r[9] if r[
                9] else '0' + r[1] if r[1][:2] != '23' else r[1]
            # trunk_name = r[9] if r[9] else '0'+r[1]

            if trunk_name not in trunks:
                trunks[trunk_name] = OrderedDict()
            if r[0] not in trunks_groups:
                trunks_groups[r[0]] = []
            trunks_groups[r[0]].append(trunk_name)
            trunks[trunk_name].update(
                callers=dict(),  # Call(s) currently being handled
                max_lines=r[8],  # channels.lines
                operator_logo='static/img/%s.png' %
                r[3],  # operators.icon_name
                channel_id=r[5],  # channels.id
                counters={
                    'answered': 0,
                    'total': 0,
                    'rejected': 0
                },  # Counters
                direction=r[
                    6],  # trunk_lines.directions ['inbound', 'outbound', 'sms']
                channel='%s:%s' %
                (r[10], r[11]),  # device.address, channel.port
                group=r[0],  # trunks.name
                semaphore=Semaphore(1),  # Trunk lock
                phone=r[1],  # Phone number on this line
                device_id=r[7],
                screen_position=r[13],
                trunk_id=r[14],
            )
            print(trunk_name)
        c.close()
        db.close()
    except Exception as e:
        print('ChannelHandler._read_trunks exception: %s' % e)
    # print('ChannelHandler._read_trunks data: %s' % trunks)
    raise gen.Return(trunks)
Example #19
import threading, psycopg2 as pg2
import datetime
from tornado.locks import Semaphore
from tornado import gen
from collections import OrderedDict
trunks = OrderedDict()
trunks_groups = OrderedDict()
trunks_semaphore = Semaphore(1)


@gen.coroutine
def _read_trunks():
    global trunks
    global trunks_groups
    if trunks and trunks_groups:
        raise gen.Return(trunks)

    trunks = OrderedDict()
    trunks_groups = OrderedDict()
    try:
        db = pg2.connect(
            'host=192.168.222.20 port=5432 dbname=freeswitch_trunks user=freeswitch password=freeswitch'
        )
        c = db.cursor()
        SELECT = 'select tr.name as group_trunks, trl.phone as trunk_name, o.name as operator_name, o.icon_name, ' \
        'trl.in_trunk_position, c.id as channel_id, trl.direction, d.id as device_id, ' \
        'c.lines as max_lines, c.sip_gateway_name, d.address, c.port, dcl.name as device_class, tr.screen_position, ' \
        'trl.trunk_id ' \
        'from trunk_lines trl ' \
        'left join operators o on (o.id=trl.operator_id) ' \
        'left join trunks tr on (tr.id=trl.trunk_id) ' \
Example #20
@coroutine
def cull_idle(url,
              api_token,
              inactive_limit,
              cull_users=False,
              max_age=0,
              concurrency=10):
    """Shutdown idle single-user servers

    If cull_users, inactive *users* will be deleted as well.
    """
    auth_header = {
        'Authorization': 'token %s' % api_token,
    }
    req = HTTPRequest(
        url=url + '/users',
        headers=auth_header,
    )
    now = datetime.now(timezone.utc)
    client = AsyncHTTPClient()

    if concurrency:
        semaphore = Semaphore(concurrency)

        @coroutine
        def fetch(req):
            """client.fetch wrapped in a semaphore to limit concurrency"""
            yield semaphore.acquire()
            try:
                return (yield client.fetch(req))
            finally:
                yield semaphore.release()
    else:
        fetch = client.fetch

    resp = yield fetch(req)
    users = json.loads(resp.body.decode('utf8', 'replace'))
    futures = []

    @coroutine
    def handle_server(user, server_name, server):
        """Handle (maybe) culling a single server

        Returns True if server was culled,
        False otherwise.
        """
        log_name = user['name']
        if server_name:
            log_name = '%s/%s' % (user['name'], server_name)
        if server.get('pending'):
            app_log.warning("Not culling server %s with pending %s", log_name,
                            server['pending'])
            return False

        if server.get('started'):
            age = now - parse_date(server['started'])
        else:
            # started may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if server['last_activity']:
            inactive = now - parse_date(server['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'started' field which is never None
            # for running servers
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling server %s (inactive for %s)", log_name,
                         format_td(inactive))

        if max_age and not should_cull:
            # only check started if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info("Culling server %s (age: %s, inactive for %s)",
                             log_name, format_td(age), format_td(inactive))
                should_cull = True

        if not should_cull:
            app_log.debug("Not culling server %s (age: %s, inactive for %s)",
                          log_name, format_td(age), format_td(inactive))
            return False

        req = HTTPRequest(
            url=url + '/users/%s/server' % quote(user['name']),
            method='DELETE',
            headers=auth_header,
        )
        resp = yield fetch(req)
        if resp.code == 202:
            app_log.warning(
                "Server %s is slow to stop",
                log_name,
            )
            # return False to prevent culling user with pending shutdowns
            return False
        return True

    @coroutine
    def handle_user(user):
        """Handle one user"""
        # shutdown servers first.
        # Hub doesn't allow deleting users with running servers.
        servers = user.get(
            'servers', {
                '': {
                    'started': user.get('started'),
                    'last_activity': user['last_activity'],
                    'pending': user['pending'],
                }
            })
        server_futures = [
            handle_server(user, server_name, server)
            for server_name, server in servers.items()
        ]
        results = yield multi(server_futures)
        if not cull_users:
            return
        # some servers are still running, cannot cull users
        still_alive = len(results) - sum(results)
        if still_alive:
            app_log.debug("Not culling user %s with %i servers still alive",
                          user['name'], still_alive)
            return False

        should_cull = False
        if user.get('created'):
            age = now - parse_date(user['created'])
        else:
            # created may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if user['last_activity']:
            inactive = now - parse_date(user['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'created' field which is never None
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling user %s (inactive for %s)", user['name'],
                         inactive)

        if max_age and not should_cull:
            # only check created if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info("Culling user %s (age: %s, inactive for %s)",
                             user['name'], format_td(age), format_td(inactive))
                should_cull = True

        if not should_cull:
            app_log.debug("Not culling user %s (created: %s, last active: %s)",
                          user['name'], format_td(age), format_td(inactive))
            return False

        req = HTTPRequest(
            url=url + '/users/%s' % user['name'],
            method='DELETE',
            headers=auth_header,
        )
        yield fetch(req)
        return True

    for user in users:
        futures.append((user['name'], handle_user(user)))

    for (name, f) in futures:
        try:
            result = yield f
        except Exception:
            app_log.exception("Error processing %s", name)
        else:
            if result:
                app_log.debug("Finished culling %s", name)
Example #21
@coroutine
def cull_idle(url,
              api_token,
              inactive_limit,
              cull_users=False,
              max_age=0,
              concurrency=10):
    """Shutdown idle single-user servers

    If cull_users, inactive *users* will be deleted as well.
    """
    auth_header = {'Authorization': 'token %s' % api_token}
    req = HTTPRequest(url=url + '/users', headers=auth_header)
    now = datetime.now(timezone.utc)
    client = AsyncHTTPClient()

    if concurrency:
        semaphore = Semaphore(concurrency)

        @coroutine
        def fetch(req):
            """client.fetch wrapped in a semaphore to limit concurrency"""
            yield semaphore.acquire()
            try:
                return (yield client.fetch(req))
            finally:
                yield semaphore.release()

    else:
        fetch = client.fetch

    resp = yield fetch(req)
    users = json.loads(resp.body.decode('utf8', 'replace'))
    futures = []

    @coroutine
    def handle_server(user, server_name, server):
        """Handle (maybe) culling a single server

        Returns True if server is now stopped (user removable),
        False otherwise.
        """
        log_name = user['name']
        if server_name:
            log_name = '%s/%s' % (user['name'], server_name)
        if server.get('pending'):
            app_log.warning("Not culling server %s with pending %s", log_name,
                            server['pending'])
            return False

        # jupyterhub < 0.9 defined 'server.url' once the server was ready
        # as an *implicit* signal that the server was ready.
        # 0.9 adds a dedicated, explicit 'ready' field.
        # By current (0.9) definitions, servers that have no pending
        # events and are not ready shouldn't be in the model,
        # but let's check just to be safe.

        if not server.get('ready', bool(server['url'])):
            app_log.warning("Not culling not-ready not-pending server %s: %s",
                            log_name, server)
            return False

        if server.get('started'):
            age = now - parse_date(server['started'])
        else:
            # started may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if server['last_activity']:
            inactive = now - parse_date(server['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'started' field which is never None
            # for running servers
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling server %s (inactive for %s)", log_name,
                         format_td(inactive))

        if max_age and not should_cull:
            # only check started if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling server %s (age: %s, inactive for %s)",
                    log_name,
                    format_td(age),
                    format_td(inactive),
                )
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling server %s (age: %s, inactive for %s)",
                log_name,
                format_td(age),
                format_td(inactive),
            )
            return False

        if server_name:
            # culling a named server
            delete_url = url + "/users/%s/servers/%s" % (
                quote(user['name']),
                quote(server['name']),
            )
        else:
            delete_url = url + '/users/%s/server' % quote(user['name'])

        req = HTTPRequest(url=delete_url, method='DELETE', headers=auth_header)
        resp = yield fetch(req)
        if resp.code == 202:
            app_log.warning("Server %s is slow to stop", log_name)
            # return False to prevent culling user with pending shutdowns
            return False
        return True

    @coroutine
    def handle_user(user):
        """Handle one user.

        Create a list of their servers, and async exec them.  Wait for
        that to be done, and if all servers are stopped, possibly cull
        the user.
        """
        # shutdown servers first.
        # Hub doesn't allow deleting users with running servers.
        # jupyterhub 0.9 always provides a 'servers' model.
        # 0.8 only does this when named servers are enabled.
        if 'servers' in user:
            servers = user['servers']
        else:
            # jupyterhub < 0.9 without named servers enabled.
            # create servers dict with one entry for the default server
            # from the user model.
            # only if the server is running.
            servers = {}
            if user['server']:
                servers[''] = {
                    'last_activity': user['last_activity'],
                    'pending': user['pending'],
                    'url': user['server'],
                }
        server_futures = [
            handle_server(user, server_name, server)
            for server_name, server in servers.items()
        ]
        results = yield multi(server_futures)
        if not cull_users:
            return
        # some servers are still running, cannot cull users
        still_alive = len(results) - sum(results)
        if still_alive:
            app_log.debug(
                "Not culling user %s with %i servers still alive",
                user['name'],
                still_alive,
            )
            return False

        should_cull = False
        if user.get('created'):
            age = now - parse_date(user['created'])
        else:
            # created may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if user['last_activity']:
            inactive = now - parse_date(user['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'created' field which is never None
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling user %s (inactive for %s)", user['name'],
                         inactive)

        if max_age and not should_cull:
            # only check created if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling user %s (age: %s, inactive for %s)",
                    user['name'],
                    format_td(age),
                    format_td(inactive),
                )
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling user %s (created: %s, last active: %s)",
                user['name'],
                format_td(age),
                format_td(inactive),
            )
            return False

        req = HTTPRequest(url=url + '/users/%s' % user['name'],
                          method='DELETE',
                          headers=auth_header)
        yield fetch(req)
        return True

    for user in users:
        futures.append((user['name'], handle_user(user)))

    for (name, f) in futures:
        try:
            result = yield f
        except Exception:
            app_log.exception("Error processing %s", name)
        else:
            if result:
                app_log.debug("Finished culling %s", name)
Example #22
File: podtube.py Project: kaesi0/PodTube
import datetime
import misaka
import psutil
import requests
from feedgen.feed import FeedGenerator
from pytube import YouTube
from tornado import gen, httputil, ioloop, iostream, process, web
from tornado.locks import Semaphore

__version__ = '3.0'

key = None
video_links = {}
playlist_feed = {}
channel_feed = {}
conversion_queue = {}
converting_lock = Semaphore(2)


def get_youtube_url(video):
    if video in video_links and video_links[video][
            'expire'] > datetime.datetime.now():
        return video_links[video]['url']
    yt = YouTube('http://www.youtube.com/watch?v=' + video)
    vid = yt.streams.get_highest_resolution().url
    parts = {
        part.split('=')[0]: part.split('=')[1]
        for part in vid.split('?')[-1].split('&')
    }
    link = {
        'url': vid,
        'expire': datetime.datetime.fromtimestamp(int(parts['expire']))
Example #23
import nfs
import os
import logging
import time
from tornado import web, gen
from tornado.locks import Semaphore
from tornado.httpclient import AsyncHTTPClient
from framework import settings
from framework.config import config

MAX_BODY_SIZE = 4 * 1024.0 * 1024.0 * 1024.0  # 4GB
GMT_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
AsyncHTTPClient.configure(None, max_body_size=MAX_BODY_SIZE)

logger = logging.getLogger('default')
semaphore = Semaphore(config.get('file_service_semaphore', 5))


class FileHandler(web.RequestHandler):
    @gen.coroutine
    def get(self):
        self.file_name = self.get_argument('filename')  # type: str
        self.space_dir = nfs.join(settings.REPO_DIR,
                                  settings.REPO_ANT_SPACENAME)
        if not nfs.exists(self.space_dir):
            nfs.makedirs(self.space_dir)
        self.file_path = nfs.join(self.space_dir, self.file_name)
        lock_file_name = nfs.extsep + self.file_name + nfs.extsep + 'lock'
        self.lock_file = nfs.join(self.space_dir, lock_file_name)
        logger.info('#%d Request file: %s', id(self.request), self.file_name)
Example #24
@coroutine
def cull_idle(
    url, api_token, profiles_list=[], db_filename="profile_quotas.db", check_every=600, concurrency=10
):
    """Shutdown idle single-user servers"""

    auth_header = {'Authorization': 'token %s' % api_token}
    req = HTTPRequest(url=url + '/users', headers=auth_header)
    now = datetime.now(timezone.utc)
    client = AsyncHTTPClient()

    if concurrency:
        semaphore = Semaphore(concurrency)

        @coroutine
        def fetch(req):
            """client.fetch wrapped in a semaphore to limit concurrency"""
            yield semaphore.acquire()
            try:
                return (yield client.fetch(req))
            finally:
                yield semaphore.release()

    else:
        fetch = client.fetch

    resp = yield fetch(req)
    users = json.loads(resp.body.decode('utf8', 'replace'))
    futures = []

    @coroutine
    def handle_server(user, server_name, server):
        """Handle (maybe) culling a single server

        "server" is the entire server model from the API.

        Returns True if server is now stopped (user removable),
        False otherwise.
        """
        log_name = user['name']
        if server_name:
            log_name = '%s/%s' % (user['name'], server_name)
        if server.get('pending'):
            app_log.warning(
                "Not culling server %s with pending %s", log_name, server['pending']
            )
            return False

        # jupyterhub < 0.9 defined 'server.url' once the server was ready
        # as an *implicit* signal that the server was ready.
        # 0.9 adds a dedicated, explicit 'ready' field.
        # By current (0.9) definitions, servers that have no pending
        # events and are not ready shouldn't be in the model,
        # but let's check just to be safe.

        if not server.get('ready', bool(server['url'])):
            app_log.warning(
                "Not culling not-ready not-pending server %s: %s", log_name, server
            )
            return False

        if server.get('started'):
            age = now - parse_date(server['started'])
        else:
            # started may be undefined on jupyterhub < 0.9
            age = None


        # CUSTOM CULLING TEST CODE HERE
        # Add in additional server tests here.  Return False to mean "don't
        # cull", True means "cull immediately", or, for example, update some
        # other variables like inactive_limit.
        #
        # Here, server['state'] is the result of the get_state method
        # on the spawner.  This does *not* contain the below by
        # default, you may have to modify your spawner to make this
        # work.  The `user` variable is the user model from the API.
        #
        # if server['state']['profile_name'] == 'unlimited'
        #     return False
        # inactive_limit = server['state']['culltime']

        should_cull = False

        # if there's no profile info in the server state to base the
        # determination on, we have nothing to go on
        profile_slug = server.get("state", {}).get("profile_slug", None)
        balance = float("inf")

        if profile_slug:
            conn = db.get_connection(db_filename)
            db.update_user_tokens(conn, profiles_list, user['name'], user['admin'])
            
            for profile in profiles_list:
                if profile["slug"] == profile_slug and "quota" in profile:
                    hours = (check_every / 60 / 60)
                    db.log_usage(conn, profiles_list, user['name'], profile_slug, hours, user['admin'])
                    db.charge_tokens(conn, profiles_list, user['name'], profile_slug, hours, user['admin'])
                    balance = db.get_balance(conn, profiles_list, user['name'], profile_slug, user['admin'])

                    if balance < 0.0:
                        pass
                        # don't actually cull; let the balance go negative (since we have
                        # no way to alert the user that their server is about to be killed)
                        # should_cull = True
            db.close_connection(conn)

        if should_cull:
            app_log.info(
                "Culling server %s (balance for profile %s is %s)", log_name, profile_slug, balance
            )

        if not should_cull:
            app_log.debug(
                "Not culling server %s (balance for profile %s is %s)",
                log_name,
                profile_slug,
                balance,
            )
            return False

        if server_name:
            # culling a named server
            delete_url = url + "/users/%s/servers/%s" % (
                quote(user['name']),
                quote(server['name']),
            )
        else:
            delete_url = url + '/users/%s/server' % quote(user['name'])

        req = HTTPRequest(url=delete_url, method='DELETE', headers=auth_header)
        resp = yield fetch(req)
        if resp.code == 202:
            app_log.warning("Server %s is slow to stop", log_name)
            # return False to prevent culling user with pending shutdowns
            return False
        return True

    @coroutine
    def handle_user(user):
        """Handle one user.

        Create a list of their servers, and async exec them.  Wait for
        that to be done, and if all servers are stopped, possibly cull
        the user.
        """
        # shutdown servers first.
        # Hub doesn't allow deleting users with running servers.
        # jupyterhub 0.9 always provides a 'servers' model.
        # 0.8 only does this when named servers are enabled.
        if 'servers' in user:
            servers = user['servers']
        else:
            # jupyterhub < 0.9 without named servers enabled.
            # create servers dict with one entry for the default server
            # from the user model.
            # only if the server is running.
            servers = {}
            if user['server']:
                servers[''] = {
                    'last_activity': user['last_activity'],
                    'pending': user['pending'],
                    'url': user['server'],
                }
        server_futures = [
            handle_server(user, server_name, server)
            for server_name, server in servers.items()
        ]
        results = yield multi(server_futures)

    for user in users:
        futures.append((user['name'], handle_user(user)))

    for (name, f) in futures:
        try:
            result = yield f
        except Exception:
            app_log.exception("Error processing %s", name)
        else:
            if result:
                app_log.debug("Finished culling %s", name)
Example #25
@coroutine
def cull_idle(
    url,
    api_token,
    inactive_limit,
    protected_users,
    cull_users=False,
    max_age=0,
    concurrency=10,
):
    """Shutdown idle single-user servers
    If cull_users, inactive *users* will be deleted as well.
    """
    auth_header = {
        "Authorization": "token %s" % api_token,
    }
    req = HTTPRequest(
        url=url + "/users",
        headers=auth_header,
    )
    now = datetime.now(timezone.utc)
    client = AsyncHTTPClient()

    if concurrency:
        semaphore = Semaphore(concurrency)

        @coroutine
        def fetch(req):
            """client.fetch wrapped in a semaphore to limit concurrency"""
            yield semaphore.acquire()
            try:
                return (yield client.fetch(req))
            finally:
                yield semaphore.release()

    else:
        fetch = client.fetch

    resp = yield fetch(req)
    users = json.loads(resp.body.decode("utf8", "replace"))
    futures = []

    @coroutine
    def handle_server(user, server_name, server):
        """Handle (maybe) culling a single server
        Returns True if server is now stopped (user removable),
        False otherwise.
        """
        log_name = user["name"]
        if server_name:
            log_name = "%s/%s" % (user["name"], server_name)
        if server.get("pending"):
            app_log.warning("Not culling server %s with pending %s", log_name,
                            server["pending"])
            return False

        # jupyterhub < 0.9 defined 'server.url' once the server was ready
        # as an *implicit* signal that the server was ready.
        # 0.9 adds a dedicated, explicit 'ready' field.
        # By current (0.9) definitions, servers that have no pending
        # events and are not ready shouldn't be in the model,
        # but let's check just to be safe.

        if not server.get("ready", bool(server["url"])):
            app_log.warning("Not culling not-ready not-pending server %s: %s",
                            log_name, server)
            return False

        if server.get("started"):
            age = now - parse_date(server["started"])
        else:
            # started may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if server["last_activity"]:
            inactive = now - parse_date(server["last_activity"])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'started' field which is never None
            # for running servers
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling server %s (inactive for %s)", log_name,
                         format_td(inactive))

        if max_age and not should_cull:
            # only check started if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling server %s (age: %s, inactive for %s)",
                    log_name,
                    format_td(age),
                    format_td(inactive),
                )
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling server %s (age: %s, inactive for %s)",
                log_name,
                format_td(age),
                format_td(inactive),
            )
            return False

        if server_name:
            # culling a named server
            delete_url = url + "/users/%s/servers/%s" % (
                quote(user["name"]),
                quote(server["name"]),
            )
        else:
            delete_url = url + "/users/%s/server" % quote(user["name"])

        req = HTTPRequest(
            url=delete_url,
            method="DELETE",
            headers=auth_header,
        )
        resp = yield fetch(req)
        if resp.code == 202:
            app_log.warning(
                "Server %s is slow to stop",
                log_name,
            )
            # return False to prevent culling user with pending shutdowns
            return False
        return True

    @coroutine
    def handle_user(user):
        """Handle one user.
        Create a list of their servers, and async exec them.  Wait for
        that to be done, and if all servers are stopped, possibly cull
        the user.
        """
        # shutdown servers first.
        # Hub doesn't allow deleting users with running servers.
        # jupyterhub 0.9 always provides a 'servers' model.
        # 0.8 only does this when named servers are enabled.
        if "servers" in user:
            servers = user["servers"]
        else:
            # jupyterhub < 0.9 without named servers enabled.
            # create servers dict with one entry for the default server
            # from the user model.
            # only if the server is running.
            servers = {}
            if user["server"]:
                servers[""] = {
                    "last_activity": user["last_activity"],
                    "pending": user["pending"],
                    "url": user["server"],
                }
        server_futures = [
            handle_server(user, server_name, server)
            for server_name, server in servers.items()
        ]
        results = yield multi(server_futures)
        if not cull_users:
            return
        # some servers are still running, cannot cull users
        still_alive = len(results) - sum(results)
        if still_alive:
            app_log.debug(
                "Not culling user %s with %i servers still alive",
                user["name"],
                still_alive,
            )
            return False

        should_cull = False
        if user.get("created"):
            age = now - parse_date(user["created"])
        else:
            # created may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if user["last_activity"]:
            inactive = now - parse_date(user["last_activity"])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'created' field which is never None
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling user %s (inactive for %s)", user["name"],
                         inactive)

        if max_age and not should_cull:
            # only check created if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling user %s (age: %s, inactive for %s)",
                    user["name"],
                    format_td(age),
                    format_td(inactive),
                )
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling user %s (created: %s, last active: %s)",
                user["name"],
                format_td(age),
                format_td(inactive),
            )
            return False

        req = HTTPRequest(
            url=url + "/users/%s" % user["name"],
            method="DELETE",
            headers=auth_header,
        )
        yield fetch(req)
        return True

    p_users = []
    # Don't kill protected servers
    if protected_users is not None:
        # Expects a list of eithers users or files that contains users
        for protected in protected_users.split(","):
            if os.path.exists(protected):
                db_path = protected
                try:
                    app_log.info("Cull, checking db {} "
                                 "for users".format(db_path))
                    with open(db_path, "r") as db:
                        file_users = [user.rstrip("\r\n") for user in db]
                    p_users.extend(file_users)
                except IOError as err:
                    app_log.error("Cull, failed to open db file {}: "
                                  "{}".format(db_path, err))
            else:
                p_users.append(protected)
        users = [user for user in users if user["name"] not in p_users]

    for user in users:
        futures.append((user["name"], handle_user(user)))

    for (name, f) in futures:
        try:
            result = yield f
        except Exception:
            app_log.exception("Error processing %s", name)
        else:
            if result:
                app_log.debug("Finished culling %s", name)
Example #26
def cull_idle(url,
              api_token,
              inactive_limit,
              cull_users=False,
              remove_named_servers=False,
              max_age=0,
              concurrency=10):
    """Shutdown idle single-user servers

    If cull_users, inactive *users* will be deleted as well.
    """
    auth_header = {'Authorization': 'token %s' % api_token}
    req = HTTPRequest(url=url + '/users', headers=auth_header)
    now = datetime.now(timezone.utc)
    client = AsyncHTTPClient()

    if concurrency:
        semaphore = Semaphore(concurrency)

        @coroutine
        def fetch(req):
            """client.fetch wrapped in a semaphore to limit concurrency"""
            yield semaphore.acquire()
            try:
                return (yield client.fetch(req))
            finally:
                yield semaphore.release()

    else:
        fetch = client.fetch

    resp = yield fetch(req)
    users = json.loads(resp.body.decode('utf8', 'replace'))
    futures = []

    @coroutine
    def handle_server(user, server_name, server, max_age, inactive_limit):
        """Handle (maybe) culling a single server

        "server" is the entire server model from the API.

        Returns True if server is now stopped (user removable),
        False otherwise.
        """
        log_name = user['name']
        if server_name:
            log_name = '%s/%s' % (user['name'], server_name)
        if server.get('pending'):
            app_log.warning("Not culling server %s with pending %s", log_name,
                            server['pending'])
            return False

        # jupyterhub < 0.9 defined 'server.url' once the server was ready
        # as an *implicit* signal that the server was ready.
        # 0.9 adds a dedicated, explicit 'ready' field.
        # By current (0.9) definitions, servers that have no pending
        # events and are not ready shouldn't be in the model,
        # but let's check just to be safe.

        if not server.get('ready', bool(server['url'])):
            app_log.warning("Not culling not-ready not-pending server %s: %s",
                            log_name, server)
            return False

        if server.get('started'):
            age = now - parse_date(server['started'])
        else:
            # started may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if server['last_activity']:
            inactive = now - parse_date(server['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'started' field which is never None
            # for running servers
            inactive = age

        # CUSTOM CULLING TEST CODE HERE
        # Add in additional server tests here.  Return False to mean "don't
        # cull", True means "cull immediately", or, for example, update some
        # other variables like inactive_limit.
        #
        # Here, server['state'] is the result of the get_state method
        # on the spawner.  This does *not* contain the below by
        # default, you may have to modify your spawner to make this
        # work.  The `user` variable is the user model from the API.
        #
        # if server['state']['profile_name'] == 'unlimited':
        #     return False
        # inactive_limit = server['state']['culltime']
        state = server['state']
        # Support getting state from a wrapspawner child's config.
        if 'child_conf' in state:
            state = state['child_conf']
        if 'cull_max_age' in state:
            max_age = max(max_age, state['cull_max_age'])
        if 'cull_inactive_limit' in state:
            inactive_limit = max(inactive_limit, state['cull_inactive_limit'])
        app_log.info(
            "CULL IDLE: %s/%s: max_age=%s inactive=%s inactive_limit=%s "
            "age=%s last_activity=%s", user['name'], server_name, max_age,
            inactive, inactive_limit, age, server['last_activity'])

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling server %s (inactive for %s)", log_name,
                         format_td(inactive))

        if max_age and not should_cull:
            # only check started if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling server %s (age: %s, inactive for %s)",
                    log_name,
                    format_td(age),
                    format_td(inactive),
                )
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling server %s (age: %s, inactive for %s)",
                log_name,
                format_td(age),
                format_td(inactive),
            )
            return False

        body = None
        if server_name:
            # culling a named server
            # A named server can be stopped and kept available to the user
            # for starting again or stopped and removed. To remove the named
            # server we have to pass an additional option in the body of our
            # DELETE request.
            delete_url = url + "/users/%s/servers/%s" % (
                quote(user['name']),
                quote(server['name']),
            )
            if remove_named_servers:
                body = json.dumps({"remove": True})
        else:
            delete_url = url + '/users/%s/server' % quote(user['name'])

        req = HTTPRequest(url=delete_url,
                          method='DELETE',
                          headers=auth_header,
                          body=body,
                          allow_nonstandard_methods=True)
        resp = yield fetch(req)
        if resp.code == 202:
            app_log.warning("Server %s is slow to stop", log_name)
            # return False to prevent culling user with pending shutdowns
            return False
        return True

    @coroutine
    def handle_user(user):
        """Handle one user.

        Create a list of their servers, and async exec them.  Wait for
        that to be done, and if all servers are stopped, possibly cull
        the user.
        """
        # shutdown servers first.
        # Hub doesn't allow deleting users with running servers.
        # jupyterhub 0.9 always provides a 'servers' model.
        # 0.8 only does this when named servers are enabled.
        if 'servers' in user:
            servers = user['servers']
        else:
            # jupyterhub < 0.9 without named servers enabled.
            # create servers dict with one entry for the default server
            # from the user model.
            # only if the server is running.
            servers = {}
            if user['server']:
                servers[''] = {
                    'last_activity': user['last_activity'],
                    'pending': user['pending'],
                    'url': user['server'],
                }
        server_futures = [
            handle_server(user, server_name, server, max_age, inactive_limit)
            for server_name, server in servers.items()
        ]
        results = yield multi(server_futures)
        if not cull_users:
            return
        # some servers are still running, cannot cull users
        still_alive = len(results) - sum(results)
        if still_alive:
            app_log.debug(
                "Not culling user %s with %i servers still alive",
                user['name'],
                still_alive,
            )
            return False

        should_cull = False
        if user.get('created'):
            age = now - parse_date(user['created'])
        else:
            # created may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if user['last_activity']:
            inactive = now - parse_date(user['last_activity'])
        else:
            # no activity yet, fall back to the creation date.
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'created' field (never None)
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling user %s (inactive for %s)", user['name'],
                         inactive)

        if max_age and not should_cull:
            # only check created if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'created' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling user %s (age: %s, inactive for %s)",
                    user['name'],
                    format_td(age),
                    format_td(inactive),
                )
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling user %s (created: %s, last active: %s)",
                user['name'],
                format_td(age),
                format_td(inactive),
            )
            return False

        req = HTTPRequest(url=url + '/users/%s' % quote(user['name']),
                          method='DELETE',
                          headers=auth_header)
        yield fetch(req)
        return True

    for user in users:
        futures.append((user['name'], handle_user(user)))

    for (name, f) in futures:
        try:
            result = yield f
        except Exception:
            app_log.exception("Error processing %s", name)
        else:
            if result:
                app_log.debug("Finished culling %s", name)
Example #27
async def cull_idle(api_url,
                    base_url,
                    api_token,
                    inactive_limit,
                    max_age=0,
                    warn_timeout=0,
                    concurrency=10,
                    verify_ssl=True):
    """Shutdown idle single-user servers
    If cull_users, inactive *users* will be deleted as well.
    """
    auth_header = {
        'Authorization': 'token %s' % api_token,
    }
    req = HTTPRequest(
        url=api_url + '/users',
        headers=auth_header,
        validate_cert=verify_ssl,
    )
    now = datetime.now(timezone.utc)
    client = AsyncHTTPClient()

    if concurrency:
        semaphore = Semaphore(concurrency)

        async def fetch(req):
            """client.fetch wrapped in a semaphore to limit concurrency"""
            await semaphore.acquire()
            try:
                return (await client.fetch(req))
            finally:
                semaphore.release()
    else:
        fetch = client.fetch

    # tornado.curl_httpclient.CurlError: HTTP 599: Connection timed out after 20003 milliseconds
    # Potential timeout error here? (slow to stop line: 478)
    resp = await fetch(req)
    users = json.loads(resp.body.decode('utf8', 'replace'))
    futures = []

    async def get_server_active(server):
        server_url = urljoin(base_url, server['url'])
        app_log.debug('Server url: %s', server_url)

        if server.get('started'):
            age = now - parse_date(server['started'])
        else:
            # started may be undefined on jupyterhub < 0.9
            age = None

        # check server status
        num_kernels = 0
        try:
            # status query does not change last_activity on notebook server
            req = HTTPRequest(
                url=urljoin(server_url, 'api/status'),
                headers=auth_header,
                validate_cert=verify_ssl,
            )
            resp = await fetch(req)
            status = json.loads(resp.body.decode('utf-8', 'replace'))
            # app_log.info(status)
            inactive = [now - parse_date(status['last_activity'])]
            num_kernels = status['kernels']
        except HTTPClientError as e:
            app_log.error('Failed to get notebook status: %s', e)
            # make sure inactive is defined
            inactive = [age]

        # if an error happened, then num_kernels is still 0
        # TODO: kernel activity tracking is deactivated for now.
        # The code below is problematic: it triggers an update of last_activity
        # on the notebook server. We should also look into other activities,
        # like open shells (processes?). A busy cell that finishes updates
        # last_activity as well.
        # It also seems a user has to keep the notebook tab visible in the
        # foreground:
        #   putting the tab aside stops the refreshing,
        #   minimizing the browser window or moving it off screen does too,
        #   hiding the browser window behind another window stops it as well,
        #   jupyterlab stops polling if document.hidden is true (the old
        #   interface doesn't poll at all)
        #   -> we could hook in here and add a 'keep-alive' extension that
        #      keeps polling (at a slower interval or so?)
        # TODO: to make this more reliable, we should install a notebook
        #       api/service extension that tracks all the activity we want.
        #       This allows us to use the internal notebook API and
        #       container/host process inspection to look at more things too.
        if not num_kernels:
            # no kernel running
            return True, min(inactive), age

        # FIXME: hardcoded to skip the rest of the activity checking;
        #        everything below this return is currently unreachable.
        return True, min(inactive), age

        # assume everything is idle
        idle = True
        # kernels:
        # TODO: we are calling through the proxy here... which will update
        #       the hub's view of inactivity :(
        if app_log.isEnabledFor(logging.DEBUG):
            app_log.debug('Query kernels %s', urljoin(server_url,
                                                      'api/kernels'))
        req = HTTPRequest(
            url=urljoin(server_url, 'api/kernels'),
            headers=auth_header,
            validate_cert=verify_ssl,
        )
        try:
            resp = await fetch(req)
            kernels = json.loads(resp.body.decode('utf-8', 'replace'))
            for kernel in kernels:
                # TODO: seems like kernel state stays in 'starting' after a restart and auto
                #       re-creation of running kernels from last ui state
                idle = idle and (kernel['execution_state']
                                 in ('idle', 'starting'))
                inactive.append(now - parse_date(kernel['last_activity']))
        except HTTPClientError as e:
            app_log.error('Failed to inspect notebook kernels: %s', e)
        # find smallest inactive time
        return idle, min(inactive), age

    async def handle_server(user, server_name, server):
        """Handle (maybe) culling a single server
        Returns True if server is now stopped (user removable),
        False otherwise.
        """
        # import ipdb; ipdb.set_trace()
        log_name = user['name']
        if server_name:
            log_name = '%s/%s' % (user['name'], server_name)
        if server.get('pending'):
            app_log.warning("Not culling server %s with pending %s", log_name,
                            server['pending'])
            return False

        # jupyterhub < 0.9 defined 'server.url' once the server was ready
        # as an *implicit* signal that the server was ready.
        # 0.9 adds a dedicated, explicit 'ready' field.
        # By current (0.9) definitions, servers that have no pending
        # events and are not ready shouldn't be in the model,
        # but let's check just to be safe.

        if not server.get('ready', bool(server['url'])):
            app_log.warning("Not culling not-ready not-pending server %s: %s",
                            log_name, server)
            return False

        idle, inactive, age = await get_server_active(server)
        if not idle:
            # something is not idle
            # when the kernel transitions from busy to idle, the kernel resets
            # the inactive timer as well.
            app_log.debug(
                'Not culling server %s with busy connections. (inactive for %s)',
                log_name, inactive)
            return False

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling server %s (inactive for %s)", log_name,
                         format_td(inactive))

        if max_age and not should_cull:
            # only check started if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info("Culling server %s (age: %s, inactive for %s)",
                             log_name, format_td(age), format_td(inactive))
                should_cull = True

        # should we warn? (inactive can be None on jupyterhub < 0.9
        # when the status query failed, so guard before total_seconds)
        if inactive is not None:
            remaining = inactive_limit - inactive.total_seconds()
            if should_warn(user, warn_timeout, inactive) and remaining > 0:
                IOLoop.current().run_in_executor(
                    None, send_email, user, {
                        'serverurl': urljoin(base_url, server['url']),
                        'inactive': human_seconds(inactive.total_seconds()),
                        'remaining': human_seconds(remaining),
                    })

        if not should_cull:
            app_log.debug("Not culling server %s (age: %s, inactive for %s)",
                          log_name, format_td(age), format_td(inactive))
            return False

        if server_name:
            # culling a named server
            delete_url = api_url + "/users/%s/servers/%s" % (quote(
                user['name']), quote(server['name']))
        else:
            delete_url = api_url + '/users/%s/server' % quote(user['name'])

        req = HTTPRequest(
            url=delete_url,
            method='DELETE',
            headers=auth_header,
            validate_cert=verify_ssl,
        )
        resp = await fetch(req)
        if resp.code == 202:
            app_log.warning(
                "Server %s is slow to stop",
                log_name,
            )
            # return False to prevent culling user with pending shutdowns
            return False
        return True

    async def handle_user(user):
        """Handle one user.
        Create a list of their servers, and cull them concurrently.
        Returns True if all of the user's servers are now stopped.
        """
        # shutdown servers first.

        servers = user['servers']
        server_futures = [
            handle_server(user, server_name, server)
            for server_name, server in servers.items()
        ]
        results = await multi(server_futures)
        return bool(results) and all(results)

    for user in users:
        futures.append((user['name'], handle_user(user)))

    for (name, f) in futures:
        try:
            result = await f
        except Exception:
            app_log.exception("Error processing %s", name)
        else:
            if result:
                app_log.debug("Finished culling %s", name)
Example #28
    def get_resized(self, gallery, photo,
            width=None, height=None, quality=60,
            rotation=0.0, img_format=None, orientation=0):
        """
        Retrieve the given photo in a resized format.
        """
        # Determine the path to the original file.
        orig_node = self._fs_node.join_node(gallery, photo)

        if img_format is None:
            # Detect from original file and quality setting.
            with magic.Magic(flags=magic.MAGIC_MIME_TYPE) as m:
                mime_type = m.id_filename(orig_node.abs_path)
                self._log.debug('%s/%s detected format %s',
                        gallery, photo, mime_type)
                if mime_type == 'image/gif':
                    img_format = ImageFormat.GIF
                else:
                    if quality == 100:
                        # Assume PNG
                        img_format = ImageFormat.PNG
                    else:
                        # Assume JPEG
                        img_format = ImageFormat.JPEG
        else:
            # Use the format given by the user
            img_format = ImageFormat(img_format)

        self._log.debug('%s/%s using %s format',
                gallery, photo, img_format.name)

        # Sanitise dimensions given by user.
        width, height = self.get_dimensions(gallery, photo, width, height)
        self._log.debug('%s/%s target dimensions %d by %d',
                gallery, photo, width, height)

        # Determine where the file would be cached
        (cache_dir, cache_name) = self._get_cache_name(gallery, photo,
                width, height, quality, rotation, img_format)

        # Do we have this file?
        data = self._read_cache(orig_node, cache_dir, cache_name)
        if data is not None:
            raise Return((img_format, cache_name, data))

        # Locate the lock for this photo.
        mutex_key = (gallery, photo, width, height, quality, rotation,
                img_format)
        try:
            mutex = self._mutexes[mutex_key]
        except KeyError:
            mutex = Semaphore(1)
            self._mutexes[mutex_key] = mutex

        resize_args = (gallery, photo, width, height, quality,
                    rotation, img_format.value, orientation)
        try:
            self._log.debug('%s/%s waiting for mutex',
                    gallery, photo)
            yield mutex.acquire()

            # We have the semaphore, call our resize routine.
            self._log.debug('%s/%s retrieving resized image (args=%s)',
                    gallery, photo, resize_args)
            (img_format, file_name, file_data) = yield self._pool.apply(
                func=self._do_resize,
                args=resize_args)
            raise Return((img_format, file_name, file_data))
        except Return:
            raise
        except:
            self._log.exception('Error resizing photo; gallery: %s, photo: %s, '
                    'width: %d, height: %d, quality: %f, rotation: %f, format: %s',
                    gallery, photo, width, height, quality, rotation, img_format)
            raise
        finally:
            mutex.release()
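Because this code runs on a single-threaded IOLoop, the try/except lookup of the per-photo lock can be written more compactly with dict.setdefault; an equivalent sketch (note the _mutexes dict still grows without bound, so a long-running server may want to evict entries after release):

# equivalent per-key lock lookup; Semaphore(1) is constructed eagerly but cheaply
mutex = self._mutexes.setdefault(mutex_key, Semaphore(1))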
Example #29
    return int(round(time.time() * 1000))


request_users = {}


class User(object):
    def __init__(self):
        self.last_visit_time = 0
        self.req_num = 0


index = 0
worker_stop = 0
concurrent_worker_count = 20
consumer = Semaphore(concurrent_worker_count)
request_queue = Queue(maxsize=5000)


async def worker(worker_id):
    print("worker {} start".format(worker_id))
    while not worker_stop:
        await consumer.acquire()
        req_handler = await request_queue.get()
        print("worker {} work".format(worker_id))
        try:
            global index
            remote_ip = req_handler.request.remote_ip
            message = "love you liuchen {} {}".format(remote_ip, index)
            index = index + 1
            # process gRPC here
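The worker body is cut off above; the part not shown presumably releases the consumer semaphore when the request is done. The producer side this pattern implies would enqueue request handlers and spawn the workers at startup, roughly as below (everything beyond the names in the snippet is assumed):

from tornado.ioloop import IOLoop

async def enqueue(req_handler):
    # hand a request handler off to the worker pool
    await request_queue.put(req_handler)

def start_workers():
    loop = IOLoop.current()
    for worker_id in range(concurrent_worker_count):
        # fire-and-forget each worker coroutine on the IOLoop
        loop.spawn_callback(worker, worker_id)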
Example #30
        try:
            yield func(self, *args, **kwargs)
        except Exception:
            self.logger.error(traceback.format_exc())
            if self.request.headers.get("Accept",
                                        "").startswith("application/json"):
                self.send_json_error()
            else:
                self.write_error(500)
            return

    return wrapper


base_cache = BaseRedis()
sem = Semaphore(1)


def cache(prefix=None, key=None, ttl=60, hash=True, lock=True, separator=":"):
    """
    cache decorator

    :param prefix: specify the key prefix
    :param key: specify the cache key
    :param ttl: ttl (s)
    :param hash: whether to hash the key
    :param lock: -
    :param separator: key separator
    :return:
    """
    key_ = key