Example #1
def worker(client_socket, stopper, rx_queue):
    # The top of this snippet is truncated; this loop head is a reconstruction
    # inferred from the thread arguments used in __main__ below.
    message = ""
    while not stopper.is_set():
        data = client_socket.recv(1024).decode()
        if data == "":
            time.sleep(1)
        else:
            message = message + data
            if "END " in data:
                print("Message Received")
                rx_queue.put(message)
                message = ""
    return

###########################################
###########################################

if __name__ == "__main__":
    rx_queue = Queue.Queue()    # create a Thread Safe Queue
    stopper = threading.Event() # create a Thread Safe Event
        
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client_socket.connect(('localhost', 10003)) # connect to HISTORICAL DATA socket
    
    t = threading.Thread(target=worker, args=(client_socket, stopper, rx_queue ) )
    t.start()
    time.sleep( 5 )
    
    
    # http://213.92.13.32/apiwiki/#ch
    # IT3L+0,00%7,395ETFS 3X DAILY LONG FTSE MIB17:35:36mercato chiuso0JE00B8DVWK97
    # CANDLE <ticker> <number_of_days> <candle_period>
    
    print("Extract 1 hour candles")
    #cmd="CANDLE IT3L 40 3600" 
Example #2
            readable, writable, _ = select.select([sock], toWrite, [], 1)
            for s in readable:
                msg = s.recv(1024)
                # if len(msg) > 0:
                #    assume we were told to shutdown


if __name__ == '__main__':
    # create a background thread to listen to a connection from the master pi
    # upon connection, background thread sets shutdown event
    # main thread begins shutdown
    # background thread waits for shutdown complete
    # background thread signals master pi that shutdown is complete
    # main thread joins on background thread

    startShutdown = threading.Event()
    shutdownComplete = threading.Event()
    bg = threading.Thread(target=talk_to_master, args=(startShutdown, shutdownComplete))
    bg.start()

    # Simulate the main loop of the train program.
    while not startShutdown.is_set():
        print('trains doing train stuff')
        time.sleep(2)

    print('trains returning to station')
    for i in range(20, 0, -1):
        time.sleep(1)
        print(i)
    shutdownComplete.set()
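talk_to_master itself is not shown; a minimal sketch that matches the handshake described in the comments (the master's hostname and port are assumptions):

import socket

def talk_to_master(startShutdown, shutdownComplete):
    sock = socket.create_connection(('master-pi.local', 9999))  # hypothetical address
    sock.recv(1024)           # any message from the master means: shut down
    startShutdown.set()       # the main thread begins returning trains to the station
    shutdownComplete.wait()   # block until the main thread finishes shutting down
    sock.sendall(b'done\n')   # signal the master that shutdown is complete
    sock.close()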
Example #3
    json = {"_save_name": extra_info}

    for i in renpy.config.save_json_callbacks:
        i(json)

    json = json_dumps(json)

    sr = SaveRecord(screenshot, extra_info, json, logf.getvalue())
    location.save(slotname, sr)

    location.scan()
    clear_slot(slotname)


# Flag that lets us know if an autosave is in progress.
autosave_not_running = threading.Event()
autosave_not_running.set()

# The number of times autosave has been called without a save occurring.
autosave_counter = 0


def autosave_thread(take_screenshot):

    global autosave_counter

    try:

        try:

            cycle_saves("auto-", renpy.config.autosave_slots)
Example #4
def __init__(self, parser):
    threading.Thread.__init__(self)
    self.parser = parser
    self._stop = threading.Event()
    self.threads = []
Example #5
def __init__(self, conf, chassis):
    self.conf = conf
    self.chassis = chassis
    self._sb_idl = None
    self._post_fork_event = threading.Event()
    self.subscribe()
"""

import requests
import threading
import os
from bs4 import BeautifulSoup
import re
import time
import tkinter as tk
from os.path import join, abspath

window = tk.Tk()
window.title('微博下载专用工具')  # "Weibo download tool"
window.geometry('+500+300')
window.geometry('400x400')
pause_lock = threading.Event()
l = tk.Label(window, bg='yellow', width=20, text='网址')  # '网址' = "URL"
l.grid(row=1, column=1)
e = tk.Entry(window, show=None)
e.grid(row=1, column=2)
l = tk.Label(window, bg='yellow', width=20, text='页数')  # '页数' = "number of pages"
l.grid(row=2, column=1)
d = tk.Entry(window, show=None)
d.grid(row=2, column=2)


def get_pictureandvideo(ID, x, page):

    baseurl = 'https://m.weibo.cn/api/container/getIndex?'
    y = x.split('&')
    l = {'type': 'uid'}
Example #7
    global mpvManager, _player, _queueEraser
    if mpvManager:
        mpvManager.close()
        mpvManager = None
        _player = None
        _queueEraser = None

# Mplayer in slave mode
##########################################################################

mplayerCmd = ["mplayer", "-really-quiet", "-noautosub"]

mplayerQueue = []
mplayerManager = None
mplayerReader = None
mplayerEvt = threading.Event()
mplayerClear = False

class MplayerMonitor(threading.Thread):

    def run(self):
        global mplayerClear
        self.mplayer = None
        self.deadPlayers = []
        while 1:
            mplayerEvt.wait()
            mplayerEvt.clear()
            # clearing queue?
            if mplayerClear and self.mplayer:
                try:
                    self.mplayer.stdin.write(b"stop\n")
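The wait()/clear() pair above is a wake-up signal: the monitor sleeps until another thread queues work and sets the event, then re-arms it before draining. The same idiom in isolation (names are illustrative):

import threading

work_queue = []
work_evt = threading.Event()

def submit(item):
    work_queue.append(item)
    work_evt.set()                 # nudge the monitor awake

def monitor():
    while True:
        work_evt.wait()            # sleep until there is something to do
        work_evt.clear()           # re-arm before draining, to avoid lost wake-ups
        while work_queue:
            print("processing", work_queue.pop(0))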
Example #8
    def run_forever(self, sockopt=None, sslopt=None, ping_interval=0, ping_timeout=None,
        http_proxy_host=None, http_proxy_port=None):
        """
        run event loop for WebSocket framework.
        This loop is infinite loop and is alive during websocket is available.
        sockopt: values for socket.setsockopt.
            sockopt must be tuple and each element is argument of sock.setscokopt.
        sslopt: ssl socket optional dict.
        ping_interval: automatically send "ping" command every specified period(second)
            if set to 0, not send automatically.
        ping_timeout: timeout(second) if the pong message is not recieved.
        http_proxy_host: http proxy host name.
        http_proxy_port: http proxy port. If not set, set to 80.
        """

        if not ping_timeout or ping_timeout <= 0:
            ping_timeout = None
        if sockopt is None:
            sockopt = []
        if sslopt is None:
            sslopt = {}
        if self.sock:
            raise WebSocketException("socket is already opened")
        thread = None

        try:
            self.sock = WebSocket(self.get_mask_key, sockopt=sockopt, sslopt=sslopt,
                fire_cont_frame=self.on_cont_message and True or False)
            self.sock.settimeout(default_timeout)
            self.sock.connect(self.url, header=self.header, cookie=self.cookie,
                http_proxy_host=http_proxy_host, http_proxy_port=http_proxy_port)
            self._callback(self.on_open)

            if ping_interval:
                event = threading.Event()
                thread = threading.Thread(target=self._send_ping, args=(ping_interval, event))
                thread.daemon = True
                thread.start()

            while True:
                r, w, e = select.select((self.sock.sock, ), (), (), ping_timeout)
                if not self.keep_running:
                    break
                if ping_timeout and self.last_ping_tm and time.time() - self.last_ping_tm > ping_timeout:
                    self.last_ping_tm = 0
                    raise WebSocketTimeoutException()

                if r:
                    op_code, frame = self.sock.recv_data_frame(True)
                    if op_code == ABNF.OPCODE_CLOSE:
                        break
                    elif op_code == ABNF.OPCODE_PING:
                        self._callback(self.on_ping, frame.data)
                    elif op_code == ABNF.OPCODE_PONG:
                        self._callback(self.on_pong, frame.data)
                    elif op_code == ABNF.OPCODE_CONT and self.on_cont_message:
                        self._callback(self.on_cont_message, frame.data, frame.fin)
                    else:
                        data = frame.data
                        if six.PY3 and frame.opcode == ABNF.OPCODE_TEXT:
                            data = data.decode("utf-8")
                        self._callback(self.on_message, data)
        except Exception as e:
            self._callback(self.on_error, e)
        finally:
            if thread:
                event.set()
                thread.join()
                self.keep_running = False
            self.sock.close()
            self._callback(self.on_close)
            self.sock = None
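The _send_ping worker is not shown above; a plausible shape, using Event.wait(timeout) as an interruptible sleep (a sketch, not the library's exact code):

    def _send_ping(self, interval, event):
        while not event.wait(interval):   # False means the timeout elapsed
            self.last_ping_tm = time.time()
            self.sock.ping()              # loop exits as soon as run_forever sets the event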
Example #9
def __init__(self):
    # Event flag to communicate between the Run and Stop methods
    self.stopEvent = threading.Event()
    self.server = None
Example #10

def service_mgr_loop(shutdown_event,
                     ctxt=None,
                     name='service mgr',
                     id=None,
                     discovery_addr='*:5555',
                     mgmt_addr='*:6666',
                     discovery_interval=5,
                     poll_timeout=5000):
    func_name = service_mgr_loop.__name__
    try:
        logger = logging.getLogger()
        registry = {}

        if(ctxt is None):
            ctxt = zmq.Context()

        if(id is None):
            id = getuid()

        service_mgr_addr = Address(addrparts=(name, str(id)))

        discovery_socket = ctxt.socket(zmq.PUB)
        mgmt_socket = ctxt.socket(zmq.ROUTER)
        discovery_socket.bind('tcp://' + str(discovery_addr))
        mgmt_socket.bind('tcp://' + str(mgmt_addr))

        th = threading.current_thread()
        th.setName(name)
        logger.info('%s : %s : %s : %s : Starting Service Manager... '
                    '(discovery_addr=%s, mgmt_addr=%s, '
                    'discovery_interval=%d, poll_timeout=%d)',
                    func_name,
                    th.getName(),
                    th.ident, id,
                    discovery_addr,
                    mgmt_addr,
                    discovery_interval,
                    poll_timeout)

        housekeeping_shutdown_event = threading.Event()

        service_mgr_housekeeping(service_mgr_addr,
                                 discovery_socket,
                                 housekeeping_shutdown_event,
                                 discovery_interval,
                                 True)

        poller = zmq.Poller()
        poller.register(mgmt_socket, zmq.POLLIN)

        while shutdown_event.is_set() is False:
            try:
                socks = dict(poller.poll(poll_timeout))

                if(mgmt_socket in socks):
                    msgparts = mgmt_socket.recv_multipart()
                    numparts = len(msgparts)
                    i = 1
                    msg = ''
                    while(i < numparts):
                        msg += msgparts[i].decode()
                        i += 1

                    retval, out_msg = service_mgr_process_msg(service_mgr_addr,
                                                              msgparts[0],
                                                              registry,
                                                              msg)

                    if(retval is False):
                        break

                    if(out_msg is not None):
                        out_msgparts = (msgparts[0], out_msg.tobytes())
                        mgmt_socket.send_multipart(out_msgparts)

                service_mgr_update_registry(registry)
            except KeyboardInterrupt:
                logger.exception('%s : Service Manager Exception...',
                                 func_name)
                break

        logger.info('%s : Shutting Down Service Manager...',
                    func_name)
        housekeeping_shutdown_event.set()
    except Exception:
        logger.exception('%s : Shutting Down Service Manager...',
                         func_name)
        housekeeping_shutdown_event.set()
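A minimal launch/stop sketch for the loop above (only the standard library is assumed beyond the code shown):

import threading

shutdown_event = threading.Event()
t = threading.Thread(target=service_mgr_loop, args=(shutdown_event,))
t.start()
# ... later, from any thread or a signal handler:
shutdown_event.set()   # the while-condition notices within one poll_timeout
t.join()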
Example #11
        simultaneous_launcher.wait()

        print("Press the following keys for moving/orientating the robot by the 4 cardinal points")
        for menu_command in menu_order:
            print("{:8} - {}".format(menu_command, available_commands[menu_command]))
    except threading.BrokenBarrierError:
        pass

    # read the keyboard as long as the synchronization between threads wasn't broken
    # and while CTRL-C wasn't pressed
    with Input(keynames = "curtsies") as input_generator:
        while not (trigger.is_set() or simultaneous_launcher.broken):
            period = 1 / keyboard_refresh_rate
            key = input_generator.send(period)

            if key in available_commands:
                try:
                    motor_command_queue.put_nowait(available_commands[key])
                except queue.Full:
                    pass

    # exit codes depending on the issue
    if simultaneous_launcher.broken:
        sys.exit(1)
    sys.exit(0)

if __name__ == "__main__":
    trigger = threading.Event() # event used when CTRL-C is pressed
    signal.signal(signal.SIGINT, lambda signum, frame : trigger.set()) # SIGINT (CTRL-C) signal handler
    Main(trigger)
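The SIGINT-to-Event idiom used above, distilled into a standalone sketch:

import signal
import threading

trigger = threading.Event()
signal.signal(signal.SIGINT, lambda signum, frame: trigger.set())

while not trigger.is_set():
    print('working...')
    trigger.wait(1.0)    # interruptible sleep: returns early on CTRL-C
print('clean shutdown')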
Example #12

def cmd_proc_loop(shutdown_event,
                  ctxt=None,
                  name='cmd proc',
                  id=None,
                  discovery_addr='localhost:5555',
                  mgmt_addr='localhost:6666',
                  discovery_interval=5,
                  poll_timeout=5000):
    func_name = cmd_proc_loop.__name__
    try:
        usage = """
           l : List Available Servers
           s <uuid> : Stop a Server
           e : Exit
        """
        logger = logging.getLogger()

        if(ctxt is None):
            ctxt = zmq.Context()

        if(id is None):
            id = getuid()

        cmd_proc_addr = Address(addrparts=(name, str(id)))

        th = threading.current_thread()
        th.setName(name)
        logger.info('%s : %s : %s : %s : Starting Cmd Proc Loop... '
                    '(discovery_addr=%s, mgmt_addr=%s, '
                    'discovery_interval=%d, poll_timeout=%d)',
                    func_name,
                    th.getName(),
                    th.ident, id,
                    discovery_addr,
                    mgmt_addr,
                    discovery_interval,
                    poll_timeout)

        cmd_socket = ctxt.socket(zmq.PAIR)
        cmd_socket.bind('inproc://' + str(cmd_proc_addr))

        inproc_addr_list = (cmd_proc_addr,)
        inproc_event_list = (threading.Event(),)

        service_thread, service_shutdown_event = \
            init_service(ctxt,
                         'cmd proc service',
                         id,
                         discovery_addr,
                         mgmt_addr,
                         inproc_addr_list,
                         inproc_event_list,
                         discovery_interval,
                         poll_timeout,
                         '../../../../conf/logconf_servicecmd.json')

        print_menu = True
        valid_cmds = ('l', 's', 'e')
        while shutdown_event.is_set() is False:
            try:
                if(print_menu is True):
                    print_usage(usage)

                cmd = nonBlockingRawInputUsingSelect()
                if(cmd == ''):
                    print_menu = False
                    continue
                elif(cmd not in valid_cmds):
                    print_menu = True
                    logger.info('%s : <%s> not a valid cmd...try again',
                                func_name, cmd)
                    continue
                elif(cmd == 'e'):
                    inproc_event_list[0].set()
                    break
                else:
                    print_menu = True

                msg = MsgReqRaw(body=STMsgType.MGMT+'\r\n'+cmd)
                msg.fromhdr = cmd_proc_addr
                msg.tohdr = cmd_proc_addr
                cmd_socket.send(msg.tobytes())
                if(cmd == 'l'):
                    rcvd_msg = cmd_socket.recv()
                    rcvd_msg = rcvd_msg.decode()
                    msg, reason =\
                        verify_respmsg(rcvd_msg)
                    if(reason is not None):
                        logger.info('%s : Invalid Msg=%s',
                                    func_name, reason)
                        logger.debug('%s : Msg=%r', func_name, rcvd_msg)
                    elif(msg.tohdr == cmd_proc_addr):
                        print('List of services : ')
                        print('respcode=%s, reason=%s' %
                              (msg.respcode, msg.reason))
                        print('%s' % msg.body)
                    else:
                        logger.info('%s : Cmd Loop : Message Not Authentic,'
                                    'Msg=%s', func_name, msg.debug())
            except KeyboardInterrupt:
                break

        logger.info('%s : Shutting Down Cmd Processor...', func_name)
        service_shutdown_event.set()
        service_thread.join()
    except Exception:
        logger.exception('%s : Shutting Down Cmd Processor...', func_name)
        service_shutdown_event.set()
        service_thread.join()
Example #13
    def _onTeleCallMesg(self, sock, mesg):

        # tele:call - call a method on a shared object

        jid = mesg[1].get('jid')
        sid = mesg[1].get('sid')

        # check if the socket knows about their auth
        # ( most likely via SSL client cert )
        user = sock.get('syn:user')

        with s_scope.enter({'dmon': self, 'sock': sock, 'syn:user': user, 'syn:auth': self.auth}):

            try:

                name = mesg[1].get('name')

                item = self.shared.get(name)
                if item is None:
                    # is it a pushed object?
                    pushsock = self.pushed.get(name)
                    if pushsock is not None:
                        # pass along how to reply
                        mesg[1]['suid'] = sock.iden
                        return pushsock.tx(mesg)
                    raise s_common.NoSuchObj(name)

                task = mesg[1].get('task')
                meth, args, kwargs = task

                self._reqUserAllowed(user, 'tele:call', name, meth)

                func = getattr(item, meth, None)
                if func is None:
                    raise s_common.NoSuchMeth(meth)

                if getattr(func, '_tele_clientside', False):
                    name = s_reflect.getMethName(func)
                    raise s_common.TeleClientSide(name=name)

                ret = func(*args, **kwargs)

                # handle generator returns specially
                if isinstance(ret, types.GeneratorType):

                    iden = s_common.guid()

                    txwait = threading.Event()
                    # start off set...
                    txwait.set()

                    self._dmon_yields.add(iden)
                    sock.tx(s_common.tufo('tele:yield:init', jid=jid, iden=iden))

                    # FIXME opt
                    maxsize = 100000000
                    def ontxsize(m):
                        size = m[1].get('size')
                        if size >= maxsize:
                            txwait.clear()
                        else:
                            txwait.set()

                    try:

                        sock.onfini(txwait.set)
                        sock.on('sock:tx:size', ontxsize)

                        for item in ret:

                            txwait.wait()

                            # check if we woke due to fini
                            if sock.isfini:
                                break

                            sock.tx(s_common.tufo('tele:yield:item', iden=iden, item=item))
                            if iden not in self._dmon_yields:
                                break

                    finally:
                        sock.off('sock:tx:size', ontxsize)
                        self._dmon_yields.discard(iden)
                        sock.tx(s_common.tufo('tele:yield:fini', iden=iden))

                    return

                sock.tx(s_common.tufo('job:done', jid=jid, ret=ret))

            except Exception as e:
                errinfo = s_common.excinfo(e)
                sock.tx(s_common.tufo('job:done', jid=jid, err=errinfo.get('err'), errinfo=errinfo))
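The txwait handling above is a backpressure valve: the transmit-buffer callback clears the event when the buffer grows too large and sets it again once it drains, so the producing loop blocks instead of flooding the socket. Stripped down (names are illustrative):

import threading

txwait = threading.Event()
txwait.set()                      # start in the "writable" state
MAXSIZE = 100000000

def on_tx_size(size):
    if size >= MAXSIZE:
        txwait.clear()            # buffer full: pause the producer
    else:
        txwait.set()              # drained: let the producer continue

def produce(items, send):
    for item in items:
        txwait.wait()             # blocks while the buffer is full
        send(item)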
Example #14
def __init__(self, base_dir, pats: List[str]):
    self.base_dir = base_dir
    self.tailer = multitail2.MultiTail([str(base_dir / pat) for pat in pats])
    self._stop_event = threading.Event()
    super().__init__()
Example #15
def __init__(self):
    self.executor = concurrent.futures.ThreadPoolExecutor(1)
    self._stop_event = threading.Event()
    self.future = None
Example #16
def __init__(self, seq):
    super(MidiInThread, self).__init__()
    set_thread_name("beatkit midi-in")
    self.seq = seq
    self._run = threading.Event()
    self._run.set()
Example #17
def _encode(filename,
            first,
            last,
            preserve,
            encoder,
            tmp_path,
            prefix,
            img_ext,
            quality,
            quiet,
            _self=cmd):
    import os
    tries = 10
    while 1:  # loop until all of the files have been created...
        done = 1
        # check for the required output files
        for index in range(first, last + 1):
            path = os.path.join(tmp_path,
                                "%s%04d%s" % (prefix, index, img_ext))
            if not os.path.exists(path):
                done = 0
                break
        if done:
            break
        elif _self.get_modal_draw():  # keep looping so long as we're rendering...
            tries = 10
        else:
            tries = tries - 1
            if tries < 0:
                done = 0
                break
        time.sleep(0.25)
    _self.sync()
    ok = 1
    result = None

    # reduce chance of passing non-ascii file paths to sub processes
    # by changing directory
    fn_rel = os.path.relpath(filename, tmp_path)
    old_cwd = os.getcwd()
    fps = get_movie_fps(_self)

    if done and ok and (encoder == 'mpeg_encode'):
        try:
            from pymol import mpeg_encode
        except ImportError:
            ok = 0
            print("produce-error: Unable to import module pymol.mpeg_encode.")
        if ok:
            if not mpeg_encode.validate():
                ok = 0
                print("produce-error: Unable to validate pymol.mpeg_encode.")
        if not ok:
            print("produce-error: Unable to create mpeg file.")
        else:
            mpeg_quality = 1 + int(((100 - quality) * 29) / 100)  # 1 to 30
            input = mpeg_encode.input(fn_rel, '.', prefix, first, last,
                                      mpeg_quality)

            FPS_LEGAL_VALUES = [23.976, 24, 25, 29.97, 30, 50, 59.94, 60]
            fps_legal = min(FPS_LEGAL_VALUES, key=lambda v: abs(v - fps))
            if fps_legal != round(fps, 3):
                colorprinting.warning(
                    " Warning: Adjusting frame rate to {} fps (legal values are: {})"
                    .format(fps_legal, FPS_LEGAL_VALUES))
            input = input.replace('FRAME_RATE 30',
                                  'FRAME_RATE {:.3f}'.format(fps_legal))

            if not quiet:
                print(" produce: creating '%s' (in background)..." %
                      (filename))

            os.chdir(tmp_path)

            done_event = None
            if not quiet:
                done_event = threading.Event()
                _self.async_(_watch, fn_rel, done_event, _self=_self)

            try:
                result = mpeg_encode.run(input)
            finally:
                os.chdir(old_cwd)
                if done_event is not None:
                    done_event.set()
    elif encoder == 'ffmpeg':
        import subprocess
        os.chdir(tmp_path)
        try:
            args = [
                'ffmpeg',
                '-f',
                'image2',
                '-framerate',
                '{:.3f}'.format(fps),
                '-i',
                prefix + '%04d' + img_ext,
            ]
            if fn_rel.endswith('.webm'):
                args_crf = ['-crf', '{:.0f}'.format(65 - (quality / 2))]
                args += ['-c:v', 'libvpx-vp9', '-b:v', '0'] + args_crf
            elif not fn_rel.endswith('.gif'):
                args += [
                    '-crf',
                    '10' if quality > 90 else '15' if quality > 80 else '20',
                    '-pix_fmt',
                    'yuv420p',  # needed for Mac support
                ]
            process = subprocess.Popen(args + [fn_rel], stderr=subprocess.PIPE)
            stderr = process.communicate()[1]
            colorprinting.warning(stderr.strip().decode(errors='replace'))
            if process.returncode != 0:
                colorprinting.error('ffmpeg failed with '
                                    'exit status {}'.format(
                                        process.returncode))
        finally:
            os.chdir(old_cwd)
    elif encoder == 'convert':
        import subprocess
        exe = find_exe(encoder)
        try:
            subprocess.check_call([
                exe,
                '-delay',
                '{:.3f}'.format(100. / fps),  # framerate
                os.path.join(tmp_path, prefix) + '*' + img_ext,
                filename
            ])
        finally:
            pass
    if not quiet:
        if not os.path.exists(filename):
            if result is not None:
                print(input, result[0], result[1])
            print(" produce: compression failed")
        else:
            print(" produce: finished.")
    _self.unset("keep_alive")
    if preserve < 1:
        if os.path.isdir(tmp_path):
            for fil in glob.glob(os.path.join(tmp_path, prefix + "*")):
                os.unlink(fil)
            os.rmdir(tmp_path)
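_watch is not shown in this snippet; a plausible shape (the polling interval and message are assumptions): report progress on the growing output file until the encoder signals completion via done_event.

def _watch(filename, done_event, _self=None):
    import os
    while not done_event.wait(2.0):          # wake every 2 s, or as soon as done
        if os.path.exists(filename):
            print(" produce: %d bytes written..." % os.path.getsize(filename))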
Example #18
    def __init__(self, dummy = False, immediate = False, default_logging=True, with_ros = True):

        if default_logging:
            configure_logging()

        super(Ranger, self).__init__(actions = ["ranger.actions"], 
                                    supports = ROS if with_ros else 0,
                                    dummy = dummy,
                                    immediate = immediate)


        self.beacons = {}
        self.odom = Odom()

        self.poses.add_frame_provider(RangerFrames(self))

        # creates accessors for each of the fields in STATE
        self.state.update(Ranger.STATE)
        self.state.eyelids = Ranger.eyelids.OPEN
        self.state.touches = TouchManager()

        self.innerstate = EmotionalState()

        #######################################################################
        #                       ASEBA initialization
        #######################################################################

        # init Aseba
        self.aseba = Aseba(dummy = dummy)

        self.aseba.load_events_list(RANGER_ASEBA_SCRIPT)

        # Basic check to be sure all the Ranger's ASEBA nodes are up and running
        nodes = self.aseba.get_nodes_list()
        if not dummy and len(nodes) not in [2,3]:
            logger.error("One of the Ranger Aseba node is not up!!")
            logger.error("List of active nodes: {0}".format(nodes))
            raise Exception("Missing Aseba node")

        # Register callbacks for the main events of the 3 nodes
        self.aseba.on_event("mainFeedbackWithEncoders", self._process_main_feedback)
        self.aseba.on_event("neuilFeedback", self._process_neuil_feedback)
        #self.aseba.on_event("receiverFeedback", self._process_rab_feedback)

        # Events that can be used to wait
        # until the next update of a given Aseba node
        self.main_update = threading.Event()
        self.neuil_update = threading.Event()
        #self.rab_update = threading.Event()

        # Starts DBus thread (responsible for receiving events)
        self.aseba_thread = threading.Thread(target=self.aseba.run)
        self.aseba_thread.start()

        # Asks the nodes to send their events
        self._send_evt("enableEncoders", enable = 1)
        self._send_evt("enableFeedback", enable = 1)

        # Wait until we hear about the 2 main nodes ('main' and 'neuil')
        if not dummy:
            self.get_full_state()

        if sys.version_info < (2, 7):
            logger.warning("Python < 2.7 does not correctly support timeouts "
                           "on threading.Event! I may wait forever for Aseba "
                           "updates in case of problems with the Aseba network!")
Example #19
import logging
import os
import socket
import threading
import time
import traceback

import rucio.db.sqla.util
from rucio.common import exception
from rucio.common.logging import setup_logging
from rucio.common.utils import get_thread_with_periodic_running_function, daemon_sleep
from rucio.core.account_counter import get_updated_account_counters, update_account_counter, fill_account_counter_history_table
from rucio.core.heartbeat import live, die, sanity_check

graceful_stop = threading.Event()


def account_update(once=False, sleep_time=10):
    """
    Main loop to check and update the Account Counters.
    """

    logging.info('account_update: starting')

    logging.info('account_update: started')

    # Make an initial heartbeat so that all abacus-account daemons have the correct worker number on the next try
    executable = 'abacus-account'
    hostname = socket.gethostname()
    pid = os.getpid()
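Daemons like this typically pair the module-level graceful_stop with a signal handler and use Event.wait() as an interruptible sleep; a sketch of that wiring (illustrative, not rucio's exact code):

import signal

def stop(signum=None, frame=None):
    graceful_stop.set()

signal.signal(signal.SIGTERM, stop)

def worker_loop(sleep_time=10):
    while not graceful_stop.is_set():
        # ... one update cycle ...
        graceful_stop.wait(sleep_time)   # wakes immediately when stop() fires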
Example #20
def main():
  uploader_fn(threading.Event())
Example #21
def start_receiving(self):
    """ Start the receiving thread, dispatching responses to the handlers """
    self._exit_event = threading.Event()
    self._receive_thread = threading.Thread(
        target=self._receive_thread_func, daemon=True)
    self._receive_thread.start()
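The matching receive loop and stop method are not shown; a plausible counterpart (the helper names are assumptions):

def _receive_thread_func(self):
    while not self._exit_event.is_set():
        response = self._read_response(timeout=0.5)   # hypothetical helper
        if response is not None:
            self._dispatch(response)                  # hypothetical helper

def stop_receiving(self):
    self._exit_event.set()
    self._receive_thread.join()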
Example #22
def __init__(self, target, args):
    super(StoppableThread, self).__init__()
    self._stop = threading.Event()
    self.target = target
    self.args = args
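A common completion of this pattern (the original run()/stop() are not shown; this is one conventional shape):

def stop(self):
    self._stop.set()

def stopped(self):
    return self._stop.is_set()

def run(self):
    while not self._stop.is_set():
        self.target(*self.args)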
Example #23
def get_stop_flag(self):
    return threading.Event()
Example #24
def run_forked(testcase_suites):
    wrapped_testcase_suites = set()

    # suites are unhashable, need to use list
    results = []
    unread_testcases = set()
    finished_unread_testcases = set()
    manager = StreamQueueManager()
    manager.start()
    for i in range(concurrent_tests):
        if len(testcase_suites) > 0:
            wrapped_testcase_suite = TestCaseWrapper(testcase_suites.pop(0),
                                                     manager)
            wrapped_testcase_suites.add(wrapped_testcase_suite)
            unread_testcases.add(wrapped_testcase_suite)
        else:
            break

    read_from_testcases = threading.Event()
    read_from_testcases.set()
    stdouterr_thread = threading.Thread(target=stdouterr_reader_wrapper,
                                        args=(unread_testcases,
                                              finished_unread_testcases,
                                              read_from_testcases))
    stdouterr_thread.start()

    failed_wrapped_testcases = set()
    stop_run = False
    while len(wrapped_testcase_suites) > 0:
        finished_testcase_suites = set()
        for wrapped_testcase_suite in wrapped_testcase_suites:
            while wrapped_testcase_suite.result_parent_end.poll():
                wrapped_testcase_suite.result.process_result(
                    *wrapped_testcase_suite.result_parent_end.recv())
                wrapped_testcase_suite.last_heard = time.time()

            while wrapped_testcase_suite.keep_alive_parent_end.poll():
                wrapped_testcase_suite.last_test, \
                    wrapped_testcase_suite.last_test_vpp_binary, \
                    wrapped_testcase_suite.last_test_temp_dir, \
                    wrapped_testcase_suite.vpp_pid = \
                    wrapped_testcase_suite.keep_alive_parent_end.recv()
                wrapped_testcase_suite.last_heard = time.time()

            if wrapped_testcase_suite.finished_parent_end.poll():
                wrapped_testcase_suite.finished_parent_end.recv()
                wrapped_testcase_suite.last_heard = time.time()
                stop_run = process_finished_testsuite(
                    wrapped_testcase_suite,
                    finished_testcase_suites,
                    failed_wrapped_testcases,
                    results) or stop_run
                continue

            fail = False
            if wrapped_testcase_suite.last_heard + test_timeout < time.time():
                fail = True
                wrapped_testcase_suite.logger.critical(
                    "Child test runner process timed out "
                    "(last test running was `%s' in `%s')!" %
                    (wrapped_testcase_suite.last_test,
                     wrapped_testcase_suite.last_test_temp_dir))
            elif not wrapped_testcase_suite.child.is_alive():
                fail = True
                wrapped_testcase_suite.logger.critical(
                    "Child test runner process unexpectedly died "
                    "(last test running was `%s' in `%s')!" %
                    (wrapped_testcase_suite.last_test,
                     wrapped_testcase_suite.last_test_temp_dir))
            elif wrapped_testcase_suite.last_test_temp_dir and \
                    wrapped_testcase_suite.last_test_vpp_binary:
                if is_core_present(wrapped_testcase_suite.last_test_temp_dir):
                    wrapped_testcase_suite.add_testclass_with_core()
                    if wrapped_testcase_suite.core_detected_at is None:
                        wrapped_testcase_suite.core_detected_at = time.time()
                    elif wrapped_testcase_suite.core_detected_at + \
                            core_timeout < time.time():
                        wrapped_testcase_suite.logger.critical(
                            "Child test runner process unresponsive and core-"
                            "file exists in test temporary directory "
                            "(last test running was `%s' in `%s')!" %
                            (wrapped_testcase_suite.last_test,
                             wrapped_testcase_suite.last_test_temp_dir))
                        fail = True

            if fail:
                wrapped_testcase_suite.child.terminate()
                try:
                    # terminating the child process tends to leave orphan
                    # VPP process around
                    if wrapped_testcase_suite.vpp_pid:
                        os.kill(wrapped_testcase_suite.vpp_pid, signal.SIGTERM)
                except OSError:
                    # already dead
                    pass
                wrapped_testcase_suite.result.crashed = True
                wrapped_testcase_suite.result.process_result(
                    wrapped_testcase_suite.last_test_id, ERROR)
                stop_run = process_finished_testsuite(
                    wrapped_testcase_suite,
                    finished_testcase_suites,
                    failed_wrapped_testcases,
                    results) or stop_run

        for finished_testcase in finished_testcase_suites:
            finished_testcase.child.join()
            finished_testcase.close_pipes()
            wrapped_testcase_suites.remove(finished_testcase)
            finished_unread_testcases.add(finished_testcase)
            finished_testcase.stdouterr_queue.put(None)
            if stop_run:
                while len(testcase_suites) > 0:
                    results.append(TestResult(testcase_suites.pop(0)))
            elif len(testcase_suites) > 0:
                new_testcase = TestCaseWrapper(testcase_suites.pop(0), manager)
                wrapped_testcase_suites.add(new_testcase)
                unread_testcases.add(new_testcase)

    while len(unread_testcases) > 0:
        # wait for reader thread to read everything in all loggers;
        # sleep briefly rather than spinning at full speed
        time.sleep(0.1)

    read_from_testcases.clear()
    stdouterr_thread.join(test_timeout)
    manager.shutdown()
    handle_cores(failed_wrapped_testcases)
    return results
Example #25
    def do_attach(self, line):
        '''
        Pseudo Command: attach

        Description:
        Attach to a given session.  If the session is not active
        an error will be printed to the screen.  The --wait option
        can be used to wait for the session to become active.

        Args:
        attach [OPTIONS] <SessionId>

        where OPTIONS are:
        -w, --wait - Wait for the session to become active.
        '''
        p = CliArgs(usage='attach [OPTIONS] <SessionId>')
        p.add_option('-w', '--wait', action="store_true", default=False, help='Wait for the session to become active')
        (opts, args) = p.parse_line(line)

        if (len(args) != 1):
            raise CliArgsException("Invalid number of arguments to attach (got %d, expected %d)" % (len(args), 1))
        sessid = int(args[0])

        if (opts.wait):
            timeout = 60*5
        else:
            timeout = 0
        sessioninfo = None

        while (1):

            pending = False
            sess = self._session_list()
            for s in sess:
                # look for an active or pending session
                # matching what we are looking for
                if s['id'] == sessid:
                    if s['status'] == 'active':
                        sessioninfo = s
                        break
                    elif s['status'] == 'pending':
                        pending = True

            if (sessioninfo is not None):
                break

            if (not pending):
                print "No pending session found for session id %s" % sessid
                return

            if (timeout == 0):
                print 'Session is not active - wait and try attaching again'
                return
            else:
                time.sleep(1)
                timeout -= 1

        # ok -we have a session
        self.session = sessid
        self.prompt = 'Session[%s] > ' % cyan(str(sessid))
        self.cwd = sessioninfo['current_working_directory']

        # spawn a thread to keep the sensor active
        # when we are attached
        self.keepaliveEvent = threading.Event()
        self.keepaliveThread = threading.Thread(target=self._keepaliveThread, args=())
        self.keepaliveThread.daemon = True
        self.keepaliveThread.start()

        print "Session: %d" % sessid
        print "  Available Drives: %s" % ' '.join(sessioninfo['drives'])

        # look up supported commands
        ret = {}
        for c in sessioninfo['supported_commands']:
            ret[API_CMD_TO_CLI_CMD[c]] = 1

        print "  Supported Commands: %s" % ' '.join(ret.keys())
        print "  Working Directory: %s" % self.cwd
Example #26
def __init__(self, s3_comm):
    StoppableAsyncTask.__init__(self, self._thread_func)
    self.s3_comm = s3_comm
    self.work_event = threading.Event()
    self.logs_to_send = []
Example #27
                                      delete_temporary_dids)
from rucio.rse import rsemanager as rsemgr

logging.getLogger("requests").setLevel(logging.CRITICAL)

logging.basicConfig(
    stream=sys.stdout,
    level=getattr(
        logging,
        config_get('common',
                   'loglevel',
                   raise_exception=False,
                   default='DEBUG').upper()),
    format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')

GRACEFUL_STOP = threading.Event()


def reaper(rses=[],
           worker_number=0,
           total_workers=1,
           chunk_size=100,
           once=False,
           scheme=None):
    """
    Main loop to select and delete files.

    :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.
    :param worker_number: The worker number.
    :param total_workers:  The total number of workers.
    :param chunk_size: the size of chunk for deletion.
Example #28
66.102.255.0-255
80.228.65.128-191
81.175.29.128-191
84.235.77.0-255
85.182.250.0-255
86.127.118.128-191
93.183.211.192-255
93.94.217.0-31
93.94.218.0-31
94.200.103.64-71
94.40.70.0-63
'''

logging.basicConfig(format="[%(threadName)s]%(message)s", level=logging.INFO)

evt_ipramdomstart = threading.Event()
evt_ipramdomend = threading.Event()


def PRINT(strlog):
    logging.info(strlog)


def isgoolgledomain(domain):
    lowerdomain = domain.lower()
    if lowerdomain in g_ssldomain:
        return 1
    if lowerdomain in g_excludessdomain:
        return 0
    return 2
Example #29
def __init__(self, loop_func, setup_func):
    threading.Thread.__init__(self)
    self._stop_event = threading.Event()
    self.daemon = True
    self.loop_func = loop_func
    self.setup_func = setup_func
Example #30

    def __init__(self, loader):
        super(_MultiProcessingDataLoaderIter, self).__init__(loader)

        assert self._num_workers > 0

        if loader.multiprocessing_context is None:
            multiprocessing_context = multiprocessing
        else:
            multiprocessing_context = loader.multiprocessing_context

        self._worker_init_fn = loader.worker_init_fn
        self._worker_queue_idx_cycle = itertools.cycle(range(self._num_workers))
        self._worker_result_queue = multiprocessing_context.Queue()
        self._worker_pids_set = False
        self._shutdown = False
        self._send_idx = 0  # idx of the next task to be sent to workers
        self._rcvd_idx = 0  # idx of the next task to be returned in __next__
        # information about data not yet yielded, i.e., tasks w/ indices in range [rcvd_idx, send_idx).
        # map: task idx => - (worker_id,)        if data isn't fetched (outstanding)
        #                  \ (worker_id, data)   if data is already fetched (out-of-order)
        self._task_info = {}
        self._tasks_outstanding = 0  # always equal to count(v for v in task_info.values() if len(v) == 1)
        self._workers_done_event = multiprocessing_context.Event()

        self._index_queues = []
        self._workers = []
        # A list of booleans representing whether each worker still has work to
        # do, i.e., not having exhausted its iterable dataset object. It always
        # contains all `True`s if not using an iterable-style dataset
        # (i.e., if kind != Iterable).
        self._workers_status = []
        for i in range(self._num_workers):
            index_queue = multiprocessing_context.Queue()
            # index_queue.cancel_join_thread()
            w = multiprocessing_context.Process(
                target=_utils.worker._worker_loop,
                args=(self._dataset_kind, self._dataset, index_queue,
                      self._worker_result_queue, self._workers_done_event,
                      self._auto_collation, self._collate_fn, self._drop_last,
                      self._base_seed + i, self._worker_init_fn, i, self._num_workers))
            w.daemon = True
            # NB: Process.start() actually take some time as it needs to
            #     start a process and pass the arguments over via a pipe.
            #     Therefore, we only add a worker to self._workers list after
            #     it started, so that we do not call .join() if program dies
            #     before it starts, and __del__ tries to join but will get:
            #     AssertionError: can only join a started process.
            w.start()
            self._index_queues.append(index_queue)
            self._workers.append(w)
            self._workers_status.append(True)

        if self._pin_memory:
            self._pin_memory_thread_done_event = threading.Event()
            self._data_queue = queue.Queue()
            pin_memory_thread = threading.Thread(
                target=_utils.pin_memory._pin_memory_loop,
                args=(self._worker_result_queue, self._data_queue,
                      torch.cuda.current_device(),
                      self._pin_memory_thread_done_event))
            pin_memory_thread.daemon = True
            pin_memory_thread.start()
            # Similar to workers (see comment above), we only register
            # pin_memory_thread once it is started.
            self._pin_memory_thread = pin_memory_thread
        else:
            self._data_queue = self._worker_result_queue

        _utils.signal_handling._set_worker_pids(id(self), tuple(w.pid for w in self._workers))
        _utils.signal_handling._set_SIGCHLD_handler()
        self._worker_pids_set = True

        # prime the prefetch loop
        for _ in range(2 * self._num_workers):
            self._try_put_index()