Example #1
  def reset_signal_handler(cls, signal_handler):
    """
    Class state:
    - Overwrites `cls._signal_handler`.
    OS state:
    - Overwrites signal handlers for SIGINT, SIGQUIT, and SIGTERM.

    NB: This method calls signal.signal(), which will crash if not called from the main thread!

    :returns: The :class:`SignalHandler` that was previously registered, or None if this is
              the first time this method was called.
    """
    assert isinstance(signal_handler, SignalHandler)
    # NB: Modify process-global state!
    for signum, handler in signal_handler.signal_handler_mapping.items():
      signal.signal(signum, handler)
      # Retry any system calls interrupted by any of the signals we just installed handlers for
      # (instead of having them raise EINTR). siginterrupt(3) says this is the default behavior on
      # Linux and OSX.
      signal.siginterrupt(signum, False)

    previous_signal_handler = cls._signal_handler
    # NB: Mutate the class variables!
    cls._signal_handler = signal_handler
    return previous_signal_handler
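A minimal, self-contained sketch of the behaviour the comments above rely on: with siginterrupt(signum, False) the kernel sets SA_RESTART, so a blocking read survives the signal instead of failing with EINTR. Names below are illustrative; note that since Python 3.5, PEP 475 retries most interrupted calls automatically, so this flag mainly matters on older interpreters and for C extensions.

import os
import signal
import threading
import time

def on_alarm(signum, frame):
    # Runs mid-read; with SA_RESTART the read below resumes afterwards.
    print('SIGALRM handled')

signal.signal(signal.SIGALRM, on_alarm)
signal.siginterrupt(signal.SIGALRM, False)  # restart interrupted syscalls

r, w = os.pipe()
threading.Thread(target=lambda: (time.sleep(2), os.write(w, b'done')),
                 daemon=True).start()

signal.alarm(1)        # fires while the main thread is blocked in read()
print(os.read(r, 4))   # b'done' -- the read was restarted, not aborted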
Example #2
    def add_signal_handler(self, sig, callback, *args):
        """Add a handler for a signal.  UNIX only.

        Raise ValueError if the signal number is invalid or uncatchable.
        Raise RuntimeError if there is a problem setting up the handler.
        """
        self._check_signal(sig)
        try:
            # set_wakeup_fd() raises ValueError if this is not the
            # main thread.  By calling it early we ensure that an
            # event loop running in another thread cannot add a signal
            # handler.
            signal.set_wakeup_fd(self._csock.fileno())
        except ValueError as exc:
            raise RuntimeError(str(exc))

        handle = events.make_handle(callback, args)
        self._signal_handlers[sig] = handle

        try:
            signal.signal(sig, self._handle_signal)
            # Set SA_RESTART to limit EINTR occurrences.
            signal.siginterrupt(sig, False)
        except OSError as exc:
            del self._signal_handlers[sig]
            if not self._signal_handlers:
                try:
                    signal.set_wakeup_fd(-1)
                except ValueError as nexc:
                    logger.info('set_wakeup_fd(-1) failed: %s', nexc)

            if exc.errno == errno.EINVAL:
                raise RuntimeError('sig {} cannot be caught'.format(sig))
            else:
                raise
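The _csock here is one end of a socketpair that the event loop watches; set_wakeup_fd() makes CPython's C-level signal handler write the signal number to it. A stripped-down sketch of that mechanism (Python 3 semantics; names are illustrative, and this must run in the main thread or set_wakeup_fd() raises ValueError, as the comment above notes):

import os
import signal
import socket

ssock, csock = socket.socketpair()  # loop reads ssock, signals write csock
ssock.setblocking(False)
csock.setblocking(False)

signal.set_wakeup_fd(csock.fileno())
signal.signal(signal.SIGUSR1, lambda signum, frame: None)  # no-op handler
signal.siginterrupt(signal.SIGUSR1, False)

os.kill(os.getpid(), signal.SIGUSR1)
print(ssock.recv(16))  # one byte per signal: its number (b'\n' on Linux, where SIGUSR1 is 10)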
Example #3
def daemonize(do_fork=True, skip_fds=()):
    """Daemonizes current process."""

    if do_fork:
        if os.fork():
            os._exit(0)
        else:
            os.setsid()

            if os.fork():
                os._exit(0)

    os.chdir("/")
    os.umask(0)

    signal.signal(signal.SIGHUP, signal.SIG_IGN)
    signal.siginterrupt(signal.SIGHUP, False)

    # Redirecting standard streams to /dev/null and closing original descriptors
    null_dev = eintr_retry(os.open)("/dev/null", os.O_RDWR)
    try:
        for fd in (sys.stdin.fileno(), sys.stdout.fileno(), sys.stderr.fileno()):
            if fd not in skip_fds:
                os.dup2(null_dev, fd)
    finally:
        eintr_retry(os.close)(null_dev)
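eintr_retry is not defined in this snippet; one plausible implementation, matching how it is called above, is a wrapper that retries a syscall when it fails with EINTR (unnecessary on Python 3.5+, where PEP 475 does this automatically):

import errno

def eintr_retry(func):
    """Return a wrapper around func that retries the call on EINTR."""
    def wrapped(*args, **kwargs):
        while True:
            try:
                return func(*args, **kwargs)
            except (OSError, IOError) as exc:
                if exc.errno != errno.EINTR:
                    raise
    return wrapped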
Example #4
def main():
    signal.signal(signal.SIGINT, lambda signum, frame: sys.exit(0))
    # Let SIGINT interrupt blocking calls such as input(). Note that
    # siginterrupt() must follow signal.signal(), which resets the
    # interrupt behaviour when it installs the handler.
    signal.siginterrupt(signal.SIGINT, True)
    args = sys.argv
    if len(args) != 2 or args[1][-4:] == 'help':
        print("usage: python flashcard.py filename.json")
        sys.exit(0)

    with open(args[1], 'r') as f:
        text = f.read()
        d = json.loads(text)
        keys = list(d.keys())
        n = len(keys)
        while True:
            os.system('clear')
            print("Starting a new round")
            print("Press enter to proceed")
            input()
            for i in range(n):
                # Fisher-Yates shuffle step: swap a random remaining key into slot i
                target = random.randrange(i, n)
                keys[i], keys[target] = keys[target], keys[i]
                koi = keys[i]
                os.system('clear')
                print(koi)
                input()
                print(d[koi])
                input()
Example #5
    def init_signals(self):
        # Set up signals through the event loop API.

        self.loop.add_signal_handler(signal.SIGQUIT, self.handle_quit,
                                     signal.SIGQUIT, None)

        self.loop.add_signal_handler(signal.SIGTERM, self.handle_exit,
                                     signal.SIGTERM, None)

        self.loop.add_signal_handler(signal.SIGINT, self.handle_quit,
                                     signal.SIGINT, None)

        self.loop.add_signal_handler(signal.SIGWINCH, self.handle_winch,
                                     signal.SIGWINCH, None)

        self.loop.add_signal_handler(signal.SIGUSR1, self.handle_usr1,
                                     signal.SIGUSR1, None)

        self.loop.add_signal_handler(signal.SIGABRT, self.handle_abort,
                                     signal.SIGABRT, None)

        # Don't let SIGTERM and SIGUSR1 disturb active requests
        # by interrupting system calls
        signal.siginterrupt(signal.SIGTERM, False)
        signal.siginterrupt(signal.SIGUSR1, False)
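The repetition above can be collapsed into a table, which makes the signal/handler pairs easier to audit; a sketch using the names from this example:

def init_signals(self):
    for sig, handler in (
        (signal.SIGQUIT, self.handle_quit),
        (signal.SIGTERM, self.handle_exit),
        (signal.SIGINT, self.handle_quit),
        (signal.SIGWINCH, self.handle_winch),
        (signal.SIGUSR1, self.handle_usr1),
        (signal.SIGABRT, self.handle_abort),
    ):
        self.loop.add_signal_handler(sig, handler, sig, None)

    # Don't let SIGTERM and SIGUSR1 interrupt system calls mid-request.
    for sig in (signal.SIGTERM, signal.SIGUSR1):
        signal.siginterrupt(sig, False)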
Example #6
def server_config():
    global log_file, log_buffer 
    global data_fd, data_fifo
    # SIGINT: clean up and quit
    signal.signal(signal.SIGINT, sigint_handler)
    # keep-alive timer
    signal.signal(signal.SIGALRM, sigtimer_handler)
    signal.setitimer(signal.ITIMER_REAL, KEEP_ALIVE, KEEP_ALIVE)
    # syscall interrupt: restart
    signal.siginterrupt(signal.SIGALRM, False)
    # server status
    signal.signal(signal.SIGUSR1, sigusr1_handler)
    # NB: siginterrupt() must come after signal.signal(), which resets the
    # interrupt behaviour when it installs the handler
    signal.siginterrupt(signal.SIGUSR1, False)
    # data channel
    data_fifo = "nonblocking_pid_%d.fifo" % os.getpid()
    os.mkfifo(data_fifo, 0666)
    f = open(data_fifo, "r+")
    data_fd = os.dup(f.fileno())
    f.close()
    #main log
    if os.path.exists(NONBLOCKING_LOG): 
        log_file = open(NONBLOCKING_LOG, "a+")
    else:
        log_file = open(NONBLOCKING_LOG, "w")
    if LOG_BUFFER_SIZE:
        log_buffer = StringIO()
Example #7
    def stop(self, signum=None, frame_unused=None):
        """Stop the consumer from consuming by calling BasicCancel and setting
        our state.

        """
        LOGGER.debug('Stop called in state: %s', self.state_description)
        if self.is_stopped:
            LOGGER.warning('Stop requested but consumer is already stopped')
            return
        elif self.is_shutting_down:
            LOGGER.warning('Stop requested, consumer is already shutting down')
            return
        elif self.is_waiting_to_shutdown:
            LOGGER.warning('Stop requested but already waiting to shut down')
            return

        # Stop consuming
        self.cancel_consumer_with_rabbitmq()

        # Wait until the consumer has finished processing to shutdown
        if self.is_processing:
            LOGGER.info('Waiting for consumer to finish processing')
            self.set_state(self.STATE_STOP_REQUESTED)
            if signum == signal.SIGTERM:
                signal.siginterrupt(signal.SIGTERM, False)
            return

        self.on_ready_to_stop()
Example #8
def main():
    pipe_r, pipe_w = os.pipe()
    flags = fcntl.fcntl(pipe_w, fcntl.F_GETFL, 0)
    flags = flags | os.O_NONBLOCK
    fcntl.fcntl(pipe_w, fcntl.F_SETFL, flags)
    
    signal.signal(signal.SIGCHLD, lambda x,y: None)
    signal.signal(signal.SIGALRM, lambda x,y: None)
    signal.siginterrupt(signal.SIGCHLD, False)  # makes no difference: epoll_wait()
    signal.siginterrupt(signal.SIGALRM, False)  # is never restarted by SA_RESTART, per signal(7)
    signal.set_wakeup_fd(pipe_w)
    signal.setitimer(signal.ITIMER_REAL, 2, 2)
    
    poller = select.epoll()
    poller.register(pipe_r, select.EPOLLIN)
    poller.register(sys.stdin, select.EPOLLIN)
    
    print "Main screen turn on"
    while True:
        events = []
        try:
            events = poller.poll()
            try:
                for fd, flags in events:
                    ch = os.read(fd, 1)
                    if fd == pipe_r:
                        sys.stdout.write("We get Signal")
                    if fd == sys.stdin.fileno():
                        sys.stdout.write(ch)
                    sys.stdout.flush()
            except IOError as e:
                print "exception loop: " + str(e)
        except IOError as e:
            print "exception poll: " + str(e)
Example #9
def fastingest(ns):
    """
    A really fast ingest version, unlike the
    echoprint server utility, which will eat all of your RAM.

    :param ns: Namespace object with required config
    :return: None
    """
    es = EchoprintServer(
        solr_url=ns.solr, tyrant_address=(ns.tyrant_host, ns.tyrant_port)
    )
    import_date = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")

    status = 0
    start_time = datetime.datetime.utcnow()

    def signal_handler(signum, frame):
        diff = datetime.datetime.utcnow() - start_time
        print '{0}, {1}'.format(diff, status)
    signal.signal(signal.SIGUSR1, signal_handler)
    signal.siginterrupt(signal.SIGUSR1, False)

    with committing(es), open(ns.path) as f:
        data = ijson.items(f, 'item')

        for item in data:
            song = Song.from_echoprint(item, import_date=import_date)
            if song is not None:
                # don't commit, the contextmanager (with) takes care of it
                es.ingest(song, commit=False,
                          check_duplicates=ns.check_duplicates)
            status += 1
Example #10
def main(prog, args):
    listenfd = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)

    # empty string means INADDR_ANY
    servaddr = ('', const.SERV_PORT)

    listenfd.bind(servaddr)
    listenfd.listen(const.LISTENQ)

    signal.signal(signal.SIGCHLD, sig_chld)
    signal.siginterrupt(signal.SIGCHLD, False)

    while True:
        # XXX
        # In Python, socket.accept() is written in C, so it blocks
        # the main process from receiving the SIGCHLD signal.
        # Thus a child process will remain a zombie until the next
        # client connection makes this accept() return.
        #
        # Setting a timeout on listenfd and wrapping accept() in a loop
        # is a workaround, but not a perfect one.
        connfd, remote_addr = listenfd.accept()

        if not os.fork():
            # close listen fd in child process
            # not actual close, just minus its reference count
            # by one
            listenfd.close()
            tools.str_echo(connfd)
            connfd.close()
            sys.exit(0)

        connfd.close()
Example #11
    def __init__(self):
        self.processes = {}

        # Stop all processes as last part of mainloop termination.
        main.signals['shutdown-after'].connect(self.stopall)
        main.signals['unix-signal'].connect(self._sigchld_handler)

        # Set SA_RESTART bit for the signal, which restarts any interrupted
        # system calls -- however, select (at least on Linux) is NOT restarted
        # for reasons described at:
        #    http://lkml.indiana.edu/hypermail/linux/kernel/0003.2/0336.html
        #
        # We do this early (which is effectively at import time, because
        # _Supervisor() gets instantiated at import) so that child processes
        # can be created before the main loop is started, and their
        # termination won't interrupt any system calls.
        if sys.hexversion >= 0x02060000:
            # Python 2.6+ has signal.siginterrupt()
            signal.siginterrupt(signal.SIGCHLD, False)
        elif sys.hexversion >= 0x02050000:
            # Python 2.5
            import ctypes, ctypes.util
            libc = ctypes.util.find_library('c')
            ctypes.CDLL(libc).siginterrupt(signal.SIGCHLD, 0)
        else:
            # Python 2.4- is not supported.
            raise SystemError('kaa.base requires Python 2.5 or later')
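The version dance above generalizes to a small helper; a sketch that prefers signal.siginterrupt() and falls back to libc through ctypes, exactly as the Python 2.5 branch does:

import signal

def set_sa_restart(signum):
    """Set the SA_RESTART bit for signum so interrupted syscalls resume."""
    if hasattr(signal, 'siginterrupt'):   # Python 2.6+
        signal.siginterrupt(signum, False)
    else:                                 # fall back to libc directly
        import ctypes, ctypes.util
        libc = ctypes.CDLL(ctypes.util.find_library('c'))
        libc.siginterrupt(signum, 0)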
Example #12
 def __register_sighandler(self):
     # Register a SIGUSR1 handler.
     def handler(signum, frame):
         Ping.set(True)
     signal.signal(signal.SIGUSR1, handler)
     # Don't interrupt system calls on SIGUSR1.
     signal.siginterrupt(signal.SIGUSR1, False)
Example #13
    def open(self):
        """
        Fork a child nethack process into a pty and setup its stdin and stdout
        """

        (self.pid, self.pipe) = os.forkpty()

        if self.pid == 0:
            # I'm the child process in a fake pty. I need to replace myself
            # with an instance of nethack.
            #
            # NOTE: The '--proxy' argument doesn't seem to do anything, though
            # it's used by dgamelaunch, which is a bit confusing. However,
            # without *some* argument execvp doesn't seem to like nethack and
            # launches a shell instead. (The list passed to execvpe is the
            # full argv, so '--proxy' actually ends up as argv[0].)
            if self.debug:
                os.execvpe("nethack", ["--proxy", "-D"], os.environ)
            else:
                os.execvpe("nethack", ["--proxy"], os.environ)
        else:
            # Before we do anything else, it's time to establish some boundaries
            signal.siginterrupt(signal.SIGCHLD, True)
            signal.signal(signal.SIGCHLD, self._close)

            # When my tty resizes, the child's pty should resize too.
            signal.signal(signal.SIGWINCH, self.resize_child)

            # Set up our input/output proxies
            self.stdout = os.fdopen(self.pipe, "rb", 0)
            self.stdin = os.fdopen(self.pipe, "wb", 0)

            # Set the initial size of the child pty to my own size.
            self.resize_child()
Example #14
def timed_wait(p, timeout):
    signal(SIGALRM, raise_timeout_exception)
    siginterrupt(SIGALRM, False)
    alarm(timeout)
    rc = p.wait()
    alarm(0)
    return rc
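This snippet assumes `from signal import signal, siginterrupt, alarm, SIGALRM` plus a `raise_timeout_exception` handler defined elsewhere. A self-contained variant of the same idea (names illustrative). One caveat: siginterrupt(SIGALRM, False) sets SA_RESTART, and per signal(7) the kernel then restarts waitpid() after the handler returns, which can delay the timeout; the sketch therefore leaves the default interruptible behaviour.

import signal
import subprocess

class WaitTimeout(Exception):
    pass

def _raise_timeout(signum, frame):
    raise WaitTimeout()

def timed_wait(p, timeout):
    """Wait on process p; raise WaitTimeout after `timeout` seconds."""
    old = signal.signal(signal.SIGALRM, _raise_timeout)
    signal.alarm(timeout)
    try:
        return p.wait()
    finally:
        signal.alarm(0)                      # cancel any pending alarm
        signal.signal(signal.SIGALRM, old)   # restore the previous handler

p = subprocess.Popen(['sleep', '10'])
try:
    rc = timed_wait(p, 2)
except WaitTimeout:
    p.kill()
    p.wait()
    print('timed out')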
Example #15
    def stop(self, signum=None, _unused=None):
        """Stop the consumer from consuming by calling BasicCancel and setting
        our state.

        :param int signum: The signal received
        :param frame _unused: The stack frame from when the signal was called

        """
        LOGGER.debug('Stop called in state: %s', self.state_description)
        if self.is_stopped:
            LOGGER.warning('Stop requested but consumer is already stopped')
            return
        elif self.is_shutting_down:
            LOGGER.warning('Stop requested, consumer is already shutting down')
            return
        elif self.is_waiting_to_shutdown:
            LOGGER.warning('Stop requested but already waiting to shut down')
            return

        # Stop consuming and close AMQP connections
        self.shutdown_connections()

        # Wait until the consumer has finished processing to shutdown
        if self.is_processing:
            LOGGER.info('Waiting for consumer to finish processing')
            self.set_state(self.STATE_STOP_REQUESTED)
            if signum == signal.SIGTERM:
                signal.siginterrupt(signal.SIGTERM, False)
            return
Example #16
    def add_signal_handler(self, sig, callback, *args):
        """Add a handler for a signal.  UNIX only.

        Raise ValueError if the signal number is invalid or uncatchable.
        Raise RuntimeError if there is a problem setting up the handler.
        """
        if (coroutines.iscoroutine(callback)
        or coroutines.iscoroutinefunction(callback)):
            raise TypeError("coroutines cannot be used "
                            "with add_signal_handler()")
        self._check_signal(sig)
        self._check_closed()
        try:
            # set_wakeup_fd() raises ValueError if this is not the
            # main thread.  By calling it early we ensure that an
            # event loop running in another thread cannot add a signal
            # handler.
            signal.set_wakeup_fd(self._csock.fileno())
        except (ValueError, OSError) as exc:
            raise RuntimeError(str(exc))

        handle = events.Handle(callback, args, self)
        self._signal_handlers[sig] = handle

        try:
            if compat.PY33:
                # On Python 3.3 and newer, the C signal handler writes the
                # signal number into the wakeup file descriptor and then calls
                # Py_AddPendingCall() to schedule the Python signal handler.
                #
                # Register a dummy signal handler to ask Python to write the
                # signal number into the wakeup file descriptor.
                # _process_self_data() will read signal numbers from this file
                # descriptor to handle signals.
                signal.signal(sig, _sighandler_noop)
            else:
                # On Python 3.2 and older, the C signal handler first calls
                # Py_AddPendingCall() to schedule the Python signal handler,
                # and then write a null byte into the wakeup file descriptor.
                signal.signal(sig, self._handle_signal)

            # Set SA_RESTART to limit EINTR occurrences.
            signal.siginterrupt(sig, False)
        except (RuntimeError, OSError) as exc:
            # On Python 2, signal.signal(signal.SIGKILL, signal.SIG_IGN) raises
            # RuntimeError(22, 'Invalid argument'). On Python 3,
            # OSError(22, 'Invalid argument') is raised instead.
            exc_type, exc_value, tb = sys.exc_info()

            del self._signal_handlers[sig]
            if not self._signal_handlers:
                try:
                    signal.set_wakeup_fd(-1)
                except (ValueError, OSError) as nexc:
                    logger.info('set_wakeup_fd(-1) failed: %s', nexc)

            if isinstance(exc, RuntimeError) or exc.errno == errno.EINVAL:
                raise RuntimeError('sig {0} cannot be caught'.format(sig))
            else:
                reraise(exc_type, exc_value, tb)
Example #17
 def sigusr1_handler(self, signum, stack_frame):
     # Apparently the siginterrupt setting can get reset on some platforms.
     signal.siginterrupt(signal.SIGUSR1, False)
     print('Received SIGUSR1. Current stack trace:', file=sys.stderr)
     traceback.print_stack(stack_frame)
     if self.runner is not None:
         self.runner.debug_status()
Example #18
    def init_signals(self):
        # reset signal handlers to defaults
        for s in self.SIGNALS:
            signal.signal(s, signal.SIG_DFL)

        # einhorn will send SIGUSR2 to request a graceful shutdown
        signal.signal(signal.SIGUSR2, self.start_graceful_shutdown)
        signal.siginterrupt(signal.SIGUSR2, False)
Example #19
 def _child_handler(self, sig, stack):
     pid, sts = os.waitpid(-1, os.WNOHANG)
     proc = self._procs.get(pid)
     if proc is not None:
         self._proc_status(proc, sts)
     signal.signal(SIGCHLD, self._child_handler)
     signal.siginterrupt(SIGCHLD, False)
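One detail worth noting: several SIGCHLDs can coalesce into a single delivery, so a handler that reaps exactly one child can leave zombies behind. A sketch of the looping variant (attribute names follow the example):

import errno
import os
import signal

def _child_handler(self, sig, stack):
    while True:
        try:
            pid, sts = os.waitpid(-1, os.WNOHANG)
        except OSError as exc:
            if exc.errno == errno.ECHILD:   # no children at all
                break
            raise
        if pid == 0:                        # children remain, none exited
            break
        proc = self._procs.get(pid)
        if proc is not None:
            self._proc_status(proc, sts)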
Example #20
    def __init__(self, bot_id):
        self.__log_buffer = []
        self.parameters = Parameters()

        self.__error_retries_counter = 0
        self.__source_pipeline = None
        self.__destination_pipeline = None
        self.logger = None

        try:
            version_info = sys.version.splitlines()[0].strip()
            self.__log_buffer.append(('info',
                                      '{} initialized with id {} and version '
                                      '{} as process {}.'
                                      ''.format(self.__class__.__name__,
                                                bot_id, version_info,
                                                os.getpid())))
            self.__log_buffer.append(('debug', 'Library path: %r.' % __file__))

            self.__load_defaults_configuration()
            self.__load_system_configuration()

            self.__check_bot_id(bot_id)
            self.__bot_id = bot_id

            if self.parameters.logging_handler == 'syslog':
                syslog = self.parameters.logging_syslog
            else:
                syslog = False
            self.logger = utils.log(self.__bot_id, syslog=syslog,
                                    log_path=self.parameters.logging_path,
                                    log_level=self.parameters.logging_level)
        except:
            self.__log_buffer.append(('critical', traceback.format_exc()))
            self.stop()
        else:
            for line in self.__log_buffer:
                getattr(self.logger, line[0])(line[1])

        try:
            self.logger.info('Bot is starting.')
            self.__load_runtime_configuration()
            self.__load_pipeline_configuration()
            self.__load_harmonization_configuration()

            self.init()

            self.__sighup = False
            signal.signal(signal.SIGHUP, self.__handle_sighup_signal)
            # system calls should not be interrupted, but restarted
            signal.siginterrupt(signal.SIGHUP, False)
        except Exception as exc:
            if self.parameters.error_log_exception:
                self.logger.exception('Bot initialization failed.')
            else:
                self.logger.error(utils.error_message_from_exc(exc))
                self.logger.error('Bot initialization failed.')

            self.stop()
            raise
Example #21
def main():
    proc_pool = {}  # track every child process we spawn
    cntl_q = mp.Queue()  # queue for control messages
    data_q = mp.Queue()  # queue for data payloads
    exit_flag = mp.Event()  # exit flag, initially False

    # On SIGINT, tell the proxy to stop reading data
    signal(SIGINT, lambda x, y: exit_flag.set())
    siginterrupt(SIGINT, False)

    # Start the proxy process; worker processes are started on demand
    print 'main {} started'.format(os.getpid())
    proc = mp.Process(target=proc_proxy, args=(cntl_q, data_q, exit_flag))
    proc.start()
    proc_pool[proc.pid] = proc
    print 'proxy {} started'.format(proc.pid)

    while True:
        item = cntl_q.get()
        if item['event'] == 'data':
            proc = mp.Process(target=proc_worker, args=(cntl_q, data_q))
            proc.start()
            proc_pool[proc.pid] = proc
            print 'worker {} started'.format(proc.pid)
        elif item['event'] == 'exit':
            proc = proc_pool.pop(item['pid'])
            proc.join()
            print 'child {} stopped'.format(item['pid'])
        else:
            print 'It\'s impossible !'

        if not proc_pool:  # all child processes have exited
            break

    print 'main {} stopped'.format(os.getpid())
Example #22
def main():
    """Parses the arguments and call worker()"""
    # Set the signal handler
    for s in [signal.SIGINT, signal.SIGTERM]:
        signal.signal(s, shutdown)
        signal.siginterrupt(s, False)
    parser, _ = utils.create_argparser(__doc__)
    parser.add_argument(
        '--sensor', metavar='SENSOR[:SENSOR]',
        help='sensor to check, optionally with a long name, defaults to all.',
    )
    parser.add_argument(
        '--directory', metavar='DIR',
        help='base directory (defaults to /ivre/passiverecon/).',
        default="/ivre/passiverecon/",
    )
    parser.add_argument(
        '--progname', metavar='PROG',
        help='Program to run (defaults to ivre passiverecon2db).',
        default="ivre passiverecon2db",
    )
    args = parser.parse_args()
    if args.sensor is not None:
        SENSORS.update(dict([args.sensor.split(':', 1)
                             if ':' in args.sensor
                             else [args.sensor, args.sensor]]))
        sensor = args.sensor.split(':', 1)[0]
    else:
        sensor = None
    worker(args.progname, args.directory, sensor=sensor)
Example #23
 def __init_signal(self):
     signal.signal(signal.SIGCHLD, signal.SIG_DFL)
     signal.siginterrupt(signal.SIGCHLD, False)
     signal.signal(signal.SIGTERM, MasterD.sig_term_handler)
Example #24
def hup_hook(signal_or_callable=signal.SIGTERM, verbose=False):
    """
    Register a signal handler for `signal.SIGHUP` that checks for modified
    files and only acts if at least one modified file is found.

    @type signal_or_callable: str, int or callable
    @param signal_or_callable: You can pass either a signal or a callable.
        The signal can be specified by name or number. If specifying by name,
        the 'SIG' portion is optional. For example, valid values for SIGINT
        include 'INT', 'SIGINT' and `signal.SIGINT`.

        Alternatively, you can pass a callable that will be called with the list
        of changed files. So the call signature should be `func(list)`. The return
        value of the callable is ignored.
    @type verbose: bool or callable
    @param verbose: Defaults to False. True indicates that a message should be
        printed. You can also pass a callable such as log.info.
    """

    #noinspection PyUnusedLocal
    def handle_hup(signum, frame):
        changed = modified()
        if changed:
            if callable(signal_or_callable):
                func = signal_or_callable
                args = (changed,)
                op = 'Calling'
                try:
                    name = signal_or_callable.__name__
                except Exception:
                    name = str(signal_or_callable)
            else:
                if isinstance(signal_or_callable, int):
                    name = str(signal_or_callable)
                    signum = signal_or_callable
                    if verbose:
                        for item in dir(signal):
                            if item.startswith('SIG') and getattr(signal, item) == signal_or_callable:
                                name = item
                                break
                else:
                    name = signal_or_callable if signal_or_callable.startswith('SIG') else 'SIG' + signal_or_callable
                    signum = getattr(signal, name)
                func = os.kill
                args = (os.getpid(), signum)
                op = 'Sending'
            if verbose:
                more = ' and {0} other files'.format(len(changed)) if len(changed) > 1 else ''
                message = '{0} {1} because {2}{3} changed'.format(op, name, changed[0], more)
                if callable(verbose):
                    #noinspection PyCallingNonCallable
                    verbose(message)
                else:
                    print(message)
            func(*args)

    files()
    signal.signal(signal.SIGHUP, handle_hup)
    signal.siginterrupt(signal.SIGHUP, False)
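Hypothetical usage of hup_hook, covering both calling conventions from the docstring (restart_workers and the logger are illustrative, and a second registration would replace the first):

import logging

log = logging.getLogger(__name__)

def restart_workers(changed):
    log.info('%d source file(s) changed, restarting', len(changed))
    # ... re-exec or respawn here ...

# Callable form: invoked with the list of changed files on SIGHUP.
hup_hook(restart_workers, verbose=log.info)

# Signal form: re-sends SIGTERM to this process if sources changed.
hup_hook('TERM', verbose=True)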
Example #25
 def __init__(self, interval=0.01, mode='virtual'):
     self.interval = interval
     self.mode = mode
     assert mode in Collector.MODES
     timer, sig = Collector.MODES[self.mode]
     signal.signal(sig, self.handler)
     signal.siginterrupt(sig, False)
     self.reset()
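Collector.MODES is not shown; given how it is unpacked above, it is presumably a timer/signal pairing table along these lines (the pairings follow setitimer(2)):

import signal

class Collector(object):
    MODES = {
        'real':    (signal.ITIMER_REAL,    signal.SIGALRM),    # wall-clock time
        'virtual': (signal.ITIMER_VIRTUAL, signal.SIGVTALRM),  # user CPU time
        'prof':    (signal.ITIMER_PROF,    signal.SIGPROF),    # user+system CPU time
    }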
Example #26
 def set_sigchld_handler(self):
     # TODO: find out whether set_wakeup_fd still works if the default
     # signal handler is used (I'm pretty sure it doesn't work if the
     # signal is ignored).
     signal.signal(signal.SIGCHLD, self.handle_sigchld)
     # This should keep reads and writes from getting EINTR.
     if hasattr(signal, 'siginterrupt'):
         signal.siginterrupt(signal.SIGCHLD, False)
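As for the TODO: the wakeup fd is written by CPython's C-level signal handler, which is only installed when a Python handler is registered through signal.signal(); with SIG_DFL or SIG_IGN the signal never reaches it, so nothing is written. A quick check (Python 3 sketch):

import os
import signal
import socket

ssock, csock = socket.socketpair()
ssock.setblocking(False)
csock.setblocking(False)
signal.set_wakeup_fd(csock.fileno())

signal.signal(signal.SIGUSR1, signal.SIG_IGN)
os.kill(os.getpid(), signal.SIGUSR1)
try:
    ssock.recv(1)
except BlockingIOError:
    print('ignored signal never reached the wakeup fd')

signal.signal(signal.SIGUSR1, lambda s, f: None)
os.kill(os.getpid(), signal.SIGUSR1)
print(ssock.recv(1))  # now the signal number byte arrives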
Example #27
 def setupHandler(self):
     self.fd = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM, 0)
     if not self.fd:
         return -1
     Signal.fds[self.signum] = self
     signal.signal(self.signum, self.handler)
     signal.siginterrupt(self.signum, False)
     return 0
Example #28
    def mount(self, args):
        '''Mount a backup repository as a FUSE filesystem.

        This subcommand allows you to access backups in an Obnam
        backup repository as normal files and directories. Each
        backed up file or directory can be viewed directly, using
        a graphical file manager or command line tools.

        Example: To mount your backup repository:

        mkdir my-fuse
        obnam mount --viewmode multiple --to my-fuse

        You can then access the backup using commands such as these:

        ls -l my-fuse
        ls -l my-fuse/latest
        diff -u my-fuse/latest/home/liw/README ~/README
        
        You can also restore files by copying them from the
        my-fuse directory:

        cp -a my-fuse/12765/Maildir ~/Maildir.restored

        To un-mount:

        fusermount -u my-fuse

        '''

        if not hasattr(fuse, 'fuse_python_api'):
            raise obnamlib.Error('Failed to load module "fuse", '
                                 'try installing python-fuse')
        self.app.settings.require('repository')
        self.app.settings.require('client-name')
        self.app.settings.require('to')
        self.repo = self.app.open_repository()
        self.repo.open_client(self.app.settings['client-name'])

        self.mountroot = (['/'] + self.app.settings['root'] + args)[-1]
        if self.mountroot != '/':
            self.mountroot = self.mountroot.rstrip('/')

        logging.debug('FUSE Mounting %s@%s:%s to %s', self.app.settings['client-name'],
                        self.app.settings['generation'],
                        self.mountroot, self.app.settings['to'])

        try:
            ObnamFuseOptParse.obnam = self
            fs = ObnamFuse(obnam=self, parser_class=ObnamFuseOptParse)
            signal.signal(signal.SIGUSR1, lambda s,f: fs.sigUSR1())
            signal.siginterrupt(signal.SIGUSR1, False)
            fs.flags = 0
            fs.multithreaded = 0
            fs.parse()
            fs.main()
        except fuse.FuseError, e:
            raise obnamlib.Error(repr(e))
Example #29
 def setup_signals(self):
     """
     Register signal handlers
     """
     signal.signal(signal.SIGTERM, self.sigterm_handler)
     signal.signal(signal.SIGINT, self.sigterm_handler)
     signal.signal(signal.SIGHUP, self.sighup_handler)
     signal.siginterrupt(signal.SIGHUP, False)
     logger.info("Set up signal handlers")
Example #30
    def run(self):
        signal(SIGTERM, self._sigterm)
        signal(SIGINT, self._sigterm)
        # Try to avoid stacktraces from interrupted signal calls
        siginterrupt(SIGTERM, False)
        siginterrupt(SIGINT, False)

        if self.options.restart:
            if os.path.exists(self.config_filename):
                os.remove(self.config_filename)

        self._loadConfig()
        if self.options.batchsize <= 0:
            self.parser.error('Invalid argument for --batchsize parameter - must be positive')
        if self.options.sleep < 0:
            self.parser.error('Invalid argument for --sleep parameter')

        if not self.options.fetchArgs:
            if not self.options.evtuser or self.options.evtpass is None:
                self.parser.error('Required arguments --evtuser and --evtpass must be provided when using '
                                  '--dont-fetch-args')
        else:
            zem = self.dmd.ZenEventManager
            self.options.evthost = zem.host
            self.options.evtport = zem.port
            self.options.evtuser = zem.username
            self.options.evtpass = zem.password
            self.options.evtdb = zem.database
        conn = None
        publisher = None
        try:
            conn = connect(host=self.options.evthost,
                           user=self.options.evtuser,
                           passwd=self.options.evtpass,
                           db=self.options.evtdb,
                           port=self.options.evtport,
                           cursorclass=DictCursor,
                           use_unicode=True)
            conn.autocommit(1)

            publisher = getUtility(IQueuePublisher)

            # Migrate status
            self._migrate_events(conn, publisher, True)

            # Migrate history
            self._migrate_events(conn, publisher, False)
            
        except Exception as e:
            if log.isEnabledFor(logging.DEBUG):
                log.exception('Error migrating events')
            print >>sys.stderr, "Failed to migrate events: %s" % e
        finally:
            if publisher:
                publisher.close()
            if conn:
                conn.close()
Example #31
def main(argv, config):
    g.running = 'server'

    parser = ArgumentParser(prog=argv.pop(0))
    parser.add_argument('--debug', action='store_true')
    options = parser.parse_args(argv)

    # all forks belong to the same happy family
    try:
        os.setpgrp()
    except OSError:
        print(
            "Failed to create process group - there is probably already one (daemontools).",
            file=sys.stderr)

    # Set a low (but not too low) open file limit to make
    # dispatch.update_valid_fds faster.
    # The runners will set the highest limit they can
    # before actually running any methods.
    r1, r2 = resource.getrlimit(resource.RLIMIT_NOFILE)
    r1 = min(r1, r2, 1024)
    resource.setrlimit(resource.RLIMIT_NOFILE, (r1, r2))

    # Start the board in a separate process so it can't interfere.
    # Even if it dies we don't care.
    try:
        if not isinstance(config.board_listen, tuple):
            # Don't bother if something is already listening.
            check_socket(config.board_listen)
        Process(target=board.run, args=(config, ), name='board').start()
    except Exception:
        pass

    iowrapper.main()

    # setup statmsg sink and tell address using ENV
    statmsg_rd, statmsg_wr = socket.socketpair(socket.AF_UNIX,
                                               socket.SOCK_DGRAM)
    os.environ['BD_STATUS_FD'] = str(statmsg_wr.fileno())

    def buf_up(fh, opt):
        sock = socket.fromfd(fh.fileno(), socket.AF_UNIX, socket.SOCK_DGRAM)
        sock.setsockopt(socket.SOL_SOCKET, opt, 256 * 1024)
        # does not close fh, because fromfd dups the fd (but not the underlying socket)
        sock.close()

    buf_up(statmsg_wr, socket.SO_SNDBUF)
    buf_up(statmsg_rd, socket.SO_RCVBUF)

    t = DeadlyThread(target=statmsg_sink,
                     args=(statmsg_rd, ),
                     name="statmsg sink")
    t.daemon = True
    t.start()

    # do all main-stuff, i.e. run server
    sys.stdout = autoflush.AutoFlush(sys.stdout)
    sys.stderr = autoflush.AutoFlush(sys.stderr)
    atexit.register(exitfunction)
    signal.signal(signal.SIGTERM, exitfunction)
    signal.signal(signal.SIGINT, exitfunction)

    signal.signal(signal.SIGUSR1, siginfo)
    signal.siginterrupt(signal.SIGUSR1, False)
    if hasattr(signal, 'SIGINFO'):
        signal.signal(signal.SIGINFO, siginfo)
        signal.siginterrupt(signal.SIGINFO, False)

    if isinstance(config.listen, tuple):
        server = ThreadedHTTPServer(config.listen, XtdHandler)
    else:
        check_socket(config.listen)
        # We want the socket to be world writeable, protect it with dir permissions.
        u = os.umask(0)
        server = ThreadedUnixHTTPServer(config.listen, XtdHandler)
        os.umask(u)

    if config.get('urd_local'):
        from accelerator import urd
        t = DeadlyThread(target=urd.main,
                         args=(['urd', '--quiet',
                                '--allow-passwordless'], config),
                         name='urd')
        t.daemon = True
        t.start()

    ctrl = control.Main(config, options, config.url)
    print()
    ctrl.print_workdirs()
    print()

    XtdHandler.ctrl = ctrl
    job_tracking[None].workdir = ctrl.target_workdir

    for n in (
            "project_directory",
            "result_directory",
            "input_directory",
    ):
        v = config.get(n)
        n = n.replace("_", " ")
        print("%17s: %s" % (
            n,
            v,
        ))
    for n in (
            "board",
            "urd",
    ):
        v = config.get(n + '_listen')
        if v and not config.get(n + '_local', True):
            extra = ' (remote)'
        else:
            extra = ''
        print("%17s: %s%s" % (
            n,
            v,
            extra,
        ))
    print()

    print("Serving on %s\n" % (config.listen, ), file=sys.stderr)
    server.serve_forever()
Example #32
 def init_signals(self):
     for s in self.SIGNALS:
         signal.signal(s, signal.SIG_DFL)
     signal.signal(signal.SIGQUIT, self.handle_quit)
     signal.signal(signal.SIGTERM, self.handle_exit)
     signal.signal(signal.SIGINT, self.handle_quit)
     signal.siginterrupt(signal.SIGTERM, False)
Example #33

def printTempStats():
    init_strings = "\n\n" + str(datetime.now()) + " TEMP"
    printStats(init_strings)


def sigquitHandler(signum, frame):
    init_strings = "\n\n" + str(datetime.now()) + " END"
    printStats(init_strings)
    stdout.write("\n")
    sys.exit(0)


signal.signal(signal.SIGINT, sigquitHandler)
signal.siginterrupt(signal.SIGINT, True)

#init some variables
command_limit_one= \
    "http://ws.audioscrobbler.com/2.0/?method=user.getfriends"\
    "&user={0}&limit=1&page={1}&api_key={2}"

command_user_info = \
    "http://ws.audioscrobbler.com/2.0/?method=user.getinfo"\
    "&user={0}&api_key={1}"

command_user_shouts = \
    "http://ws.audioscrobbler.com/2.0/?method=user.getshouts"\
    "&user={0}&limit={1}&api_key={2}"

api_key = 'cda9140cf81af12206d411e1d420af18'  #team_amz's API Key
Example #34
def main(args, net, datadir_path, merged_urls, worker_endpoint):
    try:
        print 'p2pool (version %s)' % (p2pool.__version__, )
        print

        @defer.inlineCallbacks
        def connect_p2p():
            # connect to bitcoind over bitcoin-p2p
            print '''Testing bitcoind P2P connection to '%s:%s'...''' % (
                args.bitcoind_address, args.bitcoind_p2p_port)
            factory = bitcoin_p2p.ClientFactory(net.PARENT)
            reactor.connectTCP(args.bitcoind_address, args.bitcoind_p2p_port,
                               factory)
            yield factory.getProtocol()  # waits until handshake is successful
            print '    ...success!'
            print
            defer.returnValue(factory)

        if args.testnet:  # establish p2p connection first if testnet so bitcoind can work without connections
            factory = yield connect_p2p()

        # connect to bitcoind over JSON-RPC and do initial getmemorypool
        url = '%s://%s:%i/' % ('https' if args.bitcoind_rpc_ssl else 'http',
                               args.bitcoind_address, args.bitcoind_rpc_port)
        print '''Testing bitcoind RPC connection to '%s' with username '%s'...''' % (
            url, args.bitcoind_rpc_username)
        bitcoind = jsonrpc.Proxy(
            url,
            dict(Authorization='Basic ' +
                 base64.b64encode(args.bitcoind_rpc_username + ':' +
                                  args.bitcoind_rpc_password)),
            timeout=30)
        yield helper.check(bitcoind, net)
        temp_work = yield helper.getwork(bitcoind)

        bitcoind_warning_var = variable.Variable(None)

        @defer.inlineCallbacks
        def poll_warnings():
            errors = (yield
                      deferral.retry('Error while calling getmininginfo:')(
                          bitcoind.rpc_getmininginfo)())['errors']
            bitcoind_warning_var.set(errors if errors != '' else None)

        yield poll_warnings()
        task.LoopingCall(poll_warnings).start(20 * 60)

        print '    ...success!'
        print '    Current block hash: %x' % (temp_work['previous_block'], )
        print '    Current block height: %i' % (temp_work['height'] - 1, )
        print

        if not args.testnet:
            factory = yield connect_p2p()

        print 'Determining payout address...'
        if args.pubkey_hash is None:
            address_path = os.path.join(datadir_path, 'cached_payout_address')

            if os.path.exists(address_path):
                with open(address_path, 'rb') as f:
                    address = f.read().strip('\r\n')
                print '    Loaded cached address: %s...' % (address, )
            else:
                address = None

            if address is not None:
                res = yield deferral.retry(
                    'Error validating cached address:',
                    5)(lambda: bitcoind.rpc_validateaddress(address))()
                if not res['isvalid'] or not res['ismine']:
                    print '    Cached address is either invalid or not controlled by local bitcoind!'
                    address = None

            if address is None:
                print '    Getting payout address from bitcoind...'
                address = yield deferral.retry(
                    'Error getting payout address from bitcoind:',
                    5)(lambda: bitcoind.rpc_getaccountaddress('p2pool'))()

            with open(address_path, 'wb') as f:
                f.write(address)

            my_pubkey_hash = bitcoin_data.address_to_pubkey_hash(
                address, net.PARENT)
        else:
            my_pubkey_hash = args.pubkey_hash
        print '    ...success! Payout address:', bitcoin_data.pubkey_hash_to_address(
            my_pubkey_hash, net.PARENT)
        print

        ss = p2pool_data.ShareStore(os.path.join(datadir_path, 'shares.'), net)
        shares = {}
        known_verified = set()
        print "Loading shares..."
        for i, (mode, contents) in enumerate(ss.get_shares()):
            if mode == 'share':
                contents.time_seen = 0
                shares[contents.hash] = contents
                if len(shares) % 1000 == 0 and shares:
                    print "    %i" % (len(shares), )
            elif mode == 'verified_hash':
                known_verified.add(contents)
            else:
                raise AssertionError()
        print "    ...done loading %i shares (%i verified)!" % (
            len(shares), len(known_verified))
        print

        print 'Initializing work...'

        node = p2pool_node.Node(factory, bitcoind, shares.values(),
                                known_verified, net)
        yield node.start()

        for share_hash in shares:
            if share_hash not in node.tracker.items:
                ss.forget_share(share_hash)
        for share_hash in known_verified:
            if share_hash not in node.tracker.verified.items:
                ss.forget_verified_share(share_hash)
        del shares, known_verified
        node.tracker.removed.watch(lambda share: ss.forget_share(share.hash))
        node.tracker.verified.removed.watch(
            lambda share: ss.forget_verified_share(share.hash))

        def save_shares():
            for share in node.tracker.get_chain(
                    node.best_share_var.value,
                    min(node.tracker.get_height(node.best_share_var.value),
                        2 * net.CHAIN_LENGTH)):
                ss.add_share(share)
                if share.hash in node.tracker.verified.items:
                    ss.add_verified_hash(share.hash)

        task.LoopingCall(save_shares).start(60)

        print '    ...success!'
        print

        print 'Joining p2pool network using port %i...' % (args.p2pool_port, )

        @defer.inlineCallbacks
        def parse(x):
            if ':' in x:
                ip, port = x.split(':')
                defer.returnValue(((yield reactor.resolve(ip)), int(port)))
            else:
                defer.returnValue(((yield reactor.resolve(x)), net.P2P_PORT))

        addrs = {}
        if os.path.exists(os.path.join(datadir_path, 'addrs')):
            try:
                with open(os.path.join(datadir_path, 'addrs'), 'rb') as f:
                    addrs.update(
                        dict((tuple(k), v) for k, v in json.loads(f.read())))
            except:
                print >> sys.stderr, 'error parsing addrs'
        for addr_df in map(parse, net.BOOTSTRAP_ADDRS):
            try:
                addr = yield addr_df
                if addr not in addrs:
                    addrs[addr] = (0, time.time(), time.time())
            except:
                log.err()

        connect_addrs = set()
        for addr_df in map(parse, args.p2pool_nodes):
            try:
                connect_addrs.add((yield addr_df))
            except:
                log.err()

        node.p2p_node = p2pool_node.P2PNode(
            node,
            port=args.p2pool_port,
            max_incoming_conns=args.p2pool_conns,
            addr_store=addrs,
            connect_addrs=connect_addrs,
            desired_outgoing_conns=args.p2pool_outgoing_conns,
        )
        node.p2p_node.start()

        def save_addrs():
            with open(os.path.join(datadir_path, 'addrs'), 'wb') as f:
                f.write(json.dumps(node.p2p_node.addr_store.items()))

        task.LoopingCall(save_addrs).start(60)

        print '    ...success!'
        print

        if args.upnp:

            @defer.inlineCallbacks
            def upnp_thread():
                while True:
                    try:
                        is_lan, lan_ip = yield ipdiscover.get_local_ip()
                        if is_lan:
                            pm = yield portmapper.get_port_mapper()
                            yield pm._upnp.add_port_mapping(
                                lan_ip, args.p2pool_port, args.p2pool_port,
                                'p2pool', 'TCP')
                    except defer.TimeoutError:
                        pass
                    except:
                        if p2pool.DEBUG:
                            log.err(None, 'UPnP error:')
                    yield deferral.sleep(random.expovariate(1 / 120))

            upnp_thread()

        # start listening for workers with a JSON-RPC server

        print 'Listening for workers on %r port %i...' % (worker_endpoint[0],
                                                          worker_endpoint[1])

        wb = work.WorkerBridge(node, my_pubkey_hash, args.donation_percentage,
                               merged_urls, args.worker_fee)
        web_root = web.get_web_root(wb, datadir_path, bitcoind_warning_var)
        worker_interface.WorkerInterface(wb).attach_to(
            web_root, get_handler=lambda request: request.redirect('/static/'))

        deferral.retry('Error binding to worker port:', traceback=False)(
            reactor.listenTCP)(worker_endpoint[1],
                               server.Site(web_root),
                               interface=worker_endpoint[0])

        with open(os.path.join(os.path.join(datadir_path, 'ready_flag')),
                  'wb') as f:
            pass

        print '    ...success!'
        print

        # done!
        print 'Started successfully!'
        print 'Go to http://127.0.0.1:%i/ to view graphs and statistics!' % (
            worker_endpoint[1], )
        if args.donation_percentage > 0.51:
            print '''Donating %.1f%% of work towards P2Pool's development. Thanks for the tip!''' % (
                args.donation_percentage, )
        elif args.donation_percentage < 0.49:
            print '''Donating %.1f%% of work towards P2Pool's development. Please donate to encourage further development of P2Pool!''' % (
                args.donation_percentage, )
        else:
            print '''Donating %.1f%% of work towards P2Pool's development. Thank you!''' % (
                args.donation_percentage, )
            print 'You can increase this amount with --give-author argument! (or decrease it, if you must)'
        print

        if hasattr(signal, 'SIGALRM'):
            signal.signal(
                signal.SIGALRM, lambda signum, frame: reactor.callFromThread(
                    sys.stderr.write, 'Watchdog timer went off at:\n' + ''.
                    join(traceback.format_stack())))
            signal.siginterrupt(signal.SIGALRM, False)
            task.LoopingCall(signal.alarm, 30).start(1)

        if args.irc_announce:
            from twisted.words.protocols import irc

            class IRCClient(irc.IRCClient):
                nickname = 'p2pool%02i' % (random.randrange(100), )
                channel = net.ANNOUNCE_CHANNEL

                def lineReceived(self, line):
                    if p2pool.DEBUG:
                        print repr(line)
                    irc.IRCClient.lineReceived(self, line)

                def signedOn(self):
                    self.in_channel = False
                    irc.IRCClient.signedOn(self)
                    self.factory.resetDelay()
                    self.join(self.channel)

                    @defer.inlineCallbacks
                    def new_share(share):
                        if not self.in_channel:
                            return
                        if share.pow_hash <= share.header[
                                'bits'].target and abs(share.timestamp -
                                                       time.time()) < 10 * 60:
                            yield deferral.sleep(random.expovariate(1 / 60))
                            message = '\x02%s BLOCK FOUND by %s! %s%064x' % (
                                net.NAME.upper(),
                                bitcoin_data.script2_to_address(
                                    share.new_script, net.PARENT),
                                net.PARENT.BLOCK_EXPLORER_URL_PREFIX,
                                share.header_hash)
                            if all('%x' %
                                   (share.header_hash, ) not in old_message
                                   for old_message in self.recent_messages):
                                self.say(self.channel, message)
                                self._remember_message(message)

                    self.watch_id = node.tracker.verified.added.watch(
                        new_share)
                    self.recent_messages = []

                def joined(self, channel):
                    self.in_channel = True

                def left(self, channel):
                    self.in_channel = False

                def _remember_message(self, message):
                    self.recent_messages.append(message)
                    while len(self.recent_messages) > 100:
                        self.recent_messages.pop(0)

                def privmsg(self, user, channel, message):
                    if channel == self.channel:
                        self._remember_message(message)

                def connectionLost(self, reason):
                    node.tracker.verified.added.unwatch(self.watch_id)
                    print 'IRC connection lost:', reason.getErrorMessage()

            class IRCClientFactory(protocol.ReconnectingClientFactory):
                protocol = IRCClient

            reactor.connectTCP("irc.freenode.net", 6667, IRCClientFactory())

        @defer.inlineCallbacks
        def status_thread():
            last_str = None
            last_time = 0
            while True:
                yield deferral.sleep(3)
                try:
                    height = node.tracker.get_height(node.best_share_var.value)
                    this_str = 'P2Pool: %i shares in chain (%i verified/%i total) Peers: %i (%i incoming)' % (
                        height,
                        len(node.tracker.verified.items),
                        len(node.tracker.items),
                        len(node.p2p_node.peers),
                        sum(1 for peer in node.p2p_node.peers.itervalues()
                            if peer.incoming),
                    ) + (' FDs: %i R/%i W' %
                         (len(reactor.getReaders()), len(reactor.getWriters()))
                         if p2pool.DEBUG else '')

                    datums, dt = wb.local_rate_monitor.get_datums_in_last()
                    my_att_s = sum(datum['work'] / dt for datum in datums)
                    this_str += '\n Local: %sH/s in last %s Local dead on arrival: %s Expected time to share: %s' % (
                        math.format(int(my_att_s)),
                        math.format_dt(dt),
                        math.format_binomial_conf(
                            sum(1 for datum in datums if datum['dead']),
                            len(datums), 0.95),
                        math.format_dt(2**256 / node.tracker.items[
                            node.best_share_var.value].max_target / my_att_s)
                        if my_att_s and node.best_share_var.value else '???',
                    )

                    if height > 2:
                        (stale_orphan_shares,
                         stale_doa_shares), shares, _ = wb.get_stale_counts()
                        stale_prop = p2pool_data.get_average_stale_prop(
                            node.tracker, node.best_share_var.value,
                            min(60 * 60 // net.SHARE_PERIOD, height))
                        real_att_s = p2pool_data.get_pool_attempts_per_second(
                            node.tracker, node.best_share_var.value,
                            min(height - 1, 60 * 60 //
                                net.SHARE_PERIOD)) / (1 - stale_prop)

                        this_str += '\n Shares: %i (%i orphan, %i dead) Stale rate: %s Efficiency: %s Current payout: %.4f %s' % (
                            shares,
                            stale_orphan_shares,
                            stale_doa_shares,
                            math.format_binomial_conf(
                                stale_orphan_shares + stale_doa_shares, shares,
                                0.95),
                            math.format_binomial_conf(
                                stale_orphan_shares + stale_doa_shares, shares,
                                0.95, lambda x: (1 - x) / (1 - stale_prop)),
                            node.get_current_txouts().get(
                                bitcoin_data.pubkey_hash_to_script2(
                                    my_pubkey_hash), 0) * 1e-8,
                            net.PARENT.SYMBOL,
                        )
                        this_str += '\n Pool: %sH/s Stale rate: %.1f%% Expected time to block: %s' % (
                            math.format(int(real_att_s)),
                            100 * stale_prop,
                            math.format_dt(
                                2**256 /
                                node.bitcoind_work.value['bits'].target /
                                real_att_s),
                        )

                        for warning in p2pool_data.get_warnings(
                                node.tracker, node.best_share_var.value, net,
                                bitcoind_warning_var.value,
                                node.bitcoind_work.value):
                            print >> sys.stderr, '#' * 40
                            print >> sys.stderr, '>>> Warning: ' + warning
                            print >> sys.stderr, '#' * 40

                    if this_str != last_str or time.time() > last_time + 15:
                        print this_str
                        last_str = this_str
                        last_time = time.time()
                except:
                    log.err()

        status_thread()
    except:
        reactor.stop()
        log.err(None, 'Fatal error:')
Example #35
def main():
    """Main routine. Do the work."""

    # # First, write a PID so we can monitor the process
    # pid = str(os.getpid())
    # pidfile = "/var/run/bspdserver.pid"
    #
    # if os.path.isfile(pidfile):
    #     print "%s already exists, exiting" % pidfile
    #     sys.exit()
    # else:
    #     file(pidfile, 'w').write(pid)

    # Some logging preamble
    logging.debug('\n\n-=- Starting new BSDP server session -=-\n')

    # We are changing nbiimages for use by other functions
    global nbiimages

    # Instantiate a basic pydhcplib DhcpServer class using netopts (listen port,
    #   reply port and listening IP)
    server = Server(netopt)

    # Do a one-time discovery of all available NBIs on the server. NBIs added
    #   after the server was started will not be picked up until after a restart
    nbiimages, nbisources = getNbiOptions(tftprootpath)

    def scan_nbis(signum, frame):
        global nbiimages
        logging.debug('[========= Updating boot images list =========]')
        nbiimages, nbisources = getNbiOptions(tftprootpath)
        for nbi in nbisources:
            logging.debug(nbi)
        logging.debug('[=========      End updated list     =========]')

    signal.signal(signal.SIGUSR1, scan_nbis)
    signal.siginterrupt(signal.SIGUSR1, False)

    # Print the full list of eligible NBIs to the log
    logging.debug('[========= Using the following boot images =========]')
    for nbi in nbisources:
        logging.debug(nbi)
    logging.debug('[=========     End boot image listing      =========]')

    # Loop while the looping's good.
    while True:

        # Listen for DHCP packets. Since select() is used upstream we need to
        #   catch the EINTR error it trips on when we receive a USR1 signal to
        #   reload the nbiimages list.
        try:
            packet = server.GetNextDhcpPacket()
        except select.error, e:
            if e[0] != errno.EINTR: raise

        try:
            # Check to see if any vendor_encapsulated_options are present
            if len(packet.GetOption('vendor_encapsulated_options')) > 1:

                # If we have vendor_encapsulated_options check for a value of 1
                #   which in BSDP terms means the packet is a BSDP[LIST] request
                if packet.GetOption('vendor_encapsulated_options')[2] == 1:
                    logging.debug(
                        '-=========================================-')
                    logging.debug('Got BSDP INFORM[LIST] packet: ')

                    # Pass ack() the matching packet, defaultnbi and 'list'
                    bsdplistack, clientip, replyport = ack(
                        packet, defaultnbi, 'list')
                    # Once we have a finished DHCP packet, send it to the client
                    server.SendDhcpPacketTo(bsdplistack, str(clientip),
                                            replyport)

                # If the vendor_encapsulated_options BSDP type is 2, we process
                #   the packet as a BSDP[SELECT] request
                elif packet.GetOption('vendor_encapsulated_options')[2] == 2:
                    logging.debug(
                        '-=========================================-')
                    logging.debug('Got BSDP INFORM[SELECT] packet: ')


                    bsdpselectack, selectackclientip, selectackreplyport = \
                        ack(packet, None, 'select')

                    # Once we have a finished DHCP packet, send it to the client
                    server.SendDhcpPacketTo(bsdpselectack,
                                            str(selectackclientip),
                                            selectackreplyport)
                # If the vendor option payload is 7 bytes or less, move on;
                #   BSDP option payloads are at least 8 bytes long.
                elif len(packet.GetOption('vendor_encapsulated_options')) <= 7:
                    pass
        except:
            # Dump tracebacks to stderr and carry on
            traceback.print_exc()
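A side note on the EINTR handling above: siginterrupt(SIGUSR1, False) requests SA_RESTART, but on Linux select(2) is one of the calls the kernel never restarts regardless of that flag, so the loop still has to treat EINTR as "go back to listening". A minimal Python 3 sketch of the same pattern, with an illustrative UDP socket standing in for the DHCP server (on Python 3.5+, PEP 475 retries EINTR inside the interpreter unless the handler raises, so the except branch mostly matters on older interpreters):

import errno
import select
import signal
import socket

reload_requested = False

def on_sigusr1(signum, frame):
    global reload_requested
    reload_requested = True

signal.signal(signal.SIGUSR1, on_sigusr1)
signal.siginterrupt(signal.SIGUSR1, False)  # ask for SA_RESTART anyway

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('', 0))

while True:
    try:
        readable, _, _ = select.select([sock], [], [], 5.0)
    except OSError as exc:  # select.error on Python 2
        if exc.errno != errno.EINTR:
            raise
        readable = []  # interrupted: fall through and re-enter the loop
    if reload_requested:
        reload_requested = False
        print('would rescan boot images here')
    for s in readable:
        data, peer = s.recvfrom(4096)
        print('got %d bytes from %s' % (len(data), peer))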
Example #36
0
 def _swap_handler(self, signum, signal_handler):
     self._old_handlers[signum] = signal.getsignal(signum)
     signal.signal(signum, signal_handler)
     if not platform._is_win:
         signal.siginterrupt(signum, False)
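The save/restore idea in _swap_handler generalizes to a context manager that installs a temporary handler and puts the previous one back afterwards. A hedged sketch (the names are illustrative, not from the source above):

import contextlib
import signal

@contextlib.contextmanager
def temporary_handler(signum, handler):
    previous = signal.getsignal(signum)  # what _old_handlers stores above
    signal.signal(signum, handler)
    if hasattr(signal, 'siginterrupt'):  # POSIX only, like the platform check above
        signal.siginterrupt(signum, False)
    try:
        yield
    finally:
        signal.signal(signum, previous)

# Usage: ignore SIGTERM for the duration of a critical section.
with temporary_handler(signal.SIGTERM, signal.SIG_IGN):
    pass  # do work that must not be terminated mid-way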
Example #37
0
    def __init__(self, bot_id):
        self.__log_buffer = []
        self.parameters = Parameters()

        self.__error_retries_counter = 0
        self.__source_pipeline = None
        self.__destination_pipeline = None
        self.logger = None

        try:
            version_info = sys.version.splitlines()[0].strip()
            self.__log_buffer.append(
                ('info', '{} initialized with id {} and version '
                 '{} as process {}.'
                 ''.format(self.__class__.__name__, bot_id, version_info,
                           os.getpid())))
            self.__log_buffer.append(('debug', 'Library path: %r.' % __file__))

            self.__load_defaults_configuration()
            self.__load_system_configuration()

            self.__check_bot_id(bot_id)
            self.__bot_id = bot_id

            if self.parameters.logging_handler == 'syslog':
                syslog = self.parameters.logging_syslog
            else:
                syslog = False
            self.logger = utils.log(self.__bot_id,
                                    syslog=syslog,
                                    log_path=self.parameters.logging_path,
                                    log_level=self.parameters.logging_level)
        except:
            self.__log_buffer.append(('critical', traceback.format_exc()))
            self.stop()
        else:
            for line in self.__log_buffer:
                getattr(self.logger, line[0])(line[1])

        try:
            self.logger.info('Bot is starting.')
            self.__load_runtime_configuration()
            self.__load_pipeline_configuration()
            self.__load_harmonization_configuration()

            if not getattr(self.parameters, 'enabled', True):
                self.logger.warn('The bot was disabled by configuration. '
                                 'It will not be started as long as this '
                                 'configuration is present.')
                self.stop()

            self.init()

            self.__sighup = False
            signal.signal(signal.SIGHUP, self.__handle_sighup_signal)
            # system calls should not be interrupted, but restarted
            signal.siginterrupt(signal.SIGHUP, False)
        except Exception as exc:
            if self.parameters.error_log_exception:
                self.logger.exception('Bot initialization failed.')
            else:
                self.logger.error(utils.error_message_from_exc(exc))
                self.logger.error('Bot initialization failed.')

            self.stop()
            raise
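The SIGHUP wiring above follows a common pattern: the handler only records that a reload was requested, and the bot applies it at a safe point between work items, while siginterrupt(..., False) keeps blocking system calls from failing with EINTR in the meantime. A self-contained sketch of that pattern with illustrative names:

import signal
import time

class Worker:
    def __init__(self):
        self._sighup = False
        signal.signal(signal.SIGHUP, self._on_sighup)
        # system calls should not be interrupted, but restarted
        signal.siginterrupt(signal.SIGHUP, False)

    def _on_sighup(self, signum, frame):
        self._sighup = True  # only record the request; defer the real work

    def reload_configuration(self):
        print('reloading configuration')

    def run(self):
        while True:
            if self._sighup:  # safe point: between work items
                self._sighup = False
                self.reload_configuration()
            time.sleep(1)  # stand-in for processing one item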
Example #38
0
    def run(self):
        signal(SIGTERM, self._sigterm)
        signal(SIGINT, self._sigterm)
        # Try to avoid stacktraces from interrupted signal calls
        siginterrupt(SIGTERM, False)
        siginterrupt(SIGINT, False)

        if self.options.restart:
            if os.path.exists(self.config_filename):
                os.remove(self.config_filename)

        if self.options.detail_file:
            self.populate_detail_mappings()

        self._loadConfig()
        if self.options.batchsize <= 0:
            self.parser.error('Invalid argument for --batchsize parameter - must be positive')
        if self.options.sleep < 0:
            self.parser.error('Invalid argument for --sleep parameter')

        if self.options.startoffset < 0:
            self.parser.error('Invalid argument for --startoffset parameter - must be non-negative')
        if self.options.numevents < 0:
            self.parser.error('Invalid argument for --numevents parameter - must be non-negative')

        self.firstTimeStart = None
        if self.options.firstTimeStart:
            if not self.options.firstTimeEnd:
                self.parser.error('Invalid argument for --firsttime-end parameter - must be specified')
        if self.options.firstTimeEnd:
            if not self.options.firstTimeStart:
                self.parser.error('Invalid argument for --firsttime-start parameter - must be specified')
            firstTimeStart = dateutil.parser.parse(self.options.firstTimeStart)
            firstTimeEnd = dateutil.parser.parse(self.options.firstTimeEnd)
            if firstTimeStart > firstTimeEnd:
                self.parser.error('Invalid argument for firsttime parameter - start must be earlier than end')
            if self.options.startoffset >= 0:
                self._output('disabling --startoffset...')
                self.options.startoffset = 0
            if self.options.numevents >= 0:
                self._output('disabling --numevents...')
                self.options.numevents = 0
            self.firstTimeStart = firstTimeStart
            self.firstTimeEnd = firstTimeEnd
            self._loadConfig(self.firstTimeStart.strftime('%Y%m%d-%H%M%S'))

        validTables = set(('status', 'history'))
        tablesToMigrate = re.split('\W+', self.options.tables)
        for table in tablesToMigrate:
            if table not in validTables:
                self.parser.error('Invalid argument for --tables parameter - must be either status or history')

        if not self.options.fetchArgs:
            if not self.options.evtuser or self.options.evtpass is None:
                self.parser.error('Required arguments --evtuser and --evtpass must be provided when using '
                                  '--dont-fetch-args')
        else:
            zem = self.dmd.ZenEventManager
            self.options.evthost = zem.host
            self.options.evtport = zem.port
            self.options.evtuser = zem.username
            self.options.evtpass = zem.password
            self.options.evtdb = zem.database
        conn = None
        publisher = None
        try:
            conn = connect(host=self.options.evthost,
                           user=self.options.evtuser,
                           passwd=self.options.evtpass,
                           db=self.options.evtdb,
                           port=self.options.evtport,
                           cursorclass=DictCursor,
                           use_unicode=True)
            conn.autocommit(1)

            publisher = getUtility(IQueuePublisher)

            # Migrate status
            if 'status' in tablesToMigrate:
                self._migrate_events(conn, publisher, True)

            # Migrate history
            if 'history' in tablesToMigrate:
                self._migrate_events(conn, publisher, False)
            
        except Exception as e:
            if log.isEnabledFor(logging.DEBUG):
                log.exception('Error migrating events')
            print >>sys.stderr, "Failed to migrate events: %s" % e
        finally:
            if publisher:
                publisher.close()
            if conn:
                conn.close()
Example #39
0
def init_signal(signal_num, signal_object, exception_class, handler):
    handler = functools.partial(handler, signal_object, exception_class)
    signal.signal(signal_num, handler)
    signal.siginterrupt(signal_num, False)
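One hedged way to use an init_signal-style helper: pre-bind an exception class with functools.partial so the installed handler raises it and blocking code unwinds through ordinary exception handling. ShutdownRequested and raise_for_signal are illustrative names, and init_signal is assumed to be the function defined just above:

import signal

class ShutdownRequested(Exception):
    pass

def raise_for_signal(signal_object, exception_class, signum, frame):
    # partial() in init_signal pre-binds the first two parameters, leaving
    # the (signum, frame) signature that signal.signal() expects.
    raise exception_class()

init_signal(signal.SIGTERM, None, ShutdownRequested, raise_for_signal)

try:
    signal.pause()  # block until a signal arrives (POSIX)
except ShutdownRequested:
    print('shutting down cleanly')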
Example #40
0
    def __init__(self, workflowfile, args, jmax=100):
        self.args = args
        self.workflowfile = workflowfile
        self.workflowspec = load_workflow(workflowfile)
        self.workflowspec = filter_workflow(self.workflowspec,
                                            args.target_tasks,
                                            args.target_labels)

        if not self.workflowspec['stages']:
            if args.target_tasks:
                print(
                    'Apparently some of the chosen target tasks are not in the workflow'
                )
                exit(0)
            print('Workflow is empty. Nothing to do')
            exit(0)

        workflow = build_dag_properties(self.workflowspec)
        if args.visualize_workflow:
            draw_workflow(self.workflowspec)
        self.possiblenexttask = workflow['nexttasks']
        self.taskweights = workflow['weights']
        self.topological_orderings = workflow['topological_ordering']
        self.taskuniverse = [l['name'] for l in self.workflowspec['stages']]
        self.idtotask = [0 for l in self.taskuniverse]
        self.tasktoid = {}
        for i in range(len(self.taskuniverse)):
            self.tasktoid[self.taskuniverse[i]] = i
            self.idtotask[i] = self.taskuniverse[i]

        self.maxmemperid = [
            self.workflowspec['stages'][tid]['resources']['mem']
            for tid in range(len(self.taskuniverse))
        ]
        self.cpuperid = [
            self.workflowspec['stages'][tid]['resources']['cpu']
            for tid in range(len(self.taskuniverse))
        ]
        self.curmembooked = 0
        self.curcpubooked = 0
        self.curmembooked_backfill = 0
        self.curcpubooked_backfill = 0
        self.memlimit = float(args.mem_limit)  # some configurable number
        self.cpulimit = float(args.cpu_limit)
        self.procstatus = {
            tid: 'ToDo'
            for tid in range(len(self.workflowspec['stages']))
        }
        self.taskneeds = {
            t: set(self.getallrequirements(t))
            for t in self.taskuniverse
        }
        self.stoponfailure = True
        self.max_jobs_parallel = int(jmax)
        self.scheduling_iteration = 0
        self.process_list = [
        ]  # list of currently scheduled tasks with normal priority
        self.backfill_process_list = [
        ]  # list of currently scheduled tasks with low backfill priority (not sure this is needed)
        self.pid_to_psutilsproc = {
        }  # cache of psutil proc objects for resource monitoring
        self.pid_to_files = {
        }  # we can auto-detect what files are produced by which task (at least to some extent)
        self.pid_to_connections = {
        }  # we can auto-detect what connections are opened by which task (at least to some extent)
        signal.signal(signal.SIGINT, self.SIGHandler)
        signal.siginterrupt(signal.SIGINT, False)
        self.nicevalues = [os.nice(0) for tid in range(len(self.taskuniverse))]
        self.internalmonitorcounter = 0  # internal use
        self.internalmonitorid = 0  # internal use
        self.tids_marked_toretry = [
        ]  # sometimes we might want to retry a failed task (simply because it was "unlucky") and we put them here
        self.retry_counter = [0 for tid in range(len(self.taskuniverse))
                              ]  # we keep track of how many times each task was retried already
        self.semaphore_values = {
            self.workflowspec['stages'][tid].get('semaphore'): 0
            for tid in range(len(self.taskuniverse))
            if self.workflowspec['stages'][tid].get('semaphore') is not None
        }  # keeps current count of semaphores (defined in the json workflow). Used to achieve user-defined "critical sections".
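The semaphore_values dict above implements counting semaphores by name: every stage that declares the same semaphore key shares one counter, which gates how many of those tasks may run at once. A hedged sketch of the counting logic (the limit and the names are illustrative, not the scheduler's API):

semaphore_values = {'disk-io': 0}  # as built from the workflow spec above
SEMAPHORE_LIMIT = 1  # one task at a time inside the critical section

def try_acquire(name):
    if name is None:  # task is not in any critical section
        return True
    if semaphore_values[name] >= SEMAPHORE_LIMIT:
        return False  # section busy; the scheduler should skip this task for now
    semaphore_values[name] += 1
    return True

def release(name):
    if name is not None:
        semaphore_values[name] -= 1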
Example #41
0
        tempfile.write("collect regs->cs\n")
    tempfile.write("collect $current_task_pid\n")
tempfile.write("end\n")
tempfile.close()
tempfile = open(tempfilename, "r")
print "Tracepoint command:"
print tempfile.read()
tempfile.close()
gdb.execute("source " + tempfilename, True, False)
os.remove(tempfilename)
gdb.execute("set disconnected-tracing on", True, False)
gdb.execute("tstart")
gdb.execute("kill", True, False)

signal.signal(signal.SIGINT, sigint_handler)
signal.siginterrupt(signal.SIGINT, False)

#Connect to pipe
gdb.execute("target tfile /sys/kernel/debug/gtpframe_pipe")


def get_function_from_sym(sym):
    sym = sym.rstrip(os.linesep)
    sym_end = sym.find(" in section")
    function = ""
    if sym_end > 0:
        function = sym[0:sym_end]
        function_list = function.split(' + ')
        function_list_len = len(function_list)
        if function_list_len >= 1:
            function = function_list[0]
Example #42
0
 def init_signals(self):
     for sig in ('SIGINT', 'SIGTERM'):
         self.loop.add_signal_handler(getattr(signal, sig), self.stop)
     self.loop.add_signal_handler(signal.SIGHUP, self.reload)
     signal.siginterrupt(signal.SIGTERM, False)
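With asyncio, loop.add_signal_handler() already routes delivery through the loop's self-pipe, so the siginterrupt() call above is belt-and-braces at most. A minimal self-contained sketch of the same registration (Python 3.7+, illustrative names):

import asyncio
import signal

async def main():
    loop = asyncio.get_running_loop()
    stop = asyncio.Event()
    for sig in (signal.SIGINT, signal.SIGTERM):
        loop.add_signal_handler(sig, stop.set)  # callback runs in the loop
    await stop.wait()  # park here until one of the signals arrives
    print('stopping')

asyncio.run(main())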
Example #43
0
def main():

    MinimalPython.check()

    try:

        args = init_arg_parser()

        if args.print_version:
            print(f"Version {cyclone.VERSION}")
            sys.exit()

        config_file_reader = ControllerConfigFileReader(args.config_file)

        init_logging(config_file_reader.log_filename, args.enable_debug)

        with PIDControl(config_file_reader.pid_file) as pid_control, \
                ControllerCommHandler(config_file_reader.comm_target,
                                      config_file_reader.comm_port,
                                      config_file_reader.poll_timeout) as comm_handler, \
                SharedQueue() as result_queue, \
                SharedQueue() as task_queue:

            if pid_control.lock():

                logging.info("Started")
                logging.info(f"Controller PID: {pid_control.pid()}")

                logging.debug("Version: %s", cyclone.VERSION)

                signal.signal(signal.SIGINT, signal.SIG_IGN)
                signal.signal(signal.SIGHUP, signal_handler)
                signal.signal(signal.SIGTERM, signal_handler)

                signal.siginterrupt(signal.SIGHUP, True)
                signal.siginterrupt(signal.SIGINT, True)
                signal.siginterrupt(signal.SIGTERM, True)

                comm_handler.connect()

                request_retry_count = 0
                request_retry_wait_duration = config_file_reader.request_retry_wait_duration
                max_num_request_retries = config_file_reader.max_num_request_retries

                lock_worker_state_table = multiprocessing.Lock()
                lock_result_queue = multiprocessing.Lock()

                cond_result_queue = multiprocessing.Condition(
                    lock_result_queue)

                worker_count = config_file_reader.worker_count
                worker_ids = create_worker_ids(worker_count)
                worker_state_table = create_worker_state_table(worker_ids)

                worker_handle_dict = \
                    create_worker(worker_state_table,
                                  lock_worker_state_table,
                                  task_queue,
                                  result_queue,
                                  cond_result_queue)

                global RUN_CONDITION

                if not start_worker(worker_handle_dict, worker_state_table):

                    logging.error("Not all worker are ready!")
                    RUN_CONDITION = False

                while RUN_CONDITION:

                    try:

                        send_msg = None

                        if not send_msg:

                            with CriticalSection(cond_result_queue):

                                if not result_queue.is_empty():

                                    task_id = result_queue.pop_nowait()

                                    if task_id:

                                        logging.debug("Finished task: %s",
                                                      task_id)
                                        send_msg = TaskFinished(
                                            comm_handler.fqdn, task_id)

                        if not send_msg:

                            found_ready_worker = False

                            with CriticalSection(lock_worker_state_table):

                                for worker_id in worker_state_table.keys():

                                    if worker_handle_dict[worker_id].is_alive() \
                                            and worker_state_table[worker_id].get_state == WorkerState.READY:

                                        found_ready_worker = True
                                        break

                            if found_ready_worker:

                                logging.debug('Requesting a task...')

                                send_msg = TaskRequest(comm_handler.fqdn)

                            else:

                                worker_count = len(worker_state_table)
                                worker_count_not_active = 0

                                for worker_id in worker_state_table.keys():

                                    if not worker_handle_dict[
                                            worker_id].is_alive():
                                        worker_count_not_active += 1

                                if worker_count == worker_count_not_active:

                                    logging.error('No workers are alive!')
                                    RUN_CONDITION = False

                                else:  # Available worker are busy

                                    with CriticalSection(cond_result_queue):

                                        wait_timeout_result_queue = 1

                                        cond_result_queue.wait(
                                            wait_timeout_result_queue)

                                        if result_queue.is_empty():
                                            send_msg = Heartbeat(
                                                comm_handler.fqdn)

                        if send_msg:

                            if logging.root.isEnabledFor(logging.DEBUG):
                                # TODO: remove redundant call of send_msg.to_string()
                                logging.debug("Sending message to master: %s",
                                              send_msg.to_string())

                            comm_handler.send_string(send_msg.to_string())

                            # Check for response and process it.
                            # Used redundant - TODO: make a class method.
                            ################################################################################
                            in_raw_data = comm_handler.recv_string()

                            if in_raw_data:

                                logging.debug(
                                    "Retrieved message (raw data): %s",
                                    in_raw_data)

                                in_msg = MessageFactory.create(in_raw_data)
                                in_msg_type = in_msg.type()

                                if MessageType.TASK_ASSIGN() == in_msg_type:

                                    task = in_msg.to_task()
                                    logging.debug(
                                        "Retrieved task assign for: %s",
                                        task.tid)
                                    task_queue.push(task)
                                    logging.debug(
                                        "Pushed task to task queue: %s",
                                        task.tid)

                                elif MessageType.ACKNOWLEDGE() == in_msg_type:
                                    pass

                                elif MessageType.WAIT_COMMAND() == in_msg_type:

                                    #TODO: Implement it on the master side!
                                    wait_duration = in_msg.duration
                                    logging.debug(
                                        "Retrieved Wait Command with duration: %fs",
                                        wait_duration)
                                    time.sleep(wait_duration)

                                elif MessageType.EXIT_COMMAND() == in_msg_type:

                                    RUN_CONDITION = False
                                    logging.info(
                                        'Retrieved exit message from master...'
                                    )

                                if request_retry_count:
                                    request_retry_count = 0
################################################################################

                            else:

                                if request_retry_count == max_num_request_retries:

                                    logging.info(
                                        'Exiting, since maximum retry count is reached!'
                                    )
                                    comm_handler.disconnect()
                                    RUN_CONDITION = False

                                time.sleep(request_retry_wait_duration)

                                # Check for response and process it.
                                # Used redundant - TODO: make a class method.
                                ################################################################################
                                in_raw_data = comm_handler.recv_string()

                                if in_raw_data:

                                    logging.debug(
                                        "Retrieved message (raw data): %s",
                                        in_raw_data)

                                    in_msg = MessageFactory.create(in_raw_data)
                                    in_msg_type = in_msg.type()

                                    if MessageType.TASK_ASSIGN(
                                    ) == in_msg_type:

                                        task = in_msg.to_task()
                                        logging.debug(
                                            "Retrieved task assign for: %s",
                                            task.tid)
                                        task_queue.push(task)
                                        logging.debug(
                                            "Pushed task to task queue: %s",
                                            task.tid)

                                    elif MessageType.ACKNOWLEDGE(
                                    ) == in_msg_type:
                                        pass

                                    elif MessageType.WAIT_COMMAND(
                                    ) == in_msg_type:

                                        #TODO: Implement it on the master side!
                                        wait_duration = in_msg.duration
                                        logging.debug(
                                            "Retrieved Wait Command with duration: %fs",
                                            wait_duration)
                                        time.sleep(wait_duration)

                                    elif MessageType.EXIT_COMMAND(
                                    ) == in_msg_type:

                                        RUN_CONDITION = False
                                        logging.info(
                                            'Retrieved exit message from master...'
                                        )

                                    if request_retry_count:
                                        request_retry_count = 0


################################################################################

                                else:

                                    logging.debug(
                                        'No response retrieved - Reconnecting...'
                                    )
                                    comm_handler.reconnect()
                                    request_retry_count += 1

                    except Exception as err:

                        RUN_CONDITION = False
                        exc_type, _, exc_tb = sys.exc_info()
                        filename = os.path.split(
                            exc_tb.tb_frame.f_code.co_filename)[1]
                        logging.error(
                            f"Caught exception (type: {exc_type}) in main loop: {err} "
                            f"- {filename} (line: {exc_tb.tb_lineno})")

                if not RUN_CONDITION:

                    try:

                        logging.info("Shutting down all worker...")

                        all_worker_down = False

                        while not all_worker_down:

                            found_active_worker = False

                            for worker_id in worker_state_table.keys():

                                if worker_handle_dict[worker_id].is_alive():

                                    os.kill(worker_handle_dict[worker_id].pid,
                                            signal.SIGUSR1)

                                    task_queue.push(PoisenPill())

                                    logging.debug(
                                        "Waiting for worker to complete: %s",
                                        worker_handle_dict[worker_id].name)

                                    found_active_worker = True

                            if not found_active_worker:
                                all_worker_down = True
                                logging.debug('All workers are down.')

                            else:
                                logging.debug(
                                    'Waiting for workers to shut down...')
                                time.sleep(1)

                    except Exception as err:
                        logging.error(
                            f"Caught exception terminating Worker: {err}")

            else:

                logging.error(
                    f"Another instance might be already running (PID file: {config_file_reader.pid_file})!"
                )
                sys.exit(1)

    except Exception as err:

        exc_type, _, exc_tb = sys.exc_info()
        filename = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        logging.error(
            f"Exception in {filename} (line: {exc_tb.tb_lineno}): {err}")
        sys.exit(1)

    logging.info('Finished')
    sys.exit(0)
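Note the deliberate siginterrupt(sig, True) calls above: unlike most examples on this page, this controller wants blocking calls interrupted so the loop re-checks RUN_CONDITION promptly. On Python 3.5+, PEP 475 retries EINTR inside the interpreter unless the handler raises, so in practice the flag is paired with short poll timeouts. A reduced sketch of the shutdown-flag mechanism (illustrative, not the cyclone code):

import signal
import time

RUN_CONDITION = True

def signal_handler(signum, frame):
    global RUN_CONDITION
    RUN_CONDITION = False  # the main loop observes this on its next iteration

signal.signal(signal.SIGTERM, signal_handler)
signal.siginterrupt(signal.SIGTERM, True)  # allow blocking calls to be interrupted

while RUN_CONDITION:
    time.sleep(0.5)  # stand-in for one poll of the message queue

print('Finished')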
Example #44
0
    def __init__(
        self,
        name=None,  # Name for log entries
        prefix=None,  # Prefix for name (e.g., prefix/progname)
        level=INFO,  # Logging level
        facility=local4,  # Log facility
        debug=False,  # Force level to DEBUG
        verbose=False,  # Log to stderr, too.
        quiet=None,  # Don't log anything on startup  (See below)
        signals=True,  # Enable debug on/off with SIGUSR1/SIGUSR2
        propagate=False  # Pass debug state on to child processes
    ):

        #
        # Handle the parameters
        #

        if name is None:
            name = os.path.basename(sys.argv[0])
        assert type(name) == str

        if prefix is not None:
            assert type(prefix) == str
            name = prefix + "/" + name

        self.facility = facility

        if debug:
            level = DEBUG

        self.is_propagating = propagate

        # This prevents verbose() from choking on this being undefined.
        self.is_verbose = False

        if quiet is None:
            quiet = False
            forced_quiet = False
        else:
            forced_quiet = quiet

        self.is_quiet = quiet

        self.forced_debug = debug

        #
        # Inherit state from the environment
        #

        if STATE_VARIABLE in os.environ:

            try:
                depickled = pickle.loads(os.environ[STATE_VARIABLE])

                facility = depickled['facility']
                assert type(facility) == int

                level = depickled['last_level']
                assert type(level) == int

                self.forced_debug = depickled['forced_debug']
                assert type(self.forced_debug) == bool

                self.is_quiet = depickled['is_quiet']
                assert type(self.is_quiet) == bool

            except Exception as ex:
                self.exception("Failed to decode %s '%s'" %
                               (STATE_VARIABLE, os.environ[STATE_VARIABLE]))

        #
        # Set up the logger
        #

        self.logger = logging.getLogger(name)
        self.logger.propagate = False

        self.syslog_handler = None
        self.__syslog_handler_init()

        # Stderr
        self.stderr_handler = logging.StreamHandler(sys.stderr)
        self.stderr_handler.setFormatter(
            logging.Formatter(fmt='%(asctime)s %(message)s',
                              datefmt='%Y-%m-%dT%H:%M:%S'))
        # Don't add this handler; verbose will cover it.

        #
        # Get everything set to go
        #

        self.verbose(verbose)
        self.level(level, save=True)
        self.set_debug(self.forced_debug)
        self.__update_env()

        # Grab signals and make them non-interrupting
        # TODO: How portable is this?
        if signals:
            signal.signal(signal.SIGUSR1, self.sigusr1)
            signal.signal(signal.SIGUSR2, self.sigusr2)
            signal.siginterrupt(signal.SIGUSR1, False)
            signal.siginterrupt(signal.SIGUSR2, False)

        if (not self.is_quiet) and (not forced_quiet):
            self.info("Started")
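A reduced sketch of the SIGUSR1/SIGUSR2 debug toggle wired up at the end of __init__ above, using standalone functions instead of the class methods (so the names here are illustrative):

import logging
import signal

log = logging.getLogger('demo')
logging.basicConfig(level=logging.INFO)

def debug_on(signum, frame):
    log.setLevel(logging.DEBUG)  # what sigusr1 does above

def debug_off(signum, frame):
    log.setLevel(logging.INFO)  # what sigusr2 does above

signal.signal(signal.SIGUSR1, debug_on)
signal.signal(signal.SIGUSR2, debug_off)
# Keep the toggles from interrupting blocking system calls (POSIX only).
signal.siginterrupt(signal.SIGUSR1, False)
signal.siginterrupt(signal.SIGUSR2, False)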
Example #45
0
from ag.orbit.gui import Main

from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import Qt

import sys
import signal

QApplication.setAttribute(Qt.AA_X11InitThreads)
app = QApplication(sys.argv)

win = Main()
win.show()

try:
    # override signals to allow for Ctrl+C in command line
    try:
        signal.siginterrupt(signal.SIGCHLD, False)
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    except AttributeError:
        pass

    # this blocks until application exit
    sys.exit(app.exec_())

except KeyboardInterrupt:
    pass
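For completeness: SIG_DFL above restores the default Ctrl+C behaviour, which kills the process outright. If a Python-level SIGINT handler is wanted instead (to close the application cleanly), a commonly used workaround is to wake the interpreter periodically with a QTimer, because Python signal handlers only run between bytecode instructions and the Qt event loop rarely returns control to Python. A hedged standalone sketch, not part of the example above:

import signal
import sys

from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QApplication

app = QApplication(sys.argv)

# Quit the Qt loop cleanly instead of dying on the default SIGINT action.
signal.signal(signal.SIGINT, lambda signum, frame: app.quit())

# Give the interpreter a chance to run the handler: an idle timer that
# briefly re-enters Python every 250 ms.
timer = QTimer()
timer.timeout.connect(lambda: None)
timer.start(250)

sys.exit(app.exec_())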