Example #1
    def __init__(self, server, jobmanager, **config):
        Greenlet.__init__(self)
        self._set_config(**config)
        self.jobmanager = jobmanager
        self.server = server
        self.reporter = server.reporter
        self.logger = server.register_logger('auxmonitor_{}'.format(self.config['name']))
        self.block_stats = dict(accepts=0,
                                rejects=0,
                                solves=0,
                                last_solve_height=None,
                                last_solve_time=None,
                                last_solve_worker=None)
        self.current_net = dict(difficulty=None, height=None)
        self.recent_blocks = deque(maxlen=15)

        self.prefix = self.config['name'] + "_"
        # create an instance-local one_min_stats for use in the status() method
        self.one_min_stats = [self.prefix + key for key in self.one_min_stats]
        self.server.register_stat_counters(self.one_min_stats)

        self.coinservs = self.config['coinservs']
        self.coinserv = bitcoinrpc.AuthServiceProxy(
            "http://{0}:{1}@{2}:{3}/"
            .format(self.coinservs[0]['username'],
                    self.coinservs[0]['password'],
                    self.coinservs[0]['address'],
                    self.coinservs[0]['port']),
            pool_kwargs=dict(maxsize=self.coinservs[0].get('maxsize', 10)))
        self.coinserv.config = self.coinservs[0]

        if self.config['signal']:
            gevent.signal(self.config['signal'], self.update, reason="Signal received")
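
A note on the call signature used above: gevent forwards any extra positional and keyword arguments to the handler, which is how reason="Signal received" reaches self.update when the signal fires. In newer gevent releases (1.3+), gevent.signal is also a module, so the callable is additionally exposed under the unambiguous name gevent.signal_handler. A minimal sketch of the argument forwarding:

import signal

import gevent

def on_update(reason=None):
    print('update requested: %s' % reason)

# extra args/kwargs given here are passed through to the handler
gevent.signal_handler(signal.SIGUSR1, on_update, reason='signal received')
gevent.sleep(30)  # handlers only run while the gevent loop is active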
Example #2
    def run(self):
        container = create_container(self.config)
        install_plugins(container, self.config.get('plugins', {}))
        install_interfaces(container, self.config.get('interfaces', {}))

        for cls_name in self.args.get('--interface', ()):
            cls = import_object(cls_name)
            container.install(cls)

        if self.args.get('--debug'):
            from gevent.backdoor import BackdoorServer
            backdoor = BackdoorServer(('127.0.0.1', 5005), locals={'container': container})
            gevent.spawn(backdoor.serve_forever)

        def handle_signal():
            logger.info('caught SIGINT/SIGTERM, pid=%s', os.getpid())
            container.stop()
            container.join()
            sys.exit(0)
        gevent.signal(signal.SIGINT, handle_signal)
        gevent.signal(signal.SIGTERM, handle_signal)

        setproctitle('lymph-instance (identity: %s, endpoint: %s, config: %s)' % (
            container.identity,
            container.endpoint,
            self.config.source,
        ))

        container.start(register=not self.args.get('--isolated', False))

        if self.args.get('--reload'):
            set_source_change_callback(container.stop)

        container.join()
Example #3
def run_all():
    gevent.signal(signal.SIGQUIT, gevent.kill)
    run_emitters()
    run_listeners()
    eventloop = gevent.spawn(run_eventloop)
    __jobs.append(eventloop)
    gevent.joinall(__jobs, raise_error=True)
Example #4
    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-p', type=int, metavar='PORT', default=5500, help="WebSockets server port")
        args = parser.parse_args()
        configure_logging('/var/log/containerd.log', 'DEBUG')
        setproctitle.setproctitle('containerd')

        gevent.signal(signal.SIGTERM, self.die)
        gevent.signal(signal.SIGQUIT, self.die)

        self.config = args.c
        self.init_datastore()
        self.init_dispatcher()
        self.init_mgmt()
        self.init_nat()
        self.init_ec2()
        self.logger.info('Started')

        # WebSockets server
        kwargs = {}
        s4 = WebSocketServer(('', args.p), ServerResource({
            '/console': ConsoleConnection,
        }, context=self), **kwargs)

        s6 = WebSocketServer(('::', args.p), ServerResource({
            '/console': ConsoleConnection,
        }, context=self), **kwargs)

        serv_threads = [gevent.spawn(s4.serve_forever), gevent.spawn(s6.serve_forever)]
        gevent.joinall(serv_threads)
Example #5
def terminate():
    global _terminated
    if _terminated:
        return
    _terminated = True
    gevent.signal(signal.SIGINT, None)
    gevent.signal(signal.SIGTERM, None)
    if ui:
        ui.set_splash_text('terminating...')
    if event:
        event.fire('loader:terminating')
    for obj in reversed(pre_objects + main_objects + post_objects):
        if hasattr(obj, 'terminate'):
            name = re.sub(r'^client\.', '', obj.__name__)
            if log:
                log.debug('terminating module {}'.format(name))
            try:
                obj.terminate()
            except (AssertionError, SystemExit, KeyboardInterrupt, gevent.GreenletExit):
                print "exit error catched in", name
                pass
            except:
                traceback.print_exc()
                if log:
                    log.critical('failed terminating module {}'.format(name), exc_info=sys.exc_info)
    sys.exit(0)
Example #6
def _run_chassis(fabricconfig, mgmtbusconfig, fts):
    try:
        # lower priority to make master and web
        # more "responsive"
        os.nice(5)

        c = minemeld.chassis.Chassis(
            fabricconfig['class'],
            fabricconfig['config'],
            mgmtbusconfig
        )
        c.configure(fts)

        gevent.signal(signal.SIGUSR1, c.stop)

        while not c.fts_init():
            if c.poweroff.wait(timeout=0.1) is not None:
                break

            gevent.sleep(1)

        LOG.info('Nodes initialized')

        try:
            c.poweroff.wait()
            LOG.info('power off')

        except KeyboardInterrupt:
            LOG.error("We should not be here !")
            c.stop()

    except:
        LOG.exception('Exception in chassis main procedure')
        raise
Example #7
def main(argv):
    global time_taken, time_limit
    time_start = time.time()
    get_para_result = get_para(argv)
    if not get_para_result:
        help_msg()
    # signal for the timelimit param
    # signal.signal(signal.SIGALRM, timeout_handler)
    # signal.alarm(time_limit)

    test_round = request_num / concurrency
    gevent.signal(signal.SIGQUIT, gevent.shutdown)

    for i in range(test_round):
        time_out = time_limit
        time_gevent_start = time.time()
        jobs = []
        for con in range(int(concurrency)):
            jobs.append(gevent.spawn(make_request, dest_url))
        if time_out > 0:
            try:
                gevent.Timeout(1).start()
                gevent.joinall(jobs, timeout=1, raise_error=True)
            except gevent.Timeout:
                print 'gevent timeout'
            time_gevent_finish = time.time()
            time_per_gevent = time_gevent_finish - time_gevent_start
            time_limit -= time_per_gevent
        else:
            print '\n Cannot finish the test within the time limit'
            help_msg()

    time_finish = time.time()
    time_taken = time_finish - time_start
    output_msg()
Example #8
    def run(self):
        self.logger.info("Firing up workers")
        for i in xrange(self.config['workers']):
            worker = Worker(self, i)
            self.workers.append(worker)
            worker.start()

        self.logger.info("Starting zmq puller")
        self.puller = Puller(self)
        self.puller.start()

        gevent.signal(signal.SIGINT, self.exit, "SIGINT")
        gevent.signal(signal.SIGHUP, self.exit, "SIGHUP")

        self._exit_signal = Event()
        self._exit_signal.wait()

        # stop all greenlets
        for gl in self.workers:
            self.logger.info("Requesting stop for {} greenlet".format(gl))
            gl.kill(timeout=self.config['term_timeout'], block=False)

        self.logger.info("Requesting stop for puller")
        self.puller.kill(timeout=self.config['term_timeout'], block=False)

        try:
            if gevent.wait(timeout=self.config['term_timeout']):
                self.logger.info("All threads exited normally")
            else:
                self.logger.info("Timeout reached, shutting down forcefully")
        except KeyboardInterrupt:
            self.logger.info("Shutdown requested again by system, "
                             "exiting without cleanup")

        self.logger.info("=" * 80)
Example #9
def main(argv):
    try:
        #Configure logger
        logging.config.dictConfig(settings.LOGGING)

        with pidfile(settings.SERVICE_PID_FILE, create_directory=True):

            
            #Create service
            service = ChatService()
            
            def sigterm_handler():
                service.stop()

            gevent.signal(signal.SIGTERM, sigterm_handler)

            service.start()
            service.join()
        
    except PidFileException as error:
        logging.error("Service is already running: %s" % str(error))

    except KeyboardInterrupt:
        service.stop()
        service.join()
Example #10
    def _install_signal_handlers(self):
        """Installs signal handlers for handling SIGINT and SIGTERM
        gracefully.
        """

        def request_force_stop():
            """Terminates the application (cold shutdown).
            """
            self.log.warning('Cold shut down.')

            self.gevent_pool.kill()

            raise SystemExit()

        def request_stop():
            """Stops the current worker loop but waits for child processes to
            end gracefully (warm shutdown).
            """
            gevent.signal(signal.SIGINT, request_force_stop)
            gevent.signal(signal.SIGTERM, request_force_stop)

            msg = 'Warm shut down requested.'
            self.log.warning(msg)

            # If shutdown is requested in the middle of a job, wait until
            # finish before shutting down
            self.log.debug('Stopping after all greenlets are finished. '
                           'Press Ctrl+C again for a cold shutdown.')
            self._stopped = True
            self.gevent_pool.join()

            raise StopRequested()

        gevent.signal(signal.SIGINT, request_stop)
        gevent.signal(signal.SIGTERM, request_stop)
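
The warm/cold two-stage pattern above condenses to a short standalone sketch (assuming the running jobs live in a gevent.pool.Pool): the first SIGINT/SIGTERM starts a warm shutdown and rebinds both signals so that a second one forces a cold shutdown.

import signal

import gevent
from gevent.pool import Pool

pool = Pool(10)  # stands in for self.gevent_pool above

def request_force_stop():
    pool.kill()  # cold shutdown: terminate greenlets immediately
    raise SystemExit()

def request_stop():
    # a second SIGINT/SIGTERM escalates to a cold shutdown
    gevent.signal(signal.SIGINT, request_force_stop)
    gevent.signal(signal.SIGTERM, request_force_stop)
    pool.join()  # warm shutdown: wait for running greenlets to finish
    raise SystemExit()

gevent.signal(signal.SIGINT, request_stop)
gevent.signal(signal.SIGTERM, request_stop)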
Example #11
 def start_server():
     global port
     server = SocksServer(('0.0.0.0', port))
     gevent.signal(signal.SIGTERM, server.close)
     gevent.signal(signal.SIGINT, server.close)
     print "Server is listening on 0.0.0.0:%d" % port
     server.serve_forever()
Example #12
    def run(self):
        # gevent complains if you import it before you daemonize
        import gevent
        gevent.signal(signal.SIGUSR1, self.do_reload)
        gevent.signal(signal.SIGTERM, self.terminate)

        if self._get_action_func() == '_run':
            # to make debugging easier, we're including the directory where
            # the configuration file lives as well as the current working
            # directory in the module search path
            sys.path.append(os.path.dirname(self.config_path))
            sys.path.append(os.getcwd())

        self._log_config()

        if self.proc_name:
            setproctitle.setproctitle(self.proc_name)

        self.service = self.service_factory()

        if hasattr(self.service, 'catch'):
            self.service.catch(SystemExit, lambda e,g: self.service.stop())

        def shed_privileges():
            if self.uid and self.gid:
                daemon.daemon.change_process_owner(self.uid, self.gid)
            
        self.service.serve_forever(ready_callback = shed_privileges)
Example #13
    def __init__(
        self,
        rabbit_ip,
        rabbit_port,
        rabbit_user,
        rabbit_password,
        rabbit_vhost,
        rabbit_ha_mode,
        q_name,
        subscribe_cb,
        logger,
    ):
        self._rabbit_ip = rabbit_ip
        self._rabbit_port = rabbit_port
        self._rabbit_user = rabbit_user
        self._rabbit_password = rabbit_password
        self._rabbit_vhost = rabbit_vhost
        self._subscribe_cb = subscribe_cb
        self._logger = logger
        self._publish_queue = Queue()
        self._conn_lock = Semaphore()

        self.obj_upd_exchange = kombu.Exchange("vnc_config.object-update", "fanout", durable=False)

        # Register a handler for SIGTERM so that we can release the lock
        # Without it, it can take several minutes before new master is elected
        # If any app using this wants to register their own sigterm handler,
        # then we will have to modify this function to perhaps take an argument
        gevent.signal(signal.SIGTERM, self.sigterm_handler)
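
The body of sigterm_handler is not shown in this snippet. A plausible minimal implementation (hypothetical, not this project's actual code) would close the broker connection so the queue lock is released immediately rather than after a broker timeout, then exit:

import sys

def sigterm_handler(self):
    # hypothetical sketch: closing the connection makes the broker
    # release this consumer's locks right away
    conn = getattr(self, '_conn', None)
    if conn is not None:
        conn.close()
    sys.exit(0)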
Example #14
def run():
    atlasnode.info = AtlasNodeInfo()
    atlasnode.info.host = atlasnode.config['host']
    atlasnode.info.port = atlasnode.config['port']
    atlasnode.info.name = atlasnode.config['name']
    atlasnode.info.protocolVersion = atlasnode.protocol_version
        
    logging.info('This is node %s' % atlasnode.info.get_name())

    gevent.spawn(bootstrap)

    def stop():
        #if atlasnode.nodes.list():
        #    atlasnode.config['known_nodes'] = atlasnode.nodes.list()
        atlasnode.config.save()
        sys.exit(0)

    gevent.signal(signal.SIGTERM, stop)

    # Create server
    processor = lambda: AtlasNode.Processor(Handler())
    transport = TSocket.TServerSocket(
        host=atlasnode.config['host'],
        port=atlasnode.config['port'],
    )
    tfactory = TTransport.TBufferedTransportFactory()
    pfactory = TBinaryProtocol.TBinaryProtocolFactory()

    logging.info('Accepting connections on %s:%i' % (transport.host, transport.port))
    server = TGEventServer(processor, transport, tfactory, pfactory)

    try:
        server.serve()
    except KeyboardInterrupt:
        stop()
Example #15
def start():
    '''Fork and start the server, saving the pid in a file.'''
    kill()
    try:
        os.unlink(TEST_LOG)
    except OSError:
        pass
    pid = os.fork()
    if pid == 0:
        try:
            import coverage
            cov = coverage.coverage(data_suffix=True)
            cov.start()

            def save_coverage():
                '''Callback for signal to save coverage info to file.'''
                cov.save()

            gevent.signal(signal.SIGUSR1, save_coverage)
        except ImportError:
            pass
        core = scalestack.Core(TEST_CONFIG_FILE)
        core.force_log_level = logging.DEBUG
        core.run()
        sys.exit(0)
    pid_file = open(TEST_PID, 'w')
    pid_file.write(str(pid))
    pid_file.close()
    atexit.register(kill)
    time.sleep(1)
Example #16
    def __init__(self):

        self.config = BootstrapFile()
        self.routers = []
        signal(2, self.__stopSequence)
        self.__stopping = False
        self.module_manager = ModuleManager()
Example #17
def main():
    logging.basicConfig(stream=sys.stdout)
    logging.getLogger("haproxy").setLevel(logging.DEBUG if DEBUG else logging.INFO)
    if DEBUG:
        logging.getLogger("python-dockercloud").setLevel(logging.DEBUG)

    config.RUNNING_MODE = check_running_mode(HAPROXY_CONTAINER_URI, HAPROXY_SERVICE_URI, API_AUTH)

    gevent.signal(signal.SIGUSR1, on_user_reload)
    gevent.signal(signal.SIGTERM, sys.exit)

    gevent.spawn(run_haproxy)

    pid = create_pid_file()
    logger.info("dockercloud/haproxy PID: %s" % pid)

    if config.RUNNING_MODE == RunningMode.CloudMode:
        gevent.spawn(listen_dockercloud_events)
    elif config.RUNNING_MODE == RunningMode.ComposeMode:
        add_haproxy_run_task("Initial start - Compose Mode")
        gevent.spawn(listen_docker_events_compose_mode)
    elif config.RUNNING_MODE == RunningMode.SwarmMode:
        add_haproxy_run_task("Initial start - Swarm Mode")
        gevent.spawn(polling_service_status_swarm_mode)
    elif config.RUNNING_MODE == RunningMode.LegacyMode:
        add_haproxy_run_task("Initial start - Legacy Mode")

    while True:
        time.sleep(5)
        if Haproxy.cls_process:
            if is_process_running(Haproxy.cls_process):
                continue
            Haproxy.cls_cfg = None
            add_haproxy_run_task("haproxy %s died , restart" % Haproxy.cls_process.pid)
Example #18
    def debug(self, command, config, instances, queue_size, frequency, identification, module_path):
        '''
        Handles the Wishbone debug command.
        '''

        if module_path is not None:
            self.__expandSearchPath(module_path)

        processes = []

        def stopSequence(*args, **kwargs):
            for proc in processes:
                proc.stop()

        signal(2, stopSequence)

        module_manager = ModuleManager()
        router_config = ConfigFile().load(config)

        if instances == 1:
            sys.stdout.write("\nInstance started in foreground with pid %s\n" % (os.getpid()))
            Default(router_config, module_manager, size=queue_size, frequency=frequency, identification=identification, stdout_logging=True).start()
        else:
            for instance in range(instances):
                processes.append(Default(router_config, module_manager, size=queue_size, frequency=frequency, identification=identification, stdout_logging=True, process=True).start())
            pids = [str(p.pid) for p in processes]
            print(("\nInstances started in foreground with pid %s\n" % (", ".join(pids))))
            for proc in processes:
                proc.join()
Example #19
def main():

    gevent.signal(signal.SIGQUIT, gevent.kill)

    parser = argparse.ArgumentParser(
        prog='areplay',
        description='Apache Log live replay',
        formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=80)
    )

    parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
    parser.add_argument('-a', '--auth', help='Basic authentication user:password', type=str)
    parser.add_argument('-w', '--workers', help='Workers pool size', type=int, default=50)
    parser.add_argument('-m', '--match', help='Only process matching requests', type=str)
    parser.add_argument('-i', '--ignore', help='Ignore matching requests', type=str)
    parser.add_argument('-d', '--dry-run', dest='dry_run', action='store_true', help='Only prints URLs')
    parser.add_argument('-f', '--format', help='Apache log format', type=str, default=DEFAULT_LOG_FORMAT)
    parser.add_argument('-sv', '--skip-verify', dest='verify', action='store_false', help='Skip SSL certificate verify')
    parser.add_argument('-iu', '--ignore-url', dest='ignore_url', help='URL to hit when URL from log is ignored', type=str)
    parser.add_argument('server', help='Remote Server')
    parser.add_argument('log_file', help='Apache log file path')

    args = parser.parse_args()

    if args.match is not None:
        args.match = args.match.split('|')

    if args.ignore is not None:
        args.ignore = args.ignore.split('|')

    try:
        gevent.spawn(reader, args).join()
    except KeyboardInterrupt:
        pass
Example #20
 def __init__(self):
     self._quit = False
     if len(sys.argv) >= 2:
         config = parse_conf(sys.argv[1])
     else:
         config = parse_conf('ivt.yml')
     config = config_schema.validate(config)
     self.config = config
     if len(sys.argv) >= 3:
         channel_config = parse_conf(sys.argv[2])
     else:
         channel_config = parse_conf('ivt_channels.yml')
     self.ivt = IVT(
         config['project'],
         config['login_code'],
         config['login_passwd'],
         config['keepalive_interval'],
         channel_config
     )
     WSClientTransport.APP_FACTORY = self.ivt.ivt_session_factory
     self.ws_url = config['ivc']+'?'+urllib.urlencode(
         {
             'login_code': config['login_code'],
             'login_passwd': config['login_passwd'],
             'project': config['project'],
             'hardware_model': config['hardware_model'],
             'firmware_model': IVT_VERSION
         }
     )
     self._retry_evt = Event()
     self._done_upgrade_evt = Event()
     self._done_upgrade_evt.set()
     gevent.signal(signal.SIGINT, self.close)
     gevent.signal(signal.SIGTERM, self.close)
     self._greenlet_chk_ivt_package = gevent.spawn(self._chk_new_ivt_firmware)
Example #21
def main():
	gevent.signal(signal.SIGQUIT, gevent.shutdown)
	global port
	global address
	if port and type(port) is str:
		port = int(port)
	if not address:
		address = "127.0.0.1"
	try:
		server = WSGIServer((address, port), app)

		if serverType == "fastcgi":
			print ("Now serving py as a fastcgi server at %s:%i" % (address, port))
		else:
			print ("Now serving py at %s:%i" % (address, port))
		print "Press Ctrl+c or send SIGQUIT to stop"

		print "\r\nHeres some fancy URLs also:\n\r"

		print "  Url : Class Name"
		print "  -------------------------"
		for url in urls:
			print ("  %s : %s" % (url['url'], url["object"].__name__))
		
		if serverType == "fastcgi":
			print "\r\n\r\nNo logging of requests done here."
			print "Check your server logs instead."
		else:
			print "\r\n\r\nNow logging requests:"
			print "  Remote IP - - [YYYY-MM-DD HH:MM:SS] \"METHOD url HTTP/version\" Status code Something Request timing"
			print "------------------------------------------------------------------------------------------------------"

		server.serve_forever()
	except KeyboardInterrupt:
		gevent.shutdown()
Example #22
File: g.py Project: akissa/spamc
 def run(self):
     self.server = SpamdTCP(self.address)
     # self.port = server.server_port
     gevent.signal(signal.SIGTERM, self.server.close)
     gevent.signal(signal.SIGINT, self.server.close)
     self.server.start()
     gevent.wait()
Example #23
	def __init__(self, input_fn=None, input_file=sys.stdin, output=sys.stdout,
	             suppress_nonprinting=True, encoding='utf-8', completion=None, completion_print=True,
	             complete_whole_line=False, gevent_handle_sigint=False):
		"""input_fn overrides the default read function. Alternately, input_file specifies a
		file to read from in the default read function.
		input_fn should take no args.
		output is the file to write to, and should be a tty.
		suppress_nonprinting is default True (set False to disable) and causes any unhandled non-printing characters
			to not be written to output.
		Encoding is the encoding that input is expected to be in.
			Note that input bytes will buffer until a full character can be decoded.
			All intermediate output will be encoded with this encoding.
			Set to None to disable this behaviour and treat all bytes as single characters.
			Strings returned from self.readline() will be bytes if encoding is None, else unicode.
			This option is ignored in python3 - everything is unicode, whether you like it or not.
		completion, if given, should be a callable that takes an input string and return a list of possible completions.
			Results generally should have the input as a prefix, but this is not a hard requirement.
			This function will be called with the current pre-cursor input (back to the first non-word character)
			when the user presses the completion key (tab). Word characters are as per the re module.
			If any results are returned, the given input is replaced with the longest common prefix
			among the results. If only one result is returned, a space is also appended.
			If there are multiple results and the given input is already equal to the longest common prefix,
			and completion_print=True, a list of possible completions is output.
			An iterable may be given instead of a callable - this is equivalent to a completion_fn that returns
			all items from that iterable which start with the input.
		complete_whole_line: If True, all characters before the cursor are passed to the completion function,
			not just the latest word. In this case, the completion function should return (head, completions),
			where head is a static string to leave unchanged, while completions is the list of potential suffixes
			to head to complete as. For example, if you were completing a filepath /foo/ba and the options were
			/foo/bar or /foo/baz, you would return ("/foo/", ["bar", "baz"]).
		gevent_handle_sigint=True: Add some special functionality to work around an issue with KeyboardInterrupt
			and gevent. Note this disables SIGINT from raising, but makes SIGQUIT do so instead.
		"""
		if input_fn:
			self.read = input_fn
		else:
			self.input_file = input_file
		self.output = output
		self.suppress_nonprinting = suppress_nonprinting
		self.encoding = encoding
		self.completion_fn = completion if callable(completion) else complete_from(completion)
		self.completion_print = completion_print
		self.complete_whole_line = complete_whole_line
		self.history = self.history[:] # so we have a unique instance to ourselves
		self._gevent_handle_sigint = gevent_handle_sigint

		# If we're using gevent, the keyboard interrupt handling doesn't work well, we probably
		# won't raise the KeyboardInterrupt in the right greenlet. We work around this by explicitly handling
		# the SIGINT and re-raising in the correct place.
		# We rebind SIGQUIT to raise KeyboardInterrupt for debugging/aborting
		if self._gevent_handle_sigint:
			import gevent
			self._readline_greenlet = None
			def _sigquit(signum, frame):
				raise KeyboardInterrupt
			def _sigint():
				if self._readline_greenlet:
					self._readline_greenlet.kill(KeyboardInterrupt, block=False)
			signal.signal(signal.SIGQUIT, _sigquit)
			gevent.signal(signal.SIGINT, _sigint)
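
To make the complete_whole_line contract from the docstring concrete, here is a hypothetical completion function (not part of this class) that completes filesystem paths and returns (head, completions) as described, e.g. '/foo/ba' -> ('/foo/', ['bar', 'baz']):

import os

def path_completer(line):
    # split the pre-cursor text into a fixed head and the partial last segment
    if '/' in line:
        head, _, partial = line.rpartition('/')
        head += '/'
    else:
        head, partial = '', line
    try:
        names = os.listdir(head or '.')
    except OSError:
        return head, []
    return head, [name for name in names if name.startswith(partial)]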
Example #24
    def run(self):
       
        """ @sighup
        SIGHUP handler to indicate configuration changes
        """
        gevent.signal(signal.SIGHUP, self.sighup_handler) 

        i = 0
        self._sem = Semaphore()
        self._logger.debug('Starting.. %s' % str(
                    self._config.zookeeper_server()))
        constnt_schdlr = ConsistentScheduler(
                            self._config._name,
                            zookeeper=self._config.zookeeper_server(),
                            delete_hndlr=self._del_uves,
                            logger=self._logger, 
                            cluster_id=self._config.cluster_id())
        while self._keep_running:
            self._logger.debug('@run: ittr(%d)' % i)
            if constnt_schdlr.schedule(self._config.devices()):
                members = constnt_schdlr.members()
                partitions = constnt_schdlr.partitions()
                self._send_snmp_collector_uve(members, partitions,
                    constnt_schdlr.work_items())
                sleep_time = self.do_work(i, constnt_schdlr.work_items())
                self._logger.debug('done work %s' % str(self._prouters.keys()))
                i += 1
                gevent.sleep(sleep_time)
            else:
                gevent.sleep(1)
        constnt_schdlr.finish()
Example #25
    def __init__(self, start_on_init=True):
        self.inbox = Queue()
        gevent.signal(signal.SIGTERM, self.__cleanup)
        gevent.signal(signal.SIGINT, self.__cleanup)

        self.actor_id = shortuuid.ShortUUID().random(length=5)

        def dummy_broadcast(msg):
            print "broadcasting msg (DUMMY) : ", msg
        self.broadcast_inbox = dummy_broadcast

        self.msg_serial = 0
        if start_on_init:
            self.start()
        self.msg_history = []

        self.sem = Semaphore()
        #atexit.register(self.__cleanup)

        functions = inspect.getmembers(self, predicate=inspect.ismethod)
        self.handle_functions = {}
        for f in functions:
            if f[0].startswith("handle_"):
                #print "this is handle funct: ", f
                self.handle_functions[f[0]] = f[1]
Example #26
def main():
    import argparse

    parser = argparse.ArgumentParser(description="Start FSAL server")
    parser.add_argument("--conf", metavar="PATH", help="Path to configuration file", default=in_pkg("fsal-server.ini"))
    args, unknown = parser.parse_known_args()

    config = ConfDict.from_file(args.conf, defaults=FSAL_DEFAULTS)

    configure_logging(config)

    context = dict()
    context["config"] = config
    context["databases"] = init_databases(config)

    fs_manager = FSDBManager(config, context)
    fs_manager.start()
    context["fs_manager"] = fs_manager

    server = FSALServer(config, context)
    context["server"] = server

    def cleanup_wrapper(*args):
        cleanup(context)

    gevent.signal(signal.SIGINT, cleanup_wrapper)
    gevent.signal(signal.SIGTERM, cleanup_wrapper)

    try:
        logging.info("FSAL server started.")
        server.run()
    except KeyboardInterrupt:
        logging.info("Keyboard interrupt received. Shutting down.")
        cleanup(context)
Example #27
File: cyvk.py Project: cydev/cyvk
 def __init__(self):
     self.users = {}
     self.client = xmpp.Component(self, HOST)
     self.sending = Queue(100)
     self.dispatcher_gl = None
     gevent.signal(signal.SIGTERM, self.disconnect)
     gevent.signal(signal.SIGINT, self.disconnect)
Example #28
 def run(self):
     """路由服务"""
     logging.info("server starting run on `%s:%s`" % self.listener)
     gevent.signal(signal.SIGINT, self.close, signal.SIGINT)
     gevent.signal(signal.SIGTERM, self.close, signal.SIGTERM)
     gevent.spawn(self.cli_conns.check_cli_timeout, self.cli_timeout)
     self.serve_forever()
Example #29
 def start_client_registry(self, host, port):
     self.service = ClientRegistry(host, port)
     self.service_greenlet = Greenlet(self.service.start)
     gevent.signal(signal.SIGTERM, self.service_greenlet.kill)
     gevent.signal(signal.SIGINT, self.service_greenlet.kill)
     self.service_greenlet.start()
     logger.info('Started client registry, connecting to %s:%s', host, port)
Example #30
    def handle(self, *args, **options):
        if not options['noinput']:
            confirm = raw_input("""
        ### %s Fast Reindex !!! ###
        You have requested to do a fluff index reset via fast track.
        This will update all your fluff indicators in place.

        Type 'yes' to continue, or 'no' to cancel: """ % self.pillow_class.__name__)

            if confirm != 'yes':
                print "\tReset cancelled."
                return

        from gevent.monkey import patch_all
        patch_all()

        self._bootstrap(options)
        start = datetime.utcnow()

        gevent.signal(signal.SIGQUIT, gevent.shutdown)
        queue = Queue(POOL_SIZE)
        workers = [gevent.spawn(worker, self, queue) for i in range(POOL_SIZE)]

        print "Starting fast tracked reindexing"
        for i, row in enumerate(self.full_couch_view_iter()):
            queue.put((row, i))

        gevent.joinall(workers)

        end = datetime.utcnow()
        print "done in %s seconds" % (end - start).seconds
Example #31
	mempool = MemPool.MemPool(log)
	chaindb = ChainDb.ChainDb(settings, settings['db'], log, mempool,
				  netmagic, False, False)
	peermgr = PeerManager(log, mempool, chaindb, netmagic)

	if 'loadblock' in settings:
		chaindb.loadfile(settings['loadblock'])

	threads = []

	# start HTTP server for JSON-RPC
	rpcexec = rpc.RPCExec(peermgr, mempool, chaindb, log,
				  settings['rpcuser'], settings['rpcpass'])
	rpcserver = gevent.pywsgi.WSGIServer(('', settings['rpcport']), rpcexec.handle_request)
	t = gevent.Greenlet(rpcserver.serve_forever)
	t.start()
	threads.append(t)

	# connect to specified remote node
	c = peermgr.add(settings['host'], settings['port'])
	c.start()
	threads.append(c)

	# program main loop
	def shutdown():
		for t in threads: t.kill()
	gevent.signal(signal.SIGINT, shutdown)
	gevent.joinall(threads)

Example #32
        assert vizd_obj2.verify_message_table_limit()

        return True

    @staticmethod
    def get_free_port():
        cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        cs.bind(("", 0))
        cport = cs.getsockname()[1]
        cs.close()
        return cport

    @staticmethod
    def _check_skip_test():
        if (socket.gethostname() == 'build01'):
            logging.info("Skipping test")
            return True
        return False


# end class AnalyticsDbTest


def _term_handler(*_):
    raise IntSignal()


if __name__ == '__main__':
    gevent.signal(signal.SIGINT, _term_handler)
    unittest.main(catchbreak=True)
Example #33
def main():
    parser, options = parse_options()

    # setup logging
    if not options.skip_log_setup:
        setup_logging(options.loglevel, options.logfile)

    logger = logging.getLogger(__name__)

    locustfile = find_locustfile(options.locustfile)

    if not locustfile:
        logger.error(
            "Could not find any locustfile! Ensure file ends in '.py' and see --help for available options."
        )
        sys.exit(1)

    if locustfile == "locust.py":
        logger.error(
            "The locustfile must not be named `locust.py`. Please rename the file and try again."
        )
        sys.exit(1)

    docstring, locusts = load_locustfile(locustfile)

    if options.list_commands:
        console_logger.info("Available Locusts:")
        for name in locusts:
            console_logger.info("    " + name)
        sys.exit(0)

    if not locusts:
        logger.error("No Locust class found!")
        sys.exit(1)

    # make sure specified Locust exists
    if options.locust_classes:
        missing = set(options.locust_classes) - set(locusts.keys())
        if missing:
            logger.error("Unknown Locust(s): %s\n" % (", ".join(missing)))
            sys.exit(1)
        else:
            names = set(options.locust_classes) & set(locusts.keys())
            locust_classes = [locusts[n] for n in names]
    else:
        # list() call is needed to consume the dict_view object in Python 3
        locust_classes = list(locusts.values())

    if options.show_task_ratio:
        console_logger.info("\n Task ratio per locust class")
        console_logger.info("-" * 80)
        print_task_ratio(locust_classes)
        console_logger.info("\n Total task ratio")
        console_logger.info("-" * 80)
        print_task_ratio(locust_classes, total=True)
        sys.exit(0)
    if options.show_task_ratio_json:
        from json import dumps
        task_data = {
            "per_class": get_task_ratio_dict(locust_classes),
            "total": get_task_ratio_dict(locust_classes, total=True)
        }
        console_logger.info(dumps(task_data))
        sys.exit(0)

    if options.run_time:
        if not options.no_web:
            logger.error(
                "The --run-time argument can only be used together with --no-web"
            )
            sys.exit(1)
        if options.slave:
            logger.error(
                "--run-time should be specified on the master node, and not on slave nodes"
            )
            sys.exit(1)
        try:
            options.run_time = parse_timespan(options.run_time)
        except ValueError:
            logger.error(
                "Valid --run-time formats are: 20, 20s, 3m, 2h, 1h20m, 3h30m10s, etc."
            )
            sys.exit(1)

        def spawn_run_time_limit_greenlet():
            logger.info("Run time limit set to %s seconds" % options.run_time)

            def timelimit_stop():
                logger.info("Time limit reached. Stopping Locust.")
                runners.locust_runner.quit()

            gevent.spawn_later(options.run_time, timelimit_stop)

    if options.step_time:
        if not options.step_load:
            logger.error(
                "The --step-time argument can only be used together with --step-load"
            )
            sys.exit(1)
        if options.slave:
            logger.error(
                "--step-time should be specified on the master node, and not on slave nodes"
            )
            sys.exit(1)
        try:
            options.step_time = parse_timespan(options.step_time)
        except ValueError:
            logger.error(
                "Valid --step-time formats are: 20, 20s, 3m, 2h, 1h20m, 3h30m10s, etc."
            )
            sys.exit(1)

    if options.master:
        runners.locust_runner = MasterLocustRunner(locust_classes, options)
    elif options.slave:
        try:
            runners.locust_runner = SlaveLocustRunner(locust_classes, options)
        except socket.error as e:
            logger.error("Failed to connect to the Locust master: %s", e)
            sys.exit(-1)
    else:
        runners.locust_runner = LocalLocustRunner(locust_classes, options)
    # main_greenlet points to runners.locust_runner.greenlet by default; it will point to the web greenlet later if in web mode
    main_greenlet = runners.locust_runner.greenlet

    if options.no_web:
        if options.master:
            while len(runners.locust_runner.clients.ready
                      ) < options.expect_slaves:
                logging.info(
                    "Waiting for slaves to be ready, %s of %s connected",
                    len(runners.locust_runner.clients.ready),
                    options.expect_slaves)
                time.sleep(1)
        if options.step_time:
            runners.locust_runner.start_stepload(options.num_clients,
                                                 options.hatch_rate,
                                                 options.step_clients,
                                                 options.step_time)
        elif not options.slave:
            runners.locust_runner.start_hatching(options.num_clients,
                                                 options.hatch_rate)
            # make sure locusts are spawned
            time.sleep(1)
    elif not options.slave:
        # spawn web greenlet
        logger.info("Starting web monitor at http://%s:%s" %
                    (options.web_host or "*", options.port))
        main_greenlet = gevent.spawn(web.start, locust_classes, options)

    if options.run_time:
        spawn_run_time_limit_greenlet()

    stats_printer_greenlet = None
    if not options.only_summary and (options.print_stats or
                                     (options.no_web and not options.slave)):
        # spawn stats printing greenlet
        stats_printer_greenlet = gevent.spawn(stats_printer)

    if options.csvfilebase:
        gevent.spawn(stats_writer, options.csvfilebase,
                     options.stats_history_enabled)

    def shutdown(code=0):
        """
        Shut down locust by firing quitting event, printing/writing stats and exiting
        """
        logger.info("Shutting down (exit code %s), bye." % code)
        if stats_printer_greenlet is not None:
            stats_printer_greenlet.kill(block=False)
        logger.info("Cleaning up runner...")
        if runners.locust_runner is not None:
            runners.locust_runner.quit()
        logger.info("Running teardowns...")
        events.quitting.fire(reverse=True)
        print_stats(runners.locust_runner.stats, current=False)
        print_percentile_stats(runners.locust_runner.stats)
        if options.csvfilebase:
            write_stat_csvs(options.csvfilebase, options.stats_history_enabled)
        print_error_report()
        sys.exit(code)

    # install SIGTERM handler
    def sig_term_handler():
        logger.info("Got SIGTERM signal")
        shutdown(0)

    gevent.signal(signal.SIGTERM, sig_term_handler)

    try:
        logger.info("Starting Locust %s" % version)
        main_greenlet.join()
        code = 0
        lr = runners.locust_runner
        if len(lr.errors) or len(lr.exceptions) or lr.cpu_log_warning():
            code = options.exit_code_on_error
        shutdown(code=code)
    except KeyboardInterrupt as e:
        shutdown(0)
Example #34
 def install_signal_handlers(self):
     # Enabling the following line to explicitly set SIGINT yields very
     # weird behaviour: can anybody explain?
     # gevent.signal(signal.SIGINT, signal.default_int_handler)
     gevent.signal(signal.SIGTERM, signal.default_int_handler)
Example #35
                        dest="syslog",
                        action='store_true',
                        help="enable syslog (default: disabled)")
    PARSER.add_argument('-b', '--bind',
                        dest="bind",
                        default="0.0.0.0",
                        help="bind address (default: 0.0.0.0)")
    PARSER.add_argument('-p', '--port',
                        dest="port",
                        type=int,
                        default=8000,
                        help="listen port (default: 8000)")
    PARSER.add_argument('-i', '--instances',
                        dest="instances",
                        type=int,
                        default=0,
                        help="instances number (supervisor only, default:<cpu-count>)")

    ARGS = PARSER.parse_args()
    if ARGS.syslog:
        logging.getLogger().addHandler(SYSLOG)
    logging.info('Listening on %s:%d...' % (ARGS.bind, ARGS.port))

    gevent.signal(signal.SIGTERM, graceful_shutdown)

    try:
        WSGIServer((ARGS.bind, ARGS.port), application).serve_forever()
    except (KeyboardInterrupt, SystemExit) as err:
        graceful_shutdown(err)

Example #36
    def daemonize(self):
        """
        Do the UNIX double-fork magic, see Stevens' "Advanced
        Programming in the UNIX Environment" for details (ISBN 0201563177)
        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        try:
            pid = os.fork()
            if pid > 0:
                # Exit first parent
                sys.exit(0)
        except OSError as e:
            sys.stderr.write("fork #1 failed: %d (%s)\n" %
                             (e.errno, e.strerror))
            sys.exit(1)

        # Decouple from parent environment
        os.chdir(self.home_dir)
        os.setsid()
        os.umask(self.umask)

        # Do second fork
        try:
            pid = os.fork()
            if pid > 0:
                # Exit from second parent
                sys.exit(0)
        except OSError as e:
            sys.stderr.write("fork #2 failed: %d (%s)\n" %
                             (e.errno, e.strerror))
            sys.exit(1)

        # if sys.platform != 'darwin':  # This block breaks on OS X
        #     # Redirect standard file descriptors
        #     sys.stdout.flush()
        #     sys.stderr.flush()
        #     si = open(self.stdin, 'r')
        #     so = open(self.stdout, 'a+')
        #     if self.stderr:
        #         se = open(self.stderr, 'a+', 0)
        #     else:
        #         se = so
        #     os.dup2(si.fileno(), sys.stdin.fileno())
        #     os.dup2(so.fileno(), sys.stdout.fileno())
        #     os.dup2(se.fileno(), sys.stderr.fileno())

        def sigtermhandler(signum, frame):
            self.daemon_alive = False
            sys.exit()

        if self.use_gevent:
            import gevent
            gevent.reinit()
            gevent.signal(signal.SIGTERM, sigtermhandler, signal.SIGTERM, None)
            gevent.signal(signal.SIGINT, sigtermhandler, signal.SIGINT, None)
        else:
            signal.signal(signal.SIGTERM, sigtermhandler)
            signal.signal(signal.SIGINT, sigtermhandler)

        if self.verbose >= 1:
            print("Started")

        # Write pidfile
        atexit.register(
            self.delpid)  # Make sure pid file is removed if we quit
        pid = str(os.getpid())
        open(self.pidfile, 'w+').write("%s\n" % pid)
Example #37
    print 'Handling signal TERM'
    if http_server.started:
        http_server.stop()
    sys.exit(signal.SIGTERM)


if __name__ == "__main__":
    # Initial check of the services and setup main dictionary Services accordingly
    for s in Services:
        task = Services[s]['fun']
        ServiceStat = task(s, 'status')
        if ServiceStat:  # service is now running
            Services[s]['state'] = 1  # wait for feedback from the service, do not change immediately
            sse_parm['LED_%s' % Services[s]['id']] = Services[s]['lon']  # turn the led on
            sse_parm['BUT_%s' % Services[s]['id']] = Services[s]['bon']  # show the OFF button
        else:  # service is not up and running
            Services[s]['state'] = 0  # wait for feedback from the service, do not change immediately
            sse_parm['LED_%s' % Services[s]['id']] = Services[s]['loff']  # turn the led off
            sse_parm['BUT_%s' % Services[s]['id']] = Services[s]['boff']  # show the ON button

    gevent.signal(signal.SIGTERM, stop)
    gevent.spawn(param_worker)
    http_server = WSGIServer(('', 5000), app)
    http_server.serve_forever()
Example #38
 def signal(self, *args, **kwargs):
     gevent.signal(*args, **kwargs)
Example #39
    def __init__(self, dm_logger=None, args=None):
        DeviceManager._device_manager = self
        self._args = args
        PushConfigState.set_push_mode(int(self._args.push_mode))
        PushConfigState.set_repush_interval(int(self._args.repush_interval))
        PushConfigState.set_repush_max_interval(
            int(self._args.repush_max_interval))
        PushConfigState.set_push_delay_per_kb(
            float(self._args.push_delay_per_kb))
        PushConfigState.set_push_delay_max(int(self._args.push_delay_max))
        PushConfigState.set_push_delay_enable(
            bool(self._args.push_delay_enable))

        self._chksum = ""
        if self._args.collectors:
            self._chksum = hashlib.md5(''.join(
                self._args.collectors)).hexdigest()

        # Initialize logger
        self.logger = dm_logger or DeviceManagerLogger(args)

        # Register Plugins
        try:
            DeviceConf.register_plugins()
        except DeviceConf.PluginsRegistrationFailed as e:
            self.logger.error("Exception: " + str(e))
        except Exception as e:
            tb = traceback.format_exc()
            self.logger.error("Internal error while registering plugins: " +
                              str(e) + tb)

        # Register Ansible Plugins
        try:
            AnsibleBase.register_plugins()
        except AnsibleBase.PluginsRegistrationFailed as e:
            self.logger.error("Exception: " + str(e))
        except Exception as e:
            tb = traceback.format_exc()
            self.logger.error(
                "Internal error while registering ansible plugins: " + str(e) +
                tb)

        # Retry till API server is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        api_server_list = args.api_server_ip.split(',')
        while not connected:
            try:
                self._vnc_lib = VncApi(
                    args.admin_user,
                    args.admin_password,
                    args.admin_tenant_name,
                    api_server_list,
                    args.api_server_port,
                    api_server_use_ssl=args.api_server_use_ssl)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:  # haproxy throws 503
                time.sleep(3)
        """ @sighup
        Handle SIGHUP for collector list config changes
        """
        gevent.signal(signal.SIGHUP, self.sighup_handler)

        # Initialize amqp
        self._vnc_amqp = DMAmqpHandle(self.logger, self.REACTION_MAP,
                                      self._args)
        self._vnc_amqp.establish()

        # Initialize cassandra
        self._object_db = DMCassandraDB.get_instance(self, _zookeeper_client)
        DBBaseDM.init(self, self.logger, self._object_db)
        DBBaseDM._sandesh = self.logger._sandesh

        for obj in GlobalSystemConfigDM.list_obj():
            GlobalSystemConfigDM.locate(obj['uuid'], obj)

        for obj in NodeProfileDM.list_obj():
            NodeProfileDM.locate(obj['uuid'], obj)

        for obj in RoleConfigDM.list_obj():
            RoleConfigDM.locate(obj['uuid'], obj)

        for obj in GlobalVRouterConfigDM.list_obj():
            GlobalVRouterConfigDM.locate(obj['uuid'], obj)

        for obj in VirtualNetworkDM.list_obj():
            VirtualNetworkDM.locate(obj['uuid'], obj)

        dci_obj_list = DataCenterInterconnectDM.list_obj()
        for obj in dci_obj_list or []:
            DataCenterInterconnectDM.locate(obj['uuid'], obj)

        for obj in FabricDM.list_obj():
            FabricDM.locate(obj['uuid'], obj)

        for obj in FabricNamespaceDM.list_obj():
            FabricNamespaceDM.locate(obj['uuid'], obj)

        for obj in LogicalRouterDM.list_obj():
            LogicalRouterDM.locate(obj['uuid'], obj)

        for obj in RoutingInstanceDM.list_obj():
            RoutingInstanceDM.locate(obj['uuid'], obj)

        for obj in FloatingIpPoolDM.list_obj():
            FloatingIpPoolDM.locate(obj['uuid'], obj)

        for obj in BgpRouterDM.list_obj():
            BgpRouterDM.locate(obj['uuid'], obj)

        for obj in PortTupleDM.list_obj():
            PortTupleDM.locate(obj['uuid'], obj)

        for obj in PhysicalInterfaceDM.list_obj():
            PhysicalInterfaceDM.locate(obj['uuid'], obj)

        for obj in LinkAggregationGroupDM.list_obj():
            LinkAggregationGroupDM.locate(obj['uuid'], obj)

        for obj in LogicalInterfaceDM.list_obj():
            LogicalInterfaceDM.locate(obj['uuid'], obj)

        pr_obj_list = PhysicalRouterDM.list_obj()
        for obj in pr_obj_list:
            PhysicalRouterDM.locate(obj['uuid'], obj)

        pr_uuid_set = set([pr_obj['uuid'] for pr_obj in pr_obj_list])
        self._object_db.handle_pr_deletes(pr_uuid_set)

        dci_uuid_set = set([dci_obj['uuid'] for dci_obj in dci_obj_list])
        self._object_db.handle_dci_deletes(dci_uuid_set)

        for obj in VirtualMachineInterfaceDM.list_obj():
            VirtualMachineInterfaceDM.locate(obj['uuid'], obj)

        for obj in SecurityGroupDM.list_obj():
            SecurityGroupDM.locate(obj['uuid'], obj)

        for obj in AccessControlListDM.list_obj():
            AccessControlListDM.locate(obj['uuid'], obj)

        for obj in pr_obj_list:
            pr = PhysicalRouterDM.locate(obj['uuid'], obj)
            li_set = pr.logical_interfaces
            vmi_set = set()
            for pi_id in pr.physical_interfaces:
                pi = PhysicalInterfaceDM.locate(pi_id)
                if pi:
                    li_set |= pi.logical_interfaces
                    vmi_set |= pi.virtual_machine_interfaces
            for li_id in li_set:
                li = LogicalInterfaceDM.locate(li_id)
                if li and li.virtual_machine_interface:
                    vmi_set |= set([li.virtual_machine_interface])
            for vmi_id in vmi_set:
                vmi = VirtualMachineInterfaceDM.locate(vmi_id)

        si_obj_list = ServiceInstanceDM.list_obj()
        si_uuid_set = set([si_obj['uuid'] for si_obj in si_obj_list])
        self._object_db.handle_pnf_resource_deletes(si_uuid_set)

        for obj in si_obj_list:
            ServiceInstanceDM.locate(obj['uuid'], obj)

        for obj in InstanceIpDM.list_obj():
            InstanceIpDM.locate(obj['uuid'], obj)

        for obj in FloatingIpDM.list_obj():
            FloatingIpDM.locate(obj['uuid'], obj)

        for vn in VirtualNetworkDM.values():
            vn.update_instance_ip_map()

        for obj in ServiceEndpointDM.list_obj():
            ServiceEndpointDM.locate(obj['uuid'], obj)

        for obj in ServiceConnectionModuleDM.list_obj():
            ServiceConnectionModuleDM.locate(obj['uuid'], obj)

        for obj in ServiceObjectDM.list_obj():
            ServiceObjectDM.locate(obj['uuid'], obj)

        for obj in NetworkDeviceConfigDM.list_obj():
            NetworkDeviceConfigDM.locate(obj['uuid'], obj)

        for obj in E2ServiceProviderDM.list_obj():
            E2ServiceProviderDM.locate(obj['uuid'], obj)

        for obj in PeeringPolicyDM.list_obj():
            PeeringPolicyDM.locate(obj['uuid'], obj)

        for pr in PhysicalRouterDM.values():
            pr.set_config_state()
            pr.uve_send()

        self._vnc_amqp._db_resync_done.set()
        try:
            gevent.joinall(self._vnc_amqp._vnc_kombu.greenlets())
        except KeyboardInterrupt:
            DeviceManager.destroy_instance()
            raise
Example #40
import signal
import sys
import getpass


# Just a general signal handler to stop everything cleanly.
def signal_handler():
    global h
    print('You pressed Ctrl+C!')
    if h is not None:
        print("Stopping hunter.")
        h.stop()
    sys.exit(0)


gevent.signal(signal.SIGINT, signal_handler)


def debugPrint(msg):
    print msg


class TestHunter(limacharlie.Hunter):
    def init(self):
        print("Initializing")

    def deinit(self):
        print("Deinitializing")

    def run(self):
        print("First we will fetch all sensors.")
Example #41
def run(ctx, **kwargs):
    # pylint: disable=too-many-locals,too-many-branches,too-many-statements

    if ctx.invoked_subcommand is None:
        print('Welcome to Raiden, version {}!'.format(
            get_system_spec()['raiden']))
        from raiden.ui.console import Console
        from raiden.api.python import RaidenAPI

        slogging.configure(kwargs['logging'],
                           log_json=kwargs['log_json'],
                           log_file=kwargs['logfile'])
        if kwargs['logfile']:
            # Disable stream logging
            root = slogging.getLogger()
            for handler in root.handlers:
                if isinstance(handler, slogging.logging.StreamHandler):
                    root.handlers.remove(handler)
                    break

        # TODO:
        # - Ask for confirmation to quit if there are any locked transfers that did
        # not timeout.
        (listen_host, listen_port) = split_endpoint(kwargs['listen_address'])
        try:
            with SocketFactory(listen_host,
                               listen_port,
                               strategy=kwargs['nat']) as mapped_socket:
                kwargs['mapped_socket'] = mapped_socket

                app_ = ctx.invoke(app, **kwargs)

                domain_list = []
                if kwargs['rpccorsdomain']:
                    if ',' in kwargs['rpccorsdomain']:
                        for domain in kwargs['rpccorsdomain'].split(','):
                            domain_list.append(str(domain))
                    else:
                        domain_list.append(str(kwargs['rpccorsdomain']))

                if ctx.params['rpc']:
                    raiden_api = RaidenAPI(app_.raiden)
                    rest_api = RestAPI(raiden_api)
                    api_server = APIServer(
                        rest_api,
                        cors_domain_list=domain_list,
                        web_ui=ctx.params['web_ui'],
                        eth_rpc_endpoint=ctx.params['eth_rpc_endpoint'],
                    )
                    (api_host,
                     api_port) = split_endpoint(kwargs['api_address'])
                    api_server.start(api_host, api_port)

                    print(
                        'The Raiden API RPC server is now running at http://{}:{}/.\n\n'
                        'See the Raiden documentation for all available endpoints at\n'
                        'http://raiden-network.readthedocs.io/en/stable/rest_api.html'
                        .format(
                            api_host,
                            api_port,
                        ))

                if ctx.params['console']:
                    console = Console(app_)
                    console.start()

                # wait for interrupt
                event = gevent.event.Event()
                gevent.signal(signal.SIGQUIT, event.set)
                gevent.signal(signal.SIGTERM, event.set)
                gevent.signal(signal.SIGINT, event.set)

                gevent.signal(signal.SIGUSR1, toogle_cpu_profiler)
                gevent.signal(signal.SIGUSR2, toggle_trace_profiler)

                event.wait()
                print('Signal received. Shutting down ...')
                try:
                    api_server.stop()
                except NameError:
                    pass
        except socket.error as v:
            if v.args[0] == errno.EADDRINUSE:
                print(
                    'ERROR: Address %s:%s is in use. '
                    'Use --listen-address <host:port> to specify port to listen on.'
                    % (listen_host, listen_port))
                sys.exit(1)
            raise
        app_.stop(leave_channels=False)
    else:
        # Pass parsed args on to subcommands.
        ctx.obj = kwargs
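
Most examples in this collection reduce to the same shutdown idiom used above: point every termination signal at the same Event.set, block the main greenlet on the event, then run teardown after wait() returns. A minimal sketch with the application specifics stripped out (the worker greenlet is a stand-in for the real service):

import signal
import gevent
import gevent.event

worker = gevent.spawn(gevent.sleep, 3600)   # stand-in for the real service

event = gevent.event.Event()
gevent.signal(signal.SIGQUIT, event.set)
gevent.signal(signal.SIGTERM, event.set)
gevent.signal(signal.SIGINT, event.set)

event.wait()                                # returns once any signal arrives
print('Signal received. Shutting down ...')
worker.kill()                               # teardown goes here
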
Example no. 42
                        default='camera:1')
    parser.add_argument('-u',
                        '--url',
                        help='Redis URL',
                        type=str,
                        default='redis://127.0.0.1:6379')
    parser.add_argument('-z',
                        '--zpc',
                        help='zpc port',
                        type=str,
                        default='4245')
    parser.add_argument('--field',
                        help='Image field name',
                        type=str,
                        default='image')
    parser.add_argument('--maxlen',
                        help='Maximum length of output stream',
                        type=int,
                        default=100)
    args = parser.parse_args()

    # Set up Redis connection
    url = urlparse(args.url)

    zpc = zerorpc.Server(
        Canny(args.output, args.input, url, args.field, args.maxlen))
    zpc.bind(f"tcp://0.0.0.0:{args.zpc}")
    print("run")
    gevent.signal(signal.SIGTERM, zpc.stop)
    zpc.run()
    print("Killed")
Example no. 43
def run(config=None, plugin_providers=None, product_name='ajenti', dev_mode=False,
        debug_mode=False, autologin=False):
    """
    A global entry point for Ajenti.

    :param config: config file implementation instance to use
    :type  config: :class:`aj.config.BaseConfig`
    :param plugin_providers: list of plugin providers to load plugins from
    :type  plugin_providers: list(:class:`aj.plugins.PluginProvider`)
    :param str product_name: a product name to use
    :param bool dev_mode: enables dev mode (automatic resource recompilation)
    :param bool debug_mode: enables debug mode (verbose and extra logging)
    :param bool autologin: disables authentication and logs everyone in as the user running the panel. This is EXTREMELY INSECURE.
    """
    if config is None:
        raise TypeError('`config` can\'t be None')

    reload_module(sys)
    if hasattr(sys, 'setdefaultencoding'):
        sys.setdefaultencoding('utf8')

    aj.product = product_name
    aj.debug = debug_mode
    aj.dev = dev_mode
    aj.dev_autologin = autologin

    aj.init()
    aj.log.set_log_params(tag='master', master_pid=os.getpid())
    aj.context = Context()
    aj.config = config
    aj.plugin_providers = plugin_providers or []
    logging.info('Loading config from %s', aj.config)
    aj.config.load()
    aj.config.ensure_structure()

    if aj.debug:
        logging.warn('Debug mode')
    if aj.dev:
        logging.warn('Dev mode')

    try:
        locale.setlocale(locale.LC_ALL, '')
    except locale.Error:
        logging.warning('Couldn\'t set default locale')

    # install a passthrough gettext replacement since all localization is handled in frontend
    # and _() is here only for string extraction
    __builtins__['_'] = lambda x: x

    logging.info('Ajenti Core %s', aj.version)
    logging.info('Detected platform: %s / %s', aj.platform, aj.platform_string)

    # Load plugins
    PluginManager.get(aj.context).load_all_from(aj.plugin_providers)
    if len(PluginManager.get(aj.context)) == 0:
        logging.warn('No plugins were loaded!')

    if aj.config.data['bind']['mode'] == 'unix':
        path = aj.config.data['bind']['socket']
        if os.path.exists(path):
            os.unlink(path)
        listener = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            listener.bind(path)
        except OSError:
            logging.error('Could not bind to %s', path)
            sys.exit(1)

    if aj.config.data['bind']['mode'] == 'tcp':
        host = aj.config.data['bind']['host']
        port = aj.config.data['bind']['port']
        listener = socket.socket(
            socket.AF_INET6 if ':' in host else socket.AF_INET, socket.SOCK_STREAM
        )
        if aj.platform not in ['freebsd', 'osx']:
            try:
                listener.setsockopt(socket.IPPROTO_TCP, socket.TCP_CORK, 1)
            except socket.error:
                logging.warn('Could not set TCP_CORK')
        listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        logging.info('Binding to [%s]:%s', host, port)
        try:
            listener.bind((host, port))
        except socket.error as e:
            logging.error('Could not bind: %s', str(e))
            sys.exit(1)

    # Fix stupid socketio bug (it tries to do *args[0][0])
    socket.socket.__getitem__ = lambda x, y: None

    listener.listen(10)

    gateway = GateMiddleware.get(aj.context)
    application = HttpRoot(HttpMiddlewareAggregator([gateway])).dispatch

    aj.server = SocketIOServer(
        listener,
        log=open(os.devnull, 'w'),
        application=application,
        handler_class=RequestHandler,
        policy_server=False,
        transports=[
            str('websocket'),
            str('flashsocket'),
            str('xhr-polling'),
            str('jsonp-polling'),
        ],
    )

    if aj.config.data['ssl']['enable'] and aj.config.data['bind']['mode'] == 'tcp':
        aj.server.ssl_args = {'server_side': True}
        cert_path = aj.config.data['ssl']['certificate']
        if aj.config.data['ssl']['fqdn_certificate']:
            fqdn_cert_path = aj.config.data['ssl']['fqdn_certificate']
        else:
            fqdn_cert_path = cert_path

        context = gevent.ssl.SSLContext(ssl.PROTOCOL_TLS)
        context.load_cert_chain(certfile=fqdn_cert_path, keyfile=fqdn_cert_path)
        context.options |= ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
        context.set_ciphers('ALL:!ADH:!EXP:!LOW:!RC2:!3DES:!SEED:!RC4:+HIGH:+MEDIUM')

        if aj.config.data['ssl']['client_auth']['enable']:

            logging.info('Enabling SSL client authentication')
            context.load_verify_locations(cafile=cert_path)
            if aj.config.data['ssl']['client_auth']['force']:
                context.verify_mode = ssl.CERT_REQUIRED
            else:
                context.verify_mode = ssl.CERT_OPTIONAL

            ## Test callback : client_certificate_callback must return None to get forward
            # context.set_servername_callback(AuthenticationService.get(aj.context).client_certificate_callback)

        aj.server.wrap_socket = lambda sock, **kwargs: context.wrap_socket(sock=sock, server_side=True)
        logging.info('SSL enabled')

    # auth.log
    try:
        syslog.openlog(
            ident=str(aj.product),
            facility=syslog.LOG_AUTH,
        )
    except:
        syslog.openlog(aj.product)

    def cleanup():
        if hasattr(cleanup, 'started'):
            return
        cleanup.started = True
        logging.info('Process %s exiting normally', os.getpid())
        gevent.signal(signal.SIGINT, lambda: None)
        gevent.signal(signal.SIGTERM, lambda: None)
        if aj.master:
            gateway.destroy()

        p = psutil.Process(os.getpid())
        for c in p.children(recursive=True):
            try:
                os.killpg(c.pid, signal.SIGTERM)
                os.killpg(c.pid, signal.SIGKILL)
            except OSError:
                pass

    def signal_handler():
        cleanup()
        sys.exit(0)

    gevent.signal(signal.SIGINT, signal_handler)
    gevent.signal(signal.SIGTERM, signal_handler)

    aj.server.serve_forever()

    if not aj.master:
        # child process, server is stopped, wait until killed
        gevent.wait()

    if hasattr(aj.server, 'restart_marker'):
        logging.warn('Restarting by request')
        cleanup()

        fd = 20  # Close all descriptors. Creepy thing
        while fd > 2:
            try:
                os.close(fd)
                logging.debug('Closed descriptor #%i', fd)
            except OSError:
                pass
            fd -= 1

        logging.warn('Will restart the process now')
        if '-d' in sys.argv:
            sys.argv.remove('-d')
        os.execv(sys.argv[0], sys.argv)
    else:
        if aj.master:
            logging.debug('Server stopped')
            cleanup()
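
Two details of the cleanup function above are worth keeping: the attribute stored on the function itself makes cleanup idempotent when signals arrive repeatedly, and re-registering no-op handlers keeps a second SIGINT from interrupting the teardown in progress. A sketch of just that guard:

import signal
import gevent

def cleanup():
    if getattr(cleanup, 'started', False):
        return                    # already shutting down, ignore repeats
    cleanup.started = True
    # from here on, further SIGINT/SIGTERM deliveries are no-ops
    gevent.signal(signal.SIGINT, lambda: None)
    gevent.signal(signal.SIGTERM, lambda: None)
    print('cleaning up exactly once')

cleanup()
cleanup()                         # returns immediately
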
Example no. 44
logger.debug("Using requests %s" % requests.__version__)
logger.debug("Using bencode %s" % bencode_version__)

# Dropping root privileges if needed
if (AceConfig.osplatform != 'Windows' and AceConfig.aceproxyuser
        and os.getuid() == 0):
    if drop_privileges(AceConfig.aceproxyuser):
        logger.info("Dropped privileges to user %s" % AceConfig.aceproxyuser)
    else:
        logger.error("Cannot drop privileges to user %s" %
                     AceConfig.aceproxyuser)
        sys.exit(1)

# setting signal handlers
try:
    gevent.signal(signal.SIGHUP, _reloadconfig)
    gevent.signal(signal.SIGTERM, shutdown)
except AttributeError:
    pass  # not available on Windows

# Creating ClientCounter
AceStuff.clientcounter = ClientCounter()

#### AceEngine startup
name = 'ace_engine.exe' if AceConfig.osplatform == 'Windows' else os.path.basename(
    AceConfig.acecmd)
ace_pid = findProcess(name)
if not ace_pid and AceConfig.acespawn:
    AceStuff.aceProc = '' if AceConfig.osplatform == 'Windows' else AceConfig.acecmd.split()
    if spawnAce(AceStuff.aceProc, AceConfig.acestartuptimeout):
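
The try/except AttributeError around the registration above is the standard guard for Windows: signal.SIGHUP does not exist there, so merely looking it up raises AttributeError. A self-contained sketch, with a dummy reload hook standing in for _reloadconfig:

import signal
import gevent

def _reloadconfig():
    print('re-reading configuration')

try:
    gevent.signal(signal.SIGHUP, _reloadconfig)
except AttributeError:
    pass  # signal.SIGHUP is not available on Windows
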
Example no. 45
def main():
    def _sigint_handler():
        raise KeyboardInterrupt('Ctrl-C from _sigint_handler')

    def _sigterm_handler():
        raise KeyboardInterrupt('Ctrl-C from _sigterm_handler')

    def _cleanup():
        trace_writer.stop()
        query_processor.stop()
        store.stop()
        comm.stop()

    args = _parse_args()

    loglevel = logging.INFO
    if args.verbose:
        loglevel = logging.DEBUG

    logging.basicConfig(
        level=loglevel,
        format="%(asctime)s (%(process)d)%(module)s.%(funcName)s"
               " %(levelname)s: %(message)s",
        datefmt="%Y-%m-%dT%H:%M:%S"
    )
    LOG.info('Starting mm-traced version %s', __version__)
    LOG.info('mm-traced arguments: %s', args)

    with open(args.config, 'r') as f:
        config = yaml.safe_load(f)
    if config is None:
        config = {}

    LOG.info('mm-traced config: %s', config)

    store = minemeld.traced.storage.Store(config.get('store', None))

    transport_config = config.get('transport', {
        'class': 'AMQP',
        'config': {
            'num_connections': 1
        }
    })
    comm = minemeld.comm.factory(
        transport_config['class'],
        transport_config['config']
    )

    trace_writer = minemeld.traced.writer.Writer(
        comm,
        store,
        topic=config.get('topic', 'mbus:log'),
        config=config.get('writer', {})
    )

    query_processor = minemeld.traced.queryprocessor.QueryProcessor(
        comm,
        store,
        config=config.get('queryprocessor', {})
    )

    shutdown_event = gevent.event.Event()
    comm.add_failure_listener(
        functools.partial(_ioloop_failure, shutdown_event)
    )

    comm.start()

    gevent.signal(signal.SIGINT, _sigint_handler)
    gevent.signal(signal.SIGTERM, _sigterm_handler)

    try:
        shutdown_event.wait()

    except KeyboardInterrupt:
        pass

    except:
        LOG.exception('Exception')

    finally:
        _cleanup()
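
Raising KeyboardInterrupt from the handlers works because KeyboardInterrupt is one of the exception types gevent's hub treats as a system error: instead of just logging it, the hub re-raises it in the main greenlet, which breaks out of shutdown_event.wait() and lands in the except clause. A reduced sketch of that control flow:

import signal
import gevent
import gevent.event

def _sigterm_handler():
    # re-raised in the main greenlet by the hub, interrupting wait() below
    raise KeyboardInterrupt('SIGTERM')

gevent.signal(signal.SIGTERM, _sigterm_handler)

try:
    gevent.event.Event().wait()   # blocks until SIGTERM (or Ctrl-C)
except KeyboardInterrupt:
    print('cleaning up')
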
Example no. 46
File: cc.py Project: pkediyal/pyon
    def start(self):
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise ContainerError(
                "Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s"
                % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            pid_contents = {
                'messaging': dict(CFG.server.amqp),
                'container-agent': self.name,
                'container-xp': bootstrap.get_sys_name()
            }
            f.write(msgpack.dumps(pid_contents))
            atexit.register(self._cleanup_pid)
            self._capabilities.append("PID_FILE")

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()  # cleanup the pidfile first
                self.quit()  # now try to quit - will not error on second cleanup pidfile call
            finally:
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)

        self._normal_signal = signal.signal(signal.SIGTERM, handl)

        # set up greenlet debugging signal handler
        gevent.signal(signal.SIGUSR2, self._handle_sigusr2)

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        self._capabilities.append("DIRECTORY")

        # Event repository
        self.event_repository = EventRepository()
        self.event_pub = EventPublisher()
        self._capabilities.append("EVENT_REPOSITORY")

        # Local resource registry
        self.resource_registry = ResourceRegistry()
        self._capabilities.append("RESOURCE_REGISTRY")

        # Persistent objects
        self.datastore_manager.get_datastore("objects",
                                             DataStore.DS_PROFILE.OBJECTS)

        # State repository
        self.state_repository = StateRepository()
        self._capabilities.append("STATE_REPOSITORY")

        # internal router for local transports
        self.local_router = LocalRouter(bootstrap.get_sys_name())
        self.local_router.start()
        self.local_router.ready.wait(timeout=2)
        self._capabilities.append("LOCAL_ROUTER")

        # Start ExchangeManager, which starts the node (broker connection)
        self.ex_manager.start()
        self._capabilities.append("EXCHANGE_MANAGER")

        self.proc_manager.start()
        self._capabilities.append("PROC_MANAGER")

        self.app_manager.start()
        self._capabilities.append("APP_MANAGER")

        self.governance_controller.start()
        self._capabilities.append("GOVERNANCE_CONTROLLER")

        if CFG.get_safe('container.sflow.enabled', False):
            self.sflow_manager.start()
            self._capabilities.append("SFLOW_MANAGER")

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node,
                                from_name=self.name,
                                service=self,
                                process=self)

        cleanup = lambda _: self.proc_manager._cleanup_method(self.name, rsvc)

        # Start an ION process with the right kind of endpoint factory
        proc = self.proc_manager.proc_sup.spawn(name=self.name,
                                                listeners=[rsvc],
                                                service=self,
                                                cleanup_method=cleanup)
        self.proc_manager.proc_sup.ensure_ready(proc)
        proc.start_listeners()
        self._capabilities.append("CONTAINER_AGENT")

        self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                     origin=self.id,
                                     origin_type="CapabilityContainer",
                                     sub_type="START",
                                     state=ContainerStateEnum.START)

        self._is_started = True
        self._status = "RUNNING"

        log.info("Container (%s) started, OK.", self.id)
Example no. 47
def main():
    # config
    import yaml
    import io
    import sys
    import signal
    import gevent
    from peermanager import PeerManager
    from jsonrpc import JSONRPCServer
    from discovery import NodeDiscovery
    import slogging
    log = slogging.get_logger('app')
    slogging.configure(config_string=':debug')

    # read config
    sample_config = """
p2p:
    num_peers: 10
    bootstrap_nodes:
        # local bootstrap
        # - enode://6ed2fecb28ff17dec8647f08aa4368b57790000e0e9b33a7b91f32c41b6ca9ba21600e9a8c44248ce63a71544388c6745fa291f88f8b81e109ba3da11f7b41b9@127.0.0.1:30303
        # go_bootstrap
        #- enode://6cdd090303f394a1cac34ecc9f7cda18127eafa2a3a06de39f6d920b0e583e062a7362097c7c65ee490a758b442acd5c80c6fce4b148c6a391e946b45131365b@54.169.166.226:30303
        # cpp_bootstrap
        - enode://4a44599974518ea5b0f14c31c4463692ac0329cb84851f3435e6d1b18ee4eae4aa495f846a0fa1219bd58035671881d44423876e57db2abd57254d0197da0ebe@5.1.83.226:30303

    listen_host: 0.0.0.0
    listen_port: 30303
node:
    privkey_hex: 65462b0520ef7d3df61b9992ed3bea0c56ead753be7c8b3614e0ce01e4cac41b
    """
    if len(sys.argv) == 1:
        config = yaml.load(io.BytesIO(sample_config))
        pubkey = crypto.privtopub(config['node']['privkey_hex'].decode('hex'))
        config['node']['id'] = crypto.sha3(pubkey)
    else:
        fn = sys.argv[1]
        log.info('loading config from', fn=fn)
        config = yaml.load(open(fn))

    # stop on every unhandled exception!
    gevent.get_hub().SYSTEM_ERROR = BaseException  # (KeyboardInterrupt, SystemExit, SystemError)

    print(config)
    # create app
    app = BaseApp(config)

    # register services
    NodeDiscovery.register_with_app(app)
    PeerManager.register_with_app(app)
    #  JSONRPCServer.register_with_app(app)

    # start app
    app.start()

    # wait for interrupt
    evt = gevent.event.Event()
    # gevent.signal(signal.SIGQUIT, gevent.kill) ## killall pattern
    gevent.signal(signal.SIGQUIT, evt.set)
    gevent.signal(signal.SIGTERM, evt.set)
    gevent.signal(signal.SIGINT, evt.set)
    evt.wait()

    # finally stop
    app.stop()
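
The SYSTEM_ERROR assignment in main() above is a deliberately aggressive failure policy: by default the hub only propagates KeyboardInterrupt, SystemExit and SystemError to the main greenlet and merely prints tracebacks for other unhandled greenlet exceptions, so widening the tuple to BaseException makes any unhandled exception fatal to the process. It is a one-liner:

import gevent

# default is roughly (KeyboardInterrupt, SystemExit, SystemError); widening it
# makes every unhandled exception in any greenlet crash the whole process
gevent.get_hub().SYSTEM_ERROR = BaseException
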
Example no. 48
    def start(self):
        self.server.start()

    def stop(self):
        pass


def handleAgentPacket(client, type, str):
    '''Log each packet received from the agent.

    :param int type: packet type
    :param str str: packet payload
    '''
    TCLogger.debug("%d,%s", type, str)


if __name__ == '__main__':
    ac = PHPAgentConf(CAConfig)
    agent = FrontAgent(ac, handleAgentPacket)
    agent.start()
    from gevent.event import Event
    import gevent, signal
    evt = Event()

    gevent.signal(signal.SIGQUIT, evt.set)
    gevent.signal(signal.SIGTERM, evt.set)
    gevent.signal(signal.SIGINT, evt.set)

    evt.wait()
Example no. 49
    def __init__(self, dm_logger=None, args=None):
        self._args = args
        PushConfigState.set_repush_interval(int(self._args.repush_interval))
        PushConfigState.set_repush_max_interval(
            int(self._args.repush_max_interval))
        PushConfigState.set_push_delay_per_kb(
            float(self._args.push_delay_per_kb))
        PushConfigState.set_push_delay_max(int(self._args.push_delay_max))
        PushConfigState.set_push_delay_enable(
            bool(self._args.push_delay_enable))

        self._chksum = ""
        if self._args.collectors:
            self._chksum = hashlib.md5(''.join(
                self._args.collectors)).hexdigest()

        # Initialize logger
        self.logger = dm_logger or DeviceManagerLogger(args)

        # Retry till API server is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        while not connected:
            try:
                self._vnc_lib = VncApi(
                    args.admin_user,
                    args.admin_password,
                    args.admin_tenant_name,
                    args.api_server_ip,
                    args.api_server_port,
                    api_server_use_ssl=args.api_server_use_ssl)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:  # haproxy throws 503
                time.sleep(3)
        """ @sighup
        Handle of SIGHUP for collector list config change
        """
        gevent.signal(signal.SIGHUP, self.sighup_handler)

        # Initialize amqp
        self._vnc_amqp = DMAmqpHandle(self.logger, self.REACTION_MAP,
                                      self._args)
        self._vnc_amqp.establish()

        # Initialize cassandra
        self._object_db = DMCassandraDB.get_instance(self, _zookeeper_client)
        DBBaseDM.init(self, self.logger, self._object_db)
        DBBaseDM._sandesh = self.logger._sandesh

        for obj in GlobalSystemConfigDM.list_obj():
            GlobalSystemConfigDM.locate(obj['uuid'], obj)

        for obj in GlobalVRouterConfigDM.list_obj():
            GlobalVRouterConfigDM.locate(obj['uuid'], obj)

        for obj in VirtualNetworkDM.list_obj():
            VirtualNetworkDM.locate(obj['uuid'], obj)

        for obj in RoutingInstanceDM.list_obj():
            RoutingInstanceDM.locate(obj['uuid'], obj)

        for obj in BgpRouterDM.list_obj():
            BgpRouterDM.locate(obj['uuid'], obj)

        pr_obj_list = PhysicalRouterDM.list_obj()
        for obj in pr_obj_list:
            PhysicalRouterDM.locate(obj['uuid'], obj)

        pr_uuid_set = set([pr_obj['uuid'] for pr_obj in pr_obj_list])
        self._object_db.handle_pr_deletes(pr_uuid_set)

        for obj in PortTupleDM.list_obj():
            PortTupleDM.locate(obj['uuid'], obj)

        for obj in PhysicalInterfaceDM.list_obj():
            PhysicalInterfaceDM.locate(obj['uuid'], obj)

        for obj in LogicalInterfaceDM.list_obj():
            LogicalInterfaceDM.locate(obj['uuid'], obj)

        for obj in VirtualMachineInterfaceDM.list_obj():
            VirtualMachineInterfaceDM.locate(obj['uuid'], obj)

        for obj in pr_obj_list:
            pr = PhysicalRouterDM.locate(obj['uuid'], obj)
            li_set = pr.logical_interfaces
            vmi_set = set()
            for pi_id in pr.physical_interfaces:
                pi = PhysicalInterfaceDM.locate(pi_id)
                if pi:
                    li_set |= pi.logical_interfaces
                    vmi_set |= pi.virtual_machine_interfaces
            for li_id in li_set:
                li = LogicalInterfaceDM.locate(li_id)
                if li and li.virtual_machine_interface:
                    vmi_set |= set([li.virtual_machine_interface])
            for vmi_id in vmi_set:
                vmi = VirtualMachineInterfaceDM.locate(vmi_id)

        si_obj_list = ServiceInstanceDM.list_obj()
        si_uuid_set = set([si_obj['uuid'] for si_obj in si_obj_list])
        self._object_db.handle_pnf_resource_deletes(si_uuid_set)

        for obj in si_obj_list:
            ServiceInstanceDM.locate(obj['uuid'], obj)

        for obj in InstanceIpDM.list_obj():
            InstanceIpDM.locate(obj['uuid'], obj)

        for obj in FloatingIpDM.list_obj():
            FloatingIpDM.locate(obj['uuid'], obj)

        for vn in VirtualNetworkDM.values():
            vn.update_instance_ip_map()

        for pr in PhysicalRouterDM.values():
            pr.set_config_state()

        DeviceManager._device_manager = self
        self._vnc_amqp._db_resync_done.set()
        try:
            gevent.joinall(self._vnc_amqp._vnc_kombu.greenlets())
        except KeyboardInterrupt:
            DeviceManager.destroy_instance()
            raise
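
Registering a SIGHUP handler in __init__, as the device manager does for its collector list, gives operators the conventional kill -HUP <pid> hook for re-reading configuration without a restart. A minimal class-based sketch of the same wiring; Daemon and its handler are invented for illustration:

import signal
import gevent

class Daemon(object):
    def __init__(self):
        self.collectors = []
        # @sighup: re-read the collector list on "kill -HUP <pid>"
        gevent.signal(signal.SIGHUP, self.sighup_handler)

    def sighup_handler(self):
        print('SIGHUP received, reloading collector list')

daemon = Daemon()
gevent.sleep(60)   # keep the process alive so the handler can fire
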
Example no. 50
def main():
    parser, options = parse_options()

    # setup logging
    if not options.skip_log_setup:
        setup_logging(options.loglevel, options.logfile)

    logger = logging.getLogger(__name__)

    locust_path = get_locust_path()

    if options.demo:
        if not locust_path:
            logger.error(
                '''Cannot locate the Python path; make sure it is in the right place. On Windows, add it to the
            system PATH; on Linux, make sure Python is installed under /usr/local/lib/''')
            sys.exit(1)
        pt_demo_path = os.path.join(locust_path, 'demo',
                                    'demo_pressuretest.xls')
        pt_new_demo = os.path.join(os.getcwd(), 'PtDemo.xls')
        shutil.copyfile(pt_demo_path, pt_new_demo)
        sys.exit(0)

    if options.xlsfile:
        pt_file = options.xlsfile
        if not pt_file.endswith('.xls'):
            logger.error(
                "PressureTest file must be end with '.xls' and see --help for available options."
            )
            sys.exit(1)
        if not os.path.isfile(pt_file):
            logger.error('PressureTest file does not exist, please check it.')
            sys.exit(1)
        make_locustfile(pt_file)
        logger.info('Finished transforming XLS to locustfile.')
        sys.exit(0)

    locustfile = find_locustfile(options.locustfile)

    if not locustfile:
        logger.error(
            "Could not find any locustfile! Ensure file ends in '.py' and see --help for available options."
        )
        sys.exit(1)

    if locustfile == "locust.py" or locustfile == "locust.xls":
        logger.error(
            "The locustfile must not be named `locust.py` or `locust.xls`. Please rename the file and try again."
        )
        sys.exit(1)

    docstring, locusts = load_locustfile(locustfile)

    if options.list_commands:
        console_logger.info("Available Locusts:")
        for name in locusts:
            console_logger.info("    " + name)
        sys.exit(0)

    if not locusts:
        logger.error("No Locust class found!")
        sys.exit(1)

    # make sure specified Locust exists
    if options.locust_classes:
        missing = set(options.locust_classes) - set(locusts.keys())
        if missing:
            logger.error("Unknown Locust(s): %s\n" % (", ".join(missing)))
            sys.exit(1)
        else:
            names = set(options.locust_classes) & set(locusts.keys())
            locust_classes = [locusts[n] for n in names]
    else:
        # list() call is needed to consume the dict_view object in Python 3
        locust_classes = list(locusts.values())

    if options.show_task_ratio:
        console_logger.info("\n Task ratio per locust class")
        console_logger.info("-" * 80)
        print_task_ratio(locust_classes)
        console_logger.info("\n Total task ratio")
        console_logger.info("-" * 80)
        print_task_ratio(locust_classes, total=True)
        sys.exit(0)
    if options.show_task_ratio_json:
        from json import dumps
        task_data = {
            "per_class": get_task_ratio_dict(locust_classes),
            "total": get_task_ratio_dict(locust_classes, total=True)
        }
        console_logger.info(dumps(task_data))
        sys.exit(0)

    if options.run_time:
        if not options.no_web:
            logger.error(
                "The --run-time argument can only be used together with --no-web"
            )
            sys.exit(1)
        if options.slave:
            logger.error(
                "--run-time should be specified on the master node, and not on slave nodes"
            )
            sys.exit(1)
        try:
            options.run_time = parse_timespan(options.run_time)
        except ValueError:
            logger.error(
                "Valid --run-time formats are: 20, 20s, 3m, 2h, 1h20m, 3h30m10s, etc."
            )
            sys.exit(1)

        def spawn_run_time_limit_greenlet():
            logger.info("Run time limit set to %s seconds" % options.run_time)

            def timelimit_stop():
                logger.info("Time limit reached. Stopping Locust.")
                runners.locust_runner.quit()

            gevent.spawn_later(options.run_time, timelimit_stop)

    if options.step_time:
        if not options.step_load:
            logger.error(
                "The --step-time argument can only be used together with --step-load"
            )
            sys.exit(1)
        if options.slave:
            logger.error(
                "--step-time should be specified on the master node, and not on slave nodes"
            )
            sys.exit(1)
        try:
            options.step_time = parse_timespan(options.step_time)
        except ValueError:
            logger.error(
                "Valid --step-time formats are: 20, 20s, 3m, 2h, 1h20m, 3h30m10s, etc."
            )
            sys.exit(1)

    if options.master:
        # Add -d for automatically run slaves
        if options.distribute:
            ptpy = locustfile
            pt_s = PtExcel(options.locustfile)
            master_ip, pt_slave_info = pt_s.pt_slave()
            if master_ip == '':
                logger.error(
                    'master IP cannot be None if you use --distribute')
                sys.exit(1)
            try:
                locust_cli_slave = 'nohup locust -f /root/{locustfile} --slave --master-host={master_ip} > /dev/null 2>&1 &'.format(
                    locustfile=ptpy, master_ip=master_ip)
                thread_pool = []
                for slave in pt_slave_info:
                    slave_ip, slave_username, slave_password = slave
                    _t = Thread(target=pt_slave,
                                args=(slave_ip, slave_username, slave_password,
                                      ptpy, locust_cli_slave))
                    logger.info('Prepare slave {}'.format(slave_ip))
                    thread_pool.append(_t)
                    _t.start()
                for each_t in thread_pool:
                    each_t.join()
            except KeyboardInterrupt:
                pass
            except Exception as e:
                logger.error(
                    'Something unexpected happened; collected exception: {}'.format(e))

        runners.locust_runner = MasterLocustRunner(locust_classes, options)
    elif options.slave:
        try:
            runners.locust_runner = SlaveLocustRunner(locust_classes, options)
        except socket.error as e:
            logger.error("Failed to connect to the Locust master: %s", e)
            sys.exit(-1)
    else:
        runners.locust_runner = LocalLocustRunner(locust_classes, options)
    # main_greenlet is pointing to runners.locust_runner.greenlet by default, it will point the web greenlet later if in web mode
    main_greenlet = runners.locust_runner.greenlet

    if options.no_web:
        if options.master:
            while len(runners.locust_runner.clients.ready) < options.expect_slaves:
                logging.info(
                    "Waiting for slaves to be ready, %s of %s connected",
                    len(runners.locust_runner.clients.ready),
                    options.expect_slaves)
                time.sleep(1)
        if options.step_time:
            runners.locust_runner.start_stepload(options.num_clients,
                                                 options.hatch_rate,
                                                 options.step_clients,
                                                 options.step_time)
        elif not options.slave:
            runners.locust_runner.start_hatching(options.num_clients,
                                                 options.hatch_rate)
            # make sure locusts are spawned
            time.sleep(1)
    elif not options.slave:
        # spawn web greenlet
        logger.info("Starting web monitor at http://%s:%s" %
                    (options.web_host or "*", options.port))
        main_greenlet = gevent.spawn(web.start, locust_classes, options)

    if options.run_time:
        spawn_run_time_limit_greenlet()

    stats_printer_greenlet = None
    if not options.only_summary and (options.print_stats or
                                     (options.no_web and not options.slave)):
        # spawn stats printing greenlet
        stats_printer_greenlet = gevent.spawn(stats_printer)

    if options.csvfilebase:
        gevent.spawn(stats_writer, options.csvfilebase,
                     options.stats_history_enabled)

    def shutdown(code=0):
        """
        Shut down locust by firing quitting event, printing/writing stats and exiting
        """
        logger.info("Shutting down (exit code %s), bye." % code)
        if stats_printer_greenlet is not None:
            stats_printer_greenlet.kill(block=False)
        logger.info("Cleaning up runner...")
        if runners.locust_runner is not None:
            runners.locust_runner.quit()
        logger.info("Running teardowns...")
        events.quitting.fire(reverse=True)
        print_stats(runners.locust_runner.stats, current=False)
        print_percentile_stats(runners.locust_runner.stats)
        if options.csvfilebase:
            write_stat_csvs(options.csvfilebase, options.stats_history_enabled)
        print_error_report()
        sys.exit(code)

    # install SIGTERM handler
    def sig_term_handler():
        logger.info("Got SIGTERM signal")
        shutdown(0)

    gevent.signal(signal.SIGTERM, sig_term_handler)

    try:
        logger.info("Starting Locust %s" % version)
        main_greenlet.join()
        code = 0
        if len(runners.locust_runner.errors) or len(
                runners.locust_runner.exceptions):
            code = options.exit_code_on_error
        shutdown(code=code)
    except KeyboardInterrupt as e:
        shutdown(0)
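
The shutdown closure above funnels three exits through one code path: SIGTERM, normal completion of the main greenlet, and Ctrl-C all end up calling shutdown(), which runs the teardown once and turns runner state into the process exit code. A stripped-down sketch of that funnel; the errors list is a stand-in for the runner's error bookkeeping:

import signal
import sys
import gevent

errors = []                      # stand-in for runners.locust_runner.errors

def shutdown(code=0):
    print('Shutting down (exit code %s), bye.' % code)
    sys.exit(code)

def sig_term_handler():
    print('Got SIGTERM signal')
    shutdown(0)

gevent.signal(signal.SIGTERM, sig_term_handler)

try:
    gevent.sleep(5)              # stand-in for main_greenlet.join()
    shutdown(1 if errors else 0)
except KeyboardInterrupt:
    shutdown(0)
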
Example no. 51
def main(args_str=' '.join(sys.argv[1:])):
    # Parse Arguments
    node_parser = argparse.ArgumentParser(add_help=False)
    node_parser.add_argument("--nodetype",
                             default='contrail-analytics',
                             help='Type of node which nodemgr is managing')
    try:
        args, remaining_argv = node_parser.parse_known_args(args_str.split())
    except:
        usage()
    default = {'rules': '',
               'collectors': [],
               'hostip': '127.0.0.1',
               'minimum_diskgb': 256,
               'contrail_databases': 'config analytics',
               'cassandra_repair_interval': 24,
               'cassandra_repair_logdir': '/var/log/contrail/',
               'sandesh_send_rate_limit': \
                    SandeshSystem.get_sandesh_send_rate_limit(),
              }
    sandesh_opts = {
        'sandesh_keyfile': '/etc/contrail/ssl/private/server-privkey.pem',
        'sandesh_certfile': '/etc/contrail/ssl/certs/server.pem',
        'sandesh_ca_cert': '/etc/contrail/ssl/certs/ca-cert.pem',
        'sandesh_ssl_enable': False,
        'introspect_ssl_enable': False,
        'sandesh_dscp_value': 0
    }
    node_type = args.nodetype
    if (node_type == 'contrail-analytics'):
        config_file = '/etc/contrail/contrail-analytics-nodemgr.conf'
    elif (node_type == 'contrail-config'):
        config_file = '/etc/contrail/contrail-config-nodemgr.conf'
    elif (node_type == 'contrail-control'):
        config_file = '/etc/contrail/contrail-control-nodemgr.conf'
    elif (node_type == 'contrail-vrouter'):
        config_file = '/etc/contrail/contrail-vrouter-nodemgr.conf'
    elif (node_type == 'contrail-database'):
        config_file = '/etc/contrail/contrail-database-nodemgr.conf'
    else:
        sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n")
        return
    if not os.path.exists(config_file):
        sys.stderr.write("config file " + config_file + " is not present" + "\n")
        return
    config = ConfigParser.SafeConfigParser()
    config.read([config_file])
    if 'DEFAULTS' in config.sections():
        default.update(dict(config.items('DEFAULTS')))
    if 'COLLECTOR' in config.sections():
        try:
            collector = config.get('COLLECTOR', 'server_list')
            default['collectors'] = collector.split()
        except ConfigParser.NoOptionError as e:
            pass
    if 'SANDESH' in config.sections():
        sandesh_opts.update(dict(config.items('SANDESH')))
        if 'sandesh_ssl_enable' in config.options('SANDESH'):
            sandesh_opts['sandesh_ssl_enable'] = config.getboolean(
                'SANDESH', 'sandesh_ssl_enable')
        if 'introspect_ssl_enable' in config.options('SANDESH'):
            sandesh_opts['introspect_ssl_enable'] = config.getboolean(
                'SANDESH', 'introspect_ssl_enable')
        if 'sandesh_dscp_value' in config.options('SANDESH'):
            try:
                sandesh_opts['sandesh_dscp_value'] = config.getint(
                    'SANDESH', 'sandesh_dscp_value')
            except:
                pass
    parser = argparse.ArgumentParser(parents=[node_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    default.update(sandesh_opts)
    parser.set_defaults(**default)
    parser.add_argument("--rules",
                        help='Rules file to use for processing events')
    parser.add_argument("--collectors",
                        nargs='+',
                        help='Collector addresses in format '
                             'ip1:port1 ip2:port2')
    parser.add_argument("--sandesh_send_rate_limit", type=int,
            help="Sandesh send rate limit in messages/sec")
    parser.add_argument("--sandesh_keyfile",
                        help="Sandesh ssl private key")
    parser.add_argument("--sandesh_certfile",
                        help="Sandesh ssl certificate")
    parser.add_argument("--sandesh_ca_cert",
                        help="Sandesh CA ssl certificate")
    parser.add_argument("--sandesh_ssl_enable", action="store_true",
                        help="Enable ssl for sandesh connection")
    parser.add_argument("--introspect_ssl_enable", action="store_true",
                        help="Enable ssl for introspect connection")
    parser.add_argument("--sandesh_dscp_value", type=int,
                        help="DSCP bits for IP header of Sandesh messages")
    if (node_type == 'contrail-database'):
        parser.add_argument("--minimum_diskGB",
                            type=int,
                            dest='minimum_diskgb',
                            help="Minimum disk space in GB's")
        parser.add_argument("--contrail_databases",
                            nargs='+',
                            help='Contrail databases on this node '
                                 'in format: config analytics')
        parser.add_argument("--hostip",
                            help="IP address of host")
        parser.add_argument("--cassandra_repair_interval", type=int,
                            help="Time in hours to periodically run "
                            "nodetool repair for cassandra maintenance")
        parser.add_argument("--cassandra_repair_logdir",
                            help="Directory for storing repair logs")
    try:
        _args = parser.parse_args(remaining_argv)
    except:
        usage()
    rule_file = _args.rules
    collector_addr = _args.collectors
    sys.stderr.write("Collector address: " + str(collector_addr) + "\n")

    # randomize collector list
    _args.chksum = ""
    if _args.collectors:
        _args.chksum = hashlib.md5("".join(_args.collectors)).hexdigest()
        _args.random_collectors = random.sample(_args.collectors, len(_args.collectors))
        _args.collectors = _args.random_collectors

    collector_addr = _args.collectors
    sys.stderr.write("Random Collector address: " + str(collector_addr) + "\n")

    if _args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(_args.sandesh_send_rate_limit)
    sandesh_config = SandeshConfig(_args.sandesh_keyfile,
        _args.sandesh_certfile, _args.sandesh_ca_cert,
        _args.sandesh_ssl_enable, _args.introspect_ssl_enable,
        _args.sandesh_dscp_value)
    # done parsing arguments

    prog = None
    if (node_type == 'contrail-analytics'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_analytics_files/" + \
                "contrail-analytics.rules"
        unit_names = ['contrail-collector.service',
                      'contrail-analytics-api.service',
                      'contrail-snmp-collector.service',
                      'contrail-query-engine.service',
                      'contrail-alarm-gen.service',
                      'contrail-topology.service',
                      'contrail-analytics-nodemgr.service',
                     ]
        prog = AnalyticsEventManager(
            rule_file, unit_names, collector_addr, sandesh_config)
    elif (node_type == 'contrail-config'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_config_files/" + \
                "contrail-config.rules"
        unit_names = ['contrail-api.service',
                      'contrail-schema.service',
                      'contrail-svc-monitor.service',
                      'contrail-device-manager.service',
                      'contrail-config-nodemgr.service',
                      'ifmap.service',
                     ]
        cassandra_repair_interval = _args.cassandra_repair_interval
        cassandra_repair_logdir = _args.cassandra_repair_logdir
        prog = ConfigEventManager(
            rule_file, unit_names, collector_addr, sandesh_config,
            cassandra_repair_interval, cassandra_repair_logdir)
    elif (node_type == 'contrail-control'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_control_files/" + \
                "contrail-control.rules"
        unit_names = ['contrail-control.service',
                      'contrail-dns.service',
                      'contrail-named.service',
                      'contrail-control-nodemgr.service',
                     ]
        prog = ControlEventManager(
            rule_file, unit_names, collector_addr, sandesh_config)
    elif (node_type == 'contrail-vrouter'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_vrouter_files/" + \
                "contrail-vrouter.rules"
        unit_names = ['contrail-vrouter-agent.service',
                      'contrail-vrouter-nodemgr.service',
                     ]
        prog = VrouterEventManager(
            rule_file, unit_names, collector_addr, sandesh_config)
    elif (node_type == 'contrail-database'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_database_files/" + \
                "contrail-database.rules"
        unit_names = ['contrail-database.service',
                      'kafka.service',
                      'contrail-database-nodemgr.service',
                     ]
        hostip = _args.hostip
        minimum_diskgb = _args.minimum_diskgb
        contrail_databases = _args.contrail_databases
        cassandra_repair_interval = _args.cassandra_repair_interval
        cassandra_repair_logdir = _args.cassandra_repair_logdir
        prog = DatabaseEventManager(
            rule_file, unit_names, collector_addr, sandesh_config,
            hostip, minimum_diskgb, contrail_databases,
            cassandra_repair_interval, cassandra_repair_logdir)
    else:
        sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n")
        return

    prog.process()
    prog.send_nodemgr_process_status()
    prog.send_process_state_db(prog.group_names)
    prog.config_file = config_file
    prog.collector_chksum = _args.chksum

    """ @sighup
    Reconfig of collector list
    """
    gevent.signal(signal.SIGHUP, prog.nodemgr_sighup_handler)

    # spawn the callable itself; calling run_periodically() inline would block
    gevent.joinall([gevent.spawn(prog.runforever),
                    gevent.spawn(prog.run_periodically, prog.do_periodic_events, 60)])
Example no. 52
def run():
    reload(sys)
    sys.setdefaultencoding('utf8')

    try:
        locale.setlocale(locale.LC_ALL, '')
    except:
        logging.warning('Couldn\'t set default locale')

    logging.info('Ajenti %s running on platform: %s' %
                 (ajenti.version, ajenti.platform))

    if ajenti.debug:
        exconsole.register()

    # Load plugins
    ajenti.plugins.manager.load_all()

    bind_spec = (ajenti.config.tree.http_binding.host,
                 ajenti.config.tree.http_binding.port)
    if ':' in bind_spec[0]:
        addrs = socket.getaddrinfo(bind_spec[0], bind_spec[1], socket.AF_INET6,
                                   0, socket.SOL_TCP)
        bind_spec = addrs[0][-1]

    ssl_tunnel = None
    if not bind_spec[0].startswith('/'):
        if ajenti.config.tree.ssl.enable:
            ssl_tunnel = SSLTunnel()
            ssl_tunnel.start(bind_spec[0], bind_spec[1],
                             ajenti.config.tree.ssl.certificate_path)
            if ssl_tunnel.check():
                logging.info('SSL tunnel running fine')
                bind_spec = ('127.0.0.1', ssl_tunnel.port)
                atexit.register(ssl_tunnel.stop)
            else:
                logging.error('SSL tunnel failed to start')

    # Fix stupid socketio bug (it tries to do *args[0][0])
    socket.socket.__getitem__ = lambda x, y: None

    logging.info('Starting server on %s' % (bind_spec, ))
    if bind_spec[0].startswith('/'):
        if os.path.exists(bind_spec[0]):
            os.unlink(bind_spec[0])
        listener = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            listener.bind(bind_spec[0])
        except:
            logging.error('Could not bind to %s' % bind_spec[0])
            sys.exit(1)
        listener.listen(10)
    else:
        listener = socket.socket(
            socket.AF_INET6 if ':' in bind_spec[0] else socket.AF_INET,
            socket.SOCK_STREAM)
        listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            listener.bind(bind_spec)
        except:
            logging.error('Could not bind to %s' % (bind_spec, ))
            sys.exit(1)
        listener.listen(10)

    stack = [
        SessionMiddleware(),
        AuthenticationMiddleware(),
        CentralDispatcher()
    ]
    ajenti.server = SocketIOServer(
        listener,
        log=open(os.devnull, 'w'),
        application=HttpRoot(stack).dispatch,
        policy_server=False,
        resource='ajenti:socket',
    )

    # auth.log
    try:
        syslog.openlog(
            ident=str(b'ajenti'),
            facility=syslog.LOG_AUTH,
        )
    except:
        syslog.openlog(b'ajenti')

    try:
        gevent.signal(signal.SIGTERM, lambda: sys.exit(0))
    except:
        pass

    ajenti.feedback.start()
    ajenti.ipc.IPCServer.get(manager.context).start()

    Inflater.get(manager.context).precache()
    ajenti.server.serve_forever()

    if hasattr(ajenti.server, 'restart_marker'):
        logging.warn('Restarting by request')
        if ssl_tunnel:
            ssl_tunnel.stop()

        fd = 20  # Close all descriptors. Creepy thing
        while fd > 2:
            try:
                os.close(fd)
                logging.debug('Closed descriptor #%i' % fd)
            except:
                pass
            fd -= 1

        os.execv(sys.argv[0], sys.argv)
    else:
        logging.info('Stopped by request')
Example no. 53
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys
# reload(sys)
# sys.setdefaultencoding('utf-8')

import signal
import gevent

try:
    from pymysqlrpc import RPCServer
except ImportError:
    # fall back to a source checkout one directory up
    sys.path.append("..")
    from pymysqlrpc import RPCServer

from demoapp import aclmap

# spawn=30: allow at most 30 concurrent connections; beyond that the server
# reports "too many connections".
# activetimeout=30 is just for this test; a common default is 1800s.
server = RPCServer(('0.0.0.0', 3308), aclmap, log=None, spawn=30, webport=8308, querytimeout=20, interval=3, activetimeout=30)
gevent.signal(signal.SIGTERM, server.close)
gevent.signal(signal.SIGINT, server.close)
server.serve_forever()
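
Handing the server's own close method straight to gevent.signal, here and again in the next example, is the tersest form of the pattern: serve_forever() simply returns once the server shuts down. The same shape works with any gevent server; a sketch using the stock WSGI server, with the app and port invented for illustration:

import signal
import gevent
from gevent.pywsgi import WSGIServer

def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello\n']

server = WSGIServer(('0.0.0.0', 8308), app)
gevent.signal(signal.SIGTERM, server.stop)   # stop() unblocks serve_forever()
gevent.signal(signal.SIGINT, server.stop)
server.serve_forever()
print('server stopped')
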
Example no. 54
def run_server():
    server = get_server()
    gevent.signal(signal.SIGTERM, server.close)
    gevent.signal(signal.SIGINT, server.close)
    server.start()
    gevent.wait()
Example no. 55
        driver.quit()


def sig_handler(*args):
    print("Going down")
    global should_exit
    should_exit = True


if __name__ == "__main__":

    parser = OptionParser()
    parser.add_option("-u", "--url", default=DEFAULT_URL, dest="url")
    parser.add_option("-t", "--times", type="int", default=DEFAULT_TIMES, dest="times")
    parser.add_option("-f", "--forever", action="store_true", dest="forever")
    parser.add_option("-c", "--csvoutput", metavar="FILE", default="out.csv", dest="csvoutput", help="Path to the CSV output file")
    parser.add_option("-p", "--preppend", default="", dest="preppend")

    (options, args) = parser.parse_args()

    results = open(options.csvoutput, "a")

    if len(options.preppend) > 0:
        results.write("clients,format,fps,lat\n")
    else:
        results.write("fps,lat\n")

    gevent.signal(signal.SIGTERM, sig_handler)

    run(options.url, options.times, results, options.preppend, options.forever)
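
Unlike the event-based examples, this load driver just flips a global flag and lets the work loop notice it on the next iteration, which is the right shape when the loop is busy doing work (here, driving Selenium) rather than blocking on an Event. Reduced to its skeleton:

import signal
import gevent

should_exit = False

def sig_handler(*args):
    global should_exit
    should_exit = True           # picked up by the loop below

gevent.signal(signal.SIGTERM, sig_handler)

while not should_exit:
    gevent.sleep(0.5)            # yield so the signal callback can run
print("Going down")
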
Example no. 56
def runner(ctx, **kwargs):
    """ Start a raiden Echo Node that will send received transfers back to the initiator. """
    # This is largely a copy&paste job from `raiden.ui.cli::run`, with the difference that
    # an `EchoNode` is instantiated from the App's `RaidenAPI`.
    print('Welcome to Raiden, version {} [Echo Node]'.format(
        get_system_spec()['raiden']))
    slogging.configure(kwargs['logging'],
                       log_json=kwargs['log_json'],
                       log_file=kwargs['logfile'])
    if kwargs['logfile']:
        # Disable stream logging
        root = slogging.getLogger()
        for handler in root.handlers:
            if isinstance(handler, slogging.logging.StreamHandler):
                root.handlers.remove(handler)
                break

    token_address = kwargs.pop('token_address')

    (listen_host, listen_port) = split_endpoint(kwargs['listen_address'])
    with SocketFactory(listen_host, listen_port,
                       strategy=kwargs['nat']) as mapped_socket:
        kwargs['mapped_socket'] = mapped_socket

        app_ = ctx.invoke(app, **kwargs)

        domain_list = []
        if kwargs['rpccorsdomain']:
            if ',' in kwargs['rpccorsdomain']:
                for domain in kwargs['rpccorsdomain'].split(','):
                    domain_list.append(str(domain))
            else:
                domain_list.append(str(kwargs['rpccorsdomain']))

        raiden_api = RaidenAPI(app_.raiden)
        if ctx.params['rpc']:
            rest_api = RestAPI(raiden_api)
            api_server = APIServer(
                rest_api,
                cors_domain_list=domain_list,
                web_ui=ctx.params['web_ui'],
            )
            (api_host, api_port) = split_endpoint(kwargs['api_address'])
            api_server.start(api_host, api_port)

            print(
                'The Raiden API RPC server is now running at http://{}:{}/.\n\n'
                'See the Raiden documentation for all available endpoints at\n'
                'http://raiden-network.readthedocs.io/en/stable/rest_api.html'.
                format(
                    api_host,
                    api_port,
                ))

        # This will install the EchoNode callback in the alarm task:
        echo = EchoNode(raiden_api, token_address)

        event = gevent.event.Event()
        gevent.signal(signal.SIGQUIT, event.set)
        gevent.signal(signal.SIGTERM, event.set)
        gevent.signal(signal.SIGINT, event.set)
        event.wait()

        # This will remove the EchoNode callback from the alarm task:
        echo.stop()

        try:
            api_server.stop()
        except NameError:
            pass
    app_.stop(leave_channels=False)
Example no. 57
def main(args_str=None):
    global _amqp_client
    global _zookeeper_client

    if not args_str:
        args_str = ' '.join(sys.argv[1:])
    args = parse_args(args_str)
    if args.cluster_id:
        client_pfx = args.cluster_id + '-'
        zk_path_pfx = args.cluster_id + '/'
    else:
        client_pfx = ''
        zk_path_pfx = ''

    # randomize collector list
    args.random_collectors = args.collectors
    if args.collectors:
        args.random_collectors = random.sample(args.collectors,
                                               len(args.collectors))

    args.log_level = str(args.log_level)

    # Initialize logger without introspect thread
    dm_logger = DeviceManagerLogger(args, http_server_port=-1)

    # Initialize AMQP handler then close it to be sure remain queue of a
    # precedent run is cleaned
    vnc_amqp = DMAmqpHandle(dm_logger, {}, args)
    vnc_amqp.establish()
    vnc_amqp.close()
    dm_logger.debug("Removed remaining AMQP queue from previous run")

    if 'host_ip' not in args:
        args.host_ip = socket.gethostbyname(socket.getfqdn())

    _amqp_client = initialize_amqp_client(dm_logger, args)
    _zookeeper_client = ZookeeperClient(client_pfx + "device-manager",
                                        args.zk_server_ip, args.host_ip)
    _db_conn = initialize_db_connection(dm_logger, args)

    try:
        # Initialize the device job manager
        DeviceJobManager(_amqp_client, _zookeeper_client, _db_conn, args,
                         dm_logger)
        # Allow kombu client to connect consumers
        gevent.sleep(0.5)
    except Exception as e:
        dm_logger.error("Error while initializing the device job "
                        "manager %s" % str(e))
        raise e

    try:
        # Initialize the device ztp manager
        DeviceZtpManager(_amqp_client, _db_conn, args, dm_logger)
        # Allow kombu client to connect consumers
        gevent.sleep(0.5)
    except Exception as e:
        dm_logger.error("Error while initializing the device ztp "
                        "manager %s" % str(e))
        raise e

    gevent.signal(signal.SIGHUP, sighup_handler)
    gevent.signal(signal.SIGTERM, sigterm_handler)
    gevent.signal(signal.SIGINT, sigterm_handler)

    dm_logger.notice("Waiting to be elected as master...")
    _zookeeper_client.master_election(zk_path_pfx + "/device-manager",
                                      os.getpid(), run_device_manager,
                                      dm_logger, args)
Example no. 58
    def _start_services(self):
        from raiden.api.python import RaidenAPI

        config = deepcopy(App.DEFAULT_CONFIG)
        config["reveal_timeout"] = self._options["default_reveal_timeout"]
        config["settle_timeout"] = self._options["default_settle_timeout"]
        if self._options.get("extra_config", dict()):
            merge_dict(config, self._options["extra_config"])
            del self._options["extra_config"]
        self._options["config"] = config

        if self._options["showconfig"]:
            print("Configuration Dump:")
            dump_config(config)
            dump_cmd_options(self._options)
            dump_module("settings", settings)
            dump_module("constants", constants)

        # this catches exceptions raised when waiting for the stalecheck to complete
        try:
            app_ = run_app(**self._options)
        except (ConnectionError, ConnectTimeout, RequestsConnectionError):
            print(ETHEREUM_NODE_COMMUNICATION_ERROR)
            sys.exit(1)
        except RuntimeError as e:
            click.secho(str(e), fg="red")
            sys.exit(1)
        except EthNodeInterfaceError as e:
            click.secho(str(e), fg="red")
            sys.exit(1)

        gevent_tasks: List[gevent.Greenlet] = list()
        runnable_tasks: List[Runnable] = list()

        # RaidenService takes care of Transport and AlarmTask
        runnable_tasks.append(app_.raiden)

        domain_list = []
        if self._options["rpccorsdomain"]:
            if "," in self._options["rpccorsdomain"]:
                for domain in self._options["rpccorsdomain"].split(","):
                    domain_list.append(str(domain))
            else:
                domain_list.append(str(self._options["rpccorsdomain"]))

        self._raiden_api = RaidenAPI(app_.raiden)

        if self._options["rpc"]:
            rest_api = RestAPI(self._raiden_api)
            (api_host, api_port) = split_endpoint(self._options["api_address"])

            if not api_port:
                api_port = Port(settings.DEFAULT_HTTP_SERVER_PORT)

            api_server = APIServer(
                rest_api,
                config={"host": api_host, "port": api_port},
                cors_domain_list=domain_list,
                web_ui=self._options["web_ui"],
                eth_rpc_endpoint=self._options["eth_rpc_endpoint"],
            )

            try:
                api_server.start()
            except APIServerPortInUseError:
                click.secho(
                    f"ERROR: API Address {api_host}:{api_port} is in use. "
                    f"Use --api-address <host:port> to specify a different port.",
                    fg="red",
                )
                sys.exit(1)

            print(
                "The Raiden API RPC server is now running at http://{}:{}/.\n\n"
                "See the Raiden documentation for all available endpoints at\n"
                "http://raiden-network.readthedocs.io/en/stable/rest_api.html".format(
                    api_host, api_port
                )
            )
            runnable_tasks.append(api_server)

        if self._options["console"]:
            from raiden.ui.console import Console

            console = Console(app_)
            console.start()

            gevent_tasks.append(console)

        # spawn a greenlet to handle the version checking
        version = get_system_spec()["raiden"]

        gevent_tasks.append(gevent.spawn(check_version, version))

        # spawn a greenlet to handle the gas reserve check
        gevent_tasks.append(gevent.spawn(check_gas_reserve, app_.raiden))

        # spawn a greenlet to handle the periodic check for the network id
        gevent_tasks.append(
            gevent.spawn(
                check_network_id, app_.raiden.rpc_client.chain_id, app_.raiden.rpc_client.web3
            )
        )

        spawn_user_deposit_task = app_.user_deposit and (
            self._options["pathfinding_service_address"] or self._options["enable_monitoring"]
        )
        if spawn_user_deposit_task:
            # spawn a greenlet to handle RDN deposits check
            gevent_tasks.append(gevent.spawn(check_rdn_deposits, app_.raiden, app_.user_deposit))

        self._startup_hook()

        # wait for interrupt
        event: "AsyncResult[None]" = AsyncResult()

        def sig_set(sig=None, _frame=None):
            event.set(sig)

        gevent.signal(signal.SIGQUIT, sig_set)
        gevent.signal(signal.SIGTERM, sig_set)
        gevent.signal(signal.SIGINT, sig_set)

        # quit if any task exits, successfully or not
        for runnable in runnable_tasks:
            runnable.greenlet.link(event)

        for task in gevent_tasks:
            task.link(event)

        try:
            event.get()
            print("Signal received. Shutting down ...")
        except (ConnectionError, ConnectTimeout, RequestsConnectionError):
            print(ETHEREUM_NODE_COMMUNICATION_ERROR)
            sys.exit(1)
        except RaidenError as ex:
            click.secho(f"FATAL: {ex}", fg="red")
        except Exception as ex:
            file = NamedTemporaryFile(
                "w",
                prefix=f"raiden-exception-{datetime.utcnow():%Y-%m-%dT%H-%M}",
                suffix=".txt",
                delete=False,
            )
            with file as traceback_file:
                traceback.print_exc(file=traceback_file)
                click.secho(
                    f"FATAL: An unexpected exception occured. "
                    f"A traceback has been written to {traceback_file.name}\n"
                    f"{ex}",
                    fg="red",
                )
        finally:
            self._shutdown_hook()

            for task in gevent_tasks:
                task.kill()

            for task in runnable_tasks:
                task.stop()

            gevent.joinall(
                set(gevent_tasks + runnable_tasks),
                app_.config.get("shutdown_timeout", settings.DEFAULT_SHUTDOWN_TIMEOUT),
                raise_error=True,
            )

        return app_
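The shutdown logic in this example is worth isolating: signal handlers and task exits all resolve a single AsyncResult, so the main greenlet has exactly one thing to wait on. A minimal sketch of that rendezvous, with a dummy worker standing in for the real runnables (gevent.signal is gevent.signal_handler on gevent >= 1.5):

import signal

import gevent
from gevent.event import AsyncResult

event = AsyncResult()


def sig_set(sig=None, _frame=None):
    event.set(sig)


gevent.signal(signal.SIGTERM, sig_set)
gevent.signal(signal.SIGINT, sig_set)

# A stand-in for the real runnable/greenlet tasks.
worker = gevent.spawn(gevent.sleep, 3600)
# An AsyncResult is a valid link() target: a task exit also wakes the waiter.
worker.link(event)

event.get()  # blocks until a signal fires or the worker exits
print("Shutting down ...")
worker.kill()
gevent.joinall([worker], timeout=5)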
Example No. 59
                    object_dict['id'] = category_index.get(value)['id']
                    object_dict['name'] = category_index.get(value)['name']
                    object_dict['score'] = float(scores[0, index])
                    (ymin, xmin, ymax,
                     xmax) = boxes[0, index] * np.array([h, w, h, w])
                    object_dict['box'] = {
                        'ymin': ymin,
                        'xmin': xmin,
                        'ymax': ymax,
                        'xmax': xmax
                    }
                    labels.append(object_dict)

            objects_in_image.append({'image': image_path, 'labels': labels})

        return json.dumps(objects_in_image)


# Create zerorpc object.
zerorpc_obj = DetectRPC()
# Create and bind zerorpc server.
s = zerorpc.Server(zerorpc_obj, heartbeat=ZRPC_HEARTBEAT)
s.bind(ZRPC_PIPE)
# Register graceful ways to stop server.
gevent.signal(signal.SIGINT, s.stop)  # Ctrl-C
gevent.signal(signal.SIGTERM, s.stop)  # termination
# Start server. run() blocks until stop() is called by a signal handler.
s.run()
# After server is stopped then close the tf session.
zerorpc_obj.close_sess()
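The same start/stop wiring works for any zerorpc service. A minimal self-contained sketch with a trivial echo service in place of the detector; the TCP endpoint is an arbitrary example, and gevent.signal is spelled gevent.signal_handler on gevent >= 1.5:

import signal

import gevent
import zerorpc


class EchoRPC(object):
    def echo(self, text):
        return text


s = zerorpc.Server(EchoRPC())
s.bind("tcp://127.0.0.1:4242")
# stop() makes run() return, so the signal handlers end the block below.
gevent.signal(signal.SIGINT, s.stop)
gevent.signal(signal.SIGTERM, s.stop)
s.run()  # serves requests until stop() is called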
Example No. 60
def parse_settings(root_path):
    config['ROOT_PATH'] = root_path
    # Set the default config files up
    config_files = [get_path('config/config.ini')]
    if '-cf' in sys.argv or '--config' in sys.argv:
        config_files = []
    parser = configargparse.ArgParser(default_config_files=config_files)
    parser.add_argument(
        '-cf', '--config', is_config_file=True, help='Configuration file')
    parser.add_argument(
        '-d', '--debug', help='Debug Mode', action='store_true', default=False)
    parser.add_argument(
        '-H', '--host', help='Set web server listening host',
        default='127.0.0.1')
    parser.add_argument(
        '-P', '--port', type=int,
        help='Set web server listening port', default=4000)
    parser.add_argument(
        '-C', '--concurrency', type=int,
        help='Maximum concurrent connections for the webserver.', default=200)
    parser.add_argument(
        '-m', '--manager_count', type=int, default=1,
        help='Number of Manager processes to start.')
    parser.add_argument(
        '-M', '--manager_name', type=parse_unicode,
        action='append', default=[],
        help='Names of Manager processes to start.')
    parser.add_argument(
        '-k', '--key', type=str, action='append', default=[],
        help='Specify a Google API Key to use.')
    parser.add_argument(
        '-f', '--filters', type=parse_unicode, action='append',
        default=['filters.json'],
        help='Filters configuration file. default: filters.json')
    parser.add_argument(
        '-a', '--alarms', type=parse_unicode, action='append',
        default=['alarms.json'],
        help='Alarms configuration file. default: alarms.json')
    parser.add_argument(
        '-r', '--rules', type=parse_unicode, action='append',
        default=[None],
        help='Rules configuration file. default: None')
    parser.add_argument(
        '-gf', '--geofences', type=parse_unicode,
        action='append', default=[None],
        help='Geofences configuration file. default: None')
    parser.add_argument(
        '-l', '--location', type=parse_unicode, action='append',
        default=[None], help='Location, can be an address or coordinates')
    parser.add_argument(
        '-L', '--locale', type=parse_unicode, action='append', default=['en'],
        choices=['de', 'en', 'es', 'fr', 'it', 'ko', 'pt', 'zh_hk'],
        help='Locale for Pokemon and Move names: default en, '
             'check the locale folder for more options')
    parser.add_argument(
        '-u', '--units', type=parse_unicode, default=['imperial'],
        action='append', choices=['metric', 'imperial'],
        help='Specify either metric or imperial units to use for distance '
             'measurements.')
    parser.add_argument(
        '-ct', '--cache_type', type=parse_unicode, action='append',
        default=['mem'], choices=cache_options,
        help="Specify the type of cache to use. Options: "
             + "['mem', 'file'] (Default: 'mem')")
    parser.add_argument(
        '-tl', '--timelimit', type=int, default=[0], action='append',
        help='Minimum limit')
    parser.add_argument(
        '-ma', '--max_attempts', type=int, default=[3], action='append',
        help='Maximum attempts an alarm makes to send a notification.')
    parser.add_argument(
        '-tz', '--timezone', type=str, action='append', default=[None],
        help='Timezone used for notifications. Ex: "America/Los_Angeles"')

    args = parser.parse_args()

    if args.debug:
        log.setLevel(logging.DEBUG)
        logging.getLogger().setLevel(logging.DEBUG)
        logging.getLogger('PokeAlarm').setLevel(logging.DEBUG)
        logging.getLogger('Manager').setLevel(logging.DEBUG)
        log.debug("Debug mode enabled!")

    config['HOST'] = args.host
    config['PORT'] = args.port
    config['CONCURRENCY'] = args.concurrency
    config['DEBUG'] = args.debug

    # Check to make sure that the same number of arguments are included
    for arg in [args.filters, args.alarms, args.geofences,
                args.location, args.locale, args.units, args.cache_type,
                args.timelimit, args.max_attempts,
                args.timezone]:
        if len(arg) > 1:  # Remove defaults from the list
            arg.pop(0)
        size = len(arg)
        if size != 1 and size != args.manager_count:
            log.critical("Number of arguments must be either 1 for all "
                         + "managers or equal to Manager Count. Please "
                         + "provided the correct number of arguments.")
            log.critical(arg)
            sys.exit(1)

    # Attempt to parse the timezones
    for i in range(len(args.timezone)):
        if str(args.timezone[i]).lower() == "none":
            args.timezone[i] = None
            continue
        try:
            log.info(args.timezone[i])
            args.timezone[i] = pytz.timezone(args.timezone[i])
        except pytz.exceptions.UnknownTimeZoneError:
            log.error("Invalid timezone. For a list of valid timezones, see "
                      + "https://en.wikipedia.org/wiki"
                      + "/List_of_tz_database_time_zones")
            sys.exit(1)

    # Pad manager_name to match manager_count
    while len(args.manager_name) < args.manager_count:
        m_ct = len(args.manager_name)
        args.manager_name.append("Manager_{}".format(m_ct))

    # Build the managers
    for m_ct in range(args.manager_count):
        # TODO: Fix this mess better next time
        config['UNITS'] = get_from_list(args.units, m_ct, args.units[0])
        m = Manager(
            name=args.manager_name[m_ct],
            google_key=args.key,
            locale=get_from_list(args.locale, m_ct, args.locale[0]),
            units=get_from_list(args.units, m_ct, args.units[0]),
            timezone=get_from_list(args.timezone, m_ct, args.timezone[0]),
            time_limit=get_from_list(args.timelimit, m_ct, args.timelimit[0]),
            max_attempts=get_from_list(
                args.max_attempts, m_ct, args.max_attempts[0]),
            quiet=False,  # TODO: I'll totally document this some day. Promise.
            cache_type=get_from_list(
                args.cache_type, m_ct, args.cache_type[0]),
            location=get_from_list(args.location, m_ct, args.location[0]),
            filter_file=get_from_list(args.filters, m_ct, args.filters[0]),
            geofence_file=get_from_list(
                args.geofences, m_ct, args.geofences[0]),
            alarm_file=get_from_list(args.alarms, m_ct, args.alarms[0]),
            debug=config['DEBUG']
        )
        parse_rules_file(m, get_from_list(args.rules, m_ct, args.rules[0]))
        if m.get_name() not in managers:
            # Add the manager to the map
            managers[m.get_name()] = m
        else:
            log.critical("Names of Manager processes must be unique "
                         + "(not case sensitive)! Process will exit.")
            sys.exit(1)
    log.info("Starting up the Managers")
    for m_name in managers:
        managers[m_name].start()

    # Set up signal handlers for graceful exit
    gevent.signal(signal.SIGINT, exit_gracefully)
    gevent.signal(signal.SIGTERM, exit_gracefully)
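The handlers registered above are the last piece of the example; a minimal sketch of what an exit_gracefully might look like follows. The managers map comes from the code above, but the stop()/join() calls are hypothetical: the snippet does not show the real Manager shutdown API, so treat them as placeholders.

import signal
import sys

import gevent

managers = {}  # name -> Manager, populated by parse_settings()


def exit_gracefully(signum=None, frame=None):
    # Hypothetical Manager API: stop each worker, wait, then exit.
    for manager in managers.values():
        manager.stop()
        manager.join()
    sys.exit(0)


gevent.signal(signal.SIGINT, exit_gracefully)
gevent.signal(signal.SIGTERM, exit_gracefully)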