Example #1
def test_submit_v1_service_checks():
    f = Forwarder("api_key", DOMAIN)
    f.submit_v1_service_checks("data", None)
    t = get_transaction(f)

    assert t.endpoint == "/api/v1/check_run?api_key=api_key"
    assert t.payload == "data"
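
This test (and the other forwarder tests below) calls a get_transaction helper whose body is not shown in this collection. A plausible sketch, assuming the Forwarder buffers submitted transactions on an input queue (an assumption, not the repository's confirmed code):

# Hypothetical sketch of the get_transaction helper these tests call; it
# assumes the Forwarder exposes the queue its workers consume from.
def get_transaction(forwarder):
    # Pop the transaction the test just submitted without blocking, so a
    # missing transaction raises queue.Empty instead of hanging the test.
    return forwarder.input_queue.get(block=False)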
Example #2
    def SetUp(self):
        """Sets up the test harness and device before all tests are run."""
        super(TestRunner, self).SetUp()
        if not self.adb.IsRootEnabled():
            logging.warning(
                'Unable to enable java asserts for %s, non rooted device',
                self.device)
        else:
            if self.adb.SetJavaAssertsEnabled(enable=True):
                self.adb.Reboot(full_reboot=False)

        # We use a different default port for the HTTP server on each shard
        # index, because multiple processes racing to launch lighttpd on the
        # same port at the same time can collide.
        # This line *must* come before the forwarding below, as it nukes all
        # the other forwarders. A more comprehensive fix might be to pull the
        # forwarder-killing line up to here, but that might violate assumptions
        # implicit in other places.
        self.LaunchTestHttpServer(
            os.path.join(constants.CHROME_DIR),
            (constants.LIGHTTPD_RANDOM_PORT_FIRST + self.shard_index))

        if self.ports_to_forward:
            for port in self.ports_to_forward:
                self.forwarders.append(
                    Forwarder(self.adb, [(port, port)], self.tool, '127.0.0.1',
                              self.build_type))
        self.CopyTestFilesOnce()
        self.flags.AddFlags(['--enable-test-intents'])
Example #3
    def SetUp(self):
        """Sets up the test harness and device before all tests are run."""
        super(TestRunner, self).SetUp()
        if not self.adb.IsRootEnabled():
            logging.warning(
                'Unable to enable java asserts for %s, non rooted device',
                self.device)
        else:
            if self.adb.SetJavaAssertsEnabled(
                    enable=not self.disable_assertions):
                self.adb.Reboot(full_reboot=False)

        # We use a different default port for the HTTP server on each shard
        # index, because multiple processes racing to launch lighttpd on the
        # same port at the same time can collide.
        http_server_ports = self.LaunchTestHttpServer(
            os.path.join(constants.CHROME_DIR),
            (constants.LIGHTTPD_RANDOM_PORT_FIRST + self.shard_index))
        if self.ports_to_forward:
            port_pairs = [(port, port) for port in self.ports_to_forward]
            # We need to remember which ports the HTTP server is using, since the
            # forwarder will stomp on them otherwise.
            port_pairs.append(http_server_ports)
            self.forwarder = Forwarder(self.adb, self.build_type)
            self.forwarder.Run(port_pairs, self.tool, '127.0.0.1')
        self.CopyTestFilesOnce()
        self.flags.AddFlags(['--enable-test-intents'])
Example #4
 def LaunchChromeTestServerSpawner(self):
     """Launches test server spawner."""
     server_ready = False
     error_msgs = []
     # Try 3 times to launch test spawner server.
     for i in xrange(0, 3):
         # Do not allocate port for test server here. We will allocate
         # different port for individual test in TestServerThread.
         self.test_server_spawner_port = ports.AllocateTestServerPort()
         self._spawning_server = SpawningServer(
             self.test_server_spawner_port, self.adb, self.tool,
             self.build_type)
         self._spawning_server.Start()
         server_ready, error_msg = ports.IsHttpServerConnectable(
             '127.0.0.1',
             self.test_server_spawner_port,
             path='/ping',
             expected_read='ready')
         if server_ready:
             break
         else:
             error_msgs.append(error_msg)
         self._spawning_server.Stop()
         # Wait for 2 seconds then restart.
         time.sleep(2)
     if not server_ready:
         logging.error(';'.join(error_msgs))
         raise Exception('Cannot start the test spawner server.')
     self._PushTestServerPortInfoToDevice()
     self._spawner_forwarder = Forwarder(
         self.adb,
         [(self.test_server_spawner_port, self.test_server_spawner_port)],
         self.tool, '127.0.0.1', self.build_type)
Example #5
async def handle_get(request: Request):
    url = get_server_location(request)
    try:
        f = Forwarder(url, 'GET', None, request.headers)
        resp, req = await f.forward(SELENIUM_TIMEOUT)
    except ClientTimeout:
        return error_on_timeout()
    return web.json_response(resp, status=req.status)
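
This handler, like the other HTTP-method handlers in this collection, drives a Forwarder(url, method, payload, headers) object and awaits forward(timeout), unpacking a (body, response) pair. A minimal sketch of that interface, assuming aiohttp on the client side and a project-local ClientTimeout exception (both assumptions, not the project's confirmed code):

# A minimal sketch, not the project's actual code, of the Forwarder interface
# these handlers assume. ClientTimeout is presumed to be project-local
# (aiohttp's own ClientTimeout is a timeout config object, not an exception).
import asyncio
import aiohttp

class ClientTimeout(Exception):
    """Raised when the upstream server does not answer within the timeout."""

class Forwarder:
    def __init__(self, url, method, payload, headers):
        self.url = url
        self.method = method
        self.payload = payload
        self.headers = dict(headers)

    async def forward(self, timeout):
        try:
            async with aiohttp.ClientSession(
                    timeout=aiohttp.ClientTimeout(total=timeout)) as session:
                async with session.request(self.method, self.url,
                                           json=self.payload,
                                           headers=self.headers) as resp:
                    # Return the decoded body plus the response object, so
                    # callers can read resp.status (the `req.status` above).
                    return await resp.json(), resp
        except asyncio.TimeoutError:
            raise ClientTimeout()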
Example #6
def init_dogstatsd(config):
    api_key = config['api_key']
    recent_point_threshold = config.get('recent_point_threshold', None)
    server_host = config['dogstatsd']['bind_host']
    dd_url = config['dd_url']
    port = config['dogstatsd']['port']
    forward_to_host = config['dogstatsd'].get('forward_host')
    forward_to_port = config['dogstatsd'].get('forward_port')
    non_local_traffic = config['dogstatsd'].get('non_local_traffic')
    so_rcvbuf = config['dogstatsd'].get('so_rcvbuf')
    utf8_decoding = config['dogstatsd'].get('utf8_decoding')

    interval = DOGSTATSD_FLUSH_INTERVAL
    aggregator_interval = DOGSTATSD_AGGREGATOR_BUCKET_SIZE

    hostname = get_hostname()

    # get proxy settings
    proxies = get_proxy()

    forwarder = Forwarder(
        api_key,
        dd_url,
        proxies=proxies,
    )
    forwarder.start()

    aggregator = MetricsBucketAggregator(
        hostname,
        aggregator_interval,
        recent_point_threshold=recent_point_threshold,
        formatter=get_formatter(config),
        histogram_aggregates=config.get('histogram_aggregates'),
        histogram_percentiles=config.get('histogram_percentiles'),
        utf8_decoding=utf8_decoding
    )
    # serializer
    serializer = Serializer(
        aggregator,
        forwarder,
    )

    reporter = Reporter(interval, aggregator, serializer, api_key,
                        use_watchdog=False, hostname=hostname)

    # NOTICE: when `non_local_traffic` is set we need to bind to any interface on the box. The forwarder uses
    # Tornado, which takes care of socket creation (more than one socket can be used at once depending on the
    # network settings), so it is enough to pass an empty string '' to the library.
    # Dogstatsd uses a single, full-stack socket, so passing '' as the address does not work, and we default to
    # '0.0.0.0'. If someone needs to bind Dogstatsd to the IPv6 '::', they must turn off `non_local_traffic` and
    # use the '::' meta-address as `bind_host`.
    if non_local_traffic:
        server_host = '0.0.0.0'

    server = Server(aggregator, server_host, port, forward_to_host=forward_to_host,
                    forward_to_port=forward_to_port, so_rcvbuf=so_rcvbuf)

    return reporter, server, forwarder
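
The config keys read by init_dogstatsd() above imply the following shape. A hypothetical minimal config (placeholder values, not an official sample) that would satisfy it:

# Hypothetical minimal config for init_dogstatsd(); the keys mirror what the
# function above reads, and all values are placeholders.
config = {
    'api_key': 'my_api_key',
    'dd_url': 'https://app.datadoghq.com',
    'recent_point_threshold': None,
    'histogram_aggregates': None,
    'histogram_percentiles': None,
    'dogstatsd': {
        'bind_host': 'localhost',
        'port': 8125,               # conventional dogstatsd port
        'forward_host': None,
        'forward_port': None,
        'non_local_traffic': False,
        'so_rcvbuf': None,
        'utf8_decoding': False,
    },
}

reporter, server, forwarder = init_dogstatsd(config)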
Example #7
    def StartForwarder(self, port_pairs):
        """Starts TCP traffic forwarding for the given |port_pairs|.

        Args:
          port_pairs: A list of (device_port, local_port) tuples to forward.
        """
        if self._forwarder:
            self._forwarder.Close()
        self._forwarder = Forwarder(self.adb, port_pairs, self.tool,
                                    '127.0.0.1', self.build_type)
Example #8
 def run(self):
     logging.info('Start running the thread!')
     self.wait_event.clear()
     self._GenerateCommandLineArguments()
     command = constants.CHROME_DIR
     if self.arguments['server-type'] == 'sync':
         command = [
             os.path.join(command, 'sync', 'tools', 'testserver',
                          'sync_testserver.py')
         ] + self.command_line
     else:
         command = [
             os.path.join(command, 'net', 'tools', 'testserver',
                          'testserver.py')
         ] + self.command_line
     logging.info('Running: %s', command)
     self.process = subprocess.Popen(command)
     if self.process:
         if self.pipe_out:
             self.is_ready = self._WaitToStartAndGetPortFromTestServer()
         else:
             self.is_ready = _CheckPortStatus(self.host_port, True)
     if self.is_ready:
         self._test_server_forwarder = Forwarder(self.adb, self.build_type)
         self._test_server_forwarder.Run([(0, self.host_port)], self.tool,
                                         '127.0.0.1')
         # Check whether the forwarder is ready on the device.
         self.is_ready = False
         device_port = self._test_server_forwarder.DevicePortForHostPort(
             self.host_port)
         if device_port:
             for timeout in range(1, 5):
                 if ports.IsDevicePortUsed(self.adb, device_port, 'LISTEN'):
                     self.is_ready = True
                     self.forwarder_device_port = device_port
                     break
                 time.sleep(timeout)
     # Wake up the request handler thread.
     self.ready_event.set()
     # Keep thread running until Stop() gets called.
     while not self.stop_flag:
         time.sleep(1)
     if self.process.poll() is None:
         self.process.kill()
     if self._test_server_forwarder:
         self._test_server_forwarder.Close()
     self.process = None
     self.is_ready = False
     if self.pipe_out:
         os.close(self.pipe_in)
         os.close(self.pipe_out)
         self.pipe_in = None
         self.pipe_out = None
     logging.info('Test-server has died.')
     self.wait_event.set()
Example #9
    def __init__(self):
        self.API_TOKEN = "1700885261:AAETCokNpqNDk44x3d5XASfnQfzxiNOKWfI"  ## os.environ["SECRET"]
        self.contatinhosSheet = "http://bit.ly/contatosbcc021"
        self.githubRepo = "https://github.com/lineuzinho-icmc/lineuzinho"
        self.usefulLinks = "Estamos adicionando todo mundo aos poucos. Se puder ajudar a achar o pessoal, passa o link do grupo na descrição!\n\nInscrição na semana de recepção: calouros.icmc.usp.br/\n\nGuia do Bixo: https://bit.ly/3c9mcUG\n\nContatinho de geral: {0}\n\n".format(
            self.contatinhosSheet)
        self.docsChannel = "https://t.me/docs21"

        self.greeter = Greeter()
        self.forwarder = Forwarder()
        self.beaner = Beaner()
Example #10
    def __init__(self):
        self.API_TOKEN = os.environ["SECRET"]
        self.contatinhosSheet = "http://bit.ly/contatosbcc021"
        self.githubRepo = "https://github.com/lineuzinho-icmc/lineuzinho"
        self.usefulLinks = "Estamos adicionando todo mundo aos poucos. Se puder ajudar a achar o pessoal, passa o link do grupo na descrição!\n\nInscrição na semana de recepção: calouros.icmc.usp.br/\n\nGuia do Bixo: https://bit.ly/3c9mcUG\n\nContatinho de geral: {0}\n\nEnquetes: https://t.me/joinchat/qrJ_MrnHDbE1ZmNh\n\n".format(
            self.contatinhosSheet)
        self.docsChannel = "https://t.me/docs21"

        self.greeter = Greeter()
        self.forwarder = Forwarder()
        self.beaner = Beaner()

        self.conn = Connection()
        self.last_pi_call = time.time()
Example #11
    def StartForwarder(self, port_pairs):
        """Starts TCP traffic forwarding for the given |port_pairs|.

        Args:
          port_pairs: A list of (device_port, local_port) tuples to forward.
        """
        # Sometimes the forwarder's device port is already in use. We have to
        # kill all forwarder processes to ensure the forwarder can start, since
        # we currently cannot associate the specified port with the owning pid.
        self.adb.KillAll('forwarder')
        if self._forwarder:
            self._forwarder.Close()
        self._forwarder = Forwarder(self.adb, port_pairs, self.tool,
                                    '127.0.0.1', self.build_type)
Example #12
async def run(is_debug_mode):
    """
    Runs the cluster discovery & forwarder.
    """
    asyncio.create_task(_epsagon_conf_watcher(is_debug_mode))
    events_manager = InMemoryEventsManager()
    epsagon_client = await EpsagonClient.create(EPSAGON_TOKEN)
    events_sender = EventsSender(
        epsagon_client,
        COLLECTOR_URL,
        CLUSTER_NAME,
        EPSAGON_TOKEN
    )
    cluster_discovery = ClusterDiscovery(
        events_manager.write_event,
        should_collect_resources=SHOULD_COLLECT_RESOURCES,
        should_collect_events=SHOULD_COLLECT_EVENTS,
    )
    forwarder = Forwarder(
        events_manager,
        events_sender
    )
    while True:
        try:
            tasks = [
                asyncio.create_task(forwarder.start()),
                asyncio.create_task(cluster_discovery.start())
            ]
            await asyncio.gather(*tasks)
        except (
                client_exceptions.ClientError,
                socket.gaierror,
                ConnectionRefusedError,
                EpsagonClientException
        ):
            logging.error(
                "Connection error, restarting agent in %d seconds",
                RESTART_WAIT_TIME_SECONDS
            )
            _cancel_tasks(tasks)
            events_manager.clean()
            await asyncio.sleep(RESTART_WAIT_TIME_SECONDS)
        except Exception as exception:
            logging.error(str(exception))
            logging.error(format_exc())
            logging.info("Agent is exiting due to an unexpected error")
            _cancel_tasks(tasks)
            await epsagon_client.close()
            break
Example #13
def test_submit_payload_():
    f = Forwarder("api_key", DOMAIN)

    f._submit_payload("test", "data", {"test": 21})
    t = get_transaction(f)
    assert t.payload == "data"
    assert t.domain == DOMAIN
    assert t.endpoint == "test?api_key=api_key"
    assert t.headers == {"test": 21, Forwarder.DD_API_HEADER: "api_key"}

    f._submit_payload("test", "data")
    t = get_transaction(f)
    assert t.payload == "data"
    assert t.domain == DOMAIN
    assert t.endpoint == "test?api_key=api_key"
    assert t.headers == {Forwarder.DD_API_HEADER: "api_key"}
Example #14
async def delete_session(request: Request):
    """
    DELETE /session/{session_id}
    :param request:
    :return:
    """
    session_id = request.match_info['session_id']
    url = get_server_location(request)
    f = Forwarder(url, 'DELETE', None, request.headers)
    try:
        resp, req = await f.forward(SELENIUM_TIMEOUT)
    except ClientTimeout:
        return error_on_timeout()
    if session_id in request.app['session_cache']:
        request.app['session_cache'].pop(session_id)
    return web.json_response(resp)
Example #15
async def new_session(request):
    """
    POST /session creates a new session
    :param request:
    :return:
    """
    hub = await request.app['selenium_hubs'].select_hub()
    payload = await request.json()
    url = '{}/session'.format(hub.rstrip('/'))
    f = Forwarder(url, 'POST', payload, request.headers)
    try:
        resp, req = await f.forward(SELENIUM_TIMEOUT)
    except ClientTimeout:
        return error_on_timeout()
    if 'sessionId' in resp:
        sess_id = resp['sessionId']
        request.app['session_cache'][sess_id] = (hub, dt.datetime.now())
    return web.json_response(resp, status=req.status)
Example #16
def test_forwarder_start_stop():
    f = Forwarder("api_key", "https://datadog.com", 2)
    f.start()

    assert len(f.workers) == 2
    assert f.workers[0].is_alive()
    assert f.workers[1].is_alive()
    assert f.retry_worker.is_alive()

    tmp_workers = f.workers
    tmp_retry_worker = f.retry_worker

    f.stop()

    assert len(f.workers) == 0
    assert f.retry_worker is None
    assert not tmp_workers[0].is_alive()
    assert not tmp_workers[1].is_alive()
    assert not tmp_retry_worker.is_alive()
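
The lifecycle this test exercises could be satisfied by a forwarder that spawns num_workers worker threads plus one retry worker on start(), then joins and clears them on stop(). A minimal sketch under those assumptions, not the agent's actual implementation:

# Minimal sketch of a Forwarder matching the start()/stop() contract above;
# the worker internals are illustrative assumptions.
import queue
import threading

class _Worker(threading.Thread):
    """Thread that drains callables from a queue until halted."""

    def __init__(self, work_queue):
        super(_Worker, self).__init__()
        self._queue = work_queue
        self._halted = threading.Event()

    def run(self):
        while not self._halted.is_set():
            try:
                task = self._queue.get(timeout=0.1)
            except queue.Empty:
                continue
            task()

    def halt(self):
        self._halted.set()

class Forwarder:
    def __init__(self, api_key, domain, num_workers=1):
        self.api_key = api_key
        self.domain = domain
        self.num_workers = num_workers
        self.input_queue = queue.Queue()
        self.retry_queue = queue.Queue()
        self.workers = []
        self.retry_worker = None

    def start(self):
        # num_workers transaction workers plus one retry worker.
        for _ in range(self.num_workers):
            worker = _Worker(self.input_queue)
            worker.start()
            self.workers.append(worker)
        self.retry_worker = _Worker(self.retry_queue)
        self.retry_worker.start()

    def stop(self):
        # Signal every thread, wait for it to exit, then reset state.
        for worker in self.workers + [self.retry_worker]:
            worker.halt()
            worker.join()
        self.workers = []
        self.retry_worker = None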
Example #17
    def test_something(self):
        try:
            ff = Forwarder(concurrency=10)

            for _ in range(2000):
                ff.issue(
                    Request('/dev/ram0',
                            randint(0, 1),
                            randint(0, 1024 * 1024 * 500),
                            length=512,
                            data=b'\xcd' * 512))
            sleep(0.5)
            lat = 0
            for i in range(2000):
                res = ff.res_Q.pop()
                lat = (lat * i + res.latency) / (i + 1)
                # print(res.addr, res.type, res.latency * 1000)
            print("\nmean latency = %f" % (lat * 1000))
            # Sanity-check that a mean latency was actually computed.
            self.assertGreater(lat, 0)
        except Exception as e:
            # Surface any unexpected error as an explicit test failure.
            self.fail(e)
Example #18
class Proxy:
    forwarder = Forwarder()

    def __init__(self):
        raise Exception('Proxy is a static class, so can not be instantiated.')

    @staticmethod
    def invalidate(blk):
        if blk.typ == CacheConf.ram_blk:
            Tables.RAM.invalidate(blk.addr)
        elif blk.typ == CacheConf.ssd_blk:
            Tables.SSD.invalidate(blk.addr)

    @staticmethod
    def promote(blk, dst):
        pass

    @staticmethod
    def write(blk):
        Proxy.forwarder.issue(Request(CacheConf.get_dev(blk.typ), blk.typ, blk.addr, data=b'\x00' * blk.len))

    @staticmethod
    def read(blk):
        Proxy.forwarder.issue(Request(CacheConf.get_dev(blk.typ), blk.typ, blk.addr, length=blk.len))
Example #19
    def run(self):
        try:
            hostname = get_hostname()
        except HostnameException as e:
            logging.critical(
                "{} - You can define one in datadog.yaml or in your hosts file"
                .format(e))
            sys.exit(1)

        logging.info("Starting the agent, hostname: %s", hostname)

        # init Forwarder
        logging.info("Starting the Forwarder")
        api_key = config.get('api_key')
        dd_url = config.get('dd_url')
        if not dd_url:
            logging.error('No Datadog URL configured - cannot continue')
            sys.exit(1)
        if not api_key:
            logging.error('No API key configured - cannot continue')
            sys.exit(1)

        # get proxy settings
        proxies = get_proxy()
        logging.debug('Proxy configuration used: %s', proxies)

        # get site url
        forwarder = Forwarder(
            api_key,
            get_site_url(dd_url, site=config.get('site')),
            proxies=proxies,
        )
        forwarder.start()

        # agent aggregator
        aggregator = MetricsAggregator(
            hostname,
            interval=config.get('aggregator_interval'),
            expiry_seconds=(config.get('min_collection_interval') +
                            config.get('aggregator_expiry_seconds')),
            recent_point_threshold=config.get('recent_point_threshold'),
            histogram_aggregates=config.get('histogram_aggregates'),
            histogram_percentiles=config.get('histogram_percentiles'),
        )

        # serializer
        serializer = Serializer(
            aggregator,
            forwarder,
        )

        # instantiate collector
        collector = Collector(config, aggregator)
        collector.load_check_classes()
        collector.instantiate_checks()

        # instantiate AgentRunner
        runner = AgentRunner(collector, serializer, config)

        # instantiate Dogstatsd
        reporter = None
        dsd_server = None
        dsd_enable = config['dogstatsd'].get('enable', False)
        if dsd_enable:
            reporter, dsd_server, _ = init_dogstatsd(config,
                                                     forwarder=forwarder)
            dsd = DogstatsdRunner(dsd_server)

        # instantiate API
        status = {
            'agent': aggregator.stats,
            'forwarder': forwarder.stats,
            'collector': collector.status,
        }
        if dsd_server:
            status['dogstatsd'] = dsd_server.aggregator.stats

        api = APIServer(config, status=status)

        handler = SignalHandler()
        # components
        handler.register('runner', runner)
        handler.register('forwarder', forwarder)
        handler.register('api', api)
        if dsd_enable:
            handler.register('reporter', reporter)
            handler.register('dsd_server', dsd_server)

        # signals
        handler.handle(signal.SIGTERM)
        handler.handle(signal.SIGINT)

        # start signal handler
        handler.start()

        runner.start()
        api.start()

        if dsd_enable:
            reporter.start()
            dsd.start()

            dsd.join()
            logging.info("Dogstatsd server done...")
            try:
                dsd.raise_for_status()
            except Exception as e:
                logging.error(
                    "There was a problem with the dogstatsd server: %s", e)
                reporter.stop()

        runner.join()
        logging.info("Collector done...")

        api.join()
        logging.info("API done...")

        handler.stop()
        handler.join()
        logging.info("Signal handler done...")

        logging.info("Thank you for shopping at DataDog! Come back soon!")

        sys.exit(0)
Example #20
def test_forwarder_creation():
    f = Forwarder("api_key", DOMAIN)
    assert f.api_key == "api_key"
    assert f.domain == "https://app.datadoghq.com"
Example #21
    def run(self):
        try:
            hostname = get_hostname()
        except HostnameException as e:
            logging.critical(
                "{} - You can define one in datadog.yaml or in your hosts file"
                .format(e))
            sys.exit(1)

        logging.info("Starting the agent, hostname: %s", hostname)

        # init Forwarder
        logging.info("Starting the Forwarder")
        api_key = config.get('api_key')
        dd_url = config.get('dd_url')
        if not dd_url:
            logging.error('No Datadog URL configured - cannot continue')
            sys.exit(1)
        if not api_key:
            logging.error('No API key configured - cannot continue')
            sys.exit(1)

        # get proxy settings
        proxies = get_proxy()
        logging.debug('Proxy configuration used: %s', proxies)

        forwarder = Forwarder(
            api_key,
            dd_url,
            proxies=proxies,
        )
        forwarder.start()

        # aggregator
        aggregator = MetricsAggregator(
            hostname,
            interval=config.get('aggregator_interval'),
            expiry_seconds=(config.get('min_collection_interval') +
                            config.get('aggregator_expiry_seconds')),
            recent_point_threshold=config.get('recent_point_threshold'),
            histogram_aggregates=config.get('histogram_aggregates'),
            histogram_percentiles=config.get('histogram_percentiles'),
        )

        # serializer
        serializer = Serializer(
            aggregator,
            forwarder,
        )

        # instantiate collector
        collector = Collector(config, aggregator)
        collector.load_check_classes()
        collector.instantiate_checks()

        # instantiate AgentRunner
        runner = AgentRunner(collector, serializer, config)

        # instantiate API
        api = APIServer(8888, aggregator.stats)

        def signal_handler(sig, frame):
            logging.info("SIGINT received: stopping the agent")
            logging.info("Stopping the forwarder")
            runner.stop()
            forwarder.stop()
            api.stop()
            logging.info("See you!")
            sys.exit(0)

        signal.signal(signal.SIGINT, signal_handler)

        runner.start()
        api.run()  # blocking tornado in main thread
Example #22
    def run(self):
        try:
            hostname = get_hostname()
        except HostnameException as e:
            logging.critical(
                "{} - You can define one in datadog.yaml or in your hosts file"
                .format(e))
            sys.exit(1)

        logging.info("Starting the agent, hostname: %s", hostname)

        # init Forwarder
        logging.info("Starting the Forwarder")
        api_key = config.get('api_key')
        dd_url = config.get('dd_url')
        if not dd_url:
            logging.error('No Datadog URL configured - cannot continue')
            sys.exit(1)
        if not api_key:
            logging.error('No API key configured - cannot continue')
            sys.exit(1)

        # get proxy settings
        proxies = get_proxy()
        logging.debug('Proxy configuration used: %s', proxies)

        forwarder = Forwarder(
            api_key,
            dd_url,
            proxies=proxies,
        )
        forwarder.start()

        # aggregator
        aggregator = MetricsAggregator(
            hostname,
            interval=config.get('aggregator_interval'),
            expiry_seconds=(config.get('min_collection_interval') +
                            config.get('aggregator_expiry_seconds')),
            recent_point_threshold=config.get('recent_point_threshold'),
            histogram_aggregates=config.get('histogram_aggregates'),
            histogram_percentiles=config.get('histogram_percentiles'),
        )

        # serializer
        serializer = Serializer(
            aggregator,
            forwarder,
        )

        # instantiate collector
        collector = Collector(config, aggregator)
        collector.load_check_classes()
        collector.instantiate_checks()

        # instantiate AgentRunner
        runner = AgentRunner(collector, serializer, config)

        # instantiate API
        api = APIServer(config, aggregator.stats)

        handler = SignalHandler()
        # components
        handler.register('runner', runner)
        handler.register('forwarder', forwarder)
        handler.register('api', api)
        # signals
        handler.handle(signal.SIGTERM)
        handler.handle(signal.SIGINT)

        # start signal handler
        handler.start()

        runner.start()
        api.start()

        runner.join()
        logging.info("Agent done...")

        api.join()
        logging.info("API done...")

        handler.stop()
        handler.join()
        logging.info("Signal handler done...")

        logging.info("Thank you for shopping at DataDog! Come back soon!")

        sys.exit(0)
Example #23
def start():
    """
    Dummy start until we have a collector
    """
    init_agent()

    hostname = get_hostname()

    logging.info("Starting the agent, hostname: %s", hostname)

    # init Forwarder
    logging.info("Starting the Forwarder")
    api_key = config.get('api_key')
    dd_url = config.get('dd_url')
    if not dd_url:
        logging.error('No Datadog URL configured - cannot continue')
        sys.exit(1)
    if not api_key:
        logging.error('No API key configured - cannot continue')
        sys.exit(1)

    forwarder = Forwarder(api_key, dd_url)
    forwarder.start()

    # aggregator
    aggregator = MetricsAggregator(
        hostname,
        interval=config.get('aggregator_interval'),
        expiry_seconds=(config.get('min_collection_interval') +
                        config.get('aggregator_expiry_seconds')),
        recent_point_threshold=config.get('recent_point_threshold'),
        histogram_aggregates=config.get('histogram_aggregates'),
        histogram_percentiles=config.get('histogram_percentiles'),
    )

    # serializer
    serializer = Serializer(
        aggregator,
        forwarder,
    )

    # instantiate collector
    collector = Collector(config, aggregator)
    collector.load_check_classes()
    collector.instantiate_checks()

    def signal_handler(sig, frame):
        logging.info("SIGINT received: stopping the agent")
        logging.info("Stopping the forwarder")
        forwarder.stop()
        logging.info("See you!")
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)

    # update the metadata periodically?
    metadata = get_metadata(hostname)
    serializer.submit_metadata(metadata)
    while True:
        collector.run_checks()
        serializer.serialize_and_push()
        time.sleep(config.get('min_collection_interval'))
Example #24
    def setup(
        self,
        program,
        switch_mac,
        switch_ip,
        bfrt_ip,
        bfrt_port,
        ports_file,
    ):

        # Device 0
        self.dev = 0
        # Target all pipes
        self.target = gc.Target(self.dev, pipe_id=0xFFFF)
        # Connect to BFRT server
        try:
            interface = gc.ClientInterface('{}:{}'.format(bfrt_ip, bfrt_port),
                                           client_id=0,
                                           device_id=self.dev)
        except RuntimeError as re:
            msg = re.args[0] % re.args[1]
            self.critical_error(msg)
        else:
            self.log.info('Connected to BFRT server {}:{}'.format(
                bfrt_ip, bfrt_port))

        try:
            interface.bind_pipeline_config(program)
        except gc.BfruntimeForwardingRpcException:
            self.critical_error('P4 program {} not found!'.format(program))

        try:
            # Get all tables for program
            self.bfrt_info = interface.bfrt_info_get(program)

            # Ports table
            self.ports = Ports(self.target, gc, self.bfrt_info)

            # Enable loopback on front panel ports
            loopback_ports = (
                [64] +  # Pipe 0 CPU ethernet port
                # Pipe 0: all 16 front-panel ports
                #list(range(  0,  0+64,4)) +
                # Pipe 1: all 16 front-panel ports
                list(range(128, 128 + 64, 4)) +
                # Pipe 2: all 16 front-panel ports
                list(range(256, 256 + 64, 4)) +
                # Pipe 3: all 16 front-panel ports
                list(range(384, 384 + 64, 4)))
            print('Setting {} front panel ports in loopback mode'.format(
                len(loopback_ports)))
            self.ports.set_loopback_mode(loopback_ports)

            # Enable loopback on PktGen ports
            pktgen_ports = [192, 448]

            if not self.ports.get_loopback_mode_pktgen(pktgen_ports):
                # Not all PktGen ports are in loopback mode

                print('\nYou must \'remove\' the ports in the BF ucli:\n')
                for p in pktgen_ports:
                    print('    bf-sde> dvm rmv_port 0 {}'.format(p))
                input('\nPress Enter to continue...')

                if not self.ports.set_loopback_mode_pktgen(pktgen_ports):
                    self.critical_error(
                        'Failed setting front panel ports in loopback mode')

                print('\nAdd the ports again:\n')
                for p in pktgen_ports:
                    print('    bf-sde> dvm add_port 0 {} 100 0'.format(p))
                input('\nPress Enter to continue...')

                if not self.ports.get_loopback_mode_pktgen(pktgen_ports):
                    self.critical_error(
                        'Front panel ports are not in loopback mode')

            # Packet Replication Engine table
            self.pre = PRE(self.target, gc, self.bfrt_info, self.cpu_port)

            # Setup tables
            # Forwarder
            self.forwarder = Forwarder(self.target, gc, self.bfrt_info,
                                       self.all_ports_mgid)
            # ARP and ICMP responder
            self.arp_and_icmp = ARPandICMPResponder(self.target, gc,
                                                    self.bfrt_info)
            # Drop simulator
            self.drop_simulator = DropSimulator(self.target, gc,
                                                self.bfrt_info)
            # RDMA receiver
            self.rdma_receiver = RDMAReceiver(self.target, gc, self.bfrt_info)
            # UDP receiver
            self.udp_receiver = UDPReceiver(self.target, gc, self.bfrt_info)
            # Bitmap checker
            self.bitmap_checker = BitmapChecker(self.target, gc,
                                                self.bfrt_info)
            # Workers counter
            self.workers_counter = WorkersCounter(self.target, gc,
                                                  self.bfrt_info)
            # Exponents
            self.exponents = Exponents(self.target, gc, self.bfrt_info)
            # Processors
            self.processors = []
            for i in range(32):
                p = Processor(self.target, gc, self.bfrt_info, i)
                self.processors.append(p)
            # Next step selector
            self.next_step_selector = NextStepSelector(self.target, gc,
                                                       self.bfrt_info)
            # RDMA sender
            self.rdma_sender = RDMASender(self.target, gc, self.bfrt_info)
            # UDP sender
            self.udp_sender = UDPSender(self.target, gc, self.bfrt_info)

            # Add multicast group for flood
            self.pre.add_multicast_group(self.all_ports_mgid)

            # Enable ports
            success, ports = self.load_ports_file(ports_file)
            if not success:
                self.critical_error(ports)

            # Set switch addresses
            self.set_switch_mac_and_ip(switch_mac, switch_ip)

            # CLI setup
            self.cli = Cli()
            self.cli.setup(self, prompt='SwitchML', name='SwitchML controller')

            # Set up gRPC server
            self.grpc_server = GRPCServer(ip='[::]', port=50099)

            # Run event loop for gRPC server in a separate thread
            # limit concurrency to 1 to avoid synchronization problems in the BFRT interface
            self.grpc_executor = futures.ThreadPoolExecutor(max_workers=1)

            self.event_loop = asyncio.get_event_loop()

        except KeyboardInterrupt:
            self.critical_error('Stopping controller.')
        except Exception as e:
            self.log.exception(e)
            self.critical_error('Unexpected error. Stopping controller.')
Example #25
async def handle_delete(request: Request):
    url = get_server_location(request)
    f = Forwarder(url, 'DELETE', None, request.headers)
    resp, req = await f.forward(SELENIUM_TIMEOUT)
    return web.json_response(resp, status=req.status)
Example #26
 def _CreateAndRunForwarder(self, adb, port_pairs, tool, host_name,
                            build_type):
     """Creates and run a forwarder."""
     forwarder = Forwarder(adb, build_type)
     forwarder.Run(port_pairs, tool, host_name)
     return forwarder
Example #27
import threading
from tqdm import tqdm

from parser import Parser
from forwarder import Forwarder

if __name__ == '__main__':
    parsers = [
        Parser('trace/NEW_2016031907-LUN1.csv'),
        Parser('trace/NEW_2016031907-LUN4.csv')
    ]
    forwarder = Forwarder(concurrency=10)

    llt = 0
    for parser in parsers:
        llt += parser.cnt
        threading.Thread(target=parser.start_sending_requests,
                         args=(forwarder.issue, ),
                         daemon=True).start()

    lats = []
    cnt = 0
    with tqdm(total=llt) as pbar:
        try:
            while True:
                cnt += 1
                if cnt == 500:
                    pbar.update(cnt)
                    cnt = 0
                res = forwarder.res_Q.pop()
                lats.append(res.latency * 1000)
        except KeyboardInterrupt:
            # Assumed completion: the source snippet is truncated here, so this
            # handler only closes the dangling try block.
            pass