Example #1
    def test_disabled_timestamp(self):
        now = time.time()
        payload = {"a": "a", "1": 1, "2.2": 2.2}
        logger, handler = make_test_logger(__name__)
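        # an empty datefmt should suppress the "time" field entirely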
        handler.formatter = JsonFormatter(datefmt="")
        logger.critical("message", payload, extra={"created": now})
        data = json.loads(handler.content)
        assert len(data) == len(payload) + 1
        assert "time" not in data
Example #2
    def test_payload(self):
        for payload in ({"a": "a"}, {str(i): i for i in range(20)}):
            logger, handler = make_test_logger(__name__)
            handler.formatter = JsonFormatter()
            logger.critical("message", payload)
            data = json.loads(handler.content)
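            # output is the payload plus the "message" and "time" fields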
            assert len(data) == len(payload) + 2
            assert data.pop("message") == "message"
            assert data.pop("time")
            assert data == payload
Example #3
    def test_explicit_timestamp(self):
        now = time.time()
        payload = {"a": "a", "1": 1, "2.2": 2.2}
        logger, handler = make_test_logger(__name__)
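        # an explicit datefmt is applied when formatting the record's timestamp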
        handler.formatter = JsonFormatter(datefmt="%Y")
        logger.critical("message", payload, extra={"created": now})
        data = json.loads(handler.content)
        assert len(data) == len(payload) + 2
        # from Formatter and LogRecord
        ct = time.localtime(now)
        default_time_string = time.strftime("%Y", ct)
        assert data.pop("time") == default_time_string
Example #4
def cli(ctx, seed, until, log_tcp, log_file, log_telegraf):
    ctx.ensure_object(dict)
    ctx.obj["seed"] = seed
    ctx.obj["until"] = until
    monitoring_logger = logging.getLogger()
    monitoring_logger.setLevel(logging.DEBUG)
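    # stamp every record with the current simulation time instead of wall-clock time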
    time_filter = SimulationTimeFilter()
    monitoring_logger.addFilter(time_filter)
    if log_tcp:
        socketHandler = LoggingSocketHandler(
            "localhost", logging.handlers.DEFAULT_TCP_LOGGING_PORT
        )
        socketHandler.setFormatter(JsonFormatter())
        monitoring_logger.addHandler(socketHandler)
    if log_file:
        streamHandler = logging.StreamHandler(stream=log_file)
        streamHandler.setFormatter(JsonFormatter())
        monitoring_logger.addHandler(streamHandler)
    if log_telegraf:
        telegrafHandler = LoggingUDPSocketHandler(
            "localhost", logging.handlers.DEFAULT_UDP_LOGGING_PORT
        )
        telegrafHandler.setFormatter(LineProtocolFormatter(resolution=1))
        monitoring_logger.addHandler(telegrafHandler)
Example #5
    def test_default(self):
        logger, handler = make_test_logger(__name__)
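        # fmt provides default key/value pairs; payload keys override them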
        handler.formatter = JsonFormatter(fmt={"test": 1})
        logger.critical("message", {"drones": 1})
        data = json.loads(handler.content)
        assert data.pop("test") == 1
        handler.clear()
        logger.critical("message", {"test": 2})
        data = json.loads(handler.content)
        assert data.pop("test") == 2
        assert len(data) == 2
        handler.clear()
        logger.critical("message", {})
        data = json.loads(handler.content)
        assert data.pop("test") == 1
        assert len(data) == 2
Example #6
    def test_default_timestamp(self):
        now = time.time()
        payload = {"a": "a", "1": 1, "2.2": 2.2}
        logger, handler = make_test_logger(__name__)
        handler.formatter = JsonFormatter()
        logger.critical("message", payload, extra={"created": now})
        data = json.loads(handler.content)
        assert len(data) == len(payload) + 2
        # from Formatter and LogRecord
        ct = time.localtime(now)
        msecs = (now - int(now)) * 1000
        default_time_string = logging.Formatter.default_msec_format % (
            time.strftime(logging.Formatter.default_time_format, ct),
            msecs,
        )
        assert data.pop("time") == default_time_string
Example #7
    def register_statistic(self, statistic: Callable) -> None:
        """
        Register a callable that takes an object for logging and generates a list
        of records. The callable should have the following accessible attributes:

        name:
            The identifying name of the statistic for logging
        logging_formatter:
            A mapping from handler type name to the pre-defined formatter for
            each of the supported logging formats, including socket, stream,
            and telegraf logging.
        whitelist:
            A tuple of types the statistic callable is interested in; only
            objects of these types are passed to it to create the required
            logging messages.

        :param statistic: Callable that returns a list of records for logging
        """
        assert hasattr(statistic, "name") and hasattr(statistic, "logging_formatter")
        try:
            for element in statistic.whitelist:
                self._statistics.setdefault(element, set()).add(statistic)
        except AttributeError:
            logging.getLogger("implementation").warning(
                f"Removing statistic {statistic.name} as no whitelist has been defined."
            )
            return

        # prepare the logger
        logger = logging.getLogger(statistic.name)
        if not logger.handlers:
            logger.addFilter(SimulationTimeFilter())
            logger.propagate = False
            # append handlers of default logger and add required formatters
            root_logger = logging.getLogger()
            for handler in root_logger.handlers:
                new_handler = copy.copy(handler)
                new_handler.setFormatter(
                    statistic.logging_formatter.get(
                        type(handler).__name__, JsonFormatter()
                    )
                )
                logger.addHandler(new_handler)
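
A minimal sketch of a conforming statistic callable and its registration, assuming a hypothetical monitoring object that exposes register_statistic and re-using logging and JsonFormatter as imported in the surrounding examples (Example #8 below shows a real conforming callable):

def dummy_statistic(obj) -> list:
    # one record per observed object; the dict keys become the logged fields
    return [{"observed": repr(obj)}]

dummy_statistic.name = "dummy_statistic"  # logger name used for this statistic
dummy_statistic.whitelist = (object,)  # object types this callable is interested in
dummy_statistic.logging_formatter = {
    logging.StreamHandler.__name__: JsonFormatter(),
}

monitoring.register_statistic(dummy_statistic)  # hypothetical Monitoring instance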
Example #8
def simulation_id(simulationinfo) -> list:
    results = [{
        "input": str(simulationinfo.input),
        "id": simulationinfo.identifier,
        "time": pytime.ctime(SIMULATION_START),
    }]
    return results


simulation_id.name = "simulation_id"
simulation_id.whitelist = (SimulationInfo,)
simulation_id.logging_formatter = {
    LoggingSocketHandler.__name__: JsonFormatter(),
    # logging.StreamHandler.__name__: JsonFormatter(),
    logging.StreamHandler.__name__: LineProtocolFormatter(
        tags={"tardis"}, resolution=1e-9
    ),
    LoggingUDPSocketHandler.__name__: LineProtocolFormatter(
        tags={"tardis"}, resolution=1e-9
    ),
}


def hitrate_evaluation(hitrateinfo: HitrateInfo) -> list:
    results = [{
        "hitrate": hitrateinfo.hitrate,
        "volume": hitrateinfo.volume / 1000.0 / 1000.0 / 1000.0,
        "providesfile": hitrateinfo.provides_file,
    }]
    return results
Example #9
    def test_payload_empty(self):
        logger, handler = make_test_logger(__name__)
        handler.formatter = JsonFormatter()
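        # an empty payload still produces the "message" and "time" fields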
        logger.critical("message", {})
        data = json.loads(handler.content)
        assert len(data) == 2
Example #10
                {
                    "resource_type": resource_type,
                    "pool_configuration": "None",
                    "pool_type": "drone",
                    "pool": repr(drone),
                    "used_ratio": 1,
                    "requested_ratio": 1,
                }
            )
    return results


resource_statistics.name = "resource_status"
resource_statistics.whitelist = (Drone,)
resource_statistics.logging_formatter = {
    LoggingSocketHandler.__name__: JsonFormatter(),
    # logging.StreamHandler.__name__: JsonFormatter(),
    logging.StreamHandler.__name__: LineProtocolFormatter(
        tags={"tardis", "resource_type", "pool_configuration", "pool_type", "pool"},
        resolution=1e-9,
    ),
    LoggingUDPSocketHandler.__name__: LineProtocolFormatter(
        tags={"tardis", "resource_type", "pool_configuration", "pool_type", "pool"},
        resolution=1e-9,
    ),
}


def user_demand(job_queue: JobQueue) -> List[Dict]:
    """
    Log global user demand.

    :param job_queue: the job queue
    :return: list of records for logging
    """
Example #11
                "pool_configuration": "None",
                "pool_type": "drone",
                "pool": repr(drone),
                "used_ratio": 1
                - used_resources[resource_type] / drone.pool_resources[resource_type],
                "requested_ratio": 1
                - resources[resource_type] / drone.pool_resources[resource_type],
            }
        )
    return results


resource_statistics.name = "resource_status"
resource_statistics.whitelist = (Drone,)
resource_statistics.logging_formatter = {
    LoggingSocketHandler.__name__: JsonFormatter(),
    logging.StreamHandler.__name__: JsonFormatter(),
    LoggingUDPSocketHandler.__name__: LineProtocolFormatter(
        tags={"tardis", "resource_type", "pool_configuration", "pool_type", "pool"},
        resolution=1,
    ),
}


def user_demand(job_queue: JobQueue) -> List[Dict]:
    """
    Log global user demand.

    :param job_queue: the job queue
    :return: list of records for logging
    """
Example #12
def ini_and_run(
    job_file,
    pool_files,
    storage_file,
    storage_type,
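    # note: this default file name is evaluated once, when the function is defined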
    log_file="test_{}.log".format(time()),
    remote_throughput=1.0,
    seed=1234,
    until=None,
    calculation_efficiency=1.0,
    log_telegraf=False,
    pre_job_rank=pre_job_rank_default,
    machine_ads=machine_ad_defaults,
    job_ads=job_ad_defaults,
    additional_identifier=None,
):
    # ini logging to file
    monitoring_logger = logging.getLogger()
    monitoring_logger.setLevel(logging.DEBUG)
    time_filter = SimulationTimeFilter()
    monitoring_logger.addFilter(time_filter)
    streamHandler = logging.StreamHandler(stream=open(log_file, "w"))
    streamHandler.setFormatter(JsonFormatter())
    monitoring_logger.addHandler(streamHandler)

    if log_telegraf:
        telegrafHandler = LoggingUDPSocketHandler(
            "localhost", logging.handlers.DEFAULT_UDP_LOGGING_PORT)
        telegrafHandler.setFormatter(LineProtocolFormatter(resolution=1))
        monitoring_logger.addHandler(telegrafHandler)

    # ini simulation
    print("starting static environment")
    simulator = Simulator(seed=seed)
    file_type = "htcondor"
    file = job_file
    simulator.create_job_generator(
        job_input=open(file, "r"),
        job_reader=partial(job_import_mapper[file_type],
                           calculation_efficiency=calculation_efficiency),
    )

    print("scheduler configuration: \n "
          "\tpre job rank: {} \n\n"
          "\tmachine classad:\n \t{}\n\n"
          "\tjob classad: {}".format(pre_job_rank, machine_ads, job_ads))

    simulator.job_scheduler = CondorClassadJobScheduler(
        job_queue=simulator.job_queue,
        pre_job_rank=pre_job_rank,
        machine_ad=machine_ads,
        job_ad=job_ads,
    )

    simulator.connection = Connection(remote_throughput * 1000 * 1000 * 1000,
                                      filebased_caching=False)
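    # a connection with infinite throughput, used below for dummy clusters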
    dummy_pool_connection = Connection(float("Inf"))
    print("dummy:", dummy_pool_connection.remote_connection.connection)
    with open(storage_file, "r") as storage_file:
        simulator.create_storage(
            storage_input=storage_file,
            storage_content_input=None,
            storage_reader=storage_import_mapper[storage_type],
            storage_type=FileBasedHitrateStorage,
        )

    for pool_file in pool_files:
        with open(pool_file, "r") as pool_file:
            # Attention: dummy_pool_connection is currently not part of
            # monitoring as it is not known within the simulator itself
            # TODO: do you need this in monitoring?
            create_pool_in_simulator(
                simulator=simulator,
                pool_input=pool_file,
                pool_reader=pool_import_mapper["htcondor"],
                pool_type=StaticPool,
                connection=dummy_pool_connection
                if "dummycluster" in pool_file.name else simulator.connection,
            )

    simulator.enable_monitoring()

    # run simulation
    simulator.run(until=until)
Example #13
def cli(
    seed,
    until,
    log_tcp,
    log_file,
    log_telegraf,
    calculation_efficiency,
    job_file,
    pre_job_rank,
    machine_ads,
    job_ads,
    scheduler_type,
    static_pool_files,
    dynamic_pool_files,
    storage_files,
    remote_throughput,
    filebased_caching,
    cache_hitrate,
):
    monitoring_logger = logging.getLogger()
    monitoring_logger.setLevel(logging.DEBUG)
    time_filter = SimulationTimeFilter()
    monitoring_logger.addFilter(time_filter)
    if log_tcp:
        socketHandler = LoggingSocketHandler(
            "localhost", logging.handlers.DEFAULT_TCP_LOGGING_PORT)
        socketHandler.setFormatter(JsonFormatter())
        monitoring_logger.addHandler(socketHandler)
    if log_file:
        streamHandler = logging.StreamHandler(stream=log_file)
        streamHandler.setFormatter(JsonFormatter())
        monitoring_logger.addHandler(streamHandler)
    if log_telegraf:
        telegrafHandler = LoggingUDPSocketHandler(
            "localhost", logging.handlers.DEFAULT_UDP_LOGGING_PORT)
        telegrafHandler.setFormatter(LineProtocolFormatter(resolution=1))
        monitoring_logger.addHandler(telegrafHandler)

    click.echo("starting hybrid environment")

    simulator = Simulator(seed=seed)
    infile, file_type = job_file
    simulator.create_job_generator(
        job_input=infile,
        job_reader=partial(
            job_import_mapper[file_type],
            calculation_efficiency=calculation_efficiency,
        ),
    )

    if scheduler_import_mapper[scheduler_type] == CondorClassadJobScheduler and any(
        (pre_job_rank, machine_ads, job_ads)
    ):
        simulator.job_scheduler = CondorClassadJobScheduler(
            job_queue=simulator.job_queue,
            pre_job_rank=pre_job_rank,
            machine_ad=machine_ads,
            job_ad=job_ads,
        )
    else:
        simulator.create_scheduler(
            scheduler_type=scheduler_import_mapper[scheduler_type])

    for current_storage_files in storage_files:
        assert all(current_storage_files), "All storage inputs have to be available"
        simulator.create_connection_module(remote_throughput, filebased_caching)
        storage_file, storage_content_file, storage_type = current_storage_files
        simulator.create_storage(
            storage_input=storage_file,
            storage_content_input=storage_content_file,
            storage_reader=storage_import_mapper[storage_type],
            storage_type=FileBasedHitrateStorage,  # TODO: Generalize this
        )

    for current_pool in static_pool_files:
        pool_file, pool_file_type = current_pool
        if "dummycluster" in pool_file.name:
            simulator.create_connection_module(float("Inf"))
        simulator.create_pools(
            pool_input=pool_file,
            pool_reader=pool_import_mapper[pool_file_type],
            pool_type=StaticPool,
        )

    for current_pool in dynamic_pool_files:
        pool_file, pool_file_type = current_pool
        if "dummycluster" in pool_file.name:
            simulator.create_connection_module(float("Inf"))
        simulator.create_pools(
            pool_input=pool_file,
            pool_reader=pool_import_mapper[pool_file_type],
            pool_type=Pool,
            controller=SimulatedLinearController,
        )

    click.echo("scheduler configuration: \n "
               f"\tscheduler type: {scheduler_type}\n\n"
               f"\tpre job rank: {pre_job_rank} \n\n"
               f"\tmachine classads:\n \t{machine_ads}\n\n"
               f"\tjob classads: {job_ads}")

    simulator.enable_monitoring()
    simulator.run(until=until)