Example #1
def copy_object(obj_id, src, dst):
    obj = get_object(src, obj_id)
    if obj is not None:
        put_object(dst, obj_id, obj)
        statsd.increment(CONTENT_BYTES_METRIC, len(obj))
        return len(obj)
    return 0
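For context, these snippets appear to rely on the shared module-level client exposed by swh.core.statsd; a minimal sketch of the assumed setup (the constant's value is a guess for illustration):

from swh.core.statsd import statsd  # shared module-level client

# hypothetical value; the real constant lives in the replayer module
CONTENT_BYTES_METRIC = "swh_content_replayer_bytes"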
Example #2
def pre_add(self, kw):
    """Called before the 'add' method."""
    statsd.increment(
        "swh_objstorage_in_bytes_total",
        len(kw["content"]),
        tags={"endpoint": "add_bytes"},
    )
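A hedged sketch of how a proxy objstorage might invoke this hook before delegating to the wrapped backend (names such as `wrapped` are assumptions for illustration):

def add(self, content, obj_id, check_presence=True):
    # hypothetical call site: record inbound byte volume, then delegate
    kw = {"content": content, "obj_id": obj_id}
    self.pre_add(kw)
    return self.wrapped.add(**kw)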
Example #3
    def process(self, worker_fn):
        """Polls Kafka for a batch of messages, and calls the worker_fn
        with these messages.

        Args:
            worker_fn (Callable[[Dict[str, List[dict]]], None]): Function
                called with the batch of messages as argument.
        """
        total_objects_processed = 0
        # timeout for message poll
        timeout = 1.0

        with statsd.status_gauge(JOURNAL_STATUS_METRIC,
                                 statuses=["idle", "processing",
                                           "waiting"]) as set_status:
            set_status("idle")
            while True:
                batch_size = self.batch_size
                if self.stop_after_objects:
                    if total_objects_processed >= self.stop_after_objects:
                        break

                    # clamp batch size to avoid overrunning stop_after_objects
                    batch_size = min(
                        self.stop_after_objects - total_objects_processed,
                        batch_size,
                    )
                set_status("waiting")
                for i in cycle(reversed(range(10))):
                    messages = self.consumer.consume(timeout=timeout,
                                                     num_messages=batch_size)
                    if messages:
                        break

                    # only check for an EOF condition once we have already
                    # consumed messages; otherwise we could detect EOF before
                    # messages had a chance to reach us (e.g. in tests)
                    if total_objects_processed > 0 and self.stop_on_eof and i == 0:
                        at_eof = all(
                            (tp.topic, tp.partition) in self.eof_reached
                            for tp in self.consumer.assignment())
                        if at_eof:
                            break
                if messages:
                    set_status("processing")
                    batch_processed, at_eof = self.handle_messages(
                        messages, worker_fn)

                    set_status("idle")
                    # report the number of handled messages
                    statsd.increment(JOURNAL_MESSAGE_NUMBER_METRIC,
                                     value=batch_processed)
                    total_objects_processed += batch_processed

                if at_eof:
                    break

        return total_objects_processed
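statsd.status_gauge is used above as a context manager yielding a status setter. A simplified sketch of such a helper, assuming the semantics are "gauge the current status at 1 and every other status at 0" (an illustration, not the swh.core.statsd implementation):

from contextlib import contextmanager

@contextmanager
def status_gauge(metric_name, statuses):
    """Yield a set_status(status) callable."""
    def set_status(status):
        # gauge the active status at 1, every other declared status at 0
        for s in statuses:
            statsd.gauge(metric_name, 1 if s == status else 0,
                         tags={"status": s})
    try:
        yield set_status
    finally:
        set_status(None)  # on exit, reset all status gauges to 0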
Example #4
def process(self, type, event, message):
    """Process the received event by dispatching it to the appropriate
    handler."""
    handler = self.handlers.get(type) or self.handlers.get("*")
    if handler:
        handler(event, message)
        statsd.increment(
            "swh_scheduler_listener_handled_event_total",
            tags={"event_type": type},
        )
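The dispatch above relies on a handlers mapping with a "*" fallback; a hypothetical registration might look like this (handler names are invented for illustration):

def __init__(self):
    # per-type handlers plus a "*" catch-all used when no type matches
    self.handlers = {
        "task-started": self.on_task_started,
        "task-result": self.on_task_result,
        "*": self.on_unhandled_event,
    }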
Example #5
def __call__(self, retry_state):
    if not retry_state.outcome.failed:
        statsd.increment(
            CONTENT_RETRY_METRIC,
            tags={
                "operation": retry_state.fn.__name__,
                "attempt": str(retry_state.attempt_number),
            },
        )
    return False
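The __call__(self, retry_state) signature matches tenacity's retry-predicate protocol: returning False never requests a retry by itself, so the object can double as a success-counting hook. A hedged wiring sketch, assuming the class above is named RetryMetricRecorder (a hypothetical name):

import tenacity

@tenacity.retry(
    retry=tenacity.retry_any(
        RetryMetricRecorder(),                      # counts non-failed attempts
        tenacity.retry_if_exception_type(IOError),  # actual retry condition
    ),
    stop=tenacity.stop_after_attempt(3),
)
def put_content(content):
    """Attempt a write; retried up to 3 times on IOError."""
    ...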
Example #6
def process_replay_objects(all_objects: Dict[str, List[BaseModel]], *,
                           storage: StorageInterface) -> None:
    for (object_type, objects) in all_objects.items():
        logger.debug("Inserting %s %s objects", len(objects), object_type)
        with statsd.timed(GRAPH_DURATION_METRIC,
                          tags={"object_type": object_type}):
            _insert_objects(object_type, objects, storage)
        statsd.increment(GRAPH_OPERATIONS_METRIC,
                         len(objects),
                         tags={"object_type": object_type})
    if notify:
        notify("WATCHDOG=1")
Example #7
def send_metric(metric, count, method_name):
    """Send statsd metric with count for method `method_name`

    If count is 0, the metric is discarded.  If the metric is not
    parseable, the metric is discarded with a log message.

    Args:
        metric (str): Metric's name (e.g content:add, content:add:bytes)
        count (int): Associated value for the metric
        method_name (str): Method's name

    Returns:
        Bool indicating whether the metric was sent
    """
    if count == 0:
        return False

    metric_type = metric.split(":")
    _length = len(metric_type)
    if _length == 2:
        object_type, operation = metric_type
        metric_name = OPERATIONS_METRIC
    elif _length == 3:
        object_type, operation, unit = metric_type
        metric_name = OPERATIONS_UNIT_METRIC.format(unit=unit)
    else:
        logging.warning("Skipping unknown metric {%s: %s}", metric, count)
        return False

    statsd.increment(
        metric_name,
        count,
        tags={
            "endpoint": method_name,
            "object_type": object_type,
            "operation": operation,
        },
    )
    return True
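A usage sketch, with metric names taken from the docstring:

send_metric("content:add", count=10, method_name="content_add")           # operation count
send_metric("content:add:bytes", count=4096, method_name="content_add")   # per-unit metric
send_metric("content", count=1, method_name="content_add")  # unparseable: warned, discarded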
Example #8
def process_event(event, scheduler_backend):
    uuid = event.get("uuid")
    if not uuid:
        return

    event_type = event["type"]
    statsd.increment("swh_scheduler_listener_handled_event_total",
                     tags={"event_type": event_type})

    if event_type == "task-started":
        scheduler_backend.start_task_run(
            uuid,
            timestamp=utcnow(),
            metadata={"worker": event.get("hostname")},
        )
    elif event_type == "task-result":
        result = event["result"]

        status = None

        if isinstance(result, dict) and "status" in result:
            status = result["status"]
            if status == "success":
                status = "eventful" if result.get("eventful") else "uneventful"

        if status is None:
            status = "eventful" if result else "uneventful"

        scheduler_backend.end_task_run(uuid,
                                       timestamp=utcnow(),
                                       status=status,
                                       result=result)
    elif event_type == "task-failed":
        scheduler_backend.end_task_run(uuid,
                                       timestamp=utcnow(),
                                       status="failed")
Example #9
    def _copy_object(obj):
        nonlocal nb_skipped
        nonlocal nb_failures

        obj_id = obj[ID_HASH_ALGO]
        if obj["status"] != "visible":
            nb_skipped += 1
            logger.debug("skipped %s (status=%s)", hash_to_hex(obj_id),
                         obj["status"])
            statsd.increment(
                CONTENT_OPERATIONS_METRIC,
                tags={
                    "decision": "skipped",
                    "status": obj["status"]
                },
            )
        elif exclude_fn and exclude_fn(obj):
            nb_skipped += 1
            logger.debug("skipped %s (manually excluded)", hash_to_hex(obj_id))
            statsd.increment(CONTENT_OPERATIONS_METRIC,
                             tags={"decision": "excluded"})
        elif check_dst and obj_in_objstorage(obj_id, dst):
            nb_skipped += 1
            logger.debug("skipped %s (in dst)", hash_to_hex(obj_id))
            statsd.increment(CONTENT_OPERATIONS_METRIC,
                             tags={"decision": "in_dst"})
        else:
            try:
                copied = copy_object(obj_id, src, dst)
            except ObjNotFoundError:
                nb_skipped += 1
                statsd.increment(CONTENT_OPERATIONS_METRIC,
                                 tags={"decision": "not_in_src"})
            else:
                if copied is None:
                    nb_failures += 1
                    statsd.increment(CONTENT_OPERATIONS_METRIC,
                                     tags={"decision": "failed"})
                else:
                    vol.append(copied)
                    statsd.increment(CONTENT_OPERATIONS_METRIC,
                                     tags={"decision": "copied"})
Example #10
def test_increment_doesnt_send_none(statsd):
    statsd.increment("metric", None)
    assert statsd.socket.recv() is None
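The statsd fixture presumably wraps the client around an in-memory fake socket so payloads can be asserted on; a minimal sketch (class and import path are assumptions):

import pytest

from swh.core.statsd import Statsd


class FakeSocket:
    """Capture sent payloads; recv() returns them, or None when empty."""

    def __init__(self):
        self.payloads = []

    def send(self, payload):
        self.payloads.append(payload)

    def recv(self):
        return self.payloads.pop(0) if self.payloads else None


@pytest.fixture
def statsd():
    client = Statsd()
    client.socket = FakeSocket()  # bypass UDP, keep payloads in memory
    return client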
Example #11
def run_ready_tasks(backend: SchedulerInterface, app) -> List[Dict]:
    """Schedule tasks ready to be scheduled.

    This looks up ready tasks for each task type and mass-schedules them
    accordingly (sends messages to RabbitMQ and marks the equivalent tasks as
    scheduled in the scheduler backend).

    If tasks with priority exist for a task type, they are redirected to a
    dedicated high-priority queue (the standard queue name prefixed with
    `save_code_now:`).

    Args:
        backend: scheduler backend to interact with (read/update tasks)
        app (App): Celery application to send tasks to

    Returns:
        A list of dictionaries::

          {
            'task': the scheduler's task id,
            'backend_id': Celery's task id,
            'scheduled': utcnow()
          }

        The result can be used to block-wait for the tasks' results::

          backend_tasks = run_ready_tasks(self.scheduler, app)
          for task in backend_tasks:
              AsyncResult(id=task['backend_id']).get()

    """
    all_backend_tasks: List[Dict] = []
    while True:
        task_types = {}
        pending_tasks = []
        for task_type in backend.get_task_types():
            task_type_name = task_type["type"]
            task_types[task_type_name] = task_type
            max_queue_length = task_type["max_queue_length"]
            if max_queue_length is None:
                max_queue_length = 0
            backend_name = task_type["backend_name"]
            if max_queue_length:
                try:
                    queue_length = app.get_queue_length(backend_name)
                except ValueError:
                    queue_length = None

                if queue_length is None:
                    # Running without RabbitMQ (probably a test env).
                    num_tasks = MAX_NUM_TASKS
                else:
                    num_tasks = min(max_queue_length - queue_length,
                                    MAX_NUM_TASKS)
            else:
                num_tasks = MAX_NUM_TASKS
            # only pull tasks if the buffer is at least 1/5th empty (= 80%
            # full), to help postgresql use properly indexed queries.
            if num_tasks > min(MAX_NUM_TASKS, max_queue_length) // 5:
                # Only grab num_tasks tasks with no priority
                grabbed_tasks = backend.grab_ready_tasks(task_type_name,
                                                         num_tasks=num_tasks)
                if grabbed_tasks:
                    pending_tasks.extend(grabbed_tasks)
                    logger.info("Grabbed %s tasks %s", len(grabbed_tasks),
                                task_type_name)
                    statsd.increment(
                        "swh_scheduler_runner_scheduled_task_total",
                        len(grabbed_tasks),
                        tags={"task_type": task_type_name},
                    )
            # grab max_queue_length (or 10) potential tasks with any priority for the
            # same type (limit the result to avoid too long running queries)
            grabbed_priority_tasks = backend.grab_ready_priority_tasks(
                task_type_name, num_tasks=max_queue_length or 10)
            if grabbed_priority_tasks:
                pending_tasks.extend(grabbed_priority_tasks)
                logger.info(
                    "Grabbed %s tasks %s (priority)",
                    len(grabbed_priority_tasks),
                    task_type_name,
                )
                statsd.increment(
                    "swh_scheduler_runner_scheduled_task_total",
                    len(grabbed_priority_tasks),
                    tags={"task_type": task_type_name},
                )

        if not pending_tasks:
            return all_backend_tasks

        backend_tasks = []
        celery_tasks: List[Tuple[bool, str, str, List, Dict]] = []
        for task in pending_tasks:
            args = task["arguments"]["args"]
            kwargs = task["arguments"]["kwargs"]

            backend_name = task_types[task["type"]]["backend_name"]
            backend_id = uuid()
            celery_tasks.append((
                task.get("priority") is not None,
                backend_name,
                backend_id,
                args,
                kwargs,
            ))
            data = {
                "task": task["id"],
                "backend_id": backend_id,
                "scheduled": utcnow(),
            }

            backend_tasks.append(data)
        logger.debug("Sent %s celery tasks", len(backend_tasks))

        backend.mass_schedule_task_runs(backend_tasks)
        for with_priority, backend_name, backend_id, args, kwargs in celery_tasks:
            kw = dict(
                task_id=backend_id,
                args=args,
                kwargs=kwargs,
            )
            if with_priority:
                kw["queue"] = f"save_code_now:{backend_name}"
            app.send_task(backend_name, **kw)

        all_backend_tasks.extend(backend_tasks)
Example #12
def post_get(self, ret, kw):
    """Called after the 'get' method."""
    statsd.increment("swh_objstorage_out_bytes_total",
                     len(ret),
                     tags={"endpoint": "get_bytes"})