Example #1
 def wait_for_data_processing(data_type, time_limit):
     """
     Wait until the data passed to Tyr is processed, by checking the state of the associated dataset in the job
     :param data_type: Type of data passed to Tyr
     :param time_limit: UTC time from when the job could have been created. Used to exclude jobs from previous binarizations
     :return: when the dataset state is "done"
     """
     instance_jobs_url = "{base_url}/v0/jobs/{instance}".format(
         base_url=config["URL_TYR"], instance=data_set)  # data_set: the instance name, expected from the enclosing scope
     r = requests.get(instance_jobs_url)
     r.raise_for_status()
     jobs_resp = json.loads(r.text)["jobs"]
     for job in jobs_resp:
         job_creation = datetime.datetime.strptime(
             job["created_at"], "%Y-%m-%dT%H:%M:%S.%f")
         if job_creation > time_limit:
             for dataset in job["data_sets"]:
                 if data_type == dataset["type"]:
                     if dataset["state"] == "done":
                         logger.info(
                             "Dataset '{}' done!".format(data_type))
                         return
                     elif dataset["state"] != "failed":
                         raise utils.RetryError(
                             "Job with dataset '{type}' still in process ({state})"
                             .format(type=data_type,
                                     state=dataset["state"]))
                     else:
                         raise Exception(
                             "Dataset '{type}' in state '{state}'".
                             format(type=data_type, state=dataset["state"]))
     raise utils.RetryError(
         "Job with dataset '{}' not yet created".format(data_type))
Example #2
        def wait_for_kraken_reload(last_data_loaded, cov):
            new_data_loaded = get_last_coverage_loaded_time(cov)

            if last_data_loaded == new_data_loaded:
                raise utils.RetryError("kraken data is not loaded")

            logger.info("Kraken reloaded")
Example #3
 def wait_for_tyr_instance_scan(instance_name):
     instances_url = "{base_url}/v0/instances/".format(
         base_url=config["URL_TYR"])
     instances_request = requests.get(instances_url)
     instances_request.raise_for_status()
     if instance_name not in [
             instance["name"]
             for instance in json.loads(instances_request.text)
     ]:
         raise utils.RetryError(
             "Instance {} not yet scanned by Tyr".format(instance_name))
Example #4
    def _get_asgard_data_download_container():
        # find container asgard-data (data downloader)
        containers = [
            x for x in docker.DockerClient(version="auto").containers.list()
            if "asgard-data" in x.name
        ]
        # if such a container exists, the download has not finished yet
        if containers:
            raise utils.RetryError("Asgard is not loaded")

        logger.info("Asgard reloaded")
Example #5
 def wait_for_cities_db():
     """
     Wait until the 'cities' database is available
     """
     url = config["URL_TYR"] + "/v0/cities/status"
     r = requests.get(url)
     log.debug(r.text)
     if r.status_code == 404:
         response = json.loads(r.text)
         if response.get("message") == "cities db not reachable":
             raise utils.RetryError("Cities database is still upgrading...")
         else:
             raise Exception("Couldn't get 'cities' job status")
Example #6
    def wait_for_cities_completion():
        """
        Wait until the 'cities' task is completed
        The task is considered failed if any error occurs while requesting Tyr
        """
        last_cities_job = get_last_cities_job()

        if last_cities_job and "state" in last_cities_job:
            if last_cities_job["state"] == "running":
                raise utils.RetryError("Cities task still running...")
            elif last_cities_job["state"] == "failed":
                raise Exception("Job 'cities' status FAILED")
        else:
            raise Exception("Couldn't get 'cities' job status")
Example #7
        def wait_until_instance_jobs_are_done(time_limit):
            """
            Wait until all Tyr's jobs related to the instance and created after `time_limit` are marked "done"
            :param time_limit: UTC time from when the job could have been created. Used to exclude jobs from previous binarizations
            :return: when all matching jobs are "done"
            """
            r = requests.get(instance_jobs_url)  # instance_jobs_url comes from the enclosing scope
            r.raise_for_status()
            jobs_resp = json.loads(r.text)["jobs"]
            a_job_exists = False
            for job in jobs_resp:
                job_creation = datetime.datetime.strptime(
                    job["created_at"], "%Y-%m-%dT%H:%M:%S.%f")
                if job_creation > time_limit:
                    a_job_exists = True
                    if job["state"] == "done":
                        logger.debug("Job done! : '{}' ".format(
                            json.dumps(job["data_sets"], indent=2)))

                    elif job["state"] in ["running", "pending"]:
                        raise utils.RetryError(
                            "Job still in process ({state}). {job}".format(
                                job=json.dumps(job["data_sets"], indent=2),
                                state=job["state"],
                            ))
                    else:
                        raise Exception("Job in state '{state}'. {job}".format(
                            job=json.dumps(job, indent=2), state=job["state"]))

            if not a_job_exists:
                raise utils.RetryError(
                    "No tyr job launched after {} found in {}".format(
                        time_limit, instance_jobs_url))
            # if a_job_exists and we exited the loop above, then it means that all
            # found jobs are marked as "done".
            return
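
A typical call sequence for these time-limited waits (every name here except wait_until_instance_jobs_are_done is hypothetical): capture the current UTC time before triggering the data update, so that the job_creation > time_limit check filters out jobs left over from earlier runs.

    import datetime

    time_limit = datetime.datetime.utcnow()        # taken before the new job is created
    trigger_data_update()                          # hypothetical: makes Tyr start a new job
    wait_until_instance_jobs_are_done(time_limit)  # retried via RetryError until all jobs are done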