def execute(self, ctxt, args):
        client_ID = ctxt.handler.get_client_ID()
        assert self.is_valid_client_ID(ctxt.redis_connection, client_ID)

        redis_conn = ctxt.redis_connection
        jobs = get_jobs_of_client(redis_conn, client_ID)
        assert isinstance(jobs, list)
        jobs_info = []
        for job in jobs:
            info = dict(job.__dict__)  # copy so deleting keys below does not mutate the job object
            info["downloadURL"] = webhelpers.create_download_url(job.jobID, client_ID, job.title)
            del info["output"]  # strip the (potentially large) converter output from the payload
            jobs_info.append(info)

        return InfoMessage(CATROBAT_LANGUAGE_VERSION, jobs_info)
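For reference, a hypothetical shape of one jobs_info entry built by the loop above; the field names come from the Job attributes used elsewhere in this listing, the concrete values are placeholders only:

# example_job_info = {
#     "jobID": "12345678",           # Scratch project ID (== job ID)
#     "title": "My Project",
#     "state": Job.State.FINISHED,
#     "progress": 100,
#     "imageURL": None,
#     "archiveCachedUTCDate": None,  # set once the .catrobat archive has been cached
#     "downloadURL": None,           # filled in via webhelpers.create_download_url(...)
# }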
    def notify(cls, msg_type, args):
        # Note: jobID is equivalent to scratch project ID by definition!
        job_ID = args[jobmonprot.Request.ARGS_JOB_ID]
        job_key = webhelpers.REDIS_JOB_KEY_TEMPLATE.format(job_ID)
        job = Job.from_redis(cls.REDIS_CONNECTION, job_key)

        if job is None:
            _logger.error("Cannot find job #{}".format(job_ID))
            return

        if msg_type == NotificationType.JOB_STARTED:
            job.title = args[jobmonprot.Request.ARGS_TITLE]
            job.state = Job.State.RUNNING
            job.progress = 0
            job.imageURL = args[jobmonprot.Request.ARGS_IMAGE_URL]
            _logger.info('Started to convert: "%s"', job.title)
        elif msg_type == NotificationType.JOB_FAILED:
            _logger.warning("Job failed! Exception Args: %s", args)
            job.state = Job.State.FAILED
        elif msg_type == NotificationType.JOB_OUTPUT:
            job.output = job.output if job.output is not None else ""
            for line in args[jobmonprot.Request.ARGS_LINES]:
                job.output += line
        elif msg_type == NotificationType.JOB_PROGRESS:
            progress = args[jobmonprot.Request.ARGS_PROGRESS]
            assert isinstance(progress, int)
            job.progress = progress
        elif msg_type == NotificationType.JOB_CONVERSION_FINISHED:
            _logger.info(
                "Job #{} finished, waiting for file transfer".format(job_ID))
            return
        elif msg_type == NotificationType.JOB_FINISHED:
            job.state = Job.State.FINISHED
            job.progress = 100
            job.archiveCachedUTCDate = dt.utcnow().strftime(
                Job.DATETIME_FORMAT)

        # find listening clients
        # TODO: cache this...
        listening_client_job_key = webhelpers.REDIS_LISTENING_CLIENT_JOB_KEY_TEMPLATE.format(
            job_ID)
        all_listening_client_IDs = cls.REDIS_CONNECTION.get(
            listening_client_job_key)
        if all_listening_client_IDs is None:
            _logger.warning("WTH?! No listening clients stored!")
            if not job.save_to_redis(cls.REDIS_CONNECTION, job_key):
                _logger.info("Unable to update job state!")
            return

        all_listening_client_IDs = ast.literal_eval(all_listening_client_IDs)
        num_clients_of_project = len(all_listening_client_IDs)
        _logger.debug("There %s %d registered client%s." % \
                      ("is" if num_clients_of_project == 1 else "are", \
                       num_clients_of_project, "s" if num_clients_of_project != 1 else ""))

        if msg_type in (NotificationType.JOB_FINISHED,
                        NotificationType.JOB_FAILED):
            # Job completely finished or failed -> remove all listeners from database
            #                                      before updating job state in database
            remove_all_listening_clients_from_job(cls.REDIS_CONNECTION, job_ID)

        # update job state in database
        if not job.save_to_redis(cls.REDIS_CONNECTION, job_key):
            _logger.info("Unable to update job state!")
            return

        # materialize as lists (not lazy iterators) so len() and indexing work below
        currently_listening_client_IDs = [client_ID for client_ID in all_listening_client_IDs
                                          if client_ID in cls.client_ID_open_sockets_map]
        currently_listening_client_sockets = [cls.client_ID_open_sockets_map[client_ID]
                                              for client_ID in currently_listening_client_IDs]
        _logger.debug("There are %d active clients listening on this job.",
                      len(currently_listening_client_sockets))

        for idx, socket_handlers in enumerate(
                currently_listening_client_sockets):
            if msg_type == NotificationType.JOB_STARTED:
                message = JobRunningMessage(job_ID, job.title, job.imageURL)
            elif msg_type == NotificationType.JOB_OUTPUT:
                message = JobOutputMessage(job_ID,
                                           args[jobmonprot.Request.ARGS_LINES])
            elif msg_type == NotificationType.JOB_PROGRESS:
                message = JobProgressMessage(job_ID, job.progress)
            elif msg_type == NotificationType.JOB_FINISHED:
                client_ID = currently_listening_client_IDs[idx]
                download_url = webhelpers.create_download_url(
                    job_ID, client_ID, job.title)
                message = JobFinishedMessage(job_ID, download_url, None)
            elif msg_type == NotificationType.JOB_FAILED:
                message = JobFailedMessage(job_ID)
            else:
                _logger.warn("IGNORING UNKNOWN MESSAGE")
                return
            for handler in socket_handlers:
                handler.send_message(message)
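A minimal usage sketch for notify: the handler class name ConverterWebSocketHandler and the concrete values are assumptions, while the argument keys are the jobmonprot.Request constants used above. It relays a progress update to all clients currently listening on a job.

# Hypothetical call site, e.g. inside the job-monitor protocol handler:
ConverterWebSocketHandler.notify(NotificationType.JOB_PROGRESS, {
    jobmonprot.Request.ARGS_JOB_ID: "12345678",  # == Scratch project ID
    jobmonprot.Request.ARGS_PROGRESS: 40,        # percent
})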
    def execute(self, ctxt, args):
        client_ID = ctxt.handler.get_client_ID()
        assert self.is_valid_client_ID(ctxt.redis_connection, client_ID)

        # validate job_ID
        job_ID = args[Command.ArgumentType.JOB_ID]
        if not self.is_valid_job_ID(job_ID):
            _logger.error("Invalid jobID given!")
            return ErrorMessage("Invalid jobID given!")

        force = False
        if Command.ArgumentType.FORCE in args:
            force_param_str = str(args[Command.ArgumentType.FORCE]).lower()
            force = force_param_str in ("true", "1")

        verbose = False
        if Command.ArgumentType.VERBOSE in args:
            verbose_param_str = str(args[Command.ArgumentType.VERBOSE]).lower()
            verbose = verbose_param_str in ("true", "1")

        redis_conn = ctxt.redis_connection
        jobs_of_client = get_jobs_of_client(redis_conn, client_ID)
        jobs_of_client_in_progress = [job for job in jobs_of_client if job.is_in_progress()]
        if len(jobs_of_client_in_progress) >= MAX_NUM_SCHEDULED_JOBS_PER_CLIENT:
            return ErrorMessage("Maximum number of jobs per client limit exceeded: {}"
                                .format(MAX_NUM_SCHEDULED_JOBS_PER_CLIENT))

        # TODO: lock.acquire() => use python's context-handler (i.e. "with"-keyword) and file lock!
        assign_job_to_client(redis_conn, job_ID, client_ID)
        job_key = webhelpers.REDIS_JOB_KEY_TEMPLATE.format(job_ID)
        job = Job.from_redis(redis_conn, job_key)

        if job is not None:
            if job.is_in_progress():
                # TODO: lock.release()
                _logger.info("Job already scheduled (scratch project with ID: %d)", job_ID)
                if not add_listening_client_to_job(redis_conn, client_ID, job_ID):
                    return JobFailedMessage(job_ID, "Cannot add client as listener to job!")
                return JobAlreadyRunningMessage(job_ID, job.title, job.imageURL)

            elif job.state == Job.State.FINISHED and not force:
                assert job.archiveCachedUTCDate is not None
                archive_cached_utc_date = dt.strptime(job.archiveCachedUTCDate, Job.DATETIME_FORMAT)
                download_valid_until_utc = archive_cached_utc_date + timedelta(seconds=Job.CACHE_ENTRY_VALID_FOR)

                if dt.utcnow() <= download_valid_until_utc:
                    file_name = str(job_ID) + CATROBAT_FILE_EXT
                    file_path = "%s/%s" % (ctxt.jobmonitorserver_settings["download_dir"], file_name)
                    if file_name and os.path.exists(file_path):
                        download_url = webhelpers.create_download_url(job_ID, client_ID, job.title)
                        # TODO: lock.release()
                        return JobFinishedMessage(job_ID, download_url, job.archiveCachedUTCDate)

            else:
                assert job.state == Job.State.FAILED or force

        job = Job(job_ID, "-", Job.State.READY)
        if not job.save_to_redis(redis_conn, job_key):
            # TODO: lock.release()
            return JobFailedMessage(job_ID, "Cannot schedule job!")

        if not add_listening_client_to_job(redis_conn, client_ID, job_ID):
            return JobFailedMessage(job_ID, "Cannot add client as listener to job!")

        # schedule this job
        use_connection(redis_conn)
        q = Queue(connection=redis_conn)
        host, port = ctxt.jobmonitorserver_settings["host"], ctxt.jobmonitorserver_settings["port"]
        _logger.info("Scheduled new job (host: %s, port: %s, scratch project ID: %d)", host, port, job_ID)
        #q.enqueue(convert_scratch_project, scratch_project_ID, host, port)
        q.enqueue_call(func=convert_scratch_project, args=(job_ID, host, port, verbose,), timeout=JOB_TIMEOUT)
        # TODO: lock.release()
        return JobReadyMessage(job_ID)
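The repeated "# TODO: lock.acquire()" / "# TODO: lock.release()" markers indicate the scheduling section is meant to run under a lock via a context manager. A minimal sketch of that idea, assuming a POSIX advisory file lock and a hypothetical lock-file path (this helper is not part of the original code):

import fcntl
from contextlib import contextmanager

@contextmanager
def job_schedule_lock(lock_file_path="/tmp/jobscheduler.lock"):  # hypothetical path
    # Exclusive advisory file lock; released automatically when the block exits.
    with open(lock_file_path, "w") as lock_file:
        fcntl.flock(lock_file, fcntl.LOCK_EX)
        try:
            yield
        finally:
            fcntl.flock(lock_file, fcntl.LOCK_UN)

With such a helper, everything from assign_job_to_client(...) down to the final return could be wrapped in a single "with job_schedule_lock():" block, replacing the scattered acquire/release TODOs.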