Example #1
    def get_unfinished_jobs(cls):
        """
        Queries SimpleDB for a list of pending jobs that have not yet been
        finished. 
        
        :rtype: list 
        :returns: A list of unfinished :py:class:`EncodingJob` objects.
        """
        query_str = (
            "SELECT * FROM %s WHERE job_state != '%s' "
            "and job_state != '%s' "
            "and job_state != '%s'" % (
                settings.SIMPLEDB_JOB_STATE_DOMAIN,
                "FINISHED", "ERROR", "ABANDONED",
            )
        )
        results = cls._get_sdb_job_state_domain().select(query_str)

        jobs = []
        for item in results:
            try:
                job = cls._get_job_object_from_item(item)
            except TypeError:
                message = "JobStateBackend.get_unfinished_jobs(): " "Unable to instantiate job: %s" % item
                logger.error(message_or_obj=message)
                logger.error()
                continue
            except ImportError:
                message = "JobStateBackend.get_unfinished_jobs(): " "Invalid nommer specified for job: %s" % item
                logger.error(message_or_obj=message)
                logger.error()
                continue
            jobs.append(job)

        return jobs
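
A minimal usage sketch for the classmethod above. The JobStateBackend name and
the media_nommer.core.job_state_backend module are taken from the docstrings
and log strings in these examples; the polling loop itself is illustrative only
and assumes valid AWS credentials are configured.

    # Hypothetical caller; attribute names come from the examples on this page.
    from media_nommer.core.job_state_backend import JobStateBackend

    for job in JobStateBackend.get_unfinished_jobs():
        print("%s is still %s" % (job.unique_id, job.job_state))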
Example #2
    def spawn_instances(cls, num_instances):
        """
        Spawns the number of instances specified.
        
        :param int num_instances: The number of instances to spawn.
        :rtype: :py:class:`boto.ec2.instance.Reservation` or ``None``
        :returns: A boto Reservation for the started instance(s), or ``None``
            if the AMI could not be found.
        """
        logger.info("EC2InstanceManager.spawn_instances(): " \
                     "Spawning %d new instances" % num_instances)

        conn = cls._aws_ec2_connection()
        try:
            image = conn.get_all_images(image_ids=settings.EC2_AMI_ID)
            image = image[0]
        except (EC2ResponseError, IndexError):
            logger.error("EC2InstanceManager.spawn_instances(): " \
                         "No AMI with ID %s could be found." % settings.EC2_AMI_ID)
            logger.error()
            return

        # The boto Reservation object. Its 'instances' attribute is the
        # important bit.
        return image.run(min_count=num_instances, max_count=num_instances,
                         instance_type=settings.EC2_INSTANCE_TYPE,
                         security_groups=settings.EC2_SECURITY_GROUPS,
                         key_name=settings.EC2_KEY_NAME,
                         user_data=cls._gen_ec2_user_data())
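
A hedged usage sketch for spawn_instances(). The EC2InstanceManager class name
comes from the method's own log messages; everything else here is an assumption
for illustration, and the class is assumed to already be imported.

    # Illustrative only; spawn_instances() returns None when the AMI lookup fails.
    reservation = EC2InstanceManager.spawn_instances(2)
    if reservation is not None:
        # The Reservation's 'instances' attribute holds the started instances.
        for instance in reservation.instances:
            print("%s: %s" % (instance.id, instance.state))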
Example #3
    def spawn_instances(cls, num_instances):
        """
        Spawns the number of instances specified.
        
        :param int num_instances: The number of instances to spawn.
        :rtype: :py:class:`boto.ec2.instance.Reservation` or ``None``
        :returns: A boto Reservation for the started instance(s), or ``None``
            if the AMI could not be found.
        """
        logger.info("EC2InstanceManager.spawn_instances(): " \
                     "Spawning %d new instances" % num_instances)

        conn = cls._aws_ec2_connection()
        try:
            image = conn.get_all_images(image_ids=settings.EC2_AMI_ID)
            image = image[0]
        except (EC2ResponseError, IndexError):
            logger.error("EC2InstanceManager.spawn_instances(): " \
                         "No AMI with ID %s could be found." % settings.EC2_AMI_ID)
            logger.error()
            return

        # The boto Reservation object. Its 'instances' attribute is the
        # important bit.
        return image.run(min_count=num_instances,
                         max_count=num_instances,
                         instance_type=settings.EC2_INSTANCE_TYPE,
                         security_groups=settings.EC2_SECURITY_GROUPS,
                         key_name=settings.EC2_KEY_NAME,
                         user_data=cls._gen_ec2_user_data())
Example #4
    def refresh_jobs_with_state_changes(cls):
        """
        Looks at the state SQS queue specified by the
        :py:data:`SQS_JOB_STATE_CHANGE_QUEUE_NAME <media_nommer.conf.settings.SQS_JOB_STATE_CHANGE_QUEUE_NAME>`
        setting and refreshes any jobs that have changed. This simply reloads
        the job's details from SimpleDB_.

        :rtype: ``list`` of :py:class:`EncodingJob <media_nommer.core.job_state_backend.EncodingJob>`
        :returns: A list of changed :py:class:`EncodingJob` objects.
        """
        logger.debug("JobCache.refresh_jobs_with_state_changes(): " \
                    "Checking state change queue.")
        # Pop up to 10 jobs that we think may have changed state. There are
        # some false alarms in here, which brings us to...
        popped_changed_jobs = JobStateBackend.pop_state_changes_from_queue(10)
        # A temporary list that stores the jobs that actually changed. This
        # will be returned at the end of this method.
        changed_jobs = []

        if popped_changed_jobs:
            logger.debug("Potential job state changes found: %s" %
                         popped_changed_jobs)
            for job in popped_changed_jobs:
                if cls.is_job_cached(job):
                    current_state = cls.get_job(job).job_state
                    new_state = job.job_state

                    if current_state != new_state:
                        logger.info("* Job state changed %s: %s -> %s" % (
                            job.unique_id,
                            # Current job state in cache
                            current_state,
                            # New incoming job state
                            new_state,
                        ))
                        cls.update_job(job)
                        # This one actually changed, append this for returning.
                        changed_jobs.append(job)
                        if new_state == 'ERROR':
                            logger.error('Error trace from ec2nommerd:')
                            logger.error(job.job_state_details)
        return changed_jobs
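
A short usage sketch for the method above, assuming it is called periodically
from the JobCache class named in its own debug message; the timer wiring is
omitted and purely hypothetical.

    # Hypothetical periodic check; in the real daemon this would run on a timer.
    changed = JobCache.refresh_jobs_with_state_changes()
    for job in changed:
        print("Job %s moved to state %s" % (job.unique_id, job.job_state))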
Example #5
    def refresh_jobs_with_state_changes(cls):
        """
        Looks at the state SQS queue specified by the
        :py:data:`SQS_JOB_STATE_CHANGE_QUEUE_NAME <media_nommer.conf.settings.SQS_JOB_STATE_CHANGE_QUEUE_NAME>`
        setting and refreshes any jobs that have changed. This simply reloads
        the job's details from SimpleDB_.

        :rtype: ``list`` of :py:class:`EncodingJob <media_nommer.core.job_state_backend.EncodingJob>`
        :returns: A list of changed :py:class:`EncodingJob` objects.
        """
        logger.debug("JobCache.refresh_jobs_with_state_changes(): " \
                    "Checking state change queue.")
        # Pop up to 10 jobs that we think may have changed state. There are
        # some false alarms in here, which brings us to...
        popped_changed_jobs = JobStateBackend.pop_state_changes_from_queue(10)
        # A temporary list that stores the jobs that actually changed. This
        # will be returned at the end of this method.
        changed_jobs = []

        if popped_changed_jobs:
            logger.debug("Potential job state changes found: %s" % popped_changed_jobs)
            for job in popped_changed_jobs:
                if cls.is_job_cached(job):
                    current_state = cls.get_job(job).job_state
                    new_state = job.job_state

                    if current_state != new_state:
                        logger.info("* Job state changed %s: %s -> %s" % (
                            job.unique_id,
                            # Current job state in cache
                            current_state,
                            # New incoming job state
                            new_state,
                        ))
                        cls.update_job(job)
                        # This one actually changed, append this for returning.
                        changed_jobs.append(job)
                        if new_state == 'ERROR':
                            logger.error('Error trace from ec2nommerd:')
                            logger.error(job.job_state_details)
        return changed_jobs
Example #6
    def __run_ffmpeg(self, fobj):
        """
        Fire up ffmpeg and toss the results into a temporary file.
        
        :rtype: file-like object or ``None``
        :returns: If the encoding succeeds, a file-like object is returned.
            If an error happens, ``None`` is returned, and the ERROR job
            state is set.
        """
        is_two_pass = len(self.job.job_options) > 1
        out_fobj = tempfile.NamedTemporaryFile(mode='w+b', delete=True)

        pass_counter = 1
        for encoding_pass_options in self.job.job_options:
            is_second_pass = pass_counter == 2
            # Based on the given options, assemble the command list to
            # pass on to Popen.
            ffmpeg_cmd = self.__assemble_ffmpeg_cmd_list(
                encoding_pass_options,
                fobj,
                out_fobj,
                is_two_pass=is_two_pass,
                is_second_pass=is_second_pass)

            # Do this for ffmpeg's sake. Allows more than one concurrent
            # encoding job per EC2 instance.
            os.chdir(self.temp_cwd)
            # Fire up ffmpeg.
            process = subprocess.Popen(
                ffmpeg_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # Get back to home dir. Not sure this is necessary, but meh.
            os.chdir(os.path.expanduser('~'))
            # Block here while waiting for output
            cmd_output = process.communicate()

            # 0 is success, so anything but that is bad.
            error_happened = process.returncode != 0

            if not error_happened and (is_second_pass or not is_two_pass):
                # No errors, return the file object for uploading.
                out_fobj.seek(0)
                return out_fobj
            elif error_happened:
                # Error found, return nothing so the nommer can die.
                logger.error(
                    message_or_obj="Error encountered while running ffmpeg.")
                logger.error(message_or_obj=cmd_output[0])
                logger.error(message_or_obj=cmd_output[1])
                self.wrapped_set_job_state('ERROR', details=cmd_output[1])
                return None

            pass_counter += 1
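
From the loop above, self.job.job_options is evidently a list holding one
option dict per encoding pass: two entries trigger two-pass encoding, and
Example #8 below reads a 'move_atom_to_front' key from each dict. A purely
hypothetical two-pass payload, just to illustrate the shape the method expects
(any other per-pass keys are not shown in these examples):

    # Assumed structure only; real jobs presumably carry ffmpeg options here too.
    job_options = [
        {'move_atom_to_front': False},  # options for the first ffmpeg pass
        {'move_atom_to_front': True},   # options for the second, final pass
    ]
    is_two_pass = len(job_options) > 1  # True, so both passes run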
Example #7
    def get_unfinished_jobs(cls):
        """
        Queries SimpleDB for a list of pending jobs that have not yet been
        finished. 
        
        :rtype: list 
        :returns: A list of unfinished :py:class:`EncodingJob` objects.
        """
        query_str = "SELECT * FROM %s WHERE job_state != '%s' " \
                    "and job_state != '%s' " \
                    "and job_state != '%s'" % (
              settings.SIMPLEDB_JOB_STATE_DOMAIN,
              'FINISHED',
              'ERROR',
              'ABANDONED',
        )
        results = cls._get_sdb_job_state_domain().select(query_str)

        jobs = []
        for item in results:
            try:
                job = cls._get_job_object_from_item(item)
            except TypeError:
                message = "JobStateBackend.get_unfinished_jobs(): " \
                          "Unable to instantiate job: %s" % item
                logger.error(message_or_obj=message)
                logger.error()
                continue
            except ImportError:
                message = "JobStateBackend.get_unfinished_jobs(): " \
                          "Invalid nommer specified for job: %s" % item
                logger.error(message_or_obj=message)
                logger.error()
                continue
            jobs.append(job)

        return jobs
Example #8
    def __run_ffmpeg(self, fobj):
        """
        Fire up ffmpeg and toss the results into a temporary file.
        
        :rtype: file-like object or ``None``
        :returns: If the encoding succeeds, a file-like object is returned.
            If an error happens, ``None`` is returned, and the ERROR job
            state is set.
        """
        is_two_pass = len(self.job.job_options) > 1
        out_fobj = tempfile.NamedTemporaryFile(mode='w+b', delete=True)

        pass_counter = 1
        for encoding_pass_options in self.job.job_options:
            is_second_pass = pass_counter == 2
            # Based on the given options, assemble the command list to
            # pass on to Popen.
            ffmpeg_cmd = self.__assemble_ffmpeg_cmd_list(
                             encoding_pass_options,
                             fobj, out_fobj,
                             is_two_pass=is_two_pass,
                             is_second_pass=is_second_pass)

            # Do this for ffmpeg's sake. Allows more than one concurrent
            # encoding job per EC2 instance.
            os.chdir(self.temp_cwd)
            # Fire up ffmpeg.
            process = subprocess.Popen(ffmpeg_cmd,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
            # Get back to home dir. Not sure this is necessary, but meh.
            os.chdir(os.path.expanduser('~'))
            # Block here while waiting for output
            cmd_output = process.communicate()

            # 0 is success, so anything but that is bad.
            error_happened = process.returncode != 0

            if error_happened:
                # Error found, return nothing so the nommer can die.
                logger.error(
                    message_or_obj="Error encountered while running ffmpeg.")
                logger.error(message_or_obj=cmd_output[0])
                logger.error(message_or_obj=cmd_output[1])
                self.wrapped_set_job_state('ERROR', details=cmd_output[1])
                return None

            move_atom = encoding_pass_options.get('move_atom_to_front', False)
            if move_atom:
                qtfaststart_cmd = self.__assemble_qtfaststart_cmd_list(out_fobj)
                process = subprocess.Popen(qtfaststart_cmd,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)
                cmd_output = process.communicate()
                error_happened = process.returncode != 0

                if error_happened:
                    # Error found, return nothing so the nommer can die.
                    logger.error(
                        message_or_obj="Error encountered while running qtfaststart.")
                    logger.error(message_or_obj=cmd_output[0])
                    logger.error(message_or_obj=cmd_output[1])
                    self.wrapped_set_job_state('ERROR', details=cmd_output[1])
                    return None

            if is_second_pass or not is_two_pass:
                # No errors, return the file object for uploading.
                out_fobj.seek(0)
                return out_fobj

            pass_counter += 1