Example #1
    def __init__(self, category, optional=0, load_default=1, **kwds):
        super(ComponentItem, self).__init__()
        kwds['category'] = category
        kwds['optional'] = optional
        kwds['load_default'] = load_default
        #kwds['getter'] = getter
        self._update(kwds, ComponentItem._forced)
        try:
            assert (implies(
                self['defvalue'] is None and not self['load_default'],
                self['optional']))
        except AssertionError:
            raise SchemaError(
                "A ComponentItem with no defvalue which is not loaded by default must be optional"
            )

        try:
            assert (implies(
                self['getter'], self['transient'] and self['defvalue'] is None
                and self['protected'] and not self['sequence']
                and not self['copyable']))
        except AssertionError:
            raise SchemaError(
                "A ComponentItem with a getter must be transient and protected, have no defvalue, and be neither a sequence nor copyable"
            )
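
Every example in this listing leans on implies() from Ganga.Utility.logic, which is never shown. Material implication is a one-liner; the following is a minimal sketch of what such a helper presumably looks like, not the shipped implementation:

def implies(premise, conclusion):
    # material implication: "premise -> conclusion" is false only when
    # the premise holds and the conclusion does not
    return not premise or conclusion

# the first assertion above then reads: "if there is no defvalue and the
# item is not loaded by default, it must be optional"
assert implies(False, False)       # vacuously true
assert not implies(True, False)    # the only falsifying case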
Example #2
    def __init__(self, category, optional=0, load_default=1, **kwds):
        super(ComponentItem, self).__init__()
        kwds['category'] = category
        kwds['optional'] = optional
        kwds['load_default'] = load_default
        #kwds['getter'] = getter
        self._update(kwds, forced=ComponentItem._forced)
        assert(implies(self['defvalue'] is None and not self['load_default'], self['optional']))

        assert(implies(self['getter'], self['transient'] and self['defvalue'] is None and self['protected'] and not self['sequence'] and not self['copyable']))
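
Examples #1 and #4 wrap these same assertions in try/except AssertionError blocks so they can be re-raised as SchemaError, while this variant asserts directly. A hedged alternative to that idiom (require is a hypothetical helper, not Ganga API) which also survives python -O, where assert statements are stripped:

def require(condition, message, exc=AssertionError):
    # raise exc(message) when the condition fails; unlike assert,
    # this check is not removed when running under python -O
    if not condition:
        raise exc(message)

# e.g. require(implies(item['getter'], item['transient']), "...", SchemaError)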
Example #3
    def __init__(self, category, optional=0, load_default=1, **kwds):
        Item.__init__(self)
        kwds['category'] = category
        kwds['optional'] = optional
        kwds['load_default'] = load_default
        #kwds['getter'] = getter
        self._update(kwds, forced=ComponentItem._forced)
        assert(implies(self['defvalue'] is None and not self['load_default'], self['optional']))

        assert(implies(self['getter'], self['transient'] and self['defvalue'] is None and self['protected'] and not self['sequence'] and not self['copyable']))
Example #4
    def __init__(self, category, optional=0, load_default=1, **kwds):
        super(ComponentItem, self).__init__()
        kwds['category'] = category
        kwds['optional'] = optional
        kwds['load_default'] = load_default
        #kwds['getter'] = getter
        self._update(kwds, ComponentItem._forced)
        try:
            assert(implies(self['defvalue'] is None and not self['load_default'], self['optional']))
        except AssertionError:
            raise SchemaError("ComponentItem has no defvalue, load_default or requirement to be optional")

        try:
            assert(implies(self['getter'], self['transient'] and self['defvalue'] is None and self['protected'] and not self['sequence'] and not self['copyable']))
        except AssertionError:
            raise SchemaError("There is no getter, transient flag or defvalue and the ComponentItem is protected, not a sequence or not copyable. This is not supported")
Example #5
    def convert_type(self, x_name):
        """ Convert the type of session_value or user_value (referred to by x_name) according to the types defined by the self.
        If the option has not been defined or the x_name in question is not defined, then this method is no-op.
        If conversion cannot be performed (type mismatch) then raise ConfigError.
        """

        try:
            value = getattr(self, x_name)
        except AttributeError:
            return

        logger = getLogger()

        # calculate the cast type; if it cannot be done then the option has not yet been defined (setDefaultValue)
        # in this case do not modify the value
        if self.typelist is not None:
            # in this case cast_type is a list of string dotnames (like for
            # typelist property in schemas)
            cast_type = self.typelist
        else:
            try:
                # in this case cast_type is a single type object
                cast_type = type(self.default_value)
            except AttributeError:
                return

        new_value = value

        optdesc = 'while setting option [.]%s = %s ' % (self.name, value)

        # eval string values only if the cast_type is not exactly a string
        if isinstance(value, str) and cast_type is not str:
            try:
                new_value = eval(value, config_scope)
                logger.debug('applied eval(%s) -> %s (%s)', value, new_value, optdesc)
            except Exception as x:
                logger.debug('ignored failed eval(%s): %s (%s)', value, x, optdesc)

        # check the type of the value unless cast_type is NoneType
        logger.debug('checking value type: %s (%s)', cast_type, optdesc)

        def check_type(x, t):
            return isinstance(x, t) or x is t

        # first we check using the same rules for typelist as for the GPI proxy
        # objects
        try:
            import Ganga.GPIDev.TypeCheck
            type_matched = Ganga.GPIDev.TypeCheck._valueTypeAllowed(new_value, cast_type, logger)
        except TypeError:  # cast_type is not a list
            type_matched = check_type(new_value, cast_type)

        from Ganga.Utility.logic import implies
        if not implies(cast_type is not type(None), type_matched):
            raise ConfigError('type mismatch: expected %s got %s (%s)' % (cast_type, type(new_value), optdesc))

        setattr(self, x_name, new_value)
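
The method above couples the conversion to the option object, but the core pattern (eval a string value, then accept the result only when the expected type is known and matches) stands alone. A minimal sketch of that pattern under stated assumptions: convert is a hypothetical name, and plain TypeError stands in for Ganga's ConfigError:

def convert(value, cast_type, scope=None):
    # eval-then-typecheck: cast_type=None means "type not yet known,
    # accept the value as-is"
    new_value = value
    if isinstance(value, str) and cast_type is not str:
        try:
            new_value = eval(value, scope or {})
        except Exception:
            pass  # keep the raw string if it does not evaluate
    if cast_type is not None and not isinstance(new_value, cast_type):
        raise TypeError('type mismatch: expected %s got %s'
                        % (cast_type, type(new_value)))
    return new_value

print(convert('42', int))        # -> 42
print(convert('[1, 2]', list))   # -> [1, 2]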
Example #6
def adddoc(name, object, doc_section, docstring):
    '''
    Add automatic documentation to gangadoc system.
    "doc_section" specifies how the object should be documented.
    If docstring is specified then use it to document the object. Otherwise use __doc__ (via pydoc utilities).
    '''
    from Ganga.Utility.logic import implies
    assert(implies(docstring, doc_section == "Objects"))
    #assert(not docstring and not object.__doc__)

    _GPIhelp_sections[doc_section] += [(name, object, docstring)]
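
adddoc() relies on a module-level _GPIhelp_sections registry that is not part of the snippet. Judging by the += on a keyed list, it maps a section name to a list of (name, object, docstring) tuples; a sketch of that assumed shape:

from collections import defaultdict

# assumed shape of the registry; the real _GPIhelp_sections in Ganga
# may be initialised with a fixed set of section keys instead
_GPIhelp_sections = defaultdict(list)

_GPIhelp_sections["Objects"] += [("Job", object, "A computational job.")]
print(_GPIhelp_sections["Objects"])   # [('Job', <class 'object'>, 'A computational job.')]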
Example #7
    def master_bulk_submit(self, rjobs, subjobconfigs, masterjobconfig):
        '''submit multiple subjobs in parallel, by default using 10 concurrent threads'''

        from Ganga.Utility.logic import implies
        assert (implies(rjobs, len(subjobconfigs) == len(rjobs)))

        # prepare the subjobs, jdl repository before bulk submission
        node_jdls = self.__mt_job_prepare__(rjobs, subjobconfigs,
                                            masterjobconfig)

        if not node_jdls:
            logger.error('Some jobs not successfully prepared')
            return False

        # set all subjobs to submitting status
        for sj in rjobs:
            sj.updateStatus('submitting')

        node_jids = self.__mt_bulk_submit__(node_jdls)

        status = False

        if node_jids:
            for sj in rjobs:
                if sj.id in node_jids:
                    sj.backend.id = node_jids[sj.id]
                    sj.backend.CE = self.CE
                    sj.backend.actualCE = sj.backend.CE
                    sj.updateStatus('submitted')
                    sj.info.submit_counter += 1
                else:
                    logger.warning('subjob %s not successfully submitted' %
                                   sj.getFQID('.'))

            status = True

        return status
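
After the bulk call returns, the loop above reconciles node_jids (a mapping of subjob id to backend id) against the subjob list: matched subjobs record their backend id and move to submitted, unmatched ones are only warned about. The same reconciliation in isolation, with illustrative names:

def reconcile(subjob_ids, node_jids):
    # split subjob ids into submitted/missed according to the id map
    # returned by the bulk submitter
    submitted = {sid: node_jids[sid] for sid in subjob_ids if sid in node_jids}
    missed = [sid for sid in subjob_ids if sid not in node_jids]
    return submitted, missed

print(reconcile([0, 1, 2], {0: 'gsiftp://ce/0', 2: 'gsiftp://ce/2'}))
# -> ({0: 'gsiftp://ce/0', 2: 'gsiftp://ce/2'}, [1])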
Example #8
    def master_bulk_submit(self, rjobs, subjobconfigs, masterjobconfig):
        '''submit multiple subjobs in parallel, by default using 10 concurrent threads'''

        from Ganga.Utility.logic import implies
        assert(implies(rjobs, len(subjobconfigs) == len(rjobs)))

        # prepare the subjobs, jdl repository before bulk submission
        node_jdls = self.__mt_job_prepare__(
            rjobs, subjobconfigs, masterjobconfig)

        if not node_jdls:
            logger.error('Some jobs not successfully prepared')
            return False

        # set all subjobs to submitting status
        for sj in rjobs:
            sj.updateStatus('submitting')

        node_jids = self.__mt_bulk_submit__(node_jdls)

        status = False

        if node_jids:
            for sj in rjobs:
                if sj.id in node_jids:
                    sj.backend.id = node_jids[sj.id]
                    sj.backend.CE = self.CE
                    sj.backend.actualCE = sj.backend.CE
                    sj.updateStatus('submitted')
                    sj.info.submit_counter += 1
                else:
                    logger.warning(
                        'subjob %s not successfully submitted' % sj.getFQID('.'))

            status = True

        return status
Example #9
    def master_submit(self, rjobs, subjobconfigs, masterjobconfig, keep_going=False, parallel_submit=False):
        """  Submit   the  master  job  and  all   its  subjobs.   The
        masterjobconfig  is  shared,  individual  subjob  configs  are
        defined  in  subjobconfigs.   Submission  of  individual  jobs
        (not-split) also  always goes via  this method.  In  that case
        the subjobconfigs contains just one element - the job itself.

        The default  implementation of  this method emulates  the bulk
        submission  calling  a submit()  method  on individual  subjob
        objects.  If submission  of any of the subjobs  fails then the
        whole process is aborted with an IncompleteJobSubmissionError
        exception. The subjobs which  have already been submitted stay
        submitted.

        The default implementation does not process the masterjobconfig.
        Therefore this method may be overridden in the derived class
        in the following way:

        def master_submit(self, rjobs, subjobconfigs, masterjobconfig, keep_going):
           ...
           do_some_processing_of(masterjobconfig)
           ...
           return IBackend.master_submit(self, rjobs, subjobconfigs, masterjobconfig, keep_going)


        Implementation note: we set keep_going to be optional in the
        signature of IBackend.master_submit() to allow the existing
        backend implementations which do not support keep_going=True
        and which at some point may call IBackend.master_submit() to
        work without change. It may sometimes be non-trivial to enable
        support for keep_going=True in some backends, even if they
        finally call IBackend.master_submit(). Therefore it is left to
        the backend developer to explicitly enable support for the
        keep_going flag.

        """
        from Ganga.Core import IncompleteJobSubmissionError, GangaException
        from Ganga.Utility.logging import log_user_exception

        job = self.getJobObject()
        logger.debug("SubJobConfigs: %s" % len(subjobconfigs))
        logger.debug("rjobs: %s" % len(rjobs))
        assert(implies(rjobs, len(subjobconfigs) == len(rjobs)))

        incomplete = 0
        incomplete_subjobs = []

        def handleError(x):
            if keep_going:
                incomplete_subjobs.append(fqid)
                return False
            else:
                if incomplete:
                    raise x
                else:
                    return True

        master_input_sandbox = self.master_prepare(masterjobconfig)

        if parallel_submit:

            from Ganga.GPI import queues

            threads_before = queues.totalNumIntThreads()

            for sc, sj in zip(subjobconfigs, rjobs):

                fqid = sj.getFQID('.')
                b = sj.backend
                # FIXME would be nice to move this to the internal threads not user ones
                #from Ganga.GPIDev.Base.Proxy import stripProxy
                #all_queues = stripProxy(queues)
                #all_queues._addSystem( self._parallel_submit, ( b, sj, sc, master_input_sandbox, fqid, logger ) )
                queues._monitoring_threadpool.add_function(self._parallel_submit, (b, sj, sc, master_input_sandbox, fqid, logger))

            def subjob_status_check(rjobs):
                has_submitted = True
                for sj in rjobs:
                    if sj.status not in ["submitted","failed","completed","running","completing"]:
                        has_submitted = False
                        break
                return has_submitted

            while not subjob_status_check(rjobs):
                import time
                time.sleep(1.)

            for i in rjobs:
                if i.status in ["new", "failed"]:
                    return 0
            return 1

        for sc, sj in zip(subjobconfigs, rjobs):

            fqid = sj.getFQID('.')
            logger.info("submitting job %s to %s backend", fqid, sj.backend._name)
            try:
                b = sj.backend
                sj.updateStatus('submitting')
                if b.submit(sc, master_input_sandbox):
                    sj.updateStatus('submitted')
                    # sj._commit() # PENDING: TEMPORARY DISABLED
                    incomplete = 1
                    sj.info.increment()
                else:
                    if handleError(IncompleteJobSubmissionError(fqid, 'submission failed')):
                        return 0
            except Exception as x:
                #sj.updateStatus('new')
                if isinstance(x, GangaException):
                    logger.error(str(x))
                    log_user_exception(logger, debug=True)
                else:
                    log_user_exception(logger, debug=False)
                if handleError(IncompleteJobSubmissionError(fqid, str(x))):
                    return 0

        if incomplete_subjobs:
            raise IncompleteJobSubmissionError(
                incomplete_subjobs, 'submission failed')

        return 1
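
The parallel branch above pushes each submission onto a worker pool, then polls the subjob statuses once per second until all of them settle, and finally reports failure if any subjob is still new or failed. A self-contained sketch of that fire-and-poll pattern; concurrent.futures stands in for Ganga's queues here:

import time
from concurrent.futures import ThreadPoolExecutor

def submit_all(tasks, submit_fn, poll=1.0):
    # status per task: 'submitting' until its submit_fn call finishes
    status = {t: 'submitting' for t in tasks}

    def run(t):
        try:
            status[t] = 'submitted' if submit_fn(t) else 'failed'
        except Exception:
            status[t] = 'failed'

    with ThreadPoolExecutor(max_workers=10) as pool:
        for t in tasks:
            pool.submit(run, t)
        while any(s == 'submitting' for s in status.values()):
            time.sleep(poll)

    return 0 if any(s == 'failed' for s in status.values()) else 1

print(submit_all([1, 2, 3], lambda t: t != 2, poll=0.1))  # -> 0 (task 2 fails)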
Example #10
        import types
        def check_type(x, t):
            return type(x) is t or x is t
        
        type_matched = False

        # first we check using the same rules for typelist as for the GPI proxy objects
        try:
            import Ganga.GPIDev.TypeCheck
            type_matched = Ganga.GPIDev.TypeCheck._valueTypeAllowed(new_value, cast_type, logger)
        except TypeError:  # cast_type is not a list
            type_matched = check_type(new_value, cast_type)

        from Ganga.Utility.logic import implies
        if not implies(cast_type is not type(None), type_matched):
            raise ConfigError('type mismatch: expected %s got %s (%s)' % (str(cast_type), str(type(new_value)), optdesc))

        setattr(self, x_name, new_value)

# indicate if the GPI proxies for the configuration have been created
_after_bootstrap = False
    
# Scope used by eval when reading-in the configuration.
# Symbols defined in this scope will be correctly evaluated. For example, the File class adds itself here.
# This dictionary may also be used by other parts of the system, e.g. XML repository.
config_scope = {}

class PackageConfig:
    """ Package  Config object  represents a  Configuration  Unit (typically
    related to Ganga Packages). It should not be created directly
Example #11
    def master_submit(self,
                      rjobs,
                      subjobconfigs,
                      masterjobconfig,
                      keep_going=False,
                      parallel_submit=False):
        """  Submit   the  master  job  and  all   its  subjobs.   The
        masterjobconfig  is  shared,  individual  subjob  configs  are
        defined  in  subjobconfigs.   Submission  of  individual  jobs
        (not-split) also  always goes via  this method.  In  that case
        the subjobconfigs contains just one element - the job itself.

        The default  implementation of  this method emulates  the bulk
        submission  calling  a submit()  method  on individual  subjob
        objects.  If submission  of any of the subjobs  fails then the
        whole process is aborted with an IncompleteJobSubmissionError
        exception. The subjobs which  have already been submitted stay
        submitted.

        The default implementation does not process the masterjobconfig.
        Therefore this method may be overridden in the derived class
        in the following way:

        def master_submit(self, rjobs, subjobconfigs, masterjobconfig, keep_going):
           ...
           do_some_processing_of(masterjobconfig)
           ...
           return IBackend.master_submit(self, rjobs, subjobconfigs, masterjobconfig, keep_going)


        Implementation note: we set keep_going to be optional in the
        signature of IBackend.master_submit() to allow the existing
        backend implementations which do not support keep_going=True
        and which at some point may call IBackend.master_submit() to
        work without change. It may sometimes be non-trivial to enable
        support for keep_going=True in some backends, even if they
        finally call IBackend.master_submit(). Therefore it is left to
        the backend developer to explicitly enable support for the
        keep_going flag.

        """
        from Ganga.Core import IncompleteJobSubmissionError, GangaException
        from Ganga.Utility.logging import log_user_exception

        logger.debug("SubJobConfigs: %s" % len(subjobconfigs))
        logger.debug("rjobs: %s" % len(rjobs))
        assert (implies(rjobs, len(subjobconfigs) == len(rjobs)))

        incomplete = 0
        incomplete_subjobs = []

        def handleError(x):
            if keep_going:
                incomplete_subjobs.append(fqid)
                return False
            else:
                if incomplete:
                    raise x
                else:
                    return True

        master_input_sandbox = self.master_prepare(masterjobconfig)

        if parallel_submit:

            from Ganga.GPI import queues

            threads_before = queues.totalNumIntThreads()

            for sc, sj in zip(subjobconfigs, rjobs):

                fqid = sj.getFQID('.')
                b = sj.backend
                # FIXME would be nice to move this to the internal threads not user ones
                #from Ganga.GPIDev.Base.Proxy import stripProxy
                #all_queues = stripProxy(queues)
                #all_queues._addSystem( self._parallel_submit, ( b, sj, sc, master_input_sandbox, fqid, logger ) )
                queues._monitoring_threadpool.add_function(
                    self._parallel_submit,
                    (b, sj, sc, master_input_sandbox, fqid, logger))

            def subjob_status_check(rjobs):
                has_submitted = True
                for sj in rjobs:
                    if sj.status not in [
                            "submitted", "failed", "completed", "running",
                            "completing"
                    ]:
                        has_submitted = False
                        break
                return has_submitted

            while not subjob_status_check(rjobs):
                import time
                time.sleep(1.)

            for i in rjobs:
                if i.status in ["new", "failed"]:
                    return 0
            return 1

        for sc, sj in zip(subjobconfigs, rjobs):

            fqid = sj.getFQID('.')
            logger.info("submitting job %s to %s backend", fqid,
                        getName(sj.backend))
            try:
                b = stripProxy(sj.backend)
                sj.updateStatus('submitting')
                if b.submit(sc, master_input_sandbox):
                    sj.updateStatus('submitted')
                    # sj._commit() # PENDING: TEMPORARY DISABLED
                    incomplete = 1
                    stripProxy(sj.info).increment()
                else:
                    if handleError(
                            IncompleteJobSubmissionError(
                                fqid, 'submission failed')):
                        return 0
            except Exception as x:
                #sj.updateStatus('new')
                if isType(x, GangaException):
                    logger.error(str(x))
                    log_user_exception(logger, debug=True)
                else:
                    log_user_exception(logger, debug=False)
                if handleError(IncompleteJobSubmissionError(fqid, str(x))):
                    return 0

        if incomplete_subjobs:
            raise IncompleteJobSubmissionError(incomplete_subjobs,
                                               'submission failed')

        return 1
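
This variant reaches the underlying objects through stripProxy(), isType() and getName() from Ganga.GPIDev.Base.Proxy instead of touching the GPI proxies directly. Those helpers are not shown; a rough sketch of the unwrapping idiom they suggest (the _impl attribute is an assumption about how the proxy stores its target):

def stripProxy(obj):
    # assumption: a GPI proxy keeps the wrapped object in _impl;
    # non-proxy objects pass through unchanged
    return getattr(obj, '_impl', obj)

def getName(obj):
    # name of the underlying class, proxy or not
    return type(stripProxy(obj)).__name__

def isType(obj, types):
    return isinstance(stripProxy(obj), types)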
Example #12
    def master_submit(self, rjobs, subjobconfigs, masterjobconfig, keep_going=False, parallel_submit=False):
        """  Submit   the  master  job  and  all   its  subjobs.   The
        masterjobconfig  is  shared,  individual  subjob  configs  are
        defined  in  subjobconfigs.   Submission  of  individual  jobs
        (not-split) also  always goes via  this method.  In  that case
        the subjobconfigs contains just one element - the job itself.

        The default  implementation of  this method emulates  the bulk
        submission  calling  a submit()  method  on individual  subjob
        objects.  If submission  of any of the subjobs  fails then the
        whole process is aborted with an IncompleteJobSubmissionError
        exception. The subjobs which  have already been submitted stay
        submitted.

        The default implementation does not process the masterjobconfig.
        Therefore this method may be overridden in the derived class
        in the following way:

        def master_submit(self, rjobs, subjobconfigs, masterjobconfig, keep_going):
           ...
           do_some_processing_of(masterjobconfig)
           ...
           return IBackend.master_submit(self, rjobs, subjobconfigs, masterjobconfig, keep_going)


        Implementation note: we set keep_going to be optional in the
        signature of IBackend.master_submit() to allow the existing
        backend implementations which do not support keep_going=True
        and which at some point may call IBackend.master_submit() to
        work without change. It may sometimes be non-trivial to enable
        support for keep_going=True in some backends, even if they
        finally call IBackend.master_submit(). Therefore it is left to
        the backend developer to explicitly enable support for the
        keep_going flag.

        """
        from Ganga.Utility.logging import log_user_exception

        logger.debug("SubJobConfigs: %s" % len(subjobconfigs))
        logger.debug("rjobs: %s" % len(rjobs))
        assert(implies(rjobs, len(subjobconfigs) == len(rjobs)))

        incomplete = 0
        incomplete_subjobs = []

        def handleError(x):
            if keep_going:
                incomplete_subjobs.append(fqid)
                return False
            else:
                if incomplete:
                    raise x
                else:
                    return True

        master_input_sandbox = self.master_prepare(masterjobconfig)
        # Shall we submit in parallel
        if parallel_submit:

            from Ganga.Core.GangaThread.WorkerThreads import getQueues

            threads_before = getQueues().totalNumIntThreads()

            for sc, sj in zip(subjobconfigs, rjobs):

                b = sj.backend

                # Must check for credentials here as we cannot handle missing credentials on Queues by design!
                if hasattr(b, 'credential_requirements') and b.credential_requirements is not None:
                    from Ganga.GPIDev.Credentials.CredentialStore import credential_store
                    try:
                        cred = credential_store[b.credential_requirements]
                    except GangaKeyError:
                        credential_store.create(b.credential_requirements)

                fqid = sj.getFQID('.')
                # FIXME would be nice to move this to the internal threads not user ones
                getQueues()._monitoring_threadpool.add_function(self._parallel_submit, (b, sj, sc, master_input_sandbox, fqid, logger), callback_func = self._successfulSubmit, callback_args = (sj, incomplete_subjobs))

            def subjob_status_check(rjobs):
                has_submitted = True
                for sj in rjobs:
                    if sj.status not in ["submitted","failed","completed","running","completing"] and sj.getFQID('.') not in incomplete_subjobs:
                        has_submitted = False
                        break
                return has_submitted

            while not subjob_status_check(rjobs):
                import time
                time.sleep(1.)

            if incomplete_subjobs:
                raise IncompleteJobSubmissionError(
                    incomplete_subjobs, 'submission failed for subjobs %s' % incomplete_subjobs)
            return 1

        # Alternatively submit sequentially
        for sc, sj in zip(subjobconfigs, rjobs):

            fqid = sj.getFQID('.')
            logger.info("submitting job %s to %s backend", fqid, getName(sj.backend))
            try:
                b = stripProxy(sj.backend)
                sj.updateStatus('submitting')
                if b.submit(sc, master_input_sandbox):
                    sj.updateStatus('submitted')
                    # sj._commit() # PENDING: TEMPORARY DISABLED
                    incomplete = 1
                    stripProxy(sj.info).increment()
                else:
                    if handleError(IncompleteJobSubmissionError(fqid, 'submission failed')):
                        raise IncompleteJobSubmissionError(fqid, 'submission failed')
            except Exception as x:
                sj.updateStatus('new')
                if isType(x, GangaException):
                    logger.error("%s" % x)
                    log_user_exception(logger, debug=True)
                else:
                    log_user_exception(logger, debug=False)
                raise IncompleteJobSubmissionError(fqid, 'submission failed')

        return 1
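
Compared with the earlier variants, this one adds a guard before queueing: each backend's credential_requirements is looked up in credential_store and created on a miss, because a missing credential cannot be handled once the task is already on the queue. The check-or-create idiom in isolation; CredentialStore here is a placeholder, and plain KeyError stands in for GangaKeyError:

class CredentialStore(dict):
    # placeholder for Ganga's credential_store
    def create(self, requirement):
        self[requirement] = 'credential-for-%s' % requirement
        return self[requirement]

def ensure_credential(store, requirement):
    try:
        return store[requirement]         # fast path: already cached
    except KeyError:                      # GangaKeyError in the real code
        return store.create(requirement)  # create it before queueing work

store = CredentialStore()
print(ensure_credential(store, 'voms-proxy'))   # -> 'credential-for-voms-proxy'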