Example no. 1
    def removeDirectory(self, path, recursive=False):
        """Not available on Echo

      :returns: S_ERROR
    """
        return S_ERROR("Removing directory does not exist in Echo")
Example no. 2
    def generateProxyToString(self,
                              lifeTime,
                              diracGroup=False,
                              strength=1024,
                              limited=False,
                              rfc=False,
                              proxyKey=False):
        """
    Generate a proxy and get it as a string

    Args:
        lifeTime (int): expected lifetime in seconds of proxy
        diracGroup (str): diracGroup to add to the certificate
        strength (int): length in bits of the pair
        limited (bool): Create a limited proxy

    """
        if not self.__loadedChain:
            return S_ERROR(DErrno.ENOCHAIN)
        if not self.__loadedPKey:
            return S_ERROR(DErrno.ENOPKEY)

        if self.__isProxy:
            rfc = self.isRFC().get('Value', False)

        issuerCert = self.__certList[0]

        if not proxyKey:
            proxyKey = crypto.PKey()
            proxyKey.generate_key(crypto.TYPE_RSA, strength)

        proxyCert = crypto.X509()

        if rfc:
            proxyCert.set_serial_number(str(int(random.random() * 10**10)))
            cloneSubject = issuerCert.get_subject().clone()
            cloneSubject.insert_entry("CN", str(int(random.random() * 10**10)))
            proxyCert.set_subject(cloneSubject)
            proxyCert.add_extensions(
                self.__getProxyExtensionList(diracGroup, rfc and not limited,
                                             rfc and limited))
        else:
            proxyCert.set_serial_number(issuerCert.get_serial_number())
            cloneSubject = issuerCert.get_subject().clone()
            if limited:
                cloneSubject.insert_entry("CN", "limited proxy")
            else:
                cloneSubject.insert_entry("CN", "proxy")
            proxyCert.set_subject(cloneSubject)
            proxyCert.add_extensions(self.__getProxyExtensionList(diracGroup))

        proxyCert.set_issuer(issuerCert.get_subject())
        proxyCert.set_version(issuerCert.get_version())
        proxyCert.set_pubkey(proxyKey)
        proxyCert.gmtime_adj_notBefore(-900)
        proxyCert.gmtime_adj_notAfter(int(lifeTime))
        proxyCert.sign(self.__keyObj, 'sha256')

        proxyString = "%s%s" % (crypto.dump_certificate(
            crypto.FILETYPE_PEM,
            proxyCert), crypto.dump_privatekey(crypto.FILETYPE_PEM, proxyKey))
        for i in range(len(self.__certList)):
            proxyString += crypto.dump_certificate(crypto.FILETYPE_PEM,
                                                   self.__certList[i])

        return S_OK(proxyString)
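
A hedged usage sketch of the method above (chain is assumed to be a DIRAC X509Chain already loaded with a certificate and key; the group name is made up):

# chain = X509Chain(); chain.loadChainFromFile(certFile); chain.loadKeyFromFile(keyFile)
res = chain.generateProxyToString(lifeTime=12 * 3600,
                                  diracGroup='dirac_user',  # hypothetical group
                                  strength=2048,
                                  rfc=True)
if res['OK']:
    proxyPem = res['Value']  # PEM proxy cert + key + issuer chain, as assembled above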
Example no. 3
            sirIDToCheck = []
            while openSIRs:
                sir = openSIRs.pop()
                self.log.verbose("SIR %s is in state %s" % (sir.id, sir.state))
                if sir.state == u'active' and 'instanceId' in dir(sir):
                    self.log.verbose("SIR %s has instance %s" %
                                     (sir.id, sir.instanceId))
                    idList.append(sir.instanceId)
                elif sir.state == u'closed':
                    invalidSIRs.append(sir.id)
                else:
                    sirIDToCheck.append(sir.id)

        if idList:
            return S_OK(idList)
        return S_ERROR("Could not start any spot instance. Failed SIRs : %s" %
                       ", ".join(invalidSIRs))

    """
  Simple call to terminate a VM based on its id
  """

    def stopInstances(self, instancesList):
        if type(instancesList) in (types.StringType, types.UnicodeType):
            instancesList = [instancesList]
        self.__conn.terminate_instances(instancesList)

    """
  Get all instances for this image
  """

    def getAllInstances(self):
Example no. 4
  def downloadSandbox( self, sbLocation, destinationDir = "", inMemory = False, unpack = True ):
    """
    Download a sandbox file and keep it in bundled form
    """
    if sbLocation.find( "SB:" ) != 0:
      return S_ERROR( "Invalid sandbox URL" )
    sbLocation = sbLocation[ 3: ]
    sbSplit = sbLocation.split( "|" )
    if len( sbSplit ) < 2:
      return S_ERROR( "Invalid sandbox URL" )
    SEName = sbSplit[0]
    SEPFN = "|".join( sbSplit[1:] )
    # If the destination dir is not specified, use the current working dir
    # If it's defined, ensure the dir structure is there
    if not destinationDir:
      destinationDir = os.getcwd()
    else:
      mkDir(destinationDir)

    try:
      tmpSBDir = tempfile.mkdtemp( prefix = "TMSB." )
    except Exception as e:
      return S_ERROR( "Cannot create temporary file: %s" % str( e ) )

    se = StorageElement( SEName, vo = self.__vo )
    result = returnSingleResult( se.getFile( SEPFN, localPath = tmpSBDir ) )

    if not result[ 'OK' ]:
      return result
    sbFileName = os.path.basename( SEPFN )

    result = S_OK()
    tarFileName = os.path.join( tmpSBDir, sbFileName )

    if inMemory:
      try:
        tfile = open( tarFileName, 'r' )
        data = tfile.read()
        tfile.close()
        os.unlink( tarFileName )
        os.rmdir( tmpSBDir )
      except Exception as e:
        os.unlink( tarFileName )
        os.rmdir( tmpSBDir )
        return S_ERROR( 'Failed to read the sandbox archive: %s' % str( e ) )
      return S_OK( data )

    if not unpack:
      result[ 'Value' ] = tarFileName
      return result

    try:
      sandboxSize = 0
      tf = tarfile.open( name = tarFileName, mode = "r" )
      for tarinfo in tf:
        tf.extract( tarinfo, path = destinationDir )
        sandboxSize += tarinfo.size
      tf.close()
      result[ 'Value' ] = sandboxSize
    except Exception as e:
      result = S_ERROR( "Could not open bundle: %s" % str( e ) )

    try:
      os.unlink( tarFileName )
      os.rmdir( tmpSBDir )
    except Exception as e:
      gLogger.warn( "Could not remove temporary dir %s: %s" % ( tmpSBDir, str( e ) ) )

    return result
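
A hedged usage sketch (SandboxStoreClient is the DIRAC class this method appears to belong to; the sandbox URL and paths are made up):

client = SandboxStoreClient()
res = client.downloadSandbox("SB:SomeSE|/SandBox/u/user/abc.tar.bz2",
                             destinationDir="/tmp/mysandbox")
if res['OK']:
    print("Unpacked sandbox, %s bytes" % res['Value'])  # size when unpack=True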
Example no. 5
  def uploadFilesAsSandbox( self, fileList, sizeLimit = 0, assignTo = None ):
    """ Send files in the fileList to a Sandbox service for the given jobID.
        This is the preferred method to upload sandboxes.

        a fileList item can be:
          - a string, which is an LFN name
          - a file name (real), that is supposed to be on disk, in the current directory
          - a fileObject that should be a StringIO.StringIO type of object

        Parameters:
          - assignTo : Dict containing { 'Job:<jobid>' : '<sbType>', ... }
    """
    # Avoid the mutable default argument pitfall
    if assignTo is None:
      assignTo = {}
    errorFiles = []
    files2Upload = []

    for key in assignTo:
      if assignTo[ key ] not in self.__validSandboxTypes:
        return S_ERROR( "Invalid sandbox type %s" % assignTo[ key ] )

    if not isinstance( fileList, ( list, tuple ) ):
      return S_ERROR( "fileList must be a list or tuple!" )

    for sFile in fileList:
      if isinstance( sFile, basestring ):
        if re.search( '^lfn:', sFile, flags = re.IGNORECASE ):
          pass
        else:
          if os.path.exists( sFile ):
            files2Upload.append( sFile )
          else:
            errorFiles.append( sFile )

      elif isinstance( sFile, StringIO.StringIO ):
        files2Upload.append( sFile )
      else:
        return S_ERROR("Objects of type %s can't be part of InputSandbox" % type( sFile ) )

    if errorFiles:
      return S_ERROR( "Failed to locate files: %s" % ", ".join( errorFiles ) )

    try:
      fd, tmpFilePath = tempfile.mkstemp( prefix = "LDSB." )
      os.close( fd )
    except Exception as e:
      return S_ERROR( "Cannot create temporary file: %s" % str( e ) )

    with tarfile.open( name = tmpFilePath, mode = "w|bz2" ) as tf:
      for sFile in files2Upload:
        if isinstance( sFile, basestring ):
          tf.add( os.path.realpath( sFile ), os.path.basename( sFile ), recursive = True )
        elif isinstance( sFile, StringIO.StringIO ):
          tarInfo = tarfile.TarInfo( name = 'jobDescription.xml' )
          # getvalue() includes data still in the write buffer, unlike .buf
          tarInfo.size = len( sFile.getvalue() )
          tf.addfile( tarinfo = tarInfo, fileobj = sFile )

    if sizeLimit > 0:
      # Evaluate the compressed size of the sandbox
      if getGlobbedTotalSize( tmpFilePath ) > sizeLimit:
        result = S_ERROR( "Size over the limit" )
        result[ 'SandboxFileName' ] = tmpFilePath
        return result

    oMD5 = hashlib.md5()
    with open( tmpFilePath, "rb" ) as fd:
      bData = fd.read( 10240 )
      while bData:
        oMD5.update( bData )
        bData = fd.read( 10240 )

    transferClient = self.__getTransferClient()
    result = transferClient.sendFile( tmpFilePath, ( "%s.tar.bz2" % oMD5.hexdigest(), assignTo ) )
    result[ 'SandboxFileName' ] = tmpFilePath
    try:
      if result['OK']:
        os.unlink( tmpFilePath )
    except OSError:
      pass
    return result
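
A hedged usage sketch of the upload (file names, job ID and sandbox type are made up; "Input" follows the 'Job:<jobid>' : '<sbType>' pattern documented above):

import StringIO

client = SandboxStoreClient()
jobXML = StringIO.StringIO("<job/>")  # stored as jobDescription.xml in the tar
res = client.uploadFilesAsSandbox(["input.txt", jobXML],
                                  sizeLimit=10 * 1024 * 1024,
                                  assignTo={"Job:1234": "Input"})
if not res['OK']:
    print("Upload failed: %s" % res['Message'])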
Example no. 6
    def _treatOperation(self, operation):
        """ Treat one operation:
          * does the callback if the operation is finished
          * generate new jobs and submits them

          :param operation: the operation to treat
          :param threadId: the id of the tread, it just has to be unique (used for the context cache)
    """
        # Define the logger before the try block so the except handler can use it
        threadID = current_process().name
        log = gLogger.getSubLogger("treatOperation/%s" %
                                   operation.operationID,
                                   child=True)
        try:

            # If the operation is totally processed
            # we perform the callback
            if operation.isTotallyProcessed():
                log.debug("FTS3Operation %s is totally processed" %
                          operation.operationID)
                res = operation.callback()

                if not res['OK']:
                    log.error("Error performing the callback", res)
                    log.info("Putting back the operation")
                    dbRes = self.fts3db.persistOperation(operation)

                    if not dbRes['OK']:
                        log.error("Could not persist operation", dbRes)

                    return operation, res

            else:
                log.debug("FTS3Operation %s is not totally processed yet" %
                          operation.operationID)

                res = operation.prepareNewJobs(
                    maxFilesPerJob=self.maxFilesPerJob,
                    maxAttemptsPerFile=self.maxAttemptsPerFile)

                if not res['OK']:
                    log.error(
                        "Cannot prepare new Jobs",
                        "FTS3Operation %s : %s" % (operation.operationID, res))
                    return operation, res

                newJobs = res['Value']

                log.debug("FTS3Operation %s: %s new jobs to be submitted" %
                          (operation.operationID, len(newJobs)))

                for ftsJob in newJobs:
                    res = self._serverPolicy.chooseFTS3Server()
                    if not res['OK']:
                        log.error(res)
                        continue

                    ftsServer = res['Value']
                    log.debug("Use %s server" % ftsServer)

                    ftsJob.ftsServer = ftsServer

                    res = self.getFTS3Context(ftsJob.username,
                                              ftsJob.userGroup,
                                              ftsServer,
                                              threadID=threadID)

                    if not res['OK']:
                        log.error("Could not get context", res)
                        continue

                    context = res['Value']
                    res = ftsJob.submit(context=context,
                                        protocols=self.thirdPartyProtocols)

                    if not res['OK']:
                        log.error(
                            "Could not submit FTS3Job",
                            "FTS3Operation %s : %s" %
                            (operation.operationID, res))
                        continue

                    operation.ftsJobs.append(ftsJob)

                    submittedFileIds = res['Value']
                    log.info(
                        "FTS3Operation %s: Submitted job for %s transfers" %
                        (operation.operationID, len(submittedFileIds)))

                # new jobs are put in the DB at the same time
            res = self.fts3db.persistOperation(operation)

            if not res['OK']:
                log.error("Could not persist operation", res)

            return operation, res

        except Exception as e:
            log.exception('Exception in the thread', repr(e))
            return operation, S_ERROR("Exception %s" % repr(e))
Example no. 7
 def __setSectionByCmd(self, value):
     if value[0] != "/":
         return S_ERROR(
             "%s is not a valid section. It should start with '/'" % value)
     self.currentSectionPath = value
     return S_OK()
Example no. 8
    def _monitorProxy(self, pilotProxy, payloadProxy):
        """Base class for the monitor and update of the payload proxy, to be used in
      derived classes for the basic renewal of the proxy, if further actions are
      necessary they should be implemented there
    """
        retVal = getProxyInfo(payloadProxy)
        if not retVal['OK']:
            self.log.error('Could not get payload proxy info', retVal)
            return retVal
        self.log.verbose('Payload Proxy information:\n%s' %
                         formatProxyInfoAsString(retVal['Value']))

        payloadProxyDict = retVal['Value']
        payloadSecs = payloadProxyDict['chain'].getRemainingSecs()['Value']
        if payloadSecs > self.minProxyTime:
            self.log.verbose('No need to renew payload Proxy')
            return S_OK()

        # if there is no pilot proxy, assume there is a certificate and try a renewal
        if not pilotProxy:
            self.log.info(
                'Using default credentials to get a new payload Proxy')
            return gProxyManager.renewProxy(
                proxyToBeRenewed=payloadProxy,
                minLifeTime=self.minProxyTime,
                newProxyLifeTime=self.defaultProxyTime,
                proxyToConnect=pilotProxy)

        # if there is pilot proxy
        retVal = getProxyInfo(pilotProxy)
        if not retVal['OK']:
            return retVal
        pilotProxyDict = retVal['Value']

        if 'groupProperties' not in pilotProxyDict:
            self.log.error('Invalid Pilot Proxy',
                           'Group has no properties defined')
            return S_ERROR('Proxy has no group properties defined')

        pilotProps = pilotProxyDict['groupProperties']

        # if running with a pilot proxy, use it to renew the proxy of the payload
        if Properties.PILOT in pilotProps or Properties.GENERIC_PILOT in pilotProps:
            self.log.info('Using Pilot credentials to get a new payload Proxy')
            return gProxyManager.renewProxy(
                proxyToBeRenewed=payloadProxy,
                minLifeTime=self.minProxyTime,
                newProxyLifeTime=self.defaultProxyTime,
                proxyToConnect=pilotProxy)

        # if we are running with another type of proxy, check whether it is for
        # the same user and group, and copy the pilot proxy if necessary

        self.log.info('Trying to copy pilot Proxy to get a new payload Proxy')
        pilotProxySecs = pilotProxyDict['chain'].getRemainingSecs()['Value']
        if pilotProxySecs <= payloadSecs:
            errorStr = 'Pilot Proxy is not longer than payload Proxy'
            self.log.error(errorStr)
            return S_ERROR('Can not renew by copy: %s' % errorStr)

        # check if both proxies belong to the same user and group
        pilotDN = pilotProxyDict['chain'].getIssuerCert()['Value'].getSubjectDN()['Value']
        retVal = pilotProxyDict['chain'].getDIRACGroup()
        if not retVal['OK']:
            return retVal
        pilotGroup = retVal['Value']

        payloadDN = payloadProxyDict['chain'].getIssuerCert()['Value'].getSubjectDN()['Value']
        retVal = payloadProxyDict['chain'].getDIRACGroup()
        if not retVal['OK']:
            return retVal
        payloadGroup = retVal['Value']
        if pilotDN != payloadDN or pilotGroup != payloadGroup:
            errorStr = 'Pilot Proxy and payload Proxy do not have same DN and Group'
            self.log.error(errorStr)
            return S_ERROR('Can not renew by copy: %s' % errorStr)

        if pilotProxyDict.get('hasVOMS', False):
            return pilotProxyDict['chain'].dumpAllToFile(payloadProxy)

        attribute = CS.getVOMSAttributeForGroup(payloadGroup)
        vo = CS.getVOMSVOForGroup(payloadGroup)

        retVal = VOMS().setVOMSAttributes(pilotProxyDict['chain'],
                                          attribute=attribute,
                                          vo=vo)
        if not retVal['OK']:
            return retVal

        chain = retVal['Value']
        return chain.dumpAllToFile(payloadProxy)
Example no. 9
  def __match(self, validCache, elementNames, elementType, statusTypes):
    """
    Obtains all keys on the cache ( should not be empty ! ).

    Gets the sets ( no duplicates ) of elementNames and statusTypes. There is a
    slight distinction. A priori we cannot know which are all the elementNames.
    So, if elementNames is None, we will consider all elementNames in the cacheKeys.
    However, if statusTypes is None, we will get the standard list from the
    ResourceStatus configuration in the CS.

    If the cartesian product of our sets is on the cacheKeys set, we have a
    positive match.

    :Parameters:
      **validCache** - `dict`
        cache dictionary
      **elementNames** - [ None, `string`, `list` ]
        name(s) of the elements to be matched
      **elementType** - [ `string` ]
        type of the elements to be matched
      **statusTypes** - [ None, `string`, `list` ]
        name(s) of the statusTypes to be matched

    :return: S_OK() || S_ERROR()
    """

    cacheKeys = validCache.keys()

    if isinstance(elementNames, basestring):
      elementNames = [elementNames]
    elif elementNames is None:
      if isinstance(cacheKeys[0], (tuple, list)):
        elementNames = [cacheKey[0] for cacheKey in cacheKeys]
      else:
        elementNames = cacheKeys
    # Remove duplicates, makes Cartesian product faster
    elementNamesSet = set(elementNames)

    if isinstance(elementType, basestring):
      if not elementType or elementType == 'Site':
        elementType = []
      else:
        elementType = [elementType]
    elif elementType is None:
      elementType = [cacheKey[1] for cacheKey in cacheKeys]
    # Remove duplicates, makes Cartesian product faster
    elementTypeSet = set(elementType)

    if isinstance(statusTypes, basestring):
      if not statusTypes:
        statusTypes = []
      else:
        statusTypes = [statusTypes]
    elif statusTypes is None:
      statusTypes = self.allStatusTypes
    # Remove duplicates, makes Cartesian product faster
    statusTypesSet = set(statusTypes)

    if not elementTypeSet and not statusTypesSet:
      cartesianProduct = elementNamesSet
    else:
      cartesianProduct = set(itertools.product(elementNamesSet, elementTypeSet, statusTypesSet))

    # Some users find it funny to send empty lists, which make the cartesianProduct
    # empty. Problem: the empty set is a subset of everything!

    if not cartesianProduct:
      self.log.warn('Empty cartesian product')
      return S_ERROR('Empty cartesian product')

    notInCache = list(cartesianProduct.difference(set(cacheKeys)))
    if notInCache:
      self.log.warn('Cache misses: %s' % notInCache)
      return S_ERROR('Cache misses: %s' % notInCache)

    return S_OK(cartesianProduct)
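
A toy illustration of the positive-match rule described in the docstring (the cache keys are hypothetical):

import itertools

validCache = {('SE1', 'StorageElement', 'ReadAccess'): 'Active',
              ('SE2', 'StorageElement', 'ReadAccess'): 'Banned'}
cartesianProduct = set(itertools.product({'SE1', 'SE2'},
                                         {'StorageElement'},
                                         {'ReadAccess'}))
notInCache = cartesianProduct.difference(set(validCache))
print("match" if not notInCache else "cache misses: %s" % list(notInCache))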
Example no. 10
    def transferAndRegisterFile(self,
                                fileName,
                                localPath,
                                lfn,
                                destinationSEList,
                                fileMetaDict,
                                fileCatalog=None,
                                masterCatalogOnly=False):
        """Performs the transfer and register operation with failover.
    """
        errorList = []
        fileGUID = fileMetaDict.get("GUID", None)
        fileChecksum = fileMetaDict.get("Checksum", None)

        for se in destinationSEList:
            self.log.info(
                "Attempting dm.putAndRegister",
                "('%s','%s','%s',guid='%s',catalog='%s', checksum = '%s')" %
                (lfn, localPath, se, fileGUID, fileCatalog, fileChecksum))

            result = DataManager(
                catalogs=fileCatalog,
                masterCatalogOnly=masterCatalogOnly).putAndRegister(
                    lfn, localPath, se, guid=fileGUID, checksum=fileChecksum)
            self.log.verbose(result)
            if not result['OK']:
                self.log.error('dm.putAndRegister failed with message',
                               result['Message'])
                errorList.append(result['Message'])
                continue

            if not result['Value']['Failed']:
                self.log.info(
                    'dm.putAndRegister successfully uploaded and registered',
                    '%s to %s' % (fileName, se))
                return S_OK({'uploadedSE': se, 'lfn': lfn})

            # Now we know something went wrong
            self.log.warn(
                "Didn't manage to do everything, now adding requests for the missing operation"
            )

            errorDict = result['Value']['Failed'][lfn]
            if 'register' not in errorDict:
                self.log.error('dm.putAndRegister failed with unknown error',
                               str(errorDict))
                errorList.append(
                    'Unknown error while attempting upload to %s' % se)
                continue

            # If we are here, the registration failed but the upload was successful
            if not fileCatalog:
                fileCatalog = ''

            if masterCatalogOnly:
                fileCatalog = FileCatalog().getMasterCatalogNames()['Value']

            result = self._setRegistrationRequest(lfn, se, fileMetaDict,
                                                  fileCatalog)
            if not result['OK']:
                self.log.error('Failed to set registration request',
                               'SE %s and metadata: \n%s' % (se, fileMetaDict))
                errorList.append(
                    'Failed to set registration request for: SE %s and metadata: \n%s'
                    % (se, fileMetaDict))
                continue
            else:
                self.log.info(
                    'Successfully set registration request',
                    'for: SE %s and metadata: \n%s' % (se, fileMetaDict))
                metadata = {}
                metadata['filedict'] = fileMetaDict
                metadata['uploadedSE'] = se
                metadata['lfn'] = lfn
                metadata['registration'] = 'request'
                return S_OK(metadata)

        self.log.error('Failed to upload output data file',
                       'Encountered %s errors' % len(errorList))
        return S_ERROR('Failed to upload output data file')
Example no. 11
def install(app, app_tar, tarballURL, overwrite, md5sum, area):
    """ Install the software
  """
    appName = app[0]
    appVersion = app[1]
    folder_name = app_tar.replace(".tgz", "").replace(".tar.gz", "")
    #jar file does not contain .tgz nor tar.gz so the file name is untouched and folder_name = app_tar
    if appName == "slic":
        folder_name = "%s%s" % (appName, appVersion)

    appli_exists = False
    app_tar_base = os.path.basename(app_tar)

    ###########################################
    ###Go where the software is to be installed
    os.chdir(area)
    #We go back to the initial place at any return
    ###########################################
    ##Handle the locking
    lockname = folder_name + ".lock"
    #Make sure the lock is not too old, or wait until it's gone
    res = checkLockAge(lockname)
    if not res['OK']:
        gLogger.error(
            "Something uncool happened with the lock, will kill installation")
        gLogger.error("Message: %s" % res['Message'])
        return S_ERROR("Failed lock checks")

    if res.get('Value'):
        # A true value means the lock file was very old, i.e. the installation
        # failed elsewhere
        overwrite = True

    #Check if the application is here and not to be overwritten
    if os.path.exists(folder_name):
        appli_exists = True
        if not overwrite:
            gLogger.info("Folder or file %s found in %s, skipping install !" %
                         (folder_name, area))
            return S_OK([folder_name, app_tar_base])

    ## If we are here, it means the application was never installed OR its overwrite flag is true

    #Now lock the area
    res = createLock(lockname)  ##This will fail if not allowed to write here
    if not res['OK']:
        gLogger.error(res['Message'])
        return res

    ## Cleanup old version in case it has to be overwritten (implies it's already here)
    ## In particular the jar file of LCSIM
    if appli_exists and overwrite:
        gLogger.info("Overwriting %s found in %s" % (folder_name, area))
        res = deleteOld(folder_name)
        if not res['OK']:  #should be always OK for the time being
            clearLock(lockname)
            return res
        ## Now application must have been removed

    ## If here, the application DOES NOT exist locally: either it was here with
    ## the overwrite flag false and we returned earlier, or it was here with the
    ## overwrite flag true and was removed, or it was never here at all, so
    ## appli_exists is False in every case

    ## Now we can get the files and unpack them

    ## Downloading file from url
    res = downloadFile(tarballURL, app_tar, folder_name)
    if not res['OK']:
        clearLock(lockname)
        return res

    ## Check that the tar ball is there. Should never happen as download file catches the errors
    if not os.path.exists("%s/%s" % (os.getcwd(), app_tar_base)):
        gLogger.error('Failed to download software', '%s' % (folder_name))
        clearLock(lockname)
        return S_ERROR('Failed to download software')

    # FIXME: this is really bad style, suggestion: create 2 private methods that (download a file and check its md5)
    # and (delete the old files and cleanup), then call the download_and_check method once and create a loop
    # with a MAX_TRIES variable that cleans up the old files and tries to download again until MAX_TRIES are used up.
    # The loop is only entered if download/md5check fail and if anything goes wrong in the loop, `continue` is called
    # Could also think about creating a clearLockAndExit method that takes a string (lockname) and return value res
    # and just calls clearLock(lockname) return res. Then replace these double calls in this method with return clearLockAndExit

    ## Check that the downloaded file (or existing one) has the right checksum
    res = tarMd5Check(app_tar_base, md5sum)
    if not res['OK']:
        gLogger.error("Will try getting the file again, who knows")
        try:  #Remove tar ball that we just got
            os.unlink("%s/%s" % (os.getcwd(), app_tar_base))
        except OSError:
            gLogger.error(
                "Failed to clean tar ball, something bad is happening")
        ## Clean up existing stuff (if any, in particular the jar file)
        res = deleteOld(folder_name)
        if not res['OK']:  #should be always OK for the time being
            clearLock(lockname)
            return res
        res = downloadFile(tarballURL, app_tar, folder_name)
        if not res['OK']:
            clearLock(lockname)
            return res
        res = tarMd5Check(app_tar_base, md5sum)
        if not res['OK']:
            gLogger.error(
                "Hash failed again, something is really wrong, cannot continue."
            )
            clearLock(lockname)
            return S_ERROR("MD5 check failed")

    if tarfile.is_tarfile(app_tar_base):  ##needed because LCSIM is jar file
        app_tar_to_untar = tarfile.open(app_tar_base)
        try:
            app_tar_to_untar.extractall()
        except TarError as e:
            gLogger.error(
                "Could not extract tar ball %s because of %s, cannot continue !"
                % (app_tar_base, str(e)))
            clearLock(lockname)
            return S_ERROR(
                "Could not extract tar ball %s because of %s, cannot continue !"
                % (app_tar_base, str(e)))
        if folder_name.count("slic"):
            slicname = folder_name
            members = app_tar_to_untar.getmembers()
            fileexample = members[0].name
            basefolder = fileexample.split("/")[0]
            try:
                os.rename(basefolder, slicname)
            except OSError as e:
                gLogger.error("Failed renaming slic:", str(e))
                clearLock(lockname)
                return S_ERROR("Could not rename slic directory")
    try:
        dircontent = os.listdir(folder_name)
        if not len(dircontent):
            clearLock(lockname)
            return S_ERROR(
                "Folder %s is empty, considering install as failed" %
                folder_name)
    except OSError:
        pass

    #Everything went fine, we try to clear the lock
    clearLock(lockname)

    return S_OK([folder_name, app_tar_base])
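
The FIXME above suggests factoring the duplicated download/md5-check code into a retry loop. A minimal sketch of that refactor, assuming the same helpers used in install() (downloadFile, tarMd5Check, deleteOld, clearLock, plus the module-level os import) and a hypothetical MAX_TRIES constant:

MAX_TRIES = 3  # hypothetical retry budget

def downloadAndCheck(tarballURL, app_tar, folder_name, app_tar_base, md5sum):
    """Download the tar ball and verify its md5; returns S_OK/S_ERROR."""
    res = downloadFile(tarballURL, app_tar, folder_name)
    if not res['OK']:
        return res
    return tarMd5Check(app_tar_base, md5sum)

def cleanupOld(folder_name, app_tar_base):
    """Remove a stale tar ball and any partially installed files."""
    try:
        os.unlink(os.path.join(os.getcwd(), app_tar_base))
    except OSError:
        pass
    return deleteOld(folder_name)

# Inside install(), the duplicated block would then collapse to:
# for _attempt in range(MAX_TRIES):
#     res = downloadAndCheck(tarballURL, app_tar, folder_name, app_tar_base, md5sum)
#     if res['OK']:
#         break
#     cleanupOld(folder_name, app_tar_base)
# else:
#     clearLock(lockname)
#     return S_ERROR("MD5 check failed after %d tries" % MAX_TRIES)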
Example no. 12
    def listDirectory(self, path):
        """Not available on Echo

      :returns: S_ERROR
    """
        return S_ERROR("Listing directory does not exist in Echo")
Example no. 13
    def getDirectoryMetadata(self, path):
        """Not available on Echo

      :returns: S_ERROR
    """
        return S_ERROR("Getting directory metadata does not exist in Echo")
Example no. 14
    def getDirectorySize(self, path):
        """Not available on Echo

      :returns: S_ERROR
    """
        return S_ERROR("Getting directory size does not exist in Echo")
Example no. 15
    def submitJob(self, executableFile, proxy=None, numberOfJobs=1):
        """ Method to submit job
    """
        self.createClient()
        # Check if the client is ready
        if not self.BOINCClient:
            return S_ERROR('SOAP client is not ready')

        self.log.verbose("Executable file path: %s" % executableFile)

        # if no proxy is supplied, the executable can be submitted directly
        # otherwise a wrapper script is needed to get the proxy to the execution node
        # The wrapper script makes debugging more complicated and thus it is
        # recommended to transfer a proxy inside the executable if possible.
        wrapperContent = ''
        if proxy:
            self.log.verbose('Setting up proxy for payload')

            compressedAndEncodedProxy = base64.encodestring(
                bz2.compress(proxy.dumpAllToString()['Value'])).replace(
                    '\n', '')
            compressedAndEncodedExecutable = base64.encodestring(
                bz2.compress(open(executableFile, "rb").read(),
                             9)).replace('\n', '')

            wrapperContent = """#!/bin/bash
/usr/bin/env python << EOF
# Wrapper script for executable and proxy
import os
import tempfile
import sys
import base64
import bz2
import shutil
import stat
try:
  workingDirectory = tempfile.mkdtemp( suffix = '_wrapper', prefix= 'TORQUE_' )
  os.chdir( workingDirectory )
  open( 'proxy', "w" ).write(bz2.decompress( base64.decodestring( "%(compressedAndEncodedProxy)s" ) ) )
  open( '%(executable)s', "w" ).write(bz2.decompress( base64.decodestring( "%(compressedAndEncodedExecutable)s" ) ) )
  os.chmod('proxy',stat.S_IRUSR | stat.S_IWUSR)
  os.chmod('%(executable)s',stat.S_IRWXU)
  os.environ["X509_USER_PROXY"]=os.path.join(workingDirectory, 'proxy')
except Exception as x:
  print >> sys.stderr, x
  sys.exit(-1)
cmd = "./%(executable)s"
print 'Executing: ', cmd
sys.stdout.flush()
os.system( cmd )

shutil.rmtree( workingDirectory )

EOF
""" % { 'compressedAndEncodedProxy': compressedAndEncodedProxy, \
              'compressedAndEncodedExecutable': compressedAndEncodedExecutable, \
              'executable': os.path.basename( executableFile ) }

            fd, name = tempfile.mkstemp(suffix='_pilotwrapper.py',
                                        prefix='DIRAC_',
                                        dir=os.getcwd())
            os.close(fd)
            # The file itself stays empty: only its unique basename is used
            # below to derive the jobID prefix
            submitFile = name

        else:  # no proxy
            submitFile = executableFile
            wrapperContent = self._fromFileToStr(submitFile)

        if not wrapperContent:
            self.log.error('Executable file is empty.')
            return S_ERROR('Executable file is empty.')

        # Some special symbols (e.g. <, >, &) cannot be transported in XML,
        # so base64 is used here.
        wrapperContent = base64.encodestring(wrapperContent).replace("\n", '')

        prefix = os.path.splitext(os.path.basename(submitFile))[0].replace(
            '_pilotwrapper', '').replace('DIRAC_', '')
        batchIDList = []
        stampDict = {}
        for i in range(0, numberOfJobs):
            jobID = "%s_%d@%s" % (prefix, i, self.suffix)
            try:
                #  print jobID + "\n" + wrapperContent
                #  print self.BOINCClient
                result = self.BOINCClient.service.submitJob(
                    jobID, wrapperContent, self.ceParameters['Platform'][0],
                    self.ceParameters['MarketPlaceID'])
            except Exception:
                self.log.error('Could not submit the pilot to the BOINC CE',
                               'Pilot %s, BOINC CE %s' % (jobID, self.wsdl))
                break

            if not result['ok']:
                self.log.warn(
                    "Didn't submit the pilot %s to the BOINC CE %s: the returned value is false!"
                    % (jobID, self.wsdl))
                break

            self.log.verbose('Submitted the pilot %s to the BOINC CE %s' %
                             (jobID, self.wsdl))
            diracStamp = "%s_%d" % (prefix, i)
            batchIDList.append(jobID)
            stampDict[jobID] = diracStamp

        if batchIDList:
            resultRe = S_OK(batchIDList)
            resultRe['PilotStampDict'] = stampDict
        else:
            resultRe = S_ERROR('Did not submit any pilot to the BOINC CE %s' % self.wsdl)
        return resultRe
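
A tiny round-trip of the bz2+base64 encoding used above for the proxy and the executable (Python 2, matching the snippet; encodestring/decodestring are the legacy base64 names it relies on):

import base64
import bz2

payload = "some proxy bytes"
blob = base64.encodestring(bz2.compress(payload)).replace('\n', '')  # single line, XML-safe
assert bz2.decompress(base64.decodestring(blob)) == payload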
Example no. 16
 def getAttribute(self, name):
     try:
         self.__checkType(name, basestring)
     except TypeError as excp:
         return S_ERROR(str(excp))
     return self.jobDB.getJobAttribute(self.__jid, name)
Example no. 17
  def wrapped_fcn( *args, **kwargs ):

    userName = kwargs.pop( 'proxyUserName', '' )
    userDN = kwargs.pop( 'proxyUserDN', '' )
    userGroup = kwargs.pop( 'proxyUserGroup', '' )
    vomsFlag = kwargs.pop( 'proxyWithVOMS', True )
    proxyFilePath = kwargs.pop( 'proxyFilePath', False )

    if ( userName or userDN ) and userGroup:

      # Setup user proxy
      originalUserProxy = os.environ.get( 'X509_USER_PROXY' )
      if not userDN:
        result = getDNForUsername( userName )
        if not result[ 'OK' ]:
          return result
        userDN = result[ 'Value' ][0]
      vomsAttr = ''
      if vomsFlag:
        vomsAttr = getVOMSAttributeForGroup( userGroup )

      if vomsAttr:
        result = gProxyManager.downloadVOMSProxyToFile( userDN, userGroup,
                                                        requiredVOMSAttribute = vomsAttr,
                                                        filePath = proxyFilePath,
                                                        requiredTimeLeft = 3600,
                                                        cacheTime = 3600 )
      else:
        result = gProxyManager.downloadProxyToFile( userDN, userGroup,
                                                    filePath = proxyFilePath,
                                                    requiredTimeLeft = 3600,
                                                    cacheTime = 3600 )

      if not result['OK']:
        gLogger.warn( "Can't download proxy to file", result['Message'] )
        return result

      proxyFile = result['Value']
      os.environ['X509_USER_PROXY'] = proxyFile

      # Check if the caller is executing with the host certificate
      useServerCertificate = gConfig.useServerCertificate()
      if useServerCertificate:
        gConfigurationData.setOptionInCFG( '/DIRAC/Security/UseServerCertificate', 'false' )

      try:
        return fcn( *args, **kwargs )
      except Exception as lException:
        value = ','.join( [str( arg ) for arg in lException.args] )
        exceptType = lException.__class__.__name__
        return S_ERROR( "Exception - %s: %s" % ( exceptType, value ) )
      finally:
        # Restore the default host certificate usage if necessary
        if useServerCertificate:
          gConfigurationData.setOptionInCFG( '/DIRAC/Security/UseServerCertificate', 'true' )
        if originalUserProxy:
          os.environ['X509_USER_PROXY'] = originalUserProxy
        else:
          os.environ.pop( 'X509_USER_PROXY' )

    else:
      # No proxy substitution requested
      return fcn( *args, **kwargs )
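
The proxyUserName/proxyUserGroup keyword arguments suggest this wrapper implements a decorator in the executeWithUserProxy style. A hedged usage sketch (the decorator name, function and group are assumptions, not shown in the excerpt):

@executeWithUserProxy
def listUserFiles( lfnDir ):
  # the body runs with the requested user's proxy in X509_USER_PROXY
  return S_OK( lfnDir )

result = listUserFiles( '/vo/user/u/someuser',
                        proxyUserName = 'someuser',
                        proxyUserGroup = 'dirac_user' )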
Example no. 18
 def getOptParameter(self, name):
     try:
         self.__checkType(name, basestring)
     except TypeError as excp:
         return S_ERROR(str(excp))
     return self.jobDB.getJobOptParameter(self.__jid, name)
Example no. 19
                if optionName == definedOptionTuple[0].replace( ":", "" ) or \
                  optionName == definedOptionTuple[1].replace( "=", "" ):
                    if definedOptionTuple[3]:
                        retVal = definedOptionTuple[3](optionValue)
                        if not isinstance(retVal, dict):
                            errorsList.append(
                                "Callback for switch '%s' does not return S_OK or S_ERROR"
                                % optionName)
                        elif not retVal['OK']:
                            errorsList.append(retVal['Message'])
                    else:
                        self.unprocessedSwitches.append(
                            (optionName, optionValue))

        if len(errorsList) > 0:
            return S_ERROR("\n%s" % "\n".join(errorsList))
        return S_OK()

    def disableCS(self):
        """
    Do not contact Configuration Server upon initialization
    """
        gRefresher.disable()

    def enableCS(self):
        """
    Force the connection the Configuration Server
    """
        return gRefresher.enable()

    def isCSEnabled(self):
Example no. 20
 def getOptParameters(self, nameList=None):
     try:
         self.__checkType(nameList, (list, tuple), canBeNone=True)
     except TypeError as excp:
         return S_ERROR(str(excp))
     return self.jobDB.getJobOptParameters(self.__jid, nameList)
Example no. 21
  def __resolveInputData( self ):
    """This method controls the execution of the DIRAC input data modules according
       to the VO policy defined in the configuration service.
    """
    if 'SiteName' in self.arguments['Configuration']:
      site = self.arguments['Configuration']['SiteName']
    else:
      site = DIRAC.siteName()

    policy = []
    if 'Job' not in self.arguments:
      self.arguments['Job'] = {}

    if 'InputDataPolicy' in self.arguments['Job']:
      policy = self.arguments['Job']['InputDataPolicy']
      #In principle this can be a list of modules with the first taking precedence
      if isinstance( policy, basestring ):
        policy = [policy]
      self.log.info( 'Job has a specific policy setting: %s' % ( ', '.join( policy ) ) )
    else:
      self.log.verbose( 'Attempting to resolve input data policy for site %s' % site )
      inputDataPolicy = gConfig.getOptionsDict( '/Operations/InputDataPolicy' )
      if not inputDataPolicy['OK']:
        return S_ERROR( 'Could not resolve InputDataPolicy from /Operations/InputDataPolicy' )

      options = inputDataPolicy['Value']
      if site in options:
        policy = options[site]
        policy = [x.strip() for x in policy.split( ',' )]
        self.log.info( 'Found specific input data policy for site %s:\n%s' % ( site, '\n'.join( policy ) ) )
      elif 'Default' in options:
        policy = options['Default']
        policy = [x.strip() for x in policy.split( ',' )]
        self.log.info( 'Applying default input data policy for site %s:\n%s' % ( site, '\n'.join( policy ) ) )

    dataToResolve = None #if none, all supplied input data is resolved
    allDataResolved = False
    successful = {}
    failedReplicas = []
    for modulePath in policy:
      if not allDataResolved:
        result = self.__runModule( modulePath, dataToResolve )
        if not result['OK']:
          self.log.warn( 'Problem during %s execution' % modulePath )
          return result

        if 'Failed' in result:
          failedReplicas = result['Failed']

        if failedReplicas:
          self.log.info( '%s failed for the following files:\n%s'
                         % ( modulePath, '\n'.join( failedReplicas ) ) )
          dataToResolve = failedReplicas
        else:
          self.log.info( 'All replicas resolved after %s execution' % ( modulePath ) )
          allDataResolved = True

        successful.update( result['Successful'] )
        self.log.verbose( successful )

    result = S_OK()
    result['Successful'] = successful
    result['Failed'] = failedReplicas
    return result
Example no. 22
    def commitCache(self, initialState, cache, jobLog):
        try:
            self.__checkType(initialState, dict)
            self.__checkType(cache, dict)
            self.__checkType(jobLog, (list, tuple))
        except TypeError as excp:
            return S_ERROR(str(excp))
        result = self.getAttributes(initialState.keys())
        if not result['OK']:
            return result
        if result['Value'] != initialState:
            return S_OK(False)
        gLogger.verbose("Job %s: About to execute trace. Current state %s" %
                        (self.__jid, initialState))

        data = {'att': [], 'jobp': [], 'optp': []}
        for key in cache:
            for dk in data:
                if key.find("%s." % dk) == 0:
                    data[dk].append((key[len(dk) + 1:], cache[key]))

        if data['att']:
            attN = [t[0] for t in data['att']]
            attV = [t[1] for t in data['att']]
            result = self.__retryFunction(5, self.jobDB.setJobAttributes,
                                          (self.__jid, attN, attV),
                                          {'update': True})
            if not result['OK']:
                return result

        if data['jobp']:
            result = self.__retryFunction(5, self.jobDB.setJobParameters,
                                          (self.__jid, data['jobp']))
            if not result['OK']:
                return result

        for k, v in data['optp']:
            result = self.__retryFunction(5, self.jobDB.setJobOptParameter,
                                          (self.__jid, k, v))
            if not result['OK']:
                return result

        if 'inputData' in cache:
            result = self.__retryFunction(5, self.jobDB.setInputData,
                                          (self.__jid, cache['inputData']))
            if not result['OK']:
                return result

        gLogger.verbose("Adding logging records for %s" % self.__jid)
        for record, updateTime, source in jobLog:
            gLogger.verbose("Logging records for %s: %s %s %s" %
                            (self.__jid, record, updateTime, source))
            record['date'] = updateTime
            record['source'] = source
            result = self.__retryFunction(5, self.logDB.addLoggingRecord,
                                          (self.__jid, ), record)
            if not result['OK']:
                return result

        gLogger.info("Job %s: Ended trace execution" % self.__jid)
        # We return a new initial state
        return self.getAttributes(initialState.keys())
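
A toy illustration of the 'att.'/'jobp.'/'optp.' cache-key splitting above (the cache content is hypothetical):

cache = {'att.Status': 'Done', 'jobp.CPUTime': '3600', 'optp.Flavour': 'MC'}
data = {'att': [], 'jobp': [], 'optp': []}
for key in cache:
    for dk in data:
        if key.startswith("%s." % dk):
            data[dk].append((key[len(dk) + 1:], cache[key]))
print(data['att'])  # [('Status', 'Done')]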
Example no. 23
 def uploadFilesAsSandboxForPilot( self, fileList, jobId, sbType, sizeLimit = 0 ):
   if sbType not in self.__validSandboxTypes:
     return S_ERROR( "Invalid Sandbox type %s" % sbType )
   return self.uploadFilesAsSandbox( fileList, sizeLimit, assignTo = { "Pilot:%s" % jobId: sbType } )
Example no. 24
 def getSEName(self, seID):
     if seID in self.db.seids:
         return S_OK(self.db.seids[seID])
     return S_ERROR('SE id %d not found' % seID)
Example no. 25
    def getTimeLeft(self, cpuConsumed=0.0, processors=1):
        """ Returns the CPU Time Left for supported batch systems.
        The CPUConsumed is the current raw total CPU.
    """
        # Quit if no scale factor available
        if not self.scaleFactor:
            return S_ERROR(
                '/LocalSite/CPUScalingFactor not defined for site %s' %
                DIRAC.siteName())

        if not self.batchPlugin:
            return S_ERROR(self.batchError)

        resourceDict = self.batchPlugin.getResourceUsage()
        if not resourceDict['OK']:
            self.log.warn(
                'Could not determine timeleft for batch system at site %s' %
                DIRAC.siteName())
            return resourceDict

        resources = resourceDict['Value']
        self.log.debug("self.batchPlugin.getResourceUsage(): %s" %
                       str(resources))
        if not resources.get('CPULimit') and not resources.get(
                'WallClockLimit'):
            # This should never happen
            return S_ERROR('No CPU or WallClock limit obtained')

        # if one of CPULimit or WallClockLimit is missing, compute a reasonable value
        if not resources.get('CPULimit'):
            resources['CPULimit'] = resources['WallClockLimit'] * processors
        elif not resources.get('WallClockLimit'):
            resources['WallClockLimit'] = resources['CPULimit'] / processors

        # if one of CPU or WallClock is missing, compute a reasonable value
        if not resources.get('CPU'):
            resources['CPU'] = resources['WallClock'] * processors
        elif not resources.get('WallClock'):
            resources['WallClock'] = resources['CPU'] / processors

        cpu = float(resources['CPU'])
        cpuLimit = float(resources['CPULimit'])
        wallClock = float(resources['WallClock'])
        wallClockLimit = float(resources['WallClockLimit'])
        batchSystemTimeUnit = resources.get('Unit', 'Both')

        # Some batch systems rely on wall clock time and/or cpu time to make allocations
        if batchSystemTimeUnit == 'WallClock':
            time = wallClock
            timeLimit = wallClockLimit
        else:
            time = cpu
            timeLimit = cpuLimit

        if time and cpuConsumed > 3600. and self.normFactor:
            # If there has been more than 1 hour of consumed CPU and
            # there is a Normalization set for the current CPU
            # use that value to renormalize the values returned by the batch system
            # NOTE: cpuConsumed is non-zero for call by the JobAgent and 0 for call by the watchdog
            # cpuLimit and cpu may be in the units of the batch system, not real seconds...
            # (in this case the other case won't work)
            # therefore renormalise it using cpuConsumed (which is in real seconds)
            cpuWorkLeft = (timeLimit -
                           time) * self.normFactor * cpuConsumed / time
        elif self.normFactor:
            # FIXME: this is always used by the watchdog... Also used by the JobAgent
            #        if consumed less than 1 hour of CPU
            # It was using self.scaleFactor but this is inconsistent: use the same as above
            # In case the returned cpu and cpuLimit are not in real seconds, this is however rubbish
            cpuWorkLeft = (timeLimit - time) * self.normFactor
        else:
            # Last resort recovery...
            cpuWorkLeft = (timeLimit - time) * self.scaleFactor

        self.log.verbose('Remaining CPU in normalized units is: %.02f' %
                         cpuWorkLeft)
        return S_OK(cpuWorkLeft)
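
A worked example of the main renormalisation branch above (all numbers hypothetical): with a 48 h batch limit, 24 h already used in batch-system units, normFactor = 10 and 30000 real CPU seconds consumed:

timeLimit = 48 * 3600.0   # batch-system units
time = 24 * 3600.0        # batch-system units consumed so far
normFactor = 10.0         # normalisation of the current CPU
cpuConsumed = 30000.0     # real seconds, as passed in by the JobAgent

# renormalise the batch-system units using the real seconds actually consumed
cpuWorkLeft = (timeLimit - time) * normFactor * cpuConsumed / time
print("%.02f" % cpuWorkLeft)  # 300000.00 normalized seconds left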
Example no. 26
    def _prepareCommand(self):
        """
        DowntimeCommand requires four arguments:
        - name : <str>
        - element : Site / Resource
        - elementType: <str>

        If the elements are Site(s), we need to get their GOCDB names. They may
        not have one; if they do not, we keep the DIRAC name unchanged.
        """

        if "name" not in self.args:
            return S_ERROR('"name" not found in self.args')
        elementName = self.args["name"]

        if "element" not in self.args:
            return S_ERROR('"element" not found in self.args')
        element = self.args["element"]

        if "elementType" not in self.args:
            return S_ERROR('"elementType" not found in self.args')
        elementType = self.args["elementType"]

        if element not in ["Site", "Resource"]:
            return S_ERROR("element is neither Site nor Resource")

        hours = None
        if "hours" in self.args:
            hours = self.args["hours"]

        gOCDBServiceType = None

        # Transform DIRAC site names into GOCDB topics
        if element == "Site":

            gocSite = getGOCSiteName(elementName)
            # If this fails, the site is most probably not a grid site; not an
            # issue, elementName simply remains unchanged
            if gocSite["OK"]:
                elementName = gocSite["Value"]

        # The DIRAC SE names mean nothing on the grid, but their hosts do.
        elif elementType == "StorageElement":
            # for SRM and SRM only, we need to distinguish if it's tape or disk
            # if it's not SRM, then gOCDBServiceType will be None (and we'll use them all)
            try:
                se = StorageElement(elementName)
                seOptions = se.options
                seProtocols = set(se.localAccessProtocolList) | set(
                    se.localWriteProtocolList)
            except AttributeError:  # Sometimes the SE can't be instantiated properly
                self.log.error("Failure instantiating StorageElement object",
                               elementName)
                return S_ERROR("Failure instantiating StorageElement")
            if "SEType" in seOptions and "srm" in seProtocols:
                # Type should follow the convention TXDY
                seType = seOptions["SEType"]
                diskSE = re.search("D[1-9]", seType) is not None
                tapeSE = re.search("T[1-9]", seType) is not None
                if tapeSE:
                    gOCDBServiceType = "srm.nearline"
                elif diskSE:
                    gOCDBServiceType = "srm"

            res = getSEHosts(elementName)
            if not res["OK"]:
                return res
            seHosts = res["Value"]

            if not seHosts:
                return S_ERROR("No seHost(s) for %s" % elementName)
            elementName = seHosts  # in this case elementName becomes a list, because there might be more than one host

        elif elementType in ["FTS", "FTS3"]:
            gOCDBServiceType = "FTS"
            # WARNING: this method presupposes that the server is an FTS3 type
            gocSite = getGOCFTSName(elementName)
            if not gocSite["OK"]:
                self.log.warn("FTS not in Resources/FTSEndpoints/FTS3 ?",
                              elementName)
            else:
                elementName = gocSite["Value"]

        elif elementType == "ComputingElement":
            res = getCESiteMapping(elementName)
            if not res["OK"]:
                return res
            siteName = res["Value"][elementName]
            ceType = gConfig.getValue(
                cfgPath("Resources", "Sites",
                        siteName.split(".")[0], siteName, "CEs", elementName,
                        "CEType"))
            if ceType == "HTCondorCE":
                gOCDBServiceType = "org.opensciencegrid.htcondorce"
            elif ceType == "ARC":
                gOCDBServiceType = "ARC-CE"

        return S_OK((element, elementName, hours, gOCDBServiceType))
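
A quick check of the TXDY parsing above (toy SEType values):

import re

for seType in ("T0D1", "T1D0", "T1D1"):
    diskSE = re.search("D[1-9]", seType) is not None
    tapeSE = re.search("T[1-9]", seType) is not None
    print("%s disk=%s tape=%s" % (seType, diskSE, tapeSE))
# T0D1 -> disk only (srm), T1D0 -> tape only (srm.nearline),
# T1D1 -> both, and tape wins above (srm.nearline)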
Example no. 27
 def isRFC(self):
     if not self.__loadedChain:
         return S_ERROR(DErrno.ENOCHAIN)
     return S_OK(self.__isRFC)
Example no. 28
    def doNew(self, masterParams=None):
        """
        Gets the parameters to run, either from the master method or from its
        own arguments.

        For every elementName, unless it is given a list, in which case it contacts
        the gocdb client. The server is not very stable, so in case of failure tries
        a second time.

        If there are downtimes, are recorded and then returned.
        """

        if masterParams is not None:
            element, elementNames = masterParams
            hours = 120
            elementName = None
            gOCDBServiceType = None

        else:
            params = self._prepareCommand()
            if not params["OK"]:
                return params
            element, elementName, hours, gOCDBServiceType = params["Value"]
            if not isinstance(elementName, list):
                elementNames = [elementName]
            else:
                elementNames = elementName

        # WARNING: checking all the DT that are ongoing or starting in given <hours> from now
        try:
            results = self.gClient.getStatus(element,
                                             name=elementNames,
                                             startingInHours=hours)
        except URLError:
            try:
                # Let's give it a second chance..
                results = self.gClient.getStatus(element,
                                                 name=elementNames,
                                                 startingInHours=hours)
            except URLError as e:
                return S_ERROR(e)

        if not results["OK"]:
            return results
        results = results["Value"]

        if results is None:  # no downtimes found
            return S_OK(None)

        # cleaning the Cache
        if elementNames:
            cleanRes = self._cleanCommand(element, elementNames)
            if not cleanRes["OK"]:
                return cleanRes

        uniformResult = []

        # Humanize the results into a dictionary, not the most optimal, but readable
        for downtime, downDic in results.items():  # can be an iterator

            dt = {}

            dt["Name"] = downDic.get(
                "URL", downDic.get("HOSTNAME", downDic.get("SITENAME")))
            if not dt["Name"]:
                return S_ERROR(
                    "URL, SITENAME and HOSTNAME are missing from downtime dictionary"
                )

            dt["gOCDBServiceType"] = downDic.get("SERVICE_TYPE")

            if dt["gOCDBServiceType"] and gOCDBServiceType:
                if gOCDBServiceType.lower() != downDic["SERVICE_TYPE"].lower():
                    self.log.warn(
                        "SERVICE_TYPE mismatch",
                        "between GOCDB (%s) and CS (%s) for %s" %
                        (downDic["SERVICE_TYPE"], gOCDBServiceType,
                         dt["Name"]),
                    )

            dt["DowntimeID"] = downtime
            dt["Element"] = element
            dt["StartDate"] = downDic["FORMATED_START_DATE"]
            dt["EndDate"] = downDic["FORMATED_END_DATE"]
            dt["Severity"] = downDic["SEVERITY"]
            dt["Description"] = downDic["DESCRIPTION"].replace("'", "")
            dt["Link"] = downDic["GOCDB_PORTAL_URL"]

            uniformResult.append(dt)

        storeRes = self._storeCommand(uniformResult)
        if not storeRes["OK"]:
            return storeRes

        return S_OK()
Example no. 29
    def _refreshSEs(self, connection=False):
        """Refresh the SE cache"""

        return S_ERROR("To be implemented on derived class")
Example no. 30
    def getDirectory(self, path, localPath=False):
        """Not available on Echo

      :returns: S_ERROR
    """
        return S_ERROR("Getting directory does not exist in Echo")