def __loadConfigurationData(self):
        try:
            os.makedirs(os.path.join(DIRAC.rootPath, "etc", "csbackup"))
        except OSError:
            # the backup directory may already exist
            pass
        gConfigurationData.loadConfigurationData()
        if gConfigurationData.isMaster():
            bBuiltNewConfiguration = False
            if not gConfigurationData.getName():
                DIRAC.abort(
                    10, "Missing name for the configuration to be exported!")
            gConfigurationData.exportName()
            sVersion = gConfigurationData.getVersion()
            if sVersion == "0":
                gLogger.info("There's no version. Generating a new one")
                gConfigurationData.generateNewVersion()
                bBuiltNewConfiguration = True

            if self.sURL not in gConfigurationData.getServers():
                gConfigurationData.setServers(self.sURL)
                bBuiltNewConfiguration = True

            gConfigurationData.setMasterServer(self.sURL)

            if bBuiltNewConfiguration:
                gConfigurationData.writeRemoteConfigurationToDisk()
Example #2
    def _refreshAndPublish(self):
        """
        Refresh configuration and publish local updates
        """
        self._lastUpdateTime = time.time()
        gLogger.info("Refreshing from master server")
        sMasterServer = gConfigurationData.getMasterServer()
        if sMasterServer:
            from DIRAC.ConfigurationSystem.Client.ConfigurationClient import ConfigurationClient

            oClient = ConfigurationClient(
                url=sMasterServer,
                timeout=self._timeout,
                useCertificates=gConfigurationData.useServerCertificate(),
                skipCACheck=gConfigurationData.skipCACheck(),
            )
            dRetVal = _updateFromRemoteLocation(oClient)
            if not dRetVal["OK"]:
                gLogger.error("Can't update from master server",
                              dRetVal["Message"])
                return False
            if gConfigurationData.getAutoPublish():
                gLogger.info("Publishing to master server...")
                dRetVal = oClient.publishSlaveServer(self._url)
                if not dRetVal["OK"]:
                    gLogger.error("Can't publish to master server",
                                  dRetVal["Message"])
            return True
        else:
            gLogger.warn(
                "No master server is specified in the configuration, trying to get data from other slaves"
            )
            return self._refresh()["OK"]
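A minimal standalone sketch of the polling loop that could drive a refresh step like _refreshAndPublish above; the interval and the refresh_and_publish callable are illustrative stand-ins, not DIRAC API.

import time

def run_refresher(refresh_and_publish, interval=300, should_stop=lambda: False):
    # Periodically run the refresh-and-publish cycle shown above; the snippet
    # returns False when the master server could not be reached.
    while not should_stop():
        if not refresh_and_publish():
            print("refresh failed; retrying on the next cycle")
        time.sleep(interval)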
  def __loadConfigurationData( self ):
    try:
      os.makedirs( os.path.join( DIRAC.rootPath, "etc", "csbackup" ) )
    except OSError:
      # the backup directory may already exist
      pass
    gConfigurationData.loadConfigurationData()
    if gConfigurationData.isMaster():
      bBuiltNewConfiguration = False
      if not gConfigurationData.getName():
        DIRAC.abort( 10, "Missing name for the configuration to be exported!" )
      gConfigurationData.exportName()
      sVersion = gConfigurationData.getVersion()
      if sVersion == "0":
        gLogger.info( "There's no version. Generating a new one" )
        gConfigurationData.generateNewVersion()
        bBuiltNewConfiguration = True

      if self.sURL not in gConfigurationData.getServers():
        gConfigurationData.setServers( self.sURL )
        bBuiltNewConfiguration = True

      gConfigurationData.setMasterServer( self.sURL )

      if bBuiltNewConfiguration:
        gConfigurationData.writeRemoteConfigurationToDisk()
Example #4
 def _connect( self ):
   self.__discoverExtraCredentials()
   if not self.__initStatus[ 'OK' ]:
     return self.__initStatus
   if self.__enableThreadCheck:
     self.__checkThreadID()
   gLogger.debug( "Connecting to: %s" % self.serviceURL )
   try:
     transport = gProtocolDict[ self.__URLTuple[0] ][ 'transport' ]( self.__URLTuple[1:3], **self.kwargs )
     retVal = transport.initAsClient()
     if not retVal[ 'OK' ]:
       if self.__retry < 5:
         url = "%s://%s:%d/%s" % ( self.__URLTuple[0], self.__URLTuple[1], int( self.__URLTuple[2] ), self.__URLTuple[3] )
         if url not in self.__bannedUrls: 
           gLogger.notice( "URL banned", "%s" % url )
           self.__bannedUrls += [url]   
         self.__retry += 1
         gLogger.info( "Retry connection: ", "%d" % self.__retry )
         time.sleep( self.__retryDelay )
         self.__discoverURL()
         return self._connect()
       else:
         return S_ERROR( "Can't connect to %s: %s" % ( self.serviceURL, retVal ) )
   except Exception as e:
     return S_ERROR( "Can't connect to %s: %s" % ( self.serviceURL, e ) )
Example #5
 def __refreshAndPublish(self):
     self.__lastUpdateTime = time.time()
     gLogger.info("Refreshing from master server")
     from DIRAC.Core.DISET.RPCClient import RPCClient
     sMasterServer = gConfigurationData.getMasterServer()
     if sMasterServer:
         oClient = RPCClient(
             sMasterServer,
             timeout=self.__timeout,
             useCertificates=gConfigurationData.useServerCertificate(),
             skipCACheck=gConfigurationData.skipCACheck())
         dRetVal = _updateFromRemoteLocation(oClient)
         if not dRetVal['OK']:
             gLogger.error("Can't update from master server",
                           dRetVal['Message'])
             return False
         if gConfigurationData.getAutoPublish():
             gLogger.info("Publishing to master server...")
             dRetVal = oClient.publishSlaveServer(self.__url)
             if not dRetVal['OK']:
                 gLogger.error("Can't publish to master server",
                               dRetVal['Message'])
         return True
     else:
         gLogger.warn(
             "No master server is specified in the configuration, trying to get data from other slaves"
         )
         return self.__refresh()['OK']
Example #6
 def __backupCurrentConfiguration(self, backupName):
     configurationFilename = "%s.cfg" % self.getName()
     configurationFile = os.path.join(DIRAC.rootPath, "etc",
                                      configurationFilename)
     today = Time.date()
     backupPath = os.path.join(self.getBackupDir(), str(today.year),
                               "%02d" % today.month)
     mkDir(backupPath)
     backupFile = os.path.join(
         backupPath,
         configurationFilename.replace(".cfg", ".%s.zip" % backupName))
     if os.path.isfile(configurationFile):
         gLogger.info("Making a backup of configuration in %s" % backupFile)
         try:
             with zipfile.ZipFile(backupFile, "w",
                                  zipfile.ZIP_DEFLATED) as zf:
                 zf.write(
                     configurationFile, "%s.backup.%s" %
                     (os.path.split(configurationFile)[1], backupName))
         except Exception:
             gLogger.exception()
             gLogger.error("Cannot backup configuration data file",
                           "file %s" % backupFile)
     else:
         gLogger.warn("CS data file does not exist", configurationFile)
Example #7
    def __refreshAndPublish(self):
        self.__lastUpdateTime = time.time()
        gLogger.info("Refreshing from master server")
        from DIRAC.Core.DISET.RPCClient import RPCClient

        sMasterServer = gConfigurationData.getMasterServer()
        if sMasterServer:
            oClient = RPCClient(
                sMasterServer,
                timeout=self.__timeout,
                useCertificates=gConfigurationData.useServerCertificate(),
                skipCACheck=gConfigurationData.skipCACheck(),
            )
            dRetVal = _updateFromRemoteLocation(oClient)
            if not dRetVal["OK"]:
                gLogger.error("Can't update from master server", dRetVal["Message"])
                return False
            if gConfigurationData.getAutoPublish():
                gLogger.info("Publishing to master server...")
                dRetVal = oClient.publishSlaveServer(self.__url)
                if not dRetVal["OK"]:
                    gLogger.error("Can't publish to master server", dRetVal["Message"])
            return True
        else:
            gLogger.warn("No master server is specified in the configuration, trying to get data from other slaves")
            return self.__refresh()["OK"]
Example #8
 def _connect( self ):
   self.__discoverExtraCredentials()
   if not self.__initStatus[ 'OK' ]:
     return self.__initStatus
   if self.__enableThreadCheck:
     self.__checkThreadID()
   gLogger.debug( "Connecting to: %s" % self.serviceURL )
   try:
     transport = gProtocolDict[ self.__URLTuple[0] ][ 'transport' ]( self.__URLTuple[1:3], **self.kwargs )
     retVal = transport.initAsClient()
     if not retVal[ 'OK' ]:
       if self.__retry < self.__nbOfRetry * self.__nbOfUrls - 1:
         url = "%s://%s:%d/%s" % ( self.__URLTuple[0], self.__URLTuple[1], int( self.__URLTuple[2] ), self.__URLTuple[3] )
         if url not in self.__bannedUrls: 
           gLogger.notice( "URL banned", "%s" % url )
           self.__bannedUrls += [url]   
         self.__retry += 1
         gLogger.info( "Retry connection: ", "%d" % self.__retry )
         if (len(self.__bannedUrls) == self.__nbOfUrls):
           self.__retryDelay = 3. / self.__nbOfUrls  if self.__nbOfUrls > 1 else 5  # we run only one service! In that case we increase the retry delay.
           gLogger.info( "Waiting %f  second before retry all service(s)" % self.__retryDelay )
           time.sleep( self.__retryDelay )
         self.__discoverURL()
         return self._connect()
       else:
         return S_ERROR( "Can't connect to %s: %s" % ( self.serviceURL, retVal ) )
   except Exception as e:
     return S_ERROR( "Can't connect to %s: %s" % ( self.serviceURL, e ) )
Example #9
 def __checkSlavesStatus(self, forceWriteConfiguration=False):
   gLogger.info("Checking status of slave servers")
   iGraceTime = gConfigurationData.getSlavesGraceTime()
   bModifiedSlaveServers = False
   for sSlaveURL in self.dAliveSlaveServers.keys():
     if time.time() - self.dAliveSlaveServers[sSlaveURL] > iGraceTime:
       gLogger.info("Found dead slave", sSlaveURL)
       del self.dAliveSlaveServers[sSlaveURL]
       bModifiedSlaveServers = True
   if bModifiedSlaveServers or forceWriteConfiguration:
     gConfigurationData.setServers("%s, %s" % (self.sURL,
                                               ", ".join(self.dAliveSlaveServers.keys())))
     self.__generateNewVersion()
Example #10
 def __checkSlavesStatus(self, forceWriteConfiguration=False):
     gLogger.info("Checking status of slave servers")
     iGraceTime = gConfigurationData.getSlavesGraceTime()
     lSlaveURLs = self.dAliveSlaveServers.keys()
     bModifiedSlaveServers = False
     for sSlaveURL in lSlaveURLs:
         if time.time() - self.dAliveSlaveServers[sSlaveURL] > iGraceTime:
             gLogger.info("Found dead slave", sSlaveURL)
             del self.dAliveSlaveServers[sSlaveURL]
             bModifiedSlaveServers = True
     if bModifiedSlaveServers or forceWriteConfiguration:
         gConfigurationData.setServers("%s, %s" % (self.sURL, ", ".join(self.dAliveSlaveServers.keys())))
         self.__generateNewVersion()
    def forceSlavesUpdate(self):
        """
        Force updating configuration on all the slave configuration servers

        :return: Nothing
        """
        gLogger.info("Updating configuration on slave servers")
        iGraceTime = gConfigurationData.getSlavesGraceTime()
        urlSet = set()
        for slaveURL in self.dAliveSlaveServers:
            if time.time() - self.dAliveSlaveServers[slaveURL] <= iGraceTime:
                urlSet.add(slaveURL)
        self._updateServiceConfiguration(urlSet, fromMaster=True)
Example #12
  def forceSlavesUpdate(self):
    """
    Force updating configuration on all the slave configuration servers

    :return: S_OK/S_ERROR, Value Successful/Failed dict with service URLs
    """
    gLogger.info("Updating configuration on slave servers")
    iGraceTime = gConfigurationData.getSlavesGraceTime()
    self.__updateResultDict = {"Successful": {}, "Failed": {}}
    urlSet = set()
    for slaveURL in self.dAliveSlaveServers:
      if time.time() - self.dAliveSlaveServers[slaveURL] <= iGraceTime:
        urlSet.add(slaveURL)
    return self.__updateServiceConfiguration(urlSet, fromMaster=True)
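The grace-time filter used above can be shown in isolation; a hedged standalone sketch with a plain dict of last-ping timestamps standing in for dAliveSlaveServers.

import time

def select_alive_slaves(last_seen, grace_seconds):
    # last_seen maps slave URL -> timestamp of its last ping; only slaves that
    # reported within the grace period are kept for the forced update.
    now = time.time()
    return {url for url, seen in last_seen.items() if now - seen <= grace_seconds}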
Example #13
 def __getPreviousCFG(self, oRemoteConfData):
   backupsList = self.__getCfgBackups(gConfigurationData.getBackupDir(), date=oRemoteConfData.getVersion())
   if not backupsList:
     return S_ERROR("Could not AutoMerge. Could not retrieve original commiter's version")
   prevRemoteConfData = ConfigurationData()
   backFile = backupsList[0]
   if backFile[0] == "/":
     backFile = os.path.join(gConfigurationData.getBackupDir(), backFile[1:])
   try:
     prevRemoteConfData.loadConfigurationData(backFile)
   except Exception as e:
     return S_ERROR("Could not load original commiter's version: %s" % str(e))
   gLogger.info("Loaded client original version %s" % prevRemoteConfData.getVersion())
   return S_OK(prevRemoteConfData.getRemoteCFG())
    def publishSlaveServer(self, sSlaveURL):
        """
        Called by the slave server via service, it registers a new slave server

        :param sSlaveURL: url of slave server
        """

        if not gConfigurationData.isMaster():
            return S_ERROR("Configuration modification is not allowed in this server")
        gLogger.info("Pinging slave %s" % sSlaveURL)
        rpcClient = ConfigurationClient(url=sSlaveURL, timeout=10, useCertificates=True)
        retVal = rpcClient.ping()
        if not retVal["OK"]:
            gLogger.info("Slave %s didn't reply" % sSlaveURL)
            return
        if retVal["Value"]["name"] != "Configuration/Server":
            gLogger.info("Slave %s is not a CS serveR" % sSlaveURL)
            return
        bNewSlave = False
        if sSlaveURL not in self.dAliveSlaveServers:
            bNewSlave = True
            gLogger.info("New slave registered", sSlaveURL)
        self.dAliveSlaveServers[sSlaveURL] = time.time()
        if bNewSlave:
            gConfigurationData.setServers(", ".join(self.dAliveSlaveServers))
            self.__generateNewVersion()
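A standalone sketch of the registration bookkeeping done above: record the ping time and, for a previously unseen slave, rebuild the comma-separated server list. The real method additionally pings the slave and checks that it answers as Configuration/Server.

import time

def register_slave(alive, slave_url):
    # alive maps slave URL -> last ping timestamp, like dAliveSlaveServers above.
    is_new = slave_url not in alive
    alive[slave_url] = time.time()
    if is_new:
        # a new slave means the published server list has to be rebuilt
        return ", ".join(alive)
    return None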
 def __getPreviousCFG(self, oRemoteConfData):
     backupsList = self.__getCfgBackups(gConfigurationData.getBackupDir(), date=oRemoteConfData.getVersion())
     if not backupsList:
         return S_ERROR("Could not AutoMerge. Could not retrieve original committer's version")
     prevRemoteConfData = ConfigurationData()
     backFile = backupsList[0]
     if backFile[0] == "/":
         backFile = os.path.join(gConfigurationData.getBackupDir(), backFile[1:])
     try:
         prevRemoteConfData.loadConfigurationData(backFile)
     except Exception as e:
         return S_ERROR("Could not load original committer's version: %s" % str(e))
     gLogger.info("Loaded client original version %s" % prevRemoteConfData.getVersion())
     return S_OK(prevRemoteConfData.getRemoteCFG())
    def _forceServiceUpdate(url, fromMaster):
        """
        Force updating configuration on a given service
        This should be called by _updateServiceConfiguration

        :param str url: service URL
        :param bool fromMaster: flag to force updating from the master CS
        :return: S_OK/S_ERROR
        """
        gLogger.info("Updating service configuration on", url)

        result = Client(url=url).refreshConfiguration(fromMaster)
        result["URL"] = url
        return result
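Per-service results like the one returned above (an S_OK/S_ERROR-style dict with an extra "URL" key) are collected elsewhere in this listing into a Successful/Failed dictionary; a hedged sketch of that folding step, using plain dicts only.

def collect_update_results(results):
    # results: iterable of dicts shaped like {"OK": bool, "URL": url, "Message": ...}
    summary = {"Successful": {}, "Failed": {}}
    for res in results:
        url = res.get("URL", "unknown")
        if res.get("OK"):
            summary["Successful"][url] = True
        else:
            summary["Failed"][url] = res.get("Message", "unknown error")
    return summary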
Example #17
  def forceGlobalUpdate(self):
    """
    Force updating configuration of all the registered services

    :return: S_OK/S_ERROR, Value Successful/Failed dict with service URLs
    """
    gLogger.info("Updating services configuration")
    # Get URLs of all the services except for Configuration services
    cfg = gConfigurationData.remoteCFG.getAsDict()['Systems']
    urlSet = set()
    for system_ in cfg:
      for instance in cfg[system_]:
        for url in cfg[system_][instance]['URLs']:
          urlSet = urlSet.union(set([u.strip() for u in cfg[system_][instance]['URLs'][url].split(',')
                                     if 'Configuration/Server' not in u]))
    return self.__updateServiceConfiguration(urlSet)
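The nested loop above flattens Systems/&lt;system&gt;/&lt;instance&gt;/URLs into a set of service URLs while skipping Configuration/Server entries; a standalone sketch of the same traversal over a plain nested dict mirroring gConfigurationData.remoteCFG.getAsDict()['Systems'].

def harvest_service_urls(systems_cfg):
    # systems_cfg: {system: {instance: {"URLs": {service: "url1, url2", ...}}}}
    urls = set()
    for system in systems_cfg.values():
        for instance in system.values():
            for value in instance.get("URLs", {}).values():
                urls.update(u.strip() for u in value.split(",")
                            if "Configuration/Server" not in u)
    return urls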
Example #18
    def _connect(self):

        self.__discoverExtraCredentials()
        if not self.__initStatus['OK']:
            return self.__initStatus
        if self.__enableThreadCheck:
            self.__checkThreadID()
        gLogger.debug("Connecting to: %s" % self.serviceURL)
        try:
            transport = gProtocolDict[self.__URLTuple[0]]['transport'](
                self.__URLTuple[1:3], **self.kwargs)
            #the socket timeout is the default value which is 1.
            #later we increase to 5
            retVal = transport.initAsClient()
            if not retVal['OK']:
                if self.__retry < self.__nbOfRetry * self.__nbOfUrls - 1:
                    url = "%s://%s:%d/%s" % (
                        self.__URLTuple[0], self.__URLTuple[1],
                        int(self.__URLTuple[2]), self.__URLTuple[3])
                    if url not in self.__bannedUrls:
                        self.__bannedUrls += [url]
                        if len(self.__bannedUrls) < self.__nbOfUrls:
                            gLogger.notice(
                                "Non-responding URL temporarily banned",
                                "%s" % url)
                    self.__retry += 1
                    if self.__retryCounter == self.__nbOfRetry - 1:
                        transport.setSocketTimeout(
                            5
                        )  # we increase the socket timeout in case the network is not good
                    gLogger.info("Retry connection: ", "%d" % self.__retry)
                    if len(self.__bannedUrls) == self.__nbOfUrls:
                        self.__retryCounter += 1
                        self.__retryDelay = 3. / self.__nbOfUrls if self.__nbOfUrls > 1 else 2  # we run only one service! In that case we increase the retry delay.
                        gLogger.info(
                            "Waiting %f  second before retry all service(s)" %
                            self.__retryDelay)
                        time.sleep(self.__retryDelay)
                    self.__discoverURL()
                    return self._connect()
                else:
                    return retVal
        except Exception as e:
            return S_ERROR("Can't connect to %s: %s" %
                           (self.serviceURL, repr(e)))
        trid = getGlobalTransportPool().add(transport)
        return S_OK((trid, transport))
Example #19
def loadObjects(path, reFilter=None, parentClass=None):
    if not reFilter:
        reFilter = re.compile(r".*[a-z1-9]\.py$")
    pathList = List.fromChar(path, "/")

    parentModuleList = [
        "%sDIRAC" % ext for ext in CSGlobals.getCSExtensions()
    ] + ['DIRAC']
    objectsToLoad = {}
    #Find which object files match
    for parentModule in parentModuleList:
        objDir = os.path.join(DIRAC.rootPath, parentModule, *pathList)
        if not os.path.isdir(objDir):
            continue
        for objFile in os.listdir(objDir):
            if reFilter.match(objFile):
                pythonClassName = objFile[:-3]
                if pythonClassName not in objectsToLoad:
                    gLogger.info("Adding to load queue %s/%s/%s" %
                                 (parentModule, path, pythonClassName))
                    objectsToLoad[pythonClassName] = parentModule

    #Load them!
    loadedObjects = {}

    for pythonClassName in objectsToLoad:
        parentModule = objectsToLoad[pythonClassName]
        try:
            #Where parentModule can be DIRAC, pathList is something like [ "AccountingSystem", "Client", "Types" ]
            #And the python class name is.. well, the python class name
            objPythonPath = "%s.%s.%s" % (parentModule, ".".join(pathList),
                                          pythonClassName)
            objModule = __import__(objPythonPath, globals(), locals(),
                                   pythonClassName)
            objClass = getattr(objModule, pythonClassName)
        except Exception as e:
            gLogger.error("Can't load type %s/%s: %s" %
                          (parentModule, pythonClassName, str(e)))
            continue
        if parentClass == objClass:
            continue
        if parentClass and not issubclass(objClass, parentClass):
            gLogger.warn("%s is not a subclass of %s. Skipping" %
                         (objClass, parentClass))
            continue
        gLogger.info("Loaded %s" % objPythonPath)
        loadedObjects[pythonClassName] = objClass
 def __backupCurrentConfiguration( self, backupName ):
   configurationFilename = "%s.cfg" % self.getName()
   configurationFile = os.path.join( DIRAC.rootPath, "etc", configurationFilename )
   today = Time.date()
   backupPath = os.path.join( self.getBackupDir(), str( today.year ), "%02d" % today.month )
   mkDir(backupPath)
   backupFile = os.path.join( backupPath, configurationFilename.replace( ".cfg", ".%s.zip" % backupName ) )
   if os.path.isfile( configurationFile ):
     gLogger.info( "Making a backup of configuration in %s" % backupFile )
     try:
       with zipfile.ZipFile( backupFile, "w", zipfile.ZIP_DEFLATED ) as zf:
         zf.write( configurationFile, "%s.backup.%s" % ( os.path.split( configurationFile )[1], backupName ) )
     except Exception:
       gLogger.exception()
       gLogger.error( "Cannot backup configuration data file", "file %s" % backupFile )
   else:
     gLogger.warn( "CS data file does not exist", configurationFile )
    def _checkSlavesStatus(self, forceWriteConfiguration=False):
        """
        Check if slave servers are still available

        :param forceWriteConfiguration: (default False) Force rewriting configuration after checking slaves
        """

        gLogger.info("Checking status of slave servers")
        iGraceTime = gConfigurationData.getSlavesGraceTime()
        bModifiedSlaveServers = False
        for sSlaveURL in list(self.dAliveSlaveServers):
            if time.time() - self.dAliveSlaveServers[sSlaveURL] > iGraceTime:
                gLogger.warn("Found dead slave", sSlaveURL)
                del self.dAliveSlaveServers[sSlaveURL]
                bModifiedSlaveServers = True
        if bModifiedSlaveServers or forceWriteConfiguration:
            gConfigurationData.setServers(", ".join(self.dAliveSlaveServers))
            self.__generateNewVersion()
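The loop above iterates over list(self.dAliveSlaveServers) so entries can be deleted safely while scanning; a minimal standalone illustration of the same purge pattern.

import time

def purge_dead_slaves(last_seen, grace_seconds):
    # Iterate over a snapshot of the keys so deletion during the loop is safe.
    removed = []
    now = time.time()
    for url in list(last_seen):
        if now - last_seen[url] > grace_seconds:
            del last_seen[url]
            removed.append(url)
    return removed  # a non-empty list means the server list must be rewritten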
def loadObjects( path, reFilter = None, parentClass = None ):
  if not reFilter:
    reFilter = re.compile( r".*[a-z1-9]\.py$" )
  pathList = List.fromChar( path, "/" )

  parentModuleList = [ "%sDIRAC" % ext for ext in CSGlobals.getCSExtensions() ] + [ 'DIRAC' ]
  objectsToLoad = {}
  #Find which object files match
  for parentModule in parentModuleList:
    objDir = os.path.join( DIRAC.rootPath, parentModule, *pathList )
    if not os.path.isdir( objDir ):
      continue
    for objFile in os.listdir( objDir ):
      if reFilter.match( objFile ):
        pythonClassName = objFile[:-3]
        if pythonClassName not in objectsToLoad:
          gLogger.info( "Adding to message load queue %s/%s/%s" % ( parentModule, path, pythonClassName ) )
          objectsToLoad[ pythonClassName ] = parentModule

  #Load them!
  loadedObjects = {}

  for pythonClassName in objectsToLoad:
    parentModule = objectsToLoad[ pythonClassName ]
    try:
      #Where parentModule can be DIRAC, pathList is something like [ "AccountingSystem", "Client", "Types" ]
      #And the python class name is.. well, the python class name
      objPythonPath = "%s.%s.%s" % ( parentModule, ".".join( pathList ), pythonClassName )
      objModule = __import__( objPythonPath,
                               globals(),
                               locals(), pythonClassName )
      objClass = getattr( objModule, pythonClassName )
    except Exception as e:
      gLogger.exception( "Can't load type %s/%s: %s" % ( parentModule, pythonClassName, str( e ) ) )
      continue
    if parentClass == objClass:
      continue
    if parentClass and not issubclass( objClass, parentClass ):
      gLogger.warn( "%s is not a subclass of %s. Skipping" % ( objClass, parentClass ) )
      continue
    gLogger.info( "Loaded %s" % objPythonPath )
    loadedObjects[ pythonClassName ] = objClass

  return loadedObjects
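loadObjects composes a dotted module path from the parent module, the directory path and the file name, imports it, then pulls the class of the same name out of the module. A hedged standalone sketch of that step using importlib; the example path in the comment is purely illustrative.

import importlib

def load_class(parent_module, path_list, class_name, parent_class=None):
    # e.g. parent_module="DIRAC", path_list=["AccountingSystem", "Client", "Types"]
    dotted = ".".join([parent_module] + path_list + [class_name])
    try:
        module = importlib.import_module(dotted)
        cls = getattr(module, class_name)
    except (ImportError, AttributeError) as exc:
        return None, "Can't load type %s: %s" % (dotted, exc)
    if parent_class is not None and not issubclass(cls, parent_class):
        return None, "%s is not a subclass of %s" % (cls, parent_class)
    return cls, None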
    def forceGlobalUpdate(self):
        """
        Force updating configuration of all the registered services

        :returns: S_OK (needed for DISET return call)
        """
        gLogger.info("Updating services configuration")
        # Get URLs of all the services except for Configuration services
        cfg = gConfigurationData.remoteCFG.getAsDict()["Systems"]
        urlSet = set()
        for system_ in cfg:
            for instance in cfg[system_]:
                for url in cfg[system_][instance]["URLs"]:
                    urlSet = urlSet.union(
                        set([
                            u.strip() for u in cfg[system_][instance]["URLs"]
                            [url].split(",") if "Configuration/Server" not in u
                        ]))
        self._updateServiceConfiguration(urlSet)
        return S_OK()
Example #24
  def __forceServiceUpdate(url, fromMaster):
    """
    Force updating configuration on a given service

    :param str url: service URL
    :param bool fromMaster: flag to force updating from the master CS
    :return: S_OK/S_ERROR
    """
    gLogger.info('Updating service configuration on', url)
    if url.startswith('dip'):
      rpc = RPCClient(url)
      result = rpc.refreshConfiguration(fromMaster)
    elif url.startswith('http'):
      hostCertTuple = getHostCertificateAndKeyLocation()
      resultRequest = requests.get(url,
                                   headers={'X-RefreshConfiguration': "True"},
                                   cert=hostCertTuple,
                                   verify=False)
      result = S_OK()
      if resultRequest.status_code != 200:
        result = S_ERROR("Status code returned %d" % resultRequest.status_code)
    result['URL'] = url
    return result
Example #25
  def _connect( self ):

    self.__discoverExtraCredentials()
    if not self.__initStatus[ 'OK' ]:
      return self.__initStatus
    if self.__enableThreadCheck:
      self.__checkThreadID()
    gLogger.debug( "Connecting to: %s" % self.serviceURL )
    try:
      transport = gProtocolDict[ self.__URLTuple[0] ][ 'transport' ]( self.__URLTuple[1:3], **self.kwargs )
      #the socket timeout is the default value which is 1.
      #later we increase to 5
      retVal = transport.initAsClient()
      if not retVal[ 'OK' ]:
        if self.__retry < self.__nbOfRetry * self.__nbOfUrls - 1:
          url = "%s://%s:%d/%s" % ( self.__URLTuple[0], self.__URLTuple[1], int( self.__URLTuple[2] ), self.__URLTuple[3] )
          if url not in self.__bannedUrls:
            self.__bannedUrls += [url]
            if len( self.__bannedUrls ) < self.__nbOfUrls:
              gLogger.notice( "Non-responding URL temporarily banned", "%s" % url )
          self.__retry += 1
          if self.__retryCounter == self.__nbOfRetry - 1:
            transport.setSocketTimeout( 5 ) # we increase the socket timeout in case the network is not good
          gLogger.info( "Retry connection: ", "%d" % self.__retry )
          if len(self.__bannedUrls) == self.__nbOfUrls:
            self.__retryCounter += 1
            self.__retryDelay = 3. / self.__nbOfUrls  if self.__nbOfUrls > 1 else 2  # we run only one service! In that case we increase the retry delay.
            gLogger.info( "Waiting %f  second before retry all service(s)" % self.__retryDelay )
            time.sleep( self.__retryDelay )
          self.__discoverURL()
          return self._connect()
        else:
          return retVal
    except Exception as e:
      return S_ERROR( "Can't connect to %s: %s" % ( self.serviceURL, repr( e ) ) )
    trid = getGlobalTransportPool().add( transport )
    return S_OK( ( trid, transport ) )
Example #26
 def publishSlaveServer(self, sSlaveURL):
     if not gConfigurationData.isMaster():
         return S_ERROR("Configuration modification is not allowed in this server")
     gLogger.info("Pinging slave %s" % sSlaveURL)
     rpcClient = RPCClient(sSlaveURL, timeout=10, useCertificates=True)
     retVal = rpcClient.ping()
     if not retVal["OK"]:
         gLogger.info("Slave %s didn't reply" % sSlaveURL)
         return
     if retVal["Value"]["name"] != "Configuration/Server":
         gLogger.info("Slave %s is not a CS serveR" % sSlaveURL)
         return
     bNewSlave = False
     if sSlaveURL not in self.dAliveSlaveServers:
         bNewSlave = True
         gLogger.info("New slave registered", sSlaveURL)
     self.dAliveSlaveServers[sSlaveURL] = time.time()
     if bNewSlave:
         gConfigurationData.setServers("%s, %s" % (self.sURL, ", ".join(self.dAliveSlaveServers.keys())))
         self.__generateNewVersion()
Example #27
 def publishSlaveServer(self, sSlaveURL):
   if not gConfigurationData.isMaster():
     return S_ERROR("Configuration modification is not allowed in this server")
   gLogger.info("Pinging slave %s" % sSlaveURL)
   rpcClient = RPCClient(sSlaveURL, timeout=10, useCertificates=True)
   retVal = rpcClient.ping()
   if not retVal['OK']:
     gLogger.info("Slave %s didn't reply" % sSlaveURL)
     return
   if retVal['Value']['name'] != 'Configuration/Server':
     gLogger.info("Slave %s is not a CS serveR" % sSlaveURL)
     return
   bNewSlave = False
   if sSlaveURL not in self.dAliveSlaveServers:
     bNewSlave = True
     gLogger.info("New slave registered", sSlaveURL)
   self.dAliveSlaveServers[sSlaveURL] = time.time()
   if bNewSlave:
     gConfigurationData.setServers("%s, %s" % (self.sURL,
                                               ", ".join(self.dAliveSlaveServers.keys())))
     self.__generateNewVersion()
 def __init__(self, sURL):
     self.sURL = sURL
     gLogger.info("Initializing Configuration Service", "URL is %s" % sURL)
     self.__modificationsIgnoreMask = ["/DIRAC/Configuration/Servers", "/DIRAC/Configuration/Version"]
     gConfigurationData.setAsService()
     if not gConfigurationData.isMaster():
         gLogger.info("Starting configuration service as slave")
         gRefresher.autoRefreshAndPublish(self.sURL)
     else:
         gLogger.info("Starting configuration service as master")
         gRefresher.disable()
         self.__loadConfigurationData()
         self.dAliveSlaveServers = {}
         self._launchCheckSlaves()
 def __init__( self, sURL ):
   threading.Thread.__init__( self )
   self.sURL = sURL
   gLogger.info( "Initializing Configuration Service", "URL is %s" % sURL )
   self.__modificationsIgnoreMask = [ '/DIRAC/Configuration/Servers', '/DIRAC/Configuration/Version' ]
   gConfigurationData.setAsService()
   if not gConfigurationData.isMaster():
     gLogger.info( "Starting configuration service as slave" )
     gRefresher.autoRefreshAndPublish( self.sURL )
   else:
     gLogger.info( "Starting configuration service as master" )
     gRefresher.disable()
     self.__loadConfigurationData()
     self.dAliveSlaveServers = {}
     self.__launchCheckSlaves()
Example #30
  def __init__(self, sURL):
    threading.Thread.__init__(self)
    self.sURL = sURL
    gLogger.info("Initializing Configuration Service", "URL is %s" % sURL)
    self.__modificationsIgnoreMask = ['/DIRAC/Configuration/Servers', '/DIRAC/Configuration/Version']
    gConfigurationData.setAsService()
    if not gConfigurationData.isMaster():
      gLogger.info("Starting configuration service as slave")
      gRefresher.autoRefreshAndPublish(self.sURL)
    else:
      gLogger.info("Starting configuration service as master")
      gRefresher.disable()
      self.__loadConfigurationData()
      self.dAliveSlaveServers = {}
      self.__launchCheckSlaves()

    self.__updateResultDict = {"Successful": {}, "Failed": {}}
Example #31
 def generateNewVersion(self):
     self.setVersion(str(datetime.datetime.utcnow()))
     self.sync()
     gLogger.info("Generated new version %s" % self.getVersion())
 def generateNewVersion( self ):
   self.setVersion( Time.toString() )
   self.sync()
   gLogger.info( "Generated new version %s" % self.getVersion() )
    def updateConfiguration(self, sBuffer, committer="", updateVersionOption=False):
        """
        Update the master configuration with the newly received changes

        :param str sBuffer: newly received configuration data
        :param str committer: the user name of the committer
        :param bool updateVersionOption: flag to update the current configuration version
        :return: S_OK/S_ERROR of the write-to-disk of the new configuration
        """
        if not gConfigurationData.isMaster():
            return S_ERROR("Configuration modification is not allowed in this server")
        # Load the data in a ConfigurationData object
        oRemoteConfData = ConfigurationData(False)
        oRemoteConfData.loadRemoteCFGFromCompressedMem(sBuffer)
        if updateVersionOption:
            oRemoteConfData.setVersion(gConfigurationData.getVersion())
        # Test that remote and new versions are the same
        sRemoteVersion = oRemoteConfData.getVersion()
        sLocalVersion = gConfigurationData.getVersion()
        gLogger.info("Checking versions\n", "remote: %s\nlocal:  %s" % (sRemoteVersion, sLocalVersion))
        if sRemoteVersion != sLocalVersion:
            if not gConfigurationData.mergingEnabled():
                return S_ERROR(
                    "Local and remote versions differ (%s vs %s). Cannot commit." % (sLocalVersion, sRemoteVersion)
                )
            else:
                gLogger.info("AutoMerging new data!")
                if updateVersionOption:
                    return S_ERROR("Cannot AutoMerge! version was overwritten")
                result = self.__mergeIndependentUpdates(oRemoteConfData)
                if not result["OK"]:
                    gLogger.warn("Could not AutoMerge!", result["Message"])
                    return S_ERROR("AutoMerge failed: %s" % result["Message"])
                requestedRemoteCFG = result["Value"]
                gLogger.info("AutoMerge successful!")
                oRemoteConfData.setRemoteCFG(requestedRemoteCFG)
        # Test that configuration names are the same
        sRemoteName = oRemoteConfData.getName()
        sLocalName = gConfigurationData.getName()
        if sRemoteName != sLocalName:
            return S_ERROR("Names differ: Server is %s and remote is %s" % (sLocalName, sRemoteName))
        # Update and generate a new version
        gLogger.info("Committing new data...")
        gConfigurationData.lock()
        gLogger.info("Setting the new CFG")
        gConfigurationData.setRemoteCFG(oRemoteConfData.getRemoteCFG())
        gConfigurationData.unlock()
        gLogger.info("Generating new version")
        gConfigurationData.generateNewVersion()
        # self.__checkSlavesStatus( forceWriteConfiguration = True )
        gLogger.info("Writing new version to disk")
        retVal = gConfigurationData.writeRemoteConfigurationToDisk(
            "%s@%s" % (committer, gConfigurationData.getVersion())
        )
        gLogger.info("New version", gConfigurationData.getVersion())

        # Attempt to update the configuration on currently registered slave services
        if gConfigurationData.getAutoSlaveSync():
            self.forceSlavesUpdate()

        return retVal
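The commit above hinges on a version comparison: identical versions commit directly, differing versions either fail or go through AutoMerge first. A hedged standalone sketch of just that decision, with callables standing in for __mergeIndependentUpdates and the lock/set/generateNewVersion/write sequence.

def decide_commit(local_version, remote_version, merging_enabled, merge, commit):
    # merge() and commit() are illustrative stand-ins returning S_OK/S_ERROR-style dicts.
    if remote_version != local_version:
        if not merging_enabled:
            return {"OK": False,
                    "Message": "Local and remote versions differ (%s vs %s). Cannot commit."
                               % (local_version, remote_version)}
        merged = merge()
        if not merged["OK"]:
            return {"OK": False, "Message": "AutoMerge failed: %s" % merged["Message"]}
    return commit()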
Example #34
 def generateNewVersion(self):
     self.setVersion(Time.toString())
     self.sync()
     gLogger.info("Generated new version %s" % self.getVersion())
Example #35
def initDIRAC( rootPath, enableDebug = False ):
    # CONFIGURATION OPTIONS HERE (note: all config options will override
    # any Pylons config options)
    configDict = { 'webConfig' : {} }
    configDict[ 'webConfig' ]['dirac.webroot'] = rootPath
    diracRootPath = os.path.realpath( os.path.dirname( os.path.dirname( rootPath ) ) )
    configDict[ 'webConfig' ]['dirac.root'] = diracRootPath
    if diracRootPath not in sys.path:
      sys.path.append( diracRootPath )
    from DIRAC.FrameworkSystem.Client.Logger import gLogger
    gLogger.registerBackends( [ 'stderr' ] )
    from DIRAC.Core.Base import Script
    Script.registerSwitch( "r", "reload", "Reload for pylons" )
    Script.localCfg.addDefaultEntry( "/DIRAC/Security/UseServerCertificate", "yes" )
    Script.localCfg.addDefaultEntry( "LogColor", True )
    Script.initialize( script = "Website", ignoreErrors = True, initializeMonitor = False )
    gLogger._systemName = "Framework"
    gLogger.initialize( "Web", "/Website" )
    gLogger.setLevel( "VERBOSE" )

    from DIRAC import gMonitor, gConfig, rootPath as droot
    from DIRAC.Core.Utilities import CFG
    from DIRAC.ConfigurationSystem.Client.Helpers import getCSExtensions
    gMonitor.setComponentType( gMonitor.COMPONENT_WEB )
    gMonitor.initialize()
    gMonitor.registerActivity( "pagesServed", "Pages served", "Framework", "pages", gMonitor.OP_SUM )

    gLogger.info( "DIRAC Initialized" )

    configDict['portalVersion'] = portalVersion( rootPath )
    gLogger.info( "DIRAC portal version: %s" % configDict['portalVersion'] )

    extModules = [ '%sDIRAC' % module for module in getCSExtensions() ]
    #Load web.cfg of modules
    cfgFilePaths = [ os.path.join( droot, "etc", "web.cfg" ) ]
    for extModule in extModules:
      gLogger.info( "Adding web.cfg for %s extension" % extModule )
      extModulePath = os.path.join( diracRootPath, extModule )
      webCFGPath = os.path.join( extModulePath, "Web", "web.cfg" )
      cfgFilePaths.append( webCFGPath )
      for systemDir in os.listdir( extModulePath ):
        webCFGSystemPath = os.path.join( extModulePath, systemDir, "Web", "web.cfg" )
        cfgFilePaths.append( webCFGSystemPath )
    webCFG = CFG.CFG()
    for webCFGPath in cfgFilePaths:
      if not os.path.isfile( webCFGPath ):
        gLogger.warn( "%s does not exist" % webCFGPath )
      else:
        gLogger.info( "Loading %s" % webCFGPath )
        modCFG = CFG.CFG().loadFromFile( webCFGPath )
        if modCFG.getOption( 'Website/AbsoluteDefinition', False ):
          gLogger.info( "CFG %s is absolute" % webCFGPath )
          webCFG = modCFG
        else:
          webCFG = webCFG.mergeWith( modCFG )
    gConfig.loadCFG( webCFG )
    gLogger.showHeaders( True )
    gLogger._gLogger__initialized = False
    gLogger.initialize( "Web", "/Website" )

    #Define the controllers, templates and public directories
    for type in ( 'controllers', 'templates', 'public' ):
      configDict[ type ] = []
      for extModule in extModules:
        extModulePath = os.path.join( diracRootPath, extModule )
        typePath = os.path.join( extModulePath, "Web", type )
        if os.path.isdir( typePath ):
          gLogger.info( "Adding %s path for module %s" % ( type, extModule ) )
          configDict[ type ].append( typePath )
        for systemDir in os.listdir( extModulePath ):
          systemTypePath = os.path.join( extModulePath, systemDir, "Web", type )
          if os.path.isdir( systemTypePath ):
            gLogger.info( "Adding %s path for system %s in module %s" % ( type, systemDir, extModule ) )
            configDict[ type ].append( systemTypePath )
      #End of extensions
      configDict[ type ].append( os.path.join( rootPath, type ) )

    #Load debug.cfg?
    if enableDebug:
      debugCFGPath = os.path.join( rootPath, "debug.cfg" )
      if os.path.isfile( debugCFGPath ):
        gLogger.info( "Loading debug cfg file at %s" % debugCFGPath )
        gConfig.loadFile( debugCFGPath )

    gLogger.info( "Extension modules loaded" )

    return configDict
Example #36
  def _connect(self):
    """ Establish the connection.
        It uses the URL discovered in __discoverURL.
        In case the connection cannot be established, __discoverURL
        is called again, and _connect calls itself.
        We stop after trying self.__nbOfRetry * self.__nbOfUrls times.

    """
    # Check if the useServerCertificate configuration changed
    # Note: I am not really sure that  all this block makes
    # any sense at all since all these variables are
    # evaluated in __discoverCredentialsToUse
    if gConfig.useServerCertificate() != self.__useCertificates:
      if self.__forceUseCertificates is None:
        self.__useCertificates = gConfig.useServerCertificate()
        self.kwargs[self.KW_USE_CERTIFICATES] = self.__useCertificates
        # The server certificate use context changed, rechecking the transport sanity
        result = self.__checkTransportSanity()
        if not result['OK']:
          return result

    # Take all the extra credentials
    self.__discoverExtraCredentials()
    if not self.__initStatus['OK']:
      return self.__initStatus
    if self.__enableThreadCheck:
      self.__checkThreadID()

    gLogger.debug("Connecting to: %s" % self.serviceURL)
    try:
      # Calls the transport method of the appropriate protocol.
      # self.__URLTuple[1:3] = [server name, port, System/Component]
      transport = gProtocolDict[self.__URLTuple[0]]['transport'](self.__URLTuple[1:3], **self.kwargs)
      # the socket timeout is the default value which is 1.
      # later we increase to 5
      retVal = transport.initAsClient()
      # If we have an issue connecting
      if not retVal['OK']:
        # We try at most __nbOfRetry each URLs
        if self.__retry < self.__nbOfRetry * self.__nbOfUrls - 1:
          # Recompose the URL (why not using self.serviceURL ? )
          url = "%s://%s:%d/%s" % (self.__URLTuple[0], self.__URLTuple[1], int(self.__URLTuple[2]), self.__URLTuple[3])
          # Add the url to the list of banned URLs if it is not already there. (Can it happen ? I don't think so)
          if url not in self.__bannedUrls:
            self.__bannedUrls += [url]
            # Why only printing in this case ?
            if len(self.__bannedUrls) < self.__nbOfUrls:
              gLogger.notice("Non-responding URL temporarily banned", "%s" % url)
          # Increment the retry counter
          self.__retry += 1
          # If it is our last attempt for each URL, we increase the timeout
          if self.__retryCounter == self.__nbOfRetry - 1:
            transport.setSocketTimeout(5)  # we increase the socket timeout in case the network is not good
          gLogger.info("Retry connection", ": %d to %s" % (self.__retry, self.serviceURL))
          # If we tried all the URL, we increase the global counter (__retryCounter), and sleep
          if len(self.__bannedUrls) == self.__nbOfUrls:
            self.__retryCounter += 1
            # we run only one service! In that case we increase the retry delay.
            self.__retryDelay = 3. / self.__nbOfUrls if self.__nbOfUrls > 1 else 2
            gLogger.info("Waiting %f seconds before retry all service(s)" % self.__retryDelay)
            time.sleep(self.__retryDelay)
          # rediscover the URL
          self.__discoverURL()
          # try to reconnect
          return self._connect()
        else:
          return retVal
    except Exception as e:
      gLogger.exception(lException=True, lExcInfo=True)
      return S_ERROR("Can't connect to %s: %s" % (self.serviceURL, repr(e)))
    # We add the connection to the transport pool
    trid = getGlobalTransportPool().add(transport)

    return S_OK((trid, transport))
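The retry budget above allows __nbOfRetry attempts per URL: the recursion stops once __retry reaches __nbOfRetry * __nbOfUrls - 1, and once every URL has been banned in the current round a small delay is inserted before the whole set is retried. A standalone sketch of that bookkeeping, with no networking and illustrative names.

def next_retry(retry, banned_urls, n_urls, n_retries_per_url):
    # Returns (keep_trying, delay_before_retrying_all_urls_or_None).
    if retry >= n_retries_per_url * n_urls - 1:
        return False, None  # budget exhausted, give up
    delay = None
    if len(banned_urls) == n_urls:
        # every URL failed in this round: pause before retrying them all
        delay = 3.0 / n_urls if n_urls > 1 else 2
    return True, delay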
Example #37
  def _connect(self):
    """ Establish the connection.
        It uses the URL discovered in __discoverURL.
        In case the connection cannot be established, __discoverURL
        is called again, and _connect calls itself.
        We stop after trying self.__nbOfRetry * self.__nbOfUrls times.

        :return: S_OK()/S_ERROR()
    """
    # Check if the useServerCertificate configuration changed
    # Note: I am not really sure that  all this block makes
    # any sense at all since all these variables are
    # evaluated in __discoverCredentialsToUse
    if gConfig.useServerCertificate() != self.__useCertificates:
      if self.__forceUseCertificates is None:
        self.__useCertificates = gConfig.useServerCertificate()
        self.kwargs[self.KW_USE_CERTIFICATES] = self.__useCertificates
        # The server certificate use context changed, rechecking the transport sanity
        result = self.__checkTransportSanity()
        if not result['OK']:
          return result

    # Take all the extra credentials
    self.__discoverExtraCredentials()
    if not self.__initStatus['OK']:
      return self.__initStatus
    if self.__enableThreadCheck:
      self.__checkThreadID()

    gLogger.debug("Trying to connect to: %s" % self.serviceURL)
    try:
      # Calls the transport method of the appropriate protocol.
      # self.__URLTuple[1:3] = [server name, port, System/Component]
      transport = gProtocolDict[self.__URLTuple[0]]['transport'](self.__URLTuple[1:3], **self.kwargs)
      # the socket timeout is the default value which is 1.
      # later we increase to 5
      retVal = transport.initAsClient()
      # We try at most __nbOfRetry each URLs
      if not retVal['OK']:
        gLogger.warn("Issue getting socket:", "%s : %s : %s" % (transport, self.__URLTuple, retVal['Message']))
        # We try at most __nbOfRetry each URLs
        if self.__retry < self.__nbOfRetry * self.__nbOfUrls - 1:
          # Recompose the URL (why not using self.serviceURL ? )
          url = "%s://%s:%d/%s" % (self.__URLTuple[0], self.__URLTuple[1], int(self.__URLTuple[2]), self.__URLTuple[3])
          # Add the url to the list of banned URLs if it is not already there. (Can it happen ? I don't think so)
          if url not in self.__bannedUrls:
            gLogger.warn("Non-responding URL temporarily banned", "%s" % url)
            self.__bannedUrls += [url]
          # Increment the retry counter
          self.__retry += 1
          # 16.07.20 CHRIS: I guess this setSocketTimeout does not behave as expected.
          # If initAsClient did not work, we re-enter the whole method anyway,
          # so a new transport object is created.
          # However, it might be that this timeout value was propagated down to the
          # SocketInfoFactory singleton, and thus used, but that means that the timeout
          # specified as a parameter was then void.

          # If it is our last attempt for each URL, we increase the timeout
          if self.__retryCounter == self.__nbOfRetry - 1:
            transport.setSocketTimeout(5)  # we increase the socket timeout in case the network is not good
          gLogger.info("Retry connection", ": %d to %s" % (self.__retry, self.serviceURL))
          # If we tried all the URL, we increase the global counter (__retryCounter), and sleep
          if len(self.__bannedUrls) == self.__nbOfUrls:
            self.__retryCounter += 1
            # we run only one service! In that case we increase the retry delay.
            self.__retryDelay = 3. / self.__nbOfUrls if self.__nbOfUrls > 1 else 2
            gLogger.info("Waiting %f seconds before retry all service(s)" % self.__retryDelay)
            time.sleep(self.__retryDelay)
          # rediscover the URL
          self.__discoverURL()
          # try to reconnect
          return self._connect()
        else:
          return retVal
    except Exception as e:
      gLogger.exception(lException=True, lExcInfo=True)
      return S_ERROR("Can't connect to %s: %s" % (self.serviceURL, repr(e)))
    # We add the connection to the transport pool
    gLogger.debug("Connected to: %s" % self.serviceURL)
    trid = getGlobalTransportPool().add(transport)

    return S_OK((trid, transport))
 def updateConfiguration(self,
                         sBuffer,
                         commiterDN="",
                         updateVersionOption=False):
     if not gConfigurationData.isMaster():
         return S_ERROR(
             "Configuration modification is not allowed in this server")
     #Load the data in a ConfigurationData object
     oRemoteConfData = ConfigurationData(False)
     oRemoteConfData.loadRemoteCFGFromCompressedMem(sBuffer)
     if updateVersionOption:
         oRemoteConfData.setVersion(gConfigurationData.getVersion())
     #Test that remote and new versions are the same
     sRemoteVersion = oRemoteConfData.getVersion()
     sLocalVersion = gConfigurationData.getVersion()
     gLogger.info("Checking versions\nremote: %s\nlocal:  %s" %
                  (sRemoteVersion, sLocalVersion))
     if sRemoteVersion != sLocalVersion:
         if not gConfigurationData.mergingEnabled():
             return S_ERROR(
                 "Local and remote versions differ (%s vs %s). Cannot commit."
                 % (sLocalVersion, sRemoteVersion))
         else:
             gLogger.info("AutoMerging new data!")
             if updateVersionOption:
                 return S_ERROR("Cannot AutoMerge! version was overwritten")
             result = self.__mergeIndependentUpdates(oRemoteConfData)
             if not result['OK']:
                 gLogger.warn("Could not AutoMerge!", result['Message'])
                 return S_ERROR("AutoMerge failed: %s" % result['Message'])
             requestedRemoteCFG = result['Value']
             gLogger.info("AutoMerge successful!")
             oRemoteConfData.setRemoteCFG(requestedRemoteCFG)
     #Test that configuration names are the same
     sRemoteName = oRemoteConfData.getName()
     sLocalName = gConfigurationData.getName()
     if sRemoteName != sLocalName:
         return S_ERROR("Names differ: Server is %s and remote is %s" %
                        (sLocalName, sRemoteName))
     #Update and generate a new version
     gLogger.info("Committing new data...")
     gConfigurationData.lock()
     gLogger.info("Setting the new CFG")
     gConfigurationData.setRemoteCFG(oRemoteConfData.getRemoteCFG())
     gConfigurationData.unlock()
     gLogger.info("Generating new version")
     gConfigurationData.generateNewVersion()
     #self.__checkSlavesStatus( forceWriteConfiguration = True )
     gLogger.info("Writing new version to disk!")
     retVal = gConfigurationData.writeRemoteConfigurationToDisk(
         "%s@%s" % (commiterDN, gConfigurationData.getVersion()))
     gLogger.info("New version it is!")
     return retVal
 def __launchCheckSlaves( self ):
   gLogger.info( "Starting purge slaves thread" )
   self.setDaemon( 1 )
   self.start()
 def updateConfiguration( self, sBuffer, commiterDN = "", updateVersionOption = False ):
   if not gConfigurationData.isMaster():
     return S_ERROR( "Configuration modification is not allowed in this server" )
   #Load the data in a ConfigurationData object
   oRemoteConfData = ConfigurationData( False )
   oRemoteConfData.loadRemoteCFGFromCompressedMem( sBuffer )
   if updateVersionOption:
     oRemoteConfData.setVersion( gConfigurationData.getVersion() )
   #Test that remote and new versions are the same
   sRemoteVersion = oRemoteConfData.getVersion()
   sLocalVersion = gConfigurationData.getVersion()
   gLogger.info( "Checking versions\nremote: %s\nlocal:  %s" % ( sRemoteVersion, sLocalVersion ) )
   if sRemoteVersion != sLocalVersion:
     if not gConfigurationData.mergingEnabled():
       return S_ERROR( "Local and remote versions differ (%s vs %s). Cannot commit." % ( sLocalVersion, sRemoteVersion ) )
     else:
       gLogger.info( "AutoMerging new data!" )
       if updateVersionOption:
         return S_ERROR( "Cannot AutoMerge! version was overwritten" )
       result = self.__mergeIndependentUpdates( oRemoteConfData )
       if not result[ 'OK' ]:
         gLogger.warn( "Could not AutoMerge!", result[ 'Message' ] )
         return S_ERROR( "AutoMerge failed: %s" % result[ 'Message' ] )
       requestedRemoteCFG = result[ 'Value' ]
       gLogger.info( "AutoMerge successful!" )
       oRemoteConfData.setRemoteCFG( requestedRemoteCFG )
   #Test that configuration names are the same
   sRemoteName = oRemoteConfData.getName()
   sLocalName = gConfigurationData.getName()
   if sRemoteName != sLocalName:
     return S_ERROR( "Names differ: Server is %s and remote is %s" % ( sLocalName, sRemoteName ) )
   #Update and generate a new version
   gLogger.info( "Committing new data..." )
   gConfigurationData.lock()
   gLogger.info( "Setting the new CFG" )
   gConfigurationData.setRemoteCFG( oRemoteConfData.getRemoteCFG() )
   gConfigurationData.unlock()
   gLogger.info( "Generating new version" )
   gConfigurationData.generateNewVersion()
   #self.__checkSlavesStatus( forceWriteConfiguration = True )
   gLogger.info( "Writing new version to disk!" )
   retVal = gConfigurationData.writeRemoteConfigurationToDisk( "%s@%s" % ( commiterDN, gConfigurationData.getVersion() ) )
   gLogger.info( "New version it is!" )
   return retVal
class ServiceInterface(threading.Thread):
    def __init__(self, sURL):
        threading.Thread.__init__(self)
        self.sURL = sURL
        gLogger.info("Initializing Configuration Service", "URL is %s" % sURL)
        self.__modificationsIgnoreMask = [
            '/DIRAC/Configuration/Servers', '/DIRAC/Configuration/Version'
        ]
        gConfigurationData.setAsService()
        if not gConfigurationData.isMaster():
            gLogger.info("Starting configuration service as slave")
            gRefresher.autoRefreshAndPublish(self.sURL)
        else:
            gLogger.info("Starting configuration service as master")
            gRefresher.disable()
            self.__loadConfigurationData()
            self.dAliveSlaveServers = {}
            self.__launchCheckSlaves()

    def isMaster(self):
        return gConfigurationData.isMaster()

    def __launchCheckSlaves(self):
        gLogger.info("Starting purge slaves thread")
        self.setDaemon(1)
        self.start()

    def __loadConfigurationData(self):
        try:
            os.makedirs(os.path.join(DIRAC.rootPath, "etc", "csbackup"))
        except OSError:
            # the backup directory may already exist
            pass
        gConfigurationData.loadConfigurationData()
        if gConfigurationData.isMaster():
            bBuiltNewConfiguration = False
            if not gConfigurationData.getName():
                DIRAC.abort(
                    10, "Missing name for the configuration to be exported!")
            gConfigurationData.exportName()
            sVersion = gConfigurationData.getVersion()
            if sVersion == "0":
                gLogger.info("There's no version. Generating a new one")
                gConfigurationData.generateNewVersion()
                bBuiltNewConfiguration = True

            if self.sURL not in gConfigurationData.getServers():
                gConfigurationData.setServers(self.sURL)
                bBuiltNewConfiguration = True

            gConfigurationData.setMasterServer(self.sURL)

            if bBuiltNewConfiguration:
                gConfigurationData.writeRemoteConfigurationToDisk()

    def __generateNewVersion(self):
        if gConfigurationData.isMaster():
            gConfigurationData.generateNewVersion()
            gConfigurationData.writeRemoteConfigurationToDisk()

    def publishSlaveServer(self, sSlaveURL):
        if not gConfigurationData.isMaster():
            return S_ERROR(
                "Configuration modification is not allowed in this server")
        gLogger.info("Pinging slave %s" % sSlaveURL)
        rpcClient = RPCClient(sSlaveURL, timeout=10, useCertificates=True)
        retVal = rpcClient.ping()
        if not retVal['OK']:
            gLogger.info("Slave %s didn't reply" % sSlaveURL)
            return
        if retVal['Value']['name'] != 'Configuration/Server':
            gLogger.info("Slave %s is not a CS serveR" % sSlaveURL)
            return
        bNewSlave = False
        if sSlaveURL not in self.dAliveSlaveServers:
            bNewSlave = True
            gLogger.info("New slave registered", sSlaveURL)
        self.dAliveSlaveServers[sSlaveURL] = time.time()
        if bNewSlave:
            gConfigurationData.setServers(
                "%s, %s" %
                (self.sURL, ", ".join(self.dAliveSlaveServers.keys())))
            self.__generateNewVersion()

    def __checkSlavesStatus(self, forceWriteConfiguration=False):
        gLogger.info("Checking status of slave servers")
        iGraceTime = gConfigurationData.getSlavesGraceTime()
        lSlaveURLs = self.dAliveSlaveServers.keys()
        bModifiedSlaveServers = False
        for sSlaveURL in lSlaveURLs:
            if time.time() - self.dAliveSlaveServers[sSlaveURL] > iGraceTime:
                gLogger.info("Found dead slave", sSlaveURL)
                del self.dAliveSlaveServers[sSlaveURL]
                bModifiedSlaveServers = True
        if bModifiedSlaveServers or forceWriteConfiguration:
            gConfigurationData.setServers(
                "%s, %s" %
                (self.sURL, ", ".join(self.dAliveSlaveServers.keys())))
            self.__generateNewVersion()

    def getCompressedConfiguration(self):
        return gConfigurationData.getCompressedData()

    def updateConfiguration(self,
                            sBuffer,
                            commiterDN="",
                            updateVersionOption=False):
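        """
        Commit a new configuration sent by a client: check version and name
        consistency (attempting an automatic merge if versions diverged and
        merging is enabled), install the new CFG, generate a new version and
        write it to disk tagged with the committer's DN.
        """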
        if not gConfigurationData.isMaster():
            return S_ERROR(
                "Configuration modification is not allowed in this server")
        # Load the data into a ConfigurationData object
        oRemoteConfData = ConfigurationData(False)
        oRemoteConfData.loadRemoteCFGFromCompressedMem(sBuffer)
        if updateVersionOption:
            oRemoteConfData.setVersion(gConfigurationData.getVersion())
        # Check that the remote and local versions match
        sRemoteVersion = oRemoteConfData.getVersion()
        sLocalVersion = gConfigurationData.getVersion()
        gLogger.info("Checking versions\nremote: %s\nlocal:  %s" %
                     (sRemoteVersion, sLocalVersion))
        if sRemoteVersion != sLocalVersion:
            if not gConfigurationData.mergingEnabled():
                return S_ERROR(
                    "Local and remote versions differ (%s vs %s). Cannot commit."
                    % (sLocalVersion, sRemoteVersion))
            else:
                gLogger.info("AutoMerging new data!")
                if updateVersionOption:
                    return S_ERROR("Cannot AutoMerge! version was overwritten")
                result = self.__mergeIndependentUpdates(oRemoteConfData)
                if not result['OK']:
                    gLogger.warn("Could not AutoMerge!", result['Message'])
                    return S_ERROR("AutoMerge failed: %s" % result['Message'])
                requestedRemoteCFG = result['Value']
                gLogger.info("AutoMerge successful!")
                oRemoteConfData.setRemoteCFG(requestedRemoteCFG)
        # Check that the configuration names match
        sRemoteName = oRemoteConfData.getName()
        sLocalName = gConfigurationData.getName()
        if sRemoteName != sLocalName:
            return S_ERROR("Names differ: Server is %s and remote is %s" %
                           (sLocalName, sRemoteName))
        #Update and generate a new version
        gLogger.info("Committing new data...")
        gConfigurationData.lock()
        gLogger.info("Setting the new CFG")
        gConfigurationData.setRemoteCFG(oRemoteConfData.getRemoteCFG())
        gConfigurationData.unlock()
        gLogger.info("Generating new version")
        gConfigurationData.generateNewVersion()
        #self.__checkSlavesStatus( forceWriteConfiguration = True )
        gLogger.info("Writing new version to disk!")
        retVal = gConfigurationData.writeRemoteConfigurationToDisk(
            "%s@%s" % (commiterDN, gConfigurationData.getVersion()))
        gLogger.info("New version it is!")
        return retVal

    def getCompressedConfigurationData(self):
        return gConfigurationData.getCompressedData()

    def getVersion(self):
        return gConfigurationData.getVersion()

    def getCommitHistory(self):
        files = self.__getCfgBackups(gConfigurationData.getBackupDir())
        backups = [
            ".".join(fileName.split(".")[1:3]).split("@") for fileName in files
        ]
        return backups

    def run(self):
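        """Thread main loop: periodically purge dead slave servers."""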
        while True:
            iWaitTime = gConfigurationData.getSlavesGraceTime()
            time.sleep(iWaitTime)
            self.__checkSlavesStatus()

    def getVersionContents(self, date):
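        """
        Return the zlib-compressed contents of the configuration backup
        matching the given version/date, or an error if no such backup exists.
        """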
        backupDir = gConfigurationData.getBackupDir()
        files = self.__getCfgBackups(backupDir, date)
        for fileName in files:
            zFile = zipfile.ZipFile("%s/%s" % (backupDir, fileName), "r")
            cfgName = zFile.namelist()[0]
            #retVal = S_OK( zlib.compress( str( fd.read() ), 9 ) )
            retVal = S_OK(zlib.compress(zFile.read(cfgName), 9))
            zFile.close()
            return retVal
        return S_ERROR("Version %s does not exist" % date)

    def __getCfgBackups(self, basePath, date="", subPath=""):
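        """
        Recursively list backup zip files under basePath whose name matches
        the configuration name and, optionally, the given date/version.
        """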
        rs = re.compile(r"^%s\..*%s.*\.zip$" %
                        (gConfigurationData.getName(), date))
        fsEntries = os.listdir("%s/%s" % (basePath, subPath))
        fsEntries.sort(reverse=True)
        backupsList = []
        for entry in fsEntries:
            entryPath = "%s/%s/%s" % (basePath, subPath, entry)
            if os.path.isdir(entryPath):
                backupsList.extend(
                    self.__getCfgBackups(basePath, date,
                                         "%s/%s" % (subPath, entry)))
            elif os.path.isfile(entryPath):
                if rs.search(entry):
                    backupsList.append("%s/%s" % (subPath, entry))
        return backupsList

    def __getPreviousCFG(self, oRemoteConfData):
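        """
        Load, from the backups, the configuration version the remote client
        started from, so that its changes can be merged against that base.
        """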
        remoteExpectedVersion = oRemoteConfData.getVersion()
        backupsList = self.__getCfgBackups(gConfigurationData.getBackupDir(),
                                           date=oRemoteConfData.getVersion())
        if not backupsList:
            return S_ERROR(
                "Could not AutoMerge. Could not retrieve original committer's version"
            )
        prevRemoteConfData = ConfigurationData()
        backFile = backupsList[0]
        if backFile[0] == "/":
            backFile = os.path.join(gConfigurationData.getBackupDir(),
                                    backFile[1:])
        try:
            prevRemoteConfData.loadConfigurationData(backFile)
        except Exception as e:
            return S_ERROR("Could not load original committer's version: %s" %
                           str(e))
        gLogger.info("Loaded client original version %s" %
                     prevRemoteConfData.getVersion())
        return S_OK(prevRemoteConfData.getRemoteCFG())
Exemple #43
0
def initDIRAC(rootPath, enableDebug=False):
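    """
    Bootstrap the DIRAC framework for the web portal: initialize logging,
    monitoring and configuration, merge the web.cfg files of all extensions
    and return a dictionary with the web configuration and the controller,
    template and public directory paths.
    """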
    # CONFIGURATION OPTIONS HERE (note: all config options will override
    # any Pylons config options)
    configDict = {'webConfig': {}}
    configDict['webConfig']['dirac.webroot'] = rootPath
    diracRootPath = os.path.realpath(os.path.dirname(
        os.path.dirname(rootPath)))
    configDict['webConfig']['dirac.root'] = diracRootPath
    if diracRootPath not in sys.path:
        sys.path.append(diracRootPath)
    from DIRAC.FrameworkSystem.Client.Logger import gLogger
    gLogger.registerBackends(['stderr'])
    from DIRAC.Core.Base import Script
    Script.registerSwitch("r", "reload", "Reload for pylons")
    Script.localCfg.addDefaultEntry("/DIRAC/Security/UseServerCertificate",
                                    "yes")
    Script.localCfg.addDefaultEntry("LogColor", True)
    Script.initialize(script="Website",
                      ignoreErrors=True,
                      initializeMonitor=False)
    gLogger._systemName = "Framework"
    gLogger.initialize("Web", "/Website")
    gLogger.setLevel("VERBOSE")

    from DIRAC import gMonitor, gConfig, rootPath as droot
    from DIRAC.Core.Utilities import CFG
    from DIRAC.ConfigurationSystem.Client.Helpers import getCSExtensions
    gMonitor.setComponentType(gMonitor.COMPONENT_WEB)
    gMonitor.initialize()
    gMonitor.registerActivity("pagesServed", "Pages served", "Framework",
                              "pages", gMonitor.OP_SUM)

    gLogger.info("DIRAC Initialized")

    configDict['portalVersion'] = portalVersion(rootPath)
    gLogger.info("DIRAC portal version: %s" % configDict['portalVersion'])

    extModules = ['%sDIRAC' % module for module in getCSExtensions()]
    #Load web.cfg of modules
    cfgFilePaths = [os.path.join(droot, "etc", "web.cfg")]
    for extModule in extModules:
        gLogger.info("Adding web.cfg for %s extension" % extModule)
        extModulePath = os.path.join(diracRootPath, extModule)
        webCFGPath = os.path.join(extModulePath, "Web", "web.cfg")
        cfgFilePaths.append(webCFGPath)
        for systemDir in os.listdir(extModulePath):
            webCFGSystemPath = os.path.join(extModulePath, systemDir, "Web",
                                            "web.cfg")
            cfgFilePaths.append(webCFGSystemPath)
    webCFG = CFG.CFG()
    for webCFGPath in cfgFilePaths:
        if not os.path.isfile(webCFGPath):
            gLogger.warn("%s does not exist" % webCFGPath)
        else:
            gLogger.info("Loading %s" % webCFGPath)
            modCFG = CFG.CFG().loadFromFile(webCFGPath)
            if modCFG.getOption('Website/AbsoluteDefinition', False):
                gLogger.info("CFG %s is absolute" % webCFGPath)
                webCFG = modCFG
            else:
                webCFG = webCFG.mergeWith(modCFG)
    gConfig.loadCFG(webCFG)
    gLogger.showHeaders(True)
    gLogger._gLogger__initialized = False
    gLogger.initialize("Web", "/Website")

    #Define the controllers, templates and public directories
    for type in ('controllers', 'templates', 'public'):
        configDict[type] = []
        for extModule in extModules:
            extModulePath = os.path.join(diracRootPath, extModule)
            typePath = os.path.join(extModulePath, "Web", type)
            if os.path.isdir(typePath):
                gLogger.info("Adding %s path for module %s" %
                             (type, extModule))
                configDict[type].append(typePath)
            for systemDir in os.listdir(extModulePath):
                systemTypePath = os.path.join(extModulePath, systemDir, "Web",
                                              type)
                if os.path.isdir(systemTypePath):
                    gLogger.info("Adding %s path for system %s in module %s" %
                                 (type, systemDir, extModule))
                    configDict[type].append(systemTypePath)
        #End of extensions
        configDict[type].append(os.path.join(rootPath, type))

    #Load debug.cfg?
    if enableDebug:
        debugCFGPath = os.path.join(rootPath, "debug.cfg")
        if os.path.isfile(debugCFGPath):
            gLogger.info("Loading debug cfg file at %s" % debugCFGPath)
            gConfig.loadFile(debugCFGPath)

    gLogger.info("Extension modules loaded")

    return configDict
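
# Hypothetical usage sketch (an assumption, not part of the original code):
# a Pylons-style environment module could call initDIRAC() and merge the
# returned paths into its own configuration. The names `load_environment`
# and `pylonsConfig` are illustrative only.
def load_environment(pylonsConfig):
    import os
    webRootPath = os.path.dirname(os.path.abspath(__file__))
    diracConfig = initDIRAC(webRootPath, enableDebug=False)
    # Hand the controller/template/static paths discovered by initDIRAC
    # over to the web framework configuration
    pylonsConfig['pylons.paths'] = {
        'controllers': diracConfig['controllers'],
        'templates': diracConfig['templates'],
        'static_files': diracConfig['public'],
    }
    pylonsConfig.update(diracConfig['webConfig'])
    return pylonsConfig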