コード例 #1
0
  def test_simple_inheritance_overwrite(self, _sf_generateStorageObject, _rss_getSEStatus):
    """Load CERN-USER, which inherits from CERN-BASE: it adds a storage
       option and redefines the path and the space token.
    """
    res = StorageFactory(vo='lhcb').getStorages('CERN-USER')
    self.assertTrue(res['OK'], res)
    seInfo = res['Value']

    self.assertListEqual(seInfo['RemotePlugins'], ['GFAL2_SRM2'])

    # Exactly one protocol section, and one storage object built from it
    self.assertEqual(len(seInfo['ProtocolOptions']), 1)
    self.assertEqual(len(seInfo['StorageObjects']), 1)

    # Compare every protocol option against its expected value
    expectedDetail = {'Access': 'remote',
                      'Host': 'srm-eoslhcb.cern.ch',
                      'Path': '/eos/lhcb/grid/user',
                      'PluginName': 'GFAL2_SRM2',
                      'Port': 8443,
                      'Protocol': 'srm',
                      'SpaceToken': 'LHCb_USER',
                      'WSUrl': '/srm/v2/server?SFN:'}
    protocolDetail = seInfo['ProtocolOptions'][0]
    for option, expectedValue in expectedDetail.items():
      self.assertEqual(protocolDetail[option], expectedValue)

    self.assertDictEqual(seInfo['StorageOptions'],
                         {'BackendType': 'Eos',
                          'SEType': 'T0D1',
                          'PledgedSpace': 205,
                          'BaseSE': 'CERN-BASE'})
コード例 #2
0
ファイル: TestRFIOPlugIn.py プロジェクト: sbel/bes3-jinr
 def setUp( self ):
   """Build the RFIO storage object for CERN-RAW and move it to the
   unit-test working directory.
   """
   factory = StorageFactory()
   res = factory.getStorages( 'CERN-RAW', ['RFIO'] )
   # assertTrue replaces the deprecated assert_ alias (removed in Python 3.12)
   self.assertTrue( res['OK'] )
   storageDetails = res['Value']
   self.storage = storageDetails['StorageObjects'][0]
   self.storage.changeDirectory( 'lhcb/test/unit-test/Storage/RFIOStorage' )
コード例 #3
0
ファイル: Test_StorageFactory.py プロジェクト: sparsh35/DIRAC
    def test_more_protocol(self, _sf_generateStorageObject, _rss_getSEStatus):
        """Load CERN-MORE, which inherits from CERN-BASE and adds an
        extra protocol on top of the inherited one.
        """
        res = StorageFactory(vo='lhcb').getStorages('CERN-MORE')
        self.assertTrue(res['OK'], res)
        seInfo = res['Value']

        self.assertListEqual(seInfo['RemotePlugins'], ['Extra', 'GFAL2_SRM2'])

        # Both protocols share everything except plugin, path and WSUrl
        common = {'Access': 'remote',
                  'Host': 'srm-eoslhcb.cern.ch',
                  'Port': 8443,
                  'Protocol': 'srm',
                  'SpaceToken': 'LHCb-EOS'}
        extraProto = dict(common,
                          PluginName='Extra',
                          Path='/eos/lhcb/grid/prod',
                          WSUrl='')
        inheritedProto = dict(common,
                              PluginName='GFAL2_SRM2',
                              Path='/eos/lhcb/grid/user',
                              WSUrl='/srm/v2/server?SFN:')

        self.assertListEqual(seInfo['ProtocolOptions'],
                             [extraProto, inheritedProto])
コード例 #4
0
  def test_child_inherit_from_base_with_two_same_plugins(self, _sf_generateStorageObject, _rss_getSEStatus):
    """Load CERN-CHILD-INHERIT-FROM-BASE-WITH-TWO-SAME-PLUGINS, which
       inherits from CERN-BASE-WITH-TWO-SAME-PLUGINS: the same plugin name
       appears in two different protocol sections.
    """
    res = StorageFactory(vo='lhcb').getStorages('CERN-CHILD-INHERIT-FROM-BASE-WITH-TWO-SAME-PLUGINS')
    self.assertTrue(res['OK'], res)
    seInfo = res['Value']

    # The same plugin is listed once per protocol section
    self.assertListEqual(seInfo['RemotePlugins'], ['GFAL2_SRM2', 'GFAL2_SRM2'])

    # Options shared by the two sections
    common = {'Access': 'remote',
              'PluginName': 'GFAL2_SRM2',
              'Port': 8443,
              'SpaceToken': '',
              'WSUrl': '/srm/v2/server?SFN:'}
    srmSection = dict(common,
                      Host='srm-eoslhcb.cern.ch',
                      Path='/eos/lhcb/grid/user',
                      Protocol='srm')
    rootSection = dict(common,
                       Host='eoslhcb.cern.ch',
                       Path='',
                       Protocol='root')

    self.assertListEqual(seInfo['ProtocolOptions'], [srmSection, rootSection])
コード例 #5
0
ファイル: Test_StorageFactory.py プロジェクト: project8/DIRAC
  def test_simple_inheritance_overwrite(self, _sf_generateStorageObject, _rss_getSEStatus):
    """Load CERN-USER, which inherits from CERN-BASE while adding a
       storage option and overriding the path and the space token.
    """
    sf = StorageFactory(vo='lhcb')
    result = sf.getStorages('CERN-USER')
    self.assertTrue(result['OK'], result)
    storages = result['Value']

    self.assertListEqual(storages['RemotePlugins'], ['GFAL2_SRM2'])

    # A single protocol section, hence a single storage object
    self.assertEqual(len(storages['ProtocolOptions']), 1)
    self.assertEqual(len(storages['StorageObjects']), 1)

    # Expected option/value pairs of the single protocol
    expected = [('Access', 'remote'),
                ('Host', 'srm-eoslhcb.cern.ch'),
                ('Path', '/eos/lhcb/grid/user'),
                ('PluginName', 'GFAL2_SRM2'),
                ('Port', 8443),
                ('Protocol', 'srm'),
                ('SpaceToken', 'LHCb_USER'),
                ('WSUrl', '/srm/v2/server?SFN:')]
    protocolDetail = storages['ProtocolOptions'][0]
    for option, value in expected:
      self.assertEqual(protocolDetail[option], value)

    self.assertDictEqual(storages['StorageOptions'],
                         {'BackendType': 'Eos',
                          'SEType': 'T0D1',
                          'PledgedSpace': 205,
                          'BaseSE': 'CERN-BASE'})
コード例 #6
0
ファイル: Test_StorageFactory.py プロジェクト: sparsh35/DIRAC
    def test_use_plugin_as_protocol_name_with_plugin_name(
            self, _sf_generateStorageObject, _rss_getSEStatus):
        """Load CERN-USE-PLUGIN-AS-PROTOCOL-NAME-WITH-PLUGIN-NAME, which
        inherits from CERN-BASE and names a protocol section after a plugin
        while the PluginName option is also present.
        """
        res = StorageFactory(vo='lhcb').getStorages(
            'CERN-USE-PLUGIN-AS-PROTOCOL-NAME-WITH-PLUGIN-NAME')
        self.assertTrue(res['OK'], res)
        seInfo = res['Value']

        self.assertListEqual(seInfo['RemotePlugins'],
                             ['GFAL2_SRM2', 'GFAL2_XROOT'])

        # The two protocols only differ by plugin name and path
        common = {'Access': 'remote',
                  'Host': 'srm-eoslhcb.cern.ch',
                  'Port': 8443,
                  'Protocol': 'srm',
                  'SpaceToken': 'LHCb-EOS',
                  'WSUrl': '/srm/v2/server?SFN:'}
        srmProto = dict(common, PluginName='GFAL2_SRM2',
                        Path='/eos/lhcb/grid/prod')
        xrootProto = dict(common, PluginName='GFAL2_XROOT',
                          Path='/eos/lhcb/grid/user')

        self.assertListEqual(seInfo['ProtocolOptions'], [srmProto, xrootProto])
コード例 #7
0
  def test_pure_inheritance(self, _sf_generateStorageObject, _rss_getSEStatus):
    """Load CERN-NO-DEF, which inherits from CERN-BASE without redefining
       anything: it must look exactly like its parent.
    """
    res = StorageFactory(vo='lhcb').getStorages('CERN-NO-DEF')
    self.assertTrue(res['OK'], res)
    seInfo = res['Value']

    self.assertListEqual(seInfo['RemotePlugins'], ['GFAL2_SRM2'])

    # One protocol section, one storage object
    self.assertEqual(len(seInfo['ProtocolOptions']), 1)
    self.assertEqual(len(seInfo['StorageObjects']), 1)

    # Every value must come straight from CERN-BASE
    expectedDetail = {'Access': 'remote',
                      'Host': 'srm-eoslhcb.cern.ch',
                      'Path': '/eos/lhcb/grid/prod',
                      'PluginName': 'GFAL2_SRM2',
                      'Port': 8443,
                      'Protocol': 'srm',
                      'SpaceToken': 'LHCb-EOS',
                      'WSUrl': '/srm/v2/server?SFN:'}
    protocolDetail = seInfo['ProtocolOptions'][0]
    for option, value in expectedDetail.items():
      self.assertEqual(protocolDetail[option], value)

    self.assertDictEqual(seInfo['StorageOptions'],
                         {'BackendType': 'Eos',
                          'SEType': 'T0D1',
                          'BaseSE': 'CERN-BASE'})
コード例 #8
0
ファイル: Test_StorageFactory.py プロジェクト: sparsh35/DIRAC
    def test_standalone(self, _sf_generateStorageObject, _rss_getSEStatus):
        """Test loading a storage element with everything defined in itself.

        It should have two storage plugins (one local, one remote).
        """

        sf = StorageFactory(vo='lhcb')
        storages = sf.getStorages('CERN-SIMPLE')

        self.assertTrue(storages['OK'], storages)
        storages = storages['Value']

        self.assertListEqual(storages['LocalPlugins'], ['File'])
        self.assertListEqual(storages['RemotePlugins'], ['GFAL2_SRM2'])

        # Rebuild the expected protocol list straight from the fake CS
        allProtocols = []
        for protocol in ['RemoteAccessProtocol', 'LocalAccessProtocol']:
            protocolDef = copy.copy(mandatoryProtocolOptions)
            protocolDef.update(
                fake_gConfig.crawlCS(
                    '/Resources/StorageElements/CERN-SIMPLE/%s' % protocol))
            allProtocols.append(protocolDef)

        self.assertEqual(len(storages['ProtocolOptions']), len(allProtocols))
        self.assertEqual(len(storages['StorageObjects']), len(allProtocols))

        # Dicts are not orderable in Python 3, so sort both lists on the
        # host name instead of sorting the dicts themselves
        self.assertListEqual(
            sorted(allProtocols, key=lambda x: x['Host']),
            sorted(storages['ProtocolOptions'], key=lambda x: x['Host']))
        self.assertDictEqual(storages['StorageOptions'], {
            'BackendType': 'Eos',
            'SEType': 'T0D1'
        })
コード例 #9
0
  def test_standalone(self, _sf_generateStorageObject, _rss_getSEStatus):
    """Test loading a storage element with everything defined in itself.

        It should have two storage plugins (one local, one remote).
    """

    sf = StorageFactory(vo = 'lhcb')
    storages = sf.getStorages('CERN-SIMPLE')

    self.assertTrue(storages['OK'], storages)
    storages = storages['Value']

    self.assertListEqual(storages['LocalPlugins'], ['File'])
    self.assertListEqual(storages['RemotePlugins'], ['GFAL2_SRM2'])

    # Rebuild the expected protocol list straight from the fake CS
    allProtocols = []
    for protocol in ['RemoteAccessProtocol', 'LocalAccessProtocol']:
      protocolDef = copy.copy(mandatoryProtocolOptions)
      protocolDef.update(
          fake_gConfig.crawlCS('/Resources/StorageElements/CERN-SIMPLE/%s' % protocol))
      allProtocols.append(protocolDef)

    self.assertEqual(len(storages['ProtocolOptions']), len(allProtocols))
    self.assertEqual(len(storages['StorageObjects']), len(allProtocols))

    # Dicts are not orderable in Python 3, so sort both lists on the
    # host name instead of sorting the dicts themselves
    self.assertListEqual(sorted(allProtocols, key=lambda x: x['Host']),
                         sorted(storages['ProtocolOptions'], key=lambda x: x['Host']))
    self.assertDictEqual(storages['StorageOptions'], {'BackendType': 'Eos', 'SEType': 'T0D1'})
コード例 #10
0
ファイル: Test_StorageFactory.py プロジェクト: sparsh35/DIRAC
    def test_pure_inheritance(self, _sf_generateStorageObject,
                              _rss_getSEStatus):
        """Load CERN-NO-DEF, which inherits from CERN-BASE without
        redefining anything: it must look exactly like its parent.
        """
        res = StorageFactory(vo='lhcb').getStorages('CERN-NO-DEF')
        self.assertTrue(res['OK'], res)
        seInfo = res['Value']

        self.assertListEqual(seInfo['RemotePlugins'], ['GFAL2_SRM2'])

        # One protocol section, one storage object
        self.assertEqual(len(seInfo['ProtocolOptions']), 1)
        self.assertEqual(len(seInfo['StorageObjects']), 1)

        # Every value must come straight from CERN-BASE
        expected = [('Access', 'remote'),
                    ('Host', 'srm-eoslhcb.cern.ch'),
                    ('Path', '/eos/lhcb/grid/prod'),
                    ('PluginName', 'GFAL2_SRM2'),
                    ('Port', 8443),
                    ('Protocol', 'srm'),
                    ('SpaceToken', 'LHCb-EOS'),
                    ('WSUrl', '/srm/v2/server?SFN:')]
        protocolDetail = seInfo['ProtocolOptions'][0]
        for option, value in expected:
            self.assertEqual(protocolDetail[option], value)

        self.assertDictEqual(seInfo['StorageOptions'],
                             {'BackendType': 'Eos',
                              'SEType': 'T0D1',
                              'BaseSE': 'CERN-BASE'})
コード例 #11
0
ファイル: TestRFIOPlugIn.py プロジェクト: wirespecter/DIRAC
 def setUp( self ):
   """Build the RFIO storage object for CERN-RAW and move it to the
   unit-test working directory.
   """
   factory = StorageFactory()
   res = factory.getStorages( 'CERN-RAW', ['RFIO'] )
   # assertTrue replaces the deprecated assert_ alias (removed in Python 3.12)
   self.assertTrue( res['OK'] )
   storageDetails = res['Value']
   self.storage = storageDetails['StorageObjects'][0]
   self.storage.changeDirectory( 'lhcb/test/unit-test/Storage/RFIOStorage' )
コード例 #12
0
  def test_redefine_plugin_name(self, _sf_generateStorageObject, _rss_getSEStatus):
    """Load CERN-REDEFINE-PLUGIN-NAME, which inherits from CERN-BASE and
       declares the same plugin under a different section name.
    """
    res = StorageFactory(vo='lhcb').getStorages('CERN-REDEFINE-PLUGIN-NAME')
    self.assertTrue(res['OK'], res)
    seInfo = res['Value']

    # The plugin shows up once per section
    self.assertListEqual(seInfo['RemotePlugins'], ['GFAL2_SRM2', 'GFAL2_SRM2'])

    # Section inherited from CERN-BASE
    inheritedProto = {'Access': 'remote',
                      'Host': 'srm-eoslhcb.cern.ch',
                      'Path': '/eos/lhcb/grid/prod',
                      'PluginName': 'GFAL2_SRM2',
                      'Port': 8443,
                      'Protocol': 'srm',
                      'SpaceToken': 'LHCb-EOS',
                      'WSUrl': '/srm/v2/server?SFN:'}
    # New section: only Path and PluginName are filled in
    redefinedProto = {'Access': 'remote',
                      'Host': '',
                      'Path': '/eos/lhcb/grid/other',
                      'PluginName': 'GFAL2_SRM2',
                      'Port': '',
                      'Protocol': '',
                      'SpaceToken': '',
                      'WSUrl': ''}

    self.assertListEqual(seInfo['ProtocolOptions'],
                         [inheritedProto, redefinedProto])
コード例 #13
0
ファイル: Test_StorageFactory.py プロジェクト: sparsh35/DIRAC
    def test_pure_abstract(self, _sf_generateStorageObject, _rss_getSEStatus):
        """Load CERN-CHILD, which inherits from CERN-ABSTRACT: the parent
        declares two incomplete protocols that the child completes.
        """
        res = StorageFactory(vo='lhcb').getStorages('CERN-CHILD')
        self.assertTrue(res['OK'], res)
        seInfo = res['Value']

        self.assertListEqual(seInfo['RemotePlugins'],
                             ['GFAL2_SRM2', 'GFAL2_XROOT'])

        srmProto = {'Access': 'remote',
                    'Host': 'srm-eoslhcb.cern.ch',
                    'Path': '/eos/lhcb/grid/user',
                    'PluginName': 'GFAL2_SRM2',
                    'Port': 8443,
                    'Protocol': 'srm',
                    'SpaceToken': 'LHCb_USER',
                    'WSUrl': '/srm/v2/server?SFN:'}
        xrootProto = {'Access': 'remote',
                      'Host': 'eoslhcb.cern.ch',
                      'Path': '/eos/lhcb/grid/xrootuser',
                      'PluginName': 'GFAL2_XROOT',
                      'Port': '',
                      'Protocol': 'root',
                      'SpaceToken': '',
                      'WSUrl': ''}

        self.assertListEqual(seInfo['ProtocolOptions'], [srmProto, xrootProto])
コード例 #14
0
ファイル: Test_StorageFactory.py プロジェクト: sparsh35/DIRAC
    def test_child_inherit_from_base_with_two_same_plugins(
            self, _sf_generateStorageObject, _rss_getSEStatus):
        """Load CERN-CHILD-INHERIT-FROM-BASE-WITH-TWO-SAME-PLUGINS, which
        inherits from CERN-BASE-WITH-TWO-SAME-PLUGINS: the same plugin name
        is used in two different protocol sections.
        """
        res = StorageFactory(vo='lhcb').getStorages(
            'CERN-CHILD-INHERIT-FROM-BASE-WITH-TWO-SAME-PLUGINS')
        self.assertTrue(res['OK'], res)
        seInfo = res['Value']

        # The plugin is listed once per protocol section
        self.assertListEqual(seInfo['RemotePlugins'],
                             ['GFAL2_SRM2', 'GFAL2_SRM2'])

        # Options shared by the two sections
        common = {'Access': 'remote',
                  'PluginName': 'GFAL2_SRM2',
                  'Port': 8443,
                  'SpaceToken': '',
                  'WSUrl': '/srm/v2/server?SFN:'}
        srmSection = dict(common,
                          Host='srm-eoslhcb.cern.ch',
                          Path='/eos/lhcb/grid/user',
                          Protocol='srm')
        rootSection = dict(common,
                           Host='eoslhcb.cern.ch',
                           Path='',
                           Protocol='root')

        self.assertListEqual(seInfo['ProtocolOptions'],
                             [srmSection, rootSection])
コード例 #15
0
  def test_pure_abstract(self, _sf_generateStorageObject, _rss_getSEStatus):
    """Load CERN-CHILD, which inherits from CERN-ABSTRACT: the parent
       declares two incomplete protocols that the child completes.
    """
    res = StorageFactory(vo='lhcb').getStorages('CERN-CHILD')
    self.assertTrue(res['OK'], res)
    seInfo = res['Value']

    self.assertListEqual(seInfo['RemotePlugins'], ['GFAL2_SRM2', 'GFAL2_XROOT'])

    srmProto = {'Access': 'remote',
                'Host': 'srm-eoslhcb.cern.ch',
                'Path': '/eos/lhcb/grid/user',
                'PluginName': 'GFAL2_SRM2',
                'Port': 8443,
                'Protocol': 'srm',
                'SpaceToken': 'LHCb_USER',
                'WSUrl': '/srm/v2/server?SFN:'}
    xrootProto = {'Access': 'remote',
                  'Host': 'eoslhcb.cern.ch',
                  'Path': '/eos/lhcb/grid/xrootuser',
                  'PluginName': 'GFAL2_XROOT',
                  'Port': '',
                  'Protocol': 'root',
                  'SpaceToken': '',
                  'WSUrl': ''}

    self.assertListEqual(seInfo['ProtocolOptions'], [srmProto, xrootProto])
コード例 #16
0
    def test_pure_inheritance(self, _sf_generateStorageObject,
                              _rss_getSEStatus):
        """Load CERN-NO-DEF, which inherits from CERN-BASE without
        redefining anything: it must look exactly like its parent.
        """
        res = StorageFactory(vo="lhcb").getStorages("CERN-NO-DEF")
        self.assertTrue(res["OK"], res)
        seInfo = res["Value"]

        self.assertListEqual(seInfo["RemotePlugins"], ["GFAL2_SRM2"])

        # One protocol section, one storage object
        self.assertEqual(len(seInfo["ProtocolOptions"]), 1)
        self.assertEqual(len(seInfo["StorageObjects"]), 1)

        # Every value must come straight from CERN-BASE
        expectedDetail = {"Access": "remote",
                          "Host": "srm-eoslhcb.cern.ch",
                          "Path": "/eos/lhcb/grid/prod",
                          "PluginName": "GFAL2_SRM2",
                          "Port": 8443,
                          "Protocol": "srm",
                          "SpaceToken": "LHCb-EOS",
                          "WSUrl": "/srm/v2/server?SFN:"}
        protocolDetail = seInfo["ProtocolOptions"][0]
        for option, value in expectedDetail.items():
            self.assertEqual(protocolDetail[option], value)

        self.assertDictEqual(seInfo["StorageOptions"],
                             {"BackendType": "Eos",
                              "SEType": "T0D1",
                              "BaseSE": "CERN-BASE"})
コード例 #17
0
    def test_bad_plugin_name(self, _sf_generateStorageObject,
                             _rss_getSEStatus):
        """Load CERN-BAD-PLUGIN-NAME, which inherits from CERN-BASE and
        redefines the same protocol with a different PluginName.
        """
        res = StorageFactory(vo="lhcb").getStorages("CERN-BAD-PLUGIN-NAME")
        self.assertTrue(res["OK"], res)
        seInfo = res["Value"]

        # The renamed plugin ends up local-only
        self.assertListEqual(seInfo["RemotePlugins"], [])
        self.assertListEqual(seInfo["LocalPlugins"], ["AnotherPluginName"])

        expectedProto = {
            "Access": "local",
            "Host": "srm-eoslhcb.cern.ch",
            "Path": "/eos/lhcb/grid/prod",
            "PluginName": "AnotherPluginName",
            "Port": 8443,
            "Protocol": "srm",
            "SpaceToken": "LHCb-EOS",
            "WSUrl": "/srm/v2/server?SFN:",
        }
        self.assertListEqual(seInfo["ProtocolOptions"], [expectedProto])
コード例 #18
0
  def test_use_plugin_as_protocol_name_with_plugin_name(self, _sf_generateStorageObject, _rss_getSEStatus):
    """Load CERN-USE-PLUGIN-AS-PROTOCOL-NAME-WITH-PLUGIN-NAME, which
       inherits from CERN-BASE and names a protocol section after a plugin
       while also defining the PluginName option.
    """
    res = StorageFactory(vo='lhcb').getStorages('CERN-USE-PLUGIN-AS-PROTOCOL-NAME-WITH-PLUGIN-NAME')
    self.assertTrue(res['OK'], res)
    seInfo = res['Value']

    self.assertListEqual(seInfo['RemotePlugins'], ['GFAL2_SRM2', 'GFAL2_XROOT'])

    # The two protocols only differ by plugin name and path
    common = {'Access': 'remote',
              'Host': 'srm-eoslhcb.cern.ch',
              'Port': 8443,
              'Protocol': 'srm',
              'SpaceToken': 'LHCb-EOS',
              'WSUrl': '/srm/v2/server?SFN:'}
    srmProto = dict(common, PluginName='GFAL2_SRM2', Path='/eos/lhcb/grid/prod')
    xrootProto = dict(common, PluginName='GFAL2_XROOT', Path='/eos/lhcb/grid/user')

    self.assertListEqual(seInfo['ProtocolOptions'], [srmProto, xrootProto])
コード例 #19
0
    def test_standalone(self, _sf_generateStorageObject, _rss_getSEStatus):
        """Load CERN-SIMPLE, a storage element fully defined by itself:
        it must expose one local and one remote plugin.
        """
        res = StorageFactory(vo="lhcb").getStorages("CERN-SIMPLE")
        self.assertTrue(res["OK"], res)
        seInfo = res["Value"]

        self.assertListEqual(seInfo["LocalPlugins"], ["File"])
        self.assertListEqual(seInfo["RemotePlugins"], ["GFAL2_SRM2"])

        # Rebuild the expected protocol list straight from the fake CS
        expectedProtocols = []
        for section in ["RemoteAccessProtocol", "LocalAccessProtocol"]:
            expected = copy.copy(mandatoryProtocolOptions)
            expected.update(fake_gConfig.crawlCS(
                "/Resources/StorageElements/CERN-SIMPLE/%s" % section))
            expectedProtocols.append(expected)

        self.assertEqual(len(seInfo["ProtocolOptions"]), len(expectedProtocols))
        self.assertEqual(len(seInfo["StorageObjects"]), len(expectedProtocols))

        # Order is irrelevant, so compare after sorting on the host name
        byHost = lambda proto: proto["Host"]
        self.assertListEqual(sorted(expectedProtocols, key=byHost),
                             sorted(seInfo["ProtocolOptions"], key=byHost))
        self.assertDictEqual(seInfo["StorageOptions"],
                             {"BackendType": "Eos", "SEType": "T0D1"})
コード例 #20
0
  def test_more_protocol(self, _sf_generateStorageObject, _rss_getSEStatus):
    """Load CERN-MORE, which inherits from CERN-BASE and adds an extra
       protocol on top of the inherited one.
    """
    res = StorageFactory(vo='lhcb').getStorages('CERN-MORE')
    self.assertTrue(res['OK'], res)
    seInfo = res['Value']

    self.assertListEqual(seInfo['RemotePlugins'], ['Extra', 'GFAL2_SRM2'])

    # Both protocols share everything except plugin, path and WSUrl
    common = {'Access': 'remote',
              'Host': 'srm-eoslhcb.cern.ch',
              'Port': 8443,
              'Protocol': 'srm',
              'SpaceToken': 'LHCb-EOS'}
    extraProto = dict(common, PluginName='Extra', Path='/eos/lhcb/grid/prod',
                      WSUrl='')
    inheritedProto = dict(common, PluginName='GFAL2_SRM2',
                          Path='/eos/lhcb/grid/user',
                          WSUrl='/srm/v2/server?SFN:')

    self.assertListEqual(seInfo['ProtocolOptions'], [extraProto, inheritedProto])
コード例 #21
0
ファイル: TestRFIOPlugIn.py プロジェクト: roiser/DIRAC
 def setUp(self):
     """Build the RFIO storage object for CERN-RAW and move it to the
     unit-test working directory.
     """
     factory = StorageFactory()
     res = factory.getStorages("CERN-RAW", ["RFIO"])
     # assertTrue replaces the deprecated assert_ alias (removed in Python 3.12)
     self.assertTrue(res["OK"])
     storageDetails = res["Value"]
     self.storage = storageDetails["StorageObjects"][0]
     self.storage.changeDirectory("lhcb/test/unit-test/Storage/RFIOStorage")
コード例 #22
0
ファイル: Test_StorageFactory.py プロジェクト: sparsh35/DIRAC
    def test_redefine_plugin_name(self, _sf_generateStorageObject,
                                  _rss_getSEStatus):
        """Load CERN-REDEFINE-PLUGIN-NAME, which inherits from CERN-BASE
        and declares the same plugin under a different section name.
        """
        res = StorageFactory(vo='lhcb').getStorages('CERN-REDEFINE-PLUGIN-NAME')
        self.assertTrue(res['OK'], res)
        seInfo = res['Value']

        # The plugin shows up once per section
        self.assertListEqual(seInfo['RemotePlugins'],
                             ['GFAL2_SRM2', 'GFAL2_SRM2'])

        # Section inherited from CERN-BASE
        inheritedProto = {'Access': 'remote',
                          'Host': 'srm-eoslhcb.cern.ch',
                          'Path': '/eos/lhcb/grid/prod',
                          'PluginName': 'GFAL2_SRM2',
                          'Port': 8443,
                          'Protocol': 'srm',
                          'SpaceToken': 'LHCb-EOS',
                          'WSUrl': '/srm/v2/server?SFN:'}
        # New section: only Path and PluginName are filled in
        redefinedProto = {'Access': 'remote',
                          'Host': '',
                          'Path': '/eos/lhcb/grid/other',
                          'PluginName': 'GFAL2_SRM2',
                          'Port': '',
                          'Protocol': '',
                          'SpaceToken': '',
                          'WSUrl': ''}

        self.assertListEqual(seInfo['ProtocolOptions'],
                             [inheritedProto, redefinedProto])
コード例 #23
0
ファイル: Test_StorageFactory.py プロジェクト: project8/DIRAC
  def test_no_plugin_name(self, _sf_generateStorageObject, _rss_getSEStatus):
    """Load CERN-NO-PLUGIN-NAME, which inherits from CERN-BASE and
       redefines the same protocol without any PluginName: this must fail.
    """
    res = StorageFactory(vo='lhcb').getStorages('CERN-NO-PLUGIN-NAME')
    # The lookup is expected to be rejected
    self.assertFalse(res['OK'], res)
コード例 #24
0
ファイル: Test_StorageFactory.py プロジェクト: sparsh35/DIRAC
    def test_aliasSE_in_SEDefinition(self, _sf_generateStorageObject,
                                     _rss_getSEStatus):
        """A storage aliases a baseSE declared directly in the
        StorageElements section: this must keep working.
        """
        res = StorageFactory(vo='lhcb').getStorages('CERN-WRONGLOCATION-ALIAS')
        self.assertTrue(res['OK'], res)
コード例 #25
0
ファイル: Test_StorageFactory.py プロジェクト: sparsh35/DIRAC
    def test_baseSE_in_SEDefinition(self, _sf_generateStorageObject,
                                    _rss_getSEStatus):
        """A storage inherits from a baseSE declared in the StorageElements
        section instead of BaseStorageElements. That compatibility layer
        was removed, so this must now fail.
        """
        res = StorageFactory(vo='lhcb').getStorages('CERN-WRONGLOCATION')
        self.assertFalse(res['OK'], res)
コード例 #26
0
    def setUp(self):
        """Build the storage object under test and create a fresh working
        directory on the storage.
        """
        res = StorageFactory("lhcb").getStorages(storageElementToTest, [plugin])
        self.assertTrue(res["OK"])
        self.storage = res["Value"]["StorageObjects"][0]
        self.storage.changeDirectory("lhcb/test/unit-test/TestStoragePlugIn")
        destDir = self.storage.getCurrentURL("")["Value"]
        res = self.storage.createDirectory(destDir)
        self.assertTrue(res["OK"])
        # The new directory must be reported as successfully created
        successful = res["Value"]["Successful"]
        self.assertTrue(destDir in successful)
        self.assertTrue(successful[destDir])
        self.numberOfFiles = 1
コード例 #27
0
    def setUp(self):
        """Build the storage object under test and create a fresh working
        directory on the storage.
        """
        factory = StorageFactory('lhcb')
        res = factory.getStorages(storageElementToTest, [plugin])
        # assertTrue replaces the deprecated assert_ alias (removed in Python 3.12)
        self.assertTrue(res['OK'])
        storageDetails = res['Value']
        self.storage = storageDetails['StorageObjects'][0]
        self.storage.changeDirectory('lhcb/test/unit-test/TestStoragePlugIn')
        destDir = self.storage.getCurrentURL('')['Value']
        res = self.storage.createDirectory(destDir)
        self.assertTrue(res['OK'])
        # dict.has_key() was removed in Python 3; use the 'in' operator
        self.assertTrue(destDir in res['Value']['Successful'])
        self.assertTrue(res['Value']['Successful'][destDir])
        self.numberOfFiles = 1
コード例 #28
0
ファイル: TestStoragePlugIn.py プロジェクト: JanEbbing/DIRAC
  def setUp( self ):
    """Build the storage object under test and create a fresh working
    directory on the storage.
    """
    factory = StorageFactory( 'lhcb' )
    res = factory.getStorages( storageElementToTest, [plugin] )
    # assertTrue replaces the deprecated assert_ alias (removed in Python 3.12)
    self.assertTrue( res['OK'] )
    storageDetails = res['Value']
    self.storage = storageDetails['StorageObjects'][0]
    self.storage.changeDirectory( 'lhcb/test/unit-test/TestStoragePlugIn' )
    destDir = self.storage.getCurrentURL( '' )['Value']
    res = self.storage.createDirectory( destDir )
    self.assertTrue( res['OK'] )
    # dict.has_key() was removed in Python 3; use the 'in' operator
    self.assertTrue( destDir in res['Value']['Successful'] )
    self.assertTrue( res['Value']['Successful'][destDir] )
    self.numberOfFiles = 1
コード例 #29
0
  def test_getStorages( self ):
    """End-to-end exercise of the SRM2 storage stub for IN2P3-SRM2:
    checks its parameters and PFN/URL construction, then performs a few
    remote operations against the endpoint.
    """
    factory = StorageFactory()
    storageName = 'IN2P3-SRM2'
    protocolList = ['SRM2']
    res = factory.getStorages( storageName, protocolList )
    # assertTrue replaces the deprecated assert_ alias (removed in Python 3.12)
    self.assertTrue( res['OK'] )
    storageStubs = res['Value']['StorageObjects']
    storageStub = storageStubs[0]

    # Expected storage parameters
    storageDict = {}
    storageDict['StorageName'] = 'IN2P3-SRM2'
    storageDict['ProtocolName'] = 'SRM2'
    storageDict['Protocol'] = 'srm'
    storageDict['Host'] = 'ccsrmtestv2.in2p3.fr'
    storageDict['Port'] = '8443/srm/managerv2?SFN='
    storageDict['Path'] = '/pnfs/in2p3.fr/data'
    storageDict['SpaceToken'] = 'LHCb_RAW'
    res = storageStub.getParameters()
    self.assertTrue( res['OK'] )
    parameterDict = res['Value']
    self.assertEqual( parameterDict, storageDict )

    # PFN base with and without the port section
    res = storageStub.getPFNBase( withPort = False )
    self.assertTrue( res['OK'] )
    self.assertEqual( res['Value'], 'srm://ccsrmtestv2.in2p3.fr/pnfs/in2p3.fr/data' )
    res = storageStub.getPFNBase( withPort = True )
    self.assertTrue( res['OK'] )
    self.assertEqual( res['Value'], 'srm://ccsrmtestv2.in2p3.fr:8443/srm/managerv2?SFN=/pnfs/in2p3.fr/data' )

    # Full URL construction for an LFN, with and without the port
    res = storageStub.getUrl( '/lhcb/production/DC06/test.file', withPort = False )
    self.assertTrue( res['OK'] )
    self.assertEqual( res['Value'], 'srm://ccsrmtestv2.in2p3.fr/pnfs/in2p3.fr/data/lhcb/production/DC06/test.file' )
    res = storageStub.getUrl( '/lhcb/production/DC06/test.file', withPort = True )
    self.assertTrue( res['OK'] )
    self.assertEqual( res['Value'], 'srm://ccsrmtestv2.in2p3.fr:8443/srm/managerv2?SFN=/pnfs/in2p3.fr/data/lhcb/production/DC06/test.file' )

    # Best-effort remote operations (results intentionally unchecked)
    res = storageStub.remove( 'srm://ccsrmtestv2.in2p3.fr:8443/srm/managerv2?SFN=/pnfs/in2p3.fr/data/lhcb/production/DC06/test.file' )
    listOfDirs = ['srm://ccsrmtestv2.in2p3.fr:8443/srm/managerv2?SFN=/pnfs/in2p3.fr/data/lhcb/production/DC06/v1-lumi2/00001368/DIGI']
    res = storageStub.ls( listOfDirs )

    #directoryPath = 'srm://ccsrmtestv2.in2p3.fr:8443/srm/managerv2?SFN=/pnfs/in2p3.fr/data/lhcb/production/DC06/v1-lumi2/1368'
    #res = storageStub.removeDir(directoryPath)

    destFile = 'srm://ccsrmtestv2.in2p3.fr:8443/srm/managerv2?SFN=/pnfs/in2p3.fr/data/lhcb/production/DC06/v1-lumi2/1368/dirac_directory.7'
    res = storageStub.put( destFile )
    # print() is valid on both Python 2 and 3; bare "print res" is Python-2-only
    print( res )

    res = storageStub.get( destFile )
コード例 #30
0
def get_base_pfn(base_lfn, target_se):
  """Construct a PFN for *base_lfn* on the storage element *target_se*.

  Only the 'srm' protocol entry of the SE is considered; every other
  protocol is ignored. Raises if the SE lookup fails or no srm entry exists.
  """
  result = StorageFactory().getStorages(target_se)
  if not result['OK']:
    raise Exception("Failed to look up storage element details: %s" % result)
  # look for srm (don't use anything else)
  for proto_info in result['Value']['ProtocolOptions']:
    if proto_info['Protocol'] == 'srm':
      # assemble srm://<host>:<port><wsurl><vo-path><lfn>
      return "srm://%s:%s%s%s%s" % (proto_info['Host'], proto_info['Port'],
                                    proto_info['WSUrl'], proto_info['Path'],
                                    base_lfn)
  raise Exception("No srm protocol found for storage element: %s" % target_se)
コード例 #31
0
    def test_getStorages(self):
        factory = StorageFactory(vo='lhcb')
        storageName = 'IN2P3-disk'
        protocolList = ['SRM2']
        res = factory.getStorages(storageName, protocolList)
        self.assert_(res['OK'])
        storageStubs = res['Value']['StorageObjects']
        storageStub = storageStubs[0]

        storageDict = {}
        storageDict['StorageName'] = 'IN2P3-disk'
        storageDict['PluginName'] = 'SRM2'
        storageDict['Protocol'] = 'srm'
        storageDict['Host'] = 'ccsrm02.in2p3.fr'
        storageDict['Port'] = '8443'
        storageDict['WSUrl'] = '/srm/managerv2?SFN='
        storageDict['Path'] = '/pnfs/in2p3.fr/data/lhcb'
        storageDict['SpaceToken'] = ''
        parameterDict = storageStub.getParameters()
        self.assertEqual(parameterDict, storageDict)

        res = storageStub.getTransportURL('/lhcb/production/DC06/test.file')
        self.assert_(res['OK'])
        self.assertEqual(
            res['Value'],
            'srm://ccsrm02.in2p3.fr:8443/srm/managerv2?SFN=/pnfs/in2p3.fr/data/lhcb/production/DC06/test.file'
        )

        res = storageStub.removeFile(
            'srm://ccsrm02.in2p3.fr:8443/srm/managerv2?SFN=/pnfs/in2p3.fr/data/lhcb/production/DC06/test.file'
        )
        listOfDirs = [
            'srm://ccsrm02.in2p3.fr:8443/srm/managerv2?SFN=/pnfs/in2p3.fr/data/lhcb/production/DC06/v1-lumi2/00001368/DIGI'
        ]
        res = storageStub.listDirectory(listOfDirs)

        #directoryPath = 'srm://ccsrmtestv2.in2p3.fr:8443/srm/managerv2?SFN=/pnfs/in2p3.fr/data/lhcb/production/DC06/v1-lumi2/1368'
        #res = storageStub.removeDir(directoryPath)

        destFile = 'srm://ccsrmtestv2.in2p3.fr:8443/srm/managerv2?SFN=/pnfs/in2p3.fr/data/lhcb/production/DC06/v1-lumi2/1368/dirac_directory.7'
        res = storageStub.putFile(destFile)
        print res

        res = storageStub.getFile(destFile)
コード例 #32
0
def get_base_pfn(base_lfn, target_se):
    """Construct a PFN for *base_lfn* on the storage element *target_se*,
    using only the SE's 'srm' protocol entry.
    """
    result = StorageFactory().getStorages(target_se)
    if not result['OK']:
        raise Exception("Failed to look up storage element details: %s" %
                        result)
    # look for srm (don't use anything else)
    for options in result['Value']['ProtocolOptions']:
        if options['Protocol'] == 'srm':
            # srm://<host>:<port><wsurl><vo-path><lfn>
            parts = (options['Host'], options['Port'], options['WSUrl'],
                     options['Path'], base_lfn)
            return "srm://%s:%s%s%s%s" % parts
    raise Exception("No srm protocol found for storage element: %s" %
                    target_se)
コード例 #33
0
    def test_more_protocol(self, _sf_generateStorageObject, _rss_getSEStatus):
        """Load CERN-MORE, which inherits from CERN-BASE and defines one
        additional protocol on top of the inherited one.
        """
        res = StorageFactory(vo="lhcb").getStorages("CERN-MORE")
        self.assertTrue(res["OK"], res)
        seInfo = res["Value"]

        # Both the inherited plugin and the added one are remote
        self.assertSetEqual(set(seInfo["RemotePlugins"]),
                            set(["Extra", "GFAL2_SRM2"]))

        # The two protocols share everything but plugin name, path and WSUrl
        commonOptions = {
            "Access": "remote",
            "Host": "srm-eoslhcb.cern.ch",
            "Port": 8443,
            "Protocol": "srm",
            "SpaceToken": "LHCb-EOS",
        }
        extraProto = dict(commonOptions, PluginName="Extra",
                          Path="/eos/lhcb/grid/prod", WSUrl="")
        srmProto = dict(commonOptions, PluginName="GFAL2_SRM2",
                        Path="/eos/lhcb/grid/user",
                        WSUrl="/srm/v2/server?SFN:")

        # Sort by plugin name so the comparison is deterministic
        self.assertListEqual(
            sorted(seInfo["ProtocolOptions"], key=lambda opt: opt["PluginName"]),
            [extraProto, srmProto],
        )
コード例 #34
0
    def test_simple_inheritance_overwrite(self, _sf_generateStorageObject,
                                          _rss_getSEStatus):
        """Load CERN-USER, which inherits from CERN-BASE, adds a storage
        option, and overrides the path and the space token.
        """
        res = StorageFactory(vo="lhcb").getStorages("CERN-USER")
        self.assertTrue(res["OK"], res)
        seInfo = res["Value"]

        self.assertListEqual(seInfo["RemotePlugins"], ["GFAL2_SRM2"])

        # A single protocol backed by a single storage object is expected
        self.assertEqual(len(seInfo["ProtocolOptions"]), 1)
        self.assertEqual(len(seInfo["StorageObjects"]), 1)

        expectedDetail = {
            "Access": "remote",
            "Host": "srm-eoslhcb.cern.ch",
            "Path": "/eos/lhcb/grid/user",
            "PluginName": "GFAL2_SRM2",
            "Port": 8443,
            "Protocol": "srm",
            "SpaceToken": "LHCb_USER",
            "WSUrl": "/srm/v2/server?SFN:",
        }
        protocolDetail = seInfo["ProtocolOptions"][0]
        # Check each expected protocol value individually (extra keys,
        # if any, are deliberately not rejected)
        for option, value in expectedDetail.items():
            self.assertEqual(protocolDetail[option], value)

        self.assertDictEqual(
            seInfo["StorageOptions"],
            {
                "BackendType": "Eos",
                "SEType": "T0D1",
                "PledgedSpace": 205,
                "BaseSE": "CERN-BASE"
            },
        )
コード例 #35
0
    def test_redefine_plugin_name(self, _sf_generateStorageObject,
                                  _rss_getSEStatus):
        """Load CERN-REDEFINE-PLUGIN-NAME, which inherits from CERN-BASE and
        reuses the same plugin under a different section name.
        """
        res = StorageFactory(vo="lhcb").getStorages("CERN-REDEFINE-PLUGIN-NAME")
        self.assertTrue(res["OK"], res)
        seInfo = res["Value"]

        # The same plugin name shows up once per section
        self.assertListEqual(seInfo["RemotePlugins"],
                             ["GFAL2_SRM2", "GFAL2_SRM2"])

        inheritedProto = {
            "Access": "remote",
            "Host": "srm-eoslhcb.cern.ch",
            "Path": "/eos/lhcb/grid/prod",
            "PluginName": "GFAL2_SRM2",
            "Port": 8443,
            "Protocol": "srm",
            "SpaceToken": "LHCb-EOS",
            "WSUrl": "/srm/v2/server?SFN:",
        }
        # The redefining section only sets Path; everything else is empty
        redefinedProto = {
            "Access": "remote",
            "Host": "",
            "Path": "/eos/lhcb/grid/other",
            "PluginName": "GFAL2_SRM2",
            "Port": "",
            "Protocol": "",
            "SpaceToken": "",
            "WSUrl": "",
        }

        self.assertListEqual(seInfo["ProtocolOptions"],
                             [inheritedProto, redefinedProto])
コード例 #36
0
    def test_use_plugin_as_protocol_name_with_plugin_name(
            self, _sf_generateStorageObject, _rss_getSEStatus):
        """Load CERN-USE-PLUGIN-AS-PROTOCOL-NAME-WITH-PLUGIN-NAME, which
        inherits from CERN-BASE and names a protocol section after a plugin
        while also giving an explicit plugin name.
        """
        res = StorageFactory(vo="lhcb").getStorages(
            "CERN-USE-PLUGIN-AS-PROTOCOL-NAME-WITH-PLUGIN-NAME")
        self.assertTrue(res["OK"], res)
        seInfo = res["Value"]

        self.assertListEqual(seInfo["RemotePlugins"],
                             ["GFAL2_SRM2", "GFAL2_XROOT"])

        # The two protocols differ only in plugin name and path
        template = {
            "Access": "remote",
            "Host": "srm-eoslhcb.cern.ch",
            "Port": 8443,
            "Protocol": "srm",
            "SpaceToken": "LHCb-EOS",
            "WSUrl": "/srm/v2/server?SFN:",
        }
        srmProto = dict(template, PluginName="GFAL2_SRM2",
                        Path="/eos/lhcb/grid/prod")
        xrootProto = dict(template, PluginName="GFAL2_XROOT",
                          Path="/eos/lhcb/grid/user")

        self.assertListEqual(seInfo["ProtocolOptions"], [srmProto, xrootProto])
コード例 #37
0
    def test_child_inherit_from_base_with_two_same_plugins(
            self, _sf_generateStorageObject, _rss_getSEStatus):
        """Load CERN-CHILD-INHERIT-FROM-BASE-WITH-TWO-SAME-PLUGINS, whose
        parent CERN-BASE-WITH-TWO-SAME-PLUGINS declares the same plugin in
        two different sections.
        """
        res = StorageFactory(vo="lhcb").getStorages(
            "CERN-CHILD-INHERIT-FROM-BASE-WITH-TWO-SAME-PLUGINS")
        self.assertTrue(res["OK"], res)
        seInfo = res["Value"]

        # The duplicated plugin is listed twice
        self.assertListEqual(seInfo["RemotePlugins"],
                             ["GFAL2_SRM2", "GFAL2_SRM2"])

        firstProto = {
            "Access": "remote",
            "Host": "srm-eoslhcb.cern.ch",
            "Path": "/eos/lhcb/grid/user",
            "PluginName": "GFAL2_SRM2",
            "Port": 8443,
            "Protocol": "srm",
            "SpaceToken": "",
            "WSUrl": "/srm/v2/server?SFN:",
        }
        secondProto = {
            "Access": "remote",
            "Host": "eoslhcb.cern.ch",
            "Path": "",
            "PluginName": "GFAL2_SRM2",
            "Port": 8443,
            "Protocol": "root",
            "SpaceToken": "",
            "WSUrl": "/srm/v2/server?SFN:",
        }

        self.assertListEqual(seInfo["ProtocolOptions"],
                             [firstProto, secondProto])
コード例 #38
0
ファイル: TestStorageFactory.py プロジェクト: JanEbbing/DIRAC
  def test_getStorages( self ):
    factory = StorageFactory( vo = 'lhcb' )
    storageName = 'IN2P3-disk'
    protocolList = ['SRM2']
    res = factory.getStorages( storageName, protocolList )
    self.assert_( res['OK'] )
    storageStubs = res['Value']['StorageObjects']
    storageStub = storageStubs[0]

    storageDict = {}
    storageDict['StorageName'] = 'IN2P3-disk'
    storageDict['PluginName'] = 'SRM2'
    storageDict['Protocol'] = 'srm'
    storageDict['Host'] = 'ccsrm02.in2p3.fr'
    storageDict['Port'] = '8443'
    storageDict['WSUrl'] = '/srm/managerv2?SFN='
    storageDict['Path'] = '/pnfs/in2p3.fr/data/lhcb'
    storageDict['SpaceToken'] = ''
    parameterDict = storageStub.getParameters()
    self.assertEqual( parameterDict, storageDict )

    res = storageStub.getTransportURL( '/lhcb/production/DC06/test.file' )
    self.assert_( res['OK'] )
    self.assertEqual( res['Value'], 'srm://ccsrm02.in2p3.fr:8443/srm/managerv2?SFN=/pnfs/in2p3.fr/data/lhcb/production/DC06/test.file' )

    res = storageStub.removeFile( 'srm://ccsrm02.in2p3.fr:8443/srm/managerv2?SFN=/pnfs/in2p3.fr/data/lhcb/production/DC06/test.file' )
    listOfDirs = ['srm://ccsrm02.in2p3.fr:8443/srm/managerv2?SFN=/pnfs/in2p3.fr/data/lhcb/production/DC06/v1-lumi2/00001368/DIGI']
    res = storageStub.listDirectory( listOfDirs )

    #directoryPath = 'srm://ccsrmtestv2.in2p3.fr:8443/srm/managerv2?SFN=/pnfs/in2p3.fr/data/lhcb/production/DC06/v1-lumi2/1368'
    #res = storageStub.removeDir(directoryPath)

    destFile = 'srm://ccsrmtestv2.in2p3.fr:8443/srm/managerv2?SFN=/pnfs/in2p3.fr/data/lhcb/production/DC06/v1-lumi2/1368/dirac_directory.7'
    res = storageStub.putFile( destFile )
    print res

    res = storageStub.getFile( destFile )
コード例 #39
0
    def test_pure_abstract(self, _sf_generateStorageObject, _rss_getSEStatus):
        """Load CERN-CHILD, which inherits from CERN-ABSTRACT; the parent
        declares two incomplete protocols and the child fills them in.
        """
        res = StorageFactory(vo="lhcb").getStorages("CERN-CHILD")
        self.assertTrue(res["OK"], res)
        seInfo = res["Value"]

        self.assertListEqual(seInfo["RemotePlugins"],
                             ["GFAL2_SRM2", "GFAL2_XROOT"])

        srmProto = {
            "Access": "remote",
            "Host": "srm-eoslhcb.cern.ch",
            "Path": "/eos/lhcb/grid/user",
            "PluginName": "GFAL2_SRM2",
            "Port": 8443,
            "Protocol": "srm",
            "SpaceToken": "LHCb_USER",
            "WSUrl": "/srm/v2/server?SFN:",
        }
        xrootProto = {
            "Access": "remote",
            "Host": "eoslhcb.cern.ch",
            "Path": "/eos/lhcb/grid/xrootuser",
            "PluginName": "GFAL2_XROOT",
            "Port": "",
            "Protocol": "root",
            "SpaceToken": "",
            "WSUrl": "",
        }

        self.assertListEqual(seInfo["ProtocolOptions"], [srmProto, xrootProto])
コード例 #40
0
ファイル: Test_StorageFactory.py プロジェクト: project8/DIRAC
  def test_bad_plugin_name(self, _sf_generateStorageObject, _rss_getSEStatus):
    """ Load CERN-BAD-PLUGIN-NAME, which inherits from CERN-BASE and
        redefines the same protocol with a different PluginName.

        Currently this results in two different protocols, but this should change
    """
    res = StorageFactory(vo = 'lhcb').getStorages('CERN-BAD-PLUGIN-NAME')
    self.assertTrue(res['OK'], res)
    seInfo = res['Value']

    # The redefined plugin shows up as a separate, local protocol
    self.assertListEqual(seInfo['RemotePlugins'], ['GFAL2_SRM2'])
    self.assertListEqual(seInfo['LocalPlugins'], ['AnotherPluginName'])

    remoteProto = {
        'Access': 'remote',
        'Host': 'srm-eoslhcb.cern.ch',
        'Path': '/eos/lhcb/grid/prod',
        'PluginName': 'GFAL2_SRM2',
        'Port': 8443,
        'Protocol': 'srm',
        'SpaceToken': 'LHCb-EOS',
        'WSUrl': '/srm/v2/server?SFN:'
    }
    localProto = {
        'Access': 'local',
        'Host': '',
        'Path': '/eos/lhcb/grid/prod',
        'PluginName': 'AnotherPluginName',
        'Port': '',
        'Protocol': '',
        'SpaceToken': '',
        'WSUrl': ''
    }

    self.assertListEqual(seInfo['ProtocolOptions'], [remoteProto, localProto])
コード例 #41
0
class ReplicationScheduler( AgentModule ):
  """Agent that schedules 'transfer' requests from the RequestDB onto FTS
  transfer channels in the TransferDB, letting a StrategyHandler pick the
  replication tree for every file.

  NOTE(review): legacy Python 2 code (``types.StringTypes`` below); the
  comments document it as-is, nothing is modernised.
  """

  def initialize( self ):
    """Create the DB/client helpers used by execute() and configure the
    shifter proxy. Always returns S_OK().
    """

    self.section = PathFinder.getAgentSection( AGENT_NAME )
    self.RequestDB = RequestDBMySQL()
    self.TransferDB = TransferDB()
    self.DataLog = DataLoggingClient()
    self.factory = StorageFactory()
    self.rm = ReplicaManager()

    # This sets the Default Proxy to used as that defined under
    # /Operations/Shifter/DataManager
    # the shifterProxy option in the Configuration can be used to change this default.
    self.am_setOption( 'shifterProxy', 'DataManager' )

    return S_OK()

  def execute( self ):
    """ The main agent execution method """

    # This allows dynamic changing of the throughput timescale
    self.throughputTimescale = self.am_getOption( 'ThroughputTimescale', 3600 )
    # NOTE(review): the hard-coded value below overrides the CS option read
    # just above - confirm whether this is intentional.
    self.throughputTimescale = 60 * 60 * 1
    #print 'ThroughputTimescale:',self.throughputTimescale
    ######################################################################################
    #
    #  Obtain information on the current state of the channel queues
    #

    res = self.TransferDB.getChannelQueues()
    if not res['OK']:
      errStr = "ReplicationScheduler._execute: Failed to get channel queues from TransferDB."
      gLogger.error( errStr, res['Message'] )
      # failures end the cycle quietly with S_OK() so the agent keeps cycling
      return S_OK()
    if not res['Value']:
      gLogger.info( "ReplicationScheduler._execute: No active channels found for replication." )
      return S_OK()
    channels = res['Value']

    res = self.TransferDB.getChannelObservedThroughput( self.throughputTimescale )
    if not res['OK']:
      errStr = "ReplicationScheduler._execute: Failed to get observed throughput from TransferDB."
      gLogger.error( errStr, res['Message'] )
      return S_OK()
    if not res['Value']:
      gLogger.info( "ReplicationScheduler._execute: No active channels found for replication." )
      return S_OK()
    bandwidths = res['Value']

    # Strategy handler decides the replication tree from the observed
    # bandwidths and the channel topology
    self.strategyHandler = StrategyHandler( bandwidths, channels, self.section )

    processedRequests = []
    requestsPresent = True
    # Loop over transfer requests until the DB is exhausted or a request is
    # seen a second time (all requests iterated once)
    while requestsPresent:

      ######################################################################################
      #
      #  The first step is to obtain a transfer request from the RequestDB which should be scheduled.
      #

      gLogger.info( "ReplicationScheduler._execute: Contacting RequestDB for suitable requests." )
      res = self.RequestDB.getRequest( 'transfer' )
      if not res['OK']:
        # NOTE(review): 'continue' here retries immediately with no backoff -
        # a persistent DB error spins this loop; confirm this is acceptable.
        gLogger.error( "ReplicationScheduler._execute: Failed to get a request list from RequestDB.", res['Message'] )
        continue
      if not res['Value']:
        gLogger.info( "ReplicationScheduler._execute: No requests found in RequestDB." )
        requestsPresent = False
        return S_OK()
      requestString = res['Value']['RequestString']
      requestName = res['Value']['RequestName']
      gLogger.info( "ReplicationScheduler._execute: Obtained Request %s from RequestDB." % ( requestName ) )

      ######################################################################################
      #
      #  The request must then be parsed to obtain the sub-requests, their attributes and files.
      #

      logStr = 'ReplicationScheduler._execute: Parsing Request %s.' % ( requestName )
      gLogger.info( logStr )
      oRequest = RequestContainer( requestString )
      res = oRequest.getAttribute( 'RequestID' )
      if not res['OK']:
        gLogger.error( 'ReplicationScheduler._execute: Failed to get requestID.', res['Message'] )
        return S_ERROR( 'ReplicationScheduler._execute: Failed to get number of sub-requests.' )
      requestID = res['Value']
      if requestID in processedRequests:
        # Break the loop once we have iterated once over all requests
        res = self.RequestDB.updateRequest( requestName, requestString )
        if not res['OK']:
          gLogger.error( "Failed to update request", "%s %s" % ( requestName, res['Message'] ) )
        return S_OK()

      processedRequests.append( requestID )

      res = oRequest.getNumSubRequests( 'transfer' )
      if not res['OK']:
        gLogger.error( 'ReplicationScheduler._execute: Failed to get number of sub-requests.', res['Message'] )
        return S_ERROR( 'ReplicationScheduler._execute: Failed to get number of sub-requests.' )
      numberRequests = res['Value']
      gLogger.info( "ReplicationScheduler._execute: '%s' found with %s sub-requests." % ( requestName, numberRequests ) )

      ######################################################################################
      #
      #  The important request attributes are the source and target SEs.
      #

      for ind in range( numberRequests ):
        gLogger.info( "ReplicationScheduler._execute: Treating sub-request %s from '%s'." % ( ind, requestName ) )
        attributes = oRequest.getSubRequestAttributes( ind, 'transfer' )['Value']
        if attributes['Status'] != 'Waiting':
          #  If the sub-request is already in terminal state
          gLogger.info( "ReplicationScheduler._execute: Sub-request %s is status '%s' and  not to be executed." % ( ind, attributes['Status'] ) )
          continue

        sourceSE = attributes['SourceSE']
        targetSE = attributes['TargetSE']
        """ This section should go in the transfer request class """
        # NOTE(review): targetSEs is only assigned in the string branch below;
        # a non-string targetSE would leave it undefined and raise NameError
        # at the loop further down - confirm upstream always passes a string.
        if type( targetSE ) in types.StringTypes:
          if re.search( ',', targetSE ):
            targetSEs = targetSE.split( ',' )
          else:
            targetSEs = [targetSE]
        """----------------------------------------------------- """
        operation = attributes['Operation']
        reqRepStrategy = None
        if operation in self.strategyHandler.getSupportedStrategies():
          reqRepStrategy = operation

        ######################################################################################
        #
        # Then obtain the file attribute of interest are the  LFN and FileID
        #

        res = oRequest.getSubRequestFiles( ind, 'transfer' )
        if not res['OK']:
          gLogger.error( 'ReplicationScheduler._execute: Failed to obtain sub-request files.' , res['Message'] )
          continue
        files = res['Value']
        gLogger.info( "ReplicationScheduler._execute: Sub-request %s found with %s files." % ( ind, len( files ) ) )
        filesDict = {}
        # 'file' shadows the Python builtin of the same name
        for file in files:
          lfn = file['LFN']
          if file['Status'] != 'Waiting':
            gLogger.debug( "ReplicationScheduler._execute: %s will not be scheduled because it is %s." % ( lfn, file['Status'] ) )
          else:
            fileID = file['FileID']
            filesDict[lfn] = fileID
        if not filesDict:
          gLogger.info( "ReplicationScheduler._execute: No Waiting files found for request" )
          continue
        notSched = len( files ) - len( filesDict )
        if notSched:
          gLogger.info( "ReplicationScheduler._execute: %d files found not Waiting" % notSched )

        ######################################################################################
        #
        #  Now obtain replica information for the files associated to the sub-request.
        #

        lfns = filesDict.keys()
        gLogger.info( "ReplicationScheduler._execute: Obtaining replica information for %d sub-request files." % len( lfns ) )
        res = self.rm.getCatalogReplicas( lfns )
        if not res['OK']:
          gLogger.error( "ReplicationScheduler._execute: Failed to get replica information.", res['Message'] )
          continue
        for lfn, failure in res['Value']['Failed'].items():
          gLogger.error( "ReplicationScheduler._execute: Failed to get replicas.", '%s: %s' % ( lfn, failure ) )
        replicas = res['Value']['Successful']
        if not replicas.keys():
          gLogger.error( "ReplicationScheduler._execute: Failed to get replica information for all files." )
          continue

        ######################################################################################
        #
        #  Now obtain the file sizes for the files associated to the sub-request.
        #

        lfns = replicas.keys()
        gLogger.info( "ReplicationScheduler._execute: Obtaining file sizes for %d sub-request files." % len( lfns ) )
        res = self.rm.getCatalogFileMetadata( lfns )
        if not res['OK']:
          gLogger.error( "ReplicationScheduler._execute: Failed to get file size information.", res['Message'] )
          continue
        for lfn, failure in res['Value']['Failed'].items():
          gLogger.error( 'ReplicationScheduler._execute: Failed to get file size.', '%s: %s' % ( lfn, failure ) )
        metadata = res['Value']['Successful']
        if not metadata.keys():
          gLogger.error( "ReplicationScheduler._execute: Failed to get metadata for all files." )
          continue

        ######################################################################################
        #
        # For each LFN determine the replication tree
        #

        for lfn in sortList( metadata.keys() ):
          fileSize = metadata[lfn]['Size']
          lfnReps = replicas[lfn]
          fileID = filesDict[lfn]

          # Keep only the target SEs that do not already hold a replica
          targets = []
          for targetSE in targetSEs:
            if targetSE in lfnReps.keys():
              gLogger.debug( "ReplicationScheduler.execute: %s already present at %s." % ( lfn, targetSE ) )
            else:
              targets.append( targetSE )
          if not targets:
            gLogger.info( "ReplicationScheduler.execute: %s present at all targets." % lfn )
            oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Status', 'Done' )
            continue
          if not lfnReps:
            gLogger.error( "ReplicationScheduler.execute: The file has no replicas.", lfn )
            continue
          res = self.strategyHandler.determineReplicationTree( sourceSE, targets, lfnReps, fileSize, strategy = reqRepStrategy )
          if not res['OK']:
            gLogger.error( "ReplicationScheduler.execute: Failed to determine replication tree.", res['Message'] )
            continue
          tree = res['Value']

          ######################################################################################
          #
          # For each item in the replication tree obtain the source and target SURLS
          #

          # 'dict' shadows the Python builtin of the same name
          for channelID, dict in tree.items():
            gLogger.info( "ReplicationScheduler.execute: processing for channel %d %s" % ( channelID, str( dict ) ) )
            hopSourceSE = dict['SourceSE']
            hopDestSE = dict['DestSE']
            hopAncestor = dict['Ancestor']

            # Get the sourceSURL
            if hopAncestor:
              # multi-hop: this hop waits for its ancestor channel to finish
              status = 'Waiting%s' % ( hopAncestor )
              res = self.obtainLFNSURL( hopSourceSE, lfn )
              if not res['OK']:
                errStr = res['Message']
                gLogger.error( errStr )
                return S_ERROR( errStr )
              sourceSURL = res['Value']
            else:
              status = 'Waiting'
              # fall back to the raw replica PFN if the resolution fails
              res = self.resolvePFNSURL( hopSourceSE, lfnReps[hopSourceSE] )
              if not res['OK']:
                sourceSURL = lfnReps[hopSourceSE]
              else:
                sourceSURL = res['Value']

            # Get the targetSURL
            res = self.obtainLFNSURL( hopDestSE, lfn )
            if not res['OK']:
              errStr = res['Message']
              gLogger.error( errStr )
              return S_ERROR( errStr )
            targetSURL = res['Value']

            ######################################################################################
            #
            # For each item in the replication tree add the file to the channel
            #
            res = self.TransferDB.addFileToChannel( channelID, fileID, hopSourceSE, sourceSURL, hopDestSE, targetSURL, fileSize, fileStatus = status )
            if not res['OK']:
              errStr = res['Message']
              gLogger.error( "ReplicationScheduler._execute: Failed to add File to Channel." , "%s %s" % ( fileID, channelID ) )
              return S_ERROR( errStr )
            res = self.TransferDB.addFileRegistration( channelID, fileID, lfn, targetSURL, hopDestSE )
            if not res['OK']:
              errStr = res['Message']
              gLogger.error( "ReplicationScheduler._execute: Failed to add File registration." , "%s %s" % ( fileID, channelID ) )
              # roll back the channel entry added just above
              result = self.TransferDB.removeFileFromChannel( channelID, fileID )
              if not result['OK']:
                errStr += result['Message']
                gLogger.error( "ReplicationScheduler._execute: Failed to remove File." , "%s %s" % ( fileID, channelID ) )
              return S_ERROR( errStr )
            oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Status', 'Scheduled' )
          # NOTE(review): the result of addReplicationTree is never checked
          res = self.TransferDB.addReplicationTree( fileID, tree )

        if oRequest.isSubRequestEmpty( ind, 'transfer' )['Value']:
          oRequest.setSubRequestStatus( ind, 'transfer', 'Scheduled' )

      ################################################
      #  Generate the new request string after operation
      requestString = oRequest.toXML()['Value']
      res = self.RequestDB.updateRequest( requestName, requestString )
      if not res['OK']:
        gLogger.error( "ReplicationScheduler._execute: Failed to update request", "%s %s" % ( requestName, res['Message'] ) )

  def obtainLFNSURL( self, targetSE, lfn ):
    """ Creates the targetSURL for the storage and LFN supplied

    :param targetSE: name of the target storage element
    :param lfn: LFN to map onto the storage
    :return: S_OK( surl ) from the first SRM2 storage object that can build
             a URL, S_ERROR otherwise
    """
    res = self.factory.getStorages( targetSE, protocolList = ['SRM2'] )
    if not res['OK']:
      errStr = 'ReplicationScheduler._execute: Failed to create SRM2 storage for %s: %s. ' % ( targetSE, res['Message'] )
      gLogger.error( errStr )
      return S_ERROR( errStr )
    storageObjects = res['Value']['StorageObjects']
    for storageObject in storageObjects:
      res = storageObject.getCurrentURL( lfn )
      if res['OK']:
        return res
    gLogger.error( 'ReplicationScheduler._execute: Failed to get SRM compliant storage.' , targetSE )
    return S_ERROR( 'ReplicationScheduler._execute: Failed to get SRM compliant storage.' )

  def resolvePFNSURL( self, sourceSE, pfn ):
    """ Resolves *pfn* at *sourceSE* to the protocol URL via the replica manager

    :return: S_OK( surl ) on success, S_ERROR carrying the per-PFN failure
             reason otherwise
    """
    res = self.rm.getPfnForProtocol( [pfn], sourceSE )
    if not res['OK']:
      return res
    if pfn in res['Value']['Failed'].keys():
      return S_ERROR( res['Value']['Failed'][pfn] )
    return S_OK( res['Value']['Successful'][pfn] )
コード例 #42
0
class FTSClient( Client ):
  """
  .. class:: FTSClient

  Client for the DataManagement/FTSManager service: CRUD access to FTSJobs,
  FTSFiles and FTS history, plus scheduling of request files for FTS transfer.
  """

  def __init__( self, useCertificates = False ):
    """c'tor

    :param self: self reference
    :param bool useCertificates: flag to enable/disable certificates
    """
    Client.__init__( self )
    self.log = gLogger.getSubLogger( "DataManagement/FTSClient" )
    self.setServer( "DataManagement/FTSManager" )

    # getting other clients
    self.ftsValidator = FTSValidator()
    self.dataManager = DataManager()
    self.storageFactory = StorageFactory()

    url = PathFinder.getServiceURL( "DataManagement/FTSManager" )
    if not url:
      raise RuntimeError( "CS option DataManagement/FTSManager URL is not set!" )
    self.ftsManager = RPCClient( url )

  def getFTSFileList( self, statusList = None, limit = None ):
    """ get list of FTSFiles with status in statusList

    :param list statusList: FTSFile statuses (default: [ 'Waiting' ])
    :param int limit: maximum number of records (default: 1000)
    """
    statusList = statusList if statusList else [ "Waiting" ]
    limit = limit if limit else 1000
    getFTSFileList = self.ftsManager.getFTSFileList( statusList, limit )
    if not getFTSFileList['OK']:
      self.log.error( "getFTSFileList: %s" % getFTSFileList['Message'] )
      return getFTSFileList
    getFTSFileList = getFTSFileList['Value']
    return S_OK( [ FTSFile( ftsFile ) for ftsFile in getFTSFileList ] )

  def getFTSJobList( self, statusList = None, limit = None ):
    """ get FTSJobs with statuses in :statusList:

    :param list statusList: FTSJob statuses (default: init + transfer states)
    :param int limit: maximum number of records (default: 500)
    """
    statusList = statusList if statusList else list( FTSJob.INITSTATES + FTSJob.TRANSSTATES )
    limit = limit if limit else 500
    getFTSJobList = self.ftsManager.getFTSJobList( statusList, limit )
    if not getFTSJobList['OK']:
      self.log.error( "getFTSJobList: %s" % getFTSJobList['Message'] )
      return getFTSJobList
    getFTSJobList = getFTSJobList['Value']
    return S_OK( [ FTSJob( ftsJobDict ) for ftsJobDict in getFTSJobList ] )

  def getFTSFilesForRequest( self, requestID, statusList = None ):
    """ read FTSFiles for a given :requestID:

    :param int requestID: ReqDB.Request.RequestID
    :param list statusList: List of statuses (default: Waiting)
    """
    ftsFiles = self.ftsManager.getFTSFilesForRequest( requestID, statusList )
    if not ftsFiles['OK']:
      self.log.error( "getFTSFilesForRequest: %s" % ftsFiles['Message'] )
      return ftsFiles
    return S_OK( [ FTSFile( ftsFileDict ) for ftsFileDict in ftsFiles['Value'] ] )

  def getAllFTSFilesForRequest( self, requestID ):
    """ read all FTSFiles for a given :requestID: regardless of status

    :param int requestID: ReqDB.Request.RequestID
    """
    ftsFiles = self.ftsManager.getAllFTSFilesForRequest( requestID )
    if not ftsFiles['OK']:
      # fixed log label: previously reported as 'getFTSFilesForRequest'
      self.log.error( "getAllFTSFilesForRequest: %s" % ftsFiles['Message'] )
      return ftsFiles
    return S_OK( [ FTSFile( ftsFileDict ) for ftsFileDict in ftsFiles['Value'] ] )

  def getFTSJobsForRequest( self, requestID, statusList = None ):
    """ get list of FTSJobs with statuses in :statusList: given requestID

    :param int requestID: ReqDB.Request.RequestID
    :param list statusList: list with FTSJob statuses

    :return: [ FTSJob, FTSJob, ... ]
    """
    statusList = statusList if statusList else list( FTSJob.INITSTATES + FTSJob.TRANSSTATES )
    getJobs = self.ftsManager.getFTSJobsForRequest( requestID, statusList )
    if not getJobs['OK']:
      self.log.error( "getFTSJobsForRequest: %s" % getJobs['Message'] )
      return getJobs
    return S_OK( [ FTSJob( ftsJobDict ) for ftsJobDict in getJobs['Value'] ] )

  def getFTSFile( self, ftsFileID = None ):
    """ get a single FTSFile

    :param int ftsFileID: FTSFileID
    """
    getFile = self.ftsManager.getFTSFile( ftsFileID )
    if not getFile['OK']:
      self.log.error( getFile['Message'] )
      # bug fix: propagate the failure instead of falling through to a
      # KeyError on the missing 'Value' key
      return getFile
    # bug fix: previously a falsy 'Value' led to an UnboundLocalError on
    # the un-assigned local 'ftsFile'
    if not getFile['Value']:
      return S_ERROR( "FTSFile %s not found" % ftsFileID )
    # # de-serialize
    return S_OK( FTSFile( getFile['Value'] ) )

  def putFTSJob( self, ftsJob ):
    """ put FTSJob into FTSDB

    :param FTSJob ftsJob: FTSJob instance
    """
    ftsJobJSON = ftsJob.toJSON()
    if not ftsJobJSON['OK']:
      self.log.error( ftsJobJSON['Message'] )
      return ftsJobJSON
    isValid = self.ftsValidator.validate( ftsJob )
    if not isValid['OK']:
      self.log.error( isValid['Message'], str( ftsJobJSON['Value'] ) )
      return isValid
    return self.ftsManager.putFTSJob( ftsJobJSON['Value'] )

  def getFTSJob( self, ftsJobID ):
    """ get FTS job, change its status to 'Assigned'

    :param int ftsJobID: FTSJobID
    """
    getJob = self.ftsManager.getFTSJob( ftsJobID )
    if not getJob['OK']:
      self.log.error( getJob['Message'] )
      return getJob
    # best-effort status change: a failure is logged but the job is still returned
    setStatus = self.ftsManager.setFTSJobStatus( ftsJobID, 'Assigned' )
    if not setStatus['OK']:
      self.log.error( setStatus['Message'] )
    # # de-serialize
#    if getJob['Value']:
#      getJob = FTSJob( getJob['Value'] )
    return getJob

  def peekFTSJob( self, ftsJobID ):
    """ just peek FTSJob, without touching its status

    :param int ftsJobID: FTSJobID
    """
    getJob = self.ftsManager.getFTSJob( ftsJobID )
    if not getJob['OK']:
      self.log.error( getJob['Message'] )
    # both success and failure return the service result unchanged
    return getJob

  def deleteFTSJob( self, ftsJobID ):
    """ delete FTSJob from FTSDB

    :param int ftsJob: FTSJobID
    """
    deleteJob = self.ftsManager.deleteFTSJob( ftsJobID )
    if not deleteJob['OK']:
      self.log.error( deleteJob['Message'] )
    return deleteJob

  def getFTSJobIDs( self, statusList = None ):
    """ get list of FTSJobIDs for a given status list """
    statusList = statusList if statusList else [ "Submitted", "Ready", "Active" ]
    ftsJobIDs = self.ftsManager.getFTSJobIDs( statusList )
    if not ftsJobIDs['OK']:
      self.log.error( ftsJobIDs['Message'] )
    return ftsJobIDs

  def getFTSFileIDs( self, statusList = None ):
    """ get list of FTSFileIDs for a given status list """
    statusList = statusList if statusList else [ "Waiting" ]
    ftsFileIDs = self.ftsManager.getFTSFileIDs( statusList )
    if not ftsFileIDs['OK']:
      self.log.error( ftsFileIDs['Message'] )
    return ftsFileIDs

  def getFTSHistory( self ):
    """ get FTS history snapshot """
    getFTSHistory = self.ftsManager.getFTSHistory()
    if not getFTSHistory['OK']:
      self.log.error( getFTSHistory['Message'] )
      return getFTSHistory
    getFTSHistory = getFTSHistory['Value']
    return S_OK( [ FTSHistoryView( ftsHistory ) for ftsHistory in getFTSHistory ] )

  def getDBSummary( self ):
    """ get FTSDB summary """
    dbSummary = self.ftsManager.getDBSummary()
    if not dbSummary['OK']:
      self.log.error( "getDBSummary: %s" % dbSummary['Message'] )
    return dbSummary

  def setFTSFilesWaiting( self, operationID, sourceSE, opFileIDList = None ):
    """ update status for waiting FTSFiles from 'Waiting#SourceSE' to 'Waiting'

    :param int operationID: ReqDB.Operation.OperationID
    :param str sourceSE: source SE name
    :param opFileIDList: [ ReqDB.File.FileID, ... ]
    """
    return self.ftsManager.setFTSFilesWaiting( operationID, sourceSE, opFileIDList )

  def deleteFTSFiles( self, operationID, opFileIDList = None ):
    """ delete FTSFiles for rescheduling

    :param int operationID: ReqDB.Operation.OperationID
    :param list opFileIDList: [ ReqDB.File.FileID, ... ]
    """
    return self.ftsManager.deleteFTSFiles( operationID, opFileIDList )

  def ftsSchedule( self, requestID, operationID, opFileList ):
    """ schedule lfn for FTS job

    :param int requestID: RequestDB.Request.RequestID
    :param int operationID: RequestDB.Operation.OperationID
    :param list opFileList: list of tuples ( File.toJSON()['Value'], sourcesList, targetList )

    :return: S_OK( { 'Successful': [ fileID, ... ], 'Failed': { fileID: reason } } )
    """

    # Check whether there are duplicates
    fList = []
    for fTuple in opFileList:
      if fTuple not in fList:
        fList.append( fTuple )
      else:
        self.log.warn( 'File list for FTS scheduling has duplicates, fix it:\n', fTuple )
    fileIDs = [int( fileJSON.get( 'FileID', 0 ) ) for fileJSON, _sourceSEs, _targetSEs in fList ]
    # drop any stale FTSFiles left over from a previous scheduling attempt
    res = self.ftsManager.cleanUpFTSFiles( requestID, fileIDs )
    if not res['OK']:
      self.log.error( "ftsSchedule: %s" % res['Message'] )
      return S_ERROR( "ftsSchedule: %s" % res['Message'] )

    ftsFiles = []

    # # this will be returned on success
    result = { "Successful": [], "Failed": {} }

    for fileJSON, sourceSEs, targetSEs in fList:

      lfn = fileJSON.get( "LFN", "" )
      size = int( fileJSON.get( "Size", 0 ) )
      fileID = int( fileJSON.get( "FileID", 0 ) )
      opID = int( fileJSON.get( "OperationID", 0 ) )

      self.log.verbose( "ftsSchedule: LFN=%s FileID=%s OperationID=%s sources=%s targets=%s" % ( lfn, fileID, opID,
                                                                                                 sourceSEs,
                                                                                                 targetSEs ) )

      res = self.dataManager.getActiveReplicas( lfn )
      if not res['OK']:
        self.log.error( "ftsSchedule: %s" % res['Message'] )
        result["Failed"][fileID] = res['Message']
        continue
      replicaDict = res['Value']

      if lfn in replicaDict["Failed"] and lfn not in replicaDict["Successful"]:
        result["Failed"][fileID] = "no active replicas found"
        continue
      replicaDict = replicaDict["Successful"].get( lfn, {} )
      # # use valid replicas only: those sitting at one of the requested sources
      validReplicasDict = dict( [ ( se, pfn ) for se, pfn in replicaDict.items() if se in sourceSEs ] )

      if not validReplicasDict:
        self.log.warn( "No active replicas found in sources" )
        result["Failed"][fileID] = "no active replicas found in sources"
        continue

      tree = self.ftsManager.getReplicationTree( sourceSEs, targetSEs, size )
      if not tree['OK']:
        self.log.error( "ftsSchedule: %s cannot be scheduled: %s" % ( lfn, tree['Message'] ) )
        result["Failed"][fileID] = tree['Message']
        continue
      tree = tree['Value']

      self.log.verbose( "LFN=%s tree=%s" % ( lfn, tree ) )

      # de-duplicate identical branches, warning once per tree
      treeBranches = []
      printed = False
      for repDict in tree.values():
        if repDict in treeBranches:
          if not printed:
            self.log.warn( 'Duplicate tree branch', str( tree ) )
            printed = True
        else:
          treeBranches.append( repDict )

      for repDict in treeBranches:
        self.log.verbose( "Strategy=%s Ancestor=%s SourceSE=%s TargetSE=%s" % ( repDict["Strategy"],
                                                                                repDict["Ancestor"],
                                                                                repDict["SourceSE"],
                                                                                repDict["TargetSE"] ) )
        transferSURLs = self._getTransferURLs( lfn, repDict, sourceSEs, validReplicasDict )
        if not transferSURLs['OK']:
          result["Failed"][fileID] = transferSURLs['Message']
          continue

        sourceSURL, targetSURL, fileStatus = transferSURLs['Value']
        if sourceSURL == targetSURL:
          result["Failed"][fileID] = "sourceSURL equals to targetSURL for %s" % lfn
          continue

        self.log.verbose( "sourceURL=%s targetURL=%s FTSFile.Status=%s" % ( sourceSURL, targetSURL, fileStatus ) )

        ftsFile = FTSFile()
        for key in ( "LFN", "FileID", "OperationID", "Checksum", "ChecksumType", "Size" ):
          if fileJSON.get( key ):
            setattr( ftsFile, key, fileJSON.get( key ) )
        ftsFile.RequestID = requestID
        ftsFile.OperationID = operationID
        ftsFile.SourceSURL = sourceSURL
        ftsFile.TargetSURL = targetSURL
        ftsFile.SourceSE = repDict["SourceSE"]
        ftsFile.TargetSE = repDict["TargetSE"]
        ftsFile.Status = fileStatus
        ftsFiles.append( ftsFile )

    if not ftsFiles:
      self.log.info( "ftsSchedule: no FTSFiles to put for request %d" % requestID )
      return S_OK( result )

    ftsFilesJSONList = [ftsFile.toJSON()['Value'] for ftsFile in ftsFiles]
    res = self.ftsManager.putFTSFileList( ftsFilesJSONList )
    if not res['OK']:
      self.log.error( "ftsSchedule: %s" % res['Message'] )
      return S_ERROR( "ftsSchedule: %s" % res['Message'] )

    result['Successful'] += [ fileID for fileID in fileIDs if fileID not in result['Failed']]

    # # if we land here some files have been properly scheduled
    return S_OK( result )

  ################################################################################################################
  # Some utilities function

  def _getSurlForLFN( self, targetSE, lfn ):
    """ Get the targetSURL for the storage and LFN supplied.

    :param self: self reference
    :param str targetSE: target SE
    :param str lfn: LFN
    """
    res = self.storageFactory.getStorages( targetSE, protocolList = ["SRM2"] )
    if not res['OK']:
      errStr = "_getSurlForLFN: Failed to create SRM2 storage for %s: %s" % ( targetSE, res['Message'] )
      self.log.error( errStr )
      return S_ERROR( errStr )
    storageObjects = res['Value']["StorageObjects"]
    for storageObject in storageObjects:
      res = storageObject.getCurrentURL( lfn )
      if res['OK']:
        return res
    self.log.error( "_getSurlForLFN: Failed to get SRM compliant storage.", targetSE )
    return S_ERROR( "_getSurlForLFN: Failed to get SRM compliant storage." )

  def _getTransferURLs( self, lfn, repDict, replicas, replicaDict ):
    """ prepare TURLs for given LFN and replication tree

    :param self: self reference
    :param str lfn: LFN
    :param dict repDict: replication dictionary (one tree branch)
    :param replicas: source SE list (unused, kept for interface compatibility)
    :param dict replicaDict: SE -> PFN map of valid replicas
    """
    hopSourceSE = repDict["SourceSE"]
    hopTargetSE = repDict["TargetSE"]
    hopAncestor = repDict["Ancestor"]

    # # get targetSURL
    res = self._getSurlForLFN( hopTargetSE, lfn )
    if not res['OK']:
      self.log.error( "_getTransferURLs: %s" % res['Message'] )
      return res
    targetSURL = res['Value']

    status = "Waiting"

    # # get the sourceSURL
    if hopAncestor:
      status = "Waiting#%s" % ( hopAncestor )
    res = self._getSurlForLFN( hopSourceSE, lfn )
    # fall back to the known replica PFN when no SURL could be built
    sourceSURL = res.get( 'Value', replicaDict.get( hopSourceSE, None ) )
    if not sourceSURL:
      self.log.error( "_getTransferURLs: %s" % res['Message'] )
      return res

    return S_OK( ( sourceSURL, targetSURL, status ) )
コード例 #43
0
class ReplicationScheduler(AgentModule):
    """ Agent that pulls 'transfer' requests from the RequestDB and schedules
        each Waiting file onto FTS channels in the TransferDB, using the
        StrategyHandler to build a replication tree per file.
    """

    def initialize(self):
        """ Instantiate the DB/client helpers used by execute() and force the
            DataManager shifter proxy.
        """
        self.section = PathFinder.getAgentSection(AGENT_NAME)
        self.RequestDB = RequestDBMySQL()
        self.TransferDB = TransferDB()
        self.DataLog = DataLoggingClient()
        self.factory = StorageFactory()
        self.rm = ReplicaManager()

        # This sets the Default Proxy to used as that defined under
        # /Operations/Shifter/DataManager
        # the shifterProxy option in the Configuration can be used to change this default.
        self.am_setOption('shifterProxy', 'DataManager')

        return S_OK()

    def execute(self):
        """ The main agent execution method """

        # This allows dynamic changing of the throughput timescale
        self.throughputTimescale = self.am_getOption('ThroughputTimescale',
                                                     3600)
        # NOTE(review): the CS option read just above is immediately overridden
        # by this hard-coded value — confirm whether that is intended
        self.throughputTimescale = 60 * 60 * 1
        #print 'ThroughputTimescale:',self.throughputTimescale
        ######################################################################################
        #
        #  Obtain information on the current state of the channel queues
        #

        res = self.TransferDB.getChannelQueues()
        if not res['OK']:
            errStr = "ReplicationScheduler._execute: Failed to get channel queues from TransferDB."
            gLogger.error(errStr, res['Message'])
            return S_OK()
        if not res['Value']:
            gLogger.info(
                "ReplicationScheduler._execute: No active channels found for replication."
            )
            return S_OK()
        channels = res['Value']

        res = self.TransferDB.getChannelObservedThroughput(
            self.throughputTimescale)
        if not res['OK']:
            errStr = "ReplicationScheduler._execute: Failed to get observed throughput from TransferDB."
            gLogger.error(errStr, res['Message'])
            return S_OK()
        if not res['Value']:
            gLogger.info(
                "ReplicationScheduler._execute: No active channels found for replication."
            )
            return S_OK()
        bandwidths = res['Value']

        self.strategyHandler = StrategyHandler(bandwidths, channels,
                                               self.section)

        # loop over requests until none remain or one is seen twice;
        # the loop is exited via the return statements below, not via the flag
        processedRequests = []
        requestsPresent = True
        while requestsPresent:

            ######################################################################################
            #
            #  The first step is to obtain a transfer request from the RequestDB which should be scheduled.
            #

            gLogger.info(
                "ReplicationScheduler._execute: Contacting RequestDB for suitable requests."
            )
            res = self.RequestDB.getRequest('transfer')
            if not res['OK']:
                # NOTE(review): 'continue' re-polls immediately on a persistent
                # DB error — potential tight loop; confirm intended
                gLogger.error(
                    "ReplicationScheduler._execute: Failed to get a request list from RequestDB.",
                    res['Message'])
                continue
            if not res['Value']:
                gLogger.info(
                    "ReplicationScheduler._execute: No requests found in RequestDB."
                )
                requestsPresent = False
                return S_OK()
            requestString = res['Value']['RequestString']
            requestName = res['Value']['RequestName']
            gLogger.info(
                "ReplicationScheduler._execute: Obtained Request %s from RequestDB."
                % (requestName))

            ######################################################################################
            #
            #  The request must then be parsed to obtain the sub-requests, their attributes and files.
            #

            logStr = 'ReplicationScheduler._execute: Parsing Request %s.' % (
                requestName)
            gLogger.info(logStr)
            oRequest = RequestContainer(requestString)
            res = oRequest.getAttribute('RequestID')
            if not res['OK']:
                gLogger.error(
                    'ReplicationScheduler._execute: Failed to get requestID.',
                    res['Message'])
                return S_ERROR(
                    'ReplicationScheduler._execute: Failed to get number of sub-requests.'
                )
            requestID = res['Value']
            if requestID in processedRequests:
                # Break the loop once we have iterated once over all requests
                res = self.RequestDB.updateRequest(requestName, requestString)
                if not res['OK']:
                    gLogger.error("Failed to update request",
                                  "%s %s" % (requestName, res['Message']))
                return S_OK()

            processedRequests.append(requestID)

            res = oRequest.getNumSubRequests('transfer')
            if not res['OK']:
                gLogger.error(
                    'ReplicationScheduler._execute: Failed to get number of sub-requests.',
                    res['Message'])
                return S_ERROR(
                    'ReplicationScheduler._execute: Failed to get number of sub-requests.'
                )
            numberRequests = res['Value']
            gLogger.info(
                "ReplicationScheduler._execute: '%s' found with %s sub-requests."
                % (requestName, numberRequests))

            ######################################################################################
            #
            #  The important request attributes are the source and target SEs.
            #

            for ind in range(numberRequests):
                gLogger.info(
                    "ReplicationScheduler._execute: Treating sub-request %s from '%s'."
                    % (ind, requestName))
                attributes = oRequest.getSubRequestAttributes(
                    ind, 'transfer')['Value']
                if attributes['Status'] != 'Waiting':
                    #  If the sub-request is already in terminal state
                    gLogger.info(
                        "ReplicationScheduler._execute: Sub-request %s is status '%s' and  not to be executed."
                        % (ind, attributes['Status']))
                    continue

                sourceSE = attributes['SourceSE']
                targetSE = attributes['TargetSE']
                # NOTE(review): if targetSE is not a string, targetSEs stays
                # unbound from a previous iteration (or raises NameError on the
                # first one) — confirm TargetSE is always a string here
                """ This section should go in the transfer request class """
                if type(targetSE) in types.StringTypes:
                    if re.search(',', targetSE):
                        targetSEs = targetSE.split(',')
                    else:
                        targetSEs = [targetSE]
                """----------------------------------------------------- """
                operation = attributes['Operation']
                reqRepStrategy = None
                if operation in self.strategyHandler.getSupportedStrategies():
                    reqRepStrategy = operation

                ######################################################################################
                #
                # Then obtain the file attribute of interest are the  LFN and FileID
                #

                res = oRequest.getSubRequestFiles(ind, 'transfer')
                if not res['OK']:
                    gLogger.error(
                        'ReplicationScheduler._execute: Failed to obtain sub-request files.',
                        res['Message'])
                    continue
                files = res['Value']
                gLogger.info(
                    "ReplicationScheduler._execute: Sub-request %s found with %s files."
                    % (ind, len(files)))
                filesDict = {}
                # NOTE(review): 'file' shadows the Python 2 builtin
                for file in files:
                    lfn = file['LFN']
                    if file['Status'] != 'Waiting':
                        gLogger.debug(
                            "ReplicationScheduler._execute: %s will not be scheduled because it is %s."
                            % (lfn, file['Status']))
                    else:
                        fileID = file['FileID']
                        filesDict[lfn] = fileID
                if not filesDict:
                    gLogger.info(
                        "ReplicationScheduler._execute: No Waiting files found for request"
                    )
                    continue
                notSched = len(files) - len(filesDict)
                if notSched:
                    gLogger.info(
                        "ReplicationScheduler._execute: %d files found not Waiting"
                        % notSched)

                ######################################################################################
                #
                #  Now obtain replica information for the files associated to the sub-request.
                #

                lfns = filesDict.keys()
                gLogger.info(
                    "ReplicationScheduler._execute: Obtaining replica information for %d sub-request files."
                    % len(lfns))
                res = self.rm.getCatalogReplicas(lfns)
                if not res['OK']:
                    gLogger.error(
                        "ReplicationScheduler._execute: Failed to get replica information.",
                        res['Message'])
                    continue
                for lfn, failure in res['Value']['Failed'].items():
                    gLogger.error(
                        "ReplicationScheduler._execute: Failed to get replicas.",
                        '%s: %s' % (lfn, failure))
                replicas = res['Value']['Successful']
                if not replicas.keys():
                    gLogger.error(
                        "ReplicationScheduler._execute: Failed to get replica information for all files."
                    )
                    continue

                ######################################################################################
                #
                #  Now obtain the file sizes for the files associated to the sub-request.
                #

                lfns = replicas.keys()
                gLogger.info(
                    "ReplicationScheduler._execute: Obtaining file sizes for %d sub-request files."
                    % len(lfns))
                res = self.rm.getCatalogFileMetadata(lfns)
                if not res['OK']:
                    gLogger.error(
                        "ReplicationScheduler._execute: Failed to get file size information.",
                        res['Message'])
                    continue
                for lfn, failure in res['Value']['Failed'].items():
                    gLogger.error(
                        'ReplicationScheduler._execute: Failed to get file size.',
                        '%s: %s' % (lfn, failure))
                metadata = res['Value']['Successful']
                if not metadata.keys():
                    gLogger.error(
                        "ReplicationScheduler._execute: Failed to get metadata for all files."
                    )
                    continue

                ######################################################################################
                #
                # For each LFN determine the replication tree
                #

                for lfn in sortList(metadata.keys()):
                    fileSize = metadata[lfn]['Size']
                    lfnReps = replicas[lfn]
                    fileID = filesDict[lfn]

                    # only replicate to targets that do not already hold the file
                    targets = []
                    for targetSE in targetSEs:
                        if targetSE in lfnReps.keys():
                            gLogger.debug(
                                "ReplicationScheduler.execute: %s already present at %s."
                                % (lfn, targetSE))
                        else:
                            targets.append(targetSE)
                    if not targets:
                        gLogger.info(
                            "ReplicationScheduler.execute: %s present at all targets."
                            % lfn)
                        oRequest.setSubRequestFileAttributeValue(
                            ind, 'transfer', lfn, 'Status', 'Done')
                        continue
                    if not lfnReps:
                        gLogger.error(
                            "ReplicationScheduler.execute: The file has no replicas.",
                            lfn)
                        continue
                    res = self.strategyHandler.determineReplicationTree(
                        sourceSE,
                        targets,
                        lfnReps,
                        fileSize,
                        strategy=reqRepStrategy)
                    if not res['OK']:
                        gLogger.error(
                            "ReplicationScheduler.execute: Failed to determine replication tree.",
                            res['Message'])
                        continue
                    tree = res['Value']

                    ######################################################################################
                    #
                    # For each item in the replication tree obtain the source and target SURLS
                    #

                    # NOTE(review): 'dict' shadows the builtin for this loop body
                    for channelID, dict in tree.items():
                        gLogger.info(
                            "ReplicationScheduler.execute: processing for channel %d %s"
                            % (channelID, str(dict)))
                        hopSourceSE = dict['SourceSE']
                        hopDestSE = dict['DestSE']
                        hopAncestor = dict['Ancestor']

                        # Get the sourceSURL
                        if hopAncestor:
                            # NOTE(review): status format 'Waiting%s' differs from
                            # the 'Waiting#%s' used elsewhere — confirm consumers
                            status = 'Waiting%s' % (hopAncestor)
                            res = self.obtainLFNSURL(hopSourceSE, lfn)
                            if not res['OK']:
                                errStr = res['Message']
                                gLogger.error(errStr)
                                return S_ERROR(errStr)
                            sourceSURL = res['Value']
                        else:
                            status = 'Waiting'
                            res = self.resolvePFNSURL(hopSourceSE,
                                                      lfnReps[hopSourceSE])
                            if not res['OK']:
                                sourceSURL = lfnReps[hopSourceSE]
                            else:
                                sourceSURL = res['Value']

                        # Get the targetSURL
                        res = self.obtainLFNSURL(hopDestSE, lfn)
                        if not res['OK']:
                            errStr = res['Message']
                            gLogger.error(errStr)
                            return S_ERROR(errStr)
                        targetSURL = res['Value']

                        ######################################################################################
                        #
                        # For each item in the replication tree add the file to the channel
                        #
                        res = self.TransferDB.addFileToChannel(
                            channelID,
                            fileID,
                            hopSourceSE,
                            sourceSURL,
                            hopDestSE,
                            targetSURL,
                            fileSize,
                            fileStatus=status)
                        if not res['OK']:
                            errStr = res['Message']
                            gLogger.error(
                                "ReplicationScheduler._execute: Failed to add File to Channel.",
                                "%s %s" % (fileID, channelID))
                            return S_ERROR(errStr)
                        res = self.TransferDB.addFileRegistration(
                            channelID, fileID, lfn, targetSURL, hopDestSE)
                        if not res['OK']:
                            errStr = res['Message']
                            gLogger.error(
                                "ReplicationScheduler._execute: Failed to add File registration.",
                                "%s %s" % (fileID, channelID))
                            # roll back the channel entry added just above
                            result = self.TransferDB.removeFileFromChannel(
                                channelID, fileID)
                            if not result['OK']:
                                errStr += result['Message']
                                gLogger.error(
                                    "ReplicationScheduler._execute: Failed to remove File.",
                                    "%s %s" % (fileID, channelID))
                            return S_ERROR(errStr)
                        oRequest.setSubRequestFileAttributeValue(
                            ind, 'transfer', lfn, 'Status', 'Scheduled')
                    res = self.TransferDB.addReplicationTree(fileID, tree)

                if oRequest.isSubRequestEmpty(ind, 'transfer')['Value']:
                    oRequest.setSubRequestStatus(ind, 'transfer', 'Scheduled')

            ################################################
            #  Generate the new request string after operation
            requestString = oRequest.toXML()['Value']
            res = self.RequestDB.updateRequest(requestName, requestString)
            if not res['OK']:
                gLogger.error(
                    "ReplicationScheduler._execute: Failed to update request",
                    "%s %s" % (requestName, res['Message']))

    def obtainLFNSURL(self, targetSE, lfn):
        """ Creates the targetSURL for the storage and LFN supplied.

        The SE is instantiated restricted to the SRM2 protocol; the first
        storage object able to produce a URL wins.
        """
        res = self.factory.getStorages(targetSE, protocolList=['SRM2'])
        if not res['OK']:
            errStr = 'ReplicationScheduler._execute: Failed to create SRM2 storage for %s: %s. ' % (
                targetSE, res['Message'])
            gLogger.error(errStr)
            return S_ERROR(errStr)
        storageObjects = res['Value']['StorageObjects']
        for storageObject in storageObjects:
            res = storageObject.getCurrentURL(lfn)
            if res['OK']:
                return res
        gLogger.error(
            'ReplicationScheduler._execute: Failed to get SRM compliant storage.',
            targetSE)
        return S_ERROR(
            'ReplicationScheduler._execute: Failed to get SRM compliant storage.'
        )

    def resolvePFNSURL(self, sourceSE, pfn):
        """ Resolve the supplied PFN into the protocol URL valid for the source SE.
    """
        res = self.rm.getPfnForProtocol([pfn], sourceSE)
        if not res['OK']:
            return res
        if pfn in res['Value']['Failed'].keys():
            return S_ERROR(res['Value']['Failed'][pfn])
        return S_OK(res['Value']['Successful'][pfn])
コード例 #44
0
ファイル: FTSClient.py プロジェクト: SimonBidwell/DIRAC
class FTSClient( Client ):
  """
  .. class:: FTSClient

  """

  def __init__( self, useCertificates = False ):
    """Initialise the client, its helper components and the RPC connection.

    :param self: self reference
    :param bool useCertificates: flag to enable/disable certificates
    """
    Client.__init__( self )
    self.log = gLogger.getSubLogger( "DataManagement/FTSClient" )
    self.setServer( "DataManagement/FTSManager" )

    # helper clients used by the scheduling/validation methods
    self.ftsValidator = FTSValidator()
    self.dataManager = DataManager()
    self.storageFactory = StorageFactory()

    # direct RPC connection to the FTSManager service
    ftsManagerURL = PathFinder.getServiceURL( "DataManagement/FTSManager" )
    if not ftsManagerURL:
      raise RuntimeError( "CS option DataManagement/FTSManager URL is not set!" )
    self.ftsManager = RPCClient( ftsManagerURL )

  def getFTSFileList( self, statusList = None, limit = None ):
    """Fetch FTSFiles whose status is in :statusList: (up to :limit: entries).

    :param list statusList: statuses to select (default: [ "Waiting" ])
    :param int limit: maximum number of records (default: 1000)
    """
    statusList = statusList or [ "Waiting" ]
    limit = limit or 1000
    res = self.ftsManager.getFTSFileList( statusList, limit )
    if not res['OK']:
      self.log.error( "getFTSFileList: %s" % res['Message'] )
      return res
    # de-serialize the returned dictionaries into FTSFile objects
    return S_OK( [ FTSFile( fileDict ) for fileDict in res['Value'] ] )

  def getFTSJobList( self, statusList = None, limit = None ):
    """Fetch FTSJobs whose status is in :statusList: (up to :limit: entries).

    :param list statusList: statuses to select (default: init + transfer states)
    :param int limit: maximum number of records (default: 500)
    """
    if not statusList:
      statusList = list( FTSJob.INITSTATES + FTSJob.TRANSSTATES )
    limit = limit or 500
    res = self.ftsManager.getFTSJobList( statusList, limit )
    if not res['OK']:
      self.log.error( "getFTSJobList: %s" % res['Message'] )
      return res
    # de-serialize the returned dictionaries into FTSJob objects
    return S_OK( [ FTSJob( jobDict ) for jobDict in res['Value'] ] )

  def getFTSFilesForRequest( self, requestID, statusList = None ):
    """Read FTSFiles attached to a given request.

    :param int requestID: ReqDB.Request.RequestID
    :param list statusList: List of statuses (default: Waiting)
    """
    res = self.ftsManager.getFTSFilesForRequest( requestID, statusList )
    if not res['OK']:
      self.log.error( "getFTSFilesForRequest: %s" % res['Message'] )
      return res
    return S_OK( [ FTSFile( fileDict ) for fileDict in res['Value'] ] )

  def getAllFTSFilesForRequest( self, requestID ):
    """Read all FTSFiles for a given request, regardless of status.

    :param int requestID: ReqDB.Request.RequestID
    """
    ftsFiles = self.ftsManager.getAllFTSFilesForRequest( requestID )
    if not ftsFiles['OK']:
      # Fixed: error message previously referenced getFTSFilesForRequest
      self.log.error( "getAllFTSFilesForRequest: %s" % ftsFiles['Message'] )
      return ftsFiles
    return S_OK( [ FTSFile( ftsFileDict ) for ftsFileDict in ftsFiles['Value'] ] )

  def getFTSJobsForRequest( self, requestID, statusList = None ):
    """Return FTSJob instances attached to :requestID: with status in :statusList:.

    :param int requestID: ReqDB.Request.RequestID
    :param list statusList: list with FTSJob statuses

    :return: [ FTSJob, FTSJob, ... ]
    """
    if not statusList:
      statusList = list( FTSJob.INITSTATES + FTSJob.TRANSSTATES )
    res = self.ftsManager.getFTSJobsForRequest( requestID, statusList )
    if not res['OK']:
      self.log.error( "getFTSJobsForRequest: %s" % res['Message'] )
      return res
    return S_OK( [ FTSJob( jobDict ) for jobDict in res['Value'] ] )

  def getFTSFile( self, ftsFileID = None ):
    """Fetch a single FTSFile by ID.

    :param int ftsFileID: FTSFileID
    :return: S_OK( FTSFile ), S_OK( None ) when no record exists,
             or the S_ERROR from the service call
    """
    getFile = self.ftsManager.getFTSFile( ftsFileID )
    if not getFile['OK']:
      self.log.error( getFile['Message'] )
      # Fixed: previously fell through and crashed on getFile['Value'] /
      # an unbound ftsFile instead of propagating the error
      return getFile
    # # de-serialize; an empty 'Value' means the record was not found
    ftsFile = FTSFile( getFile['Value'] ) if getFile['Value'] else None
    return S_OK( ftsFile )

  def putFTSJob( self, ftsJob ):
    """Validate and store :ftsJob: in the FTSDB.

    :param FTSJob ftsJob: FTSJob instance
    """
    isValid = self.ftsValidator.validate( ftsJob )
    if not isValid['OK']:
      self.log.error( isValid['Message'] )
      return isValid
    # serialize before shipping over RPC
    serialized = ftsJob.toJSON()
    if not serialized['OK']:
      self.log.error( serialized['Message'] )
      return serialized
    return self.ftsManager.putFTSJob( serialized['Value'] )

  def getFTSJob( self, ftsJobID ):
    """Fetch an FTS job and mark it 'Assigned'.

    :param int ftsJobID: FTSJobID
    """
    getJob = self.ftsManager.getFTSJob( ftsJobID )
    if not getJob['OK']:
      self.log.error( getJob['Message'] )
      return getJob
    # flag the job as taken; a failure here is only logged, the job
    # dictionary is returned regardless
    setStatus = self.ftsManager.setFTSJobStatus( ftsJobID, 'Assigned' )
    if not setStatus['OK']:
      self.log.error( setStatus['Message'] )
    return getJob

  def peekFTSJob( self, ftsJobID ):
    """Fetch an FTSJob without changing its status.

    :param int ftsJobID: FTSJobID
    """
    res = self.ftsManager.getFTSJob( ftsJobID )
    if not res['OK']:
      self.log.error( res['Message'] )
    return res

  def deleteFTSJob( self, ftsJobID ):
    """Remove an FTSJob from the FTSDB.

    :param int ftsJob: FTSJobID
    """
    res = self.ftsManager.deleteFTSJob( ftsJobID )
    if not res['OK']:
      self.log.error( res['Message'] )
    return res

  def getFTSJobIDs( self, statusList = None ):
    """Return FTSJobIDs of jobs whose status is in :statusList:.

    :param list statusList: statuses to select (default: Submitted/Ready/Active)
    """
    res = self.ftsManager.getFTSJobIDs( statusList or [ "Submitted", "Ready", "Active" ] )
    if not res['OK']:
      self.log.error( res['Message'] )
    return res

  def getFTSFileIDs( self, statusList = None ):
    """Return FTSFileIDs of files whose status is in :statusList:.

    :param list statusList: statuses to select (default: [ "Waiting" ])
    """
    res = self.ftsManager.getFTSFileIDs( statusList or [ "Waiting" ] )
    if not res['OK']:
      self.log.error( res['Message'] )
    return res

  def getFTSHistory( self ):
    """Return the FTS history snapshot as FTSHistoryView objects."""
    res = self.ftsManager.getFTSHistory()
    if not res['OK']:
      self.log.error( res['Message'] )
      return res
    return S_OK( [ FTSHistoryView( entry ) for entry in res['Value'] ] )

  def getDBSummary( self ):
    """Return the FTSDB summary."""
    res = self.ftsManager.getDBSummary()
    if not res['OK']:
      self.log.error( "getDBSummary: %s" % res['Message'] )
    return res

  def setFTSFilesWaiting( self, operationID, sourceSE, opFileIDList = None ):
    """Promote waiting FTSFiles from 'Waiting#SourceSE' to 'Waiting'.

    :param int operationID: ReqDB.Operation.OperationID
    :param str sourceSE: source SE name
    :param opFileIDList: [ ReqDB.File.FileID, ... ]
    """
    # plain delegation to the FTSManager service
    return self.ftsManager.setFTSFilesWaiting( operationID, sourceSE, opFileIDList )

  def deleteFTSFiles( self, operationID, opFileIDList = None ):
    """Delete FTSFiles so the operation can be rescheduled.

    :param int operationID: ReqDB.Operation.OperationID
    :param list opFileIDList: [ ReqDB.File.FileID, ... ]
    """
    # plain delegation to the FTSManager service
    return self.ftsManager.deleteFTSFiles( operationID, opFileIDList )

  def ftsSchedule( self, requestID, operationID, opFileList ):
    """ schedule lfn for FTS job

    For each file: find its active replicas, ask the FTSManager for a
    replication tree, build one FTSFile per hop of that tree, and finally
    push the whole FTSFile list back to the FTSManager in a single call.

    :param int requestID: RequestDB.Request.RequestID
    :param int operationID: RequestDB.Operation.OperationID
    :param list opFileList: list of tuples ( File.toJSON()['Value'], sourcesList, targetList )
    :return: S_OK( { "Successful": [ fileID, ... ], "Failed": { fileID: reason } } )
             or S_ERROR when cleanup or the final put fails wholesale
    """

    # drop any stale FTSFiles left over from a previous scheduling attempt
    fileIDs = [int( fileJSON.get( 'FileID', 0 ) ) for fileJSON, _sourceSEs, _targetSEs in opFileList ]
    res = self.ftsManager.cleanUpFTSFiles( requestID, fileIDs )
    if not res['OK']:
      self.log.error( "ftsSchedule: %s" % res['Message'] )
      return S_ERROR( "ftsSchedule: %s" % res['Message'] )

    ftsFiles = []

    # # this will be returned on success
    result = { "Successful": [], "Failed": {} }

    for fileJSON, sourceSEs, targetSEs in opFileList:

      lfn = fileJSON.get( "LFN", "" )
      size = int( fileJSON.get( "Size", 0 ) )
      fileID = int( fileJSON.get( "FileID", 0 ) )
      opID = int( fileJSON.get( "OperationID", 0 ) )

      self.log.verbose( "ftsSchedule: LFN=%s FileID=%s OperationID=%s sources=%s targets=%s" % ( lfn, fileID, opID,
                                                                                                 sourceSEs,
                                                                                                 targetSEs ) )

      # only files with at least one active replica can be transferred
      res = self.dataManager.getActiveReplicas( lfn )
      if not res['OK']:
        self.log.error( "ftsSchedule: %s" % res['Message'] )
        result["Failed"][fileID] = res['Message']
        continue
      replicaDict = res['Value']

      if lfn in replicaDict["Failed"] and lfn not in replicaDict["Successful"]:
        result["Failed"][fileID] = "no active replicas found"
        continue
      replicaDict = replicaDict["Successful"].get( lfn, {} )
      # # use valid replicas only
      validReplicasDict = dict( [ ( se, pfn ) for se, pfn in replicaDict.items() if se in sourceSEs ] )

      if not validReplicasDict:
        self.log.warn( "No active replicas found in sources" )
        result["Failed"][fileID] = "no active replicas found in sources"
        continue

      # ask the service how to route the file from sources to targets
      tree = self.ftsManager.getReplicationTree( sourceSEs, targetSEs, size )
      if not tree['OK']:
        self.log.error( "ftsSchedule: %s cannot be scheduled: %s" % ( lfn, tree['Message'] ) )
        result["Failed"][fileID] = tree['Message']
        continue
      tree = tree['Value']

      self.log.verbose( "LFN=%s tree=%s" % ( lfn, tree ) )

      # one FTSFile is created per hop in the replication tree
      for repDict in tree.values():
        self.log.verbose( "Strategy=%s Ancestor=%s SourceSE=%s TargetSE=%s" % ( repDict["Strategy"],
                                                                                repDict["Ancestor"],
                                                                                repDict["SourceSE"],
                                                                                repDict["TargetSE"] ) )
        transferSURLs = self._getTransferURLs( lfn, repDict, sourceSEs, validReplicasDict )
        if not transferSURLs['OK']:
          result["Failed"][fileID] = transferSURLs['Message']
          continue

        sourceSURL, targetSURL, fileStatus = transferSURLs['Value']
        # a transfer onto itself makes no sense; refuse it
        if sourceSURL == targetSURL:
          result["Failed"][fileID] = "sourceSURL equals to targetSURL for %s" % lfn
          continue

        self.log.verbose( "sourceURL=%s targetURL=%s FTSFile.Status=%s" % ( sourceSURL, targetSURL, fileStatus ) )

        # copy the file metadata the FTS layer needs, then attach hop info
        ftsFile = FTSFile()
        for key in ( "LFN", "FileID", "OperationID", "Checksum", "ChecksumType", "Size" ):
          if fileJSON.get( key ):
            setattr( ftsFile, key, fileJSON.get( key ) )
        ftsFile.RequestID = requestID
        ftsFile.OperationID = operationID
        ftsFile.SourceSURL = sourceSURL
        ftsFile.TargetSURL = targetSURL
        ftsFile.SourceSE = repDict["SourceSE"]
        ftsFile.TargetSE = repDict["TargetSE"]
        ftsFile.Status = fileStatus
        ftsFiles.append( ftsFile )

    if not ftsFiles:
      self.log.info( "ftsSchedule: no FTSFiles to put for request %d" % requestID )
      return S_OK( result )

    # ship all created FTSFiles to the service in one call
    ftsFilesJSONList = [ftsFile.toJSON()['Value'] for ftsFile in ftsFiles]
    res = self.ftsManager.putFTSFileList( ftsFilesJSONList )
    if not res['OK']:
      self.log.error( "ftsSchedule: %s" % res['Message'] )
      return S_ERROR( "ftsSchedule: %s" % res['Message'] )

    # every file that did not fail above is considered scheduled
    result['Successful'] += [ fileID for fileID in fileIDs if fileID not in result['Failed']]

    # # if we land here some files have been properly scheduled
    return S_OK( result )

  ################################################################################################################
  # Some utilities function

  def _getSurlForLFN( self, targetSE, lfn ):
    """Build the SRM2 SURL for :lfn: at :targetSE:.

    :param self: self reference
    :param str targetSE: target SE
    :param str lfn: LFN
    """
    res = self.storageFactory.getStorages( targetSE, protocolList = ["SRM2"] )
    if not res['OK']:
      errStr = "_getSurlForLFN: Failed to create SRM2 storage for %s: %s" % ( targetSE, res['Message'] )
      self.log.error( errStr )
      return S_ERROR( errStr )
    # return the URL from the first storage object that can produce one
    for storage in res['Value']["StorageObjects"]:
      surl = storage.getCurrentURL( lfn )
      if surl['OK']:
        return surl
    self.log.error( "_getSurlForLFN: Failed to get SRM compliant storage.", targetSE )
    return S_ERROR( "_getSurlForLFN: Failed to get SRM compliant storage." )

  def _getTransferURLs( self, lfn, repDict, replicas, replicaDict ):
    """Prepare source/target TURLs for one hop of a replication tree.

    :param self: self reference
    :param str lfn: LFN
    :param dict repDict: replication dictionary for this hop
    :param dict replicas: LFN replicas
    """
    hopSourceSE = repDict["SourceSE"]
    hopTargetSE = repDict["TargetSE"]
    hopAncestor = repDict["Ancestor"]

    # # get targetSURL first -- without it the hop cannot be scheduled
    res = self._getSurlForLFN( hopTargetSE, lfn )
    if not res['OK']:
      self.log.error( "_getTransferURLs: %s" % res['Message'] )
      return res
    targetSURL = res['Value']

    # a hop with an ancestor has to wait for that ancestor to finish
    status = ( "Waiting#%s" % ( hopAncestor ) ) if hopAncestor else "Waiting"

    # # get the sourceSURL; fall back to the known replica PFN when the
    # # SURL construction fails
    res = self._getSurlForLFN( hopSourceSE, lfn )
    sourceSURL = res.get( 'Value', replicaDict.get( hopSourceSE, None ) )
    if not sourceSURL:
      self.log.error( "_getTransferURLs: %s" % res['Message'] )
      return res

    return S_OK( ( sourceSURL, targetSURL, status ) )