    def __init__(self, numThread=10):

        MTRunner.__init__(self, name='lcg_output_downloader', data=Data(
            collection=[]), algorithm=LCGOutputDownloadAlgorithm())

        self.keepAlive = True
        self.numThread = numThread
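
All of the snippets in this listing share one pattern: subclass Algorithm with a process() method that handles a single work item and records its output through __appendResult__(), wrap the work items in a Data collection, then drive everything with an MTRunner. Below is a minimal sketch of that pattern using only the calls that appear in the examples themselves; the DoubleItAlgorithm class and its sample data are invented for illustration, and the comments on join() and getDoneList() are inferences from these snippets rather than documented Ganga behaviour.

class DoubleItAlgorithm(Algorithm):

    def __init__(self):
        Algorithm.__init__(self)

    def process(self, item):
        # one item from the Data collection; return True on success
        key, value = item
        self.__appendResult__(key, value * 2)  # later retrieved via getResults()
        return True

myData = Data(collection=[('a', 1), ('b', 2)])
runner = MTRunner(name='example_runner',
                  algorithm=DoubleItAlgorithm(),
                  data=myData,
                  numThread=4)
runner.start()
runner.join(-1)  # the examples pass -1, apparently meaning "wait until done"

if len(runner.getDoneList()) < 2:
    print('some items were not processed successfully')
results = runner.getResults()  # e.g. {'a': 2, 'b': 4}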
Example #2
    def __init__(self, numThread=10):

        MTRunner.__init__(self,
                          name='lcg_output_downloader',
                          data=Data(collection=[]),
                          algorithm=LCGOutputDownloadAlgorithm())

        self.keepAlive = True
        self.numThread = numThread
Example #3
    def __init__(self, numThread=10):

        logger.debug('starting new MTRunner instance: %s.', "saga_file_transfer_manager")

        MTRunner.__init__(self, name='saga_file_transfer_manager', 
                          data=Data(collection=[]), 
                          algorithm=SAGAFileTransferAlgorithm())
        
        self.keepAlive = True
        self.numThread = numThread
Example #4
    def __init__(self, numThread=10):

        logger.debug('starting new MTRunner instance: %s.',
                     "saga_file_transfer_manager")

        MTRunner.__init__(self,
                          name='saga_file_transfer_manager',
                          data=Data(collection=[]),
                          algorithm=SAGAFileTransferAlgorithm())

        self.keepAlive = True
        self.numThread = numThread
Example #5
    def __mt_bulk_submit__(self, node_jdls):
        '''submitting jobs in multiple threads'''

        job = self.getJobObject()

        logger.warning('submitting %d subjobs ... it may take a while' %
                       len(node_jdls))

        # the algorithm for submitting a single subjob of the bulk job
        class MyAlgorithm(Algorithm):
            def __init__(self, cred_req, masterInputWorkspace, ce, arcverbose):
                Algorithm.__init__(self)
                self.inpw = masterInputWorkspace
                self.cred_req = cred_req
                self.ce = ce
                self.arcverbose = arcverbose

            def process(self, jdl_info):
                my_sj_id = jdl_info[0]
                my_sj_jdl = jdl_info[1]

                my_sj_jid = Grid.arc_submit(my_sj_jdl, self.ce,
                                            self.arcverbose, self.cred_req)

                if not my_sj_jid:
                    return False
                else:
                    self.__appendResult__(my_sj_id, my_sj_jid)
                    return True

        mt_data = []
        for id, jdl in node_jdls.items():
            mt_data.append((id, jdl))

        myAlg = MyAlgorithm(cred_req=self.credential_requirements,
                            masterInputWorkspace=job.getInputWorkspace(),
                            ce=self.CE,
                            arcverbose=self.verbose)
        myData = Data(collection=mt_data)

        runner = MTRunner(name='arc_jsubmit',
                          algorithm=myAlg,
                          data=myData,
                          numThread=config['SubmissionThread'])
        runner.start()
        runner.join(timeout=-1)

        if len(runner.getDoneList()) < len(mt_data):
            # not all subjobs were submitted successfully; cancel the
            # ones already on the WMS immediately
            logger.error(
                'some bulk jobs not successfully (re)submitted, canceling submitted jobs on WMS'
            )
            Grid.arc_cancel_multiple(runner.getResults().values(),
                                     self.credential_requirements)
            return None
        else:
            return runner.getResults()
Example #6
    def __mt_job_prepare__(self, rjobs, subjobconfigs, masterjobconfig):
        '''preparing jobs in multiple threads'''

        logger.warning(
            'preparing %d subjobs ... it may take a while' % len(rjobs))

        # prepare the master job (i.e. create shared inputsandbox, etc.)
        master_input_sandbox = IBackend.master_prepare(self, masterjobconfig)

        # upload each master input sandbox file that exceeds the WMS sandbox limit
        for f in master_input_sandbox:
            master_input_idx = self.__check_and_prestage_inputfile__(f)

            if not master_input_idx:
                logger.error('master input sandbox preparation failed: %s' % f)
                return None

        # the algorithm for preparing a single subjob
        class MyAlgorithm(Algorithm):

            def __init__(self):
                Algorithm.__init__(self)

            def process(self, sj_info):
                my_sc = sj_info[0]
                my_sj = sj_info[1]

                try:
                    logger.debug("preparing job %s" % my_sj.getFQID('.'))
                    jdlpath = my_sj.backend.preparejob(
                        my_sc, master_input_sandbox)

                    if (not jdlpath) or (not os.path.exists(jdlpath)):
                        raise GangaException(
                            'job %s not properly prepared' % my_sj.getFQID('.'))

                    self.__appendResult__(my_sj.id, jdlpath)
                    return True
                except Exception:
                    log_user_exception()
                    return False

        mt_data = []
        for sc, sj in zip(subjobconfigs, rjobs):
            mt_data.append([sc, sj])

        myAlg = MyAlgorithm()
        myData = Data(collection=mt_data)

        runner = MTRunner(
            name='lcg_jprepare', algorithm=myAlg, data=myData, numThread=10)
        runner.start()
        runner.join(-1)

        if len(runner.getDoneList()) < len(mt_data):
            return None
        else:
            # return a JDL file dictionary with subjob ids as keys, JDL file
            # paths as values
            return runner.getResults()
Example #7
    def __mt_bulk_submit__(self, node_jdls):
        '''submitting jobs in multiple threads'''

        job = self.getJobObject()

        logger.warning(
            'submitting %d subjobs ... it may take a while' % len(node_jdls))

        # the algorithm for submitting a single subjob of the bulk job
        class MyAlgorithm(Algorithm):

            def __init__(self, gridObj, masterInputWorkspace, ce, arcverbose):
                Algorithm.__init__(self)
                self.inpw = masterInputWorkspace
                self.gridObj = gridObj
                self.ce = ce
                self.arcverbose = arcverbose

            def process(self, jdl_info):
                my_sj_id = jdl_info[0]
                my_sj_jdl = jdl_info[1]

                my_sj_jid = self.gridObj.arc_submit(
                    my_sj_jdl, self.ce, self.arcverbose)

                if not my_sj_jid:
                    return False
                else:
                    self.__appendResult__(my_sj_id, my_sj_jid)
                    return True

        mt_data = []
        for id, jdl in node_jdls.items():
            mt_data.append((id, jdl))

        myAlg = MyAlgorithm(gridObj=grids['GLITE'],
                            masterInputWorkspace=job.getInputWorkspace(),
                            ce=self.CE,
                            arcverbose=self.verbose)
        myData = Data(collection=mt_data)

        runner = MTRunner(name='arc_jsubmit', algorithm=myAlg,
                          data=myData, numThread=config['SubmissionThread'])
        runner.start()
        runner.join(timeout=-1)

        if len(runner.getDoneList()) < len(mt_data):
            # not all subjobs were submitted successfully; cancel the
            # ones already on the WMS immediately
            logger.error(
                'some bulk jobs not successfully (re)submitted, canceling submitted jobs on WMS')
            grids['GLITE'].arc_cancelMultiple(runner.getResults().values())
            return None
        else:
            return runner.getResults()
Example #8
 def handlesubmit(self, jobids, runid):
     """Submits exported jobs as identified by a list of path patterns.
     
     Keyword arguments:
     jobids -- A list of the submitted job ids.
     runid -- A UTC ID string which identifies the run.
     
     """
     # get configuration properties
     patterns = self.getoption('CoreSubmitter_Patterns')
     logger.info("Searching for job files matching patterns %s.", patterns)
     matches = self._getmatches(patterns)
     logger.info("Found %d matching job files.", len(matches))
     runner = MTRunner(
         'ThreadedSubmitterMTRunner',
         algorithm=ThreadedSubmitterAlgorithm(), 
         data=Data([(m,jobids) for m in matches]), 
         numThread=int(self.getoption('ThreadedSubmitter_numThreads'))
     )
     runner.start()
     runner.join()
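
The Data items built above are (match, jobids) tuples, so a compatible algorithm unpacks one matched job file plus the shared id list per process() call. ThreadedSubmitterAlgorithm's real implementation is not part of this listing; the stand-in below is purely hypothetical and only illustrates the item shape.

class HypotheticalSubmitterAlgorithm(Algorithm):

    def process(self, item):
        # each item is one matched job file plus the shared jobids list
        match, jobids = item
        logger.info("would submit job file %s here", match)
        self.__appendResult__(match, match)  # record the item as handled
        return True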
Example #9
    def impl_delete(self, cred_req, files=[], opts=''):
        """
        Deletes multiple files from remote grid storages. 
        """

        # the algorithm of deleting one file from the remote storage
        class MyAlgorithm(Algorithm):

            def __init__(self, cacheObj, cred_req):
                Algorithm.__init__(self)
                self.cacheObj = cacheObj
                self.shell = getShell(cred_req)

            def process(self, file):

                guid = file.id

                lfc_host = file.attributes['lfc_host']

                self.shell.env['LFC_HOST'] = lfc_host

                self.cacheObj.logger.debug(
                    'delete file with LFC_HOST: %s' % self.shell.env['LFC_HOST'])

                cmd = 'lcg-del -a -t 60 --vo %s %s' % (self.cacheObj.vo, guid)

                rc, output, m = self.cacheObj.__cmd_retry_loop__(
                    self.shell, cmd, self.cacheObj.max_try)

                if rc != 0:
                    return False
                else:
                    self.__appendResult__(file.id, file)
                    return True

        myAlg = MyAlgorithm(cacheObj=self, cred_req=cred_req)
        myData = Data(collection=files)

        runner = MTRunner(
            name='sandboxcache_lcgdel', algorithm=myAlg, data=myData)
        runner.start()
        runner.join(-1)

        # update the local index file
        del_files = runner.getResults().values()
        all_files = self.get_cached_files()

        left_files = []
        for f in all_files:
            if f not in del_files:
                left_files.append(f)

        self.impl_bookkeepUploadedFiles(left_files, append=False)

        return del_files
Example #10
    def impl_download(self, cred_req, files=[], dest_dir=None, opts=''):
        """
        Downloads multiple files from remote grid storages to 
        a local directory.
        """
        if not dest_dir:
            dest_dir = os.getcwd()
        self.logger.debug('download file to: %s', dest_dir)

        # the algorithm of downloading one file to a local directory
        class MyAlgorithm(Algorithm):

            def __init__(self, cacheObj, cred_req):
                Algorithm.__init__(self)
                self.cacheObj = cacheObj
                self.shell = getShell(cred_req)

            def process(self, file):

                guid = file.id
                lfn = file.attributes['local_fpath']
                lfc_host = file.attributes['lfc_host']
                fname = os.path.basename(urlparse(lfn)[2])

                self.shell.env['LFC_HOST'] = lfc_host
                self.cacheObj.logger.debug(
                    'download file with LFC_HOST: %s', self.shell.env['LFC_HOST'])

                cmd = 'lcg-cp -t %d --vo %s ' % (
                    self.cacheObj.timeout, self.cacheObj.vo)
                if self.cacheObj.se_type:
                    cmd += '-T %s ' % self.cacheObj.se_type
                cmd += '%s file://%s/%s' % (guid, dest_dir, fname)

                self.cacheObj.logger.debug('download file: %s', cmd)

                rc, output, m = self.cacheObj.__cmd_retry_loop__(
                    self.shell, cmd, self.cacheObj.max_try)

                if rc != 0:
                    return False
                else:
                    self.__appendResult__(file.id, file)
                    return True

        myAlg = MyAlgorithm(cacheObj=self, cred_req=cred_req)
        myData = Data(collection=files)

        runner = MTRunner(
            name='sandboxcache_lcgcp', algorithm=myAlg, data=myData)
        runner.start()
        runner.join(-1)

        return runner.getResults().values()
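
Several of these cache methods funnel their shell commands through __cmd_retry_loop__(shell, cmd, max_try), which evidently returns an (rc, output, message) triple with rc == 0 meaning success. Its real implementation is not shown in this listing; the sketch below merely restates that contract, and the shell.cmd1() call is an assumption about the shell object's interface.

def cmd_retry_loop(shell, cmd, max_try):
    # rerun `cmd` until it succeeds or max_try attempts are exhausted;
    # shell.cmd1() returning (rc, output, message) is assumed here
    rc, output, m = -1, '', None
    for attempt in range(max_try):
        rc, output, m = shell.cmd1(cmd)
        if rc == 0:
            break
    return rc, output, m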
Example #11
    def impl_delete(self, files=[], opts=''):
        """
        Deletes multiple files from remote gridftp server
        """

        shell = getShell(self.middleware)

        # the algorithm of deleting one file from the remote gridftp server
        class MyAlgorithm(Algorithm):
            def __init__(self, cacheObj):
                Algorithm.__init__(self)
                self.cacheObj = cacheObj

            def process(self, file):

                destURI = file.id

                uri_info = urisplit(destURI)

                cmd = 'uberftp %s "rm %s"' % (uri_info[1], uri_info[2])

                rc, output, m = self.cacheObj.__cmd_retry_loop__(
                    shell, cmd, self.cacheObj.max_try)

                if rc != 0:
                    self.cacheObj.logger.error(output)
                    return False
                else:
                    self.__appendResult__(file.id, file)
                    return True

        myAlg = MyAlgorithm(cacheObj=self)
        myData = Data(collection=files)

        runner = MTRunner(name='sandboxcache_lcgdel',
                          algorithm=myAlg,
                          data=myData)
        runner.start()
        runner.join(-1)

        # update the local index file
        del_files = runner.getResults().values()
        all_files = self.get_cached_files()

        left_files = []
        for f in all_files:
            if f not in del_files:
                left_files.append(f)

        self.impl_bookkeepUploadedFiles(left_files, append=False)

        return del_files
Example #12
    def impl_delete(self, files=[], opts=''):
        """
        Deletes multiple files from remote gridftp server
        """

        shell = getShell(self.middleware)

        # the algorithm of deleting one file from the remote gridftp server
        class MyAlgorithm(Algorithm):

            def __init__(self, cacheObj):
                Algorithm.__init__(self)
                self.cacheObj = cacheObj

            def process(self, file):

                destURI = file.id

                uri_info = urisplit(destURI)

                cmd = 'uberftp %s "rm %s"' % (uri_info[1], uri_info[2])

                rc, output, m = self.cacheObj.__cmd_retry_loop__(
                    shell, cmd, self.cacheObj.max_try)

                if rc != 0:
                    self.cacheObj.logger.error(output)
                    return False
                else:
                    self.__appendResult__(file.id, file)
                    return True

        myAlg = MyAlgorithm(cacheObj=self)
        myData = Data(collection=files)

        runner = MTRunner(
            name='sandboxcache_lcgdel', algorithm=myAlg, data=myData)
        runner.start()
        runner.join(-1)

        # update the local index file
        del_files = runner.getResults().values()
        all_files = self.get_cached_files()

        left_files = []
        for f in all_files:
            if f not in del_files:
                left_files.append(f)

        self.impl_bookkeepUploadedFiles(left_files, append=False)

        return del_files
Example #13
    def impl_download(self, files=[], dest_dir=None, opts=''):
        """
        Downloads multiple files from gridftp server to 
        a local directory.
        """
        if not dest_dir:
            dest_dir = os.getcwd()
        self.logger.debug('download file to: %s', dest_dir)

        shell = getShell(self.middleware)

        # the algorithm of downloading one file to a local directory
        class MyAlgorithm(Algorithm):
            def __init__(self, cacheObj):
                Algorithm.__init__(self)
                self.cacheObj = cacheObj

            def process(self, file):

                srcURI = file.id
                fname = os.path.basename(urisplit(srcURI)[2])
                destURI = 'file:%s/%s' % (dest_dir, fname)

                cmd = 'globus-url-copy %s %s' % (srcURI, destURI)

                rc, output, m = self.cacheObj.__cmd_retry_loop__(
                    shell, cmd, self.cacheObj.max_try)

                if rc != 0:
                    self.cacheObj.logger.error(output)
                    return False
                else:
                    self.__appendResult__(file.id, file)
                    return True

        myAlg = MyAlgorithm(cacheObj=self)
        myData = Data(collection=files)

        runner = MTRunner(name='sandboxcache_gridftp',
                          algorithm=myAlg,
                          data=myData)
        runner.start()
        runner.join(-1)

        return runner.getResults().values()
Example #14
    def impl_download(self, files=[], dest_dir=None, opts=''):
        """
        Downloads multiple files from gridftp server to 
        a local directory.
        """
        if not dest_dir:
            dest_dir = os.getcwd()
        self.logger.debug('download file to: %s', dest_dir)

        shell = getShell(self.middleware)

        # the algorithm of downloading one file to a local directory
        class MyAlgorithm(Algorithm):

            def __init__(self, cacheObj):
                Algorithm.__init__(self)
                self.cacheObj = cacheObj

            def process(self, file):

                srcURI = file.id
                fname = os.path.basename(urisplit(srcURI)[2])
                destURI = 'file:%s/%s' % (dest_dir, fname)

                cmd = 'globus-url-copy %s %s' % (srcURI, destURI)

                rc, output, m = self.cacheObj.__cmd_retry_loop__(
                    shell, cmd, self.cacheObj.max_try)

                if rc != 0:
                    self.cacheObj.logger.error(output)
                    return False
                else:
                    self.__appendResult__(file.id, file)
                    return True

        myAlg = MyAlgorithm(cacheObj=self)
        myData = Data(collection=files)

        runner = MTRunner(
            name='sandboxcache_gridftp', algorithm=myAlg, data=myData)
        runner.start()
        runner.join(-1)

        return runner.getResults().values()
Example #15
 def handlesubmit(self, jobids, runid):
     """Submits exported jobs as identified by a list of path patterns.
     
     Keyword arguments:
     jobids -- A list of the submitted job ids.
     runid -- A UTC ID string which identifies the run.
     
     """
     # get configuration properties
     patterns = self.getoption('CoreSubmitter_Patterns')
     logger.info("Searching for job files matching patterns %s.", patterns)
     matches = self._getmatches(patterns)
     logger.info("Found %d matching job files.", len(matches))
     runner = MTRunner('ThreadedSubmitterMTRunner',
                       algorithm=ThreadedSubmitterAlgorithm(),
                       data=Data([(m, jobids) for m in matches]),
                       numThread=int(
                           self.getoption('ThreadedSubmitter_numThreads')))
     runner.start()
     runner.join()
Example #16
    def impl_upload(self, files=[], opts=""):
        """
        Uploads multiple files to a remote gridftp server.
        """

        shell = getShell()

        # create the directory on the remote storage at destURI
        dirname = self.__get_unique_fname__()

        # creating subdirectory

        dir_ok = False

        destURI = "%s/%s" % (self.baseURI, dirname)

        uri_info = urisplit(destURI)

        cmd = 'uberftp %s "cd %s"' % (uri_info[1], uri_info[2])

        rc, output, m = self.__cmd_retry_loop__(shell, cmd, 1)

        if rc != 0:

            for l in output.split("\n"):
                l.strip()
                if re.match(r"^550.*", l):
                    # the directory is not found (error code 550), try to creat
                    # the lowest level one
                    cmd = 'uberftp %s "mkdir %s"' % (uri_info[1], uri_info[2])

                    rc, output, m = self.__cmd_retry_loop__(shell, cmd, 1)

                    if rc != 0:
                        self.logger.error(output)
                    else:
                        dir_ok = True

                    break
        else:
            self.logger.debug("parent directory already available: %s" % destURI)
            dir_ok = True

        if not dir_ok:
            self.logger.error("parent directory not available: %s" % destURI)
            return []

        # the algorithm of uploading one file
        class MyAlgorithm(Algorithm):
            def __init__(self, cacheObj):
                Algorithm.__init__(self)
                self.cacheObj = cacheObj

            def process(self, file):
                # decide the number of parallel streams to use (roughly one per 10 GB)
                fsize = os.path.getsize(urlparse(file)[2])
                fname = os.path.basename(urlparse(file)[2])
                fpath = os.path.abspath(urlparse(file)[2])

                md5sum = get_md5sum(fpath, ignoreGzipTimestamp=True)
                nbstream = int((fsize * 1.0) / (10.0 * 1024 * 1024 * 1024))

                if nbstream < 1:
                    nbstream = 1  # min stream
                if nbstream > 8:
                    nbstream = 8  # max stream

                myDestURI = "%s/%s" % (destURI, fname)

                # uploading the file
                cmd = "uberftp"
                if nbstream > 1:
                    cmd += " -c %d" % nbstream

                cmd += " file:%s %s" % (fpath, myDestURI)

                rc, output, m = self.cacheObj.__cmd_retry_loop__(shell, cmd, self.cacheObj.max_try)

                if rc != 0:
                    self.cacheObj.logger.error(output)
                    return False
                else:
                    fidx = GridftpFileIndex()
                    fidx.id = myDestURI
                    fidx.name = fname
                    fidx.md5sum = md5sum
                    fidx.attributes["fpath"] = fpath

                    self.__appendResult__(file, fidx)
                    return True

        myAlg = MyAlgorithm(cacheObj=self)
        myData = Data(collection=files)

        runner = MTRunner(name="sandboxcache_gridftp", algorithm=myAlg, data=myData)
        runner.start()
        runner.join(-1)

        return runner.getResults().values()
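
The stream-count heuristic in the upload algorithm above allocates roughly one parallel stream per 10 GB of file size and clamps the result to the range 1..8. Restated as a standalone helper (the function name is invented):

def pick_nbstream(fsize_bytes):
    # roughly one parallel stream per 10 GB, clamped to [1, 8]
    nbstream = int(fsize_bytes / (10.0 * 1024 * 1024 * 1024))
    return max(1, min(8, nbstream))

# e.g. a 25 GB file gets 2 streams; anything under 10 GB gets 1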
Example #17
    def impl_upload(self, cred_req, files=[], opts=''):
        """
        Uploads multiple files to a remote grid storage.
        """

        shell = getShell(cred_req)

        if self.lfc_host:
            shell.env['LFC_HOST'] = self.lfc_host

        self.logger.debug(
            'upload file with LFC_HOST: %s', shell.env['LFC_HOST'])

        # the algorithm of uploading one file
        class MyAlgorithm(Algorithm):

            def __init__(self, cacheObj):
                Algorithm.__init__(self)
                self.cacheObj = cacheObj
                self.dirname = self.cacheObj.__get_unique_fname__()

            def process(self, file):
                # decide the number of parallel streams to use (roughly one per 10 GB)
                fsize = os.path.getsize(urlparse(file)[2])
                fname = os.path.basename(urlparse(file)[2])
                fpath = os.path.abspath(urlparse(file)[2])

                md5sum = get_md5sum(fpath, ignoreGzipTimestamp=True)
                nbstream = int((fsize * 1.0) / (10.0 * 1024 * 1024 * 1024))

                if nbstream < 1:
                    nbstream = 1  # min stream
                if nbstream > 8:
                    nbstream = 8  # max stream

                cmd = 'lcg-cr -t 180 --vo %s -n %d' % (
                    self.cacheObj.vo, nbstream)
                if self.cacheObj.se is not None:
                    cmd = cmd + ' -d %s' % self.cacheObj.se
                if self.cacheObj.se_type == 'srmv2' and self.cacheObj.srm_token:
                    cmd = cmd + ' -D srmv2 -s %s' % self.cacheObj.srm_token

                # specify the physical location
                cmd = cmd + \
                    ' -P %s/ganga.%s/%s' % (self.cacheObj.se_rpath,
                                            self.dirname, fname)

                # specify the logical filename
                # NOTE: here we assume the root dir for VO is /grid/<voname>
                lfc_dir = '/grid/%s/ganga.%s' % (
                    self.cacheObj.vo, self.dirname)
                if not self.cacheObj.__lfc_mkdir__(shell, lfc_dir):
                    self.cacheObj.logger.warning(
                        'cannot create LFC directory: %s' % lfc_dir)
                    return False

                cmd = cmd + ' -l %s/%s %s' % (lfc_dir, fname, file)
                rc, output, m = self.cacheObj.__cmd_retry_loop__(
                    shell, cmd, self.cacheObj.max_try)

                if rc != 0:
                    return False
                else:
                    match = re.search(r'(guid:\S+)', output)
                    if match:
                        guid = match.group(1)

                        fidx = LCGFileIndex()
                        fidx.id = guid
                        fidx.name = fname
                        fidx.md5sum = md5sum
                        fidx.lfc_host = self.cacheObj.lfc_host
                        fidx.local_fpath = fpath

                        self.__appendResult__(file, fidx)
                        return True
                    else:
                        return False

        myAlg = MyAlgorithm(cacheObj=self)
        myData = Data(collection=files)

        runner = MTRunner(
            name='sandboxcache_lcgcr', algorithm=myAlg, data=myData)
        runner.start()
        runner.join(-1)

        return runner.getResults().values()
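
For concreteness, the command assembled above for a hypothetical 25 GB file, targeting an srmv2 storage element with a space token, would come out roughly as below. Every value here is a made-up placeholder, not a real endpoint.

# placeholder values only, to show the shape of the final lcg-cr command
vo, se, token = 'myvo', 'se.example.org', 'MYTOKEN'
se_rpath, dirname, fname = 'generated', '20240101_abcdef', 'data.tar.gz'
src = 'file:///tmp/data.tar.gz'

cmd = 'lcg-cr -t 180 --vo %s -n %d' % (vo, 2)  # 25 GB -> 2 streams
cmd += ' -d %s -D srmv2 -s %s' % (se, token)
cmd += ' -P %s/ganga.%s/%s' % (se_rpath, dirname, fname)
cmd += ' -l /grid/%s/ganga.%s/%s %s' % (vo, dirname, fname, src)
print(cmd)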
Example #18
    def impl_upload(self, files=[], opts=''):
        """
        Uploads multiple files to a remote gridftp server.
        """

        shell = getShell(self.middleware)

        # create the directory on the remote storage at destURI
        dirname = self.__get_unique_fname__()

        # creating subdirectory

        dir_ok = False

        destURI = '%s/%s' % (self.baseURI, dirname)

        uri_info = urisplit(destURI)

        cmd = 'uberftp %s "cd %s"' % (uri_info[1], uri_info[2])

        rc, output, m = self.__cmd_retry_loop__(shell, cmd, 1)

        if rc != 0:

            for l in output.split('\n'):
                l = l.strip()
                if re.match(r'^550.*', l):
                    # the directory was not found (error code 550); try to
                    # create the lowest-level one
                    cmd = 'uberftp %s "mkdir %s"' % (uri_info[1], uri_info[2])

                    rc, output, m = self.__cmd_retry_loop__(shell, cmd, 1)

                    if rc != 0:
                        self.logger.error(output)
                    else:
                        dir_ok = True

                    break
        else:
            self.logger.debug('parent directory already available: %s' %
                              destURI)
            dir_ok = True

        if not dir_ok:
            self.logger.error('parent directory not available: %s' % destURI)
            return []

        # the algorithm of uploading one file
        class MyAlgorithm(Algorithm):
            def __init__(self, cacheObj):
                Algorithm.__init__(self)
                self.cacheObj = cacheObj

            def process(self, file):
                # decide the number of parallel streams to use (roughly one per 10 GB)
                fsize = os.path.getsize(urlparse(file)[2])
                fname = os.path.basename(urlparse(file)[2])
                fpath = os.path.abspath(urlparse(file)[2])

                md5sum = get_md5sum(fpath, ignoreGzipTimestamp=True)
                nbstream = int((fsize * 1.0) / (10.0 * 1024 * 1024 * 1024))

                if nbstream < 1:
                    nbstream = 1  # min stream
                if nbstream > 8:
                    nbstream = 8  # max stream

                myDestURI = '%s/%s' % (destURI, fname)

                # uploading the file
                cmd = 'uberftp'
                if nbstream > 1:
                    cmd += ' -c %d' % nbstream

                cmd += ' file:%s %s' % (fpath, myDestURI)

                rc, output, m = self.cacheObj.__cmd_retry_loop__(
                    shell, cmd, self.cacheObj.max_try)

                if rc != 0:
                    self.cacheObj.logger.error(output)
                    return False
                else:
                    fidx = GridftpFileIndex()
                    fidx.id = myDestURI
                    fidx.name = fname
                    fidx.md5sum = md5sum
                    fidx.attributes['fpath'] = fpath

                    self.__appendResult__(file, fidx)
                    return True

        myAlg = MyAlgorithm(cacheObj=self)
        myData = Data(collection=files)

        runner = MTRunner(name='sandboxcache_gridftp',
                          algorithm=myAlg,
                          data=myData)
        runner.start()
        runner.join(-1)

        return runner.getResults().values()