Example 1
    def collect_output(self, destination):
        # We're using the default implementation of the file transfer code
        # This doesn't take into account a different port for the remote host
        # connection. To work around this, we temporarily set the host property
        # to include the port and then revert to the original value after the
        # file transfer is complete.
        host_tmp = self.host
        self.host = '%s:%s' % (self.host, self.port)

        # Using the base implementation of job output file collection...
        JobDeploymentBase.collect_output(self, destination)

        # If job_config.delete_job_files is True, we can now delete the job
        # files on the remote platform
        if self.job_config.delete_job_files:
            jobs_dir = self.platform_config.storage_job_directory
            # Open the job storage directory so that the sub-directory
            # created for this job can be removed.
            try:
                LOG.debug('URL for job file directory: sftp://%s%s' %
                          (self.host, jobs_dir))
                directory = Directory('sftp://%s%s' % (self.host, jobs_dir),
                                      session=self.session)
            except saga.BadParameter as e:
                LOG.error('The specified job directory does not exist on '
                          'resource <%s> (%s).' % (self.host, str(e)))
                raise JobError('The specified job directory does not exist '
                               'on resource <%s> (%s)' % (self.host, str(e)))
            try:
                LOG.debug('Deleting job directory after job completion '
                          '<sftp://%s%s/%s>' %
                          (self.host, jobs_dir, self.job_config.job_id))
                # RECURSIVE is the saga.filesystem flag for recursive removal
                directory.remove(self.job_config.job_id, RECURSIVE)
            except saga.NoSuccess as e:
                LOG.error('The specified job data directory couldn\'t be '
                          'removed <%s> (%s).' %
                          (self.job_config.job_id, str(e)))
                raise JobError('The specified job data directory couldn\'t be '
                               'removed <%s> (%s)' %
                               (self.job_config.job_id, str(e)))

        # Restore the host property to its original value
        self.host = host_tmp
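
One caveat with the temporary reassignment above: if the base collect_output call raises, self.host is never restored. A minimal sketch of an exception-safe alternative, using a hypothetical host_with_port context manager that is not part of the original code:

from contextlib import contextmanager

@contextmanager
def host_with_port(deployment):
    # Temporarily rewrite deployment.host to 'host:port' so the default
    # file-transfer code connects on the right port; the original value
    # is restored even if the transfer raises.
    original = deployment.host
    deployment.host = '%s:%s' % (original, deployment.port)
    try:
        yield deployment
    finally:
        deployment.host = original

The method above could then wrap the transfer and clean-up steps as:

    with host_with_port(self):
        JobDeploymentBase.collect_output(self, destination)
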
Example 2
    def collect_output(self, destination):
        # Before calling the base implementation of output file collection to
        # pull files back from the master node, we first need to gather output
        # from each of the slave nodes onto the master node
        LOG.debug('Gather files from slave nodes to master...')
        job_dir = self.platform_config.storage_job_directory

        # The first entry in running_nodes is the master; any remaining
        # entries are slaves. If there is only one node, the gather stage
        # below can be skipped...
        master_node = self.running_nodes[0][0]
        slave_nodes = []
        if len(self.running_nodes) > 1:
            slave_nodes = [node[0] for node in self.running_nodes[1:]]

        if slave_nodes:
            slave_private_ips = [node.private_ips[0] for node in slave_nodes]
            self._gather_results_data(master_node.public_ips[0],
                                      slave_private_ips,
                                      self.platform_config.user_id,
                                      self.platform_config.user_key_file,
                                      job_dir, self.job_config.job_id)

        # Using the base implementation of job output file collection...
        JobDeploymentBase.collect_output(self, destination)
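
The helper _gather_results_data is not shown in this listing. A minimal sketch of what it might do, written here as a standalone function, assuming the master can reach each slave over SSH with a key the slaves accept and that rsync is installed on the master (the SSH/rsync approach and all names are assumptions, not the original implementation):

import subprocess

def gather_results_data(master_ip, slave_ips, user, key_file, job_dir, job_id):
    # Hypothetical sketch: pull each slave's copy of the job directory
    # onto the master, so that the base collect_output implementation
    # only has to transfer files back from a single host.
    remote_job_path = '%s/%s' % (job_dir, job_id)
    for slave_ip in slave_ips:
        # rsync runs *on the master*, pulling from the slave; this assumes
        # the master holds a key that the slaves accept.
        gather_cmd = ('rsync -az -e "ssh -o StrictHostKeyChecking=no" '
                      '%s@%s:%s/ %s/' % (user, slave_ip, remote_job_path,
                                         remote_job_path))
        subprocess.check_call(['ssh', '-i', key_file,
                               '-o', 'StrictHostKeyChecking=no',
                               '%s@%s' % (user, master_ip), gather_cmd])
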
Example 3
    def collect_output(self):
        JobDeploymentBase.collect_output(self)

    def collect_output(self, destination):
        JobDeploymentBase.collect_output(self, destination)
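
Pass-through overrides like these keep collect_output's signature uniform across deployment classes, so driver code can collect results without knowing which platform a job ran on. A short usage sketch (the factory function and destination path are assumptions for illustration):

deployment = make_deployment(platform_config, job_config)  # hypothetical factory
deployment.collect_output('/tmp/%s' % job_config.job_id)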