Example #1
    def download_dir_as_tarball(self, remotepath, outfile):
        parent_dir, target_dir = remotepath.rstrip('/').rsplit('/', 1)
        command = 'tar -cz -C "%s" "%s"' % (parent_dir, target_dir)

        logger.debug("execing command: %s" % command)

        with self.executer.sshclient() as client:
            try:
                stdin, stdout, stderr = client.exec_command(command)

                while not stdout.channel.exit_status_ready():
                    rl, wl, xl = select.select([stdout.channel], [], [])
                    if len(rl) > 0:
                        while stdout.channel.recv_ready():
                            data = stdout.channel.recv(BLOCK_SIZE)
                            outfile.write(data)

                # Stdout might still have data, flush it all out
                data = stdout.channel.recv(BLOCK_SIZE)
                while data:
                    outfile.write(data)
                    data = stdout.channel.recv(BLOCK_SIZE)

                exit_status = stdout.channel.exit_status
                logger.debug("Exit status: %s", exit_status)
                if exit_status != 0:
                    raise RetryException(
                        "Exit status %s received why trying to tarball %s" %
                        (exit_status, remotepath))
            except paramiko.SSHException as sshe:
                raise RetryException(sshe, traceback.format_exc())

        return exit_status == 0
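
A minimal usage sketch for this method, assuming `executer` is an instance of the executer class the method belongs to; the remote path and local filename are hypothetical. Example #9 below shows the real call site via `download_dir`.

# Hypothetical usage: 'executer' is assumed to be an executer instance,
# and both paths are made up for illustration.
with open("/tmp/job_output.tar.gz", "wb") as outfile:
    ok = executer.download_dir_as_tarball("/scratch/jobs/1234/output", outfile)
    print("tarball written:", ok)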
Example #2
    def submit_task(self):
        """
        For local exec submitting a task executes the task and blocks
        the current process. It is not intended for large scale real world usage.
        """
        exec_scheme, exec_parts = uriparse(self.task.job.exec_backend)
        working_scheme, working_parts = uriparse(self.working_output_dir_uri())

        script = self.get_submission_script(exec_parts.hostname,
                                            working_parts.path)
        logger.debug('script {0}'.format(script))
        script_name = self.create_script(script)

        if os.path.exists(working_parts.path):
            shutil.rmtree(working_parts.path)

        os.makedirs(working_parts.path)

        try:
            stdout = open(os.path.join(working_parts.path, 'STDOUT.txt'), 'w')
            stderr = open(os.path.join(working_parts.path, 'STDERR.txt'), 'w')

            logger.debug('Running in {0}'.format(working_parts.path))
            args = shlex.split(self.task.command.encode('utf-8'))

            def set_remote_id(pid):
                self.task.remote_id = pid
                self.task.save()

            args = [script_name]
            status = blocking_execute(args=args,
                                      stderr=stderr,
                                      stdout=stdout,
                                      cwd=working_parts.path,
                                      report_pid_callback=set_remote_id)

            if status != 0:
                if self.is_aborting():
                    return None
                logger.error('Non-zero exit status [{0}]'.format(status))
                raise RetryException(
                    'Local Exec of command "{0}" returned non-zero code {1}'.
                    format(" ".join(args), status))

        except Exception as exc:
            raise RetryException(exc)
        finally:
            try:
                stdout.close()
                stderr.close()
            except Exception as exc:
                logger.error(exc)

            try:
                os.unlink(script_name)
            except:
                logger.exception("Couldn't delete script file %s", script_name)

        return status
Example #3
    def exec_script(self, script):
        logger.debug("SSHExex.exec_script...")
        logger.debug('script content = {0}'.format(script))
        exec_scheme, exec_parts = uriparse(self.uri)
        ssh = sshclient(exec_parts.hostname, exec_parts.port, self.credential)
        sftp = None
        try:
            sftp = ssh.open_sftp()

            script_name = self.upload_script(sftp, script)
            stdin, stdout, stderr = ssh.exec_command(script_name, bufsize=-1, timeout=None, get_pty=False)
            stdin.close()
            exit_code = stdout.channel.recv_exit_status()
            logger.debug("sshclient exec'd script OK")

            self.remove_script(sftp, script_name)

            return exit_code, stdout.readlines(), stderr.readlines()
        except paramiko.SSHException as sshe:
            raise RetryException(sshe, traceback.format_exc())
        finally:
            try:
                if sftp is not None:
                    sftp.close()
                if ssh is not None:
                    ssh.close()
            except:
                pass
Example #4
    def remote_file_upload(yabiusername, filename, uri):
        """Use a local fifo to upload to a remote file"""
        logger.debug('local_fifo -> {0}'.format(uri))

        # we need ref to the backend
        backend = FSBackend.urifactory(yabiusername, uri)
        scheme, parts = uriparse(uri)

        # uri for an upload must specify filename. we can't rely on the
        # source name as we copy from a fifo with a random name
        if not uri.endswith(filename):
            if not uri.endswith('/'):
                uri = uri + '/'
            uri = uri + filename

        try:
            # create a fifo, start the write to/read from fifo
            fifo = create_fifo('remote_file_upload_' + yabiusername + '_' + parts.hostname)
            thread, queue = backend.fifo_to_remote(uri, fifo)

            outfile = open(fifo, "wb")
            try:
                os.unlink(fifo)
            except OSError:
                logger.exception("Couldn't delete remote file upload fifo")
            return outfile, queue
        except Exception as exc:
            raise RetryException(exc, traceback.format_exc())
Example #5
def blocking_execute(args,
                     bufsize=0,
                     stdin=None,
                     stdout=subprocess.PIPE,
                     stderr=subprocess.PIPE,
                     shell=False,
                     cwd=None,
                     env=None,
                     report_pid_callback=(lambda x: None)):
    """execute a process and wait for it to end"""
    status = None
    try:
        logger.debug(args)
        logger.debug(cwd)
        process = execute(args,
                          bufsize=bufsize,
                          stdin=stdin,
                          stdout=stdout,
                          stderr=stderr,
                          shell=shell,
                          cwd=cwd,
                          env=env)
        report_pid_callback(process.pid)
        stdout_data, stderr_data = process.communicate(stdin)
        status = process.returncode

    except Exception as exc:
        logger.error('execute failed: {0}'.format(exc))
        from yabi.backend.exceptions import RetryException
        raise RetryException(exc, traceback.format_exc())

    return status
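
A minimal usage sketch, assuming `blocking_execute` is imported as defined above; the script path, working directory, and output files are hypothetical.

# Hypothetical usage: run a script and capture its output in files.
with open("/tmp/STDOUT.txt", "w") as out, open("/tmp/STDERR.txt", "w") as err:
    status = blocking_execute(args=["/bin/sh", "/tmp/task_script.sh"],
                              stdout=out,
                              stderr=err,
                              cwd="/tmp",
                              report_pid_callback=lambda pid: print("pid:", pid))
if status != 0:
    print("command failed with exit status", status)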
Example #6
    def remote_file_download(yabiusername, uri, is_dir=False):
        """Use a local fifo to download a remote file"""
        logger.debug('{0} -> local fifo'.format(uri))

        # we need ref to the backend
        backend = FSBackend.urifactory(yabiusername, uri)
        scheme, parts = uriparse(uri)

        try:
            # create a fifo, start the write to/read from fifo
            fifo = create_fifo('remote_file_download_' + yabiusername + '_' + parts.hostname)
            if is_dir:
                thread, queue = backend.remote_dir_to_fifo(uri, fifo)
            else:
                thread, queue = backend.remote_to_fifo(uri, fifo)

            infile = open(fifo, "rb")
            try:
                os.unlink(fifo)
            except OSError:
                logger.exception("Couldn't delete remote file download fifo")

            return infile, thread, queue

        except Exception as exc:
            raise RetryException(exc, traceback.format_exc())
Example #7
    def remote_file_copy(yabiusername, src_uri, dst_uri):
        """Use a local fifo to copy a single file from src_uri to dst_uri"""
        logger.debug('remote_file_copy {0} -> {1}'.format(src_uri, dst_uri))

        # we need refs to the src and dst backends
        src_backend = FSBackend.urifactory(yabiusername, src_uri)
        dst_backend = FSBackend.urifactory(yabiusername, dst_uri)
        src_scheme, src_parts = uriparse(src_uri)
        dst_scheme, dst_parts = uriparse(dst_uri)
        # Making sure dst_uri is always a file not a dir
        if dst_parts.path.endswith("/"):  # Looks like a dir
            dst_file_uri = "%s/%s" % (dst_uri.rstrip('/'), src_backend.basename(src_parts.path))
            dst_scheme, dst_parts = uriparse(dst_file_uri)
        else:
            dst_file_uri = dst_uri

        fifo = None
        try:
            src_stat = src_backend.remote_uri_stat(src_uri)

            # create a fifo, start the write to/read from fifo
            fifo = create_fifo('remote_file_copy_' + yabiusername + '_' + src_parts.hostname + '_' + dst_parts.hostname)
            src_cmd, src_queue = src_backend.remote_to_fifo(src_uri, fifo)
            dst_cmd, dst_queue = dst_backend.fifo_to_remote(dst_file_uri, fifo)
            src_cmd.join()
            dst_cmd.join()
            try:
                os.unlink(fifo)
            except OSError:
                pass
            src_success = src_queue.get()
            dst_success = dst_queue.get()

            # check exit status
            if not src_success:
                raise RetryException('remote_file_copy remote_to_fifo failed')
            if not dst_success:
                raise RetryException('remote_file_copy fifo_to_remote failed')

            if src_stat:
                atime = src_stat.get('atime')
                mtime = src_stat.get('mtime')
                dst_backend.set_remote_uri_times(dst_file_uri, atime, mtime)

        except Exception as exc:
            raise RetryException(exc, traceback.format_exc())
Example #8
    def local_copy(self, src_uri, dst_uri):
        """A local copy within this backend."""
        logger.debug('local_copy {0} -> {1}'.format(src_uri, dst_uri))
        src_scheme, src_parts = uriparse(src_uri)
        dst_scheme, dst_parts = uriparse(dst_uri)
        try:
            shutil.copy2(src_parts.path, dst_parts.path)
        except Exception as exc:
            raise RetryException(exc, traceback.format_exc())
Example #9
    def download_dir(self, uri, outfile):
        logger.debug("SFTPBackend.download_dir: %s => tarball => %s", uri,
                     outfile)
        scheme, parts = uriparse(uri)
        executer = create_executer(self.yabiusername, uri)
        try:
            return executer.download_dir_as_tarball(parts.path, outfile)
        except Exception as exc:
            raise RetryException(exc, traceback.format_exc())
Example #10
def sshclient(hostname, port, credential):
    if port is None:
        port = 22
    ssh = None

    c = credential.get_decrypted()

    logger.debug('Connecting to {0}@{1}:{2}'.format(c.username, hostname,
                                                    port))

    try:
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.load_system_host_keys()

        connect = partial(ssh.connect,
                          hostname=hostname,
                          port=port,
                          username=c.username,
                          key_filename=None,
                          timeout=None,
                          allow_agent=False,
                          look_for_keys=False,
                          compress=False,
                          sock=None)

        if c.key:
            private_key = create_paramiko_pkey(c.key, c.password)
            connect(pkey=private_key)
        else:
            logger.debug("Connecting using password")
            connect(password=c.password)

    except paramiko.BadHostKeyException as bhke:  # BadHostKeyException - if the server's host key could not be verified
        raise RetryException(bhke, traceback.format_exc())
    except paramiko.AuthenticationException as aue:  # AuthenticationException - if authentication failed
        raise RetryException(aue, traceback.format_exc())
    except paramiko.SSHException as sshe:  # SSHException - if there was any other error connecting or establishing an SSH session
        raise RetryException(sshe, traceback.format_exc())
    except socket.error as soe:  # socket.error - if a socket error occurred while connecting
        raise RetryException(soe, traceback.format_exc())

    return ssh
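
A minimal usage sketch, assuming `credential` is a credential object of the kind used elsewhere in these examples; the hostname and command are hypothetical.

# Hypothetical usage: 'credential' is assumed to be a decryptable
# credential object; host and command are made up for illustration.
ssh = sshclient("compute.example.org", 22, credential)
try:
    stdin, stdout, stderr = ssh.exec_command("uname -a")
    print(stdout.read().decode())
finally:
    ssh.close()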
Example #11
    def symbolic_link(self, src_uri, dst_uri):
        """symbolic link to target_uri called link_uri."""
        logger.debug("SFTPBackend.symbolic_link: %s => %s", src_uri, dst_uri)
        src_scheme, src_parts = uriparse(src_uri)
        dst_scheme, dst_parts = uriparse(dst_uri)
        logger.debug('{0} -> {1}'.format(src_uri, dst_uri))

        executer = create_executer(self.yabiusername, src_uri)
        try:
            executer.local_symlink(src_parts.path, dst_parts.path)
        except Exception as exc:
            raise RetryException(exc, traceback.format_exc())
Example #12
    def mkdir(self, uri):
        """mkdir at uri"""
        logger.debug('mkdir {0}'.format(uri))
        scheme, parts = uriparse(uri)

        if os.path.exists(parts.path) and os.path.isdir(parts.path):
            return

        try:
            os.makedirs(parts.path)
        except OSError as ose:
            raise RetryException(ose, traceback.format_exc())
Example #13
    def symbolic_link(self, target_uri, link_uri):
        """symbolic link to target_uri called link_uri."""
        logger.debug('symbolic_link {0} -> {1}'.format(target_uri, link_uri))
        target_scheme, target_parts = uriparse(target_uri)
        link_scheme, link_parts = uriparse(link_uri)
        target = target_parts.path
        try:
            if not os.path.exists(target):
                raise FileNotFoundError(
                    "Source of symbolic link '%s' doesn't exist" % target)
            os.symlink(target, link_parts.path)
        except OSError as ose:
            raise RetryException(ose, traceback.format_exc())
Example #14
    def local_copy(self, src_uri, dst_uri):
        """Copy src_uri to dst_uri on the remote backend"""
        logger.debug("SFTPBackend.local_copy: %s => %s", src_uri, dst_uri)
        src_scheme, src_parts = uriparse(src_uri)
        dst_scheme, dst_parts = uriparse(dst_uri)
        logger.debug('{0} -> {1}'.format(src_uri, dst_uri))
        # Paramiko does not support a server-side copy, so we
        # run cp on the server via the exec backend
        executer = create_executer(self.yabiusername, src_uri)
        try:
            executer.local_copy(src_parts.path, dst_parts.path)
        except Exception as exc:
            raise RetryException(exc, traceback.format_exc())
Example #15
    def rm(self, uri):
        self.set_cred(uri)
        bucket_name, path = self.parse_s3_uri(uri)

        try:
            bucket = self.bucket(bucket_name)
            all_keys = self.get_keys_recurse(bucket_name, path)

            # Unfortunately, when passing Unicode key names to Boto's
            # bucket.delete_objects, it throws a UnicodeEncodeError while
            # building the XML request. As a workaround, we split the keys
            # into two groups: one containing only valid ASCII keys and
            # another containing Unicode keys (i.e. keys that would throw a
            # UnicodeEncodeError on str conversion). The first group is
            # deleted with a single call to bucket.delete_objects; the second
            # group we iterate over and delete one-by-one, via a DELETE HTTP
            # call, so no XML 1.0 problems there.

            ASCII_keys = []
            Unicode_keys = []
            for k in map(lambda k: k['Key'], all_keys):
                try:
                    ASCII_keys.append({'Key': str(k)})
                except UnicodeEncodeError:
                    Unicode_keys.append(k)

            errors = []
            # delete_objects accepts a maximum of 1000 keys so we chunk the keys
            for keys in chunks(ASCII_keys, 1000):
                logger.debug("Deleting keys: %s", keys)
                multi_delete_result = bucket.delete_objects(
                    Delete={'Objects': keys})
                if 'Errors' in multi_delete_result:
                    errors.extend(
                        err['Key'] for err in multi_delete_result['Errors'])

            for key in Unicode_keys:
                try:
                    bucket.Object(key).delete()
                except:
                    errors.append(key)

            if len(errors) > 0:
                # Some keys couldn't be deleted
                raise RuntimeError(
                    "The following keys couldn't be deleted when deleting "
                    "uri %s: %s" % (uri, ", ".join(errors)))

        except Exception as exc:
            logger.exception("Error while trying to S3 rm uri %s", uri)
            raise RetryException(exc, traceback.format_exc())
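
The code above relies on a `chunks` helper that is not shown in this listing. A minimal sketch of what such a helper might look like, assuming it simply yields fixed-size slices of a list:

# Assumed shape of the chunks() helper used by rm() above.
def chunks(items, size):
    """Yield successive slices of at most `size` elements from `items`."""
    for start in range(0, len(items), size):
        yield items[start:start + size]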
Example #16
    def mkdir(self, uri):
        self.set_cred(uri)
        dir_uri = ensure_trailing_slash(uri)
        self.rm(dir_uri)
        bucket_name, path = self.parse_s3_uri(dir_uri)

        try:
            bucket = self.bucket(bucket_name)
            key = bucket.Object(path.lstrip(DELIMITER))
            key.put(Body='')

        except Exception as exc:
            logger.exception("Error while trying to S3 rm uri %s", uri)
            raise RetryException(exc, traceback.format_exc())
Example #17
    def local_copy_recursive(self, src_uri, dst_uri):
        """A recursive local copy within this backend."""
        logger.debug('local_copy_recursive {0} -> {1}'.format(src_uri, dst_uri))
        src_scheme, src_parts = uriparse(src_uri)
        dst_scheme, dst_parts = uriparse(dst_uri)
        try:
            for item in os.listdir(src_parts.path):
                src = os.path.join(src_parts.path, item)
                dst = os.path.join(dst_parts.path, item)
                if os.path.isdir(src):
                    shutil.copytree(src, dst)
                else:
                    shutil.copy2(src, dst)
        except Exception as exc:
            raise RetryException(exc, traceback.format_exc())
Example #18
    def rm(self, uri):
        """recursively delete a uri"""
        scheme, parts = uriparse(uri)
        logger.debug('{0}'.format(parts.path))
        ssh = sshclient(parts.hostname, parts.port, self.cred.credential)
        try:
            sftp = ssh.open_sftp()
            self._rm(sftp, parts.path)
        except Exception as exc:
            raise RetryException(exc, traceback.format_exc())
        finally:
            try:
                if ssh is not None:
                    ssh.close()
            except:
                pass
Example #19
    def rm(self, uri):
        """rm uri"""
        logger.debug('rm {0}'.format(uri))
        scheme, parts = uriparse(uri)
        logger.debug('{0}'.format(parts.path))

        if not os.path.exists(parts.path):
            raise Exception(
                'rm target ({0}) is not a file or directory'.format(
                    parts.path))

        try:
            path = parts.path.rstrip('/')
            if os.path.isfile(path) or os.path.islink(path):
                os.unlink(path)
            elif os.path.isdir(path):
                shutil.rmtree(path)
        except Exception as exc:
            raise RetryException(exc, traceback.format_exc())
Example #20
    def local_copy_recursive(self, src_uri, dst_uri):
        """recursively copy src_uri to dst_uri on the remote backend"""
        logger.debug("SFTPBackend.local_copy_recursive: %s => %s", src_uri,
                     dst_uri)
        dst_scheme, dst_parts = uriparse(dst_uri)
        dst_path = dst_parts.path

        listing = self.ls(src_uri)

        executer = create_executer(self.yabiusername, src_uri)
        try:
            for key in listing:
                for listing_file in listing[key]['files']:
                    file_path = os.path.join(key, listing_file[0])
                    executer.local_copy(file_path, dst_path)
                for listing_dir in listing[key]['directories']:
                    dir_path = os.path.join(key, listing_dir[0])
                    executer.local_copy(dir_path, dst_path, recursive=True)
        except Exception as exc:
            raise RetryException(exc, traceback.format_exc())
Example #21
    def ls(self, uri):
        """ls at uri"""
        self.set_cred(uri)
        scheme, parts = uriparse(uri)
        ssh = sshclient(parts.hostname, parts.port, self.cred.credential)
        try:
            sftp = ssh.open_sftp()
            results = self._do_ls(sftp, parts.path)
            output = {}
            output[parts.path] = results
            return output
        except FileNotFoundError:
            return {}
        except Exception as exc:
            logger.exception("ls: %s" % uri)
            raise RetryException(exc, traceback.format_exc())
        finally:
            try:
                if ssh is not None:
                    ssh.close()
            except:
                pass
Example #22
    def mkdir(self, uri):
        """mkdir at uri"""
        self.set_cred(uri)
        scheme, parts = uriparse(uri)
        path = parts.path
        ssh = sshclient(parts.hostname, parts.port, self.cred.credential)
        try:
            sftp = ssh.open_sftp()
            try:
                self._rm(sftp, path)
                logger.debug("deleted existing directory %s OK" % path)
            except Exception as ex:
                logger.debug("could not remove directory %s: %s" % (path, ex))

            def full_path(result, d):
                previous = result[-1] if result else ""
                result.append("%s/%s" % (previous, d))
                return result

            dirs = [p for p in path.split("/") if p.strip() != '']
            dir_full_paths = reduce(full_path, dirs, [])
            non_existent_dirs = dropwhile(lambda d: self.path_exists(sftp, d),
                                          dir_full_paths)

            for d in non_existent_dirs:
                sftp.mkdir(d)

            logger.debug("created dir %s OK" % path)

        except Exception as exc:
            logger.error(exc)
            raise RetryException(exc, traceback.format_exc())
        finally:
            try:
                if ssh is not None:
                    ssh.close()
            except:
                pass
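
For reference, the `reduce` over `full_path` builds the list of cumulative parent paths that `dropwhile` then trims down to the directories that still need creating. A small stand-alone sketch with a hypothetical path and a pretend set of existing directories:

from functools import reduce
from itertools import dropwhile


def full_path(result, d):
    previous = result[-1] if result else ""
    result.append("%s/%s" % (previous, d))
    return result

# Hypothetical remote path.
path = "/data/jobs/1234/output"
dirs = [p for p in path.split("/") if p.strip() != '']
dir_full_paths = reduce(full_path, dirs, [])
print(dir_full_paths)
# ['/data', '/data/jobs', '/data/jobs/1234', '/data/jobs/1234/output']

# Pretend only the first two levels already exist on the server.
existing = {'/data', '/data/jobs'}
to_create = list(dropwhile(lambda d: d in existing, dir_full_paths))
print(to_create)
# ['/data/jobs/1234', '/data/jobs/1234/output']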
Example #23
    def remote_copy_recurse(yabiusername, src_uri, dst_uri):
        """Recursively copy src_uri to dst_uri"""
        logger.info('remote_copy {0} -> {1}'.format(src_uri, dst_uri))
        src_backend = FSBackend.urifactory(yabiusername, src_uri)
        dst_backend = FSBackend.urifactory(yabiusername, dst_uri)

        try:
            src_stat = src_backend.remote_uri_stat(src_uri)

            listing = src_backend.ls(src_uri)  # get _flat_ listing here not recursive as before
            dst_backend.mkdir(dst_uri)
            logger.debug("listing of src_uri %s = %s" % (src_uri, listing))
            for key in listing:
                # copy files using a fifo
                for listing_file in listing[key]['files']:
                    src_file_uri = url_join(src_uri, listing_file[0])
                    dst_file_uri = url_join(dst_uri, listing_file[0])
                    logger.debug("src_file_uri = %s" % src_file_uri)
                    logger.debug("dst_file_uri = %s" % dst_file_uri)
                    FSBackend.remote_file_copy(yabiusername, src_file_uri, dst_file_uri)

                # recurse on directories

                for listing_dir in listing[key]['directories']:
                    src_dir_uri = url_join(src_uri, listing_dir[0])
                    dst_dir_uri = url_join(dst_uri, listing_dir[0])
                    logger.debug("src_dir_uri = %s" % src_dir_uri)
                    logger.debug("dst_dir_uri = %s" % dst_dir_uri)
                    FSBackend.remote_copy_recurse(yabiusername, src_dir_uri, dst_dir_uri)

            if src_stat and src_backend.basename(src_uri.rstrip('/')) == dst_backend.basename(dst_uri.rstrip('/')):
                # Avoid setting the times if we're copying the contents of the source
                atime = src_stat.get('atime')
                mtime = src_stat.get('mtime')
                dst_backend.set_remote_uri_times(dst_uri, atime, mtime)
        except Exception as exc:
            raise RetryException(exc, traceback.format_exc())
Example #24
def execute(args,
            bufsize=0,
            stdin=None,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=False,
            cwd=None,
            env=None):
    """execute a process and return a handle to the process"""
    try:
        logger.debug(args)
        process = subprocess.Popen(args,
                                   bufsize=bufsize,
                                   stdin=stdin,
                                   stdout=stdout,
                                   stderr=stderr,
                                   shell=shell,
                                   cwd=cwd,
                                   env=env)
    except Exception as exc:
        logger.error(exc)
        raise RetryException(exc, traceback.format_exc())

    return process
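
A minimal usage sketch of this non-blocking variant, with a hypothetical command; `blocking_execute` in Example #5 wraps the same helper and waits for the process itself.

# Hypothetical usage: start a process and wait for it explicitly.
process = execute(["ls", "-l", "/tmp"])
stdout_data, stderr_data = process.communicate()
print("exit status:", process.returncode)
print(stdout_data.decode())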