Example #1
0
 def test_encryption( self ):
     for is_master in False, True:
         try:
             test_file = self.test_files[ two_and_a_half_parts ]
             src_url = self.ftp_url( test_file )
             sse_key = '-0123456789012345678901234567890'
             args = concat(
                 '--sse-key=' + sse_key,
                 [ '--sse-key-is-master' ] if is_master else [ ] )
             s3am.cli.main( concat(
                 'upload', verbose, slots, src_url, self.s3_url( ),
                 '--exists=overwrite',
                 args ) )
             self._assert_key( test_file, sse_key=sse_key, is_master=is_master )
             # Ensure that we can't actually retrieve the object without specifying an encryption key
             try:
                 self._assert_key( test_file )
             except boto.exception.S3ResponseError as e:
                 self.assertEquals( e.status, 400 )
             else:
                 self.fail( 'S3ResponseError(400) should have been raised' )
             # If a per-file key was used ...
             if is_master:
                 # ... ensure that we can't retrieve the object with the master key.
                 try:
                     self._assert_key( test_file, sse_key=sse_key, is_master=False )
                 except boto.exception.S3ResponseError as e:
                     self.assertEquals( e.status, 403 )
                 else:
                     self.fail( 'S3ResponseError(403) should have been raised' )
             self._test_download( test_file, args=args )
         finally:
             self._clean_bucket( self.bucket )
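All of the snippets in this section build argument lists with a `concat` helper that splices iterables into a flat sequence while treating strings as atomic values. The real helper lives in the projects' shared utility libraries; the following is only a minimal, hypothetical sketch (Python 2, matching the snippets here) of the behavior these call sites assume, not the actual implementation.

def concat(*args):
    # Hypothetical stand-in for the flattening helper used throughout these examples.
    # Strings stay atomic; any other iterable is flattened lazily, so infinite iterators
    # (e.g. itertools.count) still work when the result is consumed with islice().
    for arg in args:
        if isinstance(arg, (str, unicode)) or not hasattr(arg, '__iter__'):
            yield arg
        else:
            for element in arg:
                for flattened in concat(element):
                    yield flattened

# list(concat('upload', ['--exists=overwrite'], 'src', 'dst'))
# == ['upload', '--exists=overwrite', 'src', 'dst']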
Example #2
0
    def test_existence_handling(self):
        test_file = self.test_files[two_and_a_half_parts]
        # upload a file to the remote bucket so that another upload call will require overwrite or
        # skip to be passed to --exists
        s3am.cli.main(
            concat('upload', verbose, slots, self.ftp_url(test_file),
                   self.s3_url()))

        # Uploading to a key that already exists in the bucket without --exists=overwrite should fail
        self.assertRaises(
            s3am.ObjectExistsError, s3am.cli.main,
            concat('upload', verbose, slots, self.ftp_url(test_file),
                   self.s3_url()))
        # Now try with --exists=skip. This should raise a SystemExit
        try:
            s3am.cli.main(
                concat('upload', verbose, slots, '--exists=skip',
                       self.ftp_url(test_file), self.s3_url()))
        except SystemExit as err:
            self.assertEqual(err.code, 0)
        else:
            self.fail()
        # Now try with --exists=overwrite. This should pass.
        s3am.cli.main(
            concat('upload', verbose, slots, '--exists=overwrite',
                   self.ftp_url(test_file), self.s3_url()))
Example #3
0
 def test_download_resume(self):
     test_file = self.test_files[two_and_a_half_parts]
     s3am.cli.main(
         concat('upload', verbose, slots, self.ftp_url(test_file),
                self.s3_url()))
     self._assert_key(test_file)
     s3am.operations.Download.simulate_error(rate=1.0)
     try:
         self.assertRaises(s3am.WorkerException,
                           self._test_download,
                           test_file,
                           cleanup=None)
         # Change etag without actually changing the content (etag = f(part_size,content))
         s3am.cli.main(
             concat('upload', verbose, slots, '--exists=overwrite',
                    '--part-size', str(part_size * 2),
                    self.ftp_url(test_file), self.s3_url()))
         s3am.operations.Download.simulate_error(rate=0.75)
         while True:
             try:
                 self._test_download(test_file,
                                     cleanup=None,
                                     args=['--download-exists=resume'])
             except s3am.WorkerException:
                 pass
             else:
                 break
     finally:
         s3am.operations.Download.simulate_error(rate=None)
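The "etag = f(part_size, content)" comment above relies on how S3 computes multipart ETags in practice: the MD5 of the concatenated binary per-part digests, suffixed with the part count, so re-uploading identical content with a different part size changes the ETag. A standalone, illustrative sketch of that computation (not part of the test suite):

import hashlib

def multipart_etag(data, part_size):
    # S3-style multipart ETag: md5 of the concatenated per-part md5 digests, plus '-<part count>'.
    digests = [hashlib.md5(data[i:i + part_size]).digest()
               for i in range(0, len(data), part_size)]
    return '%s-%i' % (hashlib.md5(''.join(digests)).hexdigest(), len(digests))

# multipart_etag(content, part_size) != multipart_etag(content, 2 * part_size)
# whenever the content spans more than one part.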
Example #4
0
 def test_copy(self):
     # setup already created the destination bucket
     dst_bucket_name = self.test_bucket_name
     src_bucket_name = dst_bucket_name + '-src'
     with closing(s3am.boto_utils.s3_connect_to_region(
             copy_bucket_region)) as s3:
         src_location = s3am.boto_utils.region_to_bucket_location(
             copy_bucket_region)
         src_bucket = s3.create_bucket(src_bucket_name,
                                       location=src_location)
         try:
             self._clean_bucket(src_bucket)
             for test_file in self.test_files.values():
                 src_url = self.ftp_url(test_file)
                 src_sse_key = '-0123456789012345678901234567890'
                 dst_sse_key = 'skdjfh9q4rusidfjs9fjsdr9vkfdh833'
                 dst_url = self.s3_url(test_file, src_bucket_name)
                 s3am.cli.main(
                     concat('upload', verbose, slots, src_url, dst_url,
                            '--sse-key=' + src_sse_key,
                            '--sse-key-is-master'))
                 src_url = dst_url
                 dst_url = self.s3_url()
                 s3am.cli.main(
                     concat('upload', verbose, slots, src_url, dst_url,
                            '--src-sse-key=' + src_sse_key,
                            '--src-sse-key-is-master',
                            '--sse-key=' + dst_sse_key))
                 self._assert_key(test_file, dst_sse_key)
         finally:
             self._clean_bucket(src_bucket)
             src_bucket.delete()
Example #5
0
    def test_force(self):
        test_file = self.test_files[two_and_a_half_parts]
        src_url = self.ftp_url(test_file)

        # Run with a simulated download failure
        UnreliableHandler.setup_for_failure_at(int(0.75 * test_file.size))
        try:
            s3am.cli.main(
                concat('upload', verbose, one_slot, src_url, self.s3_url()))
        except s3am.WorkerException:
            pass
        else:
            self.fail()

        # Retrying without --force should fail
        self.assertRaises(
            s3am.UploadExistsError, s3am.cli.main,
            concat('upload', verbose, slots, src_url, self.s3_url()))

        # Retrying with --force should succeed. We use a different part size to ensure that
        # the transfer is indeed started from scratch, not resumed.
        s3am.cli.main(
            concat('upload', verbose, slots, src_url, self.s3_url(), '--force',
                   '--part-size', str(2 * part_size)))

        # Test force with uploading a key that already exists in the bucket
        self._test_force_resume_overwrites(force_or_resume='force',
                                           test_file=test_file,
                                           src_url=src_url)
Example #6
0
    def test_resume(self):
        test_file = self.test_files[two_and_a_half_parts]
        src_url = self.ftp_url(test_file)

        # Run with a simulated download failure
        UnreliableHandler.setup_for_failure_at(int(0.75 * test_file.size))
        self.assertRaises(
            s3am.WorkerException, s3am.cli.main,
            concat('upload', verbose, one_slot, src_url, self.s3_url()))

        # Retrying without --resume should fail
        self.assertRaises(
            s3am.UploadExistsError, s3am.cli.main,
            concat('upload', verbose, slots, src_url, self.s3_url()))

        # Retrying with --resume and different part size should fail
        self.assertRaises(
            s3am.IncompatiblePartSizeError, s3am.cli.main,
            concat('upload', verbose, slots, src_url, self.s3_url(),
                   '--resume', '--part-size', str(2 * part_size)))

        # Retry
        s3am.cli.main(
            concat('upload', verbose, slots, src_url, self.s3_url(),
                   '--resume'))

        # FIXME: We should assert that the resume skips existing parts
        self._assert_key(test_file)

        self._test_force_resume_overwrites(force_or_resume='resume',
                                           test_file=test_file,
                                           src_url=src_url)
Example #7
0
 def test_verify(self):
     for test_file in self.test_files.values():
         s3am.cli.main(
             concat('upload', verbose, slots, self.ftp_url(test_file),
                    self.s3_url()))
         self._assert_key(test_file)
         buffer_size = s3am.operations.verify_buffer_size
         for verify_part_size in {buffer_size, part_size}:
             md5 = s3am.cli.main(
                 concat('verify', '--part-size', str(verify_part_size),
                        self.s3_url(test_file)))
             self.assertEqual(test_file.md5, md5)
Example #8
0
 def test_invalid_file_urls(self):
     test_file = self.test_files[1]
     for url_prefix in ('file:/', ):
         self.assertRaises(
             s3am.InvalidSourceURLError, s3am.cli.main,
             concat('upload', verbose, slots, url_prefix + test_file.path,
                    self.s3_url()))
Example #9
0
    def _run(cls, command, *args, **kwargs):
        """
        Run a command. Convenience wrapper for subprocess.check_call and subprocess.check_output.

        :param str command: The command to be run.

        :param str args: Any arguments to be passed to the command.

        :param Any kwargs: keyword arguments for subprocess.Popen constructor. Pass capture=True
               to have the process' stdout returned. Pass input='some string' to feed input to the
               process' stdin.

        :rtype: None|str

        :return: The output of the process' stdout if capture=True was passed, None otherwise.
        """
        args = list(concat(command, args))
        log.info('Running %r', args)
        capture = kwargs.pop('capture', False)
        _input = kwargs.pop('input', None)
        if capture:
            kwargs['stdout'] = subprocess.PIPE
        if _input is not None:
            kwargs['stdin'] = subprocess.PIPE
        popen = subprocess.Popen(args, **kwargs)
        stdout, stderr = popen.communicate(input=_input)
        assert stderr is None
        if popen.returncode != 0:
            raise subprocess.CalledProcessError(popen.returncode, args)
        if capture:
            return stdout
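A hedged usage sketch for the wrapper above, illustrating the capture and input keywords described in its docstring (the class name and commands are made up for illustration; only the keyword semantics come from the code above):

# Hypothetical call sites, assuming _run() is a classmethod on some test class T:
head = T._run('git', 'rev-parse', 'HEAD', capture=True)   # stdout is captured and returned
T._run('bash', '-s', input='echo hello\n')                # string is fed to the child's stdin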
Example #10
0
 def test_file_urls(self):
     test_file = self.test_files[1]
     for url_prefix in 'file:', 'file://', 'file://localhost':
         s3am.cli.main(
             concat('upload', '--exists=overwrite', verbose, slots,
                    url_prefix + test_file.path, self.s3_url()))
         self._assert_key(test_file)
Example #11
0
 def _runParasol(self, command, autoRetry=True):
     """
     Issues a parasol command using popen to capture the output. If the command fails, it
     will try pinging parasol until it gets a response, then reissue the parasol command,
     repeating this pattern for a maximum of N times. The final exit value will reflect this.
     """
     command = list(concat(self.parasolCommand, command))
     while True:
         logger.debug('Running %r', command)
         process = subprocess.Popen(command,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    bufsize=-1)
         stdout, stderr = process.communicate()
         status = process.wait()
         for line in stderr.split('\n'):
             if line: logger.warn(line)
         if status == 0:
             return 0, stdout.split('\n')
         message = 'Command %r failed with exit status %i' % (command, status)
         if autoRetry:
             logger.warn(message)
         else:
             logger.error(message)
             return status, None
         logger.warn('Waiting 10s before trying again')
         time.sleep(10)
Example #12
0
 def apply_workers( ):
     log.info( '=== Performing %s on workers ===', operation )
     instances = first_worker.list( leader_instance_id=leader.instance_id )
     papply( apply_worker,
             pool_size=pool_size,
             seq=zip( concat( first_worker, clones( ) ),
                      (i.id for i in instances) ) )
Example #13
0
    def test_generate_key(self):
        test_dir = mkdtemp('test_genkey')
        key_file = os.path.join(test_dir, 'test.key')

        def entropy(string):
            """
            Calculates the Shannon entropy of a string
            http://stackoverflow.com/questions/2979174/how-do-i-compute-the-approximate-entropy-of-a-bit-string

            :param str string: The string for which entropy must be calculated
            """
            # Get probability of chars in string
            prob = [
                float(string.count(c)) / len(string)
                for c in dict.fromkeys(list(string))
            ]
            # Calculate the entropy
            entropy = -sum([p * math.log(p) / math.log(2.0) for p in prob])
            return entropy

        try:
            for i in range(0, 10):
                s3am.cli.main(concat('generate-sse-key', key_file))
                self.assertTrue(os.path.exists(key_file))
                self.assertEqual(os.stat(key_file).st_size, 32)
                with open(key_file) as k_f:
                    self.assertGreater(entropy(k_f.read()), 0)
                os.remove(key_file)
        finally:
            shutil.rmtree(test_dir)
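As a sanity check on the entropy formula used above: a string drawn uniformly from two symbols carries exactly one bit per character. A small standalone sketch of the same computation (equivalent to the nested entropy() helper, using set() instead of dict.fromkeys() for the distinct characters):

import math

def shannon_entropy(string):
    # Probability of each distinct character, then the usual -sum(p * log2(p)).
    prob = [float(string.count(c)) / len(string) for c in set(string)]
    return -sum(p * math.log(p, 2) for p in prob)

assert abs(shannon_entropy('0011') - 1.0) < 1e-9  # two equally likely symbols -> 1 bit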
Example #14
0
def pip(args, path='pip', use_sudo=False):
    """
    Run pip.

    :param args: a string or sequence of strings to be passed to pip as command line arguments.
    If given a sequence of strings, its elements will be quoted if necessary and joined with a
    single space in between.

    :param path: the path to pip

    :param use_sudo: whether to run pip as sudo
    """
    if isinstance(args, (str, unicode)):
        command = path + ' ' + args
    else:
        command = join_argv(concat(path, args))
    # Disable pseudo terminal creation to prevent pip from spamming output with progress bar.
    kwargs = Expando(pty=False)
    if use_sudo:
        f = sudo
        # Set HOME so pip's cache doesn't go into real user's home, potentially creating files
        # not owned by that user (older versions of pip) or printing a warning about caching
        # being disabled.
        kwargs.sudo_args = '-H'
    else:
        f = run
    f(command, **kwargs)
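Both argument forms described in the docstring appear elsewhere in this section; as a reminder, the calls look roughly like this (tools_dir and artifacts are defined in the surrounding examples, e.g. Example #29):

# String form: passed to pip verbatim after the path.
pip('install --upgrade pip==1.5.2', use_sudo=True)
# Sequence form: elements are quoted as needed and joined by join_argv().
pip(use_sudo=True, path=tools_dir + '/bin/pip', args=concat('install', artifacts))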
Example #15
0
 def _runParasol(self, command, autoRetry=True):
     """
     Issues a parasol command using popen to capture the output. If the command fails, it
     will try pinging parasol until it gets a response, then reissue the parasol command,
     repeating this pattern for a maximum of N times. The final exit value will reflect this.
     """
     command = list(concat(self.parasolCommand, command))
     while True:
         logger.debug('Running %r', command)
         process = subprocess.Popen(command,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    bufsize=-1)
         stdout, stderr = process.communicate()
         status = process.wait()
         for line in stderr.split('\n'):
             if line: logger.warn(line)
         if status == 0:
             return 0, stdout.split('\n')
         message = 'Command %r failed with exit status %i' % (command,
                                                              status)
         if autoRetry:
             logger.warn(message)
         else:
             logger.error(message)
             return status, None
         logger.warn('Waiting 10s before trying again')
         time.sleep(10)
Example #16
0
def pip( args, path='pip', use_sudo=False ):
    """
    Run pip.

    :param args: a string or sequence of strings to be passed to pip as command line arguments.
    If given a sequence of strings, its elements will be quoted if necessary and joined with a
    single space in between.

    :param path: the path to pip

    :param use_sudo: whether to run pip as sudo
    """
    if isinstance( args, (str, unicode) ):
        command = path + ' ' + args
    else:
        command = join_argv( concat( path, args ) )
    # Disable pseudo terminal creation to prevent pip from spamming output with progress bar.
    kwargs = Expando( pty=False )
    if use_sudo:
        f = sudo
        # Set HOME so pip's cache doesn't go into real user's home, potentially creating files
        # not owned by that user (older versions of pip) or printing a warning about caching
        # being disabled.
        kwargs.sudo_args = '-H'
    else:
        f = run
    f( command, **kwargs )
Example #17
0
    def _run(cls, command, *args, **kwargs):
        """
        Run a command. Convenience wrapper for subprocess.check_call and subprocess.check_output.

        :param str command: The command to be run.

        :param str args: Any arguments to be passed to the command.

        :param Any kwargs: keyword arguments for subprocess.Popen constructor. Pass capture=True
               to have the process' stdout returned. Pass input='some string' to feed input to the
               process' stdin.

        :rtype: None|str

        :return: The output of the process' stdout if capture=True was passed, None otherwise.
        """
        args = list(concat(command, args))
        log.info('Running %r', args)
        capture = kwargs.pop('capture', False)
        _input = kwargs.pop('input', None)
        if capture:
            kwargs['stdout'] = PIPE
        if _input is not None:
            kwargs['stdin'] = PIPE
        popen = Popen(args, **kwargs)
        stdout, stderr = popen.communicate(input=_input)
        assert stderr is None
        if popen.returncode != 0:
            raise CalledProcessError(popen.returncode, args)
        if capture:
            return stdout
Example #18
0
 def test_file_path(self):
     test_file = self.test_files[1]
     for path in test_file.path, os.path.relpath(test_file.path):
         s3am.cli.main(
             concat('upload', '--exists=overwrite', verbose, slots, path,
                    self.s3_url()))
         self._assert_key(test_file)
Example #19
0
 def __install_toil( self ):
     # FIXME: consider using a virtualenv for Toil like we do for s3am
     # Older versions of pip don't support the 'extra' mechanism used by Toil's setup.py
     pip( 'install --upgrade pip', use_sudo=True )
     pip( concat( 'install', self._toil_pip_args( ) ), use_sudo=True )
     self._lazy_mkdir( '/var/lib', 'toil', persistent=None )
     sudo( 'echo "TOIL_WORKDIR=/var/lib/toil" >> /etc/environment' )
Example #20
0
def mesos_service( name, user, *flags ):
    command = concat( '/usr/sbin/mesos-{name}', '--log_dir={log_dir}/mesos', flags )
    return Service(
        init_name='mesosbox-' + name,
        user=user,
        description=fmt( 'Mesos {name} service' ),
        command=fmt( ' '.join( command ) ) )
Example #21
0
 def setUp(self):
     self.workdir = tempfile.mkdtemp()
     self.fastq_url = "s3://cgl-pipeline-inputs/germline/ci/NIST7035_NIST7086.aln21.ci.1.fq"
     self.paired_url = "s3://cgl-pipeline-inputs/germline/ci/NIST7035_NIST7086.aln21.ci.2.fq"
     self.jobStore = os.getenv("TOIL_SCRIPTS_TEST_JOBSTORE", os.path.join(self.workdir, "jobstore-%s" % uuid4()))
     self.toilOptions = shlex.split(os.environ.get("TOIL_SCRIPTS_TEST_TOIL_OPTIONS", ""))
     self.base_command = concat("toil-germline", "run", self.toilOptions, self.jobStore)
Example #22
0
 def __install_toil(self):
     # FIXME: consider using a virtualenv for Toil like we do for s3am
     # Older versions of pip don't support the 'extra' mechanism used by Toil's setup.py
     pip('install --upgrade pip', use_sudo=True)
     pip(concat('install', self._toil_pip_args()), use_sudo=True)
     self._lazy_mkdir('/var/lib', 'toil', persistent=None)
     sudo('echo "TOIL_WORKDIR=/var/lib/toil" >> /etc/environment')
Example #23
0
 def test_download(self):
     for test_file in self.test_files.values():
         s3am.cli.main(
             concat('upload', verbose, slots, self.ftp_url(test_file),
                    self.s3_url()))
         self._assert_key(test_file)
         self._test_download(test_file)
Example #24
0
    def _test_download(self, test_file, cleanup=True, args=()):
        """
        :param bool|None cleanup: True, False or None to clean up always, never or only on success
        """
        dl_slots = ('--download-slots', str(num_slots), '--checksum-slots',
                    str(num_slots))
        dst_path = test_file.path + '.downloaded'

        def _cleanup():
            for suffix in ('', '.partial', '.progress'):
                try:
                    os.unlink(dst_path + suffix)
                except OSError as e:
                    if e.errno == errno.ENOENT:
                        pass
                    else:
                        raise

        try:
            s3am.cli.main(
                concat('download', verbose, dl_slots, '--part-size',
                       str(part_size), args, self.s3_url(test_file),
                       'file://' + dst_path))
            self._assert_file(test_file, dst_path)
        except:
            if cleanup:
                _cleanup()
            raise
        else:
            if cleanup is not False:
                _cleanup()
Example #25
0
    def testRestart(self):
        """
        Test whether hot-deployment works on restart.
        """
        with self._venvApplianceCluster() as (leader, worker):

            def userScript():
                from toil.job import Job
                from toil.common import Toil

                # noinspection PyUnusedLocal
                def job(job, disk='10M', cores=1, memory='10M'):
                    assert False

                if __name__ == '__main__':
                    options = Job.Runner.getDefaultArgumentParser().parse_args(
                    )
                    with Toil(options) as toil:
                        if toil.config.restart:
                            toil.restart()
                        else:
                            toil.start(Job.wrapJobFn(job))

            userScript = self._getScriptSource(userScript)

            leader.deployScript(path=self.sitePackages,
                                packagePath='foo.bar',
                                script=userScript)

            pythonArgs = ['venv/bin/python', '-m', 'foo.bar']
            toilArgs = [
                '--logDebug', '--batchSystem=mesos',
                '--mesosMaster=localhost:5050', '--defaultMemory=10M',
                '/data/jobstore'
            ]
            command = concat(pythonArgs, toilArgs)
            self.assertRaises(CalledProcessError, leader.runOnAppliance,
                              *command)

            # Deploy an updated version of the script ...
            userScript = userScript.replace('assert False', 'assert True')
            leader.deployScript(path=self.sitePackages,
                                packagePath='foo.bar',
                                script=userScript)
            # ... and restart Toil.
            command = concat(pythonArgs, '--restart', toilArgs)
            leader.runOnAppliance(*command)
Example #26
0
 def _getAllInstances(self):
     """
     :rtype: Iterable[Instance]
     """
     reservations = self._ec2.get_all_reservations(filters={
         'Tag:leader_instance_id': self._instanceId,
         'instance-state-name': 'running'})
     return concat(r.instances for r in reservations)
Example #27
0
 def _ssh(cls, role, *args, **kwargs):
     try:
         admin = kwargs["admin"]
     except KeyError:
         admin = False
     else:
         del kwargs["admin"]
     cls._cgcloud(*filter(None, concat("ssh", "-a" if admin else None, role, cls.sshOptions, args)), **kwargs)
Example #28
0
    def issueBatchJob(self, command, memory, cores, disk, preemptable):
        """
        Issues parasol with job commands.
        """
        self.checkResourceRequest(memory, cores, disk)

        MiB = 1 << 20
        truncatedMemory = (memory / MiB) * MiB
        # Look for a batch for jobs with these resource requirements, with
        # the memory rounded down to the nearest megabyte. Rounding down
        # means the new job can't ever decrease the memory requirements
        # of jobs already in the batch.
        if len(self.resultsFiles) >= self.maxBatches:
            raise RuntimeError( 'Number of batches reached limit of %i' % self.maxBatches)
        try:
            results = self.resultsFiles[(truncatedMemory, cores)]
        except KeyError:
            results = getTempFile(rootDir=self.parasolResultsDir)
            self.resultsFiles[(truncatedMemory, cores)] = results

        # Prefix the command with environment overrides, optionally looking them up from the
        # current environment if the value is None
        command = ' '.join(concat('env', self.__environment(), command))
        parasolCommand = ['-verbose',
                          '-ram=%i' % memory,
                          '-cpu=%i' % cores,
                          '-results=' + results,
                          'add', 'job', command]
        # Deal with the cpus
        self.usedCpus += cores
        while True:  # Process finished results with no wait
            try:
                jobID = self.cpuUsageQueue.get_nowait()
            except Empty:
                break
            if jobID in self.jobIDsToCpu.keys():
                self.usedCpus -= self.jobIDsToCpu.pop(jobID)
            assert self.usedCpus >= 0
        while self.usedCpus > self.maxCores:  # If we are still waiting
            jobID = self.cpuUsageQueue.get()
            if jobID in self.jobIDsToCpu.keys():
                self.usedCpus -= self.jobIDsToCpu.pop(jobID)
            assert self.usedCpus >= 0
        # Now keep going
        while True:
            line = self._runParasol(parasolCommand)[1][0]
            match = self.parasolOutputPattern.match(line)
            if match is None:
                # This is because parasol add job will return success, even if the job was not
                # properly issued!
                logger.info('We failed to properly add the job; we will try again in 5s.')
                time.sleep(5)
            else:
                jobID = int(match.group(1))
                self.jobIDsToCpu[jobID] = cores
                self.runningJobs.add(jobID)
                logger.debug("Got the parasol job id: %s from line: %s" % (jobID, line))
                return jobID
Example #29
0
    def __install_tools(self):
        """
        Installs the mesos-master-discovery init script and its companion mesos-tools. The latter
        is a Python package distribution that's included in cgcloud-mesos as a resource. This is
        in contrast to the cgcloud agent, which is a standalone distribution.
        """
        tools_dir = install_dir + '/tools'
        admin = self.admin_account()
        sudo(fmt('mkdir -p {tools_dir}'))
        sudo(fmt('chown {admin}:{admin} {tools_dir}'))
        run(fmt('virtualenv --no-pip {tools_dir}'))
        run(fmt('{tools_dir}/bin/easy_install pip==1.5.2'))

        with settings(forward_agent=True):
            with self._project_artifacts('mesos-tools') as artifacts:
                pip(use_sudo=True,
                    path=tools_dir + '/bin/pip',
                    args=concat('install', artifacts))
        sudo(fmt('chown -R root:root {tools_dir}'))

        mesos_tools = "MesosTools(**%r)" % dict(user=user,
                                                shared_dir=self._shared_dir(),
                                                ephemeral_dir=ephemeral_dir,
                                                persistent_dir=persistent_dir,
                                                lazy_dirs=self.lazy_dirs)

        self.lazy_dirs = None  # make sure it can't be used anymore once we are done with it

        self._register_init_script(
            "mesosbox",
            heredoc("""
                description "Mesos master discovery"
                console log
                start on (local-filesystems and net-device-up IFACE!=lo)
                stop on runlevel [!2345]
                pre-start script
                for i in 1 2 3; do if {tools_dir}/bin/python2.7 - <<END
                import logging
                logging.basicConfig( level=logging.INFO )
                from cgcloud.mesos_tools import MesosTools
                mesos_tools = {mesos_tools}
                mesos_tools.start()
                END
                then exit 0; fi; echo Retrying in 60s; sleep 60; done; exit 1
                end script
                post-stop script
                {tools_dir}/bin/python2.7 - <<END
                import logging
                logging.basicConfig( level=logging.INFO )
                from cgcloud.mesos_tools import MesosTools
                mesos_tools = {mesos_tools}
                mesos_tools.stop()
                END
                end script"""))
        # Explicitly start the mesosbox service to achieve creation of lazy directories right
        # now. This makes a generic mesosbox useful for ad-hoc tests that involve Mesos and Toil.
        self._run_init_script('mesosbox')
Example #30
0
    def __install_tools( self ):
        """
        Installs the mesos-master-discovery init script and its companion mesos-tools. The latter
        is a Python package distribution that's included in cgcloud-mesos as a resource. This is
        in contrast to the cgcloud agent, which is a standalone distribution.
        """
        tools_dir = install_dir + '/tools'
        admin = self.admin_account( )
        sudo( fmt( 'mkdir -p {tools_dir}' ) )
        sudo( fmt( 'chown {admin}:{admin} {tools_dir}' ) )
        run( fmt( 'virtualenv --no-pip {tools_dir}' ) )
        run( fmt( '{tools_dir}/bin/easy_install pip==1.5.2' ) )

        with settings( forward_agent=True ):
            with self._project_artifacts( 'mesos-tools' ) as artifacts:
                pip( use_sudo=True,
                     path=tools_dir + '/bin/pip',
                     args=concat( 'install', artifacts ) )
        sudo( fmt( 'chown -R root:root {tools_dir}' ) )

        mesos_tools = "MesosTools(**%r)" % dict( user=user,
                                                 shared_dir=self._shared_dir( ),
                                                 ephemeral_dir=ephemeral_dir,
                                                 persistent_dir=persistent_dir,
                                                 lazy_dirs=self.lazy_dirs )

        self.lazy_dirs = None  # make sure it can't be used anymore once we are done with it

        self._register_init_script(
            "mesosbox",
            heredoc( """
                description "Mesos master discovery"
                console log
                start on (local-filesystems and net-device-up IFACE!=lo)
                stop on runlevel [!2345]
                pre-start script
                for i in 1 2 3; do if {tools_dir}/bin/python2.7 - <<END
                import logging
                logging.basicConfig( level=logging.INFO )
                from cgcloud.mesos_tools import MesosTools
                mesos_tools = {mesos_tools}
                mesos_tools.start()
                END
                then exit 0; fi; echo Retrying in 60s; sleep 60; done; exit 1
                end script
                post-stop script
                {tools_dir}/bin/python2.7 - <<END
                import logging
                logging.basicConfig( level=logging.INFO )
                from cgcloud.mesos_tools import MesosTools
                mesos_tools = {mesos_tools}
                mesos_tools.stop()
                END
                end script""" ) )
        # Explicitly start the mesosbox service to achieve creation of lazy directories right
        # now. This makes a generic mesosbox useful for ad-hoc tests that involve Mesos and Toil.
        self._run_init_script( 'mesosbox' )
Example #31
0
 def setUp(self):
     self.workdir = tempfile.mkdtemp()
     self.fastq_url = 's3://cgl-pipeline-inputs/germline/ci/NIST7035_NIST7086.aln21.ci.1.fq'
     self.paired_url = 's3://cgl-pipeline-inputs/germline/ci/NIST7035_NIST7086.aln21.ci.2.fq'
     self.jobStore = os.getenv('TOIL_SCRIPTS_TEST_JOBSTORE', os.path.join(self.workdir, 'jobstore-%s' % uuid4()))
     self.toilOptions = shlex.split(os.environ.get('TOIL_SCRIPTS_TEST_TOIL_OPTIONS', ''))
     self.base_command = concat('toil-germline', 'run',
                                self.toilOptions,
                                self.jobStore)
Example #32
0
    def testRestart(self):
        """
        Test whether hot-deployment works on restart.
        """
        with self._venvApplianceCluster() as (leader, worker):
            def userScript():
                from toil.job import Job
                from toil.common import Toil

                # noinspection PyUnusedLocal
                def job(job, disk='10M', cores=1, memory='10M'):
                    assert False

                if __name__ == '__main__':
                    options = Job.Runner.getDefaultArgumentParser().parse_args()
                    with Toil(options) as toil:
                        if toil.config.restart:
                            toil.restart()
                        else:
                            toil.start(Job.wrapJobFn(job))

            userScript = self._getScriptSource(userScript)

            leader.deployScript(path=self.sitePackages,
                                packagePath='foo.bar',
                                script=userScript)

            pythonArgs = ['venv/bin/python', '-m', 'foo.bar']
            toilArgs = ['--logDebug',
                        '--batchSystem=mesos',
                        '--mesosMaster=localhost:5050',
                        '--defaultMemory=10M',
                        '/data/jobstore']
            command = concat(pythonArgs, toilArgs)
            self.assertRaises(CalledProcessError, leader.runOnAppliance, *command)

            # Deploy an updated version of the script ...
            userScript = userScript.replace('assert False', 'assert True')
            leader.deployScript(path=self.sitePackages,
                                packagePath='foo.bar',
                                script=userScript)
            # ... and restart Toil.
            command = concat(pythonArgs, '--restart', toilArgs)
            leader.runOnAppliance(*command)
Example #33
0
 def cgcloudVenv(self):
     path = self._createTempDir(purpose='cgcloud-venv')
     self._run('virtualenv', path)
     binPath = os.path.join(path, 'bin')
     path = os.environ['PATH']
     os.environ['PATH'] = os.pathsep.join(concat(binPath, path.split(os.pathsep)))
     try:
         yield
     finally:
         os.environ['PATH'] = path
Example #34
0
    def log_ssh_hint(self, options):
        hint = self.ssh_hint(options)

        def opt(name, value, default):
            return name + ' ' + value if value != default else None

        cmd = concat(hint.executable, hint.command,
                     (opt(**option) for option in hint.options), hint.args)
        cmd = ' '.join(filter(None, cmd))
        log.info("Run '%s' to start using this %s.", cmd, hint.object)
Example #35
0
 def _getAllInstances(self):
     """
     :rtype: Iterable[Instance]
     """
     reservations = self._ec2.get_all_reservations(
         filters={
             'Tag:leader_instance_id': self._instanceId,
             'instance-state-name': 'running'
         })
     return concat(r.instances for r in reservations)
Example #36
0
 def test_multiple_uploads( self ):
     test_file = self.test_files[ two_and_a_half_parts ]
     src_url = self.ftp_url( test_file )
     upload1 = self.bucket.initiate_multipart_upload( test_file.name )
     try:
         upload2 = self.bucket.initiate_multipart_upload( test_file.name )
         try:
             self.assertRaises( s3am.MultipleUploadsExistError,
                                s3am.cli.main,
                                concat( 'upload', src_url, self.s3_url( ) ) )
             self.assertRaises( s3am.MultipleUploadsExistError,
                                s3am.cli.main,
                                concat( 'upload', '--resume', src_url, self.s3_url( ) ) )
         finally:
             upload2.cancel_upload( )
     finally:
         upload1.cancel_upload( )
     s3am.cli.main( concat( 'upload', '--force', verbose, src_url, self.s3_url( ) ) )
     self._assert_key( test_file )
Example #37
0
    def test_cancel(self):
        test_file = self.test_files[two_and_a_half_parts]
        src_url = self.ftp_url(test_file)

        # Run with a simulated download failure
        UnreliableHandler.setup_for_failure_at(int(0.75 * test_file.size))
        self.assertRaises(
            s3am.WorkerException, s3am.cli.main,
            concat('upload', verbose, one_slot, src_url, self.s3_url()))

        # A retry without --resume should fail.
        self.assertRaises(
            s3am.UploadExistsError, s3am.cli.main,
            concat('upload', verbose, slots, src_url, self.s3_url()))

        # Cancel
        s3am.cli.main(concat('cancel', verbose, self.s3_url(test_file)))

        # Retry, should succeed
        s3am.cli.main(concat('upload', verbose, slots, src_url, self.s3_url()))
Example #38
0
 def setUp(self):
     self.workdir = tempfile.mkdtemp()
     self.fastq_url = 's3://cgl-pipeline-inputs/germline/ci/NIST7035_NIST7086.aln21.ci.1.fq'
     self.paired_url = 's3://cgl-pipeline-inputs/germline/ci/NIST7035_NIST7086.aln21.ci.2.fq'
     self.jobStore = os.getenv(
         'TOIL_SCRIPTS_TEST_JOBSTORE',
         os.path.join(self.workdir, 'jobstore-%s' % uuid4()))
     self.toilOptions = shlex.split(
         os.environ.get('TOIL_SCRIPTS_TEST_TOIL_OPTIONS', ''))
     self.base_command = concat('toil-germline', 'run', self.toilOptions,
                                self.jobStore)
Example #39
0
 def _runningOnWorker(self):
     try:
         mainModule = sys.modules['__main__']
     except KeyError:
         log.warning('Cannot determine main program module.')
         return False
     else:
         mainModuleFile = os.path.basename(mainModule.__file__)
         workerModuleFiles = concat(('worker' + ext for ext in self.moduleExtensions),
                                    '_toil_worker')  # the setuptools entry point
         return mainModuleFile in workerModuleFiles
Example #40
0
 def _runningOnWorker(self):
     try:
         mainModule = sys.modules['__main__']
     except KeyError:
         log.warning('Cannot determine main program module.')
         return False
     else:
         mainModuleFile = os.path.basename(mainModule.__file__)
         workerModuleFiles = concat(('worker' + ext for ext in self.moduleExtensions),
                                    '_toil_worker')  # the setuptools entry point
         return mainModuleFile in workerModuleFiles
Example #41
0
 def _ssh(cls, role, *args, **kwargs):
     try:
         admin = kwargs['admin']
     except KeyError:
         admin = False
     else:
         del kwargs['admin']
     cls._cgcloud(
         *filter(
             None,
             concat('ssh', '-a' if admin else None, role, cls.sshOptions,
                    args)), **kwargs)
Example #42
0
 def setUp(self):
     self.input_dir = urlparse('s3://cgl-pipeline-inputs/rnaseq_cgl/ci')
     self.output_dir = urlparse('s3://cgl-driver-projects/test/ci/%s' % uuid4())
     self.sample = urlparse(self.input_dir.geturl() + '/chr6_sample.tar.gz')
     self.workdir = tempfile.mkdtemp()
     jobStore = os.getenv('TOIL_SCRIPTS_TEST_JOBSTORE', os.path.join(self.workdir, 'jobstore-%s' % uuid4()))
     toilOptions = shlex.split(os.environ.get('TOIL_SCRIPTS_TEST_TOIL_OPTIONS', ''))
     self.base_command = concat('toil-rnaseq', 'run',
                                '--config', self._generate_config(),
                                '--retryCount', '1',
                                toilOptions,
                                jobStore)
Example #43
0
    def log_ssh_hint( self, options ):
        hint = self.ssh_hint( options )

        def opt( name, value, default ):
            return name + ' ' + value if value != default else None

        cmd = concat( hint.executable,
                      hint.command,
                      (opt( **option ) for option in hint.options),
                      hint.args )
        cmd = ' '.join( filter( None, cmd ) )
        log.info( "Run '%s' to start using this %s.", cmd, hint.object )
Example #44
0
 def _ssh(cls, role, *args, **kwargs):
     try:
         admin = kwargs['admin']
     except KeyError:
         admin = False
     else:
         del kwargs['admin']
     cls._cgcloud(*filter(None, concat('ssh',
                                       '-a' if admin else None,
                                       role,
                                       cls.sshOptions,
                                       args)), **kwargs)
Example #45
0
 def _test_force_resume_overwrites( self, force_or_resume, test_file, src_url ):
     assert force_or_resume in ('force', 'resume')
     force_or_resume = '--' + force_or_resume
     # Run with a simulated download failure to create an unfinished upload
     UnreliableHandler.setup_for_failure_at( int( 0.75 * test_file.size ) )
     self.assertRaises( s3am.WorkerException,
                        s3am.cli.main,
                        concat( 'upload', '--exists=overwrite', verbose, one_slot,
                                src_url, self.s3_url( ) ) )
      # Running with neither --force/--resume nor --exists=overwrite should fail
     self.assertRaises( s3am.ObjectExistsError,
                        s3am.cli.main,
                        concat( 'upload', verbose, slots, src_url, self.s3_url( ) ) )
     # Running without --resume/--force but with --exists=overwrite should fail
     self.assertRaises( s3am.UploadExistsError,
                        s3am.cli.main,
                        concat( 'upload', verbose, slots, src_url, self.s3_url( ),
                                '--exists=overwrite' ) )
     # Running with --force/--resume and --exists=overwrite should pass
     s3am.cli.main( concat(
         'upload', verbose, slots, src_url, self.s3_url( ),
         force_or_resume, '--exists=overwrite' ) )
Example #46
0
 def _resourcePath(self):
     """
     The path to the directory that should be used when shipping this module and its siblings
     around as a resource.
     """
     if '.' in self.name:
         return os.path.join(self.dirPath, self._rootPackage())
     else:
         initName = self._initModuleName(self.dirPath)
         if initName:
             raise ResourceException(
                 "Toil does not support loading a user script from a package directory. You "
                 "may want to remove %s from %s or invoke the user script as a module via "
                 "'PYTHONPATH=\"%s\" python -m %s.%s'." %
                 tuple(concat(initName, self.dirPath, os.path.split(self.dirPath), self.name)))
         return self.dirPath
Example #47
0
 def __enter__(self):
     with self.lock:
         image = self._getApplianceImage()
         # Omitting --rm, it's unreliable, see https://github.com/docker/docker/issues/16575
         args = list(concat('docker', 'run',
                            '--net=host',
                            '-i',
                            '--name=' + self.containerName,
                            ['--volume=%s:%s' % mount for mount in self.mounts.iteritems()],
                            image,
                            self._containerCommand()))
         log.info('Running %r', args)
         self.popen = subprocess.Popen(args)
     self.start()
     self.__wait_running()
     return self
Example #48
0
 def setUpClass(cls):
     logging.basicConfig(level=logging.INFO)
     super(AbstractCGCloudProvisionerTest, cls).setUpClass()
     cls.saved_cgcloud_plugins = os.environ.get("CGCLOUD_PLUGINS")
     os.environ["CGCLOUD_PLUGINS"] = "cgcloud.toil"
     cls.sdistPath = cls._getSourceDistribution()
     path = cls._createTempDirEx("cgcloud-venv")
     cls._run("virtualenv", path)
     binPath = os.path.join(path, "bin")
     cls.oldPath = os.environ["PATH"]
     os.environ["PATH"] = os.pathsep.join(concat(binPath, cls.oldPath.split(os.pathsep)))
     # Suppress pip's cache to avoid concurrency issues. Keep in mind that the concrete
     # subclasses may be run concurrently. See run_tests.sh.
     cls._run("pip", "install", "--no-cache", "cgcloud-toil==" + cgcloudVersion)
     if cls.createImage:
         cls._cgcloud("create", "-IT", "--option", "toil_sdists=%s[aws,mesos]" % cls.sdistPath, "toil-latest-box")
Example #49
0
    def __setup_agent( self ):
        availability_zone = self.ctx.availability_zone
        namespace = self.ctx.namespace
        ec2_keypair_globs = ' '.join( shell.quote( _ ) for _ in self.ec2_keypair_globs )
        accounts = ' '.join( [ self.admin_account( ) ] + self.other_accounts( ) )
        admin_account = self.admin_account( )
        run_dir = '/var/run/cgcloudagent'
        log_dir = '/var/log'
        install_dir = '/opt/cgcloudagent'

        # Lucid & CentOS 5 have an ancient pip
        pip( 'install --upgrade pip==1.5.2', use_sudo=True )
        pip( 'install --upgrade virtualenv', use_sudo=True )
        sudo( fmt( 'mkdir -p {install_dir}' ) )
        sudo( fmt( 'chown {admin_account}:{admin_account} {install_dir}' ) )
        # By default, virtualenv installs the latest version of pip. We want a specific
        # version, so we tell virtualenv not to install pip and then install that version of
        # pip using easy_install.
        run( fmt( 'virtualenv --no-pip {install_dir}' ) )
        run( fmt( '{install_dir}/bin/easy_install pip==1.5.2' ) )

        with settings( forward_agent=True ):
            venv_pip = install_dir + '/bin/pip'
            if self._enable_agent_metrics( ):
                pip( path=venv_pip, args='install psutil==3.4.1' )
            with self._project_artifacts( 'agent' ) as artifacts:
                pip( path=venv_pip,
                     args=concat( 'install',
                                  '--allow-external', 'argparse',  # needed on CentOS 5 and 6
                                  artifacts ) )

        sudo( fmt( 'mkdir {run_dir}' ) )
        script = self.__gunzip_base64_decode( run( fmt(
            '{install_dir}/bin/cgcloudagent'
            ' --init-script'
            ' --zone {availability_zone}'
            ' --namespace {namespace}'
            ' --accounts {accounts}'
            ' --keypairs {ec2_keypair_globs}'
            ' --user root'
            ' --group root'
            ' --pid-file {run_dir}/cgcloudagent.pid'
            ' --log-spill {log_dir}/cgcloudagent.out'
            '| gzip -c | base64' ) ) )
        self._register_init_script( 'cgcloudagent', script )
        self._run_init_script( 'cgcloudagent' )
Example #50
0
def main( args=None ):
    """
    This is the cgcloud entry point. It should be installed via setuptools.setup(entry_points=...)
    """
    root_logger = CGCloud.setup_logging( )
    try:
        plugins = os.environ.get( 'CGCLOUD_PLUGINS', '' ).strip( )
        plugins = concat( cgcloud.core,
                          [ plugin_module( plugin ) for plugin in plugins.split( ":" ) if plugin ] )
        app = CGCloud( plugins, root_logger )
        for plugin in plugins:
            if hasattr( plugin, 'command_classes' ):
                for command_class in plugin.command_classes( ):
                    app.add( command_class )
        app.run( args )
    except UserError as e:
        log.error( e.message )
        sys.exit( 255 )
Example #51
0
    def _runningOnWorker(self):
        try:
            mainModule = sys.modules['__main__']
        except KeyError:
            log.warning('Cannot determine main program module.')
            return False
        else:
            # If __file__ is not a valid attribute, it's because
            # toil is being run interactively, in which case
            # we can reasonably assume that we are not running
            # on a worker node.
            try:
                mainModuleFile = os.path.basename(mainModule.__file__)
            except AttributeError:
                return False

            workerModuleFiles = concat(('worker' + ext for ext in self.moduleExtensions),
                                       '_toil_worker')  # the setuptools entry point
            return mainModuleFile in workerModuleFiles
Example #52
0
 def setUpClass(cls):
     logging.basicConfig(level=logging.INFO)
     super(AbstractCGCloudProvisionerTest, cls).setUpClass()
     cls.saved_cgcloud_plugins = os.environ.get('CGCLOUD_PLUGINS')
     os.environ['CGCLOUD_PLUGINS'] = 'cgcloud.toil'
     cls.sdistPath = cls._getSourceDistribution()
     path = cls._createTempDirEx('cgcloud-venv')
     # --never-download prevents silent upgrades to pip, wheel and setuptools
     cls._run('virtualenv', '--never-download', path)
     binPath = os.path.join(path, 'bin')
     cls.oldPath = os.environ['PATH']
     os.environ['PATH'] = os.pathsep.join(concat(binPath, cls.oldPath.split(os.pathsep)))
     # Suppress pip's cache to avoid concurrency issues. Keep in mind that the concrete
     # subclasses may be run concurrently. See run_tests.sh.
     cls._run('pip', 'install', '--no-cache', 'cgcloud-toil==' + cgcloudVersion)
     if cls.createImage:
         cls._cgcloud('create', '-IT',
                      '--option', 'toil_sdists=%s[aws,mesos]' % cls.sdistPath,
                      'toil-latest-box')
Example #53
0
    def allocate_cluster_ordinals( num, used ):
        """
        Return an iterator containing a given number of unused cluster ordinals. The result is
        guaranteed to yield each ordinal exactly once, i.e. the result is set-like. The argument
        set and the result iterator will be disjoint. The sum of all ordinals in the argument and
        the result is guaranteed to be minimal, i.e. the function will first fill the gaps in the
        argument before allocating higher values. The result will yield ordinals in ascending order.

        :param int num: the number of ordinals to allocate
        :param set[int] used: a set of currently used ordinals
        :rtype: iterator

        >>> f = GrowClusterCommand.allocate_cluster_ordinals

        >>> list(f(0,set()))
        []
        >>> list(f(1,set()))
        [0]
        >>> list(f(0,{0}))
        []
        >>> list(f(1,{0}))
        [1]
        >>> list(f(0,{0,1}))
        []
        >>> list(f(1,{0,1}))
        [2]
        >>> list(f(0,{0,2}))
        []
        >>> list(f(1,{0,2}))
        [1]
        >>> list(f(2,{0,2}))
        [1, 3]
        >>> list(f(3,{0,2}))
        [1, 3, 4]
        """
        assert isinstance( used, set )
        first_free = max( used ) + 1 if used else 0
        complete = set( range( 0, len( used ) ) )
        gaps = sorted( complete - used )
        return islice( concat( gaps, count( first_free ) ), num )
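Tracing the last doctest by hand makes the gap-filling concrete: for used={0, 2} and num=3, first_free is 3, complete is {0, 1}, so gaps is [1] and the returned iterator yields 1, 3, 4. A standalone sketch of the core expression (concat() merely splices the two iterables here, so itertools.chain behaves the same way):

from itertools import chain, count, islice

used = {0, 2}
first_free = max(used) + 1 if used else 0        # 3
gaps = sorted(set(range(0, len(used))) - used)   # [1]
assert list(islice(chain(gaps, count(first_free)), 3)) == [1, 3, 4]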
Example #54
0
def mesos_service( name, *flags ):
    command = concat( '/usr/sbin/mesos-{name}', '--log_dir={log_dir}/mesos', flags )
    return Service(
        init_name='mesosbox-' + name,
        description=fmt( 'Mesos {name} service' ),
        command=fmt( ' '.join( command ) ) )
Example #55
0
    def __install_tools( self ):
        """
        Installs the spark-master-discovery init script and its companion spark-tools. The latter
        is a Python package distribution that's included in cgcloud-spark as a resource. This is
        in contrast to the cgcloud agent, which is a standalone distribution.
        """
        tools_dir = install_dir + '/tools'
        admin = self.admin_account( )
        sudo( fmt( 'mkdir -p {tools_dir}' ) )
        sudo( fmt( 'chown {admin}:{admin} {tools_dir}' ) )
        run( fmt( 'virtualenv --no-pip {tools_dir}' ) )
        run( fmt( '{tools_dir}/bin/easy_install pip==1.5.2' ) )

        with settings( forward_agent=True ):
            with self._project_artifacts( 'spark-tools' ) as artifacts:
                pip( use_sudo=True,
                     path=tools_dir + '/bin/pip',
                     args=concat( 'install', artifacts ) )
        sudo( fmt( 'chown -R root:root {tools_dir}' ) )

        spark_tools = "SparkTools(**%r)" % dict( user=user,
                                                 shared_dir=self._shared_dir( ),
                                                 install_dir=install_dir,
                                                 ephemeral_dir=ephemeral_dir,
                                                 persistent_dir=persistent_dir,
                                                 lazy_dirs=self.lazy_dirs )

        self.lazy_dirs = None  # make sure it can't be used anymore once we are done with it

        self._register_init_script(
            "sparkbox",
            heredoc( """
                description "Spark/HDFS master discovery"
                console log
                start on (local-filesystems and net-device-up IFACE!=lo)
                stop on runlevel [!2345]
                pre-start script
                for i in 1 2 3; do if {tools_dir}/bin/python2.7 - <<END
                import logging
                logging.basicConfig( level=logging.INFO )
                from cgcloud.spark_tools import SparkTools
                spark_tools = {spark_tools}
                spark_tools.start()
                END
                then exit 0; fi; echo Retrying in 60s; sleep 60; done; exit 1
                end script
                post-stop script
                {tools_dir}/bin/python2.7 - <<END
                import logging
                logging.basicConfig( level=logging.INFO )
                from cgcloud.spark_tools import SparkTools
                spark_tools = {spark_tools}
                spark_tools.stop()
                END
                end script""" ) )

        script_path = "/usr/local/bin/sparkbox-manage-slaves"
        put( remote_path=script_path, use_sudo=True, local_path=StringIO( heredoc( """
            #!{tools_dir}/bin/python2.7
            import sys
            import logging
            # Prefix each log line to make it more obvious that it's the master logging when the
            # slave calls this script via ssh.
            logging.basicConfig( level=logging.INFO,
                                 format="manage_slaves: " + logging.BASIC_FORMAT )
            from cgcloud.spark_tools import SparkTools
            spark_tools = {spark_tools}
            spark_tools.manage_slaves( slaves_to_add=sys.argv[1:] )""" ) ) )
        sudo( fmt( "chown root:root {script_path} && chmod 755 {script_path}" ) )
Example #56
0
 def __install_toil( self ):
     # Older versions of pip don't support the 'extra' mechanism used by Toil's setup.py
     pip( 'install --upgrade pip', use_sudo=True )
     pip( 'install --pre s3am', use_sudo=True )
     pip( concat( 'install', self._toil_pip_args( ) ), use_sudo=True )
     self._lazy_mkdir( '/var/lib', 'toil', persistent=None )
Example #57
0
 def _docker_data_prefixes( self ):
     # We prefer Docker to be stored on the persistent volume if there is one
     return concat( persistent_dir, super( ToilBoxSupport, self )._docker_data_prefixes( ) )
Example #58
0
 def _rsync( cls, role, *args, **kwargs ):
     cls._cgcloud( *concat( 'rsync',
                            dict_to_opts( kwargs, ssh_opts=cls.ssh_opts_str( ) ),
                            role, args ) )
Example #59
0
 def _run(self, *args):
     args = list(concat(*args))
     log.info('Running %r', args)
     subprocess.check_call(args)