예제 #1
0
 def ensure(self):
     '''
     create every readwrite directory and then the metadata directory.
     '''
     targets = list(self.readwrite)
     targets.append(self.metadir)
     for target in targets:
         utils.mkdir_p(target)
예제 #2
0
def docker_enc_handler(environment, context, job):
    """Run a docker-based job.

    Creates the per-step metadata directory, wires up step-scoped logging,
    optionally pulls the container image (skipped when the
    PACKTIVITY_DOCKER_NOPULL environment variable is set), then dispatches
    on the job type: 'command' runs a one-liner, 'script' runs a script.

    Raises RuntimeError for any other job shape.
    """
    tag = context['nametag']
    logger = logging.getLogger('step_logger_{}'.format(tag))

    # short interruption to create metainfo storage location
    meta = '{}/_packtivity'.format(context['readwrite'][0])
    context['metadir'] = meta
    logger.info('creating metadirectory %s if necessary. exists? : %s', meta,
                os.path.exists(meta))
    utils.mkdir_p(meta)

    # switch on the more detailed, step-specific logging
    utils.setup_logging(tag, context)
    logger.debug('starting log for step: %s', tag)

    if 'PACKTIVITY_DOCKER_NOPULL' not in os.environ:
        logger.info('prepare pull')
        pull_command = 'docker pull {container}:{tag}'.format(
            container=environment['image'], tag=environment['imagetag'])
        docker_pull(pull_command, logger, context, tag)

    logger.info('running job')

    if 'command' in job:
        full_command = prepare_full_docker_with_oneliner(
            context, environment, job['command'], logger)
        docker_run_cmd(full_command, logger, context, tag)
        logger.debug('reached return for docker_enc_handler')
        return
    if 'script' in job:
        run_docker_with_script(context, environment, job, logger)
        return
    raise RuntimeError('do not know yet how to run this...')
예제 #3
0
 def ensure(self):
     """Make sure every readwrite directory and the meta directory exist."""
     for path in self.readwrite:
         utils.mkdir_p(path)
     # the metadata directory is created last, after all payload dirs
     utils.mkdir_p(self.metadir)
예제 #4
0
def localproc_env(environment, context, job):
    """Run ``job['command']`` as a local shell subprocess.

    The command is executed inside the first readwrite directory of
    ``context`` (created if necessary). A failing job is logged but NOT
    re-raised (deliberate best-effort semantics); the original working
    directory is always restored.
    """
    nametag = context['nametag']
    log = logging.getLogger('step_logger_{}'.format(nametag))
    olddir = os.path.realpath(os.curdir)
    workdir = context['readwrite'][0]
    log.info('running local command %s', job['command'])
    try:
        log.info('changing to workdirectory %s', workdir)
        utils.mkdir_p(workdir)
        os.chdir(workdir)
        #this is used for testing and we will keep this shell
        #doesn't make sense to wrap in sh ...
        subprocess.check_call(job['command'], shell=True)
    except Exception:
        # was a bare `except:` -- narrowed so KeyboardInterrupt/SystemExit
        # still propagate; the job failure itself is deliberately swallowed.
        log.exception('local job failed. job: %s', job)
    finally:
        log.info('changing back to original directory %s', olddir)
        os.chdir(olddir)
def localproc_env(config, environment, state, job, metadata):
    """Run ``job['command']`` through ``sh -c`` as a local subprocess.

    Executes inside the first readwrite directory of ``state`` (created if
    necessary), with step-scoped logging. Raises RuntimeError when the
    command (or any setup step) fails; the original working directory is
    always restored.
    """
    with logutils.setup_logging_topic(config,
                                      metadata,
                                      state,
                                      "step",
                                      return_logger=True) as log:
        olddir = os.path.realpath(os.curdir)
        workdir = state.readwrite[0]
        try:
            log.info("changing to workdirectory %s", workdir)
            utils.mkdir_p(workdir)
            os.chdir(workdir)
            # explicit argv form: the command string is handed to sh -c
            shell = ["sh", "-c", str(job["command"])]
            log.info("running %s", shell)
            subprocess.check_call(shell)
        except Exception:
            # was a bare `except:` -- narrowed so KeyboardInterrupt and
            # SystemExit are not converted into a RuntimeError
            log.exception("local job failed. job: %s", job)
            raise RuntimeError("failed")
        finally:
            log.info("changing back to original directory %s", olddir)
            os.chdir(olddir)
예제 #6
0
def test_mkdir_exist(tmpdir):
    """mkdir_p must be a no-op when the directory already exists."""
    existing = tmpdir.join('hello')
    existing.ensure(dir=True)
    mkdir_p(str(existing))
    assert existing.check()
예제 #7
0
def test_mkdir_notexist(tmpdir):
    """mkdir_p creates a directory that does not exist yet."""
    target = tmpdir.join('hello')
    mkdir_p(str(target))
    assert target.check()
예제 #8
0
def test_mkdir_exist_butfile(tmpdir):
    """mkdir_p raises OSError when the path already exists as a file."""
    clash = tmpdir.join('hello')
    clash.ensure(file=True)
    with pytest.raises(OSError):
        mkdir_p(str(clash))
 def ensure(self):
     '''ensure that the metadata directory exists on disk.'''
     # local import -- presumably to avoid a module-level import cycle;
     # TODO(review): confirm against the package layout
     from packtivity.utils import mkdir_p
     mkdir_p(self.metadir)
예제 #10
0
def umbrella(environment, context, job):
    metadir = '{}/_packtivity'.format(context['readwrite'][0])
    context['metadir'] = metadir

    if not os.path.exists(metadir):
        utils.mkdir_p(metadir)
    fp = open(os.path.join(metadir, "umbrella_output.txt"), "w")

    # Check if the spec_url is actually a url or just a file path
    spec_file = None
    spec_path = None
    specification_file = ""
    try:
        f = urlopen(environment['spec_url'])  # tries to open the url
        spec_fd, temp_spec_path = mkstemp()
        spec_file = spec_fd
        spec_path = temp_spec_path
        # urlretrieve will throw UrlError, HTTPError, or ContentTooShortError
        (filename, headers) = urlretrieve(environment['spec_url'],
                                          temp_spec_path)
        specification_file = filename
    except ContentTooShortError:
        print("URL Content is Too Short! ")
    except ValueError:  # invalid URL
        specification_file = environment['spec_url']

    # what spec is the umbrella command using?
    print("Using specification file: ", specification_file)

    command = job.get('command', None)
    tempdir = tempfile.mkdtemp()
    logfile = job.get('logfile', 'umbrella.log')
    # docker_mod = prepare_docker(context=context, do_cvmfs=False, do_auth=False, log=logfile)

    readwrites = context['readwrite']
    readonlies = context['readonly']
    options = [
        "umbrella",
        "--spec",
        specification_file,
        "--sandbox_mode",
        "docker",
        '--localdir',
        tempdir,
        '--log',
        logfile,
    ]
    volumes = ""
    for item in readwrites:
        # Umbrella cuts off last character in path
        if item[-1] != "/":
            item = item + "/"
        item = item + "=" + item
        volumes += item + ","

    for item in readonlies:
        # Umbrella cuts off last character in path
        if item[-1] != "/":
            item = item + "/"
        item = item + "=" + item
        volumes += item + ","

    if volumes:
        # Remove the trailing comma
        volumes = volumes[:-1]
        options.append("-i")
        options.append(volumes)
    options.append('run')
    options.append(command)
    print options
    try:
        if not command:
            command = job.get('script', None)
        if not command:
            raise RuntimeError('command or script option must be provided')
        try:
            p = subprocess.Popen(
                options,
                stdout=fp,
                stderr=fp,
                # stdout=subprocess.STDOUT,
                # stderr=subprocess.STDOUT
            )
            # log.debug("Umbrella process PID: %s" % p.pid)
            p.communicate()
            returncode = p.returncode

            if spec_file and spec_path:
                os.close(spec_file)
                os.remove(spec_path)

            if returncode != 0:
                # log.error("Docker execution failed, return code %s" % returncode)
                raise RuntimeError(
                    "Umbrella execution failed, return code %s" % returncode)
            # log.debug("docker import command completed successfully")
        except (OSError, IOError) as e:
            # log.exception("subprocess failed: %s", sys.exc_info())
            raise RuntimeError(
                "Umbrella execution failed, subprocess error (%s)" % e)

    except Exception as e:
        # Clean up
        os.rmdir(tempdir)
        raise e
예제 #11
0
def tarball_handler(environment, context, job):
    """Import a tarball from ``environment['url']`` as docker image
    ``environment['image']``, run the job, then remove the imported image.

    Dispatches like docker_enc_handler: 'command' jobs run as a generated
    docker one-liner, 'script' jobs via run_docker_with_script; any other
    job shape removes the imported image and raises RuntimeError.
    """
    url = environment['url']
    image = environment['image']
    nametag = context['nametag']

    # prepare logging for the execution of the job. We're ready to handle up to DEBUG
    log = logging.getLogger('step_logger_{}'.format(url))
    log.setLevel(logging.DEBUG)

    # This is all internal logging; we don't want to escalate to handlers of
    # parent loggers. Two handlers: a stream handler (stderr by default) at
    # INFO, and a file handler at DEBUG added once metadir exists.
    log.propagate = False
    fmt = logging.Formatter('%(levelname)s:%(name)s:%(message)s')
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.INFO)
    stream_handler.setFormatter(fmt)
    log.addHandler(stream_handler)

    # short interruption to create metainfo storage location
    metadir = '{}/_packtivity'.format(context['readwrite'][0])
    context['metadir'] = metadir

    if not os.path.exists(metadir):
        log.info('Creating metadirectory %s', metadir)
        utils.mkdir_p(metadir)

    # Now that we have a place to store meta information we put a file-based
    # logger in place to log at DEBUG
    logname = '{}/{}.step.log'.format(metadir, nametag)
    file_handler = logging.FileHandler(logname)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(fmt)
    log.addHandler(file_handler)
    log.debug('starting log for step: %s', nametag)
    log.debug('context: %s', context)

    log.info('Executing docker import')
    with open(os.path.join(metadir, '%s.docker.import.log' % nametag),
              'w') as logfile:
        try:
            p = subprocess.Popen(["docker", "import", url, image],
                                 stdout=logfile,
                                 stderr=subprocess.STDOUT)
            # lazy %-args instead of eager string formatting in log calls
            log.debug("Docker process PID: %s", p.pid)
            p.communicate()
            returncode = p.returncode
            if returncode != 0:
                log.error("Docker execution failed, return code %s",
                          returncode)
                raise RuntimeError("Docker execution failed, return code %s" %
                                   returncode)
            log.debug("docker import command completed successfully")
        except (OSError, IOError):
            log.exception("subprocess failed: %s", sys.exc_info())
            raise RuntimeError("Docker execution failed, subprocess error")

    # single definition of the rmi log path (was duplicated inline twice)
    rmi_logname = os.path.join(metadir, '%s.docker.rmi.log' % nametag)

    if 'command' in job:
        docker_run_cmd_str = prepare_full_docker_with_oneliner(
            context, environment, job['command'], log)
        docker_run_cmd(docker_run_cmd_str, log, context, nametag)
        log.debug('reached return for docker_enc_handler')
    elif 'script' in job:
        run_docker_with_script(context, environment, job, log)
    else:
        # unknown job type: still clean up the imported image before failing
        remove_docker_image(image=image,
                            log_filename=rmi_logname,
                            logger=log)
        raise RuntimeError('do not know yet how to run this...')
    remove_docker_image(image=image,
                        log_filename=rmi_logname,
                        logger=log)