Code Example #1
def list_cmd(image_name, project, endpoint, apikey):
    """Short version of list cmd to use with deploy cmd."""
    settings = _get_project_settings(project, endpoint, apikey)
    environment = {'JOB_SETTINGS': json.dumps(settings)}
    exit_code, logs = _run_cmd_in_docker_container(image_name,
                                                   'shub-image-info',
                                                   environment)
    if exit_code == 0:
        return _extract_metadata_from_image_info_output(logs)
    # shub-image-info command not found; fall back to list-spiders
    elif exit_code == 127:
        # FIXME we should pass some value for SCRAPY_PROJECT_ID anyway
        # to handle `scrapy list` cmd properly via sh_scrapy entrypoint
        # environment['SCRAPY_PROJECT_ID'] = str(project) if project else ''
        exit_code, logs = _run_cmd_in_docker_container(image_name,
                                                       'list-spiders',
                                                       environment)
        if exit_code != 0:
            click.echo(logs)
            raise ShubException('Container with list cmd exited with code %s' %
                                exit_code)
        return {
            'project_type': 'scrapy',
            'spiders': utils.valid_spiders(logs.splitlines()),
        }
    else:
        click.echo(logs)
        raise ShubException(
            'Container with shub-image-info cmd exited with code %s' %
            exit_code)
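A usage sketch for the command above; the image name, project ID, endpoint, and API key are hypothetical placeholders:

metadata = list_cmd(
    image_name='registry.example.com/myproject:1.0',
    project=12345,
    endpoint='https://example.com/api/',
    apikey='YOUR_APIKEY',
)
# On success this resembles the fallback branch's return value:
# {'project_type': 'scrapy', 'spiders': ['spider1', 'spider2']}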
Code Example #2
File: deploy.py Project: Kryndex/shub
def _handle_deploy_errors(request):
    content = request.json()
    if request.status_code == 400 and content:
        reason = content.get('non_field_errors')
        if reason:
            raise ShubException('\n'.join(reason))
        else:
            raise ShubException(request.content)
    raise
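Note that the `request` argument is in fact a `requests.Response` (it is read via `.json()`, `.status_code`, and `.content`), and the final bare `raise` re-raises whichever exception is currently being handled, so the helper is only valid inside an `except` block. A simplified, hypothetical calling context (not the exact shub signature):

import requests

def make_deploy_request(url, data, files, auth):
    try:
        response = requests.post(url, data=data, files=files, auth=auth)
        response.raise_for_status()
        return response
    except requests.HTTPError:
        # `response` is bound here: only raise_for_status() raises HTTPError
        _handle_deploy_errors(response)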
Code Example #3
def _upload_egg(endpoint, eggpath, project, version, auth, verbose, keep_log,
                stack=None, requirements_file=None, eggs=None):
    expanded_eggs = []
    for e in (eggs or []):
        # Expand glob patterns, but make sure we don't swallow non-existing
        # eggs that were directly named
        # (glob.glob('non_existing_file') returns [])
        if any(['*' in e, '?' in e, '[' in e and ']' in e]):
            # Never match the main egg
            expanded_eggs.extend(
                [x for x in glob.glob(e)
                 if os.path.abspath(x) != os.path.abspath(eggpath)])
        else:
            expanded_eggs.append(e)

    data = {'project': project, 'version': version}
    if stack:
        data['stack'] = stack

    try:
        files = [('eggs', open(path, 'rb')) for path in expanded_eggs]
        if requirements_file:
            files.append(('requirements', open(requirements_file, 'rb')))
    except IOError as e:
        raise ShubException("%s %s" % (e.strerror, e.filename))
    files.append(('egg', open(eggpath, 'rb')))
    url = _url(endpoint, 'scrapyd/addversion.json')
    click.echo('Deploying to Scrapy Cloud project "%s"' % project)
    return make_deploy_request(url, data, files, auth, verbose, keep_log)
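The glob branch above deliberately keeps directly named eggs even when the files do not exist, so the later open() fails loudly instead of the egg being silently dropped. A standalone sketch of that expansion logic with hypothetical file names:

import glob
import os

eggpath = 'main.egg'
eggs = ['deps/*.egg', 'vendored.egg']  # one glob pattern, one direct name

expanded = []
for e in eggs:
    if any(['*' in e, '?' in e, '[' in e and ']' in e]):
        # Patterns expand to whatever exists, minus the main egg
        expanded.extend(
            x for x in glob.glob(e)
            if os.path.abspath(x) != os.path.abspath(eggpath))
    else:
        # Direct names pass through even if missing
        # (glob.glob('missing.egg') would return [])
        expanded.append(e)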
Code Example #4
def cli(target_or_key, keys, force):
    # target_or_key contains a target or just another job key
    if "/" in target_or_key:
        keys = (target_or_key,) + keys
        target = "default"
    else:
        target = target_or_key

    targetconf = get_target_conf(target)
    project_id = targetconf.project_id
    client = get_scrapinghub_client_from_config(targetconf)
    project = client.get_project(project_id)

    try:
        job_keys = [validate_job_key(project_id, key) for key in keys]
    except (BadParameterException, SubcommandException) as err:
        click.echo('Error during keys validation: %s' % str(err))
        exit(1)

    if not force:
        jobs_str = ", ".join([str(job) for job in job_keys])
        click.confirm(
            'Do you want to cancel these %s jobs? \n\n%s \n\nconfirm?'
            % (len(job_keys), jobs_str),
            abort=True
        )

    try:
        output = project.jobs.cancel(
            keys=[str(job) for job in job_keys]
        )
    except (ValueError, ScrapinghubAPIError) as err:
        raise ShubException(str(err))

    click.echo(output)
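A minimal sketch of what the `validate_job_key` helper used above could look like; this is a hypothetical reconstruction, not the actual shub implementation. It checks that a key has the `project_id/spider_id/job_id` shape and belongs to the target project:

from shub.exceptions import BadParameterException  # assumed import path

def validate_job_key(project_id, key):
    parts = str(key).split('/')
    if len(parts) != 3 or not all(p.isdigit() for p in parts):
        raise BadParameterException(
            'Job key must look like project_id/spider_id/job_id, got %s' % key)
    if parts[0] != str(project_id):
        raise BadParameterException(
            'Job key %s does not belong to project %s' % (key, project_id))
    return key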
Code Example #5
def _run_cmd_in_docker_container(image_name, command, environment):
    """Run a command inside the image container."""
    client = utils.get_docker_client()
    container = client.create_container(
        image=image_name,
        command=[command],
        environment=environment,
    )
    if 'Id' not in container:
        raise ShubException("Create container error:\n %s" % container)
    try:
        client.start(container)
    except docker.errors.APIError as e:
        explanation = utils.ensure_unicode(e.explanation or '')
        if 'executable file not found' in explanation:
            # docker.errors.APIError: 500 Server Error:
            # Internal Server Error ("Cannot start container xxx:
            # [8] System error: exec: "shub-image-info":
            # executable file not found in $PATH")
            return 127, None
        raise
    statuscode = client.wait(container=container['Id'])['StatusCode']
    logs = client.logs(
        container=container['Id'],
        stream=False,
        timestamps=False,
        stdout=True,
        stderr=bool(statuscode),  # fetch stderr only when the command failed
    )
    return statuscode, utils.ensure_unicode(logs)
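Usage sketch; assumes a running Docker daemon and a hypothetical image name:

exit_code, logs = _run_cmd_in_docker_container(
    image_name='registry.example.com/myproject:1.0',
    command='shub-image-info',
    environment={'JOB_SETTINGS': '{}'},
)
if exit_code == 127:
    print('shub-image-info executable not found in the image')
else:
    print(exit_code, logs)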
Code Example #6
def _get_poetry_requirements():
    try:
        data = toml.load('poetry.lock')
    except IOError:
        raise ShubException('Please make sure the poetry lock file is present')
    # Adapted from poetry 1.0.0a2 poetry/utils/exporter.py
    lines = []
    for package in data['package']:
        source = package.get('source') or {}
        source_type = source.get('type')
        if source_type == 'git':
            line = 'git+{}@{}#egg={}'.format(source['url'],
                                             source['reference'],
                                             package['name'])
        elif source_type in ['directory', 'file']:
            line = source['url']
        else:
            line = '{}=={}'.format(package['name'], package['version'])

            if source_type == 'legacy' and source['url']:
                line += ' \\\n'
                line += '    --index-url {}'.format(source['url'])

        line += '\n'
        lines.append(line)
    return ''.join(lines)
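To illustrate the mapping without reading poetry.lock from disk, here is the loop's effect on a hand-built, hypothetical lock structure:

data = {'package': [
    {'name': 'requests', 'version': '2.22.0'},
    {'name': 'mylib', 'version': '0.1.0',
     'source': {'type': 'git',
                'url': 'https://github.com/example/mylib.git',
                'reference': 'abc1234'}},
]}
# The loop above would produce:
#   requests==2.22.0
#   git+https://github.com/example/mylib.git@abc1234#egg=mylib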
Code Example #7
def _get_pipfile_requirements(tmpdir=None):
    try:
        from pipenv.utils import convert_deps_to_pip, prepare_pip_source_args
    except ImportError:
        raise ImportError('You need pipenv installed to deploy with Pipfile')
    try:
        with open('Pipfile.lock') as f:
            pipefile = json.load(f)
            deps = pipefile['default']
            sources_list = prepare_pip_source_args(
                pipefile['_meta']['sources'])
            sources = ' '.join(sources_list)
    except IOError:
        raise ShubException('Please lock your Pipfile before deploying')
    # We must remove any hash from the pipfile before converting to play nice
    # with vcs packages
    for k, v in deps.items():
        if 'hash' in v:
            del v['hash']
        if 'hashes' in v:
            del v['hashes']
        # Scrapy Cloud also doesn't support editable packages
        if 'editable' in v:
            del v['editable']
    return open(
        _add_sources(convert_deps_to_pip(deps),
                     _sources=sources.encode(),
                     tmpdir=tmpdir), 'rb')
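A plausible sketch of the `_add_sources` helper this depends on; it is hypothetical and assumes `convert_deps_to_pip` returns a list of requirement strings. It writes the requirements to a temporary file with the pip index options prepended, and returns the file path for the caller to reopen:

import os
import tempfile

def _add_sources(requirements, _sources=b'', tmpdir=None):
    fd, path = tempfile.mkstemp(prefix='requirements', suffix='.txt',
                                dir=tmpdir)
    with os.fdopen(fd, 'wb') as f:
        if _sources:
            f.write(_sources + b'\n')
        for req in requirements:
            f.write(req.encode() + b'\n')
    return path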
Code Example #8
File: deploy.py Project: bopopescu/vinalo
def _upload_egg(endpoint,
                eggpath,
                project,
                version,
                auth,
                verbose,
                keep_log,
                stack=None,
                requirements_file=None,
                eggs=None):
    eggs = eggs or []
    data = {'project': project, 'version': version}
    if stack:
        data['stack'] = stack

    try:
        files = [('eggs', open(path, 'rb')) for path in eggs]
        if requirements_file:
            files.append(('requirements', open(requirements_file, 'rb')))
    except IOError as e:
        raise ShubException("%s %s" % (e.strerror, e.filename))
    files.append(('egg', open(eggpath, 'rb')))
    url = _url(endpoint, 'scrapyd/addversion.json')
    click.echo('Deploying to Scrapy Cloud project "%s"' % project)
    return make_deploy_request(url, data, files, auth, verbose, keep_log)
Code Example #9
File: deploy.py Project: hcoura/shub
def _get_pipfile_requirements():
    try:
        from pipenv.utils import convert_deps_to_pip
    except ImportError:
        raise ImportError('You need pipenv installed to deploy with Pipfile')
    try:
        with open('Pipfile.lock') as f:
            deps = json.load(f)['default']
    except IOError:
        raise ShubException('Please lock your Pipfile before deploying')
    return convert_deps_to_pip(deps)
Code Example #10
def deploy_cmd(target, version, username, password, email,
               apikey, insecure, async_):
    config = load_shub_config()
    target_conf = config.get_target_conf(target)
    endpoint, target_apikey = target_conf.endpoint, target_conf.apikey
    image = config.get_image(target)
    version = version or config.get_version()
    image_name = utils.format_image_name(image, version)
    username, password = utils.get_credentials(
        username=username, password=password, insecure=insecure,
        apikey=apikey, target_apikey=target_apikey)

    apikey = apikey or target_apikey
    params = _prepare_deploy_params(
        target_conf.project_id, version, image_name, endpoint, apikey,
        username, password, email)

    click.echo("Deploying {}".format(image_name))
    utils.debug_log('Deploy parameters: {}'.format(params))
    req = requests.post(
        urljoin(endpoint, '/api/releases/deploy.json'),
        data=params,
        auth=(apikey, ''),
        timeout=300,
        allow_redirects=False
    )
    if req.status_code == 400:
        reason = req.json().get('non_field_errors')
        raise ShubException('\n'.join(reason) if reason else req.text)
    req.raise_for_status()
    status_url = req.headers['location']
    status_id = utils.store_status_url(
        status_url, limit=STORE_N_LAST_STATUS_URLS)
    click.echo(
        "You can check deploy results later with "
        "'shub image check --id {}'.".format(status_id))
    if async_:
        return
    if utils.is_verbose():
        deploy_progress_cls = _LoggedDeployProgress
    else:
        deploy_progress_cls = _DeployProgress
    events = _convert_status_requests_to_events(status_url)
    deploy_progress = deploy_progress_cls(events)
    deploy_progress.show()
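A hypothetical sketch of the `_convert_status_requests_to_events` generator used above (the real shub implementation may differ): poll the stored status URL and yield each JSON payload until a terminal status appears.

import time

import requests

def _convert_status_requests_to_events(status_url, poll_interval=2.0):
    while True:
        event = requests.get(status_url, timeout=30).json()
        yield event
        if event.get('status') in ('ok', 'error'):  # assumed terminal states
            return
        time.sleep(poll_interval)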
Code Example #11
File: deploy.py Project: ttilberg/shub
def _get_pipfile_requirements():
    try:
        from pipenv.utils import convert_deps_to_pip
    except ImportError:
        raise ImportError('You need pipenv installed to deploy with Pipfile')
    try:
        with open('Pipfile.lock') as f:
            deps = json.load(f)['default']
    except IOError:
        raise ShubException('Please lock your Pipfile before deploying')
    # We must remove any hash from the pipfile before converting to play nice
    # with vcs packages
    for k, v in deps.items():
        if 'hash' in v:
            del v['hash']
        if 'hashes' in v:
            del v['hashes']
        # Scrapy Cloud also doesn't support editable packages
        if 'editable' in v:
            del v['editable']
    return convert_deps_to_pip(deps)
Code Example #12
File: utils.py Project: Kryndex/shub
def validate_connection_with_docker_daemon(client):
    try:
        client.version()
    except Exception:  # any failure talking to the daemon counts as unavailable
        raise ShubException(DOCKER_UNAVAILABLE_MSG)
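Usage sketch, reusing the Docker client helper seen in Code Example #5:

client = utils.get_docker_client()
validate_connection_with_docker_daemon(client)  # raises ShubException if unreachable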
Code Example #13
def raise_shub_image_info_error(error):
    """Helper to raise ShubException with prefix and output"""
    # `output` is not a parameter: the helper is a nested function and
    # captures `output` from its enclosing scope (see the sketch below).
    msg = "shub-image-info: {} \n[output '{}']".format(error, output)
    raise ShubException(msg)
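A plausible enclosing context, shown as a hypothetical reconstruction, that explains where `output` comes from:

import json

from shub.exceptions import ShubException  # assumed import path

def _extract_metadata_from_image_info_output(output):
    def raise_shub_image_info_error(error):
        """Helper to raise ShubException with prefix and output"""
        msg = "shub-image-info: {} \n[output '{}']".format(error, output)
        raise ShubException(msg)

    try:
        metadata = json.loads(output)
    except ValueError:
        raise_shub_image_info_error('output is not valid JSON')
    return metadata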