Example 1
def bulk(file, json_output, post, input_format, debug, rule_file, rule_uri,
         merge):
    """
    Scan multiple http(s) endpoints with drheader.

    The default file format is json:

        \b
        [
          {
            "url": "https://example.com",
            "params": {
                "example_parameter_key": "example_parameter_value"
            }
          },
          ...
        ]

    You can also use a txt file for input (using the "-ff txt" option):

        \b
        https://example.com
        https://example.co.uk

    NOTE: URL parameters are currently only supported on bulk scans.
    """
    exit_code = EXIT_CODE_NO_ERROR
    audit = []
    urls = []
    schema = {
        "type": "array",
        "items": {
            "type": "object",
            "properties": {
                "url": {
                    "type": "string",
                    'format': 'uri'
                },
                "params": {
                    "type": "string"
                },
            },
            "required": ['url']
        }
    }

    if debug:
        logging.basicConfig(level=logging.DEBUG)

    if input_format == 'txt':
        urls_temp = list(filter(None, file.read().splitlines()))
        for i in urls_temp:
            urls.append({'url': i})
        for i, v in enumerate(urls):
            logging.debug('Found: {}'.format(v))
            if not validators.url(v['url']):
                raise click.ClickException(
                    message='[line {}] "{}" is not a valid URL.'.format(
                        i + 1, v['url']))
    else:
        try:
            urls = json.loads(file.read())
            jsonschema.validate(instance=urls,
                                schema=schema,
                                format_checker=jsonschema.FormatChecker())
        except Exception as e:
            raise click.ClickException(e)

    logging.debug('Found {} URLs'.format(len(urls)))

    if rule_uri and not rule_file:
        if not validators.url(rule_uri):
            raise click.ClickException(
                message='"{}" is not a valid URL.'.format(rule_uri))
        try:
            rule_file = get_rules_from_uri(rule_uri)
        except Exception as e:
            if debug:
                raise click.ClickException(e)
            else:
                raise click.ClickException(
                    'No content retrieved from rules-uri.')

    rules = load_rules(rule_file, merge)

    for i, v in enumerate(urls):
        logging.debug('Querying: {}...'.format(v))
        drheader_instance = Drheader(url=v['url'],
                                     post=post,
                                     params=v.get('params', None))
        logging.debug('Analysing: {}...'.format(v))
        drheader_instance.analyze(rules)
        audit.append({'url': v['url'], 'report': drheader_instance.report})
        if drheader_instance.report:
            exit_code = EXIT_CODE_FAILURE

    echo_bulk_report(audit, json_output)
    sys.exit(exit_code)
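
A minimal, standalone sketch of the validation step used above (only the jsonschema package is assumed; the sample documents are illustrative, not taken from drheader):

import jsonschema

schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "url": {"type": "string", "format": "uri"},
            "params": {"type": "object"},
        },
        "required": ["url"],
    },
}

good = [{"url": "https://example.com", "params": {"token": "abc"}}]
bad = [{"params": {"token": "abc"}}]  # missing the required "url" key

# Passes silently; note that the "uri" format is only enforced when
# jsonschema's optional format dependencies are installed.
jsonschema.validate(instance=good, schema=schema,
                    format_checker=jsonschema.FormatChecker())

try:
    jsonschema.validate(instance=bad, schema=schema,
                        format_checker=jsonschema.FormatChecker())
except jsonschema.ValidationError as e:
    print("rejected:", e.message)  # 'url' is a required property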
Example 2
def _check_raw_prerequisites():
    if errors := check_raw_prerequisites():
        raise click.ClickException("\n".join(errors))
Example 3
def cmd(
    ctx,
    github_commit,
    channel,
    owner,
    pull_request,
    task_id,
    cache_urls,
    nix_instantiate,
    taskcluster_client_id,
    taskcluster_access_token,
    dry_run,
):
    '''A tool to be run on each commit.
    '''

    taskcluster_secret = 'repo:github.com/mozilla-releng/services:branch:' + channel
    if pull_request is not None:
        taskcluster_secret = 'repo:github.com/mozilla-releng/services:pull-request'

    taskcluster_queue = cli_common.taskcluster.get_service('queue')
    taskcluster_notify = cli_common.taskcluster.get_service('notify')

    click.echo(' => Retrieving taskGroupId ... ', nl=False)
    with click_spinner.spinner():
        task = taskcluster_queue.task(task_id)
        if 'taskGroupId' not in task:
            please_cli.utils.check_result(
                1, 'taskGroupId does not exist in task: {}'.format(
                    json.dumps(task)))
        task_group_id = task['taskGroupId']
        please_cli.utils.check_result(0, '')
        click.echo('    taskGroupId: ' + task_group_id)

    if channel in please_cli.config.DEPLOY_CHANNELS:
        taskcluster_notify.irc(
            dict(
                channel='#release-services',
                message=
                f'New deployment on {channel} is about to start: https://tools.taskcluster.net/groups/{task_group_id}'
            ))

    message = (
        'The release-services team is about to release a new version of mozilla/release-services '
        '(*.mozilla-releng.net, *.moz.tools). Any alerts coming up soon are best directed '
        'to the #release-services IRC channel. An automated message (such as this) will be sent '
        'once deployment is done. Thank you.')
    # This message is only sent when the channel is production.
    if channel == 'production':
        for msgChannel in ['#ci', '#moc']:
            taskcluster_notify.irc(dict(channel=msgChannel, message=message))

    click.echo(' => Checking cache to see which projects need to be rebuilt')
    build_projects = []
    project_hashes = dict()
    for project in sorted(PROJECTS):
        click.echo('     => ' + project)
        project_exists_in_cache, project_hash = ctx.invoke(
            please_cli.check_cache.cmd,
            project=project,
            cache_urls=cache_urls,
            nix_instantiate=nix_instantiate,
            channel=channel,
            indent=8,
            interactive=False,
        )
        project_hashes[project] = project_hash
        if not project_exists_in_cache:
            build_projects.append(project)

    projects_to_deploy = []

    if channel in please_cli.config.DEPLOY_CHANNELS:
        click.echo(' => Checking which projects need to be redeployed')

        # TODO: get status for our index branch
        deployed_projects = {}

        for project_name in sorted(PROJECTS):
            deployed_project_hash = deployed_projects.get(project_name)

            # update hook for each project
            if please_cli.config.PROJECTS_CONFIG[project_name][
                    'update'] is True:

                if channel == 'production':
                    update_hook_nix_path_attribute = f'updateHook.{channel}.scheduled'
                else:
                    update_hook_nix_path_attribute = f'updateHook.{channel}.notScheduled'

                projects_to_deploy.append((
                    project_name,
                    [],
                    'TASKCLUSTER_HOOK',
                    {
                        'enable': True,
                        'docker_registry': 'index.docker.io',
                        'docker_repo': 'mozillareleng/services',
                        'name-suffix': '-update-dependencies',
                        'nix_path_attribute': update_hook_nix_path_attribute,
                    },
                ))

            if deployed_project_hash == project_hashes[project_name]:
                continue

            if 'deploys' not in please_cli.config.PROJECTS_CONFIG[
                    project_name]:
                continue

            for deploy in please_cli.config.PROJECTS_CONFIG[project_name][
                    'deploys']:
                for deploy_channel in deploy['options']:
                    if channel == deploy_channel:
                        projects_to_deploy.append((
                            project_name,
                            please_cli.config.PROJECTS_CONFIG[project_name].
                            get('requires', []),
                            deploy['target'],
                            deploy['options'][channel],
                        ))

    click.echo(' => Creating taskcluster task definitions')
    tasks = []

    # 1. build tasks
    build_tasks = {}
    for index, project in enumerate(sorted(build_projects)):
        project_uuid = slugid.nice().decode('utf-8')
        required = []
        if pull_request is not None:
            required += [
                'CACHE_BUCKET',
                'CACHE_REGION',
            ]
        secrets = cli_common.taskcluster.get_secrets(
            taskcluster_secret,
            project,
            required=required,
            taskcluster_client_id=taskcluster_client_id,
            taskcluster_access_token=taskcluster_access_token,
        )
        build_tasks[project_uuid] = get_build_task(
            index,
            project,
            task_group_id,
            task_id,
            github_commit,
            owner,
            channel,
            taskcluster_secret,
            pull_request is None and secrets.get('CACHE_BUCKET') or None,
            pull_request is None and secrets.get('CACHE_REGION') or None,
        )
        tasks.append((project_uuid, build_tasks[project_uuid]))

    if projects_to_deploy:

        # 2. maintanance on task
        maintanance_on_uuid = slugid.nice().decode('utf-8')
        if len(build_tasks.keys()) == 0:
            maintanance_on_dependencies = [task_id]
        else:
            maintanance_on_dependencies = [i for i in build_tasks.keys()]
        maintanance_on_task = get_task(
            task_group_id,
            maintanance_on_dependencies,
            github_commit,
            channel,
            taskcluster_secret,
            './please -vv tools maintanance:on ' +
            ' '.join(list(set([i[0] for i in projects_to_deploy]))),
            {
                'name':
                '2. Maintanance ON',
                'description':
                '',
                'owner':
                owner,
                'source':
                'https://github.com/mozilla/release-services/tree/' + channel,
            },
        )
        tasks.append((maintanance_on_uuid, maintanance_on_task))

        # 3. deploy tasks (if on production/staging)
        deploy_tasks = {}
        for index, (project, project_requires, deploy_target, deploy_options) in \
                enumerate(sorted(projects_to_deploy, key=lambda x: x[0])):
            try:
                enable = deploy_options['enable']
            except KeyError:
                raise click.ClickException(
                    f"Missing 'enable' in project {project} and channel {channel} deploy options"
                )

            if not enable:
                continue

            project_uuid = slugid.nice().decode('utf-8')
            project_task = get_deploy_task(
                index,
                project,
                project_requires,
                deploy_target,
                deploy_options,
                task_group_id,
                maintanance_on_uuid,
                github_commit,
                owner,
                channel,
                taskcluster_secret,
            )
            if project_task:
                deploy_tasks[project_uuid] = project_task
                tasks.append((project_uuid, deploy_tasks[project_uuid]))

        # 4. maintanance off task
        maintanance_off_uuid = slugid.nice().decode('utf-8')
        maintanance_off_task = get_task(
            task_group_id,
            [i for i in deploy_tasks.keys()],
            github_commit,
            channel,
            taskcluster_secret,
            './please -vv tools maintanance:off ' +
            ' '.join(list(set([i[0] for i in projects_to_deploy]))),
            {
                'name':
                '4. Maintanance OFF',
                'description':
                '',
                'owner':
                owner,
                'source':
                'https://github.com/mozilla/release-services/tree/' + channel,
            },
        )
        maintanance_off_task['requires'] = 'all-resolved'
        tasks.append((maintanance_off_uuid, maintanance_off_task))

    click.echo(' => Submitting taskcluster definitions to taskcluster')
    if dry_run:
        tasks2 = {task_id: task for task_id, task in tasks}
        for task_id, task in tasks:
            click.echo(' => %s [taskId: %s]' %
                       (task['metadata']['name'], task_id))
            click.echo('    dependencies:')
            deps = []
            for dep in task['dependencies']:
                depName = '0. Decision task'
                if dep in tasks2:
                    depName = tasks2[dep]['metadata']['name']
                    deps.append('      - %s [taskId: %s]' % (depName, dep))
            for dep in sorted(deps):
                click.echo(dep)
    else:
        for task_id, task in tasks:
            taskcluster_queue.createTask(task_id, task)
Example 4
def fetch_timeline(
    session,
    url,
    db,
    args=None,
    sleep=1,
    stop_after=None,
    key=None,
    since_id=None,
    since=False,
    since_type=None,
    since_key=None,
):
    # See https://developer.twitter.com/en/docs/tweets/timelines/guides/working-with-timelines
    if since and since_id:
        raise click.ClickException(
            "Use either --since or --since_id, not both")

    since_type_id = None
    last_since_id = None
    if since_type is not None:
        assert since_key is not None
        since_type_id = SINCE_ID_TYPES[since_type]
        # Figure out the last since_id in case we need it
        try:
            last_since_id = db.conn.execute(
                """
                select since_id from since_ids
                where type = ? and key = ?
                """,
                [since_type_id, since_key],
            ).fetchall()[0][0]
        except (IndexError, sqlite3.OperationalError):
            pass

    if since:
        # Load since_id from database
        since_id = last_since_id

    args = dict(args or {})
    args["count"] = 200
    if stop_after is not None:
        args["count"] = stop_after
    if since_id:
        args["since_id"] = since_id
    args["tweet_mode"] = "extended"
    min_seen_id = None
    num_rate_limit_errors = 0
    while True:
        if min_seen_id is not None:
            args["max_id"] = min_seen_id - 1
        response = session.get(url, params=args)
        tweets = response.json()
        if "errors" in tweets:
            # Was it a rate limit error? If so sleep and try again
            if RATE_LIMIT_ERROR_CODE == tweets["errors"][0]["code"]:
                num_rate_limit_errors += 1
                assert num_rate_limit_errors < 5, "More than 5 rate limit errors"
                print("Rate limit exceeded - will sleep 15s and try again {}".
                      format(repr(response.headers)))
                time.sleep(15)
                continue
            else:
                raise Exception(str(tweets["errors"]))
        if key is not None:
            tweets = tweets[key]
        if not tweets:
            break
        for tweet in tweets:
            yield tweet
        min_seen_id = min(t["id"] for t in tweets)
        max_seen_id = max(t["id"] for t in tweets)
        if last_since_id is not None:
            max_seen_id = max((last_since_id, max_seen_id))
            last_since_id = max_seen_id
        if since_type_id is not None and since_key is not None:
            db["since_ids"].insert(
                {
                    "type": since_type_id,
                    "key": since_key,
                    "since_id": max_seen_id,
                },
                replace=True,
            )
        if stop_after is not None:
            break
        time.sleep(sleep)
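
The paging above works by repeatedly requesting tweets with ids at or below max_id, then moving max_id to one below the smallest id seen so far. A toy, network-free sketch of that cursoring (fetch_page and the id list are made-up stand-ins for the Twitter API):

def fetch_page(all_ids, max_id=None, count=3):
    # Return up to `count` ids, newest first, no newer than `max_id`.
    eligible = [i for i in all_ids if max_id is None or i <= max_id]
    return sorted(eligible, reverse=True)[:count]

all_ids = list(range(100, 110))  # pretend tweet ids, newest is 109
max_id = None
seen = []
while True:
    page = fetch_page(all_ids, max_id)
    if not page:
        break
    seen.extend(page)
    max_id = min(page) - 1  # mirrors args["max_id"] = min_seen_id - 1

print(seen)  # [109, 108, 107, 106, 105, 104, 103, 102, 101, 100]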
Example 5
def _ingest_loading(gc, project, composite, dir, ana_key, loading_file,
                    elements, experiment, samples, platemap, technique):
    comp_regex = re.compile(r'([a-zA-Z]+)\.PM.AtFrac')
    sample_regex = re.compile(r'.*ana__.*_(.*)_rawlen.txt')

    [file_path] = glob.glob('%s/**/%s' % (dir, loading_file), recursive=True)
    click.echo('Ingesting: %s' % file_path)
    with open(file_path) as f:
        loading = parse_csv(f.read())

    sample_numbers = loading['sample_no']
    run_ints = loading['runint']
    plate_ids = loading['plate_id']
    compositions = {}
    for key, value in loading.items():
        match = comp_regex.match(key)
        if match:
            element = match.group(1).lower()
            if element in elements:
                compositions[element] = value

    for i, (plate_id, sample_number, run_int) in enumerate(zip(plate_ids, sample_numbers, run_ints)):
        # Only process if we haven't already seen it in another platemap
        click.echo('Ingesting sample %s on plate %s' % (sample_number, int(plate_id)))
        if sample_number not in samples.setdefault(plate_id, {}):
            sample_meta = {}
            # TODO Enable when we have created a run
            sample_meta['runId'] = experiment[int(run_int)]['_id']
            sample_meta['sampleNum'] = sample_number
            comp = {}
            sample_meta['composition'] = comp
            for e in compositions.keys():
                comp[e] = compositions[e][i]

            if round(sum(comp.values())) != 1:
                raise click.ClickException('Composite values don\'t add up to 1, for sample: %s' % sample_number)

            scalars = sample_meta.setdefault('scalars', {})
            for s in scalars_to_extract:
                if s in loading:
                    # We need to replace . with the unicode char so we
                    # can store the key in mongo
                    k = s.replace('.', '\\u002e')
                    scalars[k] = loading[s][i]

            sample = gc.post('edp/projects/%s/composites/%s/samples'
                             % (project, composite), json=sample_meta)
            samples.setdefault(plate_id, {})[sample_number] = sample

            # Now look up time series data
            t = technique if technique is not None else '*'
            glob_path = '%s/**/ana__*__Sample%d_*_%s_rawlen.txt' % (dir, sample_number, t)
            sample_files = glob.glob(glob_path, recursive=True)

            timeseries_data = {}
            for sample_file in sample_files:
                match = sample_regex.match(sample_file)
                technique = match.group(1)
                with open(sample_file) as f:
                    timeseries = parse_rawlen(f.read())

                timeseries_data.update(
                    {'%s(%s)' % (key.replace('.', '\\u002e'), technique):value for (key,value) in timeseries.items()}
                )

                # Now look up technique sample files
                if technique is not None:
                    technique_files = experiment[run_int]['sampleFiles'][technique]
                    prefix = 'Sample%d' % sample_number
                    for file_path in technique_files:
                        name = os.path.basename(file_path)
                        if name.startswith(prefix):
                            with open(file_path, 'rb') as ff:
                                data = ff.read().decode()
                            s = parse_sample(data)
                            for key, value in s.items():
                                if key in scalars_to_extract:
                                    key = '%s(%s)' % (key.replace('.', '\\u002e'), technique)
                                    timeseries_data[key] = value
            timeseries = {
                'data': timeseries_data
            }
            timeseries = gc.post(
                'edp/projects/%s/composites/%s/samples/%s/timeseries'
                % (project, composite, sample['_id']), json=timeseries)
        else:
            sample = samples.setdefault(plate_id, {}).get(sample_number)

        platemap.setdefault('sampleIds', []).append(sample['_id'])
Example 6
def startsite(
    ctx,
    appname,
    prjname,
    batch,
    dev_repos,
    shared_env,
    db_engine,
    db_port,
    db_host,
    db_user,
    db_password,
):
    """
    Create a new Lino site.

    Two mandatory arguments must be given:

    APPNAME : The application to run on the new site.

    SITENAME : The internal name for the new site. It must be unique for this
    Lino server. We recommend lower-case letters and maybe digits, but no "-" or
    "_". Examples: foo, foo2, mysite, first.


    """ # .format(appnames=' '.join(APPNAMES))

    # if len(FOUND_CONFIG_FILES) == 0:
    #     raise click.UsageError(
    #         "This server is not yet configured. Did you run `sudo -H getlino configure`?")

    i = Installer(batch)

    # if os.path.exists(prjpath):
    #     raise click.UsageError("Project directory {} already exists.".format(prjpath))

    web_server = resolve_web_server(DEFAULTSECTION.get('web_server'))
    # prod = DEFAULTSECTION.getboolean('prod')
    # contrib = DEFAULTSECTION.getboolean('contrib')
    sites_base = DEFAULTSECTION.get('sites_base')
    local_prefix = DEFAULTSECTION.get('local_prefix')
    python_path_root = join(sites_base, local_prefix)
    project_dir = join(python_path_root, prjname)
    # shared_env = DEFAULTSECTION.get('shared_env')
    admin_name = DEFAULTSECTION.get('admin_name')
    admin_email = DEFAULTSECTION.get('admin_email')
    server_domain = DEFAULTSECTION.get('server_domain')
    if ifroot() and web_server:
        server_domain = prjname + "." + server_domain
    server_url = ("https://" if DEFAULTSECTION.getboolean('https') else "http://") \
                 + server_domain
    secret_key = secrets.token_urlsafe(20)

    db_engine = resolve_db_engine(db_engine or DEFAULTSECTION.get('db_engine'))

    if db_engine.needs_root and not ifroot():
        raise click.ClickException(
            "You need to be root for doing startsite with {}".format(
                db_engine))

    db_host = db_host or DEFAULTSECTION.get('db_host')
    db_port = db_port or DEFAULTSECTION.get(
        'db_port') or db_engine.default_port

    usergroup = DEFAULTSECTION.get('usergroup')

    app = REPOS_DICT.get(appname, None)
    if app is None:
        raise click.ClickException(
            "Invalid application nickname '{}'".format(appname))
    if not app.settings_module:
        raise click.ClickException(
            "{} is a library, not an application".format(appname))

    front_end = REPOS_DICT.get(DEFAULTSECTION.get('front_end'), None)
    if front_end is None:
        raise click.ClickException(
            "Invalid front_end name '{}''".format(front_end))

    # i.check_usergroup(usergroup)

    if dev_repos:
        for k in dev_repos.split():
            repo = REPOS_DICT.get(k, None)
            if repo is None or not repo.git_repo:
                nicknames = ' '.join(
                    [r.nickname for r in KNOWN_REPOS if r.git_repo])
                raise click.ClickException(
                    "Invalid repository name {}. "
                    "Allowed names are one or more of ({})".format(
                        k, nicknames))

    if not i.check_overwrite(project_dir):
        raise click.Abort()

    # if not i.asroot and not shared_env:
    #     raise click.ClickException(
    #         "Cannot startsite in a development environment without a shared-env!")

    app_package = app.package_name
    # app_package = app.settings_module.split('.')[0]
    repo_nickname = app.git_repo.split('/')[-1]

    context = {}
    context.update(DEFAULTSECTION)
    pip_packages = set()
    if True:  # not shared_env:
        if app.nickname not in dev_repos:
            pip_packages.add(app.package_name)
        if front_end.nickname not in dev_repos:
            pip_packages.add(front_end.package_name)

        # 20190803 not needed:
        # for nickname in ("lino", "xl"):
        #     if nickname not in dev_repos:
        #         pip_packages.add(REPOS_DICT[nickname].package_name)

    for pkgname in db_engine.python_packages.split():
        pip_packages.add(pkgname)

    context.update({
        "prjname":
        prjname,
        "appname":
        appname,
        "project_dir":
        project_dir,
        "repo_nickname":
        repo_nickname,
        "app_package":
        app_package,
        "app_settings_module":
        app.settings_module,
        "django_settings_module":
        "{}.{}.settings".format(local_prefix, prjname),
        "server_domain":
        server_domain,
        "server_url":
        server_url,
        "dev_packages":
        ' '.join([a.nickname for a in KNOWN_REPOS if a.nickname in dev_repos]),
        "pip_packages":
        ' '.join(pip_packages),
        "db_name":
        prjname,
        "python_path":
        sites_base,
        "usergroup":
        usergroup
    })

    click.echo('Creating a new Lino {appname} site in {project_dir}'.format(
        **context))

    db_user = DEFAULTSECTION.get('db_user')
    shared_user = False
    if db_user:
        db_password = DEFAULTSECTION.get('db_password')
        shared_user = True
    else:
        db_user = prjname
        db_password = secrets.token_urlsafe(8)
        if not batch:
            if db_engine.name != "sqlite3":
                click.echo(
                    "User credentials (for {db_engine} on {db_host}:{db_port}):"
                    .format(**context))
                db_user = click.prompt("- user name", default=db_user)
                db_password = click.prompt("- user password",
                                           default=db_password)
                db_port = click.prompt("- port", default=db_port)
                db_host = click.prompt("- host name", default=db_host)

    if not batch:
        shared_env = click.prompt("Shared virtualenv", default=shared_env)
        # if asroot:
        #     server_url = click.prompt("Server URL ", default=server_url)
        #     admin_name = click.prompt("Administrator's full name", default=admin_name)
        #     admin_email = click.prompt("Administrator's full name", default=admin_email)
        secret_key = click.prompt("Site's secret key", default=secret_key)

    context.update({
        "db_host": db_host,
        "db_port": db_port,
        "db_user": db_user,
        "db_password": db_password,
        "secret_key": secret_key,
    })

    if not i.yes_or_no(
            "OK to create {} with above options?".format(project_dir)):
        raise click.Abort()

    os.umask(0o002)

    os.makedirs(project_dir, exist_ok=True)
    i.jinja_write(join(project_dir, "settings.py"), **context)
    i.jinja_write(join(project_dir, "manage.py"), **context)
    # pull.sh script is now in the virtualenv's bin folder
    #i.jinja_write(join(project_dir, "pull.sh"), **context)
    if ifroot():
        i.jinja_write(join(project_dir, "make_snapshot.sh"), **context)
        i.make_file_executable(join(project_dir, "make_snapshot.sh"))
        if web_server:
            i.jinja_write(join(project_dir, "wsgi.py"), **context)
            pth = join(project_dir, web_server.name)
            os.makedirs(pth, exist_ok=True)
            if web_server.name == "nginx":
                i.jinja_write(join(pth, "uwsgi.ini"), **context)
                i.jinja_write(join(pth, "uwsgi_params"), **context)

        logdir = join(DEFAULTSECTION.get("log_base"), prjname)
        os.makedirs(logdir, exist_ok=True)
        with i.override_batch(True):
            i.check_permissions(logdir)
            os.symlink(logdir, join(project_dir, 'log'))
            i.write_logrotate_conf('lino-{}.conf'.format(prjname),
                                   join(logdir, "lino.log"))

        backups_base_dir = join(DEFAULTSECTION.get("backups_base"), prjname)
        os.makedirs(backups_base_dir, exist_ok=True)
        with i.override_batch(True):
            i.check_permissions(backups_base_dir)

        fn = 'make_snapshot_{prjname}.sh'.format(**context)
        i.write_daily_cron_job(fn, MAKE_SNAPSHOT_CRON_SH.format(**context))

    if DEFAULTSECTION.getboolean('linod'):
        i.write_file(join(project_dir, 'linod.sh'),
                     LINOD_SH.format(**context),
                     executable=True)
        if ifroot():
            i.write_supervisor_conf('linod_{}.conf'.format(prjname),
                                    LINOD_SUPERVISOR_CONF.format(**context))
            i.must_restart('supervisor')

    os.makedirs(join(project_dir, 'media'), exist_ok=True)

    if shared_env:
        envdir = shared_env
    else:
        envdir = join(project_dir, DEFAULTSECTION.get('env_link'))

    i.check_virtualenv(envdir, context)

    if shared_env:
        os.symlink(envdir, join(project_dir, DEFAULTSECTION.get('env_link')))
        static_root = join(shared_env, 'static_root')
        if not os.path.exists(static_root):
            os.makedirs(static_root, exist_ok=True)

    if dev_repos:
        click.echo("dev_repos is {} --> {}".format(dev_repos,
                                                   dev_repos.split()))
        repos = []
        for nickname in dev_repos.split():
            lib = REPOS_DICT.get(nickname, None)
            if lib is None:
                raise click.ClickException(
                    "Invalid repository nickname {} in --dev-repos".format(
                        nickname))
            repos.append(lib)

        click.echo("Installing {} repositories...".format(len(repos)))
        full_repos_dir = DEFAULTSECTION.get('repos_base')
        if not full_repos_dir:
            full_repos_dir = join(envdir, DEFAULTSECTION.get('repos_link'))
            if not os.path.exists(full_repos_dir):
                os.makedirs(full_repos_dir, exist_ok=True)
        i.check_permissions(full_repos_dir)
        os.chdir(full_repos_dir)
        for lib in repos:
            i.clone_repo(lib)
        for lib in repos:
            i.install_repo(lib, envdir)

    if len(pip_packages):
        click.echo("Installing {} Python packages...".format(
            len(pip_packages)))
        i.run_in_env(
            envdir,
            "pip install -q --upgrade {}".format(' '.join(pip_packages)))

    if ifroot():
        if web_server:
            filename = "{}.conf".format(prjname)
            conf_root = join("/etc/", web_server.service)
            conf_tpl = web_server.name + ".conf"
            avpth = join(conf_root, 'sites-available', filename)
            enpth = join(conf_root, 'sites-enabled', filename)
            # shutil.copyfile(join(project_dir, 'nginx', filename), avpth)
            if i.jinja_write(avpth, conf_tpl, **context):
                if i.override_batch(True):
                    if i.check_overwrite(enpth):
                        os.symlink(avpth, enpth)
            if web_server.name == "nginx":
                i.write_supervisor_conf(
                    '{}-uwsgi.conf'.format(prjname),
                    UWSGI_SUPERVISOR_CONF.format(**context))
                i.must_restart("supervisor")
            i.must_restart(web_server.service)

    os.chdir(project_dir)
    i.run_in_env(envdir, "python manage.py install --noinput")
    if not shared_user:
        db_engine.setup_user(i, context)
    db_engine.setup_database(i, prjname, db_user, db_host)
    i.run_in_env(envdir, "python manage.py migrate --noinput")
    i.run_in_env(envdir, "python manage.py prep --noinput")
    db_engine.after_prep(i, context)
    if ifroot():
        i.run_in_env(envdir, "python manage.py collectstatic --noinput")

    i.run_apt_install()
    i.restart_services()

    if ifroot() and web_server:
        # I imagine that we need to actually restart nginx
        # before running certbot-auto because otherwise certbot would add
        # its entries to the default site, since it does not yet see the
        # new site.

        if DEFAULTSECTION.getboolean('https'):
            certbot_cmd = which_certbot()
            if certbot_cmd is None:
                raise click.ClickException("Oops, certbot is not installed.")
            i.runcmd("{} --{} -d {}".format(certbot_cmd, web_server.name,
                                            server_domain))
            i.must_restart(web_server.service)

    click.echo("The new site {} has been created.".format(prjname))
Example 7
def loop():
    if ticks() - rcv_at > self._timeout:
        raise click.ClickException("Timeout!")
    self._mqclient.loop(0.1)
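
For context, the same poll-with-timeout pattern can be sketched standalone; time.monotonic() stands in for the snippet's ticks(), and a stub object stands in for self._mqclient (likely a paho-mqtt style client), both of which come from elided surrounding code:

import time
import click

class _StubClient:
    # Hypothetical stand-in for an MQTT client that reports a reply after a few polls.
    def __init__(self, replies_after=3):
        self._calls = 0
        self._replies_after = replies_after

    def has_reply(self):
        return self._calls >= self._replies_after

    def loop(self, timeout):
        # A real client would process network traffic here; we just count calls.
        self._calls += 1
        time.sleep(timeout)

def wait_for_reply(client, timeout=5.0):
    # Same shape as loop() above: poll until a reply arrives or the deadline passes.
    started = time.monotonic()
    while not client.has_reply():
        if time.monotonic() - started > timeout:
            raise click.ClickException("Timeout!")
        client.loop(0.1)

wait_for_reply(_StubClient())  # returns after ~0.3 s; raises ClickException on timeout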
Example 8
def dump(
    outfile,
    support,
    pretty,
    flat_list,
    include,
    exclude,
    include_type,
    exclude_type,
    filter,
    filter_exclude,
    exclude_tokens,
    device,
):
    """Dump coin data in JSON format

    This file is structured the same as the internal data. That is, top-level object
    is a dict with keys: 'bitcoin', 'eth', 'erc20', 'nem' and 'misc'. Value for each
    key is a list of dicts, each describing a known coin.

    If '--list' is specified, the top-level object is instead a flat list of coins.

    \b
    Fields are category-specific, except for four common ones:
    - 'name' - human-readable name
    - 'shortcut' - currency symbol
    - 'key' - unique identifier, e.g., 'bitcoin:BTC'
    - 'support' - a dict with entries per known device

    To control the size and properties of the resulting file, you can specify whether
    or not you want pretty-printing and whether or not to include support data with
    each coin.

    You can specify which categories and which fields will be included or excluded.
    You cannot specify both include and exclude at the same time. Include is "stronger"
    than exclude, in that _only_ the specified fields are included.

    You can also specify filters, in the form '-f field=value' (or '-F' for inverse
    filter). Filter values are case-insensitive and support shell-style wildcards,
    so '-f name=bit*' finds all coins whose names start with "bit" or "Bit".
    """
    if exclude_tokens:
        exclude_type = ("erc20", )

    if include and exclude:
        raise click.ClickException(
            "You cannot specify --include and --exclude at the same time.")
    if include_type and exclude_type:
        raise click.ClickException(
            "You cannot specify --include-type and --exclude-type at the same time."
        )

    coins = coin_info.coin_info()
    support_info = coin_info.support_info(coins.as_list())

    if support:
        for category in coins.values():
            for coin in category:
                coin["support"] = support_info[coin["key"]]

    # filter types
    if include_type:
        coins_dict = {k: v for k, v in coins.items() if k in include_type}
    else:
        coins_dict = {k: v for k, v in coins.items() if k not in exclude_type}

    # filter individual coins
    include_filters = [f.split("=", maxsplit=1) for f in filter]
    exclude_filters = [f.split("=", maxsplit=1) for f in filter_exclude]

    # always exclude 'address_bytes', not encodable in JSON
    exclude += ("address_bytes", )

    def should_include_coin(coin):
        for field, filter in include_filters:
            filter = filter.lower()
            if field not in coin:
                return False
            if not fnmatch.fnmatch(str(coin[field]).lower(), filter):
                return False
        for field, filter in exclude_filters:
            filter = filter.lower()
            if field not in coin:
                continue
            if fnmatch.fnmatch(str(coin[field]).lower(), filter):
                return False
        if device:
            is_supported = support_info[coin["key"]].get(device, None)
            if not is_supported:
                return False
        return True

    def modify_coin(coin):
        if include:
            return {k: v for k, v in coin.items() if k in include}
        else:
            return {k: v for k, v in coin.items() if k not in exclude}

    for key, coinlist in coins_dict.items():
        coins_dict[key] = [
            modify_coin(c) for c in coinlist if should_include_coin(c)
        ]

    if flat_list:
        output = sum(coins_dict.values(), [])
    else:
        output = coins_dict

    with outfile:
        indent = 4 if pretty else None
        json.dump(output, outfile, indent=indent, sort_keys=True)
        outfile.write("\n")
Example 9
def render(paths, outfile, verbose, bitcoin_only):
    """Generate source code from Mako templates.

    For every "foo.bar.mako" filename passed, runs the template and
    saves the result as "foo.bar". For every directory name passed,
    processes all ".mako" files found in that directory.

    If `-o` is specified, renders a single file into the specified outfile.

    If no arguments are given, processes the current directory.
    """
    if not CAN_RENDER:
        raise click.ClickException("Please install 'mako' and 'munch'")

    if outfile and (len(paths) != 1 or not os.path.isfile(paths[0])):
        raise click.ClickException(
            "Option -o can only be used with single input file")

    # prepare defs
    defs = coin_info.coin_info()
    support_info = coin_info.support_info(defs)

    if bitcoin_only:
        defs["bitcoin"] = [
            x for x in defs["bitcoin"]
            if x["coin_name"] in ("Bitcoin", "Testnet", "Regtest")
        ]

    # munch dicts - make them attribute-accessible
    for key, value in defs.items():
        defs[key] = [Munch(coin) for coin in value]
    for key, value in support_info.items():
        support_info[key] = Munch(value)

    def do_render(src, dst):
        if verbose:
            click.echo("Rendering {} => {}".format(src, dst))
        render_file(src, dst, defs, support_info)

    # single in-out case
    if outfile:
        do_render(paths[0], outfile)
        return

    # find files in directories
    if not paths:
        paths = ["."]

    files = []
    for path in paths:
        if not os.path.exists(path):
            click.echo("Path {} does not exist".format(path))
        elif os.path.isdir(path):
            files += glob.glob(os.path.join(path, "*.mako"))
        else:
            files.append(path)

    # render each file
    for file in files:
        if not file.endswith(".mako"):
            click.echo("File {} does not end with .mako".format(file))
        else:
            target = file[:-len(".mako")]
            with open(target, "w") as dst:
                do_render(file, dst)
Example 10
def user_timeline(
    db_path,
    identifiers,
    attach,
    sql,
    auth,
    ids,
    stop_after,
    user_id,
    screen_name,
    since,
    since_id,
):
    "Save tweets posted by specified user"
    auth = json.load(open(auth))
    session = utils.session_for_auth(auth)
    db = utils.open_database(db_path)
    identifiers = utils.resolve_identifiers(db, identifiers, attach, sql)

    # Backwards compatible support for old --user_id and --screen_name options
    if screen_name:
        if ids:
            raise click.ClickException("Cannot use --screen_name with --ids")
        identifiers.append(screen_name)

    if user_id:
        if not identifiers:
            identifiers = [user_id]
        else:
            if not ids:
                raise click.ClickException("Use --user_id with --ids")
            identifiers.append(user_id)

    # If identifiers is empty, fetch the authenticated user
    fetch_profiles = True
    if not identifiers:
        fetch_profiles = False
        profile = utils.get_profile(db, session, user_id, screen_name)
        identifiers = [profile["screen_name"]]
        ids = False

    format_string = (
        "@{:" + str(max(len(str(identifier)) for identifier in identifiers)) + "}"
    )

    for identifier in identifiers:
        kwargs = {}
        if ids:
            kwargs["user_id"] = identifier
        else:
            kwargs["screen_name"] = identifier
        if fetch_profiles:
            profile = utils.get_profile(db, session, **kwargs)
        else:
            profile = db["users"].get(profile["id"])
        expected_length = profile["statuses_count"]

        if since or since_id:
            expected_length = None

        with click.progressbar(
            utils.fetch_user_timeline(
                session,
                db,
                stop_after=stop_after,
                since_id=since_id,
                since=since,
                **kwargs
            ),
            length=expected_length,
            label=format_string.format(profile["screen_name"]),
            show_pos=True,
        ) as bar:
            # Save them 100 at a time
            chunk = []
            for tweet in bar:
                chunk.append(tweet)
                if len(chunk) >= 100:
                    utils.save_tweets(db, chunk)
                    chunk = []
            if chunk:
                utils.save_tweets(db, chunk)
Example 11
def check(backend, icons, show_duplicates):
    """Validate coin definitions.

    Checks that every btc-like coin is properly filled out, reports duplicate symbols,
    missing or invalid icons, backend responses, and uniform key information --
    i.e., that all coins of the same type have the same fields in their JSON data.

    Uniformity check ignores NEM mosaics and ERC20 tokens, where non-uniformity is
    expected.

    The `--show-duplicates` option can be set to:

    - all: all shortcut collisions are shown, including colliding ERC20 tokens

    - nontoken: only collisions that affect non-ERC20 coins are shown

    - errors: only collisions between non-ERC20 tokens are shown. This is the default,
    as a collision between two or more non-ERC20 tokens is an error.

    In the output, duplicate ERC tokens will be shown in cyan; duplicate non-tokens
    in red. An asterisk (*) next to symbol name means that even though it was detected
    as duplicate, it is still included in results.

    The collision detection checks that SLIP44 numbers don't collide between different
    mainnets (testnet collisions are allowed), that `address_prefix` doesn't collide
    with Bitcoin (other collisions are reported as warnings). `address_prefix_p2sh`
    is also checked but we have a bunch of collisions there and can't do much
    about them, so it's not an error.

    In the collision checks, Bitcoin is shown in red, other mainnets in blue,
    testnets in green and unsupported networks in gray, marked with `(X)` for
    non-colored output.
    """
    if backend and requests is None:
        raise click.ClickException(
            "You must install requests for backend check")

    if icons and not CAN_BUILD_DEFS:
        raise click.ClickException("Missing requirements for icon check")

    defs, buckets = coin_info.coin_info_with_duplicates()
    all_checks_passed = True

    print("Checking BTC-like coins...")
    if not check_btc(defs.bitcoin):
        all_checks_passed = False

    print("Checking Ethereum networks...")
    if not check_eth(defs.eth):
        all_checks_passed = False

    if show_duplicates == "all":
        dup_level = logging.DEBUG
    elif show_duplicates == "nontoken":
        dup_level = logging.INFO
    else:
        dup_level = logging.ERROR
    print("Checking unexpected duplicates...")
    if not check_dups(buckets, dup_level):
        all_checks_passed = False

    nontoken_dups = [
        coin for coin in defs.as_list() if "dup_key_nontoken" in coin
    ]
    if nontoken_dups:
        nontoken_dup_str = ", ".join(
            highlight_key(coin, "red") for coin in nontoken_dups)
        print_log(logging.ERROR,
                  "Non-token duplicate keys: " + nontoken_dup_str)
        all_checks_passed = False

    if icons:
        print("Checking icon files...")
        if not check_icons(defs.bitcoin):
            all_checks_passed = False

    if backend:
        print("Checking backend responses...")
        if not check_backends(defs.bitcoin):
            all_checks_passed = False

    print("Checking segwit fields...")
    if not check_segwit(defs.bitcoin):
        all_checks_passed = False

    print("Checking key uniformity...")
    for cointype, coinlist in defs.items():
        if cointype in ("erc20", "nem"):
            continue
        if not check_key_uniformity(coinlist):
            all_checks_passed = False

    if not all_checks_passed:
        print("Some checks failed.")
        sys.exit(1)
    else:
        print("Everything is OK.")
Example 12
def _get_id_from_foreign_key(api, foreign_id):
    collection = api.get_collection_by_foreign_id(foreign_id)
    if collection is None:
        raise click.ClickException("Collection does not exist.")
    return collection.get('id')
Example 13
def devserver(https, web, ws, worker, assets, beat):
    """
    Run a development server.

    This command will start a development instance of h, consisting of a web
    application, Celery worker, and websocket server. It will also start a
    process which will watch and build the frontend assets.

    By default, the webserver will be accessible at:

        http://localhost:8080

    You can also pass the `--https` flag, which will look for a TLS certificate
    and key in PEM format in the current directory, in files called:

        .tlscert.pem
        .tlskey.pem

    If you use this flag, the webserver will be accessible at:

        https://localhost:8080

    If you wish this to be the default behaviour, you can set the
    USE_HTTPS environment variable.
    """
    try:
        from honcho.manager import Manager
    except ImportError:
        raise click.ClickException(
            "cannot import honcho: did you run `pip install -r requirements-dev.in` yet?"
        )

    os.environ["PYTHONUNBUFFERED"] = "true"
    if https:
        gunicorn_args = "--certfile=.tlscert.pem --keyfile=.tlskey.pem"
        os.environ["APP_URL"] = "https://localhost:8080"
        os.environ["WEBSOCKET_URL"] = "wss://localhost:5001/ws"
    else:
        gunicorn_args = ""
        os.environ["APP_URL"] = "http://localhost:8080"
        os.environ["WEBSOCKET_URL"] = "ws://localhost:5001/ws"

    m = Manager()
    if web:
        m.add_process(
            "web",
            "newrelic-admin run-program gunicorn --name web --reload --paste conf/development-app.ini %s"
            % gunicorn_args,
        )

    if ws:
        m.add_process(
            "ws",
            "newrelic-admin run-program gunicorn --name websocket --reload --paste conf/development-websocket.ini %s"
            % gunicorn_args,
        )

    if worker:
        m.add_process("worker", "hypothesis --dev celery worker -l INFO")

    if beat:
        m.add_process("beat", "hypothesis --dev celery beat")

    if assets:
        m.add_process("assets", "gulp watch")

    m.loop()

    sys.exit(m.returncode)
Example 14
def compare(file, json_output, debug, rule_file, rule_uri, merge):
    """
    If you have headers you would like to test with drheader, you can "compare" them against your ruleset using this command.

    This command requires a valid json file as input.

    Example:

        \b
        [
            {
                "url": "https://test.com",
                "headers": {
                    "X-XSS-Protection": "1; mode=block",
                    "Content-Security-Policy": "default-src 'none'; script-src 'self' unsafe-inline; object-src 'self';"
                    "Strict-Transport-Security": "max-age=31536000; includeSubDomains",
                    "X-Frame-Options": "SAMEORIGIN",
                    "X-Content-Type-Options": "nosniff",
                    "Referrer-Policy": "strict-origin",
                    "Cache-Control": "no-cache, no-store, must-revalidate",
                    "Pragma": "no-cache",
                    "Set-Cookie": ["HttpOnly; Secure"]
                },
                "status_code": 200
            },
            ...
        ]
    """
    exit_code = EXIT_CODE_NO_ERROR
    audit = []
    schema = {
        "type": "array",
        "items": {
            "type": "object",
            "properties": {
                "url": {
                    "type": "string",
                    'format': 'uri'
                },
                "headers": {
                    "type": "object"
                },
                "status_code": {
                    "type": "integer"
                }
            },
            "required": ['headers', 'url']
        }
    }

    if debug:
        logging.basicConfig(level=logging.DEBUG)

    try:
        data = json.loads(file.read())
        jsonschema.validate(instance=data,
                            schema=schema,
                            format_checker=jsonschema.FormatChecker())
        logging.debug('Found {} URLs'.format(len(data)))
    except Exception as e:
        raise click.ClickException(e)

    if rule_uri and not rule_file:
        if not validators.url(rule_uri):
            raise click.ClickException(
                message='"{}" is not a valid URL.'.format(rule_uri))
        try:
            rule_file = get_rules_from_uri(rule_uri)
        except Exception as e:
            if debug:
                raise click.ClickException(e)
            else:
                raise click.ClickException(
                    'No content retrieved from rules-uri.')

    rules = load_rules(rule_file, merge)

    for i in data:
        logging.debug('Analysing: {}'.format(i['url']))
        drheader_instance = Drheader(url=i['url'], headers=i['headers'])
        drheader_instance.analyze(rules)
        audit.append({'url': i['url'], 'report': drheader_instance.report})
        if drheader_instance.report:
            exit_code = EXIT_CODE_FAILURE

    echo_bulk_report(audit, json_output)
    sys.exit(exit_code)
Example 15
def deploy_cmd(ctx, chain_name, wait_for_sync, contracts_to_deploy):
    """
    Deploys the specified contracts to a chain.
    """
    project = ctx.obj['PROJECT']
    logger = logging.getLogger('populus.cli.deploy')

    # Determine which chain should be used.
    if not chain_name:
        chain_name = select_chain(project)

    contract_data = project.compiled_contract_data

    if contracts_to_deploy:
        # validate that we *know* about all of the contracts
        unknown_contracts = set(contracts_to_deploy).difference(
            contract_data.keys())
        if unknown_contracts:
            unknown_contracts_message = (
                "Some contracts specified for deploy were not found in the "
                "compiled project contracts.  These contracts could not be found "
                "'{0}'.  Searched these known contracts '{1}'".format(
                    ', '.join(sorted(unknown_contracts)),
                    ', '.join(sorted(contract_data.keys())),
                ))
            raise click.ClickException(unknown_contracts_message)
    else:
        # prompt the user to select the desired contracts they want to deploy.
        # Potentially display the currently deployed status.
        contracts_to_deploy = [select_project_contract(project)]

    with project.get_chain(chain_name) as chain:
        provider = chain.provider
        registrar = chain.registrar

        # wait for the chain to start syncing.
        if wait_for_sync:
            logger.info("Waiting for chain to start syncing....")
            while chain.wait.for_syncing() and is_synced(chain.web3):
                sleep(1)
            logger.info("Chain sync complete")

        # Get the deploy order.
        deploy_order = get_deploy_order(
            contracts_to_deploy,
            contract_data,
        )

        # Display Start Message Info.
        starting_msg = (
            "Beginning contract deployment.  Deploying {0} total contracts ({1} "
            "Specified, {2} because of library dependencies)."
            "\n\n" + (" > ".join(deploy_order.keys()))).format(
                len(deploy_order),
                len(contracts_to_deploy),
                len(deploy_order) - len(contracts_to_deploy),
            )
        logger.info(starting_msg)

        for contract_name, _ in deploy_order.items():
            if not provider.are_contract_dependencies_available(contract_name):
                raise ValueError(
                    "Something is wrong with the deploy order.  Some "
                    "dependencies for {0} are not "
                    "available.".format(contract_name))

            # Check if we already have an existing deployed version of that
            # contract (via the registry).  For each of these, prompt the user
            # if they would like to use the existing version.
            if provider.is_contract_available(contract_name):
                # TODO: this block should be a standalone cli util.
                # TODO: this block needs to use the `Provider` API
                existing_contract_instance = provider.get_contract(
                    contract_name)
                found_existing_contract_prompt = (
                    "Found existing version of {name} in registrar. "
                    "Would you like to use the previously deployed "
                    "contract @ {address}?".format(
                        name=contract_name,
                        address=existing_contract_instance.address,
                    ))
                if click.prompt(found_existing_contract_prompt, default=True):
                    continue

            # We don't have an existing version of this contract available so
            # deploy it.
            contract_instance = deploy_contract_and_verify(
                chain,
                contract_name=contract_name,
            )

            # Store the contract address for linking of subsequent deployed contracts.
            registrar.set_contract_address(contract_name,
                                           contract_instance.address)

        # TODO: fix this message.
        success_msg = ("Deployment Successful.")
        logger.info(success_msg)
Example 16
def main(upload_listing: str) -> None:
    os.chdir(INCOMING_DIR)
    with open(upload_listing) as upload_listing_file:
        uploads = upload_listing_file.read().splitlines()
    os.unlink(upload_listing)
    region = os.environ.get("AWS_REGION", "us-east-2")
    session = boto3.session.Session(region_name=region)
    s3 = session.resource("s3")
    bucket = s3.Bucket("edgedb-packages")
    pkg_directories = set()
    for path_str in uploads:
        path = pathlib.Path(path_str)
        if not path.is_file():
            print("File not found:", path)
            continue

        print("Looking at", path)
        # macos-x86_64/edgedb-1-alpha6-dev5081_1.0a6.dev5081+ga0106974_2020092316~nightly.pkg
        try:
            dist = path.parent  # macos-x86_64
            dist_base = arch = ""
            if "-" in str(dist):
                dist_base, arch = str(dist).split("-", 1)
            leaf = path.name
            m = PACKAGE_RE.match(leaf)
            if not m:
                raise click.ClickException(
                    f"Cannot parse artifact filename: {path_str}")
            basename = m.group("basename")
            slot = m.group("slot") or ""
            subdist = m.group("release")
            subdist = re.sub(r"[0-9]+", "", subdist)
            subdist = subdist.replace("~", "_")
            pkg_dir = str(dist) + subdist.replace("_", ".")
            pkg_directories.add(pkg_dir)
            ext = m.group("ext")
            print(f"dist={dist} leaf={leaf}")
            print(f"basename={basename} slot={slot}")
            print(f"subdist={subdist} pkg_dir={pkg_dir}")
            print(f"ext={ext}")
            with tempfile.TemporaryDirectory(prefix="genrepo",
                                             dir=LOCAL_DIR) as temp_dir:
                staging_dir = pathlib.Path(temp_dir) / pkg_dir
                os.makedirs(staging_dir)
                shutil.copy(path_str, staging_dir)
                asc_path = gpg_detach_sign(staging_dir / leaf)
                sha256_path = sha256(staging_dir / leaf)

                archive_dir = ARCHIVE / pkg_dir
                put(bucket, staging_dir / leaf, archive_dir, cache=True)
                put(bucket, asc_path, archive_dir, cache=True)
                put(bucket, sha256_path, archive_dir, cache=True)

                target_dir = DIST / pkg_dir
                dist_name = f"{basename}{slot}_latest{subdist}{ext}"
                put(bucket, staging_dir / leaf, target_dir, name=dist_name)
                put(bucket, asc_path, target_dir, name=dist_name + ".asc")
                put(bucket,
                    sha256_path,
                    target_dir,
                    name=dist_name + ".sha256")
        finally:
            os.unlink(path)
        print(path)

    for pkg_dir in pkg_directories:
        remove_old(bucket, ARCHIVE / pkg_dir, keep=3, subdist="nightly")
        make_index(bucket, ARCHIVE, pkg_dir)
Example 17
def check(ret, message, expected=0):
    if ret == expected:
        return
    if isinstance(expected, list) and ret in expected:
        return
    raise click.ClickException(message)
Example No. 18
def configure(ctx, py, yaml, skip_backend_validation=False):
    """
    Given the two different config files, set up the environment.

    NOTE: Will only execute once, so it's safe to call multiple times.
    """
    global __installed
    if __installed:
        return

    # Make sure that our warnings are always displayed
    import warnings
    warnings.filterwarnings('default', '', Warning, r'^sentry')

    # Add in additional mimetypes that are useful for our static files
    # which aren't common in default system registries
    import mimetypes
    for type, ext in (
        ('application/json', 'map'),
        ('application/font-woff', 'woff'),
        ('application/font-woff2', 'woff2'),
        ('application/vnd.ms-fontobject', 'eot'),
        ('application/x-font-ttf', 'ttf'),
        ('application/x-font-ttf', 'ttc'),
        ('font/opentype', 'otf'),
    ):
        mimetypes.add_type(type, '.' + ext)

    from .importer import install

    if yaml is None:
        # `yaml` will be None when SENTRY_CONF is pointed
        # directly to a file, in which case, this file must exist
        if not os.path.exists(py):
            if ctx:
                raise click.ClickException(
                    "Configuration file does not exist. Use 'sentry init' to initialize the file."
                )
            raise ValueError("Configuration file does not exist at '%s'" %
                             click.format_filename(py))
    elif not os.path.exists(yaml) and not os.path.exists(py):
        if ctx:
            raise click.ClickException(
                "Configuration file does not exist. Use 'sentry init' to initialize the file."
            )
        raise ValueError("Configuration file does not exist at '%s'" %
                         click.format_filename(yaml))

    # Add autoreload for config.yml file if needed
    if 'UWSGI_PY_AUTORELOAD' in os.environ:
        if yaml is not None and os.path.exists(yaml):
            try:
                import uwsgi
                from uwsgidecorators import filemon
            except ImportError:
                pass
            else:
                filemon(yaml)(uwsgi.reload)

    os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry_config'

    install('sentry_config', py, DEFAULT_SETTINGS_MODULE)

    # HACK: we need to force access of django.conf.settings to
    # ensure we don't hit any import-driven recursive behavior
    from django.conf import settings
    hasattr(settings, 'INSTALLED_APPS')

    from .initializer import initialize_app, on_configure
    initialize_app({
        'config_path': py,
        'settings': settings,
        'options': yaml,
    },
                   skip_backend_validation=skip_backend_validation)
    on_configure({'settings': settings})

    __installed = True
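
# Quick sanity check, not from the original code: once configure() has added
# the extra types above, the registry should resolve the static asset names.
import mimetypes

print(mimetypes.guess_type("bundle.js.map"))  # expected: ('application/json', None)
print(mimetypes.guess_type("icons.woff2"))    # expected: ('application/font-woff2', None)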
Example No. 19
'''

with open(os.path.join(os.path.dirname(__file__), 'VERSION')) as f:
    VERSION = f.read().strip()

ROOT_DIR = None
_folders = []
for item in reversed(CWD_DIR.split(os.sep)):
    item_dir = '/' + CWD_DIR[:CWD_DIR.find(item) + len(item)][1:]
    _folders.append(item_dir)
    if os.path.isfile(os.path.join(item_dir, 'please')):
        ROOT_DIR = item_dir
        break

if ROOT_DIR is None:
    raise click.ClickException(NO_ROOT_DIR_ERROR % '\n - '.join(_folders))

CACHE_URLS = [
    'https://cache.mozilla-releng.net',
]

SRC_DIR = os.path.join(ROOT_DIR, 'src')
TMP_DIR = os.path.join(ROOT_DIR, 'tmp')

CHANNELS = ['master', 'testing', 'staging', 'production']
DEPLOY_CHANNELS = ['testing', 'staging', 'production']

DOCKER_BASE_REGISTRY = 'index.docker.io'
DOCKER_BASE_REPO = 'mozillareleng/services'
DOCKER_BASE_TAG = 'base-' + VERSION
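
# The loop above climbs from the current working directory towards the
# filesystem root until it finds a directory containing a "please" file. A
# hedged equivalent using pathlib (illustrative only; the original sticks to
# os.path and string slicing):
import pathlib
from typing import Optional


def find_root(start: str) -> Optional[str]:
    start_path = pathlib.Path(start).resolve()
    for candidate in [start_path, *start_path.parents]:
        if (candidate / 'please').is_file():
            return str(candidate)
    return None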
Example No. 20
def list_(config):
    '''
    List migrations.

    This shows migration metadata: migrations that have been applied (and the
    result of that application) and migrations that are pending.

        * bootstrapped: a migration that was inserted during the bootstrap
          process.
        * failed: the migration did not apply cleanly; the migrations system
          will not be able to operate until this is rectified, typically by
          restoring from a backup.
        * pending: the migration has not been applied yet.
        * succeeded: the migration applied cleanly.

    Applied migrations are ordered by the "started_at" timestamp. Pending
    migrations follow applied migrations and are sorted in the same order that
    they would be applied.
    '''

    with _get_db_cursor(config) as (db, cursor):
        try:
            applied, pending = _get_all_migrations(config, cursor)
            migrations = applied + pending

            if len(migrations) == 0:
                raise click.ClickException('No migrations exist.')

            column_names = 'Name', 'Status', 'Started At', 'Completed At'
            max_name = max([len(m.name) for m in migrations])
            max_status = max([len(m.status.name) for m in migrations])
            row_format = '{{:<{}}} | {{:{}}} | {{:<19}} | {{:<19}}'
            name_col_width = max(max_name, len(column_names[0]))
            status_col_width = max(max_status, len(column_names[1]))
            row = row_format.format(name_col_width, status_col_width)
            date_format = '%Y-%m-%d %H:%M:%S'

            click.echo(row.format(*column_names))
            click.echo(
                '-' * (name_col_width + 1) + '+' +
                '-' * (status_col_width + 2) + '+' +
                '-' * 21 + '+' +
                '-' * 20
            )

            for migration in migrations:
                if migration.started_at is None:
                    started_at = 'N/A'
                else:
                    started_at = migration.started_at.strftime(date_format)

                if migration.completed_at is None:
                    completed_at = 'N/A'
                elif isinstance(migration.completed_at, datetime):
                    completed_at = migration.completed_at.strftime(date_format)
                else:
                    completed_at = str(migration.completed_at)

                msg = row.format(
                    migration.name,
                    migration.status.name,
                    started_at,
                    completed_at
                )

                if migration.status == MigrationStatus.bootstrapped:
                    click.echo(msg)
                elif migration.status == MigrationStatus.failed:
                    click.secho(msg, fg='red')
                elif migration.status == MigrationStatus.pending:
                    click.echo(msg)
                elif migration.status == MigrationStatus.succeeded:
                    click.secho(msg, fg='green')
                else:
                    msg = 'Invalid migration status: "{}".'
                    raise ValueError(msg.format(migration.status.name))

        except Exception as e:
            if config.debug:
                raise
            msg = 'Cannot list migrations: {}'
            raise click.ClickException(msg.format(e))
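
# Illustration, not part of the command: with a longest migration name of 24
# characters and a longest status of 11, the nested-brace template above
# resolves to '{:<24} | {:11} | {:<19} | {:<19}'. The values below are made up.
row = '{{:<{}}} | {{:{}}} | {{:<19}} | {{:<19}}'.format(24, 11)
print(row.format('add_user_email_index', 'succeeded',
                 '2021-03-01 09:15:02', '2021-03-01 09:15:03'))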
Example No. 21
    def perform(self, cmd, msg, tail=None):
        t0 = ticks()  # times the entire command
        rcv_at = ticks()  # last received message for timeout purposes
        got_error = None  # flag to signal the end, False->OK; True->abort with raise
        sz = 0
        next_seq = 0  # next expected sequence number
        ack = -1  # ack for flow-control
        subscribed = False  # flag to pop out of loop waiting for subscription
        output = b""  # output ultimately returned from perform

        # generate an ID we can use for the MQTT topics to match replies
        self._topic_id = MQTT._gen_id(6)

        def on_reply(cli, ud, msg):
            nonlocal sz, next_seq, ack, rcv_at, output, got_error
            self.debug(
                f"Received reply on topic '{msg.topic}' with QoS {msg.qos}")
            # parse message header
            if len(msg.payload) < 2:
                return
            seq = ((msg.payload[0] & 0x7F) << 8) | msg.payload[1]
            last = (msg.payload[0] & 0x80) != 0
            # check sequence number
            if seq < next_seq:
                self.debug(
                    f"Duplicate message, expected seq={next_seq}, got {seq}")
                return
            if seq > next_seq:
                raise click.ClickException(
                    f"Missing message(s), expected seq={next_seq}, got {seq}")
            # handle ACK for long streams (a bit of a hack!)
            if len(msg.payload) - 2 < 10 and msg.payload[2:].startswith(
                    b"SEQ "):
                try:
                    s = int(msg.payload[6:])
                    if s > ack:
                        ack = s
                        print(".", end="")
                        return
                except ValueError:
                    raise click.ClickException("Bad ACK received")
            sz += len(msg.payload) - 2
            output += msg.payload[2:]
            rcv_at = ticks()
            if last:
                dt = ticks() - t0
                self.debug("{:.3f}kB in {:.3f}s -> {:.3f}kB/s".format(
                    sz / 1024, dt, sz / 1024 / dt))
                got_error = False

        def on_error(cli, ud, message):
            nonlocal got_error
            click.echo(message.payload.strip(), err=True)
            got_error = True

        def on_sub(client, userdata, mid, granted_qos):
            nonlocal subscribed
            subscribed = True

        def loop():
            if ticks() - rcv_at > self._timeout:
                raise click.ClickException("Timeout!")
            self._mqclient.loop(0.1)

        # first connect
        self.connect()

        # subscribe to the response topics
        reply_topic = self._mktopic("reply/out")
        err_topic = self._mktopic("reply/err")
        self._mqclient.message_callback_add(reply_topic, on_reply)
        self._mqclient.message_callback_add(err_topic, on_error)
        self._mqclient.on_subscribe = on_sub
        (res, sub_mid) = self._mqclient.subscribe([(reply_topic, 1),
                                                   (err_topic, 1)])
        self.debug(f"Subscribing to {reply_topic} and {err_topic}")
        if res != paho.MQTT_ERR_SUCCESS:
            raise click.ClickException("Subscribe failed")
        while not subscribed:
            loop()

        # iterate through content and send one buffer at a time
        seq = 0
        if isinstance(msg, str):
            msg = msg.encode()
        buf = bytearray(BUFLEN + 2)
        cmd_topic = self._mktopic(cmd, tail=tail)
        flowctrl = len(msg) > 100 * 1024  # hack
        while len(msg) > 0 and got_error is None:
            # make sure we're not more than 16 messages ahead of flow-control ACKs
            while flowctrl and seq - ack > 16 and got_error is None:
                loop()
            # construct outgoing message with 2-byte header (last flag and seq number)
            buf[2:] = msg[:BUFLEN]
            msg = msg[BUFLEN:]
            last = len(msg) == 0
            struct.pack_into("!H", buf, 0, last << 15 | seq)
            # publish
            sz += len(buf)
            self.debug(f"Pub {cmd_topic} #{seq} last={last} len={len(buf)}")
            self._mqclient.publish(cmd_topic, buf, qos=1)
            seq += 1
            loop()
        # self.debug("done publishing")

        # wait for replies
        while got_error is None:
            loop()

        # wrap up
        self._mqclient.unsubscribe(reply_topic)
        self._mqclient.unsubscribe(err_topic)
        if got_error:
            raise click.Abort()
        return output
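
# A quick round-trip sketch (not part of the original class) of the 2-byte
# message header used above: bit 15 carries the "last" flag and the low 15
# bits carry the sequence number, matching what on_reply() parses.
import struct


def encode_header(last: bool, seq: int) -> bytes:
    return struct.pack("!H", (int(last) << 15) | (seq & 0x7FFF))


def decode_header(payload: bytes):
    seq = ((payload[0] & 0x7F) << 8) | payload[1]
    last = (payload[0] & 0x80) != 0
    return last, seq


assert decode_header(encode_header(True, 1234)) == (True, 1234)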
Example No. 22
def migrate(config, backup):
    ''' Run pending migrations. '''

    # Get a list of pending migrations.
    with _get_db_cursor(config) as (db, cursor):
        try:
            failed_migrations = config.backend.has_failed_migrations(cursor)
        except Exception as e:
            msg = 'Unable to start migrations: {}'
            raise click.ClickException(msg.format(e))

        if failed_migrations:
            msg = 'Cannot run due to previously failed migrations.'
            raise click.ClickException(click.style(msg, fg='red'))

        _, pending = _get_all_migrations(config, cursor)
        total = len(pending)

        if total == 0:
            raise click.ClickException(
                click.style('There are no pending migrations.', fg='red')
            )

    # Make a backup file [optional].
    if backup:
        backup_file = tempfile.NamedTemporaryFile('w', delete=False)
        msg = 'Backing up {} to "{}".'
        click.echo(msg.format(config.backend.location, backup_file.name))
        _wait_for(config.backend.backup_db(backup_file))
        backup_file.close()

    # Run migrations.
    with _get_db_cursor(config) as (db, cursor):
        msg = 'About to run {} migration{} in {}:'
        click.echo(
            msg.format(total, 's' if total > 1 else '', config.backend.location)
        )

        try:
            _run_migrations(config, cursor, pending)
        except Exception as e:
            click.secho('Migration failed because:', fg='red')
            click.echo(str(e))

            if backup:
                click.secho('Will try to restore from backup…', fg='red')
                config.backend.clear_db(cursor)
                db.close()

                try:
                    with open(backup_file.name, 'r') as backup_handle:
                        _wait_for(config.backend.restore_db(backup_handle))
                    click.secho('Restored from backup.', fg='green')
                except Exception as e2:
                    msg = 'Could not restore from backup: {}'.format(e2)
                    click.secho(msg, fg='red', bold=True)

            if config.debug:
                raise

            raise click.Abort()

        click.secho('Migrations completed successfully.', fg='green')

    # Remove backup file.
    if backup:
        click.echo('Removing backup "{}".'.format(backup_file.name))
        os.unlink(backup_file.name)
Example No. 23
def rpg(pass_length: int, number: int, output: click.File,
        exclude_charsets: str, no_safe: bool, verbose: bool) -> None:
    """
    Generate random, entropic, complex and safe password.
    \f

    :param int pass_length: desire password length
    :param int number: number of password to generate. Default is 1
    :param click.File output: output file
    :param str exclude_charsets: comma-separated charsets to exclude. Default is None
    :param bool no_safe: do not check password in Have I Been Pwned db. Default is False
    :param bool verbose: print verbose output. Default is False
    :return: None
    """
    # Check pass_length validity
    msg.Prints.verbose(f"Checking <pass-length> ({pass_length}) validity",
                       verbose)
    if pass_length > _max_pass_length or pass_length < _min_pass_length:
        raise click.BadArgumentUsage(
            msg.Echoes.error((
                f"Invalid value for \"<pass-length>\": {pass_length} "
                f"is not in the valid range of {_min_pass_length} to {_max_pass_length}."
            )))

    # Check number validity
    msg.Prints.verbose(f"Checking <pass-number> ({number}) validity", verbose)
    if number > _max_pass_number:
        raise click.BadOptionUsage(
            "number",
            msg.Echoes.error(
                f"Invalid value for \"<pass-number>\": the maximum value accepted is {_max_pass_number}."
            ))

    # Load charsets and check the validity
    msg.Prints.verbose("Loading charsets", verbose)
    chars = _get_char_list(exclude_charsets)
    msg.Prints.verbose("Charsets loaded\nChecking charsets validity", verbose)

    # Check at least one charsets type has been selected
    if not chars:
        raise click.BadOptionUsage(
            "--exclude-charsets",
            msg.Echoes.error(
                "RPG needs at least one charsets type to generate password."))
    else:
        if not len(chars) == len(string.ascii_lowercase +
                                 string.ascii_uppercase + string.digits +
                                 string.punctuation):
            # User chose to not use any charsets, print warning message
            msg.Prints.warning(
                "You are going to generate passwords without one or more of default charsets!"
            )
            msg.Prints.warning(
                "RPG cares a lot for your security, it's recommended to avoid this practice if possible!\n"
            )

    # Check if --no-safe option is in use, if so, print a warning message
    if no_safe:
        msg.Prints.warning(
            "You are going to generate passwords without checking if they have been already leaked!"
        )
        msg.Prints.warning(
            "RPG cares a lot for your security, it's recommended to avoid this practice if possible!\n"
        )

    msg.Prints.verbose("Start to generate passwords", verbose)

    # Print loading
    pw_list = []
    with click.progressbar(length=number,
                           label="Generating passwords",
                           show_pos=True) as pw_bar:
        for _ in pw_bar:
            pw = _generate_random_password(pass_length, chars, no_safe)
            if pw is not None:
                pw_list.append(pw)
            else:
                raise click.ClickException(
                    msg.Echoes.error(
                        "An error occurred while querying Have I Been Pwned API. Please retry or use --no-safe option"
                    ))

    # Print generated passwords
    if output:
        output.write("Passwords:\n")
    else:
        msg.Prints.emphasis("Passwords:")

    for pw in pw_list:
        if output:
            output.write(f"{pw}\n")
        else:
            msg.Prints.info(pw)

    # Calculate entropy and print it
    entropy = _get_entropy(pass_length, chars)
    if output:
        output.write(f"\nEntropy: {entropy}")
    else:
        msg.Prints.emphasis(
            f"\nThe entropy of generated password is: {entropy}")

    # Print summary table, only if --verbose or --output
    if output:
        output.write(f"\n{_password_entropy_table}")
    else:
        msg.Prints.verbose(_password_entropy_table, verbose)
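
# _get_entropy() is defined elsewhere in RPG; a hedged sketch of the usual
# formula it presumably implements: entropy in bits = length * log2(charset size).
import math


def estimate_entropy(pass_length: int, chars) -> float:
    return round(pass_length * math.log2(len(chars)), 2)

# e.g. a 16-character password over the full 94-character printable set gives
# roughly 16 * log2(94) ~= 104.87 bits.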
Example No. 24
def test(config, yes, current, target):
    '''
    Test pending migrations.

    Given two snapshots, one of your "current" state and one of your "target"
    state, this command verifies: current + migrations = target.

    If you have a schema build system, this command is useful for verifying that
    your new migrations will produce the exact same schema as the build system.

    Note: you may find it useful to set up a database for testing separate from
    the one that you use for development; this allows you to test repeatedly
    without disrupting your development work.
    '''

    # Create a temporary file for holding the migrated schema.
    temp_snapshot = tempfile.TemporaryFile('w+')

    # Make sure the user understands what is about to happen.
    warning = (
        'WARNING: This will drop all objects in {}!'
        .format(config.backend.location)
    )

    click.echo(click.style(warning, fg='red'))
    confirmation = 'Are you 100% positive that you want to do this?'

    if not (yes or click.confirm(confirmation)):
        raise click.Abort()

    with _get_db_cursor(config) as (db, cursor):
        # Load the current schema.
        click.echo('Dropping {}.'.format(config.backend.location))
        config.backend.clear_db(cursor)

    click.echo('Loading current snapshot "{}".'.format(current.name))
    _wait_for(config.backend.restore_db(current))

    with _get_db_cursor(config) as (db, cursor):
        # Run migrations on current schema.
        _, pending = _get_all_migrations(config, cursor)
        total = len(pending)
        click.echo(
            'About to run {} migration{} in {}:'
            .format(total, 's' if total > 1 else '', config.backend.location)
        )

        try:
            _run_migrations(config, cursor, pending)
        except Exception as e:
            click.secho('Migration failed because:', fg='red')
            click.echo(str(e))
            raise click.Abort()

        click.echo('Finished migrations.')

    # Dump the migrated schema to the temp file.
    click.echo('Snapshotting the migrated database.')
    _wait_for(config.backend.snapshot_db(temp_snapshot))
    _migration_insert_sql(config, temp_snapshot)

    # Compare the migrated schema to the target schema.
    click.echo('Comparing migrated schema to target schema.')
    temp_snapshot.seek(0)

    ignore = 'INSERT INTO agnostic_migrations'
    migrated = [line for line in temp_snapshot if not line.startswith(ignore)]
    targeted = [line for line in target if not line.startswith(ignore)]

    diff = list(difflib.unified_diff(
        migrated,
        targeted,
        fromfile='Migrated Schema',
        tofile='Target Schema'
    ))

    if len(diff) == 0:
        click.secho(
            'Test passed: migrated schema matches target schema!',
            fg='green'
        )
    else:
        click.secho(
            'Test failed: migrated schema differs from target schema.\n',
            fg='red'
        )
        click.echo(''.join(diff))
        raise click.ClickException('Test failed. See diff output above.')
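
# Illustration (not from the command itself) of how difflib.unified_diff
# renders a schema mismatch; an empty diff is what the test treats as a pass.
import difflib

migrated = ['CREATE TABLE users (id integer);\n']
targeted = ['CREATE TABLE users (id bigint);\n']
print(''.join(difflib.unified_diff(migrated, targeted,
                                   fromfile='Migrated Schema',
                                   tofile='Target Schema')))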
Example No. 25
def check_prerequisites():
    if errors := pipeline.check_prerequisites():
        raise click.ClickException("\n".join(errors))
Example No. 26
def train(ctx, config, classifier_config, model, n_fold, seed, plot,
          diagnostics, overwrite):
    """
    Train a classifier from ``scikit-learn`` on YATSM output and save result to
    file <model>. Dataset configuration is specified by <yatsm_config> and
    classifier and classifier parameters are specified by <classifier_config>.
    """
    # Setup
    if not model.endswith('.pkl'):
        model += '.pkl'
    if os.path.isfile(model) and not overwrite:
        raise click.ClickException('<model> exists and --overwrite was not '
                                   'specified')

    if seed:
        np.random.seed(seed)

    # Parse config & algorithm config
    cfg = parse_config_file(config)
    algo, algo_cfg = cfg_to_algorithm(classifier_config)

    training_image = cfg['classification']['training_image']
    if not training_image or not os.path.isfile(training_image):
        raise click.ClickException(
            'Training data image {} does not exist'.format(training_image))

    # Find information from results -- e.g., design info
    attrs = find_result_attributes(cfg)
    cfg['YATSM'].update(attrs)

    # Cache file for training data
    has_cache = False
    training_cache = cfg['classification']['cache_training']
    if training_cache:
        # If doesn't exist, retrieve it
        if not os.path.isfile(training_cache):
            logger.info('Could not retrieve cache file for Xy')
            logger.info('    file: %s' % training_cache)
        else:
            logger.info('Restoring X/y from cache file')
            has_cache = True

    training_image = cfg['classification']['training_image']
    # Check if we need to regenerate the cache file because training data is
    #   newer than the cache
    regenerate_cache = is_cache_old(training_cache, training_image)
    if regenerate_cache:
        logger.warning('Existing cache file older than training data ROI')
        logger.warning('Regenerating cache file')

    if not has_cache or regenerate_cache:
        logger.debug('Reading in X/y')
        X, y, row, col, labels = get_training_inputs(cfg)
        logger.debug('Done reading in X/y')
    else:
        logger.debug('Reading in X/y from cache file %s' % training_cache)
        with np.load(training_cache) as f:
            X = f['X']
            y = f['y']
            row = f['row']
            col = f['col']
            labels = f['labels']
        logger.debug('Read in X/y from cache file %s' % training_cache)

    # If cache didn't exist but is specified, create it for first time
    if not has_cache and training_cache:
        logger.info('Saving X/y to cache file %s' % training_cache)
        try:
            np.savez(training_cache, X=X, y=y, row=row, col=col, labels=labels)
        except Exception as e:
            raise click.ClickException(
                'Could not save X/y to cache file ({})'.format(e))

    # Do modeling
    logger.info('Training classifier')
    algo.fit(X, y, **algo_cfg.get('fit', {}))

    # Serialize algorithm to file
    logger.info('Pickling classifier with sklearn.externals.joblib')
    joblib.dump(algo, model, compress=3)

    # Diagnostics
    if diagnostics:
        algo_diagnostics(cfg, X, y, row, col, algo, n_fold, plot)
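
# is_cache_old() comes from elsewhere in YATSM; a hedged sketch of the check it
# presumably performs, comparing modification times so the cache is rebuilt
# whenever the training image is newer:
import os


def cache_is_older_than_training(cache_path, training_path):
    if not cache_path or not os.path.isfile(cache_path):
        return False  # nothing cached yet, so nothing to regenerate
    return os.path.getmtime(cache_path) < os.path.getmtime(training_path)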
Example No. 27
def verify(ctx):
    if patch_verify(ctx.obj[FILE_NAME]):
        click.echo(f"Python patched (file: {ctx.obj[FILE_NAME]})")
    else:
        raise click.ClickException(f"Python not patched (file: {ctx.obj[FILE_NAME]})")
Example No. 28
def transform(
    path,
    table,
    type,
    drop,
    rename,
    column_order,
    not_null,
    not_null_false,
    pk,
    pk_none,
    default,
    default_none,
    drop_foreign_key,
    sql,
    load_extension,
):
    "Transform a table beyond the capabilities of ALTER TABLE"
    db = sqlite_utils.Database(path)
    _load_extensions(db, load_extension)
    types = {}
    kwargs = {}
    for column, ctype in type:
        if ctype.upper() not in VALID_COLUMN_TYPES:
            raise click.ClickException(
                "column types must be one of {}".format(VALID_COLUMN_TYPES))
        types[column] = ctype.upper()

    not_null_dict = {}
    for column in not_null:
        not_null_dict[column] = True
    for column in not_null_false:
        not_null_dict[column] = False

    default_dict = {}
    for column, value in default:
        default_dict[column] = value
    for column in default_none:
        default_dict[column] = None

    kwargs["types"] = types
    kwargs["drop"] = set(drop)
    kwargs["rename"] = dict(rename)
    kwargs["column_order"] = column_order or None
    kwargs["not_null"] = not_null_dict
    if pk:
        if len(pk) == 1:
            kwargs["pk"] = pk[0]
        else:
            kwargs["pk"] = pk
    elif pk_none:
        kwargs["pk"] = None
    kwargs["defaults"] = default_dict
    if drop_foreign_key:
        kwargs["drop_foreign_keys"] = drop_foreign_key

    if sql:
        for line in db[table].transform_sql(**kwargs):
            click.echo(line)
    else:
        db[table].transform(**kwargs)
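
# Hedged usage sketch showing the same keyword arguments passed straight to
# sqlite-utils programmatically; the database path, table and column names are
# illustrative, not from the original example.
import sqlite_utils

db = sqlite_utils.Database("data.db")
db["people"].insert({"id": 1, "name": "Ada", "age": "36", "address": "x"}, pk="id")
db["people"].transform(
    types={"age": int},
    rename={"name": "full_name"},
    drop={"address"},
    not_null={"full_name": True},
)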
Example No. 29
def get_deploy_task(
    index,
    project,
    project_requires,
    deploy_target,
    deploy_options,
    task_group_id,
    parent_task,
    github_commit,
    owner,
    channel,
    taskcluster_secret,
):

    scopes = []

    nix_path_attribute = deploy_options.get('nix_path_attribute')
    if nix_path_attribute:
        nix_path_attribute = '{}.{}'.format(project, nix_path_attribute)
    else:
        nix_path_attribute = project

    if deploy_target == 'S3':
        subfolder = []
        if 'subfolder' in deploy_options:
            subfolder = [deploy_options['subfolder']]
        project_csp = []
        for url in deploy_options.get('csp', []):
            project_csp.append('--csp="{}"'.format(url))
        for require in project_requires:
            require_config = please_cli.config.PROJECTS_CONFIG.get(require, {})

            require_urls = [
                i.get('options', {}).get(channel, {}).get('url')
                for i in require_config.get('deploys', [])
            ]
            require_urls = filter(lambda x: x is not None, require_urls)
            require_urls = map(lambda x: '--csp="{}"'.format(x), require_urls)

            project_csp += require_urls

        project_envs = []
        project_envs.append('--env="release-version: {}"'.format(
            please_cli.config.VERSION))
        project_envs.append('--env="release-channel: {}"'.format(channel))
        for env_name, env_value in deploy_options.get('envs', {}).items():
            project_envs.append('--env="{}: {}"'.format(env_name, env_value))
        for require in project_requires:
            require_config = please_cli.config.PROJECTS_CONFIG.get(require, {})

            require_urls = [(
                i.get('options', {}).get(channel, {}).get('url'),
                i.get('options', {}).get(channel, {}).get('name-suffix', ''),
            ) for i in require_config.get('deploys', [])]
            require_urls = filter(lambda x: x[0] is not None, require_urls)
            normalized_require = please_cli.utils.normalize_name(
                require, normalizer='-')
            require_urls = map(
                lambda x: '--env="{}{}-url: {}"'.format(
                    normalized_require, x[1], x[0]), require_urls)

            project_envs += require_urls

        project_name = '{}{} to AWS S3 ({})'.format(
            project,
            ' ({})'.format(nix_path_attribute),
            deploy_options['s3_bucket'],
        )
        command = [
            './please',
            '-vv',
            'tools',
            'deploy:S3',
            project,
            '--s3-bucket=' + deploy_options['s3_bucket'],
            '--taskcluster-secret=' + taskcluster_secret,
            '--nix-path-attribute=' + nix_path_attribute,
            '--no-interactive',
        ] + subfolder + project_csp + project_envs

    elif deploy_target == 'HEROKU':
        project_name = '{}{} to HEROKU ({}/{})'.format(
            project,
            ' ({})'.format(nix_path_attribute),
            deploy_options['heroku_app'],
            deploy_options['heroku_dyno_type'],
        )
        command = [
            './please',
            '-vv',
            'tools',
            'deploy:HEROKU',
            project,
            '--heroku-app=' + deploy_options['heroku_app'],
            '--heroku-dyno-type=' + deploy_options['heroku_dyno_type'],
        ]

        heroku_command = deploy_options.get('heroku_command')
        if heroku_command:
            command.append('--heroku-command="{}"'.format(heroku_command))

        command += [
            '--taskcluster-secret=' + taskcluster_secret,
            '--nix-path-attribute=' + nix_path_attribute,
            '--no-interactive',
        ]

    elif deploy_target == 'DOCKERHUB':
        try:
            docker_registry = deploy_options['docker_registry']
            docker_repo = deploy_options['docker_repo']
            docker_stable_tag = deploy_options['docker_stable_tag']
        except KeyError:
            raise click.ClickException(
                'Missing `docker_registry`, `docker_repo` or `docker_stable_tag` in deploy options'
            )

        project_name = (
            f'{project} ({nix_path_attribute}) to DOCKERHUB '
            f'({docker_registry}/{docker_repo}:{project}-{nix_path_attribute}-{channel})'
        )
        command = [
            './please',
            '-vv',
            'tools',
            'deploy:DOCKERHUB',
            project,
            f'--taskcluster-secret={taskcluster_secret}',
            f'--nix-path-attribute={nix_path_attribute}',
            f'--docker-repo={docker_repo}',
            f'--docker-registry={docker_registry}',
            f'--channel={channel}',
            f'--docker-stable-tag={docker_stable_tag}',
            '--no-interactive',
        ]

    elif deploy_target == 'TASKCLUSTER_HOOK':
        try:
            docker_registry = deploy_options['docker_registry']
            docker_repo = deploy_options['docker_repo']
        except KeyError:
            raise click.ClickException(
                'Missing `docker_registry` or `docker_repo` in deploy options')
        hook_group_id = 'project-releng'
        name_suffix = deploy_options.get('name-suffix', '')
        hook_id = f'services-{channel}-{project}{name_suffix}'
        project_name = f'{project} ({nix_path_attribute}) to TASKCLUSTER HOOK ({hook_group_id}/{hook_id})'
        command = [
            './please',
            '-vv',
            'tools',
            'deploy:TASKCLUSTER_HOOK',
            project,
            f'--docker-registry={docker_registry}',
            f'--docker-repo={docker_repo}',
            f'--hook-group-id={hook_group_id}',
            f'--hook-id={hook_id}',
            f'--taskcluster-secret={taskcluster_secret}',
            f'--nix-path-attribute={nix_path_attribute}',
            '--no-interactive',
        ]
        scopes += [
            f'assume:hook-id:project-releng/services-{channel}-*',
            f'hooks:modify-hook:project-releng/services-{channel}-*',
        ]

    else:
        raise click.ClickException(
            f'Unknown deployment target `{deploy_target}` for project `{project}`'
        )

    return get_task(
        task_group_id,
        [parent_task],
        github_commit,
        channel,
        taskcluster_secret,
        ' '.join(command),
        {
            'name':
            '3.{index:02}. Deploying {project_name}'.format(
                index=index + 1,
                project_name=project_name,
            ),
            'description':
            '',
            'owner':
            owner,
            'source':
            'https://github.com/mozilla/release-services/tree/' + channel,
        },
        scopes,
    )
Example No. 30
def single(target_url, json_output, debug, rule_file, rule_uri, merge, junit):
    """
    Scan a single http(s) endpoint with drheader.

    NOTE: URL parameters are currently only supported on bulk scans.
    """
    exit_code = EXIT_CODE_NO_ERROR
    if debug:
        logging.basicConfig(level=logging.DEBUG)

    logging.debug('Validating: {}'.format(target_url))
    if not validators.url(target_url):
        raise click.ClickException(
            message='"{}" is not a valid URL.'.format(target_url))

    if rule_uri and not rule_file:
        if not validators.url(rule_uri):
            raise click.ClickException(
                message='"{}" is not a valid URL.'.format(rule_uri))
        try:
            rule_file = get_rules_from_uri(rule_uri)
        except Exception as e:
            if debug:
                raise click.ClickException(e)
            else:
                raise click.ClickException(
                    'No content retrieved from rules-uri.')

    rules = load_rules(rule_file, merge)

    try:
        logging.debug('Querying headers...')
        drheader_instance = Drheader(url=target_url)
    except Exception as e:
        if debug:
            raise click.ClickException(e)
        else:
            raise click.ClickException('Failed to get headers.')

    try:
        logging.debug('Analyzing headers...')
        drheader_instance.analyze(rules)
    except Exception as e:
        if debug:
            raise click.ClickException(e)
        else:
            raise click.ClickException('Failed to analyze headers.')

    if drheader_instance.report:
        exit_code = EXIT_CODE_FAILURE

    if json_output:
        click.echo(json.dumps(drheader_instance.report))
    else:
        click.echo()
        if not drheader_instance.report:
            click.echo('No issues found!')
        else:
            click.echo('{0} issues found'.format(len(
                drheader_instance.report)))
            for i in drheader_instance.report:
                values = []
                for k, v in i.items():
                    values.append([k, v])
                click.echo('----')
                click.echo(tabulate(values, tablefmt="presto"))
    if junit:
        file_junit_report(rules, drheader_instance.report)
    sys.exit(exit_code)
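
# Hedged minimal programmatic use mirroring the calls the command makes above;
# the URL is a placeholder and default rules are loaded (rule_file=None).
drheader_instance = Drheader(url="https://example.com")
drheader_instance.analyze(load_rules(None, False))
click.echo(json.dumps(drheader_instance.report))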