# Example #1
def gsutil_publish(filename, gsurl, duration='7d'):
    """Publish a file to Google Storage and return a time-limited signed URL.

    If *filename* is given, the file is first uploaded to *gsurl* via gsutil.
    A signed URL for *gsurl*, valid for *duration*, is then produced using the
    cluster's service-account credentials. Returns None when *gsurl* is empty.
    """
    if not gsurl:
        return None
    if filename:
        # NOTE(review): "(unknown)" looks like a scrape placeholder for the
        # original filename expression — confirm against the upstream source.
        subprocess.check_call(f'ls -lah (unknown)', shell=True)
        gcloud_driver.check_call(
            *cluster_manager.get_provider().get_project_zone(),
            f'cp ./(unknown) {gsurl}',
            gsutil=True)
    with tempfile.NamedTemporaryFile('w') as f:
        # gsutil signurl needs the service-account key as a file on disk.
        f.write(
            config_manager.get(
                key='service-account-json',
                secret_name='ckan-cloud-provider-cluster-gcloud'))
        f.flush()
        output = gcloud_driver.check_output(
            *cluster_manager.get_provider().get_project_zone(),
            f'signurl -d {duration} {f.name} {gsurl}',
            gsutil=True)
        # The signed URL is the last tab-separated column; skip short
        # header/noise lines by requiring a plausible URL length.
        candidates = []
        for raw_line in output.decode().splitlines():
            candidate = raw_line.strip().split('\t')[-1].strip()
            if len(candidate) > 20:
                candidates.append(candidate)
        assert len(candidates) == 1
    return candidates[0]
def _get_latest_backups(db_name, datastore_name):
    """Find today's newest db and datastore backups in the backups GS bucket.

    Lists today's backup directory (shell `date` expands inside the gsutil
    command) filtered by each name, then keeps the entry with the newest
    embedded timestamp per kind. Returns a (db_backup_url,
    datastore_backup_url) tuple; either element may be None if nothing matched.
    """
    gs_base_url = config_manager.get(
        key='backups-gs-base-url',
        secret_name='ckan-cloud-provider-db-gcloudsql-credentials')
    output = gcloud_driver.check_output(
        *cluster_manager.get_provider().get_project_zone(),
        f"ls {gs_base_url}/`date +%Y/%m/%d`/'*'/ | grep {db_name}",
        gsutil=True).decode() + '\n' + gcloud_driver.check_output(
            *cluster_manager.get_provider().get_project_zone(),
            f"ls {gs_base_url}/`date +%Y/%m/%d`/'*'/ | grep {datastore_name}",
            gsutil=True).decode()
    # Best hit per kind: is_datastore -> (backup datetime, backup url)
    latest = {False: (None, None), True: (None, None)}
    for raw in output.splitlines():
        url = raw.strip()
        if len(url) < 10:
            continue
        # Backup object names look like <name>_<YYYYmmddHHMM>.<ext>
        name, stamp = url.split('/')[-1].split('.')[0].split('_')
        when = datetime.datetime.strptime(stamp, '%Y%m%d%H%M')
        if name == db_name:
            is_datastore = False
        elif name == datastore_name:
            is_datastore = True
        else:
            continue
        logs.info(backup_name=name,
                  backup_datetime=when,
                  is_datastore=is_datastore)
        best_when, _ = latest[is_datastore]
        if best_when is None or best_when < when:
            latest[is_datastore] = (when, url)
    db_backup_datetime, db_backup_url = latest[False]
    datastore_backup_datetime, datastore_backup_url = latest[True]
    logs.info(db_backup_datetime=db_backup_datetime,
              db_backup_url=db_backup_url)
    logs.info(datastore_backup_datetime=datastore_backup_datetime,
              datastore_backup_url=datastore_backup_url)
    return db_backup_url, datastore_backup_url
# Example #3
def export_db(instance_id):
    """Locate the most recent db and datastore backup dumps for an instance.

    Reads the instance spec from kubernetes, then searches the backups GS
    bucket going back up to 3 days (stopping at the first day with a hit per
    kind) and keeps the dump with the newest embedded timestamp.
    Returns a (db_gs_url, datastore_gs_url) tuple; elements may be None.
    """
    instance_spec = kubectl.get(f'ckancloudckaninstance {instance_id}')['spec']
    db_name = instance_spec['db']['name']
    db_prefix = instance_spec['db'].get('dbPrefix')
    datastore_name = instance_spec['datastore']['name']
    datastore_prefix = instance_spec['datastore'].get('dbPrefix')
    gs_base_url = config_manager.get(
        secret_name='ckan-cloud-provider-db-gcloudsql-credentials',
        key='backups-gs-base-url')
    names = {'db': db_name, 'datastore': datastore_name}
    prefix_paths = {
        'db': f'{db_prefix}/' if db_prefix else '',
        'datastore': f'{datastore_prefix}/' if datastore_prefix else '',
    }
    latest_urls = {'db': None, 'datastore': None}
    latest_datetimes = {'db': None, 'datastore': None}
    for dbtype in ('db', 'datastore'):
        for minus_days in range(3):
            dt = datetime.datetime.now() - datetime.timedelta(days=minus_days)
            datepath = dt.strftime('%Y/%m/%d')
            datesuffix = dt.strftime('%Y%m%d')
            ls_arg = (f'{gs_base_url}{prefix_paths[dbtype]}/{datepath}/*/'
                      f'{names[dbtype]}_{datesuffix}*.gz')
            output = gcloud_driver.check_output(
                *cluster_manager.get_provider().get_project_zone(),
                f'ls -l "{ls_arg}"',
                gsutil=True)
            for raw in output.decode().splitlines():
                # `gsutil ls -l` puts the URL in the last space-separated field.
                gsurl = raw.strip().split(' ')[-1].strip()
                if not gsurl.startswith('gs://'):
                    continue
                # Object names end with _<YYYYmmddHHMM>.<ext>.gz
                stamp = gsurl.split('/')[-1].split('.')[-2].split('_')[-1]
                gs_url_datetime = datetime.datetime.strptime(stamp, '%Y%m%d%H%M')
                if (not latest_urls[dbtype]
                        or latest_datetimes[dbtype] < gs_url_datetime):
                    latest_urls[dbtype] = gsurl
                    latest_datetimes[dbtype] = gs_url_datetime
            if latest_urls[dbtype]:
                break  # found a backup for this kind; no need to look further back
    return latest_urls['db'], latest_urls['datastore']
# Example #4
def _gcloud():
    """Return the active cluster provider (the gcloud provider)."""
    provider = cluster_manager.get_provider()
    return provider
# Example #5
def _get_deployment_spec(router_name,
                         router_type,
                         annotations,
                         image=None,
                         httpauth_secrets=None,
                         dns_provider=None):
    """Build the kubernetes Deployment spec for a Traefik router pod.

    :param router_name: router identifier, used in resource/secret names
    :param router_type: router type, used for labels
    :param annotations: annotations object; `letsencryptCloudflareEnabled`
        flag selects Cloudflare-based SSL when dns_provider is not route53
    :param image: Traefik image override (defaults to traefik:1.6-alpine)
    :param httpauth_secrets: optional list of secret names to mount for
        HTTP basic auth; duplicates are ignored
    :param dns_provider: 'route53' enables SSL via AWS Route53 credentials
    :return: deployment spec dict (replicas, template, volumes, ...)
    """
    volume_spec = cluster_manager.get_or_create_multi_user_volume_claim(
        get_label_suffixes(router_name, router_type))
    # Deduplicate while preserving first-seen order; dict.fromkeys avoids the
    # original O(n^2) list-membership scan.
    unique_httpauth_secrets = list(dict.fromkeys(httpauth_secrets or []))
    httpauth_secrets_volumes = [{
        'name': secret,
        'secret': {
            'secretName': secret
        }
    } for secret in unique_httpauth_secrets]
    httpauth_secrets_volume_mounts = [{
        'name': secret,
        'mountPath': f'/httpauth-{secret}'
    } for secret in unique_httpauth_secrets]
    # Optional JSON blob merged into the traefik container spec (operator escape hatch).
    container_spec_overrides = config_manager.get(
        'container-spec-overrides',
        configmap_name=f'traefik-router-{router_name}-deployment',
        required=False,
        default=None)
    deployment_spec = {
        'replicas': 1,
        'revisionHistoryLimit': 5,
        'template': {
            'metadata': {
                'labels': get_labels(router_name, router_type, for_deployment=True)
            },
            'spec': {
                'containers': [
                    {
                        'name': 'traefik',
                        'image': image or 'traefik:1.6-alpine',
                        'ports': [{'containerPort': 80}],
                        'volumeMounts': [
                            {'name': 'etc-traefik', 'mountPath': '/etc-traefik'},
                            # subPath keeps each router's ACME state separate on the shared claim
                            {'name': 'traefik-acme', 'mountPath': '/traefik-acme', 'subPath': f'router-traefik-{router_name}'},
                            *httpauth_secrets_volume_mounts,
                        ],
                        'args': ['--configFile=/etc-traefik/traefik.toml'],
                        **(json.loads(container_spec_overrides) if container_spec_overrides else {})
                    }
                ],
                'volumes': [
                    {'name': 'etc-traefik', 'configMap': {'name': f'router-traefik-{router_name}'}},
                    dict(volume_spec, name='traefik-acme'),
                    *httpauth_secrets_volumes,
                ]
            }
        }
    }
    if dns_provider == 'route53':
        logs.info('Traefik deployment: adding SSL support using AWS Route53')
        container = deployment_spec['template']['spec']['containers'][0]
        container['ports'].append({'containerPort': 443})
        aws_credentials = cluster_manager.get_provider().get_aws_credentials()
        secret_name = f'ckancloudrouter-{router_name}-route53'
        kubectl.update_secret(
            secret_name, {
                'AWS_ACCESS_KEY_ID': aws_credentials['access'],
                'AWS_SECRET_ACCESS_KEY': aws_credentials['secret'],
                'AWS_REGION': aws_credentials['region']
            },
            labels=get_labels(router_name, router_type))
        container['envFrom'] = [{'secretRef': {'name': secret_name}}]
    elif annotations.get_flag('letsencryptCloudflareEnabled'):
        logs.info('Traefik deployment: adding SSL support using Cloudflare')
        container = deployment_spec['template']['spec']['containers'][0]
        container['ports'].append({'containerPort': 443})
        cloudflare_email, cloudflare_api_key = get_cloudflare_credentials()
        secret_name = f'ckancloudrouter-{router_name}-cloudflare'
        kubectl.update_secret(secret_name, {
            'CLOUDFLARE_EMAIL': cloudflare_email,
            'CLOUDFLARE_API_KEY': cloudflare_api_key,
        },
                              labels=get_labels(router_name, router_type))
        container['envFrom'] = [{'secretRef': {'name': secret_name}}]
    else:
        logs.info('Not configuring SSL support for Traefik deployment')
    return deployment_spec
# Example #6
def update(router_name, wait_ready, spec, annotations, routes, dry_run=False):
    """Delegate the nginx router update to the active cluster provider."""
    provider = cluster_manager.get_provider()
    provider.update_nginx_router(
        router_name, wait_ready, spec, annotations, routes, dry_run)