Example no. 1
def create_command(args):

    tier_name = get_tier_name()

    with TSTransaction() as ts:
        prep = prepare_tenant_name(ts=ts,
                                   tenant_name=vars(args)['tenant-name'],
                                   product_name=vars(args)['product-name'])
        tenant_name = prep['tenant_name']

        if ts.get_table('tenant-names').get({'tenant_name': tenant_name}):
            print "Tenant '{}' already exists. Use 'tenant refresh' command to refresh it.".format(
                tenant_name)
            sys.exit(1)

        result = define_tenant(ts=ts,
                               tenant_name=tenant_name,
                               product_name=prep['product']['product_name'],
                               tier_name=tier_name)

    print "Tenant '{}' created. Note, config is not committed!".format(
        tenant_name)
    print "State of tenant for deployables:"
    if result['report']:
        for deployable_name, state in result['report']:
            print "\t{}: {}".format(deployable_name, state)
    else:
        print "\tNo deployable associated with this tenant!"
Example no. 2
def create_standard_claims_for_test():
    """
    Duplicate of the code from jwtsetup but does not use the
    application context to get tenant, deployable and tier
    (which should probably be refactored instead of duplicated)
    """
    from appmodule import app

    expire = 86400
    tier_name = get_tier_name()
    iat = datetime.utcnow()
    exp = iat + timedelta(seconds=expire)
    nbf = iat + timedelta(seconds=0)
    jti = str(uuid.uuid4()).replace("-", "")
    iss = app.config["name"]
    standard_claims = {
        # JWT standard fields
        'iat': iat,
        'exp': exp,
        'nbf': nbf,
        'jti': jti,
        'iss': iss,

        # Drift fields
        'tier': tier_name,
        'tenant': _get_test_db(),
        'deployable': iss,
    }
    return standard_claims
Example no. 3
def cleanup_orphaned_matchqueues():
    """
    Find matches that have been reserved by the match queue but not joined
    for 5 minutes and make them available to other players.
    """
    logger = get_task_logger("cleanup_orphaned_matchqueues")

    tier_name = get_tier_name()
    tenants = driftbase.tasks.get_tenants()
    logger.info("Cleaning up match queues for %s tenants...", len(tenants))
    for tenant_config in tenants:
        tenant_name = tenant_config["name"]
        if tenant_config.get("name", "*") == "*":
            continue
        try:
            this_conn_string = get_connection_string(tenant_config,
                                                     None,
                                                     tier_name=tier_name)

        except TenantNotFoundError:
            continue

        with sqlalchemy_session(this_conn_string) as session:
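            # A match counts as orphaned when it has sat in 'queue' state past
            # the interval below while its game server is still heartbeating.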
            sql = """
            SELECT m.* FROM gs_matches m
                INNER JOIN gs_servers s ON s.server_id = m.server_id
            WHERE m.status = 'queue' AND
                  m.status_date::timestamp < now()::timestamp - interval '5 minutes' AND
                  s.heartbeat_date::timestamp >= now()::timestamp - interval '2 minutes'
             ORDER BY m.match_id DESC
            """
            result = session.execute(sql)
            orphaned_matches = set()
            match = result.fetchone()
            while match:
                orphaned_matches.add(match.match_id)
                match = result.fetchone()
            if orphaned_matches:
                logger.info("Tenant '%s' has %s orphaned matches", tenant_name,
                            len(orphaned_matches))
            for match_id in orphaned_matches:
                match = session.query(Match).get(match_id)
                match.status = "idle"
                match.status_date = utcnow()
                matchqueueplayers = session.query(MatchQueuePlayer) \
                                           .filter(MatchQueuePlayer.match_id == match_id)
                for p in matchqueueplayers:
                    session.delete(p)
                logger.info(
                    "Cleaning up orphaned match '%s' in tenant '%s' and putting it "
                    "back into the pool", match_id, tenant_name)
            session.commit()

            # if we cleaned up any matches we should process the match queue in
            # case there are any players waiting
            if orphaned_matches:
                logger.info("Processing match queue")
                redis = RedisCache(tenant=tenant_config["name"],
                                   redis_server=tenant_config['redis_server'])
                process_match_queue(redis, session)
Example no. 4
def _find_latest_ami(service_name, release=None):
    name = get_app_name()
    tier_name = get_tier_name()
    conf = get_drift_config(tier_name=tier_name, deployable_name=name)
    domain = conf.domain.get()
    aws_region = conf.tier['aws']['region']

    ec2 = boto3.resource('ec2', region_name=aws_region)
    filters = [
        {
            'Name': 'tag:service-name',
            'Values': [name]
        },
        {
            'Name': 'tag:domain-name',
            'Values': [domain['domain_name']]
        },
    ]
    if release:
        filters.append({'Name': 'tag:git-release', 'Values': [release]})

    amis = list(ec2.images.filter(Owners=['self'], Filters=filters))
    if not amis:
        criteria = {d['Name']: d['Values'][0] for d in filters}
        print "No '{}' AMI found using the search criteria {}.".format(
            UBUNTU_BASE_IMAGE_NAME, criteria)
        sys.exit(1)

    ami = max(amis, key=operator.attrgetter("creation_date"))
    return ami
Example no. 5
def get_tier_config():
    """Fetches information for the specified tier local config
    """
    tier_name = get_tier_name()
    config_path = os.path.join(os.path.expanduser("~"), ".drift")
    with open(os.path.join(config_path, "{}.json".format(tier_name))) as f:
        config = json.load(f)
    return config
Example no. 6
def get_mock_tenants():
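    """Return a single mock tenant entry that points at the local test
    database, with its connection string resolved."""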
    t = {"name": os.environ.get("drift_test_database"),
         "db_server": "localhost",
         "redis_server": "localhost",
         "heartbeat_timeout": 0,
         }
    conn_string = get_connection_string(t, None, tier_name=get_tier_name())
    t["conn_string"] = conn_string
    return [t]
Example no. 7
def construct_db_name(tenant, service, tier_name=None):
    # TODO: Sanitize tenant
    # service = service.replace("-", "")
    # ATT! 'tenant' now contains the tier name, i.e. "default-devnorth", so it
    # needs to be stripped out.
    # TODO: FIX ME YOU LAZY BASTARDS!!!

    tier_name = tier_name or get_tier_name()
    if tenant.endswith("-%s" % tier_name.lower()):
        tenant = tenant.replace("-%s" % tier_name.lower(), "")
    db_name = '{}_{}_{}'.format(tier_name, tenant, service)
    return db_name
Example no. 8
def xxxxcreate_command(args):
    tenant_name = args.tenant
    if not tenant_name:
        tenants_report()
        return

    os.environ['DRIFT_DEFAULT_TENANT'] = tenant_name

    # Minor hack:
    from drift.flaskfactory import load_flask_config

    try:
        conf = get_drift_config(
            tier_name=get_tier_name(),
            tenant_name=tenant_name,
            drift_app=load_flask_config(),
        )
    except TenantNotConfigured as e:
        raise
    except Exception as e:
        print Fore.RED + "'tenant {}' command failed: {}".format(
            args.action, e)
        return

    if not args.action:
        tenant_report(conf)
        return

    if args.action in ['create', 'recreate']:
        # Provision resources
        with TSTransaction() as ts:
            conf = get_config(ts=ts)
            resources = conf.drift_app.get("resources")
            for module_name in resources:
                m = importlib.import_module(module_name)
                if hasattr(m, "provision"):
                    provisioner_name = m.__name__.split('.')[-1]
                    print "Provisioning '%s' for tenant '%s' on tier '%s'" % (
                        provisioner_name, tenant_name, conf.tier['tier_name'])
                    if 0:  # THIS IS BONKERS LOGIC! FIIIIX!
                        conf.tier['resource_defaults'].append({
                            'resource_name':
                            provisioner_name,
                            'parameters':
                            getattr(m, 'NEW_TIER_DEFAULTS', {}),
                        })
                    recreate = 'recreate' if args.action == 'recreate' else 'skip'
                    m.provision(conf, {}, recreate=recreate)

            row = ts.get_table('tenants').get(conf.tenant)
            row['state'] = 'active'

        tenant_report(conf)
Example no. 9
    def before_request(self):

        try:
            conf = get_drift_config(
                ts=current_app.extensions['driftconfig'].table_store,
                tenant_name=tenant_from_hostname,
                tier_name=get_tier_name(),
                deployable_name=current_app.config['name'])
        except TenantNotConfigured as e:
            abort(httplib.NOT_FOUND, description=str(e))

        if 0:  # Disabling this as it needs to be refactored into a JIT like feature. This simply blocks everything.
            if conf.tenant and conf.tenant[
                    'state'] != 'active' and request.endpoint != "admin.adminprovisionapi":
                raise TenantNotFoundError(
                    "Tenant '{}' for tier '{}' and deployable '{}' is not active, but in state '{}'."
                    .format(conf.tenant['tenant_name'], get_tier_name(),
                            current_app.config['name'], conf.tenant['state']))

        # Add applicable config tables to 'g'
        g.conf = conf
Example no. 10
def enable_command(args):

    tier_name = get_tier_name()
    tenant_name = vars(args)['tenant-name']

    with TSTransaction() as ts:

        results = refresh_tenants(ts=ts,
                                  tenant_name=tenant_name,
                                  tier_name=tier_name)

    results = list(results)
    print "Result:", results
Example no. 11
def get_log_details():
    details = OrderedDict()
    tenant_name = None
    tier_name = get_tier_name()
    remote_addr = None

    try:
        remote_addr = request.remote_addr
    except Exception:
        pass

    try:
        if hasattr(g, 'conf'):
            tenant_name = g.conf.tenant_name[
                'tenant_name'] if g.conf.tenant_name else '(none)'
    except RuntimeError as e:
        if "Working outside of application context" in repr(e):
            pass
        else:
            raise
    log_context = {}
    log_context["created"] = datetime.datetime.utcnow().isoformat() + "Z"
    log_context["tenant"] = tenant_name
    log_context["tier"] = tier_name
    log_context["remote_addr"] = remote_addr
    details["logger"] = log_context
    jwt_context = {}
    try:
        fields = set([
            "user_id", "player_id", "roles", "jti", "user_name", "player_name",
            "client_id", "identity_id"
        ])
        for k, v in current_user.iteritems():
            if k in fields:
                key = "{}".format(k)
                jwt_context[key] = v
            if k == "roles" and v:
                jwt_context[k] = ",".join(v)
    except Exception as e:
        pass
    if jwt_context:
        details["user"] = jwt_context

    # add "Drift-Log-Context" request headers to the logs
    try:
        details["client"] = json.loads(
            request.headers.get("Drift-Log-Context"))
    except Exception:
        pass

    return details
Example no. 12
def load_config(tier_name=None):
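    """
    Load the service configuration from the file referenced by the
    'drift_CONFIG' environment variable, then apply the per-tier config
    files via load_config_files().
    """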
    if not tier_name:
        tier_name = get_tier_name()
    config_filename = os.environ["drift_CONFIG"]
    config_values = {}

    log.info("Loading configuration from %s", config_filename)

    with open(config_filename) as f:
        config_values = json.load(f)
    config_values["config_filename"] = config_filename
    config_values["tier_name"] = tier_name

    load_config_files(tier_name, config_values, log_progress=False)

    return config_values
Example no. 13
def setup_tenant():
    """
    Called from individual test modules.
    create a tenant only if the test module was not called from
    the kitrun's systest command
    (in which case drift_test_database has been set in environ)
    Also configure some basic parameters in the app
    """
    from appmodule import app
    global db_name
    tenant_name = _get_test_db()
    service_name = app.config["name"]
    from drift.utils import get_tier_name
    tier_name = get_tier_name()

    db_name = construct_db_name(tenant_name, service_name, tier_name)
    test_target = _get_test_target()
    if test_target:
        flushwrite("Skipping tenant setup due to "
                   "manually specified test target: %s" % test_target)
        return

    db_host = app.config["systest_db"]["server"]
    app.config["db_connection_info"]["server"] = db_host
    app.config["default_tenant"] = tenant_name
    app.config["service_user"] = {
        "username": service_username,
        "password": service_password
    }
    conn_string = "postgresql://*****:*****@{}/{}" \
                  .format(db_host, db_name)
    test_tenant = {
        "name": tenant_name,
        "db_connection_string": conn_string,
    }
    app.config["tenants"].insert(0, test_tenant)
    # flushwrite("Adding test tenant '%s'" % test_tenant)
    # TODO: _get_env assumes "*" is the last tenant and screws things up
    # if you append something else at the end. Fix this plz.

    # Add public and private key for jwt.

    app.config['private_key'] = private_test_key
    app.config['jwt_trusted_issuers'] = [{
        "iss": app.config['name'],
        "pub_rsa": public_test_key,
    }]
Example no. 14
def tenants_report():
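    """Print a one-line database status (OK, missing or error) for every
    tenant registered on this tier."""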
    print "The following tenants are registered in config on tier '{}':".format(
        get_tier_name())
    config = load_config()
    for tenant_config in config.get("tenants", []):
        name = tenant_config["name"]
        # TODO: Get rid of this
        if name == "*":
            continue
        sys.stdout.write("   {}... ".format(name))
        db_error = db_check(tenant_config)
        if not db_error:
            print Fore.GREEN + "OK"
        else:
            if "does not exist" in db_error:
                print Fore.RED + "FAIL! DB does not exist"
            else:
                print Fore.RED + "Error: %s" % db_error
    print "To view more information about each tenant run this command again with the tenant name"
Example no. 15
def show_command(args):
    tier_name = get_tier_name()
    tenant_name = vars(args)['tenant-name']
    ts = get_default_drift_config()
    tenant_info = ts.get_table('tenant-names').get(
        {'tenant_name': tenant_name})
    if not tenant_info:
        print "Tenant '{}' not found.".format(tenant_name)
        sys.exit(1)

    tenant_info2 = ts.get_table('tenants').find({
        'tier_name': tier_name,
        'tenant_name': tenant_name
    })

    if not tenant_info2:
        print "The tenant '{}' is not defined for any deployable on tier '{}'.".format(
            tenant_name, tier_name)
        sys.exit(1)
Example no. 16
def tenant_report(tenant_config):
    from drift.tenant import get_connection_string

    conn_string = get_connection_string(tenant_config)
    print "Tenant configuration for '{}' on tier '{}':" \
          .format(tenant_config["name"], get_tier_name())
    for k in sorted(tenant_config.keys()):
        print "  {} = {}".format(k, tenant_config[k])
    print "Connection string:\n  {}".format(conn_string)
    print "Database check... "
    db_error = db_check(tenant_config)
    if db_error:
        if "does not exist" in db_error:
            print Fore.RED + "  FAIL! DB does not exist"
            print "  You can create this database by running this " \
                  "command again with the action 'create'"
        else:
            print Fore.RED + "  {}".format(db_error)
    else:
        print Fore.GREEN + "  OK! Database is online and reachable"
Example no. 17
def get_connection_string(tenant_config,
                          conn_info=None,
                          service_name=None,
                          tier_name=None):
    """
    Returns a connection string for the current tenant and
    raises TenantNotFoundError if none is found
    """

    # If in Flask request context, use current_app, else load the config straight up
    config = safe_get_config()

    if not tier_name:
        tier_name = get_tier_name()
    connection_string = None
    # if the tenant supplies the entire connection string we use that verbatim
    if "db_connection_string" in tenant_config:
        connection_string = tenant_config["db_connection_string"]
    # otherwise the tenant should supply the server and we construct the connection string
    elif tenant_config.get("db_server", None):
        if not service_name:
            service_name = config["name"]
        db_name = construct_db_name(tenant_config["name"],
                                    service_name,
                                    tier_name=tier_name)
        if not conn_info:
            conn_info = config.get('db_connection_info', {})
        connection_string = '{driver}://{user}:{password}@{server}/{db}'.format(
            driver=conn_info.get("driver", "postgresql"),
            user=conn_info.get("user", "zzp_user"),
            password=conn_info.get("password", "zzp_user"),
            server=tenant_config["db_server"],
            db=db_name)

    if not connection_string:
        log.warning("raising TenantNotFoundError. tenant_config is %s ",
                    tenant_config)
        raise TenantNotFoundError(
            "Tenant '%s' is not registered on tier '%s'" %
            (tenant_config["name"], tier_name))
    return connection_string
Example no. 18
def update_online_statistics():
    """

    """
    logger = get_task_logger("update_statistics")

    tier_name = get_tier_name()
    config = load_config()
    tenants = config.get("tenants", [])
    logger.info("Updating statistics for %s tenants...", len(tenants))
    num_updated = 0
    for tenant_config in tenants:
        if tenant_config.get("name", "*") == "*":
            continue
        try:
            this_conn_string = get_connection_string(tenant_config, None, tier_name=tier_name)

        except TenantNotFoundError:
            continue

        with sqlalchemy_session(this_conn_string) as session:
            result = session.execute("""SELECT COUNT(DISTINCT(player_id)) AS cnt
                                          FROM ck_clients
                                         WHERE heartbeat > NOW() - INTERVAL '1 minutes'""")
            cnt = result.fetchone()[0]
            if cnt:
                num_updated += 1
                tenant_name = tenant_config["name"]
                name = 'backend.numonline'
                row = session.query(Counter).filter(Counter.name == name).first()
                if not row:
                    row = Counter(name=name, counter_type="absolute")
                    session.add(row)
                    session.commit()
                counter_id = row.counter_id
                timestamp = datetime.datetime.utcnow()
                add_count(counter_id, 0, timestamp, cnt, is_absolute=True, db_session=session)
                session.commit()
                print "Updated num_online for %s to %s" % (tenant_name, cnt)

    logger.info("Updated %s tenants with online user count", num_updated)
Example no. 19
def refresh_command(args):
    tenant_name = vars(args)['tenant-name']
    tier_name = get_tier_name(fail_hard=False)

    with TSTransaction(commit_to_origin=False, write_to_scratch=False) as ts:
        print "Refreshing configuration for tenants and deployables..."
        for report in refresh_tenants(ts=ts,
                                      tenant_name=tenant_name,
                                      tier_name=tier_name):
            print report
        else:
            print "No configuration found."

    print "Hints for associating tenants and deployables:"
    print "  Make sure deployables are registered and available on a tier."
    print "  Take a look at 'drift-admin register' to register deployables."
    print "  Then run 'dconf deployable register all' for good measure."
    print "  Run 'drift-admin deployable list' to see registration."
    print(
        "  Run 'dconf product edit <product name>' and make sure your product includes all the "
        "necessary deployables.")
Example no. 20
def get_tenants():
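    """
    Return the config entries for all concrete tenants on this tier, each
    with 'conn_string' (and 'redis_server', when configured) filled in.
    The wildcard tenant '*' and tenants that raise TenantNotFoundError
    are skipped.
    """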
    tier_name = get_tier_name()
    config = load_config()
    _tenants = config.get("tenants", [])
    tenants = []
    for t in _tenants:
        t["heartbeat_timeout"] = config.get("heartbeat_timeout",
                                            DEFAULT_HEARTBEAT_TIMEOUT)
        if t.get("name", "*") == "*":
            continue
        try:
            this_conn_string = get_connection_string(t,
                                                     None,
                                                     tier_name=tier_name)
        except TenantNotFoundError:
            continue
        t["conn_string"] = this_conn_string
        if config.get("redis_server", None):
            t["redis_server"] = config.get("redis_server")
        tenants.append(t)
    return tenants
Example no. 21
def make_celery(app):
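    """
    Create a Celery app for the Flask app 'app', using the tier's broker
    (or a local Redis when DRIFT_USE_LOCAL_SERVERS is set) and the custom
    drift_celery_json serializer. Tasks run inside the Flask app context
    with g.conf populated.
    """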

    kombu.serialization.register('drift_celery_json',
                                 drift_celery_dumps,
                                 drift_celery_loads,
                                 content_type='application/x-myjson',
                                 content_encoding='utf-8')

    celery = Celery(app.import_name)

    ts = get_default_drift_config()
    tier_name = get_tier_name()
    tier_config = ts.get_table('tiers').get({'tier_name': tier_name})

    if os.environ.get('DRIFT_USE_LOCAL_SERVERS', False):
        broker_url = "redis://localhost:6379/15"
    else:
        broker_url = tier_config["celery_broker_url"]

    log.info("Celery broker from tier config: %s", broker_url)

    celery.conf.update(app.config)
    celery.conf["BROKER_URL"] = broker_url
    celery.conf["CELERY_RESULT_BACKEND"] = broker_url
    celery.conf["CELERY_TASK_SERIALIZER"] = "drift_celery_json"
    celery.conf["CELERY_RESULT_SERIALIZER"] = "drift_celery_json"
    celery.conf["CELERY_ACCEPT_CONTENT"] = ["drift_celery_json"]
    celery.conf["CELERY_ENABLE_UTC"] = True
    TaskBase = celery.Task

    class ContextTask(TaskBase):
        abstract = True

        def __call__(self, *args, **kwargs):
            with app.app_context():
                g.conf = get_config()
                return TaskBase.__call__(self, *args, **kwargs)

    celery.Task = ContextTask
    return celery
Example no. 22
def verify_token(token, auth_type):
    """Verifies 'token' and returns its payload."""
    if auth_type == "JTI":
        payload = get_cached_token(token)
        if not payload:
            log.info("Invalid JTI: Token '%s' not found in cache.", token)
            abort_unauthorized("Invalid JTI. Token %s does not exist." % token)

    elif auth_type == "JWT":
        algorithm = JWT_ALGORITHM
        leeway = timedelta(seconds=JWT_LEEWAY)
        verify_claims = JWT_VERIFY_CLAIMS
        required_claims = JWT_REQUIRED_CLAIMS

        options = {
            'verify_' + claim: True
            for claim in verify_claims
        }

        options.update({
            'require_' + claim: True
            for claim in required_claims
        })

        # Get the issuer to see if we trust it and have the public key to verify the signature.
        try:
            unverified_payload = jwt.decode(token,
                                            options={
                                                "verify_signature": False
                                            })
        except jwt.InvalidTokenError as e:
            abort_unauthorized("Invalid token: %s" % str(e))

        issuer = unverified_payload.get("iss")
        if not issuer:
            abort_unauthorized("Invalid JWT. The 'iss' field is missing.")

        for trusted_issuer in current_app.config["jwt_trusted_issuers"]:
            if trusted_issuer["iss"] == issuer:
                try:
                    payload = jwt.decode(token, trusted_issuer["pub_rsa"],
                                         options=options,
                                         algorithms=[algorithm],
                                         leeway=leeway)
                except jwt.InvalidTokenError as e:
                    abort_unauthorized("Invalid token: %s" % str(e))
                break
        else:
            abort_unauthorized("Invalid JWT. Issuer '%s' not known "
                               "or not trusted." % issuer)

        # Verify tenant and tier
        tenant, tier = payload.get('tenant'), payload.get('tier')
        if not tenant or not tier:
            abort_unauthorized("Invalid JWT. "
                               "Token must specify both 'tenant' and 'tier'.")

        if tenant != g.driftenv["name"]:
            abort_unauthorized("Invalid JWT. Token is for tenant '%s' but this"
                               " is tenant '%s'" % (tenant, g.driftenv["name"]))

        cfg_tier_name = get_tier_name()
        if tier != cfg_tier_name:
            abort_unauthorized("Invalid JWT. Token is for tier '%s' but this"
                               " is tier '%s'" % (tier, cfg_tier_name))

    else:
        abort_unauthorized("Unsupported authorization type '%s'." % auth_type)

    return payload
Example no. 23
def run_command(args):
    print
    from drift import tenant
    tenant_name = args.tenant
    if not tenant_name:
        tenants_report()
        return
    tier_name = get_tier_name()
    config = load_config()
    tenant_config = {}
    for tenant_config in config.get("tenants", []):
        if tenant_config["name"].lower() == tenant_name.lower():
            # get the right casing from config
            tenant_name = tenant_config["name"]
            break
    else:
        print Fore.RED + "ERROR! Tenant '{}' is not registered in config for tier '{}'" \
                         .format(tenant_name, tier_name)
        print "Please add the tenant into config/config_{}.json and " \
              "then run this command again\n".format(tier_name)
        return

    if not args.action:
        tenant_report(tenant_config)
        return

    db_host = tenant_config["db_server"]
    if ":" not in db_host:
        db_host += ":{}".format(POSTGRES_PORT)

    # TODO validation
    db_name = None
    if "recreate" in args.action:
        actions = ["drop", "create"]
        print "Recreating db for tenant '{}'".format(tenant_name)
    else:
        actions = [args.action]

    if "drop" in actions:
        print "Dropping tenant {} on {}...".format(tenant_name, db_host)
        db_error = db_check(tenant_config)
        if db_error:
            print "ERROR: You cannot drop the db because it is not reachable: {}".format(
                db_error)
            return
        else:
            tenant.drop_db(tenant_name, db_host, tier_name)

    if "create" in args.action:
        print "Creating tenant '{}' on server '{}'...".format(
            tenant_name, db_host)
        db_notfound_error = db_check(tenant_config)
        if not db_notfound_error:
            print "ERROR: You cannot create the database because it already exists"
            print "Use the command 'recreate' if you want to drop and create the db"
            from drift.tenant import get_connection_string
            conn_string = get_connection_string(tenant_config)
            print "conn_string = " + conn_string
        else:
            tenant.create_db(tenant_name, db_host, tier_name)
            tenant_report(tenant_config)
Example no. 24
def run_command(args):
    service_info = get_service_info()
    tier_config = get_tier_config()
    ec2_conn = boto.ec2.connect_to_region(tier_config["region"])
    iam_conn = boto.iam.connect_to_region(tier_config["region"])

    if args.ubuntu:
        # Get all Ubuntu Trusty 14.04 images from the appropriate region and
        # pick the most recent one.
        print "Finding the latest AMI on AWS that matches 'ubuntu-trusty-14.04*'"
        # The 'Canonical' owner. This organization maintains the Ubuntu AMIs on AWS.
        amis = ec2_conn.get_all_images(
            owners=['099720109477'],
            filters={'name': 'ubuntu/images/hvm/ubuntu-trusty-14.04*'},
        )
        ami = max(amis, key=operator.attrgetter("creationDate"))
    else:

        amis = ec2_conn.get_all_images(
            owners=['self'],  # The current organization
            filters={
                'tag:service-name': UBUNTU_BASE_IMAGE_NAME,
                'tag:tier': tier_config["tier"],
            },
        )
        if not amis:
            print "No '{}' AMI found for tier {}. Bake one using this command: {} bakeami --ubuntu".format(
                UBUNTU_BASE_IMAGE_NAME, tier_config["tier"], sys.argv[0])
            sys.exit(1)

        ami = max(amis, key=operator.attrgetter("creationDate"))
        print "{} AMI(s) found.".format(len(amis))

    print "Using source AMI:"
    print "\tID:\t", ami.id
    print "\tName:\t", ami.name
    print "\tDate:\t", ami.creationDate

    if args.ubuntu:
        version = None
        branch = ''
        sha_commit = ''
    else:
        cmd = "python setup.py sdist --formats=zip"
        current_branch = get_branch()
        if not args.tag:
            args.tag = current_branch

        print "Using branch/tag", args.tag
        checkout(args.tag)
        try:
            sha_commit = get_commit()
            branch = get_branch()
            version = get_git_version()
            if not args.preview:
                os.system(cmd)
        finally:
            print "Reverting to ", current_branch
            checkout(current_branch)

    if not version:
        version = {'tag': 'untagged-branch'}

    print "git version:", version

    service_info = get_service_info()
    user = iam_conn.get_user()  # The current IAM user running this command

    # Need to generate a pre-signed url to the tiers root config file on S3
    tiers_config = get_tiers_config()
    tiers_config_url = '{}/{}.{}/{}'.format(tiers_config['region'],
                                            tiers_config['bucket'],
                                            tiers_config['domain'],
                                            TIERS_CONFIG_FILENAME)

    var = {
        "service":
        UBUNTU_BASE_IMAGE_NAME if args.ubuntu else service_info["name"],
        "versionNumber": service_info["version"],
        "region": tier_config["region"],
        "source_ami": str(ami.id),
        "branch": branch,
        "commit": sha_commit,
        "release": version['tag'],
        "user_name": str(user.user_name),
        "tier": tier_config["tier"],
        "tier_url": str(tiers_config_url),
    }

    if args.ubuntu:
        var['setup_script'] = pkg_resources.resource_filename(
            __name__, "ubuntu-packer.sh")
    else:
        var['setup_script'] = pkg_resources.resource_filename(
            __name__, "driftapp-packer.sh")

    print "Using var:\n", json.dumps({k: str(v)
                                      for k, v in var.iteritems()},
                                     indent=4)

    packer_cmd = "packer"
    try:
        result = subprocess.call(packer_cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
    except Exception as e:
        print "Error:", e
        print "%s was not found. Please install using the following method:" % packer_cmd
        print "  brew tap homebrew/binary\n  brew install %s" % packer_cmd
        sys.exit(1)
    else:
        print "Packer process returned", result

    cmd = "%s build " % packer_cmd
    if args.debug:
        cmd += "-debug "

    cmd += "-only=amazon-ebs "
    for k, v in var.iteritems():
        cmd += "-var {}=\"{}\" ".format(k, v)

    # Use generic packer script if project doesn't specify one
    pkg_resources.cleanup_resources()
    if args.ubuntu:
        scriptfile = pkg_resources.resource_filename(__name__,
                                                     "ubuntu-packer.json")
        cmd += scriptfile
    elif os.path.exists("config/packer.json"):
        cmd += "config/packer.json"
    else:
        scriptfile = pkg_resources.resource_filename(__name__,
                                                     "driftapp-packer.json")
        cmd += scriptfile
    print "Baking AMI with: {}".format(cmd)

    if args.preview:
        print "Not building or packaging because --preview is on. Exiting now."
        return

    start_time = time.time()
    # Dump deployment manifest into dist folder temporarily. The packer script
    # will pick it up and bake it into the AMI.
    deployment_manifest_filename = os.path.join("dist",
                                                "deployment-manifest.json")
    deployment_manifest_json = json.dumps(
        create_deployment_manifest('bakeami'), indent=4)
    print "Deployment Manifest:\n", deployment_manifest_json
    with open(deployment_manifest_filename, "w") as dif:
        dif.write(deployment_manifest_json)

    try:
        os.system(cmd)
    finally:
        os.remove(deployment_manifest_filename)
        pkg_resources.cleanup_resources()
    duration = time.time() - start_time
    print "Done after %.0f seconds" % (duration)
    slackbot.post_message(
        "Successfully baked a new AMI for '{}' on tier '{}' in {:.0f} seconds".
        format(service_info["name"], get_tier_name(), duration))
Example no. 25
def _bake_command(args):
    service_info = get_service_info()
    tier_config = get_tier_config()
    iam_conn = boto.iam.connect_to_region(tier_config["region"])

    if args.ubuntu:
        # Get all Ubuntu Trusty 14.04 images from the appropriate region and
        # pick the most recent one.
        # The 'Canonical' owner. This organization maintains the Ubuntu AMIs on AWS.
        print "Finding the latest AMI on AWS that matches", UBUNTU_RELEASE
        ec2 = boto3.resource('ec2', region_name=tier_config["region"])
        filters = [
            {
                'Name': 'name',
                'Values': [UBUNTU_RELEASE]
            },
        ]
        amis = list(
            ec2.images.filter(Owners=[AMI_OWNER_CANONICAL], Filters=filters))
        if not amis:
            print "No AMI found matching '{}'. Not sure what to do now.".format(
                UBUNTU_RELEASE, tier_config["tier"], sys.argv[0])
            sys.exit(1)
        ami = max(amis, key=operator.attrgetter("creation_date"))
    else:
        ec2 = boto3.resource('ec2', region_name=tier_config["region"])
        filters = [
            {
                'Name': 'tag:service-name',
                'Values': [UBUNTU_BASE_IMAGE_NAME]
            },
            {
                'Name': 'tag:tier',
                'Values': [tier_config["tier"]]
            },
        ]
        amis = list(ec2.images.filter(Owners=['self'], Filters=filters))
        if not amis:
            print "No '{}' AMI found for tier {}. Bake one using this command: {} ami bake --ubuntu".format(
                UBUNTU_BASE_IMAGE_NAME, tier_config["tier"], sys.argv[0])
            sys.exit(1)
        ami = max(amis, key=operator.attrgetter("creation_date"))

    print "Using source AMI:"
    print "\tID:\t", ami.id
    print "\tName:\t", ami.name
    print "\tDate:\t", ami.creation_date

    if args.ubuntu:
        version = None
        branch = ''
        sha_commit = ''
        deployment_manifest = create_deployment_manifest(
            'bakeami')  # Todo: Should be elsewhere or different
    else:
        cmd = "python setup.py sdist --formats=zip"
        current_branch = get_branch()

        if not args.tag:
            # See if service is tagged to a specific version for this tier
            for si in tier_config['deployables']:
                if si['name'] == service_info['name']:
                    if 'release' in si:
                        text = "Error: As deployable '{}' for tier '{}' is pegged to a particular " \
                            "release, you must specify a release tag to which to bake from.\n" \
                            "Note that this is merely a safety measure.\n" \
                            "For reference, the current deployable for this tier is pegged at " \
                            "release tag '{}'."
                        print text.format(service_info['name'],
                                          tier_config['tier'], si['release'])
                        sys.exit(1)
                    break

        if not args.tag:
            args.tag = current_branch

        print "Using branch/tag", args.tag

        checkout(args.tag)
        try:
            deployment_manifest = create_deployment_manifest(
                'bakeami')  # Todo: Should be elsewhere or different
            sha_commit = get_commit()
            branch = get_branch()
            version = get_git_version()
            service_info = get_service_info()
            if not args.preview:
                os.system(cmd)
        finally:
            print "Reverting to ", current_branch
            checkout(current_branch)

    if not version:
        version = {'tag': 'untagged-branch'}

    print "git version:", version

    user = iam_conn.get_user()  # The current IAM user running this command

    # Need to generate a pre-signed url to the tiers root config file on S3
    tiers_config = get_tiers_config()
    tiers_config_url = '{}/{}.{}/{}'.format(tiers_config['region'],
                                            tiers_config['bucket'],
                                            tiers_config['domain'],
                                            TIERS_CONFIG_FILENAME)

    var = {
        "service":
        UBUNTU_BASE_IMAGE_NAME if args.ubuntu else service_info["name"],
        "versionNumber": service_info["version"],
        "region": tier_config["region"],
        "source_ami": ami.id,
        "branch": branch,
        "commit": sha_commit,
        "release": version['tag'],
        "user_name": user.user_name,
        "tier": tier_config["tier"],
        "tier_url": tiers_config_url,
    }

    if args.ubuntu:
        var['setup_script'] = pkg_resources.resource_filename(
            __name__, "ubuntu-packer.sh")
    else:
        var['setup_script'] = pkg_resources.resource_filename(
            __name__, "driftapp-packer.sh")

    print "Using var:\n", pretty(var)

    packer_cmd = "packer"
    try:
        result = subprocess.call(packer_cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
    except Exception as e:
        print "Error:", e
        print "%s was not found. Please install using the following method:" % packer_cmd
        print "  brew tap homebrew/binary\n  brew install %s" % packer_cmd
        sys.exit(1)
    else:
        print "Packer process returned", result

    cmd = "%s build " % packer_cmd
    if args.debug:
        cmd += "-debug "

    cmd += "-only=amazon-ebs "
    for k, v in var.iteritems():
        cmd += "-var {}=\"{}\" ".format(k, v)

    # Use generic packer script if project doesn't specify one
    pkg_resources.cleanup_resources()
    if args.ubuntu:
        scriptfile = pkg_resources.resource_filename(__name__,
                                                     "ubuntu-packer.json")
        cmd += scriptfile
    elif os.path.exists("config/packer.json"):
        cmd += "config/packer.json"
    else:
        scriptfile = pkg_resources.resource_filename(__name__,
                                                     "driftapp-packer.json")
        cmd += scriptfile
    print "Baking AMI with: {}".format(cmd)

    # Dump deployment manifest into dist folder temporarily. The packer script
    # will pick it up and bake it into the AMI.
    deployment_manifest_filename = os.path.join("dist",
                                                "deployment-manifest.json")
    deployment_manifest_json = json.dumps(deployment_manifest, indent=4)
    print "Deployment Manifest:\n", deployment_manifest_json

    if args.preview:
        print "Not building or packaging because --preview is on. Exiting now."
        return

    with open(deployment_manifest_filename, "w") as dif:
        dif.write(deployment_manifest_json)

    start_time = time.time()
    try:
        os.system(cmd)
    finally:
        os.remove(deployment_manifest_filename)
        pkg_resources.cleanup_resources()
    duration = time.time() - start_time
    print "Done after %.0f seconds" % (duration)
    slackbot.post_message(
        "Successfully baked a new AMI for '{}' on tier '{}' in {:.0f} seconds".
        format(service_info["name"], get_tier_name(), duration))
Example no. 26
def _run_command(args):
    # Always autoscale!
    args.autoscale = True

    if args.launch and args.autoscale:
        print "Error: Can't use --launch and --autoscale together."
        sys.exit(1)

    name = get_app_name()
    tier_name = get_tier_name()
    conf = get_drift_config(tier_name=tier_name,
                            deployable_name=name,
                            drift_app=load_flask_config())
    aws_region = conf.tier['aws']['region']

    print "AWS REGION:", aws_region
    print "DOMAIN:\n", json.dumps(conf.domain.get(), indent=4)
    print "DEPLOYABLE:\n", json.dumps(conf.deployable, indent=4)

    ec2_conn = boto.ec2.connect_to_region(aws_region)
    iam_conn = boto.iam.connect_to_region(aws_region)

    if conf.tier['is_live']:
        print "NOTE! This tier is marked as LIVE. Special restrictions may apply. Use --force to override."

    autoscaling = {
        "min": 1,
        "max": 1,
        "desired": 1,
        "instance_type": args.instance_type,
    }
    autoscaling.update(conf.deployable.get('autoscaling', {}))
    release = conf.deployable.get('release', '')

    if args.launch and autoscaling and not args.force:
        print "--launch specified, but tier config specifies 'use_autoscaling'. Use --force to override."
        sys.exit(1)
    if args.autoscale and not autoscaling and not args.force:
        print "--autoscale specified, but tier config doesn't specify 'use_autoscaling'. Use --force to override."
        sys.exit(1)

    print "Launch an instance of '{}' on tier '{}'".format(name, tier_name)
    if release:
        print "Using AMI with release tag: ", release
    else:
        print "Using the newest AMI baked (which may not be what you expect)."

    ami = _find_latest_ami(name, release)
    print "Latest AMI:", ami

    if args.ami:
        print "Using a specified AMI:", args.ami
        ec2 = boto3.resource('ec2', region_name=aws_region)
        if ami.id != args.ami:
            print "AMI found is different from AMI specified on command line."
            if conf.tier['is_live'] and not args.force:
                print "This is a live tier. Can't run mismatched AMI unless --force is specified"
                sys.exit(1)
        try:
            ami = ec2.Image(args.ami)
        except Exception as e:
            raise RuntimeError("Ami '%s' not found or broken: %s" %
                               (args.ami, e))

    if not ami:
        sys.exit(1)

    ami_info = dict(
        ami_id=ami.id,
        ami_name=ami.name,
        ami_created=ami.creation_date,
        ami_tags={d['Key']: d['Value']
                  for d in ami.tags},
    )
    print "AMI Info:\n", pretty(ami_info)

    if autoscaling:
        print "Autoscaling group:\n", pretty(autoscaling)
    else:
        print "EC2:"
        print "\tInstance Type:\t{}".format(args.instance_type)

    ec2 = boto3.resource('ec2', region_name=aws_region)

    # Get all 'private' subnets
    filters = {'tag:tier': tier_name, 'tag:realm': 'private'}
    subnets = list(ec2.subnets.filter(Filters=filterize(filters)))
    if not subnets:
        print "Error: No subnet available matching filter", filters
        sys.exit(1)

    print "Subnets:"
    for subnet in subnets:
        print "\t{} - {}".format(fold_tags(subnet.tags)['Name'], subnet.id)

    # Get the "one size fits all" security group
    filters = {
        'tag:tier': tier_name,
        'tag:Name': '{}-private-sg'.format(tier_name)
    }
    security_group = list(
        ec2.security_groups.filter(Filters=filterize(filters)))[0]
    print "Security Group:\n\t{} [{} {}]".format(
        fold_tags(security_group.tags)["Name"], security_group.id,
        security_group.vpc_id)

    # The key pair name for SSH
    key_name = conf.tier['aws']['ssh_key']
    if "." in key_name:
        key_name = key_name.split(
            ".",
            1)[0]  # TODO: Distinguish between key name and .pem key file name

    print "SSH Key:\t", key_name
    '''
    autoscaling group:
    Name            LIVENORTH-themachines-backend-auto
    api-port        10080
    api-target      themachines-backend
    service-name    themachines-backend
    service-type    rest-api
    tier            LIVENORTH

    ec2:
    Name            DEVNORTH-drift-base
    launched-by     nonnib
    api-port        10080
    api-target      drift-base
    service-name    drift-base
    service-type    rest-api
    tier            DEVNORTH
    '''

    target_name = "{}-{}".format(tier_name, name)
    if autoscaling:
        target_name += "-auto"

    # To auto-generate Redis cache url, we create the Redis backend using our config,
    # and then ask for a url representation of it:
    drift_config_url = get_redis_cache_backend(conf.table_store,
                                               tier_name).get_url()

    # Specify the app
    app_root = '/etc/opt/{service_name}'.format(service_name=name)

    tags = {
        "Name": target_name,
        "tier": tier_name,
        "service-name": name,
        "service-type": conf.drift_app.get('service_type', 'web-app'),
        "config-url": drift_config_url,
        "app-root": app_root,
        "launched-by": iam_conn.get_user().user_name,
    }

    if tags['service-type'] == 'web-app':
        # Make instance part of api-router round-robin load balancing
        tags.update({
            "api-target": name,
            "api-port": str(conf.drift_app.get('PORT', 10080)),
            "api-status": "online",
        })

    tags.update(fold_tags(ami.tags))

    print "Tags:"
    for k in sorted(tags.keys()):
        print "  %s: %s" % (k, tags[k])

    user_data = '''#!/bin/bash
# Environment variables set by drift-admin run command:
export DRIFT_CONFIG_URL={drift_config_url}
export DRIFT_TIER={tier_name}
export DRIFT_APP_ROOT={app_root}
export DRIFT_SERVICE={service_name}
export AWS_REGION={aws_region}

# Shell script from ami-run.sh:
'''.format(drift_config_url=drift_config_url,
           tier_name=tier_name,
           app_root=app_root,
           service_name=name,
           aws_region=aws_region)

    user_data += pkg_resources.resource_string(__name__, "ami-run.sh")
    custom_script_name = os.path.join(conf.drift_app['app_root'], 'scripts',
                                      'ami-run.sh')
    if os.path.exists(custom_script_name):
        print "Using custom shell script", custom_script_name
        user_data += "\n# Custom shell script from {}\n".format(
            custom_script_name)
        user_data += open(custom_script_name, 'r').read()
    else:
        print "Note: No custom ami-run.sh script found for this application."

    print "user_data:"
    from drift.utils import pretty as poo
    print poo(user_data, 'bash')

    if args.preview:
        print "--preview specified, exiting now before actually doing anything."
        sys.exit(0)

    if autoscaling:
        client = boto3.client('autoscaling', region_name=aws_region)
        launch_config_name = '{}-{}-launchconfig-{}-{}'.format(
            tier_name, name, datetime.utcnow(), release)
        launch_config_name = launch_config_name.replace(':', '.')

        kwargs = dict(
            LaunchConfigurationName=launch_config_name,
            ImageId=ami.id,
            KeyName=key_name,
            SecurityGroups=[security_group.id],
            InstanceType=autoscaling['instance_type'] or args.instance_type,
            IamInstanceProfile=IAM_ROLE,
            InstanceMonitoring={'Enabled': True},
            UserData=user_data,
        )
        print "Creating launch configuration using params:\n", pretty(kwargs)
        client.create_launch_configuration(**kwargs)

        # Update current autoscaling group or create a new one if it doesn't exist.
        groups = client.describe_auto_scaling_groups(
            AutoScalingGroupNames=[target_name])

        kwargs = dict(
            AutoScalingGroupName=target_name,
            LaunchConfigurationName=launch_config_name,
            MinSize=autoscaling['min'],
            MaxSize=autoscaling['max'],
            DesiredCapacity=autoscaling['desired'],
            VPCZoneIdentifier=','.join([subnet.id for subnet in subnets]),
        )

        if not groups['AutoScalingGroups']:
            print "Creating a new autoscaling group using params:\n", pretty(
                kwargs)
            client.create_auto_scaling_group(**kwargs)
        else:
            print "Updating current autoscaling group", target_name
            client.update_auto_scaling_group(**kwargs)

        # Prepare tags which get propagated to all new instances
        tagsarg = [{
            'ResourceId': tags['Name'],
            'ResourceType': 'auto-scaling-group',
            'Key': k,
            'Value': v,
            'PropagateAtLaunch': True,
        } for k, v in tags.items()]
        print "Updating tags on autoscaling group that get propagated to all new instances."
        client.create_or_update_tags(Tags=tagsarg)

        # Define a 2 min termination cooldown so api-router can drain the connections.
        response = client.put_lifecycle_hook(
            LifecycleHookName='Wait-2-minutes-on-termination',
            AutoScalingGroupName=target_name,
            LifecycleTransition='autoscaling:EC2_INSTANCE_TERMINATING',
            HeartbeatTimeout=120,
            DefaultResult='CONTINUE')
        print "Configuring lifecycle hook, response:", response.get(
            'ResponseMetadata')

        print "Done!"
        print "YOU MUST TERMINATE THE OLD EC2 INSTANCES YOURSELF!"
    else:
        # Pick a random subnet from list of available subnets
        subnet = random.choice(subnets)
        print "Randomly picked this subnet to use: ", subnet

        print "Launching EC2 instance..."
        reservation = ec2_conn.run_instances(
            ami.id,
            instance_type=args.instance_type,
            subnet_id=subnet.id,
            security_group_ids=[security_group.id],
            key_name=key_name,
            instance_profile_name=IAM_ROLE,
            user_data=user_data,
        )

        if len(reservation.instances) == 0:
            print "No instances in reservation!"
            sys.exit(1)

        instance = reservation.instances[0]

        print "{} starting up...".format(instance)

        # Check up on its status every so often
        status = instance.update()
        while status == 'pending':
            time.sleep(10)
            status = instance.update()

        if status == 'running':
            for k, v in tags.items():
                instance.add_tag(k, v)
            print "{} running at {}".format(instance,
                                            instance.private_ip_address)
            slackbot.post_message(
                "Started up AMI '{}' for '{}' on tier '{}' with ip '{}'".
                format(ami.id, name, tier_name, instance.private_ip_address))

        else:
            print "Instance was not created correctly"
            sys.exit(1)
Example no. 27
def run_command(args):
    from drift.utils import uuid_string
    from drift.appmodule import app as _app
    from drift.tenant import create_db, drop_db
    from drift.utils import get_tier_name

    tier_name = get_tier_name()
    tenant = None
    if args.target:
        print "Using test target: {}".format(args.target)
        os.environ["drift_test_target"] = args.target
    else:
        # only provision the DB if the test target is not specified
        db_host = _app.config["systest_db"]["server"]
        if args.db:
            tenant = args.db
            print "Using database {} from commandline on host {}".format(
                tenant, db_host)
            create_db(tenant, db_host, tier_name)
        else:
            tenant = "test{}".format(uuid_string())
            print "Creating database {} on host {}".format(tenant, db_host)
            create_db(tenant, db_host, tier_name)
        os.environ["drift_test_database"] = tenant

    pick_tests = []
    if args.tests:
        pick_tests = [t.lower() for t in args.tests.split(",")]
        print "Picking tests {}".format(pick_tests)

    test_modules = []
    for app in _app.config["apps"]:
        m = importlib.import_module(app)
        path = dirname(m.__file__)
        tests_path = os.path.join(path, "tests")
        if not os.path.exists(tests_path):
            print "No tests found for app '{}'".format(app)
            continue
        if not os.path.exists(os.path.join(tests_path, "__init__.py")):
            print "No tests found for app '{}' (missing __init__.py)".format(
                app)
            continue
        n = 0
        for filename in os.listdir(tests_path):
            if filename.endswith(".py") and not filename.startswith("__"):
                test_module_name = app + ".tests." + filename[:-3]
                test_modules.append(test_module_name)
                n += 1
        print "app '{}' has {} test modules".format(app, n)

    suites = {}
    for module_name in test_modules:
        # first import it to see if we get any errors
        m = importlib.import_module(module_name)
        suites[module_name] = unittest.defaultTestLoader.loadTestsFromName(
            module_name)

    tests_to_run = []
    tests_to_skip = []
    for module_name, suite in suites.iteritems():
        for test_cases in suite:
            for t in test_cases:
                if pick_tests:
                    for p in pick_tests:
                        if p in str(t).lower():
                            tests_to_run.append(t)
                            break
                    else:
                        tests_to_skip.append(t)
                else:
                    tests_to_run.append(t)

    print "Running {} test(s) from {} module(s)".format(
        len(tests_to_run), len(suites))
    if tests_to_skip:
        print "Skipping {} test(s)".format(len(tests_to_skip))
    if pick_tests:
        print "Just running the following tests:"
        if not tests_to_run:
            print "   No tests found!"
        for t in tests_to_run:
            print "   {}".format(t)

    test_suite = unittest.TestSuite(tests_to_run)
    verbosity = 1
    if args.verbose:
        verbosity = 2

    if not args.logging:
        logging.disable(logging.WARNING)

    cls = unittest.TextTestRunner
    if is_running_under_teamcity and TeamcityTestRunner:
        if is_running_under_teamcity():
            cls = TeamcityTestRunner
    results = cls(verbosity=verbosity, failfast=args.failfast).run(test_suite)

    # if a tenant was not specified on the commandline we destroy it
    if not args.db and tenant:
        drop_db(tenant, db_host, tier_name)
        pass

    if not results.wasSuccessful():
        sys.exit(1)
Example no. 28
def _bake_command(args):
    if args.ubuntu:
        name = UBUNTU_BASE_IMAGE_NAME
    else:
        name = get_app_name()

    name = get_app_name()
    tier_name = get_tier_name()
    conf = get_drift_config(tier_name=tier_name,
                            deployable_name=name,
                            drift_app=load_flask_config())

    domain = conf.domain.get()
    aws_region = domain['aws']['ami_baking_region']
    ec2 = boto3.resource('ec2', region_name=aws_region)

    print "DOMAIN:\n", json.dumps(domain, indent=4)
    if not args.ubuntu:
        print "DEPLOYABLE:", name
    print "AWS REGION:", aws_region

    # Create a list of all regions that are active
    if args.ubuntu:
        # Get all Ubuntu images from the appropriate region and pick the most recent one.
        # The 'Canonical' owner. This organization maintains the Ubuntu AMIs on AWS.
        print "Finding the latest AMI on AWS that matches", UBUNTU_RELEASE
        filters = [
            {
                'Name': 'name',
                'Values': [UBUNTU_RELEASE]
            },
        ]
        amis = list(
            ec2.images.filter(Owners=[AMI_OWNER_CANONICAL], Filters=filters))
        if not amis:
            print "No AMI found matching '{}'. Not sure what to do now.".format(
                UBUNTU_RELEASE)
            sys.exit(1)
        ami = max(amis, key=operator.attrgetter("creation_date"))
    else:
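        # Find the most recent base image already baked for this domain,
        # identified by its service-name and domain-name tags.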
        filters = [
            {
                'Name': 'tag:service-name',
                'Values': [UBUNTU_BASE_IMAGE_NAME]
            },
            {
                'Name': 'tag:domain-name',
                'Values': [domain['domain_name']]
            },
        ]
        amis = list(ec2.images.filter(Owners=['self'], Filters=filters))
        if not amis:
            criteria = {d['Name']: d['Values'][0] for d in filters}
            print "No '{}' AMI found using the search criteria {}.".format(
                UBUNTU_BASE_IMAGE_NAME, criteria)
            print "Bake one using this command: {} ami bake --ubuntu".format(
                sys.argv[0])

            sys.exit(1)
        ami = max(amis, key=operator.attrgetter("creation_date"))

    print "Using source AMI:"
    print "\tID:\t", ami.id
    print "\tName:\t", ami.name
    print "\tDate:\t", ami.creation_date

    if args.ubuntu:
        manifest = None
        packer_vars = {
            'setup_script':
            pkg_resources.resource_filename(__name__, "ubuntu-packer.sh"),
            'ubuntu_release':
            UBUNTU_RELEASE,
        }
    else:
        current_branch = get_branch()
        if not args.tag:
            args.tag = current_branch

        print "Using branch/tag", args.tag

        # Check out the requested branch/tag; the 'finally' block below always reverts it.
        checkout(args.tag)
        try:
            setup_script = ""
            setup_script_custom = ""
            with open(
                    pkg_resources.resource_filename(__name__,
                                                    "driftapp-packer.sh"),
                    'r') as f:
                setup_script = f.read()
            custom_script_name = os.path.join(conf.drift_app['app_root'],
                                              'scripts', 'ami-bake.sh')
            if os.path.exists(custom_script_name):
                print "Using custom bake shell script", custom_script_name
                setup_script_custom = "echo Executing custom bake shell script from {}\n".format(
                    custom_script_name)
                setup_script_custom += open(custom_script_name, 'r').read()
                setup_script_custom += "\necho Custom bake shell script completed\n"
            else:
                print "Note: No custom ami-bake.sh script found for this application."
            # custom setup needs to happen first because we might be installing some requirements for the regular setup
            setup_script = setup_script_custom + setup_script
            tf = tempfile.NamedTemporaryFile(delete=False)
            tf.write(setup_script)
            tf.close()
            setup_script_filename = tf.name
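            # The deployment manifest is attached to the finished AMI later on
            # as 'drift:manifest:<key>' tags.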
            manifest = create_deployment_manifest('ami', comment=None)
            packer_vars = {
                'version': get_app_version(),
                'setup_script': setup_script_filename,
            }

            if not args.preview:
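                # Build a zipped source distribution and bundle the aws/ folder
                # into dist/aws.zip before baking.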
                cmd = ['python', 'setup.py', 'sdist', '--formats=zip']
                ret = subprocess.call(cmd)
                if ret != 0:
                    print "Failed to execute build command:", cmd
                    sys.exit(ret)

                cmd = ["zip", "-r", "dist/aws.zip", "aws"]
                ret = subprocess.call(cmd)
                if ret != 0:
                    print "Failed to execute build command:", cmd
                    sys.exit(ret)
        finally:
            print "Reverting to ", current_branch
            checkout(current_branch)

    user = boto.iam.connect_to_region(
        aws_region).get_user()  # The current IAM user running this command

    packer_vars.update({
        "service": name,
        "region": aws_region,
        "source_ami": ami.id,
        "user_name": user.user_name,
        "domain_name": domain['domain_name'],
    })

    print "Packer variables:\n", pretty(packer_vars)

    # See if Packer is installed and generate sensible error code if something is off.
    # This will also write the Packer version to the terminal which is useful info.
    try:
        subprocess.call(['packer', 'version'],
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE)
    except Exception as e:
        print "Error:", e
        print "'packer version' command failed. Please install it if it's missing."
        sys.exit(127)

    cmd = "packer build "
    if args.debug:
        cmd += "-debug "

    cmd += "-only=amazon-ebs "
    for k, v in packer_vars.iteritems():
        cmd += "-var {}=\"{}\" ".format(k, v)

    # Use generic packer script if project doesn't specify one
    pkg_resources.cleanup_resources()
    if args.ubuntu:
        scriptfile = pkg_resources.resource_filename(__name__,
                                                     "ubuntu-packer.json")
        cmd += scriptfile
    elif os.path.exists("config/packer.json"):
        cmd += "config/packer.json"
    else:
        scriptfile = pkg_resources.resource_filename(__name__,
                                                     "driftapp-packer.json")
        cmd += scriptfile

    print "Baking AMI with: {}".format(cmd)
    if args.preview:
        print "Not building or packaging because --preview is on. Exiting now."
        return

    start_time = time.time()
    try:
        # Execute Packer command and parse the output to find the ami id.
        p = subprocess.Popen(shlex.split(cmd),
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        while True:
            line = p.stdout.readline()
            print line,
            if line == '' and p.poll() is not None:
                break

            # The last lines from the packer execution look like this:
            # ==> Builds finished. The artifacts of successful builds are:
            # --> amazon-ebs: AMIs were created:
            #
            # eu-west-1: ami-0ee5eb68
            if 'ami-' in line:
                ami_id = line[line.rfind('ami-'):].strip()
                ami = ec2.Image(ami_id)
                print ""
                print "AMI ID: %s" % ami.id
                print ""
    finally:
        pkg_resources.cleanup_resources()

    if p.returncode != 0:
        print "Failed to execute packer command:", cmd
        sys.exit(p.returncode)

    duration = time.time() - start_time

    if manifest:
        print "Adding manifest tags to AMI:"
        print pretty(manifest)
        prefix = "drift:manifest:"
        tags = []
        for k, v in manifest.iteritems():
            tag_name = "{}{}".format(prefix, k)
            tags.append({'Key': tag_name, 'Value': v or ''})
        ami.create_tags(DryRun=False, Tags=tags)

    if not args.skipcopy:
        _copy_image(ami.id)

    print "Done after %.0f seconds" % (duration)
    slackbot.post_message(
        "Successfully baked a new AMI for '{}' in {:.0f} seconds".format(
            name, duration))
Example no. 29
    def get(self):
        tier_name = get_tier_name()
        deployable_name = current_app.config['name']

        host_info = collections.OrderedDict()
        host_info["host-name"] = socket.gethostname()
        try:
            host_info["ip-address"] = socket.gethostbyname(
                socket.gethostname())
        except Exception:
            """
            TODO: this is just a work around
            there might be a better way to get the address
            """
            host_info["ip-address"] = "Unknown"
        endpoints = collections.OrderedDict()
        endpoints["root"] = url_for("servicestatus.root", _external=True)
        if endpoints["root"].endswith("/"):
            endpoints["root"] = endpoints["root"][:-1]
        endpoints["auth"] = request.url_root + "auth"  # !evil concatination
        for func in current_app.endpoint_registry_funcs:
            try:
                endpoints.update(func(current_user))
            except Exception:
                log.exception("Failed to get endpoint registry from %s", func)

        # Only list out tenants which have a db, and only if caller has service role.
        if (current_user and
            ('service' in current_user['roles'])) or current_app.debug:
            ts = g.conf.table_store
            tenants_table = ts.get_table('tenants')
            tenants = []
            for tenant in tenants_table.find({
                    'tier_name': tier_name,
                    'deployable_name': deployable_name
            }):
                tenants.append(tenant['tenant_name'])

        else:
            tenants = None

        ret = {
            'service_name': current_app.config['name'],
            "host_info": host_info,
            "endpoints": endpoints,
            "current_user": dict(current_user) if current_user else None,
            "tier_name": tier_name,
            "tenant_name": (g.conf.tenant_name['tenant_name']
                            if g.conf.tenant_name else '(none)'),
            "server_time": datetime.datetime.utcnow().isoformat("T") + "Z",
            "tenants": tenants,
        }

        path = os.path.join(current_app.instance_path, "..",
                            "deployment-manifest.json")
        if not os.path.exists(path):
            if current_app.debug or current_app.testing:
                # Running in debug or testing mode usually means running on local dev machine, which
                # usually means there is no deployment manifest, and no-one should care.
                pass
            else:
                log.info("No deployment manifest found at %s", path)
        else:
            try:
                with open(path) as f:
                    ret["deployment"] = json.load(f)
            except Exception:
                log.exception("Failed to read deployment manifest from %s",
                              path)

        if current_app.debug:
            # TODO: Only do for authenticated sessions.. preferably..
            ret["headers"] = dict(request.headers)

            # Pretty print the config
            d = {k: str(v) for k, v in current_app.config.items()}
            d = collections.OrderedDict(sorted(d.items()))
            d['private_key'] = '...'  # Just to be safe(r)
            ret['config_dump'] = json.dumps(d, indent=4)

        return ret
Example no. 30
def get_engines():
    if conn_string:
        # An explicit connection string trumps the config lookup below.
        engines = {
            "dude": {
                "engine": create_engine(conn_string, echo=False,
                                        poolclass=pool.NullPool),
                "url": conn_string,
            }
        }
        return engines
    engines = {}
    tenants = []
    ts = get_ts()
    tier = get_tier_name()
    tenants_table = ts.get_table('tenants').find(
        {'deployable_name': 'drift-base'})  #!
    pick_tenant = context.get_x_argument(as_dictionary=True).get('tenant')
    if pick_tenant:
        print 'picking tenant %s' % pick_tenant
    dry_run = context.get_x_argument(as_dictionary=True).get('dry-run')

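    # Keep only tenants on this tier that have a Postgres backend,
    # honoring the optional 'tenant' filter above.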
    for t in tenants_table:
        if not t.get("postgres"):
            continue
        name = t["tenant_name"]
        if (not (pick_tenant and name != pick_tenant)
                and name != "*" and t["tier_name"] == tier):
            tenants.append(t)

    for tenant_config in tenants:
        conn_info = tenant_config["postgres"]
        conn_info["username"] = MASTER_USERNAME
        conn_info["password"] = MASTER_PASSWORD
        this_conn_string = format_connection_string(conn_info)
        print this_conn_string

        if this_conn_string not in [e["url"] for e in engines.itervalues()]:
            key = "{}.{}".format(tenant_config["tier_name"],
                                 tenant_config["tenant_name"])
            engines[key] = {"url": this_conn_string}

    # quick and dirty connectivity test before trying to upgrade all db's
    print "Checking connectivity..."
    db_servers = set()
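    # Connection strings look like driver://user:password@host:port/db, so the third
    # '/'-separated field holds the credentials and the host:port pair.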
    for key, engine in engines.iteritems():
        server = engine["url"].split("/")
        db_servers.add(server[2].split("@")[1].lower())
    err = False
    for db_server in db_servers:
        parts = db_server.split(":")
        db_server, port = parts[0], int(parts[1])
        sys.stdout.write(db_server + "... ")
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(2)
        result = sock.connect_ex((db_server, port))
        if result != 0:
            print "Unable to connect to server '%s' on port %s" % (db_server,
                                                                   port)
            err = True
        else:
            print "OK"
    if err:
        raise Exception(
            "Unable to connect to one or more db servers. Bailing out!")

    if dry_run:
        print "Dry run, exiting without taking further action"
        return {}

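    # Connectivity looks fine; create one SQLAlchemy engine per unique connection string.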
    for key in engines.keys():
        rec = engines[key]
        connection_string = rec["url"]
        logger.info("Connecting '{}'...".format(connection_string))
        rec['engine'] = create_engine(connection_string,
                                      echo=False,
                                      poolclass=pool.NullPool)
        rec['url'] = connection_string
    return engines