Example #1
0
def init_command(args):
    # Fetch master config file from disk, url or s3 bucket.
    master_config = vars(args)["master-config"]
    json_text = fetch(master_config)
    if not json_text:
        return
    verify_master_config(json.loads(json_text))

    print "Initializing master config file {}".format(master_config)
    with open(get_config_path(TIERS_CONFIG_FILENAME), "w") as f:
        f.write(json_text)

    # Remove current tier selection
    tier_selection_file = get_config_path("TIER")
    if os.path.exists(tier_selection_file):
        os.remove(tier_selection_file)

    # Report currently selected config
    get_tiers_config()

    if args.activate:
        print "Activating tier '{}'...".format(args.activate)
        args.tier = args.activate
        args.vpn = False
        use_command(args)
Example #2
0
def unpublish_config_command(args):
    tiers_config = get_tiers_config(display_title=False)
    bucket = get_s3_bucket(tiers_config)
    key_name = "tiers/{}.json".format(args.tier.upper())
    key = bucket.get_key(key_name)
    if key:
        key.delete()
        print "Tier configuration {} removed from S3 bucket.".format(key.name)
    else:
        print "Tier config {} not found.".format(key_name)
Example #3
0
def mirror_staticdata_command(args):
    tiers_config = get_tiers_config(display_title=False)
    bucket = get_s3_bucket(tiers_config)
    keys = set()
    for key in bucket.list(prefix="static-data/", delimiter="/"):
        if key.name == "static-data/":
            continue
        if key.name == "static-data/logs/":
            continue
        for key2 in bucket.list(prefix=key.name, delimiter=""):
            keys.add(key2.name)

    print "{} s3 objects loaded".format(len(keys))

    mirror_alicloud(copy.copy(keys), bucket)

    print "ALL DONE!"
Example #4
0
def list_command(args):
    tiers_config = get_tiers_config()
    conn = connect_to_region(tiers_config["region"],
                             calling_format=OrdinaryCallingFormat())
    bucket_name = "{}.{}".format(tiers_config["bucket"],
                                 tiers_config["domain"])
    print "List of all tiers registered at http://{}/{}".format(
        bucket_name, "tiers")
    bucket = conn.get_bucket(bucket_name)
    for file_key in bucket.list("tiers/", "/"):
        head, tail = os.path.split(file_key.name)
        root, ext = os.path.splitext(tail)
        if ext == ".json":
            if args.verbose:
                print bcolors.BOLD + "Tier: " + root + bcolors.ENDC
                json_text = file_key.get_contents_as_string()
                print json_text
            else:
                print "   ", root
Example #5
0
def publish_command(args):
    print "=========== STATIC DATA COMPRESSION ENABLED ==========="
    user = args.user
    repository = args.repository

    tiers_config = get_tiers_config(display_title=False)
    bucket = get_s3_bucket(tiers_config)
    origin_url = "[email protected]:directivegames/the-machines-static-data.git"
    if not repository:
        try:
            cmd = 'git config --get remote.origin.url'
            print "No repository specified. Using git to figure it out:", cmd
            origin_url = subprocess.check_output(cmd.split(' '))
            if origin_url.startswith("http"):
                repository, _ = os.path.splitext(urlparse(origin_url).path)
            elif origin_url.startswith("git@"):
                repository = "/" + origin_url.split(":")[1].split(".")[0]
            else:
                raise Exception("Unknown origin url format")
        except Exception as e:
            logging.exception(e)
            print "Unable to find repository from origin url '{}'".format(origin_url)
            raise e
        print "Found repository '{}' from '{}'".format(repository, origin_url)
    else:
        print u"Using repository: {}".format(repository)

    s3_upload_batch = []  # List of [filename, data] pairs to upload to bucket.
    repo_folder = "{}{}/data/".format(STATIC_DATA_ROOT_FOLDER, repository)

    if user:
        print "User defined reference ..."
        to_upload = set()
        # TODO: This will crash. No serialno??
        s3_upload_batch.append(["user-{}/{}".format(user, serialno)])
    else:
        # We need to checkout a few branches. Let's remember which branch is currently active
        cmd = 'git rev-parse --abbrev-ref HEAD'
        print "Get all tags and branch head revisions for this repo using:", cmd
        current_branch = subprocess.check_output(cmd.split(' ')).strip()

        # Get all references
        to_upload = set()  # Commit ID's to upload to S3
        indexes = []  # Full list of git references to write to index.json

        print "Index file:"
        ls_remote = subprocess.check_output('git ls-remote --quiet'.split(' ')).strip()
        now = datetime.utcnow()
        for refline in ls_remote.split('\n'):
            commit_id, ref = refline.split("\t")
            # We are only interested in head revision of branches, and tags
            if not ref.startswith("refs/heads/") and not ref.startswith("refs/tags/"):
                continue

            # We want a dereferenced tag
            if ref.startswith("refs/tags/") and not ref.endswith("^{}"):
                continue

            # Prune any "dereference" markers from the ref string.
            ref = ref.replace("^{}", "")

            print "    {:<50}{}".format(ref, commit_id)
            to_upload.add(commit_id)
            indexes.append({"commit_id": commit_id, "ref": ref})
        # List out all subfolders under the repo name to see which commits are already there.
        # Prune the 'to_upload' list accordingly.
        for key in bucket.list(prefix=repo_folder, delimiter="/"):
            # See if this is a commit_id formatted subfolder
            m = re.search("^.*/([a-f0-9]{40})/$", key.name)
            if m:
                commit_id = m.groups()[0]
                to_upload.discard(commit_id)

        # For any referenced commit on git, upload it to S3 if it is not already there.
        print "\nNumber of commits to upload: {}".format(len(to_upload))
        for commit_id in to_upload:
            cmd = "git checkout {}".format(commit_id)
            print "Running git command:", cmd
            print subprocess.check_output(cmd.split(' ')).strip()
            try:
                types_str = json.dumps(load_types())
                schemas_str = json.dumps(load_schemas())
                s3_upload_batch.append(["{}/types.json".format(commit_id), types_str])
                s3_upload_batch.append(["{}/schemas.json".format(commit_id), schemas_str])
            except Exception as e:
                logging.exception(e)
                print "Not uploading {}: {}".format(commit_id, e)
                raise e

        cmd = "git checkout {}".format(current_branch)
        print "Reverting HEAD to original state: "
        print subprocess.check_output(cmd.split(' ')).strip()

    # Upload to S3
    for key_name, data in s3_upload_batch:
        key = Key(bucket)
        mimetype, encoding = mimetypes.guess_type(key_name)
        if not mimetype and key_name.endswith(".json"):
            mimetype = "application/json"
        if mimetype:
            key.set_metadata('Content-Type', mimetype)
        key.set_metadata('Cache-Control', "max-age=1000000")
        key.key = "{}{}".format(repo_folder, key_name)
        print "Uploading: {}".format(key.key)
        key.set_contents_from_string(data)
        key.set_acl('public-read')

    # Upload index
    refs_index = {"created": now.isoformat() + "Z",
                  "repository": repository,
                  "index": indexes,
                  }
    key = Key(bucket)
    key.set_metadata('Content-Type', "application/json")
    key.set_metadata('Cache-Control', "max-age=0, no-cache, no-store")
    key.key = "{}{}/index.json".format(STATIC_DATA_ROOT_FOLDER, repository)
    print "Uploading: {}".format(key.key)
    key.set_contents_from_string(json.dumps(refs_index))
    key.set_acl('public-read')

    print "All done!"
Example #6
0
def _bake_command(args):
    """Bake an AMI with packer, either a base Ubuntu image or a service image.

    With args.ubuntu set, the newest Canonical AMI matching UBUNTU_RELEASE
    is used as the source. Otherwise the tier's own base image (tagged with
    UBUNTU_BASE_IMAGE_NAME) is used and the service is packaged from git,
    honoring args.tag. With args.preview set, nothing is built or packaged.
    """
    service_info = get_service_info()
    tier_config = get_tier_config()
    iam_conn = boto.iam.connect_to_region(tier_config["region"])

    if args.ubuntu:
        # Get all Ubuntu Trusty 14.04 images from the appropriate region and
        # pick the most recent one.
        # The 'Canonical' owner. This organization maintains the Ubuntu AMI's on AWS.
        print "Finding the latest AMI on AWS that matches", UBUNTU_RELEASE
        ec2 = boto3.resource('ec2', region_name=tier_config["region"])
        filters = [
            {
                'Name': 'name',
                'Values': [UBUNTU_RELEASE]
            },
        ]
        amis = list(
            ec2.images.filter(Owners=[AMI_OWNER_CANONICAL], Filters=filters))
        if not amis:
            # NOTE(review): str.format ignores the two surplus arguments here;
            # only UBUNTU_RELEASE appears in the printed message.
            print "No AMI found matching '{}'. Not sure what to do now.".format(
                UBUNTU_RELEASE, tier_config["tier"], sys.argv[0])
            sys.exit(1)
        ami = max(amis, key=operator.attrgetter("creation_date"))
    else:
        # Find the tier's own base image by its service-name and tier tags.
        ec2 = boto3.resource('ec2', region_name=tier_config["region"])
        filters = [
            {
                'Name': 'tag:service-name',
                'Values': [UBUNTU_BASE_IMAGE_NAME]
            },
            {
                'Name': 'tag:tier',
                'Values': [tier_config["tier"]]
            },
        ]
        amis = list(ec2.images.filter(Owners=['self'], Filters=filters))
        if not amis:
            print "No '{}' AMI found for tier {}. Bake one using this command: {} ami bake --ubuntu".format(
                UBUNTU_BASE_IMAGE_NAME, tier_config["tier"], sys.argv[0])
            sys.exit(1)
        ami = max(amis, key=operator.attrgetter("creation_date"))

    print "Using source AMI:"
    print "\tID:\t", ami.id
    print "\tName:\t", ami.name
    print "\tDate:\t", ami.creation_date

    if args.ubuntu:
        version = None
        branch = ''
        sha_commit = ''
        deployment_manifest = create_deployment_manifest(
            'bakeami')  # Todo: Should be elsewhere or different
    else:
        # Package the service as a zipped source distribution.
        cmd = "python setup.py sdist --formats=zip"
        current_branch = get_branch()

        if not args.tag:
            # See if service is tagged to a specific version for this tier
            for si in tier_config['deployables']:
                if si['name'] == service_info['name']:
                    if 'release' in si:
                        text = "Error: As deployable '{}' for tier '{}' is pegged to a particular " \
                            "release, you must specify a release tag to which to bake from.\n" \
                            "Note that this is merely a safety measure.\n" \
                            "For reference, the current deployable for this tier is pegged at " \
                            "release tag '{}'."
                        print text.format(service_info['name'],
                                          tier_config['tier'], si['release'])
                        sys.exit(1)
                    break

        if not args.tag:
            args.tag = current_branch

        print "Using branch/tag", args.tag

        # Check out the requested ref, collect bake metadata from it, and
        # always restore the original branch afterwards.
        checkout(args.tag)
        try:
            deployment_manifest = create_deployment_manifest(
                'bakeami')  # Todo: Should be elsewhere or different
            sha_commit = get_commit()
            branch = get_branch()
            version = get_git_version()
            service_info = get_service_info()
            if not args.preview:
                os.system(cmd)
        finally:
            print "Reverting to ", current_branch
            checkout(current_branch)

    if not version:
        version = {'tag': 'untagged-branch'}

    print "git version:", version

    user = iam_conn.get_user()  # The current IAM user running this command

    # Need to generate a pre-signed url to the tiers root config file on S3
    tiers_config = get_tiers_config()
    tiers_config_url = '{}/{}.{}/{}'.format(tiers_config['region'],
                                            tiers_config['bucket'],
                                            tiers_config['domain'],
                                            TIERS_CONFIG_FILENAME)

    # Variables handed to packer via -var command-line arguments below.
    var = {
        "service":
        UBUNTU_BASE_IMAGE_NAME if args.ubuntu else service_info["name"],
        "versionNumber": service_info["version"],
        "region": tier_config["region"],
        "source_ami": ami.id,
        "branch": branch,
        "commit": sha_commit,
        "release": version['tag'],
        "user_name": user.user_name,
        "tier": tier_config["tier"],
        "tier_url": tiers_config_url,
    }

    if args.ubuntu:
        var['setup_script'] = pkg_resources.resource_filename(
            __name__, "ubuntu-packer.sh")
    else:
        var['setup_script'] = pkg_resources.resource_filename(
            __name__, "driftapp-packer.sh")

    print "Using var:\n", pretty(var)

    # Sanity check that the packer binary is installed and runnable.
    # NOTE(review): a nonzero return code is only printed, not treated as
    # failure; only a missing binary aborts here.
    packer_cmd = "packer"
    try:
        result = subprocess.call(packer_cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
    except Exception as e:
        print "Error:", e
        print "%s was not found. Please install using the following method:" % packer_cmd
        print "  brew tap homebrew/binary\n  brew install %s" % packer_cmd
        sys.exit(1)
    else:
        print "Packer process returned", result

    cmd = "%s build " % packer_cmd
    if args.debug:
        cmd += "-debug "

    cmd += "-only=amazon-ebs "
    for k, v in var.iteritems():
        cmd += "-var {}=\"{}\" ".format(k, v)

    # Use generic packer script if project doesn't specify one
    pkg_resources.cleanup_resources()
    if args.ubuntu:
        scriptfile = pkg_resources.resource_filename(__name__,
                                                     "ubuntu-packer.json")
        cmd += scriptfile
    elif os.path.exists("config/packer.json"):
        cmd += "config/packer.json"
    else:
        scriptfile = pkg_resources.resource_filename(__name__,
                                                     "driftapp-packer.json")
        cmd += scriptfile
    print "Baking AMI with: {}".format(cmd)

    # Dump deployment manifest into dist folder temporarily. The packer script
    # will pick it up and bake it into the AMI.
    deployment_manifest_filename = os.path.join("dist",
                                                "deployment-manifest.json")
    deployment_manifest_json = json.dumps(deployment_manifest, indent=4)
    print "Deployment Manifest:\n", deployment_manifest_json

    if args.preview:
        print "Not building or packaging because --preview is on. Exiting now."
        return

    with open(deployment_manifest_filename, "w") as dif:
        dif.write(deployment_manifest_json)

    start_time = time.time()
    try:
        os.system(cmd)
    finally:
        # Always remove the temporary manifest, even if the bake fails.
        os.remove(deployment_manifest_filename)
        pkg_resources.cleanup_resources()
    duration = time.time() - start_time
    print "Done after %.0f seconds" % (duration)
    # NOTE(review): this message mixes str.format with a %-style placeholder;
    # the '%.0f' is left verbatim in the posted text — confirm intended.
    slackbot.post_message(
        "Successfully baked a new AMI for '{}' on tier '{}' in %.0f seconds".
        format(service_info["name"], get_tier_name(), duration))
Example #7
0
def run_command(args):
    """Bake an AMI with packer using the boto2 EC2 API.

    Closely parallels _bake_command: with args.ubuntu set, uses the newest
    Canonical Ubuntu Trusty 14.04 AMI as the source; otherwise uses the
    tier's own tagged base image and packages the service from git,
    honoring args.tag. With args.preview set, nothing is built or packaged.
    """
    service_info = get_service_info()
    tier_config = get_tier_config()
    ec2_conn = boto.ec2.connect_to_region(tier_config["region"])
    iam_conn = boto.iam.connect_to_region(tier_config["region"])

    if args.ubuntu:
        # Get all Ubuntu Trusty 14.04 images from the appropriate region and
        # pick the most recent one.
        print "Finding the latest AMI on AWS that matches 'ubuntu-trusty-14.04*'"
        # The 'Canonical' owner. This organization maintains the Ubuntu AMI's on AWS.
        amis = ec2_conn.get_all_images(
            owners=['099720109477'],
            filters={'name': 'ubuntu/images/hvm/ubuntu-trusty-14.04*'},
        )
        ami = max(amis, key=operator.attrgetter("creationDate"))
    else:
        # Find the tier's own base image by its service-name and tier tags.
        amis = ec2_conn.get_all_images(
            owners=['self'],  # The current organization
            filters={
                'tag:service-name': UBUNTU_BASE_IMAGE_NAME,
                'tag:tier': tier_config["tier"],
            },
        )
        if not amis:
            print "No '{}' AMI found for tier {}. Bake one using this command: {} bakeami --ubuntu".format(
                UBUNTU_BASE_IMAGE_NAME, tier_config["tier"], sys.argv[0])
            sys.exit(1)

        ami = max(amis, key=operator.attrgetter("creationDate"))
        print "{} AMI(s) found.".format(len(amis))

    print "Using source AMI:"
    print "\tID:\t", ami.id
    print "\tName:\t", ami.name
    print "\tDate:\t", ami.creationDate

    if args.ubuntu:
        version = None
        branch = ''
        sha_commit = ''
    else:
        # Package the service as a zipped source distribution.
        cmd = "python setup.py sdist --formats=zip"
        current_branch = get_branch()
        if not args.tag:
            args.tag = current_branch

        print "Using branch/tag", args.tag
        # Check out the requested ref, collect bake metadata from it, and
        # always restore the original branch afterwards.
        checkout(args.tag)
        try:
            sha_commit = get_commit()
            branch = get_branch()
            version = get_git_version()
            if not args.preview:
                os.system(cmd)
        finally:
            print "Reverting to ", current_branch
            checkout(current_branch)

    if not version:
        version = {'tag': 'untagged-branch'}

    print "git version:", version

    service_info = get_service_info()
    user = iam_conn.get_user()  # The current IAM user running this command

    # Need to generate a pre-signed url to the tiers root config file on S3
    tiers_config = get_tiers_config()
    tiers_config_url = '{}/{}.{}/{}'.format(tiers_config['region'],
                                            tiers_config['bucket'],
                                            tiers_config['domain'],
                                            TIERS_CONFIG_FILENAME)

    # Variables handed to packer via -var command-line arguments below.
    var = {
        "service":
        UBUNTU_BASE_IMAGE_NAME if args.ubuntu else service_info["name"],
        "versionNumber": service_info["version"],
        "region": tier_config["region"],
        "source_ami": str(ami.id),
        "branch": branch,
        "commit": sha_commit,
        "release": version['tag'],
        "user_name": str(user.user_name),
        "tier": tier_config["tier"],
        "tier_url": str(tiers_config_url),
    }

    if args.ubuntu:
        var['setup_script'] = pkg_resources.resource_filename(
            __name__, "ubuntu-packer.sh")
    else:
        var['setup_script'] = pkg_resources.resource_filename(
            __name__, "driftapp-packer.sh")

    print "Using var:\n", json.dumps({k: str(v)
                                      for k, v in var.iteritems()},
                                     indent=4)

    # Sanity check that the packer binary is installed and runnable.
    # NOTE(review): a nonzero return code is only printed, not treated as
    # failure; only a missing binary aborts here.
    packer_cmd = "packer"
    try:
        result = subprocess.call(packer_cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
    except Exception as e:
        print "Error:", e
        print "%s was not found. Please install using the following method:" % packer_cmd
        print "  brew tap homebrew/binary\n  brew install %s" % packer_cmd
        sys.exit(1)
    else:
        print "Packer process returned", result

    cmd = "%s build " % packer_cmd
    if args.debug:
        cmd += "-debug "

    cmd += "-only=amazon-ebs "
    for k, v in var.iteritems():
        cmd += "-var {}=\"{}\" ".format(k, v)

    # Use generic packer script if project doesn't specify one
    pkg_resources.cleanup_resources()
    if args.ubuntu:
        scriptfile = pkg_resources.resource_filename(__name__,
                                                     "ubuntu-packer.json")
        cmd += scriptfile
    elif os.path.exists("config/packer.json"):
        cmd += "config/packer.json"
    else:
        scriptfile = pkg_resources.resource_filename(__name__,
                                                     "driftapp-packer.json")
        cmd += scriptfile
    print "Baking AMI with: {}".format(cmd)

    if args.preview:
        print "Not building or packaging because --preview is on. Exiting now."
        return

    start_time = time.time()
    # Dump deployment manifest into dist folder temporarily. The packer script
    # will pick it up and bake it into the AMI.
    deployment_manifest_filename = os.path.join("dist",
                                                "deployment-manifest.json")
    deployment_manifest_json = json.dumps(
        create_deployment_manifest('bakeami'), indent=4)
    print "Deployment Manifest:\n", deployment_manifest_json
    with open(deployment_manifest_filename, "w") as dif:
        dif.write(deployment_manifest_json)

    try:
        os.system(cmd)
    finally:
        # Always remove the temporary manifest, even if the bake fails.
        os.remove(deployment_manifest_filename)
        pkg_resources.cleanup_resources()
    duration = time.time() - start_time
    print "Done after %.0f seconds" % (duration)
    # NOTE(review): this message mixes str.format with a %-style placeholder;
    # the '%.0f' is left verbatim in the posted text — confirm intended.
    slackbot.post_message(
        "Successfully baked a new AMI for '{}' on tier '{}' in %.0f seconds".
        format(service_info["name"], get_tier_name(), duration))
Example #8
0
def use_command(args):
    """Make the given tier the active one for the local user environment.

    Downloads the tier config and its auxiliary config files from S3,
    installs any ssh keys referenced by the tier's deployables, writes the
    TIER marker file, and optionally brings up an ipsec VPN tunnel
    (args.vpn).
    """
    tier_name_upper = args.tier.upper()
    tiers_config = get_tiers_config(display_title=False)
    bucket = get_s3_bucket(tiers_config)
    key_name = "tiers/{}.json".format(tier_name_upper)
    key = bucket.get_key(key_name)
    if not key:
        print "Tier configuration '{}' not found at '{}'".format(
            tier_name_upper, key_name)
        return

    # Cache the tier config locally as <TIER>.json.
    json_text = key.get_contents_as_string()
    tier_config = json.loads(json_text)
    with open(get_config_path("{}.json".format(tier_name_upper)), "w") as f:
        f.write(json_text)

    # Install config files for tier
    for file_key in bucket.list("tiers/{}/".format(tier_name_upper), "/"):
        head, tail = os.path.split(file_key.name)
        if not tail:
            continue  # Skip over directory entry

        config_filename = get_config_path(
            tail, '.drift/tiers/{}/'.format(tier_name_upper))
        print "Installing configuration file:", config_filename
        file_key.get_contents_to_filename(config_filename)

    # Install ssh keys referenced by the master config and deployables in the tier config.
    ssh_keys = [
        deployable["ssh_key"] for deployable in tier_config["deployables"]
        if "ssh_key" in deployable
    ]

    # ssh key files are stored under ssh-keys in the bucket
    for key_name in set(ssh_keys):
        ssh_key_filename = get_config_path(key_name, ".ssh")
        if os.path.exists(ssh_key_filename):
            continue  # Never overwrite an existing local key file.

        key = bucket.get_key("ssh-keys/{}".format(key_name))
        if key:
            key.get_contents_to_filename(ssh_key_filename)
            # Must make file private to user, or else ssh command will fail with:
            # "It is required that your private key files are NOT accessible by others."
            os.chmod(ssh_key_filename, 0o600)
            print "Installing SSH key:", ssh_key_filename
        else:
            print "Warning: SSH key file {} not found in S3 bucket.".format(
                key_name)

    # Finally mark which tier is the current one
    with open(get_config_path("TIER"), "w") as f:
        f.write(tier_name_upper)

    print "Tier configuration installed successfully for local user environment."
    get_tiers_config()  # To write out current status

    # Set up VPN tunnel
    if args.vpn:
        print "(To set up or check VPN status, you may be prompted for sudo password)"
        try:
            # Query the ipsec daemon first; only bring the tunnel up if it
            # is not already ESTABLISHED.
            tier_lower = args.tier.lower()
            p = subprocess.Popen(["sudo", "ipsec", "status", tier_lower],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
            stdout, _ = p.communicate()
            if p.returncode != 0:
                print "Error running ipsec command: %s" % stdout
                return
            if "ESTABLISHED" in stdout:
                print "VPN tunnel '{}' already established.".format(tier_lower)
            else:
                print "Establish VPN connection"
                p = subprocess.Popen(["sudo", "ipsec", "up", tier_lower],
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT)
                stdout, _ = p.communicate()
                if p.returncode != 0:
                    print "Error running ipsec command: %s" % stdout
                    return
                print stdout
        except Exception as e:
            # Best-effort: a VPN failure does not undo the tier selection.
            print "Exception setting up strongswan tunnel: %s" % e

    print ""
    print "done."
Example #9
0
def info_command(args):
    tiers_config = get_tiers_config()
    if args.verbose:
        print json.dumps(tiers_config, indent=4)