Example #1
def run_command(args):
    cmd = args.cmd
    if not cmd:
        print "Please enter command to run. Example: kitrun.py remotecmd \"ls -l\""
        return
    tier_config = get_tier_config()
    service_info = get_service_info()
    tier = tier_config["tier"]
    region = tier_config["region"]
    service_name = service_info["name"]
    public = args.public

    pem_file = None
    for deployable in tier_config["deployables"]:
        if deployable["name"] == service_name:
            pem_file = deployable["ssh_key"]
            break
    else:
        print "Service {} not found in tier config for {}".format(service_name, tier)
        sys.exit(1)
    print "\n*** EXECUTING REMOTE COMMAND '{}' ON SERVICE '{}' / TIER '{}' IN REGION '{}'\n".format(cmd, service_name, tier, region)

    filters = {
        'tag:service-name': service_name,
        "instance-state-name": "running",
        "tag:tier": tier,
    }
    print "Finding ec2 instances in region %s from filters: %s" % (region, filters)
    instances = get_ec2_instances(region, filters=filters)
    if not instances:
        print "Found no running ec2 instances with tag service-name={}".format(service_name)
        return

    for ec2 in instances:
        if not public:
            ip_address = ec2.private_ip_address
        else:
            ip_address = ec2.ip_address
        print "*** Running '{}' on {}...".format(cmd, ip_address)

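        # Point Fabric's global env at this host, then run the command over SSH.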
        env.host_string = ip_address
        env.user = EC2_USERNAME
        env.key_filename = '~/.ssh/{}'.format(pem_file)
        run(cmd)
        print
Example #2
def run_command(args):
    cmd = args.cmd
    if not cmd:
        print "Please enter SQL to run. Example: kitrun.py sqlcmd \"SELECT * FROM tm_players LIMIT 10;\""
        return
    tier_config = get_tier_config()
    service_info = get_service_info()
    tier = args.tier or tier_config["tier"]
    config = load_config()
    tiers = []
    if tier == "ALL":
        tiers = [t["name"] for t in config["tiers"]]
    else:
        tiers = [tier]
    print "Running SQL Command on Tiers: {}".format(", ".join(tiers))

    service_name = service_info["name"]
    tenant = args.tenant
    tenants = []
    for tier_name in tiers:
        config = load_config(tier_name)
        for t in config.get("tenants", []):
            name = t["name"]
            if not t.get("db_server"): continue
            if tenant and tenant.lower() != name.lower(): continue
            t["tier"] = tier_name
            tenants.append(t)

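    # Build a psql command for each matching tenant's database and run it.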
    for tenant in tenants:
        db_server = tenant["db_server"]
        tenant_name = tenant["name"]
        tier = tenant["tier"]
        tenant_name = tenant_name.replace("-{}".format(tier.lower()), "")
        full_cmd = "psql postgresql://{db_server}:{port}/{tier}_{tenant}_{service_name} -U postgres -c \"{cmd}\""\
                   .format(db_server=db_server, tier=tier, tenant=tenant_name, service_name=service_name, cmd=cmd, port=PORT)
        print "Running %s" % full_cmd
        #! inject the password into env. Highly undesirable
        full_cmd = "PGPASSWORD=postgres %s" % full_cmd
        os.system(full_cmd)
Example #3
def run_command(args):
    service_info = get_service_info()
    tier_config = get_tier_config()
    ec2_conn = boto.ec2.connect_to_region(tier_config["region"])
    vpc_conn = boto.vpc.connect_to_region(tier_config["region"])
    iam_conn = boto.iam.connect_to_region(tier_config["region"])
    tier_name = tier_config["tier"].upper()  # Canonical name of tier

    print "Launch an instance of '{}' on tier '{}'".format(
        service_info["name"], tier_config["tier"])

    for deployable in tier_config["deployables"]:
        if deployable["name"] == service_info["name"]:
            break
    else:
        print "Error: Deployable '{}' not found in tier config:".format(
            service_info["name"])
        print json.dumps(tier_config, indent=4)
        sys.exit(1)

    if args.ami is None:
        # Pick the most recent image baked by the caller
        print "No source AMI specified. See if your organization has baked one recently..."
        print "Searching AMI's with the following tags:"
        print "  service-name:", service_info["name"]
        print "  tier:", tier_name

        amis = ec2_conn.get_all_images(
            owners=['self'],  # The current organization
            filters={
                'tag:service-name': service_info["name"],
                'tag:tier': tier_name,
            },
        )
        if not amis:
            print "No AMI's found that match this service and tier."
            print "Bake a new one using this command: {} bakeami".format(
                sys.argv[0])
            sys.exit(1)
        ami = max(amis, key=operator.attrgetter("creationDate"))
        print "{} AMI(s) found.".format(len(amis))
    else:
        ami = ec2_conn.get_image(args.ami)

    print "AMI Info:"
    print "\tAMI ID:\t", ami.id
    print "\tName:\t", ami.name
    print "\tDate:\t", ami.creationDate
    print "\tTags:\t"
    for k, v in ami.tags.items():
        print "\t\t", k, ":", v

    print "EC2:"
    print "\tInstance Type:\t{}".format(args.instance_type)
    # Find the appropriate subnet to run on.
    # TODO: The subnet should be tagged more appropriately. For now we deploy
    # all drift apps to private-subnet-2, and keep special purpose services on
    # private-subnet-1, like RabbitMQ.
    for subnet in vpc_conn.get_all_subnets():
        tier_match = subnet.tags.get("tier", "").upper() == tier_name
        name_match = "private-subnet-2" in subnet.tags.get("Name", "").lower()
        if tier_match and name_match:
            break
    else:
        print "Can't find a subnet to run on."
        sys.exit(1)

    print "\tSubnet:\t{} [{} {}]".format(subnet.tags["Name"], subnet.id,
                                         subnet.vpc_id)
    print "\tCIDR:\t", subnet.cidr_block

    # Find the appropriate security group.
    # TODO: For now we just have a "one size fits all" group which allows all
    # traffic from 10.x.x.x. This security group was created manually but needs
    # to be added to the tier provisioning script.
    for security_group in vpc_conn.get_all_security_groups():
        tier_match = security_group.tags.get("tier", "").upper() == tier_name
        name_match = "private-sg" in security_group.tags.get("Name",
                                                             "").lower()
        vpc_match = security_group.vpc_id == subnet.vpc_id
        if tier_match and name_match and vpc_match:
            break
    else:
        print "Can't find a security group to run on."
        sys.exit(1)

    print "\tSecurity Group: {} [{} {}]".format(security_group.tags["Name"],
                                                security_group.id,
                                                security_group.vpc_id)

    # The key pair name for SSH
    key_name = deployable["ssh_key"]
    if "." in key_name:
        key_name = key_name.split(
            ".",
            1)[0]  # TODO: Distinguish between key name and .pem key file name

    print "\tSSH Key:\t", key_name

    tags = {
        "Name": "{}-{}".format(tier_name, service_info["name"]),
        "tier": tier_name,
        "service-name": service_info["name"],
        "launched-by": iam_conn.get_user().user_name,

        # Make instance part of api-router round-robin load balancing
        "api-target": service_info["name"],
        "api-port": "10080",
    }
    print "Tags:"
    print json.dumps(tags, indent=4)

    reservation = ec2_conn.run_instances(
        ami.id,
        instance_type=args.instance_type,
        subnet_id=subnet.id,
        security_group_ids=[security_group.id],
        key_name=key_name,
        instance_profile_name=IAM_ROLE)

    if len(reservation.instances) == 0:
        print "No instances in reservation!"
        sys.exit(1)

    instance = reservation.instances[0]

    print "{} starting up...".format(instance)

    # Check up on its status every so often
    status = instance.update()
    while status == 'pending':
        time.sleep(10)
        status = instance.update()

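    # Once the instance is running, tag it and announce it on Slack.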
    if status == 'running':
        for k, v in tags.items():
            instance.add_tag(k, v)
        print "{} running at {}".format(instance, instance.private_ip_address)
        slackbot.post_message(
            "Started up AMI '{}' for '{}' on tier '{}' with ip '{}'".format(
                ami.id, service_info["name"], tier_config["tier"],
                instance.private_ip_address))

    else:
        print "Instance was not created correctly"
        sys.exit(1)
Example #4
def run_command(args):
    service = args.service
    conf = get_tier_config()
    print "Current tier and region: {} on {}".format(conf["tier"],
                                                     conf["region"])
    deployables = {depl["name"]: depl for depl in conf["deployables"]}
    if service is None:
        print "Select an instance to connect to:"
        for k in deployables.keys() + STOCK_SERVICES:
            print "   ", k
        return

    if service not in deployables and service not in STOCK_SERVICES:
        print "Service or deployable '{}' not one of {}. Will still try to find it.".format(
            service,
            deployables.keys() + STOCK_SERVICES)

    if service in STOCK_SERVICES:
        # TODO: Fix assumption about key name
        ssh_key_name = "{}-key.pem".format(conf["tier"].lower())
    elif service not in deployables:
        ssh_key_name = "{}-key.pem".format(conf["tier"].lower())
    else:
        ssh_key_name = deployables[service]["ssh_key"]

    ssh_key_file = get_config_path(ssh_key_name, ".ssh")

    # Get IP address of any instance of this deployable.
    sess = boto3.session.Session(region_name=conf["region"])
    ec2 = sess.client("ec2")
    filters = [
        {
            "Name": "instance-state-name",
            "Values": ["running"]
        },
        {
            "Name": "tag:tier",
            "Values": [conf["tier"]]
        },
        {
            "Name": "tag:service-name",
            "Values": [service]
        },
    ]
    print "Getting a list of EC2's from AWS matching the following criteria:"
    for criteria in filters:
        print "   {} = {}".format(criteria["Name"], criteria["Values"][0])

    ret = ec2.describe_instances(Filters=filters)
    instances = []
    for res in ret["Reservations"]:
        instances += res["Instances"]

    if not instances:
        print "No instance found which matches the criteria."
        return

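    # List the matching instances so the user can pick one to connect to.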
    print "Instances:"
    inst = instances[0]
    for i, ins in enumerate(instances):
        lb = [
            tag["Value"] for tag in ins["Tags"] if tag["Key"] == "launched-by"
        ] or ["n/a"]
        print "  {}: {} at {} launched by {} on {}".format(
            i + 1, ins["InstanceId"], ins["PrivateIpAddress"], lb[0],
            ins["LaunchTime"])

    if len(instances) > 1:
        which = raw_input(
            "Select an instance to connect to (or press enter for first one): "
        )
        if which:
            inst = instances[int(which) - 1]
    else:
        print "Only one instance available. Connecting to it immediately.."

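    # Build the ssh command; for known deployables, cd into the app directory on login.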
    ip_address = inst["PrivateIpAddress"]
    cd_cmd = ""
    if service in deployables:
        cd_cmd = 'cd /usr/local/bin/{}; exec bash --login'.format(service)
    cmd = [
        "ssh", "ubuntu@{}".format(ip_address), "-i", ssh_key_file, "-t", cd_cmd
    ]
    print "\nSSH command:", " ".join(cmd)
    # ssh runs interactively, so its output goes straight to the terminal.
    p = subprocess.Popen(cmd)
    p.communicate()
    if p.returncode != 0:
        sys.exit(p.returncode)
Example #5
def _run_command(args):
    if args.launch and args.autoscale:
        print "Error: Can't use --launch and --autoscale together."
        sys.exit(1)

    service_info = get_service_info()
    tier_config = get_tier_config()
    ec2_conn = boto.ec2.connect_to_region(tier_config["region"])
    iam_conn = boto.iam.connect_to_region(tier_config["region"])
    tier_name = tier_config["tier"].upper()  # Canonical name of tier

    print "Launch an instance of '{}' on tier '{}'".format(
        service_info["name"], tier_config["tier"])

    if tier_config.get('is_live', True):
        print "NOTE! This tier is marked as LIVE. Special restrictions may apply. Use --force to override."

    for deployable in tier_config["deployables"]:
        if deployable["name"] == service_info["name"]:
            break
    else:
        print "Error: Deployable '{}' not found in tier config:".format(
            service_info["name"])
        print pretty(tier_config)
        sys.exit(1)

    print "Deployable:\n", pretty(deployable)
    autoscaling = deployable.get('autoscaling')
    release = deployable.get('release', '')

    if args.launch and autoscaling and not args.force:
        print "--launch specified, but tier config specifies 'use_autoscaling'. Use --force to override."
        sys.exit(1)
    if args.autoscale and not autoscaling and not args.force:
        print "--autoscale specified, but tier config doesn't specify 'use_autoscaling'. Use --force to override."
        sys.exit(1)

    if args.autoscale and not autoscaling:
        # Fill using default params
        autoscaling = {
            "min": 1,
            "max": 2,
            "desired": 2,
            "instance_type": args.instance_type,
        }

    # Find AMI
    filters = {
        'tag:service-name': service_info["name"],
        'tag:tier': tier_name,
    }
    if release:
        filters['tag:release'] = release

    print "Searching for AMIs matching the following tags:\n", pretty(filters)
    amis = ec2_conn.get_all_images(
        owners=['self'],  # The current organization
        filters=filters,
    )
    if not amis:
        print "No AMI's found that match the tags."
        print "Bake a new one using this command: {} ami bake {}".format(
            sys.argv[0], release)
        ami = None
    else:
        print "{} AMI(s) found.".format(len(amis))
        ami = max(amis, key=operator.attrgetter("creationDate"))

    if args.ami:
        print "Using a specified AMI:", args.ami
        if ami and ami.id != args.ami:
            print "AMI found is different from AMI specified on command line."
            if tier_config.get('is_live', True) and not args.force:
                print "This is a live tier. Can't run mismatched AMI unless --force is specified"
                sys.exit(1)
        ami = ec2_conn.get_image(args.ami)

    if not ami:
        sys.exit(1)

    ami_info = dict(
        ami_id=ami.id,
        ami_name=ami.name,
        ami_created=ami.creationDate,
        ami_tags=ami.tags,
    )
    print "AMI Info:\n", pretty(ami_info)

    if autoscaling:
        print "Autoscaling group:\n", pretty(autoscaling)
    else:
        print "EC2:"
        print "\tInstance Type:\t{}".format(args.instance_type)

    ec2 = boto3.resource('ec2', region_name=tier_config["region"])

    # Get all 'private' subnets
    filters = {'tag:tier': tier_name, 'tag:realm': 'private'}
    subnets = list(ec2.subnets.filter(Filters=filterize(filters)))
    if not subnets:
        print "Error: No subnet available matching filter", filters
        sys.exit(1)

    print "Subnets:"
    for subnet in subnets:
        print "\t{} - {}".format(fold_tags(subnet.tags)['Name'], subnet.id)

    # Get the "one size fits all" security group
    filters = {
        'tag:tier': tier_name,
        'tag:Name': '{}-private-sg'.format(tier_name)
    }
    security_group = list(
        ec2.security_groups.filter(Filters=filterize(filters)))[0]
    print "Security Group:\n\t{} [{} {}]".format(
        fold_tags(security_group.tags)["Name"], security_group.id,
        security_group.vpc_id)

    # The key pair name for SSH
    key_name = deployable["ssh_key"]
    if "." in key_name:
        key_name = key_name.split(
            ".",
            1)[0]  # TODO: Distinguish between key name and .pem key file name

    print "SSH Key:\t", key_name
    '''
    autoscaling group:
    Name            LIVENORTH-themachines-backend-auto
    api-port        10080
    api-target      themachines-backend
    service-name    themachines-backend
    service-type    rest-api
    tier            LIVENORTH

    ec2:
    Name            DEVNORTH-drift-base
    launched-by     nonnib
    api-port        10080
    api-target      drift-base
    service-name    drift-base
    service-type    rest-api
    tier            DEVNORTH
    '''

    target_name = "{}-{}".format(tier_name, service_info["name"])
    if autoscaling:
        target_name += "-auto"

    tags = {
        "Name": target_name,
        "tier": tier_name,
        "service-name": service_info["name"],
        "service-type":
        "rest-api",  # TODO: Assume there are more types to come.
        "launched-by": iam_conn.get_user().user_name,

        # Make instance part of api-router round-robin load balancing
        "api-target": service_info["name"],
        "api-port": "10080",
        "api-status": "online",
    }

    if args.preview:
        print "--preview specified, exiting now before actually doing anything."
        sys.exit(0)

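    # Either roll out through an autoscaling group or launch a single EC2 instance.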
    if autoscaling:
        client = boto3.client('autoscaling', region_name=tier_config["region"])
        launch_config_name = '{}-{}-launchconfig-{}-{}'.format(
            tier_name, service_info["name"], datetime.utcnow(), release)
        launch_config_name = launch_config_name.replace(':', '.')
        launch_script = '''#!/bin/bash\nsudo bash -c "echo TIERCONFIGPATH='${TIERCONFIGPATH}' >> /etc/environment"'''

        kwargs = dict(
            LaunchConfigurationName=launch_config_name,
            ImageId=ami.id,
            KeyName=key_name,
            SecurityGroups=[security_group.id],
            InstanceType=autoscaling['instance_type'] or args.instance_type,
            IamInstanceProfile=IAM_ROLE,
            InstanceMonitoring={'Enabled': True},
            UserData=launch_script,
        )
        print "Creating launch configuration using params:\n", pretty(kwargs)
        client.create_launch_configuration(**kwargs)

        # Update current autoscaling group or create a new one if it doesn't exist.
        groups = client.describe_auto_scaling_groups(
            AutoScalingGroupNames=[target_name])

        if not groups['AutoScalingGroups']:
            tagsarg = [{
                'ResourceId': tags['Name'],
                'ResourceType': 'auto-scaling-group',
                'Key': k,
                'Value': v,
                'PropagateAtLaunch': True,
            } for k, v in tags.items()]
            kwargs = dict(
                AutoScalingGroupName=target_name,
                LaunchConfigurationName=launch_config_name,
                MinSize=autoscaling['min'],
                MaxSize=autoscaling['max'],
                DesiredCapacity=autoscaling['desired'],
                VPCZoneIdentifier=','.join([subnet.id for subnet in subnets]),
                Tags=tagsarg,
            )
            print "Creating a new autoscaling group using params:\n", pretty(
                kwargs)
            client.create_auto_scaling_group(**kwargs)
        else:
            print "Updating current autoscaling group", target_name
            kwargs = dict(
                AutoScalingGroupName=target_name,
                LaunchConfigurationName=launch_config_name,
                MinSize=autoscaling['min'],
                MaxSize=autoscaling['max'],
                DesiredCapacity=autoscaling['desired'],
                VPCZoneIdentifier=','.join([subnet.id for subnet in subnets]),
            )
            client.update_auto_scaling_group(**kwargs)

        print "Done!"
        print "YOU MUST TERMINATE THE OLD EC2 INSTANCES YOURSELF!"
    else:
        # Pick a random subnet from list of available subnets
        subnet = random.choice(subnets)
        print "Randomly picked this subnet to use: ", subnet

        print "Launching EC2 instance..."
        reservation = ec2_conn.run_instances(
            ami.id,
            instance_type=args.instance_type,
            subnet_id=subnet.id,
            security_group_ids=[security_group.id],
            key_name=key_name,
            instance_profile_name=IAM_ROLE)

        if len(reservation.instances) == 0:
            print "No instances in reservation!"
            sys.exit(1)

        instance = reservation.instances[0]

        print "{} starting up...".format(instance)

        # Check up on its status every so often
        status = instance.update()
        while status == 'pending':
            time.sleep(10)
            status = instance.update()

        if status == 'running':
            for k, v in tags.items():
                instance.add_tag(k, v)
            print "{} running at {}".format(instance,
                                            instance.private_ip_address)
            slackbot.post_message(
                "Started up AMI '{}' for '{}' on tier '{}' with ip '{}'".
                format(ami.id, service_info["name"], tier_config["tier"],
                       instance.private_ip_address))

        else:
            print "Instance was not created correctly"
            sys.exit(1)
Example #6
def _bake_command(args):
    service_info = get_service_info()
    tier_config = get_tier_config()
    iam_conn = boto.iam.connect_to_region(tier_config["region"])

    if args.ubuntu:
        # Get all Ubuntu Trusty 14.04 images from the appropriate region and
        # pick the most recent one.
        # The 'Canonical' owner. This organization maintains the Ubuntu AMI's on AWS.
        print "Finding the latest AMI on AWS that matches", UBUNTU_RELEASE
        ec2 = boto3.resource('ec2', region_name=tier_config["region"])
        filters = [
            {
                'Name': 'name',
                'Values': [UBUNTU_RELEASE]
            },
        ]
        amis = list(
            ec2.images.filter(Owners=[AMI_OWNER_CANONICAL], Filters=filters))
        if not amis:
            print "No AMI found matching '{}'. Not sure what to do now.".format(
                UBUNTU_RELEASE, tier_config["tier"], sys.argv[0])
            sys.exit(1)
        ami = max(amis, key=operator.attrgetter("creation_date"))
    else:
        ec2 = boto3.resource('ec2', region_name=tier_config["region"])
        filters = [
            {
                'Name': 'tag:service-name',
                'Values': [UBUNTU_BASE_IMAGE_NAME]
            },
            {
                'Name': 'tag:tier',
                'Values': [tier_config["tier"]]
            },
        ]
        amis = list(ec2.images.filter(Owners=['self'], Filters=filters))
        if not amis:
            print "No '{}' AMI found for tier {}. Bake one using this command: {} ami bake --ubuntu".format(
                UBUNTU_BASE_IMAGE_NAME, tier_config["tier"], sys.argv[0])
            sys.exit(1)
        ami = max(amis, key=operator.attrgetter("creation_date"))

    print "Using source AMI:"
    print "\tID:\t", ami.id
    print "\tName:\t", ami.name
    print "\tDate:\t", ami.creation_date

    if args.ubuntu:
        version = None
        branch = ''
        sha_commit = ''
        deployment_manifest = create_deployment_manifest(
            'bakeami')  # Todo: Should be elsewhere or different
    else:
        cmd = "python setup.py sdist --formats=zip"
        current_branch = get_branch()

        if not args.tag:
            # See if service is tagged to a specific version for this tier
            for si in tier_config['deployables']:
                if si['name'] == service_info['name']:
                    if 'release' in si:
                        text = "Error: As deployable '{}' for tier '{}' is pegged to a particular " \
                            "release, you must specify a release tag to which to bake from.\n" \
                            "Note that this is merely a safety measure.\n" \
                            "For reference, the current deployable for this tier is pegged at " \
                            "release tag '{}'."
                        print text.format(service_info['name'],
                                          tier_config['tier'], si['release'])
                        sys.exit(1)
                    break

        if not args.tag:
            args.tag = current_branch

        print "Using branch/tag", args.tag

        checkout(args.tag)
        try:
            deployment_manifest = create_deployment_manifest(
                'bakeami')  # Todo: Should be elsewhere or different
            sha_commit = get_commit()
            branch = get_branch()
            version = get_git_version()
            service_info = get_service_info()
            if not args.preview:
                os.system(cmd)
        finally:
            print "Reverting to ", current_branch
            checkout(current_branch)

    if not version:
        version = {'tag': 'untagged-branch'}

    print "git version:", version

    user = iam_conn.get_user()  # The current IAM user running this command

    # Need to generate a pre-signed url to the tiers root config file on S3
    tiers_config = get_tiers_config()
    tiers_config_url = '{}/{}.{}/{}'.format(tiers_config['region'],
                                            tiers_config['bucket'],
                                            tiers_config['domain'],
                                            TIERS_CONFIG_FILENAME)

    var = {
        "service":
        UBUNTU_BASE_IMAGE_NAME if args.ubuntu else service_info["name"],
        "versionNumber": service_info["version"],
        "region": tier_config["region"],
        "source_ami": ami.id,
        "branch": branch,
        "commit": sha_commit,
        "release": version['tag'],
        "user_name": user.user_name,
        "tier": tier_config["tier"],
        "tier_url": tiers_config_url,
    }

    if args.ubuntu:
        var['setup_script'] = pkg_resources.resource_filename(
            __name__, "ubuntu-packer.sh")
    else:
        var['setup_script'] = pkg_resources.resource_filename(
            __name__, "driftapp-packer.sh")

    print "Using var:\n", pretty(var)

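    # Verify that Packer is installed before composing the build command.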
    packer_cmd = "packer"
    try:
        result = subprocess.call(packer_cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
    except Exception as e:
        print "Error:", e
        print "%s was not found. Please install using the following method:" % packer_cmd
        print "  brew tap homebrew/binary\n  brew install %s" % packer_cmd
        sys.exit(1)
    else:
        print "Packer process returned", result

    cmd = "%s build " % packer_cmd
    if args.debug:
        cmd += "-debug "

    cmd += "-only=amazon-ebs "
    for k, v in var.iteritems():
        cmd += "-var {}=\"{}\" ".format(k, v)

    # Use generic packer script if project doesn't specify one
    pkg_resources.cleanup_resources()
    if args.ubuntu:
        scriptfile = pkg_resources.resource_filename(__name__,
                                                     "ubuntu-packer.json")
        cmd += scriptfile
    elif os.path.exists("config/packer.json"):
        cmd += "config/packer.json"
    else:
        scriptfile = pkg_resources.resource_filename(__name__,
                                                     "driftapp-packer.json")
        cmd += scriptfile
    print "Baking AMI with: {}".format(cmd)

    # Dump deployment manifest into dist folder temporarily. The packer script
    # will pick it up and bake it into the AMI.
    deployment_manifest_filename = os.path.join("dist",
                                                "deployment-manifest.json")
    deployment_manifest_json = json.dumps(deployment_manifest, indent=4)
    print "Deployment Manifest:\n", deployment_manifest_json

    if args.preview:
        print "Not building or packaging because --preview is on. Exiting now."
        return

    with open(deployment_manifest_filename, "w") as dif:
        dif.write(deployment_manifest_json)

    start_time = time.time()
    try:
        os.system(cmd)
    finally:
        os.remove(deployment_manifest_filename)
        pkg_resources.cleanup_resources()
    duration = time.time() - start_time
    print "Done after %.0f seconds" % (duration)
    slackbot.post_message(
        "Successfully baked a new AMI for '{}' on tier '{}' in {:.0f} seconds".
        format(service_info["name"], get_tier_name(), duration))
Example #7
def run_command(args):
    tier_config = get_tier_config()
    service_info = get_service_info()
    tier = tier_config["tier"]
    region = tier_config["region"]
    service_name = service_info["name"]
    public = args.public
    include_drift = args.drift
    drift_filename = None
    drift_fullpath = None
    default_tenant = tier_config.get("default_tenant",
                                     "default-{}".format(tier.lower()))

    if args.tiername and args.tiername != tier:
        print "Default tier is '{}' but you expected '{}'. Quitting now.".format(
            tier, args.tiername)
        return

    is_protected_tier = _get_tier_protection(tier)
    if is_protected_tier and tier != args.tiername:
        print "You are quickdeploying to '{}' which is a protected tier.".format(
            tier)
        print "This is not recommended!"
        print "If you must do this, and you know what you are doing, state the name of"
        print "the tier using the --deploy-to-this-tier argument and run again."
        return

    # hack
    if include_drift:
        import drift
        drift_path = os.path.split(os.path.split(drift.__file__)[0])[0]
        build_fullpath = os.path.join(drift_path, "dist")

        if os.path.exists(build_fullpath):
            for filename in os.listdir(build_fullpath):
                if filename.startswith("Drift-"):
                    os.remove(os.path.join(build_fullpath, filename))
        drift_filename = None

        print "Building Drift in {}...".format(build_fullpath)
        cmd = [
            "python",
            os.path.join(drift_path, "setup.py"), "sdist", "--formats=zip"
        ]
        p = subprocess.Popen(cmd,
                             cwd=drift_path,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        stdout, _ = p.communicate()
        if p.returncode != 0:
            print stdout
            sys.exit(p.returncode)
        drift_filename = None
        for filename in os.listdir(build_fullpath):
            if filename.startswith("Drift-"):
                drift_filename = filename

        if not drift_filename:
            print "Error creating drift package: %s" % stdout
            sys.exit(9)
        print "Including drift package %s" % drift_filename
        drift_fullpath = os.path.join(build_fullpath, drift_filename)

    app_location = APP_LOCATION.format(service_name)
    old_path = app_location + "_old"

    for deployable in tier_config["deployables"]:
        if deployable["name"] == service_name:
            pem_file = deployable["ssh_key"]
            break
    else:
        print "Service {} not found in tier config for {}".format(
            service_name, tier)
        sys.exit(1)
    print "\n*** DEPLOYING service '{}' TO TIER '{}' IN REGION '{}'\n".format(
        service_name, tier, region)

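    # Build a source distribution (zip) of the service to upload.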
    build_filename = "{}-{}.zip".format(service_info["name"],
                                        service_info["version"])
    build_fullpath = os.path.join("dist", build_filename)
    try:
        os.remove(build_fullpath)
    except Exception as e:
        if "No such file or directory" not in repr(e):
            raise

    print "Building {}...".format(build_fullpath)
    p = subprocess.Popen(["python", "setup.py", "sdist", "--formats=zip"],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    stdout, nothing = p.communicate()

    if not os.path.exists(build_fullpath):
        print "Build artefact not found at {}".format(build_fullpath)
        print "Build output: %s" % stdout
        sys.exit(1)

    filters = {
        'tag:service-name': service_name,
        "instance-state-name": "running",
        "tag:tier": tier,
    }
    print "Finding ec2 instances in region %s from filters: %s" % (region,
                                                                   filters)
    instances = get_ec2_instances(region, filters=filters)
    if not instances:
        print "Found no running ec2 instances with tag service-name={}".format(
            service_name)
        return

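    # Upload and unpack the build on each instance over SSH, then restart the service.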
    for ec2 in instances:
        if not public:
            ip_address = ec2.private_ip_address
        else:
            ip_address = ec2.ip_address
        print "Deploying to {}...".format(ip_address)

        env.host_string = ip_address
        env.user = EC2_USERNAME
        env.key_filename = '~/.ssh/{}'.format(pem_file)
        with settings(warn_only=True):
            run("rm -f {}".format(build_filename))
        put(build_fullpath)
        if drift_filename:
            put(drift_fullpath)
        temp_folder = os.path.splitext(build_filename)[0]
        with settings(warn_only=True):  # expect some commands to fail
            run("sudo rm -f {}".format(UWSGI_LOGFILE))
            run("rm -r -f {}".format(temp_folder))
            with hide('output'):
                run("unzip {}".format(build_filename))
            run("sudo rm -r -f {}".format(old_path))
            run("sudo mv {} {}".format(app_location, old_path))

            deployment_manifest = create_deployment_manifest('quickdeploy')
            if args.comment:
                deployment_manifest['comment'] = args.comment

            deployment_manifest_json = json.dumps(deployment_manifest,
                                                  indent=4)
            cmd = "echo '{}' > {}/deployment-manifest.json".format(
                deployment_manifest_json, temp_folder)
            run(cmd)

        run("sudo mv {} {}".format(temp_folder, app_location))
        if not args.skiprequirements:
            with hide('output'):
                run("sudo pip install -U -r {}/requirements.txt".format(
                    app_location))

        # unpack drift after we've installed requirements
        if drift_filename:
            with hide('output'):
                run("unzip -o {}".format(drift_filename))
                DRIFT_LOCATION = "/usr/local/lib/python2.7/dist-packages/drift"
                run("sudo rm -rf {}/*".format(DRIFT_LOCATION))
                run("sudo cp -r {}/drift/* {}".format(
                    drift_filename.replace(".zip", ""), DRIFT_LOCATION))

        with hide('output'):
            run("sudo service {} restart".format(service_name))
            with settings(warn_only=True):  # celery might not be present
                run("sudo service {}-celery restart".format(service_name))

        # make sure the service keeps running
        sleep(1.0)
        run("sudo service {} status".format(service_name))

        # test the service endpoint
        try:
            with settings(warn_only=True):
                with hide('output'):
                    out = run(
                        'curl http://127.0.0.1:{} -H "Accept: application/json" -H "Drift-Tenant: {}"'
                        .format(SERVICE_PORT, default_tenant))
            d = json.loads(out)
            if "endpoints" not in d:
                raise Exception("service json is incorrect: %s" % out)
            print "\nService {} is running on {}!".format(
                service_name, ip_address)
            slackbot.post_message(
                "Successfully quick-deployed '{}' to tier '{}'".format(
                    service_name, tier))
        except Exception:
            print "Unexpected response: %s" % out
            error_report()
            raise
Example #8
def run_command(args):
    service_info = get_service_info()
    tier_config = get_tier_config()
    ec2_conn = boto.ec2.connect_to_region(tier_config["region"])
    iam_conn = boto.iam.connect_to_region(tier_config["region"])

    if args.ubuntu:
        # Get all Ubuntu Trusty 14.04 images from the appropriate region and
        # pick the most recent one.
        print "Finding the latest AMI on AWS that matches 'ubuntu-trusty-14.04*'"
        # The 'Canonical' owner. This organization maintains the Ubuntu AMI's on AWS.
        amis = ec2_conn.get_all_images(
            owners=['099720109477'],
            filters={'name': 'ubuntu/images/hvm/ubuntu-trusty-14.04*'},
        )
        ami = max(amis, key=operator.attrgetter("creationDate"))
    else:

        amis = ec2_conn.get_all_images(
            owners=['self'],  # The current organization
            filters={
                'tag:service-name': UBUNTU_BASE_IMAGE_NAME,
                'tag:tier': tier_config["tier"],
            },
        )
        if not amis:
            print "No '{}' AMI found for tier {}. Bake one using this command: {} bakeami --ubuntu".format(
                UBUNTU_BASE_IMAGE_NAME, tier_config["tier"], sys.argv[0])
            sys.exit(1)

        ami = max(amis, key=operator.attrgetter("creationDate"))
        print "{} AMI(s) found.".format(len(amis))

    print "Using source AMI:"
    print "\tID:\t", ami.id
    print "\tName:\t", ami.name
    print "\tDate:\t", ami.creationDate

    if args.ubuntu:
        version = None
        branch = ''
        sha_commit = ''
    else:
        cmd = "python setup.py sdist --formats=zip"
        current_branch = get_branch()
        if not args.tag:
            args.tag = current_branch

        print "Using branch/tag", args.tag
        checkout(args.tag)
        try:
            sha_commit = get_commit()
            branch = get_branch()
            version = get_git_version()
            if not args.preview:
                os.system(cmd)
        finally:
            print "Reverting to ", current_branch
            checkout(current_branch)

    if not version:
        version = {'tag': 'untagged-branch'}

    print "git version:", version

    service_info = get_service_info()
    user = iam_conn.get_user()  # The current IAM user running this command

    # Need to generate a pre-signed url to the tiers root config file on S3
    tiers_config = get_tiers_config()
    tiers_config_url = '{}/{}.{}/{}'.format(tiers_config['region'],
                                            tiers_config['bucket'],
                                            tiers_config['domain'],
                                            TIERS_CONFIG_FILENAME)

    var = {
        "service":
        UBUNTU_BASE_IMAGE_NAME if args.ubuntu else service_info["name"],
        "versionNumber": service_info["version"],
        "region": tier_config["region"],
        "source_ami": str(ami.id),
        "branch": branch,
        "commit": sha_commit,
        "release": version['tag'],
        "user_name": str(user.user_name),
        "tier": tier_config["tier"],
        "tier_url": str(tiers_config_url),
    }

    if args.ubuntu:
        var['setup_script'] = pkg_resources.resource_filename(
            __name__, "ubuntu-packer.sh")
    else:
        var['setup_script'] = pkg_resources.resource_filename(
            __name__, "driftapp-packer.sh")

    print "Using var:\n", json.dumps({k: str(v)
                                      for k, v in var.iteritems()},
                                     indent=4)

    packer_cmd = "packer"
    try:
        result = subprocess.call(packer_cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
    except Exception as e:
        print "Error:", e
        print "%s was not found. Please install using the following method:" % packer_cmd
        print "  brew tap homebrew/binary\n  brew install %s" % packer_cmd
        sys.exit(1)
    else:
        print "Packer process returned", result

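    # Compose the 'packer build' command line, passing each variable with -var.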
    cmd = "%s build " % packer_cmd
    if args.debug:
        cmd += "-debug "

    cmd += "-only=amazon-ebs "
    for k, v in var.iteritems():
        cmd += "-var {}=\"{}\" ".format(k, v)

    # Use generic packer script if project doesn't specify one
    pkg_resources.cleanup_resources()
    if args.ubuntu:
        scriptfile = pkg_resources.resource_filename(__name__,
                                                     "ubuntu-packer.json")
        cmd += scriptfile
    elif os.path.exists("config/packer.json"):
        cmd += "config/packer.json"
    else:
        scriptfile = pkg_resources.resource_filename(__name__,
                                                     "driftapp-packer.json")
        cmd += scriptfile
    print "Baking AMI with: {}".format(cmd)

    if args.preview:
        print "Not building or packaging because --preview is on. Exiting now."
        return

    start_time = time.time()
    # Dump deployment manifest into dist folder temporarily. The packer script
    # will pick it up and bake it into the AMI.
    deployment_manifest_filename = os.path.join("dist",
                                                "deployment-manifest.json")
    deployment_manifest_json = json.dumps(
        create_deployment_manifest('bakeami'), indent=4)
    print "Deployment Manifest:\n", deployment_manifest_json
    with open(deployment_manifest_filename, "w") as dif:
        dif.write(deployment_manifest_json)

    try:
        os.system(cmd)
    finally:
        os.remove(deployment_manifest_filename)
        pkg_resources.cleanup_resources()
    duration = time.time() - start_time
    print "Done after %.0f seconds" % (duration)
    slackbot.post_message(
        "Successfully baked a new AMI for '{}' on tier '{}' in {:.0f} seconds".
        format(service_info["name"], get_tier_name(), duration))
Example #9
def run_command(args):
    tier_config = get_tier_config()
    service_info = get_service_info()
    tier = tier_config["tier"]
    region = tier_config["region"]
    service_name = service_info["name"]
    public = args.public

    pem_file = None
    for deployable in tier_config["deployables"]:
        if deployable["name"] == service_name:
            pem_file = deployable["ssh_key"]
            break
    else:
        print "Service {} not found in tier config for {}".format(
            service_name, tier)
        sys.exit(1)
    print "\n*** VIEWING LOGS FOR SERVICE '{}' / TIER '{}' IN REGION '{}'\n".format(
        service_name, tier, region)

    filters = {
        'tag:service-name': service_name,
        "instance-state-name": "running",
        "tag:tier": tier,
    }
    print "Finding ec2 instances in region %s from filters: %s" % (region,
                                                                   filters)
    instances = get_ec2_instances(region, filters=filters)
    if not instances:
        print "Found no running ec2 instances with tag service-name={}".format(
            service_name)
        return
    if args.host:
        instances = [
            i for i in instances
            if [i.private_ip_address, i.ip_address][public] == args.host
        ]
    for ec2 in instances:
        if not public:
            ip_address = ec2.private_ip_address
        else:
            ip_address = ec2.ip_address
        print "*** Logs in {} on {}...".format(UWSGI_LOGFILE, ip_address)
        key_path = '~/.ssh/{}'.format(pem_file)
        if not args.stream:
            env.host_string = ip_address
            env.user = EC2_USERNAME
            env.key_filename = key_path
            cmd = "sudo tail {} -n 100".format(UWSGI_LOGFILE)
            if args.grep:
                cmd += " | grep {}".format(args.grep)
            run(cmd)
            print
        else:
            if len(instances) > 1:
                print "The --stream argument can only be used on a single host. Please use --host to pick one"
                print "Hosts: {}".format(", ".join([
                    str([i.private_ip_address, i.ip_address][public])
                    for i in instances
                ]))
                return
            import paramiko
            import select

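            # Stream the log file live over SSH using paramiko and select.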
            client = paramiko.SSHClient()
            client.load_system_host_keys()
            client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            key_path = os.path.expanduser(key_path)
            client.connect(ip_address,
                           username=EC2_USERNAME,
                           key_filename=key_path)
            #client.connect(ip_address)
            channel = client.get_transport().open_session()
            grep_cmd = ""
            if args.grep:
                grep_cmd = " | grep --line-buffered {}".format(args.grep)
            channel.exec_command("sudo tail -f {}{}".format(
                UWSGI_LOGFILE, grep_cmd))
            while True:
                if channel.exit_status_ready():
                    break
                rl, wl, xl = select.select([channel], [], [], 0.0)
                if len(rl) > 0:
                    sys.stdout.write(channel.recv(1024))