Example #1
def rsync_mountpoints(src_inst, src_vol, src_mnt, dst_inst, dst_vol, dst_mnt,
                      encr=False):
    """Run `rsync` against mountpoints, copy disk label.

    :param src_inst: source instance;
    :param src_vol: source volume with label that will be copied to
                    dst_vol;
    :param src_mnt: root or directory hierarchy to replicate;
    :param dst_inst: destination instance;
    :param dst_vol: destination volume that will be labelled with the
                    label from src_vol;
    :param dst_mnt: destination point where the source hierarchy will be
                    placed;
    :param encr: True if volume is encrypted;
    :type encr: bool."""
    src_key_filename = config.get(src_inst.region.name, 'KEY_FILENAME')
    dst_key_filename = config.get(dst_inst.region.name, 'KEY_FILENAME')
    with config_temp_ssh(dst_inst.connection) as key_file:
        with settings(host_string=dst_inst.public_dns_name,
                      key_filename=dst_key_filename):
            wait_for_sudo('cp /root/.ssh/authorized_keys '
                          '/root/.ssh/authorized_keys.bak')
            pub_key = local('ssh-keygen -y -f {0}'.format(key_file), True)
            append('/root/.ssh/authorized_keys', pub_key, use_sudo=True)
            if encr:
                sudo('screen -d -m sh -c "nc -l 60000 | gzip -dfc | '
                     'sudo dd of={0} bs=16M"'
                     .format(get_vol_dev(dst_vol)), pty=False)  # dirty magick
                dst_ip = sudo(
                    'curl http://169.254.169.254/latest/meta-data/public-ipv4')

        with settings(host_string=src_inst.public_dns_name,
                      key_filename=src_key_filename):
            put(key_file, '.ssh/', mirror_local_mode=True)
            dst_key_filename = os.path.split(key_file)[1]
            if encr:
                sudo('(dd if={0} bs=16M | gzip -cf --fast | nc -v {1} 60000)'
                     .format(get_vol_dev(src_vol), dst_ip))
            else:
                cmd = (
                    'rsync -e "ssh -i .ssh/{key_file} -o '
                    'StrictHostKeyChecking=no" -cahHAX --delete --inplace '
                    '--exclude /root/.bash_history '
                    '--exclude /home/*/.bash_history '
                    '--exclude /etc/ssh/moduli --exclude /etc/ssh/ssh_host_* '
                    '--exclude /etc/udev/rules.d/*persistent-net.rules '
                    '--exclude /var/lib/ec2/* --exclude=/mnt/* '
                    '--exclude=/proc/* --exclude=/tmp/* '
                    '{src_mnt}/ root@{rhost}:{dst_mnt}')
                wait_for_sudo(cmd.format(
                    rhost=dst_inst.public_dns_name, dst_mnt=dst_mnt,
                    key_file=dst_key_filename, src_mnt=src_mnt))
                label = sudo('e2label {0}'.format(get_vol_dev(src_vol)))
        with settings(host_string=dst_inst.public_dns_name,
                      key_filename=dst_key_filename):
            if not encr:
                sudo('e2label {0} {1}'.format(get_vol_dev(dst_vol), label))
            wait_for_sudo('mv /root/.ssh/authorized_keys.bak '
                          '/root/.ssh/authorized_keys')
            wait_for_sudo('sync', shell=False)
            wait_for_sudo('for i in {1..20}; do sync; sleep 1; done &')
Example #2
def modify_kernel(region, instance_id):
    """
    Modify old kernel for stopped instance (needed to make pv-grub work)

    .. note:: install grub-legacy-ec2 and upgrades before running this.

    region
        specify instance region;
    instance_id
        specify instance id for kernel change
    Kernels list:
        ap-southeast-1      x86_64  aki-11d5aa43
        ap-southeast-1  i386    aki-13d5aa41
        eu-west-1       x86_64  aki-4feec43b
        eu-west-1       i386    aki-4deec439
        us-east-1       x86_64  aki-427d952b
        us-east-1       i386    aki-407d9529
        us-west-1       x86_64  aki-9ba0f1de
        us-west-1       i386    aki-99a0f1dc"""
    key_filename = config.get(region, 'KEY_FILENAME')
    conn = get_region_conn(region)
    instance = get_inst_by_id(conn.region.name, instance_id)
    env.update({
        'host_string': instance.public_dns_name,
        'key_filename': key_filename,
    })
    sudo('env DEBIAN_FRONTEND=noninteractive apt-get update && '
         'env DEBIAN_FRONTEND=noninteractive apt-get dist-upgrade && '
         'env DEBIAN_FRONTEND=noninteractive apt-get install grub-legacy-ec2')
    kernel = config.get(conn.region.name,
                        'KERNEL' + instance.architecture.upper())
    instance.stop()
    wait_for(instance, 'stopped')
    instance.modify_attribute('kernel', kernel)
    instance.start()
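The replacement kernel is looked up in per-region config options named 'KERNEL' plus the upper-cased architecture. A minimal sketch of the expected layout, assuming the ConfigParser-style `config` object used throughout these snippets (the section name, key path and option names are illustrative; the AKI values come from the kernels list in the docstring):

from ConfigParser import SafeConfigParser

config = SafeConfigParser()
config.add_section('us-east-1')
config.set('us-east-1', 'KEY_FILENAME', '/path/to/key.pem')  # placeholder path
config.set('us-east-1', 'KERNELX86_64', 'aki-427d952b')  # values from the docstring table
config.set('us-east-1', 'KERNELI386', 'aki-407d9529')

architecture = 'x86_64'
kernel = config.get('us-east-1', 'KERNEL' + architecture.upper())  # 'aki-427d952b'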
Example #4
def rsync_mountpoints(src_inst, src_vol, src_mnt, dst_inst, dst_vol, dst_mnt,
                      encr=False):
    """Run `rsync` against mountpoints, copy disk label.

    :param src_inst: source instance;
    :param src_vol: source volume with label that will be copied to
                    dst_vol;
    :param src_mnt: root or directory hierarchy to replicate;
    :param dst_inst: destination instance;
    :param dst_vol: destination volume that will be labelled with the
                    label from src_vol;
    :param dst_mnt: destination point where the source hierarchy will be
                    placed;
    :param encr: True if volume is encrypted;
    :type encr: bool."""
    src_key_filename = config.get(src_inst.region.name, 'KEY_FILENAME')
    dst_key_filename = config.get(dst_inst.region.name, 'KEY_FILENAME')
    with config_temp_ssh(dst_inst.connection) as key_file:
        with settings(host_string=dst_inst.public_dns_name,
                      key_filename=dst_key_filename):
            wait_for_sudo('cp /root/.ssh/authorized_keys '
                          '/root/.ssh/authorized_keys.bak')
            pub_key = local('ssh-keygen -y -f {0}'.format(key_file), True)
            append('/root/.ssh/authorized_keys', pub_key, use_sudo=True)
            if encr:
                sudo('screen -d -m sh -c "nc -l 60000 | gzip -dfc | '
                     'sudo dd of={0} bs=16M"'
                     .format(get_vol_dev(dst_vol)), pty=False)  # dirty magick
                dst_ip = sudo(
                    'curl http://169.254.169.254/latest/meta-data/public-ipv4')

        with settings(host_string=src_inst.public_dns_name,
                      key_filename=src_key_filename):
            put(key_file, '.ssh/', mirror_local_mode=True)
            dst_key_filename = os.path.split(key_file)[1]
            if encr:
                sudo('(dd if={0} bs=16M | gzip -cf --fast | nc -v {1} 60000)'
                     .format(get_vol_dev(src_vol), dst_ip))
            else:
                cmd = (
                    'rsync -e "ssh -i .ssh/{key_file} -o '
                    'StrictHostKeyChecking=no" -cahHAX --delete --inplace '
                    '--exclude /root/.bash_history '
                    '--exclude /home/*/.bash_history '
                    '--exclude /etc/ssh/moduli --exclude /etc/ssh/ssh_host_* '
                    '--exclude /etc/udev/rules.d/*persistent-net.rules '
                    '--exclude /var/lib/ec2/* --exclude=/mnt/* '
                    '--exclude=/proc/* --exclude=/tmp/* '
                    '{src_mnt}/ root@{rhost}:{dst_mnt}')
                wait_for_sudo(cmd.format(
                    rhost=dst_inst.public_dns_name, dst_mnt=dst_mnt,
                    key_file=dst_key_filename, src_mnt=src_mnt))
                label = sudo('e2label {0}'.format(get_vol_dev(src_vol)))
        with settings(host_string=dst_inst.public_dns_name,
                      key_filename=dst_key_filename):
            if not encr:
                sudo('e2label {0} {1}'.format(get_vol_dev(dst_vol), label))
            wait_for_sudo('mv /root/.ssh/authorized_keys.bak '
                          '/root/.ssh/authorized_keys')
            run('sync', shell=False)
            run('for i in {1..20}; do sync; sleep 1; done &')
Example #5
 def freeze_volume():
     key_filename = config.get(inst.region.name, 'KEY_FILENAME')
     try:
         _user = config.get('SYNC', 'USERNAME')
     except:
         _user = USERNAME
     with settings(host_string=inst.public_dns_name,
                   key_filename=key_filename, user=_user):
         run('sync', shell=False)
         run('for i in {1..20}; do sync; sleep 1; done &')
Example #6
 def freeze_volume():
     key_filename = config.get(inst.region.name, 'KEY_FILENAME')
     try:
         _user = config.get('SYNC', 'USERNAME')
     except:
         _user = USERNAME
     with settings(host_string=inst.public_dns_name,
                   key_filename=key_filename, user=_user):
         wait_for_sudo('sync', shell=False)
         run('for i in {1..20}; do sudo sync; sleep 1; done &')
Example #7
def launch_instance_from_ami(region_name,
                             ami_id,
                             inst_type=None,
                             security_groups='',
                             key_pair=None,
                             zone_name=None,
                             user_data=None):
    """Create instance from specified AMI.

    region_name
        location of the AMI and new instance;
    ami_id
        "ami-..."
    inst_type
        by default will be fetched from the AMI description, or
        't1.micro' will be used if not mentioned in the description;
    security_groups
        list of AWS Security Groups names formatted as string separated
        with semicolon ';'
    key_pair
        name of key_pair to be granted access. Will be fetched from
        config by default, may be configured per region;
    zone_name
        in string format;
    user_data
        string with OS configuration commands."""
    conn = get_region_conn(region_name)
    image = conn.get_all_images([ami_id])[0]
    inst_type = inst_type or get_descr_attr(image, 'Type') or 't1.micro'
    security_groups = filter(None, security_groups.strip(';').split(';'))
    security_groups.append(new_security_group(conn.region))
    logger.info('Launching new instance in {reg} using {image}'.format(
        reg=conn.region, image=image))
    inst = image.run(key_name=key_pair
                     or config.get(conn.region.name, 'KEY_PAIR'),
                     security_groups=security_groups,
                     instance_type=inst_type,
                     user_data=user_data
                     or config.get('user_data', 'USER_DATA'),
                     placement=zone_name).instances[0]
    wait_for(inst, 'running', limit=10 * 60)
    groups = [grp.name for grp in inst.groups]
    inst.add_tag('Security Groups', dumps(groups, separators=(',', ':')))
    add_tags(inst, image.tags)
    modify_instance_termination(conn.region.name, inst.id)
    logger.info('{inst} created in {inst.placement}'.format(inst=inst))
    info = ('\nYou may now SSH into the {inst} server, using:'
            '\n ssh -i {key} {user}@{inst.public_dns_name}')
    key_file = config.get(conn.region.name, 'KEY_FILENAME')
    logger.info(info.format(inst=inst, user=env.user, key=key_file))
    return inst
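A usage sketch for the task above; the region, AMI id, zone and group names are placeholders, not values taken from the library:

# Hypothetical call: launch a t1.micro from a given AMI with two existing
# security groups; key pair and user_data fall back to the config defaults.
inst = launch_instance_from_ami('us-east-1', 'ami-00000000',
                                inst_type='t1.micro',
                                security_groups='web;ssh-only',
                                zone_name='us-east-1a')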
Example #8
def adduser(username,
            region=None,
            instance_ids=None,
            passwordless=None,
            sudo=None):
    """
    Creates new <username> with public SSH key on "host1;host2" list in
    <region>. If you want to create a passwordless account, set any value
    to <passwordless>; if you want sudo rights, set any value to <sudo>.
    The file with the public key must be in the same directory.
    If region and instance_ids are not set, the script takes hosts and key
    values from the command line (-H and -i).
    Usage:
    1. Without AWS API keys and config present:
    :<username>,<passwordless=1>,<sudo=1> - in this case you have to specify
    the hosts list in -H and your own account in -u fabric parameters.
    2. With AWS API keys and config entries:
    :<username>,<region>,"instance1;instance2",<passwordless>,<sudo>
    Extracts IPs from the instance description.
    """
    if instance_ids and region:
        instances_ids = list(unicode(instance_ids).split(';'))
        for inst in instances_ids:
            if inst:
                _instance = get_inst_by_id(region, inst)
                if not env.key_filename:
                    key_filename = config.get(_instance.region.name,
                                              'KEY_FILENAME')
                    env.update({'key_filename': key_filename})
                env.update({'host_string': _instance.public_dns_name})
                _create_account(username, region, instance_ids, passwordless,
                                sudo)
    else:
        _create_account(username, region, instance_ids, passwordless, sudo)
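Usage sketches mirroring the two modes described in the docstring; the username and instance ids are placeholders:

# 1. Without AWS API keys/config: hosts come from fabric's -H option and
#    the connecting account from -u, so only task arguments are passed.
adduser('deploy', passwordless=1, sudo=1)
# 2. With AWS API keys and config entries: hosts are resolved from the
#    semicolon-separated instance ids.
adduser('deploy', region='us-east-1',
        instance_ids='i-00000001;i-00000002', passwordless=1, sudo=1)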
Example #9
def adduser(username, region=None, instance_ids=None,
                                passwordless=None, sudo=None):
    """
    Creates new <username> with public SSH key on "host1;host2" list in
    <region>. If you want to create a passwordless account, set any value
    to <passwordless>; if you want sudo rights, set any value to <sudo>.
    The file with the public key must be in the same directory.
    If region and instance_ids are not set, the script takes hosts and key
    values from the command line (-H and -i).
    Usage:
    1. Without AWS API keys and config present:
    :<username>,<passwordless=1>,<sudo=1> - in this case you have to specify
    the hosts list in -H and your own account in -u fabric parameters.
    2. With AWS API keys and config entries:
    :<username>,<region>,"instance1;instance2",<passwordless>,<sudo>
    Extracts IPs from the instance description.
    """
    if instance_ids and region:
        instances_ids = list(unicode(instance_ids).split(';'))
        for inst in instances_ids:
            if inst:
                _instance = get_inst_by_id(region, inst)
                if not env.key_filename:
                    key_filename = config.get(_instance.region.name,
                                                      'KEY_FILENAME')
                    env.update({'key_filename': key_filename})
                env.update({'host_string': _instance.public_dns_name})
                _create_account(username, region, instance_ids, passwordless,
                                                                    sudo)
    else:
        _create_account(username, region, instance_ids, passwordless, sudo)
Example #10
def cleanup_security_groups(delete=False):
    """
    Delete unused AWS Security Groups.

    :type delete: boolean
    :param delete: notify only (i.e. False) by default.

    If a security group with the same name is used in at least one region,
    it is treated as used.
    """
    groups = defaultdict(lambda: {})
    used_groups = set(
        ['default', config.get('DEFAULT', 'HTTPS_SECURITY_GROUP')])
    regions = get_region_conn().get_all_regions()
    for reg in regions:
        for s_g in get_region_conn(reg.name).get_all_security_groups():
            groups[s_g.name][reg] = s_g
            if s_g.instances():  # Security Group is used by instance.
                used_groups.add(s_g.name)
            for rule in s_g.rules:
                for grant in rule.grants:
                    if grant.name and grant.owner_id == s_g.owner_id:
                        used_groups.add(grant.name)  # SG is used by group.
    for grp in used_groups:
        del groups[grp]

    for grp in sorted(groups):
        if delete:
            for reg in groups[grp]:
                s_g = groups[grp][reg]
                logger.info('Deleting {0} in {1}'.format(s_g, reg))
                s_g.delete()
        else:
            msg = '"SecurityGroup:{grp}" should be removed from {regs}'
            logger.info(msg.format(grp=grp, regs=groups[grp].keys()))
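A short usage sketch for the task above:

# Default dry run: only logs which unused groups should be removed.
cleanup_security_groups()
# Actually delete the unused groups in every region where they exist.
cleanup_security_groups(delete=True)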
Example #11
def cleanup_security_groups(delete=False):
    """
    Delete unused AWS Security Groups.

    :type delete: boolean
    :param delete: notify only (i.e. False) by default.

    If a security group with the same name is used in at least one region,
    it is treated as used.
    """
    groups = defaultdict(lambda: {})
    used_groups = set(["default", config.get("DEFAULT", "HTTPS_SECURITY_GROUP")])
    regions = get_region_conn().get_all_regions()
    for reg in regions:
        for s_g in get_region_conn(reg.name).get_all_security_groups():
            groups[s_g.name][reg] = s_g
            if s_g.instances():  # Security Group is used by instance.
                used_groups.add(s_g.name)
            for rule in s_g.rules:
                for grant in rule.grants:
                    if grant.name and grant.owner_id == s_g.owner_id:
                        used_groups.add(grant.name)  # SG is used by group.
    for grp in used_groups:
        del groups[grp]

    for grp in sorted(groups):
        if delete:
            for reg in groups[grp]:
                s_g = groups[grp][reg]
                logger.info("Deleting {0} in {1}".format(s_g, reg))
                s_g.delete()
        else:
            msg = '"SecurityGroup:{grp}" should be removed from {regs}'
            logger.info(msg.format(grp=grp, regs=groups[grp].keys()))
Example #12
 def attach_snap_to_inst(inst, snap):
     """Cleanup volume(s)."""
     wait_for(inst, 'running')
     try:
         vol, volumes = force_snap_attach(inst, snap)
         if encr:
             mnt = None
         else:
             mnt = mount_volume(vol)
         yield vol, mnt
     except BaseException as err:
         logger.exception(str(err))
         raise
     finally:
         key_filename = config.get(inst.region.name, 'KEY_FILENAME')
         with settings(host_string=inst.public_dns_name,
                       key_filename=key_filename):
             if not encr:
                 try:
                     wait_for_sudo('umount {0}'.format(mnt))
                 except:
                     pass
         for vol in volumes:
             if vol.status != 'available':
                 vol.detach(force=True)
             wait_for(vol, 'available', limit=DETACH_TIME)
             logger.info('Deleting {vol} in {vol.region}.'.format(vol=vol))
             vol.delete()
Example #13
def deluser(name, region=None, instance_ids=None):
    """
    Removes user <name> with deluser from "host1;host2" list in <region>.
    If region and instance_ids are not set, the script takes hosts and key
    values from the command line (-H and -i).
    """
    if instance_ids and region:
        instances_ids = list(unicode(instance_ids).split(';'))
        for inst in instances_ids:
            if inst:
                _instance = get_inst_by_id(region, inst)
                if not env.key_filename:
                    key_filename = config.get(_instance.region.name,
                                              'KEY_FILENAME')
                    env.update({
                        'key_filename': key_filename,
                        'warn_only': True
                    })
                env.update({'host_string': _instance.public_dns_name})
                env.username = name
                _sudo('deluser %(username)s' % env)
    else:
        env.update({'warn_only': True})
        env.username = name
        _sudo('deluser %(username)s' % env)
Example #14
def modify_instance_termination(region, instance_id):
    """Mark production instnaces as uneligible for termination.

    region
        name of region where instance is located;
    instance_id
        instance to be updated;

    You must change value of preconfigured tag_name and run this command
    before terminating production instance via API."""
    conn = get_region_conn(region)
    inst = get_inst_by_id(conn.region.name, instance_id)
    prod_tag = config.get('DEFAULT', 'TAG_NAME')
    prod_val = config.get('DEFAULT', 'TAG_VALUE')
    inst_tag_val = inst.tags.get(prod_tag)
    inst.modify_attribute('disableApiTermination', inst_tag_val == prod_val)
Example #17
def mount_snapshot(region_name, snap_id, inst_id=None):

    """Mount snapshot to temporary created instance or inst_id.

    region_name, snap_id
        specify snapshot.
    inst_id
        attach to an existing instance. A temporary instance will be
        created if None."""

    conn = get_region_conn(region_name)
    inst = get_inst_by_id(conn.region.name, inst_id) if inst_id else None
    snap = conn.get_all_snapshots(snapshot_ids=[snap_id, ])[0]

    info = ('\nYou may now SSH into the {inst} server, using:'
            '\n ssh -i {key} {user}@{inst.public_dns_name}')
    with attach_snapshot(snap, inst=inst) as (vol, mountpoint):
        if mountpoint:
            info += ('\nand browse snapshot, mounted at {mountpoint}.')
        else:
            info += ('\nand mount {device}. NOTE: device name may be '
                     'altered by system.')
        key_file = config.get(conn.region.name, 'KEY_FILENAME')
        inst = get_inst_by_id(conn.region.name, vol.attach_data.instance_id)
        assert inst
        logger.info(info.format(inst=inst, user=env.user, key=key_file,
            device=vol.attach_data.device, mountpoint=mountpoint))

        info = ('\nEnter FINISHED if you are finished looking at the '
                'backup and would like to cleanup: ')
        while raw_input(info).strip() != 'FINISHED':
            pass
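A usage sketch; the snapshot and instance ids are placeholders. The task blocks at the prompt until FINISHED is entered, after which attach_snapshot's context manager cleans up the temporary resources:

# Hypothetical calls: mount on a temporary instance, or on an existing one.
mount_snapshot('us-east-1', 'snap-00000000')
mount_snapshot('us-east-1', 'snap-00000000', inst_id='i-00000001')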
Example #18
 def create_inst_in_zone(zone, key_pair, sec_grps):
     inst = create_instance(zone.region.name,
                            zone.name,
                            key_pair=key_pair,
                            security_groups=sec_grps)
     inst.add_tag(config.get('DEFAULT', 'TAG_NAME'), 'temporary')
     return inst
Example #19
def launch_instance_from_ami(
    region_name, ami_id, inst_type=None, security_groups='', key_pair=None,
    zone_name=None, user_data=None):
    """Create instance from specified AMI.

    region_name
        location of the AMI and new instance;
    ami_id
        "ami-..."
    inst_type
        by default will be fetched from the AMI description, or
        't1.micro' will be used if not mentioned in the description;
    security_groups
        list of AWS Security Groups names formatted as string separated
        with semicolon ';'
    key_pair
        name of key_pair to be granted access. Will be fetched from
        config by default, may be configured per region;
    zone_name
        in string format;
    user_data
        string with OS configuration commands."""
    conn = get_region_conn(region_name)
    image = conn.get_all_images([ami_id])[0]
    inst_type = inst_type or get_descr_attr(image, 'Type') or 't1.micro'
    security_groups = filter(None, security_groups.strip(';').split(';'))
    security_groups.append(new_security_group(conn.region))
    logger.info('Launching new instance in {reg} using {image}'
                .format(reg=conn.region, image=image))
    inst = image.run(
        key_name=key_pair or config.get(conn.region.name, 'KEY_PAIR'),
        security_groups=security_groups,
        instance_type=inst_type,
        user_data=user_data or config.get('user_data', 'USER_DATA'),
        placement=zone_name).instances[0]
    wait_for(inst, 'running', limit=10 * 60)
    groups = [grp.name for grp in inst.groups]
    inst.add_tag('Security Groups', dumps(groups, separators=(',', ':')))
    add_tags(inst, image.tags)
    modify_instance_termination(conn.region.name, inst.id)
    logger.info('{inst} created in {inst.placement}'.format(inst=inst))
    info = ('\nYou may now SSH into the {inst} server, using:'
            '\n ssh -i {key} {user}@{inst.public_dns_name}')
    key_file = config.get(conn.region.name, 'KEY_FILENAME')
    logger.info(info.format(inst=inst, user=env.user, key=key_file))
    return inst
Example #20
def create_instance(
        region_name='us-east-1', zone_name=None, key_pair=None,
        security_groups='', architecture=None, user_data=None, inst_type=None):
    """
    Create AWS EC2 instance.

    Return created instance.

    region_name
        by default will be created in the us-east-1 region;
    zone_name
        string-formatted name, may be omitted;
    key_pair
        name of key_pair to be granted access. Will be fetched from
        config by default, may be configured per region;
    security_groups
        list of AWS Security Groups names formatted as string separated
        with semicolon ';';
    architecture
        "i386" or "x86_64";
    inst_type
        by default will be fetched from the AMI description, or
        't1.micro' will be used if not mentioned in the description.
    """
    conn = get_region_conn(region_name)

    ami_ptrn = config.get(conn.region.name, 'AMI_PTRN')
    architecture = architecture or config.get('DEFAULT', 'ARCHITECTURE')
    ubuntu_aws_account = config.get('DEFAULT', 'UBUNTU_AWS_ACCOUNT')
    filters = {'owner_id': ubuntu_aws_account, 'architecture': architecture,
             'name': ami_ptrn, 'image_type': 'machine',
             'root_device_type': 'ebs'}
    images = conn.get_all_images(filters=filters)
    # Filter AMI by latest version.
    ptrn = re.compile(config.get(conn.region.name, 'AMI_REGEXP'))
    versions = set([ptrn.search(img.name).group('version') for img in images])

    def complement(year_month):
        return '0' + year_month if len(year_month) == 4 else year_month

    latest_version = sorted(set(filter(complement, versions)))[-1]  # XXX Y3K.
    ami_ptrn_with_version = config.get(
        conn.region.name, 'AMI_PTRN_WITH_VERSION')
    name_with_version = ami_ptrn_with_version.format(version=latest_version)
    filters.update({'name': name_with_version})
    images = conn.get_all_images(filters=filters)
    # Filter AMI by latest release date.
    dates = set([ptrn.search(img.name).group('released_at') for img in images])
    latest_date = sorted(set(dates))[-1]
    ami_ptrn_with_release_date = config.get(
        conn.region.name, 'AMI_PTRN_WITH_RELEASE_DATE')
    name_with_version_and_release = ami_ptrn_with_release_date.format(
        version=latest_version, released_at=latest_date)
    filters.update({'name': name_with_version_and_release})
    image = conn.get_all_images(filters=filters)[0]
    return launch_instance_from_ami(
        region_name, image.id, security_groups=security_groups,
        key_pair=key_pair, zone_name=zone_name, user_data=user_data,
        inst_type=inst_type)
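The complement helper zero-pads 'Y.MM' Ubuntu versions so that plain string sorting matches chronological order. Note that in the code above it is passed to filter(), which only drops falsy entries; the sketch below applies it as a mapping to illustrate the padding idea the helper appears to encode (an interpretation, not the exact library behaviour):

def complement(year_month):
    return '0' + year_month if len(year_month) == 4 else year_month

versions = ['8.04', '9.10', '10.04', '11.04']
padded = sorted(complement(v) for v in versions)
# ['08.04', '09.10', '10.04', '11.04'] -> the latest release sorts last.
latest_version = padded[-1]  # '11.04'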
Example #21
def create_tmp_volume(region, size):
    """Format new filesystem."""
    with create_temp_inst(region) as inst:
        earmarking_tag = config.get(region.name, 'TAG_NAME')
        try:
            vol = get_region_conn(region.name).create_volume(size,
                                                             inst.placement)
            vol.add_tag(earmarking_tag, 'temporary')
            vol.attach(inst.id, get_avail_dev(inst))
            yield vol, mount_volume(vol, mkfs=True)
        finally:
            vol.detach(force=True)
            wait_for(vol, 'available', limit=DETACH_TIME)
            vol.delete()
Example #23
def get_vol_dev(vol):
    """Return OS-specific volume representation as attached device."""
    assert vol.attach_data.instance_id
    inst = get_inst_by_id(vol.region.name, vol.attach_data.instance_id)
    assert inst.public_dns_name, 'Instance is down'
    key_filename = config.get(vol.region.name, 'KEY_FILENAME')
    attached_dev = vol.attach_data.device
    natty_dev = attached_dev.replace('sd', 'xvd')
    representations = [attached_dev, natty_dev]
    with settings(host_string=inst.public_dns_name, key_filename=key_filename):
        logger.debug(env, output)
        for dev in representations:
            if wait_for_exists(dev):
                return dev
        raise NoDevFoundError(
            'Nothing from {variants} was located at {host.state} {host} for '
            '{vol} with {vol.attach_data.__dict__}'.format(
                host=inst, vol=vol, variants=representations))
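A small illustration of the two candidate names probed above; the device name is a placeholder:

# The attachment point reported by the EC2 API (e.g. '/dev/sdf') may show
# up inside newer (pv-grub) guests as '/dev/xvdf', hence both variants.
attached_dev = '/dev/sdf'
natty_dev = attached_dev.replace('sd', 'xvd')  # '/dev/xvdf'
representations = [attached_dev, natty_dev]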
Example #25
 def force_snap_attach(inst, snap):
     """Iterate over devices until successful attachment."""
     volumes_to_delete = []
     while get_avail_dev(inst):
         vol = inst.connection.create_volume(snap.volume_size,
                                             inst.placement, snap)
         add_tags(vol, snap.tags)
         vol.add_tag(config.get('DEFAULT', 'TAG_NAME'), 'temporary')
         volumes_to_delete.append(vol)
         dev_name = get_avail_dev(inst)
         logger.debug('Got avail {0} from {1}'.format(dev_name, inst))
         vol.attach(inst.id, dev_name)
         try:
             wait_for(vol, 'attached', ['attach_data', 'status'])
         except StateNotChangedError:
             logger.error('Attempt to attach as next device')
         else:
             break
     return vol, volumes_to_delete
Example #27
def mount_volume(vol, mkfs=False):
    """Mount the device by SSH. Return mountpoint on success.

    vol
        volume to be mounted on the instance it is attached to."""

    wait_for(vol, 'attached', ['attach_data', 'status'])
    inst = get_inst_by_id(vol.region.name, vol.attach_data.instance_id)
    key_filename = config.get(vol.region.name, 'KEY_FILENAME')
    with settings(host_string=inst.public_dns_name, key_filename=key_filename):
        dev = get_vol_dev(vol)
        mountpoint = dev.replace('/dev/', '/media/')
        wait_for_sudo('mkdir -p {0}'.format(mountpoint))
        if mkfs:
            sudo('mkfs.ext3 {dev}'.format(dev=dev))
        sudo('mount {dev} {mnt}'.format(dev=dev, mnt=mountpoint))
        if mkfs:
            sudo('chown -R {user}:{user} {mnt}'.format(user=env.user,
                                                       mnt=mountpoint))
    logger.debug('Mounted {0} to {1} at {2}'.format(vol, inst, mountpoint))
    return mountpoint
Example #28
def mount_volume(vol, mkfs=False):

    """Mount the device by SSH. Return mountpoint on success.

    vol
        volume to be mounted on the instance it is attached to."""

    wait_for(vol, 'attached', ['attach_data', 'status'])
    inst = get_inst_by_id(vol.region.name, vol.attach_data.instance_id)
    key_filename = config.get(vol.region.name, 'KEY_FILENAME')
    with settings(host_string=inst.public_dns_name, key_filename=key_filename):
        dev = get_vol_dev(vol)
        mountpoint = dev.replace('/dev/', '/media/')
        wait_for_sudo('mkdir -p {0}'.format(mountpoint))
        if mkfs:
            sudo('mkfs.ext3 {dev}'.format(dev=dev))
        sudo('mount {dev} {mnt}'.format(dev=dev, mnt=mountpoint))
        if mkfs:
            sudo('chown -R {user}:{user} {mnt}'.format(user=env.user,
                                                       mnt=mountpoint))
    logger.debug('Mounted {0} to {1} at {2}'.format(vol, inst, mountpoint))
    return mountpoint
Example #29
def deluser(name, region=None, instance_ids=None):
    """
    Removes user <name> with deluser from "host1;host2" list in <region>.
    If region and instance_ids are not set, the script takes hosts and key
    values from the command line (-H and -i).
    """
    if instance_ids and region:
        instances_ids = list(unicode(instance_ids).split(';'))
        for inst in instances_ids:
            if inst:
                _instance = get_inst_by_id(region, inst)
                if not env.key_filename:
                    key_filename = config.get(_instance.region.name,
                                                      'KEY_FILENAME')
                    env.update({'key_filename': key_filename,
                                                  'warn_only': True})
                env.update({'host_string': _instance.public_dns_name})
                env.username = name
                _sudo('deluser %(username)s' % env)
    else:
        env.update({'warn_only': True})
        env.username = name
        _sudo('deluser %(username)s' % env)
Example #30
def mount_snapshot(region_name, snap_id, inst_id=None):
    """Mount snapshot to temporary created instance or inst_id.

    region_name, snap_id
        specify snapshot.
    inst_id
        attach to an existing instance. A temporary instance will be
        created if None."""

    conn = get_region_conn(region_name)
    inst = get_inst_by_id(conn.region.name, inst_id) if inst_id else None
    snap = conn.get_all_snapshots(snapshot_ids=[
        snap_id,
    ])[0]

    info = ('\nYou may now SSH into the {inst} server, using:'
            '\n ssh -i {key} {user}@{inst.public_dns_name}')
    with attach_snapshot(snap, inst=inst) as (vol, mountpoint):
        if mountpoint:
            info += ('\nand browse snapshot, mounted at {mountpoint}.')
        else:
            info += ('\nand mount {device}. NOTE: device name may be '
                     'altered by system.')
        key_file = config.get(conn.region.name, 'KEY_FILENAME')
        inst = get_inst_by_id(conn.region.name, vol.attach_data.instance_id)
        assert inst
        logger.info(
            info.format(inst=inst,
                        user=env.user,
                        key=key_file,
                        device=vol.attach_data.device,
                        mountpoint=mountpoint))

        info = ('\nEnter FINISHED if you are finished looking at the '
                'backup and would like to cleanup: ')
        while raw_input(info).strip() != 'FINISHED':
            pass
Example #31
from boto.ec2.blockdevicemapping import BlockDeviceMapping, EBSBlockDeviceType, BlockDeviceType
from boto.exception import BotoServerError
from fabric.api import env, output, prompt, put, settings, sudo, task
from fabric.context_managers import hide
from pkg_resources import resource_stream

from django_fabfile import __name__ as pkg_name
from django_fabfile.security_groups import new_security_group
from django_fabfile.utils import (StateNotChangedError, add_tags, config,
                                  config_temp_ssh, get_descr_attr,
                                  get_inst_by_id, get_region_conn,
                                  get_snap_device, get_snap_instance,
                                  get_snap_time, timestamp, wait_for,
                                  wait_for_exists, wait_for_sudo)

USERNAME = config.get('DEFAULT', 'USERNAME')
env.update({'user': USERNAME, 'disable_known_hosts': True})

logger = logging.getLogger(__name__)

DETACH_TIME = config.getint('DEFAULT', 'MINUTES_FOR_DETACH') * 60
SNAP_TIME = config.getint('DEFAULT', 'MINUTES_FOR_SNAP') * 60


@task
def create_instance(region_name='us-east-1',
                    zone_name=None,
                    key_pair=None,
                    security_groups='',
                    architecture=None,
                    user_data=None,
Example #32
def create_instance(region_name='us-east-1',
                    zone_name=None,
                    key_pair=None,
                    security_groups='',
                    architecture=None,
                    user_data=None,
                    inst_type=None):
    """
    Create AWS EC2 instance.

    Return created instance.

    region_name
        by default will be created in the us-east-1 region;
    zone_name
        string-formatted name, may be omitted;
    key_pair
        name of key_pair to be granted access. Will be fetched from
        config by default, may be configured per region;
    security_groups
        list of AWS Security Groups names formatted as string separated
        with semicolon ';';
    architecture
        "i386" or "x86_64";
    inst_type
        by default will be fetched from the AMI description, or
        't1.micro' will be used if not mentioned in the description.
    """
    conn = get_region_conn(region_name)

    ami_ptrn = config.get(conn.region.name, 'AMI_PTRN')
    architecture = architecture or config.get('DEFAULT', 'ARCHITECTURE')
    ubuntu_aws_account = config.get('DEFAULT', 'UBUNTU_AWS_ACCOUNT')
    filters = {
        'owner_id': ubuntu_aws_account,
        'architecture': architecture,
        'name': ami_ptrn,
        'image_type': 'machine',
        'root_device_type': 'ebs'
    }
    images = conn.get_all_images(filters=filters)
    # Filter AMI by latest version.
    ptrn = re.compile(config.get(conn.region.name, 'AMI_REGEXP'))
    versions = set([ptrn.search(img.name).group('version') for img in images])

    def complement(year_month):
        return '0' + year_month if len(year_month) == 4 else year_month

    latest_version = sorted(set(filter(complement, versions)))[-1]  # XXX Y3K.
    ami_ptrn_with_version = config.get(conn.region.name,
                                       'AMI_PTRN_WITH_VERSION')
    name_with_version = ami_ptrn_with_version.format(version=latest_version)
    filters.update({'name': name_with_version})
    images = conn.get_all_images(filters=filters)
    # Filter AMI by latest release date.
    dates = set([ptrn.search(img.name).group('released_at') for img in images])
    latest_date = sorted(set(dates))[-1]
    ami_ptrn_with_release_date = config.get(conn.region.name,
                                            'AMI_PTRN_WITH_RELEASE_DATE')
    name_with_version_and_release = ami_ptrn_with_release_date.format(
        version=latest_version, released_at=latest_date)
    filters.update({'name': name_with_version_and_release})
    image = conn.get_all_images(filters=filters)[0]
    return launch_instance_from_ami(region_name,
                                    image.id,
                                    security_groups=security_groups,
                                    key_pair=key_pair,
                                    zone_name=zone_name,
                                    user_data=user_data,
                                    inst_type=inst_type)
Example #33
def create_ami(region,
               snap_id,
               force=None,
               root_dev='/dev/sda1',
               zone_name=None,
               default_arch=None,
               default_type='t1.micro',
               security_groups=''):
    """
    Creates AMI image from given snapshot.

    The force option removes the confirmation prompt and creates a new
    instance from the created AMI image.

    region, snap_id
        specify snapshot to be processed. Snapshot description in json
        format will be used to restore instance with same parameters.
        Will automatically process snapshots for the same instance taken
        within 10 minutes, but for other devices (/dev/sdb,
        /dev/sdc, etc);
    force
        Run instance from ami after creation without confirmation. To
        enable set value to "RUN";
    default_arch
        architecture to use if not mentioned in snapshot description;
    default_type
        instance type to use if not mentioned in snapshot description.
        Used only if ``force`` is "RUN";
    security_groups
        list of AWS Security Groups names formatted as string separated
        with semicolon ';'. Used only if ``force`` is "RUN".
    """
    conn = get_region_conn(region)
    snap = conn.get_all_snapshots(snapshot_ids=[
        snap_id,
    ])[0]
    instance_id = get_snap_instance(snap)
    _device = get_snap_device(snap)
    snaps = conn.get_all_snapshots(owner='self')
    snapshots = [
        snp for snp in snaps if get_snap_instance(snp) == instance_id
        and get_snap_device(snp) != _device and
        abs(get_snap_time(snap) - get_snap_time(snp)) <= timedelta(minutes=10)
    ]
    snapshot = sorted(snapshots, key=get_snap_time,
                      reverse=True) if snapshots else None
    # setup for building an EBS boot snapshot
    default_arch = default_arch or config.get('DEFAULT', 'ARCHITECTURE')
    arch = get_descr_attr(snap, 'Arch') or default_arch
    kernel = config.get(conn.region.name, 'KERNEL' + arch.upper())
    dev = re.match(r'^/dev/sda$', _device)  # matches if our instance is encrypted
    if dev:
        kernel = config.get(conn.region.name, 'KERNEL_ENCR_' + arch.upper())
    ebs = EBSBlockDeviceType()
    ebs.snapshot_id = snap_id
    ebs.delete_on_termination = True
    block_map = BlockDeviceMapping()
    block_map[_device] = ebs
    sdb = BlockDeviceType()
    sdb.ephemeral_name = 'ephemeral0'
    block_map['/dev/sdb'] = sdb

    if snapshot:
        for s in snapshot:
            s_dev = get_snap_device(s)
            s_ebs = EBSBlockDeviceType()
            s_ebs.delete_on_termination = True
            s_ebs.snapshot_id = s.id
            block_map[s_dev] = s_ebs

    name = 'Created {0} using access key {1}'.format(timestamp(),
                                                     conn.access_key)
    name = name.replace(":", ".").replace(" ", "_")

    # create the new AMI all options from snap JSON description:
    wait_for(snap, '100%', limit=SNAP_TIME)
    result = conn.register_image(
        name=name,
        description=snap.description,
        architecture=get_descr_attr(snap, 'Arch') or default_arch,
        root_device_name=get_descr_attr(snap, 'Root_dev_name') or root_dev,
        block_device_map=block_map,
        kernel_id=kernel)
    sleep(2)
    image = conn.get_all_images(image_ids=[
        result,
    ])[0]
    wait_for(image, 'available', limit=10 * 60)
    add_tags(image, snap.tags)

    logger.info('The new AMI ID = {0}'.format(result))

    info = ('\nEnter RUN if you want to launch instance using '
            'just created {0}: '.format(image))
    new_instance = None
    if force == 'RUN' or raw_input(info).strip() == 'RUN':
        instance_type = get_descr_attr(snap, 'Type') or default_type
        new_instance = launch_instance_from_ami(
            region,
            image.id,
            inst_type=instance_type,
            security_groups=security_groups,
            zone_name=zone_name)
    return image, new_instance
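A usage sketch for the task above; the snapshot id, zone and group names are placeholders:

# Register an AMI from the snapshot and, with force='RUN', immediately
# launch an instance from it without the interactive prompt.
image, new_inst = create_ami('us-east-1', 'snap-00000000', force='RUN',
                             zone_name='us-east-1a',
                             security_groups='web;ssh-only')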
Example #34
from boto.exception import EC2ResponseError
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from dateutil.tz import tzutc
from fabric.api import env, local, put, settings, sudo, task, run
from fabric.contrib.files import append

from django_fabfile.instances import (attach_snapshot, create_temp_inst,
                                      get_avail_dev, get_vol_dev, mount_volume)
from django_fabfile.utils import (
    StateNotChangedError, add_tags, config, config_temp_ssh,
    get_descr_attr, get_inst_by_id, get_region_conn, get_snap_device,
    get_snap_time, get_snap_vol, timestamp, wait_for, wait_for_sudo)


USERNAME = config.get('DEFAULT', 'USERNAME')
env.update({'user': USERNAME, 'disable_known_hosts': True})

logger = logging.getLogger(__name__)


DEFAULT_TAG_NAME = config.get('DEFAULT', 'TAG_NAME')
DEFAULT_TAG_VALUE = config.get('DEFAULT', 'TAG_VALUE')
DESCRIPTION_TAG = 'Description'
SNAP_STATUSES = ['pending', 'completed']    # All but "error".
VOL_STATUSES = ['creating', 'available', 'in-use']
DETACH_TIME = config.getint('DEFAULT', 'MINUTES_FOR_DETACH') * 60
SNAP_TIME = config.getint('DEFAULT', 'MINUTES_FOR_SNAP') * 60
REPLICATION_SPEED = config.getfloat('DEFAULT', 'REPLICATION_SPEED')

Example #35
 def create_inst_in_zone(zone, key_pair, sec_grps):
     inst = create_instance(zone.region.name, zone.name, key_pair=key_pair,
                            security_groups=sec_grps)
     inst.add_tag(config.get('DEFAULT', 'TAG_NAME'), 'temporary')
     return inst
Example #36
from boto.ec2.blockdevicemapping import BlockDeviceMapping, EBSBlockDeviceType, BlockDeviceType
from boto.exception import BotoServerError
from fabric.api import env, output, prompt, put, settings, sudo, task
from fabric.context_managers import hide
from pkg_resources import resource_stream

from django_fabfile import __name__ as pkg_name
from django_fabfile.security_groups import new_security_group
from django_fabfile.utils import (
    StateNotChangedError, add_tags, config, config_temp_ssh, get_descr_attr,
    get_inst_by_id, get_region_conn, get_snap_device, get_snap_instance,
    get_snap_time, timestamp, wait_for, wait_for_exists, wait_for_sudo,
    copy_ami_to_regions)


USERNAME = config.get('DEFAULT', 'USERNAME')
env.update({'user': USERNAME, 'disable_known_hosts': True})

logger = logging.getLogger(__name__)


DETACH_TIME = config.getint('DEFAULT', 'MINUTES_FOR_DETACH') * 60
SNAP_TIME = config.getint('DEFAULT', 'MINUTES_FOR_SNAP') * 60


@task
def create_instance(
        region_name='us-east-1', zone_name=None, key_pair=None,
        security_groups='', architecture=None, user_data=None, inst_type=None):
    """
    Create AWS EC2 instance.
Example #37
def create_encrypted_instance(region_name,
                              release='lucid',
                              volume_size='8',
                              architecture=None,
                              type='t1.micro',
                              name='encr_root',
                              pw1=None,
                              pw2=None,
                              security_groups=''):
    """
    Creates Ubuntu instance with LUKS-encrypted root volume.

    region_name
        Region where you want to create instance;
    release
        Ubuntu release name (lucid or natty). "lucid" by default;
    volume_size
        Size of volume in GB (remember that the script creates a 1 GB boot
        volume, so the minimal size of the whole volume is 3 GB: 1 GB for
        /boot and 2 GB for /). 8 by default;
    architecture
        "i386" or "x86_64".
    type
        Type of instance. 't1.micro' by default;
    name
        Name of luks encrypted volume. 'encr_root' by default;
    pw1, pw2
        You can specify passwords in parameters to suppress password prompt;
    security_groups
        List of AWS Security Groups names formatted as string separated
        with semicolon ';'.

    To unlock go to https://ip_address_of_instance (only after reboot
    or shutdown).
    You can set up to 8 passwords. Default boot.key and boot.crt are
    created for .amazonaws.com, so they must work for all instances. The
    creation process takes about 20 minutes."""
    assert volume_size >= 3, '1 GiB for /boot and 2 GiB for /'
    conn = get_region_conn(region_name)

    with config_temp_ssh(conn) as key_filename:
        key_pair = os.path.splitext(os.path.split(key_filename)[1])[0]
        zn = conn.get_all_zones()[-1]
        with create_temp_inst(zone=zn, key_pair=key_pair) as inst:
            vol = conn.create_volume(size=volume_size, zone=zn)
            dev = get_avail_dev_encr(inst)
            vol.attach(inst.id, dev)
            arch = architecture or config.get('DEFAULT', 'ARCHITECTURE')
            ubuntu_arch = 'amd64' if arch == 'x86_64' else arch
            make_encrypted_ubuntu(inst.public_dns_name, key_filename, 'ubuntu',
                                  ubuntu_arch, dev, name, release, pw1, pw2)
            description = dumps({
                'Volume': vol.id,
                'Region': vol.region.name,
                'Device': '/dev/sda',
                'Type': type,
                'Arch': arch,
                'Root_dev_name': '/dev/sda1',
                'Time': timestamp(),
            })
            snap = vol.create_snapshot(description)
            wait_for(snap, '100%', limit=SNAP_TIME)
            vol.detach(force=True)
            wait_for(vol, 'available', limit=DETACH_TIME)
            vol.delete()
            HTTPS_SG = config.get('DEFAULT', 'HTTPS_SECURITY_GROUP')
            security_groups = ';'.join([security_groups, HTTPS_SG])
            img, new_instance = create_ami(region_name,
                                           snap.id,
                                           'RUN',
                                           security_groups=security_groups)
            logger.info('\nTo unlock go to:\n   https://{0}\n'.format(
                new_instance.public_dns_name))
            img.deregister()
            snap.delete()
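A usage sketch; the region and group name are placeholders. The run takes roughly 20 minutes and prompts for the LUKS passwords unless pw1/pw2 are supplied:

create_encrypted_instance('us-east-1', release='lucid', volume_size='8',
                          architecture='x86_64', security_groups='web')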
Example #39
def create_ami(region, snap_id, force=None, root_dev='/dev/sda1', zone_name=None,
               default_arch=None, default_type='t1.micro', security_groups=''):
    """
    Creates AMI image from given snapshot.

    The force option removes the confirmation prompt and creates a new
    instance from the created AMI image.

    region, snap_id
        specify snapshot to be processed. Snapshot description in json
        format will be used to restore instance with same parameters.
        Will automatically process snapshots for the same instance taken
        within 10 minutes, but for other devices (/dev/sdb,
        /dev/sdc, etc);
    force
        Run instance from ami after creation without confirmation. To
        enable set value to "RUN";
    default_arch
        architecture to use if not mentioned in snapshot description;
    default_type
        instance type to use if not mentioned in snapshot description.
        Used only if ``force`` is "RUN";
    security_groups
        list of AWS Security Groups names formatted as string separated
        with semicolon ';'. Used only if ``force`` is "RUN".
    """
    conn = get_region_conn(region)
    snap = conn.get_all_snapshots(snapshot_ids=[snap_id, ])[0]
    instance_id = get_snap_instance(snap)
    _device = get_snap_device(snap)
    snaps = conn.get_all_snapshots(owner='self')
    snapshots = [snp for snp in snaps if
        get_snap_instance(snp) == instance_id and
        get_snap_device(snp) != _device and
        abs(get_snap_time(snap) - get_snap_time(snp)) <= timedelta(minutes=10)]
    snapshot = sorted(snapshots, key=get_snap_time,
                      reverse=True) if snapshots else None
    # setup for building an EBS boot snapshot
    default_arch = default_arch or config.get('DEFAULT', 'ARCHITECTURE')
    arch = get_descr_attr(snap, 'Arch') or default_arch
    kernel = config.get(conn.region.name, 'KERNEL' + arch.upper())
    dev = re.match(r'^/dev/sda$', _device)  # matches if our instance is encrypted
    if dev:
        kernel = config.get(conn.region.name, 'KERNEL_ENCR_' + arch.upper())
    ebs = EBSBlockDeviceType()
    ebs.snapshot_id = snap_id
    ebs.delete_on_termination = True
    block_map = BlockDeviceMapping()
    block_map[_device] = ebs
    sdb = BlockDeviceType()
    sdb.ephemeral_name = 'ephemeral0'
    block_map['/dev/sdb'] = sdb

    if snapshot:
        for s in snapshot:
            s_dev = get_snap_device(s)
            s_ebs = EBSBlockDeviceType()
            s_ebs.delete_on_termination = True
            s_ebs.snapshot_id = s.id
            block_map[s_dev] = s_ebs

    name = 'Created {0} using access key {1}'.format(timestamp(),
                                                     conn.access_key)
    name = name.replace(":", ".").replace(" ", "_")

    # Create the new AMI from the options in the snapshot's JSON description:
    wait_for(snap, '100%', limit=SNAP_TIME)
    result = conn.register_image(
        name=name,
        description=snap.description,
        architecture=get_descr_attr(snap, 'Arch') or default_arch,
        root_device_name=get_descr_attr(snap, 'Root_dev_name') or root_dev,
        block_device_map=block_map, kernel_id=kernel)
    sleep(2)
    image = conn.get_all_images(image_ids=[result, ])[0]
    wait_for(image, 'available', limit=10 * 60)
    add_tags(image, snap.tags)

    logger.info('The new AMI ID = {0}'.format(result))

    new_instance = None
    if force == 'RUN':
        instance_type = get_descr_attr(snap, 'Type') or default_type
        new_instance = launch_instance_from_ami(
            region, image.id, inst_type=instance_type,
            security_groups=security_groups, zone_name=zone_name)
    return image, new_instance
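
For illustration only, a hypothetical call of the function above; the region name, snapshot id, and security group names are placeholders, not values taken from the example:

# Build an AMI from a snapshot and immediately launch an instance from it.
image, instance = create_ami('us-east-1', 'snap-12345678', force='RUN',
                             security_groups='default;ssh-only')
logger.info('Launched {0} from {1}'.format(instance.id, image.id))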
Example #40
def create_encrypted_instance(
    region_name, release='lucid', volume_size='8', architecture=None,
    type='t1.micro', name='encr_root', pw1=None, pw2=None, security_groups=''):
    """
    Creates ubuntu instance with luks-encryted root volume.

    region_name
        Region where you want to create instance;
    release
        Ubuntu release name (lucid or natty). "lucid" by default;
    volume_size
        Size of volume in Gb (always remember, that script creates boot volume
        with size 1Gb, so minimal size of whole volume is 3Gb (1Gb for /boot
        2Gb for /)). 8 by default;
    architecture
        "i386" or "x86_64".
    type
        Type of instance. 't1.micro' by default;
    name
        Name of luks encrypted volume. 'encr_root' by default;
    pw1, pw2
        You can specify passwords in parameters to suppress password prompt;
    security_groups
        List of AWS Security Groups names formatted as string separated
        with semicolon ';'.

    To unlock go to https://ip_address_of_instance (only after reboot
    or shutdown).
    You can set up to 8 passwords. Defaut boot.key and boot.crt created
    for .amazonaws.com so must work for all instances. Process of
    creation is about 20 minutes long."""
    # Fabric passes task arguments as strings, so coerce before comparing.
    volume_size = int(volume_size)
    assert volume_size >= 3, '1 GiB for /boot and 2 GiB for /'
    conn = get_region_conn(region_name)

    with config_temp_ssh(conn) as key_filename:
        key_pair = os.path.splitext(os.path.split(key_filename)[1])[0]
        zn = conn.get_all_zones()[-1]
        with create_temp_inst(zone=zn, key_pair=key_pair) as inst:
            vol = conn.create_volume(size=volume_size, zone=zn)
            dev = get_avail_dev_encr(inst)
            vol.attach(inst.id, dev)
            arch = architecture or config.get('DEFAULT', 'ARCHITECTURE')
            ubuntu_arch = 'amd64' if arch == 'x86_64' else arch
            make_encrypted_ubuntu(inst.public_dns_name, key_filename, 'ubuntu',
                                  ubuntu_arch, dev, name, release, pw1, pw2)
            description = dumps({
                'Volume': vol.id,
                'Region': vol.region.name,
                'Device': '/dev/sda',
                'Type': type,
                'Arch': arch,
                'Root_dev_name': '/dev/sda1',
                'Time': timestamp(),
            })
            snap = vol.create_snapshot(description)
            wait_for(snap, '100%', limit=SNAP_TIME)
            vol.detach(force=True)
            wait_for(vol, 'available', limit=DETACH_TIME)
            vol.delete()
            HTTPS_SG = config.get('DEFAULT', 'HTTPS_SECURITY_GROUP')
            security_groups = ';'.join([security_groups, HTTPS_SG])
            img, new_instance = create_ami(region_name, snap.id, 'RUN',
                                           security_groups=security_groups)
            logger.info('\nTo unlock go to:\n   https://{0}\n'
                        .format(new_instance.public_dns_name))
            img.deregister()
            snap.delete()
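
A hedged usage sketch of the task above; the region, size, and passwords are placeholders (supplying pw1/pw2 suppresses the interactive passphrase prompt):

create_encrypted_instance('eu-west-1', release='lucid', volume_size='10',
                          architecture='x86_64', name='encr_root',
                          pw1='passphrase', pw2='passphrase')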
Example #41
                 'All rights reserved.').format(date.today().year)

# Set up a specific logger with desired output level
LOG_FORMAT = '%(asctime)-15s %(levelname)s:%(message)s'
LOG_DATEFORMAT = '%Y-%m-%d %H:%M:%S %Z'

logger = logging.getLogger()

debug = config.getboolean('DEFAULT', 'DEBUG')
if debug:
    logger.setLevel(logging.DEBUG)
    output['debug'] = True
else:
    logger.setLevel(logging.INFO)

logging_folder = config.get('DEFAULT', 'LOGGING_FOLDER')
if logging_folder:
    LOG_FILENAME = os.path.join(logging_folder, __name__ + '.log')
    handler = logging.handlers.TimedRotatingFileHandler(LOG_FILENAME,
                                                        'midnight',
                                                        backupCount=30)

    class StreamLogger(object):

        """File-like object that forwards each written line to the logger."""

        def __init__(self, level=logging.INFO):
            self.logger = logging.getLogger(__name__)
            self.level = level

        def write(self, row):
            row = row.strip()
            if row:
                self.logger.log(self.level, row)
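
The snippet above is truncated before `handler` and `StreamLogger` are actually used. A minimal sketch of the likely continuation inside the `if logging_folder:` block; the formatter and the stdout/stderr redirection are assumptions, not part of the original source:

    import sys  # would normally live at the top of the module

    # Attach the rotating file handler using the format defined above.
    handler.setFormatter(logging.Formatter(LOG_FORMAT, LOG_DATEFORMAT))
    logger.addHandler(handler)

    # Mirror anything printed to stdout/stderr into the log file as well
    # (a no-op flush() method may be needed on some Python versions).
    sys.stdout = StreamLogger(logging.INFO)
    sys.stderr = StreamLogger(logging.ERROR)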
Example #42
from fabric.api import env, settings, sudo, abort, put, task
from os.path import isfile as _isfile

from django_fabfile.utils import config, get_inst_by_id

try:
    preconfigured_user = config.get('DEFAULT', 'USERNAME')
except:
    pass  # Expecting user to be provided as `-u` option.
else:
    FABRIC_DEFAULT_USER = '******'  # XXX `-u user` will be overridden.
    if env['user'] == FABRIC_DEFAULT_USER:  # Not provided as `-u` option.
        env.update({'user': preconfigured_user})

env.update({'disable_known_hosts': True})


def _sudo(cmd):
    """ Shows output of cmd and allows interaction """
    sudo(cmd, shell=False, pty=True)


def _create_account(username, region, instance_ids, passwordless, sudo):
    if not _isfile(username + '.pub'):
        abort("%s.pub does not exist" % username)
    env.ssh_key = username + '.pub'
    env.username = username  # Own attribute for string formatting.
    if passwordless:
        _sudo('adduser --disabled-password %(username)s' % env)
        if sudo:
            _sudo('sed -i "s/# %sudo ALL=NOPASSWD: ALL/'
Example #46
from fabric.api import env, sudo, settings, task
from fabric.state import output

from django_fabfile.utils import config

master = config.get('RDBMS', 'MASTER')
backup = config.get('RDBMS', 'BACKUP')
username = config.get('DEFAULT', 'USERNAME')
pcp_password = config.get('RDBMS', 'PCP_PASSWORD')

env.update({'disable_known_hosts': True, 'user': username, 'warn_only': True})
output.update({'running': False})


def return_(master, backup, node_id):
    with settings(host_string=master):
        sudo('su postgres -c "pcp_attach_node 60 127.0.0.1 9898 postgres'
             ' {pcp_password} {node_id}"'.format(node_id=node_id,
                                                 pcp_password=pcp_password))
    with settings(host_string=backup):
        sudo('su postgres -c "pcp_attach_node 60 127.0.0.1 9898 postgres'
             ' {pcp_password} {node_id}"'.format(node_id=node_id,
                                                 pcp_password=pcp_password))


def failover(new_primary_host, old_primary_host, failed_node_id,
             master_node_id):
    trigger = '/var/log/pgpool/trigger/trigger_file1'
    with settings(host_string=new_primary_host):
        sudo('su postgres -c "touch {trigger}"'.format(trigger=trigger))
        sudo('su postgres -c "/usr/local/etc/dnsmadeeasy-update.sh'