Example No. 1
def get_volumes(module, ec2):

    instance = module.params.get("instance")

    try:
        if not instance:
            vols = ec2.get_all_volumes()
        else:
            vols = ec2.get_all_volumes(filters={"attachment.instance-id": instance})
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
Example No. 2
def find_volume(ec2, name):
    """Looks up the EBS volume with a given Name tag"""
    try:
        # keep only volumes whose status is 'available' and return the first match's id
        return list(filter(lambda volume: volume.status == 'available',
                           ec2.get_all_volumes(filters={"tag:Name": name})))[0].id
    except Exception as e:
        LOG.exception(e)
        sys.exit(2)
Example No. 3
File: ec2-gc.py Project: hisaki/osv
def process_region(region):
    print "Processing region %s" % region.name

    ec2 = region.connect()

    print "Scanning instances...\n"

    instances = ec2.get_only_instances()
    for inst in instances:
        inst_accessor = Instance(inst)
        process_instance(inst_accessor)

    print "\nScanning images...\n"

    images = ec2.get_all_images(owners=["self"])
    for image in images:
        image_accessor = Image(image)
        process_image(image_accessor)

    print "\nScanning volumes...\n"

    volumes = ec2.get_all_volumes()
    for volume in volumes:
        volume_accessor = Volume(volume)
        process_volume(volume_accessor)

    print "\nScanning snapshots...\n"

    snapshots = ec2.get_all_snapshots(owner="self")
    for snapshot in snapshots:
        process_snapshot(Snapshot(snapshot))
Example No. 4
def get_volume(module, ec2):
    name = module.params.get('name')
    id = module.params.get('id')
    zone = module.params.get('zone')
    filters = {}
    volume_ids = None

    # If no name or id supplied, just try volume creation based on module parameters
    if id is None and name is None:
        return None

    if zone:
        filters['availability_zone'] = zone
    if name:
        filters = {'tag:Name': name}
    if id:
        volume_ids = [id]
    try:
        vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))

    if not vols:
        if id:
            msg = "Could not find the volume with id: %s" % id
            if name:
                msg += (" and name: %s" % name)
            module.fail_json(msg=msg)
        else:
            return None

    if len(vols) > 1:
        module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name)
    return vols[0]
Example No. 5
def check_backups(max_age, environment, cluster, role):
    import dateutil.parser
    import pytz

    ec2 = get_ec2_conn()
    now = datetime.utcnow().replace(tzinfo=pytz.utc)

    tag_filters = {
        'tag:environment': environment,
        'tag:cluster': cluster,
        'tag:role': role,
        'status': 'in-use',
    }

    vols = ec2.get_all_volumes(filters=tag_filters)

    ids = []
    mountpoints = defaultdict(list)

    for vol in vols:
        ids.append(vol.id)
        mountpoints[vol.tags['mount_point']].append(vol.id)

    # filter snapshots with 1 day resolution
    limit = (datetime.now() - timedelta(days=(max_age - 1) // (24 * 3600)))

    snaps = ec2.get_all_snapshots(
        filters={
            'volume_id': ids,
            'start-time': "{}*".format(limit.date().isoformat())
        }
    )
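    # A mountpoint is moved from `mountpoints` into `dones` once one of its volumes
    # has a recent-enough snapshot; anything still left in `mountpoints` at the end
    # is reported below as missing a backup.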
    dones = {}
    for snap in snaps:
        mp = snap.tags['mount_point']
        start_time = dateutil.parser.parse(snap.start_time)

        # do a finer grain age check in python
        if (now - start_time) > timedelta(seconds=max_age):
            continue

        logger.info("Checking mountpoint {}".format(mp))

        # pop any accounted for mountpoints
        if snap.volume_id in mountpoints[mp]:
            dones[mp] = mountpoints.pop(mp)
        else:
            if mp in dones:
                mountpoints.pop(mp)

    if len(mountpoints.keys()) > 0:
        logger.warning("Some volumes are missing a recent snapshot \
            (cluster={}, env={}, role={}):".format(cluster, environment, role))

        for mp in mountpoints:
            logger.warning("\t* {} on volume(s) {}".format(
                mp, ", ".join(mountpoints[mp])))

    return len(mountpoints.keys())
Example No. 6
def attach_volume(
        region='us-west-2', volume_id=None, instance_id=None,
        device_name='/dev/xvdf'):

    ec2 = boto.ec2.connect_to_region(region)
    volume = ec2.get_all_volumes(volume_ids=volume_id)[0]
    volume.attach(instance_id, device_name)
    return volume
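# A minimal usage sketch for the helper above (not from the original source); the
# volume and instance ids are placeholders. boto's Volume.update() refreshes the
# object, so the attachment can be polled until it reports 'attached'.
if __name__ == '__main__':
    import time

    vol = attach_volume(region='us-west-2', volume_id='vol-0123456789abcdef0',
                        instance_id='i-0123456789abcdef0', device_name='/dev/xvdf')
    while vol.attachment_state() != 'attached':
        time.sleep(5)
        vol.update()
    print "Attached %s as %s" % (vol.id, vol.attach_data.device)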
Example No. 7
def restore_volumes():
    ec2 = boto.ec2.connect_to_region(region)
    volume_id = 'vol-f95619ec'
    bad_volume = ec2.get_all_volumes([volume_id])[0]
    snaps = bad_volume.snapshots() 
    snaps.sort(key=lambda snap: snap.start_time)
    latest_snap = snaps[-1]
    new_volume = ec2.create_volume(bad_volume.size, bad_volume.zone, latest_snap)
Example No. 8
def ec2_delete_volume(ec2):
    volumes = ec2.get_all_volumes()
    for v in volumes:
        try:
            if v.status == 'available':
                print "---Deleting Volume: " + v.id
                ec2.delete_volume(v.id)
        except Exception as e:
            print(e)
Example No. 9
def check_ebs(account, region, ec2):
    logging.info('checking ebs')
    try:
        for vol in ec2.get_all_volumes():
            logging.info('volume id: {}'.format(vol.id))
            check_instance_tags(account, region, 'ebs', vol.id, vol.tags)

    except EC2ResponseError as e:
        logging.warning('exception: {}'.format(e.error_message))
Example No. 10
def get_volume_by_id(module, ec2, id):
    zone = module.params.get('zone')
    filters = {}
    if zone:
        filters['availability_zone'] = zone
    volume_ids = [id]
    try:
        vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
Example No. 11
def get_volumes(module, ec2):
    instance = module.params.get('instance')

    if not instance:
        module.fail_json(msg = "Instance must be specified to get volumes")

    try:
        vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance})
    except boto.exception.BotoServerError as e:
        module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
Example No. 12
def create_from_volume_id(cls, region_name, volume_id, name):
    vol = None
    ec2 = boto.ec2.connect_to_region(region_name)
    rs = ec2.get_all_volumes([volume_id])
    if len(rs) == 1:
        v = rs[0]
        vol = cls()
        vol.volume_id = v.id
        vol.name = name
        vol.region_name = v.region.name
        vol.put()
    return vol
Example No. 13
def clone_instance(instance):
    
	new_bdm = None
	ec2 = instance.connection

	if instance.block_device_mapping:
	
		root_device_name = instance.get_attribute('rootDeviceName')['rootDeviceName']
		user_data = instance.get_attribute('userData')['userData']
		# user_data comes back base64 encoded.  Need to decode it so it
		# can get re-encoded by run_instance !
		user_data = base64.b64decode(user_data)
		new_bdm = BlockDeviceMapping()

		for dev in instance.block_device_mapping:

			# if this entry is about the root device, skip it
			if dev != root_device_name:

				bdt = instance.block_device_mapping[dev]

				if bdt.volume_id:

					volume = ec2.get_all_volumes([bdt.volume_id])[0]
					snaps = volume.snapshots()

					if len(snaps) == 0:

						print 'No snapshots available for %s' % volume.id
					else:

						# sort the list of snapshots, newest is at the end now
						snaps.sort(key=lambda snap: snap.start_time)
						latest_snap = snaps[-1]
						new_bdt = BlockDeviceType()
						new_bdt.snapshot_id = latest_snap.id
						new_bdm[dev] = new_bdt

	return ec2.run_instances(
		instance.image_id,
		key_name=instance.key_name,
		security_groups=[g.name for g in instance.groups],
		user_data=user_data,
		instance_type=instance.instance_type,
		kernel_id=instance.kernel,
		ramdisk_id=instance.ramdisk,
		monitoring_enabled=instance.monitored,
		placement=instance.placement,
		block_device_map=new_bdm
	).instances[0]
Example No. 14
def get_all_volumes(instance_id, region, devices):
    """Return a list of all volumes attached to the given instance id."""

    ec2 = boto.ec2.connect_to_region(region)
    volumes = ec2.get_all_volumes()
    instance_volumes = []
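    # Client-side filter: keep only volumes whose attachment data points at this
    # instance and at one of the requested device names.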
    for vol in volumes:
        if (vol.attach_data is None or
            vol.attach_data.instance_id != instance_id or
            vol.attach_data.device not in devices):
            continue

        instance_volumes.append(vol)

    return instance_volumes
Example No. 15
def get_volume(module, ec2):
    name = module.params.get('name')
    id = module.params.get('id')
    zone = module.params.get('zone')
    filters = {}
    volume_ids = None
    if zone:
        filters['availability_zone'] = zone
    if name:
        filters = {'tag:Name': name}
    if id:
        volume_ids = [id]
    try:
        vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
Example No. 16
def find_volume(ec2, name):
    """Looks up the EBS volume with a given Name tag"""
    try:
        volumes = list(ec2.get_all_volumes(filters={
            'tag:Name': name,
            'status': 'available',
            'availability-zone': zone()}))
    except Exception as e:
        logging.exception(e)
        sys.exit(2)
    if not volumes:
        logging.error('No matching EBS volume with name %s found.', name)
        sys.exit(2)
    elif len(volumes) > 1:
        logging.warning('More than one EBS volume with name %s found.', name)
        volumes.sort(key=lambda v: v.id)
    return volumes[0].id
Example No. 17
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            volume_id = dict(),
            description = dict(),
            instance_id = dict(),
            snapshot_id = dict(),
            device_name = dict(),
            wait = dict(type='bool', default='true'),
            wait_timeout = dict(default=0),
            snapshot_tags = dict(type='dict', default=dict()),
            state = dict(choices=['absent','present'], default='present'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    volume_id = module.params.get('volume_id')
    snapshot_id = module.params.get('snapshot_id')
    description = module.params.get('description')
    instance_id = module.params.get('instance_id')
    device_name = module.params.get('device_name')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    snapshot_tags = module.params.get('snapshot_tags')
    state = module.params.get('state')

    if not volume_id and not instance_id and not snapshot_id or volume_id and instance_id and snapshot_id:
        module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified')
    if instance_id and not device_name or device_name and not instance_id:
        module.fail_json(msg='Instance ID and device name must both be specified')

    ec2 = ec2_connect(module)

    if instance_id:
        try:
            volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
            if not volumes:
                module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))
            volume_id = volumes[0].id
        except boto.exception.BotoServerError as e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
Example No. 18
def getavailablevolumes(region):
    ec2 = boto.connect_ec2(region=region)
    vol = ec2.get_all_volumes()

    print region
    print vol
    vols = {}
    # Get a list of unattached volumes
    for unattachedvol in vol:
        state = unattachedvol.attachment_state()

        if state is None:
            print unattachedvol.id, state
            vols[unattachedvol.id] = unattachedvol
        else:
            vols[unattachedvol.id] = None
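    # Attached volumes are kept in the dict as None placeholders, so callers can
    # tell which ids were seen but are currently in use.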

    return vols
Example No. 19
File: aws.py Project: Deltares/dcs
def get_storage_usage(instances):
    ec2 = boto.ec2.connect_to_region(settings.aws_region,
                                     aws_access_key_id=settings.aws_access,
                                     aws_secret_access_key=settings.aws_secret)

    if not ec2:
        logging.error('Cannot connect to region %s' % settings.aws_region)
        return None
    try:
        total = 0
        volumes = ec2.get_all_volumes()
        for volume in volumes:
            if volume.attach_data.instance_id in instances:
                total += volume.size
        return total
    except Exception as e:
        logging.exception('Could not get attributes (%s)' % e)
        return None
Example No. 20
def create_from_snapshot(self, name, snapshot, size=None):
    if size < self.size:
        size = self.size
    ec2 = self.get_ec2_connection()
    if self.zone_name is None or self.zone_name == '':
        # deal with the migration case where the zone is not set in the logical volume:
        current_volume = ec2.get_all_volumes([self.volume_id])[0]
        self.zone_name = current_volume.zone
    ebs_volume = ec2.create_volume(size, self.zone_name, snapshot)
    v = Volume()
    v.ec2 = self.ec2
    v.volume_id = ebs_volume.id
    v.name = name
    v.mount_point = self.mount_point
    v.device = self.device
    v.region_name = self.region_name
    v.zone_name = self.zone_name
    v.put()
    return v
Example No. 21
def create_from_snapshot(self, name, snapshot, size=None):
    if size < self.size:
        size = self.size
    ec2 = self.get_ec2_connection()
    if self.zone_name is None or self.zone_name == '':
        # deal with the migration case where the zone is not set in the logical volume:
        current_volume = ec2.get_all_volumes([self.volume_id])[0]
        self.zone_name = current_volume.zone
    ebs_volume = ec2.create_volume(size, self.zone_name, snapshot)
    v = Volume()
    v.ec2 = self.ec2
    v.volume_id = ebs_volume.id
    v.name = name
    v.mount_point = self.mount_point
    v.device = self.device
    v.region_name = self.region_name
    v.zone_name = self.zone_name
    v.put()
    return v
Example No. 22
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            volume_id=dict(),
            description=dict(),
            instance_id=dict(),
            device_name=dict(),
            wait=dict(type="bool", default="true"),
            wait_timeout=dict(default=0),
            snapshot_tags=dict(type="dict", default=dict()),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)

    volume_id = module.params.get("volume_id")
    description = module.params.get("description")
    instance_id = module.params.get("instance_id")
    device_name = module.params.get("device_name")
    wait = module.params.get("wait")
    wait_timeout = module.params.get("wait_timeout")
    snapshot_tags = module.params.get("snapshot_tags")

    if not volume_id and not instance_id or volume_id and instance_id:
        module.fail_json(msg="One and only one of volume_id or instance_id must be specified")
    if instance_id and not device_name or device_name and not instance_id:
        module.fail_json(msg="Instance ID and device name must both be specified")

    ec2 = ec2_connect(module)

    if instance_id:
        try:
            volumes = ec2.get_all_volumes(
                filters={"attachment.instance-id": instance_id, "attachment.device": device_name}
            )
            if not volumes:
                module.fail_json(
                    msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id)
                )
            volume_id = volumes[0].id
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
Example No. 23
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            volume_id = dict(),
            description = dict(),
            instance_id = dict(),
            snapshot_id = dict(),
            device_name = dict(),
            wait = dict(type='bool', default='true'),
            wait_timeout = dict(default=0),
            snapshot_tags = dict(type='dict', default=dict()),
            state = dict(choices=['absent','present'], default='present'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)

    volume_id = module.params.get('volume_id')
    snapshot_id = module.params.get('snapshot_id')
    description = module.params.get('description')
    instance_id = module.params.get('instance_id')
    device_name = module.params.get('device_name')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    snapshot_tags = module.params.get('snapshot_tags')
    state = module.params.get('state')

    if not volume_id and not instance_id and not snapshot_id or volume_id and instance_id and snapshot_id:
        module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified')
    if instance_id and not device_name or device_name and not instance_id:
        module.fail_json(msg='Instance ID and device name must both be specified')

    ec2 = ec2_connect(module)

    if instance_id:
        try:
            volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
            if not volumes:
                module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))
            volume_id = volumes[0].id
        except boto.exception.BotoServerError as e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
Example No. 24
def find_volume(ec2, name):
    """Looks up the EBS volume with a given Name tag"""
    try:
        volumes = list(
            ec2.get_all_volumes(
                filters={
                    'tag:Name': name,
                    'status': 'available',
                    'availability-zone': zone()
                }))
    except Exception as e:
        logging.exception(e)
        sys.exit(2)
    if not volumes:
        logging.error('No matching EBS volume with name %s found.', name)
        sys.exit(2)
    elif len(volumes) > 1:
        logging.warning('More than one EBS volume with name %s found.', name)
        volumes.sort(key=lambda v: v.id)
    return volumes[0].id
Example No. 25
def create_snapshot(module,
                    ec2,
                    state=None,
                    description=None,
                    wait=None,
                    wait_timeout=None,
                    volume_id=None,
                    instance_id=None,
                    snapshot_id=None,
                    device_name=None,
                    snapshot_tags=None,
                    last_snapshot_min_age=None):
    snapshot = None
    changed = False

    required = [volume_id, snapshot_id, instance_id]
    if required.count(None) != len(required) - 1:  # only 1 must be set
        module.fail_json(
            msg=
            'One and only one of volume_id or instance_id or snapshot_id must be specified'
        )
    if instance_id and not device_name or device_name and not instance_id:
        module.fail_json(
            msg='Instance ID and device name must both be specified')

    if instance_id:
        try:
            volumes = ec2.get_all_volumes(
                filters={
                    'attachment.instance-id': instance_id,
                    'attachment.device': device_name
                })
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

        if not volumes:
            module.fail_json(
                msg="Could not find volume with name %s attached to instance %s"
                % (device_name, instance_id))

        volume_id = volumes[0].id
Example No. 26
def get_volume(module, ec2):
    name = module.params.get('name')
    id = module.params.get('id')
    zone = module.params.get('zone')
    filters = {}
    volume_ids = None

    # If no name or id supplied, just try volume creation based on module parameters
    if id is None and name is None:
        return None

    if zone:
        filters['availability_zone'] = zone
    if name:
        filters = {'tag:Name': name}
    if id:
        volume_ids = [id]
    try:
        vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
Example No. 27
def get_volume(module, ec2):
    name = module.params.get("name")
    id = module.params.get("id")
    zone = module.params.get("zone")
    filters = {}
    volume_ids = None

    # If no name or id supplied, just try volume creation based on module parameters
    if id is None and name is None:
        return None

    if zone:
        filters["availability_zone"] = zone
    if name:
        filters = {"tag:Name": name}
    if id:
        volume_ids = [id]
    try:
        vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
Example No. 28
def set_metadata(ec2, instance_id, zone, metadata):
    if 'instance' in metadata:
        instance_metadata = metadata['instance']
        resources = [instance_id]

        # append zone to the name before tagging
        if 'Name' in instance_metadata and zone != '':
            instance_metadata['Name'] += ' - ' + zone

        if not ec2.create_tags(resources, instance_metadata):
            print >> sys.stderr, "Couldn't tag instance: " + instance_id
            raise Exception("Couldn't tag instance: " + instance_id);

        # this seems inefficient - there should be a way to query instance volumes directly
        volumes = [v.id for v in ec2.get_all_volumes() if v.attach_data.instance_id == instance_id]

        # metadata keys are case-sensitive - we assume that if the user wants to tag the name
        # of assets, they've used 'Name' because that's the only one that works
        if 'Name' in instance_metadata:
            if not ec2.create_tags(volumes, {'Name': instance_metadata['Name']}):
                print >> sys.stderr, "Couldn't tag volumes with instance name: " + instance_metadata['Name']
                raise Exception("Couldn't tag volumes with instance name: " + instance_metadata['Name']);
Example No. 29
def create_snapshot(module, ec2, state=None, description=None, wait=None,
                    wait_timeout=None, volume_id=None, instance_id=None,
                    snapshot_id=None, device_name=None, snapshot_tags=None,
                    last_snapshot_min_age=None):
    snapshot = None
    changed = False

    required = [volume_id, snapshot_id, instance_id]
    if required.count(None) != len(required) - 1: # only 1 must be set
        module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified')
    if instance_id and not device_name or device_name and not instance_id:
        module.fail_json(msg='Instance ID and device name must both be specified')

    if instance_id:
        try:
            volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
        except boto.exception.BotoServerError as e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))

        if not volumes:
            module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))

        volume_id = volumes[0].id
Example No. 30
def get_volume(module, ec2):
    name = module.params.get('name')
    id = module.params.get('id')
    zone = module.params.get('zone')
    filters = {}
    volume_ids = None

    # If no name or id supplied, just try volume creation based on module parameters
    if id is None and name is None:
        return None

    if zone:
        filters['availability_zone'] = zone
    if name:
        filters = {'tag:Name': name}
    if id:
        volume_ids = [id]
    try:
        vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    if not vols:
        if id:
            msg = "Could not find the volume with id: %s" % id
            if name:
                msg += (" and name: %s" % name)
            module.fail_json(msg=msg)
        else:
            return None

    if len(vols) > 1:
        module.fail_json(
            msg=
            "Found more than one volume in zone (if specified) with name: %s" %
            name)
    return vols[0]
Example No. 31
def get_entities_for_region(self, region):
    ec2 = boto.connect_ec2(self.access_key_id, self.secret_access_key, region=region)
    return ec2.get_all_volumes()
Example No. 32
def create_snapshot(module, ec2, state=None, description=None, wait=None,
                    wait_timeout=None, volume_id=None, instance_id=None,
                    snapshot_id=None, device_name=None, snapshot_tags=None,
                    last_snapshot_min_age=None):
    snapshot = None
    changed = False

    required = [volume_id, snapshot_id, instance_id]
    if required.count(None) != len(required) - 1: # only 1 must be set
        module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified')
    if instance_id and not device_name or device_name and not instance_id:
        module.fail_json(msg='Instance ID and device name must both be specified')

    if instance_id:
        try:
            volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
        except boto.exception.BotoServerError as e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))

        if not volumes:
            module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))

        volume_id = volumes[0].id

    if state == 'absent':
        if not snapshot_id:
            module.fail_json(msg = 'snapshot_id must be set when state is absent')
        try:
            ec2.delete_snapshot(snapshot_id)
        except boto.exception.BotoServerError as e:
            # exception is raised if snapshot does not exist
            if e.error_code == 'InvalidSnapshot.NotFound':
                module.exit_json(changed=False)
            else:
                module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))

        # successful delete
        module.exit_json(changed=True)

    if last_snapshot_min_age > 0:
        try:
            current_snapshots = ec2.get_all_snapshots(filters={'volume_id': volume_id})
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

        last_snapshot_min_age = last_snapshot_min_age * 60 # Convert to seconds
        snapshot = _get_most_recent_snapshot(current_snapshots,
                                             max_snapshot_age_secs=last_snapshot_min_age)
    try:
        # Create a new snapshot if we didn't find an existing one to use
        if snapshot is None:
            snapshot = ec2.create_snapshot(volume_id, description=description)
            changed = True
        if wait:
            if not _create_with_wait(snapshot, wait_timeout):
                module.fail_json(msg='Timed out while creating snapshot.')
        if snapshot_tags:
            for k, v in snapshot_tags.items():
                snapshot.add_tag(k, v)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    module.exit_json(changed=changed,
                     snapshot_id=snapshot.id,
                     volume_id=snapshot.volume_id,
                     volume_size=snapshot.volume_size,
                     tags=snapshot.tags.copy())
Example No. 33
def postgresql_install(id, node_dict, stage, **options):
    """ Installs postgreSQL """

    if _postgresql_is_installed():
        fabric.api.warn(
            fabric.colors.yellow('PostgreSQL is already installed.'))
        return

    config = get_provider_dict()
    if 'slave' in options:
        master = config['machines'][stage][options['slave']]
        options.update(master['services']['postgresql'])

    package_add_repository('ppa:pitti/postgresql')
    package_install(['postgresql', 'python-psycopg2'])

    # Figure out cluster name
    output = fabric.api.run('pg_lsclusters -h')
    version, cluster = output.split()[:2]

    if 'ec2' in fabric.api.env.conf['PROVIDER']:
        if not options.get('simple'):
            package_install('xfsprogs')
            package_install('mdadm', '--no-install-recommends')

            # Create two ebs volumes
            import boto.ec2
            ec2 = boto.ec2.connect_to_region(
                config['location'][:-1],
                aws_access_key_id=fabric.api.env.conf['AWS_ACCESS_KEY_ID'],
                aws_secret_access_key=fabric.api.env.
                conf['AWS_SECRET_ACCESS_KEY'])

            tag1 = u'%s-1' % id
            tag2 = u'%s-2' % id
            if not any(vol for vol in ec2.get_all_volumes()
                       if vol.tags.get(u'Name') == tag1):
                volume1 = ec2.create_volume(
                    options.get('max-size', 10) / 2, config['location'])
                volume1.add_tag('Name', tag1)
                volume1.attach(node_dict['id'], '/dev/sdf')
            if not any(vol for vol in ec2.get_all_volumes()
                       if vol.tags.get(u'Name') == tag2):
                volume2 = ec2.create_volume(
                    options.get('max-size', 10) / 2, config['location'])
                volume2.add_tag('Name', tag2)
                volume2.attach(node_dict['id'], '/dev/sdg')

            time.sleep(10)

            # RAID 0 together the EBS volumes, and format the result as xfs.  Mount at /data.
            if not fabric.contrib.files.exists('/dev/md0', True):
                fabric.api.sudo(
                    'mdadm --create /dev/md0 --level=0 --raid-devices=2 /dev/sdf /dev/sdg'
                )
                fabric.api.sudo('mkfs.xfs /dev/md0')

            # Add mountpoint
            if not fabric.contrib.files.exists('/data'):
                fabric.api.sudo('mkdir -p /data')
                fabric.api.sudo('chown postgres:postgres /data')
                fabric.api.sudo('chmod 644 /data')

            # Add to fstab and mount
            append('/etc/fstab', '/dev/md0  /data  auto  defaults  0  0', True)
            with fabric.api.settings(warn_only=True):
                fabric.api.sudo('mount /data')

            # Move cluster/dbs to /data
            if fabric.api.run('pg_lsclusters -h').split()[5] != '/data':
                fabric.api.sudo('pg_dropcluster --stop %s %s' %
                                (version, cluster))
                fabric.api.sudo(
                    'pg_createcluster --start -d /data -e UTF-8 %s %s' %
                    (version, cluster))

    else:
        fabric.api.warn(
            fabric.colors.yellow(
                'PostgreSQL advanced drive setup (RAID 0 + XFS) is not currently supported on non-ec2 instances'
            ))

    fabric.api.sudo('service postgresql stop')

    # Set up postgres config files - Allow global listening (have a firewall!) and local ubuntu->your user connections
    pg_dir = '/etc/postgresql/%s/%s/' % (version, cluster)
    fabric.contrib.files.comment(pg_dir + 'postgresql.conf',
                                 'listen_addresses', True)
    append(pg_dir + 'postgresql.conf', "listen_addresses = '*'", True)

    append(pg_dir + 'pg_hba.conf', "host all all 0.0.0.0/0 md5", True)
    fabric.contrib.files.sed(pg_dir + 'pg_hba.conf',
                             "ident",
                             "trust",
                             use_sudo=True)

    # Figure out if we're a master
    if 'slave' not in options and any(
            'slave' in values.get('services', {}).get('postgresql', {})
            for name, values in config['machines'][stage].iteritems()):
        # We're a master!

        append(pg_dir + 'postgresql.conf', [
            'wal_level = hot_standby', 'max_wal_senders = 1',
            'checkpoint_segments = 8', 'wal_keep_segments = 8'
        ], True)

        append(pg_dir + 'pg_hba.conf', "host replication all 0.0.0.0/0 md5",
               True)

    elif 'slave' in options:
        # We're a slave!

        append(pg_dir + 'postgresql.conf', [
            'hot_standby = on', 'checkpoint_segments = 8',
            'wal_keep_segments = 8'
        ], True)

        #fabric.api.sudo('rm -rf /data/*')
        append('/data/recovery.conf', [
            "standby_mode = 'on'",
            "primary_conninfo = 'host=%s port=5432 user=%s password=%s'" %
            (master['public_ip'][0], options['user'], options['password']),
            "trigger_file = '/data/failover'"
        ], True)

        fabric.api.local(
            '''ssh -i %s ubuntu@%s sudo tar czf - /data | ssh -i deploy/nbc-west.pem ubuntu@%s sudo tar xzf - -C /'''
            % (fabric.api.env.key_filename[0], master['public_ip'][0],
               node_dict['public_ip'][0]))
        fabric.api.sudo('chown -R postgres:postgres /data')

    fabric.api.sudo('service postgresql start')
Example No. 34
def postgresql_install(id, node_dict, stage, **options):
	""" Installs postgreSQL """

	if _postgresql_is_installed():
		fabric.api.warn(fabric.colors.yellow('PostgreSQL is already installed.'))
		return
	
	config = get_provider_dict()
	if 'slave' in options:
		master = config['machines'][stage][options['slave']]
		options.update(master['services']['postgresql'])
	
	package_add_repository('ppa:pitti/postgresql')
	package_install(['postgresql', 'python-psycopg2'])
	
	# Figure out cluster name
	output = fabric.api.run('pg_lsclusters -h')
	version, cluster = output.split()[:2]
	
	if 'ec2' in fabric.api.env.conf['PROVIDER']:
		if not options.get('simple'):
			package_install('xfsprogs')
			package_install('mdadm', '--no-install-recommends')
			
			# Create two ebs volumes
			import boto.ec2
			ec2 = boto.ec2.connect_to_region(config['location'][:-1],
								aws_access_key_id = fabric.api.env.conf['AWS_ACCESS_KEY_ID'],
								aws_secret_access_key = fabric.api.env.conf['AWS_SECRET_ACCESS_KEY'])
			
			tag1 = u'%s-1' % id
			tag2 = u'%s-2' % id
			if not any(vol for vol in ec2.get_all_volumes() if vol.tags.get(u'Name') == tag1):
				volume1 = ec2.create_volume(options.get('max-size', 10)/2, config['location'])
				volume1.add_tag('Name', tag1)
				volume1.attach(node_dict['id'], '/dev/sdf')
			if not any(vol for vol in ec2.get_all_volumes() if vol.tags.get(u'Name') == tag2):
				volume2 = ec2.create_volume(options.get('max-size', 10)/2, config['location'])
				volume2.add_tag('Name', tag2)
				volume2.attach(node_dict['id'], '/dev/sdg')
			
			time.sleep(10)
			
			# RAID 0 together the EBS volumes, and format the result as xfs.  Mount at /data.
			if not fabric.contrib.files.exists('/dev/md0', True):
				fabric.api.sudo('mdadm --create /dev/md0 --level=0 --raid-devices=2 /dev/sdf /dev/sdg')
				fabric.api.sudo('mkfs.xfs /dev/md0')
			
			# Add mountpoint
			if not fabric.contrib.files.exists('/data'):
				fabric.api.sudo('mkdir -p /data')
				fabric.api.sudo('chown postgres:postgres /data')
				fabric.api.sudo('chmod 644 /data')
			
			# Add to fstab and mount
			append('/etc/fstab', '/dev/md0  /data  auto  defaults  0  0', True)
			with fabric.api.settings(warn_only = True):
				fabric.api.sudo('mount /data')
	
			# Move cluster/dbs to /data
			if fabric.api.run('pg_lsclusters -h').split()[5] != '/data':
				fabric.api.sudo('pg_dropcluster --stop %s %s' % (version, cluster))
				fabric.api.sudo('pg_createcluster --start -d /data -e UTF-8 %s %s' % (version, cluster))
	
	else:
		fabric.api.warn(fabric.colors.yellow('PostgreSQL advanced drive setup (RAID 0 + XFS) is not currently supported on non-ec2 instances'))

	fabric.api.sudo('service postgresql stop')

	# Set up postgres config files - Allow global listening (have a firewall!) and local ubuntu->your user connections
	pg_dir = '/etc/postgresql/%s/%s/' % (version, cluster)
	fabric.contrib.files.comment(pg_dir + 'postgresql.conf', 'listen_addresses', True)
	append(pg_dir + 'postgresql.conf', "listen_addresses = '*'", True)

	append(pg_dir + 'pg_hba.conf', "host all all 0.0.0.0/0 md5", True)
	fabric.contrib.files.sed(pg_dir + 'pg_hba.conf', "ident", "trust", use_sudo=True)
	
	# Figure out if we're a master
	if 'slave' not in options and any('slave' in values.get('services', {}).get('postgresql', {})
									  for name, values in config['machines'][stage].iteritems()):
		# We're a master!
		
		append(pg_dir + 'postgresql.conf', [
			'wal_level = hot_standby',
			'max_wal_senders = 1',
			'checkpoint_segments = 8',
			'wal_keep_segments = 8'], True)
		
		append(pg_dir + 'pg_hba.conf', "host replication all 0.0.0.0/0 md5", True)
		
	elif 'slave' in options:
		# We're a slave!
		
		append(pg_dir + 'postgresql.conf', [
			'hot_standby = on',
			'checkpoint_segments = 8',
			'wal_keep_segments = 8'], True)
		
		#fabric.api.sudo('rm -rf /data/*')
		append('/data/recovery.conf', [
			"standby_mode = 'on'",
			"primary_conninfo = 'host=%s port=5432 user=%s password=%s'" % (master['public_ip'][0], options['user'], options['password']),
			"trigger_file = '/data/failover'"], True)
		
		fabric.api.local('''ssh -i %s ubuntu@%s sudo tar czf - /data | ssh -i deploy/nbc-west.pem ubuntu@%s sudo tar xzf - -C /''' % (fabric.api.env.key_filename[0], master['public_ip'][0], node_dict['public_ip'][0]))
		fabric.api.sudo('chown -R postgres:postgres /data')
	
	fabric.api.sudo('service postgresql start')
Example No. 35
def list_volumes(ec2, instance_id):
    return ec2.get_all_volumes(filters={'attachment.instance-id': instance_id})
Example No. 36
    ])
args = parser.parse_args()

# process common command line arguments
log = logging.getLogger('botocross')
bc.configure_logging(log, args.log_level)
credentials = bc.parse_credentials(args)
regions = bc.filter_regions(boto.ec2.regions(), args.region)
filter = bc.build_filter(args.filter, args.exclude)
log.info(args.resource_ids)

# execute business logic
log.info("Describing EBS volumes:")

for region in regions:
    try:
        ec2 = boto.connect_ec2(region=region, **credentials)
        volumes = ec2.get_all_volumes(volume_ids=args.resource_ids,
                                      filters=filter['filters'])
        if filter['excludes']:
            exclusions = ec2.get_all_volumes(filters=filter['excludes'])
            volumes = bc.filter_list_by_attribute(volumes, exclusions, 'id')
        print region.name + ": " + str(len(volumes)) + " volumes"
        for volume in volumes:
            if args.verbose:
                pprint(vars(volume))
            else:
                print volume.id
    except boto.exception.BotoServerError as e:
        log.error(e.error_message)
Example No. 37
def create_snapshot(module,
                    ec2,
                    state=None,
                    description=None,
                    wait=None,
                    wait_timeout=None,
                    volume_id=None,
                    instance_id=None,
                    snapshot_id=None,
                    device_name=None,
                    snapshot_tags=None,
                    last_snapshot_min_age=None):
    snapshot = None
    changed = False

    required = [volume_id, snapshot_id, instance_id]
    if required.count(None) != len(required) - 1:  # only 1 must be set
        module.fail_json(
            msg=
            'One and only one of volume_id or instance_id or snapshot_id must be specified'
        )
    if instance_id and not device_name or device_name and not instance_id:
        module.fail_json(
            msg='Instance ID and device name must both be specified')

    if instance_id:
        try:
            volumes = ec2.get_all_volumes(
                filters={
                    'attachment.instance-id': instance_id,
                    'attachment.device': device_name
                })
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

        if not volumes:
            module.fail_json(
                msg="Could not find volume with name %s attached to instance %s"
                % (device_name, instance_id))

        volume_id = volumes[0].id

    if state == 'absent':
        if not snapshot_id:
            module.fail_json(
                msg='snapshot_id must be set when state is absent')
        try:
            ec2.delete_snapshot(snapshot_id)
        except boto.exception.BotoServerError as e:
            # exception is raised if snapshot does not exist
            if e.error_code == 'InvalidSnapshot.NotFound':
                module.exit_json(changed=False)
            else:
                module.fail_json(msg="%s: %s" %
                                 (e.error_code, e.error_message))

        # successful delete
        module.exit_json(changed=True)

    if last_snapshot_min_age > 0:
        try:
            current_snapshots = ec2.get_all_snapshots(
                filters={'volume_id': volume_id})
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

        last_snapshot_min_age = last_snapshot_min_age * 60  # Convert to seconds
        snapshot = _get_most_recent_snapshot(
            current_snapshots, max_snapshot_age_secs=last_snapshot_min_age)
    try:
        # Create a new snapshot if we didn't find an existing one to use
        if snapshot is None:
            snapshot = ec2.create_snapshot(volume_id, description=description)
            changed = True
        if wait:
            if not _create_with_wait(snapshot, wait_timeout):
                module.fail_json(msg='Timed out while creating snapshot.')
        if snapshot_tags:
            for k, v in snapshot_tags.items():
                snapshot.add_tag(k, v)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    module.exit_json(changed=changed,
                     snapshot_id=snapshot.id,
                     volume_id=snapshot.volume_id,
                     volume_size=snapshot.volume_size,
                     tags=snapshot.tags.copy())
Example No. 38
def isSelected(region):
    return args.region in region.name

# execute business logic    
credentials = {'aws_access_key_id': args.aws_access_key_id, 'aws_secret_access_key': args.aws_secret_access_key}
heading = "Snapshotting EBS volumes"
regions = boto.ec2.regions()
if args.region:
    heading += " (filtered by region '" + args.region + "')"
    regions = filter(isSelected, regions)

filters = None
if args.filter:
    filters = dict([filter.split('=') for filter in args.filter])
log.info(args.filter)
log.debug(filters)
log.info(args.resource_ids)

backup_set = args.backup_set if args.backup_set else DEFAULT_BACKUP_SET
log.debug(backup_set)

print heading + ":"
for region in regions:
    try:
        ec2 = boto.connect_ec2(region=region, **credentials)
        volumes = ec2.get_all_volumes(volume_ids=args.resource_ids, filters=filters)
        print region.name + ": " + str(len(volumes)) + " volumes"
        create_snapshots(ec2, volumes, backup_set, args.description)
    except boto.exception.BotoServerError as e:
        log.error(e.error_message)
Example No. 39
def _attachment_state(self):
    ec2 = self.get_ec2_connection()
    rs = ec2.get_all_volumes([self.volume_id])
    return rs[0].attachment_state()
Example No. 40

# accumulating the costs
vol_costs = dict(
    total=0.0,  # total costs for all EBS
    total_unused_gb=0.0,  # costs for unused standard and gp2 EBS
    total_unused_io=0.0,  # costs for unused PIOPS IO
    total_unused_io_gb=0.0,  # costs for unused PIOPS storage
    count=0,  # count of EBS volumes
    count_unused=0,  # count of unused standard and gp2 EBS volumes
    count_io_unused=0  # count of unused PIOPS EBS volumes
)

print "Getting EBS costs for region {0}".format(region)
ec2 = ec2.connect_to_region(region)
vols = ec2.get_all_volumes()
vol_costs['count'] = len(vols)
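# Per-volume cost: the GB-based storage price plus, for io1 volumes, the
# provisioned-IOPS charge (volume_price() and EBS_PRICING are defined elsewhere
# in the original script).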

for vol in vols:
    price_gb = volume_price(vol, region)
    price_io = 0
    if vol.type == 'io1':
        price_io = round(vol.iops * EBS_PRICING[region]['io1_iops'], 2)

    vol_costs['total'] += (price_gb + price_io)

    # Is the volume unused?
    attach_data = vol.attach_data

    if not attach_data.status:
        if vol.type == 'io1':
Example No. 41
def _size(self):
    if not hasattr(self, '__size'):
        ec2 = self.get_ec2_connection()
        rs = ec2.get_all_volumes([self.volume_id])
        self.__size = rs[0].size
    return self.__size
Example No. 42
def _attachment_state(self):
    ec2 = self.get_ec2_connection()
    rs = ec2.get_all_volumes([self.volume_id])
    return rs[0].attachment_state()
Example No. 43
def Back_volumes():
    ec2 = boto.ec2.connect_to_region(region)
    volumes = ec2.get_all_volumes()
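    # create_snapshot() only starts the snapshots; they continue in the 'pending'
    # state on the EC2 side after this call returns.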
    snaps = [v.create_snapshot() for v in volumes]
    print snaps
Example No. 44
def get_all_volumes(ec2, filters):
    return ec2.get_all_volumes(filters=filters)
Example No. 45
# Positional arguments
# 1. AWS region
if sys.argv[1:]:
    region = sys.argv.pop(1)

vols = list(sys.argv[1:])

ec2 = ec2.connect_to_region(region)

do_archive = True       # do archival snapshots
available_only = True   # snapshot only those volumes which 
                        #     are available (unattached)


if vols:
  volumes = ec2.get_all_volumes(volume_ids=vols)
else:
  volumes = ec2.get_all_volumes()

snapshots = ec2.get_all_snapshots(owner='self')

for volume in volumes:
    if available_only:
        if  volume.attach_data.status:
            print "SKIPPING: Attached volume", volume.id
            continue

    vol_tags = ec2.get_all_tags(filters={'resource-id': volume.id, 'resource-type': 'volume'})

    is_archived = False
    is_named = False
Example No. 46
def create_snapshot(module,
                    ec2,
                    state=None,
                    description=None,
                    wait=None,
                    wait_timeout=None,
                    volume_id=None,
                    instance_id=None,
                    snapshot_id=None,
                    device_name=None,
                    snapshot_tags=None,
                    last_snapshot_min_age=None,
                    create_volume_permissions=None):
    snapshot = None
    changed = False

    if instance_id:
        try:
            volumes = ec2.get_all_volumes(
                filters={
                    'attachment.instance-id': instance_id,
                    'attachment.device': device_name
                })
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

        if not volumes:
            module.fail_json(
                msg="Could not find volume with name %s attached to instance %s"
                % (device_name, instance_id))

        volume_id = volumes[0].id

    if state == 'absent':
        try:
            ec2.delete_snapshot(snapshot_id)
        except boto.exception.BotoServerError as e:
            # exception is raised if snapshot does not exist
            if e.error_code == 'InvalidSnapshot.NotFound':
                module.exit_json(changed=False)
            else:
                module.fail_json(msg="%s: %s" %
                                 (e.error_code, e.error_message))

        # successful delete
        module.exit_json(changed=True)

    if last_snapshot_min_age > 0:
        try:
            current_snapshots = ec2.get_all_snapshots(
                filters={'volume_id': volume_id})
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

        last_snapshot_min_age = last_snapshot_min_age * 60  # Convert to seconds
        snapshot = _get_most_recent_snapshot(
            current_snapshots, max_snapshot_age_secs=last_snapshot_min_age)
    try:
        if snapshot_id:
            snapshot = ec2.get_all_snapshots(snapshot_ids=[snapshot_id])[0]
        # Create a new snapshot if we didn't find an existing one to use
        if snapshot is None:
            snapshot = ec2.create_snapshot(volume_id, description=description)
            changed = True
        if wait:
            if not _create_with_wait(snapshot, wait_timeout):
                module.fail_json(msg='Timed out while creating snapshot.')
        if snapshot_tags:
            tags_to_add, tags_to_remove = existing_to_desired(
                snapshot.tags.items(), snapshot_tags.items())
            for (k, v) in tags_to_add:
                snapshot.add_tag(k, v)
                changed = True
            for (k, v) in tags_to_remove:
                snapshot.remove_tag(k, value=v)
                changed = True

        permissions = ec2.get_snapshot_attribute(snapshot.id)

        users_to_add, users_to_remove = existing_to_desired(
            permissions.attrs.get('user_ids', []), [
                str(user_id)
                for user_id in create_volume_permissions.get('user_ids', [])
            ])
        groups_to_add, groups_to_remove = existing_to_desired(
            permissions.attrs.get('groups', []),
            create_volume_permissions.get('groups', []))

        if users_to_add or groups_to_add:
            ec2.modify_snapshot_attribute(snapshot.id,
                                          user_ids=users_to_add,
                                          groups=groups_to_add)
            permissions = ec2.get_snapshot_attribute(snapshot.id)
            changed = True
        if users_to_remove or groups_to_remove:
            ec2.modify_snapshot_attribute(snapshot.id,
                                          operation='remove',
                                          user_ids=users_to_remove,
                                          groups=groups_to_remove)
            permissions = ec2.get_snapshot_attribute(snapshot.id)
            changed = True
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    module.exit_json(changed=changed,
                     snapshot_id=snapshot.id,
                     volume_id=snapshot.volume_id,
                     volume_size=snapshot.volume_size,
                     tags=snapshot.tags.copy(),
                     permissions=permissions.attrs)
Example No. 47
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(instance=dict(),
             id=dict(),
             name=dict(),
             volume_size=dict(),
             volume_type=dict(choices=['standard', 'gp2', 'io1', 'st1', 'sc1'],
                              default='standard'),
             iops=dict(),
             encrypted=dict(type='bool', default=False),
             kms_key_id=dict(),
             device_name=dict(),
             delete_on_termination=dict(type='bool', default=False),
             zone=dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
             snapshot=dict(),
             state=dict(choices=['absent', 'present', 'list'],
                        default='present'),
             tags=dict(type='dict', default={})))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    id = module.params.get('id')
    name = module.params.get('name')
    instance = module.params.get('instance')
    volume_size = module.params.get('volume_size')
    encrypted = module.params.get('encrypted')
    kms_key_id = module.params.get('kms_key_id')
    device_name = module.params.get('device_name')
    zone = module.params.get('zone')
    snapshot = module.params.get('snapshot')
    state = module.params.get('state')
    tags = module.params.get('tags')

    # Ensure we have the zone or can get the zone
    if instance is None and zone is None and state == 'present':
        module.fail_json(msg="You must specify either instance or zone")

    # Set volume detach flag
    if instance == 'None' or instance == '':
        instance = None
        detach_vol_flag = True
    else:
        detach_vol_flag = False

    # Set changed flag
    changed = False

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    if state == 'list':
        returned_volumes = []
        vols = get_volumes(module, ec2)

        for v in vols:
            attachment = v.attach_data

            returned_volumes.append(get_volume_info(v, state))

        module.exit_json(changed=False, volumes=returned_volumes)

    if encrypted and not boto_supports_volume_encryption():
        module.fail_json(
            msg="You must use boto >= v2.29.0 to use encrypted volumes")

    if kms_key_id is not None and not boto_supports_kms_key_id():
        module.fail_json(msg="You must use boto >= v2.39.0 to use kms_key_id")

    # Here we need to get the zone info for the instance. This covers situation where
    # instance is specified but zone isn't.
    # Useful for playbooks chaining instance launch with volume create + attach and where the
    # zone doesn't matter to the user.
    inst = None
    if instance:
        try:
            reservation = ec2.get_all_instances(instance_ids=instance)
        except BotoServerError as e:
            module.fail_json(msg=e.message)
        inst = reservation[0].instances[0]
        zone = inst.placement

        # Check if there is a volume already mounted there.
        if device_name:
            if device_name in inst.block_device_mapping:
                module.exit_json(
                    msg="Volume mapping for %s already exists on instance %s" %
                    (device_name, instance),
                    volume_id=inst.block_device_mapping[device_name].volume_id,
                    device=device_name,
                    changed=False)

    # Delaying the checks until after the instance check allows us to get volume ids for existing volumes
    # without needing to pass an unused volume_size
    if not volume_size and not (id or name or snapshot):
        module.fail_json(
            msg=
            "You must specify volume_size or identify an existing volume by id, name, or snapshot"
        )

    if volume_size and id:
        module.fail_json(msg="Cannot specify volume_size together with id")

    if state == 'present':
        volume, changed = create_volume(module, ec2, zone)
        if detach_vol_flag:
            volume, changed = detach_volume(module, ec2, volume)
        elif inst is not None:
            volume, changed = attach_volume(module, ec2, volume, inst)

        # Add device, volume_id and volume_type parameters separately to maintain backward compatibility
        volume_info = get_volume_info(volume, state)

        # deleteOnTermination is not correctly reflected on attachment
        if module.params.get('delete_on_termination'):
            for attempt in range(0, 8):
                if volume_info['attachment_set'].get(
                        'deleteOnTermination') == 'true':
                    break
                time.sleep(5)
                volume = ec2.get_all_volumes(volume_ids=volume.id)[0]
                volume_info = get_volume_info(volume, state)
        module.exit_json(changed=changed,
                         volume=volume_info,
                         device=volume_info['attachment_set']['device'],
                         volume_id=volume_info['id'],
                         volume_type=volume_info['type'])
    elif state == 'absent':
        delete_volume(module, ec2)
Example No. 48
def _size(self):
    if not hasattr(self, '__size'):
        ec2 = self.get_ec2_connection()
        rs = ec2.get_all_volumes([self.volume_id])
        self.__size = rs[0].size
    return self.__size
Example No. 49
            time_left -= backoff
            backoff = backoff * 2

    if environment is None or deployment is None or play is None:
        msg = "Unable to retrieve environment, deployment, or play tag."
        print(msg)
        exit(1)

    #get the hostname of the sandbox
    hostname = socket.gethostname()

    ami_id = get_instance_metadata()['ami-id']

    try:
        #get the list of the volumes, that are attached to the instance
        volumes = ec2.get_all_volumes(
            filters={'attachment.instance-id': instance_id})

        for volume in volumes:
            volume.add_tags({
                "hostname": hostname,
                "environment": environment,
                "deployment": deployment,
                "cluster": play,
                "instance-id": instance_id,
                "ami-id": ami_id,
                "created": volume.create_time
            })
    except Exception as e:
        msg = "Failed to tag volumes associated with {}: {}".format(
            instance_id, str(e))
        print(msg)