def main():
    # Define PrettyTable columns
    pt = PrettyTable(['Source Volume', 'Created', 'Snapshot Description', 'Status'])
    # Slide it on over to the left
    pt.align['Source Volume'] = "l"
    pt.padding_width = 1
    # Get all the snapshots owned by the current AWS account
    log.info("***** Connecting to Amazon EC2 *****")
    snapshots = conn.get_all_snapshots(owner="self")
    for snapshot in snapshots:
        # Get the current time in UTC, to match the snapshot timestamp
        current_time = datetime.datetime.utcnow()
        # Get the timestamp when the snapshot was created
        start_time = datetime.datetime.strptime(snapshot.start_time, "%Y-%m-%dT%H:%M:%S.%fZ")
        # If the snapshot creation time is older than 'x' weeks/days, delete it
        if start_time < current_time - datetime.timedelta(weeks=args.weeks[0], days=args.days[0]):
            try:
                log.info("Attempting to delete snapshot '%s'" % (snapshot.volume_id))
                del_snap = conn.delete_snapshot(snapshot.id, dry_run=args.dry_run)
                log.info("SUCCESS: The snapshot was deleted successfully.")
            except boto.exception.EC2ResponseError as ex:
                if ex.status == 403:
                    log.error("FORBIDDEN: " + ex.error_message)
                    del_snap = ex.reason.upper() + ": " + "Access denied."
                else:
                    del_snap = 'ERROR: ' + ex.error_message
            finally:
                del_snap = str(del_snap)
                # Record the outcome for this snapshot in the summary table
                pt.add_row([snapshot.volume_id, snapshot.start_time, snapshot.description, del_snap])
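This first example relies on a few names defined outside main(): a boto 2 EC2 connection conn, parsed command-line options args (with weeks, days and dry_run), and a logger log. A minimal sketch of that surrounding setup, assuming argparse and boto 2 (the region and option defaults are illustrative, not from the original):

import argparse
import datetime
import logging

import boto.ec2
from prettytable import PrettyTable

log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

# Hypothetical CLI matching args.weeks[0], args.days[0] and args.dry_run above
parser = argparse.ArgumentParser(description="Delete EC2 snapshots older than a given age.")
parser.add_argument('--weeks', nargs=1, type=int, default=[0])
parser.add_argument('--days', nargs=1, type=int, default=[0])
parser.add_argument('--dry-run', action='store_true')
args = parser.parse_args()

# boto 2 connection; credentials come from the environment or ~/.boto
conn = boto.ec2.connect_to_region('us-east-1')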
def main():
    # Connect to AWS using the credentials from the environment
    bh = BotoHelper(os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY'))
    # Get a list of all volumes for the region
    volumes = bh.get_all_volumes()
    print("\nInitiating snapshots of all instances located in region: " + bh.ec2.region.name)
    # Define PrettyTable columns
    pt = PrettyTable(['Instance Name', 'Volume', 'Snapshot Description', 'Status'])
    # Slide it on over to the left
    pt.align['Instance Name'] = "l"
    pt.padding_width = 1

    # For every volume that is attached to an instance and has a 'Snapshot' tag set to 'true', back it up
    for v in volumes:

        # check if volume is attached to an instance
        if v.attach_data.instance_id is not None:
            # fetch instance name
            instance_name = bh.get_instance_name(v.attach_data.instance_id)
            # Fall back to the volume id if the volume has no 'Name' tag
            vol_name = v.tags.get('Name', v.id)
            snapshot_prefix = str(datetime.date.today()) + "_"
            snapshot_description = snapshot_prefix.replace(" ", "_") + vol_name.replace(" ", "_")

            # if 'Snapshot' tag exists on volume and equals 'true', back it up
            if v.tags.get('Snapshot') == 'true':
                # try to back up volume. if it fails, log it and return the exception
                try:
                    log.info("Attempting to snapshot '%s' on instance '%s'" % (v.tags['Name'], instance_name))
                    backup_result = bh.backup_instance(instance_name, snapshot_prefix)
                    log.info("SUCCESS: The snapshot was initiated successfully.")
                except boto.exception.EC2ResponseError as ex:
                    if ex.status == 403:
                        log.error("FORBIDDEN: " + ex.error_message)
                        backup_result = ex.reason.upper() + ": " + "Access denied."
                    else:
                        backup_result = 'ERROR: ' + ex.error_message
                finally:
                    backup_result = str(backup_result)
                    # Record the outcome for this volume in the summary table
                    pt.add_row([instance_name, vol_name, snapshot_description, backup_result])
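BotoHelper is not part of boto itself; from the way it is used here (get_all_volumes, get_instance_name, backup_instance), a hypothetical implementation might look roughly like this. The method bodies are assumptions inferred from the calls above, not the original class:

import boto.ec2


class BotoHelper(object):
    """Hypothetical wrapper around a boto 2 EC2 connection (not a boto class)."""

    def __init__(self, access_key, secret_key, region='us-east-1'):
        self.ec2 = boto.ec2.connect_to_region(region,
                                              aws_access_key_id=access_key,
                                              aws_secret_access_key=secret_key)

    def get_all_volumes(self):
        # All EBS volumes in the connected region
        return self.ec2.get_all_volumes()

    def get_instance_name(self, instance_id):
        # The 'Name' tag of the instance, falling back to its id
        reservation = self.ec2.get_all_instances(instance_ids=[instance_id])[0]
        instance = reservation.instances[0]
        return instance.tags.get('Name', instance_id)

    def backup_instance(self, instance_name, snapshot_prefix):
        # Snapshot every volume attached to instances carrying that 'Name' tag
        snapshots = []
        for reservation in self.ec2.get_all_instances(filters={'tag:Name': instance_name}):
            for instance in reservation.instances:
                for volume in self.ec2.get_all_volumes(filters={'attachment.instance-id': instance.id}):
                    description = snapshot_prefix + volume.tags.get('Name', volume.id).replace(" ", "_")
                    snapshots.append(self.ec2.create_snapshot(volume.id, description=description))
        return snapshots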
Example #3
    # Double check that we're working on an active job and that it hasn't
    # already been completed.
    if not connection.sismember("jobs", current_job):
        continue

    # Load the job description.
    job_description = connection.get(current_job)
    job_description = json.loads(job_description)

    # Figure out which job handler to use.
    job_type = job_description.get("type", "unknown")
    job_inst = job_types.get(job_type)
    if not job_inst:
        raise Exception("Unknown Job Type: worker out of date.\n"
                        "Job type: %s\nJob: %s" % (job_type, current_job))

    # Instantiate the job.
    job_inst = job_inst(current_job, job_description, connection, NAME)

    # Prevent the job from being cleaned up while we're using it.
    with job_inst.lock():
        job_inst.deploy()
        try:
            job_inst.run_job()
        except Exception as exc:
            # Log the exception itself; exc.message is unreliable and removed in Python 3
            logging.error("Job finished prematurely due to error. (%s)" % exc)

    job_inst.cleanup()
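This last fragment is the body of a worker loop: connection is a redis-py client, current_job the job key popped from a queue, NAME a worker identifier, and job_types a mapping from type names to job classes. A sketch of the enclosing loop under those assumptions (the "job_queue" key name and worker name are illustrative):

import json
import logging

import redis

NAME = "worker-1"          # identifier for this worker (assumed)
connection = redis.Redis() # redis-py client, default localhost:6379
job_types = {}             # e.g. {"build": BuildJob, "deploy": DeployJob}

while True:
    # Block until a job key is pushed onto the (assumed) "job_queue" list
    _, current_job = connection.blpop("job_queue")
    current_job = current_job.decode("utf-8")

    # ...the fragment above runs here for each popped job...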