Example #1

import datetime

import boto.exception
from prettytable import PrettyTable

# `conn` (an EC2 connection), `args` (parsed CLI arguments), and `log`
# (a logger) are assumed to be set up at module level.


def main():
    # Define PrettyTable columns
    pt = PrettyTable(['Source Volume', 'Created', 'Snapshot Description', 'Status'])
    # Slide it on over to the left
    pt.align['Source Volume'] = "l"
    pt.padding_width = 1
    # Get all the snapshots owned by the current AWS account
    log.info("***** Connecting to Amazon EC2 *****")
    snapshots = conn.get_all_snapshots(owner="self")
    for snapshot in snapshots:
        # Get the current time in UTC, to match the snapshot timestamp
        current_time = datetime.datetime.utcnow()
        # Get the timestamp when the snapshot was created
        start_time = datetime.datetime.strptime(snapshot.start_time, "%Y-%m-%dT%H:%M:%S.%fZ")
        # If the snapshot creation time is older than 'x' weeks/days, delete it
        if start_time < current_time - datetime.timedelta(weeks=args.weeks[0], days=args.days[0]):
            try:
                log.info("Attempting to delete snapshot '%s'" % (snapshot.volume_id))
                del_snap = conn.delete_snapshot(snapshot.id, dry_run=args.dry_run)
                log.info("SUCCESS: The snapshot was deleted successfully.")
            except boto.exception.EC2ResponseError as ex:
                if ex.status == 403:
                    log.error("FORBIDDEN: " + ex.error_message)
                    del_snap = ex.reason.upper() + ": " + "Access denied."
                else:
                    del_snap = 'ERROR: ' + ex.error_message
            finally:
                # Record the outcome; the original excerpt is cut off here,
                # so this mirrors the pattern of the second example below.
                del_snap = str(del_snap)
                pt.add_row([snapshot.volume_id, snapshot.start_time, snapshot.description, del_snap])
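
The retention test above indexes args.weeks[0] and args.days[0], which implies list-valued arguments such as argparse produces with nargs=1. A minimal argparse sketch that would yield that shape; the flag names and defaults are assumptions, not taken from the original script:

import argparse

def parse_args():
    parser = argparse.ArgumentParser(description="Delete EC2 snapshots older than a cutoff.")
    # nargs=1 yields one-element lists, matching the args.weeks[0] access above
    parser.add_argument('--weeks', type=int, nargs=1, default=[0])
    parser.add_argument('--days', type=int, nargs=1, default=[0])
    parser.add_argument('--dry-run', action='store_true')
    return parser.parse_args()
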
Example #2

import datetime
import os

import boto.exception
from prettytable import PrettyTable

# `BotoHelper` is a project-local wrapper around boto's EC2 connection;
# `log` is assumed to be a module-level logger.


def main():
    # Connect to AWS with the credentials from the environment
    bh = BotoHelper(os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY'))
    # Get a list of all volumes for the region
    volumes = bh.get_all_volumes()
    print("\nInitiating snapshots of all instances located in region: " + bh.ec2.region.name)
    # Define PrettyTable columns
    pt = PrettyTable(['Instance Name', 'Volume', 'Snapshot Description', 'Status'])
    # Slide it on over to the left
    pt.align['Instance Name'] = "l"
    pt.padding_width = 1

    # For every volume that is attached to an instance and has a tag 'Snapshot' = 'true', back it up
    for v in volumes:

        # check if volume is attached to an instance
        if v.attach_data.instance_id is not None:
            # fetch instance name
            instance_name = bh.get_instance_name(v.attach_data.instance_id)
            # Fall back to the volume ID if the volume has no 'Name' tag
            vol_name = v.tags.get('Name', v.id)
            snapshot_prefix = str(datetime.date.today()) + "_"
            snapshot_description = snapshot_prefix.replace(" ", "_") + vol_name.replace(" ", "_")

            # if 'Snapshot' tag exists on volume and equals 'true', back it up
            if v.tags.get('Snapshot') == 'true':
                # try to back up volume. if it fails, log it and return the exception
                try:
                    log.info("Attempting to snapshot '%s' on instance '%s'" % (v.tags['Name'], instance_name))
                    backup_result = bh.backup_instance(instance_name, snapshot_prefix)
                    log.info("SUCCESS: The snapshot was initiated successfully.")
                except boto.exception.EC2ResponseError as ex:
                    if ex.status == 403:
                        log.error("FORBIDDEN: " + ex.error_message)
                        backup_result = ex.reason.upper() + ": " + "Access denied."
                    else:
                        backup_result = 'ERROR: ' + ex.error_message
                finally:
                    backup_result = str(backup_result)
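
The BotoHelper class and its backup_instance method are not shown in this example. A rough sketch of what that method might do on top of boto 2 (create_snapshot is boto's real API; the method body itself is an assumption that mirrors the helper calls used above):

def backup_instance(self, instance_name, snapshot_prefix):
    # Hypothetical implementation: snapshot every volume attached to the
    # named instance via boto 2's EC2Connection.create_snapshot().
    results = []
    for vol in self.get_all_volumes():
        attached_to = vol.attach_data.instance_id
        if attached_to and self.get_instance_name(attached_to) == instance_name:
            description = snapshot_prefix + vol.tags.get('Name', vol.id)
            results.append(self.ec2.create_snapshot(vol.id, description=description))
    return results
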
Example #3
import multiprocessing
import os

import redis

from jobs import DaemonJob, TaskJob
import loghelper as logging
import settings


CORES = multiprocessing.cpu_count()
NAME = settings.WORKER_NAME

# Start all the forks.
for i in range(CORES - 1):
    if os.fork() == 0:
        NAME += ".%d" % os.getpid()
        break

logging.WORKER = NAME
logging.info("Worker was started.")

connection = redis.StrictRedis(host=settings.HOST, port=6379)
pubsub = connection.pubsub()
pubsub.subscribe("work")


def get_job():
    """Return a job id to begin working on."""
    jobs = connection.smembers("jobs")
    if not jobs:
        return

    return list(jobs)[0]
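
The dispatch loop that ties the "work" channel to get_job() is not part of this excerpt. One plausible shape, sketched with redis-py's pubsub.listen() generator (the loop body is an assumption):

def work_loop():
    # Hypothetical main loop: wake on "work" messages and claim a job.
    for message in pubsub.listen():
        if message['type'] != 'message':
            continue
        job_id = get_job()
        if job_id is not None:
            logging.info("Picked up job (%s)" % job_id)
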

Example #4
def do_work(job_id, work_unit):
    """Run one work unit for `job_id` inside its virtualenv."""
    logging.info("Running work unit (%s)" % work_unit)
    return run_in_venv(job_id, "python griz.py %s" % work_unit)
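
run_in_venv is defined elsewhere in the worker. A minimal sketch of the idea using the standard library's subprocess module; the venv path layout and the return convention are assumptions:

import os
import subprocess

def run_in_venv(job_id, command):
    # Hypothetical: put the job's virtualenv first on PATH, then run the
    # command in a shell and return its exit status.
    env = dict(os.environ, PATH="jobs/%s/venv/bin:%s" % (job_id, os.environ["PATH"]))
    return subprocess.call(command, shell=True, env=env)
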
Example #5
#-*- coding: utf-8 -*-
import tushare as ts
import pandas as pd
import numpy as np
import datetime
import uniout
import selectten as se
import toemail as email
import sys
import loghelper as logging

logging.info('Start time: ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))

# Build a 180-day lookback window ending today
lookback_days = -180
nowtime = datetime.datetime.now()
delta = datetime.timedelta(days=lookback_days)
window_start = nowtime + delta
start = window_start.strftime('%Y-%m-%d')
end = nowtime.strftime('%Y-%m-%d')

now_stock = ts.get_stock_basics()
filter_stock = now_stock[now_stock['pe'] != 0]

for code in filter_stock.index:
    try:
        temp = filter_stock.loc[code]
        hist = ts.get_hist_data(code, start=start, end=end)
        if hist is None:
            continue
    except Exception:
        # Assumed minimal handler: tushare fetches fail intermittently,
        # so skip this code and move on.
        continue