Example #1
 def test_expiration_is_optional(self):
     t = Transition(days=30, storage_class='GLACIER')
     r = Rule('myid', 'prefix', 'Enabled', expiration=None, transition=t)
     xml = r.to_xml()
     self.assertIn(
         '<Transition><StorageClass>GLACIER</StorageClass><Days>30</Days>',
         xml)
 def test_lifecycle_rule_xml(self):
     # create a rule directly with id, prefix defaults
     rule = Rule(status='Enabled', expiration=30)
     s = rule.to_xml()
     # Confirm no ID is set in the rule.
     self.assertEqual(s.find("<ID>"), -1)
     # Confirm Prefix is '' and not set to 'None'
     self.assertNotEqual(s.find("<Prefix></Prefix>"), -1)
Example #4
 def test_expiration_with_expiration_and_transition(self):
     t = Transition(date='2012-11-30T00:00:000Z', storage_class='GLACIER')
     r = Rule('myid', 'prefix', 'Enabled', expiration=30, transition=t)
     xml = r.to_xml()
     self.assertIn(
         '<Transition><StorageClass>GLACIER</StorageClass>'
         '<Date>2012-11-30T00:00:000Z</Date>', xml)
     self.assertIn('<Expiration><Days>30</Days></Expiration>', xml)
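Since both elements live in the same <Rule>, the emitted XML can also be inspected directly; a minimal standalone sketch (outside the test class) that pretty-prints the rule built above:
import xml.dom.minidom

from boto.s3.lifecycle import Rule, Transition

t = Transition(date='2012-11-30T00:00:000Z', storage_class='GLACIER')
r = Rule('myid', 'prefix', 'Enabled', expiration=30, transition=t)
# to_xml() returns a single <Rule> element, so it parses as-is
print xml.dom.minidom.parseString(r.to_xml()).toprettyxml(indent='  ')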
Example #7
import boto
from boto.s3.lifecycle import Expiration, Lifecycle, Rule, Transition
from fabric.api import env  # assumption: env.project_name comes from Fabric's env

def create_s3():
    """
    Create the S3 buckets

    All the buckets use the galaxy name as the 'folder'
    :return:
    """
    # Create the bucket for the images
    s3 = boto.connect_s3()
    images_bucket = 'icrar.{0}.galaxy-images'.format(env.project_name)
    bucket = s3.create_bucket(images_bucket)
    bucket.set_acl('public-read')
    bucket.configure_website(suffix='index.html')
    bucket.set_policy('''{
  "Statement":[
    {
        "Sid":"PublicReadForGetBucketObjects",
        "Effect":"Allow",
        "Principal": {
                "AWS": "*"
        },
        "Action":["s3:GetObject"],
        "Resource":["arn:aws:s3:::%s/*"]
    }
  ]
}
''' % images_bucket)

    # Create the bucket for the output files
    file_bucket = 'icrar.{0}.files'.format(env.project_name)
    s3.create_bucket(file_bucket)

    # Create the bucket for the archived stats and log files
    file_bucket = 'icrar.{0}.archive'.format(env.project_name)
    bucket = s3.create_bucket(file_bucket)
    to_glacier = Transition(days=10, storage_class='GLACIER')
    rule1 = Rule('rule01',
                 status='Enabled',
                 prefix='stats/',
                 transition=to_glacier)
    rule2 = Rule('rule02',
                 status='Enabled',
                 prefix='logs/',
                 expiration=Expiration(days=20))
    lifecycle = Lifecycle()
    lifecycle.append(rule1)
    lifecycle.append(rule2)
    bucket.configure_lifecycle(lifecycle)
Example #8
def glacier(name):
    bucket = conn.get_bucket(name)
    to_glacier = boto.s3.lifecycle.Transition(days=30, storage_class='GLACIER')
    rule = Rule('ruleid', 'logs/', 'Enabled', transition=to_glacier)
    lifecycle = Lifecycle()
    lifecycle.append(rule)
    bucket.configure_lifecycle(lifecycle)
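This snippet leans on a module-level conn and on unqualified Rule/Lifecycle names; a minimal, hedged setup for running it (the bucket name is a placeholder):
import boto
from boto.s3.lifecycle import Lifecycle, Rule

conn = boto.connect_s3()   # credentials come from the environment/boto config
glacier('my-log-bucket')   # hypothetical bucket; it must already exist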
Example #9
def setup_bucket(s3, dirname, bucket_name):
    """Ensures the given bucket exists and prepares it for a duplicity run
	"""
    if not s3.lookup(bucket_name):
        s3.create_bucket(bucket_name)
        time.sleep(5)
    bucket = s3.get_bucket(bucket_name)

    # tag this bucket with the directory so we know what it
    # is when we retrieve it after the terrible fire or burglary
    tags = Tags()
    tagset = TagSet()
    tagset.add_tag('path', dirname)
    tags.add_tag_set(tagset)
    bucket.set_tags(tags)

    # turn off any lifecycle rotations while we are in the middle of a backup
    to_glacier = Transition(days=1, storage_class='GLACIER')
    rule = Rule('movetoglacier',
                'duplicity',
                'Disabled',
                transition=to_glacier)
    lifecycle = Lifecycle()
    lifecycle.append(rule)
    bucket.configure_lifecycle(lifecycle)

    # rename the manifest files from their glacier-safe versions
    keys = bucket.list(prefix='_duplicity')
    for key in keys:
        key.copy(bucket_name, key.name.replace("_duplicity", "duplicity"))
        key.delete()

    return bucket
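setup_bucket pairs with the cleanup_bucket function in Example #19 below, which re-enables the lifecycle rule after the run. A hedged driver sketch, with placeholder names:
import boto

s3 = boto.connect_s3()
bucket = setup_bucket(s3, '/home/me/documents', 'my-backup-bucket')
# ... run the duplicity backup against the bucket here ...
cleanup_bucket(s3, bucket)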
Example #10
 def __init__(self, bucket_name, s3_to_glacier_after_days=None):
     # create s3 connection
     # create bucket if does not exist
     # create S3 connection if archive_S3_bucket name is specified
     self.__bucket_name = bucket_name
     self.__s3_conn = boto.connect_s3()
     self.__bucket = self.__s3_conn.lookup(self.__bucket_name)
     if not self.__bucket:
         try:
             self.__bucket = self.__s3_conn.create_bucket(
                 self.__bucket_name)
             if s3_to_glacier_after_days is not None:
                 to_glacier = Transition(days=s3_to_glacier_after_days,
                                         storage_class='GLACIER')
                 rule = Rule(id='archive-rule1',
                             status='Enabled',
                             transition=to_glacier)
                 lifecycle = Lifecycle()
                 lifecycle.append(rule)
                 self.__bucket.configure_lifecycle(lifecycle)
         except S3CreateError:
             logger.error('failed to create S3 bucket[' +
                          self.__bucket_name +
                          ']. please check your AWS policy.')
             raise
Example #11
def backup_bucket(bucketname):
    connect()

    bucket = s3.get_bucket(bucketname)
    to_glacier = Transition(days=1, storage_class='GLACIER')
    rule = Rule('ruleid', '/', 'Enabled', transition=to_glacier)
    lifecycle = Lifecycle()
    lifecycle.append(rule)
    bucket.configure_lifecycle(lifecycle)

    return True
Example #12
 def test_lifecycle_with_glacier_transition(self):
     lifecycle = Lifecycle()
     transition = Transition(days=30, storage_class='GLACIER')
     rule = Rule('myid', prefix='', status='Enabled', expiration=None,
                 transition=transition)
     lifecycle.append(rule)
     self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
     response = self.bucket.get_lifecycle_config()
     transition = response[0].transition
     self.assertEqual(transition.days, 30)
     self.assertEqual(transition.storage_class, 'GLACIER')
     self.assertEqual(transition.date, None)
Example #13
def test_lifecycle_with_glacier_transition():
    conn = boto.s3.connect_to_region("us-west-1")
    bucket = conn.create_bucket("foobar")

    lifecycle = Lifecycle()
    transition = Transition(days=30, storage_class='GLACIER')
    rule = Rule('myid', prefix='', status='Enabled', expiration=None,
                transition=transition)
    lifecycle.append(rule)
    bucket.configure_lifecycle(lifecycle)
    response = bucket.get_lifecycle_config()
    transition = response[0].transition
    transition.days.should.equal(30)
    transition.storage_class.should.equal('GLACIER')
    transition.date.should.equal(None)
Example #14
def set_bucket_lifetime(bucket_name, days=14, aws_access={}, conn=None):
    '''
    Set an expiration on a bucket in S3.
    '''

    conn = return_s3_connection(aws_access) if conn is None else conn

    bucket = conn.get_bucket(bucket_name)
    expiration = Expiration(days=days)
    rule = Rule(id='ruleid',
                prefix='',
                status='Enabled',
                expiration=expiration)
    lifecycle = Lifecycle()
    lifecycle.append(rule)

    return bucket.configure_lifecycle(lifecycle)
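A hedged usage sketch; return_s3_connection belongs to the snippet's own module, so an explicit boto connection is passed instead (the bucket name is a placeholder):
import boto

conn = boto.connect_s3()
set_bucket_lifetime('my-temp-bucket', days=7, conn=conn)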
Example #15
def lifecycle():
    #transitions = Transitions()
    exp = Expiration(date="2018-06-13 07:05:00")
    #exp = Expiration(days=1)
    rule = Rule(id='rule-1', prefix='', status='Enabled', expiration=exp)
    lifecycle = Lifecycle()
    lifecycle.append(rule)

    bucket = conn.get_bucket(bucket_name)
    ret = bucket.configure_lifecycle(lifecycle)
    print "Bucket Lifecycle Set:", ret
    print "========================="

    current = bucket.get_lifecycle_config()
    print "Bucket Lifecycle Conf:", current
    print "Tran:", current[0].transition
    print "Expi:", current[0].expiration
    print "========================="
Example #16
def test_set_lifecycle_policy():
    """
     PUTs arbitraty lifecycle_policy and checks whether GET lifecycle_policy API call returns 200 
     and other lifecycle_policy metadata is as set in PUT call 
    """
    bucket = helpers.get_bucket()
    transitions = Transitions()
    transitions.add_transition(days=30, storage_class='STANDARD_IA')
    transitions.add_transition(days=90, storage_class='GLACIER')
    expiration = Expiration(days=120)
    rule = Rule(id='ruleid',
                prefix='logs/',
                status='Enabled',
                expiration=expiration,
                transition=transitions)
    lifecycle = Lifecycle()
    lifecycle.append(rule)
    assert bucket.configure_lifecycle(lifecycle) == True
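To double-check what the service actually stored, the configuration can be read back; a minimal sketch assuming the same bucket object:
current = bucket.get_lifecycle_config()
for r in current:
    # each entry is a boto Rule; expiration/transition print via their reprs
    print r.id, r.prefix, r.status
    print r.expiration, r.transition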
Example #17
 def set_transition_to_glacier(self, days, prefix=''):
     """
     Set rules when the files should be moved
     to Amazon Glacier for archiving
     This method must be called before write/upload methods
     Not used at the time, but could be for archiving s3 broker files
     :param prefix: str, prefix
     :param days: int, num of days
     :return: None
     """
     try:
         to_glacier = Transition(days=days, storage_class='GLACIER')
         rule = Rule(id='ruleid', prefix=prefix, status='Enabled', transition=to_glacier)
         lifecycle = Lifecycle()
         lifecycle.append(rule)
         self.bucket.configure_lifecycle(lifecycle)
     except Exception as e:
         logging.exception("S3Client.set_transition_to_glacier failed for bucket {}, error {}"
                           "".format(self.bucket_name, e))
Example #18
def push_code_to_Aws(dest):
    s3_connection = boto.connect_s3(aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
                                    aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY)
    try:
        bucket = s3_connection.get_bucket('calljson')
    except:
        bucket = s3_connection.create_bucket('calljson')
    expiration = Expiration(days=1)
    rule = Rule(id='ruleid', status='Enabled', expiration=expiration)
    lifecycle = Lifecycle()
    lifecycle.append(rule)
    bucket.configure_lifecycle(lifecycle)
    # create new key in s3
    key = bucket.new_key(dest)
    key.content_type = 'text/plain'
    f = open(dest, 'r')
    mystring = f.read()
    key.set_contents_from_string(mystring, policy='public-read')
    time.sleep(2)
    url = key.generate_url(160)
    o = urlparse(url)
    return o.scheme + "://" + o.netloc + o.path
Example #19
def cleanup_bucket(s3, bucket):
    """Glacier-proofs the bucket by renaming the .manifest files to not get moved 
	to glacier via our lifecycle rule
	"""

    # this isn't proof against eventual consistency, but it helps
    time.sleep(10)

    keys = bucket.list()

    # rename all the manifest and signature files so they don't get moved to glacier
    for key in keys:
        if not key.name.startswith("_") and \
        key.name.endswith(".manifest"):  # or key.name.endswith(".sigtar.gz")):
            key.copy(bucket.name, "_" + key.name)
            key.delete()

    # re-establish our lifecycle rules
    to_glacier = Transition(days=1, storage_class='GLACIER')
    rule = Rule('movetoglacier', 'duplicity', 'Enabled', transition=to_glacier)
    lifecycle = Lifecycle()
    lifecycle.append(rule)
    bucket.configure_lifecycle(lifecycle)
Example #21
def main():
    # Get command line arguments
    opts = getInput()

    #Set-up files that need to be appended to
    saveOldFiles(opts.ident)

    def exit_handler(signum, trace):
        """
        Handle exit smoothly and save progress
        """
        print "Thank you for using FoREST-Cat. Saving progress."
        log("Recieved exit command. Saving progress\n", opts.ident)
        if "states" not in globals():
            states = []
            errors = []
            events = []
        if "sensors" in dir() and "r" in dir():
            saveProgress(states, errors, events, sensors, r, opts.ident)
            log("Progress saved.\n", opts.ident)
        print "Exiting..."
        log("Exiting...\n\n", opts.ident)
        exit(0)

    #Install exit handler - program will exit when it receives SIGINT
    signal.signal(signal.SIGINT, exit_handler)

    print "Welcome to the FoREST-cat program for detecting errors and" + \
        " rare events in data from multiple sensory modalities."

    #Initialize data
    if not opts.test:
        call_rsync(opts)

    #Initialize ravq
    if opts.restart:
        print "Loading stored ravq from file..."
        r = loadFromFile("ravq", opts.ident)
        opts.startDate = loadFromFile("time", opts.ident)
        if r == None or opts.startDate == None:
            log("Failed to load RAVQ. Closing...", opts.ident)
            sendEmail("Failed to load RAVQ, FoREST-cat is closing.",
                      "FoREST-cat load fail", opts.ident,
                      "*****@*****.**")
            exit()
        log("Loaded RAVQ", opts.ident)

        #if we are loading a pre-existing RAVQ, we need to
        #load sensors after loading it, so we know when the start date is
        log("loading sensors", opts.ident)
        sensors = SensorArray(opts.config, opts.startDate)
        initData = sensors.getNext(opts.timeInt)  #this doesn't get input
        #if we want to be really efficient, fix this one day
        log("sensors loaded", opts.ident)
        saveToFile("sensors", sensors, opts.ident)

    else:
        #if we aren't loading a pre-existing RAVQ, we need to
        #load sensors before generating the new one, so we know
        #how many sensors there are
        log("loading sensors", opts.ident)
        sensors = SensorArray(opts.config, opts.startDate)
        initData = sensors.getNext(opts.timeInt)  #this doesn't get input
        #if we want to be really efficient, fix this one day
        log("sensors loaded", opts.ident)
        saveToFile("sensors", sensors, opts.ident)

        log("Generating new RAVQ...", opts.ident)
        r = ARAVQ(opts.bufferSize, opts.epsilon, opts.delta, len(initData),
                  opts.historySize, opts.learningRate)
        log("RAVQ generated.", opts.ident)

    #Set up Amazon Web Services stuff
    if not opts.test:
        from boto.s3.lifecycle import Lifecycle, Rule, Transition
        import boto
        s3Conn = boto.connect_s3()
        bucket = s3Conn.get_bucket("forest-cat")
        lifecycle = Lifecycle()
        for item in ["log", "ravq", "events", "errors", "states"]:
            #set rules for transition to Glacier
            to_glacier = Transition(days=7, storage_class="GLACIER")
            rule = Rule(item + "Rule", item, "Enabled", transition=to_glacier)
            lifecycle.append(rule)
        bucket.configure_lifecycle(lifecycle)

    #This is a dictionary so it can modified in the handler
    today = {0: datetime.date(datetime.now())}

    def alarm_handler(signum, frame):
        """
        This function will get called any time the alarm goes off.
        It is defined here so that it will have access to main() local
        variables. This supports an event-driven design.
        """
        log("Starting processing...", opts.ident)

        #get data
        #check for updates with rsync - RSA keys need to be
        #appropriately configured for this to work
        if not opts.test:
            if callRsync(opts):
                sensors.getData()

        states = []
        errors = []
        events = []

        while sensors.keepGoing:
            #Get data
            data = sensors.getNext(opts.timeInt)
            if opts.verbose:
                log("Data retrieved", opts.ident)
            #send data to RAVQ
            vec, errs = r.input(data, r.prevVec)[2:]
            if opts.verbose:
                log("Input processed: " + str(vec), opts.ident)

            states.append((r.newWinnerIndex, sensors.currTime))

            #Handle events - Check for both event types
            if r.newWinnerIndex != r.previousWinnerIndex:
                ev = Event(r.previousWinnerIndex, r.newWinnerIndex, vec,
                           data[0].time, "state transition")
                if not r.eventState:
                    eventAlertTransition(ev, opts.ident)
                    r.eventState = True
                log("Potential event: " + str(ev), opts.ident)
                events.append(
                    str(ev.prevState) + ", " + str(ev.newState) + ", " +
                    str(ev.vector) + ", " + str(ev.time) + ", " + ev.reason +
                    "\n")

            elif len(errs) > len(sensors) * opts.eventThreshold:
                ev = Event(r.previousWinnerIndex, r.newWinnerIndex, vec,
                           data[0].time, "anomalous number of errors")
                if not r.eventState:
                    eventAlertAnomalous(ev, opts.ident)
                    r.eventState = True
                log("Potential event: " + str(ev), opts.ident)
                events.append(ev)

            elif len(errs) > 0:  #Handle errors
                for e in errs:
                    log(str(e), opts.ident)
                    if not e.sensor.errorState:
                        errorAlert(e, opts.ident)
                        e.sensor.errorState = True
                    errors.append(e.sensor + ", " + str(e.time) + ", " +
                                  str(e.value) + ", " + e.flag + ", " +
                                  str(e.replace) + "\n")

            else:
                r.eventState = False

            if opts.verbose:
                log("Timestep complete.\n", opts.ident)

            if sensors.currTime.hour == 0 and sensors.currTime.minute == 0:
                log("Day is now " + str(sensors.currTime), opts.ident)
        if not opts.test:
            #If this is a test, we don't expect more data to appear
            signal.alarm(60 * opts.refreshRate)  #set next alarm
        log("Buffer emptied.\n\n", opts.ident)

        #Save stuff
        saveProgress(states, errors, events, sensors, r, opts.ident)

        if opts.test:  #tests don't need to run indefinitely
            log("Since this is test mode, exiting...", opts.ident)
            exit(0)

        #If it's a new day, archive files in S3
        if today[0] != datetime.date(datetime.now()) and not opts.test:
            for item in ["log"+opts.ident, "ravq"+opts.ident, \
                     "events"+opts.ident, "errors"+opts.ident, \
                      "states"+opts.ident]:
                #store in s3
                try:  #exception handling in boto documentation is kind of vague
                    key = boto.s3.key.Key(bucket)
                    key.key = item + str(today)
                    key.set_contents_from_filename(item)
                    #clear old files
                    infile = open(item, "w+")
                    infile.write("")
                    infile.close()
                except Exception as e:
                    sendEmail("Received execption " + str(e),
                              "Something went wrong in boto",
                              "*****@*****.**")

            today[0] = datetime.date(datetime.now())
            print today, datetime.date(datetime.now())

    signal.signal(signal.SIGALRM, alarm_handler)
    signal.alarm(1)  #go off once at start-up
    while True:
        signal.pause()  #process sleeps until alarm goes off
Example #22
def create_lifecycle_rule(connection, module):

    name = module.params.get("name")
    expiration_date = module.params.get("expiration_date")
    expiration_days = module.params.get("expiration_days")
    prefix = module.params.get("prefix")
    rule_id = module.params.get("rule_id")
    status = module.params.get("status")
    storage_class = module.params.get("storage_class")
    transition_date = module.params.get("transition_date")
    transition_days = module.params.get("transition_days")
    changed = False

    try:
        bucket = connection.get_bucket(name)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    # Get the bucket's current lifecycle rules
    try:
        current_lifecycle_obj = bucket.get_lifecycle_config()
    except S3ResponseError as e:
        if e.error_code == "NoSuchLifecycleConfiguration":
            current_lifecycle_obj = Lifecycle()
        else:
            module.fail_json(msg=e.message)

    # Create expiration
    if expiration_days is not None:
        expiration_obj = Expiration(days=expiration_days)
    elif expiration_date is not None:
        expiration_obj = Expiration(date=expiration_date)
    else:
        expiration_obj = None

    # Create transition
    if transition_days is not None:
        transition_obj = Transition(days=transition_days,
                                    storage_class=storage_class.upper())
    elif transition_date is not None:
        transition_obj = Transition(date=transition_date,
                                    storage_class=storage_class.upper())
    else:
        transition_obj = None

    # Create rule
    rule = Rule(rule_id, prefix, status.title(), expiration_obj,
                transition_obj)

    # Create lifecycle
    lifecycle_obj = Lifecycle()

    appended = False
    # If current_lifecycle_obj is not None then we have rules to compare, otherwise just add the rule
    if current_lifecycle_obj:
        # If rule ID exists, use that for comparison otherwise compare based on prefix
        for existing_rule in current_lifecycle_obj:
            if rule.id == existing_rule.id:
                if compare_rule(rule, existing_rule):
                    lifecycle_obj.append(rule)
                    appended = True
                else:
                    lifecycle_obj.append(rule)
                    changed = True
                    appended = True
            elif rule.prefix == existing_rule.prefix:
                existing_rule.id = None
                if compare_rule(rule, existing_rule):
                    lifecycle_obj.append(rule)
                    appended = True
                else:
                    lifecycle_obj.append(rule)
                    changed = True
                    appended = True
            else:
                lifecycle_obj.append(existing_rule)
        # If nothing appended then append now as the rule must not exist
        if not appended:
            lifecycle_obj.append(rule)
            changed = True
    else:
        lifecycle_obj.append(rule)
        changed = True

    # Write lifecycle to bucket
    try:
        bucket.configure_lifecycle(lifecycle_obj)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed)
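compare_rule is referenced but not defined in this example; a rough stand-in that compares only the fields the function sets (an assumption, not the actual Ansible helper):
def compare_rule(new_rule, existing_rule):
    # Compare the fields create_lifecycle_rule populates above.
    def exp_key(e):
        return (e.days, e.date) if e is not None else None

    def tr_key(t):
        return (t.days, t.date, t.storage_class) if t is not None else None

    return (new_rule.prefix == existing_rule.prefix and
            new_rule.status == existing_rule.status and
            exp_key(new_rule.expiration) == exp_key(existing_rule.expiration) and
            tr_key(new_rule.transition) == tr_key(existing_rule.transition))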
Example #23
#!/usr/bin/env python

import boto.s3

REGION = "us-west-2"
BUCKET = "mybucket"

c = boto.s3.connect_to_region(REGION)

bucket = c.get_bucket(BUCKET)

from boto.s3.lifecycle import Lifecycle, Transition, Rule
to_glacier = Transition(days=30, storage_class='GLACIER')
rule = Rule('ruleid', 'logs/', 'Enabled', transition=to_glacier)

lifecycle = Lifecycle()
lifecycle.append(rule)

bucket.configure_lifecycle(lifecycle)

current = bucket.get_lifecycle_config()
print current[0].transition
Example #24
 def test_transition_is_optional(self):
     r = Rule('myid', 'prefix', 'Enabled')
     xml = r.to_xml()
     self.assertEqual(
         '<Rule><ID>myid</ID><Prefix>prefix</Prefix><Status>Enabled</Status></Rule>',
         xml)