Example #1
def save_html():
    bucket = folder2s3.getBucket("links.bsoi.st","bsoist")
    key = Key(bucket)
    key.key = "links.html"
    key.set_contents_from_filename("links.html")
    key.set_acl("public-read")
    key.copy(bucket,key.key, preserve_acl=True, metadata={'Content-type': 'text/html'})
Example #2
    def upload(self, image):
        entry = Key(self.bucket)
        entry.key = "%s.jpg" % self.instance_name
        entry.set_contents_from_string(image)

        url = entry.generate_url(configuration.get('s3_url_expiry'))
        entry.copy(entry.bucket.name, entry.name, {'Content-Type':'image/jpeg'}, preserve_acl=True)
        return url
    def rename_package(self, package_old_name, package_new_name):
        package_old_key = Key(self.bucket, package_old_name)
        package_new_key = Key(self.bucket, package_new_name)
        if package_old_key.exists() and (not package_new_key.exists()):
            # Key.copy expects the destination key name, not a Key object
            package_old_key.copy(self.bucket, package_new_name)
        if package_new_key.exists():
            package_old_key.delete()
        return
Example #4
def mv_file_s3(s3_connection, src_path, dst_path):
    """Move a file within S3."""
    src_bucket_name, src_key_name = _from_path(src_path)
    dst_bucket_name, dst_key_name = _from_path(dst_path)

    src_bucket = s3_connection.get_bucket(src_bucket_name)
    k = Key(src_bucket)
    k.key = src_key_name
    k.copy(dst_bucket_name, dst_key_name)
    k.delete()
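
The helper `_from_path` is not shown in this snippet; it presumably splits an "s3://bucket/key" style path into a bucket name and a key name. A minimal, hypothetical sketch of such a helper (an assumption, not code from the original project):

def _from_path(path):
    # Hypothetical helper: turn "s3://bucket/some/key" or "bucket/some/key"
    # into (bucket_name, key_name).
    if path.startswith("s3://"):
        path = path[len("s3://"):]
    bucket_name, _, key_name = path.partition("/")
    return bucket_name, key_name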
Example #6
    def upload(self, image):
        entry = Key(self.bucket)
        entry.key = "%s.jpg" % self.instance_name
        entry.set_contents_from_string(image)

        url = entry.generate_url(configuration.get('s3_url_expiry'))
        entry.copy(entry.bucket.name,
                   entry.name, {'Content-Type': 'image/jpeg'},
                   preserve_acl=True)
        return url
def sendData( localPath, buckName=None, forwardWrite=12):

	#print "localPath 1 %s" % localPath

	if not buckName or len(buckName) < 1 :
		buckName = BUCKET_NAME
		#return

	if len (localPath) < 1:
		return


	try:
		cred = readCred()
		conn = S3Connection(cred[ACCESS], cred[SECRET])
		b = None
		try:
			b = conn.get_bucket(buckName)
		except Exception as e:
			b = conn.create_bucket(buckName)

		if not b:
			#print "Error: bucket cannot be nil"
			return

		strippedPath = re.sub(r'\.json$',"",localPath.lower())
		timeStampStr = re.search( r'\d+$', strippedPath).group()
		
		timestamp = int(timeStampStr)	
		
		publicationName = re.search( r'^\w+', strippedPath).group()

		if timestamp < 100 and len(publicationName) < 1:
			#print "error in publication name or time stamp"
			return


		k = Key(b) 

		for num in range(forwardWrite):

			if num == 0:

				k.key = "%s/%d.json" % (publicationName, timestamp)
				k.set_contents_from_filename(localPath)
				k.make_public()

			else:
				k.copy(buckName,"%s/%d.json" % (publicationName, timestamp))	
			
			timestamp = timestamp + 1		
		os.remove(localPath)

	except Exception as e:
		print(e)
Example #8
def store_file_in_bucket(s3, f, c):
    "Put file 'f' in S3 bucket 'b'."
    b = c['s3_bucket']
    d = c['local_directory']
    bucket = s3.lookup(b) or s3.create_bucket(b)
    key = Key(bucket)
    key.key = f[0]
    with open(d + '/' + f[0], 'r') as fd:
        key.set_contents_from_file(fd)
    key.copy(b, f[1])
    key.copy(b, f[2])
def store_file_in_bucket(s3, f, c):
  "Put file 'f' in S3 bucket 'b'."
  b = c['s3_bucket']
  d = c['local_directory']
  bucket = s3.lookup(b) or s3.create_bucket(b)
  key = Key(bucket)
  key.key = f[0]
  with open(d + '/' + f[0], 'r') as fd:
    key.set_contents_from_file(fd)
  key.copy(b, f[1])
  key.copy(b, f[2])
Example #10
def get_most_wanted():
    wanted = requests.get(MOST_WANTED, params={'max': 100})
    if wanted.status_code == 200:
        s3conn = S3Connection(AWS_KEY, AWS_SECRET)
        bucket = s3conn.get_bucket('crime.static-eric.com')
        wanted_list = []
        for person in wanted.json():
            warrant = person['warrantNo']
            wanted_list.append(warrant)
            mugs = requests.get(MUGSHOTS, params={'warrantNo': warrant})
            person['mugs'] = []
            if mugs.status_code == 200:
                for mug in mugs.json():
                    image_path = 'images/wanted/%s_%s.jpg' % (warrant, mug['mugshotNo'])
                    k = Key(bucket)
                    k.key = image_path
                    k.set_contents_from_string(b64decode(mug['image']))
                    k.set_acl('public-read')
                    person['mugs'].append({'angle': mug['mugshotNo'], 'image_path': image_path})
            else:
                raise ClearPathError('ClearPath API returned %s when fetching mugshots for %s: %s' % (mugs.status_code, warrant, mugs.content[300:]))
            k = Key(bucket)
            k.key = 'data/wanted/%s.json' % warrant
            k.set_contents_from_string(json.dumps(person, indent=4))
            k.set_acl('public-read')
        k = Key(bucket)
        k.key = 'data/wanted/wanted_list.json'
        k = k.copy(k.bucket.name, k.name, {'Content-Type':'application/json'})
        k.set_acl('public-read')
    else:
        raise ClearPathError('ClearPath API returned %s when getting most wanted list: %s' % (wanted.status_code, wanted.content[300:]))
Example #11
def dump_to_csv(start_date, end_date, out_name):
    c = pymongo.MongoClient()
    db = c['chicago']
    db.authenticate(os.environ['CHICAGO_MONGO_USER'], password=os.environ['CHICAGO_MONGO_PW'])
    crime = db['crime']
    weather = db['weather']
    all_rows = []
    for date in daterange(start_date, end_date):
        midnight = date.replace(hour=0).replace(minute=0)
        one_til = date.replace(hour=23).replace(minute=59)
        days_crimes = list(crime.find({'date': {'$gt': midnight, '$lt': one_til}}))
        if days_crimes:
            meta = make_meta(days_crimes)
            days_weather = weather.find_one({'DATE': date})
            out = {
                'date': datetime.strftime(date, '%m-%d-%Y'),
                'temp_max': days_weather['FAHR_MAX'],
                'total_count': meta['total']['value'],
            }
            fieldnames = sorted(out.keys())
            for category in meta['detail']:
                fieldnames.append(category['key'])
                out[category['key']] = category['value']
            all_rows.append(out)
    out_f = StringIO()
    writer = csv.DictWriter(out_f, fieldnames=fieldnames)
    writer.writerow(dict( (n,n) for n in fieldnames ))
    writer.writerows(all_rows)
    s3conn = S3Connection(AWS_KEY, AWS_SECRET)
    bucket = s3conn.get_bucket('crime.static-eric.com')
    k = Key(bucket)
    k.key = 'data/weather/%s.csv' % out_name
    k.set_contents_from_string(out_f.getvalue())
    k = k.copy(k.bucket.name, k.name, {'Content-Type':'text/csv'})
    k.set_acl('public-read')
Example #12
def upload_to_s3(bucket_name, file_path=None):
    if file_path is None:
        file_path = get_latest_package_path()

    dir_path = file_path.as_posix()
    bucket = S3.get_bucket(bucket_name)

    k = bucket.get_key(dir_path)
    if k is not None:
        # file exists on S3
        md5_hash = hashlib.md5(file_path.open("rb").read()).hexdigest()
        if md5_hash == k.etag[1:-1]:
            # skip if it's the same file
            print "skipping upload for {}".format(dir_path)
            latest = bucket.get_key("dist/activity-streams-latest.xpi")
            update_manifest = bucket.get_key("dist/update.rdf")
            return (k, latest, update_manifest)

    print "uploading {}".format(dir_path)
    headers = get_s3_headers()
    headers["Content-Type"] = "application/x-xpinstall"

    k = Key(bucket)
    k.name = dir_path
    k.set_contents_from_filename(dir_path, headers=headers)
    k.set_acl("public-read")

    k.copy(bucket_name, "dist/activity-streams-latest.xpi")

    # copy latest key
    latest = bucket.get_key("dist/activity-streams-latest.xpi")
    latest.set_acl("public-read")

    # upload update RDF
    headers = get_s3_headers()
    headers["Content-Type"] = "application/xml"
    update_manifest = Key(bucket)
    update_manifest.name = "dist/update.rdf"
    update_manifest.set_contents_from_filename("./dist/update.rdf",
                                               headers=headers)
    update_manifest.set_acl("public-read")

    return (k, latest, update_manifest)
Example #13
File: s3.py Project: wozozo/hh
class S3(object):

    def __init__(self, **kwargs):
        if not 'host' in kwargs:
            kwargs['host'] = 's3-ap-northeast-1.amazonaws.com'  # Tokyo region
        self.conn = S3Connection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY, **kwargs)
        self.bucket = self.conn.lookup(settings.AWS_STORAGE_BUCKET_NAME)
        self.key = Key(self.bucket)

    def get_mimetype(self, filename):
        return mimetypes.guess_type(filename)[0]

    def store(self, filename, content):
        # TODO guess ext
        mime = self.get_mimetype(filename)
        self.key.key = filename
        self.key.set_metadata("Content-Type", mime)
        self.key.set_contents_from_file(content.file)
        self.key.set_acl("public-read")
        # k.make_public()

        url = 'http://%s.s3.amazonaws.com/%s' % (settings.AWS_STORAGE_BUCKET_NAME, filename)

        return url

    def publish(self, filename, date):
        mime = self.get_mimetype(filename)
        metadata = {'Content-Type': mime}

        self.key.key = filename

        if filename.startswith('tmp/'):
            # str.lstrip() strips a set of characters, not a prefix; drop the prefix explicitly
            filename = filename[len('tmp/'):]
        date = '{year}{month:02d}{day:02d}'.format(year=date.year, month=date.month, day=date.day)
        dst_key = 'photos/{date}/{filename}'.format(date=date, filename=filename)

        self.key.copy(self.bucket.name, dst_key, metadata=metadata, preserve_acl=True)

        return dst_key

    def delete(self, filename):
        # TODO s3.delete
        logging.warning('"S3.deleted" is not implemented.')
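
A hedged usage sketch for the wrapper above (the bucket and credentials come from Django-style `settings`; `upload` stands for an uploaded file object exposing a `.file` attribute, and the date is a placeholder):

import datetime

s3 = S3()
url = s3.store('tmp/photo.jpg', upload)             # upload and make public-read
# url -> http://<AWS_STORAGE_BUCKET_NAME>.s3.amazonaws.com/tmp/photo.jpg
dst_key = s3.publish('tmp/photo.jpg', datetime.date(2014, 1, 31))
# dst_key -> 'photos/20140131/photo.jpg', copied with metadata and the ACL preserved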
 def store_in_cdn(self, filename, image, cdn_path, download):
     # for now we'l use an S3 bucket
     try:
         content_type, encoding = guess_type(filename)
         cdn_bucket = self.conn.get_bucket(self.settings.publishing_buckets_prefix + self.settings.ppp_cdn_bucket)
         key = Key(cdn_bucket)
         key.key = cdn_path + "/" + filename
         key.metadata['Content-Type'] = content_type
         image.seek(0)
         key.set_contents_from_file(image)
         if download:
             metadata = key.metadata.copy()
             metadata['Content-Disposition'] = str("Content-Disposition: attachment; filename=" +
                                                   filename + ";")
             filename_no_extension, extension = filename.rsplit('.', 1)
             key.copy(cdn_bucket, cdn_path + "/" + filename_no_extension + "-download." +
                      extension, metadata=metadata)
     finally:
         image.close()
Example #15
def upload_to_s3(bucket_name, file_path=None):
    if file_path is None:
        file_path = get_latest_package_path()

    dir_path = file_path.as_posix()
    bucket = S3.get_bucket(bucket_name)

    k = bucket.get_key(dir_path)
    if k is not None:
        # file exists on S3
        md5_hash = hashlib.md5(file_path.open("rb").read()).hexdigest()
        if md5_hash == k.etag[1:-1]:
            # skip if it's the same file
            print "skipping upload for {}".format(dir_path)
            latest = bucket.get_key("dist/activity-streams-latest.xpi")
            update_manifest = bucket.get_key("dist/update.rdf")
            return (k, latest, update_manifest)

    print "uploading {}".format(dir_path)
    headers = get_s3_headers()
    headers["Content-Type"] = "application/x-xpinstall"

    k = Key(bucket)
    k.name = dir_path
    k.set_contents_from_filename(dir_path, headers=headers)
    k.set_acl("public-read")

    k.copy(bucket_name, "dist/activity-streams-latest.xpi")

    # copy latest key
    latest = bucket.get_key("dist/activity-streams-latest.xpi")
    latest.set_acl("public-read")

    # upload update RDF
    headers = get_s3_headers()
    headers["Content-Type"] = "application/xml"
    update_manifest = Key(bucket)
    update_manifest.name = "dist/update.rdf"
    update_manifest.set_contents_from_filename(
        "./dist/update.rdf", headers=headers)
    update_manifest.set_acl("public-read")

    return (k, latest, update_manifest)
Example #16
def copy_file_in_bucket(
    s3_conn, src_bucket_name, dest_bucket_name, orig_filename, copy_filename, preserve_acl=True, validate=True
):
    b = get_bucket(s3_conn, src_bucket_name, validate)
    if b:
        try:
            log.debug("Establishing handle with key object '%s'" % orig_filename)
            k = Key(b, orig_filename)
            log.debug(
                "Copying file '%s/%s' to file '%s/%s'"
                % (src_bucket_name, orig_filename, dest_bucket_name, copy_filename)
            )
            k.copy(dest_bucket_name, copy_filename, preserve_acl=preserve_acl)
            return True
        except S3ResponseError as e:
            log.debug(
                "Error copying file '%s/%s' to file '%s/%s': %s"
                % (src_bucket_name, orig_filename, dest_bucket_name, copy_filename, e)
            )
    return False
Example #17
def move_file(new_key, old_key, bucket):
    """
    Move the files

    :param new_key:
    :param old_key:
    :return:
    """
    key = Key(bucket)
    key.key = old_key
    if key.exists():
        if DRY_RUN:
            LOG.info('DRY_RUN: old: {0}, new: {1}'.format(old_key, new_key))
        else:
            key = Key(bucket)
            key.key = old_key
            key.copy(bucket, new_key)
            key.delete()
    else:
        LOG.warning('The key {0} does not exist'.format(old_key))
Example #19
    def copy(self, from_bucket_name, from_key_name, to_bucket_name, to_key_name):
        try:
            from boto.s3.key import Key
        except ImportError as e:
            raise e

        from_bucket = self.bucket(from_bucket_name)
        from_key = Key(from_bucket)
        from_key.key = from_key_name

        to_bucket = self.bucket(to_bucket_name)
        return from_key.copy(to_bucket, to_key_name)
Example #20
    def copy(self, from_bucket_name, from_key_name, to_bucket_name,
             to_key_name):
        try:
            from boto.s3.key import Key
        except ImportError as e:
            raise e

        from_bucket = self.bucket(from_bucket_name)
        from_key = Key(from_bucket)
        from_key.key = from_key_name

        to_bucket = self.bucket(to_bucket_name)
        return from_key.copy(to_bucket, to_key_name)
Example #21
def main(args):
	script_name, input_bucket_name, input_key_name, output_bucket_name, output_key_name, number_of_clones = args
	number_of_clones = int(number_of_clones)
	
	try:
		s3_connection = boto.connect_s3()
		print 'Connection to S3 service established'
		
		input_bucket = get_bucket(s3_connection, input_bucket_name)
		if (input_bucket == None):
			print "There's no bucket with name %s" % input_bucket_name
			return False
		print 'Input bucket was found'

		output_bucket = get_bucket(s3_connection, output_bucket_name)
		if (output_bucket == None):
			print "There's no bucket with name %s" % output_bucket_name
			return False
		print 'Output bucket was found'

		try:
			input_key = Key(input_bucket)	
			input_key.key = input_key_name
			if (number_of_clones > 1):
				for i in range(1, number_of_clones + 1):
					input_key.copy(output_bucket_name, get_clone_name(output_key_name, i))
			else:
				input_key.copy(output_bucket_name, output_key_name)
			print 'File successfully cloned %s times' % str(number_of_clones)
		except Exception, e:
			print 'Could not clone key %s' % input_key_name
			print e
			return False
	except Exception, e:
		print 'Could not establish connection to S3 service'
		print e
		return False
Example #22
def copy_file_in_bucket(s3_conn, src_bucket_name, dest_bucket_name, orig_filename,
                        copy_filename, preserve_acl=True, validate=True):
    """
    Create a copy of an object `orig_filename` in `src_bucket_name` as
    `copy_filename` in `dest_bucket_name`, preserving the access control list
    settings by default. If `validate` is provided, the existence of source
    bucket will be validated before proceeding.
    Return `True` if the copy was successful; `False` otherwise.
    """
    b = get_bucket(s3_conn, src_bucket_name, validate)
    if b:
        try:
            log.debug(
                "Establishing handle with key object '%s'" % orig_filename)
            k = Key(b, orig_filename)
            log.debug(
                "Copying file '%s/%s' to file '%s/%s'" % (src_bucket_name,
                                                          orig_filename, dest_bucket_name, copy_filename))
            k.copy(dest_bucket_name, copy_filename, preserve_acl=preserve_acl)
            return True
        except S3ResponseError as e:
            log.debug("Error copying file '%s/%s' to file '%s/%s': %s" % (
                src_bucket_name, orig_filename, dest_bucket_name, copy_filename, e))
    return False
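
A hedged usage sketch (bucket and key names are placeholders; `boto.connect_s3()` assumes credentials in the usual boto config or environment):

import boto

conn = boto.connect_s3()
ok = copy_file_in_bucket(conn, 'my-source-bucket', 'my-dest-bucket',
                         'reports/2014-05.csv', 'backups/reports/2014-05.csv')
if not ok:
    log.error("S3 copy failed")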
Example #23
def dumpit(crime, weather, start_date=datetime(2013, 4, 25), end_date=datetime.now()):
    s3conn = S3Connection(AWS_KEY, AWS_SECRET)
    bucket = s3conn.get_bucket('crime.static-eric.com')
    for single_date in daterange(start_date, end_date):
        weat = [w for w in weather.find({'DATE': single_date})]
        if len(weat) > 0:
            midnight = single_date.replace(hour=0).replace(minute=0)
            one_til = single_date.replace(hour=23).replace(minute=59)
            crimes = [c for c in crime.find({'date': {'$gt': midnight, '$lt': one_til}})]
            if len(crimes) > 0:
                out = {
                    'weather': {
                        'CELSIUS_MIN': weat[0]['CELSIUS_MIN'],
                        'CELSIUS_MAX': weat[0]['CELSIUS_MAX'],
                        'FAHR_MAX': weat[0]['FAHR_MAX'],
                        'FAHR_MIN': weat[0]['FAHR_MIN'],
                    }, 
                    'meta': make_meta(crimes),
                    'geojson': {
                        'type': 'FeatureCollection',
                        'features': [{
                            'type': 'Feature',
                            'geometry': f.get('location'),
                            'properties': {
                                'title': f.get('primary_type').title(),
                                'description': f.get('description').title(), 
                                'key': '_'.join(f.get('primary_type').lower().split()),
                                'arrest': f.get('arrest'),
                                'beat': f.get('beat'),
                                'block': f.get('block'),
                                'community_area': f.get('community_area'),
                                'district': f.get('district'),
                                'domestic': f.get('domestic'),
                                'location_desc': f.get('location_description'),
                                'ward': f.get('ward')
                            }
                        } for f in crimes]
                    }
                }
                # f = open('data/%s/%s/%s.json' % (single_date.year, single_date.month, single_date.day), 'wb')
                # f.write(json_util.dumps(out, indent=4, sort_keys=True))
                # f.close()
                k = Key(bucket)
                k.key = 'data/%s/%s/%s.json' % (single_date.year, single_date.month, single_date.day)
                k.set_contents_from_string(json_util.dumps(out, indent=4))
                k = k.copy(k.bucket.name, k.name, {'Content-Type':'application/json'})
                k.set_acl('public-read')
                print 'Uploaded %s' % k.key
Example #24
def _uptoS3(obj, ext):
    from boto.s3.connection import S3Connection   
    try:
        # global conn 
        conn = S3Connection(settings.S3_ACCESS_KEY,settings.S3_SECRET_KEY)
        print 'defined conn'
        bucket = settings.S3_BUCKET
        from boto.s3.key import Key
        from hashlib import md5
        b = conn.get_bucket(bucket)
        print 'connect to S3 via conn'
        k = Key(b)
        print 'connect to Key'

        print 'loop to save item in obj'
        for i in obj:
            print 'new loop'
            k.key = i['filename']
            print 'defined k.key'

            print 'start upload to S3'
            k.set_contents_from_string(i['filedir'].getvalue(), policy='public-read')
            print 'finish uploaded'

            print 'set metadata value'
            if ext=='jpeg' :
                type_value='image/jpeg'     
            elif ext=='png' or ext=='PNG':
                type_value='image/png'
            elif ext=='gif' or ext=='GIF':
                type_value='image/gif'
            elif ext=='bmp' or ext=='BMP':
                type_value='image/bmp'
            k = k.copy(k.bucket.name, k.name, {'Content-Type':type_value}, preserve_acl=True)
            print 'finish setting metadata value'
            ## the content-type of img on S3 can't be added automatically, so here is the way to add the type for img
        k.close()
        print 'close key'

    except IOError, e:
        print "No works"
Example #25
def dump_aggregate(crime_type):
    pipe = [
        {
            '$match': {
                'primary_type': crime_type
            }
        }, 
        {
            '$group': {
                '_id': {
                    'month_reported': {
                        '$month': '$date'
                    }, 
                    'year_reported': {
                        '$year': '$date'
                    }
                }, 
                'count': {'$sum': 1}
            }
        }, 
        {
            '$sort': {'count': -1}
        }
    ]
    results = crime.aggregate(pipe)
    output = []
    for result in results['result']:
        date = '%s-%s' % (result['_id']['month_reported'], result['_id']['year_reported'])
        output.append({'date': date, 'count': result['count']})
    out_f = StringIO()
    fieldnames = output[0].keys()
    writer = csv.DictWriter(out_f, fieldnames=fieldnames)
    writer.writerow(dict( (n,n) for n in fieldnames ))
    writer.writerows(output)
    s3conn = S3Connection(AWS_KEY, AWS_SECRET)
    bucket = s3conn.get_bucket('crime.static-eric.com')
    k = Key(bucket)
    k.key = 'data/aggregates/%s.csv' % crime_type
    k.set_contents_from_string(out_f.getvalue())
    k = k.copy(k.bucket.name, k.name, {'Content-Type':'text/csv'})
    k.set_acl('public-read')
Example #26
 def set_file_format(self, file_format):
     self.file_format = file_format
     k = Key(FileInfo.objects.s3_bucket)
     if self.is_diff_file:
         k.key = self.object_key + "/diff/" + self.diff_name
         # the strings in the one line below need to be str()-ed to avoid S3 from encoding it.
         k.metadata.update(
             {
                 "Content-Type": str("application/sqlite3-diff"),
                 "Content-Disposition": str("attachment;filename=" + self.name() + "-diff"),
             }
         )
     else:
         k.key = self.object_key + "/" + self.sha1
         # the strings in the one line below need to be str()-ed to avoid S3 from encoding it.
         k.metadata.update(
             {
                 "Content-Type": str(self.file_format.mime_type.text),
                 "Content-Disposition": str("attachment;filename=" + self.name()),
             }
         )
     k2 = k.copy(k.bucket.name, k.name, k.metadata, preserve_acl=True)
     k2.metadata = k.metadata
     k = k2
Example #27
key = str(sys.argv[1])

if key.endswith('dmg'):
    newest = 'newest.dmg'
elif key.endswith('exe'):
    newest = 'newest.exe'
elif key.endswith('32-bit.deb'):
    newest = 'newest-32.deb'
elif key.endswith('64-bit.deb'):
    newest = 'newest-64.deb'
else:
    print 'File name with full version required. .deb files should end in 32-bit.deb or 64-bit.deb'
    sys.exit(1)

conn = boto.connect_s3()

b = conn.get_bucket(BUCKET)

k = Key(b)
k.key = key
k.copy(BUCKET, newest, preserve_acl=True)

# Since we've just updated the fixed name 'lantest.x' file in our bucket,
# we need to make sure to invalidate it on cloudfront in case anyone's
# using it.
#print 'Invalidating newest installers on CloudFront...'
#c = boto.connect_cloudfront()
#paths = [newest]
#inval_req = c.create_invalidation_request(u'E1D7VOTZEUYRZT', paths)
#status = c.invalidation_request_status(u'E1D7VOTZEUYRZT', inval_req.id)
Example #28
b1.set_acl('public-read')
k.key = 'keynum5'
k.set_acl('private')

b3 = conn_u2.create_bucket('mybuck')
try:
    print "\nTrying to get public-read Bucket which belongs to user1"
    b2 = conn_u2.get_bucket(b1.name);
    print "U2: Name of this bucket is {b2name}".format(b2name = b2.name)
    m = Key(b2);
    m.key = 'keynum5'
    print "Listing objects in this bucket:"
    for i in b2.list():
        print str(i)
    print "\nU2: Copying key from user1 bucket to mine"
    m.copy('mybuck', 'copiedkey')
    print "\nTrying to GET private object:"
    print "Private Object content: " + m.get_contents_as_string()
except:
    print "Unexpected error: ", sys.exc_info()

####################################################


#################### CLEANUP #######################
print "\nCleaning up..."
for bkt in conn_u1.get_all_buckets():
    for k in bkt.list():
        k.delete()
    conn_u1.delete_bucket(bkt.name)
Example #29
            if county in COUNTIES:
                try:
                    all_tracts[
                        tract['origin']]['traveling-to'] = tract['counts']
                except KeyError:
                    all_tracts[tract['origin']] = {
                        'traveling-from': {},
                        'traveling-to': tract['counts']
                    }
                work_tracts.add(tract['origin'])
        for tract in (home_tracts - work_tracts):
            all_tracts[tract]['traveling-to'] = {}
        for tract, val in all_tracts.items():
            tt = sorted(val['traveling-to'].items(),
                        key=itemgetter(1),
                        reverse=True)
            tf = sorted(val['traveling-from'].items(),
                        key=itemgetter(1),
                        reverse=True)
            outp = OrderedDict([('traveling-to', OrderedDict(tt)),
                                ('traveling-from', OrderedDict(tf))])
            key = Key(bucket)
            key.key = '{0}/{1}.json'.format(year, tract)
            key.set_contents_from_string(json.dumps(outp, sort_keys=False))
            key.make_public()
            key.copy(key.bucket,
                     key.name,
                     preserve_acl=True,
                     metadata={'Content-Type': 'application/json'})
            print 'saved {0}'.format(key.name)
Example #30
def main(argv=None):
    global DEBUG
    if argv is None:
        argv = sys.argv[1:]
    try:
        try:
            opts, args = getopt.getopt(argv, "hp:b:c:r", [
                "help", "profile=", "bucket=", "default_content_type=",
                "replaceAll"
            ])
        except getopt.error, msg:
            raise Usage(msg)
        profile, bucket, content_type = None, None, None
        replaceAll = False
        for option, value in opts:
            if option in ("-h", "--help"):
                print __doc__
                sys.exit()
            if option in ("-p", "--profile"):
                profile = value
            if option in ("-b", "--bucket"):
                bucket = value
            if option in ("-c", "--default_content_type"):
                content_type = value
            if option in ("-r", "--replaceAll"):
                replaceAll = True
        try:
            folder_name = args[0]
        except:
            raise Usage("Missing argument - folder to copy is required")
        os.chdir(folder_name)
        bucket_name = bucket or os.path.basename(folder_name)
        profile_name = profile or "Credentials"
        default_content_type = content_type or "text/html"
        bucket = getBucket(bucket_name, profile_name)
        s3files = bucket.list()
        common_prefix = os.path.commonprefix([folder_name])
        localfiles = [
            os.path.relpath(os.path.join(dp, f), common_prefix)
            for dp, dn, filenames in os.walk(common_prefix) for f in filenames
        ]
        files2upload = {
            'replacing': [],
            'uploading': set(localfiles) - set([s.key for s in s3files])
        }
        for s3file in s3files:
            keyname = s3file.key
            filename = os.path.join(common_prefix, keyname)
            s3mod = boto.utils.parse_ts(s3file.last_modified)
            try:
                localmtime = os.stat(filename).st_mtime
            except OSError:
                print "local file", filename, "not found"
                continue
            localmod = datetime.datetime.utcfromtimestamp(localmtime)
            if localmod > s3mod:
                files2upload['replacing'].append(s3file.key)
        for replace_upload, these_files in files2upload.items():
            for this_file in these_files:
                print replace_upload, os.path.join(bucket_name, this_file)
                key = Key(bucket)
                key.key = this_file
                key.set_contents_from_filename(
                    os.path.join(common_prefix, this_file))
                key.set_acl("public-read")
                try:
                    ext = this_file.split('/')[1].split('.')[1]
                except:
                    key.copy(bucket,
                             key.key,
                             preserve_acl=True,
                             metadata={'Content-Type': default_content_type})
Example #31
elif key.endswith('exe'):
    latest = 'latest.exe'
elif key.endswith('32-bit.deb'):
    latest = 'latest-32.deb'
elif key.endswith('64-bit.deb'):
    latest = 'latest-64.deb'
else:
    print 'File name with full version required. .deb files should end in 32-bit.deb or 64-bit.deb'
    sys.exit(1)

conn = boto.connect_s3()
b = conn.get_bucket('lantern')

k = Key(b)
k.key = key
k.copy('lantern', latest, preserve_acl=True)

# Since we've just updated the fixed name 'lantest.x' file in our bucket,
# we need to make sure to invalidate it on cloudfront in case anyone's
# using it.
print 'Invalidating latest installers on CloudFront...'
c = boto.connect_cloudfront()
#rs = c.get_all_distributions()
#ds = rs[1]
#distro = ds.get_distribution()
#print distro.domain_name
#print distro.id
paths = [latest]
inval_req = c.create_invalidation_request(u'E1D7VOTZEUYRZT', paths)
status = c.invalidation_request_status(u'E1D7VOTZEUYRZT', inval_req.id)
Example #32
        bucket = parts.pop(0)
        filename = '/'.join(parts)
    else:
        bucket = u.hostname
        filename = u.path.lstrip('/')

    print '  bucket: %s' % bucket
    print '  filename: %s' % filename

    if bucket in g.s3_image_buckets:
        print '  skipping - already in correct place'
        continue

    k = Key(s3.get_bucket(bucket))
    k.key = filename
    k.copy(s3.get_bucket(g.s3_image_buckets[0]), filename)
    url = 'http://s3.amazonaws.com/%s/%s' % (g.s3_image_buckets[0], filename)
    print '  new url: %s' % url
    for link in links:
        print '  altering Link %s' % link
        if not good_preview_object(link.preview_object):
            continue
        if not link.preview_object == preview_object:
            print "  aborting - preview objects don't match"
            print '    first: %s' % preview_object
            print '    ours:  %s' % link.preview_object
            continue

        link.preview_object['url'] = url
        link._commit()
        # Guess at the key that'll contain the (now-incorrect) cache of the
Example #33
 def change_metadata(self, bucket, key_name):
     key = Key(bucket, key_name)
     key.copy(bucket.name, key_name, self.updated_metadata)
Example #34
key = str(sys.argv[1])

if key.endswith('dmg'):
    newest = 'newest.dmg'
elif key.endswith('exe'):
    newest = 'newest.exe'
elif key.endswith('32-bit.deb'):
    newest = 'newest-32.deb'
elif key.endswith('64-bit.deb'):
    newest = 'newest-64.deb'
else:
    print 'File name with full version required. .deb files should end in 32-bit.deb or 64-bit.deb'
    sys.exit(1)   

conn = boto.connect_s3()
b = conn.get_bucket('lantern')

k = Key(b)
k.key = key
k.copy('lantern', newest, preserve_acl=True)

# Since we've just updated the fixed name 'lantest.x' file in our bucket,
# we need to make sure to invalidate it on cloudfront in case anyone's
# using it.
#print 'Invalidating newest installers on CloudFront...'
#c = boto.connect_cloudfront()
#paths = [newest] 
#inval_req = c.create_invalidation_request(u'E1D7VOTZEUYRZT', paths)
#status = c.invalidation_request_status(u'E1D7VOTZEUYRZT', inval_req.id)
Example #35
        print "Time to adjust frag_length to %s" % (frag_length + 1)
    else:
        frag = random.choice(available_frags)
        csv_file = open('%s/links.csv' % links_folder, "a+")
        print >>csv_file, "%s;%s;%s" % (frag,url,desc)
        csv_file.close()
        os.mkdir("%s/%s" % (links_folder, frag))
        html_file = open("%s/%s/index.html" % (links_folder, frag), "w+")
        print >>html_file, html_template % (url, url)
        html_file.close()
xml_filename = "%s/%s" % (links_folder, settings.xml_file)
xml_file = open(xml_filename, "w+")
csv_file = open('%s/links.csv' % links_folder)
entries = csv_file.readlines()[-30:]
entries.reverse()
print >>xml_file, rsstemplate.rsstemplate % "\n".join(
        [rsstemplate.entrytemplate % (e[2], e[0], e[0], e[1]) for e in [entry[:-1].split(';') for entry in entries]]
    )
xml_file.close()
print "%s.bsoi.st" % frag

bucket = folder2s3.getBucket("notes.bsoi.st","bsoist")
sys.exit()
from boto.s3.key import Key
key = Key(bucket)
key.key = "feed.xml"
key.set_contents_from_filename(xml_filename)
key.set_acl("public-read")
key.copy(bucket,key.key, preserve_acl=True, metadata={'Content-type': 'text/xml'})

Example #36
def main():
	raw_input("I am about to create a bucket called 'test_bucket1' and a\n text file called 'HelloWorld.txt'. Press enter to continue.");
	print;
	with open("HelloWorld.txt", "w") as f:
		f.writelines("I hope you can read this file!");
	s3=boto.connect_s3();
	bucket1=s3.create_bucket('test_bucket1'); #creates an s3 bucket.
	print "'test_bucket1' should be created. GO CHECK! Press enter to continue.";
	raw_input();
	#I am going to create two new keys
	raw_input("I am going to add a textfile and picture to S3. Press enter to continue.");
	k=Key(bucket1);
	picture=Key(bucket1);
	picture.key="picture";
	picture.set_contents_from_filename("bearandi.jpg");
	k.key="helloWorld";
	k.set_contents_from_filename("helloWorld.txt");
	print;
	raw_input("Look at the files on S3. The Files will now be downloaded. Enter to continue.");
	print;
	#This line and the next download the files from S3
	picture.get_contents_to_filename("newBear.jpg"); 
	k.get_contents_to_filename("newHelloWorld.txt");
	#delete a key
	raw_input("File downloads 100% I am now going to delete the text file. Enter to continue.");
	print;
	#delete the text file.
	bucket1.delete_key("helloWorld");
	raw_input("The text file should now be deleted. I am now going to create 3 more buckets \nand delete one. Press enter to continue.");
	print;
	#create more buckets
	bucket2=s3.create_bucket("lab1_bucket2");
	bucket3=s3.create_bucket("lab1_bucket3");
	bucket4=s3.create_bucket("lab1_bucket4");
	raw_input("The buckets were created. I will now delete lab1_bucket4.");
	print;
	bucket4.delete();
	raw_input("lab1_bucket4 deleted. I will now querry to see if buckets exist and if I have permision.");
	print;
	#find buckets
	print "I am going to try the bucket names 'test_bucket1', which exists, and 'lab1_bucket4', which does not."
	print;
	print "Here is a list of all buckets:";
	print s3.get_all_buckets();
	print;
	try:
		print "test_bucket1:",
		print bucket1.get_acl();
	except NameError:
		print "The bucket 'bucket1' name does not exist.";
	try:
		print "lab1_bucket4:",
		print bucket4.get_acl();
	except :
		print "That bucket 'lab1_bucket4' does not exist. Invalid name.";
	print;
	raw_input("I am now going to copy the picture from test_bucket1 to lab1_bucket2.");
	#move object
	print;
	#kill object in 5 days
	picture.copy("lab1_bucket2","Bucket2Bear.jpg");
	raw_input("There should now be a copied picture in lab1_bucket2.\nI will now add a new photo with a 5 day expiration and with reduced redundancy in bucket 3.");
	print;
	cycle=Lifecycle();
	k3=Key(bucket3);
	cycle.add_rule("Five Days", "My Second picture", "Enabled", 5);
	bucket3.configure_lifecycle(cycle);
	k3.key="My Second picture";
	k3.set_contents_from_filename("GW2.jpg", reduced_redundancy=True);
	raw_input("Check bucket3 for the new object with redundancy and an expiration.\nThe last bucket with versioning is going to be made.");
	print;
	#create last bucket
	lastBucket=s3.create_bucket("last_bucket");
	lastBucket.configure_versioning(True, False, None);
	print "Version Status: ", #print versioning status
	print lastBucket.get_versioning_status();
	print;
	lastK=Key(lastBucket);
	lastK.name="MyFile";
	lastK.set_contents_from_filename("helloWorld.txt"); #add original hello world
	print "Added a hello world containing the string: '",
	print lastK.get_contents_as_string();
	print;
	#editted the same hello world
	with open("helloWorld.txt", "a") as f:
		f.writelines("\nI added some lines.\nLast Line.");
	lastK.name="MyFile";
	lastK.set_contents_from_filename("helloWorld.txt");
	print "Added a hello world containing the string: '",
	print lastK.get_contents_as_string();
	print;
	print "'.\nObject details: ";
	for version in lastBucket.list_versions():
		print version.name;
		print version.version_id;
		print;
		print;
	toDelete=raw_input("There should now be two different versions. Type the version of the file you would like to delete: ");
	try:
		print lastBucket.delete_key("MyFile", version_id=toDelete);
	except:
		print;
	raw_input("Version of the file you entered should be deleted.");
	lastK.set_metadata("My meta data", "This is the meta data");
	print; print lastK.get_metadata("My meta data");
def sendData( localPath, buckName=None, forwardWrite=36):
	
	#thelocalPath = "{0}".format( localpath )
	##print "localPath 1 %s" % localPath

	

	if not buckName or len(buckName) < 1:
		buckName = BUCKET_NAME
		#return

	if len (localPath) < 1:
		return


	try:
		# cred = readCred()
		# conn = S3Connection(cred[ACCESS], cred[SECRET])
		b = accessBucket()
		# try:
		# 	b = conn.get_bucket(buckName)
		# except Exception as e:
		# 	b = conn.create_bucket(buckName)

		# if not b:
		# 	#print "Error: bucket cannot be nil"
		# 	return

		systemPath = jsonHelper.getCompleteFilePath().lower().split('/')
		localPathArray = localPath.lower().split('/')
		print("systemPath: {0}, localPath: {1}".format(systemPath, localPathArray))

		for pathIndex in range(len(systemPath)):
			pathStr = systemPath[pathIndex]
			if pathStr.find(localPathArray[pathIndex]) < 0:
				print("NOT MATCH Path name s3Interface: {0}".format(localPathArray[pathIndex]))
				return


		#re.sub(r'\.json$',"",localPath.lower())
		#strippedPath = re.sub(r'\.json$',"",localPath.lower())



		
		if len(localPath) < 7 or len(localPathArray) < 2:
			print("Error check localPath {0}".format(localPath))
			return;


		if IMAGE_PATH in localPath:
		 	##image Operation

		 	topdir = '{0}'.format(localPath)
		# 	# The arg argument for walk, and subsequently ext for step
			exten = '.jpg'
			#imageNameList = [v.name[len("images/"):] for v in list(b.list("images/", "/"))]
			imageNameList = [v.name[len(IMAGE_KEY_SUBFOLDER):] for v in list(b.list(IMAGE_KEY_SUBFOLDER, "/"))]
			# print("imageName: {0}".format(imageNameList[4]) )

			uploadSuffixSubstringHelper = -len(UPLOAD_SUFFIX)

			##PRECONDITION
			## it download image files to a local folder in python
			## on the bash level, the images should be reformatted within the range of acceptable bytes size as JPG images and JPG extension
			##
			##CONDITION 
			## it will iterate through the destination folders.
			## searches for jpg files to upload and compare the S3 image folder.
			##    IF no match is identified and conform to acceptable size, it will be uploaded to the S3 folder and rename the extension to uploaded.
			##    elif match is identified with jpg extension"
			## 			delete the file in the local machine
			## 	  elif file match uploaded extension
			##			check if exceeded the minimum time
			##				delete the file in the S3 and local machine
			##			else do nothing
			##
			##
			##
			## 

			def step(ext, dirname, names):
				#global _localPath

				ext = ext.lower()
				print("0 ext: {0}".format(ext))
				dt = datetime.datetime.now()
				print("1")
				nowInSeconds = time.mktime(dt.timetuple())

				print("2")

				for name in names[:]:

					if len(name) <2:
						continue

					#nameInTheList will be used for idenitfying whether the name is in the S3 data network.
					nameInTheList = False
					_name =""
					if name.lower().endswith(UPLOAD_SUFFIX) is True:
						_name = name[:uploadSuffixSubstringHelper]
					else:
						_name = name

					# check the image name exists in the cloud for with or without upload suffix
					# this will double check if the image has been uploaded to the cloud as sometime the image fail to upload but is incorrectly rename to upload suffix.

					if _name in imageNameList[:]:
						nameInTheList = True
					else:
						nameInTheList = False


					
					#print("name[:-len(UPLOAD_SUFFIX)]: {0}".format(name[:-(len(UPLOAD_SUFFIX)]))
					print("3 try: {0}".format(name[:uploadSuffixSubstringHelper]))


					if name.lower().endswith(ext) is True and not nameInTheList or name.lower().endswith(UPLOAD_SUFFIX) is True and not nameInTheList:

						if name.lower().endswith(UPLOAD_SUFFIX) is True:
							thisName = name[:uploadSuffixSubstringHelper]
						else:
							thisName = name

						
						print("4")

						keyName = "{0}{1}".format(IMAGE_KEY_SUBFOLDER, thisName)

						print("2 keyName: {0}".format(keyName))
						imagekey = b.new_key(keyName)

						print("Uploading file name: {0}".format(thisName))

						imagekey.Content_Type = "image/jpeg"

						try:
							pathToImageFile = "{0}/{1}".format(localPath,name)
							img_size = os.stat(pathToImageFile).st_size
							if img_size > MAX_IMAGE_SIZE or MIN_IMAGE_SIZE > img_size:
								print(" WARNING: improper image size {0}: {1}".format(img_size, name ))
								os.remove(pathToImageFile)
								continue

							imagekey.set_contents_from_filename(pathToImageFile)
							imagekey.make_public()

							if name.lower().endswith(ext) is True:
								localPathExt = "{0}{1}".format(pathToImageFile, UPLOAD_SUFFIX)
								os.rename(pathToImageFile, localPathExt)

							#if os.path.exists(pathToImageFile):
							#	os.remove(pathToImageFile)

						except Exception as e:
							print("Exception uploading image 0: {0} - {1}".format(name, e))

					elif name.lower().endswith(UPLOAD_SUFFIX) is True and nameInTheList or name.lower().endswith(ext) is True and nameInTheList:

						if name.lower().endswith(UPLOAD_SUFFIX) is True:
							_name = name[:uploadSuffixSubstringHelper]
						else:
							_name = name

						keyName = "{0}{1}".format(IMAGE_KEY_SUBFOLDER, _name)
						imagekey = b.get_key(keyName)
						print("Not Uploading file name: {0} last-modified: {1}".format(keyName, imagekey.last_modified))
						##"Thu Jan 29 19:13:17 GMT-800 2015"

						# print("imageNameList: {0}".format(imageNameList[0]))

						modified = time.strptime(imagekey.last_modified, '%a, %d %b %Y %H:%M:%S %Z')

						#convert to datetime
						print("time date 0 keyName: {0}".format(keyName))
						mdt = datetime.datetime.fromtimestamp(time.mktime(modified))
						print("time date 1")
						#(dt.datetime(1970,1,1)).total_seconds()
						
						#modifiedTimeInSeconds = mdt.datetime(1970,1,1).total_seconds()
						modifiedTimeInSeconds = time.mktime(mdt.timetuple())
						print("time date 2")

						durationInSeconds = nowInSeconds - modifiedTimeInSeconds
						systemPath = jsonHelper.getCompleteFilePath()
						print("should delete: {0}{1}/{2}".format(systemPath, dirname[1:], name))
						#os.remove(localPath)
						#assume default dirname is "./xyz"
						deleteFilePath = "{0}{1}/{2}".format(systemPath, dirname[1:], name)

						if durationInSeconds > OneAndHalfDay and len(imageNameList) > 0:
							try:
								print("LONGER THAN ONE DAY deleting {0}".format(imagekey))
								b.delete_key(imagekey)
								os.remove(deleteFilePath)
							except Exception as e:
								print ("Exception in deleting key: {0} - {1}".format(imagekey, e))
						elif name.lower().endswith(ext) is True:
							pathToImageFile = "{0}/{1}".format(localPath,name)
							localPathExt = "{0}{1}".format(pathToImageFile, UPLOAD_SUFFIX)
							try:
								os.rename(pathToImageFile, localPathExt)
							except Exception as e:
								print ("Exception in deleting key: {0} - {1}".format(pathToImageFile, e))

						else:
							print("WITHIN ONE DAY {0}".format(imagekey))

					# elif name.lower().endswith(UPLOAD_SUFFIX) is True:
					# 	systemPath = jsonHelper.getCompleteFilePath()
					# 	deleteFilePath = "{0}{1}/{2}".format(systemPath, dirname[1:], name)
					# 	try:
					# 		print("Deleting Path: {0}".format(deleteFilePath))
					# 		os.remove(deleteFilePath)
					# 	except Exception as e:
					# 		print ("Exception in deleting path: {0} - {1}".format(deleteFilePath, e))


			os.path.walk(topdir, step, exten)


		else:

			##JSON Operation	
			
			timeName = localPathArray[len(localPathArray)-1]
			strippedPath = re.sub(r'\.json$',"",timeName.lower())
			timeStampStr = re.search( r'\d+$', strippedPath).group()
					
			timestamp = int(timeStampStr)	

			print 'strippedPath ' + strippedPath
			#publicationName = re.search( r'^\w+', strippedPath).group()

			publicationName = localPathArray[len(localPathArray)-2]
			print('publicationName {0}'.format(publicationName))
			if timestamp < 100 and len(publicationName) < 1:
				#print "error in publication name or time stamp"
				return

			# metaData = {'charset': 'utf-8', 'Content-Type': 'application/json; '}
			k = Key(b)
			# k.metadata = metaData
			k.Content_Type = 'application/json; charset=utf-8'
			k.content_disposition = 'inline'
			# k.content_encoding = 'gzip'

			for num in range(forwardWrite):

				if num == 0:

					k.key = "%s/%d.json" % (publicationName, timestamp)
					k.set_contents_from_filename(localPath)
					k.make_public()

				else:
					k.copy(buckName,"%s/%d.json" % (publicationName, timestamp)).make_public()

				
				timestamp = timestamp + 1
		#print("should delete: {0}".format(localpath))
		#os.remove(localPath)

	except Exception as e:
		print(e)
Example #38
C.next() #Skip header row
for row in C:
#    filename = f.split(args.path)[1]
    [id_, hash_, blog_url, seq, filename, parser] = row
    
    print '\tUploading file', filename, '...'
    k.key = hash_+'.html'#filename
    k.set_contents_from_filename(file_path+filename)
    #!!Add meta fields to key
    k.copy(bucket_name, k.key, metadata={'Content-Type': 'text/html', 'Cache-Control' : 'max-age=3601'}, preserve_acl=True)
"""

#!Upload CSS file
k.key = "article_style.css"
k.set_contents_from_file(file("article_style.css", "r"))
k.copy(bucket_name, k.key, metadata={"Content-Type": "text/css", "Cache-Control": "max-age=3601"}, preserve_acl=True)

# Upload index file
k.key = "index.html"
k.set_contents_from_string("<html><body>Batteries not included.</body></html>")
k.copy(bucket_name, k.key, metadata={"Content-Type": "text/html", "Cache-Control": "max-age=3601"}, preserve_acl=True)

print "http://" + bucket.get_website_endpoint()


"""
def list_batches(args):
    buckets = conn.get_all_buckets()

    for bucket in buckets:
        name = bucket.name
Example #39
def dump_by_temp(crime, weather):
    grouped = []
    for temp in range(-30, 120):
        days = [d['DATE'] for d in weather.find({'FAHR_MAX': {'$gt': temp, '$lt': temp + 1}})]
        if days:
            grouped.append({'temp': temp, 'days': days})
    for group in grouped:
        crime_summary = []
        for day in group['days']:
            crimes = [c for c in crime.find({'date': {'$gt': day, '$lt': day + timedelta(hours=24)}})]
            crime_summary.append(make_meta(crimes))
        summary = {
            'total': 0,
            'detail': {
                'arson': 0,
                'assault': 0,
                'battery': 0,
                'burglary': 0,
                'crim_sexual_assault': 0,
                'criminal_damage': 0,
                'criminal_trespass': 0,
                'deceptive_practice': 0,
                'domestic_violence': 0,
                'gambling': 0,
                'homicide': 0,
                'interfere_with_public_officer': 0,
                'interference_with_public_officer': 0,
                'intimidation' :0,
                'kidnapping': 0,
                'liquor_law_violation': 0,
                'motor_vehicle_theft': 0,
                'narcotics': 0,
                'non_criminal': 0,
                'non_criminal_subject_specified': 0,
                'obscenity': 0,
                'offense_involving_children': 0,
                'offenses_involving_children': 0,
                'other_narcotic_violation': 0,
                'other_offense': 0,
                'prostitution': 0,
                'public_indecency': 0,
                'public_peace_violation': 0,
                'ritualism': 0,
                'robbery': 0,
                'sex_offense': 0,
                'stalking': 0,
                'theft': 0,
                'weapons_violation': 0,
            }
        }
        for cr in crime_summary:
            summary['total'] += cr['total']['value']
            for detail in cr['detail']:
                summary['detail'][detail['key']] += detail['value']
        group['summary'] = summary
    organizer = []
    for group in grouped:
        organizer.append({'key': 'total', 'temp': group['temp'], 'average': float(group['summary']['total']) / float(len(group['days'])), 'day_count': len(group['days'])})
        for k,v in group['summary']['detail'].items():
            organizer.append({'key': k, 'temp': group['temp'], 'average': float(v) / float(len(group['days'])), 'day_count': len(group['days'])})
    output = []
    organizer = sorted(organizer, key=itemgetter('key'))
    for k,g in groupby(organizer, key=itemgetter('key')):
        output.append({'key': k, 'data': list(g)})
    for group in output:
        s3conn = S3Connection(AWS_KEY, AWS_SECRET)
        bucket = s3conn.get_bucket('crime.static-eric.com')
        k = Key(bucket)
        name = 'data/weather/%s.json' % group['key']
        k.key = name
        k.set_contents_from_string(json.dumps(group, indent=4))
        k = k.copy(k.bucket.name, k.name, {'Content-Type':'application/json'})
        k.set_acl('public-read')
        print 'Uploaded %s' % name
Example #41
    data, meta_data, anno_data,syn_file,agilent_file = getFromS3( source_bucket,d,md,ad, syn_file, agilent_file, data_dir)
    working_bucket = 'hd_working_0'
    #df_path, df = mapNewData(working_bucket, data, meta_data, anno_data,syn_file,agilent_file, network_table)"""

    #TODO rework this
    #data_file, meta_data, anno_data,syn_file,agilent_file = getFromS3( source_bucket,d,md,ad, syn_file, agilent_file, data_dir)
    source_bucket = 'hd_source_data'
    working_dir = '/scratch/sgeadmin/test'
    data_file = 'norm.mean.proc.txt'
    annotations_file = 'annodata.txt'
    agilent_file = 'HDLux_agilent_gene_list.txt'
    synonym_file = 'Mus_homo.gene_info'
    network_table = 'net_info_table'
    source_id = 'c2.cp.biocarta.v4.0.symbols.gmt'
    bucket_name = 'hd_working_0'

    #generate dataframe
    hddg = HDDataGen( working_dir )
    df = hddg.generate_dataframe( data_file, annotations_file, agilent_file,
         synonym_file, network_table, source_id )
    dataframe_name = 'trimmed_dataframe.pandas'
    hddg.write_to_s3( bucket_name, df,  dataframe_name)
    #copy metadata
    conn = boto.connect_s3()
    bucket = conn.create_bucket(source_bucket, location=Location.DEFAULT)
    k = Key(bucket)
    k.key = 'metadata.txt'
    k.copy(bucket_name, k.name)

    logging.info("Ending... hddata_process.py")
Example #42
def copy_to_warehouse(src,
                      warehouse,
                      hash=None,
                      filename=None,
                      placeholder=False):
    """
    copy a local file (eg /tmp/pylons-upload-245145.dat) to the warehouse
    (eg S3:cb-wh:media/cade1361, ./civicboom/public/warehouse/media/ca/de/cade1361)
    """

    if not hash:
        hash = hash_file(src)

    log.info("Copying %s/%s (%s) to %s warehouse" %
             (warehouse, hash, filename, config["warehouse.type"]))

    if config[
            "warehouse.type"] == "local":  #  or not config.get('online', True):  # hrm, tests with s3 access are nice, sometimes...
        dest = "./civicboom/public/warehouse/%s/%s" % (warehouse, hash)
        if not os.path.exists(os.path.dirname(dest)):
            os.makedirs(os.path.dirname(dest))
        shutil.copy(src, dest)

    elif config[
            "warehouse.type"] == "s3":  # pragma: no cover - online services aren't active in test mode
        connection = S3Connection(config["api_key.aws.access"],
                                  config["api_key.aws.secret"])
        bucket = connection.get_bucket(config["warehouse.s3.bucket"])

        key = Key(bucket)
        key.key = warehouse + "/" + hash
        metadata = {
            'Content-Type':
            magic.from_file(src, mime=True),
            'Cache-Control':
            'no-cache' if placeholder else 'public, max-age=31536000',
            #'Expires': 'Sun, 17 Mar 2023 17:48:53 GMT', # FIXME: now() + 1 year
            'Content-Disposition':
            'inline; filename=' +
            __http_escape(filename) if filename else 'inline',
        }

        if key.exists():
            log.warning("%s/%s already exists; updating metadata only" %
                        (warehouse, hash))
            key.copy(bucket.name, key.key, metadata=metadata)
            key.set_acl('public-read')
        else:
            key.set_contents_from_filename(src, headers=metadata)
            key.set_acl('public-read')

    elif config[
            "warehouse.type"] == "ssh":  # pragma: no cover - online services aren't active in test mode
        log.error("SSH warehouse not implemented")
        #scp = SCPClient(SSHTransport("static.civicboom.com"))
        #scp.put(src, "~/staticdata/%s/%s/%s/%s" % (warehouse, hash[0:1], hash[2:3], hash))

    elif config["warehouse.type"] == "null":  # pragma: no cover - online services aren't active in test mode
        pass

    else:  # pragma: no cover - online services aren't active in test mode
        log.warning("Unknown warehouse type: " + config["warehouse.type"])

    return hash
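The key.exists() branch above relies on the fact that S3 object metadata cannot be edited in place: boto's Key.copy onto the same bucket/key with a new metadata dict makes S3 rewrite the stored metadata (the copy is sent with a REPLACE metadata directive, so anything not listed in the dict is dropped). A minimal sketch with hypothetical bucket and key names and placeholder credentials:

from boto.s3.connection import S3Connection

connection = S3Connection('ACCESS_KEY', 'SECRET_KEY')   # placeholder credentials
bucket = connection.get_bucket('example-warehouse')     # hypothetical bucket
key = bucket.get_key('media/cade1361')                  # hypothetical key
# list every header you want to keep -- the replacement is total, not a merge
key.copy(bucket.name, key.key,
         metadata={'Content-Type': 'image/jpeg',
                   'Cache-Control': 'public, max-age=31536000'},
         preserve_acl=True)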
Example #43
# Gets nothing, why?
logging.info('src content_type=%s metadata=%s size=%s' % (src_key.content_type, src_key.metadata, src_key.size))

# Metadata names must be HTTP Header compliant -- no 'separators' like colons; RFC2616
# Merge new meta with original
metadata = src_key.metadata                     # does this ever get anything?
if metadata:
    logging.info('src metadata=%s' % metadata)
metadata.update({'Content-Type': 'video/video',  # WORKS, .content_type does not seem to
                 'dc-title':  'dc-title',
                 'dc-author': 'dc-author',
                 })

# I don't have to create the dst_key first, can do it on copy
dst_bucket = conn.create_bucket(dst_bucket_name)
dst_key = src_key.copy(dst_bucket_name, dst_key_name,
                       metadata=metadata, preserve_acl=True)

# Gets nothing, why?
# logging.info('x-amz-meta-dc-title=%s', dst_key.get_metadata('x-amz-meta-dc-title')) # sees metas here
# logging.info('dc-title=%s', dst_key.get_metadata('dc-title'))

# This does not work -- check_key doesn't get anything useful.
# Does get valid key with .size but no meta: cks = list(check_bucket.list())[-2]
# check_conn = S3Connection()
# check_bucket = check_conn.lookup(dst_bucket_name)
# check_key = Key(check_bucket, dst_key_name)
# logging.info('check_key size=%s metadata=%s' % (check_key.size, check_key.metadata)) # Gets None, why??
# logging.info('check_key get_metadata dc-title=%s' % check_key.get_metadata('dc-title'))
# logging.info('check_key get_metadata x-amz-meta-dc-title=%s' % check_key.get_metadata('x-amz-meta-dc-title'))
# import pdb; pdb.set_trace()
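The "Gets nothing, why?" questions above most likely come down to how the keys were obtained: a Key built directly with Key(bucket, name), or returned by bucket.list(), never triggers a HEAD request, so .content_type and the user metadata stay empty on the local object. bucket.get_key() does issue a HEAD and fills them in. A small sketch with hypothetical names:

import boto

conn = boto.connect_s3()
bucket = conn.get_bucket('example-dst-bucket')   # hypothetical bucket name
key = bucket.get_key('example-video.mp4')        # HEAD request happens here
if key is not None:
    print key.content_type                       # populated after the HEAD
    print key.get_metadata('dc-title')           # user metadata, if it was stored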
Example #44
def main():
    raw_input(
        "I am about to create a bucket called 'test_bucket1' and a\n text file called 'helloWorld.txt'. Press enter to continue."
    )
    print
    #use the same case everywhere: the file is re-uploaded later as "helloWorld.txt"
    with open("helloWorld.txt", "w") as f:
        f.writelines("I hope you can read this file!")
    s3 = boto.connect_s3()
    bucket1 = s3.create_bucket('test_bucket1')
    #creates an s3 bucket.
    print "'test_bucket1' should be created. GO CHECK! Press enter to continue."
    raw_input()
    #I am going to create two new keys
    raw_input(
        "I am going to add a textfile and picture to S3. Press enter to continue."
    )
    k = Key(bucket1)
    picture = Key(bucket1)
    picture.key = "picture"
    picture.set_contents_from_filename("bearandi.jpg")
    k.key = "helloWorld"
    k.set_contents_from_filename("helloWorld.txt")
    print
    raw_input(
        "Look at the files on S3. The Files will now be downloaded. Enter to continue."
    )
    print
    #This line and the next download the files from S3
    picture.get_contents_to_filename("newBear.jpg")
    k.get_contents_to_filename("newHelloWorld.txt")
    #delete a key
    raw_input(
        "File downloads 100% I am now going to delete the text file. Enter to continue."
    )
    print
    #delete the text file.
    bucket1.delete_key("helloWorld")
    raw_input(
        "The text file should now be deleted. I am now going to create 3 more buckets \nand delete one. Press enter to continue."
    )
    print
    #create more buckets
    bucket2 = s3.create_bucket("lab1_bucket2")
    bucket3 = s3.create_bucket("lab1_bucket3")
    bucket4 = s3.create_bucket("lab1_bucket4")
    raw_input("The buckets were created. I will now delete lab1_bucket4.")
    print
    bucket4.delete()
    raw_input(
        "lab1_bucket4 deleted. I will now query whether buckets exist and whether I have permission."
    )
    print
    #find buckets
    print "I am going to try the bucket names 'test_bucket1', which exists, and 'lab1_bucket4', which does not."
    print
    print "Here is a list of all buckets:"
    print s3.get_all_buckets()
    print
    try:
        print "test_bucket1:",
        print bucket1.get_acl()
    except NameError:
        print "The bucket 'bucket1' name does not exist."
    try:
        print "lab1_bucket4:",
        print bucket4.get_acl()
    except:
        print "That bucket 'lab1_bucket4' does not exist. Invalid name."
    print
    raw_input(
        "I am now going to copy the picture from test_bucket1 to lab1_bucket2."
    )
    #copy the picture object into lab1_bucket2
    print
    picture.copy("lab1_bucket2", "Bucket2Bear.jpg")
    raw_input(
        "There should now be a copied picture in lab1_bucket2.\nI will now add a new photo with a 5 day expiration and with reduced redundancy in bucket 3."
    )
    print
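    #lifecycle rule "Five Days": expire objects whose keys start with the prefix "My Second picture" after 5 days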
    cycle = Lifecycle()
    k3 = Key(bucket3)
    cycle.add_rule("Five Days", "My Second picture", "Enabled", 5)
    bucket3.configure_lifecycle(cycle)
    k3.key = "My Second picture"
    k3.set_contents_from_filename("GW2.jpg", reduced_redundancy=True)
    raw_input(
        "Check bucket3 for the new object with redundancy and an expiration.\nThe last bucket with versioning is going to be made."
    )
    print
    #create last bucket
    lastBucket = s3.create_bucket("last_bucket")
    lastBucket.configure_versioning(True, False, None)
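    #get_versioning_status() returns a dict, e.g. {'Versioning': 'Enabled'}, once versioning is turned on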
    print "Version Status: ",  #print versioning status
    print lastBucket.get_versioning_status()
    print
    lastK = Key(lastBucket)
    lastK.name = "MyFile"
    lastK.set_contents_from_filename("helloWorld.txt")
    #add original hello world
    print "Added a hello world containing the string: '",
    print lastK.get_contents_as_string()
    print
    #editted the same hello world
    with open("helloWorld.txt", "a") as f:
        f.writelines("\nI added some lines.\nLast Line.")
    lastK.name = "MyFile"
    lastK.set_contents_from_filename("helloWorld.txt")
    print "Added a hello world containing the string: '",
    print lastK.get_contents_as_string()
    print
    print "'.\nObject details: "
    for version in lastBucket.list_versions():
        print version.name
        print version.version_id
        print
        print
    toDelete = raw_input(
        "There should now be two different versions. Type the version of the file you would like to delete: "
    )
    try:
        print lastBucket.delete_key("MyFile", version_id=toDelete)
    except:
        print
    raw_input("Version of the file you entered should be deleted.")
    lastK.set_metadata("My meta data", "This is the meta data")
    print
    lastK.get_metadata("My meta data")
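A caveat on the last two lines of this example: in boto, Key.set_metadata only changes the local Key object, and user metadata names are sent as x-amz-meta-* HTTP headers, so a name with spaces like "My meta data" is not header-safe (see the header-compliance comment in the earlier example). Metadata is stored on S3 only when the key is uploaded (or copied) after it is set. A minimal sketch with hypothetical names:

import boto
from boto.s3.key import Key

conn = boto.connect_s3()
bucket = conn.get_bucket('example-metadata-bucket')   # hypothetical bucket
k = Key(bucket)
k.key = 'MyFile'
k.set_metadata('dc-title', 'My file title')           # set before upload, header-safe name
k.set_contents_from_filename('helloWorld.txt')        # metadata travels with this PUT
fetched = bucket.get_key('MyFile')                    # HEAD request re-reads the stored metadata
print fetched.get_metadata('dc-title')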
Example #45
 def copy_to_other_s3(self, filename, other_s3, to_filename):
     key = Key(self.bucket)
     key.key = self.prefix + filename
     key.copy(other_s3.bucket, to_filename)
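A possible call site for this helper, with hypothetical object and key names; Key.copy performs the copy server-side, so the object is never downloaded:

src_store.copy_to_other_s3('reports/2016-01.csv', backup_store, 'archive/2016-01.csv')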
Example #46
elif key.endswith('32-bit.deb'):
    ext = '-32.deb'
elif key.endswith('64-bit.deb'):
    ext = '-64.deb'
else:
    print 'File name with full version required. .deb files should end in 32-bit.deb or 64-bit.deb'
    sys.exit(1)

#newest = newestname + ext

# This is all actually handled externally. TODO -- do it all here! Fix deployBinaries and releaseExisting and do everything through boto/python
newest = newestname
print 'Newest name %s' % newest

conn = boto.connect_s3()

b = conn.get_bucket(BUCKET)

k = Key(b)
k.key = key
k.copy(BUCKET, newest, preserve_acl=True)

# Since we've just updated the fixed name 'lantest.x' file in our bucket,
# we need to make sure to invalidate it on cloudfront in case anyone's
# using it.
#print 'Invalidating newest installers on CloudFront...'
#c = boto.connect_cloudfront()
#paths = [newest]
#inval_req = c.create_invalidation_request(u'E1D7VOTZEUYRZT', paths)
#status = c.invalidation_request_status(u'E1D7VOTZEUYRZT', inval_req.id)
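If the invalidation were handled here as the TODO suggests, a sketch that simply activates the commented-out block might look like this (distribution id taken from the comments above; illustrative only):

import boto

# Sketch only: invalidate the fixed-name installer on CloudFront after the copy.
cf = boto.connect_cloudfront()
paths = [newest]                       # the fixed-name key we just overwrote
inval_req = cf.create_invalidation_request(u'E1D7VOTZEUYRZT', paths)
print 'Submitted CloudFront invalidation %s' % inval_req.id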