Example 1
def main(stream_url: str, stream_name: str, bucket_name: str, duration: str):
    temp_file = 'temp.m4a'

    print('beginning rip')

    code = subprocess.call(['ffmpeg',
                            '-i', stream_url,
                            '-t', duration,
                            '-acodec', 'copy',
                            '-absf', 'aac_adtstoasc',
                            temp_file])

    assert code == 0, 'stream rip failed with code ' + str(code)

    print('connecting to s3')
    conn = S3Connection(is_secure=False)  # AWS uses invalid certs
    bucket = conn.get_bucket(bucket_name)

    print('writing recorded file to s3')
    m4a = Key(bucket)
    m4a.name = datetime.datetime.utcnow().strftime(stream_name + '--%Y-%m-%d.m4a')
    m4a.content_type = MIME_TYPE
    m4a.metadata = {'Content-Type': MIME_TYPE}
    m4a.storage_class = 'STANDARD_IA'
    m4a.set_contents_from_filename(temp_file)
    m4a.close()

    print('generating new feed.xml from s3 bucket list')
    feed_xml = Key(bucket)
    feed_xml.name = 'feed.xml'
    feed_xml.content_type = 'application/rss+xml'
    feed_xml.set_contents_from_string(
        rss_xml(stream_name, bucket_name, bucket.list()))
    feed_xml.close()
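A minimal sketch of how this ripper might be invoked from the command line; the script name and the argument order are assumptions for illustration, not taken from the original project:

import sys

# Hypothetical entry point: python rip.py <stream_url> <stream_name> <bucket> <duration>
if __name__ == '__main__':
    stream_url, stream_name, bucket_name, duration = sys.argv[1:5]
    main(stream_url, stream_name, bucket_name, duration)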
Example 2
def upload_to_s3(file, folder, key):
    conn = boto.connect_s3(config['aws_access_key'], config['aws_secret_key'])
    bucket = conn.get_bucket(config['s3_bucket'])
    k = Key(bucket)
    k.storage_class = 'STANDARD_IA'
    k.key = folder + '/' + key
    k.set_contents_from_filename(file.name, headers={'Content-Type': 'image/jpeg'})
    return 'https://s3.amazonaws.com/' + config['s3_bucket'] + '/' + k.name
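If the target bucket is not world-readable, a presigned URL can replace the hard-coded public path. A minimal variant of the same function using boto's Key.generate_url; the function name and the one-hour expiry are assumptions:

def upload_to_s3_presigned(file, folder, key):
    conn = boto.connect_s3(config['aws_access_key'], config['aws_secret_key'])
    bucket = conn.get_bucket(config['s3_bucket'])
    k = Key(bucket)
    k.storage_class = 'STANDARD_IA'
    k.key = folder + '/' + key
    k.set_contents_from_filename(file.name, headers={'Content-Type': 'image/jpeg'})
    # Signed GET URL valid for one hour instead of a permanent public link.
    return k.generate_url(expires_in=3600)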
Example 3
 def s3move(self, filename, objectname):
     try:
         k = Key(self.bucket)
         k.key = objectname
         # Set the storage class before uploading; changing it after the
         # upload does not affect the object already stored in S3.
         k.storage_class = 'REDUCED_REDUNDANCY'
         k.set_contents_from_filename(filename)
         k.set_acl('public-read')
     except Exception as e:
         self.log_err_fatal(str(e))
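boto also accepts the storage class as a keyword on the upload call itself, which sidesteps the ordering question entirely; a sketch of the same move in that form:

 def s3move(self, filename, objectname):
     try:
         k = Key(self.bucket)
         k.key = objectname
         # reduced_redundancy=True makes boto send the storage class with the PUT.
         k.set_contents_from_filename(filename, reduced_redundancy=True)
         k.set_acl('public-read')
     except Exception as e:
         self.log_err_fatal(str(e))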
Example 4
File: s3.py Project: SciF0r/zato
    def set(self, key, value, bucket=ZATO_NONE, content_type=ZATO_NONE, metadata=ZATO_NONE,
            storage_class=ZATO_NONE, encrypt_at_rest=ZATO_NONE):
        _bucket = Bucket(self._conn, bucket if bucket != ZATO_NONE else self.zato_default_bucket)
        _key = Key(_bucket)

        _key.content_type = content_type if content_type != ZATO_NONE else self.zato_content_type
        _key.metadata.update(metadata if metadata != ZATO_NONE else self.zato_metadata)
        _key.name = key
        _key.storage_class = storage_class if storage_class != ZATO_NONE else self.zato_storage_class
        _key.set_contents_from_string(
            value, encrypt_key=(encrypt_at_rest if encrypt_at_rest != ZATO_NONE else self.zato_encrypt_at_rest))
Example 5
def perform_backup(quiet=True):
    try:
        from urllib2 import urlopen
    except:
        try:
            from urllib.request import urlopen
        except:
            pass
    try:
        urlopen('http://s3.amazonaws.com')
    except:
        if quiet:
            return
        else:
            sys.exit('ERROR: Unable to connect to s3.amazonaws.com')
    doc_path = os.path.expanduser('~/Documents')
    os.chdir(doc_path)
    backup_path = os.path.join(doc_path, 'Backup.zip')
    if os.path.exists(backup_path):
        os.remove(backup_path)
    print('Creating backup archive...')
    shutil.make_archive(os.path.join(tempfile.gettempdir(), 'Backup'), 'zip')
    shutil.move(os.path.join(tempfile.gettempdir(), 'Backup.zip'), backup_path)
    print('Backup archive created, uploading to S3 ...')

    date_text = time.strftime('%Y-%b-%d')
    time_text = time.strftime('%I-%M-%S-%p')
    info_dict_version_key = 'CFBundleShortVersionString'
    main_bundle = NSBundle.mainBundle()
    app_version = str(
        main_bundle.objectForInfoDictionaryKey_(info_dict_version_key))[0]

    AWS_ACCESS_KEY_ID = keychain.get_password('aws', 'AWS_ACCESS_KEY_ID')
    AWS_SECRET_ACCESS_KEY = keychain.get_password('aws',
                                                  'AWS_SECRET_ACCESS_KEY')

    bucket_name = 'lukaskollmer'

    def percent_cb(complete, total):
        reprint('{}'.format(round(float(complete) / float(total) * 100, 2)))

    s3 = boto.connect_s3(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
    bucket = s3.get_bucket(bucket_name)

    filename = 'Backup-{}.zip'.format(time_text)
    k = Key(bucket)
    k.storage_class = 'REDUCED_REDUNDANCY'
    k.key = '/Backup/Pythonista{}/{}/{}'.format(app_version, date_text,
                                                filename)
    print('0.0 %')
    k.set_contents_from_filename('Backup.zip', cb=percent_cb, num_cb=10)
    print('Successfully uploaded')
    os.remove(backup_path)
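The cb/num_cb pair above is boto's generic progress hook: the callback receives the number of bytes transmitted so far and the total size, roughly num_cb times per transfer. A standalone sketch that does not depend on the reprint helper used in this script:

def print_progress(transmitted, total):
    # Plain-print progress callback; reprint above presumably rewrites the line in place.
    if total:
        print('{:.1f} %'.format(transmitted * 100.0 / total))

# k.set_contents_from_filename('Backup.zip', cb=print_progress, num_cb=10)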
Example 6
def upload(filename, birthday):
    # Test that it's a date.
    datetime.date(*map(int, birthday.replace('18', '19').split('-')))

    k = Key(b)
    k.key = os.path.join('data', 'bornon', birthday + '.json.gz')
    k.storage_class = 'REDUCED_REDUNDANCY'

    f = open(filename, 'rb')  # the .json.gz payload is binary data
    k.set_contents_from_string(f.read(), HEADERS, replace=True)
    f.close()

    k.close()
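Reading the whole archive into memory just to hand it to set_contents_from_string is unnecessary; boto can read the file itself. A sketch of the same upload in that style, keeping whatever HEADERS the surrounding module defines:

def upload(filename, birthday):
    # Validate that the birthday parses as a date, as above.
    datetime.date(*map(int, birthday.replace('18', '19').split('-')))

    k = Key(b)
    k.key = os.path.join('data', 'bornon', birthday + '.json.gz')
    k.storage_class = 'REDUCED_REDUNDANCY'
    # Let boto stream the file from disk instead of buffering it in memory.
    k.set_contents_from_filename(filename, headers=HEADERS, replace=True)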
Example 8
 def write_to_s3(self, bucket_name, dataframe, dataframe_name,
                 location=Location.DEFAULT):
     """
     Writes the dataframe (cleaned and aggregated source data) and the
     metadata file to the given S3 bucket.
     """
     conn = boto.connect_s3()
     bucket = conn.create_bucket(bucket_name, location=location)
     dataframe.save('temp.tmp')
     k = Key(bucket)
     k.key = dataframe_name
     k.storage_class = 'REDUCED_REDUNDANCY'
     k.set_contents_from_filename('temp.tmp')
     os.remove('temp.tmp')
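A fixed scratch name like 'temp.tmp' can collide when two writers share a working directory; a sketch of the same write using a unique temporary file (the tempfile import is an assumption here, not part of the original snippet):

import os
import tempfile

fd, tmp_path = tempfile.mkstemp(suffix='.tmp')
os.close(fd)
try:
    dataframe.save(tmp_path)
    k = Key(bucket)
    k.key = dataframe_name
    k.storage_class = 'REDUCED_REDUNDANCY'
    k.set_contents_from_filename(tmp_path)
finally:
    os.remove(tmp_path)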
Example 9
def push_log():
    import time
    import os, os.path
    import boto.utils
    import boto
    from boto.s3.key import Key
    #only master reads config
    import masterdirac.models.systemdefaults as sys_def
    config = sys_def.get_system_defaults( 'logging', 'Data Cluster')
    log_file = config['logging_file']
    inst_id = boto.utils.get_instance_metadata()['instance-id']
    ctime = time.strftime('%Y-%m-%d-%T', time.gmtime())
    lf_name = config['log_s3_name_format'] % (inst_id, ctime)
    conn = boto.connect_s3()
    bkt = conn.create_bucket(config['log_bucket'])
    k = Key(bkt)
    k.key = lf_name
    k.set_metadata('project', 'HD')
    k.storage_class = 'REDUCED_REDUNDANCY'
    k.set_contents_from_filename(log_file)
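A quick follow-up check with the same connection confirms the log actually landed; bkt.get_key returns None when the object is not there. A minimal sketch that could sit at the end of push_log:

# Sketch: verify the pushed log by fetching its key back from the bucket.
uploaded = bkt.get_key(lf_name)
if uploaded is not None:
    print('pushed %s (%d bytes)' % (lf_name, uploaded.size))
else:
    print('upload of %s is not visible yet' % lf_name)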
Example 10
    def set(self,
            key,
            value,
            bucket=ZATO_NONE,
            content_type=ZATO_NONE,
            metadata=ZATO_NONE,
            storage_class=ZATO_NONE,
            encrypt_at_rest=ZATO_NONE):
        _bucket = Bucket(
            self.impl,
            bucket if bucket != ZATO_NONE else self.zato_default_bucket)
        _key = Key(_bucket)

        _key.content_type = content_type if content_type != ZATO_NONE else self.zato_content_type
        _key.metadata.update(metadata if metadata != ZATO_NONE else
                             parse_extra_into_dict(self.zato_metadata, False))
        _key.name = key
        _key.storage_class = storage_class if storage_class != ZATO_NONE else self.zato_storage_class
        _key.set_contents_from_string(
            value,
            encrypt_key=(encrypt_at_rest if encrypt_at_rest != ZATO_NONE else
                         self.zato_encrypt_at_rest))
Example 11
    try:
        try:
            BUCKET_NAME = environ['TWBS_S3_BUCKET']
        except KeyError:
            raise SystemExit("TWBS_S3_BUCKET environment variable not set!")

        conn = S3Connection()
        bucket = conn.lookup(BUCKET_NAME)
        if bucket is None:
            raise SystemExit("Could not access bucket!")

        key_file_hash = _sha256_of_file(key_file)

        key = Key(bucket, key_file_hash)
        key.storage_class = 'REDUCED_REDUNDANCY'

        if mode == 'download':
            download(directory)
        elif mode == 'upload':
            if need_to_upload(cache_name):
                upload(directory)
            else:
                print("No need to upload anything.")
        else:
            raise SystemExit("Unrecognized mode {!r}".format(mode))
    except BaseException as exc:
        if mode != 'download':
            raise
        print("Error!:", exc)
        print("Unable to download from cache.")
Example 12
def mapNewData(working_bucket, data, meta_data, anno_data,syn_file,agilent_file,network_table):
    """
    Given local file locations for source data, meta data, annotations data,
        synonyms file and the agilent (probe->gene) file,
    Creates a new dataframe, containing only gene information for genes
        present in the network table, indexed by gene name, columns are sample ids
    Returns dataframe pickle file location and dataframe
    """
    anno = pandas.io.parsers.read_table(anno_data)
    data = pandas.io.parsers.read_table(data)
    metadata = pandas.io.parsers.read_table(meta_data)
    agl = pandas.io.parsers.read_table(agilent_file)
    
    #get rid of control probes

    data.index = anno['ProbeName']
    control_probe_names = anno['ProbeName'][anno['ControlType'] != 0]
    data = data.drop(control_probe_names)

    # Keep only probes that map to a gene symbol, indexed by probe id.
    agl2 = agl[agl['GeneSymbol'].notnull()].set_index('ProbeID')

    #map probes to genes from network

    a = agl2['GeneSymbol'].tolist()
    b = set(a)
    table = Table(network_table)
    temp_nets = table.scan()
    network_genes = []
    for net in temp_nets:
        network_genes += net['gene_ids'][6:].split('~:~')
    network_genes_set = set(network_genes)

    mm = {}
    added = []
    with open(syn_file,'r') as synonyms:
        for line in synonyms:
            parsed = line.split()
            try:
                temp = []
                for p in parsed[:5]:
                    tt = p.split('|')
                    for t in tt:
                        if len(t) > 2 and t in network_genes_set and parsed[2] in b:
                            added.append(t)
                            temp.append(t)
                if len(temp) > 0:
                    if parsed[2] not in mm:
                        mm[parsed[2]] = []
                    for t in temp:
                        if t not in mm[parsed[2]]:
                            mm[parsed[2]].append(t)
                
            except IndexError:
                pass
    ng2p = {}
    probes = []
    with open(agilent_file, 'r') as gl:
        for line in gl:
            parsed = line.split()
            try:
                if parsed[2] in mm: #mouse gene is mapped to network gene
                    for ng in mm[parsed[2]]:
                        if ng not in ng2p:
                            ng2p[ng] = []
                        if parsed[0] not in ng2p[ng]:
                            ng2p[ng].append(parsed[0])
                            probes.append(parsed[0])          
            except IndexError:
                pass
    #create newly trimmed and annotated data frame
    #save pickle locally

    df = DataFrame(np.zeros((len(ng2p), len(data.columns))), index=ng2p.keys(), columns=data.columns)
    for k,v in ng2p.iteritems():
        df.ix[k] = data.ix[v].median()
    saved = os.path.join(os.path.split(agilent_file)[0],'trimmed_dataframe.pandas')
    df.save(saved)
    
    #send pickled dataframe to working bucket
    conn = boto.connect_s3()
    b = conn.get_bucket(working_bucket)
    k = Key(b)
    k.key = 'trimmed_dataframe.pandas'
    k.storage_class = 'REDUCED_REDUNDANCY'
    k.set_contents_from_filename(saved)

    k.key = 'metadata.txt'
    k.storage_class = 'REDUCED_REDUNDANCY'
    k.set_contents_from_filename(meta_data)

    return saved, df
Example 13
def upload_wrappers():
    conn = boto.connect_s3(**aws_creds)
    try:
        # If we have already uploaded wrappers from this instance, use the same
        # folder as before, so the links in old invite e-mails will point to the
        # new wrappers.
        loc = file(WRAPPER_LOCATION_PATH).read()
        path, version = loc.split(",")
        bucket_name, folder = path.split("/")
        bucket = conn.get_bucket(bucket_name)
    except IOError:
        bucket = conn.get_bucket(BUCKET)
        folder = get_random_folder_name(bucket)
        version = None
    newest_version = version
    for filename in os.listdir("."):
        m = filename_re.match(filename)
        if m is None:
            continue
        if version is None:
            platform, version = m.groups()
            newest_version = version
        else:
            platform, version = m.groups()
            if version != newest_version:
                logging.error("Several versions here?")
            # For robustness, we pick the newest, so the worst that will
            # happen is hopefully that we upload old wrappers in addition to
            # the newest ones, but we still point the controller to the right
            # ones.
            newest_version = max(newest_version, version)
        wrapper_key = Key(bucket)
        # We use the original name for the landing page, so we have to rename
        # the wrappers somehow.  Since the wrappers are supposed to get the
        # latest installer, their version number is misleading (and we've had
        # comments against such low version numbers at this stage).  So let's
        # take that out.
        s3_wrapper_filename = filename.replace("_" + version, '')
        wrapper_key.name = "%s/%s" % (folder, s3_wrapper_filename)
        wrapper_key.storage_class = 'REDUCED_REDUNDANCY'
        wrapper_key.set_metadata('Content-Type', content_types[platform])
        wrapper_key.set_metadata('Content-Disposition', 'attachment')
        logging.info("Uploading wrapper to %s" % wrapper_key.name)
        wrapper_key.set_contents_from_filename(filename, replace=True)
        wrapper_key.set_acl('public-read')
        # Delete successfully uploaded wrappers.
        os.unlink(filename)
        # Generate landing page.
        landing_key = Key(bucket)
        # We give it the URL that we used to give to the wrappers, to keep old
        # invite email links working, and to avoid having to change anything
        # in the controller and deal with whatever transition issues ensue.
        landing_key.name = "%s/%s" % (folder, filename)
        landing_key.storage_class = 'REDUCED_REDUNDANCY'
        logging.info("Uploading landing to %s" % landing_key.name)
        landing_key.set_metadata('Content-Type', 'text/html')
        landing_key.set_metadata('Content-Disposition', 'inline')
        landing_key.set_contents_from_string(
                landing_template.format(wrapper_name=s3_wrapper_filename,
                                        platform=platform),
                replace=True)
        landing_key.set_acl('public-read')

    # DRY warning: lantern-controller needs to understand this format.
    file(WRAPPER_LOCATION_PATH, 'w').write(
            "%s/%s,%s" % (BUCKET, folder, newest_version))
    # DRY warning: the salt scripts use this file name as a state flag.
    file('/home/lantern/uploaded_wrappers', 'w').write('OK')
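The set_metadata calls above for Content-Type and Content-Disposition should be interchangeable with passing the same names as upload headers, since boto merges key metadata into the request headers; a hedged sketch of that alternative form, not taken from the original project:

# Sketch: the same standard headers passed directly on the upload call.
wrapper_key.set_contents_from_filename(
    filename,
    headers={'Content-Type': content_types[platform],
             'Content-Disposition': 'attachment'},
    replace=True)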
Example 14
#!/usr/bin/env python2.7
# pylint: disable=C0301
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, chdir, remove as _delete_file
from os.path import dirname, basename, abspath, realpath, expandvars
from hashlib import sha256
from subprocess import check_call as run
from json import load, dump as save
from contextlib import contextmanager
from datetime import datetime
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
CONFIG_FILE = './S3Cachefile.json'
UPLOAD_TODO_FILE = './S3CacheTodo.json'
BYTES_PER_MB = 1024 * 1024

@contextmanager
def timer():
    start = datetime.utcnow()
    yield
    end = datetime.utcnow()
    elapsed = end - start
    print("\tDone. Took", int(elapsed.total_seconds()), "second(s).")

@contextmanager
def todo_file(writeback=True):
    try:
        with open(UPLOAD_TODO_FILE, 'rt') as json_file:
Example 17
 def save(self, path, filename):
     self.paths[filename] = path
     key = Key(self.bucket)
     key.storage_class = 'REDUCED_REDUNDANCY'
     key.key = join(self.document_hash, filename)
     key.set_contents_from_filename(path)
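A retrieval counterpart would rebuild the same key name from the document hash and filename; a hypothetical sketch assuming the same attributes as the save method above:

 def load(self, filename, destination):
     # Hypothetical counterpart to save(): fetch the stored object back to disk.
     key = Key(self.bucket)
     key.key = join(self.document_hash, filename)
     key.get_contents_to_filename(destination)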