Example #1
def deploy_lambda_function(session, function_name):

    # load in configs
    function_cfg = json.load(open('lambda/conf/{}.json'.format(function_name)))
    default_cfg = json.load(open('lambda/conf/defaults.json'))
    default_cfg.update(function_cfg)
    cfg = default_cfg

    lambda_client = session.client('lambda')

    try:

        # function already exists, so update it
        print('checking if function exists: {}'.format(function_name))
        lambda_client.get_function_configuration(FunctionName=function_name)

        print('\tupdating function configuration: {}'.format(function_name))
        lambda_client.update_function_configuration(
            FunctionName=function_name,
            Role=session.resource('iam').Role(cfg['role']).arn,
            Handler=cfg['handler'],
            Description=cfg.get('description', ''),  # None fails botocore parameter validation
            Timeout=cfg['timeout'],
            MemorySize=cfg['memory'],
            VpcConfig=cfg.get('vpc', {}))

        print('\tupdating function code: {}'.format(function_name))
        lambda_client.update_function_code(
            FunctionName=function_name,
            ZipFile=open('_deploy/_zip/{}.zip'.format(function_name),
                         'rb').read())
    except lambda_client.exceptions.ResourceNotFoundException:

        # function is new, so create it
        print('\tcreating new function: {}'.format(function_name))
        lambda_client.create_function(
            FunctionName=function_name,
            Runtime=cfg['runtime'],
            Role=session.resource('iam').Role(cfg['role']).arn,
            Code={
                'ZipFile':
                open('_deploy/_zip/{}.zip'.format(function_name), 'rb').read()
            },
            Handler=cfg['handler'],
            Description=cfg.get('description', ''),  # None fails botocore parameter validation
            Timeout=cfg['timeout'],
            MemorySize=cfg['memory'],
            VpcConfig=cfg.get('vpc', {}))
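A minimal invocation sketch for the helper above; the profile, region, and function name are placeholders, and the lambda/conf/*.json files plus the _deploy/_zip/<function>.zip artifact must already exist:

import boto3.session

session = boto3.session.Session(profile_name='deploy', region_name='us-east-1')
deploy_lambda_function(session, 'my-function')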
Example #2
def instance_info(instance_id, get_ip):
    """Show settings for a given instance.
    
    Args:
        instance_id: ID of instance to query.
        get_ip: True to get instance IP, which will require waiting if the 
            instance is not yet running.
    
    Returns:
        Tuple of ``(instance_id, instance_ip)``.
    """
    # run in separate session since each resource shares data
    session = boto3.session.Session()
    ec2 = session.resource("ec2")
    instance = ec2.Instance(instance_id)
    image_id = instance.image_id
    tags = instance.tags
    instance_ip = "n/a"
    if get_ip:
        print("checking instance {}".format(instance))
        instance.wait_until_running()
        instance.load()
        instance_ip = instance.public_ip_address
    # show tag info but not saving for now since not currently used
    print("instance ID: {}, image ID: {}, tags: {}, IP: {}".format(
        instance_id, image_id, tags, instance_ip))
    return instance_id, instance_ip
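A usage sketch for instance_info; the instance ID below is a placeholder:

instance_id, instance_ip = instance_info('i-0123456789abcdef0', get_ip=True)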
Example #3
def clean_aws_instances(region_name, instance_ids):
    try:
        session = boto3.session.Session(region_name=region_name)
        service = session.resource('ec2')
        service.instances.filter(InstanceIds=instance_ids).terminate()
    except Exception as details:
        logger.exception(str(details))
Example #4
def demo():
    region_id = region_select()
    session = boto3.session.Session()
    client = session.client('ec2', region_name=region_id)
    ec2 = session.resource('ec2', region_name=region_id)
    ec2operat = EC2_Operate(client, ec2, region_id)
    print(ec2operat.get_availbility_zone())
Example #5
    def download(self, bucket_name, prefix=None, save_dir='./', verb=True):
        if not exists(save_dir) or not isdir(save_dir):
            makedirs(save_dir)

        if prefix is None:
            prefix = ''
            file_list = self.check_files(bucket_name=bucket_name, verb=False)
            print('Found {} files in bucket {}'.format(len(file_list), bucket_name))
            if input('Download all files? [y/n] ') != 'y':
                exit(0)

        session = boto3.session.Session(
            region_name=self.region,
            aws_access_key_id=self._credentials[self._credentials_keys[0]],
            aws_secret_access_key=self._credentials[self._credentials_keys[1]],
            aws_session_token=self._credentials[self._credentials_keys[2]]
        )
        s3 = session.resource('s3')
        bucket = s3.Bucket(bucket_name)
        for obj in bucket.objects.filter(Prefix=prefix):
            save_path = join(save_dir, obj.key)
            if not exists(dirname(save_path)) or not isdir(dirname(save_path)):
                makedirs(dirname(save_path))
            if verb:
                print('\rDownloading Bucket {}/{} -> {}'.format(bucket_name, obj.key, save_path))
            try:
                bucket.download_file(obj.key, save_path)
            except Exception as e:
                print('[!] Failed to download {}/{}: {}'.format(bucket_name, obj.key, e))
Example #6
def clean_aws_instances(region_name, instance_ids):
    try:
        session = boto3.session.Session(region_name=region_name)
        service = session.resource('ec2')
        service.instances.filter(InstanceIds=instance_ids).terminate()
    except Exception as details:
        logger.exception(str(details))
Example #7
def s3_upload(files):
    params = {
        'aws_access_key_id': celery.conf.get('S3_ACCESS_KEY'),
        'aws_secret_access_key': celery.conf.get('S3_SECRET_KEY'),
    }

    if celery.conf.get('S3_ENDPOINT_URL'):
        params['endpoint_url'] = celery.conf.get('S3_ENDPOINT_URL')

    if celery.conf.get('S3_REGION'):
        params['region_name'] = celery.conf.get('S3_REGION')

    session = boto3.session.Session()
    s3 = session.resource('s3', **params)
    bucket = s3.Bucket(celery.conf.get('S3_BUCKET'))

    for f in files:
        tries = 0
        while tries < 10:
            try:
                bucket.upload_file(f,
                                   "/".join(f.split("/")[-2:]),
                                   ExtraArgs={'ACL': 'public-read'})
                break
            except Exception:
                tries += 1
Example #8
 def _upload(self, bucket_name, bucket_folder=None, is_public=False, verb=False):
     """
     upload data to S3
     :param bucket_name: str, name of bucket
     :param bucket_folder: str, folder name in the bucket, where the data is uploaded to
     :return:
     """
     session = boto3.session.Session(
         region_name=self.region,
         aws_access_key_id=self._credentials[self._credentials_keys[0]],
         aws_secret_access_key=self._credentials[self._credentials_keys[1]],
         aws_session_token=self._credentials[self._credentials_keys[2]]
     )
     s3 = session.resource('s3')
     while self.is_running():
         try:
             if not self.queue.empty():
                 data_path, key = self.queue.get()
                 # data = open(data_path, 'rb')
                 if bucket_folder is not None:
                     key = join(bucket_folder, key)
                 if verb:
                     print('Uploading {} -> Bucket {}/{}'.format(data_path, bucket_name, key))
                 if is_public:
                     s3.meta.client.upload_file(Filename=data_path, Bucket=bucket_name, Key=key,
                                                ExtraArgs={'ACL': 'public-read'})
                     # s3.Bucket(bucket_name).put_object(Key=key, Body=data, ACL='public-read')
                 else:
                     s3.meta.client.upload_file(Filename=data_path, Bucket=bucket_name, Key=key)
                     # s3.Bucket(bucket_name).put_object(Key=key, Body=data)
             else:
                 sleep(self.wait_time)
          except Exception:
             import traceback
             traceback.print_exc()
Example #9
def get_list():
    session = boto3.session.Session(profile_name=AWS_PROFILE)
    s3 = session.resource('s3')

    my_bucket = s3.Bucket(BUCKET_NAME)
    for file in my_bucket.objects.all():
        print(file.key)
Example #10
    def connection(self):
        connection = getattr(self._connections, 'connection', None)
        if connection is None:
            session = boto3.session.Session()
            print('key: {}, secret: {}'.format(self.access_key,
                                               self.secret_key))
            if self.use_instance_metadata:
                creds = session.get_credentials()
                self.access_key = creds.access_key
                self.secret_key = creds.secret_key
                self.security_token = creds.token
                print('using metadata: {}, {}, {}'.format(
                    self.access_key, self.secret_key, self.security_token))

            self._connections.connection = session.resource(
                's3',
                aws_access_key_id=self.access_key,
                aws_secret_access_key=self.secret_key,
                aws_session_token=self.security_token,
                region_name=self.region_name,
                use_ssl=self.use_ssl,
                endpoint_url=self.endpoint_url,
                config=self.config,
                verify=self.verify,
            )
        return self._connections.connection
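In snippets like the one above, self._connections is typically a threading.local(), so each thread lazily builds and caches its own resource. A minimal standalone sketch of that pattern (the class name and constructor are illustrative, not taken from the snippet):

import threading

import boto3.session


class S3ConnectionCache:
    def __init__(self, region_name=None):
        self.region_name = region_name
        self._connections = threading.local()  # per-thread storage

    @property
    def connection(self):
        # each thread creates and caches its own session/resource,
        # matching boto3's guidance that sessions are not thread-safe
        if getattr(self._connections, 'connection', None) is None:
            session = boto3.session.Session()
            self._connections.connection = session.resource('s3', region_name=self.region_name)
        return self._connections.connection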
Example #11
def deploy_lambda_function(session, function_name):

    # load in configs
    function_cfg = json.load(open('lambda/conf/{}.json'.format(function_name)))
    default_cfg = json.load(open('lambda/conf/defaults.json'))
    default_cfg.update(function_cfg)
    cfg = default_cfg

    lambda_client = session.client('lambda')

    try:

        # function already exists, so update it
        print('checking if function exists: {}'.format(function_name))
        lambda_client.get_function_configuration(
                FunctionName=function_name)

        print('\tupdating function configuration: {}'.format(function_name))
        lambda_client.update_function_configuration(
                FunctionName=function_name,
                Role=session.resource('iam').Role(cfg['role']).arn,
                Handler=cfg['handler'],
                Description=cfg.get('description', ''),  # None fails botocore parameter validation
                Timeout=cfg['timeout'],
                MemorySize=cfg['memory'],
                VpcConfig=cfg.get('vpc', {}))

        print('\tupdating function code: {}'.format(function_name))
        lambda_client.update_function_code(
                FunctionName=function_name,
                ZipFile=open('_deploy/_zip/{}.zip'.format(function_name), 'rb').read())
    except lambda_client.exceptions.ResourceNotFoundException:

        # function is new, so create it
        print('\tcreating new function: {}'.format(function_name))
        lambda_client.create_function(
                FunctionName=function_name,
                Runtime=cfg['runtime'],
                Role=session.resource('iam').Role(cfg['role']).arn,
                Code={
                    'ZipFile': open('_deploy/_zip/{}.zip'.format(function_name), 'rb').read()
                    },
                Handler=cfg['handler'],
                Description=cfg.get('description', ''),  # None fails botocore parameter validation
                Timeout=cfg['timeout'],
                MemorySize=cfg['memory'],
                VpcConfig=cfg.get('vpc', {}))
Example #12
 def __init__(self, queue, bucket, source, destination):
     self._queue = queue
     self._bucket = bucket
     self._source = source
     self._destination = destination
     session = boto3.session.Session()
     self.s3 = session.resource('s3')
     super(UploadWorker, self).__init__()
Example #13
def upload_dbdc5_file(path, body):
    session = boto3.session.Session(
        aws_access_key_id=AWS_ACCESS_KEY,
        aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
    s3 = session.resource('s3')
    return bool(s3.Bucket(AWS_STORAGE_BUCKET_NAME).put_object(Key=path, Body=body))
Example #14
def clean_aws_credential(region_name, credential_key_name, credential_key_file):
    try:
        session = boto3.session.Session(region_name=region_name)
        service = session.resource('ec2')
        key_pair_info = service.KeyPair(credential_key_name)
        key_pair_info.delete()
        cluster.remove_if_exists(credential_key_file)
    except Exception as details:
        logger.exception(str(details))
Example #15
 def test_create_tags_injected_to_resource(self):
     session = boto3.session.Session(region_name='us-west-2')
     with mock.patch('boto3.ec2.createtags.create_tags') as mock_method:
         resource = session.resource('ec2')
         self.assertTrue(hasattr(resource, 'create_tags'),
                         'EC2 resource does not have create_tags method.')
         self.assertIs(resource.create_tags, mock_method,
                       'custom create_tags method was not injected onto '
                       'EC2 service resource')
Example #16
def clean_aws_credential(region_name, credential_key_name, credential_key_file):
    try:
        session = boto3.session.Session(region_name=region_name)
        service = session.resource('ec2')
        key_pair_info = service.KeyPair(credential_key_name)
        key_pair_info.delete()
        cluster.remove_if_exists(credential_key_file)
    except Exception as details:
        logger.exception(str(details))
Example #17
def create_instance(region, instance_name, centos_version,
                    instancetype, vpc, publicaccess, azid):
    session = boto3.session.Session()
    client = session.client('ec2', region_name=region)
    ec2 = session.resource('ec2', region_name=region)
    ec2operat = EC2_Operate(client, ec2, region)
    instancetag = ec2operat.ec2_create(instance_name, centos_version, instancetype, vpc, publicaccess, azid)
    # import into DB
    get_instance_info_bytags(region, [instancetag])
    return instancetag
Example #18
 def test_create_tags_injected_to_resource(self):
     session = boto3.session.Session(region_name='us-west-2')
     with mock.patch('boto3.ec2.createtags.create_tags') as mock_method:
         resource = session.resource('ec2')
         self.assertTrue(hasattr(resource, 'create_tags'),
                         'EC2 resource does not have create_tags method.')
         self.assertIs(
             resource.create_tags, mock_method,
             'custom create_tags method was not injected onto '
             'EC2 service resource')
Example #19
def make_s3_resource(provider: dict, session: boto3.session.Session, config: Config = None) -> object:
    """
    Construct boto3 resource with specified config and remote endpoint
    :param provider: provider configuration from connector configuration.
    :param session: user session to create the client from.
    :param config: client config parameter in case of using creds from the .aws/config file.
    :return: Boto3 S3 resource instance.
    """
    client_kv_args = _get_s3_client_args(provider, config)
    return session.resource("s3", **client_kv_args)
Example #20
    def remove_volume(self, volume, thread_safe=True):
        self.log.debug('Removing Volume {}'.format(volume.volume_id))
        if thread_safe:
            session = boto3.session.Session(
                aws_access_key_id=self.args.access_key_id,
                aws_secret_access_key=self.args.secret_access_key,
                region_name=self.region)
            ec2 = session.resource('ec2')
            volume = ec2.Volume(volume.volume_id)

        volume.delete()
Example #21
    def list_Iam_Users_With_Access_Key(self):
        session = boto3.session.Session()
        iam = session.resource('iam')
        for user in iam.users.all():
            for key in user.access_keys.all():
                AccessId = key.access_key_id
                Status = key.status
                if Status == "Active":
                    print ("User: "******"Key: ", AccessId, "Active")
                else:
                    print ("User: "******"Key: ", AccessId, "InActive")
Example #22
    def init_resources(self, n_db_nodes=None, n_loader_nodes=None, dbs_block_device_mappings=None, loaders_block_device_mappings=None, loaders_type=None, dbs_type=None):
        if n_db_nodes is None:
            n_db_nodes = self.params.get('n_db_nodes')
        if n_loader_nodes is None:
            n_loader_nodes = self.params.get('n_loaders')
        if loaders_type is None:
            loaders_type = self.params.get('instance_type_loader')
        if dbs_type is None:
            dbs_type = self.params.get('instance_type_db')
        user_prefix = self.params.get('user_prefix', None)
        session = boto3.session.Session(region_name=self.params.get('region_name'))
        service = session.resource('ec2')
        self.credentials = RemoteCredentials(service=service,
                                             key_prefix='longevity-test',
                                             user_prefix=user_prefix)

        if self.params.get('db_type') == 'scylla':
            self.db_cluster = ScyllaCluster(ec2_ami_id=self.params.get('ami_id_db_scylla'),
                                            ec2_ami_username=self.params.get('ami_db_scylla_user'),
                                            ec2_security_group_ids=[self.params.get('security_group_ids')],
                                            ec2_subnet_id=self.params.get('subnet_id'),
                                            ec2_instance_type=dbs_type,
                                            service=service,
                                            credentials=self.credentials,
                                            ec2_block_device_mappings=dbs_block_device_mappings,
                                            user_prefix=user_prefix,
                                            n_nodes=n_db_nodes)
        elif self.params.get('db_type') == 'cassandra':
            self.db_cluster = CassandraCluster(ec2_ami_id=self.params.get('ami_id_db_cassandra'),
                                               ec2_ami_username=self.params.get('ami_db_cassandra_user'),
                                               ec2_security_group_ids=[self.params.get('security_group_ids')],
                                               ec2_subnet_id=self.params.get('subnet_id'),
                                               ec2_instance_type=dbs_type,
                                               service=service,
                                               ec2_block_device_mappings=dbs_block_device_mappings,
                                               credentials=self.credentials,
                                               user_prefix=user_prefix,
                                               n_nodes=n_db_nodes)
        else:
            self.error('Incorrect parameter db_type: %s' %
                       self.params.get('db_type'))

        scylla_repo = get_data_path('scylla.repo')
        self.loaders = LoaderSet(ec2_ami_id=self.params.get('ami_id_loader'),
                                 ec2_ami_username=self.params.get('ami_loader_user'),
                                 ec2_security_group_ids=[self.params.get('security_group_ids')],
                                 ec2_subnet_id=self.params.get('subnet_id'),
                                 ec2_instance_type=loaders_type,
                                 service=service,
                                 ec2_block_device_mappings=loaders_block_device_mappings,
                                 credentials=self.credentials,
                                 scylla_repo=scylla_repo,
                                 user_prefix=user_prefix,
                                 n_nodes=n_loader_nodes)
Example #23
    def external_connection(self):
        connection = getattr(self._connections, 'external_connection', None)

        if self.is_refreshable_session:
            try:
                session = self.refreshable_session

                self._connections.external_connection = session.resource(
                    's3',
                    region_name=self.region_name,
                    use_ssl=self.use_ssl,
                    endpoint_url=self.endpoint_external_url,
                    config=self.config,
                    verify=self.verify,
                )
            except KeyError:
                # Handle threadsafe
                session = self.refreshable_session_standalone

                self._connections.external_connection = session.resource(
                    's3',
                    region_name=self.region_name,
                    use_ssl=self.use_ssl,
                    endpoint_url=self.endpoint_external_url,
                    config=self.config,
                    verify=self.verify,
                )
        elif connection is None:
            session = boto3.session.Session(
                aws_access_key_id=self.access_key,
                aws_secret_access_key=self.secret_key,
                aws_session_token=self.security_token)
            self._connections.external_connection = session.resource(
                's3',
                region_name=self.region_name,
                use_ssl=self.use_ssl,
                endpoint_url=self.endpoint_external_url,
                config=self.config,
                verify=self.verify,
            )
        return self._connections.external_connection
Example #24
def dbSetup():
    # Get the service resource.
    DYNAMODB_ACCESS_KEY = environ.get('DYNAMODB_ACCESS_KEY')
    DYNAMODB_SECRET_KEY = environ.get('DYNAMODB_SECRET_KEY')
    SENDGRID_API_KEY = environ.get('SENDGRID_API_KEY')
    environ['AWS_DEFAULT_REGION'] = "us-east-2"
    #global dynamodb
    # if dynamodb == None:
    session = boto3.session.Session()
    dynamodb = session.resource('dynamodb', aws_access_key_id=DYNAMODB_ACCESS_KEY,
                                aws_secret_access_key=DYNAMODB_SECRET_KEY)
    return dynamodb
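A usage sketch for dbSetup; the table name is a placeholder and must already exist in the configured region:

dynamodb = dbSetup()
table = dynamodb.Table('my-table')
print(table.item_count)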
Example #25
 def __init__(self, backup, **settings):
     import boto3.session
     self.backup = backup
     self.settings = settings
     params = {
         'region_name': settings.get('region'),
         'profile_name': settings.get('profile'),
         'aws_access_key_id': settings.get('access_key_id'),
         'aws_secret_access_key': settings.get('secret_access_key'),
     }
     session = boto3.session.Session(**params)
     self.service = session.resource(settings['service'])
Example #26
 def __init__(self, backup, **settings):
     import boto3.session
     self.backup = backup
     self.settings = settings
     params = {
         'region_name': settings.get('region'),
         'profile_name': settings.get('profile'),
         'aws_access_key_id': settings.get('access_key_id'),
         'aws_secret_access_key': settings.get('secret_access_key'),
     }
     session = boto3.session.Session(**params)
     self.service = session.resource(settings['service'])
Example #27
def s3_download(
    source: str,
    destination: Optional[str] = None,
    exists_strategy: ExistsStrategy = ExistsStrategy.RAISE,
    profile_name: Optional[str] = None,
) -> Optional[str]:
    """
    Copy a file from an S3 source to a local destination.

    Parameters
    ----------
    source : str
        Path starting with s3://, e.g. 's3://bucket-name/key/foo.bar'
    destination : str, optional
        If none is given, a temporary file is created
    exists_strategy : {'raise', 'replace', 'abort'}
        What is done when the destination already exists?
        * `ExistsStrategy.RAISE` means a RuntimeError is raised,
        * `ExistsStrategy.REPLACE` means the local file is replaced,
        * `ExistsStrategy.ABORT` means the download is not done.
    profile_name : str, optional
        AWS profile

    Returns
    -------
    download_path : Optional[str]
        Path of the downloaded file, if any was downloaded.

    Raises
    ------
    botocore.exceptions.NoCredentialsError
        Botocore is not able to find your credentials. Either specify
        profile_name or add the environment variables AWS_ACCESS_KEY_ID,
        AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN.
        See https://boto3.readthedocs.io/en/latest/guide/configuration.html
    """
    if not isinstance(exists_strategy, ExistsStrategy):
        raise ValueError(
            f"exists_strategy '{exists_strategy}' is not in {ExistsStrategy}")
    session = boto3.session.Session(profile_name=profile_name)
    s3 = session.resource("s3")
    bucket_name, key = _s3_path_split(source)
    if destination is None:
        _, filename = os.path.split(source)
        prefix, suffix = os.path.splitext(filename)
        _, destination = mkstemp(prefix=prefix, suffix=suffix)
    elif os.path.isfile(destination):
        if exists_strategy is ExistsStrategy.RAISE:
            raise RuntimeError(f"File '{destination}' already exists.")
        elif exists_strategy is ExistsStrategy.ABORT:
            return None
    s3.Bucket(bucket_name).download_file(key, destination)
    return destination
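A usage sketch for s3_download; the bucket, key, and profile below are placeholders:

path = s3_download(
    's3://my-bucket/data/foo.csv',
    destination='/tmp/foo.csv',
    exists_strategy=ExistsStrategy.REPLACE,
    profile_name='my-profile',
)
print(path)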
Example #28
def simple_upload():
    session = boto3.session.Session(profile_name=AWS_PROFILE)
    s3 = session.resource('s3')

    file_name = create_temp_file(100 * 1000 * 1024, 'file.txt', 't')

    s3_object = s3.Object(BUCKET_NAME, file_name)
    try:
        s3_object.upload_file(file_name)
        logger.info('file uploaded')
    except Exception as e:
        logger.error(e)
Example #29
 def available_volumes(self):
     self.log.debug('Finding unused Volumes in Account {} Region {}'.format(
         self.account, self.region))
     session = aws_session(self.account, self.role)
     ec2 = session.resource('ec2', region_name=self.region)
     volumes = ec2.volumes.filter(Filters=[{
         'Name': 'status',
         'Values': ['available']
     }])
     self.log.debug(
         'Found {} unused Volumes in Account {} Region {}'.format(
             len(list(volumes)), self.account, self.region))
     return volumes
Example #30
 def connection(self):
     connection = getattr(self._connections, 'connection', None)
     if connection is None:
         session = self._create_session()
         self._connections.connection = session.resource(
             's3',
             region_name=self.region_name,
             use_ssl=self.use_ssl,
             endpoint_url=self.endpoint_url,
             config=self.config,
             verify=self.verify,
         )
     return self._connections.connection
Example #31
def copy_objects_by_thread(thread_id, copied_objects):
    # HOWTO: each thread should have its own session
    # http://boto3.readthedocs.io/en/latest/guide/resources.html#multithreading
    session = boto3.session.Session()

    if args.optimal:
        # HOWTO: low-level control
        # http://boto3.readthedocs.io/en/latest/_modules/boto3/s3/transfer.html
        client_config = botocore.config.Config(
            max_pool_connections=args.max_concurrency)
        transfer_config = boto3.s3.transfer.TransferConfig(
            multipart_threshold=8 * 1024 * 1024,
            multipart_chunksize=8 * 1024 * 1024,
            max_concurrency=args.max_concurrency,
            num_download_attempts=5,
            max_io_queue=100,
            io_chunksize=256 * 1024)
        client = session.client('s3', config=client_config)
    else:
        s3 = session.resource('s3')
        client = session.client('s3')

    count = 0
    while True:
        prefix = tasks.get()
        # HOWTO: list objects
        response = client.list_objects_v2(
            Bucket=src_bucket.name,
            Prefix=prefix)  # Important: using prefix to limit listing
        for content in response['Contents']:
            key = content['Key']
            trace('thread %d copy object: s3://%s/%s' % \
                (thread_id, src_bucket.name, key))
            if not args.dryrun:
                if args.optimal:
                    client.copy(CopySource={
                        'Bucket': src_bucket.name,
                        'Key': key
                    },
                                Bucket=dst_bucket.name,
                                Key=key,
                                Config=transfer_config)
                else:
                    obj = s3.Object(dst_bucket.name, key)
                    obj.copy_from(CopySource={
                        'Bucket': src_bucket.name,
                        'Key': key
                    }, )
                count += 1
        copied_objects[thread_id] = count
        tasks.task_done()
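A driver sketch for the worker above, showing the one-session-per-thread pattern its opening comment describes; tasks, src_bucket, dst_bucket, and args are module-level objects the snippet assumes, and the thread count is illustrative:

import threading

num_threads = 8
copied_objects = [0] * num_threads
for tid in range(num_threads):
    worker = threading.Thread(target=copy_objects_by_thread,
                              args=(tid, copied_objects), daemon=True)
    worker.start()

tasks.join()  # block until every queued prefix has been processed
print('copied %d objects' % sum(copied_objects))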
Example #32
def write_to_storagegrid(content):
    try:
        filename = 'file8.txt'
        bucketname = 'test-bucket-store1'

        session = boto3.session.Session(profile_name='default')
        endpoint = 'https://113.29.246.178:8082/'
        s3 = session.resource(service_name='s3',
                              endpoint_url=endpoint,
                              verify=False)

        file = s3.Object(bucketname, filename).put(Body=content['stream'])

    except Exception as error:
        print(error)
Example #33
 def available_volumes(self):
     self.log.debug('Finding unused Volumes in Region {}'.format(
         self.region))
     session = boto3.session.Session(
         aws_access_key_id=self.args.access_key_id,
         aws_secret_access_key=self.args.secret_access_key,
         region_name=self.region)
     ec2 = session.resource('ec2')
     volumes = ec2.volumes.filter(Filters=[{
         'Name': 'status',
         'Values': ['available']
     }])
     self.log.debug('Found {} unused Volumes in Region {}'.format(
         len(list(volumes)), self.region))
     return volumes
Example #34
 def connection(self):
     connection = getattr(self._connections, 'connection', None)
     if connection is None:
         session = boto3.session.Session()
         self._connections.connection = session.resource(
             's3',
             aws_access_key_id=self.access_key,
             aws_secret_access_key=self.secret_key,
             aws_session_token=self.security_token,
             region_name=self.region_name,
             use_ssl=self.use_ssl,
             endpoint_url=self.endpoint_url,
             config=self.config,
             verify=self.verify,
         )
     return self._connections.connection
Example #35
 def connection(self):
     connection = getattr(self._connections, 'connection', None)
     if connection is None:
         session = boto3.session.Session()
         self._connections.connection = session.resource(
             's3',
             aws_access_key_id=self.access_key,
             aws_secret_access_key=self.secret_key,
             aws_session_token=self.security_token,
             region_name=self.region_name,
             use_ssl=self.use_ssl,
             endpoint_url=self.endpoint_url,
             config=self.config,
             verify=self.verify,
         )
     return self._connections.connection
Example #36
 def connection(self):
     # TODO: Support host, port like in s3boto
     # Note that proxies are handled by environment variables that the underlying
     # urllib/requests libraries read. See https://github.com/boto/boto3/issues/338
     # and http://docs.python-requests.org/en/latest/user/advanced/#proxies
     if self._connection is None:
         session = boto3.session.Session()
         self._connection = session.resource(
             self.connection_service_name,
             aws_access_key_id=self.access_key,
             aws_secret_access_key=self.secret_key,
             region_name=self.region_name,
             use_ssl=self.use_ssl,
             endpoint_url=self.endpoint_url,
             config=self.config)
     return self._connection
Example #37
def s3_delete(video_id):
    params = {
        'aws_access_key_id': celery.conf.get('S3_ACCESS_KEY'),
        'aws_secret_access_key': celery.conf.get('S3_SECRET_KEY'),
    }

    if celery.conf.get('S3_ENDPOINT_URL'):
        params['endpoint_url'] = celery.conf.get('S3_ENDPOINT_URL')

    if celery.conf.get('S3_REGION'):
        params['region_name'] = celery.conf.get('S3_REGION')

    session = boto3.session.Session()
    s3 = session.resource('s3', **params)
    bucket = s3.Bucket(celery.conf.get('S3_BUCKET'))
    bucket.objects.filter(Prefix=f"{video_id}/").delete()
Example #38
def test_all_collections():
    # This generator yields test functions for every collection
    # on every available resource, except those which have
    # been blacklisted.
    session = boto3.session.Session()
    for service_name in session.get_available_resources():
        resource = session.resource(
            service_name,
            region_name=REGION_MAP.get(service_name, 'us-west-2'))

        for key in dir(resource):
            if key in BLACKLIST.get(service_name, []):
                continue

            value = getattr(resource, key)
            if isinstance(value, CollectionManager):
                yield _test_collection, service_name, key, value
Example #39
 def connection(self):
     # TODO: Support host, port like in s3boto
     # Note that proxies are handled by environment variables that the underlying
     # urllib/requests libraries read. See https://github.com/boto/boto3/issues/338
     # and http://docs.python-requests.org/en/latest/user/advanced/#proxies
     if self._connection is None:
         session = boto3.session.Session()
         self._connection = session.resource(
             self.connection_service_name,
             aws_access_key_id=self.access_key,
             aws_secret_access_key=self.secret_key,
             region_name=self.region_name,
             use_ssl=self.use_ssl,
             endpoint_url=self.endpoint_url,
             config=self.config
         )
     return self._connection
Example #40
    def init_resources(self):
        session = boto3.session.Session(region_name=self.params.get('region_name'))
        service = session.resource('ec2')
        self.credentials = RemoteCredentials(service=service,
                                             key_prefix='longevity-test')

        self.db_cluster = ScyllaCluster(ec2_ami_id=self.params.get('ami_id_db'),
                                        ec2_security_group_ids=[self.params.get('security_group_ids')],
                                        ec2_subnet_id=self.params.get('subnet_id'),
                                        ec2_instance_type=self.params.get('instance_type_db'),
                                        service=service,
                                        credentials=self.credentials,
                                        n_nodes=self.params.get('n_db_nodes'))

        scylla_repo = self.get_data_path('scylla.repo')
        self.loaders = LoaderSet(ec2_ami_id=self.params.get('ami_id_loader'),
                                 ec2_security_group_ids=[self.params.get('security_group_ids')],
                                 ec2_subnet_id=self.params.get('subnet_id'),
                                 ec2_instance_type=self.params.get('instance_type_loader'),
                                 service=service,
                                 credentials=self.credentials,
                                 scylla_repo=scylla_repo,
                                 n_nodes=self.params.get('n_loaders'))
Example #41
import boto3
import boto3.session

session = boto3.session.Session(profile_name='my_profile')

'''
Do not use this in production - disabling SSL verification is discouraged!
When using a self-signed certificate, make sure to pass it into the constructor:

endpoint = 'https://sg-gw1.mycompany.com:8082'
s3 = session.resource(service_name='s3', endpoint_url=endpoint, verify='server_cert.pem')
'''

endpoint = 'https://10.65.57.176:8082'
s3 = session.resource(service_name='s3', endpoint_url=endpoint, verify=False)
client = s3.meta.client

'''
Bucket related operations
'''

# Create new bucket for S3 account
s3.Bucket('my-bucket').create()

# List all buckets for S3 account
for bucket in s3.buckets.all():
    print(bucket.name)

# Delete bucket
s3.Bucket('my-bucket').delete()
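
'''
Object related operations (a sketch; the bucket must exist and the key below is a placeholder)
'''

# Upload an object body
s3.Object('my-bucket', 'my-key.txt').put(Body=b'example content')

# Read the object back
body = s3.Object('my-bucket', 'my-key.txt').get()['Body'].read()

# Delete the object
s3.Object('my-bucket', 'my-key.txt').delete()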
Example #42
 def test_bucket_resource_has_load_method(self):
     session = boto3.session.Session(region_name='us-west-2')
     bucket = session.resource('s3').Bucket('fakebucket')
     self.assertTrue(hasattr(bucket, 'load'),
                     'load() was not injected onto S3 Bucket resource.')
Example #43
def main():
    args = get_parser().parse_args()

    prefix = args.output_prefix
    if not prefix:
        prefix = "{source_ami_name}-{dt:%Y%m%d%H%M}".format(source_ami_name=args.ami_name,
                                                            dt=datetime.datetime.utcnow())

    vmdk = prefix + ".vmdk"
    box = prefix + ".box"
    guestbox = prefix + "-guest.box"

    # Allocate run identifier to uniquely name temporary resources.
    run_name = "ectou-export-{run_id}".format(run_id=uuid.uuid4())

    # Create boto session.
    session = boto3.session.Session()
    ec2 = session.resource("ec2", args.region)

    # Resolve source and builder images.
    source_image = get_image(ec2, args.ami_owner, args.ami_name)
    builder_image = get_image(ec2, args.builder_ami_owner, args.builder_ami_name)

    # Resolve VPC if provided, otherwise assume account has default VPC.
    vpc = None
    if args.vpc_id:
        vpc = get_first(ec2.vpcs.filter(VpcIds=[args.vpc_id]))
    elif args.vpc_name:
        vpc = get_first(ec2.vpcs.filter(Filters=[{"Name": "tag:Name", "Values": [args.vpc_name]}]))

    subnet = None
    if vpc:
        if args.subnet_id:
            subnet = get_first(vpc.subnets.filter(SubnetIds=[args.subnet_id]))
        else:
            subnet = get_first(vpc.subnets.all())

    # Set options for explicit VPC, default VPC.
    vpc_id = vpc.id if vpc else ""
    subnet_id = subnet.id if subnet else ""

    with resource_cleanup(args.debug) as cleanup:

        # Create temporary key pair
        key_pair = ec2.create_key_pair(KeyName=run_name)
        defer_delete(cleanup, key_pair)

        # Create temporary security group
        sg = ec2.create_security_group(GroupName=run_name,
                                       Description="Temporary security group for ectou-export",
                                       VpcId=vpc_id)
        defer_delete(cleanup, sg)

        # Enable ssh access
        sg.authorize_ingress(IpPermissions=[dict(
                IpProtocol="tcp",
                FromPort=22,
                ToPort=22,
                IpRanges=[dict(CidrIp="0.0.0.0/0")],
        )])

        # Launch builder EC2 instance
        instance = get_first(ec2.create_instances(ImageId=builder_image.id,
                                                  MinCount=1,
                                                  MaxCount=1,
                                                  KeyName=key_pair.name,
                                                  InstanceType=args.instance_type,
                                                  NetworkInterfaces=[dict(
                                                          DeviceIndex=0,
                                                          SubnetId=subnet_id,
                                                          Groups=[sg.id],
                                                          AssociatePublicIpAddress=True,
                                                  )]))
        defer_terminate(cleanup, instance)

        instance.create_tags(Tags=[{"Key": "Name", "Value": run_name}])
        instance.wait_until_running()

        # Attach source image as device
        attach_ebs_image(ec2, instance, source_image, args.device_name)

        # Save key pair for ssh
        with open(PRIVATE_KEY_FILE, "w") as f:
            os.chmod(PRIVATE_KEY_FILE, 0o600)
            f.write(key_pair.key_material)

        print "To access instance for debugging:"
        print "  ssh -i {} {}@{}".format(PRIVATE_KEY_FILE, args.builder_username, instance.public_ip_address)

        ssh_client = connect_ssh(args.builder_username, instance.public_ip_address, PRIVATE_KEY_FILE)

        # Export device to vmdk
        provision_file_put(ssh_client, EXPORT_SCRIPT, "export.sh")
        provision_shell(ssh_client, ["sudo", "bash", "export.sh", args.device_name, "export.vmdk", args.yum_proxy],
                        get_pty=True)
        provision_file_get(ssh_client, "export.vmdk", vmdk)

    # Package vmdk into vagrant box
    local_cmd(["bash", PACKAGE_SCRIPT, vmdk, box])

    # Install guest additions, apply security updates.
    local_cmd(["bash", GUEST_SCRIPT, box, guestbox])
Example #44
def drill_grid(df_grid, x_cols, xi, yi, stamp, grid_submit_path, do_blending=True):
  # params init
  best_score = 0
  all_score = []
  Xs, ys = {}, {}
  for m in ['tr', 'te']:
    Xs[m], ys[m], row_id = conv.df2sample(df_grid[m], x_cols)
  scnt = len(ys['tr'])
  
  mdl_path = '/'.join(grid_submit_path.split('/')[:-1])
  dat_file = 'dat_%i_%i.pkl' % (xi, yi)
  sol_file = 'dat_%i_%i.sol.%s' % (xi, yi, stamp)

  # if scnt > 1500:
  #   mdl_configs = [
  #     {'alg': 'skrf', 'n_estimators': 500, 'max_features': 0.35, 'max_depth': 15},
  #     {'alg': 'skrfp', 'n_estimators': 500, 'max_features': 0.35, 'max_depth': 15},
  #     {'alg': 'sket', 'n_estimators': 500, 'max_features': 0.4, 'max_depth': 15},
  #     {'alg': 'sketp', 'n_estimators': 500, 'max_features': 0.4, 'max_depth': 11},
  #   ]
  # else:
  mdl_configs = raw_mdl_configs

  # prepare data
  cmds = {
    'mdl_configs': mdl_configs,
    'Xs': {k:v.values for k,v in Xs.items()},
    'ys': ys,
    'row_id': row_id.values,
    'sol_file': sol_file,
  }
  dat_path = "%s/%s" % (mdl_path, dat_file)
  try:
    pickle.dump(cmds, open(dat_path, 'wb'), protocol=2)  # Lambda runs Python 2, whose newest pickle protocol is 2
  except Exception as e:
    print(e)
    print("ERROR for (%i/%i), cmds: %s" % (xi, yi, cmds))

  # upload to s3 for lambda
  job_done = False

  # bucket = boto3.resource('s3', region_name='us-west-2').Bucket('recom.lambda.m1536')
  session = boto3.session.Session()
  bucket = session.resource('s3', region_name='us-west-2').Bucket('recom.lambda.m1536')

  #-----[use aws lambda]-------------------------------------------
  if True: #scnt < 2500:
    # if scnt > 1000:
    #   bucket = bucket1536
    # elif scnt > 500:
    #   bucket = bucket1024
    # elif scnt > 300:  
    #   bucket = bucket512
    # else:
    #   bucket = bucket256
    try:
      bucket.upload_file(dat_path, dat_file)
    except Exception as e:
      print(e)
      print("when bucket.upload_file", dat_path)
    print("upload dat_file %s of %i tr samples @ %s" % (dat_file, len(ys['tr']), datetime.now()))
    df_grid, Xs, ys, cmds = [None]*4  # release memory (row_id is still needed below)
    
    
    # print("try download %s to %s" % (sol_file, grid_submit_path))
    try_cnt, max_try = 0, 6
    while try_cnt <= max_try:
      try:
        bucket.download_file(sol_file, grid_submit_path)
        job_done = True
        break
      except Exception as e:
        if try_cnt > 4: print("(%i/%i) scnt=%i, waiting %i ... @ %s" % (xi, yi, scnt, try_cnt, datetime.now()))
        try_cnt += 1
        sleep(30)

    # remove tmp files
    bucket.delete_objects(Delete={'Objects': [{'Key': sol_file}], 'Quiet': True,})

    # collect sols
    if job_done:
      try:
        sols = json.load(open(grid_submit_path, 'rt'))
      except Exception as e:
        print(e)
        print("when json try load %s" % grid_submit_path)
      # print(sols[:5])
      sols = pd.DataFrame(sols)
      sols['row_id'] = row_id
      df2submit(sols, grid_submit_path)
      # print("get sols:\n %s \n@ %s" % (sols.head(), datetime.now()))
      print("[drill_grid (%i,%i)] blended @ %s" % (xi, yi, datetime.now()))
    else:
      sols = None
      print("[TIMEOUT] job timeout: (%i/%i)" % (xi, yi))

  #-----[use local machine]-------------------------------------------
  else:
    all_bt_preds = []
    for bcfg in mdl_configs:
      bmdl = get_alg(bcfg['alg'], bcfg)
      bmdl.fit(Xs['tr'], ys['tr'])
      _, bt_preds = drill_eva(bmdl, Xs['te'], ys['te'])
      all_bt_preds.append(bt_preds)
    sols = blending(all_bt_preds)
    sols = pd.DataFrame(sols)
    sols['row_id'] = row_id
    df2submit(sols, grid_submit_path)
    print("[LOCAL] done (%i/%i) locally @ %s" % (xi, yi, datetime.now()))

  return 1.0, sols
Example #45
    def get_cluster_aws(self, loader_info, db_info, monitor_info):
        if loader_info['n_nodes'] is None:
            loader_info['n_nodes'] = self.params.get('n_loaders')
        if loader_info['type'] is None:
            loader_info['type'] = self.params.get('instance_type_loader')
        if db_info['n_nodes'] is None:
            db_info['n_nodes'] = self.params.get('n_db_nodes')
        if db_info['type'] is None:
            db_info['type'] = self.params.get('instance_type_db')
        if monitor_info['n_nodes'] is None:
            monitor_info['n_nodes'] = self.params.get('n_monitor_nodes')
        if monitor_info['type'] is None:
            monitor_info['type'] = self.params.get('instance_type_monitor')
        user_prefix = self.params.get('user_prefix', None)
        session = boto3.session.Session(region_name=self.params.get('region_name'))
        service = session.resource('ec2')
        self.credentials = RemoteCredentials(service=service,
                                             key_prefix='longevity-test',
                                             user_prefix=user_prefix)

        if self.params.get('db_type') == 'scylla':
            self.db_cluster = ScyllaAWSCluster(ec2_ami_id=self.params.get('ami_id_db_scylla'),
                                               ec2_ami_username=self.params.get('ami_db_scylla_user'),
                                               ec2_security_group_ids=[self.params.get('security_group_ids')],
                                               ec2_subnet_id=self.params.get('subnet_id'),
                                               ec2_instance_type=db_info['type'],
                                               service=service,
                                               credentials=self.credentials,
                                               ec2_block_device_mappings=db_info['device_mappings'],
                                               user_prefix=user_prefix,
                                               n_nodes=db_info['n_nodes'],
                                               params=self.params)
        elif self.params.get('db_type') == 'cassandra':
            self.db_cluster = CassandraAWSCluster(ec2_ami_id=self.params.get('ami_id_db_cassandra'),
                                                  ec2_ami_username=self.params.get('ami_db_cassandra_user'),
                                                  ec2_security_group_ids=[self.params.get('security_group_ids')],
                                                  ec2_subnet_id=self.params.get('subnet_id'),
                                                  ec2_instance_type=db_info['type'],
                                                  service=service,
                                                  ec2_block_device_mappings=db_info['device_mappings'],
                                                  credentials=self.credentials,
                                                  user_prefix=user_prefix,
                                                  n_nodes=db_info['n_nodes'],
                                                  params=self.params)
        else:
            self.error('Incorrect parameter db_type: %s' %
                       self.params.get('db_type'))

        scylla_repo = get_data_path('scylla.repo')
        self.loaders = LoaderSetAWS(ec2_ami_id=self.params.get('ami_id_loader'),
                                    ec2_ami_username=self.params.get('ami_loader_user'),
                                    ec2_security_group_ids=[self.params.get('security_group_ids')],
                                    ec2_subnet_id=self.params.get('subnet_id'),
                                    ec2_instance_type=loader_info['type'],
                                    service=service,
                                    ec2_block_device_mappings=loader_info['device_mappings'],
                                    credentials=self.credentials,
                                    scylla_repo=scylla_repo,
                                    user_prefix=user_prefix,
                                    n_nodes=loader_info['n_nodes'],
                                    params=self.params)

        self.monitors = MonitorSetAWS(ec2_ami_id=self.params.get('ami_id_monitor'),
                                      ec2_ami_username=self.params.get('ami_monitor_user'),
                                      ec2_security_group_ids=[self.params.get('security_group_ids')],
                                      ec2_subnet_id=self.params.get('subnet_id'),
                                      ec2_instance_type=monitor_info['type'],
                                      service=service,
                                      ec2_block_device_mappings=monitor_info['device_mappings'],
                                      credentials=self.credentials,
                                      scylla_repo=scylla_repo,
                                      user_prefix=user_prefix,
                                      n_nodes=monitor_info['n_nodes'],
                                      params=self.params)
Example #46
try:
    import configparser
except ImportError:
    import ConfigParser as configparser

requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

parser = configparser.ConfigParser()
parser.read('/indexer/indexer.conf')
endpoint = parser.get('config', 'endpoint')
access_key = parser.get('config', 'access_key')
secret_key = parser.get('config', 'secret_key')

audit_log = '/mnt/auditlogs/audit.log'

session = boto3.session.Session(aws_access_key_id=access_key, aws_secret_access_key=secret_key)
s3 = session.resource(service_name='s3', endpoint_url=endpoint, region_name='us-west-2', verify=False)
s3.meta.client.meta.events.unregister('before-sign.s3', fix_s3_host)

es = Elasticsearch('elasticsearch:9200')

print "Indexer started..."

buckets = []
for bucket in s3.buckets.all():
    buckets.append(bucket.name)

print "Will monitor the following buckets: " +  ', '.join(buckets)

pattern = re.compile(r'.*?S3BK\(CSTR\):"(.+?)".*?S3KY\(CSTR\):"(.+?)".*?ATYP\(FC32\):(.+?)].*')

for line in tailer.follow(open(audit_log)):
Example #47
###### Creating Initial WorkBooks ######
    book = xlwt.Workbook(encoding="utf-8")
    style = xlwt.easyxf('font: bold 1')
    for name in sheet_name:
        book.add_sheet(name)
    for i, val in enumerate(sheet_name):
        if val == 'EC2':
            sheet = book.get_sheet(i)
            for j, name in enumerate(sheet_ec2):
                row_counter = 0
                sheet.col(j).width = 256 * len(name)
                sheet.write(row_counter, j, name, style)
            session = boto3.session.Session(profile_name=aws_profile)
            ec2 = session.resource('ec2')
            for instance in ec2.instances.all():
                data_list = []  # build one row per instance, not per tag
                for tag in instance.tags:
                    if tag['Key'] == 'Name':
                        INS_NAME = tag['Value']
                        data_list.append(INS_NAME)
                INS_ID = instance.id
                data_list.append(INS_ID)
                INS_STATE = instance.state['Name']
                data_list.append(INS_STATE)
                INS_PRI_IP = instance.private_ip_address
                data_list.append(INS_PRI_IP)
                INS_EIP = instance.public_ip_address
                data_list.append(INS_EIP)
Example #48
 def run(self, bucketname):
     session = boto3.session.Session()
     s3 = session.resource('s3')
     bucket1 = s3.Bucket(bucketname)
     print(bucket1.creation_date)