Example #1
0
def remove_deploy_output(bundle_name, deploy_name):
    """Delete the regular output file of a deploy from the target bucket.

    Logs a warning (instead of failing) when the output file is absent.

    :param bundle_name: name of the bundle the deploy belongs to
    :param deploy_name: name of the deploy whose output file is removed
    """
    from syndicate.core import CONFIG, CONN
    key = _build_output_key(bundle_name=bundle_name,
                            deploy_name=deploy_name,
                            is_regular_output=True)
    if CONN.s3().is_file_exists(CONFIG.deploy_target_bucket,
                                key):
        CONN.s3().remove_object(CONFIG.deploy_target_bucket, key)
    else:
        # Logger.warn is a deprecated alias of warning; use lazy %-args
        # so the message is only rendered when the record is emitted.
        _LOG.warning('Output file for deploy %s does not exist.',
                     deploy_name)
Example #2
0
def load_deploy_output(bundle_name, deploy_name):
    """Load and parse the regular JSON output file of the given deploy.

    :param bundle_name: bundle the deploy belongs to
    :param deploy_name: deploy whose output file is loaded
    :return: parsed output content (result of ``json.loads``)
    :raises AssertionError: if the output file is missing from the bucket
    """
    from syndicate.core import CONFIG, CONN
    key = _build_output_key(bundle_name=bundle_name,
                            deploy_name=deploy_name,
                            is_regular_output=True)
    # Guard clause: fail fast when there is nothing to load.
    if not CONN.s3().is_file_exists(CONFIG.deploy_target_bucket, key):
        raise AssertionError('Deploy name {0} does not exist.'
                             ' Cannot find output file.'.format(deploy_name))
    body = CONN.s3().load_file_body(CONFIG.deploy_target_bucket, key)
    return json.loads(body)
Example #3
0
def create_bundles_bucket():
    """Ensure the deploy target (bundles) bucket exists, creating it if needed.

    Bucket name and region are taken from the project CONFIG.
    """
    from syndicate.core import CONFIG, CONN
    bucket = CONFIG.deploy_target_bucket
    if CONN.s3().is_bucket_exists(bucket):
        # Lazy %-args keep the logging style consistent with the rest of
        # this module and defer string rendering to the logger.
        _LOG.info('Bundles bucket %s already exists', bucket)
        return
    _LOG.info('Bundles bucket %s does not exist. Creating bucket..', bucket)
    CONN.s3().create_bucket(
        bucket_name=bucket,
        location=CONFIG.region)
    _LOG.info('%s bucket created successfully', bucket)
Example #4
0
def if_bundle_exist(bundle_name):
    """Return the S3 keys found under the bundle's folder prefix.

    A truthy (non-empty) result means the bundle exists in the deploy
    target bucket; asserts first that the bucket itself exists.

    :param bundle_name: name of the bundle to look up
    """
    from syndicate.core import CONFIG, CONN
    _assert_bundle_bucket_exists()
    prefix = bundle_name + DEFAULT_SEP
    return CONN.s3().get_keys_by_prefix(CONFIG.deploy_target_bucket, prefix)
Example #5
0
def create_deploy_output(bundle_name, deploy_name, output, success,
                         replace_output=False):
    """Serialize and store a deploy output file in the deploy target bucket.

    An existing output file is kept untouched (with a warning) unless
    ``replace_output`` is set.

    :param bundle_name: bundle the deploy belongs to
    :param deploy_name: name of the deploy
    :param output: JSON-serializable output object
    :param success: True for a regular output key, False for a failed one
    :param replace_output: overwrite an existing output file when True
    """
    from syndicate.core import CONFIG, CONN
    output_str = json.dumps(output, default=_json_serial)
    key = _build_output_key(bundle_name=bundle_name,
                            deploy_name=deploy_name,
                            is_regular_output=success)
    if CONN.s3().is_file_exists(CONFIG.deploy_target_bucket,
                                key) and not replace_output:
        # Logger.warn is a deprecated alias of warning; lazy %-args defer
        # message rendering to the logger.
        _LOG.warning('Output file for deploy %s already exists.',
                     deploy_name)
    else:
        CONN.s3().put_object(output_str, key,
                             CONFIG.deploy_target_bucket,
                             'application/json')
        _LOG.info('Output file with name %s has been %s',
                  key, 'replaced' if replace_output else 'created')
Example #6
0
def _assert_bundle_bucket_exists():
    """Raise AssertionError unless the deploy target bucket exists."""
    from syndicate.core import CONFIG, CONN
    bucket_name = CONFIG.deploy_target_bucket
    if CONN.s3().is_bucket_exists(bucket_name):
        return
    raise AssertionError("Bundles bucket {0} does not exist."
                         " Please use 'create_deploy_target_bucket' to "
                         "create the bucket."
                         .format(bucket_name))
Example #7
0
 def real_wrapper(*args, **kwargs):
     """Skip the wrapped call when the deploy output file already exists.

     Reads 'deploy_name', 'bundle_name' and optional 'replace_output'
     from kwargs; otherwise delegates to the wrapped ``func``.
     """
     deploy_name = kwargs.get('deploy_name')
     bundle_name = kwargs.get('bundle_name')
     replace_output = kwargs.get('replace_output')
     if deploy_name and bundle_name and not replace_output:
         output_file_name = '{}/outputs/{}.json'.format(
             bundle_name, deploy_name)
         exists = CONN.s3().is_file_exists(CONFIG.deploy_target_bucket,
                                           key=output_file_name)
         if exists:
             # Logger.warn is a deprecated alias of warning; lazy %-args
             # defer message rendering to the logger.
             _LOG.warning(
                 'Output file already exists with name %s.'
                 ' If it should be replaced with new one, '
                 'use --replace_output flag.', output_file_name)
             return
     return func(*args, **kwargs)
Example #8
0
    def real_wrapper(*args, **kwargs):
        """Skip the wrapped call when the deploy output file already exists.

        Reads 'deploy_name', 'bundle_name' and optional 'replace_output'
        from kwargs; otherwise delegates to the wrapped ``func``.
        """
        from syndicate.core import CONN
        from syndicate.core import CONFIG
        deploy_name = kwargs.get('deploy_name')
        bundle_name = kwargs.get('bundle_name')
        replace_output = kwargs.get('replace_output')
        if deploy_name and bundle_name and not replace_output:
            output_file_name = '{}/outputs/{}.json'.format(bundle_name,
                                                           deploy_name)

            exists = CONN.s3().is_file_exists(
                CONFIG.deploy_target_bucket,
                key=output_file_name)
            if exists:
                # Logger.warn is a deprecated alias of warning.
                _LOG.warning(f'Output file already exists with name '
                             f'{output_file_name}. If it should be replaced '
                             f'with new one, use --replace_output flag.')
                return
        return func(*args, **kwargs)
def _create_ebs_app_env_from_meta(name, meta):
    """Create an Elastic Beanstalk application, environment and app version.

    Validates every referenced resource (SNS topic, EC2 key pair, IAM
    roles, solution stack, deployment package in S3) before creating
    anything. If an application named *name* already exists, returns its
    description immediately instead of creating a new one.

    :param name: EBS application name
    :param meta: resource meta dict; this code reads 'env_settings',
        'notification_topic' (optional), 'ec2_key_pair', 'ec2_role',
        'ebs_service_role', 'image_id' (optional), 'stack', 'env_name',
        'tier' and the S3 package key under ``S3_PATH_NAME`` -- full
        schema not visible here, confirm against the meta definition
    :return: description object produced by ``describe_ebs``
    :raises AssertionError: when a referenced resource does not exist
    """
    # Short-circuit: app already present, just describe it.
    response = _EBS_CONN.describe_applications([name])
    if response:
        _LOG.warn('%s EBS app exists.', name)
        return describe_ebs(name, meta, response[0])

    env_settings = meta['env_settings']
    topic_name = meta.get('notification_topic')
    # check topic exists
    if topic_name:
        topic_arn = CONN.sns().get_topic_arn(topic_name)
        if topic_arn:
            env_settings.append({
                "OptionName": "Notification Topic ARN",
                "Namespace": "aws:elasticbeanstalk:sns:topics",
                "Value": "{0}".format(topic_arn)
            })
        else:
            raise AssertionError('Cant find notification '
                                 'topic {0} for EBS.'.format(topic_name))
    # check key pair exists
    key_pair_name = meta['ec2_key_pair']
    if _EC2_CONN.if_key_pair_exists(key_pair_name):
        env_settings.append({
            "OptionName": "KeyName",
            "ResourceName": "AWSEBAutoScalingLaunchConfiguration",
            "Namespace": "aws:cloudformation:template:resource:property",
            "Value": key_pair_name
        })
    else:
        raise AssertionError('Specified key pair '
                             'does not exist: {0}.'.format(key_pair_name))
    # check ec2 role exists
    iam_role = meta['ec2_role']
    if _IAM_CONN.check_if_role_exists(iam_role):
        env_settings.append({
            "OptionName": "IamInstanceProfile",
            "ResourceName": "AWSEBAutoScalingLaunchConfiguration",
            "Namespace": "aws:autoscaling:launchconfiguration",
            "Value": iam_role
        })
    else:
        raise AssertionError(
            'Specified iam role does not exist: {0}.'.format(iam_role))
    # check service role exists
    iam_role = meta['ebs_service_role']
    if _IAM_CONN.check_if_role_exists(iam_role):
        env_settings.append({
            "OptionName": "ServiceRole",
            "Namespace": "aws:elasticbeanstalk:environment",
            "Value": iam_role
        })
    else:
        raise AssertionError('Specified iam role '
                             'does not exist: {0}.'.format(iam_role))
    # optional custom AMI; absence is tolerated with a warning only
    image_id = meta.get('image_id')
    if image_id:
        env_settings.append({
            "OptionName": "ImageId",
            "ResourceName": "AWSEBAutoScalingLaunchConfiguration",
            "Namespace": "aws:autoscaling:launchconfiguration",
            "Value": image_id
        })
    else:
        _LOG.warn('Image id is not specified.')
    # check that desired solution stack exists
    stack = meta['stack']
    available_stacks = _EBS_CONN.describe_available_solutions_stack_names()
    if stack not in available_stacks:
        raise AssertionError('No solution stack named {0} found.'
                             ' Available:\n{1}'.format(stack,
                                                       available_stacks))
    # fall back to the account's default VPC when no VPCId option was given
    vpc_id = next(
        (option for option in env_settings if option['OptionName'] == 'VPCId'),
        None)
    if not vpc_id:
        vpc_id = _EC2_CONN.get_default_vpc_id()
        _LOG.info('Default vpc id %s', vpc_id)
        if vpc_id:
            _LOG.debug('Will use vpc %s', vpc_id)
            subnets = _EC2_CONN.list_subnets(filters=[{
                'Name': 'vpc-id',
                'Values': [vpc_id]
            }])
            _LOG.debug('Found subnets for %s vpc: %s', vpc_id, subnets)
            if subnets:
                _LOG.info('Will attach default %s vpc to env', vpc_id)
                _add_subnets_info(env_settings, subnets, vpc_id)
            sg_id = _EC2_CONN.get_sg_id(group_name='default', vpc_id=vpc_id)
            if sg_id:
                _LOG.debug('Found default sg with id %s', sg_id)
                env_settings.append({
                    "OptionName": "SecurityGroups",
                    "Namespace": "aws:autoscaling:launchconfiguration",
                    "Value": sg_id
                })

    # timestamp suffix keeps the env name unique across re-deploys
    env_name = meta["env_name"] + str(int(time()))

    # wait (up to 180s) for any previous application with this name to
    # finish deleting before re-creating it.
    # NOTE(review): this is a busy-wait with no sleep between
    # describe_applications calls — it will hammer the API; consider
    # adding a delay inside the loop.
    start = time()
    end = start + 180
    while end > time():
        describe_app_result = _EBS_CONN.describe_applications([name])
        if not describe_app_result:
            break

    # create APP
    response = _EBS_CONN.create_application(name)
    _LOG.info('Created EBS app %s.', name)
    # create ENV
    _EBS_CONN.create_environment(app_name=name,
                                 env_name=env_name,
                                 option_settings=env_settings,
                                 tier=meta['tier'],
                                 solution_stack_name=stack)
    # the deployment package must already be uploaded to the target bucket
    key = meta[S3_PATH_NAME]
    if not CONN.s3().is_file_exists(CONFIG.deploy_target_bucket, key):
        raise AssertionError('Deployment package does not exist in '
                             '{0} bucket'.format(CONFIG.deploy_target_bucket))

    # create VERSION
    version_label = env_name + str(uuid1())
    _EBS_CONN.create_app_version(app_name=name,
                                 version_label=version_label,
                                 s3_bucket=CONFIG.deploy_target_bucket,
                                 s3_key=key)
    _LOG.debug('Waiting for beanstalk env %s', env_name)
    # wait for env creation
    # NOTE(review): same sleep-less busy-wait pattern as above, capped at
    # 6 minutes; on timeout the error is only logged and execution
    # continues to deploy anyway.
    start = time()
    status = {}
    end = start + 360  # end in 6 min
    while end > time():
        status = _EBS_CONN.describe_environment_health(env_name=env_name,
                                                       attr_names=['Status'])
        if status['Status'] == 'Ready':
            _LOG.info('Launching env took %s.', time() - start)
            break
    if status['Status'] != 'Ready':
        _LOG.error('Env status: %s. Failed to create env.', status)
    # deploy new app version
    _EBS_CONN.deploy_env_version(name, env_name, version_label)
    _LOG.info('Created environment for %s.', name)
    return describe_ebs(name, meta, response)
Example #10
0
    http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
"""
from syndicate.commons.log_helper import get_logger
from syndicate.core import CONN, ClientError
from syndicate.core.helper import create_pool, unpack_kwargs
from syndicate.core.resources.helper import build_description_obj, chunks

_LOG = get_logger('syndicate.core.resources.s3_resource')
_S3_CONN = CONN.s3()


def create_s3_bucket(args):
    """Create S3 buckets in a worker pool from the given meta args."""
    worker = _create_s3_bucket_from_meta
    return create_pool(worker, args)


def describe_bucket(name, meta):
    """Collect ACL and location description data for bucket *name*.

    NOTE(review): ``arn`` and ``bucket_policy`` are computed but never
    used, and the function falls off the end without returning
    ``response`` — this snippet appears truncated; confirm against the
    original source before relying on its return value.
    """
    arn = 'arn:aws:s3:::{0}'.format(name)
    acl_response = _S3_CONN.get_bucket_acl(name)
    location_response = _S3_CONN.get_bucket_location(name)
    bucket_policy = _S3_CONN.get_bucket_policy(name)
    response = {
        'bucket_acl': acl_response,
        'location': location_response,
    }
Example #11
0
def _put_package_to_s3(path, path_to_package):
    """Upload a local deployment package to the deploy target bucket.

    :param path: destination S3 key
    :param path_to_package: local path of the package file
    """
    from syndicate.core import CONN, CONFIG
    bucket = CONFIG.deploy_target_bucket
    CONN.s3().upload_single_file(path_to_package, path, bucket)
Example #12
0
def load_meta_resources(bundle_name):
    """Load and parse the build meta file stored for *bundle_name*.

    :param bundle_name: bundle whose build meta is fetched from S3
    :return: parsed meta (result of ``json.loads``)
    """
    from syndicate.core import CONFIG, CONN
    meta_key = build_path(bundle_name, BUILD_META_FILE_NAME)
    raw_meta = CONN.s3().load_file_body(CONFIG.deploy_target_bucket,
                                        meta_key)
    return json.loads(raw_meta)