Example #1
def main():
    LOGGER.info('ADF Version %s', ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    parameter_store = ParameterStore(DEPLOYMENT_ACCOUNT_REGION, boto3)
    deployment_map = DeploymentMap(parameter_store, ADF_PIPELINE_PREFIX)
    s3 = S3(DEPLOYMENT_ACCOUNT_REGION, S3_BUCKET_NAME)
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-readonly'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')),
        'pipeline')

    organizations = Organizations(role)
    clean(parameter_store, deployment_map)

    try:
        auto_create_repositories = parameter_store.fetch_parameter(
            'auto_create_repositories')
    except ParameterNotFoundError:
        auto_create_repositories = 'enabled'

    threads = []
    for p in deployment_map.map_contents.get('pipelines'):
        thread = PropagatingThread(target=worker_thread,
                                   args=(p, organizations,
                                         auto_create_repositories, s3,
                                         deployment_map, parameter_store))
        thread.start()
        threads.append(thread)

    for thread in threads:
        thread.join()
Example #2
def main():
    parameter_store = ParameterStore(DEPLOYMENT_ACCOUNT_REGION, boto3)
    deployment_map = DeploymentMap(parameter_store,
                                   os.environ["ADF_PIPELINE_PREFIX"])
    s3 = S3(DEPLOYMENT_ACCOUNT_REGION, boto3, S3_BUCKET_NAME)
    sts = STS(boto3)
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-org-access-adf'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')),
        'pipeline')

    organizations = Organizations(role)
    clean(parameter_store, deployment_map)

    for p in deployment_map.map_contents.get('pipelines'):
        pipeline = Pipeline(p)

        for target in p['targets']:
            target_structure = TargetStructure(target)
            for step in target_structure.target:
                for path in step.get('path'):
                    try:
                        regions = step.get(
                            'regions',
                            p.get('regions', DEPLOYMENT_ACCOUNT_REGION))
                        pipeline.stage_regions.append(regions)
                        pipeline_target = Target(path, regions,
                                                 target_structure,
                                                 organizations)
                        pipeline_target.fetch_accounts_for_target()
                    except BaseException as error:
                        raise Exception(
                            "Failed to return accounts for {0}".format(path)
                        ) from error

            pipeline.template_dictionary["targets"].append(
                target_structure.account_list)

        if DEPLOYMENT_ACCOUNT_REGION not in regions:
            pipeline.stage_regions.append(DEPLOYMENT_ACCOUNT_REGION)

        parameters = pipeline.generate_parameters()
        pipeline.generate()
        deployment_map.update_deployment_parameters(pipeline)
        s3_object_path = upload_if_required(s3, pipeline)

        store_regional_parameter_config(pipeline, parameter_store)
        cloudformation = CloudFormation(
            region=DEPLOYMENT_ACCOUNT_REGION,
            deployment_account_region=DEPLOYMENT_ACCOUNT_REGION,
            role=boto3,
            template_url=s3_object_path,
            parameters=parameters,
            wait=True,
            stack_name="{0}-{1}".format(os.environ["ADF_PIPELINE_PREFIX"],
                                        pipeline.name),
            s3=None,
            s3_key_path=None)

        cloudformation.create_stack()
Example #3
def run(bucket_name, prefix, token=None):
    db = S3({})
    keys = []
    if token is not None:
        prefix += token + "/"

    num_keys = None
    while num_keys is None or len(keys) < num_keys:
        try:
            keys = list(
                map(lambda o: o.key,
                    list(db.get_entries("maccoss-tide", prefix))))
            if len(keys) > 0:
                num_keys = util.parse_file_name(keys[0])["num_files"]
        except Exception as e:
            print("Error reading", e)
            keys = []
        time.sleep(10)
    keys.sort(key=lambda k: util.parse_file_name(k)["suffix"])

    species_to_score = {}

    print("Processing...")
    objs = db.get_entries("maccoss-tide", prefix)
    for i, obj in enumerate(objs):  # enumerate so the index lines up with keys[i] below
        it = confidence.Iterator(obj, None)
        s = it.sum("q-value")
        specie = util.parse_file_name(obj.key)["suffix"]
        species_to_score[specie] = s
        if s > 0:
            print(keys[i])
            print("***", i + 2, specie, s)
        # else:
        #   print(i+2, util.parse_file_name(obj.key)["suffix"], s)
    return species_to_score
Example #4
def main(order_id: str,
         start_date: Optional[str] = None,
         end_date: Optional[str] = None):
    """Main program

    Args:

        - order_id: NDFD order id for bulk download
        - start_date: date in yyyy-mm-dd format to start import. This will only download tarballs on or after this date.
        - end_date: date in yyyy-mm-dd format to end import. This will only download tarballs on or before this date.
    """
    # Start session connected to S3
    s3_session = S3(bucket_name='hist-wx-map-layer')

    tarball_urls = get_download_urls_for_order(order_id, start_date, end_date)
    print('Got download urls for tarball')

    for tarball_url in tarball_urls:
        wmo_code = tarball_url['wmo_code']

        with tempfile.TemporaryDirectory() as dirpath:
            # TemporaryDirectory's context manager yields the directory path itself.
            tarball_dest = download_tarball(tarball_url['tar_url'],
                                            dirpath)
            print('Finished downloading tarball')

            with tarfile.open(tarball_dest, 'r') as tar:
                print('Extracting files and saving to S3')
                extract_files_from_tarball(s3_session, tar, wmo_code)
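A minimal usage sketch of the entry point above. The order id and date window are hypothetical placeholders (NDFD order ids are opaque strings issued per bulk-download request); only the call shape is illustrated.

if __name__ == '__main__':
    # Hypothetical order id and date range: only tarballs dated within
    # [start_date, end_date] are downloaded and their files pushed to S3.
    main(order_id='HAS011234567',
         start_date='2020-01-01',
         end_date='2020-01-31')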
Example #5
 def __init__(self, parameter_store, stage_parameters,
              comparison_parameters):
     self.parameter_store = parameter_store
     self.stage_parameters = stage_parameters
     self.comparison_parameters = comparison_parameters
     self.s3 = S3(DEFAULT_REGION, S3_BUCKET_NAME)
     self.sts = STS()
Example #6
 def upload(self, value, key, file_name):
     if not any(item in value for item in ['path', 'virtual-hosted']):
         raise Exception(
             'When uploading to S3 you need to specify a path style '
             'for the response: either path or virtual-hosted, '
             'read more: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html'
         )
     if str(value).count(':') > 2:
         [_, region, style, value] = value.split(':')
     else:
         [_, style, value] = value.split(':')
         region = DEFAULT_REGION
     bucket_name = self.parameter_store.fetch_parameter(
         '/cross_region/s3_regional_bucket/{0}'.format(region))
     client = S3(region, bucket_name)
     LOGGER.info("Uploading %s as %s to S3 Bucket %s in %s", value,
                 file_name, bucket_name, region)
     try:
         parent_key = list(
             Resolver.determine_parent_key(self.stage_parameters, key))[0]
     except IndexError:
         self.stage_parameters[key] = client.put_object(
             "adf-upload/{0}/{1}".format(value, file_name),
             "{0}".format(value), style)
         return True
     self.stage_parameters[parent_key][key] = client.put_object(
         "adf-upload/{0}/{1}".format(value, file_name), "{0}".format(value),
         style)
     return True
Example #7
def handler(event, context):
    processor = MusicProcessor(outdir='/tmp')
    s3_client = S3()
    local_file, s3_key = processor.process(event.get('yt_url'))
    s3_client.upload(local_file, s3_key)
    public_url = s3_client.get_public_url(s3_key)
    return {'statusCode': 200, 'body': json.dumps({'url': public_url})}
Example #8
 def upload(self, value, key, file_name, param=None):
     if str(value).count(':') > 1:
         [_, region, value] = value.split(':')
         bucket_name = self.parameter_store.fetch_parameter(
             '/cross_region/s3_regional_bucket/{0}'.format(region))
         regional_client = S3(region, bucket_name)
         LOGGER.info("Uploading %s as %s to S3 Bucket %s in %s", value,
                     file_name, bucket_name, region)
         if param:
             self.stage_parameters[param][key] = regional_client.put_object(
                 "adf-upload/{0}/{1}".format(value, file_name),
                 "{0}".format(value))
         else:
             self.stage_parameters[key] = regional_client.put_object(
                 "adf-upload/{0}/{1}".format(value, file_name),
                 "{0}".format(value))
         return True
     [_, value] = value.split(':')
     LOGGER.info("Uploading %s to S3", value)
     if param:
         self.stage_parameters[param][key] = self.s3.put_object(
             "adf-upload/{0}/{1}".format(value, file_name),
             "{0}".format(value))
     else:
         self.stage_parameters[key] = self.s3.put_object(
             "adf-upload/{0}/{1}".format(value, file_name),
             "{0}".format(value))
     return False
Example #9
    def test_s3loadDirectory(self):
        """S3: Test that loading a directory works as expected"""
        log = Logger(logging.ERROR, "/opt/mm/testing/conf/logging.ini")
        log.addFileHandler(logging.DEBUG)

        s3object = S3("/opt/mm/testing/conf/aws.ini", log)
        s3object.createBucket("123mybucket321")

        # create a directory
        os.mkdir("test123")

        # create s3.data file
        file = open("test123/s3.data", "w")
        file.write("dataline1\n")
        file.write("dataline2\n")
        file.write("dataline3\n")
        file.close()

        # create s3_2.data file
        file = open("test123/s3_2.data", "w")
        file.write("dataline1\n")
        file.write("dataline2\n")
        file.write("dataline3\n")
        file.close()

        # upload the file to s3
        s3object.loadDirectory("test123", "123mybucket321")

        # remove local directory
        shutil.rmtree("test123")

        # get the bucket
        s3object.getBucket("123mybucket321", os.getcwd())

        # compare two files
        f1 = open("s3.data_part_0", "r")
        data1 = f1.read()
        f1.close()

        f2 = open("s3_2.data_part_0", "r")
        data2 = f2.read()
        f2.close()

        self.assertEqual(data1, data2, "File contents do not match")

        # remove local files
        os.remove("s3.data_part_0")
        os.remove("s3_2.data_part_0")

        # remove the files from s3
        s3object.removeData("123mybucket321", "s3.data")
        s3object.removeData("123mybucket321", "s3_2.data")

        # verify: test should be able to delete the bucket
        # if file exists, bucket will not get deleted causing the
        # test to fail
        s3object.deleteBucket("123mybucket321")

        # cleanup
        log.cleanup()
Example #10
 def upload(self, value, key, file_name):
     if not any(item in value for item in S3.supported_path_styles()):
         raise Exception(
             'When uploading to S3 you need to specify a path style '
             'to use for the returned value. '
             'Supported path styles include: {supported_list}'.format(
                 supported_list=S3.supported_path_styles())) from None
     if str(value).count(':') > 2:
         [_, region, style, value] = value.split(':')
     else:
         [_, style, value] = value.split(':')
         region = DEFAULT_REGION
     bucket_name = self.parameter_store.fetch_parameter(
         '/cross_region/s3_regional_bucket/{0}'.format(region))
     client = S3(region, bucket_name)
     try:
         parent_key = list(
             Resolver.determine_parent_key(self.comparison_parameters,
                                           key))[0]
     except IndexError:
         if self.stage_parameters.get(key):
             self.stage_parameters[key] = client.put_object(
                 "adf-upload/{0}/{1}".format(value, file_name),
                 "{0}".format(value),
                 style,
                 True  #pre-check
             )
         return True
     self.stage_parameters[parent_key][key] = client.put_object(
         "adf-upload/{0}/{1}".format(value, file_name),
         "{0}".format(value),
         style,
         True  #pre-check
     )
     return True
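A small sketch of how the colon-delimited value is split in the two upload variants above. The leading token and the overall intrinsic syntax are assumed here purely for illustration; only the split-by-colon behaviour mirrors the code.

DEFAULT_REGION = 'eu-west-1'  # assumed default region for this sketch

def parse_upload_value(value):
    # Four segments carry an explicit region, three fall back to the default.
    if str(value).count(':') > 2:
        # e.g. 'upload:eu-central-1:path:templates/example.yml' (hypothetical)
        _, region, style, path = value.split(':')
    else:
        # e.g. 'upload:path:templates/example.yml' (hypothetical)
        _, style, path = value.split(':')
        region = DEFAULT_REGION
    return region, style, path

print(parse_upload_value('upload:eu-central-1:path:templates/example.yml'))
# -> ('eu-central-1', 'path', 'templates/example.yml')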
Example #11
    def test_s3LoadUnloadData(self):
        """S3: Test that loading and unloading of data work okay"""
        log = Logger(logging.ERROR, "/opt/mm/testing/conf/logging.ini")
        log.addFileHandler(logging.DEBUG)

        s3object = S3("/opt/mm/testing/conf/aws.ini", log)
        s3object.createBucket("123mybucket321")

        # create s3.data file
        file = open("s3.data", "w")
        file.write("dataline1\n")
        file.write("dataline2\n")
        file.write("dataline3\n")
        file.close()

        # upload the file to s3
        s3object.loadData("s3.data", "123mybucket321")

        # remove the file
        s3object.removeData("123mybucket321", "s3.data")

        # verify: test should be able to delete the bucket
        # if file exists, bucket will not get deleted causing the
        # test to fail
        s3object.deleteBucket("123mybucket321")

        # cleanup
        os.remove("s3.data")
        log.cleanup()
Example #12
def test_perform_put_object_success(logger, boto3_resource):
    s3_resource = Mock()
    s3_object = Mock()
    s3_resource.Object.return_value = s3_object
    boto3_resource.return_value = s3_resource
    object_key = "some"
    file_path = "some-file.json"
    file_data = 'some file data'

    s3_cls = S3('eu-west-1', 'some_bucket')
    with patch("builtins.open", mock_open(read_data=file_data)) as mock_file:
        s3_cls._perform_put_object(
            key=object_key,
            file_path=file_path,
        )
        mock_file.assert_called_with(file_path, 'rb')
        s3_resource.Object.assert_called_once_with(s3_cls.bucket, object_key)
        s3_object.put.assert_called_once_with(Body=mock_file.return_value)

    logger.info.assert_called_once_with(
        "Uploading %s as %s to S3 Bucket %s in %s",
        file_path,
        object_key,
        s3_cls.bucket,
        s3_cls.region,
    )
    logger.debug.assert_called_once_with(
        "Upload of %s was successful.",
        object_key,
    )
    logger.error.assert_not_called()
    boto3_resource.assert_called_with('s3', region_name='eu-west-1')
Example #13
def main():
    LOGGER.info('ADF Version %s', ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)
    s3 = S3(
        DEPLOYMENT_ACCOUNT_REGION,
        S3_BUCKET_NAME
    )
    threads = []
    _templates = glob.glob("cdk.out/*.template.json")
    for counter, template_path in enumerate(_templates):
        # Just the stack name: no directory prefix, no '.template', no extension.
        name = os.path.splitext(
            template_path.split('/')[-1].split('.template')[0])[0]
        with open(template_path) as _template_path:
            thread = PropagatingThread(target=worker_thread, args=(
                template_path,
                name,
                s3
            ))
            thread.start()
            threads.append(thread)
            _batcher = counter % 10
            if _batcher == 9:  # counter 9, 19, ... marks the end of a batch of 10 threads
                _interval = random.randint(5, 11)
                LOGGER.debug(
                    'Waiting for %s seconds before starting next batch '
                    'of 10 threads.', _interval)
                time.sleep(_interval)

    for thread in threads:
        thread.join()
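The modulo check above throttles thread creation: after every tenth thread it pauses for a random interval before starting the next batch. A tiny standalone sketch of the same cadence, with the sleep replaced by a print so it runs instantly:

import random

for counter in range(25):  # pretend we start 25 worker threads
    # counter 9, 19, ... means ten threads have started since the last pause
    if counter % 10 == 9:
        interval = random.randint(5, 11)
        print('Batch of 10 complete at thread {0}; would sleep {1}s.'.format(
            counter + 1, interval))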
Example #14
def consumer():
    if len(sys.argv) < 4:
        print('Invalid argument list. Use: python main.py '
              '<storage strategy> <resources> <bucket name> [bucket 2 name]')
        exit(-1)
    strategy = sys.argv[1]
    resources = sys.argv[2]
    bucket_name = sys.argv[3]
    s3 = boto3.resource('s3')
    bucket = s3.Bucket(bucket_name)
    dynamo_service = Dynamo()
    s3_service = S3()

    if strategy.upper() == "CREATE":
        if resources.upper() == "DYNAMO":
            contents = s3_service.get_all_bucket_contents(bucket)
            print(dynamo_service.create(contents))
            s3_service.delete_all_bucket_contents(bucket, contents)
        if resources.upper() == "S3":
            if len(sys.argv) < 5:
                print('Invalid argument list. The s3 resource requires two '
                      'buckets. Use: python main.py <storage strategy> '
                      's3 <bucket name> <bucket 2 name>')
                exit(-1)
            bucket2 = sys.argv[4]
            contents = s3_service.get_all_bucket_contents(bucket)
            print(s3_service.create(contents, bucket, bucket2))
            s3_service.delete_all_bucket_contents(bucket, contents)
Example #15
def lambda_handler(event, _):
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}'.format(
            event["account_id"],
            event["cross_account_access_role"]
        ), 'master_lambda'
    )

    if event['is_deployment_account']:
        configure_master_account_parameters(event)
        configure_deployment_account_parameters(event, role)

    s3 = S3(
        region=REGION_DEFAULT,
        bucket=S3_BUCKET
    )

    for region in list(set([event["deployment_account_region"]] + event["regions"])):
        if not event["is_deployment_account"]:
            configure_generic_account(sts, event, region, role)
        cloudformation = CloudFormation(
            region=region,
            deployment_account_region=event["deployment_account_region"],
            role=role,
            wait=False,
            stack_name=None, # Stack name will be automatically defined based on event
            s3=s3,
            s3_key_path=event["full_path"],
            account_id=event["account_id"]
        )
        cloudformation.create_stack()

    return event
Example #16
def handoff_event_to_emitter(context, bucket, key, events):
    bucket = os.environ["ProjectConfigurationBucket"]
    lmdclient = Lambda(context)
    s3client = S3(context, bucket)

    parts = KeyParts(key, context[c.KEY_SEPERATOR_PARTITION])
    key = "deployment/share/emitted_event_payloads/{}/{}/{}/{}".format(
        parts.source, parts.event, parts.datetime,
        parts.filename.replace(parts.extension, 'json'))

    payload = {
        'emitted': {
            'key': key,
            'bucket': bucket,
            'type': parts.event,
            'source': parts.source,
            'buildid': parts.buildid,
            'filename': parts.filename.replace(parts.extension, 'json'),
            'datetime': parts.datetime,
            'datetimeformat': util.partition_date_format(),
            'sensitivitylevel': parts.sensitivity_level
        }
    }

    # Write a short-lived object for the event emitter to read (expires in 30 minutes).
    expires = datetime.datetime.utcnow() + datetime.timedelta(minutes=30)
    s3client.put_object(key, events.to_json(orient='records'), expires)

    resp = lmdclient.invoke(os.environ[c.ENV_EVENT_EMITTER], payload)
Example #17
 def get_players(self):
     s3 = S3()
     key = 'games/' + self.date + '/'
     players = []
     player_objects = s3.client.list_objects(Bucket='soccer-storage', Prefix=key).get('Contents')
     if player_objects:
         for obj in player_objects:
             players.append(obj['Key'].split('/')[-1])
     return players
Example #18
def main():  #pylint: disable=R0915
    LOGGER.info('ADF Version %s', ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    parameter_store = ParameterStore(DEPLOYMENT_ACCOUNT_REGION, boto3)
    deployment_map = DeploymentMap(parameter_store, ADF_PIPELINE_PREFIX)
    s3 = S3(DEPLOYMENT_ACCOUNT_REGION, S3_BUCKET_NAME)
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-readonly'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')),
        'pipeline')

    organizations = Organizations(role)
    clean(parameter_store, deployment_map)

    for p in deployment_map.map_contents.get('pipelines'):
        pipeline = Pipeline(p)

        for target in p.get('targets', []):
            target_structure = TargetStructure(target)
            for step in target_structure.target:
                for path in step.get('path'):
                    regions = step.get(
                        'regions', p.get('regions', DEPLOYMENT_ACCOUNT_REGION))
                    step_name = step.get('name')
                    params = step.get('params', {})
                    pipeline.stage_regions.append(regions)
                    pipeline_target = Target(path, regions, target_structure,
                                             organizations, step_name, params)
                    pipeline_target.fetch_accounts_for_target()

            pipeline.template_dictionary["targets"].append(
                target_structure.account_list)

        if DEPLOYMENT_ACCOUNT_REGION not in regions:
            pipeline.stage_regions.append(DEPLOYMENT_ACCOUNT_REGION)

        parameters = pipeline.generate_parameters()
        pipeline.generate()
        deployment_map.update_deployment_parameters(pipeline)
        s3_object_path = upload_pipeline(s3, pipeline)

        store_regional_parameter_config(pipeline, parameter_store)
        cloudformation = CloudFormation(
            region=DEPLOYMENT_ACCOUNT_REGION,
            deployment_account_region=DEPLOYMENT_ACCOUNT_REGION,
            role=boto3,
            template_url=s3_object_path,
            parameters=parameters,
            wait=True,
            stack_name="{0}-{1}".format(ADF_PIPELINE_PREFIX, pipeline.name),
            s3=None,
            s3_key_path=None,
            account_id=DEPLOYMENT_ACCOUNT_ID)
        cloudformation.create_stack()
Example #19
def run_job(session, specs_json, args):
    job_name = args.name
    script_args = args.script_args

    rdzv_specs = specs_json["rdzv"]
    worker_specs = specs_json["worker"]

    script_url = urlparse(args.script_path)
    scheme = script_url.scheme
    if scheme == "docker":
        # docker://tmp/script.py -> tmp/script.py (relative to working dir in docker)
        # docker:///tmp/script.py -> /tmp/script.py (absolute path in docker)
        script = script_url.netloc + script_url.path
    elif scheme == "s3":
        # fetch_and_run supports s3:// so just pass through
        script = args.script_path
    else:
        s3_bucket = worker_specs["s3_bucket"]
        s3_prefix = worker_specs["s3_prefix"]
        script = S3(session).cp(args.script_path, s3_bucket, f"{s3_prefix}/{job_name}")

    asg = AutoScalingGroup(session)
    rdzv_asg_name = f"{job_name}_rdzv"
    worker_asg_name = f"{job_name}_worker"

    # create a single node asg to host the etcd server for rendezvous
    etcd_server_hostname = asg.create_asg_sync(rdzv_asg_name, size=1, **rdzv_specs)[0]
    rdzv_endpoint = f"{etcd_server_hostname}:2379"

    # allow overriding instance types from cli
    if args.instance_type:
        worker_specs["instance_type"] = args.instance_type
    worker_specs["rdzv_endpoint"] = rdzv_endpoint
    worker_specs["job_name"] = job_name
    worker_specs["script"] = script
    worker_specs["args"] = " ".join(script_args)
    worker_specs["user"] = getpass.getuser()

    instance_type = worker_specs["instance_type"]
    script_args_str = worker_specs["args"]

    log.info(
        f"\n------------------------------------------------------------------\n"
        f"Starting job...\n"
        f"  job name     : {job_name}\n"
        f"  instance type: {instance_type}\n"
        f"  size         : {args.size} (min={args.min_size}, max={args.max_size})\n"
        f"  rdzv endpoint: {rdzv_endpoint}\n"
        f"  cmd          : {script}\n"
        f"  cmd args     : {script_args_str}\n"
        f"------------------------------------------------------------------\n"
    )

    asg.create_asg(
        worker_asg_name, args.size, args.min_size, args.max_size, **worker_specs
    )
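A quick standalone check of the URL handling described in the comments above, using only urllib.parse; the example paths are illustrative.

from urllib.parse import urlparse

for url in ('docker://tmp/script.py',        # relative to the docker working dir
            'docker:///tmp/script.py',       # absolute path inside the container
            's3://my-bucket/jobs/script.py'):
    parsed = urlparse(url)
    # netloc + path reassembles the docker-relative or absolute script path
    print(parsed.scheme, '->', parsed.netloc + parsed.path)

# docker -> tmp/script.py
# docker -> /tmp/script.py
# s3 -> my-bucket/jobs/script.py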
Example #20
 def add_player(self, username, chat_id):
     s3 = S3()
     key = 'games/' + self.date + '/' + username
     try:
         s3.client.get_object(Bucket='soccer-storage', Key=key)
         # "Glad to see your enthusiasm, but you can only sign up once."
         message = 'Рад твоему рвению, но записаться можно только один раз.'
     except s3.client.exceptions.NoSuchKey:
         # The key is missing, so the player is not signed up yet.
         s3.client.put_object(Bucket='soccer-storage', Key=key, Body=str(chat_id))
         # "Great! I've added you to the squad for the game."
         message = 'Отлично! Я внёс тебя в состав на игру.'
     return message
Example #21
 def del_player(self, username):
     s3 = S3()
     key = 'games/' + self.date + '/' + username
     try:
         s3.client.get_object(Bucket='soccer-storage', Key=key)
         s3.client.delete_object(Bucket='soccer-storage', Key=key)
         # "With deep regret, I'm crossing you off the squad for the game."
         message = 'С глубоким сожалением вычёркиваю тебя из состава на игру.'
     except s3.client.exceptions.NoSuchKey:
         # "I can't remove you from the squad – you weren't in it anyway."
         message = 'Убрать из состава не могу – тебя в нём и так не было.'
     return message
Example #22
 def __getitem__(self, item):
     s3 = S3()
     data = json.loads(
         str(
             s3.client.get_object(
                 Bucket='soccer-storage', Key='config.json'
             )['Body'].read(),
             'utf-8'
         )
     )
     return data[item]
Example #23
def main():
    s3 = S3(DEPLOYMENT_ACCOUNT_REGION, SHARED_MODULES_BUCKET)
    parameters = Parameters(
        PROJECT_NAME,
        ParameterStore(
            DEPLOYMENT_ACCOUNT_REGION,
            boto3
        ),
        s3
    )
    parameters.create_parameter_files()
Example #24
def main():
    LOGGER.info('ADF Version %s', ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    parameter_store = ParameterStore(DEPLOYMENT_ACCOUNT_REGION, boto3)

    s3 = S3(DEPLOYMENT_ACCOUNT_REGION, SHARED_MODULES_BUCKET)
    deployment_map = DeploymentMap(parameter_store, s3, ADF_PIPELINE_PREFIX)

    LOGGER.info('Cleaning Stale Deployment Map entries')
    clean(parameter_store, deployment_map)
Example #25
def main():
    LOGGER.info('ADF Version %s', ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    parameter_store = ParameterStore(
        DEPLOYMENT_ACCOUNT_REGION,
        boto3
    )
    deployment_map = DeploymentMap(
        parameter_store,
        ADF_PIPELINE_PREFIX
    )
    s3 = S3(
        DEPLOYMENT_ACCOUNT_REGION,
        S3_BUCKET_NAME
    )
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-readonly'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')
        ), 'pipeline'
    )

    organizations = Organizations(role)
    clean(parameter_store, deployment_map)

    try:
        auto_create_repositories = parameter_store.fetch_parameter('auto_create_repositories')
    except ParameterNotFoundError:
        auto_create_repositories = 'enabled'

    threads = []
    for counter, p in enumerate(deployment_map.map_contents.get('pipelines')):
        thread = PropagatingThread(target=worker_thread, args=(
            p,
            organizations,
            auto_create_repositories,
            s3,
            deployment_map,
            parameter_store
        ))
        thread.start()
        threads.append(thread)
        _batcher = counter % 10
        if _batcher == 9:  # counter 9, 19, ... marks the end of a batch of 10 threads
            _interval = random.randint(5, 11)
            LOGGER.debug(
                'Waiting for %s seconds before starting next batch '
                'of 10 threads.', _interval)
            time.sleep(_interval)

    for thread in threads:
        thread.join()
Example #26
    def test_s3CreateDelete(self):
        """S3: Test that creation and deletion of s3 buckets work okay"""
        log = Logger(logging.ERROR, "/opt/mm/testing/conf/logging.ini")
        log.addFileHandler(logging.DEBUG)

        s3object = S3("/opt/mm/testing/conf/aws.ini", log)
        s3object.createBucket("123mybucket321")

        # verify
        s3object.deleteBucket("123mybucket321")

        # cleanup the logger
        log.cleanup()
Example #27
def lambda_handler(event, _):
    parameters = ParameterStore(REGION_DEFAULT, boto3)
    account_id = event.get('detail').get('requestParameters').get('accountId')
    organizations = Organizations(boto3, account_id)
    parsed_event = Event(event, parameters, organizations, account_id)
    cache = Cache()

    if parsed_event.moved_to_root or parsed_event.moved_to_protected:
        return parsed_event.create_output_object(cache)

    parsed_event.set_destination_ou_name()

    sts = STS(boto3)
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}'.format(
            parsed_event.account_id,
            parsed_event.cross_account_access_role
        ), 'master_lambda'
    )

    if parsed_event.is_deployment_account:
        update_master_account_parameters(parsed_event, parameters)
        configure_deployment_account(parsed_event, role)

    s3 = S3(REGION_DEFAULT, boto3, S3_BUCKET)

    account_path = parsed_event.organizations.build_account_path(
        parsed_event.destination_ou_id,
        [],  # Initial empty array to hold OU Path,
        cache,
    )

    for region in list(set([parsed_event.deployment_account_region] + parsed_event.regions)):
        if not parsed_event.is_deployment_account:
            configure_generic_account(sts, parsed_event, region, role)
        cloudformation = CloudFormation(
            region=region,
            deployment_account_region=parsed_event.deployment_account_region,
            role=role,
            wait=False,
            stack_name=None,
            s3=s3,
            s3_key_path=account_path,
            file_path=None,
        )
        cloudformation.create_stack()

    return parsed_event.create_output_object(cache)
Example #28
async def handler(event, context):
    parameters = ssm.list_namespace(DEFAULT_NAMESPACE)

    LOG.debug(f"Handling data sources: {parameters}")

    for parameter in parameters:
        LOG.debug(f"Starting {parameter}")
        if ('connection_parameters' not in parameter
                or 'bucket' not in parameter['connection_parameters']):
            continue
        target = parameter.get("s3_path")[1:]  # remove leading slash
        source = parameter.get("connection_parameters")
        s3 = S3(source.get("bucket"))
        await process_data_source(s3, source, target)
Example #29
def lambda_handler(event, _):
    s3 = S3(region=REGION_DEFAULT, bucket=S3_BUCKET)

    cloudformation = CloudFormation(
        region=event['deployment_account_region'],
        deployment_account_region=event['deployment_account_region'],
        role=boto3,
        wait=True,
        stack_name=None,
        s3=s3,
        s3_key_path='adf-build')
    cloudformation.create_stack()

    return event
Example #30
def main():
    LOGGER.info('ADF Version %s', ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    _create_inputs_folder()
    parameter_store = ParameterStore(
        DEPLOYMENT_ACCOUNT_REGION,
        boto3
    )
    s3 = S3(DEPLOYMENT_ACCOUNT_REGION, SHARED_MODULES_BUCKET)
    deployment_map = DeploymentMap(
        parameter_store,
        s3,
        ADF_PIPELINE_PREFIX
    )
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-readonly'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')
        ), 'pipeline'
    )
    organizations = Organizations(role)
    clean(parameter_store, deployment_map)
    ensure_event_bus_status(ORGANIZATION_ID)
    try:
        auto_create_repositories = parameter_store.fetch_parameter('auto_create_repositories')
    except ParameterNotFoundError:
        auto_create_repositories = 'enabled'
    threads = []
    _cache = Cache()
    for p in deployment_map.map_contents.get('pipelines', []):
        _source_account_id = (
            p.get('default_providers', {})
            .get('source', {})
            .get('properties', {})
            .get('account_id', {})
        )
        if (_source_account_id
                and int(_source_account_id) != int(DEPLOYMENT_ACCOUNT_ID)
                and not _cache.check(_source_account_id)):
            rule = Rule(_source_account_id)
            rule.create_update()
            _cache.add(_source_account_id, True)
        thread = PropagatingThread(target=worker_thread, args=(
            p,
            organizations,
            auto_create_repositories,
            deployment_map,
            parameter_store
        ))
        thread.start()
        threads.append(thread)

    for thread in threads:
        thread.join()