def common_pcluster_policies(region):
    """Create four policies to be attached to ec2_iam_role, iam_lambda_role for awsbatch or traditional schedulers."""
    policies = {}
    policies["awsbatch_instance_policy"] = _create_iam_policies(
        "integ-tests-ParallelClusterInstancePolicy-batch-" + random_alphanumeric(), region, "batch_instance_policy.json"
    )
    policies["traditional_instance_policy"] = _create_iam_policies(
        "integ-tests-ParallelClusterInstancePolicy-traditional-" + random_alphanumeric(),
        region,
        "traditional_instance_policy.json",
    )
    policies["awsbatch_lambda_policy"] = _create_iam_policies(
        "integ-tests-ParallelClusterLambdaPolicy-batch-" + random_alphanumeric(),
        region,
        "batch_lambda_function_policy.json",
    )
    policies["traditional_lambda_policy"] = _create_iam_policies(
        "integ-tests-ParallelClusterLambdaPolicy-traditional-" + random_alphanumeric(),
        region,
        "traditional_lambda_function_policy.json",
    )

    yield policies

    # Teardown (runs after the consuming tests finish): delete the policies created above.
    iam_client = boto3.client("iam", region_name=region)
    for policy in policies.values():
        iam_client.delete_policy(PolicyArn=policy)

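# _create_iam_policies is defined elsewhere. A minimal sketch of what it
# plausibly does, assuming it loads the named JSON policy document from disk
# and returns the new policy's ARN (the teardown above passes the stored
# values straight to delete_policy as PolicyArn):
def _create_iam_policies(name, region, policy_filename):  # hypothetical helper
    with open(policy_filename) as policy_file:
        policy_document = policy_file.read()
    iam_client = boto3.client("iam", region_name=region)
    return iam_client.create_policy(PolicyName=name, PolicyDocument=policy_document)["Policy"]["Arn"]
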
def vpc_stacks(cfn_stacks_factory, request):
    """Create VPC used by integ tests in all configured regions."""
    regions = request.config.getoption("regions")
    vpc_stacks = {}
    for region in regions:
        # Define subnets per region to allow AZ overrides.
        public_subnet = SubnetConfig(
            name="PublicSubnet",
            cidr="10.0.0.0/24",
            map_public_ip_on_launch=True,
            has_nat_gateway=True,
            default_gateway=Gateways.INTERNET_GATEWAY,
            availability_zone=random.choice(_AVAILABILITY_ZONE_OVERRIDES.get(region, [None])),
        )
        private_subnet = SubnetConfig(
            name="PrivateSubnet",
            cidr="10.0.1.0/24",
            map_public_ip_on_launch=False,
            has_nat_gateway=False,
            default_gateway=Gateways.NAT_GATEWAY,
            availability_zone=random.choice(_AVAILABILITY_ZONE_OVERRIDES.get(region, [None])),
        )
        vpc_config = VPCConfig(subnets=[public_subnet, private_subnet])
        template = VPCTemplateBuilder(vpc_config).build()
        stack = CfnStack(name="integ-tests-vpc-" + random_alphanumeric(), region=region, template=template.to_json())
        cfn_stacks_factory.create_stack(stack)
        vpc_stacks[region] = stack
    return vpc_stacks

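# _AVAILABILITY_ZONE_OVERRIDES is defined elsewhere. Given the
# .get(region, [None]) fallback above, it is presumably a module-level map
# from region name to the AZs eligible for subnet placement; regions absent
# from the map get no override. Illustrative values only:
_AVAILABILITY_ZONE_OVERRIDES = {
    "us-east-1": ["us-east-1a", "us-east-1b"],  # hypothetical entries
}
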
def vpc_stacks(cfn_stacks_factory, request):
    """Create VPC used by integ tests in all configured regions."""
    public_subnet = SubnetConfig(
        name="PublicSubnet",
        cidr="10.0.0.0/24",
        map_public_ip_on_launch=True,
        has_nat_gateway=True,
        default_gateway=Gateways.INTERNET_GATEWAY,
    )
    private_subnet = SubnetConfig(
        name="PrivateSubnet",
        cidr="10.0.1.0/24",
        map_public_ip_on_launch=False,
        has_nat_gateway=False,
        default_gateway=Gateways.NAT_GATEWAY,
    )
    vpc_config = VPCConfig(subnets=[public_subnet, private_subnet])
    template = VPCTemplateBuilder(vpc_config).build()
    regions = request.config.getoption("regions")
    vpc_stacks = {}
    for region in regions:
        stack = CfnStack(name="integ-tests-vpc-" + random_alphanumeric(), region=region, template=template.to_json())
        cfn_stacks_factory.create_stack(stack)
        vpc_stacks[region] = stack
    return vpc_stacks

def _write_file_into_efs(region, vpc_stack, efs_stack, request, key_name, cfn_stacks_factory):
    """Create a write-file stack containing a mount target and an instance that writes an empty, randomly named file into the EFS."""
    write_file_template = Template()
    write_file_template.set_version("2010-09-09")
    write_file_template.set_description("Stack to write a file to the existing EFS")
    default_security_group_id = get_default_vpc_security_group(vpc_stack.cfn_outputs["VpcId"], region)
    write_file_template.add_resource(
        MountTarget(
            "MountTargetResource",
            FileSystemId=efs_stack.cfn_resources["FileSystemResource"],
            SubnetId=vpc_stack.cfn_outputs["PublicSubnetId"],
            SecurityGroups=[default_security_group_id],
        )
    )
    random_file_name = random_alphanumeric()
    user_data = (
        """#cloud-config
package_update: true
package_upgrade: true
runcmd:
- yum install -y nfs-utils
- file_system_id_1="""
        + efs_stack.cfn_resources["FileSystemResource"]
        + """
- efs_mount_point_1=/mnt/efs/fs1
- mkdir -p "${!efs_mount_point_1}"
- mount -t nfs4 -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport,_netdev """
        + """"${!file_system_id_1}.efs.${AWS::Region}.${AWS::URLSuffix}:/" "${!efs_mount_point_1}"
- touch ${!efs_mount_point_1}/"""
        + random_file_name
        + """
- umount ${!efs_mount_point_1}
- /opt/aws/bin/cfn-signal -e $? --stack ${AWS::StackName} --resource InstanceToWriteEFS --region ${AWS::Region}
"""
    )
    write_file_template.add_resource(
        Instance(
            "InstanceToWriteEFS",
            CreationPolicy={"ResourceSignal": {"Timeout": "PT10M"}},
            ImageId=retrieve_latest_ami(region, "alinux2"),
            InstanceType="c5.xlarge",
            SubnetId=vpc_stack.cfn_outputs["PublicSubnetId"],
            UserData=Base64(Sub(user_data)),
            KeyName=key_name,
            DependsOn=["MountTargetResource"],
        )
    )
    write_file_stack = CfnStack(
        name=generate_stack_name("integ-tests-efs-write-file", request.config.getoption("stackname_suffix")),
        region=region,
        template=write_file_template.to_json(),
    )
    cfn_stacks_factory.create_stack(write_file_stack)
    # The stack exists only for its side effect: create_stack blocks until
    # cfn-signal confirms the file was written, after which the stack is removed.
    cfn_stacks_factory.delete_stack(write_file_stack.name, region)
    return random_file_name

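# The user data above mixes CloudFormation substitutions with shell
# variables. Inside Fn::Sub, ${AWS::Region} is replaced at deploy time,
# while ${!name} is an escape emitted literally as ${name}, leaving it for
# the shell to expand at boot. A tiny illustration (hypothetical command):
example = Sub("echo region=${AWS::Region} mount=${!efs_mount_point_1}")
# renders as e.g.: echo region=us-east-1 mount=${efs_mount_point_1}
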
def verify_directory_correctly_shared(remote_command_executor, mount_dir, scheduler_commands):
    master_file = random_alphanumeric()
    compute_file = random_alphanumeric()
    # Touch a file from the master node, have a compute job read it back and
    # write its own file, then read that file from the master node again,
    # proving the mount is visible in both directions.
    remote_command_executor.run_remote_command(
        "touch {mount_dir}/{master_file}".format(mount_dir=mount_dir, master_file=master_file)
    )
    job_command = "cat {mount_dir}/{master_file} && touch {mount_dir}/{compute_file}".format(
        mount_dir=mount_dir, master_file=master_file, compute_file=compute_file
    )
    result = scheduler_commands.submit_command(job_command)
    job_id = scheduler_commands.assert_job_submitted(result.stdout)
    scheduler_commands.wait_job_completed(job_id)
    scheduler_commands.assert_job_succeeded(job_id)
    remote_command_executor.run_remote_command(
        "cat {mount_dir}/{compute_file}".format(mount_dir=mount_dir, compute_file=compute_file)
    )

def _cluster_factory(cluster_config):
    cluster_config = _write_cluster_config_to_outdir(request, cluster_config)
    cluster = Cluster(
        name="integ-tests-" + random_alphanumeric(),
        config_file=cluster_config,
        ssh_key=request.config.getoption("key_path"),
    )
    factory.create_cluster(cluster)
    return cluster

def verify_directory_correctly_shared(remote_command_executor, mount_dir, scheduler_commands):
    head_node_file = random_alphanumeric()
    compute_file = random_alphanumeric()
    remote_command_executor.run_remote_command(
        "touch {mount_dir}/{head_node_file}".format(mount_dir=mount_dir, head_node_file=head_node_file)
    )
    job_command = "cat {mount_dir}/{head_node_file} && touch {mount_dir}/{compute_file}".format(
        mount_dir=mount_dir, head_node_file=head_node_file, compute_file=compute_file
    )
    result = scheduler_commands.submit_command(job_command)
    job_id = scheduler_commands.assert_job_submitted(result.stdout)
    scheduler_commands.wait_job_completed(job_id)
    scheduler_commands.assert_job_succeeded(job_id)
    remote_command_executor.run_remote_command(
        "cat {mount_dir}/{compute_file}".format(mount_dir=mount_dir, compute_file=compute_file)
    )

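# A hypothetical call site for the helper above: once a cluster with a shared
# mount is up, a test only needs the mount path plus the two command wrappers
# to assert bidirectional visibility. Sketch only; names are illustrative.
def test_shared_storage(remote_command_executor, scheduler_commands):
    verify_directory_correctly_shared(remote_command_executor, "/shared", scheduler_commands)
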
def _create_vpc_stack(request, template, region, cfn_stacks_factory):
    if request.config.getoption("vpc_stack"):
        logging.info("Using stack {0} in region {1}".format(request.config.getoption("vpc_stack"), region))
        stack = CfnStack(name=request.config.getoption("vpc_stack"), region=region, template=template.to_json())
    else:
        stack = CfnStack(name="integ-tests-vpc-" + random_alphanumeric(), region=region, template=template.to_json())
        cfn_stacks_factory.create_stack(stack)
    return stack

def _create_network(region, template_path, parameters):
    file_content = extract_template(template_path)
    stack = CfnStack(
        name="integ-tests-networking-{0}{1}{2}".format(
            random_alphanumeric(),
            "-" if request.config.getoption("stackname_suffix") else "",
            request.config.getoption("stackname_suffix"),
        ),
        region=region,
        template=file_content,
        parameters=parameters,
    )
    factory.create_stack(stack)
    return stack

def _cluster_factory(cluster_config, extra_args=None, raise_on_error=True):
    cluster_config = _write_cluster_config_to_outdir(request, cluster_config)
    cluster = Cluster(
        name=request.config.getoption("cluster")
        if request.config.getoption("cluster")
        else "integ-tests-{0}{1}{2}".format(
            random_alphanumeric(),
            "-" if request.config.getoption("stackname_suffix") else "",
            request.config.getoption("stackname_suffix"),
        ),
        config_file=cluster_config,
        ssh_key=request.config.getoption("key_path"),
    )
    if not request.config.getoption("cluster"):
        factory.create_cluster(cluster, extra_args=extra_args, raise_on_error=raise_on_error)
    return cluster

def _get_security_group_id(self):
    security_group_id = self.boto_client.create_security_group(
        Description="security group for snapshot instance node",
        GroupName="snapshot-" + random_alphanumeric(),
        VpcId=self.config.vpc_id,
    )["GroupId"]
    self.boto_client.authorize_security_group_ingress(
        GroupId=security_group_id,
        IpPermissions=[
            {"IpProtocol": "tcp", "FromPort": 22, "ToPort": 22, "IpRanges": [{"CidrIp": "0.0.0.0/0"}]}
        ],
    )
    return security_group_id

def save(self):
    if not self.slug:
        slug = defaultfilters.slugify(self.title)
        try:
            Knot.objects.get(slug=slug)
        except Knot.DoesNotExist:
            pass
        else:
            # get() returns a single instance, which has no count(); use the
            # number of existing slugs sharing this prefix to disambiguate.
            slug = '%s-%s' % (slug, Knot.objects.filter(slug__startswith=slug).count() + 1)
        self.slug = slug
    while not self.short_url:
        # Size the code to the current table: 62**length exceeds count.
        count = Knot.objects.count()
        if count < 1:
            count = 1
        short_url = ''.join(random_alphanumeric(int(math.log(count, 62)) + 1))
        if short_url not in RESERVED_WORDS:
            try:
                Knot.objects.get(short_url=short_url)
            except Knot.DoesNotExist:
                self.short_url = short_url
    super(Knot, self).save()

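# Why int(math.log(count, 62)) + 1: with 62 alphanumeric characters, a code
# of that length offers 62**length candidates, always more than the current
# row count, so the collision-retry loop above terminates quickly.
import math

def short_url_length(count):  # illustrative restatement of the formula above
    return int(math.log(max(count, 1), 62)) + 1

assert short_url_length(1) == 1      # 62 possible codes
assert short_url_length(100) == 2    # 62**2 = 3844 possible codes
assert short_url_length(5000) == 3   # 62**3 = 238328 possible codes
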
def _create_bucket():
    bucket_name = "integ-tests-" + random_alphanumeric()
    logging.info("Creating S3 bucket {0}".format(bucket_name))
    create_s3_bucket(bucket_name, region)
    created_buckets.append((bucket_name, region))
    return bucket_name

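# create_s3_bucket is defined elsewhere; a plausible sketch, assuming a plain
# boto3 wrapper. Note that us-east-1 rejects an explicit LocationConstraint,
# so it is only supplied for other regions:
def create_s3_bucket(bucket_name, region):  # hypothetical implementation
    s3_client = boto3.client("s3", region_name=region)
    if region == "us-east-1":
        s3_client.create_bucket(Bucket=bucket_name)
    else:
        s3_client.create_bucket(
            Bucket=bucket_name, CreateBucketConfiguration={"LocationConstraint": region}
        )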