def dummy_awsbatch_cluster_config(mocker):
    """Generate dummy cluster."""
    os_image = Image(os="alinux2")
    node = dummy_head_node(mocker)

    # Single AWS Batch queue with one compute resource spanning two instance types.
    batch_networking = AwsBatchQueueNetworking(
        subnet_ids=["dummy-subnet-1"], security_groups=["sg-1", "sg-2"]
    )
    batch_queue = AwsBatchQueue(
        name="queue1",
        networking=batch_networking,
        compute_resources=[
            AwsBatchComputeResource(
                name="dummy_compute_resource1", instance_types=["dummyc5.xlarge", "optimal"]
            )
        ],
    )
    scheduling = AwsBatchScheduling(queues=[batch_queue])

    # Shared storage: one FSx, three EBS variants, one EFS, one RAID mount.
    shared_storage: List[Resource] = [
        dummy_fsx(),
        dummy_ebs("/ebs1"),
        dummy_ebs("/ebs2", volume_id="vol-abc"),
        dummy_ebs("/ebs3", raid=Raid(raid_type=1, number_of_volumes=5)),
        dummy_efs("/efs1", file_system_id="fs-efs-1"),
        dummy_raid("/raid1"),
    ]

    cluster = _DummyAwsBatchClusterConfig(
        image=os_image, head_node=node, scheduling=scheduling, shared_storage=shared_storage
    )
    # Extra attributes the templating code reads off the config object.
    cluster.custom_s3_bucket = "s3://dummy-s3-bucket"
    cluster.additional_resources = "https://additional.template.url"
    cluster.config_version = "1.0"
    cluster.iam = ClusterIam()
    cluster.tags = [Tag(key="test", value="testvalue")]
    return cluster
def dummy_slurm_cluster_config(mocker):
    """Generate dummy cluster.

    Builds a Slurm cluster config with three queues (differing only in security
    groups), S3 access policies on queue1's IAM, and a mix of shared-storage
    resources (FSx, EBS, EFS, RAID).
    """
    image = Image(os="alinux2")
    head_node = dummy_head_node(mocker)
    # FIX: the write-access flag was previously attached to the *readonly*
    # bucket and missing from the *readwrite* bucket; align flags with names.
    queue_iam = Iam(
        s3_access=[
            S3Access("dummy-readonly-bucket"),
            S3Access("dummy-readwrite-bucket", enable_write_access=True),
        ]
    )
    compute_resources = [
        SlurmComputeResource(name="dummy_compute_resource1", instance_type="dummyc5.xlarge")
    ]
    # Three queues on the same subnet; only the security-group sets differ
    # (queue3 has none) so networking-dependent code paths are all exercised.
    queue_networking1 = QueueNetworking(subnet_ids=["dummy-subnet-1"], security_groups=["sg-1", "sg-2"])
    queue_networking2 = QueueNetworking(subnet_ids=["dummy-subnet-1"], security_groups=["sg-1", "sg-3"])
    queue_networking3 = QueueNetworking(subnet_ids=["dummy-subnet-1"], security_groups=None)
    queues = [
        SlurmQueue(name="queue1", networking=queue_networking1, compute_resources=compute_resources, iam=queue_iam),
        SlurmQueue(name="queue2", networking=queue_networking2, compute_resources=compute_resources),
        SlurmQueue(name="queue3", networking=queue_networking3, compute_resources=compute_resources),
    ]
    scheduling = SlurmScheduling(queues=queues)

    # Shared storage: one FSx, three EBS variants, one EFS, one RAID mount.
    shared_storage: List[Resource] = [
        dummy_fsx(),
        dummy_ebs("/ebs1"),
        dummy_ebs("/ebs2", volume_id="vol-abc"),
        dummy_ebs("/ebs3", raid=Raid(raid_type=1, number_of_volumes=5)),
        dummy_efs("/efs1", file_system_id="fs-efs-1"),
        dummy_raid("/raid1"),
    ]

    cluster = _DummySlurmClusterConfig(
        image=image, head_node=head_node, scheduling=scheduling, shared_storage=shared_storage
    )
    # Extra attributes the templating code reads off the config object.
    cluster.custom_s3_bucket = "s3://dummy-s3-bucket"
    cluster.additional_resources = "https://additional.template.url"
    cluster.config_version = "1.0"
    cluster.iam = ClusterIam()
    cluster.tags = [Tag(key="test", value="testvalue")]
    return cluster
def base_slurm_cluster_config(self):
    """Return a minimal Slurm cluster configuration with a single queue."""
    queue = SlurmQueue(
        name="queue0",
        networking=QueueNetworking(["subnet"]),
        compute_resources=[
            SlurmComputeResource(name="compute_resource_1", instance_type="c5.xlarge")
        ],
    )
    return SlurmClusterConfig(
        cluster_name="clustername",
        image=Image("alinux2"),
        head_node=HeadNode("c5.xlarge", HeadNodeNetworking("subnet")),
        scheduling=SlurmScheduling([queue]),
    )
def base_cluster_config(self):
    """Return a minimal base cluster configuration (no scheduling section)."""
    head = HeadNode("c5.xlarge", HeadNodeNetworking("subnet"))
    return BaseClusterConfig(
        cluster_name="clustername",
        image=Image("alinux2"),
        head_node=head,
    )
def make_resource(self, data, **kwargs):
    """Generate resource."""
    # Build an Image directly from the deserialized field mapping;
    # extra marshmallow kwargs are intentionally ignored.
    resource = Image(**data)
    return resource