Example 1
    def set_engine_config_settings(self, arguments):
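        # Read the custom Hadoop, Fair Scheduler and Presto config files from
        # disk, then forward all engine-related CLI arguments to
        # set_engine_config in a single call.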
        custom_hadoop_config = util._read_file(
            arguments.custom_hadoop_config_file)
        fairscheduler_config_xml = util._read_file(
            arguments.fairscheduler_config_xml_file)
        custom_presto_config = util._read_file(
            arguments.presto_custom_config_file)
        is_deeplearning = False  # set but not used in this snippet

        self.set_engine_config(
            custom_hadoop_config=custom_hadoop_config,
            use_qubole_placement_policy=arguments.use_qubole_placement_policy,
            fairscheduler_config_xml=fairscheduler_config_xml,
            default_pool=arguments.default_pool,
            presto_version=arguments.presto_version,
            custom_presto_config=custom_presto_config,
            spark_version=arguments.spark_version,
            custom_spark_config=arguments.custom_spark_config,
            dbtap_id=arguments.dbtap_id,
            fernet_key=arguments.fernet_key,
            overrides=arguments.overrides,
            airflow_version=arguments.airflow_version,
            airflow_python_version=arguments.airflow_python_version,
            enable_rubix=arguments.enable_rubix,
            mlflow_version=arguments.mlflow_version)
Example 2
    def set_engine_config_settings(self, arguments):
        custom_hadoop_config = util._read_file(arguments.custom_hadoop_config_file)
        fairscheduler_config_xml = util._read_file(arguments.fairscheduler_config_xml_file)
        custom_presto_config = util._read_file(arguments.presto_custom_config_file)

        self.set_engine_config(custom_hadoop_config=custom_hadoop_config,
                               use_qubole_placement_policy=arguments.use_qubole_placement_policy,
                               fairscheduler_config_xml=fairscheduler_config_xml,
                               default_pool=arguments.default_pool,
                               presto_version=arguments.presto_version,
                               custom_presto_config=custom_presto_config,
                               spark_version=arguments.spark_version,
                               custom_spark_config=arguments.custom_spark_config,
                               dbtap_id=arguments.dbtap_id,
                               fernet_key=arguments.fernet_key,
                               overrides=arguments.overrides)
Example 3
    def get_cluster_create_clone_update(arguments, action):
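        # Read the customer SSH key file, build cluster, cloud and engine
        # configuration objects from the parsed CLI arguments, then dispatch
        # the create/clone/update request.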
        customer_ssh_key = util._read_file(arguments.customer_ssh_key_file)
        # This will set cluster info and monitoring settings
        cluster_info = ClusterInfoV2(arguments.label)
        cluster_info.set_cluster_info(
            disallow_cluster_termination=arguments.disallow_cluster_termination,
            enable_ganglia_monitoring=arguments.enable_ganglia_monitoring,
            datadog_api_token=arguments.datadog_api_token,
            datadog_app_token=arguments.datadog_app_token,
            node_bootstrap=arguments.node_bootstrap_file,
            master_instance_type=arguments.master_instance_type,
            slave_instance_type=arguments.slave_instance_type,
            min_nodes=arguments.initial_nodes,
            max_nodes=arguments.max_nodes,
            slave_request_type=arguments.slave_request_type,
            fallback_to_ondemand=arguments.fallback_to_ondemand,
            node_base_cooldown_period=arguments.node_base_cooldown_period,
            node_spot_cooldown_period=arguments.node_spot_cooldown_period,
            custom_tags=arguments.custom_tags,
            heterogeneous_config=arguments.heterogeneous_config,
            maximum_bid_price_percentage=arguments.maximum_bid_price_percentage,
            timeout_for_request=arguments.timeout_for_request,
            maximum_spot_instance_percentage=arguments.maximum_spot_instance_percentage,
            stable_maximum_bid_price_percentage=arguments.stable_maximum_bid_price_percentage,
            stable_timeout_for_request=arguments.stable_timeout_for_request,
            stable_spot_fallback=arguments.stable_spot_fallback,
            spot_block_duration=arguments.spot_block_duration,
            idle_cluster_timeout=arguments.idle_cluster_timeout,
            disk_count=arguments.count,
            disk_type=arguments.disk_type,
            disk_size=arguments.size,
            root_disk_size=arguments.root_disk_size,
            upscaling_config=arguments.upscaling_config,
            enable_encryption=arguments.encrypted_ephemerals,
            customer_ssh_key=customer_ssh_key,
            image_uri_overrides=arguments.image_uri_overrides,
            env_name=arguments.env_name,
            python_version=arguments.python_version,
            r_version=arguments.r_version)

        #  This will set cloud config settings
        cloud_config = Qubole.get_cloud()
        cloud_config.set_cloud_config_from_arguments(arguments)

        # This will set engine settings
        engine_config = Engine(flavour=arguments.flavour)
        engine_config.set_engine_config_settings(arguments)

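        # Combine the cluster, cloud and engine configs into one request payload.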
        cluster_request = ClusterCmdLine.get_cluster_request_parameters(
            cluster_info, cloud_config, engine_config)

        if action == "create":
            return arguments.func(cluster_request)
        else:
            return arguments.func(arguments.cluster_id_label, cluster_request)
Example 4
    def set_engine_config_settings(self, arguments):
        custom_hadoop_config = util._read_file(arguments.custom_hadoop_config_file)
        fairscheduler_config_xml = util._read_file(arguments.fairscheduler_config_xml_file)
        custom_presto_config = util._read_file(arguments.presto_custom_config_file)
        is_deeplearning = False  # set but not used in this snippet

        self.set_engine_config(custom_hadoop_config=custom_hadoop_config,
                               use_qubole_placement_policy=arguments.use_qubole_placement_policy,
                               fairscheduler_config_xml=fairscheduler_config_xml,
                               default_pool=arguments.default_pool,
                               presto_version=arguments.presto_version,
                               custom_presto_config=custom_presto_config,
                               spark_version=arguments.spark_version,
                               custom_spark_config=arguments.custom_spark_config,
                               dbtap_id=arguments.dbtap_id,
                               fernet_key=arguments.fernet_key,
                               overrides=arguments.overrides,
                               airflow_version=arguments.airflow_version,
                               airflow_python_version=arguments.airflow_python_version,
                               enable_rubix=arguments.enable_rubix)
Example 5
    def get_cluster_create_clone_update(arguments, action):
        customer_ssh_key = util._read_file(arguments.customer_ssh_key_file)
        # This will set cluster info and monitoring settings
        cluster_info = ClusterInfoV2(arguments.label)
        cluster_info.set_cluster_info(disallow_cluster_termination=arguments.disallow_cluster_termination,
                                      enable_ganglia_monitoring=arguments.enable_ganglia_monitoring,
                                      datadog_api_token=arguments.datadog_api_token,
                                      datadog_app_token=arguments.datadog_app_token,
                                      node_bootstrap=arguments.node_bootstrap_file,
                                      master_instance_type=arguments.master_instance_type,
                                      slave_instance_type=arguments.slave_instance_type,
                                      min_nodes=arguments.initial_nodes,
                                      max_nodes=arguments.max_nodes,
                                      slave_request_type=arguments.slave_request_type,
                                      fallback_to_ondemand=arguments.fallback_to_ondemand,
                                      custom_tags=arguments.custom_tags,
                                      heterogeneous_config=arguments.heterogeneous_config,
                                      maximum_bid_price_percentage=arguments.maximum_bid_price_percentage,
                                      timeout_for_request=arguments.timeout_for_request,
                                      maximum_spot_instance_percentage=arguments.maximum_spot_instance_percentage,
                                      stable_maximum_bid_price_percentage=arguments.stable_maximum_bid_price_percentage,
                                      stable_timeout_for_request=arguments.stable_timeout_for_request,
                                      stable_spot_fallback=arguments.stable_spot_fallback,
                                      idle_cluster_timeout=arguments.idle_cluster_timeout,
                                      disk_count=arguments.count,
                                      disk_type=arguments.disk_type,
                                      disk_size=arguments.size,
                                      upscaling_config=arguments.upscaling_config,
                                      enable_encryption=arguments.encrypted_ephemerals,
                                      customer_ssh_key=customer_ssh_key,
                                      image_uri_overrides=arguments.image_uri_overrides)

        #  This will set cloud config settings
        cloud_config = Qubole.get_cloud()
        cloud_config.set_cloud_config_from_arguments(arguments)

        # This will set engine settings
        engine_config = Engine(flavour=arguments.flavour)
        engine_config.set_engine_config_settings(arguments)

        cluster_request = ClusterCmdLine.get_cluster_request_parameters(cluster_info, cloud_config, engine_config)

        if action == "create":
            return arguments.func(cluster_request)
        else:
            return arguments.func(arguments.cluster_id_label, cluster_request)
Example 6
 def set_cluster_info_from_arguments(self, arguments):
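     # Map the parsed CLI arguments (instance types, scaling limits, spot and
     # stable-node settings, disks, timeouts, pause options, etc.) onto a
     # single set_cluster_info call.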
     customer_ssh_key = util._read_file(arguments.customer_ssh_key_file)
     self.set_cluster_info(disallow_cluster_termination=arguments.disallow_cluster_termination,
                           enable_ganglia_monitoring=arguments.enable_ganglia_monitoring,
                           datadog_api_token=arguments.datadog_api_token,
                           datadog_app_token=arguments.datadog_app_token,
                           notification_channels=arguments.notification_channels,
                           node_bootstrap=arguments.node_bootstrap_file,
                           master_instance_type=arguments.master_instance_type,
                           slave_instance_type=arguments.slave_instance_type,
                           min_nodes=arguments.initial_nodes,
                           max_nodes=arguments.max_nodes,
                           slave_request_type=arguments.slave_request_type,
                           fallback_to_ondemand=arguments.fallback_to_ondemand,
                           node_base_cooldown_period=arguments.node_base_cooldown_period,
                           node_spot_cooldown_period=arguments.node_spot_cooldown_period,
                           custom_tags=arguments.custom_tags,
                           heterogeneous_config=arguments.heterogeneous_config,
                           maximum_bid_price_percentage=arguments.maximum_bid_price_percentage,
                           timeout_for_request=arguments.timeout_for_request,
                           maximum_spot_instance_percentage=arguments.maximum_spot_instance_percentage,
                           stable_maximum_bid_price_percentage=arguments.stable_maximum_bid_price_percentage,
                           stable_timeout_for_request=arguments.stable_timeout_for_request,
                           stable_spot_fallback=arguments.stable_spot_fallback,
                           spot_block_duration=arguments.spot_block_duration,
                           idle_cluster_timeout=arguments.idle_cluster_timeout,
                           disk_count=arguments.count,
                           disk_type=arguments.disk_type,
                           disk_size=arguments.size,
                           root_disk_size=arguments.root_disk_size,
                           upscaling_config=arguments.upscaling_config,
                           enable_encryption=arguments.encrypted_ephemerals,
                           customer_ssh_key=customer_ssh_key,
                           image_uri_overrides=arguments.image_uri_overrides,
                           env_name=arguments.env_name,
                           python_version=arguments.python_version,
                           r_version=arguments.r_version,
                           disable_cluster_pause=arguments.disable_cluster_pause,
                           paused_cluster_timeout_mins=arguments.paused_cluster_timeout_mins,
                           disable_autoscale_node_pause=arguments.disable_autoscale_node_pause,
                           paused_autoscale_node_timeout_mins=arguments.paused_autoscale_node_timeout_mins,
                           parent_cluster_id=arguments.parent_cluster_id,
                           parent_cluster_label=arguments.parent_cluster_label,
                           image_version=arguments.image_version)
Example 7
 def set_cluster_info_from_arguments(self, arguments):
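     # Populate the common cluster settings, then apply the cluster composition
     # (master, minimum and autoscaling node groups) for the configured cloud.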
     customer_ssh_key = util._read_file(arguments.customer_ssh_key_file)
     self.set_cluster_info(
         disallow_cluster_termination=arguments.disallow_cluster_termination,
         enable_ganglia_monitoring=arguments.enable_ganglia_monitoring,
         datadog_api_token=arguments.datadog_api_token,
         datadog_app_token=arguments.datadog_app_token,
         node_bootstrap=arguments.node_bootstrap_file,
         master_instance_type=arguments.master_instance_type,
         slave_instance_type=arguments.slave_instance_type,
         min_nodes=arguments.initial_nodes,
         max_nodes=arguments.max_nodes,
         node_base_cooldown_period=arguments.node_base_cooldown_period,
         node_spot_cooldown_period=arguments.node_spot_cooldown_period,
         custom_tags=arguments.custom_tags,
         heterogeneous_config=arguments.heterogeneous_config,
         idle_cluster_timeout=arguments.idle_cluster_timeout,
         disk_count=arguments.count,
         disk_type=arguments.disk_type,
         disk_size=arguments.size,
         root_disk_size=arguments.root_disk_size,
         upscaling_config=arguments.upscaling_config,
         enable_encryption=arguments.encrypted_ephemerals,
         customer_ssh_key=customer_ssh_key,
         image_uri_overrides=arguments.image_uri_overrides,
         env_name=arguments.env_name,
         python_version=arguments.python_version,
         r_version=arguments.r_version,
         disable_cluster_pause=arguments.disable_cluster_pause,
         paused_cluster_timeout_mins=arguments.paused_cluster_timeout_mins,
         disable_autoscale_node_pause=arguments.disable_autoscale_node_pause,
         paused_autoscale_node_timeout_mins=arguments.paused_autoscale_node_timeout_mins,
         parent_cluster_id=arguments.parent_cluster_id,
         image_version=arguments.image_version)
     if Qubole.get_cloud_name() == "aws":
         # Need to move to aws cloud.
         self.set_composition(
             master_type=arguments.master_type,
             master_spot_block_duration=arguments.master_spot_block_duration,
             master_maximum_bid_price_percentage=arguments.master_maximum_bid_price_percentage,
             master_timeout_for_request=arguments.master_timeout_for_request,
             master_spot_fallback=arguments.master_spot_fallback,
             min_ondemand_percentage=arguments.min_ondemand_percentage,
             min_spot_block_percentage=arguments.min_spot_block_percentage,
             min_spot_block_duration=arguments.min_spot_block_duration,
             min_spot_percentage=arguments.min_spot_percentage,
             min_maximum_bid_price_percentage=arguments.min_maximum_bid_price_percentage,
             min_timeout_for_request=arguments.min_timeout_for_request,
             min_spot_allocation_strategy=arguments.min_spot_allocation_strategy,
             min_spot_fallback=arguments.min_spot_fallback,
             autoscaling_ondemand_percentage=arguments.autoscaling_ondemand_percentage,
             autoscaling_spot_block_percentage=arguments.autoscaling_spot_block_percentage,
             autoscaling_spot_percentage=arguments.autoscaling_spot_percentage,
             autoscaling_spot_block_duration=arguments.autoscaling_spot_block_duration,
             autoscaling_maximum_bid_price_percentage=arguments.autoscaling_maximum_bid_price_percentage,
             autoscaling_timeout_for_request=arguments.autoscaling_timeout_for_request,
             autoscaling_spot_allocation_strategy=arguments.autoscaling_spot_allocation_strategy,
             autoscaling_spot_fallback=arguments.autoscaling_spot_fallback,
             autoscaling_spot_block_fallback=arguments.autoscaling_spot_block_fallback)
     else:
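          # Non-AWS clouds derive composition settings via their own cloud-specific parser.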
         self.set_composition_from_cloud_using_parser(arguments)