def __init__(self, spark_service_spec):
    super(AwsEMR, self).__init__(spark_service_spec)
    # TODO(hildrum) use availability zone when appropriate
    if self.machine_type is None:
        self.machine_type = DEFAULT_MACHINE_TYPE
    self.cmd_prefix = util.AWS_PREFIX + ['emr']
    if FLAGS.zones:
        region = util.GetRegionFromZone(FLAGS.zones[0])
        self.cmd_prefix += ['--region', region]
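
All of these snippets derive the AWS region by trimming the availability-zone suffix. As a rough illustration only (not PerfKitBenchmarker's actual implementation, which lives in perfkitbenchmarker.providers.aws.util), GetRegionFromZone can be sketched like this:

import re

def get_region_from_zone(zone):
    # Hypothetical stand-in for util.GetRegionFromZone: an AWS availability
    # zone such as 'us-east-1a' is its region name plus one trailing letter,
    # so strip that letter to recover the region.
    match = re.match(r'^([a-z]+-[a-z]+-\d+)[a-z]?$', zone)
    if match is None:
        raise ValueError('Unrecognized zone: %s' % zone)
    return match.group(1)

assert get_region_from_zone('us-east-1a') == 'us-east-1'
assert get_region_from_zone('eu-west-2') == 'eu-west-2'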
Example #2
  def __init__(self, dpb_service_spec):
    super(AwsDpbEmr, self).__init__(dpb_service_spec)
    self.project = None
    # Copy AWS_PREFIX so appending '--region' below does not mutate the
    # shared module-level list.
    self.cmd_prefix = list(util.AWS_PREFIX)

    if FLAGS.zones:
      self.zone = FLAGS.zones[0]
      region = util.GetRegionFromZone(self.zone)
      self.cmd_prefix += ['--region', region]

    self.network = aws_network.AwsNetwork.GetNetwork(self)
    self.bucket_to_delete = None
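
Note the copy applied above: util.AWS_PREFIX is a module-level list, and += mutates a list in place, so appending '--region' through an un-copied reference would leak into every later caller of the prefix. A minimal standalone demonstration of the pitfall (the prefix contents here are a stand-in, not necessarily util.AWS_PREFIX's real value):

AWS_PREFIX = ['aws', '--output', 'json']   # stand-in for util.AWS_PREFIX

aliased = AWS_PREFIX                 # same underlying list object
aliased += ['--region', 'us-east-1']
print(AWS_PREFIX)                    # shared prefix is now polluted

AWS_PREFIX = ['aws', '--output', 'json']
copied = list(AWS_PREFIX)            # independent copy, as in Example #3
copied += ['--region', 'us-east-1']
print(AWS_PREFIX)                    # unchanged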
Example #3
    def __init__(self, edw_service_spec):
        super(Redshift, self).__init__(edw_service_spec)
        # PKB setup attributes
        self.project = None
        self.cmd_prefix = list(util.AWS_PREFIX)
        if FLAGS.zones:
            self.zone = FLAGS.zones[0]
            self.region = util.GetRegionFromZone(self.zone)
        else:
            self.region = GetDefaultRegion()
        self.cmd_prefix += ['--region', self.region]

        # Redshift-specific attributes (see if they can be set)
        self.cluster_subnet_group = None
        self.cluster_parameter_group = None
        self.arn = ''
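
Example #3 is the only variant that falls back to GetDefaultRegion() when no zone flag is given. A minimal sketch of such a fallback, assuming it shells out to the AWS CLI's configured default (the real helper may differ):

import subprocess

def get_default_region():
    # Hypothetical fallback, not necessarily the module's actual helper:
    # ask the AWS CLI for the default region set via `aws configure`.
    out = subprocess.check_output(['aws', 'configure', 'get', 'region'])
    return out.decode().strip()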
Example #4
  def __init__(self, edw_service_spec):
    super(Redshift, self).__init__(edw_service_spec)
    self.project = None
    # Copy AWS_PREFIX so the '--region' append below does not mutate the
    # shared module-level list.
    self.cmd_prefix = list(util.AWS_PREFIX)
    if FLAGS.zones:
      self.zone = FLAGS.zones[0]
      self.region = util.GetRegionFromZone(self.zone)
      self.cmd_prefix += ['--region', self.region]
    self.arn = ''
    self.cluster_identifier = 'pkb-' + FLAGS.run_uri
    self.cluster_subnet_group = None
    self.cluster_parameter_group = None
    self.concurrency = FLAGS.edw_service_cluster_concurrency
    self.endpoint = ''
    self.db = ''
    self.user = ''
    self.password = ''
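
Once built, self.cmd_prefix is the common stem for every AWS CLI invocation the service issues. A hypothetical follow-on call in the Redshift example's style (the helper name is illustrative, not one of the class's actual methods, though `redshift describe-clusters` is a real CLI subcommand):

def describe_cluster_cmd(cmd_prefix, cluster_identifier):
    # Hypothetical helper: compose the shared prefix with a subcommand;
    # the caller would hand the result to vm_util.IssueCommand.
    return cmd_prefix + ['redshift', 'describe-clusters',
                         '--cluster-identifier', cluster_identifier]

# e.g. describe_cluster_cmd(['aws', '--region', 'us-east-1'], 'pkb-abc123')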
Example #5
    def __init__(self, spark_service_spec):
        super(AwsEMR, self).__init__(spark_service_spec)
        # TODO(hildrum) use availability zone when appropriate
        if self.machine_type is None:
            self.machine_type = DEFAULT_MACHINE_TYPE

        # Copy AWS_PREFIX so the '--region' append below does not mutate
        # the shared module-level list.
        self.cmd_prefix = list(util.AWS_PREFIX)

        if self.spec.zone:
            # Use the zone the guard just checked, not FLAGS.zones[0],
            # which may be unset.
            region = util.GetRegionFromZone(self.spec.zone)
            self.cmd_prefix += ['--region', region]

        # Certain machine types require subnets.
        if (self.spec.static_cluster_id is None
                and self.machine_type[0:2] in NEEDS_SUBNET):
            self.network = aws_network.AwsNetwork.GetNetwork(self.spec)
        else:
            self.network = None
        self.bucket_to_delete = None
Example #6
  def __init__(self, spark_service_spec):
    super(AwsEMR, self).__init__(spark_service_spec)
    # TODO(hildrum) use availability zone when appropriate
    worker_machine_type = self.spec.worker_group.vm_spec.machine_type
    leader_machine_type = self.spec.master_group.vm_spec.machine_type
    self.cmd_prefix = list(util.AWS_PREFIX)

    if self.zone:
      region = util.GetRegionFromZone(self.zone)
      self.cmd_prefix += ['--region', region]

    # Certain machine types require subnets.
    if (self.spec.static_cluster_id is None and
        (worker_machine_type[0:2] in NEEDS_SUBNET or
         leader_machine_type[0:2] in NEEDS_SUBNET)):
      # GetNetwork is supposed to take a VM, but all it uses
      # from the VM is the zone attribute, which self has.
      self.network = aws_network.AwsNetwork.GetNetwork(self)
    else:
      self.network = None
    self.bucket_to_delete = None
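
The last two examples gate VPC/subnet creation on the first two characters of the machine type. A minimal sketch of that check, with the set's members assumed for illustration (the real NEEDS_SUBNET constant is defined alongside the class):

NEEDS_SUBNET = frozenset(['m4', 'c4'])  # assumed members, for illustration

def needs_subnet(machine_type):
    # Mirrors the machine_type[0:2] test above: VPC-only EC2 families
    # require the benchmark to provision a subnet before cluster creation.
    return machine_type[:2] in NEEDS_SUBNET

assert needs_subnet('m4.large')
assert not needs_subnet('m3.medium')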