Code example #1
  def create_security_group(self, parameters, group):
    """Creates a new security group in AWS with the given name.

    Args:
      parameters: A dict that contains the credentials necessary to authenticate
        with AWS.
      group: A str that names the group that should be created.
    Raises:
      AgentRuntimeException: If the security group could not be created.
    """
    AppScaleLogger.log('Creating security group: {0}'.format(group))
    conn = self.open_connection(parameters)
    retries_left = self.SECURITY_GROUP_RETRY_COUNT
    while retries_left:
      try:
        conn.create_security_group(group, 'AppScale security group')
      except EC2ResponseError:
        pass
      try:
        conn.get_all_security_groups(group)
        return
      except EC2ResponseError:
        pass
      time.sleep(self.SLEEP_TIME)
      retries_left -= 1

    raise AgentRuntimeException("Couldn't create security group with " \
      "name {0}".format(group))
Code example #2
  def get_optimal_spot_price(self, conn, instance_type, zone):
    """
    Returns the spot price for an EC2 instance of the specified instance type.
    The returned value is computed by averaging all the spot price history
    values returned by the back-end EC2 APIs and increasing the average by an
    extra 10%.

    Args:
      conn: A boto.EC2Connection that can be used to communicate with AWS.
      instance_type: A str representing the instance type whose prices we
        should speculate for.
      zone: A str representing the availability zone that the instance will
        be placed in.
    Returns:
      The estimated spot price for the specified instance type, in the
        specified availability zone.
    """
    end_time = datetime.datetime.now()
    start_time = end_time - datetime.timedelta(days=7)
    history = conn.get_spot_price_history(start_time=start_time.isoformat(),
      end_time=end_time.isoformat(), product_description='Linux/UNIX',
      instance_type=instance_type, availability_zone=zone)
    var_sum = 0.0
    for entry in history:
      var_sum += entry.price
    average = var_sum / len(history)
    bid_price = average * 1.10
    AppScaleLogger.log('The average spot instance price for a {0} machine is'\
        ' {1}, and 10% more is {2}'.format(instance_type, average, bid_price))
    return bid_price
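A usage sketch for the method above (illustrative only: `agent` stands in for an EC2Agent-like instance, and the region, instance type, and zone are made-up values; boto 2 credentials are assumed to come from the usual environment or config sources):

import boto.ec2

# Hypothetical wiring: open a region connection and ask for a bid suggestion.
conn = boto.ec2.connect_to_region('us-east-1')
price = agent.get_optimal_spot_price(conn, 'm3.medium', 'us-east-1b')
print('Suggested spot bid: {0}'.format(price))

Note that the averaging step assumes the price-history query returns at least one entry; with an empty history the division would fail.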
Code example #3
File: ec2_agent.py Project: sjones4/appscale-tools
    def create_security_group(self, parameters, group):
        """Creates a new security group in AWS with the given name.

    Args:
      parameters: A dict that contains the credentials necessary to authenticate
        with AWS.
      group: A str that names the group that should be created.
    Returns:
      The 'boto.ec2.securitygroup.SecurityGroup' that was just created.
    Raises:
      AgentRuntimeException: If the security group could not be created.
    """
        AppScaleLogger.log('Creating security group: {0}'.format(group))
        conn = self.open_connection(parameters)
        specified_vpc = parameters.get(self.PARAM_VPC_ID)

        retries_left = self.SECURITY_GROUP_RETRY_COUNT
        while retries_left:
            try:
                conn.create_security_group(group, 'AppScale security group',
                                           specified_vpc)
            except EC2ResponseError:
                pass
            try:
                return self.get_security_group_by_name(conn, group,
                                                       specified_vpc)
            except SecurityGroupNotFoundException:
                pass
            time.sleep(self.SLEEP_TIME)
            retries_left -= 1

        raise AgentRuntimeException("Couldn't create security group with " \
          "name {0}".format(group))
Code example #4
  def cleanup_state(self, parameters):
    """ Removes any remote state that was created to run AppScale instances
    during this deployment.
    Args:
      parameters: A dict that includes keys indicating the remote state
        that should be deleted.
    """
    subscription_id = parameters[self.PARAM_SUBSCRIBER_ID]
    resource_group = parameters[self.PARAM_RESOURCE_GROUP]
    credentials = self.open_connection(parameters)
    network_client = NetworkManagementClient(credentials, subscription_id)
    verbose = parameters[self.PARAM_VERBOSE]

    AppScaleLogger.log("Deleting the Virtual Network, Public IP Address "
      "and Network Interface created for this deployment.")
    network_interfaces = network_client.network_interfaces.list(resource_group)
    for interface in network_interfaces:
      result = network_client.network_interfaces.delete(resource_group, interface.name)
      resource_name = 'Network Interface' + ':' + interface.name
      self.sleep_until_delete_operation_done(result, resource_name,
                                             self.MAX_SLEEP_TIME, verbose)

    public_ip_addresses = network_client.public_ip_addresses.list(resource_group)
    for public_ip in public_ip_addresses:
      result = network_client.public_ip_addresses.delete(resource_group, public_ip.name)
      resource_name = 'Public IP Address' + ':' + public_ip.name
      self.sleep_until_delete_operation_done(result, resource_name,
                                             self.MAX_SLEEP_TIME, verbose)

    virtual_networks = network_client.virtual_networks.list(resource_group)
    for network in virtual_networks:
      result = network_client.virtual_networks.delete(resource_group, network.name)
      resource_name = 'Virtual Network' + ':' + network.name
      self.sleep_until_delete_operation_done(result, resource_name,
                                             self.MAX_SLEEP_TIME, verbose)
Code example #5
  def update_dispatch(cls, source_location, keyname, project_id):
    """ Updates an application's dispatch routing rules from the configuration
      file.

    Args:
      source_location: A string specifying the location of the source code.
      keyname: A string specifying the key name.
      project_id: A string specifying the project ID.
    """
    if cls.TAR_GZ_REGEX.search(source_location):
      fetch_function = utils.config_from_tar_gz
      version = Version.from_tar_gz(source_location)
    elif cls.ZIP_REGEX.search(source_location):
      fetch_function = utils.config_from_zip
      version = Version.from_zip(source_location)
    elif os.path.isdir(source_location):
      fetch_function = utils.config_from_dir
      version = Version.from_directory(source_location)
    elif source_location.endswith('.yaml'):
      fetch_function = utils.config_from_dir
      version = Version.from_yaml_file(source_location)
      source_location = os.path.dirname(source_location)
    else:
      raise BadConfigurationException(
        '{} must be a directory, tar.gz, or zip'.format(source_location))

    if project_id:
      version.project_id = project_id

    dispatch_rules = utils.dispatch_from_yaml(source_location, fetch_function)
    if dispatch_rules is None:
      return
    AppScaleLogger.log('Updating dispatch for {}'.format(version.project_id))

    load_balancer_ip = LocalState.get_host_with_role(keyname, 'load_balancer')
    secret_key = LocalState.get_secret_key(keyname)
    admin_client = AdminClient(load_balancer_ip, secret_key)
    operation_id = admin_client.update_dispatch(version.project_id, dispatch_rules)

    # Check on the operation.
    AppScaleLogger.log("Please wait for your dispatch to be updated.")

    deadline = time.time() + cls.MAX_OPERATION_TIME
    while True:
      if time.time() > deadline:
        raise AppScaleException('The operation took too long.')
      operation = admin_client.get_operation(version.project_id, operation_id)
      if not operation['done']:
        time.sleep(1)
        continue

      if 'error' in operation:
        raise AppScaleException(operation['error']['message'])
      dispatch_rules = operation['response']['dispatchRules']
      break

    AppScaleLogger.verbose(
        "The following dispatchRules have been applied to your application's "
        "configuration : {}".format(dispatch_rules))
    AppScaleLogger.success('Dispatch has been updated for {}'.format(
        version.project_id))
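For reference, the dispatchRules echoed back by the operation above form a list of routing entries; a hand-written sample in the App Engine Admin API shape (an assumption for illustration, not output captured from this code) looks like:

dispatch_rules = [
    {'domain': '*', 'path': '/api/*', 'service': 'api'},
    {'domain': '*', 'path': '/*', 'service': 'default'},
]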
Code example #6
def print_table(table_name, headers, data):
  """
  Prints a list of statistics with specified headers.

  Args:
    table_name: A string representing a name of table.
    headers: A list of statistic headers.
    data: A list of statistics.
  """
  table = tabulate(tabular_data=data, headers=headers, tablefmt='simple',
                   floatfmt=".1f", numalign="right", stralign="left")

  table_width = len(table.split("\n", 2)[1])
  left_signs = " " * ((table_width - len(table_name) - 2) / 2)
  right_signs = left_signs + (
    " " if (table_width - len(table_name)) % 2 == 1 else ""
  )
  result_table_name = (
    "{l_signs} {name} {r_signs}"
      .format(l_signs=left_signs, name=table_name, r_signs=right_signs)
  )

  title = styled(result_table_name, "bold", "blue", "reverse")
  AppScaleLogger.log(title)
  AppScaleLogger.log(table + "\n")
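A minimal invocation sketch for print_table (made-up values; this codebase targets Python 2, so the `/ 2` centering arithmetic above is integer division):

# Illustrative statistics table; headers and rows are placeholders.
headers = ["SERVICE", "REQ/SEC", "AVG LATENCY (ms)"]
data = [("default", 12.0, 38.4), ("api", 3.5, 51.2)]
print_table("HTTP statistics", headers, data)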
Code example #7
File: ec2_agent.py Project: AppScale/appscale-tools
  def create_security_group(self, parameters, group):
    """Creates a new security group in AWS with the given name.

    Args:
      parameters: A dict that contains the credentials necessary to authenticate
        with AWS.
      group: A str that names the group that should be created.
    Returns:
      The 'boto.ec2.securitygroup.SecurityGroup' that was just created.
    Raises:
      AgentRuntimeException: If the security group could not be created.
    """
    AppScaleLogger.log('Creating security group: {0}'.format(group))
    conn = self.open_connection(parameters)
    specified_vpc = parameters.get(self.PARAM_VPC_ID)

    retries_left = self.SECURITY_GROUP_RETRY_COUNT
    while retries_left:
      try:
        conn.create_security_group(group, 'AppScale security group',
                                   specified_vpc)
      except EC2ResponseError:
        pass
      try:
        return self.get_security_group_by_name(conn, group, specified_vpc)
      except SecurityGroupNotFoundException:
        pass
      time.sleep(self.SLEEP_TIME)
      retries_left -= 1

    raise AgentRuntimeException("Couldn't create security group with " \
      "name {0}".format(group))
Code example #8
File: ec2_agent.py Project: tmarballi/appscale-tools
  def attach_disk(self, parameters, disk_name, instance_id):
    """ Attaches the Elastic Block Store volume specified in 'disk_name' to this
    virtual machine.

    Args:
      parameters: A dict with keys for each parameter needed to connect to AWS.
      disk_name: A str naming the EBS mount to attach to this machine.
      instance_id: A str naming the id of the instance that the disk should be
        attached to. In practice, callers add disks to their own instances.
    Returns:
      The location on the local filesystem where the disk has been attached.
    """
    # In Amazon Web Services, if we're running on a Xen Paravirtualized machine,
    # then devices get added starting at /dev/xvda. If not, they get added at
    # /dev/sda. Find out which one we're on so that we know where the disk will
    # be attached.
    if glob.glob("/dev/xvd*"):
      mount_point = '/dev/xvdc'
    else:
      mount_point = '/dev/sdc'

    conn = self.open_connection(parameters)

    try:
      AppScaleLogger.log('Attaching volume {0} to instance {1}, at {2}'.format(
        disk_name, instance_id, mount_point))
      conn.attach_volume(disk_name, instance_id, mount_point)
      return mount_point
    except EC2ResponseError as exception:
      if self.disk_attached(conn, disk_name, instance_id):
        return mount_point
      AppScaleLogger.log('An error occurred when trying to attach volume {0} '
        'to instance {1} at {2}'.format(disk_name, instance_id, mount_point))
      self.handle_failure('EC2 response error while attaching volume:' +
        exception.error_message)
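A call sketch for the method above (the volume and instance IDs are placeholders, and `agent`/`params` are assumed to be an EC2Agent instance and its credentials dict):

device = agent.attach_disk(params, 'vol-0123abcd', 'i-0456efgh')
AppScaleLogger.log('Disk attached at {0}'.format(device))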
Code example #9
  def create_storage_account(self, parameters, storage_client):
    """ Creates a Storage Account under the Resource Group, if it does not
    already exist. In the case where no resource group is specified, a default
    storage account is created.
    Args:
      parameters: A dict, containing all the parameters necessary to authenticate
        this user with Azure.
      storage_client: A StorageManagementClient instance that can be used to
        create or access storage accounts.
    Raises:
      AgentConfigurationException: If there was a problem creating or accessing
        a storage account with the given subscription.
    """
    storage_account = parameters[self.PARAM_STORAGE_ACCOUNT]
    rg_name = parameters[self.PARAM_RESOURCE_GROUP]

    try:
      AppScaleLogger.log("Creating a new storage account '{0}' under the "
        "resource group '{1}'.".format(storage_account, rg_name))
      result = storage_client.storage_accounts.create(
        rg_name, storage_account, StorageAccountCreateParameters(
          sku=Sku(SkuName.standard_lrs), kind=Kind.storage,
          location=parameters[self.PARAM_ZONE]))
      # Result is a msrestazure.azure_operation.AzureOperationPoller instance.
      # wait() ensures the underlying async operation is polled until it's done.
      result.wait()
    except CloudError as error:
      raise AgentConfigurationException("Unable to create a storage account "
        "using the credentials provided: {}".format(error.message))
Code example #10
    def does_user_exist(self, username, silent=False):
        """ Queries the AppController to see if the given user exists.

    Args:
      username: The email address registered as username for the user's application.
    Raises:
      AppControllerException: If unable to check whether the user exists.
    """
        for _ in range(self.DEFAULT_NUM_RETRIES):
            try:
                user_exists = self.run_with_timeout(
                    self.DEFAULT_TIMEOUT, self.DEFAULT_NUM_RETRIES,
                    self.server.does_user_exist, username, self.secret)
                if user_exists == 'true':
                    return True
                elif user_exists == 'false':
                    return False
                else:
                    raise AppControllerException(
                        'Invalid return value: {}'.format(user_exists))
            except BadSecretException as exception:
                raise AppControllerException(
                    "Exception when checking if a user exists: {0}".format(
                        exception))
            except Exception as acc_error:
                if not silent:
                    AppScaleLogger.log(
                        "Exception when checking if a user exists: {0}".format(
                            acc_error))
                    AppScaleLogger.log("Backing off and trying again.")
                time.sleep(10)

        raise AppControllerException(
            'Exceeded retries when checking if user exists')
Code example #11
 def _print_nodes_info(cls, nodes, invisible_nodes):
   """ Prints table with details about cluster nodes
   Args:
     nodes: a list of NodeStats
     invisible_nodes: a list of IPs of nodes which didn't report its stats
   """
   header = (
     "PUBLIC IP", "PRIVATE IP", "I/L*", "CPU%xCORES", "MEMORY%", "DISK%",
     "LOADAVG", "ROLES"
   )
   table = [
     (n.public_ip, n.private_ip,
      "{}/{}".format("+" if n.is_initialized else "-",
                     "+" if n.is_loaded else "-"),
      "{:.1f}x{}".format(n.cpu.load, n.cpu.count),
      100.0 - n.memory.available_percent,
      " ".join('"{}" => {:.1f}'.format(p.mountpoint, p.used_percent) for p in n.disk.partitions),
      "{:.1f} {:.1f} {:.1f}".format(
        n.loadavg.last_1_min, n.loadavg.last_5_min, n.loadavg.last_15_min),
      " ".join(n.roles))
     for n in nodes
   ]
   table += [("?", ip, "?", "?", "?", "?", "?", "?") for ip in invisible_nodes]
   table_str = tabulate(table, header, tablefmt="plain", floatfmt=".1f")
   AppScaleLogger.log(table_str)
   AppScaleLogger.log("* I/L means 'Is node Initialized'/'Is node Loaded'")
Code example #12
    def create_security_group(self, parameters, group):
        """Creates a new security group in AWS with the given name.

    Args:
      parameters: A dict that contains the credentials necessary to authenticate
        with AWS.
      group: A str that names the group that should be created.
    Raises:
      AgentRuntimeException: If the security group could not be created.
    """
        AppScaleLogger.log('Creating security group: {0}'.format(group))
        conn = self.open_connection(parameters)
        retries_left = self.SECURITY_GROUP_RETRY_COUNT
        while retries_left:
            try:
                conn.create_security_group(group, 'AppScale security group')
            except EC2ResponseError:
                pass
            try:
                conn.get_all_security_groups(group)
                return
            except EC2ResponseError:
                pass
            time.sleep(self.SLEEP_TIME)
            retries_left -= 1

        raise AgentRuntimeException("Couldn't create security group with " \
          "name {0}".format(group))
Code example #13
def print_table(table_name, headers, data):
  """
  Prints a list of statistics with specified headers.

  Args:
    table_name: A string representing a name of table.
    headers: A list of statistic headers.
    data: A list of statistics.
  """
  table = tabulate(tabular_data=data, headers=headers, tablefmt='simple',
                   floatfmt=".1f", numalign="right", stralign="left")

  table_width = len(table.split("\n", 2)[1])
  left_signs = " " * ((table_width - len(table_name) - 2) / 2)
  right_signs = left_signs + (
    " " if (table_width - len(table_name)) % 2 == 1 else ""
  )
  result_table_name = (
    "{l_signs} {name} {r_signs}"
      .format(l_signs=left_signs, name=table_name, r_signs=right_signs)
  )

  title = styled(result_table_name, "bold", "blue", "reverse")
  AppScaleLogger.log(title)
  AppScaleLogger.log(table + "\n")
Code example #14
    def create_storage_account(self, parameters, storage_client):
        """ Creates a Storage Account under the Resource Group, if it does not
    already exist. In the case where no resource group is specified, a default
    storage account is created.
    Args:
      parameters: A dict, containing all the parameters necessary to authenticate
        this user with Azure.
      storage_client: A StorageManagementClient instance that can be used to
        create or access storage accounts.
    Raises:
      AgentConfigurationException: If there was a problem creating or accessing
        a storage account with the given subscription.
    """
        storage_account = parameters[self.PARAM_STORAGE_ACCOUNT]
        rg_name = parameters[self.PARAM_RESOURCE_GROUP]

        try:
            AppScaleLogger.log(
                "Creating a new storage account '{0}' under the "
                "resource group '{1}'.".format(storage_account, rg_name))
            result = storage_client.storage_accounts.create(
                rg_name, storage_account,
                StorageAccountCreateParameters(
                    sku=Sku(SkuName.standard_lrs),
                    kind=Kind.storage,
                    location=parameters[self.PARAM_ZONE]))
            # Result is a msrestazure.azure_operation.AzureOperationPoller instance.
            # wait() ensures the underlying async operation is polled until it's done.
            result.wait()
        except CloudError as error:
            raise AgentConfigurationException(
                "Unable to create a storage account "
                "using the credentials provided: {}".format(error.message))
Code example #15
  def add_instances(cls, options):
    """Adds additional machines to an AppScale deployment.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    """
    if 'master' in options.ips.keys():
      raise BadConfigurationException("Cannot add master nodes to an " + \
        "already running AppScale deployment.")

    # In virtualized cluster deployments, we need to make sure that the user
    # has already set up SSH keys.
    if LocalState.get_infrastructure_option(keyname=options.keyname,
                                            tag='infrastructure') == "xen":
      ips_to_check = []
      for ip_group in options.ips.values():
        ips_to_check.extend(ip_group)
      for ip in ips_to_check:
        # throws a ShellException if the SSH key doesn't work
        RemoteHelper.ssh(ip, options.keyname, "ls")

    # Finally, find an AppController and send it a message to add
    # the given nodes with the new roles.
    AppScaleLogger.log("Sending request to add instances")
    load_balancer_ip = LocalState.get_host_with_role(
      options.keyname, 'load_balancer')
    acc = AppControllerClient(load_balancer_ip, LocalState.get_secret_key(
      options.keyname))
    acc.start_roles_on_nodes(json.dumps(options.ips))

    # TODO(cgb): Should we wait for the new instances to come up and get
    # initialized?
    AppScaleLogger.success("Successfully sent request to add instances " + \
                           "to this AppScale deployment.")
Code example #16
File: ec2_agent.py Project: sjones4/appscale-tools
    def get_optimal_spot_price(self, conn, instance_type, zone):
        """
    Returns the spot price for an EC2 instance of the specified instance type.
    The returned value is computed by averaging all the spot price history
    values returned by the back-end EC2 APIs and increasing the average by an
    extra 10%.

    Args:
      conn: A boto.EC2Connection that can be used to communicate with AWS.
      instance_type: A str representing the instance type whose prices we
        should speculate for.
      zone: A str representing the availability zone that the instance will
        be placed in.
    Returns:
      The estimated spot price for the specified instance type, in the
        specified availability zone.
    """
        end_time = datetime.datetime.now()
        start_time = end_time - datetime.timedelta(days=7)
        history = conn.get_spot_price_history(
            start_time=start_time.isoformat(),
            end_time=end_time.isoformat(),
            product_description='Linux/UNIX',
            instance_type=instance_type,
            availability_zone=zone)
        var_sum = 0.0
        for entry in history:
            var_sum += entry.price
        average = var_sum / len(history)
        bid_price = average * 1.10
        AppScaleLogger.log('The average spot instance price for a {0} machine is'\
            ' {1}, and 10% more is {2}'.format(instance_type, average, bid_price))
        return bid_price
Code example #17
 def sleep_until_delete_operation_done(self, result, resource_name,
                                       max_sleep, verbose):
     """ Sleeps until the delete operation for the resource is completed
 successfully.
 Args:
   result: An instance, of the AzureOperationPoller to poll for the status
     of the operation being performed.
   resource_name: The name of the resource being deleted.
   max_sleep: The maximum number of seconds to sleep for the resources to
     be deleted.
   verbose: A boolean indicating whether to run in verbose mode.
 """
     time_start = time.time()
     while not result.done():
         AppScaleLogger.verbose(
             "Waiting {0} second(s) for {1} to be deleted.".format(
                 self.SLEEP_TIME, resource_name), verbose)
         time.sleep(self.SLEEP_TIME)
         total_sleep_time = time.time() - time_start
         if total_sleep_time > max_sleep:
             AppScaleLogger.log(
                 "Waited {0} second(s) for {1} to be deleted. "
                 "Operation has timed out.".format(total_sleep_time,
                                                   resource_name))
             break
Code example #18
  def does_user_exist(self, username, silent=False):
    """ Queries the AppController to see if the given user exists.

    Args:
      username: The email address registered as username for the user's application.
    """
    while True:
      try:
        user_exists = self.run_with_timeout(
          self.DEFAULT_TIMEOUT, self.DEFAULT_NUM_RETRIES,
          self.server.does_user_exist, username, self.secret)
        if user_exists == 'true':
          return True
        elif user_exists == 'false':
          return False
        else:
          raise Exception(user_exists)
      except BadSecretException as exception:
        raise AppControllerException(
          "Exception when checking if a user exists: {0}".format(exception))
      except Exception as acc_error:
        if not silent:
          AppScaleLogger.log("Exception when checking if a user exists: {0}".
                             format(acc_error))
          AppScaleLogger.log("Backing off and trying again.")
        time.sleep(10)
Code example #19
    def configure_instance_security(self, parameters):
        """
    Sets up EC2 security keys and groups. Required input values are read from
    the parameters dictionary. More specifically, this method expects to
    find a 'keyname' parameter and a 'group' parameter in the parameters
    dictionary. Using these provided values, this method will create a new
    EC2 key pair and a security group. The security group will be granted
    permission to access any port on the instantiated VMs. (Also see the
    documentation for the BaseAgent class.)

    Args:
      parameters: A dictionary of parameters.
    """
        keyname = parameters[self.PARAM_KEYNAME]
        group = parameters[self.PARAM_GROUP]

        AppScaleLogger.log("Verifying that keyname {0}".format(keyname) + \
          " is not already registered.")
        conn = self.open_connection(parameters)
        if conn.get_key_pair(keyname):
            self.handle_failure("SSH keyname {0} is already registered. Please " \
              "change the 'keyname' specified in your AppScalefile to a different " \
              "value, or erase it to have one automatically generated for you." \
              .format(keyname))

        security_groups = conn.get_all_security_groups()
        for security_group in security_groups:
            if security_group.name == group:
                self.handle_failure("Security group {0} is already registered. Please" \
                  " change the 'group' specified in your AppScalefile to a different " \
                  "value, or erase it to have one automatically generated for you." \
                  .format(group))

        AppScaleLogger.log("Creating key pair: {0}".format(keyname))
        key_pair = conn.create_key_pair(keyname)
        ssh_key = '{0}{1}.key'.format(LocalState.LOCAL_APPSCALE_PATH, keyname)
        LocalState.write_key_file(ssh_key, key_pair.material)

        self.create_security_group(parameters, group)
        self.authorize_security_group(parameters,
                                      group,
                                      from_port=1,
                                      to_port=65535,
                                      ip_protocol='udp',
                                      cidr_ip='0.0.0.0/0')
        self.authorize_security_group(parameters,
                                      group,
                                      from_port=1,
                                      to_port=65535,
                                      ip_protocol='tcp',
                                      cidr_ip='0.0.0.0/0')
        self.authorize_security_group(parameters,
                                      group,
                                      from_port=-1,
                                      to_port=-1,
                                      ip_protocol='icmp',
                                      cidr_ip='0.0.0.0/0')
        return True
Code example #20
    def create_resource_group(self, parameters, credentials):
        """ Creates a Resource Group for the application using the Service Principal
    Credentials, if it does not already exist. In the case where no resource
    group is specified, a default group is created.
    Args:
      parameters: A dict, containing all the parameters necessary to
        authenticate this user with Azure.
      credentials: A ServicePrincipalCredentials instance, that can be used to
        access or create any resources.
    Raises:
      AgentConfigurationException: If there was a problem creating or accessing
        a resource group with the given subscription.
    """
        subscription_id = parameters[self.PARAM_SUBSCRIBER_ID]
        resource_client = ResourceManagementClient(credentials,
                                                   subscription_id)
        rg_name = parameters[self.PARAM_RESOURCE_GROUP]

        tag_name = 'default-tag'
        if parameters[self.PARAM_TAG]:
            tag_name = parameters[self.PARAM_TAG]

        storage_client = StorageManagementClient(credentials, subscription_id)
        resource_client.providers.register(self.MICROSOFT_STORAGE_RESOURCE)
        try:
            # If the resource group does not already exist, create a new one with the
            # specified storage account.
            if not parameters[self.PARAM_EXISTING_RG]:
                AppScaleLogger.log(
                    "Creating a new resource group '{0}' with the tag "
                    "'{1}'.".format(rg_name, tag_name))
                resource_client.resource_groups.create_or_update(
                    rg_name,
                    ResourceGroup(location=parameters[self.PARAM_ZONE],
                                  tags={'tag': tag_name}))
                self.create_storage_account(parameters, storage_client)
            else:
                # If it already exists, check if the specified storage account exists
                # under it and if not, create a new account.
                storage_accounts = storage_client.storage_accounts.\
                  list_by_resource_group(rg_name)
                acct_names = []
                for account in storage_accounts:
                    acct_names.append(account.name)

                if parameters[self.PARAM_STORAGE_ACCOUNT] in acct_names:
                    AppScaleLogger.log(
                        "Storage account '{0}' under '{1}' resource group "
                        "already exists. So not creating it again.".format(
                            parameters[self.PARAM_STORAGE_ACCOUNT], rg_name))
                else:
                    self.create_storage_account(parameters, storage_client)
        except CloudError as error:
            raise AgentConfigurationException(
                "Unable to create a resource group "
                "using the credentials provided: {}".format(error.message))
Code example #21
File: ec2_agent.py Project: sjones4/appscale-tools
    def handle_failure(self, msg):
        """ Log the specified error message and raise an AgentRuntimeException

    Args:
      msg: An error message to be logged and included in the raised exception.
    Raises:
      AgentRuntimeException: Contains the input error message.
    """
        AppScaleLogger.log(msg)
        raise AgentRuntimeException(msg)
Code example #22
 def _print_roles_info(cls, nodes):
   """ Prints table with roles and number of nodes serving each specific role
   Args:
     nodes: a list of NodeStats
   """
   # Report number of nodes and roles running in the cluster
   roles_counter = Counter(chain(*[node.roles for node in nodes]))
   header = ("ROLE", "COUNT")
   table = roles_counter.iteritems()
   AppScaleLogger.log("\n" + tabulate(table, headers=header, tablefmt="plain"))
Code example #23
  def handle_failure(self, msg):
    """ Log the specified error message and raise an AgentRuntimeException

    Args:
      msg: An error message to be logged and included in the raised exception.
    Raises:
      AgentRuntimeException: Contains the input error message.
    """
    AppScaleLogger.log(msg)
    raise AgentRuntimeException(msg)
Code example #24
File: ec2_agent.py Project: tmarballi/appscale-tools
  def configure_instance_security(self, parameters):
    """
    Sets up EC2 security keys and groups. Required input values are read from
    the parameters dictionary. More specifically, this method expects to
    find a 'keyname' parameter and a 'group' parameter in the parameters
    dictionary. Using these provided values, this method will create a new
    EC2 key pair and a security group. The security group will be granted
    permission to access any port on the instantiated VMs. (Also see the
    documentation for the BaseAgent class.)

    Args:
      parameters: A dictionary of parameters.
    """
    keyname = parameters[self.PARAM_KEYNAME]
    group = parameters[self.PARAM_GROUP]
    is_autoscale = parameters['autoscale_agent']

    AppScaleLogger.log("Verifying that keyname {0}".format(keyname) + \
      " is not already registered.")
    conn = self.open_connection(parameters)

    # While creating instances during autoscaling, we do not need to create a
    # new keypair or a security group. We just make use of the existing one.
    if is_autoscale in ['True', True]:
      return

    if conn.get_key_pair(keyname):
      self.handle_failure("SSH keyname {0} is already registered. Please " \
        "change the 'keyname' specified in your AppScalefile to a different " \
        "value, or erase it to have one automatically generated for you." \
        .format(keyname))

    security_groups = conn.get_all_security_groups()
    for security_group in security_groups:
      if security_group.name == group:
        self.handle_failure("Security group {0} is already registered. Please" \
          " change the 'group' specified in your AppScalefile to a different " \
          "value, or erase it to have one automatically generated for you." \
          .format(group))

    AppScaleLogger.log("Creating key pair: {0}".format(keyname))
    key_pair = conn.create_key_pair(keyname)
    ssh_key = '{0}{1}.key'.format(LocalState.LOCAL_APPSCALE_PATH, keyname)
    LocalState.write_key_file(ssh_key, key_pair.material)

    self.create_security_group(parameters, group)
    self.authorize_security_group(parameters, group, from_port=1, to_port=65535,
      ip_protocol='udp', cidr_ip='0.0.0.0/0')
    self.authorize_security_group(parameters, group, from_port=1, to_port=65535,
      ip_protocol='tcp', cidr_ip='0.0.0.0/0')
    self.authorize_security_group(parameters, group, from_port=-1, to_port=-1,
      ip_protocol='icmp', cidr_ip='0.0.0.0/0')
    return True
Code example #25
File: gce_agent.py Project: AppScale/appscale-tools
  def configure_instance_security(self, parameters):
    """ Creates a GCE network and firewall with the specified name, and opens
    the ports on that firewall as needed for AppScale.

    We expect both the network and the firewall to not exist before this point,
    to avoid accidentally placing AppScale instances from different deployments
    in the same network and firewall (thus enabling them to see each other's web
    traffic).

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine, and an additional key indicating the name of the
        network and firewall that we should create in GCE.
    Returns:
      True, if the named network and firewall was created successfully.
    Raises:
      AgentRuntimeException: If the named network or firewall already exist in
      GCE.
    """
    is_autoscale_agent = parameters.get(self.PARAM_AUTOSCALE_AGENT, False)

    # While creating instances during autoscaling, we do not need to create a
    # new keypair or a network. We just make use of the existing one.
    if is_autoscale_agent:
      return

    AppScaleLogger.log("Verifying that SSH key exists locally")
    keyname = parameters[self.PARAM_KEYNAME]
    private_key = LocalState.LOCAL_APPSCALE_PATH + keyname
    public_key = private_key + ".pub"

    if os.path.exists(private_key) or os.path.exists(public_key):
      raise AgentRuntimeException("SSH key already found locally - please " +
        "use a different keyname")

    LocalState.generate_rsa_key(keyname, parameters[self.PARAM_VERBOSE])

    ssh_key_exists, all_ssh_keys = self.does_ssh_key_exist(parameters)
    if not ssh_key_exists:
      self.create_ssh_key(parameters, all_ssh_keys)

    if self.does_network_exist(parameters):
      raise AgentRuntimeException("Network already exists - please use a " + \
        "different group name.")

    if self.does_firewall_exist(parameters):
      raise AgentRuntimeException("Firewall already exists - please use a " + \
        "different group name.")

    network_url = self.create_network(parameters)
    self.create_firewall(parameters, network_url)
Code example #26
  def update_queues(cls, source_location, keyname, project_id):
    """ Updates a project's queues from the configuration file.

    Args:
      source_location: A string specifying the location of the source code.
      keyname: A string specifying the key name.
      project_id: A string specifying the project ID.
    """
    if cls.TAR_GZ_REGEX.search(source_location):
      fetch_function = utils.config_from_tar_gz
      version = Version.from_tar_gz(source_location)
    elif cls.ZIP_REGEX.search(source_location):
      fetch_function = utils.config_from_zip
      version = Version.from_zip(source_location)
    elif os.path.isdir(source_location):
      fetch_function = utils.config_from_dir
      version = Version.from_directory(source_location)
    elif source_location.endswith('.yaml'):
      fetch_function = utils.config_from_dir
      version = Version.from_yaml_file(source_location)
      source_location = os.path.dirname(source_location)
    else:
      raise BadConfigurationException(
        '{} must be a directory, tar.gz, or zip'.format(source_location))

    if project_id:
      version.project_id = project_id

    queue_config = fetch_function('queue.yaml', source_location)
    if queue_config is None:
      queue_config = fetch_function('queue.xml', source_location)
      # If the source does not have a queue configuration file, do nothing.
      if queue_config is None:
        return

      queues = utils.queues_from_xml(queue_config)
    else:
      queues = yaml.safe_load(queue_config)

    AppScaleLogger.log('Updating queues')

    for queue in queues.get('queue', []):
      if 'bucket_size' in queue or 'max_concurrent_requests' in queue:
        AppScaleLogger.warn('Queue configuration uses unsupported rate options'
                            ' (bucket size or max concurrent requests)')
        break

    load_balancer_ip = LocalState.get_host_with_role(keyname, 'load_balancer')
    secret_key = LocalState.get_secret_key(keyname)
    admin_client = AdminClient(load_balancer_ip, secret_key)
    admin_client.update_queues(version.project_id, queues)
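For reference, the `queues` dict produced by yaml.safe_load and inspected by the loop above has roughly this shape (a hand-written sample following the App Engine queue.yaml layout, not data taken from the project):

queues = {
    'queue': [
        {'name': 'default', 'rate': '5/s'},
        # 'bucket_size' or 'max_concurrent_requests' here would trigger the
        # unsupported-rate-options warning logged above.
        {'name': 'background', 'rate': '1/s', 'bucket_size': 10},
    ]
}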
Code example #27
  def create_resource_group(self, parameters, credentials):
    """ Creates a Resource Group for the application using the Service Principal
    Credentials, if it does not already exist. In the case where no resource
    group is specified, a default group is created.
    Args:
      parameters: A dict, containing all the parameters necessary to
        authenticate this user with Azure.
      credentials: A ServicePrincipalCredentials instance, that can be used to
        access or create any resources.
    Raises:
      AgentConfigurationException: If there was a problem creating or accessing
        a resource group with the given subscription.
    """
    subscription_id = parameters[self.PARAM_SUBSCRIBER_ID]
    resource_client = ResourceManagementClient(credentials, subscription_id)
    rg_name = parameters[self.PARAM_RESOURCE_GROUP]

    tag_name = 'default-tag'
    if parameters[self.PARAM_TAG]:
      tag_name = parameters[self.PARAM_TAG]

    storage_client = StorageManagementClient(credentials, subscription_id)
    resource_client.providers.register(self.MICROSOFT_STORAGE_RESOURCE)
    try:
      # If the resource group does not already exist, create a new one with the
      # specified storage account.
      if not parameters[self.PARAM_EXISTING_RG]:
        AppScaleLogger.log("Creating a new resource group '{0}' with the tag "
          "'{1}'.".format(rg_name, tag_name))
        resource_client.resource_groups.create_or_update(
          rg_name, ResourceGroup(location=parameters[self.PARAM_ZONE],
                                 tags={'tag': tag_name}))
        self.create_storage_account(parameters, storage_client)
      else:
        # If it already exists, check if the specified storage account exists
        # under it and if not, create a new account.
        storage_accounts = storage_client.storage_accounts.\
          list_by_resource_group(rg_name)
        acct_names = []
        for account in storage_accounts:
          acct_names.append(account.name)

        if parameters[self.PARAM_STORAGE_ACCOUNT] in acct_names:
            AppScaleLogger.log("Storage account '{0}' under '{1}' resource group "
              "already exists. So not creating it again.".format(
              parameters[self.PARAM_STORAGE_ACCOUNT], rg_name))
        else:
          self.create_storage_account(parameters, storage_client)
    except CloudError as error:
      raise AgentConfigurationException("Unable to create a resource group "
        "using the credentials provided: {}".format(error.message))
Code example #28
File: ec2_agent.py Project: sjones4/appscale-tools
    def authorize_security_group(self, parameters, group_id, from_port,
                                 to_port, ip_protocol, cidr_ip):
        """Opens up traffic on the given port range for traffic of the named type.

    Args:
      parameters: A dict that contains the credentials necessary to authenticate
        with AWS.
      group_id: A str that contains the id of the group whose ports should be
        opened.
      from_port: An int that names the first port that access should be allowed
        on.
      to_port: An int that names the last port that access should be allowed on.
      ip_protocol: A str that indicates if TCP, UDP, or ICMP traffic should be
        allowed.
      cidr_ip: A str that names the IP range that traffic should be allowed
        from.
    Raises:
      AgentRuntimeException: If the ports could not be opened on the security
      group.
    """
        AppScaleLogger.log('Authorizing security group {0} for {1} traffic from ' \
          'port {2} to port {3}'.format(group_id, ip_protocol, from_port, to_port))
        conn = self.open_connection(parameters)
        retries_left = self.SECURITY_GROUP_RETRY_COUNT
        while retries_left:
            try:
                conn.authorize_security_group(group_id=group_id,
                                              from_port=from_port,
                                              to_port=to_port,
                                              cidr_ip=cidr_ip,
                                              ip_protocol=ip_protocol)
            except EC2ResponseError:
                pass
            try:
                group_info = self.get_security_group_by_name(
                    conn, parameters[self.PARAM_GROUP],
                    parameters.get(self.PARAM_VPC_ID))
                for rule in group_info.rules:
                    if int(rule.from_port) == from_port and int(rule.to_port) == to_port \
                      and rule.ip_protocol == ip_protocol:
                        return
            except SecurityGroupNotFoundException as e:
                raise AgentRuntimeException(e.message)
            time.sleep(self.SLEEP_TIME)
            retries_left -= 1

        raise AgentRuntimeException("Couldn't authorize {0} traffic from port " \
          "{1} to port {2} on CIDR IP {3}".format(ip_protocol, from_port, to_port,
          cidr_ip))
Code example #29
File: gce_agent.py Project: AppScale/appscale-tools
  def attach_disk(self, parameters, disk_name, instance_id):
    """ Attaches the persistent disk specified in 'disk_name' to this virtual
    machine.
    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine.
      disk_name: A str naming the persistent disk to attach to this machine.
      instance_id: A str naming the id of the instance that the disk should be
        attached to. In practice, callers add disks to their own instance.
    Returns:
      A str indicating where the persistent disk has been attached to.
    """
    gce_service, credentials = self.open_connection(parameters)
    http = httplib2.Http()
    auth_http = credentials.authorize(http)
    project = parameters[self.PARAM_PROJECT]
    zone = parameters[self.PARAM_ZONE]

    # If the disk is already attached, return the mount point.
    request = gce_service.instances().get(project=project, zone=zone,
                                          instance=instance_id)
    disks = request.execute(auth_http)['disks']
    for disk in disks:
      path = disk['source'].split('/')
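      # The disk's source URL ends in .../projects/<project>/zones/<zone>/disks/<name>,
      # so path[-5], path[-3] and path[-1] are the project, zone and disk name.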
      if project == path[-5] and zone == path[-3] and disk_name == path[-1]:
        device_name = '/dev/{}'.format(disk['deviceName'])
        AppScaleLogger.log('Disk is already attached at {}'.format(device_name))
        return device_name

    request = gce_service.instances().attachDisk(
      project=project,
      zone=zone,
      instance=instance_id,
      body={
        'kind': 'compute#attachedDisk',
        'type': 'PERSISTENT',
        'mode': 'READ_WRITE',
        'source': "https://www.googleapis.com/compute/{0}/projects/{1}" \
                  "/zones/{2}/disks/{3}".format(self.API_VERSION, project,
                                                zone, disk_name),
        'deviceName': 'sdb'
      }
    )
    response = request.execute(auth_http)
    AppScaleLogger.log(str(response))
    self.ensure_operation_succeeds(gce_service, auth_http, response,
                                   parameters[self.PARAM_PROJECT])

    return '/dev/sdb'
Code example #30
    def warn_if_version_defined(cls, version, test=False):
        """ Warns the user if version is defined in the application configuration.

    Args:
      version: A Version object.
      test: A boolean indicating that the tools are in test mode.
    Raises:
      AppScaleException: If version is defined and user decides to cancel.
    """
        if version.id is not None:
            AppScaleLogger.log(
                'The version element is not supported in {}. Module {} will be '
                'overwritten.'.format(version.config_type, version.service_id))
            if not test:
                response = raw_input('Continue? (y/N) ')
                if response.lower() not in ['y', 'yes']:
                    raise AppScaleException('Cancelled deploy operation')
Code example #31
  def warn_if_version_defined(cls, version, test=False):
    """ Warns the user if version is defined in the application configuration.

    Args:
      version: A Version object.
      test: A boolean indicating that the tools are in test mode.
    Raises:
      AppScaleException: If version is defined and user decides to cancel.
    """
    if version.id is not None:
      AppScaleLogger.log(
        'The version element is not supported in {}. Module {} will be '
        'overwritten.'.format(version.configuration_type, version.service_id))
      if not test:
        response = raw_input('Continue? (y/N) ')
        if response.lower() not in ['y', 'yes']:
          raise AppScaleException('Cancelled deploy operation')
Code example #32
  def does_image_exist(self, parameters):
    """ Queries Amazon EC2 to see if the specified image exists.

    Args:
      parameters: A dict that contains the machine ID to check for existence.
    Returns:
      True if the machine ID exists, False otherwise.
    """
    image_id = parameters[self.PARAM_IMAGE_ID]
    try:
      conn = self.open_connection(parameters)
      conn.get_image(image_id)
      AppScaleLogger.log('Machine image {0} does exist'.format(image_id))
      return True
    except boto.exception.EC2ResponseError:
      AppScaleLogger.log('Machine image {0} does not exist'.format(image_id))
      return False
Code example #33
File: ec2_agent.py Project: AppScale/appscale-tools
  def authorize_security_group(self, parameters, group_id, from_port,
                               to_port, ip_protocol, cidr_ip):
    """Opens up traffic on the given port range for traffic of the named type.

    Args:
      parameters: A dict that contains the credentials necessary to authenticate
        with AWS.
      group_id: A str that contains the id of the group whose ports should be
        opened.
      from_port: An int that names the first port that access should be allowed
        on.
      to_port: An int that names the last port that access should be allowed on.
      ip_protocol: A str that indicates if TCP, UDP, or ICMP traffic should be
        allowed.
      cidr_ip: A str that names the IP range that traffic should be allowed
        from.
    Raises:
      AgentRuntimeException: If the ports could not be opened on the security
      group.
    """
    AppScaleLogger.log('Authorizing security group {0} for {1} traffic from ' \
      'port {2} to port {3}'.format(group_id, ip_protocol, from_port, to_port))
    conn = self.open_connection(parameters)
    retries_left = self.SECURITY_GROUP_RETRY_COUNT
    while retries_left:
      try:
        conn.authorize_security_group(group_id=group_id, from_port=from_port,
                                      to_port=to_port, cidr_ip=cidr_ip,
                                      ip_protocol=ip_protocol)
      except EC2ResponseError:
        pass
      try:
        group_info = self.get_security_group_by_name(
            conn, parameters[self.PARAM_GROUP], parameters.get(self.PARAM_VPC_ID))
        for rule in group_info.rules:
          if int(rule.from_port) == from_port and int(rule.to_port) == to_port \
            and rule.ip_protocol == ip_protocol:
            return
      except SecurityGroupNotFoundException as e:
        raise AgentRuntimeException(e.message)
      time.sleep(self.SLEEP_TIME)
      retries_left -= 1

    raise AgentRuntimeException("Couldn't authorize {0} traffic from port " \
      "{1} to port {2} on CIDR IP {3}".format(ip_protocol, from_port, to_port,
      cidr_ip))
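A single-rule usage sketch for the method above (values are illustrative; `agent` and `params` are assumed, and the configure_instance_security examples elsewhere in this listing call it with full 1-65535 ranges):

agent.authorize_security_group(params, 'appscale-group',
                               from_port=22, to_port=22,
                               ip_protocol='tcp', cidr_ip='0.0.0.0/0')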
Code example #34
File: ec2_agent.py Project: sjones4/appscale-tools
    def does_image_exist(self, parameters):
        """ Queries Amazon EC2 to see if the specified image exists.

    Args:
      parameters: A dict that contains the machine ID to check for existence.
    Returns:
      True if the machine ID exists, False otherwise.
    """
        image_id = parameters[self.PARAM_IMAGE_ID]
        try:
            conn = self.open_connection(parameters)
            conn.get_image(image_id)
            AppScaleLogger.log('Machine image {0} does exist'.format(image_id))
            return True
        except boto.exception.EC2ResponseError:
            AppScaleLogger.log(
                'Machine image {0} does not exist'.format(image_id))
            return False
Code example #35
 def sleep_until_update_operation_done(self, result, resource_name, verbose):
   """ Sleeps until the create/update operation for the resource is completed
     successfully.
     Args:
       result: An instance, of the AzureOperationPoller to poll for the status
         of the operation being performed.
       resource_name: The name of the resource being updated.
       verbose: A boolean indicating whether to log verbose messages.
   """
   time_start = time.time()
   while not result.done():
     AppScaleLogger.verbose("Waiting {0} second(s) for {1} to be created/updated.".
                            format(self.SLEEP_TIME, resource_name), verbose)
     time.sleep(self.SLEEP_TIME)
     total_sleep_time = time.time() - time_start
     if total_sleep_time > self.MAX_SLEEP_TIME:
       AppScaleLogger.log("Waited {0} second(s) for {1} to be created/updated. "
         "Operation has timed out.".format(total_sleep_time, resource_name))
       break
Code example #36
  def does_zone_exist(self, parameters):
    """ Queries Amazon EC2 to see if the specified availability zone exists.

    Args:
      parameters: A dict that contains the availability zone to check for
        existence.
    Returns:
      True if the availability zone exists, and False otherwise.
    """
    zone = parameters[self.PARAM_ZONE]
    try:
      conn = self.open_connection(parameters)
      conn.get_all_zones(zone)
      AppScaleLogger.log('Availability zone {0} does exist'.format(zone))
      return True
    except boto.exception.EC2ResponseError:
      AppScaleLogger.log('Availability zone {0} does not exist'.format(zone))
      return False
Code example #37
  def does_disk_exist(self, parameters, disk_name):
    """ Queries Amazon EC2 to see if the specified EBS volume exists.

    Args:
      parameters: A dict that contains the credentials needed to authenticate
        with AWS.
      disk_name: A str naming the EBS volume to check for existence.
    Returns:
      True if the named EBS volume exists, and False otherwise.
    """
    conn = self.open_connection(parameters)
    try:
      conn.get_all_volumes([disk_name])
      AppScaleLogger.log('EBS volume {0} does exist'.format(disk_name))
      return True
    except boto.exception.EC2ResponseError:
      AppScaleLogger.log('EBS volume {0} does not exist'.format(disk_name))
      return False
Code example #38
    def cleanup_state(self, parameters):
        """ Removes any remote state that was created to run AppScale instances
    during this deployment.
    Args:
      parameters: A dict that includes keys indicating the remote state
        that should be deleted.
    """
        subscription_id = parameters[self.PARAM_SUBSCRIBER_ID]
        resource_group = parameters[self.PARAM_RESOURCE_GROUP]
        credentials = self.open_connection(parameters)
        network_client = NetworkManagementClient(credentials, subscription_id)
        verbose = parameters[self.PARAM_VERBOSE]

        AppScaleLogger.log(
            "Deleting the Virtual Network, Public IP Address "
            "and Network Interface created for this deployment.")
        network_interfaces = network_client.network_interfaces.list(
            resource_group)
        for interface in network_interfaces:
            result = network_client.network_interfaces.delete(
                resource_group, interface.name)
            resource_name = 'Network Interface' + ':' + interface.name
            self.sleep_until_delete_operation_done(result, resource_name,
                                                   self.MAX_SLEEP_TIME,
                                                   verbose)

        public_ip_addresses = network_client.public_ip_addresses.list(
            resource_group)
        for public_ip in public_ip_addresses:
            result = network_client.public_ip_addresses.delete(
                resource_group, public_ip.name)
            resource_name = 'Public IP Address' + ':' + public_ip.name
            self.sleep_until_delete_operation_done(result, resource_name,
                                                   self.MAX_SLEEP_TIME,
                                                   verbose)

        virtual_networks = network_client.virtual_networks.list(resource_group)
        for network in virtual_networks:
            result = network_client.virtual_networks.delete(
                resource_group, network.name)
            resource_name = 'Virtual Network' + ':' + network.name
            self.sleep_until_delete_operation_done(result, resource_name,
                                                   self.MAX_SLEEP_TIME,
                                                   verbose)
Code example #39
  def configure_instance_security(self, parameters):
    """ Creates a GCE network and firewall with the specified name, and opens
    the ports on that firewall as needed for AppScale.

    We expect both the network and the firewall to not exist before this point,
    to avoid accidentally placing AppScale instances from different deployments
    in the same network and firewall (thus enabling them to see each other's web
    traffic).

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine, and an additional key indicating the name of the
        network and firewall that we should create in GCE.
    Returns:
      True, if the named network and firewall was created successfully.
    Raises:
      AgentRuntimeException: If the named network or firewall already exist in
      GCE.
    """
    AppScaleLogger.log("Verifying that SSH key exists locally")
    keyname = parameters[self.PARAM_KEYNAME]
    private_key = LocalState.LOCAL_APPSCALE_PATH + keyname
    public_key = private_key + ".pub"

    if os.path.exists(private_key) or os.path.exists(public_key):
      raise AgentRuntimeException("SSH key already found locally - please " +
        "use a different keyname")

    LocalState.generate_rsa_key(keyname, parameters[self.PARAM_VERBOSE])

    ssh_key_exists, all_ssh_keys = self.does_ssh_key_exist(parameters)
    if not ssh_key_exists:
      self.create_ssh_key(parameters, all_ssh_keys)

    if self.does_network_exist(parameters):
      raise AgentRuntimeException("Network already exists - please use a " + \
        "different group name.")

    if self.does_firewall_exist(parameters):
      raise AgentRuntimeException("Firewall already exists - please use a " + \
        "different group name.")

    network_url = self.create_network(parameters)
    self.create_firewall(parameters, network_url)
コード例 #40
0
  def update_cron(cls, source_location, keyname, project_id):
    """ Updates a project's cron jobs from the configuration file.

    Args:
      source_location: A string specifying the location of the source code.
      keyname: A string specifying the key name.
      project_id: A string specifying the project ID.
    """
    if cls.TAR_GZ_REGEX.search(source_location):
      fetch_function = utils.config_from_tar_gz
      version = Version.from_tar_gz(source_location)
    elif cls.ZIP_REGEX.search(source_location):
      fetch_function = utils.config_from_zip
      version = Version.from_zip(source_location)
    elif os.path.isdir(source_location):
      fetch_function = utils.config_from_dir
      version = Version.from_directory(source_location)
    elif source_location.endswith('.yaml'):
      fetch_function = utils.config_from_dir
      version = Version.from_yaml_file(source_location)
      source_location = os.path.dirname(source_location)
    else:
      raise BadConfigurationException(
        '{} must be a directory, tar.gz, or zip'.format(source_location))

    if project_id:
      version.project_id = project_id

    cron_config = fetch_function('cron.yaml', source_location)
    if cron_config is None:
      cron_config = fetch_function('cron.xml', source_location)
      # If the source does not have a cron configuration file, do nothing.
      if cron_config is None:
        return

      cron_jobs = utils.cron_from_xml(cron_config)
    else:
      cron_jobs = yaml.safe_load(cron_config)

    AppScaleLogger.log('Updating cron jobs')
    load_balancer_ip = LocalState.get_host_with_role(keyname, 'load_balancer')
    secret_key = LocalState.get_secret_key(keyname)
    admin_client = AdminClient(load_balancer_ip, secret_key)
    admin_client.update_cron(version.project_id, cron_jobs)
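
For reference, the yaml.safe_load path consumes a GAE-style cron.yaml, and the parsed structure is what gets handed to admin_client.update_cron. A minimal sketch is below; the job description, URL, and schedule are invented for illustration.

import yaml

# Illustrative GAE-style cron.yaml contents; the job itself is made up.
cron_config = """
cron:
- description: daily summary job
  url: /tasks/summary
  schedule: every 24 hours
"""
cron_jobs = yaml.safe_load(cron_config)
# cron_jobs == {'cron': [{'description': 'daily summary job',
#                         'url': '/tasks/summary',
#                         'schedule': 'every 24 hours'}]}
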
コード例 #41
0
  def detach_disk(self, parameters, disk_name, instance_id):
    """ Detaches the EBS mount specified in disk_name from the named instance.

    Args:
      parameters: A dict with keys for each parameter needed to connect to AWS.
      disk_name: A str naming the EBS volume to detach.
      instance_id: A str naming the id of the instance that the disk should be
        detached from.
    Returns:
      True if the disk was detached, and False otherwise.
    """
    conn = self.open_connection(parameters)
    try:
      conn.detach_volume(disk_name, instance_id, device='/dev/sdc')
      return True
    except boto.exception.EC2ResponseError:
      AppScaleLogger.log("Could not detach volume with name {0}".format(
        disk_name))
      return False
コード例 #42
0
    def configure_instance_security(self, parameters):
        """ Configure the resource group and storage account needed to create the
    network interface for the VMs to be spawned. This method is called before
    starting virtual machines.
    Args:
      parameters: A dict containing values necessary to authenticate with the
        underlying cloud.
    Returns:
      True, if the group and account were created successfully.
      False, otherwise.
    Raises:
      AgentRuntimeException: If security features could not be successfully
        configured in the underlying cloud.
    """
        credentials = self.open_connection(parameters)
        resource_group = parameters[self.PARAM_RESOURCE_GROUP]
        storage_account = parameters[self.PARAM_STORAGE_ACCOUNT]
        zone = parameters[self.PARAM_ZONE]
        subscription_id = parameters[self.PARAM_SUBSCRIBER_ID]

        AppScaleLogger.log("Verifying that SSH key exists locally.")
        keyname = parameters[self.PARAM_KEYNAME]
        private_key = LocalState.LOCAL_APPSCALE_PATH + keyname
        public_key = private_key + ".pub"

        if os.path.exists(private_key) or os.path.exists(public_key):
            raise AgentRuntimeException(
                "SSH key already found locally - please "
                "use a different keyname.")

        LocalState.generate_rsa_key(keyname, parameters[self.PARAM_VERBOSE])

        AppScaleLogger.log("Configuring network for machine/s under "
                           "resource group '{0}' with storage account '{1}' "
                           "in zone '{2}'".format(resource_group,
                                                  storage_account, zone))
        # Create a resource group and an associated storage account to access resources.
        self.create_resource_group(parameters, credentials)

        resource_client = ResourceManagementClient(credentials,
                                                   subscription_id)
        resource_client.providers.register(self.MICROSOFT_COMPUTE_RESOURCE)
        resource_client.providers.register(self.MICROSOFT_NETWORK_RESOURCE)
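
This snippet assumes open_connection has already returned Azure credentials. A rough sketch of how that client setup typically looks with the Azure SDK of this era follows; the placeholder IDs are invented, and treating MICROSOFT_COMPUTE_RESOURCE / MICROSOFT_NETWORK_RESOURCE as the provider namespaces shown is an assumption, not something confirmed by this listing.

from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient

# Sketch only: placeholder IDs, not the agent's actual open_connection().
credentials = ServicePrincipalCredentials(client_id='<app-id>',
                                          secret='<app-secret>',
                                          tenant='<tenant-id>')
resource_client = ResourceManagementClient(credentials, '<subscription-id>')
resource_client.providers.register('Microsoft.Compute')
resource_client.providers.register('Microsoft.Network')
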
コード例 #43
0
ファイル: ec2_agent.py プロジェクト: sjones4/appscale-tools
    def does_zone_exist(self, parameters):
        """ Queries Amazon EC2 to see if the specified availability zone exists.

    Args:
      parameters: A dict that contains the availability zone to check for
        existence.
    Returns:
      True if the availability zone exists, and False otherwise.
    """
        zone = parameters[self.PARAM_ZONE]
        try:
            conn = self.open_connection(parameters)
            conn.get_all_zones(zone)
            AppScaleLogger.log('Availability zone {0} does exist'.format(zone))
            return True
        except boto.exception.EC2ResponseError:
            AppScaleLogger.log(
                'Availability zone {0} does not exist'.format(zone))
            return False
コード例 #44
0
  def does_address_exist(self, parameters):
    """ Queries Amazon EC2 to see if the specified Elastic IP address has been
    allocated with the given credentials.

    Args:
      parameters: A dict that contains the Elastic IP to check for existence.
    Returns:
      True if the given Elastic IP has been allocated, and False otherwise.
    """
    # Look up the Elastic IP before the try block so the except handler can
    # always log it.
    elastic_ip = parameters[self.PARAM_STATIC_IP]
    try:
      conn = self.open_connection(parameters)
      conn.get_all_addresses(elastic_ip)
      AppScaleLogger.log('Elastic IP {0} can be used for this AppScale ' \
        'deployment.'.format(elastic_ip))
      return True
    except boto.exception.EC2ResponseError:
      AppScaleLogger.log('Elastic IP {0} does not exist.'.format(elastic_ip))
      return False
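
Both existence checks above follow the same EAFP pattern: boto raises EC2ResponseError when the zone or address is unknown, so the methods double as simple pre-flight checks. A hedged usage sketch follows; `agent` is assumed to be an EC2Agent instance, and the dict keys stand in for the agent's PARAM_ZONE and PARAM_STATIC_IP values, which are not shown in this listing.

# Illustrative pre-flight check before starting a deployment.
params = {
  'zone': 'us-east-1b',          # stands in for PARAM_ZONE
  'static_ip': '203.0.113.10',   # stands in for PARAM_STATIC_IP
  # ...plus whatever credentials open_connection() needs.
}
if not (agent.does_zone_exist(params) and agent.does_address_exist(params)):
  raise SystemExit('Requested zone or Elastic IP is not available.')
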
コード例 #45
0
ファイル: ec2_agent.py プロジェクト: sjones4/appscale-tools
    def detach_disk(self, parameters, disk_name, instance_id):
        """ Detaches the EBS mount specified in disk_name from the named instance.

    Args:
      parameters: A dict with keys for each parameter needed to connect to AWS.
      disk_name: A str naming the EBS volume to detach.
      instance_id: A str naming the id of the instance that the disk should be
        detached from.
    Returns:
      True if the disk was detached, and False otherwise.
    """
        conn = self.open_connection(parameters)
        try:
            conn.detach_volume(disk_name, instance_id)
            return True
        except boto.exception.EC2ResponseError:
            AppScaleLogger.log(
                "Could not detach volume with name {0}".format(disk_name))
            return False
コード例 #46
0
ファイル: ec2_agent.py プロジェクト: sjones4/appscale-tools
    def does_disk_exist(self, parameters, disk_name):
        """ Queries Amazon EC2 to see if the specified EBS volume exists.

    Args:
      parameters: A dict that contains the credentials needed to authenticate
        with AWS.
      disk_name: A str naming the EBS volume to check for existence.
    Returns:
      True if the named EBS volume exists, and False otherwise.
    """
        conn = self.open_connection(parameters)
        try:
            conn.get_all_volumes([disk_name])
            AppScaleLogger.log('EBS volume {0} does exist'.format(disk_name))
            return True
        except boto.exception.EC2ResponseError:
            AppScaleLogger.log(
                'EBS volume {0} does not exist'.format(disk_name))
            return False
コード例 #47
0
    def cleanup_state(self, parameters):
        """ Removes the keyname and security group created during this AppScale
    deployment.

    Args:
      parameters: A dict that contains the keyname and security group to delete.
    """
        AppScaleLogger.log("Deleting keyname {0}".format(
            parameters[self.PARAM_KEYNAME]))
        conn = self.open_connection(parameters)
        conn.delete_key_pair(parameters[self.PARAM_KEYNAME])

        AppScaleLogger.log("Deleting security group {0}".format(
            parameters[self.PARAM_GROUP]))
        while True:
            try:
                conn.delete_security_group(parameters[self.PARAM_GROUP])
                return
            except EC2ResponseError:
                time.sleep(5)
コード例 #48
0
ファイル: ec2_agent.py プロジェクト: sjones4/appscale-tools
    def does_address_exist(self, parameters):
        """ Queries Amazon EC2 to see if the specified Elastic IP address has been
    allocated with the given credentials.

    Args:
      parameters: A dict that contains the Elastic IP to check for existence.
    Returns:
      True if the given Elastic IP has been allocated, and False otherwise.
    """
        elastic_ip = parameters[self.PARAM_STATIC_IP]
        try:
            conn = self.open_connection(parameters)
            conn.get_all_addresses(elastic_ip)
            AppScaleLogger.log('Elastic IP {0} can be used for this AppScale ' \
              'deployment.'.format(elastic_ip))
            return True
        except boto.exception.EC2ResponseError:
            AppScaleLogger.log(
                'Elastic IP {0} does not exist.'.format(elastic_ip))
            return False
コード例 #49
0
  def cleanup_state(self, parameters):
    """ Removes the keyname and security group created during this AppScale
    deployment.

    Args:
      parameters: A dict that contains the keyname and security group to delete.
    """
    AppScaleLogger.log("Deleting keyname {0}".format(
      parameters[self.PARAM_KEYNAME]))
    conn = self.open_connection(parameters)
    conn.delete_key_pair(parameters[self.PARAM_KEYNAME])

    AppScaleLogger.log("Deleting security group {0}".format(
      parameters[self.PARAM_GROUP]))
    while True:
      try:
        conn.delete_security_group(parameters[self.PARAM_GROUP])
        return
      except EC2ResponseError:
        time.sleep(5)
コード例 #50
0
ファイル: euca_agent.py プロジェクト: Sunnepah/appscale-tools
  def does_zone_exist(self, parameters):
    """
    Queries Eucalyptus to see if the specified availability zone exists.

    Args:
      parameters: A dict that contains the zone to check for existence.
    Returns:
      True if the availability zone exists, False otherwise.
    """
    # Note that we can't use does_zone_exist in EC2Agent. There, if the zone
    # doesn't exist, it throws an EC2ResponseError, but in Eucalyptus, it
    # doesn't (and returns None instead).
    conn = self.open_connection(parameters)
    zone = parameters[self.PARAM_ZONE]
    if conn.get_all_zones(zone):
      AppScaleLogger.log('Availability zone {0} does exist'.format(zone))
      return True
    else:
      AppScaleLogger.log('Availability zone {0} does not exist'.format(zone))
      return False
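
A caller that has to work against either backend could normalize the two behaviors described in the comment above. A minimal sketch (not code from this listing):

import boto.exception

def zone_exists(conn, zone):
  """Returns True if zone exists, whether the backend raises or returns None."""
  try:
    # Eucalyptus: an unknown zone yields an empty/None result.
    return bool(conn.get_all_zones(zone))
  except boto.exception.EC2ResponseError:
    # EC2: an unknown zone raises instead.
    return False
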
コード例 #51
0
  def _print_services(cls, services):
    """ Prints information about deployed services.

    Args:
      services: A list of ServiceInfo objects.
    """
    header = (
      "PROJECT ID", "SERVICE ID", "HTTP/HTTPS", "APPSERVERS/PENDING",
      "REQS. ENQUEUED/TOTAL", "STATE"
    )
    table = (
      (service.project_id, service.service_id,
       "{}/{}".format(service.http, service.https),
       "{}/{}".format(service.appservers, service.pending_appservers),
       "{}/{}".format(service.reqs_enqueued, service.total_reqs),
       ("Ready" if service.appservers > 0 else
        "Starting" if service.pending_appservers > 0 else "Stopped"))
      for service in services
    )
    AppScaleLogger.log("\n" + tabulate(table, headers=header, tablefmt="plain"))
コード例 #52
0
  def does_zone_exist(self, parameters):
    """
    Queries Eucalyptus to see if the specified availability zone exists.

    Args:
      parameters: A dict that contains the zone to check for existence.
    Returns:
      True if the availability zone exists, False otherwise.
    """
    # Note that we can't use does_zone_exist in EC2Agent. There, if the zone
    # doesn't exist, it throws an EC2ResponseError, but in Eucalyptus, it
    # doesn't (and returns None instead).
    conn = self.open_connection(parameters)
    zone = parameters[self.PARAM_ZONE]
    if conn.get_all_zones(zone):
      AppScaleLogger.log('Availability zone {0} does exist'.format(zone))
      return True
    else:
      AppScaleLogger.log('Availability zone {0} does not exist'.format(zone))
      return False
コード例 #53
0
  def set_admin_role(self, username, is_cloud_admin, capabilities):
    """ Grants the given user the ability to perform any administrative action.

    Args:
      username: The e-mail address that should be given administrative
        authorizations.
      is_cloud_admin: A bool indicating whether the user should be made a
        cloud administrator.
      capabilities: The capabilities that should be granted to the user.
    Raises:
      AppControllerException: If the admin role could not be set.
    """
    AppScaleLogger.log('Granting admin privileges to %s' % username)
    try:
      result = self.run_with_timeout(
        self.DEFAULT_TIMEOUT, self.DEFAULT_NUM_RETRIES,
        self.server.set_admin_role, username, is_cloud_admin, capabilities,
        self.secret)
    except TimeoutException:
      raise AppControllerException('Timeout when making AppController call')

    if result != 'true':
      raise AppControllerException(
        'Unable to set admin role: {}'.format(result))
コード例 #54
0
  def stop_instances(self, parameters):
    """
    Stop one or more EC2 instances. The IDs of the instances to stop are
    fetched from the 'instance_ids' parameter in the input map. (Also
    see the documentation for the BaseAgent class.)

    Args:
      parameters: A dictionary of parameters.
    """
    instance_ids = parameters[self.PARAM_INSTANCE_IDS]
    conn = self.open_connection(parameters)
    conn.stop_instances(instance_ids)
    AppScaleLogger.log('Stopping instances: '+' '.join(instance_ids))
    if not self.wait_for_status_change(parameters, conn, 'stopped',
           max_wait_time=120):
      AppScaleLogger.log("re-stopping instances: "+' '.join(instance_ids))
      conn.stop_instances(instance_ids)
      if not self.wait_for_status_change(parameters, conn, 'stopped',
            max_wait_time=120):
        self.handle_failure("ERROR: could not stop instances: " + \
            ' '.join(instance_ids))
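
stop_instances (and terminate_instances below) lean on wait_for_status_change, which is not included in this listing. A minimal polling sketch of such a helper, assuming boto 2's get_all_instances API; the signature here is simplified relative to the agent's (which takes the parameters dict), so treat it as an illustration, not the project's actual implementation.

import time

def wait_for_status_change(conn, instance_ids, expected_state,
                           max_wait_time=120, poll_interval=10):
  """Polls EC2 until every instance reaches expected_state or the wait times out."""
  deadline = time.time() + max_wait_time
  while time.time() < deadline:
    reservations = conn.get_all_instances(instance_ids=instance_ids)
    instances = [inst for res in reservations for inst in res.instances]
    if instances and all(inst.state == expected_state for inst in instances):
      return True
    time.sleep(poll_interval)
  return False
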
コード例 #55
0
  def set_admin_role(self, username, is_cloud_admin, capabilities):
    """ Grants the given user the ability to perform any administrative action.

    Args:
      username: The e-mail address that should be given administrative
        authorizations.
      is_cloud_admin: A bool indicating whether the user should be made a
        cloud administrator.
      capabilities: The capabilities that should be granted to the user.
    Raises:
      AppControllerException: If the admin role could not be set.
    """
    AppScaleLogger.log('Granting admin privileges to %s' % username)
    try:
      result = self.run_with_timeout(
        self.DEFAULT_TIMEOUT, self.DEFAULT_NUM_RETRIES,
        self.server.set_admin_role, username, is_cloud_admin, capabilities,
        self.secret)
    except TimeoutException:
      raise AppControllerException('Timeout when making AppController call')

    if result != 'true':
      raise AppControllerException(
        'Unable to set admin role: {}'.format(result))
コード例 #56
0
ファイル: ec2_agent.py プロジェクト: AppScale/appscale-tools
  def cleanup_state(self, parameters):
    """ Removes the keyname and security group created during this AppScale
    deployment.

    Args:
      parameters: A dict that contains the keyname and security group to delete.
    """
    AppScaleLogger.log("Deleting keyname {0}".format(
      parameters[self.PARAM_KEYNAME]))
    conn = self.open_connection(parameters)
    conn.delete_key_pair(parameters[self.PARAM_KEYNAME])

    AppScaleLogger.log("Deleting security group {0}".format(
      parameters[self.PARAM_GROUP]))
    retries_left = self.SECURITY_GROUP_RETRY_COUNT
    while True:
      try:
        sg = self.get_security_group_by_name(conn, parameters[self.PARAM_GROUP],
                                             parameters.get(self.PARAM_VPC_ID))
        conn.delete_security_group(group_id=sg.id)
        return
      except EC2ResponseError as e:
        time.sleep(self.SLEEP_TIME)
        retries_left -= 1
        if retries_left == 0:
          raise AgentRuntimeException('Error deleting security group! Reason: '
                                      '{}'.format(e.message))
      except SecurityGroupNotFoundException:
        AppScaleLogger.log('Could not find security group {}, skipping '
                           'delete.'.format(parameters[self.PARAM_GROUP]))
        return
コード例 #57
0
  def terminate_instances(self, parameters):
    """
    Terminate one or more EC2 instances. The IDs of the instances to terminate
    are fetched from the 'instance_ids' parameter in the input map. (Also
    see the documentation for the BaseAgent class.)

    Args:
      parameters: A dictionary of parameters.
    """
    instance_ids = parameters[self.PARAM_INSTANCE_IDS]
    conn = self.open_connection(parameters)
    conn.terminate_instances(instance_ids)
    AppScaleLogger.log('Terminating instances: ' + ' '.join(instance_ids))
    if not self.wait_for_status_change(parameters, conn, 'terminated',
            max_wait_time=120):
      AppScaleLogger.log("re-terminating instances: " + ' '.join(instance_ids))
      conn.terminate_instances(instance_ids)
      if not self.wait_for_status_change(parameters, conn, 'terminated',
                max_wait_time=120):
        self.handle_failure("ERROR: could not terminate instances: " + \
            ' '.join(instance_ids))
    # Send a second terminate request to the already-terminated instances to
    # remove them from the system (i.e. they no longer show up in
    # describe-instances). This helps when deployments are brought up and down
    # frequently and terminated instances are still associated with the
    # keyname.
    AppScaleLogger.log("Removing terminated instances: " + ' '.join(instance_ids))
    conn.terminate_instances(instance_ids)
コード例 #58
0
  def create_user(self, username, password, account_type='xmpp_user'):
    """ Creates a new user account, with the given username and hashed password.

    Args:
      username: An e-mail address that should be set as the new username.
      password: A sha1-hashed password that is bound to the given username.
      account_type: A str that indicates if this account can be logged into by
        XMPP users.
    Raises:
      AppControllerException if unable to create user.
    """
    AppScaleLogger.log("Creating new user account {0}".format(username))
    while True:
      try:
        result = self.run_with_timeout(
          self.LONGER_TIMEOUT, self.DEFAULT_NUM_RETRIES,
          self.server.create_user, username, password, account_type,
          self.secret)
        break
      except Exception as exception:
        AppScaleLogger.log("Exception when creating user: {0}".format(exception))
        AppScaleLogger.log("Backing off and trying again")
        time.sleep(10)

    if result != 'true':
      raise AppControllerException(result)
コード例 #59
0
  def configure_instance_security(self, parameters):
    """ Configure the resource group and storage account needed to create the
    network interface for the VMs to be spawned. This method is called before
    starting virtual machines.
    Args:
      parameters: A dict containing values necessary to authenticate with the
        underlying cloud.
    Returns:
      True, if the group and account were created successfully.
      False, otherwise.
    Raises:
      AgentRuntimeException: If security features could not be successfully
        configured in the underlying cloud.
    """
    credentials = self.open_connection(parameters)
    resource_group = parameters[self.PARAM_RESOURCE_GROUP]
    storage_account = parameters[self.PARAM_STORAGE_ACCOUNT]
    zone = parameters[self.PARAM_ZONE]
    subscription_id = parameters[self.PARAM_SUBSCRIBER_ID]

    AppScaleLogger.log("Verifying that SSH key exists locally.")
    keyname = parameters[self.PARAM_KEYNAME]
    private_key = LocalState.LOCAL_APPSCALE_PATH + keyname
    public_key = private_key + ".pub"

    if os.path.exists(private_key) or os.path.exists(public_key):
      raise AgentRuntimeException("SSH key already found locally - please "
                                  "use a different keyname.")

    LocalState.generate_rsa_key(keyname, parameters[self.PARAM_VERBOSE])

    AppScaleLogger.log("Configuring network for machine/s under "
                       "resource group '{0}' with storage account '{1}' "
                       "in zone '{2}'".format(resource_group, storage_account, zone))
    # Create a resource group and an associated storage account to access resources.
    self.create_resource_group(parameters, credentials)

    resource_client = ResourceManagementClient(credentials, subscription_id)
    resource_client.providers.register(self.MICROSOFT_COMPUTE_RESOURCE)
    resource_client.providers.register(self.MICROSOFT_NETWORK_RESOURCE)
コード例 #60
0
  def sleep_until_delete_operation_done(self, result, resource_name,
                                        max_sleep, verbose):
    """ Sleeps until the delete operation for the resource is completed
    successfully.

    Args:
      result: An instance of the AzureOperationPoller to poll for the status
        of the operation being performed.
      resource_name: The name of the resource being deleted.
      max_sleep: The maximum number of seconds to sleep for the resources to
        be deleted.
      verbose: A boolean indicating whether or not we are in verbose mode.
    """
    time_start = time.time()
    while not result.done():
      AppScaleLogger.verbose("Waiting {0} second(s) for {1} to be deleted.".
                             format(self.SLEEP_TIME, resource_name), verbose)
      time.sleep(self.SLEEP_TIME)
      total_sleep_time = time.time() - time_start
      if total_sleep_time > max_sleep:
        AppScaleLogger.log("Waited {0} second(s) for {1} to be deleted. "
                           "Operation has timed out.".format(total_sleep_time,
                                                             resource_name))
        break