Example #1
  def terminate_instances(self, parameters):
    """ Deletes the instances specified in 'parameters' running in Google
    Compute Engine.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine, and an additional key mapping to a list of
        instance names that should be deleted.
    """
    instance_ids = parameters[self.PARAM_INSTANCE_IDS]
    gce_service, credentials = self.open_connection(parameters)
    auth_http = credentials.authorize(httplib2.Http())

    responses = []
    for instance_id in instance_ids:
      request = gce_service.instances().delete(
        project=parameters[self.PARAM_PROJECT],
        zone=parameters[self.PARAM_ZONE],
        instance=instance_id
      )
      response = request.execute(http=auth_http)
      AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])
      responses.append(response)

    for response in responses:
      self.ensure_operation_succeeds(gce_service, auth_http, response,
        parameters[self.PARAM_PROJECT])
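
A minimal usage sketch for the method above. The GCEAgent class name, its
instantiation, and the concrete values are assumptions for illustration; the
PARAM_* keys are the constants referenced in the method body.

# Hypothetical invocation; GCEAgent and all values are assumed.
agent = GCEAgent()
params = {
  agent.PARAM_PROJECT: 'my-gce-project',
  agent.PARAM_ZONE: 'us-central1-a',
  agent.PARAM_INSTANCE_IDS: ['appscale-vm-1', 'appscale-vm-2'],
  agent.PARAM_VERBOSE: False,
}
agent.terminate_instances(params)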
Example #2
  def add_access_config(self, parameters, instance_id, static_ip):
    """ Instructs Google Compute Engine to use the given IP address as the
    public IP for the named instance.

    This assumes that there is no existing public IP address for the named
    instance. If this is not the case, callers should use delete_access_config
    first to remove it.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine.
      instance_id: A str naming the running instance that the new public IP
        address should be added to.
      static_ip: A str naming the already allocated static IP address that
        will be used for the named instance.
    """
    gce_service, credentials = self.open_connection(parameters)
    http = httplib2.Http()
    auth_http = credentials.authorize(http)
    request = gce_service.instances().addAccessConfig(
      project=parameters[self.PARAM_PROJECT],
      instance=instance_id,
      networkInterface="nic0",
      zone=parameters[self.PARAM_ZONE],
      body={
        "kind": "compute#accessConfig",
        "type" : "ONE_TO_ONE_NAT",
        "name" : "External NAT",
        "natIP" : static_ip
      }
    )
    response = request.execute(http=auth_http)
    AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])
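
A sketch of the intended call sequence, reusing the hypothetical agent and
params from the sketch after Example #1; the instance name and IP are
placeholders, and delete_access_config is the companion method shown in
Example #40.

agent.delete_access_config(params, 'appscale-vm-1')  # drop any existing NAT IP
agent.add_access_config(params, 'appscale-vm-1', '203.0.113.10')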
Example #3
  def does_address_exist(self, parameters):
    """ Queries Google Compute Engine to see if the specified static IP address
    exists for this user.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine, and an additional key indicating the name of the
        static IP address that we should check for existence.
    Returns:
      True if the named address exists, and False otherwise.
    """
    gce_service, credentials = self.open_connection(parameters)
    http = httplib2.Http()
    auth_http = credentials.authorize(http)
    request = gce_service.addresses().list(
      project=parameters[self.PARAM_PROJECT],
      filter="address eq {0}".format(parameters[self.PARAM_STATIC_IP]),
      region=parameters[self.PARAM_REGION]
    )
    response = request.execute(http=auth_http)
    AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])

    return 'items' in response
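
A hypothetical pre-flight check built on this method, reusing the agent and
params sketch from Example #1; the IP, the region, and raising
AgentConfigurationException here are illustrative assumptions.

params[agent.PARAM_STATIC_IP] = '203.0.113.10'
params[agent.PARAM_REGION] = 'us-central1'
if not agent.does_address_exist(params):
  raise AgentConfigurationException('Static IP 203.0.113.10 is not allocated.')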
Example #4
  def create_firewall(self, parameters, network_url):
    """ Creates a new firewall in Google Compute Engine with the specified name,
    bound to the specified network.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine, and an additional key indicating the name of the
        firewall that we should create.
      network_url: A str containing the URL of the network that this new
        firewall should be applied to.
    """
    gce_service, credentials = self.open_connection(parameters)
    http = httplib2.Http()
    auth_http = credentials.authorize(http)
    request = gce_service.firewalls().insert(
      project=parameters[self.PARAM_PROJECT],
      body={
        "name" : parameters[self.PARAM_GROUP],
        "description" : "Firewall used for AppScale instances",
        "network" : network_url,
        "sourceRanges" : ["0.0.0.0/0"],
        "allowed" : [
          {"IPProtocol" : "tcp", "ports": ["1-65535"]},
          {"IPProtocol" : "udp", "ports": ["1-65535"]},
          {"IPProtocol" : "icmp"}
        ]
      }
    )
    response = request.execute(http=auth_http)
    AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])
    self.ensure_operation_succeeds(gce_service, auth_http, response,
      parameters[self.PARAM_PROJECT])
Example #5
  def cleanup_state(self, parameters):
    """ Removes any remote state that was created to run AppScale instances
    during this deployment.

    Args:
      parameters: A dict that includes keys indicating the remote state
        that should be deleted.
    """
    subscription_id = parameters[self.PARAM_SUBSCRIBER_ID]
    resource_group = parameters[self.PARAM_RESOURCE_GROUP]
    credentials = self.open_connection(parameters)
    network_client = NetworkManagementClient(credentials, subscription_id)
    verbose = parameters[self.PARAM_VERBOSE]

    AppScaleLogger.log("Deleting the Virtual Network, Public IP Address "
      "and Network Interface created for this deployment.")
    network_interfaces = network_client.network_interfaces.list(resource_group)
    for interface in network_interfaces:
      result = network_client.network_interfaces.delete(resource_group, interface.name)
      resource_name = 'Network Interface' + ':' + interface.name
      self.sleep_until_delete_operation_done(result, resource_name,
                                             self.MAX_SLEEP_TIME, verbose)

    public_ip_addresses = network_client.public_ip_addresses.list(resource_group)
    for public_ip in public_ip_addresses:
      result = network_client.public_ip_addresses.delete(resource_group, public_ip.name)
      resource_name = 'Public IP Address' + ':' + public_ip.name
      self.sleep_until_delete_operation_done(result, resource_name,
                                             self.MAX_SLEEP_TIME, verbose)

    virtual_networks = network_client.virtual_networks.list(resource_group)
    for network in virtual_networks:
      result = network_client.virtual_networks.delete(resource_group, network.name)
      resource_name = 'Virtual Network' + ':' + network.name
      self.sleep_until_delete_operation_done(result, resource_name,
                                             self.MAX_SLEEP_TIME, verbose)
Example #6
  def test_generate_crash_log(self):
    crashlog_suffix = '123456'
    flexmock(uuid)
    uuid.should_receive('uuid4').and_return(crashlog_suffix)

    exception_class = 'Exception'
    exception_message = 'baz message'
    exception = Exception(exception_message)
    stacktrace = "\n".join(['Traceback (most recent call last):',
      '  File "<stdin>", line 2, in <module>',
      '{0}: {1}'.format(exception_class, exception_message)])

    # Mock out grabbing our system's information
    flexmock(platform)
    platform.should_receive('platform').and_return("MyOS")
    platform.should_receive('python_implementation').and_return("MyPython")

    # Mock out writing it to the crash log file
    expected = '{0}log-{1}'.format(LocalState.LOCAL_APPSCALE_PATH,
      crashlog_suffix)

    fake_file = flexmock(name='fake_file')
    fake_file.should_receive('write').with_args(str)

    fake_builtins = flexmock(sys.modules['__builtin__'])
    fake_builtins.should_call('open')  # set the fall-through
    fake_builtins.should_receive('open').with_args(expected, 'w').and_return(
      fake_file)

    # mock out printing the crash log message
    flexmock(AppScaleLogger)
    AppScaleLogger.should_receive('warn')

    actual = LocalState.generate_crash_log(exception, stacktrace)
    self.assertEquals(expected, actual)
Example #7
  def setUp(self):
    self.keyname = "boobazblargfoo"
    self.function = "appscale-add-keypair"

    # mock out any writing to stdout
    flexmock(AppScaleLogger)
    AppScaleLogger.should_receive('log').and_return()

    # mock out all sleeping
    flexmock(time)
    time.should_receive('sleep').and_return()

    # throw some default mocks together for when invoking via shell succeeds
    # and when it fails
    self.fake_temp_file = flexmock(name='fake_temp_file')
    self.fake_temp_file.should_receive('seek').with_args(0).and_return()
    self.fake_temp_file.should_receive('read').and_return('boo out')
    self.fake_temp_file.should_receive('close').and_return()

    flexmock(tempfile)
    tempfile.should_receive('NamedTemporaryFile').and_return(self.fake_temp_file)

    self.success = flexmock(name='success', returncode=0)
    self.success.should_receive('wait').and_return(0)

    self.failed = flexmock(name='failed', returncode=1)
    self.failed.should_receive('wait').and_return(1)
Example #8
  def create_network_interface(self, network_client, interface_name, ip_name,
                               subnet, parameters):
    """ Creates the Public IP Address resource and uses that to create the
    Network Interface.

    Args:
      network_client: A NetworkManagementClient instance.
      interface_name: The name to use for the Network Interface.
      ip_name: The name to use for the Public IP Address.
      subnet: The Subnet resource from the Virtual Network created.
      parameters:  A dict, containing all the parameters necessary to
        authenticate this user with Azure.
    """
    group_name = parameters[self.PARAM_RESOURCE_GROUP]
    region = parameters[self.PARAM_ZONE]
    verbose = parameters[self.PARAM_VERBOSE]
    AppScaleLogger.verbose("Creating/Updating the Public IP Address '{}'".
                           format(ip_name), verbose)
    ip_address = PublicIPAddress(
      location=region, public_ip_allocation_method=IPAllocationMethod.dynamic,
      idle_timeout_in_minutes=4)
    result = network_client.public_ip_addresses.create_or_update(
      group_name, ip_name, ip_address)
    self.sleep_until_update_operation_done(result, ip_name, verbose)
    public_ip_address = network_client.public_ip_addresses.get(group_name, ip_name)

    AppScaleLogger.verbose("Creating/Updating the Network Interface '{}'".
                           format(interface_name), verbose)
    network_interface_ip_conf = NetworkInterfaceIPConfiguration(
      name=interface_name, private_ip_allocation_method=IPAllocationMethod.dynamic,
      subnet=subnet, public_ip_address=PublicIPAddress(id=(public_ip_address.id)))

    result = network_client.network_interfaces.create_or_update(group_name,
      interface_name, NetworkInterface(location=region,
                                       ip_configurations=[network_interface_ip_conf]))
    self.sleep_until_update_operation_done(result, interface_name, verbose)
Example #9
  def create_virtual_network(self, network_client, parameters, network_name,
                             subnet_name):
    """ Creates the network resources, such as Virtual Network and Subnet.

    Args:
      network_client: A NetworkManagementClient instance.
      parameters: A dict, containing all the parameters necessary to
        authenticate this user with Azure.
      network_name: The name to use for the Virtual Network resource.
      subnet_name: The name to use for the Subnet resource.
    Returns:
      A Subnet instance from the Virtual Network created.
    """
    group_name = parameters[self.PARAM_RESOURCE_GROUP]
    region = parameters[self.PARAM_ZONE]
    verbose = parameters[self.PARAM_VERBOSE]
    AppScaleLogger.verbose("Creating/Updating the Virtual Network '{}'".
                           format(network_name), verbose)
    address_space = AddressSpace(address_prefixes=['10.1.0.0/16'])
    subnet1 = Subnet(name=subnet_name, address_prefix='10.1.0.0/24')
    result = network_client.virtual_networks.create_or_update(
      group_name, network_name,
      VirtualNetwork(location=region, address_space=address_space,
                     subnets=[subnet1]))
    self.sleep_until_update_operation_done(result, network_name, verbose)
    subnet = network_client.subnets.get(group_name, network_name, subnet_name)
    return subnet
Example #10
  def get_optimal_spot_price(self, conn, instance_type, zone):
    """
    Returns the spot price for an EC2 instance of the specified instance type.
    The returned value is computed by averaging all the spot price history
    values returned by the back-end EC2 APIs and incrementing the average by
    extra 10%.

    Args:
      conn: A boto.EC2Connection that can be used to communicate with AWS.
      instance_type: A str representing the instance type whose prices we
        should speculate for.
      zone: A str representing the availability zone that the instance will
        be placed in.
    Returns:
      The estimated spot price for the specified instance type, in the
        specified availability zone.
    """
    end_time = datetime.datetime.now()
    start_time = end_time - datetime.timedelta(days=7)
    history = conn.get_spot_price_history(start_time=start_time.isoformat(),
      end_time=end_time.isoformat(), product_description='Linux/UNIX',
      instance_type=instance_type, availability_zone=zone)
    average = sum(entry.price for entry in history) / len(history)
    bid_price = average * 1.10
    AppScaleLogger.log('The average spot instance price for a {0} machine is'\
        ' {1}, and 10% more is {2}'.format(instance_type, average, bid_price))
    return bid_price
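
A worked instance of the arithmetic above, assuming three history points:
the average of 0.10, 0.12, and 0.14 is 0.12, and the returned bid is
0.12 * 1.10 = 0.132.

prices = [0.10, 0.12, 0.14]            # assumed entry.price values
average = sum(prices) / len(prices)    # 0.12
bid_price = average * 1.10             # 0.132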
Example #11
  def register(self, deployment_id):
    """ Allows users to register their AppScale deployment with the AppScale
    Portal.

    Args:
      deployment_id: A str containing the ID to register this deployment
        under with the AppScale Portal.

    Raises:
      AppScaleException: If the deployment has already been registered.
    """
    appscale_yaml = yaml.safe_load(self.read_appscalefile())
    if 'keyname' in appscale_yaml:
      keyname = appscale_yaml['keyname']
    else:
      keyname = 'appscale'

    nodes = self.get_nodes(keyname)
    head_node = self.get_head_node(nodes)
    if RegistrationHelper.appscale_has_deployment_id(head_node, keyname):
      existing_id = RegistrationHelper.get_deployment_id(head_node, keyname)
      if existing_id != deployment_id:
        raise AppScaleException(
          'This deployment has already been registered with a different ID.')

    if 'infrastructure' in appscale_yaml:
      deployment_type = 'cloud'
    else:
      deployment_type = 'cluster'

    deployment = RegistrationHelper.update_deployment(deployment_type, nodes,
      deployment_id)

    RegistrationHelper.set_deployment_id(head_node, keyname, deployment_id)

    AppScaleLogger.success(
      'Registration complete for AppScale deployment {0}.'
      .format(deployment['name']))
Example #12
  def create_network(self, parameters):
    """ Creates a new network in Google Compute Engine with the specified name.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine, and an additional key indicating the name of the
        network that we should create in GCE.
    Returns:
      The URL corresponding to the name of the network that was created, for use
      with binding this network to one or more firewalls.
    """
    gce_service, credentials = self.open_connection(parameters)
    http = httplib2.Http()
    auth_http = credentials.authorize(http)
    request = gce_service.networks().insert(
      project=parameters[self.PARAM_PROJECT],
      body={
        "name" : parameters[self.PARAM_GROUP],
        "description" : "Network used for AppScale instances",
        "IPv4Range" : "10.240.0.0/16"
      }
    )
    response = request.execute(http=auth_http)
    AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])
    self.ensure_operation_succeeds(gce_service, auth_http, response,
      parameters[self.PARAM_PROJECT])
    return response['targetLink']
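
The returned URL feeds directly into create_firewall (Example #4); a
hypothetical pairing, reusing the agent and params sketch from Example #1:

network_url = agent.create_network(params)
agent.create_firewall(params, network_url)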
Example #13
    def setUp(self):
        self.keyname = "boobazblargfoo"
        self.function = "appscale-gather-logs"

        # mock out any writing to stdout
        flexmock(AppScaleLogger)
        AppScaleLogger.should_receive("log").and_return()

        # mock out all sleeping
        flexmock(time)
        time.should_receive("sleep").and_return()

        # throw some default mocks together for when invoking via shell succeeds
        # and when it fails
        self.fake_temp_file = flexmock(name="fake_temp_file")
        self.fake_temp_file.should_receive("read").and_return("boo out")
        self.fake_temp_file.should_receive("close").and_return()
        self.fake_temp_file.should_receive("seek").with_args(0).and_return()

        flexmock(tempfile)
        tempfile.should_receive("NamedTemporaryFile").and_return(self.fake_temp_file)

        self.success = flexmock(name="success", returncode=0)
        self.success.should_receive("wait").and_return(0)

        self.failed = flexmock(name="success", returncode=1)
        self.failed.should_receive("wait").and_return(1)
Example #14
  def create_security_group(self, parameters, group):
    """Creates a new security group in AWS with the given name.

    Args:
      parameters: A dict that contains the credentials necessary to authenticate
        with AWS.
      group: A str that names the group that should be created.
    Returns:
      The 'boto.ec2.securitygroup.SecurityGroup' that was just created.
    Raises:
      AgentRuntimeException: If the security group could not be created.
    """
    AppScaleLogger.log('Creating security group: {0}'.format(group))
    conn = self.open_connection(parameters)
    specified_vpc = parameters.get(self.PARAM_VPC_ID)

    retries_left = self.SECURITY_GROUP_RETRY_COUNT
    while retries_left:
      try:
        conn.create_security_group(group, 'AppScale security group',
                                   specified_vpc)
      except EC2ResponseError:
        pass
      try:
        return self.get_security_group_by_name(conn, group, specified_vpc)
      except SecurityGroupNotFoundException:
        pass
      time.sleep(self.SLEEP_TIME)
      retries_left -= 1

    raise AgentRuntimeException("Couldn't create security group with " \
      "name {0}".format(group))
Example #15
  def does_user_exist(self, username, silent=False):
    """ Queries the AppController to see if the given user exists.

    Args:
      username: The email address registered as the username for the user's
        application.
      silent: A bool indicating if log messages should be suppressed when
        transient errors occur.
    Returns:
      True if the user exists, and False otherwise.
    """
    while True:
      try:
        user_exists = self.run_with_timeout(
          self.DEFAULT_TIMEOUT, self.DEFAULT_NUM_RETRIES,
          self.server.does_user_exist, username, self.secret)
        if user_exists == 'true':
          return True
        elif user_exists == 'false':
          return False
        else:
          raise Exception(user_exists)
      except BadSecretException as exception:
        raise AppControllerException(
          "Exception when checking if a user exists: {0}".format(exception))
      except Exception as acc_error:
        if not silent:
          AppScaleLogger.log("Exception when checking if a user exists: {0}".
                             format(acc_error))
          AppScaleLogger.log("Backing off and trying again.")
        time.sleep(10)
Example #16
  def setUp(self):
    # mock out logging, since it clutters out test output
    flexmock(AppScaleLogger)
    AppScaleLogger.should_receive('log').and_return()

    # next, pretend our ec2 credentials are properly set
    for credential in EC2Agent.REQUIRED_CREDENTIALS:
      os.environ[credential] = "baz"

    # finally, pretend that our ec2 image to use exists
    fake_ec2 = flexmock(name="fake_ec2")
    fake_ec2.should_receive('get_image').with_args('ami-ABCDEFG') \
      .and_return()
    flexmock(boto)
    boto.should_receive('connect_ec2').with_args('baz', 'baz').and_return(
      fake_ec2)

    # add in some instance variables so that we don't have
    # a lot of IP addresses everywhere
    self.blank_input_yaml = None
    self.default_options = {
      'table' : 'cassandra'
    }
    self.ip_1 = '192.168.1.1'
    self.ip_2 = '192.168.1.2'
    self.ip_3 = '192.168.1.3'
    self.ip_4 = '192.168.1.4'
    self.ip_5 = '192.168.1.5'
    self.ip_6 = '192.168.1.6'
    self.ip_7 = '192.168.1.7'
    self.ip_8 = '192.168.1.8'
Example #17
def print_table(table_name, headers, data):
  """
  Prints a list of statistics with specified headers.

  Args:
    table_name: A string representing the name of the table.
    headers: A list of statistic headers.
    data: A list of statistics.
  """
  table = tabulate(tabular_data=data, headers=headers, tablefmt='simple',
                   floatfmt=".1f", numalign="right", stralign="left")

  table_width = len(table.split("\n", 2)[1])
  left_signs = " " * ((table_width - len(table_name) - 2) / 2)
  right_signs = left_signs + (
    " " if (table_width - len(table_name)) % 2 == 1 else ""
  )
  result_table_name = (
    "{l_signs} {name} {r_signs}"
      .format(l_signs=left_signs, name=table_name, r_signs=right_signs)
  )

  title = styled(result_table_name, "bold", "blue", "reverse")
  AppScaleLogger.log(title)
  AppScaleLogger.log(table + "\n")
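
A small illustration of the centering arithmetic: if tabulate renders a
40-character separator line and table_name is 'NODE STATS' (10 characters),
the left padding is (40 - 10 - 2) / 2 = 14 spaces and the right padding is
also 14, since the width difference is even. A hypothetical call (headers
and data are placeholders):

print_table('NODE STATS',
            headers=['ip', 'cpu%', 'memory%'],
            data=[['192.168.1.1', 35.0, 62.5]])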
Example #18
  def attach_disk(self, parameters, disk_name, instance_id):
    """ Attaches the Elastic Block Store volume specified in 'disk_name' to this
    virtual machine.

    Args:
      parameters: A dict with keys for each parameter needed to connect to AWS.
      disk_name: A str naming the EBS mount to attach to this machine.
      instance_id: A str naming the id of the instance that the disk should be
        attached to. In practice, callers add disks to their own instances.
    Returns:
      The location on the local filesystem where the disk has been attached.
    """
    # In Amazon Web Services, if we're running on a Xen Paravirtualized machine,
    # then devices get added starting at /dev/xvda. If not, they get added at
    # /dev/sda. Find out which one we're on so that we know where the disk will
    # get attached to.
    if glob.glob("/dev/xvd*"):
      mount_point = '/dev/xvdc'
    else:
      mount_point = '/dev/sdc'

    conn = self.open_connection(parameters)

    try:
      AppScaleLogger.log('Attaching volume {0} to instance {1}, at {2}'.format(
        disk_name, instance_id, mount_point))
      conn.attach_volume(disk_name, instance_id, mount_point)
      return mount_point
    except EC2ResponseError as exception:
      if self.disk_attached(conn, disk_name, instance_id):
        return mount_point
      AppScaleLogger.log('An error occurred when trying to attach volume {0} '
        'to instance {1} at {2}'.format(disk_name, instance_id, mount_point))
      self.handle_failure('EC2 response error while attaching volume:' +
        exception.error_message)
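
A hypothetical call, reusing the ec2_agent and ec2_params assumed after
Example #14; the volume and instance IDs are placeholders, and the returned
device path depends on whether /dev/xvd* devices exist on the machine
running the tools.

device = ec2_agent.attach_disk(ec2_params, 'vol-ABCDEFG', 'i-12345678')
# '/dev/xvdc' on Xen paravirtualized machines, '/dev/sdc' otherwise.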
Example #19
  def create_security_group(self, parameters, group):
    """Creates a new security group in AWS with the given name.

    Args:
      parameters: A dict that contains the credentials necessary to authenticate
        with AWS.
      group: A str that names the group that should be created.
    Raises:
      AgentRuntimeException: If the security group could not be created.
    """
    AppScaleLogger.log('Creating security group: {0}'.format(group))
    conn = self.open_connection(parameters)
    retries_left = self.SECURITY_GROUP_RETRY_COUNT
    while retries_left:
      try:
        conn.create_security_group(group, 'AppScale security group')
      except EC2ResponseError:
        pass
      try:
        conn.get_all_security_groups(group)
        return
      except EC2ResponseError:
        pass
      time.sleep(self.SLEEP_TIME)
      retries_left -= 1

    raise AgentRuntimeException("Couldn't create security group with " \
      "name {0}".format(group))
Example #20
  def create_storage_account(self, parameters, storage_client):
    """ Creates a Storage Account under the Resource Group, if it does not
    already exist. In the case where no resource group is specified, a default
    storage account is created.

    Args:
      parameters: A dict, containing all the parameters necessary to
        authenticate this user with Azure.
      storage_client: A StorageManagementClient instance, used to create and
        access storage accounts.
    Raises:
      AgentConfigurationException: If there was a problem creating or accessing
        a storage account with the given subscription.
    """
    storage_account = parameters[self.PARAM_STORAGE_ACCOUNT]
    rg_name = parameters[self.PARAM_RESOURCE_GROUP]

    try:
      AppScaleLogger.log("Creating a new storage account '{0}' under the "
        "resource group '{1}'.".format(storage_account, rg_name))
      result = storage_client.storage_accounts.create(
        rg_name, storage_account, StorageAccountCreateParameters(
          sku=Sku(SkuName.standard_lrs), kind=Kind.storage,
          location=parameters[self.PARAM_ZONE]))
      # Result is a msrestazure.azure_operation.AzureOperationPoller instance.
      # wait() blocks, polling the underlying async operation until it's done.
      result.wait()
    except CloudError as error:
      raise AgentConfigurationException("Unable to create a storage account "
        "using the credentials provided: {}".format(error.message))
Example #21
  def setUp(self):
    self.cloud_argv = ['--min', '1', '--max', '1', '--group', 'blargscale',
      '--infrastructure', 'ec2', '--instance_type', 'm3.medium',
      '--machine', 'ami-ABCDEFG', '--zone', 'my-zone-1b']
    self.cluster_argv = ['--ips', 'ips.yaml']
    self.function = "appscale-run-instances"

    # mock out all logging, since it clutters our output
    flexmock(AppScaleLogger)
    AppScaleLogger.should_receive('log').and_return()

    # set up phony AWS credentials for each test; tests that exercise
    # missing credentials can remove them
    for credential in EucalyptusAgent.REQUIRED_EC2_CREDENTIALS:
      os.environ[credential] = "baz"
    os.environ['EC2_URL'] = "http://boo"

    # pretend that our credentials are valid.
    fake_ec2 = flexmock(name="fake_ec2")
    fake_ec2.should_receive('get_all_instances')

    # similarly, pretend that our image does exist in EC2
    # and Euca
    fake_ec2.should_receive('get_image').with_args('ami-ABCDEFG') \
      .and_return()
    fake_ec2.should_receive('get_image').with_args('emi-ABCDEFG') \
      .and_return('anything')

    # Slip in mocks that assume our EBS volume exists in EC2.
    fake_ec2.should_receive('get_all_volumes').with_args(['vol-ABCDEFG']) \
      .and_return('anything')

    # Also pretend that the availability zone we want to use exists.
    fake_ec2.should_receive('get_all_zones').with_args('my-zone-1b') \
      .and_return('anything')

    # Pretend that a bad availability zone doesn't exist.
    fake_ec2.should_receive('get_all_zones').with_args('bad-zone-1b') \
      .and_raise(boto.exception.EC2ResponseError, 'baz', 'baz')

    # Pretend that we have one elastic IP allocated for use.
    fake_ec2.should_receive('get_all_addresses').with_args('GOOD.IP.ADDRESS') \
      .and_return('anything')

    # Pretend that asking for a bad elastic IP doesn't work.
    fake_ec2.should_receive('get_all_addresses').with_args('BAD.IP.ADDRESS') \
      .and_raise(boto.exception.EC2ResponseError, 'baz', 'baz')

    fake_price = flexmock(name='fake_price', price=1.00)
    fake_ec2.should_receive('get_spot_price_history').and_return([fake_price])

    flexmock(boto)
    flexmock(boto.ec2)
    boto.ec2.should_receive('connect_to_region').with_args('my-zone-1',
      aws_access_key_id='baz', aws_secret_access_key='baz').and_return(fake_ec2)
    boto.ec2.should_receive('connect_to_region').with_args('bad-zone-1',
      aws_access_key_id='baz', aws_secret_access_key='baz').and_return(fake_ec2)
    boto.should_receive('connect_euca').and_return(fake_ec2)
Example #22
  def handle_failure(self, msg):
    """ Log the specified error message and raise an AgentRuntimeException

    Args:
      msg: An error message to be logged and included in the raised exception.
    Raises:
      AgentRuntimeException Contains the input error message.
    """
    AppScaleLogger.log(msg)
    raise AgentRuntimeException(msg)
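
A sketch of how a caller might consume this helper; since it always raises,
it can terminate an error branch without a return (the agent instance and
message are placeholders):

try:
  agent.handle_failure('Could not reach the EC2 endpoint.')
except AgentRuntimeException as error:
  AppScaleLogger.warn(str(error))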
Example #23
  def down(self, clean=False, terminate=False):
    """ 'down' provides a nicer experience for users than the
    appscale-terminate-instances command, by using the configuration options
    present in the AppScalefile found in the current working directory.

    Args:
      clean: A boolean to indicate if the deployment data and metadata
        need to be cleaned. This will clear the datastore.
      terminate: A boolean to indicate if instances need to be terminated
        (valid only if we spawned instances at start).

    Raises:
      AppScalefileException: If there is no AppScalefile in the current working
      directory.
    """
    contents = self.read_appscalefile()

    # Construct a terminate-instances command from the file's contents
    command = []
    contents_as_yaml = yaml.safe_load(contents)

    if 'verbose' in contents_as_yaml and contents_as_yaml['verbose'] == True:
      command.append("--verbose")

    if 'keyname' in contents_as_yaml:
      keyname = contents_as_yaml['keyname']
      command.append("--keyname")
      command.append(contents_as_yaml['keyname'])
    else:
      keyname = 'appscale'

    if clean:
      if 'test' not in contents_as_yaml or contents_as_yaml['test'] != True:
        LocalState.confirm_or_abort("Clean will delete every data in the deployment.")
      command.append("--clean")

    if terminate:
      infrastructure = LocalState.get_infrastructure(keyname)
      if infrastructure != "xen" and not LocalState.are_disks_used(
        keyname) and 'test' not in contents_as_yaml:
        LocalState.confirm_or_abort("Terminate will delete instances and the data on them.")
      command.append("--terminate")

    if 'test' in contents_as_yaml and contents_as_yaml['test'] == True:
      command.append("--test")

    # Finally, exec the command. Don't worry about validating it -
    # appscale-terminate-instances will do that for us.
    options = ParseArgs(command, "appscale-terminate-instances").args
    AppScaleTools.terminate_instances(options)

    LocalState.cleanup_appscale_files(keyname, terminate)
    AppScaleLogger.success("Successfully stopped your AppScale deployment.")
Example #24
  def setUp(self):
    self.keyname = "boobazblargfoo"
    self.function = "appscale-remove-app"

    # mock out any writing to stdout
    flexmock(AppScaleLogger)
    AppScaleLogger.should_receive('log').and_return()
    AppScaleLogger.should_receive('success').and_return()

    # mock out all sleeping
    flexmock(time)
    time.should_receive('sleep').and_return()
Example #25
  def configure_instance_security(self, parameters):
    """
    Sets up EC2 security keys and groups. Required input values are read from
    the parameters dictionary. More specifically, this method expects to
    find a 'keyname' parameter and a 'group' parameter in the parameters
    dictionary. Using these provided values, this method will create a new
    EC2 key-pair and a security group. The security group will be granted
    permission to access any port on the instantiated VMs. (Also see the
    documentation for the BaseAgent class.)

    Args:
      parameters: A dictionary of parameters.
    Returns:
      True if the security setup completed, or None when running under
      autoscaling, in which case the existing keypair and group are reused.
    """
    keyname = parameters[self.PARAM_KEYNAME]
    group = parameters[self.PARAM_GROUP]
    is_autoscale = parameters['autoscale_agent']

    AppScaleLogger.log("Verifying that keyname {0}".format(keyname) + \
      " is not already registered.")
    conn = self.open_connection(parameters)

    # While creating instances during autoscaling, we do not need to create a
    # new keypair or a security group. We just make use of the existing one.
    if is_autoscale in ['True', True]:
      return

    if conn.get_key_pair(keyname):
      self.handle_failure("SSH keyname {0} is already registered. Please " \
        "change the 'keyname' specified in your AppScalefile to a different " \
        "value, or erase it to have one automatically generated for you." \
        .format(keyname))

    security_groups = conn.get_all_security_groups()
    for security_group in security_groups:
      if security_group.name == group:
        self.handle_failure("Security group {0} is already registered. Please" \
          " change the 'group' specified in your AppScalefile to a different " \
          "value, or erase it to have one automatically generated for you." \
          .format(group))

    AppScaleLogger.log("Creating key pair: {0}".format(keyname))
    key_pair = conn.create_key_pair(keyname)
    ssh_key = '{0}{1}.key'.format(LocalState.LOCAL_APPSCALE_PATH, keyname)
    LocalState.write_key_file(ssh_key, key_pair.material)

    self.create_security_group(parameters, group)
    self.authorize_security_group(parameters, group, from_port=1, to_port=65535,
      ip_protocol='udp', cidr_ip='0.0.0.0/0')
    self.authorize_security_group(parameters, group, from_port=1, to_port=65535,
      ip_protocol='tcp', cidr_ip='0.0.0.0/0')
    self.authorize_security_group(parameters, group, from_port=-1, to_port=-1,
      ip_protocol='icmp', cidr_ip='0.0.0.0/0')
    return True
Example #26
  def configure_instance_security(self, parameters):
    """ Creates a GCE network and firewall with the specified name, and opens
    the ports on that firewall as needed for AppScale.

    We expect both the network and the firewall to not exist before this point,
    to avoid accidentally placing AppScale instances from different deployments
    in the same network and firewall (thus enabling them to see each other's web
    traffic).

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine, and an additional key indicating the name of the
        network and firewall that we should create in GCE.
    Returns:
      True, if the named network and firewall was created successfully.
    Raises:
      AgentRuntimeException: If the named network or firewall already exist in
      GCE.
    """
    is_autoscale_agent = parameters.get(self.PARAM_AUTOSCALE_AGENT, False)

    # While creating instances during autoscaling, we do not need to create a
    # new keypair or a network. We just make use of the existing one.
    if is_autoscale_agent:
      return

    AppScaleLogger.log("Verifying that SSH key exists locally")
    keyname = parameters[self.PARAM_KEYNAME]
    private_key = LocalState.LOCAL_APPSCALE_PATH + keyname
    public_key = private_key + ".pub"

    if os.path.exists(private_key) or os.path.exists(public_key):
      raise AgentRuntimeException("SSH key already found locally - please " +
        "use a different keyname")

    LocalState.generate_rsa_key(keyname, parameters[self.PARAM_VERBOSE])

    ssh_key_exists, all_ssh_keys = self.does_ssh_key_exist(parameters)
    if not ssh_key_exists:
      self.create_ssh_key(parameters, all_ssh_keys)

    if self.does_network_exist(parameters):
      raise AgentRuntimeException("Network already exists - please use a " + \
        "different group name.")

    if self.does_firewall_exist(parameters):
      raise AgentRuntimeException("Firewall already exists - please use a " + \
        "different group name.")

    network_url = self.create_network(parameters)
    self.create_firewall(parameters, network_url)
    return True
Example #27
  def create_resource_group(self, parameters, credentials):
    """ Creates a Resource Group for the application using the Service Principal
    Credentials, if it does not already exist. In the case where no resource
    group is specified, a default group is created.
    Args:
      parameters: A dict, containing all the parameters necessary to
        authenticate this user with Azure.
      credentials: A ServicePrincipalCredentials instance, that can be used to
        access or create any resources.
    Raises:
      AgentConfigurationException: If there was a problem creating or accessing
        a resource group with the given subscription.
    """
    subscription_id = parameters[self.PARAM_SUBSCRIBER_ID]
    resource_client = ResourceManagementClient(credentials, subscription_id)
    rg_name = parameters[self.PARAM_RESOURCE_GROUP]

    tag_name = 'default-tag'
    if parameters[self.PARAM_TAG]:
      tag_name = parameters[self.PARAM_TAG]

    storage_client = StorageManagementClient(credentials, subscription_id)
    resource_client.providers.register(self.MICROSOFT_STORAGE_RESOURCE)
    try:
      # If the resource group does not already exist, create a new one with the
      # specified storage account.
      if not parameters[self.PARAM_EXISTING_RG]:
        AppScaleLogger.log("Creating a new resource group '{0}' with the tag "
          "'{1}'.".format(rg_name, tag_name))
        resource_client.resource_groups.create_or_update(
          rg_name, ResourceGroup(location=parameters[self.PARAM_ZONE],
                                 tags={'tag': tag_name}))
        self.create_storage_account(parameters, storage_client)
      else:
        # If it already exists, check if the specified storage account exists
        # under it and if not, create a new account.
        storage_accounts = storage_client.storage_accounts.\
          list_by_resource_group(rg_name)
        acct_names = []
        for account in storage_accounts:
          acct_names.append(account.name)

        if parameters[self.PARAM_STORAGE_ACCOUNT] in acct_names:
          AppScaleLogger.log("Storage account '{0}' under '{1}' resource group "
            "already exists, so not creating it again.".format(
            parameters[self.PARAM_STORAGE_ACCOUNT], rg_name))
        else:
          self.create_storage_account(parameters, storage_client)
    except CloudError as error:
      raise AgentConfigurationException("Unable to create a resource group "
        "using the credentials provided: {}".format(error.message))
Example #28
  def does_ssh_key_exist(self, parameters):
    """ Queries Google Compute Engine to see if the specified SSH key exists.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine. We don't have an additional key for the name of
        the SSH key, since we use the one in ~/.ssh.
    Returns:
      A tuple of two items. The first item is a bool that is True if
        our public key's contents are in GCE, and False otherwise, while
        the second item is the contents of all SSH keys stored in GCE.
    """
    our_public_ssh_key = None
    public_ssh_key_location = LocalState.LOCAL_APPSCALE_PATH + \
      parameters[self.PARAM_KEYNAME] + ".pub"
    with open(public_ssh_key_location) as file_handle:
      system_user = os.getenv('LOGNAME', default=pwd.getpwuid(os.getuid())[0])
      our_public_ssh_key = system_user + ":" + file_handle.read().rstrip()

    gce_service, credentials = self.open_connection(parameters)
    try:
      http = httplib2.Http()
      auth_http = credentials.authorize(http)
      request = gce_service.projects().get(
        project=parameters[self.PARAM_PROJECT])
      response = request.execute(http=auth_http)
      AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])

      if 'items' not in response['commonInstanceMetadata']:
        return False, ""

      metadata = response['commonInstanceMetadata']['items']
      if not metadata:
        return False, ""

      all_ssh_keys = ""
      for item in metadata:
        if item['key'] != 'sshKeys':
          continue

        # Now that we know there's one or more SSH keys, just make sure that
        # ours is in this list.
        all_ssh_keys = item['value']
        if our_public_ssh_key in all_ssh_keys:
          return True, all_ssh_keys

      return False, all_ssh_keys
    except errors.HttpError:
      return False, ""
Example #29
  def attach_disk(self, parameters, disk_name, instance_id):
    """ Attaches the persistent disk specified in 'disk_name' to this virtual
    machine.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine.
      disk_name: A str naming the persistent disk to attach to this machine.
      instance_id: A str naming the id of the instance that the disk should be
        attached to. In practice, callers add disks to their own instance.
    Returns:
      A str indicating where the persistent disk has been attached to.
    """
    gce_service, credentials = self.open_connection(parameters)
    http = httplib2.Http()
    auth_http = credentials.authorize(http)
    project = parameters[self.PARAM_PROJECT]
    zone = parameters[self.PARAM_ZONE]

    # If the disk is already attached, return the mount point.
    request = gce_service.instances().get(project=project, zone=zone,
                                          instance=instance_id)
    disks = request.execute(auth_http)['disks']
    for disk in disks:
      path = disk['source'].split('/')
      if project == path[-5] and zone == path[-3] and disk_name == path[-1]:
        device_name = '/dev/{}'.format(disk['deviceName'])
        AppScaleLogger.log('Disk is already attached at {}'.format(device_name))
        return device_name

    request = gce_service.instances().attachDisk(
      project=project,
      zone=zone,
      instance=instance_id,
      body={
        'kind': 'compute#attachedDisk',
        'type': 'PERSISTENT',
        'mode': 'READ_WRITE',
        'source': "https://www.googleapis.com/compute/{0}/projects/{1}" \
                  "/zones/{2}/disks/{3}".format(self.API_VERSION, project,
                                                zone, disk_name),
        'deviceName': 'sdb'
      }
    )
    response = request.execute(auth_http)
    AppScaleLogger.log(str(response))
    self.ensure_operation_succeeds(gce_service, auth_http, response,
                                   parameters[self.PARAM_PROJECT])

    return '/dev/sdb'
Example #30
  def does_image_exist(self, parameters):
    """ Queries Amazon EC2 to see if the specified image exists.

    Args:
      parameters: A dict that contains the machine ID to check for existence.
    Returns:
      True if the machine ID exists, False otherwise.
    """
    try:
      conn = self.open_connection(parameters)
      image_id = parameters[self.PARAM_IMAGE_ID]
      conn.get_image(image_id)
      AppScaleLogger.log('Machine image {0} does exist'.format(image_id))
      return True
    except boto.exception.EC2ResponseError:
      AppScaleLogger.log('Machine image {0} does not exist'.format(image_id))
      return False
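
A hypothetical pre-flight check using this method together with
handle_failure (Example #22), reusing the ec2_agent and ec2_params assumed
earlier; the image ID mirrors the placeholder used in the test fixtures in
this corpus.

ec2_params[ec2_agent.PARAM_IMAGE_ID] = 'ami-ABCDEFG'
if not ec2_agent.does_image_exist(ec2_params):
  ec2_agent.handle_failure('Machine image ami-ABCDEFG does not exist.')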
Example #31
    def test_remote_log_tools_state_when_remote_is_up(self):
        # mock out the posting to the remote app
        fake_connection = flexmock(name="fake_connection")
        fake_connection.should_receive('request').with_args('POST',
          '/upload', self.payload, AppScaleLogger.HEADERS) \
          .and_return()
        flexmock(httplib).should_receive('HTTPConnection') \
          .and_return(fake_connection)

        actual = AppScaleLogger.remote_log_tools_state(self.options,
                                                       self.my_id, "started",
                                                       "X.Y.Z")
        self.assertEquals(self.expected, actual)
Example #32
    def stop_instances(self, parameters):
        """
    Stop one of more EC2 instances. The input instance IDs are
    fetched from the 'instance_ids' parameters in the input map. (Also
    see documentation for the BaseAgent class)

    Args:
      parameters: A dictionary of parameters.
    """
        instance_ids = parameters[self.PARAM_INSTANCE_IDS]
        conn = self.open_connection(parameters)
        conn.stop_instances(instance_ids)
        AppScaleLogger.log('Stopping instances: ' + ' '.join(instance_ids))
        if not self.wait_for_status_change(
                parameters, conn, 'stopped', max_wait_time=120):
            AppScaleLogger.log("re-stopping instances: " +
                               ' '.join(instance_ids))
            conn.stop_instances(instance_ids)
            if not self.wait_for_status_change(
                    parameters, conn, 'stopped', max_wait_time=120):
                self.handle_failure("ERROR: could not stop instances: " + \
                    ' '.join(instance_ids))
Example #33
  def does_image_exist(self, parameters):
    """ Queries Google Compute Engine to see if the specified image exists for
    this user.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine, and an additional key indicating the name of the
        image that we should check for existence.
    Returns:
      True if the named image exists, and False otherwise.
    """
    gce_service, credentials = self.open_connection(parameters)
    try:
      http = httplib2.Http()
      auth_http = credentials.authorize(http)
      request = gce_service.images().get(project=parameters[self.PARAM_PROJECT],
        image=parameters[self.PARAM_IMAGE_ID])
      response = request.execute(http=auth_http)
      AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])
      return True
    except errors.HttpError:
      return False
Example #34
  def describe_instances(self, parameters, pending=False):
    """ Queries Google Compute Engine to see which instances are currently
    running, and retrieve information about their public and private IPs.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine.
      pending: Boolean if we should show pending instances.
    Returns:
      A tuple of the form (public_ips, private_ips, instance_ids), where each
        member is a list. Items correspond to each other across these lists,
        so a caller is guaranteed that item X in each list belongs to the same
        virtual machine.
    """
    gce_service, credentials = self.open_connection(parameters)
    http = httplib2.Http()
    auth_http = credentials.authorize(http)
    request = gce_service.instances().list(
      project=parameters[self.PARAM_PROJECT],
      filter="name eq {group}-.*".format(group=parameters[self.PARAM_GROUP]),
      zone=parameters[self.PARAM_ZONE]
    )
    response = request.execute(http=auth_http)
    AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])

    instance_ids = []
    public_ips = []
    private_ips = []

    if response and 'items' in response:
      instances = response['items']
      for instance in instances:
        # Pending GCE instances report PROVISIONING or STAGING status.
        if instance['status'] == "RUNNING" or (pending and
            instance['status'] in ("PROVISIONING", "STAGING")):
          instance_ids.append(instance['name'])
          network_interface = instance['networkInterfaces'][0]
          public_ips.append(network_interface['accessConfigs'][0]['natIP'])
          private_ips.append(network_interface['networkIP'])

    return public_ips, private_ips, instance_ids
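
The parallel-list contract means the three lists can be zipped safely; a
sketch reusing the hypothetical agent and params from the sketch after
Example #1:

public_ips, private_ips, instance_ids = agent.describe_instances(params)
for public_ip, private_ip, instance_id in zip(
    public_ips, private_ips, instance_ids):
  AppScaleLogger.log('{0}: public {1}, private {2}'.format(
    instance_id, public_ip, private_ip))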
Example #35
  def delete_firewall(self, parameters):
    """ Deletes a firewall in Google Compute Engine with the specified name.

    Callers should not invoke this method until they are certain that no
    instances are using the specified firewall, or this method will fail.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine, and an additional key indicating the name of the
        firewall that we should delete.
    """
    gce_service, credentials = self.open_connection(parameters)
    http = httplib2.Http()
    auth_http = credentials.authorize(http)
    request = gce_service.firewalls().delete(
      project=parameters[self.PARAM_PROJECT],
      firewall=parameters[self.PARAM_GROUP]
    )
    response = request.execute(http=auth_http)
    AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])
    self.ensure_operation_succeeds(gce_service, auth_http, response,
      parameters[self.PARAM_PROJECT])
Example #36
def print_failures(failures):
  """
  Prints a failure list.

  Args:
    failures: A dict in which each key is a kind of statistics and each
      value is a failure list.
  """
  stats_kinds = {
    "nodes": "Node",
    "processes": "Process",
    "proxies": "Proxy"
  }

  AppScaleLogger.warn("There are some failures while getting stats:")
  for kind, fails in failures.iteritems():
    for ip, failure in fails.iteritems():
      AppScaleLogger.warn(
        "  {stats_kind} stats from {ip}: {failure}".format(
          stats_kind=stats_kinds[kind], ip=ip, failure=failure
        )
      )
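
A sketch of the expected input shape; the keys match the stats_kinds table
above, and the addresses and messages are placeholders.

print_failures({
  'nodes': {'192.168.1.1': 'connection timed out'},
  'proxies': {'192.168.1.2': 'invalid response'},
})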
Example #37
    def attach_disk(self, parameters, disk_name, instance_id):
        """ Attaches the Elastic Block Store volume specified in 'disk_name' to this
    virtual machine.

    Args:
      parameters: A dict with keys for each parameter needed to connect to AWS.
      disk_name: A str naming the EBS mount to attach to this machine.
      instance_id: A str naming the id of the instance that the disk should be
        attached to. In practice, callers add disks to their own instances.
    Returns:
      The location on the local filesystem where the disk has been attached.
    """
        # In Amazon Web Services, if we're running on a Xen Paravirtualized machine,
        # then devices get added starting at /dev/xvda. If not, they get added at
        # /dev/sda. Find out which one we're on so that we know where the disk will
        # get attached to.
        if glob.glob("/dev/xvd*"):
            mount_point = '/dev/xvdc'
        else:
            mount_point = '/dev/sdc'

        conn = self.open_connection(parameters)

        try:
            AppScaleLogger.log(
                'Attaching volume {0} to instance {1}, at {2}'.format(
                    disk_name, instance_id, mount_point))
            conn.attach_volume(disk_name, instance_id, mount_point)
            return mount_point
        except EC2ResponseError as exception:
            if self.disk_attached(conn, disk_name, instance_id):
                return mount_point
            AppScaleLogger.log(
                'An error occurred when trying to attach volume {0} '
                'to instance {1} at {2}'.format(disk_name, instance_id,
                                                mount_point))
            self.handle_failure('EC2 response error while attaching volume:' +
                                exception.error_message)
Example #38
  def update_indexes(cls, source_location, keyname, project_id):
    """ Updates a project's composite indexes from the configuration file.

    Args:
      source_location: A string specifying the location of the source code.
      keyname: A string specifying the key name.
      project_id: A string specifying the project ID.
    """
    if cls.TAR_GZ_REGEX.search(source_location):
      fetch_function = utils.config_from_tar_gz
      version = Version.from_tar_gz(source_location)
    elif cls.ZIP_REGEX.search(source_location):
      fetch_function = utils.config_from_zip
      version = Version.from_zip(source_location)
    elif os.path.isdir(source_location):
      fetch_function = utils.config_from_dir
      version = Version.from_directory(source_location)
    elif source_location.endswith('.yaml'):
      fetch_function = utils.config_from_dir
      version = Version.from_yaml_file(source_location)
      source_location = os.path.dirname(source_location)
    else:
      raise BadConfigurationException(
        '{} must be a directory, tar.gz, or zip'.format(source_location))

    if project_id:
      version.project_id = project_id

    indexes = utils.get_indexes(source_location, fetch_function)
    # If the source does not have an index configuration file, do nothing.
    if indexes is None:
      return

    AppScaleLogger.log('Updating indexes')
    load_balancer_ip = LocalState.get_host_with_role(keyname, 'load_balancer')
    secret_key = LocalState.get_secret_key(keyname)
    admin_client = AdminClient(load_balancer_ip, secret_key)
    admin_client.update_indexes(version.project_id, indexes)
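
A hypothetical invocation, assuming the enclosing class is the AppScaleTools
facade used elsewhere in these examples; the path, keyname, and project ID
are placeholders.

AppScaleTools.update_indexes('/tmp/guestbook.tar.gz', 'appscale', 'guestbook')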
Example #39
  def reset_password(cls, options):
    """Resets a user's password in the currently running AppScale deployment.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    """
    secret = LocalState.get_secret_key(options.keyname)
    load_balancer_ip = LocalState.get_host_with_role(
      options.keyname, 'load_balancer')
    username, password = LocalState.get_credentials(is_admin=False)
    encrypted_password = LocalState.encrypt_password(username, password)

    acc = AppControllerClient(load_balancer_ip, secret)

    try:
      acc.reset_password(username, encrypted_password)
      AppScaleLogger.success("The password was successfully changed for the " \
        "given user.")
    except Exception as exception:
      AppScaleLogger.warn("Could not change the user's password for the " + \
        "following reason: {0}".format(str(exception)))
      sys.exit(1)
Example #40
  def delete_access_config(self, parameters, instance_id):
    """ Instructs Google Compute Engine to remove the public IP address from
    the named instance.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine.
      instance_id: A str naming the running instance that the public IP
        address should be removed from.
    """
    gce_service, credentials = self.open_connection(parameters)
    http = httplib2.Http()
    auth_http = credentials.authorize(http)
    request = gce_service.instances().deleteAccessConfig(
      project=parameters[self.PARAM_PROJECT],
      accessConfig="External NAT",
      instance=instance_id,
      networkInterface="nic0",
      zone=parameters[self.PARAM_ZONE]
    )
    response = request.execute(http=auth_http)
    AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])
Example #41
  def remove_app(cls, options):
    """Instructs AppScale to no longer host the named application.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    """
    if not options.confirm:
      response = raw_input(
        "Are you sure you want to delete this project's services? (y/N) ")
      if response.lower() not in ['y', 'yes']:
        raise AppScaleException("Cancelled application removal.")

    load_balancer_ip = LocalState.get_host_with_role(
      options.keyname, 'load_balancer')
    secret = LocalState.get_secret_key(options.keyname)
    admin_client = AdminClient(load_balancer_ip, secret)

    for service_id in admin_client.list_services(options.project_id):
      AppScaleLogger.log('Deleting service: {}'.format(service_id))
      cls._remove_service(admin_client, options.project_id, service_id)

    AppScaleLogger.success('Done shutting down {}.'.format(options.project_id))
Example #42
  def does_disk_exist(self, parameters, disk):
    """ Queries Google Compute Engine to see if the specified persistent disk
    exists for this user.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine.
      disk: A str containing the name of the disk that we should check for
        existence.
    Returns:
      True if the named persistent disk exists, and False otherwise.
    """
    gce_service, credentials = self.open_connection(parameters)
    try:
      http = httplib2.Http()
      auth_http = credentials.authorize(http)
      request = gce_service.disks().get(project=parameters[self.PARAM_PROJECT],
        disk=disk, zone=parameters[self.PARAM_ZONE])
      response = request.execute(http=auth_http)
      AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])
      return True
    except errors.HttpError:
      return False
Example #43
    def set_admin_role(self, username, is_cloud_admin, capabilities):
        """ Grants the given user the ability to perform any administrative
        action.

        Args:
          username: The e-mail address that should be given administrative
            authorizations.
          is_cloud_admin: A bool indicating if the user should be a cloud
            administrator.
          capabilities: The capabilities that should be granted to the user.
        Raises:
          AppControllerException: If unable to set the admin role.
        """
        AppScaleLogger.log('Granting admin privileges to %s' % username)
        try:
            result = self.run_with_timeout(self.DEFAULT_TIMEOUT,
                                           self.DEFAULT_NUM_RETRIES,
                                           self.server.set_admin_role,
                                           username, is_cloud_admin,
                                           capabilities, self.secret)
        except TimeoutException:
            raise AppControllerException(
                'Timeout when making AppController call')

        if result != 'true':
            raise AppControllerException(
                'Unable to set admin role: {}'.format(result))
Example #44
  def assert_credentials_are_valid(self, parameters):
    """Contacts GCE to see if the given credentials are valid.

    Args:
      parameters: A dict containing the credentials necessary to interact with
      GCE.

    Raises:
      AgentConfigurationException: If an error is encountered during
      authentication.
    """
    gce_service, credentials = self.open_connection(parameters)
    try:
      http = httplib2.Http()
      auth_http = credentials.authorize(http)
      request = gce_service.instances().list(
        project=parameters[self.PARAM_PROJECT],
        zone=parameters[self.PARAM_ZONE])
      response = request.execute(http=auth_http)
      AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])
      return True
    except errors.HttpError as e:
      error_message = json.loads(e.content)['error']['message']
      raise AgentConfigurationException(error_message)
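Because the raised exception carries GCE's own error message, callers can
surface it directly and bail out before any instances are started. A hedged
usage sketch (`agent` and `params` are hypothetical stand-ins for an agent
instance and its parameter dict):

try:
    agent.assert_credentials_are_valid(params)
except AgentConfigurationException as error:
    AppScaleLogger.warn('GCE rejected the supplied credentials: '
                        '{0}'.format(error))
    sys.exit(1)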
Example #45
    def setUp(self):
        self.keyname = "boobazblargfoo"
        self.group = "bazboogroup"
        self.function = "appscale-terminate-instances"

        # mock out any writing to stdout
        flexmock(AppScaleLogger)
        AppScaleLogger.should_receive('log').and_return()
        AppScaleLogger.should_receive('verbose').and_return()

        # mock out all sleeping
        flexmock(time)
        time.should_receive('sleep').and_return()

        local_state = flexmock(LocalState)
        local_state.should_receive('shell').and_return("")

        # throw some default mocks together for when invoking via shell succeeds
        # and when it fails
        self.fake_temp_file = flexmock(name='fake_temp_file')
        self.fake_temp_file.should_receive('read').and_return('boo out')
        self.fake_temp_file.should_receive('close').and_return()
        self.fake_temp_file.should_receive('seek').with_args(0).and_return()

        flexmock(tempfile)
        tempfile.should_receive('NamedTemporaryFile').and_return(
            self.fake_temp_file)

        self.success = flexmock(name='success', returncode=0)
        self.success.should_receive('wait').and_return(0)

        self.failed = flexmock(name='failed', returncode=1)
        self.failed.should_receive('wait').and_return(1)

        # throw in some mocks that assume our EC2 environment variables are set
        for credential in EC2Agent.REQUIRED_EC2_CREDENTIALS:
            os.environ[credential] = "baz"
Example #46
  def create_scratch_disk(self, parameters):
    """ Creates a disk from a given machine image.

    GCE does not support scratch disks on API version v1 and higher. We create
    a persistent disk upon creation to act like one to keep the abstraction used
    in other infrastructures.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine.
    Returns:
      A str, the URL of the disk to use.
    """
    gce_service, credentials = self.open_connection(parameters)
    http = httplib2.Http()
    auth_http = credentials.authorize(http)
    disk_name = self.generate_disk_name(parameters)
    project_url = '{0}{1}'.format(self.GCE_URL, 
      parameters[self.PARAM_PROJECT])
    source_image_url = '{0}{1}/global/images/{2}'.format(self.GCE_URL,
      parameters[self.PARAM_PROJECT], parameters[self.PARAM_IMAGE_ID])
    request = gce_service.disks().insert(
      project=parameters[self.PARAM_PROJECT],
      zone=parameters[self.PARAM_ZONE],
      body={
        'name': disk_name
      },
      sourceImage=source_image_url
    )
    response = request.execute(http=auth_http)
    AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])
    self.ensure_operation_succeeds(gce_service, auth_http, response,
      parameters[self.PARAM_PROJECT])

    disk_url = "{0}/zones/{1}/disks/{2}".format(
      project_url, parameters[self.PARAM_ZONE], disk_name)
    return disk_url
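For reference, the returned URL follows GCE's standard resource layout. A
small self-contained sketch of the same composition (the GCE_URL value and
the project, zone, and disk names below are illustrative assumptions):

GCE_URL = 'https://www.googleapis.com/compute/v1/projects/'
project, zone, disk_name = 'my-project', 'us-central1-a', 'appscale-disk'
project_url = '{0}{1}'.format(GCE_URL, project)
disk_url = '{0}/zones/{1}/disks/{2}'.format(project_url, zone, disk_name)
# disk_url is now:
# https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/disks/appscale-disk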
Example #47
    def test_generate_crash_log(self):
        crashlog_suffix = '123456'
        flexmock(uuid)
        uuid.should_receive('uuid4').and_return(crashlog_suffix)

        exception_class = 'Exception'
        exception_message = 'baz message'
        exception = Exception(exception_message)
        stacktrace = "\n".join([
            'Traceback (most recent call last):',
            '  File "<stdin>", line 2, in <module>',
            '{0}: {1}'.format(exception_class, exception_message)
        ])

        # Mock out grabbing our system's information
        flexmock(platform)
        platform.should_receive('platform').and_return("MyOS")
        platform.should_receive('python_implementation').and_return("MyPython")

        # Mock out writing it to the crash log file
        expected = '{0}log-{1}'.format(LocalState.LOCAL_APPSCALE_PATH,
                                       crashlog_suffix)

        fake_file = flexmock(name='fake_file')
        fake_file.should_receive('write').with_args(str)

        fake_builtins = flexmock(sys.modules['__builtin__'])
        fake_builtins.should_call('open')  # set the fall-through
        fake_builtins.should_receive('open').with_args(
            expected, 'w').and_return(fake_file)

        # mock out printing the crash log message
        flexmock(AppScaleLogger)
        AppScaleLogger.should_receive('warn')

        actual = LocalState.generate_crash_log(exception, stacktrace)
        self.assertEquals(expected, actual)
Example #48
    def register(self, deployment_id):
        """ Allows users to register their AppScale deployment with the
        AppScale Portal.

        Args:
          deployment_id: A str, the ID to register this deployment under.
        Raises:
          AppScaleException: If the deployment has already been registered.
        """
        appscale_yaml = yaml.safe_load(self.read_appscalefile())
        if 'keyname' in appscale_yaml:
            keyname = appscale_yaml['keyname']
        else:
            keyname = 'appscale'

        nodes = self.get_nodes(keyname)
        head_node = self.get_head_node(nodes)
        if RegistrationHelper.appscale_has_deployment_id(head_node, keyname):
            existing_id = RegistrationHelper.get_deployment_id(
                head_node, keyname)
            if existing_id != deployment_id:
                raise AppScaleException(
                    'This deployment has already been registered with a different ID.'
                )

        if 'infrastructure' in appscale_yaml:
            deployment_type = 'cloud'
        else:
            deployment_type = 'cluster'

        deployment = RegistrationHelper.update_deployment(
            deployment_type, nodes, deployment_id)

        RegistrationHelper.set_deployment_id(head_node, keyname, deployment_id)

        AppScaleLogger.success(
            'Registration complete for AppScale deployment {0}.'.format(
                deployment['name']))
Example #49
    def disk_attached(self, conn, disk_name, instance_id):
        """ Checks if the named disk is attached to the given instance.

        Args:
          conn: A boto connection.
          disk_name: A str naming the EBS volume to check.
          instance_id: A str naming the id of the instance that the disk
            should be attached to.
        Returns:
          True if the volume is attached to the instance, False if it is not.
        """
        try:
            volumes = conn.get_all_volumes(
                filters={'attachment.instance-id': instance_id})
            for volume in volumes:
                if volume.id == disk_name:
                    return True

            return False
        except EC2ResponseError as exception:
            AppScaleLogger.log('An error occurred when trying to find '
                               'attached volumes.')
            self.handle_failure('EC2 response error while checking attached '
                                'volumes: {}'.format(exception.error_message))
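Since attachment is checked by filtering volumes on the instance id, this is
a cheap guard before issuing an attach call. A hedged usage sketch (`agent`
and `conn` are hypothetical stand-ins for an EC2 agent instance and its boto
connection; the volume and instance ids are made up):

if not agent.disk_attached(conn, 'vol-0123456789abcdef0',
                           'i-0123456789abcdef0'):
    # boto's EC2 connection exposes attach_volume(volume_id, instance_id,
    # device); '/dev/sdb' mirrors the device name used elsewhere in these
    # examples.
    conn.attach_volume('vol-0123456789abcdef0', 'i-0123456789abcdef0',
                       '/dev/sdb')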
Example #50
  def detach_disk(self, parameters, disk_name, instance_id):
    """ Detaches the persistent disk specified in 'disk_name' from the named
    instance.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine.
      disk_name: A str naming the persistent disk to detach.
      instance_id: A str naming the id of the instance that the disk should be
        detached from.
    """
    gce_service, credentials = self.open_connection(parameters)
    http = httplib2.Http()
    auth_http = credentials.authorize(http)
    project_id = parameters[self.PARAM_PROJECT]
    request = gce_service.instances().detachDisk(
      project=project_id,
      zone=parameters[self.PARAM_ZONE],
      instance=instance_id,
      deviceName='sdb')
    response = request.execute(http=auth_http)
    AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])
    self.ensure_operation_succeeds(gce_service, auth_http, response,
      parameters[self.PARAM_PROJECT])
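Note that the device name is hardcoded to 'sdb' here, so this only detaches
the single data disk that AppScale attaches under that name; the disk_name
argument is never actually sent to GCE. A hedged usage sketch (`agent` and
`params` are hypothetical):

# Detach the data disk before terminating the instance so it can be reused.
agent.detach_disk(params, disk_name='appscale-data-disk',
                  instance_id='appscale-node-1')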
Example #51
    def setUp(self):
        self.keyname = "boobazblargfoo"
        self.function = "appscale-set-property"

        # mock out any writing to stdout
        flexmock(AppScaleLogger)
        AppScaleLogger.should_receive('log').and_return()
        AppScaleLogger.should_receive('success').and_return()
        AppScaleLogger.should_receive('warn').and_return()

        # mock out all sleeping
        flexmock(time)
        time.should_receive('sleep').and_return()
Example #52
    def setUp(self):
        self.keyname = "boobazblargfoo"
        self.function = "appscale-relocate-app"
        self.appid = 'my-crazy-app'

        # mock out any writing to stdout
        flexmock(AppScaleLogger)
        AppScaleLogger.should_receive('log').and_return()
        AppScaleLogger.should_receive('success').and_return()
        AppScaleLogger.should_receive('warn').and_return()

        # mock out all sleeping
        flexmock(time)
        time.should_receive('sleep').and_return()

        # mock out reading the locations.json file, and slip in our own json
        flexmock(os.path)
        os.path.should_call('exists')  # set the fall-through
        os.path.should_receive('exists').with_args(
            LocalState.get_locations_json_location(
                self.keyname)).and_return(True)

        fake_nodes_json = flexmock(name="fake_nodes_json")
        fake_nodes_json.should_receive('read').and_return(
            json.dumps({
                "node_info": [{
                    "public_ip": "public1",
                    "private_ip": "private1",
                    "jobs": ["shadow", "login"]
                }]
            }))
        fake_nodes_json.should_receive('write').and_return()
        builtins = flexmock(sys.modules['__builtin__'])
        builtins.should_call('open')  # set the fall-through
        builtins.should_receive('open').with_args(
          LocalState.get_locations_json_location(self.keyname), 'r') \
          .and_return(fake_nodes_json)

        # put in a mock for reading the secret file
        secret_key_location = LocalState.get_secret_key_location(self.keyname)
        fake_secret = flexmock(name="fake_secret")
        fake_secret.should_receive('read').and_return('the secret')
        builtins.should_receive('open').with_args(secret_key_location, 'r') \
          .and_return(fake_secret)
Example #53
  def relocate_app(cls, options):
    """Instructs AppScale to move the named application to a different port.

    Args:
      options: A Namespace that has fields for each parameter that can be passed
        in via the command-line interface.
    Raises:
      AppScaleException: If the named application isn't running in this AppScale
        cloud, if the destination port is in use by a different application, or
        if the AppController rejects the request to relocate the application (in
        which case it includes the reason why the rejection occurred).
    """
    load_balancer_ip = LocalState.get_host_with_role(
      options.keyname, 'load_balancer')
    acc = AppControllerClient(
      load_balancer_ip, LocalState.get_secret_key(options.keyname))

    version_key = '_'.join([options.appname, DEFAULT_SERVICE, DEFAULT_VERSION])
    app_info_map = acc.get_app_info_map()
    if version_key not in app_info_map:
      raise AppScaleException("The given application, {0}, is not currently " \
        "running in this AppScale cloud, so we can't move it to a different " \
        "port.".format(options.appname))

    try:
      login_host = acc.get_property('login')['login']
    except KeyError:
      raise AppControllerException('login property not found')

    acc.relocate_version(version_key, options.http_port, options.https_port)
    AppScaleLogger.success(
      'Successfully issued request to move {0} to ports {1} and {2}'.format(
        options.appname, options.http_port, options.https_port))
    RemoteHelper.sleep_until_port_is_open(login_host, options.http_port)
    AppScaleLogger.success(
      'Your app serves unencrypted traffic at: http://{0}:{1}'.format(
        login_host, options.http_port))
    AppScaleLogger.success(
      'Your app serves encrypted traffic at: https://{0}:{1}'.format(
        login_host, options.https_port))
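The version key used above is just the project id joined with the service and
version identifiers. A runnable sketch, assuming the usual defaults (the
'default' and 'v1' values are assumptions, not taken from this code):

DEFAULT_SERVICE = 'default'
DEFAULT_VERSION = 'v1'
appname = 'guestbook'
version_key = '_'.join([appname, DEFAULT_SERVICE, DEFAULT_VERSION])
assert version_key == 'guestbook_default_v1'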
Example #54
  def run_instances(self, count, parameters, security_configured):
    """ Starts 'count' instances in Google Compute Engine, and returns once they
    have been started.

    Callers should create a network and attach a firewall to it before using
    this method, or the newly created instances will not have a network and
    firewall to attach to (and thus this method will fail).

    Args:
      count: An int that specifies how many virtual machines should be started.
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine.
      security_configured: Unused, as we assume that the network and firewall
        have already been set up.
    Returns:
      A tuple of the form (instance_ids, public_ips, private_ips).
    """
    project_id = parameters[self.PARAM_PROJECT]
    image_id = parameters[self.PARAM_IMAGE_ID]
    instance_type = parameters[self.PARAM_INSTANCE_TYPE]
    keyname = parameters[self.PARAM_KEYNAME]
    group = parameters[self.PARAM_GROUP]
    zone = parameters[self.PARAM_ZONE]

    AppScaleLogger.log("Starting {0} machines with machine id {1}, with " \
      "instance type {2}, keyname {3}, in security group {4}, in zone {5}" \
      .format(count, image_id, instance_type, keyname, group, zone))

    # First, see how many instances are running and what their info is.
    start_time = datetime.datetime.now()
    active_public_ips, active_private_ips, active_instances = \
      self.describe_instances(parameters)

    # Construct URLs
    image_url = '{0}{1}/global/images/{2}'.format(self.GCE_URL, project_id,
      image_id)
    project_url = '{0}{1}'.format(self.GCE_URL, project_id)
    machine_type_url = '{0}/zones/{1}/machineTypes/{2}'.format(project_url,
      zone, instance_type)
    network_url = '{0}/global/networks/{1}'.format(project_url, group)

    # Construct the request body
    for index in range(count):
      disk_url = self.create_scratch_disk(parameters)
      instances = {
        # Truncate the name down to the first 62 characters, since GCE doesn't
        # let us use arbitrarily long instance names.
        'name': '{group}-{uuid}'.format(group=group, uuid=uuid.uuid4())[:62],
        'machineType': machine_type_url,
        'disks': [{
          'source': disk_url,
          'boot': 'true',
          'type': 'PERSISTENT'
        }],
        'image': image_url,
        'networkInterfaces': [{
          'accessConfigs': [{
            'type': 'ONE_TO_ONE_NAT',
            'name': 'External NAT'
          }],
          'network': network_url
        }],
        'serviceAccounts': [{
             'email': self.DEFAULT_SERVICE_EMAIL,
             'scopes': [self.GCE_SCOPE]
        }]
      }

      # Create the instance
      gce_service, credentials = self.open_connection(parameters)
      http = httplib2.Http()
      auth_http = credentials.authorize(http)
      request = gce_service.instances().insert(
           project=project_id, body=instances, zone=zone)
      response = request.execute(http=auth_http)
      AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])
      self.ensure_operation_succeeds(gce_service, auth_http, response,
        parameters[self.PARAM_PROJECT])
    
    instance_ids = []
    public_ips = []
    private_ips = []
    end_time = datetime.datetime.now() + datetime.timedelta(0,
      self.MAX_VM_CREATION_TIME)
    now = datetime.datetime.now()

    while now < end_time:
      AppScaleLogger.log("Waiting for your instances to start...")
      instance_info = self.describe_instances(parameters)
      public_ips = instance_info[0]
      private_ips = instance_info[1]
      instance_ids = instance_info[2]
      public_ips = self.diff(public_ips, active_public_ips)
      private_ips = self.diff(private_ips, active_private_ips)
      instance_ids = self.diff(instance_ids, active_instances)
      if count == len(public_ips):
        break
      time.sleep(self.SLEEP_TIME)
      now = datetime.datetime.now()

    if not public_ips:
      self.handle_failure('No public IPs were able to be procured '
                          'within the time limit')

    if len(public_ips) != count:
      for index in range(0, len(public_ips)):
        if public_ips[index] == '0.0.0.0':
          instance_to_term = instance_ids[index]
          AppScaleLogger.log('Instance {0} failed to get a public IP address '
            'and is being terminated'.format(instance_to_term))
          # terminate_instances on this agent expects a parameters dict that
          # names the instances to delete, not a bare list of ids.
          parameters[self.PARAM_INSTANCE_IDS] = [instance_to_term]
          self.terminate_instances(parameters)

    end_time = datetime.datetime.now()
    total_time = end_time - start_time
    AppScaleLogger.log("Started {0} on-demand instances in {1} seconds" \
      .format(count, total_time.seconds))
    return instance_ids, public_ips, private_ips
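The diff helper used in the polling loop is not shown in these examples. A
plausible sketch (an assumption, not the project's actual implementation) is
an order-preserving list difference, which lets the loop isolate the machines
this call started from the ones that were already running:

def diff(list1, list2):
    """ Returns the items of list1 that do not appear in list2, in order. """
    return [item for item in list1 if item not in list2]

assert diff(['1.2.3.4', '5.6.7.8'], ['1.2.3.4']) == ['5.6.7.8']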
Example #55
    def get_params_from_args(self, args):
        """ Searches through args to build a dict containing the parameters
        necessary to interact with Amazon EC2.

        Args:
          args: A Namespace containing the arguments that the user has
            invoked an AppScale Tool with.
        Returns:
          A dict of parameters for the EC2 agent.
        """
        # need to convert this to a dict if it is not already
        if not isinstance(args, dict):
            args = vars(args)

        params = {
            self.PARAM_CREDENTIALS: {},
            self.PARAM_GROUP: args['group'],
            self.PARAM_IMAGE_ID: args['machine'],
            self.PARAM_INSTANCE_TYPE: args['instance_type'],
            self.PARAM_KEYNAME: args['keyname'],
            self.PARAM_STATIC_IP: args.get(self.PARAM_STATIC_IP),
            self.PARAM_ZONE: args.get('zone'),
            self.PARAM_VERBOSE: args.get('verbose', False),
            self.PARAM_AUTOSCALE_AGENT: False
        }

        if params[self.PARAM_ZONE]:
            params[self.PARAM_REGION] = params[self.PARAM_ZONE][:-1]
        else:
            params[self.PARAM_REGION] = self.DEFAULT_REGION

        for credential in self.REQUIRED_CREDENTIALS:
            if args.get(credential):
                params[self.PARAM_CREDENTIALS][credential] = args[credential]
            else:
                raise AgentConfigurationException("Couldn't find {0} in your " \
                  "environment. Please set it and run AppScale again."
                  .format(credential))
        self.assert_credentials_are_valid(params)

        params[self.PARAM_SPOT] = args.get('use_spot_instances') is True

        if params[self.PARAM_SPOT]:
            if args.get('max_spot_price'):
                params[self.PARAM_SPOT_PRICE] = args['max_spot_price']
            else:
                params[self.PARAM_SPOT_PRICE] = self.get_optimal_spot_price(
                    self.open_connection(params),
                    params[self.PARAM_INSTANCE_TYPE], params[self.PARAM_ZONE])

        # If VPC id and Subnet id are not set assume classic networking should be
        # used.
        vpc_id = args.get(self.PARAM_VPC_ID)
        subnet_id = args.get(self.PARAM_SUBNET_ID)
        if not vpc_id and not subnet_id:
            AppScaleLogger.log(
                'Using Classic Networking since subnet and vpc were '
                'not specified.')
        # All further checks are for VPC Networking.
        elif (vpc_id or subnet_id) and not (vpc_id and subnet_id):
            raise AgentConfigurationException(
                'Both VPC id and Subnet id must be '
                'specified to use VPC Networking.')
        else:
            # VPC must exist.
            vpc_conn = self.open_vpc_connection(params)

            params[self.PARAM_VPC_ID] = args[self.PARAM_VPC_ID]
            try:
                vpc_conn.get_all_vpcs(params[self.PARAM_VPC_ID])
            except EC2ResponseError as e:
                raise AgentConfigurationException(
                    'Error looking for vpc: {}'.format(e.message))

            # Subnet must exist.
            all_subnets = vpc_conn.get_all_subnets(
                filters={'vpcId': params[self.PARAM_VPC_ID]})
            params[self.PARAM_SUBNET_ID] = args[self.PARAM_SUBNET_ID]

            if not any(subnet.id == params[self.PARAM_SUBNET_ID]
                       for subnet in all_subnets):
                raise AgentConfigurationException(
                    'Specified subnet {} does not exist '
                    'in vpc {}!'.format(params[self.PARAM_SUBNET_ID],
                                        params[self.PARAM_VPC_ID]))
        return params
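The region derivation above relies on EC2 naming conventions: an availability
zone is its region name plus a one-letter suffix, so dropping the final
character yields the region. A runnable illustration:

zone = 'us-east-1a'
region = zone[:-1]
assert region == 'us-east-1'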
Example #56
    def run_instances(self, count, parameters, security_configured,
                      public_ip_needed):
        """
    Spawns the specified number of EC2 instances using the parameters
    provided. This method is blocking in that it waits until the
    requested VMs are properly booted up. However if the requested
    VMs cannot be procured within 1800 seconds, this method will treat
    it as an error and return. (Also see documentation for the BaseAgent
    class)

    Args:
      count: Number of VMs to spawned.
      parameters: A dictionary of parameters. This must contain
        'keyname', 'group', 'image_id' and 'instance_type' parameters.
      security_configured: Uses this boolean value as an heuristic to
        detect brand new AppScale deployments.
      public_ip_needed: A boolean, specifies whether to launch with a public
        ip or not.
    Returns:
      A tuple of the form (instances, public_ips, private_ips)
    """
        image_id = parameters[self.PARAM_IMAGE_ID]
        instance_type = parameters[self.PARAM_INSTANCE_TYPE]
        keyname = parameters[self.PARAM_KEYNAME]
        group = parameters[self.PARAM_GROUP]
        zone = parameters[self.PARAM_ZONE]

        # In case of autoscaling, the server side passes these parameters as a
        # string, so this check makes sure that spot instances are only created
        # when the flag is True.
        spot = parameters[self.PARAM_SPOT] in ['True', 'true', True]

        AppScaleLogger.log("Starting {0} machines with machine id {1}, with " \
          "instance type {2}, keyname {3}, in security group {4}, in availability" \
          " zone {5}".format(count, image_id, instance_type, keyname, group, zone))

        if spot:
            AppScaleLogger.log("Using spot instances")
        else:
            AppScaleLogger.log("Using on-demand instances")

        start_time = datetime.datetime.now()
        active_public_ips = []
        active_private_ips = []
        active_instances = []

        # Make sure we do not have terminated instances using the same keyname.
        instances = self.__describe_instances(parameters)
        term_instance_info = self.__get_instance_info(instances, 'terminated',
                                                      keyname)
        if len(term_instance_info[2]):
            self.handle_failure('SSH keyname {0} is already registered to a '\
                                'terminated instance. Please change the "keyname" '\
                                'you specified in your AppScalefile to a different '\
                                'value. If the keyname was autogenerated, erase it '\
                                'to have a new one generated for you.'.format(keyname))

        try:
            attempts = 1
            while True:
                instance_info = self.describe_instances(parameters)
                active_public_ips = instance_info[0]
                active_private_ips = instance_info[1]
                active_instances = instance_info[2]

                # If security has been configured on this agent just now,
                # that's an indication that this is a fresh cloud deployment.
                # As such it's not expected to have any running VMs.
                if len(active_instances) > 0 or security_configured:
                    break
                elif attempts == self.DESCRIBE_INSTANCES_RETRY_COUNT:
                    self.handle_failure('Failed to invoke describe_instances')
                attempts += 1

            # Get subnet from parameters.
            subnet = parameters.get(self.PARAM_SUBNET_ID)

            network_interfaces = None
            groups = None

            conn = self.open_connection(parameters)

            # A subnet indicates we're using VPC Networking.
            if subnet:
                # Get security group by name.
                try:
                    sg = self.get_security_group_by_name(
                        conn, group, parameters[self.PARAM_VPC_ID])
                except SecurityGroupNotFoundException as e:
                    raise AgentRuntimeException(e.message)
                # Create network interface specification.
                network_interface = NetworkInterfaceSpecification(
                    associate_public_ip_address=public_ip_needed,
                    groups=[sg.id],
                    subnet_id=subnet)
                network_interfaces = NetworkInterfaceCollection(
                    network_interface)
            else:
                groups = [group]

            if spot:
                price = parameters[self.PARAM_SPOT_PRICE] or \
                  self.get_optimal_spot_price(conn, instance_type, zone)

                conn.request_spot_instances(
                    str(price),
                    image_id,
                    key_name=keyname,
                    instance_type=instance_type,
                    count=count,
                    placement=zone,
                    security_groups=groups,
                    network_interfaces=network_interfaces)
            else:
                conn.run_instances(image_id,
                                   count,
                                   count,
                                   key_name=keyname,
                                   instance_type=instance_type,
                                   placement=zone,
                                   security_groups=groups,
                                   network_interfaces=network_interfaces)

            instance_ids = []
            public_ips = []
            private_ips = []
            end_time = datetime.datetime.now() + datetime.timedelta(
                0, self.MAX_VM_CREATION_TIME)

            while datetime.datetime.now() < end_time:
                AppScaleLogger.log("Waiting for your instances to start...")
                public_ips, private_ips, instance_ids = self.describe_instances(
                    parameters)

                # If we need a public ip, make sure we actually get one.
                if public_ip_needed and not self.diff(public_ips, private_ips):
                    time.sleep(self.SLEEP_TIME)
                    continue

                public_ips = self.diff(public_ips, active_public_ips)
                private_ips = self.diff(private_ips, active_private_ips)
                instance_ids = self.diff(instance_ids, active_instances)
                if count == len(public_ips):
                    break
                time.sleep(self.SLEEP_TIME)

            if not public_ips:
                self.handle_failure('No public IPs were able to be procured '
                                    'within the time limit')

            if len(public_ips) != count:
                for index in range(0, len(public_ips)):
                    if public_ips[index] == '0.0.0.0':
                        instance_to_term = instance_ids[index]
                        AppScaleLogger.log('Instance {0} failed to get a public IP address '
                                'and is being terminated'.format(instance_to_term))
                        conn.terminate_instances([instance_to_term])

            end_time = datetime.datetime.now()
            total_time = end_time - start_time
            if spot:
                AppScaleLogger.log("Started {0} spot instances in {1} seconds" \
                  .format(count, total_time.seconds))
            else:
                AppScaleLogger.log("Started {0} on-demand instances in {1} seconds" \
                  .format(count, total_time.seconds))
            return instance_ids, public_ips, private_ips
        except EC2ResponseError as exception:
            self.handle_failure('EC2 response error while starting VMs: ' +
                                exception.error_message)
Example #57
  def gather_logs(cls, options):
    """Collects logs from each machine in the currently running AppScale
    deployment.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    """
    location = os.path.abspath(options.location)
    # First, make sure that the place we want to store logs doesn't
    # already exist.
    if os.path.exists(location):
      raise AppScaleException("Can't gather logs, as the location you " + \
        "specified, {}, already exists.".format(location))

    load_balancer_ip = LocalState.get_host_with_role(
      options.keyname, 'load_balancer')
    secret = LocalState.get_secret_key(options.keyname)
    acc = AppControllerClient(load_balancer_ip, secret)

    try:
      all_ips = acc.get_all_public_ips()
    except socket.error:  # Occurs when the AppController has failed.
      AppScaleLogger.warn("Couldn't get an up-to-date listing of the " + \
        "machines in this AppScale deployment. Using our locally cached " + \
        "info instead.")
      all_ips = LocalState.get_all_public_ips(options.keyname)

    # Get information about roles and public IPs
    # for creating navigation symlinks in gathered logs
    try:
      nodes_info = acc.get_role_info()
    except socket.error:  # Occurs when the AppController has failed.
      AppScaleLogger.warn("Couldn't get an up-to-date nodes info. "
                          "Using our locally cached info instead.")
      nodes_info = LocalState.get_local_nodes_info(options.keyname)
    nodes_dict = {node['public_ip']: node for node in nodes_info}

    # do the mkdir after we get the secret key, so that a bad keyname will
    # cause the tool to crash and not create this directory
    os.mkdir(location)

    # make dir for private IP navigation links
    private_ips_dir = os.path.join(location, 'symlinks', 'private-ips')
    utils.mkdir(private_ips_dir)

    # The log paths that we collect logs from.
    log_paths = [
      {'remote': '/opt/cassandra/cassandra/logs/*', 'local': 'cassandra'},
      {'remote': '/var/log/appscale'},
      {'remote': '/var/log/haproxy.log*'},
      {'remote': '/var/log/kern.log*'},
      {'remote': '/var/log/nginx'},
      {'remote': '/var/log/rabbitmq/*', 'local': 'rabbitmq'},
      {'remote': '/var/log/syslog*'},
      {'remote': '/var/log/zookeeper'}
    ]

    failures = False
    for public_ip in all_ips:
      # Get the logs from each node, and store them in our local directory
      local_dir = os.path.join(location, public_ip)
      utils.mkdir(local_dir)
      local_link = os.path.join('..', '..', public_ip)

      # Create symlinks for easier navigation in gathered logs
      node_info = nodes_dict.get(public_ip)
      if node_info:
        private_ip_dir = os.path.join(private_ips_dir, node_info["private_ip"])
        os.symlink(local_link, private_ip_dir)
        for role in node_info['roles']:
          role_dir = os.path.join(location, 'symlinks', role)
          utils.mkdir(role_dir)
          os.symlink(local_link, os.path.join(role_dir, public_ip))

      for log_path in log_paths:
        sub_dir = local_dir

        if 'local' in log_path:
          sub_dir = os.path.join(local_dir, log_path['local'])
          utils.mkdir(sub_dir)

        try:
          RemoteHelper.scp_remote_to_local(
            public_ip, options.keyname, log_path['remote'], sub_dir
          )
        except ShellException as shell_exception:
          failures = True
          AppScaleLogger.warn('Unable to collect logs from {} for host {}'.
                              format(log_path['remote'], public_ip))
          AppScaleLogger.verbose(
            'Encountered exception: {}'.format(str(shell_exception)))

    if failures:
      AppScaleLogger.log("Done copying to {}. There were failures while "
                         "collecting AppScale logs.".format(location))
    else:
      AppScaleLogger.success("Successfully collected all AppScale logs into "
                             "{}".format(location))
Example #58
  def run_instances(cls, options):
    """Starts a new AppScale deployment with the parameters given.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    Raises:
      AppControllerException: If the AppController on the head node crashes.
        When this occurs, the message in the exception contains the reason why
        the AppController crashed.
      BadConfigurationException: If the user passes in options that are not
        sufficient to start an AppScale deployment (e.g., running on EC2 but
        not specifying the AMI to use), or if the user provides us
        contradictory options (e.g., running on EC2 but not specifying EC2
        credentials).
    """
    LocalState.make_appscale_directory()
    LocalState.ensure_appscale_isnt_running(options.keyname, options.force)
    node_layout = NodeLayout(options)

    if options.infrastructure:
      if (not options.test and not options.force and
          not (options.disks or node_layout.are_disks_used())):
        LocalState.ensure_user_wants_to_run_without_disks()

    reduced_version = '.'.join(x for x in APPSCALE_VERSION.split('.')[:2])
    AppScaleLogger.log("Starting AppScale " + reduced_version)

    my_id = str(uuid.uuid4())
    AppScaleLogger.remote_log_tools_state(options, my_id, "started",
      APPSCALE_VERSION)

    head_node = node_layout.head_node()
    # Start VMs in cloud via cloud agent.
    if options.infrastructure:
      node_layout = RemoteHelper.start_all_nodes(options, node_layout)

      # Enables root logins and SSH access on the head node.
      RemoteHelper.enable_root_ssh(options, head_node.public_ip)
    AppScaleLogger.verbose("Node Layout: {}".format(node_layout.to_list()))

    # Ensure all nodes are compatible.
    RemoteHelper.ensure_machine_is_compatible(
      head_node.public_ip, options.keyname)

    # Use rsync to move custom code into the deployment.
    if options.rsync_source:
      AppScaleLogger.log("Copying over local copy of AppScale from {0}".
        format(options.rsync_source))
      RemoteHelper.rsync_files(head_node.public_ip, options.keyname,
                               options.rsync_source)

    # Start services on head node.
    RemoteHelper.start_head_node(options, my_id, node_layout)

    # Write deployment metadata to disk (facilitates SSH operations, etc.)
    db_master = node_layout.db_master().private_ip
    head_node = node_layout.head_node().public_ip
    LocalState.update_local_metadata(options, db_master, head_node)

    # Copy the locations.json to the head node
    RemoteHelper.copy_local_metadata(node_layout.head_node().public_ip,
                                     options.keyname)

    # Wait for services on head node to start.
    secret_key = LocalState.get_secret_key(options.keyname)
    acc = AppControllerClient(head_node, secret_key)
    try:
      while not acc.is_initialized():
        AppScaleLogger.log('Waiting for head node to initialize...')
        # This can take some time in particular the first time around, since
        # we will have to initialize the database.
        time.sleep(cls.SLEEP_TIME * 3)
    except socket.error as socket_error:
      AppScaleLogger.warn('Unable to initialize AppController: {}'.
                          format(socket_error.message))
      message = RemoteHelper.collect_appcontroller_crashlog(
        head_node, options.keyname)
      raise AppControllerException(message)

    # Set up admin account.
    try:
      # We don't need to have any exception information here: we do expect
      # some anyway while the UserAppServer is coming up.
      acc.does_user_exist("non-existent-user", True)
    except Exception:
      AppScaleLogger.log('UserAppServer not ready yet. Retrying ...')
      time.sleep(cls.SLEEP_TIME)

    if options.admin_user and options.admin_pass:
      AppScaleLogger.log("Using the provided admin username/password")
      username, password = options.admin_user, options.admin_pass
    elif options.test:
      AppScaleLogger.log("Using default admin username/password")
      username, password = LocalState.DEFAULT_USER, LocalState.DEFAULT_PASSWORD
    else:
      username, password = LocalState.get_credentials()

    RemoteHelper.create_user_accounts(username, password, head_node,
                                      options.keyname)
    acc.set_admin_role(username, 'true', cls.ADMIN_CAPABILITIES)

    # Wait for machines to finish loading and AppScale Dashboard to be deployed.
    RemoteHelper.wait_for_machines_to_finish_loading(head_node, options.keyname)

    try:
      login_host = acc.get_property('login')['login']
    except KeyError:
      raise AppControllerException('login property not found')

    RemoteHelper.sleep_until_port_is_open(
      login_host, RemoteHelper.APP_DASHBOARD_PORT)

    AppScaleLogger.success("AppScale successfully started!")
    AppScaleLogger.success(
      'View status information about your AppScale deployment at '
      'http://{}:{}'.format(login_host, RemoteHelper.APP_DASHBOARD_PORT))
    AppScaleLogger.remote_log_tools_state(options, my_id,
      "finished", APPSCALE_VERSION)
Example #59
  def upload_app(cls, options):
    """Uploads the given App Engine application into AppScale.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    Returns:
      A tuple containing the host and port where the application is serving
        traffic from.
    """
    custom_service_yaml = None
    if cls.TAR_GZ_REGEX.search(options.file):
      file_location = LocalState.extract_tgz_app_to_dir(options.file)
      created_dir = True
      version = Version.from_tar_gz(options.file)
    elif cls.ZIP_REGEX.search(options.file):
      file_location = LocalState.extract_zip_app_to_dir(options.file)
      created_dir = True
      version = Version.from_zip(options.file)
    elif os.path.isdir(options.file):
      file_location = options.file
      created_dir = False
      version = Version.from_directory(options.file)
    elif options.file.endswith('.yaml'):
      file_location = os.path.dirname(options.file)
      created_dir = False
      version = Version.from_yaml_file(options.file)
      custom_service_yaml = options.file
    else:
      raise AppEngineConfigException('{0} is not a tar.gz file, a zip file, ' \
        'or a directory. Please try uploading either a tar.gz file, a zip ' \
        'file, or a directory.'.format(options.file))

    if options.project:
      if version.runtime == 'java':
        raise BadConfigurationException("AppScale doesn't support --project "
          "for Java yet. Please specify the application id in "
          "appengine-web.xml.")

      version.project_id = options.project

    if version.project_id is None:
      if version.config_type == 'app.yaml':
        message = 'Specify --project or define "application" in your app.yaml'
      else:
        message = 'Define "application" in your appengine-web.xml'

      raise AppEngineConfigException(message)

    # Let users know that versions are not supported yet.
    AppEngineHelper.warn_if_version_defined(version, options.test)

    AppEngineHelper.validate_app_id(version.project_id)

    extras = {}
    if version.runtime == 'go':
      extras = LocalState.get_extra_go_dependencies(options.file, options.test)

    if (version.runtime == 'java'
        and AppEngineHelper.is_sdk_mismatch(file_location)):
      AppScaleLogger.warn(
        'AppScale did not find the correct SDK jar versions in your app. The '
        'current supported SDK version is '
        '{}.'.format(AppEngineHelper.SUPPORTED_SDK_VERSION))

    head_node_public_ip = LocalState.get_host_with_role(
      options.keyname, 'shadow')
    secret_key = LocalState.get_secret_key(options.keyname)
    admin_client = AdminClient(head_node_public_ip, secret_key)

    remote_file_path = RemoteHelper.copy_app_to_host(
      file_location, version.project_id, options.keyname,
      extras, custom_service_yaml)

    AppScaleLogger.log(
      'Deploying service {} for {}'.format(version.service_id,
                                           version.project_id))
    operation_id = admin_client.create_version(version, remote_file_path)

    # now that we've told the AppController to start our app, find out what port
    # the app is running on and wait for it to start serving
    AppScaleLogger.log("Please wait for your app to start serving.")

    deadline = time.time() + cls.MAX_OPERATION_TIME
    while True:
      if time.time() > deadline:
        raise AppScaleException('The deployment operation took too long.')
      operation = admin_client.get_operation(version.project_id, operation_id)
      if not operation['done']:
        time.sleep(1)
        continue

      if 'error' in operation:
        raise AppScaleException(operation['error']['message'])
      version_url = operation['response']['versionUrl']
      break

    AppScaleLogger.success(
      'Your app can be reached at the following URL: {}'.format(version_url))

    if created_dir:
      shutil.rmtree(file_location)

    match = re.match(r'http://(.+):(\d+)', version_url)
    login_host = match.group(1)
    http_port = int(match.group(2))
    return login_host, http_port
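The final parse assumes the Admin API reports an http URL with an explicit
port. A runnable illustration of the same regex (the address is made up):

import re

version_url = 'http://203.0.113.10:8080'
match = re.match(r'http://(.+):(\d+)', version_url)
login_host = match.group(1)          # '203.0.113.10'
http_port = int(match.group(2))      # 8080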
Example #60
  def terminate_instances(cls, options):
    """Stops all services running in an AppScale deployment, and in cloud
    deployments, also powers off the instances previously spawned.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    Raises:
      AppScaleException: If AppScale is not running, and thus can't be
        terminated.
    """
    try:
      infrastructure = LocalState.get_infrastructure(options.keyname)
    except IOError:
      raise AppScaleException("Cannot find AppScale's configuration for keyname {0}".
        format(options.keyname))

    if infrastructure == "xen" and options.terminate:
      raise AppScaleException("Terminate option is invalid for cluster mode.")

    if infrastructure == "xen" or not options.terminate:
      # We are in cluster mode: let's check if AppScale is running.
      if not os.path.exists(LocalState.get_secret_key_location(options.keyname)):
        raise AppScaleException("AppScale is not running with the keyname {0}".
          format(options.keyname))

    # Stop gracefully the AppScale deployment.
    try:
      RemoteHelper.terminate_virtualized_cluster(options.keyname,
                                                 options.clean)
    except (IOError, AppScaleException, AppControllerException,
            BadConfigurationException) as e:
      if not (infrastructure in InfrastructureAgentFactory.VALID_AGENTS and
            options.terminate):
        raise

      if options.test:
        AppScaleLogger.warn(e)
      else:
        AppScaleLogger.verbose(e)
        if isinstance(e, AppControllerException):
          response = raw_input(
            'AppScale may not have shut down properly, are you sure you want '
            'to continue terminating? (y/N) ')
        else:
          response = raw_input(
            'AppScale could not find the configuration files for this '
            'deployment, are you sure you want to continue terminating? '
            '(y/N) ')
        if response.lower() not in ['y', 'yes']:
          raise AppScaleException("Cancelled cloud termination.")


    # And if we are on a cloud infrastructure, terminate instances if
    # asked.
    if (infrastructure in InfrastructureAgentFactory.VALID_AGENTS and
          options.terminate):
      RemoteHelper.terminate_cloud_infrastructure(options.keyname)
    elif infrastructure in InfrastructureAgentFactory.VALID_AGENTS and not \
        options.terminate:
      AppScaleLogger.log("AppScale did not terminate any of your cloud "
                         "instances, to terminate them run 'appscale "
                         "down --terminate'")
    if options.clean:
      LocalState.clean_local_metadata(keyname=options.keyname)