def test_advanced_format_yaml_only(self):
   input_yaml = {'master' : self.ip_1, 'database' : self.ip_1,
     'appengine' : self.ip_1, 'open' : self.ip_2}
   options = self.default_options.copy()
   options['ips'] = input_yaml
   layout_1 = NodeLayout(options)
   self.assertEquals(True, layout_1.is_valid())
Example 2
    def valid_ssh_key(self, config, run_instances_opts):
        """ Checks if the tools can log into the head node with the current key.

    Args:
      config: A dictionary that includes the IPs layout (which itself is a dict
        mapping role names to IPs) and, optionally, the keyname to use.
      run_instances_opts: The arguments parsed from the appscale-run-instances
        command.

    Returns:
      A bool indicating whether or not the specified keyname can be used to log
      into the head node.

    Raises:
      BadConfigurationException: If the IPs layout was not a dictionary.
    """
        keyname = config['keyname']
        verbose = config.get('verbose', False)

        if not isinstance(config['ips_layout'], dict):
            raise BadConfigurationException(
                'ips_layout should be a dictionary. Please fix it and try again.'
            )

        ssh_key_location = self.APPSCALE_DIRECTORY + keyname + ".key"
        if not os.path.exists(ssh_key_location):
            return False

        all_ips = LocalState.get_all_public_ips(keyname)

        # If a login node is defined, use that to communicate with other nodes.
        node_layout = NodeLayout(run_instances_opts)
        head_node = node_layout.head_node()
        if head_node is not None:
            remote_key = '{}/ssh.key'.format(RemoteHelper.CONFIG_DIR)
            try:
                RemoteHelper.scp(head_node.public_ip, keyname,
                                 ssh_key_location, remote_key, verbose)
            except ShellException:
                return False

            for ip in all_ips:
                ssh_to_ip = 'ssh -i {key} -o StrictHostKeyChecking=no root@{ip} true'\
                  .format(key=remote_key, ip=ip)
                try:
                    RemoteHelper.ssh(head_node.public_ip,
                                     keyname,
                                     ssh_to_ip,
                                     verbose,
                                     user='******')
                except ShellException:
                    return False
            return True

        for ip in all_ips:
            if not self.can_ssh_to_ip(ip, keyname, verbose):
                return False

        return True
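For reference, the key-location convention read above (`self.APPSCALE_DIRECTORY + keyname + ".key"`) can be exercised on its own. A minimal self-contained sketch, assuming keys live under `~/.appscale/` (the directory value and keyname here are illustrative assumptions, not values from the code above):

import os

# Assumed location; mirrors the APPSCALE_DIRECTORY attribute used above.
APPSCALE_DIRECTORY = os.path.expanduser('~/.appscale/')
keyname = 'bookey'  # illustrative keyname

ssh_key_location = APPSCALE_DIRECTORY + keyname + '.key'
# valid_ssh_key() bails out early (returns False) when this file is missing.
print(os.path.exists(ssh_key_location))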
Example 4
  def add_keypair(cls, options):
    """Sets up passwordless SSH login to the machines used in a virtualized
    cluster deployment.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    Raises:
      AppScaleException: If any of the machines named in the ips_layout are
        not running, or do not have the SSH daemon running.
    """
    LocalState.require_ssh_commands(options.auto, options.verbose)
    LocalState.make_appscale_directory()

    path = LocalState.LOCAL_APPSCALE_PATH + options.keyname
    if options.add_to_existing:
      public_key = path + ".pub"
      private_key = path
    else:
      public_key, private_key = LocalState.generate_rsa_key(options.keyname,
        options.verbose)

    if options.auto:
      if 'root_password' in options:
        AppScaleLogger.log("Using the provided root password to log into " + \
          "your VMs.")
        password = options.root_password
      else:
        AppScaleLogger.log("Please enter the password for the root user on" + \
          " your VMs:")
        password = getpass.getpass()

    node_layout = NodeLayout(options)
    if not node_layout.is_valid():
      raise BadConfigurationException("There were problems with your " + \
        "placement strategy: " + str(node_layout.errors()))

    all_ips = [node.public_ip for node in node_layout.nodes]
    for ip in all_ips:
      # first, make sure ssh is actually running on the host machine
      if not RemoteHelper.is_port_open(ip, RemoteHelper.SSH_PORT,
        options.verbose):
        raise AppScaleException("SSH does not appear to be running at {0}. " \
          "Is the machine at {0} up and running? Make sure your IPs are " \
          "correct!".format(ip))

      # next, set up passwordless ssh
      AppScaleLogger.log("Executing ssh-copy-id for host: {0}".format(ip))
      if options.auto:
        LocalState.shell("{0} root@{1} {2} {3}".format(cls.EXPECT_SCRIPT, ip,
          private_key, password), options.verbose)
      else:
        LocalState.shell("ssh-copy-id -i {0} root@{1}".format(private_key, ip),
          options.verbose)

    AppScaleLogger.success("Generated a new SSH key for this deployment " + \
      "at {0}".format(private_key))
 def test_with_wrong_number_of_disks(self):
     # suppose that the user has specified two nodes, but only one EBS / PD disk
     # this should fail.
     input_yaml = {'controller': self.ip_1, 'servers': [self.ip_2]}
     options = self.default_options.copy()
     options['ips'] = input_yaml
     options['disks'] = {self.ip_1: 'disk_number_one'}
     layout = NodeLayout(options)
     self.assertEquals(False, layout.is_valid())
Example 6
    def add_keypair(cls, options):
        """Sets up passwordless SSH login to the machines used in a virtualized
    cluster deployment.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    """
        LocalState.require_ssh_commands(options.auto, options.verbose)
        LocalState.make_appscale_directory()

        path = LocalState.LOCAL_APPSCALE_PATH + options.keyname
        if options.add_to_existing:
            public_key = path + ".pub"
            private_key = path
        else:
            public_key, private_key = LocalState.generate_rsa_key(
                options.keyname, options.verbose)

        if options.auto:
            if 'root_password' in options:
                AppScaleLogger.log("Using the provided root password to log into " + \
                  "your VMs.")
                password = options.root_password
            else:
                AppScaleLogger.log("Please enter the password for the root user on" + \
                  " your VMs:")
                password = getpass.getpass()

        node_layout = NodeLayout(options)
        if not node_layout.is_valid():
            raise BadConfigurationException("There were problems with your " + \
              "placement strategy: " + str(node_layout.errors()))

        all_ips = [node.public_ip for node in node_layout.nodes]
        for ip in all_ips:
            # first, set up passwordless ssh
            AppScaleLogger.log(
                "Executing ssh-copy-id for host: {0}".format(ip))
            if options.auto:
                LocalState.shell(
                    "{0} root@{1} {2} {3}".format(cls.EXPECT_SCRIPT, ip,
                                                  private_key, password),
                    options.verbose)
            else:
                LocalState.shell(
                    "ssh-copy-id -i {0} root@{1}".format(private_key, ip),
                    options.verbose)

            # next, copy over the ssh keypair we generate
            RemoteHelper.scp(ip, options.keyname, public_key,
                             '/root/.ssh/id_rsa.pub', options.verbose)
            RemoteHelper.scp(ip, options.keyname, private_key,
                             '/root/.ssh/id_rsa', options.verbose)

        AppScaleLogger.success("Generated a new SSH key for this deployment " + \
          "at {0}".format(private_key))
Example 7
  def valid_ssh_key(self, config, run_instances_opts):
    """ Checks if the tools can log into the head node with the current key.

    Args:
      config: A dictionary that includes the IPs layout (which itself is a dict
        mapping role names to IPs) and, optionally, the keyname to use.
      run_instances_opts: The arguments parsed from the appscale-run-instances
        command.

    Returns:
      A bool indicating whether or not the specified keyname can be used to log
      into the head node.

    Raises:
      BadConfigurationException: If the IPs layout was not a dictionary.
    """
    keyname = config['keyname']
    verbose = config.get('verbose', False)

    if not isinstance(config['ips_layout'], dict):
      raise BadConfigurationException(
        'ips_layout should be a dictionary. Please fix it and try again.')

    ssh_key_location = self.APPSCALE_DIRECTORY + keyname + ".key"
    if not os.path.exists(ssh_key_location):
      return False

    all_ips = LocalState.get_all_public_ips(keyname)

    # If a login node is defined, use that to communicate with other nodes.
    node_layout = NodeLayout(run_instances_opts)
    head_node = node_layout.head_node()
    if head_node is not None:
      remote_key = '{}/ssh.key'.format(RemoteHelper.CONFIG_DIR)
      try:
        RemoteHelper.scp(
          head_node.public_ip, keyname, ssh_key_location, remote_key, verbose)
      except ShellException:
        return False

      for ip in all_ips:
        ssh_to_ip = 'ssh -i {key} -o StrictHostKeyChecking=no root@{ip} true'\
          .format(key=remote_key, ip=ip)
        try:
          RemoteHelper.ssh(
            head_node.public_ip, keyname, ssh_to_ip, verbose, user='******')
        except ShellException:
          return False
      return True

    for ip in all_ips:
      if not self.can_ssh_to_ip(ip, keyname, verbose):
        return False

    return True
Example 8
 def test_advanced_format_yaml_only(self):
     input_yaml = {
         'master': self.ip_1,
         'database': self.ip_1,
         'appengine': self.ip_1,
         'open': self.ip_2
     }
     options = self.default_options.copy()
     options['ips'] = input_yaml
     layout_1 = NodeLayout(options)
     self.assertEquals(True, layout_1.is_valid())
 def test_with_right_number_of_disks_but_not_unique(self):
     # suppose that the user has specified two nodes, but uses the same name for
     # both disks. This isn't acceptable.
     input_yaml = {'controller': self.ip_1, 'servers': [self.ip_2]}
     options = self.default_options.copy()
     options['ips'] = input_yaml
     options['disks'] = {
         self.ip_1: 'disk_number_one',
         self.ip_2: 'disk_number_one'
     }
     layout = NodeLayout(options)
     self.assertEquals(False, layout.is_valid())
Example 10
  def add_keypair(cls, options):
    """Sets up passwordless SSH login to the machines used in a virtualized
    cluster deployment.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    """
    LocalState.require_ssh_commands(options.auto, options.verbose)
    LocalState.make_appscale_directory()

    path = LocalState.LOCAL_APPSCALE_PATH + options.keyname
    if options.add_to_existing:
      public_key = path + ".pub"
      private_key = path
    else:
      public_key, private_key = LocalState.generate_rsa_key(options.keyname,
        options.verbose)

    if options.auto:
      if 'root_password' in options:
        AppScaleLogger.log("Using the provided root password to log into " + \
          "your VMs.")
        password = options.root_password
      else:
        AppScaleLogger.log("Please enter the password for the root user on" + \
          " your VMs:")
        password = getpass.getpass()

    node_layout = NodeLayout(options)
    if not node_layout.is_valid():
      raise BadConfigurationException("There were problems with your " + \
        "placement strategy: " + str(node_layout.errors()))

    all_ips = [node.public_ip for node in node_layout.nodes]
    for ip in all_ips:
      # first, set up passwordless ssh
      AppScaleLogger.log("Executing ssh-copy-id for host: {0}".format(ip))
      if options.auto:
        LocalState.shell("{0} root@{1} {2} {3}".format(cls.EXPECT_SCRIPT, ip,
          private_key, password), options.verbose)
      else:
        LocalState.shell("ssh-copy-id -i {0} root@{1}".format(private_key, ip),
          options.verbose)

      # next, copy over the ssh keypair we generate
      RemoteHelper.scp(ip, options.keyname, public_key, '/root/.ssh/id_rsa.pub',
        options.verbose)
      RemoteHelper.scp(ip, options.keyname, private_key, '/root/.ssh/id_rsa',
        options.verbose)

    AppScaleLogger.success("Generated a new SSH key for this deployment " + \
      "at {0}".format(private_key))
Example 11
    def upgrade(cls, options):
        """ Upgrades the deployment to the latest AppScale version.
    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    """
        node_layout = NodeLayout(options)
        if not node_layout.is_valid():
            raise BadConfigurationException(
                'Your ips_layout is invalid:\n{}'.format(node_layout.errors()))

        latest_tools = APPSCALE_VERSION
        try:
            AppScaleLogger.log(
                'Checking if an update is available for appscale-tools')
            latest_tools = latest_tools_version()
        except:
            # Prompt the user if version metadata can't be fetched.
            if not options.test:
                response = raw_input(
                    'Unable to check for the latest version of appscale-tools. Would '
                    'you like to continue upgrading anyway? (y/N) ')
                if response.lower() not in ['y', 'yes']:
                    raise AppScaleException('Cancelled AppScale upgrade.')

        if latest_tools > APPSCALE_VERSION:
            raise AppScaleException(
                "There is a newer version ({}) of appscale-tools available. Please "
                "upgrade the tools package before running 'appscale upgrade'.".
                format(latest_tools))

        master_ip = node_layout.head_node().public_ip
        upgrade_version_available = cls.get_upgrade_version_available()

        current_version = RemoteHelper.get_host_appscale_version(
            master_ip, options.keyname, options.verbose)

        # Don't run bootstrap if current version is later than the most recent
        # public one. Covers cases of revoked versions/tags and ensures we won't
        # try to downgrade the code.
        if current_version >= upgrade_version_available:
            AppScaleLogger.log(
                'AppScale is already up to date. Skipping code upgrade.')
            AppScaleLogger.log(
                'Running upgrade script to check if any other upgrades are needed.'
            )
            cls.shut_down_appscale_if_running(options)
            cls.run_upgrade_script(options, node_layout)
            return

        cls.shut_down_appscale_if_running(options)
        cls.upgrade_appscale(options, node_layout)
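One subtlety in the guard `latest_tools > APPSCALE_VERSION` above: if both values are plain dotted strings, the comparison is lexicographic, which misorders versions such as '3.10' and '3.9'. A minimal sketch of a numeric tuple comparison that avoids this (the helper name is an assumption for illustration, not part of the code above):

def version_tuple(version):
    # Split a dotted version string into integers for numeric comparison.
    return tuple(int(part) for part in version.split('.'))

assert '3.10' < '3.9'  # lexicographic string comparison: wrong order
assert version_tuple('3.10') > version_tuple('3.9')  # numeric: correct order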
Example 12
    def test_with_login_override(self):
        # if the user wants to set a login host, make sure that gets set as the
        # login node's public IP address instead of what we'd normally put in

        # use a simple deployment so we can get the login node with .head_node()
        input_yaml_1 = {'controller': self.ip_1, 'servers': [self.ip_2]}
        options_1 = self.default_options.copy()
        options_1['ips'] = input_yaml_1
        options_1['login_host'] = "www.booscale.com"
        layout_1 = NodeLayout(options_1)
        self.assertEquals(True, layout_1.is_valid())

        head_node = layout_1.head_node()
        self.assertEquals(options_1['login_host'], head_node.public_ip)
 def test_with_right_number_of_unique_disks(self):
     # suppose that the user has specified two nodes, and two EBS / PD disks
     # with different names. This is the desired user behavior.
     input_yaml = {'controller': self.ip_1, 'servers': [self.ip_2]}
     options = self.default_options.copy()
     options['ips'] = input_yaml
     options['disks'] = {
         self.ip_1: 'disk_number_one',
         self.ip_2: 'disk_number_two'
     }
     layout = NodeLayout(options)
     self.assertEquals(True, layout.is_valid())
     self.assertEquals('disk_number_one', layout.head_node().disk)
     self.assertEquals('disk_number_two', layout.other_nodes()[0].disk)
Example 14
 def test_with_wrong_number_of_disks(self):
   # suppose that the user has specified two nodes, but only one EBS / PD disk
   # this should fail.
   input_yaml = {
     'controller' : self.ip_1,
     'servers' : [self.ip_2]
   }
   options = self.default_options.copy()
   options['ips'] = input_yaml
   options['disks'] = {
     self.ip_1 : 'disk_number_one'
   }
   layout = NodeLayout(options)
   self.assertEquals(False, layout.is_valid())
Example 15
 def test_with_right_number_of_disks_but_not_unique(self):
   # suppose that the user has specified two nodes, but uses the same name for
   # both disks. This isn't acceptable.
   input_yaml = {
     'controller' : self.ip_1,
     'servers' : [self.ip_2]
   }
   options = self.default_options.copy()
   options['ips'] = input_yaml
   options['disks'] = {
     self.ip_1 : 'disk_number_one',
     self.ip_2 : 'disk_number_one'
   }
   layout = NodeLayout(options)
   self.assertEquals(False, layout.is_valid())
Example 16
    def add_instances(cls, options):
        """Adds additional machines to an AppScale deployment.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    """
        if 'master' in options.ips.keys():
            raise BadConfigurationException("Cannot add master nodes to an " + \
              "already running AppScale deployment.")

        # Skip checking for -n (replication) because we don't allow the user
        # to specify it here (only allowed in run-instances).
        additional_nodes_layout = NodeLayout(options)

        # In virtualized cluster deployments, we need to make sure that the user
        # has already set up SSH keys.
        if LocalState.get_from_yaml(options.keyname,
                                    'infrastructure') == "xen":
            for ip in options.ips.values():
                # throws a ShellException if the SSH key doesn't work
                RemoteHelper.ssh(ip, options.keyname, "ls", options.verbose)

        # Finally, find an AppController and send it a message to add
        # the given nodes with the new roles.
        AppScaleLogger.log("Sending request to add instances")
        login_ip = LocalState.get_login_host(options.keyname)
        acc = AppControllerClient(login_ip,
                                  LocalState.get_secret_key(options.keyname))
        acc.start_roles_on_nodes(json.dumps(options.ips))

        # TODO(cgb): Should we wait for the new instances to come up and get
        # initialized?
        AppScaleLogger.success("Successfully sent request to add instances " + \
          "to this AppScale deployment.")
Example 17
    def upgrade(cls, options):
        """ Upgrades the deployment to the latest AppScale version.
    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    """
        node_layout = NodeLayout(options)
        if not node_layout.is_valid():
            raise BadConfigurationException("Your ips_layout is invalid:\n{}".format(node_layout.errors()))

        latest_tools = APPSCALE_VERSION
        try:
            AppScaleLogger.log("Checking if an update is available for appscale-tools")
            latest_tools = latest_tools_version()
        except:
            # Prompt the user if version metadata can't be fetched.
            if not options.test:
                response = raw_input(
                    "Unable to check for the latest version of appscale-tools. Would "
                    "you like to continue upgrading anyway? (y/N) "
                )
                if response.lower() not in ["y", "yes"]:
                    raise AppScaleException("Cancelled AppScale upgrade.")

        if latest_tools > APPSCALE_VERSION:
            raise AppScaleException(
                "There is a newer version ({}) of appscale-tools available. Please "
                "upgrade the tools package before running 'appscale upgrade'.".format(latest_tools)
            )

        master_ip = node_layout.head_node().public_ip
        upgrade_version_available = cls.get_upgrade_version_available()

        current_version = RemoteHelper.get_host_appscale_version(master_ip, options.keyname, options.verbose)

        # Don't run bootstrap if current version is later than the most recent
        # public one. Covers cases of revoked versions/tags and ensures we won't
        # try to downgrade the code.
        if current_version >= upgrade_version_available:
            AppScaleLogger.log("AppScale is already up to date. Skipping code upgrade.")
            AppScaleLogger.log("Running upgrade script to check if any other upgrades are needed.")
            cls.shut_down_appscale_if_running(options)
            cls.run_upgrade_script(options, node_layout)
            return

        cls.shut_down_appscale_if_running(options)
        cls.upgrade_appscale(options, node_layout)
Example 18
  def test_with_login_override(self):
    # if the user wants to set a login host, make sure that gets set as the
    # login node's public IP address instead of what we'd normally put in

    # use a simple deployment so we can get the login node with .head_node()
    input_yaml_1 = {
      'controller' : self.ip_1,
      'servers' : [self.ip_2]
    }
    options_1 = self.default_options.copy()
    options_1['ips'] = input_yaml_1
    options_1['login_host'] = "www.booscale.com"
    layout_1 = NodeLayout(options_1)
    self.assertEquals(True, layout_1.is_valid())

    head_node = layout_1.head_node()
    self.assertEquals(options_1['login_host'], head_node.public_ip)
Example 19
 def test_with_right_number_of_unique_disks(self):
   # suppose that the user has specified two nodes, and two EBS / PD disks
   # with different names. This is the desired user behavior.
   input_yaml = {
     'controller' : self.ip_1,
     'servers' : [self.ip_2]
   }
   options = self.default_options.copy()
   options['ips'] = input_yaml
   options['disks'] = {
     self.ip_1 : 'disk_number_one',
     self.ip_2 : 'disk_number_two'
   }
   layout = NodeLayout(options)
   self.assertEquals(True, layout.is_valid())
   self.assertEquals('disk_number_one', layout.head_node().disk)
   self.assertEquals('disk_number_two', layout.other_nodes()[0].disk)
Example 20
    def test_generate_deployment_params(self):
        # this method is fairly light, so just make sure that it constructs the dict
        # to send to the AppController correctly
        options = flexmock(name='options',
                           table='cassandra',
                           keyname='boo',
                           appengine='1',
                           autoscale=False,
                           group='bazgroup',
                           infrastructure='ec2',
                           machine='ami-ABCDEFG',
                           instance_type='m1.large',
                           use_spot_instances=True,
                           max_spot_price=1.23,
                           alter_etc_resolv=True,
                           clear_datastore=False,
                           disks={'node-1': 'vol-ABCDEFG'},
                           zone='my-zone-1b',
                           verbose=True,
                           user_commands=[],
                           flower_password="******",
                           max_memory=ParseArgs.DEFAULT_MAX_MEMORY)
        node_layout = NodeLayout({
            'table': 'cassandra',
            'infrastructure': "ec2",
            'min': 1,
            'max': 1
        })

        expected = {
            'alter_etc_resolv': 'True',
            'clear_datastore': 'False',
            'table': 'cassandra',
            'hostname': 'public1',
            'ips': json.dumps([]),
            'keyname': 'boo',
            'replication': '1',
            'appengine': '1',
            'autoscale': 'False',
            'group': 'bazgroup',
            'machine': 'ami-ABCDEFG',
            'infrastructure': 'ec2',
            'instance_type': 'm1.large',
            'min_images': 1,
            'max_images': 1,
            'use_spot_instances': True,
            'user_commands': json.dumps([]),
            'max_spot_price': '1.23',
            'zone': json.dumps('my-zone-1b'),
            'verbose': 'True',
            'flower_password': '******',
            'max_memory': ParseArgs.DEFAULT_MAX_MEMORY
        }
        actual = LocalState.generate_deployment_params(
            options, node_layout, 'public1', {'max_spot_price': '1.23'})
        self.assertEquals(expected, actual)
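Note how scalar options such as `zone` are JSON-encoded in the expected dict above: `json.dumps('my-zone-1b')` yields the quoted string, not the bare value. A quick self-contained check:

import json

# json.dumps() wraps a bare string in quotes: "my-zone-1b" becomes '"my-zone-1b"'.
print(json.dumps('my-zone-1b'))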
Example 21
  def test_update_local_metadata(self):
    # mock out getting all the ips in the deployment from the head node
    fake_soap = flexmock(name='fake_soap')
    fake_soap.should_receive('get_all_public_ips').with_args('the secret') \
      .and_return(json.dumps(['public1']))
    role_info = [{
        'public_ip' : 'public1',
        'private_ip' : 'private1',
        'jobs' : ['shadow', 'db_master']
    }]
    fake_soap.should_receive('get_role_info').with_args('the secret') \
      .and_return(json.dumps(role_info))
    flexmock(SOAPpy)
    SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
      .and_return(fake_soap)

    # mock out reading the secret key
    fake_secret = flexmock(name='fake_secret')
    fake_secret.should_receive('read').and_return('the secret')
    builtins = flexmock(sys.modules['__builtin__'])
    builtins.should_call('open')
    builtins.should_receive('open').with_args(
      LocalState.get_secret_key_location('booscale'), 'r') \
      .and_return(fake_secret)

    # mock out writing the yaml file
    fake_locations_yaml = flexmock(name='fake_locations_yaml')
    fake_locations_yaml.should_receive('write').with_args(yaml.dump({
      'load_balancer': 'public1', 'instance_id': 'i-ABCDEFG',
      'secret': 'the secret', 'infrastructure': 'ec2',
      'group': 'boogroup', 'ips': 'public1', 'table': 'cassandra',
      'db_master': 'node-0', 'zone' : 'my-zone-1b'
    })).and_return()
    builtins.should_receive('open').with_args(
      LocalState.get_locations_yaml_location('booscale'), 'w') \
      .and_return(fake_locations_yaml)

    # and mock out writing the json file
    fake_locations_json = flexmock(name='fake_locations_json')
    fake_locations_json.should_receive('write').with_args(json.dumps(
      role_info)).and_return()
    builtins.should_receive('open').with_args(
      LocalState.get_locations_json_location('booscale'), 'w') \
      .and_return(fake_locations_json)

    options = flexmock(name='options', table='cassandra', infrastructure='ec2',
      keyname='booscale', group='boogroup', zone='my-zone-1b')
    node_layout = NodeLayout(options={
      'min' : 1,
      'max' : 1,
      'infrastructure' : 'ec2',
      'table' : 'cassandra'
    })
    host = 'public1'
    instance_id = 'i-ABCDEFG'
    LocalState.update_local_metadata(options, node_layout, host, instance_id)
 def test_is_database_replication_valid_with_db_slave(self):
     fake_node = flexmock()
     fake_node.should_receive('is_role').with_args('database').and_return(
         False)
     fake_node.should_receive('is_role').with_args('db_master').and_return(
         False)
     fake_node.should_receive('is_role').with_args('db_slave').and_return(
         True)
     output = NodeLayout({}).is_database_replication_valid([fake_node])
     self.assertTrue(output['result'])
Example 23
  def test_warn_users_on_unsupported_deployment_strategies(self):
    # don't test simple deployments - those are all supported
    # instead, test out some variations of the supported advanced
    # strategies, as those should not be supported
    advanced_yaml_1 = {
      'master' : self.ip_1,
      'appengine' : self.ip_1,
      'database' : self.ip_2,
      'zookeeper' : self.ip_2
    }
    options_1 = self.default_options.copy()
    options_1['ips'] = advanced_yaml_1
    advanced_layout_1 = NodeLayout(options_1)
    self.assertEquals(True, advanced_layout_1.is_valid())
    self.assertEquals(False, advanced_layout_1.is_supported())

    # four node deployments that don't match the only supported
    # deployment are not supported
    advanced_yaml_2 = {
      'master' : self.ip_1,
      'appengine' : self.ip_2,
      'database' : self.ip_3,
      'zookeeper' : self.ip_3,
      'open' : self.ip_4
    }
    options_2 = self.default_options.copy()
    options_2['ips'] = advanced_yaml_2
    advanced_layout_2 = NodeLayout(options_2)
    self.assertEquals(True, advanced_layout_2.is_valid())
    self.assertEquals(False, advanced_layout_2.is_supported())

    # eight node deployments that don't match the only supported
    # deployment are not supported
    advanced_yaml_3 = {
      'master' : self.ip_1,
      'appengine' : [self.ip_2, self.ip_3],
      'database' : [self.ip_4, self.ip_5],
      'zookeeper' : [self.ip_6, self.ip_7],
      'open' : self.ip_8
    }
    options_3 = self.default_options.copy()
    options_3['ips'] = advanced_yaml_3
    advanced_layout_3 = NodeLayout(options_3)
    self.assertEquals(True, advanced_layout_3.is_valid())
    self.assertEquals(False, advanced_layout_3.is_supported())
Example 24
  def test_dont_warn_users_on_supported_deployment_strategies(self):
    # all simple deployment strategies are supported
    input_yaml_1 = {'controller' : self.ip_1}
    options_1 = self.default_options.copy()
    options_1['ips'] = input_yaml_1
    layout_1 = NodeLayout(options_1)
    self.assertEquals(True, layout_1.is_supported())

    input_yaml_2 = {'controller' : self.ip_1, 'servers' : [self.ip_2]}
    options_2 = self.default_options.copy()
    options_2['ips'] = input_yaml_2
    layout_2 = NodeLayout(options_2)
    self.assertEquals(True, layout_2.is_supported())

    input_yaml_3 = {'controller' : self.ip_1, 'servers' : [self.ip_2, self.ip_3]}
    options_3 = self.default_options.copy()
    options_3['ips'] = input_yaml_3
    layout_3 = NodeLayout(options_3)
    self.assertEquals(True, layout_3.is_supported())

    # in advanced deployments, four nodes are ok with the following
    # layout: (1) load balancer, (2) appserver, (3) database,
    # (4) zookeeper
    advanced_yaml_1 = {
      'master' : self.ip_1,
      'appengine' : self.ip_2,
      'database' : self.ip_3,
      'zookeeper' : self.ip_4
    }
    options_1 = self.default_options.copy()
    options_1['ips'] = advanced_yaml_1
    advanced_layout_1 = NodeLayout(options_1)
    self.assertEquals(True, advanced_layout_1.is_valid())
    self.assertEquals(True, advanced_layout_1.is_supported())

    # in advanced deployments, eight nodes are ok with the following
    # layout: (1) load balancer, (2) appserver, (3) appserver,
    # (4) database, (5) database, (6) zookeeper, (7) zookeeper,
    # (8) zookeeper
    advanced_yaml_2 = {
      'master' : self.ip_1,
      'appengine' : [self.ip_2, self.ip_3],
      'database' : [self.ip_4, self.ip_5],
      'zookeeper' : [self.ip_6, self.ip_7, self.ip_8]
    }
    options_2 = self.default_options.copy()
    options_2['ips'] = advanced_yaml_2
    advanced_layout_2 = NodeLayout(options_2)
    self.assertEquals(True, advanced_layout_2.is_valid())
    self.assertEquals(True, advanced_layout_2.is_supported())
Example 25
    def test_warn_users_on_unsupported_deployment_strategies(self):
        # don't test simple deployments - those are all supported
        # instead, test out some variations of the supported advanced
        # strategies, as those should not be supported
        advanced_yaml_1 = {
            'master': self.ip_1,
            'appengine': self.ip_1,
            'database': self.ip_2,
            'zookeeper': self.ip_2
        }
        options_1 = self.default_options.copy()
        options_1['ips'] = advanced_yaml_1
        advanced_layout_1 = NodeLayout(options_1)
        self.assertEquals(True, advanced_layout_1.is_valid())
        self.assertEquals(False, advanced_layout_1.is_supported())

        # four node deployments that don't match the only supported
        # deployment are not supported
        advanced_yaml_2 = {
            'master': self.ip_1,
            'appengine': self.ip_2,
            'database': self.ip_3,
            'zookeeper': self.ip_3,
            'open': self.ip_4
        }
        options_2 = self.default_options.copy()
        options_2['ips'] = advanced_yaml_2
        advanced_layout_2 = NodeLayout(options_2)
        self.assertEquals(True, advanced_layout_2.is_valid())
        self.assertEquals(False, advanced_layout_2.is_supported())

        # eight node deployments that don't match the only supported
        # deployment are not supported
        advanced_yaml_3 = {
            'master': self.ip_1,
            'appengine': [self.ip_2, self.ip_3],
            'database': [self.ip_4, self.ip_5],
            'zookeeper': [self.ip_6, self.ip_7],
            'open': self.ip_8
        }
        options_3 = self.default_options.copy()
        options_3['ips'] = advanced_yaml_3
        advanced_layout_3 = NodeLayout(options_3)
        self.assertEquals(True, advanced_layout_3.is_valid())
        self.assertEquals(False, advanced_layout_3.is_supported())
Example 26
  def test_simple_layout_options(self):
    # Using Euca with no input yaml, and no max or min images is not ok
    options_1 = self.default_options.copy()
    options_1['infrastructure'] = 'euca'
    layout_1 = NodeLayout(options_1)
    self.assertEquals(False, layout_1.is_valid())
    self.assertEquals(NodeLayout.NO_YAML_REQUIRES_MIN, layout_1.errors())

    options_2 = self.default_options.copy()
    options_2['infrastructure'] = "euca"
    options_2['max'] = 2
    layout_2 = NodeLayout(options_2)
    self.assertEquals(False, layout_2.is_valid())
    self.assertEquals(NodeLayout.NO_YAML_REQUIRES_MIN, layout_2.errors())

    options_3 = self.default_options.copy()
    options_3['infrastructure'] = "euca"
    options_3['min'] = 2
    layout_3 = NodeLayout(options_3)
    self.assertEquals(False, layout_3.is_valid())
    self.assertEquals(NodeLayout.NO_YAML_REQUIRES_MAX, layout_3.errors())

    # Using Euca with no input yaml, with max and min images set is ok
    options_4 = self.default_options.copy()
    options_4['infrastructure'] = "euca"
    options_4['min'] = 2
    options_4['max'] = 2
    layout_4 = NodeLayout(options_4)
    self.assertEquals(True, layout_4.is_valid())

    # Using virtualized deployments with no input yaml is not ok
    options_5 = self.default_options.copy()
    layout_5 = NodeLayout(options_5)
    self.assertEquals(False, layout_5.is_valid())
    self.assertEquals([NodeLayout.INPUT_YAML_REQUIRED], layout_5.errors())
Example 27
    def test_generate_deployment_params(self):
        # this method is fairly light, so just make sure that it constructs the dict
        # to send to the AppController correctly
        options = flexmock(name='options',
                           table='cassandra',
                           keyname='boo',
                           appengine='1',
                           autoscale=False,
                           group='bazgroup',
                           infrastructure='ec2',
                           machine='ami-ABCDEFG',
                           instance_type='m1.large',
                           use_spot_instances=True,
                           max_spot_price=1.23)
        node_layout = NodeLayout({
            'table': 'cassandra',
            'infrastructure': "ec2",
            'min': 2,
            'max': 2
        })

        expected = {
            'table': 'cassandra',
            'hostname': 'public1',
            'ips': json.dumps({
                'node-1': [
                    'database', 'taskqueue_slave', 'taskqueue', 'memcache',
                    'db_slave', 'appengine'
                ]
            }),
            'keyname': 'boo',
            'replication': '2',
            'appengine': '1',
            'autoscale': 'False',
            'group': 'bazgroup',
            'machine': 'ami-ABCDEFG',
            'infrastructure': 'ec2',
            'instance_type': 'm1.large',
            'min_images': 2,
            'max_images': 2,
            'use_spot_instances': True,
            'max_spot_price': '1.23'
        }
        actual = LocalState.generate_deployment_params(
            options, node_layout, 'public1', {'max_spot_price': '1.23'})
        self.assertEquals(expected, actual)
Example 28
    def test_simple_layout_options(self):
        # Using Euca with no input yaml, and no max or min images is not ok
        options_1 = self.default_options.copy()
        options_1['infrastructure'] = 'euca'
        layout_1 = NodeLayout(options_1)
        self.assertEquals(False, layout_1.is_valid())
        self.assertEquals(NodeLayout.NO_YAML_REQUIRES_MIN, layout_1.errors())

        options_2 = self.default_options.copy()
        options_2['infrastructure'] = "euca"
        options_2['max'] = 2
        layout_2 = NodeLayout(options_2)
        self.assertEquals(False, layout_2.is_valid())
        self.assertEquals(NodeLayout.NO_YAML_REQUIRES_MIN, layout_2.errors())

        options_3 = self.default_options.copy()
        options_3['infrastructure'] = "euca"
        options_3['min'] = 2
        layout_3 = NodeLayout(options_3)
        self.assertEquals(False, layout_3.is_valid())
        self.assertEquals(NodeLayout.NO_YAML_REQUIRES_MAX, layout_3.errors())

        # Using Euca with no input yaml, with max and min images set is ok
        options_4 = self.default_options.copy()
        options_4['infrastructure'] = "euca"
        options_4['min'] = 2
        options_4['max'] = 2
        layout_4 = NodeLayout(options_4)
        self.assertEquals(True, layout_4.is_valid())

        # Using virtualized deployments with no input yaml is not ok
        options_5 = self.default_options.copy()
        layout_5 = NodeLayout(options_5)
        self.assertEquals(False, layout_5.is_valid())
        self.assertEquals([NodeLayout.INPUT_YAML_REQUIRED], layout_5.errors())
  def setUp(self):
    # mock out all logging, since it clutters our output
    flexmock(AppScaleLogger)
    AppScaleLogger.should_receive('log').and_return()

    # mock out all sleeps, as they aren't necessary for unit testing
    flexmock(time)
    time.should_receive('sleep').and_return()

    # set up some fake options so that we don't have to generate them via
    # ParseArgs
    self.options = flexmock(infrastructure='ec2', group='boogroup',
      machine='ami-ABCDEFG', instance_type='m1.large', keyname='bookey',
      table='cassandra', verbose=False, test=False, use_spot_instances=False,
      zone='my-zone-1b', static_ip=None)
    self.my_id = "12345"
    self.node_layout = NodeLayout(self.options)

    # set up phony AWS credentials for each test
    # ones that test not having them present can
    # remove them
    for credential in EucalyptusAgent.REQUIRED_EC2_CREDENTIALS:
      os.environ[credential] = "baz"
    os.environ['EC2_URL'] = "http://boo"

    # mock out calls to EC2
    # begin by assuming that our ssh keypair doesn't exist, and thus that we
    # need to create it
    key_contents = "key contents here"
    fake_key = flexmock(name="fake_key", material=key_contents)
    fake_key.should_receive('save').with_args(os.environ['HOME']+'/.appscale').and_return(None)

    fake_ec2 = flexmock(name="fake_ec2")
    fake_ec2.should_receive('get_key_pair').with_args('bookey') \
      .and_return(None)
    fake_ec2.should_receive('create_key_pair').with_args('bookey') \
      .and_return(fake_key)

    # mock out writing the secret key
    builtins = flexmock(sys.modules['__builtin__'])
    builtins.should_call('open')  # set the fall-through

    secret_key_location = LocalState.LOCAL_APPSCALE_PATH + "bookey.secret"
    fake_secret = flexmock(name="fake_secret")
    fake_secret.should_receive('write').and_return()
    builtins.should_receive('open').with_args(secret_key_location, 'w') \
      .and_return(fake_secret)

    # also, mock out the keypair writing and chmod'ing
    ssh_key_location = LocalState.LOCAL_APPSCALE_PATH + "bookey.key"
    fake_file = flexmock(name="fake_file")
    fake_file.should_receive('write').with_args(key_contents).and_return()

    builtins.should_receive('open').with_args(ssh_key_location, 'w') \
      .and_return(fake_file)

    flexmock(os)
    os.should_receive('chmod').with_args(ssh_key_location, 0600).and_return()

    # next, assume there are no security groups up at first, but then it gets
    # created.
    udp_rule = flexmock(from_port=1, to_port=65535, ip_protocol='udp')
    tcp_rule = flexmock(from_port=1, to_port=65535, ip_protocol='tcp')
    icmp_rule = flexmock(from_port=-1, to_port=-1, ip_protocol='icmp')
    group = flexmock(name='boogroup', rules=[tcp_rule, udp_rule, icmp_rule])
    fake_ec2.should_receive('get_all_security_groups').with_args().and_return([])
    fake_ec2.should_receive('get_all_security_groups').with_args('boogroup').and_return([group])

    # and then assume we can create and open our security group fine
    fake_ec2.should_receive('create_security_group').with_args('boogroup',
      'AppScale security group').and_return()
    fake_ec2.should_receive('authorize_security_group').and_return()

    # next, add in mocks for run_instances
    # the first time around, let's say that no machines are running
    # the second time around, let's say that our machine is pending
    # and that it's up the third time around
    fake_pending_instance = flexmock(state='pending')
    fake_pending_reservation = flexmock(instances=fake_pending_instance)

    fake_running_instance = flexmock(state='running', key_name='bookey',
      id='i-12345678', public_dns_name='public1', private_dns_name='private1')
    fake_running_reservation = flexmock(instances=fake_running_instance)

    fake_ec2.should_receive('get_all_instances').and_return([]) \
      .and_return([]) \
      .and_return([fake_pending_reservation]) \
      .and_return([fake_running_reservation])

    # next, assume that our run_instances command succeeds
    fake_ec2.should_receive('run_instances').and_return()

    # finally, inject our mocked EC2
    flexmock(boto.ec2)
    boto.ec2.should_receive('connect_to_region').and_return(fake_ec2)

    # assume that ssh comes up on the third attempt
    fake_socket = flexmock(name='fake_socket')
    fake_socket.should_receive('connect').with_args(('public1',
      RemoteHelper.SSH_PORT)).and_raise(Exception).and_raise(Exception) \
      .and_return(None)
    flexmock(socket)
    socket.should_receive('socket').and_return(fake_socket)

    # throw some default mocks together for when invoking via shell succeeds
    # and when it fails
    self.fake_temp_file = flexmock(name='fake_temp_file')
    self.fake_temp_file.should_receive('seek').with_args(0).and_return()
    self.fake_temp_file.should_receive('read').and_return('boo out')
    self.fake_temp_file.should_receive('close').and_return()

    flexmock(tempfile)
    tempfile.should_receive('NamedTemporaryFile')\
      .and_return(self.fake_temp_file)

    self.success = flexmock(name='success', returncode=0)
    self.success.should_receive('wait').and_return(0)

    self.failed = flexmock(name='failed', returncode=1)
    self.failed.should_receive('wait').and_return(1)

    # assume that root login isn't already enabled
    local_state = flexmock(LocalState)
    local_state.should_receive('shell') \
      .with_args(re.compile('^ssh .*root'), False, 1, stdin='ls') \
      .and_return(RemoteHelper.LOGIN_AS_UBUNTU_USER)

    # and assume that we can ssh in as ubuntu to enable root login
    local_state = flexmock(LocalState)
    local_state.should_receive('shell')\
      .with_args(re.compile('^ssh .*ubuntu'),False,5)\
      .and_return()

    # also assume that we can scp over our ssh keys
    local_state.should_receive('shell')\
      .with_args(re.compile('scp .*/root/.ssh/id_'),False,5)\
      .and_return()

    local_state.should_receive('shell')\
      .with_args(re.compile('scp .*/root/.appscale/bookey.key'),False,5)\
      .and_return()
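The setUp above leans heavily on flexmock's chained and_return calls, where successive invocations yield successive values; that is how the get_all_instances mock progresses from empty to pending to running. A minimal self-contained illustration of that behavior, assuming the flexmock package is installed:

from flexmock import flexmock

fake = flexmock(name='fake_service')
# Each call to poll() returns the next queued value; the last one repeats.
fake.should_receive('poll').and_return('empty').and_return('pending') \
  .and_return('running')

print(fake.poll())  # 'empty'
print(fake.poll())  # 'pending'
print(fake.poll())  # 'running'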
    def run_instances(cls, options):
        """Starts a new AppScale deployment with the parameters given.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    Raises:
      AppControllerException: If the AppController on the head node crashes.
        When this occurs, the message in the exception contains the reason why
        the AppController crashed.
      BadConfigurationException: If the user passes in options that are not
        sufficient to start an AppScale deployment (e.g., running on EC2 but
        not specifying the AMI to use), or if the user provides us
        contradictory options (e.g., running on EC2 but not specifying EC2
        credentials).
    """
        LocalState.make_appscale_directory()
        LocalState.ensure_appscale_isnt_running(options.keyname, options.force)

        if options.infrastructure:
            if not options.disks and not options.test and not options.force:
                LocalState.ensure_user_wants_to_run_without_disks()
            AppScaleLogger.log(
                "Starting AppScale " + APPSCALE_VERSION + " over the " + options.infrastructure + " cloud."
            )
        else:
            AppScaleLogger.log("Starting AppScale " + APPSCALE_VERSION + " over a virtualized cluster.")
        my_id = str(uuid.uuid4())
        AppScaleLogger.remote_log_tools_state(options, my_id, "started", APPSCALE_VERSION)

        node_layout = NodeLayout(options)
        if not node_layout.is_valid():
            raise BadConfigurationException(
                "There were errors with your " + "placement strategy:\n{0}".format(str(node_layout.errors()))
            )

        public_ip, instance_id = RemoteHelper.start_head_node(options, my_id, node_layout)
        AppScaleLogger.log(
            "\nPlease wait for AppScale to prepare your machines " + "for use. This can take few minutes."
        )

        # Write our metadata as soon as possible to let users SSH into those
        # machines via 'appscale ssh'.
        LocalState.update_local_metadata(options, node_layout, public_ip, instance_id)
        RemoteHelper.copy_local_metadata(public_ip, options.keyname, options.verbose)

        acc = AppControllerClient(public_ip, LocalState.get_secret_key(options.keyname))

        # Let's now wait till the server is initialized.
        while not acc.is_initialized():
            AppScaleLogger.log("Waiting for head node to initialize...")
            # This can take some time in particular the first time around, since
            # we will have to initialize the database.
            time.sleep(cls.SLEEP_TIME * 3)

        # Now let's make sure the UserAppServer is fully initialized.
        uaserver_client = UserAppClient(public_ip, LocalState.get_secret_key(options.keyname))
        try:
            # We don't need to have any exception information here: we do expect
            # some anyway while the UserAppServer is coming up.
            uaserver_client.does_user_exist("non-existent-user", True)
        except Exception as exception:
            AppScaleLogger.log("UserAppServer not ready yet. Retrying ...")
            time.sleep(cls.SLEEP_TIME)

        # Update our metadata again so that users can SSH into other boxes that
        # may have been started.
        LocalState.update_local_metadata(options, node_layout, public_ip, instance_id)
        RemoteHelper.copy_local_metadata(public_ip, options.keyname, options.verbose)

        if options.admin_user and options.admin_pass:
            AppScaleLogger.log("Using the provided admin username/password")
            username, password = options.admin_user, options.admin_pass
        elif options.test:
            AppScaleLogger.log("Using default admin username/password")
            username, password = LocalState.DEFAULT_USER, LocalState.DEFAULT_PASSWORD
        else:
            username, password = LocalState.get_credentials()

        RemoteHelper.create_user_accounts(username, password, public_ip, options.keyname, options.clear_datastore)
        uaserver_client.set_admin_role(username)

        RemoteHelper.wait_for_machines_to_finish_loading(public_ip, options.keyname)
        # Finally, update our metadata once we know that all of the machines are
        # up and have started all their API services.
        LocalState.update_local_metadata(options, node_layout, public_ip, instance_id)
        RemoteHelper.copy_local_metadata(public_ip, options.keyname, options.verbose)

        RemoteHelper.sleep_until_port_is_open(
            LocalState.get_login_host(options.keyname), RemoteHelper.APP_DASHBOARD_PORT, options.verbose
        )
        AppScaleLogger.success("AppScale successfully started!")
        AppScaleLogger.success(
            "View status information about your AppScale "
            + "deployment at http://{0}:{1}/status".format(
                LocalState.get_login_host(options.keyname), RemoteHelper.APP_DASHBOARD_PORT
            )
        )
        AppScaleLogger.remote_log_tools_state(options, my_id, "finished", APPSCALE_VERSION)
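Several steps above block on `sleep_until_port_is_open` before declaring the deployment ready. A minimal self-contained sketch of that polling pattern (the host, port, attempt count, and interval are illustrative assumptions, not values taken from the code above):

import socket
import time

def is_port_open(host, port, timeout=2.0):
    # Return True if a TCP connection to host:port succeeds.
    try:
        sock = socket.create_connection((host, port), timeout)
        sock.close()
        return True
    except (socket.error, OSError):
        return False

# Poll until the service comes up, sleeping between attempts.
for attempt in range(30):
    if is_port_open('127.0.0.1', 22):
        break
    time.sleep(5)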
Example 31
  def run_instances(cls, options):
    """Starts a new AppScale deployment with the parameters given.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    Raises:
      AppControllerException: If the AppController on the head node crashes.
        When this occurs, the message in the exception contains the reason why
        the AppController crashed.
      BadConfigurationException: If the user passes in options that are not
        sufficient to start an AppScale deployment (e.g., running on EC2 but
        not specifying the AMI to use), or if the user provides us
        contradictory options (e.g., running on EC2 but not specifying EC2
        credentials).
    """
    LocalState.make_appscale_directory()
    LocalState.ensure_appscale_isnt_running(options.keyname, options.force)
    if options.infrastructure:
      if not options.disks and not options.test and not options.force:
        LocalState.ensure_user_wants_to_run_without_disks()

    reduced_version = '.'.join(x for x in APPSCALE_VERSION.split('.')[:2])
    AppScaleLogger.log("Starting AppScale " + reduced_version)

    my_id = str(uuid.uuid4())
    AppScaleLogger.remote_log_tools_state(options, my_id, "started",
      APPSCALE_VERSION)

    node_layout = NodeLayout(options)
    if not node_layout.is_valid():
      raise BadConfigurationException("There were errors with your " + \
                                      "placement strategy:\n{0}".format(str(node_layout.errors())))

    head_node = node_layout.head_node()
    # Start VMs in cloud via cloud agent.
    if options.infrastructure:
      instance_ids, public_ips, private_ips = RemoteHelper.start_all_nodes(
        options, len(node_layout.nodes))
      AppScaleLogger.log("\nPlease wait for AppScale to prepare your machines "
                         "for use. This can take few minutes.")

      # Set newly obtained node layout info for this deployment.
      for i, _ in enumerate(instance_ids):
        node_layout.nodes[i].public_ip = public_ips[i]
        node_layout.nodes[i].private_ip = private_ips[i]
        node_layout.nodes[i].instance_id = instance_ids[i]

      # Enables root logins and SSH access on the head node.
      RemoteHelper.enable_root_ssh(options, head_node.public_ip)
    AppScaleLogger.verbose("Node Layout: {}".format(node_layout.to_list()),
                           options.verbose)

    # Ensure all nodes are compatible.
    RemoteHelper.ensure_machine_is_compatible(
      head_node.public_ip, options.keyname, options.verbose)

    # Use rsync to move custom code into the deployment.
    if options.scp:
      AppScaleLogger.log("Copying over local copy of AppScale from {0}".
        format(options.scp))
      RemoteHelper.rsync_files(head_node.public_ip, options.keyname, options.scp,
        options.verbose)

    # Start services on head node.
    RemoteHelper.start_head_node(options, my_id, node_layout)

    # Write deployment metadata to disk (facilitates SSH operations, etc.)
    db_master = node_layout.db_master().private_ip
    head_node = node_layout.head_node().public_ip
    LocalState.update_local_metadata(options, db_master, head_node)

    # Copy the locations.json to the head node
    RemoteHelper.copy_local_metadata(node_layout.head_node().public_ip,
                                     options.keyname, options.verbose)

    # Wait for services on head node to start.
    secret_key = LocalState.get_secret_key(options.keyname)
    acc = AppControllerClient(head_node, secret_key)
    try:
      while not acc.is_initialized():
        AppScaleLogger.log('Waiting for head node to initialize...')
        # This can take some time in particular the first time around, since
        # we will have to initialize the database.
        time.sleep(cls.SLEEP_TIME*3)
    except socket.error as socket_error:
      AppScaleLogger.warn('Unable to initialize AppController: {}'.
                          format(socket_error.message))
      message = RemoteHelper.collect_appcontroller_crashlog(
        head_node, options.keyname, options.verbose)
      raise AppControllerException(message)

    # Set up admin account.
    try:
      # We don't need to have any exception information here: we do expect
      # some anyway while the UserAppServer is coming up.
      acc.does_user_exist("non-existent-user", True)
    except Exception:
      AppScaleLogger.log('UserAppServer not ready yet. Retrying ...')
      time.sleep(cls.SLEEP_TIME)

    if options.admin_user and options.admin_pass:
      AppScaleLogger.log("Using the provided admin username/password")
      username, password = options.admin_user, options.admin_pass
    elif options.test:
      AppScaleLogger.log("Using default admin username/password")
      username, password = LocalState.DEFAULT_USER, LocalState.DEFAULT_PASSWORD
    else:
      username, password = LocalState.get_credentials()

    RemoteHelper.create_user_accounts(username, password, head_node,
                                      options.keyname)
    acc.set_admin_role(username, 'true', cls.ADMIN_CAPABILITIES)

    # Wait for machines to finish loading and AppScale Dashboard to be deployed.
    RemoteHelper.wait_for_machines_to_finish_loading(head_node, options.keyname)
    RemoteHelper.sleep_until_port_is_open(LocalState.get_login_host(
      options.keyname), RemoteHelper.APP_DASHBOARD_PORT, options.verbose)

    AppScaleLogger.success("AppScale successfully started!")
    AppScaleLogger.success("View status information about your AppScale " + \
                           "deployment at http://{0}:{1}".format(LocalState.get_login_host(
                           options.keyname), RemoteHelper.APP_DASHBOARD_PORT))
    AppScaleLogger.remote_log_tools_state(options, my_id,
      "finished", APPSCALE_VERSION)
Example n. 32
    def test_dont_warn_users_on_supported_deployment_strategies(self):
        # all simple deployment strategies are supported
        input_yaml_1 = {'controller': self.ip_1}
        options_1 = self.default_options.copy()
        options_1['ips'] = input_yaml_1
        layout_1 = NodeLayout(options_1)
        self.assertEquals(True, layout_1.is_supported())

        input_yaml_2 = {'controller': self.ip_1, 'servers': [self.ip_2]}
        options_2 = self.default_options.copy()
        options_2['ips'] = input_yaml_2
        layout_2 = NodeLayout(options_2)
        self.assertEquals(True, layout_2.is_supported())

        input_yaml_3 = {
            'controller': self.ip_1,
            'servers': [self.ip_2, self.ip_3]
        }
        options_3 = self.default_options.copy()
        options_3['ips'] = input_yaml_3
        layout_3 = NodeLayout(options_3)
        self.assertEquals(True, layout_3.is_supported())

        # in advanced deployments, four nodes are ok with the following
        # layout: (1) load balancer, (2) appserver, (3) database,
        # (4) zookeeper
        advanced_yaml_1 = {
            'master': self.ip_1,
            'appengine': self.ip_2,
            'database': self.ip_3,
            'zookeeper': self.ip_4
        }
        options_1 = self.default_options.copy()
        options_1['ips'] = advanced_yaml_1
        advanced_layout_1 = NodeLayout(options_1)
        self.assertEquals(True, advanced_layout_1.is_valid())
        self.assertEquals(True, advanced_layout_1.is_supported())

        # in advanced deployments, eight nodes are ok with the following
        # layout: (1) load balancer, (2) appserver, (3) appserver,
        # (4) database, (5) database, (6) zookeeper, (7) zookeeper,
        # (8) zookeeper
        advanced_yaml_2 = {
            'master': self.ip_1,
            'appengine': [self.ip_2, self.ip_3],
            'database': [self.ip_4, self.ip_5],
            'zookeeper': [self.ip_6, self.ip_7, self.ip_8]
        }
        options_2 = self.default_options.copy()
        options_2['ips'] = advanced_yaml_2
        advanced_layout_2 = NodeLayout(options_2)
        self.assertEquals(True, advanced_layout_2.is_valid())
        self.assertEquals(True, advanced_layout_2.is_supported())
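
For reference, the layouts this test accepts amount to a small whitelist. A minimal sketch of such a check, inferred purely from the cases above (the is_supported_layout helper and its rules are illustrative; NodeLayout.is_supported() remains the authoritative check):

def is_supported_layout(node_counts):
    """node_counts maps a role name to the number of nodes in that role.

    Mirrors the cases exercised above: simple deployments with exactly one
    controller plus any number of servers, and advanced deployments of
    exactly four (1/1/1/1) or eight (1/2/2/3) nodes.
    """
    if set(node_counts) <= {'controller', 'servers'}:
        return node_counts.get('controller', 0) == 1
    supported_advanced = [
        {'master': 1, 'appengine': 1, 'database': 1, 'zookeeper': 1},
        {'master': 1, 'appengine': 2, 'database': 2, 'zookeeper': 3},
    ]
    return node_counts in supported_advanced

# e.g. is_supported_layout({'master': 1, 'appengine': 2,
#                           'database': 2, 'zookeeper': 3})  # True
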
Example n. 33
  def test_simple_layout_yaml_only(self):
    # Specifying one controller and one server should be ok
    input_yaml_1 = {
      'controller' : self.ip_1,
      'servers' : [self.ip_2]
    }
    options_1 = self.default_options.copy()
    options_1['ips'] = input_yaml_1
    layout_1 = NodeLayout(options_1)
    self.assertEquals(True, layout_1.is_valid())

    # Specifying one controller should be ok
    input_yaml_2 = {'controller' : self.ip_1}
    options_2 = self.default_options.copy()
    options_2['ips'] = input_yaml_2
    layout_2 = NodeLayout(options_2)
    self.assertEquals(True, layout_2.is_valid())

    # Specifying the same IP more than once is not ok
    input_yaml_3 = {'controller' : self.ip_1, 'servers' : [self.ip_1]}
    options_3 = self.default_options.copy()
    options_3['ips'] = input_yaml_3
    layout_3 = NodeLayout(options_3)
    self.assertEquals(False, layout_3.is_valid())
    self.assertEquals(NodeLayout.DUPLICATE_IPS, layout_3.errors())

    # Failing to specify a controller is not ok
    input_yaml_4 = {'servers' : [self.ip_1, self.ip_2]}
    options_4 = self.default_options.copy()
    options_4['ips'] = input_yaml_4
    layout_4 = NodeLayout(options_4)
    self.assertEquals(False, layout_4.is_valid())
    self.assertEquals(NodeLayout.NO_CONTROLLER, layout_4.errors())

    # Specifying more than one controller is not ok
    input_yaml_5 = {'controller' : [self.ip_1, self.ip_2], 'servers' : [self.ip_3]}
    options_5 = self.default_options.copy()
    options_5['ips'] = input_yaml_5
    layout_5 = NodeLayout(options_5)
    self.assertEquals(False, layout_5.is_valid())
    self.assertEquals(NodeLayout.ONLY_ONE_CONTROLLER, layout_5.errors())

    # Specifying something other than controller and servers in simple
    # deployments is not ok
    input_yaml_6 = {'controller' : self.ip_1, 'servers' : [self.ip_2],
      'boo' : self.ip_3}
    options_6 = self.default_options.copy()
    options_6['ips'] = input_yaml_6
    layout_6 = NodeLayout(options_6)
    self.assertEquals(False, layout_6.is_valid())
    self.assertEquals(["The flag boo is not a supported flag"],
      layout_6.errors())
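
Taken together, the failure cases above outline the simple-layout rules: only the controller and servers roles are allowed, exactly one controller must be present, and no IP may repeat. A minimal sketch that accumulates every violation (the message text for the error constants is assumed; the assertions above show the real NodeLayout reports one error constant per failing layout):

# Constants mirroring the NodeLayout names used above; message text assumed.
DUPLICATE_IPS = ["Cannot specify the same IP address more than once."]
NO_CONTROLLER = ["No controller node was specified."]
ONLY_ONE_CONTROLLER = ["Only one controller node is allowed."]

def validate_simple_layout(ips):
    """Returns the list of errors for a simple-deployment 'ips' dict."""
    errors = []
    for flag in set(ips) - {'controller', 'servers'}:
        errors.append("The flag {0} is not a supported flag".format(flag))
    if 'controller' not in ips:
        errors.extend(NO_CONTROLLER)
    elif isinstance(ips['controller'], list):
        errors.extend(ONLY_ONE_CONTROLLER)
    all_ips = []
    for value in ips.values():
        all_ips.extend(value if isinstance(value, list) else [value])
    if len(all_ips) != len(set(all_ips)):
        errors.extend(DUPLICATE_IPS)
    return errors
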
Example n. 34
    def test_simple_layout_yaml_only(self):
        # Specifying one controller and one server should be ok
        input_yaml_1 = {'controller': self.ip_1, 'servers': [self.ip_2]}
        options_1 = self.default_options.copy()
        options_1['ips'] = input_yaml_1
        layout_1 = NodeLayout(options_1)
        self.assertEquals(True, layout_1.is_valid())

        # Specifying one controller should be ok
        input_yaml_2 = {'controller': self.ip_1}
        options_2 = self.default_options.copy()
        options_2['ips'] = input_yaml_2
        layout_2 = NodeLayout(options_2)
        self.assertEquals(True, layout_2.is_valid())

        # Specifying the same IP more than once is not ok
        input_yaml_3 = {'controller': self.ip_1, 'servers': [self.ip_1]}
        options_3 = self.default_options.copy()
        options_3['ips'] = input_yaml_3
        layout_3 = NodeLayout(options_3)
        self.assertEquals(False, layout_3.is_valid())
        self.assertEquals(NodeLayout.DUPLICATE_IPS, layout_3.errors())

        # Failing to specify a controller is not ok
        input_yaml_4 = {'servers': [self.ip_1, self.ip_2]}
        options_4 = self.default_options.copy()
        options_4['ips'] = input_yaml_4
        layout_4 = NodeLayout(options_4)
        self.assertEquals(False, layout_4.is_valid())
        self.assertEquals(NodeLayout.NO_CONTROLLER, layout_4.errors())

        # Specifying more than one controller is not ok
        input_yaml_5 = {
            'controller': [self.ip_1, self.ip_2],
            'servers': [self.ip_3]
        }
        options_5 = self.default_options.copy()
        options_5['ips'] = input_yaml_5
        layout_5 = NodeLayout(options_5)
        self.assertEquals(False, layout_5.is_valid())
        self.assertEquals(NodeLayout.ONLY_ONE_CONTROLLER, layout_5.errors())

        # Specifying something other than controller and servers in simple
        # deployments is not ok
        input_yaml_6 = {
            'controller': self.ip_1,
            'servers': [self.ip_2],
            'boo': self.ip_3
        }
        options_6 = self.default_options.copy()
        options_6['ips'] = input_yaml_6
        layout_6 = NodeLayout(options_6)
        self.assertEquals(False, layout_6.is_valid())
        self.assertEquals(["The flag boo is not a supported flag"],
                          layout_6.errors())
Example n. 35
  def run_instances(cls, options):
    """Starts a new AppScale deployment with the parameters given.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    Raises:
      BadConfigurationException: If the user passes in options that are not
        sufficient to start an AppScale deployment (e.g., running on EC2 but
        not specifying the AMI to use), or if the user provides us
        contradictory options (e.g., running on EC2 but not specifying EC2
        credentials).
    """
    LocalState.make_appscale_directory()
    LocalState.ensure_appscale_isnt_running(options.keyname, options.force)

    if options.infrastructure:
      AppScaleLogger.log("Starting AppScale " + APPSCALE_VERSION +
        " over the " + options.infrastructure + " cloud.")
    else:
      AppScaleLogger.log("Starting AppScale " + APPSCALE_VERSION +
        " over a virtualized cluster.")
    my_id = str(uuid.uuid4())
    AppScaleLogger.remote_log_tools_state(options, my_id, "started",
      APPSCALE_VERSION)

    node_layout = NodeLayout(options)
    if not node_layout.is_valid():
      raise BadConfigurationException("There were errors with your " + \
        "placement strategy:\n{0}".format(str(node_layout.errors())))

    if not node_layout.is_supported():
      AppScaleLogger.warn("Warning: This deployment strategy is not " + \
        "officially supported.")

    public_ip, instance_id = RemoteHelper.start_head_node(options, my_id,
      node_layout)
    AppScaleLogger.log("\nPlease wait for AppScale to prepare your machines " +
      "for use.")

    # Write our metadata as soon as possible to let users SSH into those
    # machines via 'appscale ssh'
    LocalState.update_local_metadata(options, node_layout, public_ip,
      instance_id)
    RemoteHelper.copy_local_metadata(public_ip, options.keyname,
      options.verbose)

    acc = AppControllerClient(public_ip, LocalState.get_secret_key(
      options.keyname))
    uaserver_host = acc.get_uaserver_host(options.verbose)

    RemoteHelper.sleep_until_port_is_open(uaserver_host, UserAppClient.PORT,
      options.verbose)

    # Update our metadata again so that users can SSH into other boxes that
    # may have been started.
    LocalState.update_local_metadata(options, node_layout, public_ip,
      instance_id)
    RemoteHelper.copy_local_metadata(public_ip, options.keyname,
      options.verbose)

    AppScaleLogger.log("UserAppServer is at {0}".format(uaserver_host))

    uaserver_client = UserAppClient(uaserver_host,
      LocalState.get_secret_key(options.keyname))

    if options.admin_user and options.admin_pass:
      AppScaleLogger.log("Using the provided admin username/password")
      username, password = options.admin_user, options.admin_pass
    elif options.test:
      AppScaleLogger.log("Using default admin username/password")
      username, password = LocalState.DEFAULT_USER, LocalState.DEFAULT_PASSWORD
    else:
      username, password = LocalState.get_credentials()

    RemoteHelper.create_user_accounts(username, password, uaserver_host,
      options.keyname)
    uaserver_client.set_admin_role(username)

    RemoteHelper.wait_for_machines_to_finish_loading(public_ip, options.keyname)
    # Finally, update our metadata once we know that all of the machines are
    # up and have started all their API services.
    LocalState.update_local_metadata(options, node_layout, public_ip,
      instance_id)
    RemoteHelper.copy_local_metadata(public_ip, options.keyname, options.verbose)

    RemoteHelper.sleep_until_port_is_open(LocalState.get_login_host(
      options.keyname), RemoteHelper.APP_LOAD_BALANCER_PORT, options.verbose)
    AppScaleLogger.success("AppScale successfully started!")
    AppScaleLogger.success("View status information about your AppScale " + \
      "deployment at http://{0}/status".format(LocalState.get_login_host(
      options.keyname)))
    AppScaleLogger.remote_log_tools_state(options, my_id,
      "finished", APPSCALE_VERSION)
Example n. 36
    def run_instances(cls, options):
        """Starts a new AppScale deployment with the parameters given.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    Raises:
      AppControllerException: If the AppController on the head node crashes.
        When this occurs, the message in the exception contains the reason why
        the AppController crashed.
      BadConfigurationException: If the user passes in options that are not
        sufficient to start an AppScale deployment (e.g., running on EC2 but
        not specifying the AMI to use), or if the user provides us
        contradictory options (e.g., running on EC2 but not specifying EC2
        credentials).
    """
        LocalState.make_appscale_directory()
        LocalState.ensure_appscale_isnt_running(options.keyname, options.force)

        if options.infrastructure:
            if not options.disks and not options.test and not options.force:
                LocalState.ensure_user_wants_to_run_without_disks()
            AppScaleLogger.log("Starting AppScale " + APPSCALE_VERSION +
                               " over the " + options.infrastructure +
                               " cloud.")
        else:
            AppScaleLogger.log("Starting AppScale " + APPSCALE_VERSION +
                               " over a virtualized cluster.")
        my_id = str(uuid.uuid4())
        AppScaleLogger.remote_log_tools_state(options, my_id, "started",
                                              APPSCALE_VERSION)

        node_layout = NodeLayout(options)
        if not node_layout.is_valid():
            raise BadConfigurationException("There were errors with your " + \
              "placement strategy:\n{0}".format(str(node_layout.errors())))

        if not node_layout.is_supported():
            AppScaleLogger.warn("Warning: This deployment strategy is not " + \
              "officially supported.")

        public_ip, instance_id = RemoteHelper.start_head_node(
            options, my_id, node_layout)
        AppScaleLogger.log(
            "\nPlease wait for AppScale to prepare your machines " +
            "for use.")

        # Write our metadata as soon as possible to let users SSH into those
        # machines via 'appscale ssh'
        LocalState.update_local_metadata(options, node_layout, public_ip,
                                         instance_id)
        RemoteHelper.copy_local_metadata(public_ip, options.keyname,
                                         options.verbose)

        acc = AppControllerClient(public_ip,
                                  LocalState.get_secret_key(options.keyname))
        try:
            uaserver_host = acc.get_uaserver_host(options.verbose)
        except Exception:
            message = RemoteHelper.collect_appcontroller_crashlog(
                public_ip, options.keyname, options.verbose)
            raise AppControllerException(message)

        RemoteHelper.sleep_until_port_is_open(uaserver_host,
                                              UserAppClient.PORT,
                                              options.verbose)

        # Update our metadata again so that users can SSH into other boxes that
        # may have been started.
        LocalState.update_local_metadata(options, node_layout, public_ip,
                                         instance_id)
        RemoteHelper.copy_local_metadata(public_ip, options.keyname,
                                         options.verbose)

        AppScaleLogger.log("UserAppServer is at {0}".format(uaserver_host))

        uaserver_client = UserAppClient(
            uaserver_host, LocalState.get_secret_key(options.keyname))

        if options.admin_user and options.admin_pass:
            AppScaleLogger.log("Using the provided admin username/password")
            username, password = options.admin_user, options.admin_pass
        elif options.test:
            AppScaleLogger.log("Using default admin username/password")
            username, password = LocalState.DEFAULT_USER, LocalState.DEFAULT_PASSWORD
        else:
            username, password = LocalState.get_credentials()

        RemoteHelper.create_user_accounts(username, password, uaserver_host,
                                          options.keyname,
                                          options.clear_datastore)
        uaserver_client.set_admin_role(username)

        RemoteHelper.wait_for_machines_to_finish_loading(
            public_ip, options.keyname)
        # Finally, update our metadata once we know that all of the machines are
        # up and have started all their API services.
        LocalState.update_local_metadata(options, node_layout, public_ip,
                                         instance_id)
        RemoteHelper.copy_local_metadata(public_ip, options.keyname,
                                         options.verbose)

        RemoteHelper.sleep_until_port_is_open(
            LocalState.get_login_host(options.keyname),
            RemoteHelper.APP_DASHBOARD_PORT, options.verbose)
        AppScaleLogger.success("AppScale successfully started!")
        AppScaleLogger.success("View status information about your AppScale " + \
          "deployment at http://{0}:{1}/status".format(LocalState.get_login_host(
          options.keyname), RemoteHelper.APP_DASHBOARD_PORT))
        AppScaleLogger.remote_log_tools_state(options, my_id, "finished",
                                              APPSCALE_VERSION)
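
The failure path this variant adds (collect the AppController crashlog, then raise AppControllerException) fits a small wrapper. A sketch, assuming the RemoteHelper and AppControllerException names already imported by the module (the call_or_collect_crashlog helper itself is hypothetical):

def call_or_collect_crashlog(call, public_ip, keyname, verbose):
    """Runs call(); if it raises, fetches the AppController crashlog from
    public_ip and re-raises it as an AppControllerException."""
    try:
        return call()
    except Exception:
        message = RemoteHelper.collect_appcontroller_crashlog(
            public_ip, keyname, verbose)
        raise AppControllerException(message)

# Hypothetical usage in the flow above:
# uaserver_host = call_or_collect_crashlog(
#     lambda: acc.get_uaserver_host(options.verbose),
#     public_ip, options.keyname, options.verbose)
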
Example n. 37
    def run_instances(cls, options):
        """Starts a new AppScale deployment with the parameters given.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    Raises:
      AppControllerException: If the AppController on the head node crashes.
        When this occurs, the message in the exception contains the reason why
        the AppController crashed.
      BadConfigurationException: If the user passes in options that are not
        sufficient to start an AppScale deployment (e.g., running on EC2 but
        not specifying the AMI to use), or if the user provides us
        contradictory options (e.g., running on EC2 but not specifying EC2
        credentials).
    """
        LocalState.make_appscale_directory()
        LocalState.ensure_appscale_isnt_running(options.keyname, options.force)

        if options.infrastructure:
            if not options.disks and not options.test and not options.force:
                LocalState.ensure_user_wants_to_run_without_disks()
            AppScaleLogger.log("Starting AppScale " + APPSCALE_VERSION +
                               " over the " + options.infrastructure +
                               " cloud.")
        else:
            AppScaleLogger.log("Starting AppScale " + APPSCALE_VERSION +
                               " over a virtualized cluster.")
        my_id = str(uuid.uuid4())
        AppScaleLogger.remote_log_tools_state(options, my_id, "started",
                                              APPSCALE_VERSION)

        node_layout = NodeLayout(options)
        if not node_layout.is_valid():
            raise BadConfigurationException("There were errors with your " + \
              "placement strategy:\n{0}".format(str(node_layout.errors())))

        public_ip, instance_id = RemoteHelper.start_head_node(
            options, my_id, node_layout)
        AppScaleLogger.log(
            "\nPlease wait for AppScale to prepare your machines " +
            "for use. This can take few minutes.")

        # Write our metadata as soon as possible to let users SSH into those
        # machines via 'appscale ssh'.
        LocalState.update_local_metadata(options, node_layout, public_ip,
                                         instance_id)
        RemoteHelper.copy_local_metadata(public_ip, options.keyname,
                                         options.verbose)

        acc = AppControllerClient(public_ip,
                                  LocalState.get_secret_key(options.keyname))

        # Let's now wait till the server is initialized.
        while not acc.is_initialized():
            AppScaleLogger.log('Waiting for head node to initialize...')
            # This can take some time, in particular the first time around, since
            # we will have to initialize the database.
            time.sleep(cls.SLEEP_TIME * 3)

        try:
            # We don't need any exception information here: we expect some
            # anyway while the UserAppServer is coming up.
            acc.does_user_exist("non-existent-user", True)
        except Exception:
            AppScaleLogger.log('UserAppServer not ready yet. Retrying ...')
            time.sleep(cls.SLEEP_TIME)

        # Update our metadata again so that users can SSH into other boxes that
        # may have been started.
        LocalState.update_local_metadata(options, node_layout, public_ip,
                                         instance_id)
        RemoteHelper.copy_local_metadata(public_ip, options.keyname,
                                         options.verbose)

        if options.admin_user and options.admin_pass:
            AppScaleLogger.log("Using the provided admin username/password")
            username, password = options.admin_user, options.admin_pass
        elif options.test:
            AppScaleLogger.log("Using default admin username/password")
            username, password = LocalState.DEFAULT_USER, LocalState.DEFAULT_PASSWORD
        else:
            username, password = LocalState.get_credentials()

        RemoteHelper.create_user_accounts(username, password, public_ip,
                                          options.keyname,
                                          options.clear_datastore)
        acc.set_admin_role(username, 'true', cls.ADMIN_CAPABILITIES)

        RemoteHelper.wait_for_machines_to_finish_loading(
            public_ip, options.keyname)
        # Finally, update our metadata once we know that all of the machines are
        # up and have started all their API services.
        LocalState.update_local_metadata(options, node_layout, public_ip,
                                         instance_id)
        RemoteHelper.copy_local_metadata(public_ip, options.keyname,
                                         options.verbose)

        RemoteHelper.sleep_until_port_is_open(
            LocalState.get_login_host(options.keyname),
            RemoteHelper.APP_DASHBOARD_PORT, options.verbose)
        AppScaleLogger.success("AppScale successfully started!")
        AppScaleLogger.success("View status information about your AppScale " + \
          "deployment at http://{0}:{1}/status".format(LocalState.get_login_host(
          options.keyname), RemoteHelper.APP_DASHBOARD_PORT))
        AppScaleLogger.remote_log_tools_state(options, my_id, "finished",
                                              APPSCALE_VERSION)
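
The admin-credential selection is repeated verbatim in every variant above. A sketch of extracting it, restating the same precedence (explicit flags, then test defaults, then an interactive prompt); the choose_admin_credentials name is illustrative and assumes the module's LocalState and AppScaleLogger imports:

def choose_admin_credentials(options):
    """Returns the (username, password) pair the flows above select."""
    if options.admin_user and options.admin_pass:
        AppScaleLogger.log("Using the provided admin username/password")
        return options.admin_user, options.admin_pass
    if options.test:
        AppScaleLogger.log("Using default admin username/password")
        return LocalState.DEFAULT_USER, LocalState.DEFAULT_PASSWORD
    return LocalState.get_credentials()
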
Example n. 38
    def run_instances(cls, options):
        """Starts a new AppScale deployment with the parameters given.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    Raises:
      AppControllerException: If the AppController on the head node crashes.
        When this occurs, the message in the exception contains the reason why
        the AppController crashed.
      BadConfigurationException: If the user passes in options that are not
        sufficient to start an AppScale deployment (e.g., running on EC2 but
        not specifying the AMI to use), or if the user provides us
        contradictory options (e.g., running on EC2 but not specifying EC2
        credentials).
    """
        LocalState.make_appscale_directory()
        LocalState.ensure_appscale_isnt_running(options.keyname, options.force)
        if options.infrastructure:
            if not options.disks and not options.test and not options.force:
                LocalState.ensure_user_wants_to_run_without_disks()

        reduced_version = ".".join(x for x in APPSCALE_VERSION.split(".")[:2])
        AppScaleLogger.log("Starting AppScale " + reduced_version)

        my_id = str(uuid.uuid4())
        AppScaleLogger.remote_log_tools_state(options, my_id, "started", APPSCALE_VERSION)

        node_layout = NodeLayout(options)
        if not node_layout.is_valid():
            raise BadConfigurationException(
                "There were errors with your " + "placement strategy:\n{0}".format(str(node_layout.errors()))
            )

        head_node = node_layout.head_node()
        # Start VMs in cloud via cloud agent.
        if options.infrastructure:
            instance_ids, public_ips, private_ips = RemoteHelper.start_all_nodes(options, len(node_layout.nodes))
            AppScaleLogger.log(
                "\nPlease wait for AppScale to prepare your machines " "for use. This can take few minutes."
            )

            # Set newly obtained node layout info for this deployment.
            for i, _ in enumerate(instance_ids):
                node_layout.nodes[i].public_ip = public_ips[i]
                node_layout.nodes[i].private_ip = private_ips[i]
                node_layout.nodes[i].instance_id = instance_ids[i]

            # Enables root logins and SSH access on the head node.
            RemoteHelper.enable_root_ssh(options, head_node.public_ip)
        AppScaleLogger.verbose("Node Layout: {}".format(node_layout.to_list()), options.verbose)

        # Ensure all nodes are compatible.
        RemoteHelper.ensure_machine_is_compatible(head_node.public_ip, options.keyname, options.verbose)

        # Use rsync to move custom code into the deployment.
        if options.scp:
            AppScaleLogger.log("Copying over local copy of AppScale from {0}".format(options.scp))
            RemoteHelper.rsync_files(head_node.public_ip, options.keyname, options.scp, options.verbose)

        # Start services on head node.
        RemoteHelper.start_head_node(options, my_id, node_layout)

        # Write deployment metadata to disk (facilitates SSH operations, etc.)
        db_master = node_layout.db_master().private_ip
        head_node = node_layout.head_node().public_ip
        LocalState.update_local_metadata(options, db_master, head_node)

        # Copy the locations.json to the head node
        RemoteHelper.copy_local_metadata(node_layout.head_node().public_ip, options.keyname, options.verbose)

        # Wait for services on head node to start.
        secret_key = LocalState.get_secret_key(options.keyname)
        acc = AppControllerClient(head_node, secret_key)
        try:
            while not acc.is_initialized():
                AppScaleLogger.log("Waiting for head node to initialize...")
                # This can take some time, in particular the first time around, since
                # we will have to initialize the database.
                time.sleep(cls.SLEEP_TIME * 3)
        except socket.error as socket_error:
            AppScaleLogger.warn("Unable to initialize AppController: {}".format(socket_error.message))
            message = RemoteHelper.collect_appcontroller_crashlog(head_node, options.keyname, options.verbose)
            raise AppControllerException(message)

        # Set up admin account.
        try:
            # We don't need any exception information here: we expect some
            # anyway while the UserAppServer is coming up.
            acc.does_user_exist("non-existent-user", True)
        except Exception:
            AppScaleLogger.log("UserAppServer not ready yet. Retrying ...")
            time.sleep(cls.SLEEP_TIME)

        if options.admin_user and options.admin_pass:
            AppScaleLogger.log("Using the provided admin username/password")
            username, password = options.admin_user, options.admin_pass
        elif options.test:
            AppScaleLogger.log("Using default admin username/password")
            username, password = LocalState.DEFAULT_USER, LocalState.DEFAULT_PASSWORD
        else:
            username, password = LocalState.get_credentials()

        RemoteHelper.create_user_accounts(username, password, head_node, options.keyname)
        acc.set_admin_role(username, "true", cls.ADMIN_CAPABILITIES)

        # Wait for machines to finish loading and AppScale Dashboard to be deployed.
        RemoteHelper.wait_for_machines_to_finish_loading(head_node, options.keyname)
        RemoteHelper.sleep_until_port_is_open(
            LocalState.get_login_host(options.keyname), RemoteHelper.APP_DASHBOARD_PORT, options.verbose
        )

        AppScaleLogger.success("AppScale successfully started!")
        AppScaleLogger.success(
            "View status information about your AppScale "
            + "deployment at http://{0}:{1}".format(
                LocalState.get_login_host(options.keyname), RemoteHelper.APP_DASHBOARD_PORT
            )
        )
        AppScaleLogger.remote_log_tools_state(options, my_id, "finished", APPSCALE_VERSION)