Example #1
    def terminate_virtualized_cluster(cls, keyname, is_verbose):
        """Stops all API services running on all nodes in the currently running
    AppScale deployment.

    Args:
      keyname: The name of the SSH keypair used for this AppScale deployment.
      is_verbose: A bool that indicates if we should print the commands executed
        to stdout.
    """
        AppScaleLogger.log(
            "Terminating appscale deployment with keyname {0}".format(keyname))
        time.sleep(2)

        shadow_host = LocalState.get_host_with_role(keyname, 'shadow')
        try:
            secret = LocalState.get_secret_key(keyname)
        except IOError:
            # We couldn't find the secret key: AppScale is most likely not
            # running.
            raise AppScaleException("Couldn't find AppScale secret key.")

        acc = AppControllerClient(shadow_host, secret)
        try:
            all_ips = acc.get_all_public_ips()
        except Exception as exception:
            AppScaleLogger.warn(
                'Saw Exception while getting deployments IPs {0}'.format(
                    str(exception)))
            all_ips = LocalState.get_all_public_ips(keyname)

        threads = []
        for ip in all_ips:
            thread = threading.Thread(target=cls.stop_remote_appcontroller,
                                      args=(ip, keyname, is_verbose))
            thread.start()
            threads.append(thread)

        for thread in threads:
            thread.join()

        boxes_shut_down = 0
        is_running_regex = re.compile("appscale-controller stop")
        for ip in all_ips:
            AppScaleLogger.log(
                "Shutting down AppScale API services at {0}".format(ip))
            while True:
                remote_output = cls.ssh(ip, keyname, 'ps x', is_verbose)
                AppScaleLogger.verbose(remote_output, is_verbose)
                if not is_running_regex.match(remote_output):
                    break
                time.sleep(0.3)
            boxes_shut_down += 1

        if boxes_shut_down != len(all_ips):
            raise AppScaleException(
                "Couldn't terminate your AppScale deployment on"
                " all machines - please do so manually.")

        AppScaleLogger.log(
            "Terminated AppScale on {0} machines.".format(boxes_shut_down))
Example #2
    def valid_ssh_key(self, config, run_instances_opts):
        """ Checks if the tools can log into the head node with the current key.

    Args:
      config: A dictionary that includes the IPs layout (which itself is a dict
        mapping role names to IPs) and, optionally, the keyname to use.
      run_instances_opts: The arguments parsed from the appscale-run-instances
        command.

    Returns:
      A bool indicating whether or not the specified keyname can be used to log
      into the head node.

    Raises:
      BadConfigurationException: If the IPs layout was not a dictionary.
    """
        keyname = config['keyname']
        verbose = config.get('verbose', False)

        if not isinstance(config['ips_layout'], dict):
            raise BadConfigurationException(
                'ips_layout should be a dictionary. Please fix it and try again.'
            )

        ssh_key_location = self.APPSCALE_DIRECTORY + keyname + ".key"
        if not os.path.exists(ssh_key_location):
            return False

        all_ips = LocalState.get_all_public_ips(keyname)

        # If a login node is defined, use that to communicate with other nodes.
        node_layout = NodeLayout(run_instances_opts)
        head_node = node_layout.head_node()
        if head_node is not None:
            remote_key = '{}/ssh.key'.format(RemoteHelper.CONFIG_DIR)
            try:
                RemoteHelper.scp(head_node.public_ip, keyname,
                                 ssh_key_location, remote_key, verbose)
            except ShellException:
                return False

            for ip in all_ips:
                ssh_to_ip = 'ssh -i {key} -o StrictHostkeyChecking=no root@{ip} true'\
                  .format(key=remote_key, ip=ip)
                try:
                    RemoteHelper.ssh(head_node.public_ip,
                                     keyname,
                                     ssh_to_ip,
                                     verbose,
                                     user='******')
                except ShellException:
                    return False
            return True

        for ip in all_ips:
            if not self.can_ssh_to_ip(ip, keyname, verbose):
                return False

        return True
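
Each per-node check above reduces to running ssh -i <key> -o StrictHostKeyChecking=no
root@<ip> true and treating a non-zero exit status (surfaced as a ShellException) as an
unusable key. A minimal sketch of that probe using only the standard library; the key
path and host are hypothetical, and subprocess stands in for RemoteHelper:

import subprocess

def can_ssh(ip, key_path):
    # True when `ssh ... true` exits with status 0, i.e. the key is accepted.
    command = ["ssh", "-i", key_path, "-o", "StrictHostKeyChecking=no",
               "root@{0}".format(ip), "true"]
    return subprocess.call(command) == 0

print(can_ssh("192.0.2.10", "/root/.appscale/appscale.key"))  # hypothetical values
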
Example #3
  def terminate_virtualized_cluster(cls, keyname, is_verbose):
    """Stops all API services running on all nodes in the currently running
    AppScale deployment.

    Args:
      keyname: The name of the SSH keypair used for this AppScale deployment.
      is_verbose: A bool that indicates if we should print the commands executed
        to stdout.
    """
    AppScaleLogger.log("Terminating appscale deployment with keyname {0}"
                       .format(keyname))
    time.sleep(2)

    shadow_host = LocalState.get_host_with_role(keyname, 'shadow')
    try:
      secret = LocalState.get_secret_key(keyname)
    except IOError:
      # We couldn't find the secret key: AppScale is most likely not
      # running.
      raise AppScaleException("Couldn't find AppScale secret key.")

    acc = AppControllerClient(shadow_host, secret)
    try:
      all_ips = acc.get_all_public_ips()
    except Exception as exception:
      AppScaleLogger.warn('Saw Exception while getting deployments IPs {0}'.
                          format(str(exception)))
      all_ips = LocalState.get_all_public_ips(keyname)

    threads = []
    for ip in all_ips:
      thread = threading.Thread(target=cls.stop_remote_appcontroller, args=(ip,
        keyname, is_verbose))
      thread.start()
      threads.append(thread)

    for thread in threads:
      thread.join()

    boxes_shut_down = 0
    is_running_regex = re.compile("appscale-controller stop")
    for ip in all_ips:
      AppScaleLogger.log("Shutting down AppScale API services at {0}".
                         format(ip))
      while True:
        remote_output = cls.ssh(ip, keyname, 'ps x', is_verbose)
        AppScaleLogger.verbose(remote_output, is_verbose)
        if not is_running_regex.match(remote_output):
          break
        time.sleep(0.3)
      boxes_shut_down += 1

    if boxes_shut_down != len(all_ips):
      raise AppScaleException("Couldn't terminate your AppScale deployment on"
                              " all machines - please do so manually.")

    AppScaleLogger.log("Terminated AppScale on {0} machines.".
                       format(boxes_shut_down))
Example #4
  def valid_ssh_key(self, config, run_instances_opts):
    """ Checks if the tools can log into the head node with the current key.

    Args:
      config: A dictionary that includes the IPs layout (which itself is a dict
        mapping role names to IPs) and, optionally, the keyname to use.
      run_instances_opts: The arguments parsed from the appscale-run-instances
        command.

    Returns:
      A bool indicating whether or not the specified keyname can be used to log
      into the head node.

    Raises:
      BadConfigurationException: If the IPs layout was not a dictionary.
    """
    keyname = config['keyname']
    verbose = config.get('verbose', False)

    if not isinstance(config['ips_layout'], dict):
      raise BadConfigurationException(
        'ips_layout should be a dictionary. Please fix it and try again.')

    ssh_key_location = self.APPSCALE_DIRECTORY + keyname + ".key"
    if not os.path.exists(ssh_key_location):
      return False

    all_ips = LocalState.get_all_public_ips(keyname)

    # If a login node is defined, use that to communicate with other nodes.
    node_layout = NodeLayout(run_instances_opts)
    head_node = node_layout.head_node()
    if head_node is not None:
      remote_key = '{}/ssh.key'.format(RemoteHelper.CONFIG_DIR)
      try:
        RemoteHelper.scp(
          head_node.public_ip, keyname, ssh_key_location, remote_key, verbose)
      except ShellException:
        return False

      for ip in all_ips:
        ssh_to_ip = 'ssh -i {key} -o StrictHostkeyChecking=no root@{ip} true'\
          .format(key=remote_key, ip=ip)
        try:
          RemoteHelper.ssh(
            head_node.public_ip, keyname, ssh_to_ip, verbose, user='******')
        except ShellException:
          return False
      return True

    for ip in all_ips:
      if not self.can_ssh_to_ip(ip, keyname, verbose):
        return False

    return True
Example #5
  def gather_logs(cls, options):
    """Collects logs from each machine in the currently running AppScale
    deployment.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    """
    # First, make sure that the place we want to store logs doesn't
    # already exist.
    if os.path.exists(options.location):
      raise AppScaleException("Can't gather logs, as the location you " + \
        "specified, {0}, already exists.".format(options.location))

    acc = AppControllerClient(LocalState.get_login_host(options.keyname),
      LocalState.get_secret_key(options.keyname))

    try:
      all_ips = acc.get_all_public_ips()
    except socket.error:  # Occurs when the AppController has failed.
      AppScaleLogger.warn("Couldn't get an up-to-date listing of the " + \
        "machines in this AppScale deployment. Using our locally cached " + \
        "info instead.")
      all_ips = LocalState.get_all_public_ips(options.keyname)

    # do the mkdir after we get the secret key, so that a bad keyname will
    # cause the tool to crash and not create this directory
    os.mkdir(options.location)

    for ip in all_ips:
      # Get the logs from each node, and store them in our local directory
      local_dir = "{0}/{1}".format(options.location, ip)
      os.mkdir(local_dir)
      RemoteHelper.scp_remote_to_local(ip, options.keyname, '/var/log/appscale',
        local_dir, options.verbose)
      try:
        RemoteHelper.scp_remote_to_local(ip, options.keyname,
          '/var/log/cassandra', local_dir, options.verbose)
      except ShellException:
        pass

      try:
        RemoteHelper.scp_remote_to_local(ip, options.keyname,
          '/var/log/zookeeper', local_dir, options.verbose)
      except ShellException:
        pass

      RemoteHelper.scp_remote_to_local(ip, options.keyname, '/var/log/kern.log',
        local_dir, options.verbose)
      RemoteHelper.scp_remote_to_local(ip, options.keyname, '/var/log/syslog',
        local_dir, options.verbose)
    AppScaleLogger.success("Successfully copied logs to {0}".format(
      options.location))
Example #6
    def gather_logs(cls, options):
        """Collects logs from each machine in the currently running AppScale
    deployment.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    """
        # First, make sure that the place we want to store logs doesn't
        # already exist.
        if os.path.exists(options.location):
            raise AppScaleException("Can't gather logs, as the location you " + \
              "specified, {0}, already exists.".format(options.location))

        acc = AppControllerClient(LocalState.get_login_host(options.keyname),
                                  LocalState.get_secret_key(options.keyname))

        try:
            all_ips = acc.get_all_public_ips()
        except socket.error:  # Occurs when the AppController has failed.
            AppScaleLogger.warn("Couldn't get an up-to-date listing of the " + \
              "machines in this AppScale deployment. Using our locally cached " + \
              "info instead.")
            all_ips = LocalState.get_all_public_ips(options.keyname)

        # do the mkdir after we get the secret key, so that a bad keyname will
        # cause the tool to crash and not create this directory
        os.mkdir(options.location)

        for ip in all_ips:
            # Get the logs from each node, and store them in our local directory
            local_dir = "{0}/{1}".format(options.location, ip)
            os.mkdir(local_dir)
            RemoteHelper.scp_remote_to_local(ip, options.keyname,
                                             '/var/log/appscale', local_dir,
                                             options.verbose)
        AppScaleLogger.success("Successfully copied logs to {0}".format(
            options.location))
Example #7
    def gather_logs(cls, options):
        """Collects logs from each machine in the currently running AppScale
    deployment.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    """
        # First, make sure that the place we want to store logs doesn't
        # already exist.
        if os.path.exists(options.location):
            raise AppScaleException("Can't gather logs, as the location you " + \
              "specified, {0}, already exists.".format(options.location))

        acc = AppControllerClient(LocalState.get_login_host(options.keyname),
                                  LocalState.get_secret_key(options.keyname))

        try:
            all_ips = acc.get_all_public_ips()
        except socket.error:  # Occurs when the AppController has failed.
            AppScaleLogger.warn("Couldn't get an up-to-date listing of the " + \
              "machines in this AppScale deployment. Using our locally cached " + \
              "info instead.")
            all_ips = LocalState.get_all_public_ips(options.keyname)

        # do the mkdir after we get the secret key, so that a bad keyname will
        # cause the tool to crash and not create this directory
        os.mkdir(options.location)

        # The log paths that we collect logs from.
        log_paths = [
            '/var/log/appscale', '/var/log/kern.log*', '/var/log/monit.log*',
            '/var/log/nginx', '/var/log/syslog*', '/var/log/zookeeper'
        ]

        failures = False
        for ip in all_ips:
            # Get the logs from each node, and store them in our local directory
            local_dir = "{0}/{1}".format(options.location, ip)
            os.mkdir(local_dir)

            for log_path in log_paths:
                try:
                    RemoteHelper.scp_remote_to_local(ip, options.keyname,
                                                     log_path, local_dir,
                                                     options.verbose)
                except ShellException as shell_exception:
                    failures = True
                    AppScaleLogger.warn(
                        "Unable to collect logs from '{}' for host '{}'".
                        format(log_path, ip))
                    AppScaleLogger.verbose(
                        "Encountered exception: {}".format(
                            str(shell_exception)), options.verbose)

        if failures:
            AppScaleLogger.log(
                "Done copying to {0}. There were "
                "failures while collecting AppScale logs.".format(
                    options.location))
        else:
            AppScaleLogger.success(
                "Successfully collected all AppScale logs into "
                "{0}".format(options.location))
Example #8
    def down(self, clean=False, terminate=False):
        """ 'down' provides a nicer experience for users than the
    appscale-terminate-instances command, by using the configuration options
    present in the AppScalefile found in the current working directory.

    Args:
      clean: A boolean to indicate if the deployment data and metadata
        needs to be clean. This will clear the datastore.
      terminate: A boolean to indicate if instances needs to be terminated
        (valid only if we spawn instances at start).

    Raises:
      AppScalefileException: If there is no AppScalefile in the current working
      directory.
    """
        contents = self.read_appscalefile()

        # Construct a terminate-instances command from the file's contents
        command = []
        contents_as_yaml = yaml.safe_load(contents)

        if 'verbose' in contents_as_yaml and contents_as_yaml[
                'verbose'] == True:
            is_verbose = contents_as_yaml['verbose']
            command.append("--verbose")
        else:
            is_verbose = False

        if 'keyname' in contents_as_yaml:
            keyname = contents_as_yaml['keyname']
            command.append("--keyname")
            command.append(contents_as_yaml['keyname'])
        else:
            keyname = 'appscale'

        if "EC2_ACCESS_KEY" in contents_as_yaml:
            os.environ["EC2_ACCESS_KEY"] = contents_as_yaml["EC2_ACCESS_KEY"]

        if "EC2_SECRET_KEY" in contents_as_yaml:
            os.environ["EC2_SECRET_KEY"] = contents_as_yaml["EC2_SECRET_KEY"]

        if "EC2_URL" in contents_as_yaml:
            os.environ["EC2_URL"] = contents_as_yaml["EC2_URL"]

        if clean:
            if 'test' not in contents_as_yaml or contents_as_yaml[
                    'test'] != True:
                LocalState.confirm_or_abort(
                    "Clean will delete every data in the deployment.")
            all_ips = LocalState.get_all_public_ips(keyname)
            for ip in all_ips:
                RemoteHelper.ssh(ip, keyname, self.TERMINATE, is_verbose)
            AppScaleLogger.success(
                "Successfully cleaned your AppScale deployment.")

        if terminate:
            infrastructure = LocalState.get_infrastructure(keyname)
            if infrastructure != "xen" and not LocalState.are_disks_used(
                    keyname) and 'test' not in contents_as_yaml:
                LocalState.confirm_or_abort(
                    "Terminate will delete instances and the data on them.")
            command.append("--terminate")

        if 'test' in contents_as_yaml and contents_as_yaml['test'] == True:
            command.append("--test")

        # Finally, exec the command. Don't worry about validating it -
        # appscale-terminate-instances will do that for us.
        options = ParseArgs(command, "appscale-terminate-instances").args
        AppScaleTools.terminate_instances(options)

        LocalState.cleanup_appscale_files(keyname, terminate)
        AppScaleLogger.success(
            "Successfully shut down your AppScale deployment.")
Example #9
    def gather_logs(cls, options):
        """Collects logs from each machine in the currently running AppScale
        deployment.

        Args:
          options: A Namespace that has fields for each parameter that can be
            passed in via the command-line interface.
        """
        # First, make sure that the place we want to store logs doesn't
        # already exist.
        if os.path.exists(options.location):
            raise AppScaleException(
                "Can't gather logs, as the location you " + "specified, {0}, already exists.".format(options.location)
            )

        acc = AppControllerClient(
            LocalState.get_login_host(options.keyname), LocalState.get_secret_key(options.keyname)
        )

        try:
            all_ips = acc.get_all_public_ips()
        except socket.error:  # Occurs when the AppController has failed.
            AppScaleLogger.warn(
                "Couldn't get an up-to-date listing of the "
                + "machines in this AppScale deployment. Using our locally cached "
                + "info instead."
            )
            all_ips = LocalState.get_all_public_ips(options.keyname)

        # do the mkdir after we get the secret key, so that a bad keyname will
        # cause the tool to crash and not create this directory
        os.mkdir(options.location)

        # The log paths that we collect logs from.
        log_paths = [
            "/var/log/appscale",
            "/var/log/kern.log*",
            "/var/log/monit.log*",
            "/var/log/nginx",
            "/var/log/syslog*",
            "/var/log/zookeeper",
        ]

        failures = False
        for ip in all_ips:
            # Get the logs from each node, and store them in our local directory
            local_dir = "{0}/{1}".format(options.location, ip)
            os.mkdir(local_dir)

            for log_path in log_paths:
                try:
                    RemoteHelper.scp_remote_to_local(ip, options.keyname, log_path, local_dir, options.verbose)
                except ShellException as shell_exception:
                    failures = True
                    AppScaleLogger.warn("Unable to collect logs from '{}' for host '{}'".format(log_path, ip))
                    AppScaleLogger.verbose("Encountered exception: {}".format(str(shell_exception)), options.verbose)

        if failures:
            AppScaleLogger.log(
                "Done copying to {0}. There were " "failures while collecting AppScale logs.".format(options.location)
            )
        else:
            AppScaleLogger.success("Successfully collected all AppScale logs into " "{0}".format(options.location))
Example #10
  def down(self, clean=False, terminate=False):
    """ 'down' provides a nicer experience for users than the
    appscale-terminate-instances command, by using the configuration options
    present in the AppScalefile found in the current working directory.

    Args:
      clean: A boolean indicating whether the deployment data and metadata
        need to be cleaned. This will clear the datastore.
      terminate: A boolean indicating whether instances need to be terminated
        (valid only if we spawned instances at start).

    Raises:
      AppScalefileException: If there is no AppScalefile in the current working
      directory.
    """
    contents = self.read_appscalefile()

    # Construct a terminate-instances command from the file's contents
    command = []
    contents_as_yaml = yaml.safe_load(contents)

    if 'verbose' in contents_as_yaml and contents_as_yaml['verbose'] == True:
      is_verbose = contents_as_yaml['verbose']
      command.append("--verbose")
    else:
      is_verbose = False

    if 'keyname' in contents_as_yaml:
      keyname = contents_as_yaml['keyname']
      command.append("--keyname")
      command.append(contents_as_yaml['keyname'])
    else:
      keyname = 'appscale'

    if "EC2_ACCESS_KEY" in contents_as_yaml:
      os.environ["EC2_ACCESS_KEY"] = contents_as_yaml["EC2_ACCESS_KEY"]

    if "EC2_SECRET_KEY" in contents_as_yaml:
      os.environ["EC2_SECRET_KEY"] = contents_as_yaml["EC2_SECRET_KEY"]

    if "EC2_URL" in contents_as_yaml:
      os.environ["EC2_URL"] = contents_as_yaml["EC2_URL"]

    if clean:
      if 'test' not in contents_as_yaml or contents_as_yaml['test'] != True:
        LocalState.confirm_or_abort("Clean will delete every data in the deployment.")
      all_ips = LocalState.get_all_public_ips(keyname)
      for ip in all_ips:
        RemoteHelper.ssh(ip, keyname, self.TERMINATE, is_verbose)
      AppScaleLogger.success("Successfully cleaned your AppScale deployment.")

    if terminate:
      infrastructure = LocalState.get_infrastructure(keyname)
      if infrastructure != "xen" and not LocalState.are_disks_used(
        keyname) and 'test' not in contents_as_yaml:
        LocalState.confirm_or_abort("Terminate will delete instances and the data on them.")
      command.append("--terminate")

    if 'test' in contents_as_yaml and contents_as_yaml['test'] == True:
      command.append("--test")

    # Finally, exec the command. Don't worry about validating it -
    # appscale-terminate-instances will do that for us.
    options = ParseArgs(command, "appscale-terminate-instances").args
    AppScaleTools.terminate_instances(options)

    LocalState.cleanup_appscale_files(keyname, terminate)
    AppScaleLogger.success("Successfully shut down your AppScale deployment.")