Example #1
    def terminate_virtualized_cluster(cls, keyname, is_verbose):
        """Stops all API services running on all nodes in the currently running
    AppScale deployment.

    Args:
      keyname: The name of the SSH keypair used for this AppScale deployment.
      is_verbose: A bool that indicates if we should print the commands executed
        to stdout.
    """
        AppScaleLogger.log(
            "Terminating appscale deployment with keyname {0}".format(keyname))
        time.sleep(2)

        shadow_host = LocalState.get_host_with_role(keyname, 'shadow')
        try:
            secret = LocalState.get_secret_key(keyname)
        except IOError:
            # We couldn't find the secret key: AppScale is most likely not
            # running.
            raise AppScaleException("Couldn't find AppScale secret key.")

        acc = AppControllerClient(shadow_host, secret)
        try:
            all_ips = acc.get_all_public_ips()
        except Exception as exception:
            AppScaleLogger.warn(
                'Saw Exception while getting deployments IPs {0}'.format(
                    str(exception)))
            all_ips = LocalState.get_all_public_ips(keyname)

        threads = []
        for ip in all_ips:
            thread = threading.Thread(target=cls.stop_remote_appcontroller,
                                      args=(ip, keyname, is_verbose))
            thread.start()
            threads.append(thread)

        for thread in threads:
            thread.join()

        boxes_shut_down = 0
        is_running_regex = re.compile("appscale-controller stop")
        for ip in all_ips:
            AppScaleLogger.log(
                "Shutting down AppScale API services at {0}".format(ip))
            while True:
                remote_output = cls.ssh(ip, keyname, 'ps x', is_verbose)
                AppScaleLogger.verbose(remote_output, is_verbose)
                # Use search() here: the stop command will not appear at the
                # very start of the ps output, so match() would never find it.
                if not is_running_regex.search(remote_output):
                    break
                time.sleep(0.3)
            boxes_shut_down += 1

        if boxes_shut_down != len(all_ips):
            raise AppScaleException(
                "Couldn't terminate your AppScale deployment on"
                " all machines - please do so manually.")

        AppScaleLogger.log(
            "Terminated AppScale on {0} machines.".format(boxes_shut_down))
Example #2
    def expand_roles(self):
        """Converts the 'master' composite role into the roles it represents, and
    adds dependencies necessary for the 'login' and 'database' roles.
    """
        for i in range(len(self.roles)):
            role = self.roles[i]
            if role in NodeLayout.DEPRECATED_ROLES:
                AppScaleLogger.warn(
                    "'{}' role has been deprecated, please use '{}'".format(
                        role, NodeLayout.DEPRECATED_ROLES[role]))
                self.roles.remove(role)
                self.roles.append(NodeLayout.DEPRECATED_ROLES[role])

        if 'master' in self.roles:
            self.roles.remove('master')
            self.roles.append('shadow')
            self.roles.append('load_balancer')

        if 'login' in self.roles:
            self.roles.append('load_balancer')

        # TODO: remove these, db_slave and taskqueue_slave are currently deprecated.
        if ('db_slave' in self.roles or 'db_master' in self.roles) \
            and 'database' not in self.roles:
            self.roles.append('database')

        if ('taskqueue_slave' in self.roles or 'taskqueue_master' in self.roles) \
            and 'taskqueue' not in self.roles:
            self.roles.append('taskqueue')

        # Remove any duplicate roles
        self.roles = list(set(self.roles))
Example #3
    def remove_app(cls, options):
        """Instructs AppScale to no longer host the named application.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    """
        if not options.confirm:
            response = raw_input("Are you sure you want to remove this " + \
              "application? (Y/N) ")
            if response not in ['y', 'yes', 'Y', 'YES']:
                raise AppScaleException("Cancelled application removal.")

        login_host = LocalState.get_login_host(options.keyname)
        secret = LocalState.get_secret_key(options.keyname)
        acc = AppControllerClient(login_host, secret)

        if not acc.does_app_exist(options.appname):
            raise AppScaleException(
                "The given application is not currently running.")

        acc.stop_app(options.appname)
        AppScaleLogger.log("Please wait for your app to shut down.")
        while True:
            if acc.is_app_running(options.appname):
                time.sleep(cls.SLEEP_TIME)
            else:
                break
        AppScaleLogger.success("Done shutting down {0}".format(
            options.appname))
Example #4
  def create_firewall(self, parameters, network_url):
    """ Creates a new firewall in Google Compute Engine with the specified name,
    bound to the specified network.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine, and an additional key indicating the name of the
        firewall that we should create.
      network_url: A str containing the URL of the network that this new
        firewall should be applied to.
    """
    gce_service, credentials = self.open_connection(parameters)
    http = httplib2.Http()
    auth_http = credentials.authorize(http)
    request = gce_service.firewalls().insert(
      project=parameters[self.PARAM_PROJECT],
      body={
        "name" : parameters[self.PARAM_GROUP],
        "description" : "Firewall used for AppScale instances",
        "network" : network_url,
        "sourceRanges" : ["0.0.0.0/0"],
        "allowed" : [
          {"IPProtocol" : "tcp", "ports": ["1-65535"]},
          {"IPProtocol" : "udp", "ports": ["1-65535"]},
          {"IPProtocol" : "icmp"}
        ]
      }
    )
    response = request.execute(http=auth_http)
    AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])
    self.ensure_operation_succeeds(gce_service, auth_http, response,
      parameters[self.PARAM_PROJECT])
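
The Google Compute Engine examples in this listing all follow one request pattern: open a connection, authorize an httplib2.Http object with the returned credentials, execute the request, and log the response. Below is a hedged helper sketching that pattern; the agent argument is assumed to expose open_connection(parameters) as in the examples, and the helper itself is illustrative rather than part of the AppScale tools.

    import httplib2

    def execute_gce_request(agent, parameters, build_request):
        """Executes one GCE API request using the agent's credentials.

        Args:
          agent: An object exposing open_connection(parameters), as in these examples.
          parameters: The dict of connection parameters used throughout this listing.
          build_request: A callable that takes the gce_service and returns a request.
        Returns:
          The decoded JSON response of the API call.
        """
        gce_service, credentials = agent.open_connection(parameters)
        auth_http = credentials.authorize(httplib2.Http())
        response = build_request(gce_service).execute(http=auth_http)
        # The examples above additionally log the response via AppScaleLogger.verbose.
        return response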
Example #5
    def setUp(self):
        self.keyname = "boobazblargfoo"
        self.function = "appscale-gather-logs"

        # mock out any writing to stdout
        flexmock(AppScaleLogger)
        AppScaleLogger.should_receive('log').and_return()

        # mock out all sleeping
        flexmock(time)
        time.should_receive('sleep').and_return()

        # throw some default mocks together for when invoking via shell succeeds
        # and when it fails
        self.fake_temp_file = flexmock(name='fake_temp_file')
        self.fake_temp_file.should_receive('read').and_return('boo out')
        self.fake_temp_file.should_receive('close').and_return()
        self.fake_temp_file.should_receive('seek').with_args(0).and_return()

        flexmock(tempfile)
        tempfile.should_receive('NamedTemporaryFile').and_return(
            self.fake_temp_file)

        self.success = flexmock(name='success', returncode=0)
        self.success.should_receive('wait').and_return(0)

        self.failed = flexmock(name='failed', returncode=1)
        self.failed.should_receive('wait').and_return(1)
Example #6
  def destroy(self):
    """'destroy' provides a nicer experience for users than the
    appscale-terminate-instances command, by using the configuration options
    present in the AppScalefile found in the current working directory.

    Raises:
      AppScalefileException: If there is no AppScalefile in the current working
      directory.
    """
    contents = self.read_appscalefile()

    # Construct a terminate-instances command from the file's contents
    command = []
    contents_as_yaml = yaml.safe_load(contents)
    if 'keyname' in contents_as_yaml:
      command.append("--keyname")
      command.append(contents_as_yaml['keyname'])

    if 'verbose' in contents_as_yaml:
      command.append("--verbose")

    # Finally, exec the command. Don't worry about validating it -
    # appscale-terminate-instances will do that for us.
    options = ParseArgs(command, "appscale-terminate-instances").args
    try:
      AppScaleTools.terminate_instances(options)
    except Exception as e:
      AppScaleLogger.warn(str(e))
Example #7
    def setUp(self):
        self.cloud_argv = [
            '--min', '1', '--max', '1', '--group', 'blargscale',
            '--infrastructure', 'ec2', '--machine', 'ami-ABCDEFG'
        ]
        self.cluster_argv = ['--ips', 'ips.yaml']
        self.function = "appscale-run-instances"

        # mock out all logging, since it clutters our output
        flexmock(AppScaleLogger)
        AppScaleLogger.should_receive('log').and_return()

        # set up phony AWS credentials for each test
        # ones that test not having them present can
        # remove them
        for credential in EucalyptusAgent.REQUIRED_EC2_CREDENTIALS:
            os.environ[credential] = "baz"
        os.environ['EC2_URL'] = "http://boo"

        # similarly, pretend that our image does exist in EC2
        # and Euca
        fake_ec2 = flexmock(name="fake_ec2")
        fake_ec2.should_receive('get_image').with_args('ami-ABCDEFG') \
          .and_return()
        fake_ec2.should_receive('get_image').with_args('emi-ABCDEFG') \
          .and_return('anything')

        fake_price = flexmock(name='fake_price', price=1.00)
        fake_ec2.should_receive('get_spot_price_history').and_return(
            [fake_price])

        flexmock(boto)
        boto.should_receive('connect_ec2').with_args('baz', 'baz') \
          .and_return(fake_ec2)
        boto.should_receive('connect_euca').and_return(fake_ec2)
Example #8
  def terminate_cloud_infrastructure(cls, keyname, is_verbose):
    """Powers off all machines in the currently running AppScale deployment.

    Args:
      keyname: The name of the SSH keypair used for this AppScale deployment.
      is_verbose: A bool that indicates if we should print the commands executed
        to stdout.
    """
    AppScaleLogger.log("About to terminate instances spawned with keyname {0}"
      .format(keyname))
    # This sleep is here to allow a moment for user to Ctrl-C
    time.sleep(2)

    # get all the instance IDs for machines in our deployment
    agent = InfrastructureAgentFactory.create_agent(
      LocalState.get_infrastructure(keyname))
    params = agent.get_params_from_yaml(keyname)
    params['IS_VERBOSE'] = is_verbose
    _, _, instance_ids = agent.describe_instances(params)

    # terminate all the machines
    params[agent.PARAM_INSTANCE_IDS] = instance_ids
    agent.terminate_instances(params)

    # delete the keyname and group
    agent.cleanup_state(params)
Example #9
    def copy_app_to_host(cls, app_location, keyname, is_verbose):
        """Copies the given application to a machine running the Login service
    within an AppScale deployment.

    Args:
      app_location: The location on the local filesystem where the application
        can be found.
      keyname: The name of the SSH keypair that uniquely identifies this
        AppScale deployment.
      is_verbose: A bool that indicates if we should print the commands we exec
        to copy the app to the remote host to stdout.

    Returns:
      A str corresponding to the location on the remote filesystem where the
        application was copied to.
    """
        app_id = AppEngineHelper.get_app_id_from_app_config(app_location)

        AppScaleLogger.log("Tarring application")
        rand = str(uuid.uuid4()).replace('-', '')[:8]
        local_tarred_app = "/tmp/appscale-app-{0}-{1}.tar.gz".format(
            app_id, rand)
        LocalState.shell(
            "cd {0} && tar -czhf {1} *".format(app_location, local_tarred_app),
            is_verbose)

        AppScaleLogger.log("Copying over application")
        remote_app_tar = "{0}/{1}.tar.gz".format(cls.REMOTE_APP_DIR, app_id)
        cls.scp(LocalState.get_login_host(keyname), keyname, local_tarred_app,
                remote_app_tar, is_verbose)

        os.remove(local_tarred_app)
        return remote_app_tar
Example #10
  def get_serving_info(self, app_id, keyname):
    """Finds out what host and port are used to host the named application.

    Args:
      app_id: The application that we should find a serving URL for.
    Returns:
      A tuple containing the host and port where the application is serving
        traffic from.
    """
    total_wait_time = 0
    # first, wait for the app to start serving
    sleep_time = self.STARTING_SLEEP_TIME
    while True:
      if self.does_app_exist(app_id):
        break
      else:
        AppScaleLogger.log("Waiting {0} second(s) to check on application...".\
          format(sleep_time))
        time.sleep(sleep_time)
        sleep_time = min(sleep_time * 2, self.MAX_SLEEP_TIME)
        total_wait_time += sleep_time
      if total_wait_time > self.MAX_WAIT_TIME:
        raise AppScaleException("App took too long to upload")
    # next, get the serving host and port
    app_data = self.server.get_app_data(app_id, self.secret)
    host = LocalState.get_login_host(keyname)
    port = int(re.search(".*\sports: (\d+)[\s|:]", app_data).group(1))
    return host, port
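
The wait loop above doubles its sleep interval up to MAX_SLEEP_TIME and gives up once MAX_WAIT_TIME is exceeded. A standalone sketch of that capped exponential backoff, with placeholder constants and a placeholder readiness check, could look like this:

    import time

    STARTING_SLEEP_TIME = 1  # placeholder values; the client class defines its own
    MAX_SLEEP_TIME = 20
    MAX_WAIT_TIME = 200

    def wait_until(is_ready):
        """Polls is_ready() with capped exponential backoff until it returns True."""
        total_wait_time = 0
        sleep_time = STARTING_SLEEP_TIME
        while not is_ready():
            if total_wait_time > MAX_WAIT_TIME:
                raise Exception("Timed out waiting for the application.")
            time.sleep(sleep_time)
            total_wait_time += sleep_time
            sleep_time = min(sleep_time * 2, MAX_SLEEP_TIME)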
Example #11
    def copy_deployment_credentials(cls, host, options):
        """Copies credentials needed to start the AppController and have it create
    other instances (in cloud deployments).

    Args:
      host: A str representing the machine (reachable from this computer) to
        copy our deployment credentials to.
      options: A Namespace that indicates which SSH keypair to use, and whether
        or not we are running in a cloud infrastructure.
    """
        cls.scp(host, options.keyname,
                LocalState.get_secret_key_location(options.keyname),
                '/etc/appscale/secret.key', options.verbose)
        cls.scp(host, options.keyname,
                LocalState.get_key_path_from_name(options.keyname),
                '/etc/appscale/ssh.key', options.verbose)

        LocalState.generate_ssl_cert(options.keyname, options.verbose)
        cls.scp(host, options.keyname,
                LocalState.get_certificate_location(options.keyname),
                '/etc/appscale/certs/mycert.pem', options.verbose)
        cls.scp(host, options.keyname,
                LocalState.get_private_key_location(options.keyname),
                '/etc/appscale/certs/mykey.pem', options.verbose)

        hash_id = subprocess.Popen([
            "openssl", "x509", "-hash", "-noout", "-in",
            LocalState.get_certificate_location(options.keyname)
        ],
                                   stdout=subprocess.PIPE).communicate()[0]
        cls.ssh(host, options.keyname,
          'ln -fs /etc/appscale/certs/mycert.pem /etc/ssl/certs/{0}.0'.\
            format(hash_id.rstrip()),
          options.verbose)

        AppScaleLogger.log("Copying over deployment credentials")
        cert = LocalState.get_certificate_location(options.keyname)
        private_key = LocalState.get_private_key_location(options.keyname)

        cls.ssh(host, options.keyname, 'mkdir -p /etc/appscale/keys/cloud1',
                options.verbose)
        cls.scp(host, options.keyname, cert,
                "/etc/appscale/keys/cloud1/mycert.pem", options.verbose)
        cls.scp(host, options.keyname, private_key,
                "/etc/appscale/keys/cloud1/mykey.pem", options.verbose)

        # In Google Compute Engine, we also need to copy over our client_secrets
        # file and the OAuth2 file that the user has approved for use with their
        # credentials, otherwise the AppScale VMs won't be able to interact with
        # GCE.
        if options.infrastructure and options.infrastructure == 'gce':
            if os.path.exists(LocalState.get_client_secrets_location( \
                options.keyname)):
                cls.scp(
                    host, options.keyname,
                    LocalState.get_client_secrets_location(options.keyname),
                    '/etc/appscale/client_secrets.json', options.verbose)
            cls.scp(host, options.keyname,
                    LocalState.get_oauth2_storage_location(options.keyname),
                    '/etc/appscale/oauth2.dat', options.verbose)
Example #12
    def enable_root_login(cls, host, keyname, infrastructure, is_verbose):
        """Logs into the named host and alters its ssh configuration to enable the
    root user to directly log in.

    Args:
      host: A str representing the host to enable root logins on.
      keyname: A str representing the name of the SSH keypair to login with.
      infrastructure: A str representing the name of the cloud infrastructure
        we're running on.
      is_verbose: A bool indicating if we should print the command we execute to
        enable root login to stdout.
    """
        # First, see if we need to enable root login at all (some VMs have it
        # already enabled).
        try:
            output = cls.ssh(host, keyname, 'ls', is_verbose, user='root')
        except ShellException as exception:
            # Google Compute Engine creates a user with the same name as the currently
            # logged-in user, so log in as that user to enable root login.
            if infrastructure == "gce":
                cls.merge_authorized_keys(host, keyname, getpass.getuser(),
                                          is_verbose)
                return
            else:
                raise exception

        # Amazon EC2 rejects a root login request and tells the user to log in as
        # the ubuntu user, so do that to enable root login.
        if re.search(cls.LOGIN_AS_UBUNTU_USER, output):
            cls.merge_authorized_keys(host, keyname, 'ubuntu', is_verbose)
        else:
            AppScaleLogger.log(
                "Root login already enabled - not re-enabling it.")
Example #13
    def merge_authorized_keys(cls, host, keyname, user, is_verbose):
        """ Adds the contents of the user's authorized_keys file to the root's
    authorized_keys file.

    Args:
      host: A str representing the host to enable root logins on.
      keyname: A str representing the name of the SSH keypair to login with.
      user: A str representing the name of the user to login as.
      is_verbose: A bool indicating if we should print the command we execute to
        enable root login to stdout.
    """
        AppScaleLogger.log('Root login not enabled - enabling it now.')

        create_root_keys = 'sudo touch /root/.ssh/authorized_keys'
        cls.ssh(host, keyname, create_root_keys, is_verbose, user=user)

        set_permissions = 'sudo chmod 600 /root/.ssh/authorized_keys'
        cls.ssh(host, keyname, set_permissions, is_verbose, user=user)

        temp_file = cls.ssh(host, keyname, 'mktemp', is_verbose, user=user)

        merge_to_tempfile = 'sudo sort -u ~/.ssh/authorized_keys '\
          '/root/.ssh/authorized_keys -o {}'.format(temp_file)
        cls.ssh(host, keyname, merge_to_tempfile, is_verbose, user=user)

        overwrite_root_keys = "sudo sed -n '/.*Please login/d; "\
          "w/root/.ssh/authorized_keys' {}".format(temp_file)
        cls.ssh(host, keyname, overwrite_root_keys, is_verbose, user=user)

        remove_tempfile = 'rm -f {0}'.format(temp_file)
        cls.ssh(host, keyname, remove_tempfile, is_verbose, user=user)
        return
Example #14
  def spawn_node_in_cloud(cls, options):
    """Starts a single virtual machine in a cloud infrastructure.

    This method also prepares the virual machine for use by the AppScale Tools.
    Specifically, it enables root logins on the machine, enables SSH access,
    and copies the user's SSH key to that machine.

    Args:
      options: A Namespace that specifies the cloud infrastructure to use, as
        well as how to interact with that cloud.
    Returns:
      The instance ID, public IP address, and private IP address of the machine
        that was started.
    """
    agent = InfrastructureAgentFactory.create_agent(options.infrastructure)
    params = agent.get_params_from_args(options)
    agent.configure_instance_security(params)
    instance_ids, public_ips, private_ips = agent.run_instances(count=1,
      parameters=params, security_configured=True)
    AppScaleLogger.log("Please wait for your instance to boot up.")
    cls.sleep_until_port_is_open(public_ips[0], cls.SSH_PORT, options.verbose)
    cls.enable_root_login(public_ips[0], options.keyname,
      options.infrastructure, options.verbose)
    cls.copy_ssh_keys_to_node(public_ips[0], options.keyname, options.verbose)
    return instance_ids[0], public_ips[0], private_ips[0]
Example #15
    def gather_logs(cls, options):
        """Collects logs from each machine in the currently running AppScale
    deployment.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    """
        # First, make sure that the place we want to store logs doesn't
        # already exist.
        if os.path.exists(options.location):
            raise AppScaleException("Can't gather logs, as the location you " + \
              "specified, {0}, already exists.".format(options.location))

        acc = AppControllerClient(LocalState.get_login_host(options.keyname),
                                  LocalState.get_secret_key(options.keyname))

        # do the mkdir after we get the secret key, so that a bad keyname will
        # cause the tool to crash and not create this directory
        os.mkdir(options.location)

        for ip in acc.get_all_public_ips():
            # Get the logs from each node, and store them in our local directory
            local_dir = "{0}/{1}".format(options.location, ip)
            os.mkdir(local_dir)
            RemoteHelper.scp_remote_to_local(ip, options.keyname,
                                             '/var/log/appscale', local_dir,
                                             options.verbose)
        AppScaleLogger.success("Successfully copied logs to {0}".format(
            options.location))
Example #16
  def start_remote_appcontroller(cls, host, keyname, is_verbose):
    """Starts the AppController daemon on the specified host.

    Args:
      host: A str representing the host to start the AppController on.
      keyname: A str representing the name of the SSH keypair that can log into
        the specified host.
      is_verbose: A bool that indicates if we should print the commands needed
        to start the AppController to stdout.
    """
    AppScaleLogger.log("Starting AppController at {0}".format(host))

    # remove any possible appcontroller state that may not have been
    # properly removed in virtualized clusters
    cls.ssh(host, keyname, 'rm -rf /etc/appscale/appcontroller-state.json',
      is_verbose)

    # start up god, who will start up the appcontroller once we give it the
    # right config file
    cls.ssh(host, keyname, 'god &', is_verbose)
    time.sleep(1)

    # scp over that config file
    cls.scp(host, keyname, cls.TEMPLATE_GOD_CONFIG_FILE,
      '/tmp/appcontroller.god', is_verbose)

    # finally, tell god to start the appcontroller and then wait for it to start
    cls.ssh(host, keyname, 'god load /tmp/appcontroller.god', is_verbose)

    AppScaleLogger.log("Please wait for the AppController to finish " + \
      "pre-processing tasks.")

    cls.sleep_until_port_is_open(host, AppControllerClient.PORT, is_verbose)
Example #17
    def terminate_instances(cls, options):
        """Stops all services running in an AppScale deployment, and in cloud
    deployments, also powers off the instances previously spawned.

    Raises:
      AppScaleException: If AppScale is not running, and thus can't be
      terminated.
    """
        if not os.path.exists(
                LocalState.get_locations_yaml_location(options.keyname)):
            raise AppScaleException(
                "AppScale is not running with the keyname {0}".format(
                    options.keyname))

        if LocalState.get_infrastructure(options.keyname) in \
          InfrastructureAgentFactory.VALID_AGENTS:
            RemoteHelper.terminate_cloud_infrastructure(
                options.keyname, options.verbose)
        else:
            RemoteHelper.terminate_virtualized_cluster(options.keyname,
                                                       options.verbose)

        LocalState.cleanup_appscale_files(options.keyname)
        AppScaleLogger.success(
            "Successfully shut down your AppScale deployment.")
Example #18
  def get_serving_info(self, app_id, keyname):
    """Finds out what host and port are used to host the named application.

    Args:
      app_id: The application that we should find a serving URL for.
    Returns:
      A tuple containing the host and port where the application is serving
        traffic from.
    """
    total_wait_time = 0
    # first, wait for the app to start serving
    sleep_time = self.STARTING_SLEEP_TIME
    while True:
      if self.does_app_exist(app_id):
        break
      else:
        AppScaleLogger.log("Waiting {0} second(s) to check on application...".\
          format(sleep_time))
        time.sleep(sleep_time)
        sleep_time = min(sleep_time * 2, self.MAX_SLEEP_TIME)
        total_wait_time += sleep_time
      if total_wait_time > self.MAX_WAIT_TIME:
        raise AppScaleException("App took too long to upload")
    # next, get the serving host and port
    app_data = self.server.get_app_data(app_id, self.secret)
    host = LocalState.get_login_host(keyname)
    port = int(re.search(".*\sports: (\d+)[\s|:]", app_data).group(1))
    return host, port
Example #19
    def create_network(self, parameters):
        """ Creates a new network in Google Compute Engine with the specified name.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine, and an additional key indicating the name of the
        network that we should create in GCE.
    Returns:
      The URL corresponding to the name of the network that was created, for use
      with binding this network to one or more firewalls.
    """
        gce_service, credentials = self.open_connection(parameters)
        http = httplib2.Http()
        auth_http = credentials.authorize(http)
        request = gce_service.networks().insert(
            project=parameters[self.PARAM_PROJECT],
            body={
                "name": parameters[self.PARAM_GROUP],
                "description": "Network used for AppScale instances",
                "IPv4Range": "10.240.0.0/16"
            })
        response = request.execute(http=auth_http)
        AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])
        self.ensure_operation_succeeds(gce_service, auth_http, response,
                                       parameters[self.PARAM_PROJECT])
        return response['targetLink']
Example #20
  def setUp(self):
    self.keyname = "boobazblargfoo"
    self.function = "appscale-upload-app"
    self.app_dir = "/tmp/baz/gbaz"

    # mock out any writing to stdout
    flexmock(AppScaleLogger)
    AppScaleLogger.should_receive('log').and_return()
    AppScaleLogger.should_receive('success').and_return()

    # mock out all sleeping
    flexmock(time)
    time.should_receive('sleep').and_return()

    local_state = flexmock(LocalState)
    local_state.should_receive('shell').and_return()

    # throw some default mocks together for when invoking via shell succeeds
    # and when it fails
    self.fake_temp_file = flexmock(name='fake_temp_file')
    self.fake_temp_file.should_receive('read').and_return('boo out')
    self.fake_temp_file.should_receive('close').and_return()
    self.fake_temp_file.should_receive('seek').with_args(0).and_return()

    flexmock(tempfile)
    tempfile.should_receive('NamedTemporaryFile').and_return(self.fake_temp_file)

    self.success = flexmock(name='success', returncode=0)
    self.success.should_receive('wait').and_return(0)

    self.failed = flexmock(name='failed', returncode=1)
    self.failed.should_receive('wait').and_return(1)
Example #21
    def add_access_config(self, parameters, instance_id, static_ip):
        """ Instructs Google Compute Engine to use the given IP address as the
    public IP for the named instance.

    This assumes that there is no existing public IP address for the named
    instance. If this is not the case, callers should use delete_access_config
    first to remove it.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine, and an additional key mapping to a list of
        instance names that should be deleted.
      instance_id: A str naming the running instance that the new public IP
        address should be added to.
      static_ip: A str naming the already allocated static IP address that
        will be used for the named instance.
    """
        gce_service, credentials = self.open_connection(parameters)
        http = httplib2.Http()
        auth_http = credentials.authorize(http)
        request = gce_service.instances().addAccessConfig(
            project=parameters[self.PARAM_PROJECT],
            instance=instance_id,
            networkInterface="nic0",
            zone=parameters[self.PARAM_ZONE],
            body={
                "kind": "compute#accessConfig",
                "type": "ONE_TO_ONE_NAT",
                "name": "External NAT",
                "natIP": static_ip
            })
        response = request.execute(http=auth_http)
        AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])
Example #22
  def remove_app(cls, options):
    """Instructs AppScale to no longer host the named application.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    """
    if not options.confirm:
      response = raw_input("Are you sure you want to remove this " + \
        "application? (Y/N) ")
      if response not in ['y', 'yes', 'Y', 'YES']:
        raise AppScaleException("Cancelled application removal.")

    login_host = LocalState.get_login_host(options.keyname)
    acc = AppControllerClient(login_host, LocalState.get_secret_key(
      options.keyname))
    userappserver_host = acc.get_uaserver_host(options.verbose)
    userappclient = UserAppClient(userappserver_host, LocalState.get_secret_key(
      options.keyname))
    if not userappclient.does_app_exist(options.appname):
      raise AppScaleException("The given application is not currently running.")

    acc.stop_app(options.appname)
    AppScaleLogger.log("Please wait for your app to shut down.")
    while True:
      if acc.is_app_running(options.appname):
        time.sleep(cls.SLEEP_TIME)
      else:
        break
    AppScaleLogger.success("Done shutting down {0}".format(options.appname))
Example #23
    def terminate_instances(self, parameters):
        """ Deletes the instances specified in 'parameters' running in Google
    Compute Engine.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine, and an additional key mapping to a list of
        instance names that should be deleted.
    """
        instance_ids = parameters[self.PARAM_INSTANCE_IDS]
        responses = []
        for instance_id in instance_ids:
            gce_service, credentials = self.open_connection(parameters)
            http = httplib2.Http()
            auth_http = credentials.authorize(http)
            request = gce_service.instances().delete(
                project=parameters[self.PARAM_PROJECT],
                zone=parameters[self.PARAM_ZONE],
                instance=instance_id)
            response = request.execute(http=auth_http)
            AppScaleLogger.verbose(str(response),
                                   parameters[self.PARAM_VERBOSE])
            responses.append(response)

        for response in responses:
            gce_service, credentials = self.open_connection(parameters)
            http = httplib2.Http()
            auth_http = credentials.authorize(http)
            self.ensure_operation_succeeds(gce_service, auth_http, response,
                                           parameters[self.PARAM_PROJECT])
Example #24
 def confirm_or_abort(cls, message):
     AppScaleLogger.warn(message)
     confirm = raw_input("Are you sure you want to do this? (Y/N) ")
     if confirm.lower() == "y" or confirm.lower() == "yes":
         return
     else:
         raise AppScaleException("AppScale termination was cancelled.")
Example #25
    def does_address_exist(self, parameters):
        """ Queries Google Compute Engine to see if the specified static IP address
    exists for this user.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine, and an additional key indicating the name of the
        static IP address that we should check for existence.
    Returns:
      True if the named address exists, and False otherwise.
    """
        gce_service, credentials = self.open_connection(parameters)
        http = httplib2.Http()
        auth_http = credentials.authorize(http)
        request = gce_service.addresses().list(
            project=parameters[self.PARAM_PROJECT],
            filter="address eq {0}".format(parameters[self.PARAM_STATIC_IP]),
            region=parameters[self.PARAM_REGION])
        response = request.execute(http=auth_http)
        AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])

        if 'items' in response:
            return True
        else:
            return False
Example #26
  def terminate_instances(self, parameters):
    """ Deletes the instances specified in 'parameters' running in Google
    Compute Engine.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine, and an additional key mapping to a list of
        instance names that should be deleted.
    """
    instance_ids = parameters[self.PARAM_INSTANCE_IDS]
    responses = []
    for instance_id in instance_ids:
      gce_service, credentials = self.open_connection(parameters)
      http = httplib2.Http()
      auth_http = credentials.authorize(http)
      request = gce_service.instances().delete(
        project=parameters[self.PARAM_PROJECT],
        zone=parameters[self.PARAM_ZONE],
        instance=instance_id
      )
      response = request.execute(http=auth_http)
      AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])
      responses.append(response)

    for response in responses:
      gce_service, credentials = self.open_connection(parameters)
      http = httplib2.Http()
      auth_http = credentials.authorize(http)
      self.ensure_operation_succeeds(gce_service, auth_http, response,
        parameters[self.PARAM_PROJECT])
Example #27
    def does_disk_exist(self, parameters, disk):
        """ Queries Google Compute Engine to see if the specified persistent disk
    exists for this user.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine.
      disk: A str containing the name of the disk that we should check for
        existence.
    Returns:
      True if the named persistent disk exists, and False otherwise.
    """
        gce_service, credentials = self.open_connection(parameters)
        try:
            http = httplib2.Http()
            auth_http = credentials.authorize(http)
            request = gce_service.disks().get(
                project=parameters[self.PARAM_PROJECT],
                disk=disk,
                zone=parameters[self.PARAM_ZONE])
            response = request.execute(http=auth_http)
            AppScaleLogger.verbose(str(response),
                                   parameters[self.PARAM_VERBOSE])
            return True
        except apiclient.errors.HttpError:
            return False
Example #28
    def does_user_exist(self, username, silent=False):
        """ Queries the AppController to see if the given user exists.

    Args:
      username: The email address registered as username for the user's application.
    """
        while True:
            try:
                user_exists = self.run_with_timeout(
                    self.DEFAULT_TIMEOUT,
                    'Request to check if user exists timed out.',
                    self.DEFAULT_NUM_RETRIES, self.server.does_user_exist,
                    username, self.secret)
                if user_exists == 'true':
                    return True
                elif user_exists == 'false':
                    return False
                else:
                    raise Exception(user_exists)
            except Exception as acc_error:
                if not silent:
                    AppScaleLogger.log(
                        "Exception when checking if a user exists: {0}".format(
                            acc_error))
                    AppScaleLogger.log("Backing off and trying again.")
                time.sleep(10)
Example #29
  def register(self, deployment_id):
    """ Allows users to register their AppScale deployment with the AppScale
    Portal.

    Raises:
      AppScaleException: If the deployment has already been registered.
    """
    appscale_yaml = yaml.safe_load(self.read_appscalefile())
    if 'keyname' in appscale_yaml:
      keyname = appscale_yaml['keyname']
    else:
      keyname = 'appscale'

    nodes = self.get_nodes(keyname)
    head_node = self.get_head_node(nodes)
    if RegistrationHelper.appscale_has_deployment_id(head_node, keyname):
      existing_id = RegistrationHelper.get_deployment_id(head_node, keyname)
      if existing_id != deployment_id:
        raise AppScaleException(
          'This deployment has already been registered with a different ID.')

    if 'infrastructure' in appscale_yaml:
      deployment_type = 'cloud'
    else:
      deployment_type = 'cluster'

    deployment = RegistrationHelper.update_deployment(deployment_type, nodes,
      deployment_id)

    RegistrationHelper.set_deployment_id(head_node, keyname, deployment_id)

    AppScaleLogger.success(
      'Registration complete for AppScale deployment {0}.'
      .format(deployment['name']))
Example #30
  def expand_roles(self):
    """Converts the 'master' composite role into the roles it represents, and
    adds dependencies necessary for the 'login' and 'database' roles.
    """
    for i in range(len(self.roles)):
      role = self.roles[i]
      if role in NodeLayout.DEPRECATED_ROLES:
        AppScaleLogger.warn("'{}' role has been deprecated, please use '{}'"
                            .format(role, NodeLayout.DEPRECATED_ROLES[role]))
        self.roles.remove(role)
        self.roles.append(NodeLayout.DEPRECATED_ROLES[role])

    if 'master' in self.roles:
      self.roles.remove('master')
      self.roles.append('shadow')
      self.roles.append('load_balancer')

    if 'login' in self.roles:
      self.roles.append('load_balancer')

    # TODO: remove these, db_slave and taskqueue_slave are currently deprecated.
    if ('db_slave' in self.roles or 'db_master' in self.roles) \
        and 'database' not in self.roles:
      self.roles.append('database')

    if ('taskqueue_slave' in self.roles or 'taskqueue_master' in self.roles) \
        and 'taskqueue' not in self.roles:
      self.roles.append('taskqueue')

    # Remove any duplicate roles
    self.roles = list(set(self.roles))
Example #31
  def get_optimal_spot_price(self, conn, instance_type, zone):
    """
    Returns the spot price for an EC2 instance of the specified instance type.
    The returned value is computed by averaging all the spot price history
    values returned by the back-end EC2 APIs and incrementing the average by
    extra 10%.

    Args:
      conn: A boto.EC2Connection that can be used to communicate with AWS.
      instance_type: A str representing the instance type whose prices we
        should speculate for.
      zone: A str representing the availability zone that the instance will
        be placed in.
    Returns:
      The estimated spot price for the specified instance type, in the
        specified availability zone.
    """
    end_time = datetime.datetime.now()
    start_time = end_time - datetime.timedelta(days=7)
    history = conn.get_spot_price_history(start_time=start_time.isoformat(),
      end_time=end_time.isoformat(), product_description='Linux/UNIX',
      instance_type=instance_type, availability_zone=zone)
    var_sum = 0.0
    for entry in history:
      var_sum += entry.price
    average = var_sum / len(history)
    bid_price = average * 1.10
    AppScaleLogger.log('The average spot instance price for a {0} machine is'\
        ' {1}, and 10% more is {2}'.format(instance_type, average, bid_price))
    return bid_price
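
A small worked sketch of the same arithmetic on a fixed price history (the prices below are made up): the bid is the plain average plus 10%. Note that the method above would divide by zero if the history came back empty, so a guard is added here as an extra safeguard rather than as part of the original code.

    prices = [0.035, 0.040, 0.045]  # hypothetical spot price samples
    if not prices:
        raise ValueError("No spot price history returned for this instance type.")
    average = sum(prices) / len(prices)
    bid_price = average * 1.10
    print("average={0:.4f}, bid={1:.4f}".format(average, bid_price))
    # prints: average=0.0400, bid=0.0440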
Example #32
  def copy_deployment_credentials(cls, host, options):
    """Copies credentials needed to start the AppController and have it create
    other instances (in cloud deployments).

    Args:
      host: A str representing the machine (reachable from this computer) to
        copy our deployment credentials to.
      options: A Namespace that indicates which SSH keypair to use, and whether
        or not we are running in a cloud infrastructure.
    """
    cls.scp(host, options.keyname, LocalState.get_secret_key_location(
      options.keyname), '/etc/appscale/secret.key', options.verbose)
    cls.scp(host, options.keyname, LocalState.get_key_path_from_name(
      options.keyname), '/etc/appscale/ssh.key', options.verbose)

    LocalState.generate_ssl_cert(options.keyname, options.verbose)
    cls.scp(host, options.keyname, LocalState.get_certificate_location(
      options.keyname), '/etc/appscale/certs/mycert.pem', options.verbose)
    cls.scp(host, options.keyname, LocalState.get_private_key_location(
      options.keyname), '/etc/appscale/certs/mykey.pem', options.verbose)

    AppScaleLogger.log("Copying over deployment credentials")
    cert = LocalState.get_certificate_location(options.keyname)
    private_key = LocalState.get_private_key_location(options.keyname)

    cls.ssh(host, options.keyname, 'mkdir -p /etc/appscale/keys/cloud1',
      options.verbose)
    cls.scp(host, options.keyname, cert, "/etc/appscale/keys/cloud1/mycert.pem",
      options.verbose)
    cls.scp(host, options.keyname, private_key,
      "/etc/appscale/keys/cloud1/mykey.pem", options.verbose)
Example #33
  def start_remote_appcontroller(cls, host, keyname, is_verbose):
    """Starts the AppController daemon on the specified host.

    Args:
      host: A str representing the host to start the AppController on.
      keyname: A str representing the name of the SSH keypair that can log into
        the specified host.
      is_verbose: A bool that indicates if we should print the commands needed
        to start the AppController to stdout.
    """
    AppScaleLogger.log("Starting AppController at {0}".format(host))

    # Remove any previous state. TODO: Don't do this with the tools.
    cls.ssh(host, keyname,
      'rm -rf {}/appcontroller-state.json'.format(cls.CONFIG_DIR), is_verbose)

    # Remove any monit configuration files from previous AppScale deployments.
    cls.ssh(host, keyname, 'rm -rf /etc/monit/conf.d/appscale-*.cfg', is_verbose)

    cls.ssh(host, keyname, 'service monit start', is_verbose)

    # Start the AppController.
    cls.ssh(host, keyname, 'service appscale-controller start', is_verbose)

    AppScaleLogger.log("Please wait for the AppController to finish " + \
      "pre-processing tasks.")

    cls.sleep_until_port_is_open(host, AppControllerClient.PORT, is_verbose)
Example #34
  def create_user_accounts(cls, email, password, uaserver_host, keyname):
    """Registers two new user accounts with the UserAppServer.

    One account is the standard account that users log in with (via their
    e-mail address. The other is their XMPP account, so that they can log into
    any jabber-compatible service and send XMPP messages to their application
    (and receive them).

    Args:
      email: The e-mail address that should be registered for the user's
        standard account.
      password: The password that should be used for both the standard and XMPP
        accounts.
      uaserver_host: The location of a UserAppClient, that can create new user
        accounts.
      keyname: The name of the SSH keypair used for this AppScale deployment.
    """
    uaserver = UserAppClient(uaserver_host, LocalState.get_secret_key(keyname))

    # first, create the standard account
    encrypted_pass = LocalState.encrypt_password(email, password)
    uaserver.create_user(email, encrypted_pass)

    # next, create the XMPP account. if the user's e-mail is a@example.com, then
    # that means their XMPP account name is a@login_ip
    username_regex = re.compile('\A(.*)@')
    username = username_regex.match(email).groups()[0]
    xmpp_user = "******".format(username, LocalState.get_login_host(keyname))
    xmpp_pass = LocalState.encrypt_password(xmpp_user, password)
    uaserver.create_user(xmpp_user, xmpp_pass)
    AppScaleLogger.log("Your XMPP username is {0}".format(xmpp_user))
Example #35
    def setUp(self):
        # mock out logging, since it clutters out test output
        flexmock(AppScaleLogger)
        AppScaleLogger.should_receive('log').and_return()

        # next, pretend our ec2 credentials are properly set
        for credential in EC2Agent.REQUIRED_CREDENTIALS:
            os.environ[credential] = "baz"

        # finally, pretend that our ec2 image to use exists
        fake_ec2 = flexmock(name="fake_ec2")
        fake_ec2.should_receive('get_image').with_args('ami-ABCDEFG') \
          .and_return()
        flexmock(boto)
        boto.should_receive('connect_ec2').with_args(
            'baz', 'baz').and_return(fake_ec2)

        # add in some instance variables so that we don't have
        # a lot IP addresses everywhere
        self.blank_input_yaml = None
        self.default_options = {'table': 'cassandra'}
        self.ip_1 = '192.168.1.1'
        self.ip_2 = '192.168.1.2'
        self.ip_3 = '192.168.1.3'
        self.ip_4 = '192.168.1.4'
        self.ip_5 = '192.168.1.5'
        self.ip_6 = '192.168.1.6'
        self.ip_7 = '192.168.1.7'
        self.ip_8 = '192.168.1.8'
Example #36
  def register(self, deployment_id):
    """ Allows users to register their AppScale deployment with the AppScale
    Portal.

    Raises:
      AppScaleException: If the deployment has already been registered.
    """
    appscale_yaml = yaml.safe_load(self.read_appscalefile())
    if 'keyname' in appscale_yaml:
      keyname = appscale_yaml['keyname']
    else:
      keyname = 'appscale'

    nodes = self.get_nodes(keyname)
    head_node = self.get_head_node(nodes)
    if RegistrationHelper.appscale_has_deployment_id(head_node, keyname):
      existing_id = RegistrationHelper.get_deployment_id(head_node, keyname)
      if existing_id != deployment_id:
        raise AppScaleException(
          'This deployment has already been registered with a different ID.')

    if 'infrastructure' in appscale_yaml:
      deployment_type = 'cloud'
    else:
      deployment_type = 'cluster'

    deployment = RegistrationHelper.update_deployment(deployment_type, nodes,
      deployment_id)

    RegistrationHelper.set_deployment_id(head_node, keyname, deployment_id)

    AppScaleLogger.success(
      'Registration complete for AppScale deployment {0}.'
      .format(deployment['name']))
Example #37
  def ensure_appscale_isnt_running(cls, keyname, force):
    """Checks the secret key file to see if AppScale is running, and
    aborts if it is.

    Args:
      keyname: The keypair name that is used to identify AppScale deployments.
      force: A bool that is used to run AppScale even if the secret key file
        is present.
    Raises:
      BadConfigurationException: If AppScale is already running.
    """
    if force:
      return

    if os.path.exists(cls.get_secret_key_location(keyname)):
      try:
        login_host = cls.get_login_host(keyname)
        secret_key = cls.get_secret_key(keyname)
      except (IOError, AppScaleException, BadConfigurationException):
        # If we don't have the locations files, we are not running.
        return

      acc = AppControllerClient(login_host, secret_key)
      try:
        acc.get_all_public_ips()
      except AppControllerException:
        # AC is not running, so we assume appscale is not up and running.
        AppScaleLogger.log("AppController not running on login node.")
      else:
        raise BadConfigurationException("AppScale is already running. Terminate" +
          " it, set 'force: True' in your AppScalefile, or use the --force flag" +
          " to run anyways.")
Example #38
  def setUp(self):
    self.keyname = "boobazblargfoo"
    self.function = "appscale-add-keypair"

    # mock out any writing to stdout
    flexmock(AppScaleLogger)
    AppScaleLogger.should_receive('log').and_return()

    # mock out all sleeping
    flexmock(time)
    time.should_receive('sleep').and_return()

    # throw some default mocks together for when invoking via shell succeeds
    # and when it fails
    self.fake_temp_file = flexmock(name='fake_temp_file')
    self.fake_temp_file.should_receive('read').and_return('boo out')
    self.fake_temp_file.should_receive('close').and_return()

    flexmock(tempfile)
    tempfile.should_receive('TemporaryFile').and_return(self.fake_temp_file)

    self.success = flexmock(name='success', returncode=0)
    self.success.should_receive('wait').and_return(0)

    self.failed = flexmock(name='failed', returncode=1)
    self.failed.should_receive('wait').and_return(1)
Example #39
  def setUp(self):
    self.cloud_argv = ['--min', '1', '--max', '1', '--group', 'blargscale',
      '--infrastructure', 'ec2', '--machine', 'ami-ABCDEFG']
    self.cluster_argv = ['--ips', 'ips.yaml']
    self.function = "appscale-run-instances"

    # mock out all logging, since it clutters our output
    flexmock(AppScaleLogger)
    AppScaleLogger.should_receive('log').and_return()

    # set up phony AWS credentials for each test
    # ones that test not having them present can
    # remove them
    for credential in EucalyptusAgent.REQUIRED_EC2_CREDENTIALS:
      os.environ[credential] = "baz"
    os.environ['EC2_URL'] = "http://boo"

    # similarly, pretend that our image does exist in EC2
    # and Euca
    fake_ec2 = flexmock(name="fake_ec2")
    fake_ec2.should_receive('get_image').with_args('ami-ABCDEFG') \
      .and_return()
    fake_ec2.should_receive('get_image').with_args('emi-ABCDEFG') \
      .and_return('anything')

    flexmock(boto)
    boto.should_receive('connect_ec2').with_args('baz', 'baz') \
      .and_return(fake_ec2)
    boto.should_receive('connect_euca').and_return(fake_ec2)
Example #40
  def get_application_info(cls, owner, app_language, app_dir):
    name = AppEngineHelper.get_app_id_from_app_config(app_dir)
    version = AppEngineHelper.get_app_version_from_app_config(app_dir)
    app = Application(name, version, owner)

    dependencies_path = app_dir + os.sep + 'dependencies.yaml'
    if app_language == 'java':
      dependencies_path = app_dir + os.sep + 'war' + os.sep + 'WEB-INF' + os.sep + 'dependencies.yaml'
      api_specs_dir = app_dir + os.sep + 'war' + os.sep + 'WEB-INF' + os.sep + 'specs'
      if os.path.exists(api_specs_dir):
        for f in os.listdir(api_specs_dir):
          if f.endswith('.json'):
            api = API(api_specs_dir + os.sep + f)
            AppScaleLogger.log('Detected API: {0}-v{1}'.format(api.name, api.version))
            app.api_list.append(api)

    if os.path.exists(dependencies_path):
      dependencies_file = open(dependencies_path, 'r')
      dependencies = yaml.load(dependencies_file)
      dependencies_file.close()
      if dependencies:
        EagerHelper.validate_dependencies(dependencies)
        app.dependencies = dependencies['dependencies']

    return app
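
One caveat with the dependency parsing above: in older PyYAML releases, yaml.load without an explicit Loader can construct arbitrary Python objects, and newer releases warn when no Loader is given; yaml.safe_load is the usual replacement when the file only holds plain data. A minimal sketch, assuming a local dependencies.yaml path:

    import yaml

    with open('dependencies.yaml') as dependencies_file:  # hypothetical path
        dependencies = yaml.safe_load(dependencies_file)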
Example #41
    def add_instances(cls, options):
        """Adds additional machines to an AppScale deployment.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    """
        if 'master' in options.ips.keys():
            raise BadConfigurationException("Cannot add master nodes to an " + \
              "already running AppScale deployment.")

        # Skip checking for -n (replication) because we don't allow the user
        # to specify it here (only allowed in run-instances).
        additional_nodes_layout = NodeLayout(options)

        # In virtualized cluster deployments, we need to make sure that the user
        # has already set up SSH keys.
        if LocalState.get_from_yaml(options.keyname,
                                    'infrastructure') == "xen":
            for ip in options.ips.values():
                # throws a ShellException if the SSH key doesn't work
                RemoteHelper.ssh(ip, options.keyname, "ls", options.verbose)

        # Finally, find an AppController and send it a message to add
        # the given nodes with the new roles.
        AppScaleLogger.log("Sending request to add instances")
        login_ip = LocalState.get_login_host(options.keyname)
        acc = AppControllerClient(login_ip,
                                  LocalState.get_secret_key(options.keyname))
        acc.start_roles_on_nodes(json.dumps(options.ips))

        # TODO(cgb): Should we wait for the new instances to come up and get
        # initialized?
        AppScaleLogger.success("Successfully sent request to add instances " + \
          "to this AppScale deployment.")
Example #42
  def logs(self, location):
    """'logs' provides a cleaner experience for users than the
    appscale-gather-logs command, by using the configuration options present in
    the AppScalefile found in the current working directory.

    Args:
      location: The path on the local filesystem where logs should be copied to.
    Raises:
      AppScalefileException: If there is no AppScalefile in the current working
      directory.
    """
    contents = self.read_appscalefile()
    contents_as_yaml = yaml.safe_load(contents)

    # construct the appscale-gather-logs command
    command = []
    if 'keyname' in contents_as_yaml:
      command.append("--keyname")
      command.append(contents_as_yaml["keyname"])

    command.append("--location")
    command.append(location)

    # and exec it
    options = ParseArgs(command, "appscale-gather-logs").args
    try:
      AppScaleTools.gather_logs(options)
    except Exception as e:
      AppScaleLogger.warn(str(e))
Example #43
  def upgrade_appscale(cls, options, node_layout):
    """ Runs the bootstrap script on each of the remote machines.
      Args:
        options: A Namespace that has fields for each parameter that can be
          passed in via the command-line interface.
        node_layout: A NodeLayout object for the deployment.
    """
    unique_ips = [node.public_ip for node in node_layout.nodes]

    AppScaleLogger.log("Upgrading AppScale code to the latest version on "
      "these machines: {}".format(unique_ips))
    threads = []
    error_ips = []
    for ip in unique_ips:
      t = threading.Thread(target=cls.run_bootstrap, args=(ip, options, error_ips))
      threads.append(t)

    for x in threads:
      x.start()

    for x in threads:
      x.join()

    if not error_ips:
      cls.run_upgrade_script(options, node_layout)
Example #44
  def gather_logs(cls, options):
    """Collects logs from each machine in the currently running AppScale
    deployment.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    """
    # First, make sure that the place we want to store logs doesn't
    # already exist.
    if os.path.exists(options.location):
      raise AppScaleException("Can't gather logs, as the location you " + \
        "specified, {0}, already exists.".format(options.location))

    acc = AppControllerClient(LocalState.get_login_host(options.keyname),
      LocalState.get_secret_key(options.keyname))

    # do the mkdir after we get the secret key, so that a bad keyname will
    # cause the tool to crash and not create this directory
    os.mkdir(options.location)

    for ip in acc.get_all_public_ips():
      # Get the logs from each node, and store them in our local directory
      local_dir = "{0}/{1}".format(options.location, ip)
      os.mkdir(local_dir)
      RemoteHelper.scp_remote_to_local(ip, options.keyname, '/var/log/appscale',
        local_dir, options.verbose)
    AppScaleLogger.success("Successfully copied logs to {0}".format(
      options.location))
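
The resulting layout is one sub-directory per node under the chosen location. A rough standalone equivalent using plain scp; the ~/.appscale/<keyname>.key path and root user are assumptions about how the tools store and use keys, not values taken from the code above.

import os
import subprocess

def gather_logs_sketch(location, hosts, keyname):
  if os.path.exists(location):
    raise RuntimeError("{0} already exists".format(location))
  os.mkdir(location)

  key_path = os.path.expanduser("~/.appscale/{0}.key".format(keyname))
  for host in hosts:
    local_dir = os.path.join(location, host)
    os.mkdir(local_dir)
    # Recursively copy /var/log/appscale from the node into its directory.
    subprocess.check_call([
      "scp", "-r", "-i", key_path, "-o", "StrictHostKeyChecking=no",
      "root@{0}:/var/log/appscale".format(host), local_dir])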
Example #45
0
    def terminate_instances(cls, options):
        """Stops all services running in an AppScale deployment, and in cloud
    deployments, also powers off the instances previously spawned.

    Raises:
      AppScaleException: If AppScale is not running, and thus can't be
      terminated.
    """
        if not os.path.exists(LocalState.get_secret_key_location(options.keyname)):
            raise AppScaleException("AppScale is not running with the keyname {0}".format(options.keyname))

        infrastructure = LocalState.get_infrastructure(options.keyname)

        # If the user is on a cloud deployment, and not backing up their data to
        # persistent disks, warn them before shutting down AppScale.
        # Also, if we're in developer mode, skip the warning.
        if infrastructure != "xen" and not LocalState.are_disks_used(options.keyname) and not options.test:
            LocalState.ensure_user_wants_to_terminate()

        if infrastructure in InfrastructureAgentFactory.VALID_AGENTS:
            RemoteHelper.terminate_cloud_infrastructure(options.keyname, options.verbose)
        else:
            RemoteHelper.terminate_virtualized_cluster(options.keyname, options.verbose)

        LocalState.cleanup_appscale_files(options.keyname)
        AppScaleLogger.success("Successfully shut down your AppScale deployment.")
Example #46
0
  def add_instances(cls, options):
    """Adds additional machines to an AppScale deployment.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    """
    if 'master' in options.ips.keys():
      raise BadConfigurationException("Cannot add master nodes to an " + \
        "already running AppScale deployment.")

    # Skip checking for -n (replication) because we don't allow the user
    # to specify it here (only allowed in run-instances).
    additional_nodes_layout = NodeLayout(options)

    # In virtualized cluster deployments, we need to make sure that the user
    # has already set up SSH keys.
    if LocalState.get_from_yaml(options.keyname, 'infrastructure') == "xen":
      for ip in options.ips.values():
        # throws a ShellException if the SSH key doesn't work
        RemoteHelper.ssh(ip, options.keyname, "ls", options.verbose)

    # Finally, find an AppController and send it a message to add
    # the given nodes with the new roles.
    AppScaleLogger.log("Sending request to add instances")
    login_ip = LocalState.get_login_host(options.keyname)
    acc = AppControllerClient(login_ip, LocalState.get_secret_key(
      options.keyname))
    acc.start_roles_on_nodes(json.dumps(options.ips))

    # TODO(cgb): Should we wait for the new instances to come up and get
    # initialized?
    AppScaleLogger.success("Successfully sent request to add instances " + \
      "to this AppScale deployment.")
Example #47
0
  def merge_authorized_keys(cls, host, keyname, user, is_verbose):
    """ Adds the contents of the user's authorized_keys file to the root's
    authorized_keys file.

    Args:
      host: A str representing the host to enable root logins on.
      keyname: A str representing the name of the SSH keypair to login with.
      user: A str representing the name of the user to login as.
      is_verbose: A bool indicating if we should print the command we execute to
        enable root login to stdout.
    """
    AppScaleLogger.log('Root login not enabled - enabling it now.')

    create_root_keys = 'sudo touch /root/.ssh/authorized_keys'
    cls.ssh(host, keyname, create_root_keys, is_verbose, user=user)

    set_permissions = 'sudo chmod 600 /root/.ssh/authorized_keys'
    cls.ssh(host, keyname, set_permissions, is_verbose, user=user)

    temp_file = cls.ssh(host, keyname, 'mktemp', is_verbose, user=user)

    merge_to_tempfile = 'sudo sort -u ~/.ssh/authorized_keys '\
      '/root/.ssh/authorized_keys -o {}'.format(temp_file)
    cls.ssh(host, keyname, merge_to_tempfile, is_verbose, user=user)

    overwrite_root_keys = "sudo sed -n '/.*Please login/d; "\
      "w/root/.ssh/authorized_keys' {}".format(temp_file)
    cls.ssh(host, keyname, overwrite_root_keys, is_verbose, user=user)

    remove_tempfile = 'rm -f {0}'.format(temp_file)
    cls.ssh(host, keyname, remove_tempfile, is_verbose, user=user)
    return
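
The remote sort -u / sed sequence boils down to a de-duplicating merge of two authorized_keys files. A pure-Python sketch of that merge, run locally rather than over SSH:

def merge_key_files(user_keys_path, root_keys_path):
  """Combines the user's and root's authorized_keys, drops duplicates and
  blank lines, and rewrites root's file - the local analogue of the
  sort -u merge performed remotely above."""
  merged = set()
  for path in (user_keys_path, root_keys_path):
    with open(path) as handle:
      merged.update(line.strip() for line in handle if line.strip())

  with open(root_keys_path, 'w') as handle:
    handle.write('\n'.join(sorted(merged)) + '\n')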
Example #48
0
    def terminate_instances(cls, options):
        """Stops all services running in an AppScale deployment, and in cloud
    deployments, also powers off the instances previously spawned.

    Raises:
      AppScaleException: If AppScale is not running, and thus can't be
      terminated.
    """
        if not os.path.exists(
                LocalState.get_secret_key_location(options.keyname)):
            raise AppScaleException(
                "AppScale is not running with the keyname {0}".format(
                    options.keyname))

        infrastructure = LocalState.get_infrastructure(options.keyname)

        # If the user is on a cloud deployment, and not backing up their data to
        # persistent disks, warn them before shutting down AppScale.
        # Also, if we're in developer mode, skip the warning.
        if infrastructure != "xen" and not LocalState.are_disks_used(
                options.keyname) and not options.test:
            LocalState.ensure_user_wants_to_terminate()

        if infrastructure in InfrastructureAgentFactory.VALID_AGENTS:
            RemoteHelper.terminate_cloud_infrastructure(
                options.keyname, options.verbose)
        else:
            RemoteHelper.terminate_virtualized_cluster(options.keyname,
                                                       options.verbose)

        LocalState.cleanup_appscale_files(options.keyname)
        AppScaleLogger.success(
            "Successfully shut down your AppScale deployment.")
Example #49
0
  def enable_root_login(cls, host, keyname, infrastructure, is_verbose):
    """Logs into the named host and alters its ssh configuration to enable the
    root user to directly log in.

    Args:
      host: A str representing the host to enable root logins on.
      keyname: A str representing the name of the SSH keypair to login with.
      infrastructure: A str representing the name of the cloud infrastructure
        we're running on.
      is_verbose: A bool indicating if we should print the command we execute to
        enable root login to stdout.
    """
    # First, see if we need to enable root login at all (some VMs have it
    # already enabled).
    try:
      # The user was redacted in the source; 'root' is assumed here, since the
      # point of this probe is to check whether root SSH access already works.
      output = cls.ssh(host, keyname, 'ls', is_verbose, user='root')
    except ShellException as exception:
      # Google Compute Engine creates a user with the same name as the currently
      # logged-in user, so log in as that user to enable root login.
      if infrastructure == "gce":
        cls.merge_authorized_keys(host, keyname, getpass.getuser(),
          is_verbose)
        return
      else:
        raise exception

    # Amazon EC2 rejects a root login request and tells the user to log in as
    # the ubuntu user, so do that to enable root login.
    if re.search(cls.LOGIN_AS_UBUNTU_USER, output):
      cls.merge_authorized_keys(host, keyname, 'ubuntu', is_verbose)
    else:
      AppScaleLogger.log("Root login already enabled - not re-enabling it.")
Example #50
0
  def create_network(self, parameters):
    """ Creates a new network in Google Compute Engine with the specified name.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine, and an additional key indicating the name of the
        network that we should create in GCE.
    Returns:
      The URL corresponding to the name of the network that was created, for use
      with binding this network to one or more firewalls.
    """
    gce_service, credentials = self.open_connection(parameters)
    http = httplib2.Http()
    auth_http = credentials.authorize(http)
    request = gce_service.networks().insert(
      project=parameters[self.PARAM_PROJECT],
      body={
        "name" : parameters[self.PARAM_GROUP],
        "description" : "Network used for AppScale instances",
        "IPv4Range" : "10.240.0.0/16"
      }
    )
    response = request.execute(http=auth_http)
    AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])
    self.ensure_operation_succeeds(gce_service, auth_http, response,
      parameters[self.PARAM_PROJECT])
    return response['targetLink']
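
The example above uses the older apiclient/oauth2client stack. With the current google-api-python-client and google-auth packages, the same insert looks roughly like the sketch below; the project and network names are placeholders, and the legacy IPv4Range body may be rejected on newer GCP projects, which only allow subnet-mode networks.

import google.auth
from googleapiclient.discovery import build

def create_network_sketch(project, name):
  credentials, _ = google.auth.default()
  compute = build('compute', 'v1', credentials=credentials)
  operation = compute.networks().insert(
    project=project,
    body={
      'name': name,
      'description': 'Network used for AppScale instances',
      'IPv4Range': '10.240.0.0/16',
    }).execute()
  # The insert returns an Operation; its targetLink points at the network.
  return operation.get('targetLink')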
Example #51
0
  def copy_app_to_host(cls, app_location, keyname, is_verbose):
    """Copies the given application to a machine running the Login service
    within an AppScale deployment.

    Args:
      app_location: The location on the local filesystem where the application
        can be found.
      keyname: The name of the SSH keypair that uniquely identifies this
        AppScale deployment.
      is_verbose: A bool that indicates if we should print the commands we exec
        to copy the app to the remote host to stdout.

    Returns:
      A str corresponding to the location on the remote filesystem where the
        application was copied to.
    """
    app_id = AppEngineHelper.get_app_id_from_app_config(app_location)

    AppScaleLogger.log("Tarring application")
    rand = str(uuid.uuid4()).replace('-', '')[:8]
    local_tarred_app = "{0}/appscale-app-{1}-{2}.tar.gz".format(tempfile.gettempdir(),
      app_id, rand)
    LocalState.shell("cd '{0}' && COPYFILE_DISABLE=1 tar -czhf {1} --exclude='*.pyc' *".format(
      app_location, local_tarred_app), is_verbose)

    AppScaleLogger.log("Copying over application")
    remote_app_tar = "{0}/{1}.tar.gz".format(cls.REMOTE_APP_DIR, app_id)
    cls.scp(LocalState.get_login_host(keyname), keyname, local_tarred_app,
      remote_app_tar, is_verbose)

    os.remove(local_tarred_app)
    return remote_app_tar
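
The tar step can also be done with the standard tarfile module instead of shelling out; this sketch follows symlinks (like tar -h) and skips *.pyc files, matching the command above.

import os
import tarfile
import tempfile
import uuid

def tar_app_sketch(app_location, app_id):
  rand = uuid.uuid4().hex[:8]
  tar_path = os.path.join(tempfile.gettempdir(),
                          'appscale-app-{0}-{1}.tar.gz'.format(app_id, rand))

  def skip_pyc(tarinfo):
    # Returning None from the filter drops the member from the archive.
    return None if tarinfo.name.endswith('.pyc') else tarinfo

  with tarfile.open(tar_path, 'w:gz', dereference=True) as tar:
    for entry in os.listdir(app_location):
      tar.add(os.path.join(app_location, entry), arcname=entry,
              filter=skip_pyc)
  return tar_path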
Example #52
0
  def add_access_config(self, parameters, instance_id, static_ip):
    """ Instructs Google Compute Engine to use the given IP address as the
    public IP for the named instance.

    This assumes that there is no existing public IP address for the named
    instance. If this is not the case, callers should use delete_access_config
    first to remove it.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine (for example, the project and zone used below).
      instance_id: A str naming the running instance that the new public IP
        address should be added to.
      static_ip: A str naming the already allocated static IP address that
        will be used for the named instance.
    """
    gce_service, credentials = self.open_connection(parameters)
    http = httplib2.Http()
    auth_http = credentials.authorize(http)
    request = gce_service.instances().addAccessConfig(
      project=parameters[self.PARAM_PROJECT],
      instance=instance_id,
      networkInterface="nic0",
      zone=parameters[self.PARAM_ZONE],
      body={
        "kind": "compute#accessConfig",
        "type" : "ONE_TO_ONE_NAT",
        "name" : "External NAT",
        "natIP" : static_ip
      }
    )
    response = request.execute(http=auth_http)
    AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])
Example #53
0
  def setUp(self):
    # mock out logging, since it clutters up the test output
    flexmock(AppScaleLogger)
    AppScaleLogger.should_receive('log').and_return()

    # next, pretend our ec2 credentials are properly set
    for credential in EC2Agent.REQUIRED_CREDENTIALS:
      os.environ[credential] = "baz"

    # finally, pretend that the EC2 image we want to use exists
    fake_ec2 = flexmock(name="fake_ec2")
    fake_ec2.should_receive('get_image').with_args('ami-ABCDEFG') \
      .and_return()
    flexmock(boto)
    boto.should_receive('connect_ec2').with_args('baz', 'baz').and_return(fake_ec2)

    # add in some instance variables so that we don't have
    # a lot of IP addresses everywhere
    self.blank_input_yaml = None
    self.default_options = {
      'table' : 'cassandra'
    }
    self.ip_1 = '192.168.1.1'
    self.ip_2 = '192.168.1.2'
    self.ip_3 = '192.168.1.3'
    self.ip_4 = '192.168.1.4'
    self.ip_5 = '192.168.1.5'
    self.ip_6 = '192.168.1.6'
    self.ip_7 = '192.168.1.7'
    self.ip_8 = '192.168.1.8'
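
The flexmock pattern in this setUp can be reduced to a few lines: stub boto.connect_ec2 so no real AWS call is made and hand back a fake connection. A minimal sketch, assuming boto and flexmock are installed; the credential strings and AMI id are the same placeholders used above.

import boto
from flexmock import flexmock

def test_connect_ec2_is_mocked():
  fake_conn = flexmock(name='fake_ec2')
  fake_conn.should_receive('get_image').with_args('ami-ABCDEFG').and_return()

  flexmock(boto)
  boto.should_receive('connect_ec2').with_args('baz', 'baz') \
    .and_return(fake_conn)

  conn = boto.connect_ec2('baz', 'baz')   # returns the fake, not a real client
  conn.get_image('ami-ABCDEFG')           # satisfied by the expectation above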
Example #54
0
  def does_address_exist(self, parameters):
    """ Queries Google Compute Engine to see if the specified static IP address
    exists for this user.

    Args:
      parameters: A dict with keys for each parameter needed to connect to
        Google Compute Engine, and an additional key indicating the name of the
        static IP address that we should check for existence.
    Returns:
      True if the named address exists, and False otherwise.
    """
    gce_service, credentials = self.open_connection(parameters)
    http = httplib2.Http()
    auth_http = credentials.authorize(http)
    request = gce_service.addresses().list(
      project=parameters[self.PARAM_PROJECT],
      filter="address eq {0}".format(parameters[self.PARAM_STATIC_IP]),
      region=parameters[self.PARAM_REGION]
    )
    response = request.execute(http=auth_http)
    AppScaleLogger.verbose(str(response), parameters[self.PARAM_VERBOSE])

    if 'items' in response:
      return True
    else:
      return False
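
A comparable check with the current client library, condensed to the part that matters: the address exists exactly when the filtered list response contains an 'items' key. The compute argument is assumed to be a client built as in the earlier network sketch, and the filter syntax varies between Compute API versions.

def address_exists_sketch(compute, project, region, static_ip):
  response = compute.addresses().list(
    project=project,
    region=region,
    filter='address = "{0}"'.format(static_ip)).execute()
  return 'items' in response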
Example #55
0
  def confirm_or_abort(cls, message):
    AppScaleLogger.warn(message)
    confirm = raw_input("Are you sure you want to do this? (Y/N) ")
    if confirm.lower() == 'y' or confirm.lower() == 'yes':
      return
    else:
      raise AppScaleException('AppScale termination was cancelled.')
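
raw_input only exists on Python 2; on Python 3 the same prompt would use input(). A standalone sketch that drops the AppScale logger and exception types so it runs on its own:

def confirm_or_abort_sketch(message):
  """Python 3 variant of the prompt above: input() replaces raw_input(),
  and anything other than an explicit yes aborts."""
  print(message)
  confirm = input("Are you sure you want to do this? (Y/N) ")
  if confirm.lower() not in ('y', 'yes'):
    raise RuntimeError('Operation was cancelled.')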