def test_copy_deployment_credentials_in_cloud(self):
  """Exercises RemoteHelper.copy_deployment_credentials for an EC2
  deployment and then for a GCE deployment (which also copies OAuth
  credentials)."""
  options = flexmock(
    keyname='key1',
    infrastructure='ec2',
    verbose=True,
  )
  local_state = flexmock(LocalState)
  remote_helper = flexmock(RemoteHelper)
  # Stub out every local-filesystem lookup and remote transfer the helper
  # performs; the test only cares that the call completes.
  local_state.should_receive('get_secret_key_location').and_return()
  local_state.should_receive('get_key_path_from_name').and_return()
  local_state.should_receive('get_certificate_location').and_return()
  local_state.should_receive('get_private_key_location').and_return()

  remote_helper.should_receive('scp').and_return()
  local_state.should_receive('generate_ssl_cert').and_return()
  # Fake the subprocess used to hash the certificate.
  popen_object = flexmock(communicate=lambda: ['hash_id'])
  flexmock(subprocess).should_receive('Popen').and_return(popen_object)
  remote_helper.should_receive('ssh').and_return()
  flexmock(AppScaleLogger).should_receive('log').and_return()

  RemoteHelper.copy_deployment_credentials('public1', options)

  # GCE with OAuth credentials additionally copies the oauth2 storage file.
  flexmock(GCEAgent).should_receive('get_secrets_type').\
    and_return(CredentialTypes.OAUTH)
  flexmock(os.path).should_receive('exists').and_return(True)

  options = flexmock(
    keyname='key1',
    infrastructure='gce',
    verbose=True,
  )
  local_state.should_receive('get_oauth2_storage_location').and_return()

  RemoteHelper.copy_deployment_credentials('public1', options)
def test_start_remote_appcontroller(self):
  """Checks that start_remote_appcontroller issues the expected remote
  shell commands and then waits until the AppController port opens."""
  # mock out removing the old json file
  local_state = flexmock(LocalState)
  local_state.should_receive('shell')\
    .with_args(re.compile('^ssh'),False,5,stdin=re.compile('rm -rf'))\
    .and_return()

  # assume we started monit on public1 fine
  local_state.should_receive('shell')\
    .with_args(re.compile('^ssh'), False, 5, stdin=re.compile('monit'))\
    .and_return()

  # and assume we started the AppController on public1 fine
  local_state.should_receive('shell').with_args(
    re.compile('^ssh'), False, 5,
    stdin='service appscale-controller start')

  # finally, assume the appcontroller comes up after a few tries
  # assume that ssh comes up on the third attempt
  fake_socket = flexmock(name='fake_socket')
  fake_socket.should_receive('connect').with_args(('public1',
    AppControllerClient.PORT)).and_raise(Exception) \
    .and_raise(Exception).and_return(None)
  socket.should_receive('socket').and_return(fake_socket)

  # Mock out additional remote calls (fully-spelled ssh command lines must
  # match what LocalState.shell is invoked with).
  local_state.should_receive('shell').with_args('ssh -i /root/.appscale/bookey.key -o LogLevel=quiet -o NumberOfPasswordPrompts=0 -o StrictHostkeyChecking=no -o UserKnownHostsFile=/dev/null root@public1 ', False, 5, stdin='cp /root/appscale/AppController/scripts/appcontroller /etc/init.d/').and_return()
  local_state.should_receive('shell').with_args('ssh -i /root/.appscale/bookey.key -o LogLevel=quiet -o NumberOfPasswordPrompts=0 -o StrictHostkeyChecking=no -o UserKnownHostsFile=/dev/null root@public1 ', False, 5, stdin='chmod +x /etc/init.d/appcontroller').and_return()
  local_state.should_receive('shell').with_args('ssh -i /root/.appscale/boobazblargfoo.key -o LogLevel=quiet -o NumberOfPasswordPrompts=0 -o StrictHostkeyChecking=no -o UserKnownHostsFile=/dev/null root@elastic-ip ', False, 5, stdin='chmod +x /etc/init.d/appcontroller').and_return()

  RemoteHelper.start_remote_appcontroller('public1', 'bookey', False)
def setup_appscale_compatibility_mocks(self):
  """Mocks the remote shell checks used to verify a node is compatible
  with this version of AppScale."""
  # Assume the config directory exists.
  self.local_state.should_receive('shell').with_args(re.compile('ssh'),
    False, 5, stdin=re.compile(RemoteHelper.CONFIG_DIR)).and_return()

  # Report a host version that matches the tools' version.
  flexmock(RemoteHelper)
  RemoteHelper.should_receive('get_host_appscale_version').\
    and_return(APPSCALE_VERSION)

  # Assume we are using a supported database.
  db_file = '{}/{}/{}'.\
    format(RemoteHelper.CONFIG_DIR, APPSCALE_VERSION, 'cassandra')
  self.local_state.should_receive('shell').with_args(re.compile('ssh'),
    False, 5, stdin=re.compile(db_file))
def test_rsync_files_from_dir_that_does_exist(self):
  """Copying from an existing, well-formed local directory should succeed
  when every rsync invocation completes cleanly."""
  # The source directory the user points us at does exist.
  flexmock(os.path)
  os.path.should_receive('exists').with_args(
    '/tmp/booscale-local').and_return(True)

  # Each rsync the helper runs returns successfully, in order.
  fake_local_state = flexmock(LocalState)
  fake_local_state.should_receive('shell').with_args(
    re.compile('^rsync'), False).and_return().ordered()

  RemoteHelper.rsync_files('public1', 'booscale', '/tmp/booscale-local',
                           False)
def test_start_all_nodes_reattach(self):
  """start_all_nodes should reuse the cached node info when reattaching to
  an existing deployment instead of spawning new instances."""
  self.node_layout = NodeLayout(self.reattach_options)
  self.assertNotEqual([], self.node_layout.nodes)

  # The cloud agent factory hands back our fake agent for 'euca'.
  fake_agent = FakeAgent()
  flexmock(factory.InfrastructureAgentFactory). \
    should_receive('create_agent'). \
    with_args('euca'). \
    and_return(fake_agent)

  # Locally cached deployment metadata supplies the node info.
  LocalState.should_receive('get_host_with_role').and_return(IP_1)
  LocalState.should_receive('get_local_nodes_info') \
    .and_return(self.reattach_node_info)

  RemoteHelper.start_all_nodes(self.reattach_options, self.node_layout)
def setup_appscale_compatibility_mocks(self):
  """Stubs out the remote checks that validate a node is AppScale-compatible."""
  # Pretend the remote config directory is present.
  config_dir_pattern = re.compile(RemoteHelper.CONFIG_DIR)
  self.local_state.should_receive('shell').with_args(
    re.compile('ssh'), False, 5, stdin=config_dir_pattern).and_return()

  # Report a host AppScale version that matches the tools' own version.
  flexmock(RemoteHelper)
  RemoteHelper.should_receive('get_host_appscale_version').and_return(
    APPSCALE_VERSION)

  # Pretend the marker file for a supported database exists remotely.
  supported_db_marker = '{}/{}/{}'.format(
    RemoteHelper.CONFIG_DIR, APPSCALE_VERSION, 'cassandra')
  self.local_state.should_receive('shell').with_args(
    re.compile('ssh'), False, 5, stdin=re.compile(supported_db_marker))
def test_create_user_accounts(self):
  """Verifies create_user_accounts creates the user and XMPP accounts via
  SOAP when they do not already exist.

  NOTE(review): several literals below ('*****@*****.**', the final
  SOAPProxy call) appear redacted/mangled in this source — confirm against
  the original test before relying on them.
  """
  # mock out reading the secret key
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_call('open')  # set the fall-through

  secret_key_location = LocalState.LOCAL_APPSCALE_PATH + "bookey.secret"
  fake_secret = flexmock(name="fake_secret")
  fake_secret.should_receive('read').and_return('the secret')
  builtins.should_receive('open').with_args(secret_key_location, 'r') \
    .and_return(fake_secret)

  # mock out reading the locations.json file, and slip in our own json
  flexmock(os.path)
  os.path.should_call('exists')  # set the fall-through
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_json_location('bookey')).and_return(True)

  fake_nodes_json = flexmock(name="fake_nodes_json")
  fake_nodes_json.should_receive('read').and_return(
    json.dumps({"node_info": [{
      "public_ip": "public1",
      "private_ip": "private1",
      "roles": ["shadow"]
    }]}))
  builtins.should_receive('open').with_args(
    LocalState.get_locations_json_location('bookey'), 'r') \
    .and_return(fake_nodes_json)

  # Mock out SOAP interactions with the AppController.
  fake_appcontroller = flexmock(name="fake_appcontroller")
  fake_appcontroller.should_receive('does_user_exist').with_args(
    '*****@*****.**', 'the secret').and_return('false')
  fake_appcontroller.should_receive('create_user').with_args(
    '*****@*****.**', str, 'xmpp_user', 'the secret').and_return('true')
  fake_appcontroller.should_receive('does_user_exist').with_args(
    'boo@public1', 'the secret').and_return('false')
  fake_appcontroller.should_receive('create_user').with_args(
    'boo@public1', str, 'xmpp_user', 'the secret').and_return('true')
  fake_appcontroller.should_receive('get_property').\
    with_args('login', 'the secret').and_return('{"login":"******"}')
  flexmock(SOAPpy)
  SOAPpy.should_receive('SOAPProxy').with_args(
    'https://*****:*****@foo.goo', 'password', 'public1', 'bookey')
def test_create_user_accounts(self):
  """Verifies create_user_accounts creates the user and XMPP accounts via
  SOAP when they do not already exist (jobs-keyed node_info variant).

  NOTE(review): several literals below ('*****@*****.**', the final
  SOAPProxy call) appear redacted/mangled in this source — confirm against
  the original test before relying on them.
  """
  # mock out reading the secret key
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_call('open')  # set the fall-through

  secret_key_location = LocalState.LOCAL_APPSCALE_PATH + "bookey.secret"
  fake_secret = flexmock(name="fake_secret")
  fake_secret.should_receive('read').and_return('the secret')
  builtins.should_receive('open').with_args(secret_key_location, 'r') \
    .and_return(fake_secret)

  # mock out reading the locations.json file, and slip in our own json
  flexmock(os.path)
  os.path.should_call('exists')  # set the fall-through
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_json_location('bookey')).and_return(True)

  fake_nodes_json = flexmock(name="fake_nodes_json")
  fake_nodes_json.should_receive('read').and_return(
    json.dumps({
      "node_info": [{
        "public_ip": "public1",
        "private_ip": "private1",
        "jobs": ["shadow", "login"]
      }]
    }))
  builtins.should_receive('open').with_args(
    LocalState.get_locations_json_location('bookey'), 'r') \
    .and_return(fake_nodes_json)

  # Mock out SOAP interactions with the AppController.
  fake_appcontroller = flexmock(name="fake_appcontroller")
  fake_appcontroller.should_receive('does_user_exist').with_args(
    '*****@*****.**', 'the secret').and_return('false')
  fake_appcontroller.should_receive('create_user').with_args(
    '*****@*****.**', str, 'xmpp_user', 'the secret').and_return('true')
  fake_appcontroller.should_receive('does_user_exist').with_args(
    'boo@public1', 'the secret').and_return('false')
  fake_appcontroller.should_receive('create_user').with_args(
    'boo@public1', str, 'xmpp_user', 'the secret').and_return('true')
  flexmock(SOAPpy)
  SOAPpy.should_receive('SOAPProxy').with_args(
    'https://*****:*****@foo.goo', 'password', 'public1', 'bookey')
def test_start_remote_appcontroller(self):
  """The helper should start the controller service over SSH and then block
  until the AppController port accepts a connection."""
  fake_local_state = flexmock(LocalState)

  # The systemctl invocation on public1 succeeds.
  fake_local_state.should_receive('shell').with_args(
    re.compile('^ssh'), False, 5,
    stdin='systemctl start appscale-controller')

  # The controller port only opens on the third connection attempt.
  fake_socket = flexmock(name='fake_socket')
  fake_socket.should_receive('connect') \
    .with_args(('public1', AppControllerClient.PORT)) \
    .and_raise(Exception).and_raise(Exception).and_return(None)
  socket.should_receive('socket').and_return(fake_socket)

  RemoteHelper.start_remote_appcontroller('public1', 'bookey', False)
def test_wait_for_machines_to_finish_loading(self):
  """wait_for_machines_to_finish_loading should poll every node until each
  reports it is done initializing."""
  # mock out reading the secret key
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_call('open')  # set the fall-through

  secret_key_location = LocalState.LOCAL_APPSCALE_PATH + "bookey.secret"
  fake_secret = flexmock(name="fake_secret")
  fake_secret.should_receive('read').and_return('the secret')
  builtins.should_receive('open').with_args(secret_key_location, 'r') \
    .and_return(fake_secret)

  # mock out getting all the ips in the deployment from the head node
  fake_soap = flexmock(name='fake_soap')
  fake_soap.should_receive('get_all_public_ips').with_args('the secret') \
    .and_return(json.dumps(['public1', 'public2']))
  role_info = [
    {
      'public_ip' : 'public1',
      'private_ip' : 'private1',
      'jobs' : ['shadow', 'db_master']
    },
    {
      'public_ip' : 'public2',
      'private_ip' : 'private2',
      'jobs' : ['appengine']
    }
  ]
  fake_soap.should_receive('get_role_info').with_args('the secret') \
    .and_return(json.dumps(role_info))

  # also, let's say that our machines aren't running the first time we ask,
  # but that they are the second time
  fake_soap.should_receive('is_done_initializing').with_args('the secret') \
    .and_return(False).and_return(True)

  # Both nodes' SOAP endpoints resolve to the same fake.
  flexmock(SOAPpy)
  SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
    .and_return(fake_soap)
  SOAPpy.should_receive('SOAPProxy').with_args('https://public2:17443') \
    .and_return(fake_soap)

  RemoteHelper.wait_for_machines_to_finish_loading('public1', 'bookey')
def can_ssh_to_ip(self, ip, keyname, is_verbose):
  """ Attempts to SSH into the machine located at the given IP address with
  the given SSH key.

  Args:
    ip: The IP address to attempt to SSH into.
    keyname: The name of the SSH key that uniquely identifies this AppScale
      deployment.
    is_verbose: A bool that indicates if we should print the SSH command we
      execute to stdout.

  Returns:
    A bool that indicates whether or not the given SSH key can log in
    without a password to the given machine.
  """
  # Run a trivial remote command; a ShellException means the key (or the
  # machine) is not usable.
  try:
    RemoteHelper.ssh(ip, keyname, 'ls', is_verbose, user='******')
  except ShellException:
    return False
  return True
def test_copy_local_metadata(self):
  """copy_local_metadata should scp the locations files and the secret key
  to the remote machine."""
  fake_local_state = flexmock(LocalState)

  # The locations yaml/json files are copied successfully.
  yaml_path = '{}/locations-bookey.yaml'.format(RemoteHelper.CONFIG_DIR)
  fake_local_state.should_receive('shell').with_args(
    re.compile('^scp .*{}'.format(yaml_path)), False, 5)

  json_path = '{}/locations-bookey.json'.format(RemoteHelper.CONFIG_DIR)
  fake_local_state.should_receive('shell').with_args(
    re.compile('^scp .*{}'.format(json_path)), False, 5)

  fake_local_state.should_receive('shell').with_args(
    re.compile('^scp .*/root/.appscale/locations-bookey.json'), False, 5)

  # The secret file is copied successfully as well.
  fake_local_state.should_receive('shell').with_args(
    re.compile('^scp .*bookey.secret'), False, 5)

  RemoteHelper.copy_local_metadata('public1', 'bookey', False)
def relocate_app(cls, options):
  """Instructs AppScale to move the named application to a different port.

  Args:
    options: A Namespace that has fields for each parameter that can be
      passed in via the command-line interface.
  Raises:
    AppScaleException: If the named application isn't running in this
      AppScale cloud, if the destination port is in use by a different
      application, or if the AppController rejects the request to relocate
      the application (in which case it includes the reason why the
      rejection occurred).
  """
  load_balancer_ip = LocalState.get_host_with_role(
    options.keyname, 'load_balancer')
  acc = AppControllerClient(
    load_balancer_ip, LocalState.get_secret_key(options.keyname))

  # Versions are keyed "<project>_<service>_<version>" in the info map.
  version_key = '_'.join([options.appname, DEFAULT_SERVICE, DEFAULT_VERSION])
  app_info_map = acc.get_app_info_map()
  if version_key not in app_info_map:
    raise AppScaleException("The given application, {0}, is not currently " \
      "running in this AppScale cloud, so we can't move it to a different " \
      "port.".format(options.appname))

  try:
    login_host = acc.get_property('login')['login']
  except KeyError:
    raise AppControllerException('login property not found')

  acc.relocate_version(version_key, options.http_port, options.https_port)
  AppScaleLogger.success(
    'Successfully issued request to move {0} to ports {1} and {2}'.format(
      options.appname, options.http_port, options.https_port))
  # Block until the app actually serves on the new HTTP port.
  RemoteHelper.sleep_until_port_is_open(login_host, options.http_port)
  AppScaleLogger.success(
    'Your app serves unencrypted traffic at: http://{0}:{1}'.format(
      login_host, options.http_port))
  AppScaleLogger.success(
    'Your app serves encrypted traffic at: https://{0}:{1}'.format(
      login_host, options.https_port))
def add_keypair(cls, options):
  """Sets up passwordless SSH login to the machines used in a virtualized
  cluster deployment.

  Args:
    options: A Namespace that has fields for each parameter that can be
      passed in via the command-line interface.
  Raises:
    AppScaleException: If any of the machines named in the ips_layout are
      not running, or do not have the SSH daemon running.
  """
  LocalState.require_ssh_commands(options.auto)
  LocalState.make_appscale_directory()

  path = LocalState.LOCAL_APPSCALE_PATH + options.keyname
  if options.add_to_existing:
    private_key = path
  else:
    _, private_key = LocalState.generate_rsa_key(options.keyname)

  if options.auto:
    # NOTE(review): relies on argparse.Namespace supporting `in`
    # (membership over attribute names) — confirm the options type.
    if 'root_password' in options:
      AppScaleLogger.log("Using the provided root password to log into " + \
        "your VMs.")
      password = options.root_password
    else:
      AppScaleLogger.log("Please enter the password for the root user on" + \
        " your VMs:")
      password = getpass.getpass()

  node_layout = NodeLayout(options)

  all_ips = [node.public_ip for node in node_layout.nodes]
  for ip in all_ips:
    # first, make sure ssh is actually running on the host machine
    if not RemoteHelper.is_port_open(ip, RemoteHelper.SSH_PORT):
      raise AppScaleException("SSH does not appear to be running at {0}. " \
        "Is the machine at {0} up and running? Make sure your IPs are " \
        "correct!".format(ip))

    # next, set up passwordless ssh
    AppScaleLogger.log("Executing ssh-copy-id for host: {0}".format(ip))
    if options.auto:
      # Drive ssh-copy-id through the expect script with the password.
      LocalState.shell("{0} root@{1} {2} {3}".format(cls.EXPECT_SCRIPT, ip,
        private_key, password))
    else:
      LocalState.shell("ssh-copy-id -i {0} root@{1}".format(private_key, ip))

  AppScaleLogger.success("Generated a new SSH key for this deployment " + \
    "at {0}".format(private_key))
def test_start_head_node(self):
  """start_head_node should generate keys, spawn the head node via the
  cloud agent, copy credentials, start the AppController, and send it the
  deployment parameters."""
  self.options = flexmock(
    infrastructure='public cloud',
    group='group',
    machine='vm image',
    instance_type='instance type',
    keyname='keyname',
    table='cassandra',
    verbose=False,
    test=False,
    use_spot_instances=False,
    zone='zone',
    static_ip=None,
    replication=None,
    appengine=None,
    autoscale=None,
    user_commands=[],
    flower_password='',
    max_memory='X',
    ips=ONE_NODE_CLOUD
  )
  self.node_layout = NodeLayout(self.options)

  # Key/secret generation is stubbed out.
  flexmock(LocalState).\
    should_receive("generate_secret_key").\
    with_args(self.options.keyname).\
    and_return('some secret key')
  flexmock(LocalState).\
    should_receive("get_key_path_from_name").\
    with_args(self.options.keyname).\
    and_return('some key path')
  flexmock(NodeLayout).should_receive('head_node').\
    and_return(Node('some IP', 'cloud'))

  # The agent factory yields our fake agent for this infrastructure.
  fake_agent = FakeAgent()
  flexmock(factory.InfrastructureAgentFactory).\
    should_receive('create_agent').\
    with_args('public cloud').\
    and_return(fake_agent)

  self.additional_params = {}
  deployment_params = {}
  flexmock(LocalState).\
    should_receive('generate_deployment_params').\
    with_args(self.options, self.node_layout, self.additional_params).\
    and_return(deployment_params)

  flexmock(AppScaleLogger).should_receive('log').and_return()
  flexmock(AppScaleLogger).should_receive('remote_log_tools_state').\
    and_return()
  flexmock(time).should_receive('sleep').and_return()

  # All remote setup steps against the new head node are stubbed out.
  flexmock(RemoteHelper).\
    should_receive('copy_deployment_credentials').\
    with_args('some IP', self.options).\
    and_return()
  flexmock(RemoteHelper).\
    should_receive('run_user_commands').\
    with_args('some IP', self.options.user_commands,
              self.options.keyname, self.options.verbose).\
    and_return()
  flexmock(RemoteHelper).\
    should_receive('start_remote_appcontroller').\
    with_args('some IP', self.options.keyname, self.options.verbose).\
    and_return()

  layout = {}
  flexmock(NodeLayout).should_receive('to_list').and_return(layout)
  flexmock(AppControllerClient).\
    should_receive('set_parameters').\
    with_args(layout, deployment_params).\
    and_return()

  RemoteHelper.start_head_node(self.options, 'an ID', self.node_layout)
def test_appscale_in_one_node_virt_deployment_with_login_override(self):
  """run_instances on a single-node cluster deployment should honor a
  --login_host override.

  NOTE(review): the ips_layout YAML literal's original line breaks were
  lost in this source; reconstructed one key per line — confirm.
  """
  # let's say that appscale isn't already running
  self.local_state.should_receive('ensure_appscale_isnt_running').and_return()
  self.local_state.should_receive('make_appscale_directory').and_return()
  self.local_state.should_receive('update_local_metadata').and_return()
  self.local_state.should_receive('get_local_nodes_info').and_return(json.loads(
    json.dumps([{
      "public_ip" : "1.2.3.4",
      "private_ip" : "1.2.3.4",
      "jobs" : ["shadow", "login"]
    }])))
  self.local_state.should_receive('get_secret_key').and_return("fookey")

  # All remote interactions with the (single) node are stubbed out.
  flexmock(RemoteHelper)
  RemoteHelper.should_receive('enable_root_ssh').and_return()
  RemoteHelper.should_receive('ensure_machine_is_compatible')\
    .and_return()
  RemoteHelper.should_receive('start_head_node')\
    .and_return(('1.2.3.4','i-ABCDEFG'))
  RemoteHelper.should_receive('sleep_until_port_is_open').and_return()
  RemoteHelper.should_receive('copy_local_metadata').and_return()
  RemoteHelper.should_receive('create_user_accounts').and_return()
  RemoteHelper.should_receive('wait_for_machines_to_finish_loading')\
    .and_return()
  RemoteHelper.should_receive('copy_deployment_credentials')

  flexmock(AppControllerClient)
  AppControllerClient.should_receive('does_user_exist').and_return(True)
  AppControllerClient.should_receive('is_initialized').and_return(True)
  AppControllerClient.should_receive('set_admin_role').and_return()

  # don't use a 192.168.X.Y IP here, since sometimes we set our virtual
  # machines to boot with those addresses (and that can mess up our tests).
  ips_layout = yaml.safe_load("""
    master : 1.2.3.4
    database: 1.2.3.4
    zookeeper: 1.2.3.4
    appengine: 1.2.3.4
    """)

  argv = [
    "--ips_layout", base64.b64encode(yaml.dump(ips_layout)),
    "--keyname", self.keyname,
    "--test",
    "--login_host", "www.booscale.com"
  ]

  options = ParseArgs(argv, self.function).args
  AppScaleTools.run_instances(options)
def terminate_instances(cls, options):
  """Stops all services running in an AppScale deployment, and in cloud
  deployments, also powers off the instances previously spawned.

  Raises:
    AppScaleException: If AppScale is not running, and thus can't be
    terminated.
  """
  try:
    infrastructure = LocalState.get_infrastructure(options.keyname)
  except IOError:
    raise AppScaleException("Cannot find AppScale's configuration for keyname {0}".
      format(options.keyname))

  if infrastructure == "xen" and options.terminate:
    raise AppScaleException("Terminate option is invalid for cluster mode.")

  if infrastructure == "xen" or not options.terminate:
    # We are in cluster mode: let's check if AppScale is running.
    if not os.path.exists(LocalState.get_secret_key_location(options.keyname)):
      raise AppScaleException("AppScale is not running with the keyname {0}".
        format(options.keyname))

    # Stop gracefully the AppScale deployment.
    try:
      RemoteHelper.terminate_virtualized_cluster(options.keyname,
                                                 options.clean)
    except (IOError, AppScaleException, AppControllerException,
            BadConfigurationException) as e:
      # Only tolerate the failure when we are about to terminate cloud
      # instances anyway; otherwise re-raise.
      if not (infrastructure in InfrastructureAgentFactory.VALID_AGENTS and
              options.terminate):
        raise
      if options.test:
        AppScaleLogger.warn(e)
      else:
        AppScaleLogger.verbose(e)
        # NOTE(review): the indentation of this interactive-confirmation
        # branch was lost in this source; reconstructed as running only in
        # non-test mode — confirm against the original.
        if isinstance(e, AppControllerException):
          response = raw_input(
            'AppScale may not have shut down properly, are you sure you want '
            'to continue terminating? (y/N) ')
        else:
          response = raw_input(
            'AppScale could not find the configuration files for this '
            'deployment, are you sure you want to continue terminating? '
            '(y/N) ')
        if response.lower() not in ['y', 'yes']:
          raise AppScaleException("Cancelled cloud termination.")

  # And if we are on a cloud infrastructure, terminate instances if
  # asked.
  if (infrastructure in InfrastructureAgentFactory.VALID_AGENTS and
      options.terminate):
    RemoteHelper.terminate_cloud_infrastructure(options.keyname)
  elif infrastructure in InfrastructureAgentFactory.VALID_AGENTS and not \
      options.terminate:
    AppScaleLogger.log("AppScale did not terminate any of your cloud "
                       "instances, to terminate them run 'appscale "
                       "down --terminate'")
  if options.clean:
    LocalState.clean_local_metadata(keyname=options.keyname)
def gather_logs(cls, options):
  """Collects logs from each machine in the currently running AppScale
  deployment.

  Args:
    options: A Namespace that has fields for each parameter that can be
      passed in via the command-line interface.
  Raises:
    AppScaleException: If the target location already exists.
  """
  location = os.path.abspath(options.location)
  # First, make sure that the place we want to store logs doesn't
  # already exist.
  if os.path.exists(location):
    raise AppScaleException("Can't gather logs, as the location you " + \
      "specified, {}, already exists.".format(location))

  load_balancer_ip = LocalState.get_host_with_role(
    options.keyname, 'load_balancer')
  secret = LocalState.get_secret_key(options.keyname)

  acc = AppControllerClient(load_balancer_ip, secret)

  try:
    all_ips = acc.get_all_public_ips()
  except socket.error:
    # Occurs when the AppController has failed.
    AppScaleLogger.warn("Couldn't get an up-to-date listing of the " + \
      "machines in this AppScale deployment. Using our locally cached " + \
      "info instead.")
    all_ips = LocalState.get_all_public_ips(options.keyname)

  # Get information about roles and public IPs
  # for creating navigation symlinks in gathered logs
  try:
    nodes_info = acc.get_role_info()
  except socket.error:
    # Occurs when the AppController has failed.
    AppScaleLogger.warn("Couldn't get an up-to-date nodes info. "
                        "Using our locally cached info instead.")
    nodes_info = LocalState.get_local_nodes_info(options.keyname)
  nodes_dict = {node['public_ip']: node for node in nodes_info}

  # do the mkdir after we get the secret key, so that a bad keyname will
  # cause the tool to crash and not create this directory
  os.mkdir(location)
  # make dir for private IP navigation links
  private_ips_dir = os.path.join(location, 'symlinks', 'private-ips')
  utils.mkdir(private_ips_dir)

  # The log paths that we collect logs from.
  log_paths = [
    {'remote': '/opt/cassandra/cassandra/logs/*', 'local': 'cassandra'},
    {'remote': '/var/log/appscale'},
    {'remote': '/var/log/haproxy.log*'},
    {'remote': '/var/log/kern.log*'},
    {'remote': '/var/log/nginx'},
    {'remote': '/var/log/rabbitmq/*', 'local': 'rabbitmq'},
    {'remote': '/var/log/syslog*'},
    {'remote': '/var/log/zookeeper'}
  ]

  failures = False
  for public_ip in all_ips:
    # Get the logs from each node, and store them in our local directory
    local_dir = os.path.join(location, public_ip)
    utils.mkdir(local_dir)
    local_link = os.path.join('..', '..', public_ip)

    # Create symlinks for easier navigation in gathered logs
    node_info = nodes_dict.get(public_ip)
    if node_info:
      private_ip_dir = os.path.join(private_ips_dir, node_info["private_ip"])
      os.symlink(local_link, private_ip_dir)
      for role in node_info['roles']:
        role_dir = os.path.join(location, 'symlinks', role)
        utils.mkdir(role_dir)
        os.symlink(local_link, os.path.join(role_dir, public_ip))

    for log_path in log_paths:
      sub_dir = local_dir

      # Some sources are collected into a named subdirectory.
      if 'local' in log_path:
        sub_dir = os.path.join(local_dir, log_path['local'])
        utils.mkdir(sub_dir)

      try:
        RemoteHelper.scp_remote_to_local(
          public_ip, options.keyname, log_path['remote'], sub_dir
        )
      except ShellException as shell_exception:
        # Best-effort: record the failure and keep collecting from the
        # remaining paths/hosts.
        failures = True
        AppScaleLogger.warn('Unable to collect logs from {} for host {}'.
                            format(log_path['remote'], public_ip))
        AppScaleLogger.verbose(
          'Encountered exception: {}'.format(str(shell_exception)))

  if failures:
    AppScaleLogger.log("Done copying to {}. There were failures while "
                       "collecting AppScale logs.".format(location))
  else:
    AppScaleLogger.success("Successfully collected all AppScale logs into "
                           "{}".format(location))
def valid_ssh_key(self, config, run_instances_opts):
  """ Checks if the tools can log into the head node with the current key.

  Args:
    config: A dictionary that includes the IPs layout (which itself is a
      dict mapping role names to IPs) and, optionally, the keyname to use.
    run_instances_opts: The arguments parsed from the appscale-run-instances
      command.

  Returns:
    A bool indicating whether or not the specified keyname can be used to
    log into the head node.

  Raises:
    BadConfigurationException: If the IPs layout was not a dictionary.
  """
  keyname = config['keyname']
  verbose = config.get('verbose', False)

  if not isinstance(config['ips_layout'], dict) and \
      not isinstance(config['ips_layout'], list):
    raise BadConfigurationException(
      'ips_layout should be a dictionary or list. Please fix it and try '
      'again.')

  ssh_key_location = self.APPSCALE_DIRECTORY + keyname + ".key"
  if not os.path.exists(ssh_key_location):
    return False

  try:
    all_ips = LocalState.get_all_public_ips(keyname)
  except BadConfigurationException:
    # If this is an upgrade from 3.1.0, there may not be a locations JSON.
    all_ips = self.get_ips_from_options(run_instances_opts.ips)

  # If a login node is defined, use that to communicate with other nodes.
  node_layout = NodeLayout(run_instances_opts)
  head_node = node_layout.head_node()
  if head_node is not None:
    # Push the key to the head node, then fan out ssh probes from there.
    remote_key = '{}/ssh.key'.format(RemoteHelper.CONFIG_DIR)
    try:
      RemoteHelper.scp(head_node.public_ip, keyname, ssh_key_location,
                       remote_key, verbose)
    except ShellException:
      return False

    for ip in all_ips:
      ssh_to_ip = 'ssh -i {key} -o StrictHostkeyChecking=no root@{ip} true'\
        .format(key=remote_key, ip=ip)
      try:
        RemoteHelper.ssh(head_node.public_ip, keyname, ssh_to_ip, verbose,
                         user='******')
      except ShellException:
        return False
    return True

  # No head node: probe each machine directly from this host.
  for ip in all_ips:
    if not self.can_ssh_to_ip(ip, keyname, verbose):
      return False

  return True
def run_instances(cls, options):
  """Starts a new AppScale deployment with the parameters given.

  Args:
    options: A Namespace that has fields for each parameter that can be
      passed in via the command-line interface.
  Raises:
    AppControllerException: If the AppController on the head node crashes.
      When this occurs, the message in the exception contains the reason
      why the AppController crashed.
    BadConfigurationException: If the user passes in options that are not
      sufficient to start an AppScale deployment (e.g., running on EC2 but
      not specifying the AMI to use), or if the user provides us
      contradictory options (e.g., running on EC2 but not specifying EC2
      credentials).
  """
  LocalState.make_appscale_directory()
  LocalState.ensure_appscale_isnt_running(options.keyname, options.force)

  node_layout = NodeLayout(options)

  if options.infrastructure:
    # In cloud mode, warn the user before deploying without persistent
    # disks (unless forced or testing).
    if (not options.test and not options.force and
        not (options.disks or node_layout.are_disks_used())):
      LocalState.ensure_user_wants_to_run_without_disks()

  # Only show major.minor in the banner.
  reduced_version = '.'.join(x for x in APPSCALE_VERSION.split('.')[:2])
  AppScaleLogger.log("Starting AppScale " + reduced_version)

  my_id = str(uuid.uuid4())
  AppScaleLogger.remote_log_tools_state(options, my_id, "started",
                                        APPSCALE_VERSION)

  head_node = node_layout.head_node()
  # Start VMs in cloud via cloud agent.
  if options.infrastructure:
    node_layout = RemoteHelper.start_all_nodes(options, node_layout)

  # Enables root logins and SSH access on the head node.
  RemoteHelper.enable_root_ssh(options, head_node.public_ip)
  AppScaleLogger.verbose("Node Layout: {}".format(node_layout.to_list()))

  # Ensure all nodes are compatible.
  RemoteHelper.ensure_machine_is_compatible(
    head_node.public_ip, options.keyname)

  # Use rsync to move custom code into the deployment.
  if options.rsync_source:
    AppScaleLogger.log("Copying over local copy of AppScale from {0}".
      format(options.rsync_source))
    RemoteHelper.rsync_files(head_node.public_ip, options.keyname,
                             options.rsync_source)

  # Start services on head node.
  RemoteHelper.start_head_node(options, my_id, node_layout)

  # Write deployment metadata to disk (facilitates SSH operations, etc.)
  db_master = node_layout.db_master().private_ip
  head_node = node_layout.head_node().public_ip
  LocalState.update_local_metadata(options, db_master, head_node)

  # Copy the locations.json to the head node
  RemoteHelper.copy_local_metadata(node_layout.head_node().public_ip,
                                   options.keyname)

  # Wait for services on head node to start.
  secret_key = LocalState.get_secret_key(options.keyname)
  acc = AppControllerClient(head_node, secret_key)
  try:
    while not acc.is_initialized():
      AppScaleLogger.log('Waiting for head node to initialize...')
      # This can take some time in particular the first time around, since
      # we will have to initialize the database.
      time.sleep(cls.SLEEP_TIME*3)
  except socket.error as socket_error:
    AppScaleLogger.warn('Unable to initialize AppController: {}'.
      format(socket_error.message))
    message = RemoteHelper.collect_appcontroller_crashlog(
      head_node, options.keyname)
    raise AppControllerException(message)

  # Set up admin account.
  try:
    # We don't need to have any exception information here: we do expect
    # some anyway while the UserAppServer is coming up.
    acc.does_user_exist("non-existent-user", True)
  except Exception:
    AppScaleLogger.log('UserAppServer not ready yet. Retrying ...')
    time.sleep(cls.SLEEP_TIME)

  if options.admin_user and options.admin_pass:
    AppScaleLogger.log("Using the provided admin username/password")
    username, password = options.admin_user, options.admin_pass
  elif options.test:
    AppScaleLogger.log("Using default admin username/password")
    username, password = LocalState.DEFAULT_USER, LocalState.DEFAULT_PASSWORD
  else:
    username, password = LocalState.get_credentials()

  RemoteHelper.create_user_accounts(username, password, head_node,
                                    options.keyname)
  acc.set_admin_role(username, 'true', cls.ADMIN_CAPABILITIES)

  # Wait for machines to finish loading and AppScale Dashboard to be
  # deployed.
  RemoteHelper.wait_for_machines_to_finish_loading(head_node,
                                                   options.keyname)

  try:
    login_host = acc.get_property('login')['login']
  except KeyError:
    raise AppControllerException('login property not found')

  RemoteHelper.sleep_until_port_is_open(
    login_host, RemoteHelper.APP_DASHBOARD_PORT)
  AppScaleLogger.success("AppScale successfully started!")
  AppScaleLogger.success(
    'View status information about your AppScale deployment at '
    'http://{}:{}'.format(login_host, RemoteHelper.APP_DASHBOARD_PORT))
  AppScaleLogger.remote_log_tools_state(options, my_id, "finished",
                                        APPSCALE_VERSION)
def valid_ssh_key(self, config, run_instances_opts):
  """ Checks if the tools can log into the head node with the current key.

  Args:
    config: A dictionary that includes the IPs layout (which itself is a
      dict mapping role names to IPs) and, optionally, the keyname to use.
    run_instances_opts: The arguments parsed from the appscale-run-instances
      command.

  Returns:
    A bool indicating whether or not the specified keyname can be used to
    log into the head node.

  Raises:
    BadConfigurationException: If the IPs layout was not a dictionary.
  """
  keyname = config['keyname']
  verbose = config.get('verbose', False)

  if not isinstance(config['ips_layout'], dict) and \
      not isinstance(config['ips_layout'], list):
    raise BadConfigurationException(
      'ips_layout should be a dictionary or list. Please fix it and try '
      'again.')

  ssh_key_location = self.APPSCALE_DIRECTORY + keyname + ".key"
  if not os.path.exists(ssh_key_location):
    return False

  try:
    all_ips = LocalState.get_all_public_ips(keyname)
  except BadConfigurationException:
    # If this is an upgrade from 3.1.0, there may not be a locations JSON.
    all_ips = self.get_ips_from_options(run_instances_opts.ips)

  # If a login node is defined, use that to communicate with other nodes.
  node_layout = NodeLayout(run_instances_opts)
  head_node = node_layout.head_node()
  if head_node is not None:
    # Push the key to the head node, then fan out ssh probes from there.
    remote_key = '{}/ssh.key'.format(RemoteHelper.CONFIG_DIR)
    try:
      RemoteHelper.scp(
        head_node.public_ip, keyname, ssh_key_location, remote_key,
        verbose)
    except ShellException:
      return False

    for ip in all_ips:
      ssh_to_ip = 'ssh -i {key} -o StrictHostkeyChecking=no root@{ip} true'\
        .format(key=remote_key, ip=ip)
      try:
        RemoteHelper.ssh(
          head_node.public_ip, keyname, ssh_to_ip, verbose, user='******')
      except ShellException:
        return False
    return True

  # No head node: probe each machine directly from this host.
  for ip in all_ips:
    if not self.can_ssh_to_ip(ip, keyname, verbose):
      return False

  return True
def upload_app(cls, options):
  """Uploads the given App Engine application into AppScale.

  Args:
    options: A Namespace that has fields for each parameter that can be
      passed in via the command-line interface.

  Returns:
    A tuple containing the host and port where the application is serving
    traffic from.

  Raises:
    AppEngineConfigException: If the application source is not a tar.gz
      file, a zip file, a directory, or a yaml file, or if no project ID
      is defined.
    BadConfigurationException: If --project was specified for a Java app.
    AppScaleException: If the deployment operation failed or timed out.
  """
  custom_service_yaml = None
  # Determine where the app's source lives and parse its version metadata.
  # Archives are extracted to a temp dir that must be cleaned up afterwards.
  if cls.TAR_GZ_REGEX.search(options.file):
    file_location = LocalState.extract_tgz_app_to_dir(options.file)
    created_dir = True
    version = Version.from_tar_gz(options.file)
  elif cls.ZIP_REGEX.search(options.file):
    file_location = LocalState.extract_zip_app_to_dir(options.file)
    created_dir = True
    version = Version.from_zip(options.file)
  elif os.path.isdir(options.file):
    file_location = options.file
    created_dir = False
    version = Version.from_directory(options.file)
  elif options.file.endswith('.yaml'):
    file_location = os.path.dirname(options.file)
    created_dir = False
    version = Version.from_yaml_file(options.file)
    custom_service_yaml = options.file
  else:
    raise AppEngineConfigException('{0} is not a tar.gz file, a zip file, ' \
      'or a directory. Please try uploading either a tar.gz file, a zip ' \
      'file, or a directory.'.format(options.file))

  if options.project:
    if version.runtime == 'java':
      # Note the trailing space before the implicit concatenation: without it
      # the message previously read "--project forJava yet".
      raise BadConfigurationException("AppScale doesn't support --project "
        "for Java yet. Please specify the application id in "
        "appengine-web.xml.")

    version.project_id = options.project

  if version.project_id is None:
    if version.config_type == 'app.yaml':
      message = 'Specify --project or define "application" in your app.yaml'
    else:
      message = 'Define "application" in your appengine-web.xml'

    raise AppEngineConfigException(message)

  # Let users know that versions are not supported yet.
  AppEngineHelper.warn_if_version_defined(version, options.test)

  AppEngineHelper.validate_app_id(version.project_id)

  extras = {}
  if version.runtime == 'go':
    extras = LocalState.get_extra_go_dependencies(options.file, options.test)

  if (version.runtime == 'java'
      and AppEngineHelper.is_sdk_mismatch(file_location)):
    AppScaleLogger.warn(
      'AppScale did not find the correct SDK jar versions in your app. The '
      'current supported SDK version is '
      '{}.'.format(AppEngineHelper.SUPPORTED_SDK_VERSION))

  head_node_public_ip = LocalState.get_host_with_role(
    options.keyname, 'shadow')
  secret_key = LocalState.get_secret_key(options.keyname)
  admin_client = AdminClient(head_node_public_ip, secret_key)

  remote_file_path = RemoteHelper.copy_app_to_host(
    file_location, version.project_id, options.keyname, extras,
    custom_service_yaml)

  AppScaleLogger.log(
    'Deploying service {} for {}'.format(version.service_id,
                                         version.project_id))
  operation_id = admin_client.create_version(version, remote_file_path)

  # now that we've told the AppController to start our app, find out what port
  # the app is running on and wait for it to start serving
  AppScaleLogger.log("Please wait for your app to start serving.")

  # Poll the operation until it completes or the deadline expires.
  deadline = time.time() + cls.MAX_OPERATION_TIME
  while True:
    if time.time() > deadline:
      raise AppScaleException('The deployment operation took too long.')
    operation = admin_client.get_operation(version.project_id, operation_id)
    if not operation['done']:
      time.sleep(1)
      continue

    # The operation is done; surface any error, otherwise grab the URL.
    if 'error' in operation:
      raise AppScaleException(operation['error']['message'])
    version_url = operation['response']['versionUrl']
    break

  AppScaleLogger.success(
    'Your app can be reached at the following URL: {}'.format(version_url))

  # Only remove directories this method created from an uploaded archive.
  if created_dir:
    shutil.rmtree(file_location)

  # Raw string avoids the invalid '\d' escape sequence warning.
  match = re.match(r'http://(.+):(\d+)', version_url)
  login_host = match.group(1)
  http_port = int(match.group(2))
  return login_host, http_port
def test_start_head_node(self):
  """Ensures start_head_node invokes each helper with the expected args."""
  self.options = flexmock(
    infrastructure='public cloud', group='group', machine='vm image',
    instance_type='instance type', keyname='keyname', table='cassandra',
    verbose=False, test=False, use_spot_instances=False, zone='zone',
    static_ip=None, replication=None, appengine=None, autoscale=None,
    user_commands=[], flower_password='', max_memory='X')
  self.node_layout = NodeLayout(self.options)

  # Stub out local key/secret generation.
  (flexmock(LocalState)
   .should_receive("generate_secret_key")
   .with_args(self.options.keyname)
   .and_return('some secret key'))
  (flexmock(LocalState)
   .should_receive("get_key_path_from_name")
   .with_args(self.options.keyname)
   .and_return('some key path'))

  (flexmock(NodeLayout)
   .should_receive('head_node')
   .and_return(SimpleNode('some IP', 'cloud')))

  # The infrastructure factory hands back our fake cloud agent.
  stub_agent = FakeAgent()
  (flexmock(factory.InfrastructureAgentFactory)
   .should_receive('create_agent')
   .with_args('public cloud')
   .and_return(stub_agent))

  self.additional_params = {}
  params = {}
  (flexmock(LocalState)
   .should_receive('generate_deployment_params')
   .with_args(self.options, self.node_layout, self.additional_params)
   .and_return(params))

  # Silence logging and sleeps during the run.
  flexmock(AppScaleLogger).should_receive('log').and_return()
  flexmock(AppScaleLogger).should_receive('remote_log_tools_state').and_return()
  flexmock(time).should_receive('sleep').and_return()

  # Remote bootstrap steps all succeed.
  (flexmock(RemoteHelper)
   .should_receive('copy_deployment_credentials')
   .with_args('some IP', self.options)
   .and_return())
  (flexmock(RemoteHelper)
   .should_receive('run_user_commands')
   .with_args('some IP', self.options.user_commands, self.options.keyname,
              self.options.verbose)
   .and_return())
  (flexmock(RemoteHelper)
   .should_receive('start_remote_appcontroller')
   .with_args('some IP', self.options.keyname, self.options.verbose)
   .and_return())

  node_list = {}
  flexmock(NodeLayout).should_receive('to_list').and_return(node_list)
  (flexmock(AppControllerClient)
   .should_receive('set_parameters')
   .with_args(node_list, params)
   .and_return())

  RemoteHelper.start_head_node(self.options, 'an ID', self.node_layout)