def test_start_all_nodes_reattach_changed_locations(self):
  self.node_layout = NodeLayout(self.reattach_options)

  fake_agent = FakeAgent()
  flexmock(factory.InfrastructureAgentFactory). \
    should_receive('create_agent'). \
    with_args('public cloud'). \
    and_return(fake_agent)

  LocalState.should_receive('get_host_with_role').and_return('0.0.0.1')

  node_info = [{"public_ip": "0.0.0.0",
                "private_ip": "0.0.0.0",
                "instance_id": "i-APPSCALE1",
                "roles": ['load_balancer', 'taskqueue', 'shadow',
                          'taskqueue_master']},
               {"public_ip": "0.0.0.0",
                "private_ip": "0.0.0.0",
                "instance_id": "i-APPSCALE2",
                "roles": ['memcache', 'appengine']},
               {"public_ip": "0.0.0.0",
                "private_ip": "0.0.0.0",
                "instance_id": "i-APPSCALE3",
                "roles": ['zookeeper', "appengine"]},
               {"public_ip": "0.0.0.0",
                "private_ip": "0.0.0.0",
                "instance_id": "i-APPSCALE4",
                "roles": ['db_master']}]

  LocalState.should_receive('get_local_nodes_info').and_return(node_info)

  # The bare assertRaises in the original snippet never invoked anything; the
  # call under test is assumed to be start_all_nodes, as in the other
  # reattach tests in this module.
  self.assertRaises(BadConfigurationException, RemoteHelper.start_all_nodes,
                    self.reattach_options, self.node_layout)
def _get_stats(keyname, stats_kind, include_lists):
  """ Returns statistics from Hermes.

  Args:
    keyname: A string representing an identifier from AppScaleFile.
    stats_kind: A string representing a kind of statistics.
    include_lists: A dict representing desired fields.

  Returns:
    A dict of statistics.
    A dict of failures.
  """
  load_balancer_ip = LocalState.get_host_with_role(keyname, 'load_balancer')
  secret = LocalState.get_secret_key(keyname=keyname)
  administration_port = "17441"
  stats_path = "/stats/cluster/{stats_kind}".format(stats_kind=stats_kind)

  headers = {'Appscale-Secret': secret}
  data = {'include_lists': include_lists}
  url = "https://{ip}:{port}{path}".format(
    ip=load_balancer_ip, port=administration_port, path=stats_path)
  try:
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    resp = requests.get(url=url, headers=headers, json=data, verify=False)
    resp.raise_for_status()
  except requests.HTTPError as err:
    AppScaleLogger.warn("Failed to get {stats_kind} stats ({err})".format(
      stats_kind=stats_kind, err=err))
    return {}, {}

  json_body = resp.json()
  return json_body["stats"], json_body["failures"]
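# Illustrative usage sketch (not part of the source): how _get_stats might be
# called to pull per-node statistics from Hermes, requesting only a couple of
# fields. The keyname, stats kind and field names below are assumptions made
# for the example.
def _example_show_node_stats():
  node_stats, failures = _get_stats(keyname='appscale', stats_kind='nodes',
                                    include_lists={'node': ['cpu', 'memory']})
  for ip, stats in node_stats.items():
    print("{}: {}".format(ip, stats))
  if failures:
    print("Some nodes did not report stats: {}".format(failures))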
def test_start_all_nodes_reattach_changed_asf(self):
  self.options = flexmock(
    infrastructure='public cloud',
    group='group',
    machine='vm image',
    instance_type='instance type',
    keyname='keyname',
    table='cassandra',
    verbose=False,
    test=False,
    use_spot_instances=False,
    zone='zone',
    static_ip=None,
    replication=None,
    appengine=None,
    autoscale=None,
    user_commands=[],
    flower_password='',
    max_memory='X',
    ips=THREE_NODE_CLOUD
  )

  self.node_layout = NodeLayout(self.options)

  fake_agent = FakeAgent()
  flexmock(factory.InfrastructureAgentFactory). \
    should_receive('create_agent'). \
    with_args('public cloud'). \
    and_return(fake_agent)

  LocalState.should_receive('get_local_nodes_info')\
    .and_return(self.reattach_node_info)

  # The bare assertRaises in the original snippet never invoked anything; the
  # call under test is assumed to be start_all_nodes, as in the other
  # reattach tests in this module.
  self.assertRaises(BadConfigurationException, RemoteHelper.start_all_nodes,
                    self.options, self.node_layout)
def add_instances(cls, options):
  """Adds additional machines to an AppScale deployment.

  Args:
    options: A Namespace that has fields for each parameter that can be
      passed in via the command-line interface.
  """
  if 'master' in options.ips.keys():
    raise BadConfigurationException("Cannot add master nodes to an " + \
      "already running AppScale deployment.")

  # In virtualized cluster deployments, we need to make sure that the user
  # has already set up SSH keys.
  if LocalState.get_infrastructure_option(keyname=options.keyname,
                                          tag='infrastructure') == "xen":
    ips_to_check = []
    for ip_group in options.ips.values():
      ips_to_check.extend(ip_group)
    for ip in ips_to_check:
      # throws a ShellException if the SSH key doesn't work
      RemoteHelper.ssh(ip, options.keyname, "ls")

  # Finally, find an AppController and send it a message to add
  # the given nodes with the new roles.
  AppScaleLogger.log("Sending request to add instances")
  load_balancer_ip = LocalState.get_host_with_role(
    options.keyname, 'load_balancer')
  acc = AppControllerClient(load_balancer_ip, LocalState.get_secret_key(
    options.keyname))
  acc.start_roles_on_nodes(json.dumps(options.ips))

  # TODO(cgb): Should we wait for the new instances to come up and get
  # initialized?
  AppScaleLogger.success("Successfully sent request to add instances " + \
    "to this AppScale deployment.")
def print_cluster_status(cls, options):
  """ Gets cluster stats and prints it nicely.

  Args:
    options: A Namespace that has fields for each parameter that can be
      passed in via the command-line interface.
  """
  try:
    load_balancer_ip = LocalState.get_host_with_role(
      options.keyname, 'load_balancer')
    acc = AppControllerClient(
      load_balancer_ip, LocalState.get_secret_key(options.keyname))
    all_private_ips = acc.get_all_private_ips()
    cluster_stats = acc.get_cluster_stats()
  except (faultType, AppControllerException, BadConfigurationException):
    AppScaleLogger.warn("AppScale deployment is probably down")
    raise

  # Convert cluster stats to useful structures
  node_stats = {
    ip: next((n for n in cluster_stats if n["private_ip"] == ip), None)
    for ip in all_private_ips
  }
  apps_dict = next((n["apps"] for n in cluster_stats if n["apps"]), {})
  services = [ServiceInfo(key.split('_')[0], key.split('_')[1], app_info)
              for key, app_info in apps_dict.iteritems()]
  nodes = [NodeStats(ip, node) for ip, node in node_stats.iteritems() if node]
  invisible_nodes = [ip for ip, node in node_stats.iteritems() if not node]

  if options.verbose:
    AppScaleLogger.log("-" * 76)
    cls._print_nodes_info(nodes, invisible_nodes)
    cls._print_roles_info(nodes)
  else:
    AppScaleLogger.log("-" * 76)

  cls._print_cluster_summary(nodes, invisible_nodes, services)
  cls._print_services(services)
  cls._print_status_alerts(nodes)

  try:
    login_host = acc.get_property('login')['login']
  except KeyError:
    raise AppControllerException('login property not found')

  dashboard = next(
    (service for service in services
     if service.http == RemoteHelper.APP_DASHBOARD_PORT), None)
  if dashboard and dashboard.appservers >= 1:
    AppScaleLogger.success(
      "\nView more about your AppScale deployment at http://{}:{}/status"
      .format(login_host, RemoteHelper.APP_DASHBOARD_PORT)
    )
  else:
    AppScaleLogger.log(
      "\nAs soon as AppScale Dashboard is started you can visit it at "
      "http://{0}:{1}/status and see more about your deployment"
      .format(login_host, RemoteHelper.APP_DASHBOARD_PORT)
    )
def get_cloud_params(self, keyname):
  """Searches through the locations.json file with key
  'infrastructure_info' to build a dict containing the
  parameters necessary to interact with Amazon EC2.

  Args:
    keyname: The name of the SSH keypair that uniquely identifies this
      AppScale deployment.
  """
  params = {
    self.PARAM_CREDENTIALS: {},
    self.PARAM_GROUP: LocalState.get_group(keyname),
    self.PARAM_KEYNAME: keyname
  }

  zone = LocalState.get_zone(keyname)
  if zone:
    params[self.PARAM_REGION] = zone[:-1]
  else:
    params[self.PARAM_REGION] = self.DEFAULT_REGION

  for credential in self.REQUIRED_CREDENTIALS:
    cred = LocalState.get_infrastructure_option(tag=credential,
                                                keyname=keyname)
    if not cred:
      raise AgentConfigurationException("no " + credential)

    params[self.PARAM_CREDENTIALS][credential] = cred

  return params
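# Illustrative sketch (assumption, not from the source): what a caller might
# do with the dict returned by get_cloud_params. The PARAM_* attributes are
# the constants referenced in the method above; the 'appscale' keyname and the
# example region value are made up.
def _example_cloud_params(agent):
  params = agent.get_cloud_params('appscale')
  region = params[agent.PARAM_REGION]            # e.g. 'us-east-1', from zone[:-1]
  credentials = params[agent.PARAM_CREDENTIALS]  # one entry per REQUIRED_CREDENTIALS
  return region, credentials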
def get_cloud_params(self, keyname):
  """Searches through the locations.json file with key
  'infrastructure_info' to build a dict containing the
  parameters necessary to interact with Amazon EC2.

  Args:
    keyname: The name of the SSH keypair that uniquely identifies this
      AppScale deployment.
  """
  params = {
    self.PARAM_CREDENTIALS: {},
    self.PARAM_GROUP: LocalState.get_group(keyname),
    self.PARAM_KEYNAME: keyname
  }

  zone = LocalState.get_zone(keyname)
  if zone:
    params[self.PARAM_REGION] = zone[:-1]
  else:
    params[self.PARAM_REGION] = self.DEFAULT_REGION

  for credential in self.REQUIRED_CREDENTIALS:
    if os.environ.get(credential):
      params[self.PARAM_CREDENTIALS][credential] = os.environ[credential]
    else:
      raise AgentConfigurationException("no " + credential)

  return params
def start_service(cls, options):
  """Instructs AppScale to start the named service.

  This is applicable for services using manual scaling.

  Args:
    options: A Namespace that has fields for each parameter that can be
      passed in via the command-line interface.
  Raises:
    AppScaleException: If the named service isn't running in this
      AppScale cloud, or if start is not valid for the service.
  """
  load_balancer_ip = LocalState.get_host_with_role(
    options.keyname, 'load_balancer')
  secret = LocalState.get_secret_key(options.keyname)
  admin_client = AdminClient(load_balancer_ip, secret)

  version = Version(None, None)
  version.project_id = options.project_id
  version.service_id = options.service_id or DEFAULT_SERVICE
  version.id = DEFAULT_VERSION
  version.serving_status = 'SERVING'

  admin_client.patch_version(version, ['servingStatus'])

  AppScaleLogger.success('Start requested for {}.'.format(options.project_id))
def test_start_all_nodes_reattach_changed_asf(self):
  self.options = flexmock(infrastructure='public cloud',
                          group='group',
                          machine='vm image',
                          instance_type='instance type',
                          keyname='keyname',
                          table='cassandra',
                          verbose=False,
                          test=False,
                          use_spot_instances=False,
                          zone='zone',
                          static_ip=None,
                          replication=None,
                          appengine=None,
                          autoscale=None,
                          user_commands=[],
                          flower_password='',
                          max_memory='X',
                          ips=THREE_NODE_CLOUD)

  self.node_layout = NodeLayout(self.options)

  fake_agent = FakeAgent()
  flexmock(factory.InfrastructureAgentFactory). \
    should_receive('create_agent'). \
    with_args('public cloud'). \
    and_return(fake_agent)

  LocalState.should_receive('get_login_host').and_return('0.0.0.1')
  LocalState.should_receive('get_local_nodes_info')\
    .and_return(self.reattach_node_info)

  # The bare assertRaises in the original snippet never invoked anything; the
  # call under test is assumed to be start_all_nodes, as in the other
  # reattach tests in this module.
  self.assertRaises(BadConfigurationException, RemoteHelper.start_all_nodes,
                    self.options, self.node_layout)
def create_user(cls, options, is_admin):
  """Create a new user with the parameters given.

  Args:
    options: A Namespace that has fields for each parameter that can be
      passed in via the command-line interface.
    is_admin: A flag to indicate if the user to be created is an admin user.

  Raises:
    AppControllerException: If the AppController on the head node crashes.
      When this occurs, the message in the exception contains the reason why
      the AppController crashed.
  """
  secret = LocalState.get_secret_key(options.keyname)
  load_balancer_ip = LocalState.get_host_with_role(
    options.keyname, 'load_balancer')

  username, password = LocalState.get_credentials(is_admin)

  acc = AppControllerClient(load_balancer_ip, secret)

  RemoteHelper.create_user_accounts(
    username, password, load_balancer_ip, options.keyname)

  try:
    if is_admin:
      acc.set_admin_role(username, 'true', cls.ADMIN_CAPABILITIES)
  except Exception as exception:
    AppScaleLogger.warn("Could not grant admin privileges to the user for "
                        "the following reason: {0}".format(str(exception)))
    sys.exit(1)
def stop_service(cls, options):
  """Instructs AppScale to stop the named service.

  This is applicable for services using manual scaling.

  Args:
    options: A Namespace that has fields for each parameter that can be
      passed in via the command-line interface.
  Raises:
    AppScaleException: If the named service isn't running in this
      AppScale cloud, or if stop is not valid for the service.
  """
  if not options.confirm:
    response = raw_input(
      'Are you sure you want to stop this service? (y/N) ')
    if response.lower() not in ['y', 'yes']:
      raise AppScaleException("Cancelled service stop.")

  load_balancer_ip = LocalState.get_host_with_role(
    options.keyname, 'load_balancer')
  secret = LocalState.get_secret_key(options.keyname)
  admin_client = AdminClient(load_balancer_ip, secret)

  version = Version(None, None)
  version.project_id = options.project_id
  version.service_id = options.service_id or DEFAULT_SERVICE
  version.id = DEFAULT_VERSION
  version.serving_status = 'STOPPED'

  admin_client.patch_version(version, ['servingStatus'])

  AppScaleLogger.success('Stop requested for {}.'.format(options.project_id))
def update_dispatch(cls, source_location, keyname, project_id):
  """ Updates an application's dispatch routing rules from the configuration
    file.

  Args:
    source_location: A string specifying the location of the source code.
    keyname: A string specifying the key name.
    project_id: A string specifying the project ID.
  """
  if cls.TAR_GZ_REGEX.search(source_location):
    fetch_function = utils.config_from_tar_gz
    version = Version.from_tar_gz(source_location)
  elif cls.ZIP_REGEX.search(source_location):
    fetch_function = utils.config_from_zip
    version = Version.from_zip(source_location)
  elif os.path.isdir(source_location):
    fetch_function = utils.config_from_dir
    version = Version.from_directory(source_location)
  elif source_location.endswith('.yaml'):
    fetch_function = utils.config_from_dir
    version = Version.from_yaml_file(source_location)
    source_location = os.path.dirname(source_location)
  else:
    raise BadConfigurationException(
      '{} must be a directory, tar.gz, or zip'.format(source_location))

  if project_id:
    version.project_id = project_id

  dispatch_rules = utils.dispatch_from_yaml(source_location, fetch_function)
  if dispatch_rules is None:
    return
  AppScaleLogger.log('Updating dispatch for {}'.format(version.project_id))

  load_balancer_ip = LocalState.get_host_with_role(keyname, 'load_balancer')
  secret_key = LocalState.get_secret_key(keyname)
  admin_client = AdminClient(load_balancer_ip, secret_key)
  operation_id = admin_client.update_dispatch(version.project_id,
                                              dispatch_rules)

  # Check on the operation.
  AppScaleLogger.log("Please wait for your dispatch to be updated.")

  deadline = time.time() + cls.MAX_OPERATION_TIME
  while True:
    if time.time() > deadline:
      raise AppScaleException('The operation took too long.')
    operation = admin_client.get_operation(version.project_id, operation_id)
    if not operation['done']:
      time.sleep(1)
      continue

    if 'error' in operation:
      raise AppScaleException(operation['error']['message'])
    dispatch_rules = operation['response']['dispatchRules']
    break

  AppScaleLogger.verbose(
    "The following dispatchRules have been applied to your application's "
    "configuration : {}".format(dispatch_rules))
  AppScaleLogger.success('Dispatch has been updated for {}'.format(
    version.project_id))
def test_start_all_nodes_reattach_changed_locations(self):
  self.node_layout = NodeLayout(self.reattach_options)

  fake_agent = FakeAgent()
  flexmock(factory.InfrastructureAgentFactory). \
    should_receive('create_agent'). \
    with_args('public cloud'). \
    and_return(fake_agent)

  LocalState.should_receive('get_login_host').and_return('0.0.0.1')

  node_info = [{"public_ip": "0.0.0.0",
                "private_ip": "0.0.0.0",
                "instance_id": "i-APPSCALE1",
                "jobs": ['load_balancer', 'taskqueue', 'shadow', 'login',
                         'taskqueue_master']},
               {"public_ip": "0.0.0.0",
                "private_ip": "0.0.0.0",
                "instance_id": "i-APPSCALE2",
                "jobs": ['memcache', 'appengine']},
               {"public_ip": "0.0.0.0",
                "private_ip": "0.0.0.0",
                "instance_id": "i-APPSCALE3",
                "jobs": ['zookeeper', "appengine"]},
               {"public_ip": "0.0.0.0",
                "private_ip": "0.0.0.0",
                "instance_id": "i-APPSCALE4",
                "jobs": ['db_master']}]

  LocalState.should_receive('get_local_nodes_info').and_return(node_info)

  # The bare assertRaises in the original snippet never invoked anything; the
  # call under test is assumed to be start_all_nodes, as in the other
  # reattach tests in this module.
  self.assertRaises(BadConfigurationException, RemoteHelper.start_all_nodes,
                    self.reattach_options, self.node_layout)
def test_remove_app_but_app_isnt_running(self):
  # mock out reading from stdin, and assume the user says 'yes'
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_receive('raw_input').and_return('yes')

  # mock out reading the secret key
  builtins.should_call('open')  # set the fall-through

  app_stats_data = {'apps': {'pippo': {'http': 8080,
                                       'language': 'python27',
                                       'total_reqs': 'no_change',
                                       'appservers': 1,
                                       'https': 4380,
                                       'reqs_enqueued': None}}}

  secret_key_location = LocalState.get_secret_key_location(self.keyname)
  fake_secret = flexmock(name="fake_secret")
  fake_secret.should_receive('read').and_return('the secret')
  builtins.should_receive('open').with_args(secret_key_location, 'r') \
    .and_return(fake_secret)

  # mock out the SOAP call to the AppController and assume it succeeded
  fake_appcontroller = flexmock(name='fake_appcontroller')
  fake_appcontroller.should_receive('status').with_args('the secret') \
    .and_return('Database is at public1')
  fake_appcontroller.should_receive('get_all_stats').with_args(
    'the secret').and_return(json.dumps(app_stats_data))
  flexmock(SOAPpy)
  SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
    .and_return(fake_appcontroller)

  # mock out reading the locations.json file, and slip in our own json
  flexmock(os.path)
  os.path.should_call('exists')  # set the fall-through
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_json_location(self.keyname)).and_return(True)

  fake_nodes_json = flexmock(name="fake_nodes_json")
  fake_nodes_json.should_receive('read').and_return(
    json.dumps({"node_info": [{
      "public_ip": "public1",
      "private_ip": "private1",
      "jobs": ["shadow", "login"]
    }]}))
  fake_nodes_json.should_receive('write').and_return()
  builtins.should_receive('open').with_args(
    LocalState.get_locations_json_location(self.keyname), 'r') \
    .and_return(fake_nodes_json)

  argv = ["--appname", "blargapp", "--keyname", self.keyname]
  options = ParseArgs(argv, self.function).args
  self.assertRaises(AppScaleException, AppScaleTools.remove_app, options)
def test_remove_app_and_app_is_running(self):
  # mock out reading from stdin, and assume the user says 'YES'
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_receive('raw_input').and_return('YES')

  # mock out reading the secret key
  builtins.should_call('open')  # set the fall-through

  secret_key_location = LocalState.get_secret_key_location(self.keyname)
  fake_secret = flexmock(name="fake_secret")
  fake_secret.should_receive('read').and_return('the secret')
  builtins.should_receive('open').with_args(secret_key_location, 'r') \
    .and_return(fake_secret)

  app_stats_data = {'apps': {'blargapp': {'http': 8080,
                                          'language': 'python27',
                                          'total_reqs': 'no_change',
                                          'appservers': 1,
                                          'https': 4380,
                                          'reqs_enqueued': None}}}

  # mock out the SOAP call to the AppController and assume it succeeded
  fake_appcontroller = flexmock(name='fake_appcontroller')
  fake_appcontroller.should_receive('status').with_args('the secret') \
    .and_return('Database is at public1')
  fake_appcontroller.should_receive('stop_app').with_args(
    'blargapp', 'the secret').and_return('OK')
  fake_appcontroller.should_receive('is_app_running').with_args(
    'blargapp', 'the secret').and_return(True).and_return(True) \
    .and_return(False)
  fake_appcontroller.should_receive('does_app_exist').with_args(
    'blargapp', 'the secret').and_return(True)
  fake_appcontroller.should_receive('get_all_stats').with_args(
    'the secret').and_return(json.dumps(app_stats_data))
  flexmock(SOAPpy)
  SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
    .and_return(fake_appcontroller)

  # mock out reading the locations.json file, and slip in our own json
  flexmock(os.path)
  os.path.should_call('exists')  # set the fall-through
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_json_location(self.keyname)).and_return(True)

  fake_nodes_json = flexmock(name="fake_nodes_json")
  fake_nodes_json.should_receive('read').and_return(
    json.dumps({"node_info": [{
      "public_ip": "public1",
      "private_ip": "private1",
      "jobs": ["shadow", "login"]
    }]}))
  fake_nodes_json.should_receive('write').and_return()
  builtins.should_receive('open').with_args(
    LocalState.get_locations_json_location(self.keyname), 'r') \
    .and_return(fake_nodes_json)

  flexmock(RemoteHelper).should_receive('is_port_open').and_return(False)

  argv = ["--appname", "blargapp", "--keyname", self.keyname]
  options = ParseArgs(argv, self.function).args
  AppScaleTools.remove_app(options)
def test_describe_instances_with_two_nodes(self):
  # mock out writing the secret key to ~/.appscale, as well as reading it
  # later
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_call('open')  # set the fall-through

  secret_key_location = LocalState.get_secret_key_location(self.keyname)
  fake_secret = flexmock(name="fake_secret")
  fake_secret.should_receive('read').and_return('the secret')
  fake_secret.should_receive('write').and_return()
  builtins.should_receive('open').with_args(secret_key_location, 'r') \
    .and_return(fake_secret)

  # mock out the SOAP call to the AppController and assume it succeeded
  fake_appcontroller = flexmock(name='fake_appcontroller')
  fake_appcontroller.should_receive('get_all_public_ips') \
    .with_args('the secret') \
    .and_return(json.dumps(['public1', 'public2']))
  fake_appcontroller.should_receive('status').with_args('the secret') \
    .and_return('nothing interesting here') \
    .and_return('Database is at not-up-yet') \
    .and_return('Database is at 1.2.3.4')
  flexmock(SOAPpy)
  SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
    .and_return(fake_appcontroller)
  SOAPpy.should_receive('SOAPProxy').with_args('https://public2:17443') \
    .and_return(fake_appcontroller)

  # mock out reading the locations.json file, and slip in our own json
  flexmock(os.path)
  os.path.should_call('exists')  # set the fall-through
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_json_location(self.keyname)).and_return(True)

  fake_nodes_json = flexmock(name="fake_nodes_json")
  fake_nodes_json.should_receive('read').and_return(
    json.dumps({"node_info": [
      {
        "public_ip": "public1",
        "private_ip": "private1",
        "jobs": ["shadow", "login"]
      },
      {
        "public_ip": "public2",
        "private_ip": "private2",
        "jobs": ["appengine"]
      },
    ]}))
  fake_nodes_json.should_receive('write').and_return()
  builtins.should_receive('open').with_args(
    LocalState.get_locations_json_location(self.keyname), 'r') \
    .and_return(fake_nodes_json)

  # assume that there are two machines running in our deployment
  argv = ["--keyname", self.keyname]
  options = ParseArgs(argv, self.function).args
  AppScaleTools.describe_instances(options)
def open_connection(self, parameters):
  """ Connects to Google Compute Engine with the given credentials.

  Args:
    parameters: A dict that contains all the parameters necessary to
      authenticate this user with Google Compute Engine. We assume that the
      user has already authorized this account for use with GCE.

  Returns:
    An apiclient.discovery.Resource that is a connection valid for requests
    to Google Compute Engine for the given user, and a Credentials object
    that can be used to sign requests performed with that connection.

  Raises:
    AppScaleException if the user wants to abort.
  """
  is_autoscale_agent = parameters.get(self.PARAM_AUTOSCALE_AGENT, False)

  # Determine paths to credential files
  if is_autoscale_agent:
    client_secrets_path = self.CLIENT_SECRETS_LOCATION
    oauth2_storage_path = self.OAUTH2_STORAGE_LOCATION
  else:
    # Determine client secrets path
    client_secrets_path = LocalState.get_client_secrets_location(
      parameters[self.PARAM_KEYNAME])
    if not os.path.exists(client_secrets_path):
      client_secrets_path = parameters.get(self.PARAM_SECRETS, '')
    # Determine oauth2 storage
    oauth2_storage_path = parameters.get(self.PARAM_STORAGE)
    if not oauth2_storage_path or not os.path.exists(oauth2_storage_path):
      oauth2_storage_path = LocalState.get_oauth2_storage_location(
        parameters[self.PARAM_KEYNAME])

  if os.path.exists(client_secrets_path):
    # Attempt to perform authorization using Service account
    secrets_type = GCEAgent.get_secrets_type(client_secrets_path)
    if secrets_type == CredentialTypes.SERVICE:
      scopes = [GCPScopes.COMPUTE]
      credentials = ServiceAccountCredentials.from_json_keyfile_name(
        client_secrets_path, scopes=scopes)
      return discovery.build('compute', self.API_VERSION), credentials

  # Perform authorization using OAuth2 storage
  storage = oauth2client.file.Storage(oauth2_storage_path)
  credentials = storage.get()

  if not credentials or credentials.invalid:
    # If we couldn't get valid credentials from OAuth2 storage
    if not os.path.exists(client_secrets_path):
      raise AgentConfigurationException(
        'Couldn\'t find client secrets file at {}'.format(client_secrets_path))
    # Run OAuth2 flow to get credentials
    flow = oauth2client.client.flow_from_clientsecrets(
      client_secrets_path, scope=self.GCE_SCOPE)
    flags = oauth2client.tools.argparser.parse_args(args=[])
    credentials = oauth2client.tools.run_flow(flow, storage, flags)

  # Build the service
  return discovery.build('compute', self.API_VERSION), credentials
def configure_instance_security(self, parameters):
  """ Setup EC2 security keys and groups.

  Required input values are read from the parameters dictionary. More
  specifically, this method expects to find a 'keyname' parameter and a
  'group' parameter in the parameters dictionary. Using these provided
  values, this method will create a new EC2 key-pair and a security group.
  Security group will be granted permission to access any port on the
  instantiated VMs. (Also see documentation for the BaseAgent class)

  Args:
    parameters: A dictionary of parameters.
  """
  keyname = parameters[self.PARAM_KEYNAME]
  group = parameters[self.PARAM_GROUP]

  AppScaleLogger.log("Verifying that keyname {0}".format(keyname) + \
    " is not already registered.")
  conn = self.open_connection(parameters)
  if conn.get_key_pair(keyname):
    self.handle_failure("SSH keyname {0} is already registered. Please " \
      "change the 'keyname' specified in your AppScalefile to a different " \
      "value, or erase it to have one automatically generated for you." \
      .format(keyname))

  security_groups = conn.get_all_security_groups()
  for security_group in security_groups:
    if security_group.name == group:
      self.handle_failure("Security group {0} is already registered. Please" \
        " change the 'group' specified in your AppScalefile to a different " \
        "value, or erase it to have one automatically generated for you." \
        .format(group))

  AppScaleLogger.log("Creating key pair: {0}".format(keyname))
  key_pair = conn.create_key_pair(keyname)
  ssh_key = '{0}{1}.key'.format(LocalState.LOCAL_APPSCALE_PATH, keyname)
  LocalState.write_key_file(ssh_key, key_pair.material)

  self.create_security_group(parameters, group)
  self.authorize_security_group(parameters, group, from_port=1,
    to_port=65535, ip_protocol='udp', cidr_ip='0.0.0.0/0')
  self.authorize_security_group(parameters, group, from_port=1,
    to_port=65535, ip_protocol='tcp', cidr_ip='0.0.0.0/0')
  self.authorize_security_group(parameters, group, from_port=-1,
    to_port=-1, ip_protocol='icmp', cidr_ip='0.0.0.0/0')
  return True
def test_describe_instances_with_two_nodes(self):
  # mock out writing the secret key to ~/.appscale, as well as reading it
  # later
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_call('open')  # set the fall-through

  secret_key_location = LocalState.get_secret_key_location(self.keyname)
  fake_secret = flexmock(name="fake_secret")
  fake_secret.should_receive('read').and_return('the secret')
  fake_secret.should_receive('write').and_return()
  builtins.should_receive('open').with_args(secret_key_location, 'r') \
    .and_return(fake_secret)

  # mock out the SOAP call to the AppController and assume it succeeded
  fake_appcontroller = flexmock(name='fake_appcontroller')
  fake_appcontroller.should_receive('get_all_public_ips') \
    .with_args('the secret') \
    .and_return(json.dumps(['public1', 'public2']))
  fake_appcontroller.should_receive('status').with_args('the secret') \
    .and_return('nothing interesting here') \
    .and_return('Database is at not-up-yet') \
    .and_return('Database is at 1.2.3.4')
  flexmock(SOAPpy)
  SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
    .and_return(fake_appcontroller)
  SOAPpy.should_receive('SOAPProxy').with_args('https://public2:17443') \
    .and_return(fake_appcontroller)

  # mock out reading the locations.json file, and slip in our own json
  flexmock(os.path)
  os.path.should_call('exists')  # set the fall-through
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_json_location(self.keyname)).and_return(True)

  fake_nodes_json = flexmock(name="fake_nodes_json")
  fake_nodes_json.should_receive('read').and_return(
    json.dumps({"node_info": [
      {
        "public_ip": "public1",
        "private_ip": "private1",
        "jobs": ["shadow", "login"]
      },
      {
        "public_ip": "public2",
        "private_ip": "private2",
        "jobs": ["appengine"]
      },
    ]}))
  fake_nodes_json.should_receive('write').and_return()
  builtins.should_receive('open').with_args(
    LocalState.get_locations_json_location(self.keyname), 'r') \
    .and_return(fake_nodes_json)

  # assume that there are two machines running in our deployment
  argv = ["--keyname", self.keyname]
  options = ParseArgs(argv, self.function).args
  AppScaleTools.describe_instances(options)
def main():
  """ Execute appscale-show-stats script. """
  options = ParseArgs(sys.argv[1:], "appscale-show-stats").args
  try:
    show_stats(options)
    sys.exit(0)
  except Exception as e:
    LocalState.generate_crash_log(e, traceback.format_exc())
    sys.exit(1)
def configure_instance_security(self, parameters):
  """ Setup EC2 security keys and groups.

  Required input values are read from the parameters dictionary. More
  specifically, this method expects to find a 'keyname' parameter and a
  'group' parameter in the parameters dictionary. Using these provided
  values, this method will create a new EC2 key-pair and a security group.
  Security group will be granted permission to access any port on the
  instantiated VMs. (Also see documentation for the BaseAgent class)

  Args:
    parameters: A dictionary of parameters.
  """
  keyname = parameters[self.PARAM_KEYNAME]
  group = parameters[self.PARAM_GROUP]
  is_autoscale = parameters['autoscale_agent']

  AppScaleLogger.log("Verifying that keyname {0}".format(keyname) + \
    " is not already registered.")
  conn = self.open_connection(parameters)

  # While creating instances during autoscaling, we do not need to create a
  # new keypair or a security group. We just make use of the existing one.
  if is_autoscale in ['True', True]:
    return

  if conn.get_key_pair(keyname):
    self.handle_failure("SSH keyname {0} is already registered. Please " \
      "change the 'keyname' specified in your AppScalefile to a different " \
      "value, or erase it to have one automatically generated for you." \
      .format(keyname))

  security_groups = conn.get_all_security_groups()
  for security_group in security_groups:
    if security_group.name == group:
      self.handle_failure("Security group {0} is already registered. Please" \
        " change the 'group' specified in your AppScalefile to a different " \
        "value, or erase it to have one automatically generated for you." \
        .format(group))

  AppScaleLogger.log("Creating key pair: {0}".format(keyname))
  key_pair = conn.create_key_pair(keyname)
  ssh_key = '{0}{1}.key'.format(LocalState.LOCAL_APPSCALE_PATH, keyname)
  LocalState.write_key_file(ssh_key, key_pair.material)

  self.create_security_group(parameters, group)
  self.authorize_security_group(parameters, group, from_port=1,
    to_port=65535, ip_protocol='udp', cidr_ip='0.0.0.0/0')
  self.authorize_security_group(parameters, group, from_port=1,
    to_port=65535, ip_protocol='tcp', cidr_ip='0.0.0.0/0')
  self.authorize_security_group(parameters, group, from_port=-1,
    to_port=-1, ip_protocol='icmp', cidr_ip='0.0.0.0/0')
  return True
def test_reset_password_for_user_that_exists(self):
  # put in a mock for reading the secret file
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_call('open')  # set the fall-through

  secret_key_location = LocalState.get_secret_key_location(self.keyname)
  fake_secret = flexmock(name="fake_secret")
  fake_secret.should_receive('read').and_return('the secret')
  builtins.should_receive('open').with_args(secret_key_location, 'r') \
    .and_return(fake_secret)

  # mock out reading the username and new password from the user
  builtins.should_receive('raw_input').and_return('*****@*****.**')
  flexmock(getpass)
  getpass.should_receive('getpass').and_return('the password')

  # mock out finding the login node's IP address from the json file
  flexmock(os.path)
  os.path.should_call('exists')  # set the fall-through
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_json_location(self.keyname)).and_return(True)

  fake_nodes_json = flexmock(name="fake_secret")
  fake_nodes_json.should_receive('read').and_return(
    json.dumps({"node_info": [{
      'public_ip': 'public1',
      'private_ip': 'private1',
      'jobs': ['login', 'db_master']
    }]}))
  builtins.should_receive('open').with_args(
    LocalState.get_locations_json_location(self.keyname), 'r') \
    .and_return(fake_nodes_json)

  # mock out grabbing the userappserver ip from an appcontroller
  fake_appcontroller = flexmock(name='fake_appcontroller')
  fake_appcontroller.should_receive('status').with_args('the secret') \
    .and_return('nothing interesting here') \
    .and_return('Database is at not-up-yet') \
    .and_return('Database is at public1')
  fake_appcontroller.should_receive('reset_password').with_args(
    '*****@*****.**', str, 'the secret').and_return('true')
  flexmock(SOAPpy)
  SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
    .and_return(fake_appcontroller)

  argv = ["--keyname", self.keyname]
  options = ParseArgs(argv, self.function).args
  AppScaleTools.reset_password(options)
def set_property(cls, options):
  """Instructs AppScale to replace the value it uses for a particular
  AppController instance variable (property) with a new value.

  Args:
    options: A Namespace that has fields for each parameter that can be
      passed in via the command-line interface.
  """
  shadow_host = LocalState.get_host_with_role(options.keyname, 'shadow')
  acc = AppControllerClient(shadow_host, LocalState.get_secret_key(
    options.keyname))
  acc.set_property(options.property_name, options.property_value)

  AppScaleLogger.success('Successfully updated the given property.')
def configure_instance_security(self, parameters):
  """ Creates a GCE network and firewall with the specified name, and opens
  the ports on that firewall as needed for AppScale.

  We expect both the network and the firewall to not exist before this point,
  to avoid accidentally placing AppScale instances from different deployments
  in the same network and firewall (thus enabling them to see each other's
  web traffic).

  Args:
    parameters: A dict with keys for each parameter needed to connect to
      Google Compute Engine, and an additional key indicating the name of the
      network and firewall that we should create in GCE.

  Returns:
    True, if the named network and firewall was created successfully.

  Raises:
    AgentRuntimeException: If the named network or firewall already exist in
      GCE.
  """
  is_autoscale_agent = parameters.get(self.PARAM_AUTOSCALE_AGENT, False)

  # While creating instances during autoscaling, we do not need to create a
  # new keypair or a network. We just make use of the existing one.
  if is_autoscale_agent:
    return

  AppScaleLogger.log("Verifying that SSH key exists locally")
  keyname = parameters[self.PARAM_KEYNAME]
  private_key = LocalState.LOCAL_APPSCALE_PATH + keyname
  public_key = private_key + ".pub"

  if os.path.exists(private_key) or os.path.exists(public_key):
    raise AgentRuntimeException("SSH key already found locally - please " +
                                "use a different keyname")

  LocalState.generate_rsa_key(keyname, parameters[self.PARAM_VERBOSE])

  ssh_key_exists, all_ssh_keys = self.does_ssh_key_exist(parameters)
  if not ssh_key_exists:
    self.create_ssh_key(parameters, all_ssh_keys)

  if self.does_network_exist(parameters):
    raise AgentRuntimeException("Network already exists - please use a " + \
      "different group name.")

  if self.does_firewall_exist(parameters):
    raise AgentRuntimeException("Firewall already exists - please use a " + \
      "different group name.")

  network_url = self.create_network(parameters)
  self.create_firewall(parameters, network_url)
def test_reset_password_for_user_that_exists(self):
  # put in a mock for reading the secret file
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_call('open')  # set the fall-through

  secret_key_location = LocalState.get_secret_key_location(self.keyname)
  fake_secret = flexmock(name="fake_secret")
  fake_secret.should_receive('read').and_return('the secret')
  builtins.should_receive('open').with_args(secret_key_location, 'r') \
    .and_return(fake_secret)

  # mock out reading the username and new password from the user
  builtins.should_receive('raw_input').and_return('*****@*****.**')
  flexmock(getpass)
  getpass.should_receive('getpass').and_return('the password')

  # mock out finding the login node's IP address from the json file
  flexmock(os.path)
  os.path.should_call('exists')  # set the fall-through
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_json_location(self.keyname)).and_return(True)

  fake_nodes_json = flexmock(name="fake_secret")
  fake_nodes_json.should_receive('read').and_return(
    json.dumps({"node_info": [{
      'public_ip': 'public1',
      'private_ip': 'private1',
      'jobs': ['login', 'db_master']
    }]}))
  builtins.should_receive('open').with_args(
    LocalState.get_locations_json_location(self.keyname), 'r') \
    .and_return(fake_nodes_json)

  # mock out grabbing the userappserver ip from an appcontroller
  fake_appcontroller = flexmock(name='fake_appcontroller')
  fake_appcontroller.should_receive('status').with_args('the secret') \
    .and_return('nothing interesting here') \
    .and_return('Database is at not-up-yet') \
    .and_return('Database is at public1')
  fake_appcontroller.should_receive('reset_password').with_args(
    '*****@*****.**', str, 'the secret').and_return('true')
  flexmock(SOAPpy)
  SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
    .and_return(fake_appcontroller)

  argv = ["--keyname", self.keyname]
  options = ParseArgs(argv, self.function).args
  AppScaleTools.reset_password(options)
def update_queues(cls, source_location, keyname, project_id):
  """ Updates a project's queues from the configuration file.

  Args:
    source_location: A string specifying the location of the source code.
    keyname: A string specifying the key name.
    project_id: A string specifying the project ID.
  """
  if cls.TAR_GZ_REGEX.search(source_location):
    fetch_function = utils.config_from_tar_gz
    version = Version.from_tar_gz(source_location)
  elif cls.ZIP_REGEX.search(source_location):
    fetch_function = utils.config_from_zip
    version = Version.from_zip(source_location)
  elif os.path.isdir(source_location):
    fetch_function = utils.config_from_dir
    version = Version.from_directory(source_location)
  elif source_location.endswith('.yaml'):
    fetch_function = utils.config_from_dir
    version = Version.from_yaml_file(source_location)
    source_location = os.path.dirname(source_location)
  else:
    raise BadConfigurationException(
      '{} must be a directory, tar.gz, or zip'.format(source_location))

  if project_id:
    version.project_id = project_id

  queue_config = fetch_function('queue.yaml', source_location)
  if queue_config is None:
    queue_config = fetch_function('queue.xml', source_location)
    # If the source does not have a queue configuration file, do nothing.
    if queue_config is None:
      return

    queues = utils.queues_from_xml(queue_config)
  else:
    queues = yaml.safe_load(queue_config)

  AppScaleLogger.log('Updating queues')

  for queue in queues.get('queue', []):
    if 'bucket_size' in queue or 'max_concurrent_requests' in queue:
      AppScaleLogger.warn('Queue configuration uses unsupported rate options'
                          ' (bucket size or max concurrent requests)')
      break

  load_balancer_ip = LocalState.get_host_with_role(keyname, 'load_balancer')
  secret_key = LocalState.get_secret_key(keyname)
  admin_client = AdminClient(load_balancer_ip, secret_key)
  admin_client.update_queues(version.project_id, queues)
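# Illustrative sketch (not part of the source): the queue.yaml shape that
# update_queues works with after yaml.safe_load - a top-level 'queue' list
# whose entries may carry 'rate' and, in unsupported configurations,
# 'bucket_size' or 'max_concurrent_requests'. The queue names are made up.
EXAMPLE_QUEUE_YAML = """
queue:
- name: default
  rate: 5/s
- name: slow-queue
  rate: 1/m
  bucket_size: 10
"""
# yaml.safe_load(EXAMPLE_QUEUE_YAML)['queue'] is the list the loop above scans
# for unsupported rate options before handing the config to the Admin API.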
def test_make_appscale_directory_creation(self):
  # let's say that our ~/.appscale directory does not exist
  os.path.should_receive('exists') \
    .with_args(LocalState.LOCAL_APPSCALE_PATH) \
    .and_return(False) \
    .once()

  # thus, mock out making the appscale dir
  os.should_receive('mkdir') \
    .with_args(LocalState.LOCAL_APPSCALE_PATH) \
    .and_return()

  LocalState.make_appscale_directory()
def test_get_property(self):
  # put in a mock for reading the secret file
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_call('open')  # set the fall-through

  secret_key_location = LocalState.get_secret_key_location(self.keyname)
  fake_secret = flexmock(name="fake_secret")
  fake_secret.should_receive('read').and_return('the secret')
  builtins.should_receive('open').with_args(secret_key_location, 'r') \
    .and_return(fake_secret)

  # mock out finding the shadow node's IP address from the json file
  flexmock(os.path)
  os.path.should_call('exists')  # set the fall-through
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_json_location(self.keyname)).and_return(True)

  fake_nodes_json = flexmock(name="fake_secret")
  fake_nodes_json.should_receive('read').and_return(
    json.dumps({"node_info": [{
      'public_ip': 'public1',
      'private_ip': 'private1',
      'jobs': ['login', 'shadow']
    }]}))
  builtins.should_receive('open').with_args(
    LocalState.get_locations_json_location(self.keyname), 'r') \
    .and_return(fake_nodes_json)

  # mock out grabbing the userappserver ip from an appcontroller
  property_name = "name"
  property_value = "value"
  fake_appcontroller = flexmock(name='fake_appcontroller')
  fake_appcontroller.should_receive('set_property').with_args(
    property_name, property_value, 'the secret').and_return('OK')
  flexmock(SOAPpy)
  SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
    .and_return(fake_appcontroller)

  argv = [
    "--keyname", self.keyname,
    "--property_name", property_name,
    "--property_value", property_value
  ]
  options = ParseArgs(argv, self.function).args
  result = AppScaleTools.set_property(options)
  self.assertEqual(None, result)
def test_start_all_nodes_reattach(self):
  self.node_layout = NodeLayout(self.reattach_options)
  self.assertNotEqual([], self.node_layout.nodes)

  fake_agent = FakeAgent()
  flexmock(factory.InfrastructureAgentFactory). \
    should_receive('create_agent'). \
    with_args('euca'). \
    and_return(fake_agent)

  LocalState.should_receive('get_login_host').and_return(IP_1)
  LocalState.should_receive('get_local_nodes_info') \
    .and_return(self.reattach_node_info)

  RemoteHelper.start_all_nodes(self.reattach_options, self.node_layout)
def down(self, clean=False, terminate=False):
  """ 'down' provides a nicer experience for users than the
  appscale-terminate-instances command, by using the configuration options
  present in the AppScalefile found in the current working directory.

  Args:
    clean: A boolean to indicate if the deployment data and metadata
      need to be cleaned. This will clear the datastore.
    terminate: A boolean to indicate if instances need to be terminated
      (valid only if we spawned instances at start).

  Raises:
    AppScalefileException: If there is no AppScalefile in the current
      working directory.
  """
  contents = self.read_appscalefile()

  # Construct a terminate-instances command from the file's contents
  command = []
  contents_as_yaml = yaml.safe_load(contents)

  if 'verbose' in contents_as_yaml and contents_as_yaml['verbose'] == True:
    command.append("--verbose")

  if 'keyname' in contents_as_yaml:
    keyname = contents_as_yaml['keyname']
    command.append("--keyname")
    command.append(contents_as_yaml['keyname'])
  else:
    keyname = 'appscale'

  if clean:
    if 'test' not in contents_as_yaml or contents_as_yaml['test'] != True:
      LocalState.confirm_or_abort(
        "Clean will delete every data in the deployment.")
    command.append("--clean")

  if terminate:
    infrastructure = LocalState.get_infrastructure(keyname)
    if infrastructure != "xen" and not LocalState.are_disks_used(
        keyname) and 'test' not in contents_as_yaml:
      LocalState.confirm_or_abort(
        "Terminate will delete instances and the data on them.")
    command.append("--terminate")

  if 'test' in contents_as_yaml and contents_as_yaml['test'] == True:
    command.append("--test")

  # Finally, exec the command. Don't worry about validating it -
  # appscale-terminate-instances will do that for us.
  options = ParseArgs(command, "appscale-terminate-instances").args
  AppScaleTools.terminate_instances(options)

  LocalState.cleanup_appscale_files(keyname, terminate)
  AppScaleLogger.success("Successfully stopped your AppScale deployment.")
def test_remove_app_but_app_isnt_running(self):
  # mock out reading from stdin, and assume the user says 'yes'
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_receive('raw_input').and_return('yes')

  # mock out reading the secret key
  builtins.should_call('open')  # set the fall-through

  secret_key_location = LocalState.get_secret_key_location(self.keyname)
  fake_secret = flexmock(name="fake_secret")
  fake_secret.should_receive('read').and_return('the secret')
  builtins.should_receive('open').with_args(secret_key_location, 'r') \
    .and_return(fake_secret)

  # mock out the SOAP call to the AppController and assume it succeeded
  fake_appcontroller = flexmock(name='fake_appcontroller')
  fake_appcontroller.should_receive('status').with_args('the secret') \
    .and_return('Database is at public1')
  fake_appcontroller.should_receive('is_app_running').with_args(
    'blargapp', 'the secret').and_return(False)
  flexmock(SOAPpy)
  SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
    .and_return(fake_appcontroller)

  # mock out reading the locations.json file, and slip in our own json
  flexmock(os.path)
  os.path.should_call('exists')  # set the fall-through
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_json_location(self.keyname)).and_return(True)

  fake_nodes_json = flexmock(name="fake_nodes_json")
  fake_nodes_json.should_receive('read').and_return(
    json.dumps({"node_info": [{
      "public_ip": "public1",
      "private_ip": "private1",
      "jobs": ["shadow", "login"]
    }]}))
  fake_nodes_json.should_receive('write').and_return()
  builtins.should_receive('open').with_args(
    LocalState.get_locations_json_location(self.keyname), 'r') \
    .and_return(fake_nodes_json)

  argv = ["--appname", "blargapp", "--keyname", self.keyname]
  options = ParseArgs(argv, self.function).args
  self.assertRaises(AppScaleException, AppScaleTools.remove_app, options)
def test_get_property(self):
  # put in a mock for reading the secret file
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_call('open')  # set the fall-through

  secret_key_location = LocalState.get_secret_key_location(self.keyname)
  fake_secret = flexmock(name="fake_secret")
  fake_secret.should_receive('read').and_return('the secret')
  builtins.should_receive('open').with_args(secret_key_location, 'r') \
    .and_return(fake_secret)

  # mock out finding the shadow node's IP address from the json file
  flexmock(os.path)
  os.path.should_call('exists')  # set the fall-through
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_json_location(self.keyname)).and_return(True)

  fake_nodes_json = flexmock(name="fake_secret")
  fake_nodes_json.should_receive('read').and_return(
    json.dumps({"node_info": [{
      'public_ip': 'public1',
      'private_ip': 'private1',
      'roles': ['shadow']
    }]}))
  builtins.should_receive('open').with_args(
    LocalState.get_locations_json_location(self.keyname), 'r') \
    .and_return(fake_nodes_json)

  # mock out grabbing the userappserver ip from an appcontroller
  property_name = "name"
  property_value = "value"
  fake_appcontroller = flexmock(name='fake_appcontroller')
  fake_appcontroller.should_receive('set_property').with_args(
    property_name, property_value, 'the secret').and_return('OK')
  flexmock(SOAPpy)
  SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
    .and_return(fake_appcontroller)

  argv = [
    "--keyname", self.keyname,
    "--property_name", property_name,
    "--property_value", property_value
  ]
  options = ParseArgs(argv, self.function).args
  result = AppScaleTools.set_property(options)
  self.assertEqual(None, result)
def test_update_local_metadata(self):
  # mock out getting all the ips in the deployment from the head node
  fake_soap = flexmock(name='fake_soap')
  fake_soap.should_receive('get_all_public_ips').with_args('the secret') \
    .and_return(json.dumps(['public1']))
  role_info = [{
    'public_ip': 'public1',
    'private_ip': 'private1',
    'jobs': ['shadow', 'db_master']
  }]
  fake_soap.should_receive('get_role_info').with_args('the secret') \
    .and_return(json.dumps(role_info))
  flexmock(SOAPpy)
  SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
    .and_return(fake_soap)

  # mock out reading the secret key
  fake_secret = flexmock(name='fake_secret')
  fake_secret.should_receive('read').and_return('the secret')
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_call('open')
  builtins.should_receive('open').with_args(
    LocalState.get_secret_key_location('booscale'), 'r') \
    .and_return(fake_secret)

  # Mock out writing the json file.
  json_location = LocalState.get_locations_json_location('booscale')
  builtins.should_receive('open').with_args(json_location, 'w')\
    .and_return(flexmock(write=lambda *args: None))

  options = flexmock(name='options', table='cassandra', infrastructure='ec2',
                     keyname='booscale', group='boogroup', zone='my-zone-1b',
                     EC2_ACCESS_KEY='baz', EC2_SECRET_KEY='baz', EC2_URL='')
  node_layout = NodeLayout(options={
    'min_machines': 1,
    'max_machines': 1,
    'infrastructure': 'ec2',
    'table': 'cassandra',
    'instance_type': 'm1.large'
  })

  LocalState.update_local_metadata(options, 'public1', 'public1')
def test_generate_crash_log(self):
  crashlog_suffix = '123456'
  flexmock(uuid)
  uuid.should_receive('uuid4').and_return(crashlog_suffix)

  exception_class = 'Exception'
  exception_message = 'baz message'
  exception = Exception(exception_message)
  stacktrace = "\n".join(['Traceback (most recent call last):',
                          ' File "<stdin>", line 2, in <module>',
                          '{0}: {1}'.format(exception_class,
                                            exception_message)])

  # Mock out grabbing our system's information
  flexmock(platform)
  platform.should_receive('platform').and_return("MyOS")
  platform.should_receive('python_implementation').and_return("MyPython")

  # Mock out writing it to the crash log file
  expected = '{0}log-{1}'.format(LocalState.LOCAL_APPSCALE_PATH,
                                 crashlog_suffix)

  fake_file = flexmock(name='fake_file')
  fake_file.should_receive('write').with_args(str)

  fake_builtins = flexmock(sys.modules['__builtin__'])
  fake_builtins.should_call('open')  # set the fall-through
  fake_builtins.should_receive('open').with_args(expected, 'w').and_return(
    fake_file)

  # mock out printing the crash log message
  flexmock(AppScaleLogger)
  AppScaleLogger.should_receive('warn')

  actual = LocalState.generate_crash_log(exception, stacktrace)
  self.assertEquals(expected, actual)
def test_fails_if_app_isnt_running(self):
  # If the user wants to relocate their app to port X, but their app isn't
  # even running, this should fail.

  # Assume that the AppController is running but our app isn't.
  flexmock(os.path)
  os.path.should_call('exists')  # set the fall-through
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_json_location(self.keyname)).and_return(True)

  fake_appcontroller = flexmock(name='fake_appcontroller')
  fake_appcontroller.should_receive('get_app_info_map').with_args(
    'the secret').and_return(json.dumps({}))
  flexmock(SOAPpy)
  SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
    .and_return(fake_appcontroller)

  argv = [
    '--keyname', self.keyname,
    '--appname', self.appid,
    '--http_port', '80',
    '--https_port', '443'
  ]
  options = ParseArgs(argv, self.function).args
  self.assertRaises(AppScaleException, AppScaleTools.relocate_app, options)
def up(self):
  """ Starts an AppScale deployment with the configuration options from the
  AppScalefile in the current directory.

  Raises:
    AppScalefileException: If there is no AppScalefile in the current
      directory.
  """
  contents = self.read_appscalefile()

  # If running in a cluster environment, we first need to set up SSH keys
  contents_as_yaml = yaml.safe_load(contents)
  if not LocalState.ensure_appscalefile_is_up_to_date():
    contents = self.read_appscalefile()
    contents_as_yaml = yaml.safe_load(contents)

  # Construct a run-instances command from the file's contents
  command = []
  for key, value in contents_as_yaml.items():
    if key in self.DEPRECATED_ASF_ARGS:
      raise AppScalefileException(
        "'{0}' has been deprecated. Refer to {1} to see the full changes.".
        format(key, NodeLayout.APPSCALEFILE_INSTRUCTIONS))

    if value is True:
      command.append(str("--%s" % key))
    elif value is False:
      pass
    else:
      if key == "ips_layout":
        command.append("--ips_layout")
        command.append(base64.b64encode(yaml.dump(value)))
      elif key == "disks":
        command.append("--disks")
        command.append(base64.b64encode(yaml.dump(value)))
      elif key == "user_commands":
        command.append("--user_commands")
        command.append(base64.b64encode(yaml.dump(value)))
      else:
        command.append(str("--%s" % key))
        command.append(str("%s" % value))

  run_instances_opts = ParseArgs(command, "appscale-run-instances").args

  if 'infrastructure' not in contents_as_yaml:
    # Generate a new keypair if necessary.
    if not self.valid_ssh_key(contents_as_yaml, run_instances_opts):
      add_keypair_command = []
      if 'keyname' in contents_as_yaml:
        add_keypair_command.append('--keyname')
        add_keypair_command.append(str(contents_as_yaml['keyname']))

      add_keypair_command.append('--ips_layout')
      add_keypair_command.append(
        base64.b64encode(yaml.dump(contents_as_yaml['ips_layout'])))
      add_keypair_opts = ParseArgs(
        add_keypair_command, 'appscale-add-keypair').args
      AppScaleTools.add_keypair(add_keypair_opts)

  AppScaleTools.run_instances(run_instances_opts)
def test_start_all_nodes_reattach(self):
  self.node_layout = NodeLayout(self.reattach_options)
  self.assertNotEqual([], self.node_layout.nodes)

  fake_agent = FakeAgent()
  flexmock(factory.InfrastructureAgentFactory). \
    should_receive('create_agent'). \
    with_args('euca'). \
    and_return(fake_agent)

  LocalState.should_receive('get_host_with_role').and_return(IP_1)
  LocalState.should_receive('get_local_nodes_info') \
    .and_return(self.reattach_node_info)

  RemoteHelper.start_all_nodes(self.reattach_options, self.node_layout)
def test_fails_if_app_isnt_running(self):
  # If the user wants to relocate their app to port X, but their app isn't
  # even running, this should fail.

  # Assume that the AppController is running but our app isn't.
  flexmock(os.path)
  os.path.should_call('exists')  # set the fall-through
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_json_location(self.keyname)).and_return(True)

  fake_appcontroller = flexmock(name='fake_appcontroller')
  fake_appcontroller.should_receive('get_app_info_map').with_args(
    'the secret').and_return(json.dumps({}))
  flexmock(SOAPpy)
  SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
    .and_return(fake_appcontroller)

  argv = [
    '--keyname', self.keyname,
    '--appname', self.appid,
    '--http_port', '80',
    '--https_port', '443'
  ]
  options = ParseArgs(argv, self.function).args
  self.assertRaises(AppScaleException, AppScaleTools.relocate_app, options)
def test_create_user_accounts(self):
  # mock out reading the secret key
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_call('open')  # set the fall-through

  secret_key_location = LocalState.LOCAL_APPSCALE_PATH + "bookey.secret"
  fake_secret = flexmock(name="fake_secret")
  fake_secret.should_receive('read').and_return('the secret')
  builtins.should_receive('open').with_args(secret_key_location, 'r') \
    .and_return(fake_secret)

  # mock out reading the locations.json file, and slip in our own json
  flexmock(os.path)
  os.path.should_call('exists')  # set the fall-through
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_json_location('bookey')).and_return(True)

  fake_nodes_json = flexmock(name="fake_nodes_json")
  fake_nodes_json.should_receive('read').and_return(
    json.dumps({"node_info": [{
      "public_ip": "public1",
      "private_ip": "private1",
      "roles": ["shadow"]
    }]}))
  builtins.should_receive('open').with_args(
    LocalState.get_locations_json_location('bookey'), 'r') \
    .and_return(fake_nodes_json)

  # Mock out SOAP interactions with the AppController.
  fake_appcontroller = flexmock(name="fake_appcontroller")
  fake_appcontroller.should_receive('does_user_exist').with_args(
    '*****@*****.**', 'the secret').and_return('false')
  fake_appcontroller.should_receive('create_user').with_args(
    '*****@*****.**', str, 'xmpp_user', 'the secret').and_return('true')
  fake_appcontroller.should_receive('does_user_exist').with_args(
    'boo@public1', 'the secret').and_return('false')
  fake_appcontroller.should_receive('create_user').with_args(
    'boo@public1', str, 'xmpp_user', 'the secret').and_return('true')
  fake_appcontroller.should_receive('get_property').\
    with_args('login', 'the secret').and_return('{"login":"******"}')
  flexmock(SOAPpy)
  # NOTE: the tail of this snippet was obscured by redaction; the proxy URL
  # and the final call are inferred from the mocks above, and the redacted
  # e-mail address is left as-is.
  SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
    .and_return(fake_appcontroller)

  RemoteHelper.create_user_accounts('*****@*****.**', 'password',
                                    'public1', 'bookey')
def get_property(cls, options):
  """Queries AppScale for a list of system properties matching the provided
  regular expression, as well as the values associated with each matching
  property.

  Args:
    options: A Namespace that has fields for each parameter that can be
      passed in via the command-line interface.
  Returns:
    A dict mapping each property matching the given regex to its associated
    value.
  """
  shadow_host = LocalState.get_host_with_role(options.keyname, 'shadow')
  acc = AppControllerClient(shadow_host, LocalState.get_secret_key(
    options.keyname))

  return acc.get_property(options.property)
def test_create_user_accounts(self):
  # mock out reading the secret key
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_call('open')  # set the fall-through

  secret_key_location = LocalState.LOCAL_APPSCALE_PATH + "bookey.secret"
  fake_secret = flexmock(name="fake_secret")
  fake_secret.should_receive('read').and_return('the secret')
  builtins.should_receive('open').with_args(secret_key_location, 'r') \
    .and_return(fake_secret)

  # mock out reading the locations.json file, and slip in our own json
  flexmock(os.path)
  os.path.should_call('exists')  # set the fall-through
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_json_location('bookey')).and_return(True)

  fake_nodes_json = flexmock(name="fake_nodes_json")
  fake_nodes_json.should_receive('read').and_return(
    json.dumps({"node_info": [{
      "public_ip": "public1",
      "private_ip": "private1",
      "roles": ["shadow"]
    }]}))
  builtins.should_receive('open').with_args(
    LocalState.get_locations_json_location('bookey'), 'r') \
    .and_return(fake_nodes_json)

  # Mock out SOAP interactions with the AppController.
  fake_appcontroller = flexmock(name="fake_appcontroller")
  fake_appcontroller.should_receive('does_user_exist').with_args(
    '*****@*****.**', 'the secret').and_return('false')
  fake_appcontroller.should_receive('create_user').with_args(
    '*****@*****.**', str, 'xmpp_user', 'the secret').and_return('true')
  fake_appcontroller.should_receive('does_user_exist').with_args(
    'boo@public1', 'the secret').and_return('false')
  fake_appcontroller.should_receive('create_user').with_args(
    'boo@public1', str, 'xmpp_user', 'the secret').and_return('true')
  fake_appcontroller.should_receive('get_property').\
    with_args('login', 'the secret').and_return('{"login":"******"}')
  flexmock(SOAPpy)
  # NOTE: the tail of this snippet was obscured by redaction; the proxy URL
  # and the final call are inferred from the mocks above, and the redacted
  # e-mail address is left as-is.
  SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
    .and_return(fake_appcontroller)

  RemoteHelper.create_user_accounts('*****@*****.**', 'password',
                                    'public1', 'bookey')
def add_keypair(cls, options):
  """Sets up passwordless SSH login to the machines used in a virtualized
  cluster deployment.

  Args:
    options: A Namespace that has fields for each parameter that can be
      passed in via the command-line interface.
  Raises:
    AppScaleException: If any of the machines named in the ips_layout are
      not running, or do not have the SSH daemon running.
  """
  LocalState.require_ssh_commands(options.auto)
  LocalState.make_appscale_directory()

  path = LocalState.LOCAL_APPSCALE_PATH + options.keyname
  if options.add_to_existing:
    private_key = path
  else:
    _, private_key = LocalState.generate_rsa_key(options.keyname)

  if options.auto:
    if 'root_password' in options:
      AppScaleLogger.log("Using the provided root password to log into " + \
        "your VMs.")
      password = options.root_password
    else:
      AppScaleLogger.log("Please enter the password for the root user on" + \
        " your VMs:")
      password = getpass.getpass()

  node_layout = NodeLayout(options)

  all_ips = [node.public_ip for node in node_layout.nodes]
  for ip in all_ips:
    # first, make sure ssh is actually running on the host machine
    if not RemoteHelper.is_port_open(ip, RemoteHelper.SSH_PORT):
      raise AppScaleException("SSH does not appear to be running at {0}. " \
        "Is the machine at {0} up and running? Make sure your IPs are " \
        "correct!".format(ip))

    # next, set up passwordless ssh
    AppScaleLogger.log("Executing ssh-copy-id for host: {0}".format(ip))
    if options.auto:
      LocalState.shell("{0} root@{1} {2} {3}".format(cls.EXPECT_SCRIPT, ip,
        private_key, password))
    else:
      LocalState.shell("ssh-copy-id -i {0} root@{1}".format(private_key, ip))

  AppScaleLogger.success("Generated a new SSH key for this deployment " + \
    "at {0}".format(private_key))
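# Hedged usage sketch (not in the original source): invoking add_keypair the
# way the appscale-add-keypair CLI would. The flag names ("--keyname",
# "--ips_layout", "--auto") and the import paths are assumptions; the
# ips.yaml file is an illustrative placeholder.
from appscale.tools.appscale_tools import AppScaleTools  # assumed path
from appscale.tools.parse_args import ParseArgs          # assumed path

argv = ["--keyname", "bookey", "--ips_layout", "ips.yaml", "--auto"]
options = ParseArgs(argv, "appscale-add-keypair").args
AppScaleTools.add_keypair(options)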
def test_extract_tgz_app_to_dir(self):
  flexmock(os)
  os.should_receive('mkdir').and_return()
  flexmock(os.path)
  os.path.should_receive('abspath').with_args('relative/app.tar.gz') \
    .and_return('/tmp/relative/app.tar.gz')

  flexmock(LocalState)
  LocalState.should_receive('shell') \
    .with_args(re.compile("tar zxvf '/tmp/relative/app.tar.gz'"), False) \
    .and_return()

  os.should_receive('listdir').and_return(['one_folder'])
  os.path.should_receive('isdir').with_args(re.compile('one_folder')) \
    .and_return(True)

  location = LocalState.extract_tgz_app_to_dir('relative/app.tar.gz', False)
  self.assertEquals(True, 'one_folder' in location)
def ssh(self, node):
  """ 'ssh' provides a simple way to log into virtual machines in an
  AppScale deployment, using the SSH key provided in the user's
  AppScalefile.

  Args:
    node: An int or a role name identifying the node to SSH to. An integer
      is used as an index into the list of nodes running in the AppScale
      deployment, starting with zero; a string is treated as a role name.
      Defaults to the shadow node when omitted.
  Raises:
    AppScalefileException: If there is no AppScalefile in the current
      directory.
    AppScaleException: If the requested index or role does not map to a
      reachable machine.
  """
  contents = self.read_appscalefile()
  contents_as_yaml = yaml.safe_load(contents)

  if 'keyname' in contents_as_yaml:
    keyname = contents_as_yaml['keyname']
  else:
    keyname = "appscale"

  if node is None:
    node = "shadow"

  try:
    index = int(node)
    nodes = self.get_nodes(keyname)
    # make sure there is a node at position 'index'
    ip = nodes[index]['public_ip']
  except IndexError:
    raise AppScaleException(
      "Cannot ssh to node at index " + str(index) + ", as there are only " +
      str(len(nodes)) + " nodes in the currently running AppScale "
      "deployment.")
  except ValueError:
    try:
      ip = LocalState.get_host_with_role(keyname, node.lower())
    except AppScaleException:
      raise AppScaleException("No role exists by that name. "
                              "Valid roles are {}".format(
                                NodeLayout.ADVANCED_FORMAT_KEYS))

  # construct the ssh command to exec with that IP address
  command = [
    "ssh", "-o", "StrictHostkeyChecking=no", "-i",
    self.get_key_location(keyname), "root@" + ip
  ]

  # exec the ssh command
  try:
    subprocess.check_call(command)
  except subprocess.CalledProcessError:
    raise AppScaleException(
      "Unable to ssh to the machine at "
      "{}. Please make sure this machine is reachable, "
      "has a public ip, or that the role is in use by "
      "the deployment.".format(ip))
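# Hedged usage sketch (not in the original source): the two ways 'ssh' can be
# addressed, by node index or by role name. AppScale stands for the class
# this method belongs to; the import path is an assumption.
from appscale.appscale import AppScale  # assumed path

appscale = AppScale()
appscale.ssh(0)         # SSH to the node at index 0 in the deployment
appscale.ssh("shadow")  # or SSH to the node holding a given role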
def test_generate_deployment_params(self):
  # this method is fairly light, so just make sure that it constructs the
  # dict to send to the AppController correctly
  options = flexmock(name='options', table='cassandra', keyname='boo',
    appengine='1', autoscale=False, group='bazgroup', replication=None,
    infrastructure='ec2', machine='ami-ABCDEFG', instance_type='m1.large',
    use_spot_instances=True, max_spot_price=1.23, clear_datastore=False,
    disks={'node-1': 'vol-ABCDEFG'}, zone='my-zone-1b', verbose=True,
    user_commands=[], flower_password="abc",
    max_memory=ParseArgs.DEFAULT_MAX_MEMORY)
  node_layout = NodeLayout({
    'table': 'cassandra',
    'infrastructure': "ec2",
    'min': 1,
    'max': 1
  })
  flexmock(NodeLayout).should_receive("head_node").and_return(
    SimpleNode('public1', 'some cloud', ['some role']))

  expected = {
    'table': 'cassandra',
    'login': 'public1',
    'clear_datastore': 'False',
    'keyname': 'boo',
    'appengine': '1',
    'autoscale': 'False',
    'replication': 'None',
    'group': 'bazgroup',
    'machine': 'ami-ABCDEFG',
    'infrastructure': 'ec2',
    'instance_type': 'm1.large',
    'min_images': '1',
    'max_images': '1',
    'use_spot_instances': 'True',
    'user_commands': json.dumps([]),
    'max_spot_price': '1.23',
    'zone': 'my-zone-1b',
    'verbose': 'True',
    'flower_password': 'abc',
    'max_memory': str(ParseArgs.DEFAULT_MAX_MEMORY)
  }
  actual = LocalState.generate_deployment_params(
    options, node_layout, {'max_spot_price': '1.23'})
  self.assertEquals(expected, actual)
def test_remove_app_and_app_is_running(self):
  # mock out reading from stdin, and assume the user says 'YES'
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_receive('raw_input').and_return('YES')

  # mock out reading the secret key
  builtins.should_call('open')  # set the fall-through
  secret_key_location = LocalState.get_secret_key_location(self.keyname)
  fake_secret = flexmock(name="fake_secret")
  fake_secret.should_receive('read').and_return('the secret')
  builtins.should_receive('open').with_args(secret_key_location, 'r') \
    .and_return(fake_secret)

  # mock out reading the locations.json file, and slip in our own json
  flexmock(os.path)
  os.path.should_call('exists')  # set the fall-through
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_json_location(self.keyname)).and_return(True)

  fake_nodes_json = flexmock(name="fake_nodes_json")
  fake_nodes_json.should_receive('read').and_return(
    json.dumps({"node_info": [{
      "public_ip": "public1",
      "private_ip": "private1",
      "roles": ["shadow", "load_balancer"]
    }]}))
  fake_nodes_json.should_receive('write').and_return()
  builtins.should_receive('open').with_args(
    LocalState.get_locations_json_location(self.keyname), 'r') \
    .and_return(fake_nodes_json)

  flexmock(AdminClient).should_receive('list_services').\
    and_return(['default'])
  flexmock(AdminClient).should_receive('delete_service').\
    with_args('blargapp', 'default').and_return('op_id')
  flexmock(AdminClient).should_receive('get_operation').\
    with_args('blargapp', 'op_id').and_return({'done': True})

  argv = [
    "--project-id", "blargapp",
    "--keyname", self.keyname
  ]
  options = ParseArgs(argv, self.function).args
  AppScaleTools.remove_app(options)
def update_cron(cls, source_location, keyname, project_id):
  """ Updates a project's cron jobs from the configuration file.

  Args:
    source_location: A string specifying the location of the source code.
    keyname: A string specifying the key name.
    project_id: A string specifying the project ID.
  """
  if cls.TAR_GZ_REGEX.search(source_location):
    fetch_function = utils.config_from_tar_gz
    version = Version.from_tar_gz(source_location)
  elif cls.ZIP_REGEX.search(source_location):
    fetch_function = utils.config_from_zip
    version = Version.from_zip(source_location)
  elif os.path.isdir(source_location):
    fetch_function = utils.config_from_dir
    version = Version.from_directory(source_location)
  elif source_location.endswith('.yaml'):
    fetch_function = utils.config_from_dir
    version = Version.from_yaml_file(source_location)
    source_location = os.path.dirname(source_location)
  else:
    raise BadConfigurationException(
      '{} must be a directory, tar.gz, or zip'.format(source_location))

  if project_id:
    version.project_id = project_id

  cron_config = fetch_function('cron.yaml', source_location)
  if cron_config is None:
    cron_config = fetch_function('cron.xml', source_location)
    # If the source does not have a cron configuration file, do nothing.
    if cron_config is None:
      return

    cron_jobs = utils.cron_from_xml(cron_config)
  else:
    cron_jobs = yaml.safe_load(cron_config)

  AppScaleLogger.log('Updating cron jobs')
  load_balancer_ip = LocalState.get_host_with_role(keyname, 'load_balancer')
  secret_key = LocalState.get_secret_key(keyname)

  admin_client = AdminClient(load_balancer_ip, secret_key)
  admin_client.update_cron(version.project_id, cron_jobs)
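# Hedged usage sketch (not in the original source): updating a project's cron
# jobs from different source layouts. This assumes update_cron is exposed as
# a classmethod on AppScaleTools, as the surrounding snippets suggest; the
# import path, paths, and project ID are placeholders.
from appscale.tools.appscale_tools import AppScaleTools  # assumed path

# From a source directory, a tarball, or an app.yaml path (per the branches
# above, the .yaml case resolves the config relative to that file's directory):
AppScaleTools.update_cron('/path/to/guestbook', 'bookey', 'guestbook')
AppScaleTools.update_cron('/path/to/guestbook.tar.gz', 'bookey', 'guestbook')
AppScaleTools.update_cron('/path/to/guestbook/app.yaml', 'bookey', 'guestbook')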
def setUp(self):
  self.keyname = "boobazblargfoo"
  self.function = "appscale-relocate-app"
  self.appid = 'my-crazy-app'

  # mock out any writing to stdout
  flexmock(AppScaleLogger)
  AppScaleLogger.should_receive('log').and_return()
  AppScaleLogger.should_receive('success').and_return()
  AppScaleLogger.should_receive('warn').and_return()

  # mock out all sleeping
  flexmock(time)
  time.should_receive('sleep').and_return()

  # mock out reading the locations.json file, and slip in our own json
  flexmock(os.path)
  os.path.should_call('exists')  # set the fall-through
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_json_location(self.keyname)).and_return(True)

  fake_nodes_json = flexmock(name="fake_nodes_json")
  fake_nodes_json.should_receive('read').and_return(
    json.dumps({
      "node_info": [{
        "public_ip": "public1",
        "private_ip": "private1",
        "jobs": ["shadow", "login"]
      }]
    }))
  fake_nodes_json.should_receive('write').and_return()

  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_call('open')  # set the fall-through
  builtins.should_receive('open').with_args(
    LocalState.get_locations_json_location(self.keyname), 'r') \
    .and_return(fake_nodes_json)

  # put in a mock for reading the secret file
  secret_key_location = LocalState.get_secret_key_location(self.keyname)
  fake_secret = flexmock(name="fake_secret")
  fake_secret.should_receive('read').and_return('the secret')
  builtins.should_receive('open').with_args(secret_key_location, 'r') \
    .and_return(fake_secret)
def configure_instance_security(self, parameters):
  """ Creates a GCE network and firewall with the specified name, and opens
  the ports on that firewall as needed for AppScale.

  We expect both the network and the firewall to not exist before this
  point, to avoid accidentally placing AppScale instances from different
  deployments in the same network and firewall (thus enabling them to see
  each other's web traffic).

  Args:
    parameters: A dict with keys for each parameter needed to connect to
      Google Compute Engine, and an additional key indicating the name of
      the network and firewall that we should create in GCE.
  Returns:
    True, if the named network and firewall was created successfully.
  Raises:
    AgentRuntimeException: If the named network or firewall already exist
      in GCE.
  """
  AppScaleLogger.log("Verifying that SSH key exists locally")
  keyname = parameters[self.PARAM_KEYNAME]
  private_key = LocalState.LOCAL_APPSCALE_PATH + keyname
  public_key = private_key + ".pub"

  if os.path.exists(private_key) or os.path.exists(public_key):
    raise AgentRuntimeException("SSH key already found locally - please " +
                                "use a different keyname")

  LocalState.generate_rsa_key(keyname, parameters[self.PARAM_VERBOSE])

  ssh_key_exists, all_ssh_keys = self.does_ssh_key_exist(parameters)
  if not ssh_key_exists:
    self.create_ssh_key(parameters, all_ssh_keys)

  if self.does_network_exist(parameters):
    raise AgentRuntimeException("Network already exists - please use a " + \
      "different group name.")

  if self.does_firewall_exist(parameters):
    raise AgentRuntimeException("Firewall already exists - please use a " + \
      "different group name.")

  network_url = self.create_network(parameters)
  self.create_firewall(parameters, network_url)
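# Hedged sketch (not in the original source): the minimal parameters the body
# above dereferences directly. The helper calls (does_ssh_key_exist,
# create_network, create_firewall, ...) also need GCE project and credential
# parameters, which are omitted here; the agent class name and the 'group'
# and 'project' keys are assumptions.
from appscale.agents.gce_agent import GCEAgent  # assumed path and class name

agent = GCEAgent()
params = {
  agent.PARAM_KEYNAME: 'bookey',   # names the local RSA key pair
  agent.PARAM_VERBOSE: False,      # passed through to generate_rsa_key
  'group': 'boogroup',             # network/firewall name (assumed key)
  'project': 'my-gce-project',     # GCE project ID (assumed key)
}
agent.configure_instance_security(params)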
def down(self, clean=False, terminate=False):
  """ 'down' provides a nicer experience for users than the
  appscale-terminate-instances command, by using the configuration options
  present in the AppScalefile found in the current working directory.

  Args:
    clean: A boolean to indicate whether deployment data and metadata should
      also be removed. This will clear the datastore.
    terminate: A boolean to indicate whether instances should be terminated
      (valid only if we spawned the instances at start).
  Raises:
    AppScalefileException: If there is no AppScalefile in the current
      working directory.
  """
  contents = self.read_appscalefile()

  # Construct a terminate-instances command from the file's contents
  command = []
  contents_as_yaml = yaml.safe_load(contents)

  if 'verbose' in contents_as_yaml and contents_as_yaml['verbose'] == True:
    command.append("--verbose")

  if 'keyname' in contents_as_yaml:
    keyname = contents_as_yaml['keyname']
    command.append("--keyname")
    command.append(contents_as_yaml['keyname'])
  else:
    keyname = 'appscale'

  if clean:
    if 'test' not in contents_as_yaml or contents_as_yaml['test'] != True:
      LocalState.confirm_or_abort("Clean will delete all data in the "
                                  "deployment.")
    command.append("--clean")

  if terminate:
    infrastructure = LocalState.get_infrastructure(keyname)
    if infrastructure != "xen" and not LocalState.are_disks_used(
        keyname) and 'test' not in contents_as_yaml:
      LocalState.confirm_or_abort("Terminate will delete instances and the "
                                  "data on them.")
    command.append("--terminate")

  if 'test' in contents_as_yaml and contents_as_yaml['test'] == True:
    command.append("--test")

  # Finally, exec the command. Don't worry about validating it -
  # appscale-terminate-instances will do that for us.
  options = ParseArgs(command, "appscale-terminate-instances").args
  AppScaleTools.terminate_instances(options)

  LocalState.cleanup_appscale_files(keyname, terminate)
  AppScaleLogger.success("Successfully stopped your AppScale deployment.")
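# Hedged usage sketch (not in the original source): typical ways 'down' is
# called from the 'appscale down' command. The AppScale import path is an
# assumption.
from appscale.appscale import AppScale  # assumed path

appscale = AppScale()
appscale.down()                # stop services; keep instances and data
appscale.down(clean=True)      # also clear the datastore (prompts first)
appscale.down(terminate=True)  # also terminate cloud instances (prompts first)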
def test_generate_deployment_params(self):
  # this method is fairly light, so just make sure that it constructs the
  # dict to send to the AppController correctly
  options = flexmock(
    name='options', table='cassandra', keyname='boo',
    default_min_appservers='1', autoscale=False, group='bazgroup',
    replication=None, infrastructure='ec2', machine='ami-ABCDEFG',
    instance_type='m1.large', use_spot_instances=True, max_spot_price=1.23,
    clear_datastore=False, disks={'node-1': 'vol-ABCDEFG'},
    zone='my-zone-1b', verbose=True, user_commands=[], flower_password="abc",
    default_max_appserver_memory=ParseArgs.DEFAULT_MAX_APPSERVER_MEMORY,
    EC2_ACCESS_KEY='baz', EC2_SECRET_KEY='baz', EC2_URL='',
    login_host='public1', aws_subnet_id=None, aws_vpc_id=None)
  node_layout = NodeLayout({
    'table': 'cassandra',
    'infrastructure': "ec2",
    'min_machines': 1,
    'max_machines': 1,
    'instance_type': 'm1.large'
  })
  flexmock(NodeLayout).should_receive("head_node").and_return(Node(
    'public1', 'some cloud', ['some role']))

  expected = {
    'table': 'cassandra',
    'login': 'public1',
    'clear_datastore': 'False',
    'keyname': 'boo',
    'default_min_appservers': '1',
    'autoscale': 'False',
    'replication': 'None',
    'group': 'bazgroup',
    'machine': 'ami-ABCDEFG',
    'infrastructure': 'ec2',
    'instance_type': 'm1.large',
    'min_machines': '1',
    'max_machines': '1',
    'use_spot_instances': 'True',
    'user_commands': json.dumps([]),
    'max_spot_price': '1.23',
    'zone': 'my-zone-1b',
    'verbose': 'True',
    'flower_password': 'abc',
    'default_max_appserver_memory':
      str(ParseArgs.DEFAULT_MAX_APPSERVER_MEMORY),
    'EC2_ACCESS_KEY': 'baz',
    'EC2_SECRET_KEY': 'baz',
    'EC2_URL': '',
    'aws_subnet_id': None,
    'aws_vpc_id': None
  }
  actual = LocalState.generate_deployment_params(
    options, node_layout, {'max_spot_price': '1.23'})
  self.assertEquals(expected, actual)
def test_set_property(self):
  # put in a mock for reading the secret file
  builtins = flexmock(sys.modules["__builtin__"])
  builtins.should_call("open")  # set the fall-through
  secret_key_location = LocalState.get_secret_key_location(self.keyname)
  fake_secret = flexmock(name="fake_secret")
  fake_secret.should_receive("read").and_return("the secret")
  builtins.should_receive("open").with_args(secret_key_location, "r") \
    .and_return(fake_secret)

  # mock out finding the shadow node's IP address from the json file
  flexmock(os.path)
  os.path.should_call("exists")  # set the fall-through
  os.path.should_receive("exists").with_args(
    LocalState.get_locations_json_location(self.keyname)).and_return(True)

  fake_nodes_json = flexmock(name="fake_nodes_json")
  fake_nodes_json.should_receive("read").and_return(
    json.dumps({"node_info": [{
      "public_ip": "public1",
      "private_ip": "private1",
      "jobs": ["login", "shadow"]
    }]}))
  builtins.should_receive("open").with_args(
    LocalState.get_locations_json_location(self.keyname), "r") \
    .and_return(fake_nodes_json)

  # mock out setting the property on the appcontroller
  property_name = "name"
  property_value = "value"
  fake_appcontroller = flexmock(name="fake_appcontroller")
  fake_appcontroller.should_receive("set_property").with_args(
    property_name, property_value, "the secret").and_return("OK")
  flexmock(SOAPpy)
  SOAPpy.should_receive("SOAPProxy").with_args("https://public1:17443") \
    .and_return(fake_appcontroller)

  argv = ["--keyname", self.keyname,
          "--property_name", property_name,
          "--property_value", property_value]
  options = ParseArgs(argv, self.function).args
  result = AppScaleTools.set_property(options)
  self.assertEqual(None, result)
def test_remove_app_but_app_isnt_running(self):
  # mock out reading from stdin, and assume the user says 'yes'
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_receive('raw_input').and_return('yes')

  # mock out reading the secret key
  builtins.should_call('open')  # set the fall-through
  secret_key_location = LocalState.get_secret_key_location(self.keyname)
  fake_secret = flexmock(name="fake_secret")
  fake_secret.should_receive('read').and_return('the secret')
  builtins.should_receive('open').with_args(secret_key_location, 'r') \
    .and_return(fake_secret)

  flexmock(AdminClient).should_receive('delete_project').\
    and_raise(AdminError)

  # mock out reading the locations.json file, and slip in our own json
  flexmock(os.path)
  os.path.should_call('exists')  # set the fall-through
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_json_location(self.keyname)).and_return(True)

  fake_nodes_json = flexmock(name="fake_nodes_json")
  fake_nodes_json.should_receive('read').and_return(
    json.dumps({"node_info": [{
      "public_ip": "public1",
      "private_ip": "private1",
      "jobs": ["shadow", "login"]
    }]}))
  fake_nodes_json.should_receive('write').and_return()
  builtins.should_receive('open').with_args(
    LocalState.get_locations_json_location(self.keyname), 'r') \
    .and_return(fake_nodes_json)

  argv = [
    "--project-id", "blargapp",
    "--keyname", self.keyname
  ]
  options = ParseArgs(argv, self.function).args
  self.assertRaises(AdminError, AppScaleTools.remove_app, options)
def test_update_local_metadata(self):
  # mock out getting all the ips in the deployment from the head node
  fake_soap = flexmock(name='fake_soap')
  fake_soap.should_receive('get_all_public_ips').with_args('the secret') \
    .and_return(json.dumps(['public1']))
  role_info = [{
    'public_ip': 'public1',
    'private_ip': 'private1',
    'roles': ['shadow', 'db_master']
  }]
  fake_soap.should_receive('get_role_info').with_args('the secret') \
    .and_return(json.dumps(role_info))
  flexmock(SOAPpy)
  SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
    .and_return(fake_soap)

  # mock out reading the secret key
  fake_secret = flexmock(name='fake_secret')
  fake_secret.should_receive('read').and_return('the secret')
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_call('open')
  builtins.should_receive('open').with_args(
    LocalState.get_secret_key_location('booscale'), 'r') \
    .and_return(fake_secret)

  # Mock out writing the json file.
  json_location = LocalState.get_locations_json_location('booscale')
  builtins.should_receive('open').with_args(json_location, 'w') \
    .and_return(flexmock(write=lambda *args: None))

  options = flexmock(name='options', table='cassandra', infrastructure='ec2',
    keyname='booscale', group='boogroup', zone='my-zone-1b',
    EC2_ACCESS_KEY='baz', EC2_SECRET_KEY='baz', EC2_URL='')
  node_layout = NodeLayout(options={
    'min_machines': 1,
    'max_machines': 1,
    'infrastructure': 'ec2',
    'table': 'cassandra',
    'instance_type': 'm1.large'
  })

  LocalState.update_local_metadata(options, 'public1', 'public1')
def test_ensure_appscale_isnt_running_but_it_is(self):
  # if there is a secret file and force isn't set, we should abort
  os.path.should_receive('exists').with_args(
    LocalState.get_secret_key_location(self.keyname)).and_return(True)
  flexmock(LocalState).should_receive('get_login_host') \
    .and_return('login_ip')
  flexmock(LocalState).should_receive('get_secret_key') \
    .and_return('super-secret')
  flexmock(AppControllerClient).should_receive('get_status').and_return("OK")

  self.assertRaises(BadConfigurationException,
    LocalState.ensure_appscale_isnt_running, self.keyname, False)
def test_get_key_path_from_local_appscale(self):
  keyname = "keyname"

  # Test that the key path returned is ~/.appscale when the .key file is
  # present in that location.
  local_appscale_key_file_path = LocalState.LOCAL_APPSCALE_PATH + keyname + \
    ".key"
  os.path.should_receive('isfile').with_args(local_appscale_key_file_path). \
    and_return(True)
  actual_key_path = LocalState.get_key_path_from_name(keyname)
  self.assertEquals(local_appscale_key_file_path, actual_key_path)

  # Test that the key path returned is /etc/appscale/keys/cloud1/ when the
  # .key file is present in that location.
  etc_appscale_key_file_path = LocalState.ETC_APPSCALE_KEY_PATH + keyname + \
    ".key"
  os.path.should_receive('isfile').with_args(local_appscale_key_file_path). \
    and_return(False)
  os.path.should_receive('isfile').with_args(etc_appscale_key_file_path). \
    and_return(True)
  actual_key_path = LocalState.get_key_path_from_name(keyname)
  self.assertEquals(etc_appscale_key_file_path, actual_key_path)