def test_new_with_right_number_of_unique_disks_one_node(self):
  """Two 'appengine' nodes with two distinctly-named EBS / PD disks.

  This is the desired user behavior: each of the two appengine nodes
  should be assigned its own (unique) disk.
  """
  input_yaml = [{
    'roles': ['master', 'database'], 'nodes': 1,
    'instance_type': 'm1.large'
  }, {
    'roles': ['appengine'], 'nodes': 2, 'instance_type': 'm1.large',
    'disks': [self.DISK_ONE, self.DISK_TWO]
  }]
  options = self.default_options.copy()
  options['ips'] = input_yaml
  layout = NodeLayout(options)
  self.assertNotEqual([], layout.nodes)
  # assertEquals is a deprecated alias of assertEqual.
  self.assertEqual(self.DISK_ONE, layout.other_nodes()[0].disk)
  self.assertEqual(self.DISK_TWO, layout.other_nodes()[1].disk)
def test_start_all_nodes_reattach_changed_locations(self):
  """Reattaching when the recorded node locations no longer match the
  requested layout must raise BadConfigurationException.
  """
  self.node_layout = NodeLayout(self.reattach_options)
  fake_agent = FakeAgent()
  flexmock(factory.InfrastructureAgentFactory). \
    should_receive('create_agent'). \
    with_args('public cloud'). \
    and_return(fake_agent)
  LocalState.should_receive('get_login_host').and_return('0.0.0.1')
  # Recorded deployment info whose role assignments differ from the layout
  # requested via self.reattach_options.
  node_info = [{
    "public_ip": "0.0.0.0",
    "private_ip": "0.0.0.0",
    "instance_id": "i-APPSCALE1",
    "jobs": ['load_balancer', 'taskqueue', 'shadow', 'login',
             'taskqueue_master']
  }, {
    "public_ip": "0.0.0.0",
    "private_ip": "0.0.0.0",
    "instance_id": "i-APPSCALE2",
    "jobs": ['memcache', 'appengine']
  }, {
    "public_ip": "0.0.0.0",
    "private_ip": "0.0.0.0",
    "instance_id": "i-APPSCALE3",
    "jobs": ['zookeeper', "appengine"]
  }, {
    "public_ip": "0.0.0.0",
    "private_ip": "0.0.0.0",
    "instance_id": "i-APPSCALE4",
    "jobs": ['db_master']
  }]
  LocalState.should_receive('get_local_nodes_info').and_return(node_info)
  # Bug fix: assertRaises(exc) without a callable asserts nothing.
  # Invoke the operation under test inside the context manager so the
  # expected exception is actually checked.
  with self.assertRaises(BadConfigurationException):
    RemoteHelper.start_all_nodes(self.reattach_options, self.node_layout)
def test_start_all_nodes_reattach_changed_asf(self):
  """Reattaching with a changed node layout (ASF) must raise
  BadConfigurationException.
  """
  self.options = flexmock(infrastructure='public cloud', group='group',
    machine='vm image', instance_type='instance type', keyname='keyname',
    table='cassandra', verbose=False, test=False, use_spot_instances=False,
    zone='zone', static_ip=None, replication=None, appengine=None,
    autoscale=None, user_commands=[], flower_password='', max_memory='X',
    ips={'zookeeper': 'node-2', 'master': 'node-1',
         'appengine': 'node-3', 'database': 'node-3'})
  self.node_layout = NodeLayout(self.options)
  fake_agent = FakeAgent()
  flexmock(factory.InfrastructureAgentFactory). \
    should_receive('create_agent'). \
    with_args('public cloud'). \
    and_return(fake_agent)
  LocalState.should_receive('get_login_host').and_return('0.0.0.1')
  LocalState.should_receive('get_local_nodes_info')\
    .and_return(self.reattach_node_info)
  # Bug fix: assertRaises(exc) without a callable asserts nothing.
  # Call the operation under test so the expected exception is checked.
  with self.assertRaises(BadConfigurationException):
    RemoteHelper.start_all_nodes(self.options, self.node_layout)
def test_from_locations_json_list_able_to_match(self):
  """Every requested node should match (by role set) exactly one node
  rebuilt from the reattach locations JSON.
  """
  options = flexmock(infrastructure='euca', group='group',
    machine='vm image', instance_type='instance type', keyname='keyname',
    table='cassandra', verbose=False, test=False, use_spot_instances=False,
    zone='zone', static_ip=None, replication=None, appengine=None,
    autoscale=None, user_commands=[], flower_password='', max_memory='X',
    ips={'master': 'node-1', 'zookeeper': 'node-2',
         'appengine': 'node-4', 'database': 'node-3'})
  node_layout = NodeLayout(options)
  self.assertTrue(node_layout.is_valid())
  new_layout = node_layout.from_locations_json_list(
    self.reattach_node_info)
  self.assertNotEqual(new_layout, None)
  nodes_copy = new_layout[:]
  for old_node in node_layout.nodes:
    # Match nodes based on jobs/roles.
    # Bug fix: the original removed elements from nodes_copy while
    # iterating it (which can silently skip entries); remove the first
    # match and break instead.
    for node in nodes_copy:
      if set(old_node.roles) == set(node.roles):
        nodes_copy.remove(node)
        break
  # Every rebuilt node should have been consumed by a match.
  self.assertEqual(nodes_copy, [])
def run_instances(cls, options):
  """Starts a new AppScale deployment with the parameters given.

  Args:
    options: A Namespace that has fields for each parameter that can be
      passed in via the command-line interface.
  Raises:
    AppControllerException: If the AppController on the head node crashes.
      When this occurs, the message in the exception contains the reason
      why the AppController crashed.
    BadConfigurationException: If the user passes in options that are not
      sufficient to start an AppScale deployment (e.g., running on EC2 but
      not specifying the AMI to use), or if the user provides us
      contradictory options (e.g., running on EC2 but not specifying EC2
      credentials).
  """
  LocalState.make_appscale_directory()
  LocalState.ensure_appscale_isnt_running(options.keyname, options.force)
  node_layout = NodeLayout(options)

  # In cloud deployments, warn (interactively) when the user is about to
  # run without any persistent disks, unless test/force mode suppresses it.
  if options.infrastructure:
    if (not options.test and not options.force and
        not (options.disks or node_layout.are_disks_used())):
      LocalState.ensure_user_wants_to_run_without_disks()

  # Only log the major.minor portion of the version string.
  reduced_version = '.'.join(x for x in APPSCALE_VERSION.split('.')[:2])
  AppScaleLogger.log("Starting AppScale " + reduced_version)

  # Random ID used to correlate the remote "started"/"finished" log events.
  my_id = str(uuid.uuid4())
  AppScaleLogger.remote_log_tools_state(options, my_id, "started",
    APPSCALE_VERSION)

  head_node = node_layout.head_node()
  # Start VMs in cloud via cloud agent.
  if options.infrastructure:
    # start_all_nodes returns a layout updated with the real IPs/instances.
    node_layout = RemoteHelper.start_all_nodes(options, node_layout)

  # Enables root logins and SSH access on the head node.
  RemoteHelper.enable_root_ssh(options, head_node.public_ip)
  AppScaleLogger.verbose("Node Layout: {}".format(node_layout.to_list()))

  # Ensure all nodes are compatible.
  RemoteHelper.ensure_machine_is_compatible(
    head_node.public_ip, options.keyname)

  # Use rsync to move custom code into the deployment.
  if options.rsync_source:
    AppScaleLogger.log("Copying over local copy of AppScale from {0}".
      format(options.rsync_source))
    RemoteHelper.rsync_files(head_node.public_ip, options.keyname,
      options.rsync_source)

  # Start services on head node.
  RemoteHelper.start_head_node(options, my_id, node_layout)

  # Write deployment metadata to disk (facilitates SSH operations, etc.)
  db_master = node_layout.db_master().private_ip
  # NOTE: head_node is rebound here from a Node object to its public IP
  # string; everything below uses the IP.
  head_node = node_layout.head_node().public_ip
  LocalState.update_local_metadata(options, db_master, head_node)

  # Copy the locations.json to the head node
  RemoteHelper.copy_local_metadata(node_layout.head_node().public_ip,
    options.keyname)

  # Wait for services on head node to start.
  secret_key = LocalState.get_secret_key(options.keyname)
  acc = AppControllerClient(head_node, secret_key)
  try:
    while not acc.is_initialized():
      AppScaleLogger.log('Waiting for head node to initialize...')
      # This can take some time in particular the first time around, since
      # we will have to initialize the database.
      time.sleep(cls.SLEEP_TIME*3)
  except socket.error as socket_error:
    # The AppController became unreachable; surface its crash log instead
    # of the raw socket error.
    AppScaleLogger.warn('Unable to initialize AppController: {}'.
      format(socket_error.message))
    message = RemoteHelper.collect_appcontroller_crashlog(
      head_node, options.keyname)
    raise AppControllerException(message)

  # Set up admin account.
  try:
    # We don't need to have any exception information here: we do expect
    # some anyway while the UserAppServer is coming up.
    acc.does_user_exist("non-existent-user", True)
  except Exception:
    AppScaleLogger.log('UserAppServer not ready yet. Retrying ...')
    time.sleep(cls.SLEEP_TIME)

  if options.admin_user and options.admin_pass:
    AppScaleLogger.log("Using the provided admin username/password")
    username, password = options.admin_user, options.admin_pass
  elif options.test:
    AppScaleLogger.log("Using default admin username/password")
    username, password = LocalState.DEFAULT_USER, LocalState.DEFAULT_PASSWORD
  else:
    # Prompt the user interactively for credentials.
    username, password = LocalState.get_credentials()

  RemoteHelper.create_user_accounts(username, password, head_node,
    options.keyname)
  acc.set_admin_role(username, 'true', cls.ADMIN_CAPABILITIES)

  # Wait for machines to finish loading and AppScale Dashboard to be deployed.
  RemoteHelper.wait_for_machines_to_finish_loading(head_node, options.keyname)

  try:
    login_host = acc.get_property('login')['login']
  except KeyError:
    raise AppControllerException('login property not found')

  RemoteHelper.sleep_until_port_is_open(
    login_host, RemoteHelper.APP_DASHBOARD_PORT)
  AppScaleLogger.success("AppScale successfully started!")
  AppScaleLogger.success(
    'View status information about your AppScale deployment at '
    'http://{}:{}'.format(login_host, RemoteHelper.APP_DASHBOARD_PORT))
  AppScaleLogger.remote_log_tools_state(options, my_id, "finished",
    APPSCALE_VERSION)
def test_generate_deployment_params(self):
  """generate_deployment_params should build the exact dict sent to the
  AppController.

  This method is fairly light, so just make sure that it constructs the
  dict correctly from the options, layout and additional parameters.
  """
  options = flexmock(name='options', table='cassandra', keyname='boo',
    default_min_appservers='1', autoscale=False, group='bazgroup',
    replication=None, infrastructure='ec2', machine='ami-ABCDEFG',
    instance_type='m1.large', use_spot_instances=True, max_spot_price=1.23,
    clear_datastore=False, disks={'node-1': 'vol-ABCDEFG'},
    zone='my-zone-1b', verbose=True, user_commands=[],
    flower_password="******",
    default_max_appserver_memory=ParseArgs.DEFAULT_MAX_APPSERVER_MEMORY,
    EC2_ACCESS_KEY='baz', EC2_SECRET_KEY='baz', EC2_URL='')
  node_layout = NodeLayout({
    'table': 'cassandra',
    'infrastructure': "ec2",
    'min_machines': 1,
    'max_machines': 1,
    'instance_type': 'm1.large'
  })
  flexmock(NodeLayout).should_receive("head_node").and_return(
    Node('public1', 'some cloud', ['some role']))
  # Note that every value is expected to be stringified for transport.
  expected = {
    'table': 'cassandra',
    'login': '******',
    'clear_datastore': 'False',
    'keyname': 'boo',
    'default_min_appservers': '1',
    'autoscale': 'False',
    'replication': 'None',
    'group': 'bazgroup',
    'machine': 'ami-ABCDEFG',
    'infrastructure': 'ec2',
    'instance_type': 'm1.large',
    'min_machines': '1',
    'max_machines': '1',
    'use_spot_instances': 'True',
    'user_commands': json.dumps([]),
    'max_spot_price': '1.23',
    'zone': 'my-zone-1b',
    'verbose': 'True',
    'flower_password': '******',
    'default_max_appserver_memory':
      str(ParseArgs.DEFAULT_MAX_APPSERVER_MEMORY),
    'EC2_ACCESS_KEY': 'baz',
    'EC2_SECRET_KEY': 'baz',
    'EC2_URL': ''
  }
  actual = LocalState.generate_deployment_params(
    options, node_layout, {'max_spot_price': '1.23'})
  # assertEquals is a deprecated alias of assertEqual.
  self.assertEqual(expected, actual)
def setUp(self): # mock out all logging, since it clutters our output flexmock(AppScaleLogger) AppScaleLogger.should_receive('log').and_return() # mock out all sleeps, as they aren't necessary for unit testing flexmock(time) time.should_receive('sleep').and_return() # set up some fake options so that we don't have to generate them via # ParseArgs self.options = flexmock(infrastructure='ec2', group='boogroup', machine='ami-ABCDEFG', instance_type='m1.large', keyname='bookey', table='cassandra', verbose=False, test=False, use_spot_instances=False, zone='my-zone-1b', static_ip=None, replication=None, appengine=None, autoscale=None, user_commands=[], flower_password='', max_memory='400', ips=FOUR_NODE_CLOUD) self.my_id = "12345" self.node_layout = NodeLayout(self.options) # set up phony AWS credentials for each test # ones that test not having them present can # remove them for credential in EucalyptusAgent.REQUIRED_EC2_CREDENTIALS: os.environ[credential] = "baz" os.environ['EC2_URL'] = "http://boo" # mock out calls to EC2 # begin by assuming that our ssh keypair doesn't exist, and thus that we # need to create it key_contents = "key contents here" fake_key = flexmock(name="fake_key", material=key_contents) fake_key.should_receive('save').with_args( os.environ['HOME'] + '/.appscale').and_return(None) fake_ec2 = flexmock(name="fake_ec2") fake_ec2.should_receive('get_key_pair').with_args('bookey') \ .and_return(None) fake_ec2.should_receive('create_key_pair').with_args('bookey') \ .and_return(fake_key) # mock out writing the secret key builtins = flexmock(sys.modules['__builtin__']) builtins.should_call('open') # set the fall-through secret_key_location = LocalState.LOCAL_APPSCALE_PATH + "bookey.secret" fake_secret = flexmock(name="fake_secret") fake_secret.should_receive('write').and_return() builtins.should_receive('open').with_args(secret_key_location, 'w') \ .and_return(fake_secret) # also, mock out the keypair writing and chmod'ing ssh_key_location = 
LocalState.LOCAL_APPSCALE_PATH + "bookey.key" fake_file = flexmock(name="fake_file") fake_file.should_receive('write').with_args(key_contents).and_return() builtins.should_receive('open').with_args(ssh_key_location, 'w') \ .and_return(fake_file) flexmock(os) os.should_receive('chmod').with_args(ssh_key_location, 0600).and_return() # next, assume there are no security groups up at first, but then it gets # created. udp_rule = flexmock(from_port=1, to_port=65535, ip_protocol='udp') tcp_rule = flexmock(from_port=1, to_port=65535, ip_protocol='tcp') icmp_rule = flexmock(from_port=-1, to_port=-1, ip_protocol='icmp') group = flexmock(name='boogroup', rules=[tcp_rule, udp_rule, icmp_rule]) fake_ec2.should_receive( 'get_all_security_groups').with_args().and_return([]) fake_ec2.should_receive('get_all_security_groups').with_args( 'boogroup').and_return([group]) # and then assume we can create and open our security group fine fake_ec2.should_receive('create_security_group').with_args( 'boogroup', 'AppScale security group').and_return() fake_ec2.should_receive('authorize_security_group').and_return() # next, add in mocks for run_instances # the first time around, let's say that no machines are running # the second time around, let's say that our machine is pending # and that it's up the third time around fake_pending_instance = flexmock(state='pending') fake_pending_reservation = flexmock(instances=fake_pending_instance) fake_running_instance = flexmock(state='running', key_name='bookey', id='i-12345678', ip_address=IP_1, private_ip_address=IP_1) fake_running_reservation = flexmock(instances=fake_running_instance) fake_ec2.should_receive('get_all_instances').and_return([]) \ .and_return([]) \ .and_return([fake_pending_reservation]) \ .and_return([fake_running_reservation]) # next, assume that our run_instances command succeeds fake_ec2.should_receive('run_instances').and_return() # finally, inject our mocked EC2 flexmock(boto.ec2) 
boto.ec2.should_receive('connect_to_region').and_return(fake_ec2) # assume that ssh comes up on the third attempt fake_socket = flexmock(name='fake_socket') fake_socket.should_receive('connect').with_args(('public1', RemoteHelper.SSH_PORT)).and_raise(Exception).and_raise(Exception) \ .and_return(None) flexmock(socket) socket.should_receive('socket').and_return(fake_socket) # throw some default mocks together for when invoking via shell succeeds # and when it fails self.fake_temp_file = flexmock(name='fake_temp_file') self.fake_temp_file.should_receive('seek').with_args(0).and_return() self.fake_temp_file.should_receive('read').and_return('boo out') self.fake_temp_file.should_receive('close').and_return() flexmock(tempfile) tempfile.should_receive('NamedTemporaryFile')\ .and_return(self.fake_temp_file) self.success = flexmock(name='success', returncode=0) self.success.should_receive('wait').and_return(0) self.failed = flexmock(name='success', returncode=1) self.failed.should_receive('wait').and_return(1) # assume that root login isn't already enabled local_state = flexmock(LocalState) local_state.should_receive('shell') \ .with_args(re.compile('^ssh .*root'), False, 1, stdin='ls') \ .and_return( 'Please login as the user "ubuntu" rather than the user "root"') # and assume that we can ssh in as ubuntu to enable root login local_state = flexmock(LocalState) local_state.should_receive('shell')\ .with_args(re.compile('^ssh .*ubuntu'),False,5)\ .and_return() # also assume that we can scp over our ssh keys local_state.should_receive('shell')\ .with_args(re.compile('scp .*/root/.ssh/id_'),False,5)\ .and_return() local_state.should_receive('shell')\ .with_args(re.compile('scp .*/root/.appscale/bookey.key'),False,5)\ .and_return()
def test_start_head_node(self):
  """start_head_node should generate keys, build deployment params, and
  push them to the AppController on the head node — all verified here via
  flexmock expectations (with_args pins the exact call contract).
  """
  self.options = flexmock(infrastructure='public cloud', group='group',
    machine='vm image', instance_type='instance type', keyname='keyname',
    table='cassandra', verbose=False, test=False, use_spot_instances=False,
    zone='zone', static_ip=None, replication=None, appengine=None,
    autoscale=None, user_commands=[], flower_password='', max_memory='X',
    ips=ONE_NODE_CLOUD)
  self.node_layout = NodeLayout(self.options)

  # Local key material is mocked out entirely.
  flexmock(LocalState).\
    should_receive("generate_secret_key").\
    with_args(self.options.keyname).\
    and_return('some secret key')
  flexmock(LocalState).\
    should_receive("get_key_path_from_name").\
    with_args(self.options.keyname).\
    and_return('some key path')

  # The head node resolves to a fixed fake Node.
  flexmock(NodeLayout).should_receive('head_node').\
    and_return(Node('some IP', 'cloud'))

  fake_agent = FakeAgent()
  flexmock(factory.InfrastructureAgentFactory).\
    should_receive('create_agent').\
    with_args('public cloud').\
    and_return(fake_agent)

  # Deployment parameters are expected to be built from exactly these
  # options, layout and (empty) additional params.
  self.additional_params = {}
  deployment_params = {}
  flexmock(LocalState).\
    should_receive('generate_deployment_params').\
    with_args(self.options, self.node_layout, self.additional_params).\
    and_return(deployment_params)

  # Silence logging and sleeping.
  flexmock(AppScaleLogger).should_receive('log').and_return()
  flexmock(AppScaleLogger).should_receive('remote_log_tools_state').\
    and_return()
  flexmock(time).should_receive('sleep').and_return()

  # All remote work against the head node ('some IP') is stubbed.
  flexmock(RemoteHelper).\
    should_receive('copy_deployment_credentials').\
    with_args('some IP', self.options).\
    and_return()
  flexmock(RemoteHelper).\
    should_receive('run_user_commands').\
    with_args('some IP', self.options.user_commands, self.options.keyname,
              self.options.verbose).\
    and_return()
  flexmock(RemoteHelper).\
    should_receive('start_remote_appcontroller').\
    with_args('some IP', self.options.keyname, self.options.verbose).\
    and_return()

  # Finally, the layout and params must be handed to the AppController.
  layout = {}
  flexmock(NodeLayout).should_receive('to_list').and_return(layout)
  flexmock(AppControllerClient).\
    should_receive('set_parameters').\
    with_args(layout, deployment_params).\
    and_return()

  RemoteHelper.start_head_node(self.options, 'an ID', self.node_layout)
def test_advanced_format_yaml_only(self):
  """An advanced-format layout supplied purely as YAML should yield a
  non-empty set of nodes."""
  opts = self.default_options.copy()
  opts['ips'] = OPEN_NODE_CLOUD
  layout_1 = NodeLayout(opts)
  self.assertNotEqual([], layout_1.nodes)
def test_from_locations_json_list_after_clean(self):
  """A cleaned deployment (non-head nodes reset to 'open') should still be
  matchable, role-set by role-set, against the requested layout."""
  options = flexmock(infrastructure='euca', group='group',
    machine='vm image', instance_type='instance type', keyname='keyname',
    table='cassandra', verbose=False, test=False, use_spot_instances=False,
    zone='zone', static_ip=None, replication=None, appengine=None,
    autoscale=None, user_commands=[], flower_password='', max_memory='X',
    ips=FOUR_NODE_CLOUD)
  # Locations JSON as it looks after a clean: only the head node keeps its
  # roles; every other node is 'open'.
  cleaned_node_info = [{
    "public_ip": "0.0.0.0",
    "private_ip": "0.0.0.0",
    "instance_id": "i-APPSCALE1",
    "roles": ['load_balancer', 'taskqueue', 'shadow', 'taskqueue_master'],
    "instance_type": "instance_type_1"
  }, {
    "public_ip": "0.0.0.0",
    "private_ip": "0.0.0.0",
    "instance_id": "i-APPSCALE2",
    "roles": ['open'],
    "instance_type": "instance_type_1"
  }, {
    "public_ip": "0.0.0.0",
    "private_ip": "0.0.0.0",
    "instance_id": "i-APPSCALE3",
    "roles": ['open'],
    "instance_type": "instance_type_1"
  }, {
    "public_ip": "0.0.0.0",
    "private_ip": "0.0.0.0",
    "instance_id": "i-APPSCALE4",
    "roles": ['open'],
    "instance_type": "instance_type_1"
  }]
  node_layout = NodeLayout(options)
  self.assertNotEqual([], node_layout.nodes)
  remaining = node_layout.nodes[:]
  rebuilt_layout = node_layout.from_locations_json_list(cleaned_node_info)
  for rebuilt_node in rebuilt_layout:
    # Match nodes based on jobs/roles; delete by position so matching does
    # not depend on Node equality semantics.
    for position, candidate in enumerate(remaining):
      if set(candidate.roles) == set(rebuilt_node.roles):
        del remaining[position]
        break
  self.assertEqual(remaining, [])
def valid_ssh_key(self, config, run_instances_opts):
  """ Checks if the tools can log into the head node with the current key.

  Args:
    config: A dictionary that includes the IPs layout (which itself is a
      dict mapping role names to IPs) and, optionally, the keyname to use.
    run_instances_opts: The arguments parsed from the appscale-run-instances
      command.

  Returns:
    A bool indicating whether or not the specified keyname can be used to
    log into the head node.

  Raises:
    BadConfigurationException: If the IPs layout was not a dictionary.
  """
  keyname = config['keyname']
  verbose = config.get('verbose', False)

  if not isinstance(config['ips_layout'], dict) and \
      not isinstance(config['ips_layout'], list):
    raise BadConfigurationException(
      'ips_layout should be a dictionary or list. Please fix it and try '
      'again.')

  # Without the local private key file there is nothing to log in with.
  ssh_key_location = self.APPSCALE_DIRECTORY + keyname + ".key"
  if not os.path.exists(ssh_key_location):
    return False

  try:
    all_ips = LocalState.get_all_public_ips(keyname)
  except BadConfigurationException:
    # If this is an upgrade from 3.1.0, there may not be a locations JSON.
    all_ips = self.get_ips_from_options(run_instances_opts.ips)

  # If a login node is defined, use that to communicate with other nodes.
  node_layout = NodeLayout(run_instances_opts)
  head_node = node_layout.head_node()
  if head_node is not None:
    # Copy the key to the head node, then hop through it to verify SSH
    # access to every other node. Any shell failure means the key is bad.
    remote_key = '{}/ssh.key'.format(RemoteHelper.CONFIG_DIR)
    try:
      RemoteHelper.scp(
        head_node.public_ip, keyname, ssh_key_location, remote_key, verbose)
    except ShellException:
      return False

    for ip in all_ips:
      # NOTE(review): 'StrictHostkeyChecking' casing differs from the usual
      # 'StrictHostKeyChecking' — ssh option names are case-insensitive, so
      # this works, but confirm before normalizing.
      ssh_to_ip = 'ssh -i {key} -o StrictHostkeyChecking=no root@{ip} true'\
        .format(key=remote_key, ip=ip)
      try:
        # NOTE(review): the user value below looks redacted in this copy of
        # the source ('******') — verify against upstream before editing.
        RemoteHelper.ssh(
          head_node.public_ip, keyname, ssh_to_ip, verbose, user='******')
      except ShellException:
        return False
    return True

  # No head node: fall back to direct SSH checks from this machine.
  for ip in all_ips:
    if not self.can_ssh_to_ip(ip, keyname, verbose):
      return False

  return True