Example #1
 def test_advanced_format_yaml_only(self):
   input_yaml = {'master' : self.ip_1, 'database' : self.ip_1,
     'appengine' : self.ip_1, 'open' : self.ip_2}
   options = self.default_options.copy()
   options['ips'] = input_yaml
   layout_1 = NodeLayout(options)
   self.assertEquals(True, layout_1.is_valid())
Example #2
  def test_from_locations_json_list_invalid_locations(self):
    node_layout = NodeLayout(self.reattach_options)
    self.assertNotEqual([], node_layout.nodes)

    node_info = [{ "public_ip": "0.0.0.0",
                   "private_ip": "0.0.0.0",
                   "instance_id": "i-APPSCALE1",
                   "roles": ['load_balancer', 'taskqueue', 'shadow',
                             'taskqueue_master'] },
                 { "public_ip": "0.0.0.0",
                   "private_ip": "0.0.0.0",
                   "instance_id": "i-APPSCALE2",
                   "roles": ['memcache', 'appengine'] },
                 { "public_ip": "0.0.0.0",
                   "private_ip": "0.0.0.0",
                   "instance_id": "i-APPSCALE3",
                   "roles": ['zookeeper'] },
                 { "public_ip": "0.0.0.0",
                   "private_ip": "0.0.0.0",
                   "instance_id": "i-APPSCALE4",
                   "roles": ['database', 'db_master', 'zookeeper'] }
                 ]

    with self.assertRaises(BadConfigurationException):
      node_layout.from_locations_json_list(node_info)
Example #3
  def test_from_locations_json_list_invalid_asf(self):
    options = flexmock(
      infrastructure='euca',
      group='group',
      machine='vm image',
      instance_type='instance type',
      keyname='keyname',
      table='cassandra',
      verbose=False,
      test=False,
      use_spot_instances=False,
      zone='zone',
      static_ip=None,
      replication=None,
      appengine=None,
      autoscale=None,
      user_commands=[],
      flower_password='',
      max_memory='X',
      ips=THREE_NODE_CLOUD
    )

    node_layout = NodeLayout(options)
    self.assertNotEqual([], node_layout.nodes)

    with self.assertRaises(BadConfigurationException):
      node_layout.from_locations_json_list(self.reattach_node_info)
Example #4
  def test_from_locations_json_list_able_to_match(self):
    options = flexmock(
      infrastructure='euca',
      group='group',
      machine='vm image',
      instance_type='instance type',
      keyname='keyname',
      table='cassandra',
      verbose=False,
      test=False,
      use_spot_instances=False,
      zone='zone',
      static_ip=None,
      replication=None,
      appengine=None,
      autoscale=None,
      user_commands=[],
      flower_password='',
      max_memory='X',
      ips=FOUR_NODE_CLOUD
    )

    node_layout = NodeLayout(options)
    self.assertNotEqual([], node_layout.nodes)
    old_nodes = node_layout.nodes[:]
    new_layout = node_layout.from_locations_json_list(self.reattach_node_info)
    for node in new_layout:
      # Match nodes based on jobs/roles.
      for index, old_node in enumerate(old_nodes):
        if set(old_node.roles) == set(node.roles):
          old_nodes.pop(index)
          break

    self.assertEqual(old_nodes, [])
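
Note: several of the reattach tests above and below repeat this matching loop to check that every node in the rebuilt layout pairs up with exactly one node from the original layout. A minimal standalone sketch of the same idea, using plain lists of role names rather than NodeLayout nodes (the helper name and data are illustrative only, not part of the AppScale API):

def layouts_match(old_role_sets, new_role_sets):
  """Returns True when every old role set is consumed by exactly one new one."""
  remaining = list(old_role_sets)
  for roles in new_role_sets:
    for index, old_roles in enumerate(remaining):
      if set(old_roles) == set(roles):
        remaining.pop(index)  # each old node may only be matched once
        break
  return remaining == []

# The same four-node layout expressed twice in a different order still matches.
old = [['shadow', 'load_balancer'], ['appengine'], ['zookeeper'], ['database']]
new = [['appengine'], ['database'], ['load_balancer', 'shadow'], ['zookeeper']]
assert layouts_match(old, new)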
Example #5
 def test_with_right_number_of_unique_disks_both_nodes(self):
   # suppose that the user has specified two nodes, and two EBS / PD disks
   # with different names. This is the desired user behavior.
   input_yaml = TWO_NODES_TWO_DISKS_CLOUD
   options = self.default_options.copy()
   options['ips'] = input_yaml
   layout = NodeLayout(options)
   self.assertNotEqual([], layout.nodes)
   self.assertEquals(DISK_ONE, layout.head_node().disk)
   self.assertEquals(DISK_TWO, layout.other_nodes()[0].disk)
Example #6
  def test_from_locations_json_list_after_clean(self):
    options = flexmock(
      infrastructure='euca',
      group='group',
      machine='vm image',
      instance_type='instance type',
      keyname='keyname',
      table='cassandra',
      verbose=False,
      test=False,
      use_spot_instances=False,
      zone='zone',
      static_ip=None,
      replication=None,
      appengine=None,
      autoscale=None,
      user_commands=[],
      flower_password='',
      max_memory='X',
      ips=FOUR_NODE_CLOUD
    )
    cleaned_node_info = [{"public_ip": "0.0.0.0",
                           "private_ip": "0.0.0.0",
                           "instance_id": "i-APPSCALE1",
                           "roles": ['load_balancer', 'taskqueue', 'shadow',
                                     'taskqueue_master'],
                           "instance_type": "instance_type_1"},
                          {"public_ip": "0.0.0.0",
                           "private_ip": "0.0.0.0",
                           "instance_id": "i-APPSCALE2",
                           "roles": ['open'],
                           "instance_type": "instance_type_1"},
                          {"public_ip": "0.0.0.0",
                           "private_ip": "0.0.0.0",
                           "instance_id": "i-APPSCALE3",
                           "roles": ['open'],
                           "instance_type": "instance_type_1"},
                          {"public_ip": "0.0.0.0",
                           "private_ip": "0.0.0.0",
                           "instance_id": "i-APPSCALE4",
                           "roles": ['open'],
                           "instance_type": "instance_type_1"}
                          ]
    node_layout = NodeLayout(options)
    self.assertNotEqual([], node_layout.nodes)
    old_nodes = node_layout.nodes[:]
    new_layout = node_layout.from_locations_json_list(cleaned_node_info)
    for node in new_layout:
      # Match nodes based on jobs/roles.
      for index, old_node in enumerate(old_nodes):
        if set(old_node.roles) == set(node.roles):
          old_nodes.pop(index)
          break

    self.assertEqual(old_nodes, [])
Example #7
 def test_new_with_right_number_of_unique_disks_one_node(self):
   # suppose that the user has specified a single node group with two nodes and
   # two uniquely named EBS / PD disks. This is the desired user behavior.
   input_yaml = [
     {'roles': ['master', 'database'], 'nodes': 1, 'instance_type': 'm1.large'},
     {'roles': ['appengine'], 'nodes': 2,
      'instance_type': 'm1.large', 'disks': [self.DISK_ONE, self.DISK_TWO]}]
   options = self.default_options.copy()
   options['ips'] = input_yaml
   layout = NodeLayout(options)
   self.assertNotEqual([], layout.nodes)
   self.assertEquals(self.DISK_ONE, layout.other_nodes()[0].disk)
   self.assertEquals(self.DISK_TWO, layout.other_nodes()[1].disk)
Example #8
  def test_from_locations_json_list_valid(self):
    node_layout = NodeLayout(self.reattach_options)
    self.assertNotEqual([], node_layout.nodes)
    old_nodes = node_layout.nodes[:]
    new_layout = node_layout.from_locations_json_list(self.reattach_node_info)
    for node in new_layout:
      # Match nodes based on jobs/roles.
      for index, old_node in enumerate(old_nodes):
        if set(old_node.roles) == set(node.roles):
          old_nodes.pop(index)
          break

    self.assertEqual(old_nodes, [])
Example #9
 def test_with_wrong_number_of_disks(self):
   # suppose that the user has specified two nodes, but only one EBS / PD disk
   # this should fail.
   input_yaml = {
     'controller' : self.ip_1,
     'servers' : [self.ip_2]
   }
   options = self.default_options.copy()
   options['ips'] = input_yaml
   options['disks'] = {
     self.ip_1 : 'disk_number_one'
   }
   layout = NodeLayout(options)
   self.assertEquals(False, layout.is_valid())
Example #10
 def test_with_right_number_of_disks_but_not_unique(self):
   # suppose that the user has specified two nodes, but uses the same name for
   # both disks. This isn't acceptable.
   input_yaml = {
     'controller' : self.ip_1,
     'servers' : [self.ip_2]
   }
   options = self.default_options.copy()
   options['ips'] = input_yaml
   options['disks'] = {
     self.ip_1 : 'disk_number_one',
     self.ip_2 : 'disk_number_one'
   }
   layout = NodeLayout(options)
   self.assertEquals(False, layout.is_valid())
Example #11
  def test_with_login_override(self):
    # if the user wants to set a login host, make sure that gets set as the
    # login node's public IP address instead of what we'd normally put in

    # use a simple deployment so we can get the login node with .head_node()
    input_yaml_1 = {
      'controller' : self.ip_1,
      'servers' : [self.ip_2]
    }
    options_1 = self.default_options.copy()
    options_1['ips'] = input_yaml_1
    options_1['login_host'] = "www.booscale.com"
    layout_1 = NodeLayout(options_1)
    self.assertEquals(True, layout_1.is_valid())

    head_node = layout_1.head_node()
    self.assertEquals(options_1['login_host'], head_node.public_ip)
Example #12
 def test_with_right_number_of_unique_disks(self):
   # suppose that the user has specified two nodes, and two EBS / PD disks
   # with different names. This is the desired user behavior.
   input_yaml = {
     'controller' : self.ip_1,
     'servers' : [self.ip_2]
   }
   options = self.default_options.copy()
   options['ips'] = input_yaml
   options['disks'] = {
     self.ip_1 : 'disk_number_one',
     self.ip_2 : 'disk_number_two'
   }
   layout = NodeLayout(options)
   self.assertEquals(True, layout.is_valid())
   self.assertEquals('disk_number_one', layout.head_node().disk)
   self.assertEquals('disk_number_two', layout.other_nodes()[0].disk)
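
Taken together, the three disk tests above encode a simple rule: a 'disks' mapping is acceptable only when every node receives its own, uniquely named disk. A standalone sketch of that rule, independent of NodeLayout (the function name and IPs are illustrative only):

def disks_mapping_is_valid(node_ips, disks):
  """One uniquely named disk per node, or no disks at all."""
  if disks is None:
    return True  # disks are optional
  if set(disks.keys()) != set(node_ips):
    return False  # every node needs an entry, and nothing but nodes
  disk_names = list(disks.values())
  return len(disk_names) == len(set(disk_names))  # names must not repeat

ips = ['192.168.1.1', '192.168.1.2']
assert not disks_mapping_is_valid(ips, {'192.168.1.1': 'disk_number_one'})
assert not disks_mapping_is_valid(ips, {'192.168.1.1': 'disk_number_one',
                                        '192.168.1.2': 'disk_number_one'})
assert disks_mapping_is_valid(ips, {'192.168.1.1': 'disk_number_one',
                                    '192.168.1.2': 'disk_number_two'})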
Example #13
    def test_start_all_nodes_reattach(self):
        self.node_layout = NodeLayout(self.reattach_options)
        self.assertNotEqual([], self.node_layout.nodes)
        fake_agent = FakeAgent()
        flexmock(factory.InfrastructureAgentFactory). \
          should_receive('create_agent'). \
          with_args('euca'). \
          and_return(fake_agent)

        LocalState.should_receive('get_host_with_role').and_return(IP_1)

        LocalState.should_receive('get_local_nodes_info') \
          .and_return(self.reattach_node_info)

        RemoteHelper.start_all_nodes(self.reattach_options, self.node_layout)
Example #14
    def test_update_local_metadata(self):
        # mock out getting all the ips in the deployment from the head node
        fake_soap = flexmock(name='fake_soap')
        fake_soap.should_receive('get_all_public_ips').with_args('the secret') \
          .and_return(json.dumps(['public1']))
        role_info = [{
            'public_ip': 'public1',
            'private_ip': 'private1',
            'jobs': ['shadow', 'db_master']
        }]
        fake_soap.should_receive('get_role_info').with_args('the secret') \
          .and_return(json.dumps(role_info))
        flexmock(SOAPpy)
        SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
          .and_return(fake_soap)

        # mock out reading the secret key
        fake_secret = flexmock(name='fake_secret')
        fake_secret.should_receive('read').and_return('the secret')
        builtins = flexmock(sys.modules['__builtin__'])
        builtins.should_call('open')
        builtins.should_receive('open').with_args(
          LocalState.get_secret_key_location('booscale'), 'r') \
          .and_return(fake_secret)

        # Mock out writing the json file.
        json_location = LocalState.get_locations_json_location('booscale')
        builtins.should_receive('open').with_args(json_location, 'w')\
          .and_return(flexmock(write=lambda *args: None))

        options = flexmock(name='options',
                           table='cassandra',
                           infrastructure='ec2',
                           keyname='booscale',
                           group='boogroup',
                           zone='my-zone-1b',
                           EC2_ACCESS_KEY='baz',
                           EC2_SECRET_KEY='baz',
                           EC2_URL='')
        node_layout = NodeLayout(
            options={
                'min_machines': 1,
                'max_machines': 1,
                'infrastructure': 'ec2',
                'table': 'cassandra',
                'instance_type': 'm1.large'
            })
        LocalState.update_local_metadata(options, 'public1', 'public1')
Example #15
 def test_is_database_replication_valid_with_db_slave(self):
     input_yaml = [{
         'roles': ['master', 'database', 'appengine'],
         'nodes': 1,
         'instance_type': 'm1.large'
     }]
     options = self.default_options.copy()
     options['ips'] = input_yaml
     fake_node = flexmock()
     fake_node.should_receive('is_role').with_args('database').and_return(
         False)
     fake_node.should_receive('is_role').with_args('db_master').and_return(
         False)
     fake_node.should_receive('is_role').with_args('db_slave').and_return(
         True)
     # validate_database_replication will raise BadConfigurationException if
     # it is invalid.
     NodeLayout(options).validate_database_replication([fake_node])
Example #16
    def test_start_all_nodes_reattach_changed_locations(self):
        self.node_layout = NodeLayout(self.reattach_options)

        fake_agent = FakeAgent()
        flexmock(factory.InfrastructureAgentFactory). \
          should_receive('create_agent'). \
          with_args('public cloud'). \
          and_return(fake_agent)

        LocalState.should_receive('get_login_host').and_return('0.0.0.1')

        node_info = [{
            "public_ip": "0.0.0.0",
            "private_ip": "0.0.0.0",
            "instance_id": "i-APPSCALE1",
            "jobs": ['load_balancer', 'taskqueue', 'shadow', 'login',
                     'taskqueue_master']
        }, {
            "public_ip": "0.0.0.0",
            "private_ip": "0.0.0.0",
            "instance_id": "i-APPSCALE2",
            "jobs": ['memcache', 'appengine']
        }, {
            "public_ip": "0.0.0.0",
            "private_ip": "0.0.0.0",
            "instance_id": "i-APPSCALE3",
            "jobs": ['zookeeper', "appengine"]
        }, {
            "public_ip": "0.0.0.0",
            "private_ip": "0.0.0.0",
            "instance_id": "i-APPSCALE4",
            "jobs": ['db_master']
        }]

        LocalState.should_receive('get_local_nodes_info').and_return(node_info)

        self.assertRaises(BadConfigurationException)
Example #17
  def test_start_all_nodes_reattach_changed_asf(self):
    self.options = flexmock(
      infrastructure='public cloud',
      group='group',
      machine='vm image',
      instance_type='instance type',
      keyname='keyname',
      table='cassandra',
      verbose=False,
      test=False,
      use_spot_instances=False,
      zone='zone',
      static_ip=None,
      replication=None,
      appengine=None,
      autoscale=None,
      user_commands=[],
      flower_password='',
      max_memory='X',
      ips=THREE_NODE_CLOUD
    )

    self.node_layout = NodeLayout(self.options)

    fake_agent = FakeAgent()
    flexmock(factory.InfrastructureAgentFactory). \
      should_receive('create_agent'). \
      with_args('public cloud'). \
      and_return(fake_agent)

    LocalState.should_receive('get_login_host').and_return('0.0.0.1')

    LocalState.should_receive('get_local_nodes_info')\
      .and_return(self.reattach_node_info)

    self.assertRaises(BadConfigurationException)
Example #18
  def test_simple_layout_yaml_only(self):
    # Specifying one controller and one server should be ok
    input_yaml_1 = {
      'controller' : self.ip_1,
      'servers' : [self.ip_2]
    }
    options_1 = self.default_options.copy()
    options_1['ips'] = input_yaml_1
    layout_1 = NodeLayout(options_1)
    self.assertEquals(True, layout_1.is_valid())

    # Specifying one controller should be ok
    input_yaml_2 = {'controller' : self.ip_1}
    options_2 = self.default_options.copy()
    options_2['ips'] = input_yaml_2
    layout_2 = NodeLayout(options_2)
    self.assertEquals(True, layout_2.is_valid())

    # Specifying the same IP more than once is not ok
    input_yaml_3 = {'controller' : self.ip_1, 'servers' : [self.ip_1]}
    options_3 = self.default_options.copy()
    options_3['ips'] = input_yaml_3
    layout_3 = NodeLayout(options_3)
    self.assertEquals(False, layout_3.is_valid())
    self.assertEquals(NodeLayout.DUPLICATE_IPS, layout_3.errors())

    # Failing to specify a controller is not ok
    input_yaml_4 = {'servers' : [self.ip_1, self.ip_2]}
    options_4 = self.default_options.copy()
    options_4['ips'] = input_yaml_4
    layout_4 = NodeLayout(options_4)
    self.assertEquals(False, layout_4.is_valid())
    self.assertEquals(NodeLayout.NO_CONTROLLER, layout_4.errors())

    # Specifying more than one controller is not ok
    input_yaml_5 = {'controller' : [self.ip_1, self.ip_2], 'servers' :
      [self.ip_3]}
    options_5 = self.default_options.copy()
    options_5['ips'] = input_yaml_5
    layout_5 = NodeLayout(options_5)
    self.assertEquals(False, layout_5.is_valid())
    self.assertEquals(NodeLayout.ONLY_ONE_CONTROLLER, layout_5.errors())

    # Specifying something other than controller and servers in simple
    # deployments is not ok
    input_yaml_6 = {'controller' : self.ip_1, 'servers' : [self.ip_2],
      'boo' : self.ip_3}
    options_6 = self.default_options.copy()
    options_6['ips'] = input_yaml_6
    layout_6 = NodeLayout(options_6)
    self.assertEquals(False, layout_6.is_valid())
    self.assertEquals(["The flag boo is not a supported flag"],
      layout_6.errors())
Example #19
    def setUp(self):
        # mock out all logging, since it clutters our output
        flexmock(AppScaleLogger)
        AppScaleLogger.should_receive('log').and_return()

        # mock out all sleeps, as they aren't necessary for unit testing
        flexmock(time)
        time.should_receive('sleep').and_return()

        # set up some fake options so that we don't have to generate them via
        # ParseArgs
        self.options = flexmock(infrastructure='ec2',
                                group='boogroup',
                                machine='ami-ABCDEFG',
                                instance_type='m1.large',
                                keyname='bookey',
                                table='cassandra',
                                verbose=False,
                                test=False,
                                use_spot_instances=False,
                                zone='my-zone-1b',
                                static_ip=None,
                                replication=None,
                                appengine=None,
                                autoscale=None,
                                user_commands=[],
                                flower_password='',
                                max_memory='400',
                                ips=FOUR_NODE_CLOUD)
        self.my_id = "12345"
        self.node_layout = NodeLayout(self.options)

        # set up phony AWS credentials for each test
        # ones that test not having them present can
        # remove them
        for credential in EucalyptusAgent.REQUIRED_EC2_CREDENTIALS:
            os.environ[credential] = "baz"
        os.environ['EC2_URL'] = "http://boo"

        # mock out calls to EC2
        # begin by assuming that our ssh keypair doesn't exist, and thus that we
        # need to create it
        key_contents = "key contents here"
        fake_key = flexmock(name="fake_key", material=key_contents)
        fake_key.should_receive('save').with_args(
            os.environ['HOME'] + '/.appscale').and_return(None)

        fake_ec2 = flexmock(name="fake_ec2")
        fake_ec2.should_receive('get_key_pair').with_args('bookey') \
          .and_return(None)
        fake_ec2.should_receive('create_key_pair').with_args('bookey') \
          .and_return(fake_key)

        # mock out writing the secret key
        builtins = flexmock(sys.modules['__builtin__'])
        builtins.should_call('open')  # set the fall-through

        secret_key_location = LocalState.LOCAL_APPSCALE_PATH + "bookey.secret"
        fake_secret = flexmock(name="fake_secret")
        fake_secret.should_receive('write').and_return()
        builtins.should_receive('open').with_args(secret_key_location, 'w') \
          .and_return(fake_secret)

        # also, mock out the keypair writing and chmod'ing
        ssh_key_location = LocalState.LOCAL_APPSCALE_PATH + "bookey.key"
        fake_file = flexmock(name="fake_file")
        fake_file.should_receive('write').with_args(key_contents).and_return()

        builtins.should_receive('open').with_args(ssh_key_location, 'w') \
          .and_return(fake_file)

        flexmock(os)
        os.should_receive('chmod').with_args(ssh_key_location,
                                             0600).and_return()

        # next, assume there are no security groups up at first, but then it gets
        # created.
        udp_rule = flexmock(from_port=1, to_port=65535, ip_protocol='udp')
        tcp_rule = flexmock(from_port=1, to_port=65535, ip_protocol='tcp')
        icmp_rule = flexmock(from_port=-1, to_port=-1, ip_protocol='icmp')
        group = flexmock(name='boogroup',
                         rules=[tcp_rule, udp_rule, icmp_rule])
        fake_ec2.should_receive(
            'get_all_security_groups').with_args().and_return([])
        fake_ec2.should_receive('get_all_security_groups').with_args(
            'boogroup').and_return([group])

        # and then assume we can create and open our security group fine
        fake_ec2.should_receive('create_security_group').with_args(
            'boogroup', 'AppScale security group').and_return()
        fake_ec2.should_receive('authorize_security_group').and_return()

        # next, add in mocks for run_instances
        # the first time around, let's say that no machines are running
        # the second time around, let's say that our machine is pending
        # and that it's up the third time around
        fake_pending_instance = flexmock(state='pending')
        fake_pending_reservation = flexmock(instances=fake_pending_instance)

        fake_running_instance = flexmock(state='running',
                                         key_name='bookey',
                                         id='i-12345678',
                                         ip_address=IP_1,
                                         private_ip_address=IP_1)
        fake_running_reservation = flexmock(instances=fake_running_instance)

        fake_ec2.should_receive('get_all_instances').and_return([]) \
          .and_return([]) \
          .and_return([fake_pending_reservation]) \
          .and_return([fake_running_reservation])

        # next, assume that our run_instances command succeeds
        fake_ec2.should_receive('run_instances').and_return()

        # finally, inject our mocked EC2
        flexmock(boto.ec2)
        boto.ec2.should_receive('connect_to_region').and_return(fake_ec2)

        # assume that ssh comes up on the third attempt
        fake_socket = flexmock(name='fake_socket')
        fake_socket.should_receive('connect').with_args(('public1',
          RemoteHelper.SSH_PORT)).and_raise(Exception).and_raise(Exception) \
          .and_return(None)
        flexmock(socket)
        socket.should_receive('socket').and_return(fake_socket)

        # throw some default mocks together for when invoking via shell succeeds
        # and when it fails
        self.fake_temp_file = flexmock(name='fake_temp_file')
        self.fake_temp_file.should_receive('seek').with_args(0).and_return()
        self.fake_temp_file.should_receive('read').and_return('boo out')
        self.fake_temp_file.should_receive('close').and_return()

        flexmock(tempfile)
        tempfile.should_receive('NamedTemporaryFile')\
          .and_return(self.fake_temp_file)

        self.success = flexmock(name='success', returncode=0)
        self.success.should_receive('wait').and_return(0)

        self.failed = flexmock(name='success', returncode=1)
        self.failed.should_receive('wait').and_return(1)

        # assume that root login isn't already enabled
        local_state = flexmock(LocalState)
        local_state.should_receive('shell') \
          .with_args(re.compile('^ssh .*root'), False, 1, stdin='ls') \
          .and_return(
          'Please login as the user "ubuntu" rather than the user "root"')

        # and assume that we can ssh in as ubuntu to enable root login
        local_state = flexmock(LocalState)
        local_state.should_receive('shell')\
          .with_args(re.compile('^ssh .*ubuntu'),False,5)\
          .and_return()

        # also assume that we can scp over our ssh keys
        local_state.should_receive('shell')\
          .with_args(re.compile('scp .*/root/.ssh/id_'),False,5)\
          .and_return()

        local_state.should_receive('shell')\
          .with_args(re.compile('scp .*/root/.appscale/bookey.key'),False,5)\
          .and_return()
Example #20
    def test_start_head_node(self):
        self.options = flexmock(infrastructure='public cloud',
                                group='group',
                                machine='vm image',
                                instance_type='instance type',
                                keyname='keyname',
                                table='cassandra',
                                verbose=False,
                                test=False,
                                use_spot_instances=False,
                                zone='zone',
                                static_ip=None,
                                replication=None,
                                appengine=None,
                                autoscale=None,
                                user_commands=[],
                                flower_password='',
                                max_memory='X',
                                ips=ONE_NODE_CLOUD)

        self.node_layout = NodeLayout(self.options)

        flexmock(LocalState).\
          should_receive("generate_secret_key").\
          with_args(self.options.keyname).\
          and_return('some secret key')

        flexmock(LocalState).\
          should_receive("get_key_path_from_name").\
          with_args(self.options.keyname).\
          and_return('some key path')

        flexmock(NodeLayout).should_receive('head_node').\
          and_return(Node('some IP', 'cloud'))

        fake_agent = FakeAgent()
        flexmock(factory.InfrastructureAgentFactory).\
          should_receive('create_agent').\
          with_args('public cloud').\
          and_return(fake_agent)

        self.additional_params = {}
        deployment_params = {}

        flexmock(LocalState).\
          should_receive('generate_deployment_params').\
          with_args(self.options, self.node_layout, self.additional_params).\
          and_return(deployment_params)

        flexmock(AppScaleLogger).should_receive('log').and_return()
        flexmock(AppScaleLogger).should_receive('remote_log_tools_state').\
          and_return()

        flexmock(time).should_receive('sleep').and_return()

        flexmock(RemoteHelper).\
          should_receive('copy_deployment_credentials').\
          with_args('some IP', self.options).\
          and_return()

        flexmock(RemoteHelper).\
          should_receive('run_user_commands').\
          with_args('some IP', self.options.user_commands,
                    self.options.keyname, self.options.verbose).\
          and_return()

        flexmock(RemoteHelper).\
          should_receive('start_remote_appcontroller').\
          with_args('some IP', self.options.keyname, self.options.verbose).\
          and_return()

        layout = {}
        flexmock(NodeLayout).should_receive('to_list').and_return(layout)

        flexmock(AppControllerClient).\
          should_receive('set_parameters').\
          with_args(layout, deployment_params).\
          and_return()

        RemoteHelper.start_head_node(self.options, 'an ID', self.node_layout)
Example #21
 def test_advanced_format_yaml_only(self):
     input_yaml = OPEN_NODE_CLOUD
     options = self.default_options.copy()
     options['ips'] = input_yaml
     layout_1 = NodeLayout(options)
     self.assertNotEqual([], layout_1.nodes)
Example #22
    def test_from_locations_json_list_after_clean(self):
        options = flexmock(infrastructure='euca',
                           group='group',
                           machine='vm image',
                           instance_type='instance type',
                           keyname='keyname',
                           table='cassandra',
                           verbose=False,
                           test=False,
                           use_spot_instances=False,
                           zone='zone',
                           static_ip=None,
                           replication=None,
                           appengine=None,
                           autoscale=None,
                           user_commands=[],
                           flower_password='',
                           max_memory='X',
                           ips=FOUR_NODE_CLOUD)
        cleaned_node_info = [{
            "public_ip": "0.0.0.0",
            "private_ip": "0.0.0.0",
            "instance_id": "i-APPSCALE1",
            "roles": ['load_balancer', 'taskqueue', 'shadow',
                      'taskqueue_master'],
            "instance_type": "instance_type_1"
        }, {
            "public_ip": "0.0.0.0",
            "private_ip": "0.0.0.0",
            "instance_id": "i-APPSCALE2",
            "roles": ['open'],
            "instance_type": "instance_type_1"
        }, {
            "public_ip": "0.0.0.0",
            "private_ip": "0.0.0.0",
            "instance_id": "i-APPSCALE3",
            "roles": ['open'],
            "instance_type": "instance_type_1"
        }, {
            "public_ip": "0.0.0.0",
            "private_ip": "0.0.0.0",
            "instance_id": "i-APPSCALE4",
            "roles": ['open'],
            "instance_type": "instance_type_1"
        }]
        node_layout = NodeLayout(options)
        self.assertNotEqual([], node_layout.nodes)
        old_nodes = node_layout.nodes[:]
        new_layout = node_layout.from_locations_json_list(cleaned_node_info)
        for node in new_layout:
            # Match nodes based on jobs/roles.
            for index, old_node in enumerate(old_nodes):
                if set(old_node.roles) == set(node.roles):
                    old_nodes.pop(index)
                    break

        self.assertEqual(old_nodes, [])
Example #23
    def test_simple_layout_yaml_only(self):
        # Specifying one controller and one server should be ok
        input_yaml_1 = {'controller': self.ip_1, 'servers': [self.ip_2]}
        options_1 = self.default_options.copy()
        options_1['ips'] = input_yaml_1
        layout_1 = NodeLayout(options_1)
        self.assertEquals(True, layout_1.is_valid())

        # Specifying one controller should be ok
        input_yaml_2 = {'controller': self.ip_1}
        options_2 = self.default_options.copy()
        options_2['ips'] = input_yaml_2
        layout_2 = NodeLayout(options_2)
        self.assertEquals(True, layout_2.is_valid())

        # Specifying the same IP more than once is not ok
        input_yaml_3 = {'controller': self.ip_1, 'servers': [self.ip_1]}
        options_3 = self.default_options.copy()
        options_3['ips'] = input_yaml_3
        layout_3 = NodeLayout(options_3)
        self.assertEquals(False, layout_3.is_valid())
        self.assertEquals(NodeLayout.DUPLICATE_IPS, layout_3.errors())

        # Failing to specify a controller is not ok
        input_yaml_4 = {'servers': [self.ip_1, self.ip_2]}
        options_4 = self.default_options.copy()
        options_4['ips'] = input_yaml_4
        layout_4 = NodeLayout(options_4)
        self.assertEquals(False, layout_4.is_valid())
        self.assertEquals(NodeLayout.NO_CONTROLLER, layout_4.errors())

        # Specifying more than one controller is not ok
        input_yaml_5 = {
            'controller': [self.ip_1, self.ip_2],
            'servers': [self.ip_3]
        }
        options_5 = self.default_options.copy()
        options_5['ips'] = input_yaml_5
        layout_5 = NodeLayout(options_5)
        self.assertEquals(False, layout_5.is_valid())
        self.assertEquals(NodeLayout.ONLY_ONE_CONTROLLER, layout_5.errors())

        # Specifying something other than controller and servers in simple
        # deployments is not ok
        input_yaml_6 = {
            'controller': self.ip_1,
            'servers': [self.ip_2],
            'boo': self.ip_3
        }
        options_6 = self.default_options.copy()
        options_6['ips'] = input_yaml_6
        layout_6 = NodeLayout(options_6)
        self.assertEquals(False, layout_6.is_valid())
        self.assertEquals(["The flag boo is not a supported flag"],
                          layout_6.errors())
Example #24
    def test_simple_layout_options(self):
        # Using Euca with no input yaml, and no max or min images is not ok
        options_1 = self.default_options.copy()
        options_1['infrastructure'] = 'euca'
        layout_1 = NodeLayout(options_1)
        self.assertEquals(False, layout_1.is_valid())
        self.assertEquals(NodeLayout.NO_YAML_REQUIRES_MIN, layout_1.errors())

        options_2 = self.default_options.copy()
        options_2['infrastructure'] = "euca"
        options_2['max'] = 2
        layout_2 = NodeLayout(options_2)
        self.assertEquals(False, layout_2.is_valid())
        self.assertEquals(NodeLayout.NO_YAML_REQUIRES_MIN, layout_2.errors())

        options_3 = self.default_options.copy()
        options_3['infrastructure'] = "euca"
        options_3['min'] = 2
        layout_3 = NodeLayout(options_3)
        self.assertEquals(False, layout_3.is_valid())
        self.assertEquals(NodeLayout.NO_YAML_REQUIRES_MAX, layout_3.errors())

        # Using Euca with no input yaml, with max and min images set is ok
        options_4 = self.default_options.copy()
        options_4['infrastructure'] = "euca"
        options_4['min'] = 2
        options_4['max'] = 2
        layout_4 = NodeLayout(options_4)
        self.assertEquals(True, layout_4.is_valid())

        # Using virtualized deployments with no input yaml is not ok
        options_5 = self.default_options.copy()
        layout_5 = NodeLayout(options_5)
        self.assertEquals(False, layout_5.is_valid())
        self.assertEquals([NodeLayout.INPUT_YAML_REQUIRED], layout_5.errors())
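
The assertions above amount to a small decision table for cloud deployments without an ips layout. A toy restatement of that table (not NodeLayout's actual implementation; the error names simply mirror the class constants used above):

def cloud_options_error(infrastructure, ips, min_images, max_images):
  """Returns the expected NodeLayout error name, or None when options are valid."""
  if ips is not None:
    return None  # an explicit layout overrides the min/max requirement
  if infrastructure is None:
    return 'INPUT_YAML_REQUIRED'  # virtualized deployments need a layout
  if min_images is None:
    return 'NO_YAML_REQUIRES_MIN'
  if max_images is None:
    return 'NO_YAML_REQUIRES_MAX'
  return None

assert cloud_options_error('euca', None, None, None) == 'NO_YAML_REQUIRES_MIN'
assert cloud_options_error('euca', None, None, 2) == 'NO_YAML_REQUIRES_MIN'
assert cloud_options_error('euca', None, 2, None) == 'NO_YAML_REQUIRES_MAX'
assert cloud_options_error('euca', None, 2, 2) is None
assert cloud_options_error(None, None, None, None) == 'INPUT_YAML_REQUIRED'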
Example #25
  def valid_ssh_key(self, config, run_instances_opts):
    """ Checks if the tools can log into the head node with the current key.

    Args:
      config: A dictionary that includes the IPs layout (which itself is a dict
        mapping role names to IPs) and, optionally, the keyname to use.
      run_instances_opts: The arguments parsed from the appscale-run-instances
        command.

    Returns:
      A bool indicating whether or not the specified keyname can be used to log
      into the head node.

    Raises:
      BadConfigurationException: If the IPs layout was not a dictionary.
    """
    keyname = config['keyname']
    verbose = config.get('verbose', False)

    if not isinstance(config['ips_layout'], dict) and \
        not isinstance(config['ips_layout'], list):
      raise BadConfigurationException(
        'ips_layout should be a dictionary or list. Please fix it and try '
        'again.')

    ssh_key_location = self.APPSCALE_DIRECTORY + keyname + ".key"
    if not os.path.exists(ssh_key_location):
      return False

    try:
      all_ips = LocalState.get_all_public_ips(keyname)
    except BadConfigurationException:
      # If this is an upgrade from 3.1.0, there may not be a locations JSON.
      all_ips = self.get_ips_from_options(run_instances_opts.ips)

    # If a login node is defined, use that to communicate with other nodes.
    node_layout = NodeLayout(run_instances_opts)
    head_node = node_layout.head_node()
    if head_node is not None:
      remote_key = '{}/ssh.key'.format(RemoteHelper.CONFIG_DIR)
      try:
        RemoteHelper.scp(
          head_node.public_ip, keyname, ssh_key_location, remote_key, verbose)
      except ShellException:
        return False

      for ip in all_ips:
        ssh_to_ip = 'ssh -i {key} -o StrictHostkeyChecking=no root@{ip} true'\
          .format(key=remote_key, ip=ip)
        try:
          RemoteHelper.ssh(
            head_node.public_ip, keyname, ssh_to_ip, verbose, user='******')
        except ShellException:
          return False
      return True

    for ip in all_ips:
      if not self.can_ssh_to_ip(ip, keyname, verbose):
        return False

    return True
Example #26
    def valid_ssh_key(self, config, run_instances_opts):
        """ Checks if the tools can log into the head node with the current key.

    Args:
      config: A dictionary that includes the IPs layout (which itself is a dict
        mapping role names to IPs) and, optionally, the keyname to use.
      run_instances_opts: The arguments parsed from the appscale-run-instances
        command.

    Returns:
      A bool indicating whether or not the specified keyname can be used to log
      into the head node.

    Raises:
      BadConfigurationException: If the IPs layout was not a dictionary.
    """
        keyname = config['keyname']
        verbose = config.get('verbose', False)

        if not isinstance(config['ips_layout'], dict) and \
            not isinstance(config['ips_layout'], list):
            raise BadConfigurationException(
                'ips_layout should be a dictionary or list. Please fix it and try '
                'again.')

        ssh_key_location = self.APPSCALE_DIRECTORY + keyname + ".key"
        if not os.path.exists(ssh_key_location):
            return False

        try:
            all_ips = LocalState.get_all_public_ips(keyname)
        except BadConfigurationException:
            # If this is an upgrade from 3.1.0, there may not be a locations JSON.
            all_ips = self.get_ips_from_options(run_instances_opts.ips)

        # If a login node is defined, use that to communicate with other nodes.
        node_layout = NodeLayout(run_instances_opts)
        head_node = node_layout.head_node()
        if head_node is not None:
            remote_key = '{}/ssh.key'.format(RemoteHelper.CONFIG_DIR)
            try:
                RemoteHelper.scp(head_node.public_ip, keyname,
                                 ssh_key_location, remote_key, verbose)
            except ShellException:
                return False

            for ip in all_ips:
                ssh_to_ip = 'ssh -i {key} -o StrictHostkeyChecking=no root@{ip} true'\
                  .format(key=remote_key, ip=ip)
                try:
                    RemoteHelper.ssh(head_node.public_ip,
                                     keyname,
                                     ssh_to_ip,
                                     verbose,
                                     user='******')
                except ShellException:
                    return False
            return True

        for ip in all_ips:
            if not self.can_ssh_to_ip(ip, keyname, verbose):
                return False

        return True
Example #27
  def test_simple_layout_options(self):
    # Using Euca with no input yaml, and no max or min images is not ok
    options_1 = self.default_options.copy()
    options_1['infrastructure'] = 'euca'
    layout_1 = NodeLayout(options_1)
    self.assertEquals(False, layout_1.is_valid())
    self.assertEquals(NodeLayout.NO_YAML_REQUIRES_MIN, layout_1.errors())

    options_2 = self.default_options.copy()
    options_2['infrastructure'] = "euca"
    options_2['max'] = 2
    layout_2 = NodeLayout(options_2)
    self.assertEquals(False, layout_2.is_valid())
    self.assertEquals(NodeLayout.NO_YAML_REQUIRES_MIN, layout_2.errors())

    options_3 = self.default_options.copy()
    options_3['infrastructure'] = "euca"
    options_3['min'] = 2
    layout_3 = NodeLayout(options_3)
    self.assertEquals(False, layout_3.is_valid())
    self.assertEquals(NodeLayout.NO_YAML_REQUIRES_MAX, layout_3.errors())

    # Using Euca with no input yaml, with max and min images set is ok
    options_4 = self.default_options.copy()
    options_4['infrastructure'] = "euca"
    options_4['min'] = 2
    options_4['max'] = 2
    layout_4 = NodeLayout(options_4)
    self.assertEquals(True, layout_4.is_valid())

    # Using virtualized deployments with no input yaml is not ok
    options_5 = self.default_options.copy()
    layout_5 = NodeLayout(options_5)
    self.assertEquals(False, layout_5.is_valid())
    self.assertEquals([NodeLayout.INPUT_YAML_REQUIRED], layout_5.errors())
Example #28
    def test_generate_deployment_params(self):
        # this method is fairly light, so just make sure that it constructs the dict
        # to send to the AppController correctly
        options = flexmock(name='options',
                           table='cassandra',
                           keyname='boo',
                           default_min_appservers='1',
                           autoscale=False,
                           group='bazgroup',
                           replication=None,
                           infrastructure='ec2',
                           machine='ami-ABCDEFG',
                           instance_type='m1.large',
                           use_spot_instances=True,
                           max_spot_price=1.23,
                           clear_datastore=False,
                           disks={'node-1': 'vol-ABCDEFG'},
                           zone='my-zone-1b',
                           verbose=True,
                           user_commands=[],
                           flower_password="******",
                           default_max_appserver_memory=ParseArgs.DEFAULT_MAX_APPSERVER_MEMORY,
                           EC2_ACCESS_KEY='baz',
                           EC2_SECRET_KEY='baz',
                           EC2_URL='')
        node_layout = NodeLayout({
            'table': 'cassandra',
            'infrastructure': "ec2",
            'min_machines': 1,
            'max_machines': 1,
            'instance_type': 'm1.large'
        })

        flexmock(NodeLayout).should_receive("head_node").and_return(
            Node('public1', 'some cloud', ['some role']))

        expected = {
            'table': 'cassandra',
            'login': '******',
            'clear_datastore': 'False',
            'keyname': 'boo',
            'default_min_appservers': '1',
            'autoscale': 'False',
            'replication': 'None',
            'group': 'bazgroup',
            'machine': 'ami-ABCDEFG',
            'infrastructure': 'ec2',
            'instance_type': 'm1.large',
            'min_machines': '1',
            'max_machines': '1',
            'use_spot_instances': 'True',
            'user_commands': json.dumps([]),
            'max_spot_price': '1.23',
            'zone': 'my-zone-1b',
            'verbose': 'True',
            'flower_password': '******',
            'default_max_appserver_memory': str(
                ParseArgs.DEFAULT_MAX_APPSERVER_MEMORY),
            'EC2_ACCESS_KEY': 'baz',
            'EC2_SECRET_KEY': 'baz',
            'EC2_URL': ''
        }
        actual = LocalState.generate_deployment_params(
            options, node_layout, {'max_spot_price': '1.23'})
        self.assertEquals(expected, actual)
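
The expected dict above boils down to: each option field is flattened into a string-valued parameter, and the additional parameters passed to generate_deployment_params (here max_spot_price) are merged in on top. A toy sketch of that shape, not the real LocalState implementation:

def flatten_options(option_fields, additional_params):
  """Stringifies option values and merges in any extra deployment parameters."""
  params = {key: str(value) for key, value in option_fields.items()}
  params.update(additional_params)
  return params

result = flatten_options(
    {'table': 'cassandra', 'autoscale': False, 'replication': None},
    {'max_spot_price': '1.23'})
assert result == {'table': 'cassandra', 'autoscale': 'False',
                  'replication': 'None', 'max_spot_price': '1.23'}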
Example #29
  def run_instances(cls, options):
    """Starts a new AppScale deployment with the parameters given.

    Args:
      options: A Namespace that has fields for each parameter that can be
        passed in via the command-line interface.
    Raises:
      AppControllerException: If the AppController on the head node crashes.
        When this occurs, the message in the exception contains the reason why
        the AppController crashed.
      BadConfigurationException: If the user passes in options that are not
        sufficient to start an AppScale deployment (e.g., running on EC2 but
        not specifying the AMI to use), or if the user provides us
        contradictory options (e.g., running on EC2 but not specifying EC2
        credentials).
    """
    LocalState.make_appscale_directory()
    LocalState.ensure_appscale_isnt_running(options.keyname, options.force)
    node_layout = NodeLayout(options)

    if options.infrastructure:
      if (not options.test and not options.force and
          not (options.disks or node_layout.are_disks_used())):
        LocalState.ensure_user_wants_to_run_without_disks()

    reduced_version = '.'.join(x for x in APPSCALE_VERSION.split('.')[:2])
    AppScaleLogger.log("Starting AppScale " + reduced_version)

    my_id = str(uuid.uuid4())
    AppScaleLogger.remote_log_tools_state(options, my_id, "started",
      APPSCALE_VERSION)

    head_node = node_layout.head_node()
    # Start VMs in cloud via cloud agent.
    if options.infrastructure:
      node_layout = RemoteHelper.start_all_nodes(options, node_layout)

      # Enables root logins and SSH access on the head node.
      RemoteHelper.enable_root_ssh(options, head_node.public_ip)
    AppScaleLogger.verbose("Node Layout: {}".format(node_layout.to_list()))

    # Ensure all nodes are compatible.
    RemoteHelper.ensure_machine_is_compatible(
      head_node.public_ip, options.keyname)

    # Use rsync to move custom code into the deployment.
    if options.rsync_source:
      AppScaleLogger.log("Copying over local copy of AppScale from {0}".
        format(options.rsync_source))
      RemoteHelper.rsync_files(head_node.public_ip, options.keyname,
                               options.rsync_source)

    # Start services on head node.
    RemoteHelper.start_head_node(options, my_id, node_layout)

    # Write deployment metadata to disk (facilitates SSH operations, etc.)
    db_master = node_layout.db_master().private_ip
    head_node = node_layout.head_node().public_ip
    LocalState.update_local_metadata(options, db_master, head_node)

    # Copy the locations.json to the head node
    RemoteHelper.copy_local_metadata(node_layout.head_node().public_ip,
                                     options.keyname)

    # Wait for services on head node to start.
    secret_key = LocalState.get_secret_key(options.keyname)
    acc = AppControllerClient(head_node, secret_key)
    try:
      while not acc.is_initialized():
        AppScaleLogger.log('Waiting for head node to initialize...')
        # This can take some time in particular the first time around, since
        # we will have to initialize the database.
        time.sleep(cls.SLEEP_TIME*3)
    except socket.error as socket_error:
      AppScaleLogger.warn('Unable to initialize AppController: {}'.
                          format(socket_error.message))
      message = RemoteHelper.collect_appcontroller_crashlog(
        head_node, options.keyname)
      raise AppControllerException(message)

    # Set up admin account.
    try:
      # We don't need to have any exception information here: we do expect
      # some anyway while the UserAppServer is coming up.
      acc.does_user_exist("non-existent-user", True)
    except Exception:
      AppScaleLogger.log('UserAppServer not ready yet. Retrying ...')
      time.sleep(cls.SLEEP_TIME)

    if options.admin_user and options.admin_pass:
      AppScaleLogger.log("Using the provided admin username/password")
      username, password = options.admin_user, options.admin_pass
    elif options.test:
      AppScaleLogger.log("Using default admin username/password")
      username, password = LocalState.DEFAULT_USER, LocalState.DEFAULT_PASSWORD
    else:
      username, password = LocalState.get_credentials()

    RemoteHelper.create_user_accounts(username, password, head_node,
                                      options.keyname)
    acc.set_admin_role(username, 'true', cls.ADMIN_CAPABILITIES)

    # Wait for machines to finish loading and AppScale Dashboard to be deployed.
    RemoteHelper.wait_for_machines_to_finish_loading(head_node, options.keyname)

    try:
      login_host = acc.get_property('login')['login']
    except KeyError:
      raise AppControllerException('login property not found')

    RemoteHelper.sleep_until_port_is_open(
      login_host, RemoteHelper.APP_DASHBOARD_PORT)

    AppScaleLogger.success("AppScale successfully started!")
    AppScaleLogger.success(
      'View status information about your AppScale deployment at '
      'http://{}:{}'.format(login_host, RemoteHelper.APP_DASHBOARD_PORT))
    AppScaleLogger.remote_log_tools_state(options, my_id,
      "finished", APPSCALE_VERSION)
Example #30
class TestRemoteHelper(unittest.TestCase):
    def setUp(self):
        # mock out all logging, since it clutters our output
        flexmock(AppScaleLogger)
        AppScaleLogger.should_receive('log').and_return()

        # mock out all sleeps, as they aren't necessary for unit testing
        flexmock(time)
        time.should_receive('sleep').and_return()

        # set up some fake options so that we don't have to generate them via
        # ParseArgs
        self.options = flexmock(infrastructure='ec2',
                                group='boogroup',
                                machine='ami-ABCDEFG',
                                instance_type='m1.large',
                                keyname='bookey',
                                table='cassandra',
                                verbose=False,
                                test=False,
                                use_spot_instances=False,
                                zone='my-zone-1b',
                                static_ip=None,
                                replication=None,
                                appengine=None,
                                autoscale=None,
                                user_commands=[],
                                flower_password='',
                                max_memory='400',
                                ips={
                                    'zookeeper': 'node-2',
                                    'master': 'node-1',
                                    'appengine': 'node-3',
                                    'database': 'node-4'
                                })
        self.my_id = "12345"
        self.node_layout = NodeLayout(self.options)

        # set up phony AWS credentials for each test
        # ones that test not having them present can
        # remove them
        for credential in EucalyptusAgent.REQUIRED_EC2_CREDENTIALS:
            os.environ[credential] = "baz"
        os.environ['EC2_URL'] = "http://boo"

        # mock out calls to EC2
        # begin by assuming that our ssh keypair doesn't exist, and thus that we
        # need to create it
        key_contents = "key contents here"
        fake_key = flexmock(name="fake_key", material=key_contents)
        fake_key.should_receive('save').with_args(
            os.environ['HOME'] + '/.appscale').and_return(None)

        fake_ec2 = flexmock(name="fake_ec2")
        fake_ec2.should_receive('get_key_pair').with_args('bookey') \
          .and_return(None)
        fake_ec2.should_receive('create_key_pair').with_args('bookey') \
          .and_return(fake_key)

        # mock out writing the secret key
        builtins = flexmock(sys.modules['__builtin__'])
        builtins.should_call('open')  # set the fall-through

        secret_key_location = LocalState.LOCAL_APPSCALE_PATH + "bookey.secret"
        fake_secret = flexmock(name="fake_secret")
        fake_secret.should_receive('write').and_return()
        builtins.should_receive('open').with_args(secret_key_location, 'w') \
          .and_return(fake_secret)

        # also, mock out the keypair writing and chmod'ing
        ssh_key_location = LocalState.LOCAL_APPSCALE_PATH + "bookey.key"
        fake_file = flexmock(name="fake_file")
        fake_file.should_receive('write').with_args(key_contents).and_return()

        builtins.should_receive('open').with_args(ssh_key_location, 'w') \
          .and_return(fake_file)

        flexmock(os)
        os.should_receive('chmod').with_args(ssh_key_location,
                                             0600).and_return()

        # next, assume there are no security groups up at first, but then it gets
        # created.
        udp_rule = flexmock(from_port=1, to_port=65535, ip_protocol='udp')
        tcp_rule = flexmock(from_port=1, to_port=65535, ip_protocol='tcp')
        icmp_rule = flexmock(from_port=-1, to_port=-1, ip_protocol='icmp')
        group = flexmock(name='boogroup',
                         rules=[tcp_rule, udp_rule, icmp_rule])
        fake_ec2.should_receive(
            'get_all_security_groups').with_args().and_return([])
        fake_ec2.should_receive('get_all_security_groups').with_args(
            'boogroup').and_return([group])

        # and then assume we can create and open our security group fine
        fake_ec2.should_receive('create_security_group').with_args(
            'boogroup', 'AppScale security group').and_return()
        fake_ec2.should_receive('authorize_security_group').and_return()

        # next, add in mocks for run_instances
        # the first time around, let's say that no machines are running
        # the second time around, let's say that our machine is pending
        # and that it's up the third time around
        fake_pending_instance = flexmock(state='pending')
        fake_pending_reservation = flexmock(instances=fake_pending_instance)

        fake_running_instance = flexmock(state='running',
                                         key_name='bookey',
                                         id='i-12345678',
                                         ip_address='1.2.3.4',
                                         private_ip_address='1.2.3.4')
        fake_running_reservation = flexmock(instances=fake_running_instance)

        fake_ec2.should_receive('get_all_instances').and_return([]) \
          .and_return([]) \
          .and_return([fake_pending_reservation]) \
          .and_return([fake_running_reservation])

        # next, assume that our run_instances command succeeds
        fake_ec2.should_receive('run_instances').and_return()

        # finally, inject our mocked EC2
        flexmock(boto.ec2)
        boto.ec2.should_receive('connect_to_region').and_return(fake_ec2)

        # assume that ssh comes up on the third attempt
        fake_socket = flexmock(name='fake_socket')
        fake_socket.should_receive('connect').with_args(('public1',
          RemoteHelper.SSH_PORT)).and_raise(Exception).and_raise(Exception) \
          .and_return(None)
        flexmock(socket)
        socket.should_receive('socket').and_return(fake_socket)

        # throw some default mocks together for when invoking via shell succeeds
        # and when it fails
        self.fake_temp_file = flexmock(name='fake_temp_file')
        self.fake_temp_file.should_receive('seek').with_args(0).and_return()
        self.fake_temp_file.should_receive('read').and_return('boo out')
        self.fake_temp_file.should_receive('close').and_return()

        flexmock(tempfile)
        tempfile.should_receive('NamedTemporaryFile')\
          .and_return(self.fake_temp_file)

        self.success = flexmock(name='success', returncode=0)
        self.success.should_receive('wait').and_return(0)

        self.failed = flexmock(name='success', returncode=1)
        self.failed.should_receive('wait').and_return(1)

        # assume that root login isn't already enabled
        local_state = flexmock(LocalState)
        local_state.should_receive('shell') \
          .with_args(re.compile('^ssh .*root'), False, 1, stdin='ls') \
          .and_return(RemoteHelper.LOGIN_AS_UBUNTU_USER)

        # and assume that we can ssh in as ubuntu to enable root login
        local_state = flexmock(LocalState)
        local_state.should_receive('shell')\
          .with_args(re.compile('^ssh .*ubuntu'),False,5)\
          .and_return()

        # also assume that we can scp over our ssh keys
        local_state.should_receive('shell')\
          .with_args(re.compile('scp .*/root/.ssh/id_'),False,5)\
          .and_return()

        local_state.should_receive('shell')\
          .with_args(re.compile('scp .*/root/.appscale/bookey.key'), False, 5)\
          .and_return()
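
    # Illustrative aside, not part of the original suite: the setUp above leans
    # on flexmock replaying chained and_return() / and_raise() values in call
    # order; that is how get_all_instances walks from "no machines" through
    # "pending" to "running", and how the socket mock fails twice before
    # connecting. A minimal, self-contained sketch of that playback behavior
    # (the names below are made up for illustration):
    def _sketch_flexmock_playback(self):
        from flexmock import flexmock

        fake_cloud = flexmock(name='fake_cloud')
        fake_cloud.should_receive('get_all_instances') \
          .and_return([]) \
          .and_return(['pending']) \
          .and_return(['running'])

        # successive calls replay the chained values in order
        assert fake_cloud.get_all_instances() == []
        assert fake_cloud.get_all_instances() == ['pending']
        assert fake_cloud.get_all_instances() == ['running']

        # and_raise() chains the same way: two failures, then success
        fake_conn = flexmock(name='fake_conn')
        fake_conn.should_receive('connect') \
          .and_raise(Exception).and_raise(Exception).and_return(None)
        for attempt in range(3):
            try:
                fake_conn.connect()
                break
            except Exception:
                continue
        assert attempt == 2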

    def test_start_head_node(self):
        self.options = flexmock(
            infrastructure='public cloud',
            group='group',
            machine='vm image',
            instance_type='instance type',
            keyname='keyname',
            table='cassandra',
            verbose=False,
            test=False,
            use_spot_instances=False,
            zone='zone',
            static_ip=None,
            replication=None,
            appengine=None,
            autoscale=None,
            user_commands=[],
            flower_password='',
            max_memory='X',
        )

        self.node_layout = NodeLayout(self.options)

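        # mock out generating the secret key and resolving the SSH key path for
        # this keyname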
        flexmock(LocalState).\
          should_receive("generate_secret_key").\
          with_args(self.options.keyname).\
          and_return('some secret key')

        flexmock(LocalState).\
          should_receive("get_key_path_from_name").\
          with_args(self.options.keyname).\
          and_return('some key path')

        flexmock(NodeLayout).should_receive('head_node').\
          and_return(SimpleNode('some IP', 'cloud'))

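        # hand back a fake infrastructure agent instead of creating a real one
        # for the 'public cloud' infrastructure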
        fake_agent = FakeAgent()
        flexmock(factory.InfrastructureAgentFactory).\
          should_receive('create_agent').\
          with_args('public cloud').\
          and_return(fake_agent)

        self.additional_params = {}
        deployment_params = {}

        flexmock(LocalState).\
          should_receive('generate_deployment_params').\
          with_args(self.options, self.node_layout, self.additional_params).\
          and_return(deployment_params)

        flexmock(AppScaleLogger).should_receive('log').and_return()
        flexmock(AppScaleLogger).should_receive('remote_log_tools_state').\
          and_return()

        flexmock(time).should_receive('sleep').and_return()

        flexmock(RemoteHelper).\
          should_receive('copy_deployment_credentials').\
          with_args('some IP', self.options).\
          and_return()

        flexmock(RemoteHelper).\
          should_receive('run_user_commands').\
          with_args('some IP', self.options.user_commands,
                    self.options.keyname, self.options.verbose).\
          and_return()

        flexmock(RemoteHelper).\
          should_receive('start_remote_appcontroller').\
          with_args('some IP', self.options.keyname, self.options.verbose).\
          and_return()

        layout = {}
        flexmock(NodeLayout).should_receive('to_list').and_return(layout)

        flexmock(AppControllerClient).\
          should_receive('set_parameters').\
          with_args(layout, deployment_params).\
          and_return()

        RemoteHelper.start_head_node(self.options, 'an ID', self.node_layout)

    def test_rsync_files_from_dir_that_doesnt_exist(self):
        # if the user specifies that we should copy from a directory that doesn't
        # exist, we should throw up and die
        flexmock(os.path)
        os.path.should_receive('exists').with_args('/tmp/booscale-local').\
          and_return(False)
        self.assertRaises(BadConfigurationException, RemoteHelper.rsync_files,
                          'public1', 'booscale', '/tmp/booscale-local', False)

    def test_rsync_files_from_dir_that_does_exist(self):
        # if the user specifies that we should copy from a directory that does
        # exist, and has all the right directories in it, we should succeed
        flexmock(os.path)
        os.path.should_receive('exists').with_args('/tmp/booscale-local').\
          and_return(True)

        # assume the rsyncs succeed (the regex-based argument matching used
        # here is sketched after this test)
        local_state = flexmock(LocalState)
        local_state.should_receive('shell')\
          .with_args(re.compile('^rsync'), False)\
          .and_return().ordered()

        RemoteHelper.rsync_files('public1', 'booscale', '/tmp/booscale-local',
                                 False)
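
    # Illustrative aside, not part of the original suite: flexmock accepts a
    # compiled regular expression in with_args(), so a single expectation can
    # cover every shell command that starts with "rsync", which is what the
    # test above relies on. A minimal sketch (the names below are made up for
    # illustration):
    def _sketch_regex_argument_matching(self):
        import re
        from flexmock import flexmock

        fake_runner = flexmock(name='fake_runner')
        fake_runner.should_receive('shell') \
          .with_args(re.compile('^rsync')) \
          .and_return(0)

        # any first argument matching the pattern satisfies the expectation
        assert fake_runner.shell('rsync -av /tmp/src/ host:/tmp/dst/') == 0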

    def test_copy_deployment_credentials_in_cloud(self):
        options = flexmock(
            keyname='key1',
            infrastructure='ec2',
            verbose=True,
        )

        local_state = flexmock(LocalState)
        remote_helper = flexmock(RemoteHelper)
        local_state.should_receive('get_secret_key_location').and_return()
        local_state.should_receive('get_key_path_from_name').and_return()
        local_state.should_receive('get_certificate_location').and_return()
        local_state.should_receive('get_private_key_location').and_return()

        remote_helper.should_receive('scp').and_return()
        local_state.should_receive('generate_ssl_cert').and_return()
        popen_object = flexmock(communicate=lambda: ['hash_id'])
        flexmock(subprocess).should_receive('Popen').and_return(popen_object)
        remote_helper.should_receive('ssh').and_return()
        flexmock(AppScaleLogger).should_receive('log').and_return()

        RemoteHelper.copy_deployment_credentials('public1', options)

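        # repeat the copy for GCE, where the OAuth2 credentials are shipped
        # over as well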
        flexmock(GCEAgent).should_receive('get_secrets_type').\
          and_return(CredentialTypes.OAUTH)
        flexmock(os.path).should_receive('exists').and_return(True)

        options = flexmock(
            keyname='key1',
            infrastructure='gce',
            verbose=True,
        )
        local_state.should_receive('get_oauth2_storage_location').and_return()

        RemoteHelper.copy_deployment_credentials('public1', options)

    def test_start_remote_appcontroller(self):
        # mock out removing the old json file
        local_state = flexmock(LocalState)
        local_state.should_receive('shell')\
          .with_args(re.compile('^ssh'), False, 5, stdin=re.compile('rm -rf'))\
          .and_return()

        # assume we started monit on public1 fine
        local_state.should_receive('shell')\
          .with_args(re.compile('^ssh'), False, 5, stdin=re.compile('monit'))\
          .and_return()

        # also assume that we scp'ed over the monit config file fine
        local_state.should_receive('shell')\
          .with_args(re.compile('scp .*controller-17443.cfg*'), False, 5)\
          .and_return()

        # and assume we started the AppController on public1 fine
        local_state.should_receive('shell')\
          .with_args(re.compile('^ssh'), False, 5,
            stdin=re.compile('^monit start -g controller'))\
          .and_return()

        # finally, assume the appcontroller comes up after a few tries
        # assume that ssh comes up on the third attempt
        fake_socket = flexmock(name='fake_socket')
        fake_socket.should_receive('connect').with_args(('public1',
          AppControllerClient.PORT)).and_raise(Exception) \
          .and_raise(Exception).and_return(None)
        socket.should_receive('socket').and_return(fake_socket)

        # Mock out additional remote calls.
        local_state.should_receive('shell').with_args(
            'ssh -i /root/.appscale/bookey.key -o LogLevel=quiet -o NumberOfPasswordPrompts=0 -o StrictHostkeyChecking=no -o UserKnownHostsFile=/dev/null root@public1 ',
            False,
            5,
            stdin=
            'cp /root/appscale/AppController/scripts/appcontroller /etc/init.d/'
        ).and_return()

        local_state.should_receive('shell').with_args(
            'ssh -i /root/.appscale/bookey.key -o LogLevel=quiet -o NumberOfPasswordPrompts=0 -o StrictHostkeyChecking=no -o UserKnownHostsFile=/dev/null root@public1 ',
            False,
            5,
            stdin='chmod +x /etc/init.d/appcontroller').and_return()

        local_state.should_receive('shell').with_args(
            'ssh -i /root/.appscale/boobazblargfoo.key -o LogLevel=quiet -o NumberOfPasswordPrompts=0 -o StrictHostkeyChecking=no -o UserKnownHostsFile=/dev/null root@elastic-ip ',
            False,
            5,
            stdin='chmod +x /etc/init.d/appcontroller').and_return()

        RemoteHelper.start_remote_appcontroller('public1', 'bookey', False)

    def test_copy_local_metadata(self):
        # Assume the locations files were copied successfully.
        local_state = flexmock(LocalState)
        locations_yaml = '{}/locations-bookey.yaml'.\
          format(RemoteHelper.CONFIG_DIR)
        local_state.should_receive('shell').with_args(
            re.compile('^scp .*{}'.format(locations_yaml)), False, 5)

        locations_json = '{}/locations-bookey.json'.\
          format(RemoteHelper.CONFIG_DIR)
        local_state.should_receive('shell').with_args(
            re.compile('^scp .*{}'.format(locations_json)), False, 5)

        local_state.should_receive('shell').with_args(
            re.compile('^scp .*/root/.appscale/locations-bookey.json'), False,
            5)

        # Assume the secret file was copied successfully.
        local_state.should_receive('shell').with_args(
            re.compile('^scp .*bookey.secret'), False, 5)

        RemoteHelper.copy_local_metadata('public1', 'bookey', False)

    def test_create_user_accounts(self):
        # mock out reading the secret key
        builtins = flexmock(sys.modules['__builtin__'])
        # set the fall-through so unmatched open() calls reach the real builtin
        # (see the pass-through sketch after this test)
        builtins.should_call('open')

        secret_key_location = LocalState.LOCAL_APPSCALE_PATH + "bookey.secret"
        fake_secret = flexmock(name="fake_secret")
        fake_secret.should_receive('read').and_return('the secret')
        builtins.should_receive('open').with_args(secret_key_location, 'r') \
          .and_return(fake_secret)

        # mock out reading the locations.json file, and slip in our own json
        flexmock(os.path)
        os.path.should_call('exists')  # set the fall-through
        os.path.should_receive('exists').with_args(
            LocalState.get_locations_json_location('bookey')).and_return(True)

        fake_nodes_json = flexmock(name="fake_nodes_json")
        fake_nodes_json.should_receive('read').and_return(
            json.dumps({
                "node_info": [{
                    "public_ip": "public1",
                    "private_ip": "private1",
                    "jobs": ["shadow", "login"]
                }]
            }))
        builtins.should_receive('open').with_args(
          LocalState.get_locations_json_location('bookey'), 'r') \
          .and_return(fake_nodes_json)

        # Mock out SOAP interactions with the AppController.
        fake_appcontroller = flexmock(name="fake_appcontroller")
        fake_appcontroller.should_receive('does_user_exist').with_args(
            'boo@foo.goo', 'the secret').and_return('false')
        fake_appcontroller.should_receive('create_user').with_args(
            'boo@foo.goo', str, 'xmpp_user', 'the secret').and_return('true')
        fake_appcontroller.should_receive('does_user_exist').with_args(
            'boo@public1', 'the secret').and_return('false')
        fake_appcontroller.should_receive('create_user').with_args(
            'boo@public1', str, 'xmpp_user', 'the secret').and_return('true')
        flexmock(SOAPpy)
        SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
          .and_return(fake_appcontroller)

        RemoteHelper.create_user_accounts('boo@foo.goo', 'password', 'public1',
                                          'bookey')
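
    # Illustrative aside, not part of the original suite: should_call() installs
    # a pass-through spy, so calls that no specific expectation matches still
    # reach the real implementation, while should_receive().with_args(...)
    # intercepts only the matching arguments. That is how the test above fakes
    # two specific files without breaking every other open(). A minimal sketch
    # against os.path.exists (the path below is made up for illustration):
    def _sketch_should_call_fall_through(self):
        import os
        from flexmock import flexmock

        flexmock(os.path)
        os.path.should_call('exists')  # unmatched calls pass through
        os.path.should_receive('exists') \
          .with_args('/no/such/appscale/path') \
          .and_return(True)

        # the faked path reports True; a real path still resolves normally
        assert os.path.exists('/no/such/appscale/path')
        assert os.path.exists(os.getcwd())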

    def test_wait_for_machines_to_finish_loading(self):
        # mock out reading the secret key
        builtins = flexmock(sys.modules['__builtin__'])
        builtins.should_call('open')  # set the fall-through

        secret_key_location = LocalState.LOCAL_APPSCALE_PATH + "bookey.secret"
        fake_secret = flexmock(name="fake_secret")
        fake_secret.should_receive('read').and_return('the secret')
        builtins.should_receive('open').with_args(secret_key_location, 'r') \
          .and_return(fake_secret)

        # mock out getting all the ips in the deployment from the head node
        fake_soap = flexmock(name='fake_soap')
        fake_soap.should_receive('get_all_public_ips').with_args('the secret') \
          .and_return(json.dumps(['public1', 'public2']))
        role_info = [{
            'public_ip': 'public1',
            'private_ip': 'private1',
            'jobs': ['shadow', 'db_master']
        }, {
            'public_ip': 'public2',
            'private_ip': 'private2',
            'jobs': ['appengine']
        }]
        fake_soap.should_receive('get_role_info').with_args('the secret') \
          .and_return(json.dumps(role_info))

        # also, let's say that our machines aren't running the first time we ask,
        # but that they are the second time
        fake_soap.should_receive('is_done_initializing').with_args('the secret') \
          .and_return(False).and_return(True)

        flexmock(SOAPpy)
        SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
          .and_return(fake_soap)
        SOAPpy.should_receive('SOAPProxy').with_args('https://public2:17443') \
          .and_return(fake_soap)

        RemoteHelper.wait_for_machines_to_finish_loading('public1', 'bookey')

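    # shared fixtures for the reattach tests below: the saved four-node layout
    # and the node info that get_local_nodes_info hands back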
    reattach_options = flexmock(infrastructure='euca',
                                group='group',
                                machine='vm image',
                                instance_type='instance type',
                                keyname='keyname',
                                table='cassandra',
                                verbose=False,
                                test=False,
                                use_spot_instances=False,
                                zone='zone',
                                static_ip=None,
                                replication=None,
                                appengine=None,
                                autoscale=None,
                                user_commands=[],
                                flower_password='',
                                max_memory='X',
                                ips={
                                    'master': 'node-1',
                                    'zookeeper': 'node-2',
                                    'appengine': 'node-3',
                                    'database': 'node-4'
                                })

    reattach_node_info = [{
        "public_ip": "0.0.0.0",
        "private_ip": "0.0.0.0",
        "instance_id": "i-APPSCALE1",
        "jobs": ['load_balancer', 'taskqueue', 'shadow', 'login',
                 'taskqueue_master']
    }, {
        "public_ip": "0.0.0.0",
        "private_ip": "0.0.0.0",
        "instance_id": "i-APPSCALE2",
        "jobs": ['memcache', 'appengine']
    }, {
        "public_ip": "0.0.0.0",
        "private_ip": "0.0.0.0",
        "instance_id": "i-APPSCALE3",
        "jobs": ['zookeeper']
    }, {
        "public_ip": "0.0.0.0",
        "private_ip": "0.0.0.0",
        "instance_id": "i-APPSCALE4",
        "jobs": ['db_master']
    }]

    def test_start_all_nodes_reattach(self):
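        # reattaching should succeed when the stored roles still match the
        # requested layout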
        self.node_layout = NodeLayout(self.reattach_options)
        self.assertTrue(self.node_layout.is_valid())
        fake_agent = FakeAgent()
        flexmock(factory.InfrastructureAgentFactory). \
          should_receive('create_agent'). \
          with_args('euca'). \
          and_return(fake_agent)

        LocalState.should_receive('get_login_host').and_return('0.0.0.1')

        LocalState.should_receive('get_local_nodes_info') \
          .and_return(self.reattach_node_info)

        RemoteHelper.start_all_nodes(self.reattach_options, self.node_layout)

    def test_start_all_nodes_reattach_changed_asf(self):
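        # if the AppScalefile layout no longer matches the stored roles,
        # reattaching should raise BadConfigurationException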
        self.options = flexmock(infrastructure='public cloud',
                                group='group',
                                machine='vm image',
                                instance_type='instance type',
                                keyname='keyname',
                                table='cassandra',
                                verbose=False,
                                test=False,
                                use_spot_instances=False,
                                zone='zone',
                                static_ip=None,
                                replication=None,
                                appengine=None,
                                autoscale=None,
                                user_commands=[],
                                flower_password='',
                                max_memory='X',
                                ips={
                                    'zookeeper': 'node-2',
                                    'master': 'node-1',
                                    'appengine': 'node-3',
                                    'database': 'node-3'
                                })

        self.node_layout = NodeLayout(self.options)

        fake_agent = FakeAgent()
        flexmock(factory.InfrastructureAgentFactory). \
          should_receive('create_agent'). \
          with_args('public cloud'). \
          and_return(fake_agent)

        LocalState.should_receive('get_login_host').and_return('0.0.0.1')

        LocalState.should_receive('get_local_nodes_info')\
          .and_return(self.reattach_node_info)

        with self.assertRaises(BadConfigurationException):
          RemoteHelper.start_all_nodes(self.options, self.node_layout)

    def test_start_all_nodes_reattach_changed_locations(self):
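        # likewise, if the stored locations describe roles that the saved
        # layout does not, reattaching should raise BadConfigurationException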
        self.node_layout = NodeLayout(self.reattach_options)

        fake_agent = FakeAgent()
        flexmock(factory.InfrastructureAgentFactory). \
          should_receive('create_agent'). \
          with_args('euca'). \
          and_return(fake_agent)

        LocalState.should_receive('get_login_host').and_return('0.0.0.1')

        node_info = [{
            "public_ip": "0.0.0.0",
            "private_ip": "0.0.0.0",
            "instance_id": "i-APPSCALE1",
            "jobs": ['load_balancer', 'taskqueue', 'shadow', 'login',
                     'taskqueue_master']
        }, {
            "public_ip": "0.0.0.0",
            "private_ip": "0.0.0.0",
            "instance_id": "i-APPSCALE2",
            "jobs": ['memcache', 'appengine']
        }, {
            "public_ip": "0.0.0.0",
            "private_ip": "0.0.0.0",
            "instance_id": "i-APPSCALE3",
            "jobs": ['zookeeper', "appengine"]
        }, {
            "public_ip": "0.0.0.0",
            "private_ip": "0.0.0.0",
            "instance_id": "i-APPSCALE4",
            "jobs": ['db_master']
        }]

        LocalState.should_receive('get_local_nodes_info').and_return(node_info)

        with self.assertRaises(BadConfigurationException):
          RemoteHelper.start_all_nodes(self.reattach_options,
                                       self.node_layout)