Example #1
  def test_network(self):
    self.assertEqual(
        shortcuts.network(),
        [{'accessConfigs': [{'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}],
          'network': 'default'}])

    self.assertEqual(
        shortcuts.network(None),
        [{'accessConfigs': [{'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}],
          'network': 'default'}])

    self.assertEqual(
        shortcuts.network('default'),
        [{'accessConfigs': [{'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}],
          'network': 'default'}])

    self.assertEqual(
        shortcuts.network('default', use_access_config=False),
        [{'network': 'default'}])

    self.assertEqual(
        shortcuts.network('default', external_ip='123.123.123.123'),
        [{'accessConfigs': [
            {'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT',
             'natIP': '123.123.123.123'}],
          'network': 'default'}])
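
Taken together, these assertions pin down what shortcuts.network() returns: a one-element networkInterfaces list whose single entry names the network ('default' when the argument is omitted or None) and, unless use_access_config is False, carries a ONE_TO_ONE_NAT access config, with natIP set when external_ip is given. A minimal sketch consistent with the assertions (the name of the first parameter and the defaults are assumptions; the actual gcelib implementation may differ):

# Sketch only: behavior inferred from the assertions above, not copied from
# the gcelib source; the first parameter's name is an assumption.
def network(name='default', external_ip=None, use_access_config=True):
  """Builds a networkInterfaces value for insert_instance calls."""
  interface = {'network': name if name is not None else 'default'}
  if use_access_config:
    access_config = {'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}
    if external_ip is not None:
      access_config['natIP'] = external_ip
    interface['accessConfigs'] = [access_config]
  return [interface]
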
Example #2
def main():
  # Performs the oauth2 dance.
  credentials = gce_util.get_credentials()

  # Grabs default values. Defaults can be saved in ~/.gce.config in
  # the following format:
  #
  # [gce_config]
  # project: my-project
  # image: projects/google/images/ubuntu-12-04-v20120621
  # zone: us-east-a
  # machine_type: n1-standard-1
  defaults = gce_util.get_defaults()

  if DEFAULT_PROJECT is None and defaults.project is None:
    print 'Please specify a default project by editing DEFAULT_PROJECT in'
    print 'this script or by using a ~/.gce.config file.'
    exit(1)

  # Constructs an instance of GoogleComputeEngine.
  api = gce.get_api(
      credentials,
      logging_level=LOG_LEVEL,
      default_project=defaults.project or DEFAULT_PROJECT,
      default_zone=defaults.zone or DEFAULT_ZONE,
      default_image=defaults.image or DEFAULT_IMAGE,
      default_machine_type=defaults.machine_type or DEFAULT_MACHINE_TYPE)

  # Prints project info.
  print api.get_project()

  # Creates an instance asynchronously.
  print api.insert_instance(
      'i-was-created-asynchronously',
      networkInterfaces=shortcuts.network(),
      blocking=False)

  # Creates 9 test instances synchronously.
  names = ['test-instance-{0}'.format(i) for i in xrange(9)]
  print api.insert_instances(names, networkInterfaces=shortcuts.network())

  # Prints the names of all instances in the given project.
  for instance in api.all_instances():
    print instance.name

  # Prints the number of instances by zone.
  print get_instances_by_zone(api)

  # Deletes the asynchronously-created instance.
  print api.delete_instance('i-was-created-asynchronously', blocking=False)

  # Deletes the test instances synchronously.
  print api.delete_instances(names)

  # Prints the number of operations.
  print len(list(api.all_operations()))
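
For context, get_instances_by_zone is called above but not defined in this excerpt. A minimal sketch of what such a helper could look like, assuming each instance object exposes a zone attribute alongside name:

def get_instances_by_zone(api):
  # Hypothetical helper: counts instances per zone using the same
  # all_instances() iterator used above; .zone is an assumed attribute.
  counts = {}
  for instance in api.all_instances():
    counts[instance.zone] = counts.get(instance.zone, 0) + 1
  return counts
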
Example #3
 def spawn_instance(self, name, snitch):
     """Create an instance with the specified snitch."""
     disks = []
     if cfg.disk:
          # Can't mount rw if others have already mounted it ro, so only
          # mount it on one instance.
         if cfg.rw_disk_instance:
             if cfg.rw_disk_instance == name:
                 disks = gce_shortcuts.rw_disks([cfg.disk])
             # Otherwise, don't mount
         else:
             # Everyone gets it ro
             disks = gce_shortcuts.ro_disks([cfg.disk])
     network = []
     # Always give the JobTracker and NameNode an external IP.
     if (not name.startswith('hadoop-slave-')) or cfg.external_ips:
         network = gce_shortcuts.network()
     else:
         network = gce_shortcuts.network(use_access_config=False)
     if name == cfg.hadoop_namenode:
         # This instance handles transfers from HDFS to GS.
         scope = cfg.rw_storage_scope
     else:
         scope = cfg.ro_storage_scope
     try:
         resp = util.api.insert_instance(
             name=name,
             zone=cfg.zone,
             machineType=cfg.machine_type,
             image=cfg.image,
             serviceAccounts=gce_shortcuts.service_accounts([scope]),
             disks=disks,
              metadata=gce_shortcuts.metadata({
                  # Key modified to avoid dots, which are disallowed in v1beta13.
                  'gs_bucket': cfg.gs_bucket,
                  'snitch-tarball_tgz': cfg.gs_snitch_tarball,
                  'startup-script': open('start_setup.sh').read(),
                  'bootstrap_sh': open('hadoop/bootstrap.sh').read(),
                  'snitch_py': open(snitch).read()
              }),
             networkInterfaces=network,
             blocking=True)
     except gce.GceError as e:
         logging.info('GCE exception inserting instance ' + name + ': ' +
                      str(e))
     except Exception as e:
         logging.info('exception inserting instance ' + name + ': ' +
                      str(e))
     return True
Example #4
def main():
  common.setup()

  # CHANGE ME
  zone = 'us-central1-a'
  machtype = 'n1-standard-4-d'
  image = 'projects/google/images/ubuntu-12-04-v20120621'
  # Persistent disk, if any.
  disk = ''
  # If this is set, only this slave will have the disk mounted, and it'll be rw.
  # Otherwise, all slaves get the disk mounted ro
  rw_disk_instance = ''

  print 'Packaging up the stuff the coordinator will need...'
  # tar will insert directories, so flatten the view a bit
  subprocess.call(['cp', 'coordinator/coordinator.py', '.'])
  subprocess.call(['cp', 'coordinator/hadoop_cluster.py', '.'])
  subprocess.call(['tar', 'czf', 'coordinator.tgz', 'hadoop', 'gcelib',
                   'hadoop-tools.jar', 'cfg.py', 'util.py', 'coordinator.py',
                   'hadoop_cluster.py', 'start_setup.sh'])
  subprocess.call(['rm', 'coordinator.py', 'hadoop_cluster.py'])
  # Push to a fixed place for now
  subprocess.call(['gsutil', 'cp', 'coordinator.tgz',
                   cfg.gs_coordinators_tarball])
  subprocess.call(['rm', 'coordinator.tgz'])
  print

  print 'Launching coordinator...'
  util.api.insert_instance(
      name=cfg.coordinator, zone=zone,
      machineType=machtype, image=image,
      serviceAccounts=gce_shortcuts.service_accounts([cfg.compute_scope,
                                                      cfg.rw_storage_scope]),
      networkInterfaces=gce_shortcuts.network(),
      metadata=gce_shortcuts.metadata({
          'startup-script': open('start_setup.sh').read(),
          'bootstrap.sh': open('coordinator/bootstrap.sh').read(),
          'tarball': cfg.gs_coordinators_tarball,
          'gs_bucket': cfg.gs_bucket,
          'zone': zone,
          'machine_type': machtype,
          'image': image,
          'disk': disk,
          'rw_disk_instance': rw_disk_instance,
          'secret': cfg.secret
      }),
      blocking=True
  )
  print

  print 'Waiting for coordinator to come online...'
  while True:
    status, _ = util.get_status(cfg.coordinator)
    print status[1]
    if status == util.InstanceState.SNITCH_READY:
      break
    time.sleep(cfg.poll_delay_secs)
  print

  print 'Controller is ready to receive commands.'
Example #5
 def spawn_instance(self, name, snitch):
   """Create an instance with the specified snitch."""
   disks = []
   if cfg.disk:
      # Can't mount rw if others have already mounted it ro, so only
      # mount it on one instance.
     if cfg.rw_disk_instance:
       if cfg.rw_disk_instance == name:
         disks = gce_shortcuts.rw_disks([cfg.disk])
       # Otherwise, don't mount
     else:
       # Everyone gets it ro
       disks = gce_shortcuts.ro_disks([cfg.disk])
   network = []
   # Always give the JobTracker and NameNode an external IP.
   if (not name.startswith('hadoop-slave-')) or cfg.external_ips:
     network = gce_shortcuts.network()
   else:
     network = gce_shortcuts.network(use_access_config=False)
   if name == cfg.hadoop_namenode:
     # This instance handles transfers from HDFS to GS.
     scope = cfg.rw_storage_scope
   else:
     scope = cfg.ro_storage_scope
   try:
     resp = util.api.insert_instance(
         name=name, zone=cfg.zone,
         machineType=cfg.machine_type, image=cfg.image,
         serviceAccounts=gce_shortcuts.service_accounts([scope]),
         disks=disks,
         metadata=gce_shortcuts.metadata({
             # Key modified to avoid dots, which are disallowed in v1beta13.
             'gs_bucket': cfg.gs_bucket,
             'snitch-tarball_tgz': cfg.gs_snitch_tarball,
             'startup-script': open('start_setup.sh').read(),
             'bootstrap_sh': open('hadoop/bootstrap.sh').read(),
             'snitch_py': open(snitch).read()
         }),
         networkInterfaces=network,
         blocking=True
     )
   except gce.GceError as e:
     logging.info('GCE exception inserting instance ' + name + ': ' + str(e))
   except Exception as e:
     logging.info('exception inserting instance ' + name + ': ' + str(e))
   return True
Example #6
    def test_network(self):
        self.assertEqual(shortcuts.network(), [{
            'accessConfigs': [{
                'type': 'ONE_TO_ONE_NAT',
                'name': 'External NAT'
            }],
            'network': 'default'
        }])

        self.assertEqual(shortcuts.network(None), [{
            'accessConfigs': [{
                'type': 'ONE_TO_ONE_NAT',
                'name': 'External NAT'
            }],
            'network': 'default'
        }])

        self.assertEqual(shortcuts.network('default'), [{
            'accessConfigs': [{
                'type': 'ONE_TO_ONE_NAT',
                'name': 'External NAT'
            }],
            'network': 'default'
        }])

        self.assertEqual(shortcuts.network('default', use_access_config=False),
                         [{'network': 'default'}])

        self.assertEqual(
            shortcuts.network('default', external_ip='123.123.123.123'), [{
                'accessConfigs': [{
                    'type': 'ONE_TO_ONE_NAT',
                    'name': 'External NAT',
                    'natIP': '123.123.123.123'
                }],
                'network': 'default'
            }])
Example #7
    def AddInstances(self, count=1):
        with open(STARTUP_SCRIPT_PATH, "r") as f:
            startup_script = f.read()

        metadata = [{
            "key": "startup-script",
            "value": startup_script,
        }]

        svc_accounts = [
            "https://www.googleapis.com/auth/devstorage.read_write",
        ]

        self.api.insert_instances(names=self._GetNewInstanceNames(count),
                                  networkInterfaces=shortcuts.network(),
                                  metadata=gce_v1beta12.Metadata(metadata),
                                  serviceAccounts=gce_v1beta12.ServiceAccount(
                                      "default", svc_accounts))
Example #8
 def AddInstances(self, count=1):
   with open(STARTUP_SCRIPT_PATH, "r") as f:
     startup_script = f.read()
     
   metadata = [
     {
       "key": "startup-script",
       "value": startup_script,
     }
   ]
   
   svc_accounts = [
     "https://www.googleapis.com/auth/devstorage.read_write",
   ]
    
   self.api.insert_instances(
       names=self._GetNewInstanceNames(count),
       networkInterfaces=shortcuts.network(),
       metadata=gce_v1beta12.Metadata(metadata),
       serviceAccounts=gce_v1beta12.ServiceAccount("default", svc_accounts))
Example #9
    def __init__(self,
                 credentials,
                 logging_level=logging.WARN,
                 base_url=None,
                 default_image=None,
                 default_machine_type=None,
                 default_network='default',
                 default_network_interface=None,
                 default_project=None,
                 default_zone=None,
                 trace_token=None):
        """Base class constructor.

    Args:
      credentials: An OAuth2Credentials object that contains the
        client's credentials.
      logging_level: The verbosity of the log messages as defined
        in the logging module.
      base_url: The base URL to which REST requests can be made. This
        should not be changed.
      default_image: The name of the default image. This value can be
        overwritten by the different API calls.
      default_machine_type: The name of the default machine type. This
        value can be overwritten by the different API calls.
      default_network: The default network. This value can be overwritten
        by the different API calls.
      default_network_interface: The default network interface. This
        value can be overwritten by the different API calls.
      default_project: The name of the default project. This value can
        be overwritten by the different API calls.
      default_zone: The name of the default zone. This value can be
        overwritten by the different API calls.
      trace_token: A Google-provided token that can be used to trace API
        calls. Note that specifying this token will cause all calls to be
        rate limited to one request every 10 seconds, with a maximum burst
        of 60 requests.

    Raises:
      ValueError: When an invalid base_url is provided.
    """
        self.credentials = credentials
        if base_url is None and hasattr(self, 'BASE_URL'):
            base_url = self.BASE_URL
        if base_url is None:
            base_url = DEFAULT_BASE_URL

        GoogleComputeEngineBase._check_url(base_url)

        self.base_url = base_url.rstrip('/')
        self.logger = logging.getLogger('GoogleComputeEngine')
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(LOG_FORMAT))
        self.logger.addHandler(handler)
        self.logger.setLevel(logging_level)

        self.default_image = default_image
        self.default_machine_type = default_machine_type
        self.default_network = default_network
        self.default_network_interface = (default_network_interface or
                                          shortcuts.network(default_network))
        self.default_project = default_project
        self.default_zone = default_zone

        self.trace_token = trace_token
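
As the constructor shows, leaving default_network_interface unset makes the client fall back to shortcuts.network(default_network). A hedged usage sketch (assuming gce.get_api forwards these keyword arguments to this constructor, as Example #2 suggests; project and zone values are illustrative):

credentials = gce_util.get_credentials()
api = gce.get_api(
    credentials,
    default_project='my-project',     # illustrative values
    default_zone='us-central1-a',
    default_network='default')        # assumes get_api forwards this keyword
# With default_network_interface omitted, the client's default network
# interface is shortcuts.network('default'): one interface with an
# 'External NAT' access config, as the tests in Example #1 show.
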
Example #10
 def default_network_interface(self):
     self._default_network_interface = shortcuts.network()
Example #11
def main():
    common.setup()

    # CHANGE ME
    zone = 'us-central1-a'
    machtype = 'n1-standard-4-d'
    image = 'projects/google/images/ubuntu-12-04-v20120621'
    # Persistent disk, if any.
    disk = ''
    # If this is set, only this slave will have the disk mounted, and it'll be rw.
    # Otherwise, all slaves get the disk mounted ro
    rw_disk_instance = ''

    print 'Packaging up the stuff the coordinator will need...'
    # tar will insert directories, so flatten the view a bit
    subprocess.call(['cp', 'coordinator/coordinator.py', '.'])
    subprocess.call(['cp', 'coordinator/hadoop_cluster.py', '.'])
    subprocess.call([
        'tar', 'czf', 'coordinator.tgz', 'hadoop', 'gcelib',
        'hadoop-tools.jar', 'cfg.py', 'util.py', 'coordinator.py',
        'hadoop_cluster.py', 'start_setup.sh'
    ])
    subprocess.call(['rm', 'coordinator.py', 'hadoop_cluster.py'])
    # Push to a fixed place for now
    subprocess.call(
        ['gsutil', 'cp', 'coordinator.tgz', cfg.gs_coordinators_tarball])
    subprocess.call(['rm', 'coordinator.tgz'])
    print

    print 'Launching coordinator...'
    util.api.insert_instance(
        name=cfg.coordinator,
        zone=zone,
        machineType=machtype,
        image=image,
        serviceAccounts=gce_shortcuts.service_accounts(
            [cfg.compute_scope, cfg.rw_storage_scope]),
        networkInterfaces=gce_shortcuts.network(),
        metadata=gce_shortcuts.metadata({
            # Key modified to avoid dots, which are disallowed in v1beta13.
            'startup-script': open('start_setup.sh').read(),
            'bootstrap_sh': open('coordinator/bootstrap.sh').read(),
            'tarball': cfg.gs_coordinators_tarball,
            'gs_bucket': cfg.gs_bucket,
            'zone': zone,
            'machine_type': machtype,
            'image': image,
            'disk': disk,
            'rw_disk_instance': rw_disk_instance,
            'secret': cfg.secret
        }),
        blocking=True)
    print

    print 'Waiting for coordinator to come online...'
    while True:
        status, _ = util.get_status(cfg.coordinator)
        print status[1]
        if status == util.InstanceState.SNITCH_READY:
            break
        time.sleep(cfg.poll_delay_secs)
    print

    print 'Controller is ready to receive commands.'
Example #12
  def __init__(self, credentials,
               logging_level=logging.WARN,
               base_url=None,
               default_image=None,
               default_machine_type=None,
               default_network='default',
               default_network_interface=None,
               default_project=None,
               default_zone=None,
               trace_token=None):
    """Base class constructor.

    Args:
      credentials: An OAuth2Credentials object that contains the
        client's credentials.
      logging_level: The verbosity of the log messages as defined
        in the logging module.
      base_url: The base URL to which REST requests can be made. This
        should not be changed.
      default_image: The name of the default image. This value can be
        overwritten by the different API calls.
      default_machine_type: The name of the default machine type. This
        value can be overwritten by the different API calls.
      default_network: The default network. This value can be overwritten
        by the different API calls.
      default_network_interface: The default network interface. This
        value can be overwritten by the different API calls.
      default_project: The name of the default project. This value can
        be overwritten by the different API calls.
      default_zone: The name of the default zone. This value can be
        overwritten by the different API calls.
      trace_token: A Google-provided token that can be used to trace API
        calls. Note that specifying this token will cause all calls to be
        rate limited to one request every 10 seconds, with a maximum burst
        of 60 requests.

    Raises:
      ValueError: When an invalid base_url is provided.
    """
    self.credentials = credentials
    if base_url is None and hasattr(self, 'BASE_URL'):
      base_url = self.BASE_URL
    if base_url is None:
      base_url = DEFAULT_BASE_URL

    GoogleComputeEngineBase._check_url(base_url)

    self.base_url = base_url.rstrip('/')
    self.logger = logging.getLogger('GoogleComputeEngine')
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(LOG_FORMAT))
    self.logger.addHandler(handler)
    self.logger.setLevel(logging_level)

    self.default_image = default_image
    self.default_machine_type = default_machine_type
    self.default_network = default_network
    self.default_network_interface = (default_network_interface or
                                      shortcuts.network(default_network))
    self.default_project = default_project
    self.default_zone = default_zone

    self.trace_token = trace_token
Example #13
 def default_network_interface(self):
   self._default_network_interface = shortcuts.network()