def __init__(
      self, proto_id=None, ip_aliases=True, target_instance_ips=True,
      debug=False):
    """Constructor.

    Args:
      proto_id: string, the routing protocol identifier for Google IP changes.
      ip_aliases: bool, True if the guest should configure IP alias routes.
      target_instance_ips: bool, True supports internal IP load balancing.
      debug: bool, True if debug output should write to the console.
    """
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
    self.logger = logger.Logger(
        name='google-ip-forwarding', debug=debug, facility=facility)
    self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
    self.network_utils = network_utils.NetworkUtils(logger=self.logger)
    self.ip_forwarding_utils = ip_forwarding_utils.IpForwardingUtils(
        logger=self.logger, proto_id=proto_id)
    self.ip_aliases = ip_aliases
    self.target_instance_ips = target_instance_ips
    try:
      with file_utils.LockFile(LOCKFILE):
        self.logger.info('Starting Google IP Forwarding daemon.')
        timeout = 60 + random.randint(0, 30)
        self.watcher.WatchMetadata(
            self.HandleNetworkInterfaces, metadata_key=self.network_interfaces,
            recursive=True, timeout=timeout)
    except (IOError, OSError) as e:
      self.logger.warning(str(e))
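
Constructors like the one above block inside WatchMetadata, so constructing the object is effectively the daemon's whole lifecycle. A minimal sketch of a command-line entry point that drives it is shown below; the class name IpForwardingDaemon and the flag names are assumptions made for illustration, since neither appears in the snippet itself.

import argparse


def main():
  # Hypothetical wrapper: the class name and flags below are illustrative.
  parser = argparse.ArgumentParser(description='Google IP Forwarding daemon.')
  parser.add_argument(
      '--debug', action='store_true',
      help='Print debug output to the console.')
  parser.add_argument(
      '--proto-id', dest='proto_id', default=None,
      help='Routing protocol identifier for Google IP changes.')
  args = parser.parse_args()
  # Instantiation does not return until WatchMetadata stops watching.
  IpForwardingDaemon(proto_id=args.proto_id, debug=args.debug)


if __name__ == '__main__':
  main()
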
Example #2
    def __init__(self, debug=False):
        """Constructor.

    Args:
      debug: bool, True if debug output should write to the console.
    """
        facility = logging.handlers.SysLogHandler.LOG_DAEMON
        self.logger = logger.Logger(name='google-diagnostics',
                                    debug=debug,
                                    facility=facility)
        self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
        try:
            with file_utils.LockFile(LOCKFILE):
                self.logger.info('Starting Google Diagnostics daemon.')
                self.watcher.WatchMetadata(self.HandleDiagnostics,
                                           metadata_key=self.diagnose_token,
                                           recursive=False)
                self.watcher.WatchMetadata(
                    self.SetInstanceDiagnosticsEnabled,
                    metadata_key=self.instance_diagnostics_token,
                    recursive=False)
                self.watcher.WatchMetadata(
                    self.SetProjectDiagnosticsEnabled,
                    metadata_key=self.project_diagnostics_token,
                    recursive=False)
        except (IOError, OSError) as e:
            self.logger.warning(str(e))
Example #3
    def __init__(self, groups=None, remove=False, debug=False):
        """Constructor.

    Args:
      groups: string, a comma separated list of groups.
      remove: bool, True if deprovisioning a user should be destructive.
      debug: bool, True if debug output should write to the console.
    """
        facility = logging.handlers.SysLogHandler.LOG_DAEMON
        self.logger = logger.Logger(name='google-accounts',
                                    debug=debug,
                                    facility=facility)
        self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
        self.utils = accounts_utils.AccountsUtils(logger=self.logger,
                                                  groups=groups,
                                                  remove=remove)
        try:
            with file_utils.LockFile(LOCKFILE):
                self.logger.info('Starting Google Accounts daemon.')
                timeout = 60 + random.randint(0, 30)
                self.watcher.WatchMetadata(self.HandleAccounts,
                                           recursive=True,
                                           timeout=timeout)
        except (IOError, OSError) as e:
            self.logger.warning(str(e))
Example #4
    def __init__(self, debug=False):
        """Constructor.

    Args:
      debug: bool, True if debug output should write to the console.
    """
        facility = logging.handlers.SysLogHandler.LOG_DAEMON
        self.logger = logger.Logger(name='instance-setup',
                                    debug=debug,
                                    facility=facility)
        self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
        self.metadata_dict = None
        self.instance_config = instance_config.InstanceConfig()

        if self.instance_config.GetOptionBool('InstanceSetup',
                                              'optimize_local_ssd'):
            self._RunScript('optimize_local_ssd')
        if self.instance_config.GetOptionBool('InstanceSetup',
                                              'set_multiqueue'):
            self._RunScript('set_multiqueue')
        if self.instance_config.GetOptionBool('InstanceSetup',
                                              'network_enabled'):
            self.metadata_dict = self.watcher.GetMetadata()
            if self.instance_config.GetOptionBool('InstanceSetup',
                                                  'set_host_keys'):
                self._SetSshHostKeys()
            if self.instance_config.GetOptionBool('InstanceSetup',
                                                  'set_boto_config'):
                self._SetupBotoConfig()
        try:
            self.instance_config.WriteConfig()
        except (IOError, OSError) as e:
            self.logger.warning(str(e))
def Main(argv, watcher=None, loop_watcher=True):
  """Runs the watcher.

  Args:
    argv: map => [string, string], Command line arguments
    watcher: MetadataWatcher, used to stub out MetadataWatcher for testing.
    loop_watcher: Boolean, whether or not to loop upon an update.
  """
  logger = logging.getLogger()
  logger.setLevel(logging.INFO)

  polling_interval = argv.polling_interval

  # Currently, failsafe logic in the nginx module will start failing open if the
  # state file's modification time is more than two minutes in the past, so
  # we enforce that the polling interval is updated sensibly.
  if polling_interval < 1 or polling_interval > 119:
    polling_interval = DEFAULT_POLLING_INTERVAL_SEC

  watcher = watcher or metadata_watcher.MetadataWatcher()

  while True:
    value = watcher.GetMetadata(
        metadata_key='instance/attributes/%s' % argv.iap_metadata_key,
        recursive=False,
        timeout=1)
    UpdateStateFileFromMetadata(value, argv.output_state_file)
    if not loop_watcher:
      break
    time.sleep(polling_interval)
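
Main above reads only the polling_interval, iap_metadata_key and output_state_file attributes from argv, so any namespace object exposing those names will do. A hedged sketch of an argparse parser that supplies them follows; the flag names and help strings are assumptions, not taken from the original script.

import argparse


def _ParseArgs():
  # Hypothetical parser: only the attribute names are dictated by Main().
  parser = argparse.ArgumentParser(description='IAP state file updater.')
  parser.add_argument(
      '--iap_metadata_key', required=True,
      help='Instance attribute to watch for IAP state.')
  parser.add_argument(
      '--output_state_file', required=True,
      help='Path of the state file consumed by the nginx module.')
  parser.add_argument(
      '--polling_interval', type=int, default=DEFAULT_POLLING_INTERVAL_SEC,
      help='Seconds between metadata polls; Main() clamps it to 1-119.')
  return parser.parse_args()


if __name__ == '__main__':
  Main(_ParseArgs())
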
    def __init__(self, project_id=None):
        """Constructor.

    Args:
      project_id: string, the project ID to use in the config file.
    """
        self.logger = logger.Logger(name='boto-setup')
        self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
        self._CreateConfig(project_id)
  def __init__(self, path, config, provider):
    self.logger = logger.Logger(name='compute-auth')
    self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
    self.service_account = config.get('GoogleCompute', 'service_account', '')
    self.scopes = None
    if provider.name == 'google' and self.service_account:
      self.scopes = self._GetGsScopes()
    if not self.scopes:
      raise auth_handler.NotReadyToAuthenticate()
    def __init__(self, logger, script_type):
        """Constructor.

    Args:
      logger: logger object, used to write to SysLog and serial port.
      script_type: string, the metadata script type to run.
    """
        self.logger = logger
        self.script_type = script_type
        self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
  def __init__(self, project_id=None, debug=False):
    """Constructor.

    Args:
      project_id: string, the project ID to use in the config file.
      debug: bool, True if debug output should write to the console.
    """
    self.logger = logger.Logger(name='boto-setup', debug=debug)
    self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
    self._CreateConfig(project_id)
    def __init__(self, debug=False):
        """Constructor.

    Args:
      debug: bool, True if debug output should write to the console.
    """
        self.debug = debug
        facility = logging.handlers.SysLogHandler.LOG_DAEMON
        self.logger = logger.Logger(name='instance-setup',
                                    debug=self.debug,
                                    facility=facility)
        self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
        self.metadata_dict = None
        self.instance_config = instance_config.InstanceConfig(
            logger=self.logger)

        if self.instance_config.GetOptionBool('InstanceSetup',
                                              'network_enabled'):
            self.metadata_dict = self.watcher.GetMetadata()
            instance_config_metadata = self._GetInstanceConfig()
            self.instance_config = instance_config.InstanceConfig(
                logger=self.logger,
                instance_config_metadata=instance_config_metadata)

            if self.instance_config.GetOptionBool('InstanceSetup',
                                                  'set_host_keys'):
                host_key_types = self.instance_config.GetOptionString(
                    'InstanceSetup', 'host_key_types')
                self._SetSshHostKeys(host_key_types=host_key_types)

            if self.instance_config.GetOptionBool('InstanceSetup',
                                                  'set_boto_config'):
                self._SetupBotoConfig()

            # machineType is e.g. u'projects/00000000000000/machineTypes/n1-standard-1'
            machineType = self.metadata_dict['instance']['machineType'].split(
                '/')[-1]
            if machineType.startswith('e2-') and 'bsd' not in distro_name:
                # Not yet supported on BSD.
                subprocess.call(['sysctl', 'vm.overcommit_memory=1'])

        if self.instance_config.GetOptionBool('InstanceSetup',
                                              'optimize_local_ssd'):
            self._RunScript('google_optimize_local_ssd')

        if self.instance_config.GetOptionBool('InstanceSetup',
                                              'set_multiqueue'):
            self._RunScript('google_set_multiqueue')

        try:
            self.instance_config.WriteConfig()
        except (IOError, OSError) as e:
            self.logger.warning(str(e))
Example #11
    def __init__(self,
                 ip_forwarding_enabled,
                 proto_id,
                 ip_aliases,
                 target_instance_ips,
                 dhclient_script,
                 dhcp_command,
                 network_setup_enabled,
                 debug=False):
        """Constructor.

    Args:
      ip_forwarding_enabled: bool, True if ip forwarding is enabled.
      proto_id: string, the routing protocol identifier for Google IP changes.
      ip_aliases: bool, True if the guest should configure IP alias routes.
      target_instance_ips: bool, True supports internal IP load balancing.
      dhclient_script: string, the path to a dhclient script used by dhclient.
      dhcp_command: string, a command to enable Ethernet interfaces.
      network_setup_enabled: bool, True if network setup is enabled.
      debug: bool, True if debug output should write to the console.
    """
        facility = logging.handlers.SysLogHandler.LOG_DAEMON
        self.logger = logger.Logger(name='google-networking',
                                    debug=debug,
                                    facility=facility)
        self.ip_aliases = ip_aliases
        self.ip_forwarding_enabled = ip_forwarding_enabled
        self.network_setup_enabled = network_setup_enabled
        self.target_instance_ips = target_instance_ips
        self.dhclient_script = dhclient_script

        self.ip_forwarding = ip_forwarding.IpForwarding(proto_id=proto_id,
                                                        debug=debug)
        self.network_setup = network_setup.NetworkSetup(
            dhclient_script=dhclient_script,
            dhcp_command=dhcp_command,
            debug=debug)
        self.network_utils = network_utils.NetworkUtils(logger=self.logger)
        self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
        self.distro_utils = distro_utils.Utils(debug=debug)

        try:
            with file_utils.LockFile(LOCKFILE):
                self.logger.info('Starting Google Networking daemon.')
                timeout = 60 + random.randint(0, 30)
                self.watcher.WatchMetadata(
                    self.HandleNetworkInterfaces,
                    metadata_key=self.instance_metadata_key,
                    recursive=True,
                    timeout=timeout)
        except (IOError, OSError) as e:
            self.logger.warning(str(e))
  def setUp(self):
    self.mock_logger = mock.Mock()
    self.timeout = 60
    self.url = 'http://metadata.google.internal/computeMetadata/v1'
    self.params = {
        'alt': 'json',
        'last_etag': 0,
        'recursive': True,
        'timeout_sec': self.timeout,
        'wait_for_change': True,
    }
    self.mock_watcher = metadata_watcher.MetadataWatcher(
        logger=self.mock_logger, timeout=self.timeout)
  def __init__(self, dhcp_command=None, debug=False):
    """Constructor.

    Args:
      dhcp_command: string, a command to enable Ethernet interfaces.
      debug: bool, True if debug output should write to the console.
    """
    self.dhcp_command = dhcp_command
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
    self.logger = logger.Logger(
        name='network-setup', debug=debug, facility=facility)
    self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
    self.network_utils = network_utils.NetworkUtils(logger=self.logger)
    self._SetupNetworkInterfaces()
Example #14
    def __init__(self,
                 groups=None,
                 remove=False,
                 gpasswd_add_cmd=None,
                 gpasswd_remove_cmd=None,
                 groupadd_cmd=None,
                 useradd_cmd=None,
                 userdel_cmd=None,
                 usermod_cmd=None,
                 debug=False):
        """Constructor.

    Args:
      groups: string, a comma separated list of groups.
      remove: bool, True if deprovisioning a user should be destructive.
      useradd_cmd: string, command to create a new user.
      userdel_cmd: string, command to delete a user.
      usermod_cmd: string, command to modify user's groups.
      groupadd_cmd: string, command to add a new group.
      gpasswd_add_cmd: string, command to add an user to a group.
      gpasswd_remove_cmd: string, command to remove an user from a group.
      debug: bool, True if debug output should write to the console.
    """
        facility = logging.handlers.SysLogHandler.LOG_DAEMON
        self.logger = logger.Logger(name='google-accounts',
                                    debug=debug,
                                    facility=facility)
        self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
        self.utils = accounts_utils.AccountsUtils(
            logger=self.logger,
            groups=groups,
            remove=remove,
            gpasswd_add_cmd=gpasswd_add_cmd,
            gpasswd_remove_cmd=gpasswd_remove_cmd,
            groupadd_cmd=groupadd_cmd,
            useradd_cmd=useradd_cmd,
            userdel_cmd=userdel_cmd,
            usermod_cmd=usermod_cmd)
        self.oslogin = oslogin_utils.OsLoginUtils(logger=self.logger)

        try:
            with file_utils.LockFile(LOCKFILE):
                self.logger.info('Starting Google Accounts daemon.')
                timeout = 60 + random.randint(0, 30)
                self.watcher.WatchMetadata(self.HandleAccounts,
                                           recursive=True,
                                           timeout=timeout)
        except (IOError, OSError) as e:
            self.logger.warning(str(e))
  def __init__(self, debug=False):
    """Constructor.

    Args:
      debug: bool, True if debug output should write to the console.
    """
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
    self.logger = logger.Logger(
        name='google-clock-skew', debug=debug, facility=facility)
    self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
    try:
      with file_utils.LockFile(LOCKFILE):
        self.logger.info('Starting Google Clock Skew daemon.')
        self.watcher.WatchMetadata(
            self.HandleClockSync, metadata_key=self.drift_token,
            recursive=False)
    except (IOError, OSError) as e:
      self.logger.warning(str(e))
    def __init__(self, proto_id=None, debug=False):
        """Constructor.

    Args:
      proto_id: string, the routing protocol identifier for Google IP changes.
      debug: bool, True if debug output should write to the console.
    """
        facility = logging.handlers.SysLogHandler.LOG_DAEMON
        self.logger = logger.Logger(name='google-ip-forwarding',
                                    debug=debug,
                                    facility=facility)
        self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
        self.utils = ip_forwarding_utils.IpForwardingUtils(logger=self.logger,
                                                           proto_id=proto_id)
        try:
            with file_utils.LockFile(LOCKFILE):
                self.logger.info('Starting Google IP Forwarding daemon.')
                self.watcher.WatchMetadata(self.HandleForwardedIps,
                                           metadata_key=self.forwarded_ips,
                                           recursive=True)
        except (IOError, OSError) as e:
            self.logger.warning(str(e))
Example #17
def Main(argv, watcher=None, loop_watcher=True, os_system=os.system):
    """Runs the watcher.

  Args:
    argv: map => [string, string], Command line arguments
    watcher: MetadataWatcher, used to stub out MetadataWatcher for testing.
    loop_watcher: Boolean, whether or not to loop upon an update.
  """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    # This ensures we have fresh keys at container start. Doing it here because
    # Docker doesn't support multiple CMD/ENTRYPOINT statements in Dockerfiles.
    if argv.fetch_keys:
        os_system(
            'curl "https://www.gstatic.com/iap/verify/public_key-jwk" > ' +
            argv.output_key_file)

    polling_interval = argv.polling_interval

    # Currently, failsafe logic in the nginx module will start failing open if the
    # state file's modification time is more than two minutes in the past, so
    # we enforce that the polling interval is updated sensibly.
    if polling_interval < 1 or polling_interval > 119:
        polling_interval = DEFAULT_POLLING_INTERVAL_SEC

    watcher = watcher or metadata_watcher.MetadataWatcher()

    while True:
        value = watcher.GetMetadata(metadata_key='project/attributes/%s' %
                                    argv.iap_metadata_key,
                                    recursive=False,
                                    timeout=1)
        UpdateStateFileFromMetadata(value, argv.output_state_file)
        if not loop_watcher:
            break
        time.sleep(polling_interval)

def _RetryIfValueIsEmptyHandler(value):
    """Exits the process once a non-empty metadata value is observed."""
    if value:
        signal.alarm(0)
        sys.exit(0)
    else:
        logging.info('Retry due to empty value.')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Set up app updater.')
    parser.add_argument('--key',
                        type=str,
                        required=True,
                        help='Metadata key to be watched.')
    parser.add_argument('--timeout',
                        type=int,
                        required=False,
                        help='Number of seconds to watch.')
    args = parser.parse_args()

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    timeout = args.timeout or 600
    signal.signal(signal.SIGALRM, _ExitWithExceptionHandle)
    signal.alarm(timeout)

    watcher = metadata_watcher.MetadataWatcher()
    watcher.WatchMetadata(_RetryIfValueIsEmptyHandler,
                          metadata_key='instance/attributes/%s' % args.key,
                          recursive=False,
                          timeout=timeout)
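
The SIGALRM handler _ExitWithExceptionHandle registered above is not included in the snippet; a minimal sketch of one possible implementation follows, assuming its only job is to unwind the blocking WatchMetadata call once the timeout expires.

def _ExitWithExceptionHandle(signal_number, frame):
    # Hypothetical reconstruction: sys.exit raises SystemExit, which unwinds
    # the blocking WatchMetadata call when the alarm fires.
    logging.warning('Timed out waiting for the watched metadata value.')
    sys.exit(1)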