def update_cataloged_instance(key):
  """Syncs an Instance entity with Machine Provider's view of the machine.

  Records Pub/Sub subscription metadata reported by the catalog, or marks
  the entity for deletion when the catalog no longer knows the machine.

  Args:
    key: ndb.Key for a models.Instance entity.
  """
  instance = key.get()
  if instance is None:
    logging.warning('Instance does not exist: %s', key)
    return
  if not instance.cataloged:
    return

  hostname = key.id()
  try:
    catalog_entry = machine_provider.retrieve_machine(hostname)
    subscription = catalog_entry.get('pubsub_subscription')
    if subscription and not instance.pubsub_subscription:
      # The catalog reports a subscription the entity doesn't know about yet.
      metrics.send_machine_event('SUBSCRIPTION_RECEIVED', hostname)
      instances.add_subscription_metadata(
          key,
          catalog_entry['pubsub_subscription_project'],
          subscription,
      )
      metrics.send_machine_event('METADATA_UPDATE_PROPOSED', hostname)
  except net.NotFoundError:
    # Machine Provider no longer has this machine in its catalog.
    instances.mark_for_deletion(key)
def _delete(instance_template_revision, instance_group_manager, instance):
  """Deletes the given instance via its managed instance group.

  Args:
    instance_template_revision: models.InstanceTemplateRevision.
    instance_group_manager: models.InstanceGroupManager.
    instance: models.Instance.
  """
  api = gce.Project(instance_template_revision.project)
  try:
    result = api.delete_instances(
        instance_group_managers.get_name(instance_group_manager),
        instance_group_manager.key.id(),
        [instance.url],
    )
    if result['status'] != 'DONE':
      # The status describes scheduling the deletion in the managed instance
      # group, not the deletion itself.
      # Fix: the original logged `parent.key`, but no `parent` exists in this
      # scope (NameError on the failure path). Log the instance group
      # manager's key, matching the sibling _delete implementation.
      logging.warning(
          'Instance group manager operation failed: %s\n%s',
          instance_group_manager.key,
          json.dumps(result, indent=2),
      )
    else:
      metrics.send_machine_event('DELETION_SCHEDULED', instance.key.id())
  except net.Error as e:
    if e.status_code == 400:
      # A 400 is taken to mean the instance is already gone.
      metrics.send_machine_event('DELETION_SUCCEEDED', instance.key.id())
    else:
      raise
def check_deleted_instance(key):
  """Marks the given Instance as deleted if its GCE instance is gone.

  Args:
    key: ndb.Key for a models.Instance entity.
  """
  entity = key.get()
  if not entity or entity.deleted:
    return
  if not entity.pending_deletion:
    logging.warning('Instance not pending deletion: %s', key)
    return
  if not entity.url:
    logging.warning('Instance URL unspecified: %s', key)
    return

  now = utils.utcnow()
  if exists(entity.url):
    return

  # The GCE instance can't be found, so treat it as deleted.
  if entity.deletion_ts:
    elapsed = (now - entity.deletion_ts).total_seconds()
    metrics.instance_deletion_time.add(
        elapsed,
        fields={
            'zone': entity.instance_group_manager.id(),
        },
    )
  set_instance_deleted(key, False)
  metrics.send_machine_event('DELETION_SUCCEEDED', entity.hostname)
def cleanup_drained_instance(key):
  """Deletes the given drained Instance.

  Args:
    key: ndb.Key for a models.Instance entity.
  """
  entity = key.get()
  if not entity or entity.deleted:
    return
  if not entity.url:
    logging.warning('Instance URL unspecified: %s', key)
    return

  igm = entity.instance_group_manager.get()
  if not igm:
    logging.warning(
        'InstanceGroupManager does not exist: %s',
        entity.instance_group_manager,
    )
    return

  itr = igm.key.parent().get()
  if not itr:
    logging.warning(
        'InstanceTemplateRevision does not exist: %s',
        igm.key.parent(),
    )
    return

  template = itr.key.parent().get()
  if not template:
    logging.warning(
        'InstanceTemplate does not exist: %s',
        itr.key.parent(),
    )
    return

  # Drained either because the group manager is drained in its revision, or
  # because the whole revision is drained in its template.
  drained = igm.key in itr.drained or itr.key in template.drained
  if not drained:
    logging.warning('Instance is not drained: %s', key)
    return

  now = utils.utcnow()
  if exists(entity.url):
    return

  # The GCE instance can't be found, so treat it as deleted.
  if entity.deletion_ts:
    metrics.instance_deletion_time.add(
        (now - entity.deletion_ts).total_seconds(),
        fields={
            'zone': entity.instance_group_manager.id(),
        },
    )
  set_instance_deleted(key, True)
  metrics.send_machine_event('DELETION_SUCCEEDED', entity.hostname)
def catalog(key):
  """Adds the given instance to the Machine Provider catalog.

  Args:
    key: ndb.Key for a models.Instance entity.
  """
  entity = key.get()
  if not entity:
    logging.warning('Instance does not exist: %s', key)
    return
  if entity.cataloged:
    return
  if entity.pending_deletion:
    logging.warning('Instance pending deletion: %s', key)
    return

  igm = entity.instance_group_manager.get()
  if not igm:
    logging.warning(
        'InstanceGroupManager does not exist: %s',
        entity.instance_group_manager,
    )
    return

  itr = igm.key.parent().get()
  if not itr:
    logging.warning(
        'InstanceTemplateRevision does not exist: %s',
        igm.key.parent(),
    )
    return
  if not itr.service_accounts:
    logging.warning(
        'InstanceTemplateRevision service account unspecified: %s',
        itr.key,
    )
    return

  logging.info('Cataloging Instance: %s', key)
  response = machine_provider.add_machine(
      extract_dimensions(entity, itr),
      get_policies(key, itr.service_accounts[0].name),
  )

  error = response.get('error')
  # HOSTNAME_REUSE is assumed to indicate a duplicate of an earlier request,
  # so it's treated as success.
  if error and error != 'HOSTNAME_REUSE':
    logging.warning(
        'Error adding Instance to catalog: %s\nError: %s',
        key,
        error,
    )
    return

  set_cataloged(key)
  metrics.send_machine_event('CATALOGED', entity.hostname)
def update(key):
  """Schedules the active metadata update for the given instance.

  Args:
    key: ndb.Key for a models.Instance entity.
  """
  instance = key.get()
  if not instance:
    logging.warning('Instance does not exist: %s', key)
    return
  if not instance.active_metadata_update:
    logging.warning('Instance active metadata update unspecified: %s', key)
    return
  if instance.active_metadata_update.url:
    # An operation for this update has already been scheduled.
    return

  igm = key.parent().get()
  if not igm:
    logging.warning('InstanceGroupManager does not exist: %s', key.parent())
    return

  itr = igm.key.parent().get()
  if not itr:
    logging.warning(
        'InstanceTemplateRevision does not exist: %s', igm.key.parent())
    return
  if not itr.project:
    logging.warning(
        'InstanceTemplateRevision project unspecified: %s', itr.key)
    return

  # Fetch the live instance resource for the current metadata fingerprint.
  result = net.json_request(instance.url, scopes=gce.AUTH_SCOPES)
  api = gce.Project(itr.project)
  operation = api.set_metadata(
      igm.key.id(),
      key.id(),
      result['metadata']['fingerprint'],
      apply_metadata_update(
          result['metadata']['items'],
          instance.active_metadata_update.metadata),
  )
  metrics.send_machine_event('METADATA_UPDATE_SCHEDULED', key.id())
  associate_metadata_operation(
      key,
      utilities.compute_checksum(instance.active_metadata_update.metadata),
      operation.url,
  )
def ensure_entity_exists(key, url):
  """Ensures an Instance entity exists, creating it if necessary.

  Args:
    key: ndb.Key for a models.Instance entity.
    url: URL for the instance.
  """
  existing = yield key.get_async()
  if existing:
    logging.info('Instance entity already exists: %s', key)
    return

  logging.info('Creating Instance entity: %s', key)
  yield models.Instance(key=key, url=url).put_async()
  metrics.send_machine_event('CREATED', key.id())
def mark_for_deletion(key):
  """Marks the given instance for deletion.

  Args:
    key: ndb.Key for a models.Instance entity.
  """
  instance = key.get()
  if not instance:
    logging.warning('Instance does not exist: %s', key)
    return
  if instance.pending_deletion:
    # Already marked; nothing to do.
    return

  logging.info('Marking Instance for deletion: %s', key)
  instance.pending_deletion = True
  instance.put()
  metrics.send_machine_event('DELETION_PROPOSED', key.id())
def ensure_entity_exists(key, url, instance_group_manager):
  """Ensures an Instance entity exists, creating it if necessary.

  Args:
    key: ndb.Key for a models.Instance entity.
    url: URL for the instance.
    instance_group_manager: ndb.Key for the models.InstanceGroupManager the
      instance was created from.
  """
  existing = yield key.get_async()
  if existing:
    return

  created = yield _ensure_entity_exists(key, url, instance_group_manager)
  if created:
    metrics.send_machine_event('CREATED', gce.extract_instance_name(url))
def cleanup_deleted_instance(key):
  """Removes the entity for an Instance already marked deleted.

  Args:
    key: ndb.Key for a models.Instance entity.
  """
  entity = key.get()
  if not entity:
    return
  if not entity.deleted:
    logging.warning('Instance not deleted: %s', key)
    return

  # Capture the hostname before the entity disappears.
  hostname = entity.hostname
  logging.info('Deleting Instance entity: %s', key)
  key.delete()
  metrics.send_machine_event('DELETED', hostname)
def mark_for_deletion(key):
  """Marks the given instance for deletion.

  Args:
    key: ndb.Key for a models.Instance entity.
  """
  entity = key.get()
  if not entity:
    logging.warning('Instance does not exist: %s', key)
    return
  if entity.pending_deletion:
    # Already marked; nothing to do.
    return

  logging.info('Marking Instance for deletion: %s', key)
  # Clear any lease expiration when proposing deletion.
  entity.lease_expiration_ts = None
  entity.pending_deletion = True
  entity.put()
  metrics.send_machine_event('DELETION_PROPOSED', entity.hostname)
def _delete(instance_template_revision, instance_group_manager, instance):
  """Deletes the given instance via its managed instance group.

  Args:
    instance_template_revision: models.InstanceTemplateRevision.
    instance_group_manager: models.InstanceGroupManager.
    instance: models.Instance.
  """
  # Repeated deletion calls are fine: we keep calling until the instance is
  # no longer detected. The deletion time matters though, so deletion_ts is
  # only ever written once.
  api = gce.Project(instance_template_revision.project)
  try:
    now = utils.utcnow()
    result = api.delete_instances(
        instance_group_managers.get_name(instance_group_manager),
        instance_group_manager.key.id(),
        [instance.url],
    )
    if result['status'] == 'DONE':
      # DONE only means the deletions were scheduled in the managed instance
      # group; the actual deletions may still fail later.
      if not instance.deletion_ts:
        set_deletion_time(instance.key, now)
      metrics.send_machine_event('DELETION_SCHEDULED', instance.hostname)
    else:
      # Any other status means the deletions won't even be attempted.
      logging.warning(
          'Instance group manager operation failed: %s\n%s',
          instance_group_manager.key,
          json.dumps(result, indent=2),
      )
  except net.Error as e:
    if e.status_code != 400:
      raise
    # A 400 is taken to mean the instance is already gone.
    if not instance.deletion_ts:
      set_deletion_time(instance.key, now)
def compress(key):
  """Sets the active metadata update for the given instance.

  Args:
    key: ndb.Key for a models.Instance entity.
  """
  instance = key.get()
  if not instance:
    logging.warning('Instance does not exist: %s', key)
    return
  if instance.active_metadata_update:
    logging.warning('Instance already has active metadata update: %s', key)
    return
  if not instance.pending_metadata_updates:
    # Nothing pending to promote.
    return

  compress_pending_metadata_updates(key)
  metrics.send_machine_event('METADATA_UPDATE_READY', key.id())
def check(key):
  """Checks the active metadata update operation.

  Reschedules the active metadata update if the operation failed.

  Args:
    key: ndb.Key for a models.Instance entity.
  """
  instance = key.get()
  if not instance:
    logging.warning('Instance does not exist: %s', key)
    return

  active_update = instance.active_metadata_update
  if not active_update:
    logging.warning('Instance active metadata operation unspecified: %s', key)
    return
  if not active_update.url:
    logging.warning(
        'Instance active metadata operation URL unspecified: %s', key)
    return

  result = net.json_request(active_update.url, scopes=gce.AUTH_SCOPES)
  if result['status'] != 'DONE':
    # The operation hasn't finished yet; try again later.
    return

  if result.get('error'):
    logging.warning(
        'Instance metadata operation failed: %s\n%s',
        key,
        json.dumps(result, indent=2),
    )
    metrics.send_machine_event('METADATA_UPDATE_FAILED', key.id())
    reschedule_active_metadata_update(key, active_update.url)
    metrics.send_machine_event('METADATA_UPDATE_READY', key.id())
  else:
    metrics.send_machine_event('METADATA_UPDATE_SUCCEEDED', key.id())
    clear_active_metadata_update(key, active_update.url)