Ejemplo n.º 1
0
    def _init_subscriber_client(self, auth_client):
        """Create a Pub/Sub subscriber client and ensure the subscription exists.

        Args:
            auth_client: auth client object; its optional ``creds`` attribute
                (may be ``None``, e.g. when targeting the emulator) is passed
                as the client credentials.

        Returns:
            tuple: ``(client, flow_control)`` — the subscriber client and a
                :class:`types.FlowControl` capped at
                ``config['max_messages']`` (default 25).

        Raises:
            exceptions.GCPGordonError: if the configured topic does not exist
                or subscription creation fails for any other reason.
        """
        # Silly emulator constraints
        creds = getattr(auth_client, 'creds', None)
        client = pubsub.SubscriberClient(credentials=creds)

        try:
            client.create_subscription(self.config['subscription'],
                                       self.config['topic'])

        except google_exceptions.AlreadyExists:
            # subscription already exists
            pass

        except google_exceptions.NotFound as e:
            msg = f'Topic "{self.config["topic"]}" does not exist.'
            logging.error(msg, exc_info=e)
            # Chain the original error so tracebacks keep the root cause.
            raise exceptions.GCPGordonError(msg) from e

        except Exception as e:
            sub = self.config['subscription']
            msg = f'Error trying to create subscription "{sub}": {e}'
            logging.error(msg, exc_info=e)
            raise exceptions.GCPGordonError(msg) from e

        max_messages = self.config.get('max_messages', 25)
        flow_control = types.FlowControl(max_messages=max_messages)

        logging.info(f'Starting a "{self.config["subscription"]}" subscriber '
                     f'to "{self.config["topic"]}" topic.')

        return client, flow_control
Ejemplo n.º 2
0
 def _load_keyfile(self, keyfile):
     if not keyfile:
         return None
     try:
         with open(keyfile, 'r') as f:
             return json.load(f)
     except FileNotFoundError as e:
         msg = f'Keyfile {keyfile} was not found.'
         logging.error(msg, exc_info=e)
         raise exceptions.GCPGordonError(msg)
     except json.JSONDecodeError as e:
         msg = f'Keyfile {keyfile} is not valid JSON.'
         logging.error(msg, exc_info=e)
         raise exceptions.GCPGordonError(msg)
Ejemplo n.º 3
0
    async def _poll_for_instance_data(self, resource_name, msg_logger):
        """Fetch instance data, retrying with exponential backoff.

        Polls the Compute API for ``resource_name`` up to
        ``config['retries']`` times, until ``_check_instance_data`` accepts
        the response.

        Args:
            resource_name: resource path appended to the Compute v1 base URL.
            msg_logger: per-message logger used for per-attempt debug output.

        Returns:
            The instance data once it contains all necessary information.

        Raises:
            exceptions.GCPGordonError: when an HTTP error occurs or all
                retries are exhausted.
        """
        url = f'https://www.googleapis.com/compute/v1/{resource_name}'
        last_error = None
        delay = 2
        max_attempts = self.config['retries']

        attempt = 0
        while attempt < max_attempts:
            attempt += 1
            try:
                msg_logger.debug(f'Attempt {attempt}: fetching {url}')
                data = await self._http_client.get_json(url)
                self._check_instance_data(data)
                return data
            except exceptions.GCPHTTPError as err:
                # Hard failure: stop retrying immediately.
                last_error = err
                break
            except (KeyError, IndexError) as err:
                # Data incomplete; back off before the next attempt (the
                # sleep is skipped after the final failed attempt).
                last_error = err
                if attempt < max_attempts:
                    await asyncio.sleep(delay)
                    delay = delay ** 2

        raise exceptions.GCPGordonError(
            f'Could not get necessary information for {resource_name}: '
            f'{last_error.__class__.__name__}: {last_error}')
Ejemplo n.º 4
0
    def _load_schemas(self):
        schema_path = pathlib.Path(self.HERE, self.SCHEMA_DIR).absolute()
        schema_path_contents = schema_path.glob('*.json')

        schemas = {}
        for schema_file in schema_path_contents:
            schema_name = schema_file.name.split('.')[0]
            try:
                with open(schema_file, 'r') as f:
                    schemas[schema_name] = json.load(f)
                    logging.info(
                        f'Successfully loaded schema "{schema_name}".')

            except (FileNotFoundError, json.JSONDecodeError) as e:
                msg = f'Error loading schema "{schema_name}": {e}.'
                logging.error(msg, exc_info=e)
                raise exceptions.GCPGordonError(msg)

        if not schemas:
            msg = 'Unable to load any schemas.'
            logging.error(msg)
            raise exceptions.GCPGordonError(msg)

        return schemas
Ejemplo n.º 5
0
    def _manage_subs(self):
        """Open the Pub/Sub streaming pull and block until it fails.

        Raises:
            exceptions.GCPGordonError: if the subscription polling future
                fails for any reason; the subscriber client is closed first.
        """
        # NOTE: automatically extends deadline in the background;
        #       must `nack()` if can't finish. We don't proactively
        #       `nack` in this plugin since it'll just get redelivered.
        #       A dead process will also timeout the message, with which
        #       it will redeliver.
        future = self._subscriber.subscribe(self._subscription,
                                            self._schedule_pubsub_msg,
                                            flow_control=self._flow_control)

        try:
            # we're running in a threadpool because this is blocking
            future.result()
        except Exception as e:
            # Close the client before surfacing the error so the underlying
            # channel isn't leaked.
            self._subscriber.close()
            logging.error(f'Issue polling subscription: {e}', exc_info=e)
            # Chain the original exception to preserve the full traceback.
            raise exceptions.GCPGordonError(e) from e