Example #1
    def test_create_wo_topic_name(self):
        from google.cloud.exceptions import BadRequest
        from google.cloud.storage.notification import NONE_PAYLOAD_FORMAT

        client = mock.Mock(spec=["_post_resource", "project"])
        client.project = self.BUCKET_PROJECT
        client._post_resource.side_effect = BadRequest(
            "Invalid Google Cloud Pub/Sub topic.")
        bucket = self._make_bucket(client)
        notification = self._make_one(bucket, None)

        with self.assertRaises(BadRequest):
            notification.create()

        expected_topic = self.TOPIC_REF_FMT.format(self.BUCKET_PROJECT, "")
        expected_data = {
            "topic": expected_topic,
            "payload_format": NONE_PAYLOAD_FORMAT,
        }
        expected_query_params = {}
        client._post_resource.assert_called_once_with(
            self.CREATE_PATH,
            expected_data,
            query_params=expected_query_params,
            timeout=self._get_default_timeout(),
            retry=None,
        )
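
This test pins down the exact POST payload the client sends before the server rejects the topic. In application code the same failure surfaces directly from create(); a minimal sketch, assuming google-cloud-storage's public Bucket.notification() API (the bucket name is hypothetical):

from google.cloud import storage
from google.cloud.exceptions import BadRequest

client = storage.Client()
bucket = client.bucket("my-bucket")  # hypothetical bucket name
# A missing topic name yields an invalid topic reference, which the
# server rejects with 400 BadRequest, as the test above simulates.
notification = bucket.notification(topic_name=None)

try:
    notification.create()
except BadRequest as exc:
    print("Pub/Sub topic rejected:", exc)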
Example #2
    def test_cost_usage_source_is_reachable_dataset_bad_format(self, mock_auth, mock_discovery, mock_bigquery):
        """Test that cost_usage_source_is_reachable raises the appropriate error when the dataset format is incorrect."""
        mock_bigquery.Client.side_effect = BadRequest(message="Incorrect dataset format")
        gcp_creds = MagicMock()
        mock_auth.return_value = (gcp_creds, MagicMock())
        mock_discovery.build.return_value.projects.return_value.testIamPermissions.return_value.execute.return_value.get.return_value = (  # noqa: E501
            REQUIRED_IAM_PERMISSIONS
        )
        billing_source_param = {"dataset": FAKE.word()}
        credentials_param = {"project_id": FAKE.word()}
        provider = GCPProvider()
        with self.assertRaises(ValidationError):
            provider.cost_usage_source_is_reachable(credentials_param, billing_source_param)
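
The provider converts the client library's BadRequest into its own ValidationError. The shape of that translation looks roughly like the sketch below (check_dataset is a hypothetical helper standing in for GCPProvider's real logic, with ValueError in place of the framework's ValidationError):

from google.cloud import bigquery
from google.cloud.exceptions import BadRequest

def check_dataset(project_id, dataset_id):
    # Hypothetical helper: surface a malformed dataset as a validation error.
    try:
        client = bigquery.Client(project=project_id)
        client.get_dataset(dataset_id)
    except BadRequest as err:
        raise ValueError("Incorrect dataset format: {}".format(err)) from err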
Example #3
    def run_query(self, project, request_pb):
        """Perform a ``runQuery`` request.

        :type project: string
        :param project: The project to connect to. This is
                        usually your project name in the cloud console.

        :type request_pb: :class:`._generated.datastore_pb2.RunQueryRequest`
        :param request_pb: The request protobuf object.

        :rtype: :class:`._generated.datastore_pb2.RunQueryResponse`
        :returns: The returned protobuf response object.
        """
        request_pb.project_id = project
        try:
            return self._stub.RunQuery(request_pb)
        except GrpcRendezvous as exc:
            error_code = exc.code()
            if error_code == StatusCode.INVALID_ARGUMENT:
                raise BadRequest(exc.details())
            raise
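
The pattern above (catch the gRPC rendezvous, inspect the status code, and re-raise INVALID_ARGUMENT as BadRequest) generalizes to any stub call. A standalone sketch using the modern grpc.RpcError surface, where stub_method is a placeholder for any unary RPC method:

import grpc

from google.cloud.exceptions import BadRequest

def call_with_mapping(stub_method, request_pb):
    # Translate gRPC INVALID_ARGUMENT into the HTTP-flavored BadRequest;
    # every other status code propagates unchanged.
    try:
        return stub_method(request_pb)
    except grpc.RpcError as exc:
        if exc.code() == grpc.StatusCode.INVALID_ARGUMENT:
            raise BadRequest(exc.details())
        raise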
Example #4
    def _api_request(**kw):
        # Record the outgoing request kwargs for later assertions, then
        # fail the way the backend would for an incomplete request.
        _requested.append(kw)
        raise BadRequest('missing dns_name / description')
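
A rough usage sketch for a failing hook like this one: a single call both records its kwargs and raises, so the test can assert on the captured request after catching the error (the module-level _requested list is assumed from the surrounding test):

_requested = []

try:
    _api_request(dns_name=None)
except BadRequest:
    pass

# The hook captured the outgoing kwargs before failing.
assert _requested == [{'dns_name': None}]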
Example #5
    def populate_table(self, table_path, schema, data=[], make_immediately_available=True,
                       replace_existing_table=False):
        # type: (str, List[SchemaField], Optional[List[Any]], Optional[bool], Optional[bool]) -> None
        """Creates a table and populates it with a list of rows.

        If make_immediately_available is False, the table will be created using streaming inserts.
        Note that streaming inserts are immediately available for querying, but not for exporting or
        copying, so if you need that capability you should set make_immediately_available to True.
        https://cloud.google.com/bigquery/streaming-data-into-bigquery

        If the table already exists, a RuntimeError is raised unless
        replace_existing_table is True.

        Args:
          table_path: A string of the form '<dataset id>.<table name>'
              or '<project id>.<dataset id>.<table name>'.
          schema: A list of SchemaFields to represent the table's schema.
          data: A list of rows, each of which corresponds to a row to insert into the table.
          make_immediately_available: If False, the table won't immediately be available for
              copying or exporting, but will be available for querying. If True, after this
              operation returns, it will be available for copying and exporting too.
          replace_existing_table: If set to True, the table at table_path will be deleted and
              recreated if it's already present.

        Raises:
            RuntimeError: If the table at table_path already exists and
                replace_existing_table is False.
        """
        # Use the Table object so we can pass through the schema.
        table = Table(self.get_table_reference_from_path(table_path), schema)
        if self.table_exists(table):
            if replace_existing_table:
                self.delete_table(table)
            else:
                raise RuntimeError('The table {} already exists.'.format(table_path))
        self.create_table(table)

        if data:
            if make_immediately_available:
                output = cStringIO.StringIO()

                csv_out = csv.writer(output)
                for row in data:
                    csv_out.writerow(row)

                job_config = LoadJobConfig()
                job_config.source_format = 'text/csv'
                # By default this does six retries. It does not accept any other timeout or
                # retry parameters.
                job = self.gclient.load_table_from_file(output, table.reference,
                                                        job_config=job_config,
                                                        rewind=True)
                try:
                    job.result()
                except BadRequest as e:
                    raise BadRequest('{} Error stream: {}'.format(str(e), job.error_result))

                output.close()
            else:
                self._stream_chunks_of_rows(table, data, schema)
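
A call sketch, assuming an instance (bq_utils here, a hypothetical name) that exposes populate_table, with the standard bigquery SchemaField for the schema:

from google.cloud.bigquery import SchemaField

schema = [
    SchemaField('name', 'STRING'),
    SchemaField('count', 'INTEGER'),
]
# make_immediately_available=True forces the load-job path (file upload),
# so the rows can be copied or exported right away; False would use
# streaming inserts, which are initially available for querying only.
bq_utils.populate_table(
    'my_project.my_dataset.my_table',  # hypothetical table path
    schema,
    data=[('alpha', 1), ('beta', 2)],
    make_immediately_available=True,
)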