Example 1
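A unit test that builds two Key protobufs (one addressed by a numeric id, one by a string name), wraps them in MutationResults inside a CommitResponse, and checks that the function under test (invoked through the _call_fut test shim) returns the index-update count together with the mutated keys.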
    def test_it(self):
        from google.cloud.proto.datastore.v1 import datastore_pb2
        from google.cloud.proto.datastore.v1 import entity_pb2

        index_updates = 1337
        keys = [
            entity_pb2.Key(
                path=[
                    entity_pb2.Key.PathElement(
                        kind='Foo',
                        id=1234,
                    ),
                ],
            ),
            entity_pb2.Key(
                path=[
                    entity_pb2.Key.PathElement(
                        kind='Bar',
                        name='baz',
                    ),
                ],
            ),
        ]
        response = datastore_pb2.CommitResponse(
            mutation_results=[
                datastore_pb2.MutationResult(key=key) for key in keys
            ],
            index_updates=index_updates,
        )
        result = self._call_fut(response)
        self.assertEqual(result, (index_updates, keys))
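For context, a minimal sketch of the kind of helper this test exercises, assuming a function that unpacks a CommitResponse into its index-update count and the keys of its mutation results (the actual helper behind _call_fut is not shown in the snippet):

def _parse_commit_response(commit_response_pb):
    # Pull the key out of every mutation result in the response.
    keys = [result.key for result in commit_response_pb.mutation_results]
    return commit_response_pb.index_updates, keys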
Example 2
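A test factory that builds the smallest possible key: a single path element carrying only a numeric id, with no kind, partition, or ancestor set.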
def _make_key(id_):
    from google.cloud.proto.datastore.v1 import entity_pb2

    key = entity_pb2.Key()
    elem = key.path.add()
    elem.id = id_
    return key
Example 3
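A test that exercises helper.is_key_valid against a range of keys. A key is valid only when its final path element is complete, i.e. it carries either a name or an id; keys with an incomplete trailing element, or with no path at all, are rejected regardless of ancestors.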
    def test_is_key_valid(self):
        # Imports assumed from the surrounding test module (this snippet
        # appears to come from apache_beam's datastore helper tests):
        from apache_beam.io.gcp.datastore.v1 import helper
        from google.cloud.proto.datastore.v1 import entity_pb2
        from googledatastore import helper as datastore_helper

        key = entity_pb2.Key()
        # Complete with name, no ancestor
        datastore_helper.add_key_path(key, 'kind', 'name')
        self.assertTrue(helper.is_key_valid(key))

        key = entity_pb2.Key()
        # Complete with id, no ancestor
        datastore_helper.add_key_path(key, 'kind', 12)
        self.assertTrue(helper.is_key_valid(key))

        key = entity_pb2.Key()
        # Incomplete, no ancestor
        datastore_helper.add_key_path(key, 'kind')
        self.assertFalse(helper.is_key_valid(key))

        key = entity_pb2.Key()
        # Complete with name and ancestor
        datastore_helper.add_key_path(key, 'kind', 'name', 'kind2', 'name2')
        self.assertTrue(helper.is_key_valid(key))

        key = entity_pb2.Key()
        # Complete with id and ancestor
        datastore_helper.add_key_path(key, 'kind', 'name', 'kind2', 123)
        self.assertTrue(helper.is_key_valid(key))

        key = entity_pb2.Key()
        # Incomplete with ancestor
        datastore_helper.add_key_path(key, 'kind', 'name', 'kind2')
        self.assertFalse(helper.is_key_valid(key))

        key = entity_pb2.Key()
        self.assertFalse(helper.is_key_valid(key))
Example 4
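A test factory that builds a single-element key with an explicit project_id on its partition, plus a kind and numeric id on the path element.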
def _make_key(kind, id_, project):
    from google.cloud.proto.datastore.v1 import entity_pb2

    key = entity_pb2.Key()
    key.partition_id.project_id = project
    elem = key.path.add()
    elem.kind = kind
    elem.id = id_
    return key
Example 5
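A test factory that wraps a freshly built key in a MutationResult, the per-mutation record that a Datastore commit returns.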
def _make_mutation(id_):
    from google.cloud.proto.datastore.v1 import datastore_pb2
    from google.cloud.proto.datastore.v1 import entity_pb2

    key = entity_pb2.Key()
    key.partition_id.project_id = 'PROJECT'
    elem = key.path.add()
    elem.kind = 'Kind'
    elem.id = id_
    return datastore_pb2.MutationResult(key=key)
Example 6
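A to_protobuf method on a lightweight key wrapper that serializes it into an entity_pb2.Key, deliberately leaving partition_id.project_id unset (per the inline comment) and writing the path element's id only when one is present.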
    def to_protobuf(self):
        from google.cloud.proto.datastore.v1 import entity_pb2

        key = self._key = entity_pb2.Key()
        # Don't assign it, because it will just get ripped out
        # key.partition_id.project_id = self.project

        element = key.path.add()
        element.kind = self._kind
        if self._id is not None:
            element.id = self._id

        return key
Example 7
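A test factory that converts a declarative description of a key (project, namespace, and a path of {'kind': ..., 'id'/'name': ...} dicts) into the corresponding entity_pb2.Key protobuf.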
    def _makePB(self, project=None, namespace=None, path=()):
        from google.cloud.proto.datastore.v1 import entity_pb2

        pb = entity_pb2.Key()
        if project is not None:
            pb.partition_id.project_id = project
        if namespace is not None:
            pb.partition_id.namespace_id = namespace
        for elem in path:
            added = pb.path.add()
            added.kind = elem['kind']
            if 'id' in elem:
                added.id = elem['id']
            if 'name' in elem:
                added.name = elem['name']
        return pb
Example 8
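Builds an ancestor query: the ancestor key is assembled with datastore_helper.add_key_path (with an optional namespace on its partition), and a HAS_ANCESTOR property filter on __key__ restricts the query to that entity group.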
def make_ancestor_query(kind, namespace, ancestor):
  """Creates a Cloud Datastore ancestor query.

  The returned query will fetch all the entities that have the parent key name
  set to the given `ancestor`.
  """
  # Imports assumed from the surrounding module (this function appears to come
  # from apache_beam's datastore_wordcount example):
  from google.cloud.proto.datastore.v1 import entity_pb2
  from google.cloud.proto.datastore.v1 import query_pb2
  from google.cloud.proto.datastore.v1.query_pb2 import PropertyFilter
  from googledatastore import helper as datastore_helper

  ancestor_key = entity_pb2.Key()
  datastore_helper.add_key_path(ancestor_key, kind, ancestor)
  if namespace is not None:
    ancestor_key.partition_id.namespace_id = namespace

  query = query_pb2.Query()
  query.kind.add().name = kind

  datastore_helper.set_property_filter(
      query.filter, '__key__', PropertyFilter.HAS_ANCESTOR, ancestor_key)

  return query
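For illustration, a sketch of how this query might feed a pipeline, assuming the older apache_beam.io.gcp.datastore.v1 connector this snippet appears to target (the project, kind, and ancestor values below are hypothetical):

import apache_beam as beam
from apache_beam.io.gcp.datastore.v1.datastoreio import ReadFromDatastore

project = 'my-project'  # hypothetical project id
query = make_ancestor_query('Post', None, 'root')  # hypothetical arguments

with beam.Pipeline() as p:
    # Read every matching entity in the ancestor's entity group.
    entities = p | 'read from datastore' >> ReadFromDatastore(project, query)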
Example 9
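A complete Key.to_protobuf implementation, apparently from the google.cloud.datastore client: the project and optional namespace go on the partition, and each {kind, id/name} dict in the key's path becomes one path element.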
    def to_protobuf(self):
        """Return a protobuf corresponding to the key.

        :rtype: :class:`.entity_pb2.Key`
        :returns: The protobuf representing the key.
        """
        key = _entity_pb2.Key()
        key.partition_id.project_id = self.project

        if self.namespace:
            key.partition_id.namespace_id = self.namespace

        for item in self.path:
            element = key.path.add()
            if 'kind' in item:
                element.kind = item['kind']
            if 'id' in item:
                element.id = item['id']
            if 'name' in item:
                element.name = item['name']

        return key
Example 10
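An end-to-end Flask endpoint that validates a JSON request, records a MigrationHistory entity in Datastore via a raw insert Mutation, and then hands the actual migration off to a Celery task so the HTTP request returns immediately.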
# Assumed imports for this Flask view (not shown in the original snippet):
# json, pickle, and datetime from the standard library; Flask's request and
# Response; entity_pb2 and datastore_pb2's Mutation from
# google.cloud.proto.datastore.v1; datastore_helper, apache_helper, and
# AdaptiveThrottler from the googledatastore/apache_beam packages; plus the
# app's own migration module, PROJECT setting, rpc_stats_callback, and the
# run_dataflow_migration Celery task.
def run_data_migration():
    request_data = json.loads(request.get_data())

    # Required fields
    fields = [
        'name',
        'function_kwargs',
        'user'
    ]

    # Some basic validation
    for f in fields:
        if f not in request_data:
            resp_data = json.dumps(
                {
                    'error': 'The ' + f + ' field is required.'
                }
            )
            resp = Response(resp_data, status=400, mimetype='application/json')
            return resp

    if request_data['name'] not in migration.choices:
        resp_data = json.dumps(
            {
                'error': 'The migration name is not valid.'
            }
        )
        resp = Response(resp_data, status=400, mimetype='application/json')
        return resp

    migration_name = request_data['name']
    function_kwargs = request_data['function_kwargs'] or {}
    user = request_data['user']

    function_kwargs.update({'name': migration_name})

    # Create a MigrationHistory entity to keep track of the migration status
    # set the project
    project = PROJECT or 'meridianedit-staging'

    # Create entity key
    partition_id = entity_pb2.PartitionId(project_id=project, namespace_id="")
    migration_history_obj_id = datetime.now().strftime("%Y%m%d%H%M%S")
    path_element = entity_pb2.Key.PathElement(kind="MigrationHistory", name=migration_history_obj_id)
    key = entity_pb2.Key(partition_id=partition_id, path=[path_element])

    # Create entity and give it properties
    entity = entity_pb2.Entity(key=key)
    property_dict = {
        'name': migration_name,
        'function_kwargs': json.dumps(function_kwargs),
        'started_by': user,
        'status': 'running',
        'created': datetime.now()
    }
    datastore_helper.add_properties(entity, property_dict)

    # Add entity to datastore
    mutations = [Mutation(insert=entity)]
    client = apache_helper.get_datastore(project)
    throttler = AdaptiveThrottler(window_ms=120000, bucket_ms=1000, overload_ratio=1.25)
    apache_helper.write_mutations(client, project, mutations, throttler, rpc_stats_callback)

    # Call the migration with any given function kwargs
    migration_kwargs = {
        'migration_history_obj': migration_history_obj_id,
    }
    migration_kwargs.update(function_kwargs)

    # Run the migration in a Celery task worker to prevent it from timing
    # out this HTTP connection. Also monitor the task so we can update the
    # migration status.
    run_dataflow_migration.delay(pickle.dumps(entity), **migration_kwargs)

    resp_data = {
        'migration_history_obj_id': migration_history_obj_id
    }

    # A default 500 error message is returned if any of this breaks
    return Response(json.dumps(resp_data), status=200, mimetype='application/json')