def get_data(dataset_id):
    """Fetch every 'Hackathlon' entity from Cloud Datastore as JSON.

    Args:
        dataset_id: dataset name taken from the command line; installed
            as the active dataset for all subsequent datastore calls.

    Returns:
        A JSON-encoded list of {property_name: string_value} dicts, or
        None when the datastore RPC fails (errors are logged).
    """
    # Set the dataset from the command line parameters.
    datastore.set_options(dataset=dataset_id)
    try:
        req = datastore.RunQueryRequest()
        query = req.query
        query.kind.add().name = 'Hackathlon'
        results = datastore.run_query(req).batch.entity_result
        # Only string_value is read; non-string properties come back as
        # '' (proto default) -- TODO confirm all properties are strings.
        useful_res = [
            dict((prop.name, prop.value.string_value)
                 for prop in res.entity.property)
            for res in results
        ]
        return json.dumps(useful_res)
    except datastore.RPCError as e:
        logging.error('Error while doing datastore operation')
        logging.error('RPCError: %(method)s %(reason)s',
                      {'method': e.method, 'reason': e.reason})
        logging.error('HTTPError: %(status)s %(reason)s',
                      {'status': e.response.status,
                       'reason': e.response.reason})
        return
def wipeout(cls):
    """Deletes all entities of this type from the datastore."""
    _log.debug('%s.wipeout()', cls.__name__)
    batch_size = 500
    cursor = None
    while True:
        # Page through keys of this kind, batch_size at a time.
        query_req = googledatastore.RunQueryRequest()
        gql = query_req.gql_query
        gql.query_string = 'SELECT __key__ FROM %s LIMIT %d OFFSET @startCursor ' % (
            cls.__name__, batch_size)
        gql.allow_literal = True
        start_arg = gql.name_arg.add()
        start_arg.name = 'startCursor'
        if cursor is not None:
            start_arg.cursor = cursor
        else:
            start_arg.value.integer_value = 0
        resp = googledatastore.run_query(query_req)

        # Delete every key from this page in one non-transactional commit.
        commit_req = googledatastore.CommitRequest()
        commit_req.mode = googledatastore.CommitRequest.NON_TRANSACTIONAL
        doomed = [result.entity.key for result in resp.batch.entity_result]
        commit_req.mutation.delete.extend(doomed)
        googledatastore.commit(commit_req)

        batch = resp.batch
        if batch.more_results == googledatastore.QueryResultBatch.NO_MORE_RESULTS:
            break
        if not batch.entity_result:
            break
        cursor = batch.end_cursor
def related_kills(self, back_minutes=60, forward_minutes=15, system_ids=None):
    """Yields KillMail entities near this kill in time and system.

    Args:
        back_minutes: how far before this kill's time to search.
        forward_minutes: how far after this kill's time to search.
        system_ids: solar system ids to search; defaults to this
            kill's own solar system.

    Yields:
        KillMail instances, ordered by kill_time then kill_id within
        each system.
    """
    # Plain int instead of the Python 2-only long literal 1000000L:
    # arithmetic is identical in Python 2 and it now parses under Python 3.
    micros_per_minute = 60 * 1000000
    ts = _date_to_timestamp(self.kill_time)
    start = ts - back_minutes * micros_per_minute
    end = ts + forward_minutes * micros_per_minute
    if system_ids is None:
        system_ids = [self.solar_system_id]
    for system in system_ids:
        req = googledatastore.RunQueryRequest()
        query = req.gql_query
        query.query_string = ('SELECT * FROM KillMail '
                              'WHERE solar_system_id = @system '
                              'AND kill_time >= @startTime '
                              'AND kill_time < @endTime '
                              'ORDER BY kill_time, kill_id ')
        system_arg = query.name_arg.add()
        system_arg.name = 'system'
        system_arg.value.integer_value = system
        start_time_arg = query.name_arg.add()
        start_time_arg.name = 'startTime'
        start_time_arg.value.timestamp_microseconds_value = start
        end_time_arg = query.name_arg.add()
        end_time_arg.name = 'endTime'
        end_time_arg.value.timestamp_microseconds_value = end
        resp = googledatastore.run_query(req)
        for result in resp.batch.entity_result:
            entity = KillMail()
            entity._set_entity(result.entity)
            yield entity
def by_ship_type(cls, ship_type_id):
    """Yields LossMailAttributes entities matching ship_type_id, 50 per page."""
    _log.debug('LossMailAttributes.by_ship_type(%d)', ship_type_id)
    cursor = None
    while True:
        request = googledatastore.RunQueryRequest()
        gql = request.gql_query
        gql.query_string = ('SELECT * FROM LossMailAttributes '
                            'WHERE ship_type_id = @shipTypeId '
                            'LIMIT 50 OFFSET @startCursor ')
        gql.allow_literal = True
        type_arg = gql.name_arg.add()
        type_arg.name = 'shipTypeId'
        type_arg.value.integer_value = ship_type_id
        offset_arg = gql.name_arg.add()
        offset_arg.name = 'startCursor'
        # First page starts at offset 0; later pages resume from a cursor.
        if cursor is not None:
            offset_arg.cursor = cursor
        else:
            offset_arg.value.integer_value = 0
        response = googledatastore.run_query(request)
        batch = response.batch
        for entity_result in batch.entity_result:
            record = cls()
            record._set_entity(entity_result.entity)
            yield record
        if batch.more_results == googledatastore.QueryResultBatch.NO_MORE_RESULTS:
            break
        if not batch.entity_result:
            break
        cursor = batch.end_cursor
def wipeout(cls):
    """Deletes all entities of this type from the datastore.

    Pages through keys with a GQL OFFSET/cursor query, deleting up to
    batch_size entities per non-transactional commit.
    """
    _log.debug('%s.wipeout()', cls.__name__)
    batch_size = 500
    cursor = None
    while True:
        req = googledatastore.RunQueryRequest()
        query = req.gql_query
        query.query_string = 'SELECT __key__ FROM %s LIMIT %d OFFSET @startCursor ' % (
            cls.__name__, batch_size)
        query.allow_literal = True
        cursor_arg = query.name_arg.add()
        cursor_arg.name = 'startCursor'
        if cursor is None:
            cursor_arg.value.integer_value = 0
        else:
            cursor_arg.cursor = cursor
        resp = googledatastore.run_query(req)
        keys = [result.entity.key for result in resp.batch.entity_result]
        # Fix: the original always committed, issuing a useless empty
        # commit RPC when the final page returned no results.
        if keys:
            req = googledatastore.CommitRequest()
            req.mode = googledatastore.CommitRequest.NON_TRANSACTIONAL
            req.mutation.delete.extend(keys)
            googledatastore.commit(req)
        if resp.batch.more_results == googledatastore.QueryResultBatch.NO_MORE_RESULTS:
            break
        if not keys:
            break
        cursor = resp.batch.end_cursor
def unverified_payments(cls):
    """Yields Payment entities that are paid but not yet API-verified."""
    _log.debug('Payment.unverified_payments()')
    cursor = None
    while True:
        request = googledatastore.RunQueryRequest()
        gql = request.gql_query
        gql.query_string = ('SELECT * FROM Payment '
                            'WHERE paid = TRUE '
                            'AND api_verified = FALSE '
                            'LIMIT 50 OFFSET @startCursor ')
        gql.allow_literal = True
        offset_arg = gql.name_arg.add()
        offset_arg.name = 'startCursor'
        # First page starts at offset 0; later pages resume from a cursor.
        if cursor is not None:
            offset_arg.cursor = cursor
        else:
            offset_arg.value.integer_value = 0
        response = googledatastore.run_query(request)
        batch = response.batch
        for entity_result in batch.entity_result:
            payment = cls()
            payment._set_entity(entity_result.entity)
            yield payment
        if batch.more_results == googledatastore.QueryResultBatch.NO_MORE_RESULTS:
            break
        if not batch.entity_result:
            break
        cursor = batch.end_cursor
def archive(cls):
    """Delete all Todo items that are done."""
    # Run the query and the delete inside one transaction.
    begin_resp = datastore.begin_transaction(datastore.BeginTransactionRequest())
    tx = begin_resp.transaction
    query_req = datastore.RunQueryRequest()
    query_req.read_options.transaction = tx
    query = query_req.query
    set_kind(query, kind='Todo')
    add_projection(query, '__key__')  # keys-only: we only need keys to delete
    done_filter = set_property_filter(
        datastore.Filter(), 'done', datastore.PropertyFilter.EQUAL, True)
    ancestor_filter = set_property_filter(
        datastore.Filter(), '__key__',
        datastore.PropertyFilter.HAS_ANCESTOR, default_todo_list.key)
    set_composite_filter(query.filter, datastore.CompositeFilter.AND,
                         done_filter, ancestor_filter)
    query_resp = datastore.run_query(query_req)
    doomed_keys = [r.entity.key for r in query_resp.batch.entity_result]
    commit_req = datastore.CommitRequest()
    commit_req.transaction = tx
    commit_req.mutation.delete.extend(doomed_keys)
    datastore.commit(commit_req)
    return ''
def archive(cls):
    """Delete all Todo items that are done."""
    txn = datastore.begin_transaction(
        datastore.BeginTransactionRequest()).transaction
    run_req = datastore.RunQueryRequest()
    # Read inside the transaction so the commit below sees the same data.
    run_req.read_options.transaction = txn
    q = run_req.query
    set_kind(q, kind='Todo')
    add_projection(q, '__key__')
    set_composite_filter(
        q.filter,
        datastore.CompositeFilter.AND,
        set_property_filter(datastore.Filter(), 'done',
                            datastore.PropertyFilter.EQUAL, True),
        set_property_filter(datastore.Filter(), '__key__',
                            datastore.PropertyFilter.HAS_ANCESTOR,
                            default_todo_list.key))
    run_resp = datastore.run_query(run_req)
    done_keys = []
    for result in run_resp.batch.entity_result:
        done_keys.append(result.entity.key)
    commit_req = datastore.CommitRequest()
    commit_req.transaction = txn
    commit_req.mutation.delete.extend(done_keys)
    datastore.commit(commit_req)
    return ''
def get_all(cls):
    """Query for all Todo items ordered by creation date."""
    request = datastore.RunQueryRequest()
    query = request.query
    set_kind(query, kind='Todo')
    # Restrict results to descendants of the default todo list.
    set_property_filter(query.filter, '__key__',
                        datastore.PropertyFilter.HAS_ANCESTOR,
                        default_todo_list.key)
    response = datastore.run_query(request)
    return [Todo.from_proto(result.entity)
            for result in response.batch.entity_result]
def get_by_name(name):
    """Return the first Tower whose pos_name equals name, or None."""
    request = googledatastore.RunQueryRequest()
    request.query.kind.add().name = 'Tower'
    pos_filter = request.query.filter.property_filter
    pos_filter.property.name = 'pos_name'
    pos_filter.operator = googledatastore.PropertyFilter.EQUAL
    pos_filter.value.string_value = name
    response = googledatastore.run_query(request)
    if not response.batch.entity_result:
        return None
    tower = Tower()
    tower._set_entity(response.batch.entity_result[0].entity)
    return tower
def get_all(cls):
    """Query for all Todo items ordered by creation date.

    This method is eventually consistent to avoid the need for an
    extra index.
    """
    request = datastore.RunQueryRequest()
    query = request.query
    set_kind(query, kind='Todo')
    add_property_orders(query, 'created')
    response = datastore.run_query(request)
    return [Todo.from_proto(result.entity)
            for result in response.batch.entity_results]
def cbquery(self, master):
    """Return [[camid, name], ...] for every PSCams entity owned by master.

    Args:
        master: string matched against each entity's 'master' property.

    Returns:
        A list of two-element lists [camid, name]; either element is
        None when the entity lacks that property.
    """
    req = datastore.RunQueryRequest()
    query = req.query
    query.kind.add().name = 'PSCams'
    master_filter = query.filter.property_filter
    master_filter.property.name = 'master'
    master_filter.operator = datastore.PropertyFilter.EQUAL
    master_filter.value.string_value = master
    resp = datastore.run_query(req)
    mycams = []
    for entity_result in resp.batch.entity_result:
        # Fix: reset per entity. Previously camid/camname leaked between
        # iterations (stale values when an entity was missing a property)
        # and raised NameError if the very first entity lacked one.
        camid = None
        camname = None
        for prop in entity_result.entity.property:
            if prop.name == 'camid':
                camid = prop.value.string_value
            elif prop.name == 'name':
                camname = prop.value.string_value
        mycams.append([camid, camname])
    return mycams
def cbquery(self, master):
    """Look up the [camid, name] pairs registered under the given master."""
    request = datastore.RunQueryRequest()
    q = request.query
    q.kind.add().name = 'PSCams'
    flt = q.filter.property_filter
    flt.property.name = 'master'
    flt.operator = datastore.PropertyFilter.EQUAL
    flt.value.string_value = master
    response = datastore.run_query(request)
    cams = []
    # NOTE(review): cam_id/cam_name deliberately persist across iterations
    # to mirror the original behavior -- an entity missing a property
    # reuses the previous entity's value (and the first entity missing
    # one raises NameError). Confirm entities always carry both props.
    for result in response.batch.entity_result:
        for prop in result.entity.property:
            if prop.name == 'camid':
                cam_id = prop.value.string_value
            elif prop.name == 'name':
                cam_name = prop.value.string_value
        cams.append([cam_id, cam_name])
    return cams
def read_by_indexes(table_name, index_name_values=None):
    """Index reader.

    Generator that queries the given kind with equality filters and
    yields each result through extract_entity(), following end_cursor
    pages until the batch reports it is finished.

    NOTE(review): the filter loop assigns query.filter.property_filter
    each iteration; that appears to be a singular proto field, so with
    more than one (name, val) pair only the last filter would take
    effect -- confirm against the datastore proto before relying on
    multi-index reads.
    """
    req = datastore.RunQueryRequest()
    query = req.query
    query.kind.add().name = table_name
    if not index_name_values:
        index_name_values = []
    for name, val in index_name_values:
        # Equality filter on each requested index property (values are
        # compared as strings).
        queryFilter = query.filter.property_filter
        queryFilter.property.name = name
        queryFilter.operator = datastore.PropertyFilter.EQUAL
        queryFilter.value.string_value = str(val)
    loop_its = 0
    have_more = True
    while have_more:
        resp = datastore.run_query(req)
        found_something = False
        for found in resp.batch.entity_result:
            yield extract_entity(found)
            found_something = True
        if not found_something:
            # This is a guard against bugs or excessive looping - as long we
            # can keep yielding records we'll continue to execute
            loop_its += 1
            if loop_its > 5:
                raise ValueError("Exceeded the excessive query threshold")
        if resp.batch.more_results != datastore.QueryResultBatch.NOT_FINISHED:
            have_more = False
        else:
            have_more = True
            # Resume the next page from where this batch ended.
            end_cursor = resp.batch.end_cursor
            query.start_cursor.CopyFrom(end_cursor)
def related_kills(self, back_minutes=60, forward_minutes=15, system_ids=None):
    """Yields KillMail entities close to this kill in time.

    Args:
        back_minutes: minutes before this kill's time to include.
        forward_minutes: minutes after this kill's time to include.
        system_ids: solar systems to search; defaults to this kill's
            own solar system.

    Yields:
        KillMail instances ordered by kill_time then kill_id, one
        query per system.
    """
    # Drop the Python 2-only long suffix (was 1000000L): ints promote
    # automatically in Python 2, and the code now parses under Python 3.
    micros_per_minute = 60 * 1000000
    ts = _date_to_timestamp(self.kill_time)
    start = ts - back_minutes * micros_per_minute
    end = ts + forward_minutes * micros_per_minute
    if system_ids is None:
        system_ids = [self.solar_system_id]
    for system in system_ids:
        req = googledatastore.RunQueryRequest()
        query = req.gql_query
        query.query_string = ('SELECT * FROM KillMail '
                              'WHERE solar_system_id = @system '
                              'AND kill_time >= @startTime '
                              'AND kill_time < @endTime '
                              'ORDER BY kill_time, kill_id ')
        system_arg = query.name_arg.add()
        system_arg.name = 'system'
        system_arg.value.integer_value = system
        start_time_arg = query.name_arg.add()
        start_time_arg.name = 'startTime'
        start_time_arg.value.timestamp_microseconds_value = start
        end_time_arg = query.name_arg.add()
        end_time_arg.name = 'endTime'
        end_time_arg.value.timestamp_microseconds_value = end
        resp = googledatastore.run_query(req)
        for result in resp.batch.entity_result:
            entity = KillMail()
            entity._set_entity(result.entity)
            yield entity
def query(cls, req):
    """Runs req repeatedly, paging via its @startCursor GQL argument,
    and yields one instance of cls per result entity."""
    _log.debug('%s.query(%s)', cls.__name__, req.gql_query.query_string)
    cursor_arg = req.gql_query.name_arg.add()
    cursor_arg.name = 'startCursor'
    cursor = None
    while True:
        if cursor is not None:
            # req is reused across pages, so the initial integer value
            # must be cleared before the arg can carry a cursor instead.
            cursor_arg.ClearField('value')
            cursor_arg.cursor = cursor
        else:
            cursor_arg.value.integer_value = 0
        batch = googledatastore.run_query(req).batch
        for result in batch.entity_result:
            instance = cls()
            instance._set_entity(result.entity)
            yield instance
        if batch.more_results == googledatastore.QueryResultBatch.NO_MORE_RESULTS:
            break
        if not batch.entity_result:
            break
        cursor = batch.end_cursor