Code Example #1
File: game.py Project: Web5design/CommonConsensus
    def start_new_game(self):
        """
        Starts a new game
        """
        question = self.generate_question()
        question_template = question.question_template.get()
        question.times_used += 1
        question_template.times_used += 1

        # reset the game 
        self.question_string = question.question
        self.question = question.key
        self.started_at = datetime.datetime.now()
        self.answers = []
        self.background_color = random.choice(Game.GAME_COLORS) 
        self.cached_status = None
        self.is_dirty = False
        self.players = []
        self.times_played += 1
        self.is_banned = False
        self.flagged_irrelevant = 0
        self.flagged_nonsense = 0
        # save the question and the game
        ndb.put_multi([self, question, question_template])
        return self
Code Example #2
File: quick_logger.py Project: danbeam/catapult
  def SetRecords(self, key, records):
    """Sets records for this log and put into datastore.

    Serializes records and save over multiple entities if necessary.

    Args:
      key: String key name of a QuickLog entity.
      records: List of Record object.
    """
    # Chunk size in bytes, kept just under the ~1MB limit of an ndb.BlobProperty.
    chunk_size = 1000000
    serialized = pickle.dumps(records, 2)
    length = len(serialized)
    if length / chunk_size > _MAX_NUM_PARTS:
      logging.error('Data too large to save.')
      return None

    log_parts = []
    for i in xrange(0, length, chunk_size):
      # +1 to start entity keys at 1.
      part_id = i // chunk_size + 1
      part_value = serialized[i:i+chunk_size]
      parent_key = ndb.Key('QuickLog', key)
      log_part = QuickLogPart(id=part_id, parent=parent_key, value=part_value)
      log_parts.append(log_part)

    self.size = len(log_parts)
    ndb.put_multi(log_parts + [self])
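For reference, a minimal read-back counterpart to the example above, assuming the QuickLogPart entities it writes; the GetRecords name and the sort-by-part-id detail are assumptions, not code from quick_logger.py:

  def GetRecords(self, key):
    """Loads and deserializes records previously stored by SetRecords.

    Args:
      key: String key name of a QuickLog entity.
    """
    parent_key = ndb.Key('QuickLog', key)
    parts = QuickLogPart.query(ancestor=parent_key).fetch()
    if not parts:
      return None
    # Part ids start at 1, so sorting by id restores the original byte order.
    parts.sort(key=lambda part: part.key.id())
    return pickle.loads(''.join(part.value for part in parts))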
Code Example #3
File: task_result_test.py Project: rmistry/luci-py
  def test_append_output_max_chunk(self):
    # This test case is very slow (1m25s locally) if running with the default
    # values, so scale it down a bit which results in ~2.5s.
    self.mock(
        task_result.TaskOutput, 'PUT_MAX_CONTENT',
        task_result.TaskOutput.PUT_MAX_CONTENT / 8)
    self.mock(
        task_result.TaskOutput, 'PUT_MAX_CHUNKS',
        task_result.TaskOutput.PUT_MAX_CHUNKS / 8)
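    # After scaling, PUT_MAX_CONTENT must remain a multiple of CHUNK_SIZE,
    # otherwise the chunk accounting below would not line up.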
    self.assertFalse(
        task_result.TaskOutput.PUT_MAX_CONTENT %
            task_result.TaskOutput.CHUNK_SIZE)

    calls = []
    self.mock(logging, 'error', lambda *args: calls.append(args))
    max_chunk = 'x' * task_result.TaskOutput.PUT_MAX_CONTENT
    entities = self.run_result.append_output(0, max_chunk, 0)
    self.assertEqual(task_result.TaskOutput.PUT_MAX_CHUNKS, len(entities))
    ndb.put_multi(entities)
    self.assertEqual([], calls)

    # Try with PUT_MAX_CONTENT + 1 bytes, so the last byte is discarded.
    entities = self.run_result.append_output(1, max_chunk + 'x', 0)
    self.assertEqual(task_result.TaskOutput.PUT_MAX_CHUNKS, len(entities))
    ndb.put_multi(entities)
    self.assertEqual(1, len(calls))
    self.assertTrue(calls[0][0].startswith('Dropping '), calls[0][0])
    self.assertEqual(1, calls[0][1])
Code Example #4
File: game.py Project: Web5design/CommonConsensus
    def flag(self, reason):
        """
        Flags this as a bad question.

        1 - Nonsense
        2 - Irrelevant
        """
        if reason == 1:
            self.flagged_nonsense += 1
        else:
            # problem_type = 2
            self.flagged_irrelevant += 1

        # Fraction of players in this game who flagged the question.
        percent_flagged = float(self.times_flagged) / len(self.players)
        logging.error("%f percent flagged" % (percent_flagged))
        if self.times_flagged > 1 and percent_flagged > 0.34 and \
                self.duration() > 1.5 and self.duration() < 10:
            # ban the question
            question = self.question.get()
            question.is_banned = True
            self.is_banned = True
            ndb.put_multi([self, question])
            logging.info("Question banned")
            return True
        return False
Code Example #5
File: task_result_test.py Project: rmistry/luci-py
 def test_append_output_partial(self):
   ndb.put_multi(self.run_result.append_output(0, 'Foo', 10))
   expected_output = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Foo'
   self.assertEqual(
       expected_output,
       self.run_result.get_command_output_async(0).get_result())
   self.assertTaskOutputChunk([{'chunk': expected_output, 'gaps': [0, 10]}])
Code Example #6
File: main.py Project: deniscostadsc/tg
def get_tweets():
    disease_terms = [
        'dengue',

        'gripe',
        'gripado',
        'gripada',
        'h1n1',

        'tuberculose',

        u'cólera',
        'colera',

        u'malária',
        'malaria'
    ]

    for term in disease_terms:
        tweets = twitter_api.search(q=term, rpp=100)

        documents = [
            Document(id=tweet.id_str, text=tweet.text) for tweet in tweets
        ]

        ndb.put_multi(documents)
Code Example #7
File: mails.py Project: 7kfpun/com.getmewrite
def send_contact_mail(self):
    contacts = Contact.query(Contact.sent != True)
    count = contacts.count()
    if count:
        datetime_handler = lambda obj: obj.isoformat() \
            if isinstance(obj, datetime) else None
        message = json.dumps(
            [contact.to_dict() for contact in contacts],
            default=datetime_handler,
            indent=4
        )

        mail.send_mail(
            sender="Getmewrite.com Support <*****@*****.**>",
            to="Mona <*****@*****.**>",
            subject="You have {0} new contact(s)".format(count),
            body=message
        )

        self.response.write(message)
        logger.info(
            'Send daily mail success, {0} new contacts'.format(count))

        put_list = []
        for contact in contacts:
            contact.sent = True
            put_list.append(contact)

        ndb.put_multi(put_list)
Code Example #8
File: task_result_test.py Project: rmistry/luci-py
  def test_set_from_run_result_two_tries(self):
    request = task_request.make_request(_gen_request(), True)
    result_summary = task_result.new_result_summary(request)
    run_result_1 = task_result.new_run_result(
        request, 1, 'localhost', 'abc', {})
    run_result_2 = task_result.new_run_result(
        request, 2, 'localhost', 'abc', {})
    self.assertTrue(result_summary.need_update_from_run_result(run_result_1))
    run_result_2.modified_ts = utils.utcnow()
    result_summary.modified_ts = utils.utcnow()
    ndb.transaction(lambda: ndb.put_multi((result_summary, run_result_2)))

    self.assertTrue(result_summary.need_update_from_run_result(run_result_1))
    run_result_1.modified_ts = utils.utcnow()
    result_summary.set_from_run_result(run_result_1, request)
    ndb.transaction(lambda: ndb.put_multi((result_summary, run_result_1)))

    result_summary = result_summary.key.get()
    self.assertFalse(result_summary.need_update_from_run_result(run_result_1))

    self.assertTrue(result_summary.need_update_from_run_result(run_result_2))
    run_result_2.modified_ts = utils.utcnow()
    result_summary.set_from_run_result(run_result_2, request)
    ndb.transaction(lambda: ndb.put_multi((result_summary, run_result_2)))
    result_summary = result_summary.key.get()

    self.assertEqual(2, result_summary.try_number)
    self.assertFalse(result_summary.need_update_from_run_result(run_result_1))
Code Example #9
def refreshData():
	try:
		spider = citySpider()
		city_list = spider.parse()

		"""First empty the data-store"""
		city_query = cityDB.query()
		entity_list = city_query.fetch()
		key_list = []
		for entity in entity_list:
			key_list.append(entity.key)
		ndb.delete_multi(key_list)
		
		"""Store fresh cities"""
		entity_list = []
		for city in city_list:
			city_name = ""
			for i in xrange(len(city[0])): # replcaing spaces with '-'(dash)
	  			if city[0][i] == ' ':
	 				city_name += '-'
	 			else:
	 				city_name += city[0][i]
	 		city_name = city_name.lower()
			mykey = ndb.Key('cityDB',city_name)
			if cityDB.get_by_id(city_name) is None:
				entity = cityDB(key = mykey, alt_name = city[1], timestamp = dt.datetime(2000,1,1,0,0,0,0))
			else:
				raise Exception("Assertion Fail. Two cities with same name encountered")
			entity_list.append(entity)
		ndb.put_multi(entity_list)

	except Exception as exp:
		raise exp
Code Example #10
File: annotations.py Project: gdg-garage/knihovna-db
def _put_annotations_batch(annotation_data):
    try:
        annotation_objects = []
        qry = BookRecord.query(
            BookRecord.item_id_array.IN(annotation_data.keys())
        )
        keys = qry.fetch(MAX_OBJECTS_PER_BATCH, keys_only=True)
        for key in keys:
            first_item_id = key.string_id().split('|')[0]

            if first_item_id not in annotation_data:
                continue

            short_text = annotation_data[first_item_id][0]
            long_text = annotation_data[first_item_id][1]

            anno_key = ndb.Key(BookAnnotation, key.string_id())
            annotation = BookAnnotation(key=anno_key,
                                        short=short_text,
                                        long=long_text)
            annotation_objects.append(annotation)
        ndb.put_multi(annotation_objects)
        logging.info("{} annotations put into datastore"
                     .format(len(annotation_objects)))
    except Exception as e:
        logging.error(e)
        raise deferred.PermanentTaskFailure()
Code Example #11
File: graph_json_test.py Project: n054/catapult
  def testGetGraphJsonError(self):
    self._AddTestColumns(start_rev=15000, end_rev=15015)

    rows = graph_data.Row.query(
        graph_data.Row.parent_test == ndb.Key(
            'TestMetadata', 'ChromiumGPU/win7/dromaeo/dom')).fetch()
    for row in rows:
      row.error = 1 + ((row.revision - 15000) * 0.25)
    ndb.put_multi(rows)
    flot_json_str = graph_json.GetGraphJson(
        {
            'ChromiumGPU/win7/dromaeo/dom': [],
        })
    flot = json.loads(flot_json_str)
    self.assertEqual(1, len(flot['error_bars'].keys()))
    rev = 0
    for col_dom, col_top, col_bottom in zip(
        flot['data']['0']['data'],
        flot['error_bars']['0'][1]['data'],
        flot['error_bars']['0'][0]['data']):
      error = 1 + (rev * 0.25)
      self.assertEqual(rev + 15000, col_top[0])
      self.assertEqual(col_dom[1] + error, col_top[1])
      self.assertEqual(rev + 15000, col_bottom[0])
      self.assertEqual(col_dom[1] - error, col_bottom[1])
      rev += 3
Code Example #12
File: graph_json_test.py Project: n054/catapult
  def _AddLongTestColumns(self, start_rev=15000, end_rev=16500, step=3):
    """Adds test data with long nested sub test to the mock datastore.

    Args:
      start_rev: Starting revision number.
      end_rev: Ending revision number.
      step: Difference between adjacent revisions.
    """
    master = graph_data.Master(id='master')
    master.put()
    bot = graph_data.Bot(id='bot', parent=master.key)
    bot.put()
    test = graph_data.TestMetadata(id='master/bot/suite')
    test.put()

    rows = []
    path = 'master/bot/suite'
    for sub_name in ['sub1', 'sub2', 'sub3', 'sub4', 'sub5']:
      path = '%s/%s' % (path, sub_name)
      test = graph_data.TestMetadata(id=path,
                                     improvement_direction=anomaly.UP,
                                     has_rows=True)
      test.put()
      test_container_key = utils.GetTestContainerKey(test.key)
      for i in range(start_rev, end_rev, step):
        row = graph_data.Row(
            parent=test_container_key, id=i, value=float(i * 2),
            r_webkit=int(i * 0.25), a_str='some_string',
            buildnumber=i - start_rev,
            a_tracing_uri='http://trace/%d' % i,
            a_trace_rerun_options={'foo': '--foo'})
        rows.append(row)
    ndb.put_multi(rows)
Code Example #13
    def get(self):
        cursorString = self.request.get('cursor')

        cursor = None
        if cursorString != 'None':
            cursor = Cursor(urlsafe=cursorString)

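        # Page through all Vector entities and flag known-bad reference checks.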
        listVectors, next_cursor, more = Vector.query().fetch_page(BATCH_SIZE, start_cursor=cursor)
        to_put = []

        bad_vectors = {
            ('OGC', 'BusStops', 'GetCapabilities', 'GET', 'XML'),
            ('OGC', 'BusStops', 'AttributeFilter', 'GET', 'JSON'),
            ('OGC', 'BusStops', 'GetCapabilities', 'POST', 'XML'),
            ('OGC', 'Topo', 'Big', 'GET', 'Image'),
            ('OGC', 'Topo', 'Small', 'GET', 'Image'),
            ('GME', 'BusStops', 'Small', 'GET', 'JSON'),
            ('GME', 'AerialPhoto', 'WMSGetCapabilities', 'GET', 'XML'),
            ('GME', 'AerialPhoto', 'WMTSGetCapabilities', 'GET', 'XML'),
        }
        for vector in listVectors:
            signature = (vector.server, vector.dataset, vector.name,
                         vector.httpMethod, vector.returnType)
            vector.referenceCheckValid = signature not in bad_vectors
            print 'Changed flag! %s!' % vector.referenceCheckValid
            to_put.append(vector)

        if to_put:
            ndb.put_multi(to_put)

        if more:
            print next_cursor.urlsafe()
            taskqueue.add(url='/updateschemaworker', method='GET', params={'cursor':next_cursor.urlsafe()})
Code Example #14
File: pokemonListDao.py Project: neat-nimbus/app
def init():
    """
    dbの初期状態を作成する.すでに初期化されている場合は異常を返す
    変数は取らない
    返却型はBooleanで初期化成功はTrue、失敗はFalse
    """
    key = ndb.Key('PokemonList', u'フシギダネ')
    dao = key.get()
    if dao is None:
        daoList = []
        for normalOrder, (pokemon, gengarOrder, nicoTime) in enumerate(zip(POKEMON_LIST, GENGARS_ORDER, NICO_TIMES)):
            dao = PokemonList(id=pokemon)
            dao.normalOrder = normalOrder
            dao.gengarOrder = gengarOrder
            dao.pokemon = pokemon
            dao.clefableFlag = False
            dao.gengarFlag = False
            dao.nicoFlag = True
            dao.nicoTime = nicoTime
            daoList.append(dao)
        ndb.put_multi(daoList)
        logging.info(u"Successfully initialized PokemonList")
        return True
    else:
        logging.warning(u"PokemonList is already initialized")
        return False
Code Example #15
File: gae_models.py Project: DSeanLaw/oppia
    def get_or_create_multi(cls, exploration_id, rule_data):
        """Gets or creates entities for the given rules.
        Args:
            exploration_id: the exploration id
            rule_data: a list of dicts, each with the following keys:
                (state_name, rule_str).
        """
        # TODO(sll): Use a hash instead to disambiguate.
        entity_ids = ['.'.join([
            exploration_id, datum['state_name'],
            _OLD_SUBMIT_HANDLER_NAME, datum['rule_str']
        ])[:490] for datum in rule_data]

        entity_keys = [cls._get_entity_key(exploration_id, entity_id)
                       for entity_id in entity_ids]

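        # Fetch all candidate entities at once; create any that are missing.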
        entities = ndb.get_multi(entity_keys)
        entities_to_put = []
        for ind, entity in enumerate(entities):
            if entity is None:
                new_entity = cls(id=entity_ids[ind], answers={})
                entities_to_put.append(new_entity)
                entities[ind] = new_entity

        ndb.put_multi(entities_to_put)
        return entities
Code Example #16
File: blob.py Project: chris-rudmin/videodiscoveries
  def post(self):

    videoBlobInfo = self.get_uploads('video')
    screenshotBlobInfo = self.get_uploads('screenshot')
    title = self.request.get("title")
    description = self.request.get("description")

    if len( videoBlobInfo ) != 1 or len( screenshotBlobInfo ) != 1 or not title or not description:
      return self.uploadError( 400 )
    if not users.is_current_user_admin():
      return self.uploadError( 401 )

    uploadRoot = self.request.host_url + BLOB_ROOT
    videoBlobKey = str(videoBlobInfo[0].key())
    videoBlobUri = uploadRoot + videoBlobKey + "/" + videoBlobInfo[0].filename
    screenshotBlobKey = str(screenshotBlobInfo[0].key())
    screenshotBlobUri = uploadRoot + screenshotBlobKey + "/" + screenshotBlobInfo[0].filename
    userId = users.get_current_user().user_id()

    userEntity = User.getById( userId )
    videoEntity = Video(id=videoBlobKey, videoUri=videoBlobUri, screenshotUri=screenshotBlobUri, uploadUser=userId, title=title, description=description, vector=[])
    ndb.put_multi([userEntity, videoEntity])

    self.response.headers['Content-Type'] = 'application/json'
    self.response.out.write( json.dumps({ 'id' : videoBlobKey }) )
Code Example #17
def release_lease(lease_key):
  """Releases a lease on a machine.

  Args:
    lease_key: ndb.Key for a models.LeaseRequest entity.
  """
  lease = lease_key.get()
  if not lease:
    logging.warning('LeaseRequest not found: %s', lease_key)
    return
  if not lease.released:
    logging.warning('LeaseRequest not released:\n%s', lease)
    return

  lease.released = False
  if not lease.machine_id:
    logging.warning('LeaseRequest has no associated machine:\n%s', lease)
    lease.put()
    return

  machine = ndb.Key(models.CatalogMachineEntry, lease.machine_id).get()
  if not machine:
    logging.error('LeaseRequest has non-existent machine leased:\n%s', lease)
    lease.put()
    return

  # Just expire the lease now and let MachineReclamationProcessor handle it.
  logging.info('Expiring LeaseRequest:\n%s', lease)
  now = utils.utcnow()
  lease.response.lease_expiration_ts = utils.datetime_to_timestamp(
      now) / 1000 / 1000
  machine.lease_expiration_ts = now
  ndb.put_multi([lease, machine])
Code Example #18
File: testing_common.py Project: joebubba/catapult
def _AddRowsFromIterable(container_key, row_ids):
  """Adds a set of Rows given an iterable of ID numbers."""
  rows = []
  for int_id in sorted(row_ids):
    rows.append(graph_data.Row(id=int_id, parent=container_key, value=int_id))
  ndb.put_multi(rows)
  return rows
Code Example #19
File: internal.py Project: firi/appengine-btree
 def txn():
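     # The outermost batched call creates the shared put/delete buffers;
     # nested calls detect them via hasattr and append to the same batch,
     # so only the outermost call flushes to the datastore.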
     first_batch_call = not all([hasattr(self, "_nodes_to_put"),
                                 hasattr(self, "_indices_to_put"),
                                 hasattr(self, "_identifier_cache"),
                                 hasattr(self, "_keys_to_delete")])
     if first_batch_call:
         self._nodes_to_put = dict()
         self._indices_to_put = dict()
         self._identifier_cache = dict()
         self._keys_to_delete = set()
     try:
         results = func()
         if first_batch_call and any([self._nodes_to_put,
                                      self._indices_to_put,
                                      self._keys_to_delete]):
             futures = ndb.delete_multi_async(self._keys_to_delete)
             ndb.put_multi(chain(self._nodes_to_put.itervalues(),
                                 self._indices_to_put.itervalues()))
             [future.get_result() for future in futures]
     finally:
         if first_batch_call:
             del self._nodes_to_put
             del self._indices_to_put
             del self._identifier_cache
             del self._keys_to_delete
     return results
Code Example #20
File: model.py Project: rmistry/luci-py
 def commit(self):
     assert not self.committed
     puts = [c.entity.make_historical_copy(c.deletion, c.comment) for c in self.changes]
     ndb.put_multi(puts + [self.replication_state])
     for cb in _commit_callbacks:
         cb(self.replication_state.auth_db_rev)
     self.committed = True
Code Example #21
File: task_result_test.py Project: rmistry/luci-py
 def test_append_output_overwrite(self):
   # Overwrite previously written data.
   ndb.put_multi(self.run_result.append_output(0, 'FooBar', 0))
   ndb.put_multi(self.run_result.append_output(0, 'X', 3))
   self.assertEqual(
       'FooXar', self.run_result.get_command_output_async(0).get_result())
   self.assertTaskOutputChunk([{'chunk': 'FooXar', 'gaps': []}])
Code Example #22
File: content.py Project: Outernet-Project/tekhenu
def update_content_details(id):
    to_put = []
    content = get_content_or_404(id)
    ref_path = i18n_path(request.forms.get('back', content.path))

    if not content.is_editable:
        response.flash(_('Voting is disabled for content that is being '
                         'broadcast'))
        redirect(ref_path)

    vote = request.forms.get('vote')

    if vote not in ['up', 'down']:
        response.flash(_('There was a problem with the request. Please try '
                         'again later.'))
        redirect(ref_path)

    if vote == 'up':
        content.upvotes += 1
        to_put.append(Event.create(Event.UPVOTE, content.key))
    elif vote == 'down':
        content.downvotes += 1
        to_put.append(Event.create(Event.DOWNVOTE, content.key))
    to_put.append(content)
    ndb.put_multi(to_put)
    redirect(ref_path)
Code Example #23
 def addN(self, quads):
     # TODO: Handle splitting large graphs into two entities
     # Note: quads is a generator, not a list. It cannot be traversed twice.
     # Step 1: Collect the triples into the Graphs reflecting the GraphShards they will be added to.
     new_shard_dict = defaultdict(Graph)
     for (s, p, o, _) in quads:  # Last component ignored as this Store is not context_aware
         subject_shard = choice(self.keys_for(self._ID, s, 0))
         new_shard_dict[subject_shard].add((s, p, o))
         predicate_shard = choice(self.keys_for(self._ID, p, 1))
         new_shard_dict[predicate_shard].add((s, p, o))
     keys = list(new_shard_dict.keys())
     # Step 2: Load all existing, corresponding GraphShards
     keys_models = zip(keys, ndb.get_multi(keys))  # TODO: Use async get
     # Step 3: Update or create GraphShards with the added triples
     updated = list()
     for index in range(len(keys_models)):
         (key, model) = keys_models[index]
         if model is None:
             model = GraphShard(key=key, graph_ID=self._ID, graph_n3=new_shard_dict[key].serialize(format="n3"))
         else:
             new_shard_dict[key].parse(data=model.graph_n3, format="n3")
             model.graph_n3 = new_shard_dict[key].serialize(format="n3")
         updated.append(model)
     # Step 4: Invalidate and store all created/updated GraphShards
     if len(updated) > 0:
         GraphShard.invalidate(updated)
         ndb.put_multi(updated)
Code Example #24
def migrate(dry_run=False):
    to_put = [ProfileAppEmailMapping(key=ProfileAppEmailMapping.create_key(p.app_email), username=p.username) for p in
              Profile.list_with_app_user()]
    if dry_run:
        return len(to_put), to_put
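    # Write in batches of 200 entities per put_multi call.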
    for parts in chunks(to_put, 200):
        ndb.put_multi(parts)
Code Example #25
File: content.py Project: Outernet-Project/tekhenu
def update_content_details(id):
    content = get_content_or_404(id)

    if not content.is_editable:
        # Translators, shown when content is not editable (it's on air, etc)
        response.flash(_('This content is not editable'))
        redirect(i18n_path(content.path))

    errors = {}

    title = request.forms.getunicode('title', '').strip()
    license = request.forms.get('license') or None

    if not content.title and not title:
        errors['title'] = _('Title cannot be blank')

    if license and license not in Content.LICENSE_CHOICES:
        errors['license'] = _('Please select a license from provided choices')

    if not errors:
        to_put = []
        if title and content.title != title:
            content.title = title
            to_put.append(Event.create(Event.TITLE, content.key))
        if license and content.license != license:
            content.license = license
            to_put.append(Event.create(Event.LICENSE, content.key))
        if to_put:
            # If we have events in to_put list, we also need to put the content
            to_put.append(content)
        ndb.put_multi(to_put)
        response.flash(_('Content has been updated'))
        redirect(content.path)

    return dict(vals=request.forms, errors=errors, content=content)
Code Example #26
    def post(self):
        self._require_admin()

        additions = json.loads(self.request.get("youtube_additions_json"))
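        # additions["videos"] is a list of (match_key, youtube_video) pairs.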
        match_keys, youtube_videos = zip(*additions["videos"])
        matches = ndb.get_multi([ndb.Key(Match, match_key) for match_key in match_keys])

        matches_to_put = []
        results = {"existing": [], "bad_match": [], "added": []}
        for (match, match_key, youtube_video) in zip(matches, match_keys, youtube_videos):
            if match:
                if youtube_video not in match.youtube_videos:
                    match.youtube_videos.append(youtube_video)
                    matches_to_put.append(match)
                    results["added"].append(match_key)
                else:
                    results["existing"].append(match_key)
            else:
                results["bad_match"].append(match_key)
        ndb.put_multi(matches_to_put)

        # TODO use Manipulators -gregmarra 20121006

        self.template_values.update({
            "results": results,
        })

        path = os.path.join(os.path.dirname(__file__), '../../templates/admin/videos_add.html')
        self.response.out.write(template.render(path, self.template_values))
Code Example #27
    def start_draft(self, draft):
        draft.in_setup = False
        draft.in_progress = True

        #shuffle user_keys to create random order
        random.shuffle(draft.user_keys)

        #create the individual drafter items
        position = 0
        drafter_entities = []
        for user_key in draft.user_keys:
            drafter_entities.append(Drafter.create(user_key=user_key, 
                draft_key=draft.key, position=position))
            position += 1
        draft.drafter_keys = ndb.put_multi(drafter_entities)
#         logging.error('424: draft')
#         logging.error(draft)

        pack_entities = []
        #get boosters for all the packs
        for pack_code in draft.pack_codes:
            packs = SetUtil().generate_boosters(
                num=draft.num_drafters, 
                set_code=pack_code)
            random.shuffle(packs)
            for pack in packs:
                pack_entities.append(Pack.create(draft_key=draft.key, 
                                                 cards=pack))
        draft.unopened_pack_keys = ndb.put_multi(pack_entities)
        draft.put()
#         time.sleep(5)
#         logging.error('440: draft')
#         logging.error(draft)
        self.next_pack(draft)
Code Example #28
File: handlers_backend.py Project: nodirt/luci-py
  def post(self, namespace, timestamp):
    digests = []
    now = utils.timestamp_to_datetime(long(timestamp))
    expiration = config.settings().default_expiration
    try:
      digests = payload_to_hashes(self, namespace)
      # Requests all the entities at once.
      futures = ndb.get_multi_async(
          model.entry_key(namespace, binascii.hexlify(d)) for d in digests)

      to_save = []
      while futures:
        # Return opportunistically the first entity that can be retrieved.
        future = ndb.Future.wait_any(futures)
        futures.remove(future)
        item = future.get_result()
        if item and item.next_tag_ts < now:
          # Update the timestamp. Add a bit of pseudo randomness.
          item.expiration_ts, item.next_tag_ts = model.expiration_jitter(
              now, expiration)
          to_save.append(item)
      if to_save:
        ndb.put_multi(to_save)
      logging.info(
          'Timestamped %d entries out of %s', len(to_save), len(digests))
    except Exception as e:
      logging.error('Failed to stamp entries: %s\n%d entries', e, len(digests))
      raise
Code Example #29
File: streams.py Project: xchewtoyx/pulldb-api
 def post(self):
     self.results = defaultdict(list)
     self.updated = []
     user_key = users.user_key(self.user)
     request = json.loads(self.request.body)
     for stream_updates in request:
         stream = streams.stream_key(
             stream_updates['name'],
             user_key = user_key,
             create=False,
         ).get()
         if not stream:
             self.results['failed'].append(stream_updates['name'])
             continue
         if stream_updates.get('publishers'):
             self.update_publishers(stream, stream_updates['publishers'])
         if stream_updates.get('volumes'):
             self.update_volumes(stream, stream_updates['volumes'])
         if stream_updates.get('issues'):
             self.update_issues(stream, stream_updates['issues'])
     if self.updated:
         ndb.put_multi(self.updated)
         status = 200
         message = '%d stream changes' % len(self.updated)
     else:
         status = 203
         message = 'no changes'
     self.response.write(json.dumps({
         'status': status,
         'message': message,
         'results': self.results,
     }))
Code Example #30
  def _AddUserRecordsToDB(self, users_to_add):
    """Helper to perform ndb put() with error handling.

    Args:
      users_to_add: List of user (DomainUserToCheckModel) entities to add.
    """
    ndb.put_multi(users_to_add)
Code Example #31
File: __init__.py Project: mouedhen/sndlatr
def init_zoneinfo():
    """
    Add each zone info to the datastore. This will overwrite existing zones.

    This must be called before the AppengineTimezoneLoader will work.
    """
    import os, logging
    from zipfile import ZipFile
    zoneobjs = []

    zoneinfo_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
      'zoneinfo.zip'))

    with ZipFile(zoneinfo_path) as zf:
        for zfi in zf.filelist:
            key = ndb.Key('Zoneinfo', zfi.filename, namespace=NDB_NAMESPACE)
            zobj = Zoneinfo(key=key, data=zf.read(zfi))
            zoneobjs.append(zobj)

    logging.info("Adding %d timezones to the pytz-appengine database" %
        len(zoneobjs)
        )

    ndb.put_multi(zoneobjs)
Code Example #32
def _create_token_value_agreement_if_needed(profile_key):
    profile = profile_key.get()  # type: TffProfile
    investments = [
        i for i in InvestmentAgreement.list_by_user(profile.app_user)
        if PaymentInfo.HAS_MULTIPLIED_TOKENS not in i.payment_info
        and i.creation_time <= FF_ENDED_TIMESTAMP
    ]
    statuses = [
        InvestmentAgreement.STATUS_PAID, InvestmentAgreement.STATUS_SIGNED
    ]
    canceled_or_started_investments = [
        i for i in investments if i.status not in statuses
    ]
    to_put, token_count = multiply_tokens_for_agreements(
        canceled_or_started_investments)
    azzert(token_count == 0, 'Expected token_count to be 0')
    logging.info('Updated %s agreements for user %s', len(to_put),
                 profile.username)
    if to_put:
        ndb.put_multi(to_put)
    has_document = any(d.type == DocumentType.TOKEN_VALUE_ADDENDUM.value
                       for d in Document.list_by_username(profile.username))
    if any(i.status in statuses for i in investments) and not has_document:
        create_token_value_agreement(profile.username)
Code Example #33
    def testGetGraphJsonError(self):
        self._AddTestColumns(start_rev=15000, end_rev=15015)

        rows = graph_data.Row.query(graph_data.Row.parent_test == ndb.Key(
            'Master', 'ChromiumGPU', 'Bot', 'win7', 'Test', 'dromaeo', 'Test',
            'dom')).fetch()
        for row in rows:
            row.error = 1 + ((row.revision - 15000) * 0.25)
        ndb.put_multi(rows)
        flot_json_str = graph_json.GetGraphJson({
            'ChromiumGPU/win7/dromaeo/dom': [],
        })
        flot = json.loads(flot_json_str)
        self.assertEqual(1, len(flot['error_bars'].keys()))
        rev = 0
        for col_dom, col_top, col_bottom in zip(
                flot['data']['0']['data'], flot['error_bars']['0'][1]['data'],
                flot['error_bars']['0'][0]['data']):
            error = 1 + (rev * 0.25)
            self.assertEqual(rev + 15000, col_top[0])
            self.assertEqual(col_dom[1] + error, col_top[1])
            self.assertEqual(rev + 15000, col_bottom[0])
            self.assertEqual(col_dom[1] - error, col_bottom[1])
            rev += 3
Code Example #34
File: main.py Project: ericcolon/NBA-Projections-1
	def get(self):
		teamsToPut = []
		
		url = "http://stats.nba.com/stats/leaguedashteamstats?Conference=&DateFrom=&DateTo=&Division=&GameScope=&GameSegment=&LastNGames=0&LeagueID=00&Location=&MeasureType=Advanced&Month=0&OpponentTeamID=0&Outcome=&PORound=0&PaceAdjust=N&PerMode=Totals&Period=0&PlayerExperience=&PlayerPosition=&PlusMinus=N&Rank=N&Season=2016-17&SeasonSegment=&SeasonType=Regular+Season&ShotClockRange=&StarterBench=&TeamID=0&VsConference=&VsDivision="
		response = urlfetch.fetch(url)
		
		jsonData = json.loads(response.content)
		teams = jsonData['resultSets'][0]['rowSet']
		statHeaders = jsonData['resultSets'][0]['headers']
		
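		# Build a {header: value} dict for each team row and store one Team entity per team.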
		for i in range(len(teams)):
			teamData = {}
			
			for j in range(len(teams[i])):
				teamData[statHeaders[j]] = teams[i][j]
				
			newTeam = Team(
				teamId = teams[i][0],
				name = teams[i][1],
				data = teamData,
				id = teams[i][0]					
			)
			teamsToPut.append(newTeam)
		ndb.put_multi(teamsToPut)
Code Example #35
 def post(self):
     if self.request.get('auth') == 'bd2c952a4b2febc39b81c967cd8556cd':
         response = {}
         response['cursor'] = ''
         query = APIData.query()
         query = query.filter(APIData.indexed_data == 'TYPE->PROJECT')
         query = query.filter(APIData.indexed_data == 'COA->0')
         n = 20
         if self.request.get('n'):
             n = int(self.request.get('n'))
         if self.request.get("cursor"):
             curs = Cursor(urlsafe=self.request.get("cursor"))
             data, cursor, more = query.fetch_page(n, start_cursor=curs)
         else:
             data, cursor, more = query.fetch_page(n)
         things_to_put = []
         for d in data:
             d.additional_data['coa'] = '0'
             things_to_put.append(d)
         if things_to_put:
             ndb.put_multi(things_to_put)
         response['cursor'] = cursor.urlsafe()
         self.response.headers['Content-Type'] = 'application/json'
         self.response.write(json.dumps(response))
Code Example #36
    def _AddLongTestColumns(self, start_rev=15000, end_rev=16500, step=3):
        """Adds test data with long nested sub test to the mock datastore.

        Args:
          start_rev: Starting revision number.
          end_rev: Ending revision number.
          step: Difference between adjacent revisions.
        """
        master = graph_data.Master(id='master')
        master.put()
        bot = graph_data.Bot(id='bot', parent=master.key)
        bot.put()
        test = graph_data.TestMetadata(id='master/bot/suite')
        test.UpdateSheriff()
        test.put()

        rows = []
        path = 'master/bot/suite'
        for sub_name in ['sub1', 'sub2', 'sub3', 'sub4', 'sub5']:
            path = '%s/%s' % (path, sub_name)
            test = graph_data.TestMetadata(id=path,
                                           improvement_direction=anomaly.UP,
                                           has_rows=True)
            test.UpdateSheriff()
            test.put()
            test_container_key = utils.GetTestContainerKey(test.key)
            for i in range(start_rev, end_rev, step):
                row = graph_data.Row(parent=test_container_key,
                                     id=i,
                                     value=float(i * 2),
                                     r_webkit=int(i * 0.25),
                                     a_str='some_string',
                                     buildnumber=i - start_rev,
                                     a_tracing_uri='http://trace/%d' % i)
                rows.append(row)
        ndb.put_multi(rows)
Code Example #37
def recalculate_goat_index(sport):
    ndb.delete_multi(UserGoatIndex.query().filter(
        UserGoatIndex.sport == sport).fetch(keys_only=True))
    q = Pick.query().filter(Pick.sport == sport)  # sport == "nba"
    picks = q.fetch()
    goat_indexes = []
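    # Count each pick whose event has already started.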
    for pick in picks:
        #get_or_insert user goat index
        user_id = pick.user_id
        event = pick.event.get()
        if event.start_time < datetime.datetime.utcnow():
            goat_index = UserGoatIndex.get_or_insert("{}{}".format(
                sport, user_id),
                                                     user_id=user_id,
                                                     sport=sport,
                                                     num_pick=0,
                                                     num_point=0,
                                                     num_correct=0)
            outcome = event.outcome
            goat_index.num_pick = goat_index.num_pick + 1
            if outcome.winner == pick.pick:
                goat_index.num_correct = goat_index.num_correct + 1
            goat_indexes.append(goat_index)
    ndb.put_multi(goat_indexes)
Code Example #38
    def create_project_cohort(self, cohort_date=datetime.datetime.today()):
        program = Program.get_config(self.program_label)
        liaison = User.create(email='*****@*****.**')
        org = Organization.create(name="Org Foo", liaison_id=liaison.uid)
        liaison.owned_organizations.append(org.uid)
        project = Project.create(organization_id=org.uid,
                                 program_label=self.program_label)
        liaison.put()
        org.put()
        project.put()

        pc = ProjectCohort.create(
            project_id=project.uid,
            organization_id=org.uid,
            program_label=self.program_label,
            cohort_label=self.cohort_label,
            liaison_id=liaison.uid,
        )
        pc.put()

        surveys = Survey.create_for_project_cohort(program['surveys'], pc)
        ndb.put_multi(surveys)

        return liaison, org, project, pc, surveys
Code Example #39
def put_multi(models, update_last_updated_time=True):
    """Stores a sequence of Model instances.

    Args:
        models: list(datastore_services.Model). A list of Model instances.
        update_last_updated_time: bool. Whether to update the last_updated field
            of the entities.

    Returns:
        list(str). A list with the stored keys.
    """
    # TODO(#10863): Stop passing in update_last_updated_time through these
    # top-level functions.
    return ndb.put_multi(
        models, update_last_updated_time=update_last_updated_time)
Code Example #40
    def dispatch_request(self):
        form = TokenForm()
        if form.validate_on_submit():
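            # Issue a short 6-character token and text it to every account
            # registered under this phone number.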
            token = uuid4().hex[:6].upper()
            phone = phonenumbers.format_number(
                phonenumbers.parse(form.user_phone.data, region='US'),
                phonenumbers.PhoneNumberFormat.E164)
            registered_users = UserModel.query(UserModel.user_phone == phone)
            updated_users = []
            for registered_user in registered_users:
                registered_user.user_token = token
                updated_users.append(registered_user)

            if updated_users:
                ndb.put_multi(updated_users)

                url = request.url_root.rstrip('/') + url_for('validate',
                                                             user_token=token)

                client = TwilioRestClient(TWILIO_SID, TWILIO_TOKEN)
                message = client.messages.create(body="Jeff's Xmas Tracker\n\n"
                                                 "Token: %s\n"
                                                 "or\n"
                                                 "Click: %s" % (token, url),
                                                 to=phone,
                                                 from_="+15153052239")

                flash(
                    u"Token sent to %s. Follow link or type in token above." %
                    phone, 'success')
            else:
                flash(
                    u"The phone number %s isn't registered to any users." %
                    phone, 'warning')

        return render_template('validate_token.html', form=form)
Code Example #41
    def GetFailureKeysToAnalyzeTestFailures(self, failure_entities):
        """Gets failures that'll actually be analyzed in the analysis.

        Groups failures by suite, picks one failure per group and links the
        other failures in the group to it.

        Note: because of the lack of test-level failure info, such in-build
        grouping could cause false positives, but we still do it to save
        resources and speed up analysis.
        """
        suite_to_failure_map = defaultdict(list)
        for failure in failure_entities:
            properties = failure.properties or {}
            if not properties.get('needs_bisection', True):
                # Should not include the failure if it doesn't need bisection.
                continue
            suite_to_failure_map[properties.get('suite')].append(failure)

        analyzing_failure_keys = []
        failures_to_update = []
        for same_suite_failures in suite_to_failure_map.itervalues():
            sample_failure_key = same_suite_failures[0].key
            analyzing_failure_keys.append(sample_failure_key)
            if len(same_suite_failures) == 1:
                continue

            for i in xrange(1, len(same_suite_failures)):
                # Merges the rest of failures into the sample failure.
                failure = same_suite_failures[i]
                failure.merged_failure_key = sample_failure_key
                failures_to_update.append(failure)

        if failures_to_update:
            ndb.put_multi(failures_to_update)

        return analyzing_failure_keys
Code Example #42
    def post(self):
        self.response.headers.add_header('Access-Control-Allow-Origin', '*')

        data = self.request.body
        email = getEmail(json.loads(self.request.body))
        if email is None:
            msg = "invalid user"
            self.response.write(json.dumps(msg))
            return
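        # Replace the user's existing course list with the posted one.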
        deleteCourses(email)
        j = json.loads(data)
        courses = j['courselist']
        ents = []
        l = len(courses)
        for i in range(0, l):
            print courses[i]
            crs = Course(email=email,
                         name=courses[i]['name'],
                         description=courses[i]['description'])
            ents.append(crs)

        ndb.put_multi(ents)
        self.response.headers['Content-Type'] = 'application/json'
        self.response.write(data)
Code Example #43
  def touch_auth_db():
    # Recheck under transaction.
    if key.get() is not None:
      return
    to_process = []

    # Start slow queries in parallel.
    groups_future = model.AuthGroup.query(
        ancestor=model.root_key()).fetch_async()
    whitelists_future = model.AuthIPWhitelist.query(
        ancestor=model.root_key()).fetch_async()

    # Singleton entities.
    to_process.append(model.root_key().get())
    to_process.append(model.ip_whitelist_assignments_key().get())

    # Finish queries.
    to_process.extend(groups_future.get_result())
    to_process.extend(whitelists_future.get_result())

    # Update auth_db_rev properties, make *History entities. Keep modified_by
    # and modified_ts as they were.
    to_put = []
    for ent in to_process:
      if not ent:
        continue
      ent.record_revision(
          modified_by=ent.modified_by,
          modified_ts=ent.modified_ts,
          comment='Initial snapshot')
      to_put.append(ent)

    # Store changes, update the marker to make sure this won't run again.
    ndb.put_multi(to_put)
    auth_db_rev = model.replicate_auth_db()
    _AuthDBSnapshotMarker(key=key, auth_db_rev=auth_db_rev).put()
Code Example #44
    def post(self):
        register_name = self.request.get('register_name',
                                         DEFAULT_REGISTER_NAME)

        all_trades = ClerkshipTrade.query(
            ancestor=register_key(register_name)).fetch()

        # zero out past matches
        for trade in all_trades:
            trade.match_to_desired = None
            trade.match_to_current = None
        ndb.put_multi(all_trades)

        # Get request and update trades to match
        results = json.loads(self.request.body)
        for key, matches in results['data'].iteritems():
            trade = ndb.Key(urlsafe=key).get()
            to_current_key = ndb.Key(urlsafe=matches['match_to_current'])
            to_desired_key = ndb.Key(urlsafe=matches['match_to_desired'])
            trade.match_to_current = to_current_key
            trade.match_to_desired = to_desired_key
            trade.put()

        self.response.write('Save successful')
Code Example #45
File: api.py Project: maximsch2/flow-dashboard
 def action(self, d):
     '''
     '''
     action = self.request.get('action')
     res = {}
     if action == 'archive_complete':
         recent = Task.Recent(self.user, limit=20)
         to_archive = []
         for t in recent:
             if not t.archived and t.is_done():
                 t.archive()
                 to_archive.append(t)
         if to_archive:
             ndb.put_multi(to_archive)
             res['archived_ids'] = [t.key.id() for t in to_archive]
             self.message = "Archived %d %s" % (
                 len(to_archive),
                 tools.pluralize('task', count=len(to_archive)))
         else:
             self.message = "No completed tasks to archive"
         self.success = True
     else:
         self.message = "Unknown action"
     self.set_response(res)
Code Example #46
def importStories(r, limit=400, skip=0, overwrite=False):

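    # Pull stories from Parse and batch-write the converted entities to the datastore.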
    storyList = Story.Query.all().order_by('title').limit(limit).skip(skip)
    storyCount = 0
    wordCount = 0
    r.out.write('got stories from parse, overwrite=' + str(overwrite) +
                '<br/>')
    targetList = []
    for s in storyList:
        storyCount += 1
        r.out.write(str(storyCount) + '. ')
        story = processStory(s, r, overwrite)
        if story:
            targetList.append(story)
            wordCount += story.wordCount

    if (storyCount > 0):
        source = db.StorySource(title='Nature',
                                storyCount=storyCount,
                                wordCount=wordCount)
        targetList.append(source)
        ndb.put_multi(targetList)

    r.out.write('end of importStories<br/>')
Code Example #47
    def update_index(cls, title, old_data, new_data):
        old_pairs = cls.data_as_pairs(old_data)
        new_pairs = cls.data_as_pairs(new_data)

        deletes = old_pairs.difference(new_pairs)
        inserts = new_pairs.difference(old_pairs)

        # delete
        queries = [cls.query(cls.title == title, cls.name == name, cls.value == unicode(v.pvalue if isinstance(v, schema.Property) else v))
                   for name, v in deletes
                   if not isinstance(v, schema.Property) or v.should_index()]

        entities = reduce(lambda a, b: a + b, [q.fetch() for q in queries], [])
        keys = [e.key for e in entities]
        if len(keys) > 0:
            ndb.delete_multi(keys)

        # insert
        entities = [cls(title=title, name=name, value=unicode(v.pvalue if isinstance(v, schema.Property) else v))
                    for name, v in inserts
                    if not isinstance(v, schema.Property) or v.should_index()]

        if len(entities) > 0:
            ndb.put_multi(entities)
Code Example #48
    def get(self):
        q = taskqueue.Queue('views')
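        # Lease batches of pull-queue tasks, tally view counts per post,
        # apply the tallies to the BlogPost entities, then delete the tasks.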
        while True:
            try:
                tasks = q.lease_tasks(60, 1000)
                if not tasks:
                    return
                tallies = {}
                for t in tasks:
                    tallies[t.payload] = tallies.get(t.payload, 0) + 1
                objects = ndb.get_multi(
                    [ndb.Key(BlogPost, int(k)) for k in tallies])
                for object in objects:
                    object.views += tallies[str(object.key.id())]
                ndb.put_multi(objects)
                q.delete_tasks(tasks)

            except google.appengine.api.taskqueue.TransientError:
                print("google.appengine.api.taskqueue.TransientError")

            except google.appengine.runtime.apiproxy_errors.DeadlineExceededError:
                print(
                    "google.appengine.runtime.apiproxy_errors.DeadlineExceededError"
                )
Code Example #49
def accept_invite(member_id, token):
    member = TenantMembership.from_urlsafe(member_id)
    if not member:
        time.sleep(config.security_wait)
        return flask.render_template('no-invite-token.html'), 404
    assert isinstance(member, TenantMembership)
    if member.is_token_valid(token):
        # Invite accepted
        if g.current_account:
            member.user = g.current_account.key
            member.put()
            flask.session['current_tenant'] = member.tenant.urlsafe()

            flasher.success(_('Invite accepted'))
            return flask.redirect(flask.url_for('tenants.tenant_overview'))
        else:
            form = ChoosePasswordForm(flask.request.form)
            if form.validate_on_submit():
                account, auth = UserAccount.from_email(member.invite_email,
                                                       email_is_verified=True)
                auth.email_is_verified = True
                auth.email_verified_date = datetime.datetime.now()
                account.set_password(form.password.data)
                member.user = account.key

                put_multi([account, auth, member])
                login_user(account)
                flasher.success(_('Invite accepted'))
                return flask.redirect(flask.url_for('tenants.tenant_overview'))

            flask.session['current_tenant'] = member.tenant.urlsafe()
            return flask.render_template('invite-accept-choose-password.html',
                                         form=form)

    else:
        return flask.render_template('no-invite-token.html'), 404
Code Example #50
    def test_put_hook(self):
        """Test put hooks."""
        entity = TestModel3(prop_0=0)
        entity.put()
        self.assertEqual(1, entity.prop_1)
        self.assertEqual(entity.key.id(), entity.prop_2)

        self.assertDictEqual({
            'prop_0': 0,
            'prop_1': 1,
            'prop_2': None,
        },
                             entity.key.get().to_dict())

        entities = [
            TestModel3(id=entity.key.id(), prop_0=0),
            TestModel3(prop_0=1)
        ]
        ndb.put_multi(entities)
        self.assertEqual(1, entities[0].prop_1)
        self.assertEqual(entities[0].key.id(), entities[0].prop_2)
        self.assertEqual(2, entities[1].prop_1)
        self.assertEqual(entities[1].key.id(), entities[1].prop_2)

        self.assertItemsEqual([
            {
                'prop_0': 0,
                'prop_1': 1,
                'prop_2': None,
            },
            {
                'prop_0': 1,
                'prop_1': 2,
                'prop_2': None,
            },
        ], [e.to_dict() for e in TestModel3.query()])
Code Example #51
File: wipeout_service.py Project: purhan/oppia
    def _pseudonymize_models(activity_related_models, pseudonymized_user_id):
        """Pseudonymize user ID fields in the models.

        This function is run in a transaction, with the maximum number of
        activity_related_models being MAX_NUMBER_OF_OPS_IN_TRANSACTION.

        Args:
            activity_related_models: list(BaseModel). Models whose user IDs
                should be pseudonymized.
            pseudonymized_user_id: str. New pseudonymized user ID to be used for
                the models.
        """
        metadata_models = [
            model for model in activity_related_models
            if isinstance(model, snapshot_model_class)]
        for metadata_model in metadata_models:
            metadata_model.committer_id = pseudonymized_user_id

        commit_log_models = [
            model for model in activity_related_models
            if isinstance(model, commit_log_model_class)]
        for commit_log_model in commit_log_models:
            commit_log_model.user_id = pseudonymized_user_id
        ndb.put_multi(metadata_models + commit_log_models)
Code Example #52
File: impl.py Project: mcgreevy/chromium-infra
    def attach_tags(self, package_name, instance_id, tags, caller, now=None):
        """Adds a bunch of tags to an existing package instance.

        Idempotent. Skips existing tags. The package instance must exist and
        must have all processors successfully finished.

        Args:
          package_name: name of the package, e.g. 'infra/tools/cipd'.
          instance_id: identifier of the package instance (SHA1 of package file).
          tags: list of strings with tags to attach.
          caller: auth.Identity that issued the request.
          now: datetime when the request was made (or None for current time).

        Returns:
          {tag: corresponding InstanceTag (just created or existing one)}.
        """
        assert tags and all(is_valid_instance_tag(tag) for tag in tags), tags
        self._assert_instance_is_ready(package_name, instance_id)

        # Grab info about existing tags, register new ones.
        now = now or utils.utcnow()
        existing = ndb.get_multi(
            instance_tag_key(package_name, instance_id, tag) for tag in tags)
        to_create = [
            InstanceTag(key=instance_tag_key(package_name, instance_id, tag),
                        tag=tag,
                        registered_by=caller,
                        registered_ts=now) for tag, ent in zip(tags, existing)
            if not ent
        ]
        ndb.put_multi(to_create)

        attached = {}
        attached.update({e.tag: e for e in existing if e})
        attached.update({e.tag: e for e in to_create})
        return attached
Code Example #53
    def testGetGraphJson_WithSelectedTrace(self):
        self._AddTestColumns(start_rev=15000, end_rev=15050)
        rows = graph_data.Row.query(
            graph_data.Row.parent_test == utils.OldStyleTestKey(
                'ChromiumGPU/win7/dromaeo/jslib')).fetch()
        for row in rows:
            row.error = 1 + ((row.revision - 15000) * 0.25)
        ndb.put_multi(rows)

        flot_json_str = graph_json.GetGraphJson(
            {
                'ChromiumGPU/win7/dromaeo/jslib': ['jslib'],
            },
            rev=15000,
            num_points=8,
            is_selected=True)
        flot = json.loads(flot_json_str)

        self.assertEqual(1, len(flot['data']))
        self.assertEqual(5, len(flot['data']['0']['data']))
        self.assertEqual(1, len(flot['annotations']['series']))
        self.assertEqual(5, len(flot['annotations'].get('0').keys()))
        self.assertEqual(5, len(flot['error_bars']['0'][0]['data']))
        self.assertEqual(5, len(flot['error_bars']['0'][1]['data']))
Code Example #54
 def test_run_result_timeout(self):
   request = task_request.make_request(_gen_request_data())
   result_summary = task_result.new_result_summary(request)
   result_summary.modified_ts = utils.utcnow()
   ndb.transaction(result_summary.put)
   run_result = task_result.new_run_result(request, 1, 'localhost', 'abc')
   run_result.state = task_result.State.TIMED_OUT
   run_result.completed_ts = utils.utcnow()
   run_result.modified_ts = utils.utcnow()
   result_summary.set_from_run_result(run_result, request)
   ndb.transaction(lambda: ndb.put_multi((run_result, result_summary)))
   run_result = run_result.key.get()
   result_summary = result_summary.key.get()
   self.assertEqual(True, run_result.failure)
   self.assertEqual(True, result_summary.failure)
Code Example #55
 def _CreateRows(self):
     test_path = 'Chromium/win7/suite/metric'
     test_key = utils.TestKey(test_path)
     stat_names_to_test_keys = {
         'avg': utils.TestKey('Chromium/win7/suite/metric_avg'),
         'std': utils.TestKey('Chromium/win7/suite/metric_std'),
         'count': utils.TestKey('Chromium/win7/suite/metric_count'),
         'max': utils.TestKey('Chromium/win7/suite/metric_max'),
         'min': utils.TestKey('Chromium/win7/suite/metric_min'),
         'sum': utils.TestKey('Chromium/win7/suite/metric_sum')
     }
     histograms = add_histograms_queue_test.TEST_HISTOGRAM
     histograms['diagnostics'][
         reserved_infos.CHROMIUM_COMMIT_POSITIONS.name]['values'] = [99]
     ndb.put_multi(
         add_histograms_queue.CreateRowEntities(histograms, test_key,
                                                stat_names_to_test_keys,
                                                99))
     histograms['diagnostics'][
         reserved_infos.CHROMIUM_COMMIT_POSITIONS.name]['values'] = [200]
     ndb.put_multi(
         add_histograms_queue.CreateRowEntities(
             add_histograms_queue_test.TEST_HISTOGRAM, test_key,
             stat_names_to_test_keys, 200))
Code Example #56
File: Message.py Project: nitishparkar/udacityplus
    def delete_messages_for(cls, username, keys):
        """Deletes messages for username

        Args:
         username: username of the user who to delete messages for
         keys: list of Message keys to delete

        Returns:
         A list of keys for the deleted messages
        """
        msgs = ndb.get_multi(keys)

        for m in msgs:
            m.deleted_for = username

        return ndb.put_multi(msgs)
Code Example #57
 def setUp(self):
     super(TestOutput, self).setUp()
     request = _gen_request()
     result_summary = task_result.new_result_summary(request)
     result_summary.modified_ts = utils.utcnow()
     ndb.transaction(result_summary.put)
     to_run = task_to_run.new_task_to_run(request, 1, 0)
     self.run_result = task_result.new_run_result(request, to_run,
                                                  'localhost', 'abc', {})
     self.run_result.started_ts = result_summary.modified_ts
     self.run_result.modified_ts = utils.utcnow()
     ndb.transaction(lambda: result_summary.set_from_run_result(
         self.run_result, request))
     ndb.transaction(lambda: ndb.put_multi(
         (result_summary, self.run_result)))
     self.run_result = self.run_result.key.get()
Code Example #58
File: management.py Project: erwenzhang/ConnexUs
    def get(self):
        original_url = self.request.headers['Referer']
        dellsts = self.request.get_all("status")
        if (len(dellsts) > 0):
            streams = Stream.query(
                Stream.name.IN(dellsts),
                Stream.author == users.get_current_user()).fetch()
            for stream in streams:
                pictures = db.GqlQuery(
                    "SELECT * FROM Picture " + "WHERE ANCESTOR IS :1",
                    db.Key.from_path('Stream', stream.name))

                db.delete(pictures)

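            # put_multi returns the streams' keys, which are then passed to delete_multi.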
            ndb.delete_multi(ndb.put_multi(streams))
        self.redirect(original_url)
Code Example #59
def store_context(context):
    """Persist a furious.context.Context object to the datastore by loading it
    into a FuriousContext ndb.Model.
    """

    logging.debug("Attempting to store Context %s.", context.id)

    entity = FuriousContext.from_context(context)

    # TODO: Handle exceptions and retries here.
    marker = FuriousCompletionMarker(id=context.id)
    key, _ = ndb.put_multi((entity, marker))

    logging.debug("Stored Context with key: %s.", key)

    return key
Code Example #60
def _DetectTreeClosureForTree(tree_name, all_statuses):
    """Detects tree closures for the given tree, and return the number."""
    tree_closures = []
    index = 0

    previous_closure_complete = False
    while index < len(all_statuses):
        # Skip all leading open statuses to find the next closure.
        if not all_statuses[index].closed:
            index += 1
            continue

        close_index = index
        index += 1

        first_open_index = None
        # Skip all non-open status (close, throttle) to find the next open status.
        while index < len(all_statuses):
            if not all_statuses[index].closed:
                first_open_index = index
                break
            index += 1

        latest_open_index = first_open_index
        # Skip to the most recent open status for this tree closure.
        while index < len(all_statuses):
            if all_statuses[index].closed:
                break
            latest_open_index = index
            index += 1

        # Skip if no matching open status is found.
        if first_open_index is not None:
            # The identified closure might not be complete with all the open statuses.
            tree_closures.append(
                _CreateTreeClosure(
                    tree_name, all_statuses[close_index:latest_open_index + 1],
                    all_statuses[first_open_index]))
        else:
            # The previous tree closure was complete with all the open statuses,
            # because a new tree closure started and was incomplete.
            previous_closure_complete = True

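    # Drop the most recent closure if it may still be missing open statuses.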
    if not previous_closure_complete:
        tree_closures = tree_closures[:-1]
    # Save all the closures to datastore.
    return len(ndb.put_multi(tree_closures))