Example #1
 def post(self):
     """Create a new campaign."""
     campaign_dict = self.get_request_json()
     validate_campaign_dict(campaign_dict)
     # get a list of platforms
     platforms_list = campaign_dict["platforms"]
     del campaign_dict["platforms"]
     # construct and store a new campaign
     campaign = Campaign(**campaign_dict)
     campaign.put()
     campaign_id = campaign.key.id()
     # construct and store platforms for campaign
     platforms = []
     for platform_name in platforms_list:
         platform = Platform(name=platform_name,
                             counter=0,
                             campaign=campaign.key,
                             id="%d-%s" % (campaign_id, platform_name))
         platforms.append(platform)
     ndb.put_multi_async(platforms)
     # prepare response representation of the created campaign
     output = campaign_to_dict(campaign, platforms=platforms)
     # set the appropriate response headers
     self.response.location = self.uri_for("campaign-detail",
                                           campaign_id=campaign_id)
     self.response.status_int = 201
     return output
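This handler discards the futures returned by ndb.put_multi_async(platforms), so the platform writes finish in the background while the response is prepared. If the caller needed to block until those writes complete, the futures could be kept and waited on. A minimal standalone sketch of that variant (the Item model and sample data are made up for illustration):

from google.appengine.ext import ndb

class Item(ndb.Model):                    # hypothetical model, for illustration only
    name = ndb.StringProperty()

items = [Item(name=n) for n in ('a', 'b', 'c')]
futures = ndb.put_multi_async(items)      # one future per entity
ndb.Future.wait_all(futures)              # block until every put RPC completes
keys = [f.get_result() for f in futures]  # each future resolves to the stored Key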
Example #2
  def Reset(self):
    """Resets all policy (i.e. votes, rules, score) for the target blockable.

    Raises:
      BlockableNotFound: The target blockable ID is not a known Blockable.
    """
    logging.info('Resetting blockable: %s', self.blockable_id)

    self.blockable = base.Blockable.get_by_id(self.blockable_id)

    votes = self.blockable.GetVotes()

    # Delete existing votes.
    delete_futures = ndb.delete_multi_async(vote.key for vote in votes)

    # Store old vote entities with a different key indicating that they are
    # deactivated so they won't be counted towards the blockable's score.
    archived_votes = votes
    for vote in archived_votes:
      vote.key = base.Vote.GetKey(
          vote.blockable_key, vote.user_key, in_effect=False)
    ndb.put_multi_async(archived_votes)

    # Disable all existing rules.
    existing_rules = self.blockable.GetRules()
    for rule in existing_rules:
      rule.MarkDisabled()
    ndb.put_multi_async(existing_rules)

    # Create REMOVE-type rules from the existing blockable rules.
    self._GenerateRemoveRules(existing_rules)

    # Ensure past votes are deleted and then reset the blockable score.
    ndb.Future.wait_all(delete_futures)
    self.blockable.ResetState()
Example #3
File: tasks.py  Project: westmark/maeve
def sync_orders(character, api_orders, existing_orders):
  existing_orders = dict([(o.hash_key, o) for o in existing_orders])
  to_put = []

  for row in api_orders.orders:
    issued = datetime.fromtimestamp(row.issued)
    hash_key = hash((character.char_id,
                    row.volEntered,
                    str(row.stationID),
                    str(row.typeID),
                    issued))

    existing = existing_orders.get(hash_key, None)
    if existing:
      existing.remaining_quantity = row.volRemaining
      existing.order_state = row.orderState
      to_put.append(existing)

    else:
      order = MarketOrder(hash_key=hash_key,
                          character_key=character.key,
                          char_id=character.char_id,
                          original_quantity=row.volEntered,
                          remaining_quantity=row.volRemaining,
                          station_id=str(row.stationID),
                          type_id=str(row.typeID),
                          unit_price=row.price,
                          order_type=(row.bid and MarketOrder.BUY or MarketOrder.SELL),
                          order_state=row.orderState,
                          issued=issued)
      to_put.append(order)

  ndb.put_multi_async(to_put)
Example #4
File: admin.py  Project: achenxu/PickMeUp
def updateRideOffers():
    prop_to_delete = [
        'routes_info', 'fermate_intermedie', 'percorsi_passeggeri_compatibili'
    ]
    more, cursor = True, None
    updated_records = []
    percorsi = set()
    while more:
        records, cursor, more = RideOffer.query(
            RideOffer.active == True).fetch_page(100, start_cursor=cursor)
        for ent in records:
            changed = False
            for prop in prop_to_delete:
                if prop in ent._properties:
                    del ent._properties[prop]
                    changed = True
            if changed:
                updated_records.append(ent)
            percorsi.add(ent.percorso)
    if updated_records:
        print 'Updating {} records'.format(len(updated_records))
        create_futures = ndb.put_multi_async(updated_records)
        ndb.Future.wait_all(create_futures)
    if percorsi:
        import route
        print 'Updating {} percorsi'.format(len(percorsi))
        routes = []
        for n, percorso in enumerate(percorsi, 1):
            print '{}) {}'.format(n, percorso.encode('utf-8'))
            r = route.addRoute(percorso)
            r.populateWithDetails(put=False)
            routes.append(r)
        create_futures = ndb.put_multi_async(routes)
        ndb.Future.wait_all(create_futures)
Example #5
File: admin.py  Project: achenxu/PickMeUp
def updateFermate():
    from routing_util import FERMATE

    # Update old active fermate as inactive
    fermate_keys = FERMATE.keys()
    updated_count = 0
    more, cursor = True, None
    while more:
        records, cursor, more = Fermata.query(
            Fermata.active == True).fetch_page(100, start_cursor=cursor)
        updated_records = []
        for f in records:
            f.active = False
            if f.key.id() not in fermate_keys:
                updated_records.append(f)
        create_futures = ndb.put_multi_async(updated_records)
        ndb.Future.wait_all(create_futures)
        updated_count += len(updated_records)
    print 'Set {} fermate as inactive'.format(updated_count)

    # Adding new fermate
    # FERMATE {zona_stop: {'zona': refZona, 'stop': <fermata_name>, 'loc': (<lat>,<lon>)}}
    new_entries = []
    for f_key, f_info in FERMATE.items():
        f_entry = Fermata(id=f_key,
                          location=ndb.GeoPt(*f_info['loc']),
                          active=True)
        f_entry.update_location()
        new_entries.append(f_entry)
    create_futures = ndb.put_multi_async(new_entries)
    ndb.Future.wait_all(create_futures)
    print 'Added or updated {} new fermate'.format(len(create_futures))
Example #6
def initialize_classes(month, class_types):
    db_month = model.Month.get_or_insert_async(month.key.id())
    existing_classes = model.Class.query(ancestor=month.key
                                         ).fetch_async()

    # Compute new classes while we're trying to fetch any
    # existing classes.
    new_classes = []
    thursdays = month.Thursdays()
    for class_type in class_types:
        new_classes.append(model.Class(parent=month.key,
                                       type=class_type.key,
                                       days=thursdays))

    # Ensure month object exists. Unlike put()s, this doesn't get
    # completed at the end of the transaction.
    db_month.get_result()

    if existing_classes.get_result():
        # If there are already classes in this month,
        # don't insert new ones.
        return existing_classes.get_result()

    ndb.put_multi_async(new_classes)
    return new_classes
Example #7
  def update_poem(entry_keys, poem_type, vote):
    entries = ndb.get_multi(entry_keys)
    for i, entry_type in enumerate(POEM_TYPE_ENTRY_TYPES[poem_type]):
      if not entry_type:
        raise Exception('One or more entries is invalid.')
      elif entry_type != entries[i].type:
        raise Exception('Unexpected entry type: %s.' % entries[i].type)

    ids = []
    for entry in entries:
      ids += [entry.key.id()]
      if vote > 0:
        entry.upvotes += 1
      else:
        entry.downvotes += 1

    key = ndb.Key(Poem, encode_ids(ids))
    poem = key.get()
    if poem is None:
      poem = Poem(key=key, type=poem_type, entry_keys=entry_keys,
                  debug_text=', '.join(entry.text for entry in entries))
    if vote > 0:
      poem.upvotes += 1
    else:
      poem.downvotes += 1

    ndb.put_multi_async(entries + [poem])

    return poem
Example #8
def saveTopics(item, forum):
    topics = []
    count = 0
    for t in item['topic']:
        if '_id' not in t.keys():
            continue
        if t['_id'] is None:
            continue
        tags = []
        if isinstance(t['tags'], list):
            for tt in t['tags']:
                tags.append(tt['tag'])
        top_key = ndb.Key(Topic, str(t['_id']))
        if not counted(top_key, forum):
            count += 1
        topic = Topic(key=top_key,
                      top_id=str(t['_id']),
                      vote=t['votes'],
                      comment=t['comments'],
                      author=t['author'],
                      disp_topic=t['disp_topic'],
                      topic_type=str(t['topic_type']),
                      utime=datetime.strptime(t['utime'], '%m/%d/%Y %H:%M:%S'),
                      tags=tags,
                      forums=addForum(top_key, forum))
        topics.append(topic)
    ndb.put_multi_async(topics)
    last_id = str(item['last_id_current_page'])
    return count, last_id
Example #9
    def process_parsed_feed(cls, parsed_feed, feed, overflow, overflow_reason=OVERFLOW_REASON.BACKLOG):
        keys_by_guid = {guid_for_item(item): ndb.Key(cls, guid_for_item(item), parent=feed.key) for item in parsed_feed.entries}
        entries = yield ndb.get_multi_async(keys_by_guid.values())
        old_guids = [x.key.id() for x in entries if x]
        new_guids = filter(lambda x: x not in old_guids, keys_by_guid.keys())
        new_entries_by_guid = {x: cls(key=keys_by_guid.get(x), guid=x, creating=True) for x in new_guids}
        new_entries = yield ndb.put_multi_async(new_entries_by_guid.values())

        published = overflow
        futures = []
        for item in parsed_feed.entries:
            entry = new_entries_by_guid.get(guid_for_item(item))
            if not entry:
                continue

            futures.append((entry, prepare_entry_from_item(parsed_feed, item, feed, overflow, overflow_reason, published)))

        for entry, future in futures:
            entry_kwargs = yield future
            if not entry_kwargs:
                continue

            entry_kwargs.pop('parent')
            entry_kwargs['creating'] = False
            entry.populate(**entry_kwargs)

        saved_entries = yield ndb.put_multi_async(new_entries_by_guid.values())

        raise ndb.Return((new_guids, old_guids))
Example #10
    def delete_multi(
            cls, entity_ids, committer_id, commit_message,
            force_deletion=False):
        """Deletes the given cls instances with the given entity_ids.

        Note that this extends the superclass method.

        Args:
            entity_ids: list(str). Ids of entities to delete.
            committer_id: str. The user_id of the user who committed the change.
            commit_message: str. The commit description message.
            force_deletion: bool. If True, these models are deleted completely
                from storage, otherwise they are only marked as deleted.
                Default is False.
        """
        super(CollectionModel, cls).delete_multi(
            entity_ids, committer_id,
            commit_message, force_deletion=force_deletion)

        if not force_deletion:
            commit_log_models = []
            collection_rights_models = CollectionRightsModel.get_multi(
                entity_ids, include_deleted=True)
            versioned_models = cls.get_multi(entity_ids, include_deleted=True)
            for model, rights_model in python_utils.ZIP(
                    versioned_models, collection_rights_models):
                collection_commit_log = CollectionCommitLogEntryModel.create(
                    model.id, model.version, committer_id,
                    cls._COMMIT_TYPE_DELETE,
                    commit_message, [{'cmd': cls.CMD_DELETE_COMMIT}],
                    rights_model.status, rights_model.community_owned
                )
                collection_commit_log.collection_id = model.id
                commit_log_models.append(collection_commit_log)
            ndb.put_multi_async(commit_log_models)
Example #11
def _merge_notes(user_key, notes_from_client, old_last_synchronized,
                 new_last_synchronized):
    """
    Merge notes from client with notes from the server.

    :param user_key: Key of the :class:`google.appengine.api.users.User`
        with which the notes are associated.
    :param notes_from_client: Iterable of Note-like objects.
    :param datetime.datetime old_last_synchronized: Datetime after which to
        fetch server notes.
    :param datetime.datetime new_last_synchronized: Datetime of the current
        merge operation.
    :type user_key: :class:`google.appengine.ext.db.Key`
    :return: Notes to be merged back into the client.
    :rtype: list
    """
    from_server_map = {o.key: o for o in Note.get_synchronized_after(
        user_key, old_last_synchronized)}
    to_persist = []

    for client_note in notes_from_client:
        server_note, is_created = Note.get_or_create(user_key,
                                                     client_note.id)
        if is_created or client_note.modified >= server_note.modified:
            server_note.update_from_note(client_note, new_last_synchronized)
            to_persist.append(server_note)

            # The ``client_note`` supersedes the ``server_note``, so don't
            # return the ``server_note`` to the client.
            from_server_map.pop(server_note.key, None)

    if to_persist:
        ndb.put_multi_async(to_persist)
    return from_server_map.values()
Example #12
def refresh_EnkiModelRestAPIConnectToken_non_expiring():
	likelyhood = 10 # occurs with a probability of 1%
	number = random.randint( 1, 1000 )
	if number < likelyhood:
		list = fetch_EnkiModelRestAPIDataStore_non_expiring()
		for item in list:
			item.time_expires = datetime.datetime.now() + datetime.timedelta( seconds = DATASTORE_NON_EXPIRING )
		ndb.put_multi_async( list )
Example #13
 def post(self):
   movies_str = self.request.params.get('movies')
   logging.info(movies_str)
   movies = json.loads(movies_str)
   ndb.put_multi_async([movie.Movie(**amovie) for amovie in movies])
   self.response.headers.add_header("Access-Control-Allow-Origin", "*")
   self.response.headers['Content-Type'] = '*.*'
   self.response.write('Hello')
Example #14
  def updateClustering(self, wordSpaceLength):
    videos = self.query().fetch()
    vectors = [video.vector for video in videos]

    if len(vectors) >= self.NUM_CLUSTERS:
      clusterer = description_analysis.getClusterer( vectors, self.NUM_CLUSTERS, wordSpaceLength )
      for video in videos:
        video.clusterGroup = clusterer.classify( video.vector )
      ndb.put_multi_async(videos)
Example #15
 def refresh_non_expiring(cls):
     likelihood = 10  # occurs with a probability of 1%
     number = random.randint(1, 1000)
     if number < likelihood:
         list = cls.fetch_non_expiring()
         for item in list:
             item.time_expires = datetime.datetime.now(
             ) + datetime.timedelta(seconds=cls.DATASTORE_NON_EXPIRING)
         ndb.put_multi_async(list)
Example #16
def htmlParser(key, content):
    outlinks = re.findall(r'href=[\'"]?([^\'" >]+)', content)
    link_datums = []
    for link in outlinks:
        link_datum = LinkDbDatum(parent=key, link_url=link)
        link_datums.append(link_datum)
    ndb.put_multi_async(link_datums)
    content_links = re.findall(r'src=[\'"]?([^\'" >]+)', content)
    return content_links
Example #17
def htmlParser(key, content):
  outlinks = re.findall(r'href=[\'"]?([^\'" >]+)', content)
  link_datums = []
  for link in outlinks:
    link_datum = LinkDbDatum(parent=key, link_url=link)
    link_datums.append(link_datum)
  ndb.put_multi_async(link_datums) 
  content_links = re.findall(r'src=[\'"]?([^\'" >]+)', content) 
  return content_links
Example #18
def refresh_EnkiModelRestAPIConnectToken_non_expiring():
    likelyhood = 10  # occurs with a probability of 1%
    number = random.randint(1, 1000)
    if number < likelyhood:
        list = fetch_EnkiModelRestAPIDataStore_non_expiring()
        for item in list:
            item.time_expires = datetime.datetime.now() + datetime.timedelta(
                seconds=DATASTORE_NON_EXPIRING)
        ndb.put_multi_async(list)
Example #19
def process_parsed_feed(cls,
                        parsed_feed,
                        feed,
                        overflow,
                        overflow_reason=OVERFLOW_REASON.BACKLOG):

    feed_entries_by_guid = get_entries_by_guid(parsed_feed)

    new_entries_by_guid, new_guids, old_guids = yield stage_new_entries(
        cls, feed_entries_by_guid, feed.key)

    yield ndb.put_multi_async(new_entries_by_guid.values())

    entry_items = feed_entries_by_guid.items()
    # If we process first-time feeds backwards, the entries will be in the right added order
    entry_items = reversed(entry_items)

    published = overflow
    futures = []
    counter = 0
    first_time = getattr(feed, 'first_time', False)
    for guid, item in entry_items:
        entry = new_entries_by_guid.get(guid)
        if not entry:
            continue

        # We only need the first three items to be fully fleshed out on the first
        # fetch, because that is all the user can see in the preview area.
        # Otherwise, always fetch remote data.
        remote_fetch = True
        if first_time and counter > 2:
            remote_fetch = False

        added = datetime.now()
        futures.append(
            (entry,
             prepare_entry_from_item(item, feed, overflow, overflow_reason,
                                     published, added, remote_fetch)))
        counter += 1

    for entry, future in futures:
        entry_kwargs = yield future
        if not entry_kwargs:
            continue

        entry_kwargs.pop('parent')
        entry_kwargs['creating'] = False
        entry.populate(**entry_kwargs)

    if len(futures):
        feed.is_dirty = True
        yield feed.put_async()

    yield ndb.put_multi_async(new_entries_by_guid.values())

    raise ndb.Return((new_guids, old_guids))
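Examples #9, #19 and #27 use the ndb tasklet style: yield ndb.put_multi_async(...) suspends the coroutine until the whole batch put resolves, and raise ndb.Return(...) hands the result back to the caller. A minimal self-contained sketch of that control flow (the Entry model and the store_entries name are made up for illustration):

from google.appengine.ext import ndb

class Entry(ndb.Model):                        # hypothetical model, for illustration only
    guid = ndb.StringProperty()

@ndb.tasklet
def store_entries(guids):
    entries = [Entry(guid=g) for g in guids]
    keys = yield ndb.put_multi_async(entries)  # suspend until every put RPC finishes
    raise ndb.Return(keys)                     # the tasklet's "return value"

# Calling a tasklet returns a future; get_result() blocks until the keys are ready.
keys = store_entries(['a', 'b']).get_result()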
Example #20
def _CopyLocalRules(user_key, dest_host_id):
    """Copy over a user's local rules to a newly-associated host.

  NOTE: Because of the implementation of local whitelisting on Bit9, many of
  these new copied local rules will likely be initially unfulfilled, that is,
  held in Upvote and not saved to Bit9.

  Args:
    user_key: str, The user for whom the rules will be copied.
    dest_host_id: str, The ID of the host for which the new rules will be
        created.
  """
    logging.info('Copying rules for %s to host %s', user_key.id(),
                 dest_host_id)

    username = user_map.EmailToUsername(user_key.id())
    host_query = bit9.Bit9Host.query(bit9.Bit9Host.users == username)
    src_host = yield host_query.get_async()
    if src_host is None:
        raise ndb.Return()
    assert src_host.key.id() != dest_host_id, (
        'User already associated with target host')

    # Get all local rules from that host.
    rules_query = bit9.Bit9Rule.query(
        bit9.Bit9Rule.host_id == src_host.key.id(),
        bit9.Bit9Rule.in_effect == True)  # pylint: disable=g-explicit-bool-comparison

    # Get a rough idea of how many rules we're in for. Since this is a
    # non-critical query, we limit the max number to a fairly low bound.
    rule_count = yield rules_query.count_async(limit=250)
    logging.info('Retrieved %s%s rules to copy',
                 '>' if rule_count == 250 else '', rule_count)

    # Copy the local rules to the new host.
    new_rules = []
    for batch in query_utils.Paginate(rules_query):
        for rule in batch:
            new_rule = model_utils.CopyEntity(rule,
                                              new_parent=rule.key.parent(),
                                              host_id=dest_host_id,
                                              user_key=user_key)
            new_rules.append(new_rule)
    logging.info('Copying %s rules to new host', len(new_rules))
    yield ndb.put_multi_async(new_rules)

    # Create the change sets necessary to submit the new rules to Bit9.
    changes = []
    for new_rule in new_rules:
        change = bit9.RuleChangeSet(rule_keys=[new_rule.key],
                                    change_type=new_rule.policy,
                                    parent=new_rule.key.parent())
        changes.append(change)
    logging.info('Creating %s RuleChangeSet', len(changes))
    yield ndb.put_multi_async(changes)
Example #21
    def add_firstaid_stats(self, student):
        keys = ndb.put_multi(list(self.add_firstaid_session_stats(student)))

        @ndb.toplevel
        def populate_cache(keys):
            for k in keys:
                k.get_async(use_cache=False)

        populate_cache(keys)
        stats = FirstAidUserStats.new_user_stats(student)
        ndb.put_multi_async(stats)
Example #22
def _CopyLocalRules(user_key, dest_host_id):
    """Copy over a user's local rules to a newly-associated host.

  NOTE: Because of the implementation of local whitelisting on Bit9, many of
  these new copied local rules will likely be initially unfulfilled, that is,
  held in Upvote and not saved to Bit9.

  Args:
    user_key: str, The user for whom the rules will be copied.
    dest_host_id: str, The ID of the host for which the new rules will be
        created.
  """
    logging.info('Copying rules for user %s to host %s', user_key.id(),
                 dest_host_id)

    # Query for a host belonging to the user.
    username = user_utils.EmailToUsername(user_key.id())
    query = host_models.Bit9Host.query(host_models.Bit9Host.users == username)
    src_host = yield query.get_async()
    if src_host is None:
        logging.warning('User %s has no hosts to copy from', username)
        raise ndb.Return()
    src_host_id = src_host.key.id()

    # Query for all the Bit9Rules in effect for the given user on the chosen host.
    query = rule_models.Bit9Rule.query(
        rule_models.Bit9Rule.host_id == src_host_id,
        rule_models.Bit9Rule.user_key == user_key,
        rule_models.Bit9Rule.in_effect == True)  # pylint: disable=g-explicit-bool-comparison, singleton-comparison
    src_rules = yield query.fetch_async()
    logging.info('Found a total of %d rule(s) for user %s', len(src_rules),
                 user_key.id())

    # Copy the local rules to the new host.
    logging.info('Copying %d rule(s) to host %s', len(src_rules), dest_host_id)
    new_rules = []
    for src_rule in src_rules:
        new_rule = datastore_utils.CopyEntity(src_rule,
                                              new_parent=src_rule.key.parent(),
                                              host_id=dest_host_id,
                                              user_key=user_key)
        new_rules.append(new_rule)
        new_rule.InsertBigQueryRow()
    yield ndb.put_multi_async(new_rules)

    # Create the change sets necessary to submit the new rules to Bit9.
    changes = []
    for new_rule in new_rules:
        change = bit9.RuleChangeSet(rule_keys=[new_rule.key],
                                    change_type=new_rule.policy,
                                    parent=new_rule.key.parent())
        changes.append(change)
    logging.info('Creating %d RuleChangeSet(s)', len(changes))
    yield ndb.put_multi_async(changes)
Example #23
  def get(self):
    exo_data = {}
    print('working')
    with open('kepler/total.json', 'r') as exoplanet_file:
      exo_data = json.loads(exoplanet_file.read())
      self.response.write(len(exo_data.keys()))
    exoplanet_file.close()
    for system_name in exo_data:
      to_put = []
      system = exo_data[system_name]

      sys_key = ndb.Key('System', system_name)
      sys = sys_key.get()
      if sys is None:
        sys = System(key=sys_key)
      system['name'] = system_name
      sys.populate(
        distance = system['distance'],
        name = system_name,
        num_planets = system['num_planets'],
        num_stars = system['num_stars'],
        eccentricity = system['eccentricity'],
        period = system['period'],
        inclination = system['inclination'],
        semimajor_axis = system['semimajor_axis'],
        ra = system['ra'],
        dec = system['dec'],
        habzone_min = system['habzone_min'],
        habzone_max = system['habzone_max'],
      )
      to_put.append(sys)
      # sys.put()

      for planet in system['planets']:
        # pl_key = ndb.Key("Planet", planet['name'], "System", system_name)
        # pl = pl_key.get()
        pl = Planet.get_by_id(planet['name'])
        if pl is None:
          pl = Planet(id=planet['name'], parent=sys_key)
        pl.populate(**planet)
        to_put.append(pl)
        # pl.put()

      for star in system['stars']:
        # st_key = ndb.Key("Star", star['name'], "System", system_name)
        # st = st_key.get()
        st = Star.get_by_id(star['name'])
        if st is None:
          st = Star(id=star['name'], parent=sys_key)
        st.populate(**star)
        to_put.append(st)
        # st.put()
      ndb.put_multi_async(to_put)
Example #24
 def post(self):
     forum = self.request.get('title')
     forum_key = ndb.Key(Forum, forum)
     loops = int(self.request.get('loops'))
     last_id = self.request.get('last_id')
     url = 'https://pantip.com/forum/topic/ajax_json_all_topic_info_loadmore'
     headers = {
         'User-Agent': '*****@*****.**',
         'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
         'x-requested-with': 'XMLHttpRequest'
     }
     payload = [('last_id_current_page', '0'), ('dataSend[room]', forum),
                ('dataSend[topic_type][type]', '0'),
                ('dataSend[topic_type][default_type]', '1'),
                ('thumbnailview', 'false'), ('current_page', '1')]
     if last_id != '0':
         payload[0] = (payload[0][0], last_id)
     res = requests.post(url, payload, headers=headers)
     j = res.json()
     item = j['item']
     looping = 0
     while len(item['topic']) > 0 and looping < loops:
         topics = []
         for t in item['topic']:
             if '_id' not in t.keys():
                 continue
             tags = []
             if isinstance(t['tags'], list):
                 for tt in t['tags']:
                     tags.append(ndb.Key(Tag, tt['tag']))
             top_key = ndb.Key(Topic, str(t['_id']))
             topic = Topic(key=top_key,
                           top_id=str(t['_id']),
                           vote=t['votes'],
                           comment=t['comments'],
                           author=t['author'],
                           disp_topic=t['disp_topic'],
                           topic_type=str(t['topic_type']),
                           utime=datetime.strptime(t['utime'],
                                                   '%m/%d/%Y %H:%M:%S'),
                           tags=tags,
                           forums=self._addForum(top_key, forum_key))
             topics.append(topic)
             # counting += 1
         ndb.put_multi_async(topics)
         # task.put_async()
         looping += 1
         last_id = str(item['last_id_current_page'])
         payload[0] = (payload[0][0], last_id)
         res = requests.post(url, payload, headers=headers)
         j = res.json()
         item = j['item']
Example #25
 def taskqueue_model_update(self):
     self.meta.change_view('json')
     from argeweb.core.model import DataUpdater, DataWatcher
     from google.appengine.datastore.datastore_query import Cursor
     updater = DataUpdater.query(DataUpdater.need_updater==True).get()
     if updater is None:
         self.context['data'] = {
             'update': 'done'
         }
         return
     setattr(updater, '__stop_update__', True)
     be_watcher = updater.updater.get()
     if be_watcher is None:
         updater.need_updater = False
         updater.put()
         self.context['data'] = {
             'update': 'be_watcher not exist'
         }
         return
     cursor = Cursor(urlsafe=updater.cursor)
     query = DataWatcher.query(DataWatcher.be_watcher == be_watcher.key)
     data, next_cursor, more = query.fetch_page(100, start_cursor=cursor)
     need_update_records = []
     last_item = None
     last_w = None
     for item in data:
         if last_item == item.watcher:
             w = last_w
         else:
             w = item.watcher.get()
             last_item = item.watcher
             last_w = w
             need_update_records.append(w)
         if w is not None:
             setattr(w, '__stop_update__', True)
             setattr(w, item.watcher_field, getattr(be_watcher, item.be_watcher_field))
         setattr(item, '__stop_update__', True)
         item.last_update = updater.last_update
     updater.cursor = next_cursor.urlsafe() if more else None
     updater.need_updater = more
     updater.put_async()
     ndb.put_multi_async(data)
     ndb.put_multi_async(need_update_records)
     self.context['data'] = {
         'update': updater.name
     }
     self.fire(
         event_name='create_taskqueue',
         url='/taskqueue/backend_ui_material/backend_ui_material/model_update'
     )
Example #26
def _copy_notes_between_users(from_user_key, to_user_key):
    """
    Copy notes from one :class:`google.appengine.api.users.User` to
    another.

    :param from_user_key: Key of the :class:`google.appengine.api.users.User`
        from which to copy the notes.
    :param to_user_key: Key of the :class:`google.appengine.api.users.User`
        to which to copy the notes.
    :type from_user_key: :class:`google.appengine.ext.db.Key`
    :type to_user_key: :class:`google.appengine.ext.db.Key`
    """
    notes = [src.copy_to_user(to_user_key) for src in Note.get_active(from_user_key)]
    if notes:
        ndb.put_multi_async(notes)
Example #27
def process_parsed_feed(cls, parsed_feed, feed, overflow, overflow_reason=OVERFLOW_REASON.BACKLOG):

    feed_entries_by_guid = get_entries_by_guid(parsed_feed)

    new_entries_by_guid, new_guids, old_guids = yield stage_new_entries(cls, feed_entries_by_guid, feed.key)

    yield ndb.put_multi_async(new_entries_by_guid.values())

    entry_items = feed_entries_by_guid.items()
    # If we process first-time feeds backwards, the entries will be in the right added order
    entry_items = reversed(entry_items)

    published = overflow
    futures = []
    counter = 0
    first_time = getattr(feed, 'first_time', False)
    for guid, item in entry_items:
        entry = new_entries_by_guid.get(guid)
        if not entry:
            continue

        # We only need the first three items to be fully fleshed out on the first
        # fetch, because that is all the user can see in the preview area.
        # Otherwise, always fetch remote data.
        remote_fetch = True
        if first_time and counter > 2:
            remote_fetch = False

        added = datetime.now()
        futures.append((entry, prepare_entry_from_item(item, feed, overflow, overflow_reason, published, added, remote_fetch)))
        counter += 1

    for entry, future in futures:
        entry_kwargs = yield future
        if not entry_kwargs:
            continue

        entry_kwargs.pop('parent')
        entry_kwargs['creating'] = False
        entry.populate(**entry_kwargs)

    if len(futures):
        feed.is_dirty = True
        yield feed.put_async()

    yield ndb.put_multi_async(new_entries_by_guid.values())

    raise ndb.Return((new_guids, old_guids))
Example #28
    def set_layout(cls, page, layoutid, headers):
        site = page.key.parent().get()
        layout = spec(site.theme, page.type, layoutid)

        t = {
            'links': '',
            'maps': headers.get('X-AppEngine-CityLatLong', ''),
        }

        if not layout:
            return page

        def callback():
            p = page.key.get()

            images = []
            for n, i in enumerate(layout.get('images', [])):
                if n >= len(p.images):
                    images.append(
                        Image(key=ndb.Key('Image', str(n), parent=p.key),
                              width=i[0],
                              height=i[1]))
                    p.images.append(images[-1].key)
            ndb.put_multi_async(images)

            for d in ['links', 'text', 'lines', 'maps']:
                a = getattr(p, d)
                a.extend([t.get(d, d)] * (layout.get(d, 0) - len(a)))
            p.layout = layoutid
            p.put()
            return p

        p = ndb.transaction(callback)

        images = ndb.get_multi(p.images)
        for n, i in enumerate(layout.get('images', [])):
            images[n].width = i[0]
            images[n].height = i[1]

            if images[n].type == IMAGE_TYPE_BLOB:
                images[n].set_type(IMAGE_TYPE_BLOB, images[n].blob_key.get())
                images[n].set_blob()
            else:
                images[n].set_type(IMAGE_TYPE_HOLDER)

        ndb.put_multi_async(images)

        return p
Example #29
def _PersistBit9Certificates(signing_chain):
    """Creates Bit9Certificates from the given Event protobuf.

  Args:
    signing_chain: List[api.Certificate] the signing chain of the event.

  Returns:
    An ndb.Future that resolves when all certs are created.
  """
    if not signing_chain:
        return datastore_utils.GetNoOpFuture()

    to_create = []
    for cert in signing_chain:
        thumbprint = cert.thumbprint
        existing_cert = bit9.Bit9Certificate.get_by_id(thumbprint)
        if existing_cert is None:
            cert = bit9.Bit9Certificate(id=thumbprint,
                                        id_type=cert.thumbprint_algorithm,
                                        valid_from_dt=cert.valid_from,
                                        valid_to_dt=cert.valid_to)

            # Insert a row into the Certificate table. Allow the timestamp to be
            # generated within InsertBigQueryRow(). The Blockable.recorded_dt Property
            # is set to auto_now_add, but this isn't filled in until persist time.
            cert.InsertBigQueryRow(constants.BLOCK_ACTION.FIRST_SEEN)

            to_create.append(cert)

    futures = ndb.put_multi_async(to_create)
    return datastore_utils.GetMultiFuture(futures)
Example #30
def write_commits_to_db(commits, project, repo, batch=100):
    """Write provided commits to the database (chunked in batches).

  Args:
    commits (list): a list of commit dictionaries returned by gitiles
    project (models.Project): the current project object
    repo (models.Repo): the current repo object
    batch (int): the number of commits to write at a time
  """

    futures = []
    # Batch our writes so we don't blow our memory limit.
    for chunk in (commits[i:i + batch] for i in range(0, len(commits), batch)):
        converted_tuples = [
            convert_commit_json_to_commit(project, repo, c) for c in chunk
        ]
        commit_objs, map_futures = zip(*converted_tuples)
        for future_list in map_futures:
            futures.extend(future_list)
        futures.extend(ndb.put_multi_async(commit_objs))
        logging.info('%d commits dispatched for write' % len(commit_objs))
        ndb.get_context().clear_cache()

    ndb.Future.wait_all(futures)
    logging.info('all set.')
Example #31
 def get(self):
     logging.info("Cron starting...")
     
     try:
         # authenticate to twitter
         client = twitter.Api(consumer_key=Globals.CONSUMER_KEY,
                              consumer_secret=Globals.CONSUMER_SECRET,
                              access_token_key=Globals.CLIENT_TOKEN,
                              access_token_secret=Globals.CLIENT_SECRET,
                              cache=None)
         
         q_futures = []
         for region in Globals.REGIONS:
             # make request
             response = client.GetTrendsWoeid(id=region)
             # get current timestamp in seconds
             timestamp = int(math.floor(time.time()))
             # put trends to db
             entityList = []
             for trend in response:
                 entityList.append(Trend(name=trend.name, woeid=region, timestamp=timestamp, time=10))
             q_futures.extend(ndb.put_multi_async(entityList))
         
         # wait all async put operations to finish.
         ndb.Future.wait_all(q_futures)
     except Exception, e:
         traceback.print_exc()
         Error(msg=str(e), timestamp=int(math.floor(time.time()))).put()
Example #32
def _fix_build_async(build_key):  # pragma: no cover
    in_props_key = model.BuildInputProperties.key_for(build_key)
    infra_key = model.BuildInfra.key_for(build_key)
    build, in_props, build_infra = yield ndb.get_multi_async(
        [build_key, in_props_key, infra_key])
    if not build or not build.is_ended:
        return

    to_put = []

    if not in_props:
        to_put.append(
            model.BuildInputProperties(
                key=in_props_key,
                properties=build.input_properties_bytes or '',
            ))

    if not build_infra:
        to_put.append(
            model.BuildInfra(
                key=infra_key,
                infra=build.parse_infra().SerializeToString(),
            ))

    if to_put:
        yield ndb.put_multi_async(to_put)
Example #33
def split_pdf_to_pages(pdf_key):
    pdf = blobstore.BlobReader(pdf_key).read()

    logging.info("Attmempting to extract images from PDF of %d bytes" % len(pdf))
    pages = extract_images_from_pdf(pdf)

    to_wait = []
    last = None
    for page in pages:
        file_name = files.blobstore.create(mime_type="image/jpeg")
        with files.open(file_name, "a") as f:
            f.write(page)
        files.finalize(file_name)
        blob_key = files.blobstore.get_blob_key(file_name)

        p = PreCommitteePage(pdf=pdf_key, page=blob_key)
        to_wait.extend(ndb.put_multi_async([p]))
        last = p

    ndb.Future.wait_all(to_wait)

    if last != None:
        last.last = True
    else:
        last = PreCommitteePage(pdf=pdf_key, page=None, last=True)
    ndb.put_multi([last])
Example #34
  def _CreateNewLocalRules(self, uuid, user_key):
    """Creates copies of all local rules for the new host."""
    # Pick any host owned by the user to copy rules from. Exclude hosts that
    # haven't completed a full sync because they won't have a complete rule set.
    # NOTE: Because we expect all hosts owned by a user to have the same local
    # rules, we should get the same rules set with any one of the user's hosts.
    username = user_map.EmailToUsername(user_key.id())
    host_query = santa_db.SantaHost.query(
        santa_db.SantaHost.primary_user == username,
        santa_db.SantaHost.last_postflight_dt != None)  # pylint: disable=g-equals-none
    a_host = host_query.get()
    if a_host is None:
      return utils.GetNoOpFuture()

    # Get all local rules from that host.
    rules_query = santa_db.SantaRule.query(
        santa_db.SantaRule.host_id == a_host.key.id(),
        santa_db.SantaRule.in_effect == True)  # pylint: disable=g-explicit-bool-comparison

    # Copy the local rules to the new host.
    new_rules = []
    for batch in query_utils.Paginate(rules_query):
      for rule in batch:
        new_rule = utils.CopyEntity(
            rule, new_parent=rule.key.parent(), host_id=uuid, user_key=user_key)
        new_rules.append(new_rule)

    futures = ndb.put_multi_async(new_rules)
    return utils.GetMultiFuture(futures)
Example #35
def sync_gapps_orgunits():
    directory = create_directory_service()
    orgunits = directory.orgunits().list(customerId=API_ACCESS_DATA[CURRENT_DOMAIN]["CUSTOMER_ID"], type="all").execute()
    for orgunit in orgunits["organizationUnits"]:
        ou = OrgUnit.new(orgunit["name"])
        users = CirclePerson.query(CirclePerson.orgUnitPath == orgunit["orgUnitPath"])
        ou.people = ndb.put_multi_async([x for x in users])  # is this equivalent to [x.key for x in users] ?
Example #36
  def update_auth_db():
    # AuthDB changed since 'new_auth_db_snapshot' transaction? Back off.
    state = model.get_replication_state()
    if state.auth_db_rev != current_state.auth_db_rev:
      return False, state

    # Update auth_db_rev in AuthReplicationState.
    state.auth_db_rev = auth_db_rev
    state.modified_ts = modified_ts

    # Apply changes.
    futures = []
    futures.extend(ndb.put_multi_async([state] + entites_to_put))
    futures.extend(ndb.delete_multi_async(keys_to_delete))

    # Wait for all pending futures to complete. Aborting the transaction with
    # outstanding futures is a bad idea (ndb complains in log about that).
    ndb.Future.wait_all(futures)

    # Raise an exception, if any.
    for future in futures:
      future.check_success()

    # Success.
    return True, state
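The update_auth_db callback above issues its puts and deletes concurrently and then drains every future before returning, because finishing a transaction with outstanding futures is a bad idea. A standalone sketch of the same pattern, with hypothetical entities_to_put / keys_to_delete arguments (xg=True is assumed because the entities may span entity groups):

from google.appengine.ext import ndb

@ndb.transactional(xg=True)
def apply_changes(entities_to_put, keys_to_delete):
    # Issue both write batches concurrently inside the transaction.
    futures = []
    futures.extend(ndb.put_multi_async(entities_to_put))
    futures.extend(ndb.delete_multi_async(keys_to_delete))
    # Block until every RPC finishes before the transaction commits.
    ndb.Future.wait_all(futures)
    # Re-raise the first failure, if any RPC failed.
    for f in futures:
        f.check_success()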
Example #37
    def save(self):
        """
    Pickle the recipient list array into the RecipientQueueData data property.

    Called:
    1. when the recipients length >= SAVE_QUEUE_SIZE,
    2. when moving to the next hourly schedule, and
    3. to flush the last remaining recipient queue that has not been saved.
    """

        if len(self.save_queue) > 0:
            valid_rows = [
                row for row in self.save_queue if not row.has_key('invalid')
            ]
            invalid_rows = [
                row for row in self.save_queue if row.has_key('invalid')
            ]

            rqd = RecipientQueueData(data=json.dumps(valid_rows),
                                     schedule_key=self.new_schedule.key)
            ies = [
                InvalidEmails.new(self.new_schedule.key, row)
                for row in invalid_rows
            ]

            self.new_schedule.hour_capacity += len(valid_rows)
            self.new_schedule.invalid_email += len(invalid_rows)
            self.list_of_rqd.extend(ndb.put_multi_async([rqd] + ies))

            self.save_queue = []
            self.save_queue_index = 0
Example #38
File: sync.py  Project: crudbug/upvote
def _PersistBit9Certificates(signing_chain):
    """Creates Bit9Certificates from the given Event protobuf.

  Args:
    signing_chain: List[api.Certificate] the signing chain of the event.

  Returns:
    An ndb.Future that resolves when all certs are created.
  """
    if not signing_chain:
        return datastore_utils.GetNoOpFuture()

    to_create = []
    for cert in signing_chain:
        thumbprint = cert.thumbprint
        existing_cert = bit9.Bit9Certificate.get_by_id(thumbprint)
        if existing_cert is None:
            cert = bit9.Bit9Certificate(id=thumbprint,
                                        id_type=cert.thumbprint_algorithm,
                                        valid_from_dt=cert.valid_from,
                                        valid_to_dt=cert.valid_to)

            cert.PersistRow(constants.BLOCK_ACTION.FIRST_SEEN,
                            timestamp=cert.recorded_dt)

            to_create.append(cert)

    futures = ndb.put_multi_async(to_create)
    return datastore_utils.GetMultiFuture(futures)
Example #39
def _AddRowsFromData(params, revision, parent_test, legacy_parent_tests):
  data_dict = params['data']
  test_key = parent_test.key

  stat_names_to_test_keys = {k: v.key for k, v in
                             legacy_parent_tests.items()}
  rows = CreateRowEntities(
      data_dict, test_key, stat_names_to_test_keys, revision)
  if not rows:
    raise ndb.Return()

  yield ndb.put_multi_async(rows) + [r.UpdateParentAsync() for r in rows]

  tests_keys = []
  is_monitored = parent_test.sheriff and parent_test.has_rows
  if is_monitored:
    tests_keys.append(parent_test.key)

  for legacy_parent_test in legacy_parent_tests.values():
    is_monitored = legacy_parent_test.sheriff and legacy_parent_test.has_rows
    if is_monitored:
      tests_keys.append(legacy_parent_test.key)

  tests_keys = [
      k for k in tests_keys if not add_point_queue.IsRefBuild(k)]

  # Updating of the cached graph revisions should happen after put because
  # it requires the new row to have a timestamp, which happens upon put.
  futures = [
      graph_revisions.AddRowsToCacheAsync(rows),
      find_anomalies.ProcessTestsAsync(tests_keys)]
  yield futures
Example #40
def _assign_task_num(time_fn=datetime.datetime.utcnow):
  expired_keys = []
  unassigned = []
  used_task_nums = []
  time_now = time_fn()
  expired_time = time_now - datetime.timedelta(
      seconds=shared.INSTANCE_EXPIRE_SEC)
  for entity in shared.Instance.query():
    # Don't reassign expired task_num right away to avoid races.
    if entity.task_num >= 0:
      used_task_nums.append(entity.task_num)
    # At the same time, don't assign task_num to expired entities.
    if entity.last_updated < expired_time:
      expired_keys.append(entity.key)
      shared.expired_counter.increment()
      logging.debug(
          'Expiring %s task_num %d, inactive for %s',
          entity.key.id(), entity.task_num,
          time_now - entity.last_updated)
    elif entity.task_num < 0:
      shared.started_counter.increment()
      unassigned.append(entity)

  logging.debug('Found %d expired and %d unassigned instances',
                len(expired_keys), len(unassigned))

  used_task_nums = sorted(used_task_nums)
  for entity, task_num in zip(unassigned, find_gaps(used_task_nums)):
    entity.task_num = task_num
    logging.debug('Assigned %s task_num %d', entity.key.id(), task_num)
  futures_unassigned = ndb.put_multi_async(unassigned)
  futures_expired = ndb.delete_multi_async(expired_keys)
  ndb.Future.wait_all(futures_unassigned + futures_expired)
  logging.debug('Committed all changes')
Example #41
	def _batch_write(self):
		"""Writes updates and deletes entities in a batch."""
		logging.debug("Batch writing")
		if self.to_put and len(self.to_put) > 0:
			put_rpcs = ndb.put_multi_async(self.to_put)
		else:
			put_rpcs = None

		if self.to_delete and len(self.to_delete) > 0:
			delete_rpcs = ndb.delete_multi_async(self.to_delete)
		else:
			delete_rpcs = None

		if self.blobstore_to_delete and len(self.blobstore_to_delete) > 0:
			blobstore_delete_rpc = blobstore.delete_async(self.blobstore_to_delete)
		else:
			blobstore_delete_rpc = None

		if put_rpcs:
			for put_rpc in put_rpcs:
				put_rpc.wait()
			self.to_put = []

		if delete_rpcs:
			for delete_rpc in delete_rpcs:
				delete_rpc.wait()
			self.to_delete = []

		if blobstore_delete_rpc:
			blobstore_delete_rpc.wait()
			self.blobstore_to_delete = []
Example #42
File: admin.py  Project: achenxu/PickMeUp
def updatePercorsiInRideOffers(test=False):
    import route
    more, cursor = True, None
    while more:
        # for all ride offers, including expired ones
        records, cursor, more = RideOffer.query().fetch_page(
            100, start_cursor=cursor)
        updating_records = []
        for r in records:
            old_percorso = convertToUtfIfNeeded(r.percorso)
            new_percorso = getNewPercorso(old_percorso)
            if new_percorso is None:
                # triggering a warning in getNewPercorso
                print 'aborting'
                return
            if old_percorso != new_percorso:
                print 'updating percorso from {} to {}'.format(
                    old_percorso, new_percorso)
                r.percorso = new_percorso
                updating_records.append(r)
                if Route.get_by_id(new_percorso) is None:
                    rt = route.addRoute(new_percorso)
                    rt.populateWithDetails(put=not test)
                    print 'populating new route: {}'.format(new_percorso)
        if not test:
            create_futures = ndb.put_multi_async(updating_records)
            ndb.Future.wait_all(create_futures)
Example #43
File: admin.py  Project: achenxu/PickMeUp
def updatePercorsiPreferiti(test=False):
    more, cursor = True, None
    while more:
        records, cursor, more = Person.query(
            Person.percorsi_size > 0).fetch_page(100, start_cursor=cursor)
        updating_records = []
        for r in records:
            new_percorsi = []
            updated = False
            for old_percorso in r.percorsi:
                old_percorso = convertToUtfIfNeeded(old_percorso)
                new_percorso = getNewPercorso(old_percorso)
                if new_percorso is None:
                    # triggering a warning in getNewPercorso
                    print 'aborting'
                    return
                if old_percorso != new_percorso:
                    updated = True
                    print 'updating percorso from {} to {}'.format(
                        old_percorso, new_percorso)
                    new_percorsi.append(new_percorso)
                else:
                    new_percorsi.append(old_percorso)
            if updated:
                r.percorsi = new_percorsi
                updating_records.append(r)
        if not test:
            create_futures = ndb.put_multi_async(updating_records)
            ndb.Future.wait_all(create_futures)
Example #44
  def update_auth_db():
    # AuthDB changed since 'new_auth_db_snapshot' transaction? Back off.
    state = model.get_replication_state()
    if state.auth_db_rev != current_state.auth_db_rev:
      return False, state

    # Update auth_db_rev in AuthReplicationState.
    state.auth_db_rev = auth_db_rev
    state.modified_ts = modified_ts

    # Apply changes.
    futures = []
    futures.extend(ndb.put_multi_async([state] + entites_to_put))
    futures.extend(ndb.delete_multi_async(keys_to_delete))

    # Wait for all pending futures to complete. Aborting the transaction with
    # outstanding futures is a bad idea (ndb complains in log about that).
    ndb.Future.wait_all(futures)

    # Raise an exception, if any.
    for future in futures:
      future.check_success()

    # Success.
    return True, state
Example #45
def _AddRowsFromData(params, revision, parent_test, legacy_parent_tests):
    data_dict = params['data']
    test_key = parent_test.key

    stat_names_to_test_keys = {
        k: v.key
        for k, v in legacy_parent_tests.iteritems()
    }
    rows = CreateRowEntities(data_dict, test_key, stat_names_to_test_keys,
                             revision)
    if not rows:
        raise ndb.Return()

    yield ndb.put_multi_async(rows) + [r.UpdateParentAsync() for r in rows]

    tests_keys = []
    is_monitored = parent_test.sheriff and parent_test.has_rows
    if is_monitored:
        tests_keys.append(parent_test.key)

    for legacy_parent_test in legacy_parent_tests.itervalues():
        is_monitored = legacy_parent_test.sheriff and legacy_parent_test.has_rows
        if is_monitored:
            tests_keys.append(legacy_parent_test.key)

    tests_keys = [k for k in tests_keys if not add_point_queue.IsRefBuild(k)]

    # Updating of the cached graph revisions should happen after put because
    # it requires the new row to have a timestamp, which happens upon put.
    futures = [
        graph_revisions.AddRowsToCacheAsync(rows),
        find_anomalies.ProcessTestsAsync(tests_keys)
    ]
    yield futures
Example #46
    def testPutAsync(self):

        key1 = ndb.Key(helper_models.TestBC1, 1)
        model1 = helper_models.TestBC1(key=key1)

        key2 = ndb.Key(helper_models.TestBC2, 2)
        model2 = helper_models.TestBC2(key=key2)

        key3 = ndb.Key(helper_models.TestBC3, 3)
        model3 = helper_models.TestBC3(key=key3)

        self.removeNDBCache(key1)
        self.removeNDBCache(key2)
        self.removeNDBCache(key3)
        self.clearContext()

        futures = ndb.put_multi_async([model1, model2, model3])

        for future in futures:
            key = future.get_result()
            model = key.get()

            if key.id() in [1, 2]:
                self.assertEqual({}, model.get_counter_actions())
            if key.id() == 3:
                self.assertEqual({'bc3n': 1}, model.get_counter_actions())
Example #47
    def run():
        # 2 concurrent GET, one PUT. Optionally with an additional serialized GET.
        to_run_future = to_run_key.get_async()
        result_summary_future = result_summary_key.get_async()
        to_run = to_run_future.get_result()
        if not to_run or not to_run.is_reapable:
            result_summary_future.wait()
            return False

        to_run.queue_number = None
        result_summary = result_summary_future.get_result()
        if result_summary.try_number:
            # It's a retry that is being expired. Keep the old state. That requires an
            # additional pipelined GET but that shouldn't be the common case.
            run_result = result_summary.run_result_key.get()
            result_summary.set_from_run_result(run_result, request)
        else:
            result_summary.state = task_result.State.EXPIRED
        result_summary.abandoned_ts = now
        result_summary.modified_ts = now

        futures = ndb.put_multi_async((to_run, result_summary))
        _maybe_pubsub_notify_via_tq(result_summary, request)
        for f in futures:
            f.check_success()

        return True
Example #48
    def run():
        # 2 concurrent GET, one PUT. Optionally with an additional serialized GET.
        to_run_future = to_run_key.get_async()
        result_summary_future = result_summary_key.get_async()
        to_run = to_run_future.get_result()
        if not to_run or not to_run.is_reapable:
            result_summary_future.wait()
            return False

        to_run.queue_number = None
        result_summary = result_summary_future.get_result()
        if result_summary.try_number:
            # It's a retry that is being expired. Keep the old state. That requires an
            # additional pipelined GET but that shouldn't be the common case.
            run_result = result_summary.run_result_key.get()
            result_summary.set_from_run_result(run_result, request)
        else:
            result_summary.state = task_result.State.EXPIRED
        result_summary.abandoned_ts = now
        result_summary.modified_ts = now

        futures = ndb.put_multi_async((to_run, result_summary))
        _maybe_pubsub_notify_via_tq(result_summary, request)
        for f in futures:
            f.check_success()

        return True
Example #49
def _assign_task_num(time_fn=datetime.datetime.utcnow):
    expired_keys = []
    unassigned = []
    used_task_nums = []
    time_now = time_fn()
    expired_time = time_now - datetime.timedelta(
        seconds=shared.INSTANCE_EXPIRE_SEC)
    for entity in shared.Instance.query():
        # Don't reassign expired task_num right away to avoid races.
        if entity.task_num >= 0:
            used_task_nums.append(entity.task_num)
        # At the same time, don't assign task_num to expired entities.
        if entity.last_updated < expired_time:
            expired_keys.append(entity.key)
            shared.expired_counter.increment()
            logging.debug('Expiring %s task_num %d, inactive for %s',
                          entity.key.id(), entity.task_num,
                          time_now - entity.last_updated)
        elif entity.task_num < 0:
            shared.started_counter.increment()
            unassigned.append(entity)

    logging.debug('Found %d expired and %d unassigned instances',
                  len(expired_keys), len(unassigned))

    used_task_nums = sorted(used_task_nums)
    for entity, task_num in zip(unassigned, find_gaps(used_task_nums)):
        entity.task_num = task_num
        logging.debug('Assigned %s task_num %d', entity.key.id(), task_num)
    futures_unassigned = ndb.put_multi_async(unassigned)
    futures_expired = ndb.delete_multi_async(expired_keys)
    ndb.Future.wait_all(futures_unassigned + futures_expired)
    logging.debug('Committed all changes')
Example #50
def _SyncKoiExoPlanets(kois):
  """Sync kois with exo planets by creating new ones or updating existing.

  Args:
    list<dict>, the exo-planets to sync.
  """
  results = collections.defaultdict(list)
  koi_ids = [ep.get('kepid') for ep in kois]
  results['received'] += koi_ids
  to_sync = []
  existing_exo_planets = yield _MODEL.query(
      _MODEL.planet_id.IN(koi_ids)).fetch_async()
  exo_planet_map = {ep.planet_id: ep for ep in existing_exo_planets}
  for koi in kois:
    new_exo_planet_id = koi.get('kepid')
    existing_exo_planet = exo_planet_map.get(new_exo_planet_id)
    if existing_exo_planet and (
        existing_exo_planet.last_updated < datetime.datetime(
            koi.get('last_update', 0))):
      models.UpdateExoPlanetProperties(existing_exo_planet, koi)
      results['synced'].append(existing_exo_planet.planet_id)
      to_sync.append(existing_exo_planet)
    else:
      new_exo_planet = models.ExoPlanet.CreateExoPlanetEntity(koi)
      to_sync.append(new_exo_planet)
      results['added'].append(new_exo_planet.planet_id)
  yield ndb.put_multi_async(to_sync)
  raise ndb.Return(results)
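A hedged usage sketch, assuming the tasklet above is driven synchronously from surrounding request-handling code:

# Run the tasklet to completion and log the summary counts it returns.
results = _SyncKoiExoPlanets(kois).get_result()
logging.info('received=%d synced=%d added=%d',
             len(results['received']), len(results['synced']), len(results['added']))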
示例#51
0
def _ProcessTest(test_key):
  """Processes a test to find new anomalies.

  Args:
    test_key: The ndb.Key for a TestMetadata.
  """
  test = yield test_key.get_async()

  sheriff = yield _GetSheriffForTest(test)
  if not sheriff:
    logging.error('No sheriff for %s', test_key)
    raise ndb.Return(None)

  config = yield anomaly_config.GetAnomalyConfigDictAsync(test)
  max_num_rows = config.get('max_window_size', DEFAULT_NUM_POINTS)
  rows = yield GetRowsToAnalyzeAsync(test, max_num_rows)
  # If there were no rows fetched, then there's nothing to analyze.
  if not rows:
    # In some cases (e.g. if some points are deleted) it might be possible
    # that last_alerted_revision is incorrect. In this case, reset it.
    highest_rev = yield _HighestRevision(test_key)
    if test.last_alerted_revision > highest_rev:
      logging.error('last_alerted_revision %d is higher than highest rev %d '
                    'for test %s; setting last_alerted_revision to None.',
                    test.last_alerted_revision, highest_rev, test.test_path)
      test.last_alerted_revision = None
      yield test.put_async()
    logging.error('No rows fetched for %s', test.test_path)
    raise ndb.Return(None)

  # Get anomalies and check if they happen in ref build also.
  change_points = FindChangePointsForTest(rows, config)
  change_points = yield _FilterAnomaliesFoundInRef(
      change_points, test_key, len(rows))

  anomalies = yield [_MakeAnomalyEntity(c, test, rows) for c in change_points]

  # If no new anomalies were found, then we're done.
  if not anomalies:
    return

  logging.info('Created %d anomalies', len(anomalies))
  logging.info(' Test: %s', test_key.id())
  logging.info(' Sheriff: %s', test.sheriff.id())

  # Update the last_alerted_revision property of the test.
  test.last_alerted_revision = anomalies[-1].end_revision
  yield test.put_async()

  yield ndb.put_multi_async(anomalies)

  # TODO(simonhatch): email_sheriff.EmailSheriff() isn't a tasklet yet, so this
  # code will run serially.
  # Email sheriff about any new regressions.
  for anomaly_entity in anomalies:
    if (anomaly_entity.bug_id is None and
        not anomaly_entity.is_improvement and
        not sheriff.summarize):
      email_sheriff.EmailSheriff(sheriff, test, anomaly_entity)
示例#52
0
 def add_roshreview_stats(self, student):
     perfs = {
         k: random.randint(40, 100) for k in self.random_rosh_review_topics
     }
     average = sum(perfs.itervalues()) / len(perfs)
     data = {
         "email": student.secondary_email,
         "training_level": "Student",
         "pgy": None,
         "category_performances": perfs,
         "cumulative_performance": average,
         "percentage_complete": random.randint(10, 100),
     }
     stats = RoshReviewUserStats.new_stats(student, data, commit=False)
     stats.put_async()
     t_stats = stats.update_topic_stats(commit=False)
     ndb.put_multi_async(t_stats)
示例#53
0
@ndb.tasklet
def _MigrateHistogramClassData(cls, old_parent_key, new_parent_key):
  query = cls.query(cls.test == old_parent_key)
  entities = yield query.fetch_async(
      limit=_MAX_DATASTORE_PUTS_PER_PUT_MULTI_CALL,
      use_cache=False, use_memcache=False)
  for e in entities:
    e.test = new_parent_key
  yield ndb.put_multi_async(entities)
  raise ndb.Return(bool(entities))
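Since the tasklet above re-parents at most _MAX_DATASTORE_PUTS_PER_PUT_MULTI_CALL entities per call and returns True while any were found, a caller would presumably repeat it until it reports False; a minimal sketch under that assumption (the wrapper name is hypothetical):

@ndb.tasklet
def _MigrateAllHistogramClassData(cls, old_parent_key, new_parent_key):
  # Keep migrating batches until a pass finds nothing left to re-parent.
  while True:
    more = yield _MigrateHistogramClassData(cls, old_parent_key, new_parent_key)
    if not more:
      break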
示例#54
0
  def post(self):
    api_vcode = self.request.POST.get('api_vcode', None)
    api_id = self.request.POST.get('api_id', None)

    if not (api_id and api_vcode) or not(len(api_id) == 6 and len(api_vcode) == 64):
      env = dict(values=dict(api_id=api_id, api_vcode=api_vcode),
                 errors=dict(api_id=(not api_id or len(api_id) != 6), api_vcode=(not api_vcode or len(api_vcode) != 64)))

      self.session.add_flash(env, key='env_flash')

    else:
      if Account.query().filter(Account.api_id == api_id).count():
        self.session.add_flash('This API key has already been added to this profile', key='error_messages')
      else:
        api = Api(api_id, api_vcode)
        api.authenticate()

        if api.is_authenticated():
          accounts_chars = []

          for api_char in api.characters:
            if not filter(lambda c: api_char.characterID == c.char_id, self.userprofile.characters):
              accounts_chars.append(Character(user=users.get_current_user(),
                                              name=api_char.name,
                                              char_id=str(api_char.characterID)))

          account = Account(
                            user=users.get_current_user(),
                            api_vcode=api_vcode,
                            api_id=api_id,
                            key_name=self.request.POST.get('name', None)
                            )
          account.put()

          for char in accounts_chars:
            char.account_key = account.key

          put_multi_async(accounts_chars)
          #self.userprofile.characters = (self.userprofile.characters or []) + accounts_chars
          #self.userprofile.put_async()

          self.session.add_flash('Key added successfully', key='messages')

    self.redirect('/profile')
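The unqualified put_multi_async call in the example above is presumably imported straight from ndb rather than referenced through the module, e.g.:

# Assumed import that makes the unqualified call above resolve.
from google.appengine.ext.ndb import put_multi_async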
示例#55
0
    def POST(self, world):
        cur_world = world_exists(world)
        self.redirect_if_not_create(cur_world)

        i = web.input(long=[], lat=[], prop_name=[], prop_val=[])
        coord = []
        new = []
        access = cur_world.get_access_string(self.page_user)
        form = self.text_form()

        if not form.validates():
            world_menu = self.get_or_put_menu(cur_world, access)
            return self.reset_text_page(cur_world, form, access, world_menu)
        else:

            if cur_world.is_mod(self.page_user):
                visible = True
                """ Flush menus for everyone so they get reset"""
                mrpc = memcache.create_rpc()
                m = flush_caches("", cur_world, mrpc)
                try:
                    m.get_result()
                except AssertionError: # There were no caches to flush - is this even the problem?
                    pass
            else:
                visible = False

            new_text = Room(world = cur_world.key,
                            name=form.d.name,
                            short_desc=form.d.short_desc,
                            long_desc=form.d.long_desc,
                            added_by=self.page_user.key,
                            latitudes=i.lat,
                            longitudes=i.long,
                            visible=visible,
                            rejected=False
                        )
            new_text.put()

            for index, name in enumerate(i.prop_name):
                p = Property.get_by_id(int(name))
                new.append(PropertyValue())
                new[index].value = i.prop_val[index]
                new[index].of_property = p.key
                new[index].room = new_text.key
                new[index].added_by = self.page_user.key
                new[index].visible = visible

            valftrs = ndb.put_multi_async(new)
            self.set_menu(cur_world, access)

            for ftr in valftrs:
                ftr.get_result()

            return web.seeother('/%s/text/%s' % (cur_world.key.id(), new_text.key.id()))
示例#56
0
 def saveToDatastore(self, q_futures, trends, woeid, date):
     logging.info("Saving to datastore")
     entityList = []
     for trend in trends:
         entityList.append(
             TrendSummary(
                 name=trend['name'],
                 woeid=woeid,
                 date=date.getDate(),
                 duration=trend['duration'],
                 volume=trend['volume']))
     q_futures.extend(ndb.put_multi_async(entityList))
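saveToDatastore only appends the put futures to q_futures; presumably the caller, inside the same handler, waits on them once after all saves have been queued. A hedged sketch:

q_futures = []
self.saveToDatastore(q_futures, trends, woeid, date)
# Wait once for every queued write to complete before finishing the request.
ndb.Future.wait_all(q_futures)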
示例#57
0
 def putTrends(self, trends):
     logging.info("will put %s", len(trends))
     entityList = []  
     for trend in trends:
         entityList.append(
             TrendWindow(
                 name=trend.name,
                 woeid=trend.woeid,
                 timestamp=trend.timestamp,
                 time=trend.time,
                 volume=trend.volume))
     return ndb.put_multi_async(entityList)
示例#58
0
@ndb.tasklet
def put_uuids_async( uuids, namespace ):
   """
   Put UUIDs into a namespace.
   """
   all_key_names = __uuid_key_names( uuids, namespace )
   all_keys = [ndb.Key( GeneralUUID, key_name ) for key_name in all_key_names]
   
   uuid_data = [ GeneralUUID( key=key, uuid=uuid ) for (key, uuid) in zip( all_keys, uuids ) ]
   
   put_keys = yield ndb.put_multi_async( uuid_data )
   
   raise ndb.Return( put_keys )
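The __uuid_key_names helper is not shown; a minimal sketch, assuming it just derives one deterministic key name per UUID within the namespace, plus how a caller would drive the tasklet:

def __uuid_key_names( uuids, namespace ):
   # Hypothetical helper: namespace-prefixed key names, one per UUID.
   return [ "%s/%s" % (namespace, u) for u in uuids ]

# Drive the tasklet synchronously when the keys are needed right away.
keys = put_uuids_async( uuids, namespace ).get_result()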