def handleOpenClose(request, closing):
    """Close or re-open the river of drops belonging to one source user.

    Identifies the requesting user from the ``newsrivr_userid_md5`` cookie,
    looks up the drop named by ``request.POST["_id"]``, and adds (when
    ``closing`` is true) or removes (otherwise) that drop's ``user_id_str``
    in the requesting user's ``closed_drops`` list, then saves the user
    document.

    Returns a list of up to 30 ``id_str`` values for the newest drops of
    the same source user; empty when the cookie, user or drop is missing.
    """
    l_id_str = []
    # "in" instead of the Python-2-only dict.has_key().
    if "newsrivr_userid_md5" in request.COOKIES:
        userid = request.COOKIES["newsrivr_userid_md5"]
        if userid != "None":  # cookie may carry the literal string "None"
            user = getUserByMD5(userid)
            if user:
                drop_id = request.POST["_id"]
                drop = getCollDrops().find_one({"_id": objectid.ObjectId(drop_id)})
                if not drop:
                    # log label fixed: was copy-pasted "mailDrop" from the mail helper
                    clog("handleOpenClose: can't find drop")
                else:
                    user_id_str = drop["user_id_str"]
                    # Newest 30 drops from the same source user.
                    drops = getCollDrops().find({"user_id_str": user_id_str}, fields=["id_str"], sort=[("added_at_precise", -1)]).limit(30)
                    for d in drops:
                        l_id_str.append(d["id_str"])
                    if "closed_drops" not in user:
                        user["closed_drops"] = []
                    if closing:
                        user["closed_drops"].append(user_id_str)
                        # de-duplicate in case this river was already closed
                        user["closed_drops"] = list(set(user["closed_drops"]))
                    elif user_id_str in user["closed_drops"]:
                        user["closed_drops"].remove(user_id_str)
                    getCollUsers().save(user, safe=True)
    return l_id_str
def mailDrop(_id, frommail, reply_to, tomail, mail_templates, errorstr, replyto_name="Bezoeker"):
    """Render and send a drop as an email using the given template pair.

    ``mail_templates`` is a two-item sequence: (html template name,
    plain-text template name).  Returns ``errorstr`` on success and
    ``None`` when the drop or its owning user cannot be found.
    ``frommail`` is currently unused but kept for interface compatibility.
    """
    data = getCollDrops().find_one({"_id": objectid.ObjectId(_id)})
    if not data:
        return
    user = getUserByMD5(data["newsrivr_userid_md5"])
    if not user:
        return
    # Prefer the user's full name; fall back to the screen name.
    name = user["name"] if "name" in user else user["screen_name"]
    subject = striptags(data["org_content"])
    tmpl_html_body = loader.get_template(mail_templates[0])
    tmpl_plain_body = loader.get_template(mail_templates[1])
    c = Context({"data": data, "subject": subject, "user": user, "name": name, "datum": datetime.now()})
    html_content = tmpl_html_body.render(c)
    plain_content = tmpl_plain_body.render(c)
    msg = mailer.GenerateMessage(name, "*****@*****.**", replyto_name, reply_to, "A friend", tomail, subject, html_content, plain_content, [])
    # SendMessage's return value was never used; call for its side effect.
    mailer.SendMessage("*****@*****.**", [tomail], msg)
    return errorstr
def FromPrimary(cls, connection, pkey_value):
    """Return the record whose primary key equals ``pkey_value``.

    ``pkey_value`` may be an ObjectId or anything ObjectId() accepts
    (e.g. its 24-character hex string).

    Raises:
        NotExistError: when no matching record exists.
    """
    from pymongo import objectid
    if not isinstance(pkey_value, objectid.ObjectId):
        pkey_value = objectid.ObjectId(pkey_value)
    collection = cls.Collection(connection)
    # find_one() returns the document or None.  The original used find(),
    # whose cursor object is always truthy, so the not-found branch could
    # never fire and a missing key surfaced as IndexError on record[0].
    record = collection.find_one({cls._PRIMARY_KEY: pkey_value})
    if record is None:
        raise NotExistError('There is no %r for primary key %r' % (
            cls.__name__, pkey_value))
    return cls(connection, record)
def check_subscription(self):
    # Poll the XDS docs collection for documents stored since this
    # subscription was last checked, match them against the subscription,
    # forward matches via send_pcc10, and advance the lastChecked_ marker.
    # Returns the list of matched entries (possibly empty).
    subscription = self.subscription
    sid = self.subscriptionId()
    # print '[%s] Now at subscr' % id, self.subscription
    entries = []
    try:
        docsdb = MONCON.xds.docs
        lastUpdated = subscription['lastChecked_']
        # NOTE(review): endpoint is computed but not used below — presumably
        # consumed by send_pcc10 via self.subscription; verify.
        endpoint = subscription.get('endpoint_') or 'http://example.org/'
        patientId = subscription['patientId']
        # Only text/xml documents stored strictly after the last check.
        query = {'mimeType': 'text/xml', 'storedAt_': {'$gt': lastUpdated}}
        pid = parsePid(subscription['patientId'])['pid']
        if pid != '*':
            # '*' means "any patient"; otherwise restrict to this patient.
            query['patientId'] = patientId
        careRecordTimePeriod = subscription.get('careRecordTimePeriod')
        # if careRecordTimePeriod is not None:
        #     query['creationTime'] = {'$gte':careRecordTimePeriod['low'],
        #         '$lte':careRecordTimePeriod['high']}
        # clinicalStatementTimePeriod = subscription.get('clinicalStatementTimePeriod')
        # if clinicalStatementTimePeriod is not None:
        #     query['creationTime'] = {'$gte':clinicalStatementTimePeriod['low'],
        #         '$lte':clinicalStatementTimePeriod['high']}
        logging.debug('[%s] MONQ=%s', sid, query)
        # Ascending storedAt_ order so tm ends up as the newest timestamp seen.
        crs = docsdb.find(query, fields=['filename', 'patientId', 'storedAt_'], sort=[('storedAt_', pymongo.ASCENDING)])
        tm = None
        for d in crs:
            filename = d['filename']
            doc = load_xml_doc(filename)
            e = self.match_subscription(doc)
            tm = d['storedAt_']
            if len(e) > 0:
                entries.extend(e)
                send_pcc10(self.subscription, e, d['patientId'])
        if tm:
            # At least one document was examined: persist the new watermark
            # both in the in-memory dict and in the pcc collection.
            subscription['lastChecked_'] = tm
            MONCON.xds.pcc.update({'_id': objectid.ObjectId(sid)}, {"$set": { "lastChecked_": tm }})
        logging.debug("[%s] Total Entries Found: %d", sid, len(entries))
    finally:
        # Always release the mongo request, even if matching/sending fails.
        MONCON.end_request()
    return entries
def getStory(self, story_id):
    """Given the story id gives a story summary, along with tweet data
    for the most recent time period.

    Returns a dict with the story fields on success, or
    ``{'error': ...}`` when the id is malformed or unknown.
    """
    try:
        # named story_oid so the builtin ``id`` is not shadowed
        story_oid = objectid.ObjectId(str(story_id))
    except objectid.InvalidId:
        return {'error': 'Invalid Id'}
    story = self.stories.find_one({'_id': story_oid})
    if story is None:  # "is None", not "!= None"
        return {'error': 'Story does not exist'}
    # Tweets come from the last (most recent) period only.
    latest_tweets = story["periods"][-1]["tweets"]
    return {
        'title': story["title"],
        'summary': story["summary"],
        'link': story["link_main_story"],
        'keywords': story["keywords"],
        'wordcloud': Database.combineWordclouds(story["periods"]),
        'sentiment': Database.combineSentiment(story["periods"]),
        # list comprehension instead of map(lambda ...) — same list result
        'tweets': [{"user": t["user"], "text": t["text"], "score": t["score"]}
                   for t in latest_tweets],
    }
def run(self):
    """Build and send the share-a-drop email for ``self.drop_id``.

    Loads the drop and the sharing user, strips NewsRivr-internal helper
    markup from any simplified article HTML, renders the html/plain
    templates and mails the result to ``self.tomail``.  Returns the
    string "True" on success, ``None`` when the drop or user is missing.
    """
    data = getCollDrops().find_one({"_id": objectid.ObjectId(self.drop_id)})
    if not data:
        clog("mailDrop: can't find drop")
        return
    user = getUserByMD5(self.user_md5)
    if not user:
        clog("mailDrop: can't find user")
        return
    # Prefer the display name; fall back to the Twitter screen name.
    name = user["twitter_credentials"]["name"] or user["twitter_credentials"]["screen_name"]
    subject = striptags(data["org_content"])
    tmpl_html_body = loader.get_template("drop_share_html.html")
    tmpl_plain_body = loader.get_template("drop_share_txt.html")
    for l in data["followed_links"]:
        if "simplehtml" in l:
            # Strip the on-site reader-UI elements (nr_hide_*/nr_readmore_*)
            # before the HTML goes into an email body.
            soup = BeautifulSoup(l["simplehtml"])
            for tag in soup.findAll(True):
                if tag.has_key("id"):
                    if "nr_hide_" in str(tag["id"]):
                        tag.hidden = True
                    if "nr_readmore_" in str(tag["id"]):
                        tag.extract()
            l["simplehtml"] = soup.renderContents()
    cdict = {"data": data, "body": self.body, "subject": subject, "user": user, "name": name, "datum": data["created_at"]}
    if "retweet_created_at" in data:
        cdict["retweet_datum"] = data["retweet_created_at"]
    c = Context(cdict)
    html_content = tmpl_html_body.render(c)
    plain_content = tmpl_plain_body.render(c)
    msg = mailer.GenerateMessage("NewsRivr", "*****@*****.**", name, self.reply_to, self.tomail, self.tomail, subject, html_content, plain_content, [])
    # SendMessage's return value was never used; call for its side effect.
    mailer.SendMessage("*****@*****.**", [self.tomail], msg)
    return "True"
inserts = 0
row_count = 0
# Create dummy 2000->2010 crosswalk
if FILENAME == 'FAKE':
    # No real crosswalk file: map every geography onto itself with 100%
    # population/housing-unit overlap so downstream math still works.
    for geography in collection.find({}, fields=['geoid', 'xwalk']):
        if 'xwalk' not in geography:
            geography['xwalk'] = {}
        geography['xwalk'][geography['geoid']] = {
            'POPPCT00': 1.0,
            'HUPCT00': 1.0
        }
        collection.update({ '_id': objectid.ObjectId(geography['_id']) }, { '$set': { 'xwalk': geography['xwalk'] } }, safe=True)
        row_count += 1
        inserts += 1
else:
    # Load the real Census crosswalk CSV; rows for other states are skipped.
    with open(FILENAME) as f:
        rows = UnicodeCSVReader(f)
        headers = rows.next()
        for row in rows:
            row_count += 1
            row_dict = dict(zip(headers, row))
            if row_dict['STATE10'] != STATE_FIPS:
                continue
            # NOTE(review): loop body continues past this chunk — the
            # fetched geography is presumably updated with row_dict data.
            geography = collection.find_one({ 'geoid': row_dict['GEOID10'] }, fields=['xwalk'])
# NOTE(review): this chunk is the interior of a larger loop — ``table``,
# ``geography`` and ``computations`` are defined outside this view.
# For one table, compute absolute and percentage change between the
# 2000 and 2010 values of each key, then persist the updated data.
if table not in geography['data']['delta']:
    geography['data']['delta'][table] = {}
if table not in geography['data']['pct_change']:
    geography['data']['pct_change'][table] = {}
for k, v in geography['data']['2010'][table].items():
    # Only keys present in both decades can be compared.
    if k not in geography['data']['2000'][table]:
        continue
    value_2010 = float(v)
    value_2000 = float(geography['data']['2000'][table][k])
    # Avoid division by zero in the percentage computation.
    if value_2000 == 0:
        continue
    geography['data']['delta'][table][k] = str(value_2010 - value_2000)
    geography['data']['pct_change'][table][k] = str(
        (value_2010 - value_2000) / value_2000)
collection.update({'_id': objectid.ObjectId(geography['_id'])}, {'$set': { 'data': geography['data'] }}, safe=True)
computations += 1
print ' Row count: %i' % row_count
print ' Computations: %i' % computations
def getRecommend(self, recommend):
    """Fetch a single recommend document by its id string (or ObjectId-able value)."""
    oid = objectid.ObjectId(recommend)
    return self.conn.recommends.find_one({'_id': oid})