def savePredictionsStep(self):
    """Persist the per-section predicted mode probabilities to the section
    collection, then apply any client (study) specific updates.

    Reads self.cleanedResultVector, self.predictedProb (2-D array: one row
    per section), self.sectionIds, self.sectionUserIds and self.modeList,
    and writes to self.Sections.
    """
    from dao.user import User
    from dao.client import Client

    # The probability map is keyed by the distinct modes actually seen in
    # the cleaned result vector, in sorted order.
    uniqueModes = sorted(set(self.cleanedResultVector))
    for i in range(self.predictedProb.shape[0]):
        currSectionId = self.sectionIds[i]
        currProb = self.convertPredictedProbToMap(self.modeList, uniqueModes,
                                                  self.predictedProb[i])
        logging.debug("Updating probability for section with id = %s" % currSectionId)
        self.Sections.update({'_id': currSectionId},
                             {"$set": {"predicted_mode": currProb}})
        # Give the user's study client a chance to store extra fields
        # alongside the prediction (e.g. auto-confirm information).
        currUser = User.fromUUID(self.sectionUserIds[i])
        clientSpecificUpdate = Client(currUser.getFirstStudy()).clientSpecificSetters(
            currUser.uuid, currSectionId, currProb)
        # Fix: identity comparison with None (PEP 8) instead of `!= None`.
        if clientSpecificUpdate is not None:
            self.Sections.update({'_id': currSectionId}, clientSpecificUpdate)
def testRunBackgroundTasksForDay(self):
    """End-to-end check that running the default background tasks computes
    and stores a carbon footprint in the user's profile."""
    self.testUsers = [ "*****@*****.**", "*****@*****.**", "*****@*****.**",
                       "*****@*****.**", "*****@*****.**" ]
    # Seed the test database with known modes and sections.
    load_database_json.loadTable(self.serverName, "Stage_Modes", "tests/data/modes.json")
    load_database_json.loadTable(self.serverName, "Stage_Sections", "tests/data/testCarbonFile")
    # Let's make sure that the users are registered so that they have profiles
    for userEmail in self.testUsers:
        User.register(userEmail)
    self.SectionsColl = get_section_db()
    tests.common.updateSections(self)
    self.assertNotEqual(len(self.uuid_list), 0)
    # Can access the zeroth element because we know that then length is greater than zero
    # (see above)
    test_uuid = self.uuid_list[0]
    test_user = User.fromUUID(test_uuid)
    # The footprint must be absent before the background tasks run and
    # present afterwards.
    self.assertNotIn('carbon_footprint', test_user.getProfile().keys())
    default.runBackgroundTasks(test_user.uuid)
    self.assertIn('carbon_footprint', test_user.getProfile().keys())
def getScoreComponents(user_uuid, start, end):
    """Return the four raw score components for a user over [start, end).

    Components: classified-trip ratio, footprint excess over the optimal
    footprint, savings relative to an all-drive baseline, and headroom
    relative to the SB375 daily goal. Each ratio is 0 when its denominator
    is 0.
    """
    # The score is based on the following components:
    # - Percentage of trips classified. We are not auto-classifying high
    #   confidence trips, so don't need to handle those here
    user = User.fromUUID(user_uuid)
    pctClassified = common.getClassifiedRatio(user_uuid, start, end)

    footprintTuple = carbon.getFootprintCompareForRange(user.uuid, start, end)
    (myModeShareCount, avgModeShareCount,
     myModeShareDistance, avgModeShareDistance,
     myModeCarbonFootprint, avgModeCarbonFootprint,
     myModeCarbonFootprintNoLongMotorized,
     avgModeCarbonFootprintNoLongMotorized,
     myOptimalCarbonFootprint, avgOptimalCarbonFootprint,
     myOptimalCarbonFootprintNoLongMotorized,
     avgOptimalCarbonFootprintNoLongMotorized) = footprintTuple

    carbon.delLongMotorizedModes(myModeShareDistance)
    myAllDrive = carbon.getAllDrive(user.uuid, myModeShareDistance)
    myFootprintTotal = sum(myModeCarbonFootprintNoLongMotorized.values())
    myOptimalTotal = sum(myOptimalCarbonFootprintNoLongMotorized.values())
    logging.debug("myCarbonFootprintSum = %s, myOptimalFootprintSum = %s, myAllDrive = %s" %
                  (myFootprintTotal, myOptimalTotal, myAllDrive))

    def safeRatio(numerator, denominator):
        # Avoid ZeroDivisionError on empty ranges; 0 is the neutral score.
        return 0 if denominator == 0 else float(numerator) / denominator

    return [pctClassified,
            safeRatio(myFootprintTotal - myOptimalTotal, myOptimalTotal),
            safeRatio(myAllDrive - myFootprintTotal, myAllDrive),
            safeRatio(sb375DailyGoal - myFootprintTotal, sb375DailyGoal)]
def getResult(user_uuid):
    """Render the composite "choice" client view, embedding the other
    client views (game, leaderboard, data, common trips, recommendation)
    as base64-encoded payloads."""
    # This is in here, as opposed to the top level as recommended by the PEP
    # because then we don't have to worry about loading bottle in the unit tests
    from bottle import template
    import base64
    from dao.user import User
    from dao.client import Client

    user = User.fromUUID(user_uuid)
    templateVars = json.dumps({'curr_view': getCurrView(user_uuid),
                               'uuid': str(user_uuid),
                               'client_key': Client("choice").getClientKey()})
    encode = base64.b64encode
    return template("clients/choice/result_template.html",
                    variables=templateVars,
                    gameResult=encode(gamified.getResult(user_uuid)),
                    leaderboardResult=encode(leaderboard.getResult(user_uuid)),
                    dataResult=encode(data.getResult(user_uuid)),
                    commonTripsResult=encode(commontrips.getResult(user_uuid)),
                    recommendationResult=encode(recommendation.getResult(user_uuid)))
def getResult(user_uuid):
    """Render the "data" client view comparing this user's mode share and
    carbon footprint against the population average.

    Uses the cached footprint from the profile when present; otherwise
    recomputes it and caches it.
    """
    # This is in here, as opposed to the top level as recommended by the PEP
    # because then we don't have to worry about loading bottle in the unit tests
    from bottle import template
    user = User.fromUUID(user_uuid)
    currFootprint = getCarbonFootprint(user)
    if currFootprint is None:
        # Cache miss: compute and store for the next call.
        currFootprint = carbon.getFootprintCompare(user_uuid)
        setCarbonFootprint(user, currFootprint)
    # Unpack the 12-element footprint tuple; the "NoLongMotorized" entries
    # are not shown in this view.
    (myModeShareCount, avgModeShareCount,
     myModeShareDistance, avgModeShareDistance,
     myModeCarbonFootprint, avgModeCarbonFootprint,
     myModeCarbonFootprintNoLongMotorized,
     avgModeCarbonFootprintNoLongMotorized, # ignored
     myOptimalCarbonFootprint, avgOptimalCarbonFootprint,
     myOptimalCarbonFootprintNoLongMotorized,
     avgOptimalCarbonFootprintNoLongMotorized) = currFootprint
    # Values are JSON-encoded because the template consumes them in JS.
    renderedTemplate = template("clients/data/result_template.html",
        myModeShareCount = json.dumps(myModeShareCount),
        avgModeShareCount = json.dumps(avgModeShareCount),
        myModeShareDistance = json.dumps(myModeShareDistance),
        avgModeShareDistance = json.dumps(avgModeShareDistance),
        myModeCarbonFootprint = json.dumps(myModeCarbonFootprint),
        avgModeCarbonFootprint = json.dumps(avgModeCarbonFootprint),
        myOptimalCarbonFootprint = json.dumps(myOptimalCarbonFootprint),
        avgOptimalCarbonFootprint = json.dumps(avgOptimalCarbonFootprint))
    # logging.debug(renderedTemplate)
    return renderedTemplate
def runBackgroundTasksForDay(user_uuid, today):
    """Compute the week-over-week footprint comparison for `today`, cache it
    in the user's profile, and record per-user footprint stats entries."""
    today_dt = datetime.combine(today, time.max)
    user = User.fromUUID(user_uuid)
    # carbon compare results is a tuple. Tuples are converted to arrays
    # by mongodb
    # In [44]: testUser.setScores(('a','b', 'c', 'd'), ('s', 't', 'u', 'v'))
    # In [45]: testUser.getScore()
    # Out[45]: ([u'a', u'b', u'c', u'd'], [u's', u't', u'u', u'v'])
    weekago = today_dt - timedelta(days=7)
    carbonCompareResults = carbon.getFootprintCompareForRange(user_uuid, weekago, today_dt)
    setCarbonFootprint(user, carbonCompareResults)
    (myModeShareCount, avgModeShareCount,
     myModeShareDistance, avgModeShareDistance,
     myModeCarbonFootprint, avgModeCarbonFootprint,
     myModeCarbonFootprintNoLongMotorized,
     avgModeCarbonFootprintNoLongMotorized, # ignored
     myOptimalCarbonFootprint, avgOptimalCarbonFootprint,
     myOptimalCarbonFootprintNoLongMotorized,
     avgOptimalCarbonFootprintNoLongMotorized) = carbonCompareResults
    # We only compute server stats in the background, because including them in
    # the set call means that they may be invoked when the user makes a call and
    # the cached value is None, which would potentially slow down user response time
    msNow = systime.time()
    stats.storeResultEntry(user_uuid, stats.STAT_MY_CARBON_FOOTPRINT, msNow,
                           getCategorySum(myModeCarbonFootprint))
    stats.storeResultEntry(user_uuid, stats.STAT_MY_CARBON_FOOTPRINT_NO_AIR, msNow,
                           getCategorySum(myModeCarbonFootprintNoLongMotorized))
    stats.storeResultEntry(user_uuid, stats.STAT_MY_OPTIMAL_FOOTPRINT, msNow,
                           getCategorySum(myOptimalCarbonFootprint))
    stats.storeResultEntry(user_uuid, stats.STAT_MY_OPTIMAL_FOOTPRINT_NO_AIR, msNow,
                           getCategorySum(myOptimalCarbonFootprintNoLongMotorized))
    # 278.0/(1609 * 1000) appears to convert total distance (meters) to an
    # all-drive footprint via a per-mile emissions factor of 278 -- TODO
    # confirm the units against the carbon module.
    stats.storeResultEntry(user_uuid, stats.STAT_MY_ALLDRIVE_FOOTPRINT, msNow,
                           getCategorySum(myModeShareDistance) * (278.0/(1609 * 1000)))
    stats.storeResultEntry(user_uuid, stats.STAT_MEAN_FOOTPRINT, msNow,
                           getCategorySum(avgModeCarbonFootprint))
    stats.storeResultEntry(user_uuid, stats.STAT_MEAN_FOOTPRINT_NO_AIR, msNow,
                           getCategorySum(avgModeCarbonFootprintNoLongMotorized))
def getScoreComponents(user_uuid, start, end):
    """Return the four raw score components for a user over [start, end).

    Components, in order: classified-trip ratio, relative excess of the
    actual footprint over the optimal footprint, relative savings versus
    an all-drive baseline, and relative headroom against the SB375 daily
    goal. Each ratio falls back to 0 when its denominator is 0.
    """
    # The score is based on the following components:
    # - Percentage of trips classified. We are not auto-classifying high
    #   confidence trips, so don't need to handle those here
    user = User.fromUUID(user_uuid)
    pctClassified = common.getClassifiedRatio(user_uuid, start, end)
    (myModeShareCount, avgModeShareCount,
     myModeShareDistance, avgModeShareDistance,
     myModeCarbonFootprint, avgModeCarbonFootprint,
     myModeCarbonFootprintNoLongMotorized,
     avgModeCarbonFootprintNoLongMotorized,
     myOptimalCarbonFootprint, avgOptimalCarbonFootprint,
     myOptimalCarbonFootprintNoLongMotorized,
     avgOptimalCarbonFootprintNoLongMotorized
     ) = carbon.getFootprintCompareForRange(user.uuid, start, end)
    carbon.delLongMotorizedModes(myModeShareDistance)
    myAllDrive = carbon.getAllDrive(user.uuid, myModeShareDistance)
    myCarbonFootprintSum = sum(myModeCarbonFootprintNoLongMotorized.values())
    myOptimalFootprintSum = sum(
        myOptimalCarbonFootprintNoLongMotorized.values())
    logging.debug(
        "myCarbonFootprintSum = %s, myOptimalFootprintSum = %s, myAllDrive = %s"
        % (myCarbonFootprintSum, myOptimalFootprintSum, myAllDrive))

    # Fix: PEP 8 (E731) -- use a def instead of assigning a lambda; this
    # also gives the helper a useful name in tracebacks.
    def handleZero(x, y):
        # A zero denominator means "no data"; treat the component as 0.
        return 0 if y == 0 else float(x) / y

    components = [
        pctClassified,
        handleZero(myCarbonFootprintSum - myOptimalFootprintSum,
                   myOptimalFootprintSum),
        handleZero(myAllDrive - myCarbonFootprintSum, myAllDrive),
        handleZero(sb375DailyGoal - myCarbonFootprintSum, sb375DailyGoal)
    ]
    return components
def getCurrView(uuid):
    """Return the user's currently selected client view, defaulting to
    "data" when there is no profile or no stored choice."""
    user = User.fromUUID(uuid)
    profile = user.getProfile()
    if profile is None:
        logging.debug("profile is None, returning data")
        return "data"
    # Fix: the log message previously claimed the fallback was 'dummy',
    # but the code has always fallen back to "data".
    logging.debug("profile.get('curr_view', 'data') is %s" %
                  profile.get("curr_view", "data"))
    return profile.get("curr_view", "data")
def testCarbonFootprintStore(self): user = User.fromUUID(self.uuid) # Tuple of JSON objects, similar to the real footprint dummyCarbonFootprint = ({'myModeShareCount': 10}, {'avgModeShareCount': 20}) self.assertEquals(data.getCarbonFootprint(user), None) data.setCarbonFootprint(user, dummyCarbonFootprint) # recall that pymongo converts tuples to lists somewhere down the line self.assertEquals(data.getCarbonFootprint(user), list(dummyCarbonFootprint))
def getUserClient(user_uuid):
    """Return the Client for the user's first study, or None if the user is
    not enrolled in any study."""
    study = User.fromUUID(user_uuid).getFirstStudy()
    # Fix: identity comparison with None (PEP 8) instead of `!= None`.
    if study is not None:
        client = Client(study)
        return client
    else:
        # User is not part of any study, so no additional filtering is needed
        return None
def calc_car_cost(trip_id, distance):
    """Estimate the fuel cost (in dollars) of driving `distance` meters,
    using the trip owner's average MPG and the current national fuel price
    fetched from fueleconomy.gov.
    """
    # NOTE(review): find_one may return None for an unknown trip_id, which
    # would raise a TypeError on the subscript -- confirm callers guarantee
    # the trip exists.
    uuid = sectiondb.find_one({'trip_id': trip_id})['user_id']
    our_user = User.fromUUID(uuid)
    ave_mpg = our_user.getAvgMpg()
    gallons = meters_to_miles(distance) / ave_mpg
    # Live network call; NOTE(review): the response object is never closed.
    price = urllib2.urlopen('http://www.fueleconomy.gov/ws/rest/fuelprices')
    xml = price.read()
    # [-1] takes the last child element of the price feed -- presumably the
    # regular-gasoline price; confirm against the service's schema.
    p = ET.fromstring(xml)[-1]
    return float(p.text) * gallons
def getResult(user_uuid):
    """Render the leaderboard view: this user's level/score plus the sorted
    list of every user's current score."""
    # This is in here, as opposed to the top level as recommended by the PEP
    # because then we don't have to worry about loading bottle in the unit tests
    from bottle import template

    (prevScore, currScore) = getStoredScore(User.fromUUID(user_uuid))
    (level, sublevel) = getLevel(currScore)
    # Collect the current score of every registered user for ranking.
    otherCurrScoreList = []
    for uuidEntry in get_uuid_db().find({}, {'uuid': 1, '_id': 0}):
        otherScores = getStoredScore(User.fromUUID(uuidEntry['uuid']))
        otherCurrScoreList.append(otherScores[1])
    otherCurrScoreList.sort()
    return template("clients/leaderboard/result_template.html",
                    level_picture_filename=getFileName(level, sublevel),
                    prevScore=prevScore,
                    currScore=currScore,
                    otherCurrScoreList=otherCurrScoreList)
def getResult(user_uuid):
    """Render the gamified view showing the user's level picture and their
    previous/current scores."""
    # This is in here, as opposed to the top level as recommended by the PEP
    # because then we don't have to worry about loading bottle in the unit tests
    from bottle import template

    scorePair = getStoredScore(User.fromUUID(user_uuid))
    (level, sublevel) = getLevel(scorePair[1])
    pictureName = getFileName(level, sublevel)
    return template("clients/gamified/result_template.html",
                    level_picture_filename=pictureName,
                    prevScore=scorePair[0],
                    currScore=scorePair[1])
def getResult(user_uuid):
    """Render the gamified result page for the given user, showing the
    level artwork plus previous and current scores."""
    # This is in here, as opposed to the top level as recommended by the PEP
    # because then we don't have to worry about loading bottle in the unit tests
    from bottle import template

    prevScore, currScore = getStoredScore(User.fromUUID(user_uuid))
    level, sublevel = getLevel(currScore)
    return template("clients/gamified/result_template.html",
                    level_picture_filename=getFileName(level, sublevel),
                    prevScore=prevScore,
                    currScore=currScore)
def runBackgroundTasksForDay(user_uuid, today):
    """Compute the past week's footprint comparison ending at `today`,
    cache it in the user's profile, and record footprint stats entries."""
    today_dt = datetime.combine(today, time.max)
    user = User.fromUUID(user_uuid)
    # carbon compare results is a tuple. Tuples are converted to arrays
    # by mongodb
    # In [44]: testUser.setScores(('a','b', 'c', 'd'), ('s', 't', 'u', 'v'))
    # In [45]: testUser.getScore()
    # Out[45]: ([u'a', u'b', u'c', u'd'], [u's', u't', u'u', u'v'])
    weekago = today_dt - timedelta(days=7)
    carbonCompareResults = carbon.getFootprintCompareForRange(
        user_uuid, weekago, today_dt)
    setCarbonFootprint(user, carbonCompareResults)
    (
        myModeShareCount, avgModeShareCount,
        myModeShareDistance, avgModeShareDistance,
        myModeCarbonFootprint, avgModeCarbonFootprint,
        myModeCarbonFootprintNoLongMotorized,
        avgModeCarbonFootprintNoLongMotorized, # ignored
        myOptimalCarbonFootprint, avgOptimalCarbonFootprint,
        myOptimalCarbonFootprintNoLongMotorized,
        avgOptimalCarbonFootprintNoLongMotorized) = carbonCompareResults
    # We only compute server stats in the background, because including them in
    # the set call means that they may be invoked when the user makes a call and
    # the cached value is None, which would potentially slow down user response time
    msNow = systime.time()
    stats.storeResultEntry(user_uuid, stats.STAT_MY_CARBON_FOOTPRINT, msNow,
                           getCategorySum(myModeCarbonFootprint))
    stats.storeResultEntry(
        user_uuid, stats.STAT_MY_CARBON_FOOTPRINT_NO_AIR, msNow,
        getCategorySum(myModeCarbonFootprintNoLongMotorized))
    stats.storeResultEntry(user_uuid, stats.STAT_MY_OPTIMAL_FOOTPRINT, msNow,
                           getCategorySum(myOptimalCarbonFootprint))
    stats.storeResultEntry(
        user_uuid, stats.STAT_MY_OPTIMAL_FOOTPRINT_NO_AIR, msNow,
        getCategorySum(myOptimalCarbonFootprintNoLongMotorized))
    # 278.0 / (1609 * 1000) appears to convert meters into an all-drive
    # footprint via a per-mile factor of 278 -- TODO confirm units.
    stats.storeResultEntry(
        user_uuid, stats.STAT_MY_ALLDRIVE_FOOTPRINT, msNow,
        getCategorySum(myModeShareDistance) * (278.0 / (1609 * 1000)))
    stats.storeResultEntry(user_uuid, stats.STAT_MEAN_FOOTPRINT, msNow,
                           getCategorySum(avgModeCarbonFootprint))
    stats.storeResultEntry(
        user_uuid, stats.STAT_MEAN_FOOTPRINT_NO_AIR,
        msNow, getCategorySum(avgModeCarbonFootprintNoLongMotorized))
def getSectionFilter(uuid):
    """Return the Mongo filter clauses for this test client's sections:
    no filtering in the user's first two weeks, then hide sections that
    were auto-confirmed with probability >= 0.9."""
    from dao.user import User
    from datetime import datetime, timedelta

    logging.info("testclient.getSectionFilter called for user %s" % uuid)
    # If this is the first two weeks, show everything
    user = User.fromUUID(uuid)
    # Note that this is the last time that the profile was updated. So if the
    # user goes to the "Auth" screen and signs in again, it will be updated, and
    # we will reset the clock. If this is not acceptable, we need to ensure that
    # we have a create ts that is never updated
    profileAge = datetime.now() - user.getUpdateTS()
    if profileAge < timedelta(days=14):
        # In the first two weeks, don't do any filtering
        return []
    return [{'test_auto_confirmed.prob': {'$lt': 0.9}}]
def savePredictionsStep(self):
    """Store each section's predicted mode probability map, then apply any
    study-specific follow-up updates for the section's owner."""
    from dao.user import User
    from dao.client import Client

    distinctModes = sorted(set(self.cleanedResultVector))
    sectionCount = self.predictedProb.shape[0]
    for idx in range(sectionCount):
        sectionId = self.sectionIds[idx]
        probMap = self.convertPredictedProbToMap(self.modeList, distinctModes,
                                                 self.predictedProb[idx])
        logging.debug("Updating probability for section with id = %s" % sectionId)
        self.Sections.update({'_id': sectionId},
                             {"$set": {"predicted_mode": probMap}})
        # Let the owner's study client attach extra fields to the section.
        owner = User.fromUUID(self.sectionUserIds[idx])
        studyUpdate = Client(owner.getFirstStudy()).clientSpecificSetters(
            owner.uuid, sectionId, probMap)
        if studyUpdate != None:
            self.Sections.update({'_id': sectionId}, studyUpdate)
def getSectionFilter(uuid):
    """Compute the section filter for the test client: empty (show all)
    during the first two weeks after profile update, otherwise hide
    high-confidence auto-confirmed sections."""
    from dao.user import User
    from datetime import datetime, timedelta

    logging.info("testclient.getSectionFilter called for user %s" % uuid)
    # If this is the first two weeks, show everything
    # Note that this is the last time that the profile was updated. So if the
    # user goes to the "Auth" screen and signs in again, it will be updated, and
    # we will reset the clock. If this is not acceptable, we need to ensure that
    # we have a create ts that is never updated
    lastUpdated = User.fromUUID(uuid).getUpdateTS()
    inGracePeriod = (datetime.now() - lastUpdated) < timedelta(days=14)
    if inGracePeriod:
        # In the first two weeks, don't do any filtering
        return []
    else:
        return [{'test_auto_confirmed.prob': {'$lt': 0.9}}]
def updateScoreForDay(self_unused_see_note=None, *args, **kwargs):
    """placeholder"""
def testRunBackgroundTasksForDay(self):
    """End-to-end check that the data client's background tasks compute
    and store a carbon footprint in a registered user's profile."""
    self.testUsers = ["*****@*****.**", "*****@*****.**", "*****@*****.**",
                      "*****@*****.**", "*****@*****.**"]
    # Seed the test database with known modes and sections.
    load_database_json.loadTable(self.serverName, "Stage_Modes", "tests/data/modes.json")
    load_database_json.loadTable(self.serverName, "Stage_Sections", "tests/data/testCarbonFile")
    # Let's make sure that the users are registered so that they have profiles
    for userEmail in self.testUsers:
        User.register(userEmail)
    self.SectionsColl = get_section_db()
    tests.common.updateSections(self)
    self.assertNotEqual(len(self.uuid_list), 0)
    # Can access the zeroth element because we know that then length is greater than zero
    # (see above)
    test_uuid = self.uuid_list[0]
    test_user = User.fromUUID(test_uuid)
    # Footprint must be absent before the tasks run, present afterwards.
    self.assertNotIn('carbon_footprint', test_user.getProfile().keys())
    data.runBackgroundTasks(test_user.uuid)
    self.assertIn('carbon_footprint', test_user.getProfile().keys())
def getResult(user_uuid):
    """Render the composite "choice" client view, embedding the other client
    views (game, leaderboard, data, common trips, recommendation) as
    base64-encoded payloads for the template to decode client-side."""
    # This is in here, as opposed to the top level as recommended by the PEP
    # because then we don't have to worry about loading bottle in the unit tests
    from bottle import template
    import base64
    from dao.user import User
    from dao.client import Client
    user = User.fromUUID(user_uuid)
    renderedTemplate = template("clients/choice/result_template.html",
        # JSON blob of view-selection state consumed by the template's JS.
        variables = json.dumps({'curr_view': getCurrView(user_uuid),
                                'uuid': str(user_uuid),
                                'client_key': Client("choice").getClientKey()}),
        gameResult = base64.b64encode(gamified.getResult(user_uuid)),
        leaderboardResult = base64.b64encode(leaderboard.getResult(user_uuid)),
        dataResult = base64.b64encode(data.getResult(user_uuid)),
        commonTripsResult = base64.b64encode(commontrips.getResult(user_uuid)),
        recommendationResult = base64.b64encode(recommendation.getResult(user_uuid)))
    return renderedTemplate
def getResult(user_uuid):
    """Render the footprint comparison page (compare.html) for the user,
    using the cached footprint from the profile when present and
    recomputing (and caching) it otherwise."""
    # This is in here, as opposed to the top level as recommended by the PEP
    # because then we don't have to worry about loading bottle in the unit tests
    from bottle import template
    user = User.fromUUID(user_uuid)
    currFootprint = getCarbonFootprint(user)
    if currFootprint is None:
        # Cache miss: compute and store for the next call.
        currFootprint = carbon.getFootprintCompare(user_uuid)
        setCarbonFootprint(user, currFootprint)
    # Unpack the 12-element footprint tuple; the "NoLongMotorized" entries
    # are not shown in this view.
    (
        myModeShareCount, avgModeShareCount,
        myModeShareDistance, avgModeShareDistance,
        myModeCarbonFootprint, avgModeCarbonFootprint,
        myModeCarbonFootprintNoLongMotorized,
        avgModeCarbonFootprintNoLongMotorized, # ignored
        myOptimalCarbonFootprint, avgOptimalCarbonFootprint,
        myOptimalCarbonFootprintNoLongMotorized,
        avgOptimalCarbonFootprintNoLongMotorized) = currFootprint
    # Values are JSON-encoded because the template consumes them in JS.
    renderedTemplate = template(
        "compare.html",
        myModeShareCount=json.dumps(myModeShareCount),
        avgModeShareCount=json.dumps(avgModeShareCount),
        myModeShareDistance=json.dumps(myModeShareDistance),
        avgModeShareDistance=json.dumps(avgModeShareDistance),
        myModeCarbonFootprint=json.dumps(myModeCarbonFootprint),
        avgModeCarbonFootprint=json.dumps(avgModeCarbonFootprint),
        myOptimalCarbonFootprint=json.dumps(myOptimalCarbonFootprint),
        avgOptimalCarbonFootprint=json.dumps(avgOptimalCarbonFootprint))
    # logging.debug(renderedTemplate)
    return renderedTemplate
def getCustomizationForProfile():
    """Return the stored client settings for the user making this request."""
    requester_uuid = getUUID(request)
    requester = User.fromUUID(requester_uuid)
    logging.debug("Returning settings for user %s" % requester_uuid)
    return requester.getSettings()
def updateUserCreateTime(uuid):
    """Back-date the user's update timestamp by 20 days (test helper so the
    two-week grace period logic can be exercised)."""
    from datetime import datetime, timedelta
    from dao.user import User

    backdate = timedelta(days=-20)
    User.fromUUID(uuid).changeUpdateTs(backdate)
def runBackgroundTasks(uuid):
    """Test-client background task: writes two fixed marker fields into the
    user's client-specific profile."""
    from dao.user import User

    markerFields = {'testfield1': 'value1',
                    'testfield2': 'value2'}
    User.fromUUID(uuid).setClientSpecificProfileFields(markerFields)
def updateUserProfile():
    """Update the requesting user's vehicle MPG array from the request body."""
    logging.debug("Called updateUserProfile")
    requester_uuid = getUUID(request)
    mpg_array = request.json['mpg_array']
    requester = User.fromUUID(requester_uuid)
    return requester.setMpgArray(mpg_array)
def setCurrView(uuid, newView):
    """Persist the user's chosen client view and record the choice as a
    stats entry."""
    chooser = User.fromUUID(uuid)
    chooser.setClientSpecificProfileFields({'curr_view': newView})
    stats.storeResultEntry(uuid, stats.STAT_VIEW_CHOICE, time.time(), newView)
def getFootprintCompareForRange(user_uuid, start, end):
    """Compare this user's mode share and carbon footprint over [start, end)
    against the per-user average across all users.

    Returns a 12-tuple: (my/avg mode share count, my/avg mode share distance,
    my/avg footprint, my/avg footprint without long motorized modes,
    my/avg optimal footprint, my/avg optimal footprint without long
    motorized modes).

    The input is assumed to be a UUID, not a User object.
    """
    assert (not isinstance(user_uuid, User))
    userObj = User.fromUUID(user_uuid)
    myCarbonFootprintForMode = userObj.getCarbonFootprintForMode()
    # Passing None as the user computes the totals across all users.
    myModeShareCount = getModeShare(user_uuid, start, end)
    totalModeShareCount = getModeShare(None, start, end)
    logging.debug("myModeShareCount = %s totalModeShareCount = %s" %
                  (myModeShareCount, totalModeShareCount))
    myModeShareDistance = getModeShareDistance(user_uuid, start, end)
    totalModeShareDistance = getModeShareDistance(None, start, end)
    logging.debug("myModeShareDistance = %s totalModeShareDistance = %s" %
                  (myModeShareDistance, totalModeShareDistance))
    myShortLongModeShareDistance = getShortLongModeShareDistance(
        user_uuid, start, end)
    totalShortLongModeShareDistance = getShortLongModeShareDistance(
        None, start, end)
    myModeCarbonFootprint = getCarbonFootprintsForMap(
        myShortLongModeShareDistance, myCarbonFootprintForMode)
    totalModeCarbonFootprint = getCarbonFootprintsForMap(
        totalShortLongModeShareDistance, myCarbonFootprintForMode)
    logging.debug("myModeCarbonFootprint = %s, totalModeCarbonFootprint = %s" %
                  (myModeCarbonFootprint, totalModeCarbonFootprint))
    myOptimalCarbonFootprint = getCarbonFootprintsForMap(
        myShortLongModeShareDistance, optimalCarbonFootprintForMode)
    totalOptimalCarbonFootprint = getCarbonFootprintsForMap(
        totalShortLongModeShareDistance, optimalCarbonFootprintForMode)
    logging.debug(
        "myOptimalCarbonFootprint = %s, totalOptimalCarbonFootprint = %s" %
        (myOptimalCarbonFootprint, totalOptimalCarbonFootprint))
    # Recompute all four footprints after dropping long motorized modes
    # (these mutate the share-distance maps in place).
    delLongMotorizedModes(myShortLongModeShareDistance)
    delLongMotorizedModes(totalShortLongModeShareDistance)
    logging.debug("After deleting long motorized mode, map is %s",
                  myShortLongModeShareDistance)
    myModeCarbonFootprintNoLongMotorized = getCarbonFootprintsForMap(
        myShortLongModeShareDistance, myCarbonFootprintForMode)
    totalModeCarbonFootprintNoLongMotorized = getCarbonFootprintsForMap(
        totalShortLongModeShareDistance, myCarbonFootprintForMode)
    myOptimalCarbonFootprintNoLongMotorized = getCarbonFootprintsForMap(
        myShortLongModeShareDistance, optimalCarbonFootprintForMode)
    totalOptimalCarbonFootprintNoLongMotorized = getCarbonFootprintsForMap(
        totalShortLongModeShareDistance, optimalCarbonFootprintForMode)
    nUsers = getDistinctUserCount(getQuerySpec(None, None, start, end))
    # Hack to prevent divide by zero on an empty DB.
    # We will never really have an empty DB in the real production world,
    # but shouldn't crash in that case.
    # This is pretty safe because if we have no user_uuids, we won't have any modeCarbonFootprint either
    if nUsers == 0:
        nUsers = 1
    avgModeShareCount = convertToAvg(totalModeShareCount, nUsers)
    avgModeShareDistance = convertToAvg(totalModeShareDistance, nUsers)
    avgModeCarbonFootprint = convertToAvg(totalModeCarbonFootprint, nUsers)
    avgModeCarbonFootprintNoLongMotorized = convertToAvg(
        totalModeCarbonFootprintNoLongMotorized, nUsers)
    # Fix: the optimal averages were previously computed from the *actual*
    # footprint totals (totalModeCarbonFootprint[...]) while the computed
    # totalOptimal* values went unused -- average the optimal totals instead.
    avgOptimalCarbonFootprint = convertToAvg(totalOptimalCarbonFootprint, nUsers)
    avgOptimalCarbonFootprintNoLongMotorized = convertToAvg(
        totalOptimalCarbonFootprintNoLongMotorized, nUsers)
    # avgCarbonFootprint = totalCarbonFootprint/nUsers
    #
    # carbonFootprint = {"mine": myCarbonFootprint,
    #                    "mean": avgCarbonFootprint,
    #                    "2005 avg": 47173.568,
    #                    "2020 target": 43771.628,
    #                    "2035 target": 40142.892}
    return (myModeShareCount, avgModeShareCount,
            myModeShareDistance, avgModeShareDistance,
            myModeCarbonFootprint, avgModeCarbonFootprint,
            myModeCarbonFootprintNoLongMotorized,
            avgModeCarbonFootprintNoLongMotorized,
            myOptimalCarbonFootprint, avgOptimalCarbonFootprint,
            myOptimalCarbonFootprintNoLongMotorized,
            avgOptimalCarbonFootprintNoLongMotorized)
def getAllDrive(user_uuid, modeDistanceMap):
    """Return the footprint the user would have if every meter in
    modeDistanceMap had been driven, using their short-car factor."""
    assert (not isinstance(user_uuid, User))
    footprintForMode = User.fromUUID(user_uuid).getCarbonFootprintForMode()
    # Distances are summed in meters and converted to kilometers.
    totalDistanceKm = sum(modeDistanceMap.values()) / 1000
    return totalDistanceKm * footprintForMode['car_short']
# But I need to go through and fix the existing users # For all users who are not in the "gamified" group, we need to move their # carbon footprint from the currentScore field to the carbon_footprint field, # and delete the currentScore and previousScore fields from get_database import get_uuid_db, get_profile_db from dao.user import User from clients.default import default import logging logging.basicConfig(level=logging.DEBUG) for user_uuid_dict in get_uuid_db().find({}, {'uuid': 1, '_id': 0}): currUUID = user_uuid_dict['uuid'] logging.info("Fixing results for %s" % currUUID) currUser = User.fromUUID(currUUID) if currUser.getFirstStudy() is None: currFootprint = currUser.getProfile().get("currentScore", None) default.setCarbonFootprint(currUser, currFootprint) get_profile_db().update( {'user_id': currUUID}, {'$unset': { 'previousScore': "", 'currentScore': "" }}) logging.debug("After change, currentScore = %s, currFootprint = %s" % (currUser.getProfile().get("currentScore"), default.getCarbonFootprint(currUser))) # Informal testing from the command line since this is a one-time script # Can be pulled out into a unit test if reworked
# But I need to go through and fix the existing users # For all users who are not in the "gamified" group, we need to move their # carbon footprint from the currentScore field to the carbon_footprint field, # and delete the currentScore and previousScore fields from get_database import get_uuid_db, get_profile_db from dao.user import User from clients.default import default import logging logging.basicConfig(level=logging.DEBUG) for user_uuid_dict in get_uuid_db().find({}, {'uuid': 1, '_id': 0}): currUUID = user_uuid_dict['uuid'] logging.info("Fixing results for %s" % currUUID) currUser = User.fromUUID(currUUID) if currUser.getFirstStudy() is None: currFootprint = currUser.getProfile().get("currentScore", None) default.setCarbonFootprint(currUser, currFootprint) get_profile_db().update({'user_id': currUUID}, {'$unset': {'previousScore': "", 'currentScore': ""}}) logging.debug("After change, currentScore = %s, currFootprint = %s" % ( currUser.getProfile().get("currentScore"), default.getCarbonFootprint(currUser))) # Informal testing from the command line since this is a one-time script # Can be pulled out into a unit test if reworked # Test setup steps from the REPL: # In [52]: userTest = User.register("*****@*****.**") # In [53]: userTest1 = User.register("*****@*****.**") # In [54]: gamified.setScores(userTest, None, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}])