def GetConnectionRecordsByUser(user):
    """Return a ServiceRecord for every service connection linked to *user*."""
    linked_ids = [svc["ID"] for svc in user["ConnectedServices"]]
    cursor = db.connections.find({"_id": {"$in": linked_ids}})
    return [ServiceRecord(doc) for doc in cursor]
def RefreshPaymentStateForExternalIDs(self, external_ids):
    """Re-query and apply the Motivato payment state for the given external IDs."""
    from tapiriik.services import Service, ServiceRecord
    external_ids = [str(eid) for eid in external_ids]
    matching_docs = db.connections.find({"Service": "motivato", "ExternalID": {"$in": external_ids}})
    connections = [ServiceRecord(doc) for doc in matching_docs]
    connection_ids = [c._id for c in connections]
    users = db.users.find({"ConnectedServices.ID": {"$in": connection_ids}})
    for user in users:
        owned_ids = [svc["ID"] for svc in user["ConnectedServices"]]
        my_connection = [c for c in connections if c._id in owned_ids][0]
        # Defer to the actual service module, where all the session stuff is set up
        state = Service.FromID("motivato")._getPaymentState(my_connection)
        self.ApplyPaymentState(user, state, my_connection.ExternalID, duration=None)
def fullRecords(conns):
    """Load full ServiceRecords for the given connection stubs, omitting withdrawn services."""
    wanted_ids = [stub["ID"] for stub in conns]
    docs = db.connections.find({"_id": {"$in": wanted_ids}})
    return [ServiceRecord(doc) for doc in docs if doc["Service"] not in WITHDRAWN_SERVICES]
def fullRecords(conns):
    """Load the full ServiceRecord for each of the given connection stubs."""
    wanted_ids = [stub["ID"] for stub in conns]
    docs = db.connections.find({"_id": {"$in": wanted_ids}})
    return [ServiceRecord(doc) for doc in docs]
def GetConnectionRecordsByUser(user):
    """Return ServiceRecords for *user*'s connections, omitting withdrawn services."""
    linked_ids = [svc["ID"] for svc in user["ConnectedServices"]]
    cursor = db.connections.find({"_id": {"$in": linked_ids}})
    return [ServiceRecord(doc) for doc in cursor if doc["Service"] not in WITHDRAWN_SERVICES]
def RefreshPaymentState(self):
    """Mark every Motivato user on the remote premium list as paid."""
    from tapiriik.services import ServiceRecord
    premium_ids = requests.get(MOTIVATO_PREMIUM_USERS_LIST_URL).json()
    conn_docs = db.connections.find({"Service": "motivato", "ExternalID": {"$in": premium_ids}})
    connections = [ServiceRecord(doc) for doc in conn_docs]
    users = db.users.find({"ConnectedServices.ID": {"$in": [c._id for c in connections]}})
    for user in users:
        owned_ids = [svc["ID"] for svc in user["ConnectedServices"]]
        my_connection = [c for c in connections if c._id in owned_ids][0]
        self.ApplyPaymentState(user, True, my_connection.ExternalID, duration=None)
def GetConnectionRecord(user, svcId):
    """Return the user's ServiceRecord for service *svcId*, or None if not connected."""
    candidate_ids = [svc["ID"] for svc in user["ConnectedServices"] if svc["Service"] == svcId]
    doc = db.connections.find_one({"_id": {"$in": candidate_ids}})
    if doc:
        return ServiceRecord(doc)
    return None
def RefreshPaymentState(self):
    """Sync external premium payments for Motivato users and prune them from non-owners."""
    from tapiriik.services import ServiceRecord
    from tapiriik.payments import Payments
    from tapiriik.auth import User
    premium_ids = requests.get(MOTIVATO_PREMIUM_USERS_LIST_URL).json()
    conn_docs = db.connections.find({"Service": "motivato", "ExternalID": {"$in": premium_ids}})
    connections = [ServiceRecord(doc) for doc in conn_docs]
    users = list(db.users.find({"ConnectedServices.ID": {"$in": [c._id for c in connections]}}))
    payments = []
    # Pull relevant payment objects and associate with users
    for user in users:
        owned_ids = [svc["ID"] for svc in user["ConnectedServices"]]
        my_connection = [c for c in connections if c._id in owned_ids][0]
        pmt = Payments.EnsureExternalPayment(self.ID, my_connection.ExternalID, duration=None)
        payments.append(pmt)
        User.AssociateExternalPayment(user, pmt, skip_deassoc=True)
    # Bulk-remove these payments from users who don't own them (more or less - it'll leave anyone who switched remote accounts)
    db.users.update({"_id": {"$nin": [u["_id"] for u in users]}},
                    {"$pull": {"ExternalPayments": {"_id": {"$in": [p["_id"] for p in payments]}}}},
                    multi=True)
def create_mock_svc_record(svc):
    """Build a throwaway ServiceRecord for *svc* with random _id and ExternalID."""
    mock_id = str(random.randint(1, 1000))
    mock_external_id = str(random.randint(1, 1000))
    return ServiceRecord({
        "Service": svc.ID,
        "_id": mock_id,
        "ExternalID": mock_external_id,
    })
def PerformUserSync(user, exhaustive=False, null_next_sync_on_unlock=False, heartbeat_callback=None):
    """Synchronize all of *user*'s connected services with one another.

    Acquires a row-level lock on the user document, lists activities from
    every connected service, determines which services are missing each
    activity, downloads the full record from one service that has it, and
    uploads it to every eligible destination. Errors and exclusions are
    accumulated per-connection and written back at the end.

    user -- the user document (dict) from db.users
    exhaustive -- passed through to DownloadActivityList to request a full
        rather than recent-only listing
    null_next_sync_on_unlock -- also unset NextSynchronization in the same
        update that releases the row lock (avoids a double-scheduling race)
    heartbeat_callback -- optional callable invoked with the current SyncStep

    Raises SynchronizationConcurrencyException if the row lock cannot be
    acquired.
    """
    # And thus begins the monolithic sync function that's a pain to test.
    connectedServiceIds = [x["ID"] for x in user["ConnectedServices"]]
    if len(connectedServiceIds) <= 1:
        return  # nothing's going anywhere anyways
    # Mark this user as in-progress. Filtering on SynchronizationWorker=None
    # makes this update an atomic test-and-set; the find_one below verifies
    # that we (and not a concurrent worker) actually won the row.
    db.users.update({"_id": user["_id"], "SynchronizationWorker": None},
                    {"$set": {"SynchronizationWorker": os.getpid(),
                              "SynchronizationHost": socket.gethostname(),
                              "SynchronizationProgress": 0,
                              "SynchronizationStartTime": datetime.utcnow()}})
    lockCheck = db.users.find_one({"_id": user["_id"],
                                   "SynchronizationWorker": os.getpid(),
                                   "SynchronizationHost": socket.gethostname()})
    if lockCheck is None:
        raise SynchronizationConcurrencyException  # failed to get lock

    # Per-user rotating log file so an individual sync can be inspected later.
    logging_file_handler = logging.handlers.RotatingFileHandler(USER_SYNC_LOGS + str(user["_id"]) + ".log",
                                                                maxBytes=5242880,
                                                                backupCount=1)
    logging_file_handler.setFormatter(logging.Formatter(Sync._logFormat, Sync._logDateFormat))
    _global_logger.addHandler(logging_file_handler)
    logger.info("Beginning sync for " + str(user["_id"]) + "(exhaustive: " + str(exhaustive) + ")")
    try:
        serviceConnections = [ServiceRecord(x) for x in db.connections.find({"_id": {"$in": connectedServiceIds}})]
        allExtendedAuthDetails = list(cachedb.extendedAuthDetails.find({"ID": {"$in": connectedServiceIds}}))
        activities = []
        excludedServices = []
        # Errors/exclusions accumulate here per connection _id and are
        # persisted back onto the connection documents at the end.
        tempSyncErrors = {}
        tempSyncExclusions = {}

        # --- Phase 1: list activities from every connected service ---
        for conn in serviceConnections:
            svc = conn.Service
            if hasattr(conn, "SyncErrors"):
                # Keep only blocking errors; non-blocking ones get retried every sync.
                tempSyncErrors[conn._id] = [x for x in conn.SyncErrors if "Block" in x and x["Block"]]
                del conn.SyncErrors
            else:
                tempSyncErrors[conn._id] = []
            # Remove temporary exclusions (live tracking etc).
            tempSyncExclusions[conn._id] = dict((k, v) for k, v in (conn.ExcludedActivities if conn.ExcludedActivities else {}).items() if v["Permanent"])
            if conn.ExcludedActivities:
                del conn.ExcludedActivities  # Otherwise the exception messages get really, really, really huge and break mongodb.
            # If we're not going to be doing anything anyways, stop now
            if len(serviceConnections) - len(excludedServices) <= 1:
                activities = []
                break
            if heartbeat_callback:
                heartbeat_callback(SyncStep.List)
            # Bail out as appropriate for the entire account (tempSyncErrors contains only blocking errors at this point)
            if [x for x in tempSyncErrors[conn._id] if x["Scope"] == ServiceExceptionScope.Account]:
                activities = []  # Kinda meh, I'll make it better when I break this into seperate functions, whenever that happens...
                break
            # ...and for this specific service
            if [x for x in tempSyncErrors[conn._id] if x["Scope"] == ServiceExceptionScope.Service]:
                excludedServices.append(conn)
                continue
            if svc.ID in DISABLED_SERVICES:
                excludedServices.append(conn)
                continue
            if svc.RequiresExtendedAuthorizationDetails:
                if not hasattr(conn, "ExtendedAuthorization") or not conn.ExtendedAuthorization:
                    extAuthDetails = [x["ExtendedAuthorization"] for x in allExtendedAuthDetails if x["ID"] == conn._id]
                    if not len(extAuthDetails):
                        logger.info("No extended auth details for " + svc.ID)
                        excludedServices.append(conn)
                        continue
                    # the connection never gets saved in full again, so we can sub these in here at no risk
                    conn.ExtendedAuthorization = extAuthDetails[0]
            try:
                logger.info("\tRetrieving list from " + svc.ID)
                svcActivities, svcExclusions = svc.DownloadActivityList(conn, exhaustive)
            except (ServiceException, ServiceWarning) as e:
                tempSyncErrors[conn._id].append(_packServiceException(SyncStep.List, e))
                excludedServices.append(conn)
                if not issubclass(e.__class__, ServiceWarning):
                    continue
                # NOTE(review): on a bare ServiceWarning we fall through to the
                # accumulate calls below - this assumes the service assigned
                # svcActivities/svcExclusions before warning; verify upstream.
            except Exception as e:
                tempSyncErrors[conn._id].append({"Step": SyncStep.List, "Message": _formatExc()})
                excludedServices.append(conn)
                continue
            Sync._accumulateExclusions(conn, svcExclusions, tempSyncExclusions)
            Sync._accumulateActivities(svc, svcActivities, activities)

        # --- Phase 2: resolve (and persist) the originating service of each activity ---
        origins = list(db.activity_origins.find({"ActivityUID": {"$in": [x.UID for x in activities]}}))
        activitiesWithOrigins = [x["ActivityUID"] for x in origins]
        # Makes reading the logs much easier.
        activities = sorted(activities, key=lambda v: v.StartTime.replace(tzinfo=None), reverse=True)
        # Populate origins
        for activity in activities:
            if len(activity.UploadedTo) == 1:
                if not len(excludedServices):  # otherwise it could be incorrectly recorded
                    # we can log the origin of this activity
                    if activity.UID not in activitiesWithOrigins:  # No need to hammer the database updating these when they haven't changed
                        logger.info("\t\t Updating db with origin for proceeding activity")
                        db.activity_origins.insert({"ActivityUID": activity.UID,
                                                    "Origin": {"Service": activity.UploadedTo[0]["Connection"].Service.ID,
                                                               "ExternalID": activity.UploadedTo[0]["Connection"].ExternalID}})
                    activity.Origin = activity.UploadedTo[0]["Connection"]
            else:
                if activity.UID in activitiesWithOrigins:
                    knownOrigin = [x for x in origins if x["ActivityUID"] == activity.UID]
                    connectedOrigins = [x for x in serviceConnections
                                        if knownOrigin[0]["Origin"]["Service"] == x.Service.ID
                                        and knownOrigin[0]["Origin"]["ExternalID"] == x.ExternalID]
                    if len(connectedOrigins) > 0:  # they might have disconnected it
                        activity.Origin = connectedOrigins[0]
                    else:
                        activity.Origin = ServiceRecord(knownOrigin[0]["Origin"])  # I have it on good authority that this will work
            logger.info("\t" + str(activity) + " " + str(activity.UID[:3]) + " from " + str([x["Connection"].Service.ID for x in activity.UploadedTo]))

        # --- Phase 3: download each activity once and fan it out ---
        totalActivities = len(activities)
        processedActivities = 0
        for activity in activities:
            # We don't always know if the activity is private before it's downloaded, but we can check anyways since it saves a lot of time.
            if activity.Private:
                logger.info("\t %s is private and restricted from sync (pre-download)" % activity.UID)  # Sync exclusion instead?
                del activity
                continue
            # recipientServices are services that don't already have this activity
            recipientServices = Sync._determineRecipientServices(activity, serviceConnections)
            if len(recipientServices) == 0:
                totalActivities -= 1  # doesn't count
                del activity
                continue
            # eligibleServices are services that are permitted to receive this activity - taking into account flow exceptions, excluded services, unfufilled configuration requirements, etc.
            eligibleServices = Sync._determineEligibleRecipientServices(activity=activity,
                                                                       recipientServices=recipientServices,
                                                                       excludedServices=excludedServices,
                                                                       user=user)
            if not len(eligibleServices):
                logger.info("\t %s has no eligible destinations" % activity.UID)
                totalActivities -= 1  # Again, doesn't really count.
                del activity
                continue
            if heartbeat_callback:
                heartbeat_callback(SyncStep.Download)
            # Locally mark this activity as present on the appropriate services.
            db.connections.update({"_id": {"$in": [x["Connection"]._id for x in activity.UploadedTo]}},
                                  {"$addToSet": {"SynchronizedActivities": activity.UID}},
                                  multi=True)
            if totalActivities <= 0:
                syncProgress = 1
            else:
                syncProgress = max(0, min(1, processedActivities / totalActivities))
            # This is after the above exit point since it's the most frequent case - want to avoid DB churn
            db.users.update({"_id": user["_id"]}, {"$set": {"SynchronizationProgress": syncProgress}})
            # The second most important line of logging in the application...
            logger.info("\tActivity " + str(activity.UID) + " to " + str([x.Service.ID for x in recipientServices]))

            # Download the full activity record
            act = None
            for dlSvcUploadRec in activity.UploadedTo:
                dlSvcRecord = dlSvcUploadRec["Connection"]  # I guess in the future we could smartly choose which for >1
                dlSvc = dlSvcRecord.Service
                logger.info("\t from " + dlSvc.ID)
                if activity.UID in tempSyncExclusions[dlSvcRecord._id]:
                    logger.info("\t\t has activity exclusion logged")
                    continue
                if dlSvcRecord in excludedServices:
                    logger.info("\t\t service became excluded after listing")  # Because otherwise we'd never have been trying to download from it in the first place.
                    continue
                workingCopy = copy.copy(activity)  # we can hope
                try:
                    workingCopy = dlSvc.DownloadActivity(dlSvcRecord, workingCopy)
                except (ServiceException, ServiceWarning) as e:
                    # Fixed: this previously appended to tempSyncErrors[conn._id],
                    # where `conn` was the stale loop variable left over from the
                    # listing phase - misattributing download errors to whichever
                    # connection happened to be listed last. Log against the
                    # service actually being downloaded from, matching the
                    # generic handler below.
                    tempSyncErrors[dlSvcRecord._id].append(_packServiceException(SyncStep.Download, e))
                    if e.Block and e.Scope == ServiceExceptionScope.Service:
                        # I can't imagine why the same would happen at the account level, so there's no behaviour to immediately abort the sync in that case.
                        excludedServices.append(dlSvcRecord)
                    if not issubclass(e.__class__, ServiceWarning):
                        continue
                except APIExcludeActivity as e:
                    logger.info("\t\t excluded by service")
                    e.Activity = workingCopy
                    Sync._accumulateExclusions(dlSvcRecord, e, tempSyncExclusions)
                    continue
                except Exception as e:
                    tempSyncErrors[dlSvcRecord._id].append({"Step": SyncStep.Download, "Message": _formatExc()})
                    continue
                if workingCopy.Private and not dlSvcRecord.GetConfiguration()["sync_private"]:
                    logger.info("\t\t is private and restricted from sync")  # Sync exclusion instead?
                    continue
                try:
                    workingCopy.CheckSanity()
                except:
                    logger.info("\t\t failed sanity check")
                    Sync._accumulateExclusions(dlSvcRecord,
                                               APIExcludeActivity("Sanity check failed " + _formatExc(), activity=workingCopy),
                                               tempSyncExclusions)
                    continue
                else:
                    act = workingCopy
                    break  # succesfully got the activity + passed sanity checks, can stop now

            if act is None:  # couldn't download it from anywhere, or the places that had it said it was broken
                processedActivities += 1  # we tried
                del act
                del activity
                continue

            for destinationSvcRecord in eligibleServices:
                if heartbeat_callback:
                    heartbeat_callback(SyncStep.Upload)
                destSvc = destinationSvcRecord.Service
                try:
                    logger.info("\t\tUploading to " + destSvc.ID)
                    destSvc.UploadActivity(destinationSvcRecord, act)
                except (ServiceException, ServiceWarning) as e:
                    tempSyncErrors[destinationSvcRecord._id].append(_packServiceException(SyncStep.Upload, e))
                    if e.Block and e.Scope == ServiceExceptionScope.Service:
                        # Similarly, no behaviour to immediately abort the sync if an account-level exception is raised
                        excludedServices.append(destinationSvcRecord)
                    if not issubclass(e.__class__, ServiceWarning):
                        continue
                except Exception as e:
                    tempSyncErrors[destinationSvcRecord._id].append({"Step": SyncStep.Upload, "Message": _formatExc()})
                    continue
                # flag as successful
                db.connections.update({"_id": destinationSvcRecord._id},
                                      {"$addToSet": {"SynchronizedActivities": activity.UID}})
                db.sync_stats.update({"ActivityID": activity.UID},
                                     {"$addToSet": {"DestinationServices": destSvc.ID, "SourceServices": dlSvc.ID},
                                      "$set": {"Distance": activity.Distance, "Timestamp": datetime.utcnow()}},
                                     upsert=True)
            # Explicitly drop the (potentially large) activity payloads before the next iteration.
            del act
            del activity
            processedActivities += 1

        # --- Wrap-up: persist errors/exclusions and release the row lock ---
        nonblockingSyncErrorsCount = 0
        blockingSyncErrorsCount = 0
        syncExclusionCount = 0
        for conn in serviceConnections:
            db.connections.update({"_id": conn._id},
                                  {"$set": {"SyncErrors": tempSyncErrors[conn._id],
                                            "ExcludedActivities": tempSyncExclusions[conn._id]}})
            nonblockingSyncErrorsCount += len([x for x in tempSyncErrors[conn._id] if "Block" not in x or not x["Block"]])
            blockingSyncErrorsCount += len([x for x in tempSyncErrors[conn._id] if "Block" in x and x["Block"]])
            syncExclusionCount += len(tempSyncExclusions[conn._id])
        # clear non-persisted extended auth details
        cachedb.extendedAuthDetails.remove({"ID": {"$in": connectedServiceIds}})
        # unlock the row
        update_values = {"$unset": {"SynchronizationWorker": None,
                                    "SynchronizationHost": None,
                                    "SynchronizationProgress": None},
                         "$set": {"NonblockingSyncErrorCount": nonblockingSyncErrorsCount,
                                  "BlockingSyncErrorCount": blockingSyncErrorsCount,
                                  "SyncExclusionCount": syncExclusionCount}}
        if null_next_sync_on_unlock:
            # Sometimes another worker would pick this record in the timespan between this update and the one in PerformGlobalSync that sets the true next sync time.
            # Hence, an option to unset the NextSynchronization in the same operation that releases the lock on the row.
            update_values["$unset"]["NextSynchronization"] = None
        db.users.update({"_id": user["_id"],
                         "SynchronizationWorker": os.getpid(),
                         "SynchronizationHost": socket.gethostname()},
                        update_values)
    except:
        # oops. Deliberately broad: a failed sync must never take the worker down,
        # and the finally below still detaches the per-user log handler.
        logger.exception("Core sync exception")
    else:
        logger.info("Finished sync for " + str(user["_id"]))
    finally:
        _global_logger.removeHandler(logging_file_handler)
        logging_file_handler.close()