def test_svc_level_dupe(self):
    ''' check that service-level duplicate activities are caught (no DB involvement) '''
    svcA, svcB = TestTools.create_mock_services()
    recA = TestTools.create_mock_svc_record(svcA)
    recB = TestTools.create_mock_svc_record(svcB)

    actA = Activity()
    actA.StartTime = datetime(1, 2, 3, 4, 5, 6, 7)
    actA.ServiceDataCollection = TestTools.create_mock_servicedatacollection(svcA, record=recA)
    actB = Activity()
    actB.StartTime = actA.StartTime
    actB.ServiceDataCollection = TestTools.create_mock_servicedatacollection(svcB, record=recB)

    actA.CalculateUID()
    actB.CalculateUID()

    activities = []
    Sync._accumulateActivities(recA, [actA], activities)
    Sync._accumulateActivities(recB, [actB], activities)

    self.assertEqual(len(activities), 1)
def test_eligibility_flowexception_shortcircuit(self):
    user = TestTools.create_mock_user()
    svcA, svcB = TestTools.create_mock_services()
    svcC = TestTools.create_mock_service("mockC")
    recA = TestTools.create_mock_svc_record(svcA)
    recB = TestTools.create_mock_svc_record(svcB)
    recC = TestTools.create_mock_svc_record(svcC)
    act = TestTools.create_blank_activity(svcA, record=recA)
    User.SetFlowException(user, recA, recC, flowToTarget=False)

    # Behaviour with known origin and no override set
    act.Origin = recA
    recipientServices = [recC, recB]
    excludedServices = []
    eligible = Sync._determineEligibleRecipientServices(activity=act, connectedServices=[recA, recB, recC], recipientServices=recipientServices, excludedServices=excludedServices, user=user)
    self.assertTrue(recA not in eligible)
    self.assertTrue(recB in eligible)
    self.assertTrue(recC not in eligible)

    # Enable alternate routing
    recB.SetConfiguration({"allow_activity_flow_exception_bypass_via_self": True}, no_save=True)
    self.assertTrue(recB.GetConfiguration()["allow_activity_flow_exception_bypass_via_self"])

    # We should now be able to arrive at recC via recB
    act.Origin = recA
    recipientServices = [recC, recB]
    excludedServices = []
    eligible = Sync._determineEligibleRecipientServices(activity=act, connectedServices=[recA, recB, recC], recipientServices=recipientServices, excludedServices=excludedServices, user=user)
    self.assertTrue(recA not in eligible)
    self.assertTrue(recB in eligible)
    self.assertTrue(recC in eligible)
def test_svc_supported_activity_types(self):
    ''' check that activities are only sent to services which support them '''
    svcA, svcB = TestTools.create_mock_services()
    recA = TestTools.create_mock_svc_record(svcA)
    recB = TestTools.create_mock_svc_record(svcB)
    svcA.SupportedActivities = [ActivityType.CrossCountrySkiing]
    svcB.SupportedActivities = [ActivityType.Cycling]

    actA = Activity()
    actA.StartTime = datetime(1, 2, 3, 4, 5, 6, 7)
    actA.ServiceDataCollection = TestTools.create_mock_servicedatacollection(svcA, record=recA)
    actA.Type = svcA.SupportedActivities[0]

    actB = Activity()
    actB.StartTime = datetime(5, 6, 7, 8, 9, 10, 11)
    actB.ServiceDataCollection = TestTools.create_mock_servicedatacollection(svcB, record=recB)
    actB.Type = [x for x in svcB.SupportedActivities if x != actA.Type][0]

    actA.CalculateUID()
    actB.CalculateUID()

    allConns = [recA, recB]

    activities = []
    Sync._accumulateActivities(recA, [actA], activities)
    Sync._accumulateActivities(recB, [actB], activities)

    syncToA = Sync._determineRecipientServices(actA, allConns)
    syncToB = Sync._determineRecipientServices(actB, allConns)

    self.assertEqual(len(syncToA), 0)
    self.assertEqual(len(syncToB), 0)

    svcB.SupportedActivities = svcA.SupportedActivities

    syncToA = Sync._determineRecipientServices(actA, allConns)
    syncToB = Sync._determineRecipientServices(actB, allConns)

    self.assertEqual(len(syncToA), 1)
    self.assertEqual(len(syncToB), 0)

    svcB.SupportedActivities = svcA.SupportedActivities = [ActivityType.CrossCountrySkiing, ActivityType.Cycling]

    syncToA = Sync._determineRecipientServices(actA, allConns)
    syncToB = Sync._determineRecipientServices(actB, allConns)

    self.assertEqual(len(syncToA), 1)
    self.assertEqual(len(syncToB), 1)
def test_svc_level_dupe_tz_nonuniform(self):
    ''' check that service-level duplicate activities with non-uniform TZs are caught '''
    svcA, svcB = TestTools.create_mock_services()
    actA = Activity()
    actA.StartTime = datetime(1, 2, 3, 4, 5, 6, 7)
    actA.UploadedTo = [TestTools.create_mock_upload_record(svcA)]
    actB = Activity()
    actB.StartTime = pytz.timezone("America/Denver").localize(actA.StartTime)
    actB.UploadedTo = [TestTools.create_mock_upload_record(svcB)]

    actA.CalculateUID()
    actB.CalculateUID()

    activities = []
    Sync._accumulateActivities(Service.FromID("mockA"), [actA], activities)
    Sync._accumulateActivities(Service.FromID("mockB"), [actB], activities)

    self.assertEqual(len(activities), 1)
def test_activity_deduplicate_normaltz(self):
    ''' ensure that we can't deduplicate activities with non-pytz timezones '''
    svcA, svcB = TestTools.create_mock_services()
    actA = TestTools.create_random_activity(svcA, tz=UTC())

    actB = Activity()
    actB.StartTime = actA.StartTime.replace(tzinfo=None) + timedelta(seconds=10)
    actB.EndTime = actA.EndTime.replace(tzinfo=None)
    actB.UploadedTo = [TestTools.create_mock_upload_record(svcB)]
    actA.Name = "Not this"
    actB.Name = "Heya"
    actB.Type = ActivityType.Walking
    actA.CalculateUID()
    actB.CalculateUID()

    activities = []
    Sync._accumulateActivities(Service.FromID("mockB"), [copy.deepcopy(actB)], activities)
    self.assertRaises(ValueError, Sync._accumulateActivities, Service.FromID("mockA"), [copy.deepcopy(actA)], activities)
def test_svc_level_dupe(self):
    ''' check that service-level duplicate activities are caught (no DB involvement) '''
    svcA, svcB = TestTools.create_mock_services()
    actA = Activity()
    actA.StartTime = datetime(1, 2, 3, 4, 5, 6, 7)
    actA.UploadedTo = [TestTools.create_mock_upload_record(svcA)]
    actB = Activity()
    actB.StartTime = actA.StartTime
    actB.UploadedTo = [TestTools.create_mock_upload_record(svcB)]

    actA.CalculateUID()
    actB.CalculateUID()

    activities = []
    Sync._accumulateActivities(Service.FromID("mockA"), [actA], activities)
    Sync._accumulateActivities(Service.FromID("mockB"), [actB], activities)

    self.assertEqual(len(activities), 1)
def test_svc_level_dupe_tz_irregular(self):
    ''' check that service-level duplicate activities with irregular TZs are caught '''
    svcA, svcB = TestTools.create_mock_services()
    actA = Activity()
    actA.StartTime = pytz.timezone("America/Edmonton").localize(datetime(1, 2, 3, 4, 5, 6, 7))
    actA.UploadedTo = [TestTools.create_mock_upload_record(svcA)]
    actB = Activity()
    actB.StartTime = actA.StartTime.astimezone(pytz.timezone("America/Iqaluit"))
    actB.UploadedTo = [TestTools.create_mock_upload_record(svcB)]

    actA.CalculateUID()
    actB.CalculateUID()

    activities = []
    Sync._accumulateActivities(Service.FromID("mockA"), [actA], activities)
    Sync._accumulateActivities(Service.FromID("mockB"), [actB], activities)

    self.assertEqual(len(activities), 1)
def test_eligibility_flowexception_both(self):
    user = TestTools.create_mock_user()
    svcA, svcB = TestTools.create_mock_services()
    recA = TestTools.create_mock_svc_record(svcA)
    recB = TestTools.create_mock_svc_record(svcB)
    act = TestTools.create_blank_activity(svcA, record=recB)
    act.Origin = recB
    User.SetFlowException(user, recA, recB, flowToSource=False, flowToTarget=False)
    recipientServices = [recA, recB]
    excludedServices = []
    eligible = Sync._determineEligibleRecipientServices(activity=act, connectedServices=recipientServices, recipientServices=recipientServices, excludedServices=excludedServices, user=user)
    self.assertFalse(recA in eligible)
    self.assertTrue(recB in eligible)

    act.Origin = recA
    act.ServiceDataCollection = TestTools.create_mock_servicedatacollection(svcA, record=recA)
    eligible = Sync._determineEligibleRecipientServices(activity=act, connectedServices=recipientServices, recipientServices=recipientServices, excludedServices=excludedServices, user=user)
    self.assertTrue(recA in eligible)
    self.assertFalse(recB in eligible)
def test_eligibility_flowexception_none(self):
    user = TestTools.create_mock_user()
    svcA, svcB = TestTools.create_mock_services()
    recA = TestTools.create_mock_svc_record(svcA)
    recB = TestTools.create_mock_svc_record(svcB)
    act = TestTools.create_blank_activity(svcA, record=recB)
    act.Origin = recB
    User.SetFlowException(user, recA, recB, flowToSource=False, flowToTarget=False)
    recipientServices = [recA]
    excludedServices = []
    eligible = Sync._determineEligibleRecipientServices(activity=act, recipientServices=recipientServices, excludedServices=excludedServices, user=user)
    self.assertTrue(recA not in eligible)
    self.assertTrue(recB not in eligible)

    recipientServices = [recB]
    act.Origin = recA
    act.UploadedTo = [TestTools.create_mock_upload_record(svcA, record=recA)]
    eligible = Sync._determineEligibleRecipientServices(activity=act, recipientServices=recipientServices, excludedServices=excludedServices, user=user)
    self.assertTrue(recA not in eligible)
    self.assertTrue(recB not in eligible)
def test_svc_level_dupe_tz_irregular(self):
    ''' check that service-level duplicate activities with irregular TZs are caught '''
    svcA, svcB = TestTools.create_mock_services()
    recA = TestTools.create_mock_svc_record(svcA)
    recB = TestTools.create_mock_svc_record(svcB)
    actA = Activity()
    actA.StartTime = pytz.timezone("America/Edmonton").localize(datetime(1, 2, 3, 4, 5, 6, 7))
    actA.ServiceDataCollection = TestTools.create_mock_servicedatacollection(svcA, record=recA)
    actB = Activity()
    actB.StartTime = actA.StartTime.astimezone(pytz.timezone("America/Iqaluit"))
    actB.ServiceDataCollection = TestTools.create_mock_servicedatacollection(svcB, record=recB)

    actA.CalculateUID()
    actB.CalculateUID()

    activities = []
    Sync._accumulateActivities(recA, [actA], activities)
    Sync._accumulateActivities(recB, [actB], activities)

    self.assertEqual(len(activities), 1)
def test_eligibility_excluded(self):
    user = TestTools.create_mock_user()
    svcA, svcB = TestTools.create_mock_services()
    recA = TestTools.create_mock_svc_record(svcA)
    recB = TestTools.create_mock_svc_record(svcB)
    act = TestTools.create_blank_activity(svcA, record=recB)
    recipientServices = [recA, recB]
    excludedServices = [recA]
    eligible = Sync._determineEligibleRecipientServices(activity=act, connectedServices=recipientServices, recipientServices=recipientServices, excludedServices=excludedServices, user=user)
    self.assertTrue(recB in eligible)
    self.assertTrue(recA not in eligible)
def test_svc_level_dupe_tz_nonuniform(self):
    ''' check that service-level duplicate activities with non-uniform TZs are caught '''
    svcA, svcB = TestTools.create_mock_services()
    recA = TestTools.create_mock_svc_record(svcA)
    recB = TestTools.create_mock_svc_record(svcB)
    actA = Activity()
    actA.StartTime = datetime(1, 2, 3, 4, 5, 6, 7)
    actA.ServiceDataCollection = TestTools.create_mock_servicedatacollection(svcA, record=recA)
    actB = Activity()
    actB.StartTime = pytz.timezone("America/Denver").localize(actA.StartTime)
    actB.ServiceDataCollection = TestTools.create_mock_servicedatacollection(svcB, record=recB)

    actA.CalculateUID()
    actB.CalculateUID()

    activities = []
    Sync._accumulateActivities(recA, [actA], activities)
    Sync._accumulateActivities(recB, [actB], activities)

    self.assertEqual(len(activities), 1)
def test_eligibility_config(self):
    user = TestTools.create_mock_user()
    svcA, svcB = TestTools.create_mock_services()
    svcA.Configurable = True
    svcA.RequiresConfiguration = lambda x: True
    recA = TestTools.create_mock_svc_record(svcA)
    recB = TestTools.create_mock_svc_record(svcB)
    act = TestTools.create_blank_activity(svcA, record=recB)
    recipientServices = [recA, recB]
    excludedServices = []
    eligible = Sync._determineEligibleRecipientServices(activity=act, connectedServices=recipientServices, recipientServices=recipientServices, excludedServices=excludedServices, user=user)
    self.assertTrue(recB in eligible)
    self.assertTrue(recA not in eligible)
def sync_clear_errorgroup(req, service, group):
    if not req.user:
        return HttpResponse(status=401)

    rec = User.GetConnectionRecord(req.user, service)
    if not rec:
        return HttpResponse(status=404)

    # Prevent this becoming a vehicle for rapid synchronization
    to_clear_count = 0
    for x in rec.SyncErrors:
        if "UserException" in x and "ClearGroup" in x["UserException"] and x["UserException"]["ClearGroup"] == group:
            to_clear_count += 1

    if to_clear_count > 0:
        db.connections.update({"_id": rec._id}, {"$pull": {"SyncErrors": {"UserException.ClearGroup": group}}})
        # In the interests of data integrity, update the summary counts immediately as opposed to waiting for a sync to complete.
        db.users.update({"_id": req.user["_id"]}, {'$inc': {"BlockingSyncErrorCount": -to_clear_count}})
        # And schedule them for an immediate full resynchronization, so the now-unblocked services can be brought up to speed.
        Sync.ScheduleImmediateSync(req.user, True)
        return HttpResponse()
    return HttpResponse()
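# Illustrative sketch only (an assumption, not a schema definition): the $pull above removes
# SyncError documents whose embedded UserException carries the requested ClearGroup, i.e.
# documents shaped roughly like the following. The field values shown here are hypothetical.
_example_sync_error = {
    "Message": "Authorization expired",       # human-readable error text
    "Scope": "service",                       # "activity"-scoped errors appear elsewhere in this codebase
    "UserException": {"ClearGroup": "auth"},  # the group name matched against the URL parameter
}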
def _assocPaymentLikeObject(user, collection, payment_like_object, schedule_now, skip_deassoc=False):
    # Since I seem to have taken this duck-typing quite far
    # First, deassociate payment ids from other accounts that may be using them
    if "_id" in payment_like_object and not skip_deassoc:
        db.users.update({}, {"$pull": {collection: {"_id": payment_like_object["_id"]}}}, multi=True)
    # Then, attach to us
    db.users.update({"_id": ObjectId(user["_id"])}, {"$addToSet": {collection: payment_like_object}})
    if schedule_now:
        Sync.ScheduleImmediateSync(user)
def test_svc_level_dupe_time_leeway(self):
    ''' check that service-level duplicate activities within the defined time leeway are caught '''
    svcA, svcB = TestTools.create_mock_services()
    recA = TestTools.create_mock_svc_record(svcA)
    recB = TestTools.create_mock_svc_record(svcB)

    actA = Activity()
    actA.StartTime = datetime(1, 2, 3, 4, 5, 6, 7)
    actA.ServiceDataCollection = TestTools.create_mock_servicedatacollection(svcA, record=recA)
    actA.Type = set(svcA.SupportedActivities).intersection(set(svcB.SupportedActivities)).pop()
    actB = Activity()
    actB.StartTime = datetime(1, 2, 3, 4, 6, 6, 7)
    actB.ServiceDataCollection = TestTools.create_mock_servicedatacollection(svcB, record=recB)
    actB.Type = actA.Type

    actA.CalculateUID()
    actB.CalculateUID()

    activities = []
    Sync._accumulateActivities(recA, [actA], activities)
    Sync._accumulateActivities(recB, [actB], activities)

    self.assertIn(actA.UID, actA.UIDs)
    self.assertIn(actB.UID, actA.UIDs)
    self.assertIn(actA.UID, actB.UIDs)
    self.assertIn(actB.UID, actB.UIDs)

    # we need to fake up the service records to avoid having to call the actual sync method where these values are normally preset
    recA = TestTools.create_mock_svc_record(svcA)
    recB = TestTools.create_mock_svc_record(svcB)
    recA.SynchronizedActivities = [actA.UID]
    recB.SynchronizedActivities = [actB.UID]

    recipientServicesA = Sync._determineRecipientServices(actA, [recA, recB])
    recipientServicesB = Sync._determineRecipientServices(actB, [recA, recB])

    self.assertEqual(len(recipientServicesA), 0)
    self.assertEqual(len(recipientServicesB), 0)
    self.assertEqual(len(activities), 1)
def test_accumulate_exclusions(self):
    svcA, svcB = TestTools.create_mock_services()
    recA = TestTools.create_mock_svc_record(svcA)

    exclusionstore = {recA._id: {}}

    # regular
    exc = APIExcludeActivity("Messag!e", activityId=3.14)
    Sync._accumulateExclusions(recA, exc, exclusionstore)
    self.assertTrue("3_14" in exclusionstore[recA._id])
    self.assertEqual(exclusionstore[recA._id]["3_14"]["Message"], "Messag!e")
    self.assertEqual(exclusionstore[recA._id]["3_14"]["Activity"], None)
    self.assertEqual(exclusionstore[recA._id]["3_14"]["ExternalActivityID"], 3.14)
    self.assertEqual(exclusionstore[recA._id]["3_14"]["Permanent"], True)

    # updating
    act = TestTools.create_blank_activity(svcA)
    act.UID = "3_14"  # meh
    exc = APIExcludeActivity("Messag!e2", activityId=42, permanent=False, activity=act)
    Sync._accumulateExclusions(recA, exc, exclusionstore)
    self.assertTrue("3_14" in exclusionstore[recA._id])
    self.assertEqual(exclusionstore[recA._id]["3_14"]["Message"], "Messag!e2")
    self.assertNotEqual(exclusionstore[recA._id]["3_14"]["Activity"], None)  # Who knows what the string format will be down the road?
    self.assertEqual(exclusionstore[recA._id]["3_14"]["ExternalActivityID"], 42)
    self.assertEqual(exclusionstore[recA._id]["3_14"]["Permanent"], False)

    # multiple, retaining existing
    exc2 = APIExcludeActivity("INM", activityId=13)
    exc3 = APIExcludeActivity("FNIM", activityId=37)
    Sync._accumulateExclusions(recA, [exc2, exc3], exclusionstore)
    self.assertTrue("3_14" in exclusionstore[recA._id])
    self.assertTrue("37" in exclusionstore[recA._id])
    self.assertTrue("13" in exclusionstore[recA._id])

    # don't allow with no identifiers
    exc4 = APIExcludeActivity("nooooo")
    self.assertRaises(ValueError, Sync._accumulateExclusions, recA, [exc4], exclusionstore)
if isinstance(settings.HTTP_SOURCE_ADDR, list):
    settings.HTTP_SOURCE_ADDR = settings.HTTP_SOURCE_ADDR[settings.WORKER_INDEX % len(settings.HTTP_SOURCE_ADDR)]
    patch_requests_source_address((settings.HTTP_SOURCE_ADDR, 0))

print(" -> Index %s\n -> Interface %s" % (settings.WORKER_INDEX, settings.HTTP_SOURCE_ADDR))

# We defer including the main body of the application till here so the settings aren't captured before we've set them up.
# The better way would be to defer initializing services until they're requested, but it's 10:30 and this will work just as well.
from tapiriik.sync import Sync

while Run:
    cycleStart = datetime.utcnow()  # Avoid having synchronization fall down during DST setback
    processed_user_count = Sync.PerformGlobalSync(heartbeat_callback=sync_heartbeat, version=WorkerVersion)
    RecycleInterval -= processed_user_count
    # When there's no queue, all the workers sit sending 1000s of queries to the database server
    if processed_user_count == 0:
        # Put this before the recycle shutdown, otherwise it'll quit and get rebooted ASAP
        remaining_cycle_time = NoQueueMinCycleTime - (datetime.utcnow() - cycleStart)
        if remaining_cycle_time > timedelta(0):
            print("Pausing for %ss" % remaining_cycle_time.total_seconds())
            sync_heartbeat("idle-spin")
            time.sleep(remaining_cycle_time.total_seconds())
    if RecycleInterval <= 0:
        break
    sync_heartbeat("idle")

print("Sync worker shutting down cleanly")
def ConfigurationUpdating(self, svcRec, newConfig, oldConfig):
    from tapiriik.sync import Sync
    from tapiriik.auth import User
    if newConfig["SyncRoot"] != oldConfig["SyncRoot"]:
        Sync.ScheduleImmediateSync(User.AuthByService(svcRec), True)
        cachedb.dropbox_cache.update({"ExternalID": svcRec.ExternalID}, {"$unset": {"Structure": None}})
def ConnectService(user, serviceRecord):
    from tapiriik.services import Service, UserExceptionType
    existingUser = db.users.find_one({"_id": {'$ne': ObjectId(user["_id"])}, "ConnectedServices.ID": ObjectId(serviceRecord._id)})
    if "ConnectedServices" not in user:
        user["ConnectedServices"] = []
    delta = False
    if existingUser is not None:
        # merge merge merge
        user["ConnectedServices"] += existingUser["ConnectedServices"]
        if "Payments" in existingUser:
            if "Payments" not in user:
                user["Payments"] = []
            user["Payments"] += existingUser["Payments"]
        if "FlowExceptions" in existingUser:
            if "FlowExceptions" not in user:
                user["FlowExceptions"] = []
            user["FlowExceptions"] += existingUser["FlowExceptions"]
        user["Email"] = user["Email"] if "Email" in user and user["Email"] is not None else (existingUser["Email"] if "Email" in existingUser else None)
        user["NonblockingSyncErrorCount"] = (user["NonblockingSyncErrorCount"] if "NonblockingSyncErrorCount" in user and user["NonblockingSyncErrorCount"] is not None else 0) + (existingUser["NonblockingSyncErrorCount"] if "NonblockingSyncErrorCount" in existingUser and existingUser["NonblockingSyncErrorCount"] is not None else 0)
        user["BlockingSyncErrorCount"] = (user["BlockingSyncErrorCount"] if "BlockingSyncErrorCount" in user and user["BlockingSyncErrorCount"] is not None else 0) + (existingUser["BlockingSyncErrorCount"] if "BlockingSyncErrorCount" in existingUser and existingUser["BlockingSyncErrorCount"] is not None else 0)
        user["SyncExclusionCount"] = (user["SyncExclusionCount"] if "SyncExclusionCount" in user and user["SyncExclusionCount"] is not None else 0) + (existingUser["SyncExclusionCount"] if "SyncExclusionCount" in existingUser and existingUser["SyncExclusionCount"] is not None else 0)
        user["Created"] = user["Created"] if user["Created"] < existingUser["Created"] else existingUser["Created"]
        if "AncestorAccounts" not in user:
            user["AncestorAccounts"] = []
        user["AncestorAccounts"] += existingUser["AncestorAccounts"] if "AncestorAccounts" in existingUser else []
        user["AncestorAccounts"] += [existingUser["_id"]]
        user["Timezone"] = user["Timezone"] if user["Timezone"] else existingUser["Timezone"]
        delta = True
        db.users.remove({"_id": existingUser["_id"]})
    else:
        if serviceRecord._id not in [x["ID"] for x in user["ConnectedServices"]]:
            # we might be connecting a second account for the same service
            for duplicateConn in [x for x in user["ConnectedServices"] if x["Service"] == serviceRecord.Service.ID]:
                dupeRecord = User.GetConnectionRecord(user, serviceRecord.Service.ID)  # this'll just pick the first connection of type, but we repeat the right # of times anyways
                Service.DeleteServiceRecord(dupeRecord)
                # We used to call DisconnectService() here, but the results of that call were getting overwritten, which was unfortunate.
            user["ConnectedServices"] = [x for x in user["ConnectedServices"] if x["Service"] != serviceRecord.Service.ID]
            user["ConnectedServices"].append({"Service": serviceRecord.Service.ID, "ID": serviceRecord._id})
            delta = True

    db.users.update({"_id": user["_id"]}, user)
    if delta or (hasattr(serviceRecord, "SyncErrors") and len(serviceRecord.SyncErrors) > 0):  # also schedule an immediate sync if there is an outstanding error (i.e. user reconnected)
        db.connections.update({"_id": serviceRecord._id}, {"$pull": {"SyncErrors": {"UserException.Type": UserExceptionType.Authorization}}})  # Pull all auth-related errors from the service so they don't continue to see them while the sync completes.
        Sync.SetNextSyncIsExhaustive(user, True)  # exhaustive, so it'll pick up activities from newly added services / ones lost during an error
        if hasattr(serviceRecord, "SyncErrors") and len(serviceRecord.SyncErrors) > 0:
            Sync.ScheduleImmediateSync(user)
patch_requests_with_default_timeout(timeout=60)

if isinstance(settings.HTTP_SOURCE_ADDR, list):
    settings.HTTP_SOURCE_ADDR = settings.HTTP_SOURCE_ADDR[settings.WORKER_INDEX % len(settings.HTTP_SOURCE_ADDR)]
    patch_requests_source_address((settings.HTTP_SOURCE_ADDR, 0))

print(" %d -> Index %s\n -> Interface %s" % (os.getpid(), settings.WORKER_INDEX, settings.HTTP_SOURCE_ADDR))

# We defer including the main body of the application till here so the settings aren't captured before we've set them up.
# The better way would be to defer initializing services until they're requested, but it's 10:30 and this will work just as well.
from tapiriik.sync import Sync

Sync.InitializeWorkerBindings()

sync_heartbeat("ready")
worker_message("ready")

Sync.PerformGlobalSync(heartbeat_callback=sync_heartbeat, version=WorkerVersion, max_users=RecycleInterval)

worker_message("shutting down cleanly")
db.sync_workers.remove({"_id": heartbeat_rec_id})
close_connections()
worker_message("shut down")
sys.stdout.flush()
}, upsert=True, return_document=ReturnDocument.AFTER)
heartbeat_rec_id = heartbeat_rec["_id"]

patch_requests_with_default_timeout(timeout=60)

if isinstance(settings.HTTP_SOURCE_ADDR, list):
    settings.HTTP_SOURCE_ADDR = settings.HTTP_SOURCE_ADDR[settings.WORKER_INDEX % len(settings.HTTP_SOURCE_ADDR)]
    patch_requests_source_address((settings.HTTP_SOURCE_ADDR, 0))

# We defer including the main body of the application till here so the settings aren't captured before we've set them up.
# The better way would be to defer initializing services until they're requested, but it's 10:30 and this will work just as well.
from tapiriik.sync import Sync

sync_heartbeat("ready")
worker_message("ready")

Sync = Sync()
Sync.PerformGlobalSync(heartbeat_callback=sync_heartbeat, version=WorkerVersion)

worker_message("shutting down cleanly")
db.sync_workers.delete_one({"_id": heartbeat_rec_id})
close_connections()
worker_message("shut down")
logging.info("-----[ ENDING SYNC_WORKER ]-----")
sys.stdout.flush()
from django.shortcuts import render, redirect
from django.http import HttpResponse
from tapiriik.settings import DIAG_AUTH_TOTP_SECRET, DIAG_AUTH_PASSWORD, SITE_VER
from tapiriik.database import db
from tapiriik.sync import Sync
from tapiriik.auth import TOTP, DiagnosticsUser, User
from bson.objectid import ObjectId
import hashlib
import json
import urllib.parse
from datetime import datetime, timedelta

Sync = Sync()


def diag_requireAuth(view):
    def authWrapper(req, *args, **kwargs):
        if not DiagnosticsUser.IsAuthenticated(req):
            return redirect("diagnostics_login")
        return view(req, *args, **kwargs)
    return authWrapper


@diag_requireAuth
def diag_dashboard(req):
    return redirect("diagnostics_queue_dashboard")


@diag_requireAuth
def diag_queue_dashboard(req):
def test_activity_coalesce(self):
    ''' ensure that activity data is getting coalesced by _accumulateActivities '''
    svcA, svcB = TestTools.create_mock_services()
    recA = TestTools.create_mock_svc_record(svcA)
    recB = TestTools.create_mock_svc_record(svcB)
    actA = TestTools.create_random_activity(svcA, tz=pytz.timezone("America/Iqaluit"))
    actB = Activity()
    actB.StartTime = actA.StartTime.replace(tzinfo=None)
    actB.ServiceDataCollection = TestTools.create_mock_servicedatacollection(svcB)

    actA.Name = "Not this"
    actA.Private = True
    actB.Name = "Heya"
    actB.Type = ActivityType.Walking
    actA.CalculateUID()
    actB.CalculateUID()

    activities = []
    Sync._accumulateActivities(recB, [copy.deepcopy(actB)], activities)
    Sync._accumulateActivities(recA, [copy.deepcopy(actA)], activities)

    self.assertEqual(len(activities), 1)
    act = activities[0]

    self.assertEqual(act.StartTime, actA.StartTime)
    self.assertEqual(act.EndTime, actA.EndTime)
    self.assertEqual(act.EndTime.tzinfo, actA.StartTime.tzinfo)
    self.assertEqual(act.StartTime.tzinfo, actA.StartTime.tzinfo)
    self.assertEqual(act.Waypoints, actA.Waypoints)
    self.assertTrue(act.Private)  # Most restrictive setting
    self.assertEqual(act.Name, actB.Name)  # The first activity takes priority.
    self.assertEqual(act.Type, actB.Type)  # Same here.
    self.assertTrue(list(actB.ServiceDataCollection.keys())[0] in act.ServiceDataCollection)
    self.assertTrue(list(actA.ServiceDataCollection.keys())[0] in act.ServiceDataCollection)

    activities = []
    Sync._accumulateActivities(recA, [copy.deepcopy(actA)], activities)
    Sync._accumulateActivities(recB, [copy.deepcopy(actB)], activities)

    self.assertEqual(len(activities), 1)
    act = activities[0]

    self.assertEqual(act.StartTime, actA.StartTime)
    self.assertEqual(act.EndTime, actA.EndTime)
    self.assertEqual(act.EndTime.tzinfo, actA.StartTime.tzinfo)
    self.assertEqual(act.StartTime.tzinfo, actA.StartTime.tzinfo)
    self.assertEqual(act.Waypoints, actA.Waypoints)
    self.assertEqual(act.Name, actA.Name)  # The first activity takes priority.
    self.assertEqual(act.Type, actB.Type)  # Exception: ActivityType.Other does not take priority
    self.assertTrue(list(actB.ServiceDataCollection.keys())[0] in act.ServiceDataCollection)
    self.assertTrue(list(actA.ServiceDataCollection.keys())[0] in act.ServiceDataCollection)

    actA.Type = ActivityType.CrossCountrySkiing
    activities = []
    Sync._accumulateActivities(recA, [copy.deepcopy(actA)], activities)
    Sync._accumulateActivities(recB, [copy.deepcopy(actB)], activities)

    self.assertEqual(len(activities), 1)
    act = activities[0]
    self.assertEqual(act.Type, actA.Type)  # Here, it will take priority.
def test_activity_deduplicate_tzerror(self):
    ''' Test that probably-duplicate activities with starttimes like 09:12:22 and 15:12:22 (on the same day) are recognized as one '''
    svcA, svcB = TestTools.create_mock_services()
    actA = TestTools.create_random_activity(svcA, tz=pytz.timezone("America/Iqaluit"))
    actB = Activity()
    actB.StartTime = actA.StartTime.replace(tzinfo=pytz.timezone("America/Denver")) + timedelta(hours=5)
    actB.UploadedTo = [TestTools.create_mock_upload_record(svcB)]
    actA.Name = "Not this"
    actB.Name = "Heya"
    actB.Type = ActivityType.Walking
    actA.CalculateUID()
    actB.CalculateUID()

    activities = []
    Sync._accumulateActivities(Service.FromID("mockB"), [copy.deepcopy(actB)], activities)
    Sync._accumulateActivities(Service.FromID("mockA"), [copy.deepcopy(actA)], activities)

    self.assertEqual(len(activities), 1)

    # Ensure that it is an exact match
    actB.StartTime = actA.StartTime.replace(tzinfo=pytz.timezone("America/Denver")) + timedelta(hours=5, seconds=1)
    activities = []
    Sync._accumulateActivities(Service.FromID("mockB"), [copy.deepcopy(actB)], activities)
    Sync._accumulateActivities(Service.FromID("mockA"), [copy.deepcopy(actA)], activities)

    self.assertEqual(len(activities), 2)

    # Ensure that overly large differences >14hr - not possible via TZ differences - are not deduplicated
    actB.StartTime = actA.StartTime.replace(tzinfo=pytz.timezone("America/Denver")) + timedelta(hours=15)
    activities = []
    Sync._accumulateActivities(Service.FromID("mockB"), [copy.deepcopy(actB)], activities)
    Sync._accumulateActivities(Service.FromID("mockA"), [copy.deepcopy(actA)], activities)

    self.assertEqual(len(activities), 2)
def test_activity_deduplicate_tzerror(self):
    ''' Test that probably-duplicate activities with starttimes like 09:12:22 and 15:12:22 (on the same day) are recognized as one '''
    svcA, svcB = TestTools.create_mock_services()
    recA = TestTools.create_mock_svc_record(svcA)
    recB = TestTools.create_mock_svc_record(svcB)
    actA = TestTools.create_random_activity(svcA, tz=pytz.timezone("America/Iqaluit"))
    actB = Activity()
    actB.StartTime = actA.StartTime.replace(tzinfo=pytz.timezone("America/Denver")) + timedelta(hours=5)
    actB.ServiceDataCollection = TestTools.create_mock_servicedatacollection(svcB)
    actA.Name = "Not this"
    actB.Name = "Heya"
    actB.Type = ActivityType.Walking
    actA.CalculateUID()
    actB.CalculateUID()

    activities = []
    Sync._accumulateActivities(recB, [copy.deepcopy(actB)], activities)
    Sync._accumulateActivities(recA, [copy.deepcopy(actA)], activities)

    self.assertEqual(len(activities), 1)

    # Ensure that it is an exact match
    actB.StartTime = actA.StartTime.replace(tzinfo=pytz.timezone("America/Denver")) + timedelta(hours=5, seconds=1)
    activities = []
    Sync._accumulateActivities(recB, [copy.deepcopy(actB)], activities)
    Sync._accumulateActivities(recA, [copy.deepcopy(actA)], activities)

    self.assertEqual(len(activities), 2)

    # Ensure that overly large differences >38hr - not possible via TZ differences & shamefully bad import/export code on the part of some services - are not deduplicated
    actB.StartTime = actA.StartTime.replace(tzinfo=pytz.timezone("America/Denver")) + timedelta(hours=50)
    activities = []
    Sync._accumulateActivities(recB, [copy.deepcopy(actB)], activities)
    Sync._accumulateActivities(recA, [copy.deepcopy(actA)], activities)

    self.assertEqual(len(activities), 2)
def test_activity_coalesce(self):
    ''' ensure that activity data is getting coalesced by _accumulateActivities '''
    svcA, svcB = TestTools.create_mock_services()
    actA = TestTools.create_random_activity(svcA, tz=pytz.timezone("America/Iqaluit"))
    actB = Activity()
    actB.StartTime = actA.StartTime.replace(tzinfo=None)
    actB.UploadedTo = [TestTools.create_mock_upload_record(svcB)]

    actA.Name = "Not this"
    actB.Name = "Heya"
    actB.Type = ActivityType.Walking
    actA.CalculateUID()
    actB.CalculateUID()

    activities = []
    Sync._accumulateActivities(Service.FromID("mockB"), [copy.deepcopy(actB)], activities)
    Sync._accumulateActivities(Service.FromID("mockA"), [copy.deepcopy(actA)], activities)

    self.assertEqual(len(activities), 1)
    act = activities[0]

    self.assertEqual(act.StartTime, actA.StartTime)
    self.assertEqual(act.EndTime, actA.EndTime)
    self.assertEqual(act.EndTime.tzinfo, actA.StartTime.tzinfo)
    self.assertEqual(act.StartTime.tzinfo, actA.StartTime.tzinfo)
    self.assertEqual(act.Waypoints, actA.Waypoints)
    self.assertEqual(act.Name, actB.Name)  # The first activity takes priority.
    self.assertEqual(act.Type, actB.Type)  # Same here.
    self.assertTrue(actB.UploadedTo[0] in act.UploadedTo)
    self.assertTrue(actA.UploadedTo[0] in act.UploadedTo)

    activities = []
    Sync._accumulateActivities(Service.FromID("mockA"), [copy.deepcopy(actA)], activities)
    Sync._accumulateActivities(Service.FromID("mockB"), [copy.deepcopy(actB)], activities)

    self.assertEqual(len(activities), 1)
    act = activities[0]

    self.assertEqual(act.StartTime, actA.StartTime)
    self.assertEqual(act.EndTime, actA.EndTime)
    self.assertEqual(act.EndTime.tzinfo, actA.StartTime.tzinfo)
    self.assertEqual(act.StartTime.tzinfo, actA.StartTime.tzinfo)
    self.assertEqual(act.Waypoints, actA.Waypoints)
    self.assertEqual(act.Name, actA.Name)  # The first activity takes priority.
    self.assertEqual(act.Type, actB.Type)  # Exception: ActivityType.Other does not take priority
    self.assertTrue(actB.UploadedTo[0] in act.UploadedTo)
    self.assertTrue(actA.UploadedTo[0] in act.UploadedTo)

    actA.Type = ActivityType.CrossCountrySkiing
    activities = []
    Sync._accumulateActivities(Service.FromID("mockA"), [copy.deepcopy(actA)], activities)
    Sync._accumulateActivities(Service.FromID("mockB"), [copy.deepcopy(actB)], activities)

    self.assertEqual(len(activities), 1)
    act = activities[0]
    self.assertEqual(act.Type, actA.Type)  # Here, it will take priority.
class User:
    ConfigurationDefaults = {
        "suppress_auto_sync": False,
        "sync_upload_delay": 0,
        "sync_skip_before": None,
        "historical_sync": False
    }

    Sync = Sync()

    def Get(id):
        return db.users.find_one({"_id": ObjectId(id)})

    def GetByConnection(svcRec):
        return db.users.find_one({"ConnectedServices.ID": svcRec._id})

    def Ensure(req):
        from ipware.ip import get_real_ip
        if req.user == None:
            req.user = User.Create(creationIP=get_real_ip(req))
            User.Login(req.user, req)
        return req.user

    def Login(user, req):
        req.session["userid"] = str(user["_id"])
        req.user = user

    def Logout(req):
        del req.session["userid"]
        del req.user

    def Create(creationIP=None):
        uid = db.users.insert({"Created": datetime.utcnow(), "CreationIP": creationIP})  # will mongodb insert an almost empty doc, i.e. _id?
        return db.users.with_options(read_preference=ReadPreference.PRIMARY).find_one({"_id": uid})

    def GetConnectionRecordsByUser(user):
        return [ServiceRecord(x) for x in db.connections.find({"_id": {"$in": [x["ID"] for x in user["ConnectedServices"]]}})]

    def GetConnectionRecord(user, svcId):
        rec = db.connections.find_one({"_id": {"$in": [x["ID"] for x in user["ConnectedServices"] if x["Service"] == svcId]}})
        return ServiceRecord(rec) if rec else None

    def SetEmail(user, email):
        db.users.update({"_id": ObjectId(user["_id"])}, {"$set": {"Email": email}})

    def SetTimezone(user, tz):
        db.users.update({"_id": ObjectId(user["_id"])}, {"$set": {"Timezone": tz}})

    def _assocPaymentLikeObject(user, collection, payment_like_object, schedule_now, skip_deassoc=False):
        # Since I seem to have taken this duck-typing quite far
        # First, deassociate payment ids from other accounts that may be using them
        if "_id" in payment_like_object and not skip_deassoc:
            db.users.update({}, {"$pull": {collection: {"_id": payment_like_object["_id"]}}}, multi=True)
        # Then, attach to us
        db.users.update({"_id": ObjectId(user["_id"])}, {"$addToSet": {collection: payment_like_object}})
        if schedule_now:
            Sync.ScheduleImmediateSync(user)

    def AssociatePayment(user, payment, schedule_now=True):
        User._assocPaymentLikeObject(user, "Payments", payment, schedule_now)

    def AssociateExternalPayment(user, external_payment, schedule_now=False, skip_deassoc=False):
        User._assocPaymentLikeObject(user, "ExternalPayments", external_payment, schedule_now, skip_deassoc)

    def AssociatePromo(user, promo, schedule_now=True):
        User._assocPaymentLikeObject(user, "Promos", promo, schedule_now)

    def HasActivePayment(user):
        # Payments and Promos share the essential data field - Expiry
        # We don't really care if the payment has yet to take place yet - why would it be in the system then?
        # (Timestamp too, but the fact we rely on it here is only for backwards compatibility with some old payment records)
        payment_like_objects = (user["Payments"] if "Payments" in user else []) + (user["Promos"] if "Promos" in user else []) + (user["ExternalPayments"] if "ExternalPayments" in user else [])
        for payment in payment_like_objects:
            if "Expiry" in payment:
                if payment["Expiry"] == None or payment["Expiry"] > datetime.utcnow():
                    return True
            else:
                if payment["Timestamp"] > (datetime.utcnow() - timedelta(days=365.25)):
                    return True
        return False

    def PaidUserMongoQuery():
        # Don't need the no-expiry case here, those payments have all expired by now
        return {
            "$or": [
                {"Payments.Expiry": {"$gt": datetime.utcnow()}},
                {"Promos.Expiry": {"$gt": datetime.utcnow()}},
                {"Promos.Expiry": {"$type": 10, "$exists": True}}  # === null
            ]
        }

    def IsServiceConnected(user, service_id):
        return service_id in [x["Service"] for x in user["ConnectedServices"]]

    def ConnectService(user, serviceRecord):
        from tapiriik.services import Service, UserExceptionType
        existingUser = db.users.find_one({"_id": {'$ne': ObjectId(user["_id"])}, "ConnectedServices.ID": ObjectId(serviceRecord._id)})
        if "ConnectedServices" not in user:
            user["ConnectedServices"] = []
        delta = False
        if existingUser is not None:
            # merge merge merge
            # Don't let the user end up with two services of the same type, ever
            # It's not fully supported, plus it's caused all sorts of trauma in the past.
            # Note that this will discard the new serviceRecord connection if an existing one exists on the other account
            # ...which isn't the end of the world, compared to screwing around asking the user which they wanted to keep.
            for to_merge_service in existingUser["ConnectedServices"]:
                if len([x for x in user["ConnectedServices"] if x["Service"] == to_merge_service["Service"]]) == 0:
                    user["ConnectedServices"].append(to_merge_service)
            # There's got to be some 1-liner to do this merge
            if "Payments" in existingUser:
                if "Payments" not in user:
                    user["Payments"] = []
                user["Payments"] += existingUser["Payments"]
            if "Promos" in existingUser:
                if "Promos" not in user:
                    user["Promos"] = []
                user["Promos"] += existingUser["Promos"]
            if "ExternalPayments" in existingUser:
                if "ExternalPayments" not in user:
                    user["ExternalPayments"] = []
                user["ExternalPayments"] += existingUser["ExternalPayments"]
            if "FlowExceptions" in existingUser:
                if "FlowExceptions" not in user:
                    user["FlowExceptions"] = []
                user["FlowExceptions"] += existingUser["FlowExceptions"]
            user["Email"] = user["Email"] if "Email" in user and user["Email"] is not None else (existingUser["Email"] if "Email" in existingUser else None)
            user["NonblockingSyncErrorCount"] = (user["NonblockingSyncErrorCount"] if "NonblockingSyncErrorCount" in user and user["NonblockingSyncErrorCount"] is not None else 0) + (existingUser["NonblockingSyncErrorCount"] if "NonblockingSyncErrorCount" in existingUser and existingUser["NonblockingSyncErrorCount"] is not None else 0)
            user["BlockingSyncErrorCount"] = (user["BlockingSyncErrorCount"] if "BlockingSyncErrorCount" in user and user["BlockingSyncErrorCount"] is not None else 0) + (existingUser["BlockingSyncErrorCount"] if "BlockingSyncErrorCount" in existingUser and existingUser["BlockingSyncErrorCount"] is not None else 0)
            user["SyncExclusionCount"] = (user["SyncExclusionCount"] if "SyncExclusionCount" in user and user["SyncExclusionCount"] is not None else 0) + (existingUser["SyncExclusionCount"] if "SyncExclusionCount" in existingUser and existingUser["SyncExclusionCount"] is not None else 0)
            user["Created"] = user["Created"] if user["Created"] < existingUser["Created"] else existingUser["Created"]
            if "AncestorAccounts" not in user:
                user["AncestorAccounts"] = []
            user["AncestorAccounts"] += existingUser["AncestorAccounts"] if "AncestorAccounts" in existingUser else []
            user["AncestorAccounts"] += [existingUser["_id"]]
            user["Timezone"] = user["Timezone"] if "Timezone" in user and user["Timezone"] else (existingUser["Timezone"] if "Timezone" in existingUser else None)
            user["CreationIP"] = user["CreationIP"] if "CreationIP" in user and user["CreationIP"] else (existingUser["CreationIP"] if "CreationIP" in existingUser else None)
            existing_config = existingUser["Config"] if "Config" in existingUser else {}
            existing_config.update(user["Config"] if "Config" in user else {})
            user["Config"] = existing_config
            delta = True
            db.users.remove({"_id": existingUser["_id"]})
        else:
            if serviceRecord._id not in [x["ID"] for x in user["ConnectedServices"]]:
                # we might be connecting a second account for the same service
                for duplicateConn in [x for x in user["ConnectedServices"] if x["Service"] == serviceRecord.Service.ID]:
                    dupeRecord = User.GetConnectionRecord(user, serviceRecord.Service.ID)  # this'll just pick the first connection of type, but we repeat the right # of times anyways
                    Service.DeleteServiceRecord(dupeRecord)
                    # We used to call DisconnectService() here, but the results of that call were getting overwritten, which was unfortunate.
                user["ConnectedServices"] = [x for x in user["ConnectedServices"] if x["Service"] != serviceRecord.Service.ID]
                user["ConnectedServices"].append({"Service": serviceRecord.Service.ID, "ID": serviceRecord._id})
                delta = True

        db.users.update({"_id": user["_id"]}, user)
        if delta or (hasattr(serviceRecord, "SyncErrors") and len(serviceRecord.SyncErrors) > 0):  # also schedule an immediate sync if there is an outstanding error (i.e. user reconnected)
            db.connections.update({"_id": serviceRecord._id}, {"$pull": {"SyncErrors": {"UserException.Type": UserExceptionType.Authorization}}})  # Pull all auth-related errors from the service so they don't continue to see them while the sync completes.
            db.connections.update({"_id": serviceRecord._id}, {"$pull": {"SyncErrors": {"UserException.Type": UserExceptionType.RenewPassword}}})  # Pull all auth-related errors from the service so they don't continue to see them while the sync completes.
            Sync.SetNextSyncIsExhaustive(user, True)  # exhaustive, so it'll pick up activities from newly added services / ones lost during an error
            if hasattr(serviceRecord, "SyncErrors") and len(serviceRecord.SyncErrors) > 0:
                Sync.ScheduleImmediateSync(user)

    def DisconnectService(serviceRecord, preserveUser=False):
        # not that >1 user should have this connection
        activeUsers = list(db.users.find({"ConnectedServices.ID": serviceRecord._id}))
        if len(activeUsers) == 0:
            raise Exception("No users found with service " + serviceRecord._id)
        db.users.update({}, {"$pull": {"ConnectedServices": {"ID": serviceRecord._id}}}, multi=True)
        if not preserveUser:
            for user in activeUsers:
                if len(user["ConnectedServices"]) - 1 == 0:
                    # I guess we're done here?
                    db.activity_records.remove({"UserID": user["_id"]})
                    db.users.remove({"_id": user["_id"]})

    def AuthByService(serviceRecord):
        return db.users.find_one({"ConnectedServices.ID": serviceRecord._id})

    def SetFlowException(user, sourceServiceRecord, targetServiceRecord, flowToTarget=True, flowToSource=True):
        if "FlowExceptions" not in user:
            user["FlowExceptions"] = []

        # flow exceptions are stored in "forward" direction - service-account X will not send activities to service-account Y
        forwardException = {"Target": {"Service": targetServiceRecord.Service.ID, "ExternalID": targetServiceRecord.ExternalID}, "Source": {"Service": sourceServiceRecord.Service.ID, "ExternalID": sourceServiceRecord.ExternalID}}
        backwardsException = {"Target": forwardException["Source"], "Source": forwardException["Target"]}
        if flowToTarget is not None:
            if flowToTarget:
                user["FlowExceptions"][:] = [x for x in user["FlowExceptions"] if x != forwardException]
            elif not flowToTarget and forwardException not in user["FlowExceptions"]:
                user["FlowExceptions"].append(forwardException)
        if flowToSource is not None:
            if flowToSource:
                user["FlowExceptions"][:] = [x for x in user["FlowExceptions"] if x != backwardsException]
            elif not flowToSource and backwardsException not in user["FlowExceptions"]:
                user["FlowExceptions"].append(backwardsException)
        db.users.update({"_id": user["_id"]}, {"$set": {"FlowExceptions": user["FlowExceptions"]}})

    def GetFlowExceptions(user):
        if "FlowExceptions" not in user:
            return {}
        return user["FlowExceptions"]

    def CheckFlowException(user, sourceServiceRecord, targetServiceRecord):
        ''' returns true if there is a flow exception blocking activities moving from source to destination '''
        forwardException = {"Target": {"Service": targetServiceRecord.Service.ID, "ExternalID": targetServiceRecord.ExternalID}, "Source": {"Service": sourceServiceRecord.Service.ID, "ExternalID": sourceServiceRecord.ExternalID}}
        return "FlowExceptions" in user and forwardException in user["FlowExceptions"]

    # You may recognize that these functions are shamelessly copy-pasted from service_base.py
    def GetConfiguration(user):
        config = copy.deepcopy(User.ConfigurationDefaults)
        config.update(user["Config"] if "Config" in user else {})
        return config

    def SetConfiguration(user, config, no_save=False, drop_existing=False):
        sparseConfig = {}
        if not drop_existing:
            sparseConfig = copy.deepcopy(User.GetConfiguration(user))
        sparseConfig.update(config)

        keys_to_delete = []
        for k, v in sparseConfig.items():
            if k in User.ConfigurationDefaults and User.ConfigurationDefaults[k] == v:
                keys_to_delete.append(k)  # it's the default, we don't need to store it
        for k in keys_to_delete:
            del sparseConfig[k]
        user["Config"] = sparseConfig
        if not no_save:
            db.users.update({"_id": user["_id"]}, {"$set": {"Config": sparseConfig}})
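# Illustrative sketch only (an assumption following the forwardException construction in
# SetFlowException above): blocking flow from one connection to another stores a document
# like this on the user, and CheckFlowException simply tests for its presence. The service
# IDs and ExternalIDs below are hypothetical.
_example_flow_exception = {
    "Source": {"Service": "mockA", "ExternalID": 1234},
    "Target": {"Service": "mockB", "ExternalID": 5678},
}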
upsert=True)
sys.stdout.flush()

patch_requests_with_default_timeout(timeout=60)

if isinstance(settings.HTTP_SOURCE_ADDR, list):
    settings.HTTP_SOURCE_ADDR = settings.HTTP_SOURCE_ADDR[settings.WORKER_INDEX % len(settings.HTTP_SOURCE_ADDR)]
    patch_requests_source_address((settings.HTTP_SOURCE_ADDR, 0))

print(" -> Index %s\n -> Interface %s" % (settings.WORKER_INDEX, settings.HTTP_SOURCE_ADDR))

# We defer including the main body of the application till here so the settings aren't captured before we've set them up.
# The better way would be to defer initializing services until they're requested, but it's 10:30 and this will work just as well.
from tapiriik.sync import Sync

while Run:
    cycleStart = datetime.datetime.utcnow()
    RecycleInterval -= Sync.PerformGlobalSync(heartbeat_callback=sync_heartbeat, version=WorkerVersion)
    if RecycleInterval <= 0:
        break
    if (datetime.datetime.utcnow() - cycleStart).total_seconds() < 1:
        time.sleep(1)
    sync_heartbeat("idle")

print("Sync worker shutting down cleanly")
db.sync_workers.remove({"Process": os.getpid()})
sys.stdout.flush()
oldCwd = os.getcwd()
WorkerVersion = subprocess.Popen(["git", "rev-parse", "HEAD"], stdout=subprocess.PIPE, cwd=os.path.dirname(__file__)).communicate()[0].strip()
os.chdir(oldCwd)


def sync_interrupt(signal, frame):
    global Run
    Run = False

signal.signal(signal.SIGINT, sync_interrupt)


def sync_heartbeat(state):
    db.sync_workers.update({"Process": os.getpid()}, {"$set": {"Heartbeat": datetime.datetime.utcnow(), "State": state}})

print("Sync worker starting at " + datetime.datetime.now().ctime() + " pid " + str(os.getpid()))
db.sync_workers.update({"Process": os.getpid()}, {"Process": os.getpid(), "Heartbeat": datetime.datetime.utcnow(), "Startup": datetime.datetime.utcnow(), "Version": WorkerVersion, "Host": socket.gethostname(), "State": "startup"}, upsert=True)
sys.stdout.flush()

patch_requests_with_default_timeout(timeout=60)

while Run:
    cycleStart = datetime.datetime.utcnow()
    Sync.PerformGlobalSync(heartbeat_callback=sync_heartbeat)
    if (datetime.datetime.utcnow() - cycleStart).total_seconds() < 1:
        time.sleep(1)
    sync_heartbeat("idle")

print("Sync worker shutting down cleanly")
db.sync_workers.remove({"Process": os.getpid()})
sys.stdout.flush()
def diag_user(req, user):
    try:
        userRec = db.users.find_one({"_id": ObjectId(user)})
    except:
        userRec = None
    if not userRec:
        searchOpts = [{"Payments.Txn": user}, {"Payments.Email": user}]
        try:
            searchOpts.append({"AncestorAccounts": ObjectId(user)})
            searchOpts.append({"ConnectedServices.ID": ObjectId(user)})
        except:
            pass  # Invalid format for ObjectId
        userRec = db.users.find_one({"$or": searchOpts})
    if not userRec:
        searchOpts = [{"ExternalID": user}]
        try:
            searchOpts.append({"ExternalID": int(user)})
        except:
            pass  # Not an int
        svcRec = db.connections.find_one({"$or": searchOpts})
        if svcRec:
            userRec = db.users.find_one({"ConnectedServices.ID": svcRec["_id"]})
        if userRec:
            return redirect("diagnostics_user", user=userRec["_id"])
    if not userRec:
        return render(req, "diag/error_user_not_found.html")

    delta = True  # Easier to set this to false in the one no-change case.

    if "sync" in req.POST:
        Sync.ScheduleImmediateSync(userRec, req.POST["sync"] == "Full")
    elif "unlock" in req.POST:
        db.users.update({"_id": ObjectId(user)}, {"$unset": {"SynchronizationWorker": None}})
    elif "lock" in req.POST:
        db.users.update({"_id": ObjectId(user)}, {"$set": {"SynchronizationWorker": 1}})
    elif "requeue" in req.POST:
        db.users.update({"_id": ObjectId(user)}, {"$unset": {"QueuedAt": None}})
    elif "hostrestrict" in req.POST:
        host = req.POST["host"]
        if host:
            db.users.update({"_id": ObjectId(user)}, {"$set": {"SynchronizationHostRestriction": host}})
        else:
            db.users.update({"_id": ObjectId(user)}, {"$unset": {"SynchronizationHostRestriction": None}})
    elif "substitute" in req.POST:
        req.session["substituteUserid"] = user
        return redirect("dashboard")
    elif "svc_setauth" in req.POST and len(req.POST["authdetails"]):
        db.connections.update({"_id": ObjectId(req.POST["id"])}, {"$set": {"Authorization": json.loads(req.POST["authdetails"])}})
    elif "svc_setconfig" in req.POST and len(req.POST["config"]):
        db.connections.update({"_id": ObjectId(req.POST["id"])}, {"$set": {"Config": json.loads(req.POST["config"])}})
    elif "svc_unlink" in req.POST:
        from tapiriik.services import Service
        from tapiriik.auth import User
        svcRec = Service.GetServiceRecordByID(req.POST["id"])
        try:
            Service.DeleteServiceRecord(svcRec)
        except:
            pass
        try:
            User.DisconnectService(svcRec)
        except:
            pass
    elif "svc_marksync" in req.POST:
        db.connections.update({"_id": ObjectId(req.POST["id"])},
                              {"$addToSet": {"SynchronizedActivities": req.POST["uid"]}},
                              multi=False)
    elif "svc_clearexc" in req.POST:
        db.connections.update({"_id": ObjectId(req.POST["id"])}, {"$unset": {"ExcludedActivities": 1}})
    elif "svc_clearacts" in req.POST:
        db.connections.update({"_id": ObjectId(req.POST["id"])}, {"$unset": {"SynchronizedActivities": 1}})
        Sync.SetNextSyncIsExhaustive(userRec, True)
    elif "svc_toggle_poll_sub" in req.POST:
        from tapiriik.services import Service
        svcRec = Service.GetServiceRecordByID(req.POST["id"])
        svcRec.SetPartialSyncTriggerSubscriptionState(not svcRec.PartialSyncTriggerSubscribed)
    elif "svc_toggle_poll_trigger" in req.POST:
        from tapiriik.services import Service
        svcRec = Service.GetServiceRecordByID(req.POST["id"])
        db.connections.update({"_id": ObjectId(req.POST["id"])}, {"$set": {"TriggerPartialSync": not getattr(svcRec, "TriggerPartialSync", False)}})
    elif "svc_tryagain" in req.POST:
        from tapiriik.services import Service
        svcRec = Service.GetServiceRecordByID(req.POST["id"])
        db.connections.update({"_id": ObjectId(req.POST["id"])}, {"$pull": {"SyncErrors": {"Scope": "activity"}}})
        act_recs = db.activity_records.find_one({"UserID": ObjectId(user)})
        for act in act_recs["Activities"]:
            if "FailureCounts" in act and svcRec.Service.ID in act["FailureCounts"]:
                del act["FailureCounts"][svcRec.Service.ID]
        db.activity_records.save(act_recs)
    else:
        delta = False

    if delta:
        return redirect("diagnostics_user", user=user)

    return render(req, "diag/user.html", {"diag_user": userRec})
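The view above repeatedly wraps ObjectId construction in bare try/except blocks. A hypothetical helper like the one below (not part of the view, shown only to make that pattern explicit) could centralize the parsing:

from bson.objectid import ObjectId
from bson.errors import InvalidId

def try_object_id(value):
    # Hypothetical convenience helper, not used by diag_user above: returns an
    # ObjectId when the input is well-formed, otherwise None.
    try:
        return ObjectId(value)
    except (InvalidId, TypeError):
        return None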
def test_eligibility_flowexception_change(self):
    user = TestTools.create_mock_user()
    svcA, svcB = TestTools.create_mock_services()
    recA = TestTools.create_mock_svc_record(svcA)
    recB = TestTools.create_mock_svc_record(svcB)
    act = TestTools.create_blank_activity(svcA, record=recB)
    act.Origin = recB
    recipientServices = [recA]
    excludedServices = []

    User.SetFlowException(user, recA, recB, flowToSource=False, flowToTarget=True)
    eligible = Sync._determineEligibleRecipientServices(activity=act, recipientServices=recipientServices, excludedServices=excludedServices, user=user)
    self.assertTrue(recA not in eligible)
    self.assertTrue(recB not in eligible)

    recipientServices = [recB]
    act.Origin = recA
    act.UploadedTo = [TestTools.create_mock_upload_record(svcA, record=recA)]
    User.SetFlowException(user, recA, recB, flowToSource=True, flowToTarget=False)
    eligible = Sync._determineEligibleRecipientServices(activity=act, recipientServices=recipientServices, excludedServices=excludedServices, user=user)
    self.assertTrue(recA not in eligible)
    self.assertTrue(recB not in eligible)

    User.SetFlowException(user, recA, recB, flowToSource=False, flowToTarget=False)
    eligible = Sync._determineEligibleRecipientServices(activity=act, recipientServices=recipientServices, excludedServices=excludedServices, user=user)
    self.assertTrue(recA not in eligible)
    self.assertTrue(recB not in eligible)

    recipientServices = [recA, recB]
    User.SetFlowException(user, recA, recB, flowToSource=True, flowToTarget=True)
    eligible = Sync._determineEligibleRecipientServices(activity=act, recipientServices=recipientServices, excludedServices=excludedServices, user=user)
    self.assertTrue(recA in eligible)
    self.assertTrue(recB in eligible)

    eligible = Sync._determineEligibleRecipientServices(activity=act, recipientServices=recipientServices, excludedServices=excludedServices, user=user)
    self.assertTrue(recA in eligible)
    self.assertTrue(recB in eligible)
def diag_user(req, user):
    try:
        userRec = db.users.find_one({"_id": ObjectId(user)})
    except:
        userRec = None
    if not userRec:
        searchOpts = [{"Payments.Txn": user}, {"Payments.Email": user}]
        try:
            searchOpts.append({"AncestorAccounts": ObjectId(user)})
            searchOpts.append({"ConnectedServices.ID": ObjectId(user)})
        except:
            pass  # Invalid format for ObjectId
        userRec = db.users.find_one({"$or": searchOpts})
    if not userRec:
        searchOpts = [{"ExternalID": user}]
        try:
            searchOpts.append({"ExternalID": int(user)})
        except:
            pass  # Not an int
        svcRec = db.connections.find_one({"$or": searchOpts})
        if svcRec:
            userRec = db.users.find_one({"ConnectedServices.ID": svcRec["_id"]})
        if userRec:
            return redirect("diagnostics_user", user=userRec["_id"])
    if not userRec:
        return render(req, "diag/error_user_not_found.html")

    delta = True  # Easier to set this to false in the one no-change case.

    if "sync" in req.POST:
        Sync.ScheduleImmediateSync(userRec, req.POST["sync"] == "Full")
    elif "unlock" in req.POST:
        db.users.update({"_id": ObjectId(user)}, {"$unset": {"SynchronizationWorker": None}})
    elif "lock" in req.POST:
        db.users.update({"_id": ObjectId(user)}, {"$set": {"SynchronizationWorker": 1}})
    elif "hostrestrict" in req.POST:
        host = req.POST["host"]
        if host:
            db.users.update({"_id": ObjectId(user)}, {"$set": {"SynchronizationHostRestriction": host}})
        else:
            db.users.update({"_id": ObjectId(user)}, {"$unset": {"SynchronizationHostRestriction": None}})
    elif "substitute" in req.POST:
        req.session["substituteUserid"] = user
        return redirect("dashboard")
    elif "svc_setauth" in req.POST and len(req.POST["authdetails"]):
        db.connections.update({"_id": ObjectId(req.POST["id"])}, {"$set": {"Authorization": json.loads(req.POST["authdetails"])}})
    elif "svc_unlink" in req.POST:
        from tapiriik.services import Service
        from tapiriik.auth import User
        svcRec = Service.GetServiceRecordByID(req.POST["id"])
        try:
            Service.DeleteServiceRecord(svcRec)
        except:
            pass
        try:
            User.DisconnectService(svcRec)
        except:
            pass
    elif "svc_marksync" in req.POST:
        db.connections.update({"_id": ObjectId(req.POST["id"])},
                              {"$addToSet": {"SynchronizedActivities": req.POST["uid"]}},
                              multi=False)
    elif "svc_clearexc" in req.POST:
        db.connections.update({"_id": ObjectId(req.POST["id"])}, {"$unset": {"ExcludedActivities": 1}})
    elif "svc_clearacts" in req.POST:
        db.connections.update({"_id": ObjectId(req.POST["id"])}, {"$unset": {"SynchronizedActivities": 1}})
        Sync.SetNextSyncIsExhaustive(userRec, True)
    else:
        delta = False

    if delta:
        return redirect("diagnostics_user", user=user)

    return render(req, "diag/user.html", {"user": userRec})
def test_activity_coalesce(self):
    ''' ensure that activity data is getting coalesced by _accumulateActivities '''
    svcA, svcB = TestTools.create_mock_services()
    actA = TestTools.create_random_activity(svcA, tz=pytz.timezone("America/Iqaluit"))
    actB = Activity()
    actB.StartTime = actA.StartTime.replace(tzinfo=None)
    actB.UploadedTo = [TestTools.create_mock_upload_record(svcB)]
    actA.Name = "Not this"
    actA.Private = True
    actB.Name = "Heya"
    actB.Type = ActivityType.Walking
    actA.CalculateUID()
    actB.CalculateUID()

    activities = []
    Sync._accumulateActivities(Service.FromID("mockB"), [copy.deepcopy(actB)], activities)
    Sync._accumulateActivities(Service.FromID("mockA"), [copy.deepcopy(actA)], activities)

    self.assertEqual(len(activities), 1)
    act = activities[0]

    self.assertEqual(act.StartTime, actA.StartTime)
    self.assertEqual(act.EndTime, actA.EndTime)
    self.assertEqual(act.EndTime.tzinfo, actA.StartTime.tzinfo)
    self.assertEqual(act.StartTime.tzinfo, actA.StartTime.tzinfo)
    self.assertEqual(act.Waypoints, actA.Waypoints)
    self.assertTrue(act.Private)  # Most restrictive setting
    self.assertEqual(act.Name, actB.Name)  # The first activity takes priority.
    self.assertEqual(act.Type, actB.Type)  # Same here.
    self.assertTrue(actB.UploadedTo[0] in act.UploadedTo)
    self.assertTrue(actA.UploadedTo[0] in act.UploadedTo)

    activities = []
    Sync._accumulateActivities(Service.FromID("mockA"), [copy.deepcopy(actA)], activities)
    Sync._accumulateActivities(Service.FromID("mockB"), [copy.deepcopy(actB)], activities)

    self.assertEqual(len(activities), 1)
    act = activities[0]

    self.assertEqual(act.StartTime, actA.StartTime)
    self.assertEqual(act.EndTime, actA.EndTime)
    self.assertEqual(act.EndTime.tzinfo, actA.StartTime.tzinfo)
    self.assertEqual(act.StartTime.tzinfo, actA.StartTime.tzinfo)
    self.assertEqual(act.Waypoints, actA.Waypoints)
    self.assertEqual(act.Name, actA.Name)  # The first activity takes priority.
    self.assertEqual(act.Type, actB.Type)  # Exception: ActivityType.Other does not take priority
    self.assertTrue(actB.UploadedTo[0] in act.UploadedTo)
    self.assertTrue(actA.UploadedTo[0] in act.UploadedTo)

    actA.Type = ActivityType.CrossCountrySkiing
    activities = []
    Sync._accumulateActivities(Service.FromID("mockA"), [copy.deepcopy(actA)], activities)
    Sync._accumulateActivities(Service.FromID("mockB"), [copy.deepcopy(actB)], activities)

    self.assertEqual(len(activities), 1)
    act = activities[0]
    self.assertEqual(act.Type, actA.Type)  # Here, it will take priority.
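For orientation, the priority rules this test exercises can be summarized in a small sketch. This is an illustration of the expected behaviour only, not the actual merge logic inside Sync._accumulateActivities, and the _coalesce_fields name is hypothetical:

def _coalesce_fields(first, second):
    # Sketch of the observed rules: the first accumulated activity's fields win,
    # Private is the most restrictive of the two, and ActivityType.Other never
    # overrides a more specific type.
    first.Private = first.Private or second.Private
    if first.Type == ActivityType.Other:
        first.Type = second.Type
    if not first.Name:
        first.Name = second.Name
    return first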
from tapiriik.database import db
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
import uuid
from tapiriik.settings import MONGO_FULL_WRITE_CONCERN

Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)

while True:
    generation = str(uuid.uuid4())
    queueing_at = datetime.utcnow()
    users = list(db.users.find(
        {
            "NextSynchronization": {"$lte": datetime.utcnow()},
            "QueuedAt": {"$exists": False}
        },
        {
            "_id": True,
            "SynchronizationHostRestriction": True
        },
        read_preference=ReadPreference.PRIMARY))
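The scheduler snippet above ends after fetching the users that are due. A minimal sketch of how each of those users could then be stamped as queued and handed to the kombu producer follows; the message body shape, routing-key choice, and the QueueGeneration field are assumptions for illustration, not necessarily what the production scheduler does:

def enqueue_due_users(producer, users, generation, queueing_at):
    # Hedged sketch: publish each due user to the sync exchange and mark it as queued.
    # Only "QueuedAt" is taken from the query above; the other names are assumed.
    for user in users:
        host_restriction = user.get("SynchronizationHostRestriction")
        producer.publish(
            {"user_id": str(user["_id"]), "generation": generation},
            routing_key=host_restriction or "",
        )
        # Guard against double-queueing if another scheduler got here first.
        db.users.update(
            {"_id": user["_id"], "QueuedAt": {"$exists": False}},
            {"$set": {"QueuedAt": queueing_at, "QueueGeneration": generation}},
        )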
def test_activity_deduplicate_tzerror(self):
    ''' Test that probably-duplicate activities with starttimes like 09:12:22 and 15:12:22 (on the same day) are recognized as one '''
    svcA, svcB = TestTools.create_mock_services()
    actA = TestTools.create_random_activity(svcA, tz=pytz.timezone("America/Iqaluit"))
    actB = Activity()
    actB.StartTime = actA.StartTime.replace(tzinfo=pytz.timezone("America/Denver")) + timedelta(hours=5)
    actB.UploadedTo = [TestTools.create_mock_upload_record(svcB)]
    actA.Name = "Not this"
    actB.Name = "Heya"
    actB.Type = ActivityType.Walking
    actA.CalculateUID()
    actB.CalculateUID()

    activities = []
    Sync._accumulateActivities(Service.FromID("mockB"), [copy.deepcopy(actB)], activities)
    Sync._accumulateActivities(Service.FromID("mockA"), [copy.deepcopy(actA)], activities)
    self.assertEqual(len(activities), 1)

    # Ensure the offset must be exact - one second off and the activities are no longer treated as duplicates
    actB.StartTime = actA.StartTime.replace(tzinfo=pytz.timezone("America/Denver")) + timedelta(hours=5, seconds=1)
    activities = []
    Sync._accumulateActivities(Service.FromID("mockB"), [copy.deepcopy(actB)], activities)
    Sync._accumulateActivities(Service.FromID("mockA"), [copy.deepcopy(actA)], activities)
    self.assertEqual(len(activities), 2)

    # Ensure that overly large differences (>38hr) - not achievable via TZ differences, even combined with shamefully bad import/export code on the part of some services - are not deduplicated
    actB.StartTime = actA.StartTime.replace(tzinfo=pytz.timezone("America/Denver")) + timedelta(hours=50)
    activities = []
    Sync._accumulateActivities(Service.FromID("mockB"), [copy.deepcopy(actB)], activities)
    Sync._accumulateActivities(Service.FromID("mockA"), [copy.deepcopy(actA)], activities)
    self.assertEqual(len(activities), 2)
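For reference, here is a minimal sketch of the kind of rule the test above exercises; it reproduces the observed behaviour (whole-hour offsets up to 38 hours count as duplicates) but is not necessarily the exact check _accumulateActivities applies, and the helper name is hypothetical:

from datetime import timedelta

def _probable_tz_error_duplicate(start_a, start_b, max_offset_hours=38):
    # Compare naive start times: a timezone mix-up shifts an activity by a whole
    # number of hours, so anything that isn't an exact hour multiple, or is more
    # than max_offset_hours apart, is treated as a genuinely different activity.
    delta = abs(start_a.replace(tzinfo=None) - start_b.replace(tzinfo=None))
    if delta > timedelta(hours=max_offset_hours):
        return False
    return delta.total_seconds() % 3600 == 0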
def diag_user(req, user):
    userRec = db.users.find_one({"_id": ObjectId(user)})
    if not userRec:
        userRec = db.users.find_one({"AncestorAccounts": ObjectId(user)})
        if userRec:
            return redirect("diagnostics_user", user=userRec["_id"])
    if not userRec:
        return render(req, "diag/error_user_not_found.html")

    delta = False

    if "sync" in req.POST:
        Sync.ScheduleImmediateSync(userRec, req.POST["sync"] == "Full")
        delta = True
    elif "unlock" in req.POST:
        db.users.update({"_id": ObjectId(user)}, {"$unset": {"SynchronizationWorker": None}})
        delta = True
    elif "lock" in req.POST:
        db.users.update({"_id": ObjectId(user)}, {"$set": {"SynchronizationWorker": 1}})
        delta = True
    elif "substitute" in req.POST:
        req.session["substituteUserid"] = user
        return redirect("dashboard")
    elif "svc_setauth" in req.POST and len(req.POST["authdetails"]):
        db.connections.update({"_id": ObjectId(req.POST["id"])}, {"$set": {"Authorization": json.loads(req.POST["authdetails"])}})
        delta = True
    elif "svc_unlink" in req.POST:
        from tapiriik.services import Service
        from tapiriik.auth import User
        svcRec = Service.GetServiceRecordByID(req.POST["id"])
        try:
            Service.DeleteServiceRecord(svcRec)
        except:
            pass
        try:
            User.DisconnectService(svcRec)
        except:
            pass
        delta = True
    elif "svc_marksync" in req.POST:
        from tapiriik.services import Service
        from tapiriik.auth import User
        db.connections.update({"_id": ObjectId(req.POST["id"])},
                              {"$addToSet": {"SynchronizedActivities": req.POST["uid"]}},
                              multi=False)
        delta = True
    elif "svc_clearexc" in req.POST:
        from tapiriik.services import Service
        from tapiriik.auth import User
        db.connections.update({"_id": ObjectId(req.POST["id"])}, {"$unset": {"ExcludedActivities": 1}})
        delta = True
    elif "svc_clearacts" in req.POST:
        from tapiriik.services import Service
        from tapiriik.auth import User
        db.connections.update({"_id": ObjectId(req.POST["id"])}, {"$unset": {"SynchronizedActivities": 1}})
        Sync.SetNextSyncIsExhaustive(userRec, True)
        delta = True

    if delta:
        return redirect("diagnostics_user", user=user)

    return render(req, "diag/user.html", {"user": userRec})
patch_requests_with_default_timeout(timeout=60)

if isinstance(settings.HTTP_SOURCE_ADDR, list):
    settings.HTTP_SOURCE_ADDR = settings.HTTP_SOURCE_ADDR[settings.WORKER_INDEX % len(settings.HTTP_SOURCE_ADDR)]

patch_requests_source_address((settings.HTTP_SOURCE_ADDR, 0))

print(" -> Index %s\n -> Interface %s" % (settings.WORKER_INDEX, settings.HTTP_SOURCE_ADDR))

# We defer including the main body of the application till here so the settings aren't captured before we've set them up.
# The better way would be to defer initializing services until they're requested, but it's 10:30 and this will work just as well.
from tapiriik.sync import Sync

Sync.InitializeWorkerBindings()

sync_heartbeat("ready")

while Run:
    cycleStart = datetime.utcnow()  # Avoid having synchronization fall down during DST setback
    processed_user_count = Sync.PerformGlobalSync(heartbeat_callback=sync_heartbeat, version=WorkerVersion)
    RecycleInterval -= processed_user_count
    if RecycleInterval <= 0:
        break
    sync_heartbeat("idle")

print("Sync worker shutting down cleanly")
db.sync_workers.remove({"Process": os.getpid(), "Host": socket.gethostname()})
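The worker above relies on a patch_requests_with_default_timeout helper defined elsewhere in the codebase. A minimal sketch of how such a monkey-patch could be written is shown below; the real helper may be implemented differently:

import requests

def patch_requests_with_default_timeout(timeout):
    # Hedged sketch: wrap requests.Session.request so every HTTP call gets a
    # default timeout unless the caller explicitly supplies its own.
    original_request = requests.Session.request

    def request_with_default_timeout(self, method, url, **kwargs):
        kwargs.setdefault("timeout", timeout)
        return original_request(self, method, url, **kwargs)

    requests.Session.request = request_with_default_timeout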