def storeClientEntry(user, key, ts, reading, metadata): logging.debug("storing client entry for user %s, key %s at timestamp %s" % (user, key, ts)) response = None # float first, because int doesn't recognize floats represented as strings. # Android timestamps are in milliseconds, while Giles expects timestamps to be # in seconds, so divide by 1000 when you hit this case. # ios timestamps are in seconds. ts = int(ts) if ts > 9999999999: ts = ts/1000 currEntry = createEntry(user, key, ts, reading) # Add the os and app versions from the metadata dict currEntry.update(metadata) try: response = get_client_stats_db().insert(currEntry) if response == None: get_client_stats_db_backup().insert(currEntry) except Exception as e: logging.debug("failed to store client entry for user %s, key %s at timestamp %s" % (user, key, ts)) logging.debug("exception was: %s" % (e)) get_client_stats_db_backup().insert(currEntry) return response != None
def storeClientEntry(user, key, ts, reading, metadata): logging.debug("storing client entry for user %s, key %s at timestamp %s" % (user, key, ts)) response = None # float first, because int doesn't recognize floats represented as strings. # Android timestamps are in milliseconds, while Giles expects timestamps to be # in seconds, so divide by 1000 when you hit this case. # ios timestamps are in seconds. ts = int(ts) if ts > 9999999999: ts = ts / 1000 currEntry = createEntry(user, key, ts, reading) # Add the os and app versions from the metadata dict currEntry.update(metadata) try: response = get_client_stats_db().insert(currEntry) except Exception as e: logging.debug( "failed to store client entry for user %s, key %s at timestamp %s" % (user, key, ts)) logging.debug("exception was: %s" % (e)) get_client_stats_db_backup().insert(currEntry) return response != None
def export_client_stats():
    """Dump every entry from the client stats backup db to client_stats.csv."""
    # Column order expected by write_stats for the client stats export.
    column_names = ["reported_ts", "stat", "reading", "ts",
                    "client_os_version", "client_app_version",
                    "user", "_id"]
    backup_entries = [entry for entry in get_client_stats_db_backup().find()]
    write_stats("client_stats.csv", column_names, backup_entries)
def export_client_stats():
    """Write all backed-up client stat entries out as client_stats.csv."""
    output_file = 'client_stats.csv'
    # These are the fields pulled from each backup entry, in output order.
    fields = ['reported_ts', 'stat', 'reading', 'ts', 'client_os_version',
              'client_app_version', 'user', '_id']
    cursor = get_client_stats_db_backup().find()
    write_stats(output_file, fields, list(cursor))
# c) even if we do, we don't know if we need to use them for older entries # So let's leave the hacky reconstruction algorithm until we know that we really need it return new_entry def convertClientStats(collection): for old_entry in collection.find(): try: enas.save_to_timeseries(old_entry) except: logging.error("Error converting entry %s" % old_entry) raise def convertServerStats(collection): for old_entry in collection.find(): try: save_to_timeseries_server(old_entry) except: logging.error("Error converting entry %s" % old_entry) raise if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG) # No arguments - muahahahaha. Just going to convert everything. logging.info("About to convert client stats") convertClientStats(edb.get_client_stats_db_backup()) logging.info("About to convert server stats") convertServerStats(edb.get_server_stats_db_backup()) logging.info("Not about to convert result stats - they are no longer relevant")
else:
    # Non-test user: mint a fresh uuid and repoint every per-user collection
    # at it, anonymizing the user while keeping their data linked together.
    new_uuid = uuid.uuid4()
    logging.debug("Mapping %s -> %s" % (new_uuid, user.uuid))
    # Rewrite the uuid registry entry in place so logins resolve to the new id.
    edb.get_uuid_db().update({"uuid": user.uuid}, {"$set": { "uuid": new_uuid }})
    # Each reset_collection call below rewrites old_uuid -> new_uuid in one
    # collection. NOTE(review): reset_collection is defined outside this view —
    # presumably it updates the owning-user field; confirm before relying on it.
    logging.debug("Resetting alternatives...")
    reset_collection(edb.get_alternatives_db(), user.uuid, new_uuid)
    logging.debug("Resetting analysis...")
    reset_collection(edb.get_analysis_timeseries_db(), user.uuid, new_uuid)
    logging.debug("Resetting client...")
    reset_collection(edb.get_client_db(), user.uuid, new_uuid)
    logging.debug("Resetting client_stats_backup...")
    reset_collection(edb.get_client_stats_db_backup(), user.uuid, new_uuid)
    logging.debug("Resetting server_stats_backup...")
    reset_collection(edb.get_server_stats_db_backup(), user.uuid, new_uuid)
    logging.debug("Resetting result_stats_backup...")
    reset_collection(edb.get_result_stats_db_backup(), user.uuid, new_uuid)
    logging.debug("Resetting edb.get_common_place_db...")
    reset_collection(edb.get_common_place_db(), user.uuid, new_uuid)
    logging.debug("Resetting edb.get_common_trip_db...")
    reset_collection(edb.get_common_trip_db(), user.uuid, new_uuid)
    logging.debug("Resetting edb.get_habitica_db...")
    reset_collection(edb.get_habitica_db(), user.uuid, new_uuid)
    logging.debug("Resetting edb.get_pipeline_state_db...")
    reset_collection(edb.get_pipeline_state_db(), user.uuid, new_uuid)
# Wrap the raw mongo document for attribute-style access (user.uuid).
user = ad.AttrDict(user_dict)
if user.uuid in estag.TEST_PHONE_IDS:
    # Test phones must keep stable uuids, so they are never remapped.
    logging.debug("Found test phone, skipping reset")
else:
    # Mint a fresh uuid and repoint every per-user collection at it,
    # anonymizing the user while keeping their data linked together.
    new_uuid = uuid.uuid4()
    logging.debug("Mapping %s -> %s" % (new_uuid, user.uuid))
    # Rewrite the uuid registry entry in place so logins resolve to the new id.
    edb.get_uuid_db().update({"uuid" : user.uuid}, {"$set": {"uuid" : new_uuid}})
    # Each reset_collection call below rewrites old_uuid -> new_uuid in one
    # collection. NOTE(review): reset_collection is defined outside this view —
    # presumably it updates the owning-user field; confirm before relying on it.
    logging.debug("Resetting alternatives...")
    reset_collection(edb.get_alternatives_db(), user.uuid, new_uuid)
    logging.debug("Resetting analysis...")
    reset_collection(edb.get_analysis_timeseries_db(), user.uuid, new_uuid)
    logging.debug("Resetting client...")
    reset_collection(edb.get_client_db(), user.uuid, new_uuid)
    logging.debug("Resetting client_stats_backup...")
    reset_collection(edb.get_client_stats_db_backup(), user.uuid, new_uuid)
    logging.debug("Resetting server_stats_backup...")
    reset_collection(edb.get_server_stats_db_backup(), user.uuid, new_uuid)
    logging.debug("Resetting result_stats_backup...")
    reset_collection(edb.get_result_stats_db_backup(), user.uuid, new_uuid)
    logging.debug("Resetting edb.get_common_place_db...")
    reset_collection(edb.get_common_place_db(), user.uuid, new_uuid)
    logging.debug("Resetting edb.get_common_trip_db...")
    reset_collection(edb.get_common_trip_db(), user.uuid, new_uuid)
    logging.debug("Resetting edb.get_habitica_db...")
    reset_collection(edb.get_habitica_db(), user.uuid, new_uuid)
    logging.debug("Resetting edb.get_pipeline_state_db...")
    reset_collection(edb.get_pipeline_state_db(), user.uuid, new_uuid)
    logging.debug("Resetting edb.get_profile_db...")
    reset_collection(edb.get_profile_db(), user.uuid, new_uuid)
    # Fragment ends here: the timeseries reset call is outside this view.
    logging.debug("Resetting edb.get_timeseries_db...")
# c) even if we do, we don't know if we need to use them for older entries # So let's leave the hacky reconstruction algorithm until we know that we really need it return new_entry def convertClientStats(collection): for old_entry in collection.find(): try: enas.save_to_timeseries(old_entry) except: logging.error("Error converting entry %s" % old_entry) raise def convertServerStats(collection): for old_entry in collection.find(): try: save_to_timeseries_server(old_entry) except: logging.error("Error converting entry %s" % old_entry) raise if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG) # No arguments - muahahahaha. Just going to convert everything. logging.info("About to convert client stats") convertClientStats(edb.get_client_stats_db_backup()) logging.info("About to convert server stats") convertServerStats(edb.get_server_stats_db_backup()) logging.info("Not about to convert result stats - they are no longer relevant")