def __init__(self, config):
    """Initialize the handler: load the activity catalog and set up JWT state.

    Args:
        config: application configuration object; stored but not yet read
            (see the todo on the CSV path below).
    """
    self.config = config
    # Activity catalog, indexed by its "id" column.
    # todo: read from config
    self.activities_df = pandas.read_csv(get_path("../../activities.csv"), index_col="id")
    self.JWT_ALGORITHM = "HS256"
    # SECURITY NOTE(review): hard-coded, trivially guessable signing secret.
    # The commented-out call shows the intended fix (a random per-process
    # secret) — anyone who knows "0" can forge tokens; fix before production.
    self.JWT_SECRET = "0"  # secrets.token_bytes(16)
    # Tokens explicitly invalidated — presumably a logout blacklist checked
    # elsewhere; verify against the token-validation code path.
    self.EXPIRED_TOKENS = set()
async def fetch_activity(self, request):
    """Return one activity as JSON and log the view as an interaction.

    Args:
        request: incoming HTTP request; ``request.id`` is the authenticated
            user id (set by the auth layer — presumably; verify) and the
            ``itemid`` query parameter selects the activity by position.

    Returns:
        web.Response whose body is the selected activity row serialized
        to JSON.

    Raises:
        errors.Unauthorized: if the request carries no authenticated id.
        errors.UserError: if ``itemid`` is outside the catalog's range.
    """
    # Identity comparison for None per PEP 8 (was `== None`).
    if request.id is None:
        raise errors.Unauthorized("A valid token is required")
    activity_id = int(request.rel_url.query["itemid"])
    # Full range check: the original only tested the upper bound, so a
    # negative id slipped through and iloc wrapped around to serve rows
    # from the END of the catalog (iloc[-1] is the last row).
    if not 0 <= activity_id < len(self.activities_df):
        raise errors.UserError("this id is too big")
    # Append-only interaction log: (user, activity, weight=1, unix time).
    with open(get_path("../../interactions.csv"), "a") as output_csv:
        writer = csv.writer(output_csv)
        writer.writerow([request.id, activity_id, 1, int(time.time())])
    output = self.activities_df.iloc[activity_id].to_json()
    return web.Response(text=output)
def GET(self, id):
    """Redirect the client to the stored file's location.

    Args:
        id: numeric file identifier (arrives as a string from the URL).
    """
    # web.py signals redirects by raising; seeother issues an HTTP 303
    # pointing at the resolved storage path.
    raise web.seeother("/%s" % storage.get_path(int(id)))
def download(file_hash):
    """Serve the file identified by *file_hash* as a file response."""
    # Resolve the hash to an on-disk path, then let the framework stream it.
    resolved_path = storage.get_path(file_hash)
    return web.FileResponse(resolved_path)
import pandas as pd
import numpy as np
from storage import get_path
import torch
from torch.autograd import Variable
import torch.nn.functional as Functional
from tqdm import tqdm
import matplotlib.pyplot as plt
from storage import get_path  # NOTE(review): duplicate — already imported above

# Load the activity catalog. NOTE(review): "activites" is a typo ("activities");
# kept as-is because code beyond this view may reference the misspelled name.
activites = pd.read_csv(get_path("../../activities.csv"))
activites.head()

# Read the CSV of interactions and sort it by timestamp so the train/test
# split below is chronological (train on the past, test on the future).
data = pd.read_csv(get_path("../../interactions.csv"))
data_sorted = data.sort_values("timestamp")
data_sorted.head()

# Cut the data off at 70% in order to save some data for the test process.
# (The original comment said 75%, but the value used is 0.70.)
CUTOFF = 0.70
cutoff_idx = int(len(data_sorted) * CUTOFF)

# Generate the train and test data
data_train = data_sorted.iloc[0:cutoff_idx]
data_test = data_sorted.iloc[cutoff_idx:]


def get_intersection_test_and_train(field):
    """ Get interactions between the test and train datasets for the given field