def loadFeatures(catalog):
    """Load the small context/content features CSV into the catalog.

    For each row (event), the model assigns a genre, stores the event,
    and updates the hour tree index.

    Args:
        catalog: model catalog structure that receives the events.
    """
    featuresfile = cf.data_dir + "context_content_features-small.csv"
    # 'with' guarantees the file handle is closed (it was leaked before).
    with open(featuresfile, encoding='utf-8') as fp:
        for event in csv.DictReader(fp):
            model.assignGenre(catalog, event)
            model.addEvent(catalog, event)
            model.updateHour_Tree(catalog, event)
def loadEvents(analyzer):
    """Load the context/content features CSV into the model.

    Each row is reduced to the columns the model needs: the numeric audio
    features are converted to float, identifier columns are normalized
    (spaces stripped from artist_id, track_id lower-cased), and the row is
    registered as event, artist, track and date.

    Args:
        analyzer: model analyzer structure that receives the data.

    Returns:
        The same analyzer, after loading.
    """
    context = cf.data_dir + 'context_content_features-small.csv'
    # Numeric columns that are converted with float() as-is.
    numeric_cols = ("instrumentalness", "liveness", "speechiness",
                    "danceability", "valence", "loudness", "tempo",
                    "acousticness", "energy", "mode", "key")
    # 'with' guarantees the file handle is closed (it was leaked before).
    with open(context, encoding="utf-8") as fp:
        for line in csv.DictReader(fp, delimiter=","):
            filtered = {col: float(line[col]) for col in numeric_cols}
            filtered["artist_id"] = line["artist_id"].replace(" ", '')
            filtered["track_id"] = line["track_id"].lower()
            filtered["user_id"] = line["user_id"]
            filtered["id"] = line["id"]
            # Kept as the raw string; parsing (strptime) happens downstream.
            filtered["created_at"] = line["created_at"]
            model.addEvent(analyzer, filtered)
            model.addArtist(analyzer, filtered)
            model.addTrack(analyzer, filtered)
            model.addDate(analyzer, filtered)
    return analyzer
def loadContext(catalog, contextcontefile):
    """Load a context/content CSV file and add every row to the catalog.

    Args:
        catalog: model catalog structure that receives the events.
        contextcontefile: CSV filename relative to cf.data_dir.

    Returns:
        The same catalog, after loading.
    """
    contextcontentfile = cf.data_dir + contextcontefile
    # 'with' guarantees the file handle is closed (it was leaked before).
    with open(contextcontentfile, encoding="utf-8") as fp:
        for event in csv.DictReader(fp, delimiter=","):
            model.addEvent(catalog, event)
    return catalog
def loadData(analyzer, eventsfile):
    """Load a CSV of events into the model.

    Args:
        analyzer: model analyzer structure that receives the events.
        eventsfile: CSV filename relative to cf.data_dir.

    Returns:
        Tuple (analyzer, 0, 0). The two zeros are placeholders kept for
        callers that expect (analyzer, delta_time, delta_memory) from a
        now-removed profiling version of this function.
    """
    # Distinct local avoids reassigning the parameter with a different meaning.
    filepath = cf.data_dir + eventsfile
    # 'with' guarantees the file handle is closed (it was leaked before).
    with open(filepath, encoding="utf-8") as fp:
        for event in csv.DictReader(fp, delimiter=","):
            model.addEvent(analyzer, event)
    return analyzer, 0, 0
def loadEvents(analyzer):
    """Load the small context/content features CSV into the model.

    Args:
        analyzer: model analyzer structure that receives the events.

    Returns:
        The same analyzer, after loading.
    """
    # Renamed locals: the previous 'crimesfile'/'crime' names were copied
    # from an unrelated lab and did not describe this music-events data.
    eventsfile = cf.data_dir + 'context_content_features/context_content_features-small.csv'
    # 'with' guarantees the file handle is closed (it was leaked before).
    with open(eventsfile, encoding="utf-8") as fp:
        for event in csv.DictReader(fp, delimiter=","):
            model.addEvent(analyzer, event)
    return analyzer
def loadEvents(analyzer):
    """Load the events from the small subsample CSV into the analyzer.

    Args:
        analyzer: model analyzer structure that receives the events.
    """
    eventsfile = cf.data_dir + 'subsamples-small/context_content_features-small.csv'
    # 'with' guarantees the file handle is closed (it was leaked before).
    with open(eventsfile, encoding='utf-8') as fp:
        for event in csv.DictReader(fp):
            model.addEvent(analyzer, event)
def loadEvents(analyzer):
    """Load the events from the small subsample CSV.

    For each event, the model is told to add it to the analyzer.

    Args:
        analyzer: model analyzer structure that receives the events.
    """
    eventsfile = cf.data_dir + 'subsamples-small/context_content_features-small.csv'
    # 'with' guarantees the file handle is closed (it was leaked before).
    with open(eventsfile, encoding='utf-8') as fp:
        for event in csv.DictReader(fp):
            model.addEvent(analyzer, event)
def loadEvents(analyzer, file):
    """Iterate every row of a CSV file and add it to the analyzer.

    Args:
        analyzer: model analyzer structure that receives the events.
        file: CSV filename relative to cf.data_dir. (Parameter name kept
            for backward compatibility even though it shadows the builtin.)
    """
    analysis_file = cf.data_dir + file
    # 'with' guarantees the file handle is closed (it was leaked before).
    with open(analysis_file, encoding="utf-8") as fp:
        for event in csv.DictReader(fp, delimiter=","):
            model.addEvent(analyzer, event)
def loadData(analyzer, crimesfile):
    """Load a CSV data file into the model.

    Args:
        analyzer: model analyzer structure that receives the events.
        crimesfile: CSV filename relative to cf.data_dir.
    """
    # Distinct local avoids reassigning the parameter with a different meaning.
    filepath = cf.data_dir + crimesfile
    # 'with' guarantees the file handle is closed (it was leaked before).
    with open(filepath, encoding="utf-8") as fp:
        for event in csv.DictReader(fp, delimiter=","):
            model.addEvent(analyzer, event)
def loadData(catalogo):
    """Load the events CSV and the sentiment-values CSV into the model.

    Args:
        catalogo: model catalog structure that receives the data.

    Returns:
        The same catalogo, after loading.
    """
    files = cf.data_dir + 'context_content_features-small.csv'
    # 'with' guarantees each file handle is closed (both were leaked before).
    with open(files, encoding="utf-8") as fp:
        for evento in csv.DictReader(fp, delimiter=","):
            model.addEvent(catalogo, evento)
    files2 = cf.data_dir + 'sentiment_values.csv'
    # BUG FIX: the original opened `files` again here instead of `files2`,
    # so hashtags were built from the events CSV, not the sentiment CSV.
    with open(files2, encoding="utf-8") as fp:
        for hashtag in csv.DictReader(fp, delimiter=","):
            model.createHashtag(catalogo, hashtag)
    return catalogo
def loadData(catalog):
    """Load the three project CSV files into the catalog.

    Order matters: timestamp registers first, then sentiment values,
    then the main context/content events.

    Args:
        catalog: model catalog structure that receives the data.
    """
    # 'with' guarantees each file handle is closed (all three leaked before).
    TSfile = cf.data_dir + 'user_track_hashtag_timestamp-small.csv'
    with open(TSfile, encoding='utf-8') as fp:
        for register in csv.DictReader(fp):
            model.createmap2file(catalog, register, 'track')

    HVfile = cf.data_dir + 'sentiment_values.csv'
    with open(HVfile, encoding='utf-8') as fp:
        for pair in csv.DictReader(fp):
            model.addSentiment(catalog, pair)

    Dfile = cf.data_dir + 'context_content_features-small.csv'
    with open(Dfile, encoding='utf-8') as fp:
        for event in csv.DictReader(fp):
            model.addEvent(catalog, event)
def loadData(analyzer, hashtag, sentiments, context):
    """Load the three CSV files (context, hashtags, sentiments) into the model.

    Processing order is significant: events first, then hashtags (which
    track a running event position), then the trees are built, and finally
    the sentiment values are added.

    Args:
        analyzer: model analyzer structure that receives the data.
        hashtag: hashtags CSV filename relative to cf.data_dir.
        sentiments: sentiment-values CSV filename relative to cf.data_dir.
        context: context/content CSV filename relative to cf.data_dir.

    Returns:
        The same analyzer, after loading.
    """
    # Open each file right before its loop; 'with' guarantees every handle
    # is closed (all three were opened eagerly and leaked before).
    contextfile = cf.data_dir + context
    with open(contextfile, encoding="utf-8") as fp:
        for event in csv.DictReader(fp, delimiter=","):
            model.addEvent(analyzer, event)

    hashtagsfile = cf.data_dir + hashtag
    posEvent = 1  # running position threaded through addHashtag calls
    with open(hashtagsfile, encoding="utf-8") as fp:
        for tag in csv.DictReader(fp, delimiter=","):
            posEvent = model.addHashtag(analyzer, tag, posEvent)
    model.crearArboles(analyzer)

    sentimentsfile = cf.data_dir + sentiments
    with open(sentimentsfile, encoding="utf-8") as fp:
        for sentiment in csv.DictReader(fp, delimiter=","):
            model.addSentiment(analyzer, sentiment)
    return analyzer
def loadEvent(analyzer):
    """Load the small context/content features CSV into the analyzer.

    Args:
        analyzer: model analyzer structure that receives the events.
    """
    eventfile = cf.data_dir + 'context_content_features-small.csv'
    # 'with' guarantees the file handle is closed (it was leaked before).
    with open(eventfile, encoding="utf-8") as fp:
        for event in csv.DictReader(fp, delimiter=","):
            model.addEvent(analyzer, event)