def createPost():
    """Create a Track from the submitted form, then attach its uploaded images.

    Redirects to the track list on success; rolls back and re-raises on error.
    """
    session = getSession()
    name = request.form.get('name')
    location = request.form.get('location')
    size = request.form.get('size')
    try:
        track = Track()
        track.name = name
        track.location = location
        track.size = size
        session.add(track)
        # Commit first so track.id is assigned for the image FK below.
        session.commit()
        files = request.files.getlist("images")
        imagesPaths = uploadfiles(files, folderTracks)
        for imagesPath in imagesPaths:
            trackImage = TrackImage()
            trackImage.src = imagesPath
            trackImage.track_id = track.id
            session.add(trackImage)
        session.commit()
        return redirect(url_for('track.tracks'))
    except Exception:
        session.rollback()
        # Bare raise preserves the original traceback (was `raise error`).
        raise
def run(self):
    """Sync `data` into the DB: create missing Track rows and upload one
    image per track to S3, recording it as a TrackImage.

    NOTE: relies on module-level `data`, `path`, `bucket`, `to`, `s3`.
    """
    session = getSession()
    for item in data:
        track = self.db.session.query(Track).filter(
            Track.name == item.get("name")
        ).first()
        exist = bool(track)
        if not exist:
            # Merged the two identical `if not exist` branches from the
            # original -- behavior is unchanged.
            track = Track()
            track.name = item.get("name")
            track.location = item.get("location")
            track.size = item.get("size") or 5000
            self.db.session.add(track)
            self.db.session.commit()
        images = item.get("images")
        if images:
            for image in images:
                haveImage = self.db.session.query(TrackImage).filter(
                    TrackImage.track_id == track.id
                ).first()
                if not haveImage:
                    ext = Path(image).suffix
                    namefile = uuid.uuid4().hex + ext
                    # `with` closes the handle; the original leaked it.
                    with open(path + image, 'rb') as file:
                        s3.put_object(Bucket=bucket, Key=to + namefile,
                                      Body=file)
                    trackImage = TrackImage()
                    trackImage.src = namefile
                    trackImage.track_id = track.id
                    self.db.session.add(trackImage)
                    # NOTE(review): image rows are never committed here,
                    # matching the original -- confirm a commit happens
                    # elsewhere.
def update_user_tracks(self):
    """Run one cycle of requesting data from the API.

    Stores any tracks received and updates the bookkeeping of which time
    ranges are still missing. Returns True while more work remains.
    """
    time_range = self.next_time_range()
    if not time_range:
        return False

    tracks = self.request_tracks(*time_range)
    if not tracks:
        # Range is empty: drop it and report whether any ranges remain.
        TimeRange(*time_range).remove_from_db(self.cur)
        return not TimeRange.table_empty(self.cur)

    Track.add_to_db(self.cur, tracks)
    # API returns newest-first: first element is the newest timestamp.
    new_time_range = (tracks[-1].timestamp, tracks[0].timestamp)
    self._update_time_ranges(time_range, new_time_range)
    return True
def create_track(payload):
    """Create a Track from the request JSON and return it as JSON.

    Aborts with 422 on any malformed payload or insert failure.
    """
    try:
        # Parse the body once (the original called request.get_json()
        # four separate times).
        body = request.get_json()
        name = body['name']
        artist_id = body['artist_id']
        release_id = body['release_id']
        price = body['price']
        new_track = Track(
            name=name,
            artist_id=artist_id,
            release_id=release_id,
            price=price
        )
        new_track.insert()
        return jsonify({
            'success': True,
            'name': new_track.name,
            'artist_id': new_track.artist_id,
            'release_id': new_track.release_id,
            'price': new_track.price
        })
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed.
        abort(422)
def track(request):
    """Subscription form view: on a valid POST, build the user's feed and
    store its tracks; otherwise render the form."""
    if request.method == 'POST':
        form = UserForm(request.POST)
        if form.is_valid():
            user = form.cleaned_data['username']
            raw_feed = build_feed(user, 5, 2)
            f = Feed(username = user)
            f.save()
            for track in raw_feed:
                # Placeholder artwork when the feed item has none.
                art_url = 'http://i.imgur.com/BNBFGfg.jpg'
                if track[5] is not None:  # was `!= None`
                    art_url = track[5]
                t = Track(id = track[0],
                          date = track[1],
                          title = track[2],
                          artist = track[3],
                          uri = track[4],
                          art = art_url)
                t.save()
                f.tracks.add(t)
            return HttpResponseRedirect('track.html')
    else:
        form = UserForm()
    # Reached on GET and on invalid POST (re-renders with errors).
    return render(request, 'subs/track.html', {'form': form})
def add_to_db(audio_files): for audio_file in audio_files: audio_file_id3 = eyed3.load(audio_file) # If the artist, album or track doesn't exist in the database, create # table(s) for them. try: if not Artist.objects.filter(name=audio_file_id3.tag.artist).exists(): artist = Artist(name=audio_file_id3.tag.artist) artist.save() if not Album.objects.filter(title=audio_file_id3.tag.album).exists(): album = Album(title=audio_file_id3.tag.album, \ artist=artist) album.save() if not Track.objects.filter(title=audio_file_id3.tag.title).exists(): track = Track(title=audio_file_id3.tag.title, \ album=album, \ artist=artist, \ fspath=audio_file, \ media_url=MEDIA_URL + audio_file.split(MEDIA_ROOT)[1]) track.save() print 'Added to DB: ' + audio_file_id3.tag.title except Exception as e: print 'Error: ' + e
def new_game(request, tag=None):
    """Start new game."""
    # A Game row is created up front; it is deleted again below if the
    # chosen tag turns out to have too little data for a quiz.
    new_game = Game.objects.create(quiz_length=10)
    if tag:
        Track.fetch_top_tracks(tag)
        tag_object, _ = Tag.objects.get_or_create(name='%s' % tag)
        tagged = TaggedItem.objects.get_by_model(Track, tag_object)
        tracks_with_tag = tagged.count()
        if tracks_with_tag == 0:
            # Tag matched nothing: don't keep an empty Tag row around.
            tag_object.delete()
        if tracks_with_tag < 20:
            # Too few tracks for a 10-question quiz: abort and show an error.
            err = ''.join(['There is not enough data to create a quiz. ',
                           'Please choose a more popular tag.'])
            new_game.delete()
            return render_to_response('quiz/index.html', {
                'error_msg' : err,
                'current_game' : None,
            })
        else:
            new_game.tags = '"%s"' % tag
    # Stash the game in the session and jump to the first question.
    request.session['game'] = new_game
    question_url = reverse('musicquiz.quiz.views.question')
    return HttpResponseRedirect(question_url)
def db_cleanup():
    # NOTE(review): each loop below deletes rows matching the current
    # row's own key fields, so every row matches its own delete query --
    # this clears the whole table, and "Deleted N" reports the pre-delete
    # row count, not a duplicate count. Confirm whether full truncation
    # is the intent or whether a keep-one dedup was meant.

    # Movie cleanup
    aprint('Cleaning up movies...', 'NOTIFICATOR')
    movies = Movie.select().order_by(Movie.title)
    n_movies = len(movies)
    for movie in movies:
        deletion = Movie.delete().where(Movie.imdb == movie.imdb,
                                        Movie.quality == movie.quality)
        deletion.execute()
    aprint('Deleted {} movies.'.format(n_movies), 'NOTIFICATOR')

    # TV cleanup
    aprint('Cleaning up tv shows...', 'NOTIFICATOR')
    episodes = Show.select().order_by(Show.series).order_by(
        Show.season).order_by(Show.episode)
    n_episodes = len(episodes)
    for episode in episodes:
        deletion = Show.delete().where(Show.series == episode.series,
                                       Show.season == episode.season,
                                       Show.episode == episode.episode)
        deletion.execute()
    aprint('Deleted {} episodes.'.format(n_episodes), 'NOTIFICATOR')

    # Tracks cleanup
    aprint('Cleaning up tracks...', 'NOTIFICATOR')
    tracks = Track.select().order_by(Track.artist).order_by(Track.tracknumber)
    n_tracks = len(tracks)
    for track in tracks:
        deletion = Track.delete().where(Track.artist == track.artist,
                                        Track.title == track.title,
                                        Track.tracknumber == track.tracknumber,
                                        Track.quality == track.quality)
        deletion.execute()
    aprint('Deleted {} tracks.'.format(n_tracks), 'NOTIFICATOR')
def track_bid(request, pk):
    # Record a click-through on a bid, then redirect the visitor to the
    # bid's external link.
    bid = get_object_or_404(Bid, pk=pk)
    t = Track()
    t.bid = bid
    # NOTE(review): `request.user` is truthy even for AnonymousUser in
    # most Django versions, so this check likely always passes and may
    # attach an anonymous user -- confirm whether
    # `request.user.is_authenticated` was intended.
    if request.user:
        t.user = request.user
    t.save()
    return HttpResponseRedirect(bid.link)
def addtrack():
    """Create a Track from the posted JSON URL and return its id and url."""
    posted_url = request.json['url']
    new_track = Track(url=db.Link(posted_url))
    new_track.put()
    #TODO Add track to correct playlist
    return jsonify(id=new_track.key().id(), url=posted_url)
def queue_get_previous():
    """Returns the previous item in the Queue."""
    # (The original docstring said "next", contradicting the function.)
    track = sonos.get_current_track_info()
    # playlist_position is 1-based, so index "- 2" is the previous entry.
    prev_track = sonos.get_queue(max_items = 9999)[int(track['playlist_position']) - 2]
    returnTrack = Track(prev_track)
    returnTrack.message = 'The previous song was: ' + prev_track.creator + ' - ' + prev_track.title
    return jsonpickle.encode(returnTrack, unpicklable=False)
def test_track(self):
    """Saving ten distinct tracks yields a count of ten."""
    for idx in xrange(10):
        Track(band='lalal',
              release='lolo',
              name='Track: %s' % (idx,),
              fp_track_code='random: %s' % (idx,),
              year='1980',
              youtube_code='aakaka').save()
    self.assertEqual(Track.objects.count(), 10)
def scrape_track(name, folder):
    # Download the audio for a "year-title-artist-track" labelled name via
    # YouTube and save it under /<folder>/<display_id>.mp3.
    # Returns (name, saved_path) on success, (False, False) otherwise.
    # TODO DETAIL in logger
    name_parts = name.split('-')
    if len(name_parts) == 4:
        year = name_parts[0]
        title = name_parts[1]
        artist = name_parts[2]
        track = name_parts[3]
    else:
        print "Invalid name: %s" % (name,)
        return False, False
    track_name = '%s - %s' % (artist, track)
    try:
        link = YouTubeExtractor.search_youtube_links(track_name)
    except Exception:
        return False, False
    try:
        # search in youtube based on en artist - track
        # NOTE(review): outtmpl '%(id)s%(ext)s' has no dot between id and
        # extension -- confirm whether '%(id)s.%(ext)s' was intended.
        ydl = youtube_dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s',
                                    'noplaylist': '--no-playlist'})
        # Add all the available extractors
        ydl.add_default_info_extractors()
        result = ydl.extract_info(link, download=False)
        found = False
        Track.sync()
        display_id = result['display_id']
        exists = Track.objects.filter(youtube_code=display_id).count() > 0
        # Check Tracker.youtube_code doesn't exist
        if not exists:
            # Take the first m4a format that downloads successfully.
            for format in result['formats']:
                if format['ext'] == 'm4a':
                    url = format['url']
                    try:
                        r = requests.get(url, stream=True)
                        chunk_size = 1000
                        filename = result['display_id']+'.mp3'
                        try:
                            with open('/%s/%s' % (folder, filename,), 'wb') as fd:
                                for chunk in r.iter_content(chunk_size):
                                    fd.write(chunk)
                        except Exception:
                            raise StorageException('Some problem writing file /%s/%s' % (folder, filename))
                        found = True
                        break
                    except Exception:
                        # Best-effort: try the next m4a format on failure.
                        pass
            if found:
                return name, '/%s/%s' % (folder, filename,)
            else:
                return False, False
        else:
            return False, False
    except:
        # NOTE(review): bare except swallows everything, including
        # KeyboardInterrupt -- consider `except Exception`.
        return False, False
def start(request):
    # Begin a reporting session: persist a Track row, remember its id in
    # the session, then monkey-patch the logging functions so later log
    # calls are captured as LOG sub-requests.
    request.first_request = True
    track = Track()
    track.save()
    print track
    request.session["is_reporting"] = track.id
    # NOTE(review): these patches mutate the logging module globally (for
    # the whole process), not per-request -- confirm that is intended.
    logging.debug = patch_function(logging.debug, TypeSubRequest.LOG)
    logging.info = patch_function(logging.info, TypeSubRequest.LOG)
    logging.critical = patch_function(logging.critical, TypeSubRequest.LOG)
    logging.error = patch_function(logging.error, TypeSubRequest.LOG)
    logging.exception = patch_function(logging.exception, TypeSubRequest.LOG)
def show():
    """Render the track list, optionally filtered by tag selection (POST).

    Picks a track to display: the one selected in the form, else the
    latest, else None when no tracks are available.
    """
    tags = Tag.select(Tag.value).distinct().order_by(Tag.value.asc())  #pylint: disable=E1111
    if request.method == "POST":
        # Filter the track list based on selected tags
        # Evaluate selected tags, modify tag object with selection
        sel_tags = request.form.getlist('tag-select')
        for tag in tags:
            for sel_tag in sel_tags:
                if tag.value == sel_tag:
                    tag.selected = True
        # Keep only tracks whose tag set covers every selected tag.
        all_tracks = Track.select().order_by(Track.date.asc())  # pylint: disable=E1111
        tracks = []
        for track in all_tracks:
            tag_elems_for_track = Tag.select().where(Tag.track == track)  # pylint: disable=E1111
            tags_for_track = []
            for elem in tag_elems_for_track:
                tags_for_track.append(elem.value)
            if set(sel_tags).issubset(tags_for_track):
                tracks.append(track)
        # Pass information about selected map
        if len(tracks) == 0:
            # No available tracks - give warning
            track_id = None
        elif request.form.get("track-select") is None or request.form.get(
                "track-select") == "":
            # No track selected - return latest map
            track_id = len(tracks) - 1
        else:
            # Return selected map
            track_id = int(request.form.get("track-select"))
    else:
        # List all tracks, show latest map
        tracks = Track.select().order_by(Track.date.asc())  #pylint: disable=E1111
        if len(tracks) == 0:
            track_id = None
        else:
            track_id = len(tracks) - 1
    # FIX: `if track_id:` treated index 0 (the only/first track) as
    # "no track" and dropped its tags; 0 is a valid index.
    if track_id is not None:
        tags_for_track = Tag.select(Tag.value).where(
            Tag.track == tracks[track_id])  #pylint: disable=E1111
    else:
        tags_for_track = ""
    overall_statistics = calc_statistics(tracks)
    return render_template("show.html",
                           tracks=tracks,
                           tags=tags,
                           tags_for_track=tags_for_track,
                           track_id=track_id,
                           overall_statistics=overall_statistics)
def test_scrape_track_cassandra_exists(self):
    """An already-stored youtube_code must not be scraped again."""
    # It shouldn't add if exists model
    fake_link = MagicMock(return_value=u'https://www.youtube.com/watch?v=Ckom3gf57Yw')
    with patch('web.utils.YouTubeExtractor.search_youtube_links', fake_link):
        existing = Track(band='lalal',
                         release='lolo',
                         name='Track',
                         fp_track_code='random',
                         youtube_code='Ckom3gf57Yw')
        existing.save()
        task = scrape_track.delay('year - release - artist - Lalalala', 'tmp')
        name, folder = task.get()
        self.assertEqual(name, False)
        self.assertEqual(folder, False)
def post(self):
    """responsible for the name lists in users friend list"""
    my_id = self.user.key.id()
    frn = []
    for usr in User.listUsers():
        other_id = usr.key.id()
        if other_id == my_id:
            continue  # skip the requesting user themselves
        # Track ids are keyed on the (smaller, larger) user-id pair.
        if other_id < my_id:
            trid = Track.track_id(other_id, my_id)
        else:
            trid = Track.track_id(my_id, other_id)
        frn.append({'name': usr.username,
                    'sex': usr.sex,
                    'uid': other_id,
                    'msg': Track.checkNew(trid, my_id)})
    self.response.out.write(json.dumps(frn))
def save_to_db(self):
    """Persist setlist tracks: ensure artist/label/track rows exist and
    collect the track ids into self.setlist_trackids."""
    for track_info in self.tracks_info:
        artist, created = Artist.get_or_create(name=track_info['artist'])
        # Label is optional; resolve it first so DB-call order matches
        # the original (label before track).
        label = None
        if "label" in track_info:
            label, created = Label.get_or_create(name=track_info['label'])
        # Single create_or_get call (the original duplicated it in both
        # branches of the label check).
        track, created = Track.create_or_get(artist=artist.id,
                                             title=track_info['title'])
        if label is not None:
            track.label = label.id
            track.save()
        self.setlist_trackids.append(track.id)
    return
def addReleases(artist_id, update_artist = True):
    # Import all MusicBrainz releases for an artist: create Album and
    # Track rows in the 'wanted' state for any release group not yet
    # tracked, then optionally rescan the library for the artist.
    artist_record = Artist.get(id=artist_id)
    musicbrainz_artist = musicbrainz.getBestArtistMatch(artist_record.name)
    release_ids = []
    for release in musicbrainz_artist.getReleases():
        release_ids.append(utils.extractUuid(release.id))
    # These release results do not contain all the information, we must re-query for that info...
    for rid in release_ids:
        release = musicbrainz.getRelease(rid)
        if not release:
            continue
        release_group_id = utils.extractUuid(release.getReleaseGroup().id)
        # Skip release groups that are already tracked locally.
        try:
            release_group_tracked = Album.get(release_group_id=release_group_id)
        except peewee.DoesNotExist:
            release_group_tracked = None
        if release_group_tracked:
            continue
        release_record = Album.create(
            musicbrainz_id = rid,
            asin = release.getAsin(),
            release_group_id = release_group_id,
            artist_id = artist_id,
            name = release.getTitle(),
            type = release.getType(),
            released_on = release.getEarliestReleaseDate(),
            state = 'wanted')
        # Tracks are numbered sequentially in release order.
        track_number = 1
        for track in release.getTracks():
            Track.create(
                album_id = release_record.id,
                number = track_number,
                title = track.getTitle(),
                length = track.getDuration(),
                state = 'wanted')
            track_number += 1
    # Rescan the Music Library after adding new releases to see if the user has
    # them or not. Will not run if explicitly told not to by the caller.
    if(update_artist):
        ThreadPool.put(updateArtist, {'artist_id': artist_id})
def generate_fingerprint_from_list(results, file_list): # TODO: os.system is thread safe?? # TODO: How to test this? codes_file = '/tmp/allcodes_%s.json' % (random.randint(1, 10000)) command = '/home/vagrant/echoprint-codegen/echoprint-codegen -s 10 30 < %s > %s' % (file_list, codes_file) os.system(command) # Create the Track models with open(codes_file, 'r') as data_file: data = json.load(data_file) for fingerprint in data: # check fp doesn't exist in database code_string = fingerprint.get('code') if code_string: response = fp.best_match_for_query(code_string) if not response.match(): label = [v for v in results if v[1] == fingerprint['metadata']['filename']][0][0] youtube_code = fingerprint['metadata']['filename'].replace('.mp3', '').replace('/tmp/', '') year = label.split('-')[0].strip() release = label.split('-')[1].strip() artist = label.split('-')[2].strip() title = label.split('-')[3].strip() fingerprint['metadata']['artist'] = artist fingerprint['metadata']['title'] = title # Track creation Track.sync() track = Track(band=artist, release=release, name=title, year=year, youtube_code=youtube_code) track.save() # Remove all - (due to limitation in fingerprint-server track_id match) fingerprint['metadata']['track_id'] = track.echoprint_id else: # remove duplicate element data.remove(fingerprint) print "This file is duplicated" # Overwrite with artist and title with open(codes_file, 'w') as data_file: data_file.write(json.dumps(data)) # Fastingest invoke => post all into echo-fingerprint codes, _ = parse_json_dump(codes_file) fp.ingest(codes) FileHandler.delete_file(codes_file) return True
def get_playlists(self):
    """Load playlist JSON files and resolve their tracks; cached after
    the first call. Returns False when the library isn't goosed."""
    if not self.library_goosed:
        print("The library is not goosed. Call goose_up_library().")
        return False
    if self.playlists:
        return self.playlists
    playlists_directory = "{0}/src/playlists".format(self.base_location)
    self.playlists = []
    for entry in os.scandir(playlists_directory):
        with open(entry.path) as pfp:
            playlist_json = json.load(pfp)
            playlist = Playlist(title=playlist_json["title"])
            for track_path in playlist_json["tracks"]:
                resolved = Track.init_from_file(
                    "{0}{1}".format(self.media_path, track_path))
                if resolved:
                    playlist.playlist_tracks.append(resolved)
            self.playlists.append(playlist)
    return self.playlists
def _tidal_track_to_track(self, tidal_track: dict, album: Optional[Album] = None) -> Track:
    """Convert a raw TIDAL track payload into a Track model.

    When no album is supplied it is looked up from the payload's album id.
    """
    # we can be pretty sure that an album ID is valid if it comes from TIDAL
    album = album or cast(Album, self.get_album_by_id(tidal_track["album"]["id"]))
    artists = [
        self._tidal_artist_to_artist(raw_artist)
        for raw_artist in tidal_track["artists"]
    ]
    chosen_quality, best_available_quality = self._get_quality(tidal_track)
    # Lossless tiers download as FLAC, lossy tiers as MP4 containers.
    extension_for_quality = {
        "master": "flac",
        "lossless": "flac",
        "high": "mp4",
        "low": "mp4",
    }
    return Track(
        id=tidal_track["id"],
        name=tidal_track["title"],
        artists=artists,
        album=album,
        explicit=tidal_track.get("explicit", False),
        track_number=tidal_track["trackNumber"],
        disc_number=tidal_track["volumeNumber"],
        chosen_quality=chosen_quality,
        best_available_quality=best_available_quality,
        file_extension=extension_for_quality[chosen_quality],
    )
def user_home(username):
    """Render a user's home page with their tracks; 404 if unknown."""
    user = User.selectBy(username=username).getOne(None)
    if user is None:
        abort(404)
    return context(user=user, tracks=Track.selectBy(user=user))
def get(self):
    """Delete a trip along with its tracks, their blobs and statistics."""
    # Resolve the trip from the request parameter.
    trip_key = self.request.get('trip_id')
    tripk = ndb.Key(urlsafe=trip_key)
    trip = tripk.get()
    # Delete every track belonging to the trip.
    for track in Track.query(ancestor=tripk).fetch():
        # Remove the uploaded .gpx blob backing this track.
        blobstore.delete(track.blob_key)
        # Remove the per-track statistics.
        for item in TrackStatistic.query(ancestor=track.key):
            item.key.delete()
        track.key.delete()
    # Redirect to mytrips because showing all tips will only be consistent
    # in scope of user and only eventually consistent for whole datastore.
    trip.key.delete()
    self.redirect('/tripmanager')
def get_all_tracks():
    """Return every Track as a list of plain dicts.

    Rows whose attributes cannot be read are replaced with a 'not found'
    placeholder dict (preserving the original best-effort behaviour).
    """
    dicts_Track = []
    # Iterate rows directly instead of `for x in range(len(...))`.
    for row in Track.select():
        try:
            dicts_Track.append({
                'id': row.id,
                'number': row.number,
                'title': row.title,
                'artist': row.artist,
                'album': row.album,
                'duration': row.duration
            })
        except Exception:  # narrowed from a bare except
            dicts_Track.append({
                'id': 0,
                'number': 'not found',
                'title': 'not found',
                'artist': 'not found',
                'album': 'not found',
                'duration': 'not found'
            })
    return dicts_Track
def get_tracks_by_album_title(title:str):
    """Return the tracks of the album named `title` as plain dicts.

    Rows whose attributes cannot be read are replaced with a 'not found'
    placeholder dict (preserving the original best-effort behaviour).
    """
    my_tracks = []
    # Iterate rows directly instead of `for x in range(len(...))`.
    for row in Track.select().where(Track.album == title):
        try:
            my_tracks.append({
                'id': row.id,
                'number': row.number,
                'title': row.title,
                'artist': row.artist,
                'album': row.album,
                'duration': row.duration
            })
        except Exception:  # narrowed from a bare except
            my_tracks.append({
                'id': 0,
                'number': 'not found',
                'title': 'not found',
                'artist': 'not found',
                'album': 'not found',
                'duration': 'not found'
            })
    return my_tracks
def get_collections(self):
    """Load collection JSON files and build their albums from track files;
    cached after the first call. Returns False when not goosed."""
    if not self.library_goosed:
        print("The library is not goosed. Call goose_up_library().")
        return False
    if self.collections:
        return self.collections
    collections_directory = "{0}/src/collections".format(self.base_location)
    self.collections = []
    for entry in os.scandir(collections_directory):
        with open(entry.path) as cfp:
            collection_json = json.load(cfp)
            collection = Collection(title=collection_json["title"])
            for album_path in collection_json["albums"]:
                album = Album()
                for album_file in os.scandir(album_path):
                    resolved = Track.init_from_file(album_file.path)
                    if resolved:
                        album.tracks.append(resolved)
                        # Album metadata mirrors the last readable track.
                        album.title = resolved.album_title
                        album.artist_name = resolved.artist_name
                collection.albums.append(album)
            self.collections.append(collection)
    return self.collections
def make_track_tree(id,location,type,season): ''' Create tree (http://www.ztree.me/v3/main.php#_zTreeInfo) ''' #get all trips trips_query = Trip.query(ancestor=trip_key(id)) if location != []: trips_query = trips_query.filter(Trip.trip_tags.location.IN(location)) if type != []: trips_query = trips_query.filter(Trip.trip_tags.type.IN(type)) if season != []: trips_query = trips_query.filter(Trip.trip_tags.season.IN(season)) trips = trips_query.fetch() #create tree structure from trips tree = [] i = 1 of = 0 tree.append({'id':0, 'pId':0, 'name':'My Trips','isParent': 'true','open':'true'}) for trip in trips: tree.append({'id':i, 'pId':0, 'name':str(trip.trip_name),'isParent': 'true', 'click':"openTrip('"+ str(trip.key.urlsafe()) +"')"}) #get tracks from trip track_query = Track.query(ancestor=trip.key).order(-Track.creation_date) tracks = track_query.fetch(20) for track in tracks: tree.append({'id':i+10+of, 'pId':i, 'name':str(track.track_name),'click':"openTrack('"+ str(track.key.urlsafe()) +"')"}) of += 1 i += 1 return tree
def obj_create(self,bundle,**kwargs):
    """Attach the track at bundle.data['uri'] to the requested station.

    Raises when the station already contains the song.
    """
    station = Station.objects.get(pk=bundle.data['station'])
    track = Track.from_uri(bundle.data['uri'])
    track.save()
    if not station.add_track(track):
        raise Exception('already added song')
    return track
def demo():
    """Render the demo page using a fixed demo account's posts and analysis."""
    demoID = '513a07ec9c7c840007acfc52'  # me!
    return render_template("demo.html",
                           title='Demo',
                           posts=Track.objects(author=demoID),
                           analysis=Analysis.objects(author=demoID).first())
def gettracks():
    """Return all tracks serialized as a JSON array of dicts."""
    return json.dumps([track.toDict() for track in Track.all()])
def delete(track_id):
    """Delete a track row (with children) and its GPX file, then redirect."""
    track = Track.get(Track.id == track_id)
    track_name = track.name
    gpx_fspath = os.path.join(UPLOAD_BASE_DIR, track.path)
    # Remove DB rows first, then the file on disk.
    track.delete_instance(recursive=True)
    os.remove(gpx_fspath)
    flash("Track '%s' deleted sucessfully." % track_name, "info")
    return redirect(url_for("show"))
def song_info(spotify_id):
    """Show the audio-analysis page for a single Spotify track."""
    token = current_user.get_access_token()
    track = Track.find_by_id(token, spotify_id)
    track.perform_audio_analysis()
    return render_template('song_analysis.html',
                           title=track.track_info['name'],
                           track_info=track.to_simple_json(),
                           labels=[],
                           data_labels=track.data_points().get('labels'),
                           data_values=track.data_points().get('data'))
def next_time_range(self):
    """Calculates next missing range of data.

    Returns None once no ranges remain; on the very first request, the
    range spans from the newest stored track (or 0) to "now".
    """
    # (attribute name `request_couter` is preserved as spelled elsewhere)
    if self.request_couter > 0 and TimeRange.table_empty(self.cur):
        return None
    if self.request_couter == 0:
        # First cycle: fetch everything newer than what is already stored.
        timestamp_from = (0 if Track.table_empty(self.cur)
                          else Track.latest(self.cur).timestamp)
        return (timestamp_from, self._utc_timestamp_now())
    latest = TimeRange.latest(self.cur)
    return latest.timestamp_from, latest.timestamp_to
def generate_data():
    """Populate the datastore with fake users, artists, tracks and albums."""
    faker = Faker()
    for i in range(0, 24):
        name = faker.name()
        u = User(name=name)
        u.put()
        full = faker.lorem()
        # // keeps the slice index an int (plain / breaks on Python 3).
        firstbio, secounddis = full[:len(full) // 2], full[len(full) // 2:]
        a = Artist(name=name, owner=u._key, is_active=True,
                   location=faker.full_address(), bio=firstbio)
        a.put()
        cover_art = "http://lorempixel.com/1024/1024/{}".format(
            random.choice(image_sets))
        track_list = []
        # Renamed loop variable: the original reused `i`, shadowing the
        # outer loop's counter.
        for j in range(0, 12):
            t = Track(title=faker.name(),
                      artist_id=a._key,
                      artist_name=a.name,
                      source_file=random.choice(song_set),
                      explicit=False,
                      is_active=True,
                      price=CurrencyValue(amount=0, currency="USD"))
            t.put()
            track_list.append(t._key)
        alb = Album(title=faker.name(),
                    artist_name=a.name,
                    artist_id=a._key,
                    tracks=track_list,
                    cover_art=cover_art,
                    description=secounddis,
                    release_date=datetime.utcnow(),
                    genre_id=random.choice(genres),
                    price=CurrencyValue(amount=0, currency="USD"),
                    is_active=True)
        alb.put()
def set_tracks_to_db(tracks, session):
    """Persist every (artist, title) entry in `tracks` as a Track row."""
    # Add each track to the session.
    for entry in tracks:
        artist = entry[0]
        title = entry[1]
        set_id = tracks.get(entry, -1)
        session.add(Track(artist, title, set_id))
    # Commit once at the end.
    session.commit()
def filter_library():
    """Filter the (currently hard-coded) library by the query string.

    Uses a fixed two-track fixture instead of the user's real saved
    tracks -- presumably a development stub; confirm before shipping.
    """
    print("REACHED BEGINNING OF FUNCTION")
    # Hard-coded sample tracks with the audio-feature fields the filter
    # expects.
    dummy_list = [{
        'name': 'Just My Luck',
        'album_name': 'PMD',
        'artists': 'Marc E. Bassy, blackbear',
        'popularity': 60,
        'id': '2QsBAfiNmngcrZsOTznqBG',
        'audio_features': {
            'danceability': 0.616,
            'energy': 0.62,
            'valence': 0.367,
            'tempo': 156.986,
            'loudness': -6.398,
            'acousticness': 0.231,
            'instrumentalness': 7.83e-06,
            'liveness': 0.181,
            'speechiness': 0.0358
        }
    }, {
        'name': 'Rushing Back',
        'album_name': 'Rushing Back',
        'artists': 'Flume, Vera Blue',
        'popularity': 70,
        'id': '2zoNNEAyPK2OGDfajardlY',
        'audio_features': {
            'danceability': 0.574,
            'energy': 0.612,
            'valence': 0.368,
            'tempo': 136.046,
            'loudness': -4.741,
            'acousticness': 0.357,
            'instrumentalness': 0,
            'liveness': 0.158,
            'speechiness': 0.0781
        }
    }]
    #import json; print(json.dumps(request.args, indent=2))
    #import code; code.interact(local=dict(globals(), **locals()))
    # Wrap the fixture in Track objects and filter by the query parameter.
    library = TrackCollection(
        current_user.get_access_token(),
        list(
            map(lambda x: Track(current_user.get_access_token(), x),
                dummy_list)))
    return render_template('library.html',
                           saved_tracks=library.filter_by_query(
                               request.args.get('query_str')))
    # # Launch App
    # if __name__ == "__main__":
    #     try:
    #         app.run(host='0.0.0.0', port=8080, debug=True)
    #     except:
    #         print("Server Crashed :(")
def _parse_track(self, json_obj):
    # Build a Track from a raw API JSON object, resolving the primary
    # artist, the full artist list, the album, and (when logged in) the
    # favourite flag.
    track = Track(**json_obj)
    # Primary artist: prefer the singular 'artist' key, otherwise the
    # first entry of 'artists'.
    if 'artist' in json_obj:
        track.artist = self._parse_artist(json_obj['artist'])
    elif 'artists' in json_obj:
        track.artist = self._parse_artist(json_obj['artists'][0])
    # Full artist list plus featured artists.
    if 'artists' in json_obj:
        track.artists, track._ftArtists = self._parse_all_artists(
            track.artist.id, json_obj['artists'])
    else:
        track.artists = [track.artist]
        track._ftArtists = []
    # NOTE(review): if neither 'artist' nor 'artists' is present,
    # track.artist is whatever Track(**json_obj) produced -- confirm the
    # API always sends one of the two keys.
    track.album = self._parse_album(json_obj['album'], artist=track.artist)
    if self.is_logged_in and self.user.favorites:
        track._isFavorite = self.user.favorites.isFavoriteTrack(track.id)
    return track
def addTrack(for_show, track_info):
    """Persist a new Track for `for_show`, populated from `track_info`."""
    new_track = Track()
    new_track.show = for_show
    # Copy the metadata fields straight from the info dict.
    for field in ("artist", "title", "start_mspos"):
        setattr(new_track, field, track_info[field])
    new_track.put()
    return new_track
def webhook_lidarr():
    """Handle a Lidarr webhook: log each delivered track and save it.

    Returns 200 on success (or a Test event), 500 on a malformed payload.
    """
    try:
        # FIX: validate the payload BEFORE reading keys from it -- the
        # original indexed request.json first, so this check could never
        # trigger on an empty body.
        if not request.json:
            error = {
                'error': 'Request JSON not correct',
                'code': 10,
            }
            return HTTPResponse(status=500, body=error)
        if request.json['eventType'] == 'Test':
            aprint('Received TEST webhook', 'WEBHOOK.MAIN')
            return HTTPResponse(status=200)
        # pprint.pprint(request.json)
        webhook_request = request.json
        artist = webhook_request['artist']['name']
        tracks = webhook_request['tracks']
    except Exception as e:
        error = {
            'error': 'Request JSON not correct',
            'code': 10,
            'stack_trace': str(e)
        }
        return HTTPResponse(status=500, body=error)
    for track in tracks:
        track_data = {
            'ARTIST': artist,
            'TITLE': track['title'],
            'TRACK_NUMBER': track['trackNumber'],
            'QUALITY': track['quality']
        }
        msg = '{ARTIST} - {TITLE} ({TRACK_NUMBER}) | {QUALITY}'.format(
            ARTIST=track_data['ARTIST'],
            TITLE=track_data['TITLE'],
            TRACK_NUMBER=track_data['TRACK_NUMBER'],
            QUALITY=track_data['QUALITY'])
        new_track = Track(artist=track_data['ARTIST'],
                          title=track_data['TITLE'],
                          tracknumber=track_data['TRACK_NUMBER'],
                          quality=track_data['QUALITY'],
                          timestamp=datetime.datetime.now(current_tz))
        new_track.save()
        aprint(msg, 'WEBHOOK.MUSIC')
    return HTTPResponse(status=200)
def test_db_create_track(self):
    """A track can be inserted, found by name, and removed again."""
    track = Track('track_name', 'genre_name', 'release_date',
                  1, 0, 'url', True, 'uri')
    db.session.add(track)
    db.session.commit()
    fetched = Track.query.filter(Track.name == 'track_name').first()
    assert fetched is track
    db.session.delete(track)
    db.session.commit()
    assert Track.query.filter_by(name='track_name').first() is None
def catalog_mp3(self, path):
    """Catalog a single mp3 file.

    Returns False for already-seen paths, None when parsing fails,
    otherwise the result of self.catalog(track).
    """
    if path in self.files:
        return False
    try:
        track = Track.from_file(path)
        # Only mark the path as seen once it parsed successfully.
        self.files.add(path)
    except Exception as e:
        # str(e) instead of e.__str__(): same text, idiomatic call.
        print("catalog failed (" + path + "): " + str(e))
        return
    return self.catalog(track)
def track_decoder(t, album):
    """Build a Track from a Spotify track stub.

    Fetches the full track payload from the stub's href; genre and release
    date are taken from the enclosing album.
    """
    # explicit = True if t['explicit'] == 'true' else False
    # need to get genre and release date from album
    # need to get url from external URL object
    spotify_track = requests.get(t['href'])
    full = json.loads(spotify_track.text)
    return Track(full['name'], album.genre, album.release_date,
                 full['duration_ms'], full['popularity'],
                 full['preview_url'], full['explicit'], full['uri'])
def track_save(request):
    """Create or update a Track (chosen by the optional track_id field)
    and associate the selected genres, then redirect to its detail page."""
    genres = Genre.objects.filter(id__in=request.POST.getlist('genre'))
    track_id = request.POST.get('track_id')
    if track_id is not None:
        # Update an existing track in place.
        track = Track.objects.get(id=track_id)
        track.track_name = request.POST.get('track_name')
        track.rating = request.POST.get('rating')
        track.genres = genres
        # print track.genres.all()
        track.save()
    else:
        # Create a fresh track, then attach each genre.
        track = Track(
            track_name=request.POST.get('track_name'),
            rating=request.POST.get('rating'),
        )
        track.save()
        for genre in genres:
            track.genres.add(genre)
        track.save()
    return redirect('music.views.track_detail', track_id=track.id)
def read_track(file: str) -> Track:
    """Parse a CSV of radii into a Track of doubly linked Points."""
    track = Track()
    with open(file) as handle:
        for index, row in enumerate(csv.DictReader(handle)):
            previous = track.points[-1] if index > 0 else None
            point = Point(index, float(row['radius']), prev=previous)
            track.points.append(point)
            # Link the previous point forward to complete the chain.
            if previous:
                previous.next = point
    return track
def get(self):
    # Render the single-trip page: its tracks, blob info, statistics and
    # per-city coordinates, plus an upload URL for new GPX files.
    upload_url = blobstore.create_upload_url('/tripmanager/upload')
    user,_,url,url_linktext = self.get_user()
    #get specific trip
    trip_key = self.request.get('trip_id')
    tripk = ndb.Key(urlsafe=trip_key)
    trip = tripk.get()
    #get id of user for given trip (the trip's parent key)
    trip_user = tripk.parent().id()
    #get tracks for trip, newest first, capped at 20
    track_query = Track.query(ancestor=tripk).order(-Track.creation_date)
    tracks = track_query.fetch(20)
    #get number of tracks
    num = len(tracks)
    #get global statistic for trip
    stat_query = TrackStatistic.query(ancestor=trip.key).fetch(1)
    #get blobInfo objects from blob_key
    bli = []
    for track in tracks:
        bli.append(BlobInfo(track.blob_key))
    #get trip cities
    cities = trip.cities
    #create list of lon,lat pairs for every city via the geonames API;
    #fall back to [0, 0] when the lookup fails (best-effort).
    cordinates = []
    for city in cities:
        try:
            city = city.lower().replace(" ", "+")
            api_url = "http://api.geonames.org/searchJSON?formatted=true&name={0}&maxRows=1&lang=es&username=wsanjaw&style=short".format(city)
            result = urlfetch.fetch(api_url)
            cordinates.append(helper.procesCity(result.content))
        except:
            cordinates.append([0,0])
    #create template
    template_values = {'user': user, 'url': url, 'url_linktext': url_linktext,
                       'trip':trip,'upload':upload_url, 'tracks':tracks,
                       'blobs':bli,'num':num,'trip_user':trip_user,
                       'stats':stat_query,'cordinates':cordinates}
    template = JINJA_ENVIRONMENT.get_template('templates/new/onetrip.html')
    #set cookie value to this page url (used for post-action redirects)
    self.response.set_cookie('redirect_url', self.request.url)
    self.response.write(template.render(template_values))
def add_record(release):
    # Import a Discogs release into the local DB: the Record row plus its
    # artists and tracklist. Returns the existing Record when this
    # release was imported before.
    from models import Record, Artist, Track
    release_discogs_id = release.id
    try:
        # Check if we already have this album
        existing = Record.objects.get(discogs_id=release_discogs_id)
        return existing
    except Record.DoesNotExist:
        # Process record
        # Titles arrive as "Artist - Title"; keep only the title part.
        record_title = release.title
        if (len(record_title.split('- '))>1):
            record_title = record_title.split('- ')[1]
        record = Record(discogs_id = release_discogs_id,
                        title = record_title,
                        year = release.year,
                        thumb = release.thumb,
                        notes = release.notes)
        record.save()
        # Process artists
        # NOTE(review): a new Artist row is created on every import, even
        # when one with this discogs_id already exists -- confirm whether
        # get_or_create was intended.
        for release_artist in release.artists:
            artist = Artist(discogs_id=release_artist.id, name=release_artist.name)
            artist.save()
            record.artists.add(artist)
        # Process tracklist
        for release_track in release.tracklist:
            track = Track()
            track.position = release_track.position
            track.title = release_track.title
            track.duration = release_track.duration
            track.save()
            record.tracklist.add(track)
        record.save()
        return record
def add_to_playlist(track, user, track_info):
    """Record that `user` played `track`, creating the local Track row if
    it is new. Returns the local Track id. (`track_info` is unused.)"""
    local_track = Track.query.filter_by(spotify_id=track['id']).first()
    if local_track is None:
        local_track = Track(track['name'], get_artists_string(track),
                            track['id'], 0, "")
        db.session.add(local_track)
        db.session.commit()
    # Log the play against the (now guaranteed) local track row.
    db.session.add(PlayedTracks(local_track.id, user.id))
    db.session.commit()
    return local_track.id
def _save_spot(self, cursor):
    """Store a Track at the cursor's (lat, lon) unless one already exists.

    Coordinates are rounded to 6 decimal places before lookup/insert.
    """
    cursor = [round(x, 6) for x in cursor]
    lat = cursor[0]
    lon = cursor[1]
    track = self.session.query(Track).filter_by(lat=lat, lon=lon).first()
    if not track:
        track = Track(lat=lat, lon=lon, rad=self.rad, found=0, saved=0)
        try:
            self.session.add(track)
            self.session.commit()
        except exc.SQLAlchemyError as err:
            # FIX: was `session.rollback()` -- a NameError, since only
            # self.session is in scope here.
            self.session.rollback()
            logger.error(err)
def post(self):
    """Handle a GPX upload: attach a new Track to its trip and queue the
    heavy processing on the backend."""
    blob_info = self.get_uploads('file')[0]
    # The trip id comes from a hidden field in the form.
    tripk = ndb.Key(urlsafe=self.request.get('trip_id'))
    # Re-save the trip (refreshes its stored state).
    trip = tripk.get()
    trip.put()
    # The new track's parent is the current trip.
    track = Track(parent=tripk,
                  track_name=self.request.get('track_name'),
                  blob_key=blob_info.key())
    track.status = "Processing track, try again later"
    track.put()
    # Add the task to the default queue.
    taskqueue.add(url='/worker',
                  params={'key': track.key.urlsafe()},
                  target="mybackend")
    self.redirect(self.request.referer)
def user(name):
    """Show a user's profile page; users may only view their own."""
    user = User.objects(name = name).first()
    if user is None:  # was `== None`
        flash('User ' + name + ' not found.')
        return redirect(url_for('index'))
    if g.user == user:
        posts = Track.objects(author=user)
        analysis = Analysis.objects(author=g.user).first()
        return render_template('user.html',
                               user = user,
                               posts = posts,
                               analysis = analysis)
    flash('You can only look at your own profile.')
    return redirect(url_for('index'))
def edit_track():
    """Update a stored track with the person/encoding chosen by the client."""
    original = db.get_track(request.json['id'])
    updated = Track(
        id=original['id'],
        date=original['date'],
        time=original['time'],
        kind=original['kind'],
        camera_id=original['camera']['id'],
        # Only person and encoding come from the request payload; the rest is
        # copied from the stored track unchanged.
        person_id=request.json['person']['id'],
        encoding_id=request.json['encoding_id'],
        image=original['image'],
    )
    db.edit_track(updated)
    return jsonify({'track': 'true'})
def crawl(collection, path):
    """Walk *path*, reading tag metadata from each new audio file and
    inserting a Track document into *collection*."""
    for file_ in walkfiles(path):
        # Skip files that are already indexed.
        if collection.find_one({'path': file_}):
            continue

        metadata = mutagen.File(file_, easy=True)
        if not metadata:
            continue

        # Collect the known tag keys, defaulting missing ones to 'Unknown'.
        data = {}
        for key in KEYS:
            try:
                data[key] = metadata[key][0]
            except KeyError:
                data[key] = 'Unknown'

        track = Track(data['title'], data['artist'], data['album'],
                      data['genre'], data['year'], metadata.info.length,
                      relpath(file_, path), 0)
        collection.insert(track.to_dict())
def index():
    """Home page: accept a new daily Track entry and list the user's posts."""
    form = TrackForm()
    if form.validate_on_submit():
        entry = Track(
            weight=form.weight.data,
            happy=form.happy.data,
            diet=form.diet.data,
            exercise=form.exercise.data,
            floss=form.floss.data,
            meditation=form.meditation.data,
            note=form.note.data,
            timestamp=datetime.utcnow(),
            author=g.user.to_dbref(),
        )
        entry.save()
        flash('Your post is now live!')
        # Recompute the rolling weight average in the background.
        calculate_weightAvg_async(g.user.to_dbref())
        return redirect(url_for('index'))

    posts = Track.objects(author=g.user)
    analysis = Analysis.objects(author=g.user).first()
    return render_template("index.html", title='Home', form=form,
                           posts=posts, analysis=analysis)
def stats(self):
    """Return a printable, multi-line stats summary for the current user.

    All figures are computed from the local database via the Track class
    helpers using this instance's cursor (``self.cur``).
    """
    data = {
        "username": self.username,
        "count": Track.count(self.cur),
        "top_artists": u", ".join(Track.favourite_artists(self.cur)),
        "most_active_day_of_week": Track.most_active_day_of_week(self.cur),
        "average_tracks_per_day": Track.average_tracks_per_day(self.cur),
    }
    # BUG FIX: user-facing message read "fetched for far" (typo) -> "so far".
    msg = u"""
    Stats for user '{username}':
    - listened to a total of {count} tracks.
    - top 5 favorite artists: {top_artists}.
    - listen to an average of {average_tracks_per_day} tracks a day.
    - most active day is {most_active_day_of_week}.
    All stats based on data fetched so far
    """.format(**data)
    # cleandoc strips the leading newline and common indentation.
    return inspect.cleandoc(msg)
def create_tracks_msg():
    """Build the '*Tracks*' message listing all stored tracks.

    Returns:
        (count, text) -- number of track entries and the formatted message,
        or (0, '') when no tracks exist.
    """
    msg = '*Tracks*\n\n{TRACKS}\n\n\n'
    # BUG FIX: chained `.order_by().order_by()` calls REPLACE the previous
    # ordering in peewee, so the artist sort was silently dropped.  Passing
    # both keys to a single order_by() sorts by artist, then track number.
    tracks = Track.select().order_by(Track.artist, Track.tracknumber)
    if len(tracks) > 0:
        tks = []
        for track in tracks:
            timestamp = get_datetime(track.timestamp)
            tks.append(CONFIG['custom_track_entry'].format(
                ARTIST=track.artist,
                TITLE=track.title,
                TRACK_NUMBER=track.tracknumber,
                QUALITY=track.quality,
                TIME=get_hours_min(timestamp)))
        tks_full_text = '\n'.join(tks)
        msg = msg.format(TRACKS=tks_full_text)
        return len(tks), msg
    return 0, ''
def dict_to_db(data_dict, title, index_to_id):
    """Enters the data into the MongoDB.

    Args:
        data_dict: dict containing annotations for one video, e.g.::

            {
              'url': <url>,
              'tracks': [
                {'faces': [{'time': 0.0, 'box': [1, 2, 3, 4]},
                           {'time': 1.0, 'box': [2, 3, 4, 5]}, ...],
                 'annotation': 'Georges Clooney'}
              ]
            }

        title: string, title of the movie.
        index_to_id: dict mapping index in the timecode.txt to entity ID.
    """
    tracks = []
    for track in data_dict['tracks']:
        ent_id = index_to_id[int(track['annotation'])]
        # Boxes are time-sorted so first/last give the track's time span.
        boxes = []
        for box in sorted(track['faces'], key=lambda x: x['time']):
            boxes.append(
                Box(timestamp=box['time'],
                    x=box['box'][0],
                    y=box['box'][1],
                    w=box['box'][2],
                    h=box['box'][3]))
        tracks.append(
            Track(start=boxes[0].timestamp,
                  end=boxes[-1].timestamp,
                  boxes=boxes,
                  entity=ent_id))

    # Create the video
    vid = Video(title=title, tracks=tracks)
    vid.save()

    # Get the hash of the URL (minus the protocol)
    # NOTE(review): assumes every URL starts with 'https://www.' -- confirm.
    url = data_dict['url'].split('https://www.')[1]
    hasher = hashlib.md5()
    # BUG FIX: md5.update() requires bytes on Python 3; passing a str raised
    # TypeError.  Encoding is a no-op for the ASCII URLs expected here.
    hasher.update(url.encode('utf-8'))
    hash_md5 = hasher.hexdigest()
    SourceVideo(source_url=url, hash=hash_md5, reminiz_video=vid.id).save()
def search_track(self, track, artist='', album=''):
    """Search Spotify for *track*, optionally constrained by artist/album.

    Fuzzy-scores each result's name (and, when given, its album and artist
    names); returns a created Track for the first result whose mean score
    is at least 50, or False when nothing matches.
    """
    query = (artist + ' ' + track) if artist else track
    results = self.sp.search(q=query, type='track')

    for result in results['tracks']['items']:
        scores = []
        # @todo: move all the mean_match stuff to another method where you
        # just pass as many args as you want and their matches
        result['track_match'] = Strings.fuzzy_match(result['name'], track)
        scores.append(result['track_match'])

        # Score again with parenthesised suffixes (features etc.) stripped.
        stripped = re.sub(r'\([^)]*\)', '', result['name'])
        scores.append(Strings.fuzzy_match(stripped, track))

        if album:
            result['album']['album_match'] = Strings.fuzzy_match(
                result['album']['name'], album)
            scores.append(result['album']['album_match'])

        if artist:
            if album:
                for album_artist in result['album']['artists']:
                    album_artist['album_artist_match'] = Strings.fuzzy_match(
                        album_artist['name'], artist)
                    scores.append(album_artist['album_artist_match'])
            for track_artist in result['artists']:
                track_artist['track_artist'] = Strings.fuzzy_match(
                    track_artist['name'], artist)
                scores.append(track_artist['track_artist'])

        if statistics.mean(scores) >= 50:
            return Track.create()

    return False
def hodl(ticker: str, start_date: date, end_date: date, user_config: dict = None):
    """Build a Track of daily closing prices for *ticker* and return it as JSON.

    Prices are offset against the first day's close and positioned on the
    configured x/y scales starting from the settings' starting area.
    """
    user_config = user_config or {}

    # Per-ticker defaults, overlaid with any non-None user-supplied keys.
    config = deepcopy(
        DEFAULT_CONFIG.get(
            ticker, {
                'x_scale': settings.DEFAULT_X_SCALE,
                'y_scale': settings.DEFAULT_Y_SCALE
            }))
    config.update({k: v for k, v in user_config.items() if v is not None})

    data = tiingo_client.get(
        start_date.isoformat() if start_date else "",
        end_date.isoformat() if end_date else "",
        ticker,
    )

    # Actual range covered by the returned data (may differ from requested).
    start = data[0]
    start_date = parser.parse(start["date"]).date()
    end_date = parser.parse(data[-1]["date"]).date()

    x_scale = config["x_scale"]
    y_scale = config["y_scale"]

    points = [
        Point(
            x=idx + settings.STARTING_AREA_X * x_scale,
            y=day["close"] - start["close"] + settings.STARTING_AREA_Y * y_scale,
            date_recorded=day["date"].split('T')[0],
            price=day["close"],
        )
        for idx, day in enumerate(data)
    ]

    track = Track(start_date, end_date, ticker, points, config)
    return track_to_json(track)
def search_view():
    """Return albums, artists and songs matching the ``q`` query parameter.

    ``n`` caps the number of results per category (default 20, max 100).
    """
    query = request.args.get('q')
    length = request.args.get('n', 20, type=int)

    # `not query` guards the None/empty case before len() is taken.
    if not query or len(query) <= 3:
        abort(400, "Must provide a query longer than 3 characters")
    if length > 100:
        abort(400, "Maximum 100 of results")

    albums = Album.search(query).limit(length)
    artists = Artist.search(query).limit(length)
    songs = Track.search(query).limit(length)

    # NOTE(review): artists are wrapped in Artist(...) before serializing
    # while albums/songs are serialized directly -- presumably Artist.search
    # yields raw rows here; confirm against the model layer.
    return jsonify({
        "albums": [album.serialize() for album in albums],
        "artists": [Artist(artist).serialize() for artist in artists],
        "songs": [song.serialize() for song in songs],
    })