def parse_for_image(url):
    # tools.get_url returns a tuple; index 2 is the raw response body
    soup = BeautifulSoup(tools.get_url(url)[2])
    link = soup.find_all(style=compile('cursor: pointer'))
    if len(link) == 0:
        # fall back to matching the image src path directly
        link = soup.find_all(src=compile('/manga/'))
    imgurl = sub(' ', '%20', link[0]['src'])
    imgext = imgurl[-3:]  # file extension, e.g. 'jpg' or 'png'
    return imgext, tools.get_url(imgurl)[2]
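A minimal usage sketch for the scraper above. It assumes tools.get_url returns a tuple whose third element is the raw response body (which is how the [2] index is used throughout these examples); the URL and filename are placeholders.

# hedged sketch: save the scraped page image to disk
ext, image_bytes = parse_for_image('http://example.com/manga/title/chapter/1')
with open('page001.' + ext, 'wb') as f:
    f.write(image_bytes)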
Example n. 2
    def create(self):
        self.tracks = []
        url = tools.get_url("echonest", "%sCreate" % self.tipo, self.data)
        j = tools.getjson(url)
        logging.error(url)
        session = j["response"]["session_id"]
        self.session = session

        url = tools.get_url("echonest", "getNext10", session)
        logging.error(url)
        j = tools.getjson(url)

        self.parseTracks(j)
def parse_for_page(url):
    page = []
    soup = BeautifulSoup(tools.get_url(url)[2])
    for item in soup.find_all(href=compile("/manga/.*/chapter/.*/")):
        page.append([tools.unifyChapter(findall('chapter/.*/(.*)',
                                        item['href'])), item['href']])
    return page
Example n. 4
def parse_for_page(url):
    page = []
    soup = BeautifulSoup(tools.get_url(url)[2])
    for item in soup.find_all(onclick=compile("changePage")):
        # the page number is the first run of digits in the link text
        page.append([tools.unifyNumber(findall(r'\d+', item.text)[0]),
                    item['href']])
    return page
Example n. 5
def get_similar(mbid=""):
    # read-through cache: return the memcached result when present
    data = memcache.get("similar_%s" % mbid)
    if data is not None:
        return data

    logging.error("GETTING SIMILAR FROM ECHONEST")
    url = tools.get_url('echonest', 'similar', mbid)
    j = tools.get_json(url)
    if j is None:
        return []

    similar = []
    try:
        artists = j['response']['artists']
    except (KeyError, TypeError):
        return []
    for i in artists:
        name = i['name']
        if 'foreign_ids' in i:
            # foreign_id looks like "musicbrainz:artist:<mbid>"; keep only the mbid
            s_mbid = i['foreign_ids'][0]['foreign_id'][19:]
            similar.append([name, s_mbid])

    logging.error("END OF ECHONEST CODE")
    memcache.set("similar_%s" % mbid, similar)
    return similar
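get_similar follows a read-through cache pattern (check memcache, compute on a miss, store the result) that recurs in several of the handlers below. A generic sketch of the same idea, assuming the App Engine memcache module and a caller-supplied compute function; this helper is not part of the original code.

from google.appengine.api import memcache

def cached(key, compute, ttl=3600):
    # return the memcached value when present; otherwise compute it,
    # store it for `ttl` seconds and return it (sketch, not project code)
    value = memcache.get(key)
    if value is None:
        value = compute()
        memcache.set(key, value, time=ttl)
    return value

# e.g. cached("similar_%s" % mbid, lambda: fetch_similar(mbid))  # fetch_similar is hypothetical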
Example n. 6
def funds():
    fund = Fund.query.all()
    if fund:
        data = []
        for u in fund:
            object_data = {
                'name': u.name,
                'description': u.description,
                'short_description': u.short_description,
                'budget': u.budget,
                'phone': u.phone,
                'email': u.email,
                'bank_book': u.bank_book,
                'cover': get_url('funds', u.cover),
                'id': u.id
            }
            data.append(object_data)
        response = {
            'info': 'success',
            'data': data
        }
    else:
        response = {
            'info': 'error'
        }
    return jsonify(response)
Example n. 7
def parse_for_chap(url):
    chap = []
    soup = BeautifulSoup(tools.get_url(url)[2])
    for item in soup.find_all(href=compile("/read/.*/fr/")):
        chap.append([tools.unifyChapter(findall('fr/[0-9]+/(.*)/',
                                        item['href'])), item['href']])
    return chap
Example n. 8
def events():
    city_name = request.args.get('city')
    city = City.query.filter(City.city == city_name).first()
    if city is None:
        # unknown city: avoid dereferencing None below
        return jsonify({'info': 'error'})
    u = Event.query.filter(Event.city_id == city.id).all()
    if u:
        data = []
        for event in u:
            object_data = {
                'info': 'success',
                'title': event.title,
                'description': event.description,
                'date': event.date,
                'cover': get_url('events', event.cover),
                'longi': event.longi,
                'lati': event.lati,
                'city': event.city_id,
                'id': event.id,
                'category': event.category_id,
                'address': event.address
            }
            data.append(object_data)
        response = {
            'info': 'success',
            'data': data
        }
    else:
        response = {
            'info': 'error'
        }
    return jsonify(response)
Example n. 9
def patients():
    if request.method == 'GET':
        lim = request.args.get('limit')
        off = request.args.get('offset')
        fund_id = request.args.get('fund_id')
        if fund_id:
            results = Patient.query.filter(Patient.fund_id == fund_id).limit(lim).offset(off).all()
        else:
            results = Patient.query.limit(lim).offset(off).all()
        if results:
            data = []
            for u in results:
                object_data = {
                    'fund': u.funds.name,
                    'name': u.name,
                    'surname': u.surname,
                    'secname': u.secname,
                    'age': u.age,
                    'diagnosis': u.diagnosis,
                    'budget': u.budget,
                    'current_budget': u.current_budget,
                    'biography': u.biography,
                    'id': u.id,
                    'cover': get_url('patients', u.cover)
                }
                data.append(object_data)
            return jsonify({
                'info': 'success',
                'data': data
            })
        else:
            return jsonify({
                'info': 'error'
            })
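The funds(), events() and patients() endpoints above all build their response dictionaries field by field. Below is a hedged sketch of a small helper that factors that out; the field names and the get_url('...', cover) call mirror the examples, but the helper itself is not part of the original code.

def serialize(obj, fields, cover_folder=None):
    # copy the listed attributes into a plain dict, resolving the cover
    # image through get_url() the same way the endpoints above do (sketch)
    data = {name: getattr(obj, name) for name in fields}
    if cover_folder is not None:
        data['cover'] = get_url(cover_folder, obj.cover)
    return data

# e.g. [serialize(p, ['id', 'name', 'surname', 'age'], 'patients') for p in results]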
Example n. 10
    def getSimilarsFromUrl(self):
        logging.error("getSimilars")
        mbid = self.key.id()
        similars = None
        #similar=memcache.get("similars of %s"%mbid)
        if similars is None:
            similars = []
            url = tools.get_url('lastfm', 'similar', mbid)
            j = tools.get_json(url)
            if j is None:
                return []

            try:
                a = j['similarartists']['artist']
            except (KeyError, TypeError):
                return []
            for i in a:
                if i["mbid"] != "":
                    try:
                        # map the last.fm mbid onto a corrected one when available
                        cmbid = CorrectArtist.by_id(i["mbid"])
                        if cmbid is not None:
                            skey = ndb.Key("Artist", cmbid.mbid)
                        else:
                            skey = ndb.Key("Artist", i['mbid'])
                        similars.append(skey)
                    except Exception:
                        pass
            self.similars = similars

            self.put()
        return similars
Example n. 11
    def get_episode(self, episode_id):
        trycount = 0
        findepisode = False

        while not findepisode:
            if trycount > 0:
                print("再接続中" + str(trycount) + "......")  # "reconnecting ..."
            if trycount > 1:
                print("error url: " + self.episode_detail_url.format(episode_id) + "\n")
                return False, "error url: " + self.episode_detail_url.format(episode_id) + "\n"

            try:
                webpage = tools.get_url(self.episode_detail_url.format(episode_id), self.headers)
                detail = json.loads(webpage.decode('utf-8'))
                episode = self.podcast.add_episode()
                episode.id = str('ximalaya_' + str(episode_id))
                episode.title = detail['title']
                # print(self.podcast.name + '=====' + episode.title)
                if 'intro' in detail:
                    episode.summary = detail['intro'].replace('\r', '\\r').replace('\n', '\\n')
                episode.publication_date = tools.publication_time(detail['createdAt'])
                episode.media = Media(detail['playUrl32'], duration=timedelta(milliseconds=detail['duration']))
                # episode.media = Media.create_from_server_response(detail['playUrl32'],
                #                                                   duration=timedelta(seconds=detail['duration']))
                episode.position = 1
                findepisode = True
            except Exception:
                # fetching or parsing failed: wait and retry
                trycount += 1
                print("30秒後に再接続する.......")  # "retrying in 30 seconds"
                sleep(30)

        return True, detail['playUrl32'] + '\n'
Example n. 12
def parse_page(title, text, proxy=None):
    #    r = r_meta.search(text)
    #    if r:
    #        encoding=r.group(1)
    #        if encoding.lower() == 'gb2312':
    #            encoding = 'gb18030'
    #    else:
    #        encoding='gb18030'
    r = r_title.search(text)
    if r:
        title = r.group(1).strip().split('/')[1]
        title = tools.to_utf8(title)

    r = r_content.search(text)
    if r:
        url = r.group(1)
        text = tools.get_url(url, proxy).strip()
        b = "document.write('"
        e = "');"
        if text.startswith(b) and text.endswith(e):
            text = text[len(b):-1 * len(e)]
        text = tools.format_html_text(text)
    else:
        text = ''
    return title + '\r\n' * 2 + text
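parse_page strips a JavaScript document.write('...') wrapper before formatting the text. The same step as a small standalone helper, a sketch that restates the slicing above and adds nothing new:

def unwrap_document_write(text):
    # remove a surrounding document.write('...') call, if present
    begin, end = "document.write('", "');"
    if text.startswith(begin) and text.endswith(end):
        return text[len(begin):-len(end)]
    return text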
Example n. 13
    def getNext(self):
        self.tracks = []
        url = tools.get_url("echonest", "getNext", self.session)
        logging.error(url)
        j = tools.getjson(url)
        logging.error(j)
        self.parseTracks(j)
Example n. 14
    def createEchoTag(self):

        url = tools.get_url("echonest", "genre", self.param).replace(" ", "%20")

        j = tools.get_json(url)
        for d in j["response"]["songs"]:
            try:
                # foreign_id has the form "musicbrainz:artist:<mbid>"
                mbid = d["artist_foreign_ids"][0]["foreign_id"].split(":")[2]
            except (KeyError, IndexError):
                continue

            track = {}
            track["artist"] = {}
            tracKey = ndb.Key("Track", d["title"] + " - " + d["artist_name"])
            trac = tracKey.get()
            if trac is not None:

                track["ytid"] = trac.ytid
                track["img"] = "http://img.youtube.com/vi/" + trac.ytid + "/0.jpg"

            cmbid = CorrectArtist.by_id(mbid)
            if cmbid is not None:
                track["artist"]["mbid"] = cmbid.mbid
                artist = ndb.Key("Artist", cmbid.mbid).get()
            else:
                track["artist"]["mbid"] = mbid
                artist = ndb.Key("Artist", mbid).get()

            if artist is not None:

                if artist.name is not None:
                    track["artist"]["name"] = artist.name
                else:
                    track["artist"]["name"] = d["artist_name"]
                if artist.logo is not None:
                    track["artist"]["logo"] = images.get_serving_url(artist.logo)
                if artist.info is not None:
                    track["artist"]["info"] = artist.info
                if artist.tags is not None:
                    track["artist"]["tags"] = artist.tags
                if artist.similars is not None:
                    similars = []
                    for s in artist.similars:
                        similar = s.get()
                        similars.append({"name": similar.name, "logo": similar.image, "mbid": similar.key.id()})
                    track["artist"]["similars"] = similars

            else:
                track["artist"]["name"] = d["artist_name"]
                track["artist"]["similars"] = []
                track["artist"]["logo"] = ""
                track["artist"]["info"] = ""
                track["artist"]["tags"] = []
            track["name"] = d["title"]
            self.tracks.append(track)
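createEchoTag pulls the MusicBrainz id out of an Echo Nest foreign id of the form "musicbrainz:artist:&lt;mbid&gt;" with split(":")[2]; get_similar above does the same by slicing off the first 19 characters. The same extraction as a tiny standalone helper (a sketch, not project code):

def mbid_from_foreign_id(foreign_id):
    # "musicbrainz:artist:<mbid>" -> "<mbid>"; returns None for other shapes
    parts = foreign_id.split(":")
    return parts[2] if len(parts) == 3 else None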
Example n. 15
def get_artist_albums(artist_mbid):
    logging.error("START OF Artist_ALBUMS")

    url = tools.get_url("musicbrainz", "artist_mbid", artist_mbid)
    xml = tools.get_xml(url)

    artist_name = xml.getElementsByTagName("name")[0].childNodes[0].nodeValue
    logo = image.get_image(artist_mbid, " ", 'artist')
    background = image.get_image(artist_mbid, artist_name, 'bg')

    artist = Artists(artist_name=artist_name, artist_mbid=artist_mbid, disambiguation=" ",
                     letter=artist_name[0], key=ndb.Key('Artists', artist_mbid))
    artist.logo = logo
    artist.background = background
    artist_key = artist.put()

    logging.error(artist)

    releases = xml.getElementsByTagName("release-group")

    albums = []
    for rg in releases:
        try:
            # skip release-groups whose primary type is not "Album"
            if rg.getElementsByTagName("primary-type")[0].childNodes[0].nodeValue != "Album":
                continue
            # a secondary type (compilation, live, ...) means skip as well;
            # when it is absent the IndexError drops us into the except branch
            rg.getElementsByTagName("secondary-type")[0]
            continue
        except Exception:
            try:
                if rg.attributes.get("type").value == "Album":
                    album_mbid = rg.attributes.get("id").value

                    b = Albums(key=ndb.Key(Albums, album_mbid, parent=ndb.Key(Artist, artist_mbid)))
                    b.album_mbid = album_mbid
                    b.album_name = rg.getElementsByTagName("title")[0].childNodes[0].nodeValue
                    b.album_date = rg.getElementsByTagName("first-release-date")[0].childNodes[0].nodeValue
                    b.album_image = image.get_image(album_mbid, b.album_name, key="album")

                    albums.append(b)
                    logging.error(b)
            except Exception:
                continue

    albums.sort(key=lambda album: album.album_date)

    ndb.put_multi(albums)

    logging.error("END OF Artist_ALBUMS")

    return artist, albums
Example n. 16
    def get(self):
        artists = memcache.get("lastfm topArtists")
        if artists is None:
            url = tools.get_url("lastfm", "topArtists", " ")
            j = tools.get_json(url)
            artists = []
            for a in j["artists"]["artist"]:
                artist = {}
                artist["name"] = a["name"]
                artist["mbid"] = a["mbid"]
                artists.append(artist)
            memcache.set("lastfm topArtists", artists)
        self.response.out.write(json.dumps(artists))
Example n. 17
    def get(self):
        tags = memcache.get("lastfm topTags")
        if tags is None:
            url = tools.get_url("lastfm", "topTags", " ")
            j = tools.get_json(url)
            tags = []
            for t in j["tags"]["tag"]:
                if t["name"] != "seen live":
                    tag = {}
                    tag["name"] = t["name"]
                    tags.append(tag)
            memcache.set("lastfm topTags", tags)
        self.response.out.write(json.dumps(tags))
Example n. 18
    def get_podcast(self):
        webpage = tools.get_url(self.album_info_url.format(self.album_id), self.headers)
        album_info = json.loads(webpage.decode('utf-8'))
        if album_info['ret'] == 200:
            album_info_data = album_info['data']

            self.podcast = Podcast()
            self.podcast.name = album_info_data['mainInfo']['albumTitle']
            self.podcast.website = self.album_url.format(self.album_id)
            if album_info_data['mainInfo']['richIntro']:
                self.podcast.description = album_info_data['mainInfo']['richIntro']
            self.podcast.language = 'cn'
            self.podcast.image = 'https:' + album_info_data['mainInfo']['cover'].split('!')[0]
            self.podcast.generator = 'kanemori.getpodcast'
            self.podcast.explicit = False
            self.podcast.withhold_from_itunes = True

            text = ''
            page_num = 1
            album_page_count = math.ceil(album_info_data['tracksInfo']['trackTotalCount'] / self.episode_pre_page) + 1
            while page_num <= album_page_count:
                webpage = tools.get_url(self.album_list_url.format(self.album_id, page_num, self.episode_pre_page),
                                        self.headers)
                album_list = json.loads(webpage.decode('utf-8'))
                for episode_info in album_list['data']['tracksAudioPlay']:
                    _, link = self.get_episode(episode_info['trackId'])
                    text += link

                page_num += 1

            # only write the feed when the album metadata was fetched successfully
            path = './podcast/ximalaya'
            if not os.path.exists(path):
                os.makedirs(path)

            self.podcast.rss_file(os.path.join(path, '{}.xml'.format(self.album_id)), minimize=True)
            # tools.save_m4a(os.path.join(path, '{}.txt'.format(self.album_id)), text)
            print("「{}」が上手に焼きました".format(self.album_id))  # roughly: "{} was burned (generated) successfully"
Example n. 19
def get_data(artist,d=False, I=False):
    
    logging.error("getting data of %s"%artist)
    data=get_ndbArtist(artist)
    if data != []:
        logging.error("mbid from ndb or memcache get_data")
        return data
    
    url=tools.get_url('musicbrainz','artist',artist)
    xml=tools.get_xml(url)
    parsed=xml.getElementsByTagName("artist")
    
    disambiguation=" "
    mbid=[]

    if xml.getElementsByTagName("artist-list")[0].attributes.get("count").value == '1' :
        
        mbidId=parsed[0].attributes.get("id").value
        url="http://musicbrainz.org/ws/2/artist/"+mbidId+"?inc=releases"

        
        x=tools.get_xml(url)  
        name=xml.getElementsByTagName("name")[0].childNodes[0].nodeValue
        disambiguation=" "
        ar=Artist(artist=name, mbid=mbidId, disambiguation=disambiguation, letter=name[0])
        
        mbid.append(ar)
        memcache.set("select * from Artist where artist='%s'"%name,[ar])

    else:
        
        for a in parsed:
        
            mbidId=a.attributes.get("id").value
            name=a.getElementsByTagName("name")[0].childNodes[0].nodeValue

            try:
                disambiguation=a.getElementsByTagName("disambiguation")[0].childNodes[0].nodeValue
                if not d:
                    # unless disambiguated results were requested, skip this entry
                    continue
            except IndexError:
                disambiguation=" "
            
            ar=Artist(artist=name, mbid=mbidId, disambiguation=disambiguation, letter=name[0])
            mbid.append(ar)
            memcache.set("select * from Artist where artist='%s'"%name,[ar])
    logging.error(mbid)

    return mbid
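Several examples hand the URL built by tools.get_url to tools.get_xml and then walk the result with getElementsByTagName, which matches the xml.dom.minidom API. A sketch of what such a helper could look like; this is an assumption about tools.get_xml, not the project's actual implementation:

import urllib2
from xml.dom import minidom

def get_xml(url):
    # fetch the URL and parse the body into a DOM document (sketch)
    body = urllib2.urlopen(url).read()
    return minidom.parseString(body)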
Example n. 20
    def get(self):
        genres=memcache.get("lastfm genres")
        if genres is not None:
            self.render("last.html",genres=genres)
        else:
            url=tools.get_url("lastfm","toptags"," ")
            j=tools.get_json(url)

            genres=[]
            for i in j["tags"]["tag"]:
                # i["url"] is like "http://www.last.fm/tag/<tag>"; keep the part after the 23-char prefix
                genres.append((i["name"],i["url"][23:]))

            memcache.set("lastfm genres",genres)
            logging.error(genres)
            self.render("last.html",genres=genres)
Example n. 21
    def getLogo(self):
        if self.logo is None:
            mbid=self.key.id()
            logging.error("uploading photo")
            url=tools.get_url('fanart','artist',mbid)
            logging.error(url)
            j=tools.get_json(url) 
            #logging.error(j)
            if j is None:
                return None

            try:
                logo=j['hdmusiclogo'][0]['url'].replace('fanart/','preview/')
                #logging.error(logo)
            except:
                try:
                    logo=j['musiclogo'][0]['url'].replace('fanart/','preview/')
                    #logging.error(logo)
                except:
                    return None

            data=urlfetch.fetch(logo).content

            logging.error("creating base64")


            content_type, body = BlobstoreUpload.encode_multipart_formdata(
              [], [('file', mbid, data)])

            logging.error("createing uploaddir")
            response = urlfetch.fetch(
              url=blobstore.create_upload_url('/uploadblob'),
              payload=body,
              method=urlfetch.POST,
              headers={'Content-Type': content_type},
              deadline=30
            )
            logging.error("response.content")
            logging.error(response.content)
            blob_key = blobstore.BlobKey(response.content)
            logging.error("blob_key")
            self.logo=blob_key
            logging.error("getLogo")
            logging.error(self)
            self.put()
        return self.logo
Example n. 22
    def post(self):
    
        j=self.request.body
        track=json.loads(j)
        id=memcache.get("7digital of %s"%track)
        if id is None:
            url=tools.get_url("7digital", "buytrack", track)
            logging.error(url)
            xml=tools.get_xml(url)

            tracka=xml.getElementsByTagName("track")

            id=tracka[0].attributes.get("id").value

            memcache.set("7digital of %s"%track,id)

        self.response.out.write(json.dumps(id))
Example n. 23
def search_artist(artist_name):

    logging.error("getting data of %s"%artist_name)
    data = None
    #data=memcache.get("search %s"%artist_name)
    if data is not None:
        logging.error("mbid from ndb or memcache get_data")
        return data

    url=tools.get_url('musicbrainz','artist',artist_name)
    logging.error(url)
    xml=tools.get_xml(url)
    parsed=xml.getElementsByTagName("artist")

    disambiguation=" "
    artists=[]

    if xml.getElementsByTagName("artist-list")[0].attributes.get("count").value == '1' :
        artist={}
        artist["mbid"]=parsed[0].attributes.get("id").value
        artist["name"]=parsed[0].getElementsByTagName("name")[0].childNodes[0].nodeValue
        artists.append(artist)
    else:

        for a in parsed:
            artist={}

            artist["mbid"]=a.attributes.get("id").value
            artist["name"]=a.getElementsByTagName("name")[0].childNodes[0].nodeValue

            try:
                artist["country"]=a.getElementsByTagName("area")[0].getElementsByTagName("name")[0].childNodes[0].nodeValue
            except:
                artist["country"]=""

            try:
                disambiguation=a.getElementsByTagName("disambiguation")[0].childNodes[0].nodeValue
            except:
                disambiguation=" "

            artists.append(artist)

    memcache.set("search %s"%artist_name, artists)

    return artists
Example n. 24
    def getData(self):
        logging.error("getData")

        mbid=self.key.id()
        url=tools.get_url("lastfm","artistInfo",mbid)
        j=tools.get_json(url)
   
        self.info=strip_tags(j["artist"]["bio"]["content"])
        self.name=j["artist"]["name"]
        self.image=j["artist"]["image"][4]["#text"]

        tags=[]
        for t in j["artist"]["tags"]["tag"]:
            tag=t["name"]
            tags.append(tag)
        self.tags=tags

        self.put()
Example n. 25
def getTopTracks():
    url = tools.get_url("lastfm", "toptracks", " ")
    logging.error(url)
    toptracks = tools.get_json(url)

    return toptracks
Example n. 26
def get_lastfmGenre_playlist(genre):
    url = tools.get_url("lastfm", "toptagtracks", genre)
    j = tools.get_json(url)
    return j
Example n. 27
def getTagTracks(genre):
    url = tools.get_url("lastfm", "toptagtracks", genre)
    logging.error(url)
    tracks = tools.get_json(url)

    return tracks
Example n. 28
def getArtistTracks(genre):
    url = tools.get_url("lastfm", "artisttoptracks", genre)
    logging.error(url)
    tracks = tools.get_json(url)

    return tracks
Example n. 29
def getEchoTagTracks(genre):
    playlist = {"data": []}
    url = tools.get_url("echonest", "genre", genre).replace(" ", "%20")
    logging.error(url)
    j = tools.get_json(url)
    return j["response"]["songs"]
Example n. 30
def get_playlist_from_url(playlist_name):

    playlist = {"data": []}

    if "frontpage" in playlist_name:
        logging.error("front")
        url = tools.get_url("lastfm", "toptracks", " ")
        logging.error(url)
        j = tools.get_json(url)

        for i in j["tracks"]["track"]:
            if len(playlist["data"]) < 11:
                mbid = i["mbid"]
                track_name = i["name"]
                artist_name = i["artist"]["name"]
                ancestor_key = Class.Artists().query(Class.Artists.artist_name == artist_name).get(keys_only=True)
                if ancestor_key is not None:
                    tracks = Class.Tracks().query(Class.Tracks.track_name == track_name, ancestor=ancestor_key).get()
                else:
                    tracks = None
                if tracks is None:
                    track_video = track.get_video(artist_name, track_name)
                else:
                    track_video = tracks.track_video

                video = {
                    "video_artist": artist_name,
                    "video_track": track_name,
                    "playlist_videos": track_video,
                    "mbid": mbid,
                }

                playlist["data"].append(video)

                p = Class.Playlists(
                    playlist_name=playlist_name,
                    playlist_json=j,
                    playlist_videos=playlist,
                    key=ndb.Key(Class.Playlists, playlist_name),
                )
                # p.put()
        memcache.set(playlist_name, playlist)
        return playlist

    logging.error("radio")
    if "radio" in playlist_name:
        if "artist" in playlist_name:
            params = playlist_name.split()
            mbid = params[1]
            url = tools.get_url("echonest", "playlist", mbid)
        else:
            genre = playlist_name[0 : playlist_name.find("radio")]
            url = tools.get_url("echonest", "genre", genre).replace(" ", "%20")

        logging.error(url)

        j = tools.get_json(url)

        for i in j["response"]["songs"]:
            track_name = i["title"]
            artist_name = i["artist_name"]
            ancestor_key = Class.Artists().query(Class.Artists.artist_name == artist_name).get(keys_only=True)
            if ancestor_key is not None:
                tracks = Class.Tracks().query(Class.Tracks.track_name == track_name, ancestor=ancestor_key).get()
            else:
                tracks = None
            if tracks is None:
                track_video = track.get_video(artist_name, track_name)
            else:
                track_video = tracks.track_video

            video = {"video_artist": artist_name, "video_track": track_name, "playlist_videos": track_video}

            playlist["data"].append(video)

        p = Class.Playlists(
            playlist_name=playlist_name,
            playlist_json=j,
            playlist_videos=playlist,
            key=ndb.Key(Class.Playlists, playlist_name),
        )
        p.put()
        memcache.set(playlist_name, playlist)
        logging.error(playlist)
        return playlist
Example n. 31
    def create(self):
        time_start = time.time()

        self.tracks = []
        filter = "toptracks"

        url = ""
        logging.error(url)
        if self.tipo == "tag":
            if {"name": self.param} in echonest_genres["response"]["genres"]:
                echo = GenrePL(self.param)
                echo.create()
                self.tracks = echo.tracks
                self.session = echo.session
                # self.createEchoTag()

                actual = time.time() - time_start
                logging.error("After create echo playlist= %s" % actual)
                return

            url = tools.get_url("lastfm", "genreCreate", self.param)
        elif self.tipo == "artist":
            echo = ArtistPL(self.param)
            echo.create()
            self.tracks = echo.tracks
            self.session = echo.session

            actual = time.time() - time_start
            logging.error("After create echo playlist= %s" % actual)
            return
            # url=tools.get_url("lastfm","artistCreate",self.param)
        elif self.tipo == "artist-radio":
            echo = ArtistR(self.param)
            echo.create()
            self.tracks = echo.tracks
            self.session = echo.session

            actual = time.time() - time_start
            logging.error("After create echo playlist= %s" % actual)
            return
            # url=tools.get_url("lastfm","artistCreate",self.param)
        else:
            url = tools.get_url("lastfm", "toptracks", " ")
            filter = "tracks"
        logging.error(url)
        page = urllib2.urlopen(url)
        p = page.read()
        j = json.loads(p)
        actual = time.time() - time_start
        logging.error("after json: %s" % actual)
        for d in j[filter]["track"]:

            track = {}
            track["artist"] = {}
            tracKey = ndb.Key("Track", d["name"] + " - " + d["artist"]["name"])
            trac = tracKey.get()
            if trac is not None:

                track["ytid"] = trac.ytid
                track["img"] = "http://img.youtube.com/vi/" + trac.ytid + "/0.jpg"

            mbid = d["artist"]["mbid"]

            if mbid == "":
                continue
            logging.error(mbid)
            cmbid = CorrectArtist.by_id(mbid)
            if cmbid is not None:
                track["artist"]["mbid"] = cmbid.mbid
                artist = ndb.Key("Artist", cmbid.mbid).get()
            else:
                track["artist"]["mbid"] = mbid
                artist = ndb.Key("Artist", mbid).get()

            if artist is not None:
                actual = time.time() - time_start
                logging.error("before similars: %s" % actual)

                if artist.name is not None:
                    track["artist"]["name"] = artist.name
                else:
                    track["artist"]["name"] = d["artist"]["name"]
                if artist.logo is not None:
                    track["artist"]["logo"] = images.get_serving_url(artist.logo)
                if artist.info is not None:
                    track["artist"]["info"] = artist.info
                if artist.tags is not None:
                    track["artist"]["tags"] = artist.tags
                if artist.similars is not None:
                    similars = []
                    s = ndb.get_multi(artist.similars)
                    for similar in s:
                        similars.append({"name": similar.name, "logo": similar.image, "mbid": similar.key.id()})

                    track["artist"]["similars"] = similars

                actual = time.time() - time_start
                logging.error("after getting similars: %s" % actual)
            else:
                track["artist"]["name"] = d["artist"]["name"]
                track["artist"]["similars"] = []
                track["artist"]["logo"] = ""
                track["artist"]["info"] = ""
                track["artist"]["tags"] = []
            track["name"] = d["name"]

            self.tracks.append(track)
Example n. 32
def parse_for_image(url):
    soup = BeautifulSoup(tools.get_url(url)[2])
    link = soup.find_all(src=compile('/comics/'))
    imgurl = sub(' ', '%20', link[0]['src'])
    imgext = imgurl[-3:]  # file extension, e.g. 'jpg' or 'png'
    return imgext, tools.get_url(imgurl)[2]
Example n. 33
    def get(self):
        tipo=self.request.get("tipo")
        if tipo=="album" or tipo == "artist":
            mbid=self.request.get("mbid")

            logging.error(self.request.arguments())

            d={'album':['number','asc'],'artist':['hottness','desc']}

            query="select video from Tracks where %s_mbid='%s' order by %s %s"%(tipo,mbid,d[tipo][0],d[tipo][1])
            data=list(ndb.gql(query))
            f=data.pop(0)
            first=f.video

            playlist=""
            videos=[]
            for d in data:
                if d.video not in videos:
                    playlist=playlist+d.video+","
                    videos.append(d.video)


            
        
        elif tipo=="lastfm":
            modo=self.request.get("modo")

            
            videos=[]
            playlist=""
            
            tracks="tracks"
            if modo=="hypped":
                url=tools.get_url("lastfm","hyppedtracks", " ")
            elif modo=="top":
                url=tools.get_url("lastfm","toptracks"," ")
            elif modo=="loved":
                url=tools.get_url("lastfm","lovedtracks"," ")
            elif modo=="tag":
                """first=memcache.get("lastfm first %s")
                    if first is not None:
                        playlist=memcache.get("lastfm playlist %s")
                        if playlist is not None:
                            url="http://www.youtube.com/embed/"+first+"?playlist="+playlist
                            self.render_playlist(url=url)"""
                genre=self.request.get("genre")
                url=tools.get_url("lastfm","toptagtracks",genre)
                tracks="toptracks"


            j=tools.get_json(url)

            for i in j[tracks]['track']:
                track_mbid=i['mbid']
                if track_mbid!="":
                    song=i['name']
                    artist=i['artist']['name']
                    artist_mbid=i['artist']['mbid']
                    # run the GQL lookup once and reuse the result
                    t=ndb.gql("select video from Tracks where track_mbid='%s'"%track_mbid).get()

                    if t is None:
                        video=track.get_video(artist,song)
                        if artist_mbid!="":
                            taskqueue.add(url='/artist', params={'mbid':artist_mbid})
                    else:
                        video=t.video
                    videos.append(video)

            if len(videos)>=1:
                first=videos.pop(0)
                for i in videos:
                    playlist=playlist+i+","



        url="http://www.youtube.com/embed/"+first+"?playlist="+playlist
        self.render_playlist(url=url)
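The handler above ends by stitching the first video id into a YouTube embed URL and the remaining ids into its playlist parameter. The same construction as a small helper; a sketch that assumes at least one id and is not part of the original code:

def embed_playlist_url(video_ids):
    # first id goes in the path, the remaining ids become the playlist parameter
    first, rest = video_ids[0], video_ids[1:]
    return "http://www.youtube.com/embed/" + first + "?playlist=" + ",".join(rest)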