def index(status = None):
    """Render the watch form; on a valid POST, persist a new watch entry."""
    add = Watch(request.form)
    # A plain GET carrying a status flag just bounces back to the root page.
    if request.method == 'GET' and status:
        return redirect('/')
    if request.method == 'POST' and add.validate():
        entry = Anime(add.name.data, add.subber.data, add.format.data,
                      add.quality.data, add.email.data)
        entry.save()
        flash(''' You are now watching [{0}] {1}. To unwatch this click <a href="/unsubscribe/{2}">here</a>. '''.format(add.subber.data, add.name.data, entry.h))
        return redirect('/success')
    return render_template('index.html', title='Watch My Anime', form=add, status=status)
def add_anime():
    """Create an Anime from the JSON request body.

    Returns 400 when the body cannot be parsed as JSON, otherwise the
    serialized new record with status 201.
    """
    anime_data = request.get_json(force=True, silent=True)
    # Fix: identity comparison (`is None`) instead of `== None`.
    if anime_data is None:
        return "Bad request", 400
    anime = Anime(anime_data["title"], anime_data["image"], anime_data["studio"],
                  anime_data["episodes"], anime_data["seasons"], None)
    anime.save()
    return json.dumps(anime.to_dict()), 201
def remove_anime(title):
    """Delete the anime identified by `title`.

    Returns 400 when the request body is not valid JSON, otherwise an
    empty body on success.
    """
    anime_data = request.get_json(force=True, silent=True)
    # Fix: identity comparison (`is None`) instead of `== None`.
    if anime_data is None:
        return "Bad request", 400
    # NOTE(review): the lookup result is unused -- presumably kept as an
    # existence check (find_by_title may raise/abort when absent); confirm
    # before removing it.
    anime = Anime.find_by_title([title])
    Anime.delete([title])
    return ""
def test_least_favorite_with_nodata(self):
    """Verify an exception is raised when removing an anime that is not in
    the favourites list. (Docstring translated from Japanese.)"""
    target = Anime()
    with self.assertRaises(Exception) as ctx:
        target.least_favorite('ワンピース')
    self.assertEqual('指定したアニメはお気に入りリストに存在しません', ctx.exception.args[0])
def test_single_call_favorite(self):
    """A single favorite() call registers the title as the first favourite.
    (Docstring translated from Japanese.)"""
    title = '日常'
    target = Anime()
    target.favorite(title)
    self.assertEqual(title, target.favorite_animes[0])
def test_favorite_send_duplication_anime(self):
    """Registering the same title twice must not create a duplicate entry.
    (Docstring translated from Japanese.)"""
    title = '日常'
    target = Anime()
    target.favorite(title)
    target.favorite(title)
    self.assertEqual(len(target.favorite_animes), 1)
    self.assertEqual(title, target.favorite_animes[0])
def __init__(self, client_id: str, client_secret: str):
    """Initialise both base classes explicitly, then attach the endpoint
    helpers for anime and manga."""
    credentials = {
        'client_id': client_id,
        'client_secret': client_secret
    }
    # Explicit base-class calls: this class inherits from both
    # Authenticator and Session, and each must be initialised.
    Authenticator.__init__(self, credentials)
    Session.__init__(self)
    self.anime = Anime()
    self.manga = Manga()
def get_list_of_animes(self):
    """Returns a list of Anime extracted from the content of the animelist.

    Fix: the original printed each title's UTF-8 bytes to stdout on every
    iteration -- leftover debugging output (a py2-style `print(...encode())`
    that emits a bytes repr under py3) -- removed here.
    """
    return [Anime.from_dict(data) for data in self.anime_data]
async def anime_list(session, username):
    """Fetch the user's completed animelist via a lazily created, shared
    AioJikan client, returning Anime(title, start_year) objects."""
    global jikan
    # Create the client once and reuse it across calls.
    if jikan is None:
        jikan = AioJikan(session=session)
    data = await jikan.user(username, "animelist", "completed")
    result = []
    for entry in data["anime"]:
        start_year = int(entry["start_date"].split("-")[0])
        result.append(Anime(entry["title"], start_year))
    return result
class Updater:
    """Wires the anime cache updater and the aria2 subscription downloader
    together from a config.json placed next to this module."""

    def __init__(self):
        self.load_config()
        self.anime = Anime(self.cache_dir)
        self.downloader = Subscribe(self.list_path, self.cache_dir,
                                    self.aria2_url, self.aria2_dir)

    def load_config(self):
        """Read config.json and derive all working paths.

        Exits the process (os._exit) on any configuration problem so the
        updater never runs half-configured.
        """
        self.main_dir = os.path.abspath(os.path.dirname(__file__))
        try:
            with open(os.path.join(self.main_dir, "config.json"), 'r', encoding='utf8') as f:
                config = json.load(f)
            # NOTE(review): the concatenations below assume path_format()
            # yields a trailing separator -- confirm in its definition.
            self.log_dir = path_format(config["log_dir"])
            if not os.path.isdir(self.log_dir):
                raise Exception("log dir not exist!")
            # aria2 parameters
            self.aria2_dir = path_format(config["download_dir"])
            self.aria2_url = config["aria2"]
            # two log files
            self.error_log = self.log_dir + "error.log"
            self.update_log = self.log_dir + "update.log"
            logs._init(self.error_log, self.update_log)
            # subscription list file path
            self.list_path = self.log_dir + "mylist.json"
            if not os.path.isfile(self.list_path):
                # Fix: message previously said "list.json" but the file
                # checked is mylist.json.
                raise Exception("mylist.json not exist!")
            # cache dir path: ./log/cache/
            self.cache_dir = self.log_dir + "cache/"
            if not os.path.isdir(self.cache_dir):
                os.makedirs(self.cache_dir)
        except Exception as e:
            print("[error] init: {}".format(e))
            os._exit(1)

    def update(self):
        """Refresh the anime cache."""
        self.anime.update()

    def download(self):
        """Run the subscription downloader."""
        self.downloader.download()
def unsubscribe(h):
    """Delete the watch entry uniquely identified by hash `h`, then redirect
    back to the index page."""
    if h:
        matches = Anime.getUnique(h)
        # Only act when the hash resolves to exactly one record.
        if matches.count() == 1:
            record = matches.first()
            record.delete()
            flash('You are no longer watching [{0}] {1}.'.format(record.subber, record.anime))
    return redirect('/')
def index(status=None):
    """Serve the watch form; register a new watch entry on a valid POST."""
    add = Watch(request.form)
    if request.method == 'GET' and status:
        return redirect('/')
    # Guard clause: anything other than a validated POST re-renders the form.
    if request.method != 'POST' or not add.validate():
        return render_template('index.html', title='Watch My Anime',
                               form=add, status=status)
    entry = Anime(add.name.data, add.subber.data, add.format.data,
                  add.quality.data, add.email.data)
    entry.save()
    flash(''' You are now watching [{0}] {1}. To unwatch this click <a href="/unsubscribe/{2}">here</a>. '''.format(add.subber.data, add.name.data, entry.h))
    return redirect('/success')
def unsubscribe(h):
    """Remove the subscription matching hash `h` and send the user home."""
    if h:
        found = Anime.getUnique(h)
        if found.count() == 1:
            sub = found.first()
            sub.delete()
            message = 'You are no longer watching [{0}] {1}.'.format(
                sub.subber, sub.anime)
            flash(message)
    return redirect('/')
def api_fetch(email):
    """Return pending updates for `email` as JSON, marking each entry as
    delivered; without an email, redirect to the index."""
    if not email:
        return redirect('/')
    payload = []
    for item in Anime.fetchUpdates(email):
        # Record that the caller has now seen everything up to `watched`.
        item.last_update = item.watched
        item.save()
        payload.append(item.toJSON())
    return dumps(payload)
def getAllRank(self):
    """Scrape the ANN ranking page and return a list of ranked Anime objects.

    Fix: both BeautifulSoup constructions now name 'html.parser' explicitly;
    relying on bs4's auto-detection emits GuessedAtParserWarning and makes
    the parse result depend on which parsers are installed.
    """
    html = urllib.request.urlopen(self.ann_rank_url)
    soupRank = BeautifulSoup(html, 'html.parser')
    animeList = []
    for tr in soupRank.findAll('tr', {"bgcolor": "#EEEEEE"}):
        # Each ranking row has four cells: rank, name, rating, (ignored).
        (rank, name, rating, _) = map(lambda w: w.contents[0], tr.findAll('td'))
        # Re-parse the name cell to pull out its link target.
        soupName = BeautifulSoup(str(name), 'html.parser')
        name = name.text
        url = (soupName.find('a', href=True)['href'])
        animeList.append(Anime(rank, name, rating, self.ann_base_url + url))
    return animeList
def get_anime(self, library: str) -> List[Anime]:
    """
    Loads all the shows and seasons in a library into a list of anime objects.

    :param library: The Plex library to look through.
    :return: A list of Anime objects representing the shows in the targeted library.
    """
    collected = []
    for show in self.get_shows(library):
        # The tvdb id is the last path segment of the guid, minus any query.
        tvdb_id = show.guid.rsplit('/')[-1].split('?')[0]
        regular_seasons = (s for s in show.seasons()
                           if s.title.lower() != 'specials')
        for season in regular_seasons:
            watched = sum(1 for ep in season.episodes() if ep.isWatched)
            collected.append(Anime(show.title, tvdb_id,
                                   str(season.seasonNumber), watched))
    return collected
def parse(animeElement: Element):
    """Assemble an Anime from an XML element by delegating each field to the
    corresponding AnimeXMLParser helper."""
    extract = AnimeXMLParser
    return Anime(
        animeId=extract.parseId(animeElement),
        title=extract.parseTitle(animeElement),
        mediaType=extract.parseMediaType(animeElement),
        genres=extract.parseGenres(animeElement),
        themes=extract.parseThemes(animeElement),
        vintage=extract.parseVintage(animeElement),
        summary=extract.parseSummary(animeElement),
        cast=extract.parseCast(animeElement),
        staff=extract.parseStaff(animeElement),
        companies=extract.parseCompany(animeElement),
        averageRating=extract.parseAverageRating(animeElement),
        totalVotes=extract.parseTotalVotes(animeElement),
    )
def update_anime(title):
    """Partially update the anime identified by `title` from the JSON body.

    Returns 400 when the body is not valid JSON; otherwise the serialized
    record after saving.
    """
    anime_data = request.get_json(force=True, silent=True)
    # Fix: identity comparison (`is None`) instead of `== None`.
    if anime_data is None:
        return "Bad request", 400
    anime = Anime.find_by_title([title])
    # Apply only the updatable fields the caller actually sent (replaces the
    # original copy-pasted if-chain).
    for field in ("title", "studio", "episodes", "seasons"):
        if field in anime_data:
            setattr(anime, field, anime_data[field])
    return json.dumps(anime.save().to_dict())
def getRecommendations(userId):
    """Fetch recommended anime for a user from the stateless web API.

    Raises requests.HTTPError for non-2xx responses.
    """
    url = ('https://stateless.pythonanywhere.com/animewebapi/recommendations'
           '?userId=' + str(userId))
    response = session.get(url)
    # Fix: the original referenced `response.raise_for_status` without
    # calling it, so HTTP errors were silently ignored.
    response.raise_for_status()
    anime = []
    for a in response.json()['anime']:
        anime.append(
            Anime(a['title'], a['japaneseTitles']['romaji'],
                  a['japaneseTitles']['native'], a['description'], a['score'],
                  a['links']['anilist'], a['links']['mal'], a['image']))
    return anime
async def anime_list(session, username):
    """Query AniList's GraphQL API and return the user's completed entries
    as Anime(romaji_title, start_year, english_title) objects."""
    payload = {"query": q, "variables": {"userName": username}}
    async with session.post("https://graphql.anilist.co", json=payload) as resp:
        d = await resp.json()
    completed = []
    for group in d["data"]["MediaListCollection"]["lists"]:
        if group["name"] != "Completed":
            continue
        for entry in group["entries"]:
            media = entry["media"]
            completed.append(Anime(media["title"]["romaji"],
                                   media["startDate"]["year"],
                                   media["title"]["english"]))
    return completed
def getAnime(animeId):
    """Look up a single anime by id; return an Anime, or None when the API
    has no entry.

    Raises requests.HTTPError for non-2xx responses and requests.Timeout
    after 4 seconds.
    """
    url = 'https://stateless.pythonanywhere.com/anilist/anime?id=' + str(
        animeId)
    response = session.get(url, timeout=4)
    # Fix: `raise_for_status` was referenced but never called, so HTTP
    # error responses slipped through to the json() call below.
    response.raise_for_status()
    animejson = response.json()['anime']
    if animejson:
        return Anime(animejson['title'], animejson['japaneseTitles']['romaji'],
                     animejson['japaneseTitles']['native'],
                     animejson['description'], animejson['score'],
                     animejson['links']['anilist'], animejson['links']['mal'],
                     animejson['image'], animeId)
    return None
def fetchAnisongs(self):
    """Build Anime objects from the user's animelist data and load their
    songs into the model.

    Drives the import dialog's loading widget: emits the current title and a
    progress tick per entry, and stops early if the user dismisses the
    widget mid-load. Always closes both the widget and the dialog at the end.
    """
    user_animes = list()
    for data in self.user_animelist.anime_data:
        self.import_dialog.anisong_loading_widget.infoReceived.emit(
            str(data['anime_title']))
        user_animes.append(Anime.from_dict(data))
        self.import_dialog.anisong_loading_widget.progressed.emit()
        # User clicked on Cancel while loading anisongs
        # (the widget becoming invisible is the cancellation signal).
        if not self.import_dialog.anisong_loading_widget.isVisible():
            break
    # Flatten the songs of every loaded anime into one list for the model.
    anisongs = list()
    for user_anime in user_animes:
        anisongs += user_anime.songs
    self.model.insertRows(anisongs)
    self.import_dialog.anisong_loading_widget.close()
    self.import_dialog.close()
def main(args):
    """Run a batch of backtracking pathfinding tests over randomly seeded
    worlds and print summary statistics.

    Comments translated from Polish; the Polish console output strings are
    preserved verbatim.
    """
    r_list = [[12, 11, 5, 3], [17, 7, 5, 3]]
    anim = False
    n_tests = 50        # number of test worlds
    opt_cnt = 0         # worlds where the optimal route was found
    fail_cnt = 0        # worlds where no route could be found
    mean_exc = 0        # mean absolute route overshoot vs. the optimum
    random_seed = 1
    for i in range(n_tests):
        w = Wourld(r_list, random_seed)
        w.combine2Rooms()
        w.calcTargets()
        agent = Agent(w.agent_start_point, w.targets)
        # Fix: the original ran backtracking_algorithm() a second time inside
        # the success branch (a duplicated copy-paste block); one call is
        # enough and avoids re-running the search on an already-solved agent.
        if not agent.backtracking_algorithm(w):
            print(i, w.shortest_route, "NIE UDAŁO SIĘ ZNALEŹĆ ŚCIEŻKI")
            fail_cnt = fail_cnt + 1
        else:
            track = agent.track[0] + agent.track[1]
            len_track = len(track)
            # iteration, shortest track, robot track
            if len_track == w.shortest_route:
                opt_cnt = opt_cnt + 1
            print(i, w.shortest_route, len(track))
            mean_exc = mean_exc + len_track - w.shortest_route
            if anim:
                anime = Anime(w, agent, 'm')
                input()
        random_seed = random_seed + 1
    mean_exc = mean_exc / n_tests
    print("- liczba testów: ", n_tests)
    print("- liczba tras najbardziej optymalnych: ", opt_cnt)
    print("- liczba tras nieznalezionych: ", fail_cnt)
    print("- średni współczynnik bezwzględnej nadmiarowości tras: ", mean_exc)
    return 0
# NOTE(review): this fragment references `json`, `anime`, `character`, `img`,
# `html_quote` and `tags`, which are bound earlier in the file (presumably
# inside a scraping loop) -- their exact shapes are assumed, not visible here.
# `json` appears to be a plain dict keyed by anime name, shadowing the module.

# Register the character under the anime entry unless already present.
if not any(character in x for x in json[anime]['characters']):
    json[anime]['characters'].append(
        {character: {
            'quotes': [],
            'image': img
        }})
# Append the new quote to the matching character's quote list.
for i in json[anime]['characters']:
    character_json = i
    if character in i:
        # print(type(tags.split(',')))
        q = Quote(html_quote, tags.split(','), 0, 0)
        i[character]['quotes'].append(q)

# Convert the accumulated dict into Anime/Character objects and pickle them.
ANIME = []
for anime_name in json:
    # print(json[anime_name]['image'])
    anime = Anime(anime_name, json[anime_name]['image'], [], 0)
    for char in json[anime_name]['characters']:
        for character_name in char:
            quotes = char[character_name]['quotes']
            image = char[character_name]['image']
            character = Character(character_name, image, quotes, 0)
            anime.set_characters(character)
    ANIME.append(anime)

with open('db.pickle', 'wb') as db:
    pickle.dump(ANIME, db)
def view_anime(title):
    """Render the detail page for the anime with the given title."""
    match = Anime.find_by_title([title])
    return render_template("anime.html", anime=match)
def list_anime():
    """Return every stored anime as JSON of the form {"result": [...]}."""
    payload = {"result": [entry.to_dict() for entry in Anime.all()]}
    return json.dumps(payload)
# Regular expression verified using http://www.regexr.com/ # on every episode of anime I have downloaded. regex = r'\[(.+)\][ _]+(.+?)[ _]*-?[ _]*(\d+)[ _]*((\[|\().*((480|720|1080|BD)p?).*(\]|\)))?([ _]*\[.+][ _]*)?\.(\w+)' details = re.search(regex, name) try: if details and len(details.groups()) == 10: subber = details.group(1) title = details.group(2) episode = str(int(details.group(3))) quality = details.group(7) format = details.group(10).upper() match = Anime.getAllMatches(subber, title, episode, quality, format) for watch in match: message = Message( 'New Episode of {0} Available'.format(watch.anime), sender='A Letter Bee <*****@*****.**>', ) message.add_recipient(watch.email) message.html = ''' Episode {0} of {1} is now available. Click <a href="{2}">here</a> to download it now. <br /> <br /> <small>To stop receiving notifications click <a href="http://watchmyani.me/unsubscribe/{3}">here</a>.</small> '''.format(episode, title, anime['links'][0]['href'], watch.h)
from animeScraper import AnimeHTMLParser
from anime import Anime
from event import Event

# NOTE(review): AnimeEventHTMLParser is used below but not imported here --
# either it comes from elsewhere in the file or this import is missing.

# Parse the colon-separated key:value lines of db_config.
# Fix: the config file handle was never closed; use a context manager.
with open("db_config") as configFile:
    for line in configFile.readlines():
        elements = line.rstrip("\n").split(":")
        if (elements[0] == "ip"):
            ip = elements[1]
        elif (elements[0] == "user"):
            user = elements[1]
        elif (elements[0] == "db"):
            db = elements[1]
        elif (elements[0] == "pwd"):
            pwd = elements[1]

connexion = pymysql.connect(host=ip, user=user, db=db, passwd=pwd)

baseUrl = 'http://www.animenewsnetwork.com/encyclopedia/anime.php?id='
parser = AnimeHTMLParser()
eventParser = AnimeEventHTMLParser()
begin = 14723
end = 14724
for i in range(begin, end):
    print(str(i - begin + 1))
    # One Anime per encyclopedia page: feed() scrapes it, insert() stores it.
    anime = Anime(parser, eventParser)
    anime.feed(baseUrl + str(i))
    anime.insert(connexion)
# Regular expression verified using http://www.regexr.com/ # on every episode of anime I have downloaded. regex = r'\[(.+)\][ _]+(.+?)[ _]*-?[ _]*(\d+)[ _]*((\[|\().*((480|720|1080|BD)p?).*(\]|\)))?([ _]*\[.+][ _]*)?\.(\w+)' details = re.search(regex, name) try: if details and len(details.groups()) == 10: subber = details.group(1) title = details.group(2) episode = str(int(details.group(3))) quality = details.group(7) format = details.group(10).upper() match = Anime.getAllMatches(subber, title, episode, quality, format) for watch in match: message = Message( 'New Episode of {0} Available'.format(watch.anime), sender = 'A Letter Bee <*****@*****.**>', ) message.add_recipient(watch.email) message.html = ''' Episode {0} of {1} is now available. Click <a href="{2}">here</a> to download it now. <br /> <br /> <small>To stop receiving notifications click <a href="http://watchmyani.me/unsubscribe/{3}">here</a>.</small> '''.format(episode, title, anime['links'][0]['href'], watch.h)
def parse_anime(soup, write_image=False) -> Anime:
    '''
    Parse an anime page from the world-art site.

    Takes a BeautifulSoup object positioned at the page root and returns a
    populated Anime object, or None when the page has no title element.
    When write_image is True, also downloads the poster image into images/.
    (Docstring translated from the original Russian.)
    '''
    # Object to fill. Each field maps the Russian row caption ('hint') shown
    # on the page to the transformer ('tr') applied to that row's cell text.
    # NOTE(review): `null` is presumably an identity transformer defined
    # elsewhere in this file -- confirm.
    anime = Anime(
        name='',
        fields={
            'engname': {
                'hint': 'Название',
                'tr': null
            },
            'genre': {
                'hint': 'Жанр',
                'tr': genre_parse
            },
            'target': {
                'hint': 'Целевая аудитория',
                'tr': null
            },
            'type': {
                'hint': 'Тип',
                'tr': type_parse
            },
            'base': {
                'hint': 'Основа',
                'tr': null
            },
            'season': {
                'hint': 'Сезон',
                'tr': season_parse
            },
            'director': {
                'hint': 'Режиссёр',
                'tr': null
            },
            'score': {
                'hint': 'Средний балл',
                'tr': score_parse
            },
            'voted': {
                'hint': 'Проголосовало',
                'tr': voted_parse
            },
            'rating': {
                'hint': 'Место в рейтинге',
                'tr': rating_parse
            },
        },
        tags={
            # 'tagname' : {
            #     'desc' : 'description...',
            #     'score' : float
            # }
        },
        annotation=None,
        coms=[])
    # Title: the only size-5 <font> element on the page.
    namesoup = soup.find(lambda tag: tag.name == 'font' and tag.has_attr(
        'size') and tag['size'] == '5')
    if namesoup is None:
        return None
    anime.name = namesoup.text
    # Info table, located by a hard-coded path through the page layout;
    # any layout change on the site will break this navigation.
    table = (soup.body.center.find_all('table')[6].tr.td.table.tr.contents[4].
             find_all('table')[1].find_all('tr')[1].contents[2])
    # Fill each field from the table row whose bold caption matches its hint.
    for f in anime.fields:
        tag = table.find(lambda tag: re.match(anime.fields[f][
            'hint'], tag.text) is not None and tag.b is not None and tag.b.text
            .startswith(anime.fields[f]['hint']))
        if tag is None:
            anime.fields[f] = None
        else:
            anime.fields[f] = anime.fields[f]['tr'](
                tag.find_all('td')[2].text.strip())
    # Fall back to the release ('Выпуск') or premiere ('Премьера') rows when
    # no season row was found.
    if anime.fields['season'] is None:
        tag = table.find(lambda tag: re.match('Выпуск', tag.text) is not None)
        if tag is not None:
            anime.fields['season'] = date_parse(
                tag.find_all('td')[2].text.strip())
        else:
            tag = table.find(
                lambda tag: re.match('Премьера', tag.text) is not None)
            if tag is not None:
                anime.fields['season'] = date_parse(
                    tag.find_all('td')[2].text.strip())
    # Tags with their descriptions and numeric scores.
    for tag in soup.select('.newtag'):
        anime.tags[tag.a.text] = {
            'desc': tag.a['title'],
            'score': float(tag.font.text)
        }
    # Annotation; skipped when only the site's placeholder text is present.
    # NOTE(review): the bare except silently swallows any layout mismatch.
    try:
        anime.annotation = (soup.center.find_all(
            'table')[6].tr.td.table.tr.contents[4].contents[14].tr.td.p)
        if (anime.annotation.text.strip(
        ) == "Для этого аниме есть описание (1), но вы можете написать ещё одно."
            ):
            anime.annotation = None
        else:
            anime.annotation = anime.annotation.prettify()
    except:
        pass
    # Up to three "best reviews" comment blocks, reached via sibling hops.
    try:
        anime.coms = [None, None, None]
        bestcoms = soup.find(
            lambda tag: tag.text.strip() == 'Лучшие отзывы на это аниме')
        t = nsib(bestcoms, 5)
        anime.coms[0] = t.p.prettify()
        t = nsib(t, 6)
        anime.coms[1] = t.p.prettify()
        t = nsib(t, 6)
        anime.coms[2] = t.p.prettify()
    except:
        pass
    # Poster download (the three rating ids are skipped; reason not recorded
    # in the source).
    if not write_image or anime.fields['rating'] in [1233, 3016, 3334]:
        return anime
    rt = str(anime.fields['rating'])
    # File name: zero-padded rating, a dot, then the title ('/' -> '|').
    fname = '0' * (4 - len(rt)) + rt + '. ' + anime.name
    fname = re.sub(r'/', '|', fname)
    src = soup.find(lambda tag: tag.name == 'img' and tag.has_attr('alt') and
                    tag['alt'].startswith('постер аниме'))['src']
    # Retry until the server returns real image bytes rather than an HTML page.
    content = None
    while content is None:
        content = req.get(src).content
        if content.startswith('<html>'.encode()):
            content = None
            time.sleep(0.1)
    with open('images/' + fname + re.search(r'(\.\w+)$', src).group(1),
              'wb') as file:
        file.write(content)
    return anime
def __init__(self):
    """Load configuration, then build the cache handler and downloader.

    load_config() (defined elsewhere on this class) populates cache_dir,
    list_path, aria2_url and aria2_dir, so it must run first.
    """
    self.load_config()
    # Anime manages the cached metadata under cache_dir.
    self.anime = Anime(self.cache_dir)
    # Subscribe drives aria2 downloads from the subscription list.
    self.downloader = Subscribe(self.list_path, self.cache_dir,
                                self.aria2_url, self.aria2_dir)