def _get_video_files(cls, directory):
    """Collect all supported movie/TV-show files beneath a directory.

    Arguments:
        directory (str): Root directory to walk recursively.

    Returns:
        (tuple): Two lists — movie files, then TV-show files sorted by
        their sortable info.
    """
    recognised_extensions = {'.avi', '.mp4', '.mkv', '.srt'}
    movies, tv_shows = [], []
    for dirpath, _, filenames in os.walk(directory):
        for name in filenames:
            if os.path.splitext(name)[-1] not in recognised_extensions:
                continue
            full_path = os.path.join(dirpath, name)
            # guessit classifies the file purely from its name.
            if guessit(name)['type'] == 'movie':
                movies.append(Movie(full_path))
            else:
                tv_shows.append(Episode(full_path))
    print('{0} TV Shows Found / {1} Movies Found.'.format(
        len(tv_shows), len(movies)))
    tv_shows.sort(key=lambda episode: episode.get_sortable_info())
    return (movies, tv_shows)
def test_get_title(self):
    """Exhaustively compare Episode.get_title() against the title parsed
    straight out of every ground-truth file name.

    The nested loops enumerate title-slot combinations (t1/t2/t3),
    episode formats (SINGLE/DOUBLE/TRIPLE), the anime flag (a) and
    season/episode digit widths (se); each combination maps to a key of
    TEST_DATA['compile_truth'].
    """
    file_names = TEST_DATA['compile_truth']
    for t1 in ['Brian', '']:
        for t2 in ['Roger', '', 'Brian']:
            for t3 in ['John', '', 'Brian', 'Roger']:
                for eo in [SINGLE, DOUBLE, TRIPLE]:
                    for a in [True, False]:
                        for se in [[2, 1], [20, 10], [2, 100]]:
                            # Three-digit episode numbers without the anime
                            # flag are skipped — presumably an unsupported
                            # naming combination; TODO confirm.
                            if se[1] == 100 and not a:
                                continue
                            # Build the truth-table key: one char per title
                            # slot ('_' when absent), anime marker, format
                            # marker, then the two digit-width codes.
                            s = ''.join([t1[0] if t1 else '_',
                                         t2[0] if t2 else '_',
                                         t3[0] if t3 else '_',
                                         'A' if a else '_',
                                         eo[0],
                                         str(len(str(se[0])) + 3),
                                         str(len(str(se[1])))])
                            # print(s)
                            file_name = file_names[s]
                            location = os.path.join(
                                ANIME_DIR if a else SERIES_DIR,
                                'Freddie', 'Season 01', file_name)
                            e = Episode(location=location)
                            # Expected title: the text between ' - ' and the
                            # file extension; empty when the name carries no
                            # such segment.
                            title_truth = ''
                            title_split = file_name.split(' - ')
                            if len(title_split) > 1:
                                title_split = title_split[1].split('.')
                                if len(title_split) > 1:
                                    title_truth = title_split[0]
                            self.assertEqual(e.get_title(), title_truth)
def get_episode(number, new_rating):
    """Scrape the episode table and build the Episode matching *number*.

    Arguments:
        number: Episode number to look up (compared as a string).
        new_rating: Rating forwarded verbatim into the Episode.

    Returns:
        Episode: The populated episode; None when the matching row fails
        the numeric validation, or implicitly when no row matches.
    """
    html_contents = requests.get(base_url + episodes).text
    soup = BeautifulSoup(html_contents, 'html.parser')
    # Drop the header row of the episode table.
    ep_table = soup.find('table').find_all('tr')[1:]
    for entry in ep_table:
        columns = entry.find_all('td')
        numFound = scrub_string(columns[0].text)
        avg_score = scrub_string(columns[2].text)
        if numFound == str(number):
            # NOTE(review): this rejects the row only when BOTH cells are
            # non-numeric; 'or' may have been intended — confirm.
            if not numFound.isdigit() and not avg_score.replace('.', '', 1).isdigit():
                return None
            a_tag = entry.find_all('td')[1].find('a')
            # Strip known special-episode prefixes from the title.
            title = scrub_string(a_tag.text).replace(
                "Rockaroundtheclockdoughberfest: ", "")
            title = title.replace("Tropical Freeze: ", "")
            print("Fetching Episode " + numFound + " - " + title)
            href = scrub_string(a_tag.attrs['href'])
            date = scrub_string(columns[3].text)
            fork_ratings = get_fork_ratings(base_url + href)
            # Publish the per-fork scores for other module functions.
            global current_scores
            current_scores = fork_ratings
            image = scrub_string(get_image(base_url + href))
            synopsis = scrub_string(get_synopsis(base_url + href))
            duration = get_duration(numFound.strip())
            current_ep = Episode(title, numFound, date, duration,
                                 fork_ratings, image, new_rating, synopsis)
            print(json.dumps(current_ep.__dict__, indent=4))
            return current_ep
def __init__(self, data):
    """Initialise the parser from a CSV file path or a ready-made list.

    Arguments:
        data: Either a path ending in "csv" (each row: sample values
            followed by a timestamp in the last column) or a list that
            is used directly as the episode dataset. Anything else
            aborts the program.
    """
    # Default probability tables — presumably priors for the belief
    # network (2-value and 2x2 / 4x2 shapes); TODO confirm semantics.
    self.Xi = [1.0, 1.0]
    self.Yi = [[1.0, 1.0], [1.0, 1.0]]
    self.Xr = [1.0, 1.0]
    self.Yr = [[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]
    self.trial_number = 1
    # If data contains a csv path, reads the data from it
    if isinstance(data, str) and data[-3:] == "csv" and os.path.isfile(data):
        # 'rb' mode for csv.reader is the Python 2 idiom used here.
        with open(data, 'rb') as csv_file:
            reader = csv.reader(csv_file, delimiter=',')
            episode_list = []
            for row in reader:
                # Split data and time
                datalist = row[:-1]
                time = row[-1]
                # Transforms a list of strings in a list of ints
                int_datalist = map(int, datalist)
                episode_list.append(Episode(int_datalist, int(time)))
            self.episode_dataset = episode_list
    # If data contains a list, it's pure data
    elif isinstance(data, list):
        self.episode_dataset = data
    else:
        print "[ERROR]. DatasetParser. Invalid data input: " + str(data)
        quit(-1)
def load_episode_by_rss_id(self, rss_episode_id=None):
    """Load one episode (including its chapters) by RSS episode id.

    Arguments:
        rss_episode_id: Value of the ``rss_episode_id`` column to match.

    Returns:
        Episode: The populated episode with its chapters attached, or
        None when no episode matches (previously this crashed with a
        TypeError on subscripting the missing row).
    """
    sql = "SELECT * FROM " + self.table_episodes + " AS e WHERE rss_episode_id = ? LIMIT 1"
    cur = self.db.cursor()
    cur.row_factory = sqlite3.Row
    cur.execute(sql, [rss_episode_id])
    d = cur.fetchone()
    if d is None:
        # fetchone() yields None when the id is unknown — bail out
        # instead of raising TypeError on d['id'].
        return None
    e = Episode(episode_id=d['id'],
                rss_episode_id=d["rss_episode_id"],
                duration=d["duration"],
                title=d["title"],
                description=d["description"],
                subtitle=d["subtitle"],
                link=d["link"],
                published=d["published"],
                image=d["image"],
                chapters=[])
    sql = "SELECT * FROM " + self.table_chapters + " WHERE episode_id = ?"
    cur.row_factory = sqlite3.Row
    cur.execute(sql, [d["id"]])
    rows = cur.fetchall()
    chapters = []
    for c in rows:
        chapters.append(
            Chapter(c["start"], c["title"], chapter_id=c["id"],
                    episode_id=c["episode_id"], image=c["image"],
                    href=c["href"]))
    e.chapters = chapters
    return e
def label_set():
    """Label the set via user input.

    Walks every (title, description) pair in the global ``all_episodes``
    flat list, highlights each extracted name in context, asks the user
    whether it is a topic (t) or a guest (g), and appends the labelled
    token to data/labeled_test.txt. Any other answer skips the name.
    """
    for title, description in zip(all_episodes[0::3], all_episodes[1::3]):
        ep = Episode(title, description)
        ep_tokenized = ep.tokenize()
        for tokenized, name in ep_tokenized:
            text = title + "\n" + description
            last_pos = 0
            name_len = len(name)
            # Bug fix: escape the name so regex metacharacters in it
            # (e.g. '.', '(') match literally instead of as patterns.
            for it in re.finditer(re.escape(name), text):
                pos = it.start()
                print(text[last_pos:pos], end="")
                print(colored(name.title(), 'green'), end="")
                last_pos = pos + name_len
            print(text[last_pos:])
            print("Is " + colored(name.title(), 'green') + " a topic (t) or a guest (g)?")
            i = input()
            with open("data/labeled_test.txt", "a") as f:
                if i == "t":
                    f.write(tokenized)
                    f.write("\n")
                    f.write("T\n\n")
                elif i == "g":
                    f.write(tokenized)
                    f.write("\n")
                    f.write("G\n\n")
            print()
def check_for_empty_season(show, s):
    """Report an 'Empty Season' error when season *s* has no episodes.

    Returns the generated error for an episode-less season; None
    otherwise.
    """
    if s.episodes:
        return None
    placeholder = Episode(location=s.location, s_nr=s.s_nr, e_nr=1)
    return _generate_error(message='Empty Season', e=placeholder, show=show)
def getContent(self, category='all', subCategory=None):
    """Query the library section for media of the given (sub)category.

    Arguments:
        category (str): One of self.categories' keys.
        subCategory: Optional sub-category key (validated against
            getSubCategories(category)).

    Returns:
        list: Movie/Show/Episode wrappers for each matching element;
        elements of any other type are skipped.
    """
    assert category in self.categories.keys(), \
        "category must be one of the following: %s" % ", ".join(self.categories.keys())
    if subCategory:
        subCategory = str(subCategory)
        subCategories = self.getSubCategories(category)
        assert (subCategory == None and subCategories == None) or (subCategory in subCategories.keys()), \
            "subCategory must be one of the following: %s. use method getSubCategories() to get key/title" \
            " pairs." % ", ".join(subCategories.keys())
    if subCategory == None:
        container = self.server.query("/library/sections/%d/%s" % (self.key, category))
    else:
        container = self.server.query("/library/sections/%d/%s/%s" % (self.key, category, subCategory))
    content = []
    for e in container:
        if not 'type' in e.attrib:
            continue
        type_ = e.attrib['type']
        # Bug fix: with the old independent ifs, an element of any other
        # type appended the object from the previous iteration (or raised
        # NameError on the first); unknown types are now skipped.
        if type_ == 'movie':
            # append movie
            obj = Movie(e, self.server)
        elif type_ == 'show':
            # append show
            obj = Show(e, self.server)
        elif type_ == 'episode':
            obj = Episode(e, self.server)
        else:
            continue
        content.append(obj)
    return content
def just_tell(self, clipart, *args, **kwargs):
    """Run the teller on a single clipart selection and return the
    message it produces."""
    assert hasattr(self, 'tell'), "Model is not a teller"
    if isinstance(self, nn.Module):
        # Inference only: disable dropout/batch-norm training behavior.
        self.eval()
    ep = Episode([codraw_data.SelectClipart(clipart)])
    self.tell(ep, *args, **kwargs)
    return ep.get_last(codraw_data.TellGroup).msg
def get_page_episode_list(webtoon, page=1):
    """Fetch one page of a Naver webtoon's episode listing and wrap each
    episode row in an Episode object."""
    webtoon_base_url = 'http://comic.naver.com/webtoon/list.nhn'
    response = requests.get(webtoon_base_url, params={
        'titleId': webtoon.title_id,
        'page': page
    })
    soup = BeautifulSoup(response.text, 'html.parser')
    # Skip the header row.
    rows = soup.select('tr')[1:]
    episodes = []
    for item in rows:
        # Rows carrying a class attribute are banner/ad rows.
        if item.attrs.get('class'):
            continue
        href = item.select_one('td a').attrs.get('href')
        # The episode number lives in the link's 'no' query parameter.
        episode_no = parse_qs(urlparse(href).query)['no'][0]
        episodes.append(Episode(
            webtoon=webtoon,
            no=episode_no,
            url_thumbnail=item.select_one('td a img').attrs.get('src'),
            title=item.select_one('td.title a').text,
            rating=item.select_one('div.rating_type strong').text,
            created_date=item.select_one('td.num').text))
    return episodes
def match(p1, p2, games, gname):
    """Play *games* games of *gname* between players *p1* and *p2*.

    Which side starts is randomised per game. The module-level
    ``running`` flag (presumably toggled externally, e.g. by a signal
    handler — confirm) allows an early abort between games.

    Returns:
        int: 1 when the left player wins more games, 0 on a tie,
        -1 otherwise.
    """
    # game on
    log.info('playing <%s> against <%s>...', p1.q.fname,
             'itself' if p1 == p2 else p2.q.fname)
    wins_left = wins_right = draws = played = 0
    for game in range(games):
        left_starts = random.choice([True, False])
        # Run one full episode; `state` is the terminal game state
        # viewed from the left player ("player") side.
        state, _, rounds = Episode(AlgoPlay(), p1, p2).run(
            State.create(gname, left_starts))
        log.info('game %d: 1st=%s, rounds=%3d, winner=%s, score=%d/%d',
                 game, 'left ' if left_starts else 'right', rounds,
                 'left ' if state.player_wins() else
                 'right' if state.opponent_wins() else 'draw',
                 state.player_score(), state.opponent_score())
        played += 1
        if state.player_wins():
            wins_left += 1
        elif state.opponent_wins():
            wins_right += 1
        else:
            draws += 1
        if not running:
            break
    # `played` is at least 1 here: it is incremented before the break.
    log.info('stats: left %d%% / right %d%% / draw %d%%',
             wins_left * 100 / played, wins_right * 100 / played,
             draws * 100 / played)
    return 1 if wins_left > wins_right else 0 if wins_left == wins_right else -1
def do_POST(self):
    """Handle a GitHub webhook POST: on a push to the 'source' branch,
    clone the repository, run the Episode deploy, then clean up the
    working directory and answer 200."""
    event_type = self.headers.get('X-Github-Event')
    if event_type != 'push':
        # Only push events trigger a rebuild.
        return
    length = int(self.headers.get('Content-Length'))
    http_body = self.rfile.read(length).decode('utf-8')
    data = json.loads(http_body)
    ref = data.get('ref')
    if ref != 'refs/heads/source':
        # Ignore pushes to any branch other than 'source'.
        return
    # todo: pull repo & branch to source & build & push to master
    repo_addr = data.get("repository")['ssh_url']
    print('repo', repo_addr)
    repo = GitRepo(repo_address=repo_addr, dst=WORK_DIR)
    repo.clone()
    # Deploy runs from inside the freshly cloned working directory.
    os.chdir(WORK_DIR)
    repo.checkout_or_create("source")
    Episode().deploy()
    os.chdir("..")
    # Drop the working copy once the deploy has finished.
    shutil.rmtree(WORK_DIR)
    self.send_response(200)
    self.send_header("Content-type", "text/html")
    self.end_headers()
    # self.wfile.write(bytes("Hello World", "utf-8"))
    return
def getNextUnwatchedEpisode(self):
    """Return the episode that follows the last watched episode of the
    show across all seasons; None when everything is watched."""
    leaves_key = '/'.join(self.key.split('/')[:-1]) + '/allLeaves'
    leaves = self.server.query(leaves_key)
    successor = None
    # Scan from the newest episode backwards: the first watched episode
    # we meet is the last watched overall, and `successor` — the element
    # visited just before it in this reversed scan — is the one to
    # resume at.
    for item in reversed(leaves):
        if item.attrib.get('viewCount') == '1':
            if successor is None:
                # The most recent episode is already watched.
                return None
            return Episode(successor, self.server)
        successor = item
    # Nothing watched at all: start from the very first episode.
    return Episode(leaves[0], self.server)
def create_full_episodic_bn(bn_list, time):
    """Merge the episode datasets of several belief networks into one
    episodic belief network, restamping every episode with *time*."""
    merged_dataset = [Episode(episode.raw_data, time)
                      for bn in bn_list
                      for episode in bn.get_episode_dataset()]
    return BeliefNetwork("Episodic", merged_dataset)
def episodes(self):
    """Return this container's episodes, querying the server once and
    caching the wrappers on the instance."""
    if not self.episodes_:
        container = self.server.query(self.key)
        found = []
        for node in container:
            if node.attrib.get('type') == 'episode':
                found.append(Episode(node, self.server))
        self.episodes_ = found
    return self.episodes_
def getAllEpisodes(self):
    """Return every episode of the show, independent of seasons."""
    leaves_key = '/'.join(self.key.split('/')[:-1]) + '/allLeaves'
    container = self.server.query(leaves_key)
    result = []
    for node in container:
        if node.attrib.get('type') == 'episode':
            result.append(Episode(node, self.server))
    return result
def train(num_episodes=1000, save_every=100, checkpoint_dir="checkpoints",
          tensorboard_dir="tensorboard", tboard_every=10, find_target_prop=0):
    """Train a Policy with REINFORCE on self-generated guessing episodes.

    Arguments:
        num_episodes (int): Total number of episodes to train on.
        save_every (int): Checkpoint frequency, in episodes.
        checkpoint_dir (str): Directory for tfe.Saver checkpoints.
        tensorboard_dir (str): Directory for the summary writer.
        tboard_every (int): Scalar-summary frequency, in episodes.
        find_target_prop: Forwarded to Episode — presumably the
            probability of forcing the target guess; TODO confirm.

    Returns:
        Policy: The trained policy network.
    """
    pol = Policy()
    writer = tf.contrib.summary.create_file_writer(tensorboard_dir)
    for j in range(1, num_episodes + 1):
        random_secret = random.randint(0, config.max_guesses - 1)
        e = Episode(pol, random_secret, find_target_prop, True)
        history = e.generate()
        print("Episode:{}, length: {}".format(j, len(history)))
        # G accumulates the (negative) return: each extra step costs -1.
        G = -1
        optimizer = \
            tf.train.GradientDescentOptimizer(
                learning_rate=config.reinforce_alpha*G)
        # Walk the episode backwards, one policy-gradient step per action.
        for i in reversed(range(1, len(history))):
            history_so_far = history[:i]
            next_action, _ = history[i]
            with tfe.GradientTape() as tape:
                action_logits = pol(history_so_far, with_softmax=False)
                # Cross-entropy against the action actually taken.
                loss = tf.nn.softmax_cross_entropy_with_logits_v2(
                    labels=tf.one_hot(tf.convert_to_tensor([next_action]),
                                      config.max_guesses),
                    logits=action_logits)
            grads = tape.gradient(loss, pol.variables)
            optimizer.apply_gradients(zip(grads, pol.variables))
            G -= 1
            # Rescale the step size by the running return; pokes private
            # optimizer fields because learning_rate is fixed at
            # construction time.
            optimizer._learning_rate = G * config.reinforce_alpha
            optimizer._learning_rate_tensor = None
            # hack. Should be able to pass a callable as learning_rate, see
            # https://www.tensorflow.org/api_docs/python/tf/train/GradientDescentOptimizer#args
            # can I perhaps submit a PR to fix this bug?
            sys.stdout.write("{}/{}\r".format(len(history) - i, len(history)))
        if j % save_every == 0 or j == num_episodes:
            saver = tfe.Saver(pol.named_variables)
            save_path = os.path.join(
                checkpoint_dir,
                "episode{}".format(str(j).zfill(len(str(num_episodes)))))
            saver.save(save_path)
        if j % tboard_every == 0:
            with writer.as_default():
                with tf.contrib.summary.always_record_summaries():
                    tf.contrib.summary.scalar('total_return',
                                              tf.convert_to_tensor([G]),
                                              step=j)
    return pol
def test1():
    """Play a single episode of an RL 'X' agent versus a random 'O'
    player and return the agent together with the final board."""
    environment = Environment(RandomPlayer('O'))
    rl_agent = RLPlayer('X')
    ep = Episode(rl_agent, environment)
    start_board = p.empty_board()
    rl_agent, final_board = ep.execute(start_board)
    return rl_agent, final_board
def train(self):
    """Run one training pass over every recorded episode and reset the
    recorder.

    Returns:
        The accumulated 3-element loss array from the underlying
        _train call.
    """
    def gather(attr):
        # Stack the first current_step rows of `attr` from every
        # recorded episode into one contiguous batch.
        return np.concatenate(
            [getattr(ep, attr)[:ep.current_step] for ep in self.recorder])

    total_loss = np.array([0., 0., 0.])
    total_loss += self._train(gather('map_input'),
                              gather('reward'),
                              gather('policy_mask'),
                              gather('policy_one_hot'))
    # Start a fresh recording buffer now that training consumed the data.
    self.recorder = [Episode()]
    return total_loss
def getMediaObject(self):
    """Build the concrete media wrapper matching self.type.

    Returns False when the type is not one of the known media kinds.
    """
    wrappers = {
        'episode': Episode,
        'movie': Movie,
        'show': Show,
        'season': Season,
    }
    wrapper = wrappers.get(self.type)
    if wrapper is None:
        return False
    return wrapper(self.tag, self.server)
async def get_episodes():
    """Read the 'Episodes' sheet and build a dict of Episode objects
    keyed by lower-cased episode name.

    Reading stops at the first row whose name cell is empty; rows that
    cannot be parsed at all are skipped, and blank link cells become
    None — matching the original bare try/except behavior.
    """
    sheet = service.spreadsheets()
    episodes_result = sheet.values().get(spreadsheetId=ReccID,
                                         range='Episodes!A2:F1000').execute()
    rows = episodes_result.get('values', [])

    def cell(row, index):
        # Return the cell as a non-empty string, or None when missing
        # or blank (replaces six duplicated bare try/except blocks).
        if index >= len(row):
            return None
        value = str(row[index])
        return value if value != "" else None

    episodes = {}
    for row in rows:
        try:
            if len(row) == 0:
                # Original code skipped rows it failed to index.
                continue
            name = str(row[0])
            if name == "":
                # First blank name ends the table.
                break
            ep = Episode(name, cell(row, 1), cell(row, 2), cell(row, 3),
                         cell(row, 4), cell(row, 5))
            episodes[name.lower()] = ep
        except Exception:
            # Skip rows that cannot be parsed at all.
            continue
    return episodes
def just_draw(self, msg, scene=None, *args, **kwargs):
    """Run the drawer on a single message over an optional canvas.

    Arguments:
        msg: The teller message to draw from.
        scene: Optional initial canvas cliparts; defaults to an empty
            scene. (Was a mutable default ``[]`` — replaced with None
            to avoid the shared-mutable-default pitfall.)

    Returns:
        The AbstractScene built from a multi-clipart draw event, or the
        single drawn clipart when only a DrawClipart event occurred.
    """
    assert hasattr(self, 'draw'), "Model is not a drawer"
    if scene is None:
        scene = []
    episode = Episode([codraw_data.TellGroup(msg), codraw_data.ObserveCanvas(scene)])
    if isinstance(self, nn.Module):
        self.eval()
    self.draw(episode, *args, **kwargs)
    event_multi = episode.get_last(codraw_data.DrawGroup)
    if event_multi is not None:
        return codraw_data.AbstractScene(event_multi.cliparts)
    event_single = episode.get_last(codraw_data.DrawClipart)
    return event_single.clipart
def get_webtoon_episode_list(webtoon, page=1):
    """Return the list of Episodes found on one page of a Naver
    webtoon's list view.

    :param webtoon: webtoon object carrying the unique title_id
    :param page: episode-list page number to fetch
    :return: list(Episode)
    """
    webtoon_list_url = 'http://comic.naver.com/webtoon/list.nhn'
    response = requests.get(webtoon_list_url,
                            params={'titleId': webtoon.title_id, 'page': page})
    soup = BeautifulSoup(response.text, 'lxml')

    episode_list = []
    webtoon_table = soup.select_one('table.viewList')
    # Direct-child rows only; nested tables are not episode rows.
    for row in webtoon_table.find_all('tr', recursive=False):
        cells = row.find_all('td')
        # Rows with fewer than four cells are banners/ads, not episodes.
        if len(cells) < 4:
            continue
        td_thumbnail, td_title, td_rating, td_created_date = cells[:4]
        # The episode's unique number is the 'no' query parameter of
        # its link.
        link = td_thumbnail.a.get('href')
        episode_no = parse_qs(urlparse(link).query)['no'][0]
        episode_list.append(Episode(
            webtoon=webtoon,
            no=episode_no,
            url_thumbnail=td_thumbnail.a.img.get('src'),
            title=td_title.get_text(strip=True),
            rating=td_rating.strong.get_text(strip=True),
            created_date=td_created_date.get_text(strip=True)))
    return episode_list
def demonstration(self, informant_number):
    """Run the demonstration phase with one informant and build the
    robot's belief network about them.

    For each of ``self.demo_number`` trials the robot asks which side
    the sticker is on, checks the hint, and encodes the outcome as a
    4-element sample [Xr, Yr, Xi, Yi]; the encoding depends on whether
    the robot uses a mature or immature theory of mind (self.mature).

    Arguments:
        informant_number: Index used to tag the face samples and the
            resulting BeliefNetwork.
    """
    # Gets face samples for future recognition
    if not self.simulation:
        self.robot.animation_service.runTag("show")
        self.robot.acquire_examples(self.face_frames_captured, informant_number)
    self.robot.say(
        "We are starting a brief demonstration. I am going to ask you to tell me where "
        "the sticker is. We are going to undertake " + str(self.demo_number) +
        (" trial" if self.demo_number == 1 else " trials"))
    if not self.simulation:
        self.robot.animation_service.runTag("explain")
    time.sleep(2)
    demo_result = []
    for i in range(self.demo_number):
        # demo_sample = [Xr, Yr, Xi, Yi]
        demo_sample = [0, 0, 0, 0]
        if self.simulation:
            self.relocate_sticker()
        if i == self.demo_number - 1:
            self.robot.say("Now for the last time.")
        self.robot.say(
            "Can you suggest me the location of the sticker? Left or right?"
        )
        # 'A'/'B' are the recognised side answers — presumably mapping
        # to left/right; confirm against informant_vocabulary.
        hint = self.robot.listen_for_side(self.informant_vocabulary)
        found = self.robot.look_for_landmark(hint)
        if self.mature:
            # Mature ToM
            if (hint == 'A' and found) or (hint == 'B' and not found):
                demo_sample[0] = 1
                demo_sample[1] = 1
                demo_sample[2] = 1
                if hint == 'A':
                    demo_sample[3] = 1
        else:
            # Immature ToM
            if hint == 'A':
                demo_sample = [1, 1, 1, 1]
            else:
                demo_sample = [0, 0, 0, 0]
        demo_sample_episode = Episode(demo_sample, self.robot.get_and_inc_time())
        demo_result.append(demo_sample_episode)
        # Give experimenters the time to switch the sticker location
        if not self.simulation and i < self.demo_number - 1:
            time.sleep(5)
    self.robot.say("Excellent, now I know you a little more. Thank you")
    # Creates the belief network for this informer
    self.robot.beliefs.append(
        BeliefNetwork("Informer" + str(informant_number), demo_result))
def __init__(self, real, simulator):
    """Set up an experiment over a real and a simulated environment.

    When auto-exploration is enabled the UCB exploration constant is
    derived from the simulator's reward range, or zeroed under RAVE.
    """
    self.Real = real
    self.Simulator = simulator
    self.Episode = Episode()
    if ExperimentParams.AutoExploration:
        # RAVE supplies its own exploration bias, so disable UCB's.
        SearchParams.ExplorationConstant = (
            0 if SearchParams.UseRave
            else self.Simulator.GetRewardRange())
    self.Results = Results()
    MCTS.InitFastUCB(SearchParams.ExplorationConstant)
def copy_batch(self):
    """
    Make a copy of the current batch, duplicating every episode step
    by step
    :return: the copied batch
    """
    duplicate = Batch()
    for source_ep in self.episodes[:self.size]:
        ep_copy = Episode()
        for step in range(source_ep.len):
            ep_copy.add(source_ep.state_pool[step],
                        source_ep.action_pool[step],
                        source_ep.reward_pool[step],
                        source_ep.done_pool[step],
                        source_ep.next_state_pool[step])
        duplicate.add_episode(ep_copy)
    return duplicate
def get_webtoon_episode_list(title_id, page=1, min_no=1):
    """Scrape one page of a Naver webtoon's episode list.

    :param title_id: the webtoon's unique title id
    :param page: list page to fetch
    :param min_no: stop once an episode number below this value is met
    :return: list(Episode)
    """
    payload = {'titleId': title_id, 'page': page}
    response = requests.get('http://comic.naver.com/webtoon/list.nhn',
                            params=payload)
    soup = BeautifulSoup(response.text, 'lxml')

    episode_list = []
    # First <table class="viewList"> holds the episode rows.
    webtoon_table = soup.select_one('table.viewList')
    # recursive=False: direct-child <tr> rows only.
    for tr in webtoon_table.find_all('tr', recursive=False):
        td_list = tr.find_all('td')
        if len(td_list) < 4:
            # Banner/ad rows carry fewer than four cells.
            continue
        td_thumbnail = td_list[0]
        thumbnail_url = td_thumbnail.a.img.get('src')
        # Episode number and title id come from the link's query string
        # (parse_qs yields {key: [values]} dictionaries).
        queryset = parse_qs(urlparse(td_thumbnail.a.get('href')).query)
        no = queryset['no']
        title_id = queryset['titleId']
        if int(no[0]) < min_no:
            break
        episode_list.append(Episode(
            no=no[0],
            title_id=title_id[0],
            thumbnail_url=thumbnail_url,
            title=td_list[1].get_text(strip=True),
            rating=td_list[2].strong.get_text(strip=True),
            created_date=td_list[3].get_text(strip=True)))
    return episode_list
def sync_queue(queue=None):
    """Process the queued file jobs: deletions, overrides and moves,
    updating each job's report and the global SHOWS/CLEAN_UP state.

    Arguments:
        queue: Iterable of queued file jobs; falls back to the global
            QUEUE when empty or None.
    """
    if not queue:
        queue = QUEUE
    for file in queue:
        # Pure deletion jobs: drop the file's top-level directory.
        if file.delete:
            if recursive_delete(os.sep.join(file.old_location.split(os.sep)[:3 + MAC_OFFSET])):
                file.report['info'].append('Delete successful')
            else:
                file.report['error'].append('Delete failed')
            continue
        # Override: remove the existing file and forget its episode entry.
        if file.override:
            delete_file(file)
            try:
                e = SHOWS[file.series_name].get_episode_by_sxe(file.s_nr, file.e_nr)
                if e:
                    del SHOWS[file.series_name].seasons[file.s_nr].episodes[file.e_nr]
                    SHOWS[file.series_name].seasons[file.s_nr].episode_numbers.remove(file.e_nr)
            except KeyError:
                # Show/season/episode not tracked — nothing to remove.
                pass
            except ValueError:
                # Episode number already absent from the list.
                pass
        if file.type_option == 'Series' and file_exists(file, SHOWS):
            file.report['error'].append('File exists')
            continue
        try:
            shutil.move(file.old_location, file.location)
        except Exception as e:
            print('rename', e)
            file.report['error'].append('Copy failed')
            # NOTE(review): this aborts the remaining queue on a single
            # failed move — `continue` may have been intended; confirm.
            return
        if wait_on_creation(file.location):
            file.report['success'].append('Copy successful')
        else:
            file.report['error'].append('Copy failed')
        # Register the moved episode with its show (subtitles excluded).
        if file.type_option == 'Series' and file.extension not in SUBS:
            show = SHOWS[file.series_name]
            if not show.status == file.status:
                file.report['info'].append('Status changed to ' + file.status)
                show.status = file.status
            e = Episode(file.location)
            e.update_file_meta()
            if show.add_episode(e):
                file.report['info'].append('Season created')
        # Queue the now-possibly-empty source directory for cleanup.
        loc = os.sep.join(file.old_location.split(os.sep)[:3 + MAC_OFFSET])
        if os.path.isdir(loc):
            if loc not in CLEAN_UP:
                CLEAN_UP.append(loc)
def __init__(self, showDir, seasonNum, weight, partOneEp=-1):
    """Build a season from its directory listing.

    :param showDir: path of the show's directory
    :param seasonNum: season number (mapped to a directory name)
    :param weight: weight assigned to this season
    :param partOneEp: episode number of the first half of a two-part
        episode; -1 when the season has none
    """
    self.seasonNum = seasonNum
    self.weight = weight
    self.seasonDir = getSeasonDirFromNum(self.seasonNum)
    self.seasonPath = os.path.join(showDir, self.seasonDir)
    self.episodes = {}
    self.simpleWeights = False
    for entry in os.listdir(self.seasonPath):
        if entry == 'Thumbs.db':
            # Windows thumbnail cache, not an episode file.
            continue
        pair = episodeStr2IntPair(self.seasonDir + ', ' + entry)
        epNum = pair[1]
        # Flag the two halves of a two-part episode.
        self.episodes[epNum] = Episode(entry,
                                       epNum == partOneEp,
                                       epNum == (partOneEp + 1))
def get_webtoon_episode_list(webtoon, page=1):
    """Collect the Episode entries shown on one page of a Naver
    webtoon's list view."""
    webtoon_list_url = 'http://comic.naver.com/webtoon/list.nhn'
    response = requests.get(webtoon_list_url,
                            params={'titleId': webtoon.title_id, 'page': page})
    soup = BeautifulSoup(response.text, 'lxml')

    table = soup.select_one('table.viewList')
    collected = []
    for row in table.find_all('tr', recursive=False):
        cells = row.find_all('td')
        if len(cells) < 4:
            # Ad/banner rows have fewer than four cells.
            continue
        # The episode number is the 'no' query parameter of the link,
        # e.g. {no: [1923]} after parse_qs.
        link = cells[0].a.get('href')
        episode_no = parse_qs(urlparse(link).query)['no'][0]
        collected.append(Episode(
            webtoon=webtoon,
            no=episode_no,
            url_thumbnail=cells[0].a.img.get('src'),
            title=cells[1].get_text(strip=True),
            rating=cells[2].strong.get_text(strip=True),
            created_date=cells[3].get_text(strip=True)))
    return collected