def test_get_title(self):
    """Exhaustively check Episode.get_title() against the pre-computed truth data.

    Iterates every combination of up to three title parts, episode option,
    anime flag, and season/episode number widths; the expected title is
    re-derived from the truth file name itself.
    """
    file_names = TEST_DATA['compile_truth']
    for t1 in ['Brian', '']:
        for t2 in ['Roger', '', 'Brian']:
            for t3 in ['John', '', 'Brian', 'Roger']:
                for eo in [SINGLE, DOUBLE, TRIPLE]:
                    for a in [True, False]:
                        for se in [[2, 1], [20, 10], [2, 100]]:
                            # 3-digit episode numbers are only exercised for anime.
                            if se[1] == 100 and not a:
                                continue
                            # Build the key into the truth table from the combination.
                            s = ''.join([t1[0] if t1 else '_',
                                         t2[0] if t2 else '_',
                                         t3[0] if t3 else '_',
                                         'A' if a else '_',
                                         eo[0],
                                         str(len(str(se[0])) + 3),
                                         str(len(str(se[1])))])
                            # print(s)
                            file_name = file_names[s]
                            location = os.path.join(ANIME_DIR if a else SERIES_DIR,
                                                    'Freddie', 'Season 01', file_name)
                            e = Episode(location=location)
                            # Expected title: text between ' - ' and the extension dot.
                            title_truth = ''
                            title_split = file_name.split(' - ')
                            if len(title_split) > 1:
                                title_split = title_split[1].split('.')
                                if len(title_split) > 1:
                                    title_truth = title_split[0]
                            self.assertEqual(e.get_title(), title_truth)
def just_tell(self, clipart, *args, **kwargs):
    """Run this model's tell phase for a single clipart and return the message."""
    assert hasattr(self, 'tell'), "Model is not a teller"
    # Torch-based models must be switched to inference mode first.
    if isinstance(self, nn.Module):
        self.eval()
    ep = Episode([codraw_data.SelectClipart(clipart)])
    self.tell(ep, *args, **kwargs)
    return ep.get_last(codraw_data.TellGroup).msg
def get_episode(self, url):
    """Scrape the episode page at *url* using the scraper config for its domain."""
    scraper_config = self.scrap_info[get_tld(url)]['episode']
    ep = Episode(scraper_config)
    ep.scrap(url)
    return ep.get()
def load_episode_by_rss_id(self, rss_episode_id=None):
    """Load a single episode, with its chapters, by its RSS episode id.

    NOTE(review): cur.fetchone() returns None when no row matches, which
    would raise a TypeError on the d['id'] access below — confirm callers
    only pass ids known to exist.
    """
    sql = "SELECT * FROM " + self.table_episodes + " AS e WHERE rss_episode_id = ? LIMIT 1"
    cur = self.db.cursor()
    cur.row_factory = sqlite3.Row  # enable access to columns by name
    cur.execute(sql, [rss_episode_id])
    d = cur.fetchone()
    e = Episode(episode_id=d['id'], rss_episode_id=d["rss_episode_id"],
                duration=d["duration"], title=d["title"],
                description=d["description"], subtitle=d["subtitle"],
                link=d["link"], published=d["published"], image=d["image"],
                chapters=[])
    # Second query: attach every chapter that belongs to this episode.
    sql = "SELECT * FROM " + self.table_chapters + " WHERE episode_id = ?"
    cur.row_factory = sqlite3.Row
    cur.execute(sql, [d["id"]])
    d = cur.fetchall()
    chapters = []
    for c in d:
        chapters.append(
            Chapter(c["start"], c["title"], chapter_id=c["id"],
                    episode_id=c["episode_id"], image=c["image"],
                    href=c["href"]))
    e.chapters = chapters
    return e
def label_set(): """ Lables the the set via user input. """ for title, description in zip(all_episodes[0::3], all_episodes[1::3]): ep = Episode(title, description) ep_tokenized = ep.tokenize() for tokenized, name in ep_tokenized: text = title + "\n" + description last_pos = 0 name_len = len(name) for it in re.finditer(name, text): pos = it.start() print(text[last_pos:pos], end="") print(colored(name.title(), 'green'), end="") last_pos = pos + name_len print(text[last_pos:]) print("Is " + colored(name.title(), 'green') + " a topic (t) or a guest (g)?") i = input() with open("data/labeled_test.txt", "a") as f: if i == "t": f.write(tokenized) f.write("\n") f.write("T\n\n") elif i == "g": f.write(tokenized) f.write("\n") f.write("G\n\n") print()
def train(num_episodes=1000, save_every=100, checkpoint_dir="checkpoints",
          tensorboard_dir="tensorboard", tboard_every=10, find_target_prop=0):
    """Train the guessing Policy with a REINFORCE-style update.

    Generates one episode per iteration, replays it backwards, and applies
    a cross-entropy gradient step per action, scaled by the running return.
    Checkpoints every `save_every` episodes and logs the total return to
    TensorBoard every `tboard_every` episodes. Returns the trained Policy.
    """
    pol = Policy()
    writer = tf.contrib.summary.create_file_writer(tensorboard_dir)
    for j in range(1, num_episodes + 1):
        random_secret = random.randint(0, config.max_guesses - 1)
        e = Episode(pol, random_secret, find_target_prop, True)
        history = e.generate()
        print("Episode:{}, length: {}".format(j, len(history)))
        # G is the (negative) return-so-far: each extra step costs -1.
        G = -1
        optimizer = \
            tf.train.GradientDescentOptimizer(
                learning_rate=config.reinforce_alpha*G)
        for i in reversed(range(1, len(history))):
            history_so_far = history[:i]
            next_action, _ = history[i]
            with tfe.GradientTape() as tape:
                action_logits = pol(history_so_far, with_softmax=False)
                loss = tf.nn.softmax_cross_entropy_with_logits_v2(
                    labels=tf.one_hot(tf.convert_to_tensor([next_action]),
                                      config.max_guesses),
                    logits=action_logits)
            grads = tape.gradient(loss, pol.variables)
            optimizer.apply_gradients(zip(grads, pol.variables))
            G -= 1
            # Rescale the step size by the updated return by mutating the
            # optimizer's private attributes (see note below).
            optimizer._learning_rate = G * config.reinforce_alpha
            optimizer._learning_rate_tensor = None
            # hack. Should be able to pass a callable as learning_rate, see
            # https://www.tensorflow.org/api_docs/python/tf/train/GradientDescentOptimizer#args
            # can I perhaps submit a PR to fix this bug?
            sys.stdout.write("{}/{}\r".format(len(history) - i, len(history)))
        if j % save_every == 0 or j == num_episodes:
            saver = tfe.Saver(pol.named_variables)
            save_path = os.path.join(
                checkpoint_dir,
                "episode{}".format(str(j).zfill(len(str(num_episodes)))))
            saver.save(save_path)
        if j % tboard_every == 0:
            with writer.as_default():
                with tf.contrib.summary.always_record_summaries():
                    tf.contrib.summary.scalar('total_return',
                                              tf.convert_to_tensor([G]),
                                              step=j)
    return pol
def _prepare_synthetic_episodes(self):
    """Create one synthetic Episode per configured title with a dummy URL."""
    self.episodes = []
    for guid, ep_title in enumerate(self.episode_titles):
        episode = Episode(self.sub)
        episode.guid = guid
        episode.title = ep_title
        episode.url = 'http://www.example.com/foo/bar/baz.mp3'
        self.episodes.append(episode)
def test1():
    """Play one RL episode of X against a random O player on an empty board."""
    environment = Environment(RandomPlayer('O'))
    rl_agent = RLPlayer('X')
    ep = Episode(rl_agent, environment)
    trained_agent, final_board = ep.execute(p.empty_board())
    return trained_agent, final_board
def just_draw(self, msg, scene=None, *args, **kwargs):
    """Run this model's draw phase for a single teller message.

    Args:
        msg: the teller message to condition on.
        scene: cliparts already on the canvas; defaults to an empty canvas.

    Returns:
        An AbstractScene when the drawer emitted a DrawGroup event,
        otherwise the clipart from the last DrawClipart event.
    """
    assert hasattr(self, 'draw'), "Model is not a drawer"
    # Bug fix: avoid a shared mutable default argument (`scene=[]`).
    if scene is None:
        scene = []
    episode = Episode([codraw_data.TellGroup(msg), codraw_data.ObserveCanvas(scene)])
    # Torch-based models must be switched to inference mode first.
    if isinstance(self, nn.Module):
        self.eval()
    self.draw(episode, *args, **kwargs)
    event_multi = episode.get_last(codraw_data.DrawGroup)
    if event_multi is not None:
        return codraw_data.AbstractScene(event_multi.cliparts)
    event_single = episode.get_last(codraw_data.DrawClipart)
    return event_single.clipart
def _OnQueryMember(root_episode, sharer_member):
    """Migrate a share from `sharer_member` to the closed-over `member`.

    NOTE(review): this reads `member`, `client`, `callback`, `Version` and
    `partial` from an enclosing scope — it is a closure lifted out of a
    Transform-style method; it cannot run standalone.
    """
    # The sharing member must actually own the episode being migrated.
    assert Member.OWNED in sharer_member.labels, sharer_member
    logging.info('migrating share from user "%s" to user "%s" in episode "%s" in viewpoint "%s"' % \
                 (sharer_member.user_id, member.user_id,
                  root_episode.episode_id, root_episode.viewpoint_id))
    if Version._mutate_items:
        Episode._MigrateShare(client, root_episode, sharer_member=sharer_member,
                              recipient_member=member, add_photo_ids=None,
                              callback=partial(callback, member))
    else:
        # Dry-run mode: skip the mutation but keep the callback chain moving.
        callback(member)
def match(p1, p2, games, gname):
    """Play `games` games of `gname` between p1 (left) and p2 (right).

    Returns 1 if left won more games, 0 on a tie, -1 if right won more.
    The starting side is chosen at random per game.
    """
    # game on
    log.info('playing <%s> against <%s>...', p1.q.fname,
             'itself' if p1==p2 else p2.q.fname)
    wins_left = wins_right = draws = played = 0
    for game in range(games):
        left_starts = random.choice([True, False])
        state,_,rounds = Episode(AlgoPlay(), p1, p2).run(State.create(gname, left_starts))
        log.info('game %d: 1st=%s, rounds=%3d, winner=%s, score=%d/%d',
                 game, 'left ' if left_starts else 'right', rounds,
                 'left ' if state.player_wins() else 'right' if state.opponent_wins() else 'draw',
                 state.player_score(), state.opponent_score()
                 )
        played += 1
        if state.player_wins():
            wins_left += 1
        elif state.opponent_wins():
            wins_right += 1
        else:
            draws += 1
        # `running` is an external flag allowing the match to be aborted early.
        if not running:
            break
    log.info('stats: left %d%% / right %d%% / draw %d%%',
             wins_left*100/played, wins_right*100/played, draws*100/played
             )
    return 1 if wins_left > wins_right else 0 if wins_left == wins_right else -1
def __init__(self, real, simulator):
    """Initialise the experiment: exploration constant, results, fast-UCB table."""
    self.Real = real
    self.Simulator = simulator
    self.Episode = Episode()
    # Derive the UCT exploration constant automatically when requested.
    if ExperimentParams.AutoExploration:
        if SearchParams.UseRave:
            SearchParams.ExplorationConstant = 0
        else:
            SearchParams.ExplorationConstant = self.Simulator.GetRewardRange()
    self.Results = Results()
    MCTS.InitFastUCB(SearchParams.ExplorationConstant)
def copy_batch(self):
    """
    Make a copy of the current batch
    :return: the copied batch
    """
    clone = Batch()
    for idx in range(self.size):
        src = self.episodes[idx]
        copied = Episode()
        # Transfer every stored transition of the source episode.
        for step in range(src.len):
            copied.add(src.state_pool[step], src.action_pool[step],
                       src.reward_pool[step], src.done_pool[step],
                       src.next_state_pool[step])
        clone.add_episode(copied)
    return clone
def sync_queue(queue=None):
    """Process queued file operations: deletes, moves/renames and show updates.

    Falls back to the global QUEUE when no queue is given. Each file's
    `report` dict accumulates info/success/error messages for later display.
    """
    if not queue:
        queue = QUEUE
    for file in queue:
        # Deletions are terminal: report the outcome and move on.
        if file.delete:
            if recursive_delete(os.sep.join(file.old_location.split(os.sep)[:3 + MAC_OFFSET])):
                file.report['info'].append('Delete successful')
            else:
                file.report['error'].append('Delete failed')
            continue
        if file.override:
            delete_file(file)
        # Drop any stale in-memory entry for the same season/episode number.
        try:
            e = SHOWS[file.series_name].get_episode_by_sxe(file.s_nr, file.e_nr)
            if e:
                del SHOWS[file.series_name].seasons[file.s_nr].episodes[file.e_nr]
                SHOWS[file.series_name].seasons[file.s_nr].episode_numbers.remove(file.e_nr)
        except KeyError:
            pass
        except ValueError:
            pass
        if file.type_option == 'Series' and file_exists(file, SHOWS):
            file.report['error'].append('File exists')
            continue
        try:
            shutil.move(file.old_location, file.location)
        except Exception as e:
            # NOTE(review): a failed move aborts the whole queue, not just
            # this file — confirm this is intended.
            print('rename', e)
            file.report['error'].append('Copy failed')
            return
        if wait_on_creation(file.location):
            file.report['success'].append('Copy successful')
        else:
            file.report['error'].append('Copy failed')
        # Register the moved file with its show (subtitle files excluded).
        if file.type_option == 'Series' and file.extension not in SUBS:
            show = SHOWS[file.series_name]
            if not show.status == file.status:
                file.report['info'].append('Status changed to ' + file.status)
                show.status = file.status
            e = Episode(file.location)
            e.update_file_meta()
            if show.add_episode(e):
                file.report['info'].append('Season created')
        # Remember the (now possibly empty) source directory for cleanup.
        loc = os.sep.join(file.old_location.split(os.sep)[:3 + MAC_OFFSET])
        if os.path.isdir(loc):
            if loc not in CLEAN_UP:
                CLEAN_UP.append(loc)
def Transform(self, client, member, callback):
    """Migrate legacy SHARED-only member rows to the new sharing model.

    Queries the episode's owning member and re-issues the share from that
    owner to `member`; `callback(member)` is invoked when the chain is done.
    """
    from episode import Episode
    from member import Member

    def _OnQueryMember(root_episode, sharer_member):
        # The sharing member must actually own the episode being migrated.
        assert Member.OWNED in sharer_member.labels, sharer_member
        logging.info('migrating share from user "%s" to user "%s" in episode "%s" in viewpoint "%s"' % \
                     (sharer_member.user_id, member.user_id,
                      root_episode.episode_id, root_episode.viewpoint_id))
        if Version._mutate_items:
            Episode._MigrateShare(client, root_episode,
                                  sharer_member=sharer_member,
                                  recipient_member=member,
                                  add_photo_ids=None,
                                  callback=partial(callback, member))
        else:
            # Dry-run mode: skip the mutation but keep the callback chain moving.
            callback(member)

    def _OnQueryEpisode(root_episode):
        # Look up the member row of the episode's owner, then migrate.
        Member.Query(client, root_episode.user_id, root_episode.episode_id,
                     None, partial(_OnQueryMember, root_episode))

    # Only rows that were shared *to* this user (and not owned) need migration.
    if member.sharing_user_id is None and Member.OWNED not in member.labels:
        assert list(member.labels) == [Member.SHARED], member
        Episode.Query(client, member.episode_id, None, _OnQueryEpisode)
    else:
        callback(member)
def getNextUnwatchedEpisode(self):
    """Return the episode following the last watched one, across all seasons.

    Returns None when every episode is watched, the very first episode when
    none are watched, and None for a show with no episodes at all
    (previously this last case raised IndexError).
    """
    key = '/'.join(self.key.split('/')[:-1]) + '/allLeaves'
    element = self.server.query(key)
    prev = None
    # Walk from the newest episode backwards; the first watched one we meet
    # is the last watched overall, and `prev` then holds its successor.
    for e in reversed(element):
        if ('viewCount' in e.attrib) and (e.attrib['viewCount'] == '1'):
            # `is None` instead of `== None` (identity, not equality).
            if prev is None:
                # The newest episode itself is watched: nothing left to watch.
                return None
            return Episode(prev, self.server)
        prev = e
    # Nothing watched yet: start from the first episode, if the show has any.
    if len(element) == 0:
        return None
    return Episode(element[0], self.server)
def __init__(self, data):
    """Build the parser either from a CSV file path or from an episode list.

    NOTE(review): this is Python 2 code (print statement, csv opened in
    'rb', `map` result used as a list) — do not port piecemeal.
    """
    # NOTE(review): the 1.0-filled tables look like smoothing priors for the
    # consumer's probability estimates — confirm their exact meaning there.
    self.Xi = [1.0, 1.0]
    self.Yi = [[1.0, 1.0], [1.0, 1.0]]
    self.Xr = [1.0, 1.0]
    self.Yr = [[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]
    self.trial_number = 1
    # If data contains a csv path, reads the data from it
    if isinstance(data, str) and data[-3:] == "csv" and os.path.isfile(data):
        with open(data, 'rb') as csv_file:
            reader = csv.reader(csv_file, delimiter=',')
            episode_list = []
            for row in reader:
                # Split data and time
                datalist = row[:-1]
                time = row[-1]
                # Transforms a list of strings in a list of ints
                int_datalist = map(int, datalist)
                episode_list.append(Episode(int_datalist, int(time)))
            self.episode_dataset = episode_list
    # If data contains a list, it's pure data
    elif isinstance(data, list):
        self.episode_dataset = data
    else:
        print "[ERROR]. DatasetParser. Invalid data input: " + str(data)
        quit(-1)
def get_last_episodes(tvshow_id, hd=False):
    """
    Returns the list with most recent episodes for the given tvshow available
    in show RSS website. The list returned does not specify any specific order.

    :param tvshow_id: id of the tv show to get the episodes from.
    :param hd: if true it will be returned the HD version of the episodes.
    :raises RSSFormatError: if the rssfeed returned by the show rss is incorrect
    """
    try:
        try:
            # get to the channel element
            channel = ElementTree.fromstring(read_rssfeed(tvshow_id)).find("channel")
        except ElementTree.ParseError as parse_error:
            raise RSSFormatError(parse_error.msg)
        parsed = []
        for item in channel.findall("item"):
            parsed.append(Episode.from_string(item.find("title").text,
                                              item.find("link").text))
        # TODO include HD episodes
        # remove HD episodes
        episodes = [episode for episode in parsed if not episode.hd]
    except AttributeError:
        # one of the necessary elements was not provided
        raise RSSFormatError()
    return episodes
def _get_video_files(cls, directory):
    """Get all the valid movie/tv show files from the search directory.

    Arguments:
        directory (str): The directory to search in.

    Returns:
        (tuple): Two lists one for movie files another tv show files.
    """
    video_extensions = ('.avi', '.mp4', '.mkv', '.srt')
    movies, tv_shows = [], []
    for dirpath, _, filenames in os.walk(directory):
        for filename in filenames:
            if os.path.splitext(filename)[-1] not in video_extensions:
                continue
            full_path = os.path.join(dirpath, filename)
            # guessit classifies by filename alone; anything not a movie
            # is treated as a tv-show episode.
            if guessit(filename)['type'] == 'movie':
                movies.append(Movie(full_path))
            else:
                tv_shows.append(Episode(full_path))
    print('{0} TV Shows Found / {1} Movies Found.'.format(
        len(tv_shows), len(movies)))
    return (movies,
            sorted(tv_shows, key=lambda show_file: show_file.get_sortable_info()))
def do_POST(self):
    """GitHub webhook endpoint: rebuild and deploy on pushes to `source`.

    Ignores any event that is not a push to refs/heads/source. Clones the
    repository into WORK_DIR, runs the deploy, removes the working copy,
    and answers 200.
    """
    event_type = self.headers.get('X-Github-Event')
    if event_type != 'push':
        return
    length = int(self.headers.get('Content-Length'))
    http_body = self.rfile.read(length).decode('utf-8')
    data = json.loads(http_body)
    ref = data.get('ref')
    if ref != 'refs/heads/source':
        return
    # todo: pull repo & branch to source & build & push to master
    repo_addr = data.get("repository")['ssh_url']
    print('repo', repo_addr)
    repo = GitRepo(repo_address=repo_addr, dst=WORK_DIR)
    repo.clone()
    os.chdir(WORK_DIR)
    repo.checkout_or_create("source")
    Episode().deploy()
    os.chdir("..")
    # Remove the working copy once the deploy has finished.
    shutil.rmtree(WORK_DIR)
    self.send_response(200)
    self.send_header("Content-type", "text/html")
    self.end_headers()
    # self.wfile.write(bytes("Hello World", "utf-8"))
    return
def _process(self, url):
    """Split a cached episode's audio into roughly 10-second, caption-aligned parts.

    Returns the list of saved parts, or an empty list when the episode is
    not in the cache.
    """
    log.info(f'Processing {url}')
    episode = Episode.cached(url)
    if not episode:
        return []
    tmp_audio_path = utils.convert(file_path=episode.audio_path, extension='.flac')
    subtitles = Subtitles.from_srt(episode.captions_path)
    results = []
    start = None
    accum_text = []
    for line, next_line in pairwise(subtitles.lines):
        # log.info(f'Processing {line}')
        if start is None:
            start = line.start
        accum_text.append(line.text)
        # Flush once the accumulated span exceeds 10 s (timestamps appear
        # to be milliseconds — the 10 * 1000 threshold implies it).
        if line.end - start > 10 * 1000:
            self._save_part(start, line.end, tmp_audio_path,
                            ' '.join(accum_text), results)
            accum_text = []
            # The next part starts at the following caption line.
            start = next_line.start
    log.info(f'Processed {url}')
    return results
def getContent(self, category='all', subCategory=None):
    """Return wrapped Movie/Show/Episode objects for a library section listing.

    Args:
        category: listing category; must be a key of self.categories.
        subCategory: optional sub-category key (see getSubCategories()).

    Returns:
        A list of Movie/Show/Episode wrappers, one per recognised element.
    """
    assert category in self.categories.keys(), \
        "category must be one of the following: %s" % ", ".join(self.categories.keys())
    if subCategory:
        subCategory = str(subCategory)
        subCategories = self.getSubCategories(category)
        assert (subCategory == None and subCategories == None) or (subCategory in subCategories.keys()), \
            "subCategory must be one of the following: %s. use method getSubCategories() to get key/title" \
            " pairs." % ", ".join(subCategories.keys())
    if subCategory == None:
        container = self.server.query("/library/sections/%d/%s" % (self.key, category))
    else:
        container = self.server.query("/library/sections/%d/%s/%s" % (self.key, category, subCategory))
    content = []
    for e in container:
        if not 'type' in e.attrib:
            continue
        type_ = e.attrib['type']
        # Bug fix: previously `obj` was appended for *every* typed element,
        # so an unknown type re-appended the previous element's wrapper
        # (or raised NameError on the first element). Unknown types are
        # now skipped explicitly.
        if type_ == 'movie':
            obj = Movie(e, self.server)
        elif type_ == 'show':
            obj = Show(e, self.server)
        elif type_ == 'episode':
            obj = Episode(e, self.server)
        else:
            continue
        content.append(obj)
    return content
def get_page_episode_list(webtoon, page=1):
    """Fetch one listing page of a Naver webtoon and parse its episode rows."""
    webtoon_base_url = 'http://comic.naver.com/webtoon/list.nhn'
    response = requests.get(webtoon_base_url, params={
        'titleId': webtoon.title_id,
        'page': page
    })
    soup = BeautifulSoup(response.text, 'html.parser')
    rows = soup.select('tr')[1:]
    episode_list = []
    for row in rows:
        # Keep only rows without a class attribute (same filter as before).
        if row.attrs.get('class'):
            continue
        link_href = row.select_one('td a').attrs.get('href')
        episode_list.append(Episode(
            webtoon=webtoon,
            no=parse_qs(urlparse(link_href).query)['no'][0],
            url_thumbnail=row.select_one('td a img').attrs.get('src'),
            title=row.select_one('td.title a').text,
            rating=row.select_one('div.rating_type strong').text,
            created_date=row.select_one('td.num').text))
    return episode_list
def test_compiler(self):
    """Exhaustively check Episode.compile_file_name() against truth data.

    Iterates every combination of up to three title parts, episode option,
    anime flag, and season/episode number widths, comparing the compiled
    name against the keyed truth table.
    """
    comp_true = TEST_DATA['compile_truth']
    for t1 in ['Brian', '']:
        for t2 in ['Roger', '', 'Brian']:
            for t3 in ['John', '', 'Brian', 'Roger']:
                for eo in [SINGLE, DOUBLE, TRIPLE]:
                    for a in [True, False]:
                        for se in [[2, 1], [20, 10], [2, 100]]:
                            # 3-digit episode numbers are only exercised for anime.
                            if se[1] == 100 and not a:
                                continue
                            f = File(s_nr=se[0], e_nr=se[1],
                                     series_name='Freddie', title=t1,
                                     title2=t2, title3=t3,
                                     episode_option=eo, anime=a)
                            f.extension = 'mkv'
                            comp = Episode.compile_file_name(None, f)
                            # Build the key into the truth table from the combination.
                            s = ''.join([t1[0] if t1 else '_',
                                         t2[0] if t2 else '_',
                                         t3[0] if t3 else '_',
                                         'A' if a else '_',
                                         eo[0],
                                         str(len(str(se[0])) + 3),
                                         str(len(str(se[1])))])
                            # print(s)
                            self.assertEqual(comp, comp_true[s])
def episodes_for_podcast(self, podcast, sort_order=Podcast.SortOrder.NewestFirst):
    """Fetch all episodes of *podcast*, paging until the server's total is reached."""
    collected = []
    page = 1
    while True:
        payload = {'page': page, 'sort': sort_order, 'uuid': podcast.uuid}
        response = self._session.post("https://play.pocketcasts.com"
                                      "/web/episodes/find_by_podcast.json",
                                      json=payload)
        response.raise_for_status()
        body = response.json()
        for episode_json in body['result']['episodes']:
            # episode = episode_json
            collected.append(Episode._from_json(episode_json, podcast))
        # we should never ever receive more episodes than specified
        # well, better be fault tolerant
        if body['result']['total'] > len(collected):
            page += 1
        else:
            return collected
def check_for_empty_season(show, s):
    """Report an 'Empty Season' error for a season that contains no episodes."""
    if s.episodes:
        return
    placeholder = Episode(location=s.location, s_nr=s.s_nr, e_nr=1)
    return _generate_error(message='Empty Season', e=placeholder, show=show)
def get_episode(number, new_rating):
    """Scrape the episode table for episode `number` and build an Episode.

    Returns None when the matched row's number and average-score columns are
    not numeric; returns None implicitly when no row matches.
    """
    html_contents = requests.get(base_url + episodes).text
    soup = BeautifulSoup(html_contents, 'html.parser')
    ep_table = soup.find('table').find_all('tr')[1:]  # skip the header row
    for entry in ep_table:
        columns = entry.find_all('td')
        numFound = scrub_string(columns[0].text)
        avg_score = scrub_string(columns[2].text)
        if numFound == str(number):
            # NOTE(review): non-numeric rows are rejected — presumably
            # specials/placeholder rows; confirm against the site layout.
            if not numFound.isdigit() and not avg_score.replace('.', '', 1).isdigit():
                return None
            a_tag = entry.find_all('td')[1].find('a')
            # Strip known title prefixes before display.
            title = scrub_string(a_tag.text).replace(
                "Rockaroundtheclockdoughberfest: ", "")
            title = title.replace("Tropical Freeze: ", "")
            print("Fetching Episode " + numFound + " - " + title)
            href = scrub_string(a_tag.attrs['href'])
            date = scrub_string(columns[3].text)
            fork_ratings = get_fork_ratings(base_url + href)
            # Expose the latest ratings to other module-level consumers.
            global current_scores
            current_scores = fork_ratings
            image = scrub_string(get_image(base_url + href))
            synopsis = scrub_string(get_synopsis(base_url + href))
            duration = get_duration(numFound.strip())
            current_ep = Episode(title, numFound, date, duration, fork_ratings,
                                 image, new_rating, synopsis)
            print(json.dumps(current_ep.__dict__, indent=4))
            return current_ep
def update_episodes(self, reload_metadata=True):
    """Rebuild this season's episode map from the files on disk.

    Args:
        reload_metadata: when True, refresh each episode's file metadata.

    Duplicate episode numbers (other than the 999 sentinel) are shifted into
    the 777+ overflow range so every file keeps a unique slot; overflow
    entries are excluded from episode_numbers.
    """
    self.episodes = {}
    self.episode_numbers = []
    files = listdir(self.location)
    for file in files:
        episode = Episode(location=path.join(self.location, file), s_nr=self.s_nr)
        if reload_metadata:
            episode.update_file_meta()
        if episode.e_nr in self.episode_numbers:
            if not episode.e_nr == 999:
                # Collision: park the episode in the 777+ overflow range.
                episode.e_nr = 777
                while episode.e_nr in self.episodes:
                    episode.e_nr += 1
        self.episodes[episode.e_nr] = episode
        if episode.e_nr < 777:
            self.episode_numbers.append(episode.e_nr)
    # Bug fix: sorted() returns a new list and the result was discarded,
    # leaving episode_numbers unsorted. Sort in place instead.
    self.episode_numbers.sort()
def create_full_episodic_bn(bn_list, time):
    """Merge the episode datasets of every network into one episodic BN."""
    merged_dataset = [Episode(episode.raw_data, time)
                      for bn in bn_list
                      for episode in bn.get_episode_dataset()]
    return BeliefNetwork("Episodic", merged_dataset)
def getAllEpisodes(self):
    """ returns a list of all episodes of the show independent of seasons. """
    key = '/'.join(self.key.split('/')[:-1]) + '/allLeaves'
    result = []
    for e in self.server.query(key):
        if e.attrib.get('type') == 'episode':
            result.append(Episode(e, self.server))
    return result
def episodes(self):
    """Lazily load and cache this container's episodes on first access."""
    if not self.episodes_:
        loaded = []
        for e in self.server.query(self.key):
            if e.attrib.get('type') == 'episode':
                loaded.append(Episode(e, self.server))
        self.episodes_ = loaded
    return self.episodes_
def collect_episode(bc, env):
    """
    Roll out one full episode with the cloning policy.

    :param bc: BehaviorCloning
    :param env: OpenAI gym environment
    """
    episode = Episode(env.discount)
    state = env.reset()
    done = False
    while not done:
        action, action_prob = bc.single_action(state)
        next_state, reward, done, _ = env.step(action)
        episode.insert(Transition(state, action, action_prob, reward,
                                  next_state, done))
        state = next_state
    return episode
def test_train(self):
    """Train Q-learning on random test states, then verify the greedy policy."""
    q = MemoryOnlyHashQ('test', 3)
    algo = AlgoQLearning(q)
    # train on 10 games
    for _ in range(100):
        Episode(algo, PolicyRandom()).run(StateTest(random.choice([True, False])))
    # check if policy is optimal
    policy = PolicyExploit(q)
    # print(q.get_all(StateTest().inputs()))
    expectations = [
        ([0, 0, 0], 1),
        ([2, 0, 0], 1),
        ([0, 0, 2], 1),
    ]
    for cells, optimal_action in expectations:
        state = StateTest()
        state.cells = cells
        self.assertEqual(policy.play(state)[0], optimal_action, state)
def label_set(all_names, dead_names, fict_names):
    """Label name mentions as topics (T) or guests (G) using name tries.

    Names found in the fictional or dead tries become topics; names absent
    from the full trie become guests. Labelled samples are printed to
    stdout; guests are only printed when mentioned 6-24 times.
    """
    topic_names = {}
    guest_names = {}
    for title, description in zip(all_episodes[0::3], all_episodes[1::3]):
        ep = Episode(title, description)
        ep_tokenized = ep.tokenize()
        for tokenized, name in ep_tokenized:
            # Already-classified names just collect further samples.
            if name in topic_names:
                topic_names[name].append(tokenized)
                continue
            if name in guest_names:
                guest_names[name].append(tokenized)
                continue
            in_fict, _ = trie.find_prefix(fict_names, name)
            in_dead, _ = trie.find_prefix(dead_names, name)
            if in_fict or in_dead:
                topic_names[name] = [tokenized]
                continue
            in_all, _ = trie.find_prefix(all_names, name)
            if not in_all:
                guest_names[name] = [tokenized]
    for episode_list in topic_names.values():
        for text in episode_list:
            print(text)
            print("T")
            print()
    for episode_list in guest_names.values():
        # Only emit guests with a plausible mention count (6..24).
        n_mentions = len(episode_list)
        if n_mentions > 5 and n_mentions < 25:
            for text in episode_list:
                print(text)
                print("G")
                print()
def starred_episodes(self):
    """Return all starred episodes, resolving each podcast only once."""
    response = self._session.post("https://play.pocketcasts.com"
                                  "/web/episodes/"
                                  "starred_episodes.json")
    response.raise_for_status()
    podcast_cache = {}
    starred = []
    for episode_json in response.json()['episodes']:
        uuid = episode_json['podcast_uuid']
        # Cache podcast lookups so each uuid triggers at most one fetch.
        if uuid not in podcast_cache:
            podcast_cache[uuid] = self.podcast(uuid)
        starred.append(Episode._from_json(episode_json, podcast_cache[uuid]))
    return starred
def parse_rss_file(self, filename):
    """Parse an RSS feed file into Episode objects, keeping only complete ones."""
    root = self._fetch_root(filename)
    self.title = (root.findall("./channel/title")[0].text)
    parsed = []
    for el in root.findall("./channel/item"):
        episode = Episode(self)
        self.pubdate_to_timestamp(el.findall('pubDate')[0].text, episode)
        episode.title = el.findall('title')[0].text
        episode.guid = el.findall('guid')[0].text
        episode.description = el.findall('description')[0].text
        enclosures = el.findall('enclosure')
        if enclosures and len(enclosures) > 0:
            episode.url = enclosures[0].get('url')
            episode.enclosure_length = enclosures[0].get('length')
        # Only keep episodes that carry all mandatory fields.
        if episode.pubDate and episode.url and episode.title \
                and episode.guid:
            parsed.append(episode)
    return parsed
def rename(self, details):
    """Rename one downloaded file according to the configured options.

    Args:
        details: a (working_dir, filename) pair.

    Known per-file domain errors are swallowed so the run continues with
    the next file; configuration/network errors and unexpected errors
    terminate the process.
    """
    working, filename = details
    try:
        tv = TvRenamr(working, self.config, options.debug, options.dry)
        episode = Episode(tv.extract_details_from_file(filename, user_regex=options.regex))
        # Command-line overrides take precedence over extracted details.
        if options.show:
            episode.show_name = options.show
        if options.season:
            episode.season = options.season
        if options.episode:
            episode.episode = options.episode
        episode.title = tv.retrieve_episode_name(episode, library=options.library,
                                                 canonical=options.canonical)
        episode.show_name = tv.format_show_name(episode.show_name, the=options.the,
                                                override=options.show_override)
        path = tv.build_path(
            episode,
            rename_dir=options.rename_dir,
            organise=options.organise,
            format=options.output_format
        )
        tv.rename(filename, path)
    except (ConfigNotFoundException,
            NoMoreLibrariesException,
            NoNetworkConnectionException):
        # Fatal environment problems: stop the whole run.
        if options.dry or options.debug:
            self._stop_dry_run()
        sys.exit(1)
    except (
        EmptyEpisodeNameException,
        EpisodeAlreadyExistsInDirectoryException,
        EpisodeNotFoundException,
        IncorrectCustomRegularExpressionSyntaxException,
        InvalidXMLException,
        OutputFormatMissingSyntaxException,
        ShowNotFoundException,
        UnexpectedFormatException,
    ):
        # Per-file problems: skip this file and carry on.
        pass
    except Exception as err:
        if options.debug:
            # In debug mode, show the full traceback.
            raise
        log.critical("tvr: critical error: %s" % str(err))
        sys.exit(1)
def test_from_string(self, complete_title, expected):
    """Parsing a complete title must yield the expected Episode."""
    parsed = Episode.from_string(complete_title, self.link)
    assert parsed == expected
def test_from_string_raise_exception(self, complete_title, exception_expected):
    """Malformed titles must raise the parametrised exception type."""
    with pytest.raises(exception_expected):
        Episode.from_string(complete_title, self.link)
job = _get_job_by_id(job_id, jobs) job = Job(job_id) if job is None else job if '-' not in job_data[1]: successors_ids = job_data[1].split(',') for sjob_id in successors_ids: sjob_id = int(sjob_id) pjob = _get_job_by_id(sjob_id, jobs) pjob = Job(sjob_id) if pjob is None else pjob job.successors.append(pjob) pjob.predecessors.append(job) jobs.append(pjob) jobs.append(job) _init_node(job_id, job_data, nodes) else: jobs.append(job) _init_node(job_id, job_data, nodes) return {'jobs': jobs, 'nodes': nodes} instance = 1 if len(sys.argv) > 0: for arg in sys.argv: if 'instance' in arg: arg = arg.split('=') instance = arg[1] if len(arg) > 1 else 1 jinstance_data = _init_jobs_and_nodes('japinstance%s' % instance) episode1 = Episode(jinstance_data['jobs'], jinstance_data['nodes'], 5) episode1.execute()
def main():
    """Entry point: demux, retime, and mux the requested range of episodes.

    Reads the config file, validates arguments, prepares a temp working
    directory (cleaned up at exit), then processes each episode in turn.
    """
    colorama.init()
    config = load_config_file()
    args, wtf = create_args().parse_known_args()
    if wtf:
        logger.error('Unknown argument %s', wtf[0])
        sys.exit(1)
    init_logging(args.verbose)
    # don't proceed if paths aren't right/programs missing
    pre_check(args, config)
    try:
        working_dir = config.get(APP_NAME, 'working_dir')
    except configparser.Error:
        working_dir = None
    if working_dir:
        if not os.path.isdir(working_dir):
            create_dir(working_dir)
        tempfile.tempdir = working_dir
    tmp_dir = tempfile.mkdtemp()
    logger.debug('Episode temp folder: %s', tmp_dir)
    atexit.register(delete_temp, tmp_dir)
    start, end, special = validate_args(args)
    print(WELCOME_MSG)
    for ep in range(start, end + 1):
        # Bug fix: time.clock() was deprecated since 3.3 and removed in
        # Python 3.8; perf_counter() is the documented replacement for
        # measuring elapsed wall-clock intervals.
        start_time = time.perf_counter()
        episode = Episode(ep, config, args, tmp_dir, special)
        if not args.no_demux:
            episode.demux()
        elif args.sub_only:
            detect_streams(os.path.join(config.get(APP_NAME, 'output_dir'),
                                        args.series,
                                        str(ep if not special else special).zfill(3),
                                        'R1', 'Subtitle.idx'))
        if not args.no_retime:
            episode.retime_subs()
            episode.retime_audio()
        if not args.no_demux and args.no_mux:
            # move files to destination folder
            episode.move_demuxed_files()
        if not args.no_mux:
            episode.mux()
        if args.make_avs:
            # only works on files generated with --no-mux
            episode.make_avs()
        delete_temp(episode.temp_dir)
        elapsed = time.perf_counter() - start_time
        logger.debug('Elapsed time: %s seconds', elapsed)
    logger.info('Finished!')