Example 1
def SearchDbForShow(list_of_shows):
    db = models.connect()
    source = db.query(ScanURL).filter(ScanURL.media_type == "index").first()

    if source:
        browser = LinkRetrieve.source_login(source)
        if browser is None:
            ActionLog.log('%s could not log on' % source.login_page)
        else:
            config = db.query(models.Config).first()  # fetch config once instead of once per show
            for show_searcher in [s for s in list_of_shows if not s.retrieved]:
                matching_indexes = []
                for search_text in show_searcher.search_list:  # every candidate search string for this show
                    matching_indexes.extend(
                        db.query(LinkIndex)
                        .filter(LinkIndex.link_text.like('%' + search_text + '%'))
                        .filter(LinkIndex.link_text.like('%' + show_searcher.episode_code + '%'))
                        .all())

                if matching_indexes:
                    for match in matching_indexes:
                        tv_response = browser.get(match.link_url)
                        if tv_response.status_code == 200:
                            episode_soup = tv_response.soup
                            episode_links = LinkRetrieve.get_download_links(episode_soup, config, source.domain, config.hd_format)
                            # check that each link is still active; discard the whole set on any 404
                            for link in episode_links:
                                link_response = browser.get(link)
                                if link_response.status_code == 404:
                                    episode_links = None
                                    break

                            if episode_links:
                                LinkRetrieve.process_tv_link(db, config, show_searcher, episode_links)
                                break  # since we got the download, we can break out of the loop
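A quick way to exercise SearchDbForShow: any object exposing the three attributes the function reads (retrieved, search_list, episode_code) will do. The ShowSearcher class below is hypothetical, invented only for the call:

class ShowSearcher:
    def __init__(self, search_list, episode_code):
        self.search_list = search_list    # candidate search strings, e.g. ['Some Show', 'Some.Show']
        self.episode_code = episode_code  # e.g. 'S01E01'
        self.retrieved = False            # only shows not yet retrieved are searched

SearchDbForShow([ShowSearcher(['Some Show'], 'S01E01')])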
Example 2
    def search(self, show_search):
        try:
            t = tvdb_api.Tvdb()
            search_results = t.search(show_search)
            ActionLog.log('Search for "%s".' % show_search)
        except Exception as ex:
            search_results = {'error': str(ex)}  # was "% Exception", which never interpolated the actual error

        return json.dumps(search_results)
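For reference, tvdb_api returns search results as a list of show records; the key below follows the library's usual field naming, though treat it as an assumption rather than a guarantee:

import json
import tvdb_api

t = tvdb_api.Tvdb()
results = t.search('buffy')        # list of matching show records
print(results[0]['seriesname'])    # assumed key name
print(json.dumps(results))         # what the method above returns on success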
Example 3
    def update_episode(self):
        status = 'success'
        try:
            data = cherrypy.request.json
            episode_id = data['episodeid']
            change_to_value = data['changeto']
            if change_to_value == 'search':
                new_status = LinkRetrieve.show_search(episode_id, cherrypy.request.db)
                status = 'success' if new_status == 'Retrieved' else 'failed'
            else:
                change_to_value = change_to_value.title()
                e = cherrypy.request.db.query(Episode).filter(Episode.id == episode_id).first()
                e.status = change_to_value
                cherrypy.request.db.commit()
        except Exception as ex:
            ActionLog.log(ex)
            status = 'error'
        return json.dumps(status)
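The handler needs exactly two JSON keys, episodeid and changeto. A sketch of a client call, assuming the method is exposed at /update_episode (the URL and port are hypothetical):

import requests

resp = requests.post('http://localhost:8080/update_episode',
                     json={'episodeid': 42, 'changeto': 'search'})
print(resp.json())  # 'success', 'failed', or 'error'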
Example 4
    def refresh(self):
        status = 'success'
        try:
            data = cherrypy.request.json
            is_show_refresh = data['isshowrefresh']
            is_scan = data['isscan']

            if is_scan:
                LinkRetrieve.handle_downloads()

            if is_show_refresh:
                Utils.update_all()

        except Exception as ex:
            ActionLog.log(ex)
            status = 'error'

        return json.dumps(status)
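The refresh endpoint reads two independent booleans from the same kind of JSON body, so one request can trigger a download scan, a show refresh, or both:

payload = {'isscan': True, 'isshowrefresh': False}
# POSTed as JSON, this payload runs LinkRetrieve.handle_downloads() only;
# set both flags to also run Utils.update_all() in the same request.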
Example 5
    def handle_movie(self):
        ar = AjaxResponse('Movie downloading...')
        try:
            data = cherrypy.request.json
            movie_id = data['movieid']
            is_ignore = data['isignore']
            is_cleanup = data['iscleanup']
            config = cherrypy.request.db.query(Config).first()
            if is_cleanup:
                cherrypy.request.db.query(Movie).filter(Movie.status == 'Ignored').delete()
                cherrypy.request.db.commit()
                ActionLog.log("DB cleanup completed")
            else:
                m = cherrypy.request.db.query(Movie).filter(Movie.id == movie_id).first()
                if is_ignore:
                    m.status = 'Ignored'
                else:
                    jdownloader_string = ' '.join(l.url for l in m.movieurls.all())  # space-separated URL list
                    LinkRetrieve.write_crawljob_file(m.name, config.movies_directory, jdownloader_string,
                                                     config.crawljob_directory)
                    ActionLog.log('"%s\'s" .crawljob file created.' % m.name)
                    m.status = 'Retrieved'
                cherrypy.request.db.commit()
        except Exception as ex:
            ActionLog.log('error - %s' % ex)  # concatenating str + Exception raised a TypeError
            ar.status = 'error'
            ar.message = str(ex)

        return ar.to_JSON()
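write_crawljob_file is not shown in this example; below is a plausible sketch of what it might do, assuming the plain key=value flavour of JDownloader's folderwatch .crawljob format (the real helper may differ):

import os

def write_crawljob_file(package_name, download_folder, link_text, crawljob_dir):
    # JDownloader watches crawljob_dir and imports any .crawljob file dropped there
    path = os.path.join(crawljob_dir, package_name + '.crawljob')
    with open(path, 'w') as f:
        f.write('text=%s\n' % link_text)              # space-separated download URLs
        f.write('packageName=%s\n' % package_name)
        f.write('downloadFolder=%s\n' % download_folder)
        f.write('autoStart=TRUE\n')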
Example 6
    def update_show(self):
        status = 'success'
        try:

            data = cherrypy.request.json
            show_id = data['showid']
            action = data['action']

            if action == 'refresh':
                Utils.add_episodes(show_id)

            elif action == 'remove':
                s = cherrypy.request.db.query(Show).filter(Show.show_id == show_id).first()
                ActionLog.log('"%s" removed.' % s.show_name)
                cherrypy.request.db.delete(s)
                cherrypy.request.db.commit()

        except Exception as ex:
            ActionLog.log(ex)
            status = 'error'

        return json.dumps(status)
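The JSON body here carries a show id plus an action verb; the two recognised values come straight from the branches above (the id is just an example):

payload = {'showid': 80348, 'action': 'refresh'}  # re-pull the episode list
payload = {'showid': 80348, 'action': 'remove'}   # delete the Show row outright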
Example 7
    def add_show(self):
        status = 'success'
        try:
            data = cherrypy.request.json
            series_id = data['seriesid']
            t = tvdb_api.Tvdb()
            s = t[series_id]
            if cherrypy.request.db.query(Show).filter(Show.show_id == series_id).first() is None:
                # save new show to db
                first_aired_date = datetime.strptime(s['firstaired'], "%Y-%m-%d")
                new_show = Show(show_id=series_id, show_name=s['seriesname'], first_aired=first_aired_date,
                                is_active=s.data['status'] == 'Continuing', banner=s['banner'])

                if new_show.banner is None:
                    new_show.banner = ''

                # create folder based on show name:
                new_show.show_directory = '/' + new_show.show_name.replace('.', '').strip()
                phys_directory = cherrypy.request.db.query(Config).first().tv_parent_directory + new_show.show_directory
                if not os.path.exists(phys_directory):
                    os.makedirs(phys_directory)

                cherrypy.request.db.add(new_show)
                cherrypy.request.db.commit()
                ActionLog.log('"%s" added.' % new_show.show_name)
                Utils.add_episodes(series_id, t, cherrypy.request.db)
            else:
                status = 'duplicate'
                # http://stackoverflow.com/questions/7753073/jquery-ajax-post-to-django-view
        except Exception as ex:
            ActionLog.log(ex)
            status = 'error'

        return json.dumps(status)
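The handler reads only a handful of fields off the tvdb_api record; a sketch of the lookup, with field names taken from the code above and an arbitrary example id:

import tvdb_api

t = tvdb_api.Tvdb()
s = t[80348]              # indexing a Tvdb instance looks the series up by id
print(s['seriesname'])
print(s['firstaired'])    # 'YYYY-MM-DD', parsed above with strptime
print(s.data['status'])   # 'Continuing' marks the show as active
print(s['banner'])        # may be None, hence the fallback to '' above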
Example 8
def CreateIndexes(is_full_index=False):
    db = models.connect()
    # the index contents were meant to decide whether to seed a default index; that check is currently disabled
    indexes = db.query(LinkIndex).all()

    source = db.query(ScanURL).filter(ScanURL.media_type == "index").first()
    if source:
        browser = source_login(source)
        if browser is None:
            ActionLog.log('%s could not log on' % source.login_page)
        else:

            # page 1 is source.url itself; each subsequent page is fetched as indexN.html
            i = 1
            is_indexed = False
            duplicates_encountered = 0
            try:
                while not is_indexed:

                    link = source.url
                    if i > 1:
                        link = '%sindex%s.html' % (source.url, i)

                    if i % 100 == 0:
                        browser = source_login(source)  # reset the browser session every 100 pages

                    soup = browser.get(link).soup

                    all_rows_soup = soup.select(source.link_select)  # all rows, e.g. $("#threadbits_forum_73 tr")
                    page_adds = 0
                    for row in all_rows_soup:
                        if 'STICKY' in row.text.upper():  # skip sticky links
                            continue
                        else:
                            row_links = row.find_all('a')
                            links_with_text = [t for t in row_links if t.text != '']
                            index_link = links_with_text[0]
                            l = LinkIndex()
                            l.link_text = index_link.text
                            l.link_url = index_link.attrs['href']
                            l.id = int(index_link.attrs['id'].split('_')[2])

                            if l.link_text == '':
                                continue

                            existing_link_index = db.query(LinkIndex).filter(LinkIndex.id == l.id).first()
                            if existing_link_index:
                                duplicates_encountered += 1
                                if duplicates_encountered > 35 and not is_full_index:  # roughly a full page of duplicates means we've caught up
                                    is_indexed = True
                                    break
                                else:
                                    continue
                            else:
                                page_adds += 1
                                db.add(l)
                    if page_adds > 0:
                        db.commit()

                    if i == 100000:
                        is_indexed = True
                    else:
                        print(i)
                        i += 1
            except Exception as ex:
                print(ex)
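The id parsing above (split('_')[2]) suggests vBulletin-style thread anchors, which fits the $("#threadbits_forum_73 tr") selector in the comment; the markup below is illustrative, not taken from the source:

# <a id="thread_title_12345" href="showthread.php?t=12345">Some.Show.S01E02</a>
anchor_id = 'thread_title_12345'
print(int(anchor_id.split('_')[2]))  # -> 12345, stored as LinkIndex.id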