def mark_trailer_watched(self, trailer, clear=False):
        """
            Marks a trailer as watched, or clears its watched status.

            trailer: trailer record; index 0 is the trailer id and
                     index 7 the current playcount (may be None).
            clear: if True, reset playcount and watched date to None.
        """
        # conditional expressions instead of bool-indexed lists: the
        # playcount arithmetic and the today() call are skipped entirely
        # when clearing, instead of being evaluated and discarded
        info = {
            "playcount": None if clear else (trailer[7] or 0) + 1,
            "watched": None if clear else today()
        }

        self.mark_trailer(trailer[0], info)
    def mark_trailer_downloaded(self, trailer, clear=False):
        """
            Marks a trailer as downloaded, or clears its downloaded status.

            trailer: trailer record; index 0 is the trailer id, index 5
                     the size and index 10 the download path.
            clear: if True, reset downloaded date and path to None.
        """
        # conditional expressions instead of bool-indexed lists so the
        # today() call is skipped when clearing
        info = {
            "downloaded": None if clear else today(),
            "path": None if clear else trailer[10]
        }
        # only add size if we're not clearing status
        if (not clear):
            info["size"] = trailer[5]

        self.mark_trailer(trailer[0], info)
 def _update_source(self):
     """
         Checks the configured scraper's source freshness and re-fetches
         trailers when the source is missing or due for a refresh.
     """
     # check if scraper exists and source date
     record = self.m_database.scraper(self.m_addon.getSetting("trailer.scraper"))
     # NOTE(review): getSetting() normally returns a string, so the
     # comparison with the int 0 below may never be true — confirm
     # against the addon's settings handling.
     # update source if no scraper info or if user preference to update on first run
     if ((record is not None and
             self.m_addon.getSetting("source.schedule.when") == 0 and
             self.m_addon.getSetting("source.last.checked") < today(format_="%Y-%m-%d") and
             get_refresh(date=record[1], expires=record[2]))
         or record is None
         ):
         print("UPDATING SOURCE................................................................")
         # set new refresh date
         self.m_addon.setSetting(
             "source.last.checked",
             today(format_="%Y-%m-%d")
         )
         # import preferred scraper and fetch new trailers.
         # importlib.import_module replaces the former exec-based import:
         # same submodule lookup, but no code execution of a string built
         # from a user-editable setting
         import importlib
         scraper = importlib.import_module(
             "resources.scrapers.trailers.{scraper}".format(
                 scraper=self.m_addon.getSetting("trailer.scraper").replace(" - ", "_").replace(" ", "_").lower()
             )
         )
         # update source
         scraper.Scraper(self.m_addon, self.m_database).update()
Esempio n. 4
0
        # error path — the guard condition for this branch sits above this
        # excerpt; bail out if the expected IMGT FASTA is missing
        print("Error: imgt-data.fasta file not detected for\'", species + \
                "'. Please generate and place it in the appropriate Data subdirectory.")
        sys.exit()

    # If so, check the modification time for the imgt-data.fasta file, assuming that's the last download time
    input_imgt_file = species_dir + 'imgt-data.fasta'
    mod_date = datetime.fromtimestamp(
        os.path.getmtime(input_imgt_file)).strftime('%Y-%m-%d')

    # Then read through the FASTA and sort into the appropriate chains
    # NOTE(review): mode 'rU' is deprecated (removed in Python 3.11) — plain 'r' should suffice
    with open(input_imgt_file, 'rU') as in_file, \
            open(species_dir + 'TRA.fasta', 'w') as TRA, \
            open(species_dir + 'TRB.fasta', 'w') as TRB:

        # NOTE(review): prot is populated nowhere within this excerpt — verify it is needed
        prot = coll.defaultdict(coll.defaultdict)

        for fasta_id, seq, blank in fxn.read_fa(in_file):
            # IMGT FASTA headers are '|'-delimited; field 1 holds "GENE*ALLELE"
            gene, allele = fasta_id.split('|')[1].split('*')

            # NB: TRDV included with TRA genes due to the evidence that even non 'TRAV/DV' genes can recombine with TRAJ
            if 'TRA' in gene or 'TRDV' in gene:
                TRA.write(fxn.fastafy(fasta_id, seq))
            elif 'TRB' in gene:
                TRB.write(fxn.fastafy(fasta_id, seq))

    # Finally log the dates
    log_txt = 'imgt-data.fasta_last_modified ' + mod_date + '\nsplit-imgt-data.py_last_run ' + fxn.today(
    )
    with open(species_dir + 'data-production-date.txt', 'w') as log_file:
        log_file.write(log_txt)
    def scraper(self, scraper, **kwargs):
        """
            Gets and/or sets scraper info.

            scraper: scraper name to look up.
            kwargs: pass useragent (and optionally expires) to insert a
                    new record; pass complete (and optionally sourcedate)
                    to update an existing one.
            Returns (idScraper, sourcedate, expires) for an existing
            record, [lastrowid, None, None] for a fresh insert, or None
            when the scraper is unknown and no useragent was supplied.
        """
        # grab scrapers id
        idScraper = self.execute("SELECT idScraper, sourcedate, expires FROM scraper WHERE scraper=?;", (scraper,)).fetchone()
        # insert/update scraper info
        # ("key in kwargs" replaces dict.has_key(), which was removed in Python 3)
        if (idScraper is None and "useragent" in kwargs):
            idScraper = [self.execute("INSERT INTO scraper (scraper, useragent, expires) VALUES (?, ?, ?);", (scraper, kwargs.get("useragent", ""), kwargs.get("expires", 7),)).lastrowid, None, None]
            self.commit()
        elif (idScraper is not None and "complete" in kwargs):
            self.execute("UPDATE scraper SET complete=?, sourcedate=? WHERE idScraper=?;", (int(kwargs.get("complete", 0)), kwargs.get("sourcedate", today()), idScraper[0],))
            self.commit()

        return idScraper
    def get_movies(self, category, search=None):
        """
            Returns a list of movies.

            category: list selector, e.g. "recent:", "downloaded",
                      "downloadedrecently:", "intheaters:", "comingsoon:",
                      "hd:" or "<kind>: <subcategory>" (genre, studio, ...).
            search: optional " - " separated search terms; each term is
                    turned into a REGEXP filter over several movie columns.
        """

        # initialize these as they don't apply to all lists
        tables = category_sql = orderby_sql = ""
        params = None
        # recent
        if (category.startswith("recent:")):
            params = (30,)
            orderby_sql = "ORDER BY trailer.postdate DESC LIMIT ?"  # trailer.idTrailer
        # downloaded
        elif (category.startswith("downloaded")):
            # only recent categories require order by and params
            if (category.startswith("downloadedrecently:")):
                params = (30,)
                orderby_sql = "ORDER BY trailer.downloaded DESC LIMIT ?"
        # release date based
        elif (category.startswith("intheaters:") or category.startswith("comingsoon:")):
            params = (today(days=[-60, 1][category.startswith("comingsoon:")], format_="%Y-%m-%d"), today(days=[0, 365][category.startswith("comingsoon:")], format_="%Y-%m-%d"),)
            category_sql = "AND ({soon}releasedate BETWEEN ? AND ?)".format(soon=["", "releasedate='Coming Soon' OR "][category.startswith("comingsoon:")])
        # all other
        elif (category != "" and not category.startswith("hd:")):
            kind, subcat = category.split(": ")
            params = (subcat,)
            tables = ", {kind}, {kind}_link_movie".format(kind=kind)
            category_sql = "AND {kind}_link_movie.idMovie=movie.idMovie AND {kind}_link_movie.id{kind}={kind}.id{kind} AND {kind}.{kind}=?".format(kind=kind)

        # search
        if (search is not None):
            # need to make sure params is not None since we add to it
            if (params is None): params = ()

            # loop thru and add each search
            # should only be multiple if user preference is to search current list
            for s in search.split(" - "):
                # order by param needs to be last, so prepend when an
                # ORDER BY ... LIMIT ? placeholder is already queued
                if (orderby_sql != ""):
                    params = (self._get_regex_pattern(s),) + params
                else:
                    params += (self._get_regex_pattern(s),)
                # add search SQL
                category_sql += "\nAND ((movie.title || ' ' || movie.studio || ' ' || movie.director || ' ' || movie.writer || ' ' || movie.plot || ' ' || movie.cast || ' ' || movie.genre) REGEXP ?)"

        # get Quality and MPAA rating limits SQL and format SQL
        sql = """
            SELECT movie.*, trailer.*,
            SUM(trailer.runtime),
            GROUP_CONCAT(DISTINCT trailer.title),
            GROUP_CONCAT(DISTINCT trailer.url),
            GROUP_CONCAT(DISTINCT trailer.url || '|' || trailer.watched),
            GROUP_CONCAT(DISTINCT trailer.url || '|' || trailer.downloaded),
            scraper.*
            FROM movie, trailer, scraper, trailer_link_movie, trailer_link_scraper{tables}
            WHERE movie.idMovie=trailer_link_movie.idMovie
            AND trailer.idTrailer=trailer_link_movie.idTrailer
            AND trailer.idTrailer=trailer_link_scraper.idTrailer
            AND scraper.idScraper=trailer_link_scraper.idScraper
            {category}
            {limits[0]}
            {limits[1]}
            {limits[2]}
            GROUP BY trailer_link_movie.idMovie
            {orderby};
        """.format(
                tables=tables,
                category=category_sql,
                limits=self._get_limits(
                    hd=category.startswith("hd:"),
                    downloaded=category.startswith("downloaded"),
                ),
                orderby=orderby_sql
            )
        # debug output of the final query; print() is valid on both
        # Python 2 and 3 for a single argument (was a Py2-only statement)
        print(sql)
        # fetch records
        if (params is None):
            return self.execute(sql)
        else:
            return self._highlight_search_results(self.execute(sql, params), search)