Example #1
    def read(self, args, c):
        self.ore, c = utils.get_int(args, c)
        self.gas, c = utils.get_int(args, c)
        self.used_psi, c = utils.get_int(args, c)
        self.total_psi, c = utils.get_int(args, c)

        return c
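
In this first example, get_int appears to take the parsed argument buffer plus a cursor and return the next integer together with the advanced cursor. The helper itself is not shown anywhere in these examples; a minimal sketch under that assumption (name and signature taken from the call sites above, body hypothetical):

def get_int(args, c, default=0):
    # Hypothetical sketch: return the integer at cursor position c in args
    # and the advanced cursor, falling back to a default on bad input.
    try:
        return int(args[c]), c + 1
    except (IndexError, TypeError, ValueError):
        return default, c + 1
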
Example #2
 def get(self, page_current=1):
     db = self.db
     d = load_info(db).copy()
     page_size = utils.get_int(d.get('posts_pagesize', ''), 10)
     page_current = utils.get_int(page_current, 1)
     start_index = page_size * (page_current-1)
     posts_count = d['posts_available']
     page_count = utils.get_pagecount(posts_count, page_size)
     if posts_count < 1:
         self.finish("<p>No posts</p>")
         return
     elif page_current > page_count or page_current < 1:
         pass
     else:
         if posts_count > 0:
             post_ids = db.query(Post.id).\
                 filter(Post.ispass == True).\
                 order_by( 
                     Post.pubdate.desc()
                 )[start_index:start_index+page_size]
             ids = (x[0] for x in post_ids)
             d['posts'] = db.query(Post).\
                 filter(Post.id.in_(ids)).\
                 order_by(Post.pubdate.desc()).\
                 all()
         d['page_count'] = page_count
         d['page_current'] = page_current
         return self.render('index.html', **d)
     self.notfound()
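
The pagination above leans on utils.get_pagecount to turn a post count and a page size into a number of pages. That helper is not included in these examples; a plausible minimal sketch is plain ceiling division:

def get_pagecount(total_count, page_size):
    # Hypothetical sketch: pages needed to display total_count items,
    # page_size items per page (ceiling division).
    if page_size < 1:
        return 0
    return (total_count + page_size - 1) // page_size
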
Example #3
def generate_payload(generator=None, verify_name=True, verify_size=True):
    Filtering.information()  # print filters xbmcaddon.Addon()
    results = []
    cont = 0
    for name, info_hash, magnet, size, seeds, peers in generator:
        size = clean_size(size)
        magnet = clean_magnet(magnet, info_hash)
        v_name = name if verify_name else Filtering.title
        v_size = size if verify_size else None
        logger.log.debug(
            "name: %s \n info_hash: %s\n magnet: %s\n size: %s\n seeds: %s\n peers: %s"
            % (name, info_hash, magnet, size, seeds, peers))
        if Filtering.verify(v_name, v_size):
            cont += 1
            if Settings["read_magnet_link"] == "true":
                magnet = get_links(magnet)  # magnet
            results.append({
                "name": name,
                "uri": magnet,
                "info_hash": info_hash,
                "size": size,
                "seeds": get_int(seeds),
                "peers": get_int(peers),
                "language": Settings["language"],
                "provider": Settings.name,
                "icon": Settings.icon,
            })  # return the torrent
            if cont >= Settings["max_magnets"]:  # limit magnets
                break
        else:
            logger.log.debug(Filtering.reason)
    logger.log.debug('>>>>>>' + str(cont) +
                     ' torrents sent to Magnetic<<<<<<<')
    return results
Example #4
    def load(self, identifier):
        filename = file_utils.make_legal_filename(identifier, suffix='.json')
        target = os.path.join(SAVE_PATH, filename)
        try:
            with open(target, mode='r') as target_file:
                hashes = json.load(target_file)
        except (IOError, OSError, TypeError, ValueError):
            self.log('Could not load stored hashes from {0}'.format(target))
            return False

        if not hashes:
            return False

        self.version = hashes.get('version', self.version)
        self.hash_size = hashes.get('hash_size', self.hash_size)
        if 'data' in hashes:
            hash_size = self.hash_size[0] * self.hash_size[1]
            self.data = {
                tuple([utils.get_int(i) for i in key[1:-1].split(', ')]):  # pylint: disable=consider-using-generator
                self.int_to_hash(hashes['data'][key], hash_size)
                for key in hashes['data']
            }
        if 'timestamps' in hashes:
            self.timestamps = {
                utils.get_int(episode): hashes['timestamps'][episode]
                for episode in hashes['timestamps']
            }

        self.log('Hashes loaded from {0}'.format(target))
        return True
Example #5
    def do(self, db, d, post, page_current=1):
        '''do'''
        if post:
            d['post'] = post
            comment_pagesize = utils.get_int(
                d.get('comments_pagesize', ''), 10)
            page_current = utils.get_int(page_current, 1)
            start_index = comment_pagesize * (page_current-1)

            commentpass = post.commentpass
            comments = None
            if commentpass > 0:
                comments = db.query(Comment).\
                    filter(Comment.post_id == post.id).\
                    filter(Comment.ispass==True).\
                    order_by( 
                        Comment.id.desc()
                    )[start_index:start_index + comment_pagesize]
            if comments:
                page_count = utils.get_pagecount(commentpass, 
                    comment_pagesize)
                d['post_comments'] = comments
                d['page_current'] = page_current 
                d['page_count'] = page_count
                d['page_comment_count'] = commentpass 
            return self.render('details.html', **d)
        self.notfound()
Example #6
def get_edge_time(cur, outliers, alg='gcn', layer=2):
    """
    获取edge-cal细分下的各算子的用时
    :param cur: sqlite的cursor对象
    :param alg: 算法
    :return: ['collect', 'message', 'aggregate', 'update']对应的用时
    """
    labels = ['collect', 'message', 'aggregate', 'update']
    edge_times = []
    for label in labels:
        step = layer * 3 if alg == 'gaan' else layer
        sql = "select start, end, text from nvtx_events where text == '{}'".format(
            label)
        res = cur.execute(sql).fetchall()[step:]  # skip the forward-phase results from warm-up
        cost_time = 0
        for i in range(50):
            if i in outliers: continue
            # epoch_time = forward time + backward time + eval time
            # 1. Get the forward time and eval time
            for j in range(step * 2):
                time = get_real_time(res[step * 2 * i + j], cur)[0]
                cost_time += time
            # 2. Get the backward time from the seqs matching the forward label
            for j in range(step):
                # Idea: first get the label's time span [st, ed]; then find all seqs
                # within that span, then find the matching seqs in the backward pass
                # 2.1 Find all seqs within the time span
                seq_sql = "select text from nvtx_events where start >= {} and end <= {} and text like '%seq%'"
                seq_res = cur.execute(
                    seq_sql.format(res[step * 2 * i + j][0],
                                   res[step * 2 * i + j][1])).fetchall()

                if not seq_res:  # ggnn, flickr; edge-cal, message=0
                    continue

                # 2.2 Get the min and max seq; seq values are contiguous
                min_seq, max_seq = get_int(seq_res[0][0]), get_int(
                    seq_res[-1][0])

                seq_backward_sql = "select start, end, text from nvtx_events where text like '%Backward%seq = {0}' or text like '%ScatterMax%seq = {0}'"
                end_time = cur.execute(
                    seq_backward_sql.format(min_seq)).fetchone()

                # 2.3 To account for gap time, extend the window to the left
                start_time = cur.execute(
                    seq_backward_sql.format(max_seq + 1)).fetchone()
                if start_time:
                    backward_time = get_real_time(
                        (start_time[1], end_time[1], label), cur)[0]
                else:
                    start_time = cur.execute(
                        seq_backward_sql.format(max_seq)).fetchone()
                    backward_time = get_real_time(
                        (start_time[0], end_time[1], label), cur)[0]
                cost_time += backward_time

        cost_time /= 50 - len(outliers)  # average over epochs
        # print(label, cost_time)
        edge_times.append(cost_time)
    return edge_times
Example #7
def main():
    print("Enter x:", end=" ")
    x = get_int()
    print("Enter y:", end=" ")
    y = get_int()

    result = pow_(x, y)
    print(f"{x} ^ {y} = {result}")
Example #8
def cleanup_results(results_list):
    """ Remove duplicate results, hash results without an info_hash, and sort by seeders

    Args:
        results_list (list): Results to clean-up

    Returns:
        list: De-duplicated, hashed and sorted results
    """
    if len(results_list) == 0:
        return []

    hashes = []
    filtered_list = []
    allow_noseeds = get_setting('allow_noseeds', bool)
    for result in results_list:
        if not result['seeds'] and not allow_noseeds:
            continue

        provider_name = result['provider'][result['provider'].find(']')+1:result['provider'].find('[/')]

        if not result['uri']:
            if not result['name']:
                continue
            try:
                log.warning('[%s] No URI for %s' % (provider_name, repr(result['name'])))
            except Exception as e:
                import traceback
                log.warning("%s logging failed with: %s" % (provider_name, repr(e)))
                map(log.debug, traceback.format_exc().split("\n"))
            continue

        hash_ = result['info_hash'].upper()

        if not hash_:
            if result['uri'] and result['uri'].startswith('magnet'):
                hash_ = Magnet(result['uri']).info_hash.upper()
            else:
                hash_ = hashlib.md5(result['uri']).hexdigest()

        try:
            log.debug("[%s] Hash for %s: %s" % (provider_name, repr(result['name']), hash_))
        except Exception as e:
            import traceback
            log.warning("%s logging failed with: %s" % (result['provider'], repr(e)))
            map(log.debug, traceback.format_exc().split("\n"))

        if not any(existing == hash_ for existing in hashes):
            filtered_list.append(result)
            hashes.append(hash_)

    if (get_setting("sort_by_resolution", bool)):
        log.debug("[EXPEREMENTAL] Start last sorting list by resolution of all result before send to Elementum")
        filtered_list = sorted(filtered_list, key=lambda r: (get_int(r.pop('resolution'))), reverse=True)
    else:
        filtered_list = sorted(filtered_list, key=lambda r: (get_int(r['seeds'])), reverse=True)

    return filtered_list
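
Stripped of provider bookkeeping and logging, the core of cleanup_results is de-duplication on the upper-cased info hash followed by a sort on seed count. A self-contained illustration with hypothetical result dicts:

results = [
    {"info_hash": "abc123", "seeds": "12"},
    {"info_hash": "ABC123", "seeds": "3"},   # duplicate once upper-cased
    {"info_hash": "def456", "seeds": "40"},
]
seen, deduped = set(), []
for result in results:
    hash_ = result["info_hash"].upper()
    if hash_ not in seen:
        seen.add(hash_)
        deduped.append(result)
deduped.sort(key=lambda r: int(r["seeds"]), reverse=True)
print([r["info_hash"] for r in deduped])  # ['def456', 'abc123']
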
Example #9
def get_calculations_time(cur, outliers, alg, layer=2):
    labels = all_labels[alg]
    vertex_time, edge_time = 0, 0
    for label in labels:
        sql = "select start, end, text from nvtx_events where text == '{}'".format(
            label)
        res = cur.execute(sql).fetchall()[layer:]  # exclude warm-up results
        cost_time = 0
        for i in range(50):
            if i in outliers: continue
            # epoch_time = forward time + backward time + eval time
            # 1. Get the forward time and eval time
            for j in range(2 * layer):
                time = get_real_time(res[2 * layer * i + j], cur)[0]
                cost_time += time
            # 2. Get the backward time from the seqs matching the forward label
            for j in range(layer):
                # Idea: first get the label's time span [st, ed]; then find all seqs
                # within that span, then find the matching seqs in the backward pass
                # 2.1 Find all seqs within the time span
                seq_sql = "select text from nvtx_events where start >= {} and end <= {} and text like '%seq%'"
                seq_res = cur.execute(
                    seq_sql.format(res[2 * layer * i + j][0],
                                   res[2 * layer * i + j][1])).fetchall()

                min_seq, max_seq = get_int(seq_res[0][0]), get_int(
                    seq_res[-1][0])

                seq_backward_sql = "select start, end, text from nvtx_events where text like '%Backward%seq = {0}' or text like '%ScatterMax%seq = {0}'"
                end_time = cur.execute(
                    seq_backward_sql.format(min_seq)).fetchone()

                start_time = cur.execute(
                    seq_backward_sql.format(max_seq + 1)).fetchone()
                if start_time:
                    backward_time = get_real_time(
                        (start_time[1], end_time[1], label), cur)[0]
                else:
                    start_time = cur.execute(
                        seq_backward_sql.format(max_seq)).fetchone()
                    backward_time = get_real_time(
                        (start_time[0], end_time[1], label), cur)[0]
                cost_time += backward_time

        cost_time /= 50 - len(outliers)  # average over epochs
        if 'vertex' in label:
            vertex_time += cost_time
        else:
            edge_time += cost_time
    return [vertex_time, edge_time]
Example #10
 def __getitem__(mcs, item):
     # default values
     if item == "max_magnets":
         return get_int(mcs.value.get(item, "10"))
     elif item == "separator":
         return mcs.value.get(item, "%20")
     elif item == "time_noti":
         return get_int(mcs.value.get(item, "750"))
     elif item.endswith("accept"):
         temp = mcs.value.get(item, "{*}")
         return "{*}" if temp == "" else temp
     elif item.endswith("max_size"):
         return get_float(mcs.value.get(item, "10"))
     elif item.endswith("min_size"):
         return get_float(mcs.value.get(item, "0"))
     else:
         return mcs.value.get(item, "")
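
This Settings accessor, like most of the examples here, relies on get_int and get_float helpers that coerce free-form option strings and fall back to a default instead of raising. Their implementations are not part of these snippets; a minimal sketch under that assumption (note that some later examples clearly expect get_int to also pull a number out of a longer string, which this sketch does not attempt):

def get_int(value, default=0):
    # Hypothetical sketch: parse an int, falling back to a default.
    try:
        return int(value)
    except (TypeError, ValueError):
        return default

def get_float(value, default=0.0):
    # Same idea for floats.
    try:
        return float(value)
    except (TypeError, ValueError):
        return default
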
Example #11
def get_positive_int():
    """Get positive integer

    Returns:
        int: positive integer
    """
    n = 0
    while n < 1:
        print("Enter positive integer: ", end="")
        n = get_int()
    return n
Example #12
    def _init_hashes(self):
        # Limit captured data to 16 kB
        self.capture_size, self.capture_ar = self._capture_resolution(
            max_size=16)

        self.hash_index = {
            # Current hash index
            'current': (0, 0),
            # Previous hash index
            'previous': None,
            # Representative end credits hash index
            'credits': (0, 0),
            # Other episodes hash index
            'episodes': None,
            # Detected end credits timestamp from end of file
            'detected_at': None
        }

        # Hash size as (width, height)
        hash_size = [8 * self.capture_ar, 8]
        # Round down width to multiple of 2
        hash_size[0] = int(hash_size[0] - hash_size[0] % 2)
        # Hashes for currently playing episode
        self.hashes = UpNextHashStore(
            version='0.1',
            hash_size=hash_size,
            seasonid=self.state.get_season_identifier(),
            episode=utils.get_int(self.state.get_episode()),
            # Representative hash of centred end credits text on a dark
            # background stored as first hash
            data={
                self.hash_index['credits']:
                self._generate_initial_hash(hash_size)
            },
            timestamps={})

        # Calculated maximum allowable significant level
        # self.significance_level = 0.90 * self.calc_significance(
        #    self.hashes.data[self.hash_index['credits']]
        # )
        self.significance_level = 25

        # Hashes from previously played episodes
        self.past_hashes = UpNextHashStore(hash_size=hash_size)
        if self.hashes.is_valid():
            self.past_hashes.load(self.hashes.seasonid)

        # Number of consecutive frame matches required for a positive detection
        self.match_number = 5
        # Number of consecutive frame mismatches required to reset match count
        # Set to 3 to account for bad frame capture
        self.mismatch_number = 3
        self._hash_match_reset()
Example #13
 def __reduct_power(self, _power_of, _first):
     space = '' if _first == 0 else ' '
     signe = '' if _first == 0 else '+ '
     regex_power = r"(((\s+)?(\+|\-)(\s+)?)?((\d+\.)?\d+)((\s+)?\*(\s+)?)[X|x]\^{power}\b)".format(
         power=_power_of)
     core_power = re.search(regex_power, self.core_equation)
     egal_power = re.search(regex_power, self.start_egal)
     if core_power:
         core_power = core_power.group()
         self.core_equation = self.core_equation[len(core_power)::]
         core_power_int = get_int(core_power)
         self.__save_int_by_p(core_power_int, _power_of)
         if egal_power:
             egal_power = egal_power.group()
             egal_power_int = get_int(egal_power)
             reduct_int = core_power_int - egal_power_int
             self.start_egal = re.sub(regex_power.format(power=_power_of),
                                      "", self.start_egal, 1)
             self.__save_int_by_p(reduct_int, _power_of)
             if reduct_int < 0:
                 reduct_int = ft_abs(reduct_int)
                 signe = '- '
             elif reduct_int == 0:
                 return
             return f"{space}{signe}{reduct_int} * X^{_power_of}"
         if _first == 0:
             if core_power_int >= 0:
                 core_power = re.sub(r"(\s)?\+(\s)?", "", core_power)
             else:
                 core_power = re.sub(r"(\s)?", "", core_power, count=1)
         return core_power
     elif not core_power and egal_power:
         egal_power = egal_power.group()
         egal_power_int = get_int(egal_power)
         egal_power_int *= -1
         self.__save_int_by_p(egal_power_int, _power_of)
         if egal_power_int < 0:
             egal_power_int = ft_abs(egal_power_int)
             signe = '- '
         return f"{space}{signe}{egal_power_int} * X^{_power_of}"
Example #14
def generate_payload(provider, generator, filtering, verify_name=True, verify_size=True):
    """ Payload formatter to format results the way Quasar expects them

    Args:
        provider        (str): Provider ID
        generator  (function): Generator method, can be either ``extract_torrents`` or ``extract_from_api``
        filtering (Filtering): Filtering class instance
        verify_name    (bool): Whether to double-check the results' names match the query or not
        verify_size    (bool): Whether to check the results' file sizes

    Returns:
        list: Formatted results
    """
    filtering.information(provider)
    results = []

    definition = definitions[provider]

    for name, info_hash, uri, size, seeds, peers in generator:
        size = clean_size(size)
        # uri, info_hash = clean_magnet(uri, info_hash)
        v_name = name if verify_name else filtering.title
        v_size = size if verify_size else None
        if filtering.verify(provider, v_name, v_size):
            results.append({"name": name,
                            "uri": uri,
                            "info_hash": info_hash,
                            "size": size,
                            "seeds": get_int(seeds),
                            "peers": get_int(peers),
                            "language": definition["language"] if 'language' in definition else 'en',
                            "provider": '[COLOR %s]%s[/COLOR]' % (definition['color'], definition['name']),
                            "icon": os.path.join(ADDON_PATH, 'burst', 'providers', 'icons', '%s.png' % provider),
                            })
        else:
            log.debug(filtering.reason.encode('utf-8'))

    log.debug('>>>>>> %s would send %d torrents to Quasar <<<<<<<' % (provider, len(results)))

    return results
Example #15
 def get(self, tag_name, page_current=1):
     db = self.db
     d = load_info(db).copy()
     tag = db.query(Tag).\
         filter(Tag.tag==tag_name).\
         scalar()
     if not tag:
         self.notfound()
         return
     d['tag_item'] = tag
     tag_posts_count = tag.nums 
     if tag_posts_count is None: 
         tag_posts_count = 0
     tag_page_size = utils.get_int(d.get('posts_pagesize', ''), 10)
     tag_page_current = utils.get_int(page_current, 1)
     start_index = tag_page_size * (tag_page_current-1)
     tag_page_count = utils.get_pagecount(tag_posts_count, tag_page_size)
     if tag_page_current > tag_page_count or tag_page_current < 1:
         pass
     else:
         if tag_posts_count > 0:
             post_ids = db.query(post_tag.c.post_id).\
                 filter(post_tag.c.tag_id==tag.id).\
                 filter(Post.ispass==True).\
                 filter(Post.id==post_tag.c.post_id).\
                 order_by( 
                     Post.pubdate.desc()
                 )[start_index:start_index+tag_page_size]
             ids = [x[0] for x in post_ids]
             d['post_list'] = db.query(Post).\
                 filter(Post.id.in_(ids)).\
                 order_by(Post.pubdate.desc()).\
                 all()
         d['post_page_count'] = tag_page_count
         d['post_page_current'] = tag_page_current
         d['mode_type'] = 'tag' 
         return self.render('list.html', **d)
     self.notfound()
Example #16
 def get(self, year, month, page_current=1):
     db = self.db
     d = load_info(db).copy()
     year = utils.get_int(year)
     month = utils.get_int(month)
     first_day = None
     if 13 > month > 0:
         first_day = datetime.datetime(year, month, 1)
     if first_day in d['archives_count']:
         archive_posts_count = d['archives_count'][first_day]
         if archive_posts_count is None: 
             archive_posts_count = 0
         archive_page_size = utils.get_int(d.get('posts_pagesize', ''), 10)
         page_current = utils.get_int(page_current, 1)
         start_index = archive_page_size * (page_current-1)
         archive_page_count = utils.get_pagecount(archive_posts_count,
             archive_page_size)
         if page_current > archive_page_count or page_current < 1:
             pass
         elif archive_posts_count > 0:
             post_ids = db.query(Post.id).\
                 filter(Post.ispass==True).\
                 filter(Post.pubyear==year).\
                 filter(Post.pubmonth==month).\
                 order_by( 
                     Post.id.desc()
                 )[start_index:start_index+archive_page_size]
             ids = (x[0] for x in post_ids)
             d['post_list'] = db.query(Post).\
                 filter(Post.id.in_(ids)).\
                 all()
             d['post_page_count'] = archive_page_count
             d['post_page_current'] = page_current
             d['mode_type'] = 'archive' 
             d['first_day'] = first_day 
             return self.render('list.html', **d)
     self.notfound()
Example #17
 def get(self):
     self.set_header('Content-Type','text/xml')
     db = self.db
     d = load_info(db).copy()
     site_url = d['site_url']
     rss_size = utils.get_int(d.get('rss_size', ''), 10)
     items = []
     last_update = None
     if rss_size > 0:
         postIds = db.query(Post.id).\
             filter(Post.ispass == True).\
             order_by( 
                 Post.id.desc()
             )[0:rss_size]
         postIds = (x[0] for x in postIds)
         posts = db.query(Post).\
             filter(Post.id.in_(postIds)).\
             all()
         for post in posts:
             items.append({
                 'title':post.title,
                 'link': utils.urlwrite(site_url, post.url),
                 'pubDate': post.\
                     pubdate.strftime('%a, %d %b %Y %H:%M:%S GMT'),
                 'dc:creator': post.user.nickname,
                 'guid':{
                     '_text': utils.urlwrite(site_url, post.url),
                     '__isPermaLink': 'true'
                 },
                 'content:encoded': post.content 
             })
         if posts and len(posts) > 0:
             last_update = posts[0].\
                 pubdate.strftime('%a, %d %b %Y %H:%M:%S GMT')
     xmldict = {'rss': {
         '__version':'2.0',
         '__xmlns:content': 'http://purl.org/rss/1.0/modules/content/',
         '__xmlns:wfw': 'http://wellformedweb.org/CommentAPI/',
         '__xmlns:dc': 'http://purl.org/dc/elements/1.1/',
         'channel': {
             'title': d['site_name'],
             'link': d['site_url'],
             'description': d['site_description'],
             'pubDate': last_update,
             'language': 'zh-CN',
             'item': items# end item
         }# end channel 
     }}
     return self.finish(utils.dict_to_xml(xmldict))
Example #18
def cleanup_results(results_list):
    """ Remove duplicate results, hash results without an info_hash, and sort by seeders

    Args:
        results_list (list): Results to clean-up

    Returns:
        list: De-duplicated, hashed and sorted results
    """
    if len(results_list) == 0:
        return []

    hashes = []
    filtered_list = []
    for result in results_list:
        if not result['seeds']:
            continue

        if not result['uri']:
            if not result['name']:
                continue
            try:
                log.warning('[%s] No URI for %s' % (result['provider'][16:-8], repr(result['name'])))
            except Exception as e:
                import traceback
                log.warning("%s logging failed with: %s" % (result['provider'], repr(e)))
                map(log.debug, traceback.format_exc().split("\n"))
            continue

        hash_ = result['info_hash'].upper()

        if not hash_:
            if result['uri'] and result['uri'].startswith('magnet'):
                hash_ = Magnet(result['uri']).info_hash.upper()
            else:
                hash_ = hashlib.md5(result['uri']).hexdigest()

        try:
            log.debug("[%s] Hash for %s: %s" % (result['provider'][16:-8], repr(result['name']), hash_))
        except Exception as e:
            import traceback
            log.warning("%s logging failed with: %s" % (result['provider'], repr(e)))
            map(log.debug, traceback.format_exc().split("\n"))

        if not any(existing == hash_ for existing in hashes):
            filtered_list.append(result)
            hashes.append(hash_)

    return sorted(filtered_list, key=lambda r: (get_int(r['seeds'])), reverse=True)
Example #19
    def post(self):
        db = self.db 
        site_name = self.get_argument("site_name", "")
        site_url = self.get_argument("site_url", "")
        site_keywords = self.get_argument("site_keywords", "")
        site_description = self.get_argument("site_description", "")

        posts_pagesize = utils.get_int(
                self.get_argument("posts_pagesize", ""), 10)
        comments_pagesize = utils.get_int(
                self.get_argument("comments_pagesize", ""), 10)
        rss_size = utils.get_int(self.get_argument("rss_size", ""), 10)

        domains.update_option(db, "site_name", site_name)
        domains.update_option(db, "site_url", site_url)
        domains.update_option(db, "site_keywords", site_keywords)
        domains.update_option(db, "site_description", site_description)

        domains.update_option(db, "posts_pagesize", posts_pagesize)
        domains.update_option(db, "comments_pagesize", comments_pagesize)
        domains.update_option(db, "rss_size", rss_size)
        db.commit()
        self.set_flash({'error': False, 'msg': u"更新成功"})  # msg: "Update successful"
        self.redirect(u"/admin/settings")
Example #20
def load_info(db):
    d = {}
    # options table
    options = db.query(Option).all()
    for x in options:
        d[x.name] = x.value
    posts_available = get_int(d.get('posts_available',''), -1)
    posts_total = get_int(d.get('posts_total',''), -1)
    if posts_available == -1: 
        posts_available = db.query(func.count(Post.id)).\
            filter(Post.ispass == True).\
            scalar()

    if posts_total== -1: 
        posts_total= db.query(func.count(Post.id)).\
            scalar()

    d['posts_available'] = posts_available
    d['posts_total'] = posts_total

    # archive
    archives_count = get_archives_count(db)
    d['archives_count'] = archives_count
    return d
Example #21
def delete(args, timesheet):
    if args['<date>']:
        day = datetime.timetuple(parse(args['<date>']))
        date_response = timesheet.get_day(day[7], day[0])
        data = [date_response['day_entries']]

        for sublist in data:
            for k, entry in enumerate(sublist):
                print str.format(
                    STATUS_TASK_FORMAT,
                    task = entry['task'],
                    client = entry['client'],
                    note = entry['notes'],
                    hours = entry['hours'],
                    date = entry['spent_at'],
                    indicator = k+1
                )

        if args['-a'] or args['--all']:
            if raw_input("Confirm: Delete all entries for this date? ").lower() in 'yes':
                for sublist in data:
                    for entry in sublist:
                        timesheet.delete(entry['id'])
        else:
            selection = get_int('Delete which entry? ') - 1
            print str.format(
                STATUS_TASK_FORMAT,
                task = data[0][selection]['task'],
                client = data[0][selection]['client'],
                note = data[0][selection]['notes'],
                hours = data[0][selection]['hours'],
                date = data[0][selection]['spent_at'],
                indicator = '-'
            )
            if raw_input("Confirm: Delete this entry? ").lower() in 'yes':
                try:
                    timesheet.delete(data[0][selection]['id'])
                except:
                    print 'Entry deleted.'
            else:
                print 'Deletion aborted.'
Example #22
 def get(self, page_current=1):
     db = self.db 
     d = {}
     d['user_name'] = self.get_current_user()
     d['flash'] = self.get_flash()
     page_size = 10
     page_current = utils.get_int(page_current, 1)
     start_index = page_size * (page_current-1)
     total_count = db.query(func.count(Post.id)).\
         scalar()
     page_count = utils.get_pagecount(total_count, page_size)
     items = db.query(Post).\
         order_by(Post.id.desc())[start_index:start_index+page_size]
     start_last = start_index + len(items)
     d['start_index'] = start_index
     d['start_last'] = start_last
     d['total_count'] = total_count
     d['page_count'] = page_count
     d['page_current'] = page_current 
     d['posts'] = items
     self.render('posts.html', **d)
Example #23
    def start(self, restart=False, reset=False):
        """Method to run actual detection test loop in a separate thread"""

        if restart or self.running:
            self.stop()
        if reset:
            self._hash_match_reset()
        # Reset detector data if episode has changed
        if not self.hashes.is_valid(self.state.get_season_identifier(),
                                    utils.get_int(self.state.get_episode())):
            self._init_hashes()

        # If a previously detected timestamp exists then use it
        stored_timestamp = self.past_hashes.timestamps.get(self.hashes.episode)
        if stored_timestamp and not reset:
            self.log('Stored credits timestamp found')
            self.state.set_popup_time(detected_time=stored_timestamp)

        # Otherwise run the detector in a new thread
        else:
            self.thread = utils.run_threaded(self._run)
Example #24
    def get(self, page_current=1):
        db = self.db 
        d = {}
        d['user_name'] = self.get_current_user()
        d['flash'] = self.get_flash()
        tag_page_size = 10
        tag_page_current = utils.get_int(page_current, 1)
        start_index = tag_page_size * (tag_page_current-1)
        tag_count = db.query(func.count(Tag.id)).\
            scalar()
        tag_page_count = utils.get_pagecount(tag_count, tag_page_size)
        tags = db.query(Tag).\
            order_by(Tag.id.desc())[start_index:start_index+tag_page_size]
        start_last = start_index + len(tags)
        d['start_index'] = start_index
        d['start_last'] = start_last
        d['total_count'] = tag_count
        d['page_count'] = tag_page_count
        d['page_current'] = tag_page_current 
        d['tags'] = tags

        self.render('tags.html', **d)
Example #25
 def get(self, page_current=1):
     db = self.db
     d = load_info(db).copy()
     tag_page_size = 100
     tag_page_current = utils.get_int(page_current, 1)
     start_index = tag_page_size * (tag_page_current-1)
     tag_count = db.query(func.count(Tag.id)).\
         filter(Tag.nums>0).\
         scalar()
     tag_page_count = utils.get_pagecount(tag_count, tag_page_size)
     tag_max_nums = 1
     tag_maxs = db.query(Tag).\
         order_by(Tag.nums.desc())[:1]
     if tag_maxs and len(tag_maxs) == 1:
         tag_max_nums = tag_maxs[0].nums
     tags = db.query(Tag).\
         filter(Tag.nums>0).\
         order_by(Tag.id.desc())[start_index:start_index+tag_page_size]
     d['tag_max_nums'] = tag_max_nums
     d['tag_count'] = tag_count
     d['tag_page_count'] = tag_page_count
     d['tag_page_current'] = tag_page_current 
     d['tags'] = tags
     return self.render('tags.html', **d)
Example #26
def add(args, config, timesheet):
    today = timesheet.today

    if args['<alias>']:
        try:
            client_id = config.get(args['<alias>'], 'client')
            client_name = config.get(args['<alias>'], 'clientname')
            task_id = config.get(args['<alias>'], 'task')
            task_name = config.get(args['<alias>'], 'taskname')
            note = args['<note>']
            hours = args['<hours>']

            data = {"notes": args['<note>'], "project_id": client_id, "hours": args['<hours>'], "task_id": task_id}
        except ConfigParser.NoSectionError:
            print "No alias named " + args['<alias>'] + " found."
            sys.exit()

    if not args['<alias>']:
        #Get the number of hours
        if not args['<hours>']:
            hours = get_int("How many hours to enter? ")

        #User selects a client from the list
        for key, value in enumerate(today['projects']):
            print key + 1, value['name']
        client_selection = get_int("Select a project: ")
        client = today['projects'][client_selection-1]
        client_name = client['name']

        #User selects a task from the client task list
        for key, task in enumerate(client['tasks']):
            print key + 1, task['name']
        task_selection = get_int("Select a task: ")
        task = client['tasks'][task_selection-1]
        task_name = task['name']

        #User adds a note to the entry
        if not args['<note>']:
            note = raw_input("Leave a note (Skip with enter): ")

        #Prompt the user to set an alias.
        set_alias(client['name'], client['id'], task['name'], task['id'])

        data = {"notes": note, "project_id":client['id'], "hours":hours, "task_id":task['id']}

    if args['--date'] or args['-d']:
        date = {'spent_at': parse(args['<date>'])}
        data.update(date)
    else:
        date = datetime.today()

    timesheet.add(data)
    print "Your entry has been saved."
    print str.format(
            STATUS_TASK_FORMAT,
            client = client_name,
            task = task_name,
            note = note,
            hours = hours,
            date = date,
            indicator = '+'
        )
Example #27
#reprogram the previous assignment, but also prompt the user for the 'fill character'.

import utils

def grid2(n, fill):
    for i in range(0, n):
        print fill * n


n = utils.get_int("Enter an int ", "That's not int")
fill = raw_input("Enter any single character ")

grid2(n, fill)
Example #28
def prompt():
    a = utils.get_int("Enter a positive integer:  ", "That's not int")
    b = utils.get_int("Enter another positive integer: ", "That's not int")
    print gcd(a, b)
Example #29
# Prompt the user for two positive integers and print their greatest common
# divisor (using Euclid's method). The GCD is the largest number which will
# divide into both original numbers.

import utils

def prompt():
    a = utils.get_int("Enter a positive integer:  ", "That's not int")
    b = utils.get_int("Enter another positive integer: ", "That's not int")
    print gcd(a, b)

    
def gcd(i, j):
    r = i % j
    while r != 0:
        i = j
        j = r
        r = i % j
    return j

print gcd(utils.get_int("Enter a positive integer:  ", "That's not int"),  utils.get_int("Enter another positive integer: ", "That's not int"))

#works
Example #30
import math
import utils

def find_e(nterm):
    """This finds the value of e to a specified number of terms."""
    n = 0
    x = 0
    while n <= nterm:
        a = float(1)/math.factorial(n)
        x = x + a
        n = n + 1
    return x

print find_e(utils.get_int("How many terms to calculate? ", "Integer, please."))

#works
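
As a quick non-interactive check of the series above, ten terms of 1/n! already agree with e to five decimal places:

print(round(find_e(10), 5))  # 2.71828
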
Example #31
    def get_meta(self):
        info_labels = self.info_labels_init.copy()

        if self.tmdb_api != '':
            url = 'https://api.themoviedb.org/3/find/%s?external_source=imdb_id&api_key=%s' % (
                self.imdb_id, self.tmdb_api)
            Browser.open(url)
            response = read_json(Browser.content)
            if read_dict(response, 'movie_results', False) and self.type_video == 'MOVIE':
                for item in response['movie_results']:
                    info_labels["tmdb_id"] = self.tmdb_id = read_dict(item, 'id')
                    info_labels["code"] = self.imdb_id
                    info_labels["type"] = "movie"
                    info_labels["fanart"] = self.base_url + read_dict(item, "backdrop_path")
                    info_labels['cover_url'] = self.base_url + read_dict(item, "poster_path")
                    info_labels["originaltitle"] = read_dict(item, "original_title")
                    info_labels["premiered"] = read_dict(item, "release_date")
                    info_labels["aired"] = read_dict(item, "release_date")
                    info_labels["year"] = read_dict(item, "release_date")[:4]
                    info_labels["title"] = read_dict(item, "title")
                    info_labels["sorttitle"] = read_dict(item, "title").replace(" ", "")
                    info_labels["plot"] = read_dict(item, "overview")
                    info_labels["votes"] = read_dict(item, "vote_count")

                    # Read Details
                    url = 'http://api.themoviedb.org/3/movie/%s?api_key=%s' % (self.tmdb_id, self.tmdb_api)
                    Browser.open(url)
                    details = read_json(Browser.content)
                    info_labels["status"] = read_dict(details, "status")
                    info_labels["duration"] = read_dict(details, "runtime", 0) * 60
                    info_labels["rating"] = read_dict(details, "popularity")
                    info_labels["tagline"] = read_dict(details, "tagline")

                    # Read genres
                    genres = read_dict(details, "genres", [])
                    for genre in genres:
                        info_labels["genre"] += genre["name"] + ", "

                    # Read studios
                    studios = read_dict(details, "production_companies", [])
                    for studio in studios:
                        info_labels["studio"] += studio["name"] + ", "

                    # Read cast
                    url = 'http://api.themoviedb.org/3/movie/%s/credits?api_key=%s' % (self.tmdb_id, self.tmdb_api)
                    Browser.open(url)
                    details = read_json(Browser.content)
                    cast_list = []
                    cast_and_role_list = []
                    for cast in read_dict(details, 'cast', []):
                        cast_list.append(cast["name"])
                        cast_and_role_list.append((cast["name"], cast["character"]))
                    info_labels["cast"] = cast_list
                    info_labels["castandrole"] = cast_and_role_list

                    # read crew
                    info_labels["director"] = ''
                    info_labels["credits"] = ''
                    for crew in read_dict(details, 'crew', []):
                        info_labels["credits"] += " %s - %s," % (crew["name"], crew["job"])
                        if crew["job"].lower() == "director":
                            info_labels["director"] = crew["name"]
                        if crew["job"].lower() == "writer":
                            info_labels["writer"] = crew["name"]

                    # read trailer
                    url = 'http://api.themoviedb.org/3/movie/%s/videos?api_key=%s' % (self.tmdb_id, self.tmdb_api)
                    Browser.open(url)
                    trailers = read_json(Browser.content)
                    for trailer in trailers.get("results", []):
                        info_labels["trailer"] = 'http://www.youtube.com/watch?v=%s' % trailer["key"]
                        break

                    # other information
                    info_labels["top250"] = 0
                    info_labels["tracknumber"] = 0
                    info_labels["playcount"] = 0
                    info_labels["overlay"] = 0
                    info_labels["mpaa"] = ''
                    info_labels["tvshowtitle"] = ''
                    info_labels["lastplayed"] = ''
                    info_labels["album"] = ''
                    info_labels["artist"] = ['']
                    info_labels["dateadded"] = ''

            elif read_dict(response, 'tv_results', False):
                for item in response['tv_results']:
                    info_labels["tmdb_id"] = self.tmdb_id = read_dict(item, 'id')
                    info_labels["code"] = self.imdb_id
                    info_labels["type"] = "series"
                    info_labels["fanart"] = self.base_url + read_dict(item, "backdrop_path")
                    info_labels['cover_url'] = self.base_url + read_dict(item, "poster_path")
                    info_labels["originaltitle"] = read_dict(item, "original_name")
                    info_labels["premiered"] = read_dict(item, "first_air_date")
                    info_labels["aired"] = read_dict(item, "first_air_date")
                    info_labels["year"] = read_dict(item, "first_air_date")[:4]
                    info_labels["title"] = read_dict(item, "name")
                    info_labels["sorttitle"] = read_dict(item, "name").replace(" ", "")
                    info_labels["plot"] = read_dict(item, "overview")
                    info_labels["votes"] = read_dict(item, "vote_count")

                    # Read Details
                    url = 'http://api.themoviedb.org/3/tv/%s?api_key=%s' % (self.tmdb_id, self.tmdb_api)
                    Browser.open(url)
                    details = read_json(Browser.content)
                    info_labels["status"] = read_dict(details, "status")
                    info_labels["duration"] = read_dict(details, "episode_run_time", [0])[0] * 60
                    info_labels["rating"] = read_dict(details, "popularity")
                    info_labels["tagline"] = ""

                    # Read genres
                    genres = read_dict(details, "genres", [])
                    for genre in genres:
                        info_labels["genre"] += genre["name"] + ", "

                    # Read studios
                    studios = read_dict(details, "production_companies", [])
                    for studio in studios:
                        info_labels["studio"] += studio["name"] + ", "

                    # Read cast
                    url = 'http://api.themoviedb.org/3/movie/%s/credits?api_key=%s' % (self.tmdb_id, self.tmdb_api)
                    Browser.open(url)
                    details = read_json(Browser.content)
                    cast_list = []
                    cast_and_role_list = []
                    for cast in read_dict(details, 'cast', []):
                        cast_list.append(cast["name"])
                        cast_and_role_list.append((cast["name"], cast["character"]))
                    info_labels["cast"] = cast_list
                    info_labels["castandrole"] = cast_and_role_list

                    # read crew
                    info_labels["director"] = ''
                    info_labels["credits"] = ''
                    for crew in read_dict(details, 'crew', []):
                        info_labels["credits"] += " %s - %s," % (crew["name"], crew["job"])
                        if crew["job"].lower() == "director":
                            info_labels["director"] = crew["name"]
                        if crew["job"].lower() == "writer":
                            info_labels["writer"] = crew["name"]

                    # read trailer
                    url = 'http://api.themoviedb.org/3/tv/%s/videos?api_key=%s' % (self.tmdb_id, self.tmdb_api)
                    Browser.open(url)
                    trailers = read_json(Browser.content)
                    for trailer in trailers.get("results", []):
                        info_labels["trailer"] = 'http://www.youtube.com/watch?v=%s' % trailer["key"]
                        break

                    # other information
                    info_labels["top250"] = 0
                    info_labels["tracknumber"] = 0
                    info_labels["playcount"] = 0
                    info_labels["overlay"] = 0
                    info_labels["mpaa"] = ''
                    info_labels["tvshowtitle"] = info_labels["title"]
                    info_labels["lastplayed"] = ''
                    info_labels["album"] = ''
                    info_labels["artist"] = ['']
                    info_labels["dateadded"] = ''
        else:
            # Search in http://www.omdbapi.com/
            # Please visit the page and donate
            url = 'http://www.omdbapi.com/?i=%s&plot=full&r=json' % self.imdb_id
            Browser.open(url)
            response = read_json(Browser.content)
            info_labels["tmdb_id"] = ""
            info_labels["code"] = read_dict(response, "imdbID")
            info_labels["type"] = read_dict(response, "Type")
            info_labels['fanart'] = read_dict(response, "Poster")
            info_labels['cover_url'] = read_dict(response, "Poster")
            info_labels["originaltitle"] = read_dict(response, "Title")
            info_labels["premiered"] = read_dict(response, "Released")
            info_labels["aired"] = read_dict(response, "Released")
            info_labels["year"] = read_dict(response, "Year")
            info_labels["title"] = read_dict(response, "Title")
            if info_labels["type"] == "series":
                info_labels["tvshowtitle"] = read_dict(response, "Title")
            info_labels["sorttitle"] = read_dict(response, "Title").replace(" ", "")
            info_labels["plot"] = read_dict(response, "Plot")
            info_labels["votes"] = read_dict(response, "imdbVotes")
            info_labels["status"] = "Released"
            info_labels["duration"] = get_int(read_dict(response, "Runtime", "0")) * 60
            info_labels["rating"] = read_dict(response, "imdbRating")
            info_labels["tagline"] = read_dict(response, "Awards")
            info_labels["genre"] = read_dict(response, "Genre")
            info_labels["studios"] = ""
            info_labels["cast"] = read_dict(response, "Actors")
            info_labels["castandrole"] = ""
            info_labels["director"] = read_dict(response, "Director")
            info_labels["writer"] = read_dict(response, "Writer")
            info_labels["credits"] = read_dict(response, "Writer")
            info_labels["trailer"] = ""

            # other information
            info_labels["episode"] = 0
            info_labels["season"] = 0
            info_labels["top250"] = 0
            info_labels["tracknumber"] = 0
            info_labels["playcount"] = 0
            info_labels["overlay"] = 0
            info_labels["mpaa"] = read_dict(response, "Rated")
            info_labels["tvshowtitle"] = ''
            info_labels["lastplayed"] = ''
            info_labels["album"] = ''
            info_labels["artist"] = ['']
            info_labels["dateadded"] = ''
        # Common information
        info_labels["episode"] = self.info_title.get("episode", 0)
        info_labels["season"] = self.info_title.get("season", 0)
        return info_labels
Example #32
def main():
    s = utils.get_int("Give us an int: ", "That's not int")
    table(s)
Example #33
    def process_keywords(self, provider, text):
        """ Processes the query payload from a provider's keyword definitions

        Args:
            provider (str): Provider ID
            text     (str): Keyword placeholders from definitions, ie. {title}

        Returns:
            str: Processed query keywords
        """
        keywords = self.read_keywords(text)

        for keyword in keywords:
            keyword = keyword.lower()
            if 'title' in keyword:
                title = self.info["title"]
                language = definitions[provider]['language']
                use_language = None
                if ':' in keyword:
                    use_language = keyword.split(':')[1]
                if provider not in self.language_exceptions and \
                   (use_language or self.kodi_language) and \
                   'titles' in self.info and self.info['titles']:
                    try:
                        if self.kodi_language and self.kodi_language in self.info['titles']:
                            use_language = self.kodi_language
                        if use_language not in self.info['titles']:
                            use_language = language
                        if use_language in self.info['titles'] and self.info['titles'][use_language]:
                            title = self.info['titles'][use_language]
                            title = self.normalize_name(title)
                            log.info("[%s] Using translated '%s' title %s" % (provider, use_language,
                                                                              repr(title)))
                            log.debug("[%s] Translated titles from Quasar: %s" % (provider,
                                                                                  repr(self.info['titles'])))
                    except Exception as e:
                        import traceback
                        log.error("%s failed with: %s" % (provider, repr(e)))
                        map(log.debug, traceback.format_exc().split("\n"))
                text = text.replace('{%s}' % keyword, title)

            if 'year' in keyword:
                text = text.replace('{%s}' % keyword, str(self.info["year"]))

            if 'season' in keyword:
                if '+' in keyword:
                    keys = keyword.split('+')
                    if keys[1] == "t411season":
                        season = str(t411season(self.info['season']))
                    else:
                        season = str(self.info["season"] + get_int(keys[1]))
                elif ':' in keyword:
                    keys = keyword.split(':')
                    season = ('%%.%sd' % keys[1]) % self.info["season"]
                else:
                    season = '%s' % self.info["season"]
                text = text.replace('{%s}' % keyword, season)

            if 'episode' in keyword:
                if '+' in keyword:
                    keys = keyword.split('+')
                    if keys[1] == "t411episode":
                        episode = str(t411episode(self.info['episode']))
                    else:
                        episode = str(self.info["episode"] + get_int(keys[1]))
                elif ':' in keyword:
                    keys = keyword.split(':')
                    episode = ('%%.%sd' % keys[1]) % self.info["episode"]
                else:
                    episode = '%s' % self.info["episode"]
                text = text.replace('{%s}' % keyword, episode)

        return text
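
The '{season:2}'-style placeholders zero-pad by building a printf-style format string from the width that follows the colon. A standalone illustration of that formatting step (values are hypothetical):

keyword = "season:2"
width = keyword.split(':')[1]
season = 7
print(('%%.%sd' % width) % season)  # prints 07
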
Example #34
def reddit_post_worker(idx, entry, q_out, delay=0):
    import datetime
    from utils import pretty_datediff, clean_str, get_int, format_description
    from reddit import determine_if_video_media_from_reddit_json
    from domains import sitesBase

    if delay>0:
        xbmc.Monitor().waitForAbort( float(delay)/1000 )         #xbmc.sleep(delay)
    try:
        credate = ""
        is_a_video=False
        title_line2=""
        thumb_w=0; thumb_h=0

        t_on = translation(32071)  #"on"
        #t_pts = u"\U0001F4AC"  # translation(30072) #"cmnts"  comment bubble symbol. doesn't work
        t_pts = u"\U00002709"  # translation(30072)   envelope symbol
        t_up = u"\U000025B4"  #u"\U00009650"(up arrow)   #upvote symbol

        #on 3/21/2017 we're adding a new feature that lets users view their saved posts by entering /user/username/saved as their subreddit.
        #  in addition to saved posts, users can also save comments. we need to handle it by checking for "kind"
        kind=entry.get('kind')  #t1 for comments  t3 for posts
        data=entry.get('data')
        if data:
            if kind=='t3':
                title = clean_str(data,['title'])
                description=clean_str(data,['media','oembed','description'])
                post_selftext=clean_str(data,['selftext'])

                description=post_selftext+'\n'+description if post_selftext else description
                domain=clean_str(data,['domain'])
            else:
                title=clean_str(data,['link_title'])
                description=clean_str(data,['body'])
                domain='Comment post'

            description=format_description(description, hide_text_in_parens=False)
            first_link_in_description=None

            #title=strip_emoji(title) #an emoji in the title was causing a KeyError  u'\ud83c'
            title=format_description(title)

            is_a_video = determine_if_video_media_from_reddit_json(entry)
            log("  POS%s%cTITLE%.2d=%s d=%d" %( kind, ("v" if is_a_video else " "), idx, title,delay ))
            #log("description%.2d=%s" %(idx,description))
            post_id = entry['kind'] + '_' + data.get('id')  #same as entry['data']['name']
            #log('  %s  %s ' % (post_id, entry['data']['name'] ))
            commentsUrl = urlMain+clean_str(data,['permalink'])
            #log("commentsUrl"+str(idx)+"="+commentsUrl)
            try:
                aaa = data.get('created_utc')
                credate = datetime.datetime.utcfromtimestamp( aaa )
                now_utc = datetime.datetime.utcnow()
                pretty_date=pretty_datediff(now_utc, credate)
                credate = str(credate)
            except (AttributeError,TypeError,ValueError):
                credate = ""

            subreddit=clean_str(data,['subreddit'])
            author=clean_str(data,['author'])
            #log("     DOMAIN%.2d=%s" %(idx,domain))

            #post_excluded_from() is a misnomer. it just returns true if subreddit is in csv-list
            if (post_excluded_from( use_first_link_in_textpost_for_the_following_subreddits, subreddit) or
                post_excluded_from( use_first_link_in_textpost_for_the_following_subreddits, 'all')     and
                domain.startswith('self.')):
                first_link_in_description=sitesBase.get_first_url_from(description)
                #override the domain so that bottom right of gui matches link
                if first_link_in_description:
                    domain = '({uri.netloc})'.format( uri=urlparse.urlparse( first_link_in_description ) )

            ups = data.get('score',0)       #downs not used anymore
            num_comments = data.get('num_comments',0)

            d_url=clean_str(data,['url'])
            link_url=clean_str(data,['link_url'])
            media_oembed_url=clean_str(data,['media','oembed','url'])
#            log('   kind     ='+kind)
#            log('    url     ='+d_url)
#            log('    link_url='+link_url)
#            log('   permalink='+clean_str(data,['permalink']))
#            log('    media_oembed_url='+media_oembed_url)
            media_url=next((item for item in [first_link_in_description,d_url,link_url,media_oembed_url] if item ), '')
            #log("     MEDIA%.2d=%s" %(idx,media_url))

            thumb=clean_str(data,['thumbnail'])

            #media_w=get_int(data,['media','oembed','width'])
            #media_h=get_int(data,['media','oembed','height'])
            #log('  media_w='+repr(media_w)+' h='+repr(media_h) )

            #try:log('  media_w='+repr(data.get('media')['oembed']['width']  ) )
            #except:pass

            if not thumb.startswith('http'): #in ['nsfw','default','self']:  #reddit has a "default" thumbnail (alien holding camera with "?")
                thumb=""

            if thumb=="":
                thumb=clean_str(data,['media','oembed','thumbnail_url']).replace('&amp;','&')

            #a blank preview image will be replaced with poster_url from parse_reddit_link() for domains that support it
            preview=clean_str(data,['preview','images',0,'source','url']).replace('&amp;','&') #data.get('preview')['images'][0]['source']['url'].encode('utf-8').replace('&amp;','&')
            #log('  preview='+repr(preview))
            #try:
            thumb_h=get_int(data,['preview','images',0,'source','height'])#float( data.get('preview')['images'][0]['source']['height'] )
            thumb_w=get_int(data,['preview','images',0,'source','width']) #float( data.get('preview')['images'][0]['source']['width'] )
            #except (AttributeError,TypeError,ValueError):
                #log("   thumb_w _h EXCEPTION:="+ str( sys.exc_info()[0]) + "  " + str(e) )
            #   thumb_w=0; thumb_h=0

            #preview images are 'keep' stretched to fit inside 1080x1080.
            #  if preview image is smaller than the box we have for thumbnail, we'll use that as thumbnail and not have a bigger stretched image
            if thumb_w > 0 and thumb_w < 280:
                #log('*******preview is small ')
                thumb=preview
                thumb_w=0; thumb_h=0; preview=""

            over_18=data.get('over_18')

            title_line2=""
            title_line2 = "[I][COLOR dimgrey]%d%c %s %s [B][COLOR cadetblue]r/%s[/COLOR][/B] (%d) %s[/COLOR][/I]" %(ups,t_up,pretty_date,t_on, subreddit,num_comments, t_pts)

            liz=addLink(title=title,
                    title_line2=title_line2,
                    iconimage=thumb,
                    previewimage=preview,
                    preview_w=thumb_w,
                    preview_h=thumb_h,
                    domain=domain,
                    description=description,
                    credate=credate,
                    reddit_says_is_video=is_a_video,
                    commentsUrl=commentsUrl,
                    subreddit=subreddit,
                    link_url=media_url,
                    over_18=over_18,
                    posted_by=author,
                    num_comments=num_comments,
                    post_id=post_id,
                    )

            q_out.put( [idx, liz] )  #we put the idx back for easy sorting

    except Exception as e:
        log( '  #reddit_post_worker EXCEPTION:' + repr(sys.exc_info()) +'--'+ str(e) )
Example #35
import utils

def pyramid2(n, x):
    """This draws a pyramid of a given size using asterisks."""
    print "*"*x
    x = x+1
    if x <= n:
        pyramid2(n, x)
    

pyramid2(utils.get_int("Please enter an int: ", "That's not int"), 1)
Example #36
# Prompt the user for a number and a tolerance and calculate the number's
# square root to the tolerance (accuracy) specified, using the Newton-Raphson
# iteration method (not by importing math and using math.sqrt!). 

import math
import utils 

def root(n, delta):
    xold = n/2.0
    xnew = (n/xold+xold)/2.0
    while math.fabs(xold-xnew) >= delta:
        xold = xnew
        xnew = (n/xold+xold)/2.0
    return round(xnew, 3)

    #works

print root(utils.get_int("Number to factor: ", "That's not int"), utils.get_float("Tolerance: ", "That's not float" ))
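
As a quick non-interactive check of the iteration, the square root of 2 to a tolerance of 0.001 comes out as expected:

print(root(2, 0.001))  # 1.414
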


#utils.get_float("Tolerance: ", "That's not float"))