Example #1
def add(self, files):
    dir = path(cc_file(self.file, self.version))
    # Diff this element version against its ClearCase predecessor.
    diff = cc_exec(['diff', '-diff_format', '-pred', dir], errors=False)

    def getFile(line):
        return join(self.file, line[2:max(line.find('  '), line.find(FS + ' '))])

    for line in diff.split('\n'):
        sym = line.find(' -> ')
        if sym >= 0:
            continue
        if line.startswith('<'):
            # Entry no longer present in this version: remove it from git and the cache.
            git_exec(['rm', '-r', getFile(line)], errors=False)
            cache.remove(getFile(line))
        elif line.startswith('>'):
            # New entry in this version.
            added = getFile(line)
            cc_added = join(CC_DIR, added)
            if not exists(cc_added) or isdir(cc_added) or added in files:
                continue
            history = cc_exec(['lshistory', '-fmt', '%o%m|%Nd|%Vn\\n', added], errors=False)
            if not history:
                continue
            date = cc_exec(['describe', '-fmt', '%Nd', dir])

            def f(s):
                # Keep checkin version events that predate this version, on an included branch.
                return s[0] == 'checkinversion' and s[1] < date and filterBranches(s[2], True)

            versions = list(filter(f, map(lambda x: x.split('|'), history.split('\n'))))
            if not versions:
                print("It appears that you may be missing a branch in the includes section of your gitcc config for file '%s'." % added)
                continue
            self._add(added, versions[0][2].strip())
Example #2
def get(url):
    c = conn.cursor()
    c.execute("SELECT * FROM pages WHERE url = ?", (url, ))
    row = c.fetchone()

    utcnow = int(datetime.datetime.utcnow().strftime("%s"))
    if row is not None and row[1] > utcnow:
        c.execute("DELETE FROM pages WHERE url = ?", (url, ))
        conn.commit()
        cache.remove(url)
        row = None

    if row is None:
        response = urllib2.urlopen(url)
        html = response.read()

        # Parse the Expires header into a datetime (first six fields of the
        # parsed tuple: year, month, day, hour, minute, second), then into a
        # Unix timestamp.
        expiry = response.info().getheader("Expires")
        expiry = datetime.datetime(*eut.parsedate(expiry)[0:6])
        expiry = int(expiry.strftime("%s"))

        c.execute("INSERT INTO pages VALUES (?,?)", (url, expiry))

        cache.write(url, html)

        conn.commit()
    else:
        html = cache.get(url)
    return BeautifulSoup(html, "html5lib")
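
Note: the queries in this example imply a two-column pages table keyed by URL with an integer expiry timestamp. The schema itself is not part of the snippet, so the following setup is only a plausible sketch of what the code expects:

import sqlite3

# Assumed schema (not shown in the snippet above): the SELECT/INSERT use
# exactly two columns, and row[1] is compared against a Unix timestamp.
conn = sqlite3.connect("pages.db")
conn.execute("CREATE TABLE IF NOT EXISTS pages (url TEXT PRIMARY KEY, expiry INTEGER)")
conn.commit()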
Example #3
File: tasks.py  Project: mgway/skillbook
def update_character_sheet(key_id, vcode, mask, character_id):
    data = eveapi.character_sheet(key_id, vcode, mask, character_id)
    # Fudge the cached_until timer because it always returns ~30 seconds, and we
    # don't care to update that often
    data.cached_until = data.cached_until + datetime.timedelta(minutes=30)
    db.save_character_sheet(data)
    cache.remove("character-sheet:%s" % character_id)
    cache.remove("character-skills:%s" % character_id)
    
    stat = {'cached_until': data.cached_until,
            'response_code': 200,
            'response_error': '',
            'character_id': character_id,
            'api_method': 'CharacterSheet',
            'ignored': False,
            'key_id': key_id}
    db.save_update_status(stat)
    
    # Handle clone status alert
    alerts = db.get_alert('CLONE_CAPACITY', character_id)
    skillpoints = sum(int(skill.skillpoints) for skill in data.skills.rows)
    
    for alert in alerts:
        cooldown = datetime.datetime.utcnow() + datetime.timedelta(minutes=alert.interval)
        remaining = int(data.cloneskillpoints) - skillpoints
        if remaining < alert.option_1_value:
            mail.send_alert(alert.user_id, alert, remaining)
            db.update_alert(alert.alert_type_id, alert.user_id, alert.character_id, cooldown)
Example #4
File: tasks.py  Project: mgway/skillbook
def update_character_queue(key_id, vcode, mask, character_id):
    data = eveapi.skill_queue(key_id, vcode, mask, character_id)
    db.save_skill_queue(character_id, data.skillqueue)
    cache.remove("character-queue:%s" % character_id)
    
    stat = {'cached_until': data.cached_until,
            'response_code': 200,
            'response_error': '',
            'character_id': character_id,
            'api_method': 'SkillQueue',
            'ignored': False,
            'key_id': key_id}
    db.save_update_status(stat)
    
    # Handle queue length alert
    alerts = db.get_alert('QUEUE_TIME', character_id)
    for alert in alerts:
        cooldown = datetime.datetime.utcnow() + datetime.timedelta(minutes=alert.interval)
        if len(data.skillqueue.rows) == 0:
            return
        last_skill = data.skillqueue.rows[-1]
        if last_skill.endtime != '':
            end_time = datetime.datetime.strptime(last_skill.endtime, '%Y-%m-%d %H:%M:%S')
            if end_time - datetime.timedelta(hours=int(alert.option_1_value)) < datetime.datetime.utcnow():
                mail.send_alert(alert.user_id, alert, end_time)
                db.update_alert(alert.alert_type_id, alert.user_id, alert.character_id, cooldown)
Example #5
File: tasks.py  Project: mgway/skillbook
def update_character_info_public(key_id, character_id):
    data = eveapi.character_info(character_id)
    db.save_character_info(data)
    cache.remove("character-sheet:%s" % character_id)
    
    stat = {'cached_until': data.cached_until,
            'response_code': 200,
            'response_error': '',
            'character_id': character_id,
            'api_method': 'CharacterInfo',
            'ignored': False,
            'key_id': key_id}
    db.save_update_status(stat)
Example #6
File: api.py  Project: raylu/skillbook
def add_key(user_id, key_id, vcode):
    mask, characters = eveapi.key_info(key_id, vcode)

    # Make sure the key has the minimum required access
    requirements = db.get_api_calls()
    grants = []
    for req in requirements:
        if int(mask) & req.mask == 0:
            if req.required:
                raise SkillbookException('The supplied key is missing the %s permission' % req.name)
        else:
            grants.append({'name': req.name, 'ignored': not req.required})

    db.add_key(user_id, key_id, vcode, mask, characters.key.characters.rows)
    db.add_grants(key_id, grants, characters.key.characters.rows)
    perform_updates(key_id=key_id)
    cache.remove('*:characters:%s' % user_id)
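
The permission check in add_key is a plain bitwise AND of the key's access mask against each required call's mask bit. A tiny illustration of the same test (the numeric values below are invented, not real EVE API access bits):

# Illustrative only: invented mask values, not real EVE API access bits.
key_mask = 0b0101        # bits granted to the API key
required_bit = 0b0100    # bit a particular API call needs

if key_mask & required_bit == 0:
    print("missing permission")   # the required bit is not set in the key's mask
else:
    print("permission granted")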
Example #7
def invalidate():
    """
    Delete specified item from cache.
    :param item_num: item id
    :param topic: topic
    :return: always return success
    """
    item_num  = request.values.get('item_num')
    if item_num is not None:
        key = repr(('lookupNum', item_num))
        cache.remove(key)
        app.logger.info("[Cache]-------------------- Remove %s" % key)
    
    topic_val = request.values.get('topic')
    if topic_val is not None:
        key = repr(('topic', topic_val))
        cache.remove(key)
        app.logger.info("[Cache]-------------------- Remove %s" % key)

    return "Success"
Example #8
File: api.py  Project: raylu/skillbook
def perform_updates(key_id=None):
    if key_id:
        updates = db.get_update_for_key(key_id)
    else:
        updates = db.get_update_list()
    results = []
    for row in updates:
        try:
            result = row.raw
            if row.method == 'CharacterSheet':
                data = eveapi.character_sheet(row.keyid, row.vcode, row.keymask, row.characterid)
                # Fudge the cached_until timer because it always returns ~30 seconds, and we
                # don't care to update that often
                data.cached_until = data.cached_until + datetime.timedelta(minutes=15)
                db.save_character_sheet(data)
                cache.remove("*:sheet:%s" % row.characterid)
                cache.remove("*:skills:%s" % row.characterid)
            elif row.method == 'SkillQueue':
                data = eveapi.skill_queue(row.keyid, row.vcode, row.keymask, row.characterid)
                db.save_skill_queue(row.characterid, data.skillqueue)
                cache.remove("*:queue:%s" % row.characterid)
            else:
                raise SkillbookException('Unknown API method %s' % row.method)

            # Fix the timezone: the API returns UTC, which might not be the server's local TZ
            result.update({'cached_until': data.cached_until.replace(tzinfo=FixedOffsetTimezone(0)), 
                'response_code': 200, 'response_error': '', 'ignored': False})
            results.append(result)
        except Exception as e:
            # Ignore this call in the future if we've gotten an error before
            ignored = row.response_code == 500

            result.update({'cached_until': None, 'response_code': 500, 
                'response_error': repr(e), 'ignored': ignored})
            results.append(result)

    db.save_update_list(results)
Example #9
File: api.py  Project: raylu/skillbook
def remove_key(user_id, key_id):
    db.remove_key(user_id, key_id)
    cache.remove('characters:%s' % user_id)
Example #10
def search():
    """
    Function for responding topic searching and item lookup
    :param topic: searching by a specific topic
    :param lookupNum: lookup information of a specific item id
    :return: a json object if success, otherwise a failed message
    """
    cache_ret = None
    is_from_cache = False
    start_time = datetime.now()
    ###################################################
    # topic searching
    topic_val = request.values.get('topic')
    if topic_val is not None:
        key = repr(('topic', topic_val))
        if DEFINE['withCache'] == 1:
            cache_ret = cache.get(key)
        
        if cache_ret is not None:
            result = cache_ret
            is_from_cache = True
            app.logger.info("[Cache] Return cached value: %s", result)
        elif DEFINE["testenv"] == 0:
            res = []
            for item in DEFINE["booklist"]:
                if item["topic"] == topic_val:
                    res.append(item)
            result = jsonify(result=res)
        else:
            failureCount = 0
            while failureCount < 5:
                ip = remote.get_server("catalog")
                if ip is None:
                    # Only count actual failures, so a success on the final
                    # attempt is not misreported as a failure below.
                    failureCount += 1
                    app.logger.info("Cannot get a catalog server IP; retry time(s): %d" % failureCount)
                    continue
                try:
                    res = rq.get(ip + 'search/%s' % topic_val, timeout=5)
                    res.raise_for_status()
                    break
                except Exception:
                    failureCount += 1
                    app.logger.info("Catalog server request failed or timed out; retry time(s): %d" % failureCount)
            
            # If all attempts failed: drop the cache entry, log the elapsed
            # time as a failure, and return an error response.
            if failureCount >= 5:
                cache.remove(key)
                end_time = datetime.now()
                diff = (end_time - start_time).total_seconds()
                logging.getLogger('search').info("Failed, %s" % diff)

                return "Failed", 201

            result = res.json()
        
        #-------------------------------
        if DEFINE['withCache'] == 1 and not is_from_cache:
            app.logger.info("[Cache] Set cache: %s", result)
            cache.set_pair(key, result)

        end_time = datetime.now()
        diff = (end_time - start_time).total_seconds()
        logging.getLogger('search').info("Success, %s" % diff)

        return result

    ###################################################
    # item information lookup
    lookup_num = request.values.get('lookupNum')
    if lookup_num is not None:
        key = repr(('lookupNum', lookup_num))
        if DEFINE['withCache'] == 1:
            cache_ret = cache.get(key)

        if cache_ret is not None:
            result = cache_ret
            is_from_cache = True
            app.logger.info("[Cache] Return cached value: %s", result)
        elif DEFINE["testenv"] == 0:
            res = [DEFINE["booklist"][int(lookup_num) - 1]]
            result = jsonify(result=res)
        else:
            failureCount = 0
            while failureCount < 5:
                ip = remote.get_server("catalog")
                if ip is None:
                    # Only count actual failures, so a success on the final
                    # attempt is not misreported as a failure below.
                    failureCount += 1
                    app.logger.info("Cannot get a catalog server IP; retry time(s): %d" % failureCount)
                    continue
                try:
                    res = rq.get(ip + 'lookup/%s' % lookup_num, timeout=5)
                    res.raise_for_status()
                    break
                except Exception:
                    failureCount += 1
                    app.logger.info("Catalog server request failed or timed out; retry time(s): %d" % failureCount)

            # If all attempts failed: drop the cache entry, log the elapsed
            # time as a failure, and return an error response.
            if failureCount >= 5:
                cache.remove(key)
                end_time = datetime.now()
                diff = (end_time - start_time).total_seconds()
                logging.getLogger('lookup').info("Failed, %s" % diff)

                return "Failed", 201

            result = res.json()

        #-------------------------------
        if DEFINE['withCache'] == 1 and not is_from_cache:
            app.logger.info("[Cache] Set cache: %s", result)
            cache.set_pair(key, result)

        end_time = datetime.now()
        diff = (end_time - start_time).total_seconds()
        logging.getLogger('lookup').info("Success, %s" % diff)

        return result

    return "Failed", 201
Example #11
def testRemove(self):
    cache.add("a", GOOD_RESPONSE, 2)
    self.assertEqual(cache.get('a'), 'result')
    self.assertEqual(cache.remove('a'), True)
    self.assertEqual(cache.get('a'), False)
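
Across these examples the cache object is assumed to expose a small key-value interface: get, add or set_pair, and remove. The snippets come from different projects, so the details differ (some return None on a miss, the test above expects False, and the skillbook examples pass glob-style keys such as '*:queue:<id>'). Purely as an illustration of that interface, not any project's real implementation, a minimal in-memory stand-in might look like:

import time

class SimpleCache:
    """Illustrative stand-in for the cache modules used in the examples above."""

    def __init__(self):
        self._store = {}

    def add(self, key, value, ttl):
        # Store the value together with an expiry time, ttl seconds from now.
        self._store[key] = (value, time.time() + ttl)

    def set_pair(self, key, value):
        # Store without a TTL, matching the two-argument set_pair call above.
        self._store[key] = (value, None)

    def get(self, key):
        entry = self._store.get(key)
        if entry is None:
            return None
        value, expires = entry
        if expires is not None and expires < time.time():
            del self._store[key]
            return None
        return value

    def remove(self, key):
        # Return True if something was actually deleted. The skillbook examples
        # pass glob patterns (e.g. '*:sheet:<id>'); supporting those would need
        # key-pattern matching, which is omitted here.
        return self._store.pop(key, None) is not None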