Example #1
    def createEmotionTable(self):
        qzoneDB = DB(self.db)
        qzoneDB.createTable('emotion', (
            'id integer primary key autoincrement',
            'name varchar(20)',
            'weight varchar(20)',
            'datetime varchar(20)'
        ))
        qzoneDB.close()
Example #2
    def get_user_following(cls, uid):
        """Ta关注的人"""

        uid = '%d' % float(uid)
        db = DB.get_crawlerdb()
        cookies_id, cookies = Cookies.get_cookies(cls.cookies_policies, cls.cookies_type, db)
        if not cookies:
            return []

        params = {
            'luicode': '10000011',
            'lfid': '100505{}'.format(uid),
            'containerid': '231051_-_followers_-_{}'.format(uid),
            'featurecode': '20000320',
            'type': 'uid',
            'value': uid
        }
        followed_id_list = []
        for page_id in range(1, 2):
            params['page'] = page_id
            r = requests.get(cls.BASE_URL, params=params, cookies=cookies, timeout=3)
            Cookies.update_cookies(cookies_id, cls.cookies_type, r, db)
            raw_result = r.json()

            if raw_result["ok"] is 0:
                raise RequestException(raw_result['msg'])
            else:
                if raw_result['data'].get('cards'):
                    for card in raw_result['data']['cards']:
                        for u in card['card_group']:
                            if u['card_type'] == 10:
                                followed_id_list.append(u["user"]["id"])
        return followed_id_list
Example #3
    def get_user_blogs(cls, uid, raw=False):
        """Ta发的微博"""

        db = DB.get_crawlerdb()
        cookies_id, cookies = Cookies.get_cookies(cls.cookies_policies, cls.cookies_type, db)
        if not cookies:
            return None

        params = {
            'type': uid,
            'value': uid,
            'containerid': '107603{}'.format(uid),
            'uid': uid
        }

        blogs = []
        for page_id in range(1, 2):
            params['page'] = page_id
            r = requests.get(cls.BASE_URL, params=params, cookies=cookies, timeout=3)
            Cookies.update_cookies(cookies_id, cls.cookies_type, r, db)
            raw_result = r.json()

            if raw_result["ok"] is 0:
                raise RequestException(raw_result['msg'])
            else:
                if raw:
                    blogs.append(raw_result)
                else:
                    for item in raw_result["data"]["cards"]:
                        if item["card_type"] == 9:
                            raw_blog = item["mblog"]
                            blog = WeiboAPI.parse_raw_blog(raw_blog)
                            blogs.append(blog)
        return blogs
Example #4
    def get_blog_reposts(cls, weibo_id, raw=False):
        """获得一条微博的转发"""
        db = DB.get_crawlerdb()
        cookies_id, cookies = Cookies.get_cookies(cls.cookies_policies, cls.cookies_type, db)
        if not cookies:
            return None

        params = {
            'id': weibo_id
        }
        url = 'https://m.weibo.cn/api/statuses/repostTimeline'

        reposts = []
        for page_id in range(1, 2):
            params['page'] = page_id
            r = requests.get(url, params=params, cookies=cookies, timeout=3)
            Cookies.update_cookies(cookies_id, cls.cookies_type, r, db)
            raw_result = r.json()

            if raw_result["ok"] is 0:
                raise RequestException(raw_result['msg'])
            else:
                raw_data = raw_result["data"]
                if raw:
                    reposts += raw_data['data']
                else:
                    for l in raw_data['data']:
                        reposts.append(WeiboAPI.parse_raw_blog(l))

        return reposts
Example #5
    def plandoRateWebService(self):
        if self.vars.plando is None:
            raiseHttp(400, "Missing parameter plando")
        plando = self.vars.plando

        if self.vars.rate is None:
            raiseHttp(400, "Missing parameter rate")
        rate = self.vars.rate

        if IS_LENGTH(maxsize=32, minsize=1)(plando)[1] is not None:
            raiseHttp(400, "Plando name must be between 1 and 32 characters")

        if IS_MATCH('^[a-zA-Z0-9 _-]*$')(plando)[1] is not None:
            raiseHttp(400, "Plando name can only contain [a-zA-Z0-9 -_]")

        if IS_INT_IN_RANGE(1, 6)(rate)[1] is not None:
            raiseHttp(400, "Rate name must be between 1 and 5")
        rate = int(rate)
        ip = self.request.client

        with DB() as db:
            db.addRating(plando, rate, ip)
            newRate = db.getPlandoRate(plando)
        if newRate is None:
            raiseHttp(400, "Can't get new rate")
        newCount = newRate[0][0]
        newRate = float(newRate[0][1])
        data = {
            "msg": "",
            "purePlandoName": re.sub('[\W_]+', '', plando),
            "rate": newRate,
            "count": newCount
        }
        return json.dumps(data)
Example #6
    def downloadPlandoWebService(self):
        if self.vars.plando is None:
            raiseHttp(400, "Missing parameter plando")
        plandoName = self.vars.plando

        if IS_LENGTH(maxsize=32, minsize=1)(plandoName)[1] is not None:
            raiseHttp(400, "Plando name must be between 1 and 32 characters")

        if IS_MATCH('^[a-zA-Z0-9 _-]*$')(plandoName)[1] is not None:
            raiseHttp(400, "Plando name can only contain [a-zA-Z0-9 -_]")

        ipsFileName = os.path.join(ipsBasePath, "{}.ips".format(plandoName))
        if not os.path.isfile(ipsFileName):
            raiseHttp(400, "Plando ips not found on server")

        with open(ipsFileName, 'rb') as ipsFile:
            ipsData = ipsFile.read()

        with DB() as db:
            maxSize = db.getPlandoIpsMaxSize(plandoName)
            db.increaseDownloadCount(plandoName)

        data = {
            "ips": base64.b64encode(ipsData).decode(),
            "fileName": "{}.sfc".format(plandoName),
            "maxSize": maxSize
        }

        return json.dumps(data)
Example #7
    def run(self):
        weeks = 1

        with DB() as db:
            solverPresets = db.getSolverPresets(weeks)
            randomizerPresets = db.getRandomizerPresets(weeks)

            solverDurations = db.getSolverDurations(weeks)
            randomizerDurations = db.getRandomizerDurations(weeks)

            solverData = db.getSolverData(weeks)
            randomizerData = db.getRandomizerData(weeks)

            isolver = db.getISolver(weeks)
            isolverData = db.getISolverData(weeks)

            spritesData = db.getSpritesData(weeks)
            shipsData = db.getShipsData(weeks)
            plandoRandoData = db.getPlandoRandoData(weeks)

            randomizerParamsStats = db.getRandomizerParamsStats(weeks)

        errors = self.getErrors()

        (fsStatus, fsPercent) = self.getFsUsage()

        return dict(solverPresets=solverPresets, randomizerPresets=randomizerPresets,
                    solverDurations=solverDurations, randomizerDurations=randomizerDurations,
                    solverData=solverData, randomizerData=randomizerData,
                    randomizerParamsStats=randomizerParamsStats,
                    isolver=isolver, isolverData=isolverData, spritesData=spritesData,
                    shipsData=shipsData, errors=errors,
                    fsStatus=fsStatus, fsPercent=fsPercent, plandoRandoData=plandoRandoData)
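The web-service and stats examples above and below all use DB as a context manager ("with DB() as db:"). As a rough orientation only, here is a minimal sketch of a wrapper with that behaviour; the sqlite3 backend, the path argument and the class body are assumptions, not the project's actual implementation, which exposes many more query helpers (getSolverPresets, addRating, ...).

# Hypothetical context-manager DB wrapper (assumed sketch, not the real class)
import sqlite3

class DB:
    def __init__(self, path='stats.db'):  # path is an assumption
        self.conn = sqlite3.connect(path)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        # commit on clean exit, roll back on error, always close
        if exc_type is None:
            self.conn.commit()
        else:
            self.conn.rollback()
        self.conn.close()
        return False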
Example #8
    def deletePlandoWebService(self):
        for param in ["plandoName", "plandoKey"]:
            if self.vars[param] is None:
                raiseHttp(400, "Missing parameter {}".format(param))

        plandoName = self.vars.plandoName
        plandoKey = self.vars.plandoKey

        if IS_LENGTH(maxsize=32, minsize=1)(plandoName)[1] is not None:
            raiseHttp(400, "Plando name must be between 1 and 32 characters")
        if IS_MATCH('^[a-zA-Z0-9 _-]*$')(plandoName)[1] is not None:
            raiseHttp(400, "Plando name can only contain [a-zA-Z0-9 -_]")

        if IS_LENGTH(maxsize=8, minsize=1)(plandoKey)[1] is not None:
            raiseHttp(400, "Plando key must be between 1 and 8 characters")
        if IS_MATCH('^[a-zA-Z0-9]*$')(plandoKey)[1] is not None:
            raiseHttp(400, "Plando key can only contain [a-zA-Z0-9]")

        with DB() as db:
            valid = db.isValidPlandoKey(plandoName, plandoKey)
            if valid is None or len(valid) == 0:
                raiseHttp(400, "Plando key mismatch")
            db.deletePlandoRating(plandoName)
            db.deletePlando(plandoName)

        return json.dumps("Plando {} deleted".format(plandoName))
Example #9
    def run(self):
        with DB() as db:
            url = self.request.env.request_uri.split('/')
            msg = ""
            plandos = []
            expand = True
            if len(url) > 0 and url[-1] != 'plandorepo':
                # a plando name was passed as parameter
                plandoName = url[-1]

                # decode url
                plandoName = urllib.parse.unquote(plandoName)

                # sanity check
                if IS_MATCH('^[a-zA-Z0-9 _-]*$')(plandoName)[1] is not None:
                    msg = "Plando name can only contain [a-zA-Z0-9 -_]"
                else:
                    plandos = db.getPlando(plandoName)
                    if plandos is None or len(plandos) == 0:
                        msg = "Plando not found"
            if plandos is None or len(plandos) == 0:
                # get plando list
                plandos = db.getPlandos()
                expand = False

        return dict(plandos=plandos, msg=msg, expand=expand, math=math, re=re)
Example #10
    def get_user_info(cls, uid, raw=False):
        """获取用户基础信息"""

        uid = '%d' % float(uid)
        db = DB.get_crawlerdb()
        cookies_id, cookies = Cookies.get_cookies(cls.cookies_policies, cls.cookies_type, db)
        if not cookies:
            return []

        params = {
            'containerid': '100505{}'.format(uid),
            'type': 'uid',
            'value': uid
        }
        r = requests.get(cls.BASE_URL, params=params, cookies=cookies, timeout=3)
        Cookies.update_cookies(cookies_id, cls.cookies_type, r, db)

        raw_result = r.json()

        if raw_result["ok"] is 0:
            raise RequestException(raw_result['msg'])
        else:
            if raw:
                return raw_result
            else:
                user_info_raw = raw_result["data"]["userInfo"]
                user_info = WeiboAPI.parse_user_info(user_info_raw)
                return user_info
Example #11
    def get_blog_detail(cls, weibo_id, raw=False):
        """
        获得一条微博详细信息。
        :param raw: 是否返回raw data
        :param weibo_id: 微博id
        """
        db = DB.get_crawlerdb()
        cookies_id, cookies = Cookies.get_cookies(cls.cookies_policies, cls.cookies_type, db)
        if not cookies:
            return None

        url = 'https://m.weibo.cn/statuses/show?id={}'.format(weibo_id)
        r = requests.get(url, cookies=cookies, timeout=3)
        Cookies.update_cookies(cookies_id, cls.cookies_type, r, db)
        raw_result = r.json()

        if raw_result["ok"] is 0:
            raise RequestException(raw_result['msg'])
        else:
            if raw:
                return raw_result
            else:
                raw_blog = raw_result["data"]
                blog = WeiboAPI.parse_raw_blog(raw_blog)
                return blog
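Read together, the crawler examples (#2, #3, #4, #10, #11 and #21) suggest a simple crawl flow. The sketch below is only an assumed call pattern: it presumes the functions are exposed as classmethods on a WeiboAPI class (as the internal WeiboAPI.parse_raw_blog calls imply), and the placeholder uid and the 'id' key on parsed blogs are assumptions of mine.

# Hypothetical usage of the WeiboAPI classmethods shown in these examples
uid = 1234567890                                    # placeholder uid
info = WeiboAPI.get_user_info(uid)                  # Example #10: basic profile
following = WeiboAPI.get_user_following(uid)        # Example #2: first page of followees
blogs = WeiboAPI.get_user_blogs(uid)                # Example #3: first page of posts
for blog in blogs or []:
    detail = WeiboAPI.get_blog_detail(blog['id'])       # Example #11 (assumes an 'id' key)
    comments = WeiboAPI.get_blog_comments(blog['id'])   # Example #21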
Example #12
    def uploadPlandoWebService(self):
        with DB() as db:
            count = db.getPlandoCount()
            plandoLimit = 2048
            if count is None or count[0][0] >= plandoLimit:
                raiseHttp(400, "Maximum number of plandos reach: {}".format(plandoLimit))

        for param in ["author", "plandoName", "longDesc", "preset", "romData"]:
            if self.vars[param] is None:
                raiseHttp(400, "Missing parameter {}".format(param))

        for param in ["author", "plandoName", "preset"]:
            if IS_LENGTH(maxsize=32, minsize=1)(self.vars[param])[1] is not None:
                raiseHttp(400, "{} must be between 1 and 32 characters".format(param))

        for param in ["longDesc"]:
            if IS_LENGTH(maxsize=2048, minsize=1)(self.vars[param])[1] is not None:
                raiseHttp(400, "{} must be between 1 and 2048 characters".format(param))

        plandoName = self.vars.plandoName
        if IS_MATCH('^[a-zA-Z0-9 _-]*$')(plandoName)[1] is not None:
            raiseHttp(400, "Plando name can only contain [a-zA-Z0-9 -_]")

        # check if plando doesn't already exist
        with DB() as db:
            check = db.checkPlando(plandoName)

        if check is not None and len(check) > 0 and check[0][0] == plandoName:
            raiseHttp(400, "Can't create plando, a plando with the same name already exists")

        author = self.vars.author
        longDesc = self.removeHtmlTags(self.vars.longDesc)
        preset = self.vars.preset

        maxSize = self.handleIps(plandoName, self.vars.romData)

        updateKey = self.generateUpdateKey()

        with DB() as db:
            db.insertPlando((plandoName, author, longDesc, preset, updateKey, maxSize))

        if webhookAvailable:
            self.plandoWebhook(plandoName, author, preset, longDesc)

        return json.dumps(updateKey)
Example #13
    def __init__(self, bot, client,
                 torrentsQueue: Queue, ytQueue, megaQueue):
        self.bot = bot
        self.client = client
        self.ytQueue = ytQueue
        self.megaQueue = megaQueue
        self.torrentsQueue = torrentsQueue
        self.utils = Utils()
        self.db = DB()
Example #14
    def computeDifficulty(self, jsonRomFileName, preset):
        randomizedRom = os.path.basename(jsonRomFileName.replace(
            'json', 'sfc'))

        presetFileName = "{}/{}.json".format(getPresetDir(preset), preset)
        (fd, jsonFileName) = tempfile.mkstemp()

        db = DB()
        id = db.initSolver()

        params = [
            getPythonExec(),
            os.path.expanduser("~/RandomMetroidSolver/solver.py"), '-r',
            str(jsonRomFileName), '--preset', presetFileName,
            '--difficultyTarget',
            str(self.session.solver['difficultyTarget']), '--pickupStrategy',
            self.session.solver['pickupStrategy'], '--type', 'web', '--output',
            jsonFileName, '--runtime', '10'
        ]

        for item in self.session.solver['itemsForbidden']:
            params += ['--itemsForbidden', item]

        db.addSolverParams(id, randomizedRom, preset,
                           self.session.solver['difficultyTarget'],
                           self.session.solver['pickupStrategy'],
                           self.session.solver['itemsForbidden'])

        print("before calling solver: {}".format(params))
        start = datetime.now()
        ret = subprocess.call(params)
        end = datetime.now()
        duration = (end - start).total_seconds()
        print("ret: {}, duration: {}s".format(ret, duration))

        if ret == 0:
            with open(jsonFileName) as jsonFile:
                result = json.load(jsonFile)
        else:
            result = "Solver: something wrong happened while solving the ROM"

        db.addSolverResult(id, ret, duration, result)
        db.close()

        os.close(fd)
        os.remove(jsonFileName)

        return (ret == 0, result)
Example #15
    def __init__(self, bot, client, torrentsQueue: Queue, megaQueue: Queue,
                 ytQueue: Queue):
        self.bot = bot
        self.client = client
        self.torrentsQueue = torrentsQueue
        self.megaQueue = megaQueue
        self.ytQueue = ytQueue
        self.utils = Utils()
        self.db = DB()
        self.logger = logging.getLogger(' Admin Conv ')
Example #16
    def randoParamsWebService(self):
        # get a json string of the randomizer parameters for a given seed.
        # seed is the id in randomizer table, not actual seed number.
        if self.vars.seed is None:
            raiseHttp(400, "Missing parameter seed", True)

        seed = getInt(self.request, 'seed', False)
        if seed < 0 or seed > sys.maxsize:
            raiseHttp(400, "Wrong value for seed", True)

        with DB() as db:
            (seed, params) = db.getRandomizerSeedParams(seed)

        return json.dumps({"seed": seed, "params": params})
Example #17
    def updateEmotion(self):
        timestamp, lastTimestamp = self.getUpdateTimestamp()
        url = 'http://taotao.qq.com/cgi-bin/emotion_cgi_msglist_v6'
        params = {
            'uin': self.user,
            'pos': 0,
            'num': 20,
            'g_tk': Qzone.getCSRFToken(self.session.cookies.get('skey')),
            'code_version': 1,
            'format': 'json'
        }
        total, msgList, content, createdTime = None, None, None, None
        qzoneDB = DB(self.db)
        while True:
            res = self.session.get(url, params=params)
            data = res.json()
            total = data['total']
            msgList = data['msglist']

            for msg in msgList:
                content = msg['content']
                createdTime = msg['created_time']
                if int(createdTime) < int(lastTimestamp):
                    return qzoneDB.close()
                if int(createdTime) < int(timestamp) and int(createdTime) > int(lastTimestamp) \
                        and re.match(r'^~?\d{2}\.?\d?$', content):
                    # print(content, '#', createdTime)
                    qzoneDB.insert('emotion', {
                        'name': str(self.user),
                        'weight': content,
                        'datetime': createdTime
                    })

            # when to break loop
            if total < params['num'] or total < params['pos'] + params['num']:
                return qzoneDB.close()

            params['pos'] = params['pos'] + params['num']
Example #18
    def __init__(self, bot):
        with open('config.json', 'r') as f:
            self.config = json.load(f)
        self.bot = bot
        self.db = DB()
        jobstore = {
            'mongo': MongoDBJobStore(database='posts', collection='posts')
        }
        self.sched = AsyncIOScheduler(jobstores=jobstore)
        self.sched.start()
        app_location = os.getcwd()
        self.download_location = f'{app_location}/images'
        if not os.path.exists(self.download_location):
            os.makedirs(self.download_location)
Example #19
def main():
    with open('config.json', 'r') as f:
        config = json.load(f)
    dbQueue = Queue()
    db = DB(dbQueue)
    tweetsQueue = Queue()
    extendedQueue = Queue()
    monitor = Monitor(tweetsQueue, extendedQueue)
    threading.Thread(target=db.start, name='Database Thread').start()
    threading.Thread(target=monitor.start, name='Monitor Thread').start()
    for i in range(50):
        filteringThread = FilterTweets(f'Filter {i}', tweetsQueue, dbQueue,
                                       config.get("SET1"), config.get("SET2"),
                                       config.get("SET3"),
                                       config.get("DISCORD_CHANNEL_WEBHOOK"))
        threading.Thread(target=filteringThread.start,
                         name=f'Tweets Filter Thread {i}').start()
    print(' Bot is up!')
Example #20
    def updatePlandoWebService(self):
        for param in ["author", "plandoName", "longDesc", "preset", "plandoKey"]:
            if self.vars[param] is None:
                raiseHttp(400, "Missing parameter {}".format(param))

        for param in ["author", "plandoName", "preset"]:
            if IS_LENGTH(maxsize=32, minsize=1)(self.vars[param])[1] is not None:
                raiseHttp(400, "{} must be between 1 and 32 characters".format(param))

        for param in ["plandoKey"]:
            if IS_LENGTH(maxsize=8, minsize=1)(self.vars[param])[1] is not None:
                raiseHttp(400, "{} must be between 1 and 8 characters".format(param))

        for param in ["longDesc"]:
            if IS_LENGTH(maxsize=2048, minsize=1)(self.vars[param])[1] is not None:
                raiseHttp(400, "{} must be between 1 and 2048 characters".format(param))

        plandoName = self.vars.plandoName
        if IS_MATCH('^[a-zA-Z0-9 _-]*$')(plandoName)[1] is not None:
            raiseHttp(400, "Plando name can only contain [a-zA-Z0-9 -_]")

        author = self.vars.author
        longDesc = self.removeHtmlTags(self.vars.longDesc)
        preset = self.vars.preset
        plandoKey = self.vars.plandoKey

        # check update key
        with DB() as db:
            valid = db.isValidPlandoKey(plandoName, plandoKey)
            if valid is None or len(valid) == 0:
                raiseHttp(400, "Plando key mismatch")

            if self.vars.romData is not None:
                print("updatePlandoWebService: update ips")
                maxSize = self.handleIps(plandoName, self.vars.romData)
                db.updatePlandoAll((author, longDesc, preset, maxSize, plandoName))
            else:
                db.updatePlandoMeta((author, longDesc, preset, plandoName))

        return json.dumps("Plando {} updated succesfully.".format(plandoName))
Example #21
    def get_blog_comments(cls, weibo_id, raw=False):
        """
        获得一条微博下的评论。
        分页接口,一次只拿20个。
        :param weibo_id: 微博的id
        :param raw: 是否返回raw data
        """
        db = DB.get_crawlerdb()
        cookies_id, cookies = Cookies.get_cookies(cls.cookies_policies, cls.cookies_type, db)
        if not cookies:
            return None

        params = {
            'id': weibo_id,
            'mid': weibo_id
        }
        url = 'https://m.weibo.cn/comments/hotflow'

        comments = []
        for _ in range(1, 2):
            r = requests.get(url, params=params, cookies=cookies, timeout=3)
            Cookies.update_cookies(cookies_id, cls.cookies_type, r, db)
            raw_result = r.json()

            if raw_result["ok"] is 0:
                raise RequestException(raw_result['msg'])
            else:
                raw_data = raw_result["data"]
                params['max_id'] = raw_data["max_id"]
                params['max_id_type'] = raw_data['max_id_type']
                if raw:
                    comments += raw_data['data']
                else:
                    for c in raw_data['data']:
                        comments.append(WeiboAPI.parse_raw_blog(c))

        return comments
Example #22
    def getSkillLevelBarData(self, preset):
        result = {'name': preset}
        try:
            params = PresetLoader.factory('{}/{}.json'.format(
                getPresetDir(preset), preset)).params
            result['custom'] = (preset, params['score'])
            # add stats on the preset
            result['knowsKnown'] = len([
                know for know in params['Knows']
                if params['Knows'][know][0] == True
            ])
        except Exception:
            result['custom'] = (preset, 'N/A')
            result['knowsKnown'] = 'N/A'

        # get score of standard presets
        standardScores = self.cache.ram('standardScores',
                                        lambda: dict(),
                                        time_expire=None)
        if not standardScores:
            # use a distinct loop variable to avoid shadowing the preset parameter
            for stdPreset in [
                    'newbie', 'casual', 'regular', 'veteran', 'expert',
                    'master', 'samus'
            ]:
                score = PresetLoader.factory('{}/{}.json'.format(
                    getPresetDir(stdPreset), stdPreset)).params['score']
                standardScores[stdPreset] = score

        result['standards'] = standardScores

        with DB() as db:
            result['lastAction'] = db.getPresetLastActionDate(
                result['custom'][0])

        # TODO: normalize result (or not ?)
        return result
Example #23
    def createQzoneDB(self):
        qzoneDB = DB(self.db)
        qzoneDB.close()
Example #24
def write_summary_sheet02(workbook, PROJ_DICT, WEEK, NEW_PROJECT,
                          currentWeekSolveRate, currentWeekAddRate,
                          correSituation):
    title = "开始编写【汇总】工作表"
    print(title.center(40, '='))
    worksheet = workbook.add_worksheet('汇总')
    worksheet.hide_gridlines(option=2)  # hide gridlines
    style = Styles(workbook).style_of_cell()
    style_lightGray = Styles(workbook).style_of_cell('gray')
    # style_small_width = Styles(workbook).style_of_cell()
    worksheet.set_row(0, 20)  # set row height
    worksheet.set_column('A:A', 2)  # set column width
    worksheet.set_column('B:B', 24)  # set column width
    worksheet.merge_range(1, 1, 2, 1, '项目', style)
    projDict = {}
    for proj in PROJ_DICT:
        index = PROJ_DICT[proj]['index']
        proj_name = PROJ_DICT[proj]['sheet_name']
        projDict[proj] = index + 2
        worksheet.write(index + 2, 1, proj_name, style)

    # query last week's report date
    connect = DB().conn()
    sql = 'select project,week_num,statis_time,bug_leave_num,bug_total_num,bug_leave_rate,bug_add_num,bug_solve_num from wbg_history_data where week_num = %(week_num)s'
    # week_num = str(WEEK-1)+'周'
    value = {'week_num': str(WEEK - 1) + '周'}
    # print(WEEK)
    # print(type(WEEK))
    totalDate = DB().search(sql, value, connect)
    beforeWeek = totalDate[0][2]
    beforeWeek = datetime.datetime.strftime(beforeWeek, '%m-%d').replace(
        '-', '月') + '日'
    worksheet.merge_range(1, 2, 1, 6, beforeWeek, style)
    worksheet.write(2, 2, '遗留数', style)
    worksheet.write(2, 3, '总数', style)
    worksheet.write(2, 4, '遗留率', style)
    worksheet.write(2, 5, '新增数', style)
    worksheet.write(2, 6, '解决数', style)
    # projDict = {'BIM': 3, 'EBID': 4, 'OA': 5, 'EDU': 6, 'FAS': 7, 'EVAL': 8}
    for i in totalDate:
        if i[0] in PROJ_DICT:
            worksheet.write(projDict[i[0]], 2, i[3], style)
            worksheet.write(projDict[i[0]], 3, i[4], style)
            worksheet.write(projDict[i[0]], 4, format(float(i[5]), '.1%'),
                            style)
            worksheet.write(projDict[i[0]], 5, i[6], style)
            worksheet.write(projDict[i[0]], 6, i[7], style)
    for i in NEW_PROJECT:
        worksheet.write(projDict[i], 2, '—', style)
        worksheet.write(projDict[i], 3, '—', style)
        worksheet.write(projDict[i], 4, '—', style)
        worksheet.write(projDict[i], 5, '—', style)
        worksheet.write(projDict[i], 6, '—', style)

    # query this week's report data
    sql = 'select project,week_num,statis_time,bug_leave_num,bug_total_num,bug_leave_rate,bug_add_num,bug_solve_num from wbg_history_data where week_num = %(week_num)s'
    value = {'week_num': WEEK}
    totalDate = DB().search(sql, value, connect)
    beforeWeek = totalDate[0][2]
    beforeWeek = datetime.datetime.strftime(beforeWeek, '%m-%d').replace(
        '-', '月') + '日'
    worksheet.merge_range(1, 7, 1, 11, beforeWeek, style)
    worksheet.write(2, 7, '遗留数', style)
    worksheet.write(2, 8, '总数', style)
    worksheet.write(2, 9, '遗留率', style)
    worksheet.write(2, 10, '新增数', style)
    worksheet.write(2, 11, '解决数', style)
    # projDict = {'BIM': 3, 'EBID': 4, 'OA': 5, 'EDU': 6, 'FAS': 7, 'EVAL': 8}
    for i in totalDate:
        if i[0] in PROJ_DICT:
            worksheet.write(projDict[i[0]], 7, i[3], style)
            worksheet.write(projDict[i[0]], 8, i[4], style)
            worksheet.write(projDict[i[0]], 9, format(float(i[5]), '.1%'),
                            style)
            worksheet.write(projDict[i[0]], 10, i[6], style)
            worksheet.write(projDict[i[0]], 11, i[7], style)

    # overall bug status for the current week
    worksheet.merge_range(1, 12, 1, 14, '当周bug整体状况', style_lightGray)
    worksheet.write(2, 12, '解决速率', style_lightGray)
    worksheet.write(2, 13, '新增速率', style_lightGray)
    worksheet.write(2, 14, '对应状况', style_lightGray)
    for proj in projDict:
        if proj in currentWeekSolveRate:
            worksheet.write(projDict[proj], 12, currentWeekSolveRate[proj],
                            style_lightGray)
        else:
            worksheet.write(projDict[proj], 12, '0%', style_lightGray)
        if proj in currentWeekAddRate:
            worksheet.write(projDict[proj], 13, currentWeekAddRate[proj],
                            style_lightGray)
        else:
            worksheet.write(projDict[proj], 13, '0%', style_lightGray)
        if proj in correSituation:
            worksheet.write(projDict[proj], 14, correSituation[proj],
                            style_lightGray)
        else:
            worksheet.write(projDict[proj], 14, '无应对', style_lightGray)

    for i in range(1, 9):
        worksheet.set_row(i, 22)
    print("【汇总】工作表编写完毕".center(40, '-'))
Example #25
setup()
with open('config.json', 'r') as f:
    config = json.load(f)
API_ID = config.get('API_ID')
API_HASH = config.get('API_HASH')
PHONE_NUMBER = config.get('PHONE_NUMBER')
BOT_TOKEN = config.get('BOT_TOKEN')

client = TelegramClient('./sessionFiles/client', API_ID, API_HASH)
client.parse_mode = 'html'
bot = TelegramClient('./sessionFiles/bot', API_ID, API_HASH)
torrentsQueue = Queue()
megaQueue = Queue()
ytQueue = Queue()
db = DB()


async def startAdminConv(event):
    conversation = AdminConversation(bot, client, torrentsQueue, megaQueue,
                                     ytQueue)
    await conversation.start(event)
    raise StopPropagation


async def startUserConv(event):
    conversation = UserConversation(bot, client, torrentsQueue, ytQueue,
                                    megaQueue)
    await conversation.start(event)
    raise StopPropagation
Example #26
    def run(self):
        self.initProgSpeedStatsSession()

        if self.vars.action == 'Load':
            (ok, msg) = self.validateProgSpeedStatsParams()
            if not ok:
                self.session.flash = msg
                redirect(URL(r=self.request, f='progSpeedStats'))

            self.updateProgSpeedStatsSession()

            skillPreset = "Season_Races"
            randoPreset = "Season_Races"
            majorsSplit = self.vars.majorsSplit

            with DB() as db:
                progSpeedStatsRaw = {}
                progSpeedStats = {}
                progSpeedStats["open14"] = {}
                progSpeedStats["open24"] = {}
                progSpeedStats["open34"] = {}
                progSpeedStats["open44"] = {}
                progSpeeds = [
                    'speedrun', 'slowest', 'slow', 'medium', 'fast', 'fastest',
                    'basic', 'variable', 'total'
                ]
                realProgSpeeds = []
                realProgSpeedsName = []
                for progSpeed in progSpeeds:
                    curRandoPreset = "{}_{}_{}".format(randoPreset,
                                                       majorsSplit, progSpeed)
                    progSpeedStatsRaw[progSpeed] = db.getProgSpeedStat(
                        skillPreset, curRandoPreset)

                    if len(progSpeedStatsRaw[progSpeed]) != 0:
                        progSpeedStats[progSpeed] = {}
                        progSpeedStats[progSpeed]["avgLocs"] = transformStats(
                            progSpeedStatsRaw[progSpeed]["avgLocs"], 50)
                        open14 = transformStats(
                            progSpeedStatsRaw[progSpeed]["open14"])
                        open24 = transformStats(
                            progSpeedStatsRaw[progSpeed]["open24"])
                        open34 = transformStats(
                            progSpeedStatsRaw[progSpeed]["open34"])
                        open44 = transformStats(
                            progSpeedStatsRaw[progSpeed]["open44"])
                        progSpeedStats[progSpeed]["open"] = zipStats(
                            [open14, open24, open34, open44])
                        progSpeedStats[progSpeed]["open"].insert(
                            0, [
                                'Collected items', '1/4 locations available',
                                '2/4 locations available',
                                '3/4 locations available',
                                '4/4 locations available'
                            ])

                        progSpeedStats["open14"][progSpeed] = open14
                        progSpeedStats["open24"][progSpeed] = open24
                        progSpeedStats["open34"][progSpeed] = open34
                        progSpeedStats["open44"][progSpeed] = open44

                        realProgSpeeds.append(progSpeed)
                        if progSpeed == 'total':
                            realProgSpeedsName.append('total_rando')
                        else:
                            realProgSpeedsName.append(progSpeed)

            # avg locs
            if len(realProgSpeeds) > 0:
                progSpeedStats['avgLocs'] = zipStats([
                    progSpeedStats[progSpeed]["avgLocs"]
                    for progSpeed in realProgSpeeds
                ])
                progSpeedStats["avgLocs"].insert(0, ['Available locations'] +
                                                 realProgSpeedsName)

            # prog items
            if len(progSpeedStats["open14"]) > 0:
                progSpeedStats["open14"] = zipStats([
                    progSpeedStats["open14"][progSpeed]
                    for progSpeed in realProgSpeeds
                ])
                progSpeedStats["open14"].insert(0, ['Collected items'] +
                                                realProgSpeedsName)
                progSpeedStats["open24"] = zipStats([
                    progSpeedStats["open24"][progSpeed]
                    for progSpeed in realProgSpeeds
                ])
                progSpeedStats["open24"].insert(0, ['Collected items'] +
                                                realProgSpeedsName)
                progSpeedStats["open34"] = zipStats([
                    progSpeedStats["open34"][progSpeed]
                    for progSpeed in realProgSpeeds
                ])
                progSpeedStats["open34"].insert(0, ['Collected items'] +
                                                realProgSpeedsName)
                progSpeedStats["open44"] = zipStats([
                    progSpeedStats["open44"][progSpeed]
                    for progSpeed in realProgSpeeds
                ])
                progSpeedStats["open44"].insert(0, ['Collected items'] +
                                                realProgSpeedsName)
        else:
            progSpeedStats = None

        majorsSplit = ['Major', 'Full']

        return dict(majorsSplit=majorsSplit, progSpeedStats=progSpeedStats)
Example #27
    val_str = val_str.strip(', ')
    return 'INSERT IGNORE INTO xiami_music (' + key_str + ') VALUES (' + val_str + ')'


def write_song_to_sql(cur, in_song):
    statement = gen_sql_statement(in_song)
    cur.execute(statement)
    return False


print('---------------------------------------')
print('Transferring mongo to mysql')
print('---------------------------------------')
sql_conn = pymysql.connect(**config.mysql)
sql_cur = sql_conn.cursor()
mongo_db = DB('xiami')

print('..flushing mongo collection to mysql')
songs = mongo_db.get_songs()
for song in songs:
    write_song_to_sql(sql_cur, song)

try:
    sql_conn.commit()
    print('..commit success, dropping mongo collection')
    sql_conn.close()
except Exception as e:
    print(e)
    print("..commit failed, rolling back, nothing's changed")
    sql_conn.rollback()
    sql_conn.close()
Example #28
def load_db():
    stu_db = DB('stu')
    stu_db.select(TABLE_STU)
    stu_db.select(TABLE_COURSE)
    stu_db.select(STU_TABLE_COURSE)
    logger.info("Tables are created;")
Example #29
parser.add_argument('table_name', help='name of the table')
parser.add_argument('--drop_table',
                    help='drops table if it exists',
                    action='store_true')
parser.add_argument('--create_table',
                    help='creates table if it does not exist',
                    action='store_true')
parser.add_argument('--batch_size',
                    help='number of items per insert call',
                    default=10)
parser.add_argument("--configfile",
                    default="config.json",
                    help="config file to use")

# add db args with no default database
DB.add_args(parser, None)

args = parser.parse_args()

# config file
config = ConfigFile(args.configfile)

# get db connection
conn = DB(args=args, configfile=config)
cur = conn.get_cursor()

# first drop table if needed
if args.drop_table:
    print "Dropping Table..."
    cur.execute("DROP TABLE IF EXISTS {};".format(args.table_name))
Example #30
def createDateBase():
    db = DB(app.config['DATABASE'])
    db.close()
Example #31
class DbSearch():

    def __init__(self):
        print('connect')
        self.connect = DB().conn()

    def search_proj_before_bug(self):
        PROJ_BEFORE_BUG = {}
        title = '查询项目纳入统计之前的新建bug数'
        print(title.center(40, '-'))
        cur = self.connect.cursor()
        sql = 'select * from wbg_proj_before_bug'
        cur.execute(sql)
        result = cur.fetchall()
        print(result)
        for i in result:
            PROJ_BEFORE_BUG[i[0]] = i[1]
        print("项目纳入统计之前的新建bug数:" ,PROJ_BEFORE_BUG)
        return PROJ_BEFORE_BUG

    # get the current week number
    def week(self):
        cur = self.connect.cursor()
        sql = 'select last_week from wbg_week where id = 0'
        cur.execute(sql)
        week = cur.fetchall()[0][0]
        print("当前周数:", week)
        # print(type(week))
        self.connect.close()
        return week

    # check whether a project is new (i.e. whether wbg_history_data has last week's data for the project)
    def search_new_project(self, PROJ_DICT, WEEK, NEW_PROJECT):
        title = "查询新增项目"
        print(title.center(40, '-'))
        cur = self.connect.cursor()
        for proj in PROJ_DICT:
            sql = 'select project from wbg_history_data where week_num = %(week_num)s and project = %(project)s'
            # week_num = str(self.WEEK-1)+'周'
            value = {'week_num': str(WEEK - 1) + '周', 'project': proj}
            # print(self.WEEK)
            # print(type(self.WEEK))
            cur.execute(sql, value)
            projDate = cur.fetchone()
            if not projDate:
                NEW_PROJECT.append(proj)
        print('新增项目查询结束'.center(40, '-'))
        print("新增项目列表:", NEW_PROJECT)
        return NEW_PROJECT

    # query and compute the total bug count for the current week
    def search_current_week_totalBugNum(self, proj):
        title = "查询当前%s项目当前周bug总数" % proj
        print(title.center(40, '-'))
        cur = self.connect.cursor()
        sql = 'select year,month,build_bug_num from wbg_year_sta where project = %(proj)s'
        value = {'proj': proj}
        cur.execute(sql, value)
        result = cur.fetchall()
        print(result)
        total_num = 0
        for i in result:
            if i[2]:
                total_num += i[2]
        print('-------', total_num)
        return total_num

    # query all historical data per project from wbg_history_data, returned as lists used to draw the home page charts
    def search_allDate_history(self,PROJ_DICT):
        # query one project's data for a given week
        title = "查询当前所有项目所有历史数据"
        print(title.center(40, '-'))
        cur = self.connect.cursor()
        sql = 'select project,week_num,bug_leave_num,bug_total_num,bug_leave_rate,bug_add_num,bug_solve_num from wbg_history_data where del_flag is NULL '
        # value = {'proj': 'BIM'}
        cur.execute(sql)
        result = cur.fetchall()
        # print(result)
        resultDict = {}
        for proj in PROJ_DICT:
            resultDict[proj] = {'wkn': [], 'bln': [], 'btn': [], 'blr': [], 'ban': [], 'bsn': []}
        for date in result:
            if date[0] in resultDict:
                resultDict[date[0]]['wkn'].append(date[1][0:-1])  # week numbers
                resultDict[date[0]]['bln'].append(int(date[2]))  # remaining bug counts
                resultDict[date[0]]['btn'].append(int(date[3]))  # total bug counts
                resultDict[date[0]]['blr'].append(float(date[4][0:5]) * 100)  # remaining bug rate
                resultDict[date[0]]['ban'].append(int(date[5]))  # new bug counts
                resultDict[date[0]]['bsn'].append(int(date[6]))  # solved bug counts

        # print(resultDict)
        return resultDict

    # query one week's historical data from wbg_history_data by week number
    def search_weekDate_history(self, weekNum, proj=None):
        # query one project's data for a given week
        title = "查询当前%s项目%s周历史数据" % (proj, weekNum)
        print(title.center(40, '-'))
        cur = self.connect.cursor()
        sql = 'select project,week_num,statis_time,bug_leave_num,bug_total_num from wbg_history_data where week_num = %(week_num)s'
        if proj is not None:
            sql = 'select project,week_num,statis_time,bug_leave_num,bug_total_num from wbg_history_data where (week_num = %(week_num)s and project=%(proj)s)'
        value = {'week_num': weekNum, 'proj': proj}
        # print(sql)
        cur.execute(sql, value)
        weekData = cur.fetchall()
        # print(weekData)
        print(weekData)
        return weekData
Example #32
    def skillPresetActionWebService(self):
        print("skillPresetActionWebService call")

        if self.session.presets is None:
            self.session.presets = {}

        # for create/update, not load
        (ok, msg) = self.validatePresetsParams(self.vars.action)
        if not ok:
            raiseHttp(400, json.dumps(msg))
        else:
            self.session.presets['currentTab'] = self.vars.currenttab

        if self.vars.action == 'Create':
            preset = self.vars.presetCreate
        else:
            preset = self.vars.preset

        # check if the presets file already exists
        password = self.vars['password']
        password = password.encode('utf-8')
        passwordSHA256 = hashlib.sha256(password).hexdigest()
        fullPath = '{}/{}.json'.format(getPresetDir(preset), preset)
        if os.path.isfile(fullPath):
            # load it
            end = False
            try:
                oldParams = PresetLoader.factory(fullPath).params
            except Exception as e:
                msg = "UC:Error loading the preset {}: {}".format(preset, e)
                end = True
            if end:
                raiseHttp(400, json.dumps(msg))

            # check if password match
            if 'password' in oldParams and passwordSHA256 == oldParams[
                    'password']:
                # update the presets file
                paramsDict = self.genJsonFromParams(self.vars)
                paramsDict['password'] = passwordSHA256
                try:
                    PresetLoader.factory(paramsDict).dump(fullPath)
                    with DB() as db:
                        db.addPresetAction(preset, 'update')
                    self.updatePresetsSession()
                    msg = "Preset {} updated".format(preset)
                    return json.dumps(msg)
                except Exception as e:
                    msg = "Error writing the preset {}: {}".format(preset, e)
                    raiseHttp(400, json.dumps(msg))
            else:
                msg = "Password mismatch with existing presets file {}".format(
                    preset)
                raiseHttp(400, json.dumps(msg))
        else:
            # prevent a malicious user from creating presets in a loop
            if not self.maxPresetsReach():
                # write the presets file
                paramsDict = self.genJsonFromParams(self.vars)
                paramsDict['password'] = passwordSHA256
                try:
                    PresetLoader.factory(paramsDict).dump(fullPath)
                    with DB() as db:
                        db.addPresetAction(preset, 'create')
                    self.updatePresetsSession()

                    # add new preset in cache
                    (stdPresets, tourPresets,
                     comPresets) = loadPresetsList(self.cache)
                    comPresets.append(preset)
                    comPresets.sort(key=lambda v: v.upper())

                    msg = "Preset {} created".format(preset)
                    return json.dumps(msg)
                except Exception as e:
                    msg = "Error writing the preset {}: {}".format(preset, e)
                    raiseHttp(400, json.dumps(msg))
                redirect(URL(r=request, f='presets'))
            else:
                msg = "Sorry, maximum number of presets reached, can't add more"
                raiseHttp(400, json.dumps(msg))
Example #33
    def __init__(self):
        print('connect')
        self.connect = DB().conn()
Example #34
def main(*args):
	if NUM_OF_VARS != len(args): exit("\nLength of variables does not match arguments passed to main()")

	method = args[0]
	fos_trial = args[1]
	soil_cohesion = args[2]
	inter_fric = args[3]
	numslices = args[4]
	water_pres = args[5]
	vslice = args[6]
	percent = args[7]
	verbose = args[8]
	save = args[9]
	show = args[10]
	do_crit_slope = args[11]
	ellip_coor = args[12]
	delimiter = args[13]
	bulk = args[14]
	f_config = args[15]
	f_data = args[16]
	

	variables = {
		'method' : method,
		'fos_trial' : fos_trial,
		'soil_cohesion' : soil_cohesion,
		'internal_friction_angle' : inter_fric,
		'num_of_slices' : numslices,
		'water_pressure' :  water_pres,
		'vslice' : vslice,
		'percentage_status' : percent,
		'verbose' : verbose,
		'save_figure' : save,
		'show_figure' : show,
		'perform_critical_slope' : do_crit_slope,
		'ellipse_coordinates' : ellip_coor,
		'delimiter' : delimiter,
		'bulk_density' : bulk,
		'f_config' : f_config,
		'f_data' : f_data
	}
	# read values from config files
	config_values = ReadConfig(f_config, variables.keys()).return_variables()
	
	# sort through None types and replace with value from config file
	for var in variables.keys():
		if variables[var] is not None: continue
		for opt in config_values.keys():
			if var == opt:
				variables[var] = config_values[opt]
		
	# create variable storage object
	config = DB(data_manipulation(variables))
	
	# preview geometry
	if config.show_figure: previewGeometry(config)
	
	# create working space
	sprofile, circle, circ_coords = create.working_space(config, format.load_profile_data(config))
	
	# perform slope stability calculation
	fos, errstr = perform.calculation(config, sprofile, circle)
	
	# plot final diagram
	if config.show_figure:
		profile = format.linspace2d(format.load_profile_data(config), config.num_of_slices)
		plt.scatter(circ_coords[:,0], circ_coords[:,1], color="red")
		plt.plot(sprofile[:,0], sprofile[:,1], color="green")
		plt.plot (profile[:,0], profile[:,1], color="green")
		if config.save_figure:
			plt.savefig('slope_profile.tif')
		#plt.show()
	
	print(fos, errstr)
	#display.results(config, results)
Example #35
		
output_layer_1_size = 80
output_size = 1
lstm_depth = 3
output_keep_prob = 0.5
input_keep_prob = 1.0

l2_regularization_rate = 0.001
is_training = 0
is_multi_layer = 1
is_predict_by_timestep = 0

'''
DB config
'''
system = 'BOCOP-*'
db_eops = DB(host='192.168.130.30', port=3306, user='******', passwd='Password01!', database='eops')

'''
model source
'''
model_dir = 'ckpt_169d_8f_306'
print('Using model from %s.' % model_dir)
print('Using dict from %s' %(params.project_dir + '/' + params.dict_dir))
# restore the normalized transaction volume
normalized_count_data_dir = params.project_dir + '/' + params.dict_dir + '/normalized_count.dict'
with open(normalized_count_data_dir, 'r') as f:
    data = f.readlines()
    count_mean = float(data[0])
    count_std = float(data[1])

'''