Example #1
 def fetch_blacklist(self):
     blacklist_update = {}
     # What rule keys exist in Redis?
     try:
         blacklist_keys = redis_conn.smembers('blacklist')
     except Exception:
         log.warn("Redis unreachable, retrying in %ds" % redis_retry)
         time.sleep(redis_retry)
         return blacklist_update
     # Get each rule KV and build map.        
     for i in blacklist_keys:
         k = i.decode('utf-8')
         get = redis_conn.get(k)
         if get is None:
             # If Rule key value is None, it was likely expired - remove it from blacklist set.
             redis_conn.srem('blacklist', k)
         else:
             kv = get.decode('utf-8').split(':')
             # Create a key in the blacklist map for the rule KV pair found in Redis,
             # or, append to existing.
             blacklist_update.setdefault(kv[0], []).append(kv[1])
     return blacklist_update
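The consumer above expects every member of the Redis set 'blacklist' to name a string key whose value is a 'field:value' rule, with expired rules simply disappearing. A minimal producer-side sketch under that assumption (redis_conn, the key name 'rule:10', and the TTL are illustrative, not taken from the original code):

import redis

redis_conn = redis.StrictRedis(host='localhost', port=6379)

def add_blacklist_rule(rule_key, field, value, ttl=300):
    # Store the rule as "field:value" with a TTL, then register the key in the
    # 'blacklist' set so fetch_blacklist() can discover it; once the TTL expires,
    # fetch_blacklist() will prune the stale set member.
    redis_conn.setex(rule_key, ttl, '%s:%s' % (field, value))
    redis_conn.sadd('blacklist', rule_key)

add_blacklist_rule('rule:10', 'src_ip', '203.0.113.7')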
Example #2
    def call(self, func, args=None):
        self.resetTimeout()
        _d = defer.Deferred()

        if self.transport and func and self.deferreds is not None:
            obj = (func, args)
            data = dumps(obj)

            body_length = len(data)
            self.seq = inc_seq(self.seq)
            self.deferreds[self.seq]= _d
 
            _header = struct.pack(self.HEADER_FORMAT_L, TCP_REQ, self.seq, body_length)
            try:
                self.transport.write(_header + data)
                log.debug("[ CALL ]:ar_id:%d, func:%s, body_length:%d, to:%s" % (self.seq, func, body_length, self.transport.getPeer()))
            except:
                self.transport.loseConnection()
                _d.errback(Exception("call failed"))
        else:
            log.warn("[ CALL ]:unknown args client:%s or func:%s or deferreds:%s." % (self.transport.getPeer(), func, self.deferreds))
            _d.errback(Exception("call failed"))

        self.resetTimeout()

        return _d
Example #3
def delete(self, primary_key=None, where=None):
    '''
    @where : when primary_key=None, where format like: {'cid':1} 
    '''
    yield self.load(need_value=False)

    if not self._multirow:
        log.warn('[ %s ]Deleted. %s.' % (self.__class__, self.dict_attribs.value))
        self.dict_attribs.delete()
    else:
        if primary_key:
            _attr = self.dict_attribs.pop( primary_key, None )
            if _attr:
                log.debug('[ %s.delete ]deleted, id:%s, data:%s' % ( self.__class__, primary_key, _attr.value ))
                _attr.delete()
                _attr.syncdb()
        elif where:
            _found = False
            for _key, _attr in self.dict_attribs.items():
                eq = 0

                for where_k, where_v in where.iteritems():
                    _v = getattr(_attr, where_k, None)
                    if _v == where_v: eq += 1

                if eq >= len(where):
                    log.debug('[ %s.delete ]deleted, id:%s, where:%s, data:%s' % ( self.__class__, _key, where, _attr.value ))
                    del self.dict_attribs[_key]
                    _attr.delete()
                    _attr.syncdb()
Example #4
def migrateActionServices(oldPath, _newPath, accum):
    log.info("updating service level for %s" % oldPath)
    
    rcdir = os.path.basename(os.path.dirname(oldPath))
    rcMatch = re.match(r'rc(\d)\.d', rcdir)
    if not rcMatch:
        log.warn("unknown level -- %s" % oldPath)
        return False

    level = rcMatch.group(1)
    
    serviceMatch = re.match(r'([SK])\d\d(.+)', os.path.basename(oldPath))
    if not serviceMatch:
        log.warn("unknown service -- %s" % oldPath)
        return False

    if serviceMatch.group(1) == "S":
        onoff = "on"
    else:
        onoff = "off"

    name = serviceMatch.group(2)

    initPath = os.path.join(HOST_ROOT, "etc/init.d", name)

    isChkconfigService = False
    try:
        initFile = open(initPath)
        for line in initFile:
            if 'chkconfig' in line:
                isChkconfigService = True
                break
    except IOError, _e:
        log.info("  service discontinued -- %s" % name)
        return False
Example #5
    def inputFileSysMountPoint(self):
        "Set file system mount point from user input."
        mountPoint = self.userinput

        # sanity check mountPoint 
        errMsg = None
        if not mountPoint.startswith('/'):
            errMsg = mperrInvalidText
        elif mountPoint == '/boot':
            errMsg = mperrBootReservedText
        elif mountPoint in partition.INVALID_MOUNTPOINTS:
            errMsg = mperrReservedText % mountPoint

        if errMsg:
            log.warn(errMsg)
            self.errorPushPop(self.title, errMsg + TransMenu.Back)
            return

        # In edit mode, mountPoint can overwrite itself.
        for request in self.parent.requests:
            if request == self.currentRequest:
                continue
            if request.mountPoint == mountPoint:
                errMsg = mperrExistsText
                log.warn(errMsg)
                self.errorPushPop(self.title, errMsg + TransMenu.Back)
                return

        self.newPartInfo['mountPoint'] = mountPoint
        self.setSubstepEnv({'next': self.enterFileSysSize})
Example #6
    def reset_climbing(self):
        '''
        @summary: Reset the tower-climbing data: set the current floor back to 1, refill the challenge count, update last_time, and deduct one reset count.
        '''
        self.system_daily_reset()
        # Cannot reset while a sweep is in progress.
        if self.climbing.start_datetime > 0:
            log.error('In climbing could not reset.')
            defer.returnValue( IN_CLIMBING_ONGOING )
        if self.climbing.cur_layer == 1:
            log.error('Tower layer 1 could not reset.')
            defer.returnValue( IN_CLIMBING_MIN_LAYER )
        climbing_data = self.get_vip_conf_of_climbing()
        if (climbing_data[1] + self.climbing.buyed_reset < 1):
            log.warn('User no reset count. cid: {0}, had_free_reset: {1}, buyed_reset: {2}.'.format( self.cid, self.climbing.free_reset, self.climbing.buyed_reset ))
            res_err = yield self.buy_count(1)
            if isinstance(res_err, int):
                defer.returnValue( res_err )
            climbing_data = self.get_vip_conf_of_climbing()

        self.climbing.cur_layer = 1
        if self.climbing.buyed_reset > 0:
            self.climbing.buyed_reset -= 1
        elif climbing_data[1] > 0:
            self.climbing.free_reset += 1
            climbing_data[1] -= 1

        self.climbing.free_fight    = 0
        self.climbing.last_datetime = datetime.now()
 
        defer.returnValue( (self.climbing.cur_layer, self.climbing.max_layer, climbing_data[3]+self.climbing.buyed_fight, \
                climbing_data[1]+self.climbing.buyed_reset, climbing_data[2], self.climbing.start_datetime, self.user.credits) )
Example #7
def gen_pshelper(info, tgt):
    """Generate text files with host lists for the PowerShell scripts.

    Will create (or use) a directory structure in the given 'tgt' directory. By
    default it will create one file for each host, using the *first* host alias
    as file name, containing the host's DNS hostname. In addition, files will
    be created for *each* host group, containing all the DNS hostnames of those
    hosts belonging to the group.
    """
    if tgt is None:
        tgt = '/tmp/pshelper'
        log.warn("No output directory specified, trying fallback '%s'.", tgt)
    if not os.path.exists(tgt):
        try:
            os.makedirs(tgt)
        except Exception as err:
            sys.exit("ERROR creating output directory: %s" % err)
    for hostname, details in info.hosts.iteritems():
        # the host's primary alias:
        alias = details['aliases'][0]
        log.info("Host: %s (%s)", alias, hostname)
        list_to_file(tgt, alias, [hostname])
    for groupname in info.groups.keys():
        log.info("Group: %s", groupname)
        list_to_file(tgt, groupname, info.groups[groupname])
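gen_pshelper() delegates the actual writing to a list_to_file() helper that is not shown in this snippet. A plausible minimal sketch, assuming the helper simply writes one entry per line into '<tgt>/<name>.txt':

import os

def list_to_file(tgt, name, entries):
    # Write each entry on its own line into '<tgt>/<name>.txt'.
    outfile = os.path.join(tgt, name + '.txt')
    with open(outfile, 'w') as fh:
        for entry in entries:
            fh.write(entry + '\n')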
Example #8
    async def call(self, func, args=None):
        _f = asyncio.Future()

        if self.transport and func:
            obj = (func, args)
            data = dumps(obj)

            body_length = len(data)
            self.seq = inc_seq(self.seq)
            self.__futures[self.seq]= _f

            _header = struct.pack(self.HEADER_FORMAT_L, TCP_REQ, self.seq, body_length)
            try:
                self.transport.write(_header + data)
                asyncio.get_event_loop().call_later(self.TIMEOUT, self.__callTimeout, obj)
                log.debug("[ CALL ]:ar_id:{0}, func:{1}, body_length:{2}, to:{3}".format(self.seq, func, body_length, self.__peer))
            except Exception as e:
                self.transport.abort()
                _f.set_exception(e)
        else:
            log.warn("[ CALL ]:unknown args client:{0} or func:{1}.".format(self.__peer, func))
            _f.set_exception(Exception("call failed"))

        self.resetTimeout()

        return (await _f)
Example #9
def fellowsoul_combine(p, req):
    res_err = UNKNOWN_ERROR

    cid, [user_fellow_id] = req

    user = g_UserMgr.getUser(cid)
    if not user:
        log.error('Can not find user. cid: {0}.'.format( cid ))
        defer.returnValue( res_err )

    res_err, fellow_id = yield user.bag_fellowsoul_mgr.combine( user_fellow_id )
    if not res_err:
        try:
            # args: fellow_id, is_major, camp_id, on_troop
            res_err, attrib = yield user.fellow_mgr.create_table_data( fellow_id, 0, 0, 0 )
        except Exception as e:
            log.warn('Create fellow fail! e:', e)
            defer.returnValue(res_err)
        #errorno, value = yield user.fellow_mgr.addNewFellow( fellow_id )
        if not res_err:
            _conf = get_fellow_by_fid(fellow_id)
            if _conf:
                q = _conf.get('Quality', 0)
                if q>= 2:
                    user.achievement_mgr.update_achievement_status(29, 1)
                if q>= 3:
                    user.achievement_mgr.update_achievement_status(30, 1)
                if q>= 4:
                    user.achievement_mgr.update_achievement_status(31, 1)
            defer.returnValue( [attrib.attrib_id, attrib.fellow_id] )

    defer.returnValue( res_err )
Example #10
    def onekey_receive_reward(self, user):
        res = CONSTELLATION_REWARD_RECEIVED

        self.synced = False

        items_return = []
        for idx, reward in enumerate(self.rewards):
            if reward[3]:
                continue

            if self.score >= Constellation.stars_need( idx ):
                _add_func = ITEM_MODELs.get(reward[0], None)

                if _add_func:
                    res_err, res_value = yield _add_func( user, ItemID = reward[1], ItemNum = reward[2], AddType=WAY_CONSTELLATION_AWARD, CapacityFlag=False )
                    if not res_err:
                        for _v in res_value:
                            items_return = total_new_items(_v, items_return)
                    reward = list(reward)
                    reward[3] = 1
                    self.rewards[idx] = reward
                    res = NO_ERROR
                else:
                    log.error('[ Constellation.receve_reward ]no such item type: {0}.', reward[0])
            else:
                log.warn('user too few score. cid:{0}, score:{1}, idx:{2}, stars need:{3}.'.format( user.cid, self.score, idx, Constellation.stars_need(idx)) )
                break

        defer.returnValue( (res, self.rewards, items_return) )
Example #11
    def timeoutConnection(self):
        transport = getattr(self, 'transport')

        if transport is not None and isinstance(transport, asyncio.transports.Transport):
            transport.abort()
        else:
            log.warn('unknown transport:{0}'.format(transport))
Example #12
def gm_add_credits(cmd, ts, args, sign):
    if len(args) != 2:
        log.error('add credits error. args:{0}.'.format( args ))
        defer.returnValue(GM_INVALID_ARGS)

    account = args[0]
    add_credits = int( args[1] )

    try:
        res_login = yield server_login_user( account )
    except Exception as e:
        log.exception()
        defer.returnValue(GM_EXECUTE_FAIL)
    log.debug('Res of server_login_user: {0}.'.format( res_login ))
    err_login, cid = res_login
    if err_login:
        log.warn('Exp39287692 login user fail! account {0}, err_login {1}'.format( account, err_login ))
        defer.returnValue(GM_LOGIN_USER_FAIL)

    res = GM_EXECUTE_FAIL
    try:
        res = yield gs_call('gs_gm_add_credits', [ cid, add_credits ])
    except Exception as e:
        log.debug('Exp39380828 e:', e)
        log.exception()

    server_logout_user(cid)

    defer.returnValue( res )
Example #13
def load_all_config(limit=FOR_ALL):
    conn   = MySQLdb.connect(**db_config())
    SELECT = 'SELECT {0} FROM tb_{1}'
    result = {}
    
    for _limit, table, fields, custom_sql, custom_handler in TABLES:
        if _limit not in (FOR_ALL, limit):
            continue

        cursor = conn.cursor()
        try:
            data = {}

            _sql = custom_sql if custom_sql else SELECT.format(','.join(fields), table)

            cursor.execute(_sql)
            _dataset = cursor.fetchall()

            if custom_handler:
                data = custom_handler(table, fields, _dataset)
            else:
                for row in _dataset:
                    if row:
                        #data[row[0]] = row
                        data[row[0]] = dict(zip(fields, row))

            result[table] = data
        except Exception, e: 
            log.warn('error sql: %s' % _sql) 
            traceback.print_exc()
            continue

        cursor.close()
Example #14
def main_noninteractive():
    """The main routine for running non-interactively."""
    global imcftpl
    args = parse_arguments()
    set_loglevel(args.verbose)
    log.info('Running in non-interactive mode.')
    log.debug('Python FluoView package file: %s' % fv.__file__)
    base = dirname(args.mosaiclog)
    fname = basename(args.mosaiclog)
    mosaics = fv.FluoViewMosaic(join(base, fname))
    log.warn(gen_mosaic_details(mosaics))
    if args.templates is not None:
        imcftpl = args.templates
    code = imagej.gen_stitching_macro_code(mosaics, 'templates/stitching',
                                           path=base, tplpath=imcftpl)
    if not args.dryrun:
        log.info('Writing stitching macro.')
        imagej.write_stitching_macro(code, fname='stitch_all.ijm', dname=base)
        log.info('Writing tile configuration files.')
        imagej.write_all_tile_configs(mosaics, fixsep=True)
        log.info('Launching stitching macro.')
        IJ.runMacro(flatten(code))
    else:
        log.info('Dry-run was selected. Printing generated macro:')
        log.warn(flatten(code))
Example #15
    def probability_of_robot(cid, shard_id, limit_rate):
        '''
        @summary: Get the hit probability when a player snatches a treasure shard from a robot.
        @return : True - shard hit, False - missed
        '''
        _base_rate = 7500 # base hit rate against robots

        _conf = get_treasureshard_rate_conf( shard_id )
        if _conf:
            _miss_rate = yield redis.hget( HASH_TREASURESHARD_ROBOT_RATE % shard_id, cid )
            if _miss_rate is None: # first treasure snatch for a new account
                _miss_rate = _conf['MaxRate']
            else:
                _miss_rate = int(_miss_rate)

            _base_rate = _conf['Rate'] + _miss_rate
            if _base_rate >= _conf['MaxRate']:
                _base_rate = _conf['MaxRate']

            _miss_rate += _conf['AddRate']
        else:
            log.warn( 'No such conf in sysconfig:treasureshard_rate, shard_id:', shard_id )
            defer.returnValue( False )

        if limit_rate <= _base_rate: # hit
            _miss_rate = 0
            yield redis.hset( HASH_TREASURESHARD_ROBOT_RATE % shard_id, cid, _miss_rate )
            defer.returnValue( True )
        else:
            yield redis.hset( HASH_TREASURESHARD_ROBOT_RATE % shard_id, cid, _miss_rate )
            defer.returnValue( False )
Example #16
 def verify():
     try:
         d1, d2, _id = brandiso.extract_iso_checksums(media.partPath)
         return d1 == d2
     except brandiso.BrandISOException, inst:
         log.warn(inst)
         return None
Example #17
def write_tile_config(mosaic_ds, outdir='', fixsep=False):
    """Generate and write the tile configuration file.

    Call the function to generate the corresponding tile configuration and
    store the result in a file. The naming scheme is "mosaic_xyz.txt" where
    "xyz" is the zero-padded index number of this particular mosaic.

    Parameters
    ----------
    mosaic_ds : volpy.dataset.MosaicData
        The mosaic dataset to write the tile config for.
    outdir : str
        The output directory, if empty the input directory is used.
    fixsep : bool
        Passed on to gen_tile_config().
    """
    log.info('write_tile_config(%i)' % mosaic_ds.supplement['index'])
    config = gen_tile_config(mosaic_ds, fixsep)
    # TODO: add some padding mechanism to the experiment/dataset classes
    # fname = 'mosaic_%0*i.txt' % (len(str(len(mosaic_ds))))
    fname = 'mosaic_%s.txt' % mosaic_ds.supplement['index']
    if outdir == '':
        fname = join(mosaic_ds.storage['path'], fname)
    else:
        fname = join(outdir, fname)
    out = open(fname, 'w')
    out.writelines(config)
    out.close()
    log.warn('Wrote tile config to %s' % out.name)
Example #18
def daemon():
    # find local IP
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(('8.8.8.8', 0))
    local_ip_address = s.getsockname()[0]
    s.close()

    # listen and reply
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # s.settimeout(debug_timeout)
    
    try:
        s.bind(('0.0.0.0', listen_port))
        
    except Exception:
        log.warn('Could not bind 0.0.0.0:{}'.format(listen_port))
    else:
        log.debug('UDP bound')
        while True:    
            try:
                data, addr = s.recvfrom(4096)  # blocks
                print('{ip}:{port}  {data}'.format(data=data.decode('ascii'), ip=addr[0], port=addr[1]))
                reply = str(web_server_port).encode('ascii')        
                s.sendto(reply, (addr[0], reply_port))
                log.debug('Sent a reply to ' + str(addr))
            
            except socket.timeout:
                print('Listening duration elapsed.')
                break

    s.close()
Example #19
def scheduler_listener(event):
    if event.code == EVENT_JOB_ERROR:
        print('The job crashed :(')
        log.warn("The schedule job crashed because of %s" % repr(event.exception))
    else:
        print('The job executed :)')
        log.debug("The schedule job %s executed and return value is '%s'" % (event.job_id, event.retval))
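The listener above only fires once it is registered with a scheduler together with an event mask. A minimal hookup sketch (the BackgroundScheduler instance and the interval job are illustrative, not part of the original snippet):

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED

scheduler = BackgroundScheduler()
# Only error and successful-execution events are delivered to scheduler_listener.
scheduler.add_listener(scheduler_listener, EVENT_JOB_ERROR | EVENT_JOB_EXECUTED)
scheduler.add_job(lambda: 'tick', 'interval', seconds=30, id='tick_job')
scheduler.start()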
Example #20
    def syncdb(self):
        if self.__dirty:
            _dirty_fields = self.__dirty_fields[:]

            if len(_dirty_fields) == 0 and False == self.__del:
                log.info('no dirty_fields! table name:{0}, attrib_id:{1}.'.format( self.table, self.__attrib_id ))
                defer.returnValue(None)

            _sql = ''

            try:
                if self.__del:
                    yield db.execute('DELETE FROM {0} WHERE id={1};'.format(self.table, self.__attrib_id))
                else:
                    _sql, _v = self.__gen_update_value(_dirty_fields)
                    if _v:
                        yield POOL.execute(_sql, _v)
                    else:
                        log.warn('Update error. table: {0}, cid: {1}, sql: {2}, dirty: {3}.'.format(\
                            self.table, self.__attrib_id, _sql, self.__dirty_fields))
            except:
                log.exception('[ SQLERROR ]table:{0}, id:{1}, dirty:{2}, new:{3}, dirty_fields:{4}, sql:{5}'.format(
                    self.table, self.__attrib_id, self.__dirty, self.__new, self.__dirty_fields, _sql))
            else:
                self.clean()
Example #21
def simple_route(path):
    if path in Template_Routes:
        return render_template(Template_Routes[path],
                               meta_content=oauth_meta_content())
    else:
        log.warn("page '%s' not found" % path)
        abort(404)
Example #22
def login(p, req):
    log.warn("=======req: ", req)
    machine_code, = req
    if not machine_code:
        defer.returnValue((MACHINE_CODE_ERROR, {}))

    uid = yield redis.hget(HASH_MACHINE_CODE_REGISTERED, machine_code)
    p.uid = uid
    info = dict()
    # Create a new player
    if not uid:
        #TODO random nickname
        nickname = machine_code
        character_mgr = Character(0, machine_code, nickname)
        yield character_mgr.new(machine_code, nickname)

        uid = character_mgr.uid
        yield redis.hset(HASH_NICKNAME_REGISTERED, nickname, uid)
        yield redis.hset(HASH_MACHINE_CODE_REGISTERED, machine_code, uid)
        info = character_mgr.info()
    else:
        # If already logged in, release the old connection and notify about the duplicate login
        user = g_UserMgr.getUserLogined(uid, p)
        if not user:
            nickname = yield redis.hget(HASH_NICKNAME_REGISTERED, uid)
            character_mgr = Character(uid, machine_code, nickname)
            yield character_mgr.load()
            user = g_UserMgr.loginUser(p, uid, machine_code, nickname, character_mgr)
        if user:
            info = user.character_mgr.info()

    info['constants'] = constant_data()
    info['server_time'] = int(time()*1000)
    defer.returnValue((0, info))
Example #23
def grant_limit_fellow_award(all_ranks, activity_id, timestamp):
    '''
    @param: timestamp - the timestamp doubles as a validity check for the callLater
    '''
    log.warn('limit fellow award. activity_id: {0}, timestamp: {1}, ACTIVITY_AWARD_TIME: {2}.'.format( activity_id, timestamp, ACTIVITY_AWARD_TIME ))
    # Verify this is the expected callLater invocation
    if ACTIVITY_AWARD_TIME != timestamp:
        defer.returnValue( None )

    _max_rank = max(all_ranks)
    if _max_rank > 0:
        _rank = 1
        _name_scores = yield redis.zrevrange( SET_LIMIT_FELLOW_NAME_SCORE, 0, _max_rank, withscores=True )
        for _nick_name, _score in _name_scores:
            if _score <= 0:
                continue
            _cid = yield redis.hget(DICT_NICKNAME_REGISTERED, str(_nick_name))
            if _cid:
                yield g_AwardCenterMgr.new_award( _cid, AWARD_TYPE_LIMIT_FELLOW_RANK, [int(time()), activity_id, _rank] )
            _rank += 1

    if 0 in all_ranks:
        _names = yield redis.zrangebyscore( SET_LIMIT_FELLOW_NAME_SCORE, 60, '+inf' )
        for _name in _names:
            _cid = yield redis.hget(DICT_NICKNAME_REGISTERED, str(_name))
            if _cid:
                yield g_AwardCenterMgr.new_award( _cid, AWARD_TYPE_LIMIT_FELLOW_SCORE, [int(time()), activity_id, 0] )

    yield redis.delete( HASH_LIMIT_FELLOW_SHRINE, SET_LIMIT_FELLOW_NAME_SCORE )
Example #24
async def sync_gs_dirty_attributes(loops=True):
    _len = g_DirtyGSAtrributeQueue.qsize()
    if _len > 0:
        if loops:
            _times = min(_len, MAX_SYNC_CS_CNT_PER_LOOP) 
        else:
            _times = _len

        i = 0
        dirty_attributes = []
        while i < _times:
            i += 1
            try:
                attr  = g_DirtyGSAtrributeQueue.get_nowait()
                _data = attr.syncDirtyAttrToCS()
                if _data:
                    dirty_attributes.append( _data )
            except Queue.Empty:
                break
            except Exception as e:
                log.error('Exp3029882 e:', e)

        if dirty_attributes:
            await cs_call('cs_sync_user_data', dirty_attributes)
        log.info('End sync, total attributes: {0}, left dirty attributes length: {1}.'.format( _times, g_DirtyGSAtrributeQueue.qsize()))

    if loops:
        loop.call_later(SYNC_CS_INTERVAL, make_task)
    else:
        log.warn('End sync, dirty attributes length {0}, loop:{1}.'.format(g_DirtyGSAtrributeQueue.qsize(), loops))
Example #25
def get_slides_source_from_gist(gist):
    """ Get remark style slides's markdown source

    First, remarks will search for file ``slides.md`` and return its content
    
    If file ``slides.md`` does not exists, remarks will scan files like slide1.md, slide2.md, slide3.md ...
    If slide files found, remark will join each of their content with ``---`` and return.
    (``---`` is the slide separater for `remark <https://github.com/gnab/remark>`_ )
   
    If no slides.md or slideN.md found, None will be return instead.
    """
    
    log.info('Guessing slides.md source')
    slides_file = gist.get('files', {}).get('slides.md', None)
    if slides_file:
        log.info('slides.md found.')
        return slides_file.get('content', '')
    
    log.info('Guessing slideN.md sources')
    slide_files = [v for k, v in gist.get('files', {}).items() if re.match(r'slide\d+\.md', k)]
    slide_files = sorted(slide_files, key=lambda slide_file: int(re.sub(r'\D', '', slide_file.get('filename', ''))))
    if slide_files:
        log.info('%d slideN.md files found', len(slide_files))
        return '\n---\n'.join([slide_file.get('content', '') for slide_file in slide_files])

    log.warn('No slides found')
    return None 
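To illustrate the lookup order described in the docstring, a gist payload containing only slideN.md files falls through to the second branch and gets joined with the remark separator (the dict below is a made-up stand-in for the GitHub API response):

gist = {
    'files': {
        'slide2.md': {'filename': 'slide2.md', 'content': '# Second'},
        'slide1.md': {'filename': 'slide1.md', 'content': '# First'},
    }
}
# No 'slides.md' entry, so the slideN.md files are sorted by their number
# and joined with '---', yielding '# First\n---\n# Second'.
print(get_slides_source_from_gist(gist))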
Example #26
def outPdHandler(meta, config):
    message, service_alias, incident_key = meta[1:]
    log.info("Event Match: %s" % message)

    service_key = config['pagerduty'][service_alias]
    url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
    alert = {
      "event_type": "trigger",
      "service_key": service_key,
      "description": "occam_alert",
      "incident_key": "",
      "details": {}
    }

    # Append whole message as PD alert details.
    alert['details'] = json.dumps(message)

    # Create incident_key if provided.
    if incident_key: alert['incident_key'] = alert['description'] = incident_key

    # Ship.
    resp = requests.post(url, data=json.dumps(alert))
    if resp.status_code != 200:
        log.warn("Error sending to PagerDuty: %s" % resp.content.decode('utf-8'))
    else:
        log.info("Message sent to PagerDuty: %s" % resp.content.decode('utf-8'))
Example #27
    def do_indexing(self):
        # read dictionary index file
        log.info("parse stardict index file %s" % self.idxloc)
        try:
            idx = open(self.idxloc, 'rb')
        except IOError:
            log.warn("failed to open %s" % self.idxloc)
            return False

        idxdata = idx.read()
        idx.close()

        start = 0
        for i in range(self.wordcount):
            pos = idxdata.find('\0', start, -1)
            fmt = "%ds" % (pos-start)
            wordstr = struct.unpack_from(fmt, idxdata, start)[0]
            start += struct.calcsize(fmt) + 1

            (off, size) = struct.unpack_from(">LL",idxdata, start)
            start += struct.calcsize(">LL")
            item = worditem(wordstr, off, size)
            self.wordset.add(wordstr)
            self.wordlist.append(item)

        log.info("identify dict %s, word count %d" % (self.name, self.wordcount))
        return True
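The loop above assumes the StarDict .idx layout: a NUL-terminated word immediately followed by a big-endian 32-bit offset and size. A tiny self-contained check of that layout (synthetic data, not a real dictionary file):

import struct

# One synthetic index entry: word, then offset and size as big-endian uint32.
entry = b'hello\0' + struct.pack('>LL', 1024, 96)
pos = entry.find(b'\0')
word = entry[:pos]
off, size = struct.unpack_from('>LL', entry, pos + 1)
print(word, off, size)   # b'hello' 1024 96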
Example #28
    def sync( self ):
        #_info = self.__cid, self.__name, self.__lead_id, self.__level, self.__might, self.__rank, self.__t

        if self.alliance:
            yield redis.rpush( TPL_LIST_ALLIANCE_REQUEST % self.alliance.aid, dumps( self.info ) )
        else:
            log.warn( 'This Request<{0}> has no alliance'.format( self.info ) )
Example #29
    def get_randcard_frompool(self, shrine_level, rand_count):
        ''' Get the card-draw pool. '''
        pool_level = 0
        if rand_count == 0:
            pool_level = shrine_level + 1

        final_level = 0
        all_shrine_levels = get_limit_fellow_pool_levels()
        for _level in all_shrine_levels:
            if _level <= pool_level and final_level < _level:
                final_level = _level

        total_rate = 0
        pool = get_limit_fellow_pool_conf(final_level)
        for _conf in pool.itervalues():
            total_rate += _conf['Rate']

        if total_rate <= 0:
            log.warn('No randcard. final_level: {0}, total_rate: {1}.'.format( final_level, total_rate ))
            return {}

        curr_int = 0
        randint  = rand_num(total_rate)
        for _conf in pool.itervalues():
            if randint < (curr_int + _conf['Rate']):
                #log.error('For Test. randint: {0}, curr_int: {1}, rate: {2}, total_rate: {3}.'.format( randint, curr_int, _conf['Rate'], total_rate ))
                return _conf
            else:
                curr_int += _conf['Rate']
        else:
            log.warn('Not rand a card. final_level: {0}, randint: {1}, total_rate: {2}.'.format( final_level, randint, total_rate ))
            return {}
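The draw loop above is cumulative-weight (roulette-wheel) selection over the pool entries' 'Rate' values. The same pattern in isolation, with made-up entries, may make the control flow easier to follow:

import random

def weighted_pick(entries):
    # entries: list of (item, rate) pairs with non-negative integer weights.
    total_rate = sum(rate for _, rate in entries)
    if total_rate <= 0:
        return None
    randint = random.randrange(total_rate)   # 0 <= randint < total_rate
    curr_int = 0
    for item, rate in entries:
        if randint < curr_int + rate:
            return item
        curr_int += rate

print(weighted_pick([('common', 70), ('rare', 25), ('epic', 5)]))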
Example #30
def discoverLocation(subDict):
    '''Discover where grub is installed, either on the MBR or the first sector
    of the /boot partition.
    '''
    wholeDiskDev, _partNum = partition.splitPath(subDict['devPartName'])

    diskFile = open(wholeDiskDev)
    mbrContents = diskFile.read(512)
    diskFile.close()

    partFile = open(subDict['devPartName'])
    partBootContents = partFile.read(512)
    partFile.close()

    loc = None
    doNotInstall = False
    if _sectorHasGrub(partBootContents):
        log.debug("found grub installed on the /boot partition")
        loc = userchoices.BOOT_LOC_PARTITION
    elif _sectorHasGrub(mbrContents):
        log.debug("found grub installed on the MBR")
        loc = userchoices.BOOT_LOC_MBR
    else:
        log.warn("grub was not found, not upgrading boot loader")
        doNotInstall = True

    userchoices.setBoot(True, doNotInstall, location=loc)
Example #31
    def getUserLogined(self, cid, p, session_key, info={}):
        user = self.user_dic.get(cid, None)
        if user:
            if hasattr(user, 'p') and user.p and user.p.transport:
                try:
                    user.p.send('invalid_connect', None)
                    user.p.lose_connect = False
                    user.p.transport.loseConnection()
                except Exception, e:
                    log.error('exception. cid: {0}, e: {1}.'.format(cid, e))

            user.p = p
            p.cid = user.cid
            user.session_key = session_key
            user.temp_lost = False
            user.logout_timestamp = 0
            if info:
                user.info = info
            log.warn('Replace old client. cid: {0}.'.format(p.cid))
Example #32
    def load(self):
        try:
            _l_stream = yield redis.lrange(LIST_TIME_LIMITED_GROUP, 0, -1)
            if _l_stream:
                for i in xrange(len(_l_stream)):
                    _group = loads(_l_stream[i])
                    if not isinstance(_group, (list, tuple)) or len(_group) != 3:
                        log.warn('unknown stream:', _group)
                        continue

                    _group_id, _begin_t, _end_t = loads(_l_stream[i])
                    _group = ItemGroup(_group_id, _begin_t, _end_t)

                    insort_right(self.groups_sorted, _group)

                yield self.load_by_group()
        except Exception, e:
            log.warn(e)
            reactor.callLater(1, self.load)
Example #33
def runtimeActionMediaCheck(uiDelegate=None):
    global MEDIA_CHECKED
    
    log.info("checking the MD5 of the installation media")

    if not uiDelegate:
        uiDelegate = MOUNT_MEDIA_DELEGATE

    media = userchoices.getMediaDescriptor() or DEFAULT_MEDIA

    def verify():
        try:
            d1, d2, _id = brandiso.extract_iso_checksums(media.partPath)
            return d1 == d2
        except brandiso.BrandISOException, inst:
            log.warn(inst)
            return None
        except IOError, e:
            log.warn(e)
            return None
Example #34
    def random_next_level(self, curr_level):
        ''' The jade-appraisal level has a chance to go up, stay the same, or fall back to the initial level. '''
        #log.info('For Test. before level: {0}.'.format( curr_level ))
        level_conf = get_jade_level_conf(curr_level)
        if not level_conf:
            return curr_level, []

        total_rate = 0
        for _v in level_conf.itervalues():
            total_rate += _v['Rate']

        if total_rate <= 0:
            log.warn(
                'There is no level pool. cid: {0}, curr_level: {1}.'.format(
                    self.cid, curr_level))
            return curr_level, []

        curr_int = 0
        randint = rand_num(total_rate)

        extra_item = []
        for _v in level_conf.itervalues():
            if randint < (curr_int + _v['Rate']):
                curr_level = _v['TargetLevel']
                #log.error('For Test. curr_level: {0}, randint: {1}, curr_int: {2}, _rate: {3}, total_rate: {4}.'.format( curr_level, randint, curr_int, _v['Rate'], total_rate ))
                break
            else:
                curr_int += _v['Rate']
        else:
            curr_level = curr_level

        rand_int = random.randint(0, 10000)
        if rand_int <= level_conf[curr_level]['ExtraOdds']:
            extra_item = [
                level_conf[curr_level]['ItemType'],
                level_conf[curr_level]['ItemID'],
                level_conf[curr_level]['ItemNum']
            ]

        #log.info('For Test. after level: {0}, extra_item: {1}, total_rate: {2}.'.format( curr_level, extra_item, total_rate ))
        return curr_level, extra_item
Example #35
def proxy(p, request):
    res = [UNKNOWN_ERROR, None]

    log.info('request: {0}, p: {1}.'.format(request, p))
    if not (hasattr(p, 'cid') and p.cid):
        res[0] = LOGIN_UNKNOWN_CID
        log.warn('[ proxy ]unknown cid.', request, 'FROM:',
                 p.transport.getPeer())
        returnValue(res)
    else:
        _func, _request = request
        if _request:
            _request = p.cid, _request
        else:
            _request = (p.cid, )

        _prefix = _func.split('_')[0]

        if _prefix == PREFIX_FORWARD_TO_CHAT:
            _call = ms_call
        elif _prefix == PREFIX_FORWARD_TO_MAIL:
            _call = ms_call
        elif _prefix == PREFIX_FORWARD_TO_ALLI:
            _call = alli_call
        else:
            _call = gs_call

        try:
            game_res = yield _call(_func, _request)
        except Exception, e:
            log.error('e: {0}, request: {1}.'.format(e, request))
            traceback.print_exc()
            returnValue(res)

        if isinstance(game_res, int):
            res[0] = game_res
            returnValue(res)
        else:
            res[0] = NO_ERROR
            res[1] = game_res
            returnValue(res)
Example #36
 def del_member(self, member, action_member=None):
     ''' No need to update the alliance's combat power in real time when a member leaves. '''
     if member.cid in self.__members:
         self.__members.remove( member.cid )
         self.dirty()
     else:
         log.warn('delete member not in alliance member lists. cid: {0}, alliance_id: {1}.'.format( member.cid,  self.__id))
     if member.cid in self.__vice_leaders:
         self.__vice_leaders.remove( member.cid )
     if action_member:
         yield member.clean_alliance()
         ms_send('write_mail', (member.cid, MAIL_PAGE_SYSTEM, MAIL_SYSTEM_6, [self.__name]))
         # alliance activity feed
         self.new_action( (ALLIANCE_ACTION_3, int(time()), action_member.cid, \
                 action_member.lead_id, action_member.nick_name, action_member.level, [member.lead_id, member.nick_name]) )
     else:
         yield member.leave_alliance()
         ms_send('write_mail', (member.cid, MAIL_PAGE_SYSTEM, MAIL_SYSTEM_5, [self.__name]))
         # alliance activity feed
         self.new_action( (ALLIANCE_ACTION_2, int(time()), member.cid, member.lead_id, member.nick_name, member.level, []) )
     defer.returnValue( NO_ERROR )
Example #37
 def runCallback(self, reason, amount, total, callbackArgs, param):
     '''This method is called by the RPM installation backend.  It is
     mostly called as a means to report progress.  It also needs to
     download incomplete files and needs to open/close files.
     '''
     reasonDispatch = {
         rpm.RPMCALLBACK_INST_OPEN_FILE: self.cbInstallOpenFile,
         rpm.RPMCALLBACK_INST_CLOSE_FILE: self.cbInstallCloseFile,
         rpm.RPMCALLBACK_UNPACK_ERROR: self.cbFileError,
         rpm.RPMCALLBACK_CPIO_ERROR: self.cbFileError,
         rpm.RPMCALLBACK_UNKNOWN: self.cbUnknown,
         rpm.RPMCALLBACK_INST_PROGRESS: self.cbNoop,
         rpm.RPMCALLBACK_INST_START: self.cbNoop,
         rpm.RPMCALLBACK_TRANS_PROGRESS: self.cbNoop,
         rpm.RPMCALLBACK_TRANS_START: self.cbNoop,
         rpm.RPMCALLBACK_TRANS_STOP: self.cbNoop,
     }
     try:
         return reasonDispatch[reason](callbackArgs)
     except KeyError:
         log.warn('No handler known for the RPM event %s' % str(reason))
Example #38
def outHcHandler(meta, config):
    message, hc_meta, notify = meta[1:]
    log.info("Event Match: %s" % message)

    hc = config['hipchat'][hc_meta].split("_")
    url = "https://api.hipchat.com/v2/room/" + hc[0] + "/notification"

    notification = {
        "message": "<b>Occam Alert</b><br>" + json.dumps(message),
        "message_format": "html",
        "notify": notify
    }
    # Ship.
    resp = requests.post(url,
                         data=json.dumps(notification),
                         params={'auth_token': hc[1]},
                         headers={'content-type': 'application/json'})
    if resp.status_code not in (200, 204):
        log.warn("Error sending to HipChat: %s" % resp.content.decode('utf-8'))
    else:
        log.info("Message sent to HipChat")
Example #39
    def __init__(self, contentString):
        '''Construct an config file object from the given contents.

        The contents are split into individual lines and then split into
        individual fields based on the SEPARATOR regex.  If the number of fields
        matches the length of the ELEMENT_TYPE.STRUCT list, a new element of
        ELEMENT_TYPE will be constructed with the fields.
        '''

        self.elements = []

        for line in map(string.strip, contentString.split('\n')):
            if not line:
                continue

            rawFields = re.split(self.SEPARATOR, line)
            fields = self._addDefaultFields(rawFields)
            if len(fields) == len(self.ELEMENT_TYPE.STRUCT):
                self.elements.append(self.ELEMENT_TYPE(fields))
            else:
                log.warn("skipping config line -- %s" % line)
Example #40
def __broadcast(user_remain, func, args):
    if user_remain:
        i = 0
        while i < MAX_CLIENTS_BORADCAST_PER_LOOP:
            i += 1
            _user = user_remain.pop(0)
            if _user:
                if hasattr(_user, 'p'):
                    if hasattr(_user.p, 'transport'):
                        if _user.p.transport:
                            _user.p.send(func, args)
                        else:
                            log.warn(
                                '__broadcast. cid:{0}, unknown t:{1}.'.format(
                                    _user.cid, _user.p.transport))
                            g_UserMgr.del_zombie_user(_user.cid)
                    else:
                        log.warn(
                            '__broadcast. cid:{0}, the p has no transport attribute..'
                            .format(_user.cid))
                        g_UserMgr.del_zombie_user(_user.cid)
                else:
                    log.warn(
                        '__broadcast. cid:{0}, the user has no p attribute..'.
                        format(_user.cid))
                    g_UserMgr.del_zombie_user(_user.cid)
            else:
                log.info('__broadcast. Unknown user.')

            if not user_remain:
                break
        else:
            reactor.callLater(1, __broadcast, user_remain, func, args)
Example #41
    def del_zombie_user(self, cid):
        ''' Remove zombie players (invalid client connections).
        '''
        _user = self.user_dic.get(cid, None)
        if _user:
            if hasattr(_user, 'p'):
                if hasattr(_user.p, 'transport'):
                    if _user.p.transport:
                        defer.returnValue(NO_ERROR)
                    else:
                        log.error(
                            'Unknown user. cid:{0}, transport:{1}.'.format(
                                cid, _user.p.transport))
                else:
                    log.warn(
                        'Unknown user. cid:{0}, the p has no transport attribute..'
                        .format(cid))
            else:
                log.warn('__broadcast. cid:{0}, the user has no p attribute..'.
                         format(_user.cid))

            self.delUserByCid(cid)
            gs_call('gs_logout', cid)
            ms_call('ms_logout', [cid])
            yield alli_call('alli_logout', [cid])
            log.warn('Del zombie user success. cid: {0}.'.format(cid))
        defer.returnValue(NO_ERROR)
Example #42
def sync_external_dependencies(source_file, target_file):
    # Read artifact version from source_file.
    dependency_dict = {}
    with open(source_file) as file:
        for line in file:
            line = line.strip()
            if line.startswith('#') or not line:
                continue
            else:
                key_value = line.split(';', 1)
                key = key_value[0]
                value = key_value[1]
                dependency_dict[key] = value
    # Write artifact versions into target file.
    with in_place.InPlace(target_file) as file:
        for line in file:
            line = line.strip()
            if line.startswith('#') or not line:
                file.write(line)
            else:
                key_value = line.split(';', 1)
                key = key_value[0]
                value = key_value[1]
                if key not in SKIP_IDS and key in dependency_dict:
                    value_in_dict = dependency_dict[key]
                    if version_bigger_than(value, value_in_dict):
                        log.warn(
                            'Version update skipped. key = {}, value = {}, new_value = {}'
                            .format(key, value, value_in_dict))
                        file.write(line)
                    elif version_bigger_than(value_in_dict, value):
                        log.info(
                            'Version updated. key = {}, value = {}, new_value = {}'
                            .format(key, value, value_in_dict))
                        file.write('{};{}'.format(key, value_in_dict))
                    else:
                        file.write(line)
                else:
                    file.write(line)
            file.write('\n')
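Both branches rely on a version_bigger_than() helper that is not part of this snippet. A rough sketch of what such a comparison might look like, assuming plain dotted numeric versions (real artifact versions with qualifiers such as '-beta.1' would need more care):

def version_bigger_than(left, right):
    # Compare dotted numeric versions, e.g. '1.10.2' > '1.9.5'.
    left_parts = [int(p) for p in left.split('.')]
    right_parts = [int(p) for p in right.split('.')]
    return left_parts > right_parts

print(version_bigger_than('1.10.2', '1.9.5'))   # True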
Example #43
    def lock_exchange_material(self, exchange_id, lock_type):
        res = EXCHANGE_NOT_FOUND

        if not self.__list:
            yield self.exchange_list()

        for _exchange in self.__list:
            if _exchange[0] == int(exchange_id):
                _conf = get_exchange_limited_conf(exchange_id)
                if not _conf:
                    defer.returnValue(res)

                res = [self.user.credits]

                if lock_type == LOCK_TYPE_MATERIAL:
                    _credits_need = _conf['MaterialLockedCost']
                elif lock_type == LOCK_TYPE_TARGET:
                    _credits_need = _conf['TargetLockedCost']
                else:
                    _exchange[7] = LOCK_TYPE_NONE
                    log.warn('unknown lock type:{0}. _exchange:{1}.'.format(
                        lock_type, _exchange))
                    defer.returnValue(res)

                res_consume = yield self.user.consume_credits(
                    _credits_need, WAY_LIMIT_EXCHANGE_LOCK)
                if res_consume:
                    res = res_consume
                    defer.returnValue(res)

                _exchange[7] = lock_type
                log.debug('lock type:{0}. _exchange:{1}, res:{2}.'.format(
                    lock_type, _exchange, res))

                self.sync()

                res = [self.user.credits]
                break

        defer.returnValue(res)
Example #44
def gs_logout(p, req):
    cid  = req
    user = g_UserMgr.getUser(cid)
    if not user:
        log.warn('User had logout. cid: {0}.'.format( cid ))
        defer.returnValue( UNKNOWN_ERROR )

    # The player still has offline logins
    if user.offline_num > 0:
        log.warn('User had offline login, could not logout. cid: {0}.'.format( cid ))
        # Update the flag to mark an offline login
        user.offline_flag = True
        defer.returnValue( UNKNOWN_ERROR )

    if worldBoss.running:
        worldBoss.remove_attacker(cid)

    try:
        res = yield cs_call('cs_character_logout', cid)
    except:
        log.warn('Some exp raise in gs_logout. cid: {0}.'.format( cid ))
        defer.returnValue( UNKNOWN_ERROR )
    else:
        g_UserMgr.logoutUser(cid)
        log.debug('user logout sucess. cid: {0}.'.format( cid ))
        defer.returnValue( res )
Example #45
def init_voice_verify():
    """ initial voice verify service
    Example for config.py:
    "voice_verify": {
        "enabled": True,
        "provider": "rong_lian",
        "rong_lian": {
            ... ...
        }
    }
    """
    provider_name = safe_get_config("voice_verify.provider", None)
    enabled = safe_get_config("voice_verify.enabled", False)
    if not enabled:
        log.warn("voice verify disabled")
        factory.provide("voice_verify", DisabledVoiceVerify)
    elif provider_name and safe_get_config("voice_verify." + provider_name,
                                           None):
        log.warn("Voice verify initialized to:" + provider_name)
        # if provider other than Ronglian is supported, update following lines
        factory.provide("voice_verify", RonglianVoiceVerify)
    else:
        log.warn(
            "either voice verify provider name or provider config is missing, Please check your configuration"
        )
        raise ConfigurationException("voice_verify.provider")
Example #46
    def load_by_group(self):
        for idx, _group in enumerate(self.groups_sorted):
            if not _group.is_finish:
                self.current_group_idx = idx
                break

        _l_stream = yield redis.lrange(LIST_TIME_LIMITED_SHOP, 0, -1)
        if _l_stream:
            for i in xrange(len(_l_stream)):
                _item = loads(_l_stream[i])
                if not isinstance(_item, (tuple, list)) or len(_item) != 9:
                    log.warn('item:', _item)
                    continue

                _group_id, _shop_id, _item_type, _item_id, _item_num, _description, _orig_credits, _credits, _limit = _item

                for _group in self.groups_sorted:
                    if _group.group_id == _group_id:
                        _group.append_item(_shop_id, _item_type, _item_id, _item_num, _description, _orig_credits, _credits, _limit)
                        break
                else:
                    log.warn('no group:{0} config for this item.'.format(_group_id))
Example #47
    def onekey_receive_reward(self, user):
        res = CONSTELLATION_REWARD_RECEIVED

        self.synced = False

        items_return = []
        for idx, reward in enumerate(self.rewards):
            if reward[3]:
                continue

            if self.score >= Constellation.stars_need(idx):
                _add_func = ITEM_MODELs.get(reward[0], None)

                if _add_func:
                    res_err, res_value = yield _add_func(
                        user,
                        ItemID=reward[1],
                        ItemNum=reward[2],
                        AddType=WAY_CONSTELLATION_AWARD,
                        CapacityFlag=False)
                    if not res_err:
                        for _v in res_value:
                            items_return = total_new_items(_v, items_return)
                    reward = list(reward)
                    reward[3] = 1
                    self.rewards[idx] = reward
                    res = NO_ERROR
                else:
                    log.error(
                        '[ Constellation.receve_reward ]no such item type: {0}.',
                        reward[0])
            else:
                log.warn(
                    'user too few score. cid:{0}, score:{1}, idx:{2}, stars need:{3}.'
                    .format(user.cid, self.score, idx,
                            Constellation.stars_need(idx)))
                break

        defer.returnValue((res, self.rewards, items_return))
Example #48
    def rand_reward(turn):
        _all, _weight = sysconfig['constellation_reward_rand']

        _rewards = _all.get(turn, None)
        _t_weight = _weight.get(turn, None)

        if _rewards and _t_weight:
            _tmp = 0
            _rand = rand_num(_t_weight)

            for reward in _rewards:
                _tmp += reward[3]
                if _rand <= _tmp:
                    return list(reward[:3])
            else:
                log.warn(
                    "[ Constellation.rand_reward ]missed, current tmp:{0}, rand:{1}, turn:{2}."
                    .format(_tmp, _rand, turn))
        else:
            log.warn(
                "[ Constellation.rand_reward ]no turn {0} in sysconfig, rewards:{1}, weight:{2}."
                .format(turn, _rewards, _t_weight))
Example #49
 def new_f(*args, **kwargs):
     
     # allow configuration of retry
     if constants.DISABLE_API_RETRIES:
         retry_count = max_retries
     else:
         retry_count=0
         
     while retry_count <= max_retries:
         
         try:
             
             # try to run the function as normal
             result = f(*args, **kwargs)
             
             # exit the loop to return the result
             break
             
         except:
             
             # we've reached max_retries allowed, re-raise
             if retry_count == max_retries:
                 raise
             
             else:
                 
                 # increment retry count
                 retry_count += 1
                 
                 # log our retry attempt
                 log.warn("Repeatable function %s failed, waiting %i seconds before retrying up to %s more times" % (f.func_name, 1.5**retry_count, max_retries-retry_count))
                 
                 # sleep for a bit, in the event there is a network hiccup and continue to back off the api as we continue to fail
                 time.sleep(1.5**retry_count)
                 
                 # continue to the next iteration
                 continue
             
     return result
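new_f() reads like the inner function of a retry decorator; the enclosing factory that supplies f, max_retries and the constants module is not shown. One way such a wrapper is commonly assembled, as a simplified guess at the missing outer layers rather than the original code:

import functools
import time

def retryable(max_retries=3):
    def decorator(f):
        @functools.wraps(f)
        def new_f(*args, **kwargs):
            retry_count = 0
            while True:
                try:
                    return f(*args, **kwargs)
                except Exception:
                    if retry_count == max_retries:
                        raise
                    retry_count += 1
                    # Exponential backoff before the next attempt.
                    time.sleep(1.5 ** retry_count)
        return new_f
    return decorator

@retryable(max_retries=5)
def flaky_call():
    ...  # stand-in for a call that may fail transiently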
Example #50
def grant_limit_fellow_award(all_ranks, activity_id, timestamp):
    '''
    @param: timestamp - the timestamp doubles as a validity check for the callLater
    '''
    log.warn(
        'limit fellow award. activity_id: {0}, timestamp: {1}, ACTIVITY_AWARD_TIME: {2}.'
        .format(activity_id, timestamp, ACTIVITY_AWARD_TIME))
    # Verify this is the expected callLater invocation
    if ACTIVITY_AWARD_TIME != timestamp:
        defer.returnValue(None)

    _max_rank = max(all_ranks)
    if _max_rank > 0:
        _rank = 1
        _name_scores = yield redis.zrevrange(SET_LIMIT_FELLOW_NAME_SCORE,
                                             0,
                                             _max_rank,
                                             withscores=True)
        for _nick_name, _score in _name_scores:
            if _score <= 0:
                continue
            _cid = yield redis.hget(DICT_NICKNAME_REGISTERED, str(_nick_name))
            if _cid:
                yield g_AwardCenterMgr.new_award(
                    _cid, AWARD_TYPE_LIMIT_FELLOW_RANK,
                    [int(time()), activity_id, _rank])
            _rank += 1

    if 0 in all_ranks:
        _names = yield redis.zrangebyscore(SET_LIMIT_FELLOW_NAME_SCORE, 60,
                                           '+inf')
        for _name in _names:
            _cid = yield redis.hget(DICT_NICKNAME_REGISTERED, str(_name))
            if _cid:
                yield g_AwardCenterMgr.new_award(
                    _cid, AWARD_TYPE_LIMIT_FELLOW_SCORE,
                    [int(time()), activity_id, 0])

    yield redis.delete(HASH_LIMIT_FELLOW_SHRINE, SET_LIMIT_FELLOW_NAME_SCORE)
Example #51
    def findFirstPartitionMatching(self,
                                   fsTypes=None,
                                   minimumSize=0,
                                   uuid=None):
        '''Find the first partition on this disk that matches the given set of
        constraints.

        fsTypes - If not-none, a sequence of filesystem names that are
          acceptable for the partition.
        minimumSize - The minimum size of the partition in megabytes.
        uuid - The partition UUID to search for.
        '''

        for currentPart in self.partitions:
            if currentPart.getSizeInMegabytes() < max(
                    0, minimumSize - partition.PARTITION_FUDGE_SIZE):
                continue

            if fsTypes:
                if not currentPart.fsType or \
                        currentPart.fsType.name not in fsTypes:
                    continue

            if uuid:
                if not currentPart.fsType:
                    continue

                try:
                    partUuid = currentPart.fsType.getUuid(
                        currentPart.consoleDevicePath)
                except Exception, e:
                    log.warn("could not get UUID for %s" %
                             currentPart.consoleDevicePath)
                    continue
                else:
                    if partUuid != uuid:
                        continue

            return currentPart
Example #52
    def find_matrix_roi_groups(self):
        """Locate potential 'MatrixROI' trees within the XML.

        A tiled dataset is defined as a 'matl:group' of type
        'matl:DefineMatrixROI' in the omp2info file.
        """
        matrix_groups = list()

        log.debug('Looking for Matrix ROI groups (tiling datasets).')
        root = self.tree.getroot()
        groups = root.findall('matl:group', self.xmlns)
        for grp in groups:
            grp_type = grp.attrib[self.xsi + 'type']
            if grp_type == 'matl:DefineMatrixROI':
                log.debug('Group %s is a Matrix ROI.', grp.attrib['objectId'])
                matrix_groups.append(grp)
            if grp_type == 'matl:MosaicROI':
                log.debug('Group %s is a Mosaic ROI.', grp.attrib['objectId'])
                matrix_groups.append(grp)

        log.warn("Found %i Matrix ROIs (tiling datasets).", len(matrix_groups))
        return matrix_groups
Example #53
def cmpOffsetString(a, b):
    '''Compare two timezone offset strings.  ie "UTC+10:30"
    >>> cmpOffsetString('UTC+00:00', 'UTC+00:00')
    0
    >>> cmpOffsetString('UTC+01:00', 'UTC+00:00')
    1
    >>> cmpOffsetString('UTC-01:00', 'UTC+00:00')
    -1
    >>> cmpOffsetString('FOO+01:00', 'UTC+00:00')
    Timezone Offset malformed: invalid literal for float(): FOO+01.00
    0
    '''
    a = a.replace('UTC','')
    a = a.replace(':','.')
    b = b.replace('UTC','')
    b = b.replace(':','.')
    try:
        a = float(a)
        b = float(b)
    except ValueError, ex:
        log.warn('Timezone Offset malformed: '+ str(ex))
        return 0
Example #54
    def login(self, cid, name, level, vip_level, lead_id, might, rank):
        _m = None

        if cid in self.__dict:
            _m = self.__dict[cid]
        else:
            _stream = yield redis.hget(HASH_ALLIANCE_MEMBER, cid)

            if _stream:
                _m = Member.load(_stream)
                if _m:
                    yield _m.clean_alliance()
                else:
                    log.warn('Unknown error')
                    returnValue(None)
            else:
                _m = Member(cid, name, lead_id, level, vip_level, might, rank,
                            0, 0, 0, 0)

        yield self.update(_m, name, level, vip_level, might, rank, lead_id)

        returnValue(_m)
Example #55
    def attack(self, user, damage):
        _now = time()

        if self._attack_time:
            if (_now - self._attack_time) < ATTACK_CD:
                log.warn('[ attack ]CD time. last attack time:{0}, now:{1}.',
                         self._attack_time, _now)
                defer.returnValue((WORLDBOSS_CD, None))

        _current_life = worldBoss.life
        if _current_life <= 0:
            defer.returnValue((WORLDBOSS_DEAD_ALREADY, None))

        self._attack_count += 1
        '''
        Calculation moved to the client side:
        _attack_extra = self._attack_extra_level * 5
        damage = int(int(damage) * (100 + _attack_extra) / 100)
        '''
        self._damage_total += damage

        _farm = 10
        user.base_att.prestige += _farm

        worldBoss.on_attack(self._cid, user.nick_name, damage)

        self._attack_time = _now

        self.sync()

        yield redis.zadd(RANK_WORLDBOSS_DAMAGE, self._cid, -self._damage_total)
        yield redis.hset(DICT_WORLDBOSS_RANKER_DATA, self._cid,
                         dumps((user.nick_name, user.level, user.alliance_id)))

        #log.debug('attack come here...', self, self.__dict__)
        _current_rank = yield self.rank
        defer.returnValue((NO_ERROR, (self._damage_total, _farm,
                                      worldBoss.life, _current_rank + 1)))
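A design detail worth noting: the score written to RANK_WORLDBOSS_DAMAGE is the negated damage total, so the sorted set's natural ascending order already ranks the heaviest hitter first, and self.rank can presumably be a plain rank lookup. A minimal sketch of reading the board back, assuming the same Twisted-style redis client and key as the example above (the zrange call and its signature are assumptions about that client):

from twisted.internet import defer

@defer.inlineCallbacks
def top_damage_cids(limit=10):
    # Scores are negated damage, so ascending zrange yields the highest damage first.
    cids = yield redis.zrange(RANK_WORLDBOSS_DAMAGE, 0, limit - 1)
    defer.returnValue(cids)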
Beispiel #56
0
def _usbOption(match):
    '''Handle the "ks=usb" and "ks=usb:<path>" option.'''

    try:
        ksFile = match.group(1)
    except IndexError:
        ksFile = "ks.cfg"

    firstTime = True
    while True:
        if not firstTime:
            # XXX Maybe we should just stop retrying after awhile?
            log.info("Insert a USB storage device that contains '%s' "
                     "file to perform a scripted install..." % ksFile)
            util.rawInputCountdown("\rrescanning in %2d second(s), "
                                   "press <enter> to rescan immediately", 10)
        firstTime = False

        diskSet = devices.DiskSet(forceReprobe=True)

        usbDisks = [disk for disk in diskSet.values()
                    if disk.driverName == devices.DiskDev.DRIVER_USB_STORAGE]

        if not usbDisks:
            log.info("") # XXX just for spacing
            log.warn("No USB storage found.")
            continue

        kickstartPath = os.path.join(USB_MOUNT_PATH, ksFile.lstrip('/'))

        if not os.path.exists(USB_MOUNT_PATH):
            os.makedirs(USB_MOUNT_PATH)

        for disk in usbDisks:
            for part in disk.partitions:
                if part.partitionId == -1:
                    continue

                if (part.getFsTypeName() not in ("ext2", "ext3", "vfat")):
                    # Don't try mounting partitions with filesystems that aren't
                    # likely to be on a usb key.
                    continue

                if util.mount(part.consoleDevicePath,
                              USB_MOUNT_PATH,
                              fsTypeName=part.getFsTypeName()):
                    log.warn("Unable to mount '%s'" % part.consoleDevicePath)
                    continue

                if os.path.exists(kickstartPath):
                    userchoices.addDriveUse(disk.name, 'kickstart')
                    return [('-s', kickstartPath)]

                if util.umount(USB_MOUNT_PATH):
                    failWithLog("Unable to umount '%s'" % USB_MOUNT_PATH)

        log.info("")
        log.warn("%s was not found on any attached USB storage." % ksFile)
Beispiel #57
0
def replace_version(module, pom):
    """
    Replace version in dependency and plugin part.
    :param module: module name
    :param pom: pom file path
    """
    log.debug('Replacing version in file: {}'.format(pom))
    pom_dict = config[module][pom]
    if VERSION_UPDATE_ITEMS not in pom_dict:
        log.warn(
            'No config key {} in pom parameters.'.format(VERSION_UPDATE_ITEMS))
        return

    version_update_items = pom_dict[VERSION_UPDATE_ITEMS]
    log.debug('Module: {}, versions: {}'.format(module,
                                                get_str(version_update_items)))
    with in_place.InPlace(pom) as file:
        line_num = 0
        for line in file:
            line_num = line_num + 1
            for version_update_item in version_update_items:
                if version_update_item.id in line:
                    # update version in dependency part
                    if X_VERSION_UPDATE in line:
                        old_version = line[(line.index('<version>') +
                                            9):line.index('</version>')]
                        if old_version != version_update_item.new_version:
                            new_line = line.replace(
                                old_version, version_update_item.new_version)
                            log.debug(
                                'Updating version of dependency in line {}'.
                                format(line_num))
                            log.debug('    old_line = {}.'.format(
                                line.strip('\n')))
                            log.debug('    new_line = {}.'.format(
                                new_line.strip('\n')))
                            line = new_line
                        else:
                            log.warn(
                                'Dependency version already matches the new version.')
                    # update version in plugin part
                    elif X_INCLUDE_UPDATE in line:
                        old_version = line[(line.index('[') +
                                            1):line.index(']')]
                        if old_version != version_update_item.new_version:
                            new_line = line.replace(
                                old_version, version_update_item.new_version)
                            log.debug('Updating line {}'.format(line_num))
                            log.debug('    old_line = {}.'.format(
                                line.strip('\n')))
                            log.debug('    new_line = {}.'.format(
                                new_line.strip('\n')))
                            line = new_line
                        else:
                            log.warn(
                                'Plugin version already matches the new version.')
            file.write(line)
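For orientation, a sketch of the shapes the function expects: config[module][pom] must map VERSION_UPDATE_ITEMS to objects exposing id and new_version, and the pom lines to be rewritten must contain the X_VERSION_UPDATE or X_INCLUDE_UPDATE marker. All concrete names and values below are hypothetical, and config merely stands in for the script's own module-level configuration:

from collections import namedtuple

# Hypothetical item shape; the real script builds these from its own configuration.
VersionUpdateItem = namedtuple('VersionUpdateItem', ['id', 'new_version'])

config = {
    'module-a': {
        'module-a/pom.xml': {
            VERSION_UPDATE_ITEMS: [
                VersionUpdateItem('com.example:some-library', '2.1.0'),
            ],
        },
    },
}

replace_version('module-a', 'module-a/pom.xml')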
Beispiel #58
0
    def process(self, ar_id, body):
        self.resetTimeout()
        state = 1, 'unknown'
        result = state[1]

        try:
            request_data = None
            if len(body) == 1:
                _handler_name, = body
                _request       = None
            else:
                _handler_name, _request = body
                try:
                    request_data = str(_request)[0:100]
                except:
                    pass

            _handler, need_proxy = get_handler( _handler_name )

            log.debug("[ PROCESS ]:ar_id:%d, handler:%s, request:%s, H:%s, from:%s." % (ar_id, _handler_name,
                request_data, _handler, self.transport.getPeer()))

            if _handler:
                state = 0, ''
                if need_proxy:
                    _request = _handler_name, _request

                defer.maybeDeferred(_handler, self, _request).addBoth(self.ack, ar_id, state)
                return
            else:
                state = 1, ('No such handler: %s' % _handler_name)
                log.warn('[ PROCESS ]%s, known handlers:%s' % (state[1], HANDLERS.keys()))
                result = state[1]
        except Exception, msg:
            log.error('error body: {0}.'.format( body ))
            traceback.print_exc()
            state = 1, str(msg)[:128]
            result = state[1]

        # Assumed fallback (not shown above): ack the error state when no handler ran,
        # mirroring how addBoth hands results to self.ack for handled requests.
        self.ack(result, ar_id, state)
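Handlers resolved by get_handler follow a simple calling convention: they are invoked with the protocol instance and the request payload, and whatever they return, directly or through a Deferred, becomes the first argument handed to self.ack. A hypothetical handler fitting that contract; how it gets registered into HANDLERS is not shown in this snippet:

def handle_echo(proto, request):
    # The returned value is what self.ack() sends back for this ar_id.
    return {'echo': request}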
Beispiel #59
0
def hostActionSetupVmdk(_context):
    virtualDevs = userchoices.getVirtualDevices()
    assert len(virtualDevs) == 1

    virtualDev = virtualDevs[0]['device']

    vmfsVolPath = ""

    dsSet = datastore.DatastoreSet()
    for ds in dsSet:
        if virtualDev.vmfsVolume in (ds.name, ds.uuid):
            vmfsVolPath = ds.consolePath

    assert vmfsVolPath, "no console path for %s" % virtualDev.vmfsVolume

    path = os.path.normpath(
        os.path.join(vmfsVolPath, virtualDev.imagePath, virtualDev.imageName))

    sysInfo = vmkctl.SystemInfoImpl()
    try:
        sysInfo.SetServiceConsoleVmdk(path)
    except vmkctl.HostCtlException, msg:
        log.warn("Couldn't set vmdk path.  The system may not boot correctly.")
Beispiel #60
0
def new(self, need_load=True, **dict_data):#Return { fn: value }
    if need_load:
        yield self.load(need_value=False)

    _attr = Attribute(self._table)
    _attrib_id = yield _attr.new(**dict_data)
    '''
    try:
        _attrib_id = yield _attr.new(**dict_data)
    except Exception, e:
        raise AttribManagerException("[ %s ]new failed. table:%s, data: %s, error:%s." % ( self.__class__, self._table, dict_data, e))
    '''

    if self._multirow:
        if not isinstance(self.dict_attribs, dict):
            log.warn('[ %s.new ]property dict_attribs is not dict. %s' % ( self.__class__, self.dict_attribs ))
            self.dict_attribs = {}
        self.dict_attribs[_attrib_id] = _attr
    else:
        self.dict_attribs = _attr

    #returnValue( _attr.value )
    returnValue( _attr.new_value() )