Example #1
File: base.py Project: brianr/uuss
 def create(cls, orig_cls, user_id, game):
     user_state = orig_cls()
     user_state.user_id = user_id
     user_state.state = chunking.blow_chunks(make_empty_user_state(user_id, game))
     user_state.date_created = int(time.time())
     user_state.date_modified = int(time.time())
     return user_state
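All of these examples use chunking.blow_chunks to turn a plain state dict into the chunked wire/storage format; with no chunk config it produces just a single master chunk (see the comments in Examples #2 and #5), and chunking.reconstitute_chunks reverses it. The real module belongs to brianr/uuss, so the following is only a rough sketch of that contract, assuming a zlib-compressed JSON payload like the unchunked fallback handled in Examples #4 and #5; the actual chunk layout and signatures may differ.

import json  # the project uses simplejson; the stdlib module is a close stand-in
import zlib

def blow_chunks(state, chunk_config=None):
    # Sketch only: always emits one compressed JSON blob, i.e. the
    # "only a master chunk with no chunk config" case from Example #2.
    # The real implementation presumably splits the state into several
    # chunks when a chunk_config is supplied.
    assert isinstance(state, dict)
    return zlib.compress(json.dumps(state).encode('utf-8'))

def reconstitute_chunks(raw_state, assemble=True):
    # Sketch only: inverse of the single-chunk blow_chunks above.
    # The meaning of the second argument in the real API is not shown here.
    return json.loads(zlib.decompress(raw_state).decode('utf-8'))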
Example #2
File: proto.py Project: brianr/uuss
 def _build_response(self, state, chunked, game, user_id):
     if not chunked:
         # get the state in a chunked format for sending along the wire
         # there will be only a master chunk with no chunk config specified
         state = chunking.blow_chunks(state)
     resp = self.Response()
     resp.game = game
     resp.user_id = user_id
     if state is None:
         resp.state = ""
     else:
         resp.state = state
     return resp
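The chunked flag tells _build_response whether the state coming out of storage is already in wire format: only raw dict states are passed through blow_chunks, and a missing state goes out as an empty string. The same guard, pulled out as a hypothetical standalone helper (the helper name and import path are assumptions):

import chunking  # import path assumed; the same module used throughout these examples

def ensure_wire_state(state, chunked):
    # Mirrors the guard in _build_response above.
    if state is None:
        return ""             # the protocol uses an empty string for "no state"
    if chunked:
        return state          # already chunked, pass through untouched
    return chunking.blow_chunks(state)  # wrap the raw dict in a master chunk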
Example #3
    def save(self, user_id, state):
        """
        Save the state for the specified user (but don't release the lock).
        user_id: string
        state: dict (not a json string)
        """
        log.debug("uuss.UserState.save %r", user_id)
        user_id = str(user_id)
        # Debugging for FB20021
        if user_id == 'null':
            raise Exception('"null" user_id in userstate.save')
        if not isinstance(state, dict):
            raise Exception('state not a dict for user_id %s' % user_id)

        check_user_id(user_id, state, game=self.game)

        req = proto.SaveRequest()
        req.game = self.game
        req.user_id = user_id
        req.state = chunking.blow_chunks(state, self.chunk_config)
        self._protocol_send_message(req)
        resp = self._recv_expected_message(proto.SaveResponse, self.game, user_id)
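On the client side, save() insists on a real dict (not a JSON string), rejects the literal user_id 'null', chunks the state with the client's chunk_config, and then waits for the matching SaveResponse. A hedged usage sketch; the UserState constructor arguments here are assumptions, not the real signature:

client = UserState(game='dane', chunk_config=None)    # constructor args are a guess
client.save('12345', {'uuss_test': {'i': 1}})         # fine: state is a dict
client.save('12345', '{"uuss_test": {"i": 1}}')       # raises: state must be a dict, not JSON text
client.save('null', {})                               # raises: "null" user_id is rejected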
Example #4
def do_processing():
    load_name = 'user_state-modified-%s-%s' % (bucket, time.time())

    mod_ids = open(filename, 'r').readlines()
    mod_ids = [id.replace('\n', '').strip() for id in mod_ids]
    log.info("%s ids found to process" % len(mod_ids))

    mod_count = 0
    batch_count = 1
    while mod_ids:
        proc_ids = mod_ids[:max_batch_size]
        mod_ids = mod_ids[max_batch_size:]
        base_file_name = "%s.%s" % (load_name, batch_count)
        log.info("Writing %s", base_file_name)
        with open(os.path.join(tmp_dir, base_file_name+".usrtmp"), 'w') as user_file:
            with open(os.path.join(tmp_dir, base_file_name+".plrtmp"), 'w') as player_file:
                mod_count += len(proc_ids)
                for row in proc_ids:
                    try:
                        if filetype == "player":
                            id = row.split(',')[0][2:-1]
                        else:
                            id = row.split("\t")[0][1:-1]
                        state = userstate.backup(id)
                        if isinstance(state, str):
                            state = simplejson.loads(zlib.decompress(state))
                        raw_state = chunking.blow_chunks(state)
                        user_line = gen_userstate_line(id, raw_state)
                        player_line = gen_player_line(id, state)
                        user_file.write(user_line+'\n')
                        if player_line:
                            player_file.write(player_line+'\n')
                    except userstate_multi.UserstateException as e:
                        log.exception(e)
                    except Exception as e:
                        # (jay) errors are bad here, but we don't want to keep the 
                        # rest of the userstates from being saved, so log it and go on
                        log.exception(e)
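The batching idiom here (and again in Example #5) just slices max_batch_size items off the front of the list until nothing is left. The same pattern as a small reusable generator, for illustration only:

def batches(items, batch_size):
    # Yield successive slices of at most batch_size items,
    # the same idiom as the while-loop above.
    while items:
        yield items[:batch_size]
        items = items[batch_size:]

# e.g.:
#     for proc_ids in batches(mod_ids, max_batch_size):
#         ...write one .usrtmp/.plrtmp batch file pair...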
Example #5
def do_processing(onerun=False):
    for collection_name in mongo_db.collection_names():
        proc_time = 0
        if collection_name.startswith('user_state-modified-%s' % bucket):
            try:
                load_name = 'user_state-modified-%s-%s' % (bucket, time.time())
                try:
                    mongo_db[collection_name].rename(load_name)
                except pymongo.errors.OperationFailure:
                    # effectively this makes sure the renamed collection exists
                    if mongo_db[load_name].count() <= 0:
                        raise
                    log.error("Error encountered renaming collection %s to %s, but able to continue" %
                              (collection_name, load_name))
                
                modified = mongo_db[load_name]
                
                start_time = time.time()
                log.info("%s userstates to save for %s ..." % (modified.count(), load_name))

                # prepare for userstate size monitoring
                userstates_processed = 0
                max_userstate_size = -1
                total_userstate_size = 0
                
                mod_count = 0
                mod_rows = list(modified.find())
                batch_count = 1
                while mod_rows:
                    proc_rows = mod_rows[:max_batch_size]
                    mod_rows = mod_rows[max_batch_size:]
                    base_file_name = "%s.%s" % (load_name, batch_count)
                    with open(os.path.join(tmp_dir, base_file_name+".usrtmp"), 'w') as user_file:
                        with open(os.path.join(tmp_dir, base_file_name+".plrtmp"), 'w') as player_file:
                            for row in proc_rows:
                                mod_count += row['count']
                                try:
                                    if row['_id'] == 'null':
                                        log.error("null user_id encountered")
                                    else:
                                        if rebalancing:
                                            (state, chunked) = userstate.get(row['_id'], raw=True)
                                        else:
                                            (state, chunked) = userstate.backup(row['_id'], raw=True)
                                        if state is not None:
                                            if not chunked:
                                                # NOTE(jpatrin): Shouldn't happen, but just in case...
                                                if isinstance(state, str):
                                                    state = simplejson.loads(zlib.decompress(state))
                                                # NOTE(jpatrin): This will be just a single chunk for now,
                                                # but will serve until the state gets saved by the game
                                                raw_state = chunking.blow_chunks(state)
                                            else:
                                                raw_state = state
                                                state = chunking.reconstitute_chunks(raw_state, True)
                                            #state_zlib_json = zlib.compress(simplejson.dumps(state))
                                            #user_line = gen_userstate_line(row['_id'], state_zlib_json)
                                            user_line = gen_userstate_line(row['_id'], raw_state)
                                            player_line = gen_player_line(row['_id'], state)
                                            user_file.write(user_line+'\n')
                                            if player_line:
                                                player_file.write(player_line+'\n')
                                            
                                            # keep userstate size for average and max size tracking
                                            userstates_processed += 1
                                            userstate_size = len(raw_state) / 1024
                                            total_userstate_size += userstate_size
                                            max_userstate_size = max(userstate_size, max_userstate_size)
                                except userstate_multi.UserstateException as e:
                                    log.exception(e)
                                except Exception as e:
                                    # (jay) errors are bad here, but we don't want to keep the 
                                    # rest of the userstates from being saved, so log it and go on
                                    log.exception(e)
                    # don't want the file reader to get to these before we're done, so keep
                    # as temporary name until finished writing
                    os.rename(os.path.join(tmp_dir, base_file_name+".usrtmp"),
                              os.path.join(tmp_dir, base_file_name+".user"))
                    os.rename(os.path.join(tmp_dir, base_file_name+".plrtmp"),
                              os.path.join(tmp_dir, base_file_name+".player"))
                    log.info("processed batch, %s remaining" % len(mod_rows))
                    batch_count += 1
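Both batch writers publish their output atomically: each file is written under a temporary suffix (.usrtmp / .plrtmp) and only renamed to its final .user / .player name once the whole batch has been written, so the downstream file reader can never pick up a half-written file. The pattern in isolation (the helper name and paths are made up):

import os

def write_then_publish(tmp_path, final_path, lines):
    # Write under a temporary name first; the reader only looks for the
    # final extension, so a partially written file is never visible to it.
    with open(tmp_path, 'w') as f:
        for line in lines:
            f.write(line + '\n')
    os.rename(tmp_path, final_path)  # atomic on the same filesystem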
Example #6
         log.debug("other_state: %s...", repr(other_state)[:200])
         
         log.info('Release another userstate lock')
         req = proto.ReleaseLock()
         req.game = 'dane'
         req.user_id = u2
         protocol.send_message(req)
         #RECEIVE LockReleased
         resp = protocol.recv_message()
         
         log.info('Save a userstate')
         state.setdefault('uuss_test', {'i': 0})['i'] += 1
         req = proto.SaveRequest()
         req.game = 'dane'
         req.user_id = u1
         req.state = chunking.blow_chunks(state)
         protocol.send_message(req)
         resp = protocol.recv_message()
         
         log.info('Release a userstate lock')
         req = proto.ReleaseLock()
         req.game = 'dane'
         req.user_id = u1
         protocol.send_message(req)
         #RECEIVE LockReleased
         resp = protocol.recv_message()
     
         sock.shutdown(socket.SHUT_RDWR)
 except Exception as e:
     if isinstance(e, KeyboardInterrupt):
         raise
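The test script drives the server with bare request/response pairs: send a SaveRequest or ReleaseLock, then read exactly one message back. A small context manager could keep the release paired with whatever work happens under the lock; this is only a sketch, reusing the proto messages and protocol object from the example and assuming the lock is already held:

from contextlib import contextmanager

@contextmanager
def held_user_lock(protocol, game, user_id):
    # Sketch only: guarantees the ReleaseLock / LockReleased exchange on
    # exit.  'proto' is the message module already imported by the test script.
    try:
        yield
    finally:
        req = proto.ReleaseLock()
        req.game = game
        req.user_id = user_id
        protocol.send_message(req)
        protocol.recv_message()  # expect LockReleased

# e.g.:
#     with held_user_lock(protocol, 'dane', u1):
#         ...save the userstate...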