Example #1
 def addCoin(self):
     if not self.lobbier:
         self.coins += 1
     self.sendBin(0x21, Buffer().writeInt8(0))
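Most of the PyRoyale snippets on this page (examples #3-#5, #8-#9, #14, #18, #24, #29) chain calls on the same binary Buffer type: each writeX appends and returns self, and toBytes() dumps the accumulated bytes. A minimal sketch of that interface, assuming little-endian integers and that "shor2" is simply a pair of packed 16-bit shorts; the real project may encode these differently:

import struct

class Buffer:
    # Hypothetical reimplementation: the method names come from the
    # examples, but the endianness and the shor2 layout are assumptions.
    def __init__(self, data=b''):
        self.data = bytearray(data)
        self.pos = 0

    def write(self, raw):
        # append raw bytes and return self so calls can be chained
        self.data += raw
        return self

    def writeInt8(self, v):
        return self.write(struct.pack('<b', v))

    def writeInt16(self, v):
        return self.write(struct.pack('<h', v))

    def writeShor2(self, x, y):
        return self.write(struct.pack('<hh', int(x), int(y)))

    def readInt8(self):
        (v,) = struct.unpack_from('<b', self.data, self.pos)
        self.pos += 1
        return v

    def readInt16(self):
        (v,) = struct.unpack_from('<h', self.data, self.pos)
        self.pos += 2
        return v

    def readShor2(self):
        v = struct.unpack_from('<hh', self.data, self.pos)
        self.pos += 4
        return v

    def toBytes(self):
        return bytes(self.data)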
Example #2
def buffer_input(prompt='scm> '):
    """Return a Buffer instance containing interactive input."""
    return Buffer(tokenize_lines(InputReader(prompt)))
Example #3
 def sendBin(self, code, buff):
     self.server.out_messages += 1
     msg = Buffer().writeInt8(code).write(
         buff.toBytes() if isinstance(buff, Buffer) else buff).toBytes()
     #print("sendBin: "+str(code)+" "+str(msg))
     self.sendMessage(msg, True)
Example #4
    def handlePkt(self, code, b, pktData):
        if code == 0x10:  # CREATE_PLAYER_OBJECT
            level, zone, pos = b.readInt8(), b.readInt8(), b.readShor2()
            self.level = level
            self.zone = zone

            wasDead = self.dead
            self.dead = False
            if wasDead:
                self.match.broadPlayerList()

            self.client.stopDCTimer()

            self.match.broadBin(
                0x10,
                Buffer().writeInt16(self.id).write(pktData).writeInt16(
                    self.skin))

        elif code == 0x11:  # KILL_PLAYER_OBJECT
            if self.dead:
                return

            self.dead = True
            self.client.startDCTimer(60)

            self.match.broadBin(0x11, Buffer().writeInt16(self.id))

        elif code == 0x12:  # UPDATE_PLAYER_OBJECT
            if self.dead:
                return

            level, zone, pos, sprite, reverse = b.readInt8(), b.readInt8(
            ), b.readVec2(), b.readInt8(), b.readBool()
            self.level = level
            self.zone = zone
            self.posX = pos[0]
            self.posY = pos[1]

            if sprite > 5 and self.match.world == "lobby" and zone == 0:
                self.client.block(0x1)
                return

            self.match.broadBin(0x12,
                                Buffer().writeInt16(self.id).write(pktData))

        elif code == 0x13:  # PLAYER_OBJECT_EVENT
            if self.dead:
                return

            type = b.readInt8()

            if self.match.world == "lobby":
                self.client.block(0x2)
                return

            self.match.broadBin(0x13,
                                Buffer().writeInt16(self.id).write(pktData))

        elif code == 0x17:
            killer = b.readInt16()
            if self.id == killer:
                return

            killer = self.match.getPlayer(killer)
            if killer is None:
                return

            killer.sendBin(0x17, Buffer().writeInt16(self.id).write(pktData))

        elif code == 0x18:  # PLAYER_RESULT_REQUEST
            if self.dead or self.win:
                return

            self.win = True
            self.client.startDCTimer(120)

            self.match.broadBin(
                0x18,
                Buffer().writeInt16(self.id).writeInt8(
                    self.match.getWinners()).writeInt8(0))

        elif code == 0x19:
            self.trustCount += 1
            if self.trustCount > 8:
                self.client.block(0x3)

        elif code == 0x20:  # OBJECT_EVENT_TRIGGER
            if self.dead:
                return

            level, zone, oid, type = b.readInt8(), b.readInt8(), b.readInt32(
            ), b.readInt8()

            if self.match.world == "lobby" and oid == 458761:
                self.match.goldFlowerTaken = True

            self.match.broadBin(0x20,
                                Buffer().writeInt16(self.id).write(pktData))

        elif code == 0x30:  # TILE_EVENT_TRIGGER
            if self.dead:
                return

            level, zone, pos, type = b.readInt8(), b.readInt8(), b.readShor2(
            ), b.readInt8()

            self.match.broadBin(0x30,
                                Buffer().writeInt16(self.id).write(pktData))
Example #5
File: player.py Project: xho22/PyRoyale
 def serializePlayerObject(self):
     return Buffer().writeInt16(self.id).writeInt8(self.level).writeInt8(
         self.zone).writeShor2(self.posX, self.posY).toString()
Example #6
    def __init__(self, config, rng):
        self.config = config
        self.rng = rng

        self.model_dir = config.model_dir
        self.gpu_memory_fraction = config.gpu_memory_fraction

        self.log_step = config.log_step
        self.max_step_d_g = config.max_step_d_g
        self.max_step_d_g_l = config.max_step_d_g_l

        if not config.is_train:
            config.input_width = config.input_PS_test
            config.input_height = config.input_PS_test

        self.load_path = config.load_path
        self.seg_path = config.seg_path
        self.out_path = config.out_path
        self.K_d = config.K_d
        self.K_g = config.K_g
        self.K_l = config.K_l
        self.initial_K_d = config.initial_K_d
        self.initial_K_g = config.initial_K_g
        self.initial_K_l = config.initial_K_l
        self.after_K_l = config.after_K_l
        self.checkpoint_secs = config.checkpoint_secs

        self.method_description = config.method_description
        self.postprocess_nproc = config.postprocess_nproc
        self.postprocess_seg_thres = config.postprocess_seg_thres
        self.postprocess_det_thres = config.postprocess_det_thres
        self.postprocess_win_size = config.postprocess_win_size
        self.postprocess_min_nucleus_size = config.postprocess_min_nucleus_size
        self.postprocess_max_nucleus_size = config.postprocess_max_nucleus_size
        self.do_gpu_process = config.do_gpu_process
        self.do_cpu_postprocess = config.do_cpu_postprocess

        if not self.do_gpu_process:
            return

        import tensorflow as tf

        DataLoader = {
            'nuclei': nuclei_data.DataLoader,
        }[config.data_set]
        self.data_loader = DataLoader(config, rng=self.rng)

        self.model = Model(config, self.data_loader)
        if config.is_train:
            self.history_buffer = Buffer(config, self.rng)

        self.summary_ops = {
            'test_synthetic_images': {
                'summary':
                tf.summary.image("test_synthetic_images",
                                 self.model.x,
                                 max_outputs=config.max_image_summary),
                'output':
                self.model.x,
            },
            'test_refined_images': {
                'summary':
                tf.summary.image("test_refined_images",
                                 self.model.denormalized_R_x,
                                 max_outputs=config.max_image_summary),
                'output':
                self.model.denormalized_R_x,
            },
            'test_refer_images': {
                'summary':
                tf.summary.image("test_refer_images",
                                 self.model.ref_image,
                                 max_outputs=config.max_image_summary),
                'output':
                self.model.ref_image,
            },
            'test_learner_outputs': {
                'summary':
                tf.summary.image("test_learner_outputs",
                                 self.model.L_R_x[..., 1:] * 255,
                                 max_outputs=config.max_image_summary),
                'output':
                self.model.L_R_x[..., 1:] * 255,
            },
        }

        self.saver = tf.train.Saver()
        self.summary_writer = tf.summary.FileWriter(self.model_dir)

        sv = tf.train.Supervisor(logdir=self.model_dir,
                                 is_chief=True,
                                 saver=self.saver,
                                 summary_op=None,
                                 summary_writer=self.summary_writer,
                                 save_summaries_secs=300,
                                 save_model_secs=self.checkpoint_secs,
                                 global_step=self.model.learner_step)

        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=self.gpu_memory_fraction,
            allow_growth=True)  # seems to be not working
        sess_config = tf.ConfigProto(allow_soft_placement=True,
                                     gpu_options=gpu_options)

        self.sess = sv.prepare_or_wait_for_session(config=sess_config)
Example #7
    assert False
model.compile(optimizer=optimizer, loss='mse')

# another set of layers for target model
x, u, m, v, q, p, a = createLayers()

# V() function uses target model weights
fV = K.function([K.learning_phase(), x], v)
V = lambda x: fV([0, x])

# target model is initialized from main model
target_model = Model(input=[x, u], output=q)
target_model.set_weights(model.get_weights())

# replay memory
R = Buffer(args.replay_size, env.observation_space.shape,
           env.action_space.shape)

# the main learning loop
total_reward = 0
for i_episode in xrange(args.episodes):
    observation = env.reset()
    #print "initial state:", observation
    episode_reward = 0
    for t in xrange(args.max_timesteps):
        if args.display:
            env.render()

        # predict the mean action from current observation
        x = np.array([observation])
        u = mu(x)[0]
Example #8
 def sendBin(self, code, buff):
     msg = Buffer().writeInt8(code).write(
         buff.toBytes() if isinstance(buff, Buffer) else buff).toBytes()
     self.sendMessage(msg, True)
Example #9
    def onBinaryMessage(self):
        pktLenDict = {
            0x10: 6,
            0x11: 0,
            0x12: 12,
            0x13: 1,
            0x17: 2,
            0x18: 4,
            0x19: 0,
            0x20: 7,
            0x30: 7
        }

        code = self.recv[0]
        if code not in pktLenDict:
            #print("Unknown binary message received: {1} = {0}".format(repr(self.recv[1:]), hex(code)))
            self.recv = bytes()
            return False

        pktLen = pktLenDict[code] + 1
        if len(self.recv) < pktLen:
            return False

        pktData = self.recv[1:pktLen]
        self.recv = self.recv[pktLen:]
        b = Buffer(pktData)

        if not self.player.loaded or self.blocked or (
                not self.player.match.closed and self.player.match.playing):
            self.recv = bytes()
            return False

        if code == 0x10:  # CREATE_PLAYER_OBJECT
            level, zone, pos = b.readInt8(), b.readInt8(), b.readShor2()
            self.player.level = level
            self.player.zone = zone

            wasDead = self.player.dead
            self.player.dead = False
            if wasDead:
                self.player.match.broadPlayerList()

            try:
                self.dcTimer.cancel()
            except:
                pass

            self.player.match.broadBin(
                0x10,
                Buffer().writeInt16(self.player.id).write(pktData))

        elif code == 0x11:  # KILL_PLAYER_OBJECT
            if self.player.dead:
                return

            self.player.dead = True
            self.dcTimer = reactor.callLater(15, self.transport.loseConnection)

            self.player.match.broadBin(0x11,
                                       Buffer().writeInt16(self.player.id))

        elif code == 0x12:  # UPDATE_PLAYER_OBJECT
            if self.player.dead:
                return

            level, zone, pos, sprite, reverse = b.readInt8(), b.readInt8(
            ), b.readVec2(), b.readInt8(), b.readBool()
            self.player.level = level
            self.player.zone = zone
            self.player.posX = pos[0]
            self.player.posY = pos[1]

            if ((  #self.player.posX < 23 or
                    self.player.posY >= 58.5) or sprite > 5
                ) and self.player.match.world == "lobby" and zone == 0:
                self.block(0x1)
                return

            self.player.match.broadBin(
                0x12,
                Buffer().writeInt16(self.player.id).write(pktData))

        elif code == 0x13:  # PLAYER_OBJECT_EVENT
            if self.player.dead:
                return

            type = b.readInt8()

            if self.player.match.world == "lobby":
                self.block(0x2)
                return

            self.player.match.broadBin(
                0x13,
                Buffer().writeInt16(self.player.id).write(pktData))

        elif code == 0x17:
            killer = b.readInt16()
            if self.player.id == killer:
                return

            killer = self.player.match.getPlayer(killer)
            if killer is None:
                return

            killer.sendBin(0x17,
                           Buffer().writeInt16(self.player.id).write(pktData))

        elif code == 0x18:  # PLAYER_RESULT_REQUEST
            if self.player.dead or self.player.win:
                return

            self.player.win = True
            self.dcTimer = reactor.callLater(120,
                                             self.transport.loseConnection)

            self.player.match.broadBin(
                0x18,
                Buffer().writeInt16(self.player.id).writeInt8(
                    self.player.match.getWinners()).writeInt8(0))

        elif code == 0x19:
            self.trustCount += 1
            if self.trustCount > 8:
                self.block(0x3)

        elif code == 0x20:  # OBJECT_EVENT_TRIGGER
            if self.player.dead:
                return

            level, zone, oid, type = b.readInt8(), b.readInt8(), b.readInt32(
            ), b.readInt8()

            self.player.match.broadBin(
                0x20,
                Buffer().writeInt16(self.player.id).write(pktData))

        elif code == 0x30:  # TILE_EVENT_TRIGGER
            if self.player.dead:
                return

            level, zone, pos, type = b.readInt8(), b.readInt8(), b.readShor2(
            ), b.readInt8()

            self.player.match.broadBin(
                0x30,
                Buffer().writeInt16(self.player.id).write(pktData))

        else:
            print("Unknown binary message received: {1} = {0}".format(
                repr(self.recv[1:]), hex(code)))
            self.recv = bytes()
            return False

        return True
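The framing in example #9 is one opcode byte followed by a fixed-size payload whose length comes from pktLenDict, so the stream can be split without any length prefix on the wire. A standalone sketch of just that splitting step (the opcode table is copied from the example; the function name is hypothetical):

PKT_LEN = {0x10: 6, 0x11: 0, 0x12: 12, 0x13: 1, 0x17: 2,
           0x18: 4, 0x19: 0, 0x20: 7, 0x30: 7}

def split_packets(recv):
    # Split a receive buffer into (code, payload) pairs and return the
    # leftover bytes that still need more data to complete a packet.
    packets = []
    while recv:
        code = recv[0]
        if code not in PKT_LEN:  # unknown opcode: discard the buffer
            return packets, b''
        total = 1 + PKT_LEN[code]  # opcode byte plus fixed payload
        if len(recv) < total:  # partial packet, wait for more data
            break
        packets.append((code, recv[1:total]))
        recv = recv[total:]
    return packets, recv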
Example #10
File: main.py Project: aithlab/ppo
N_epoch = 500
T = 200  # Pendulum environment has 200 max episode steps
gamma = 0.99
lambda_ = 0.95

env = NormalizedReward(gym.make('Pendulum-v0'))
torch.manual_seed(SEED)
env.seed(SEED)
np.random.seed(SEED)

state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
ppo = PPO(state_dim, action_dim)

buffer = Buffer(T)
params = [p for p in ppo.model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(params, lr=0.0003)

epoch_r = []
for epoch in range(N_epoch):
    state, T_r, done, t = env.reset(), 0, False, 1
    state = Variable(torch.tensor(state, dtype=torch.float32))
    while not done:  # Collect T timesteps of data
        env.render()

        action_dist, v_s_t = ppo(state)
        action = action_dist.rsample()  #reparameterized sample
        entropy = action_dist.entropy()
        log_prob = action_dist.log_prob(action)
Example #11
def infer_on_stream(args, client, stats):
    """
    Initialize the inference network, stream video to network,
    and output stats and video.

    :param args: Command line arguments parsed by `build_argparser()`
    :param client: MQTT client
    :return: None
    """
    # Initialise the class
    infer_network = Network()
    buffer = Buffer()
    # Set Probability threshold for detections
    prob_threshold = args.prob_threshold
    ### Load the model through `infer_network` ###
    infer_network.load_model(args.model, args.device, args.cpu_extension)
    net_input_shape = infer_network.get_input_shape()
    net_output_name = infer_network.get_output_name()
    net_input_name = infer_network.get_input_blob_name()
    net_output_info = infer_network.get_output_info()
    log.info("network output name")
    log.info(net_output_name)
    log.info("network output info")
    log.info(net_output_info.shape)
    log.info("network input shape")
    log.info(net_input_name)
    log.info(net_input_shape)

    ### Handle the input stream ###
    iflag = False
    input_stream_arg = 0 if args.input == "cam" else args.input
    # a webcam index is an int, so only check extensions on string paths
    if isinstance(input_stream_arg, str) and \
            input_stream_arg.endswith(('.jpg', '.bmp')):
        iflag = True

    width = 0
    height = 0
    frame = None
    cap = None
    captureOpen = False
    ## Handle image or stream or CAM
    if iflag:
        frame = cv2.imread(input_stream_arg)
        log.info("single frame shape: %s", frame.shape)
        width = frame.shape[1]
        height = frame.shape[0]
    else:
        log.info("attempting VideoCapture for: %s", input_stream_arg)
        # VideoCapture opens the source itself; reopening with args.input
        # would break the "cam" case, where input_stream_arg is the index 0
        cap = cv2.VideoCapture(input_stream_arg)
        captureOpen = cap.isOpened()
        width = int(cap.get(3))
        height = int(cap.get(4))

    log.info("input image width: %s, height: %s", width, height)
    #stream input shape:
    input_width = 0
    input_height = 0
    total_person_count = 0
    duration = 0

    cur_request_id = 0
    next_request_id = 1
    render_time = 0
    parsing_time = 0
    waitingOnInference = False
    ### Loop until stream is over ###
    while (captureOpen or iflag or waitingOnInference):
        ### Read from the video capture ###
        flag = True
        key_pressed = None
        if not iflag:
            flag, frame = cap.read()
            if not cap.isOpened():
                captureOpen = False
            key_pressed = cv2.waitKey(60)
        if not flag:
            break
        ### Pre-process the image as needed ###
        input_height = net_input_shape[2]  # NCHW layout: index 2 is height
        input_width = net_input_shape[3]  # NCHW layout: index 3 is width
        p_frame = cv2.resize(frame, (net_input_shape[3], net_input_shape[2]))
        p_frame = p_frame.transpose((2, 0, 1))
        p_frame = p_frame.reshape(1, *p_frame.shape)

        ### Start asynchronous inference for specified request ###
        start_time = time()
        infer_network.exec_net(p_frame)
        waitingOnInference = True
        render_time = 0
        inf_time = 0

        ### Wait for the result ###
        if infer_network.wait() == 0:
            ### Get the results of the inference request ###
            result = infer_network.get_output()
            inf_time = time() - start_time
            ###restart clock to capture evaluate/draw time
            start_time = time()
            boxes = post_process(result, width, height, PERSON_CLASS)
            ##if len(boxes) > 1:
            ##log.info("initial boxes: %s", boxes)
            boxes = list(boxes.values())
            boxes = nms(boxes)
            buffer_avg = 0

            if (iflag):
                boxes = filter_confidence(boxes, args.prob_threshold)

            if len(boxes) > 0:
                ##we have a person in frame (maybe)
                first_prop = boxes[0]
                confidence = first_prop[4]
                buffer.add(confidence)
                buffer_avg = buffer.average()
                if confidence > args.prob_threshold:
                    if duration > 0:
                        ##this is not the first time they have been in the frame
                        ##increase duration and move along
                        duration = duration + 1
                    else:
                        ##very first time this person has entered the frame
                        ##pulse out new count
                        total_person_count = total_person_count + 1
                        duration = duration + 1
                    client.publish(
                        "person",
                        json.dumps({
                            "count": 1,
                            "total": total_person_count
                        }))
                    draw_box(frame, boxes, inf_time)
                else:
                    ##we have a person in frame, but they don't meet confidence threshold
                    if duration > 0:
                        ##we know we were tracking someone last frame
                        ##so check our rolling buffer average
                        if buffer_avg > BUFFER_AVERAGE_CUTOFF:
                            ##same person, keep counting, move along
                            duration = duration + 1
                            client.publish(
                                "person",
                                json.dumps({
                                    "count": 1,
                                    "total": total_person_count
                                }))
                            draw_box(frame, boxes, inf_time)
                        else:
                            ##log.info("NO-DRAW: c:%s, b:%s, d:%s : else:if:else", confidence, buffer_avg, duration)
                            ##no longer meet confidence or buffer avg
                            client.publish(
                                "person",
                                json.dumps({
                                    "count": 0,
                                    "total": total_person_count
                                }))
                            client.publish("person/duration",
                                           json.dumps({"duration": duration}))
                            duration = 0
                            buffer.flush()
                    else:
                        ##log.info("NO-DRAW: c:%s, b:%s, d:%s : else:else", confidence, buffer_avg, duration)
                        ##also nobody in the last frame (duration == 0)
                        client.publish(
                            "person",
                            json.dumps({
                                "count": 0,
                                "total": total_person_count
                            }))
            else:
                ##no boxes with our target class were found, make sure we didn't see one in the last frame (or so)
                buffer.add(0)
                buffer_avg = buffer.average()
                if buffer_avg > BUFFER_AVERAGE_CUTOFF:
                    ##we had someone previously, keep counting, move along
                    duration = duration + 1
                else:
                    ##nobody previously, nobody now, make sure we say so
                    client.publish(
                        "person",
                        json.dumps({
                            "count": 0,
                            "total": total_person_count
                        }))
                    if duration > 0:
                        ##we were previously tracking someone, pulse out duration before zeroing out
                        client.publish("person/duration",
                                       json.dumps({"duration": duration}))
                        duration = 0

            render_time = time() - start_time
            render_time_message = "OpenCV rendering time: {:.3f} ms".format(
                render_time * 1e3)
            cv2.putText(frame, render_time_message, (15, 45),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
            stats.append(dict(it=inf_time, rt=render_time))
            sys.stdout.buffer.write(frame)
            sys.stdout.flush()
        if key_pressed == 27:
            break
        if iflag and not waitingOnInference:
            iflag = False
        if infer_network.wait() == 0:
            iflag = False
            waitingOnInference = False
    if cap:
        cap.release()
        cv2.destroyAllWindows()
    client.disconnect()
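Example #11 smooths flickering detections by feeding each frame's top confidence (or 0) into a rolling buffer and comparing the running average against BUFFER_AVERAGE_CUTOFF, so a single low-confidence frame does not end a person's visit. The Buffer it uses only needs add/average/flush; a hypothetical minimal version, with the window size as an assumption:

from collections import deque

class Buffer:
    # Rolling-average buffer matching the calls in example #11; the
    # default window length is a guess, not the project's value.
    def __init__(self, maxlen=20):
        self._vals = deque(maxlen=maxlen)

    def add(self, v):
        self._vals.append(v)

    def average(self):
        return sum(self._vals) / len(self._vals) if self._vals else 0.0

    def flush(self):
        self._vals.clear()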
Example #12
def exec_block(block, context_globals=None):
    if context_globals is None:
        context_globals = {}

    if isinstance(block.body[-1], ast.Expr):
        last = ast.Expression(block.body.pop().value)

        exec(compile(block, '<string>', mode='exec'), context_globals)
        return eval(compile(last, '<string>', mode='eval'), context_globals)
    else:
        exec(compile(block, '<string>', mode='exec'), context_globals)
        return None


cells: List[Cell] = []
cells.append(Cell(Buffer(''), 'some output'))
cells.append(Cell(Buffer('some\nmore\ninput\nhere'), 'some output'))

cur_cell = 0
cursor = Cursor(cells[cur_cell].input)

event_pipe = []

with glfw_window() as window:
    GL.glClear(GL.GL_COLOR_BUFFER_BIT)

    def char_callback(_win, c):
        event_pipe.append(('key_press', chr(c)))

    def key_callback(_win, k, scancode, action, mods):
        key_map = {
Example #13
def valor(env_fn, actor_critic=ActorCritic, ac_kwargs=dict(), disc=Discriminator, dc_kwargs=dict(), seed=0, episodes_per_epoch=40,
        epochs=50, gamma=0.99, pi_lr=3e-4, vf_lr=1e-3, dc_lr=5e-4, train_v_iters=80, train_dc_iters=10, train_dc_interv=10, 
        lam=0.97, max_ep_len=1000, logger_kwargs=dict(), con_dim=5, save_freq=10, k=1):

    logger = EpochLogger(**logger_kwargs)
    logger.save_config(locals())

    seed += 10000 * proc_id()
    torch.manual_seed(seed)
    np.random.seed(seed)

    env = env_fn()
    obs_dim = env.observation_space.shape
    act_dim = env.action_space.shape

    ac_kwargs['action_space'] = env.action_space

    # Model
    actor_critic = actor_critic(input_dim=obs_dim[0]+con_dim, **ac_kwargs)
    disc = disc(input_dim=obs_dim[0], context_dim=con_dim, **dc_kwargs)

    # Buffer
    local_episodes_per_epoch = int(episodes_per_epoch / num_procs())
    buffer = Buffer(con_dim, obs_dim[0], act_dim[0], local_episodes_per_epoch, max_ep_len, train_dc_interv)

    # Count variables
    var_counts = tuple(count_vars(module) for module in
        [actor_critic.policy, actor_critic.value_f, disc.policy])
    logger.log('\nNumber of parameters: \t pi: %d, \t v: %d, \t d: %d\n'%var_counts)    

    # Optimizers
    train_pi = torch.optim.Adam(actor_critic.policy.parameters(), lr=pi_lr)
    train_v = torch.optim.Adam(actor_critic.value_f.parameters(), lr=vf_lr)
    train_dc = torch.optim.Adam(disc.policy.parameters(), lr=dc_lr)

    # Parameters Sync
    sync_all_params(actor_critic.parameters())
    sync_all_params(disc.parameters())

    def update(e):
        obs, act, adv, pos, ret, logp_old = [torch.Tensor(x) for x in buffer.retrieve_all()]
        
        # Policy
        _, logp, _ = actor_critic.policy(obs, act)
        entropy = (-logp).mean()

        # Policy loss
        pi_loss = -(logp*(k*adv+pos)).mean()

        # Train policy
        train_pi.zero_grad()
        pi_loss.backward()
        average_gradients(train_pi.param_groups)
        train_pi.step()

        # Value function
        v = actor_critic.value_f(obs)
        v_l_old = F.mse_loss(v, ret)
        for _ in range(train_v_iters):
            v = actor_critic.value_f(obs)
            v_loss = F.mse_loss(v, ret)

            # Value function train
            train_v.zero_grad()
            v_loss.backward()
            average_gradients(train_v.param_groups)
            train_v.step()

        # Discriminator
        if (e+1) % train_dc_interv == 0:
            print('Discriminator Update!')
            con, s_diff = [torch.Tensor(x) for x in buffer.retrieve_dc_buff()]
            _, logp_dc, _ = disc(s_diff, con)
            d_l_old = -logp_dc.mean()

            # Discriminator train
            for _ in range(train_dc_iters):
                _, logp_dc, _ = disc(s_diff, con)
                d_loss = -logp_dc.mean()
                train_dc.zero_grad()
                d_loss.backward()
                average_gradients(train_dc.param_groups)
                train_dc.step()

            _, logp_dc, _ = disc(s_diff, con)
            dc_l_new = -logp_dc.mean()
        else:
            d_l_old = 0
            dc_l_new = 0

        # Log the changes
        _, logp, _, v = actor_critic(obs, act)
        pi_l_new = -(logp*(k*adv+pos)).mean()
        v_l_new = F.mse_loss(v, ret)
        kl = (logp_old - logp).mean()
        logger.store(LossPi=pi_loss, LossV=v_l_old, KL=kl, Entropy=entropy, DeltaLossPi=(pi_l_new-pi_loss),
            DeltaLossV=(v_l_new-v_l_old), LossDC=d_l_old, DeltaLossDC=(dc_l_new-d_l_old))
        # logger.store(Adv=adv.reshape(-1).numpy().tolist(), Pos=pos.reshape(-1).numpy().tolist())

    start_time = time.time()
    o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
    context_dist = Categorical(logits=torch.Tensor(np.ones(con_dim)))
    total_t = 0

    for epoch in range(epochs):
        actor_critic.eval()
        disc.eval()
        for _ in range(local_episodes_per_epoch):
            c = context_dist.sample()
            c_onehot = F.one_hot(c, con_dim).squeeze().float()
            for _ in range(max_ep_len):
                concat_obs = torch.cat([torch.Tensor(o.reshape(1, -1)), c_onehot.reshape(1, -1)], 1)
                a, _, logp_t, v_t = actor_critic(concat_obs)

                buffer.store(c, concat_obs.squeeze().detach().numpy(), a.detach().numpy(), r, v_t.item(), logp_t.detach().numpy())
                logger.store(VVals=v_t)

                o, r, d, _ = env.step(a.detach().numpy()[0])
                ep_ret += r
                ep_len += 1
                total_t += 1

                terminal = d or (ep_len == max_ep_len)
                if terminal:
                    dc_diff = torch.Tensor(buffer.calc_diff()).unsqueeze(0)
                    con = torch.Tensor([float(c)]).unsqueeze(0)
                    _, _, log_p = disc(dc_diff, con)
                    buffer.end_episode(log_p.detach().numpy())
                    logger.store(EpRet=ep_ret, EpLen=ep_len)
                    o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0

        if (epoch % save_freq == 0) or (epoch == epochs - 1):
            logger.save_state({'env': env}, [actor_critic, disc], None)

        # Update
        actor_critic.train()
        disc.train()

        update(epoch)

        # Log
        logger.log_tabular('Epoch', epoch)
        logger.log_tabular('EpRet', with_min_and_max=True)
        logger.log_tabular('EpLen', average_only=True)
        logger.log_tabular('VVals', with_min_and_max=True)
        logger.log_tabular('TotalEnvInteracts', total_t)
        logger.log_tabular('LossPi', average_only=True)
        logger.log_tabular('DeltaLossPi', average_only=True)
        logger.log_tabular('LossV', average_only=True)
        logger.log_tabular('DeltaLossV', average_only=True)
        logger.log_tabular('LossDC', average_only=True)
        logger.log_tabular('DeltaLossDC', average_only=True)
        logger.log_tabular('Entropy', average_only=True)
        logger.log_tabular('KL', average_only=True)
        logger.log_tabular('Time', time.time()-start_time)
        logger.dump_tabular()
Example #14
 def serializePlayerObject(self):
     return Buffer().writeInt16(self.id).writeInt8(self.level).writeInt8(
         self.zone).writeShor2(self.posX, self.posY).writeInt16(
             self.skin).writeInt8(self.isDev).toBytes()
Example #15
 def __init__(self,
              state_dim,
              action_dim,
              cus_policy=True,
              env_dtype=tf.float32):
     action_dim = list(action_dim)
     state_dim = list(state_dim)
     self.cus_policy = cus_policy
     self.buffer = Buffer(buffer_size,
                          state_dim,
                          action_dim,
                          dtype=np.__dict__[env_dtype])
     self.sess = tf.Session(
         config=tf.ConfigProto(inter_op_parallelism_threads=threads,
                               log_device_placement=False,
                               allow_soft_placement=True))
     self.actor_theta = network.actor_theta(state_dim, action_dim)
     self.critic_theta = network.critic_theta(state_dim, action_dim)
     self.actor_theta_t, update_actor_t = exponential_moving_averages(
         self.actor_theta, tau)
     self.critic_theta_t, update_critic_t = exponential_moving_averages(
         self.critic_theta, tau)
     states = tf.placeholder(env_dtype, [None] + state_dim, "states")
     is_training = tf.placeholder(tf.bool, name="is_training")
     action_test = network.actor(states, self.actor_theta)
     noise_init = tf.zeros([1] + action_dim, dtype=env_dtype)
     noise_var = tf.Variable(noise_init)
     self.ou_reset = noise_var.assign(noise_init)
     noise = noise_var.assign_sub(
         ou_theta * noise_init -
         tf.random_normal(action_dim, stddev=ou_sigma, dtype=env_dtype))
     action_exploration = action_test + noise
     action_cont = tf.placeholder(env_dtype, [None] + action_dim,
                                  "action_cont_space")
     actions_k = tf.placeholder(env_dtype, [None] + action_dim,
                                "knn_actions")
     reward_k = tf.placeholder(env_dtype, [1], "reward_k")
     term_k = tf.placeholder(tf.bool, [1], "term_k")
     qfunction_eval = network.critic(states, actions_k, self.critic_theta)
     action_k_policy = tf.stop_gradient(
         tf.where(term_k, reward_k, reward_k + discount * qfunction_eval))
     qfunction = network.critic(states, action_test, self.critic_theta)
     meanq = tf.reduce_mean(qfunction, 0)
     wd_p = tf.add_n(
         [actor_l2 * tf.nn.l2_loss(var) for var in self.actor_theta])
     loss_p = -meanq + wd_p
     optim_p = tf.train.AdamOptimizer(learning_rate=learning_actor)
     grads_and_vars_p = optim_p.compute_gradients(loss_p,
                                                  var_list=self.actor_theta)
     optimize_p = optim_p.apply_gradients(grads_and_vars_p)
     with tf.control_dependencies([optimize_p]):
         train_actor = tf.group(update_actor_t)
     action_train = tf.placeholder(env_dtype,
                                   [FLAGS.batch_size] + action_dim,
                                   "action_train")
     action_train_k = tf.placeholder(env_dtype,
                                     [FLAGS.batch_size] + action_dim,
                                     "action_train_k")
     reward = tf.placeholder(env_dtype, [FLAGS.batch_size], "reward")
     states2 = tf.placeholder(env_dtype, [FLAGS.batch_size] + state_dim,
                              "states2")
     term2 = tf.placeholder(tf.bool, [FLAGS.batch_size], "term2")
     tensor_cond = tf.constant(self.cus_policy,
                               dtype=tf.bool,
                               name="is_cus_p")
     q_train = network.critic(states, action_train, self.critic_theta)
     act2 = network.actor(states2, theta=self.actor_theta_t)
     full_act_policy2 = tf.cond(
         tensor_cond,
         lambda: action_train_k,
         lambda: act2,
     )
     qfunction2 = network.critic(states2,
                                 full_act_policy2,
                                 theta=self.critic_theta_t)
     qfunction_target = tf.stop_gradient(
         tf.where(term2, reward, reward + discount * qfunction2))
     td_error = q_train - qfunction_target
     ms_td_error = tf.reduce_mean(tf.square(td_error), 0)
     wd_q = tf.add_n(
         [critic_l2 * tf.nn.l2_loss(var) for var in self.critic_theta])
     loss_q = ms_td_error + wd_q
     optim_q = tf.train.AdamOptimizer(learning_rate=learning_critic)
     grads_and_vars_q = optim_q.compute_gradients(
         loss_q, var_list=self.critic_theta)
     optimize_q = optim_q.apply_gradients(grads_and_vars_q)
     with tf.control_dependencies([optimize_q]):
         train_critic = tf.group(update_critic_t)
     with self.sess.as_default():
         self._action_test = Res([states, is_training], action_test)
         self._action_exploration = Res([states, is_training],
                                        action_exploration)
         self._reset = Res([], self.ou_reset)
         self._train_actor = Res([
             states, action_train, action_train_k, reward, states2, term2,
             is_training
         ], [train_critic])
         self._train_critic = Res([states, is_training], [train_actor])
         self._train = Res([
             states, action_train, action_train_k, reward, states2, term2,
             is_training
         ], [train_actor, train_critic])
         self._action_k_p = Res([
             states, action_cont, actions_k, reward_k, term_k, is_training
         ], [action_k_policy])
     self.sess.run(tf.global_variables_initializer())
     self.sess.graph.finalize()
     self.t = 0
Example #16
import warnings
import os
warnings.filterwarnings('ignore')

from stable_baselines.common.policies import MlpPolicy
from stable_baselines import PPO2
from callback import CustomCallback
from env_wrapper import wrap_env
from buffer import Buffer

import gym

if __name__ == "__main__":
    env = wrap_env(gym.make('LunarLanderContinuous-v2'),
                   Buffer(50000),
                   delay=0)
    agent = PPO2(MlpPolicy,
                 env,
                 verbose=1,
                 tensorboard_log='./test_tensorboard')
    agent.learn(total_timesteps=5000000, log_interval=10)
Example #17
File: main.py Project: mcarobene/MAX
def get_buffer(d_state, d_action, ensemble_size, data_buffer_size):
    return Buffer(d_action=d_action,
                  d_state=d_state,
                  ensemble_size=ensemble_size,
                  buffer_size=data_buffer_size)
Example #18
    def handlePkt(self, code, b, pktData):
        if code == 0x10:  # CREATE_PLAYER_OBJECT
            level, zone, pos = b.readInt8(), b.readInt8(), b.readShor2()
            self.level = level
            self.zone = zone

            self.dead = False
            self.client.stopDCTimer()

            self.match.broadBin(
                0x10,
                Buffer().writeInt16(self.id).write(pktData).writeInt16(
                    self.skin))

        elif code == 0x11:  # KILL_PLAYER_OBJECT
            if self.dead or self.win:
                return

            self.dead = True
            self.client.startDCTimer(60)

            self.match.broadBin(0x11, Buffer().writeInt16(self.id))

        elif code == 0x12:  # UPDATE_PLAYER_OBJECT
            if self.dead or self.lastUpdatePkt == pktData:
                return

            level, zone, pos, sprite, reverse = b.readInt8(), b.readInt8(
            ), b.readVec2(), b.readInt8(), b.readBool()

            if self.level != level or self.zone != zone:
                self.match.onPlayerWarp(self, level, zone)

            self.level = level
            self.zone = zone
            self.posX = pos[0]
            self.posY = pos[1]
            self.lastUpdatePkt = pktData

            if sprite > 5 and self.match.world == "lobby" and zone == 0:
                self.client.block(0x1)
                return

            self.match.broadPlayerUpdate(self, pktData)

        elif code == 0x13:  # PLAYER_OBJECT_EVENT
            if self.dead or self.win:
                return

            type = b.readInt8()

            if self.match.world == "lobby":
                self.client.block(0x2)
                return

            self.match.broadBin(0x13,
                                Buffer().writeInt16(self.id).write(pktData))

        elif code == 0x17:
            killer = b.readInt16()
            if self.id == killer:
                return

            killer = self.match.getPlayer(killer)
            if killer is None:
                return

            killer.sendBin(0x17, Buffer().writeInt16(self.id).write(pktData))

        elif code == 0x18:  # PLAYER_RESULT_REQUEST
            if self.dead or self.win:
                return

            self.win = True
            self.client.startDCTimer(120)

            pos = self.match.getWinners()
            try:
                # Maybe this should be asynchronous?
                if self.server.discordWebhook is not None and pos == 1 and not self.match.private:
                    name = self.name
                    # We already filter players that have a squad so...
                    if len(self.team) == 0 and self.server.checkCurse(
                            self.name):
                        name = "[ censored ]"
                    embed = DiscordEmbed(
                        description=
                        '**%s** has achieved **#1** victory royale!%s' %
                        (name, " (PVP Mode)" if self.gameMode == 1 else
                         " (Hell mode)" if self.gameMode == 2 else ""),
                        color=0xffff00)
                    self.server.discordWebhook.add_embed(embed)
                    self.server.discordWebhook.execute()
                    self.server.discordWebhook.remove_embed(0)
            except:
                pass

            # Make sure that everyone knows that the player is at the axe
            self.match.broadPlayerUpdate(self, self.lastUpdatePkt)

            self.match.broadBin(
                0x18,
                Buffer().writeInt16(self.id).writeInt8(pos).writeInt8(0))

        elif code == 0x19:
            self.trustCount += 1
            if self.trustCount > 8:
                self.client.block(0x3)

        elif code == 0x20:  # OBJECT_EVENT_TRIGGER
            if self.dead:
                return

            level, zone, oid, type = b.readInt8(), b.readInt8(), b.readInt32(
            ), b.readInt8()

            if self.match.world == "lobby" and oid == 458761:
                self.match.goldFlowerTaken = True

            self.match.broadBin(0x20,
                                Buffer().writeInt16(self.id).write(pktData))

        elif code == 0x30:  # TILE_EVENT_TRIGGER
            if self.dead:
                return

            level, zone, pos, type = b.readInt8(), b.readInt8(), b.readShor2(
            ), b.readInt8()

            self.match.broadBin(0x30,
                                Buffer().writeInt16(self.id).write(pktData))
Example #19
from predict_accident import PredictAccident
from threading import Thread
from buffer import Buffer

if __name__ == '__main__':
    while True:
        # pass the bound methods themselves; calling them here would run
        # them in the main thread and hand their return value to Thread
        Thread(target=Buffer().add_to_buffer).start()
        Thread(target=PredictAccident().predict_accident).start()
Example #20
        except:
            return False

    if not is_admin():
        # Re-run the program with admin rights
        ctypes.windll.shell32.ShellExecuteW(None, "runas", sys.executable,
                                            __file__, None, 1)

    this_dir = os.path.dirname(os.path.abspath(__file__))
    HOPSIZE = 2
    FR = 30.
    receiver_data = Receiver("127.0.0.1", 8017)
    receiver_ft = Receiver("127.0.0.1", 9017)

    sender = Sender("127.0.0.1", 8050, freq=FR / HOPSIZE)
    buffer = Buffer(receiver_data, Nbuffer=128, hopsize=HOPSIZE)
    feature_trigger = Buffer(receiver_ft, Nbuffer=1, hopsize=1, type_in="str")

    data_driven_mq = Fluidity_Heaviness(
        buffer=buffer,
        sender=sender,
        trigger=feature_trigger,
        models={'Fluidity': os.path.join(this_dir, 'model_fluidity.pickle')})

    d.set_elements(buffer=buffer)
    receiver_data.start()
    receiver_ft.start()

    buffer.start()
    feature_trigger.start()
    data_driven_mq.start()
    sender.start()
Example #21
def buffer_input():
    return Buffer(tokenize_lines(InputReader('> ')))
Example #22
    def __init__(self,
                 x=0.0,
                 y=0.0,
                 inner_radius=3,
                 radius=16,
                 inner_color=(2, 2, 2, 128),
                 color=(52, 235, 222, 128),
                 Id="Car",
                 indx=None,
                 map_size=None,
                 d2d=True,
                 stop=False):
        if map_size is None:
            map_size = [1024, 768]
        self.kind = "car"
        self.indx = indx
        self.dt = 1 / 30
        self.position = Vector2(x, y)
        self.lastPosition = self.position
        self.inner_radius = inner_radius
        self.radius = radius
        self.inner_color = inner_color
        self.color = color
        self.link_color = (20, 50, 100)
        self.Id = Id
        self.dir = randint(1, 4)
        self.lastDir = self.dir
        self.newDir = self.dir
        self.dic = {1: "Derecha", 2: "Izquierda", 3: "Arriba", 4: "Abajo"}
        self.stop = stop
        self.turn = False
        self.edificeNumbers = 0
        self.randomCounter = randint(10, 150)
        self.currentEdifice = 0
        self.i = 0
        self.collide = False
        self.speed = 2

        self.map_size = map_size
        pygame.font.init()
        try:
            self.font = pygame.font.Font(None, 30)
        except Exception as e:
            print(e)
        self.text_surface = self.font.render(self.Id, True,
                                             pygame.Color('dodgerblue1'))
        self.text_rect = self.text_surface.get_rect()
        self.connectionsList = Register(Id=Id)

        self.subcarriers = Subcarries()

        self.femtoID = ""
        self.femtoIndx = None
        self.d2d = d2d
        # For record
        self.distanceFC = 0
        self.isConnected = False
        self.recording = False
        self.recordLimit = 500
        self.T = 25
        self.Bw = 5

        self.buffer = Buffer(snri=0, interference=0, capacity=0, demand=0)

        self.demand = 0  # Mbps
        self.prx = -100  # dBm
        self.t = 0  # secs
        self.snri = 0  #
        self.interference = 0  #

        self.Ptx = subcarrier_power(d=5.0, dB=True)
        self.updateDemand(random=False)
        self.bw_assigned = 0
        self.rf_su = 1  # sum of the subscribers' data rates
        self.ym = 2  # MC spectral efficiency
        self.yf = 6  # FC spectral efficiency
        self.No = -174  # dBm/Hz
        self.fc = 2300  # MHz

        self.pm = 60  # dBm, MC power
        self.pf = 10  # dBm, FC power
        self.alpha_f = 0  # belongs to a femtocell
        self.alpha_mc = 0  # belongs to a macrocell
        self.Nsc = 256  # number of subcarriers
        self.bwsc = 15 / 1000  # MHz, bandwidth per subcarrier
        self.bits_mod = 6
        self.subscribed = False
        self.subscriptorColorText = (100, 100, 100)
        self.subscriptorColor = (250, 200, 20, 100)
        self.subsCount = 1

        self.fcs = {}
        self.users = {}
        self.neighbors = {}
        self.maxTime = 60  # secs
        self.coalition = False
        self.capacity = 0.0
Example #23
def buffer_lines(lines, prompt="scm> "):
    """Return a Buffer instance iterating through LINES."""
    return Buffer(tokenize_lines(LineReader(lines, prompt)))
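Examples #2, #21, #23, and #27 are variants of the same CS61A-style Scheme reader, where Buffer wraps an iterator of token lists and scheme_read pulls tokens until one complete expression has been parsed. A hypothetical round trip, assuming those helpers behave as in example #27:

# Hypothetical usage; buffer_lines and scheme_read are the reader's own
# helpers, assumed to parse one full expression per scheme_read call.
src = buffer_lines(['(define (square x)', '  (* x x))'])
expr = scheme_read(src)  # one expression, spanning both input lines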
Example #24
File: server.py Project: dktank/PyRoyale
    def onTextMessage(self, payload):
        #print("Text message received: {0}".format(payload))
        packet = json.loads(payload)
        type = packet["type"]

        if self.stat == "l":
            if type == "l00":  # Input state ready
                if self.pendingStat is None:
                    self.transport.loseConnection()
                    return
                self.pendingStat = None

                self.stopDCTimer()

                if self.address != "127.0.0.1" and self.server.getPlayerCountByAddress(
                        self.address) >= self.server.maxSimulIP:
                    self.exception("Too many connections")
                    self.transport.loseConnection()
                    return

                for b in self.server.blocked:
                    if b[0] == self.address:
                        self.blocked = True
                        self.setState("g")  # Ingame
                        return

                team = packet["team"][:3].strip().upper()
                if len(team) == 0:
                    team = self.server.defaultTeam
                skin = packet["skin"] if "skin" in packet else 0
                if skin < 0 or skin > NUM_SKINS - 1:  #once shop is implemented this check should be "does player own this skin"
                    skin = 0
                self.player = Player(
                    self, packet["name"], team,
                    self.server.getMatch(
                        team,
                        packet["private"] if "private" in packet else False),
                    skin)
                self.loginSuccess()
                self.server.players.append(self.player)

                self.setState("g")  # Ingame

        elif self.stat == "g":
            if type == "g00":  # Ingame state ready
                if self.player is None or self.pendingStat is None:
                    if self.blocked:
                        self.sendJSON({
                            "packets": [{
                                "game": "jail",
                                "type": "g01"
                            }],
                            "type":
                            "s01"
                        })
                        return

                    self.transport.loseConnection()
                    return
                self.pendingStat = None

                self.player.onEnterIngame()

            elif type == "g03":  # World load completed
                if self.player is None:
                    if self.blocked:
                        self.sendBin(0x02,
                                     Buffer().writeInt16(0).writeInt16(0))
                        self.startDCTimer(15)
                        return

                    self.transport.loseConnection()
                    return
                self.player.onLoadComplete()

            elif type == "g50":  # Vote to start
                if self.player is None or self.player.voted or self.player.match.playing:
                    return

                self.player.voted = True
                self.player.match.voteStart()

            elif type == "g51":  # (SPECIAL) Force start
                if self.server.mcode and self.server.mcode in packet["code"]:
                    self.player.match.start(True)

            elif type == "gsl":  # Level select
                if self.player is None or self.player.team != "" or not self.player.match.private:
                    return

                levelName = packet["name"]
                if levelName == "custom":
                    try:
                        self.player.match.selectCustomLevel(packet["data"])
                    except Exception as e:
                        estr = str(e)
                        estr = "\n".join(estr.split("\n")[:10])
                        self.sendJSON({
                            "type": "gsl",
                            "name": levelName,
                            "status": "error",
                            "message": estr
                        })
                        return

                    self.sendJSON({
                        "type": "gsl",
                        "name": levelName,
                        "status": "success",
                        "message": ""
                    })
                else:
                    self.player.match.selectLevel(levelName)
Example #25
        for target_pr, pr in zip(self.target_c.parameters(),
                                 self.critic.parameters()):
            target_pr.data.copy_(TAU * pr.data + (1 - TAU) * target_pr.data)


episodes = 150

seed = 12
env = gym.make('MountainCarContinuous-v0')
env.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)

agent = DDPG(2, 1)
buf = Buffer(BUF_SIZE)
noise = OUStrategy(env.action_space, min_sigma=1e-4)
updates_noise = 0
for episode in range(episodes):
    state = env.reset()
    episode_reward = 0
    done = False
    total_reward = 0
    while not done:
        action = agent.act(state)
        action = noise.get_action_from_raw_action(action, updates_noise)
        updates_noise += 1
        next_state, reward, done, _ = env.step(action)
        total_reward += reward
        buf.add((state, action, reward, next_state, done))
        if len(buf) >= BATCH_SIZE:
Example #26
    def __init__(self, config, rng):
        self.config = config
        self.rng = rng

        self.task = config.task
        self.model_dir = config.model_dir
        self.gpu_memory_fraction = config.gpu_memory_fraction

        self.log_step = config.log_step
        self.max_step = config.max_step

        self.K_d = config.K_d
        self.K_g = config.K_g
        self.initial_K_d = config.initial_K_d
        self.initial_K_g = config.initial_K_g
        self.checkpoint_secs = config.checkpoint_secs

        config.data_set = 'hands'
        DataLoader = {'hands': hands.DataLoader}[config.data_set]
        # DataLoader = {'gaze': gaze_data.DataLoader}[config.data_set]  #  DELETE LATER
        self.data_loader = DataLoader(config, rng=self.rng)

        self.model = Model(config, self.data_loader)
        self.history_buffer = Buffer(config, self.rng)

        self.summary_ops = {
            'test_synthetic_images': {
                'summary':
                tf.summary.image("test_synthetic_images",
                                 self.model.resized_x,
                                 max_outputs=config.max_image_summary),
                'output':
                self.model.resized_x,
            },
            'test_refined_images': {
                'summary':
                tf.summary.image("test_refined_images",
                                 self.model.denormalized_R_x,
                                 max_outputs=config.max_image_summary),
                'output':
                self.model.denormalized_R_x,
            }
        }

        self.saver = tf.train.Saver()
        self.summary_writer = tf.summary.FileWriter(self.model_dir)

        sv = tf.train.Supervisor(logdir=self.model_dir,
                                 is_chief=True,
                                 saver=self.saver,
                                 summary_op=None,
                                 summary_writer=self.summary_writer,
                                 save_summaries_secs=300,
                                 save_model_secs=self.checkpoint_secs,
                                 global_step=self.model.discrim_step)

        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=self.gpu_memory_fraction,
            allow_growth=True)  # seems to be not working
        sess_config = tf.ConfigProto(allow_soft_placement=True,
                                     gpu_options=gpu_options)

        self.sess = sv.prepare_or_wait_for_session(config=sess_config)
Example #27
def read_line(line):
    """Read a single string LINE as a Scheme expression."""
    return scheme_read(Buffer(tokenize_lines([line])))
Example #28
    else:
        assert False

    return x, z


x, z = createLayers()
model = Model(input=x, output=z)
model.summary()
model.compile(optimizer='adam', loss='mse')

x, z = createLayers()
target_model = Model(input=x, output=z)
target_model.set_weights(model.get_weights())

mem = Buffer(args.replay_size, env.observation_space.shape, (1, ))

total_reward = 0
for i_episode in xrange(args.episodes):
    observation = env.reset()
    episode_reward = 0
    for t in xrange(args.max_timesteps):
        if args.display:
            env.render()

        if np.random.random() < args.exploration:
            action = env.action_space.sample()
        else:
            s = np.array([observation])
            q = model.predict_on_batch(s)
            #print "q:", q
Example #29
    def onTextMessage(self, payload):
        #print("Text message received: {0}".format(payload))
        packet = json.loads(payload)
        type = packet["type"]

        if self.stat == "l":
            if type == "l00":  # Input state ready
                if self.player is not None or self.pendingStat is None:
                    self.sendClose2()
                    return
                self.pendingStat = None
                self.stopDCTimer()

                if self.address != "127.0.0.1" and self.server.getPlayerCountByAddress(
                        self.address) >= self.server.maxSimulIP:
                    self.sendClose2()
                    return

                if self.server.shuttingDown:
                    self.setState("g")  # Ingame
                    return
                for b in self.server.blocked:
                    if b[0] == self.address:
                        self.blocked = True
                        self.setState("g")  # Ingame
                        return
                if self.username != "":
                    if self.accountPriv["isBanned"]:
                        self.blocked = True
                        self.setState("g")  # Ingame
                        return

                name = packet["name"]
                team = packet["team"][:3].strip().upper()
                priv = packet["private"] if "private" in packet else False
                skin = int(packet["skin"]) if "skin" in packet else 0
                if not self.account and self.server.restrictPublicSkins and len(
                        self.server.guestSkins) > 0:
                    if skin not in self.server.guestSkins:
                        skin = self.server.guestSkins[0]
                gm = int(packet["gm"]) if "gm" in packet else 0
                gm = gm if gm in range(NUM_GM) else 0
                gm = ["royale", "pvp", "hell"][gm]
                isDev = self.account["isDev"] if "isDev" in self.account else False
                self.player = Player(
                    self,
                    name,
                    (team if (team != "" or priv) else self.server.defaultTeam).lower(),
                    self.server.getMatch(team, priv, gm),
                    skin if skin in range(self.server.skinCount) else 0,
                    gm,
                    isDev)
                #if priv:
                #    self.maxConLifeTimer.cancel()
                self.loginSuccess()
                self.server.players.append(self.player)

                self.setState("g")  # Ingame

            elif type == "llg":  #login
                if self.username != "" or self.player is not None or self.pendingStat is None:
                    self.sendClose2()
                    return
                self.stopDCTimer()

                username = packet["username"].upper()
                if self.address in self.server.loginBlocked:
                    self.sendJSON({
                        "type": "llg",
                        "status": False,
                        "msg": "max login tries reached.\ntry again in one minute."
                    })
                    return
                elif username in self.server.authd:
                    self.sendJSON({
                        "type": "llg",
                        "status": False,
                        "msg": "account already in use"
                    })
                    return

                status, msg, self.accountPriv = datastore.login(
                    self.dbSession, username, packet["password"])

                j = {"type": "llg", "status": status, "msg": msg}
                if status:
                    self.account = msg
                    j["username"] = self.username = username
                    self.session = msg["session"]
                    self.server.authd.append(self.username)
                else:
                    if self.address not in self.server.maxLoginTries:
                        self.server.maxLoginTries[self.address] = 1
                    else:
                        self.server.maxLoginTries[self.address] += 1
                        if self.server.maxLoginTries[self.address] >= 4:
                            del self.server.maxLoginTries[self.address]
                            self.server.loginBlocked.append(self.address)
                            reactor.callLater(60,
                                              self.server.loginBlocked.remove,
                                              self.address)
                self.sendJSON(j)

            elif type == "llo":  #logout
                if self.username == "" or self.player is not None or self.pendingStat is None:
                    self.sendClose2()
                    return

                datastore.logout(self.dbSession, self.session)
                self.sendJSON({"type": "llo"})

            elif type == "lrg":  #register
                if self.username != "" or self.address not in self.server.captchas or self.player is not None or self.pendingStat is None:
                    self.sendClose2()
                    return
                self.stopDCTimer()

                username = packet["username"].upper()
                if CP_IMPORT and len(packet["captcha"]) != 5:
                    status, msg = False, "invalid captcha"
                elif CP_IMPORT and packet["captcha"].upper(
                ) != self.server.captchas[self.address]:
                    status, msg = False, "incorrect captcha"
                elif util.checkCurse(username):
                    status, msg = False, "please choose a different username"
                else:
                    status, msg, self.accountPriv = datastore.register(
                        self.dbSession, username, packet["password"])

                if status:
                    del self.server.captchas[self.address]
                    self.account = msg
                    self.username = username
                    self.session = msg["session"]
                    self.server.authd.append(self.username)
                self.sendJSON({"type": "lrg", "status": status, "msg": msg})

            elif type == "lrc":  #request captcha
                if self.username != "" or self.player is not None or self.pendingStat is None:
                    self.sendClose2()
                    return
                if not CP_IMPORT:
                    self.server.captchas[self.address] = ""
                    self.sendJSON({"type": "lrc", "data": ""})
                    return
                self.stopDCTimer()

                cp = ''.join(
                    random.SystemRandom().choice(string.ascii_uppercase +
                                                 string.digits)
                    for _ in range(5))
                self.server.captchas[self.address] = cp

                imageCaptcha = ImageCaptcha()
                image = imageCaptcha.generate_image(cp)

                imgByteArr = BytesIO()
                image.save(imgByteArr, format='PNG')
                imgByteArr = imgByteArr.getvalue()

                self.sendJSON({
                    "type": "lrc",
                    "data": base64.b64encode(imgByteArr).decode("utf-8")
                })

            elif type == "lrs":  #resume session
                if self.username != "" or self.player is not None or self.pendingStat is None:
                    self.sendClose2()
                    return
                self.stopDCTimer()

                status, msg, self.accountPriv = datastore.resumeSession(
                    self.dbSession, packet["session"])

                j = {"type": "lrs", "status": status, "msg": msg}
                if status:
                    if msg["username"] in self.server.authd:
                        self.sendJSON({
                            "type": "lrs",
                            "status": False,
                            "msg": "account already in use"
                        })
                        return
                    j["username"] = self.username = msg["username"]
                    self.account = msg
                    self.session = msg["session"]
                    self.server.authd.append(self.username)
                self.sendJSON(j)

            elif type == "lpr":  #update profile
                if self.username == "" or self.player is not None or self.pendingStat is None:
                    self.sendClose2()
                    return

                res = datastore.updateAccount(self.dbSession, self.username,
                                              packet)
                j = {
                    "type": "lpr",
                    "status": res[0],
                    "changes": res[1],
                    "msg": res[2]
                }
                self.sendJSON(j)

            elif type == "lpc":  #password change
                if self.username == "" or self.player is not None or self.pendingStat is None:
                    self.sendClose2()
                    return

                datastore.changePassword(self.dbSession, self.username,
                                         packet["password"])

        elif self.stat == "g":
            if type == "g00":  # Ingame state ready
                if self.player is None or self.pendingStat is None:
                    if self.server.shuttingDown:
                        levelName, levelData = self.server.getRandomLevel(
                            "maintenance", None)
                        self.sendJSON({
                            "packets": [{
                                "game": levelName,
                                "levelData": json.dumps(levelData),
                                "type": "g01"
                            }],
                            "type": "s01"
                        })
                        return
                    if self.blocked:
                        levelName, levelData = self.server.getRandomLevel(
                            "jail", None)
                        self.sendJSON({
                            "packets": [{
                                "game": levelName,
                                "levelData": json.dumps(levelData),
                                "type": "g01"
                            }],
                            "type": "s01"
                        })
                        return
                    self.sendClose2()
                    return
                self.pendingStat = None

                self.player.onEnterIngame()

            elif type == "g03":  # World load completed
                if self.player is None:
                    if self.blocked or self.server.shuttingDown:
                        self.sendBin(
                            0x02,
                            Buffer().writeInt16(0).writeInt16(0).writeInt8(0))
                        #self.startDCTimer(15)
                        return
                    self.sendClose2()
                    return
                self.player.onLoadComplete()

            elif type == "g50":  # Vote to start
                if self.player is None or self.player.voted or self.player.match.playing:
                    return

                self.player.voted = True
                self.player.match.voteStart()

            elif type == "g51":  # (SPECIAL) Force start
                if self.server.mcode and self.server.mcode in packet["code"]:
                    self.player.match.start(True)

            elif type == "gsl":  # Level select
                if self.player is None or (
                    (not self.server.enableLevelSelectInMultiPrivate and
                     self.player.team != "") or not self.player.match.private
                ) and not self.player.isDev:
                    return

                levelName = packet["name"]
                if levelName == "custom":
                    try:
                        self.player.match.selectCustomLevel(packet["data"])
                    except Exception as e:
                        estr = str(e)
                        estr = "\n".join(estr.split("\n")[:10])
                        self.sendJSON({
                            "type": "gsl",
                            "name": levelName,
                            "status": "error",
                            "message": estr
                        })
                        return

                    self.sendJSON({
                        "type": "gsl",
                        "name": levelName,
                        "status": "success",
                        "message": ""
                    })
                else:
                    self.player.match.selectLevel(levelName)
            elif type == "gbn":  # ban player
                if not self.account["isDev"]:
                    self.sendClose2()
                pid = packet["pid"]
                ban = packet["ban"]
                self.player.match.banPlayer(pid, ban)
            elif type == "gnm":  # rename player
                if not self.account["isDev"]:
                    self.sendClose2()
                pid = packet["pid"]
                newName = packet["name"]
                self.player.match.renamePlayer(pid, newName)
            elif type == "gsq":  # resquad player
                if not self.account["isDev"]:
                    self.sendClose2()
                pid = packet["pid"]
                newName = packet["name"].lower()
                if len(newName) > 3:
                    newName = newName[:3]
                self.player.match.resquadPlayer(pid, newName)
            else:
                print("unknown message! " + payload)
Example #30
def read_line(line):
    return scheme_read(Buffer(tokenize_lines([line])))