Example #1
    def callback(self, event):
        name = event.widget['text'].split(':')[0]
        print('passed', name)
        with open(self.menu + 'state.json') as f:
            state = json.load(f)
        if state[name] == self.disable:
            print(name, 'is disabled')
            return
        print('state old', state)
        # main logic part
        _start = counter()
        pwd = os.getcwd()  # equivalent to shelling out to pwd via os.popen
        script = pwd + '/state_machine.lua'
        f = os.popen("lua " + script + (" '%s'" % name), 'r')
        # print('popen', f.read())  # careful: reading here would drain the pipe before json.load
        state = json.load(f)
        print("system call lua spend %d ms!!!" % ((counter() - _start) * 1000))
        f.close()
        self.scene_check(state)

        # if name not in self.sce:
        #     # state = self.fun(state, name)
        #     f = os.popen("lua ~/project/python_network_program_study/jinding_config_build/tmp_V06/state_machine.lua '%s'" % name, 'r')
        #     # print('popen',f.read())
        #     state = json.load(f)
        #     f.close()
        #     self.scene_check(state)
        # else:

        #     if not state[name]:
        #         '''
        #         disable all other enabled scene!
        #         '''
        #         for s in self.sce:
        #             if state[s]:
        #                 state.update(self.scene_control(s, state, False))
        #         # [state.update(self.scene_control(s, state, False)) for s in self.sce]
        #         state[name] = True
        #     else:
        #         state[name] = False
        #     self.scene_control(name, state, state[name])
        # main logic end

        print('state new', state)
        self.button_update(state)
        with open(self.menu + 'state.json', 'w') as f:
            json.dump(state, f)
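The popen-plus-json pattern above can also be written with subprocess.run, which sidesteps shell quoting of name; a minimal sketch (the helper name run_state_machine is an assumption):

import json
import subprocess
from time import perf_counter as counter

def run_state_machine(script, name):
    # same lua call as in the callback above, timed with perf_counter
    _start = counter()
    result = subprocess.run(['lua', script, name],
                            capture_output=True, text=True, check=True)
    print("lua system call took %d ms" % ((counter() - _start) * 1000))
    return json.loads(result.stdout)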
Example #2
    def fit(self, X, y, n_epochs, batch_size, print_many=False, verbose=1):
        model, optimizer = self.model, self.optimizer

        X, y = np.asarray(X), np.asarray(y)
        total_size = len(X)
        time_steps = X.shape[1]
        max_iters = total_size // batch_size
        if total_size % batch_size != 0:
            max_iters += 1

        progresses = {int(n_epochs // (100 / i)): i for i in range(1, 101, 1)}
        t0, durations = counter(), list()

        for epoch in range(n_epochs):
            epoch_losses, epoch_acc = [], []
            for i in range(max_iters):
                # N*T*D batch style
                x_batch = X[i * batch_size:(i + 1) * batch_size]
                y_batch = y[i * batch_size:(i + 1) * batch_size]
                current_batch_size = x_batch.shape[0]

                loss = model.forward(x_batch, y_batch)
                model.backward()
                params, grads = model.params, model.grads
                optimizer.update(params, grads)
                epoch_losses.append(loss)

                if verbose >= 2:
                    loss_s = round(loss.item(), 3)
                    perp = round(np.exp(loss_s).item(), 2)
                    print(
                        f"epoch-iter: {epoch}-{i}, loss: {loss_s}, perp: {perp}"
                    )

            durations.append(counter() - t0)
            t0 = counter()
            if (print_many and epoch % 100 == 0) or (not print_many
                                                     and epoch in progresses):
                acc_s = f"{round(sum(epoch_acc) / (X.shape[0] * X.shape[1]) * 100, 4)}%"
                loss_s = round(np.mean(np.array(epoch_losses)).item(), 3)
                perp = round(np.exp(loss_s).item(), 2)
                print(
                    f"epoch: {epoch}, loss: {loss_s}, perp: {perp}, acc: {acc_s}"
                )

        if verbose > 0:
            avg_epoch_time = sum(durations) / len(durations)
            print("average epoch time:", round(avg_epoch_time, 3))
            return avg_epoch_time
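The progresses comprehension maps roughly 100 evenly spaced epoch indices to the percentage reached there; it is what gates the periodic report when print_many is false. A quick illustration:

n_epochs = 50
progresses = {int(n_epochs // (100 / i)): i for i in range(1, 101)}
# keys are epoch indices, values the progress percentage at that epoch;
# colliding keys keep the later (larger) percentage
print(progresses[0], progresses[n_epochs])  # -> 1 100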
Example #3
        def wrapped(*args, **kwargs):
            start = counter()
            return_value = _f(*args, **kwargs)
            interval = counter() - start
            wrapped.interval = interval

            if print_fn is not None:
                nonlocal name
                if name is None:
                    name = _f.__name__ + ': '
                t, u = rescale_time(interval, unit=unit)
                print_str = f'{name}{t:.3g} {u}'
                print_fn(print_str)

            return return_value
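wrapped is the inner function of a timing decorator. A minimal sketch of the enclosing decorator it implies; name, unit, and print_fn come from the fragment, while the timed name and the stand-in rescale_time are assumptions:

from time import perf_counter as counter

def rescale_time(seconds, unit=None):
    # simplistic stand-in for the rescale_time the fragment assumes
    return (seconds * 1000, 'ms') if unit == 'ms' else (seconds, 's')

def timed(_f=None, *, name=None, unit=None, print_fn=print):
    def decorator(_f):
        def wrapped(*args, **kwargs):
            nonlocal name
            start = counter()
            return_value = _f(*args, **kwargs)
            interval = counter() - start
            wrapped.interval = interval
            if print_fn is not None:
                if name is None:
                    name = _f.__name__ + ': '
                t, u = rescale_time(interval, unit=unit)
                print_fn(f'{name}{t:.3g} {u}')
            return return_value
        return wrapped
    return decorator(_f) if _f is not None else decorator

It can then be applied either bare (@timed) or with arguments (@timed(unit='ms')).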
Example #4
    def time_check(self, idx):
        if idx == 0:
            self.t0 = counter()
            if idx in self.progresses:
                print("WARNING: The length is too small for this timer.")
        elif idx in self.progresses:
            self.t1 = counter()
            duration = self.t1 - self.t0
            self.t0 = counter()
            self.elapsed += duration
            idx = 1 if idx == 0 else idx  # defensive guard; idx is never 0 in this branch
            eta = self.elapsed * ((self.length / idx) - 1)
            self.print_time(idx, self.elapsed, eta)
        else:
            pass
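A sketch of the host class this method implies; length, progresses, elapsed, t0, t1, and print_time appear in the fragment, everything else is an assumption:

from time import perf_counter as counter

class ProgressTimer:
    def __init__(self, length):
        self.length = length  # total number of steps being tracked
        self.progresses = {int(length // (100 / i)): i
                           for i in range(1, 101)}  # ~100 report checkpoints
        self.elapsed = 0.0
        self.t0 = self.t1 = counter()

    def print_time(self, idx, elapsed, eta):
        print(f'{idx}/{self.length} steps, '
              f'elapsed {elapsed:.2f} s, eta {eta:.2f} s')

time_check(idx) is then called once per step with the running index.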
Example #5
    def interval(self) -> float:
        """Time elapsed in seconds

        If still inside the context, returns the time elapsed since
        entering it. If the context has already been exited, returns
        the total time spent inside it.

        """
        if self._interval:  # when the context exited
            return self._interval
        else:  # when still in the context
            return counter() - self._start
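This property belongs to a timer context manager; Example #14 supplies the matching __enter__. A minimal sketch of the whole class, with an __exit__ assumed to be consistent with the docstring:

from time import perf_counter as counter

class Timer:
    def __init__(self):
        self._start = None
        self._interval = 0.0

    def __enter__(self):
        self._start = counter()
        return self

    def __exit__(self, *exc):
        self._interval = counter() - self._start
        return False  # never suppress exceptions

    @property
    def interval(self) -> float:
        if self._interval:  # context already exited
            return self._interval
        return counter() - self._start  # still inside the context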
Example #6
    def __next__(self):
        try:
            now = counter()
            if self._last:
                interval = now - self._last
                self.intervals.append(interval)
                t, u = rescale_time(interval, self.unit)
                self.iteration_print_fn(
                    f'iteration {self.num_iterations:4}: {t:.3g} {u}')

            self._last = now

            return next(self.iterable)

        except StopIteration:
            self.print_summary()
            raise
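Together with the __iter__ from Example #9, this implies a small wrapper that times each pass through a loop. A minimal self-contained sketch; the class name, the num_iterations bookkeeping, and print_summary are assumptions, and rescale_time is inlined as plain seconds:

from time import perf_counter as counter

class TimedIterable:
    def __init__(self, iterable, iteration_print_fn=print):
        self.iterable = iterable
        self.iteration_print_fn = iteration_print_fn
        self.intervals = []
        self.num_iterations = 0
        self._last = None

    def __iter__(self):
        self._start = counter()
        self.iterable = iter(self.iterable)
        return self

    def __next__(self):
        try:
            now = counter()
            if self._last is not None:
                interval = now - self._last
                self.intervals.append(interval)
                self.iteration_print_fn(
                    f'iteration {self.num_iterations:4}: {interval:.3g} s')
            self._last = now
            self.num_iterations += 1
            return next(self.iterable)
        except StopIteration:
            self.print_summary()
            raise

    def print_summary(self):
        total = sum(self.intervals)
        self.iteration_print_fn(
            f'{len(self.intervals)} timed intervals, {total:.3g} s total')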
Example #7
s1.angle(-60, 1500)  # move to -60 degrees in 1500ms
s1.speed(50)  # for continuous rotation servos
'''
1.6 External interrupts
'''
from pyb import Pin, ExtInt

callback = lambda e: print("intr")
ext = ExtInt(Pin('Y1'), ExtInt.IRQ_RISING, Pin.PULL_NONE, callback)
'''
1.7 Timers
'''
from pyb import Timer, LED

tim = Timer(1, freq=1000)
tim.counter()  # get counter value
tim.freq(0.5)  # set frequency to 0.5 Hz
tim.callback(lambda t: LED(1).toggle())
'''
1.8 PWM
'''
from pyb import Pin, Timer

p = Pin('X1')
tim = Timer(2, freq=1000)
ch = tim.channel(1, Timer.PWM, pin=p)
ch.pulse_width_percent(50)
'''
1.9 ADC
'''
from pyb import Pin, ADC
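# assumed continuation of 1.9 (the listing is cut off after the import);
# typical pyboard usage, pin name illustrative
adc = ADC(Pin('X19'))
val = adc.read()  # raw 12-bit reading, 0..4095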
Example #8
def main():
    """Start Dodge."""

    pygame.init()

    #Display created with the screen size constants set earlier.
    screen = pygame.display.set_mode(screen_size)

    #The line that separates the lanes is set using the size of the screen.
    lane_line = FakeSprite("White Rectangle.jpg",
                           position=(int(screen_width * 0.45), 0),
                           size=(screen_height, int(screen_width / 10)),
                           rotation=90)

    player_ball = FakeSprite("White Ball.jpg",
                             position=(int(screen_width * 0.025),
                                       int(screen_height * 13 / 15)),
                             size=(int(screen_width * 0.4),
                                   int(screen_height * 2 / 15)))

    first_wall = FakeSprite("White Rectangle.jpg",
                            position=(0, 0),
                            size=(int(screen_width * 0.45),
                                  int(screen_height / 30)))

    lane_line.blit_on(screen)
    player_ball.blit_on(screen)
    first_wall.blit_on(screen)
    pygame.display.flip()

    walls = [first_wall]
    wall_speed = [0, 5]
    wall_space = player_ball.rectangle.height * 5
    base_time = counter()
    press = False
    over = False
    score = 0

    #Main Game Loop
    while True:
        #If a wall makes contact with the ball, the player loses.
        for wall in walls:
            if wall.rectangle.colliderect(player_ball.rectangle):
                game_over(screen, score)
                over = True

        #If the game is over, the game no longer loops.
        if over:
            sleep(3)
            break

        #The game gets progressively harder.
        if counter() - base_time >= 1:
            base_time = counter()
            wall_speed[1] += 1

        #When the previous wall has gone far enough, the next wall comes down.
        if walls[-1].rectangle.top >= wall_space:
            walls.append(new_wall())

        #Every time a wall goes offscreen, it is removed so it is no longer
        #animated, keeping processing fast. The player also gets a point.
        for wall in walls[:]:  #iterate over a copy since the list is mutated
            if wall.rectangle.top >= screen_height:
                walls.remove(wall)
                score += 1

        #All the walls currently onscreen move down with their current speed.
        for wall in walls:
            wall.move(wall_speed)

        #The player's ball is moved to the other side
        #if the space bar is pressed.
        if pygame.key.get_pressed()[K_SPACE]:
            press = True

        elif not pygame.key.get_pressed()[K_SPACE] and press:
            press = False
            if player_ball.rectangle.left == 5:
                player_ball.rectangle.left = 115
            elif player_ball.rectangle.left == 115:
                player_ball.rectangle.left = 5

        #Image update phase.
        #The screen is filled black to erase the previous frame.
        screen.fill(black)

        #All of the (fake) sprites are blitted back onto the display.
        lane_line.blit_on(screen)
        player_ball.blit_on(screen)

        for wall in walls:
            wall.blit_on(screen)

        #The display is updated.
        pygame.display.flip()

        #The pump() function is called to keep input working.
        pygame.event.pump()

        sleep(0.01)

    #3 seconds after game over, pygame quits.
    pygame.quit()
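The difficulty ramp relies on a once-per-interval check against perf_counter. Isolated as a reusable sketch (the Every name is an assumption):

from time import perf_counter as counter

class Every:
    def __init__(self, seconds):
        self.seconds = seconds
        self.base = counter()

    def __call__(self):
        # True at most once per self.seconds, as in the wall_speed ramp
        if counter() - self.base >= self.seconds:
            self.base = counter()
            return True
        return False

# ramp = Every(1.0)
# ... inside the game loop:  if ramp(): wall_speed[1] += 1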
Example #9
    def __iter__(self):
        self._start = counter()
        self.iterable = iter(self.iterable)
        return self
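This pairs with the __next__ from Example #6; a brief usage sketch, reusing the TimedIterable sketch shown there:

for _ in TimedIterable(range(3)):
    pass  # one timing line per interval, then a summary at StopIteration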
Example #10
    def train_np_last(self, X, y_true, batch_size, learning_rate, num_epochs,
                      print_many, verbose):
        self.batch_size = batch_size
        lr = learning_rate
        progresses = {
            int(num_epochs // (100 / i)): i
            for i in range(1, 101, 1)
        }
        t0 = counter()
        durations = []

        rnn = RNNLayer(input_dim=self.input_dim,
                       hidden_dim=self.hidden_dim,
                       Wx=self.rnn_Wx,
                       Wh=self.rnn_Wh,
                       bias=self.rnn_b)

        for epoch in range(num_epochs):
            epoch_losses, epoch_acc = [], []
            for i in range(self.max_iters):
                # # T*N*D batch style
                # x_batch = X[i * self.batch_size: (i + 1) * self.batch_size]
                # x_batch = np.array([x_batch[:, step, :] for step in range(self.time_steps)])
                # y_true_batch = y_true[i * self.batch_size:(i + 1) * self.batch_size]
                # current_batch_size = x_batch.shape[1]

                # N*T*D batch style
                x_batch = X[i * self.batch_size:(i + 1) * self.batch_size]
                y_true_batch = y_true[i * self.batch_size:(i + 1) *
                                      self.batch_size]
                current_batch_size = x_batch.shape[0]

                fc = FCLayer(W=self.fc_W,
                             bias=self.fc_b,
                             batch_size=current_batch_size)
                loss = SoftmaxWithLossLayer()

                h_last, h_stack = rnn.forward(x_batch)
                fc_out = fc.forward(x=h_last)
                loss_value, num_acc = loss.forward(x=fc_out,
                                                   y_true=y_true_batch)
                epoch_losses.append(loss_value)
                epoch_acc.append(num_acc)

                # backward pass
                d_L = loss.backward()
                fc_grads = fc.backward(d_L)
                d_fc_W = fc_grads['W_grad']
                d_fc_bias = fc_grads['bias_grad']
                d_h_last = fc_grads['x_grad']
                grads = rnn.backward(d_h_next=d_h_last, optimize=True)

                # parameter update
                self.rnn_Wx -= lr * grads["Wx_grad"]
                self.rnn_Wh -= lr * grads["Wh_grad"]
                self.rnn_b -= lr * grads["bias_grad"]
                self.fc_W -= lr * d_fc_W
                self.fc_b -= lr * d_fc_bias

                parameters = [self.rnn_Wx, self.rnn_Wh, self.rnn_b]
                rnn.update(parameters)

            durations.append(counter() - t0)
            t0 = counter()
            if (print_many and epoch % 100 == 0) or (not print_many
                                                     and epoch in progresses):
                acc_s = f"{round(sum(epoch_acc) / (X.shape[0]) * 100, 4)}%"
                loss_s = round(np.mean(np.array(epoch_losses)).item(), 3)
                perp = round(np.exp(loss_s).item(), 2)
                print(
                    f"epoch: {epoch}, loss: {loss_s}, perp: {perp}, acc: {acc_s}"
                )

        if verbose > 0:
            avg_epoch_time = sum(durations) / len(durations)
            print("average epoch time:", round(avg_epoch_time, 3))
            return avg_epoch_time
Example #11
    def train_np_stack(self, X, y_true, batch_size, learning_rate, num_epochs,
                       print_many, verbose):
        self.batch_size = batch_size
        lr = learning_rate
        progresses = {
            int(num_epochs // (100 / i)): i
            for i in range(1, 101, 1)
        }
        t0 = counter()
        durations = []

        if self.layertype == 'rnn':
            rnn = RNNLayerWithTimesteps(input_dim=self.input_dim,
                                        hidden_dim=self.hidden_dim,
                                        Wx=self.rnn_Wx,
                                        Wh=self.rnn_Wh,
                                        bias=self.rnn_b)
        elif self.layertype == 'lstm':
            rnn = LSTMLayerTimesteps(self.input_dim, self.hidden_dim,
                                     self.rnn_Wx, self.rnn_Wh, self.rnn_b,
                                     self.stateful)
        else:
            raise ValueError(f"unsupported layertype: {self.layertype}")

        for epoch in range(num_epochs):
            epoch_losses, epoch_acc = [], []
            for i in range(self.max_iters):
                # N*T*D batch style
                x_batch = X[i * self.batch_size:(i + 1) * self.batch_size]
                y_true_batch = y_true[i * self.batch_size:(i + 1) *
                                      self.batch_size]
                current_batch_size = x_batch.shape[0]

                fc = FCLayerTimesteps(W=self.fc_W, bias=self.fc_b)
                loss = SoftmaxWithLossLayerTimesteps()

                h_last, h_stack = rnn.forward(x_batch)
                fc_out = fc.forward(x=h_stack)
                loss_value, num_acc = loss.forward(x=fc_out,
                                                   y_true=y_true_batch)
                epoch_losses.append(loss_value)
                epoch_acc.append(num_acc)

                if verbose >= 2:
                    acc_s = f"{round(num_acc / (x_batch.shape[0] * x_batch.shape[1]) * 100, 4)}%"
                    loss_s = round(loss_value.item(), 3)
                    perp = round(np.exp(loss_s).item(), 2)
                    print(
                        f"epoch-iter: {epoch}-{i}, loss: {loss_s}, perp: {perp}, acc: {acc_s}"
                    )

                # backward pass
                d_L = loss.backward()
                d_h_stack = fc.backward(d_L)
                rnn.backward(d_h_stack=d_h_stack, optimize=True)

                if self.layertype == 'rnn':
                    dWx, dWh, dbias = rnn.grads["Wx"], rnn.grads[
                        "Wh"], rnn.grads["bias"]
                elif self.layertype == 'lstm':
                    dWx, dWh, dbias = rnn.grads

                # parameter update
                self.rnn_Wx -= lr * dWx
                self.rnn_Wh -= lr * dWh
                self.rnn_b -= lr * dbias
                self.fc_W -= lr * fc.grads[0]
                self.fc_b -= lr * fc.grads[1]

                rnn.update(self.rnn_Wx, self.rnn_Wh, self.rnn_b)

            durations.append(counter() - t0)
            t0 = counter()
            if (print_many and epoch % 100 == 0) or (not print_many
                                                     and epoch in progresses):
                acc_s = f"{round(sum(epoch_acc) / (X.shape[0] * X.shape[1]) * 100, 4)}%"
                loss_s = round(np.mean(np.array(epoch_losses)).item(), 3)
                perp = round(np.exp(loss_s).item(), 2)
                print(
                    f"epoch: {epoch}, loss: {loss_s}, perp: {perp}, acc: {acc_s}"
                )

        if verbose > 0:
            avg_epoch_time = sum(durations) / len(durations)
            print("average epoch time:", round(avg_epoch_time, 3))
            return avg_epoch_time
Example #12
    def train_torch(self, X, y_true, batch_size, learning_rate, num_epochs,
                    print_many, verbose):
        self.batch_size = batch_size
        progresses = {
            int(num_epochs // (100 / i)): i
            for i in range(1, 101, 1)
        }
        t0 = counter()
        durations = []

        device = torch.device('cuda:0')
        rnn = RNN(input_size=self.input_dim,
                  hidden_size=self.hidden_dim,
                  num_layers=1,
                  nonlinearity='tanh',
                  bias=True,
                  batch_first=False).to(device)
        fc = FCLayer(self.hidden_dim, self.output_size, bias=True).to(device)
        params = [rnn.parameters(), fc.parameters()]
        optimizer = SGD(chain(*params), lr=learning_rate)
        for epoch in range(num_epochs):
            epoch_loss = 0
            for i in range(self.max_iters):
                x_batch = X[i * self.batch_size:(i + 1) * self.batch_size]
                x_batch = np.array(
                    [x_batch[:, step, :] for step in range(self.time_steps)])
                y_true_batch = y_true[i * self.batch_size:(i + 1) *
                                      self.batch_size]
                batch_size_local = x_batch.shape[1]

                # convert to pytorch tensor
                y_true_batch = y_true_batch.astype(np.int64)
                y_true_batch = torch.tensor(y_true_batch,
                                            requires_grad=False).to(device)
                x_batch = x_batch.astype(np.float32)
                x_batch = torch.tensor(x_batch, requires_grad=True).to(device)

                # forward pass
                h_stack, h_last = rnn.forward(x_batch, hx=None)
                fc_out = fc.forward(h_last)
                log_y_pred = F.log_softmax(input=fc_out, dim=2)
                log_y_pred = log_y_pred.view(batch_size_local,
                                             self.output_size)
                loss = F.nll_loss(input=log_y_pred,
                                  target=y_true_batch,
                                  reduction='mean')

                # update gradient
                optimizer.zero_grad()
                loss.backward()
                epoch_loss += loss.item()
                optimizer.step()

            durations.append(counter() - t0)
            t0 = counter()
            if (print_many and epoch % 100 == 0) or (not print_many
                                                     and epoch in progresses):
                print(
                    f"after epoch: {epoch}, epoch_losses: {round(epoch_loss / self.max_iters, 3)}"
                )

        if verbose > 0:
            avg_epoch_time = sum(durations) / len(durations)
            print("average epoch time:", round(avg_epoch_time, 3))
            return avg_epoch_time
Example #13
from os.path import isfile
from time import perf_counter as counter
from multiprocessing import active_children, Lock, Pipe, Process

from mrrobot.app.units import UnitLoader
from mrrobot.app.exception import Elliot
from mrrobot.app.configuration import Configuration
from mrrobot.app.arguments import parse as argparser

# timed out when: a timeout is set but the clock was never started,
# no timeout is configured at all, or the deadline has passed
istimeout = lambda start, timeout: bool(
    timeout > 0 and not start or not timeout or counter() - start >= timeout)
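A quick check of the three conditions, with values following directly from the lambda:

start = counter()
print(istimeout(start, 0))  # True: no timeout configured counts as expired
print(istimeout(0, 5))      # True: timeout set but the clock never started
print(istimeout(start, 5))  # False: still inside the 5 s window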


def arguments() -> tuple:
    contents = None
    arguments = argparser()
    if isfile(arguments.input):
        with open(arguments.input, "rb") as f:
            contents = f.read()
    return arguments, contents


def check_requirements() -> None:
    # argparse
    try:
        import argparse
        del argparse
    except ImportError:
        raise Elliot("Module argparse is not installed")
    # PIL (Pillow)
    try:
        import PIL
        del PIL
    except ImportError:
        raise Elliot("Module Pillow is not installed")
Example #14
    def __enter__(self):
        self._start = counter()
        return self
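A usage sketch, assuming the Timer class sketched after Example #5:

with Timer() as t:
    sum(range(10**6))         # some work to measure
print(f'{t.interval:.4f} s')  # total time spent inside the context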