def _step(self, inputs, input_sizes, targets):
        """
        Make a single gradient update. This is called by train() and should not
        be called manually.

        Parameters
        ----------

        inputs:
        inputs_sizes:
        targets:

        """

        # Forward pass through the model.
        output = self.model(inputs, input_sizes)

        loss = self.criterion(output, targets.long())

        loss = loss / inputs.size(0)  # average the loss over the minibatch

        if self.distributed:
            # Average the loss across processes so every rank logs the same value.
            loss = loss.to(self.device)
            loss_value = reduce_tensor(loss, self.world_size).item()
        else:
            loss_value = loss.item()

        # Check to ensure valid loss was calculated
        valid_loss, error = check_loss(loss, loss_value)

        if valid_loss:
            self.optimizer.zero_grad()

            # Scale the loss for mixed precision (apex amp) before backprop.
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()

            # Clip gradients on the fp32 master weights to keep updates stable.
            torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer),
                                           self.max_norm)

            self.optimizer.step()
        else:
            print(error)
            print('Skipping grad update')
            loss_value = 0

        return output, loss_value
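A minimal sketch of how a train() loop might drive _step, one call per
minibatch. The loader name, batch layout, and the avg_loss bookkeeping below
are assumptions for illustration, not the actual API:

    def train(self, loader, epochs):
        # Hypothetical driver: unpacks each batch and delegates to _step.
        for epoch in range(epochs):
            self.model.train()
            for inputs, input_sizes, targets in loader:  # assumed batch layout
                output, loss_value = self._step(inputs, input_sizes, targets)
                self.avg_loss += loss_value  # running total, as in Example #2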
Example #2
    def step(self, loss):
        """Apply one optimizer update for a precomputed loss; return False if
        the loss was invalid and the update was skipped."""
        if self.distributed:
            loss = loss.to(self.device)
            loss_value = reduce_tensor(loss, self.world_size).item()
        else:
            loss_value = loss.item()

        valid_loss, error = check_loss(loss, loss_value)
        if valid_loss:
            self.optimizer.zero_grad()
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
            torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.max_norm)
            self.optimizer.step()
        else:
            print(error)
            print('Skipping grad update')
            return False

        self.avg_loss += loss_value
        return True
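check_loss itself is not shown on this page. Judging from how it is called
here, it must return a validity flag plus an error string; a plausible sketch
that guards against non-finite or negative losses might look like this (the
message strings are assumptions):

    import math

    def check_loss(loss, loss_value):
        # Sketch only: `loss` (the tensor) is kept in the signature to match
        # the call sites above, e.g. for tensor-level NaN checks.
        valid, error = True, ''
        if math.isinf(loss_value):
            valid, error = False, 'WARNING: received an inf loss'
        elif math.isnan(loss_value):
            valid, error = False, 'WARNING: received a nan loss'
        elif loss_value < 0:
            valid, error = False, 'WARNING: received a negative loss'
        return valid, error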
Example #3
def state():
    board.populate()
    utils.spawn_new()

    while not utils.check_loss():
        t = str(utils.elapsed_time())
        drop_nums = globs.drop_lists[globs.current_level - 1]
        if any(x in t for x in drop_nums):
            Canvas.draw_board()

            if not globs.current_piece.collision():
                globs.current_piece.move_down()
                utils.insert_piece(globs.current_piece)

            else:
                utils.de_select_piece(globs.current_piece)
                utils.clear_board()
                utils.spawn_new()
                globs.num_placed += 1
                utils.check_level()

        # give player 0.2 seconds to make a last-second adjustment
        if globs.current_piece.collision() and not globs.dropped:
            Canvas.draw_board()
            kb.actions(kb.get_dir(0.2))
        # give player 0.5 seconds to slide piece once made contact
        # TODO set timer, only stop accepting input once timer runs out
        # reset timer when key is pressed
        else:
            kb.actions(kb.get_dir(0.05))
Example #4
            # Fragment of a training loop: `out`, `inputs`, `targets`,
            # `output_sizes`, and `target_sizes` come from a forward pass
            # earlier in the loop that is not part of this excerpt.
            float_out = out.float()  # ensure float32 for loss
            loss = criterion(float_out.to('cpu'), targets, output_sizes,
                             target_sizes).to(device)
            loss = loss / inputs.size(0)  # average the loss over the minibatch

            if args.distributed:
                loss = loss.to(device)
                loss_value = reduce_tensor(loss, args.world_size).item()
            else:
                loss_value = loss.item()

            # Check to ensure valid loss was calculated
            valid_loss, error = check_loss(loss, loss_value)
            if valid_loss:
                optimizer.zero_grad()

                # Compute gradients through the amp-scaled loss.
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(),
                                               args.max_norm)
                optimizer.step()
            else:
                print(error)
                print('Skipping grad update')
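reduce_tensor is used in several of these examples but never defined. A
minimal sketch, assuming torch.distributed has already been initialized,
that sums a tensor across world_size processes and averages it:

    import torch.distributed as dist

    def reduce_tensor(tensor, world_size):
        # Sketch only: all-reduce a copy across every rank, then divide
        # by the number of ranks to get the mean.
        rt = tensor.clone()
        dist.all_reduce(rt, op=dist.ReduceOp.SUM)
        rt /= world_size
        return rt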