Code Example #1
    def evaluate(self) -> MetricLogger:
        # Run the model over the test set and collect classification metrics.
        obj_test = MetricLogger(one_hot=True)
        self.model.eval()
        with torch.no_grad():  # no gradients needed during evaluation
            for (x, y) in self.test_data:
                preds_test = self.model(x)
                obj_test.log(preds_test, y)

        return obj_test
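
None of the classification examples (#1, #2, #4, #5) include the MetricLogger class itself. As a reading aid, here is a hypothetical minimal implementation consistent with how those examples call it (a one_hot constructor flag, log and reset methods, and accuracy, precision, recall properties); the real class in each project may differ.

# Hypothetical sketch of the MetricLogger these classification examples
# appear to assume; the real class in each project may differ.
import torch

class MetricLogger:
    def __init__(self, one_hot: bool = False):
        # one_hot=True: `preds` are per-class scores/logits (argmax needed);
        # one_hot=False: `preds` are already hard class labels.
        self.one_hot = one_hot
        self.reset()

    def reset(self):
        self.preds, self.targets = [], []

    def log(self, preds, targets):
        preds = torch.as_tensor(preds)
        if self.one_hot:
            preds = preds.argmax(dim=1)
        self.preds.append(preds.flatten())
        self.targets.append(torch.as_tensor(targets).flatten())

    def _gather(self):
        preds = torch.cat(self.preds)
        targets = torch.cat(self.targets)
        return preds, targets, torch.unique(torch.cat([preds, targets]))

    @property
    def accuracy(self):
        preds, targets, _ = self._gather()
        return (preds == targets).float().mean().item()

    @property
    def precision(self):
        # Macro-averaged precision (the averaging choice is an assumption).
        preds, targets, classes = self._gather()
        vals = [(targets[preds == c] == c).float().mean()
                for c in classes if (preds == c).any()]
        return torch.stack(vals).mean().item()

    @property
    def recall(self):
        # Macro-averaged recall (the averaging choice is an assumption).
        preds, targets, classes = self._gather()
        vals = [(preds[targets == c] == c).float().mean()
                for c in classes if (targets == c).any()]
        return torch.stack(vals).mean().item()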
Code Example #2
    def train(self):
        model_name = type(self.model).__name__
        since = datetime.now()
        start_time = since.strftime("%H:%M:%S")
        print(f"Training started for {model_name} model at {start_time}")
        self.model.fit(self.X_train, self.y_train)
        time_elapsed = datetime.now() - since
        print(f"Training for {model_name} model completed in {time_elapsed}")
        # Score the fitted model on the validation split.
        pred = self.model.predict(self.X_val)
        obj = MetricLogger(one_hot=False)
        obj.log(pred, self.y_val)
        print(f"Accuracy of {model_name} model:", obj.accuracy, sep='\n')
        print(f"Precision of {model_name} model:", obj.precision, sep='\n')
        print(f"Recall of {model_name} model:", obj.recall, sep='\n')
Code Example #3
File: main.py Project: mcint170/CMSE_401_Project
# Check whether a GPU is available (BM)
if torch.cuda.is_available():
    device = torch.device("cuda:0")
    print("Using GPU!")
else:
    device = torch.device("cpu")
    print("Using CPU!")

mario = Mario(state_dim=(4, 84, 84),
              action_dim=env.action_space.n,
              save_dir=save_dir,
              device=device,
              checkpoint=checkpoint)

logger = MetricLogger(save_dir)

# Make the number of episodes a command-line argument (BM)
if len(sys.argv) > 1:
    episodes = int(sys.argv[1])
    print_e = 20
else:
    episodes = 10
    print_e = 1

# Loop that trains the model for `episodes` episodes by playing the game
for e in range(episodes):

    state = env.reset()

    # Play the game!
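
Note that examples #3 and #6 construct MetricLogger(save_dir): this is the file-writing logger from the PyTorch "Train a Mario-playing RL Agent" tutorial, a different class from the one_hot classification logger in the other examples. Under that assumption, the truncated episode loop typically continues along the lines below; method names such as act, cache, learn, log_step, log_episode, and record come from the tutorial, not from this file.

    # Sketch of the rest of the episode loop, assuming the PyTorch Mario
    # tutorial structure that this project appears to follow.
    while True:
        action = mario.act(state)                       # epsilon-greedy action
        next_state, reward, done, info = env.step(action)
        mario.cache(state, next_state, action, reward, done)  # store transition
        q, loss = mario.learn()                         # sample replay, update Q-net
        logger.log_step(reward, loss, q)
        state = next_state
        if done or info.get('flag_get'):
            break

    logger.log_episode()
    if e % print_e == 0:
        logger.record(episode=e,
                      epsilon=mario.exploration_rate,
                      step=mario.curr_step)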
Code Example #4
    def evaluate(self):
        # Score the fitted model on the held-out test split.
        pred = self.model.predict(self.X_test)
        obj = MetricLogger(one_hot=False)
        obj.log(pred, self.y_test)
        return obj
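
With the sketch after example #1, the one_hot=False path used in examples #2 and #4 (hard labels from an sklearn-style predict) can be exercised directly:

# Quick check of the one_hot=False path against the sketch above.
import torch

ml = MetricLogger(one_hot=False)
ml.log(torch.tensor([0, 1, 1, 2]), torch.tensor([0, 1, 0, 2]))
print(ml.accuracy)  # 0.75: three of the four labels match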
Code Example #5
    def train(self, epochs: int, patience=None):
        model_name = type(self.model).__name__
        since = datetime.now()
        start_time = since.strftime("%H:%M:%S")
        print(
            f"Training and validation started for {model_name} at {start_time}"
        )
        count = 0
        obj_train = MetricLogger(one_hot=True)
        obj_val = MetricLogger(one_hot=True)

        if patience is None:
            print("patience is None")
            for e in range(epochs):
                print('epoch', e)
                obj_train.reset()
                obj_val.reset()
                for i, (x, y) in enumerate(self.train_data):
                    preds_train = self.model(x)
                    loss = F.cross_entropy(preds_train, y)
                    self.optimizer.zero_grad()
                    loss.backward()
                    self.optimizer.step()
                    obj_train.log(preds_train, y)
                    count += 1
                    if i % 100 == 0:
                        self.logger.add_scalar("train loss (batches)",
                                               loss.item(), count)
                        self.logger.add_scalar("train accuracy (batches)",
                                               obj_train.accuracy, count)
                # Validation pass: no gradient tracking needed.
                self.model.eval()
                with torch.no_grad():
                    for (x, y) in self.val_data:
                        preds_val = self.model(x)
                        obj_val.log(preds_val, y)
                self.model.train()
                count += 1
                # Log validation (not training) accuracy once per epoch.
                self.logger.add_scalar("validation accuracy (epochs)",
                                       obj_val.accuracy, count)
            time_elapsed = datetime.now() - since
            print(
                f"Training and validation for {model_name} completed in {time_elapsed}"
            )

        if patience is not None and patience > 0:
            losses = []
            for e in range(epochs):
                print('epoch', e)
                obj_train.reset()
                obj_val.reset()
                self.model.train()
                for i, (x, y) in enumerate(self.train_data):
                    preds_train = self.model(x)
                    loss = F.cross_entropy(preds_train, y)
                    self.optimizer.zero_grad()
                    loss.backward()
                    self.optimizer.step()
                    obj_train.log(preds_train, y)
                    count += 1
                    if i % 100 == 0:
                        self.logger.add_scalar("train loss (batches)",
                                               loss.item(), count)
                        self.logger.add_scalar("train accuracy (batches)",
                                               obj_train.accuracy, count)
                self.model.eval()
                val_loss = 0.0
                with torch.no_grad():
                    for (x, y) in self.val_data:
                        preds_val = self.model(x)
                        loss = F.cross_entropy(preds_val, y)
                        val_loss += loss.item()  # accumulate a float, not a graph
                        obj_val.log(preds_val, y)
                # Reset the streak when the loss improves, extend it otherwise;
                # stop after `patience` consecutive epochs without improvement.
                if len(losses) == 0 or val_loss < losses[-1]:
                    losses = [val_loss]
                else:
                    losses.append(val_loss)
                if len(losses) > patience:
                    print("Early stopping")
                    break
                count += 1
                # Log validation (not training) accuracy once per epoch.
                self.logger.add_scalar("validation accuracy (epochs)",
                                       obj_val.accuracy, count)

            time_elapsed = datetime.now() - since
            print(
                f"Training and validation for {model_name} completed in {time_elapsed}"
            )

        if patience is not None and patience <= 0:
            raise ValueError("patience must be positive!")
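
The patience bookkeeping above (a list of losses that is reset on improvement) works, but it is easy to misread, and the value of patience is only validated after the epoch loops. A more conventional formulation tracks the best loss seen so far and a counter of epochs since it was set; a minimal sketch, not taken from the project:

# Minimal early-stopping helper (a sketch, not from the project above).
class EarlyStopping:
    def __init__(self, patience: int):
        if patience <= 0:
            raise ValueError("patience must be positive!")
        self.patience = patience
        self.best = float('inf')
        self.bad_epochs = 0

    def step(self, val_loss: float) -> bool:
        """Record one epoch's validation loss; return True to stop."""
        if val_loss < self.best:
            self.best = val_loss
            self.bad_epochs = 0
        else:
            self.bad_epochs += 1
        return self.bad_epochs >= self.patience

Validating patience in the constructor raises the ValueError before any training happens, and the counter compares against the best loss seen so far rather than only against the previous epoch.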
Code Example #6
File: replay.py Project: GowriShankar-Art/supermario
env = SkipFrame(env, skip=4)
env = GrayScaleObservation(env, keep_dim=False)
env = ResizeObservation(env, shape=81)
env = FrameStack(env, num_stack=4)

env.reset()

save_dir = Path('checkpoints') / datetime.datetime.now().strftime('%Y-%m-%dT%H-%M-%S')
save_dir.mkdir(parents=True)

checkpoint = Path('checkpoint/mario_checkpoint.chkpt')
mario = Mario(state_dim=(4, 81, 81), action_dim=env.action_space.n, save_dir=save_dir, checkpoint=checkpoint)
# Replay acts (almost) greedily: pin exploration at its minimum rate.
mario.exploration_rate = mario.exploration_rate_min

logger = MetricLogger(save_dir)

episodes = 100

for e in range(episodes):

    state = env.reset()

    while True:

        # env.render()

        action = mario.act(state)

        next_state, reward, done, info = env.step(action)
Code Example #7
        fixed_noise, cfg.START_EPOCH = load_checkpoint(args.checkpoint, gen,
                                                       opt_gen, scaler_gen,
                                                       dis, opt_dis,
                                                       scaler_dis,
                                                       cfg.LEARNING_RATE)
    else:
        fixed_noise = get_random_noise(cfg.FIXED_NOISE_SAMPLES,
                                       cfg.Z_DIMENSION, device)
        # logger.info("load weights from normal distribution")
        # init_weights(gen)
        # init_weights(dis)

    gen.train()
    dis.train()

    metric_logger = MetricLogger(cfg.PROJECT_VERSION_NAME)

    for epoch in range(cfg.START_EPOCH, cfg.END_EPOCH):
        if args.wgp:
            train_one_epoch_with_gp(gen,
                                    opt_gen,
                                    dis,
                                    opt_dis,
                                    dataloader,
                                    metric_logger,
                                    device,
                                    fixed_noise,
                                    epoch,
                                    fid_model,
                                    fid_score=args.fid)
        else:
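
train_one_epoch_with_gp is not shown in this excerpt. Judging by the wgp flag, it performs a WGAN-GP style update, whose defining ingredient is the gradient penalty of Gulrajani et al. (2017). The sketch below shows that term in its standard form; it is an assumption about what the helper does, not this project's code.

import torch

def gradient_penalty(dis, real, fake, device):
    # Penalize the critic's gradient norm at random interpolates of
    # real and fake batches (the standard WGAN-GP term).
    batch_size = real.size(0)
    alpha = torch.rand(batch_size, 1, 1, 1, device=device)
    interpolates = (alpha * real + (1 - alpha) * fake).requires_grad_(True)
    scores = dis(interpolates)
    grads = torch.autograd.grad(outputs=scores,
                                inputs=interpolates,
                                grad_outputs=torch.ones_like(scores),
                                create_graph=True)[0]
    grads = grads.view(batch_size, -1)
    return ((grads.norm(2, dim=1) - 1) ** 2).mean()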