Example no. 1
0
    def evaluate(self) -> MetricLogger:
        """Run the model over the test split in eval mode and return
        the accumulated metrics.

        Returns:
            MetricLogger: logger populated with one prediction/target
            pair per test batch (one_hot=True).
        """
        metrics = MetricLogger(one_hot=True)
        self.model.eval()
        for batch_x, batch_y in self.test_data:
            outputs = self.model.forward(batch_x)
            metrics.log(outputs, batch_y)
        return metrics
Example no. 2
0
 def train(self):
     """Fit the wrapped estimator on the training split, then print
     accuracy/precision/recall measured on the validation split."""
     name = type(self.model).__name__
     begin = datetime.now()
     print(f"Training is started for {name} model at {begin.strftime('%H:%M:%S')}")
     self.model.fit(self.X_train, self.y_train)
     elapsed = datetime.now() - begin
     print(f"Training for {name} model completed in {elapsed}")
     metrics = MetricLogger(one_hot=False)
     metrics.log(self.model.predict(self.X_val), self.y_val)
     # Report each metric on its own line (sep='\n' between label and value).
     for label, value in (("Accuracy", metrics.accuracy),
                          ("Precision", metrics.precision),
                          ("Recall", metrics.recall)):
         print(f"{label} of {name} model:", value, sep='\n')
Example no. 3
0
 def evaluate(self):
     """Score the estimator on the held-out test split.

     Returns the MetricLogger holding the test predictions/targets.
     """
     metrics = MetricLogger(one_hot=False)
     metrics.log(self.model.predict(self.X_test), self.y_test)
     return metrics
Example no. 4
0
    def train(self, epochs: int, patience=None):
        """Train the model for up to `epochs` epochs with optional early stopping.

        Args:
            epochs: maximum number of training epochs.
            patience: if given, stop after this many consecutive epochs
                without validation-loss improvement; must be positive.

        Raises:
            ValueError: if `patience` is given and not positive.

        Fixes vs. the previous revision:
        - "validation accuracy (epochs)" now logs `obj_val.accuracy`
          (it previously logged the training accuracy by mistake).
        - validation loss is accumulated with `.item()`, so autograd
          graphs are not retained across the validation loop.
        - early stopping now fires after exactly `patience` consecutive
          non-improving epochs, compared against the best loss so far
          (previously an off-by-one against the last kept loss).
        - `patience <= 0` is rejected up front, before any work.
        - `model.train()` / `model.eval()` are toggled in both modes,
          and the two duplicated branches are unified via helpers.
        """
        # Fail fast on an invalid patience value.
        if patience is not None and patience <= 0:
            raise ValueError("patience must be positive!")

        model_name = type(self.model).__name__
        since = datetime.now()
        start_time = since.strftime("%H:%M:%S")
        print(
            f"Training and Validating is started for {model_name} at {start_time}"
        )

        count = 0
        obj_train = MetricLogger(one_hot=True)
        obj_val = MetricLogger(one_hot=True)
        if patience is None:
            print("patience is None")

        best_loss = None      # lowest summed validation loss seen so far
        stale_epochs = 0      # consecutive epochs without improvement

        for e in range(epochs):
            print('epoch', e)
            obj_train.reset()
            obj_val.reset()
            count = self._run_train_epoch(obj_train, count)
            val_loss = self._run_validation(obj_val)
            count += 1
            # BUG FIX: previously logged obj_train.accuracy here.
            self.logger.add_scalar("validation accuracy (epochs)",
                                   obj_val.accuracy, count)
            if patience is not None:
                if best_loss is None or val_loss < best_loss:
                    best_loss = val_loss
                    stale_epochs = 0
                else:
                    stale_epochs += 1
                if stale_epochs >= patience:
                    print("Early stopping")
                    break

        time_elapsed = datetime.now() - since
        print(
            f"Training and Validating for {model_name} completed in {time_elapsed}"
        )

    def _run_train_epoch(self, obj_train, count):
        """One optimizer pass over self.train_data.

        Logs batch loss/accuracy every 100 batches; returns the updated
        global batch counter used as the tensorboard step.
        """
        self.model.train()
        for i, (x, y) in enumerate(self.train_data):
            preds = self.model.forward(x)
            loss = F.cross_entropy(preds, y)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            obj_train.log(preds, y)
            count += 1
            if i % 100 == 0:
                self.logger.add_scalar("train loss (batches)",
                                       loss.item(), count)
                self.logger.add_scalar("train accuracy (batches)",
                                       obj_train.accuracy, count)
        return count

    def _run_validation(self, obj_val):
        """Forward pass over self.val_data in eval mode.

        Returns the summed validation loss as a plain float (`.item()`
        so no computation graph is kept alive).
        """
        self.model.eval()
        total_loss = 0.0
        for x, y in self.val_data:
            preds = self.model.forward(x)
            total_loss += F.cross_entropy(preds, y).item()
            obj_val.log(preds, y)
        return total_loss