Example #1
import numpy as np
import torch
from torch.nn.functional import softmax
from sklearn import metrics

# `predictions` and `utils` are project-local helpers (see the sketch of
# `predictions` after this example).


def evaluate_epoch(
    axes,
    tr_loader,
    val_loader,
    te_loader,
    model,
    criterion,
    epoch,
    stats,
    include_test=False,
    update_plot=True,
    multiclass=False,
):
    """Evaluate the `model` on the train and validation set."""
    def _get_metrics(loader):
        y_true, y_pred, y_score = [], [], []
        correct, total = 0, 0
        running_loss = []
        for X, y in loader:
            with torch.no_grad():
                output = model(X)
                predicted = predictions(output.data)
                y_true.append(y)
                y_pred.append(predicted)
                if not multiclass:
                    y_score.append(softmax(output.data, dim=1)[:, 1])
                else:
                    y_score.append(softmax(output.data, dim=1))
                total += y.size(0)
                correct += (predicted == y).sum().item()
                running_loss.append(criterion(output, y).item())
        y_true = torch.cat(y_true)
        y_pred = torch.cat(y_pred)
        y_score = torch.cat(y_score)
        loss = np.mean(running_loss)
        acc = correct / total
        if not multiclass:
            auroc = metrics.roc_auc_score(y_true, y_score)
        else:
            auroc = metrics.roc_auc_score(y_true, y_score, multi_class="ovo")
        return acc, loss, auroc

    train_acc, train_loss, train_auc = _get_metrics(tr_loader)
    val_acc, val_loss, val_auc = _get_metrics(val_loader)

    stats_at_epoch = [
        val_acc,
        val_loss,
        val_auc,
        train_acc,
        train_loss,
        train_auc,
    ]
    if include_test:
        stats_at_epoch += list(_get_metrics(te_loader))

    stats.append(stats_at_epoch)
    utils.log_training(epoch, stats)
    if update_plot:
        utils.update_training_plot(axes, epoch, stats)
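The snippets above and below call a project-local `predictions()` helper that is not shown. A minimal sketch of what it presumably does (a row-wise argmax over raw model outputs); this implementation is an assumption, not the original project's code:

import torch

def predictions(logits):
    # Hypothetical stand-in for the project-local helper: return the
    # predicted class index for each row of raw model outputs.
    return torch.argmax(logits, dim=1)

# Example: predictions(torch.tensor([[0.2, 1.5], [2.0, -1.0]])) -> tensor([1, 0])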
Example #2
def report_training_progress(sess, batch_index, images, labels, loss, acc,
                             clothes):
    """
    Performs inference on the validation set and reports the loss
    to the terminal and the training plot.
    """
    if batch_index % 50 == 0:
        batch_images, batch_labels = clothes.get_batch(partition='validate',
                                                       batch_size=512)

        print("calculating acc and loss")
        print(batch_labels.shape)
        print(batch_images.shape)
        ##########################################################
        '''
        logit = cnn(batch_images.astype('float32'))
        print(logit.shape)
        '''
        valid_acc, valid_loss = sess.run([acc, loss],
                                         feed_dict={
                                             images: batch_images,
                                             labels: batch_labels
                                         })

        utils.log_training(batch_index, valid_loss, valid_acc)
        utils.update_training_plot(batch_index, valid_acc, valid_loss)
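For context, a minimal self-contained sketch of the evaluate-via-`feed_dict` pattern these TF1-style helpers rely on. The graph below (a single dense layer) and the random batch are stand-ins; only the `sess.run([acc, loss], feed_dict=...)` call mirrors the snippet above:

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

# Stand-in graph: one dense layer over flattened 28x28 inputs.
images = tf.placeholder(tf.float32, [None, 784])
labels = tf.placeholder(tf.int64, [None])
logits = tf.layers.dense(images, 10)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                   logits=logits))
acc = tf.reduce_mean(
    tf.cast(tf.equal(tf.argmax(logits, 1), labels), tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Evaluate accuracy and loss on a held-out batch, as the helper does.
    batch_images = np.random.rand(512, 784).astype(np.float32)
    batch_labels = np.random.randint(0, 10, size=512)
    valid_acc, valid_loss = sess.run(
        [acc, loss], feed_dict={images: batch_images, labels: batch_labels})
    print(valid_acc, valid_loss)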
Example #3
def train(self, data_gen, epoch=1, continue_from=None,
          step_save=5000, step_val=1000, step_log=100):
    if not self.built:
        self._build()
    opts = tf.ConfigProto(allow_soft_placement=True)
    init_op = tf.global_variables_initializer()
    saver = tf.train.Saver()
    with tf.Session(config=opts, graph=self.graph) as sess:
        # Initialize variables, or restore them from a checkpoint.
        if continue_from is not None:
            print("Continue training from " + continue_from)
            saver.restore(sess, continue_from)
        else:
            sess.run(init_op)
        writer_train = tf.summary.FileWriter("./logs/train", sess.graph)
        writer_val = tf.summary.FileWriter("./logs/val", sess.graph)
        # Train until the requested number of epochs has been consumed.
        while data_gen.train.epochs_completed < epoch:
            images, labels = data_gen.train.next_batch(self.batch_size)
            gl_step = self.global_step.eval()
            if gl_step % step_val == 0:
                # Training accuracy on the current batch
                acc1 = sess.run(self.acc1, feed_dict={
                    self.input: images, self.labels: labels,
                    self.training: False})
                print("Step {}, training accuracy: {} (top 1)".format(
                    gl_step, acc1))
                # Validation accuracy
                val_images, val_labels = data_gen.val.next_batch(self.val_size)
                acc1, s = sess.run([self.acc1, self.summary_ops],
                                   feed_dict={self.input: val_images,
                                              self.labels: val_labels,
                                              self.training: False})
                writer_val.add_summary(s, global_step=gl_step)
                print("Step {}, validation accuracy: {} (top 1)".format(
                    gl_step, acc1))
            # Training step
            _, s, gl_step, bloss, lr = sess.run(
                [self.train_op, self.summary_ops, self.global_step,
                 self.loss, self.optimizer._learning_rate_tensor],
                feed_dict={self.input: images,
                           self.labels: labels,
                           self.training: True})
            writer_train.add_summary(s, global_step=gl_step)
            if gl_step % step_log == 0:
                log_training(gl_step, bloss, lr)
            if gl_step % step_save == 0 and gl_step > 0:
                print("Saving checkpoint...")
                saver.save(sess, "./checkpoints/{}".format(self.name),
                           global_step=gl_step)
        # Final test-set evaluation
        test_images, test_labels = data_gen.test.next_batch(10000)
        acc1, s = sess.run([self.acc1, self.summary_ops],
                           feed_dict={self.input: test_images,
                                      self.labels: test_labels,
                                      self.training: False})
        print("Step {}, final test accuracy: {} (top 1)".format(
            gl_step, acc1))
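The `continue_from` branch above is the standard TF1 `Saver` round trip. A minimal sketch under the same API; the variable, step number, and paths are illustrative:

import os
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

w = tf.get_variable("w", shape=[2, 2])
saver = tf.train.Saver()
os.makedirs("./checkpoints", exist_ok=True)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # save() returns the full checkpoint path, e.g. "./checkpoints/demo-100"
    path = saver.save(sess, "./checkpoints/demo", global_step=100)

with tf.Session() as sess:
    saver.restore(sess, path)  # what the `continue_from` branch above does
    print(sess.run(w))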
Example #4
def report_training_progress(sess, batch_index, images, labels, keep_prob,
                             loss, acc, dataset):
    """
    Performs inference on a validation batch (with dropout disabled via
    `keep_prob: 1`) and reports the loss to the terminal and the
    training plot.
    """
    if batch_index % 50 == 0:
        batch_images, batch_labels = dataset.get_valid_batch(batch_size=512)
        valid_acc, valid_loss = sess.run([acc, loss],
                                         feed_dict={
                                             images: batch_images,
                                             labels: batch_labels,
                                             keep_prob: 1
                                         })
        utils.log_training(batch_index, valid_loss, valid_acc)
        utils.update_training_plot(batch_index, valid_acc, valid_loss)
Example #5
def train(self, data_gen, data_size, epoch=1, continue_from=None,
          step_save=10000, step_log=100, step_training_log=20):
    if not self.built:
        self._build()
    opts = tf.ConfigProto(allow_soft_placement=True,
                          log_device_placement=True)
    init_op = tf.global_variables_initializer()
    saver = tf.train.Saver()
    with tf.Session(config=opts, graph=self.graph) as sess:
        # Initialize variables, or restore them from a checkpoint.
        if continue_from is not None:
            saver.restore(sess, continue_from)
        else:
            sess.run(init_op)
        writer = tf.summary.FileWriter("./logs", sess.graph)
        images_labels = data_gen(self.batch_size, "train")
        val_images_labels = data_gen(self.batch_size, "val")
        # Generate data and train
        for e in range(epoch):
            print("======== Epoch {} ========".format(e))
            for _ in range(data_size // self.batch_size):
                images, labels = next(images_labels)
                gl_step = self.global_step.eval()
                if gl_step % step_log == 0:
                    # Training accuracies on the current batch
                    acc1, acc5 = sess.run([self.acc1, self.acc5], feed_dict={
                        self.input: images, self.labels: labels,
                        self.training: False})
                    print("Step {}, training accuracy: {} (top 1), {} (top 5)"
                          .format(gl_step, acc1, acc5))
                    # Validation accuracies
                    val_images, val_labels = next(val_images_labels)
                    acc1, acc5 = sess.run([self.acc1, self.acc5], feed_dict={
                        self.input: val_images, self.labels: val_labels,
                        self.training: False})
                    print("Step {}, validation accuracy: {} (top 1), {} (top 5)"
                          .format(gl_step, acc1, acc5))
                # Training step
                _, s, gl_step, bloss = sess.run(
                    [self.train_op, self.summary_ops,
                     self.global_step, self.loss],
                    feed_dict={self.input: images,
                               self.labels: labels,
                               self.training: True})
                writer.add_summary(s, global_step=gl_step)
                if gl_step % step_training_log == 0:
                    log_training(gl_step, bloss)
                if (gl_step + 1) % step_save == 0:
                    print("Saving checkpoint...")
                    saver.save(sess, "./checkpoints/{}".format(self.name),
                               global_step=gl_step)
Example #6
def report_training_progress(sess, batch_index, images, labels, loss, acc,
                             food):
    """
    Performs inference on the validation set and reports the loss
    to the terminal and the training plot.
    """
    if batch_index % 50 == 0:
        batch_images, batch_labels = food.get_batch(
            partition='valid', batch_size=get('rnn.batch_size'))
        valid_acc, valid_loss = sess.run([acc, loss],
                                         feed_dict={
                                             images: batch_images,
                                             labels: batch_labels
                                         })
        utils.log_training(batch_index, valid_loss, valid_acc)
        utils.update_training_plot(batch_index, valid_acc, valid_loss)
Example #7
# Assumes the same imports as Example #1, plus `wandb`.
def evaluate_epoch_pro(axes,
                       tr_loader,
                       val_loader,
                       te_loader,
                       model,
                       criterion,
                       epoch,
                       stats,
                       predictlog,
                       include_test=False,
                       update_plot=True,
                       multiclass=False,
                       probabimode=False):
    """Evaluate the `model` on the train and validation set."""
    def _get_metrics(loader):
        y_true, y_pred, y_score = [], [], []
        correct, total = 0, 0
        running_loss = []
        oneroundlis = []
        disagreeloss = []
        mselossfunc = torch.nn.MSELoss()
        for X, y in loader:
            with torch.no_grad():
                output = model(X)

                # predicted = predictions(output.data)
                # print('the predicted and the true: ', predicted, ' ', y)
                y_true.append(y)

                for idx, y0 in enumerate(y):
                    if np.count_nonzero(y0) > 1:
                        print(y0)
                        disagreeloss.append(
                            mselossfunc(y0, output[idx]).item())

                # if np.count_nonzero(y) > len(y):
                #     print("FOUND!!!!!!!")
                #     print(y)
                # y_pred.append(predicted)
                # oneroundlis.append((predicted, y))
                oneroundlis.append((output, y))
                # if not multiclass:
                #     y_score.append(softmax(output.data, dim=1)[:, 1])
                # else:
                #     y_score.append(softmax(output.data, dim=1))
                total += y.size(0)
                # correct += (predicted == y).sum().item()
                # print(output)
                # print(output.shape)
                # print(type(output))
                # print(type(y))
                # print(y)
                # print(y.shape)
                running_loss.append(criterion(output, y).item())

        predictlog.append(oneroundlis)
        # y_true = torch.cat(y_true)
        # y_pred = torch.cat(y_pred)
        # # y_score = torch.cat(y_score)
        # # print(y_true)
        loss = np.mean(running_loss)
        # acc = correct / total
        # if not multiclass:
        #     auroc = metrics.roc_auc_score(y_true, y_score)
        # else:
        #     auroc = metrics.roc_auc_score(y_true, y_score, multi_class="ovo",labels=[0, 1, 2, 3])
        return loss, np.mean(disagreeloss)

    # train_acc, train_loss, train_auc = _get_metrics(tr_loader)
    # val_acc, val_loss, val_auc = _get_metrics(val_loader)
    # test_acc, test_loss, test_auc = _get_metrics(te_loader)

    train_loss, train_dis = _get_metrics(tr_loader)
    val_loss, val_dis = _get_metrics(val_loader)
    test_loss, test_dis = _get_metrics(te_loader)


    wandb.log({ "train_loss": train_loss, "train_dis": train_dis, \
                 "val_loss": val_loss, "val_dis": val_dis, \
                "test_loss": test_loss, "test_dis": test_dis,
                })

    stats_at_epoch = [
        val_loss,
        train_loss,
        test_loss,
    ]
    if include_test:
        # Reuse the test metrics computed above instead of re-evaluating.
        stats_at_epoch += [test_loss, test_dis]

    stats.append(stats_at_epoch)
    utils.log_training(epoch, stats)
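The `disagreeloss` term above is the MSE between a soft label row and the model output, computed only for examples whose label has more than one nonzero entry (annotator disagreement). A toy illustration with made-up numbers:

import numpy as np
import torch

mse = torch.nn.MSELoss()

# One unanimous (one-hot) label and one split vote.
y = torch.tensor([[1.0, 0.0, 0.0, 0.0],
                  [0.5, 0.5, 0.0, 0.0]])
output = torch.tensor([[0.9, 0.1, 0.0, 0.0],
                       [0.6, 0.3, 0.1, 0.0]])

disagree = [mse(y0, output[idx]).item()
            for idx, y0 in enumerate(y)
            if np.count_nonzero(y0) > 1]
print(np.mean(disagree) if disagree else float("nan"))  # only row 1 counts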
Example #8
def evaluate_epoch(axes,
                   tr_loader,
                   val_loader,
                   te_loader,
                   model,
                   criterion,
                   epoch,
                   stats,
                   prolist,
                   include_test=False,
                   update_plot=True,
                   multiclass=False,
                   probabimode=False):
    """Evaluate the `model` on the train and validation set."""
    def _get_metrics(loader):
        y_true, y_pred, y_score = [], [], []
        correct, total = 0, 0
        running_loss = []
        oneroundlis = []
        mseloss = []
        mselossfunc = torch.nn.MSELoss()
        disagreeloss = []
        for X, y in loader:
            with torch.no_grad():
                output = model(X)
                predicted = predictions(output.data)
                # print('the predicted and the true: ', predicted, ' ', y)
                tmp = softmax(output.data, dim=1)
                oneroundlis.append((output, y))
                mseloss.append(mselossfunc(tmp, y).item())

                for idx, y0 in enumerate(y):
                    if np.count_nonzero(y0) > 1:
                        print(y0)
                        disagreeloss.append(mselossfunc(y0, tmp[idx]))

                y = np.argmax(y, axis=1)
                y_true.append(y)
                y_pred.append(predicted)
                if not multiclass:
                    y_score.append(softmax(output.data, dim=1)[:, 1])
                else:
                    y_score.append(softmax(output.data, dim=1))
                total += y.size(0)
                correct += (predicted == y).sum().item()

                running_loss.append(criterion(output, y).item())

        prolist.append(oneroundlis)
        y_true = torch.cat(y_true)
        y_pred = torch.cat(y_pred)
        y_score = torch.cat(y_score)
        # print(y_true)
        loss = np.mean(mseloss)
        acc = correct / total
        if not multiclass:
            auroc = metrics.roc_auc_score(y_true, y_score)
        else:
            auroc = metrics.roc_auc_score(y_true,
                                          y_score,
                                          multi_class="ovo",
                                          labels=[0, 1, 2, 3])
        disagreeval = np.mean(disagreeloss)
        return acc, loss, auroc, disagreeval

    train_acc, train_loss, train_auc, train_dis = _get_metrics(tr_loader)
    val_acc, val_loss, val_auc, val_dis = _get_metrics(val_loader)
    test_acc, test_loss, test_auc, test_dis = _get_metrics(te_loader)

    wandb.log({"train_acc":train_acc, "train_loss": train_loss, "train_auc": train_auc, "train_dis": train_dis,\
                "val_acc":val_acc, "val_loss": val_loss, "val_auc": val_auc,  "val_dis": val_dis, \
                "test_acc":test_acc, "test_loss": test_loss, "test_auc": test_auc, "test_dis": test_dis
                })

    stats_at_epoch = [
        val_acc,
        val_loss,
        val_auc,
        train_acc,
        train_loss,
        train_auc,
        test_acc,
        test_loss,
        test_auc,
    ]
    if include_test:
        # Reuse the test metrics computed above instead of re-evaluating.
        stats_at_epoch += [test_acc, test_loss, test_auc, test_dis]

    stats.append(stats_at_epoch)
    utils.log_training(epoch, stats)
    if update_plot:
        utils.update_training_plot(axes, epoch, stats)
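For reference, the one-vs-one multiclass AUROC call used above, on toy data; each `y_score` row holds per-class probabilities (e.g. softmax outputs) and must sum to 1:

import numpy as np
from sklearn import metrics

y_true = np.array([0, 1, 2, 3, 1, 0])
y_score = np.array([[0.7, 0.1, 0.1, 0.1],
                    [0.1, 0.6, 0.2, 0.1],
                    [0.1, 0.2, 0.6, 0.1],
                    [0.1, 0.1, 0.2, 0.6],
                    [0.2, 0.5, 0.2, 0.1],
                    [0.5, 0.2, 0.2, 0.1]])
print(metrics.roc_auc_score(y_true, y_score, multi_class="ovo",
                            labels=[0, 1, 2, 3]))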