Example #1
 def evaluate_results(self,
                      image,
                      cage,
                      mask,
                      resulting_cage,
                      gt_mask,
                      results_file='results_cages'):
     utils.mkdir(results_file)
     result_file = results_file + "/" + cage.save_name
     if not resulting_cage:
         print('No convergence reached for the cac-segmenter')
     else:
         resulting_cage.save_cage(result_file)
         res_fold = results_file + "/" + 'result' + cage.spec_name.split(
             "cage_")[-1] + '.png'
         result_mask = utils.create_ground_truth(cage, resulting_cage, mask)
         if result_mask:
             result_mask.save_image(filename=res_fold)
         if gt_mask:
             sorensen_dice_coeff = utils.sorensen_dice_coefficient(
                 gt_mask, result_mask)
             TP, TN, FP, FN = utils.evaluate_segmentation(
                 gt_mask, result_mask)
             print('Sorensen-Dice coefficient', sorensen_dice_coeff)
             print('TP:', TP, 'TN:', TN, 'FP:', FP, 'FN:', FN)
             return sorensen_dice_coeff, TP, TN, FP, FN
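The `utils` helpers above are defined elsewhere in the project. As a point of reference, a minimal sketch of what `sorensen_dice_coefficient` and this binary form of `evaluate_segmentation` presumably compute (assuming the mask objects can be viewed as boolean NumPy arrays) would be:

import numpy as np

def sorensen_dice_coefficient(gt_mask, result_mask):
    # Dice = 2 * |A ∩ B| / (|A| + |B|) over the foreground pixels.
    a = np.asarray(gt_mask, dtype=bool)
    b = np.asarray(result_mask, dtype=bool)
    return 2.0 * np.logical_and(a, b).sum() / (a.sum() + b.sum())

def evaluate_segmentation(gt_mask, result_mask):
    # Per-pixel confusion counts between ground truth and prediction.
    a = np.asarray(gt_mask, dtype=bool)
    b = np.asarray(result_mask, dtype=bool)
    TP = int(np.logical_and(a, b).sum())
    TN = int(np.logical_and(~a, ~b).sum())
    FP = int(np.logical_and(~a, b).sum())
    FN = int(np.logical_and(a, ~b).sum())
    return TP, TN, FP, FN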
Example #2
                gt = load_image(
                    val_output_names[ind])[:args.crop_height, :args.crop_width]
                gt = helpers.reverse_one_hot(
                    helpers.one_hot_it(gt, label_values))

                # st = time.time()

                output_image = sess.run(network,
                                        feed_dict={net_input: input_image})

                output_image = np.array(output_image[0, :, :, :])
                output_image = helpers.reverse_one_hot(output_image)
                out_vis_image = helpers.colour_code_segmentation(
                    output_image, label_values)

                accuracy, class_accuracies, prec, rec, f1, iou = utils.evaluate_segmentation(
                    pred=output_image, label=gt, num_classes=num_classes)

                file_name = utils.filepath_to_name(val_input_names[ind])
                target.write("%s, %f, %f, %f, %f, %f" %
                             (file_name, accuracy, prec, rec, f1, iou))
                for item in class_accuracies:
                    target.write(", %f" % (item))
                target.write("\n")

                scores_list.append(accuracy)
                class_scores_list.append(class_accuracies)
                precision_list.append(prec)
                recall_list.append(rec)
                f1_list.append(f1)
                iou_list.append(iou)
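Here `utils.evaluate_segmentation(pred, label, num_classes)` compares two integer label maps and returns six values. A hedged sketch of that signature (the averaging choices in the original project may differ):

import numpy as np
from sklearn.metrics import f1_score, precision_score, recall_score

def evaluate_segmentation(pred, label, num_classes, score_averaging="weighted"):
    pred = np.asarray(pred).flatten()
    label = np.asarray(label).flatten()

    # Global pixel accuracy plus a per-class accuracy vector.
    accuracy = float(np.mean(pred == label))
    class_accuracies = []
    for c in range(num_classes):
        in_class = label == c
        class_accuracies.append(
            float(np.mean(pred[in_class] == c)) if in_class.any() else 1.0)

    prec = precision_score(label, pred, average=score_averaging, zero_division=0)
    rec = recall_score(label, pred, average=score_averaging, zero_division=0)
    f1 = f1_score(label, pred, average=score_averaging, zero_division=0)

    # Mean IoU over the classes that actually occur in the ground truth.
    ious = []
    for c in np.unique(label):
        inter = np.logical_and(pred == c, label == c).sum()
        union = np.logical_or(pred == c, label == c).sum()
        ious.append(inter / union)
    iou = float(np.mean(ious))

    return accuracy, class_accuracies, prec, rec, f1, iou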
Example #3
def main():
    # transform = myTransform.Compose([myTransform.Resize(512, Image.BILINEAR), myTransform.ToTensor()])
    co_transform = MyCoTransform(augment=True)
    co_transform_val = MyCoTransform(augment=False)
    root = "/home/jjin/adl/cityscapes_dataset"
    dataset_train = cityscapes(root, co_transform)
    loader_train = DataLoader(dataset_train, batch_size=4, shuffle=True, num_workers=4)
    
    dataset_val = cityscapes(root, co_transform_val, subset='val')
    loader_val = DataLoader(dataset_val, batch_size=1, shuffle=False, num_workers=4)
    
    NUM_CLASSES = 20
    # Per-class weights for the 20 Cityscapes training classes;
    # the last class gets weight 0, so it contributes nothing to the loss.
    weight = torch.tensor([
        2.8149201869965, 6.9850029945374, 3.7890393733978, 9.9428062438965,
        9.7702074050903, 9.5110931396484, 10.311357498169, 10.026463508606,
        4.6323022842407, 9.5608062744141, 7.8698215484619, 9.5168733596802,
        10.373730659485, 6.6616044044495, 10.260489463806, 10.287888526917,
        10.289801597595, 10.405355453491, 10.138095855713, 0.0,
    ])
    weight = weight.cuda()
    
    criterion = CrossEntropyLoss2d(weight)
    print(type(criterion))
    model = Net(NUM_CLASSES).cuda()
    
    # print(model)

    optimizer = Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-4)  # scheduler 2
    
    savedir = '/home/jjin/adl/myImplementation/datasets/save'
    automated_log_path = savedir + "/automated_log_encoder.txt"
    modeltxtpath = savedir + "/model_encoder.txt"
    
    start_epoch = 1
    iteration = 1
       
    def load_my_state_dict(model, state_dict):
        # Custom loader: copies only the entries that exist in the model,
        # so a partially matching checkpoint can still be loaded.
        own_state = model.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            own_state[name].copy_(param)
        return model
    weightspath = '/home/jjin/adl/myImplementation/datasets/trained_models/erfnet_pretrained.pth'
    
    for epoch in range(start_epoch, 10):
        # model.train()
        # for step, (images, labels) in enumerate(loader_train):
        #     images = images.cuda()
        #     labels = labels.cuda()
            
        #     inputs = Variable(images)
        #     targets = Variable(labels).long()
        #     outputs = model(inputs)
    
        #     optimizer.zero_grad()
        #     loss = criterion(outputs, targets[:, 0])
        #     loss.backward()
        #     optimizer.step()
        #     iteration = iteration + 1
        #     pred = outputs.data.max(1)[1].cpu().numpy().flatten()
        #     gt = labels.data.cpu().numpy().flatten()
        #     global_accuracy, class_accuracies, prec, rec, f1, iou = evaluate_segmentation(pred, gt, NUM_CLASSES)
        #     print('Epoch {} [{}/{}] Train_loss:{}'.format(epoch, step, len(loader_train), loss.data[0])) # loss.item() = loss.data[0]
        # torch.save(model.state_dict(), '{}_{}.pth'.format(os.path.join("/home/jjin/adl/myImplementation/datasets/save","model"),str(epoch)))   
        #   
        # model.load_state_dict(torch.load('/home/jjin/adl/myImplementation/datasets/trained_models/erfnet_pretrained.pth'))
        model = load_my_state_dict(model, torch.load(weightspath))
        model.eval()
        with torch.no_grad():
            for step_val, (images_val, labels_val) in enumerate(loader_val):

                images_val = images_val.cuda()
                labels_val = labels_val.cuda()

                inputs_val = Variable(images_val)   
                targets_val = Variable(labels_val).long()
                outputs_val = model(inputs_val) 

                loss_val = criterion(outputs_val, targets_val[:, 0])
              #  time_val.append(time.time() - start_time)
            
                pred = outputs_val.data.max(1)[1].cpu().numpy().flatten()
                gt = labels_val.data.cpu().numpy().flatten()
                global_accuracy, class_accuracies, prec, rec, f1, iou = evaluate_segmentation(pred, gt, NUM_CLASSES)

        print('Epoch {} [{}/{}] val_loss:{}'.format(epoch, step_val, len(loader_val), loss_val.item()))  # .item() replaces the old loss.data[0]
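`CrossEntropyLoss2d` and `Net` come from elsewhere in this project. In ERFNet-style training code the 2D cross-entropy loss is usually a thin wrapper that applies `NLLLoss` to a log-softmax over the channel dimension; a minimal sketch under that assumption:

import torch.nn as nn
import torch.nn.functional as F

class CrossEntropyLoss2d(nn.Module):
    def __init__(self, weight=None):
        super().__init__()
        # NLLLoss accepts (N, C, H, W) log-probabilities and (N, H, W) targets.
        self.loss = nn.NLLLoss(weight)

    def forward(self, outputs, targets):
        return self.loss(F.log_softmax(outputs, dim=1), targets)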
Example #4
    def val_out(self,
                sess,
                val_init,
                threshold=0.5,
                output_dir="val",
                epoch=0):
        print("validation starts.")
        save_dir = output_dir + "/%d" % (epoch)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        target = open(save_dir + "/val_scores.csv", 'w')
        target.write(
            "val_name, avg_accuracy, precision, recall, f1 score, mean iou, %s\n"
            % (self.name_string))

        sess.run(val_init)
        #for ind in range(self.num_val):
        scores_list = []
        class_scores_list = []
        precision_list = []
        recall_list = []
        f1_list = []
        iou_list = []
        try:
            while True:
                img, ann, output_image = sess.run(
                    [self.img, self.mask, self.logits])
                img = img[0, :, :, :] * 255

                ann = np.array(ann[0, :, :, :])
                ann = reverse_one_hot(ann)

                path, size = sess.run([self.path, self.size])
                size = (size[0][0], size[0][1])

                output_single_image = np.array(output_image)
                output_single_image = np.array(output_single_image[0, :, :, :])
                output_image = reverse_one_hot(output_single_image)
                out_vis_image = colour_code_segmentation(
                    output_image, self.label_values)

                accuracy, class_accuracies, prec, rec, f1, iou = evaluate_segmentation(
                    pred=output_image, label=ann, num_classes=self.num_classes)

                img_path = path[0].decode('ascii')
                file_name = filepath_to_name(img_path)

                target.write("%s, %f, %f, %f, %f, %f" %
                             (file_name, accuracy, prec, rec, f1, iou))
                for item in class_accuracies:
                    target.write(", %f" % (item))
                target.write("\n")

                mask = colour_code_segmentation(ann, self.label_values)

                mask = cv2.cvtColor(np.uint8(mask), cv2.COLOR_RGB2BGR)
                out_vis_image = cv2.cvtColor(np.uint8(out_vis_image),
                                             cv2.COLOR_RGB2BGR)
                mask, ori_out_vis = resizeImage(mask, out_vis_image, size)

                out_vis_image = cv2.resize(ori_out_vis[:, :, 1],
                                           size,
                                           interpolation=cv2.INTER_NEAREST)
                out_vis_image[out_vis_image < threshold * 255] = 0
                out_vis_image[out_vis_image >= threshold * 255] = 255

                save_ori_img = cv2.cvtColor(np.uint8(img), cv2.COLOR_RGB2BGR)
                save_ori_img = cv2.resize(save_ori_img,
                                          size,
                                          interpolation=cv2.INTER_NEAREST)
                transparent_image = np.append(
                    np.array(save_ori_img)[:, :, 0:3],
                    out_vis_image[:, :, None], axis=-1)
                # transparent_image = Image.fromarray(transparent_image)

                cv2.imwrite(save_dir + "/%s_img.jpg" % (file_name),
                            save_ori_img)
                cv2.imwrite(save_dir + "/%s_ann.png" % (file_name), mask)
                cv2.imwrite(save_dir + "/%s_ori_pred.png" % (file_name),
                            ori_out_vis)
                cv2.imwrite(save_dir + "/%s_filter_pred.png" % (file_name),
                            out_vis_image)
                cv2.imwrite(save_dir + "/%s_mat.png" % (file_name),
                            transparent_image)

                scores_list.append(accuracy)
                class_scores_list.append(class_accuracies)
                precision_list.append(prec)
                recall_list.append(rec)
                f1_list.append(f1)
                iou_list.append(iou)
        except tf.errors.OutOfRangeError:
            avg_score = np.mean(scores_list)
            class_avg_scores = np.mean(class_scores_list, axis=0)
            avg_precision = np.mean(precision_list)
            avg_recall = np.mean(recall_list)
            avg_f1 = np.mean(f1_list)
            avg_iou = np.mean(iou_list)

            print("\nAverage validation accuracy for epoch # %04d = %f" %
                  (epoch, avg_score))
            print("Average per class validation accuracies for epoch # %04d:" %
                  (epoch))
            for index, item in enumerate(class_avg_scores):
                print("%s = %f" % (self.name_list[index], item))
            print("Validation precision = ", avg_precision)
            print("Validation recall = ", avg_recall)
            print("Validation F1 score = ", avg_f1)
            print("Validation IoU score = ", avg_iou)
Example #5
            # Do the validation on a small set of validation images
            for ind in val_indices:
                input_image = np.expand_dims(np.float32(load_image(val_input_names[ind])[:args.crop_height, :args.crop_width]), axis=0) / 255.0
                gt = load_image(val_output_names[ind])[:args.crop_height, :args.crop_width]
                gt_tmp = helpers.one_hot_it(label=gt, label_values=label_values)
                gt_image = np.expand_dims(np.float32(gt_tmp), axis=0)
                gt = helpers.reverse_one_hot(gt_tmp)

                # st = time.time()
                # output_image = sess.run(network, feed_dict={net_input: input_image})
                output_image, current = sess.run([network, loss], feed_dict={net_input: input_image, net_output: gt_image})
                output_image = np.array(output_image[0, :, :, :])
                output_image = helpers.reverse_one_hot(output_image)
                out_vis_image = helpers.colour_code_segmentation(output_image, label_values)

                accuracy, class_accuracies, prec, rec, f1, iou = utils.evaluate_segmentation(pred=output_image, label=gt, num_classes=num_classes)
            
                file_name = utils.filepath_to_name(val_input_names[ind])
                target.write("%s, %f, %.2f, %f, %f, %f, %f, %f"%(file_name, current,time.time()-val_st, accuracy, prec, rec, f1, iou))
                for item in class_accuracies:
                    target.write(", %f"%(item))
                target.write("\n")

                scores_list.append(accuracy)
                class_scores_list.append(class_accuracies)
                precision_list.append(prec)
                recall_list.append(rec)
                f1_list.append(f1)
                iou_list.append(iou)
                val_loss_list.append(current)
                #print ("CURRENT LOSS: ",current) ## add val loss
Example #6
File: main.py Project: zfxu/Automatting
def val():
    # Create directories if needed
    if not os.path.isdir(cfg.base_dir + "%s/%s" % ("result", "Val")):
        os.makedirs(cfg.base_dir + "%s/%s" % ("result", "Val"))

    target = open(cfg.base_dir + "%s/%s/val_scores.csv" % ("result", "Val"),
                  'w')
    target.write(
        "val_name, avg_accuracy, precision, recall, f1 score, mean iou, %s\n" %
        (class_names_string))
    scores_list = []
    class_scores_list = []
    precision_list = []
    recall_list = []
    f1_list = []
    iou_list = []
    run_times_list = []

    # Run validation on ALL validation images
    for ind in range(len(val_input_names)):
        sys.stdout.write("\rRunning test image %d / %d" %
                         (ind + 1, len(val_input_names)))
        sys.stdout.flush()

        input_image = np.expand_dims(np.float32(
            dataset.load_image(val_input_names[ind])[:cfg.height, :cfg.width]),
                                     axis=0) / 255.0
        gt = dataset.load_image(val_output_names[ind])[:cfg.height, :cfg.width]
        gt = helpers.reverse_one_hot(helpers.one_hot_it(gt, label_values))

        st = time.time()
        output_image = sess.run(network, feed_dict={net_input: input_image})

        run_times_list.append(time.time() - st)

        output_image = np.array(output_image[0, :, :, :])
        output_image = helpers.reverse_one_hot(output_image)
        out_vis_image = helpers.colour_code_segmentation(
            output_image, label_values)

        accuracy, class_accuracies, prec, rec, f1, iou = utils.evaluate_segmentation(
            pred=output_image, label=gt, num_classes=num_classes)

        file_name = utils.filepath_to_name(val_input_names[ind])
        target.write("%s, %f, %f, %f, %f, %f" %
                     (file_name, accuracy, prec, rec, f1, iou))
        for item in class_accuracies:
            target.write(", %f" % (item))
        target.write("\n")

        scores_list.append(accuracy)
        class_scores_list.append(class_accuracies)
        precision_list.append(prec)
        recall_list.append(rec)
        f1_list.append(f1)
        iou_list.append(iou)

        gt = helpers.colour_code_segmentation(gt, label_values)

        cv2.imwrite(
            cfg.base_dir + "%s/%s/%s_pred.png" % ("result", "Val", file_name),
            cv2.cvtColor(np.uint8(out_vis_image), cv2.COLOR_RGB2BGR))
        cv2.imwrite(
            cfg.base_dir + "%s/%s/%s_gt.png" % ("result", "Val", file_name),
            cv2.cvtColor(np.uint8(gt), cv2.COLOR_RGB2BGR))

    target.close()

    avg_score = np.mean(scores_list)
    class_avg_scores = np.mean(class_scores_list, axis=0)
    avg_precision = np.mean(precision_list)
    avg_recall = np.mean(recall_list)
    avg_f1 = np.mean(f1_list)
    avg_iou = np.mean(iou_list)
    avg_time = np.mean(run_times_list)
    print("Average test accuracy = ", avg_score)
    print("Average per class test accuracies = \n")
    for index, item in enumerate(class_avg_scores):
        print("%s = %f" % (class_names_list[index], item))
    print("Average precision = ", avg_precision)
    print("Average recall = ", avg_recall)
    print("Average F1 score = ", avg_f1)
    print("Average mean IoU score = ", avg_iou)
    print("Average run time = ", avg_time)
Example #7
File: main.py Project: zfxu/Automatting
def train():
    if cfg.class_balancing:
        print("Computing class weights for trainlabel ...")
        class_weights = utils.compute_class_weights(
            labels_dir=train_output_names, label_values=label_values)
        weights = tf.reduce_sum(class_weights * net_output, axis=-1)
        unweighted_loss = None
        unweighted_loss = tf.nn.softmax_cross_entropy_with_logits_v2(
            logits=network, labels=net_output)
        losses = unweighted_loss * class_weights
    else:
        losses = tf.nn.softmax_cross_entropy_with_logits_v2(logits=network,
                                                            labels=net_output)
    loss = tf.reduce_mean(losses)

    opt = tf.train.AdamOptimizer(cfg.lr).minimize(
        loss, var_list=[var for var in tf.trainable_variables()])

    sess.run(tf.global_variables_initializer())
    utils.count_params()

    # If a pre-trained ResNet is required, load the weights.
    # This must be done AFTER the variables are initialized with sess.run(tf.global_variables_initializer())
    if init_fn is not None:
        init_fn(sess)

    avg_scores_per_epoch = []
    avg_loss_per_epoch = []

    # Which validation images do we want
    val_indices = []
    num_vals = min(cfg.num_val_images, len(val_input_names))

    # Set random seed to make sure models are validated on the same validation images.
    # So you can compare the results of different models more intuitively.
    random.seed(16)
    val_indices = random.sample(range(0, len(val_input_names)), num_vals)

    # Do the training here
    for epoch in range(0, cfg.num_epochs):
        current_losses = []
        cnt = 0

        # Equivalent to shuffling
        id_list = np.random.permutation(len(train_input_names))

        num_iters = int(np.floor(len(id_list) / cfg.batch_size))
        st = time.time()
        epoch_st = time.time()

        for i in range(num_iters):
            # st=time.time()
            input_image_batch = []
            output_image_batch = []

            # Collect a batch of images
            for j in range(cfg.batch_size):
                index = i * cfg.batch_size + j
                img_id = id_list[index]
                input_image = dataset.load_image(train_input_names[img_id])
                output_image = dataset.load_image(train_output_names[img_id])

                h, w, _ = input_image.shape
                new_h, new_w = dataset.getTrainSize(h, w)

                with tf.device('/cpu:0'):
                    input_image, output_image = dataset.data_augmentation(
                        input_image, output_image, new_h, new_w)

                    # Prep the data. Make sure the labels are in one-hot format
                    input_image = np.float32(input_image) / 255.0
                    output_image = np.float32(
                        helpers.one_hot_it(label=output_image,
                                           label_values=label_values))

                    input_image_batch.append(
                        np.expand_dims(input_image, axis=0))
                    output_image_batch.append(
                        np.expand_dims(output_image, axis=0))

            # ***** THIS CAUSES A MEMORY LEAK AS NEW TENSORS KEEP GETTING CREATED *****
            # input_image = tf.image.crop_to_bounding_box(input_image, offset_height=0, offset_width=0,
            #                                               target_height=args.crop_height, target_width=args.crop_width).eval(session=sess)
            # output_image = tf.image.crop_to_bounding_box(output_image, offset_height=0, offset_width=0,
            #                                               target_height=args.crop_height, target_width=args.crop_width).eval(session=sess)
            # ***** THIS CAUSES A MEMORY LEAK AS NEW TENSORS KEEP GETTING CREATED *****

            # memory()
            # print(cfg.batch_size)
            if cfg.batch_size == 1:
                input_image_batch = input_image_batch[0]
                output_image_batch = output_image_batch[0]
            else:
                input_image_batch = np.squeeze(
                    np.stack(input_image_batch, axis=1))
                output_image_batch = np.squeeze(
                    np.stack(output_image_batch, axis=1))

            # print(input_image_batch.shape)
            # Do the training
            _, current = sess.run([opt, loss],
                                  feed_dict={
                                      net_input: input_image_batch,
                                      net_output: output_image_batch
                                  })
            current_losses.append(current)
            cnt = cnt + cfg.batch_size
            if cnt % 20 == 0:
                string_print = "Epoch = %d Count = %d Current_Loss = %.4f Time = %.2f" % (
                    epoch, cnt, current, time.time() - st)
                utils.LOG(string_print)
                st = time.time()

        mean_loss = np.mean(current_losses)
        avg_loss_per_epoch.append(mean_loss)

        # Create directories if needed
        if not os.path.isdir(cfg.base_dir + "%s/%s/%04d" %
                             ("checkpoints", cfg.model, epoch)):
            os.makedirs(cfg.base_dir + "%s/%s/%04d" %
                        ("checkpoints", cfg.model, epoch))

        # Save latest checkpoint to same file name
        print("Saving latest checkpoint")
        saver.save(sess, model_checkpoint_name)

        if len(val_indices) != 0 and epoch % cfg.checkpoint_step == 0:
            print("Saving checkpoint for this epoch")
            saver.save(
                sess, cfg.base_dir + "%s/%s/%04d/model.ckpt" %
                ("checkpoints", cfg.model, epoch))

        if epoch % cfg.validation_step == 0:
            print("Performing validation")
            target = open(
                cfg.base_dir + "%s/%s/%04d/val_scores.csv" %
                ("checkpoints", cfg.model, epoch), 'w')
            target.write(
                "val_name, avg_accuracy, precision, recall, f1 score, mean iou, %s\n"
                % (class_names_string))

            scores_list = []
            class_scores_list = []
            precision_list = []
            recall_list = []
            f1_list = []
            iou_list = []

            # Do the validation on a small set of validation images
            for ind in val_indices:
                input_image = dataset.load_image(val_input_names[ind])
                output_image = dataset.load_image(val_output_names[ind])

                h, w, _ = input_image.shape
                new_h, new_w = dataset.getTrainSize(h, w)

                input_image, output_image = utils.random_crop(
                    input_image, output_image, new_h, new_w)

                input_image = np.expand_dims(np.float32(input_image),
                                             axis=0) / 255.0

                gt = helpers.reverse_one_hot(
                    helpers.one_hot_it(output_image, label_values))

                # st = time.time()

                output_image = sess.run(network,
                                        feed_dict={net_input: input_image})

                output_image = np.array(output_image[0, :, :, :])
                output_image = helpers.reverse_one_hot(output_image)
                out_vis_image = helpers.colour_code_segmentation(
                    output_image, label_values)

                accuracy, class_accuracies, prec, rec, f1, iou = utils.evaluate_segmentation(
                    pred=output_image, label=gt, num_classes=num_classes)

                file_name = utils.filepath_to_name(val_input_names[ind])
                target.write("%s, %f, %f, %f, %f, %f" %
                             (file_name, accuracy, prec, rec, f1, iou))
                for item in class_accuracies:
                    target.write(", %f" % (item))
                target.write("\n")

                scores_list.append(accuracy)
                class_scores_list.append(class_accuracies)
                precision_list.append(prec)
                recall_list.append(rec)
                f1_list.append(f1)
                iou_list.append(iou)

                gt = helpers.colour_code_segmentation(gt, label_values)

                file_name = os.path.basename(val_input_names[ind])
                file_name = os.path.splitext(file_name)[0]
                cv2.imwrite(
                    cfg.base_dir + "%s/%s/%04d/%s_pred.png" %
                    ("checkpoints", cfg.model, epoch, file_name),
                    cv2.cvtColor(np.uint8(out_vis_image), cv2.COLOR_RGB2BGR))
                cv2.imwrite(
                    cfg.base_dir + "%s/%s/%04d/%s_gt.png" %
                    ("checkpoints", cfg.model, epoch, file_name),
                    cv2.cvtColor(np.uint8(gt), cv2.COLOR_RGB2BGR))

            target.close()

            avg_score = np.mean(scores_list)
            class_avg_scores = np.mean(class_scores_list, axis=0)
            avg_scores_per_epoch.append(avg_score)
            avg_precision = np.mean(precision_list)
            avg_recall = np.mean(recall_list)
            avg_f1 = np.mean(f1_list)
            avg_iou = np.mean(iou_list)

            print("\nAverage validation accuracy for epoch # %04d = %f" %
                  (epoch, avg_score))
            print("Average per class validation accuracies for epoch # %04d:" %
                  (epoch))
            for index, item in enumerate(class_avg_scores):
                print("%s = %f" % (class_names_list[index], item))
            print("Validation precision = ", avg_precision)
            print("Validation recall = ", avg_recall)
            print("Validation F1 score = ", avg_f1)
            print("Validation IoU score = ", avg_iou)

        epoch_time = time.time() - epoch_st
        remain_time = epoch_time * (cfg.num_epochs - 1 - epoch)
        m, s = divmod(remain_time, 60)
        h, m = divmod(m, 60)
        if remain_time != 0:
            train_time = "Remaining training time = %d hours %d minutes %d seconds\n" % (
                h, m, s)
        else:
            train_time = "Remaining training time : Training completed.\n"
        utils.LOG(train_time)
        scores_list = []

    utils.drawLine(range(cfg.num_epochs),
                   avg_scores_per_epoch,
                   cfg.base_dir + 'checkpoints/' + cfg.model +
                   '/accuracy_vs_epochs.png',
                   title='Average validation accuracy vs epochs',
                   xlabel='Epoch',
                   ylabel='Avg. val. accuracy')
    utils.drawLine(range(cfg.num_epochs),
                   avg_loss_per_epoch,
                   cfg.base_dir + 'checkpoints/' + cfg.model +
                   '/loss_vs_epochs.png',
                   title='Average loss vs epochs',
                   xlabel='Epoch',
                   ylabel='Current loss')
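`utils.compute_class_weights`, used in the class-balancing branch at the top of `train()`, is not shown here. A sketch assuming simple inverse-frequency weighting over the label images (the body below reads labels with OpenCV and is illustrative only; the real helper may differ):

import cv2
import numpy as np

def compute_class_weights(labels_dir, label_values):
    # labels_dir is a list of label-image paths, per the call site above.
    # Count pixels per class over every image, then weight each class by
    # its normalized inverse frequency.
    num_classes = len(label_values)
    counts = np.zeros(num_classes, dtype=np.float64)
    for path in labels_dir:
        label = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
        for c, colour in enumerate(label_values):
            counts[c] += np.all(label == colour, axis=-1).sum()
    counts = np.maximum(counts, 1.0)  # guard against empty classes
    return counts.sum() / (num_classes * counts)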