Example No. 1
    def show_statistics(self) -> None:
        """
        Shows the bar charts of the document distributions (overall, for each media outlet, and for each party).
        """
        plt.close("all")
        self.clear_plots(clear_plot_array=True)
        self.current_plot_type = PlotType.STATISTICS
        self.current_plot_index = 0

        # get currently enabled parties and media
        party_list = self.get_parties()
        media_list = self.get_media()

        # filter time
        self.configure_dataframe()

        basic_figure = Visualization.get_basic_statistic_bar_plot(
            self.df_paragraphs_configured, party_list, media_list)
        media_figures = Visualization.get_media_statistics_bar_plots(
            self.df_paragraphs_configured, party_list, media_list)
        party_figures = Visualization.get_party_statistics_bar_plots(
            self.df_paragraphs_configured, party_list, media_list)

        figures = [basic_figure] + media_figures + party_figures

        for fig in figures:
            bar1 = FigureCanvasTkAgg(fig, self.gui)
            self.plots.append(bar1)

        self.show_diagram(first_image=True)
        self.next_button["state"] = "normal"
Example No. 2
def tester(cfg):
    print('testing')
    dataloader_test, dataset_size_test = data.make_dataloader(cfg,
                                                              is_train=False)

    model = modeling.build(cfg)

    # Resolve a leading "." in the checkpoint path to an absolute path;
    # replace only the first occurrence so dots elsewhere in the path survive.
    if cfg.TEST.MODEL.startswith('.'):
        load_path = cfg.TEST.MODEL.replace(".", os.path.realpath("."), 1)
    else:
        load_path = cfg.TEST.MODEL

    # The freshly built model is replaced by the serialized checkpoint.
    model = torch.load(load_path)
    model.cuda()

    vis_test = Visualization(cfg, dataset_size_test, is_train=False)

    writer_path = os.path.join(cfg.VISUALIZATION_DIRECTORY,
                               cfg.EXPERIMENT_NAME)
    writer = SummaryWriter(writer_path)

    total_iterations = 0
    total_iterations_val = 0

    model.eval()
    epoch = 1
    for iteration, batch in enumerate(dataloader_test):
        # Unpack the batch: sample index, video features, query tokens,
        # ground-truth spans, localization masks, and timing metadata.
        index = batch[0]

        videoFeat = batch[1].cuda()
        videoFeat_lengths = batch[2].cuda()

        tokens = batch[3].cuda()
        tokens_lengths = batch[4].cuda()

        start = batch[5].cuda()
        end = batch[6].cuda()

        localiz = batch[7].cuda()
        localiz_lengths = batch[8]

        time_starts = batch[9]
        time_ends = batch[10]

        factors = batch[11]
        fps = batch[12]
        frame_start = batch[13]
        frame_end = batch[14]

        loss, individual_loss, pred_start, pred_end, attention, atten_loss = model(
            videoFeat, videoFeat_lengths, tokens, tokens_lengths, start, end,
            localiz, frame_start, frame_end)
        vis_test.run(index, pred_start,
                     pred_end, start, end, videoFeat_lengths, epoch,
                     loss.detach(), individual_loss, attention,
                     atten_loss, time_starts, time_ends, factors, fps)
        total_iterations_val += 1
    vis_test.plot(epoch)  # aggregate and plot the final test metrics
Example No. 3
    def classification_report(
        self, test_loader, target_names=None, binary=False, visualize=False
    ):
        print("-" * 10, "Classification Report", "-" * 10)
        print(f"loss: {self.validation(test_loader)}")
        model = self.model
        model.eval().to(self.device)

        y_pred, y_true = [], []
        with torch.no_grad():  # gradients are not needed during evaluation
            for data in test_loader:
                inputs, labels = data
                inputs = inputs.to(self.device)
                labels = labels.to(self.device).long()
                outputs = model(inputs)
                if not binary:
                    # multi-class: take the arg-max over the class scores
                    _, predicted = torch.max(outputs, 1)
                else:
                    # binary: round the (assumed sigmoid) outputs to 0/1
                    predicted = torch.round(outputs)

                y_true += labels.squeeze().cpu().tolist()
                y_pred += predicted.squeeze().cpu().tolist()

        if visualize:
            vis = Visualization(y_true, y_pred, target_names)
            vis.confusion_matrix()
            vis.classification_report()
            vis.show()
        report = classification_report(y_true, y_pred, target_names=target_names)
        print(report)
        return report
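A short usage sketch for this method (hedged: `trainer`, the loader, and the label names below are hypothetical stand-ins for whatever instance defines classification_report):

    # Hypothetical usage: `test_loader` yields (inputs, labels) batches.
    report = trainer.classification_report(
        test_loader,
        target_names=["negative", "positive"],  # illustrative label names
        binary=True,
        visualize=True,  # also draws the confusion matrix via Visualization
    )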
Example No. 4
    def show_time_course_for_custom_word(self):
        """
        Shows the time course plots of a custom word, filtered either by media or by party.
        """
        self.clear_plots(clear_plot_array=True)
        self.current_plot_type = PlotType.TIME_COURSE_CUSTOM
        self.configure_dataframe()
        self.current_plot_index = 0
        if self.date_check.get() == 0:
            self.check_filter_date.toggle()
            self.enable_date_setting()
        if self.filter_criteria.get() == "media":
            filter_list = self.get_media()
        else:
            filter_list = self.get_parties()
        initial_start_date = datetime.datetime.strptime(
            self.entry_date_from.get(), "%Y-%m-%d")
        initial_end_date = datetime.datetime.strptime(self.entry_date_to.get(),
                                                      "%Y-%m-%d")
        df_image = self.time_course.get_time_course_custom_word(
            filter_list,
            self.word.get(),
            self.filter_criteria.get(),
            initial_start_date,
            initial_end_date,
            self.df_paragraphs_configured,
        )

        figures = Visualization.get_time_course_plots_custom_word(df_image)
        for fig in figures:
            bar1 = FigureCanvasTkAgg(fig, self.gui)
            self.plots.append(bar1)
        self.show_diagram(first_image=True)
Example No. 5
    def show_sentiment(self, by_party: bool) -> None:
        """
        Shows the pie charts with the sentiment, grouped either by party or by media outlet.
        :param by_party: if True, shows the sentiment grouped by party, i.e. for each party
                         3 plots with the sentiment of the media towards that party.
                         Otherwise grouped by media, i.e. for each media outlet 6 plots with
                         its sentiment towards the different parties.
        """
        plt.close("all")
        self.clear_plots(clear_plot_array=True)
        if by_party:
            self.current_plot_type = PlotType.SENTIMENT_PARTY
        else:
            self.current_plot_type = PlotType.SENTIMENT_OUTLET
        self.current_plot_index = 0
        # get currently enabled parties and media
        party_list = self.get_parties()
        media_list = self.get_media()
        # filter time
        self.configure_dataframe()
        # get the pie charts from the visualization class
        figures = Visualization.get_sentiment_pie_charts(
            self.df_paragraphs_configured,
            by_party=by_party,
            parties=party_list,
            media=media_list)
        # get a canvas for each figure and store it in the plots array
        for fig in figures:
            bar1 = FigureCanvasTkAgg(fig, self.gui)
            self.plots.append(bar1)
        # display the first diagram
        self.show_diagram(first_image=True)
        self.next_button["state"] = "normal"
Example No. 6
    def visualize(*args, **kwargs):
        """Visualization Wrapper.

        Returns a `Visualization` instance with the given number of subplots.

        Args:
            subplots (int): Number of subplots.

        Returns:
            Visualization: Dynamic plotter handle.

        """
        return Visualization(*args, **kwargs)
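A minimal usage sketch (hedged: the wrapper simply forwards its arguments, so the `subplots` keyword below reaches the `Visualization` constructor as the docstring describes; the two-subplot value is illustrative):

    # Create a dynamic plotter handle with two subplots via the wrapper.
    vis = visualize(subplots=2)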
Example No. 7
    def show_time_course(self):
        """
        Shows the time course plots of the top terms for the selected parties.
        """
        self.clear_plots(clear_plot_array=True)
        self.current_plot_type = PlotType.TIME_COURSE
        self.configure_dataframe()
        self.current_plot_index = 0
        if self.date_check.get() == 0:
            self.check_filter_date.toggle()
            self.enable_date_setting()
        party_list, media_list = self._topic_setup()
        df_top_terms = self.keyword_extraction.get_top_terms_for_party(
            parties=party_list)
        initial_start_date = datetime.datetime.strptime(
            self.entry_date_from.get(), "%Y-%m-%d")
        initial_end_date = datetime.datetime.strptime(self.entry_date_to.get(),
                                                      "%Y-%m-%d")
        df_image = self.time_course.get_time_course(
            party_list, media_list, df_top_terms, initial_start_date,
            initial_end_date, self.df_paragraphs_configured)
        # draw plots for the time window
        figures = Visualization.get_time_course_plots(df_image)
        for fig in figures:
            bar1 = FigureCanvasTkAgg(fig, self.gui)
            self.plots.append(bar1)
        self.show_diagram(first_image=True)
Example No. 8
                cropFaceLandmark = image_np[
                    shape_detect['top']:shape_detect['bottom'],
                    shape_detect['left']:shape_detect['right']]
                # cropFaceLandmark = image_np[min(yList): max(yList), min(xList):max(xList)]
                # cv2.imshow('cropFaceLandmark', cropFaceLandmark)

                # ====================
                #   Face Recognition
                # ====================
                # cropFaceDetection is produced by the face-detection stage
                # earlier in the original script (not shown in this excerpt).
                predict, predict_acc = faceRecognition.run(cropFaceDetection)
                predict2, predict_acc2 = faceRecognition.run(cropFaceLandmark)

                # =================
                #   Visualization
                # =================
                visualization = Visualization(frame=image_np)
                min_score_thresh = 30
                if (predict_acc >= min_score_thresh) and (predict_acc2 >=
                                                          min_score_thresh):
                    if predict_acc > predict_acc2:
                        visualization.prediction_box(predict=predict,
                                                     predict_acc=predict_acc,
                                                     detect_point=detect)
                        visualization.face_detection(detect_point=detect)
                        visualization.shape_detection(
                            shape_point=shapePointQueue,
                            detect_point=shape_detect)
                    elif predict_acc < predict_acc2:
                        # Note: when the two scores tie, neither branch runs
                        # and no prediction box is drawn.
                        visualization.prediction_box(predict=predict2,
                                                     predict_acc=predict_acc2,
                                                     detect_point=detect)
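Design note: the detector crop and the landmark crop are classified independently, and the higher-confidence prediction is drawn, provided both scores pass min_score_thresh.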
Example No. 9
def trainer(cfg):
    print('trainer')
    dataloader_train, dataset_size_train = data.make_dataloader(cfg,
                                                                is_train=True)
    dataloader_test, dataset_size_test = data.make_dataloader(cfg,
                                                              is_train=False)

    model = modeling.build(cfg)
    model.cuda()
    #model = torch.load("/home/crodriguezo/projects/phd/moment-localization-with-NLP/mlnlp_lastversion/checkpoints/anet_config7/model_epoch_80")
    optimizer = solver.make_optimizer(cfg, model)

    vis_train = Visualization(cfg, dataset_size_train)
    vis_test = Visualization(cfg, dataset_size_test, is_train=False)

    writer_path = os.path.join(cfg.VISUALIZATION_DIRECTORY,
                               cfg.EXPERIMENT_NAME)
    writer = SummaryWriter(writer_path)

    total_iterations = 0
    total_iterations_val = 0

    for epoch in range(cfg.EPOCHS):
        print("Epoch {}".format(epoch))
        model.train()
        for iteration, batch in enumerate(dataloader_train):
            index = batch[0]

            videoFeat = batch[1].cuda()
            videoFeat_lengths = batch[2].cuda()

            tokens = batch[3].cuda()
            tokens_lengths = batch[4].cuda()

            start = batch[5].cuda()
            end = batch[6].cuda()

            localiz = batch[7].cuda()
            localiz_lengths = batch[8]
            time_starts = batch[9]
            time_ends = batch[10]
            factors = batch[11]
            fps = batch[12]

            loss, individual_loss, pred_start, pred_end, attention, atten_loss = model(
                videoFeat, videoFeat_lengths, tokens, tokens_lengths, start,
                end, localiz)
            print("Loss :{}".format(loss))
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
            optimizer.step()

            vis_train.run(index, pred_start,
                          pred_end, start, end, videoFeat_lengths, epoch,
                          loss.detach(), individual_loss, attention,
                          atten_loss, time_starts, time_ends, factors, fps)

            writer.add_scalar('mlnlp/Progress_Loss', loss.item(),
                              total_iterations)

            writer.add_scalar('mlnlp/Progress_Attention_Loss',
                              atten_loss.item(), total_iterations)

            writer.add_scalar('mlnlp/Progress_Mean_IoU', vis_train.mIoU[-1],
                              total_iterations)

            total_iterations += 1

        writer.add_scalar('mlnlp/Train_Loss', np.mean(vis_train.loss), epoch)

        writer.add_scalar('mlnlp/Train_Mean_IoU', np.mean(vis_train.mIoU),
                          epoch)

        vis_train.plot(epoch)
        # Make sure the checkpoint directory exists before saving.
        checkpoint_dir = "./checkpoints/{}".format(cfg.EXPERIMENT_NAME)
        os.makedirs(checkpoint_dir, exist_ok=True)
        torch.save(model, "{}/model_epoch_{}".format(checkpoint_dir, epoch))

        model.eval()
        for iteration, batch in enumerate(dataloader_test):
            index = batch[0]

            videoFeat = batch[1].cuda()
            videoFeat_lengths = batch[2].cuda()

            tokens = batch[3].cuda()
            tokens_lengths = batch[4].cuda()

            start = batch[5].cuda()
            end = batch[6].cuda()
            localiz = batch[7].cuda()
            localiz_lengths = batch[8]
            time_starts = batch[9]
            time_ends = batch[10]
            factors = batch[11]
            fps = batch[12]

            loss, individual_loss, pred_start, pred_end, attention, atten_loss = model(
                videoFeat, videoFeat_lengths, tokens, tokens_lengths, start,
                end, localiz)
            vis_test.run(index, pred_start,
                         pred_end, start, end, videoFeat_lengths, epoch,
                         loss.detach(), individual_loss, attention, atten_loss,
                         time_starts, time_ends, factors, fps)
            #print(loss)
            writer.add_scalar('mlnlp/Progress_Valid_Loss', loss.item(),
                              total_iterations_val)

            writer.add_scalar('mlnlp/Progress_Valid_Atten_Loss',
                              atten_loss.item(), total_iterations_val)

            writer.add_scalar('mlnlp/Progress_Valid_Mean_IoU',
                              vis_test.mIoU[-1], total_iterations_val)

            total_iterations_val += 1

        writer.add_scalar('mlnlp/Valid_Loss', np.mean(vis_test.loss), epoch)

        writer.add_scalar('mlnlp/Valid_Mean_IoU', np.mean(vis_test.mIoU),
                          epoch)

        a = vis_test.plot(epoch)  # per-threshold tIoU results
        writer.add_scalars('mlnlp/Valid_tIoU_th', a, epoch)
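A minimal entry-point sketch for driving this trainer (hedged: the config helper, its module, and the YAML path are hypothetical; only `trainer(cfg)` comes from the example above):

    from config import get_cfg_defaults  # hypothetical config helper

    if __name__ == "__main__":
        cfg = get_cfg_defaults()
        cfg.merge_from_file("configs/experiment.yaml")  # hypothetical path
        trainer(cfg)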
Example No. 10
def tester(cfg):
    print('testing')
    dataloader_test, dataset_size_test = data.make_dataloader(cfg,
                                                              is_train=False)

    model = modeling.build(cfg)
    # torch.nn.Module.dump_patches = True
    # Note: the checkpoint load below replaces the model built above.
    model = torch.load(cfg.TEST.MODEL)
    # print(model)
    model.cuda()

    vis_test = Visualization(cfg, dataset_size_test, is_train=False)

    writer_path = os.path.join(cfg.VISUALIZATION_DIRECTORY,
                               cfg.EXPERIMENT_NAME)
    writer = SummaryWriter(writer_path)

    total_iterations = 0
    total_iterations_val = 0

    model.eval()
    epoch = 1
    results_data = {}
    for iteration, batch in enumerate(dataloader_test):

        index = batch[0]

        videoFeat = batch[1].cuda()
        videoFeat_lengths = batch[2].cuda()

        tokens = batch[3].cuda()
        tokens_lengths = batch[4].cuda()

        start = batch[5].cuda()
        end = batch[6].cuda()

        localiz = batch[7].cuda()
        localiz_lengths = batch[8]
        time_starts = batch[9]
        time_ends = batch[10]
        factors = batch[11]
        fps = batch[12]

        objects = batch[13].cuda()
        objects_lengths = batch[14].cuda()

        humans = batch[15].cuda()
        humans_lengths = batch[16].cuda()

        (loss, individual_loss, pred_start, pred_end, attention, atten_loss,
         attentionNodeQueryHO, attentionNodeQueryVH,
         attentionNodeQueryVO) = model(
             videoFeat, videoFeat_lengths, objects, objects_lengths,
             humans, humans_lengths, tokens, tokens_lengths,
             start, end, localiz)
        aux = vis_test.run(index, pred_start,
                           pred_end, start, end, videoFeat_lengths, epoch,
                           loss.detach(), individual_loss, attention,
                           atten_loss, time_starts, time_ends, factors, fps,
                           attentionNodeQueryHO, attentionNodeQueryVH,
                           attentionNodeQueryVO)
        total_iterations_val += 1
        for k, v in aux.items():
            results_data[k] = v
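This tester differs from Example No. 2 in that the model additionally consumes object and human feature streams (batch[13] through batch[16]) and returns three extra node-query attention maps; the per-sample outputs of vis_test.run are accumulated in results_data.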
Example No. 11
def trainer(cfg):
    print('trainer')
    dataloader_train, dataset_size_train = data.make_dataloader(cfg,
                                                                is_train=True)
    dataloader_test, dataset_size_test = data.make_dataloader(cfg,
                                                              is_train=False)

    model = modeling.build(cfg)
    model.cuda()

    optimizer = solver.make_optimizer(cfg, model)
    scheduler = StepLR(optimizer, step_size=6, gamma=0.01)

    vis_train = Visualization(cfg, dataset_size_train)
    vis_test = Visualization(cfg, dataset_size_test, is_train=False)

    writer_path = os.path.join(cfg.VISUALIZATION_DIRECTORY,
                               cfg.EXPERIMENT_NAME)
    writer = SummaryWriter(writer_path)

    total_iterations = 0
    total_iterations_val = 0

    for epoch in range(cfg.EPOCHS):
        # Decay Learning Rate
        # print("Epoch {}".format(epoch))
        print('Epoch:', epoch, 'LR:', scheduler.get_lr())
        model.train()
        for iteration, batch in enumerate(dataloader_train):
            index = batch[0]

            videoFeat = batch[1].cuda()
            videoFeat_lengths = batch[2].cuda()

            tokens = batch[3].cuda()
            tokens_lengths = batch[4].cuda()

            start = batch[5].cuda()
            end = batch[6].cuda()

            localiz = batch[7].cuda()
            localiz_lengths = batch[8]
            time_starts = batch[9]
            time_ends = batch[10]
            factors = batch[11]
            fps = batch[12]

            objects = batch[13].cuda()
            objects_lengths = batch[14].cuda()

            humans = batch[15].cuda()
            humans_lengths = batch[16].cuda()

            (loss, individual_loss, pred_start, pred_end, attention,
             atten_loss, attentionNodeQueryHO, attentionNodeQueryVH,
             attentionNodeQueryVO) = model(
                 videoFeat, videoFeat_lengths, objects, objects_lengths,
                 humans, humans_lengths, tokens, tokens_lengths,
                 start, end, localiz)
            # print("Loss :{}".format(loss))
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
            optimizer.step()

            vis_train.run(index, pred_start,
                          pred_end, start, end, videoFeat_lengths, epoch,
                          loss.detach(), individual_loss, attention,
                          atten_loss, time_starts, time_ends, factors, fps,
                          attentionNodeQueryHO, attentionNodeQueryVH,
                          attentionNodeQueryVO)

            writer.add_scalar('mlnlp/Progress_Loss', loss.item(),
                              total_iterations)

            writer.add_scalar('mlnlp/Progress_Attention_Loss',
                              atten_loss.item(), total_iterations)

            writer.add_scalar('mlnlp/Progress_Mean_IoU', vis_train.mIoU[-1],
                              total_iterations)

            total_iterations += 1

        writer.add_scalar('mlnlp/Train_Loss', np.mean(vis_train.loss), epoch)

        writer.add_scalar('mlnlp/Train_Mean_IoU', np.mean(vis_train.mIoU),
                          epoch)

        scheduler.step()
        vis_train.plot(epoch)
        # Make sure the checkpoint directory exists before saving.
        checkpoint_dir = "./checkpoints/{}".format(cfg.EXPERIMENT_NAME)
        os.makedirs(checkpoint_dir, exist_ok=True)
        torch.save(model, "{}/model_epoch_{}".format(checkpoint_dir, epoch))

        model.eval()
        for iteration, batch in enumerate(dataloader_test):
            index = batch[0]

            videoFeat = batch[1].cuda()
            videoFeat_lengths = batch[2].cuda()

            tokens = batch[3].cuda()
            tokens_lengths = batch[4].cuda()

            start = batch[5].cuda()
            end = batch[6].cuda()

            localiz = batch[7].cuda()
            localiz_lengths = batch[8]
            time_starts = batch[9]
            time_ends = batch[10]
            factors = batch[11]
            fps = batch[12]

            objects = batch[13].cuda()
            objects_lengths = batch[14].cuda()

            humans = batch[15].cuda()
            humans_lengths = batch[16].cuda()

            (loss, individual_loss, pred_start, pred_end, attention,
             atten_loss, attentionNodeQueryHO, attentionNodeQueryVH,
             attentionNodeQueryVO) = model(
                 videoFeat, videoFeat_lengths, objects, objects_lengths,
                 humans, humans_lengths, tokens, tokens_lengths,
                 start, end, localiz)

            vis_test.run(index, pred_start,
                         pred_end, start, end, videoFeat_lengths, epoch,
                         loss.detach(), individual_loss, attention, atten_loss,
                         time_starts, time_ends, factors, fps,
                         attentionNodeQueryHO, attentionNodeQueryVH,
                         attentionNodeQueryVO)
            #print(index)
            writer.add_scalar('mlnlp/Progress_Valid_Loss', loss.item(),
                              total_iterations_val)

            writer.add_scalar('mlnlp/Progress_Valid_Atten_Loss',
                              atten_loss.item(), total_iterations_val)

            writer.add_scalar('mlnlp/Progress_Valid_Mean_IoU',
                              vis_test.mIoU[-1], total_iterations_val)

            total_iterations_val += 1

        writer.add_scalar('mlnlp/Valid_Loss', np.mean(vis_test.loss), epoch)

        writer.add_scalar('mlnlp/Valid_Mean_IoU', np.mean(vis_test.mIoU),
                          epoch)

        a = vis_test.plot(epoch)  # per-threshold tIoU results
        writer.add_scalars('mlnlp/Valid_tIoU_th', a, epoch)
Example No. 12
def trainer(cfg):
    print('trainer')
    dataloader_train, dataset_size_train = data.make_dataloader(cfg,
                                                                is_train=True)
    dataloader_test, dataset_size_test = data.make_dataloader(cfg,
                                                              is_train=False)
    print(dataset_size_train)
    print(dataset_size_test)

    model = modeling.build(cfg)
    if cfg.MODE_TRAIN == 'resume':
        model = torch.load("./checkpoints/{}/model_{}_epoch_{}".format(
            cfg.EXPERIMENT_NAME, cfg.MODEL_NAME, cfg.MODE_TRAIN_RESUME_EPOCH))
    else:
        # Without this else, the base checkpoint would unconditionally
        # overwrite the resume checkpoint loaded above.
        model = torch.load("./checkpoints/{}/model_{}".format(
            cfg.EXPERIMENT_NAME, cfg.MODEL_NAME))

    model.cuda()
    optimizer = solver.make_optimizer(cfg, model)
    #model = torch.load("/home/crodriguezo/projects/phd/moment-localization-with-NLP/mlnlp_lastversion/checkpoints/anet_config7/model_epoch_80")

    vis_train = Visualization(cfg, dataset_size_train)
    vis_test = Visualization(cfg, dataset_size_test, is_train=False)

    writer_path = os.path.join(cfg.VISUALIZATION_DIRECTORY,
                               cfg.EXPERIMENT_NAME)
    writer = SummaryWriter(writer_path)

    total_iterations = 0
    total_iterations_val = 0
    cfg.EPOCHS = 1  # this script performs a single evaluation pass
    for epoch in range(cfg.EPOCHS):
        model.eval()
        sumloss = 0
        sumsample = 0
        with torch.no_grad():
            for iteration, batch in enumerate(dataloader_test):
                index = batch[0]

                videoFeat = batch[1].cuda()
                videoFeat_lengths = batch[2].cuda()

                tokens = batch[3].cuda()
                tokens_lengths = batch[4].cuda()
                if cfg.MODEL_NAME == 'TMLGA':
                    start = batch[5].cuda()
                    end = batch[6].cuda()
                    localiz = batch[7].cuda()
                    frame_start = batch[13]
                    frame_end = batch[14]
                else:
                    start = batch[5]
                    end = batch[6]
                    localiz = batch[7]
                    frame_start = batch[13].cuda()
                    frame_end = batch[14].cuda()

                localiz_lengths = batch[8]
                time_starts = batch[9]
                time_ends = batch[10]
                factors = batch[11]
                fps = batch[12]

                duration = batch[15]
                vid_names = batch[16]
                loss, individual_loss, pred_start, pred_end, attention, atten_loss = model(
                    videoFeat, videoFeat_lengths, tokens, tokens_lengths,
                    start, end, localiz, frame_start, frame_end)
                sumloss += loss.item() * float(videoFeat.shape[0])
                sumsample += videoFeat.shape[0]
                # print("Test_Loss :{}".format(loss))
                vis_test.run(index, pred_start, pred_end, start, end,
                             videoFeat_lengths, epoch, loss.detach(),
                             individual_loss, attention, atten_loss,
                             time_starts, time_ends, factors, fps,
                             duration, vid_names)
                #print(loss)
                writer.add_scalar('mlnlp/Progress_Valid_Loss', loss.item(),
                                  total_iterations_val)

                writer.add_scalar('mlnlp/Progress_Valid_Atten_Loss',
                                  atten_loss.item(), total_iterations_val)

                writer.add_scalar('mlnlp/Progress_Valid_Mean_IoU',
                                  vis_test.mIoU[-1], total_iterations_val)

                total_iterations_val += 1
                # del videoFeat,videoFeat_lengths,tokens,tokens_lengths,start,end,localiz
                # torch.cuda.empty_cache()
        print("Test_Loss :{}".format(sumloss / sumsample))
        writer.add_scalar(f'mlnlp/Valid_Loss', np.mean(vis_test.loss), epoch)

        writer.add_scalar(f'mlnlp/Valid_Mean_IoU', np.mean(vis_test.mIoU),
                          epoch)

        a = vis_test.plot(epoch)
        writer.add_scalars(f'mlnlp/Valid_tIoU_th', a, epoch)
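Note that every trainer/tester example above writes its scalars beneath cfg.VISUALIZATION_DIRECTORY/cfg.EXPERIMENT_NAME, so the logged curves can be inspected by pointing `tensorboard --logdir` at that directory.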