Example #1
0
def tester(cfg):
    """Run one evaluation pass of a serialized checkpoint over the test set.

    Loads the whole model object from ``cfg.TEST.MODEL`` (a path starting
    with '.' is resolved against the current working directory), runs every
    test batch through it under ``torch.no_grad()``, and feeds predictions
    to the ``Visualization`` helper, which plots at the end.
    """
    print('testing')
    dataloader_test, dataset_size_test = data.make_dataloader(cfg,
                                                              is_train=False)

    model = modeling.build(cfg)

    if cfg.TEST.MODEL.startswith('.'):
        # realpath() expands the leading '.' (and any '..') to an absolute
        # path. The previous str.replace('.', realpath('.')) substituted
        # EVERY dot, corrupting paths such as './model.pth'.
        load_path = os.path.realpath(cfg.TEST.MODEL)
    else:
        load_path = cfg.TEST.MODEL

    # NOTE(review): this deserializes an entire nn.Module, discarding the
    # freshly built model above; torch.load of full modules also requires
    # the original class definitions to be importable.
    model = torch.load(load_path)
    model.cuda()

    vis_test = Visualization(cfg, dataset_size_test, is_train=False)

    writer_path = os.path.join(cfg.VISUALIZATION_DIRECTORY,
                               cfg.EXPERIMENT_NAME)
    writer = SummaryWriter(writer_path)

    total_iterations_val = 0

    model.eval()
    epoch = 1
    # Inference only: disabling autograd saves memory and time.
    with torch.no_grad():
        for iteration, batch in enumerate(dataloader_test):
            # Positional batch layout comes from the dataset's collate_fn.
            index = batch[0]

            videoFeat = batch[1].cuda()
            videoFeat_lengths = batch[2].cuda()

            tokens = batch[3].cuda()
            tokens_lengths = batch[4].cuda()

            start = batch[5].cuda()
            end = batch[6].cuda()

            localiz = batch[7].cuda()
            localiz_lengths = batch[8]

            time_starts = batch[9]
            time_ends = batch[10]

            factors = batch[11]
            fps = batch[12]
            frame_start = batch[13]
            frame_end = batch[14]

            loss, individual_loss, pred_start, pred_end, attention, atten_loss = model(
                videoFeat, videoFeat_lengths, tokens, tokens_lengths, start, end,
                localiz, frame_start, frame_end)
            aux = vis_test.run(index, pred_start,
                               pred_end, start, end, videoFeat_lengths, epoch,
                               loss.detach(), individual_loss, attention,
                               atten_loss, time_starts, time_ends, factors, fps)
            total_iterations_val += 1
    a = vis_test.plot(epoch)
Example #2
0
File: build.py  Project: suvaansh/TMLGA
def trainer(cfg):
    """Train a model for ``cfg.EPOCHS`` epochs, validating after each epoch.

    Builds train/test dataloaders, the model and the optimizer from ``cfg``,
    logs per-iteration and per-epoch metrics to TensorBoard under
    ``cfg.VISUALIZATION_DIRECTORY/cfg.EXPERIMENT_NAME``, and saves a full
    model checkpoint per epoch under ``./checkpoints/<EXPERIMENT_NAME>/``.
    """
    print('trainer')
    dataloader_train, dataset_size_train = data.make_dataloader(cfg,
                                                                is_train=True)
    dataloader_test, dataset_size_test = data.make_dataloader(cfg,
                                                              is_train=False)

    model = modeling.build(cfg)
    model.cuda()
    optimizer = solver.make_optimizer(cfg, model)

    vis_train = Visualization(cfg, dataset_size_train)
    vis_test = Visualization(cfg, dataset_size_test, is_train=False)

    writer_path = os.path.join(cfg.VISUALIZATION_DIRECTORY,
                               cfg.EXPERIMENT_NAME)
    writer = SummaryWriter(writer_path)

    total_iterations = 0
    total_iterations_val = 0

    for epoch in range(cfg.EPOCHS):
        print("Epoch {}".format(epoch))
        model.train()
        for iteration, batch in enumerate(dataloader_train):
            # Positional batch layout comes from the dataset's collate_fn.
            index = batch[0]

            videoFeat = batch[1].cuda()
            videoFeat_lengths = batch[2].cuda()

            tokens = batch[3].cuda()
            tokens_lengths = batch[4].cuda()

            start = batch[5].cuda()
            end = batch[6].cuda()

            localiz = batch[7].cuda()
            localiz_lengths = batch[8]
            time_starts = batch[9]
            time_ends = batch[10]
            factors = batch[11]
            fps = batch[12]

            loss, individual_loss, pred_start, pred_end, attention, atten_loss = model(
                videoFeat, videoFeat_lengths, tokens, tokens_lengths, start,
                end, localiz)
            print("Loss :{}".format(loss))
            optimizer.zero_grad()
            loss.backward()
            # Clip to a max gradient norm of 5 to stabilise training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
            optimizer.step()

            vis_train.run(index, pred_start,
                          pred_end, start, end, videoFeat_lengths, epoch,
                          loss.detach(), individual_loss, attention,
                          atten_loss, time_starts, time_ends, factors, fps)

            writer.add_scalar('mlnlp/Progress_Loss', loss.item(),
                              total_iterations)

            writer.add_scalar('mlnlp/Progress_Attention_Loss',
                              atten_loss.item(), total_iterations)

            writer.add_scalar('mlnlp/Progress_Mean_IoU', vis_train.mIoU[-1],
                              total_iterations)

            # Keep the global step an int (was `+= 1.`, silently a float).
            total_iterations += 1

        writer.add_scalar('mlnlp/Train_Loss', np.mean(vis_train.loss), epoch)

        writer.add_scalar('mlnlp/Train_Mean_IoU', np.mean(vis_train.mIoU),
                          epoch)

        vis_train.plot(epoch)
        # Saves the whole module object; assumes the checkpoint dir exists.
        torch.save(
            model,
            "./checkpoints/{}/model_epoch_{}".format(cfg.EXPERIMENT_NAME,
                                                     epoch))

        model.eval()
        # Validation: no gradients needed, so skip autograd bookkeeping.
        with torch.no_grad():
            for iteration, batch in enumerate(dataloader_test):
                index = batch[0]

                videoFeat = batch[1].cuda()
                videoFeat_lengths = batch[2].cuda()

                tokens = batch[3].cuda()
                tokens_lengths = batch[4].cuda()

                start = batch[5].cuda()
                end = batch[6].cuda()
                localiz = batch[7].cuda()
                localiz_lengths = batch[8]
                time_starts = batch[9]
                time_ends = batch[10]
                factors = batch[11]
                fps = batch[12]

                loss, individual_loss, pred_start, pred_end, attention, atten_loss = model(
                    videoFeat, videoFeat_lengths, tokens, tokens_lengths, start,
                    end, localiz)
                vis_test.run(index, pred_start,
                             pred_end, start, end, videoFeat_lengths, epoch,
                             loss.detach(), individual_loss, attention, atten_loss,
                             time_starts, time_ends, factors, fps)
                writer.add_scalar('mlnlp/Progress_Valid_Loss', loss.item(),
                                  total_iterations_val)

                writer.add_scalar('mlnlp/Progress_Valid_Atten_Loss',
                                  atten_loss.item(), total_iterations_val)

                writer.add_scalar('mlnlp/Progress_Valid_Mean_IoU',
                                  vis_test.mIoU[-1], total_iterations_val)

                total_iterations_val += 1

        writer.add_scalar('mlnlp/Valid_Loss', np.mean(vis_test.loss), epoch)

        writer.add_scalar('mlnlp/Valid_Mean_IoU', np.mean(vis_test.mIoU),
                          epoch)

        a = vis_test.plot(epoch)
        writer.add_scalars('mlnlp/Valid_tIoU_th', a, epoch)
Example #3
0
def tester(cfg):
    """Evaluate a serialized checkpoint on the test split.

    Runs every test batch through the model loaded from ``cfg.TEST.MODEL``
    (this variant also feeds object/human node features) under
    ``torch.no_grad()`` and accumulates the per-sample outputs of
    ``vis_test.run``.

    Returns:
        dict: accumulated per-sample results keyed as produced by
        ``Visualization.run`` (previously built but never used).
    """
    print('testing')
    dataloader_test, dataset_size_test = data.make_dataloader(cfg,
                                                              is_train=False)

    model = modeling.build(cfg)
    # NOTE(review): torch.load deserializes an entire nn.Module, replacing
    # the freshly built model above.
    model = torch.load(cfg.TEST.MODEL)
    model.cuda()

    vis_test = Visualization(cfg, dataset_size_test, is_train=False)

    writer_path = os.path.join(cfg.VISUALIZATION_DIRECTORY,
                               cfg.EXPERIMENT_NAME)
    writer = SummaryWriter(writer_path)

    total_iterations_val = 0

    model.eval()
    epoch = 1
    results_data = {}
    # Inference only: disabling autograd saves memory and time.
    with torch.no_grad():
        for iteration, batch in enumerate(dataloader_test):
            # Positional batch layout comes from the dataset's collate_fn.
            index = batch[0]

            videoFeat = batch[1].cuda()
            videoFeat_lengths = batch[2].cuda()

            tokens = batch[3].cuda()
            tokens_lengths = batch[4].cuda()

            start = batch[5].cuda()
            end = batch[6].cuda()

            localiz = batch[7].cuda()
            localiz_lengths = batch[8]
            time_starts = batch[9]
            time_ends = batch[10]
            factors = batch[11]
            fps = batch[12]

            objects = batch[13].cuda()
            objects_lengths = batch[14].cuda()

            humans = batch[15].cuda()
            humans_lengths = batch[16].cuda()

            (loss, individual_loss, pred_start, pred_end, attention, atten_loss,
             attentionNodeQueryHO, attentionNodeQueryVH,
             attentionNodeQueryVO) = model(videoFeat, videoFeat_lengths,
                                           objects, objects_lengths,
                                           humans, humans_lengths,
                                           tokens, tokens_lengths,
                                           start, end, localiz)
            aux = vis_test.run(index, pred_start,
                               pred_end, start, end, videoFeat_lengths, epoch,
                               loss.detach(), individual_loss, attention,
                               atten_loss, time_starts, time_ends, factors, fps,
                               attentionNodeQueryHO, attentionNodeQueryVH,
                               attentionNodeQueryVO)
            total_iterations_val += 1
            results_data.update(aux)
    # Return the collected results instead of silently discarding them.
    return results_data
Example #4
0
def trainer(cfg):
    """Train the graph-attention variant for ``cfg.EPOCHS`` epochs.

    Identical loop structure to the plain trainer, but the model also
    consumes object/human node features and returns three extra node-query
    attention maps. Uses a StepLR schedule (step_size=6, gamma=0.01), logs
    to TensorBoard and checkpoints the full model every epoch.
    """
    print('trainer')
    dataloader_train, dataset_size_train = data.make_dataloader(cfg,
                                                                is_train=True)
    dataloader_test, dataset_size_test = data.make_dataloader(cfg,
                                                              is_train=False)

    model = modeling.build(cfg)
    model.cuda()

    optimizer = solver.make_optimizer(cfg, model)
    scheduler = StepLR(optimizer, step_size=6, gamma=0.01)

    vis_train = Visualization(cfg, dataset_size_train)
    vis_test = Visualization(cfg, dataset_size_test, is_train=False)

    writer_path = os.path.join(cfg.VISUALIZATION_DIRECTORY,
                               cfg.EXPERIMENT_NAME)
    writer = SummaryWriter(writer_path)

    total_iterations = 0
    total_iterations_val = 0

    for epoch in range(cfg.EPOCHS):
        # get_last_lr() is the supported way to read the current LR;
        # get_lr() is deprecated for external use and warns/misreports
        # on recent PyTorch versions.
        print('Epoch:', epoch, 'LR:', scheduler.get_last_lr())
        model.train()
        for iteration, batch in enumerate(dataloader_train):
            # Positional batch layout comes from the dataset's collate_fn.
            index = batch[0]

            videoFeat = batch[1].cuda()
            videoFeat_lengths = batch[2].cuda()

            tokens = batch[3].cuda()
            tokens_lengths = batch[4].cuda()

            start = batch[5].cuda()
            end = batch[6].cuda()

            localiz = batch[7].cuda()
            localiz_lengths = batch[8]
            time_starts = batch[9]
            time_ends = batch[10]
            factors = batch[11]
            fps = batch[12]

            objects = batch[13].cuda()
            objects_lengths = batch[14].cuda()

            humans = batch[15].cuda()
            humans_lengths = batch[16].cuda()

            (loss, individual_loss, pred_start, pred_end, attention, atten_loss,
             attentionNodeQueryHO, attentionNodeQueryVH,
             attentionNodeQueryVO) = model(videoFeat, videoFeat_lengths,
                                           objects, objects_lengths,
                                           humans, humans_lengths,
                                           tokens, tokens_lengths,
                                           start, end, localiz)
            optimizer.zero_grad()
            loss.backward()
            # Clip to a max gradient norm of 5 to stabilise training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
            optimizer.step()

            vis_train.run(index, pred_start,
                          pred_end, start, end, videoFeat_lengths, epoch,
                          loss.detach(), individual_loss, attention,
                          atten_loss, time_starts, time_ends, factors, fps,
                          attentionNodeQueryHO, attentionNodeQueryVH,
                          attentionNodeQueryVO)

            writer.add_scalar('mlnlp/Progress_Loss', loss.item(),
                              total_iterations)

            writer.add_scalar('mlnlp/Progress_Attention_Loss',
                              atten_loss.item(), total_iterations)

            writer.add_scalar('mlnlp/Progress_Mean_IoU', vis_train.mIoU[-1],
                              total_iterations)

            # Keep the global step an int (was `+= 1.`, silently a float).
            total_iterations += 1

        writer.add_scalar('mlnlp/Train_Loss', np.mean(vis_train.loss), epoch)

        writer.add_scalar('mlnlp/Train_Mean_IoU', np.mean(vis_train.mIoU),
                          epoch)

        # Per-epoch LR decay; stepped after the optimizer, as required.
        scheduler.step()
        vis_train.plot(epoch)
        torch.save(
            model,
            "./checkpoints/{}/model_epoch_{}".format(cfg.EXPERIMENT_NAME,
                                                     epoch))

        model.eval()
        # Validation: no gradients needed, so skip autograd bookkeeping.
        with torch.no_grad():
            for iteration, batch in enumerate(dataloader_test):
                index = batch[0]

                videoFeat = batch[1].cuda()
                videoFeat_lengths = batch[2].cuda()

                tokens = batch[3].cuda()
                tokens_lengths = batch[4].cuda()

                start = batch[5].cuda()
                end = batch[6].cuda()

                localiz = batch[7].cuda()
                localiz_lengths = batch[8]
                time_starts = batch[9]
                time_ends = batch[10]
                factors = batch[11]
                fps = batch[12]

                objects = batch[13].cuda()
                objects_lengths = batch[14].cuda()

                humans = batch[15].cuda()
                humans_lengths = batch[16].cuda()

                (loss, individual_loss, pred_start, pred_end, attention,
                 atten_loss, attentionNodeQueryHO, attentionNodeQueryVH,
                 attentionNodeQueryVO) = model(videoFeat, videoFeat_lengths,
                                               objects, objects_lengths,
                                               humans, humans_lengths,
                                               tokens, tokens_lengths,
                                               start, end, localiz)

                vis_test.run(index, pred_start,
                             pred_end, start, end, videoFeat_lengths, epoch,
                             loss.detach(), individual_loss, attention, atten_loss,
                             time_starts, time_ends, factors, fps,
                             attentionNodeQueryHO, attentionNodeQueryVH,
                             attentionNodeQueryVO)
                writer.add_scalar('mlnlp/Progress_Valid_Loss', loss.item(),
                                  total_iterations_val)

                writer.add_scalar('mlnlp/Progress_Valid_Atten_Loss',
                                  atten_loss.item(), total_iterations_val)

                writer.add_scalar('mlnlp/Progress_Valid_Mean_IoU',
                                  vis_test.mIoU[-1], total_iterations_val)

                total_iterations_val += 1

        writer.add_scalar('mlnlp/Valid_Loss', np.mean(vis_test.loss), epoch)

        writer.add_scalar('mlnlp/Valid_Mean_IoU', np.mean(vis_test.mIoU),
                          epoch)

        a = vis_test.plot(epoch)
        writer.add_scalars('mlnlp/Valid_tIoU_th', a, epoch)
Example #5
0
async def train():
    """Fit the LightGBM model on the module-level dataframe ``df``.

    Prints the dataframe shape, preprocesses it into features/target,
    builds and trains the model, and returns a completion message.
    (Declared async for the web framework; performs no awaits itself.)
    """
    print(df.shape)
    features, target = preprocess(df)
    lgbm = build(c.ModelName.lgbm)
    lgbm = train_and_validate(lgbm, features, target)
    return "Training is done."
Example #6
0
def trainer(cfg):
    """Load a checkpoint and run a single evaluation epoch over the test set.

    Despite the name, this variant only evaluates: it loads either a
    resume-epoch checkpoint (``cfg.MODE_TRAIN == 'resume'``) or the latest
    named checkpoint, forces ``cfg.EPOCHS`` to 1, and logs validation
    metrics to TensorBoard.
    """
    print('trainer')
    dataloader_train, dataset_size_train = data.make_dataloader(cfg,
                                                                is_train=True)
    dataloader_test, dataset_size_test = data.make_dataloader(cfg,
                                                              is_train=False)
    print(dataset_size_train)
    print(dataset_size_test)

    model = modeling.build(cfg)
    if cfg.MODE_TRAIN == 'resume':
        model = torch.load("./checkpoints/{}/model_{}_epoch_{}".format(
            cfg.EXPERIMENT_NAME, cfg.MODEL_NAME, cfg.MODE_TRAIN_RESUME_EPOCH))
    else:
        # BUG FIX: previously this load ran unconditionally, clobbering the
        # resume-epoch checkpoint loaded just above.
        model = torch.load("./checkpoints/{}/model_{}".format(
            cfg.EXPERIMENT_NAME, cfg.MODEL_NAME))

    model.cuda()
    optimizer = solver.make_optimizer(cfg, model)

    vis_train = Visualization(cfg, dataset_size_train)
    vis_test = Visualization(cfg, dataset_size_test, is_train=False)

    writer_path = os.path.join(cfg.VISUALIZATION_DIRECTORY,
                               cfg.EXPERIMENT_NAME)
    writer = SummaryWriter(writer_path)

    total_iterations_val = 0
    # Evaluation-only: force exactly one pass (mutates the shared cfg).
    cfg.EPOCHS = 1
    for epoch in range(cfg.EPOCHS):
        model.eval()
        # Running sums for a sample-weighted mean test loss.
        sumloss = 0
        sumsample = 0
        with torch.no_grad():
            for iteration, batch in enumerate(dataloader_test):
                # Positional batch layout comes from the dataset's collate_fn.
                index = batch[0]

                videoFeat = batch[1].cuda()
                videoFeat_lengths = batch[2].cuda()

                tokens = batch[3].cuda()
                tokens_lengths = batch[4].cuda()
                # TMLGA consumes GT spans/localization on GPU; other models
                # instead take the raw frame indices on GPU.
                if cfg.MODEL_NAME == 'TMLGA':
                    start = batch[5].cuda()
                    end = batch[6].cuda()
                    localiz = batch[7].cuda()
                    frame_start = batch[13]
                    frame_end = batch[14]
                else:
                    start = batch[5]
                    end = batch[6]
                    localiz = batch[7]
                    frame_start = batch[13].cuda()
                    frame_end = batch[14].cuda()

                localiz_lengths = batch[8]
                time_starts = batch[9]
                time_ends = batch[10]
                factors = batch[11]
                fps = batch[12]

                duration = batch[15]
                vid_names = batch[16]
                loss, individual_loss, pred_start, pred_end, attention, atten_loss = model(
                    videoFeat, videoFeat_lengths, tokens, tokens_lengths,
                    start, end, localiz, frame_start, frame_end)
                # Weight each batch's mean loss by its batch size.
                sumloss += loss.item() * float(videoFeat.shape[0])
                sumsample += videoFeat.shape[0]
                vis_test.run(index, pred_start, pred_end, start, end,
                             videoFeat_lengths, epoch, loss.detach(),
                             individual_loss, attention, atten_loss,
                             time_starts, time_ends, factors, fps,
                             duration, vid_names)
                writer.add_scalar('mlnlp/Progress_Valid_Loss', loss.item(),
                                  total_iterations_val)

                writer.add_scalar('mlnlp/Progress_Valid_Atten_Loss',
                                  atten_loss.item(), total_iterations_val)

                writer.add_scalar('mlnlp/Progress_Valid_Mean_IoU',
                                  vis_test.mIoU[-1], total_iterations_val)

                total_iterations_val += 1
        print("Test_Loss :{}".format(sumloss / sumsample))
        writer.add_scalar('mlnlp/Valid_Loss', np.mean(vis_test.loss), epoch)

        writer.add_scalar('mlnlp/Valid_Mean_IoU', np.mean(vis_test.mIoU),
                          epoch)

        a = vis_test.plot(epoch)
        writer.add_scalars('mlnlp/Valid_tIoU_th', a, epoch)