Example #1
def do_train(cfg, model, data_loader, optimizer, scheduler, checkpointer,
             device, arguments, args):
    logger = logging.getLogger("SSD.trainer")
    logger.info("Start training ...")
    meters = MetricLogger()

    # Get the layers that will be pruned
    if cfg.PRUNE.TYPE != 'no':
        if hasattr(model, 'module'):
            backbone = model.module.backbone
        else:
            backbone = model.backbone
        if cfg.PRUNE.TYPE == 'normal':
            logger.info('normal sparse training')
            _, _, prune_idx = normal_prune.parse_module_defs(
                backbone.module_defs)
        elif cfg.PRUNE.TYPE == 'shortcut':
            logger.info('shortcut sparse training')
            _, _, prune_idx, _, _ = shortcut_prune.parse_module_defs(
                backbone.module_defs)

    model.train()
    save_to_disk = dist_util.get_rank() == 0
    if args.use_tensorboard and save_to_disk:
        try:
            from torch.utils.tensorboard import SummaryWriter
        except ImportError:
            from tensorboardX import SummaryWriter
        summary_writer = SummaryWriter(
            log_dir=os.path.join(cfg.OUTPUT_DIR, 'tf_logs'))
    else:
        summary_writer = None

    max_iter = len(data_loader)
    start_iter = arguments["iteration"]
    start_training_time = time.time()
    end = time.time()
    for iteration, (images, targets, _) in enumerate(data_loader, start_iter):
        iteration = iteration + 1
        arguments["iteration"] = iteration

        images = images.to(device)
        targets = targets.to(device)
        loss_dict = model(images, targets=targets)
        loss = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_loss_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        meters.update(total_loss=losses_reduced, **loss_dict_reduced)

        optimizer.zero_grad()
        loss.backward()

        # Sparsify the BN gamma (scale) parameters of the layers to be pruned
        if cfg.PRUNE.TYPE != 'no':
            if hasattr(model, 'module'):
                bn_sparse.updateBN(model.module.backbone.module_list,
                                   cfg.PRUNE.SR, prune_idx)
            else:
                # print(model.backbone.module_list)
                bn_sparse.updateBN(model.backbone.module_list, cfg.PRUNE.SR,
                                   prune_idx)

        optimizer.step()
        scheduler.step()

        batch_time = time.time() - end
        end = time.time()
        meters.update(time=batch_time)
        if iteration % args.log_step == 0:
            eta_seconds = meters.time.global_avg * (max_iter - iteration)
            eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
            logger.info(
                meters.delimiter.join([
                    "iter: {iter:06d}",
                    "lr: {lr:.5f}",
                    '{meters}',
                    "eta: {eta}",
                    'mem: {mem}M',
                ]).format(
                    iter=iteration,
                    lr=optimizer.param_groups[0]['lr'],
                    meters=str(meters),
                    eta=eta_string,
                    mem=round(torch.cuda.max_memory_allocated() / 1024.0 /
                              1024.0),
                ))
            if summary_writer:
                global_step = iteration
                summary_writer.add_scalar('losses/total_loss',
                                          losses_reduced,
                                          global_step=global_step)
                for loss_name, loss_item in loss_dict_reduced.items():
                    summary_writer.add_scalar('losses/{}'.format(loss_name),
                                              loss_item,
                                              global_step=global_step)
                summary_writer.add_scalar('lr',
                                          optimizer.param_groups[0]['lr'],
                                          global_step=global_step)

        if iteration % args.save_step == 0:
            checkpointer.save("model_{:06d}".format(iteration), **arguments)

        if args.eval_step > 0 and iteration % args.eval_step == 0 and not iteration == max_iter:
            eval_results = do_evaluation(cfg,
                                         model,
                                         distributed=False,
                                         iteration=iteration)  # single-GPU evaluation
            if dist_util.get_rank() == 0 and summary_writer:
                for eval_result, dataset in zip(eval_results,
                                                cfg.DATASETS.TEST):
                    write_metric(eval_result['metrics'], 'metrics/' + dataset,
                                 summary_writer, iteration)
            model.train()  # *IMPORTANT*: change to train mode after eval.

    checkpointer.save("model_final", **arguments)
    # compute training time
    total_training_time = int(time.time() - start_training_time)
    total_time_str = str(datetime.timedelta(seconds=total_training_time))
    logger.info("Total training time: {} ({:.4f} s / it)".format(
        total_time_str, total_training_time / max_iter))
    return model
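
The bn_sparse.updateBN call above applies the channel-sparsity penalty from network slimming: after the backward pass it adds an L1 subgradient to the BatchNorm scale (gamma) gradients of the prunable layers, pushing unimportant channels toward zero. A minimal sketch of that update, assuming each entry of module_list is a Sequential laid out as [Conv2d, BatchNorm2d, activation] (the usual YOLOv3-style definition); the repository's own helper may differ:

import torch


def update_bn_sparsity(module_list, sr, prune_idx):
    # Add sr * sign(gamma) to each prunable BN scale gradient (L1 subgradient).
    for idx in prune_idx:
        bn = module_list[idx][1]  # assumed layout: [Conv2d, BatchNorm2d, activation]
        bn.weight.grad.data.add_(sr * torch.sign(bn.weight.data))
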
Example #2
def do_train(cfg, model, data_loader, optimizer, scheduler, checkpointer,
             device, arguments, args):
    logger = logging.getLogger("SSD.trainer")
    logger.info("Start training ...")
    meters = MetricLogger()

    # Put the model in train() mode so that its parameters can be updated
    model.train()
    save_to_disk = dist_util.get_rank() == 0
    # TensorBoard logging of the training process
    if args.use_tensorboard and save_to_disk:
        import tensorboardX

        summary_writer = tensorboardX.SummaryWriter(
            log_dir=os.path.join(cfg.OUTPUT_DIR, 'tf_logs'))
    else:
        summary_writer = None

    # max_iter is the size of the data loader; training runs for the number of iterations set in the config
    # arguments = {"iteration": 0}: stores the current iteration so training can resume from a checkpoint
    max_iter = len(data_loader)
    start_iter = arguments["iteration"]
    # Start timing
    start_training_time = time.time()
    end = time.time()
    # One pass covers the whole data loader, i.e. the dataset split into batches of batch_size
    # The dataset yields images and their targets, encoding (c+4)k values per location: k default boxes (priors), c class scores and 4 box coordinates each
    for iteration, (images, targets, _) in enumerate(data_loader, start_iter):
        # print(iteration)
        # print(targets)
        iteration = iteration + 1
        arguments["iteration"] = iteration

        images = images.to(device)
        targets = targets.to(device)
        # Pass the inputs and targets to the model; it returns a dict of losses
        loss_dict = model(images, targets=targets)
        loss = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        # (multi-GPU only; can be ignored for single-GPU training)
        loss_dict_reduced = reduce_loss_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        meters.update(total_loss=losses_reduced, **loss_dict_reduced)

        # Standard backward pass and optimizer update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()

        # Timing, logging and checkpoint bookkeeping; mostly boilerplate around the model itself
        batch_time = time.time() - end
        end = time.time()
        meters.update(time=batch_time)
        if iteration % args.log_step == 0:
            eta_seconds = meters.time.global_avg * (max_iter - iteration)
            eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
            logger.info(
                meters.delimiter.join([
                    "iter: {iter:06d}",
                    "lr: {lr:.5f}",
                    '{meters}',
                    "eta: {eta}",
                    'mem: {mem}M',
                ]).format(
                    iter=iteration,
                    lr=optimizer.param_groups[0]['lr'],
                    meters=str(meters),
                    eta=eta_string,
                    mem=round(torch.cuda.max_memory_allocated() / 1024.0 /
                              1024.0),
                ))
            if summary_writer:
                global_step = iteration
                summary_writer.add_scalar('losses/total_loss',
                                          losses_reduced,
                                          global_step=global_step)
                for loss_name, loss_item in loss_dict_reduced.items():
                    summary_writer.add_scalar('losses/{}'.format(loss_name),
                                              loss_item,
                                              global_step=global_step)
                summary_writer.add_scalar('lr',
                                          optimizer.param_groups[0]['lr'],
                                          global_step=global_step)

        if iteration % args.save_step == 0:
            checkpointer.save("model_{:06d}".format(iteration), **arguments)

        # Known issue: evaluation during training can fail here because of errors in the evaluation code
        if args.eval_step > 0 and iteration % args.eval_step == 0 and not iteration == max_iter:
            eval_results = do_evaluation(cfg,
                                         model,
                                         distributed=args.distributed,
                                         iteration=iteration)
            if dist_util.get_rank() == 0 and summary_writer:
                for eval_result, dataset in zip(eval_results,
                                                cfg.DATASETS.TEST):
                    write_metric(eval_result['metrics'], 'metrics/' + dataset,
                                 summary_writer, iteration)
            model.train()  # *IMPORTANT*: change to train mode after eval.

    checkpointer.save("model_final", **arguments)
    # compute training time
    total_training_time = int(time.time() - start_training_time)
    total_time_str = str(datetime.timedelta(seconds=total_training_time))
    logger.info("Total training time: {} ({:.4f} s / it)".format(
        total_time_str, total_training_time / max_iter))
    return model
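
reduce_loss_dict, used above for logging, averages each loss over all processes so that rank 0 reports values representative of every GPU. A sketch of the usual torch.distributed pattern, assuming single-process training should pass through unchanged; the repository's exact implementation may differ:

import torch
import torch.distributed as dist


def reduce_loss_dict_sketch(loss_dict):
    # Average each loss across processes; return the input unchanged when
    # torch.distributed is not initialized (single-GPU training).
    world_size = dist.get_world_size() if dist.is_available() and dist.is_initialized() else 1
    if world_size < 2:
        return loss_dict
    with torch.no_grad():
        names = sorted(loss_dict.keys())
        values = torch.stack([loss_dict[k] for k in names], dim=0)
        dist.all_reduce(values)
        values /= world_size
    return dict(zip(names, values))
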
Example #3
def do_train(cfg, model, data_loader, optimizer, scheduler, checkpointer,
             arguments):
    logger = logging.getLogger("SSD.trainer")
    logger.info("Start training ...")
    meters = MetricLogger()

    model.train()

    summary_writer = torch.utils.tensorboard.SummaryWriter(
        log_dir=os.path.join(cfg.OUTPUT_DIR, 'tf_logs'))

    max_iter = len(data_loader)
    start_iter = arguments["iteration"]
    start_training_time = time.time()
    end = time.time()
    for iteration, (images, targets, _) in enumerate(data_loader, start_iter):
        iteration = iteration + 1
        arguments["iteration"] = iteration
        images = torch_utils.to_cuda(images)
        targets = torch_utils.to_cuda(targets)
        loss_dict = model(images, targets=targets)
        loss = sum(loss for loss in loss_dict.values())

        meters.update(total_loss=loss, **loss_dict)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()

        batch_time = time.time() - end
        end = time.time()
        meters.update(time=batch_time)
        if iteration % cfg.LOG_STEP == 0:
            eta_seconds = meters.time.global_avg * (max_iter - iteration)
            eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
            logger.info(
                meters.delimiter.join([
                    "iter: {iter:06d}",
                    "lr: {lr:.5f}",
                    '{meters}',
                    "eta: {eta}",
                    'mem: {mem}M',
                ]).format(iter=iteration,
                          lr=optimizer.param_groups[0]['lr'],
                          meters=str(meters),
                          eta=eta_string,
                          mem=round(torch.cuda.max_memory_allocated() /
                                    1024.0 / 1024.0)))
            global_step = iteration
            summary_writer.add_scalar('losses/total_loss',
                                      loss,
                                      global_step=global_step)
            for loss_name, loss_item in loss_dict.items():
                summary_writer.add_scalar('losses/{}'.format(loss_name),
                                          loss_item,
                                          global_step=global_step)
            summary_writer.add_scalar('lr',
                                      optimizer.param_groups[0]['lr'],
                                      global_step=global_step)

        if iteration % cfg.MODEL_SAVE_STEP == 0:
            checkpointer.save("model_{:06d}".format(iteration), **arguments)

        if cfg.EVAL_STEP > 0 and iteration % cfg.EVAL_STEP == 0:
            eval_results = do_evaluation(cfg, model, iteration=iteration)
            for eval_result, dataset in zip(eval_results, cfg.DATASETS.TEST):
                write_metric(eval_result['metrics'], 'metrics/' + dataset,
                             summary_writer, iteration)
            model.train()  # *IMPORTANT*: change to train mode after eval.

    checkpointer.save("model_final", **arguments)
    # compute training time
    total_training_time = int(time.time() - start_training_time)
    total_time_str = str(datetime.timedelta(seconds=total_training_time))
    logger.info("Total training time: {} ({:.4f} s / it)".format(
        total_time_str, total_training_time / max_iter))
    return model
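
write_metric pushes the evaluation results into TensorBoard. A minimal sketch that logs scalars and recurses into nested metric dicts (e.g. per-class AP); the repository's actual helper may differ:

def write_metric_sketch(metrics, prefix, summary_writer, global_step):
    # Log every scalar under prefix/, descending into nested dicts.
    for key, value in metrics.items():
        tag = '{}/{}'.format(prefix, key)
        if isinstance(value, dict):
            write_metric_sketch(value, tag, summary_writer, global_step)
        else:
            summary_writer.add_scalar(tag, value, global_step=global_step)
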
Example #4
def do_train(cfg, model, data_loader, optimizer, scheduler, checkpointer,
             device, arguments, args):
    logger = logging.getLogger("SSD.trainer")
    logger.info("Start training ...")
    meters = MetricLogger()

    model.train()
    save_to_disk = dist_util.get_rank() == 0
    if args.use_tensorboard and save_to_disk:
        import tensorboardX

        summary_writer = tensorboardX.SummaryWriter(
            log_dir=os.path.join(cfg.OUTPUT_DIR, 'tf_logs'))
    else:
        summary_writer = None

    max_iter = len(data_loader)
    start_iter = arguments["iteration"]
    start_training_time = time.time()
    end = time.time()
    for iteration, (images, targets, _, boxes_norm,
                    labels_norm) in enumerate(data_loader, start_iter):
        iteration = iteration + 1
        arguments["iteration"] = iteration
        scheduler.step()

        images = images.to(device)
        targets = targets.to(device)
        # ------------------------------- Mask ground truth -------------------------------
        mask_t = np.zeros((images.shape[0], 81, 64, 64))
        mask_t[:, 0, :, :] = np.ones((1, 1, 64, 64))
        for i in range(images.shape[0]):
            for L, B_norm in zip(labels_norm[i], boxes_norm[i]):
                xmin = int(B_norm[0] * 64)
                ymin = int(B_norm[1] * 64)
                xmax = int(B_norm[2] * 64)
                ymax = int(B_norm[3] * 64)
                lab = int(L)

                mask_t[i, 0, ymin:ymax, xmin:xmax] = 0.0
                mask_t[i, lab, ymin:ymax, xmin:xmax] = 1.0

        mask_t = torch.from_numpy(mask_t.astype(np.float32)).cuda()  # Variable wrapper is unnecessary since PyTorch 0.4
        # ----------------------------------------------------------------------------------
        loss_dict = model(images, targets=(targets, mask_t))
        loss = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_loss_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        meters.update(total_loss=losses_reduced, **loss_dict_reduced)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time = time.time() - end
        end = time.time()
        meters.update(time=batch_time)
        if iteration % args.log_step == 0:
            eta_seconds = meters.time.global_avg * (max_iter - iteration)
            eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
            logger.info(
                meters.delimiter.join([
                    "iter: {iter:06d}",
                    "lr: {lr:.5f}",
                    '{meters}',
                    "eta: {eta}",
                    'mem: {mem}M',
                ]).format(
                    iter=iteration,
                    lr=optimizer.param_groups[0]['lr'],
                    meters=str(meters),
                    eta=eta_string,
                    mem=round(torch.cuda.max_memory_allocated() / 1024.0 /
                              1024.0),
                ))
            if summary_writer:
                global_step = iteration
                summary_writer.add_scalar('losses/total_loss',
                                          losses_reduced,
                                          global_step=global_step)
                for loss_name, loss_item in loss_dict_reduced.items():
                    summary_writer.add_scalar('losses/{}'.format(loss_name),
                                              loss_item,
                                              global_step=global_step)
                summary_writer.add_scalar('lr',
                                          optimizer.param_groups[0]['lr'],
                                          global_step=global_step)

        if iteration % args.save_step == 0:
            checkpointer.save("model_{:06d}".format(iteration), **arguments)

        if args.eval_step > 0 and iteration % args.eval_step == 0 and not iteration == max_iter:
            eval_results = do_evaluation(cfg,
                                         model,
                                         distributed=args.distributed,
                                         iteration=iteration)
            if dist_util.get_rank() == 0 and summary_writer:
                for eval_result, dataset in zip(eval_results,
                                                cfg.DATASETS.TEST):
                    write_metric(eval_result['metrics'], 'metrics/' + dataset,
                                 summary_writer, iteration)
            model.train()  # *IMPORTANT*: change to train mode after eval.

    checkpointer.save("model_final", **arguments)
    # compute training time
    total_training_time = int(time.time() - start_training_time)
    total_time_str = str(datetime.timedelta(seconds=total_training_time))
    logger.info("Total training time: {} ({:.4f} s / it)".format(
        total_time_str, total_training_time / max_iter))
    return model
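
The mask ground truth built above is a one-hot (81, 64, 64) grid per image: channel 0 is background, and each box stamps its class channel over the box area. A small self-contained illustration of that layout (the box and class id 3 are arbitrary example values):

import numpy as np

mask = np.zeros((81, 64, 64), dtype=np.float32)
mask[0] = 1.0                                  # everything starts as background
xmin, ymin, xmax, ymax, lab = 0, 0, 32, 32, 3  # a box over the top-left quadrant, class 3
mask[0, ymin:ymax, xmin:xmax] = 0.0            # clear background inside the box
mask[lab, ymin:ymax, xmin:xmax] = 1.0          # set the box's class channel
assert np.allclose(mask.sum(axis=0), 1.0)      # each cell belongs to exactly one channel
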
Example #5
def do_train(cfg, model, data_loader, optimizer, checkpointer, arguments,
             scheduler):
    logger = logging.getLogger("SSD.trainer")
    logger.info("Start training ...")
    meters = MetricLogger()

    model.train()

    summary_writer = torch.utils.tensorboard.SummaryWriter(
        log_dir=os.path.join(cfg.OUTPUT_DIR, 'tf_logs'))

    max_iter = len(data_loader)
    start_iter = arguments["iteration"]
    start_training_time = time.time()
    end = time.time()
    scaler = torch.cuda.amp.GradScaler()
    print(model)
    for iteration, (images, targets, _) in enumerate(data_loader, start_iter):
        iteration = iteration + 1
        arguments["iteration"] = iteration
        images = torch_utils.to_cuda(images)
        targets = torch_utils.to_cuda(targets)

        # Casts operations to mixed precision
        with torch.cuda.amp.autocast():
            loss_dict = model(images.half(), targets=targets)
            loss = sum(loss for loss in loss_dict.values())

        meters.update(total_loss=loss, **loss_dict)

        optimizer.zero_grad()
        # Scales the loss, and calls backward()
        # to create scaled gradients
        scaler.scale(loss).backward()
        # loss.backward()
        # Unscales gradients and calls
        # or skips optimizer.step()
        scaler.step(optimizer)
        # optimizer.step(iteration)

        # Updates the scale for next iteration
        scaler.update()
        if iteration > 5000:
            scheduler.step()

        batch_time = time.time() - end
        end = time.time()
        meters.update(time=batch_time)
        if iteration % cfg.LOG_STEP == 0:
            eta_seconds = meters.time.global_avg * (max_iter - iteration)
            eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
            lr = optimizer.param_groups[0]['lr']
            to_log = [
                f"iter: {iteration:06d}",
                f"lr: {lr:.5f}",
                f'{meters}',
                f"eta: {eta_string}",
            ]
            if torch.cuda.is_available():
                mem = round(torch.cuda.max_memory_allocated() / 1024.0 /
                            1024.0)
                to_log.append(f'mem: {mem}M')
            logger.info(meters.delimiter.join(to_log))
            global_step = iteration
            summary_writer.add_scalar('losses/total_loss',
                                      loss,
                                      global_step=global_step)
            for loss_name, loss_item in loss_dict.items():
                summary_writer.add_scalar('losses/{}'.format(loss_name),
                                          loss_item,
                                          global_step=global_step)
            summary_writer.add_scalar('lr',
                                      optimizer.param_groups[0]['lr'],
                                      global_step=global_step)

        if iteration % cfg.MODEL_SAVE_STEP == 0:
            checkpointer.save("model_{:06d}".format(iteration), **arguments)

        if cfg.EVAL_STEP > 0 and iteration % cfg.EVAL_STEP == 0:
            eval_results = do_evaluation(cfg, model, iteration=iteration)
            for eval_result, dataset in zip(eval_results, cfg.DATASETS.TEST):
                write_metric(eval_result['metrics'], 'metrics/' + dataset,
                             summary_writer, iteration)
            model.train()  # *IMPORTANT*: change to train mode after eval.

        if iteration >= cfg.SOLVER.MAX_ITER:
            break

    checkpointer.save("model_final", **arguments)
    # compute training time
    total_training_time = int(time.time() - start_training_time)
    total_time_str = str(datetime.timedelta(seconds=total_training_time))
    logger.info("Total training time: {} ({:.4f} s / it)".format(
        total_time_str, total_training_time / max_iter))
    return model
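
This example follows the standard torch.cuda.amp recipe: forward under autocast, backward on the scaled loss, then scaler.step and scaler.update. Since autocast chooses op dtypes itself, the explicit images.half() cast above is not needed. A minimal sketch of the recipe in isolation, using the same model(images, targets=targets) calling convention as the examples:

import torch


def amp_train_pass(model, data_loader, optimizer, device):
    # One mixed-precision pass over data_loader (CUDA device assumed).
    scaler = torch.cuda.amp.GradScaler()
    for images, targets, _ in data_loader:
        images, targets = images.to(device), targets.to(device)
        optimizer.zero_grad()
        with torch.cuda.amp.autocast():
            loss_dict = model(images, targets=targets)
            loss = sum(loss_dict.values())
        scaler.scale(loss).backward()  # backward on the scaled loss
        scaler.step(optimizer)         # unscales gradients; skips the step on inf/NaN
        scaler.update()                # adjust the scale factor for the next iteration
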
Example #6
def do_train(cfg, model, data_loader, optimizer, scheduler, checkpointer,
             device, arguments, args):
    logger = logging.getLogger("SSD.trainer")
    logger.info("Start training ...")
    meters = MetricLogger()

    model.train()
    save_to_disk = dist_util.get_rank() == 0
    if args.use_tensorboard and save_to_disk:
        import tensorboardX

        summary_writer = tensorboardX.SummaryWriter(
            log_dir=os.path.join(cfg.OUTPUT_DIR, 'tf_logs'))
    else:
        summary_writer = None

    max_iter = len(data_loader)
    start_iter = arguments["iteration"]
    start_training_time = time.time()
    end = time.time()
    max_epoch = 10
    for epoch in range(max_epoch):
        logger.info('epoch: {}'.format(epoch))
        for iteration, (images, targets,
                        _) in enumerate(data_loader, start_iter):
            # print("imgs shape:  ",images.shape,iteration)
            # continue
            # iteration = iteration + 1
            arguments["iteration"] = iteration
            scheduler.step()

            images = images.to(device)
            targets = targets.to(device)
            loss_dict = model(images, targets=targets)
            loss = sum(loss for loss in loss_dict.values())

            # reduce losses over all GPUs for logging purposes
            loss_dict_reduced = reduce_loss_dict(loss_dict)
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            meters.update(total_loss=losses_reduced, **loss_dict_reduced)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            batch_time = time.time() - end
            end = time.time()
            meters.update(time=batch_time)

            # log step
            if iteration % args.log_step == 0:
                eta_seconds = meters.time.global_avg * (max_iter - iteration)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                logger.info(
                    meters.delimiter.join([
                        "iter: {iter:06d}",
                        "lr: {lr:.5f}",
                        '{meters}',
                        "eta: {eta}",
                        'mem: {mem}M',
                    ]).format(
                        iter=iteration,
                        lr=optimizer.param_groups[0]['lr'],
                        meters=str(meters),
                        eta=eta_string,
                        mem=round(torch.cuda.max_memory_allocated() / 1024.0 /
                                  1024.0),
                    ))
                if summary_writer:
                    global_step = iteration
                    summary_writer.add_scalar('losses/total_loss',
                                              losses_reduced,
                                              global_step=global_step)
                    for loss_name, loss_item in loss_dict_reduced.items():
                        summary_writer.add_scalar(
                            'losses/{}'.format(loss_name),
                            loss_item,
                            global_step=global_step)
                    summary_writer.add_scalar('lr',
                                              optimizer.param_groups[0]['lr'],
                                              global_step=global_step)

            # save step
            if iteration % args.save_step == 0:
                checkpointer.save("model_{:06d}".format(iteration),
                                  **arguments)

            # eval step
            if args.eval_step > 0 and iteration % args.eval_step == 0 and not iteration == max_iter:
                # if True:
                eval_results = do_evaluation(cfg,
                                             model,
                                             distributed=args.distributed,
                                             iteration=iteration)
                if dist_util.get_rank() == 0 and summary_writer:
                    for eval_result, dataset in zip(eval_results,
                                                    cfg.DATASETS.TEST):
                        write_metric(eval_result['metrics'],
                                     'metrics/' + dataset, summary_writer,
                                     iteration)
                model.train()  # *IMPORTANT*: change to train mode after eval.

    checkpointer.save("model_final", **arguments)
    # compute training time
    total_training_time = int(time.time() - start_training_time)
    total_time_str = str(datetime.timedelta(seconds=total_training_time))
    logger.info("Total training time: {} ({:.4f} s / it)".format(
        total_time_str, total_training_time / max_iter))
    return model
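
Note that in this epoch-based variant the increment of iteration is commented out and enumerate restarts at start_iter, so the log/save/eval conditions fire at the same iteration values in every epoch. Below is a sketch of one way to keep a single global step across epochs; run_one_batch is a placeholder for the loop body above, not a repository function. Also, since PyTorch 1.1 scheduler.step() is expected to be called after optimizer.step(), not before:

def train_with_global_step(data_loader, arguments, max_epoch, run_one_batch):
    # Keep one monotonically increasing step so logging, checkpoint names and
    # the LR schedule advance across epochs instead of restarting each epoch.
    global_step = arguments["iteration"]
    for epoch in range(max_epoch):
        for batch in data_loader:
            global_step += 1
            arguments["iteration"] = global_step
            run_one_batch(batch, global_step)  # forward/backward, optimizer.step(), then scheduler.step()
    return global_step
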
Example #7
def do_train_with_style(cfg, model, data_loader, style_loader, optimizer,
                        scheduler, checkpointer, device, arguments, args):
    logger = logging.getLogger("SSD.trainer")
    logger.info("Start training ...")
    meters = MetricLogger()

    model.train()
    save_to_disk = dist_util.get_rank() == 0
    if args.use_tensorboard and save_to_disk:
        try:
            from torch.utils.tensorboard import SummaryWriter
        except ImportError:
            from tensorboardX import SummaryWriter
        summary_writer = SummaryWriter(
            log_dir=os.path.join(cfg.OUTPUT_DIR, 'tf_logs'))
    else:
        summary_writer = None

    max_iter = len(data_loader)
    start_iter = arguments["iteration"]
    start_training_time = time.time()
    end = time.time()

    # prepare AdaIN models
    default_path = '/content/drive/MyDrive/DA_detection/models/'
    vgg_path = default_path + 'vgg_normalized.pth'
    if 'VGG_PATH' in os.environ:
        vgg_path = os.environ['VGG_PATH']
    decoder_path = default_path + 'decoder.pth'
    if 'DECODER_PATH' in os.environ:
        decoder_path = os.environ['DECODER_PATH']
    # DEBUG: print('AdaIN > models loaded')

    for iteration, (images, targets, ids) in enumerate(data_loader,
                                                       start_iter):
        iteration = iteration + 1
        arguments["iteration"] = iteration

        # AdaIN routine
        random.seed()
        styles = next(iter(style_loader))
        # DEBUG: print('AdaIN > begin new batch')
        if random.random() > args.p:
            apply_style_transfer(vgg_path, decoder_path, images, styles[0],
                                 args.p)

        # DEBUG: print('AdaIN > end batch')
        images = images.to(device)
        targets = targets.to(device)
        loss_dict = model(images, targets=targets)
        loss = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_loss_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        meters.update(total_loss=losses_reduced, **loss_dict_reduced)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()

        batch_time = time.time() - end
        end = time.time()
        meters.update(time=batch_time)
        if iteration % args.log_step == 0:
            eta_seconds = meters.time.global_avg * (max_iter - iteration)
            eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
            if device == "cuda":
                logger.info(
                    meters.delimiter.join([
                        "iter: {iter:06d}",
                        "lr: {lr:.5f}",
                        '{meters}',
                        "eta: {eta}",
                        'mem: {mem}M',
                    ]).format(
                        iter=iteration,
                        lr=optimizer.param_groups[0]['lr'],
                        meters=str(meters),
                        eta=eta_string,
                        mem=round(torch.cuda.max_memory_allocated() / 1024.0 /
                                  1024.0),
                    ))
            else:
                logger.info(
                    meters.delimiter.join([
                        "iter: {iter:06d}",
                        "lr: {lr:.5f}",
                        '{meters}',
                        "eta: {eta}",
                    ]).format(
                        iter=iteration,
                        lr=optimizer.param_groups[0]['lr'],
                        meters=str(meters),
                        eta=eta_string,
                    ))
            if summary_writer:
                global_step = iteration
                summary_writer.add_scalar('losses/total_loss',
                                          losses_reduced,
                                          global_step=global_step)
                for loss_name, loss_item in loss_dict_reduced.items():
                    summary_writer.add_scalar('losses/{}'.format(loss_name),
                                              loss_item,
                                              global_step=global_step)
                summary_writer.add_scalar('lr',
                                          optimizer.param_groups[0]['lr'],
                                          global_step=global_step)

        if iteration % args.save_step == 0:
            checkpointer.save("model_{:06d}".format(iteration), **arguments)

        if args.eval_step > 0 and iteration % args.eval_step == 0 and not iteration == max_iter:
            eval_results = do_evaluation(cfg,
                                         model,
                                         distributed=args.distributed,
                                         iteration=iteration)
            if dist_util.get_rank() == 0 and summary_writer:
                for eval_result, dataset in zip(eval_results,
                                                cfg.DATASETS.TEST):
                    write_metric(eval_result['metrics'], 'metrics/' + dataset,
                                 summary_writer, iteration)
            model.train()  # *IMPORTANT*: change to train mode after eval.

    checkpointer.save("model_final", **arguments)
    # compute training time
    total_training_time = int(time.time() - start_training_time)
    total_time_str = str(datetime.timedelta(seconds=total_training_time))
    logger.info("Total training time: {} ({:.4f} s / it)".format(
        total_time_str, total_training_time / max_iter))
    return model
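
apply_style_transfer re-styles part of the training batches with AdaIN style transfer (VGG encoder, AdaIN, trained decoder, per the vgg_normalized.pth and decoder.pth weights loaded above). The core AdaIN operation is sketched below; the encoder/decoder plumbing of the actual helper is omitted:

import torch


def adaptive_instance_norm(content_feat, style_feat, eps=1e-5):
    # AdaIN (Huang & Belongie, 2017): shift/scale the content features so their
    # per-channel mean and std match those of the style features.
    n, c = content_feat.shape[:2]
    c_flat = content_feat.reshape(n, c, -1)
    s_flat = style_feat.reshape(n, c, -1)
    c_mean, c_std = c_flat.mean(dim=2), c_flat.std(dim=2) + eps
    s_mean, s_std = s_flat.mean(dim=2), s_flat.std(dim=2)
    normalized = (content_feat - c_mean.view(n, c, 1, 1)) / c_std.view(n, c, 1, 1)
    return normalized * s_std.view(n, c, 1, 1) + s_mean.view(n, c, 1, 1)
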
Example #8
def do_train(
    cfg: CfgNode,
    model: SSDDetector,
    data_loader: DataLoader,
    optimizer: SGD,
    scheduler: MultiStepLR,
    checkpointer,
    device: device,
    arguments,
    args: Namespace,
    output_dir: Path,
    model_manager: Dict[str, Any],
) -> SSDDetector:
    logger = logging.getLogger("SSD.trainer")
    logger.info("Start training ...")
    meters = MetricLogger()

    model.train()
    save_to_disk = dist_util.get_rank() == 0
    if args.use_tensorboard and save_to_disk:
        import tensorboardX

        summary_writer = tensorboardX.SummaryWriter(logdir=output_dir / "logs")
    else:
        summary_writer = None

    max_iter = len(data_loader)
    start_iter = arguments["iteration"]
    start_training_time = time.time()
    end = time.time()

    logger.info("MAX_ITER: {}".format(max_iter))

    # GB: 2019-09-08:
    # For rescaling tests, do an eval before fine-tuning-training, so we know what
    # the eval results are before any weights are updated:
    # do_evaluation(
    #     cfg,
    #     model,
    #     distributed=args.distributed,
    #     iteration=0,
    # )
    # model.train()  # *IMPORTANT*: change to train mode after eval.

    for iteration, (images, targets, _) in enumerate(data_loader, start_iter):
        # TODO: Print learning rate:
        iteration = iteration + 1
        arguments["iteration"] = iteration
        scheduler.step()

        images = images.to(device)
        targets = targets.to(device)
        loss_dict = model(images, targets=targets)
        loss = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_loss_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        meters.update(total_loss=losses_reduced, **loss_dict_reduced)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time = time.time() - end
        end = time.time()
        meters.update(time=batch_time)
        if iteration % args.log_step == 0:
            eta_seconds = meters.time.global_avg * (max_iter - iteration)
            eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
            logger.info(
                meters.delimiter.join([
                    "iter: {iter:06d}",
                    "lr: {lr:.5f}",
                    "{meters}",
                    "eta: {eta}",
                    "mem: {mem}M",
                ]).format(
                    iter=iteration,
                    lr=optimizer.param_groups[0]["lr"],
                    meters=str(meters),
                    eta=eta_string,
                    mem=round(torch.cuda.max_memory_allocated() / 1024.0 /
                              1024.0),
                ))
            if summary_writer:
                global_step = iteration
                summary_writer.add_scalar("losses/total_loss",
                                          losses_reduced,
                                          global_step=global_step)
                for loss_name, loss_item in loss_dict_reduced.items():
                    summary_writer.add_scalar(
                        "losses/{}".format(loss_name),
                        loss_item,
                        global_step=global_step,
                    )
                summary_writer.add_scalar("lr",
                                          optimizer.param_groups[0]["lr"],
                                          global_step=global_step)

        # This project doesn't use epochs; it works with batch samplers instead, so
        # there is only a notion of "iteration". For now, hard-code the epoch as zero
        # for the checkpoint file name:
        epoch = 0
        save_name = f"ssd{cfg.INPUT.IMAGE_SIZE}-vgg_{cfg.DATASETS.TRAIN[0]}_0_{epoch}_{iteration:06d}"
        model_path = Path(output_dir) / f"{save_name}.pth"

        # Save a regular checkpoint at the configured interval:
        if iteration % args.save_step == 0:
            checkpointer.save(save_name, **arguments)

        # Evaluate during training to track mAP and see whether performance improves
        if (args.eval_step > 0 and iteration % args.eval_step == 0
                and not iteration == max_iter):
            eval_results = do_evaluation(
                cfg,
                model,
                distributed=args.distributed,
                iteration=iteration,
            )
            do_best_model_checkpointing(cfg, model_path, eval_results,
                                        model_manager, logger)
            if dist_util.get_rank() == 0 and summary_writer:
                for eval_result, dataset in zip(eval_results,
                                                cfg.DATASETS.TEST):
                    write_metric(
                        eval_result["metrics"],
                        "metrics/" + dataset,
                        summary_writer,
                        iteration,
                    )
            model.train()  # *IMPORTANT*: change to train mode after eval.

        if iteration % args.save_step == 0:
            remove_extra_checkpoints(output_dir, [model_path], logger)

    checkpointer.save("model_final", **arguments)
    # compute training time
    total_training_time = int(time.time() - start_training_time)
    total_time_str = str(datetime.timedelta(seconds=total_training_time))
    logger.info("Total training time: {} ({:.4f} s / it)".format(
        total_time_str, total_training_time / max_iter))
    return model
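
do_best_model_checkpointing and remove_extra_checkpoints are project-specific helpers not shown here. A hypothetical sketch of what the best-model bookkeeping might look like, assuming eval_results[0]['metrics'] exposes an 'mAP' entry; the real helpers may track different metrics or files:

import shutil
from pathlib import Path


def keep_best_checkpoint(model_path, eval_results, model_manager, logger):
    # Hypothetical: remember and copy the checkpoint with the best mAP so far.
    current_map = eval_results[0]["metrics"].get("mAP", 0.0)
    if current_map > model_manager.get("best_map", float("-inf")):
        model_manager["best_map"] = current_map
        model_manager["best_model"] = str(model_path)
        if Path(model_path).exists():
            shutil.copy(str(model_path), str(Path(model_path).with_name("model_best.pth")))
        logger.info("New best mAP %.4f from %s", current_map, model_path)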