Example #1
from misc_utils import pytorchgo_logger as logger


def adjust_learning_rate(decay_rate, optimizer, epoch):
    # One-shot step decay: once `epoch` reaches `decay_rate`, scale the
    # learning rate of every param group by 0.1 and log the change.
    if epoch == decay_rate:
        for g_id, param_group in enumerate(optimizer.param_groups):
            origin_lr = param_group["lr"]
            param_group["lr"] = origin_lr * 0.1
            logger.warning(
                "update optimizer group {} from lr = {} to {}".format(
                    g_id, origin_lr, param_group["lr"]))

    show_lr = optimizer.param_groups[0]["lr"]
    logger.warning("current lr={}, logger_dir={}".format(
        show_lr, logger.get_logger_dir()))
    return show_lr
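A minimal usage sketch, assuming a hypothetical model and optimizer; here decay_rate=30 means the learning rate is cut once, when epoch 30 is reached:

import torch

model = torch.nn.Linear(10, 2)  # hypothetical model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
for epoch in range(100):
    # ... run one training epoch ...
    current_lr = adjust_learning_rate(decay_rate=30, optimizer=optimizer, epoch=epoch)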
Example #2
import warnings

from misc_utils import pytorchgo_logger as logger


def step_scheduler(optimizer, current_epoch, lr_schedule, net_name):
    """
    Perform step learning-rate decay: when the current epoch matches an
    entry of `lr_schedule`, set every param group's lr to the scheduled value.
    Args:
        optimizer: Optimizer for which step decay has to be applied
        current_epoch: Current epoch number (counted from 0)
        lr_schedule: List of (epoch, lr) pairs; the epochs are 1-based
        net_name: Network name, used only for logging
    """
    warnings.warn(
        "please use step_scheduler in pytorchgo.utils.learning_rate",
        DeprecationWarning,
    )
    previous_lr = optimizer.param_groups[0]["lr"]
    for (e, v) in lr_schedule:
        if current_epoch == e - 1:  # epoch start from 0
            logger.warning("epoch {}: {} lr changed from: {} to {}".format(
                current_epoch, net_name, previous_lr, v))
            for param_group in optimizer.param_groups:
                param_group["lr"] = v

    return optimizer
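A usage sketch with illustrative values; since the function compares against e - 1, the epochs in the schedule are 1-based:

import torch

model = torch.nn.Linear(10, 2)  # hypothetical model
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
lr_schedule = [(30, 1e-3), (60, 1e-4)]  # drop to 1e-3 at epoch 30, to 1e-4 at epoch 60
for epoch in range(90):
    # ... run one training epoch ...
    optimizer = step_scheduler(optimizer, epoch, lr_schedule, net_name="backbone")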
Example #3
import os

from misc_utils import pytorchgo_logger as logger


def set_gpu(gpu):
    # Restrict which GPUs CUDA may see; accepts an int or a string like "0,2".
    if not isinstance(gpu, str):
        gpu = str(gpu)
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu
    logger.warning("pytorchgo set gpu: {}".format(gpu))
Example #4
# Author: Tao Hu <*****@*****.**>

import torch
from misc_utils import pytorchgo_logger as logger
from termcolor import colored
from tabulate import tabulate
import warnings, os, sys

if sys.version_info[0] >= 3:
    logger.warning("use reduce from functools...")
    from functools import reduce


def set_gpu(gpu):
    if not isinstance(gpu, str):
        gpu = str(gpu)
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu
    logger.warning("pytorchgo set gpu: {}".format(gpu))


def model_summary(model_list):
    if not isinstance(model_list, list):
        model_list = [model_list]

    from operator import mul

    for model in model_list:
        state_dict = model.state_dict().copy()  # used further down in the full source
        params = filter(lambda p: p.requires_grad, model.parameters())
        # The snippet is truncated here in the source; a plausible completion
        # (assumption): log the trainable-parameter count per model.
        num_params = sum(reduce(mul, p.size(), 1) for p in params)
        logger.info("trainable parameters: {:,}".format(num_params))
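A usage sketch with a hypothetical model (a single module is wrapped into a list internally):

net = torch.nn.Sequential(torch.nn.Linear(10, 20), torch.nn.ReLU(), torch.nn.Linear(20, 2))
model_summary(net)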
Example #5
def main():
    # Helpers such as parse, seed, get_model, do_eval, and the train_* loops
    # are defined elsewhere in the source project.
    args = parse()
    set_gpu(args.gpu)
    args.best_score = 0
    args.best_result_dict = {}

    from dataloader_baseline import get_my_dataset

    train_loader = get_my_dataset(args)
    args.semantic_mem = train_loader.dataset.semantic_mem
    seed(args.manual_seed)
    model = get_model(args)

    if args.evaluate:
        logger.info(vars(args))
        assert args.test_load is not None
        saved_dict = torch.load(args.test_load)
        logger.warning("loading weight {}".format(args.test_load))
        model.load_state_dict(saved_dict["state_dict"], strict=True)
        args.read_cache_feat = True
        score_dict = do_eval(args=args, model=model)
        return

    logger.warning("using {}".format(args.optimizer))
    if args.optimizer == "sgd":
        optimizer = torch.optim.SGD(
            model.parameters(),
            args.lr,
            momentum=args.momentum,
            weight_decay=args.wd,
        )
    elif args.optimizer == "adam":
        optimizer = torch.optim.Adam(model.parameters(),
                                     args.lr,
                                     weight_decay=args.wd)
    else:
        raise ValueError("invalid optimizer: {}".format(args.optimizer))

    model_summary(model)
    optimizer_summary(optimizer)

    logger.info(vars(args))

    for epoch in range(args.epochs):
        if args.method == "baseline":
            train(train_loader, model, optimizer, epoch, args)
        elif args.method == "va":
            train_va(train_loader, model, optimizer, epoch, args)
        elif args.method == "vasa":
            train_vasa(train_loader, model, optimizer, epoch, args)
        elif args.method == "ranking":
            train_ranking(train_loader, model, optimizer, epoch, args)
        else:
            raise ValueError("unknown method: {}".format(args.method))
        # eval_per_epoch is assumed to be a module-level constant (e.g. 1)
        if epoch % eval_per_epoch == 0 or epoch == args.epochs - 1:
            score_dict = do_eval(args=args, model=model)

            score = score_dict["ap"]
            is_best = score > args.best_score
            if is_best:
                # args.best_result_dict = score_dict
                args.best_score = max(score, args.best_score)
                logger.warning("saving best snapshot..")
                torch.save(
                    {
                        "epoch": epoch,
                        "state_dict": model.state_dict(),
                        "score": args.best_score,
                        "optimizer": optimizer.state_dict(),
                    },
                    os.path.join(logger.get_logger_dir(), "best.pth.tar"),
                )

    weight_path = os.path.join(logger.get_logger_dir(), "best.pth.tar")
    saved_dict = torch.load(weight_path)
    logger.warning("loading weight {}, best validation result={}".format(
        weight_path, saved_dict["score"]))
    model.load_state_dict(saved_dict["state_dict"], strict=True)
    args.eval_split = "testing"
    args.eval_all = True
    logger.info(vars(args))
    score_dict = do_eval(args=args, model=model)
    logger.info("training finish. snapshot weight in {}".format(
        logger.get_logger_dir()))
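The snapshot saved above stores the keys epoch, state_dict, score, and optimizer, so it can be reloaded standalone; a minimal sketch, assuming model and the pytorchgo logger from the example are in scope:

ckpt = torch.load(os.path.join(logger.get_logger_dir(), "best.pth.tar"))
model.load_state_dict(ckpt["state_dict"], strict=True)
print("best validation score {} at epoch {}".format(ckpt["score"], ckpt["epoch"]))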