Example #1
def __init__(self, metrics=None):
    super(MetricsHandler, self).__init__()
    self.metrics = AlphaConfig()
    self._items_ = []
    # Guard against the default metrics=None, which is not iterable.
    for m in metrics or []:
        if m not in METRICS.keys():
            logger.raise_error(
                NotImplementedError,
                "Handler of metric {} is not in implemented metrics set {}"
                .format(m, _METRICS_))
        self._items_.append(m)
Example #2
def test_deepcopy(test_dict):
    config = AlphaConfig(test_dict)
    copied_config = copy.deepcopy(config)
    print("expect: False, got: {}".format(config is copied_config))
    copied_config.attr_1 = 10
    print(copied_config)
    print(config)
Example #3
def test_build(test_dict):
    # config = AlphaConfig(path2json="test.json")
    configs = AlphaConfig(test_dict, attr_3="value_3")
    print(configs.cvt2dict())
    with open("test.yaml", 'r') as fp:
        src_dict = yaml.safe_load(fp)
    configs.yaml = src_dict
    configs.attr_1 = "value_1"
    configs.attr_2 = {"attr_2_1": 1, "attr_2_2": [1, 2, 4]}
    print(configs)
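The snippet above expects a test.yaml file on disk and a test_dict argument, neither of which is shown in the listing. A minimal, hypothetical driver might look like the following; the YAML content and the dictionary are placeholders, not part of the original tests:

import yaml

if __name__ == "__main__":
    # Write a throwaway YAML file for test_build to read back.
    with open("test.yaml", "w") as fp:
        yaml.safe_dump({"some_key": [1, 2, 3]}, fp)
    test_build({"attr_1": "initial_value"})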
Example #4
def update(self, dataset, phase, epoch, metric, value):
    epoch = str(epoch)
    if dataset not in self.metrics.keys():
        self.metrics[dataset] = {}
    if phase not in self.metrics[dataset].keys():
        self.metrics[dataset][phase] = {}
    if epoch not in self.metrics[dataset][phase].keys():
        self.metrics[dataset][phase][epoch] = {}
    if metric not in self.metrics[dataset][phase][epoch].keys():
        self.metrics[dataset][phase][epoch][metric] = AlphaConfig(value=0,
                                                                  counts=0)
    self.metrics[dataset][phase][epoch][metric].counts += 1
    counts = self.metrics[dataset][phase][epoch][metric].counts
    pre_value = self.metrics[dataset][phase][epoch][metric].value
    new_value = value / counts + pre_value * (counts - 1) / counts
    self.metrics[dataset][phase][epoch][metric].value = new_value
    return new_value
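The last three assignments implement an incremental (running) mean, so only the current average and a count are stored per metric rather than every measurement. A small stand-alone check of that arithmetic, with made-up values purely for illustration:

values = [0.8, 0.6, 0.7, 0.9]
running, counts = 0.0, 0
for value in values:
    counts += 1
    # Same update rule as in update() above.
    running = value / counts + running * (counts - 1) / counts
print(abs(running - sum(values) / len(values)) < 1e-12)  # True: running mean == plain mean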
Example #5
def test_read_only(test_dict):
    config = AlphaConfig(test_dict)
    config.cvt_state(read_only=True)
    print("is read only? {}".format(config.is_read_only()))
    try:
        config.test_attr_1
    except AttributeError:
        print("Successfully raise AttributeError #1")
    try:
        config.test_attr_2 = "test_value"
    except AttributeError:
        print("Successfully raise AttributeError #2")
    print(config)
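Presumably the read-only state can be toggled back the same way. The sketch below assumes cvt_state(read_only=False) re-enables writes, which is not shown in the original listing:

from alphaconfig import AlphaConfig

config = AlphaConfig({"attr_a": 1})
config.cvt_state(read_only=True)
print(config.is_read_only())          # True
config.cvt_state(read_only=False)     # assumption: this lifts the read-only state
config.attr_b = "writable again"      # would raise AttributeError while read-only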
Example #6
from alphaconfig import AlphaConfig

loss_fn = AlphaConfig()

# ========      MSELoss         ========
loss_fn.MSELoss.WEIGHT1 = 1.0
loss_fn.MSELoss.WEIGHT2 = 1.0
# ========      MAELoss         ========
loss_fn.MAELoss.WEIGHT1 = 1.0
loss_fn.MAELoss.WEIGHT2 = 1.0
# ========      MSESSIMLoss     ========
loss_fn.MSESSIMLoss.mse = 1.0
loss_fn.MSESSIMLoss.ssim = 0.5
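One way a training script might consume such a block; mse_val and ssim_val are placeholder per-term losses, not defined in the original:

mse_val, ssim_val = 0.02, 0.15   # purely illustrative values
weights = loss_fn.MSESSIMLoss
total_loss = weights.mse * mse_val + weights.ssim * ssim_val   # 1.0 * 0.02 + 0.5 * 0.15
print(total_loss)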
Example #7
r"""
Info:
    Configurations for datasets.
Author:
    Yiqun Chen
"""

from alphaconfig import AlphaConfig

data = AlphaConfig()

# ========      Dataset1    ========
data.Dataset1.dir = "/home/chenyiqun/data/SOTS/outdoor"
data.Dataset1.train = True
data.Dataset1.valid = True
data.Dataset1.test = False
data.Dataset1.infer = ["valid"]
data.Dataset1.train_reso = (256, 256)
data.Dataset1.extra_reso = (128, 128)
data.Dataset1.data_ratio = 1.0

# ========      Dataset2  ========
data.Dataset2.dir = "/home/chenyiqun/data/SOTS/indoor"
data.Dataset2.train = False
data.Dataset2.valid = True
data.Dataset2.test = False
data.Dataset2.infer = ["valid"]
data.Dataset2.train_reso = (256, 256)
data.Dataset2.extra_reso = (128, 128)
data.Dataset2.data_ratio = 1.0
Example #8
r"""
Info:
    Configurations for optimizer
Author:
    Yiqun Chen
"""

from alphaconfig import AlphaConfig
from .args import args

optim = AlphaConfig()

# ========      Adam        ========
optim.Adam.lr = args.lr
optim.Adam.finetune = 1.0
optim.Adam.weight_decay = 0.0
# ========      AdamW       ========
optim.AdamW.lr = args.lr
optim.AdamW.finetune = 1.0
optim.AdamW.weight_decay = 0.1
Example #9
def test_iter(test_dict):
    config = AlphaConfig(test_dict)
    for it in config:
        print(it)
Example #10
def test_get_key(test_dict):
    config = AlphaConfig(test_dict)
    print(config.items())
Example #11
def test_hasattr(test_dict):
    print("Testing hasattr")
    config = AlphaConfig(test_dict)
    config.cvt_state(read_only=True)
    print("expect: False, got: {}".format(hasattr(config, "attr")))
Example #12
r"""
Info:
    Configurations for learning rate scheduler
Author:
    Yiqun Chen
"""

from alphaconfig import AlphaConfig
from .args import args

scheduler = AlphaConfig()

# ========      LinearLRScheduler       ========
scheduler.LinearLRScheduler.min_lr = 2.5E-6
scheduler.LinearLRScheduler.warmup = 10
# ========      StepLRScheduler         ========
scheduler.StepLRScheduler.min_lr = 2.5E-6
scheduler.StepLRScheduler.warmup = 10
scheduler.StepLRScheduler.update_epoch = range(int(args.max_epoch * 0.1),
                                               args.max_epoch,
                                               int(args.max_epoch * 0.1))
scheduler.StepLRScheduler.update_coeff = 0.5
Example #13
class MetricsHandler:
    def __init__(self, metrics=None):
        super(MetricsHandler, self).__init__()
        self.metrics = AlphaConfig()
        self._items_ = []
        # Guard against the default metrics=None, which is not iterable.
        for m in metrics or []:
            if m not in METRICS.keys():
                logger.raise_error(
                    NotImplementedError,
                    "Handler of metric {} is not in implemented metrics set {}"
                    .format(m, _METRICS_))
            self._items_.append(m)

    def register(self, metric):
        if metric not in METRICS.keys():
            logger.raise_error(
                NotImplementedError,
                "Handler of metric {} is not in implemented metrics set {}"
                .format(metric, _METRICS_))
        self._items_.append(metric)

    def update(self, dataset, phase, epoch, metric, value):
        epoch = str(epoch)
        if dataset not in self.metrics.keys():
            self.metrics[dataset] = {}
        if phase not in self.metrics[dataset].keys():
            self.metrics[dataset][phase] = {}
        if epoch not in self.metrics[dataset][phase].keys():
            self.metrics[dataset][phase][epoch] = {}
        if metric not in self.metrics[dataset][phase][epoch].keys():
            self.metrics[dataset][phase][epoch][metric] = AlphaConfig(value=0,
                                                                      counts=0)
        self.metrics[dataset][phase][epoch][metric].counts += 1
        counts = self.metrics[dataset][phase][epoch][metric].counts
        pre_value = self.metrics[dataset][phase][epoch][metric].value
        new_value = value / counts + pre_value * (counts - 1) / counts
        self.metrics[dataset][phase][epoch][metric].value = new_value
        return new_value

    def calc_metrics(self, dataset, phase, epoch, out, trg, data_range, *args,
                     **kwargs):
        kwargs["device"] = out.device
        for metric in self._items_:
            value = METRICS[metric](out,
                                    trg,
                                    data_range=data_range,
                                    *args,
                                    **kwargs)
            self.update(dataset, phase, epoch, metric, value)
        return self.metrics[dataset][phase][epoch]

    def summarize(self, dataset, phase, epoch, logger=None, *args, **kwargs):
        fmt = "{:<20}" * (len(self._items_) + 1)
        values = [dataset]
        for idx in range(len(self._items_)):
            values.append(
                str(
                    round(
                        self.metrics[dataset][phase][epoch][
                            self._items_[idx]].value.item(), 5)))
        info = fmt.format("dataset", *self._items_)
        data = fmt.format(*values)
        msg = "\n========Metrics========\n" + info + "\n" + data
        utils.notify(msg, logger=logger)
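A hypothetical end-to-end use of MetricsHandler. The METRICS entry, the torch tensors, and the assumption that each metric callable returns a tensor (so that .value.item() works in summarize) are illustrative, not taken from the project, whose real METRICS registry, logger, and utils live elsewhere:

import torch

# Hypothetical registry entry; any callable with this signature would do.
METRICS = {"mse": lambda out, trg, data_range=1.0, **kw: ((out - trg) ** 2).mean()}

handler = MetricsHandler(metrics=["mse"])
out, trg = torch.rand(4, 3, 8, 8), torch.rand(4, 3, 8, 8)
handler.calc_metrics("Dataset1", "valid", 1, out, trg, data_range=1.0)
# update() stores epochs as strings, so index with "1" here.
print(handler.metrics["Dataset1"]["valid"]["1"]["mse"].value.item())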
Example #14
r"""
Author:
    Yiqun Chen
Docs:
    Configurations, should not call other custom modules.
"""

import os, sys, copy
from alphaconfig import AlphaConfig

from .args import args
from .optim import optim
from .dataset import data
from .loss_fn import loss_fn
from .scheduler import scheduler

configs = AlphaConfig()
cfg = configs

# ================================
# GENERAL
# ================================
cfg.gnrl.root = os.path.join(os.getcwd(), ".")
cfg.gnrl.id = "{}".format(args.id)
cfg.gnrl.batch = args.batch_size
cfg.gnrl.resume = args.resume == "true"
cfg.gnrl.cuda = eval(args.cuda)
cfg.gnrl.ckphs = range(int(args.max_epoch * 0.6), args.max_epoch,
                       int(args.max_epoch * 0.1))
cfg.gnrl.infer = "0"

# ================================