Example #1
def check(config):

    assert isinstance(config, dict)

    # ==========================================================================

    assert "model_name" in config

    if "model_type" in config:
        assert config["model_type"] in ["prior", "conditional"]

    assert config["model_name"] in list(MODELS.keys())

    assert "train_path" in config

    if isinstance(config["train_path"], list):
        assert all(os.path.exists(path) for path in config["train_path"])
    else:
        assert os.path.exists(config["train_path"])

    if "eval_path" in config:
        assert os.path.exists(config["eval_path"])

    assert "epochs" in config
    assert "batch_size" in config

    assert "optimizer" in config
    assert "opt_params" in config
    assert "learning_rate" in config["opt_params"]

    # ==========================================================================

    MODELS[config["model_name"]].check(config)

    return config
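
A minimal usage sketch for check(), assuming MODELS maps model names to classes that expose a check classmethod. Every value below is hypothetical:

config = {
    "model_name": "FarNet",                # must be a key of MODELS
    "model_type": "conditional",           # optional: "prior" or "conditional"
    "train_path": "data/train",            # hypothetical; must exist on disk, may be a list
    "epochs": 20,
    "batch_size": 32,
    "optimizer": "adam",
    "opt_params": {"learning_rate": 5e-4},
}
config = check(config)  # raises AssertionError on any missing or invalid entry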
Example #2
def parse_args():
    parser = argparse.ArgumentParser(description='Model prediction')

    # params of model
    parser.add_argument(
        '--model_name',
        dest='model_name',
        help='Model type for testing, which is one of {}'.format(str(list(MODELS.keys()))),
        type=str,
        default='FarNet')

    # params of infer
    parser.add_argument(
        '--dataset',
        dest='dataset',
        help="The dataset you want to test, which is one of {}".format(str(list(DATASETS.keys()))),
        type=str,
        default='ImageFolder')
    parser.add_argument(
        '--infer_root',
        dest='infer_root',
        help="dataset root directory",
        type=str,
        default=None)
    parser.add_argument(
        '--num_workers',
        dest='num_workers',
        help="number works of data loader",
        type=int,
        default=0)

    # params of prediction
    parser.add_argument(
        '--batch_size',
        dest='batch_size',
        help='Mini batch size',
        type=int,
        default=32)
    parser.add_argument(
        '--model_file',
        dest='model_file',
        help='The path of model for evaluation',
        type=str,
        required=True)
    parser.add_argument(
        '--save_dir',
        dest='save_dir',
        help='The directory for saving the inference results',
        type=str,
        default='./outputs/result')
    parser.add_argument(
        '--device',
        dest='device',
        help='device for prediction',
        type=str,
        default="cuda")

    return parser.parse_args()
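
Downstream, the parsed arguments would typically drive a lookup into MODELS. A hedged sketch of that call site, assuming the values of MODELS are zero-argument constructors (this excerpt does not show them):

args = parse_args()
if args.model_name not in MODELS.keys():
    raise ValueError('Unknown model {}; expected one of {}'.format(
        args.model_name, list(MODELS.keys())))
model = MODELS[args.model_name]()  # assumed constructor signature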
Example #3
def parse_args():
    parser = argparse.ArgumentParser(description='Model evaluation')

    # params of model
    parser.add_argument(
        '--model_name',
        dest='model_name',
        help='Model type for evaluation, which is one of {}'.format(
            str(list(MODELS.keys()))),
        type=str,
        default='FarNet')

    # params of dataset
    parser.add_argument(
        '--dataset',
        dest='dataset',
        help="The dataset you want to evaluation, which is one of {}".format(
            str(list(DATASETS.keys()))),
        type=str,
        default='Map')

    parser.add_argument('--dataset_root',
                        dest='dataset_root',
                        help="dataset root directory",
                        type=str,
                        default=None)

    # params of evaluate
    parser.add_argument("--input_size",
                        dest="input_size",
                        help="The image size for net inputs.",
                        nargs=2,
                        default=[256, 256],
                        type=int)

    parser.add_argument('--model_dir',
                        dest='model_dir',
                        help='The path of model for evaluation',
                        type=str,
                        default=None)

    return parser.parse_args()
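
Note that --input_size is declared with nargs=2 and type=int, so argparse consumes exactly two tokens and converts each to int:

args = parse_args()       # e.g. invoked as: eval.py --input_size 512 512
print(args.input_size)    # [512, 512]; the default [256, 256] applies if omitted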
Example #4
    return model.keras


if __name__ == '__main__':

    parser = argparse.ArgumentParser(
        description="Train a fiber tracking model")

    parser.add_argument("config_path",
                        type=str,
                        nargs="?",
                        help="Path to model config.")

    parser.add_argument("--model_name",
                        type=str,
                        choices=list(MODELS.keys()),
                        help="Name of model to be trained.")

    parser.add_argument("--model_type",
                        type=str,
                        choices=["prior", "conditional"],
                        help="Specify if model has type conditional or prior.")

    parser.add_argument("--train_path",
                        type=str,
                        help="Path to training samples.")

    parser.add_argument("--eval",
                        type=str,
                        dest="eval_path",
                        help="Path to evaluation samples.")
Example #5
                    type=int,
                    default=2000,
                    help='The step number when beta reaches 1.0')
parser.add_argument(
    '--anneal_cap',
    type=float,
    default=0.2,
    help='Upper limit of increasing beta. Set this as the best beta found')

################
# Model
################
parser.add_argument('--model_code',
                    type=str,
                    default='vae',
                    choices=MODELS.keys())
parser.add_argument('--model_init_seed', type=int, default=None)
# DAE #
parser.add_argument('--dae_num_items',
                    type=int,
                    default=None,
                    help='Number of total items')
parser.add_argument('--dae_num_hidden',
                    type=int,
                    default=0,
                    help='Number of hidden layers in DAE')
parser.add_argument('--dae_hidden_dim',
                    type=int,
                    default=600,
                    help='Dimension of hidden layer in DAE')
parser.add_argument('--dae_latent_dim',
Example #6
import pytorch_retinanet.model_pnasnet
import pytorch_retinanet.model_resnet
import pytorch_retinanet.model_se_resnext
import pytorch_retinanet.model_xception
import torch
from config import DATA_DIR, IMG_SIZE, RESULTS_DIR, TEST_DIR, WEIGHTS_DIR
from datasets.test_dataset import TestDataset
from models import MODELS
from torch import nn, optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torchvision import datasets, models, transforms
from utils.logger import Logger
from utils.my_utils import set_seed

model_configs = MODELS.keys()


def load_model(checkpoint: str) -> nn.Module:
    """
    Helper to load a serialized model, move it to the available device,
    and set it to eval mode.
    """
    print(f"Loading model from: {checkpoint}")
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load model
    model = torch.load(checkpoint)
    model = model.to(device)
    model.eval()
    # model = torch.nn.DataParallel(model).cuda()
    return model
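
Note that torch.load(checkpoint) deserializes a whole pickled model object rather than a state_dict, which is why no architecture is constructed first. A hypothetical call site, reusing the imported WEIGHTS_DIR (the file name is illustrative):

model = load_model(f"{WEIGHTS_DIR}/retinanet_best.pth")  # hypothetical checkpoint name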
Example #7
def parse_args():
    parser = argparse.ArgumentParser(description='Model training')

    # params of model
    parser.add_argument(
        '--model_name',
        dest='model_name',
        help='Model type for training, which is one of {}'.format(
            str(list(MODELS.keys()))),
        type=str,
        default='FarNet')

    # params of dataset
    parser.add_argument(
        '--dataset',
        dest='dataset',
        help="The dataset you want to train, which is one of {}".format(
            str(list(DATASETS.keys()))),
        type=str,
        default='ImagePairs')
    parser.add_argument('--train_root',
                        dest='train_root',
                        help="train dataset root directory",
                        type=str,
                        required=True)
    parser.add_argument('--val_root',
                        dest='val_root',
                        help="val dataset root directory",
                        type=str,
                        default=None)
    parser.add_argument('--num_workers',
                        dest='num_workers',
                        help="number works of data loader",
                        type=int,
                        default=0)
    parser.add_argument('--device',
                        dest='device',
                        help='device for training',
                        type=str,
                        default="cuda")

    # params of training
    parser.add_argument('--epochs',
                        dest='epochs',
                        help='epochs for training',
                        type=int,
                        default=20)
    parser.add_argument('--batch_size',
                        dest='batch_size',
                        help='Mini-batch size per GPU or CPU',
                        type=int,
                        default=32)
    parser.add_argument('--lr',
                        dest='lr',
                        help='Learning rate',
                        type=float,
                        default=0.0005)
    parser.add_argument('--resume',
                        dest='resume',
                        help='The checkpoint path to resume training from',
                        type=str,
                        default=None)
    parser.add_argument('--save_dir',
                        dest='save_dir',
                        help='The directory for saving the model snapshot',
                        type=str,
                        default='./outputs')
    parser.add_argument('--logs_dir',
                        dest='logs_dir',
                        help='The directory for saving the log message',
                        type=str,
                        default='./logs')

    return parser.parse_args()
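
A hedged sketch of how these arguments might be consumed. DATASETS is assumed to follow the same name-to-class pattern as MODELS, and the constructor signatures are hypothetical:

args = parse_args()
model = MODELS[args.model_name]()            # assumed zero-arg constructor
dataset_cls = DATASETS[args.dataset]
train_set = dataset_cls(args.train_root)     # hypothetical signature
val_set = dataset_cls(args.val_root) if args.val_root else None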
Example #8
                    help='Which LR scheduler to use', choices=['warmup_cos', 'warmup_linear', 'cos', 'step'])
parser.add_argument('--num_warmup_steps', type=int, default=10, help='Number of warmup epochs for a warmup scheduler')
parser.add_argument('--decay_step', type=int, default=15, help='Decay step for StepLR')
parser.add_argument('--gamma', type=float, default=0.1, help='Gamma for StepLR')
# epochs #
parser.add_argument('--num_epochs', type=int, default=100, help='Number of epochs for training')
# logger #
parser.add_argument('--log_period_as_iter', type=int, default=12800)
# evaluation #
parser.add_argument('--metric_ks', nargs='+', type=int, default=[10, 20, 50], help='ks for Metric@k')
parser.add_argument('--best_metric', type=str, default='NDCG@10', help='Metric for determining the best model')

################
# Model
################
parser.add_argument('--model_code', type=str, default='bert', choices=MODELS.keys())
parser.add_argument('--model_init_seed', type=int, default=None)
# BERT #
parser.add_argument('--bert_max_len', type=int, default=None, help='Length of sequence for bert')
parser.add_argument('--bert_num_items', type=int, default=None, help='Number of total items')
parser.add_argument('--bert_hidden_units', type=int, default=None, help='Size of hidden vectors (d_model)')
parser.add_argument('--bert_user_hidden_units', type=int, default=None, help='Size of user embeddings')
parser.add_argument('--bert_num_blocks', type=int, default=None, help='Number of transformer layers')
parser.add_argument('--bert_num_heads', type=int, default=None, help='Number of heads for multi-head attention')
parser.add_argument('--bert_dropout', type=float, default=None, help='Dropout probability to use throughout the model')
parser.add_argument('--bert_mask_prob', type=float, default=None, help='Probability for masking items in the training sequence')
parser.add_argument('--bert_force_mask_last', action='store_true',
                    help='Whether to force only the most recent N items to be used')
parser.add_argument('--bert_p_only_mask_last', type=float, default=0.15,
                    help='Probability of only masking last training item in a sequence')
parser.add_argument('--bert_p_window', type=float, default=0.5,
Example #9
    if _input_dir.joinpath('images').is_dir():
        _input_dir = _input_dir.joinpath('images')

    _pattern = _args.filePattern

    _group_by = _args.groupBy
    if 'c' not in _group_by:
        _error_messages.append(f'Grouping Variables must contain \'c\'. Got {_group_by} instead.')

    _selector_name = _args.selectionCriterion
    if _selector_name not in SELECTORS.keys():
        _error_messages.append(f'--tileSelectionCriterion {_selector_name} not found. '
                               f'Must be one of {list(SELECTORS.keys())}.')

    _model_name = _args.model
    if _model_name not in MODELS.keys():
        _error_messages.append(f'--model {_model_name} not found. Must be one of {list(MODELS.keys())}.')

    _channel_overlap = _args.channelOverlap

    _kernel_size = _args.kernelSize
    if _kernel_size not in ('1x1', '3x3', '5x5'):
        _error_messages.append(f'--kernelSize must be one of \'1x1\', \'3x3\', \'5x5\'. '
                               f'Got {_kernel_size} instead.')
    _kernel_size = int(_kernel_size.split('x')[0])

    _channel_ordering: list[int] = list() if _args.channelOrdering == "" else list(map(int, str(_args.channelOrdering).split(',')))
    if any(_c < 0 for _c in _channel_ordering):
        _error_messages.append('--channelOrdering must be a comma-separated string of non-negative integers.')

    _output_dir = Path(_args.outDir).resolve()
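
The excerpt accumulates problems in _error_messages instead of failing on the first one, so the caller can report every invalid argument at once. A sketch of that consuming step, which this excerpt does not show:

if _error_messages:
    raise ValueError('Invalid arguments:\n' + '\n'.join(_error_messages))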
Example #10
from sklearn.model_selection import ShuffleSplit
from higgs_geant import normalize_weight
from higgs_geant import split_train_test
from higgs_geant import split_data_label_weights

from higgs_4v_pandas import tau_energy_scale
from higgs_4v_pandas import jet_energy_scale
from higgs_4v_pandas import lep_energy_scale
from higgs_4v_pandas import soft_term
from higgs_4v_pandas import nasty_background

from nll import HiggsNLL
from models import higgsml_models
from models import MODELS
ARG_MODELS = MODELS.keys()


def parse_args():
    # TODO : more descriptive msg.
    parser = argparse.ArgumentParser(description="Training launcher")

    parser.add_argument("--verbosity",
                        "-v",
                        type=int,
                        choices=[0, 1, 2],
                        default=0,
                        help="increase output verbosity")

    # DATASET CHOICE
    # parser.add_argument('--data', help='chosen dataset',