Example #1
def add_experiment_args(parser: ArgumentParser) -> None:
    """
    Adds the arguments used by all the models.
    :param parser: the parser instance
    """
    parser.add_argument('--dataset',
                        type=str,
                        required=True,
                        choices=DATASET_NAMES,
                        help='Which dataset to perform experiments on.')
    parser.add_argument('--model',
                        type=str,
                        required=True,
                        help='Model name.',
                        choices=get_all_models())
    parser.add_argument('--device', type=str, default=None)

    parser.add_argument('--lr',
                        type=float,
                        required=True,
                        help='Learning rate.')
    parser.add_argument('--batch_size',
                        type=int,
                        required=True,
                        help='Batch size.')
    parser.add_argument('--n_epochs',
                        type=int,
                        required=True,
                        help='The number of epochs for each task.')
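
Every example here gates --model on get_all_models(). In repositories following the mammoth layout this helper simply enumerates the files of the models/ package; the snippet below is a minimal sketch of that pattern, not the project's verified implementation (the models/ path and the underscore-to-hyphen naming are assumptions):

import os

def get_all_models():
    # sketch: one model per .py file in models/, so e.g.
    # models/der_pp.py would be exposed as the choice 'der-pp'
    return [f[:-3].replace('_', '-')
            for f in os.listdir('models')
            if f.endswith('.py') and not f.startswith('__')]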
Example #2
File: main.py Project: yxue3357/mammoth
def main():
    lecun_fix()
    parser = ArgumentParser(description='mammoth', allow_abbrev=False)
    parser.add_argument('--model',
                        type=str,
                        required=True,
                        help='Model name.',
                        choices=get_all_models())
    parser.add_argument('--load_best_args',
                        action='store_true',
                        help='Loads the best arguments for each method, '
                        'dataset and memory buffer.')
    add_management_args(parser)
    args = parser.parse_known_args()[0]
    mod = importlib.import_module('models.' + args.model)

    if args.load_best_args:
        parser.add_argument('--dataset',
                            type=str,
                            required=True,
                            choices=DATASET_NAMES,
                            help='Which dataset to perform experiments on.')
        if hasattr(mod, 'Buffer'):
            parser.add_argument('--buffer_size',
                                type=int,
                                required=True,
                                help='The size of the memory buffer.')
        args = parser.parse_args()
        if args.model == 'joint':
            best = best_args[args.dataset]['sgd']
        else:
            best = best_args[args.dataset][args.model]
        if args.model == 'joint' and args.dataset == 'mnist-360':
            args.model = 'joint_gcl'
        if hasattr(args, 'buffer_size'):
            best = best[args.buffer_size]
        else:
            best = best[-1]  # the -1 key stores the buffer-free configuration
        for key, value in best.items():
            setattr(args, key, value)
    else:
        get_parser = getattr(mod, 'get_parser')
        parser = get_parser()
        args = parser.parse_args()

    if args.seed is not None:
        set_random_seed(args.seed)

    if args.model == 'mer':
        args.batch_size = 1
    dataset = get_dataset(args)
    backbone = dataset.get_backbone()
    loss = dataset.get_loss()
    model = get_model(args, backbone, loss, dataset.get_transform())

    if isinstance(dataset, ContinualDataset):
        train(model, dataset, args)
    else:
        assert not hasattr(model, 'end_task') or model.NAME == 'joint_gcl'
        ctrain(args)
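
With --load_best_args, only the method, the dataset and (for rehearsal methods) the buffer size are passed on the command line; all remaining hyperparameters are filled in from best_args. An illustrative invocation (the model and dataset names are merely examples of valid choices):

python main.py --model er --load_best_args --dataset seq-cifar10 --buffer_size 500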
Example #3
def show_options():
    from models import get_all_models
    from samplers import get_all_multi_samplers, get_all_single_samplers
    print("models")
    print(',\n'.join(get_all_models()))
    print("single samplers")
    print(',\n'.join(get_all_single_samplers()))
    print("multi samplers")
    print(',\n'.join(get_all_multi_samplers()))
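
get_all_* helpers of this kind usually read from a name-to-class registry populated at import time. The sketch below shows one hypothetical way to back get_all_single_samplers() with such a registry (the decorator and dict are assumptions, not the project's code):

_SINGLE_SAMPLERS = {}

def register_single_sampler(name):
    # decorator that records a sampler class under its public name
    def wrap(cls):
        _SINGLE_SAMPLERS[name] = cls
        return cls
    return wrap

def get_all_single_samplers():
    # names in registration order, ready for help text or CLI choices
    return list(_SINGLE_SAMPLERS)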
Example #4
def create_tables():
    db.connect_db()
    # create the tables if they do not exist yet
    db.database.create_tables(get_all_models(), safe=True)
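
Unlike the previous examples, this get_all_models() must return peewee Model classes rather than name strings. A minimal sketch of how such a helper could collect them, assuming the project routes all tables through a common BaseModel (the database file and class names are assumptions):

from peewee import Model, SqliteDatabase

database = SqliteDatabase('app.db')

class BaseModel(Model):
    class Meta:
        database = database

def get_all_models():
    # all table classes declared as direct subclasses of BaseModel
    return BaseModel.__subclasses__()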
Example #5
def main():
    if torch.cuda.device_count() > 1:
        torch.set_num_threads(6 * torch.cuda.device_count())
    else:
        torch.set_num_threads(2)
    parser = ArgumentParser(description='mammoth', allow_abbrev=False)
    parser.add_argument('--model',
                        type=str,
                        required=True,
                        help='Model name.',
                        choices=get_all_models())
    parser.add_argument('--load_best_args',
                        action='store_true',
                        help='Loads the best arguments for each method, '
                        'dataset and memory buffer.')
    add_management_args(parser)
    args = parser.parse_known_args()[0]
    mod = importlib.import_module('models.' + args.model)

    if args.load_best_args:
        parser.add_argument('--dataset',
                            type=str,
                            required=True,
                            choices=DATASET_NAMES,
                            help='Which dataset to perform experiments on.')
        if hasattr(mod, 'Buffer'):
            parser.add_argument('--buffer_size',
                                type=int,
                                required=True,
                                help='The size of the memory buffer.')
        args = parser.parse_args()
        model = args.model
        if model == 'joint':
            model = 'sgd'
        best = best_args[args.dataset][model]
        if hasattr(args, 'buffer_size'):
            best = best[args.buffer_size]
        else:
            best = best[-1]  # the -1 key stores the buffer-free configuration
        for key, value in best.items():
            setattr(args, key, value)
    else:
        get_parser = getattr(mod, 'get_parser')
        parser = get_parser()
        args = parser.parse_args()

    if args.seed is not None:
        set_random_seed(args.seed)

    # 'joint' on seq-core50 is rerouted to plain SGD on the joint split
    off_joint = False
    if args.model == 'joint' and args.dataset == 'seq-core50':
        args.dataset = 'seq-core50j'
        args.model = 'sgd'
        off_joint = True

    dataset = get_dataset(args)

    # continual learning
    backbone = dataset.get_backbone()
    loss = dataset.get_loss()
    model = get_model(args, backbone, loss, dataset.get_transform())
    if off_joint:
        print('BEGIN JOINT TRAINING')
        jtrain(model, dataset, args)
    else:
        print('BEGIN CONTINUAL TRAINING')
        train(model, dataset, args)
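
This variant routes --model joint on seq-core50 through jtrain() instead of the continual loop; an illustrative invocation (flag values are examples only):

python main.py --model joint --load_best_args --dataset seq-core50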

Example #6

def get_cfg(name):
    module = sys.modules[__name__]
    name = name.lower()
    try:
        return getattr(module, '{}_cfg'.format(name))
    except AttributeError as e:
        raise ValueError("Model config {} not found".format(name)) from e


def build_model(name):
    cfg = get_cfg(name)
    cfg['name'] = name
    cfg['pretrained'] = False
    return model_builder.build(cfg)
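
get_cfg() resolves the attribute '<name>_cfg' in this module, so every buildable model needs a module-level config dict. A hypothetical entry (the keys are assumptions about what model_builder.build expects):

resnet50_cfg = {
    'backbone': 'resnet50',
    'num_classes': 751,
}

model = build_model('resnet50')  # injects name and pretrained=False before building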


@pytest.mark.parametrize("model", get_all_models())
def test_model(model):
    model = build_model(model)
    model.eval()
    test_input = torch.rand(1, 3, 256, 128)
    with torch.no_grad():
        endpoints = model(test_input, model.endpoints)
    print(model.create_endpoints())
    print(model.dimensions)
    # forward one image and check each endpoint's shape against the declared dimensions
    for key, dim in model.dimensions.items():
        assert endpoints[key].shape[1:] == dim, key
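
Because get_all_models() feeds pytest.mark.parametrize, pytest generates one test case per registered model; the whole sweep (or a single case) can be run with the standard selector:

pytest -k test_model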