Example #1
def get_kernel_exp_list(dataset,
                        loss,
                        ssn_const_lm,
                        ssn_grow_lm,
                        slbfgs_lr,
                        slbfgs_lm,
                        grow_only=False):
    exp_list = hu.cartesian_exp_group({
        "dataset": dataset,
        "model": model,
        "loss_func": loss,
        "acc_func": ["logistic_accuracy"],
        "opt": [{"name": "ssn", "lm": ssn_grow_lm}],
        "batch_size": [100],
        "max_epoch": max_epoch,
        "runs": run_list,
        "batch_grow_factor": batch_grow_factor,
        "batch_size_max": batch_size_max,
    })

    if not grow_only:
        exp_list += hu.cartesian_exp_group({
            "dataset": dataset,
            "model": model,
            "loss_func": loss,
            "acc_func": ["logistic_accuracy"],
            "opt": opt_list + [{
                "name": "ssn",
                "lm": ssn_const_lm
            }, {
                "name": "slbfgs",
                "lm": slbfgs_lm,
                "history_size": 10,
                "lr": slbfgs_lr
            }],
            "batch_size": [100],
            "max_epoch": max_epoch,
            "runs": run_list
        })

    return exp_list
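
A hypothetical usage sketch (not from the original source): every argument value below is illustrative, and the function additionally relies on module-level globals (model, opt_list, max_epoch, run_list, batch_grow_factor, batch_size_max) that the snippet above assumes are already defined.

# Hypothetical call; all values here are illustrative, not from the source.
exp_list = get_kernel_exp_list(dataset=["mushrooms"],
                               loss=["logistic_loss"],
                               ssn_const_lm=[1e-3],
                               ssn_grow_lm=[1e-6],
                               slbfgs_lr=[0.9],
                               slbfgs_lm=[1e-4])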
Example #2
def test_filter_exp_list():
    exp_list = hu.cartesian_exp_group({
        "dataset": ["imagenet", "mnist", "cifar10"],
        "model": "mlp",
        "batch_size": [1, 5]
    })

    exp_list1 = hu.filter_exp_list(exp_list,
                                   filterby_list=[{"dataset": "mnist"}])

    exp_list2 = hu.filter_exp_list(exp_list,
                                   filterby_list=[[{"dataset": "mnist"}]])

    exp_list = hu.filter_exp_list(exp_list,
                                  filterby_list=[{"dataset": "mnist"},
                                                 {"dataset": "cifar10"}])
    visited = []
    for exp_dict in exp_list:
        assert exp_dict["dataset"] in ["mnist", "cifar10"]
        visited += [exp_dict["dataset"]]

    assert "mnist" in visited
    assert "cifar10" in visited
Example #3
    def test_cartesian_product(self):
        # test whether the cartesian product covers all needed variations
        exp_dict_1 = {'dataset': 'mnist', 'model': 'mlp', 'batch_size': 1}
        exp_dict_2 = {'dataset': 'mnist', 'model': 'mlp', 'batch_size': 5}
        exp_dict_3 = {'dataset': 'cifar10', 'model': 'mlp', 'batch_size': 1}
        exp_dict_4 = {'dataset': 'cifar10', 'model': 'mlp', 'batch_size': 5}

        exp_list = [exp_dict_1, exp_dict_2, exp_dict_3, exp_dict_4]
        exp_list_cartesian = hu.cartesian_exp_group({
            'dataset': ['mnist', 'cifar10'],
            'model': 'mlp',
            'batch_size': [1, 5]
        })

        exp_list_hash = [hu.hash_dict(e) for e in exp_list]
        exp_list_cartesian_hash = [hu.hash_dict(e) for e in exp_list_cartesian]

        # check if the # experiments is correct
        assert (len(exp_list_cartesian_hash) == len(exp_list_hash))

        # check that the hashes in the cartesian are all there
        for h in exp_list_hash:
            assert (h in exp_list_cartesian_hash)

        # check that every hash is unique
        assert (len(exp_list_cartesian_hash) == len(
            np.unique(exp_list_cartesian_hash)))
Example #4
def test_cartesian_product():
    # test whether the cartesian product covers all needed variations
    exp_dict_1 = {"dataset": "mnist", "model": "mlp", "batch_size": 1}
    exp_dict_2 = {"dataset": "mnist", "model": "mlp", "batch_size": 5}
    exp_dict_3 = {"dataset": "cifar10", "model": "mlp", "batch_size": 1}
    exp_dict_4 = {"dataset": "cifar10", "model": "mlp", "batch_size": 5}

    exp_list = [exp_dict_1, exp_dict_2, exp_dict_3, exp_dict_4]
    exp_list_cartesian = hu.cartesian_exp_group({
        "dataset": ["mnist", "cifar10"],
        "model": "mlp",
        "batch_size": [1, 5]
    })

    exp_list_hash = [hu.hash_dict(e) for e in exp_list]
    exp_list_cartesian_hash = [hu.hash_dict(e) for e in exp_list_cartesian]

    # check if the # experiments is correct
    assert len(exp_list_cartesian_hash) == len(exp_list_hash)

    # check that the hashes in the cartesian are all there
    for h in exp_list_hash:
        assert h in exp_list_cartesian_hash

    # check that every hash is unique
    assert len(exp_list_cartesian_hash) == len(
        np.unique(exp_list_cartesian_hash))
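
The test relies on hu.hash_dict producing one stable hash per configuration. A minimal sketch of that assumption (deterministic, content-based hashing; order-independence is an assumption about the API, not verified here):

# Assumption: hu.hash_dict hashes dict contents, so key order is irrelevant.
h1 = hu.hash_dict({"dataset": "mnist", "batch_size": 1})
h2 = hu.hash_dict({"batch_size": 1, "dataset": "mnist"})
assert h1 == h2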
Example #5
    def test_group_exp_list(self):
        exp_list = hu.cartesian_exp_group({
            'dataset': ['imagenet', 'mnist', 'cifar10'],
            'model': 'mlp',
            'batch_size': [1, 5]
        })

        list_of_exp_list = hr.group_exp_list(exp_list, groupby_list='dataset')
        for exp_list in list_of_exp_list:
            assert (len(set([exp_dict['dataset']
                             for exp_dict in exp_list])) == 1)
Example #6
def random_search(hp_lists, n_trials, n_runs=1):
    # Sample n_trials random configurations from each group in hp_lists,
    # merge them key-wise, and optionally replicate each trial over n_runs seeds.
    for i in range(len(hp_lists)):
        if i == 0:
            # draw the initial trials from the first hyperparameter group
            out = np.random.choice(hu.cartesian_exp_group(hp_lists[i]),
                                   n_trials,
                                   replace=True).tolist()
        if i == len(hp_lists) - 1:
            out = hu.ignore_duplicates(out)
            print('removed {} duplicates'.format(n_trials - len(out)))
            break
        # draw from the next group and merge it into the running trials
        to_add = np.random.choice(hu.cartesian_exp_group(hp_lists[i + 1]),
                                  n_trials,
                                  replace=True).tolist()
        out = [dict(out[j], **to_add[j]) for j in range(n_trials)]

    # replicate each trial across multiple seeds
    if n_runs == 1:
        return out
    else:
        out_n_runs = []
        for i in range(n_runs):
            out_n_runs += [
                dict(out[j], **{'seed': i}) for j in range(len(out))
            ]
        return out_n_runs
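
A hypothetical call, assuming numpy is imported as np and haven_utils as hu (as the function body requires); the hyperparameter names and values are illustrative:

hp_lists = [
    {"lr": [1e-2, 1e-3, 1e-4]},     # first group, sampled at i == 0
    {"batch_size": [32, 64, 128]},  # merged key-wise into each trial
]
trials = random_search(hp_lists, n_trials=5, n_runs=2)
# Each trial combines one draw per group; with n_runs=2 every surviving
# trial appears twice, tagged 'seed': 0 and 'seed': 1.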
Example #7
def test_group_exp_list():
    exp_list = hu.cartesian_exp_group({
        "dataset": ["imagenet", "mnist", "cifar10"],
        "model": "mlp",
        "batch_size": [1, 5],
        "mode": {"fset": 1}
    })

    list_of_exp_list = hu.group_exp_list(exp_list,
                                         groupby_list=["dataset", "mode.fset"])

    list_of_exp_list = hu.group_exp_list(exp_list, groupby_list="dataset")
    for exp_list in list_of_exp_list:
        assert len(set([exp_dict["dataset"] for exp_dict in exp_list])) == 1
Example #8
    def test_filter_exp_list(self):
        exp_list = hu.cartesian_exp_group({
            'dataset': ['imagenet', 'mnist', 'cifar10'],
            'model': 'mlp',
            'batch_size': [1, 5]
        })

        exp_list = hr.filter_exp_list(exp_list,
                                      filterby_list=[{'dataset': 'mnist'},
                                                     {'dataset': 'cifar10'}])
        visited = []
        for exp_dict in exp_list:
            assert (exp_dict['dataset'] in ['mnist', 'cifar10'])
            visited += [exp_dict['dataset']]

        assert ('mnist' in visited)
        assert ('cifar10' in visited)
Example #9
EXP_GROUPS['mnist_full'] = hu.cartesian_exp_group({
    'batch_size': 32,
    'batch_size_val': 1024,
    'dataset': 'mnist_full',
    'max_epoch': 50,
    'max_cycle': 100,
    'opt': {'name': 'sgd', 'lr': 1e-3},
    'model': {'name': 'clf', 'base': 'lenet'},
    'active_learning': [
        {'ndata_to_label': 32, 'name': 'random', 'ndata_to_label_init': 32},
        {'ndata_to_label': 32, 'batch_size_pool': 128, 'n_mcmc': 50,
         'name': 'bald', 'ndata_to_label_init': 32},
        {'ndata_to_label': 32, 'batch_size_pool': 128, 'n_mcmc': 50,
         'name': 'entropy', 'ndata_to_label_init': 32},
    ]
})
Example #10
from haven import haven_utils as hu

EXP_GROUPS = {
    'mnist': hu.cartesian_exp_group({
        'lr': [1e-3, 1e-4],
        'batch_size': [32, 64]
    })
}
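
The cross-product semantics that the tests in Examples #3 and #4 verify can be reproduced without haven; a minimal dependency-free sketch for the list-valued case:

import itertools

base = {"lr": [1e-3, 1e-4], "batch_size": [32, 64]}
keys = list(base)
exp_list = [dict(zip(keys, vals)) for vals in itertools.product(*base.values())]
assert len(exp_list) == 4  # 2 learning rates x 2 batch sizes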
Example #11
EXP_GROUPS["infnet"] = hu.cartesian_exp_group({
    'batch_size':
    8,
    'num_channels':
    1,
    'dataset': [
        {
            'name': 'covid19_v2_mixed',
            'n_classes': 2
        },
        {
            'name': 'covid19_v2',
            'n_classes': 2
        },
        # {'name':'covid19', 'n_classes':2},
    ],
    'dataset_size': [
        {
            'train': 10,
            'val': 'all'
        },
        #  {'train':10, 'val':'all'},
        {
            'train': 15,
            'val': 'all'
        },
        {
            'train': 20,
            'val': 'all'
        },
        {
            'train': 25,
            'val': 'all'
        },
        {
            'train': 30,
            'val': 'all'
        },
        {
            'train': 35,
            'val': 'all'
        },
        {
            'train': 'all',
            'val': 'all'
        },
    ],
    'max_epoch': [100],
    'optimizer': ["adam"],
    'lr': [
        1e-4,
    ],
    'model': [
        #  {'name':'semseg', 'loss':'joint_cross_entropy',
        #                  'base':'fcn8_vgg16',"clip_grad":True,
        #                   'n_channels':3,'n_classes':1},

        # {'name':'semseg', 'loss':'joint_cross_entropy',
        #                  'base':'unet_resnet',
        #                   'n_channels':3,'n_classes':1},
        # {'name':'semseg', 'loss':'joint_cross_entropy',
        #                  'base':'unet_resnet',"clip_grad":True,
        #                   'n_channels':3,'n_classes':1},
        {
            'name': 'semseg',
            'loss': 'joint_cross_entropy',
            'base': 'fcn8_vgg16',
            'n_channels': 3,
            'n_classes': 1
        },
        {
            'name': 'semseg',
            'loss': 'joint_only',
            'base': 'infnet',
            'n_channels': 3
        },
    ]
})
Example #12
        score_df = pd.DataFrame(score_list)
        print(score_df.tail())
        hu.torch_save(model_path, model.get_state_dict())
        hu.save_pkl(score_list_path, score_list)
        print('Checkpoint Saved: %s' % savedir)

    print('experiment completed')


# Define exp groups for parameter search
EXP_GROUPS = {
    'mnist': hu.cartesian_exp_group({
        'dataset': 'mnist',
        'model': 'mlp',
        'max_epoch': 20,
        'lr': [1e-3, 1e-4],
        'batch_size': [32, 64]
    })
}


# Dataset
# -------
def get_loader(dataset_name, datadir, split, batch_size):
    if dataset_name == 'mnist':
        transform = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize((0.5, ), (0.5, ))
        ])
Example #13
        'train': 'all',
        'val': 'all'
    },
    'optimizer': ['adam'],
    'lr': [1e-5]
}

EXP_GROUPS['trancos_debug'] = {
    "dataset": {
        'name': 'trancos',
        'transform': 'rgb_normalize'
    },
    "model": {
        'name': 'lcfcn',
        'base': "fcn8_vgg16"
    },
    "batch_size": [1, 5, 10],
    "max_epoch": [100],
    'dataset_size': [
        {'train': 1, 'val': 1},
        # {'train':'all', 'val':'all'},
    ],
    'optimizer': ['adam'],
    'lr': [1e-5]
}

EXP_GROUPS = {k: hu.cartesian_exp_group(v) for k, v in EXP_GROUPS.items()}
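
Given those cross-product semantics, a quick sanity check on the expanded group size (assuming every non-list value counts as a single option):

# 'trancos_debug': batch_size (3) x max_epoch (1) x dataset_size (1)
#                  x optimizer (1) x lr (1) -> 3 experiments
assert len(EXP_GROUPS['trancos_debug']) == 3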
Example #14
                suffix = '_c3'

            if n_classes == 3 and (dataset == 'covid19_v1'
                                   or dataset == 'covid19_v3_mixed'):
                continue
            EXP_GROUPS["%sweakly_%s%s" %
                       (prefix, dataset, suffix)] = hu.cartesian_exp_group({
                           'batch_size': [8],
                           'num_channels': 1,
                           'dataset': [
                               {'name': dataset, 'n_classes': n_classes},
                           ],
                           'dataset_size': dataset_size,
                           'max_epoch': [101],
                           'optimizer': ["adam"],
                           'lr': [1e-4],
                           'model': model_list
                       })

            EXP_GROUPS["%sweakly_%s%s_count" %
                       (prefix, dataset, suffix)] = hu.cartesian_exp_group({
                           'batch_size': [8],
                           'num_channels': 1,
Example #15
 hu.cartesian_exp_group({
     'name': 'balanced-font-chars_n=1000000_2020-Oct-19.h5py',
     # 'name':'default_n=100000_2020-Oct-19.h5py', NOTE: for debugging
     'backbone': 'fewshot_synbols',
     "width": 32,
     "height": 32,
     "channels": 3,
     'task': [{
         'train': 'char', 'val': 'char', 'test': 'char', 'ood': 'font'
     }, {
         'train': 'font', 'val': 'font', 'test': 'font', 'ood': 'char'
     }],
     'mask': ['stratified_char', 'stratified_font'],
     'trim_size': [None],
     "z_dim_multiplier": 2 * 2,
     # NOTE: start 5-way 5-shot 15-query
     'nclasses_train': 5,
     'nclasses_val': 5,
     'nclasses_test': 5,
     'support_size_train': 5,
     'support_size_val': 5,
     'support_size_test': 5,
     'query_size_train': 15,
     'query_size_val': 15,
     'query_size_test': 15,
     # NOTE: end 5-way 5-shot 15-query
     # NOTE: for debugging, only 5 iters:
     # 'train_iters': 5,
     # 'val_iters': 5,
     # 'test_iters': 5,
     # 'ood_iters': 5,
     'train_iters': 500,
     'val_iters': 500,
     'test_iters': 500,
     'ood_iters': 500,
 })
Example #16
c_list = [0.2, 0.5]
sps_list = []

for c, adapt_flag in itertools.product(c_list, ['smooth_iter']):
    sps_list += [{'name': "sps", "c": c, 'adapt_flag': adapt_flag}]

opt_list = sps_list + [{'name': 'adam'}]

EXP_GROUPS = {}

# define interpolation exp groups
EXP_GROUPS['kernel'] = hu.cartesian_exp_group({
    "dataset": kernel_datasets,
    "model": ["linear"],
    "loss_func": ['logistic_loss'],
    "acc_func": ["logistic_accuracy"],
    "opt": opt_list,
    "batch_size": [100],
    "max_epoch": [35],
    "runs": run_list
})

EXP_GROUPS['mf'] = hu.cartesian_exp_group({
    "dataset": ["matrix_fac"],
    "model": ["matrix_fac_1", "matrix_fac_4", "matrix_fac_10", "linear_fac"],
    "loss_func": ["squared_loss"],
    "opt": opt_list,
    "acc_func": ["mse"],
    "batch_size": [100],
    "max_epoch": [50],
    "runs":
Example #17
EXP_GROUPS["synbols_count"] = hu.cartesian_exp_group({
    'batch_size':
    1,
    'num_channels':
    1,
    'dataset': [
        {
            'name': 'synbols',
            'mode': 'crowded',
            'n_classes': 2,
            'transform': 'basic',
            'transform_mode': None
        },
        {
            'name': 'synbols',
            'mode': 'fixed_scale',
            'n_classes': 2,
            'transform': 'basic',
            'transform_mode': None
        },
        {
            'name': 'synbols',
            'mode': 'no_overlap',
            'n_classes': 2,
            'transform': 'basic',
            'transform_mode': None
        },
        {
            'name': 'synbols',
            'mode': 'overlap',
            'n_classes': 2,
            'transform': 'basic',
            'transform_mode': None
        },
    ],
    'dataset_size': [
        #  {'train':10, 'val':1, 'test':1},
        {
            'train': 'all',
            'val': 'all'
        }
    ],
    'runs': [0],
    'max_epoch': [100],
    'optimizer': ["adam"],
    'lr': [
        1e-3,
    ],
    'model':
    count_list,
})
Example #18
import itertools, copy
EXP_GROUPS = {}

EXP_GROUPS['pascal_point_level'] = hu.cartesian_exp_group({
    'batch_size': 1,
    'num_channels': 1,
    'dataset': [{'name': 'pascal'}],
    'dataset_size': {'train': 'all', 'val': 'all'},
    # 'dataset_size': {'train': 10, 'val': 10},
    'max_epoch': [20],
    'optimizer': ["adam"],
    'lr': [1e-5],
    'model': {
        'name': 'semseg',
        'loss': 'point_level',
        'base': 'fcn8_vgg16',
        'n_channels': 3,
        'n_classes': 21
    }
})

EXP_GROUPS['pascal_cross_entropy'] = hu.cartesian_exp_group({
    'batch_size':
Example #19
    },
    {
        "name": "plain_radam"
    },
]
# -------------- ## -------------- ## -------------- ## -------------- #
# Setting up benchmarks
# ------------------ #

# ------------------ #
# II. Convex with interpolation
benchmarks_list = ['syn', 'kernels']
# all optimizers for small exps
opt_list = adaptive_first_list + baselines_list + constant_list + sgd_list
for benchmark in benchmarks_list:
    EXP_GROUPS['nomom_%s' % benchmark] = hu.cartesian_exp_group(
        get_benchmark(benchmark, opt_list))

# ------------------ #
# III. Easy nonconvex
benchmarks_list = ['mnist', 'mf']
opt_list = adaptive_first_sls_armijo_list + amsgrad_constant_list + adam_constant_list + baselines_list + sgd_list + adaptive_first_sps_list + adaptive_first_sls_lipschitz_list
for benchmark in benchmarks_list:
    EXP_GROUPS['nomom_%s' % benchmark] = hu.cartesian_exp_group(
        get_benchmark(benchmark, opt_list))

# ------------------ #
# IV. Large-scale nonconvex
benchmarks_list = ['cifar10_nobn', 'cifar100_nobn', 'cifar10', 'cifar100']
opt_list = adaptive_first_sls_armijo_list + baselines_list + adam_constant_list + amsgrad_constant_list + sgd_list + adaptive_first_sps_list + adaptive_first_sls_lipschitz_list
for benchmark in benchmarks_list:
    EXP_GROUPS['nomom_%s' % benchmark] = hu.cartesian_exp_group(
Example #20
EXP_GROUPS['cifar'] = hu.cartesian_exp_group({
    "dataset": [{
        'name': 'cifar10',
        'transform_lvl': 1.5,
        'colorjitter': False,
        'val_transform': 'identity'
    }],
    "dataset_size": [{
        'train': None,
        'test': None
    }],
    "valratio": [0.2],
    'model': [{
        'name': 'blvl',
        'netC': {
            "name": "resnet18_meta_2",
            "opt": {
                'name': 'sgd',
                'momentum': 0.9,
                'sched': True,
                'lr': 0.1,
                "weight_decay": 5e-4
            }
        },
        'netA': netA
    } for netA in [{
        "name": 'small_affine',
        "opt": {
            'name': 'sgd',
            'lr': 0.2,
            'sched': False,
            'momentum': 0.9,
            "weight_decay": 0.01
        },
        "transform": "affine",
        "factor": 1
    }, {
        "name": 'affine_color',
        "opt": {
            'name': 'sgd',
            'lr': 0.2,
            'sched': False,
            'momentum': 0.9,
            "weight_decay": 0.01
        },
        "transform": "affine",
        "factor": 1
    }, None]],
    "n_inner_iter": [1],
    "batch": {
        "size": 128,
        "factor": 1
    },
    "niter": [201],
    "fixedSeed": [6442],
    "predParams": [None],
    "mixTrainVal": [True],
    "testTimeDA": [0],
})
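
A sanity-check sketch: the 'model' comprehension yields three entries (two netA variants plus None) and every other key is a singleton, so the group should expand to three experiments, assuming cross-product semantics:

assert len(EXP_GROUPS['cifar']) == 3  # 2 netA configs + netA=None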
Example #21
    'learning_epoch': 10,
    'heuristic': ['bald', 'random', 'entropy'],
    'iterations': [20],
    'max_epoch': 200,
    'imagenet_pretraining': [True],
}

EXP_GROUPS = {
    'active_char_missing_calibrated':
    hu.cartesian_exp_group(
        dict({
            **model_cfg,
            **dict(model='calibrated_active_learning')
        },
             calibrate=[True, False],
             dataset={
                 'path': 'missing-symbol_n=100000_2020-Oct-19.h5py',
                 'name': 'active_learning',
                 'task': 'char',
                 'initial_pool': 2000,
                 'seed': 1337
             })),
    'active_char_label_noise_calibrated':
    hu.cartesian_exp_group(
        dict({
            **model_cfg,
            **dict(model='calibrated_active_learning')
        },
             calibrate=[True, False],
             dataset={
                 'path': 'default_n=100000_2020-Oct-19.h5py',
Example #22
EXP_GROUPS['fish_budget'] = hu.cartesian_exp_group({
    'batch_size': [1],
    'num_channels': 1,
    'dataset': [
        # {'name': 'JcuFish', 'n_classes': 2, 'n_fish_images': 15},
        {'name': 'JcuFish', 'n_classes': 2, 'n_fish_images': 10},
        # {'name': 'JcuFish', 'n_classes': 2, 'n_fish_images': 8},
        # {'name': 'JcuFish', 'n_classes': 2, 'n_fish_images': 6},
    ],
    'dataset_size': [
        # {'train': 100, 'val': 'all', 'test': 'all'},
        {'train': 'all', 'val': 'all'},
    ],
    'max_epoch': [1000],
    'optimizer': ["adam"],
    'lr': [1e-5],
    'model': [{
        'name': 'semseg',
        'loss': l,
        'base': 'fcn8_vgg16',
        'n_channels': 3,
        'n_classes': 2,
    } for l in [
        'cross_entropy',
    ]],
})
Example #23
    "dataset": 'mnist',
    "batch_size": 128,
    "opt": {
        'name': 'adam',
        'lr': 1e-3
    }
}, {
    "dataset": 'fashionmnist',
    "model": 'mlp',
    "batch_size": 128,
    "opt": {
        'name': 'adam',
        'lr': 1e-3
    }
}]

EXP_GROUPS['group1'] = hu.cartesian_exp_group({
    "dataset": ['mnist', 'fashionmnist'],
    "batch_size": [128],
    "opt": [{
        'name': 'adam',
        'lr': 1e-3
    }, {
        'name': 'sgd',
        'lr': 1e-3
    }, {
        'name': 'adagrad',
        'lr': 1e-3
    }],
})
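
Likewise, 'group1' crosses two datasets with three optimizer dicts; a hedged check assuming cross-product semantics:

assert len(EXP_GROUPS['group1']) == 2 * 3  # datasets x optimizer configs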
Example #24
    "channels": 3,
    "name": "natural-patterns_n=100000_2020-Oct-20.h5py",
    "task": "char",
    "augmentation": augmentation,
    "mask": "random",
}
EXP_GROUPS = {}
EXP_GROUPS["deepinfomax"] = hu.cartesian_exp_group({
    'lr':
    0.0001,
    'beta_annealing':
    True,
    'ngpu':
    1,
    'batch_size':
    256,
    'seed': [2, 42, 128],
    'amp':
    0,
    'min_lr_decay':
    1e-3,
    'model':
    "deepinfomax",
    'backbone': [infomax],
    'z_dim': [64],
    'max_epoch':
    200,
    'episodic':
    False,
    'dataset': [solid, camouflage, default, natural]
})
Example #25
def get_syn_exp_list(loss, grow_only=False):
    exp_list = hu.cartesian_exp_group({
        "dataset": syn_datasets,
        "model": model,
        "loss_func": loss,
        "acc_func": ["logistic_accuracy"],
        "n_samples": syn_n_samples,
        "d": syn_dims,
        "opt": [{
            "name": "ssn",
            "lm": 1e-6
        }],
        "margin": margin_list,
        "batch_size": [100],
        "max_epoch": max_epoch,
        "runs": run_list,
        "batch_grow_factor": batch_grow_factor,
        "batch_size_max": batch_size_max,
    })
    if grow_only:
        return exp_list
    exp_list += hu.cartesian_exp_group({
        "dataset": syn_datasets,
        "model": model,
        "loss_func": loss,
        "acc_func": ["logistic_accuracy"],
        "n_samples": syn_n_samples,
        "d": syn_dims,
        "opt": opt_list + [{
            "name": "ssn",
            "lm": 1e-3
        }, {
            "name": "slbfgs",
            'line_search_fn': 'sls',
            'lr': 0.9,
            "lm": 1e-4,
            "history_size": 10
        }],
        "margin": margin_list,
        "batch_size": [100],
        "max_epoch": max_epoch,
        "runs": run_list
    })

    exp_list += hu.cartesian_exp_group({
        "dataset": syn_datasets,
        "model": model,
        "loss_func": loss,
        "acc_func": ["logistic_accuracy"],
        "n_samples": syn_n_samples,
        "d": syn_dims,
        "opt": [{
            "name": "ssn",
            "lm": 0
        }, {
            "name": "lbfgs",
            "history_size": 10,
            "max_iter": 2
        }],
        "margin": margin_list,
        "batch_size": ["full"],
        "max_epoch": max_epoch,
        "runs": run_list
    })
    return exp_list
Example #26
EXP_GROUPS['cows_counting'] = hu.cartesian_exp_group({
    'batch_size': 1,
    'num_channels': 1,
    'dataset': splits2,
    'dataset_size': [
        {'train': 'all', 'val': 'all'},
    ],
    # 'dataset_size':
    'runs': RUNS,
    'max_epoch': [500],
    'optimizer': ["adam"],
    'lr': [1e-5],
    'model': [
        #   {'name':'semseg', 'loss':'lcfcn_consistency',
        #               'base':'fcn8_vgg16',
        #               'n_channels':3, 'n_classes':1},
        {
            'name': 'semseg',
            'loss': 'lcfcn_nopretrain',
            'base': 'fcn8_vgg16',
            'n_channels': 3,
            'n_classes': 1
        },
        {
            'name': 'semseg',
            'loss': 'density',
            'base': 'fcn8_vgg16',
            'n_channels': 3,
            'n_classes': 1
        },
        {
            'name': 'semseg',
            'loss': 'lcfcn',
            'base': 'fcn8_vgg16',
            'n_channels': 3,
            'n_classes': 1
        },
    ]
})
Example #27
from haven import haven_utils as hu
import itertools, copy
EXP_GROUPS = {}

EXP_GROUPS['pascal_point_level'] = hu.cartesian_exp_group({
    'batch_size': 1,
    'num_channels': 1,
    'dataset': [{'name': 'pascal'}],
    'dataset_size': {'train': 'all', 'val': 'all'},
    # 'dataset_size': {'train': 10, 'val': 10},
    'max_epoch': [20],
    'optimizer': ["adam"],
    'lr': [1e-5],
    'model': {
        'name': 'semseg',
        'loss': 'point_level',
        'base': 'fcn8_vgg16',
        'n_channels': 3,
        'n_classes': 21
    }
})

EXP_GROUPS['pascal_cross_entropy'] = hu.cartesian_exp_group({
    'batch_size': 1,
    'num_channels': 1,
    'dataset': [{'name': 'pascal'}],
    'dataset_size': {'train': 'all', 'val': 'all'},
    # 'dataset_size': {'train': 10, 'val': 10},
    'max_epoch': [20],
    'optimizer': ["adam"],
    'lr': [1e-5],
EXP_GROUPS["wisenet"] = hu.cartesian_exp_group({
    'model': {
        'name': 'wisenet',
        'loss': "one_head_sum_loss",
        'base': 'fcn8_vgg16',
        'n_channels': 3,
        'n_classes': 1
    },
    "batch_size":
    1,
    "max_epoch":
    500,
    'dataset_size': [
        {
            'train': 10,
            'val': 10,
            'test': 10
        },
        #  {'train': 'all', 'val': 'all'},
    ],
    "optimizer":
    "adam",
    "lr":
    1e-5,
    "dataset": [
        #  {'name':'pascal', 'transform':'flip', 'supervision':'seam', 'sbd':True},
        #  {'name':'pascal', 'transform':'basic', 'supervision':'seam', 'sbd':True},
        #  {'name':'pascal', 'transform':'basic', 'supervision':'full'},
        {
            'name': 'pascal',
            'transform': 'basic',
            'supervision': 'seam'
        },
        #  {'name':'pascal', 'transform':'flip', 'supervision':'seam'},
    ],
    "predict":
    "best_dice"
})
    "dp_prob": 0.3,
    "feature_extractor": "deepinfomax"
}
EXP_GROUPS = {}
EXP_GROUPS["vae"] = hu.cartesian_exp_group({
    'lr':
    0.0001,
    'beta_annealing':
    True,
    'ngpu':
    1,
    'beta': [0.01],
    'batch_size':
    256,
    'seed': [3],
    'amp':
    0,
    'min_lr_decay':
    1e-3,
    'model':
    "vae",
    'backbone': [biggan_infomax],
    'z_dim': [64],
    'max_epoch':
    200,
    'episodic':
    False,
    'hierarchical': [False],
    'dataset': [camouflage, default, solid]
})
Example #30
from haven import haven_utils as hu
import itertools, copy

EXP_GROUPS = {}

EXP_GROUPS["looc_trancos"] = hu.cartesian_exp_group({
    'batch_size': 1,
    'num_channels': 1,
    'dataset': [
        {'name': 'trancos', 'n_classes': 1},
    ],
    "attention": [{
        'name': "semseg",
        'ratio_top': 0.01,
        'select_prob': 0.5,
        'agg_score_method': 'mean',
        'box_score_method': 'center'
    }],
    'dataset_size': [
        # {'train':10, 'val':1, 'test':1},
        {'train': 'all', 'val': 'all'}
    ],
    'max_epoch': [100],
    'optimizer': ["adam"],
    'lr': [1e-5],
    'model': {
        'name': 'semseg_looc',
        'n_classes': 1,
        'base': 'fcn8_vgg16',
        'n_channels': 3,
        'loss': 'att_lcfcn'
    },
})