def make_kwargs(args, seed, loss, loss_params_fn, radius):
    """Build (name, train kwargs) for one loss/radius/seed configuration.

    The loss parameters are produced by calling loss_params_fn(radius).
    """
    name = make_name(seed=seed, loss=loss, radius=radius)
    kwargs = app.train_kwargs(args, name)
    model_params = dict(
        use_desired_size=True,
        target_size=64,
        desired_template_scale=2.0,
        desired_search_radius=1.0,
        feature_arch='alexnet',
        feature_arch_params=None,
        feature_extra_conv_enable=False,
        join_type='single',
        join_arch='xcorr',
        join_params=dict(use_batch_norm=True),
        window_params=dict(
            normalize_method='mean',
            window_profile='hann',
            combine_method='mul',
        ),
        window_radius=1.0,
        arg_max_eps=0.01,
        wd=1e-4,
        loss_params=loss_params_fn(radius),
    )
    kwargs.update(seed=seed, model_params=model_params)
    return name, kwargs
def make_kwargs(args, seed, opt, opt_config, schedule, schedule_config, init):
    """Build (name, train kwargs) for one optimizer/schedule/lr-init/seed combo."""
    name = make_name(seed=seed, opt=opt, schedule=schedule, init=init)
    kwargs = app.train_kwargs(args, name)
    # Replace the optimizer settings that came from args.
    kwargs.update(opt_config)
    # Replace the learning-rate settings that came from args.
    kwargs.update(lr_init=init, **schedule_config)
    model_params = dict(
        use_desired_size=True,
        target_size=64,
        desired_template_scale=2.0,
        desired_search_radius=1.0,
        feature_arch='alexnet',
        feature_arch_params=None,
        join_type='single',
        join_arch='xcorr',
        join_params=dict(use_batch_norm=True),
        window_params=dict(
            normalize_method='mean',
            window_profile='hann',
            combine_method='mul',
        ),
        window_radius=1.0,
        arg_max_eps=0.01,
        # TODO: Study weight decay and loss config.
        wd=1e-4,
        loss_params=args.loss_params,
    )
    kwargs.update(seed=seed, model_params=model_params)
    return name, kwargs
def make_kwargs(args, feat, feat_config, weight, dims, dims_config, seed):
    """Build (name, train kwargs) for one feature/weight/dims/seed configuration.

    Unlike the desired-size variants, this uses explicit template/search sizes
    taken from dims_config.
    """
    name = make_name(feat=feat, weight=weight, dims=dims, seed=seed)
    kwargs = app.train_kwargs(args, name)
    model_params = dict(
        target_size=args.target_size,
        use_desired_size=False,
        template_size=dims_config['template_size'],
        search_size=dims_config['search_size'],
        feature_arch=feat_config.arch,
        feature_arch_params=feat_config.arch_params,
        feature_extra_conv_enable=feat_config.extra_conv_enable,
        feature_extra_conv_params=feat_config.extra_conv_params,
        join_type='single',
        join_arch='xcorr',
        join_params=dict(learn_spatial_weight=weight, use_batch_norm=True),
        window_params=dict(
            normalize_method='mean',
            window_profile='hann',
            combine_method='mul',
        ),
        window_radius=1.0,
        arg_max_eps=args.arg_max_eps,
        # TODO: Study weight decay and loss config.
        wd=1e-4,
        loss_params=args.loss_params,
    )
    kwargs.update(seed=seed, model_params=model_params)
    return name, kwargs
def make_kwargs(args, seed):
    """Build (name, train kwargs) for a single seed.

    Model parameters come straight from args; only the seed varies per run.
    """
    name = 'seed_{}'.format(seed)
    kwargs = app.train_kwargs(args)
    params = app.train_params_kwargs(args)
    params['seed'] = seed
    params['model_params'] = args.model_params
    kwargs['params_dict'] = params
    kwargs['resume'] = args.resume
    return name, kwargs
def make_kwargs(args, seed, join, join_config):
    """Build (name, train kwargs) for one join architecture and seed.

    The join arch/params are taken from join_config; the loss is a fixed
    balanced sigmoid with hard labels.
    """
    name = make_name(seed=seed, join=join)
    kwargs = app.train_kwargs(args, name)
    loss_params = dict(
        method='sigmoid',
        params=dict(
            balanced=True,
            pos_weight=1,
            label_method='hard',
            label_params=dict(positive_radius=0.3, negative_radius=0.3),
        ),
    )
    model_params = dict(
        use_desired_size=True,
        target_size=64,
        desired_template_scale=2.0,
        desired_search_radius=1.0,
        feature_arch='alexnet',
        feature_arch_params=None,
        join_type='single',
        join_arch=join_config['arch'],
        join_params=join_config['params'],
        window_params=dict(
            normalize_method='mean',
            window_profile='hann',
            combine_method='mul',
        ),
        window_radius=1.0,
        arg_max_eps=0.01,
        # TODO: Study weight decay and loss config.
        wd=1e-4,
        loss_params=loss_params,
    )
    kwargs.update(seed=seed, model_params=model_params)
    return name, kwargs