# Example #1
# Derive the number of classes from the training split and record it in opt.
training_loader = dataloaders['training']
opt.num_classes = len(training_loader.dataset.avail_classes)
print("num of classes", opt.num_classes)
print("num of training samples", len(training_loader))
"""============================================================================"""
#################### CREATE LOGGING FILES ###############
# Base sub-loggers; RL-specific ones are appended only when a learned
# sampler is in use.
sub_loggers = ['Train', 'Test', 'Model Grad']
if opt.use_learned_sampler:
    sub_loggers.extend(['RL-Policy', 'RL-Policy Grad', 'Val'])
# Central logger instance (optionally mirrored to Weights & Biases).
LOG = aux.LOGGER(opt, sub_loggers=sub_loggers, start_new=True,
                 log_to_wandb=opt.wandb_log)
"""============================================================================"""
#################### LOSS SETUP ####################
# The loss is constructed under 'random' sampling when a learned sampler is
# used; the sampling mode is restored to 'learned' right afterwards.
if opt.use_learned_sampler:
    opt.sampling = 'random'
criterion, to_optim = losses.loss_select(opt.loss, opt, to_optim)
if opt.use_learned_sampler:
    opt.sampling = 'learned'
criterion.to(opt.device)
"""============================================================================"""
############################################# vvv RL_SAMPLER vvv ##################################################
# Set up the learned (RL-based) sampler policy, if requested.
if opt.use_learned_sampler:
    # rl_sub_loggers = ['RL-Policy', 'RL-Policy Grad', 'Val']
    # RL_LOG = aux.LOGGER(opt, sub_loggers=rl_sub_loggers, start_new=False, log_to_wandb=opt.wandb_log)

    # Hyperparameters handed to the RL policy.
    # NOTE(review): this dict literal is not closed within this chunk — the
    # remaining entries and the closing brace appear to be missing; confirm
    # against the full file.
    general_pars = {
        'policy_lr': opt.policy_lr,  # learning rate for the policy network
        'logger': LOG,  # shared LOGGER instance created above
        'logname': 'RL-Policy',  # sub-logger name used for policy metrics
        'old_policy_update': opt.policy_old_update_iter,
        'metric_history': opt.policy_metric_history,
        'mode': opt.policy_mode,
# If graphviz is installed on the system, a computational graph of the
# underlying network can be made as well.
try:
    if opt.make_graph:
        aux.save_graph(opt, model)
    else:
        print('Not generating graph!')
except Exception:
    # Fix: a bare `except:` also swallowed KeyboardInterrupt/SystemExit,
    # making the script hard to interrupt here. `except Exception` still
    # covers the intended failure mode (graphviz missing or mis-installed).
    print('Cannot generate graph!')
"""============================================================================"""
#################### LOSS SETUP - Collecting all criterions ####################
Criterions = nn.ModuleDict()

# Add Class/Shared loss criterion to Criterion dictionary.
for i, task in enumerate(opt.tasks):
    Criterions[task], to_optim = losses.loss_select(opt.losses[i], opt,
                                                    to_optim, i)

# Add adversarial loss in given directions ('target-source' pairs).
task_arr = np.array(opt.tasks)
for i, mutual_task in enumerate(opt.adversarial):
    pair = mutual_task.split('-')
    idx_target = np.where(task_arr == pair[0])[0][0]
    idx_source = np.where(task_arr == pair[1])[0][0]
    opt.embed_dim_target = opt.embed_sizes[idx_target]
    opt.embed_dim_source = opt.embed_sizes[idx_source]
    key = 'MutualInfo-{}'.format(mutual_task)
    Criterions[key], to_optim = losses.loss_select('adversarial', opt,
                                                   to_optim, i)

### Move learnable parameters to GPU
for _, loss in Criterions.items():