Code Example #1
File: main.py  Project: yongxinw/Smooth_AP
    for ind, param in enumerate(all_but_fc_params):
        all_but_fc_params[ind] = param[1]

    fc_params         = model.model.last_linear.parameters()

    to_optim          = [{'params':all_but_fc_params,'lr':opt.lr,'weight_decay':opt.decay},
                         {'params':fc_params,'lr':opt.lr*opt.fc_lr_mul,'weight_decay':opt.decay}]
else:
    to_optim   = [{'params':model.parameters(),'lr':opt.lr,'weight_decay':opt.decay}]
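# [Hedged sketch, not part of the repo] The same two-group setup in self-contained
# form. A torchvision ResNet (head named 'fc') stands in for the pretrainedmodels
# backbone (head named 'last_linear') above, and the values for lr, fc_lr_mul and
# decay are hypothetical stand-ins for opt.lr / opt.fc_lr_mul / opt.decay:
import torch, torchvision

resnet = torchvision.models.resnet50()
lr, fc_lr_mul, decay = 1e-5, 5.0, 4e-4
# Every parameter outside the classifier head trains at the base learning rate...
backbone_params = [p for n, p in resnet.named_parameters() if not n.startswith('fc.')]
# ...while the head gets the base rate scaled by fc_lr_mul.
param_groups = [{'params': backbone_params,        'lr': lr,            'weight_decay': decay},
                {'params': resnet.fc.parameters(), 'lr': lr*fc_lr_mul,  'weight_decay': decay}]
optimizer = torch.optim.Adam(param_groups)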
"""============================================================================"""
#################### DATALOADER SETUPS ##################
#Returns a dictionary containing 'training', 'testing', and 'evaluation' dataloaders.
#The 'testing' dataloader corresponds to the validation set, and the 'evaluation' dataloader
#simply uses the training set, but runs under the same rules as the 'testing' dataloader,
#i.e. no shuffling and no random cropping.
dataloaders      = data.give_dataloaders(opt.dataset, opt)
#Because the number of supervised classes is dataset-dependent, we store it after
#initializing the dataloaders.
opt.num_classes  = len(dataloaders['training'].dataset.avail_classes)
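# [Hedged sketch] Quick sanity check of the returned loaders; the split names
# follow the comment above, and both len() calls work on standard PyTorch dataloaders:
for split in ('training', 'testing', 'evaluation'):
    loader = dataloaders[split]
    print(f'{split}: {len(loader.dataset)} samples, {len(loader)} batches')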

"""============================================================================"""
#################### CREATE LOGGING FILES ###############
#Each dataset usually has a set of standard metrics to log. aux.metrics_to_examine()
#returns a dict which lists metrics to log for training ('train') and validation/testing ('val')

metrics_to_log = aux.metrics_to_examine(opt.dataset, opt.k_vals)
# example output: {'train': ['Epochs', 'Time', 'Train Loss'],
#                  'val': ['Epochs','Time','NMI','F1', 'Recall @ 1','Recall @ 2','Recall @ 4','Recall @ 8']}

#Using the provided metrics of interest, we generate a LOGGER instance.
#Note that 'start_new' denotes that a new folder should be made in which everything will be stored.
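# [Hedged sketch] aux.metrics_to_examine and LOGGER (with its 'start_new' flag)
# are project code. A hypothetical stand-in with the same input shape: one CSV
# file per split, with the header row taken from the metric dict above:
import csv, os

def make_csv_loggers(save_path, metrics_to_log):
    """One (file, csv.writer) pair per split ('train'/'val')."""
    os.makedirs(save_path, exist_ok=True)
    loggers = {}
    for split, columns in metrics_to_log.items():
        f = open(os.path.join(save_path, 'log_{}.csv'.format(split)), 'w', newline='')
        writer = csv.writer(f)
        writer.writerow(columns)
        loggers[split] = (f, writer)
    return loggers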
opt.device = torch.device('cuda')
_ = model.to(opt.device)
# List of optimization parameter groups. Loss functions will append their own
# parameters to it if they have any that are learnable.
to_optim = [{
    'params': model.parameters(),
    'lr': opt.lr,
    'weight_decay': opt.decay
}]
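# [Hedged illustration of the comment above] A criterion with learnable
# parameters would be appended to to_optim before the optimizer is built. The
# toy margin-style loss, the num_classes value, and the 5e-4 learning rate
# below are all hypothetical:
import torch
import torch.nn as nn

class ToyMarginCriterion(nn.Module):
    """Toy loss with a learnable per-class boundary, mimicking criteria that carry parameters."""
    def __init__(self, num_classes, beta_init=1.2):
        super().__init__()
        self.beta = nn.Parameter(torch.full((num_classes,), beta_init))

    def forward(self, distances, labels):
        # Penalize distances that exceed the learned boundary of their class.
        return torch.relu(distances - self.beta[labels]).mean()

criterion = ToyMarginCriterion(num_classes=100)
to_optim.append({'params': criterion.parameters(), 'lr': 5e-4})
optimizer = torch.optim.Adam(to_optim)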
"""============================================================================"""
#################### DATALOADERS SETUP ##################
#opt.all_num_classes simply collects the number of target classes for each task.
dataloaders, opt.all_num_classes = {task: {} for task in opt.tasks}, []

#### CLASS
opt.samples_per_class = opt.cs_per_bs[0]
dataloaders['Class'] = data.give_dataloaders(opt.dataset, opt)
opt.all_num_classes.append(
    len(dataloaders['Class']['training'].dataset.avail_classes))

#### SHARED
opt.samples_per_class = opt.cs_per_bs[1]
dataloaders['Shared']['label_generator'] = dataloaders['Class']['evaluation']
# Compute initial clusters using features from throughout the network (i.e. not only the
# final embedding); this allows better grouping based on both low- and high-level features.
shared_labels, image_paths = aux.initcluster(
    opt,
    dataloaders['Shared']['label_generator'],
    model,
    num_cluster=opt.shared_num_classes)
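# [Hedged sketch] aux.initcluster is project code; per the comment above, it
# clusters features from throughout the network. A single-embedding version of
# the idea (extract features, k-means them, return assignments as pseudo-labels)
# looks roughly like this; the (images, paths) batch format is an assumption:
import numpy as np
import torch
from sklearn.cluster import KMeans

@torch.no_grad()
def initcluster_sketch(model, dataloader, num_cluster, device='cuda'):
    model.eval()
    feats, paths = [], []
    for images, batch_paths in dataloader:
        feats.append(model(images.to(device)).cpu().numpy())
        paths.extend(batch_paths)
    # The cluster assignments become pseudo-labels for the auxiliary task.
    labels = KMeans(n_clusters=num_cluster).fit_predict(np.concatenate(feats))
    return labels, paths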
# Using those labels, generate a new PyTorch dataloader for the auxiliary task.
dataloaders['Shared']['cluster'] = data.ClusterDataset(image_paths,
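# [Hedged sketch] The ClusterDataset call above is cut off in this excerpt.
# data.ClusterDataset is project code, but a minimal Dataset matching the shape
# its arguments suggest (image paths paired with the generated cluster labels)
# could look like this; the class below is hypothetical:
from PIL import Image
from torch.utils.data import Dataset

class ClusterDatasetSketch(Dataset):
    """Serves (image, pseudo-label) pairs for the auxiliary 'Shared' task."""
    def __init__(self, image_paths, labels, transform=None):
        self.image_paths, self.labels, self.transform = image_paths, labels, transform

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        image = Image.open(self.image_paths[idx]).convert('RGB')
        if self.transform is not None:
            image = self.transform(image)
        return image, self.labels[idx]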