bn_flag=False, device=device) z_dim = 128 generator_hidden_units = (256, 512) tanh_scale = 20 label_eps = 0.1 L2_regularization_discriminator = 0 elif datasource in ['miniImageNet_embedding', 'tieredImageNet_embedding']: from FC640 import FC640 from WGenerator2 import WeightDiscriminator, WeightGenerator net = FC640(dim_output=num_classes_per_task, num_hidden_units=(128, 32), device=device) z_dim = 128 encoder = FC640(dim_output=z_dim, num_hidden_units=(256, 128), device=device) generator_hidden_units = (256, 512) tanh_scale = 20 L2_regularization_discriminator = 0 label_eps = 0.1 # set the clipping value clip_grad_value = args.clip_grad_value
device=device) elif (datasource == 'miniImageNet') or (datasource == 'tieredImageNet'): from ConvNet import ConvNet net = ConvNet(dim_input=(3, 84, 84), dim_output=num_classes_per_task, num_filters=(32, 32, 32, 32), filter_size=(3, 3), device=device) elif (datasource == 'miniImageNet_embedding') or (datasource == 'tieredImageNet_embedding'): from FC640 import FC640 net = FC640(dim_output=num_classes_per_task, num_hidden_units=(256, 64), device=device) else: sys.exit('Unknown dataset') # endregion w_shape = net.get_weight_shape() # print(w_shape) num_weights = get_num_weights(net) print('Number of parameters of base model = {0:d}'.format(num_weights)) num_val_tasks = args.num_val_tasks p_dropout_base = args.p_dropout_base
# Softmax over the class dimension (dim=1) for converting logits to
# probabilities.
sm = torch.nn.Softmax(dim=1)

if datasource in ['omniglot', 'miniImageNet']:
    # Raw-image datasets: conv net; input shape depends on the dataset.
    from ConvNet import ConvNet

    DIM_INPUT = {'omniglot': (1, 28, 28), 'miniImageNet': (3, 84, 84)}
    net = ConvNet(
        dim_input=DIM_INPUT[datasource],
        dim_output=None,
        num_filters=(32, 32, 32, 32),
        filter_size=(3, 3),
        device=device
    )
elif datasource in ['miniImageNet_640', 'tieredImageNet_640']:
    # Pre-extracted 640-d feature datasets: fully-connected network.
    import pickle
    from FC640 import FC640

    net = FC640(
        num_hidden_units=(128, 32),
        dim_output=None,
        device=device
    )
else:
    sys.exit('Unknown dataset!')

weight_shape = net.get_weight_shape()

# ------------------------------------------------------------------------------------------------
# Parse training parameters
# ------------------------------------------------------------------------------------------------
meta_lr = args.meta_lr
print('Meta learning rate = {0}'.format(meta_lr))

num_tasks_per_minibatch = args.minibatch_size
print('Number of tasks per minibatch = {0:d}'.format(num_tasks_per_minibatch))

# Print progress roughly every 1000 tasks, regardless of minibatch size.
num_meta_updates_print = int(1000 / num_tasks_per_minibatch)

num_epochs_save = 1