Example #1
0
def create_model_embed_miniimagenet(args,
                                    config,
                                    architecture,
                                    ENCODER_CONFIG=None):
    """Build a meta-learning model for embedded miniImageNet classification.

    Args:
        args: parsed CLI arguments; reads ``args.model`` (and ``args.n_way``
            for the SIB / Prototypes_embedded models).
        config: hyperparameter dict (``update_lr``, ``update_step``,
            ``is_meta_sgd``, ...).
        architecture: layer configuration for MAML/MetaSGD-style models.
        ENCODER_CONFIG: encoder configuration, used only by the "MLwM" model.

    Returns:
        The instantiated model.

    Raises:
        NotImplementedError: if ``args.model`` is not a supported model name.
    """
    if args.model in ("MAML", "MetaSGD"):
        # MetaSGD learns per-parameter inner-loop learning rates;
        # Meta is plain MAML with a fixed inner-loop learning rate.
        if config['is_meta_sgd']:
            model = MetaSGD(architecture,
                            config['update_lr'],
                            config['update_step'],
                            is_regression=False,
                            is_image_feature=False)
        else:
            model = Meta(architecture,
                         config['update_lr'],
                         config['update_step'],
                         is_regression=False,
                         is_image_feature=False)
    elif args.model == "LEO":
        model = LEO(config)
    elif args.model == "SIB":
        model = SIB(args.n_way, config)
    elif args.model == "Prototypes_embedded":
        model = PrototypeNet_embedded(args.n_way, config)
    elif args.model == "MLwM":
        # MLwM wraps MAML/MetaSGD with a learned feature encoder.
        model = MLwM(ENCODER_CONFIG, architecture,
                     config['update_lr'], config['update_step'],
                     is_regression=False)
    else:
        # Fixed: the original evaluated the bare class (a no-op expression),
        # so unknown models fell through to `return model` and crashed with
        # an UnboundLocalError instead of a clear error.
        raise NotImplementedError("Unknown model: {}".format(args.model))

    return model
Example #2
0
def create_model_miniimagenet(args, config, architecture, ENCODER_CONFIG=None):
    """Build a meta-learning model for miniImageNet classification.

    Args:
        args: parsed CLI arguments; reads ``args.model``.
        config: hyperparameter dict (``update_lr``, ``update_step``,
            ``is_meta_sgd``, ...).
        architecture: layer configuration for MAML/MetaSGD-style models.
        ENCODER_CONFIG: encoder configuration, used only by the "MLwM" model.

    Returns:
        The instantiated model.

    Raises:
        NotImplementedError: if ``args.model`` is not a supported model name.
    """
    if args.model in ("MAML", "MetaSGD"):
        # MetaSGD learns per-parameter inner-loop learning rates;
        # Meta is plain MAML with a fixed inner-loop learning rate.
        if config['is_meta_sgd']:
            model = MetaSGD(architecture,
                            config['update_lr'],
                            config['update_step'],
                            is_regression=False)
        else:
            model = Meta(architecture,
                         config['update_lr'],
                         config['update_step'],
                         is_regression=False)

    elif args.model == "LEO":
        model = LEO(config)

    elif args.model == "MLwM":
        model = MLwM(ENCODER_CONFIG, architecture,
                     config['update_lr'], config['update_step'],
                     is_regression=False)
    else:
        # Fixed: the original evaluated the bare class (a no-op expression),
        # leaving `model` unbound for unknown model names.
        raise NotImplementedError("Unknown model: {}".format(args.model))

    return model
Example #3
0
def create_model_poseregression(args,
                                config,
                                architecture,
                                ENCODER_CONFIG=None):
    """Build a meta-learning model for the pose-regression task.

    Args:
        args: parsed CLI arguments; reads ``args.model``.
        config: hyperparameter dict (``update_lr``, ``update_step``,
            ``is_meta_sgd``, ...). Note the "MLwM" branch passes `config`
            itself as the encoder config, unlike the classification builders.
        architecture: layer configuration for MAML/MetaSGD-style models.
        ENCODER_CONFIG: kept for signature parity with the other builders;
            not used by the visible branches.

    Returns:
        The instantiated model.

    Raises:
        NotImplementedError: if ``args.model`` is not a supported model name.
    """
    if args.model in ("MAML", "MetaSGD"):
        # MetaSGD learns per-parameter inner-loop learning rates;
        # Meta is plain MAML with a fixed inner-loop learning rate.
        if config['is_meta_sgd']:
            model = MetaSGD(architecture,
                            config['update_lr'],
                            config['update_step'],
                            is_regression=True)
        else:
            model = Meta(architecture,
                         config['update_lr'],
                         config['update_step'],
                         is_regression=True)
    elif args.model == "MLwM":
        model = MLwM(config, architecture,
                     config['update_lr'], config['update_step'],
                     is_regression=True)
    elif args.model == "CNP":
        model = CNP(config, is_regression=True)
    else:
        # Fixed: the original evaluated the bare class (a no-op expression),
        # leaving `model` unbound for unknown model names.
        raise NotImplementedError("Unknown model: {}".format(args.model))

    return model
Example #4
0
    def __init__(self, config, architecture, update_lr, update_step, initializer=None,
                 is_regression=False):
        """Build the feature encoder and the inner meta-learner (MAML/MetaSGD).

        Args:
            config: dict with 'encoder_type', 'is_image_feature',
                'is_kl_loss', 'beta_kl' and 'is_meta_sgd' entries.
            architecture: layer configuration forwarded to Meta/MetaSGD.
            update_lr: inner-loop learning rate.
            update_step: number of inner-loop adaptation steps.
            initializer: accepted for interface compatibility, but Meta/MetaSGD
                are always constructed with initializer=None below.
            is_regression: True for regression tasks, False for classification.

        Raises:
            NotImplementedError: if config['encoder_type'] is unknown.
        """
        super().__init__()

        self.encoder_type = config['encoder_type']
        self.config = config
        self._is_regression = is_regression
        self._is_image_feature = config['is_image_feature']
        self._is_kl_loss = config['is_kl_loss']
        self._beta_kl = config['beta_kl']
        self._is_meta_sgd = config['is_meta_sgd']

        # Pick the encoder that produces the features fed to the meta-learner.
        if self.encoder_type == 'deterministic':
            self.encoder = Deterministic_Conv_Encoder(self.config)
        elif self.encoder_type == 'VAE':
            self.encoder = Conv_Reparameterization_Encoder(self.config)
        elif self.encoder_type == 'BBB':
            self.encoder = Stochastic_Conv_Encoder(self.config)
        elif self.encoder_type == 'BBB_FC':
            self.encoder = Stochastic_FC_Encoder(self.config)
        else:
            # Fixed: was a bare, no-op `NotImplementedError` expression that
            # let execution continue with `self.encoder` unset.
            raise NotImplementedError(
                "Unknown encoder type: {}".format(self.encoder_type))

        # 'update_lr' is a learned parameter in MetaSGD, fixed in plain MAML.
        if self._is_meta_sgd:
            self.maml = MetaSGD(architecture,
                                update_lr,
                                update_step,
                                initializer=None,
                                is_regression=self._is_regression,
                                is_image_feature=self._is_image_feature)
        else:
            self.maml = Meta(architecture,
                             update_lr,
                             update_step,
                             initializer=None,
                             is_regression=self._is_regression,
                             is_image_feature=self._is_image_feature)
Example #5
0
    # Load config
    # NOTE(review): hard-coded absolute path — breaks on any other machine;
    # consider promoting it to a CLI argument.
    # Fixed: the original leaked the file handle (`open()` inside yaml.load
    # with no close); `with` guarantees closure.
    with open("/home/mgyukim/workspaces/MLwM/configs/MLwM_config.yml", 'r') as f:
        config = yaml.load(f, Loader=yaml.SafeLoader)
    config = config['miniImageNet']

    # architecture
    architecture = set_config(config['CONFIG_CONV_4_MAXPOOL'],
                              args.n_way,
                              config['img_size'],
                              is_regression=False)

    # Create Model
    if args.model == "MAML":
        model = Meta(architecture,
                     config['update_lr'],
                     config['update_step'],
                     is_regression=False)
    # NOTE(review): no else branch — `model` stays unbound for any other
    # args.model value; verify callers only pass "MAML" here.

    # load model path
    if args.model_save_root_dir == args.model_load_dir:
        args = set_dir_path_args(args, "miniimagenet")
        load_model_path = latest_load_model_filepath(args)
    else:
        load_model_path = args.model_load_dir

    # dataset (validation split)
    miniimagenet_valid_set = meta_miniImagenet_dataset(
        args.n_way, args.k_shot_support, args.k_shot_query,
        args.data_path, config['img_size'], mode='val', types=args.datatypes)

    val_loader = DataLoader(miniimagenet_valid_set,
                            batch_size=args.task_size,
Example #6
0
class MLwM(nn.Module):
    """Meta-Learning with a feature encoder.

    Encodes support/query inputs with a deterministic or stochastic encoder,
    then runs MAML or MetaSGD on the encoded features. Stochastic encoders
    additionally contribute a KL term to the meta-training loss.
    """

    def __init__(self, config, architecture, update_lr, update_step, initializer=None,
                 is_regression=False):
        """Build the feature encoder and the inner meta-learner.

        Args:
            config: dict with 'encoder_type', 'is_image_feature',
                'is_kl_loss', 'beta_kl' and 'is_meta_sgd' entries.
            architecture: layer configuration forwarded to Meta/MetaSGD.
            update_lr: inner-loop learning rate.
            update_step: number of inner-loop adaptation steps.
            initializer: accepted for interface compatibility; Meta/MetaSGD
                are always constructed with initializer=None below.
            is_regression: True for regression tasks, False for classification.

        Raises:
            NotImplementedError: if config['encoder_type'] is unknown.
        """
        super().__init__()

        self.encoder_type = config['encoder_type']
        self.config = config
        self._is_regression = is_regression
        self._is_image_feature = config['is_image_feature']
        self._is_kl_loss = config['is_kl_loss']
        self._beta_kl = config['beta_kl']
        self._is_meta_sgd = config['is_meta_sgd']

        # Pick the encoder that produces the features fed to the meta-learner.
        if self.encoder_type == 'deterministic':
            self.encoder = Deterministic_Conv_Encoder(self.config)
        elif self.encoder_type == 'VAE':
            self.encoder = Conv_Reparameterization_Encoder(self.config)
        elif self.encoder_type == 'BBB':
            self.encoder = Stochastic_Conv_Encoder(self.config)
        elif self.encoder_type == 'BBB_FC':
            self.encoder = Stochastic_FC_Encoder(self.config)
        else:
            # Fixed: was a bare, no-op `NotImplementedError` expression that
            # let execution continue with `self.encoder` unset.
            raise NotImplementedError(
                "Unknown encoder type: {}".format(self.encoder_type))

        # 'update_lr' is a learned parameter in MetaSGD, fixed in plain MAML.
        if self._is_meta_sgd:
            self.maml = MetaSGD(architecture,
                                update_lr,
                                update_step,
                                initializer=None,
                                is_regression=self._is_regression,
                                is_image_feature=self._is_image_feature)
        else:
            self.maml = Meta(architecture,
                             update_lr,
                             update_step,
                             initializer=None,
                             is_regression=self._is_regression,
                             is_image_feature=self._is_image_feature)

    def forward(self,
                x_support,
                y_support,
                x_query,
                is_hessian=True,
                is_adaptation=True):
        '''
            Model-Agnostic Meta Learning forward pass on encoded inputs.

            Args:
                x_support : [task_size, n_way, k_shot, channel, height, width]
                y_support : [task_size, n_way, k_shot] # class indices, not one-hot
                x_query : [task_size, n_way, k_shot, channel, height, width]
                is_hessian : use second-order gradients in the inner loop
                is_adaptation : run inner-loop adaptation before predicting
            Returns:
                pred : [task_size, n_way, k_shot, num_classes]
        '''
        # Encode x_support and x_query. The encoder's second return value
        # (KL loss) is only needed for training — see meta_loss().
        encoded_x_support, _ = self.encoder(x_support)
        encoded_x_query, _ = self.encoder(x_query)

        # Forward by MAML/MetaSGD.
        pred_y_stack = self.maml.forward(
            encoded_x_support, y_support, encoded_x_query, is_hessian,
            is_adaptation)  # [task_size, n_way, k_shot_query]

        return pred_y_stack

    def meta_loss(self,
                  x_support,
                  y_support,
                  x_query,
                  y_query,
                  is_hessian=True):
        '''
            Meta-training loss: the MAML/MetaSGD loss, plus an optional KL
            term produced by a stochastic encoder.

            Args:
                x_support : [task_size, n_way, k_shot, channel, height, width]
                y_support : [task_size, n_way, k_shot] # class indices, not one-hot
                x_query : [task_size, n_way, k_shot, channel, height, width]
                y_query : [task_size, n_way, k_shot] # class indices, not one-hot
                is_hessian : use second-order gradients in the inner loop

            Returns:
                total_loss : loss for the meta parameters
                criterion, losses_list : passed through from self.maml.meta_loss
        '''
        # Encode both sets; stochastic encoders also return a KL divergence.
        encoded_x_support, kl_loss_support = self.encoder(x_support)
        encoded_x_query, kl_loss_query = self.encoder(x_query)

        # Forward by MAML/MetaSGD.
        maml_loss, criterion, losses_list = self.maml.meta_loss(
            encoded_x_support, y_support, encoded_x_query, y_query,
            is_hessian)

        # Add the averaged, beta-weighted KL term only for stochastic
        # encoders and only when KL regularization is enabled.
        if self.encoder_type != "deterministic" and self._is_kl_loss:
            kl_loss = (kl_loss_support + kl_loss_query) / 2.
            total_loss = maml_loss + (self._beta_kl * kl_loss)
        else:
            total_loss = maml_loss

        return total_loss, criterion, losses_list
Example #7
0
        # Set Configuration (MAML)
        encoded_img_size = math.floor(math.sqrt(config['encoder_output_dim']))
        architecture = set_config(config['CONFIG_CONV_4'],
                                  args.n_way,
                                  encoded_img_size,
                                  is_regression=True)
    else:
        architecture = set_config(config['CONFIG_CONV_4_MAML'],
                                  args.n_way,
                                  config['img_size'],
                                  is_regression=True)

    # Create Model
    if args.model == "MAML":
        model = Meta(architecture,
                     config['update_lr'],
                     config['update_step'],
                     is_regression=True)
    elif args.model == "MLwM":
        # NOTE(review): `is_kl_loss`/`beta_kl` keyword arguments must match
        # the MLwM constructor in use — the MLwM.__init__ visible elsewhere
        # in this collection reads those settings from `config` instead.
        model = MLwM(ENCODER_CONFIG, architecture,
                     config['update_lr'], config['update_step'],
                     is_regression=True, is_kl_loss=True,
                     beta_kl=config['beta_kl'])
    else:
        # Fixed: was a bare, no-op `NotImplementedError` expression that let
        # execution reach train() with `model` unbound.
        raise NotImplementedError("Unknown model: {}".format(args.model))

    # Train
    train(model, config, save_model_path)

    # load model path
    if args.model_save_root_dir == args.model_load_dir:
        load_model_path = latest_load_model_filepath(args)
    else:
        load_model_path = args.model_load_dir
Example #8
0
    elif args.model == "LEO":
        # Config
        leo_config = yaml.load(open("'/home/mgyukim/workspaces/MLwM/model/LEO/config.yml'", 'r'), \
            Loader=yaml.SafeLoader)
        leo_config = leo_config['miniImageNet']

    else:
        architecture = set_config(config['CONFIG_CONV_4_MAXPOOL'],
                                  args.n_way,
                                  config['img_size'],
                                  is_regression=False)

    # Create Model
    if args.model == "MAML":
        model = Meta(architecture,
                     config['update_lr'],
                     config['update_step'],
                     is_regression=False)

        # Debug: list every trainable parameter by name.
        for name, param in model.named_parameters():
            if param.requires_grad:
                print("parameter name : ", name)

        # Debug
        print(model.net.vars)

    elif args.model == "LEO":
        model = LEO(leo_config)

    # NOTE(review): the branch below is truncated/mis-pasted — the MLwM(...)
    # call is never closed, so the following `architecture = ...` assignment
    # is parsed as a keyword argument and the fragment is not valid Python.
    # Restore the original call (likely ending with `is_regression=False, ...)`)
    # before using this code.
    elif args.model == "MLwM":
        model = MLwM(ENCODER_CONFIG, architecture, config['update_lr'], config['update_step'],\
        architecture = set_config_fc_layers(args.n_way, 640, 64,
                                            config['layer_count'])

    # Create Model
    if args.model == "MAML":
        # MetaSGD learns per-parameter inner-loop learning rates;
        # Meta is plain MAML with a fixed inner-loop learning rate.
        if config['is_meta_sgd']:
            model = MetaSGD(architecture,
                            config['update_lr'],
                            config['update_step'],
                            is_regression=False,
                            is_image_feature=False)
        else:
            model = Meta(architecture,
                         config['update_lr'],
                         config['update_step'],
                         is_regression=False,
                         is_image_feature=False)
    elif args.model == "LEO":
        model = LEO(config)
    elif args.model == "SIB":
        model = SIB(args.n_way, config)
    elif args.model == "Prototypes_embedded":
        model = PrototypeNet_embedded(args.n_way, config)
    elif args.model == "MLwM":
        # MLwM with a MAML or MetaSGD inner learner.
        # NOTE(review): `is_kl_loss`/`beta_kl`/`is_meta_sgd` kwargs must match
        # the MLwM constructor in use — verify against the class definition.
        model = MLwM(ENCODER_CONFIG, architecture,
                     config['update_lr'], config['update_step'],
                     is_regression=False, is_kl_loss=True,
                     beta_kl=config['beta_kl'], is_meta_sgd=config['is_meta_sgd'])
    else:
        # Fixed: was a bare, no-op `NotImplementedError` expression that left
        # `model` unbound for unknown model names.
        raise NotImplementedError("Unknown model: {}".format(args.model))