Example #1
0
class HANSageHander(BaseHander):
    """
    Wrapper for the SupervisedGraphSage (HANSage) model.

    Caches the dataset structures, builds the two-view HANSage network and
    warm-starts its per-view encoders from pretrained GraphSage checkpoints.
    """

    def __init__(self, num_class, data, args):
        # Dataset pieces produced by the upstream loader.
        self.num_class = num_class
        self.labels = data['labels']
        self.adj_lists = data['adj_lists']
        self.adj_matrix = data['adj_matrix']
        self.feat_data = data['feat_data']
        self.num_nodes, self.feat_dim = self.feat_data.shape
        # Run configuration copied from the argparse namespace.
        self.split_seed = args.split_seed
        self.is_cuda = args.cuda
        self.view = args.view
        self.num_sample_tpl = args.num_sample_tpl
        self.num_sample_permission = args.num_sample_permission
        self.num_neighs_tpl = args.num_neighs_tpl
        self.num_neighs_permission = args.num_neighs_permission
        self.embed_dim = args.embed_dim
        self.freeze = args.freeze
        # Train/test split handling plus the batch iterators used in training.
        self.inputdata = InputData(self.num_nodes, self.labels,
                                   self.adj_lists, args.split_seed,
                                   args.label_rate, self.is_cuda)
        self.inst_generator = self.inputdata.gen_train_batch(
            batch_size=args.batch_size)
        self.train_data_loader = self.inputdata.get_train_data_load(
            batch_size=args.batch_size, shuffle=True)

    def build_model(self):
        """Instantiate HANSage, the Adam optimizer and the weighted loss."""
        logger.info("define model.")
        num_layers = 2
        self.model = HANSage(self.num_class, self.num_nodes, self.feat_data,
                             self.feat_dim, self.adj_lists, self.adj_matrix,
                             self.is_cuda, self.num_sample_tpl,
                             self.num_sample_permission, self.embed_dim,
                             num_layers)
        logger.info(self.model)
        if self.is_cuda:
            self.model.cuda()
        self.custom_init(self.freeze)
        # Only parameters left trainable by custom_init() are optimized.
        self.optimizer = torch.optim.Adam(
            filter(lambda param: param.requires_grad,
                   self.model.parameters()),
            lr=1e-3, weight_decay=1e-5)
        # Class weights compensate the label imbalance of the dataset.
        unbalance_alpha = torch.Tensor([0.9934, 1])
        if self.is_cuda:
            unbalance_alpha = unbalance_alpha.cuda()
        self.loss_func = nn.CrossEntropyLoss(weight=unbalance_alpha)

    def custom_init(self, freeze=False):
        """Warm-start the tpl and permission encoders from checkpoints.

        Args:
            freeze: when True, the loaded encoder parameters are excluded
                from further training (requires_grad = False).
        """
        logger.info("custom initialization. freeze={}".format(freeze))
        from setting import model_path
        self._load_encoder(
            self.model.encoder_tpl,
            os.path.join(model_path, 'GraphSage',
                         "*tpl*neigh{}".format(self.num_neighs_tpl)),
            freeze)
        self._load_encoder(
            self.model.encoder_permission,
            os.path.join(model_path, 'GraphSage',
                         "*permission*neigh{}".format(
                             self.num_neighs_permission)),
            freeze)

    def _load_encoder(self, encoder, pattern, freeze):
        """Load the first checkpoint matching *pattern* into *encoder*."""
        import glob
        matches = glob.glob(pattern)
        if not matches:
            # Explicit error instead of an opaque IndexError on [0].
            raise FileNotFoundError(
                "no pretrained checkpoint matches {}".format(pattern))
        checkpoint = torch.load(matches[0])
        encoder.load_state_dict(checkpoint['state_dict'], strict=False)
        if freeze:
            for param in encoder.parameters():
                param.requires_grad = False
Example #2
0
 def __init__(self, num_class, data, args):
     """Cache the dataset pieces, CLI options and the training data loader."""
     self.num_class = num_class
     # Unpack the dataset dict onto the handler.
     for key in ('labels', 'adj_lists', 'feat_data'):
         setattr(self, key, data[key])
     # Node count and feature dimensionality come from the feature matrix.
     self.num_nodes, self.feat_dim = self.feat_data.shape
     # Mirror the relevant CLI options.
     self.is_cuda = args.cuda
     self.view = args.view
     self.num_sample = args.num_sample
     self.embed_dim = args.embed_dim
     self.freeze = args.freeze
     self.inputdata = InputData(self.num_nodes, self.labels, self.adj_lists,
                                args.label_rate, self.is_cuda)
     self.train_data_loader = self.inputdata.get_train_data_load(
         batch_size=args.batch_size, shuffle=True)
Example #3
0
def preprocess_image(image_path):
    """Build a (low-res, high-res) training example from an image file."""
    path_str = str(image_path)
    # The class/sample ids are encoded in the file path itself.
    class_id, sample_id = InputData.split_path(path_str)
    high_res = cv2.imread(path_str, cv2.IMREAD_COLOR)
    low_res, high_res = _reduce_resolution(high_res)
    return image_example(low_res, high_res, class_id, sample_id)
Example #4
0
class GraphSageHandler(BaseHander):
    """
    Wrapper for the GraphSage model: holds dataset state and builds the
    network, optimizer and loss.
    """

    def __init__(self, num_class, data, args):
        super(GraphSageHandler, self).__init__(args)
        self.num_class = num_class
        # Unpack the dataset dict onto the handler.
        for key in ('labels', 'adj_lists', 'feat_data'):
            setattr(self, key, data[key])
        self.num_nodes, self.feat_dim = self.feat_data.shape
        # Mirror the relevant CLI options.
        self.is_cuda = args.cuda
        self.view = args.view
        self.num_sample = args.num_sample
        self.embed_dim = args.embed_dim
        self.dropout = args.dropout
        self.inputdata = InputData(self.num_nodes, self.labels,
                                   self.adj_lists, args.label_rate,
                                   self.is_cuda)
        self.train_data_loader = self.inputdata.get_train_data_load(
            batch_size=args.batch_size, shuffle=True)

    def build_model(self):
        """Create the GraphSage network, Adam optimizer and loss function."""
        logger.info("define model.")
        self.num_layers = 2
        net = GraphSage(self.adj_lists, self.feat_data, self.num_class,
                        self.embed_dim, self.num_sample, self.num_layers,
                        self.is_cuda)
        self.model = net
        logger.info('\n{}'.format(net))
        if self.is_cuda:
            net.cuda()
        self.optimizer = torch.optim.Adam(net.parameters(), lr=1e-3,
                                          weight_decay=1e-5)
        self.loss_func = nn.CrossEntropyLoss()
Example #5
0
 def __init__(self, num_class, data, args):
     """Initialize handler state plus the train and DDC-target data loaders."""
     super(GraphSageHandler, self).__init__(args)
     self.num_class = num_class
     # Unpack the dataset dict onto the handler.
     for key in ('labels', 'adj_lists', 'feat_data'):
         setattr(self, key, data[key])
     self.num_nodes, self.feat_dim = self.feat_data.shape
     # Mirror the relevant CLI options.
     self.is_cuda = args.cuda
     self.view = args.view
     self.num_sample = args.num_sample
     self.embed_dim = args.embed_dim
     self.dropout = args.dropout
     self.inputdata = InputData(self.num_nodes, self.labels, self.adj_lists,
                                args.label_rate, self.is_cuda)
     batch = args.batch_size
     self.train_data_loader = self.inputdata.get_train_data_load(
         batch_size=batch, shuffle=True)
     self.ddc_target_data_loader = self.inputdata.get_ddc_target_data_load(
         batch_size=batch, shuffle=True)
Example #6
0
class GraphSageHandler(BaseHander):
    """
    Wrapper for the GraphSage model: holds the dataset structures and
    builds the network, optimizer and class-weighted loss.
    """

    def __init__(self, num_class, data, args):
        super(GraphSageHandler, self).__init__(args)
        self.num_class = num_class
        # Unpack the dataset dict onto the handler.
        for key in ('labels', 'adj_lists', 'adj_matrix', 'feat_data'):
            setattr(self, key, data[key])
        self.num_nodes, self.feat_dim = self.feat_data.shape
        # Mirror the relevant CLI options.
        self.split_seed = args.split_seed
        self.is_cuda = args.cuda
        self.view = args.view
        self.num_neighs = args.num_neighs
        self.num_sample = args.num_sample
        self.embed_dim = args.embed_dim
        self.dropout = args.dropout
        # Train/test split handling plus the batch iterators.
        self.inputdata = InputData(self.num_nodes, self.labels,
                                   self.adj_lists, args.split_seed,
                                   args.label_rate, self.is_cuda)
        self.inst_generator = self.inputdata.gen_train_batch(
            batch_size=args.batch_size)
        self.train_data_loader = self.inputdata.get_train_data_load(
            batch_size=args.batch_size, shuffle=True)

    def build_model(self):
        """Create the GraphSage network, Adam optimizer and weighted loss."""
        logger.info("define model.")
        num_layers = 2
        net = GraphSage(self.num_class, self.num_nodes, self.feat_data,
                        self.feat_dim, self.adj_lists, self.adj_matrix,
                        self.is_cuda, self.num_sample, self.embed_dim,
                        num_layers)
        self.model = net
        logger.info(net)
        if self.is_cuda:
            net.cuda()
        self.optimizer = torch.optim.Adam(net.parameters(), lr=1e-3,
                                          weight_decay=1e-5)
        # Class weights compensate the label imbalance of the dataset.
        weights = torch.Tensor([0.9934, 1])
        if self.is_cuda:
            weights = weights.cuda()
        self.loss_func = nn.CrossEntropyLoss(weight=weights)
Example #7
0
 def __init__(self, num_class, data, args):
     """Cache dataset structures, CLI options and the batch iterators."""
     self.num_class = num_class
     # Unpack the dataset dict onto the handler.
     for key in ('labels', 'adj_lists', 'adj_matrix', 'feat_data'):
         setattr(self, key, data[key])
     self.num_nodes, self.feat_dim = self.feat_data.shape
     self.split_seed = args.split_seed
     self.is_cuda = args.cuda
     self.view = args.view
     # Per-view sampling/neighbor sizes (tpl and permission graphs).
     self.num_sample_tpl = args.num_sample_tpl
     self.num_sample_permission = args.num_sample_permission
     self.num_neighs_tpl = args.num_neighs_tpl
     self.num_neighs_permission = args.num_neighs_permission
     self.embed_dim = args.embed_dim
     self.freeze = args.freeze
     self.inputdata = InputData(self.num_nodes, self.labels, self.adj_lists,
                                args.split_seed, args.label_rate,
                                self.is_cuda)
     self.inst_generator = self.inputdata.gen_train_batch(
         batch_size=args.batch_size)
     self.train_data_loader = self.inputdata.get_train_data_load(
         batch_size=args.batch_size, shuffle=True)
Example #8
0
class HANSageHander(BaseHander):
    """Wrapper for the multi-view HANSage model.

    Builds the model over all graph views and warm-starts each per-view
    encoder from a pretrained single-view checkpoint.
    """

    def __init__(self, num_class, data, args):
        # Dataset pieces produced by the upstream loader.
        self.num_class = num_class
        self.labels = data['labels']
        self.adj_lists = data['adj_lists']
        self.feat_data = data['feat_data']
        self.num_nodes, self.feat_dim = self.feat_data.shape
        # Run configuration copied from the argparse namespace.
        self.is_cuda = args.cuda
        self.view = args.view
        self.num_sample = args.num_sample
        self.embed_dim = args.embed_dim
        self.freeze = args.freeze
        self.inputdata = InputData(self.num_nodes, self.labels,
                                   self.adj_lists, args.label_rate,
                                   self.is_cuda)
        self.train_data_loader = self.inputdata.get_train_data_load(
            batch_size=args.batch_size, shuffle=True)

    def build_model(self):
        """Create HANSage, warm-start its encoders, set up optimizer/loss."""
        logger.info("define model.")
        self.num_layers = 2
        self.model = HANSage(self.adj_lists, self.feat_data, self.num_class,
                             self.embed_dim, self.num_sample, self.num_layers,
                             self.is_cuda)
        logger.info('\n{}'.format(self.model))
        if self.is_cuda:
            self.model.cuda()

        self.custom_init(self.freeze)
        # Only parameters left trainable by custom_init() are optimized.
        self.optimizer = torch.optim.Adam(
            filter(lambda param: param.requires_grad,
                   self.model.parameters()),
            lr=1e-3, weight_decay=1e-5)

        self.loss_func = nn.CrossEntropyLoss()

    def custom_init(self, freeze=False):
        """Load one pretrained checkpoint per view into each view encoder.

        Args:
            freeze: when True, the loaded encoder parameters are frozen
                (requires_grad = False) so only the rest of the model trains.
        """
        logger.info("custom initialization. freeze={}".format(freeze))
        from setting import model_path
        import glob

        all_views = [
            'app_permission_app', 'app_url_app', 'app_component_app',
            'app_tpl_app'
        ]

        # Pair each view name with its encoder instead of indexing by range.
        for view_name, encoder in zip(all_views, self.model.encoders):
            matches = glob.glob(
                os.path.join(model_path, "*{}*".format(view_name)))
            if not matches:
                # Explicit error instead of an opaque IndexError on [0].
                raise FileNotFoundError(
                    "no pretrained checkpoint matches view {}".format(
                        view_name))
            checkpoint = torch.load(matches[0])
            encoder.load_state_dict(checkpoint['state_dict'], strict=False)
            if freeze:
                for param in encoder.parameters():
                    param.requires_grad = False