예제 #1
0
def evaluate():
    """Evaluate a saved baseline classifier on n-shot MiniImageNet episodes.

    Loads ./baseline_89.pth into a Conv4Classifier, builds an episodic
    DataLoader over the support split, and logs per-episode accuracy plus
    the running average to a k-way/n-shot log file.
    """
    device = torch.device('cuda')
    # Input sizes are fixed per episode, so cuDNN autotuning pays off.
    torch.backends.cudnn.benchmark = True

    # ------------------------------- #
    # Load Model
    # ------------------------------- #
    PATH = './baseline_89.pth'

    model = Conv4Classifier(64)
    # map_location makes the load robust to the device the checkpoint
    # was saved from; .to(device) below moves it to the GPU either way.
    model.load_state_dict(torch.load(PATH, map_location=device))

    model.to(device)
    model.eval()

    ####################
    # Prepare Data Set #
    ####################
    print('preparing dataset')
    n = 5  # number of samples per supporting class
    k = 5  # number of classes
    q = 15  # query images per class
    episodes_per_epoch = 10000

    base_cls, val_cls, support_cls = get_splits()

    support = MiniImageNet('support', base_cls, val_cls, support_cls)
    support_loader = DataLoader(support,
                                batch_sampler=SupportingSetSampler(
                                    support, n, k, q, episodes_per_epoch),
                                num_workers=4)

    logging.basicConfig(
        filename=f'./logs/baseline_cosine_result_{k}-way_{n}-shot.log',
        filemode='w',
        format='%(asctime)s - %(message)s',
        level=logging.INFO)

    print('start to evaluate')
    accs = 0
    # Inference only: disabling autograd saves memory and time.
    with torch.no_grad():
        for i, data in enumerate(tqdm(support_loader)):
            inputs, labels = prepare_nshot_task(n, k, q, data)
            embeddings = model(inputs, feature=True)

            acc = evaluation(embeddings, labels, n, k, q)
            logging.info(f'[{i:3d}]: {acc}%')
            accs += acc

    logging.info(
        f'Average ACC is {accs}/{len(support_loader)}={accs/len(support_loader)}'
    )
예제 #2
0
File: __init__.py — Project: vanway/bamboo
    def cut_base_on_dict(self, sentence):
        """Segment *sentence* against the trie dictionary and tag each word.

        Returns a SegmentPair(word_list, tag_list). Dictionary words take
        their stored tag (default 'x'), number/English runs are tagged 'm',
        and remaining single-character runs are re-segmented and tagged by
        the CRF tagger.
        """
        word_list, tag_list = [], []
        blocks = utils.get_blocks(sentence, utils.RE_NORMAL_HAN)
        for block in blocks:
            # Most-probable segmentation route: a list of split indices.
            max_prob_route = self.trie_model.get_max_prob_route(block)
            max_prob_word_list = [block[max_prob_route[idx]: max_prob_route[idx+1]] \
                                  for idx in range(len(max_prob_route)-1)]
            # Runs of consecutive single characters get a second pass below.
            continuos_singe_list = self.get_continuos_singe(max_prob_word_list)
            last_end = 0
            for start, end in continuos_singe_list:
                for pre_word in max_prob_word_list[last_end: start]:
                    word_list.append(pre_word)
                    tag_list.append(self.trie_model.word_value.get(pre_word, {}).get('tag', 'x'))
                last_end = end
                continuos_singe_str = ''.join(max_prob_word_list[start: end])
                for slices in utils.get_splits(continuos_singe_str, utils.RE_NUNMBER_ENG):
                    if utils.is_number_or_eng(slices):
                        word_list.append(slices)
                        tag_list.append('m')  # number / English token
                    else:
                        mid_word_list = tag.crf_tag.crfToken(slices)
                        mid_tag_list = tag.crf_tag.crfPos(mid_word_list)
                        word_list.extend(mid_word_list)
                        tag_list.extend(mid_tag_list)
            for word in max_prob_word_list[last_end: ]:
                word_list.append(word)
                # BUG FIX: look up the tag of `word`, not the stale `pre_word`
                # left over from the inner loop above.
                tag_list.append(self.trie_model.word_value.get(word, {}).get('tag', 'x'))

        return SegmentPair(word_list, tag_list)
예제 #3
0
def preprocess(options):
    """Load (or restore from a cached dump) the CMU-MOSI data and build loaders.

    options: dict with keys 'dataset', 'model_path', 'batch_size', 'cuda'.
    Returns (train_loader, dev_loader, test_loader, input_dims).
    """
    # parse the input args
    dataset = options['dataset']
    model_path = options['model_path']
    batch_size = options['batch_size']
    DTYPE = torch.FloatTensor
    if options['cuda']:
        DTYPE = torch.cuda.FloatTensor

    # prepare the paths for storing models
    model_path = os.path.join(model_path, "tfn.pt")
    print("Temp location for saving model: {}".format(model_path))

    # define fields
    text_field = 'CMU_MOSI_TimestampedWordVectors_1.1'
    visual_field = 'CMU_MOSI_VisualFacet_4.1'
    acoustic_field = 'CMU_MOSI_COVAREP'
    label_field = 'CMU_MOSI_Opinion_Labels'

    # Recompute only when no cached dump exists.
    # (os.path.isfile already implies os.path.exists.)
    recalc = not os.path.isfile('./vars/dump')

    if recalc:
        # prepare the datasets
        print("Currently using {} dataset.".format(dataset))
        DATASET = utils.download()
        dataset = utils.load(visual_field, acoustic_field, text_field)
        utils.align(text_field, dataset)
        utils.annotate(dataset, label_field)
        splits = utils.get_splits(DATASET)
        os.makedirs('./vars', exist_ok=True)
        # Context managers guarantee the dump file is closed even on error.
        with open('./vars/dump', 'wb') as f:
            pickle.dump([splits, dataset], f)
    else:
        with open('./vars/dump', 'rb') as f:
            splits, dataset = pickle.load(f)

    input_dims = utils.get_dims_from_dataset(dataset, text_field,
                                             acoustic_field, visual_field)
    train, dev, test = utils.split(splits, dataset, label_field, visual_field,
                                   acoustic_field, text_field, batch_size)
    train_loader, dev_loader, test_loader = utils.create_data_loader(
        train, dev, test, batch_size, DTYPE)
    return train_loader, dev_loader, test_loader, input_dims
예제 #4
0
    def cut_base_on_dict(self, sentence):
        """Segment *sentence* against the trie dictionary and tag each word.

        Returns a SegmentPair(word_list, tag_list). Dictionary words take
        their stored tag (default 'x'), number/English runs are tagged 'm',
        and remaining single-character runs are re-segmented and tagged by
        the CRF tagger.
        """
        word_list, tag_list = [], []
        blocks = utils.get_blocks(sentence, utils.RE_NORMAL_HAN)
        for block in blocks:
            # Most-probable segmentation route: a list of split indices.
            max_prob_route = self.trie_model.get_max_prob_route(block)
            max_prob_word_list = [block[max_prob_route[idx]: max_prob_route[idx+1]] \
                                  for idx in range(len(max_prob_route)-1)]
            # Runs of consecutive single characters get a second pass below.
            continuos_singe_list = self.get_continuos_singe(max_prob_word_list)
            last_end = 0
            for start, end in continuos_singe_list:
                for pre_word in max_prob_word_list[last_end:start]:
                    word_list.append(pre_word)
                    tag_list.append(
                        self.trie_model.word_value.get(pre_word,
                                                       {}).get('tag', 'x'))
                last_end = end
                continuos_singe_str = ''.join(max_prob_word_list[start:end])
                for slices in utils.get_splits(continuos_singe_str,
                                               utils.RE_NUNMBER_ENG):
                    if utils.is_number_or_eng(slices):
                        word_list.append(slices)
                        tag_list.append('m')  # number / English token
                    else:
                        mid_word_list = tag.crf_tag.crfToken(slices)
                        mid_tag_list = tag.crf_tag.crfPos(mid_word_list)
                        word_list.extend(mid_word_list)
                        tag_list.extend(mid_tag_list)
            for word in max_prob_word_list[last_end:]:
                word_list.append(word)
                # BUG FIX: look up the tag of `word`, not the stale `pre_word`
                # left over from the inner loop above.
                tag_list.append(
                    self.trie_model.word_value.get(word,
                                                   {}).get('tag', 'x'))

        return SegmentPair(word_list, tag_list)
예제 #5
0
File: train.py — Project: wstmac/FSL_with_KG
def main():
    """Train and/or evaluate a few-shot classifier with superclass supervision.

    Parses CLI flags, builds the MiniImageNet base/support loaders, constructs
    the chosen backbone, then runs the train loop and/or episodic evaluation
    (1-shot and 5-shot, 5-way, 15 queries), logging to per-run log files.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', default=0, type=int)
    parser.add_argument('--model_arch',
                        default='conv4',
                        choices=['conv4', 'resnet10', 'resnet18'],
                        type=str)
    # parser.add_argument('--attention', action='store_true')
    parser.add_argument('--start_epoch', default=1, type=int)
    parser.add_argument('--num_epoch', default=90, type=int)
    parser.add_argument('--learning_rate', default=0.01, type=float)
    parser.add_argument('--scheduler_milestones', nargs='+', type=int)
    parser.add_argument('--alpha', default=0.5, type=float)
    parser.add_argument('--model_saving_rate', default=30, type=int)
    parser.add_argument('--train', action='store_true')
    parser.add_argument('--support_groups', default=10000, type=int)
    parser.add_argument('--evaluate', action='store_true')
    parser.add_argument('--evaluation_rate', default=10, type=int)
    parser.add_argument('--model_dir', default=None, type=str)
    parser.add_argument('--checkpoint', action='store_true')
    parser.add_argument('--normalize', action='store_true')
    parser.add_argument('--save_settings', action='store_true')
    parser.add_argument('--layer', default=4, type=int)

    args = parser.parse_args()

    device = torch.device(f'cuda:{args.gpu}')
    model_arch = args.model_arch
    # attention = args.attention
    learning_rate = args.learning_rate
    alpha = args.alpha
    start_epoch = args.start_epoch
    num_epoch = args.num_epoch
    model_saving_rate = args.model_saving_rate
    toTrain = args.train
    toEvaluate = args.evaluate
    evaluation_rate = args.evaluation_rate
    checkpoint = args.checkpoint
    normalize = args.normalize
    scheduler_milestones = args.scheduler_milestones
    save_settings = args.save_settings
    support_groups = args.support_groups

    # ------------------------------- #
    # Generate folder
    # ------------------------------- #
    # Resuming reuses the given run folder; fresh runs get a timestamped one.
    if checkpoint:
        model_dir = f'./training_models/{args.model_dir}'
    else:
        model_dir = f'./training_models/{datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}'
        os.makedirs(model_dir)

    # ------------------------------- #
    # Config logger
    # ------------------------------- #
    train_logger = setup_logger('train_logger', f'{model_dir}/train.log')
    result_logger = setup_logger('result_logger', f'{model_dir}/result.log')
    # overview_logger = setup_logger('overview_logger', f'./overview_result.log')
    if save_settings:
        # ------------------------------- #
        # Saving training parameters
        # ------------------------------- #
        result_logger.info(f'Model: {model_arch}')
        result_logger.info(f'Layer: {args.layer}')
        result_logger.info(f'Learning rate: {learning_rate}')
        result_logger.info(f'alpha: {alpha}')
        result_logger.info(f'Normalize feature vector: {normalize}')
    # ------------------------------- #
    # Load extracted knowledge graph
    # ------------------------------- #
    knowledge_graph = Graph()
    classFile_to_superclasses, superclassID_to_wikiID =\
        knowledge_graph.class_file_to_superclasses(1, [1,2])

    ####################
    # Prepare Data Set #
    ####################
    print('preparing dataset')
    base_cls, val_cls, support_cls = get_splits()

    base = MiniImageNet('base', base_cls, val_cls, support_cls,
                        classFile_to_superclasses)
    base_loader = DataLoader(base, batch_size=256, shuffle=True, num_workers=4)

    support = MiniImageNet('support',
                           base_cls,
                           val_cls,
                           support_cls,
                           classFile_to_superclasses,
                           eval=True)
    # Episodic loaders: (n-shot, k-way, q queries, number of episodes).
    support_loader_1 = DataLoader(support,
                                  batch_sampler=SupportingSetSampler(
                                      support, 1, 5, 15, support_groups),
                                  num_workers=4)
    support_loader_5 = DataLoader(support,
                                  batch_sampler=SupportingSetSampler(
                                      support, 5, 5, 15, support_groups),
                                  num_workers=4)

    #########
    # Model #
    #########
    # model_arch is restricted by argparse choices, so exactly one branch fires.
    if model_arch == 'conv4':
        model = models.Conv4Attension(len(base_cls),
                                      len(superclassID_to_wikiID))

    if model_arch == 'resnet10':
        model = models.resnet10(len(base_cls), len(superclassID_to_wikiID))

    if model_arch == 'resnet18':
        model = models.resnet18(len(base_cls), len(superclassID_to_wikiID))

    model.to(device)

    # loss function and optimizer
    criterion = loss_fn(alpha)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=learning_rate,
                                momentum=0.9,
                                weight_decay=1e-4,
                                nesterov=True)
    scheduler = MultiStepLR(optimizer,
                            milestones=scheduler_milestones,
                            gamma=0.1)

    if save_settings:
        result_logger.info(
            'optimizer: torch.optim.SGD(model.parameters(), '
            f'lr={learning_rate}, momentum=0.9, weight_decay=1e-4, nesterov=True)'
        )
        result_logger.info(
            f'scheduler: MultiStepLR(optimizer, milestones={scheduler_milestones}, gamma=0.1)\n'
        )
        # result_logger.info('='*40+'Results Below'+'='*40+'\n')

    if checkpoint:
        print('load model...')
        model.load_state_dict(torch.load(f'{model_dir}/{start_epoch-1}.pth'))
        model.to(device)

        # Replay the scheduler to the resume epoch so the LR matches.
        for _ in range(start_epoch - 1):
            scheduler.step()

    # ------------------------------- #
    # Start to train
    # ------------------------------- #
    if toTrain:
        for epoch in range(start_epoch, start_epoch + num_epoch):
            model.train()
            train(model, normalize, base_loader, optimizer, criterion, epoch,
                  start_epoch + num_epoch - 1, device, train_logger)
            scheduler.step()

            if epoch % model_saving_rate == 0:
                torch.save(model.state_dict(), f'{model_dir}/{epoch}.pth')

            # ------------------------------- #
            # Evaluate current model
            # ------------------------------- #
            if toEvaluate:
                if epoch % evaluation_rate == 0:
                    evaluate(model, normalize, epoch, support_loader_1, 1, 5,
                             15, device, result_logger)
                    evaluate(model, normalize, epoch, support_loader_5, 5, 5,
                             15, device, result_logger)
    else:
        # Evaluation-only run: score the loaded checkpoint as-is.
        if toEvaluate:
            evaluate(model, normalize, start_epoch - 1, support_loader_1, 1, 5,
                     15, device, result_logger)
            evaluate(model, normalize, start_epoch - 1, support_loader_5, 5, 5,
                     15, device, result_logger)

    result_logger.info('=' * 140)
예제 #6
0
 def test_get_splits(self):
     """get_splits: 'giantkick' has one valid 1-split and no valid 2-splits."""
     run_on_word = "giantkick"
     ans_1_splits = [["giant", "kick"]]
     ans_2_splits = []
     self.assertEqual(utils.get_splits(run_on_word, 1, self.lexicon), ans_1_splits)
     self.assertEqual(utils.get_splits(run_on_word, 2, self.lexicon), ans_2_splits)
예제 #7
0
File: execute.py — Project: qss2012/RGAT
# sys.stdout.flush()

# NOTE(review): presumably early-stopping patience (epochs) — confirm where
# it is consumed further down the script.
patience = 100

# Silence TensorFlow's C++ INFO/WARNING log output.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# AIFB benchmark pickle: adjacency data 'A', labels 'y', and split indices.
# latin1 encoding is typically needed for pickles written under Python 2.
with open("./aifb.pickle", 'rb') as f:
    data = pkl.load(f, encoding='latin1')

A = data['A']
y = data['y']
train_idx = data['train_idx']
test_idx = data['test_idx']

# Get dataset splits
y_train, y_val, y_test, idx_train, idx_val, idx_test = utils.get_splits(
    y, train_idx, test_idx, False)

print((y_train.shape))
# Boolean masks selecting each split's rows out of the full label matrix.
train_mask = utils.sample_mask(idx_train, y.shape[0])

print(train_mask.shape)

val_mask = utils.sample_mask(idx_val, y.shape[0])

test_mask = utils.sample_mask(idx_test, y.shape[0])

# print (train_mask)

# print (val_mask)

print(idx_train)
예제 #8
0
import torch
import utils
from NN import NN
import numpy as np
import torch.nn.functional as F
from ray import tune
import os
from train import train
from torch.utils.data import DataLoader

batch_size = 16

# Module-level split: loaded once at import time and shared by the training code.
train_data, val_data, test_data = utils.get_splits()


class AutoEncoder(torch.nn.Module):
    """Autoencoder built from two NN stacks: an encoder and a decoder."""

    def __init__(self, encoder_layers, decoder_layers):
        super().__init__()
        # Each side is an independent NN defined by its layer spec.
        self.encoder = NN(encoder_layers)
        self.decoder = NN(decoder_layers)

    def encode(self, x):
        """Project an input batch into the latent space."""
        return self.encoder(x)

    def decode(self, z):
        """Map a latent batch back to input space."""
        return self.decoder(z)

    def forward(self, x):
        """Full round trip: encode the input, then decode the latent code."""
        return self.decode(self.encode(x))
예제 #9
0
def main():
    """Train and/or evaluate the FSKG model (image encoder + GCN over a KG).

    Parses CLI flags, builds MiniImageNet loaders and the knowledge-graph
    encoder, fuses image and graph features via models.FSKG, then runs the
    training loop and/or 1-shot and 5-shot episodic evaluation.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', default=0, type=int)
    parser.add_argument('--model_arch',
                        default='conv4',
                        choices=['conv4', 'resnet10', 'resnet18'],
                        type=str)
    # parser.add_argument('--attention', action='store_true')
    parser.add_argument('--start_epoch', default=1, type=int)
    parser.add_argument('--num_epoch', default=90, type=int)
    parser.add_argument('--learning_rate', default=0.01, type=float)
    parser.add_argument('--scheduler_milestones', nargs='+', type=int)
    parser.add_argument('--alpha', default=1, type=float)
    parser.add_argument('--beta', default=1, type=float)
    parser.add_argument('--gamma', default=0.5, type=float)
    parser.add_argument('--model_saving_rate', default=30, type=int)
    parser.add_argument('--train', action='store_true')
    parser.add_argument('--support_groups', default=1000, type=int)
    parser.add_argument('--evaluate', action='store_true')
    parser.add_argument('--evaluation_rate', default=10, type=int)
    parser.add_argument('--model_dir', type=str)
    parser.add_argument('--checkpoint', action='store_true')
    parser.add_argument('--normalize', action='store_true')
    parser.add_argument('--save_settings', action='store_true')
    parser.add_argument('--layer', default=4, type=int)
    parser.add_argument('--fusion_method', default='sum', type=str)
    parser.add_argument('--lamda', default=0, type=float)
    # parser.add_argument('--gcn_path', type=str)
    # parser.add_argument('--img_encoder_path', type=str)

    args = parser.parse_args()

    device = torch.device(f'cuda:{args.gpu}')
    model_arch = args.model_arch
    # attention = args.attention
    learning_rate = args.learning_rate
    alpha = args.alpha
    beta = args.beta
    gamma = args.gamma
    start_epoch = args.start_epoch
    num_epoch = args.num_epoch
    model_saving_rate = args.model_saving_rate
    toTrain = args.train
    toEvaluate = args.evaluate
    evaluation_rate = args.evaluation_rate
    checkpoint = args.checkpoint
    normalize = args.normalize
    scheduler_milestones = args.scheduler_milestones
    save_settings = args.save_settings
    support_groups = args.support_groups
    fusion_method = args.fusion_method
    lamda = args.lamda

    # gcn_path = args.gcn_path
    # img_encoder_path = args.img_encoder_path

    # ------------------------------- #
    # Generate folder
    # ------------------------------- #
    # Resuming reuses the given run folder; fresh runs get a timestamped one.
    if checkpoint:
        model_dir = f'./training_models/{args.model_dir}'
    else:
        model_dir = f'./training_models/{datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}'
        os.makedirs(model_dir)

    # ------------------------------- #
    # Config logger
    # ------------------------------- #
    train_logger = setup_logger('train_logger', f'{model_dir}/train_all.log')
    result_logger = setup_logger('result_logger',
                                 f'{model_dir}/result_all.log')
    if save_settings:
        # ------------------------------- #
        # Saving training parameters
        # ------------------------------- #
        result_logger.info(f'Model: {model_arch}')
        result_logger.info(f'Fusion Method: {fusion_method}; Lamda: {lamda}')
        result_logger.info(f'Attention Layer: {args.layer}')
        result_logger.info(f'Learning rate: {learning_rate}')
        result_logger.info(f'Alpha: {alpha} Beta: {beta} Gamma: {gamma}')
        # result_logger.info(f'alpha: {alpha}')
        result_logger.info(f'Normalize feature vector: {normalize}')

    # ------------------------------- #
    # Load extracted knowledge graph
    # ------------------------------- #
    knowledge_graph = Graph()
    classFile_to_superclasses, superclassID_to_wikiID =\
        knowledge_graph.class_file_to_superclasses(1, [1,2])
    nodes = knowledge_graph.nodes
    # import ipdb; ipdb.set_trace()

    # GCN architecture: 2 layers, 768-d text embeddings in, 1600-d out.
    layer = 2
    layer_nums = [768, 2048, 1600]
    edges = knowledge_graph.edges

    # Fusion dimensions: concatenated feature size and final embedding size.
    cat_feature = 1600
    final_feature = 1024

    ####################
    # Prepare Data Set #
    ####################
    print('preparing dataset')
    base_cls, val_cls, support_cls = get_splits()

    base = MiniImageNet('base', base_cls, val_cls, support_cls,
                        classFile_to_superclasses)
    base_loader = DataLoader(base, batch_size=256, shuffle=True, num_workers=4)

    support = MiniImageNet('support',
                           base_cls,
                           val_cls,
                           support_cls,
                           classFile_to_superclasses,
                           eval=True)
    # Episodic loaders: (n-shot, k-way, q queries, number of episodes).
    support_loader_1 = DataLoader(support,
                                  batch_sampler=SupportingSetSampler(
                                      support, 1, 5, 15, support_groups),
                                  num_workers=4)
    support_loader_5 = DataLoader(support,
                                  batch_sampler=SupportingSetSampler(
                                      support, 5, 5, 15, support_groups),
                                  num_workers=4)

    #########
    # Model #
    #########
    # sentence transformer
    sentence_transformer = SentenceTransformer(
        'paraphrase-distilroberta-base-v1')

    # image encoder
    # model_arch is restricted by argparse choices, so exactly one branch fires.
    if model_arch == 'conv4':
        img_encoder = models.Conv4Attension(len(base_cls),
                                            len(superclassID_to_wikiID))

    if model_arch == 'resnet10':
        img_encoder = models.resnet10(len(base_cls),
                                      len(superclassID_to_wikiID))

    if model_arch == 'resnet18':
        img_encoder = models.resnet18(len(base_cls),
                                      len(superclassID_to_wikiID))

    # img_encoder.load_state_dict(torch.load(f'{model_dir}/{img_encoder_path}'))
    # img_encoder.to(device)
    # img_encoder.eval()

    # knowledge graph encoder
    GCN = models.GCN(layer, layer_nums, edges)
    # GCN.load_state_dict(torch.load(f'{model_dir}/{gcn_path}'))
    # GCN.to(device)
    # GCN.eval()

    # total model
    model = models.FSKG(cat_feature, final_feature, img_encoder, GCN,
                        len(base_cls), lamda)
    model.to(device)

    # loss function and optimizer
    criterion = loss_fn(alpha, beta, gamma, device)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=learning_rate,
                                momentum=0.9,
                                weight_decay=1e-4,
                                nesterov=True)
    scheduler = MultiStepLR(optimizer,
                            milestones=scheduler_milestones,
                            gamma=0.1)

    if save_settings:
        result_logger.info(
            'optimizer: torch.optim.SGD(model.parameters(), '
            f'lr={learning_rate}, momentum=0.9, weight_decay=1e-4, nesterov=True)'
        )
        result_logger.info(
            f'scheduler: MultiStepLR(optimizer, milestones={scheduler_milestones}, gamma=0.1)\n'
        )
        # result_logger.info('='*40+'Results Below'+'='*40+'\n')

    if checkpoint:
        print('load model...')
        model.load_state_dict(
            torch.load(f'{model_dir}/FSKG_{start_epoch-1}.pth'))
        model.to(device)

        # NOTE(review): unlike the sibling trainer, the scheduler is NOT
        # replayed to the resume epoch here — confirm this is intentional.
        # for _ in range(start_epoch - 1):
        #     scheduler.step()

    # ---------------------------------------- #
    # Graph convolution to get kg embeddings
    # ---------------------------------------- #

    # encode node description
    desc_embeddings = knowledge_graph.encode_desc(sentence_transformer).to(
        device)

    # start graph convolution
    # import ipdb; ipdb.set_trace()
    # kg_embeddings = GCN(desc_embeddings)
    # kg_embeddings = kg_embeddings.to('cpu')

    # Lookup tables mapping dataset class ids to knowledge-graph node ids.
    classFile_to_wikiID = get_classFile_to_wikiID()
    # train_class_name_to_id = base.class_name_to_id
    train_id_to_class_name = base.id_to_class_name
    # eval_class_name_to_id = support.class_name_to_id
    eval_id_to_class_name = support.id_to_class_name

    # ------------------------------- #
    # Start to train
    # ------------------------------- #
    if toTrain:
        for epoch in range(start_epoch, start_epoch + num_epoch):
            model.train()
            train(model, img_encoder, normalize, base_loader, optimizer,
                  criterion, epoch, start_epoch + num_epoch - 1, device,
                  train_logger, nodes, desc_embeddings, train_id_to_class_name,
                  classFile_to_wikiID)
            scheduler.step()

            if epoch % model_saving_rate == 0:
                torch.save(model.state_dict(), f'{model_dir}/FSKG_{epoch}.pth')

                # ------------------------------- #
                # Evaluate current model
                # ------------------------------- #
            if toEvaluate:
                if epoch % evaluation_rate == 0:
                    evaluate(model, normalize, epoch, support_loader_1, 1, 5,
                             15, device, result_logger, nodes, desc_embeddings,
                             eval_id_to_class_name, classFile_to_wikiID)
                    evaluate(model, normalize, epoch, support_loader_5, 5, 5,
                             15, device, result_logger, nodes, desc_embeddings,
                             eval_id_to_class_name, classFile_to_wikiID)

    else:
        # pass
        # Evaluation-only run; epoch label 30 is hard-coded for the log.
        if toEvaluate:
            evaluate(model, normalize, 30, support_loader_1, 1, 5, 15, device,
                     result_logger, nodes, desc_embeddings,
                     eval_id_to_class_name, classFile_to_wikiID)
            evaluate(model, normalize, 30, support_loader_5, 5, 5, 15, device,
                     result_logger, nodes, desc_embeddings,
                     eval_id_to_class_name, classFile_to_wikiID)
    result_logger.info('=' * 140)
예제 #10
0
def train():
    """Train the GCN to regress base-class classifier weights from KG nodes.

    Loads a frozen image encoder, obtains per-class classifier vectors (cached
    or freshly computed), then optimizes the GCN so that its embeddings for
    the base classes match those classifiers under a cosine-embedding loss.
    Checkpoints the GCN every 1000 epochs and logs progress every 200.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', default=0, type=int)
    parser.add_argument('--model_arch',
                        default='conv4',
                        choices=['conv4', 'resnet10', 'resnet18'],
                        type=str)
    parser.add_argument('--start_epoch', default=1, type=int)
    parser.add_argument('--num_epoch', default=90, type=int)
    parser.add_argument('--learning_rate', default=0.01, type=float)
    parser.add_argument('--model_saving_rate', default=30, type=int)
    parser.add_argument('--train', action='store_true')
    parser.add_argument('--evaluate', action='store_true')
    parser.add_argument('--evaluation_rate', default=10, type=int)
    parser.add_argument('--model_dir', type=str)
    parser.add_argument('--img_encoder_path', type=str)
    parser.add_argument('--checkpoint', action='store_true')
    parser.add_argument('--normalize', action='store_true')
    parser.add_argument('--save_settings', action='store_true')
    parser.add_argument('--layer', default=4, type=int)
    parser.add_argument('--classifiers_path', action='store_true')
    parser.add_argument('--optimizer', default='SGD', type=str)

    args = parser.parse_args()

    device = torch.device(f'cuda:{args.gpu}')
    model_arch = args.model_arch
    learning_rate = args.learning_rate
    start_epoch = args.start_epoch
    num_epoch = args.num_epoch
    model_saving_rate = args.model_saving_rate
    evaluation_rate = args.evaluation_rate
    checkpoint = args.checkpoint
    save_settings = args.save_settings
    model_dir = f'./training_models/{args.model_dir}'
    img_encoder_path = f'{model_dir}/{args.img_encoder_path}'
    classifiers_path = args.classifiers_path
    normalize = args.normalize

    # ------------------------------- #
    # Config logger
    # ------------------------------- #
    train_logger = setup_logger('train_logger', f'{model_dir}/gcn_train.log')
    if save_settings:
        # ------------------------------- #
        # Saving training parameters
        # ------------------------------- #
        train_logger.info(f'{model_arch} Model: {img_encoder_path}')
        # BUG FIX: the braces were missing, so the literal text 'args.layer'
        # was logged instead of the value.
        train_logger.info(f'Attention Layer: {args.layer}')
        train_logger.info(f'Learning rate: {learning_rate}')
        train_logger.info(f'Optimizer: {args.optimizer}')

    # ------------------------------- #
    # Load extracted knowledge graph
    # ------------------------------- #
    knowledge_graph = Graph()
    classFile_to_superclasses, superclassID_to_wikiID =\
        knowledge_graph.class_file_to_superclasses(1, [1,2])
    edges = knowledge_graph.edges
    nodes = knowledge_graph.nodes

    ####################
    # Prepare Data Set #
    ####################
    print('preparing dataset')
    base_cls, val_cls, support_cls = get_splits()
    base = MiniImageNet('base', base_cls, val_cls, support_cls,
                        classFile_to_superclasses)
    base_loader = DataLoader(base,
                             batch_size=256,
                             shuffle=False,
                             num_workers=4)

    # ------------------------------- #
    # Load image encoder model
    # ------------------------------- #
    # image encoder (model_arch is restricted by argparse choices)
    if model_arch == 'conv4':
        img_encoder = models.Conv4Attension(len(base_cls),
                                            len(superclassID_to_wikiID))

    if model_arch == 'resnet10':
        img_encoder = models.resnet10(len(base_cls),
                                      len(superclassID_to_wikiID))

    if model_arch == 'resnet18':
        img_encoder = models.resnet18(len(base_cls),
                                      len(superclassID_to_wikiID))

    img_encoder.load_state_dict(torch.load(f'{img_encoder_path}'))
    img_encoder.to(device)

    img_feature_dim = img_encoder.dim_feature

    # ------------------------------- #
    # get class classifiers
    # ------------------------------- #
    # Either load cached base-class classifier vectors or derive them from
    # the frozen image encoder.
    if classifiers_path:
        with open(f'{model_dir}/base_classifiers.pkl', 'rb') as f:
            classifiers = pickle.load(f)
    else:
        classifiers = get_classifier(img_encoder, img_feature_dim,
                                     len(base_cls), base_loader, 'base',
                                     normalize, model_dir, device)

    # ------------------------------- #
    # Init GCN model
    # ------------------------------- #
    layer = 2
    layer_nums = [768, 2048, img_feature_dim]
    layer_nums_str = " ".join(str(a) for a in layer_nums)
    if save_settings:
        train_logger.info(f'GCN layers: {layer_nums_str}')
    GCN = models.GCN(layer, layer_nums, edges)
    GCN.to(device)

    # ------------------------------- #
    # Other neccessary parameters
    # ------------------------------- #
    # Indices of the base classes' nodes inside the knowledge graph.
    classFile_to_wikiID = get_classFile_to_wikiID()
    base_cls_index = [
        nodes.index(classFile_to_wikiID[base.id_to_class_name[i]])
        for i in range(len(base_cls))
    ]

    sentence_transformer = SentenceTransformer(
        'paraphrase-distilroberta-base-v1')
    desc_embeddings = knowledge_graph.encode_desc(sentence_transformer)
    desc_embeddings = desc_embeddings.to(device)

    # ------------------------------- #
    # Training settings
    # ------------------------------- #
    criterion = torch.nn.CosineEmbeddingLoss()
    optimizer = torch.optim.SGD(GCN.parameters(),
                                lr=learning_rate,
                                momentum=0.9,
                                weight_decay=1e-4,
                                nesterov=True)

    batch_time = AverageMeter()  # forward prop. + back prop. time
    losses = AverageMeter()  # loss

    GCN.train()
    start = time.time()

    classifiers = classifiers.to(device)

    # Target of +1 for every pair: embeddings should align with classifiers.
    loss_target = torch.ones(classifiers.shape[0]).to(device)

    for epoch in range(start_epoch, start_epoch + num_epoch):
        base_embeddings = GCN(desc_embeddings)[base_cls_index]
        # BUG FIX: clear gradients from the previous epoch; without this
        # they accumulate across iterations and corrupt every update.
        optimizer.zero_grad()
        loss = criterion(base_embeddings, classifiers, loss_target)
        loss.backward()
        optimizer.step()

        losses.update(loss.item())
        batch_time.update(time.time() - start)

        if epoch % 200 == 0:  # log every 200 epochs
            train_logger.info(
                f'[{epoch:3d}/{start_epoch+num_epoch-1}]'
                f' batch_time: {batch_time.avg:.2f} loss: {losses.avg:.3f}')
            batch_time.reset()
            losses.reset()
            start = time.time()
        if epoch % 1000 == 0:
            torch.save(GCN.state_dict(), f'{model_dir}/gcn_{epoch}.pth')

    train_logger.info("=" * 60)
예제 #11
0
# coding: utf-8

import numpy as np
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.python.keras.layers import Lambda
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.optimizers import Adam

from gcn import GCN
from utils import load_data, get_splits, preprocess_adj, plot_embeddings

if __name__ == "__main__":

    # Demo: build a GCN for node classification on the Cora citation graph.
    # NOTE(review): this snippet is truncated — the model is constructed but
    # never compiled/fit within this chunk.
    FEATURE_LESS = False
    # load_data / get_splits come from the project-local `utils` module;
    # presumably y_* are one-hot label matrices and *_mask are boolean masks.
    features, A, y, _ = load_data(dataset="cora")
    y_train, y_val, y_test, idx_train, idx_val, idx_test, train_mask, val_mask, test_mask = get_splits(y,
                                                                                                       shuffle=False)
    # Row-normalize the feature matrix so each node's features sum to 1.
    features /= features.sum(axis=1, ).reshape(-1, 1)

    # Add self-loops / symmetric normalization to the adjacency matrix.
    A = preprocess_adj(A)

    if FEATURE_LESS:
        # Feature-less mode: use node indices as inputs (identity features).
        X = np.arange(A.shape[-1])
        feature_dim = A.shape[-1]
    else:
        X = features
        feature_dim = X.shape[-1]
    model_input = [X, A]

    # Compile model
    model = GCN(A.shape[-1], y_train.shape[1], feature_dim, dropout_rate=0.5, l2_reg=2.5e-4,
                feature_less=FEATURE_LESS, )
예제 #12
0
import utils, config
from tqdm import tqdm

# Benchmark every configured algorithm on every configured dataset,
# repeating each train/test split across several random seeds.
scores = {}
for dataset in config.datasets:

    scores[dataset["name"]] = {}

    for algorithm in config.algorithms:

        statslist = []

        splits = utils.get_splits(dataset)

        for X_train, y_train, X_test, y_test in tqdm(splits):

            for random_state in config.random_states:

                # Re-seed so each algorithm run is reproducible per seed.
                utils.reset_random_state(random_state)

                algo = algorithm()

                # Fit on features only — presumably an unsupervised /
                # anomaly-detection style estimator; verify against `config`.
                algo.fit(X_train)

                y_train_pred = algo.predict(X_train)
                y_test_pred = algo.predict(X_test)

                stats = utils.calculate_stats(y_train, y_train_pred, y_test,
                                              y_test_pred)

                # NOTE(review): statslist is accumulated but never written
                # into scores[...] within this snippet — presumably the
                # aggregation happens in code cut off below; confirm.
                statslist.append(stats)
예제 #13
0
def train():
    """Train a Conv4 classifier on the miniImageNet base split.

    Runs 90 epochs of SGD with a two-milestone LR decay, logs the running
    loss every 30 mini-batches to ./logs/baseline.log, and saves the final
    weights to ./baseline_<epoch>.pth.
    """
    logging.basicConfig(filename='./logs/baseline.log',
                        filemode='w',
                        format='%(asctime)s - %(message)s',
                        level=logging.INFO)

    device = torch.device('cuda')
    torch.backends.cudnn.benchmark = True  # let cuDNN pick fastest kernels

    # Few-shot episode parameters; unused here but kept for reference by the
    # evaluation-side samplers.
    n = 1  # number of samples per supporting class
    k = 5  # number of classes
    q = 15  # query image per class
    learning_rate = 0.1

    ####################
    # Prepare Data Set #
    ####################
    print('preparing dataset')
    base_cls, val_cls, support_cls = get_splits()

    base = MiniImageNet('base', base_cls, val_cls, support_cls)
    base_loader = DataLoader(base, batch_size=256, shuffle=True, num_workers=4)

    #########
    # Model #
    #########
    model = Conv4Classifier(len(base_cls))
    model.to(device)

    criterion = torch.nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=learning_rate,
                                momentum=0.9,
                                weight_decay=1e-4,
                                nesterov=True)
    # Decay LR by 10x at 50% and 75% of the 90-epoch schedule.
    scheduler = MultiStepLR(optimizer,
                            milestones=[int(.5 * 90),
                                        int(.75 * 90)],
                            gamma=0.1)

    print('start to train')
    for epoch in range(90):
        loss_accum = 0.0
        load_elapsed = 0
        compute_elapsed = 0
        total_elapsed = 0
        for step, batch in enumerate(base_loader):
            t_start = time.time()
            inputs, labels = batch[0].to(device), batch[1].to(device)
            t_loaded = time.time()

            outputs = model(inputs)
            loss = criterion(outputs, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            t_done = time.time()
            # Accumulate loss and wall-clock timing for the report below.
            loss_accum += loss.item()
            load_elapsed += t_loaded - t_start
            compute_elapsed += t_done - t_loaded
            total_elapsed += t_done - t_start

            if (step + 1) % 30 == 0:  # report every 30 mini-batches
                logging.info('[%d, %3d] loss: %.3f ' %
                             (epoch + 1, step + 1, loss_accum / 30))
                print(
                    '[%d, %5d] load_data:%2f gpu_time:%2f epoch_time:%.2f loss: %.3f '
                    % (epoch + 1, step + 1, load_elapsed, compute_elapsed,
                       total_elapsed, loss_accum / 30))
                loss_accum = 0.0
                load_elapsed = 0
                compute_elapsed = 0
                total_elapsed = 0

        scheduler.step()

    PATH = f'./baseline_{epoch}.pth'
    torch.save(model.state_dict(), PATH)
import numpy as np, time

from utils import load_data, get_splits, preprocess_adj_numpy, evaluate_preds
from keras_dgl.layers import GraphCNN


d_set = ["Terrorists-Relation"]  # ["Cora", "CiteSeer", "Facebook-Page2Page", "PubMed-Diabetes", "Terrorists-Relation", "Zachary-Karate", "Internet-Industry-Partnerships"]  # [sparse, dense]

for i in range(len(d_set)):
    # Prepare Data
    X, A, Y = load_data(path="../data/", dataset=d_set[i])
    A = np.array(A.todense())
    
    # Preserve ratio/percentage of samples per class using efficent data-splitting && data-resampling strageies
    graph_fname = "../data/"+d_set[i]+"/"+d_set[i]
    _, Y_val, _, train_idx, val_idx, test_idx, train_mask = get_splits(X, Y, graph_fname)
    train_idx = np.array(train_idx)
    val_idx = np.array(val_idx)
    test_idx = np.array(test_idx)
    labels = np.argmax(Y, axis=1) + 1
    # Preserve ratio/percentage of samples per class using efficent data-splitting && data-resampling strageies
    
    # Normalize X
    X /= X.sum(1).reshape(-1, 1)
    X = np.array(X)
    
    Y_train = np.zeros(Y.shape)
    labels_train = np.zeros(labels.shape)
    Y_train[train_idx] = Y[train_idx]
    labels_train[train_idx] = labels[train_idx]