Code Example #1
 val_data_list = train_data_list[30000:]
 train_data_list = train_data_list[:30000]
 normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225])
 trainset = MyDataset(total_num=1000 * args['batch_size'],
                      if_rot_90=True,
                      if_flip=True,
                      if_mixup=True,
                      data_list=train_data_list,
                      data_path=args['dataset_path'],
                      transform_list=[
                          transforms.Compose([
                              transforms.RandomHorizontalFlip(p=0.5),
                              transforms.RandomVerticalFlip(p=0.5),
                              transforms.RandomRotation([-180, 180]),
                              transforms.RandomResizedCrop(size=448,
                                                           scale=(0.9,
                                                                  1.0)),
                          ]),
                          transforms.Compose([
                              transforms.RandomRotation([-2, 2]),
                              transforms.ColorJitter(0.4, 0.4, 0.4, 0.02),
                              transforms.RandomGrayscale(p=0.2),
                              transforms.ToTensor(),
                          ]),
                          transforms.Compose([normalize])
                      ])
 train_loader = torch.utils.data.DataLoader(trainset,
                                            batch_size=args['batch_size'],
                                            shuffle=True,
                                            num_workers=args['workers'])
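
The `MyDataset` class used above is not part of this excerpt. Below is a minimal sketch of a dataset with the same constructor signature, assuming `data_list` holds `(file_name, label)` pairs and that the three `Compose` stages in `transform_list` are applied as geometric, photometric, and normalization steps; the real class in the project may interpret the `if_rot_90`, `if_flip`, and `if_mixup` flags differently.

import os

from PIL import Image
from torch.utils.data import Dataset


class MyDataset(Dataset):
    """Hypothetical stand-in for the project's MyDataset, not the original code."""

    def __init__(self, total_num, if_rot_90, if_flip, if_mixup,
                 data_list, data_path, transform_list):
        self.total_num = total_num    # virtual epoch length used by __len__
        self.if_rot_90 = if_rot_90    # augmentation switches; how the original
        self.if_flip = if_flip        # class uses them is not shown here
        self.if_mixup = if_mixup
        self.data_list = data_list    # assumed: (file_name, label) pairs
        self.data_path = data_path
        # the three Compose stages passed in the example above
        self.geometric, self.photometric, self.normalize = transform_list

    def __len__(self):
        return self.total_num

    def __getitem__(self, index):
        # sample cyclically so total_num can exceed len(data_list)
        name, label = self.data_list[index % len(self.data_list)]
        image = Image.open(os.path.join(self.data_path, name)).convert('RGB')
        image = self.photometric(self.geometric(image))  # PIL ops, then ToTensor
        return self.normalize(image), label
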
Code Example #2
            dataset_name = npy_name[len(segmentation_object) + 1:].split('.')[0]
            checkpoint_save_path = os.path.join(
                'random_initialization',
                dataset_name + '_' + segmentation_object, str(train_rate))
            check_and_create_folder(checkpoint_save_path)
            # create data loader
            training_list, test_list = np.load(
                os.path.join(main_data_path, npy_name), allow_pickle=True)
            training_list = training_list[:int(train_rate *
                                               len(training_list))]
            val_data_list = training_list[:int(val_rate * len(training_list))]
            training_list = training_list[int(val_rate * len(training_list)):]
            training_set = MyDataset(data_list=training_list,
                                     data_path=main_data_path,
                                     if_augumentation=True)
            training_loader = data.DataLoader(training_set,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              num_workers=0)
            val_set = MyDataset(data_list=val_data_list,
                                data_path=main_data_path,
                                if_augumentation=False)
            val_loader = data.DataLoader(val_set,
                                         batch_size=batch_size,
                                         shuffle=True,
                                         num_workers=0)

            # build models
            encoder_net = vgg.__dict__[arch]()
Code Example #3
                                     lr=opt.lr,
                                     warmup=opt.warmup_proportion,
                                     t_total=num_train_optimization_steps)
                optimizers.append(optimizer)

                logging.info(
                    "%s, Warmup steps = %d, Num steps = %d",
                    train_dataset['name'],
                    int(num_train_optimization_steps * opt.warmup_proportion),
                    num_train_optimization_steps)
        else:
            raise RuntimeError("unsupported optimizer {}".format(opt.optim))

        train_data_loaders = []
        for train_dataset_instances in train_datasets_instances:
            train_loader = DataLoader(
                MyDataset(train_dataset_instances['dataset_instances']),
                opt.batch_size,
                shuffle=True,
                collate_fn=my_collate)
            train_data_loaders.append(train_loader)

        dev_indomain_data_loaders = []
        for dev_dataset_instances in dev_indomain_datasets_instances:
            dev_loader = DataLoader(
                MyDataset(dev_dataset_instances['dataset_instances']),
                opt.batch_size,
                shuffle=False,
                collate_fn=my_collate)
            dev_indomain_data_loaders.append(dev_loader)

        dev_outdomain_data_loaders = []
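
Examples #3 through #6 all pass `collate_fn=my_collate` to `DataLoader`, but the function itself is not included in these excerpts. The sketch below shows one plausible padding collate for variable-length token sequences; the assumption that each instance is a dict with `tokens` and `labels` id lists, and the exact fields returned (the real function also has to produce `sent_type`, and in Example #5 a `cate` tensor), are guesses rather than the project's code.

import torch


def my_collate(batch):
    """Hypothetical collate_fn: pad token/label id lists to the longest item."""
    max_len = max(len(item['tokens']) for item in batch)
    tokens = torch.zeros(len(batch), max_len, dtype=torch.long)
    labels = torch.zeros(len(batch), max_len, dtype=torch.long)
    mask = torch.zeros(len(batch), max_len, dtype=torch.long)
    for i, item in enumerate(batch):
        n = len(item['tokens'])
        tokens[i, :n] = torch.tensor(item['tokens'], dtype=torch.long)
        labels[i, :n] = torch.tensor(item['labels'], dtype=torch.long)
        mask[i, :n] = 1  # 1 marks real tokens, 0 marks padding
    return tokens, labels, mask
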
Code Example #4
File: rdoc_main.py  Project: foxlf823/bionlp-ost-2019
            device = torch.device("cuda", opt.gpu)
        else:
            device = torch.device("cpu")
        logging.info("use device {}".format(device))

        model = BertForSequenceClassification_rdoc.from_pretrained(
            opt.bert_dir,
            num_labels=alphabet_label.size(),
            num_category=alphabet_category.size())
        model.to(device)

        optimizer = optim.Adam(model.parameters(),
                               lr=opt.lr,
                               weight_decay=opt.l2)

        train_loader = DataLoader(MyDataset(train_instances),
                                  opt.batch_size,
                                  shuffle=True,
                                  collate_fn=my_collate)

        logging.info("start training ...")

        best_test = -10
        bad_counter = 0
        for idx in range(opt.iter):
            epoch_start = time.time()

            model.train()

            train_iter = iter(train_loader)
            num_iter = len(train_loader)
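
Example #4 stops just before the per-batch training step. A minimal sketch of how the epoch loop could continue is given below; the batch unpacking order is copied from the evaluation loop in Example #5, while the cross-entropy loss and the `zero_grad`/`backward`/`step` placement are assumptions rather than the actual `rdoc_main.py` code.

import torch.nn.functional as F

# hypothetical body of the "for idx in range(opt.iter)" loop above
for i in range(num_iter):
    tokens, labels, mask, sent_type, cate = next(train_iter)

    optimizer.zero_grad()
    logits = model.forward(cate, tokens, sent_type, mask)
    loss = F.cross_entropy(logits, labels)  # assumes one class label per instance
    loss.backward()
    optimizer.step()
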
Code Example #5
def evaluate(documents, model, alphabet_label, alphabet_category, dump_dir):

    ct_predicted = 0
    ct_gold = 0
    ct_correct = 0

    all_pred_labels = []

    for document in documents:
        instances = prepare_instance_for_one_doc(document, alphabet_label,
                                                 alphabet_category)

        data_loader = DataLoader(MyDataset(instances),
                                 opt.batch_size,
                                 shuffle=False,
                                 collate_fn=my_collate)

        pred_labels = []

        with torch.no_grad():
            model.eval()

            data_iter = iter(data_loader)
            num_iter = len(data_loader)
            sent_start = 0

            for i in range(num_iter):
                tokens, labels, mask, sent_type, cate = next(data_iter)

                logits = model.forward(cate, tokens, sent_type, mask)

                actual_batch_size = tokens.size(0)

                for batch_idx in range(actual_batch_size):

                    sent_logits = logits[batch_idx]

                    _, indices = torch.max(sent_logits, 0)

                    pred_labels.append(
                        alphabet_label.get_instance(indices.item()))

                sent_start += actual_batch_size

        if len(document.relevant_sentences) != 0:
            p1, p2, p3 = count_tp(document.sentences, pred_labels)
        else:
            p1, p2, p3 = 0, 0, 0

        if dump_dir:
            all_pred_labels.append(pred_labels)

        ct_gold += p1
        ct_predicted += p2
        ct_correct += p3

    if ct_gold == 0 or ct_predicted == 0:
        precision = 0
        recall = 0
    else:
        precision = ct_correct * 1.0 / ct_predicted
        recall = ct_correct * 1.0 / ct_gold

    if precision + recall == 0:
        f_measure = 0
    else:
        f_measure = 2 * precision * recall / (precision + recall)

    # dump results
    if dump_dir:
        dump_results(documents, all_pred_labels, dump_dir)

    return precision, recall, f_measure
Code Example #6
def evaluate(documents, model, alphabet_label, dump_dir):

    ct_predicted = 0
    ct_gold = 0
    ct_correct = 0

    for document in documents:
        instances = prepare_instance_for_one_doc(document, alphabet_label)

        data_loader = DataLoader(MyDataset(instances),
                                 opt.batch_size,
                                 shuffle=False,
                                 collate_fn=my_collate)

        pred_entities = []

        with torch.no_grad():
            model.eval()

            data_iter = iter(data_loader)
            num_iter = len(data_loader)
            sent_start = 0
            entity_id = 1

            for i in range(num_iter):
                tokens, labels, mask, sent_type = next(data_iter)

                logits = model.forward(tokens, sent_type, mask)

                actual_batch_size = tokens.size(0)

                for batch_idx in range(actual_batch_size):

                    sent_logits = logits[batch_idx]
                    sent_mask = mask[batch_idx]

                    _, indices = torch.max(sent_logits, 1)

                    actual_indices = indices[sent_mask == 1]

                    # remove [CLS] and [SEP]
                    actual_indices = actual_indices[1:-1]

                    pred_labels = [
                        alphabet_label.get_instance(ind.item())
                        for ind in actual_indices
                    ]

                    sentence = document.sentences[sent_start + batch_idx]

                    sent_entities = translateLabelintoEntities(
                        pred_labels, sentence, entity_id,
                        sent_start + batch_idx)
                    entity_id += len(sent_entities)

                    pred_entities.extend(sent_entities)

                sent_start += actual_batch_size

        if len(document.entities) != 0:
            p1, p2, p3 = count_tp(document.entities, pred_entities)
        else:
            p1, p2, p3 = 0, 0, 0

        # dump results
        if dump_dir:
            dump_results(document, pred_entities, dump_dir)

        ct_gold += p1
        ct_predicted += p2
        ct_correct += p3

    if ct_gold == 0 or ct_predicted == 0:
        precision = 0
        recall = 0
    else:
        precision = ct_correct * 1.0 / ct_predicted
        recall = ct_correct * 1.0 / ct_gold

    if precision + recall == 0:
        f_measure = 0
    else:
        f_measure = 2 * precision * recall / (precision + recall)

    return precision, recall, f_measure
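
Both `evaluate` variants return micro-averaged scores: the counts are summed over all documents before precision, recall, and F-measure are computed. As a quick numeric check of that final step (hypothetical counts, not project output):

ct_gold, ct_predicted, ct_correct = 40, 50, 30   # hypothetical counts
precision = ct_correct / ct_predicted            # 30 / 50 = 0.6
recall = ct_correct / ct_gold                    # 30 / 40 = 0.75
f_measure = 2 * precision * recall / (precision + recall)   # 0.9 / 1.35 ≈ 0.667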