Example #1
    def eval_model(self, eval_loader):
        self.model.eval()
        # print(self.model.decoder.query_embed.weight)
        prediction, gold = {}, {}
        with torch.no_grad():
            batch_size = self.args.batch_size
            eval_num = len(eval_loader)
            # May yield one extra, empty batch when eval_num divides batch_size;
            # the empty slice is skipped below.
            total_batch = eval_num // batch_size + 1
            for batch_id in range(total_batch):
                start = batch_id * batch_size
                end = min((batch_id + 1) * batch_size, eval_num)
                eval_instance = eval_loader[start:end]
                if not eval_instance:
                    continue
                input_ids, attention_mask, target, info = self.model.batchify(
                    eval_instance)
                gold.update(formulate_gold(target, info))
                # print(target)
                gen_triples = self.model.gen_triples(input_ids, attention_mask,
                                                     info)
                prediction.update(gen_triples)
        num_metric(prediction, gold)
        overlap_metric(prediction, gold)
        return metric(prediction, gold)
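
The eval loop above relies on a formulate_gold helper that is not shown; it is expected to map each sentence index to that sentence's list of reference triples so that metric can compare it with prediction. A minimal sketch of one plausible shape, under the assumption that every target dict carries relation and span-index tensors; all key names here, including info["sent_idx"], are illustrative rather than the project's actual code:

# Hypothetical sketch of formulate_gold: builds {sentence_index: [gold triples]}.
# All key names are assumed for illustration.
def formulate_gold_sketch(targets, info):
    gold = {}
    for i, target in enumerate(targets):
        sent_idx = info["sent_idx"][i]  # assumed: info carries the original sentence ids
        gold[sent_idx] = [
            (
                target["relation"][j].item(),
                (target["head_start_index"][j].item(), target["head_end_index"][j].item()),
                (target["tail_start_index"][j].item(), target["tail_end_index"][j].item()),
            )
            for j in range(len(target["relation"]))
        ]
    return gold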
Example #2
    def trainIters(self, args):
        # Put all sub-models into training mode.
        self.lm_model.train()
        self.subject_model.train()
        self.object_model.train()

        # Resume the global step counter if training was restarted.
        step = self.start_step if self.start_step else 0

        for epoch in range(config.epoches):
            for batch in self.train_batcher:
                total_loss, sub_entities_loss, obj_entities_loss = self.train_one_batch(batch)
                print("epoch:", epoch, "step: ", step,
                      "total_loss:", total_loss,
                      "sub_entities_loss:", sub_entities_loss,
                      "obj_entities_loss: ", obj_entities_loss)
                step += 1

                # Evaluate and checkpoint every 2000 steps.
                if step % 2000 == 0:
                    with torch.no_grad():
                        self.lm_model.eval()
                        self.subject_model.eval()
                        self.object_model.eval()

                        precision, recall, f1 = metric(
                            self.lm_model, self.subject_model, self.object_model,
                            self.test_data, self.id2rel, self.lm_tokenizer,
                            output_path="./result.json")
                        print("precision: ", precision, "recall: ", recall, "f1: ", f1)
                        self.save_models(args, total_loss, step)

                    # Switch back to training mode after evaluation.
                    self.lm_model.train()
                    self.subject_model.train()
                    self.object_model.train()

            self.reset_train_dataloader()
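
The eval-then-restore dance inside the loop (switch the three models to eval mode under torch.no_grad, run metric, then switch them back to train mode) can be factored into a small context manager. A sketch of that pattern, assuming the models are ordinary torch.nn.Module instances; evaluation_mode is an illustrative name, not part of the original code:

from contextlib import contextmanager
import torch

@contextmanager
def evaluation_mode(*modules):
    # Temporarily put the given modules in eval mode with gradients disabled,
    # then restore training mode even if the body raises.
    for m in modules:
        m.eval()
    try:
        with torch.no_grad():
            yield
    finally:
        for m in modules:
            m.train()

# Usage inside the loop above (sketch):
# with evaluation_mode(self.lm_model, self.subject_model, self.object_model):
#     precision, recall, f1 = metric(...)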
Example #3
    def eval_model(self):
        self.model.eval()
        prediction, gold = {}, {}
        with torch.no_grad():
            for batch_id, (input_ids, attention_mask, targets,
                           info) in enumerate(tqdm(self.valid_loader)):
                # if batch_id > 10:
                #     break
                if self.args.use_gpu:
                    input_ids = Variable(input_ids.cuda())
                    attention_mask = Variable(attention_mask.cuda())
                    targets = [{k: v.cuda()
                                for k, v in t.items()} for t in targets]

                gold.update(formulate_gold(
                    targets, info))  # {0: [(triple 1), (triple 2), ...], 1: [...]}
                gen_triples = self.gen_triples(input_ids, attention_mask, info)
                prediction.update(
                    gen_triples
                )  # {0: [{"pred_rel": int, "rel_prob": float, ...}, {}, ... 10 dicts omitted], 1: []}
        # num_metric(prediction, gold)
        # overlap_metric(prediction, gold)
        return metric(prediction, gold)
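
For orientation, here is a minimal sketch of how a triple-level metric over the two dicts could be computed with micro-averaged precision, recall and F1. It assumes both dicts map sentence index to hashable triples (as in the gold dict above); it is an illustration, not the metric function these examples import:

def triple_prf_sketch(prediction, gold):
    # Micro-averaged precision/recall/F1 over {sentence_index: [triples]} dicts.
    correct = pred_total = gold_total = 0
    for sent_idx, gold_triples in gold.items():
        pred_triples = prediction.get(sent_idx, [])
        pred_total += len(pred_triples)
        gold_total += len(gold_triples)
        correct += len(set(pred_triples) & set(gold_triples))
    precision = correct / pred_total if pred_total else 0.0
    recall = correct / gold_total if gold_total else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    return precision, recall, f1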
def train():

    parser = argparse.ArgumentParser(
        description='PyTorch Medical Segmentation Training')
    parser = parse_training_args(parser)
    args = parser.parse_args()

    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.enabled = args.cudnn_enabled
    torch.backends.cudnn.benchmark = args.cudnn_benchmark
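    # Note: the cudnn flags above only control kernel selection. Fully
    # reproducible runs would also need the RNGs seeded, e.g. (optional
    # sketch, not part of the original script):
    #   random.seed(seed); np.random.seed(seed)
    #   torch.manual_seed(seed); torch.cuda.manual_seed_all(seed)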

    from data_function import MedData_train
    os.makedirs(args.output_dir, exist_ok=True)

    if hp.mode == '2d':
        from models.two_d.unet import Unet
        model = Unet(in_channels=hp.in_class, classes=hp.out_class)

        # from models.two_d.miniseg import MiniSeg
        # model = MiniSeg(in_input=hp.in_class, classes=hp.out_class)

        # from models.two_d.fcn import FCN32s as fcn
        # model = fcn(in_class =hp.in_class,n_class=hp.out_class)

        # from models.two_d.segnet import SegNet
        # model = SegNet(input_nbr=hp.in_class,label_nbr=hp.out_class)

        # from models.two_d.deeplab import DeepLabV3
        # model = DeepLabV3(in_class=hp.in_class,class_num=hp.out_class)

        # from models.two_d.unetpp import ResNet34UnetPlus
        # model = ResNet34UnetPlus(num_channels=hp.in_class,num_class=hp.out_class)

        # from models.two_d.pspnet import PSPNet
        # model = PSPNet(in_class=hp.in_class,n_classes=hp.out_class)

    elif hp.mode == '3d':

        from models.three_d.unet3d import UNet3D
        model = UNet3D(in_channels=hp.in_class,
                       out_channels=hp.out_class,
                       init_features=32)

        # from models.three_d.residual_unet3d import UNet
        # model = UNet(in_channels=hp.in_class, n_classes=hp.out_class, base_n_filter=2)

        #from models.three_d.fcn3d import FCN_Net
        #model = FCN_Net(in_channels =hp.in_class,n_class =hp.out_class)

        #from models.three_d.highresnet import HighRes3DNet
        #model = HighRes3DNet(in_channels=hp.in_class,out_channels=hp.out_class)

        #from models.three_d.densenet3d import SkipDenseNet3D
        #model = SkipDenseNet3D(in_channels=hp.in_class, classes=hp.out_class)

        # from models.three_d.densevoxelnet3d import DenseVoxelNet
        # model = DenseVoxelNet(in_channels=hp.in_class, classes=hp.out_class)

        #from models.three_d.vnet3d import VNet
        #model = VNet(in_channels=hp.in_class, classes=hp.out_class)

    model = torch.nn.DataParallel(model, device_ids=devicess)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.init_lr)

    # scheduler = ReduceLROnPlateau(optimizer, 'min',factor=0.5, patience=20, verbose=True)
    scheduler = StepLR(optimizer,
                       step_size=hp.scheduer_step_size,
                       gamma=hp.scheduer_gamma)
    # scheduler = CosineAnnealingLR(optimizer, T_max=50, eta_min=5e-6)

    if args.ckpt is not None:
        print("load model:", args.ckpt)
        print(os.path.join(args.output_dir, args.latest_checkpoint_file))
        ckpt = torch.load(os.path.join(args.output_dir,
                                       args.latest_checkpoint_file),
                          map_location=lambda storage, loc: storage)

        model.load_state_dict(ckpt["model"])
        optimizer.load_state_dict(ckpt["optim"])

        for state in optimizer.state.values():
            for k, v in state.items():
                if torch.is_tensor(v):
                    state[k] = v.cuda()

        # scheduler.load_state_dict(ckpt["scheduler"])
        elapsed_epochs = ckpt["epoch"]
    else:
        elapsed_epochs = 0

    model.cuda()

    from loss_function import Binary_Loss, DiceLoss
    criterion = Binary_Loss().cuda()

    writer = SummaryWriter(args.output_dir)

    train_dataset = MedData_train(source_train_dir, label_train_dir)
    train_loader = DataLoader(train_dataset.queue_dataset,
                              batch_size=args.batch,
                              shuffle=True,
                              pin_memory=True,
                              drop_last=True)

    model.train()

    epochs = args.epochs - elapsed_epochs
    iteration = elapsed_epochs * len(train_loader)

    for epoch in range(1, epochs + 1):
        print("epoch:" + str(epoch))
        epoch += elapsed_epochs

        num_iters = 0

        for i, batch in enumerate(train_loader):

            if hp.debug:
                if i >= 1:
                    break

            print(f"Batch: {i}/{len(train_loader)} epoch {epoch}")

            optimizer.zero_grad()

            if (hp.in_class == 1) and (hp.out_class == 1):
                x = batch['source']['data']
                y = batch['label']['data']

                x = x.type(torch.FloatTensor).cuda()
                y = y.type(torch.FloatTensor).cuda()

            else:
                x = batch['source']['data']
                y_atery = batch['atery']['data']
                y_lung = batch['lung']['data']
                y_trachea = batch['trachea']['data']
                y_vein = batch['vein']['data']  # original read batch['atery']['data'], presumably a copy-paste slip; a 'vein' key is assumed

                x = x.type(torch.FloatTensor).cuda()

                y = torch.cat((y_atery, y_lung, y_trachea, y_vein), 1)
                y = y.type(torch.FloatTensor).cuda()

            if hp.mode == '2d':
                x = x.squeeze(4)
                y = y.squeeze(4)

                y[y != 0] = 1

            # print(y.max())

            outputs = model(x)

            # for metrics
            logits = torch.sigmoid(outputs)
            labels = logits.clone()
            labels[labels > 0.5] = 1
            labels[labels <= 0.5] = 0

            loss = criterion(outputs, y)

            num_iters += 1
            loss.backward()

            optimizer.step()
            iteration += 1

            false_positive_rate, false_negative_rate, dice = metric(
                y.cpu(), labels.cpu())
            ## log
            writer.add_scalar('Training/Loss', loss.item(), iteration)
            writer.add_scalar('Training/false_positive_rate',
                              false_positive_rate, iteration)
            writer.add_scalar('Training/false_negative_rate',
                              false_negative_rate, iteration)
            writer.add_scalar('Training/dice', dice, iteration)

            print("loss:" + str(loss.item()))
            print('lr:' + str(scheduler.get_last_lr()[0]))

        scheduler.step()

        # Store latest checkpoint in each epoch
        torch.save(
            {
                "model": model.state_dict(),
                "optim": optimizer.state_dict(),
                "scheduler": scheduler.state_dict(),
                "epoch": epoch,
            },
            os.path.join(args.output_dir, args.latest_checkpoint_file),
        )

        # Save checkpoint
        if epoch % args.epochs_per_checkpoint == 0:

            torch.save(
                {
                    "model": model.state_dict(),
                    "optim": optimizer.state_dict(),
                    "epoch": epoch,
                },
                os.path.join(args.output_dir, f"checkpoint_{epoch:04d}.pt"),
            )

            with torch.no_grad():
                if hp.mode == '2d':
                    x = x.unsqueeze(4)
                    y = y.unsqueeze(4)
                    outputs = outputs.unsqueeze(4)

                x = x[0].cpu().detach().numpy()
                y = y[0].cpu().detach().numpy()
                outputs = outputs[0].cpu().detach().numpy()
                affine = batch['source']['affine'][0].numpy()

                if (hp.in_class == 1) and (hp.out_class == 1):
                    source_image = torchio.ScalarImage(tensor=x, affine=affine)
                    source_image.save(
                        os.path.join(args.output_dir,
                                     f"step-{epoch:04d}-source" +
                                     hp.save_arch))
                    # source_image.save(os.path.join(args.output_dir,("step-{}-source.mhd").format(epoch)))

                    label_image = torchio.ScalarImage(tensor=y, affine=affine)
                    label_image.save(
                        os.path.join(args.output_dir,
                                     f"step-{epoch:04d}-gt" + hp.save_arch))

                    output_image = torchio.ScalarImage(tensor=outputs,
                                                       affine=affine)
                    output_image.save(
                        os.path.join(
                            args.output_dir,
                            f"step-{epoch:04d}-predict" + hp.save_arch))
                else:
                    y = np.expand_dims(y, axis=1)
                    outputs = np.expand_dims(outputs, axis=1)

                    source_image = torchio.ScalarImage(tensor=x, affine=affine)
                    source_image.save(
                        os.path.join(args.output_dir,
                                     f"step-{epoch:04d}-source" +
                                     hp.save_arch))

                    label_image_artery = torchio.ScalarImage(tensor=y[0],
                                                             affine=affine)
                    label_image_artery.save(
                        os.path.join(
                            args.output_dir,
                            f"step-{epoch:04d}-gt_artery" + hp.save_arch))

                    output_image_artery = torchio.ScalarImage(
                        tensor=outputs[0], affine=affine)
                    output_image_artery.save(
                        os.path.join(
                            args.output_dir,
                            f"step-{epoch:04d}-predict_artery" + hp.save_arch))

                    label_image_lung = torchio.ScalarImage(tensor=y[1],
                                                           affine=affine)
                    label_image_lung.save(
                        os.path.join(
                            args.output_dir,
                            f"step-{epoch:04d}-gt_lung" + hp.save_arch))

                    output_image_lung = torchio.ScalarImage(tensor=outputs[1],
                                                            affine=affine)
                    output_image_lung.save(
                        os.path.join(
                            args.output_dir,
                            f"step-{epoch:04d}-predict_lung" + hp.save_arch))

                    label_image_trachea = torchio.ScalarImage(tensor=y[2],
                                                              affine=affine)
                    label_image_trachea.save(
                        os.path.join(
                            args.output_dir,
                            f"step-{epoch:04d}-gt_trachea" + hp.save_arch))

                    output_image_trachea = torchio.ScalarImage(
                        tensor=outputs[2], affine=affine)
                    output_image_trachea.save(
                        os.path.join(
                            args.output_dir,
                            f"step-{epoch:04d}-predict_trachea" +
                            hp.save_arch))

                    label_image_vein = torchio.ScalarImage(tensor=y[3],
                                                           affine=affine)
                    label_image_vein.save(
                        os.path.join(
                            args.output_dir,
                            f"step-{epoch:04d}-gt_vein" + hp.save_arch))

                    output_image_vein = torchio.ScalarImage(tensor=outputs[3],
                                                            affine=affine)
                    output_image_vein.save(
                        os.path.join(
                            args.output_dir,
                            f"step-{epoch:04d}-predict_vein" + hp.save_arch))

    writer.close()
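
The script presumably runs train() directly; a minimal assumed entry point (the exact CLI flags depend on whatever parse_training_args registers):

# Assumed entry point, e.g.:
#   python train.py --output_dir logs --epochs 100 --batch 2 --init_lr 1e-3
if __name__ == "__main__":
    train()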