Code example #1
def main(_):
    # set allow_growth to True so GPU memory is allocated on demand
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        # build the full zi2zi model (the entire GAN)
        model = UNet(args.experiment_dir, batch_size=args.batch_size, experiment_id=args.experiment_id,
                     input_width=args.image_size, output_width=args.image_size, embedding_num=args.embedding_num,
                     embedding_dim=args.embedding_dim, L1_penalty=args.L1_penalty, Lconst_penalty=args.Lconst_penalty,
                     Ltv_penalty=args.Ltv_penalty, Lcategory_penalty=args.Lcategory_penalty)
        model.register_session(sess)

        if args.flip_labels:
            model.build_model(is_training=True, inst_norm=args.inst_norm, no_target_source=True)
        else:
            model.build_model(is_training=True, inst_norm=args.inst_norm)

        fine_tune_list = None
        # if specific fine-tuning characters were given as an option,
        if args.fine_tune:
            ids = args.fine_tune.split(",")
            fine_tune_list = set([int(i) for i in ids])

        # start training the zi2zi model
        model.train(lr=args.lr, epoch=args.epoch, resume=args.resume,
                    schedule=args.schedule, freeze_encoder=args.freeze_encoder, fine_tune=fine_tune_list,
                    sample_steps=args.sample_steps, checkpoint_steps=args.checkpoint_steps,
                    flip_labels=args.flip_labels)
Code example #2
File: test2.py Project: heixialeeLeon/Noise2Noise
def test():
    device = torch.device(args.devices if torch.cuda.is_available() else "cpu")
    #test_dataset = Training_Dataset(args.test_dir, (args.image_size,args.image_size),(args.noise, args.noise_param))
    # test_dataset = HongZhang_Dataset("/data_1/data/Noise2Noise/shenqingbiao/0202", "/data_1/data/Noise2Noise/hongzhang")
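    # NOTE: the dataset path below is hardcoded to a local directory; adjust it to your environment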
    test_dataset = HongZhang_TestDataset("/data_1/data/红章图片/test/hongzhang", (256, 256))
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)

    # choose the model
    if args.model == "unet":
        model = UNet(in_channels=args.image_channels, out_channels=args.image_channels)
    elif args.model == "srresnet":
        model = SRResnet(args.image_channels, args.image_channels)
    elif args.model == "eesp":
        model = EESPNet_Seg(args.image_channels, 2)
    else:
        model = UNet(in_channels=args.image_channels, out_channels=args.image_channels)
    print('loading model')
    # model.load_state_dict(torch.load(model_path))
    # model.eval()
    # model.to(device)
    if args.resume_model:
        resume_model(model, args.resume_model)
    # eval mode and device placement are needed even when no checkpoint is resumed,
    # otherwise inference below may run with training-mode layers or on the wrong device
    model.eval()
    model.to(device)

    # result_dir = args.denoised_dir
    # if not os.path.exists(result_dir):
    #     os.mkdir(result_dir)

    for batch_idx, image in enumerate(test_loader):
        #PIL_ShowTensor(torch.squeeze(source))
        #PIL_ShowTensor2(torch.squeeze(source),torch.squeeze(noise))
        image = image.to(device)
        denoised_img = model(image).detach().cpu()
        CV2_showTensors(image.cpu(), denoised_img, timeout=5000)
Code example #3
File: unet_test.py Project: Nornostra/ai-summer
class UnetTest(tf.test.TestCase):

    def setUp(self):
        super(UnetTest, self).setUp()
        self.unet = UNet(CFG)

    def tearDown(self):
        pass

    def test_normalize(self):
        input_image = np.array([[1., 1.], [1., 1.]])
        input_mask = 1
        expected_image = np.array([[0.00392157, 0.00392157], [0.00392157, 0.00392157]])

        result = self.unet._normalize(input_image, input_mask)
        self.assertAllClose(expected_image, result[0])

    def test_output_size(self):
        shape = (1, self.unet.image_size, self.unet.image_size, 3)
        image = tf.ones(shape)
        self.unet.build()
        self.assertEqual(self.unet.model.predict(image).shape, shape)

    @patch('model.unet.DataLoader.load_data')
    def test_load_data(self, mock_data_loader):
        mock_data_loader.side_effect = dummy_load_data
        shape = tf.TensorShape([None, self.unet.image_size, self.unet.image_size, 3])

        self.unet.load_data()
        mock_data_loader.assert_called()

        self.assertItemsEqual(self.unet.train_dataset.element_spec[0].shape, shape)
        self.assertItemsEqual(self.unet.test_dataset.element_spec[0].shape, shape)
Code example #4
def main(_):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        model = UNet(batch_size=args.batch_size,
                     embedding_dim=args.embedding_dim,
                     input_width=args.image_size,
                     output_width=args.image_size,
                     embedding_num=args.embedding_num)
        model.register_session(sess)
        model.build_model(is_training=False, inst_norm=args.inst_norm)
        embedding_ids = [int(i) for i in args.embedding_ids.split(",")]
        if not args.interpolate:
            if len(embedding_ids) == 1:
                embedding_ids = embedding_ids[0]
            model.infer(model_dir=args.model_dir, source_obj=args.source_obj, embedding_ids=embedding_ids,
                        save_dir=args.save_dir)
        else:
            if len(embedding_ids) < 2:
                raise Exception("no need to interpolate yourself unless you are a narcissist")
            chains = embedding_ids[:]
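            # uroboros: append the first id so the interpolation chain loops back on itself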
            if args.uroboros:
                chains.append(chains[0])
            pairs = list()
            for i in range(len(chains) - 1):
                pairs.append((chains[i], chains[i + 1]))
            for s, e in pairs:
                model.interpolate(model_dir=args.model_dir, source_obj=args.source_obj, between=[s, e],
                                  save_dir=args.save_dir, steps=args.steps)
            if args.output_gif:
                gif_path = os.path.join(args.save_dir, args.output_gif)
                compile_frames_to_gif(args.save_dir, gif_path)
                print("gif saved at %s" % gif_path)
Code example #5
def main(_):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        model = UNet(args.experiment_dir,
                     batch_size=args.batch_size,
                     experiment_id=args.experiment_id,
                     input_width=args.image_size,
                     output_width=args.image_size,
                     embedding_num=args.embedding_num,
                     embedding_dim=args.embedding_dim)
        model.register_session(sess)
        if args.flip_labels:
            model.build_model(is_training=True,
                              inst_norm=args.inst_norm,
                              no_target_source=True)
        else:
            model.build_model(is_training=True, inst_norm=args.inst_norm)
        fine_tune_list = None
        if args.fine_tune:
            ids = args.fine_tune.split(",")
            fine_tune_list = set([int(i) for i in ids])
        model.train(lr=args.lr,
                    epoch=args.epoch,
                    resume=args.resume,
                    schedule=args.schedule,
                    freeze_encoder=args.freeze_encoder,
                    fine_tune=fine_tune_list,
                    sample_steps=args.sample_steps,
                    checkpoint_steps=args.checkpoint_steps,
                    flip_labels=args.flip_labels)
Code example #6
File: main.py Project: Vergangenheit/DL_Production
def run():
    """Builds model, loads data, trains and evaluates"""
    model = UNet(CFG)
    model.load_data()
    model.build()
    model.train()
    model.evaluate()
Code example #7
def main(_):

    available_cpu, available_gpu, available_cpu_num, available_gpu_num = get_available_gpus()
    forward_backward_device = list()
    if available_gpu_num == 0:
        print(
            "No available GPU found!!! The calculation will be performed with CPU only."
        )
        args.device_mode = 0

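    # device_mode: 0 = CPU only; 1 = parameter update on CPU, forward/backward
    # on every available GPU; 2 = update on GPU 1, forward/backward on GPUs 0-2
    # (assumes at least three GPUs); 3 = a single GPU for both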
    if args.device_mode == 0:
        parameter_update_device = available_cpu[0]
        forward_backward_device.append(available_cpu[0])
    elif args.device_mode == 1:
        parameter_update_device = available_cpu[0]
        forward_backward_device.extend(available_gpu)
    elif args.device_mode == 2:
        parameter_update_device = available_gpu[1]
        forward_backward_device.append(available_gpu[0])
        forward_backward_device.append(available_gpu[1])
        forward_backward_device.append(available_gpu[2])
    elif args.device_mode == 3:
        parameter_update_device = available_gpu[0]
        forward_backward_device.append(available_gpu[0])

    forward_backward_device_list = list()
    forward_backward_device_list.extend(forward_backward_device)
    print("Available devices for forward && backward:")
    for device in forward_backward_device_list:
        print(device)
    print("Available devices for parameter update:%s" %
          parameter_update_device)

    model_for_train = UNet(
        training_mode=args.training_mode,
        base_trained_model_dir=args.base_trained_model_dir,
        experiment_dir=args.experiment_dir,
        experiment_id=args.experiment_id,
        train_obj_name=args.train_name,
        val_obj_name=args.val_name,
        optimization_method=args.optimization_method,
        batch_size=args.batch_size,
        lr=args.lr,
        samples_per_font=args.samples_per_font,
        schedule=args.schedule,
        ebdd_dictionary_dim=args.ebdd_dictionary_dim,
        L1_penalty=args.L1_penalty,
        Lconst_penalty=args.Lconst_penalty,
        ebdd_weight_penalty=args.ebdd_weight_penalty,
        base_training_font_num=args.base_training_font_num,
        resume_training=args.resume_training,
        freeze_encoder=args.freeze_encoder,
        freeze_decoder=args.freeze_decoder,
        sub_train_set_num=args.sub_train_set_num,
        parameter_update_device=parameter_update_device,
        forward_backward_device=forward_backward_device_list,
    )

    model_for_train.train_procedures()
Code example #8
def main():
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s: %(message)s')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logging.info(f'Using device {device}')
    parser = argparse.ArgumentParser()
    parser.add_argument("--config",
                        type=str,
                        required=True,
                        help="Path to (.yml) config file.")
    parser.add_argument(
        "--load-checkpoint",
        type=str,
        default="",
        help="Path to load saved checkpoint from.",
    )
    configargs = parser.parse_args()
    # Read the config file.
    with open(configargs.config, "r") as f:
        cfg_dict = yaml.load(f, Loader=yaml.FullLoader)
    print(cfg_dict)
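    # For reference, a config file matching the keys read below might look like
    # this (values are hypothetical):
    #   base_dir: ./data
    #   image_dir_suffix: imgs
    #   mask_dir_suffix: masks
    #   checkpoint_dir: ./checkpoints
    #   epochs: 5
    #   batch_size: 1
    #   learning_rate: 0.0001
    #   validation: 0.2
    #   scale: 1
    #   loss_function: dice_loss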
    # set up network in/out channels details
    # n_channels=3 for RGB images
    # n_classes is the number of probabilities you want to get per pixel
    #   - For 1 class and background, use n_classes=1
    #   - For 2 classes, use n_classes=1
    #   - For N > 2 classes, use n_classes=N
    net = UNet(n_channels=1, n_classes=1, bilinear=True)
    #print(net)
    logging.info(
        f'Network:\n'
        f'\t{net.n_channels} input channels\n'
        f'\t{net.n_classes} output channels (classes)\n'
        f'\t{"Bilinear" if net.bilinear else "Transposed conv"} upscaling')
    net.to(device=device)
    # set up an optional custom loss function
    custom_loss_fn = None
    if cfg_dict.get('loss_function', None) == 'dice_loss':
        logging.info(
            f"Using custom loss function: {cfg_dict.get('loss_function', None)}")
        custom_loss_fn = DiceLoss
    # start training
    TrainNet(Net=net,
             device=device,
             root_imgs_dir=cfg_dict.get('base_dir', None),
             imgs_dir_name=cfg_dict.get("image_dir_suffix", None),
             mask_dir_name=cfg_dict.get("mask_dir_suffix", None),
             dir_checkpoint=cfg_dict.get("checkpoint_dir", None),
             epochs=cfg_dict.get("epochs", 5),
             batch_size=cfg_dict.get("batch_size", 1),
             lr=cfg_dict.get("learning_rate", 0.0001),
             val_percent=cfg_dict.get("validation", 0.2),
             save_checkpoints=True,
             img_scale=cfg_dict.get("scale", 1),
             custome_loss_fn=custom_loss_fn)  # keyword spelling follows TrainNet's signature
Code example #9
def test(args):
    print("Predicting ...")
    test_paths = os.listdir(os.path.join(args.dataset_dir, args.test_img_dir))
    print(len(test_paths), 'test images found')
    test_df = pd.DataFrame({'ImageId': test_paths, 'EncodedPixels': None})

    from skimage.morphology import binary_opening, disk

    test_df = test_df[:5000]
    test_loader = make_dataloader(test_df,
                                  args,
                                  batch_size=args.batch_size,
                                  shuffle=False,
                                  transform=None,
                                  mode='predict')

    model = UNet()
    if args.gpu and torch.cuda.is_available():
        model = model.cuda()
    run_id = 1
    print("Resuming run #{}...".format(run_id))
    model_path = Path('model_{run_id}.pt'.format(run_id=run_id))
    state = torch.load(str(model_path))
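    # checkpoints saved through nn.DataParallel prefix parameter names with
    # 'module.'; strip the prefix so the weights load into an unwrapped model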
    state = {
        key.replace('module.', ''): value
        for key, value in state['model'].items()
    }
    model.load_state_dict(state)

    out_pred_rows = []

    model.eval()  # switch dropout/batch norm to inference behaviour
    for batch_id, (inputs,
                   image_paths) in enumerate(tqdm(test_loader,
                                                  desc='Predict')):
        if args.gpu and torch.cuda.is_available():
            inputs = inputs.cuda()
        outputs = model(inputs)
        for i, image_name in enumerate(image_paths):
            mask = torch.sigmoid(outputs[i, 0]).data.cpu().numpy()
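            # binarize at 0.5, then apply a morphological opening (disk of radius 2)
            # to drop tiny spurious regions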
            cur_seg = binary_opening(mask > 0.5, disk(2))
            cur_rles = multi_rle_encode(cur_seg)
            if len(cur_rles) > 0:
                for c_rle in cur_rles:
                    out_pred_rows += [{
                        'ImageId': image_name,
                        'EncodedPixels': c_rle
                    }]
            else:
                out_pred_rows += [{
                    'ImageId': image_name,
                    'EncodedPixels': None
                }]

    submission_df = pd.DataFrame(out_pred_rows)[['ImageId', 'EncodedPixels']]
    submission_df.to_csv('submission.csv', index=False)
    print("done.")
Code example #10
File: main.py Project: M3DV/FracNet
def main(args):
    train_image_dir = args.train_image_dir
    train_label_dir = args.train_label_dir
    val_image_dir = args.val_image_dir
    val_label_dir = args.val_label_dir

    batch_size = 4
    num_workers = 4
    optimizer = optim.SGD
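    # MixLoss presumably sums the weighted terms: 0.5 * BCE-with-logits + 1 * Dice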
    criterion = MixLoss(nn.BCEWithLogitsLoss(), 0.5, DiceLoss(), 1)

    thresh = 0.1
    recall_partial = partial(recall, thresh=thresh)
    precision_partial = partial(precision, thresh=thresh)
    fbeta_score_partial = partial(fbeta_score, thresh=thresh)

    model = UNet(1, 1, first_out_channels=16)
    model = nn.DataParallel(model.cuda())

    # window CT intensities to [-200, 1000] HU and min-max normalize over that range
    transforms = [
        tsfm.Window(-200, 1000),
        tsfm.MinMaxNorm(-200, 1000)
    ]
    ds_train = FracNetTrainDataset(train_image_dir, train_label_dir,
        transforms=transforms)
    dl_train = FracNetTrainDataset.get_dataloader(ds_train, batch_size, False,
        num_workers)
    ds_val = FracNetTrainDataset(val_image_dir, val_label_dir,
        transforms=transforms)
    dl_val = FracNetTrainDataset.get_dataloader(ds_val, batch_size, False,
        num_workers)

    databunch = DataBunch(dl_train, dl_val,
        collate_fn=FracNetTrainDataset.collate_fn)

    learn = Learner(
        databunch,
        model,
        opt_func=optimizer,
        loss_func=criterion,
        metrics=[dice, recall_partial, precision_partial, fbeta_score_partial]
    )

    learn.fit_one_cycle(
        200,
        1e-1,
        pct_start=0,
        div_factor=1000,
        callbacks=[
            ShowGraph(learn),
        ]
    )

    if args.save_model:
        save(model.module.state_dict(), "./model_weights.pth")
Code example #11
def main():
    # Detect devices
    use_cuda = torch.cuda.is_available()  # check if GPU exists
    device = torch.device("cuda" if use_cuda else "cpu")  # use CPU or GPU

    model = UNet(device,
                 input_width=args.image_size,
                 output_width=args.image_size,
                 inst_norm=args.inst_norm,
                 g_norm_type=args.g_norm_type).to(device)
    model.export_generator(save_dir=args.save_dir, model_dir=args.model_dir)
Code example #12
    def __init__(self, config):
        super(E2VID, self).__init__(config)

        self.unet = UNet(num_input_channels=self.num_bins,
                         num_output_channels=1,
                         skip_type=self.skip_type,
                         activation='sigmoid',
                         num_encoders=self.num_encoders,
                         base_num_channels=self.base_num_channels,
                         num_residual_blocks=self.num_residual_blocks,
                         norm=self.norm,
                         use_upsample_conv=self.use_upsample_conv)
Code example #13
def main(_):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        model = UNet(args.experiment_dir,
                     batch_size=args.batch_size,
                     experiment_id=args.experiment_id,
                     input_width=args.image_size,
                     output_width=args.image_size,
                     embedding_num=args.embedding_num,
                     embedding_dim=args.embedding_dim,
                     L1_penalty=args.L1_penalty,
                     Lconst_penalty=args.Lconst_penalty,
                     Ltv_penalty=args.Ltv_penalty,
                     Lcategory_penalty=args.Lcategory_penalty)

        model.register_session(sess)
        if args.flip_labels:
            model.build_model(is_training=True,
                              inst_norm=args.inst_norm,
                              no_target_source=True)
        else:
            model.build_model(is_training=True, inst_norm=args.inst_norm)
        fine_tune_list = None
        if args.fine_tune:
            ids = args.fine_tune.split(",")
            fine_tune_list = set([int(i) for i in ids])

        print("***************** number of parameters *******************")

        def get_num_params():
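            # assumes `from functools import reduce` and `from operator import mul`
            # (reduce is not a builtin in Python 3)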
            num_params = 0
            for variable in tf.trainable_variables():
                shape = variable.get_shape()
                p = reduce(mul, [dim.value for dim in shape], 1)
                print(variable.name, p)
                num_params += p
            return num_params

        print(get_num_params())
        print("***************** number of parameters *******************")

        model.train(lr=args.lr,
                    epoch=args.epoch,
                    resume=args.resume,
                    schedule=args.schedule,
                    freeze_encoder=args.freeze_encoder,
                    fine_tune=fine_tune_list,
                    sample_steps=args.sample_steps,
                    checkpoint_steps=args.checkpoint_steps,
                    flip_labels=args.flip_labels)
Code example #14
def main(_):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    src_font = ImageFont.truetype(args.src_font, size=args.char_size)

    with tf.Session(config=config) as sess:
        model = UNet(batch_size=args.batch_size,
                     input_width=args.canvas_size,
                     output_width=args.canvas_size,
                     experiment_id=args.experiment_id,
                     embedding_dim=args.embedding_dim,
                     embedding_num=args.embedding_num)
        model.register_session(sess)
        model.build_model(is_training=False, inst_norm=args.inst_norm)
        model.load_model(args.model_dir)

        count = 0
        batch_buffer = list()
        examples = []
        for ch in list(args.text):
            src_img = draw_single_char_by_font(ch, src_font, args.canvas_size,
                                               args.char_size)

            paired_img = draw_paired_image(src_img, src_img, args.canvas_size)

            # NOTE: the index is hardcoded to 100, so this preview file is
            # overwritten for every character
            p = os.path.join(args.save_dir, "inferred_%04d.png" % 100)
            misc.imsave(p, paired_img)

            buffered = BytesIO()
            paired_img.save(buffered, format="JPEG")

            examples.append((args.embedding_id, buffered.getvalue()))
        batch_iter = get_batch_iter(examples, args.batch_size, augment=False)

        for _, images in batch_iter:
            # inject specific embedding style here
            labels = [args.embedding_id] * len(images)

            fake_imgs = model.generate_fake_samples(images, labels)[0]
            merged_fake_images = merge(scale_back(fake_imgs),
                                       [-1, 1])  # scale 0-1
            print("getshape", type(merged_fake_images),
                  merged_fake_images.shape)
            if len(batch_buffer) > 0 and merged_fake_images.shape != batch_buffer[0].shape:
                continue
            batch_buffer.append(merged_fake_images)
            # if len(batch_buffer) == 10:
            #     save_imgs(batch_buffer, count, args.save_dir)
            #     batch_buffer = list()
            count += 1

        if batch_buffer:
            # last batch
            save_imgs(batch_buffer, count, args.save_dir)
Code example #15
File: train.py Project: kse0202/neural-fonts-test
def main():

    model = UNet(args.experiment_dir,
                 batch_size=args.batch_size,
                 experiment_id=args.experiment_id,
                 input_width=args.image_size,
                 output_width=args.image_size,
                 embedding_num=args.embedding_num,
                 embedding_dim=args.embedding_dim,
                 L1_penalty=args.L1_penalty,
                 Lconst_penalty=args.Lconst_penalty,
                 Ltv_penalty=args.Ltv_penalty,
                 Lcategory_penalty=args.Lcategory_penalty)
    # model.register_session(sess)
    if args.flip_labels:
        model.build_model(is_training=True,
                          inst_norm=args.inst_norm,
                          no_target_source=True)
    else:
        model.build_model(is_training=True, inst_norm=args.inst_norm)
    fine_tune_list = None
    if args.fine_tune:
        ids = args.fine_tune.split(",")
        fine_tune_list = set([int(i) for i in ids])
    model.train(lr=args.lr,
                epoch=args.epoch,
                resume=args.resume,
                schedule=args.schedule,
                freeze_encoder=args.freeze_encoder,
                fine_tune=fine_tune_list,
                sample_steps=args.sample_steps,
                checkpoint_steps=args.checkpoint_steps,
                flip_labels=args.flip_labels,
                no_val=args.no_val)
Code example #16
File: main.py Project: Zchhh73/Unet2D_torchVersion
def train(args):
    model = UNet(3, 3).to(device)
    batch_size = args.batch_size
    criterion = nn.BCEWithLogitsLoss()
    # criterion = DiceLoss()
    optimizer = optim.Adam(model.parameters())
    verse_data = DatasetVerse(dir_img,
                              dir_mask,
                              transform=x_transform,
                              target_transform=y_transform)
    dataloader = DataLoader(verse_data,
                            batch_size=batch_size,
                            shuffle=True,
                            num_workers=4)
    train_model(model, criterion, optimizer, dataloader)
Code example #17
    def __init__(self,
                 input_channels=1,
                 num_classes=1,
                 num_filters=[32, 64, 128, 192],
                 latent_dim=6,
                 no_convs_fcomb=3,
                 beta=1.0):
        super(ProbabilisticUnet, self).__init__()
        self.n_channels = input_channels
        self.n_classes = num_classes
        self.num_filters = num_filters
        self.latent_dim = latent_dim
        self.no_convs_per_block = 2
        self.no_convs_fcomb = no_convs_fcomb
        self.initializers = {'w': 'he_normal', 'b': 'normal'}
        self.beta = beta
        self.z_prior_sample = 0

        # deterministic U-Net backbone; apply_last_layer=False so its feature
        # maps can be fused with a latent sample in Fcomb
        self.unet = UNet(n_channels=self.n_channels,
                         n_classes=self.n_classes,
                         num_filters=self.num_filters,
                         apply_last_layer=False).to(device)
        # prior net p(z|x), conditioned on the input image only
        self.prior = AxisAlignedConvGaussian(
            self.n_channels,
            self.num_filters,
            self.no_convs_per_block,
            self.latent_dim,
            self.initializers,
        ).to(device)
        # posterior net q(z|x, y), additionally conditioned on the ground-truth mask
        self.posterior = AxisAlignedConvGaussian(self.n_channels,
                                                 self.num_filters,
                                                 self.no_convs_per_block,
                                                 self.latent_dim,
                                                 self.initializers,
                                                 posterior=True).to(device)
        self.fcomb = Fcomb(self.num_filters,
                           self.latent_dim,
                           self.n_channels,
                           self.n_classes,
                           self.no_convs_fcomb, {
                               'w': 'orthogonal',
                               'b': 'normal'
                           },
                           use_tile=True).to(device)

        self.posterior_latent_space = None
        self.prior_latent_space = None
        self.unet_features = None
Code example #18
File: model.py Project: uzh-rpg/rpg_ramnet
class ERGB2Depth(BaseERGB2Depth):
    def __init__(self, config):
        super(ERGB2Depth, self).__init__(config)

        self.unet = UNet(num_input_channels=self.num_bins_rgb,
                         num_output_channels=1,
                         skip_type=self.skip_type,
                         activation='sigmoid',
                         num_encoders=self.num_encoders,
                         base_num_channels=self.base_num_channels,
                         num_residual_blocks=self.num_residual_blocks,
                         norm=self.norm,
                         use_upsample_conv=self.use_upsample_conv)

    def forward(self, item, prev_super_states, prev_states_lstm):
        """
        :param item: dict of input tensors; item["image"] has shape N x num_bins_rgb x H x W
        :return: a dict with a predicted image of size N x 1 x H x W taking values
                 in [0, 1], a dict of super states, and prev_states_lstm unchanged
        """
        predictions_dict = {}
        '''for key in item.keys():
            if "depth" not in key:
                event_tensor = item[key].to(self.gpu)

                prediction = self.unet.forward(event_tensor)
                predictions_dict[key] = prediction'''

        event_tensor = item["image"].to(self.gpu)
        prediction = self.unet.forward(event_tensor)
        predictions_dict["image"] = prediction

        return predictions_dict, {'image': None}, prev_states_lstm
Code example #19
File: train.py Project: catherinebouchard22/carvana
def main():
    df_train = pd.read_csv(DATA_ROOT / 'train_masks.csv')
    ids_train = df_train['img'].map(lambda s: s.split('.')[0])

    ids_train_split, ids_valid_split = train_test_split(ids_train,
                                                        test_size=0.2,
                                                        random_state=SEED)

    print('Training on {} samples'.format(len(ids_train_split)))
    print('Validating on {} samples'.format(len(ids_valid_split)))

    train_dataset = CarvanaTrainDataset(ids_train_split.values)
    valid_dataset = CarvanaTrainDataset(ids_valid_split.values)

    train_loader = DataLoader(train_dataset,
                              shuffle=True,
                              batch_size=TRAIN_BATCH_SIZE)
    valid_loader = DataLoader(valid_dataset, batch_size=TEST_BATCH_SIZE)

    tb_viz_cb = TensorBoardVisualizerCallback(str(LOG_TB_VZ))
    tb_logs_cb = TensorBoardLoggerCallback(str(LOG_TB_L))
    model_saver_cb = ModelSaverCallback(str(SAVED_MODEL), str(BEST_MODEL))
    logger = SimpleLoggerCallback(str(LOG_FILE))

    callbacks = [tb_viz_cb, tb_logs_cb, model_saver_cb, logger]
    classifier = CarvanaSegmenationTrain(net=UNet(),
                                         num_epochs=NUM_EPOCHS,
                                         learning_rate=LEARNING_RATE,
                                         load_model=LOAD_MODEL)
    classifier.train(train_loader, valid_loader, callbacks=callbacks)
Code example #20
File: export.py Project: EuphoriaYan/zi2zi
def main(_):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        model = UNet(batch_size=args.batch_size)
        model.register_session(sess)
        model.build_model(is_training=False, inst_norm=args.inst_norm)
        model.export_generator(save_dir=args.save_dir, model_dir=args.model_dir)
Code example #21
File: models.py Project: won548/LDCT-Octave
def model_selector(args):
    if args.model == "unet":
        model = UNet(in_ch=1, ch=args.channels, kernel=args.kernel_size)
    elif args.model == "redcnn":
        model = REDCNN(out_ch=args.channels, kernel_size=args.kernel_size)
    elif args.model == "cnn_oct":
        model = CNN_OCT(channels=args.channels, kernel_size=args.kernel_size, alpha=args.alpha)
    else:
        raise ValueError("unknown model: %s" % args.model)
    return model
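
For reference, a minimal sketch of driving this selector from argparse; the flag names mirror the attributes the function reads (model, channels, kernel_size, alpha), while the choices and defaults here are hypothetical:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--model", choices=["unet", "redcnn", "cnn_oct"], default="unet")
parser.add_argument("--channels", type=int, default=64)
parser.add_argument("--kernel_size", type=int, default=3)
parser.add_argument("--alpha", type=float, default=0.5)  # only consumed by cnn_oct
args = parser.parse_args()

model = model_selector(args)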
Code example #22
def main(_):

    available_cpu, available_gpu, available_cpu_num, available_gpu_num = get_available_gpus()
    forward_backward_device = list()
    if available_gpu_num == 0:
        print(
            "No available GPU found!!! The calculation will be performed with CPU only."
        )
        args.device_mode = 0

    parameter_update_device = available_cpu[0]
    forward_backward_device.append(available_cpu[0])
    # parameter_update_device = available_gpu[0]
    # forward_backward_device.append(available_gpu[0])

    forward_backward_device_list = list()
    forward_backward_device_list.extend(forward_backward_device)
    print("Available devices for forward && backward:")
    for device in forward_backward_device_list:
        print(device)
    print("Available devices for parameter update:%s" %
          parameter_update_device)

    model_for_train = UNet(
        training_mode=args.training_mode,
        base_trained_model_dir=args.base_trained_model_dir,
        infer_obj_name=args.infer_name,
        infer_copy_num=args.infer_copy_num,
        ebdd_dictionary_dim=args.ebdd_dictionary_dim,
        base_training_font_num=args.base_training_font_num,
        parameter_update_device=parameter_update_device,
        forward_backward_device=forward_backward_device_list)

    if os.path.exists(args.inferred_result_saving_path):
        shutil.rmtree(args.inferred_result_saving_path)
    os.makedirs(args.inferred_result_saving_path)

    model_for_train.infer_procedures(
        inferred_result_saving_path=args.inferred_result_saving_path,
        base_trained_model_dir=args.base_trained_model_dir,
        freeze_ebdd_weights=0,
        freeze_encoder=0,
        freeze_decoder=0)
Code example #23
File: train.py Project: juno1028/Son-Dam-Ee
def main():
    # Detect devices
    use_cuda = torch.cuda.is_available()  # check if GPU exists
    device = torch.device("cuda" if use_cuda else "cpu")  # use CPU or GPU

    model = UNet(device,
                 args.experiment_dir,
                 batch_size=args.batch_size,
                 experiment_id=args.experiment_id,
                 input_width=args.image_size,
                 output_width=args.image_size,
                 embedding_num=args.embedding_num,
                 embedding_dim=args.embedding_dim,
                 L1_penalty=args.L1_penalty,
                 Lconst_penalty=args.Lconst_penalty,
                 Ltv_penalty=args.Ltv_penalty,
                 Lcategory_penalty=args.Lcategory_penalty,
                 inst_norm=args.inst_norm,
                 g_norm_type=args.g_norm_type,
                 d_norm_type=args.d_norm_type,
                 gan_loss_type=args.gan_loss_type,
                 cycle_gan=args.cycle_gan,
                 rotate_range=args.rotate_range).to(device)

    fine_tune_list = None
    if args.fine_tune:
        ids = args.fine_tune.split(",")
        fine_tune_list = set([int(i) for i in ids])
    ignore_label_list = None
    if args.ignore_label:
        ids = args.ignore_label.split(",")
        ignore_label_list = set([int(i) for i in ids])
    model.train(lr=args.lr,
                epoch=args.epoch,
                resume=args.resume,
                schedule=args.schedule,
                freeze_encoder=args.freeze_encoder,
                fine_tune=fine_tune_list,
                sample_steps=args.sample_steps,
                checkpoint_steps=args.checkpoint_steps,
                flip_labels=args.flip_labels,
                ignore_label=ignore_label_list)
Code example #24
    def init_model(self, CHANNELS_IN, CHANNELS_OUT, LOAD_MODEL,
                   MODEL_LOAD_PATH, MODEL_NAME, MODEL_SUFFIX,
                   USE_DECONV_LAYERS):
        """
		Initialization and loading model if needed.
			Int: CHANNELS_IN -> Number of input channels in UNet
			Int: CHANNELS_OUT -> Number of output channels in UNet
			Bool: LOAD_MODEL -> If True we need to load existing parameters.
			Str: MODEL_LOAD_PATH -> Path where models are stored
			Str: MODEL_NAME -> Name of loading model

		Returns: Model
		"""

        model = UNet(CHANNELS_IN, CHANNELS_OUT, not USE_DECONV_LAYERS)
        if LOAD_MODEL:
            model_state_dict = torch.load(MODEL_LOAD_PATH + MODEL_NAME +
                                          MODEL_SUFFIX)
            model.load_state_dict(model_state_dict)
        return model
Code example #25
    def __init__(self,
                 W,
                 H,
                 feature_num,
                 use_pyramid=True,
                 view_direction=True):
        super(PipeLine, self).__init__()
        self.feature_num = feature_num
        self.use_pyramid = use_pyramid
        self.view_direction = view_direction
        self.texture = Texture(W, H, feature_num, use_pyramid)
        self.unet = UNet(feature_num, 3)
Code example #26
File: test.py Project: catherinebouchard22/carvana
def main():
    df_test = pd.read_csv(DATA_ROOT / 'sample_submission.csv')
    ids_test = df_test['img'].map(lambda s: s.split('.')[0])

    test_dataset = CarvanaTestDataset(ids_test.values)
    test_loader = DataLoader(test_dataset,
                             shuffle=False,
                             batch_size=TEST_BATCH_SIZE)

    classifier = CarvanaSegmenationTest(net=UNet(),
                                        pred_folder=str(PREDICTIONS_DIR))
    classifier.predict(test_loader)
Code example #27
    def __init__(self, dataset, n_channels, n_classes):
        super().__init__()
        # self.hparams = hparams
        self.dataset = dataset
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = True
        # self.criterion =  nn.CrossEntropyLoss() if self.n_classes > 1 else \
        #     nn.BCEWithLogitsLoss()

        # self.loss = UnetLoss
        self.net = UNet(n_channels=self.n_channels, n_classes=self.n_classes, bilinear=self.bilinear)
        self.save_hyperparameters()
Code example #28
def main(_):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    # alternatively, let TensorFlow auto-select an available GPU:
    # with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as sess:
    with tf.Session(config=config) as sess:
        model = UNet(args.experiment_dir,
                     batch_size=args.batch_size,
                     experiment_id=args.experiment_id,
                     input_width=args.image_size,
                     output_width=args.image_size,
                     embedding_num=args.embedding_num,
                     embedding_dim=args.embedding_dim,
                     L1_penalty=args.L1_penalty,
                     Lconst_penalty=args.Lconst_penalty,
                     Ltv_penalty=args.Ltv_penalty,
                     Lcategory_penalty=args.Lcategory_penalty)
        model.register_session(sess)
        if args.flip_labels:
            model.build_model(is_training=True,
                              inst_norm=args.inst_norm,
                              no_target_source=True)
        else:
            model.build_model(is_training=True, inst_norm=args.inst_norm)
        fine_tune_list = None
        if args.fine_tune:
            ids = args.fine_tune.split(",")
            fine_tune_list = set([int(i) for i in ids])
        model.train(lr=args.lr,
                    epoch=args.epoch,
                    resume=args.resume,
                    schedule=args.schedule,
                    freeze_encoder=args.freeze_encoder,
                    fine_tune=fine_tune_list,
                    sample_steps=args.sample_steps,
                    checkpoint_steps=args.checkpoint_steps,
                    flip_labels=args.flip_labels)
Code example #29
def getModel(device, params):
    if params.model == 'UNet':
        model = UNet(3, 1).to(device)
    elif params.model == 'resnet34_unet':
        model = resnet34_unet(1, pretrained=False).to(device)
    elif params.model == 'unet++':
        params.deepsupervision = True
        model = NestedUNet(params, 3, 1).to(device)
    elif params.model == 'Attention_UNet':
        model = Attention_Gate_UNet(3, 1).to(device)
    elif params.model == 'segnet':
        model = SegNet(3, 1).to(device)
    elif params.model == 'r2unet':
        model = R2U_Net(3, 1).to(device)
    elif params.model == 'fcn32s':
        model = get_fcn32s(1).to(device)
    elif params.model == 'myChannelUnet':
        model = ChannelUnet(3, 1).to(device)
    elif params.model == 'fcn8s':
        assert params.dataset != 'esophagus', (
            "fcn8s cannot be used with the esophagus dataset: its 80x80 images "
            "become a fractional 2.5x2.5 after five 2x downsamplings; resize the "
            "dataset to a higher resolution before using an FCN")
        model = get_fcn8s(1).to(device)
    elif params.model == 'cenet':
        model = CE_Net_().to(device)
    elif params.model == 'smaatunet':
        model = SmaAt_UNet(3, 1).to(device)
    # elif params.model == "self_attention_unet":
    #     model = get_unet_depthwise_light_encoder_attention_with_skip_connections_decoder(3, 1).to(device)
    elif params.model == "kiunet":
        model = kiunet().to(device)
    elif params.model == "Lite_RASPP":
        model = MobileNetV3Seg(nclass=1).to(device=device)
    elif params.model == "design_one":
        model = AttentionDesignOne(3, 1).to(device)
    elif params.model == "design_two":
        model = AttentionDesignTwo(3, 1).to(device)
    elif params.model == "design_three":
        model = AttentionDesignThree(3, 1).to(device)
    elif params.model == "only_attention":
        model = Design_Attention(3, 1).to(device)
    elif params.model == "only_bottleneck":
        model = Design_MRC_RMP(3, 1).to(device)
    else:
        raise ValueError("unknown model: %s" % params.model)
    return model
Code example #30
class E2VID(BaseE2VID):
    def __init__(self, config):
        super(E2VID, self).__init__(config)

        self.unet = UNet(num_input_channels=self.num_bins,
                         num_output_channels=1,
                         skip_type=self.skip_type,
                         activation='sigmoid',
                         num_encoders=self.num_encoders,
                         base_num_channels=self.base_num_channels,
                         num_residual_blocks=self.num_residual_blocks,
                         norm=self.norm,
                         use_upsample_conv=self.use_upsample_conv)

    def forward(self, event_tensor, prev_states=None):
        """
        :param event_tensor: N x num_bins x H x W
        :return: a predicted image of size N x 1 x H x W, taking values in [0,1].
        """
        return self.unet.forward(event_tensor), None
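
For reference, a minimal sketch of a single forward pass through this model, assuming `config` supplies the fields read in `__init__` (num_bins, skip_type, num_encoders, base_num_channels, num_residual_blocks, norm, use_upsample_conv) and that num_bins matches the 5 input channels below:

import torch

# hypothetical event voxel grid: batch of 4, 5 temporal bins, 180x240 sensor
events = torch.randn(4, 5, 180, 240)
model = E2VID(config)  # config assumed to be constructed elsewhere
reconstruction, _ = model(events)  # N x 1 x H x W, values in [0, 1]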