Example #1
    # params for optimizing models
    lr = 2e-4


params = Config()

currentDir = os.path.dirname(os.path.realpath(__file__))
logFile = os.path.join(currentDir, '..', 'dann-{}-{}.log'.format(params.src_dataset, params.tgt_dataset))
loggi = setLogger(logFile)

# init random seed
init_random_seed(params.manual_seed)

# init device
device = torch.device("cuda:{}".format(params.gpu_id) if torch.cuda.is_available() else "cpu")

# load dataset
src_data_loader = get_data_loader(params.src_dataset, params.dataset_root, params.batch_size)
tgt_data_loader = get_data_loader(params.tgt_dataset, params.dataset_root, params.batch_size)

# load dann model
# dann = init_model(net=AlexModel(), restore=None)
dann = init_model(net=ResNet50(), restore=None)

# train dann model
print("Start training dann model.")

# if not (dann.restored and params.dann_restore):
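# note: tgt_data_loader is passed twice, serving as both the adaptation loader and the evaluation loader (cf. Example #4)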
dann = train_dann(dann, params, src_data_loader, tgt_data_loader, tgt_data_loader, device, loggi)

print('done')
Example #2
    manual_seed = 8888
    alpha = 0

    # params for optimizing models
    lr = 2e-4


params = Config()

# init random seed
init_random_seed(params.manual_seed)

# load dataset
src_data_loader = get_data_loader(params.src_dataset, params.dataset_root,
                                  params.batch_size)
tgt_data_loader = get_data_loader(params.tgt_dataset, params.dataset_root,
                                  params.batch_size)

# load dann model
dann = init_model(net=AlexModel(), restore=None)

# train dann model
print("Start training dann model.")

if not (dann.restored and params.dann_restore):
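    # as in Example #1, the target loader also serves as the evaluation loader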
    dann = train_dann(dann, params, src_data_loader, tgt_data_loader,
                      tgt_data_loader)

print('done')
Example #3
    name_best_model = 'weights/camvid/best'
    dataset_path = 'Datasets/camvid'
    preprocess_mode = 'imagenet'  # possible values: 'imagenet', 'normalize', None

    loader = Loader.Loader(dataFolderPath=dataset_path, n_classes=n_classes, problemType='segmentation',
                           width=width, height=height, channels=channels, median_frequency=0.0)

    # build model and optimizer
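    # weights='imagenet' loads a pretrained backbone; the None spatial dims allow variable-sized inputs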
    model = Segception.Efficient(num_classes=n_classes, weights='imagenet', input_shape=(None, None, channels))

    # optimizer
    learning_rate = tfe.Variable(lr)
    optimizer = tf.train.AdamOptimizer(learning_rate)

    # Init models (optional, just for get_params function)
    init_model(model, input_shape=(batch_size, width, height, channels))

    variables_to_restore = model.variables
    variables_to_save = model.variables
    variables_to_optimize = model.variables

    # Init saver. Could also use ckpt = tfe.Checkpoint(model=model, optimizer=optimizer, learning_rate=learning_rate, global_step=global_step)
    saver_model = tfe.Saver(var_list=variables_to_save)
    restore_model = tfe.Saver(var_list=variables_to_restore)

    # restore if model saved and show number of params
    restore_state(restore_model, name_best_model)
    get_params(model)

    train(loader=loader, optimizer=optimizer, model=model, epochs=epochs, batch_size=batch_size, augmenter='segmentation', lr=learning_rate,
          init_lr=lr, saver=saver_model, variables_to_optimize=variables_to_optimize, name_best_model=name_best_model,
Example #4
src_data_loader = get_data_loader(params.src_dataset,
                                  params.dataset_root,
                                  params.batch_size,
                                  train=True)
src_data_loader_eval = get_data_loader(params.src_dataset,
                                       params.dataset_root,
                                       params.batch_size,
                                       train=False)
tgt_data_loader = get_data_loader(params.tgt_dataset,
                                  params.dataset_root,
                                  params.batch_size,
                                  train=True)
tgt_data_loader_eval = get_data_loader(params.tgt_dataset,
                                       params.dataset_root,
                                       params.batch_size,
                                       train=False)

# load dann model
dann = init_model(net=SVHNmodel(), restore=None)

# train dann model
print("Training dann model")
if not (dann.restored and params.dann_restore):
    dann = train_dann(dann, params, src_data_loader, tgt_data_loader,
                      tgt_data_loader_eval)

# eval dann model
print("Evaluating dann for source domain {}".format(params.src_dataset))
eval(dann, src_data_loader_eval)
print("Evaluating dann for target domain {}".format(params.tgt_dataset))
eval(dann, tgt_data_loader_eval)
Example #5
# init random seed
init_random_seed(params.manual_seed)

# load dataset
src_data_loader = get_data_loader(params.src_dataset,
                                  params.src_image_root,
                                  params.batch_size,
                                  train=True)
src_data_loader_eval = get_data_loader(params.src_dataset,
                                       params.src_image_root,
                                       params.batch_size,
                                       train=False)
tgt_data_loader = get_data_loader(params.tgt_dataset,
                                  params.tgt_image_root,
                                  params.batch_size,
                                  train=True)
tgt_data_loader_eval = get_data_loader(params.tgt_dataset,
                                       params.tgt_image_root,
                                       params.batch_size,
                                       train=False)

# load dann model
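# restore=None starts from scratch instead of loading a saved checkpoint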
dann = init_model(net=GTSRBmodel(), restore=None)

# train dann model
print("Training dann model")
if not (dann.restored and params.dann_restore):
    dann = train_dann(dann, params, src_data_loader, tgt_data_loader,
                      tgt_data_loader_eval, device, logger)
Example #6
                            transforms.Compose([
                                transforms.Resize(112),
                                transforms.CenterCrop(112),
                                transforms.ToTensor()
                            ]), stage='Test')
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=params.test_batch_size,
                                             num_workers=0,
                                             pin_memory=True)

    test_dataset = FinalImgLoader(params.root_folder, os.path.join(params.root_folder, params.src_test_list),
                                  transforms.Compose([
                                      transforms.Resize(112),
                                      transforms.CenterCrop(112),
                                      transforms.ToTensor()
                                  ]), stage='Test')
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=params.test_batch_size,
                                              num_workers=0,
                                              pin_memory=True)

    # build the fusion model; weights are restored when params.fusion_encoder_restore points to a checkpoint
    model = init_model(net=resnet18_cbam(),
                       restore=params.fusion_encoder_restore)

    # train the fusion model, then evaluate it on the test split below
    model = train_feature_fusion(model, train_loader, val_loader)

    eval_fusionmodal(model, test_loader)

Example #7
    # some parameters
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.n_gpu)
    n_classes = int(args.n_classes)
    width = int(args.width)
    height = int(args.height)
    channels = 1
    name_best_model = os.path.join(args.model_path, 'best')

    # build model and optimizer
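    # weights=None trains the backbone from scratch; channels=1 matches the grayscale image loaded below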
    model = Segception.Segception_v4(num_classes=n_classes,
                                     weights=None,
                                     input_shape=(None, None, channels))

    # Init models (optional, just for get_params function)
    init_model(model, input_shape=(1, width, height, channels))

    variables_to_restore = model.variables
    variables_to_save = model.variables
    variables_to_optimize = model.variables

    # Init saver. Could also use ckpt = tfe.Checkpoint(model=model, optimizer=optimizer, learning_rate=learning_rate, global_step=global_step)
    saver_model = tfe.Saver(var_list=variables_to_save)
    restore_model = tfe.Saver(var_list=variables_to_restore)

    # restore if model saved and show number of params
    restore_state(restore_model, name_best_model)
    get_params(model)

    img = cv2.imread(args.image_path, 0)
    img = cv2.resize(img, (width, height),
Example #8
    manual_seed = 8888
    alpha = 0

    # params for optimizing models
    lr = 2e-4

params = Config()

# init random seed
init_random_seed(params.manual_seed)

# load dataset
src_data_loader = get_data_loader(params.src_dataset, params.dataset_root, params.batch_size, train=True)
src_data_loader_eval = get_data_loader(params.src_dataset, params.dataset_root, params.batch_size, train=False)
tgt_data_loader = get_data_loader(params.tgt_dataset, params.dataset_root, params.batch_size, train=True)
tgt_data_loader_eval = get_data_loader(params.tgt_dataset, params.dataset_root, params.batch_size, train=False)

# load dann model
dann = init_model(net=MNISTmodel(), restore=None)

# train dann model
print("Training dann model")
if not (dann.restored and params.dann_restore):
    dann = train_dann(dann, params, src_data_loader, tgt_data_loader, tgt_data_loader_eval)

# eval dann model
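# report accuracy on both the source and the target domain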
print("Evaluating dann for source domain {}".format(params.src_dataset))
eval(dann, src_data_loader_eval)
print("Evaluating dann for target domain {}".format(params.tgt_dataset))
eval(dann, tgt_data_loader_eval)
Example #9
def train(n_classes=11,
          batch_size=16,
          epochs=100,
          width=960,
          height=720,
          crop_factor_x=2,
          crop_factor_y=1.25,
          init_lr=1e-4,
          median_frequency=.15,
          zoom_augmentation=.2,
          dataset_path='datasets/endoscopy',
          weights_path='weights/endoscopy/model',
          preprocess='imagenet'):

    CONFIG = {}
    CONFIG['n_classes'] = n_classes
    CONFIG['batch_size'] = batch_size
    CONFIG['epochs'] = epochs
    CONFIG['width'] = width
    CONFIG['height'] = height
    CONFIG['crop_factor_x'] = crop_factor_x
    CONFIG['crop_factor_y'] = crop_factor_y
    CONFIG['width_train'] = int(
        CONFIG['width'] /
        CONFIG['crop_factor_x'])  # will be cropped from width_test size
    CONFIG['height_train'] = int(
        CONFIG['height'] /
        CONFIG['crop_factor_y'])  # will be cropped from height_test size
    CONFIG['init_lr'] = init_lr
    CONFIG['median_frequency'] = median_frequency
    CONFIG['zoom_augmentation'] = zoom_augmentation
    CONFIG['dataset_path'] = dataset_path
    CONFIG['weights_path'] = weights_path
    CONFIG['preprocess'] = preprocess

    assert CONFIG['width'] * (
        1 - CONFIG['zoom_augmentation']) >= CONFIG['width_train']
    assert CONFIG['height'] * (
        1 - CONFIG['zoom_augmentation']) >= CONFIG['height_train']
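    # i.e. even the strongest zoom-out still leaves an image at least as large as the training crop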

    # GPU to use
    n_gpu = 0
    os.environ["CUDA_VISIBLE_DEVICES"] = str(n_gpu)
    # Loader
    loader = Loader.Loader(dataFolderPath=CONFIG['dataset_path'],
                           n_classes=CONFIG['n_classes'],
                           width=CONFIG['width'],
                           height=CONFIG['height'],
                           median_frequency=CONFIG['median_frequency'])
    print('Dataset loaded...')
    # build model
    # model = MiniNetv2.MiniNetv2p(num_classes=CONFIG['n_classes'])
    model = ResNet50.ResNet50Seg(CONFIG['n_classes'],
                                 input_shape=(None, None, 3),
                                 weights='imagenet')

    # optimizer
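    # learning_rate is a tf.Variable so the training loop can update it in place (e.g. to decay it)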
    learning_rate = tf.Variable(CONFIG['init_lr'])
    optimizer = tf.keras.optimizers.Adam(learning_rate)
    loss_function = tf.keras.losses.CategoricalCrossentropy()

    # restore if model saved and show number of params
    restore_state(model, CONFIG['weights_path'])

    init_model(model, (1, CONFIG['width'], CONFIG['height'], 3))
    get_params(model)

    # Train
    print('Training...')
    _train(loader=loader,
           optimizer=optimizer,
           loss_function=loss_function,
           model=model,
           config=CONFIG,
           lr=learning_rate,
           name_best_model=CONFIG['weights_path'],
           evaluation=True,
           preprocess_mode=CONFIG['preprocess'])

    print('Testing model')
    test_acc, test_miou = get_metrics(loader,
                                      model,
                                      loader.n_classes,
                                      train=False,
                                      flip_inference=True,
                                      scales=[1, 2, 1.5, 0.5, 0.75],
                                      write_images=True,
                                      preprocess_mode=CONFIG['preprocess'],
                                      time_exect=True)
    print('Test accuracy: ' + str(test_acc.numpy()))
    print('Test miou: ' + str(test_miou.numpy()))
Example #10
    print('Dataset loaded...')
    # build model
    # model = MiniNetv2.MiniNetv2p(num_classes=CONFIG['n_classes'])
    model = ResNet50.ResNet50Seg(CONFIG['n_classes'],
                                 input_shape=(None, None, 3),
                                 weights='imagenet')

    # optimizer
    learning_rate = tf.Variable(CONFIG['init_lr'])
    optimizer = tf.keras.optimizers.Adam(learning_rate)
    loss_function = tf.keras.losses.CategoricalCrossentropy()

    # restore if model saved and show number of params
    restore_state(model, args.weights_path)

    init_model(model, (1, CONFIG['width'], CONFIG['height'], 3))
    get_params(model)

    # Train
    print('Training...')
    _train(loader=loader,
           optimizer=optimizer,
           loss_function=loss_function,
           model=model,
           config=CONFIG,
           lr=learning_rate,
           name_best_model=args.weights_path,
           evaluation=True,
           preprocess_mode=args.preprocess)

    print('Testing model')
Example #11
    src_train_loader = torch.utils.data.DataLoader(src_dataset,
                                                   batch_size=params.batch_size,
                                                   num_workers=2,
                                                   shuffle=True,
                                                   pin_memory=True)

    src_val_dataset = TestImgLoader(params.root_folder, os.path.join(params.root_folder, params.src_val_list),
                                    transforms.Compose([
                                        transforms.Resize(256),
                                        transforms.CenterCrop(248),
                                        transforms.ToTensor()
                                    ]), stage='Test')
    src_val_loader = torch.utils.data.DataLoader(src_val_dataset,
                                                 batch_size=params.test_batch_size,
                                                 num_workers=2,
                                                 pin_memory=True)

    # load the source encoder; weights are restored from params.src_encoder_restore if that checkpoint exists
    model = init_model(net=resnet18(),
                       restore=params.src_encoder_restore)

    # model = train_src(model, src_train_loader, src_val_loader)

    # evaluate the source encoder on the validation split
    eval_src(model, src_val_loader)
    # eval_src_score(model, src_val_loader)