def train(args):
    filepath = "weights-{epoch:03d}-{val_loss:.4f}-{val_mean_iou:.4f}.h5"
    weights_dir = os.path.join(args.weights, args.backBone + '_' + args.model)
    cfg.check_folder(weights_dir)
    model_weights = os.path.join(weights_dir, filepath)

    # build the model
    model, base_model = builder(cfg.n_classes, (256, 256), args.model, args.backBone)
    model.summary()

    # compile the model
    # sgd = optimizers.SGD(lr=cfg.lr, momentum=0.9)
    nadam = optimizers.Nadam(lr=cfg.lr, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
    model.compile(optimizer=nadam, loss='categorical_crossentropy', metrics=[MeanIoU(cfg.n_classes)])

    # checkpoint setting
    model_checkpoint = ModelCheckpoint(model_weights, monitor='val_loss', save_best_only=True, mode='auto')

    # learning rate scheduler setting
    lr_decay = lr_decays_func(args.lr_scheduler, args.learning_rate, args.num_epochs, args.lr_warmup)
    learning_rate_scheduler = LearningRateScheduler(lr_decay, args.learning_rate, args.lr_warmup, cfg.steps_per_epoch,
                                                    num_epochs=args.num_epochs, verbose=1)

    # NOTE: learning_rate_scheduler is built above but not registered here.
    callbacks = [model_checkpoint]

    # training...
    train_set = dataloader.train_data_generator(cfg.train_data_path, cfg.train_label_path, cfg.batch_size,
                                                cfg.n_classes, cfg.data_augment)
    val_set = dataloader.val_data_generator(cfg.val_data_path, cfg.val_label_path, cfg.batch_size, cfg.n_classes)

    start_epoch = 0
    if os.path.exists(weights_dir) and os.listdir(weights_dir):
        # resume from the latest checkpoint (filenames sort by epoch)
        latest = sorted(os.listdir(weights_dir))[-1]
        model.load_weights(os.path.join(weights_dir, latest), by_name=True)
        print('loaded :' + '-' * 8 + os.path.join(weights_dir, latest))
        # parse the epoch number out of "weights-{epoch:03d}-..."
        start_epoch = int(latest[8:11])

    model.fit(train_set,
              steps_per_epoch=cfg.steps_per_epoch,
              epochs=args.num_epochs,
              callbacks=callbacks,
              validation_data=val_set,
              validation_steps=cfg.validation_steps,
              max_queue_size=cfg.batch_size,
              initial_epoch=start_epoch)
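
The resume logic above recovers the epoch by slicing fixed character positions ([8:11]) out of the checkpoint name. A slightly more robust variant, sketched against the same "weights-{epoch:03d}-..." pattern (the helper name is ours, not the repository's):

import os
import re

def latest_checkpoint_epoch(weights_dir):
    # Match names like "weights-012-0.3456-0.7890.h5" and return
    # (filename, epoch) for the highest epoch found, or (None, 0).
    pattern = re.compile(r"weights-(\d{3})-[\d.]+-[\d.]+\.h5$")
    best = (None, 0)
    for name in os.listdir(weights_dir):
        m = pattern.match(name)
        if m and int(m.group(1)) >= best[1]:
            best = (name, int(m.group(1)))
    return best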
Example #2

# NOTE: the argparse add_argument(...) definitions are truncated in this
# listing; args is expected to provide image_path, num_classes, crop_height,
# crop_width, model, base_model and weights.

args = parser.parse_args()

# check related paths
paths = check_related_path(os.getcwd())

# check the image path
if not os.path.exists(args.image_path):
    raise ValueError(
        "The image file does not exist at '{image_path}'.".format(
            image_path=args.image_path))

# build the model
net, base_model = builder(args.num_classes,
                          (args.crop_height, args.crop_width), args.model,
                          args.base_model)

# load weights
print('Loading the weights...')
if args.weights is None:
    # 'weigths_path' (sic) is the key returned by check_related_path
    net.load_weights(filepath=os.path.join(
        paths['weigths_path'], '{model}_based_on_{base_model}.h5'.format(
            model=args.model, base_model=base_model)))
else:
    if not os.path.exists(args.weights):
        raise ValueError(
            'The weights file does not exist in \'{path}\''.format(
                path=args.weights))
    net.load_weights(args.weights)
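
Once the weights are loaded, a single image can be pushed through the network. A minimal inference sketch, assuming a standard TF-Keras model, an RGB input and plain 0-1 scaling (the preprocessing here is illustrative, not the repository's own pipeline):

import numpy as np
from PIL import Image

# load the image and resize it to the network's input size
img = Image.open(args.image_path).convert('RGB')
img = img.resize((args.crop_width, args.crop_height))
x = np.asarray(img, dtype=np.float32)[None, ...] / 255.0

# forward pass; the output is assumed to be (1, H, W, num_classes)
probs = net.predict(x)
mask = np.argmax(probs[0], axis=-1)  # (H, W) class indices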
Example #3
from builders import builder
from solvers import solves
from utilities import load_params, dump_result

if __name__ == '__main__':
    params = load_params("params.json")
    terms = builder(nmax=params['nmax'],
                    t_lower_bound=params['t_lower_bound'],
                    t_pivot=params['t_pivot'],
                    t_upper_bound=params['t_upper_bound'],
                    n1=params['n1'],
                    n2=params['n2'],
                    delta=params['delta'],
                    MU=params['MU'],
                    U=params['U'],
                    V=params['V'],
                    W=params['W'],
                    mu_lower_bound=params['mu_lower_bound'],
                    mu_upper_bound=params['mu_upper_bound'],
                    ma=params['ma'])
    result = solves(hexagon_mf_operators=terms['hexagon_mf_operators'],
                    t_a=terms['t_a'],
                    t_b=terms['t_b'],
                    ts=terms['ts'],
                    Ma=terms['Ma'],
                    uab_term=terms['uab_term'],
                    u_term=terms['u_term'],
                    v_term=terms['v_term'],
                    mu_term=terms['mu_term'],
                    t_term=terms['t_term'],
                    var_terms=terms['var_terms'],
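
The utilities module itself is not shown in this listing. A minimal sketch of what load_params and dump_result could look like, assuming params.json holds a flat JSON object (a hypothetical implementation, not the project's actual code):

import json

def load_params(path):
    # Read a flat JSON object of solver parameters from disk.
    with open(path) as f:
        return json.load(f)

def dump_result(result, path='result.json'):
    # Persist the solver output; assumes result is JSON-serializable.
    with open(path, 'w') as f:
        json.dump(result, f, indent=2)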
Example #4

def train(args):
    filepath = "weights-{epoch:03d}-{val_loss:.4f}-{val_mean_iou:.4f}.h5"
    weights_dir = os.path.join(args.weights, args.backBone + '_' + args.model)
    cfg.check_folder(weights_dir)
    model_weights = os.path.join(weights_dir, filepath)

    # build the model
    model, base_model = builder(cfg.n_classes, (256, 256), args.model,
                                args.backBone)
    model.summary()

    # compile the model
    sgd = optimizers.SGD(lr=0.0001, momentum=0.9)
    # adam = optimizers.Adam(lr=cfg.lr, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=[MeanIoU(cfg.n_classes)])

    # checkpoint setting
    model_checkpoint = ModelCheckpoint(model_weights,
                                       monitor='val_loss',
                                       save_best_only=True,
                                       mode='auto')

    # learning rate scheduler setting
    lr_decay = lr_decays_func(args.lr_scheduler, args.learning_rate,
                              args.num_epochs, args.lr_warmup)
    learning_rate_scheduler = LearningRateScheduler(lr_decay,
                                                    args.learning_rate,
                                                    args.lr_warmup,
                                                    cfg.steps_per_epoch,
                                                    num_epochs=args.num_epochs,
                                                    verbose=1)
    reduce_lr = ReduceLROnPlateau(monitor='val_mean_iou',
                                  mode='max',
                                  patience=2,
                                  verbose=1,
                                  factor=0.2,
                                  min_lr=1e-7)
    # NOTE: learning_rate_scheduler is built above but not registered here.
    callbacks = [model_checkpoint, reduce_lr]

    # training...
    train_set = dataloader.train_data_generator(cfg.train_data_path,
                                                cfg.train_label_path,
                                                cfg.batch_size, cfg.n_classes,
                                                cfg.data_augment)
    val_set = dataloader.val_data_generator(cfg.val_data_path,
                                            cfg.val_label_path, cfg.batch_size,
                                            cfg.n_classes)

    start_epoch = 0
    if os.path.exists(weights_dir) and os.listdir(weights_dir):
        # resume from the latest checkpoint (filenames sort by epoch)
        latest = sorted(os.listdir(weights_dir))[-1]
        model.load_weights(os.path.join(weights_dir, latest), by_name=True)
        print('loaded :' + '-' * 8 + os.path.join(weights_dir, latest))
        # parse the epoch number out of "weights-{epoch:03d}-..."
        start_epoch = int(latest[8:11])

        # freeze the backbone and fine-tune only the last 26 layers
        for layer in model.layers:
            layer.trainable = False
        for i in range(-1, -27, -1):
            model.layers[i].trainable = True
        model.summary()

    print("start_epoch: ", start_epoch)
    if start_epoch == 0:
        backbone_pretrained_path = (
            "backBone_pretrained_weights/"
            "densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5")
        model.load_weights(backbone_pretrained_path, by_name=True)
        print(f"loaded : {backbone_pretrained_path}")

        print(len(model.layers))
        # freeze the backbone and fine-tune only the last 26 layers
        for layer in model.layers:
            layer.trainable = False
        for i in range(-1, -27, -1):
            model.layers[i].trainable = True
        model.summary()

    model.fit(train_set,
              steps_per_epoch=cfg.steps_per_epoch,
              epochs=args.num_epochs,
              callbacks=callbacks,
              validation_data=val_set,
              validation_steps=cfg.validation_steps,
              max_queue_size=cfg.batch_size,
              initial_epoch=start_epoch)
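
The fine-tuning block above freezes every layer and then re-enables only the last 26. A quick way to confirm the split took effect, assuming a TF-Keras model (the helper name is ours):

from tensorflow.keras import backend as K

def report_trainable(model):
    # Sum parameter counts over the trainable and frozen weight lists.
    trainable = sum(K.count_params(w) for w in model.trainable_weights)
    frozen = sum(K.count_params(w) for w in model.non_trainable_weights)
    print('trainable: {:,}  frozen: {:,}'.format(trainable, frozen))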
Example #5
csv_file = 'CamVid/class_dict.csv'
num_classes = 32
crop_height = 256
crop_width = 256
model = '{Model-name}'
base_model = '{Backbone-name}'
weights = 'weights/{Weight-name}.h5'
image_path = 'predictions/{Image-name}.png'
color_encode = True

paths = check_related_path(os.getcwd())

if not os.path.exists(image_path):
    raise ValueError(
        "The image file does not exist at '{image_path}'.".format(
            image_path=image_path))

net, base_model = builder(num_classes, (crop_height, crop_width), model,
                          base_model)

print('Loading the weights...')
if weights is None:
    # 'weigths_path' (sic) is the key returned by check_related_path
    net.load_weights(filepath=os.path.join(
        paths['weigths_path'], '{model}_based_on_{base_model}.h5'.format(
            model=model, base_model=base_model)))
else:
    if not os.path.exists(weights):
        raise ValueError(
            'The weights file does not exist in \'{path}\''.format(
                path=weights))
    net.load_weights(weights)

print("\n***** Begin testing *****")
print("Model -->", model)