Example No. 1
def train():
    training_generator = get_training_generator()
    for epoch_index in range(epochs):
        # Why iterate over epochs manually instead of passing them as a parameter of fit_generator?
        # The problem is that I need to clear the graph regularly, or I will get a memory error,
        # and I did not find a way to free the memory used by old operations without resetting the whole graph.
        # But resetting the whole graph also requires re-loading all the weights.
        # This is obviously not an acceptable long-term solution. To see the issue on github:
        # https://github.com/tensorflow/tensorflow/issues/31419
        graph = tf.Graph()
        K.clear_session()
        gen = training_generator(graph,
                                 starting_index=epoch_index * steps_per_epoch)
        with graph.as_default():
            if epoch_index == 0:
                unet = Unet(
                    "resnet34",
                    encoder_weights="imagenet",
                    classes=1,
                    activation="sigmoid",
                    input_shape=(tf_image_size, tf_image_size, 3),
                )
                unet.compile(optimizer=Adam(lr=learning_rate),
                             loss=calculate_loss)
            else:
                unet = load_model(
                    file_path,
                    custom_objects={"calculate_loss": calculate_loss})
            unet.fit_generator(gen, steps_per_epoch=steps_per_epoch, epochs=1)
            save_model(unet, file_path)
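Note that load_model() above needs the custom loss passed back in via custom_objects, so calculate_loss must be importable by the reloading script as well. A minimal sketch, assuming a plain pixel-wise binary cross-entropy as a stand-in for the project's real loss:

from keras import backend as K  # or tensorflow.keras.backend, depending on the setup

def calculate_loss(y_true, y_pred):
    # Hypothetical placeholder for the project's custom loss:
    # plain pixel-wise binary cross-entropy, averaged over the batch.
    return K.mean(K.binary_crossentropy(y_true, y_pred))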
Example No. 2
def train():
    #load images
    images = []
    for image in os.listdir(im_path):
        imi = cv.imread(os.path.join(im_path, image))
        images.append(imi)

    #load masks
    masks = []
    for mask in os.listdir(mask_path):
        mask_in = cv.imread(os.path.join(mask_path, mask), 0)
        ret_val, threshed_mask = cv.threshold(mask_in, 37, 1, cv.THRESH_BINARY)
        masks.append(threshed_mask)

    model = Unet('resnet34',
                 encoder_weights='imagenet',
                 input_shape=(128, 128, 3))
    model.compile('Adam',
                  loss=bce_jaccard_loss,
                  metrics=[iou_score, 'accuracy'])
    model.summary()
    hist = model.fit(x=np.array(images).reshape(-1, 128, 128, 3),
                     y=np.array(masks).reshape(-1, 128, 128, 1),
                     batch_size=10,
                     epochs=15)

    #save model
    filename = 'trained_model.h5'
    model.save(filename, include_optimizer=False)
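Since the model is saved with include_optimizer=False, a later inference script can reload it without the training-time loss and metrics by passing compile=False. A minimal sketch (the test image path and its 128x128 size are assumptions):

import cv2 as cv
import numpy as np
from keras.models import load_model  # or tensorflow.keras.models, depending on the setup

model = load_model('trained_model.h5', compile=False)   # no optimizer/loss objects needed for inference
img = cv.imread('test_image.png')                        # hypothetical 128x128 RGB test image
pred_mask = model.predict(np.expand_dims(img, axis=0))   # shape (1, 128, 128, 1)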
Example No. 3
    def get_model(self, encoderWeights, input_tensor, input_shape):
        """
        Loads the model from segmentation_models for either semantic segmentation or single-view depth prediction
        :param encoderWeights: encoder weights for the backbone (e.g., imagenet)
        :param input_tensor: input tensor for training
        :param input_shape: image shape for training
        :return: model to be used for training/testing
        """

        if self.mode == self.params.SINGLEVIEW_MODE:
            model = UnetRegressor(input_shape=input_shape,
                                  input_tensor=input_tensor,
                                  backbone_name=self.params.BACKBONE,
                                  encoder_weights=encoderWeights)
        elif self.mode == self.params.SEMANTIC_MODE:
            model = Unet(input_shape=input_shape,
                         input_tensor=input_tensor,
                         backbone_name=self.params.BACKBONE,
                         encoder_weights=encoderWeights,
                         classes=self.params.NUM_CATEGORIES)
        # elif self.mode == self.params.SEMANTIC_MODE:
        #     model = PSPNet(input_shape=input_shape, input_tensor=input_tensor, backbone_name=self.params.BACKBONE,
        #                  encoder_weights=encoderWeights, classes=self.params.NUM_CATEGORIES)

        # elif self.mode == self.params.SEMANTIC_MODE:
        #     model_name='pspnet101_cityscapes'
        #     input_shape=(713,713)
        #     pspnet_model = pspnet.PSPNet101(nb_classes=self.params.NUM_CATEGORIES, input_shape=input_shape,
        #                                weights=model_name)
        #     model=pspnet_model.model
        # model = pspnet(input_shape=input_shape, input_tensor=input_tensor, backbone_name=self.params.BACKBONE,
        #               encoder_weights=encoderWeights, classes=self.params.NUM_CATEGORIES)

        return model
Example No. 4
def get_model(cfg, training=True):
    tf.keras.backend.set_learning_phase(training)

    model = None
    n_classes = len(cfg.CLASSES.keys())
    if cfg.model_type == "UNET":
        model = Unet(backbone_name=cfg.backbone_name,
                     input_shape=cfg.input_shape,
                     classes=n_classes,
                     activation='sigmoid' if n_classes == 1 else 'softmax',
                     weights=None,
                     encoder_weights=cfg.encoder_weights,
                     encoder_freeze=cfg.encoder_freeze,
                     encoder_features=cfg.encoder_features,
                     decoder_block_type=cfg.decoder_block_type,
                     decoder_filters=cfg.decoder_filters,
                     decoder_use_batchnorm=True)
    elif cfg.model_type == "FPN":
        model = FPN(backbone_name=cfg.backbone_name,
                    input_shape=cfg.input_shape,
                    classes=n_classes,
                    activation='sigmoid' if n_classes == 1 else 'softmax',
                    weights=None,
                    encoder_weights=cfg.encoder_weights,
                    encoder_freeze=cfg.encoder_freeze,
                    encoder_features=cfg.encoder_features)
    else:
        print("Unsupported model type!")
        exit(1)

    if cfg.pretrained_model is not None:
        model.load_weights(cfg.pretrained_model)

    return model
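For reference, a minimal cfg object covering only the attributes get_model() reads might look like this; every value below is an illustrative assumption, not taken from the original project:

from types import SimpleNamespace

cfg = SimpleNamespace(
    CLASSES={'background': 0, 'lesion': 1},   # n_classes is derived from this dict
    model_type='UNET',
    backbone_name='resnet34',
    input_shape=(256, 256, 3),
    encoder_weights='imagenet',
    encoder_freeze=False,
    encoder_features='default',
    decoder_block_type='upsampling',
    decoder_filters=(256, 128, 64, 32, 16),
    pretrained_model=None,
)
model = get_model(cfg, training=True)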
Example No. 5
def unet_efficientnet():
    model = Unet('efficientnetb0',
                 input_shape=(320, 480, 3),
                 encoder_weights='imagenet',
                 classes=4,
                 encoder_freeze=False)
    return model
Example No. 6
    def compile_dense(self):

        self.model = Unet(backbone_name='inceptionv3',
                          input_shape=self.img_shape,
                          input_tensor=None,
                          encoder_weights=None,
                          freeze_encoder=False,
                          skip_connections='default',
                          decoder_block_type='upsampling',
                          decoder_filters=(256, 128, 64, 32, 16),
                          decoder_use_batchnorm=True,
                          n_upsample_blocks=5,
                          upsample_rates=(2, 2, 2, 2, 2),
                          classes=4,
                          activation='softmax')

        sgd = SGD(lr=0.1, momentum=0.9, decay=5e-6, nesterov=False)
        adam = Adam(lr=0.01,
                    beta_1=0.9,
                    beta_2=0.999,
                    epsilon=None,
                    decay=0.0,
                    amsgrad=False)

        self.model.compile(
            adam, gen_dice_loss,
            [dice_whole_metric, dice_core_metric, dice_en_metric])

        return self.model
Example No. 7
    def U_net(self):
        # Build U-Net model
        transfer_model = Unet(backbone_name=backbone, input_shape=(None, None, 3), classes=1,
                              activation='relu', encoder_weights='imagenet', encoder_freeze=True)
        transfer_model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=[self.mean_iou])
        transfer_model.load_weights(self.modelPath)
        transfer_model.summary()
        return transfer_model
Example No. 8
    def __init__(self,
                 model_path='models/unet_MoNuSeg.hdf5',
                 target_size=(512, 512)):
        self.target_size = target_size
        ## build model
        self.model = Unet('resnet34', input_shape=(512, 512, 3))
        ## load weights
        self.model.load_weights(model_path, skip_mismatch=True, by_name=True)
Example No. 9
File: unet.py Project: anssar/salt
def train_stage_1(x_train, y_train, x_valid, y_valid):
    opt = optimizers.adam(lr=0.001)
    model = Unet(backbone_name=BACKBONE,
                 encoder_weights='imagenet',
                 freeze_encoder=True)
    model.compile(loss=bce_dice_jaccard_loss,
                  optimizer=opt,
                  metrics=[my_iou_metric])
    model_checkpoint = ModelCheckpoint(
        OUTPUT_DIR + "/{}/models/{}_fold_{}_stage1.model".format(
            BASE_NAME, BASE_NAME, CUR_FOLD_INDEX),
        monitor='val_my_iou_metric',
        mode='max',
        save_best_only=True,
        verbose=1)
    reduce_lr = ReduceLROnPlateau(monitor='val_my_iou_metric',
                                  mode='max',
                                  factor=0.5,
                                  patience=6,
                                  min_lr=0.00001,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_my_iou_metric',
                                   mode='max',
                                   patience=20,
                                   verbose=1)
    logger = CSVLogger(OUTPUT_DIR + '/{}/logs/{}_fold_{}_stage1.log'.format(
        BASE_NAME, BASE_NAME, CUR_FOLD_INDEX))
    model.fit_generator(
        TrainGenerator(x_train,
                       y_train,
                       batch_size=int(np.ceil(BATCH_SIZE / (len(AUGS) + 1))),
                       img_size_target=IMG_SIZE_TARGET),
        steps_per_epoch=int(
            np.ceil(len(x_train) / int(np.ceil(BATCH_SIZE /
                                               (len(AUGS) + 1))))),
        epochs=WARM_EPOCHS,
        validation_data=ValidGenerator(x_valid,
                                       y_valid,
                                       batch_size=BATCH_SIZE,
                                       img_size_target=IMG_SIZE_TARGET),
        callbacks=[model_checkpoint],
        shuffle=True)
    segmentation_utils.set_trainable(model)
    model.fit_generator(
        TrainGenerator(x_train,
                       y_train,
                       batch_size=int(np.ceil(BATCH_SIZE / (len(AUGS) + 1))),
                       img_size_target=IMG_SIZE_TARGET),
        steps_per_epoch=int(
            np.ceil(len(x_train) / int(np.ceil(BATCH_SIZE /
                                               (len(AUGS) + 1))))),
        epochs=EPOCHS,
        validation_data=ValidGenerator(x_valid,
                                       y_valid,
                                       batch_size=BATCH_SIZE,
                                       img_size_target=IMG_SIZE_TARGET),
        callbacks=[early_stopping, model_checkpoint, reduce_lr, logger],
        shuffle=True)
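The two fit_generator calls above implement a common two-phase fine-tuning scheme: a short warm-up with the encoder frozen (freeze_encoder=True) so the randomly initialised decoder can settle, then segmentation_utils.set_trainable(model) to unfreeze the encoder for the full run. In compressed form, assuming segmentation_utils refers to segmentation_models.utils:

from segmentation_models import Unet
from segmentation_models.utils import set_trainable

# freeze_encoder is the 0.2.x keyword; segmentation_models 1.x calls it encoder_freeze.
model = Unet('resnet34', encoder_weights='imagenet', freeze_encoder=True)
model.compile('adam', loss='binary_crossentropy')
# model.fit_generator(...) for a few warm-up epochs, then:
set_trainable(model)  # mark every layer trainable (recompile afterwards if your version does not do it for you)
# model.fit_generator(...) for the full schedule with callbacks and LR scheduling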
Example No. 10
def model_init(path_1, path_2):
    model = Unet(BACKBONE_NAME, input_shape=(None, None, 1), classes=1, encoder_weights=None)
    model.compile(optimizer=OPTIMIZER, loss=LOSS, metrics=[my_iou])
    history = model.fit_generator(train_generator,
                                steps_per_epoch=TRAIN_STEPS_PER_EPOCH,
                                validation_data=valid_generator,
                                validation_steps=VALID_STEPS_PER_EPOCH,
                                callbacks=callbacks,
                                epochs=50)
Example No. 11
def get_model(net_name,
              num_class,
              weight_path,
              input_shape=[],
              weighted_loss=False):
    number_class = num_class
    if net_name == 'psp':
        model_name = 'pspnet101_cityscapes'
        input_shape = (473, 473, 3)
        model = pspnet.PSPNet101(nb_classes=num_class,
                                 input_shape=input_shape,
                                 weights=model_name)
        model = model.model
    elif net_name == 'psp_50':
        input_shape = (473, 473, 3)
        model_name = 'pspnet50_ade20k'
        #output_mode='sigmoid'
        model = pspnet.PSPNet50(nb_classes=num_class,
                                input_shape=input_shape,
                                weights=model_name)
        model = model.model

    elif net_name[-1:] == 'c':
        if net_name == 'unet_rgbh_c' or net_name == 'unet_rgbc_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 4]
        elif net_name == 'unet_rgb_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 3]
        elif net_name == 'unet_msi_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 3]
        elif net_name == 'unet_msih_c' or net_name == 'unet_msic_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 9]
        from keras.layers import Input
        input_tensor = Input(shape=(input_shape[0], input_shape[1],
                                    input_shape[2]))
        model = Unet(input_shape=input_shape,
                     input_tensor=input_tensor,
                     backbone_name=params.BACKBONE,
                     encoder_weights=None,
                     classes=num_class)
    if weighted_loss:
        loss = my_class_weighted_loss
    else:
        loss = params.SEMANTIC_LOSS
    lr = params.LEARN_RATE
    optimizer = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    if len(weight_path) > 2:
        model.load_weights(weight_path, True)  # second positional argument is by_name=True
        print('use pre-trained weights', weight_path)
    model.compile(optimizer, loss=loss, metrics=[categorical_accuracy])

    model.summary()
    return model, input_shape
Example No. 12
def train_unet_mobilenetv2(saveModelFn, tensorboardPath):
    # train_imgDir = "/home/xiping/mydisk2/imglib/my_imglib/coco/train2014_person"
    train_imgDir = "/coco/train2014_person"
    (train_data, train_mask_data), (val_data,
                                    val_mask_data) = get_data(train_imgDir,
                                                              maxNum=12000,
                                                              valMaxNum=1000)

    # print(train_data.shape)
    # print(mask_data.shape)
    # print(mask_data[0])
    # cv2.imwrite("xx.bmp", mask_data[1]*255)
    # exit(0)

    print("================================")
    BACKBONE = 'mobilenetv2'
    # define model
    model = Unet(
        BACKBONE,
        classes=1,
        input_shape=(224, 224,
                     3),  # explicit input size so the checkpoint callback can save the model
        activation='sigmoid',  # 'sigmoid' or 'softmax'
        encoder_weights='imagenet')

    # Show network structure.
    # model.summary()

    model.compile('Adam', loss='jaccard_loss', metrics=['iou_score'])
    # model.compile('SGD', loss="bce_dice_loss", metrics=["dice_score"])
    # model.compile('SGD', loss="bce_jaccard_loss", metrics=["iou_score"])
    # model.compile('adam', loss="binary_crossentropy", metrics=["iou_score"])

    checkpointer = ModelCheckpoint(
        filepath=
        "weights.epoch={epoch:02d}-val_loss={val_loss:.2f}-val_iou_score={val_iou_score:.2f}.hdf5",
        verbose=1,
        save_best_only=True)

    print("================================")
    print("Start train...")
    # fit model
    # if you use data generator use model.fit_generator(...) instead of model.fit(...)
    # more about `fit_generator` here: https://keras.io/models/sequential/#fit_generator
    model.fit(
        x=train_data,
        y=train_mask_data,
        batch_size=32,
        epochs=200,
        validation_data=(
            val_data,
            val_mask_data),  # the checkpoint callback needs validation data to compute val_loss / val_iou_score
        callbacks=[TensorBoard(log_dir=tensorboardPath), checkpointer])

    model.save(saveModelFn)
Example No. 13
def net_predict():
    model = Unet(backbone_name=backbone,
                 encoder_weights=None,
                 input_shape=(256, 256, 1))
    model.load_weights(checkpoint)
    preds_train = model.predict(X_train, verbose=1)
    preds_val = model.predict(X_valid, verbose=1)

    preds_train_t = (preds_train > 0.5).astype(np.uint8)
    preds_val_t = (preds_val > 0.5).astype(np.uint8)
    plot_sample(X_valid, y_valid, preds_val, preds_val_t, ix=None)
Example No. 14
def run_navigation_trials(args):
    # Load model if specified
    if args.model_path:
        K.set_learning_phase(1)
        model = Unet('resnet18',
                     input_shape=(256, 320, 4),
                     activation='sigmoid',
                     classes=1,
                     encoder_weights=None)
        if args.multi_gpu_model:
            model = multi_gpu_model(model)

        model.load_weights(args.model_path)
    else:
        model = None

    # Create save directories for results and vids
    if args.save_dir:
        os.makedirs(args.save_dir, exist_ok=True)
        if args.save_vid:
            vid_dir = join(args.save_dir, 'vids')
            os.makedirs(vid_dir, exist_ok=True)

    # Read saved experimental setup from disk
    with open(args.experiment_path, 'r') as fp:
        experiment_dict = json.load(fp)

    # Execute experiments for all maps
    result_dict = defaultdict(list)
    for wad_id, experiments in sorted(experiment_dict.items()):
        print('INFO: Testing on map {}'.format(wad_id))
        game = util.setup_game(args.wad_dir,
                               wad_id,
                               visible=args.viz_output,
                               localization_noise=args.localization_noise)

        # Execute individual experiments within map
        for exp_idx, experiment in enumerate(experiments):
            for i in range(args.iterations):
                util.setup_trial(game, experiment['start'])
                vid_path = join(vid_dir, '{}_{}_{}.mp4'.format(
                    wad_id, exp_idx, i)) if args.save_vid else None  # NOQA
                result, full_path = navigate(game, args.max_steps,
                                             experiment['goal'], model,
                                             vid_path)
                result_dict[wad_id].append(result)
                print('INFO: Trial complete {}'.format(result))

        # Save results from experiment
        if args.save_dir:
            result_path = join(args.save_dir, 'results.json')
            with open(result_path, 'w') as fp:
                json.dump(result_dict, fp, sort_keys=True, indent=4)
Example No. 15
def _load_pretrained_model(checkpoint_path):
    model = Unet(backbone_name='resnet34',
                 input_shape=(IMG_HEIGHT, IMG_WIDTH, 3),
                 classes=CLASS_NUM,
                 activation='softmax',
                 encoder_weights=None,
                 decoder_block_type='transpose')

    checkpoint_path = os.path.join(_get_resources_path(), checkpoint_path)
    model.load_weights(checkpoint_path)

    return model
Example No. 16
def get_model(backbone):
    if backbone == "resnet34":
        from segmentation_models import Unet
        model = Unet("resnet34",
                     encoder_weights='imagenet',
                     classes=1,
                     activation='sigmoid',
                     encoder_freeze=True)
        prep_fn = preprocessing_fn(custom_fn=sm.get_preprocessing("resnet34"))
        return model, prep_fn

    else:
        raise ValueError("Unknown backbone")
Example No. 17
    def _do_make_model_task(self,
                            task,
                            model_name,
                            nb_classes,
                            width=299,
                            height=299,
                            backbone="resnet50",
                            activation="softmax",
                            trial=None):
        if task == Task.CLASSIFICATION:
            xception_shape_condition = height >= 71 and width >= 71
            mobilenet_shape_condition = height >= 32 and width >= 32

            if model_name == "xception" and xception_shape_condition:
                model = xception(nb_classes, height, width)
            elif model_name == "mobilenet" and mobilenet_shape_condition:
                model = mobilenet(nb_classes, height, width)
            else:
                model = Model2D(nb_classes, height, width)

        elif task == Task.SEMANTIC_SEGMENTATION:
            if self.config.op_backbone:
                backbone = trial.suggest_categorical('backbone',
                                                     self.config.backbone)
            print('------------------')
            print('Model:', model_name)
            print('Backbone:', backbone)
            print('------------------')

            if model_name == "unet":
                model = Unet(
                    backbone_name=backbone,
                    input_shape=(height, width, 3),
                    classes=nb_classes,
                )
            elif model_name == "deeplab_v3":
                model = Deeplabv3(input_shape=(height, width, 3),
                                  classes=nb_classes,
                                  backbone=backbone,
                                  activation=activation)
            elif model_name == "pspnet":
                model = PSPNet(
                    backbone_name=backbone,
                    input_shape=(height, width, 3),
                    classes=nb_classes,
                )
        else:
            raise NotImplementedError

        return model
Example No. 18
def define_model(architecture='Unet', BACKBONE='resnet34', input_shape=(None, None, 4),encoder_weights=None):
    print('In define_model function')
    if architecture == 'Unet':
        model = Unet(BACKBONE, classes=3, activation='softmax', encoder_weights=encoder_weights, input_shape=input_shape)
        print('Unet model defined')
    elif architecture == 'FPN':
        model = FPN(BACKBONE, classes=3, activation='softmax', encoder_weights=encoder_weights, input_shape=input_shape)
        print('FPN model defined')
    elif architecture == 'Linknet':
        model = Linknet(BACKBONE, classes=3, activation='softmax', encoder_weights=encoder_weights, input_shape=input_shape)
        print('Linknet model defined')
    elif architecture == 'PSPNet':
        model = PSPNet(BACKBONE, classes=3, activation='softmax', encoder_weights=encoder_weights, input_shape=input_shape)
        print('PSPNet model defined')
    return model
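A hedged usage sketch for define_model; the compile settings are assumptions. encoder_weights is left as None here because ImageNet encoder weights are built for 3-channel input and generally cannot be loaded against the default 4-band input_shape:

model = define_model(architecture='Unet', BACKBONE='resnet34',
                     input_shape=(None, None, 4), encoder_weights=None)
model.compile('Adam', loss='categorical_crossentropy', metrics=['accuracy'])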
Example No. 19
def load_model(args):
    # Load trained seed segmentation model to use with active sampling
    K.set_learning_phase(0)
    model = Unet('resnet18',
                 input_shape=(256, 320, 4),
                 activation='sigmoid',
                 classes=1,
                 encoder_weights=None)

    # Convert to multi-GPU model if necessary
    if args.multi_gpu_model:
        model = multi_gpu_model(model)

    model.load_weights(args.model_path)
    return model
Example No. 20
def buildYoloModel(pathToSave, width, height):
    base_model = Unet('resnet34',
                      input_shape=(width, height, 3),
                      classes=1,
                      activation='sigmoid',
                      freeze_encoder=True)
    # The U-Net is based on resnet34 in this case, so we can strip the U-Net-specific layers to recover the resnet34 model.
    # The topmost layer of the resnet is "relu1".
    relu1_layer_name = "relu1"
    while base_model.layers[-1].name != relu1_layer_name:
        base_model.layers.pop()
    relu1_layer = base_model.layers[-1]
    # Using this line would create a plain resnet34 model:
    # model = Model(inputs=base_model.input, outputs=relu1_layer.output)

    X = Flatten(name="yolo1")(relu1_layer.output)
    X = Dense(2048, activation="relu", name="yolo2")(X)
    X = Dense(1024, activation="relu", name="yolo3")(X)
    # We use a 25 x 4 grid; each grid cell holds one box over 4 classes.
    # Each box has a confidence, a centre point, a height and a width.
    # So these output layers have 100 + 400 + 200 + 200 = 900 nodes in total.
    confidences = Dense(25 * 4, activation="sigmoid",
                        name="yolo_conf1")(X)  # 0..1
    classes = Dense(25 * 4 * 4, activation="softmax",
                    name="yolo_classes")(X)  # 0..1
    coord = Dense(25 * 4 * 2, activation="tanh", name="yolo_coord1")(X)
    coord = Lambda(lambda x: x / 2, name="yolo_coord2")(coord)  # -0.5..0.5
    sizes = Dense(25 * 4 * 2, activation="relu", name="yolo_sizes")(X)  # >= 0
    pred = Concatenate(name="yolo4")([confidences, classes, coord, sizes])
    model = Model(inputs=base_model.input, outputs=pred)
    model.summary()

    # Finally, add the input adapter layer.
    input = Input(shape=(height, width, 1))
    # This adapter layer maps grayscale to RGB space, since these public pretrained models accept 3 channels only.
    adapter = Conv2D(3, (1, 1),
                     trainable=False,
                     name="AdapterLayer",
                     kernel_initializer="ones")(input)
    out = model(adapter)

    model = Model(input, out, name=base_model.name)
    model.summary()

    model.save(pathToSave)
    return model
Example No. 21
    def workflow(self):
        # define model
        model = Unet(backbone_name='resnet50', encoder_weights='imagenet')
        #model.compile('Adam', 'binary_crossentropy', ['binary_accuracy'])
        model.load_weights(
            os.path.join(self.cfgs["SAVE_DIR"],
                         "epoch" + str(self.epoch) + ".h5"))
        print("RETORE SUCCESSFULLY!")
        test_images, test_ulabels, test_elabels, test_rlabels, filelist = self.dl.get_test_data(
        )
        # TEST:
        print('start')
        start = time.clock()
        results = model.predict(test_images, batch_size=5, verbose=1)
        stop = time.clock()
        print('程序运行时间:', str(stop - start), ' 秒')
        pmlabels = results[0]

        print(len(results))

        mkdirs(self.cfgs["SAVE_DIR"],
               ['images', 'labels_e', 'labels_r', 'preds', 'preds_threshold'])
        for ii in range(results[0].shape[0]):
            cv2.imwrite(
                os.path.join(self.cfgs["SAVE_DIR"],
                             'images/{}'.format(filelist[ii][0])),
                test_images[ii, :] * 255)

            cv2.imwrite(
                os.path.join(self.cfgs["SAVE_DIR"],
                             'labels_e/{}'.format(filelist[ii][1])),
                test_elabels[ii, :] * 255)
            cv2.imwrite(
                os.path.join(self.cfgs["SAVE_DIR"],
                             'labels_r/{}'.format(filelist[ii][1])),
                test_rlabels[ii, :] * 255)

            cv2.imwrite(
                os.path.join(self.cfgs["SAVE_DIR"],
                             'preds/{}'.format(filelist[ii][1])),
                results[-1][ii, :])
            pred_threshold = threshold(results[-1][ii, :])
            cv2.imwrite(
                os.path.join(self.cfgs["SAVE_DIR"],
                             'preds_threshold/{}'.format(filelist[ii][1])),
                pred_threshold * 255)
Example No. 22
def convert_model(args):
    if args.out_path is None:
        args.out_path = args.model_path.split('.h5')[0] + '_single.h5'

    # Load multi-GPU model weights
    K.set_learning_phase(1)
    model = Unet('resnet18',
                 input_shape=(256, 320, 4),
                 activation='sigmoid',
                 classes=1,
                 encoder_weights=None)
    model = multi_gpu_model(model)
    model.load_weights(args.model_path)

    # Set weights in single-GPU model and save
    single_model = model.layers[-2]
    single_model.save(args.out_path)
Example No. 23
def get_model(name, in_shape, n_classes, backend='resnet34'):
    if name == 'fpn':
        return FPN(backbone_name=backend,
                   input_shape=in_shape,
                   classes=n_classes,
                   encoder_weights=None)
    if name == 'unet':
        return Unet(backbone_name=backend,
                    input_shape=in_shape,
                    classes=n_classes,
                    encoder_weights=None)
    if name == 'pspnet':
        return PSPNet50(input_shape=in_shape, n_labels=n_classes)
    if name == 'deeplab':
        return Deeplabv3(input_shape=in_shape, classes=n_classes, weights=None)
    if name == 'biard':
        return biard_net(in_shape=in_shape, n_classes=n_classes)
    raise ValueError("Unknown model name")
Example No. 24
def get_unet_model(backbone="efficientnetb0",
                   classes=1,
                   activation="sigmoid",
                   encoder_weights="imagenet"):
    """
    Returns Unet model based on input variables

    :param backbone: Unet backbone
    :param classes: Number of classes
    :param activation: Activation function
    :param encoder_weights: Encoder weights
    :return: Unet model built from the given arguments
    """
    model = Unet(backbone,
                 classes=classes,
                 activation=activation,
                 encoder_weights=encoder_weights)

    return model
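A minimal usage sketch; the compile settings below are assumptions, not part of the helper:

model = get_unet_model(backbone='efficientnetb0', classes=1,
                       activation='sigmoid', encoder_weights='imagenet')
model.compile('Adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()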
Example No. 25
def main():

    train_datagen = ImageDataGenerator(rescale=1 / 255)
    train_batches = train_datagen.flow_from_directory(DATASET_PATH,
                                                      target_size=(1024, 1024),
                                                      shuffle=True,
                                                      class_mode=None,
                                                      batch_size=BATCH_SIZE)

    valid_datagen = ImageDataGenerator(rescale=1 / 255)
    valid_batches = valid_datagen.flow_from_directory(DATASET_PATH,
                                                      target_size=(1024, 1024),
                                                      shuffle=False,
                                                      class_mode=None,
                                                      batch_size=BATCH_SIZE)

    train_crops = crop_generator(train_batches, CROP_LENGTH)  #224
    valid_crops = crop_generator(valid_batches, CROP_LENGTH)

    batch_x_random_crop, batch_y_targeted_crop = next(train_crops)
    valid_x, valid_y = next(valid_crops)

    in_painted_x = in_painting_mask(batch_x_random_crop, batch_y_targeted_crop)
    valid_in_x = in_painting_mask(valid_x, valid_y)

    batch_x_random_crop = rgb2gray(batch_x_random_crop)
    batch_x_random_crop = np.reshape(
        batch_x_random_crop, (batch_x_random_crop.shape[0], 224, 224, 1))

    valid_x = rgb2gray(valid_x)
    valid_x = np.reshape(valid_x, (valid_x.shape[0], 224, 224, 1))

    model = Unet(backbone_name='resnet18',
                 encoder_weights='imagenet',
                 decoder_block_type='transpose')  # build U-Net
    model.compile(optimizer='Adam', loss='mean_squared_error')
    model.summary()
    model.fit(x=in_painted_x,
              y=batch_x_random_crop,
              validation_data=(valid_in_x, valid_x),
              validation_steps=5,
              steps_per_epoch=5,
              epochs=1)
Example No. 26
def main():
    with open('/home/rbuddhad/NIH-XRAY/test_sml.txt') as f1:
        lines1 = f1.readlines()

    test_datagen = ImageDataGenerator()
    test_batches = test_datagen.flow_from_directory(TEST_DATASET_PATH,
                                                    target_size=(1024, 1024),
                                                    shuffle=True,
                                                    class_mode=None,
                                                    batch_size=BATCH_SIZE)

    test_crops_orig = crop_generator(test_batches, CROP_LENGTH, lines1)  # 224

    model = Unet(backbone_name='resnet18', encoder_weights=None)
    model.load_weights('best_model1.h5')
    model.compile(optimizer='Adam',
                  loss='mean_squared_error',
                  metrics=['mae', 'mean_squared_error'])
    model.summary()

    # callbacks = [EarlyStopping(monitor='val_loss', patience=10),
    #              ModelCheckpoint(filepath='best_model1.h5', monitor='val_loss', save_best_only=True),
    #              TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)]
    # model.fit_generator(generator=test_crops_orig,
    #                     steps_per_epoch=100,
    #                     validation_data=valid_crops_orig,
    #                     callbacks=callbacks,
    #                     validation_steps=200,
    #                     epochs=1000,
    #                     shuffle=True)
    # model.predict(generator=test_crops_orig,
    #               steps=2,
    #               verbose=1)

    # model.save('unet2.h5')
    predict = model.predict_generator(generator=test_crops_orig,
                                      steps=1,
                                      verbose=1)
    # predict = model.predict()
    print(predict.shape, 'predict_batch_size')
    for i in range(50):
        plt.imshow(predict[i, :, :, 0], cmap='gray', vmin=0, vmax=1)
        plt.show()
Example No. 27
def train(x_train: NpArray, x_valid: NpArray, y_train: NpArray, y_valid: NpArray,
          fold: int = -1) -> None:
    preprocessing_fn = get_preprocessing('resnet34')
    x_train = preprocessing_fn(x_train)
    x_valid = preprocessing_fn(x_valid)

    model = Unet(backbone_name='resnet34', encoder_weights='imagenet')
    model.compile('Adam', 'binary_crossentropy', metrics=[my_iou_metric])
    model.summary()

    model_name = make_output_path("models/fold%d.hdf5" % fold)
    model_checkpoint = ModelCheckpoint(model_name, monitor='val_my_iou_metric',
                                       mode='max', save_best_only=True, verbose=1)
    reduce_lr = ReduceLROnPlateau(monitor='val_my_iou_metric', mode='max',
                                  factor=0.5, patience=5, min_lr=3e-6, verbose=1)

    model.fit(x_train, y_train, validation_data=[x_valid, y_valid], epochs=EPOCHS,
              batch_size=BATCH_SIZE, callbacks=[model_checkpoint, reduce_lr],
              verbose=VERBOSE)
Example No. 28
def seg_model(preprocess_type, input_size, pretrained_weights, activation,
              loss):
    classes = 4 if activation == 'sigmoid' else 5

    model = Unet(preprocess_type,
                 encoder_weights='imagenet',
                 input_shape=input_size,
                 classes=classes,
                 activation=activation)

    adam = keras.optimizers.Adam(lr=1e-4)

    model.compile(optimizer=adam, loss=loss, metrics=[dice_coef])

    model.summary()

    if pretrained_weights:
        model.load_weights(pretrained_weights)

    return model
Example No. 29
def build_model():

    model = Unet(backbone_name='mobilenetv2',
                 input_shape=(224, 224, 3),
                 classes=1,
                 activation='sigmoid',
                 encoder_weights=weight_mobilenetv2_path,
                 encoder_freeze=True,
                 encoder_features='default',
                 decoder_block_type='upsampling',
                 decoder_filters=(256, 128, 64, 32, 16),
                 decoder_use_batchnorm=True)

    #model.compile(loss='binary_crossentropy', optimizer=RMSprop(lr=0.0001), metrics=['acc'])
    #model.compile(loss='binary_crossentropy', optimizer=SGD(lr=1e-4, momentum=0.9), metrics=['acc'])
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(lr=0.0001),
                  metrics=['acc'])
    model.summary()
    return model
Example No. 30
def get_compiled_unet(config, label_encoder, loss='categorical_crossentropy', predict_logits=False, large=True):
    '''
    Input: config dict, label_encoder, loss (string or callable), predict_logits (boolean)
    Output: compiled Unet model
    '''
    activation = 'linear' if predict_logits else 'softmax'
    n_bands = len(config['s1_input_bands']) + len(config['s2_input_bands'])
    decoder_filters = (256,128,64,32,16) if large else (64,32,32,32,32)
    model = Unet(
        backbone_name=config['unet_params']['backbone_name'],
        encoder_weights=None,
        activation=activation,
        input_shape=(None, None, n_bands),
        classes=len(label_encoder.classes_),
        decoder_filters=decoder_filters
    )
    model.compile(loss=loss,
        optimizer=Nadam(lr=config['unet_params']['learning_rate']),
        metrics=['accuracy'])
    return model
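For reference, a minimal config/label_encoder pair with only the keys get_compiled_unet() reads might look like the sketch below; band names, class labels, and hyper-parameters are illustrative assumptions:

from sklearn.preprocessing import LabelEncoder

config = {
    's1_input_bands': ['VV', 'VH'],             # 2 SAR bands
    's2_input_bands': ['B02', 'B03', 'B04'],    # 3 optical bands, so n_bands = 5
    'unet_params': {'backbone_name': 'resnet34', 'learning_rate': 1e-4},
}
label_encoder = LabelEncoder().fit(['forest', 'urban', 'water'])

model = get_compiled_unet(config, label_encoder, loss='categorical_crossentropy', large=False)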