Exemplo n.º 1
0
def unet_easy():
    """Train a U-Net on the nucleus dataset and return the fit history.

    Loads the nucleus dataset, builds a 256x256x3-input / 1-channel-output
    U-Net, compiles it with a combined BCE+dice loss, and trains for 50
    epochs with a best-only checkpoint.

    Returns:
        The Keras ``History`` object produced by ``model.fit``.
    """
    # dataset
    dataset = NucleusDataset()
    dataset.load_nucleus(dataset_dir=train_data_dir)
    dataset.prepare()
    nucleus, masks = dataset.load_dataset()
    print(nucleus.shape, masks.shape)

    # model
    model = unet(input_size=(256, 256, 3), pre_weights=None, channels=1)
    optimizer = RMSprop(lr=0.001)
    # Keep only the best weights seen so far (by validation loss).
    checkpointer = ModelCheckpoint(
        'trained_model_weight/model-dsbowl2018-1.h5',
        verbose=1,
        save_best_only=True)
    model.compile(optimizer=optimizer,
                  loss=make_loss('bce_dice'),
                  metrics=[binary_crossentropy, hard_dice_coef])
    # Fixed: removed `best_model_file`, which was computed but never used.
    results = model.fit(nucleus,
                        masks,
                        validation_split=0.1,
                        batch_size=16,
                        epochs=50,
                        callbacks=[checkpointer])
    return results
Exemplo n.º 2
0
def main():
    """Train a lane-segmentation U-Net from a CSV sample list.

    Builds the model, streams batches from ``./data_list/train.csv`` through
    ``train_image_gen``, compiles with binary cross-entropy + MeanIoU, and
    trains with per-epoch weight checkpoints.
    """
    # Input resolution (width, height, channels) expected by the network.
    image_size = [1536, 512, 3]

    model = unet(image_size, Config)

    # Training data: sample list read from CSV, streamed by a generator that
    # resizes to 1536x512 (crop_offset semantics defined by train_image_gen).
    data_dir = './data_list/train.csv'
    train_list = pd.read_csv(data_dir)
    train_reader = train_image_gen(train_list,
                                   Config.BATCH_size,
                                   image_size=[1536, 512],
                                   crop_offset=690)

    # Compile model
    model.compile(
        loss='binary_crossentropy',
        optimizer='adam',
        metrics=[MeanIoU(num_classes=8)],
    )
    # Save weights every epoch.
    # NOTE(review): monitor='val_acc' has no effect here since no validation
    # data is supplied to fit_generator — confirm intent.
    model_check_path = './save_checkpoints/'
    checkpoint = ModelCheckpoint(model_check_path,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_weights_only=True,
                                 save_freq='epoch')
    callbacks_lists = [checkpoint]

    # fit the model
    # Fixed: removed a dangling unterminated triple-quoted string that
    # followed this call and made the module a SyntaxError; also removed
    # the unused `data_gen_args`/`seed` locals and dead commented-out code.
    model.fit_generator(train_reader,
                        steps_per_epoch=134 // 2,
                        epochs=Config.EPOCHS,
                        callbacks=callbacks_lists)
def compile_model():
    """Build the default U-Net, print its summary, and compile it."""
    unet_model = unet()
    unet_model.summary()
    # Evaluation metrics reported alongside the loss during training.
    eval_metrics = [dice_coefficient, surface_distance, robust_hausdorff]
    unet_model.compile(optimizer=adam,
                       loss=binary_crossentropy,
                       metrics=eval_metrics,
                       run_eagerly=True)
    return unet_model
Exemplo n.º 4
0
 def segSingleImage(self, imgPath, weightPath, savePath):
     """Segment one image with pretrained weights and save the result.

     Loads a fresh U-Net, restores weights from ``weightPath``, runs a
     single-step prediction on the image at ``imgPath``, and writes the
     prediction to ``savePath``.
     """
     self.model = unet()
     self.model.load_weights(weightPath)
     source = SingleImageTransform(imgPath)
     # predict_generator expects a generator, so wrap the single image.
     # https://keras.io/zh/models/model/#predict_generator
     generator = dataGenerator(source)
     prediction = self.model.predict_generator(generator, 1, verbose=1)
     # prediction is a numpy ndarray, e.g. shaped (1, H, W, 1).
     savePredict(savePath, prediction)
def test():
    """Visualize U-Net predictions for 500 frames, input vs. result."""
    net = unet(image_size=image_size, num_class=num_class)
    net.load()
    for idx in range(500):
        print(str(idx).zfill(5))
        # Frames are numbered from 1 on disk.
        frame = Image.open("../marine_data/11/images/" +
                           str(idx + 1).zfill(5) + ".jpg")
        frame, label = preprocess(frame)
        plt.subplot(1, 2, 1)
        plt.imshow(np.array(frame))
        # Normalize to [0, 1], predict, and collapse logits to a class map.
        seg_map = np.argmax(net.predict(frame / 255)[0, :, :, :], -1)
        plt.subplot(1, 2, 2)
        plt.imshow(seg_map)
        plt.pause(0.01)
        plt.clf()
def _eval(dataset):
    """Print mean pixel accuracy and mean IoU over the evaluation split."""
    images, truths = dataset.eval_data(batch_size=8,
                                       image_size=(512, 512, 3),
                                       labels=num_class)
    print(truths.shape)
    net = unet(image_size=image_size, num_class=num_class)
    net.load()
    total_acc = 0
    total_miou = 0
    n_samples = images.shape[0]
    for idx in range(n_samples):
        # A fresh metric per image: scores below are per-sample, then averaged.
        scorer = SegmentationMetric(num_class)
        pred_map = np.argmax(net.predict(images[idx, :, :, :])[0, :, :, :], -1)
        truth_map = np.argmax(truths[idx, :, :, :], -1)
        scorer.addBatch(pred_map, truth_map)
        total_acc += scorer.pixelAccuracy()
        total_miou += scorer.meanIntersectionOverUnion()
    print(total_acc / n_samples, total_miou / n_samples)
Exemplo n.º 7
0
def main():
    """Binarize every '*_in.*' image under args.input into args.output.

    Output files keep their names with '_in' replaced by '_out'. The model
    is only built and loaded when at least one input image exists.
    """
    start_time = time.time()

    args = parse_args()

    pattern = os.path.join(args.input, '**', '*_in.*')
    fnames_in = list(glob.iglob(pattern, recursive=True))
    model = None
    if fnames_in:
        mkdir_s(args.output)
        model = unet()
        model.compile(optimizer=Adam(lr=1e-4),
                      loss='binary_crossentropy',
                      metrics=['accuracy'])
        model.load_weights(args.weights)
    for fname in fnames_in:
        img = cv2.imread(fname, cv2.IMREAD_GRAYSCALE).astype(np.float32)
        img = binarize_img(img, model, args.batchsize)
        out_name = os.path.split(fname)[-1].replace('_in', '_out')
        cv2.imwrite(os.path.join(args.output, out_name), img)

    print("finished in {0:.2f} seconds".format(time.time() - start_time))
Exemplo n.º 8
0
 def Load(self):
     """Build the U-Net and load the pretrained ore-segmentation weights."""
     self.model = unet()
     # Fixed: raw string — the Windows path's backslashes were previously
     # parsed as invalid escape sequences (SyntaxWarning, and a future
     # SyntaxError).
     self.model.load_weights(
         r"C:/Users\Keen\Desktop\Project\Github\Minner\Code\TestDemo\data\weight/unet_ore_epoch4.hdf5"
     )
VAL_BATCH = args.val_batch
lr_init = args.lr_init
lr_decay = args.lr_decay
vgg_path = args.vgg
config = Config()
# Two-class (binary) segmentation.
labels = 2
# Select the architecture by name; every branch must bind `model`.
if model_name == "fcn":
    model = fcn_8s(input_shape=(256, 256, 7),
                   num_classes=labels,
                   lr_init=lr_init,
                   lr_decay=lr_decay,
                   vgg_weight_path=vgg_path)
elif model_name == "unet":
    model = unet(input_shape=(256, 256, 7),
                 num_classes=labels,
                 lr_init=lr_init,
                 lr_decay=lr_decay,
                 vgg_weight_path=vgg_path)
elif model_name == "pspnet":
    model = pspnet50(input_shape=(256, 256, 7),
                     num_classes=labels,
                     lr_init=lr_init,
                     lr_decay=lr_decay)
elif model_name == "deeplabv3p":
    model = Deeplabv3(input_shape=(256, 256, 3), classes=labels)
elif model_name == "deeplabv3":
    model = deeplabv3_plus(input_shape=(256, 256, 7), num_classes=labels)
elif model_name == "maskrcnn":
    # Fixed: the result was assigned to `modelt` (typo), leaving `model`
    # unbound when this branch was taken.
    model = modellib.MaskRCNN(mode='training',
                              config=config,
                              model_dir=MODEL_DIR)
Exemplo n.º 10
0
def train(dataset, model_dir, writer):
    """Train a segmentation model (unet or deeplab) driven by dataset.config.

    Runs up to ``dataset.config.max_epochs`` epochs of alternating
    train/test phases, tracks loss, overall accuracy, and per-class IoU via
    a confusion matrix, checkpoints the best state (by test mIoU) into
    ``model_dir``, and stops early when test mIoU has not improved for
    ``dataset.config.early_stopping`` epochs (after ``min_epochs``).

    Args:
        dataset: provides ``get_dataloaders()`` and a ``config`` namespace
            (model choice, lr, epochs, GPU settings, ...).
        model_dir: directory where ``best_state.pth`` is written.
        writer: TensorBoard SummaryWriter used for scalar logging.
    """
    dataloaders = dataset.get_dataloaders()

    # we now set GPU training parameters
    # if the given index is not available then we use index 0
    # also when using multi gpu we should specify index 0
    if dataset.config.gpu_index + 1 > torch.cuda.device_count(
    ) or dataset.config.multi_gpu:
        dataset.config.gpu_index = 0

    logging.info('Using GPU cuda:{}, script PID {}'.format(
        dataset.config.gpu_index, os.getpid()))
    if dataset.config.multi_gpu:
        logging.info('Training on multi-GPU mode with {} devices'.format(
            torch.cuda.device_count()))
    device = torch.device('cuda:{}'.format(dataset.config.gpu_index))

    # Build the configured architecture and move it to the target device.
    if dataset.config.model == 'unet':
        model = unet(input_size=dataset.config.num_feats,
                     num_classes=dataset.config.num_classes,
                     kernel_size=dataset.config.kernel_size).to(device)
    else:
        model = deeplab(backbone=dataset.config.backbone,
                        input_size=dataset.config.num_feats,
                        num_classes=dataset.config.num_classes,
                        kernel_size=dataset.config.kernel_size,
                        sigma=dataset.config.sigma).to(device)

    # if using multi_gpu, wrap the model in DataParallel
    if dataset.config.multi_gpu:
        model = nn.DataParallel(model)

    # create optimizer, loss function, and lr scheduler
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=dataset.config.lr,
                                 weight_decay=1e-4)
    criterion = nn.CrossEntropyLoss()
    # Reduce the lr when the monitored (test) loss plateaus.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        mode='min',
        factor=dataset.config.lr_decay,
        patience=dataset.config.lr_patience,
        verbose=True)

    logging.info('Config {}'.format(dataset.config))
    logging.info(
        'TB logs and checkpoint will be saved in {}'.format(model_dir))

    phases = ['train', 'test']

    # create metric trackers: we track loss, accuracy, and a confusion
    # matrix (from which overall accuracy and per-class IoU are derived)
    trackers = {
        x: {
            'loss':
            metrics.LossMean(),
            'acc':
            metrics.Accuracy(),
            'iou':
            None,
            'cm':
            metrics.ConfusionMatrix(
                num_classes=int(dataset.config.num_classes))
        }
        for x in phases
    }

    # create initial best state object
    best_state = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict() if scheduler else None,
        'train_loss': float('inf'),
        'test_loss': float('inf'),
        'train_acc': 0.0,
        'test_acc': 0.0,
        'train_mIoU': 0.0,
        'test_mIoU': 0.0,
        'convergence_epoch': 0,
        'num_epochs_since_best_acc': 0
    }

    # now we train!
    for epoch in range(dataset.config.max_epochs):
        for phase in phases:
            if phase == 'train':
                model.train()
            else:
                model.eval()

            # reset metrics
            trackers[phase]['loss'].reset()
            trackers[phase]['cm'].reset()

            for step_number, inputs in enumerate(
                    tqdm(dataloaders[phase],
                         desc='[{}/{}] {} '.format(epoch + 1,
                                                   dataset.config.max_epochs,
                                                   phase))):
                # Batches come as (features, coordinates, labels); features
                # and coords are permuted to channel-first layout.
                data = inputs[0].to(device, dtype=torch.float).permute(0, 2, 1)
                coords = inputs[1].to(device,
                                      dtype=torch.float).permute(0, 2, 1)
                label = inputs[2].to(device, dtype=torch.long)

                # compute gradients on train only
                with torch.set_grad_enabled(phase == 'train'):
                    out = model(data, coords)
                    loss = criterion(out, label)
                    if phase == 'train':
                        optimizer.zero_grad()
                        loss.backward()
                        optimizer.step()

                # now we update metrics
                trackers[phase]['loss'].update(average_loss=loss,
                                               batch_size=data.size(0))
                trackers[phase]['cm'].update(y_true=label, y_logits=out)

            logging.info('Computing accuracy...')

            # derive epoch-level metrics from the trackers
            epoch_loss = trackers[phase]['loss'].result()
            epoch_overall_acc = trackers[phase]['cm'].result(metric='accuracy')
            epoch_iou = trackers[phase]['cm'].result(metric='iou')
            epoch_miou = epoch_iou.mean()

            logging.info(
                '--------------------------------------------------------------------------------'
            )
            logging.info(
                '[{}/{}] {} Loss: {:.2e}. Overall Acc: {:.4f}. mIoU {:.4f}'.
                format(epoch + 1, dataset.config.max_epochs, phase, epoch_loss,
                       epoch_overall_acc, epoch_miou))
            iou_per_class_str = ' '.join(
                ['{:.4f}'.format(s) for s in epoch_iou])
            logging.info('IoU per class: {}'.format(iou_per_class_str))
            logging.info(
                '--------------------------------------------------------------------------------'
            )

            # we update our learning rate scheduler if loss does not improve
            if phase == 'test' and scheduler:
                scheduler.step(epoch_loss)
                writer.add_scalar('params/lr', optimizer.param_groups[0]['lr'],
                                  epoch + 1)

            writer.add_scalar('loss/epoch_{}'.format(phase), epoch_loss,
                              epoch + 1)
            writer.add_scalar('miou/epoch_{}'.format(phase), epoch_miou,
                              epoch + 1)
            writer.add_scalar('acc_all/epoch_{}'.format(phase),
                              epoch_overall_acc, epoch + 1)

        # after each epoch we update best state values as needed
        # first we save our state when we get better test accuracy
        # NOTE(review): strict '>' means an epoch that exactly ties the best
        # mIoU is treated as an improvement and re-saved — confirm intent.
        if best_state['test_mIoU'] > trackers['test']['cm'].result(
                metric='iou').mean():
            best_state['num_epochs_since_best_acc'] += 1
        else:
            logging.info('Got a new best model with mIoU {:.4f}'.format(
                trackers['test']['cm'].result(metric='iou').mean()))
            best_state = {
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'scheduler': scheduler.state_dict() if scheduler else None,
                'train_loss': trackers['train']['loss'].result(),
                'test_loss': trackers['test']['loss'].result(),
                'train_acc': trackers['train']['cm'].result(metric='accuracy'),
                'test_acc': trackers['test']['cm'].result(metric='accuracy'),
                'train_mIoU':
                trackers['train']['cm'].result(metric='iou').mean(),
                'test_mIoU':
                trackers['test']['cm'].result(metric='iou').mean(),
                'convergence_epoch': epoch + 1,
                'num_epochs_since_best_acc': 0
            }

            file_name = os.path.join(model_dir, 'best_state.pth')
            torch.save(best_state, file_name)
            logging.info('saved checkpoint in {}'.format(file_name))

        # we check for early stopping when we have trained a min number of epochs
        if epoch >= dataset.config.min_epochs and best_state[
                'num_epochs_since_best_acc'] >= dataset.config.early_stopping:
            logging.info('Accuracy did not improve for {} iterations!'.format(
                dataset.config.early_stopping))
            logging.info('[Early stopping]')
            break

    utl.dump_best_model_metrics_to_tensorboard(writer, phases, best_state)

    logging.info('************************** DONE **************************')
        tf.config.experimental.set_virtual_device_configuration(
       gpus[0],
        [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=500)])
    except RuntimeError as e:
        print(e)
        
# Open the test clip; frames are consumed one at a time further below.
cap= cv2.VideoCapture("test.mp4")

#fourcc = cv2.VideoWriter_fourcc(*'DIVX')
#out = cv2.VideoWriter('result1.mp4', fourcc, 30.0, (480,272))

# Network input resolution and number of segmentation classes.
IMG_WIDTH = 480
IMG_HEIGHT = 272
n_classes = 7

# Build the U-Net over (H, W, 3) RGB input and restore trained weights.
model = unet((IMG_HEIGHT, IMG_WIDTH ,3), n_classes)
#model.load_weights("pspunet_weight.h5")

model.load_weights("model_h5/unet/unet_0.35704005_epoch_9.h5")
while True:
    start= time.time()
    try:
        _,frame = cap.read()
        frame = cv2.resize(frame, (IMG_WIDTH, IMG_HEIGHT))
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = frame[tf.newaxis, ...]
        frame = frame/255
    except:
        #out.release()
        cv2.destroyAllWindows()
Exemplo n.º 12
0
label_path = './BUS/data2/GT/Case81.png'

# Network input size and class/channel configuration.
img_width = 128
img_height = 128
nb_class = 2
channels = 3


# Create model
print('Creating network...\n')
# Select the architecture by name; every branch binds `model`.
if model_name == "fcn":
    model = fcn_8s(input_shape=(img_height, img_width, channels), num_classes=nb_class,
                   lr_init=1e-3, lr_decay=5e-4, vgg_weight_path=vgg_path)
elif model_name == "unet":
    model = unet(input_shape=(img_height, img_width, channels), num_classes=nb_class,
                 lr_init=1e-3, lr_decay=5e-4, vgg_weight_path=vgg_path)
elif model_name == "fuzzyunet":
    model = fuzzy_unet(input_shape=(img_height, img_width, channels), num_classes=nb_class,
                 lr_init=1e-3, lr_decay=5e-4, vgg_weight_path=vgg_path)
elif model_name == "pspnet":
    model = pspnet50(input_shape=(img_height, img_width, channels), num_classes=nb_class, lr_init=1e-3, lr_decay=5e-4)

# load weights (best-effort: warn and continue if no trained weights exist)
try:
    model.load_weights(model_name + '_model_weight.h5')
except Exception:
    # Fixed: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # still propagate instead of being swallowed.
    print("You must train model and get weight before test.")

# Palette, used to show the result
palette = VOCPalette(nb_class=nb_class)
# print the testing image's name
Exemplo n.º 13
0
def set_configuration(n_epoch=500, flag_aug=False):
    """Build the U-Net, optimizer, loss, and callbacks for one training run.

    Writes the training (and optionally augmentation) configuration to
    ``.\\logs\\<MODEL_ID>\\`` as .npy files.

    Args:
        n_epoch: number of training epochs recorded in the config.
        flag_aug: when True, also save an augmentation configuration.

    Returns:
        Tuple of (model, opt, loss, callbacks_list, conf).
    """

    IMG_ROWS = GL_get_value("IMG_ROWS")
    IMG_COLS = GL_get_value("IMG_COLS")
    MODEL_ID = GL_get_value("MODEL_ID")

    model = None
    opt = None
    loss = None

    # logs (Windows-style path separators)
    log_path = '.\\logs\\' + MODEL_ID + "\\"
    if not os.path.exists(log_path):
        os.makedirs(log_path)

    # set training configurations
    conf = {
        "image_shape": (IMG_ROWS, IMG_COLS, 1),
        "out_channel": 1,
        "filter": GL_get_value("n_filter"),
        "depth": GL_get_value("depth"),
        "inc_rate": 2,
        "activation": 'relu',
        "dropout": GL_get_value("flag_Dropout"),
        "batchnorm": GL_get_value("flag_BN"),
        "maxpool": True,
        "upconv": True,
        "residual": True,
        "shuffle": True,
        "augmentation": False,
        "learning_rate": 1e-5,
        "decay": 0.0,
        "epsilon": 1e-8,
        "beta_1": 0.9,
        "beta_2": 0.999,
        "epochs": n_epoch,
        "loss": 'loss_breast_p2p',
        "metric": "mse",
        "optimizer": 'Adam',
        "batch_size": 10
    }
    # NOTE(review): with flag_smooth set, conf["loss"] becomes
    # 'loss_breast_p2p_smooth', which matches none of the branches below,
    # leaving `loss` as None — confirm callers handle that case.
    if GL_get_value("flag_smooth"):
        conf["loss"] = conf["loss"] + '_smooth'
    np.save(log_path + 'info.npy', conf)

    if flag_aug:
        # set augmentation configurations
        conf_a = {
            "rotation_range": 15,
            "shear_range": 10,
            "width_shift_range": 0.33,
            "height_shift_range": 0.33,
            "zoom_range": 0.33,
            "horizontal_flip": True,
            "vertical_flip": True,
            "fill_mode": 'nearest',
            "seed": 314,
            "batch_size": conf["batch_size"]
        }
        np.save(log_path + 'aug.npy', conf_a)

    # build up the model
    model = unet(img_shape=conf["image_shape"],
                 out_ch=conf["out_channel"],
                 start_ch=conf["filter"],
                 depth=conf["depth"],
                 inc_rate=conf["inc_rate"],
                 activation=conf["activation"],
                 dropout=conf["dropout"],
                 batchnorm=conf["batchnorm"],
                 maxpool=conf["maxpool"],
                 upconv=conf["upconv"],
                 residual=conf["residual"])

    # Adam optimizer (conf hardcodes 'Adam', so `opt` is always set here)
    if conf["optimizer"] == 'Adam':
        opt = Adam(lr=conf["learning_rate"],
                   decay=conf["decay"],
                   epsilon=conf["epsilon"],
                   beta_1=conf["beta_1"],
                   beta_2=conf["beta_2"])
    # Map the configured loss name to the actual loss function.
    if conf["loss"] == 'mse1e6':
        loss = mean_squared_error_1e6
    if conf["loss"] == 'Gray_White_CSF':
        loss = Gray_White_CSF
    if conf["loss"] == 'Gray_White_CSF_smooth':
        loss = Gray_White_CSF_soomth
    if conf["loss"] == 'loss_breast':
        loss = loss_breast
    if conf["loss"] == 'loss_breast_practical':
        loss = loss_breast_practical
    if conf["loss"] == 'loss_breast_p2p':
        loss = loss_breast_p2p

    # callback
    callbacks_list = set_checkpoint(log_path=log_path,
                                    MODEL_ID=MODEL_ID,
                                    batch_size=conf["batch_size"])

    return model, opt, loss, callbacks_list, conf
Exemplo n.º 14
0
    lake = (argmax_idx == 1)
    img[:, :, 0] = np.where(lake, 255, 0)
    return img

# Architecture to evaluate and the test image to run it on.
model_name = 'fcn'
img_path = './img/test-512.tif'

# Use only 2 classes.
labels = ['lake','_background_']


# Choose model to train
# All variants share the same interface: 512x512 RGB input, one output
# class per label, initial lr 1e-3 with 5e-4 decay.
if model_name == "fcn":
    model = fcn_8s(input_shape=(512, 512, 3), num_classes=len(labels), lr_init=1e-3, lr_decay=5e-4)
elif model_name == "unet":
    model = unet(input_shape=(512, 512, 3), num_classes=len(labels), lr_init=1e-3, lr_decay=5e-4)
elif model_name == "unet_fpa":
    model = unet_fpa(input_shape=(512, 512, 3), num_classes=len(labels), lr_init=1e-3, lr_decay=5e-4)
elif model_name == "unet_gau":
    model = unet_gau(input_shape=(512, 512, 3), num_classes=len(labels), lr_init=1e-3, lr_decay=5e-4)
elif model_name == "unet_fpagau":
    model = unet_fpagau(input_shape=(512, 512, 3), num_classes=len(labels), lr_init=1e-3, lr_decay=5e-4)
elif model_name == "unet_att":
    model = unet_att(input_shape=(512, 512, 3), num_classes=len(labels), lr_init=1e-3, lr_decay=5e-4)
elif model_name == "unet_se":
    model = unet_se(input_shape=(512, 512, 3), num_classes=len(labels), lr_init=1e-3, lr_decay=5e-4)
elif model_name == "unet_se_gau":
    model = unet_se_gau(input_shape=(512, 512, 3), num_classes=len(labels), lr_init=1e-3, lr_decay=5e-4)
elif model_name == "unet_se_gau_fpa":
    model = unet_se_gau_fpa(input_shape=(512, 512, 3), num_classes=len(labels), lr_init=1e-3, lr_decay=5e-4)
elif model_name == "unet_atts":
Exemplo n.º 15
0
# Path to pretrained VGG weights used to initialize the encoder.
vgg_path = args.vgg

# Use only 3 classes.
# NOTE(review): the list actually has 4 entries (background included) —
# confirm which count the comment intends.
labels = ['background', 'person', 'car', 'road']

# Choose model to train (all take 256x512 RGB input).
if model_name == "fcn":
    model = fcn_8s(input_shape=(256, 512, 3),
                   num_classes=len(labels),
                   lr_init=lr_init,
                   lr_decay=lr_decay,
                   vgg_weight_path=vgg_path)
elif model_name == "unet":
    model = unet(input_shape=(256, 512, 3),
                 num_classes=len(labels),
                 lr_init=lr_init,
                 lr_decay=lr_decay,
                 vgg_weight_path=vgg_path)
elif model_name == "pspnet":
    model = pspnet50(input_shape=(256, 512, 3),
                     num_classes=len(labels),
                     lr_init=lr_init,
                     lr_decay=lr_decay)

# Define callbacks: keep only the best weights by validation dice score,
# and dump sample predictions to ./img after each epoch.
checkpoint = ModelCheckpoint(filepath=model_name + '_model_weight.h5',
                             monitor='val_dice_coef',
                             save_best_only=True,
                             save_weights_only=True)
train_check = TrainCheck(output_path='./img', model_name=model_name)
#early_stopping = EarlyStopping(monitor='val_dice_coef', patience=10)
Exemplo n.º 16
0
def train_a_unet(data_path):
    """Train a U-Net on paired grayscale->RGB image directories.

    Reads run parameters from module-level globals (``gbl_get_value``),
    saves the training and augmentation configurations to the log
    directory, builds (or fine-tunes) the model, streams augmented
    train/validation pairs from ``data_path``, and trains with a
    perceptual loss.

    Args:
        data_path: root directory containing ``train/train_x``,
            ``train/train_y``, ``val/val_x`` and ``val/val_y``.

    Returns:
        The trained Keras model.
    """

    slice_x = gbl_get_value("slice_x")
    n_slice_train = gbl_get_value("n_slice_train")
    n_slice_val = gbl_get_value("n_slice_val")
    n_pixel = gbl_get_value('input_size')
    model_id = gbl_get_value("model_id")
    dir_model = gbl_get_value('dir_model')

    epochs = gbl_get_value("n_epoch")
    # Fixed: renamed local `n_fliter` -> `n_filter` (typo).
    n_filter = gbl_get_value("n_filter")
    depth = gbl_get_value("depth")
    batch_size = gbl_get_value("batch_size")
    optimizer = 'RAdam'
    flag_save = True

    # ----------------------------------------------Configurations----------------------------------------------#

    # save logs
    log_path = dir_model.split('model')[0] + 'log/'
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    tensorboard = TensorBoard(log_dir=log_path,
                              batch_size=batch_size,
                              write_graph=True,
                              write_grads=True,
                              write_images=True)

    # set training configurations
    conf = {
        "image_shape": (n_pixel, n_pixel, slice_x),
        "out_channel": 1,
        "filter": n_filter,
        "depth": depth,
        "inc_rate": 2,
        "activation": 'relu',
        "dropout": True,
        "batchnorm": True,
        "maxpool": True,
        "upconv": True,
        "residual": True,
        "shuffle": True,
        "augmentation": True,
        "learning_rate": 1e-4,
        "decay": 0.0,
        "epsilon": 1e-8,
        "beta_1": 0.9,
        "beta_2": 0.999,
        "validation_split": 0.3,
        "batch_size": batch_size,
        "epochs": epochs,
        "loss": "mse1e6",
        "metric": "mse",
        "optimizer": optimizer,
        "model_id": model_id
    }
    np.save(log_path + model_id + '_info.npy', conf)

    # set augmentation configurations
    conf_a = {
        "rotation_range": 15,
        "shear_range": 10,
        "width_shift_range": 0.33,
        "height_shift_range": 0.33,
        "zoom_range": 0.33,
        "horizontal_flip": True,
        "vertical_flip": True,
        "fill_mode": 'nearest',
        "seed": 314,
        "batch_size": conf["batch_size"]
    }
    # NOTE(review): filename uses a double underscore ('__aug.npy'), unlike
    # '_info.npy' above — kept as-is since other code may read this name.
    np.save(log_path + model_id + '__aug.npy', conf_a)

    if flag_save:
        # Track best-by-PSNR and best-by-loss checkpoints separately.
        check_path_1 = dir_model + 'psnr_model_' + model_id + '.hdf5'  # _{epoch:03d}_{val_loss:.4f}
        checkpoint1 = ModelCheckpoint(check_path_1,
                                      monitor='val_psnr',
                                      verbose=1,
                                      save_best_only=True,
                                      mode='max')
        check_path_2 = dir_model + 'loss_model_' + model_id + '.hdf5'  # _{epoch:03d}_{val_loss:.4f}
        checkpoint2 = ModelCheckpoint(check_path_2,
                                      monitor='val_loss',
                                      verbose=1,
                                      save_best_only=True,
                                      mode='min')
        callbacks_list = [checkpoint1, checkpoint2, tensorboard]
    else:
        callbacks_list = [tensorboard]

    # ----------------------------------------------Create Model----------------------------------------------#

    # build up the model
    print(conf)

    if gbl_get_value("pretrained_flag") == 0:
        print('-----------------start with new model---------------')
        model = unet(img_shape=conf["image_shape"],
                     out_ch=conf["out_channel"],
                     start_ch=conf["filter"],
                     depth=conf["depth"],
                     inc_rate=conf["inc_rate"],
                     activation=conf["activation"],
                     dropout=conf["dropout"],
                     batchnorm=conf["batchnorm"],
                     maxpool=conf["maxpool"],
                     upconv=conf["upconv"],
                     residual=conf["residual"])
        # model = multi_gpu_model(model, 3)

    else:
        # load model
        print('-----------------fine tune previous models----------------')
        model_path = gbl_get_value("pretrained_path")
        model = load_model(model_path, compile=False)

    # for the perceptual loss model, the loss function is perceptual loss
    loss = perceptual_loss

    # for the mse loss model, the loss function is mse loss
    # loss = mean_squared_error_1e6

    opt = RAdam(learning_rate=conf['learning_rate'],
                total_steps=10000,
                warmup_proportion=0.1,
                min_lr=1e-6)

    # ----------------------------------------------Data Generator----------------------------------------------#

    # train data_generator
    data_generator1 = ImageDataGenerator(
        rescale=1. / 255,
        rotation_range=conf_a["rotation_range"],
        shear_range=conf_a["shear_range"],
        width_shift_range=conf_a["width_shift_range"],
        height_shift_range=conf_a["height_shift_range"],
        zoom_range=conf_a["zoom_range"],
        horizontal_flip=conf_a["horizontal_flip"],
        vertical_flip=conf_a["vertical_flip"],
        fill_mode=conf_a["fill_mode"])
    # preprocessing_function=aug_noise)
    data_generator2 = ImageDataGenerator(
        rescale=1. / 255,
        rotation_range=conf_a["rotation_range"],
        shear_range=conf_a["shear_range"],
        width_shift_range=conf_a["width_shift_range"],
        height_shift_range=conf_a["height_shift_range"],
        zoom_range=conf_a["zoom_range"],
        horizontal_flip=conf_a["horizontal_flip"],
        vertical_flip=conf_a["vertical_flip"],
        fill_mode=conf_a["fill_mode"])
    # preprocessing_function=aug_noise)

    # validation data_generator (no rotation/shear, unlike training)
    data_generator3 = ImageDataGenerator(
        rescale=1. / 255,
        width_shift_range=conf_a["width_shift_range"],
        height_shift_range=conf_a["height_shift_range"],
        zoom_range=conf_a["zoom_range"],
        horizontal_flip=conf_a["horizontal_flip"],
        vertical_flip=conf_a["vertical_flip"],
        fill_mode=conf_a["fill_mode"])
    # preprocessing_function=aug_noise)
    data_generator4 = ImageDataGenerator(
        rescale=1. / 255,
        width_shift_range=conf_a["width_shift_range"],
        height_shift_range=conf_a["height_shift_range"],
        zoom_range=conf_a["zoom_range"],
        horizontal_flip=conf_a["horizontal_flip"],
        vertical_flip=conf_a["vertical_flip"],
        fill_mode=conf_a["fill_mode"])
    # preprocessing_function=aug_noise)

    aug_dir = ''

    train_x_path = data_path + 'train/train_x/'
    train_y_path = data_path + 'train/train_y/'
    val_x_path = data_path + 'val/val_x/'
    # Fixed: added the trailing slash for consistency with the other paths.
    val_y_path = data_path + 'val/val_y/'

    # zip x/y streams; identical seed + shuffle keeps pairs aligned
    data_generator_t = zip(
        data_generator1.flow_from_directory(train_x_path,
                                            target_size=(512, 512),
                                            color_mode='grayscale',
                                            classes=None,
                                            class_mode=None,
                                            batch_size=conf_a["batch_size"],
                                            shuffle=True,
                                            seed=conf_a["seed"],
                                            save_to_dir=aug_dir,
                                            save_prefix='train_x'),
        data_generator2.flow_from_directory(train_y_path,
                                            target_size=(512, 512),
                                            color_mode='rgb',
                                            classes=None,
                                            class_mode=None,
                                            batch_size=conf_a["batch_size"],
                                            shuffle=True,
                                            seed=conf_a["seed"],
                                            save_to_dir=aug_dir,
                                            save_prefix='train_y'))

    data_generator_v = zip(
        data_generator3.flow_from_directory(val_x_path,
                                            target_size=(512, 512),
                                            color_mode='grayscale',
                                            classes=None,
                                            class_mode=None,
                                            batch_size=conf_a["batch_size"],
                                            shuffle=True,
                                            seed=conf_a["seed"],
                                            save_to_dir=aug_dir,
                                            save_prefix='val_x'),
        data_generator4.flow_from_directory(val_y_path,
                                            target_size=(512, 512),
                                            color_mode='rgb',
                                            classes=None,
                                            class_mode=None,
                                            batch_size=conf_a["batch_size"],
                                            shuffle=True,
                                            seed=conf_a["seed"],
                                            save_to_dir=aug_dir,
                                            save_prefix='val_y'))

    # ----------------------------------------------Train Model----------------------------------------------#

    # compile
    model.compile(loss=loss,
                  optimizer=opt,
                  metrics=[
                      content_loss, style_loss, psnr, mean_squared_error_1e6,
                      mean_absolute_error_1e6
                  ])

    # train
    model.fit_generator(
        generator=data_generator_t,
        steps_per_epoch=int(n_slice_train / conf_a["batch_size"]),
        epochs=conf["epochs"],
        callbacks=callbacks_list,
        validation_data=data_generator_v,
        validation_steps=int(n_slice_val / conf_a["batch_size"]))  #

    return model
Exemplo n.º 17
0
from keras.callbacks import ModelCheckpoint
from callbacks import TrainCheck

from model.unet import unet
from model.fcn import fcn_8s
from dataset_parser.generator import data_generator

# Use only 3 foreground classes plus background (4 labels total).
labels = ['background', 'person', 'car', 'road']

# Batch sizes for training and validation generators.
TRAIN_BATCH = 4
VAL_BATCH = 1

# Build a U-Net segmenter over 256x512 RGB inputs, one output channel per
# label, with VGG16 encoder weights loaded from a local file.
model = unet(input_shape=(256, 512, 3),
             num_classes=len(labels),
             init_lr=5e-3,
             vgg_weight_path="../vgg16_notop.h5")
#model = fcn_8s(input_shape=(256, 512, 3), num_classes=len(labels), init_lr=1e-3, vgg_weight_path="../vgg16_notop.h5")

model.summary()

# Keep only the best weights (by default monitored metric) during training.
checkpoint = ModelCheckpoint(filepath='model_weight.h5',
                             save_best_only=True,
                             save_weights_only=True)
# Custom callback that dumps sample predictions to ./img each epoch --
# presumably for visual progress checks; see callbacks.TrainCheck.
train_check = TrainCheck(output_path='./img')

# training
history = model.fit_generator(data_generator('dataset_parser/data.h5',
                                             TRAIN_BATCH, 'train'),
                              steps_per_epoch=3475 // TRAIN_BATCH,
                              validation_data=data_generator(
def train(md):
    """Build a U-Net, wire up a batch generator from *md*, train and save it.

    Parameters
    ----------
    md : object
        Data module exposing ``BatchGenerator`` -- presumably a dataset
        wrapper; TODO confirm against the caller.
    """
    # Module-level `image_size` and `num_class` configure the network.
    net = unet(image_size=image_size, num_class=num_class)
    # 4-image batches of 512x512 RGB with `num_class` label channels.
    generator = md.BatchGenerator(batch_size=4,
                                  image_size=(512, 512, 3),
                                  labels=num_class)
    net.batch_generator = generator
    net.train(epochs=10, steps_per_epoch=500)
    net.save()
Exemplo n.º 19
0
                    required=True,
                    help="The image path you want to test")

args = parser.parse_args()
model_name = args.model
img_path = args.img_path

# Binary segmentation: foreground vs. background.
labels = 2
# Select the architecture by CLI name. NOTE(review): input channel counts
# differ across branches (5 for most, 7 for unet) -- confirm these match the
# data each model was trained on. If `model_name` matches none of the
# branches, `model` is never bound and the load_weights call below raises
# NameError.
if model_name == "fcn":
    model = fcn_8s(input_shape=(256, 256, 5),
                   num_classes=labels,
                   lr_init=1e-3,
                   lr_decay=5e-4)
elif model_name == "unet":
    model = unet(input_shape=(256, 256, 7),
                 num_classes=labels,
                 lr_init=1e-3,
                 lr_decay=5e-4)
elif model_name == "pspnet":
    model = pspnet50(input_shape=(256, 256, 5),
                     num_classes=labels,
                     lr_init=1e-3,
                     lr_decay=5e-4)
elif model_name == 'deeplabv3p':
    model = Deeplabv3(input_shape=(256, 256, 5), classes=labels)
elif model_name == "deeplabv3":
    model = deeplabv3_plus(input_shape=(256, 256, 5), num_classes=labels)
elif model_name == "segnet":
    model = SegNet(input_shape=(256, 256, 5), classes=labels)
elif model_name == "refinenet":
    model = refinenet(input_shape=(256, 256, 5), num_classes=labels)
# NOTE(review): the unet weight file is loaded regardless of which model was
# selected above -- the weights will not match other architectures; confirm.
model.load_weights("h5File/unet_model_weight.h5")
Exemplo n.º 20
0
def train_a_unet(X, Y):
    """Train a residual U-Net to map ``X`` slices onto ``Y`` slices.

    Configuration (filter count, depth, epochs, batch size, ...) is read via
    the project-global ``gbl_get_value`` store; training/augmentation configs
    are persisted as ``.npy`` files next to the TensorBoard logs.

    Parameters
    ----------
    X : np.ndarray
        Input volume; reshaped to ``(n_slice, n_pixel, n_pixel, slice_x)``.
        Assumes X.shape[0] is the slice axis and X.shape[2] the pixel size
        -- TODO confirm against the caller.
    Y : np.ndarray
        Target volume; reshaped to ``(n_slice, n_pixel, n_pixel, 1)``.

    Returns
    -------
    The trained Keras model.
    """
    slice_x = gbl_get_value("slice_x")
    n_pixel = X.shape[2]
    n_slice = X.shape[0]
    model_id = gbl_get_value("model_id")
    # dir_model = gbl_get_value('dir_model')
    dir_model = './'

    epochs = gbl_get_value("n_epoch")
    n_filter = gbl_get_value("n_filter")  # fixed local-name typo (was n_fliter)
    depth = gbl_get_value("depth")
    batch_size = gbl_get_value("batch_size")
    optimizer = 'Adam'

    # In "see_aug" mode we only inspect augmented samples, so no model
    # checkpoints are written.
    run_aim = gbl_get_value("run_aim")
    flag_save = run_aim != 'see_aug'

    # ----------------------------------------------Configurations----------------------------------------------#

    # TensorBoard logs, one directory per model id.
    log_path = './logs/' + model_id + "/"
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    tensorboard = TensorBoard(log_dir=log_path,
                              batch_size=batch_size,
                              write_graph=True,
                              write_grads=True,
                              write_images=True)

    # Training configuration, persisted for provenance/reproducibility.
    conf = {
        "image_shape": (n_pixel, n_pixel, slice_x),
        "out_channel": 1,
        "filter": n_filter,
        "depth": depth,
        "inc_rate": 2,
        "activation": 'relu',
        "dropout": True,
        "batchnorm": True,
        "maxpool": True,
        "upconv": True,
        "residual": True,
        "shuffle": True,
        "augmentation": True,
        "learning_rate": 1e-5,
        "decay": 0.0,
        "epsilon": 1e-8,
        "beta_1": 0.9,
        "beta_2": 0.999,
        "validation_split": 0.2632,
        "batch_size": batch_size,
        "epochs": epochs,
        "loss": "mse1e6",
        "metric": "mse",
        "optimizer": optimizer,
        "model_id": model_id
    }
    np.save(log_path + model_id + '_info.npy', conf)

    # Augmentation configuration, persisted alongside.
    conf_a = {
        "rotation_range": 15,
        "shear_range": 10,
        "width_shift_range": 0.33,
        "height_shift_range": 0.33,
        "zoom_range": 0.33,
        "horizontal_flip": True,
        "vertical_flip": True,
        "fill_mode": 'nearest',
        "seed": 314,
        "batch_size": conf["batch_size"]
    }
    np.save(log_path + model_id + '__aug.npy', conf_a)

    # Checkpoint on best validation PSNR (higher is better -> mode='max').
    if flag_save:
        check_path = dir_model + 'model_' + model_id + '.hdf5'  # _{epoch:03d}_{val_loss:.4f}
        checkpoint1 = ModelCheckpoint(check_path,
                                      monitor='val_psnr',
                                      verbose=1,
                                      save_best_only=True,
                                      mode='max')
        callbacks_list = [checkpoint1, tensorboard]
    else:
        callbacks_list = [tensorboard]

    # ----------------------------------------------Create Model----------------------------------------------#

    model = unet(img_shape=conf["image_shape"],
                 out_ch=conf["out_channel"],
                 start_ch=conf["filter"],
                 depth=conf["depth"],
                 inc_rate=conf["inc_rate"],
                 activation=conf["activation"],
                 dropout=conf["dropout"],
                 batchnorm=conf["batchnorm"],
                 maxpool=conf["maxpool"],
                 upconv=conf["upconv"],
                 residual=conf["residual"])

    loss = mean_squared_error_1e6
    opt = Adam(lr=conf["learning_rate"],
               decay=conf["decay"],
               epsilon=conf["epsilon"],
               beta_1=conf["beta_1"],
               beta_2=conf["beta_2"])

    # ----------------------------------------------Data Split----------------------------------------------#

    X = X.reshape((n_slice, n_pixel, n_pixel, slice_x))
    Y = Y.reshape((n_slice, n_pixel, n_pixel, 1))

    x_train, x_val, y_train, y_val = train_test_split(X,
                                                      Y,
                                                      test_size=0.33,
                                                      random_state=42)
    # Scale each subset to [0, 1] by its own maximum.
    x_train = x_train / np.amax(x_train)
    y_train = y_train / np.amax(y_train)
    x_val = x_val / np.amax(x_val)
    y_val = y_val / np.amax(y_val)

    # ----------------------------------------------Data Generator----------------------------------------------#

    # Separate input/target generators with an identical configuration and a
    # shared seed, so the random transforms stay aligned between x and y.
    data_generator1 = ImageDataGenerator(
        rotation_range=conf_a["rotation_range"],
        shear_range=conf_a["shear_range"],
        width_shift_range=conf_a["width_shift_range"],
        height_shift_range=conf_a["height_shift_range"],
        zoom_range=conf_a["zoom_range"],
        horizontal_flip=conf_a["horizontal_flip"],
        vertical_flip=conf_a["vertical_flip"],
        fill_mode=conf_a["fill_mode"],
        preprocessing_function=aug_noise)
    data_generator2 = ImageDataGenerator(
        rotation_range=conf_a["rotation_range"],
        shear_range=conf_a["shear_range"],
        width_shift_range=conf_a["width_shift_range"],
        height_shift_range=conf_a["height_shift_range"],
        zoom_range=conf_a["zoom_range"],
        horizontal_flip=conf_a["horizontal_flip"],
        vertical_flip=conf_a["vertical_flip"],
        fill_mode=conf_a["fill_mode"],
        preprocessing_function=aug_noise)

    # Validation generators: same augmentation minus rotation/shear.
    data_generator3 = ImageDataGenerator(
        width_shift_range=conf_a["width_shift_range"],
        height_shift_range=conf_a["height_shift_range"],
        zoom_range=conf_a["zoom_range"],
        horizontal_flip=conf_a["horizontal_flip"],
        vertical_flip=conf_a["vertical_flip"],
        fill_mode=conf_a["fill_mode"],
        preprocessing_function=aug_noise)
    data_generator4 = ImageDataGenerator(
        width_shift_range=conf_a["width_shift_range"],
        height_shift_range=conf_a["height_shift_range"],
        zoom_range=conf_a["zoom_range"],
        horizontal_flip=conf_a["horizontal_flip"],
        vertical_flip=conf_a["vertical_flip"],
        fill_mode=conf_a["fill_mode"],
        preprocessing_function=aug_noise)

    # When inspecting augmentation, dump the generated images to disk.
    # An empty save_to_dir disables saving in Keras.
    if run_aim == "see_aug":
        aug_dir = './aug_files/' + model_id + '/'
        if not os.path.exists(aug_dir):
            os.makedirs(aug_dir)
    else:
        aug_dir = ''

    # Pair the (input, target) streams; the shared seed keeps them in sync.
    data_generator_t = zip(
        data_generator1.flow(x=x_train,
                             y=None,
                             batch_size=conf_a["batch_size"],
                             seed=conf_a["seed"],
                             save_to_dir=aug_dir,
                             save_prefix='train_x'),
        data_generator2.flow(x=y_train,
                             y=None,
                             batch_size=conf_a["batch_size"],
                             seed=conf_a["seed"],
                             save_to_dir=aug_dir,
                             save_prefix='train_y'))
    data_generator_v = zip(
        data_generator3.flow(x=x_val,
                             y=None,
                             batch_size=conf_a["batch_size"],
                             seed=conf_a["seed"],
                             save_to_dir=aug_dir,
                             save_prefix='val_x'),
        data_generator4.flow(x=y_val,
                             y=None,
                             batch_size=conf_a["batch_size"],
                             seed=conf_a["seed"],
                             save_to_dir=aug_dir,
                             save_prefix='val_y'))

    # ----------------------------------------------Train Model----------------------------------------------#

    model.compile(loss=loss,
                  optimizer=opt,
                  metrics=[mean_squared_error_1e6, psnr])

    # BUGFIX: steps were previously derived from hard-coded 0.7/0.3 fractions
    # of n_slice, which did not match the actual test_size=0.33 split above.
    # Use the real subset sizes so each epoch covers the data exactly once.
    model.fit_generator(
        generator=data_generator_t,
        steps_per_epoch=int(x_train.shape[0] / conf_a["batch_size"]),
        epochs=conf["epochs"],
        callbacks=callbacks_list,
        validation_data=data_generator_v,
        validation_steps=int(x_val.shape[0] / conf_a["batch_size"]))

    return model
Exemplo n.º 21
0
def main():
    """Train, validate and test a U-Net on paired ``*_in.png``/``*_gt.png`` files.

    Reads images from ``args.input/in`` and ``args.input/gt``, splits them
    contiguously into train/validation/test by the ``--train``/``--val``
    percentages, trains for ``--epochs`` epochs and prints test metrics.

    Raises
    ------
    ValueError
        If the input and ground-truth directories contain different numbers
        of files.
    """
    start_time = time.time()
    args = parse_args()
    # Seed from OS entropy so each run shuffles/augments differently.
    np.random.seed()

    # Files are named '<i>_in.png' / '<i>_gt.png' with contiguous indices.
    fnames_in = [
        os.path.join(args.input, 'in',
                     str(i) + '_in.png')
        for i in range(len(os.listdir(os.path.join(args.input, 'in'))))
    ]
    fnames_gt = [
        os.path.join(args.input, 'gt',
                     str(i) + '_gt.png')
        for i in range(len(os.listdir(os.path.join(args.input, 'gt'))))
    ]
    # BUGFIX: was a bare assert, which is stripped under `python -O`;
    # raise explicitly so the validation always runs.
    if len(fnames_in) != len(fnames_gt):
        raise ValueError('input/ground-truth file counts differ: '
                         '{0} vs {1}'.format(len(fnames_in), len(fnames_gt)))
    n = len(fnames_in)

    # Contiguous train / validation / test split by percentage.
    train_start = 0
    train_stop = int(n * (args.train / 100))
    train_in = fnames_in[train_start:train_stop]
    train_gt = fnames_gt[train_start:train_stop]
    train_generator = ParallelDataGenerator(train_in, train_gt, args.batchsize,
                                            args.augmentate)

    validation_start = train_stop
    validation_stop = validation_start + int(n * (args.val / 100))
    validation_in = fnames_in[validation_start:validation_stop]
    validation_gt = fnames_gt[validation_start:validation_stop]
    validation_generator = ParallelDataGenerator(validation_in, validation_gt,
                                                 args.batchsize,
                                                 args.augmentate)

    # The remainder is the test set.
    test_in = fnames_in[validation_stop:n]
    test_gt = fnames_gt[validation_stop:n]
    test_generator = ParallelDataGenerator(test_in, test_gt, args.batchsize,
                                           args.augmentate)

    # Build the model; wrap for multi-GPU only when requested. Compilation
    # is identical either way, so do it once (was duplicated per branch).
    original_model = unet()
    if args.gpus == 1:
        model = original_model
    else:
        model = multi_gpu_model(original_model, gpus=args.gpus)
    model.compile(optimizer=Adam(lr=1e-4),
                  loss=dice_coef_loss,
                  metrics=[dice_coef, jacard_coef, 'accuracy'])
    callbacks = create_callbacks(model, original_model, args)

    # extraprocesses == 0 -> run generators in the main process;
    # otherwise use that many worker processes. The fit/evaluate calls were
    # previously duplicated per branch with only these two flags differing.
    use_mp = args.extraprocesses != 0
    workers = args.extraprocesses if use_mp else 0
    model.fit_generator(
        generator=train_generator,
        steps_per_epoch=train_generator.__len__(
        ),  # Compatibility with old Keras versions.
        validation_data=validation_generator,
        validation_steps=validation_generator.__len__(
        ),  # Compatibility with old Keras versions.
        epochs=args.epochs,
        shuffle=True,
        callbacks=callbacks,
        use_multiprocessing=use_mp,
        workers=workers,
        max_queue_size=args.queuesize,
        verbose=1)
    metrics = model.evaluate_generator(generator=test_generator,
                                       use_multiprocessing=use_mp,
                                       workers=workers,
                                       max_queue_size=args.queuesize,
                                       verbose=1)

    print()
    print('total:')
    print('test_loss:       {0:.4f}'.format(metrics[0]))
    print('test_dice_coef:  {0:.4f}'.format(metrics[1]))
    print('test_jacar_coef: {0:.4f}'.format(metrics[2]))
    print('test_accuracy:   {0:.4f}'.format(metrics[3]))

    # Saving model.
    # NOTE(review): weights are written only when --debug is NON-empty,
    # which looks inverted -- confirm the intended condition.
    if args.debug != '':
        model.save_weights(args.weights)
    print("finished in {0:.2f} seconds".format(time.time() - start_time))
    # Explicit exit: the script sometimes hangs on lingering worker processes.
    sys.exit(0)