Example #1
# Assumed imports for this snippet; Unet, SegNet, DataGenerator and
# get_data_paths are project-local helpers.
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from tensorflow.keras.losses import categorical_crossentropy
from tensorflow.keras.optimizers import Adam


def main():
    # Source images are 1936 x 1216; they are resized to the network input below.
    input_size = (320, 480, 3)
    classes = 20
    train_dataset_x = '../seg_train_images/seg_train_images'
    train_dataset_y = '../seg_train_annotations/seg_train_annotations'
    test_size = 0.2
    batch_size = 8

    datasets_paths = get_data_paths(train_dataset_x, train_dataset_y)
    train_data, test_data = train_test_split(datasets_paths, test_size=test_size)
    net = Unet(input_size, classes)
    #net = SegNet(input_size, classes)
    net.summary()
    train_gen = DataGenerator(train_data, input_size, classes, batch_size)
    val_gen = DataGenerator(test_data, input_size, classes, batch_size)

    callbacks = [
        ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1),
        EarlyStopping(monitor='val_loss', min_delta=0, patience=9, verbose=1),
        ModelCheckpoint('checkpoint/ep{epoch:03d}-loss{loss:.5f}-val_loss{val_loss:.5f}.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    ]

    net.compile(optimizer=Adam(1e-3), loss=categorical_crossentropy)
    history = net.fit_generator(
        train_gen, 
        steps_per_epoch=train_gen.num_batches_per_epoch,
        validation_data=val_gen,
        validation_steps=val_gen.num_batches_per_epoch,
        initial_epoch=0,
        epochs=50,
        callbacks=callbacks
    )
    net.save_weights('checkpoint/first_stage.h5')

    # Second stage: re-split and fine-tune at a lower learning rate.
    # Note that the fresh split means the new validation set can overlap
    # the first stage's training data.
    train_data, test_data = train_test_split(datasets_paths, test_size=test_size)
    train_gen = DataGenerator(train_data, input_size, classes, batch_size)
    val_gen = DataGenerator(test_data, input_size, classes, batch_size)

    net.compile(optimizer=Adam(1e-4), loss=categorical_crossentropy)
    history = net.fit_generator(
        train_gen, 
        steps_per_epoch=train_gen.num_batches_per_epoch,
        validation_data=val_gen,
        validation_steps=val_gen.num_batches_per_epoch,
        initial_epoch=50,
        epochs=100,
        callbacks=callbacks
    )
    net.save_weights('checkpoint/final_stage.h5')
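
Example #1 assumes a project-local DataGenerator that exposes a num_batches_per_epoch attribute and yields (image, one-hot mask) batches. A minimal sketch of such a class, assuming a keras.utils.Sequence subclass, (image_path, mask_path) pairs in data_paths, and hypothetical load_image/load_mask helpers:

import numpy as np
from tensorflow.keras.utils import Sequence


class DataGenerator(Sequence):
    # A sketch, not the original class: data_paths is assumed to hold
    # (image_path, mask_path) pairs.
    def __init__(self, data_paths, input_size, classes, batch_size):
        self.data_paths = data_paths
        self.input_size = input_size
        self.classes = classes
        self.batch_size = batch_size
        self.num_batches_per_epoch = len(data_paths) // batch_size

    def __len__(self):
        return self.num_batches_per_epoch

    def __getitem__(self, idx):
        batch = self.data_paths[idx * self.batch_size:(idx + 1) * self.batch_size]
        h, w, _ = self.input_size
        x = np.stack([load_image(p, (h, w)) for p, m in batch])               # hypothetical helper
        y = np.stack([load_mask(m, (h, w), self.classes) for p, m in batch])  # one-hot (h, w, classes)
        return x, y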
Example #2
# Assumed imports for this snippet; DataSetWrapper, Unet and get_IOU are
# project-local helpers.
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm


def main(args):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    ### Hyperparameter settings ###
    epochs = args.epochs
    batch_size = args.batch_size
    num_workers = args.num_workers
    valid_ratio = args.valid_ratio
    threshold = args.threshold
    separable = args.separable
    down_method = args.down_method
    up_method = args.up_method

    ### DataLoader ###
    dataset = DataSetWrapper(batch_size, num_workers, valid_ratio)
    train_dl, valid_dl = dataset.get_data_loaders(train=True)

    ### Model: U-Net ###
    model = Unet(input_dim=1,
                 separable=separable,
                 down_method=down_method,
                 up_method=up_method)
    model.summary()
    model = nn.DataParallel(model).to(device)

    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr,
                           weight_decay=args.weight_decay)
    # scheduler.step() is called once per epoch below, so anneal over the
    # whole run; T_max=len(train_dl) would only make sense with per-batch steps.
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                     T_max=epochs,
                                                     eta_min=0,
                                                     last_epoch=-1)
    criterion = nn.BCEWithLogitsLoss()
    train_losses = []
    val_losses = []

    ### Train & validation ###
    mIOU_list = []
    best_mIOU = 0.
    step = 0

    for epoch in range(epochs):

        ### train ###
        pbar = tqdm(train_dl)
        model.train()
        losses = []

        for (img, label) in pbar:
            optimizer.zero_grad()
            img, label = img.to(device), label.to(device)
            pred = model(img)
            # pred = Padding()(pred, label.size(3))
            loss = criterion(pred, label)
            loss.backward()
            optimizer.step()
            losses.append(loss.item())
            pbar.set_description(
                f'E: {epoch + 1} | L: {loss.item():.4f} | lr: {scheduler.get_last_lr()[0]:.7f}'
            )
        scheduler.step()
        # Record the mean training loss every 10 epochs.
        if (epoch + 1) % 10 == 0:
            train_losses.append(sum(losses) / len(losses))

        ### validation ###
        with torch.no_grad():
            model.eval()
            mIOU = []
            losses = []
            pbar = tqdm(valid_dl)
            for (img, label) in pbar:
                img, label = img.to(device), label.to(device)
                pred = model(img)

                loss = criterion(pred, label)

                mIOU.append(get_IOU(pred, label, threshold=threshold))
                losses.append(loss.item())

            mIOU = sum(mIOU) / len(mIOU)
            mIOU_list.append(mIOU)
            val_loss = sum(losses) / len(losses)
            # Record the mean validation loss every 10 epochs.
            if (epoch + 1) % 10 == 0:
                val_losses.append(val_loss)

            print(
                f'VL: {val_loss:.4f} | mIOU: {100 * mIOU:.1f}% | best mIOU: {100 * best_mIOU:.1f}%'
            )

        ### Checkpointing & early stopping ###
        if mIOU > best_mIOU:
            best_mIOU = mIOU
            save_state = {
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict(),
                'train_losses': train_losses,
                'val_losses': val_losses,
                'best_mIOU': best_mIOU
            }
            torch.save(
                save_state,
                f'./checkpoint/{down_method}_{up_method}_{separable}.ckpt')
            step = 0
        else:
            step += 1
            if step > args.patience:
                print('Early stopped...')
                return
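
Example #2 depends on a project-local get_IOU, and expects an argparse namespace providing epochs, batch_size, num_workers, valid_ratio, threshold, separable, down_method, up_method, lr, weight_decay and patience. A minimal sketch of the metric, assuming pred holds raw logits (the model is trained with BCEWithLogitsLoss) and label is a binary mask:

import torch

def get_IOU(pred, label, threshold=0.5, eps=1e-7):
    # Binarize sigmoid probabilities, then compute intersection-over-union
    # per sample and average over the batch.
    mask = (torch.sigmoid(pred) > threshold).float()
    intersection = (mask * label).sum(dim=(1, 2, 3))
    union = ((mask + label) > 0).float().sum(dim=(1, 2, 3))
    return ((intersection + eps) / (union + eps)).mean().item()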
Example #3
    # Validation images are only rescaled; training images also get augmentation.
    valid_data_gen_args = dict(rescale=1. / 255)
    data_gen_args = dict(rescale=1. / 255,
                         rotation_range=1,
                         width_shift_range=0.05,
                         height_shift_range=0.2,
                         shear_range=0.05,
                         zoom_range=0.05,
                         horizontal_flip=True,
                         fill_mode='constant',
                         cval=0)

    input_size = (512, 512, 1)
    batch = 8
    model = Unet(input_size=input_size)
    model.summary()
    target_size = input_size[:2]  # first two entries: (height, width)
    train_generator = load_data.trainGenerator(batch,
                                               train_dir,
                                               data_gen_args,
                                               image_folder='image',
                                               mask_folder='label',
                                               target_size=target_size)
    valid_generator = load_data.trainGenerator(1,
                                               validation_dir,
                                               valid_data_gen_args,
                                               image_folder='image',
                                               mask_folder='label',
                                               target_size=target_size)
    history = LossHistory()
    # The remaining arguments are assumed; the original snippet is truncated here.
    model.fit_generator(train_generator,
                        steps_per_epoch=300,
                        epochs=10,
                        validation_data=valid_generator,
                        validation_steps=50,
                        callbacks=[history])
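
load_data.trainGenerator is project-local. The usual pattern behind such a helper pairs two ImageDataGenerator streams on a shared seed so each image and its mask receive identical augmentations; a sketch under that assumption:

from keras.preprocessing.image import ImageDataGenerator

def trainGenerator(batch_size, data_dir, aug_args, image_folder, mask_folder,
                   target_size, seed=1):
    # The shared seed keeps the image and mask streams in lockstep.
    image_gen = ImageDataGenerator(**aug_args).flow_from_directory(
        data_dir, classes=[image_folder], class_mode=None,
        color_mode='grayscale', target_size=target_size,
        batch_size=batch_size, seed=seed)
    mask_gen = ImageDataGenerator(**aug_args).flow_from_directory(
        data_dir, classes=[mask_folder], class_mode=None,
        color_mode='grayscale', target_size=target_size,
        batch_size=batch_size, seed=seed)
    for img, mask in zip(image_gen, mask_gen):
        yield img, mask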
Example #4
xval, yval = np.array(valid_images,
                      dtype=np.float32), np.array(valid_labels,
                                                  dtype=np.float32)
xtrain, ytrain = np.array(train_images,
                          dtype=np.float32), np.array(train_labels,
                                                      dtype=np.float32)
print(xtrain.shape, 'images and', ytrain.shape, 'masks')

unet = Unet(DESIRED_SIZE,
            DESIRED_SIZE,
            nclasses=NUM_CLASSES,
            filters=UNET_FILTERS)
print(unet.output_shape)

unet.summary()
# BCE is a misnomer: True selects sparse categorical cross-entropy,
# False selects the focal loss below.
BCE = False
print(xtrain.shape)  # sanity check after conversion
if BCE:
    unet.compile(optimizer='adam',
                 loss="sparse_categorical_crossentropy",
                 metrics=['accuracy'])
    # Train on the float32 arrays built above.
    history = unet.fit(xtrain,
                       ytrain,
                       batch_size=BATCH_SIZE,
                       epochs=EPOCHS,
                       validation_data=(xval, yval))
else:
    # SparseCategoricalFocalLoss comes from the focal-loss package; the
    # closing arguments and the fit call are assumed, as the original
    # snippet is truncated here.
    unet.compile(optimizer='adam',
                 loss=SparseCategoricalFocalLoss(gamma=2),
                 metrics=['accuracy'])
    history = unet.fit(xtrain,
                       ytrain,
                       batch_size=BATCH_SIZE,
                       epochs=EPOCHS,
                       validation_data=(xval, yval))
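
For reference, focal loss down-weights pixels the model already classifies confidently, which helps with class imbalance in segmentation. A minimal NumPy sketch of the per-pixel term that SparseCategoricalFocalLoss (from the focal-loss package) implements, with gamma=2 as above:

import numpy as np

def sparse_categorical_focal_loss(probs, labels, gamma=2.0):
    # probs: (N, num_classes) softmax outputs; labels: (N,) integer class ids.
    p_t = probs[np.arange(len(labels)), labels]  # probability of the true class
    return np.mean(-((1.0 - p_t) ** gamma) * np.log(p_t))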