def train_stage2(img_shape, epochs=1, batch_size=1, organ='Mandible'):

    model = unet.model(img_shape)

    # adam = tf.keras.optimizers.Adam(learning_rate=0.0003)
    adam = tf.keras.optimizers.Adam(learning_rate=0.00012, beta_1=0.9, beta_2=0.9, epsilon=1e-08, amsgrad=True)
    model.compile(optimizer=adam, loss=unet.bce_dice_loss, metrics=[unet.dice_loss])



    model.summary()
    DATA_DIR = path_DATA+organ+'/cutted/'

    train_set, test_set, vali_set = split(DATA_DIR)

    save_model_path = path_model+organ+'_step3.hdf5'
    cp = tf.keras.callbacks.ModelCheckpoint(filepath=save_model_path, monitor='val_dice_loss', save_best_only=True,
                                            verbose=1)

    history = model.fit_generator(generator=read_data(DATA_DIR, train_set, batch_size),
                                  steps_per_epoch=int(len(train_set) / batch_size) + 1,
                                  epochs=epochs, validation_data=read_data(DATA_DIR, vali_set, batch_size),
                                  validation_steps=int(len(vali_set) / batch_size) + 1,
                                  callbacks=[cp])
    K.clear_session()
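The loss helpers unet.bce_dice_loss and unet.dice_loss are defined elsewhere in that project. A minimal sketch of what such Keras-compatible losses typically look like (an assumption for illustration, not the author's exact code):

import tensorflow as tf

def dice_loss(y_true, y_pred, smooth=1.0):
    # Soft Dice loss over the flattened masks.
    y_true_f = tf.cast(tf.reshape(y_true, [-1]), tf.float32)
    y_pred_f = tf.cast(tf.reshape(y_pred, [-1]), tf.float32)
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    return 1.0 - (2.0 * intersection + smooth) / (
        tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)

def bce_dice_loss(y_true, y_pred):
    # Binary cross-entropy plus soft Dice, a common pairing for binary segmentation.
    bce = tf.reduce_mean(tf.keras.losses.binary_crossentropy(y_true, y_pred))
    return bce + dice_loss(y_true, y_pred)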
Example #2
def verify(model, case_id):
    kid_dataset = KidDataset(case_id,
                             transform=x_transform,
                             target_transform=y_transform)
    dataloaders = DataLoader(kid_dataset)  # batch_size defaults to 1
    model.eval()
    with torch.no_grad():
        num = 0
        case_dice = 0
        for x, label in dataloaders:
            y = model(x)
            case_dice = case_dice + dice(torch.squeeze(y) > 0.5, label)
            num = num + 1
        return case_dice / num
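verify() relies on a dice() helper that is not part of this snippet. A plausible sketch, assuming it takes a boolean prediction mask and the ground-truth mask and returns the Dice coefficient:

import torch

def dice(pred, target, eps=1e-6):
    # Dice coefficient between a (boolean) prediction mask and the target mask.
    pred = pred.float().reshape(-1)
    target = target.float().reshape(-1)
    intersection = (pred * target).sum()
    return (2.0 * intersection + eps) / (pred.sum() + target.sum() + eps)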
Example #3
def train_model(model, criterion, optimizer, dataload, num_epochs=5):
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        dataset_size = len(dataload.dataset)
        epoch_loss = 0
        step = 0  # minibatch counter
        for x, y in dataload:  # traverse the dataset in 100 steps, batch_size=4 per step
            optimizer.zero_grad()  # zero the gradients (dw, db, ...) for every minibatch
            inputs = x.to(device)
            labels = y.to(device)
            outputs = model(inputs)  # forward pass
            loss = criterion(outputs, labels)  # compute the loss
            loss.backward()  # backpropagate to compute the gradients
            optimizer.step()  # update the parameters once: every Optimizer implements step() to update all of its parameters
            epoch_loss += loss.item()
            step += 1
            print("%d/%d,train_loss:%0.3f" %
                  (step, dataset_size // dataload.batch_size, loss.item()))
            if epoch_loss < 0.001:
                break
        print("epoch %d loss:%0.3f" % (epoch, epoch_loss))
    return model
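A hedged usage sketch for train_model(); the tiny stand-in model and random tensors exist only so the example runs end to end, the real project would pass its U-Net and segmentation DataLoader instead:

import torch
from torch import nn, optim
from torch.utils.data import DataLoader, TensorDataset

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Stand-in model and data (assumptions for illustration only).
model = nn.Sequential(nn.Conv2d(1, 1, 3, padding=1), nn.Sigmoid()).to(device)
criterion = nn.BCELoss()  # matches the sigmoid output above
optimizer = optim.Adam(model.parameters(), lr=1e-4)
dummy_set = TensorDataset(torch.rand(16, 1, 64, 64),
                          torch.randint(0, 2, (16, 1, 64, 64)).float())
dataload = DataLoader(dummy_set, batch_size=4, shuffle=True)

model = train_model(model, criterion, optimizer, dataload, num_epochs=1)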
Example #4
    config.BATCH_SIZE,
    data_mean,
    data_std,
    shuffle_on_end=False,
    augment=False,
)

# test_gen = make_gen(test_inputs,
#                      test_truths,
#                      config.BATCH_SIZE,
#                      data_mean,
#                      data_std,
#                      shuffle_on_end=False,
#                      augment=False)

model = unet.model(output_channels=config.OUTPUT_CHANNELS)

model.compile(config.OPTIMIZER, config.LOSS, config.METRICS)

if config.INITIAL_WEIGHTS is not None:
    model.load_weights(config.INITIAL_WEIGHTS)

checkpoint_name = config.MODEL_SAVE + "_epoch_{epoch:02d}" + ".hdf5"

early_stopping = tf.keras.callbacks.EarlyStopping(
    patience=config.STOP_PATIENCE, verbose=1, restore_best_weights=True)
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(factor=config.LR_SCALE,
                                                 patience=config.LR_PATIENCE,
                                                 verbose=1)
model_checkpoint = tf.keras.callbacks.ModelCheckpoint(checkpoint_name,
                                                      save_weights_only=True)
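The rest of this example is cut off; a hedged sketch of how these callbacks are usually handed to Keras fit() (the generator and config names below are assumptions, not the original code):

# Sketch only: train_gen/val_gen would come from make_gen() above; the config
# fields used here are assumed.
history = model.fit(train_gen,
                    epochs=config.EPOCHS,
                    steps_per_epoch=len(train_inputs) // config.BATCH_SIZE,
                    validation_data=val_gen,
                    validation_steps=len(val_inputs) // config.BATCH_SIZE,
                    callbacks=[early_stopping, reduce_lr, model_checkpoint])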
Example #5
def load_data(image_path, mask_path):
    return get_image(image_path), get_image(mask_path, mask=True)


train_ds = tf.data.Dataset.from_tensor_slices((image_list, mask_list))
train_ds = train_ds.shuffle(256)
train_ds = train_ds.map(load_data, num_parallel_calls=AUTOTUNE)
train_ds = train_ds.batch(batch_size)
train_ds = train_ds.repeat()
train_ds = train_ds.prefetch(AUTOTUNE)
print(train_ds)


def dice_loss(pred, actual):
    num = 2 * tf.reduce_sum((pred * actual), axis=-1)
    den = tf.reduce_sum((pred + actual), axis=-1)
    return 1 - (num + 1) / (den + 1)


mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
    model = unet.model(512, 512, 1)
    model.compile(optimizer = tf.keras.optimizers.Adam(0.0001), 
                  loss = dice_loss, 
                  metrics = ['accuracy'])

history = model.fit(train_ds, epochs=5, steps_per_epoch=len(image_list)//batch_size, workers=16, use_multiprocessing=True)

model.save_weights('weights.h5')
print('Weights saved')
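get_image() is not shown in this example; a minimal sketch of a typical tf.data-friendly loader for it, assuming single-channel PNG files and the 512x512 size used by the model above (assumptions, not the author's code):

import tensorflow as tf

def get_image(path, mask=False, size=(512, 512)):
    # Read, decode, resize and scale an image (or mask) into [0, 1].
    img = tf.io.read_file(path)
    img = tf.image.decode_png(img, channels=1)
    img = tf.image.resize(img, size)
    img = tf.cast(img, tf.float32) / 255.0
    if mask:
        # Binarize the mask after resizing.
        img = tf.cast(img > 0.5, tf.float32)
    return img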
Example #6
def demo():
    #############################
    # 1. load dataset
    #############################
    image_dir = os.path.join(DATASET_TRAIN_PATH, 'image')
    mask_dir = os.path.join(DATASET_TRAIN_PATH, 'mask')
    image_path_list = [os.path.join(image_dir, v) for v in os.listdir(image_dir)]
    mask_path_list = [os.path.join(mask_dir, v.replace('.jpg', '.png')) for v in os.listdir(image_dir)]
    image_num = len(image_path_list)
    print("Training image num -> ", image_num)
    for image_path, mask_path in list(zip(image_path_list, mask_path_list))[:3]:
        print(image_path, mask_path)
        image = utils.cv_imread(image_path)
        mask = utils.cv_imread(mask_path, 1)
        print(image.shape, mask.shape)
        cv2.imshow('Sample', np.hstack([image, mask]))
        cv2.waitKey(1)
        pass

    batch_num = image_num // BATCH_SIZE
    data_idx_list = list(range(image_num))

    if not os.path.exists(OUT_IMAGE_DIR):
        os.makedirs(OUT_IMAGE_DIR)

    #############################
    # 2. Create Model
    #############################
    sess, tf_x, tf_y, tf_lr, tf_train, tf_logit, tf_predict, tf_cost, tf_optimizer, tf_saver = unet.model(H_IN, W_IN, C_IN, 8)
    global_step = 0

    for epoch in range(MAX_EPOCH):
        np.random.shuffle(data_idx_list)
        for step in range(batch_num):
            idx_list = data_idx_list[step * BATCH_SIZE: (step + 1) * BATCH_SIZE]
            image_batch, mask_batch = utils.get_batch(idx_list, image_path_list, mask_path_list, H_IN, W_IN)
            _, cost = sess.run([tf_optimizer, tf_cost],
                               feed_dict={tf_x: image_batch, tf_y: mask_batch, tf_train: True, tf_lr: LEARNING_RATE})
            if global_step % 10 == 0:
                print("Epoch %d: Step %d -> loss: %.5g" % (epoch, step, cost))
                predict_mask = sess.run(tf_predict, feed_dict={tf_x: image_batch, tf_train: False})
                compare_result_image = utils.create_compare_image(image_batch[0], mask_batch[0], predict_mask[0])
                cv2.imshow('Sample', compare_result_image)
                cv2.waitKey(1)
                cv2.imwrite(os.path.join(OUT_IMAGE_DIR, "train_step_%d.png" % global_step), compare_result_image)

            global_step += 1
    print("Finished. Save model to %s ..." % MODEL_SAVE_PATH)
    tf_saver.save(sess, MODEL_SAVE_PATH)
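utils.get_batch() and utils.cv_imread() are project helpers not shown here; a rough sketch of what get_batch() presumably does, using OpenCV (an assumption, the real helper may differ):

import cv2
import numpy as np

def get_batch(idx_list, image_path_list, mask_path_list, h, w):
    # Load one batch: resize images to (w, h) and keep masks as integer class maps.
    images, masks = [], []
    for i in idx_list:
        img = cv2.resize(cv2.imread(image_path_list[i]), (w, h))
        msk = cv2.resize(cv2.imread(mask_path_list[i], cv2.IMREAD_GRAYSCALE), (w, h),
                         interpolation=cv2.INTER_NEAREST)
        images.append(img.astype(np.float32) / 255.0)
        masks.append(msk.astype(np.int64))
    return np.stack(images), np.stack(masks)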
Example #7
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

pascal_val = PascalVOCSegmentation(source='val')
data = DataLoader(pascal_val, batch_size=4, shuffle=False, num_workers=0)

model.to(device)
model.eval()
count = 0
for images, labels in data:
    inputs = images.permute(0, 3, 1, 2)  # NHWC -> NCHW for the conv net

    inputs = inputs.float().to(device)
    labels = labels.float()

    # Predict
    outputs = model(inputs)

    # Convert to numpy
    images_np = images.numpy()
    labels_np = labels.numpy()
    outputs_np = outputs.detach().cpu().numpy()

    for sample in range(0, outputs_np.shape[0]):
        inputs_1 = images_np[sample, :, :, :3]
        inputs_2 = images_np[sample, :, :, 3]
        inputs_2 = inputs_2[:, :, np.newaxis]
        output = outputs_np[sample, 0]
        label = labels_np[sample]

        output = output[:, :, np.newaxis]
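        # (Hedged continuation) The original example is truncated here. A typical
        # follow-up would threshold the sigmoid output into a binary mask and save
        # it for inspection; the file name and the use of cv2 below are assumptions.
        pred_mask = (output[:, :, 0] > 0.5).astype(np.uint8) * 255
        cv2.imwrite('prediction_%d.png' % sample, pred_mask)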
Example #8
    def train_model(model, criterion, optimizer, scheduler, num_epochs=50):
        since = time.time()

        best_model_wts = copy.deepcopy(model.state_dict())
        best_acc = 0.0
        best_loss = 100.0

        for epoch in range(num_epochs):
            print('Epoch {}/{}'.format(epoch, num_epochs - 1))

            for phase in ['train', 'val']:
                if phase == 'train':
                    model.train()
                else:
                    if epoch % 5 != 0:  # only run the validation phase every 5th epoch
                        break
                    model.eval()

                running_loss = 0.0
                running_corrects = 0

                count = 0
                for inputs, labels in dataloaders[phase]:
                    count += 1

                    inputs = inputs.permute(0, 3, 1, 2)
                    labels = labels.permute(0, 3, 1, 2)

                    inputs = inputs.to(device).float()
                    labels = labels.to(device).float()

                    optimizer.zero_grad()

                    with torch.set_grad_enabled(phase == 'train'):
                        outputs = model(inputs)

                        # Visualize
                        outputs_t = outputs.permute(0, 2, 3, 1)
                        outputs_np = outputs_t.cpu().detach().numpy()
                        out_0 = outputs_np[0]
                        cv2.imshow('out_0', out_0)

                        #inputs_t = inputs.permute(0, 2, 3, 1)
                        #inputs_np = inputs_t.cpu().detach().numpy()
                        #in_0 = inputs_np[0]
                        #cv2.imshow('in_0', in_0)

                        labels_t = labels.permute(0, 2, 3, 1)
                        labels_np = labels_t.cpu().detach().numpy()
                        lab_0 = labels_np[0]

                        cv2.imshow('lab_0', lab_0)
                        cv2.waitKey(2)

                        loss = criterion(outputs, labels)

                        if phase == 'train':
                            loss.backward()
                            optimizer.step()

                        _, preds = torch.max(outputs, 1)
                        _, labels_pred = torch.max(labels, 1)

                    running_loss += loss.item() * inputs.size(0)
                    epoch_loss = running_loss / count

                    print('{} batch: {} Loss: {:.4f} Acc: {:.4f}.............'.
                          format(phase, count, epoch_loss, 0),
                          end='\r')

                if phase == 'train':
                    scheduler.step()

                if phase == 'val' and epoch_loss < best_loss:
                    print('')
                    print('saving checkpoint')
                    best_loss = epoch_loss
                    best_model_wts = copy.deepcopy(model.state_dict())
                    torch.save(model.state_dict(),
                               './checkpoints/unet_8_16_n.pt')
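A hedged sketch of the surrounding setup this training loop expects: a dataloaders dict keyed by phase, an optimizer, and a StepLR scheduler. The stand-in model and random NHWC tensors are assumptions so the snippet is self-contained:

import torch
from torch import nn, optim
from torch.utils.data import DataLoader, TensorDataset

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Stand-in NHWC data; the loop above permutes it to NCHW before the forward pass.
def make_loader():
    images = torch.rand(8, 64, 64, 3)
    masks = torch.randint(0, 2, (8, 64, 64, 1)).float()
    return DataLoader(TensorDataset(images, masks), batch_size=4)

dataloaders = {'train': make_loader(), 'val': make_loader()}

model = nn.Sequential(nn.Conv2d(3, 1, 3, padding=1), nn.Sigmoid()).to(device)
criterion = nn.BCELoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
# model = train_model(model, criterion, optimizer, scheduler, num_epochs=2)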
Example #9
                                 batch_size,
                                 epochs,
                                 target_size,
                                 train=False,
                                 classes=CLASSES,
                                 shard_size=hvd.size(),
                                 shard_rank=hvd.rank())
#----------CREATE MODEL AND BEGIN TRAINING----------#
time = datetime.now().strftime("%d_%m_%Y_%H_%M_%S")
# Depending on model chosen, get the right model and create the optimizer for it
if model_name == 'unet':
    if args.learning_rate:
        learning_rate = args.learning_rate * hvd.size()
    else:
        learning_rate = 0.01 * hvd.size()
    model = unet.model(input_size=target_size, num_classes=CLASSES)
    optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
elif model_name == 'separable_unet':
    if args.learning_rate:
        learning_rate = args.learning_rate * hvd.size()
    else:
        learning_rate = 0.003 * hvd.size()
    model = separable_unet.model(input_size=target_size, num_classes=CLASSES)
    optimizer = keras.optimizers.SGD(learning_rate=learning_rate,
                                     momentum=0.9,
                                     nesterov=True)
elif model_name == 'bayes_segnet':
    if args.learning_rate:
        learning_rate = args.learning_rate * hvd.size()
    else:
        learning_rate = 0.001 * hvd.size()
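The remaining branches of this example are cut off. For context, the usual Horovod-Keras wiring that follows this kind of per-worker learning-rate scaling is sketched below; it assumes horovod.tensorflow.keras is imported as hvd, and the loss/metrics here are placeholders, not the original configuration:

# Sketch of the standard Horovod pattern (not the original, truncated code).
optimizer = hvd.DistributedOptimizer(optimizer)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

callbacks = [
    # Broadcast initial variables from rank 0 so every worker starts identically.
    hvd.callbacks.BroadcastGlobalVariablesCallback(0),
    # Average metrics across workers at the end of each epoch.
    hvd.callbacks.MetricAverageCallback(),
]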
Example #10
            net: original model
        """
        super().__init__()
        modules = list(net.children())[:-1]
        n_feats = list(net.children())[-1].in_features
        # add custom head
        modules += [
            nn.Sequential(Flatten(), nn.BatchNorm1d(81536), nn.Dropout(p),
                          nn.Linear(81536, n_feats),
                          nn.Linear(n_feats, num_classes), nn.Sigmoid())
        ]
        self.net = nn.Sequential(*modules)

    def forward(self, x):
        logits = self.net(x)
        return logits


if __name__ == '__main__':
    x = np.zeros((3, 3, 384, 576), dtype="f")
    x = torch.from_numpy(x)
    print("input shape:", x.size())
    model = get_model(model_type='FastFCN',
                      encoder='resnet50',
                      encoder_weights='imagenet',
                      activation=None,
                      n_classes=4,
                      task='segmentation')

    y = model(x)
    print("out shape:", y.size())