Example #1
def train(run, model_name, data_path, epochs, batch_size, mlflow_custom_log,
          log_as_onnx):
    x_train, y_train, x_test, y_test = utils.get_train_data(data_path)
    model = build_model()

    model.compile(optimizer="rmsprop",
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    model.summary()
    model.fit(x_train,
              y_train,
              epochs=epochs,
              batch_size=batch_size,
              verbose=0)

    test_loss, test_acc = model.evaluate(x_test, y_test)
    print("test_acc:", test_acc)
    print("test_loss:", test_loss)

    if mlflow_custom_log:
        mlflow.log_param("epochs", epochs)
        mlflow.log_param("batch_size", batch_size)

        mlflow.log_metric("test_acc", test_acc)
        mlflow.log_metric("test_loss", test_loss)
        mlflow.keras.log_model(model,
                               "keras-model",
                               registered_model_name=model_name)

        # write model summary
        summary = []
        model.summary(print_fn=summary.append)
        summary = "\n".join(summary)
        with open("model_summary.txt", "w") as f:
            f.write(summary)
        mlflow.log_artifact("model_summary.txt")

        # Save as TensorFlow SavedModel format
        path = "tensorflow-model"
        tf.keras.models.save_model(model,
                                   path,
                                   overwrite=True,
                                   include_optimizer=True)
        mlflow.log_artifact(path)
    else:
        utils.register_model(run, model_name)

    # write model architecture as YAML (Model.to_yaml() was removed in TF 2.6+;
    # use to_json() on newer versions)
    with open("model.yaml", "w") as f:
        f.write(model.to_yaml())
    mlflow.log_artifact("model.yaml")

    # MLflow - log onnx model
    if log_as_onnx:
        import onnx_utils
        mname = f"{model_name}_onnx" if model_name else None
        onnx_utils.log_model(model, "onnx-model", mname)

    # Sequential.predict_classes() was removed in newer TF/Keras releases;
    # taking the argmax over the predicted probabilities is equivalent
    predictions = model.predict(x_test).argmax(axis=-1)
    print("predictions:", predictions)
Example #2
def train(run, model_name, epochs, batch_size, mlflow_custom_log, log_as_onnx):
    x_train, y_train, x_test, y_test = utils.get_train_data()
    model = build_model()

    model.compile(
        optimizer='rmsprop',
        loss='categorical_crossentropy',
        metrics=['accuracy'])
    model.summary()
    model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, verbose=0)

    test_loss, test_acc = model.evaluate(x_test, y_test)
    print("test_acc:", test_acc)
    print("test_loss:", test_loss)

    if mlflow_custom_log:
        mlflow.log_param("epochs", epochs)
        mlflow.log_param("batch_size", batch_size)

        mlflow.log_metric("test_acc", test_acc)
        mlflow.log_metric("test_loss", test_loss)
        mlflow.keras.log_model(model, "keras-model", registered_model_name=model_name)

        # write model summary
        summary = []
        model.summary(print_fn=summary.append)
        summary = '\n'.join(summary)
        with open("model_summary.txt", "w") as f:
            f.write(summary)
        mlflow.log_artifact("model_summary.txt")
    else:
        utils.register_model(run, model_name)

    # write model as yaml file
    with open("model.yaml", "w") as f:
        f.write(model.to_yaml())
    mlflow.log_artifact("model.yaml")

    # MLflow - log onnx model
    if log_as_onnx:
        import onnx_utils
        mname = f"{model_name}_onnx" if model_name else None
        onnx_utils.log_model(model, "onnx-model", mname)

    predictions = model.predict(x_test).argmax(axis=-1)  # predict_classes() removed in newer Keras
    print("predictions:", predictions)
def main():
    batch_size = 2
    train_dir = 'D:/DATA/JHUBrain/Train/'
    val_dir = 'D:/DATA/JHUBrain/Val/'
    save_dir = 'ViTVNet_reg0.02_mse_diff/'
    lr = 0.0001
    epoch_start = 0
    max_epoch = 500
    cont_training = False
    config_vit = CONFIGS_ViT_seg['ViT-V-Net']
    reg_model = utils.register_model((160, 192, 224), 'nearest')
    reg_model.cuda()
    model = models.ViTVNet(config_vit, img_size=(160, 192, 224))
    if cont_training:
        epoch_start = 335
        model_dir = 'experiments/'+save_dir
        updated_lr = round(lr * np.power(1 - epoch_start / max_epoch, 0.9), 8)
        best_model = torch.load(model_dir + natsorted(os.listdir(model_dir))[0])['state_dict']
        model.load_state_dict(best_model)
    else:
        updated_lr = lr
    model.cuda()
    train_composed = transforms.Compose([trans.RandomFlip(0),
                                         trans.NumpyType((np.float32, np.float32)),
                                         ])

    val_composed = transforms.Compose([trans.Seg_norm(),  # rearrange segmentation labels to 1..46
                                       trans.NumpyType((np.float32, np.int16)),
                                       ])

    train_set = datasets.JHUBrainDataset(glob.glob(train_dir + '*.pkl'), transforms=train_composed)
    val_set = datasets.JHUBrainInferDataset(glob.glob(val_dir + '*.pkl'), transforms=val_composed)
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)
    val_loader = DataLoader(val_set, batch_size=1, shuffle=False, num_workers=4, pin_memory=True, drop_last=True)

    optimizer = optim.Adam(model.parameters(), lr=updated_lr, weight_decay=0, amsgrad=True)
    criterion = nn.MSELoss()
    criterions = [criterion]
    weights = [1]
    # prepare deformation loss
    criterions += [losses.Grad3d(penalty='l2')]
    weights += [0.02]
    best_mse = 0  # despite the name, this tracks the best validation Dice score
    writer = SummaryWriter(log_dir='ViTVNet_log')
    for epoch in range(epoch_start, max_epoch):
        print('Training Starts')
        '''
        Training
        '''
        loss_all = AverageMeter()
        idx = 0
        for data in train_loader:
            idx += 1
            model.train()
            adjust_learning_rate(optimizer, epoch, max_epoch, lr)
            data = [t.cuda() for t in data]
            x = data[0]
            y = data[1]
            x_in = torch.cat((x,y), dim=1)
            output = model(x_in)
            loss = 0
            loss_vals = []
            for n, loss_function in enumerate(criterions):
                curr_loss = loss_function(output[n], y) * weights[n]
                loss_vals.append(curr_loss)
                loss += curr_loss
            loss_all.update(loss.item(), y.numel())
            # compute gradients and take an optimizer step (the optimizer is Adam)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            del x_in
            del output
            # flip fixed and moving images
            loss = 0
            x_in = torch.cat((y, x), dim=1)
            output = model(x_in)
            for n, loss_function in enumerate(criterions):
                curr_loss = loss_function(output[n], x) * weights[n]
                loss_vals[n] += curr_loss
                loss += curr_loss
            loss_all.update(loss.item(), y.numel())
            # compute gradients and step the optimizer again for the flipped pair
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print('Iter {} of {} loss {:.4f}, Img Sim: {:.6f}, Reg: {:.6f}'.format(
                idx, len(train_loader), loss.item(),
                loss_vals[0].item() / 2, loss_vals[1].item() / 2))

        writer.add_scalar('Loss/train', loss_all.avg, epoch)
        print('Epoch {} loss {:.4f}'.format(epoch, loss_all.avg))
        '''
        Validation
        '''
        eval_dsc = AverageMeter()
        with torch.no_grad():
            for data in val_loader:
                model.eval()
                data = [t.cuda() for t in data]
                x = data[0]
                y = data[1]
                x_seg = data[2]
                y_seg = data[3]
                # x = x.squeeze(0).permute(1, 0, 2, 3)
                # y = y.squeeze(0).permute(1, 0, 2, 3)
                x_in = torch.cat((x, y), dim=1)
                output = model(x_in)
                def_out = reg_model([x_seg.cuda().float(), output[1].cuda()])
                dsc = utils.dice_val(def_out.long(), y_seg.long(), 46)
                eval_dsc.update(dsc.item(), x.size(0))
                print(eval_dsc.avg)
        best_mse = max(eval_dsc.avg, best_mse)
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_mse': best_mse,
            'optimizer': optimizer.state_dict(),
        }, save_dir='experiments/'+save_dir, filename='dsc{:.3f}.pth.tar'.format(eval_dsc.avg))
        writer.add_scalar('MSE/validate', eval_dsc.avg, epoch)  # tag says MSE, but the logged value is the Dice score
        plt.switch_backend('agg')
        pred_fig = comput_fig(def_out)
        x_fig = comput_fig(x_seg)
        tar_fig = comput_fig(y_seg)
        writer.add_figure('input', x_fig, epoch)
        plt.close(x_fig)
        writer.add_figure('ground truth', tar_fig, epoch)
        plt.close(tar_fig)
        writer.add_figure('prediction', pred_fig, epoch)
        plt.close(pred_fig)
        loss_all.reset()
    writer.close()
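The loop above assumes helpers such as AverageMeter and adjust_learning_rate that are defined elsewhere in the ViT-V-Net codebase. A rough reconstruction based only on how they are called here (the actual implementations may differ):

import math
import numpy as np

class AverageMeter:
    """Running average tracker; the .std attribute is read in Example #5."""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
        self.vals = []
        self.std = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
        self.vals.append(val)
        self.std = float(np.std(self.vals))

def adjust_learning_rate(optimizer, epoch, max_epoch, init_lr, power=0.9):
    # Polynomial decay, mirroring the lr * (1 - epoch / max_epoch) ** 0.9
    # formula used when resuming training above.
    for param_group in optimizer.param_groups:
        param_group['lr'] = round(init_lr * math.pow(1 - epoch / max_epoch, power), 8)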
Example #4
def train(run, model_name, data_path, epochs, batch_size, mlflow_custom_log,
          log_as_onnx, log_as_tensorflow_lite, log_as_tensorflow_js):
    print("mlflow_custom_log:", mlflow_custom_log)
    x_train, _, y_train, _ = utils.build_data(data_path)

    ncols = x_train.shape[1]

    def baseline_model():
        model = Sequential()
        model.add(
            Dense(ncols,
                  input_dim=ncols,
                  kernel_initializer='normal',
                  activation='relu'))
        model.add(Dense(1, kernel_initializer='normal'))
        model.compile(loss='mean_squared_error', optimizer='adam')
        return model

    model = baseline_model()

    if mlflow_custom_log:
        print("Logging with mlflow.log")
        mlflow.log_param("epochs", epochs)
        mlflow.log_param("batch_size", batch_size)
        mlflow.keras.log_model(model,
                               "tensorflow-model",
                               registered_model_name=model_name)
    else:
        utils.register_model(run, model_name)

    # MLflow - log as ONNX model
    if log_as_onnx:
        import onnx_utils
        mname = f"{model_name}_onnx" if model_name else None
        onnx_utils.log_model(model, "onnx-model", model_name=mname)

    # Save as TensorFlow Lite format
    if log_as_tensorflow_lite:
        converter = tf.lite.TFLiteConverter.from_keras_model(model)
        tflite_model = converter.convert()
        path = "model.tflite"
        with open(path, "wb") as f:
            f.write(tflite_model)
        mlflow.log_artifact(path, "tensorflow-lite-model")

    # Save as TensorFlow.js format
    if log_as_tensorflow_js:
        import tensorflowjs as tfjs
        path = "model.tfjs"
        tfjs.converters.save_keras_model(model, path)
        mlflow.log_artifact(path, "tensorflow-js-model")

    # Evaluate model
    estimator = KerasRegressor(build_fn=baseline_model,
                               epochs=epochs,
                               batch_size=batch_size,
                               verbose=0)
    kfold = KFold(n_splits=10)
    results = cross_val_score(estimator, x_train, y_train, cv=kfold)
    print(
        f"Baseline MSE: mean: {round(results.mean(),2)}  std: {round(results.std(),2)}"
    )
    if mlflow_custom_log:
        mlflow.log_metric("mse_mean", results.mean())
        mlflow.log_metric("mse_std", results.std())

    # Score (note: the standalone `model` was never fit above; only the clones
    # created by cross_val_score were trained, so these predictions come from
    # freshly initialized weights)
    data = x_train
    predictions = model.predict(data)
    predictions = pd.DataFrame(data=predictions, columns=["prediction"])
    print("predictions.shape:", predictions.shape)
    print("predictions:", predictions)
Example #5
def main():
    test_dir = 'D:/DATA/JHUBrain/Test/'
    model_idx = -1
    model_folder = 'ViTVNet_reg0.02_mse_diff/'
    model_dir = 'experiments/' + model_folder
    config_vit = CONFIGS_ViT_seg['ViT-V-Net']
    label_dict = utils.process_label()  # renamed from `dict` to avoid shadowing the built-in
    if os.path.exists('experiments/' + model_folder[:-1] + '.csv'):
        os.remove('experiments/' + model_folder[:-1] + '.csv')
    csv_writter(model_folder[:-1], 'experiments/' + model_folder[:-1])
    line = ''
    for i in range(46):
        line = line + ',' + label_dict[i]
    csv_writter(line, 'experiments/' + model_folder[:-1])
    model = models.ViTVNet(config_vit, img_size=(160, 192, 224))
    best_model = torch.load(
        model_dir + natsorted(os.listdir(model_dir))[model_idx])['state_dict']
    print('Best model: {}'.format(natsorted(os.listdir(model_dir))[model_idx]))
    model.load_state_dict(best_model)
    model.cuda()
    reg_model = utils.register_model((160, 192, 224), 'nearest')
    reg_model.cuda()
    test_composed = transforms.Compose([
        trans.Seg_norm(),
        trans.NumpyType((np.float32, np.int16)),
    ])
    test_set = datasets.JHUBrainInferDataset(glob.glob(test_dir + '*.pkl'),
                                             transforms=test_composed)
    test_loader = DataLoader(test_set,
                             batch_size=1,
                             shuffle=False,
                             num_workers=1,
                             pin_memory=True,
                             drop_last=True)
    eval_dsc_def = AverageMeter()
    eval_dsc_raw = AverageMeter()
    eval_det = AverageMeter()
    with torch.no_grad():
        stdy_idx = 0
        for data in test_loader:
            model.eval()
            data = [t.cuda() for t in data]
            x = data[0]
            y = data[1]
            x_seg = data[2]
            y_seg = data[3]

            x_in = torch.cat((x, y), dim=1)
            x_def, flow = model(x_in)
            def_out = reg_model([x_seg.cuda().float(), flow.cuda()])
            tar = y.detach().cpu().numpy()[0, 0, :, :, :]
            #jac_det = utils.jacobian_determinant(flow.detach().cpu().numpy()[0, :, :, :, :])
            line = utils.dice_val_substruct(def_out.long(), y_seg.long(),
                                            stdy_idx)
            line = line  #+','+str(np.sum(jac_det <= 0)/np.prod(tar.shape))
            csv_writter(line, 'experiments/' + model_folder[:-1])
            #eval_det.update(np.sum(jac_det <= 0) / np.prod(tar.shape), x.size(0))

            dsc_trans = utils.dice_val(def_out.long(), y_seg.long(), 46)
            dsc_raw = utils.dice_val(x_seg.long(), y_seg.long(), 46)
            print('Trans diff: {:.4f}, Raw diff: {:.4f}'.format(
                dsc_trans.item(), dsc_raw.item()))
            eval_dsc_def.update(dsc_trans.item(), x.size(0))
            eval_dsc_raw.update(dsc_raw.item(), x.size(0))
            stdy_idx += 1

            # flip moving and fixed images
            y_in = torch.cat((y, x), dim=1)
            y_def, flow = model(y_in)
            def_out = reg_model([y_seg.cuda().float(), flow.cuda()])
            tar = x.detach().cpu().numpy()[0, 0, :, :, :]

            #jac_det = utils.jacobian_determinant(flow.detach().cpu().numpy()[0, :, :, :, :])
            line = utils.dice_val_substruct(def_out.long(), x_seg.long(),
                                            stdy_idx)
            line = line  #+ ',' + str(np.sum(jac_det < 0) / np.prod(tar.shape))
            out = def_out.detach().cpu().numpy()[0, 0, :, :, :]
            #print('det < 0: {}'.format(np.sum(jac_det <= 0)/np.prod(tar.shape)))
            csv_writter(line, 'experiments/' + model_folder[:-1])
            #eval_det.update(np.sum(jac_det <= 0) / np.prod(tar.shape), x.size(0))

            dsc_trans = utils.dice_val(def_out.long(), x_seg.long(), 46)
            dsc_raw = utils.dice_val(y_seg.long(), x_seg.long(), 46)
            print('Trans diff: {:.4f}, Raw diff: {:.4f}'.format(
                dsc_trans.item(), dsc_raw.item()))
            eval_dsc_def.update(dsc_trans.item(), x.size(0))
            eval_dsc_raw.update(dsc_raw.item(), x.size(0))
            stdy_idx += 1

        print('Deformed DSC: {:.3f} +- {:.3f}, Affine DSC: {:.3f} +- {:.3f}'.
              format(eval_dsc_def.avg, eval_dsc_def.std, eval_dsc_raw.avg,
                     eval_dsc_raw.std))
        # eval_det is only populated when the commented-out Jacobian-determinant
        # lines above are re-enabled; otherwise this prints zeros
        print('deformed det: {}, std: {}'.format(eval_det.avg, eval_det.std))
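csv_writter (spelling as in the original codebase) is another helper assumed by Example #5. A minimal reconstruction consistent with its call sites, which pass a comma-separated line and a file stem:

def csv_writter(line, name):
    # Append one line to <name>.csv; hypothetical reconstruction.
    with open(name + '.csv', 'a') as f:
        f.write(line + '\n')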