Code example #1
File: tests_augment.py  Project: CovertLab/cellunet
def train(image_list, labels_list, output, patchsize=256, nsamples=10):
    li_image, li_labels = [], []
    for image_path, labels_path in zip(image_list, labels_list):
        image, labels = imread(image_path), imread(labels_path).astype(
            np.uint8)
        image = normalize(image)
        if image.ndim == 2:
            image = np.expand_dims(image, -1)
        elif image.ndim == 3:
            image = np.moveaxis(image, 0, -1)
        li_image.append(image)
        li_labels.append(labels)

    ecoords = pick_coords_list(nsamples, li_labels, patchsize, patchsize)
    li_labels = [conv_labels2dto3d(lb) for lb in li_labels]
    x_tests, y_tests = extract_patch_list(li_image, li_labels, ecoords,
                                          patchsize, patchsize)

    from train import augment_pipe
    make_outputdir(output)

    for i in range(nsamples):
        x, y = x_tests[i], y_tests[i]
        for aug in augment_pipe:
            x, y = aug(x, y)
        tiff.imsave(join(output, 'x{0:04}.tif'.format(i)),
                    x.astype(np.float32))
        tiff.imsave(join(output, 'y{0:04}.tif'.format(i)), y.astype(np.uint16))
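A minimal call sketch for the function above, assuming tests_augment.py is importable and that the (hypothetical) data/raw and data/labels directories hold matching image/label TIFF pairs; it writes the augmented patch pairs x0000.tif / y0000.tif, ... into the output directory.

from glob import glob

from tests_augment import train

# Hypothetical paths; the images and their uint8 label masks are assumed to
# pair up after sorting.
images = sorted(glob('data/raw/*.tif'))
labels = sorted(glob('data/labels/*.tif'))
train(images, labels, output='augment_check', patchsize=256, nsamples=10)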
Code example #2
File: unet_predict.py  Project: t10823gm/CellTK
def predict(img_path, weight_path):
    x = imread(img_path)
    x = normalize(x)

    if x.ndim == 2:
        x = np.expand_dims(x, -1)
    elif x.ndim == 3:
        x = np.moveaxis(x, 0, -1)
    x = np.expand_dims(x, 0)
    num_colors = x.shape[-1]

    x, hpadding, wpadding = pad_image(x)

    model = _model_builder.get_model(x.shape[1], x.shape[2], num_colors, activation=None)
    model.load_weights(weight_path)
    predictions = model.predict(x, batch_size=1)
    predictions = [predictions[0, :, :, i] for i in range(predictions.shape[-1])]

    # resize predictions to match image dimensions (i.e. remove padding)
    height = np.shape(predictions[0])[0]
    width = np.shape(predictions[0])[1]
    predictions = [p[hpadding[0]:height-hpadding[1], wpadding[0]:width-wpadding[1]] for p in predictions]

    predictions = normalize_predictions(predictions)
    return predictions
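A small driver sketch for the function above, assuming unet_predict.py is importable and that the model emits one probability map per output class (background / interior / boundary in this project); the image and weight file names are hypothetical.

import numpy as np
import tifffile as tiff

from unet_predict import predict

# Hypothetical input image and trained weights.
maps = predict('sample.tif', 'cnn_model_weights.hdf5')
for i, p in enumerate(maps):
    tiff.imsave('pred_class{0}.tif'.format(i), p.astype(np.float32))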
Code example #3
def predict(img_path, weight_path, zoom_factor=1):
    x = imread(img_path)
    orig_size = x.shape
    if zoom_factor != 1:
        # FIXME. It probably does not work for 3D shape.
        x = rescale(x, 1/zoom_factor, preserve_range=True, anti_aliasing=True)
    x = normalize(x)

    if x.ndim == 2:
        x = np.expand_dims(x, -1)
    elif x.ndim == 3:
        x = np.moveaxis(x, 0, -1)
    x = np.expand_dims(x, 0)

    num_colors = x.shape[-1]

    x, hpadding, wpadding = pad_image(x)

    model = utils.model_builder.get_model(x.shape[1], x.shape[2], num_colors, activation=None)
    model.load_weights(weight_path)
    predictions = model.predict(x, batch_size=1)
    predictions = [predictions[0, :, :, i] for i in range(predictions.shape[-1])]

    # resize predictions to match image dimensions (i.e. remove padding)
    height = predictions[0].shape[0]
    width = predictions[0].shape[1]
    predictions = [p[hpadding[0]:height-hpadding[1], wpadding[0]:width-wpadding[1]] for p in predictions]

    if zoom_factor != 1:
        predictions = [resize(p, orig_size, order=3, preserve_range=True, anti_aliasing=False).astype(np.float32) for p in predictions]
    predictions = normalize_predictions(predictions)
    return predictions
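The zoom_factor variant can be driven the same way, with the function above in scope; a sketch with hypothetical file names, downsampling 2x before inference and relying on the function to resize the maps back to the original image size afterwards.

# Hypothetical paths; zoom_factor=2 halves the image before the network runs
# and resizes each returned probability map back to the original shape.
maps = predict('sample.tif', 'cnn_model_weights.hdf5', zoom_factor=2)

Keeping the resize inside predict means callers always receive maps in the coordinate frame of the input image, regardless of the zoom factor used for inference.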
Code example #4
def predict(img_path, model_path, weight_path):
    x = imread(img_path)

    if x.ndim == 2:
        x = np.expand_dims(x, -1)
    elif x.ndim == 3:
        x = np.moveaxis(x, 0, -1)
    x = np.expand_dims(x, 0)

    model = load_model_py(model_path)
    model = convert_model_patch2full(model)
    model.load_weights(weight_path)

    model.summary()
    # Build a backend function that feeds the network input together with the
    # learning-phase flag and returns the output of the last layer.
    evaluate_model = backend.function(
        [model.layers[0].input,
         backend.learning_phase()], [model.layers[-1].output])

    # learning_phase=0 runs the model in inference (test) mode.
    cc = evaluate_model([x, 0])[0]

    # from tensorflow.contrib.keras import optimizers
    # opt = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    # model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    # cc = model.predict(x)
    return [cc[0, :, :, i] for i in range(cc.shape[-1])]
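A call sketch, assuming the model-definition script and weight file exist (the names below are hypothetical) and that this predict is in scope; the returned list holds one map per output channel.

# Hypothetical file names for the model-definition script and trained weights.
maps = predict('sample.tif', 'cnn_model.py', 'cnn_model_weights.hdf5')
background, interior, boundary = maps  # assuming a three-class model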
Code example #5
def train(image_list, labels_list, output, patchsize=256, nsteps=100,
          batch_size=16, nepochs=10, weights=None, loss_weights=[1.0, 1.0, 10.0], zoom_factor=1):

    li_image, li_labels = [], []
    for image_path, labels_path in zip(image_list, labels_list):
        image, labels = imread(image_path), imread(labels_path).astype(np.uint8)
        image = normalize(image)
        if image.ndim == 2:
            image = np.expand_dims(image, -1)
        elif image.ndim == 3:
            image = np.moveaxis(image, 0, -1)
        image, labels = predownsample(image, labels, zoom_factor)
        li_image.append(image)
        li_labels.append(labels)
    num_colors = li_image[0].shape[-1]

    num_tests = int(nsteps * batch_size * FRAC_TEST)
    ecoords = pick_coords_list(nsteps * batch_size, li_labels, patchsize, patchsize)
    li_labels = [conv_labels2dto3d(lb) for lb in li_labels]
    ecoords_tests, ecoords_train = ecoords[:num_tests], ecoords[num_tests:]
    x_tests, y_tests = extract_patch_list(li_image, li_labels, ecoords_tests, patchsize, patchsize)

    model = utils.model_builder.get_model(patchsize, patchsize, num_colors, activation=None)
    loss = lambda y_true, y_pred: weighted_crossentropy(y_true, y_pred, loss_weights)
    metrics = [keras.metrics.categorical_accuracy,
               utils.metrics.channel_recall(channel=0, name="background_recall"),
               utils.metrics.channel_precision(channel=0, name="background_precision"),
               utils.metrics.channel_recall(channel=1, name="interior_recall"),
               utils.metrics.channel_precision(channel=1, name="interior_precision"),
               utils.metrics.channel_recall(channel=2, name="boundary_recall"),
               utils.metrics.channel_precision(channel=2, name="boundary_precision"),
               ]
    if weights is not None:
        model.load_weights(weights)
    optimizer = keras.optimizers.RMSprop(lr=1e-4)
    model.compile(loss=loss, metrics=metrics, optimizer=optimizer)
    # model.summary()

    make_outputdir(output)
    callbacks = define_callbacks(output)

    datagen = PatchDataGeneratorList(augment_pipe)

    history = model.fit_generator(
        generator=datagen.flow(li_image, li_labels, ecoords_train, patchsize, patchsize, batch_size=batch_size, shuffle=True),
        steps_per_epoch=nsteps,
        epochs=nepochs,
        validation_data=(x_tests, y_tests),
        # validation_steps=len(ecoords_train)/batch_size,
        validation_steps=len(ecoords_train),
        callbacks=callbacks,
        verbose=1
    )

    # score = model.evaluate(x_tests, y_tests, batch_size=batch_size)
    rec = dict(zip(model.metrics_names, [history.history[i] for i in model.metrics_names]))
    np.savez(join(output, 'records.npz'), **rec)

    json_string = model.to_json()
    open(join(output, 'cnn_model.json'), 'w').write(json_string)
    model.save_weights(join(output, 'cnn_model_weights.hdf5'))
    yaml_string = model.to_yaml()
    open(join(output, 'cnn_model.yaml'), 'w').write(yaml_string)
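A training call sketch under the same hypothetical directory layout as in example #1; loss_weights up-weights the third (boundary) channel, which is what the default [1.0, 1.0, 10.0] already does.

from glob import glob

# Hypothetical paths to matching image/label TIFFs.
images = sorted(glob('data/raw/*.tif'))
labels = sorted(glob('data/labels/*.tif'))
train(images, labels, output='run01', patchsize=256, nsteps=100,
      batch_size=16, nepochs=10, loss_weights=[1.0, 1.0, 10.0])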
Code example #6
def train(image_list,
          labels_list,
          model_path,
          output,
          patchsize=61,
          nsamples=10000,
          batch_size=32,
          nepochs=100,
          frac_test=FRAC_TEST):
    assert patchsize % 2 == 1  # patchsize must be odd
    model = load_model_py(model_path)
    model.summary()

    li_image, li_labels = [], []
    for image_path, labels_path in zip(image_list, labels_list):
        image, labels = imread(image_path), imread(labels_path).astype(
            np.uint8)
        if image.ndim == 2:
            image = np.expand_dims(image, -1)
        elif image.ndim == 3:
            image = np.moveaxis(image, 0, -1)
        li_image.append(image)
        li_labels.append(labels)

    num_tests = int(nsamples * frac_test)
    ecoords = pick_coords_list(nsamples, li_labels, patchsize, patchsize)
    ecoords_tests, ecoords_train = ecoords[:num_tests], ecoords[num_tests:]
    x_tests, y_tests = extract_patch_list(li_image, li_labels, ecoords_tests,
                                          patchsize, patchsize)
    li_image = [np.expand_dims(i, 0) for i in li_image]

    make_outputdir(output)
    opt = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=opt,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    callbacksets = define_callbacks(output, batch_size)

    datagen = PatchDataGeneratorList(rotation_range=90,
                                     shear_range=0,
                                     horizontal_flip=True,
                                     vertical_flip=True)
    history = model.fit_generator(
        datagen.flow(li_image,
                     li_labels,
                     ecoords_train,
                     patchsize,
                     patchsize,
                     batch_size=batch_size,
                     shuffle=True),
        steps_per_epoch=len(ecoords_train) / batch_size,
        epochs=nepochs,
        validation_data=(x_tests, y_tests),
        validation_steps=len(ecoords_train) / batch_size,
        callbacks=callbacksets)

    score = model.evaluate(x_tests, y_tests, batch_size=batch_size)
    print('score[loss, accuracy]:', score)
    rec = dict(acc=history.history['acc'],
               val_acc=history.history['val_acc'],
               loss=history.history['loss'],
               val_loss=history.history['val_loss'])
    np.savez(join(output, 'records.npz'), **rec)

    json_string = model.to_json()
    open(join(output, 'cnn_model.json'), 'w').write(json_string)
    model.save_weights(join(output, 'cnn_model_weights.hdf5'))
    yaml_string = model.to_yaml()
    open(join(output, 'cnn_model.yaml'), 'w').write(yaml_string)
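Each training variant above saves the architecture and the weights separately; a reload sketch, assuming a Keras version that still provides model_from_json and a model built only from standard layers ('run01' is a hypothetical output directory).

from keras.models import model_from_json

with open('run01/cnn_model.json') as f:
    model = model_from_json(f.read())
model.load_weights('run01/cnn_model_weights.hdf5')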