# Example #1
def init():
    """Load the PredNet pipeline into module globals for scoring.

    Populates ``prednet_model`` (a Keras model that outputs predicted
    frames) and ``logistic_regression_model`` (classifies anomalies from
    the PredNet output).  Presumably an Azure ML scoring-script entry
    point — TODO confirm against the deployment config.
    """
    global prednet_model, logistic_regression_model

    nt = 10  # number of timesteps per input sequence

    # Logistic regression model for detecting anomalies based on model output.
    model_path = Model.get_model_path("logistic_regression")
    logistic_regression_model = joblib.load(model_path)

    model_root = Model.get_model_path('prednet_UCSDped1')
    print(model_root)

    # Read the architecture JSON inside a context manager so the file
    # handle is closed even if read/parse fails (the original leaked the
    # handle on error because close() was only reached on success).
    with open(os.path.join(model_root, 'model.json'), 'r') as json_file:
        model_json = json_file.read()
    trained_model = model_from_json(model_json,
                                    custom_objects={"PredNet": PredNet})
    # Load the trained weights into the reconstructed model.
    trained_model.load_weights(os.path.join(model_root, "weights.hdf5"))

    # Create testing model (to output predictions instead of errors).
    layer_config = trained_model.layers[1].get_config()
    layer_config['output_mode'] = 'prediction'
    test_prednet = PredNet(weights=trained_model.layers[1].get_weights(),
                           **layer_config)
    input_shape = list(trained_model.layers[0].batch_input_shape[1:])
    input_shape[0] = nt  # pin the sequence length expected at inference
    inputs = Input(shape=tuple(input_shape))
    predictions = test_prednet(inputs)
    prednet_model = Model_keras(inputs=inputs, outputs=predictions)
def get_unet():
    """Build and compile a 5-level U-Net for binary segmentation.

    Input is a variable-size 3-channel image; output is a single-channel
    sigmoid mask.  Upsampling uses ``UpSampling2D`` plus skip-connection
    concatenation; compiled with Adam and the Dice-coefficient loss.
    """
    def _double_conv(filters, tensor):
        # Two stacked 3x3 ReLU convolutions, 'same' padding.
        tensor = Conv2D(filters, (3, 3), activation='relu',
                        padding='same')(tensor)
        return Conv2D(filters, (3, 3), activation='relu',
                      padding='same')(tensor)

    inputs = Input((None, None, 3))

    # Contracting path: double-conv then 2x2 max-pool at each level.
    conv1 = _double_conv(32, inputs)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = _double_conv(64, pool1)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = _double_conv(128, pool2)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = _double_conv(256, pool3)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    # Bottleneck.
    conv5 = _double_conv(512, pool4)

    # Expanding path: upsample, concatenate the skip connection, double-conv.
    conv6 = _double_conv(
        256, concatenate([UpSampling2D(size=(2, 2))(conv5), conv4], axis=3))
    conv7 = _double_conv(
        128, concatenate([UpSampling2D(size=(2, 2))(conv6), conv3], axis=3))
    conv8 = _double_conv(
        64, concatenate([UpSampling2D(size=(2, 2))(conv7), conv2], axis=3))
    conv9 = _double_conv(
        32, concatenate([UpSampling2D(size=(2, 2))(conv8), conv1], axis=3))

    # Per-pixel sigmoid -> binary segmentation mask.
    conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)

    model = Model_keras(inputs=[inputs], outputs=[conv10])
    model.compile(optimizer=Adam(lr=1e-5),
                  loss=dice_coef_loss,
                  metrics=[dice_coef])
    return model
# Example #3
def init():
    """Load the distilled SqueezeNet student into the global ``model``.

    Resolves the registered model directory, builds SqueezeNet with its
    trained weights restored straight from the weight file, then re-wraps
    it as a plain Keras model for inference.
    """
    global model

    root_dir = Model.get_model_path('kd_teach_the_student')
    print(root_dir)

    # SqueezeNet restores its own weights when given weight_file, so no
    # separate load_weights() call is needed.
    model = SqueezeNet(weight_decay=0.0,
                       image_size=299,
                       trainable=False,
                       weight_file=os.path.join(root_dir,
                                                "squeezenet_weights.hdf5"))

    # Re-wrap input/outputs as a plain Keras model for scoring.
    model = Model_keras(model.input, model.outputs)
# Example #4
def init():
    """Initialise the global PredNet and logistic-regression models.

    Sets ``prednet_model`` (Keras model emitting predicted frames) and
    ``logistic_regression_model`` (``None`` when the registered model is
    unavailable — the classifier is treated as optional).
    """
    global prednet_model, logistic_regression_model

    seq_len = 10  # timesteps per input sequence

    # Best-effort load of the anomaly classifier: fall back to None so
    # scoring can still run on raw PredNet output.
    try:
        lr_path = Model.get_model_path("logistic_regression")
        logistic_regression_model = joblib.load(lr_path)
    except Exception as err:
        print(err)
        logistic_regression_model = None

    root = Model.get_model_path('prednet_UCSDped1')
    print(root)

    # Rebuild the trained PredNet from architecture JSON + weights.
    with open(os.path.join(root, 'model.json'), 'r') as fh:
        architecture = fh.read()

    trained = model_from_json(architecture,
                              custom_objects={"PredNet": PredNet})
    trained.load_weights(os.path.join(root, "weights.hdf5"))

    # Clone the PredNet layer in 'prediction' mode so the scoring model
    # outputs predicted frames rather than prediction errors.
    config = trained.layers[1].get_config()
    config['output_mode'] = 'prediction'
    prediction_layer = PredNet(weights=trained.layers[1].get_weights(),
                               **config)

    shape = list(trained.layers[0].batch_input_shape[1:])
    shape[0] = seq_len  # pin the sequence length expected at inference
    net_in = Input(shape=tuple(shape))
    prednet_model = Model_keras(inputs=net_in,
                                outputs=prediction_layer(net_in))
def get_unet(imgs_width, imgs_height):
    """Build and compile a 6-level U-Net with learned upsampling.

    Input is an ``(imgs_width, imgs_height, n_channels)`` image (module
    global ``n_channels``); output is a single-channel sigmoid mask.
    Upsampling uses ``Conv2DTranspose`` plus skip-connection
    concatenation; compiled with Adam, Dice loss, accuracy + Dice metrics.
    """
    def _double_conv(filters, tensor):
        # Two stacked 3x3 ReLU convolutions, 'same' padding.
        tensor = Conv2D(filters, (3, 3), activation='relu',
                        padding='same')(tensor)
        return Conv2D(filters, (3, 3), activation='relu',
                      padding='same')(tensor)

    def _up_merge(filters, tensor, skip):
        # 2x learned upsampling, then concatenate the skip connection.
        up = Conv2DTranspose(filters, (2, 2), strides=(2, 2),
                             padding='same')(tensor)
        return concatenate([up, skip], axis=3)

    inputs = Input((imgs_width, imgs_height, n_channels))

    # Contracting path: double-conv then 2x2 max-pool at each level.
    conv1 = _double_conv(32, inputs)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = _double_conv(64, pool1)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = _double_conv(128, pool2)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = _double_conv(256, pool3)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = _double_conv(512, pool4)
    pool5 = MaxPooling2D(pool_size=(2, 2))(conv5)

    # Bottleneck.
    conv6 = _double_conv(1024, pool5)

    # Expanding path with skip connections.
    conv7 = _double_conv(512, _up_merge(512, conv6, conv5))
    conv8 = _double_conv(256, _up_merge(256, conv7, conv4))
    conv9 = _double_conv(128, _up_merge(128, conv8, conv3))
    conv10 = _double_conv(64, _up_merge(64, conv9, conv2))
    conv11 = _double_conv(32, _up_merge(32, conv10, conv1))

    # Per-pixel sigmoid -> binary segmentation mask.
    conv12 = Conv2D(1, (1, 1), activation='sigmoid')(conv11)

    model = Model_keras(inputs=[inputs], outputs=[conv12])
    model.compile(optimizer=Adam(lr=1e-5),
                  loss=dice_coef_loss,
                  metrics=['accuracy', dice_coef])
    return model
# Create testing model (to output predictions)
# Rebuild the trained PredNet layer in 'prediction' mode so the model
# emits predicted frames rather than prediction errors.
layer_config = trained_model.layers[1].get_config()
layer_config["output_mode"] = "prediction"
# Older PredNet configs stored Keras 1's 'dim_ordering' key; fall back to
# it when 'data_format' is absent.
data_format = (
    layer_config["data_format"]
    if "data_format" in layer_config
    else layer_config["dim_ordering"]
)
prednet = PredNet(
    weights=trained_model.layers[1].get_weights(), **layer_config
)
# New input with the sequence length fixed to args.nt timesteps.
input_shape = list(trained_model.layers[0].batch_input_shape[1:])
input_shape[0] = args.nt
inputs = Input(shape=tuple(input_shape))
predictions = prednet(inputs)
test_model = Model_keras(inputs=inputs, outputs=predictions)

# Define Generator for test sequences
# NOTE(review): test_file / test_sources / args are defined earlier in the
# full script — not visible in this chunk.
test_generator = TestsetGenerator(
    test_file,
    test_sources,
    args.nt,
    data_format=data_format,
    N_seq=args.N_seq
)
X_test = test_generator.create_all()

# Apply model to the test sequences
X_hat = test_model.predict(X_test, args.batch_size)
# Move channels last for visualisation/metrics when the data was
# channels_first (presumably X_hat gets the same transpose below — the
# chunk ends here).
if data_format == "channels_first":
    X_test = np.transpose(X_test, (0, 1, 3, 4, 2))