Example #1
def test_delete_layer_same_layer_outputs():
    # Create all model layers
    input_1 = Input(shape=(10, ))
    dense_1 = Dense(3)
    dense_2 = Dense(3)
    dense_3 = Dense(3)
    dense_4 = Dense(1)
    # Create the base model
    x = dense_1(input_1)
    y = dense_2(x)
    x = dense_3(x)
    output_1 = dense_4(x)
    output_2 = dense_4(y)
    model_1 = utils.clean_copy(Model(input_1, [output_1, output_2]))
    # Create the expected modified model
    x = dense_1(input_1)
    y = dense_2(x)
    output_1 = dense_4(x)
    output_2 = dense_4(y)
    model_2_exp = utils.clean_copy(Model(input_1, [output_1, output_2]))
    # Delete layer dense_3
    model_2 = operations.delete_layer(model_1,
                                      model_1.get_layer(dense_3.name),
                                      copy=False)
    # Compare the modified model with the expected modified model
    assert compare_models(model_2, model_2_exp)
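
All of these tests exercise the same call; for reference, here is a minimal sketch of the kerassurgeon API they share (the layer names are illustrative, not taken from the tests above):

from keras.layers import Dense, Input
from keras.models import Model
from kerassurgeon import operations

inp = Input(shape=(3,))
x = Dense(3, name='middle')(inp)   # same input and output width, so the layer can be deleted
out = Dense(2)(x)
model = Model(inp, out)

# delete_layer returns a rebuilt model with the named layer removed and its
# inbound and outbound connections spliced together
model = operations.delete_layer(model, model.get_layer('middle'))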
Example #2
def test_delete_layer_reuse():
    # Create all model layers
    input_1 = Input(shape=[3])
    dense_1 = Dense(3)
    dense_2 = Dense(3)
    dense_3 = Dense(3)
    dense_4 = Dense(3)
    # Create the model
    x = dense_1(input_1)
    x = dense_2(x)
    x = dense_3(x)
    x = dense_2(x)
    output_1 = dense_4(x)
    # TODO: use clean_copy once keras issue 4160 has been fixed
    # model_1 = utils.clean_copy(Model(input_1, output_1))
    model_1 = Model(input_1, output_1)
    # Create the expected modified model
    x = dense_1(input_1)
    x = dense_3(x)
    output_2 = dense_4(x)
    # model_2_exp = utils.clean_copy(Model(input_1, output_2))
    model_2_exp = Model(input_1, output_2)
    # Delete layer dense_2
    model_2 = operations.delete_layer(model_1,
                                      model_1.get_layer(dense_2.name),
                                      copy=False)
    # Compare the modified model with the expected modified model
    assert compare_models(model_2, model_2_exp)
Example #3
def test_delete_layer():
    # Create all model layers
    input_1 = Input(shape=[7, 7, 1])
    conv2d_1 = Conv2D(3, [3, 3], data_format="channels_last")
    conv2d_2 = Conv2D(3, [3, 3], data_format="channels_last")
    flatten_1 = Flatten()
    dense_1 = Dense(3)
    dense_2 = Dense(3)
    dense_3 = Dense(3)
    dense_4 = Dense(1)
    # Create the base model
    x = conv2d_1(input_1)
    x = conv2d_2(x)
    x = flatten_1(x)
    x = dense_1(x)
    x = dense_2(x)
    x = dense_3(x)
    output_1 = dense_4(x)
    model_1 = utils.clean_copy(Model(input_1, output_1))
    # Create the expected modified model
    x = conv2d_1(input_1)
    x = conv2d_2(x)
    x = flatten_1(x)
    x = dense_1(x)
    x = dense_3(x)
    output_2 = dense_4(x)
    model_2_exp = utils.clean_copy(Model(input_1, output_2))
    # Delete layer dense_2
    model_2 = operations.delete_layer(model_1, model_1.get_layer(dense_2.name))
    # Compare the modified model with the expected modified model
    assert compare_models(model_2, model_2_exp)
Example #4
        nums.append(count)  # (preceding loop is truncated in the source; it counts samples per class)
mx = max(nums)
nums = [mx / n for n in nums]  # inverse-frequency class weights, scaled so the largest class gets 1.0
class_weights = {}
for i, wt in enumerate(nums):
    class_weights[i] = wt
    
"""
FIX INPUT SHAPE
"""
import keras
from keras.layers import Input
from keras.models import Model
from kerassurgeon.operations import delete_layer, insert_layer, delete_channels
new_input = Input(shape=(512, 512, 3))

new_output = mal_model(new_input)  # mal_model is loaded earlier in the (truncated) source
mal_model = Model(new_input, new_output)
mal_model.summary()
mal_model = delete_layer(mal_model, mal_model.layers[0])  # drop the first layer of the rebuilt model


"""
TRAIN
"""

mal_model.compile(
        loss='categorical_crossentropy',
        optimizer='adam',
        metrics=['accuracy', keras.metrics.Precision(), keras.metrics.Recall()]
    )

mal_model.fit_generator(
        train_generator,
        steps_per_epoch=500 // batch_size,
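
The fit_generator call above is truncated in the source. A hedged sketch of how the class_weights dict computed earlier is typically passed in (the epochs value is illustrative, not from the source):

mal_model.fit_generator(
    train_generator,
    steps_per_epoch=500 // batch_size,
    epochs=10,                    # illustrative value
    class_weight=class_weights,   # up-weight under-represented classes
)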
Example #5
def DeleteLayerType(model, layerType):
    """Delete every layer of the given type from the model."""
    # Iterate over the original layer list; with copy=False the surviving
    # layer objects are reused, so layer references stay valid across deletions.
    for curLayer in model.layers:
        if type(curLayer) is layerType:
            model = delete_layer(model=model, layer=curLayer, copy=False)

    return model
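
A hypothetical usage of DeleteLayerType, assuming the goal is to strip Keras Dropout layers before export (the model variable is illustrative):

from keras.layers import Dropout

# removes every Dropout layer in a single call
model = DeleteLayerType(model, Dropout)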
Example #6
def convert_keras_to_mlmodel(keras_model_path, coreml_model_path):

    import os
    import sys
    import importlib.machinery as imm
    from coremltools.converters.keras._keras_converter import convertToSpec
    from coremltools.models import MLModel, _MLMODEL_FULL_PRECISION, _MLMODEL_HALF_PRECISION
    #    from coremltools.models.utils import convert_double_to_float_multiarray_type

    from keras.models import load_model
    from kerassurgeon.operations import delete_layer

    sys.path.append(os.path.dirname(sys.argv[4]))

    # Import neural network code
    NN_file_name = os.path.splitext(os.path.basename(sys.argv[3]))[0]
    NN = imm.SourceFileLoader(NN_file_name, sys.argv[3]).load_module()

    try:
        NN_model_name = NN.Model_Name()
    except AttributeError:
        NN_model_name = NN_file_name

    try:
        NN_model_description = NN.Model_Description()
    except AttributeError:
        NN_model_description = None

    # Load custom layers if implemented in each Keras model.
    # Note that the imported NN may not define Custom_Layers(), so guard with try/except.
    # The value is a dictionary whose keys are expected to match the corresponding
    # values (the custom layer definitions).
    try:
        NN_custom_layers = NN.Custom_Layers()
    except AttributeError:
        NN_custom_layers = {}

    # Import Train.py to get custom loss and metrics
    Train_name = os.path.splitext(os.path.basename(sys.argv[4]))[0]
    Train_py = imm.SourceFileLoader(Train_name, sys.argv[4]).load_module()

    custom_loss = Train_py.get_loss()
    custom_metrics = Train_py.get_metrics()

    kpt, kex = os.path.splitext(keras_model_path)
    keras_model_path_temp = kpt + '_temp' + kex

    print('----------------------------------------------------------')
    print('NN model file path: {}'.format(sys.argv[3]))
    print('NN model name: {}'.format(NN_model_name))
    print('NN model description: {}'.format(NN_model_description))
    print('NN custom layers:')
    print(NN_custom_layers)
    print('Training file path and loss/metrics used:')
    print(sys.argv[4])
    print(custom_loss)
    print(custom_metrics)

    print('----------------------------------------------------------')
    print('Keras model file: {}'.format(keras_model_path))
    print('Keras model file temp: {}'.format(keras_model_path_temp))
    print('CoreML model file: {}'.format(coreml_model_path))

    print('----------------------------------------------------------')
    print('Keras custom layers implemented in AIAS for this code:')
    for k in conversion_func_in_AIAS:
        print(k)

    # Delete Dropout-like layers from the Keras model before conversion,
    # because these layers cause unexplained conversion failures in coremltools
    keras_model = load_model(keras_model_path,
                             custom_objects=dict(**custom_loss,
                                                 **custom_metrics,
                                                 **NN_custom_layers),
                             compile=False)

    print('----------------------------------------------------------')
    keras_model.summary()

    del_prefixs = [
        'gaussian_dropout', 'gaussian_noise', 'dropout', 'spatial_dropout2d'
    ]  # add layer-name prefixes here to define which layers are deleted

    for del_prefix in del_prefixs:
        idp = 1
        while True:
            try:
                layer = keras_model.get_layer('{}_{}'.format(del_prefix, idp))
            except ValueError:
                break
            print('Deleting layer: {}_{}'.format(del_prefix, idp))
            keras_model = delete_layer(model=keras_model,
                                       layer=layer,
                                       copy=False)
            idp += 1

    keras_model.summary()
    print('Saving temporary Keras model: {}'.format(keras_model_path_temp))
    keras_model.save(keras_model_path_temp)

    # Construct custom layers and conversion functions
    custom_layers = {}
    custom_conversion_func = {}
    print('----------------------------------------------------------')

    if NN_custom_layers is not None:
        print('Custom layers in this Keras model:')
        for keras_layer_key in NN_custom_layers:
            if keras_layer_key in conversion_func_in_AIAS:
                print(keras_layer_key + ' - available')
                custom_layers[keras_layer_key] = NN_custom_layers[keras_layer_key]
                custom_conversion_func[keras_layer_key] = conversion_func_in_AIAS[keras_layer_key]
            else:
                print(keras_layer_key + ' - unavailable')

        print('Matched layers and conversion functions for coremltools:')
        print(custom_layers)
        print(custom_conversion_func)

    else:
        print('Custom layers not found in this Keras model.')

    custom_objects = dict(**custom_loss, **custom_metrics, **custom_layers)

    print('----------------------------------------------------------')
    print('Custom objects passed into coremltools converter:')
    print(custom_objects)
    print('----------------------------------------------------------')

    # Convert
    # Do not change the input_names/output_names because they are used to identify input/output layers in Keras code
    spec = convertToSpec(keras_model_path_temp,
                         input_names='input',
                         output_names='output',
                         add_custom_layers=True,
                         custom_conversion_functions=custom_conversion_func,
                         custom_objects=custom_objects,
                         respect_trainable=False)  # should be True???
    model = MLModel(spec)

    # Set descriptions
    model.author = 'Takashi Shirakawa'
    model.license = '(C) 2019-2020, Takashi Shirakawa. All rights reserved.'
    model.short_description = NN_model_name + ' for A.I.Segmentation'
    model.input_description['input'] = 'Input is a square image with 8-bit grayscale per pixel.'
    model.output_description['output'] = 'Output (segmentation) is supposed to be an image with the same dimension and format.'

    # Save mlmodel
    model.save(coreml_model_path)

    #    spec_f = model.get_spec()
    #    convert_double_to_float_multiarray_type(spec_f)
    #    model_f = MLModel(spec_f)
    #    model_f.save(os.path.splitext(coreml_model_path)[0] + ', float_multiarray.mlmodel')

    # Show results
    spec = model.get_spec()
    print('----------------------------------------------------------')
    print('Model descriptions:')
    print(spec.description)
    #    print('Model descriptions (float multiarray type):')
    #    print(spec_f.description)

    print('Custom layers:')
    for i, layer in enumerate(spec.neuralNetwork.layers):
        if layer.HasField('custom'):
            print('Layer %d = %s : class name = %s' %
                  (i + 1, layer.name, layer.custom.className))
#        else:
#            print('Layer %d = %s' % (i, layer.name))

    print('Done.')
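
As an aside, deleting layers one at a time rebuilds the whole model on every call. kerassurgeon also provides a Surgeon class that batches several jobs into a single rebuild; a minimal sketch, assuming the same keras_model and layer-name prefixes as above:

from kerassurgeon import Surgeon

surgeon = Surgeon(keras_model, copy=False)
for layer in keras_model.layers:
    if layer.name.startswith(('dropout', 'gaussian_dropout', 'gaussian_noise', 'spatial_dropout2d')):
        surgeon.add_job('delete_layer', layer)
keras_model = surgeon.operate()  # one rebuild for all queued deletions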
Example #7
def grad_layerwise_rank(model, input_img, psize=1):
    """ 
    use gradient for layer-wise ranking 
    psize is an int indicating how many layers to prune
    """
    layer_idx = []
    layer_grads = []
    for lidx, layer in enumerate(model.layers):
        layer_class = layer.__class__.__name__
        # only Conv2D and Dense layers are ranked as pruning candidates
        if layer_class not in ('Conv2D', 'Dense'):
            continue
        layer_idx.append(lidx)
        inputs = model.input
        mean = K.mean(layer.output)
        #print("mean :", mean)
        grads = K.gradients(mean, inputs)[0]
        grads = K.sum(K.abs(grads))
        print("grads{} shape:{}".format(layer_class, grads.shape))
        iterate = K.function([inputs, K.learning_phase()],[grads])
        grads = iterate([input_img, 0])
        layer_grads.append(grads[0])
    print("layer grads:",layer_grads)
    pidx = np.argsort(layer_grads)
    pidx = pidx[pidx != 0]  # drop index 0: never prune the first Conv/Dense layer
    #new_pidx = []
    #for p in pidx:
    #    new_pidx.append(layer_idx[p])
    #print("prune idx:", new_pidx)
    pidx = pidx[:psize]
    pidx = pidx.tolist()
    print("pruning idx:", pidx)
    # pruning according to input and output size
    for i in pidx:
        layer = model.layers[layer_idx[i]]
        layer_class = layer.__class__.__name__
        if i == 0:
            input_size = layer.input_shape
        else:
            input_size = model.layers[layer_idx[i-1]].output_shape
        if i == len(layer_idx)-1:
            output_size = layer.output_shape
        else:
            output_size = model.layers[layer_idx[i+1]].input_shape
        if input_size[-1] > output_size[-1]:
            if i == 0:
                # never delete the first layer of the network
                continue
            # the former layer is larger than the latter layer,
            # so prune the former layer to make them fit
            psize_chnl = input_size[-1] - output_size[-1]
            last_layer = model.layers[layer_idx[i-1]]
            model = delete_channels(model, last_layer, np.arange(psize_chnl).tolist())
            #model = delete_layer(model, layer)
        elif input_size[-1] < output_size[-1]:
            # the former layer is smaller than the latter (common in CNNs),
            # so prune this layer to make them fit
            psize_chnl = output_size[-1] - input_size[-1]
            model = delete_channels(model, layer, np.arange(psize_chnl).tolist())
        print("Pruning layer {}".format(layer.name))
        model = delete_layer(model, layer)
        return model
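
A hypothetical usage sketch (model, x_batch, and the training data are illustrative): rank once on a representative batch, prune, then recompile before fine-tuning, since the rebuilt model returned by delete_layer does not keep the compile state:

pruned_model = grad_layerwise_rank(model, x_batch, psize=1)
pruned_model.compile(optimizer='adam', loss='categorical_crossentropy')
pruned_model.fit(x_train, y_train, epochs=2)  # brief fine-tune to recover accuracy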