def simultaneus_training_network(
        conv_layers_sizes,
        dense_layers_sizes,
        sample_sizes=[8, 32, 128, 512, 1024],
        iteration_numbers=[4000, 3000, 2000, 1000, 500],
        SN_limits=[10, 100],
        translation_distance=1,
        verbose=0.01,
        model_path="",
        save_networks=True,
        residual_connections=False):
    """
    Build a multi-output network and then train all of its outputs at once.
    """
    from keras.models import Model
    import deeptrack

    network, output_layers, input_tensor = build_modular_breadth_model(
        conv_layers_sizes=conv_layers_sizes,
        dense_layers_sizes=dense_layers_sizes,
        sample_sizes=sample_sizes,
        iteration_numbers=iteration_numbers,
        SN_limits=SN_limits,
        translation_distance=translation_distance,
        verbose=verbose,
        model_path=model_path,
        save_networks=save_networks,
        residual_connections=residual_connections,
        train_network=False)
    single_network = Model(input_tensor, output_layers)
    # One 'mse' loss per output head
    losses = ['mse'] * len(output_layers)
    single_network.summary()
    single_network.compile(optimizer='rmsprop',
                           loss=losses,
                           metrics=['mse', 'mae'])
    deeptrack.train_deep_learning_network_mp(
        single_network,
        sample_sizes=sample_sizes,
        iteration_numbers=iteration_numbers,
        verbose=verbose,
        SN_limits=SN_limits,
        translation_distance=translation_distance,
        nbr_outputs=len(output_layers))
    if save_networks:
        single_network.save(model_path + 'simultaneus_training_network.h5')
    return single_network
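
# Hedged usage sketch (not from the original source): build and train a small
# multi-output model. The nesting expected in conv_layers_sizes /
# dense_layers_sizes is set by build_modular_breadth_model above, and all values
# below are illustrative placeholders; deeptrack must be importable for training.
def _demo_simultaneous_training():
    model = simultaneus_training_network(
        conv_layers_sizes=[16, 32],    # one entry per sub-model (illustrative)
        dense_layers_sizes=[32, 32],   # one entry per sub-model (illustrative)
        sample_sizes=[8, 32],          # shortened schedule for a quick test
        iteration_numbers=[100, 100],
        model_path="./models/",        # hypothetical output directory
        save_networks=False)
    return model
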
def normal_comp_full_models(
        layer_sizes,
        nbr_conv_layers,
        train_generator,  # Not strictly needed if mp training is used
        step=8,
        input_shape=(51, 51, 1),
        output_shape=3,
        sample_sizes=(8, 32, 128, 512, 1024),
        iteration_numbers=[4000, 3000, 2000, 1000, 500],
        verbose=0.01,
        save_networks=True,
        mp_training=False,  # use multiprocessing training
        translation_distance=5,  # parameters for multiprocessing training
        SN_limits=[10, 100],  # parameters for multiprocessing training
        model_path=""):
    import deeptrack

    for j in range(nbr_conv_layers):
        for i in range(round(layer_sizes[j] / step)):
            layer_size = (i + 1) * step
            network_trad = deeptrack.create_deep_learning_network_no_dense_top(
                conv_layers_dimensions=[layer_size])

            print("Multiprocessing image generation initiated")
            deeptrack.train_deep_learning_network_mp(
                network_trad,
                sample_sizes=sample_sizes,
                iteration_numbers=iteration_numbers,
                SN_limits=SN_limits,
                translation_distance=translation_distance,
                verbose=verbose)
            network_trad.summary()
            model_name = model_path + "layer_no1" + "top_size" + str(
                layer_size) + ".h5"
            network_trad.save(model_name)
    return 0
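
# Hedged usage sketch (not part of the original source): sweeps single-conv-layer
# models of increasing width. train_generator is unused here because the function
# always calls deeptrack's multiprocessing trainer; all values are illustrative.
def _demo_normal_comp_full_models():
    normal_comp_full_models(
        layer_sizes=[32],        # width of the conv layer to sweep over
        nbr_conv_layers=1,
        train_generator=None,    # not needed for mp training
        step=8,                  # grow the layer in steps of 8 filters
        sample_sizes=(8, 32),
        iteration_numbers=[100, 100],
        model_path="./models/")  # hypothetical output directory
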
def build_modular_breadth_model(
    conv_layers_sizes,
    dense_layers_sizes,
    sample_sizes=[8, 32, 128, 512, 1024],
    iteration_numbers=[4000, 3000, 2000, 1000, 500],
    SN_limits=[10, 100],
    translation_distance=1,
    verbose=0.01,
    model_path="",
    save_networks=True,
    residual_connections=False,
    train_network=True,
    parameter_function=0,
    extra_pooling=0,
):
    """
    residual_connections - if True, the first non-zero convolutional layer of each
        expansion block is also connected directly to the input (skip connection).
    """
    import deeptrack
    import evaluate_deeptrack_performance as edp
    import numpy as np

    output_layers = []  # list of all output layers in model, used for training
    # all layers at once
    # Create first network
    network, input_tensor, conv_layers_list, dense_layers_list, final_output = modular_breadth_network_start(
        conv_layers_sizes[0],
        dense_layers_sizes[0],
        extra_pooling=extra_pooling)
    output_layers.append(final_output)
    # Compile and verify the architecture
    nbr_images_to_evaluate = 1000
    nbr_residual_connections = 0
    last_residual_connection = None
    if train_network:
        network.compile(optimizer='rmsprop',
                        loss='mse',
                        metrics=['mse', 'mae'])
        network.summary()
        # Use default parameters for evaluation
        # Train and freeze layers in network

        deeptrack.train_deep_learning_network_mp(
            network,
            sample_sizes=sample_sizes,
            iteration_numbers=iteration_numbers,
            verbose=verbose,
            SN_limits=SN_limits,
            translation_distance=translation_distance,
            parameter_function=parameter_function,
        )
        freeze_all_layers(network)
        if (save_networks):
            network.save(model_path + "network_no" + str(0) + ".h5")
    for i in range(len(conv_layers_sizes) - 1):
        model_idx = i + 1
        # Grow network
        network, conv_layers_list, dense_layers_list, final_output, last_residual_connection, nbr_residual_connections = modular_breadth_network_growth(
            input_tensor,
            conv_layers_list,
            dense_layers_list,
            conv_layers_sizes[model_idx],
            dense_layers_sizes[model_idx],
            residual_connections=residual_connections,
            nbr_residual_connections=nbr_residual_connections,
            last_residual_connection=last_residual_connection,
            model_idx=model_idx + 1)
        output_layers.append(final_output)

        if train_network:

            network.compile(optimizer='rmsprop',
                            loss='mse',
                            metrics=['mse', 'mae'])
            network.summary()

            # Train and freeze layers in network
            deeptrack.train_deep_learning_network_mp(
                network,
                sample_sizes=sample_sizes,
                iteration_numbers=iteration_numbers,
                verbose=verbose,
                SN_limits=SN_limits,
                translation_distance=translation_distance,
                parameter_function=parameter_function,
            )
            freeze_all_layers(network)  # Keras might warn here; it is harmless
            network.compile(optimizer='rmsprop',
                            loss='mse',
                            metrics=['mse', 'mae'])

            # Save network
            if save_networks:
                network.save(model_path + "network_no" + str(model_idx) +
                             ".h5")
    if train_network:
        return network
    return network, output_layers, input_tensor
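
# Hedged usage sketch (not in the original source): grows a breadth-wise modular
# model in two stages. The helpers modular_breadth_network_start/_growth are
# assumed to be defined elsewhere in this module, and the exact element type they
# expect in conv_layers_sizes / dense_layers_sizes determines the entries below,
# which are placeholders only.
def _demo_build_modular_breadth_model():
    trained = build_modular_breadth_model(
        conv_layers_sizes=[16, 16],
        dense_layers_sizes=[32, 32],
        sample_sizes=[8, 32],
        iteration_numbers=[100, 100],
        save_networks=False,
        train_network=True)  # returns only the final trained network
    return trained
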
def independent_avg_modular_deeptrack_new_layer(
        layer_size,  # Total number of nodes in layer
        train_generator,
        conv_list,
        output_list,
        input_tensor,
        layer_no=2,
        nbr_nodes_added=1,
        sample_sizes=(8, 32, 128, 512, 1024),
        iteration_numbers=(401, 301, 201, 101, 51),
        verbose=0.01,
        mp_training=False,  # use multiprocessing training?
        translation_distance=5,  # parameters for multiprocessing training
        SN_limits=[10, 100],  # parameters for multiprocessing training
        save_networks=False,
        model_path="",
        layer_type='conv'):
    """
    Adds a new layer to the modular averaging architecture where each output is
    trained independently
    """
    import deeptrack
    from keras import models, layers
    from keras.models import Model
    from feature_by_feature import freeze_all_layers
    new_layer_node_list = []  # Convolutions in the new layer
    new_layer_flattened_list = []  # flattened output from new layer

    # Create the input tensor for the new nodes
    if len(conv_list) > 1:
        new_layer_input = layers.Concatenate()(conv_list)
    else:
        new_layer_input = conv_list[0]

    # If next layer is dense then we (probably) need to flatten previous input
    if layer_type == 'dense':
        import keras.backend as K
        # Check the dimensionality of the previous output to see if a Flatten
        # layer is needed (conv outputs are 4D, dense outputs are 2D).
        if len(K.int_shape(new_layer_input)) > 2:
            new_layer_input = layers.Flatten()(new_layer_input)

    # Add all the new nodes and train network in between
    for i in range(round(layer_size / nbr_nodes_added)):
        if layer_type == 'dense':
            next_node = layers.Dense(nbr_nodes_added,
                                     activation='relu')(new_layer_input)
            next_flattened = next_node
        else:
            next_node = layers.Conv2D(nbr_nodes_added, (3, 3),
                                      activation='relu')(new_layer_input)
            next_node = layers.MaxPooling2D((2, 2))(next_node)
            next_flattened = layers.Flatten()(next_node)
        new_layer_flattened_list.append(next_flattened)
        if (i == 0):
            # i = 0 special case. No concatenation needed
            next_output = layers.Dense(3)(next_flattened)  # Different for i==0
        else:
            next_output = layers.Concatenate(axis=-1)(new_layer_flattened_list)
            next_output = layers.Dense(3)(next_output)

        # Construct and compile network
        network = models.Model(input_tensor, next_output)
        network.compile(optimizer='rmsprop',
                        loss='mse',
                        metrics=['mse', 'mae'])
        network.summary()
        output_list.append(next_output)

        # Train and freeze layers in network

        if mp_training:
            deeptrack.train_deep_learning_network_mp(
                network,
                sample_sizes=sample_sizes,
                iteration_numbers=iteration_numbers,
                verbose=verbose,
                SN_limits=SN_limits,
                translation_distance=translation_distance,
            )
        else:
            deeptrack.train_deep_learning_network(
                network,
                train_generator,
                sample_sizes=sample_sizes,
                iteration_numbers=iteration_numbers,
                verbose=verbose)

        freeze_all_layers(network)
        new_layer_node_list.append(next_node)
        if (save_networks):
            network.save(model_path + "L" + str(layer_no) + "_" +
                         str((i + 1) * nbr_nodes_added) + "F.h5")
    avg_out = layers.average(output_list)
    network = models.Model(input_tensor, avg_out)
    network.compile(optimizer='rmsprop', loss='mse', metrics=['mse', 'mae'])
    print('final network architecture')
    network.summary()
    if (save_networks):
        network.save(model_path + "final_L" + str(layer_no) + "_F.h5")
    # TODO: a separate branch for dense layers may be needed here
    return network, new_layer_node_list, output_list, new_layer_flattened_list
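
# Hedged usage sketch (not in the original source): grow a second, dense layer on
# top of an existing modular averaging model. conv_list, output_list and
# input_tensor are assumed to come from independent_avg_modular_deeptrack_L1
# (defined below); the generator and all sizes are illustrative.
def _demo_add_modular_layer(train_generator):
    (net, conv_list, output_list,
     flattened_list, input_tensor) = independent_avg_modular_deeptrack_L1(
         layer_size=4, train_generator=train_generator, nbr_nodes_added=2)
    return independent_avg_modular_deeptrack_new_layer(
        layer_size=4,
        train_generator=train_generator,
        conv_list=conv_list,
        output_list=output_list,
        input_tensor=input_tensor,
        layer_no=2,
        nbr_nodes_added=2,
        layer_type='dense')  # 'conv' would add convolutional nodes instead
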
def independent_avg_modular_deeptrack_L1(  # bad name
    layer_size,
    train_generator,
    input_shape=(51, 51, 1),
    output_shape=3,
    nbr_nodes_added=1,
    sample_sizes=(8, 32, 128, 512, 1024),
    iteration_numbers=(401, 301, 201, 101, 51),
    verbose=0.01,
    save_networks=False,
    mp_training=False,  # use multiprocessing training
    translation_distance=5,  # parameters for multiprocessing training
    SN_limits=[10, 100],  # parameters for multiprocessing training
    model_path="",  # path prefix used when saving networks
):
    """
    First layer in the modular averaging architecture, where each layer is
    trained independently of the previous ones.
    Inputs:
        layer_size - size of the first layer in the model
        train_generator - generator for training images
        output_shape - number of outputs from the network, 3 for normal deeptrack
        nbr_nodes_added - number of nodes to add at a time
        sample_sizes - same as deeptrack
        iteration_numbers - same as deeptrack
        save_networks - whether networks are saved automatically once training is finished
        mp_training - use multiprocessing to speed up training. Not available for custom
            image generators supplied via train_generator; uses FBF's own image generator.
        translation_distance - parameter for the mp_training image generator; determines
            the area in which the particle is allowed to appear
        SN_limits -
    Outputs:
    TODO - Implement weight decay in later layers and lowering of the learning rate
    """
    import deeptrack
    import keras
    from keras import Input, models, layers
    from keras.models import Model
    from feature_by_feature import freeze_all_layers
    #import deeptrackelli_mod_mproc as multiprocess_training # Needed for fast parallelized image generator

    input_tensor = Input(input_shape)
    conv_list = []  # List of convolutional neurons in L1, needed for subsequent layers
    flattened_list = []  # List of flattened layers
    output_list = []  # List of the output layers

    # Loop through the neurons and add them one by one
    for i in range(round(layer_size / nbr_nodes_added)):
        next_node = layers.Conv2D(nbr_nodes_added, (3, 3),
                                  activation='relu')(input_tensor)
        next_node = layers.MaxPooling2D((2, 2))(next_node)
        if (i == 0):
            # i = 0 special case. No addition needed
            next_flattened = layers.Flatten()(next_node)
            next_output = layers.Dense(3)(next_flattened)
            final_output = next_output
            output_list.append(next_output)
            flattened_list.append(next_flattened)
        else:
            # Construct the next output node
            next_flattened = layers.Flatten()(next_node)
            flattened_list.append(next_flattened)
            # Can't concatenate a single layer
            if (len(flattened_list) > 1):
                next_output = layers.Concatenate(axis=-1)(flattened_list)

            next_output = layers.Dense(3)(next_output)
            output_list.append(next_output)
            # Construct and compile network
        network = models.Model(input_tensor, next_output)
        network.compile(optimizer='rmsprop',
                        loss='mse',
                        metrics=['mse', 'mae'])
        network.summary()

        # Train and freeze layers in network
        if mp_training:
            deeptrack.train_deep_learning_network_mp(
                network,
                sample_sizes=sample_sizes,
                iteration_numbers=iteration_numbers,
                verbose=verbose,
                SN_limits=SN_limits,
                translation_distance=translation_distance,
            )
        else:
            deeptrack.train_deep_learning_network(
                network,
                train_generator,
                sample_sizes=sample_sizes,
                iteration_numbers=iteration_numbers,
                verbose=verbose)

        freeze_all_layers(network)
        conv_list.append(next_node)
        if (save_networks):
            network.save(model_path + "L1_" + str((i + 1) * nbr_nodes_added) +
                         "F.h5")
    # Create final output using all the output layers and averaging them
    if (len(output_list) > 1):
        avg_out = layers.average(output_list)
    else:
        avg_out = output_list[0]
    network = models.Model(input_tensor, avg_out)
    network.compile(optimizer='rmsprop', loss='mse', metrics=['mse', 'mae'])
    print('final network architecture')
    network.summary()
    if (save_networks):
        network.save(model_path + "final_L" + str(1) + "_F.h5")
    return network, conv_list, output_list, flattened_list, input_tensor
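
# Hedged usage sketch (not in the original source): build the first modular layer
# using deeptrack's multiprocessing image generator instead of a custom generator.
# SN_limits and translation_distance keep the defaults documented above; the
# shortened schedule is illustrative only.
def _demo_modular_L1_mp():
    return independent_avg_modular_deeptrack_L1(
        layer_size=8,
        train_generator=None,   # ignored when mp_training=True
        nbr_nodes_added=4,
        sample_sizes=(8, 32),
        iteration_numbers=(50, 50),
        mp_training=True)
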
def single_output_modular_L1(
    layer_size,
    train_generator,
    input_shape=(51, 51, 1),
    output_shape=3,
    nbr_nodes_added=1,
    sample_sizes=(8, 32, 128, 512, 1024),
    iteration_numbers=(401, 301, 201, 101, 51),
    verbose=0.01,
    save_networks=True,
    mp_training=False,  # use multiprocessing training
    translation_distance=5,  # parameters for multiprocessing training
    SN_limits=[10, 100],  # parameters for multiprocessing training
    model_path="",
):
    import deeptrack
    import keras
    from keras import Input, models, layers
    from keras.models import Model
    #import deeptrackelli_mod_mproc as multiprocess_training # Needed for fast parallelized image generator

    input_tensor = Input(input_shape)
    conv_list = []  # List of convolutional neurons in L1, needed for subsequent layers
    flattened_list = []  # List of flattened layers

    # Loop through the neurons and add them one by one
    for i in range(round(layer_size / nbr_nodes_added)):
        next_node = layers.Conv2D(nbr_nodes_added, (3, 3),
                                  activation='relu')(input_tensor)
        next_node = layers.MaxPooling2D((2, 2))(next_node)
        if (i == 0):
            # i = 0 special case. No addition needed
            next_flattened = layers.Flatten()(next_node)
            flattened_list.append(next_flattened)
            next_output = layers.Dense(3)(next_flattened)
            final_output = next_output
        else:
            # Construct the next output node
            next_flattened = layers.Flatten()(next_node)
            flattened_list.append(next_flattened)
            # Concatenate all flattened nodes with the previous final output

            next_output = layers.Concatenate(axis=-1)(flattened_list)
            next_output = layers.Concatenate(axis=-1)(
                [next_output, final_output])
            final_output = layers.Dense(3)(next_output)

        network = models.Model(input_tensor, final_output)
        network.compile(optimizer='rmsprop',
                        loss='mse',
                        metrics=['mse', 'mae'])
        network.summary()

        # Train and freeze layers in network
        if mp_training:
            deeptrack.train_deep_learning_network_mp(
                network,
                sample_sizes=sample_sizes,
                iteration_numbers=iteration_numbers,
                verbose=verbose,
                SN_limits=SN_limits,
                translation_distance=translation_distance,
            )
        else:
            deeptrack.train_deep_learning_network(
                network,
                train_generator,
                sample_sizes=sample_sizes,
                iteration_numbers=iteration_numbers,
                verbose=verbose)

        freeze_all_layers(network)
        conv_list.append(next_node)
        if (save_networks):
            network.save(model_path + "L1_" + str((i + 1) * nbr_nodes_added) +
                         "F.h5")
    return network, conv_list, flattened_list, input_tensor, final_output
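
# Hedged usage sketch (not in the original source): single-output variant of the
# modular first layer. The returned conv_list / flattened_list / final_output can
# be fed to later growth functions; all values below are illustrative.
def _demo_single_output_L1(train_generator):
    (net, conv_list, flattened_list,
     input_tensor, final_output) = single_output_modular_L1(
         layer_size=8,
         train_generator=train_generator,
         nbr_nodes_added=4,
         save_networks=False)
    return net
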
def fbf_modular_expand_layer(
    expansion_size,
    train_generator,
    conv_list,
    output_list,
    flattened_list,
    input_tensor,
    layer_no=1,
    nbr_nodes_added=1,  # may be problematic if larger than expansion size
    sample_sizes=(8, 32, 128, 512, 1024),
    iteration_numbers=(401, 301, 201, 101, 51),
    verbose=0.01,
    mp_training=False,  # use multiprocessing training
    translation_distance=5,  # parameters for multiprocessing training
    SN_limits=[10, 100],  # parameters for multiprocessing training
    save_networks=True,
    model_path="",
    combination_layer_type='addition',
):
    """
    Function for expanding a preexisting layer of an fbf_modular model.
    Inputs:
        expansion_size - number of nodes by which the layer should be expanded
        train_generator - image generator the new nodes are trained on
    Outputs:
    """
    import deeptrack
    import keras
    from keras import Input, models, layers
    from keras.models import Model
    base_length = len(conv_list)
    for i in range(round(expansion_size / nbr_nodes_added)):
        next_node = layers.Conv2D(nbr_nodes_added, (3, 3),
                                  activation='relu')(input_tensor)
        next_node = layers.MaxPooling2D((2, 2))(next_node)

        # Construct the next output node
        next_flattened = layers.Flatten()(next_node)
        flattened_list.append(next_flattened)
        next_output = layers.Concatenate(axis=-1)(flattened_list)
        next_output = layers.Dense(3)(next_output)
        output_list.append(next_output)
        if combination_layer_type == 'average':
            final_output = layers.average(output_list)
        else:
            final_output = layers.add(output_list)
        # Construct and compile network
        network = models.Model(input_tensor, final_output)
        network.compile(optimizer='rmsprop',
                        loss='mse',
                        metrics=['mse', 'mae'])
        network.summary()

        # Train and then freeze layers in network
        if mp_training:
            deeptrack.train_deep_learning_network_mp(
                network,
                sample_sizes=sample_sizes,
                iteration_numbers=iteration_numbers,
                verbose=verbose,
                SN_limits=SN_limits,
                translation_distance=translation_distance,
            )
        else:
            deeptrack.train_deep_learning_network(
                network,
                train_generator,
                sample_sizes=sample_sizes,
                iteration_numbers=iteration_numbers,
                verbose=verbose)

        freeze_all_layers(network)
        conv_list.append(next_node)
        if (save_networks):
            network.save(model_path + "L" + str(layer_no) + "_" + str(
                (i + base_length + 1) * nbr_nodes_added) +
                         "F.h5")  # fix layer_indexing

    return network, conv_list, output_list, flattened_list, input_tensor
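
# Hedged usage sketch (not in the original source): widen an existing layer with
# two extra nodes at a time, averaging all per-node predictions. The lists are
# assumed to come from a compatible first-layer builder such as
# independent_avg_modular_deeptrack_L1 above; all sizes are illustrative.
def _demo_expand_layer(train_generator, conv_list, output_list,
                       flattened_list, input_tensor):
    return fbf_modular_expand_layer(
        expansion_size=4,
        train_generator=train_generator,
        conv_list=conv_list,
        output_list=output_list,
        flattened_list=flattened_list,
        input_tensor=input_tensor,
        layer_no=1,
        nbr_nodes_added=2,
        combination_layer_type='average',  # 'addition' sums the outputs instead
        save_networks=False)
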
def ensamble_network_LBL(
        conv_layers_sizes,  # 1d array
        dense_layers_sizes,  # 1d array
        ensemble_size=2,
        sample_sizes=[8, 32, 128, 512, 1024],
        iteration_numbers=[4000, 3000, 2000, 1000, 500],
        SN_limits=[10, 100],
        translation_distance=1,
        verbose=0.01,
        model_path="",
        save_networks=True,
        input_shape=(51, 51, 1),
        parameter_function=0):
    # TODO: ADD TRAINING LBL style
    # Save networks
    import deeptrack
    from keras import layers, models, Input
    import numpy as np

    input_tensor = Input(input_shape)

    pooled_layers = []
    old_pooled_layers = []

    # Add the convolutional layers
    if len(conv_layers_sizes) < 1:
        print("Error: no convolutional layers specified")
        return 0
    for layer_no in range(len(conv_layers_sizes)):
        old_pooled_layers = pooled_layers
        pooled_layers = []
        for i in range(ensemble_size):
            conv_name = "conv_layer_" + str(layer_no) + '_' + str(i)
            pooling_name = "pooling_layer_" + str(layer_no) + '_' + str(i)
            if layer_no == 0:
                new_conv_layer = layers.Conv2D(conv_layers_sizes[layer_no],
                                               (3, 3),
                                               activation='relu',
                                               name=conv_name)(input_tensor)
            else:
                new_conv_layer = layers.Conv2D(
                    conv_layers_sizes[layer_no], (3, 3),
                    activation='relu',
                    name=conv_name)(old_pooled_layers[i])

            new_pooling_layer = layers.MaxPooling2D(
                (2, 2), name=pooling_name)(new_conv_layer)
            pooled_layers.append(new_pooling_layer)

        # Add dense top, train the model and freeze all the layers. Then save if need be
        network, output_layers = Add_ensemble_output(
            input_tensor=input_tensor,
            top_layers=pooled_layers,
            ensemble_size=ensemble_size,
            convolutions=True)
        network.compile(optimizer='rmsprop',
                        loss='mse',
                        metrics=['mse', 'mae'])
        network.summary()
        deeptrack.train_deep_learning_network_mp(
            network,
            translation_distance=translation_distance,
            SN_limits=SN_limits,
            sample_sizes=sample_sizes,
            iteration_numbers=iteration_numbers,
            verbose=verbose,
            nbr_outputs=ensemble_size,
            parameter_function=parameter_function)
        freeze_all_layers(network)
        if (save_networks):
            network.save(model_path + "network_no" + str(layer_no) + ".h5")

    # Adding flatten layers
    flattened_layers = []
    for i in range(ensemble_size):
        flattened_name = "flatten_layer_" + str(i)
        new_flattened = layers.Flatten(name=flattened_name)(pooled_layers[i])
        flattened_layers.append(new_flattened)

    # Add dense layers
    dense_layers = []
    old_dense_layers = []
    for layer_no in range(len(dense_layers_sizes)):
        old_dense_layers = dense_layers
        dense_layers = []
        for i in range(ensemble_size):
            dense_name = "dense_layer_" + str(layer_no) + '_' + str(i)

            if (layer_no == 0):
                new_dense_layer = layers.Dense(dense_layers_sizes[layer_no],
                                               activation='relu',
                                               name=dense_name)(
                                                   flattened_layers[i])
            else:
                new_dense_layer = layers.Dense(dense_layers_sizes[layer_no],
                                               activation='relu',
                                               name=dense_name)(
                                                   old_dense_layers[i])
            dense_layers.append(new_dense_layer)

        network, output_layers = Add_ensemble_output(
            input_tensor=input_tensor,
            top_layers=dense_layers,
            ensemble_size=ensemble_size,
            convolutions=False)
        network.compile(optimizer='rmsprop',
                        loss='mse',
                        metrics=['mse', 'mae'])
        network.summary()
        deeptrack.train_deep_learning_network_mp(
            network,
            translation_distance=translation_distance,
            SN_limits=SN_limits,
            sample_sizes=sample_sizes,
            iteration_numbers=iteration_numbers,
            verbose=verbose,
            nbr_outputs=ensemble_size,
            parameter_function=parameter_function)
        freeze_all_layers(network)
        if (save_networks):
            network.save(model_path + "network_no" +
                         str(len(conv_layers_sizes) + layer_no) + ".h5")

    return network
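
# Hedged usage sketch (not in the original source): train a two-member ensemble
# layer by layer. Add_ensemble_output and freeze_all_layers are assumed to be
# defined elsewhere in this module, and deeptrack must be importable; the layer
# sizes and schedule below are placeholders.
def _demo_ensemble_lbl():
    return ensamble_network_LBL(
        conv_layers_sizes=[16, 32],
        dense_layers_sizes=[32],
        ensemble_size=2,
        sample_sizes=[8, 32],
        iteration_numbers=[100, 100],
        save_networks=False)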