Example #1
 def __init__(self, nn_type="resnet50", restore=None, session=None, use_imagenet_pretrain=False, use_softmax=True):
     self.image_size = 224
     self.num_channels = 3
     self.num_labels = 8
 
     input_layer = Input(shape=(self.image_size, self.image_size, self.num_channels))
     weights = "imagenet" if use_imagenet_pretrain else None
     if nn_type == "resnet50":
         base_model = ResNet50(weights=weights, input_tensor=input_layer)
     elif nn_type == "vgg16":
         base_model = VGG16(weights=weights, input_tensor=input_layer)
     else:
         raise ValueError("Unsupported nn_type: {}".format(nn_type))

     # Classification head on top of the base model's output
     x = base_model.output
     x = LeakyReLU()(x)
     x = Dense(1024)(x)
     x = Dropout(0.2)(x)
     x = LeakyReLU()(x)
     x = Dropout(0.3)(x)
     x = Dense(self.num_labels)(x)
     if use_softmax:
         x = Activation("softmax")(x)
     model = Model(inputs=base_model.input, outputs=x)
 
     # Optionally freeze the pretrained backbone:
     # for layer in base_model.layers:
     #     layer.trainable = False

     if restore:
         print("Load: {}".format(restore))
         model.load_weights(restore)
 
     self.model = model
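
The snippet above does not show its imports; the following is a minimal sketch of what they might look like with the tf.keras API (an assumption; the original project may import the same names from the standalone Keras package instead).

    # Imports assumed by the constructor above (sketch only, tf.keras flavor).
    from tensorflow.keras.layers import Input, Dense, Dropout, Activation, LeakyReLU
    from tensorflow.keras.applications import ResNet50, VGG16
    from tensorflow.keras.models import Model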
Example #2
    def build_discriminator(self):
        """Build the GAN discriminator: strided Conv2D blocks with LeakyReLU,
        BatchNormalization and Dropout, ending in a single sigmoid unit that
        scores an input image as real or fake."""

        model = Sequential()

        model.add(
            Conv2D(32,
                   kernel_size=3,
                   strides=2,
                   input_shape=self.img_shape,
                   padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid'))

        # model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity)
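
As above, the imports are not part of the snippet; a sketch of the tf.keras equivalents (an assumption; the original may use the standalone keras package):

    # Imports assumed by build_discriminator above (sketch only, tf.keras flavor).
    from tensorflow.keras.models import Sequential, Model
    from tensorflow.keras.layers import (Input, Conv2D, ZeroPadding2D, BatchNormalization,
                                         LeakyReLU, Dropout, Flatten, Dense)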
Example #3
    def default_model(input_shape, classes):
        """Two stacked LSTMs with LeakyReLU activations and Dropout, followed
        by a tanh Dense layer and a softmax output layer."""

        from tensorflow.contrib.keras.api.keras.layers import LSTM, Dense, LeakyReLU, Dropout

        model = tf.keras.models.Sequential()

        model.add(LSTM(256, input_shape=input_shape, return_sequences=True))
        model.add(LeakyReLU(alpha=0.05))
        model.add(Dropout(0.2))

        model.add(LSTM(128))
        model.add(LeakyReLU(alpha=0.02))
        model.add(Dropout(0.1))

        model.add(Dense(32, activation="tanh"))
        model.add(Dropout(0.1))

        model.add(Dense(classes, activation="softmax"))

        return model
Example #4
    def simple_lstm(input_shape, classes, leaky_relu_alpha=0.04):
        """
        Generate network of architecture:
            LSTM
                units = M * N / 10
                activation = leaky relu
            Dense
                units = classes
                activation = softmax

        :param input_shape: tuple of int with length input_shape_rank=2 specifying the model input shape (MxN)
        :param classes: number of output units
        :param leaky_relu_alpha: slope of the leaky relu activation function
        :return: network with an lstm layer and a dense layer (see network architecture)
        """
        from tensorflow.contrib.keras.api.keras.layers import LSTM, Dense, LeakyReLU
        model = tf.keras.models.Sequential()
        input_units = round_even(input_shape[0] * input_shape[1] / 10.0)
        model.add(LSTM(input_units, input_shape=input_shape))
        model.add(LeakyReLU(alpha=leaky_relu_alpha))
        model.add(Dense(classes, activation="softmax"))
        return model
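
A minimal usage sketch for simple_lstm, assuming the function is reachable at module level, that tf is TensorFlow 1.x (so the tensorflow.contrib import works), and that round_even is a module-level helper rounding to the nearest even integer; the helper body and the compile settings below are assumptions, not taken from the original source.

    import tensorflow as tf

    def round_even(x):
        # Hypothetical helper: round to the nearest even integer.
        return int(round(x / 2.0)) * 2

    # M=100 timesteps, N=40 features -> LSTM(400), then Dense(10, softmax).
    model = simple_lstm(input_shape=(100, 40), classes=10)
    model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])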
Example #5
    def lstm_2_dense_2(input_shape,
                       classes,
                       input_unit_multiplier=0.10,
                       input_layer_dropout=0.16,
                       dropout_decay=0.5,
                       lstm_1_leaky_relu_alpha=0.04,
                       lstm_2_leaky_relu_alpha=0.02,
                       lstm_2_unit_input_multiplier=0.5,
                       dense_1_unit_input_multiplier=0.25,
                       presoftmax_dense_activation="tanh"):
        """
        Generate network of architecture:
            LSTM
                units = M * N * input_unit_multiplier
                activation = leaky relu
                dropout = input_layer_dropout
            LSTM
                units = M * N * input_unit_multiplier * lstm_2_unit_input_multiplier
                activation = leaky relu
                dropout = input_layer_dropout * dropout_decay
            Dense
                units = M * N * input_unit_multiplier * dense_1_unit_input_multiplier
                activation = presoftmax_dense_activation
                dropout = input_layer_dropout * (dropout_decay ** 2)
            Dense
                units = classes
                activation = softmax

        :param input_shape: tuple of int with length input_shape_rank=2 specifying the model input shape (MxN)
        :param classes: number of output units
        :param input_unit_multiplier: multiplier for generating number of input units (see model architecture)
        :param input_layer_dropout: dropout rate for the first LSTM. If 0, no dropout
        :param dropout_decay: multiplier by which the initial dropout rate decays for each deeper layer. If 1, the dropout rate is constant across layers; if 0, only the first LSTM is effectively followed by dropout
        :param lstm_1_leaky_relu_alpha: slope of the leaky relu activation function for the first lstm
        :param lstm_2_leaky_relu_alpha: slope of the leaky relu activation function for the second lstm
        :param lstm_2_unit_input_multiplier: multiplier for generating the number of units for the second lstm (see model architecture)
        :param dense_1_unit_input_multiplier: multiplier for generating the number of units for the first dense layer (see model architecture)
        :param presoftmax_dense_activation: activation function for the first dense layer
        :return: network with two lstm layers and two dense layers (see network architecture)
        """

        from tensorflow.contrib.keras.api.keras.layers import LSTM, Dense, LeakyReLU, Dropout
        model = tf.keras.models.Sequential()
        input_units = round_even(input_shape[0] * input_shape[1] *
                                 input_unit_multiplier)
        model.add(
            LSTM(input_units, input_shape=input_shape, return_sequences=True))
        model.add(LeakyReLU(alpha=lstm_1_leaky_relu_alpha))
        if input_layer_dropout > 0:
            model.add(Dropout(input_layer_dropout))
        model.add(LSTM(round_even(input_units * lstm_2_unit_input_multiplier)))
        model.add(LeakyReLU(alpha=lstm_2_leaky_relu_alpha))
        if input_layer_dropout > 0:
            model.add(Dropout(input_layer_dropout * dropout_decay))
        model.add(
            Dense(round_even(input_units * dense_1_unit_input_multiplier),
                  activation=presoftmax_dense_activation))
        if input_layer_dropout > 0:
            model.add(Dropout(input_layer_dropout * (dropout_decay**2)))
        model.add(Dense(classes, activation="softmax"))

        return model
Example #6
    def convlstm_2_dense_2(input_shape,
                           output_units,
                           channels_first=True,
                           units=None,
                           filter_sizes=(5, 3),
                           dropouts=(0.20, 0.24, 0.32),
                           leaky_relu_alphas=(0.04, 0.04),
                           pool_size=2,
                           pool_method="avg",
                           dense_1_activation="tanh",
                           dense_2_activation="tanh",
                           print_model_architecture=True):
        """
        :param input_shape: 5 dimensional (batch, timesteps, channels, rows, columns)
        :param output_units: total number of output neurons
        :param channels_first: if False, input_shape is: (batch, timesteps, rows, columns, channels)
        :param units: list of int specifying the number of filters and units per layer (excluding final layer)
        :param filter_sizes: list of int specifying the dimensions of the convolutional filters of ConvLSTM2D
        :param dropouts: list of float specifying the dropout rates per layer (excluding final layer)
        :param leaky_relu_alphas: list of float specifying the leaky relu slope in the negative domain for the ConvLSTM2D activation functions
        :param pool_size: int specifying the size of the square used for pooling between the second ConvLSTM and first Dense layers
        :param pool_method: if "avg", 2D average pooling is used, otherwise max pooling is used
        :param dense_1_activation: activation function for the first dense layer
        :param dense_2_activation: activation function for the output layer
        :param print_model_architecture: if True, the model architecture is printed before trying to build it
        :return: network with two ConvLSTM2D layers and two dense layers (see printed architecture)
        """

        from tensorflow.contrib.keras.api.keras.layers import ConvLSTM2D, LeakyReLU, Dense, AveragePooling2D, MaxPooling2D, Dropout, Flatten
        model = tf.keras.models.Sequential()

        data_format = "channels_first" if channels_first else "channels_last"

        if not units:
            units = []
            units.append(round_even(input_shape[2] * input_shape[3] / 2))
            units.append(round_even(units[0] * 1.5))
            units.append(round_even((units[1] + output_units) / 2))
        if print_model_architecture:
            print("building network with architecture:")
            print("\tCONV LSTM 2D")
            print("\t\tfilters: {}".format(units[0]))
            print("\t\tleaky relu alpha: {}".format(leaky_relu_alphas[0]))
            print("\t\tdata format: {}".format(data_format))
            if dropouts[0] > 0:
                print("\t\tdropout: {}".format(dropouts[0]))
            print("\tCONV LSTM 2D")
            print("\t\tfilters: {}".format(units[1]))
            print("\t\tleaky relu alpha: {}".format(leaky_relu_alphas[1]))
            print("\t\tdata format: {}".format(data_format))
            if dropouts[1] > 0:
                print("\t\tdropout: {}".format(dropouts[1]))
            if pool_size > 0:
                print("\tPOOL")
                print("\t\tsize: {}".format(pool_size))
                print("\t\tmethod: {}".format(pool_method))
            print("\tFLATTEN")
            print("\t\tdata format: {}".format(data_format))
            print("\tDENSE")
            print("\t\tunits: {}".format(units[2]))
            print("\t\tactivation: {}".format(dense_1_activation))
            print("\tDENSE")
            print("\t\tunits: {}".format(output_units))
            print("\t\tactivation: {}".format(dense_2_activation))

        if len(units) != 3 or not all(x > 0 for x in units):
            raise ValueError("units must contain exactly three positive values")

        model.add(
            ConvLSTM2D(units[0], (filter_sizes[0], filter_sizes[0]),
                       input_shape=input_shape,
                       data_format=data_format,
                       return_sequences=True))
        model.add(LeakyReLU(alpha=leaky_relu_alphas[0]))
        if dropouts[0] > 0:
            model.add(Dropout(dropouts[0]))

        model.add(
            ConvLSTM2D(units[1], (filter_sizes[1], filter_sizes[1]),
                       data_format=data_format))
        model.add(LeakyReLU(alpha=leaky_relu_alphas[1]))
        if dropouts[1] > 0:
            model.add(Dropout(dropouts[1]))

        if pool_size > 0:
            # Pooling layers also need data_format so channels_first inputs
            # are pooled over the spatial dimensions.
            model.add(
                AveragePooling2D((pool_size, pool_size), data_format=data_format)
                if pool_method == "avg" else
                MaxPooling2D((pool_size, pool_size), data_format=data_format))

        model.add(Flatten(data_format=data_format))
        model.add(Dense(units[2], activation=dense_1_activation))
        model.add(Dense(output_units, activation=dense_2_activation))

        return model