def create_network(network_input, n_vocab):
    """
    Make the model architecture - GRU or LSTM
    """
    model = Sequential()
    model.add(
        GRU(512,
            input_shape=(network_input.shape[1], network_input.shape[2]),
            recurrent_dropout=0.3,
            return_sequences=True))
    model.add(GRU(
        512,
        return_sequences=False,
        recurrent_dropout=0.3,
    ))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    return model
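These create_network snippets rely on imports that are not shown. Below is a minimal sketch of what they appear to assume (in particular, BatchNorm is assumed here to be an alias for keras.layers.BatchNormalization), followed by a hypothetical call:

# Assumed imports for the snippets in this listing (not from the source);
# BatchNorm is assumed to alias keras.layers.BatchNormalization.
import numpy
from keras.models import Sequential
from keras.layers import LSTM, GRU, Dense, Dropout, Activation
from keras.layers import BatchNormalization as BatchNorm

# Hypothetical usage: network_input has shape (num_sequences, sequence_length, 1)
# and n_vocab is the number of distinct symbols in the vocabulary.
network_input = numpy.zeros((100, 32, 1))
model = create_network(network_input, n_vocab=50)
model.summary()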
Example #2
def create_network(network_input, n_vocab, weight):
    """ create the structure of the neural network """
    model = Sequential()
    model.add(
        LSTM(512,
             input_shape=(network_input.shape[1], network_input.shape[2]),
             recurrent_dropout=0.2,
             return_sequences=True))
    model.add(LSTM(512, return_sequences=True, recurrent_dropout=0.2))
    model.add(LSTM(256))

    model.add(BatchNorm())
    model.add(Dropout(0.2))
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.2))

    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # Load the weights to each node
    model.load_weights(weight)

    return model
def create_network(network_input, n_vocab):
    """ create the structure of the neural network """
    model = Sequential()
    model.add(
        LSTM(512,
             input_shape=(network_input.shape[1], network_input.shape[2]),
             recurrent_dropout=0.3,
             return_sequences=True))
    model.add(LSTM(
        512,
        return_sequences=True,
        recurrent_dropout=0.3,
    ))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # Load the weights to each node
    # model.load_weights('weights-improvement-195-0.1490-bigger.hdf5')
    model.load_weights('weights-improvement-31-3.0448-bigger.hdf5')

    return model
Example #4
def create_network(network_input, n_vocab, mode, progression):
    #create the structure of the neural network
    model = Sequential()
    model.add(
        LSTM(hidden_layer,
             input_shape=(network_input.shape[1], network_input.shape[2]),
             recurrent_dropout=dropout,
             return_sequences=True))
    model.add(
        LSTM(
            hidden_layer,
            return_sequences=True,
            recurrent_dropout=dropout,
        ))
    model.add(LSTM(hidden_layer))
    model.add(BatchNorm())
    model.add(Dropout(dropout))
    model.add(Dense(dense))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(dropout))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam')

    # Load the weights to each node
    # mode means major/minor
    model.load_weights(
        f'/content/drive/My Drive/Colab Notebooks/music gen/data/weights/{mode}/{progression}_090.hdf5'
    )

    return model
Example #5
    def build_model(self):
        if self.loaded_model != self.model_name:
            print(self.notes_classes)
            dropout = 0.4
            self.model = Sequential()

            self.model.add(
                LSTM(self.hidden_size,
                     input_shape=(self.input_length, self.notes_classes),
                     return_sequences=True,
                     recurrent_dropout=dropout), )
            self.model.add(
                LSTM(self.hidden_size,
                     recurrent_dropout=dropout,
                     return_sequences=True))
            self.model.add(BatchNorm())
            self.model.add(Dropout(dropout))
            self.model.add(Dense(256))
            self.model.add(Activation('relu'))
            self.model.add(BatchNorm())
            self.model.add(Dropout(dropout))
            self.model.add(Dense(256))
            self.model.add(Activation('relu'))
            self.model.add(BatchNorm())
            self.model.add(Dense(self.notes_classes))
            self.model.add(Activation('softmax'))
            # self.directory = "drive/My Drive/DataProject/1"
            self.model.compile(loss='categorical_crossentropy',
                               optimizer='rmsprop',
                               metrics=['categorical_accuracy'])
            self.loaded_model = self.model_name
Example #6
    def __init__(self, channels=128):
        super(ResNetBlock, self).__init__()
        self.channels = channels
        self.conv1x1_1 = layers.Conv2D(self.channels // 2, (1, 1),
                                       padding='same',
                                       use_bias=False)
        self.conv1x1_2 = layers.Conv2D(self.channels, (1, 1), padding='same')
        self.conv_masked = MaskedConv2D(self.channels // 2,
                                        3,
                                        mask_type='B',
                                        activation='relu',
                                        padding='same')
        self.bn = BatchNorm()
        self.bn_2 = BatchNorm()
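The block above only defines its layers. A hedged sketch of what its forward pass could look like, assuming the usual PixelCNN residual layout (1x1 reduce, masked 3x3, 1x1 restore, skip connection); this is not taken from the source:

    # Hedged sketch (not from the source): a plausible call() for the block
    # above, assuming the standard PixelCNN residual layout and that
    # tensorflow is imported as tf.
    def call(self, inputs, training=None):
        x = self.bn(inputs, training=training)
        x = tf.nn.relu(x)
        x = self.conv1x1_1(x)       # halve the channel count
        x = self.conv_masked(x)     # masked 3x3 conv (type 'B') with built-in relu
        x = self.bn_2(x, training=training)
        x = tf.nn.relu(x)
        x = self.conv1x1_2(x)       # back to the full channel count
        return inputs + x           # residual connection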
Example #7
    def __init__(self, channels, final_channels=3 * 4, output_made=False):
        super(PixelCNN, self).__init__()
        self.output_made = output_made
        self.res_blocks = [ResNetBlock(channels) for _ in range(15)]
        #Sequential([ResNetBlock(channels, input_shape=(28,28,channels))]+[ResNetBlock(channels) for _ in range(11)])
        self.mask_2 = MaskedConv2D(channels, 3, mask_type='B', padding='same')
        self.mask_1 = MaskedConv2D(channels, 7, mask_type='A', padding='same')

        self.conv1x1_1 = layers.Conv2D(channels, (1, 1),
                                       padding='same',
                                       use_bias=False)
        self.conv1x1_2 = layers.Conv2D(final_channels, (1, 1), padding='same')
        self.bn_1 = BatchNorm()
        self.bn_2 = BatchNorm()
        self.bn_3 = BatchNorm()
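Again, only the layers are defined here. A hedged sketch of one plausible forward pass for this PixelCNN, assuming the type-'A' 7x7 masked conv runs first, then the residual stack, then the two 1x1 output convolutions (the output_made flag is ignored in this sketch):

    # Hedged sketch (not from the source): a plausible call() given the layers
    # defined above; assumes tensorflow is imported as tf.
    def call(self, inputs, training=None):
        x = self.mask_1(inputs)                 # type-'A' mask hides the current pixel
        x = self.bn_1(x, training=training)
        for block in self.res_blocks:           # 15 masked residual blocks
            x = block(x, training=training)
        x = self.mask_2(tf.nn.relu(x))          # type-'B' 3x3 masked conv
        x = self.bn_2(x, training=training)
        x = self.conv1x1_1(tf.nn.relu(x))
        x = self.bn_3(x, training=training)
        x = self.conv1x1_2(tf.nn.relu(x))       # final_channels logits per pixel
        return x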
Example #8
def main():
    input, output, mapping = getNotes(
        SEQUENCE_LEN, True, LOADED)  # getNotes(int, bool train, bool loaded)
    training_input = [[mapping[note] for note in sequence]
                      for sequence in input]
    training_output = [mapping[note] for note in output]
    training_input = numpy.reshape(
        training_input, (len(training_input), len(training_input[0]), 1))
    training_output = to_categorical(training_output, num_classes=len(mapping))
    # print(training_input.shape)
    # print(training_output.shape)
    model = Sequential()
    model.add(
        LSTM(
            LSTM_LAYER_SIZE,  # num nodes
            input_shape=(
                training_input.shape[1], training_input.shape[2]
            ),  # Since this is the first layer, we know the dimensions of the input
            return_sequences=True))  # creates recurrence

    model.add(
        LSTM(
            LSTM_LAYER_SIZE,
            return_sequences=True,  # creates recurrence
            recurrent_dropout=DROPOUT_RATE,
        ))  # fraction to leave out from recurrence

    model.add(
        LSTM(LSTM_LAYER_SIZE)
    )  # multiple LSTM layers create a deeper network for greater accuracy
    model.add(BatchNorm(
    ))  # normalizes inputs to neural network layers to make training faster
    model.add(Dropout(DROPOUT_RATE))  # prevents overfitting
    model.add(
        Dense(len(mapping))
    )  # classification layer - output must have the same dimensions as the mapping
    model.add(Activation(
        'softmax'))  # transforms output into a probability distribution

    model.compile(
        loss='categorical_crossentropy', optimizer='adam'
    )  # adam - adaptive moment estimation

    #model.summary()
    #TRAINING TIME
    filepath = "%s.hdf5" % WEIGHTS_DIR

    checkpoint = ModelCheckpoint(  # used for training loss
        filepath,
        monitor='loss',
        verbose=0,
        save_best_only=True,
        mode='min')
    model_callbacks = [checkpoint]

    model.fit(training_input,
              training_output,
              epochs=EPOCHS,
              batch_size=BATCH_SIZE,
              callbacks=model_callbacks)
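main() only trains and checkpoints the model. A hedged sketch of a single generation step afterwards (names such as seed_sequence are illustrative, not from the source):

# Hedged sketch (not from the source): one prediction step after training.
# seed_sequence is assumed to be a list of SEQUENCE_LEN note indices taken
# from mapping, shaped the same way as one training example.
prediction_input = numpy.reshape(seed_sequence, (1, SEQUENCE_LEN, 1))
prediction = model.predict(prediction_input, verbose=0)
next_index = numpy.argmax(prediction)                 # most probable class
inverse_mapping = {index: note for note, index in mapping.items()}
next_note = inverse_mapping[next_index]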
Example #9
    def __init__(self, config, repeat=3, prefix='Generator', **kwargs):
        super(Generator, self).__init__(**kwargs)

        self.config = config
        self.repeat = repeat
        self.prefix = prefix

        self.conv1 = tf.keras.layers.Conv2D(64,
                                            kernel_size=(3, 3),
                                            strides=2,
                                            padding='same',
                                            name=self.prefix + "_conv1")
        self.bn1 = BatchNorm()
        self.ac1 = tf.keras.activations.relu

        self.conv2 = tf.keras.layers.Conv2D(128,
                                            kernel_size=(3, 3),
                                            strides=2,
                                            padding='same',
                                            name=self.prefix + "_conv2")
        self.bn2 = BatchNorm()
        self.ac2 = tf.keras.activations.relu

        self.conv3 = tf.keras.layers.Conv2D(256,
                                            kernel_size=(3, 3),
                                            strides=2,
                                            padding='same',
                                            name=self.prefix + "_conv3")
        self.bn3 = BatchNorm()
        self.ac3 = tf.keras.activations.relu

        self.conv4 = tf.keras.layers.Conv2D(256,
                                            kernel_size=(3, 3),
                                            strides=2,
                                            padding='same',
                                            name=self.prefix + "_conv4")
        self.bn4 = BatchNorm()
        self.ac4 = tf.keras.activations.relu

        self.conv = tf.keras.layers.Conv2D(256,
                                           kernel_size=(3, 3),
                                           padding='same',
                                           name=self.prefix + "_final_conv")
        self.bn = BatchNorm()
        self.ac = tf.keras.activations.relu
Example #10
    def call(self, inputs, training=None, mask=None):
        x = inputs
        for i in range(self.repeat):
            x = tf.keras.layers.Conv2D(256, (3, 3),
                                       padding='same',
                                       name=self.prefix +
                                       "_3x3conv{}".format(i + 1))(x)
            x = BatchNorm()(x, training=training)
            x = tf.keras.activations.relu(x)
            x = tf.keras.layers.Conv2D(256, (1, 1),
                                       padding='same',
                                       name=self.prefix +
                                       "_1x1conv{}".format(i + 1))(x)
            x = BatchNorm()(x, training=training)
            x = tf.keras.activations.relu(x)

        pm = tf.keras.layers.Conv2D(4, (1, 1),
                                    padding='same',
                                    name=self.prefix + "_possi")(x)
        pm = BatchNorm()(pm, training=training)
        pm = tf.keras.activations.relu(pm)
        pm = tf.keras.layers.Flatten()(pm)
        pm = tf.keras.layers.Dense(2)(pm)
        pm_logits = tf.reshape(pm, [-1, 2])
        pm = tf.nn.softmax(pm_logits, axis=-1)

        dm = tf.keras.layers.Conv2D(4, (1, 1),
                                    padding='same',
                                    name=self.prefix + "_depth")(x)
        dm = BatchNorm()(dm, training=training)
        dm = tf.keras.activations.relu(dm)
        dm = tf.keras.layers.Flatten()(dm)
        dm = tf.keras.layers.Dense(1)(dm)
        dm = tf.reshape(dm, [-1, 1])

        lm = tf.keras.layers.Conv2D(128, (1, 1),
                                    padding='same',
                                    name=self.prefix + "_loc")(x)
        lm = BatchNorm()(lm, training=training)
        lm = tf.keras.activations.relu(lm)
        lm = tf.reshape(lm, [-1, 144, 72])

        return pm, dm, lm, pm_logits
Example #11
    def __init__(self,
                 gs=20,
                 param=40,
                 keepconst=10,
                 iterations=1,
                 alinearity=[-1.0, 1.0],
                 initializer='glorot_uniform',
                 i1=30,
                 i2=20,
                 mlpact=K.relu,
                 momentum=0.99,
                 k=16,
                 **kwargs):
        self.initializer = initializer
        self.gs = gs
        self.param = param
        self.keepconst = keepconst
        self.iterations = iterations
        self.activate = False
        self.i1 = i1
        self.i2 = i2
        self.mlpact = mlpact
        self.momentum = momentum
        self.k = k

        self.batch1 = BatchNorm(input_shape=(self.gs * self.gs, i1),
                                trainable=True,
                                momentum=momentum)
        self.batch2 = BatchNorm(input_shape=(self.gs * self.gs, i2),
                                trainable=True,
                                momentum=momentum)
        self.batch3 = BatchNorm(input_shape=(self.gs * self.gs,
                                             self.param - self.keepconst),
                                trainable=True,
                                momentum=momentum)

        if len(alinearity) == 2:
            self.activate = True
            self.activation = alinearity  # most general form of continuous activation: const, x, const
        else:
            self.activation = []

        super(glmlp, self).__init__(**kwargs)
Example #12
def create_network(network_input, n_vocab):
    """
    Make the model - can be GRU or LSTM
    """
    model = Sequential()
    #model.add(LSTM(
    #    512,
    #    input_shape=(network_input.shape[1], network_input.shape[2]),
    #    recurrent_dropout=0.3,
    #    return_sequences=True
    #))
    #model.add(LSTM(512, return_sequences=True, recurrent_dropout=0.3,))

    model.add(
        GRU(512,
            input_shape=(network_input.shape[1], network_input.shape[2]),
            recurrent_dropout=0.3,
            return_sequences=True))
    model.add(GRU(
        512,
        return_sequences=False,
        recurrent_dropout=0.3,
    ))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # Load weights for the model
    model.load_weights('weights-improvement-GRU-123-1.8256.hdf5')

    return model
Example #13
def initNetwork(network_input, n_vocab, weights):
    model = Sequential()
    model.add(
        LSTM(512,
             input_shape=(network_input.shape[1], network_input.shape[2]),
             recurrent_dropout=0.3,
             return_sequences=True))
    model.add(LSTM(
        512,
        return_sequences=True,
        recurrent_dropout=0.3,
    ))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    if (not path.exists(weights)):
        print(
            "\nNo weights found for this artist.\nMake sure you specify the weights file name as a second parameter in CLI, or\nthat a weights.hdf5 file exists in the artist folder.\n"
        )

        import os
        cwd = os.getcwd()
        print("current working folder", cwd)
        exit()

    model.load_weights(weights)

    return model
Example #14
def rebuild_model(test_input, mapping):
    test_input = numpy.reshape(test_input,
                               (len(test_input), len(test_input[0]), 1))

    #New
    model = Sequential()
    model.add(
        LSTM(
            LSTM_LAYER_SIZE,  # num nodes
            input_shape=(
                test_input.shape[1], test_input.shape[2]
            ),  # Since this is the first layer, we know the dimensions of the input
            return_sequences=True))  # creates recurrence
    model.add(
        LSTM(
            LSTM_LAYER_SIZE,
            return_sequences=True,  # creates recurrence
            recurrent_dropout=DROPOUT_RATE,
        ))  # fraction to leave out from recurrence

    model.add(
        LSTM(LSTM_LAYER_SIZE)
    )  # multiple LSTM layers create a deeper network for greater accuracy
    model.add(BatchNorm(
    ))  # normalizes inputs to neural network layers to make training faster
    model.add(Dropout(DROPOUT_RATE))  # prevents overfitting
    model.add(
        Dense(len(mapping))
    )  # classification layer - output must have the same dimensions as the mapping
    model.add(Lambda(lambda x: x / TEMP))  # adds temperature settings
    model.add(Activation(
        'softmax'))  # transforms output into a probability distribution

    model.compile(loss='categorical_crossentropy', optimizer='adam')
    #load weights
    model.load_weights('%s.hdf5' % WEIGHTS_DIR)

    return model
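The Lambda layer above divides the pre-softmax values by TEMP, so higher temperatures flatten the output distribution. A hedged sketch of sampling from it rather than taking the argmax (illustrative, not from the source):

# Hedged sketch (not from the source): sampling from the temperature-scaled
# softmax instead of taking the argmax, which is the usual purpose of the
# Lambda(lambda x: x / TEMP) layer above.
model = rebuild_model(test_input, mapping)
single_input = numpy.reshape(test_input[0], (1, len(test_input[0]), 1))
probabilities = model.predict(single_input, verbose=0)[0]
probabilities = probabilities / numpy.sum(probabilities)   # guard against float drift
sampled_index = numpy.random.choice(len(probabilities), p=probabilities)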
Example #15
    def call(self, inputs, training=None, mask=None):
        x = self.conv1(inputs)  # 72
        x = self.bn1(x, training=training)
        x = self.ac1(x)

        x = self.conv2(x)  # 36 128
        x = self.bn2(x, training=training)
        x = self.ac2(x)

        x1 = x

        x = self.conv3(x)  # 18 256
        x = self.bn3(x, training=training)
        x = self.ac3(x)

        x2 = x

        x = self.conv4(x)  # 9 256
        x = self.bn4(x, training=training)
        x = self.ac4(x)

        x3 = x

        x1 = tf.keras.layers.Conv2D(128, (1, 1), strides=2, padding='same')(x1)
        x2 = tf.keras.layers.Conv2D(256, (1, 1), padding='same')(x2)

        x2 = tf.concat([x1, x2], axis=-1)

        x2 = tf.keras.layers.Conv2D(256, (1, 1), strides=2, padding='same')(x2)
        x3 = tf.keras.layers.Conv2D(256, (1, 1), padding='same')(x3)

        feats = tf.concat([x2, x3], axis=-1)

        x = feats
        for i in range(self.repeat):
            x = tf.keras.layers.Conv2D(256, (3, 3),
                                       padding='same',
                                       name=self.prefix +
                                       '_IMFs_3x3conv{}'.format(i + 1))(x)
            x = BatchNorm()(x, training=training)
            x = tf.keras.activations.relu(x)

            x = tf.keras.layers.Conv2D(256, (1, 1),
                                       padding='same',
                                       name=self.prefix +
                                       '_IMFs_1x1conv{}'.format(i + 1))(x)
            x = BatchNorm()(x, training=training)
            x = tf.keras.activations.relu(x)
        x = tf.keras.layers.Conv2DTranspose(128,
                                            kernel_size=(3, 3),
                                            strides=1,
                                            padding='valid')(x)
        x = BatchNorm()(x, training=training)
        x = tf.keras.activations.relu(x)  # 11x11x128

        x = tf.keras.layers.Conv2DTranspose(64,
                                            kernel_size=(3, 3),
                                            strides=1,
                                            padding='valid')(x)
        x = BatchNorm()(x, training=training)
        x = tf.keras.activations.relu(x)  # 13x13x64

        x = tf.keras.layers.Conv2DTranspose(36,
                                            kernel_size=(3, 3),
                                            strides=2,
                                            padding='same')(x)
        x = BatchNorm()(x, training=training)
        x = tf.keras.activations.relu(x)  # 26x26x36

        imfs = tf.reshape(x, [-1, 9, 2704])

        return feats, imfs
def create_network(network_input_notes, n_vocab_notes, network_input_offsets,
                   n_vocab_offsets, network_input_durations,
                   n_vocab_durations):

    # Branch of the network that considers notes
    inputNotesLayer = Input(shape=(network_input_notes.shape[1],
                                   network_input_notes.shape[2]))
    inputNotes = LSTM(256,
                      input_shape=(network_input_notes.shape[1],
                                   network_input_notes.shape[2]),
                      return_sequences=True)(inputNotesLayer)
    inputNotes = Dropout(0.2)(inputNotes)

    # Branch of the network that considers note offset
    inputOffsetsLayer = Input(shape=(network_input_offsets.shape[1],
                                     network_input_offsets.shape[2]))
    inputOffsets = LSTM(256,
                        input_shape=(network_input_offsets.shape[1],
                                     network_input_offsets.shape[2]),
                        return_sequences=True)(inputOffsetsLayer)
    inputOffsets = Dropout(0.2)(inputOffsets)

    # Branch of the network that considers note duration
    inputDurationsLayer = Input(shape=(network_input_durations.shape[1],
                                       network_input_durations.shape[2]))
    inputDurations = LSTM(256,
                          input_shape=(network_input_durations.shape[1],
                                       network_input_durations.shape[2]),
                          return_sequences=True)(inputDurationsLayer)
    #inputDurations = Dropout(0.3)(inputDurations)
    inputDurations = Dropout(0.2)(inputDurations)

    #Concatenate the three input networks together into one branch now
    inputs = concatenate([inputNotes, inputOffsets, inputDurations])

    # A cheeky LSTM to consider everything learnt from the three separate branches
    x = LSTM(512, return_sequences=True)(inputs)
    x = Dropout(0.3)(x)
    x = LSTM(512)(x)
    x = BatchNorm()(x)
    x = Dropout(0.3)(x)
    x = Dense(256, activation='relu')(x)

    #Time to split into three branches again...

    # Branch of the network that classifies the note
    outputNotes = Dense(128, activation='relu')(x)
    outputNotes = BatchNorm()(outputNotes)
    outputNotes = Dropout(0.3)(outputNotes)
    outputNotes = Dense(n_vocab_notes, activation='softmax',
                        name="Note")(outputNotes)

    # Branch of the network that classifies the note offset
    outputOffsets = Dense(128, activation='relu')(x)
    outputOffsets = BatchNorm()(outputOffsets)
    outputOffsets = Dropout(0.3)(outputOffsets)
    outputOffsets = Dense(n_vocab_offsets, activation='softmax',
                          name="Offset")(outputOffsets)

    # Branch of the network that classifies the note duration
    outputDurations = Dense(128, activation='relu')(x)
    outputDurations = BatchNorm()(outputDurations)
    outputDurations = Dropout(0.3)(outputDurations)
    outputDurations = Dense(n_vocab_durations,
                            activation='softmax',
                            name="Duration")(outputDurations)

    # Tell Keras what our inputs and outputs are
    model = Model(
        inputs=[inputNotesLayer, inputOffsetsLayer, inputDurationsLayer],
        outputs=[outputNotes, outputOffsets, outputDurations])

    #Adam seems to be faster than RMSProp and learns better too
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    # Useful to try RMSProp though

    # LOAD WEIGHTS HERE IF YOU WANT TO CONTINUE TRAINING!
    #model.load_weights(weights_name)

    return model
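Because the model has three softmax heads, fitting it takes three input arrays and three one-hot target arrays in the same order as the outputs passed to Model(...). A hedged sketch of a training call (the network_output_* names, epochs and batch size are illustrative, not from the source):

# Hedged sketch (not from the source): training the three-headed model.
# The network_output_* arrays are assumed to be one-hot targets matching
# n_vocab_notes, n_vocab_offsets and n_vocab_durations respectively.
model = create_network(network_input_notes, n_vocab_notes,
                       network_input_offsets, n_vocab_offsets,
                       network_input_durations, n_vocab_durations)
model.fit([network_input_notes, network_input_offsets, network_input_durations],
          [network_output_notes, network_output_offsets, network_output_durations],
          epochs=200, batch_size=64)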
Example #17
def generateAgePredictionResNet(inputShape, paddingType='same', initType='he_uniform', regAmount=0.00005, dropRate=0.2, includeScannerGender=True):
    t1Input = Input(inputShape+(1,), name='T1_Img')
    
        
    with tf.name_scope('ResBlock0'):
        inputs = t1Input
        features = 8
        hidden = Conv3D(features, (3, 3, 3), padding=paddingType,kernel_regularizer=L2(regAmount),kernel_initializer=initType)(inputs)
        hidden = BatchNorm(renorm=True)(hidden)
        hidden = ELU(alpha=1.0)(hidden)
        hidden = Conv3D(features, (3, 3, 3), padding=paddingType,kernel_regularizer=L2(regAmount),kernel_initializer=initType)(hidden)
        hidden = BatchNorm(renorm=True)(hidden)
        shortcut = Conv3D(features, (1,1,1), strides=(1,1,1), padding=paddingType,kernel_initializer=initType)(inputs)
        hidden = add([shortcut,hidden])
        outputs = ELU(alpha=1.0)(hidden)
        
    pooling = MaxPooling3D(pool_size=(2,2,2),strides=(2,2,2), padding=paddingType)(outputs)
    
    with tf.name_scope('ResBlock1'):
        inputs = pooling
        features = 16
        hidden = Conv3D(features, (3, 3, 3), padding=paddingType,kernel_regularizer=L2(regAmount),kernel_initializer=initType)(inputs)
        hidden = BatchNorm(renorm=True)(hidden)
        hidden = ELU(alpha=1.0)(hidden)
        hidden = Conv3D(features, (3, 3, 3), padding=paddingType,kernel_regularizer=L2(regAmount),kernel_initializer=initType)(hidden)
        hidden = BatchNorm(renorm=True)(hidden)
        shortcut = Conv3D(features, (1,1,1), strides=(1,1,1), padding=paddingType,kernel_initializer=initType)(inputs)
        hidden = add([shortcut,hidden])
        outputs = ELU(alpha=1.0)(hidden)
        
    pooling = MaxPooling3D(pool_size=(2,2,2),strides=(2,2,2), padding=paddingType)(outputs)

    with tf.name_scope('ResBlock2'):
        inputs = pooling
        features = 32
        hidden = Conv3D(features, (3, 3, 3), padding=paddingType,kernel_regularizer=L2(regAmount),kernel_initializer=initType)(inputs)
        hidden = BatchNorm(renorm=True)(hidden)
        hidden = ELU(alpha=1.0)(hidden)
        hidden = Conv3D(features, (3, 3, 3), padding=paddingType,kernel_regularizer=L2(regAmount),kernel_initializer=initType)(hidden)
        hidden = BatchNorm(renorm=True)(hidden)
        shortcut = Conv3D(features, (1,1,1), strides=(1,1,1), padding=paddingType,kernel_initializer=initType)(inputs)
        hidden = add([shortcut,hidden])
        outputs = ELU(alpha=1.0)(hidden)
        
    pooling = MaxPooling3D(pool_size=(2,2,2),strides=(2,2,2), padding=paddingType)(outputs)
    
    with tf.name_scope('ResBlock3'):
        inputs = pooling
        features = 64
        hidden = Conv3D(features, (3, 3, 3), padding=paddingType,kernel_regularizer=L2(regAmount),kernel_initializer=initType)(inputs)
        hidden = BatchNorm(renorm=True)(hidden)
        hidden = ELU(alpha=1.0)(hidden)
        hidden = Conv3D(features, (3, 3, 3), padding=paddingType,kernel_regularizer=L2(regAmount),kernel_initializer=initType)(hidden)
        hidden = BatchNorm(renorm=True)(hidden)
        shortcut = Conv3D(features, (1,1,1), strides=(1,1,1), padding=paddingType,kernel_initializer=initType)(inputs)
        hidden = add([shortcut,hidden])
        outputs = ELU(alpha=1.0)(hidden)
        
        
    pooling = MaxPooling3D(pool_size=(2,2,2),strides=(2,2,2), padding=paddingType)(outputs)
    
    with tf.name_scope('ResBlock4'):
        inputs = pooling
        features = 128
        hidden = Conv3D(features, (3, 3, 3), padding=paddingType,kernel_regularizer=L2(regAmount),kernel_initializer=initType)(inputs)
        hidden = BatchNorm(renorm=True)(hidden)
        hidden = ELU(alpha=1.0)(hidden)
        hidden = Conv3D(features, (3, 3, 3), padding=paddingType,kernel_regularizer=L2(regAmount),kernel_initializer=initType)(hidden)
        hidden = BatchNorm(renorm=True)(hidden)
        shortcut = Conv3D(features, (1,1,1), strides=(1,1,1), padding=paddingType,kernel_initializer=initType)(inputs)
        hidden = add([shortcut,hidden])
        outputs= ELU(alpha=1.0)(hidden)
        
    pooling = MaxPooling3D(pool_size=(2,2,2),strides=(2,2,2), padding=paddingType)(outputs)
        
    hidden = Flatten()(pooling)
    
    hidden = Dense(128,kernel_regularizer=L2(regAmount),kernel_initializer=initType,name='FullyConnectedLayer')(hidden)
    hidden = ELU(alpha=1.0)(hidden)
    hidden = Dropout(dropRate)(hidden)
    
    if includeScannerGender:
        scanner  = Input((1,), name='Scanner')
        gender  = Input((1,), name='Gender')
        hidden = concatenate([scanner,gender,hidden])
    
    prediction = Dense(1,kernel_regularizer=L2(regAmount), name='AgePrediction')(hidden)
    if includeScannerGender:
        model = Model(inputs=[t1Input,scanner,gender],outputs=prediction)
    else:
        model = Model(inputs=[t1Input],outputs=prediction)
    return model
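generateAgePredictionResNet returns an uncompiled model. A hedged usage sketch follows (the 3D input shape and the loss choice are assumptions, not from the source):

# Hedged sketch (not from the source): building and compiling the network for
# a regression target; (121, 145, 121) is only an example MRI volume shape.
model = generateAgePredictionResNet((121, 145, 121), includeScannerGender=True)
model.compile(optimizer='adam', loss='mean_absolute_error', metrics=['mae'])
model.summary()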
    
Example #18
# normalize input
rnnNormalized = rnnNormalized / float(len(vocab))
# one-hot encoding the labels
rnnOutput = np_utils.to_categorical(rnnOutput)

# model
model = Sequential()
model.add(LSTM(
    512,
    input_shape=(rnnNormalized.shape[1], rnnNormalized.shape[2]),
    recurrent_dropout=0.3,
    return_sequences=True
))
model.add(LSTM(512, return_sequences=True, recurrent_dropout=0.3,))
model.add(LSTM(512))
model.add(BatchNorm())

# dropout to reduce overfitting
model.add(Dropout(0.3))
model.add(Dense(256))
model.add(Activation('relu')) # non-linear activation
model.add(BatchNorm())
model.add(Dropout(0.3))

model.add(Dense(len(vocab)))
model.add(Activation('softmax')) # softmax activation / crossentropy loss
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

model.load_weights('./trainedModels/check-87-0.9973.hdf5')

generatedFile = open('./texts/generatedSentences.txt', 'w')
img_H = config['img_H']
img_W = config['img_W']
grid_H = config['grid_H']
grid_W = config['grid_W']
box = config['box']
classes = config['classes']
max_boxes = config['max_true_boxes']

#Input 
x_img = Input(shape = [img_H, img_W, 3]) #tf.placeholder(tf.float32, [None, size, size, 3])
x_true_boxes = Input(shape = [1, 1, 1, max_boxes, 4]) 

#Layers (from the YOLO model)
#1
x = Conv2D(16, [3, 3], strides = [1, 1], padding = 'same', use_bias = False)(x_img)
x = BatchNorm()(x)
x = LRelu()(x)

x = MaxPooling2D([2, 2], strides = [2, 2])(x)

#2 
x = Conv2D(32, [3, 3], strides = [1, 1], padding = 'same', use_bias = False)(x)
x = BatchNorm()(x)
x = LRelu()(x)

x = MaxPooling2D([2, 2], strides = [2, 2])(x)

#3
x = Conv2D(64, [3, 3], strides = [1, 1], padding = 'same', use_bias = False)(x)
x = BatchNorm()(x)
x = LRelu()(x)