Example 1
def create_network(network_input, n_vocab):
    """ create the structure of the neural network """
    model = Sequential()
    model.add(
        LSTM(512,
             input_shape=(network_input.shape[1], network_input.shape[2]),
             recurrent_dropout=0.3,
             return_sequences=True))
    model.add(LSTM(
        512,
        return_sequences=True,
        recurrent_dropout=0.3,
    ))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # Load the weights to each node
    model.load_weights(
        '/content/drive/MyDrive/Colab Notebooks/beethoven/weights-improvement-19-1.0630-bigger.hdf5'
    )

    return model
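These snippets share a common set of imports that the examples omit; a minimal sketch of what they assume (note that `BatchNorm` is a local alias, not a Keras class name):

# Imports assumed by the Sequential examples in this section; `BatchNorm`
# is an alias for Keras' BatchNormalization layer.
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout, Activation
from keras.layers import BatchNormalization as BatchNorm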
Example 2
def create_network(network_input, n_vocab):
    """ create the structure of the neural network """
    model = Sequential()
    model.add(
        Bidirectional(LSTM(256,
                           input_shape=(network_input.shape[1],
                                        network_input.shape[2]),
                           recurrent_dropout=0.3,
                           return_sequences=True),
                      merge_mode="concat"))
    model.add(
        Bidirectional(LSTM(
            512,
            return_sequences=True,
            recurrent_dropout=0.3,
        ),
                      merge_mode="concat"))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab, name="new_layer"))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    return model
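A quick way to see the effect of merge_mode="concat": the forward and backward passes of a 256-unit Bidirectional LSTM are concatenated, so downstream layers receive 512 features per timestep. A small probe sketch (the sequence length of 100 and the single input feature are placeholder assumptions):

from keras.models import Sequential
from keras.layers import LSTM, Bidirectional

probe = Sequential()
probe.add(Bidirectional(LSTM(256, return_sequences=True),
                        merge_mode="concat",
                        input_shape=(100, 1)))
print(probe.output_shape)  # (None, 100, 512)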
Example 3
def create_network(network_input, n_vocab):
    """ create the structure of the neural network """
    model = Sequential()
    model.add(
        LSTM(args.cells,
             input_shape=(network_input.shape[1], network_input.shape[2]),
             recurrent_dropout=0.3,
             return_sequences=True))
    model.add(LSTM(
        args.cells,
        return_sequences=True,
        recurrent_dropout=0.2,
    ))
    model.add(LSTM(
        args.cells,
        return_sequences=True,
        recurrent_dropout=0.1,
    ))
    model.add(LSTM(args.cells))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('tanh'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss=LOSS, optimizer=OPTIMIZER)

    # Load the weights to each node
    model.load_weights(args.weights)

    return model
Example 4
def create_network(networkInput, n_vocab):
    """ create the structure of the neural network """
    model = Sequential()
    model.add(
        LSTM(hidden_layer,
             input_shape=(networkInput.shape[1], networkInput.shape[2]),
             recurrent_dropout=dropout,
             return_sequences=True))
    model.add(
        LSTM(
            hidden_layer,
            return_sequences=True,
            recurrent_dropout=dropout,
        ))
    model.add(LSTM(hidden_layer))
    model.add(BatchNorm())
    model.add(Dropout(dropout))
    model.add(Dense(dense))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(dropout))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='Adam',
                  metrics=['categorical_accuracy'])

    return model
Example 5
def create_network(network_input, n_vocab):
    model = Sequential()
    model.add(
        LSTM(512,
             input_shape=(network_input.shape[1], network_input.shape[2]),
             recurrent_dropout=0.3,
             return_sequences=True))
    model.add(LSTM(
        512,
        return_sequences=True,
        recurrent_dropout=0.3,
    ))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights('weights.hdf5')

    return model
Example 6
def create_network(network_input, n_vocab):
    """ create the structure of the neural network """
    model = Sequential()
    model.add(
        LSTM(512,
             input_shape=(network_input.shape[1], network_input.shape[2]),
             recurrent_dropout=0.3,
             return_sequences=True))
    model.add(LSTM(
        512,
        return_sequences=True,
        recurrent_dropout=0.3,
    ))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam')

    model.load_weights('lofi-hip-hop-weights-improvement-100-0.6290.hdf5')

    return model
Example 7
def lstm(network_input, n_vocab):
    """LSTM model architecture."""

    # Create LSTM network structure.
    model = Sequential()
    model.add(
        LSTM(128,
             input_shape=(network_input.shape[1], network_input.shape[2]),
             recurrent_dropout=0.3,
             return_sequences=True))
    model.add(LSTM(
        128,
        return_sequences=True,
        recurrent_dropout=0.3,
    ))
    model.add(LSTM(128))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    return model
Example 8
def create_network(network_input, n_vocab, mode):
    #create the structure of the neural network
    model = Sequential()
    model.add(LSTM(
        hidden_layer,
        input_shape=(network_input.shape[1], network_input.shape[2]),
        recurrent_dropout=dropout,
        return_sequences=True
    ))
    model.add(LSTM(hidden_layer, return_sequences=True, recurrent_dropout=dropout,))
    model.add(LSTM(hidden_layer))
    model.add(BatchNorm())
    model.add(Dropout(dropout))
    model.add(Dense(dense))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(dropout))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam')

    # Load the weights to each node
    # mode means major/minor
    model.load_weights(f'./data/{mode}/{progression}.hdf5')

    return model
Example 9
def DenseNet_encoder(blocks,
             input_tensor,
             pooling=None,
             train_bn=False):
    """Instantiates the DenseNet architecture."""

    bn_axis = 3 if K.image_data_format() == 'channels_last' else 1

    x = ZeroPadding2D(padding=((3, 3), (3, 3)))(input_tensor)
   # x = Lambda(lambda x: tf.pad(x, [[0,0],[3,3],[3,3],[0,0]], mode='SYMMETRIC'))(input_tensor)
    x = Conv2D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
    x = BatchNorm(axis=bn_axis, epsilon=1.001e-5,
                  name='conv1/bn')(x, training=train_bn)
    R1 = x = Activation('relu', name='conv1/relu')(x)
    x = ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
    x = MaxPooling2D(3, strides=2, name='pool1')(x)

    R2 = x = dense_block(x, blocks[0], name='conv2', train_bn=train_bn)
    _, x = transition_block(x, 0.5, name='pool2', train_bn=train_bn)
    R3 = x = dense_block(x, blocks[1], name='conv3', train_bn=train_bn)
    _, x = transition_block(x, 0.5, name='pool3', train_bn=train_bn)
    R4 = x = dense_block(x, blocks[2], name='conv4', train_bn=train_bn)
    _, x = transition_block(x, 0.5, name='pool4', train_bn=train_bn)
    x = dense_block(x, blocks[3], name='conv5', train_bn=train_bn)

    x = BatchNorm(axis=bn_axis, epsilon=1.001e-5,
                           name='bn')(x, training=train_bn)

    if pooling == 'avg':
        x = AveragePooling2D(7, name='avg_pool')(x)
    elif pooling == 'max':
        x = MaxPooling2D(7, name='max_pool')(x)

    return [R1, R2, R3, R4], x
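For context, the blocks argument mirrors the standard DenseNet configurations (DenseNet-121 uses four dense blocks of 6, 12, 24 and 16 conv blocks). A hedged usage sketch, assuming a 224x224 RGB input tensor:

from keras.layers import Input

input_tensor = Input(shape=(224, 224, 3))
# Returns the four skip feature maps plus the final encoder output.
skips, encoded = DenseNet_encoder([6, 12, 24, 16], input_tensor,
                                  pooling='avg', train_bn=False)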
Example 10
def generate_model(model_input, n_elements, file):
    """ build the model """
    print("Generating RNN from %s" % file)

    model = Sequential()
    model.add(
        LSTM(512,
             input_shape=(model_input.shape[1], model_input.shape[2]),
             recurrent_dropout=0.3,
             return_sequences=True))
    model.add(LSTM(
        512,
        return_sequences=True,
        recurrent_dropout=0.3,
    ))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dense(256))
    model.add(Dropout(0.3))
    model.add(Dense(n_elements))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # Load the weights from training
    model.load_weights(file)

    return model
Example 11
 def model_A(self, network_input, n_vocab):
     """ Create the structure of the neural network """
     model = Sequential()
     model.add(LSTM(
         512,
         input_shape=(network_input.shape[1], network_input.shape[2]),
         recurrent_dropout=0.3,
         return_sequences=True
     ))
     model.add(LSTM(512, return_sequences=True, recurrent_dropout=0.3,))
     model.add(LSTM(512))
     model.add(BatchNorm())
     model.add(Dropout(0.3))
     model.add(Dense(256))
     model.add(Activation('relu'))
     model.add(BatchNorm())
     model.add(Dropout(0.3))
     model.add(Dense(n_vocab))
     model.add(Activation('softmax'))
     model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
     
     if self.weights is not None:
         model.load_weights(self.weights)
     else:
         print("Weights were not loaded, starting from nothing")  
         
     return model
Example 12
def create_network(network_input, n_vocab, weights):
    """ create the structure of the neural network """
    model = Sequential()
    model.add(
        LSTM(512,
             input_shape=(network_input.shape[1], network_input.shape[2]),
             recurrent_dropout=0.3,
             return_sequences=True))
    model.add(LSTM(
        512,
        return_sequences=True,
        recurrent_dropout=0.3,
    ))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # Load the weights to each node
    model.load_weights(weights)

    return model
Example 13
def create_network(network_input, n_vocab):
    """ create the structure of the neural network """
    model = Sequential()
    model.add(
        LSTM(512,
             input_shape=(network_input.shape[1], network_input.shape[2]),
             recurrent_dropout=0.3,
             return_sequences=True))
    model.add(LSTM(
        512,
        return_sequences=True,
        recurrent_dropout=0.3,
    ))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # Load the lowest-loss checkpoint: filenames are assumed to end with the
    # loss value (e.g. '...-1.0630.hdf5'), so x[-11:-5] extracts it
    if os.path.exists('weights_' + name + '/'):
        files = []
        for each in os.listdir('weights_' + name + '/'):
            files.append(os.path.join('weights_' + name + '/', each))
        files.sort(key=lambda x: float(x[-11:-5]))
        model.load_weights(files[0])
        print("Weight ", files[0], " Loaded...............")

    return model
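The fixed-position slice x[-11:-5] breaks as soon as the filename layout changes; a regex-based alternative sketch (the filename pattern is an assumption based on the checkpoint name in Example 1):

import re

def checkpoint_loss(path):
    """Extract the loss value embedded in a checkpoint filename."""
    match = re.search(r'-(\d+\.\d+)', path)
    return float(match.group(1)) if match else float('inf')

# files.sort(key=checkpoint_loss)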
Example 14
def create_network(network_input, n_vocab):
    """ create the structure of the neural network """
    model = Sequential()
    model.add(
        LSTM(64,
             input_shape=(network_input.shape[1], network_input.shape[2]),
             recurrent_dropout=0.3,
             return_sequences=True))
    model.add(LSTM(
        64,
        return_sequences=True,
        recurrent_dropout=0.3,
    ))
    model.add(LSTM(64))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(10))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    es = EarlyStopping(monitor='val_loss', mode='min')
    callbacks = [es]  # note: built here but unused; pass callbacks=callbacks to model.fit()
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # Load the weights to each node

    model.load_weights('weights-improvement-666-3.4629-bigger.hdf5')

    return model
Example 15
def generate_model(model_input, n_elements):
    """ Build the RNN """
    model = Sequential()
    model.add(
        LSTM(512,
             input_shape=(model_input.shape[1], model_input.shape[2]),
             recurrent_dropout=0.3,
             return_sequences=True))
    model.add(LSTM(
        512,
        return_sequences=True,
        recurrent_dropout=0.3,
    ))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dense(256))
    model.add(Dropout(0.3))
    model.add(Dense(n_elements))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    return model
Example 16
def conv_block(x, growth_rate, name, train_bn):
    """A building block for a dense block.

    # Arguments
        x: input tensor.
        growth_rate: float, growth rate at dense layers.
        name: string, block label.

    # Returns
        output tensor for the block.
    """
    bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
    x1 = BatchNorm(axis=bn_axis, epsilon=1.001e-5,
                    name=name + '_0_bn')(x, training=train_bn)
    x1 = Activation('relu', name=name + '_0_relu')(x1)
    x1 = Conv2D(4 * growth_rate, 1, use_bias=False,
                name=name + '_1_conv')(x1)
    x1 = BatchNorm(axis=bn_axis, epsilon=1.001e-5,
                    name=name + '_1_bn')(x1, training=train_bn)
    x1 = Activation('relu', name=name + '_1_relu')(x1)
    x1 = ZeroPadding2D(padding=((1, 1), (1, 1)))(x1)
   # x1 = Lambda(lambda x: tf.pad(x, [[0,0],[1,1],[1,1],[0,0]], mode='SYMMETRIC'))(x1)
    x1 = Conv2D(growth_rate, 3, padding='valid', use_bias=False,
                name=name + '_2_conv')(x1)
    x = Concatenate(axis=bn_axis, name=name + '_concat')([x, x1])
    return x
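conv_block is chained by the dense_block function used in the encoder above; a minimal sketch in the style of the stock Keras DenseNet implementation (the growth rate of 32 is an assumption):

def dense_block(x, blocks, name, train_bn):
    """Apply `blocks` conv blocks, each concatenating onto the running tensor."""
    for i in range(blocks):
        x = conv_block(x, 32, name=name + '_block' + str(i + 1),
                       train_bn=train_bn)
    return x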
Example 17
def lstm_model_2(window_size, dropout_rate, size_vocab, size_lstm):
    model = Sequential()
    # Use the dropout_rate argument instead of hard-coding 0.3 so the
    # parameter actually takes effect.
    model.add(LSTM(size_lstm, input_shape=(window_size, 1),
                   recurrent_dropout=dropout_rate, return_sequences=True))  # 512
    model.add(LSTM(size_lstm, return_sequences=True, recurrent_dropout=dropout_rate))  # 512
    model.add(LSTM(size_lstm))  # 512
    model.add(BatchNorm())
    model.add(Dropout(dropout_rate))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(dropout_rate))
    model.add(Dense(size_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

    return model
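The trailing "# 512" comments suggest the intended configuration; a hedged usage sketch (all argument values here are assumptions):

model = lstm_model_2(window_size=100, dropout_rate=0.3,
                     size_vocab=n_vocab, size_lstm=512)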
Example 18
def create_network(network_input, n_vocab):
    """ create the structure of the neural network """
    nmodel = None
    if args.weights:
        nmodel = load_model(args.weights)
    else:
        model = Sequential()
        model.add(
            LSTM(
                args.cells,
                input_shape=(network_input.shape[1], network_input.shape[2]),
                recurrent_dropout=0.3,
                return_sequences=True,
            ))
        model.add(
            LSTM(
                args.cells,
                return_sequences=True,
                recurrent_dropout=0.2,
            ))
        model.add(
            LSTM(
                args.cells,
                return_sequences=True,
                recurrent_dropout=0.1,
            ))
        model.add(LSTM(args.cells))
        model.add(BatchNorm())
        model.add(Dropout(0.3))
        model.add(Dense(256))
        model.add(Activation('tanh'))
        model.add(BatchNorm())
        model.add(Dropout(0.3))
        model.add(Dense(n_vocab))
        model.add(Activation('softmax'))

        if (args.ngpus > 1):
            print('INFO: using %d devices' % args.ngpus)
            parallel_model = multi_gpu_model(model, gpus=2)
            parallel_model.compile(loss=LOSS, optimizer=OPTIMIZER)
            nmodel = parallel_model
        else:
            print('INFO: using only one device')
            model.compile(loss=LOSS, optimizer=OPTIMIZER)
            nmodel = model
    return nmodel
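Note that multi_gpu_model was removed in TensorFlow 2.x; the rough tf.keras equivalent is a distribution strategy. A sketch, not a drop-in replacement (build_layers is a hypothetical helper that constructs the Sequential stack above):

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    model = build_layers()  # layers must be created inside the scope
    model.compile(loss=LOSS, optimizer=OPTIMIZER)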
Example 19
def create_network(network_input, n_vocab, Pitch):
    """ create the structure of the neural network """
    model = Sequential()
    model.add(
        LSTM(512,
             input_shape=(network_input.shape[1], network_input.shape[2]),
             recurrent_dropout=0.3,
             return_sequences=True))
    model.add(LSTM(
        512,
        return_sequences=True,
        recurrent_dropout=0.3,
    ))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    files = ""
    i = 0
    print("Choose your weights:  ex 0 or 1 or 2")
    files = glob.glob('weights/' + Pitch + '/*')
    # Load the weights to each node              #讀取weights權重
    minLoss = files[0].split("-")[len(files[0].split("-")) - 2]
    chooseWeight = ""  #最後選擇哪個模型  =>loss小的
    for file in files:
        fileX = file.split("-")
        if fileX[len(fileX) - 2] <= minLoss:
            minLoss = fileX[len(fileX) - 2]
            print('minLoss change')
            chooseWeight = file
    # print(str(i)+"."+file)
        i += 1

#  x = int(input(""))
    print("you use :" + chooseWeight)

    model.load_weights(chooseWeight)  # weights.hdf5

    return model
Example 20
def generate_model(train_samples, num_pitches):
    model = Sequential()
    model.add(
        LSTM(512,
             input_shape=(train_samples.shape[1], train_samples.shape[2]),
             recurrent_dropout=0.3,
             return_sequences=True))
    model.add(LSTM(512, return_sequences=True, recurrent_dropout=0.3))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(num_pitches))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    return model
Example 21
def createNetwork(networkInput, nDiff):
    """ create the structure of the neural network """
    # LSTM (Long Short-Term Memory) is an RNN layer that takes a sequence of
    # inputs and returns a sequence or a matrix (here, a sequence)
    # the Activation layer determines which activation function the network
    # uses for its computations
    # for the LSTM, Dense and Activation layers the first parameter is the
    # number of nodes in the layer
    # the dropout parameter is the fraction of input values discarded
    # during training
    # input_shape tells the network the shape of the data it will train on

    ###### these layers are worth experimenting with
    model = Sequential()
    model.add(
        LSTM(512,
             input_shape=(networkInput.shape[1], networkInput.shape[2]),
             recurrent_dropout=0.3,
             return_sequences=True))
    model.add(LSTM(
        512,
        return_sequences=True,
        recurrent_dropout=0.3,
    ))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(nDiff))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    # the last layer of the network has the same number of nodes as our
    # output so that it maps directly
    return model
Example 22
    def build_model(self):
        self.encoder = Sequential()

        self.encoder.add(
            LSTM(self.hidden_size,
                 input_shape=(self.inp_length, self.notes_classes),
                 return_sequences=True,
                 recurrent_dropout=self.dropout))
        self.encoder.add(
            LSTM(self.hidden_size,
                 recurrent_dropout=self.dropout,
                 return_sequences=True))
        self.encoder.add(BatchNorm())
        self.encoder.add(Dropout(self.dropout))
        self.encoder.add(Dense(256, activation="relu"))
        self.encoder.add(BatchNorm())
        self.encoder.add(Dense(128, activation="relu"))
        self.encoder.add(BatchNorm())
        self.encoder.add(Dense(64, activation="relu", name="encoder_out"))

        self.decoder = Sequential()
        # self.decoder.add(Input(shape=(128, 87)))
        self.decoder.add(Dense(64, name="decoder_in", activation="relu"))
        self.decoder.add(BatchNorm())
        self.decoder.add(Dense(128, activation="relu"))
        self.decoder.add(BatchNorm())
        self.decoder.add(Dense(256, activation="relu"))
        self.decoder.add(
            LSTM(self.hidden_size,
                 recurrent_dropout=self.dropout,
                 return_sequences=True))
        self.decoder.add(
            LSTM(self.hidden_size,
                 recurrent_dropout=self.dropout,
                 return_sequences=True))
        self.decoder.add(Dropout(self.dropout))
        self.decoder.add(Dense(256, activation="relu"))
        self.decoder.add(Dense(128, activation="relu"))
        self.decoder.add(BatchNorm())
        self.decoder.add(Dense(self.notes_classes, activation="softmax"))

        decoded_inp = Input(shape=(128, 64))
        decoded_out = self.decoder(decoded_inp)
        self.decoder = Model(decoded_inp, decoded_out)

        self.model = Sequential()
        self.model.add(self.encoder)
        self.model.add(self.decoder)
        self.model.compile(loss="categorical_crossentropy",
                           optimizer='rmsprop',
                           metrics=['categorical_accuracy'])
Example 23
def transition_block(x, reduction, name, train_bn):
    """A transition block.

    # Arguments
        x: input tensor.
        reduction: float, compression rate at transition layers.
        name: string, block label.

    # Returns
        output tensor for the block.
    """
    bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
    x = BatchNorm(axis=bn_axis, epsilon=1.001e-5,
                    name=name + '_bn')(x, training=train_bn)
    x = Activation('relu', name=name + '_relu')(x)
    skip = x = Conv2D(int(K.int_shape(x)[bn_axis] * reduction), 1, use_bias=False,
               name=name + '_conv')(x)
    x = AveragePooling2D(2, strides=2, name=name + '_pool')(x)
    return skip, x
Example 24
def deconv_block(x, skip, network_name, fpn_d, train_bn):
    bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
    x = UpSampling2D(2, data_format=K.image_data_format())(x)
    if skip is not None:
        channel = K.int_shape(skip)[bn_axis]
        channel = fpn_d if channel < fpn_d else channel
        #x = Conv2D(channel, (1, 1), name=network_name+'_conv',
                   #padding='same', use_bias=False)(x)
        #x = Add(name=network_name+'_add')([x, skip])
        x = Concatenate(axis=bn_axis)([x, skip])
        x = Conv2D(channel, (1, 1), name=network_name+'_conv',
                   padding='same', use_bias=False)(x)
    else:
        channel = K.int_shape(x)[bn_axis]
        channel = fpn_d if channel < fpn_d else channel
        x = Conv2D(channel, (1, 1), name=network_name+'_conv', use_bias=False)(x)
    
    x = BatchNorm(axis=bn_axis, epsilon=1.001e-5,
                    name=network_name+'_bn')(x, training=train_bn)
    x = Activation('relu', name=network_name+'_relu')(x)
    return x
Example 25
def testing(composer, song_name, save_path, output_size, save, progress_bar):
    progress = 0
    progress_bar.update_idletasks()

    with open(f'./data/{composer}/input.txt', 'r') as text:
        events = text.read().split(' ')
    # Define the length of each sequence of input data
    seq_length = 12
    # Get number of unique events in the data
    n_vocab = len(set(events))
    print(f'{n_vocab} unique events')
    # get all pitch names
    event_names = sorted(set(event for event in events))

    # create a dictionary to map pitches to integers
    event_to_int = dict(
        (event, number) for number, event in enumerate(event_names))

    network_input = []
    network_output = []

    # create input sequences and their labels
    for i in range(0, len(events) - seq_length, 1):
        seq_in = events[i:i + seq_length]
        seq_out = events[i + seq_length]
        network_input.append([event_to_int[char] for char in seq_in])
        network_output.append(event_to_int[seq_out])

    n_patterns = len(network_input)

    # reshape the input into a format compatible with LSTM layers
    network_input = numpy.reshape(network_input, (n_patterns, seq_length, 1))
    # normalize input
    network_input = network_input / float(n_vocab)

    network_output = np_utils.to_categorical(network_output)

    # Define Neural Network Architecture -------------------------------------------
    model = Sequential()
    model.add(
        LSTM(512,
             input_shape=(network_input.shape[1], network_input.shape[2]),
             return_sequences=True))
    model.add(Dropout(0.3))
    model.add(LSTM(512, return_sequences=True))
    model.add(Dropout(0.3))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.load_weights(f'./data/{composer}/weights.txt')

    # pick a random sequence from the input as a starting point for the prediction
    start = numpy.random.randint(0, len(network_input) - 1)

    int_to_event = dict(
        (number, event) for number, event in enumerate(event_names))

    pattern = list(network_input[start])
    prediction_output = []

    # generate events
    for note_index in range(output_size):
        progress_bar['value'] = progress
        progress_bar.update_idletasks()
        prediction_input = numpy.reshape(pattern, (1, len(pattern), 1))
        prediction_input = prediction_input / float(n_vocab)
        prediction_input = numpy.asarray(prediction_input).astype(
            numpy.float32)
        prediction = model.predict(prediction_input, verbose=0)
        index = numpy.argmax(prediction)
        result = int_to_event[index]
        prediction_output.append(result)
        print(result, end=' ')
        pattern.append(index)
        pattern = pattern[1:len(pattern)]
        progress += 1

    with open(f'./outputs/{song_name}.txt', "w") as txt:
        for event in prediction_output:
            print(event, end=' ')
            txt.write(f'{event} ')

    progress = 0
    progress_bar.update_idletasks()
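The generation loop above always takes the argmax, which tends to get stuck repeating the same few events. A common alternative is temperature sampling; a sketch (the temperature value is a free parameter):

def sample_with_temperature(distribution, temperature=1.0):
    """Sample an index from a softmax distribution rescaled by temperature."""
    logits = numpy.log(distribution + 1e-9) / temperature
    probs = numpy.exp(logits) / numpy.sum(numpy.exp(logits))
    return numpy.random.choice(len(probs), p=probs)

# In the loop: index = sample_with_temperature(prediction[0], temperature=0.8)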
Example 26
def fusion_graph_with_rgb(fg_features,
                          bg_features,
                          input_rgb,
                          output_shape=None,
                          filters=[256, 128, 128, 64, 256],
                          train_bn=True,
                          network_name='fusion_',
                          d=256):
    '''
    :param fg_features: [b, 512, 512, d]
    :param bg_features: [b, 512, 512, d]
    :param input_rgb: raw RGB input tensor
    :param output_shape: unused unless the commented-out upsampling layer is re-enabled
    :param filters: list of 4 or 5 filter counts
    :param train_bn: whether BatchNorm layers run in training mode
    :param network_name: prefix for layer names
    :param d: channel depth of the RGB convolution
    :return: fusion output and the list of intermediate conv features
    '''

    if len(filters) == 5:
        nb_filter0, nb_filter1, nb_filter2, nb_filter3, nb_filter4 = filters
    else:
        nb_filter0, nb_filter1, nb_filter2, nb_filter3 = filters

    # TODO: input_rgb = raw_image - mean_pixel.
    # TODO: BatchNorm
    conv_rgb = KL.Conv2D(d, (3, 3),
                         strides=1,
                         name=network_name + 'convrgb',
                         padding='same')(input_rgb)
    conv_rgb = BatchNorm(name=network_name + "bnrgb")(conv_rgb,
                                                      training=train_bn)
    fusion_input = KL.Concatenate(axis=3, name=network_name + 'input_concate')(
        [fg_features, bg_features, conv_rgb])

    x = KL.Conv2D(nb_filter0, (3, 3),
                  strides=1,
                  name=network_name + "conv0",
                  padding='same')(fusion_input)
    x = BatchNorm(name=network_name + "bn0")(x, training=train_bn)
    conv0 = KL.Activation('relu', name=network_name + "relu0")(x)

    # fusion_conv1
    x = KL.Conv2D(nb_filter1, (3, 3),
                  strides=1,
                  name=network_name + "conv1",
                  padding='same')(conv0)
    x = BatchNorm(name=network_name + "bn1")(x, training=train_bn)
    x = KL.Activation('relu', name=network_name + "relu1")(x)
    conv1 = x

    # fusion_conv2
    x = KL.Conv2D(nb_filter2, (3, 3),
                  strides=1,
                  name=network_name + "conv2",
                  padding='same')(x)
    x = BatchNorm(name=network_name + "bn2")(x, training=train_bn)
    x = KL.Activation('relu', name=network_name + 'relu2')(x)
    conv2 = x

    # fusion_conv3
    x = KL.Conv2D(nb_filter3, (3, 3),
                  strides=1,
                  name=network_name + "conv3",
                  padding='same')(x)
    x = BatchNorm(name=network_name + "bn3")(x, training=train_bn)
    x = KL.Activation('relu', name=network_name + 'relu3')(x)
    conv3 = x

    # fusion_conv4
    if len(filters) == 5:
        x = KL.Conv2D(nb_filter4, (3, 3),
                      strides=1,
                      name=network_name + "conv4",
                      padding='same')(x)
        x = BatchNorm(name=network_name + "bn4")(x, training=train_bn)
        x = KL.Activation('relu', name=network_name + 'relu4')(x)
        conv4 = x

    # fusion_output
    x = KL.Conv2D(1, (1, 1),
                  strides=1,
                  name=network_name + "conv_output",
                  padding='same')(x)
    # x = BatchNorm(name=network_name+"bn_output")(x, training=train_bn)
    output = KL.Activation('sigmoid', name=network_name + "sigmoid_output")(x)

    # output = BilinearUpsampling(output_size=(output_shape[0], output_shape[1]), name=network_name + '_upsampling')(
    #     output)
    return output, [conv0, conv1, conv2, conv3]
Example 27
def create_network(network_input_notes, n_vocab_notes, network_input_offsets,
                   n_vocab_offsets, network_input_durations,
                   n_vocab_durations):

    # Branch of the network that considers notes
    inputNotesLayer = Input(shape=(network_input_notes.shape[1],
                                   network_input_notes.shape[2]))
    inputNotes = LSTM(256,
                      input_shape=(network_input_notes.shape[1],
                                   network_input_notes.shape[2]),
                      return_sequences=True)(inputNotesLayer)
    inputNotes = Dropout(0.2)(inputNotes)

    # Branch of the network that considers note offset
    inputOffsetsLayer = Input(shape=(network_input_offsets.shape[1],
                                     network_input_offsets.shape[2]))
    inputOffsets = LSTM(256,
                        input_shape=(network_input_offsets.shape[1],
                                     network_input_offsets.shape[2]),
                        return_sequences=True)(inputOffsetsLayer)
    inputOffsets = Dropout(0.2)(inputOffsets)

    # Branch of the network that considers note duration
    inputDurationsLayer = Input(shape=(network_input_durations.shape[1],
                                       network_input_durations.shape[2]))
    inputDurations = LSTM(256,
                          input_shape=(network_input_durations.shape[1],
                                       network_input_durations.shape[2]),
                          return_sequences=True)(inputDurationsLayer)
    #inputDurations = Dropout(0.3)(inputDurations)
    inputDurations = Dropout(0.2)(inputDurations)

    #Concatenate the three input networks together into one branch now
    inputs = concatenate([inputNotes, inputOffsets, inputDurations])

    # A cheeky LSTM to consider everything learnt from the three separate branches
    x = LSTM(512, return_sequences=True)(inputs)
    x = Dropout(0.3)(x)
    x = LSTM(512)(x)
    x = BatchNorm()(x)
    x = Dropout(0.3)(x)
    x = Dense(256, activation='relu')(x)

    #Time to split into three branches again...

    # Branch of the network that classifies the note
    outputNotes = Dense(128, activation='relu')(x)
    outputNotes = BatchNorm()(outputNotes)
    outputNotes = Dropout(0.3)(outputNotes)
    outputNotes = Dense(n_vocab_notes, activation='softmax',
                        name="Note")(outputNotes)

    # Branch of the network that classifies the note offset
    outputOffsets = Dense(128, activation='relu')(x)
    outputOffsets = BatchNorm()(outputOffsets)
    outputOffsets = Dropout(0.3)(outputOffsets)
    outputOffsets = Dense(n_vocab_offsets, activation='softmax',
                          name="Offset")(outputOffsets)

    # Branch of the network that classifies the note duration
    outputDurations = Dense(128, activation='relu')(x)
    outputDurations = BatchNorm()(outputDurations)
    outputDurations = Dropout(0.3)(outputDurations)
    outputDurations = Dense(n_vocab_durations,
                            activation='softmax',
                            name="Duration")(outputDurations)

    # Tell Keras what our inputs and outputs are
    model = Model(
        inputs=[inputNotesLayer, inputOffsetsLayer, inputDurationsLayer],
        outputs=[outputNotes, outputOffsets, outputDurations])

    #Adam seems to be faster than RMSProp and learns better too
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    # Useful to try RMSProp though

    # LOAD WEIGHTS HERE IF YOU WANT TO CONTINUE TRAINING!
    #model.load_weights(weights_name)

    return model
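Because the output layers are named "Note", "Offset" and "Duration", targets can be supplied as a dict keyed by those names. A hedged training sketch (the target arrays and the epoch/batch settings are placeholders):

model.fit([network_input_notes, network_input_offsets, network_input_durations],
          {"Note": notes_target, "Offset": offsets_target,
           "Duration": durations_target},
          epochs=200, batch_size=64)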
Example 28
def train_CNN(X_train, Y_train):
    """
    Builds, trains and evaluates CNN classifier
    :param X_train: np.ndarray (n by m by 13): features
    :param Y_train: np.ndarray (n by 4): target
    :return: saves the model weights and prints the accuracy and AUC
    """
    # reshape input for CNN
    X_train = X_train.reshape((X_train.shape[0], 1, X_train.shape[1], X_train.shape[2]))
    # split the set on training and testing sets
    X_train, X_test, Y_train, Y_test = train_test_split(X_train, Y_train, test_size=0.1, random_state=15)

    nb_filters = 32
    pool_size = (2, 2)
    kernel_size = (3, 3)
    batch_size = 16
    nb_epochs = 40
    nb_classes = Y_train.shape[1]
    input_shape = (1, X_train.shape[2], X_train.shape[3])
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=2, verbose=1)

    model = Sequential()
    model.add(Conv2D(nb_filters, kernel_size=kernel_size, padding='valid', input_shape=input_shape,
                     data_format='channels_first'))
    model.add(BatchNorm())
    model.add(LeakyReLU(alpha=0.01))

    model.add(Conv2D(nb_filters, kernel_size=kernel_size, data_format='channels_first'))
    model.add(BatchNorm())
    model.add(LeakyReLU(alpha=0.01))
    model.add(MaxPooling2D(pool_size=pool_size, strides=(1, 1)))
    model.add(Dropout(0.1))

    model.add(Conv2D(nb_filters, kernel_size=kernel_size, data_format='channels_first'))
    model.add(BatchNorm())
    model.add(LeakyReLU(alpha=0.01))
    model.add(MaxPooling2D(pool_size=pool_size, strides=(1, 1)))
    model.add(Dropout(0.1))

    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('linear'))
    model.add(Dropout(0.1))

    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', metrics=['categorical_accuracy'], optimizer='adam')
    model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epochs, verbose=1,
                        callbacks=[early_stopping],
                        validation_split=0.15)

    print('CNN classifier performance on the testing set')
    evaluate_model(X_test, Y_test, model)
    model_name = 'model_CNN.h5'
    save_dir = os.path.join(os.path.dirname(os.getcwd()), 'data')
    model.save(os.path.join(save_dir, model_name))
    print('CNN classifier is saved', model_name)
    print('-----------------------------------------')