Code Example #1
def create_network(network_input, n_vocab):
    # Log the input tensor dimensions and vocabulary size for debugging
    print(network_input.shape[0])
    print(network_input.shape[1])
    print(network_input.shape[2])
    print(n_vocab)

    model = Sequential()
    model.add(
        Bidirectional(LSTM(lstm_size,
                           return_sequences=True,
                           recurrent_dropout=r_dropout),
                      input_shape=(network_input.shape[1],
                                   network_input.shape[2])))
    model.add(Dropout(dropout))
    model.add(
        Bidirectional(
            LSTM(lstm_size,
                 return_sequences=False,
                 recurrent_dropout=r_dropout)))
    model.add(Dropout(dropout))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))

    # Lowercase alias from standalone Keras 2.x; tf.keras spells it optimizers.RMSprop()
    optimizer = optimizers.rmsprop()
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    if weights_to_load != "":
        model.load_weights(weights_to_load)

    return model
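
The function reads several module-level names (lstm_size, dropout, r_dropout, weights_to_load) and imports that the excerpt omits. A minimal sketch of that assumed context under standalone Keras 2.x, with illustrative values:

import numpy as np
from keras.models import Sequential
from keras.layers import Bidirectional, LSTM, Dropout, Dense, Activation
from keras import optimizers

# Illustrative hyperparameters; the originals are not shown in the excerpt.
lstm_size = 512
dropout = 0.3
r_dropout = 0.3
weights_to_load = ""  # path to an HDF5 checkpoint, or "" to train from scratch

# Dummy input shaped (samples, timesteps, features) plus a vocabulary size.
network_input = np.zeros((100, 50, 1))
model = create_network(network_input, n_vocab=128)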
Code Example #2
def createModel():
    image_size = IMAGE_SIZE

    image_input = Input(shape=(image_size, image_size, 3), name='input_layer')

    conv_1 = Conv2D(filters=64, kernel_size=(3, 3),
                    use_bias=False)(image_input)
    conv_1_normalized = BatchNormalization()(conv_1)
    conv_1_activation = Activation('relu')(conv_1_normalized)
    conv_1_pooled = MaxPooling2D(padding='same')(conv_1_activation)

    conv_2 = Conv2D(filters=128, kernel_size=(3, 3),
                    use_bias=False)(conv_1_pooled)
    conv_2_normalized = BatchNormalization()(conv_2)
    conv_2_activation = Activation('relu')(conv_2_normalized)
    conv_2_pooled = MaxPooling2D(padding='same')(conv_2_activation)

    conv_3 = Conv2D(filters=128, kernel_size=(3, 3),
                    use_bias=False)(conv_2_pooled)
    conv_3_normalized = BatchNormalization()(conv_3)
    conv_3_activation = Activation('relu')(conv_3_normalized)
    conv_3_pooled = MaxPooling2D(padding='same')(conv_3_activation)

    conv_4 = Conv2D(filters=256, kernel_size=(3, 3),
                    use_bias=False)(conv_3_pooled)
    conv_4_normalized = BatchNormalization()(conv_4)
    conv_4_activation = Activation('relu')(conv_4_normalized)
    conv_4_pooled = MaxPooling2D(padding='same')(conv_4_activation)

    conv_5 = Conv2D(filters=512, kernel_size=(3, 3),
                    use_bias=False)(conv_4_pooled)
    conv_5_normalized = BatchNormalization()(conv_5)
    conv_5_activation = Activation('relu')(conv_5_normalized)
    conv_5_pooled = MaxPooling2D(padding='same')(conv_5_activation)

    conv_flattened = Flatten()(conv_5_pooled)

    dense_layer_1 = Dense(512, use_bias=False)(conv_flattened)
    dense_normalized = BatchNormalization()(dense_layer_1)
    dense_activation = Activation('relu')(dense_normalized)

    output = Dense(43, activation='softmax',
                   name='output_layer')(dense_activation)

    model = tf.keras.Model(inputs=image_input, outputs=[output])

    # rmsprop here must resolve to the RMSprop class; 1e-3 is the learning rate
    model.compile(optimizer=rmsprop(1e-3),
                  loss={'output_layer': 'categorical_crossentropy'},
                  metrics=['accuracy'])
    model.summary()
    return model
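
createModel is nearly self-contained; the excerpt omits only the imports and the IMAGE_SIZE constant. A sketch of plausible surroundings (the rmsprop alias and the IMAGE_SIZE value are assumptions):

import tensorflow as tf
from tensorflow.keras.layers import (Input, Conv2D, BatchNormalization,
                                     Activation, MaxPooling2D, Flatten, Dense)
from tensorflow.keras.optimizers import RMSprop as rmsprop  # assumed alias

IMAGE_SIZE = 64  # illustrative; must be large enough to survive five pooling stages

model = createModel()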
Code Example #3
def new_top_layer(bottleneck_features):
    # FCL 1 - Flatten to 1D -> Hidden FCL -> Relu -> Dropout prob 0.5
    inferenceModel.add(Flatten(input_shape=bottleneck_features.shape[1:]))
    inferenceModel.add(Dense(FCL_SIZE))
    inferenceModel.add(Activation('relu'))
    inferenceModel.add(Dropout(DROPOUT_2))

    # FCL 2 - Flatten to 1D -> Final FCL -> softmax
    inferenceModel.add(Dense(CLASSES))
    inferenceModel.add(Activation('softmax')) # outputs class probabilities

    # Lowercase rmsprop alias from standalone Keras 2.x; lr= and decay= are its legacy arguments
    optimizer = optimizers.rmsprop(lr=LEARNING_RATE, decay=DECAY_RATE)
    # Configure training process for Multi-class classification
    inferenceModel.compile(
            optimizer=optimizer, # Adam or RMSprop both work here
            loss='categorical_crossentropy', # cross-entropy loss for multi-class targets
            metrics=['accuracy']) # report accuracy during training
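
new_top_layer mutates a module-level inferenceModel and reads several constants, none of which appear in the excerpt. A minimal sketch of that assumed context under standalone Keras 2.x (all values illustrative):

import numpy as np
from keras import optimizers
from keras.models import Sequential
from keras.layers import Flatten, Dense, Activation, Dropout

FCL_SIZE = 256       # illustrative hidden-layer width
CLASSES = 10         # illustrative class count
DROPOUT_2 = 0.5
LEARNING_RATE = 1e-4
DECAY_RATE = 1e-6

inferenceModel = Sequential()

# e.g. bottleneck features from a frozen convolutional base (shape illustrative)
bottleneck_features = np.zeros((32, 4, 4, 512), dtype=np.float32)
new_top_layer(bottleneck_features)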
Code Example #4
File: keras_models.py, Project: solversa/AutoDL-1
    def init_model(self, config):

        input_shape = config['max_len']
        num_classes = config['num_classes']

        inputs = Input(shape=(input_shape, 96))
        x = inputs
        cnn1 = Conv1D(50,
                      kernel_size=1,
                      strides=1,
                      padding='same',
                      kernel_initializer='he_normal')(x)
        cnn1 = BatchNormalization(axis=-1)(cnn1)
        cnn1 = LeakyReLU()(cnn1)
        cnn1 = GlobalMaxPooling1D()(cnn1)  # alternative: CNN_Dynamic_MaxPooling(cnn1, 50, 2, 2)

        cnn2 = Conv1D(50,
                      kernel_size=3,
                      strides=1,
                      padding='same',
                      kernel_initializer='he_normal')(x)
        cnn2 = BatchNormalization(axis=-1)(cnn2)
        cnn2 = LeakyReLU()(cnn2)
        cnn2 = GlobalMaxPooling1D()(cnn2)

        cnn3 = Conv1D(50,
                      kernel_size=5,
                      strides=1,
                      padding='same',
                      kernel_initializer='he_normal')(x)
        cnn3 = BatchNormalization(axis=-1)(cnn3)
        cnn3 = LeakyReLU()(cnn3)
        cnn3 = GlobalMaxPooling1D()(cnn3)
        x = concatenate([cnn1, cnn2, cnn3], axis=-1)

        x = Dense(units=num_classes, activation='softmax')(x)
        model = TFModel(inputs=inputs, outputs=x)
        # Lowercase alias from standalone Keras 2.x; lr= and decay= are its legacy arguments
        opt = optimizers.rmsprop(lr=0.0001, decay=1e-6)
        model.compile(optimizer=opt,
                      loss="sparse_categorical_crossentropy",
                      metrics=['acc'])
        model.summary()
        self._model = model
        self.is_init = True
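
The method references names such as TFModel and optimizers.rmsprop without showing where they come from. A sketch of imports that would satisfy it under standalone Keras 2.x (the TFModel alias is an assumption):

from keras import optimizers
from keras.layers import (Input, Conv1D, BatchNormalization, LeakyReLU,
                          GlobalMaxPooling1D, Dense, concatenate)
from keras.models import Model as TFModel  # assumed alias; not shown in the excerpt

# init_model belongs to a model-wrapper class; an illustrative call:
#     self.init_model({'max_len': 100, 'num_classes': 5})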
Code Example #5
def init_model(self,
               input_shape,
               num_classes,
               **kwargs):
    # New model
    model = Sequential()
    model.add(
        Conv1D(256, 8, padding='same',
               input_shape=(input_shape[0], 1)))  # input_shape[0] = number of feature columns
    model.add(Activation('relu'))
    model.add(Conv1D(256, 8, padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.25))
    model.add(MaxPool1D(pool_size=8))
    model.add(Conv1D(128, 8, padding='same'))
    model.add(Activation('relu'))
    model.add(Conv1D(128, 8, padding='same'))
    model.add(Activation('relu'))
    model.add(Conv1D(128, 8, padding='same'))
    model.add(Activation('relu'))
    model.add(Conv1D(128, 8, padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.25))
    model.add(MaxPool1D(pool_size=8))
    model.add(Conv1D(64, 8, padding='same'))
    model.add(Activation('relu'))
    model.add(Conv1D(64, 8, padding='same'))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(num_classes))  # one unit per target class
    model.add(Activation('softmax'))
    # opt = keras.optimizers.SGD(lr=0.01, momentum=0.0, decay=1e-6, nesterov=False)
    # opt = keras.optimizers.Adam(lr=0.0001)
    opt = optimizers.rmsprop(lr=0.0001, decay=1e-6)  # lowercase alias from standalone Keras 2.x
    model.compile(
        optimizer=opt,
        loss="sparse_categorical_crossentropy",
        metrics=['acc'])
    model.summary()
    self._model = model
    self.is_init = True
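
As with the previous example, the imports are omitted; a sketch of what this method appears to assume under standalone Keras 2.x:

from keras import optimizers
from keras.models import Sequential
from keras.layers import (Conv1D, Activation, BatchNormalization, Dropout,
                          MaxPool1D, Flatten, Dense)

# Illustrative call for 1-D feature vectors of length 216 and 8 target classes:
#     self.init_model(input_shape=(216,), num_classes=8)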
Code Example #6
def new_model():
    inferenceModel = tf.keras.Sequential() # Builds linear stack of layers for model

    input_shape = (IMAGE_SIZE, IMAGE_SIZE, 3) # input has form (samples, height, width, channels)
    # Layer 1 - Conv(32) -> Relu
    inferenceModel.add(Convolution2D(CONV1_DEPTH, FILTER_SIZE,
                                     input_shape=input_shape, padding='same'))
    inferenceModel.add(Activation('relu'))

    # Layer 2 - Conv(32) -> Relu -> Pool2D -> Dropout prob 0.25
    inferenceModel.add(Convolution2D(CONV1_DEPTH, FILTER_SIZE, padding='same'))
    inferenceModel.add(Activation('relu'))
    inferenceModel.add(MaxPooling2D(pool_size=(2,2)))
    inferenceModel.add(Dropout(DROPOUT_1))

    # Layer 3 - Conv(64) -> Relu
    inferenceModel.add(Convolution2D(CONV2_DEPTH, FILTER_SIZE, padding='same'))
    inferenceModel.add(Activation('relu'))

    # Layer 4 - Conv(64) -> Relu -> Pool2D -> Dropout prob 0.5
    inferenceModel.add(Convolution2D(CONV2_DEPTH, FILTER_SIZE, padding='same'))
    inferenceModel.add(Activation('relu'))
    inferenceModel.add(MaxPooling2D(pool_size=(2,2)))
    inferenceModel.add(Dropout(DROPOUT_1))

    # FCL 1 - Flatten to 1D -> Hidden FCL -> Relu -> Dropout prob 0.5
    inferenceModel.add(Flatten())
    inferenceModel.add(Dense(FCL_SIZE))
    inferenceModel.add(Activation('relu'))
    inferenceModel.add(Dropout(DROPOUT_2))

    # FCL 2 - Flatten to 1D -> Final FCL -> softmax
    inferenceModel.add(Dense(CLASSES))
    inferenceModel.add(Activation('softmax')) # outputs class probabilities

    # Lowercase rmsprop alias from standalone Keras 2.x; lr= and decay= are its legacy arguments
    optimizer = optimizers.rmsprop(lr=LEARNING_RATE, decay=DECAY_RATE)
    # Configure training process for Multi-class classification
    inferenceModel.compile(
            optimizer=optimizer, # Adam or RMSprop both work here
            loss='categorical_crossentropy', # cross-entropy loss for multi-class targets
            metrics=['accuracy']) # report accuracy during training
    return inferenceModel
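
new_model mixes tf.keras.Sequential with the old lowercase optimizers.rmsprop, so one workable reading is a TF <= 2.10 environment where everything comes from tf.keras and the alias is shimmed in. A heavily assumption-laden sketch, with illustrative constants:

import tensorflow as tf
from tensorflow.keras.layers import (Convolution2D, Activation, MaxPooling2D,
                                     Dropout, Flatten, Dense)
from tensorflow.keras import optimizers
optimizers.rmsprop = optimizers.RMSprop  # shim for the old lowercase alias (assumption)

IMAGE_SIZE = 32
CONV1_DEPTH, CONV2_DEPTH = 32, 64
FILTER_SIZE = (3, 3)
DROPOUT_1, DROPOUT_2 = 0.25, 0.5
FCL_SIZE = 512
CLASSES = 10
LEARNING_RATE, DECAY_RATE = 1e-4, 1e-6

model = new_model()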
Code Example #7
File: main.py, Project: WojDrew/Project_SI
model.add(Conv2D(filters=128, kernel_size=2, activation='elu'))
model.add(Conv2D(filters=128, kernel_size=2, activation='elu'))
model.add(MaxPool2D(2))

model.add(Conv2D(filters=256, kernel_size=2, activation='elu'))
model.add(MaxPool2D(2))

model.add(Conv2D(filters=512, kernel_size=2, activation='elu'))
model.add(MaxPool2D(2))

model.add(Flatten())
model.add(Dense(1024, activation='elu'))
#model.add(Dropout(rate=0.5))
model.add(Dense(10, activation='softmax'))

model.compile(optimizer=rmsprop(lr=1e-5),  # lowercase RMSprop alias from standalone Keras 2.x
              loss='categorical_crossentropy',
              metrics=['accuracy'])

print(model.get_weights())
spectrogramsAvalaible = sC.getAvalaibleSpectrograms()
dataSetTrain = createTrainingSet(spectrogramsAvalaible, 700)
dataSetVal = createTrainingSet(spectrogramsAvalaible, 200)
dataSetTrainX, dataSetTrainY = zip(*dataSetTrain)
dataSetValX, dataSetValY = zip(*dataSetVal)

trainX = numpy.asarray(dataSetTrainX)
trainX = trainX.reshape([-1, 128, 128, 1])
trainY = fit_trasform(dataSetTrainY, getAllGenres())  # project helper (name as in the source); presumably one-hot encodes the genre labels

validX = numpy.asarray(dataSetValX)
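
This excerpt starts mid-script, so the model's first layers, the imports, and the project helpers (sC, createTrainingSet, fit_trasform, getAllGenres) live elsewhere in main.py. A sketch of the imports and an assumed first layer under standalone Keras 2.x:

import numpy
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout
from keras.optimizers import rmsprop  # lowercase alias of RMSprop in Keras 2.x

model = Sequential()
model.add(Conv2D(filters=64, kernel_size=2, activation='elu',
                 input_shape=(128, 128, 1)))  # assumed; shape inferred from trainX.reshape([-1, 128, 128, 1]) above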
Code Example #8
model.add(Conv2D(32, (2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.1))
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(optimizer=rmsprop(lr=0.00001),  # lowercase RMSprop alias from standalone Keras 2.x
              loss='binary_crossentropy',
              metrics=['accuracy'])

# Note: steps_per_epoch and validation_steps are meant for generator/dataset
# inputs; with in-memory arrays, validation_split alone is sufficient.
history = model.fit(X,
                    y,
                    epochs=20,
                    steps_per_epoch=200,
                    validation_split=0.1,
                    validation_steps=100,
                    callbacks=[tensorboard])

print(history.history.keys())
print(history.history.values())

test_loss, test_acc = model.evaluate(testX, testY, verbose=2)
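
Here too the excerpt opens mid-script: model, X, y, testX, testY, and the tensorboard callback are defined earlier. A sketch of one assumed prelude under standalone Keras 2.x (input shape illustrative):

from keras.models import Sequential
from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout
from keras.optimizers import rmsprop  # lowercase alias of RMSprop in Keras 2.x
from keras.callbacks import TensorBoard

model = Sequential()
model.add(Conv2D(32, (2, 2), input_shape=(64, 64, 3)))  # assumed first block
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

tensorboard = TensorBoard(log_dir='./logs')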
Code Example #9
def euclidean_distance(vects):
    x, y = vects[0], vects[1]
    sum_square = K.sum(K.square(x - y), axis=1, keepdims=True)
    # Clamp before the square root: the gradient of sqrt(0) is NaN
    return K.sqrt(K.maximum(sum_square, K.epsilon()))


def contrastive_loss(y_true, y_pred):
    # Pull similar pairs (y_true=1) together; push dissimilar pairs beyond the margin
    margin = 1
    return K.mean(y_true * K.square(y_pred) +
                  (1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))


distance = Lambda(euclidean_distance)([output_x1, output_x2])
# distance = Lambda( lambda tensors : K.abs( tensors[0] - tensors[1] ))( [output_x1 , output_x2] )
output_ = Dense(1, activation='sigmoid')(distance)  # string form avoids an extra sigmoid import

rms = rmsprop()

model = Model([input_x1, input_x2], output_)
model.compile(loss=contrastive_loss, optimizer=rms)  # use the rms instance created above

X1 = np.load('numpy_files/X1.npy', allow_pickle=True)
X2 = np.load('numpy_files/X2.npy', allow_pickle=True)
Y = np.load('numpy_files/Y.npy', allow_pickle=True)
print(X1.shape)
print(X1[0])
# print(X1[0][0])
data_dimension = 128
X11 = X1.reshape((X1.shape[0], 128, 128, 3)).astype(np.float32)
X22 = X2.reshape((X2.shape[0], 128, 128, 3)).astype(np.float32)

model.fit([X11, X22], Y, batch_size=5, epochs=5, validation_split=None)
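
The excerpt uses input_x1/input_x2 and output_x1/output_x2 without defining them; in a siamese setup they are the two inputs and the shared base network's embeddings. A minimal sketch of that assumed base network (layer choices illustrative, sized to match the 128x128x3 reshape above):

import numpy as np
from keras import backend as K
from keras.models import Model, Sequential
from keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense, Lambda
from keras.optimizers import rmsprop  # lowercase alias of RMSprop in Keras 2.x

# Shared embedding network applied to both branches (illustrative architecture).
base = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(128, 128, 3)),
    MaxPooling2D(),
    Flatten(),
    Dense(64, activation='relu'),
])

input_x1 = Input(shape=(128, 128, 3))
input_x2 = Input(shape=(128, 128, 3))
output_x1 = base(input_x1)  # the two branches share weights
output_x2 = base(input_x2)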