Example #1
def buildClassifier(input_shape=(100, 100, 3)):
    # Initialising the CNN
    classifier = Sequential()
    classifier.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape, padding='same'))
    classifier.add(MaxPooling2D(pool_size=(4, 4), padding='same'))
    classifier.add(Dropout(0.5))  # added extra Dropout layer
    classifier.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    classifier.add(Dropout(0.5))  # added extra dropout layer
    classifier.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Dropout(0.2))  # was 0.25 before
    classifier.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    classifier.add(Conv2D(1024, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    # Step 3 - Flattening (a Flatten is needed before the first Dense layer)
    classifier.add(Flatten())
    classifier.add(Dense(units=1024, activation='relu'))  # added new dense layer
    classifier.add(Dropout(0.2))  # was 0.25 before
    classifier.add(Dense(units=1024, activation='relu'))  # added new dense layer
    classifier.add(Dense(units=256, activation='relu'))  # added new dense layer
    # Step 4 - Full connection
    classifier.add(Dropout(0.2))
    classifier.add(Dense(units=1, activation='sigmoid'))
    classifier.summary()

    # Compiling the CNN
    classifier.compile(optimizer='adam',
                       loss='binary_crossentropy',
                       metrics=['accuracy'])
    plot_model(classifier, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
    return classifier
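The snippet above omits its imports. A minimal preamble that makes it runnable is sketched below; it assumes the tf.keras API, since the original imports are not shown.

# Assumed imports (not part of the original snippet).
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
from tensorflow.keras.utils import plot_model  # plot_model also needs pydot + graphviz installed

classifier = buildClassifier(input_shape=(100, 100, 3))  # binary classifier for 100x100 RGB images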
Example #2
    def _build_dqn_model(state_space, action_space, learning_rate):
        """
        Builds a neural network for the agent

        :param state_space: state specification
        :param action_space: action specification
        :param learning_rate: learning rate
        :return: model
        """
        q_net = Sequential()
        q_net.add(
            Dense(128,
                  input_dim=state_space,
                  activation='relu',
                  kernel_initializer='he_uniform'))
        q_net.add(Dense(64, activation='relu',
                        kernel_initializer='he_uniform'))
        q_net.add(
            Dense(action_space,
                  activation='linear',
                  kernel_initializer='he_uniform'))
        q_net.compile(
            optimizer=tf.optimizers.Adam(learning_rate=learning_rate),
            loss='mse')
        q_net.summary()
        return q_net
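The indentation suggests this is a method (likely a @staticmethod). Treating it as a standalone function, a hypothetical call would look like the sketch below; the state and action sizes are illustrative, not from the original.

# Illustrative usage; the sizes are assumptions.
import tensorflow as tf

q_net = _build_dqn_model(state_space=4, action_space=2, learning_rate=1e-3)
q_values = q_net(tf.zeros((1, 4)))  # one dummy state in, one Q-value per action out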
Example #3
def createModel(X_train):
    model = Sequential()
    model.add(Conv2D(filters=16, kernel_size=(3, 3), padding='same',
                     activation='relu',
                     input_shape=(X_train.shape[1], X_train.shape[2], X_train.shape[3])))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(filters=32, kernel_size=(3, 3), padding='same',
                     activation='relu'))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # decode
    model.add(Conv2D(filters=16, strides=(2, 2), kernel_size=(3, 3), padding='same',
                     activation='relu'))
    # model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(filters=16, kernel_size=(3, 3), padding='same',
                     activation='relu'))
    model.add(UpSampling2D((2, 2)))

    # fully connected
    model.add(Flatten())
    model.add(Dense(256, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation="softmax"))

    # Define the optimizer ('learning_rate' replaces the deprecated 'lr' argument)
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999)

    model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
    model.summary()
    return model
Example #4
def create_resnet50_model(num_classes: int):
    """
    Function to create a ResNet50 model pre-trained with custom FC Layers.
    If the "advanced" command line argument is selected, adds an extra convolutional layer with extra filters to support
    larger images.
    :param num_classes: The number of classes (labels).
    :return: The ResNet50 model.
    """
    # Reconfigure single channel input into a greyscale 3 channel input
    img_input = Input(shape=(config.RESNET_IMG_SIZE['HEIGHT'],
                             config.RESNET_IMG_SIZE['WIDTH'], 1))
    img_conc = Concatenate()([img_input, img_input, img_input])

    # Generate a ResNet50 model with pre-trained ImageNet weights, input as given above, excluding fully connected
    # layers.
    model_base = ResNet50(include_top=False,
                          weights="imagenet",
                          input_tensor=img_conc)

    # Add fully connected layers
    model = Sequential()
    # Start with base model consisting of convolutional layers
    model.add(model_base)

    # Flatten layer to convert each input into a 1D array (no parameters in this layer, just simple pre-processing).
    model.add(Flatten())

    fully_connected = Sequential(name="Fully_Connected")
    # Fully connected layers.
    fully_connected.add(Dropout(0.2, seed=config.RANDOM_SEED,
                                name="Dropout_1"))
    fully_connected.add(Dense(units=512, activation='relu', name='Dense_1'))
    # fully_connected.add(Dropout(0.2, name="Dropout_2"))
    fully_connected.add(Dense(units=32, activation='relu', name='Dense_2'))

    # Final output layer: sigmoid for binary classification, softmax otherwise (the classes are exclusive).
    if num_classes == 2:
        fully_connected.add(
            Dense(1,
                  activation='sigmoid',
                  kernel_initializer="random_uniform",
                  name='Output'))
    else:
        fully_connected.add(
            Dense(num_classes,
                  activation='softmax',
                  kernel_initializer="random_uniform",
                  name='Output'))

    model.add(fully_connected)

    # Print model details if running in debug mode.
    if config.verbose_mode:
        print("CNN Model used:")
        print(model.summary())
        print("Fully connected layers:")
        print(fully_connected.summary())

    return model
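The grayscale-to-RGB trick at the top of this example stands on its own: repeating the single channel three times lets ImageNet weights be reused on 1-channel images. A stripped-down sketch, with an assumed image size, is:

# Minimal sketch of the 1-channel -> 3-channel trick (224x224 is an assumption).
from tensorflow.keras.layers import Input, Concatenate
from tensorflow.keras.applications import ResNet50

img_input = Input(shape=(224, 224, 1))
img_conc = Concatenate()([img_input, img_input, img_input])
model_base = ResNet50(include_top=False, weights="imagenet", input_tensor=img_conc)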
Example #5
def buildClassifier(input_shape=(100, 100, 3)):
    """
    This creates the CNN algorithm.
    Args:
        input_shape(tuple): This is the image shape of (100,100,3)
    Returns:
        classifier(sequential): This is the sequential model.
    """
    # Initialising the CNN
    opt = Adam(learning_rate=0.0002)  # learning rate of 0.0002
    classifier = Sequential()
    classifier.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=input_shape,
               padding='same'))
    classifier.add(MaxPooling2D(pool_size=(3, 3), padding='same'))
    classifier.add(Dropout(0.5))  # added extra Dropout layer
    classifier.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    classifier.add(Dropout(0.5))  # added extra dropout layer
    classifier.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Dropout(0.2))  # was 0.25 before
    classifier.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    classifier.add(Conv2D(1024, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    classifier.add(Flatten())  # a Flatten is needed before the first Dense layer
    classifier.add(Dense(units=1024,
                         activation='relu'))  # added new dense layer
    classifier.add(Dropout(0.2))  # was 0.25 before
    # Step 3 - Flattening
    #classifier.add(Flatten())
    classifier.add(Dense(units=1024,
                         activation='relu'))  # added new dense layer
    classifier.add(Dense(units=256,
                         activation='relu'))  # added new dense layer
    # Step 4 - Full connection
    classifier.add(Dropout(0.2))
    classifier.add(Dense(units=1, activation='sigmoid'))
    classifier.summary()

    # Compiling the CNN
    classifier.compile(optimizer=opt,
                       loss='binary_crossentropy',
                       metrics=['accuracy'])
    plot_model(classifier,
               to_file='model_plot.png',
               show_shapes=True,
               show_layer_names=True)
    return classifier
Example #6
    def create(self):
        """ Creates CNN model.

        Returns
        -------
        model: Model
            A Convolutional Neural Network model
        """

        model = Sequential()
        model.add(
            Reshape((self.input_shape[0], 1), input_shape=self.input_shape))

        if isinstance(self.filters, int):
            model.add(
                Conv1D(self.filters,
                       self.kernel_size,
                       strides=self.strides,
                       padding='valid',
                       activation='relu',
                       kernel_regularizer=self.kernel_regularizer))
            if self.pool > 0:
                model.add(MaxPooling1D(self.pool))
        else:
            for c, n_filters in enumerate(self.filters, start=1):
                model.add(
                    Conv1D(n_filters,
                           self.kernel_size,
                           strides=self.strides,
                           padding='valid',
                           kernel_regularizer=self.kernel_regularizer))
                if self.batch_normalization:
                    model.add(BatchNormalization())
                    model.add(Activation('relu'))
                elif self.dropout:
                    model.add(Activation('relu'))
                    model.add(Dropout(self.dropout_rate))
                else:
                    model.add(Activation('relu'))

                if self.pool > 0 and len(self.filters) != c:
                    model.add(MaxPooling1D(self.pool))

        model.add(GlobalAveragePooling1D())
        if self.include_top:
            model.add(
                Dense(self.num_classes,
                      activation=self.last_activation,
                      kernel_regularizer=self.kernel_regularizer))
        if self.summary:
            model.summary()
        return model
Example #7
    def build_model(self):
        model = Sequential()
        model.add(
            Dense(200, input_dim=self.state_input_size,
                  activation='relu'))  # the state is the input
        model.add(Dense(60, activation='relu'))
        model.add(Dense(24, activation='relu'))
        model.add(
            Dense(self.number_of_actions,
                  activation='linear'))  # the Q-value of each action is the output
        model.summary()
        model.compile(loss='mse', optimizer=Adam(learning_rate=self.learning_rate))
        return model
Example #8
def build_cnn_1d_model(maxlen=500):
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen))
    model.add(Conv1D(32, 7, activation='relu'))
    model.add(MaxPooling1D(5))
    model.add(Conv1D(32, 7, activation='relu'))
    model.add(GlobalMaxPooling1D())
    model.add(Dense(1))
    model.summary()
    model.compile(optimizer=RMSprop(learning_rate=1e-4),
                  loss='binary_crossentropy',
                  metrics=['acc'])
    return model
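max_features is a module-level constant that the snippet does not define. A self-contained sketch of how it might be wired up (the value is an assumption):

# Assumed setup; the original defines max_features elsewhere.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, Conv1D, MaxPooling1D, GlobalMaxPooling1D, Dense
from tensorflow.keras.optimizers import RMSprop

max_features = 10000  # illustrative vocabulary size
model = build_cnn_1d_model(maxlen=500)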
Example #9
    def create(self):
        """ Creates MLP model.

        Returns
        -------
        model: Model
            A Dense Neural Network model
        """

        model = Sequential()
        if isinstance(self.units, int):
            model.add(
                Dense(self.units,
                      activation='relu',
                      input_shape=self.input_shape,
                      kernel_regularizer=self.kernel_regularizer))
        else:  # Retrieve first unit and add input shape
            model.add(
                Dense(self.units.pop(0),
                      input_shape=self.input_shape,
                      kernel_regularizer=self.kernel_regularizer))
            if self.batch_normalization:
                model.add(BatchNormalization())
                model.add(Activation('relu'))
            elif self.dropout:
                model.add(Activation('relu'))
                model.add(Dropout(self.dropout_rate))
            else:  # No norm or dropout
                model.add(Activation('relu'))
            # Remaining hidden layers
            for unit in self.units:
                model.add(
                    Dense(unit,
                          kernel_regularizer=self.kernel_regularizer))
                if self.batch_normalization:
                    model.add(BatchNormalization())
                    model.add(Activation('relu'))
                elif self.dropout:
                    model.add(Activation('relu'))
                    model.add(Dropout(self.dropout_rate))
                else:
                    model.add(Activation('relu'))
        if self.include_top:
            model.add(
                Dense(self.num_classes,
                      activation=self.last_activation,
                      kernel_regularizer=self.kernel_regularizer))
        if self.summary:
            model.summary()
        return model
Example #10
    def create_rnn_model(self, n_timesteps, n_features, n_outputs, model_type):
        model = Sequential()
        if model_type == "GRU":
            model.add(GRU(100, input_shape=(n_timesteps, n_features)))
        else:
            model.add(LSTM(100, input_shape=(n_timesteps, n_features)))
        model.add(Dropout(0.5))
        model.add(Dense(100, activation='relu'))
        model.add(Dense(n_outputs, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        model.summary()

        return model
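From inside the owning class, a hypothetical call might look like the line below; the shapes are illustrative (say, windows of 128 timesteps with 9 features and 6 output classes), not from the original.

# Illustrative usage; all sizes are assumptions.
model = self.create_rnn_model(n_timesteps=128, n_features=9, n_outputs=6, model_type="GRU")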
Example #11
    def model_layers(self):
        model = Sequential()
        # 1st convolutional block
        model.add(
            layers.Conv2D(32, (5, 5),
                          activation='relu',
                          input_shape=(28, 28, 1)))
        model.add(layers.MaxPool2D((2, 2)))
        # 2nd convolutional block
        model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        model.add(layers.MaxPool2D((2, 2)))
        # classification head
        model.add(layers.Flatten())
        model.add(layers.Dense(64, activation='relu'))
        model.add(layers.Dense(10, activation='softmax'))
        model.summary()
        return model
Example #12
def build_model(input_dim, output_dim, drop_out):
    # Neural network
    model = Sequential()
    model.add(
        Dense(int(nr_neurons * 2),
              input_dim=input_dim,
              activation='relu',
              name='first_layer'))
    model.add(Dropout(drop_out, name='drop_out_1'))
    model.add(
        Dense(int(nr_neurons * 4), activation='relu', name='hidden_layer_1'))
    model.add(Dropout(drop_out, name='drop_out_2'))
    model.add(
        Dense(int(nr_neurons * 2), activation='relu', name='hidden_layer_2'))
    model.add(Dropout(drop_out, name='drop_out_3'))
    model.add(Dense(nr_neurons, activation='relu', name='hidden_layer_3'))
    model.add(Dropout(drop_out, name='drop_out_4'))
    model.add(
        Dense(int(nr_neurons / 2), activation='relu', name='hidden_layer_4'))
    model.add(Dropout(drop_out, name='drop_out_5'))
    model.add(
        Dense(int(nr_neurons / 4), activation='relu', name='hidden_layer_5'))
    model.add(Dropout(drop_out, name='drop_out_6'))
    model.add(Dense(output_dim, activation='softmax', name='output_layer'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print(model.summary())

    tf.keras.utils.plot_model(model, 'neuralNetwork.png', show_shapes=True)

    return model
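nr_neurons is a global that the snippet does not define. An illustrative setup, with assumed values, would be:

nr_neurons = 64  # assumption; the original defines this constant elsewhere
model = build_model(input_dim=20, output_dim=5, drop_out=0.3)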
Example #13
def keras_model_fn(input_dim, output_dim, return_sequences, hyperparameters):
    model = Sequential()
    model.add(
        LSTM(input_shape=(None, input_dim),
             units=output_dim,
             return_sequences=return_sequences,
             name="inputs"))

    model.add(Dropout(0.2))

    model.add(LSTM(128, return_sequences=False))

    model.add(Dropout(0.2))

    model.add(Dense(units=1))
    model.add(Activation('linear'))

    opt = hyperparameters['optimizer']
    loss = hyperparameters['loss']
    eval_metric = hyperparameters['eval_metric']

    model.compile(loss=loss, optimizer=opt, metrics=[eval_metric])

    print(model.summary())

    return model
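For reference, a hypothetical hyperparameters dict for the function above; the keys come from the snippet, the values are assumptions:

hyperparameters = {
    'optimizer': 'adam',   # assumed value
    'loss': 'mse',         # assumed value
    'eval_metric': 'mae',  # assumed value
}
model = keras_model_fn(input_dim=10, output_dim=64, return_sequences=True,
                       hyperparameters=hyperparameters)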
Example #14
def build_model(params):
    model = Sequential()
    if params['pretrained']:
        model.add(Embedding(params['vocab_size'], params['embedding_dim'], weights=[glove.custom_embedding_matrix],
                            input_length=params['max_answer_len'], trainable=False))
    else:
        model.add(Embedding(params['vocab_size'], params['embedding_dim'], input_length=params['max_answer_len']))

    model.add(Dropout(params['dropout']))
    if params['flatten']:
        model.add(Flatten())
        model.add(Reshape((1, params['embedding_dim'] * params['max_answer_len'])))
    if params['lstm_dim_2']:
        model.add(LSTM(params['lstm_dim_1'], return_sequences=True))
        model.add(LSTM(params['lstm_dim_2'], return_sequences=False))
    else:
        model.add(LSTM(params['lstm_dim_1'], return_sequences=False))
    model.add(Dropout(params['dropout']))
    model.add(Dense(1, activation="linear"))

    # compile the model
    optimizer = AdamOptimizer()
    model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=['acc'])
    print(model.summary())
    return model
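The params dict is built elsewhere in the original. A plausible shape for it, with all values purely illustrative, is:

params = {
    'pretrained': False,   # skip the GloVe-weights branch in this sketch
    'vocab_size': 20000,
    'embedding_dim': 100,
    'max_answer_len': 50,
    'dropout': 0.3,
    'flatten': False,
    'lstm_dim_1': 128,
    'lstm_dim_2': 64,
}
model = build_model(params)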
Example #15
def build_model():
    model = Sequential()
    # Conv2D positional args: filters, kernel_size, strides, padding
    model.add(Conv2D(64, (5, 5), (1, 1), "same", activation="relu", input_shape=(306, 408, 3)))
    model.add(MaxPool2D((3, 3), (2, 2), 'same'))
    model.add(Conv2D(64, (5, 5), (1, 1), "same", activation="relu"))
    model.add(MaxPool2D((3, 3), (2, 2), 'same'))
    model.add(Conv2D(64, (5, 5), padding="same", activation='relu'))
    model.add(MaxPool2D((3, 3), (2, 2), 'same'))
    model.add(Conv2D(16, (5, 5), padding="same", activation='relu'))
    model.add(MaxPool2D((3, 3), (2, 2), 'same'))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(8, activation='relu'))
    optimizer = Adadelta()
    model.compile(optimizer, loss=mean_squared_error)
    print(model.summary())
    train_X, train_y = GET_DATA.get_batches_data()
    cost_values = []
    for step in range(1000):
        cost = model.train_on_batch(train_X, train_y)
        cost_values.append(cost)
        if step % 10 == 0:
            print("step %d , cost value is %.3f" % (step, cost))
    model.save("./model1.h5")
    plt.plot(cost_values)
    plt.show()
Example #16
def init_dqn(env, nb_actions):
    """ Initialize the DQN agent using the keras-rl package.

    :param env: the environment to be played, required to determine the input size
    :param nb_actions: number of actions
    :return: DQN Agent
    """
    # Next, we build a very simple model.
    model = Sequential()
    model.add(Flatten(input_shape=(1, ) + env.observation_space.shape))
    model.add(Dense(16))
    model.add(Activation('relu'))
    model.add(Dense(16))
    model.add(Activation('relu'))
    model.add(Dense(16))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))
    print(model.summary())

    # compile agent
    memory = SequentialMemory(limit=50000, window_length=1)
    policy = BoltzmannQPolicy()
    dqn = DQNAgent(model=model,
                   nb_actions=nb_actions,
                   memory=memory,
                   nb_steps_warmup=10,
                   target_model_update=1e-2,
                   policy=policy)
    dqn.model_name = f"DQN"
    dqn.compile(Adam(lr=1e-3), metrics=['mae'])
    return dqn
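keras-rl agents like this one are typically trained against an OpenAI Gym environment. A hedged usage sketch, with the environment chosen only for illustration:

import gym

env = gym.make('CartPole-v1')  # illustrative environment
dqn = init_dqn(env, nb_actions=env.action_space.n)
dqn.fit(env, nb_steps=50000, visualize=False, verbose=2)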
Example #17
class BiLSTM(NNBaseModel):
    def train(self):
        batch_size = 64
        units = 100
        embedding_matrix = np.zeros((self.vocab_size, 100))
        for word, index in self.tk.word_index.items():
            embedding_vector = self.word2vec.get(word)
            if embedding_vector is not None:
                embedding_matrix[index] = embedding_vector

        self.model = Sequential()
        self.model.add(
            Embedding(self.vocab_size,
                      units,
                      weights=[embedding_matrix],
                      trainable=False))
        self.model.add(
            Bidirectional(LSTM(units, return_sequences=True, dropout=0.2)))
        self.model.add(Bidirectional(LSTM(units, dropout=0.2)))
        self.model.add(Dense(self.output_size, activation='softmax'))  # softmax to match the sparse categorical cross-entropy loss
        print(self.model.summary())
        self.model.compile(optimizer='adam',
                           loss='sparse_categorical_crossentropy',
                           metrics=['acc'])
        history = self.model.fit(self.X_train,
                                 self.y_train,
                                 epochs=100,
                                 batch_size=batch_size,
                                 verbose=1)
Example #18
def prueba_2():
	cantidad_twits = 10
	# define the test tweets
	test = load_test()
	twits = preprocesing(test[:cantidad_twits, 0])
	print(f"\ntweets:\n{twits}")
	# define the class labels
	labels = test[:cantidad_twits, 1].astype('float32')
	print(f"\nlabels:\n{labels}")
	# prepare tokenizer
	t = Tokenizer()
	t.fit_on_texts(twits)
	vocab_size = len(t.word_index) + 1
	# integer encode the documents
	encoded_twits = t.texts_to_sequences(twits)
	print(f"\nencoded_twits:\n{encoded_twits}")
	# pad documents to the length of the longest tweet
	mylen = np.vectorize(len)
	lens = mylen(encoded_twits)
	max_len = max(lens)
	# TODO: count the longest tweet
	max_length = max_len
	padded_twits = pad_sequences(encoded_twits, maxlen=max_length, padding='post')
	print(f"\npadded_twits:\n{padded_twits}")

	# load the whole embedding into memory
	embeddings_index = dict()
	f = open('fasttext.es.300.txt')
	for line in f:
		values = line.split()
		word = values[0]
		coefs = np.asarray(values[1:], dtype='float32')
		embeddings_index[word] = coefs
	f.close()
	print('Loaded %s word vectors.' % len(embeddings_index))

	# create a weight matrix for words in training docs
	embedding_matrix = np.zeros((vocab_size, 300))
	for word, i in t.word_index.items():
		embedding_vector = embeddings_index.get(word)
		if embedding_vector is not None:
			embedding_matrix[i] = embedding_vector

	# define model
	model = Sequential()
	e = Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=max_length, trainable=False)
	model.add(e)
	model.add(Flatten())
	model.add(Dense(1, activation='sigmoid'))
	# compile the model
	model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
	# summarize the model
	print(model.summary())
	# fit the model
	model.fit(padded_twits, labels, epochs=50, verbose=0)
	# evaluate the model
	loss, accuracy = model.evaluate(padded_twits, labels, verbose=0)
	print('Accuracy: %f' % (accuracy * 100))
Example #19
def bot_neural_net(input_shape):
    model = Sequential()
    model.add(Dense(256, input_dim=input_shape, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(200, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(160, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(120, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(80, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()
    return model
Example #20
    def create(self):
        """ Creates the logistic model.

        Returns
        -------
        model: Model
            A Logistic regression model
        """

        model = Sequential()
        model.add(
            Dense(self.num_classes,
                  kernel_regularizer=self.kernel_regularizer,
                  activation=self.last_activation,
                  input_shape=self.input_shape))
        if self.summary:
            model.summary()
        return model
Example #21
def generate_vgg_model(classes_len: int):
    """
    Function to create a VGG19 model pre-trained with custom FC Layers.
    If the "advanced" command line argument is selected, adds an extra convolutional layer with extra filters to support
    larger images.
    :param classes_len: The number of classes (labels).
    :return: The VGG19 model.
    """
    # Reconfigure single channel input into a greyscale 3 channel input
    img_input = Input(shape=(config.VGG_IMG_SIZE['HEIGHT'],
                             config.VGG_IMG_SIZE['WIDTH'], 1))
    img_conc = Concatenate()([img_input, img_input, img_input])

    # Generate a VGG19 model with pre-trained ImageNet weights, input as given above, excluding the fully connected layers.
    model_base = VGG19(include_top=False,
                       weights='imagenet',
                       input_tensor=img_conc)

    # Add fully connected layers
    model = Sequential()
    # Start with base model consisting of convolutional layers
    model.add(model_base)

    # Generate additional convolutional layers
    if config.model == "advanced":
        model.add(Conv2D(1024, (3, 3), activation='relu', padding='same'))
        model.add(Conv2D(1024, (3, 3), activation='relu', padding='same'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Flatten layer to convert each input into a 1D array (no parameters in this layer, just simple pre-processing).
    model.add(Flatten())
    if config.dropout == "Y":
        model.add(Dropout(0.3))
    # Add fully connected hidden layers.
    model.add(Dense(units=512, activation='relu', name='Dense_Intermediate_1'))
    if config.dropout == "Y":
        model.add(Dropout(0.4))
    model.add(Dense(units=32, activation='relu', name='Dense_Intermediate_2'))
    if config.dropout == "Y":
        model.add(Dropout(0.4))

    # Possible dropout for regularisation can be added later and experimented with:
    # model.add(Dropout(0.1, name='Dropout_Regularization'))

    # Final output layer that uses softmax activation function (because the classes are exclusive).
    if classes_len == 2:
        model.add(Dense(1, activation='sigmoid', name='Output'))
    else:
        model.add(Dense(classes_len, activation='softmax', name='Output'))

    # Print model details if running in debug mode.
    if config.verbose_mode:
        print(model.summary())

    return model
Example #22
    def train(self):
        model = Sequential()
        model.add(
            DenseNet201(weights="imagenet",
                        include_top=False,
                        input_shape=self.input_shape))
        model.add(Flatten())
        model.add(Dense(1024, activation="relu"))
        model.add(Dense(1, activation="sigmoid"))
        plot_model(model)
        model.summary()
        model.compile(optimizer=Adam(learning_rate=1e-3),
                      loss="binary_crossentropy",
                      metrics=['accuracy'])
        history = model.fit(self.train_data,
                            epochs=100,
                            verbose=1,
                            validation_data=self.valid_data)

        return model, history
Example #23
    def create_model(self, x_train):
        model = Sequential()
        model.add(
            LSTM(units=256,
                 return_sequences=True,
                 input_shape=(x_train.shape[1], x_train.shape[2])))
        model.add(Dropout(0.2))
        model.add(LSTM(units=128, return_sequences=True))
        model.add(Dropout(0.2))
        model.add(LSTM(units=64, return_sequences=True))
        model.add(Dropout(0.5))
        model.add(LSTM(units=64))
        model.add(Dropout(0.5))
        model.add(Dense(units=1))
        model.summary()
        tf.keras.utils.plot_model(model,
                                  to_file=os.path.join(self.project_folder,
                                                       'model_lstm.png'),
                                  show_shapes=True,
                                  show_layer_names=True)
        return model
Example #24
    def create_model(self, x_train):
        model = Sequential()
        # 1st LSTM layer with Dropout regularisation
        # * units = 100, the dimensionality of the output space
        # * return_sequences = True to stack LSTM layers, so the next LSTM
        #   layer receives a three-dimensional sequence input
        # * input_shape = shape of the training dataset
        model.add(
            LSTM(units=100,
                 return_sequences=True,
                 input_shape=(x_train.shape[1], 1)))
        # 20% of the units' activations will be dropped
        model.add(Dropout(0.2))
        # 2nd LSTM layer: 50 units, still returning the full sequence
        model.add(LSTM(units=50, return_sequences=True))
        # 20% of the units' activations will be dropped
        model.add(Dropout(0.2))
        # 3rd LSTM layer: 50 units, still returning the full sequence
        model.add(LSTM(units=50, return_sequences=True))
        # 50% of the units' activations will be dropped
        model.add(Dropout(0.5))
        # 4th LSTM layer: 50 units, returning only the last output
        model.add(LSTM(units=50))
        # 50% of the units' activations will be dropped
        model.add(Dropout(0.5))
        # Dense layer with a single output unit
        model.add(Dense(units=1))
        model.summary()
        tf.keras.utils.plot_model(model,
                                  to_file=os.path.join(self.project_folder,
                                                       'model_lstm.png'),
                                  show_shapes=True,
                                  show_layer_names=True)
        return model
Example #25
class AudioFeaturesModel:
    def __init__(self, model_name, le, layers):
        self.le = le
        self.model = Sequential(name=model_name)
        # Builds layers based on the structure in model_structures
        for layer in layers:
            self.model.add(layer)

    def compile(self):
        """Compile the model and print the structure"""
        self.model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
        self.model.summary()

    def test_model(self, x_data, y_data):
        """Calculate the model's accuracy on the input dataset"""
        score = self.model.evaluate(x_data, y_data, verbose=0)
        accuracy = 100 * score[1]
        return accuracy

    def train_model(self, x_train, y_train, x_val, y_val):
        """Train and save the model"""
        early_stopping = EarlyStopping(monitor='val_loss', patience=sounds_config.patience, mode='min')
        checkpointer = ModelCheckpoint(filepath=f'{sounds_config.sounds_model_dir}/{self.model.name}.hdf5', verbose=1,
                                       save_best_only=True)
        history = self.model.fit(x_train, y_train, batch_size=sounds_config.num_batch_size,
                                 epochs=sounds_config.num_epochs, validation_data=(x_val, y_val),
                                 callbacks=[checkpointer, early_stopping], verbose=1)
        self.le.save(self.model.name)
        return history

    def calculate_confusion_matrix(self, x_test, y_test):
        """Calculate the probabilities required for the confusion matrix and create a dataframe"""
        # Sequential.predict_classes was removed in newer TF releases; take the argmax of predict() instead
        y_pred = np.argmax(self.model.predict(x_test), axis=1)
        y_test = argmax(y_test, axis=1)
        con_mat = confusion_matrix(labels=y_test, predictions=y_pred).numpy()
        con_mat_norm = np.around(con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis], decimals=2)
        classes = self.le.inverse_transform(list(range(0, self.le.encoded_labels.shape[1])))
        return pd.DataFrame(con_mat_norm, index=classes, columns=classes)
Example #26
def train_classifier():
    data = get_data()

    classifier = Sequential()
    classifier.add(
        Dense(100,
              activation=tf.nn.relu,
              input_shape=(FLAGS.sentence_embedding_size, )))
    for i in range(1 - 1):  # range(0): this extra hidden-layer block never executes as written
        classifier.add(
            Dense(100,
                  activation='relu',
                  kernel_regularizer=tf.keras.regularizers.l2(0.3)))
        classifier.add(Dropout(0.5))
    classifier.add(Dense(2, activation='softmax'))
    classifier.compile(optimizer=Adagrad(0.01),
                       loss='categorical_crossentropy',
                       metrics=['accuracy',
                                Recall(),
                                Precision(), f1])

    classifier.summary()

    helper._print_header('Training classifier')

    classifier.fit(data['train'][0],
                   data['train'][1],
                   batch_size=FLAGS.classifier_batch_size,
                   validation_data=(data['val'][0], data['val'][1]),
                   epochs=200,
                   callbacks=[
                       EarlyStopping(monitor='val_accuracy',
                                     patience=25,
                                     min_delta=0.01),
                       SaveBestModelCallback()
                   ],
                   verbose=2)
Example #27
def ramble(model: keras.Sequential, vectorizer: Vectorizer, seed='she'):
    current_token = vectorizer.word_to_index[seed]
    current_seq = [current_token]
    model.reset_states()
    model.summary()

    probs_arr = []

    # for t in range(config.max_seq_len):
    for t in range(5):
        # Format inputs.
        inputs = np.zeros((config.find('batch_size'), 1))
        assert inputs.shape[0] == 32, inputs.shape
        inputs[0, 0] = current_token

        # Forward pass.
        probs = model.predict_on_batch(inputs)[0]
        probs = probs.squeeze()
        probs_arr.append(probs)
        assert probs.shape == (vectorizer.vocab_size,), \
            f'{probs.shape} != {vectorizer.vocab_size}'

        # Sample prediction.
        current_token = probs.argmax()
        current_seq.append(current_token)

    text_rant = vectorizer.sequences_to_docs([[current_seq]])
    assert len(text_rant) == 1, len(text_rant)

    # Some post-processing prettification.
    text_rant = util.lmap(str.strip, text_rant[0].split('.'))
    util.print_box('Rant', list(text_rant))

    sequences = beam_search_decoder(probs_arr, k=3)
    sequences = [thing[0] for thing in sequences]
    beam_rant = vectorizer.sequences_to_docs([sequences])
    print('beam rant:', beam_rant)
Example #28
    def create_model(X_train):
        model = Sequential()
        #
        model.add(
            Conv2D(filters=16,
                   kernel_size=(3, 3),
                   padding='Same',
                   activation='relu',
                   input_shape=(X_train.shape[1], X_train.shape[2],
                                X_train.shape[3])))
        model.add(MaxPool2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        #
        model.add(
            Conv2D(filters=32,
                   kernel_size=(3, 3),
                   padding='Same',
                   activation='relu'))
        model.add(MaxPool2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        #
        model.add(Flatten())
        model.add(Dense(256, activation="relu"))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation="softmax"))

        # Define the optimizer
        # optimizer = tf.compat.v1.train.AdamOptimizer(1e-3, epsilon=1e-4)
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001,
                                             beta_1=0.9,
                                             beta_2=0.999)

        model.compile(optimizer=optimizer,
                      loss="categorical_crossentropy",
                      metrics=["accuracy"])
        model.summary()
        return model
Example #29
        def create_model():
            """
            Define and return tensorflow model.
            """
            model = Sequential()
            model.add(
                Dense(256, activation=tf.nn.relu, input_shape=(vocab_size, )))
            model.add(Dropout(0.2))
            model.add(Dense(128, activation=tf.nn.relu))
            model.add(Dropout(0.2))
            model.add(Dense(num_labels, activation=tf.nn.softmax))
            """
            tried:
            loss functions =>  categorical_crossentropy, binary_crossentropy
            optimizers => adam, rmsprop
            """

            model.compile(loss='categorical_crossentropy',
                          optimizer='adam',
                          metrics=['accuracy'])

            model.summary()

            return model
Example #30
class ImageFeaturesModel:
    def __init__(self, model_name, le, layers):
        self.le = le
        self.model = Sequential(name=model_name)

        for layer in layers:
            self.model.add(layer)

    def compile(self):
        self.model.compile(loss='categorical_crossentropy',
                           metrics=['accuracy'],
                           optimizer='adam')
        self.model.summary()

    def test_model(self, x_data, y_data):
        score = self.model.evaluate(x_data, y_data, verbose=0)
        accuracy = 100 * score[1]
        return accuracy

    def train_model(self, x_train, y_train, x_val, y_val):
        early_stop = EarlyStopping(monitor='val_loss',
                                   mode='min',
                                   verbose=1,
                                   patience=5)
        checkpointer = ModelCheckpoint(filepath=f'{self.model.name}.hdf5',
                                       verbose=1,
                                       save_best_only=True)
        history = self.model.fit(x_train,
                                 y_train,
                                 batch_size=config.num_batch_size,
                                 epochs=config.num_epochs,
                                 validation_data=(x_val, y_val),
                                 callbacks=[early_stop, checkpointer],
                                 verbose=1)
        self.le.save(self.model.name)
        return history