コード例 #1
0
def gruModel(embeddingMatrix, maxDataLenght, embeddingVectorLength,
             numAttributes, numNeurons):
    """Build and compile a single-layer GRU binary classifier.

    A frozen, pre-trained embedding feeds one GRU layer, followed by
    dropout and an L2-regularised sigmoid output unit.  The model is
    compiled for binary cross-entropy with accuracy/F1/precision/recall
    metrics from keras-metrics (``km``).
    """
    # Pre-trained vectors are loaded as fixed weights and never updated.
    embedding_layer = Embedding(input_dim=numAttributes,
                                output_dim=embeddingVectorLength,
                                weights=[embeddingMatrix],
                                input_length=maxDataLenght,
                                trainable=False)
    classifier_head = Dense(1,
                            activation='sigmoid',
                            kernel_regularizer=regularizers.l2(0.001))
    model = Sequential([
        embedding_layer,
        GRU(numNeurons, return_sequences=False),
        Dropout(0.2),
        classifier_head,
    ])
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy',
                           km.binary_f1_score(),
                           km.binary_precision(),
                           km.binary_recall()])

    return model
コード例 #2
0
def transferlearn_model(trainX, trainY, valX, valY, params):
    """Attach a new classification head to a pre-trained base and train it.

    NOTE(review): this function reads module-level names `baseModel` and
    `d_class_weights` that are not defined here — confirm both exist in
    the enclosing scope before calling.

    Parameters:
        trainX, trainY: training images and labels.
        valX, valY: validation images and labels.
        params: dict with keys 'dropout', 'optimizer', 'lr', 'epochs'
            and 'batch_size' (see usage below).

    Returns:
        (history, model) — the Keras History from training and the
        compiled transfer-learning model.
    """

    # initialize the training data augmentation object
    trainAug = ImageDataGenerator(rotation_range=15, fill_mode="nearest")

    # PRINT SUMMARY OF MODEL
    # baseModel.summary()
    # construct the head of the model that will be placed on top of the
    # the base model
    headModel = baseModel.output
    headModel = AveragePooling2D(pool_size=(4, 4))(headModel)
    headModel = Flatten(name="flatten")(headModel)
    headModel = Dense(64, activation="relu")(headModel)
    headModel = Dropout(params['dropout'])(headModel)
    headModel = Dense(2, activation="softmax")(headModel)

    # place the head FC model on top of the base model (this will become
    # the actual model we will train)
    model = Model(inputs=baseModel.input, outputs=headModel)

    # loop over all layers in the base model and freeze them so they will
    # not be updated during the first training process
    for layer in baseModel.layers:
        layer.trainable = False

    # compile our model
    print("[INFO] compiling model...")
    # `lr`/`decay` are the legacy Keras optimizer argument names; decay
    # anneals the learning rate linearly over the configured epochs.
    opt = params['optimizer'](lr=params['lr'],
                              decay=params['lr'] / params['epochs'])
    model.compile(loss="binary_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy", km.binary_f1_score()])

    # train the head of the network
    # NOTE(review): fit_generator is deprecated in favour of fit() in
    # newer Keras/TF releases — fine on the version this code targets.
    print("[INFO] training head...")
    out = model.fit_generator(
        trainAug.flow(trainX, trainY, batch_size=params['batch_size']),
        steps_per_epoch=len(trainX) // params['batch_size'],
        validation_data=(valX, valY),
        validation_steps=len(valX) // params['batch_size'],
        epochs=params['epochs'],
        class_weight=d_class_weights,
        verbose=0)
    return out, model
コード例 #3
0
def Lstm(dataInput, maxDataLength):
    """Train and evaluate a bidirectional LSTM binary classifier.

    The (sequence, label) pairs in `dataInput` are split 80/20 into
    train/test, sequences are padded to `maxDataLength`, the model is
    trained, and per-metric test scores are printed.

    Fix: the original trained the model and then discarded it (implicit
    ``return None``).  The trained model and its evaluation scores are
    now returned so callers can reuse them; callers that ignored the old
    ``None`` return are unaffected.

    Relies on module-level NUM_OF_ATTRIBUTES, NUM_OF_NEURONS and
    NUM_OF_EPOCHS constants.

    Returns:
        (model, scores) — the fitted Keras model and the list produced
        by ``model.evaluate`` (loss followed by the compiled metrics).
    """
    train, test = train_test_split(dataInput, test_size=0.2)
    xTrain, yTrain = list(zip(*train))
    xTest, yTest = list(zip(*test))

    yTrain = np.array(yTrain)
    yTest = np.array(yTest)

    # Pad/truncate every sequence to a fixed length for the Embedding layer.
    xTrain = sequence.pad_sequences(xTrain, maxlen=maxDataLength)
    xTest = sequence.pad_sequences(xTest, maxlen=maxDataLength)

    embedding_vector_length = 32
    model = Sequential()
    model.add(
        Embedding(NUM_OF_ATTRIBUTES,
                  embedding_vector_length,
                  input_length=maxDataLength))
    model.add(Bidirectional(LSTM(NUM_OF_NEURONS, return_sequences=False)))
    model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=[
                      'accuracy',
                      km.binary_f1_score(),
                      km.binary_precision(),
                      km.binary_recall()
                  ])
    model.fit(xTrain,
              yTrain,
              validation_data=(xTest, yTest),
              epochs=NUM_OF_EPOCHS,
              batch_size=512,
              verbose=0)
    scores = model.evaluate(xTest, yTest, verbose=0)
    # Index 0 of `scores` is the loss; 1..4 are accuracy, f1, precision
    # and recall in compile order.
    for i in range(1, 5):
        print("%s : %.2f%%" % (model.metrics_names[i], scores[i] * 100))
    print("========================================================")
    return model, scores
コード例 #4
0
def Build_Model_RNN_Text(word_index,
                         embedding_index,
                         number_of_classes,
                         MAX_SEQUENCE_LENGTH,
                         EMBEDDING_DIM,
                         sparse_categorical,
                         min_hidden_layer_rnn,
                         max_hidden_layer_rnn,
                         min_nodes_rnn,
                         max_nodes_rnn,
                         random_optimizor,
                         dropout,
                         use_cuda=True,
                         use_bidirectional=True,
                         _l2=0.01,
                         lr=1e-3):
    """Build a randomly-sized (Bi)GRU text classifier over pre-trained embeddings.

    Parameters:
        word_index: dict mapping word -> integer index.
        embedding_index: dict mapping word -> embedding vector
            (see data_helper.py); must also contain an 'UNK' entry.
        number_of_classes: number of target classes (2 -> sigmoid head).
        MAX_SEQUENCE_LENGTH: maximum length of the padded text sequences.
        EMBEDDING_DIM: dimensionality of the embedding vectors.
        sparse_categorical: use sparse categorical cross-entropy when True.
        min/max_hidden_layer_rnn, min/max_nodes_rnn: bounds for the
            randomly chosen depth and width.
        random_optimizor, dropout, use_cuda, use_bidirectional, _l2, lr:
            training/architecture knobs used below.

    Returns:
        (model, model_tmp) — note model_tmp is an *alias* of model,
        not a copy.
    """

    # Use the cuDNN-fused GRU implementation when a GPU is assumed.
    Recurrent = CuDNNGRU if use_cuda else GRU

    model = Sequential()
    # Width candidates include max_nodes_rnn (inclusive upper bound).
    values = list(range(min_nodes_rnn, max_nodes_rnn + 1))
    # Extra-layer candidates are offset by -1: `layer` counts the stacked
    # return_sequences layers added *before* the final recurrent layer.
    values_layer = list(range(min_hidden_layer_rnn - 1, max_hidden_layer_rnn))

    layer = random.choice(values_layer)
    print(layer)  # debug: chosen number of stacked recurrent layers

    # Build the embedding matrix row-by-row from the pre-trained index.
    embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
    for word, i in word_index.items():
        embedding_vector = embedding_index.get(word)
        if embedding_vector is not None:
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
        else:
            # NOTE(review): assumes embedding_index contains an 'UNK'
            # vector — raises KeyError otherwise; confirm upstream.
            embedding_matrix[i] = embedding_index['UNK']

    model.add(
        Embedding(len(word_index) + 1,
                  EMBEDDING_DIM,
                  weights=[embedding_matrix],
                  input_length=MAX_SEQUENCE_LENGTH,
                  trainable=True))

    gru_node = random.choice(values)
    print(gru_node)  # debug: chosen recurrent layer width
    # Stacked recurrent layers keep full sequences for the next layer.
    for i in range(0, layer):
        if use_bidirectional:
            model.add(
                Bidirectional(
                    Recurrent(gru_node,
                              return_sequences=True,
                              kernel_regularizer=l2(_l2))))
        else:
            model.add(
                Recurrent(gru_node,
                          return_sequences=True,
                          kernel_regularizer=l2(_l2)))
        model.add(Dropout(dropout))
    # Final recurrent layer collapses the sequence to one vector.
    if use_bidirectional:
        model.add(
            Bidirectional(Recurrent(gru_node, kernel_regularizer=l2(_l2))))
    else:
        model.add(Recurrent(gru_node, kernel_regularizer=l2(_l2)))
    model.add(Dropout(dropout))
    model.add(Dense(256, activation='relu', kernel_regularizer=l2(_l2)))
    if number_of_classes == 2:
        # Binary head: single sigmoid unit + binary keras-metrics.
        model.add(Dense(1, activation='sigmoid', kernel_regularizer=l2(_l2)))
        model_tmp = model
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor, lr),
                      metrics=[
                          'accuracy',
                          km.binary_precision(),
                          km.binary_recall(),
                          km.binary_f1_score(),
                          km.binary_true_positive(),
                          km.binary_true_negative(),
                          km.binary_false_positive(),
                          km.binary_false_negative()
                      ])
    else:
        # Multi-class head: softmax over all classes; the loss/metrics
        # family follows the label encoding (sparse vs one-hot).
        model.add(
            Dense(number_of_classes,
                  activation='softmax',
                  kernel_regularizer=l2(_l2)))
        model_tmp = model
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=[
                              'accuracy',
                              km.sparse_categorical_precision(),
                              km.sparse_categorical_recall(),
                              km.sparse_categorical_f1_score(),
                              km.sparse_categorical_true_positive(),
                              km.sparse_categorical_true_negative(),
                              km.sparse_categorical_false_positive(),
                              km.sparse_categorical_false_negative()
                          ])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=[
                              'accuracy',
                              km.categorical_precision(),
                              km.categorical_recall(),
                              km.categorical_f1_score(),
                              km.categorical_true_positive(),
                              km.categorical_true_negative(),
                              km.categorical_false_positive(),
                              km.categorical_false_negative()
                          ])
    return model, model_tmp
コード例 #5
0
def Build_Model_RNN_Image(shape, number_of_classes, sparse_categorical,
                          min_nodes_rnn, max_nodes_rnn, random_optimizor,
                          dropout):
    """Build a hierarchical (row-then-column) CuDNN LSTM image classifier.

    Each row of pixels is encoded by a shared CuDNNLSTM (via
    TimeDistributed), and the sequence of row encodings is summarised by
    a second CuDNNLSTM before the final dense prediction layer.

    Fix: the original passed ``recurrent_dropout`` to ``CuDNNLSTM``,
    which the cuDNN-fused layer does not accept (its fused kernels have
    no dropout support), so building the model raised a TypeError.  The
    argument is removed; ``dropout`` stays in the signature for
    interface compatibility but is unused here.

    Parameters:
        shape: input shape tuple (w, h, p).
        number_of_classes: number of target classes (2 -> sigmoid head).
        sparse_categorical: use sparse categorical cross-entropy when True.
        min_nodes_rnn, max_nodes_rnn: bounds for the random LSTM widths.
        random_optimizor: selector forwarded to optimizors().
        dropout: unused (see Fix note above).

    Returns:
        (model, model_tmp) — model_tmp is an alias of model, kept to
        mirror the other Build_Model_* helpers.
    """
    # Width candidates; the range is [min-1, max) as in the original code.
    values = list(range(min_nodes_rnn - 1, max_nodes_rnn))
    node = random.choice(values)

    x = Input(shape=shape)

    # Encodes a row of pixels using TimeDistributed Wrapper.
    encoded_rows = TimeDistributed(CuDNNLSTM(node))(x)
    node = random.choice(values)
    # Encodes columns of encoded rows.
    encoded_columns = CuDNNLSTM(node)(encoded_rows)

    # Final predictions and model.
    #prediction = Dense(256, activation='relu')(encoded_columns)
    if number_of_classes == 2:
        prediction = Dense(1, activation='sigmoid')(encoded_columns)
    else:
        prediction = Dense(number_of_classes,
                           activation='softmax')(encoded_columns)

    model = Model(x, prediction)
    model_tmp = model
    if number_of_classes == 2:
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor),
                      metrics=[
                          'accuracy',
                          km.binary_precision(),
                          km.binary_recall(),
                          km.binary_f1_score(),
                          km.binary_true_positive(),
                          km.binary_true_negative(),
                          km.binary_false_positive(),
                          km.binary_false_negative()
                      ])
    else:
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=[
                              'accuracy',
                              km.sparse_categorical_precision(),
                              km.sparse_categorical_recall(),
                              km.sparse_categorical_f1_score(),
                              km.sparse_categorical_true_positive(),
                              km.sparse_categorical_true_negative(),
                              km.sparse_categorical_false_positive(),
                              km.sparse_categorical_false_negative()
                          ])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor),
                          metrics=[
                              'accuracy',
                              km.categorical_precision(),
                              km.categorical_recall(),
                              km.categorical_f1_score(),
                              km.categorical_true_positive(),
                              km.categorical_true_negative(),
                              km.categorical_false_positive(),
                              km.categorical_false_negative()
                          ])
    return model, model_tmp
コード例 #6
0
def Build_Model_CNN_Image(shape, number_of_classes, sparse_categorical,
                          min_hidden_layer_cnn, max_hidden_layer_cnn,
                          min_nodes_cnn, max_nodes_cnn, random_optimizor,
                          dropout):
    """Build a randomly-sized CNN image classifier.

    Parameters:
        shape: input shape tuple (w, h, p).
        number_of_classes: number of target classes (2 -> sigmoid head).
        sparse_categorical: use sparse categorical cross-entropy when True.
        min/max_hidden_layer_cnn, min/max_nodes_cnn: bounds (upper bound
            exclusive) for the random conv-block count and filter counts.
        random_optimizor: selector forwarded to optimizors().
        dropout: dropout rate used after each pooled block and the head.

    Returns:
        (model, model_tmp) — model_tmp is an alias of model.
    """
    model = Sequential()
    filter_choices = list(range(min_nodes_cnn, max_nodes_cnn))
    block_choices = list(range(min_hidden_layer_cnn, max_hidden_layer_cnn))
    n_blocks = random.choice(block_choices)
    n_filters = random.choice(filter_choices)

    # Stem: two 3x3 conv layers at a shared random filter count.
    model.add(Conv2D(n_filters, (3, 3), padding='same', input_shape=shape))
    model.add(Activation('relu'))
    model.add(Conv2D(n_filters, (3, 3)))
    model.add(Activation('relu'))

    # Randomly deep stack of conv -> relu -> pool -> dropout blocks,
    # each with its own random filter count.
    for _ in range(n_blocks):
        n_filters = random.choice(filter_choices)
        model.add(Conv2D(n_filters, (3, 3), padding='same'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(dropout))

    # Dense head shared by both output variants.
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(dropout))

    if number_of_classes == 2:
        model.add(Dense(1, activation='sigmoid', kernel_constraint=maxnorm(3)))
        model_tmp = model
        metric_set = [
            'accuracy',
            km.binary_precision(),
            km.binary_recall(),
            km.binary_f1_score(),
            km.binary_true_positive(),
            km.binary_true_negative(),
            km.binary_false_positive(),
            km.binary_false_negative(),
        ]
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor),
                      metrics=metric_set)
    else:
        model.add(
            Dense(number_of_classes,
                  activation='softmax',
                  kernel_constraint=maxnorm(3)))
        model_tmp = model
        # Loss/metric family follows the label encoding.
        if sparse_categorical:
            loss_name = 'sparse_categorical_crossentropy'
            metric_set = [
                'accuracy',
                km.sparse_categorical_precision(),
                km.sparse_categorical_recall(),
                km.sparse_categorical_f1_score(),
                km.sparse_categorical_true_positive(),
                km.sparse_categorical_true_negative(),
                km.sparse_categorical_false_positive(),
                km.sparse_categorical_false_negative(),
            ]
        else:
            loss_name = 'categorical_crossentropy'
            metric_set = [
                'accuracy',
                km.categorical_precision(),
                km.categorical_recall(),
                km.categorical_f1_score(),
                km.categorical_true_positive(),
                km.categorical_true_negative(),
                km.categorical_false_positive(),
                km.categorical_false_negative(),
            ]
        model.compile(loss=loss_name,
                      optimizer=optimizors(random_optimizor),
                      metrics=metric_set)
    return model, model_tmp
コード例 #7
0
def Build_Model_DNN_Text(shape,
                         number_of_classes,
                         sparse_categorical,
                         min_hidden_layer_dnn,
                         max_hidden_layer_dnn,
                         min_nodes_dnn,
                         max_nodes_dnn,
                         random_optimizor,
                         dropout,
                         _l2=0.01,
                         lr=1e-3):
    """Build a randomly-sized dense (MLP) text classifier.

    Parameters:
        shape: size of the input feature space (input_dim of layer 1).
        number_of_classes: number of target classes (2 -> sigmoid head).
        sparse_categorical: use sparse categorical cross-entropy when True.
        min/max_hidden_layer_dnn, min/max_nodes_dnn: bounds (upper bound
            exclusive) for the random depth and layer widths.
        random_optimizor, dropout, _l2, lr: training knobs used below.

    Returns:
        (model, model_tmp) — model_tmp is an *alias* of model, not a copy.
    """
    model = Sequential()
    # Candidate depths/widths; note range() excludes the max values.
    layer = list(range(min_hidden_layer_dnn, max_hidden_layer_dnn))
    node = list(range(min_nodes_dnn, max_nodes_dnn))

    Numberof_NOde = random.choice(node)
    nLayers = random.choice(layer)

    Numberof_NOde_old = Numberof_NOde
    model.add(
        Dense(Numberof_NOde,
              input_dim=shape,
              activation='relu',
              kernel_regularizer=l2(_l2)))
    model.add(Dropout(dropout))
    for i in range(0, nLayers):
        Numberof_NOde = random.choice(node)
        # NOTE(review): `input_dim` on hidden layers is ignored by Keras
        # (only the first layer's input spec matters) — presumably a
        # leftover; confirm before removing.
        model.add(
            Dense(Numberof_NOde,
                  input_dim=Numberof_NOde_old,
                  activation='relu',
                  kernel_regularizer=l2(_l2)))
        model.add(Dropout(dropout))
        Numberof_NOde_old = Numberof_NOde
    if number_of_classes == 2:
        # Binary head: single sigmoid unit + binary keras-metrics.
        model.add(Dense(1, activation='sigmoid', kernel_regularizer=l2(_l2)))
        model_tmp = model
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizors(random_optimizor, lr),
                      metrics=[
                          'accuracy',
                          km.binary_precision(),
                          km.binary_recall(),
                          km.binary_f1_score(),
                          km.binary_true_positive(),
                          km.binary_true_negative(),
                          km.binary_false_positive(),
                          km.binary_false_negative()
                      ])
    else:
        # Multi-class head: softmax; loss/metrics family follows the
        # label encoding (sparse integer labels vs one-hot).
        model.add(
            Dense(number_of_classes,
                  activation='softmax',
                  kernel_regularizer=l2(_l2)))
        model_tmp = model
        if sparse_categorical:
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=[
                              'accuracy',
                              km.sparse_categorical_precision(),
                              km.sparse_categorical_recall(),
                              km.sparse_categorical_f1_score(),
                              km.sparse_categorical_true_positive(),
                              km.sparse_categorical_true_negative(),
                              km.sparse_categorical_false_positive(),
                              km.sparse_categorical_false_negative()
                          ])
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=optimizors(random_optimizor, lr),
                          metrics=[
                              'accuracy',
                              km.categorical_precision(),
                              km.categorical_recall(),
                              km.categorical_f1_score(),
                              km.categorical_true_positive(),
                              km.categorical_true_negative(),
                              km.categorical_false_positive(),
                              km.categorical_false_negative()
                          ])
    return model, model_tmp
コード例 #8
0
# Checkpoint callback: keeps the epoch with the best average accuracy,
# gated by the f1-score condition defined elsewhere in this project.
callback = ModelSaveBestAvgAcc(filepath="model-{epoch:02d}-{avgacc:.2f}.hdf5",
                               verbose=True,
                               cond=filter_val_f1score)

# One focal loss per model output — presumably get_model() returns a
# 6-output model; confirm against its definition.
losses = []
for i in range(0, 6):
    losses.append(binary_focal_loss(gamma=2.))

model = get_model(input_shape)
# `lr` is the legacy Keras optimizer argument name.
model.compile(optimizer=opt.Adam(lr=1e-4),
              loss=losses,
              metrics=[
                  'accuracy',
                  km.binary_precision(),
                  km.binary_recall(),
                  km.binary_f1_score()
              ])

model.summary()

# NOTE(review): fit_generator is deprecated in favour of fit() in newer
# Keras/TF — fine on the pinned version this script targets.
model.fit_generator(gen_train,
                    steps_per_epoch=len(dataset_train.image_ids) // batch_size,
                    epochs=epochs,
                    validation_data=gen_val,
                    validation_steps=len(dataset_val.image_ids) // batch_size,
                    callbacks=[callback],
                    verbose=2)

print('fine')
コード例 #9
0
# Classification head stacked on top of the feature tensor `x`, which is
# produced by a base model defined earlier (outside this excerpt).
x = Flatten()(x)
x = Dense(1024, activation="relu")(x)
x = Dropout(0.5)(x)
x = layers.Dense(num_classes, activation='softmax')(x)
model_final = keras.models.Model(base_model.input, x)

model_final.summary()

# install keras_metrics to use f1 score metric
# NOTE(review): `!pip` is IPython/Jupyter shell magic — this file only
# runs as a notebook cell, not as a plain .py script.
!pip install keras_metrics

import keras_metrics
# Very small learning rate for fine-tuning (`lr` is the legacy arg name).
opt = Adam(lr=0.000001)
#opt = Adam(lr=0.0000001)
#opt = optimizers.RMSprop(lr=0.000001)
model_final.compile(loss='categorical_crossentropy', optimizer = opt, metrics=['accuracy', keras_metrics.precision(), keras_metrics.binary_f1_score(), keras_metrics.recall()])

model_final.summary()

"""### balancer les donnees"""

import numpy as np
# Collapse one-hot labels back to integer class ids.
y_train_new = np.argmax(y_train, axis=1)

from sklearn.utils import class_weight
from keras.callbacks import EarlyStopping
# NOTE(review): duplicates the y_train_new computation above.
y_train_new = np.argmax(y_train, axis=1)
#Deal with unbalanced Data
# NOTE(review): newer scikit-learn requires keyword arguments here
# (class_weight=..., classes=..., y=...); this positional form targets
# an older release.
class_weights = class_weight.compute_class_weight('balanced', np.unique(y_train_new),y_train_new)
class_weights = dict(enumerate(class_weights))
# Bare expression — displays the dict when run in a notebook cell.
class_weights
コード例 #10
0
    def __init__(self, inputs, buffer, sess_id, sess, **kwargs):
        """Configure a BetaFlapDQN agent from a nested settings dict.

        Unpacks the 'game', 'agent', 'model' and 'memory' sections of
        `inputs` into instance attributes, builds the annealed
        eps-greedy policy, optimizer and neural network on the GPU, and
        finally compiles the model and initialises the keras-rl DQN
        base class.

        Parameters:
            inputs: nested configuration dict (see the unpacking below
                for the exact keys read from each section).
            buffer: replay-memory object, forwarded to keras-rl.
            sess_id: session identifier used to build the log path.
            sess: TensorFlow/Keras session handle.
            **kwargs: forwarded unchanged to the base-class __init__.

        NOTE(review): `gpu` is read from the enclosing scope; confirm
        it names a valid tf device string.
        """
        self.util = Utility()
        self.sess = sess
        self.sess_id = sess_id

        # Short local aliases for the config sections.
        game = inputs['game']
        agnt = inputs['agent']
        # NOTE: rebinds the `sess` *parameter* to the agent's session
        # config section — self.sess (saved above) keeps the real session.
        sess = agnt['session']
        eps = sess['episode']
        mod = inputs['model']
        trn = mod['training']
        sv = mod['save']
        mem = inputs['memory']
        '''---Environment Paramters---'''
        self.env_name = game['name']
        self.fps = game['fps']
        self.mode = game['difficulty']
        self.target = game['target']
        self.tick = game['tick']
        '''---Episode Parameters---'''
        self.nb_episodes = sess['max_ep']
        # Max steps per episode = frames/sec * seconds/min * max minutes.
        self.nb_max_episode_steps = game['fps'] * 60 * eps['max_time']
        self.nb_steps = self.nb_max_episode_steps * self.nb_episodes
        self.nb_steps_warmup = trn['warmup']
        self.nb_max_start_steps = trn['max_ep_observe']
        self.max_start_steps = trn['warmup']
        self.keep_gif_score = eps['keep_gif_score']
        '''---Agent / Model Parameters---'''
        self.name = agnt['name']
        self.nb_actions = agnt['action_size']
        self.delta_clip = agnt['delta_clip']

        self.training = trn['training']
        self.verbose = trn['verbose']
        self.lr = trn['learn_rate']
        self.eps = trn['initial_epsilon']
        self.value_max = trn['initial_epsilon']
        self.value_min = trn['terminal_epsilon']
        self.anneal = trn['anneal']
        self.shuffle = trn['shuffle']
        self.train_interval = trn['interval']
        self.validate = trn['validate']
        self.split = trn['split']
        self.action_repetition = trn['action_repetition']
        self.epochs = trn['epochs']
        self.epoch = 1

        # keras-metrics stateful metric objects used at compile time.
        prec = km.binary_precision()
        re = km.binary_recall()
        f1 = km.binary_f1_score()
        self.metrics = ['accuracy', 'mse', prec, re, f1]
        self.H = mod['filter_size']
        self.alpha = mod['alpha']
        self.gamma = mod['gamma']
        self.momentum = mod['momentum']
        self.decay = mod['decay']
        self.target_model_update = mod['target_update']
        self.type = mod['type']
        self.enable_double_dqn = mod['double_dqn']
        self.enable_dueling_network = mod['dueling_network']
        self.dueling_type = mod['dueling_type']

        self.limit = mem['limit']
        self.batch_size = mem['batch_size']
        self.window_length = mem['state_size']
        self.memory_interval = mem['interval']

        self.ftype = sv['ftype']

        self.vizualize = sv['visualize']
        self.save_full = sv['save_full']
        self.save_weights = sv['save_weights']
        self.save_json = sv['save_json']
        self.save_plot = sv['save_plot']
        self.save_interval = sv['save_n']
        self.log_interval = sv['log_n']
        self.saves = sv['save_path']
        self.save_path = self.util.get_save_dir_struct(self.saves,
                                                       self.env_name)
        self.logs = sv['log_path']
        self.util.display_status('Hyperparameters Successfully Loaded')
        '''Reference/Excerpt:  keras-rl DQN Atari Example
        https://github.com/keras-rl/keras-rl/blob/master/examples/dqn_atari.py
        # Select a policy. 
        # We use eps-greedy action selection, which means that a random action
        # is selected with probability eps. We anneal eps from init to term over 
        # the course of (anneal) steps. This is done so that the agent initially 
        # explores the environment (high eps) and then gradually sticks to 
        # what it knows (low eps). We also set a dedicated eps value that is 
        # used during testing. Note that we set it to 0.05 so that the agent 
        # still performs some random actions. 
        # This ensures that the agent cannot get stuck.
        # '''
        # Objects passed to keras-rl for model (de)serialization.
        self.custom_model_objects = {
            'S': self.window_length,
            'A': self.nb_actions,
            'H': self.H,
            'lr': self.lr,
            'name': self.name,
            'batch_size': self.batch_size,
            'sess': self.sess,
            #dueling_network=self.enable_dueling_network,
            #dueling_type=self.dueling_type,
        }

        with tf.device(gpu):
            # Linearly anneal eps from value_max to value_min over
            # `anneal` steps; greedy policy is used for testing.
            self.policy = LinearAnnealedPolicy(
                inner_policy=EpsGreedyQPolicy(eps=self.value_max),
                attr='eps',
                value_max=self.value_max,
                value_min=self.value_min,
                value_test=self.alpha,
                nb_steps=self.anneal)
            self.test_policy = GreedyQPolicy()

            # Optimizer chosen by config name; Adam is the fallback.
            if mod['optimizer'].lower() == 'adamax':
                self.optimizer = Adamax(lr=self.lr)
            elif mod['optimizer'].lower() == 'adadelta':
                self.optimizer = Adadelta()
            elif mod['optimizer'].lower() == 'rmsprop':
                self.optimizer = RMSprop()
            elif mod['optimizer'].lower() == 'sgd':
                self.optimizer = SGD(
                    lr=self.lr,
                    momentum=self.momentum,
                    decay=self.decay,
                )
            else:
                self.optimizer = Adam(lr=self.lr)

        self.memory = buffer

        self.log_path = self.util.get_log_dir_struct(self.sess_id, self.logs,
                                                     self.ftype)

        self.util.display_status('Keras GPU Session {} Beginning'.format(
            self.sess_id))

        # Build the Q-network from the configured hyperparameters.
        nn = NeuralNet(
            S=self.window_length,
            A=self.nb_actions,
            H=self.H,
            lr=self.lr,
            name=self.name,
            batch_size=self.batch_size,
            dueling_network=self.enable_dueling_network,
            dueling_type=self.dueling_type,
            sess=self.sess,
        )
        with tf.device(gpu):
            self.model = nn.get_model()

        self.util.display_status(
            '{} Keras Agent with {} Optimizer Built'.format(
                self.name, mod['optimizer']))
        '''---Compile the model with chosen optimizer
        loss is calculated with lamba function based on model
        type selections (dueling, or double dqn)'''
        with tf.device(gpu):
            self.compile(
                optimizer=self.optimizer,
                metrics=self.metrics,
            )

        self.util.display_status(
            '{} Agent Fully Initialized with Compiled Model'.format(self.name))

        # Hand everything to the keras-rl DQN agent base class.
        super(BetaFlapDQN, self).__init__(
            model=self.model,
            nb_actions=self.nb_actions,
            memory=self.memory,
            policy=self.policy,
            test_policy=self.test_policy,
            enable_double_dqn=self.enable_double_dqn,
            enable_dueling_network=self.enable_dueling_network,
            dueling_type=self.dueling_type,
            **kwargs)
コード例 #11
0
# Checkpoint callback: keeps the epoch with the best average accuracy,
# gated by the f1-score condition defined elsewhere in this project.
callback = ModelSaveBestAvgAcc(
    filepath="model-{epoch:02d}-{avgacc:.2f}.hdf5",
    verbose=True, cond=filter_val_f1score
)

# Single focal loss — presumably get_model() returns a 1-output model;
# confirm against its definition.
losses = []
for i in range(0, 1):
    losses.append(binary_focal_loss(gamma=2.))

model = get_model(input_shape)
# `lr` is the legacy Keras optimizer argument name.
model.compile(
    optimizer=opt.Adam(lr=1e-4),
    loss=losses,
    metrics=['accuracy', km.binary_precision(),
             km.binary_recall(), km.binary_f1_score()]
)

model.summary()

# NOTE(review): fit_generator is deprecated in favour of fit() in newer
# Keras/TF — fine on the pinned version this script targets.
model.fit_generator(
    gen_train,
    steps_per_epoch=len(dataset_train.image_ids) // batch_size,
    epochs=epochs,
    validation_data=gen_val,
    validation_steps=len(dataset_val.image_ids) // batch_size,
    callbacks=[callback],
    verbose=1
)

print('fine')
コード例 #12
0
def crossValidation1(dataInput, maxDataLength):
    """Run 10-fold stratified cross-validation of a BiLSTM text classifier.

    Parameters
    ----------
    dataInput : iterable of (sample, label) pairs
        Each pair is unzipped into feature rows and binary labels.
    maxDataLength : int
        Input sequence length fed to the Embedding layer.

    Builds a fresh model per fold, trains it, records accuracy / F1 /
    precision / recall, prints per-fold and mean scores, and plots the
    four metric curves with matplotlib.
    """
    seed = 7
    np.random.seed(seed)

    # Split samples and labels.  Both are converted to arrays so the
    # integer index arrays returned by StratifiedKFold can be used
    # directly (the original left X as a tuple, which cannot be indexed
    # by an ndarray and raised TypeError at X[train]).
    X, Y = list(zip(*dataInput))
    X = np.array(X)
    Y = np.array(Y)

    # k for cross validation.
    split = 10
    kfold = StratifiedKFold(n_splits=split, shuffle=True, random_state=seed)
    # Slot 0 is unused; slots 1-4 hold accuracy, f1, precision, recall.
    cvscores = [[] for _ in range(5)]
    # Accumulates the textual report.  The original used `logFile +=`
    # without ever initializing it, raising UnboundLocalError on the
    # first fold.
    logFile = ""

    # Cross-validation loop: build, train and score one model per fold.
    for train, test in kfold.split(X, Y):
        model = Sequential()
        model.add(
            Embedding(input_dim=NUM_OF_ATTRIBUTES,
                      output_dim=32,
                      input_length=maxDataLength))
        model.add(Bidirectional(LSTM(NUM_OF_NEURONS, return_sequences=False)))
        model.add(Dropout(0.2))
        model.add(
            Dense(1,
                  activation='sigmoid',
                  kernel_regularizer=regularizers.l2(0.001)))
        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=[
                          'accuracy',
                          km.binary_f1_score(),
                          km.binary_precision(),
                          km.binary_recall()
                      ])
        model.fit(X[train],
                  Y[train],
                  validation_data=(X[test], Y[test]),
                  epochs=NUM_OF_EPOCHS,
                  batch_size=256,
                  verbose=0)

        # Metric indices: 1=accuracy, 2=f1_score, 3=precision, 4=recall.
        # NOTE(review): scoring is done on the TRAINING split, as in the
        # original; evaluate on X[test]/Y[test] for an unbiased estimate.
        scores = model.evaluate(X[train], Y[train], verbose=0)
        for i in range(1, 5):
            logFile += "%s : %.2f%%\n" % (model.metrics_names[i],
                                          scores[i] * 100)
            print("%s : %.2f%%" % (model.metrics_names[i], scores[i] * 100))
            cvscores[i].append(scores[i] * 100)
        print("========================================================")

    # Summary statistics across folds.
    print("Mean : ")
    metric_names = {1: 'acc', 2: 'f1_score', 3: 'precision', 4: 'recall'}
    for i in range(1, 5):
        print("%s : %.2f%% (+/- %.2f%%)" %
              (metric_names[i], np.mean(cvscores[i]), np.std(cvscores[i])))

    # Plot per-fold curves for the four metrics in a 2x2 grid.
    plt.style.use('classic')
    fig, axs = plt.subplots(2, 2, gridspec_kw={'hspace': 0.5, 'wspace': 0.5})

    iteration = [*range(1, split + 1)]  # x-axis: fold number 1..split
    fig.suptitle('Result Data')
    (ax1, ax2), (ax3, ax4) = axs

    ax1.plot(iteration, cvscores[1])
    ax1.set_ylabel('Accuracy (%)')

    ax2.plot(iteration, cvscores[2], 'tab:green')
    ax2.set_ylabel('F1 Measurement (%)')

    ax3.plot(iteration, cvscores[3], 'tab:orange')
    ax3.set_ylabel('Precission (%)')

    ax4.plot(iteration, cvscores[4], 'tab:red')
    ax4.set_ylabel('Recall (%)')

    plt.show()
コード例 #13
0
gen_train = prepare(dataset_train, epochs, batch_size,
                    os.path.join(coco_path, 'train_output'))

# Save a checkpoint whenever average accuracy improves.
callback = ModelSaveBestAvgAcc(filepath="model-{epoch:02d}-{acc:.2f}.hdf5",
                               verbose=True)

# One focal loss per class head, plus three extra heads with a milder alpha.
losses = [binary_focal_loss(gamma=2., alpha=0.9995)
          for _ in range(dataset_val.num_classes)]
losses += [binary_focal_loss(gamma=2., alpha=0.8) for _ in range(3)]

model = get_model(input_shape, dataset_val.num_classes)
model.compile(optimizer=opt.Adam(lr=1e-4),
              loss=losses,
              metrics=['accuracy', km.binary_f1_score()])
model.summary()

# Train from the generators; steps derived from dataset sizes.
model.fit_generator(gen_train,
                    steps_per_epoch=len(dataset_train.image_ids) // batch_size,
                    epochs=epochs,
                    validation_data=gen_val,
                    validation_steps=len(dataset_val.image_ids) // batch_size,
                    callbacks=[callback],
                    verbose=1)

print('fine')
コード例 #14
0
# Build the inception transfer-learning model when requested.
if define_inception:
    model = TransferLearning(
        input_shape=(75, 75, 3),
        output=2,
        model='inception',
        fineTune=define_finetune,
        tuneLayers=define_tuneLayers,
    )

# Optimizer.
adam = Adam()

# Compile: binary cross-entropy plus metrics focused on the positive class.
label1_metrics = [
    'binary_accuracy',
    km.binary_f1_score(label=1),
    km.binary_precision(label=1),
    km.binary_recall(label=1),
]
model.compile(loss='binary_crossentropy',
              optimizer=adam,
              metrics=label1_metrics)

callback_list = [LearningRateScheduler(step_decay)]

# Train with the step-decay LR schedule and class weighting.
history = model.fit(
    X_train,
    y_train_ohe,
    batch_size=BATCH,
    epochs=EPOCHS,
    class_weight=class_weights,
    callbacks=callback_list,
    validation_data=(X_val, y_val_ohe),
    verbose=2,
)
コード例 #15
0
def _build_embedding_matrix(word_index, embedding_index, embedding_dim):
    """Assemble the embedding weight matrix; OOV words fall back to 'UNK'."""
    embedding_matrix = np.zeros((len(word_index) + 1, embedding_dim))
    for word, i in word_index.items():
        embedding_vector = embedding_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
        else:
            # Word missing from the pretrained index: use the UNK vector.
            embedding_matrix[i] = embedding_index['UNK']
    return embedding_matrix


def _km_metric_suite(prefix):
    """Return 'accuracy' plus the full keras-metrics suite for a task prefix
    ('binary', 'categorical' or 'sparse_categorical')."""
    kinds = ('precision', 'recall', 'f1_score', 'true_positive',
             'true_negative', 'false_positive', 'false_negative')
    return ['accuracy'] + [getattr(km, '%s_%s' % (prefix, k))() for k in kinds]


def _compile_text_model(model, number_of_classes, sparse_categorical, optimizer):
    """Compile `model` with the loss and metric suite matching the task.

    Binary tasks use binary cross-entropy; multi-class tasks use sparse or
    one-hot categorical cross-entropy depending on `sparse_categorical`.
    """
    if number_of_classes == 2:
        loss, prefix = 'binary_crossentropy', 'binary'
    elif sparse_categorical:
        loss, prefix = 'sparse_categorical_crossentropy', 'sparse_categorical'
    else:
        loss, prefix = 'categorical_crossentropy', 'categorical'
    model.compile(loss=loss, optimizer=optimizer,
                  metrics=_km_metric_suite(prefix))


def Build_Model_CNN_Text(word_index,
                         embedding_index,
                         number_of_classes,
                         MAX_SEQUENCE_LENGTH,
                         EMBEDDING_DIM,
                         sparse_categorical,
                         min_hidden_layer_cnn,
                         max_hidden_layer_cnn,
                         min_nodes_cnn,
                         max_nodes_cnn,
                         random_optimizor,
                         dropout,
                         simple_model=False,
                         _l2=0.01,
                         lr=1e-3):
    """Build a CNN text classifier with a randomly sampled architecture.

    word_index: mapping word -> integer index.
    embedding_index: mapping word -> embedding vector (see data_helper.py).
    number_of_classes: number of target classes (2 selects a sigmoid head).
    MAX_SEQUENCE_LENGTH: maximum length of the input text sequences.
    EMBEDDING_DIM: dimension of the word embeddings.
    simple_model=True builds a plain stacked Conv1D network; otherwise a
    multi-branch network with one conv branch per kernel size in [2, 10].
    Layer/filter counts are drawn uniformly from the given min/max ranges.

    Returns (model, model_tmp); model_tmp aliases the same network object
    and is kept for API compatibility with callers that expect the pair.
    """
    if simple_model:
        model = Sequential()
        embedding_matrix = _build_embedding_matrix(word_index,
                                                   embedding_index,
                                                   EMBEDDING_DIM)
        model.add(
            Embedding(len(word_index) + 1,
                      EMBEDDING_DIM,
                      weights=[embedding_matrix],
                      input_length=MAX_SEQUENCE_LENGTH,
                      trainable=True))
        node_values = list(range(min_nodes_cnn, max_nodes_cnn))
        layer_count = random.choice(
            list(range(min_hidden_layer_cnn, max_hidden_layer_cnn)))
        # Randomly sized Conv1D stack.
        for _ in range(layer_count):
            model.add(
                Conv1D(random.choice(node_values),
                       5,
                       activation='relu',
                       kernel_regularizer=l2(_l2)))
            model.add(Dropout(dropout))
            model.add(MaxPooling1D(5))

        model.add(Flatten())
        model.add(Dense(random.choice(node_values), activation='relu',
                        kernel_regularizer=l2(_l2)))
        model.add(Dropout(dropout))
        model.add(Dense(random.choice(node_values), activation='relu',
                        kernel_regularizer=l2(_l2)))
        model.add(Dropout(dropout))
        if number_of_classes == 2:
            model.add(
                Dense(1, activation='sigmoid', kernel_regularizer=l2(_l2)))
        else:
            model.add(
                Dense(number_of_classes,
                      activation='softmax',
                      kernel_regularizer=l2(_l2)))
        model_tmp = model
    else:
        embedding_matrix = _build_embedding_matrix(word_index,
                                                   embedding_index,
                                                   EMBEDDING_DIM)
        embedding_layer = Embedding(len(word_index) + 1,
                                    EMBEDDING_DIM,
                                    weights=[embedding_matrix],
                                    input_length=MAX_SEQUENCE_LENGTH,
                                    trainable=True)

        # Multi-branch model: one conv branch per kernel size 2..layer+1.
        layer = random.choice(
            list(range(min_hidden_layer_cnn, max_hidden_layer_cnn)))
        print("Filter  ", layer)
        filter_sizes = [fl + 2 for fl in range(layer)]

        node = random.choice(list(range(min_nodes_cnn, max_nodes_cnn)))
        print("Node  ", node)
        sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH, ), dtype='int32')
        embedded_sequences = embedding_layer(sequence_input)

        convs = []
        for fsz in filter_sizes:
            branch = Conv1D(node, kernel_size=fsz,
                            activation='relu')(embedded_sequences)
            convs.append(MaxPooling1D(5)(branch))

        # Merge the branches, then two conv/pool stages and a dense head.
        x = Concatenate(axis=1)(convs)
        x = Conv1D(node, 5, activation='relu')(x)
        x = Dropout(dropout)(x)
        x = MaxPooling1D(5)(x)
        x = Conv1D(node, 5, activation='relu')(x)
        x = Dropout(dropout)(x)
        x = MaxPooling1D(30)(x)
        x = Flatten()(x)
        x = Dense(1024, activation='relu')(x)
        x = Dropout(dropout)(x)
        x = Dense(512, activation='relu')(x)
        x = Dropout(dropout)(x)
        if number_of_classes == 2:
            preds = Dense(1, activation='sigmoid')(x)
        else:
            preds = Dense(number_of_classes, activation='softmax')(x)
        model = Model(sequence_input, preds)
        model_tmp = model

    # Single compile path replaces the four duplicated compile blocks.
    _compile_text_model(model, number_of_classes, sparse_categorical,
                        optimizors(random_optimizor, lr))
    return model, model_tmp
コード例 #16
0
    ### TRAINING ###
    # NOTE(review): this snippet appears truncated by extraction — the
    # model.fit( call at the bottom is missing its closing parenthesis;
    # restore `)` (and likely `history = `-style assignment usage) before
    # running this fragment.

    BATCH = 64     # mini-batch size
    EPOCHS = 100   # number of training epochs

    # Define the model (6-channel 32x32 input, 2 output classes):
    model = GlandsDetector(input_shape=(32,32,6), output=2)

    # Define the optimizer:
    adam = Adam()
    
    # Compile the model: binary cross-entropy with metrics restricted to
    # the positive class (label=1).
    model.compile(
        loss='binary_crossentropy',
        optimizer=adam,
        metrics=['binary_accuracy', km.binary_f1_score(label=1), 
            km.binary_precision(label=1), km.binary_recall(label=1)]
    )

    # Learning-rate schedule callback.
    callback_list = [LearningRateScheduler(step_decay)]

    # Train:
    history = model.fit(
        X_train, 
        y_train_ohe, 
        batch_size=BATCH, 
        epochs=EPOCHS, 
        validation_data=(X_val, y_val_ohe),
        class_weight=class_weights,
        callbacks=callback_list,
        verbose=2
コード例 #17
0
def Build_Model_DNN_Image(shape, number_of_classes, sparse_categorical,
                          min_hidden_layer_dnn, max_hidden_layer_dnn,
                          min_nodes_dnn, max_nodes_dnn, random_optimizor,
                          dropout):
    '''
    Build a fully-connected (DNN) model for image classification.
    `shape` is the input feature shape, `number_of_classes` the number of
    target classes; layer and node counts are drawn uniformly from the
    given min/max ranges.  Returns (model, model_tmp), where model_tmp
    aliases the same network object.
    '''

    def _suite(prefix):
        # 'accuracy' plus the keras-metrics suite for the task prefix.
        kinds = ('precision', 'recall', 'f1_score', 'true_positive',
                 'true_negative', 'false_positive', 'false_negative')
        return ['accuracy'] + [getattr(km, prefix + '_' + k)() for k in kinds]

    node_choices = list(range(min_nodes_dnn, max_nodes_dnn))
    node_count = random.choice(node_choices)
    layer_choices = list(range(min_hidden_layer_dnn, max_hidden_layer_dnn))
    hidden_layers = random.choice(layer_choices)
    print(shape)

    model = Sequential()
    model.add(Flatten(input_shape=shape))
    model.add(Dense(node_count, activation='relu'))
    model.add(Dropout(dropout))
    # Remaining hidden layers, each with a freshly sampled width.
    for _ in range(hidden_layers - 1):
        model.add(Dense(random.choice(node_choices), activation='relu'))
        model.add(Dropout(dropout))

    # Output head + matching loss/metrics.
    if number_of_classes == 2:
        model.add(Dense(1, activation='sigmoid'))
        loss_name, prefix = 'binary_crossentropy', 'binary'
    else:
        model.add(Dense(number_of_classes, activation='softmax'))
        if sparse_categorical:
            loss_name, prefix = ('sparse_categorical_crossentropy',
                                 'sparse_categorical')
        else:
            loss_name, prefix = 'categorical_crossentropy', 'categorical'

    model_tmp = model
    model.compile(loss=loss_name,
                  optimizer=optimizors(random_optimizor),
                  metrics=_suite(prefix))
    return model, model_tmp
コード例 #18
0
ファイル: demo.py プロジェクト: vietnvri/trigger-word
def loss(y_true, y_pred):
    """Element-wise binary cross-entropy, averaged over the time axis then
    over the batch.

    Fixes vs. the original: the stabilizing epsilon was added to
    ``(1 - y_true)`` instead of inside the second log, which both skewed
    the negative-class term and left ``log(1 - y_pred)`` able to hit
    log(0); the unused ``weight`` array (built and multiplied by 10 but
    never applied) has been removed.
    """
    print(y_true.shape, y_pred.shape)  # debug: trace tensor shapes
    eps = 0.000001  # keeps both log arguments strictly positive
    return K.mean(
        K.mean(-y_true * K.log(y_pred + eps) -
               (1 - y_true) * K.log(1 - y_pred + eps),
               axis=1))


# Deserialize the pretrained model; the custom loss and keras-metrics
# objects must be supplied so Keras can resolve them.
custom_layers = {
    'loss': loss,
    'binary_precision': km.binary_precision(),
    'binary_recall': km.binary_recall(),
    'binary_f1_score': km.binary_f1_score(),
}
model = load_model(PRETRAIN_PATH, custom_objects=custom_layers)

# model.summary()


def predict(x):
    """Run the loaded model on a single input and return a flat 1-D array.

    The input's first two axes are swapped and a leading batch axis of
    size 1 is added before prediction.
    """
    batch = np.expand_dims(x.swapaxes(0, 1), axis=0)
    return model.predict(batch).reshape(-1)


def check_keyword(preds, chunk_duration, feed_duration, threshold=0.5):
    preds = preds > threshold
    chunk_sample = int(len(preds) * chunk_duration / feed_duration)