# Imports assumed by this snippet (standalone Keras; use the tensorflow.keras
# equivalents if that is what the project targets):
from keras.models import Model
from keras.layers import (Input, Conv2D, Conv2DTranspose, BatchNormalization,
                          Activation, AveragePooling2D)
from keras import losses, metrics


def my_cnn_network(width, dicom_path, masks_path, prag, train_dim, test_dim):
    # load the DICOM slices and their masks (train_the_network is defined
    # elsewhere in the project); prag and test_dim are unused in this excerpt
    x_train, y_train = train_the_network(dicom_path, masks_path)
    x_train = x_train.reshape(train_dim, width, width, 1)
    y_train = y_train.reshape(train_dim, width, width, 1)

    # architecture: convolutional encoder followed by a transposed-convolution decoder
    inp = Input((width, width, 1))
    l = Conv2D(32, (2, 2), padding='same')(inp)
    l = BatchNormalization()(l)
    l = Activation('relu')(l)
    l = AveragePooling2D((2, 2))(l)

    l = Conv2D(64, (3, 3), padding='same')(l)
    l = BatchNormalization()(l)
    l = Activation('relu')(l)
    l = AveragePooling2D((2, 2))(l)

    l = Conv2D(256, (3, 3), padding='same')(l)
    l = BatchNormalization()(l)
    l = Activation('relu')(l)
    l = AveragePooling2D((2, 2))(l)
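
    # decoder: three stride-2 transposed convolutions undo the three 2x2
    # poolings above, restoring the input resolution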

    l = Conv2DTranspose(128, (3, 3), strides=2, padding='same')(l)
    l = BatchNormalization()(l)
    l = Activation('relu')(l)

    l = Conv2DTranspose(64, (3, 3), strides=2, padding='same')(l)
    l = BatchNormalization()(l)
    l = Activation('relu')(l)

    l = Conv2DTranspose(32, (3, 3), padding='same')(l)
    l = BatchNormalization()(l)
    l = Activation('relu')(l)

    l = Conv2DTranspose(32, (3, 3), strides=2, padding='same')(l)
    l = BatchNormalization()(l)
    l = Activation('relu')(l)

    # per-pixel probabilities for the binary mask
    decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(l)

    MyModel = Model(inp, decoded)

    MyModel.summary()
    print()

    MyModel.compile(optimizer='adagrad',
                    loss=losses.mse,
                    metrics=[
                        'accuracy',
                        metrics.Recall(),
                        metrics.Precision(),
                        metrics.TruePositives(),
                        metrics.TrueNegatives(),
                        metrics.FalseNegatives(),
                        metrics.FalsePositives()
                    ])
    MyModel.fit(x_train, y_train, batch_size=32, epochs=1000)
    MyModel.save('model.h5')
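
A minimal sketch of reusing the file saved above, assuming the same Keras
installation and a preprocessed `x_test` array of shape (n, width, width, 1)
(x_test is an assumption, not part of the original code):

from keras.models import load_model

model = load_model('model.h5')      # file written by my_cnn_network above
pred_masks = model.predict(x_test)  # per-pixel mask probabilities in [0, 1]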
Code example #2
# Imports assumed by this snippet (wandb plus standalone Keras; newer wandb
# versions expose WandbCallback under wandb.integration.keras instead):
import wandb
from wandb.keras import WandbCallback
from keras.models import Sequential
from keras.layers import Dense
from keras import metrics


def neural_net(train, test):
    wandb.init(project="bachelor")
    keras_model = Sequential()
    # first layer width matches the number of input features
    keras_model.add(Dense(len(train[0][0]), activation="relu"))
    keras_model.add(Dense(50, activation="relu"))
    keras_model.add(Dense(1, activation="sigmoid"))

    keras_model.compile(loss="binary_crossentropy",
                        optimizer="adam",
                        metrics=[
                            'accuracy',
                            metrics.TruePositives(),
                            metrics.TrueNegatives(),
                            metrics.FalsePositives(),
                            metrics.FalseNegatives()
                        ])
    keras_model.fit(train[0],
                    train[1],
                    batch_size=3,
                    epochs=20,
                    callbacks=[WandbCallback()])
    # evaluate() returns values in compile() order:
    # loss, accuracy, TP, TN, FP, FN
    loss, acc, tp, tn, fp, fn = keras_model.evaluate(
        test[0], test[1], callbacks=[WandbCallback()])

    p = tp / (tp + fp)
    r = tp / (tp + fn)

    print(f'f-score is: {(2 * p * r) / (p + r)}')
Code example #3
    def test_weighted(self):
        fp_obj = metrics.FalsePositives()
        y_true = ((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0),
                  (0, 0, 0, 0, 1))
        y_pred = ((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0),
                  (1, 1, 1, 1, 1))
        sample_weight = (1., 1.5, 2., 2.5)
        result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
        assert np.allclose(14., K.eval(result))
Code example #4
    def test_unweighted_with_thresholds(self):
        fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
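        # with a threshold list, the metric keeps one FP count per threshold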

        y_pred = ((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
                  (0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3))
        y_true = ((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))

        result = fp_obj(y_true, y_pred)
        assert np.allclose([7., 4., 2.], K.eval(result))
Code example #5
File: grizzly.py Project: blacksite/blacksite
def define_model():
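    # rule-of-thumb hidden width: the mean of input-feature and class counts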
    hidden_nodes = int((DATASET.get_number_of_features() + DATASET.NUM_CLASSES) / 2)
    # create and fit the DNN network
    model = Sequential()
    model.add(Dense(hidden_nodes, input_dim=DATASET.get_number_of_features(), activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=["accuracy", km.TruePositives(), km.FalsePositives(), km.TrueNegatives(),
                           km.FalseNegatives()])
    return model
Code example #6
    def test_unweighted(self):
        fp_obj = metrics.FalsePositives()

        y_true = ((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0),
                  (0, 0, 0, 0, 1))
        y_pred = ((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0),
                  (1, 1, 1, 1, 1))

        result = fp_obj(y_true, y_pred)
        assert np.allclose(7., K.eval(result))
Code example #7
    def test_weighted_with_thresholds(self):
        fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])

        y_pred = ((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
                  (0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3))
        y_true = ((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
        sample_weight = ((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0),
                         (19.0, 23.0, 29.0, 31.0), (5.0, 15.0, 10.0, 0))
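        # expected per-threshold FP totals below are the sums of the weights
        # at positions where the thresholded prediction is 1 but y_true is 0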

        result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
        assert np.allclose([125., 42., 12.], K.eval(result))
Code example #8
    def test_config(self):
        fp_obj = metrics.FalsePositives(name='my_fp', thresholds=[0.4, 0.9])
        assert fp_obj.name == 'my_fp'
        assert len(fp_obj.weights) == 1
        assert fp_obj.thresholds == [0.4, 0.9]

        # Check save and restore config
        fp_obj2 = metrics.FalsePositives.from_config(fp_obj.get_config())
        assert fp_obj2.name == 'my_fp'
        assert len(fp_obj2.weights) == 1
        assert fp_obj2.thresholds == [0.4, 0.9]
Code example #9
def initializeNN():

    from keras.models import Sequential
    from keras.layers import Dense
    from keras.layers import LeakyReLU
    from keras.layers import Dropout
    from keras import regularizers
    from keras import metrics
    #import tensorflow_addons as tfa

    ### Define metrics (named so the list does not shadow the keras.metrics module)
    nn_metrics = [
        metrics.CategoricalAccuracy(name="accuracy"),
        metrics.FalseNegatives(name="fn"),
        metrics.FalsePositives(name="fp"),
        metrics.TrueNegatives(name="tn"),
        metrics.TruePositives(name="tp"),
        metrics.Precision(name="precision"),
        metrics.Recall(name="recall"),
        metrics.AUC(name='auc')  #,
        #tfa.metrics.CohenKappa(name='kappa')
    ]

    # define the keras model
    nn = Sequential()
    nn.add(Dense(256, input_dim=102,
                 kernel_regularizer='l1'))  #, activation='relu'))
    nn.add(LeakyReLU(alpha=0.1))
    nn.add(Dropout(0.1))

    nn.add(Dense(128))  #, activation='relu'))#,kernel_regularizer='l1'))
    nn.add(LeakyReLU(alpha=0.1))
    nn.add(Dropout(0.1))

    nn.add(Dense(64))  #, activation='relu'))#,kernel_regularizer='l1'))
    nn.add(LeakyReLU(alpha=0.1))
    nn.add(Dropout(0.1))

    nn.add(Dense(64))  #, activation='relu'))#,kernel_regularizer='l1'))
    nn.add(LeakyReLU(alpha=0.1))
    nn.add(Dropout(0.1))

    nn.add(Dense(31, activation='softmax'))

    nn.compile(loss='categorical_crossentropy',
               optimizer='Adamax',
               metrics=nn_metrics)

    return nn
Code example #10
def train_cnn_model(x_train, y_train):
  x_train = array(x_train)
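  # lay the 63 features per sample out as a 3 x 21 single-channel grid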
  x_train = x_train.reshape((len(x_train), 3, int(len(x_train[0])/3), 1))

  y_train = array(y_train)

  #create model
  cnn_model = Sequential()
  cnn_model.add(Conv2D(64, 
                  kernel_size=3, 
                  activation='relu', 
                  input_shape=(3,21,1), 
                  padding='same'))
  cnn_model.add(layers.BatchNormalization())  # default axis=-1 normalizes the channel axis for channels-last data
  cnn_model.add(Conv2D(64, 
                  kernel_size=3, 
                  activation='relu', 
                  padding='same'))
  cnn_model.add(layers.BatchNormalization())
  cnn_model.add(MaxPooling2D(2,2))
  cnn_model.add(Flatten())
  cnn_model.add(Dense(512, activation = 'relu')) 
  cnn_model.add(Dense(1, activation='sigmoid'))

  # compile and fit
  cnn_model.compile(optimizer='Adam',
                loss='binary_crossentropy',
                metrics=['acc',
                        metrics.AUC(),
                        metrics.Recall(),
                        metrics.Precision(),
                        metrics.FalseNegatives(),
                        metrics.TrueNegatives(),
                        metrics.FalsePositives(),
                        metrics.TruePositives()])
  cnn_history = cnn_model.fit(x_train, y_train,
                      epochs=100,
                      batch_size=16,
                      validation_split=0.2,
                      callbacks=[callbacks.EarlyStopping(monitor='val_loss', patience=5),
                      callbacks.LearningRateScheduler(scheduler)])
    
  print("finish training cnn model")
  return cnn_model, cnn_history
Code example #11
def all_experiment_model(hp):
    PRE_TRAINED_MODEL = Xception(input_shape=(IMG_HEIGHT, IMG_WIDTH, 3),
                                 include_top=False,
                                 pooling='avg',
                                 weights='imagenet')

    # freeze the ImageNet backbone so only the newly added head is trained
    for layer in PRE_TRAINED_MODEL.layers:
        layer.trainable = False

    x = layers.Flatten()(PRE_TRAINED_MODEL.output)
    for i in range(
            hp.Int('number_of_dense_dropout_blocks', min_value=0,
                   max_value=2)):
        x = layers.Dense(hp.Int(f'dense_units_{i}',
                                min_value=1024,
                                max_value=4096,
                                step=512),
                         activation='relu')(x)
        x = layers.Dropout(
            hp.Float(f'dropout_probability_{i}',
                     min_value=0,
                     max_value=0.3,
                     step=0.05))(x)
    x = layers.Dense(hp.Int('penultimate_dense_unit',
                            min_value=1024,
                            max_value=4096,
                            step=512),
                     activation='relu')(x)
    x = layers.Dense(1, activation='sigmoid')(x)

    MODEL = Model(PRE_TRAINED_MODEL.input, x)

    chosen_loss = hp.Choice(
        'loss', values=['SigmoidFocalCrossEntropy', 'BinaryCrossentropy'])
    # look the loss class up directly rather than via eval(); both classes
    # are assumed imported upstream (SigmoidFocalCrossEntropy from
    # tensorflow_addons.losses, BinaryCrossentropy from keras.losses)
    loss_classes = {'SigmoidFocalCrossEntropy': SigmoidFocalCrossEntropy,
                    'BinaryCrossentropy': BinaryCrossentropy}
    MODEL.compile(optimizer=hp.Choice('optimiser',
                                      values=['Adam', 'RMSprop', 'SGD']),
                  loss=loss_classes[chosen_loss](),
                  metrics=[
                      metrics.BinaryAccuracy(name='acc'),
                      metrics.AUC(name='auc'),
                      metrics.FalsePositives(name='fp')
                  ])
    return MODEL
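
The hp-driven builders in these examples follow the Keras Tuner model-builder
signature. A minimal wiring sketch, assuming the keras_tuner package is
installed and `train_ds` is a prepared training dataset (both assumptions,
not part of the original code):

import keras_tuner as kt

# hedged sketch: search over the hyperparameters declared in the builder
tuner = kt.RandomSearch(
    all_experiment_model,  # the hp -> Model builder defined above
    objective=kt.Objective('auc', direction='max'),
    max_trials=10)
tuner.search(train_ds, epochs=5)
best_model = tuner.get_best_models(num_models=1)[0]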
Code example #12
def optimiser_experiment_model(hp):
    PRE_TRAINED_MODEL = Xception(input_shape=(IMG_HEIGHT, IMG_WIDTH, 3),
                                 include_top=False,
                                 pooling='avg',
                                 weights='imagenet')

    for layer in PRE_TRAINED_MODEL.layers:
        layer.trainable = False

    x = layers.Flatten()(PRE_TRAINED_MODEL.output)
    x = layers.Dense(1, activation='sigmoid')(x)

    MODEL = Model(PRE_TRAINED_MODEL.input, x)

    MODEL.compile(optimizer=hp.Choice('optimiser',
                                      values=['Adam', 'RMSprop', 'SGD']),
                  loss='binary_crossentropy',
                  metrics=[
                      metrics.BinaryAccuracy(name='acc'),
                      metrics.AUC(name='auc'),
                      metrics.FalsePositives(name='fp')
                  ])
    return MODEL
Code example #13
def train_lstm_model(x_train, y_train):
  x_train = array(x_train)
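  # each sample becomes a length-1 sequence of its 63 features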
  x_train = x_train.reshape((len(x_train), 1, len(x_train[0])))
  print("x_train.shape", x_train.shape)
  print(x_train[0])

  y_train = array(y_train)
  print("y_train.shape", y_train.shape)

  # improvement log: use batch size 16 and add one more LSTM layer

  lstm_model = Sequential()
  lstm_model.add(LSTM(16, 
                input_shape=(1, 63),
                return_sequences=True))
  lstm_model.add(LSTM(16))
  lstm_model.add(layers.Dense(1, activation='sigmoid'))
  lstm_model.compile(optimizer='rmsprop',
                loss='binary_crossentropy',
                metrics=['acc',
                        metrics.AUC(),
                        metrics.Recall(),
                        metrics.Precision(),
                        metrics.FalseNegatives(),
                        metrics.TrueNegatives(),
                        metrics.FalsePositives(),
                        metrics.TruePositives()])
  lstm_history = lstm_model.fit(x_train, y_train,
                      epochs=100,
                      batch_size=16,
                      validation_split=0.2,
                      callbacks=[callbacks.EarlyStopping(monitor='val_loss', patience=5),
                      callbacks.LearningRateScheduler(scheduler)])
  print("finish training lstm model")
  return lstm_model, lstm_history
Code example #14
def loss_experiment_model(hp):
    PRE_TRAINED_MODEL = Xception(input_shape=(IMG_HEIGHT, IMG_WIDTH, 3),
                                 include_top=False,
                                 pooling='avg',
                                 weights='imagenet')

    for layer in PRE_TRAINED_MODEL.layers:
        layer.trainable = False

    x = layers.Flatten()(PRE_TRAINED_MODEL.output)
    x = layers.Dense(1, activation='sigmoid')(x)

    MODEL = Model(PRE_TRAINED_MODEL.input, x)

    chosen_loss = hp.Choice(
        'loss', values=['SigmoidFocalCrossEntropy', 'BinaryCrossentropy'])
    # same eval()-free loss-class lookup as in all_experiment_model above
    loss_classes = {'SigmoidFocalCrossEntropy': SigmoidFocalCrossEntropy,
                    'BinaryCrossentropy': BinaryCrossentropy}
    MODEL.compile(optimizer='adam',
                  loss=loss_classes[chosen_loss](),
                  metrics=[
                      metrics.BinaryAccuracy(name='acc'),
                      metrics.AUC(name='auc'),
                      metrics.FalsePositives(name='fp')
                  ])
    return MODEL
Code example #15
    def test_threshold_limit(self):
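        # valid thresholds lie in [0, 1]; out-of-range or None entries raise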
        with pytest.raises(Exception):
            metrics.FalsePositives(thresholds=[-1, 0.5, 2])

        with pytest.raises(Exception):
            metrics.FalsePositives(thresholds=[None])
Code example #16
imerg = HDF5Matrix(path + 'imerg_2019_200_3h.h5', 'data')
goes = HDF5Matrix(path + 'nor_goes_2019_800_3h_re.h5', 'data')
# gfs = HDF5Matrix(path + 'nor_gfs_2019_100_3h.h5', 'data')
# `label` is used below but its definition is not part of this excerpt; it is
# presumably loaded the same way from a target-field HDF5 file


print(imerg.shape, label.shape, goes.shape, flush=True)


model=UNet()
print(model.summary(),flush=True)
model = multi_gpu_model(model,gpus=2)

# name the list METRICS so it does not shadow the keras.metrics module
METRICS = [
    metrics.FalseNegatives(name="fn"),
    metrics.FalsePositives(name="fp"),
    metrics.TrueNegatives(name="tn"),
    metrics.TruePositives(name="tp"),
    metrics.Precision(name="precision"),
    metrics.Recall(name="recall"),
]


model.compile(optimizer="Adam", loss='binary_crossentropy', metrics=METRICS)

epochs=500
batch_size=16
earlystopper = EarlyStopping(patience=50,verbose=1, monitor='val_loss')
checkpointer = ModelCheckpoint('model_ck_mse_new.h5', save_best_only=True, verbose=1)

history = model.fit([goes, imerg], label, epochs=epochs, batch_size=batch_size,
                    # assumed completion of the truncated call, using the
                    # callbacks defined above
                    callbacks=[earlystopper, checkpointer])
Code example #17
for layer in PRE_TRAINED_MODEL.layers:
    layer.trainable = False

x = layers.Flatten()(PRE_TRAINED_MODEL.output)
x = layers.Dense(1024, activation='relu')(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(1, activation='sigmoid')(x)

MODEL = Model(PRE_TRAINED_MODEL.input, x)

MODEL.compile(optimizer=RMSprop(lr=LEARNING_RATE),
              loss='binary_crossentropy',
              metrics=[
                  metrics.BinaryAccuracy(name='acc'),
                  metrics.AUC(name='auc'),
                  metrics.FalsePositives(name='fp')
              ])

EARLY_STOPPING = EarlyStopping(monitor='fp', 
                               verbose=1,
                               patience=50,
                               mode='max',
                               restore_best_weights=True)

CLASS_WEIGHT = {0: 0.001694915254237288, 
                1: 0.00017733640716439085}
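# these values look like inverse class counts (~1/590 and ~1/5639); the
# derivation is not shown in this excerpt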
# CLASS_WEIGHT = {0: 9, 
#                 1: 1}
# CLASS_WEIGHT = {0: 1, 
#                 1: 1}
Code example #18
model.add(Dense(units_11, activation=activation_11))
model.add(Dropout(dropout_11))

model.add(Dense(units_12, activation=activation_12))
model.add(Dropout(dropout_12))

model.add(Dense(nb_classes, activation='sigmoid'))

METRICS = [
            metrics.BinaryAccuracy(name='ACCURACY'),
            metrics.Precision(name='PRECISION'),
            metrics.Recall(name='RECALL'),
            metrics.AUC(name='AUC'),
            metrics.TruePositives(name='TP'),
            metrics.TrueNegatives(name='TN'),
            metrics.FalsePositives(name='FP'),
            metrics.FalseNegatives(name='FN')]

model.compile(loss='binary_crossentropy',
                optimizer=compile_optimizer,
                metrics=METRICS)

# GENERATORS
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    validation_split=validation_split)

test_datagen = ImageDataGenerator(rescale=1. / 255)
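# augmentation (shear/zoom/flip) applies to training images only; the test
# generator just rescales pixel values to [0, 1]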