from keras.layers import (Input, Conv2D, Conv2DTranspose, BatchNormalization,
                          Activation, AveragePooling2D)
from keras.models import Model
from keras import losses, metrics


def my_cnn_network(width, dicom_path, masks_path, prag, train_dim, test_dim):
    # prag (threshold) and test_dim are unused in this function as written;
    # train_the_network loads the slices and masks (defined elsewhere)
    x_train, y_train = train_the_network(dicom_path, masks_path)
    x_train = x_train.reshape(train_dim, width, width, 1)
    y_train = y_train.reshape(train_dim, width, width, 1)

    # architecture
    inp = Input((width, width, 1))
    l = Conv2D(32, (2, 2), padding='same')(inp)
    l = BatchNormalization()(l)
    l = Activation('relu')(l)
    l = AveragePooling2D((2, 2))(l)

    l = Conv2D(64, (3, 3), padding='same')(l)
    l = BatchNormalization()(l)
    l = Activation('relu')(l)
    l = AveragePooling2D((2, 2))(l)

    l = Conv2D(256, (3, 3), padding='same')(l)
    l = BatchNormalization()(l)
    l = Activation('relu')(l)
    l = AveragePooling2D((2, 2))(l)

    l = Conv2DTranspose(128, (3, 3), strides=2, padding='same')(l)
    l = BatchNormalization()(l)
    l = Activation('relu')(l)

    l = Conv2DTranspose(64, (3, 3), strides=2, padding='same')(l)
    l = BatchNormalization()(l)
    l = Activation('relu')(l)

    l = Conv2DTranspose(32, (3, 3), padding='same')(l)
    l = BatchNormalization()(l)
    l = Activation('relu')(l)

    l = Conv2DTranspose(32, (3, 3), strides=2, padding='same')(l)
    l = BatchNormalization()(l)
    l = Activation('relu')(l)

    decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(l)

    MyModel = Model(inp, decoded)

    MyModel.summary()
    print()

    MyModel.compile(optimizer='adagrad',
                    loss=losses.mse,
                    metrics=[
                        'accuracy',
                        metrics.Recall(),
                        metrics.Precision(),
                        metrics.TruePositives(),
                        metrics.TrueNegatives(),
                        metrics.FalseNegatives(),
                        metrics.FalsePositives()
                    ])
    MyModel.fit(x_train, y_train, batch_size=32, epochs=1000)
    MyModel.save('model.h5')
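A note on the architecture above: the three 2x2 poolings shrink the feature map to width/8 and the three stride-2 transposed convolutions scale it back to width, so width must be divisible by 8 for the output mask to match y_train. A hypothetical invocation (every argument value below is illustrative, not from the original code):

# Hypothetical usage -- paths and sizes are made up for illustration.
my_cnn_network(width=512,              # must be divisible by 8
               dicom_path='data/dicom/',
               masks_path='data/masks/',
               prag=0.5,               # unused by the function as written
               train_dim=100,          # number of training slices
               test_dim=20)            # unused by the function as written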
Example #2
def neural_net(train, test):
    wandb.init(project="bachelor")
    keras_model = Sequential()
    keras_model.add(Dense(len(train[0][0]), activation="relu"))
    keras_model.add(Dense(50, activation="relu"))
    keras_model.add(Dense(1, activation="sigmoid"))

    keras_model.compile(loss="binary_crossentropy",
                        optimizer="adam",
                        metrics=[
                            'accuracy',
                            metrics.TruePositives(),
                            metrics.TrueNegatives(),
                            metrics.FalsePositives(),
                            metrics.FalseNegatives()
                        ])
    keras_model.fit(train[0],
                    train[1],
                    batch_size=3,
                    epochs=20,
                    callbacks=[WandbCallback()])
    loss, acc, tp, tn, fp, fn = keras_model.evaluate(
        test[0], test[1], callbacks=[WandbCallback()])

    p = tp / (tp + fp)
    r = tp / (tp + fn)

    print(f'f-score is: {(2 * p * r) / (p + r)}')
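Note that evaluate() returns the loss followed by the metrics in the order they were passed to compile(), so the unpacking above must read (tp, tn, fp, fn) to match the TruePositives, TrueNegatives, FalsePositives, FalseNegatives order; a mismatched unpack silently swaps counts and corrupts the precision and recall below it.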
Example #3
    def test_unweighted_with_thresholds(self):
        tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])

        y_pred = ((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
                  (0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3))
        y_true = ((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))

        result = tn_obj(y_true, y_pred)
        assert np.allclose([2., 5., 7.], K.eval(result))
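A quick NumPy cross-check of where [2., 5., 7.] comes from; Keras counts a prediction as positive when pred > threshold, so a true negative is y_true == 0 with pred <= threshold:

import numpy as np

y_pred = np.array([[0.9, 0.2, 0.8, 0.1], [0.2, 0.9, 0.7, 0.6],
                   [0.1, 0.2, 0.4, 0.3], [0.0, 1.0, 0.7, 0.3]])
y_true = np.array([[0, 1, 1, 0], [1, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1]])
for t in (0.15, 0.5, 0.85):
    print(t, np.sum((y_true == 0) & (y_pred <= t)))  # prints 2, 5, 7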
Example #4
    def test_weighted(self):
        tn_obj = metrics.TrueNegatives()
        y_true = ((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0,
                                                                      0, 1))
        y_pred = ((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1,
                                                                      1, 1))
        sample_weight = (1., 1.5, 2., 2.5)
        result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
        assert np.allclose(4., K.eval(result))
Example #5
    def test_weighted_with_thresholds(self):
        tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])

        y_pred = ((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
                  (0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3))
        y_true = ((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
        sample_weight = ((0.0, 2.0, 3.0, 5.0), )

        result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
        assert np.allclose([5., 15., 23.], K.eval(result))
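Here sample_weight has shape (1, 4), so the column weights (0, 2, 3, 5) broadcast across all four rows. At threshold 0.15, for example, the two true negatives fall in the weight-0 and weight-5 columns, giving 0 + 5 = 5; the other thresholds accumulate to 15 and 23 the same way.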
Example #6
    def test_unweighted(self):
        tn_obj = metrics.TrueNegatives()

        y_true = ((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0,
                                                                      0, 1))
        y_pred = ((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1,
                                                                      1, 1))

        result = tn_obj(y_true, y_pred)
        assert np.allclose(3., K.eval(result))
Example #7
def define_model():
    hidden_nodes = int((DATASET.get_number_of_features() + DATASET.NUM_CLASSES) / 2)
    # create and compile the DNN model (fitting happens elsewhere)
    model = Sequential()
    model.add(Dense(hidden_nodes, input_dim=DATASET.get_number_of_features(), activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=["accuracy", km.TruePositives(), km.FalsePositives(), km.TrueNegatives(),
                           km.FalseNegatives()])
    return model
Example #8
    def test_config(self):
        tn_obj = metrics.TrueNegatives(name='my_tn', thresholds=[0.4, 0.9])
        assert tn_obj.name == 'my_tn'
        assert len(tn_obj.weights) == 1
        assert tn_obj.thresholds == [0.4, 0.9]

        # Check save and restore config
        tn_obj2 = metrics.TrueNegatives.from_config(tn_obj.get_config())
        assert tn_obj2.name == 'my_tn'
        assert len(tn_obj2.weights) == 1
        assert tn_obj2.thresholds == [0.4, 0.9]
Example #9
def initializeNN():

    from keras.models import Sequential
    from keras.layers import Dense
    from keras.layers import LeakyReLU
    from keras.layers import Dropout
    from keras import regularizers
    from keras import metrics
    #import tensorflow_addons as tfa

    ### Define metrics (as metrics_list, to avoid shadowing keras.metrics)
    metrics_list = [
        metrics.CategoricalAccuracy(name="accuracy"),
        metrics.FalseNegatives(name="fn"),
        metrics.FalsePositives(name="fp"),
        metrics.TrueNegatives(name="tn"),
        metrics.TruePositives(name="tp"),
        metrics.Precision(name="precision"),
        metrics.Recall(name="recall"),
        metrics.AUC(name='auc')  #,
        #tfa.metrics.CohenKappa(name='kappa')
    ]

    # define the keras model
    nn = Sequential()
    nn.add(Dense(256, input_dim=102,
                 kernel_regularizer='l1'))  #, activation='relu'))
    nn.add(LeakyReLU(alpha=0.1))
    nn.add(Dropout(0.1))

    nn.add(Dense(128))  #, activation='relu'))#,kernel_regularizer='l1'))
    nn.add(LeakyReLU(alpha=0.1))
    nn.add(Dropout(0.1))

    nn.add(Dense(64))  #, activation='relu'))#,kernel_regularizer='l1'))
    nn.add(LeakyReLU(alpha=0.1))
    nn.add(Dropout(0.1))

    nn.add(Dense(64))  #, activation='relu'))#,kernel_regularizer='l1'))
    nn.add(LeakyReLU(alpha=0.1))
    nn.add(Dropout(0.1))

    nn.add(Dense(31, activation='softmax'))

    nn.compile(loss='categorical_crossentropy',
               optimizer='Adamax',
               metrics=metrics_list)

    return nn
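A minimal smoke test for initializeNN (the random data below is hypothetical; the 102-feature input and 31-class output are fixed by the layer definitions above):

import numpy as np
from keras.utils import to_categorical

# Hypothetical smoke test: random inputs with 102 features, one-hot
# labels over 31 classes, matching the layer shapes defined above.
X = np.random.rand(64, 102)
y = to_categorical(np.random.randint(0, 31, size=64), num_classes=31)

nn = initializeNN()
nn.fit(X, y, epochs=2, batch_size=16)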
Example #10
def train_cnn_model(x_train, y_train):
  x_train = array(x_train)
  x_train = x_train.reshape((len(x_train), 3, int(len(x_train[0])/3), 1))

  y_train = array(y_train)

  #create model
  cnn_model = Sequential()
  cnn_model.add(Conv2D(64, 
                  kernel_size=3, 
                  activation='relu', 
                  input_shape=(3,21,1), 
                  padding='same'))
  cnn_model.add(layers.BatchNormalization())  # default axis=-1 for channels-last input
  cnn_model.add(Conv2D(64, 
                  kernel_size=3, 
                  activation='relu', 
                  padding='same'))
  cnn_model.add(layers.BatchNormalization())  # default axis=-1 for channels-last input
  cnn_model.add(MaxPooling2D(2,2))
  cnn_model.add(Flatten())
  cnn_model.add(Dense(512, activation = 'relu')) 
  cnn_model.add(Dense(1, activation='sigmoid'))

  # compile and fit
  cnn_model.compile(optimizer='Adam',
                loss='binary_crossentropy',
                metrics=['acc',
                        metrics.AUC(),
                        metrics.Recall(),
                        metrics.Precision(),
                        metrics.FalseNegatives(),
                        metrics.TrueNegatives(),
                        metrics.FalsePositives(),
                        metrics.TruePositives()])
  cnn_history = cnn_model.fit(x_train, y_train,
                      epochs=100,
                      batch_size=16,
                      validation_split=0.2,
                      callbacks=[callbacks.EarlyStopping(monitor='val_loss', patience=5),
                      callbacks.LearningRateScheduler(scheduler)])
    
  print("finish training cnn model")
  return cnn_model, cnn_history
Example #11
def train_lstm_model(x_train, y_train):
  x_train = array(x_train)
  x_train = x_train.reshape((len(x_train), 1, len(x_train[0])))
  print("x_train.shape", x_train.shape)
  print(x_train[0])

  y_train = array(y_train)
  print("y_train.shape", y_train.shape)

  # improvement log: use batch size 16 and add one more LSTM layer

  lstm_model = Sequential()
  lstm_model.add(LSTM(16, 
                input_shape=(1, 63),
                return_sequences=True))
  lstm_model.add(LSTM(16))
  lstm_model.add(layers.Dense(1, activation='sigmoid'))
  lstm_model.compile(optimizer='rmsprop',
                loss='binary_crossentropy',
                metrics=['acc',
                        metrics.AUC(),
                        metrics.Recall(),
                        metrics.Precision(),
                        metrics.FalseNegatives(),
                        metrics.TrueNegatives(),
                        metrics.FalsePositives(),
                        metrics.TruePositives()])
  lstm_history = lstm_model.fit(x_train, y_train,
                      epochs=100,
                      batch_size=16,
                      validation_split=0.2,
                      callbacks=[callbacks.EarlyStopping(monitor='val_loss', patience=5),
                      callbacks.LearningRateScheduler(scheduler)])
  print("finish training lstm model")
  return lstm_model, lstm_history
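Both training functions above pass a scheduler function to callbacks.LearningRateScheduler, but its definition is not part of this snippet. A minimal sketch of what such a schedule could look like (the decay policy here is an assumption, not the original code; recent Keras versions call the schedule with the epoch index and the current learning rate):

import math

def scheduler(epoch, lr):
    # Hypothetical schedule: hold the initial rate for 10 epochs,
    # then decay it exponentially.
    if epoch < 10:
        return lr
    return lr * math.exp(-0.1)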
Example #12
# units_*, activation_*, dropout_*, nb_classes, compile_optimizer and
# validation_split are defined earlier in the original script (not shown)
model.add(Dense(units_11, activation=activation_11))
model.add(Dropout(dropout_11))

model.add(Dense(units_12, activation=activation_12))
model.add(Dropout(dropout_12))

model.add(Dense(nb_classes, activation='sigmoid'))

METRICS = [
            metrics.BinaryAccuracy(name='ACCURACY'),
            metrics.Precision(name='PRECISION'),
            metrics.Recall(name='RECALL'),
            metrics.AUC(name='AUC'),
            metrics.TruePositives(name='TP'),
            metrics.TrueNegatives(name='TN'),
            metrics.FalsePositives(name='FP'),
            metrics.FalseNegatives(name='FN')]

model.compile(loss='binary_crossentropy',
                optimizer=compile_optimizer,
                metrics=METRICS)

# GENERATORS
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    validation_split=validation_split)
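A sketch of how a generator built with validation_split is typically consumed through flow_from_directory (the directory path, target size, and class mode below are assumptions; the split only takes effect when subset= is passed):

# Hypothetical usage -- 'data/train', target_size and class_mode are illustrative.
train_generator = train_datagen.flow_from_directory(
    'data/train', target_size=(150, 150), batch_size=32,
    class_mode='binary', subset='training')
validation_generator = train_datagen.flow_from_directory(
    'data/train', target_size=(150, 150), batch_size=32,
    class_mode='binary', subset='validation')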
Example #13
goes = HDF5Matrix(path + 'nor_goes_2019_800_3h_re.h5', 'data')
#gfs = HDF5Matrix(path + 'nor_gfs_2019_100_3h.h5', 'data')

# imerg and label are loaded earlier in the original script (not shown here)
print(imerg.shape, label.shape, goes.shape, flush=True)

model = UNet()
print(model.summary(), flush=True)
model = multi_gpu_model(model, gpus=2)

# named metrics_list to avoid shadowing the keras.metrics module
metrics_list = [
    metrics.FalseNegatives(name="fn"),
    metrics.FalsePositives(name="fp"),
    metrics.TrueNegatives(name="tn"),
    metrics.TruePositives(name="tp"),
    metrics.Precision(name="precision"),
    metrics.Recall(name="recall"),
]


model.compile(optimizer="Adam", loss='binary_crossentropy', metrics=metrics_list)

epochs = 500
batch_size = 16
earlystopper = EarlyStopping(patience=50, verbose=1, monitor='val_loss')
checkpointer = ModelCheckpoint('model_ck_mse_new.h5', save_best_only=True, verbose=1)

history = model.fit([goes, imerg], label, epochs=epochs, batch_size=batch_size,
                    validation_split=0.3, callbacks=[earlystopper, checkpointer], verbose=2)
Example #14
import numpy as np
from keras import optimizers
from keras import metrics
from keras import losses
from keras.models import Sequential
from keras.layers import Dense, Dropout
from scipy.io.arff import loadarff
from sklearn import preprocessing
import matplotlib.pyplot as plt

METRICS = [
    metrics.FalsePositives(name='fp'),
    metrics.FalseNegatives(name='fn'),
    metrics.TruePositives(name='tp'),
    metrics.TrueNegatives(name='tn'),
    metrics.BinaryAccuracy(name='accuracy'),
]
# note: despite the variable name, this selects the squared-hinge loss, not MSE
mean_squared_error = losses.squared_hinge

KDDTrain, train_metadata = loadarff("KDDTrain+.arff")
KDDTest, test_metadata = loadarff("KDDTest+.arff")
training_nparray = np.asarray(KDDTrain.tolist())
testing_nparray = np.asarray(KDDTest.tolist())

enc = preprocessing.OrdinalEncoder()

encoded_dataset = enc.fit_transform(training_nparray)
X_train = encoded_dataset[:, :-1]
y_train = np.ravel(encoded_dataset[:, -1:])
# use transform (not fit_transform) so the test set reuses the encoding
# learned from the training data
encoded_dataset = enc.transform(testing_nparray)
X_test = encoded_dataset[:, :-1]