Example #1
import torch
# BinaryFocalLoss is assumed to come from the project's PyTorch focal-loss
# module; the import is not shown in the original snippet.


def test_BFL():
    # Random +/-20 "logits" and a target that is 1 exactly where the output
    # is positive, so the loss should be small if the criterion expects logits.
    output = 40 * (torch.randint(0, 2, (1, 1, 32, 32, 32)) - 0.5)
    target = torch.zeros_like(output)
    target[output > 0] = 1
    # target = torch.randint(0, 2, (1, 1, 32, 32, 32))
    criterion = BinaryFocalLoss()
    loss = criterion(output, target)
    print(loss.item())
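For reference, the standard binary focal loss (Lin et al. 2017) that tests like this exercise is a down-weighted cross-entropy; a minimal PyTorch sketch, assuming logits input, gamma=2, and mean reduction (the package's defaults may differ):

import torch

def binary_focal_loss_ref(logits, target, gamma=2.0):
    """Reference focal loss: -(1 - p_t)**gamma * log(p_t), averaged."""
    p = torch.sigmoid(logits)                 # predicted P(class = 1)
    p_t = torch.where(target == 1, p, 1 - p)  # probability of the true class
    return (-(1 - p_t) ** gamma * torch.log(p_t.clamp_min(1e-8))).mean()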
Example #2
from itertools import product

import numpy as np
import tensorflow as tf

from focal_loss import BinaryFocalLoss, binary_focal_loss


def test_train_dummy_binary_classifier():
    """Train a simple model to make sure that BinaryFocalLoss works."""
    # Data/model parameters
    n_examples = 100
    n_features = 16
    epochs = 3
    random_state = np.random.RandomState(0)

    # Generate some fake data
    x = random_state.binomial(n=1, p=0.5, size=(n_examples, n_features))
    x = 2.0 * x.astype(np.float32) - 1.0
    weights = 100 * np.ones(shape=(n_features, 1)).astype(np.float32)
    y = (x.dot(weights) > 0).astype(np.int8)

    # Number of positive and negative examples
    n_pos = y.sum()
    n_neg = n_examples - n_pos

    for pos_weight in (None, (n_neg / n_pos)):
        for gamma, label_smoothing in product((0, 2), (None, 0.1)):
            for from_logits in (True, False):
                if from_logits:
                    activation = None
                else:
                    activation = 'sigmoid'
                # Just a linear classifier (without bias term)
                model = tf.keras.Sequential(layers=[
                    tf.keras.layers.Input(shape=n_features),
                    tf.keras.layers.Dense(units=1, use_bias=False,
                                          activation=activation),
                ])
                model.compile(
                    optimizer='sgd',
                    loss=BinaryFocalLoss(gamma=gamma, pos_weight=pos_weight,
                                         from_logits=from_logits,
                                         label_smoothing=label_smoothing),
                    metrics=['accuracy'],
                )
                stop_on_nan = tf.keras.callbacks.TerminateOnNaN()
                history = model.fit(x, y, batch_size=n_examples, epochs=epochs,
                                    callbacks=[stop_on_nan])
                history = history.history

                # Check that we didn't stop early: if we did then we
                # encountered NaNs during training, and that shouldn't happen
                assert len(history['loss']) == epochs

                # Check that BinaryFocalLoss and binary_focal_loss agree (at
                # least when averaged)
                model_loss, *_ = model.evaluate(x, y)

                y_pred = model.predict(x)
                loss = binary_focal_loss(y_true=y, y_pred=y_pred,
                                         gamma=gamma, pos_weight=pos_weight,
                                         from_logits=from_logits,
                                         label_smoothing=label_smoothing)
                loss = tf.math.reduce_mean(loss)
                tf.debugging.assert_near(loss, model_loss)
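The functional form used in the final assertion can also be called standalone; a minimal sketch (input values chosen purely for illustration):

import tensorflow as tf
from focal_loss import binary_focal_loss

y_true = tf.constant([0.0, 1.0, 1.0])
y_pred = tf.constant([0.1, 0.7, 0.9])
loss = binary_focal_loss(y_true=y_true, y_pred=y_pred, gamma=2)
print(float(tf.math.reduce_mean(loss)))  # mean of the per-example losses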
Example #3
# Method of a test class, shown here without the enclosing class definition.
def _test_reduce_to_keras_loss(self, y_true, y_pred, from_logits: bool,
                               label_smoothing: Optional[float]):
    """Focal loss with gamma=0 should be the same as cross-entropy."""
    y_pred = tf.dtypes.cast(y_pred, dtype=tf.dtypes.float32)
    keras_loss = tf.keras.losses.BinaryCrossentropy(
        from_logits=from_logits,
        label_smoothing=(0
                         if label_smoothing is None else label_smoothing),
    )
    focal_loss = BinaryFocalLoss(gamma=0,
                                 from_logits=from_logits,
                                 label_smoothing=label_smoothing)
    self.assertAllClose(keras_loss(y_true, y_pred),
                        focal_loss(y_true, y_pred))
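The same gamma=0 reduction can be checked numerically outside the test class; a standalone sketch (values arbitrary):

import tensorflow as tf
from focal_loss import BinaryFocalLoss

y_true = tf.constant([0.0, 1.0, 1.0, 0.0])
y_pred = tf.constant([0.1, 0.8, 0.6, 0.4])
bce = tf.keras.losses.BinaryCrossentropy()(y_true, y_pred)
fl0 = BinaryFocalLoss(gamma=0)(y_true, y_pred)
print(float(bce), float(fl0))  # the two values should agree closely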
Example #4
    def __init__(self, backbone=ResNetBackbone()):
        # Note: a default argument is evaluated once, at definition time, so
        # every default-constructed instance shares the same backbone object.
        self.backbone = backbone
        self.input = self.backbone.input

        last_layer = _create_pyramid_features(
            backbone_layers=self.backbone.backbone_layers, feature_size=256)
        regression_model = default_regression_model(pyramid_feature_size=256,
                                                    feature_size=256,
                                                    output_size=1)
        output = regression_model(last_layer)
        self.model = keras.models.Model(inputs=self.input, outputs=output)

        adam = keras.optimizers.Adam(learning_rate=5e-4)
        self.model.compile(optimizer=adam,
                           loss=BinaryFocalLoss(gamma=2.0),
                           metrics=[])
Example #5
File: anchor_network.py  Project: anzue/CW
def build_anchor_model(inp_shape, anchor_counts):
    inp = Input(shape=inp_shape)
    headModel = Conv2D(32, (3, 3), activation="relu", padding='same')(inp)
    headModel = Conv2D(32, (3, 3), activation="relu",
                       padding='same')(headModel)
    headModel = MaxPooling2D((2, 2))(headModel)

    headModel = Conv2D(64, (3, 3), activation="relu",
                       padding='same')(headModel)
    headModel = Conv2D(64, (3, 3), activation="relu",
                       padding='same')(headModel)
    headModel = MaxPooling2D((2, 2))(headModel)

    headModel = Conv2D(128, (3, 3), activation="relu",
                       padding='same')(headModel)
    headModel = Conv2D(128, (3, 3), activation="relu",
                       padding='same')(headModel)
    headModel = MaxPooling2D((2, 2))(headModel)

    headModel = Conv2D(256, (3, 3), activation="relu",
                       padding='same')(headModel)
    headModel = Conv2D(256, (3, 3), activation="relu",
                       padding='same')(headModel)
    headModel = MaxPooling2D((2, 2))(headModel)

    headModel = Conv2D(512, (3, 3), activation="relu",
                       padding='same')(headModel)
    headModel = Conv2D(512, (3, 3), activation="relu",
                       padding='same')(headModel)

    headModel = MaxPooling2D((2, 2))(headModel)

    headModel = Flatten(name="flatten")(headModel)
    headModel = Dropout(0.2)(headModel)
    headModel = Dense(4 * 1024, activation="relu")(headModel)
    headModel = Dropout(0.2)(headModel)
    headModel = Dense(anchor_counts[0] * anchor_counts[1],
                      activation="sigmoid")(headModel)
    headModel = Reshape((anchor_counts[0], anchor_counts[1]))(headModel)
    model = Model(inputs=inp, outputs=headModel)
    model.compile(
        # loss="binary_crossentropy",  # another option: "mean_absolute_error"
        loss=BinaryFocalLoss(gamma=3),
        optimizer=Adam(learning_rate=0.001),  # `lr` is deprecated in Keras
        metrics=["accuracy"],
    )
    return model
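Hypothetical usage of the builder above (input shape and anchor grid assumed for illustration):

model = build_anchor_model(inp_shape=(256, 256, 3), anchor_counts=(8, 8))
model.summary()  # ends in an (8, 8) sigmoid anchor-occupancy grid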
Example #6
import os
import shutil
from itertools import product

import tensorflow as tf

from focal_loss import BinaryFocalLoss


def test_save_and_restore():
    """Check whether models compiled with BinaryFocalLoss can be saved/loaded.
    """
    n_features = 10
    for gamma, pos_weight in product((0, 1, 2), (None, 0.5)):
        for from_logits, label_smoothing in product((True, False), (None, 0.1)):
            if from_logits:
                activation = None
            else:
                activation = 'sigmoid'
            # Just a linear classifier
            model = tf.keras.Sequential(layers=[
                tf.keras.layers.Input(shape=n_features),
                tf.keras.layers.Dense(units=1, activation=activation),
            ])
            model.compile(
                optimizer='sgd',
                loss=BinaryFocalLoss(gamma=gamma, pos_weight=pos_weight,
                                     from_logits=from_logits,
                                     label_smoothing=label_smoothing),
                metrics=['accuracy'],
            )
            weights = model.weights

            # Try to save the model to the HDF5 format
            h5_filepath = 'model.h5'
            model.save(h5_filepath, save_format='h5')

            h5_restored_model = tf.keras.models.load_model(h5_filepath)
            h5_restored_weights = h5_restored_model.weights
            for weight, h5_restored_weight in zip(weights, h5_restored_weights):
                tf.debugging.assert_equal(weight, h5_restored_weight)

            # Delete the created HDF5 file
            os.unlink(h5_filepath)

            # Try to save the model to the SavedModel format
            sm_filepath = 'model'
            model.save(sm_filepath, save_format='tf')

            sm_restored_model = tf.keras.models.load_model(sm_filepath)
            sm_restored_weights = sm_restored_model.weights
            for weight, sm_restored_weight in zip(weights, sm_restored_weights):
                tf.debugging.assert_equal(weight, sm_restored_weight)

            # Delete the created SavedModel directory
            shutil.rmtree(sm_filepath, ignore_errors=True)
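Loading works here without custom_objects, presumably because BinaryFocalLoss serializes its own config; on older TF versions, passing the class explicitly is a safe fallback (a sketch, assuming the same HDF5 path as above):

restored = tf.keras.models.load_model(
    'model.h5', custom_objects={'BinaryFocalLoss': BinaryFocalLoss})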
Example #7
# Method of a parameterized test class, shown without the enclosing class.
def test_get_config(self, gamma, pos_weight, from_logits, label_smoothing):
    """Check the get_config() method."""
    loss1 = BinaryFocalLoss(gamma=gamma,
                            pos_weight=pos_weight,
                            from_logits=from_logits,
                            label_smoothing=label_smoothing,
                            name='binary_focal_loss')
    config1 = loss1.get_config()
    loss2 = BinaryFocalLoss(**config1)
    config2 = loss2.get_config()
    self.assertEqual(config1, config2)
Example #8
from itertools import product

from focal_loss import BinaryFocalLoss


def test_get_config():
    """Check the get_config() method."""
    for gamma, pos_weight in product((0, 1, 2), (None, 0.5)):
        for from_logits, label_smoothing in product((True, False), (None, 0.1)):
            loss1 = BinaryFocalLoss(gamma=gamma, pos_weight=pos_weight,
                                    from_logits=from_logits,
                                    label_smoothing=label_smoothing,
                                    name='binary_focal_loss')
            config1 = loss1.get_config()
            loss2 = BinaryFocalLoss(**config1)
            config2 = loss2.get_config()
            assert config1 == config2
Example #9
import tensorflow as tf

from focal_loss import BinaryFocalLoss


def get_dummy_binary_classifier(n_features, gamma, pos_weight, label_smoothing,
                                from_logits):
    activation = None if from_logits else 'sigmoid'

    # Just a linear classifier (without bias term)
    model = tf.keras.Sequential(layers=[
        tf.keras.layers.Input(shape=n_features),
        tf.keras.layers.Dense(units=1, use_bias=False, activation=activation),
    ])
    model.compile(
        optimizer='sgd',
        loss=BinaryFocalLoss(gamma=gamma,
                             pos_weight=pos_weight,
                             from_logits=from_logits,
                             label_smoothing=label_smoothing),
        metrics=['accuracy'],
    )

    return model
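Hypothetical usage of the helper (argument values chosen arbitrarily):

model = get_dummy_binary_classifier(n_features=16, gamma=2, pos_weight=None,
                                    label_smoothing=0.1, from_logits=False)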
Example #10
from parser import parser
from tensorflow.keras import backend as K

args = parser.parse_args()
tol = args.tol
save_weights = args.save_weights
HEIGHT = args.HEIGHT
WIDTH = args.WIDTH
BATCH_SIZE = args.batch_size
FRAME_STACK = args.frame_stack
pre_trained = args.pre_trained

optimizer = keras.optimizers.Adadelta(learning_rate=args.lr)  # `lr` is deprecated
# Both branches built and compiled the identical model; only the weight
# loading differs, so the duplicated branch is folded away.
model = ResNet_Track(input_shape=(FRAME_STACK, HEIGHT, WIDTH))
if pre_trained:
    model.load_weights(args.load_weights)
model.compile(loss=BinaryFocalLoss(gamma=2),
              optimizer=optimizer,
              metrics=[keras.metrics.BinaryAccuracy()])

print('Beginning training......')
match_path = args.match_folder
match_list = [
    os.sep.join([os.getcwd(), match_path, match])
    for match in os.listdir(match_path)
]
Example #11
#FOCAL LOSS AND DICE METRIC
#Focal loss helps focus more on tough to segment classes.
from focal_loss import BinaryFocalLoss

###############################################################################

#Try various models: Unet, Attention_UNet, and Attention_ResUnet
#Rename original python file from 224_225_226_models.py to models.py
from models import Attention_ResUNet, UNet, Attention_UNet, dice_coef, dice_coef_loss, jacard_coef
'''
UNet
'''
unet_model = UNet(input_shape)
unet_model.compile(optimizer=Adam(learning_rate=1e-2),  # `lr` is deprecated
                   loss=BinaryFocalLoss(gamma=2),
                   metrics=['accuracy', jacard_coef])

unet_model.summary()  # summary() prints itself; print() would add a stray "None"

start1 = datetime.now()
unet_history = unet_model.fit(X_train,
                              y_train,
                              verbose=1,
                              batch_size=batch_size,
                              validation_data=(X_test, y_test),
                              shuffle=False,
                              epochs=50)

stop1 = datetime.now()
#Execution time of the model
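The snippet breaks off after the timing comment; a likely continuation, added for completeness (an assumption, not from the source):

print("UNet execution time:", stop1 - start1)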
Example #12
train_data = tf.data.Dataset.from_tensor_slices((X_train, y_train))
train_data = train_data.shuffle(1000).batch(64)

test_data = tf.data.Dataset.from_tensor_slices((X_test, y_test))
test_data = test_data.batch(64)

model = ks.Sequential([
    L.Flatten(),
    L.Dense(32, activation="relu"),
    L.Dense(1, activation="sigmoid")
])

model.compile(
    # ks.optimizers.SGD(1e-3, momentum=0.9),
    ks.optimizers.Adam(0.5e-3),
    loss=BinaryFocalLoss(3),
    # loss=focal_loss(),
    # loss='categorical_crossentropy',
    metrics=["accuracy",
             ks.metrics.Precision(),
             ks.metrics.Recall()],
)

# `fit_generator` is deprecated in TF 2.x; `fit` accepts a tf.data.Dataset.
model.fit(
    train_data,
    # steps_per_epoch=steps_per_epoch,
    epochs=100,
    # validation_data=(X_test, y_test)
    validation_data=test_data,
)
Example #13
dense.add(tf.keras.layers.BatchNormalization(axis=1))
dense.add(tf.keras.layers.ReLU())
dense.add(tf.keras.layers.Dropout(rate=0.5))

dense.add(tf.keras.layers.Dense(units=128, activation='linear'))
dense.add(tf.keras.layers.BatchNormalization(axis=1))
dense.add(tf.keras.layers.ReLU())
dense.add(tf.keras.layers.Dropout(rate=0.5))

encoded_left = dense(shot_1)
encoded_right = dense(shot_2)

combine = tf.keras.layers.concatenate([encoded_left, encoded_right], axis = 1)
#flat = tf.keras.layers.Flatten()(combine) #output size (None, 1536)
d1 = tf.keras.layers.Dropout(rate=0.5)(combine)
d2 = tf.keras.layers.Dense(50, activation='relu')(d1)
d2r = tf.keras.layers.Dropout(rate=0.5)(d2)
d3 = tf.keras.layers.Dense(50, activation='relu')(d2r)
d3r = tf.keras.layers.Dropout(rate=0.5)(d3)
# A 1-unit softmax layer always outputs 1.0; sigmoid is what a single-logit
# binary head (and BinaryFocalLoss) expects.
output = tf.keras.layers.Dense(1, activation='sigmoid')(d3r)

model = tf.keras.Model(inputs = inputs, outputs = output)
opt = tf.keras.optimizers.Adam(learning_rate = 0.01)
# BinaryAccuracy thresholds the predicted probability; plain Accuracy() would
# compare the raw floats to the 0/1 labels for exact equality.
model.compile(optimizer=opt, loss=BinaryFocalLoss(pos_weight=9, gamma=2.5),
              metrics=[tf.keras.metrics.BinaryAccuracy()])
model.summary()

tf.keras.utils.plot_model(model, to_file='Dense_FL128.png', dpi=100)

training_log = 'crossval_fold_' + '.txt'
csv_logger = tf.keras.callbacks.CSVLogger(training_log, append=True, separator=' ')
# Pass the logger as a callback so it actually records the run.
metrics = model.fit(X, Y_gt, epochs=100, validation_split=0.2, verbose=2,
                    batch_size=128, callbacks=[csv_logger])
Example #14
#d2r = tf.keras.layers.Dropout(rate=0.3)(d2)
d2d = tf.keras.layers.BatchNormalization(axis=1)(d2)

d3 = tf.keras.layers.Dense(10,
                           activation='relu',
                           kernel_regularizer='l1',
                           bias_regularizer='l1')(d2d)
#d3r = tf.keras.layers.Dropout(rate=0.3)(d3)
d3d = tf.keras.layers.BatchNormalization(axis=1)(d3)

output = tf.keras.layers.Dense(2, activation='softmax')(d3d)

model = tf.keras.Model(inputs=inputs, outputs=output)
opt = tf.keras.optimizers.Adam(learning_rate=0.001)
# With a 2-unit softmax head trained on one-hot targets, CategoricalAccuracy
# is the meaningful metric; plain Accuracy() compares raw floats to labels.
model.compile(optimizer=opt,
              loss=BinaryFocalLoss(pos_weight=9, gamma=2.5),
              metrics=[tf.keras.metrics.CategoricalAccuracy()])
              #BinaryFocalLoss(pos_weight=7, gamma=4)

model.summary()
image_out = directory + '/' + 'softmax_final_1.png'
print(image_out)
tf.keras.utils.plot_model(model, to_file=image_out, dpi=100)

training_log = directory + '/' + 'softmax_final_1' + '.txt'
print(training_log)
csv_logger = tf.keras.callbacks.CSVLogger(training_log,
                                          append=True,
                                          separator=' ')
metrics = model.fit(X1,
                    tf.one_hot(Y1, depth=2),
Example #15
def predict(X, Y, flag, batch_size=32, epochs=200):
    """Predict labels for X and compare the predictions to the ground truth Y.

    Arguments:
        X, Y: data and corresponding labels,
            X dim = (number of samples, number of timesteps, number of features = 75)
            Y dim = (number of samples, number of timesteps, number of labels = 8)
        flag: model to use ("LSTM", "TCN" or "Random_Forest")
        batch_size: should be set to 1 if there is only 1 sample
        epochs: kept in the signature but not used by this function

    Prints the scores and saves the predictions under Results/.
    """

    ##RESHAPE GROUND TRUTH Y
    y_te = reshape(Y, (Y.shape[0] * Y.shape[1], Y.shape[2]))

    if flag == "LSTM":
        ## LOAD MODEL
        loaded_model = load_model('Results/opt_LSTM_model')
        print("Loaded model from disk")
        loaded_model.compile(loss=BinaryFocalLoss(2),
                             optimizer='adam',
                             metrics=[
                                 BinaryAccuracy(),
                                 Precision(),
                                 Recall(),
                                 FalseNegatives(),
                                 FalsePositives()
                             ])
        #predict and reshape predictions
        y_pred = loaded_model.predict(X, batch_size=batch_size)
        y_pred = reshape(y_pred, (y_pred.shape[0] * y_pred.shape[1], 8))
        #built-in evaluation via model.evaluate
        loss, accuracy, P, R, FN, FP = loaded_model.evaluate(
            X, Y, batch_size=batch_size)

    if flag == "TCN":
        loaded_model = load_model('Results/opt_TCN_model')
        print("Loaded model from disk")
        loaded_model.compile(optimizer='adam',
                             loss=BinaryFocalLoss(5),
                             metrics=[
                                 BinaryAccuracy(),
                                 Precision(),
                                 Recall(),
                                 FalseNegatives(),
                                 FalsePositives()
                             ])

        #predict and reshape predictions
        y_pred = loaded_model.predict(X, batch_size=batch_size)
        y_pred = reshape(y_pred, (y_pred.shape[0] * y_pred.shape[1], 8))
        #built-in evaluation via model.evaluate
        loss, accuracy, P, R, FN, FP = loaded_model.evaluate(
            X, Y, batch_size=batch_size)

    if flag == "Random_Forest":

        ##Load MODEL
        loaded_rf = joblib.load("Results/opt_random_forest_model.joblib")

        #feature expansion
        X_expanded = feature_expansion(X)

        #time window flattening
        X_expanded, Y = time_window_sample(X_expanded, Y, 120)

        #reshape into one video
        X_reshaped = np.reshape(
            X_expanded,
            (X_expanded.shape[0] * X_expanded.shape[1], X_expanded.shape[2]))
        y_te = np.reshape(Y, (Y.shape[0] * Y.shape[1], Y.shape[2]))

        ##prediction
        y_pred = loaded_rf.predict(X_reshaped)

    #custom scoring of the predictions
    wf1, mf1, pf1, F1_tab, Ptab, Rtab, acc_tab = custom_scoring(y_te, y_pred)
    Tab = perf_measure(y_te, y_pred)

    print("F1 score per label: ", F1_tab)
    print("Precision per label: ", Ptab)
    print("Recall per label: ", Rtab)
    print("Macro F1 score: ", mf1, " ; Weighted F1 score: ", wf1,
          " ; Proportional F1 score: ", pf1)
    for name, tab in zip([
            "True Positive ", "True Negative ", "False Positive ",
            "False Negatives "
    ], Tab):
        for i, t in enumerate(tab):
            print(name, "number for label ", i, " :", t)

    y_pred = y_pred > 0.5
    # The original discarded the result of tf.cast; assign it so the cast
    # actually takes effect.
    y_pred = tf.cast(y_pred, tf.int32)
    print("Prediction will be saved into Results/")
    name = 'Results/' + flag + '_Annotation.csv'
    pd.DataFrame(tf.get_static_value(y_pred),
                 columns=[
                     "arch", "burrow + arch", "drag + arch", "groom",
                     "tap + arch", "egg", "proboscis", "idle"
                 ]).to_csv(name)
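A hypothetical invocation (the X_test/Y_test names are assumed; shapes per the docstring):

predict(X_test, Y_test, flag="LSTM", batch_size=1)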
Example #16
def evaluate_model_TCN(x_tr,
                       y_tr,
                       x_te,
                       y_te,
                       gamma=2,
                       epochs=200,
                       verbose=0,
                       plot=0,
                       single_run=0):
    """Training function, to evaluate train set against test set or train set againts validation set
        Arguments: (x_tr, y_tr) training data
                   (x_te, y_te) testing/validation data
                   gamma: focal loss parameter
                   epochs= number of times, it passes through the training dataset.
                   verbose: if true, print all the metrics
                   plot: if true, print built in plot
                   single_run: fix random seed to ensure reproducibility
                   
          Returns: loss: Last binary focal loss value on test/validation set
                  accuracy: accuracy on test/validation set 
                  wf1: weighted F1 score
                  wf1_: custom weighted F1 score (with proportional weights)
                  mf1: macro F1 score 
                  F1_tab: F1 score per label 
                  Ptab: precision per label 
                  Rtab: recall per label"""

    batch_size = 1
    w = class_weights(y_tr)
    clear_session()

    if single_run:
        np.random.seed(123)
        python_random.seed(123)
        tf.random.set_seed(1234)

    #-------------------------------------model definition---------------------------#
    #Creation TCN object
    Tcn = TCN(nb_filters=64,
              kernel_size=2,
              nb_stacks=1,
              dilations=(8, 16, 32, 64, 128, 256, 512, 1024),
              return_sequences=True,
              activation=LeakyReLU(0.01),
              kernel_initializer='he_normal')

    i = Input(batch_shape=(1, None, x_tr.shape[2]))
    o = Tcn(i)
    o = Dense(200,
              activation=LeakyReLU(0.01),
              kernel_regularizer=keras.regularizers.l1_l2(0.00001))(o)
    o = Dense(8, activation='sigmoid')(o)

    model = Model(inputs=[i], outputs=[o])
    model.compile(optimizer='adam',
                  loss=BinaryFocalLoss(gamma),
                  metrics=[
                      BinaryAccuracy(),
                      Precision(),
                      Recall(),
                      FalseNegatives(),
                      FalsePositives()
                  ])

    model.summary()

    #---------------------fit network---------------------------------------------#
    hist = model.fit(x_tr,
                     y_tr,
                     epochs=epochs,
                     batch_size=batch_size,
                     verbose=0,
                     validation_data=(x_te, y_te))
    #---------------------------------evaluate model----------------------------------------------#

    #evaluate model on test set (over all classes)
    loss, accuracy, P, R, FN, FP = model.evaluate(x_te,
                                                  y_te,
                                                  batch_size=batch_size,
                                                  verbose=verbose)

    #save model
    if single_run:
        model.save('Results/opt_TCN_model')
        print("Model saved to Results")

    y_pred = model.predict(x_te, batch_size=batch_size, verbose=0)
    y_pred[y_pred < 0.5] = 0.
    y_pred[y_pred >= 0.5] = 1.  # >= so values exactly at 0.5 are not left as-is

    y_pred = reshape(y_pred, (y_pred.shape[0] * y_pred.shape[1], 8))
    y_te = reshape(y_te, (y_te.shape[0] * y_te.shape[1], 8))

    #evaluate F1 score for each label
    F1_tab, Ptab, Rtab, wf1_ = F1_score(y_te, y_pred, w)

    #evaluate accuracy per label
    acc_tab = Acc(y_te, y_pred)
    print("-> F1 score per label: ", F1_tab)
    print("-> y_pred ", y_pred[:, 4])

    #test f1 score built in
    f = F1Score(8, threshold=0.5, average='weighted')
    f.update_state(y_te, y_pred)
    wf1 = f.result().numpy()
    print("weighted F1 score built in: ", wf1)
    f.reset_states()
    f = F1Score(8, threshold=0.5, average='macro')
    f.update_state(y_te, y_pred)
    mf1 = f.result().numpy()
    print("macro F1 score built in: ", mf1)
    f.reset_states()

    #-----------------------------------------print---------------------------------------------#

    #print all
    if verbose:
        print(" -> Accuracy: ", accuracy, "; Mean of labelwise accuracy: ",
              np.mean(acc_tab))
        print("Per label accuracy: ", acc_tab)
        print("-> Weighted F1 score: ", wf1_)
        print("-> F1 score per label: ", F1_tab)
        print("-> Precision: ", P, "; Recall: ", R)
        print("-> Precision per label: ", Ptab)
        print("-> Recall per label: ", Rtab)
        print("-> Loss: ", loss)

    if plot:
        plot_history(hist.history)

    return hist, loss, accuracy, wf1, wf1_, mf1, F1_tab, Ptab, Rtab
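A hypothetical call (x/y arrays of shape (videos, timesteps, features) and (videos, timesteps, 8) assumed):

hist, loss, acc, wf1, wf1_, mf1, F1_tab, Ptab, Rtab = evaluate_model_TCN(
    x_train, y_train, x_val, y_val, gamma=2, epochs=200, verbose=1)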
Example #17
def evaluate_model(x_tr,
                   y_tr,
                   x_te,
                   y_te,
                   model_type=1,
                   gamma=2,
                   nodes_nb=600,
                   drop=0.1,
                   epochs=200,
                   reg=0,
                   verbose=0,
                   plot=0,
                   single_run=0):
    """Training function, to evaluate train set against test set or train set againts validation set
        Arguments: (x_tr, y_tr) training data
                   (x_te, y_te) testing/validation data
                   model_type: type of model to train/evaluate (on LSTM layers)
                              0: 1 LSTM layer model
                              1: 1 bidirectional LSTM layer model
                              2: 2 bidirectional LSTM layer model
                   gamma: focal loss parameter
                   nodes_nb: number of neurons in the LSTM layers
                   drop: dropout value
                   verbose: if true, print all the metrics
                   plot: if true, print built in plot
                   single_run: fix random seed to ensure reproducibility
          Returns: loss: Last binary focal loss value on test/validation set
                  accuracy: accuracy on test/validation set 
                  wf1: weighted F1 score
                  wf1_: custom weighted F1 score (with proportional weights)
                  mf1: macro F1 score 
                  F1_tab: F1 score per label 
                  Ptab: precision per label 
                  Rtab: recall per label"""

    batch_size = 32
    n_features, n_outputs = x_tr.shape[2], y_tr.shape[2]
    w = class_weights(y_tr)
    clear_session()

    if single_run:
        np.random.seed(2020)
        python_random.seed(2020)
        tf.random.set_seed(2020)

    #-------------------------------------model definition-------------------------------------#
    model = Sequential()

    #model types
    if model_type == 0:
        model.add(
            LSTM(nodes_nb,
                 input_shape=(None, n_features),
                 return_sequences=True))
        model.add(Dropout(drop))
        model.add(Dense(n_outputs, activation='sigmoid'))
    if model_type == 1:
        model.add(
            Bidirectional(
                LSTM(nodes_nb,
                     input_shape=(None, n_features),
                     return_sequences=True)))
        model.add(Dropout(drop))
        model.add(Dense(n_outputs, activation='sigmoid'))
    if model_type == 2:
        model.add(
            Bidirectional(
                LSTM(nodes_nb,
                     input_shape=(None, n_features),
                     return_sequences=True)))
        model.add(Dropout(drop))
        model.add(
            Bidirectional(
                LSTM(nodes_nb,
                     input_shape=(None, n_features),
                     return_sequences=True)))
        model.add(Dropout(drop))
        model.add(Dense(n_outputs, activation='sigmoid'))

    model.compile(loss=BinaryFocalLoss(gamma),
                  optimizer='adam',
                  metrics=[
                      BinaryAccuracy(),
                      Precision(),
                      Recall(),
                      FalseNegatives(),
                      FalsePositives()
                  ])

    #------------------------------------fit network---------------------------------------------#

    hist = model.fit(x_tr,
                     y_tr,
                     epochs=epochs,
                     batch_size=batch_size,
                     verbose=0,
                     validation_data=(x_te, y_te))
    if verbose:
        model.summary()

    #---------------------------------evaluate model----------------------------------------------#

    #evaluate model on test set (over all classes)
    loss, accuracy, P, R, FN, FP = model.evaluate(x_te,
                                                  y_te,
                                                  batch_size=batch_size,
                                                  verbose=verbose)

    #save model
    if single_run:
        model.save('Results/opt_LSTM_model')
        print("Model saved to Results")

    y_pred = model.predict(x_te, batch_size=batch_size, verbose=0)
    y_pred = reshape(y_pred, (y_pred.shape[0] * y_pred.shape[1], 8))
    y_te = reshape(y_te, (y_te.shape[0] * y_te.shape[1], 8))

    #evaluate F1 score for each label
    F1_tab, Ptab, Rtab, wf1_ = F1_score(y_te, y_pred, w)
    #evaluate accuracy per label
    acc_tab = Acc(y_te, y_pred)

    #test f1 score built in
    f = F1Score(8, threshold=0.5, average='weighted')
    f.update_state(y_te, y_pred)
    wf1 = f.result().numpy()
    f.reset_states()
    f = F1Score(8, threshold=0.5, average='macro')
    f.update_state(y_te, y_pred)
    mf1 = f.result().numpy()
    f.reset_states()

    #-----------------------------------------print---------------------------------------------#

    #print all
    if verbose:
        print(" -> Accuracy: ", accuracy, "; Mean of labelwise accuracy: ",
              np.mean(acc_tab))
        print("Per label accuracy: ", acc_tab)
        print("-> Proportional F1 score: ", wf1_, "; Weighted F1 score: ", wf1,
              "; Macro F1 score: ", mf1)
        print("-> F1 score per label: ", F1_tab)
        print("-> Precision: ", P, "; Recall: ", R)
        print("-> Precision per label: ", Ptab)
        print("-> Recall per label: ", Rtab)
        print("-> Loss: ", loss)

    if plot:
        plot_history(hist.history)

    return hist, loss, accuracy, wf1, wf1_, mf1, F1_tab, Ptab, Rtab
Example #18
encode2 = conv2(x2)
encode3 = conv3(x3)
encode4 = conv4(x4)

shot1 = tf.keras.layers.Concatenate(axis = 1)([encode1, encode2, encode3, encode4])
print(shot1.shape)

# Dense part of the network starts here
shot_d1 = tf.keras.layers.Dense(50, activation='linear', kernel_regularizer='l1', bias_regularizer='l1')(shot1)
shot_r1 = tf.keras.layers.Dropout(0.4)(shot_d1)
# The original fed shot_d1 into the next layer, silently bypassing the
# Dropout; using shot_r1 keeps it in the forward pass.
shot_b1 = tf.keras.layers.BatchNormalization(axis = 1)(shot_r1)

output = tf.keras.layers.Dense(1, activation = 'sigmoid')(shot_b1)

model = tf.keras.Model(inputs = inputs, outputs = output)
opt = tf.keras.optimizers.SGD(learning_rate = 0.001)
model.compile(optimizer = opt, loss = BinaryFocalLoss(pos_weight = 9, gamma = 2.5), metrics=[tf.keras.losses.BinaryCrossentropy()]) #BinaryFocalLoss(pos_weight=7, gamma=4)

model.summary()
image_out = 'drive/MyDrive/Eluvio-MLChallenge/lasso' + '/' + 'separable_lasso2.png'
print(image_out)
tf.keras.utils.plot_model(model, to_file=image_out, dpi=100)

training_log = 'drive/MyDrive/Eluvio-MLChallenge/lasso' + '/' + 'separable_lasso2' + '.txt'
print(training_log)
csv_logger = tf.keras.callbacks.CSVLogger(training_log, append = True, separator=' ')
metrics = model.fit(X1, Y1, epochs = 50, validation_split= 0.2, verbose=2, batch_size = 64, callbacks=[csv_logger])

model_ID = 'drive/MyDrive/Eluvio-MLChallenge/lasso' +  '/' + 'separable_lasso2' + '.h5'
print(model_ID)
tf.keras.models.save_model(model,model_ID)
Example #19
)
model = Sequential([base_model, Dense(2, activation='softmax')])


"""
base_model = tf.keras.applications.VGG16(weights='imagenet', input_shape=(224,224,3), include_top = False)
model = Sequential([base_model])
model.add(Conv2D(filters = 32,kernel_size = (5,5), activation = 'relu'))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Flatten())
model.add(Dense(units = 64, activation = 'relu'))
model.add(Dense(units = 2 , activation = 'softmax'))
"""  # closing quotes restored; the block above is a commented-out VGG16 variant


model.compile(Adam(learning_rate=.00001),  # `lr` is deprecated
              loss=[BinaryFocalLoss(gamma=0.25)], metrics=[AUC()])
model.fit(train_batches, batch_size=32,
          validation_data=validation_batches, epochs=10, workers=2,
          verbose=1, callbacks=[learning_rate_reduction])
model.save("Transfer-LearninENb0.model")



def load_image(img_path):

    img = image.load_img(img_path, target_size=(224, 224))
    img2 = image.img_to_array(img)
    img2 = np.expand_dims(img2, axis=0)
    img2 /= 255.
    return img2

csv = pd.read_csv('C:/Users/14oka/Desktop/test.csv')
Example #20
                                            factor=0.5,
                                            min_lr=1e-7)

model = Sequential()
# VGG16 was used: 0.8356
base_model = tf.keras.applications.VGG16(weights='imagenet',
                                         input_shape=(224, 224, 3),
                                         include_top=False)

model = Sequential(
    [base_model,
     GlobalAveragePooling2D(),
     Dense(2, activation='softmax')])

model.compile(Adam(learning_rate=.00001),  # `lr` is deprecated
              loss=[BinaryFocalLoss(gamma=0.25)],
              metrics=[AUC()])
model.fit(train_batches,
          batch_size=32,
          validation_data=validation_batches,
          validation_batch_size=32,
          epochs=10,
          workers=4,
          verbose=1,
          callbacks=[learning_rate_reduction])
model.save("Transfer-Learning.model")


def load_image(img_path):

    img = image.load_img(img_path, target_size=(224, 224))