# Assumed imports for this snippet; datasets_keras and categorical_focal_loss
# are project-specific modules (a sketch of the latter follows this example).
import wandb
from wandb.keras import WandbCallback
from tensorflow.keras import metrics, optimizers
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau


def train_model(dataset, model):
    epochs = 15
    lr = 1e-4
    size = 300
    wd = 1e-2
    bs = 8  # reduce this if you are running out of GPU memory
    pretrained = True
    alpha_fl = [0.4, 0.4, 0.15, 0.05]
    gamma_fl = 2

    config = {
        'epochs': epochs,
        'lr': lr,
        'size': size,
        'wd': wd,
        'bs': bs,
        'alpha_fl': alpha_fl,
        'gamma_fl': gamma_fl,
        'pretrained': pretrained
    }

    wandb.config.update(config)

    model.compile(
        optimizer=optimizers.Adam(learning_rate=lr),
        loss=[categorical_focal_loss(alpha=alpha_fl, gamma=gamma_fl)],
        metrics=[
            metrics.Precision(top_k=1, name='precision'),
            metrics.Recall(top_k=1, name='recall'),
            # Note: metrics.Accuracy() checks elementwise equality of labels
            # and raw predictions; CategoricalAccuracy() is usually the
            # intended metric for softmax outputs.
            metrics.Accuracy(name='accuracy')
        ])
    early_stop = EarlyStopping(monitor='loss',
                               min_delta=0.01,
                               patience=7,
                               mode='min',
                               verbose=1)

    reduce_on_plateau = ReduceLROnPlateau(monitor='loss',
                                          factor=0.1,
                                          patience=2,
                                          verbose=1,
                                          mode='min',
                                          min_delta=0.01,
                                          cooldown=0,
                                          min_lr=0)

    train_data, valid_data = datasets_keras.load_dataset(dataset, bs)
    _, ex_data = datasets_keras.load_dataset(dataset, 10)
    # Model.fit accepts generators directly; fit_generator is deprecated.
    model.fit(train_data,
              validation_data=valid_data,
              epochs=epochs,
              callbacks=[
                  early_stop, reduce_on_plateau,
                  WandbCallback(input_type='image',
                                output_type='segmentation_mask',
                                validation_data=ex_data[0])
              ])
Example #2
def __init__(self):
    self.num_conv2d_layers = 1
    self.filters_2d = [16, 32]
    self.kernel_size_2d = [[3, 3], [3, 3]]
    self.mpool_size_2d = [[2, 2], [2, 2]]
    self.metric_type_map = {
        'precision': metrics.Precision(),
        'recall': metrics.Recall(),
        'AUC': metrics.AUC(),
        'accuracy': metrics.Accuracy(),
    }
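
A hypothetical use of the metric_type_map above, resolving configuration-supplied metric names at compile time (compile_with_metrics and its arguments are assumptions, not part of the source):

def compile_with_metrics(self, model, metric_names):
    # Hypothetical helper: look up the stateful metric objects by name.
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=[self.metric_type_map[name] for name in metric_names])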
Example #3
File: trim4.py Project: jit9/cuts
# Assumed imports for this snippet; `anal` is a project-specific helper
# (a hypothetical sketch of it follows this example).
from pandas import DataFrame
from tensorflow.keras import metrics
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential


def buildModel(D, sel, length):
    """buildModel builds, trains, and evaluates the neural network model."""
    model = Sequential()
    model.add(Dense(12, input_dim=9, activation='relu'))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))

    # for experimental purposes,  various types of models are tried and
    #    simply stored as comments for future reference.

    #    print("from buildModel,  shape of D:  {}\n".format(D.shape))
    # compile the keras model
    #    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['FalseNegatives'])
    # fit the keras model on the dataset
    #    model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=[metrics.SpecificityAtSensitivity(0.2)])
    #    model.compile(loss=[losses.Poisson()], optimizer='adam', metrics=[metrics.PrecisionAtRecall(0.25)])
    # Note: metrics.Accuracy() checks exact equality of labels and raw sigmoid
    # outputs; BinaryAccuracy() is usually the intended metric here.
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=[metrics.Accuracy()])
    #    model.compile(loss='binary_crossentropy', optimizer = 'adam', metrics=[metrics.SparseCategoricalAccuracy()])
    #    model.compile(loss='binary_crossentropy', optimizer = 'adam', metrics=[metrics.Recall()])
    model.fit(D, sel, epochs=50, batch_size=200)
    #    model.fit(scaled_D, sel, epochs=40, batch_size=50)
    _, accuracy = model.evaluate(D, sel)
    predictions = model.predict(D)
    print("\nAccuracy on training set = {}\n".format(accuracy))
    #    print("accuracy of trained model = {}".format(accuracy))

    #    Save the experimental model for future use or comment out.
    #    model.save("pa5_f150_s19_tod655_trimmed_30Dec2020")
    #    model.save("pa7_f030_bri_temp","wb")
    #    model.save("pa5_f090_s19_c11_v0_trim_8Feb_tod#0")
    #    model.save("pa7_f030_s20_tod49_trimmed_30Dec2020")
    #    model.save("pa7_f030_bri_tod_0_8Feb")
    #    model.save("pa6_f090_s18_c11_v1_tod_0_8Feb")
    #    model.save("pa7_f040_s20_bri_v0_twoTODs_4and9")
    print("from buildModel:  type(D) = {}".format(type(D)))
    DD = DataFrame(data=D)
    tc, fc, tu, fu, E_cut, s_cut = anal(DD, predictions, sel)
    print("from inside model build, stage 1, tc, fc, tu, fu = ")
    print("                                  {}  {}  {}  {}".format(
        tc, fc, tu, fu))
    return model
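
The anal helper called above is project-specific and its implementation is not shown; a rough, hypothetical reconstruction, assuming tc/fc/tu/fu are confusion-matrix counts at a 0.5 prediction threshold and E_cut/s_cut are derived rates, might be:

import numpy as np

def anal(DD, predictions, sel, threshold=0.5):
    # Hypothetical: everything below is an assumption about anal's contract.
    pred_cut = np.asarray(predictions).ravel() >= threshold
    true_cut = np.asarray(sel).ravel().astype(bool)
    tc = int(np.sum(pred_cut & true_cut))    # correctly cut
    fc = int(np.sum(pred_cut & ~true_cut))   # cut but should be kept
    tu = int(np.sum(~pred_cut & ~true_cut))  # correctly kept
    fu = int(np.sum(~pred_cut & true_cut))   # kept but should be cut
    E_cut = tc / max(tc + fu, 1)             # assumed: cut efficiency (recall)
    s_cut = fc / max(fc + tu, 1)             # assumed: false-cut rate
    return tc, fc, tu, fu, E_cut, s_cut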
Example #4
    def test_accuracy(self):
        acc_obj = metrics.Accuracy(name='my_acc')

        # check config
        assert acc_obj.name == 'my_acc'
        assert acc_obj.stateful
        assert len(acc_obj.weights) == 2
        assert acc_obj.dtype == 'float32'

        # verify that correct value is returned
        result = K.eval(acc_obj([[1], [2], [3], [4]], [[1], [2], [3], [4]]))
        assert result == 1  # 4/4: all four predictions match the labels

        # Check save and restore config
        a2 = metrics.Accuracy.from_config(acc_obj.get_config())
        assert a2.name == 'my_acc'
        assert a2.stateful
        assert len(a2.weights) == 2
        assert a2.dtype == 'float32'

        # check with sample_weight
        result_t = acc_obj([[2], [1]], [[2], [0]], sample_weight=[[0.5], [0.2]])
        result = K.eval(result_t)
        assert np.isclose(result, 4.5 / 4.7, atol=1e-3)
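
The 4.5/4.7 value follows from Accuracy's two state variables, total and count: the first call added four matching samples with unit weight (total = 4, count = 4); the second adds weight 0.5 for the match [2] == [2] and weight 0.2 for the mismatch [1] != [0]. A quick arithmetic check:

# Manual check of the streaming weighted accuracy above.
total = 4.0 + 0.5          # weighted matches: four unweighted plus one at 0.5
count = 4.0 + 0.5 + 0.2    # sum of all sample weights seen so far
assert abs(total / count - 4.5 / 4.7) < 1e-12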
Example #5
# Assumed imports for this snippet.
import logging
import math

from tensorflow.keras import metrics, optimizers, regularizers
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import SGD


def define_nn_model(
        input_size: int = 2048,
        l2reg: float = 0.001,
        dropout: float = 0.2,
        activation: str = 'relu',
        optimizer: str = 'Adam',
        lr: float = 0.001,
        decay: float = 0.01) -> Model:
    """
    Sets up the structure of the feed forward neural network. The number and size of the hidden layers are based on
    the dimensions of the input vector.

    :param input_size: Length of the input vector. Default: 2048
    :param l2reg: L2 regularization factor. Default: 0.001
    :param dropout: Value of dropout for hidden layers. Default: 0.2
    :param activation: Activation function for inner layers. Default: 'relu'
    :param optimizer: Optimizer for loss function. Default: 'Adam'
    :param lr: Learning rate. Default: 0.001
    :param decay: Decay of the optimizer. Default: 0.01
    :return: A keras model.
    """

    if optimizer == 'Adam':
        my_optimizer = optimizers.Adam(learning_rate=lr,
                                       decay=decay)
    elif optimizer == 'SGD':
        my_optimizer = SGD(learning_rate=lr,
                           momentum=0.9,
                           decay=decay)
    else:
        my_optimizer = optimizer

    my_hidden_layers = {"2048": 6, "1024": 5, "999": 5, "512": 4, "256": 3}

    # my_hidden_layers is used only to validate input_size; the layer count
    # itself comes from the log2 formula below.
    if str(input_size) not in my_hidden_layers:
        raise ValueError("Wrong input size. Must be one of {2048, 1024, 999, 512, 256}.")

    nhl = int(math.log2(input_size) / 2 - 1)

    model = Sequential()
    # From input to 1st hidden layer
    model.add(Dense(units=int(input_size / 2),
                    input_dim=input_size,
                    activation=activation,
                    kernel_regularizer=regularizers.l2(l2reg)))
    model.add(Dropout(dropout))
    # next hidden layers
    for i in range(1, nhl):
        factor_units = 2 ** (i + 1)
        factor_dropout = 2 * i
        model.add(Dense(units=int(input_size / factor_units),
                        activation=activation,
                        kernel_regularizer=regularizers.l2(l2reg)))
        model.add(Dropout(dropout / factor_dropout))
    # output layer
    model.add(Dense(units=1,
                    activation='sigmoid'))

    model.summary(print_fn=logging.info)

    # compile model (note: metrics.Accuracy() compares raw sigmoid outputs to
    # labels elementwise; BinaryAccuracy() is usually intended for this setup)
    model.compile(loss="mean_squared_error",
                  optimizer=my_optimizer,
                  metrics=[metrics.Accuracy(name="my_acc")])  # ,
    # metrics.Accuracy(),
    # metrics.Precision(),
    # metrics.Recall(),
    # metrics.SpecificityAtSensitivity(),
    # metrics.SensitivityAtSpecificity()])

    return model
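
A hypothetical call, assuming 1024-element input vectors:

model = define_nn_model(input_size=1024, dropout=0.3)
# nhl = int(log2(1024) / 2 - 1) = 4, so the network gets four hidden layers
# (512, 256, 128, and 64 units) before the single sigmoid output unit.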
Example #6
# Assumed context: `model` is an existing Keras model; iou and iou_thresholded
# come from an external package (e.g. keras_unet.metrics).
model_filename = 'weights.h5'

callback_checkpoint = ModelCheckpoint(
    model_filename,
    verbose=1,
    monitor='val_loss',
    save_best_only=True,
)

# Compile the model
model.compile(
    #optimizer=Adam(),
    optimizer=SGD(learning_rate=0.01, momentum=0.99),
    loss='binary_crossentropy',
    #loss=jaccard_distance,
    metrics=[
        iou, iou_thresholded, metrics.binary_accuracy,
        metrics.Accuracy()
    ])

# Load weights
model.load_weights(model_filename)

# Dictionary mapping layer names to layers
dict_layer = {layer.name: layer for layer in model.layers}

# Directory for saving feature maps
path = 'E:\\maps\\'

for layer_name in dict_layer:
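    # Hypothetical continuation: the source snippet is truncated here. One
    # plausible body builds an intermediate model per layer and saves its
    # feature maps for a sample batch `x` (Model, np, and x are assumptions).
    sub_model = Model(inputs=model.input, outputs=dict_layer[layer_name].output)
    feature_maps = sub_model.predict(x)
    np.save(path + layer_name + '.npy', feature_maps)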