Example #1
def _setup_metrics(self):
    self.metric_functions = {}  # shadow the class variable so instances don't share state
    self.metric_functions[LOSS] = self.eval_loss_function
    self.metric_functions[ACCURACY] = BinaryAccuracy(
        name='metric_accuracy')
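
The "shadow" comment matters because a dict initialized at class level is shared by every instance; assigning a fresh dict on self gives each instance its own registry. A minimal sketch of the pitfall, using a hypothetical Evaluator class for illustration:

class Evaluator:
    metric_functions = {}  # class variable, shared by all instances

a, b = Evaluator(), Evaluator()
a.metric_functions['loss'] = 'mse'   # mutates the shared class-level dict
print(b.metric_functions)            # {'loss': 'mse'} -- leaked into b

a.metric_functions = {}              # shadowing: a now owns a private dict
a.metric_functions['loss'] = 'mae'
print(Evaluator.metric_functions)    # {'loss': 'mse'} -- class dict untouched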
Example #2
def train_model(cfg, data, callbacks, verbose=2):
    '''
    Train and evaluate a model on the given data.
    :param cfg: Project config (from config.yml)
    :param data: dict of partitioned dataset
    :param callbacks: list of callbacks for Keras model
    :param verbose: Verbosity mode to pass to model.fit()
    :return: Trained model and associated performance metrics on the test set
    '''

    # Apply class imbalance strategy
    num_neg, num_pos = np.bincount(data['Y_train'].astype(int))
    class_weight = None
    if cfg['TRAIN']['IMB_STRATEGY'] == 'class_weight':
        class_weight = get_class_weights(num_pos, num_neg,
                                         cfg['TRAIN']['POS_WEIGHT'])
    elif cfg['TRAIN']['IMB_STRATEGY'] != 'none':
        data['X_train'], data['Y_train'] = minority_oversample(
            data['X_train'],
            data['Y_train'],
            algorithm=cfg['TRAIN']['IMB_STRATEGY'])

    thresholds = cfg['TRAIN']['THRESHOLDS']  # Load classification thresholds

    # List metrics
    metrics = [
        BinaryAccuracy(name='accuracy'),
        Precision(name='precision', thresholds=thresholds),
        Recall(name='recall', thresholds=thresholds),
        F1Score(name='f1score', thresholds=thresholds),
        AUC(name='auc')
    ]

    # Compute the output bias (recompute the counts, since oversampling may have changed them)
    num_neg, num_pos = np.bincount(data['Y_train'].astype(int))
    output_bias = np.log([num_pos / num_neg])

    # Build the model graph.
    if cfg['TRAIN']['MODEL_DEF'] == 'hifis_rnn_mlp':
        model_def = hifis_rnn_mlp
    elif cfg['TRAIN']['MODEL_DEF'] == 'hifis_mlp':
        model_def = hifis_mlp
    elif cfg['TRAIN']['MODEL_DEF'] == 'logistic_regression':
        model_def = logistic_regression
    elif cfg['TRAIN']['MODEL_DEF'] == 'random_forest':
        model_def = random_forest
    else:
        model_def = xgboost_model
    model = model_def(cfg['MODELS'][cfg['TRAIN']['MODEL_DEF'].upper()],
                      input_dim=(data['X_train'].shape[-1], ),
                      metrics=metrics,
                      metadata=data['METADATA'],
                      output_bias=output_bias)

    # Train the model.
    history = model.fit(data['X_train'],
                        data['Y_train'],
                        batch_size=cfg['TRAIN']['BATCH_SIZE'],
                        epochs=cfg['TRAIN']['EPOCHS'],
                        validation_data=(data['X_val'], data['Y_val']),
                        callbacks=callbacks,
                        class_weight=class_weight,
                        verbose=verbose)

    # Run the model on the test set and print the resulting performance metrics.
    test_results = model.evaluate(data['X_test'], data['Y_test'])
    test_metrics = {}
    test_summary_str = [['**Metric**', '**Value**']]
    for metric, value in zip(model.metrics_names, test_results):
        test_metrics[metric] = value
        print(metric, ' = ', value)
        test_summary_str.append([metric, str(value)])
    return model, test_metrics
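
get_class_weights is a project helper not shown in this snippet. A minimal sketch of what such a helper typically computes, assuming the common "balanced" inverse-frequency weighting with an optional positive-class multiplier (the body is an assumption, not the project's actual code):

def get_class_weights(num_pos, num_neg, pos_weight=1.0):
    # Hypothetical sketch: weight each class inversely to its frequency so
    # both classes contribute roughly equally to the loss.
    total = num_pos + num_neg
    weight_for_0 = total / (2.0 * num_neg)
    weight_for_1 = (total / (2.0 * num_pos)) * pos_weight
    return {0: weight_for_0, 1: weight_for_1}  # format expected by model.fit(class_weight=...)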
Example #3
l2_user = user_embedding**2
l2_reg = 0.0001

# Keep the regularization penalty from getting too large.
weight_decay = l2_reg * tf.reduce_sum(l2_pos_item + l2_neg_item + l2_user)

model.add_loss(weight_decay)

# Compile the model
from tensorflow.keras.optimizers import Adagrad
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.metrics import BinaryAccuracy

model.compile(Adagrad(1.),
              loss=BinaryCrossentropy(),
              metrics=[BinaryAccuracy()])

################################### Build the training data
# Build the set of movies each user has rated
itemset_per_user = (train_df.groupby('user_id')['item_id'].apply(frozenset))

total_items = set(train_df.item_id.unique())

######################### Build the training data
# Build the list of movies each user has NOT rated
# (the set is broadcast element-wise against the Series of frozensets)
notseen_itemset_per_user = total_items - itemset_per_user
notseen_itemset_per_user = notseen_itemset_per_user.apply(list)


def get_bpr_dataset(train_df, notseen_itemset_per_user):
    batch_train_df = train_df.copy()
Example #4
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
from tensorflow.keras.losses import BinaryCrossentropy, Huber
from tensorflow.keras.metrics import BinaryAccuracy, MeanAbsoluteError
from rpn.generation import rpn_generator, RPNconfig
from rpn.rpn import make_cls_wrapper, make_reg_wrapper, ThresholdedRegularizer, ClsMetricWrapper, RegMetricWrapper
import pandas as pd

if __name__ == '__main__':
    seed = 42
    rpn_config = RPNconfig.load_json('versions/RPN_v8/rpn_config.json')

    cls_loss = make_cls_wrapper(BinaryCrossentropy(from_logits=True))
    reg_loss = make_reg_wrapper(Huber())
    cls_acc = ClsMetricWrapper(BinaryAccuracy(), name='acc')
    reg_mae = RegMetricWrapper(MeanAbsoluteError(), name='mae')
    rpn = load_model('versions/RPN_v8/configs/best.h5',
                     custom_objects={
                         'ThresholdedRegularizer': ThresholdedRegularizer,
                         'reg_processer': reg_loss,
                         'cls_processer': cls_loss
                     })
    rpn.compile(optimizer=rpn.optimizer,
                loss=rpn.loss,
                metrics={
                    'bbox_reg': reg_mae,
                    'bbox_cls_log': cls_acc
                })

    test_data = pd.read_json('../dataset/test.json')
    test_generator = ImageDataGenerator(rescale=1. / 255).flow_from_dataframe(
Example #5
def network(cols, loss_f, activation_f, train_gen, valid_gen, l):

    base_model = MobileNet(weights='imagenet', input_shape=(HEIGHT, WIDTH, 3),
                           include_top=False)  # import MobileNet, discarding the final 1000-neuron classifier
    x = base_model.layers[l].output
    x = GlobalMaxPooling2D()(x)
    preds = Dense(len(cols), activation=activation_f)(x)

    model = Model(inputs=base_model.input, outputs=preds)

    for layer in model.layers:
        layer.trainable = True

    opt = Adam(learning_rate=0.001)

    model.compile(loss=loss_f, optimizer=opt, metrics=['acc', BinaryAccuracy(), Precision(), Recall()])

    es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5)
    mc = ModelCheckpoint(str(l)+'_'.join(cols).lower()+'_best_model.h5', monitor='val_loss', mode='min', verbose=1, save_best_only=True)
    dc = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=1, min_lr=1e-9)

    model.save_weights(str(l)+'_'.join(cols).lower()+'_best_model.h5')
    model_json = model.to_json()

    with open(str(l)+'_'.join(cols).lower()+'_model.json', 'w') as json_file:
        json_file.write(model_json)

    datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = datagen.flow_from_dataframe(
        dataframe=train_gen,
        directory=path,
        x_col='Filename',
        y_col=cols,
        batch_size=bs,
        color_mode='rgb',
        class_mode='raw',
        target_size=(HEIGHT, WIDTH))

    valid_generator = datagen.flow_from_dataframe(
        dataframe=valid_gen,
        directory=path,
        x_col='Filename',
        y_col=cols,
        batch_size=bs,
        color_mode='rgb',
        class_mode='raw',
        target_size=(HEIGHT, WIDTH))

    STEP_SIZE_TRAIN = train_generator.n // train_generator.batch_size
    STEP_SIZE_VALID = valid_generator.n // valid_generator.batch_size

    h = model.fit_generator(generator=train_generator,
                            steps_per_epoch=STEP_SIZE_TRAIN,
                            validation_data=valid_generator,
                            validation_steps=STEP_SIZE_VALID,
                            epochs=50,
                            verbose=1,
                            callbacks=[es, mc, dc],
                            workers=10)

    return model, h.history
Example #6
def main():
    args = cmd_parser()

    physical_devices = tf.config.list_physical_devices('GPU')
    tf.config.set_visible_devices(physical_devices[args.gpu:], 'GPU')

    if_fast_run = False

    print(f"TensorFlow version: {tf.__version__}.")  # Keras backend
    print(f"Keras version: {keras.__version__}.")
    print("If in eager mode: ", tf.executing_eagerly())
    assert tf.__version__[0] == "2"

    # Prepare model
    n = 2  # order of ResNetv2, 2 or 6
    version = 2
    depth = model_depth(n, version)
    model_type = "two_conv2d_net"
    model_type = 'ResNet%dv%d' % (depth, version)  # "ResNet20v2"

    # or model_type = "keras.applications.ResNet50V2"
    model_type = "keras.applications.ResNet50V2"

    # data path
    competition_name = "dogs-vs-cats-redux-kernels-edition"
    data_dir = os.path.expanduser(f"~/.kaggle/competitions/{competition_name}")

    # experiment time
    date_time = datetime.now().strftime("%Y%m%d-%H%M%S")

    prefix = os.path.join("~", "Documents", "DeepLearningData",
                          competition_name)
    subfix = os.path.join(
        model_type, '-'.join((date_time, "pretrain", str(args.pretrain))))
    ckpt_dir = os.path.expanduser(os.path.join(prefix, "ckpts", subfix))
    log_dir = os.path.expanduser(os.path.join(prefix, "logs", subfix))
    makedir_exist_ok(ckpt_dir)
    makedir_exist_ok(log_dir)

    # Input parameters
    IMAGE_WIDTH = IMAGE_HEIGHT = 128
    image_size = (IMAGE_WIDTH, IMAGE_HEIGHT)
    IMAGE_CHANNELS = 3
    input_shape = (IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS)
    num_classes = 2

    # Data loaders
    train_generator, validation_generator = data_generators(
        data_dir, target_size=image_size, batch_size=args.batch_size)

    # Create model
    model = create_model(model_type,
                         input_shape,
                         num_classes,
                         pretrain=args.pretrain)

    # Compile model
    from tensorflow.keras.optimizers import Adam
    from tensorflow.keras.losses import BinaryCrossentropy
    from tensorflow.keras.metrics import Recall, Precision, TruePositives, FalsePositives, TrueNegatives, FalseNegatives, BinaryAccuracy, AUC
    metrics = [
        Recall(name='recall'),
        Precision(name='precision'),
        TruePositives(name='tp'),  # thresholds=0.5
        FalsePositives(name='fp'),
        TrueNegatives(name='tn'),
        FalseNegatives(name='fn'),
        BinaryAccuracy(name='accuracy'),
        # AUC0(name='auc_cat_0'),  # AUC with "cat" as the positive class
        AUC(name='auc_dog_1')  # AUC with "dog" as the positive class
    ]
    model.compile(loss=BinaryCrossentropy(),
                  optimizer=Adam(learning_rate=lr_schedule(args.start_epoch)),
                  metrics=metrics)

    # Resume training
    # model_ckpt_file = MODEL_CKPT
    # if os.path.exists(model_ckpt_file):
    #     print("Model ckpt found! Loading...:%s" % model_ckpt_file)
    #     model.load_weights(model_ckpt_file)

    # define callbacks
    from tensorflow.keras.callbacks import CSVLogger, LearningRateScheduler, TensorBoard, ModelCheckpoint
    # model_name = "%s.start-%d-epoch-{epoch:03d}-val_loss-{val_loss:.4f}.h5" % (
    #     model_type, args.start_epoch)
    model_name = "%s-epoch-{epoch:03d}-val_loss-{val_loss:.4f}.h5" % (
        model_type)
    # Prepare the model saving directory.
    filepath = os.path.join(ckpt_dir, model_name)
    checkpoint = ModelCheckpoint(filepath=filepath,
                                 monitor='val_loss',
                                 verbose=1)

    file_writer = tf.summary.create_file_writer(
        os.path.join(log_dir, "metrics"))  # custom scalars
    file_writer.set_as_default()
    csv_logger = CSVLogger(os.path.join(log_dir, "training.log.csv"),
                           append=True)
    tensorboard_callback = TensorBoard(log_dir, histogram_freq=1)
    lr_scheduler = LearningRateScheduler(lr_schedule, verbose=1)
    callbacks = [csv_logger, tensorboard_callback, lr_scheduler, checkpoint]

    # Fit model
    epochs = 3 if if_fast_run else args.epochs
    model.fit(
        train_generator,
        epochs=epochs,
        validation_data=validation_generator,
        callbacks=callbacks,
        initial_epoch=args.start_epoch,
        verbose=1  # 2 for notebook
    )
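
lr_schedule above is defined elsewhere in the project. A plausible step-decay schedule of the kind usually paired with LearningRateScheduler; the breakpoints and decay factors below are assumptions, not the original values:

def lr_schedule(epoch, lr=None):
    # Hypothetical step decay: start at 1e-3 and shrink at fixed epochs.
    new_lr = 1e-3
    if epoch > 40:
        new_lr *= 1e-2
    elif epoch > 20:
        new_lr *= 1e-1
    return new_lr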
Example #7
def generate_compiled_segmentation_model(
        model_name,
        model_parameters,
        num_classes,
        loss,
        optimizer,
        weights_to_load=None,
        optimizing_threshold_class_metric=None,
        optimizing_class_id=None,
        optimizing_input_threshold=None,
        optimized_class_thresholds=None):

    # cast input_shape to a tuple, since YAML cannot represent tuples
    if 'input_shape' in model_parameters:
        model_parameters['input_shape'] = tuple(
            model_parameters['input_shape'])
    else:  # to guarantee the V1 config files still work
        model_parameters['input_shape'] = (None, None, 1)

    # Select the optimizer as a function of the name in the config file
    if optimizer.lower() in optimizer_dict:
        optimizer_fn = optimizer_dict[optimizer.lower()]
    else:
        raise NameError("Error, the optimizer selected" + optimizer +
                        " is currently not supported.")

    # Select the loss function as a function of the name in the config file
    if loss.lower() in loss_dict:
        loss_fn = loss_dict[loss.lower()]
    else:
        raise NameError("Error, the loss function selected " + loss +
                        " is currently not supported.")

    if 'activation' in model_parameters:
        if loss == 'binary_cross_entropy' or loss == 'cross_entropy':
            assert model_parameters['activation'] == 'sigmoid'
        elif loss == 'categorical_cross_entropy':
            assert model_parameters['activation'] == 'softmax'
    else:
        print(
            'Activation/loss compatibility was not checked because '
            'model_parameters: activation does not exist in the model config file.'
        )

    all_metrics = []  # one-hot versions are generally preferred for a given metric
    # make first metric a copy of loss, to continually verify `val_loss` is correct
    if isinstance(loss_fn, BinaryCrossentropyL):
        all_metrics.append(BinaryCrossentropyM(name='binary_ce_metric'))
    else:
        all_metrics.append(CategoricalCrossentropyM(name='categ_ce_metric'))

    # standard thresholded version (default threshold is 0.5) also kept below, in case it's desired in certain scenario
    for class_num in range(num_classes + 1):
        if class_num == 0 and optimizing_threshold_class_metric is None:  # all class metrics
            # note, `loss_fn` for all classes placed before `all_metrics` in lineup of command window metrics and plots
            if not isinstance(loss_fn, BinaryCrossentropyL):
                all_metrics.extend([CategoricalCELoss()])
                all_metrics[1].name = str('categ_cross_entropy_sm')
            all_metrics.extend([
                AccuracyTfKeras(),
                # OneHotAccuracyTfKeras(),  # `global_threshold` built-in
                ClassBinaryAccuracyTfKeras(thresholds=global_threshold),
                # OneHotClassBinaryAccuracyTfKeras(thresholds=global_threshold),
                ClassBinaryAccuracySM(threshold=global_threshold),
                # OneHotClassBinaryAccuracySM(threshold=global_threshold),
                BinaryAccuracy(threshold=global_threshold),
                CategoricalAccuracy(),
                FalseNegatives(name='false_neg', thresholds=global_threshold),
                # OneHotFalseNegatives(name='false_neg_1H', thresholds=global_threshold),
                TrueNegatives(name='true_neg', thresholds=global_threshold),
                # OneHotTrueNegatives(name='true_neg_1H', thresholds=global_threshold),
                FalsePositives(name='false_pos', thresholds=global_threshold),
                # OneHotFalsePositives(name='false_pos_1H', thresholds=global_threshold),
                TruePositives(name='true_pos', thresholds=global_threshold),
                # OneHotTruePositives(name='true_pos_1H', thresholds=global_threshold),
                Recall(name='recall', thresholds=global_threshold),
                # OneHotRecall(name='recall_1H', thresholds=global_threshold),
                Precision(name='precision', thresholds=global_threshold),
                # OneHotPrecision(name='precision_1H', thresholds=global_threshold),
                FBetaScore(name='f1_score',
                           beta=1,
                           thresholds=global_threshold),
                # OneHotFBetaScore(name='f1_score_1H', beta=1, thresholds=global_threshold),
                IoUScore(name='iou_score', thresholds=global_threshold),
                # OneHotIoUScore(name='iou_score_1H', thresholds=global_threshold)
            ])
        elif class_num == 0 and optimizing_threshold_class_metric is not None:  # all class metrics
            continue
        else:  # per class metrics
            if optimizing_threshold_class_metric is not None:
                class_threshold = optimizing_input_threshold
                class_num = optimizing_class_id + 1
            elif optimized_class_thresholds is None:
                class_threshold = global_threshold
            else:
                class_threshold = optimized_class_thresholds[str(
                    'class' + str(class_num - 1))]

            all_metrics.append(CategoricalCELoss(class_indexes=class_num - 1))
            all_metrics[-1].name = str('class' + str(class_num - 1) +
                                       '_binary_cross_entropy')
            all_metrics.append(
                ClassBinaryAccuracySM(name=str('class' + str(class_num - 1) +
                                               '_binary_accuracy_sm'),
                                      class_indexes=class_num - 1,
                                      threshold=class_threshold))
            all_metrics.append(
                ClassBinaryAccuracyTfKeras(
                    name=str('class' + str(class_num - 1) +
                             '_binary_accuracy_tfkeras'),
                    class_id=class_num - 1,
                    thresholds=class_threshold))
            all_metrics.append(
                IoUScore(name=str('class' + str(class_num - 1) + '_iou_score'),
                         class_id=class_num - 1,
                         thresholds=class_threshold))
            all_metrics.append(
                FBetaScore(name=str('class' + str(class_num - 1) +
                                    '_f1_score'),
                           class_id=class_num - 1,
                           beta=1,
                           thresholds=class_threshold))
            all_metrics.append(
                Precision(name=str('class' + str(class_num - 1) +
                                   '_precision'),
                          class_id=class_num - 1,
                          thresholds=class_threshold))
            all_metrics.append(
                Recall(name=str('class' + str(class_num - 1) + '_recall'),
                       class_id=class_num - 1,
                       thresholds=class_threshold))

            if optimizing_threshold_class_metric is not None:
                break

        if num_classes == 1:
            break

    # strategy = tf.distribute.MirroredStrategy()
    # with strategy.scope():

    if model_name in models_dict:
        if model_parameters['backbone_name'] in models_dict[model_name][
                'compatible_backbones']:
            model = models_dict[model_name]['model_class'](classes=num_classes,
                                                           **model_parameters)
        else:
            raise NameError("Error, model and backbone are not compatible.")
    else:
        raise NameError("Error, the selected model" + model_name +
                        " is not currently supported.")

    model.compile(optimizer=optimizer_fn, loss=loss_fn, metrics=all_metrics)

    if weights_to_load:
        model.load_weights(weights_to_load)

    if optimizing_threshold_class_metric is None:
        print(model.summary())

    return model
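
A hedged usage sketch: the strings below must match entries in the module's models_dict, optimizer_dict, and loss_dict, so 'unet', 'vgg16', and 'adam' are assumptions for illustration:

model = generate_compiled_segmentation_model(
    model_name='unet',  # hypothetical key in models_dict
    model_parameters={'backbone_name': 'vgg16',  # hypothetical backbone
                      'input_shape': (None, None, 1),
                      'activation': 'sigmoid'},
    num_classes=1,
    loss='binary_cross_entropy',
    optimizer='adam')  # hypothetical key in optimizer_dict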
Example #8
# Each batch stacks source-domain samples (first half, domain label 1) on
# top of target-domain samples (second half, domain label 0).
domain_labels = np.vstack(
    [np.tile([1.], [batch_size // 2, 1]),
     np.tile([0.], [batch_size // 2, 1])])

# Flipped domain labels used for the adversarial objective.
opposite_domain_labels = np.vstack(
    [np.tile([0.], [batch_size // 2, 1]),
     np.tile([1.], [batch_size // 2, 1])])

# The classification loss only sees the labeled source half of the batch...
sample_weights_class = np.array(
    ([1] * (batch_size // 2) + [0] * (batch_size // 2)))

# ...while the adversarial domain loss weights every sample equally.
sample_weights_adversarial = np.ones((batch_size, ))

weights = [sample_weights_class, sample_weights_adversarial]

domain_binary_metric = BinaryAccuracy()
source_classifier_metric = CategoricalAccuracy()
source_classifier_cum_loss = tf.keras.metrics.CategoricalCrossentropy()
target_classifier_cum_loss = tf.keras.metrics.CategoricalCrossentropy()
target_classifier_metric = CategoricalAccuracy()


def custom_loss_function(y_true, y_pred):

    sample_weight_class = weights[0]
    sample_weight_domain = weights[1]
    y_class = y_true[0]
    y_domain = y_true[1]
    y_pred_class = y_pred[0]
    y_pred_domain = y_pred[1]
    classifier_loss = CategoricalCrossentropy()(
Example #9
def nv1x16(n_kernels=128):
    l1 = 1e-9
    l2 = 1e-9

    loss = tf.keras.losses.binary_crossentropy

    model = tf.keras.Sequential(name='nv1x16')
    model.add(tf.keras.layers.BatchNormalization(input_shape=(3000, 16, 1)))

    model.add(
        tf.keras.layers.Conv2D(filters=n_kernels // 4,
                               kernel_size=(5, 1),
                               padding='same',
                               kernel_regularizer=tf.keras.regularizers.l1_l2(
                                   l1, l2)))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.LeakyReLU(0.2))
    model.add(tf.keras.layers.MaxPooling2D((5, 1)))
    model.add(tf.keras.layers.Dropout(.2))

    model.add(
        tf.keras.layers.Conv2D(filters=n_kernels // 2,
                               kernel_size=(5, 1),
                               padding='same',
                               kernel_regularizer=tf.keras.regularizers.l1_l2(
                                   l1, l2)))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.LeakyReLU(0.2))
    model.add(tf.keras.layers.MaxPooling2D((3, 1)))
    model.add(tf.keras.layers.Dropout(.2))

    for i in range(2):
        model.add(
            tf.keras.layers.Conv2D(
                filters=(n_kernels // 4) * (3 + i),
                kernel_size=(3, 1),
                padding='same',
                kernel_regularizer=tf.keras.regularizers.l1_l2(l1, l2)))
        model.add(tf.keras.layers.BatchNormalization())
        model.add(tf.keras.layers.LeakyReLU(0.2))
        model.add(tf.keras.layers.MaxPooling2D((2, 1)))
        model.add(tf.keras.layers.Dropout(0.2))

    n_k = {
        0: n_kernels,
        1: (n_kernels // 4) * 3,
        2: n_kernels // 2,
        3: n_kernels // 4,
        4: n_kernels // 4,
        5: n_kernels // 4,
    }

    for i in range(3):
        model.add(
            tf.keras.layers.Conv2D(
                filters=n_k[2 * i],
                kernel_size=(4, 1),
                padding='valid',
                kernel_regularizer=tf.keras.regularizers.l1_l2(l1, l2)))
        model.add(
            tf.keras.layers.Conv2D(
                filters=n_k[2 * i + 1],
                kernel_size=(4, 1),
                padding='valid',
                kernel_regularizer=tf.keras.regularizers.l1_l2(l1, l2)))
        model.add(tf.keras.layers.BatchNormalization())
        model.add(tf.keras.layers.LeakyReLU(0.2))
        model.add(tf.keras.layers.MaxPooling2D((2, 1)))

    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dropout(.5))
    model.add(
        tf.keras.layers.Dense(64,
                              kernel_regularizer=tf.keras.regularizers.l1_l2(
                                  l1, l2)))
    model.add(tf.keras.layers.LeakyReLU(0.2))
    model.add(tf.keras.layers.Dense(1, activation='sigmoid'))

    model.compile(loss=loss,
                  optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
                  metrics=[
                      AUC(curve='ROC', name='roc_auc'),
                      AUC(curve='PR', name='pr_auc'),
                      Precision(),
                      Recall(), mean_pred,
                      BinaryAccuracy()
                  ])

    return model
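
mean_pred in the metrics list above is a custom metric defined outside this snippet. A minimal sketch of the usual implementation (the body is an assumption): it reports the mean predicted probability, which helps spot calibration drift.

import tensorflow.keras.backend as K

def mean_pred(y_true, y_pred):
    # Hypothetical sketch: average predicted probability over the batch.
    return K.mean(y_pred)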
Example #10
from argparse import Namespace  # needed for the `args: Namespace` field below
from dataclasses import dataclass

import numpy as np
import tensorflow.keras as keras
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.callbacks import History, EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.layers import Bidirectional, Dense, Embedding
from tensorflow.keras.layers import Input, LSTM
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import BinaryAccuracy, CategoricalAccuracy, Precision, Recall
from tensorflow.keras.metrics import TruePositives, TrueNegatives, FalsePositives, FalseNegatives
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

METRICS = [
    CategoricalAccuracy(name='accuracy'),
    BinaryAccuracy(name='binary_accuracy'),
    TruePositives(name='tp'),
    TrueNegatives(name='tn'),
    FalsePositives(name='fp'),
    FalseNegatives(name='fn'),
    Precision(name='precision'),
    Recall(name='recall')
]


@dataclass
class BiLSTM:
    # required args
    args: Namespace
    partial_path: str
    weights: np.ndarray
Example #11
    target_size=(IMG_WIDTH, IMG_HEIGHT),
    class_mode='binary')

base_model = Xception(input_shape=(IMG_WIDTH, IMG_HEIGHT, 3),
                      include_top=False,
                      weights='imagenet')
base_model.trainable = False

model = Sequential(
    [base_model,
     GlobalAveragePooling2D(),
     Dense(1, activation='sigmoid')])

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=[BinaryAccuracy()])  #, Precision(), Recall()])

model.summary()

print("Start time:", datetime.now())
print()

history = model.fit_generator(train_data_gen,
                              steps_per_epoch=total_train // batch_size,
                              epochs=epochs,
                              validation_data=test_data_gen,
                              validation_steps=total_val // batch_size)

print()
print("End time:", datetime.now())
Example #12
def unet_padding(input_size=(512, 512, 1)):  # Model downloaded from the Internet.
    inputs = Input(input_size)
    conv1 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(inputs)
    conv1 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool1)
    conv2 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool2)
    conv3 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool3)
    conv4 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    conv5 = Conv2D(1024,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool4)
    conv5 = Conv2D(1024,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv5)
    drop5 = Dropout(0.5)(conv5)

    up6 = Conv2D(512,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(drop5))
    merge6 = Concatenate(axis=3)([drop4, up6])
    conv6 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv6)

    up7 = Conv2D(256,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(conv6))
    merge7 = Concatenate(axis=3)([conv3, up7])
    conv7 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv7)

    up8 = Conv2D(128,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(conv7))
    merge8 = Concatenate(axis=3)([conv2, up8])
    conv8 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv8)

    up9 = Conv2D(64,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(conv8))
    merge9 = Concatenate(axis=3)([conv1, up9])
    conv9 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge9)
    conv9 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv9)
    conv9 = Conv2D(2,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv9)
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)

    model = Model(inputs=inputs, outputs=conv10)

    model.compile(optimizer=Adam(learning_rate=1e-4),
                  loss='binary_crossentropy',
                  metrics=[BinaryAccuracy(), AUC()])

    return model
Example #13
def unet_608():  # The shallow model
    inputs = Input((608, 608, 1))

    conv1 = Conv2D(filters=64,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_initializer='VarianceScaling')(inputs)
    conv1 = Conv2D(filters=64,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_initializer='VarianceScaling')(conv1)
    drop1 = Dropout(0.5)(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(drop1)

    conv2 = Conv2D(filters=64,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_initializer='VarianceScaling')(pool1)
    drop2 = Dropout(0.5)(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(drop2)

    conv3 = Conv2D(filters=256,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_initializer='VarianceScaling')(pool2)
    drop3 = Dropout(0.5)(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(drop3)

    conv4 = Conv2D(filters=512,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_initializer='VarianceScaling')(pool3)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    conv5 = Conv2D(filters=1024,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_initializer='VarianceScaling')(pool4)

    up6 = UpSampling2D(size=(2, 2))(conv5)
    merge6 = Concatenate(axis=3)([Cropping2D(cropping=(2, 2))(drop4), up6])
    conv6 = Conv2D(filters=512,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_initializer='VarianceScaling')(merge6)

    up7 = UpSampling2D(size=(2, 2))(conv6)
    merge7 = Concatenate(axis=3)([Cropping2D(cropping=(8, 8))(drop3), up7])
    conv7 = Conv2D(filters=256,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_initializer='VarianceScaling')(merge7)

    up8 = UpSampling2D(size=(2, 2))(conv7)
    merge8 = Concatenate(axis=3)([Cropping2D(cropping=(20, 20))(drop2), up8])
    conv8 = Conv2D(filters=128,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_initializer='VarianceScaling')(merge8)

    up9 = UpSampling2D(size=(2, 2))(conv8)
    merge9 = Concatenate(axis=3)([Cropping2D(cropping=(44, 44))(drop1), up9])
    conv9 = Conv2D(filters=64,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_initializer='VarianceScaling')(merge9)
    conv9 = Conv2D(filters=64,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_initializer='VarianceScaling')(conv9)
    conv10 = Conv2D(filters=1,
                    kernel_size=(1, 1),
                    activation='sigmoid',
                    kernel_initializer='VarianceScaling')(conv9)

    model = Model(inputs=inputs, outputs=conv10)

    model.compile(optimizer=Adam(learning_rate=1e-4),
                  loss='binary_crossentropy',
                  metrics=[BinaryAccuracy(), AUC()])

    return model
Example #14
def Fmrs(request, pk=None):
    gbinary()
    print('hi')
    # Check available GPUs
    tf.config.list_physical_devices('GPU')
    # Set the default float dtype
    tf.keras.backend.set_floatx('float32')

    # Load the data
    scaler = MinMaxScaler()
    X = pickle.load(open('knn_models/X.pkl', 'rb'))
    print('X', X)
    Y = pickle.load(open('knn_models/Y.pkl', 'rb'))
    print('Y', Y)
    X = scaler.fit_transform(X)

    n = X.shape[0]
    p = X.shape[1]
    k2 = Y.shape[1]
    k = 10
    batch_size = 8
    epochs = 10
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, stratify=Y)

    train_ds = tf.data.Dataset.from_tensor_slices(
        (tf.cast(X_train, tf.float32), tf.cast(Y_train, tf.float32))).shuffle(500).batch(8)

    test_ds = tf.data.Dataset.from_tensor_slices(
        (tf.cast(X_test, tf.float32), tf.cast(Y_test, tf.float32))).shuffle(200).batch(8)
    class FM(tf.keras.Model):
      def __init__(self):
          super(FM, self).__init__()

          # Define the model parameters
          self.w_0 = tf.Variable([0.0])
          self.w = tf.Variable(tf.zeros([p]))
          self.V = tf.Variable(tf.random.normal(shape=(p, k)))

      def call(self, inputs):
          linear_terms = tf.reduce_sum(tf.math.multiply(self.w, inputs), axis=1)

          interactions = 0.5 * tf.reduce_sum(
              tf.math.pow(tf.matmul(inputs, self.V), 2)
              - tf.matmul(tf.math.pow(inputs, 2), tf.math.pow(self.V, 2)),
              1,
              keepdims=False
          )

          y_hat = tf.math.sigmoid(self.w_0 + linear_terms + interactions)

          return y_hat

    model = FM()
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
    accuracy = BinaryAccuracy(threshold=0.5)
    loss_history = []
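    # train_on_batch is not defined in this snippet; a hypothetical sketch of
    # what it presumably does (assumptions: binary cross-entropy loss, one
    # gradient step, in-place metric update, labels matching the output shape):
    def train_on_batch(model, optimizer, accuracy, inputs, targets):
        with tf.GradientTape() as tape:
            y_pred = model(inputs)
            loss = tf.reduce_mean(
                tf.keras.losses.binary_crossentropy(targets, y_pred))
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        accuracy.update_state(targets, y_pred)
        return loss.numpy()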

    for i in range(epochs):
      for x, y in train_ds:
          loss = train_on_batch(model, optimizer, accuracy, x, y)
          loss_history.append(loss)

      if i % 2 == 0:
          print("Step {:03d}: running mean loss: {:.4f}".format(i, np.mean(loss_history)))
          print("Step {:03d}: running accuracy: {:.4f}".format(i, accuracy.result().numpy()))


    test_accuracy = BinaryAccuracy(threshold=0.5)
    for x, y in test_ds:
        y_pred = model(x)
        test_accuracy.update_state(y, y_pred)

    print("테스트 정확도: {:.4f}".format(test_accuracy.result().numpy()))

    filename = 'knn_models/fm.pkl'
    pickle.dump(model, open(filename, 'wb'))

    return HttpResponse({'message':'end'}, content_type="text/json-comment-filtered", status=status.HTTP_200_OK)