Code example #1
File: losses_test.py  Project: richardj2020/keras1
    def test_serializing_loss_class(self):
        orig_loss_class = MSE_MAE_loss(0.3)
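        # The scope maps the name 'MSE_MAE_loss' to the class for (de)serialization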
        with custom_object_scope({'MSE_MAE_loss': MSE_MAE_loss}):
            serialized = losses.serialize(orig_loss_class)

        with custom_object_scope({'MSE_MAE_loss': MSE_MAE_loss}):
            deserialized = losses.deserialize(serialized)
        assert isinstance(deserialized, MSE_MAE_loss)
        assert deserialized.mse_fraction == 0.3
Code example #2
File: losses_test.py  Project: richardj2020/keras1
    def test_serializing_model_with_loss_class(self, tmpdir):
        model_filename = str(tmpdir / 'custom_loss.hdf')

        with custom_object_scope({'MSE_MAE_loss': MSE_MAE_loss}):
            loss = MSE_MAE_loss(0.3)
            inputs = keras.layers.Input((2,))
            outputs = keras.layers.Dense(1, name='model_output')(inputs)
            model = keras.models.Model(inputs, outputs)
            model.compile(optimizer='sgd', loss={'model_output': loss})
            model.fit(np.random.rand(256, 2), np.random.rand(256, 1))
            model.save(model_filename)

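        # Reloading needs the same scope so the loss name stored in the file
        # resolves back to the MSE_MAE_loss class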
        with custom_object_scope({'MSE_MAE_loss': MSE_MAE_loss}):
            loaded_model = keras.models.load_model(model_filename)
            loaded_model.predict(np.random.rand(128, 2))
Code example #3
def main():
    X_train, X_test, y_train = get_data()
    model = create_model(input_dim=X_train.shape[1],
                         first_layer_size=300,
                         second_layer_size=200,
                         third_layer_size=200,
                         lr=0.0001,
                         l2reg=0.1,
                         dropout=0.2,
                         mode="AUC")

    train_model(X_train, y_train, model)

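    # Keep the custom soft_AUC_theano loss in scope while the AUC-trained model is used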
    with custom_object_scope({'soft_AUC_theano': soft_AUC_theano}):
        pred_fun = lambda x: model.predict(np.array(x))
        makeOutputFile(pred_fun, X_test, "auc.csv")

    model = create_model_bce(input_dim=X_train.shape[1],
                             first_layer_size=300,
                             second_layer_size=200,
                             third_layer_size=200,
                             lr=0.0001,
                             l2reg=0.1,
                             dropout=0.2)

    train_model(X_train, y_train, model)

    pred_fun = lambda x: model.predict(np.array(x))
    makeOutputFile(pred_fun, X_test, "no_auc.csv")
Code example #4
def keras_to_coreml():
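    # The saved model references the custom smoothL1 loss and relu6 activation
    # by name, so both must be in scope while loading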
    with custom_object_scope({'smoothL1': smoothL1, 'relu6': relu6}):
        ml_model = load_model(MODEL_PATH)
    coreml_model = coremltools.converters.keras.convert(ml_model, 
                                                        input_names='image', image_input_names='image', 
                                                        is_bgr=False)
    coreml_model.save(ML_MODEL_PATH)
Code example #5
def get_RetinaNet_model():
    from keras.utils import custom_object_scope
    from keras_resnet.layers import BatchNormalization
    from keras_retinanet.layers import UpsampleLike, Anchors, RegressBoxes, ClipBoxes, FilterDetections
    from keras_retinanet.initializers import PriorProbability
    from keras_retinanet import models
    from keras_retinanet.models.retinanet import retinanet_bbox

    custom_objects = {
        'BatchNormalization': BatchNormalization,
        'UpsampleLike': UpsampleLike,
        'Anchors': Anchors,
        'RegressBoxes': RegressBoxes,
        'PriorProbability': PriorProbability,
        'ClipBoxes': ClipBoxes,
        'FilterDetections': FilterDetections,
    }

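    # Build the RetinaNet graph with all of its custom layers resolvable by name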
    with custom_object_scope(custom_objects):
        backbone = models.backbone('resnet50')
        model = backbone.retinanet(500)
        prediction_model = retinanet_bbox(model=model)
        # prediction_model.load_weights("...your weights here...")

    return prediction_model, custom_objects
Code example #6
File: detector.py  Project: yueyedeai/FaceLandmarks
    def __init__(self, model_path, gpuid=-1, thresh=0.95, scales=[384, 512], mark_model=None):

        self.gpuid = -1 if gpuid < 0 else gpuid
        self.thresh = thresh

        if isinstance(scales, (list, tuple)) and len(scales) == 2:
            self.scales = scales
        else:
            raise ValueError("scales must be a list or tuple of length 2")

        try:
            self.detector = retinaface.RetinaFace(
                model_path, 0, self.gpuid, 'net3')
        except Exception as e:
            raise Exception("Detector loading error: {}".format(e))


        # The .h5 landmark model was saved with custom losses and activations,
        # which must be in scope for load_model to rebuild it
        if mark_model and mark_model.split(".")[-1] == 'h5':
            with custom_object_scope({'normalized_mean_error': normalized_mean_error,
                                      'wing_loss': wing_loss, 'smoothL1': smoothL1,
                                      'relu6': relu6, 'hard_swish': hard_swish}):
                self.sess = load_model(mark_model)
        else:
            raise Exception("mark_model must be a .h5 model file")
Code example #7
File: nn-alpha.py  Project: LukasKemmer/PSSDD
def main():
    X_train, X_test, y_train = get_data()
    model = create_model(input_dim=X_train.shape[1],
                         first_layer_size=500,
                         second_layer_size=500,
                         third_layer_size=500,
                         lr=0.0001,
                         l2reg=0.01,
                         dropout=0.2,
                         mode="AUC")

    X_train, X_val = split(X_train, 0.7)
    y_train, y_val = split(y_train, 0.7)
    X_train, y_train = balance(X_train, y_train, 0.06)

    X_train = X_train.head(20480)
    y_train = y_train.head(20480)

    train_model(X_train, y_train, model, epochs=30)

    with custom_object_scope({'soft_AUC_theano': soft_AUC_theano}):
        pred_fun = lambda x: model.predict(np.array(x))

        y_pred = pred_fun(X_val)

        y_pred = 1 / y_pred
        print("GINI coeff: ",
              eval_gini(y_val.values.flatten(), y_pred.flatten()))
        print("\a\a\a")
Code example #8
File: utils.py  Project: Crissal1995/IPCV
def clean_copy(model):
    """Returns a copy of the model without other model uses of its layers."""
    weights = model.get_weights()
    # Code change: custom objects added so from_config can resolve the Interp layer
    custom_objects = {'Interp': Interp}
    with custom_object_scope(custom_objects):
        new_model = model.__class__.from_config(model.get_config())
    new_model.set_weights(weights)
    return new_model
Code example #9
File: ced_curve.py  Project: yueyedeai/FaceLandmarks
    def __init__(self, model_path, nb_points=106, output_dim=112):

        with custom_object_scope({'normalized_mean_error': normalized_mean_error,
                                  'wing_loss': wing_loss, 'smoothL1': smoothL1,
                                  'relu6': relu6, 'hard_swish': hard_swish}):
            self.model = load_model(model_path)

        self.output_dim = output_dim
        self.nb_points = nb_points

        self.__gt_landmarks = None
        self.__pred_landmarks = None
        self.__image_names = None
Code example #10
    def test_application_base(self, app, _):
        # Can be instantiated with default arguments
        model = app(weights=None)
        # Can be serialized and deserialized
        config = model.get_config()
        if "ConvNeXt" in app.__name__:
            custom_objects = {"LayerScale": convnext.LayerScale}
            with utils.custom_object_scope(custom_objects):
                reconstructed_model = model.__class__.from_config(config)
        else:
            reconstructed_model = model.__class__.from_config(config)
        self.assertEqual(len(model.weights), len(reconstructed_model.weights))
        backend.clear_session()
Code example #11
def main():
    X_train, X_test, y_train = get_data()
    X_train, X_val, y_train, y_val = train_test_split(X_train,
                                                      y_train,
                                                      test_size=0.2,
                                                      random_state=0)

    model = create_model(input_dim=X_train.shape[1])

    train_model(X_train, y_train, model, epochs=30)

    with custom_object_scope({'soft_AUC_theano': soft_AUC_theano}):
        pred_fun = lambda x: model.predict(np.array(x))

        y_pred = pred_fun(X_val)

        y_pred = 1 / y_pred
        print("GINI coeff: ",
              eval_gini(y_val.values.flatten(), y_pred.flatten()))
        print("\a\a\a")
Code example #12
def main(args=None):
    from keras_retinanet.utils.config import parse_anchor_parameters
    from keras.utils import custom_object_scope

    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    anchor_params = None
    if 0:
        config = dict()
        config['anchor_parameters'] = dict()
        config['anchor_parameters']['sizes'] = '16 32 64 128 256 512'
        config['anchor_parameters']['strides'] = '8 16 32 64 128'
        config['anchor_parameters']['ratios'] = '0.1 0.5 1 2 4 8'
        config['anchor_parameters']['scales'] = '1 1.25 1.5 1.75'
        anchor_params = parse_anchor_parameters(config)

    # load and convert model
    with custom_object_scope({
            'AdamAccumulate': AdamAccumulate,
            'AccumOptimizer': Adam
    }):
        model = models.load_model(args.model_in, backbone_name=args.backbone)
        model = models.convert_model(
            model,
            nms=args.nms,
            class_specific_filter=args.class_specific_filter,
            max_detections=500,
            nms_threshold=0.3,
            score_threshold=0.01,
            anchor_params=anchor_params)

    # save model
    model.save(args.model_out)
Code example #13
if __name__ == '__main__':
    models_to_test = ['mobilenet_small', 'mobilenet', 'mobilenet_v2', 'resnet50', 'inception_v3',
                      'inception_resnet_v2', 'xception', 'densenet121', 'densenet169', 'densenet201',
                      'nasnetmobile', 'nasnetlarge', 'multi_io', 'multi_model_layer_1', 'multi_model_layer_2',
                      'Conv2DTranspose', 'RetinaNet', 'conv3d_model', 'conv1d_model']
    # Comment line below for full model testing
    models_to_test = ['conv1d_model']
    verbose = True

    for model_name in models_to_test:
        print('Go for: {}'.format(model_name))
        model, custom_objects = get_tst_neural_net(model_name)
        if verbose:
            model.summary()
        start_time = time.time()
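        # Models such as RetinaNet include custom layers, so reduction runs inside the scope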
        with custom_object_scope(custom_objects):
            model_reduced = reduce_keras_model(model, verbose=verbose)
        print("Reduction time: {:.2f} seconds".format(time.time() - start_time))
        if verbose:
            model_reduced.summary()
        print('Initial model number layers: {}'.format(len(model.layers)))
        print('Reduced model number layers: {}'.format(len(model_reduced.layers)))
        print('Compare models...')
        if model_name in ['nasnetlarge', 'deeplab_v3plus_mobile', 'deeplab_v3plus_xception']:
            max_error = compare_two_models_results(model, model_reduced, test_number=10000, max_batch=128)
        elif model_name in ['RetinaNet', 'conv3d_model', 'conv1d_model']:
            max_error = compare_two_models_results(model, model_reduced, test_number=1280, max_batch=128)
        elif model_name in ['mobilenet_small']:
            max_error = compare_two_models_results(model, model_reduced, test_number=1000, max_batch=1000)
        else:
            max_error = compare_two_models_results(model, model_reduced, test_number=10000, max_batch=10000)
Code example #14
def base_train(data_tag, to_train=False, is_bin=False):
    print("Unpacking data...")
    state_len, num_classes, x_train, y_train, x_test, y_test = load_data(
        data_tag)
    print(f"state_len: {state_len}, num_classes: {num_classes}")

    model = build_model(state_len, num_classes)
    print("Model built. ")

    time_stamp = get_time()
    print(time_stamp)
    model_save_root = f"checkpoints/{data_tag}/{MAGIC_CODE}"
    history_save_root = f"history/{data_tag}/{MAGIC_CODE}/{WORK_MAGIC_CODE}/{time_stamp}"
    os.makedirs(model_save_root, exist_ok=True)
    os.makedirs(history_save_root, exist_ok=True)

    model_basename = f"{MAGIC_CODE}-{data_tag}-{WORK_MAGIC_CODE}"
    model_save_path = f"{model_save_root}/{model_basename}-{time_stamp}.h5"
    model_universal = f"best_models/{model_basename}.h5"
    history = []
    if to_train:
        # earlystopper = EarlyStopping(patience=10, verbose=1, monitor="val_acc")
        # checkpointer = ModelCheckpoint(model_universal, verbose=1, save_best_only=True, monitor="val_acc")

        earlystopper = EarlyStopping(patience=5, verbose=1)
        checkpointer = ModelCheckpoint(model_universal,
                                       verbose=1,
                                       save_best_only=True)

        # reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=1, epsilon=1e-4, mode='min')
        history = model.fit(
            x_train,
            [y_train, y_train],
            batch_size=batch_size,
            epochs=epochs,
            verbose=1,
            validation_data=(x_test, [y_test, y_test]),
            # callbacks=[earlystopper, checkpointer, reduce_lr_loss])
            callbacks=[earlystopper, checkpointer],
            class_weight=generate_class_weights(np.argmax(y_train, axis=1)),
        )

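    # Keep every custom layer and loss in scope while reloading the best
    # checkpoint and re-saving the full model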
    with custom_object_scope({
            "Projection": Projection,
            "Proj2Prob": Proj2Prob,
            "EigenDist": EigenDist,
            "categorical_bernoulli_crossentropy": categorical_bernoulli_crossentropy,
            "FullConnectedNeuralNetwork": FullConnectedNeuralNetwork,
            "Softmax": Softmax,
            "categorical_crossentropy": categorical_crossentropy
    }):
        model.load_weights(model_universal)
        if to_train:
            model.save(model_save_path)

    score = model.evaluate(x_test, [y_test, y_test], verbose=0)
    print(score)
    # print('Test loss:', score[0])
    # print('Test loss 2:', score[1])
    # print('Test accuracy:', score[2])
    # print('Test accuracy 2:', score[3])

    dataset = [x_train, y_train, x_test, y_test]
    save_history(dataset, model, num_classes, history, data_tag,
                 WORK_MAGIC_CODE, MAGIC_CODE, history_save_root, time_stamp)
    cmp_res = compare_all(dataset,
                          num_classes,
                          model,
                          data_tag,
                          WORK_MAGIC_CODE,
                          MAGIC_CODE,
                          time_stamp,
                          is_bin=is_bin)
    # save_compare_result(cmp_res, data_tag, WORK_MAGIC_CODE, MAGIC_CODE, time_stamp)

    print("Waiting for Nutstore to sync...")
    import time
    time.sleep(5)

    shutil.copy(f"history_{data_tag}.txt", f"{history_save_root}/")
Code example #15
def base_train(data_tag, to_train=False, is_bin=False):
    print("Unpacking data...")
    state_len, num_classes, x_train, y_train, x_test, y_test = load_data(
        data_tag)
    print(f"state_len: {state_len}, num_classes: {num_classes}")
    # x_train = np.expand_dims(x_train, axis=0)
    # x_test = np.expand_dims(x_test, axis=0)
    x_train = np.concatenate([x_train, np.zeros(x_train.shape)], axis=1)
    x_test = np.concatenate([x_test, np.zeros(x_test.shape)], axis=1)
    # print(x_train.dtype)
    # print(x_test.dtype)

    model = build_model(state_len, num_classes)
    print("Model built. ")

    time_stamp = get_time()
    print(time_stamp)
    model_save_root = f"checkpoints/{data_tag}/{MAGIC_CODE}"
    history_save_root = f"history/{data_tag}/{MAGIC_CODE}/{time_stamp}"
    os.makedirs(model_save_root, exist_ok=True)
    os.makedirs(history_save_root, exist_ok=True)

    model_basename = f"{MAGIC_CODE}-{data_tag}-{WORK_MAGIC_CODE}"
    model_save_path = f"{model_save_root}/{model_basename}-{time_stamp}.h5"
    model_universal = f"best_models/{model_basename}.h5"
    history = []
    if to_train:
        # earlystopper = EarlyStopping(patience=10, verbose=1, monitor="val_acc")
        # checkpointer = ModelCheckpoint(model_universal, verbose=1, save_best_only=True, monitor="val_acc")

        earlystopper = EarlyStopping(patience=10, verbose=1)
        checkpointer = ModelCheckpoint(model_universal,
                                       verbose=1,
                                       save_best_only=True)

        # reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=1, epsilon=1e-4, mode='min')
        history = model.fit(
            x_train,
            [y_train, y_train],
            batch_size=batch_size,
            epochs=epochs,
            verbose=1,
            validation_data=(x_test, [y_test, y_test]),
            # callbacks=[earlystopper, checkpointer, reduce_lr_loss])
            callbacks=[earlystopper, checkpointer])

    with custom_object_scope({
            "Projection": Projection,
            "Proj2Prob": Proj2Prob,
            "EigenDist": EigenDist,
            "categorical_bernoulli_crossentropy": categorical_bernoulli_crossentropy,
            "FullConnectedNeuralNetwork": FullConnectedNeuralNetwork,
            "categorical_crossentropy": categorical_crossentropy
    }):
        model.load_weights(model_universal)
        if to_train:
            model.save(model_save_path)

    score = model.evaluate(x_test, [y_test, y_test], verbose=0)
    print(score)