Example #1
def train_Keras(train_X, train_y, test_X, test_y, kwargs):
    normalization = normalization_func()
    num_classes = train_y.shape[-1]

    norm_train_X = normalization.fit_transform(train_X)
    norm_test_X = normalization.transform(test_X)

    # Inverse-frequency class weights, rescaled to sum to num_classes
    class_weight = train_y.shape[0] / np.sum(train_y, axis=0)
    class_weight = num_classes * class_weight / class_weight.sum()
    sample_weight = None
    batch_size = max(2, len(norm_train_X) // 50)
    print('reps : ', reps, ', weights : ', class_weight)
    if num_classes == 2:
        # Binary case: apply the class weights per sample instead
        sample_weight = np.zeros((len(norm_train_X),))
        sample_weight[train_y[:, 1] == 1] = class_weight[1]
        sample_weight[train_y[:, 1] == 0] = class_weight[0]
        class_weight = None

    model = three_layer_nn(nfeatures=norm_train_X.shape[1:], **kwargs)
    optimizer = optimizer_class(lr=1e-3)

    model.compile(
        loss='categorical_crossentropy',
        optimizer=optimizer,
        metrics=['acc']
    )

    model.fit(
        norm_train_X, train_y, batch_size=batch_size,
        epochs=epochs,
        callbacks=[
            callbacks.LearningRateScheduler(scheduler()),
        ],
        validation_data=(norm_test_X, test_y),
        class_weight=class_weight,
        sample_weight=sample_weight,
        verbose=verbose
    )

    model.normalization = normalization

    return model
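
A minimal invocation sketch for this baseline trainer. It assumes the module-level names the function relies on (normalization_func, three_layer_nn, optimizer_class, scheduler, callbacks, reps, epochs, verbose) are already defined in scope; the synthetic split and the 'regularization' key forwarded to three_layer_nn are illustrative, not taken from the original module.

# Hypothetical usage sketch; the synthetic data and kwargs are illustrative.
import numpy as np
from keras.utils import to_categorical

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 20)).astype('float32')
y = to_categorical(rng.integers(0, 2, size=200), num_classes=2)
train_X, test_X = X[:150], X[150:]
train_y, test_y = y[:150], y[150:]

model = train_Keras(train_X, train_y, test_X, test_y,
                    kwargs={'regularization': 1e-3})
# The fitted scaler travels with the model, so new inputs can be
# normalized consistently before prediction.
probs = model.predict(model.normalization.transform(test_X))
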
Example #2
def train_Keras(train_X,
                train_y,
                test_X,
                test_y,
                kwargs,
                e2efs_class=None,
                n_features=None,
                e2efs_kwargs=None,
                T=300,
                extra=300):
    normalization = normalization_func()
    num_classes = train_y.shape[-1]

    norm_train_X = normalization.fit_transform(train_X)
    norm_test_X = normalization.transform(test_X)

    batch_size = max(2, len(train_X) // 50)
    class_weight = train_y.shape[0] / np.sum(train_y, axis=0)
    class_weight = num_classes * class_weight / class_weight.sum()
    sample_weight = None
    print('reps : ', reps, ', weights : ', class_weight)
    if num_classes == 2:
        sample_weight = np.zeros((len(norm_train_X), ))
        sample_weight[train_y[:, 1] == 1] = class_weight[1]
        sample_weight[train_y[:, 1] == 0] = class_weight[0]
        class_weight = None

    classifier = three_layer_nn(nfeatures=norm_train_X.shape[1:], **kwargs)

    model_clbks = [
        callbacks.LearningRateScheduler(
            scheduler(extra=0 if e2efs_class is None else extra)),
    ]

    if e2efs_class is not None:
        # Prepend the E2E-FS selection layer so the feature weights are
        # learned jointly with the classifier
        e2efs_layer = e2efs_class(n_features,
                                  input_shape=norm_train_X.shape[1:],
                                  **e2efs_kwargs)
        model = e2efs_layer.add_to_model(classifier,
                                         input_shape=norm_train_X.shape[1:])
        model_clbks.append(
            clbks.E2EFSCallback(factor_func=e2efs_factor(T),
                                units_func=None,
                                verbose=verbose))
    else:
        model = classifier
        e2efs_layer = None

    # The optimizer receives the selection layer (None for the plain model)
    optimizer = optimizer_class(e2efs_layer, lr=1e-3)
    model_epochs = epochs
    if e2efs_class is not None:
        # Budget extra epochs for the selection phase
        model_epochs += extra_epochs

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['acc'])

    if e2efs_class is not None:
        model.fs_layer = e2efs_layer
        model.heatmap = e2efs_layer.moving_heatmap

    model.fit(norm_train_X,
              train_y,
              batch_size=batch_size,
              epochs=model_epochs,
              callbacks=model_clbks,
              validation_data=(norm_test_X, test_y),
              class_weight=class_weight,
              sample_weight=sample_weight,
              verbose=verbose)

    model.normalization = normalization
    return model
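
This variant wires a feature-selection layer in front of the classifier and extends the schedule while selection runs. A hedged sketch of a call, reusing the synthetic split from the Example #1 sketch; E2EFSSoft is a stand-in for whatever e2efs_class the surrounding module actually provides.

# Hypothetical call; E2EFSSoft is a placeholder for the module's actual
# selection-layer class.
model = train_Keras(train_X, train_y, test_X, test_y,
                    kwargs={'regularization': 1e-3},
                    e2efs_class=E2EFSSoft,
                    n_features=10,
                    e2efs_kwargs={},
                    T=300, extra=300)
print(model.heatmap)  # per-feature relevance accumulated during selection
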
Example #3
def train_Keras(train_X,
                train_y,
                test_X,
                test_y,
                kwargs,
                cae_model_func=None,
                n_features=None,
                epochs=150):
    normalization = normalization_func()
    num_classes = train_y.shape[-1]

    norm_train_X = normalization.fit_transform(train_X)
    norm_test_X = normalization.transform(test_X)

    batch_size = max(2, len(train_X) // 50)
    class_weight = train_y.shape[0] / np.sum(train_y, axis=0)
    class_weight = num_classes * class_weight / class_weight.sum()
    sample_weight = None
    print('l2 :', kwargs['regularization'], ', batch_size :', batch_size)
    print('reps : ', reps, ', weights : ', class_weight)
    if num_classes == 2:
        sample_weight = np.zeros((len(norm_train_X), ))
        sample_weight[train_y[:, 1] == 1] = class_weight[1]
        sample_weight[train_y[:, 1] == 0] = class_weight[0]
        class_weight = None

    model_clbks = [
        callbacks.LearningRateScheduler(scheduler()),
    ]

    if cae_model_func is not None:
        # Concrete-autoencoder path: the CAE selects n_features inputs and
        # trains the classifier on that subset, using its own fit loop
        classifier = three_layer_nn(nfeatures=(n_features, ), **kwargs)
        cae_model = cae_model_func(output_function=classifier, K=n_features)
        start_time = time.process_time()
        cae_model.fit(norm_train_X,
                      train_y,
                      norm_test_X,
                      test_y,
                      num_epochs=extra_epochs + epochs,
                      batch_size=batch_size,
                      class_weight=class_weight)
        model = cae_model.model
        model.indices = cae_model.get_support(True)
        model.heatmap = cae_model.probabilities.max(axis=0)
        model.fs_time = time.process_time() - start_time  # CPU seconds
    else:
        model = three_layer_nn(nfeatures=norm_train_X.shape[1:], **kwargs)
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer_class(lr=initial_lr),
                      metrics=['acc'])
        model.fit(norm_train_X,
                  train_y,
                  batch_size=batch_size,
                  epochs=epochs,
                  callbacks=model_clbks,
                  validation_data=(norm_test_X, test_y),
                  class_weight=class_weight,
                  sample_weight=sample_weight,
                  verbose=verbose)

    model.normalization = normalization

    return model
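
Here cae_model_func is expected to build a concrete-autoencoder-style selector whose own fit loop drives training; without it the function falls back to plain classifier training. A sketch under that assumption, with build_cae as a placeholder name:

# Hypothetical call; build_cae stands in for the module's cae_model_func.
model = train_Keras(train_X, train_y, test_X, test_y,
                    kwargs={'regularization': 1e-3},
                    cae_model_func=build_cae,
                    n_features=10)
print(model.indices)  # indices of the selected input features
print(model.fs_time)  # CPU seconds spent selecting and training
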
Example #4
def train_Keras(train_X,
                train_y,
                test_X,
                test_y,
                kwargs,
                e2efs_class=None,
                n_features=None,
                epochs=150,
                fine_tuning=True):
    normalization = normalization_func()
    num_classes = train_y.shape[-1]

    norm_train_X = normalization.fit_transform(train_X)
    norm_test_X = normalization.transform(test_X)

    batch_size = max(2, len(train_X) // 50)
    class_weight = train_y.shape[0] / np.sum(train_y, axis=0)
    class_weight = num_classes * class_weight / class_weight.sum()
    sample_weight = None
    print('r :', kwargs['regularization'], ', batch_size :', batch_size)
    print('reps : ', reps, ', weights : ', class_weight)
    if num_classes == 2:
        sample_weight = np.zeros((len(norm_train_X), ))
        sample_weight[train_y[:, 1] == 1] = class_weight[1]
        sample_weight[train_y[:, 1] == 0] = class_weight[0]
        class_weight = None

    classifier = three_layer_nn(nfeatures=norm_train_X.shape[1:], **kwargs)

    model_clbks = [
        callbacks.LearningRateScheduler(scheduler()),
    ]

    fs_callbacks = []

    if e2efs_class is not None:
        # Attach the E2E-FS selection layer in front of the classifier
        e2efs_layer = e2efs_class(n_features,
                                  input_shape=norm_train_X.shape[1:])
        model = e2efs_layer.add_to_model(classifier,
                                         input_shape=norm_train_X.shape[1:])
        fs_callbacks.append(clbks.E2EFSCallback(units=5, verbose=verbose))
    else:
        model = classifier
        e2efs_layer = None

    optimizer = optimizer_class(e2efs_layer, lr=initial_lr)

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['acc'])

    if e2efs_class is not None:
        model.fs_layer = e2efs_layer
        model.heatmap = e2efs_layer.moving_heatmap

        start_time = time.process_time()
        # Selection phase: the huge epoch cap is never meant to be reached;
        # the E2EFSCallback is expected to stop training once the selection
        # converges
        model.fit(norm_train_X,
                  train_y,
                  batch_size=batch_size,
                  epochs=200000,
                  callbacks=fs_callbacks,
                  validation_data=(norm_test_X, test_y),
                  class_weight=class_weight,
                  sample_weight=sample_weight,
                  verbose=verbose)
        model.fs_time = time.process_time() - start_time

    if fine_tuning:
        # Optional second pass over the standard training schedule
        model.fit(norm_train_X,
                  train_y,
                  batch_size=batch_size,
                  epochs=epochs,
                  callbacks=model_clbks,
                  validation_data=(norm_test_X, test_y),
                  class_weight=class_weight,
                  sample_weight=sample_weight,
                  verbose=verbose)

    model.normalization = normalization

    return model
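
Training here runs in up to two phases: an open-ended selection phase cut short by the E2EFS callback, then an optional fine-tuning pass on the standard schedule. A sketch, again with E2EFSSoft as an assumed stand-in:

# Hypothetical two-phase run; pass fine_tuning=False to skip the second pass.
model = train_Keras(train_X, train_y, test_X, test_y,
                    kwargs={'regularization': 1e-3},
                    e2efs_class=E2EFSSoft,  # assumed selection-layer class
                    n_features=10,
                    epochs=150,
                    fine_tuning=True)
print(model.fs_time)  # CPU seconds consumed by the selection phase alone
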
Example #5
def train_Keras(train_X,
                train_y,
                test_X,
                test_y,
                kwargs,
                l2x_model_func=None,
                n_features=None,
                epochs=150):
    normalization = normalization_func()
    num_classes = train_y.shape[-1]

    norm_train_X = normalization.fit_transform(train_X)
    norm_test_X = normalization.transform(test_X)

    batch_size = max(2, len(train_X) // 50)
    class_weight = train_y.shape[0] / np.sum(train_y, axis=0)
    class_weight = num_classes * class_weight / class_weight.sum()
    sample_weight = None
    print('l2 :', kwargs['regularization'], ', batch_size :', batch_size)
    print('reps : ', reps, ', weights : ', class_weight)
    if num_classes == 2:
        sample_weight = np.zeros((len(norm_train_X), ))
        sample_weight[train_y[:, 1] == 1] = class_weight[1]
        sample_weight[train_y[:, 1] == 0] = class_weight[0]
        class_weight = None

    classifier = three_layer_nn(nfeatures=norm_train_X.shape[1:], **kwargs)

    model_clbks = [
        callbacks.LearningRateScheduler(scheduler()),
    ]

    fs_callbacks = [
        callbacks.LearningRateScheduler(scheduler(extra_epochs=extra_epochs)),
    ]

    if l2x_model_func is not None:
        # L2X path: the selector emits a per-sample soft mask that gates the
        # input features before they reach the classifier
        l2x_model = l2x_model_func(norm_train_X.shape[1:], n_features)
        classifier_input = layers.Multiply()(
            [l2x_model.output, l2x_model.input])
        output = classifier(classifier_input)
        model = models.Model(l2x_model.input, output)
    else:
        model = classifier
        l2x_model = None

    optimizer = optimizer_class(lr=initial_lr)

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['acc'])

    if l2x_model is not None:
        model.l2x_model = l2x_model

        start_time = time.process_time()
        model.fit(norm_train_X,
                  train_y,
                  batch_size=batch_size,
                  epochs=epochs + extra_epochs,
                  callbacks=fs_callbacks,
                  validation_data=(norm_test_X, test_y),
                  class_weight=class_weight,
                  sample_weight=sample_weight,
                  verbose=0)
        # Feature heatmap: sum the selector's soft masks over the training set
        model.heatmap = l2x_model.predict(
            norm_train_X, verbose=0, batch_size=batch_size).reshape(
                (-1, np.prod(norm_train_X.shape[1:]))).sum(axis=0)
        model.fs_time = time.process_time() - start_time  # CPU seconds
    else:
        model.fit(norm_train_X,
                  train_y,
                  batch_size=batch_size,
                  epochs=epochs,
                  callbacks=model_clbks,
                  validation_data=(norm_test_X, test_y),
                  class_weight=class_weight,
                  sample_weight=sample_weight,
                  verbose=verbose)

    model.normalization = normalization

    return model
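
The L2X path gates the input with the selector's per-sample mask and sums the masks over the training set as a feature heatmap. A final sketch; build_l2x is a placeholder for the module's l2x_model_func and is assumed to return a Keras model mapping inputs to a soft selection mask of the same shape:

# Hypothetical call; build_l2x(input_shape, k) is a placeholder selector
# factory returning a Keras model that outputs a soft selection mask.
model = train_Keras(train_X, train_y, test_X, test_y,
                    kwargs={'regularization': 1e-3},
                    l2x_model_func=build_l2x,
                    n_features=10)
top10 = np.argsort(model.heatmap)[::-1][:10]  # most-selected features
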