Example #1
import tensorflow as tf

from config import TrainingConfig, BayesOptConfig, BoundConfig
from dataset import FashionMNIST
from mlp import MlpSearchSpace

training_config = TrainingConfig(
    dataset=FashionMNIST(),
    optimizer=lambda: tf.optimizers.Adam(learning_rate=0.001),
    callbacks=lambda: [
        tf.keras.callbacks.ReduceLROnPlateau(factor=0.5, patience=4)])

search_config = BayesOptConfig(search_space=MlpSearchSpace(),
                               starting_points=10,
                               checkpoint_dir="artifacts/mlp_fashion")

bound_config = BoundConfig(error_bound=0.18,
                           peak_mem_bound=200,
                           model_size_bound=30000,
                           mac_bound=30000)
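
Together these three objects fully specify a search run. Below is a minimal sketch of how a driver script might consume them; the BayesOpt constructor signature and the search() call are assumptions for illustration, not part of the example.

from search_algorithms import BayesOpt

# Hypothetical wiring; the real entry point may differ.
search = BayesOpt(experiment_name="mlp_fashion",
                  search_config=search_config,
                  training_config=training_config,
                  bound_config=bound_config)
search.search()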
Example #2
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow_addons.optimizers import AdamW

from config import TrainingConfig, BayesOptConfig, BoundConfig
from dataset import FashionMNIST
from cnn import CnnSearchSpace

training_config = TrainingConfig(
    dataset=FashionMNIST(),
    epochs=75,
    optimizer=lambda: AdamW(learning_rate=0.001, weight_decay=1e-5),
    callbacks=lambda: [LearningRateScheduler(lambda e: 0.001 if e < 25 else 0.00025)]
)

search_config = BayesOptConfig(
    search_space=CnnSearchSpace(),
    starting_points=15,
    checkpoint_dir="artifacts/cnn_fashion"
)

bound_config = BoundConfig(
    error_bound=0.10,
    peak_mem_bound=64000,
    model_size_bound=64000,
    mac_bound=1000000
)
Example #3
import tensorflow_addons as tfa

from config import TrainingConfig, BayesOptConfig, BoundConfig
from dataset import MNIST
from cnn import CnnSearchSpace
from search_algorithms import BayesOpt

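# Explicitly pin the search algorithm; other examples in this collection
# either leave it unset or select AgingEvoSearch instead.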
search_algorithm = BayesOpt

training_config = TrainingConfig(
    dataset=MNIST(),
    epochs=30,
    batch_size=128,
    optimizer=lambda: tfa.optimizers.SGDW(
        learning_rate=0.005, momentum=0.9, weight_decay=4e-5),
    callbacks=lambda: [],
)

search_config = BayesOptConfig(search_space=CnnSearchSpace(),
                               starting_points=10,
                               checkpoint_dir="artifacts/cnn_mnist")

bound_config = BoundConfig(error_bound=0.035,
                           peak_mem_bound=2500,
                           model_size_bound=4500,
                           mac_bound=30000000)
Example #4
# Load model configurations
model_config = MultiInputCNNConfig()
# Build model
model = MultiInputCNN(input_char_size=MAX_CHAR_IN_SENT_LEN,
                      input_phone_CZ_size=MAX_PHONE_CZ_LEN,
                      input_phone_EN_size=MAX_PHONE_EN_LEN,
                      input_phone_HU_size=MAX_PHONE_HU_LEN,
                      input_phone_RU_size=MAX_PHONE_RU_LEN,
                      input_acoustic_size=ACOUSTIC_EMB_SIZE,
                      phone_CZ_alphabet_size=PHONE_CZ_ALPHABET_LEN,
                      phone_EN_alphabet_size=PHONE_EN_ALPHABET_LEN,
                      phone_HU_alphabet_size=PHONE_HU_ALPHABET_LEN,
                      phone_RU_alphabet_size=PHONE_RU_ALPHABET_LEN,
                      model_config=model_config,
                      data_config=data_config)

# Load training configurations
training_config = TrainingConfig()
# Train model
model.train(training_inputs=training_inputs,
            training_labels=training_labels,
            validation_inputs=validation_inputs,
            validation_labels=validation_labels,
            epochs=training_config.epochs,
            batch_size=training_config.batch_size,
            checkpoint_every=training_config.checkpoint_every)
# Test model (evaluated here on the validation split)
model.test(testing_inputs=validation_inputs,
           testing_labels=validation_labels)
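
The fragment assumes corpus-specific constants, a data_config, and pre-built input/label arrays in the enclosing scope. A hypothetical sketch of the constants it expects follows; every value is a placeholder, not taken from the original code.

# Placeholder values for names the fragment uses (all hypothetical).
MAX_CHAR_IN_SENT_LEN = 200     # max characters per sentence
MAX_PHONE_CZ_LEN = 120         # max Czech phone-sequence length
MAX_PHONE_EN_LEN = 120         # max English phone-sequence length
MAX_PHONE_HU_LEN = 120         # max Hungarian phone-sequence length
MAX_PHONE_RU_LEN = 120         # max Russian phone-sequence length
ACOUSTIC_EMB_SIZE = 256        # acoustic embedding dimensionality
PHONE_CZ_ALPHABET_LEN = 48     # per-language phone-alphabet sizes
PHONE_EN_ALPHABET_LEN = 48
PHONE_HU_ALPHABET_LEN = 48
PHONE_RU_ALPHABET_LEN = 48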
Example #5
import tensorflow_addons as tfa
from tensorflow.keras.callbacks import EarlyStopping

from config import TrainingConfig, BayesOptConfig, BoundConfig
from dataset import MNIST
from cnn import CnnSearchSpace

training_config = TrainingConfig(
    dataset=MNIST(binary=True),
    epochs=25,
    batch_size=128,
    optimizer=lambda: tfa.optimizers.AdamW(learning_rate=0.001,
                                           weight_decay=0.0001),
    callbacks=lambda: [EarlyStopping(patience=10, min_delta=0.005)],
)

search_config = BayesOptConfig(search_space=CnnSearchSpace(),
                               starting_points=10,
                               checkpoint_dir="artifacts/cnn_mnist")

bound_config = BoundConfig(error_bound=0.01,
                           peak_mem_bound=2000,
                           model_size_bound=2000,
                           mac_bound=1000000)
Example #6
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow_addons.optimizers import SGDW

from cnn import CnnSearchSpace
from config import TrainingConfig
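# bound_config, search_config and search_algorithm are reused unchanged from
# the aging-evolution config; only training_config is overridden below.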
from configs.cnn_chars74k_aging import bound_config, search_config, search_algorithm
from dataset import Chars74K


def lr_schedule(epoch):
    if 0 <= epoch < 42:
        return 0.01
    return 0.005


training_config = TrainingConfig(
    dataset=Chars74K("/datasets/chars74k", img_size=(32, 32), binary=True),
    epochs=60,
    batch_size=128,
    optimizer=lambda: SGDW(learning_rate=0.01, momentum=0.9, weight_decay=4e-5),
    callbacks=lambda: [LearningRateScheduler(lr_schedule)])
Example #7
import tensorflow_addons as tfa

# Imports assumed from the sibling examples in this collection.
from config import TrainingConfig, AgingEvoConfig, BoundConfig
from dataset import CIFAR10
from cnn import CnnSearchSpace
from search_algorithms import AgingEvoSearch

search_algorithm = AgingEvoSearch


def lr_schedule(epoch):
    if 0 <= epoch < 35:
        return 0.01
    if 35 <= epoch < 65:
        return 0.005
    return 0.001


training_config = TrainingConfig(
    dataset=CIFAR10(binary=True),
    optimizer=lambda: tfa.optimizers.SGDW(learning_rate=0.01, momentum=0.9,
                                          weight_decay=1e-5),
    batch_size=128,
    epochs=80,
    callbacks=lambda: []
)

search_config = AgingEvoConfig(
    search_space=CnnSearchSpace(),
    checkpoint_dir="artifacts/cnn_cifar10"
)

bound_config = BoundConfig(
    error_bound=0.3,
    peak_mem_bound=3000,
    model_size_bound=2000,
    mac_bound=1000000
)
Example #8
    def GenerateSimpleModelCollectionsReducedCategorySet(
            MC_path, weight_path=None):
        mcolls = []
        global_max_epochs = 100
        global_hyperparams = {'number_layers': 2}

        # ---------------------------------------------

        H1_stream = {MC_path + "VBFH125/ZZ4lAnalysis.root": cuts.mZZ_cut}
        H0_stream = {MC_path + "ggH125/ZZ4lAnalysis.root": cuts.mZZ_cut}

        mcoll_name = "D_VBF_ggH_ML"
        mcoll = ModelCollection(mcoll_name, H1_stream, H0_stream)

        # this is the model that will take the place of the VBF2jet/ggH discriminant.
        model_name = "D_VBF_ggH_2j_ML"
        input_columns = [
            "PFMET", "nCleanedJetsPt30", "nCleanedJetsPt30BTagged_bTagSF",
            "nExtraLep", "D_VBF2j_ggH_ME"
        ]
        preprocessor_cuts = lambda row: row["nCleanedJetsPt30"] >= 2
        pre = PCAWhiteningPreprocessor(name=model_name + "_input",
                                       processed_columns=input_columns,
                                       cuts=preprocessor_cuts)
        mod = SimpleModel(model_name, input_columns, global_hyperparams)
        sett = TrainingConfig(max_epochs=global_max_epochs)
        mcoll.add_model(pre, mod, sett)

        # this is the model that will take the place of the VBF2jet/ggH discriminant.
        model_name = "D_VBF_ggH_1j_ML"
        input_columns = [
            "PFMET", "nCleanedJetsPt30", "nCleanedJetsPt30BTagged_bTagSF",
            "nExtraLep", "D_VBF1j_ggH_ME"
        ]
        preprocessor_cuts = lambda row: row["nCleanedJetsPt30"] == 1
        pre = PCAWhiteningPreprocessor(name=model_name + "_input",
                                       processed_columns=input_columns,
                                       cuts=preprocessor_cuts)
        mod = SimpleModel(model_name, input_columns, global_hyperparams)
        sett = TrainingConfig(max_epochs=global_max_epochs)
        mcoll.add_model(pre, mod, sett)

        if weight_path is not None:
            mcoll.load_weights(weight_path + mcoll_name)
        mcolls.append(mcoll)

        # ---------------------------------------------

        H1_stream = {
            MC_path + "WplusH125/ZZ4lAnalysis.root": cuts.WHhadr_cut,
            MC_path + "WminusH125/ZZ4lAnalysis.root": cuts.WHhadr_cut
        }
        H0_stream = {MC_path + "ggH125/ZZ4lAnalysis.root": cuts.mZZ_cut}

        mcoll_name = "D_WHh_ggH_ML"
        mcoll = ModelCollection(mcoll_name, H1_stream, H0_stream)

        model_name = "D_WHh_ggH_2j_ML"
        input_columns = [
            "PFMET", "nCleanedJetsPt30", "nCleanedJetsPt30BTagged_bTagSF",
            "nExtraLep", "D_WHh_ggH_ME"
        ]
        preprocessor_cuts = lambda row: row["nCleanedJetsPt30"] >= 2
        pre = PCAWhiteningPreprocessor(name=model_name + "_input",
                                       processed_columns=input_columns,
                                       cuts=preprocessor_cuts)
        mod = SimpleModel(model_name, input_columns, global_hyperparams)
        sett = TrainingConfig(max_epochs=global_max_epochs)
        mcoll.add_model(pre, mod, sett)

        if weight_path is not None:
            mcoll.load_weights(weight_path + mcoll_name)
        mcolls.append(mcoll)

        # ---------------------------------------------

        H1_stream = {MC_path + "ZH125/ZZ4lAnalysis.root": cuts.ZHhadr_cut}
        H0_stream = {MC_path + "ggH125/ZZ4lAnalysis.root": cuts.mZZ_cut}

        mcoll_name = "D_ZHh_ggH_ML"
        mcoll = ModelCollection(mcoll_name, H1_stream, H0_stream)

        model_name = "D_ZHh_ggH_2j_ML"
        input_columns = [
            "PFMET", "nCleanedJetsPt30", "nCleanedJetsPt30BTagged_bTagSF",
            "nExtraLep", "D_ZHh_ggH_ME"
        ]
        preprocessor_cuts = lambda row: row["nCleanedJetsPt30"] >= 2
        pre = PCAWhiteningPreprocessor(name=model_name + "_input",
                                       processed_columns=input_columns,
                                       cuts=preprocessor_cuts)
        mod = SimpleModel(model_name, input_columns, global_hyperparams)
        sett = TrainingConfig(max_epochs=global_max_epochs)
        mcoll.add_model(pre, mod, sett)

        if weight_path is not None:
            mcoll.load_weights(weight_path + mcoll_name)
        mcolls.append(mcoll)

        # ---------------------------------------------

        H1_stream = {
            MC_path + "WplusH125/ZZ4lAnalysis.root": cuts.WHhadr_cut,
            MC_path + "WminusH125/ZZ4lAnalysis.root": cuts.WHhadr_cut
        }
        H0_stream = {MC_path + "ZH125/ZZ4lAnalysis.root": cuts.ZHhadr_cut}

        mcoll_name = "D_WHh_ZHh_ML"
        mcoll = ModelCollection(mcoll_name, H1_stream, H0_stream)

        model_name = "D_WHh_ZHh_2j_ML"
        input_columns = [
            "PFMET", "nCleanedJetsPt30", "nCleanedJetsPt30BTagged_bTagSF",
            "nExtraLep", "D_WHh_ZHh_ME"
        ]
        preprocessor_cuts = lambda row: row["nCleanedJetsPt30"] >= 2
        pre = PCAWhiteningPreprocessor(name=model_name + "_input",
                                       processed_columns=input_columns,
                                       cuts=preprocessor_cuts)
        mod = SimpleModel(model_name, input_columns, global_hyperparams)
        sett = TrainingConfig(max_epochs=global_max_epochs)
        mcoll.add_model(pre, mod, sett)

        if weight_path is not None:
            mcoll.load_weights(weight_path + mcoll_name)
        mcolls.append(mcoll)

        # ---------------------------------------------

        H1_stream = {MC_path + "VBFH125/ZZ4lAnalysis.root": cuts.mZZ_cut}
        H0_stream = {
            MC_path + "WplusH125/ZZ4lAnalysis.root": cuts.WHhadr_cut,
            MC_path + "WminusH125/ZZ4lAnalysis.root": cuts.WHhadr_cut
        }

        mcoll_name = "D_VBF_WHh_ML"
        mcoll = ModelCollection(mcoll_name, H1_stream, H0_stream)

        model_name = "D_VBF_WHh_2j_ML"
        input_columns = [
            "PFMET", "nCleanedJetsPt30", "nCleanedJetsPt30BTagged_bTagSF",
            "nExtraLep", "D_VBF2j_WHh_ME"
        ]
        preprocessor_cuts = lambda row: row["nCleanedJetsPt30"] >= 2
        pre = PCAWhiteningPreprocessor(name=model_name + "_input",
                                       processed_columns=input_columns,
                                       cuts=preprocessor_cuts)
        mod = SimpleModel(model_name, input_columns, global_hyperparams)
        sett = TrainingConfig(max_epochs=global_max_epochs)
        mcoll.add_model(pre, mod, sett)

        if weight_path is not None:
            mcoll.load_weights(weight_path + mcoll_name)
        mcolls.append(mcoll)

        # ---------------------------------------------

        H1_stream = {MC_path + "VBFH125/ZZ4lAnalysis.root": cuts.mZZ_cut}
        H0_stream = {MC_path + "ZH125/ZZ4lAnalysis.root": cuts.ZHhadr_cut}

        mcoll_name = "D_VBF_ZHh_ML"
        mcoll = ModelCollection(mcoll_name, H1_stream, H0_stream)

        model_name = "D_VBF_ZHh_2j_ML"
        input_columns = [
            "PFMET", "nCleanedJetsPt30", "nCleanedJetsPt30BTagged_bTagSF",
            "nExtraLep", "D_VBF2j_ZHh_ME"
        ]
        preprocessor_cuts = lambda row: row["nCleanedJetsPt30"] >= 2
        pre = PCAWhiteningPreprocessor(name=model_name + "_input",
                                       processed_columns=input_columns,
                                       cuts=preprocessor_cuts)
        mod = SimpleModel(model_name, input_columns, global_hyperparams)
        sett = TrainingConfig(max_epochs=global_max_epochs)
        mcoll.add_model(pre, mod, sett)

        if weight_path is not None:
            mcoll.load_weights(weight_path + mcoll_name)
        mcolls.append(mcoll)

        # ---------------------------------------------

        return mcolls
Example #9
    def GenerateCombinedModelCollectionsReducedCategorySet(
            MC_path, weight_path=None):
        mcolls = []
        global_max_epochs = 100
        global_hyperparams = {
            'LSTM_units': 16,
            'LSTM_output_size': 4,
            'number_dense_layers': 2,
            'number_dense_neurons': 128
        }

        # ---------------------------------------------

        H1_stream = {MC_path + "VBFH125/ZZ4lAnalysis.root": cuts.mZZ_cut}
        H0_stream = {MC_path + "ggH125/ZZ4lAnalysis.root": cuts.mZZ_cut}

        mcoll_name = "D_VBF_ggH_ML"
        mcoll = ModelCollection(mcoll_name, H1_stream, H0_stream)

        # this is the model that will take the place of the VBF2jet/ggH discriminant.
        model_name = "D_VBF_ggH_2j_ML"
        scalar_input_columns = [
            "PFMET", "nCleanedJetsPt30", "nCleanedJetsPt30BTagged_bTagSF",
            "nExtraLep", "D_VBF2j_ggH_ME"
        ]
        list_input_columns = {
            "Jet": ["JetPt", "JetEta", "JetPhi"],
            "Lep": ["LepPt", "LepEta", "LepPhi"],
            "ExtraLep": ["ExtraLepPt", "ExtraLepEta", "ExtraLepPhi"]
        }

        preprocessor_cuts = lambda row: row["nCleanedJetsPt30"] >= 2

        mod = CombinedModel(model_name, scalar_input_columns,
                            list_input_columns, global_hyperparams)
        pre = CombinedPreprocessor(model_name, scalar_input_columns,
                                   PCAWhiteningPreprocessor,
                                   list_input_columns, RNNPreprocessor,
                                   preprocessor_cuts)

        sett = TrainingConfig(max_epochs=global_max_epochs)
        mcoll.add_model(pre, mod, sett)

        model_name = "D_VBF_ggH_1j_ML"
        scalar_input_columns = [
            "PFMET", "nCleanedJetsPt30", "nCleanedJetsPt30BTagged_bTagSF",
            "nExtraLep", "D_VBF1j_ggH_ME"
        ]
        list_input_columns = {
            "Jet": ["JetPt", "JetEta", "JetPhi"],
            "Lep": ["LepPt", "LepEta", "LepPhi"],
            "ExtraLep": ["ExtraLepPt", "ExtraLepEta", "ExtraLepPhi"]
        }

        preprocessor_cuts = lambda row: row["nCleanedJetsPt30"] == 1

        mod = CombinedModel(model_name, scalar_input_columns,
                            list_input_columns, global_hyperparams)
        pre = CombinedPreprocessor(model_name, scalar_input_columns,
                                   PCAWhiteningPreprocessor,
                                   list_input_columns, RNNPreprocessor,
                                   preprocessor_cuts)

        sett = TrainingConfig(max_epochs=global_max_epochs)
        mcoll.add_model(pre, mod, sett)

        if weight_path is not None:
            mcoll.load_weights(weight_path + mcoll_name)
        mcolls.append(mcoll)

        # ---------------------------------------------

        H1_stream = {
            MC_path + "WplusH125/ZZ4lAnalysis.root": cuts.WHhadr_cut,
            MC_path + "WminusH125/ZZ4lAnalysis.root": cuts.WHhadr_cut
        }
        H0_stream = {MC_path + "ggH125/ZZ4lAnalysis.root": cuts.mZZ_cut}

        mcoll_name = "D_WHh_ggH_ML"
        mcoll = ModelCollection(mcoll_name, H1_stream, H0_stream)

        model_name = "D_WHh_ggH_2j_ML"
        scalar_input_columns = [
            "PFMET", "nCleanedJetsPt30", "nCleanedJetsPt30BTagged_bTagSF",
            "nExtraLep", "D_WHh_ggH_ME"
        ]
        list_input_columns = {
            "Jet": ["JetPt", "JetEta", "JetPhi"],
            "Lep": ["LepPt", "LepEta", "LepPhi"],
            "ExtraLep": ["ExtraLepPt", "ExtraLepEta", "ExtraLepPhi"]
        }

        preprocessor_cuts = lambda row: row["nCleanedJetsPt30"] >= 2

        mod = CombinedModel(model_name, scalar_input_columns,
                            list_input_columns, global_hyperparams)
        pre = CombinedPreprocessor(model_name, scalar_input_columns,
                                   PCAWhiteningPreprocessor,
                                   list_input_columns, RNNPreprocessor,
                                   preprocessor_cuts)

        sett = TrainingConfig(max_epochs=global_max_epochs)
        mcoll.add_model(pre, mod, sett)

        if weight_path is not None:
            mcoll.load_weights(weight_path + mcoll_name)
        mcolls.append(mcoll)

        # ---------------------------------------------

        H1_stream = {MC_path + "ZH125/ZZ4lAnalysis.root": cuts.ZHhadr_cut}
        H0_stream = {MC_path + "ggH125/ZZ4lAnalysis.root": cuts.mZZ_cut}

        mcoll_name = "D_ZHh_ggH_ML"
        mcoll = ModelCollection(mcoll_name, H1_stream, H0_stream)

        model_name = "D_ZHh_ggH_2j_ML"
        scalar_input_columns = [
            "PFMET", "nCleanedJetsPt30", "nCleanedJetsPt30BTagged_bTagSF",
            "nExtraLep", "D_ZHh_ggH_ME"
        ]
        list_input_columns = {
            "Jet": ["JetPt", "JetEta", "JetPhi"],
            "Lep": ["LepPt", "LepEta", "LepPhi"],
            "ExtraLep": ["ExtraLepPt", "ExtraLepEta", "ExtraLepPhi"]
        }

        preprocessor_cuts = lambda row: row["nCleanedJetsPt30"] >= 2

        mod = CombinedModel(model_name, scalar_input_columns,
                            list_input_columns, global_hyperparams)
        pre = CombinedPreprocessor(model_name, scalar_input_columns,
                                   PCAWhiteningPreprocessor,
                                   list_input_columns, RNNPreprocessor,
                                   preprocessor_cuts)

        sett = TrainingConfig(max_epochs=global_max_epochs)
        mcoll.add_model(pre, mod, sett)

        if weight_path is not None:
            mcoll.load_weights(weight_path + mcoll_name)
        mcolls.append(mcoll)

        # ---------------------------------------------

        H1_stream = {
            MC_path + "WplusH125/ZZ4lAnalysis.root": cuts.WHhadr_cut,
            MC_path + "WminusH125/ZZ4lAnalysis.root": cuts.WHhadr_cut
        }
        H0_stream = {MC_path + "ZH125/ZZ4lAnalysis.root": cuts.ZHhadr_cut}

        mcoll_name = "D_WHh_ZHh_ML"
        mcoll = ModelCollection(mcoll_name, H1_stream, H0_stream)

        model_name = "D_WHh_ZHh_2j_ML"
        scalar_input_columns = [
            "PFMET", "nCleanedJetsPt30", "nCleanedJetsPt30BTagged_bTagSF",
            "nExtraLep", "D_WHh_ZHh_ME"
        ]
        list_input_columns = {
            "Jet": ["JetPt", "JetEta", "JetPhi"],
            "Lep": ["LepPt", "LepEta", "LepPhi"],
            "ExtraLep": ["ExtraLepPt", "ExtraLepEta", "ExtraLepPhi"]
        }

        preprocessor_cuts = lambda row: row["nCleanedJetsPt30"] >= 2

        mod = CombinedModel(model_name, scalar_input_columns,
                            list_input_columns, global_hyperparams)
        pre = CombinedPreprocessor(model_name, scalar_input_columns,
                                   PCAWhiteningPreprocessor,
                                   list_input_columns, RNNPreprocessor,
                                   preprocessor_cuts)

        sett = TrainingConfig(max_epochs=global_max_epochs)
        mcoll.add_model(pre, mod, sett)

        if weight_path is not None:
            mcoll.load_weights(weight_path + mcoll_name)
        mcolls.append(mcoll)

        # ---------------------------------------------

        H1_stream = {MC_path + "VBFH125/ZZ4lAnalysis.root": cuts.mZZ_cut}
        H0_stream = {
            MC_path + "WplusH125/ZZ4lAnalysis.root": cuts.WHhadr_cut,
            MC_path + "WminusH125/ZZ4lAnalysis.root": cuts.WHhadr_cut
        }

        mcoll_name = "D_VBF_WHh_ML"
        mcoll = ModelCollection(mcoll_name, H1_stream, H0_stream)

        model_name = "D_VBF_WHh_2j_ML"
        scalar_input_columns = [
            "PFMET", "nCleanedJetsPt30", "nCleanedJetsPt30BTagged_bTagSF",
            "nExtraLep", "D_VBF2j_WHh_ME"
        ]
        list_input_columns = {
            "Jet": ["JetPt", "JetEta", "JetPhi"],
            "Lep": ["LepPt", "LepEta", "LepPhi"],
            "ExtraLep": ["ExtraLepPt", "ExtraLepEta", "ExtraLepPhi"]
        }

        preprocessor_cuts = lambda row: row["nCleanedJetsPt30"] >= 2

        mod = CombinedModel(model_name, scalar_input_columns,
                            list_input_columns, global_hyperparams)
        pre = CombinedPreprocessor(model_name, scalar_input_columns,
                                   PCAWhiteningPreprocessor,
                                   list_input_columns, RNNPreprocessor,
                                   preprocessor_cuts)

        sett = TrainingConfig(max_epochs=global_max_epochs)
        mcoll.add_model(pre, mod, sett)

        if weight_path is not None:
            mcoll.load_weights(weight_path + mcoll_name)
        mcolls.append(mcoll)

        # ---------------------------------------------

        H1_stream = {MC_path + "VBFH125/ZZ4lAnalysis.root": cuts.mZZ_cut}
        H0_stream = {MC_path + "ZH125/ZZ4lAnalysis.root": cuts.ZHhadr_cut}

        mcoll_name = "D_VBF_ZHh_ML"
        mcoll = ModelCollection(mcoll_name, H1_stream, H0_stream)

        model_name = "D_VBF_ZHh_2j_ML"
        scalar_input_columns = [
            "PFMET", "nCleanedJetsPt30", "nCleanedJetsPt30BTagged_bTagSF",
            "nExtraLep", "D_VBF2j_ZHh_ME"
        ]
        list_input_columns = {
            "Jet": ["JetPt", "JetEta", "JetPhi"],
            "Lep": ["LepPt", "LepEta", "LepPhi"],
            "ExtraLep": ["ExtraLepPt", "ExtraLepEta", "ExtraLepPhi"]
        }

        preprocessor_cuts = lambda row: row["nCleanedJetsPt30"] >= 2

        mod = CombinedModel(model_name, scalar_input_columns,
                            list_input_columns, global_hyperparams)
        pre = CombinedPreprocessor(model_name, scalar_input_columns,
                                   PCAWhiteningPreprocessor,
                                   list_input_columns, RNNPreprocessor,
                                   preprocessor_cuts)

        sett = TrainingConfig(max_epochs=global_max_epochs)
        mcoll.add_model(pre, mod, sett)

        if weight_path is not None:
            mcoll.load_weights(weight_path + mcoll_name)
        mcolls.append(mcoll)

        return mcolls
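
A hypothetical call site for the two generators above; the directory layout is inferred from the path fragments they use, and both paths are placeholders.

# Illustrative only: MC_path must contain the VBFH125/, ggH125/, WplusH125/,
# WminusH125/ and ZH125/ sample directories referenced above.
MC_path = "/data/MC/"
weight_path = "/data/weights/"   # optional; omit to skip loading pre-trained weights

simple_mcolls = GenerateSimpleModelCollectionsReducedCategorySet(MC_path)
combined_mcolls = GenerateCombinedModelCollectionsReducedCategorySet(
    MC_path, weight_path=weight_path)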
Example #10
import tensorflow_addons as tfa
from tensorflow.keras.callbacks import LearningRateScheduler

from config import TrainingConfig, BayesOptConfig, BoundConfig
from dataset import Chars74K
from cnn import CnnSearchSpace
from search_algorithms import BayesOpt

search_algorithm = BayesOpt


def lr_schedule(epoch):
    if 0 <= epoch < 35:
        return 0.01
    return 0.005


training_config = TrainingConfig(
    dataset=Chars74K("/datasets/chars74k", img_size=(48, 48)),
    epochs=60,
    batch_size=80,
    optimizer=lambda: tfa.optimizers.SGDW(
        learning_rate=0.01, momentum=0.9, weight_decay=0.0001),
    callbacks=lambda: [LearningRateScheduler(lr_schedule)],
)

search_config = BayesOptConfig(search_space=CnnSearchSpace(dropout=0.15),
                               checkpoint_dir="artifacts/cnn_chars74k")

bound_config = BoundConfig(error_bound=0.3,
                           peak_mem_bound=10000,
                           model_size_bound=20000,
                           mac_bound=1000000)