Code Example #1
import sys

# ModelCollectionConfigFileHandler and augment_file are assumed to be
# provided by the surrounding analysis framework
def main():

    if len(sys.argv) != 5:
        print "Error: exactly 4 arguments are required"
        sys.exit(1)

    MC_dir = sys.argv[1]
    setting_dir = sys.argv[2]
    training_path = sys.argv[3]
    data_outpath = sys.argv[4]

    # files that should be augmented with this discriminant
    #data_files = ["ggH125", "VBFH125", "ZH125", "WplusH125", "WminusH125"]

    data_files = [
        "ggH125", "VBFH125", "ZH125", "WplusH125", "WminusH125", "ttH125"
    ]

    #data_files = ["ggH125", "VBFH125", "ZH125", "WplusH125", "WminusH125", "ttH125", "ZZTo4l", "ggTo2e2mu_Contin_MCFM701", "ggTo2mu2tau_Contin_MCFM701", "ggTo4mu_Contin_MCFM701", "ggTo2e2tau_Contin_MCFM701", "ggTo4e_Contin_MCFM701", "ggTo4tau_Contin_MCFM701"]

    confhandler = ModelCollectionConfigFileHandler()
    confhandler.load_configuration(setting_dir + "settings.conf")
    mcolls = confhandler.GetModelCollection(weightpath=training_path)

    for data_file in data_files:
        augment_file(MC_dir, data_outpath, data_file, mcolls)
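
The list of target samples above is hard-coded. As a purely illustrative alternative (a sketch, not part of the original script), the list could instead be read off the directory contents, using the same os.walk pattern that appears in Code Example #6:

# hypothetical: assumes one subdirectory per sample under MC_dir
data_files = next(os.walk(MC_dir))[1]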
Code Example #2
import os
import sys

# ModelCollectionConfigFileHandler and augment_file are assumed to be
# provided by the surrounding analysis framework
def main():

    if len(sys.argv) != 5:
        print "Error: exactly 4 arguments are required"
        sys.exit(1)

    in_folder = sys.argv[1]
    out_folder = sys.argv[2]
    tree_name = sys.argv[3]
    run_dir = sys.argv[4]

    confhandler = ModelCollectionConfigFileHandler()
    confhandler.load_configuration(os.path.join(run_dir, "settings.conf"))
    mcolls = confhandler.GetModelCollection(weightpath=os.path.join(run_dir, "training/"))

    augment_file(in_folder, out_folder, tree_name, mcolls)
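
This version augments a single location at a time. A hypothetical batch driver around it (a sketch only; augment_file and its signature come from the surrounding framework) could mirror the per-sample loop of Code Example #1:

# hypothetical: assumes one subdirectory per sample under in_folder
def augment_all(in_folder, out_folder, tree_name, mcolls):
    for sample in next(os.walk(in_folder))[1]:
        augment_file(os.path.join(in_folder, sample),
                     os.path.join(out_folder, sample), tree_name, mcolls)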
Code Example #3
import sys

from keras import optimizers

# ModelCollectionConfigFileHandler and Trainer are assumed to be provided by
# the surrounding analysis framework
def main():

    if len(sys.argv) != 3:
        print "Error: exactly 2 arguments are required"
        sys.exit(1)

    #/data_CMS/cms/wind/CJLST_NTuples/
    #MC_dir = sys.argv[1]
    setting_dir = sys.argv[1]
    training_dir = sys.argv[2]

    confhandler = ModelCollectionConfigFileHandler()
    confhandler.load_configuration(setting_dir + "settings.conf")
    mcolls = confhandler.GetModelCollection()

    train = Trainer(training_dir)
    opt = optimizers.SGD(lr=0.01, momentum=0.9, decay=1e-6)
    #opt = optimizers.Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = K.epsilon(), decay = 0.0)

    for mcoll in mcolls:
        train.train(mcoll, optimizer=opt, MC_weighting=False)
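
The SGD optimizer above uses time-based learning-rate decay. Assuming the standard Keras schedule lr_t = lr / (1 + decay * iteration), the settings chosen here evolve as follows (illustrative check only):

lr, decay = 0.01, 1e-6
for it in [0, 100000, 1000000]:
    print "iteration %7d: effective lr = %.6f" % (it, lr / (1.0 + decay * it))
# iteration       0: effective lr = 0.010000
# iteration  100000: effective lr = 0.009091
# iteration 1000000: effective lr = 0.005000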
Code Example #4
import os

# ModelCollectionConfigFileHandler is assumed to be provided by the
# surrounding analysis framework
def distribute_training_settings(run_path):

    # load the configuration file located in the run directory
    confhandler = ModelCollectionConfigFileHandler()
    confhandler.load_configuration(run_path + "settings.conf")

    # these are all the model collections that need to be trained
    mcolls = confhandler.GetModelCollection()

    # create the folder holding the settings for the individual models and their training
    settings_dir = run_path + "settings_training/"
    if not os.path.exists(settings_dir):
        os.makedirs(settings_dir)

    # iterate over these models and make a separate config file for each of them
    for mcoll in mcolls:
        training_settings_dir = settings_dir + mcoll.name + "/"
    
        if not os.path.exists(training_settings_dir):
            os.makedirs(training_settings_dir)
    
        outconf = ModelCollectionConfigFileHandler()
        outconf.ToConfiguration([mcoll])
        outconf.save_configuration(training_settings_dir + "settings.conf")
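
Since all paths in this function are assembled by plain string concatenation rather than os.path.join, the run path passed in must end with a trailing slash, exactly as in the call made from Code Example #6:

distribute_training_settings("/path/to/run/")  # trailing slash is required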
Code Example #5
import sys

import numpy as np
from keras import optimizers

# Generator, Config, ConfigFileHandler and ModelCollectionConfigFileHandler
# are assumed to be provided by the surrounding analysis framework
def main():
    def _compute_class_weights_lengths(gen, preprocessor, MC_weighting=False):
        # determine the actual size of the available dataset and adjust the sample weights correspondingly
        H1_data = gen.H1_collection.get_data(Config.branches, 0.0, 1.0)
        H0_data = gen.H0_collection.get_data(Config.branches, 0.0, 1.0)
        H1_length = len(preprocessor.process(H1_data).values()[0])
        H1_indices = preprocessor.get_last_indices()
        H0_length = len(preprocessor.process(H0_data).values()[0])
        H0_indices = preprocessor.get_last_indices()

        print "H1_length = " + str(H1_length)
        print "H0_length = " + str(H0_length)

        # if per-sample weighting is enabled, also set up the normalization of the event weights
        if MC_weighting:
            H1_weight_sum = np.sum(
                np.maximum(np.array(H1_data["training_weight"][H1_indices]),
                           0.0))
            H0_weight_sum = np.sum(
                np.maximum(np.array(H0_data["training_weight"][H0_indices]),
                           0.0))

            H1_class_weight = float(H0_length) / H1_weight_sum
            H0_class_weight = float(H1_length) / H0_weight_sum
        else:
            # H1_class_weight = 1.0
            # H0_class_weight = float(H1_length) / float(H0_length)
            H1_class_weight = 1.0 + float(H0_length) / float(H1_length)
            H0_class_weight = 1.0 + float(H1_length) / float(H0_length)
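            # with these weights each class contributes the same total:
            # H1_length * H1_class_weight = H0_length * H0_class_weight
            #                             = H1_length + H0_length
            # e.g. H1_length = 1000 and H0_length = 3000 give
            # H1_class_weight = 4.0 and H0_class_weight ~ 1.33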

        return H1_class_weight, H0_class_weight, H1_length, H0_length

    # this computes low-level performance metrics for a model collection, i.e. the mean-squared
    # error computed on the validation dataset for each discriminant. Since the validation datasets
    # are held constant, this is an easy way to directly compare different models

    setting_dir = sys.argv[1]
    training_dir = sys.argv[2]
    out_dir = sys.argv[3]

    # first, need to read in the trained ModelCollection:
    mconfhandler = ModelCollectionConfigFileHandler()
    mconfhandler.load_configuration(setting_dir + "settings.conf")
    mcolls = mconfhandler.GetModelCollection(weightpath=training_dir)

    confhandler = ConfigFileHandler()
    out_path = out_dir + "model_benchmark.txt"

    # for the evaluation, need to proceed in the same way as for training, but evaluate the models on the validation
    # data instead of training them on the training data

    for mcoll in mcolls:
        models = mcoll.get_models()
        preprocessors = mcoll.get_preprocessors()
        settings = mcoll.get_settings()

        for cur_model, cur_preprocessor, cur_settings in zip(
                models, preprocessors, settings):
            val_gen = Generator(mcoll.H1_stream,
                                mcoll.H0_stream,
                                Config.branches,
                                preprocessor=cur_preprocessor,
                                chunks=1,
                                MC_weighting=False)
            val_gen.setup_validation_data()
            val_H1_classweight, val_H0_classweight, H1_length, H0_length = _compute_class_weights_lengths(
                val_gen, cur_preprocessor, False)
            print val_H1_classweight
            print val_H0_classweight
            print H1_length
            print H0_length
            val_gen.set_H1_weight(val_H1_classweight)
            val_gen.set_H0_weight(val_H0_classweight)
            val_gen.set_minimum_length(0)
            cur_model.get_keras_model().compile(optimizer=optimizers.Adam(),
                                                loss="mean_squared_error",
                                                metrics=["binary_accuracy"])
            res = cur_model.get_keras_model().evaluate_generator(
                val_gen.preprocessed_generator(), steps=1)
            print "statistics for model " + cur_model.name
            print res
            print cur_model.get_keras_model().metrics_names

            confhandler.new_section(cur_model.name)
            confhandler.set_field(cur_model.name, 'H0_val_length',
                                  str(H0_length))
            confhandler.set_field(cur_model.name, 'H1_val_length',
                                  str(H1_length))
            confhandler.set_field(cur_model.name, 'val_loss', str(res[0]))

    confhandler.save_configuration(out_path)
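
For orientation, the resulting model_benchmark.txt contains one section per model holding the fields written above. Assuming ConfigFileHandler uses an INI-style layout, an excerpt could look like this (section name and numbers are placeholders only):

[some_model]
H0_val_length = 10000
H1_val_length = 10000
val_loss = 0.05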
Code Example #6
import os
import shutil
import sys

import numpy as np

# ConfigFileHandler, ConfigFileUtils, ModelCollectionConfigFileHandler,
# distribute_training_settings and get_loss are assumed to be provided by the
# surrounding analysis framework
def main():
    # runs to check for (good) models (the first run passed is taken as the reference run from
    # which the available models are read - all other runs are expected to follow the same structure):
    input_runs = []

    print "==================================================================="
    print "looking for models in the following runs:"

    for campaign_dir in sys.argv[1:-2]:
        for run_dir in next(os.walk(campaign_dir))[1]:
            if not "bin" in run_dir:
                run_path = os.path.join(campaign_dir, run_dir)
                print run_path
                input_runs.append(run_path)

    print "==================================================================="

    # the output training campaign: it combines the models found in the campaigns listed above
    # in such a way that the overall performance is optimized
    output_run = os.path.join(sys.argv[-1], "optimized")

    # where the configuration file for the hyperparameter settings should be stored
    hyperparam_output = os.path.join(output_run, "../hyperparameters.conf")

    os.makedirs(output_run)

    # load the available model names
    reference_run = input_runs[0]
    available_mcolls = os.walk(os.path.join(reference_run,
                                            "training")).next()[1]

    mcolls_winning = []

    for mcoll in available_mcolls:
        models = os.walk(os.path.join(reference_run, "training",
                                      mcoll)).next()[1]

        # load a representative version of the current model collection...
        mconfhandler = ModelCollectionConfigFileHandler()
        mconfhandler.load_configuration(
            os.path.join(reference_run, "settings_training", mcoll,
                         "settings.conf"))
        mcoll_template = mconfhandler.GetModelCollection()[0]

        # ... but strip away all the actual model components
        mcoll_template.model_dict = {}
        mcoll_template.preprocessor_dict = {}
        mcoll_template.settings_dict = {}

        for model in models:
            # compare this model across the different runs
            losses = [get_loss(run, mcoll, model) for run in input_runs]

            winner = np.argmin(losses)

            winning_run = input_runs[winner]

            # copy the winning model into the output run
            shutil.copytree(
                os.path.join(winning_run, "training", mcoll, model),
                os.path.join(output_run, "training", mcoll, model))

            print "--------------------------------------------"
            print " take " + model + " from " + winning_run
            print "--------------------------------------------"

            # load the winning model to keep track of its settings
            mconfhandler = ModelCollectionConfigFileHandler()
            mconfhandler.load_configuration(
                os.path.join(winning_run, "settings_training", mcoll,
                             "settings.conf"))
            mcoll_winning = mconfhandler.GetModelCollection()[0]

            # then pull the winning model over into the template
            winning_model = mcoll_winning.model_dict[model]
            winning_preprocessor = mcoll_winning.preprocessor_dict[model]
            winning_settings = mcoll_winning.settings_dict[model]

            mcoll_template.add_model(winning_preprocessor, winning_model,
                                     winning_settings)

        mcolls_winning.append(mcoll_template)

    # save the assembled config file into the output run as well
    mconfhandler = ModelCollectionConfigFileHandler()
    mconfhandler.ToConfiguration(mcolls_winning)
    mconfhandler.save_configuration(os.path.join(output_run, "settings.conf"))

    # now distribute the training settings again, as usual:
    distribute_training_settings(output_run + '/')

    # now create the hyperparameter config file for each model, taken from the winners
    hp_confhandler = ConfigFileHandler()
    for mcoll in mcolls_winning:
        for model_name, model in mcoll.model_dict.iteritems():
            hp_confhandler.new_section(model_name)
            hp_confhandler.set_field(
                model_name, "hyperparameters",
                ConfigFileUtils.serialize_dict(model.hyperparameters,
                                               lambda x: str(x)))

    hp_confhandler.save_configuration(hyperparam_output)

    print "==================================================================="
    print "hyperparameter configuration file written to " + hyperparam_output
    print "==================================================================="