Example #1
def run():
    tf.reset_default_graph()

    train_dataset, val_dataset = load_all_datasets()

    handle, training_iterator, validation_iterator, next_element = load_iterators(train_dataset, val_dataset)

    sess = tf.Session()

    sess.run(tf.global_variables_initializer())

    # The string handles select which iterator feeds next_element on each sess.run().
    training_handle = sess.run(training_iterator.string_handle())
    validation_handle = sess.run(validation_iterator.string_handle())

    # Count training examples by draining the training split; the iterator
    # signals exhaustion with tf.errors.OutOfRangeError.
    i = 0

    while True:
        try:
            sess.run(next_element[0], feed_dict={handle: training_handle})
            i += BATCH_SIZE
        except tf.errors.OutOfRangeError:
            print("Train examples: {} +/- {}".format(i, BATCH_SIZE))
            break

    # Repeat for the validation split.
    i = 0

    while True:
        try:
            sess.run(next_element[0], feed_dict={handle: validation_handle})
            i += BATCH_SIZE
        except tf.errors.OutOfRangeError:
            print("Validation examples: {} +/- {}".format(i, BATCH_SIZE))
            break
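
This example relies on project helpers, load_all_datasets() and load_iterators(), that are not shown on this page (Example #2 below uses the same helpers). As a rough orientation only, here is a hypothetical sketch of what such helpers could look like with the TF1 feedable-iterator pattern (tf.data.Iterator.from_string_handle); the tensor shapes, BATCH_SIZE value, and dummy data are assumptions, not the projects' actual code.

import tensorflow as tf

BATCH_SIZE = 8  # assumed value; the real constant lives in the project

def load_all_datasets():
    # Hypothetical stand-in: each element is an (image, label) pair of dummy
    # tensors, batched so one next_element fetch consumes BATCH_SIZE items.
    imgs = tf.zeros([128, 32, 32, 32, 1])
    labels = tf.zeros([128, 32, 32, 32, 1])
    train_dataset = tf.data.Dataset.from_tensor_slices((imgs, labels)).batch(BATCH_SIZE)
    val_dataset = tf.data.Dataset.from_tensor_slices((imgs[:32], labels[:32])).batch(BATCH_SIZE)
    return train_dataset, val_dataset

def load_iterators(train_dataset, val_dataset):
    # Feedable-iterator pattern: the string `handle` placeholder decides, at
    # sess.run() time, whether next_element is drawn from the training or the
    # validation iterator.
    handle = tf.placeholder(tf.string, shape=[])
    iterator = tf.data.Iterator.from_string_handle(
        handle, train_dataset.output_types, train_dataset.output_shapes)
    next_element = iterator.get_next()
    training_iterator = train_dataset.make_one_shot_iterator()
    validation_iterator = val_dataset.make_one_shot_iterator()
    return handle, training_iterator, validation_iterator, next_element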
Example #2
File: train.py Project: dPys/deepbrain
def run():
    tf.reset_default_graph()

    train_dataset, val_dataset = load_all_datasets()

    handle, training_iterator, validation_iterator, next_element = load_iterators(
        train_dataset, val_dataset)

    training, img, labels, out, merged, upd = model(*next_element)

    saver = tf.train.Saver(max_to_keep=2)

    sess = tf.Session()

    train_writer = tf.summary.FileWriter('./logs/train', sess.graph)
    val_writer = tf.summary.FileWriter('./logs/val', sess.graph)

    sess.run(tf.global_variables_initializer())

    training_handle = sess.run(training_iterator.string_handle())
    validation_handle = sess.run(validation_iterator.string_handle())

    i = 0

    spinner = Halo(text='Training', spinner='dots')
    # Launch TensorBoard in the background so the ./logs summaries can be
    # inspected on port 6006 while training runs.
    subprocess.Popen([
        "tensorboard", "--logdir", "./logs", "--port", "6006", "--host",
        "0.0.0.0"
    ])

    spinner.start()
    # img_out, labels_out = sess.run([img, labels], feed_dict={handle: training_handle})
    # img_out = img_out[0].squeeze()
    # labels_out = labels_out[0].squeeze()

    # import nibabel as nib
    # nib.save(nib.Nifti1Image(img_out, np.eye(4)), "original.nii")
    # nib.save(nib.Nifti1Image(labels_out, np.eye(4)), "seg.nii")
    # Train indefinitely: write training summaries every step, validation
    # summaries every 100 steps, and a checkpoint every 1000 steps. The loop
    # only ends when the process is interrupted, so stop the spinner in a
    # finally block.
    try:
        while True:
            m, _ = sess.run([merged, upd], feed_dict={handle: training_handle})
            train_writer.add_summary(m, i)

            if i % 100 == 0:
                m = sess.run(merged,
                             feed_dict={
                                 training: False,
                                 handle: validation_handle
                             })
                val_writer.add_summary(m, i)
                val_writer.flush()

            if i % 1000 == 0 and i > 0:
                # Save model (at most two checkpoints are kept, per max_to_keep=2)
                saver.save(sess, "./models/model.ckpt", global_step=i)

            i += 1
    finally:
        spinner.stop()
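
Example #2 also assumes a model() helper with the contract training, img, labels, out, merged, upd = model(*next_element): a boolean training placeholder, the input tensors, the network output, a merged-summaries op, and the optimizer's update op. A minimal hypothetical sketch of that contract follows; the actual deepbrain network is a 3D CNN and is not reproduced here.

import tensorflow as tf

def model(img, labels):
    # Hypothetical sketch of the interface train.py expects, not the real network.
    # `training` would normally gate dropout/batch norm; here it is only a flag.
    training = tf.placeholder_with_default(True, shape=[], name="training")
    logits = tf.layers.conv3d(img, filters=1, kernel_size=3, padding="same")
    out = tf.nn.sigmoid(logits)
    loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=labels, logits=logits)
    tf.summary.scalar("loss", loss)
    merged = tf.summary.merge_all()                      # fetched for the FileWriters
    upd = tf.train.AdamOptimizer(1e-4).minimize(loss)    # fetched as the train op
    return training, img, labels, out, merged, upd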
Example #3
def main(experiment_config):
    """Sets the configurations according to `experiment_config` and runs them.
    """
    raw_output_filepath = os.path.join(experiment_config["output folder"],
                                       'raw_output.csv')
    with open(raw_output_filepath, 'w') as fout:
        init_raw_output_csv(fout, output_split_char=',')
        criteria_list = get_criteria(experiment_config["criteria"])

        if "starting seed index" not in experiment_config:
            starting_seed = 1
        else:
            starting_seed = experiment_config["starting seed index"]

        if experiment_config["prunning parameters"]["use chi-sq test"]:
            max_p_value_chi_sq = experiment_config["prunning parameters"][
                "max chi-sq p-value"]
            decision_tree.MIN_SAMPLES_IN_SECOND_MOST_FREQUENT_VALUE = experiment_config[
                "prunning parameters"]["second most freq value min samples"]
        else:
            max_p_value_chi_sq = None
            decision_tree.MIN_SAMPLES_IN_SECOND_MOST_FREQUENT_VALUE = None

        decision_tree.USE_MIN_SAMPLES_SECOND_LARGEST_CLASS = experiment_config[
            "prunning parameters"]["use second most freq class min samples"]
        if decision_tree.USE_MIN_SAMPLES_SECOND_LARGEST_CLASS:
            decision_tree.MIN_SAMPLES_SECOND_LARGEST_CLASS = experiment_config[
                "prunning parameters"]["second most freq class min samples"]
        else:
            decision_tree.MIN_SAMPLES_SECOND_LARGEST_CLASS = None

        if experiment_config["use all datasets"]:
            datasets_configs = dataset.load_all_configs(
                experiment_config["datasets basepath"])
            datasets_configs.sort(key=lambda config: config["dataset name"])
        else:
            datasets_folders = [
                os.path.join(experiment_config["datasets basepath"],
                             folderpath)
                for folderpath in experiment_config["datasets folders"]
            ]
            datasets_configs = [
                dataset.load_config(folderpath)
                for folderpath in datasets_folders
            ]
        if experiment_config["load one dataset at a time"]:
            datasets = dataset.load_all_datasets(
                datasets_configs, experiment_config["use numeric attributes"])
            for ((dataset_name, curr_dataset),
                 min_num_samples_allowed) in itertools.product(
                     datasets, experiment_config["prunning parameters"]
                     ["min num samples allowed"]):
                for criterion in criteria_list:
                    print('-' * 100)
                    print(criterion.name)
                    print()
                    run(dataset_name,
                        curr_dataset,
                        experiment_config["num training samples"],
                        criterion,
                        min_num_samples_allowed=min_num_samples_allowed,
                        max_depth=experiment_config["max depth"],
                        num_trials=experiment_config["num trials"],
                        starting_seed=starting_seed,
                        use_numeric_attributes=experiment_config[
                            "use numeric attributes"],
                        use_chi_sq_test=experiment_config[
                            "prunning parameters"]["use chi-sq test"],
                        max_p_value_chi_sq=max_p_value_chi_sq,
                        output_file_descriptor=fout,
                        output_split_char=',')
        else:
            for (dataset_config, min_num_samples_allowed) in itertools.product(
                    datasets_configs, experiment_config["prunning parameters"]
                ["min num samples allowed"]):
                curr_dataset = dataset.Dataset(
                    dataset_config["filepath"],
                    dataset_config["key attrib index"],
                    dataset_config["class attrib index"],
                    dataset_config["split char"],
                    dataset_config["missing value string"],
                    experiment_config["use numeric attributes"])
                for criterion in criteria_list:
                    print('-' * 100)
                    print(criterion.name)
                    print()
                    run(dataset_config["dataset name"],
                        curr_dataset,
                        experiment_config["num training samples"],
                        criterion,
                        min_num_samples_allowed=min_num_samples_allowed,
                        max_depth=experiment_config["max depth"],
                        num_trials=experiment_config["num trials"],
                        starting_seed=starting_seed,
                        use_numeric_attributes=experiment_config[
                            "use numeric attributes"],
                        use_chi_sq_test=experiment_config[
                            "prunning parameters"]["use chi-sq test"],
                        max_p_value_chi_sq=max_p_value_chi_sq,
                        output_file_descriptor=fout,
                        output_split_char=',')
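
main() is driven entirely by the experiment_config mapping (typically parsed from a JSON file). The sketch below lists every key the function reads; the key names come from the code above, while the values are purely illustrative placeholders.

# Hypothetical experiment_config covering every key read by main() above;
# the values are placeholders, not the project's defaults.
experiment_config = {
    "output folder": "./output",                 # raw_output.csv is written here
    "criteria": ["Gini Gain", "Twoing"],         # passed to get_criteria(); placeholder names
    "starting seed index": 1,                    # optional; defaults to 1 when absent
    "prunning parameters": {
        "use chi-sq test": True,
        "max chi-sq p-value": 0.1,
        "second most freq value min samples": 40,
        "use second most freq class min samples": True,
        "second most freq class min samples": 40,
        "min num samples allowed": [1],          # iterated via itertools.product
    },
    "use all datasets": False,
    "datasets basepath": "./datasets",
    "datasets folders": ["adult", "mushroom"],   # only read when "use all datasets" is False
    "load one dataset at a time": True,
    "use numeric attributes": False,
    "num training samples": 100,
    "max depth": 5,
    "num trials": 10,
}

main(experiment_config)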