Example #1
def main():

    config, model, dataset_train, _, dataset_test = argparser(is_train=False)

    evaler = Evaler(config, model, dataset_test)

    log.warning("dataset: %s", config.dataset)
    evaler.eval_run()
Example #2
def main():

    config, model, dataset_train, dataset_test = argparser(is_train=False)

    trainer = Trainer(config, model, dataset_train, dataset_test)

    log.warning("dataset: %s", config.dataset)
    trainer.train()
Example #3
def main():

    config, model, dataset_train, dataset_train_unlabel, dataset_test = argparser(is_train=True)

    trainer = Trainer(config, model, dataset_train, dataset_train_unlabel, dataset_test)

    log.warning("dataset: %s, learning_rate_g: %f, learning_rate_d: %f",
                config.dataset, config.learning_rate_g, config.learning_rate_d)
    trainer.train()
Example #4
def main():
    print("step0")
    config, model, dataset_train, dataset_test = argparser(is_train=True)
    print("step1")
    trainer = Trainer(config, model, dataset_train, dataset_test)

    log.warning("dataset: %s, learning_rate_g: %f, learning_rate_d: %f",
                config.dataset, config.learning_rate_g, config.learning_rate_d)
    trainer.train()
Example #5
def main():

    os.environ["CUDA_VISIBLE_DEVICES"] = "1"

    config, model, dataset_train, dataset_test = argparser(is_train=False)

    evaler = Evaler(config, model, dataset_test)

    log.warning("dataset: %s", config.dataset)
    evaler.eval_run()
Example #6
def main(_):
    def shutdown(signum, frame):
        logger.warning('Received signal %s: exiting', signum)
        sys.exit(128 + signum)

    signal.signal(signal.SIGHUP, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    signal.signal(signal.SIGTERM, shutdown)
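    # NOTE: SIGHUP does not exist on Windows; for portability, guard each
    # registration with getattr(signal, 'SIGHUP', None) and skip if absent.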

    args = config.argparser()
    run(args)
Example #7
def main():

    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    config, model, dataset_train, dataset_test = argparser(is_train=True)

    trainer = Trainer(config, model, dataset_train, dataset_test)

    log.warning("dataset: %s, learning_rate_g: %f, learning_rate_d: %f",
                config.dataset, config.learning_rate_g, config.learning_rate_d)
    trainer.train()
Example #8
def main():

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

    config, model, dataset_train, dataset_val, dataset_test = argparser(
        is_train=True)

    trainer = Trainer(config, model, dataset_train)

    log.warning("dataset_path: %s, learning_rate: %f", config.dataset_path,
                config.learning_rate)
    trainer.train()
Example #9
def main():

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

    config, model, dataset_train, dataset_val, dataset_test = argparser(is_train=False)
    log.warning("dataset path: %s", config.dataset_path)

    evaler_val = Evaler(config, model, dataset_val, 'val')
    evaler_val.eval_run()

    evaler_train = Evaler(config, model, dataset_train, 'train')
    evaler_train.eval_run()

    evaler_test = Evaler(config, model, dataset_test, 'test')
    evaler_test.eval_run()
Example #10
def main():

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

    config, model, dataset_train, dataset_val, dataset_test = argparser(
        is_train=False)
    log.warning("dataset path: %s", config.dataset_path)

    #viewer_val = Visualizer(config, model, dataset_val, 'val')
    #viewer_val.vis_run()
    #config.batch_size = viewer_val.batch_size

    #viewer_train = Visualizer(config, model, dataset_train, 'train')
    #viewer_train.vis_run()
    #config.batch_size = viewer_train.batch_size

    viewer_test = Visualizer(config, model, dataset_test, 'test')
    viewer_test.vis_run()
Example #11
def GA_train(population):

    for i in range(population.shape[0]):
        print("population ", i)
        print(population.iloc[i])
        tf.reset_default_graph()
        config, model, dataset_train, dataset_test = argparser(is_train=True)
        config.learning_rate_g = population.iloc[i]["learning_rate_g"]
        config.learning_rate_d = population.iloc[i]["learning_rate_d"]
        config.update_rate = population.iloc[i]["update_rate"]
        config.batch_size = population.iloc[i]["batch_size"]
        config.n_z = population.iloc[i]["n_z"]
        config.buffer_size = population.iloc[i]["buffer_size"]
        config.real_probability = population.iloc[i]["real_probability"]

        model = Model(config, debug_information=config.debug, is_train=True)

        trainer = Trainer(config, model, dataset_train, dataset_test)

        population.loc[i, "accuracy_score"] = trainer.run_GA()

    return population
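
GA_train expects population to be a pandas DataFrame with one row per candidate and one column per hyperparameter it reads, plus an accuracy_score column it fills in. A hypothetical two-candidate seed population (the column names match the fields read above; the values are illustrative only):

import pandas as pd

# Each row is one candidate hyperparameter set for the genetic search.
population = pd.DataFrame({
    "learning_rate_g": [1e-4, 2e-4],
    "learning_rate_d": [1e-4, 5e-5],
    "update_rate": [1, 2],
    "batch_size": [32, 64],
    "n_z": [64, 128],
    "buffer_size": [1000, 5000],
    "real_probability": [0.9, 0.8],
})
population["accuracy_score"] = 0.0  # overwritten by GA_train
population = GA_train(population)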
Example #12
import sys
import random
import pickle
import numpy as np
import tensorflow as tf

from config import argparser

args = argparser()

with open(args.dataset_dir + 'dataset.pkl', 'rb') as f:
    train_set = pickle.load(f, encoding='latin1')
    test_set = pickle.load(f, encoding='latin1')
    cate_list = pickle.load(f, encoding='latin1')
    cate_list = tf.convert_to_tensor(cate_list, dtype=tf.int64)
    user_count, item_count, cate_count = pickle.load(f)


class DataLoader:
    def __init__(self, batch_size, data):
        self.batch_size = batch_size
        self.data = data
        self.epoch_size = len(self.data) // self.batch_size
        if self.epoch_size * self.batch_size < len(self.data):
            self.epoch_size += 1
        self.i = 0

    def __iter__(self):
        self.i = 0
        return self
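
The DataLoader above is cut off before its __next__ method. A minimal completion (an assumption, since the original body is not shown) that serves the data batch by batch could look like:

    def __next__(self):
        # Stop once every batch in the epoch has been served.
        if self.i == self.epoch_size:
            raise StopIteration
        # Slicing past the end yields the short final batch automatically.
        batch = self.data[self.i * self.batch_size:(self.i + 1) * self.batch_size]
        self.i += 1
        return batch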
Example #13
    else:
        trainer.evaluate()
        logger.info("Finish evaluating")


def make_log_files(config):
    """
    Sets up log directories and saves git diff and command line.
    """
    config.run_name = '{}.{}.{}'.format(config.prefix, config.seed,
                                        config.suffix)

    config.log_dir = os.path.join(config.log_root_dir, config.run_name)
    logger.info('Create log directory: %s', config.log_dir)
    os.makedirs(config.log_dir, exist_ok=True)

    if config.is_train:
        # log config
        param_path = os.path.join(config.log_dir, 'params.json')
        logger.info('Store parameters in %s', param_path)
        with open(param_path, 'w') as fp:
            json.dump(config.__dict__, fp, indent=4, sort_keys=True)


if __name__ == '__main__':
    args, unparsed = argparser()
    if unparsed:
        logger.error('Unparsed arguments detected:\n%s', unparsed)
    else:
        run(args)
Example #14

def debug_test(env, config):
    printstar("Debug")
    obs = env.reset()
    init_car = [
        env.car.hull.position[0], env.car.hull.position[1], env.car.hull.angle,
        env.car.hull.linearVelocity[0], env.car.hull.linearVelocity[1]
    ]  #, env.info]
    obs, reward, dones, info = env.step(1)
    for _ in range(100):
        obs, reward, dones, info = env.step(1)
        env.render()
        obs, reward, dones, info = env.step(3)
        env.render()

    _ = input("Moving state")
    print(env._is_outside())
    small_network.change_state(env, init_car)
    for _ in range(2):
        obs, reward, dones, info = env.step(env.action_space.sample())
        env.render()
    print(env._is_outside())
    _ = input("Moved state")


if __name__ == '__main__':
    config = argparser()
    mp.set_start_method('spawn', force=True)
    main(config)
Example #15
def main(argv=None):  # pylint: disable=unused-argument

    config = argparser(is_train=True)
    all_train_dir, all_result_file_name = construct_train_dir(config)

    dataset_dirs = {
        'train_MRIdata_3_AD_MCI_Normal.hdf5': r"./datasets/mri/3_AD_MCI_Normal/",
        'train_MRIdata_2_AD_MCI.hdf5': r"./datasets/mri/2_AD_MCI/",
        'train_MRIdata_2_AD_Normal.hdf5': r"./datasets/mri/2_AD_Normal/",
    }
    dataset_path = dataset_dirs.get(config.hdf5FileNametrain,
                                    r"./datasets/mri/2_MCI_Normal/")

    input_file_name = config.hdf5FileNametrain
    name_list = input_file_name.split("_")
    class_num = name_list[2]

    dataset_train_unlabelled, dataset_test, all_hdf5_data_train, all_hdf5_data_test, dataset_train_labelled, dataset_val, all_hdf5_data_val = dataset.create_default_splits8020(
        dataset_path, config.hdf5FileNametrain, config.testhdf5FileName,
        config.valhdf5FileName, config.idFileNametrain, config.testidFileName,
        config.validFileName, config.num_less_label_data, class_num)

    data_provider = get_data_provider_by_path(config, dataset_train_unlabelled,
                                              dataset_train_labelled,
                                              dataset_test,
                                              all_hdf5_data_train,
                                              all_hdf5_data_test, dataset_val,
                                              all_hdf5_data_val, 0)

    model = TripleGAN3D(config, data_provider, all_train_dir, 0, is_train=True)

    if tf.gfile.Exists(all_train_dir[0] + "/GANconfusionMatrixResults"):
        log.infov(all_train_dir[0] + "/GANconfusionMatrixResults")
    else:
        os.makedirs(all_train_dir[0] + "/GANconfusionMatrixResults")

    if config.train:
        total_start_time = time.time()
        print("Data provider train labelled images: ",
              data_provider.train_labelled.num_examples)
        print("Data provider train unlabelled images: ",
              data_provider.train_unlabelled.num_examples)
        best_epoch = model.train_all_epochs(config)
        total_training_time = time.time() - total_start_time

        print(
            "\n  Total training time for all epochs: %s and %s seconds" %
            (str(timedelta(seconds=total_training_time)), total_training_time))

        with open(
                all_train_dir[0] +
                "/GANconfusionMatrixResults/train_timeReport.txt", 'w') as fxx:
            fxx.write(
                "\n  Total training time for all epochs: %s and %s seconds" %
                (str(timedelta(seconds=total_training_time)),
                 total_training_time))
            fxx.write('\n')

    if config.test:
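        # NOTE: best_epoch is only set by the training branch above; running
        # with config.test but not config.train leaves it undefined.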

        model.load_model(best_epoch)
        print("Data provider test images: ", data_provider.test.num_examples)
        print("Testing...")

        total_start_time = time.time()

        model.test_and_record(all_result_file_name[0],
                              0,
                              config,
                              all_train_dir[0],
                              data_provider.test,
                              batch_size=config.batch_size_label)

        total_test_time = time.time() - total_start_time

        print("\n   Total test time for all epoches : %s  and %s seconds" %
              (str(timedelta(seconds=total_test_time)), total_test_time))

        fxx = open(
            all_train_dir[0] +
            "/GANconfusionMatrixResults/test_timeReport.txt", 'w')

        fxx.write("\n  Total test time for all epoches : %s  and %s seconds" %
                  (str(timedelta(seconds=total_test_time)), total_test_time))
        fxx.write('\n')

        fxx.write(
            "\n   test time for each record : %s  and %s seconds" %
            (str(
                timedelta(seconds=(total_test_time /
                                   float(data_provider.test.num_examples)))),
             (total_test_time / float(data_provider.test.num_examples))))
        fxx.write('\n')

        fxx.close()

    input_file_name = config.hdf5FileNametrain
    class_labels = []
    name_list = input_file_name.split("_")
    if int(name_list[2]) == 3:
        class_labels.append(name_list[3])
        class_labels.append(name_list[4])
        last_class = name_list[5].split(".")
        class_labels.append(last_class[0])
    else:
        class_labels.append(name_list[3])
        last_class = name_list[4].split(".")
        class_labels.append(last_class[0])

    accuracy_10folds_all = []
    fold_write = 0
    for each_result_file_name in all_result_file_name:
        if fold_write < 1:
            accuracy, cr, cm, auc = calculateConfusionMatrix(
                each_result_file_name, class_labels, all_train_dir[0],
                int(name_list[2]))
        else:
            accuracy, cr, cm, auc = calculateConfusionMatrix(
                each_result_file_name, class_labels, './train_dir',
                int(name_list[2]))

        with open(
                all_train_dir[0] +
                "/GANconfusionMatrixResults/ConfusionMatrix.txt", 'w') as f:
            log.info("Fold: {}".format(fold_write))
            f.write(each_result_file_name)
            f.write(
                '{}\n\nClassification Report\n\n{}\n\nConfusion Matrix\n\n{}\n'.
                format(config.hdf5FileNametrain, cr, cm))
            f.write("accuracy: {}\n".format(accuracy))
            log.info("accuracy: {}".format(accuracy))

            if int(name_list[2]) == 3:
                for each_auc in auc:
                    f.write("auc: {}\n".format(each_auc))
                    log.info("auc: {}".format(each_auc))
            else:
                f.write("auc: {}\n".format(auc))
                log.info("auc: {}".format(auc))

        fold_write += 1  # advance to the next fold
Example #16
import glob
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from models_64x64 import generator as gen
import matplotlib.pyplot as plt
from config import argparser
from data_process import noisy_meas

PARAMS = argparser()
np.random.seed(PARAMS.seed_no)

N = PARAMS.n_mcmc  # np.size(mcmc_samps, 0)
burn = int(PARAMS.burn_mcmc * N)
n_eff = N - burn
batch_size = 6400
z_dim = PARAMS.z_dim
n_iter = int(n_eff / batch_size)
dim_like = PARAMS.img_h * PARAMS.img_w * PARAMS.img_c
noise_var = PARAMS.noise_var
img_no = PARAMS.img_no


sample_dir = 'exps/mcmc/img{}_var{}_N{}'.format(PARAMS.img_no, noise_var, N)
mcmc_samps = np.load(sample_dir + '/samples.npy')
eff_samps = np.squeeze(mcmc_samps[burn:, :, :])

plt.figure(figsize=(15, 6))
for ii in range(25):
    plt.subplot(5, 5, ii + 1)
    plt.hist(eff_samps[:, ii], 50, density=True)
Example #17
            "git diff >> {}/git.txt".format(config.log_dir),
            "echo 'python -m rl.main {}' >> {}/cmd.sh".format(
                " ".join([shlex_quote(arg) for arg in sys.argv[1:]]), config.log_dir
            ),
        ]
        os.system("\n".join(cmds))

        # log config
        param_path = os.path.join(config.log_dir, "params.json")
        logger.info("Store parameters in %s", param_path)
        with open(param_path, "w") as fp:
            json.dump(config.__dict__, fp, indent=4, sort_keys=True)


if __name__ == "__main__":
    parser = argparser()
    args, unparsed = parser.parse_known_args()

    if "Pusher" in args.env:
        from config.pusher import add_arguments
    elif "Sawyer" in args.env:
        from config.sawyer import add_arguments
    else:
        raise ValueError("args.env (%s) is not supported" % args.env)

    add_arguments(parser)
    mp_add_arguments(parser)
    args, unparsed = parser.parse_known_args()

    if args.debug:
        args.rollout_length = 150