Example no. 1
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except Exception as e:
        print("missing or invalid arguments %s" % e)
        exit(0)

    os.environ["CUDA_VISIBLE_devices"] = config.gpu
    import tensorflow as tf

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create tensorflow session
    gpuconfig = tf.ConfigProto(allow_soft_placement=True,
                               log_device_placement=False)
    gpuconfig.gpu_options.visible_device_list = config.gpu
    sess = tf.Session(config=gpuconfig)
    # create your data generator
    data = DataGenerator(config)

    # create an instance of the model you want
    model = invariant_basic(config, data)
    # create trainer and pass all the previous components to it
    trainer = Trainer(sess, model, data, config)
    # load model if exists
    model.load(sess)
    # here you train your model
    trainer.train()
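
The example above and the ones that follow rely on a few project helpers whose definitions are not shown on this page: get_args, process_config, and create_dirs. The following is a minimal sketch of what they might look like, assuming an argparse command line and a flat JSON config exposed as attributes. Only the field names are taken from the calls in the examples; everything else, including the directory layout and the optional dataset_name parameter, is an assumption, and nested fields such as config.hyperparams in the QM9 example are not modeled here.

import argparse
import json
import os
from types import SimpleNamespace


def get_args():
    # Hypothetical sketch: a single -c/--config flag, matching args.config above.
    parser = argparse.ArgumentParser(description="Run an experiment from a JSON config file")
    parser.add_argument("-c", "--config", required=True,
                        help="path to the JSON configuration file")
    return parser.parse_args()


def process_config(json_file, dataset_name=None):
    # Hypothetical sketch: load the JSON file and expose its keys as attributes
    # (config.gpu, config.exp_name, ...), the way the examples use them.
    # dataset_name is accepted for compatibility with the QM9 example but unused here.
    with open(json_file) as f:
        config = SimpleNamespace(**json.load(f))
    # Assumed layout for the experiment directories referenced by the examples.
    config.summary_dir = os.path.join("experiments", config.exp_name, "summary")
    config.checkpoint_dir = os.path.join("experiments", config.exp_name, "checkpoint")
    return config


def create_dirs(dirs):
    # Create each directory in the list if it does not already exist.
    for d in dirs:
        os.makedirs(d, exist_ok=True)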
Example no. 2
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except Exception as e:
        print("missing or invalid arguments %s" % e)
        exit(0)

    os.environ["CUDA_VISIBLE_devices"] = config.gpu
    import tensorflow as tf
    import numpy as np
    tf.set_random_seed(100)
    np.random.seed(100)
    base_summary_folder = config.summary_dir
    base_exp_name = config.exp_name
    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    for lr in [0.00008 * (2**i) for i in range(8)]:
        for decay in [0.6, 0.7, 0.8, 0.9]:
            config.learning_rate = lr
            config.decay_rate = decay
            config.exp_name = base_exp_name + " lr={0}_decay={1}".format(
                lr, decay)
            curr_dir = os.path.join(base_summary_folder,
                                    "lr={0}_decay={1}".format(lr, decay))
            config.summary_dir = curr_dir
            create_dirs([curr_dir])
            # create your data generator
            data = DataGenerator(config)
            gpuconfig = tf.ConfigProto(allow_soft_placement=True,
                                       log_device_placement=False)
            gpuconfig.gpu_options.visible_device_list = config.gpu
            sess = tf.Session(config=gpuconfig)
            # create an instance of the model you want
            model = invariant_basic(config, data)
            # create trainer and pass all the previous components to it
            trainer = Trainer(sess, model, data, config)
            # here you train your model
            acc, loss = trainer.train()
            sess.close()
            tf.reset_default_graph()

    doc_utils.summary_10fold_results(config.summary_dir)
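
In this sweep every (lr, decay) pair gets its own graph: the session is closed and tf.reset_default_graph() is called before the next iteration, so variables from the previous run do not accumulate in the default graph. Below is a stripped-down sketch of that per-run lifecycle using the TF1-style API, with a toy variable standing in for the real model.

import tensorflow as tf

for run_id in range(3):  # stand-in for the (lr, decay) grid above
    # graph construction for this run only
    counter = tf.Variable(0.0, name="counter")
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print("run", run_id, "->", sess.run(counter))
    # drop this run's ops so the next iteration starts from an empty graph
    tf.reset_default_graph()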
Example no. 3
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except Exception as e:
        print("missing or invalid arguments %s" % e)
        exit(0)

    os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
#     import tensorflow as tf
    import torch

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create tensorflow session (kept commented out; replaced by the PyTorch device handling below)
#     gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
#     gpuconfig.gpu_options.visible_device_list = config.gpus_list
#     gpuconfig.gpu_options.allow_growth = True
#     sess = tf.Session(config=gpuconfig)
    if config.cuda:
        print(f'Using GPU : {torch.cuda.get_device_name(int(config.gpu))}')
    else:
        print('Using CPU')
    # create your data generator
    data = DataGenerator(config)
#     data = torch.from_numpy(data)

    # create an instance of the model you want
    model = invariant_basic(config, data)
    if config.cuda:
        model = model.cuda()
        
    for name, param in model.named_parameters():
#         if param.device.type != 'cuda':
        print(f'{name}, device type {param.device.type}')
        
    # create trainer and pass all the previous components to it
#     trainer = Trainer(sess, model, data, config)
    trainer = Trainer(model, data, config)
    # load model if exists
#     model.load(sess)
    # here you train your model
    trainer.train()
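
This PyTorch port moves the model with model.cuda() and then prints every parameter's device as a sanity check. An alternative pattern, not used in the example and shown only as a sketch under the assumption of a config.cuda flag and a config.gpu index like the ones above, is to build a single torch.device up front and move the model with .to(device):

import torch


def select_device(use_cuda, gpu_index):
    # Pick the GPU named in the config when CUDA is requested and available,
    # otherwise fall back to the CPU.
    if use_cuda and torch.cuda.is_available():
        device = torch.device("cuda:{}".format(int(gpu_index)))
        print("Using GPU: {}".format(torch.cuda.get_device_name(device)))
    else:
        device = torch.device("cpu")
        print("Using CPU")
    return device


# Hypothetical usage: device = select_device(config.cuda, config.gpu)
#                     model = model.to(device)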
Example no. 4
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config, dataset_name='QM9')

    except Exception as e:
        print("missing or invalid arguments %s" % e)
        exit(0)

    os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
    import tensorflow as tf
    import numpy as np
    tf.set_random_seed(100)
    np.random.seed(100)
    print("lr = {0}".format(config.hyperparams.learning_rate))
    print("decay = {0}".format(config.hyperparams.decay_rate))
    if config.target_param is not False:  # 'is not False' so a target index of 0 still counts (0 == False in Python)
        print("target parameter: {0}".format(config.target_param))
    print(config.architecture)
    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    doc_utils.doc_used_config(config)

    # create your data generator
    data = DataGenerator(config)
    gpuconfig = tf.ConfigProto(allow_soft_placement=True,
                               log_device_placement=False)
    gpuconfig.gpu_options.visible_device_list = config.gpus_list
    gpuconfig.gpu_options.allow_growth = True
    sess = tf.Session(config=gpuconfig)
    # create an instance of the model you want
    model = invariant_basic(config, data)
    # create trainer and pass all the previous components to it
    trainer = Trainer(sess, model, data, config)
    # here you train your model
    trainer.train()
    # test model, restore best model
    test_dists, test_loss = trainer.test(load_best_model=True)
    sess.close()
    tf.reset_default_graph()

    doc_utils.summary_qm9_results(config.summary_dir, test_dists, test_loss,
                                  trainer.best_epoch)
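
The trainer.test(load_best_model=True) call implies that the trainer checkpoints the best model seen during training and restores it before evaluation. The Trainer class itself is not shown on this page; the sketch below is one common way to implement that pattern with the TF1 tf.train.Saver API and is an assumption about the implementation, not the project's actual Trainer.

import os
import tensorflow as tf


class BestCheckpointKeeper:
    # Hypothetical sketch: save a checkpoint whenever the validation loss improves,
    # and restore that checkpoint before testing. Construct it after the model graph
    # has been built, so the Saver can find the variables to save.
    def __init__(self, sess, checkpoint_dir):
        self.sess = sess
        self.saver = tf.train.Saver(max_to_keep=1)
        self.best_path = os.path.join(checkpoint_dir, "best_model.ckpt")
        self.best_loss = float("inf")
        self.best_epoch = 0

    def update(self, epoch, val_loss):
        if val_loss < self.best_loss:
            self.best_loss = val_loss
            self.best_epoch = epoch
            self.saver.save(self.sess, self.best_path)

    def restore_best(self):
        self.saver.restore(self.sess, self.best_path)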
Example no. 5
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except Exception as e:
        print("missing or invalid arguments %s" % e)
        exit(0)

    os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
    import tensorflow as tf
    import numpy as np
    tf.set_random_seed(100)
    np.random.seed(100)
    print("lr = {0}".format(config.learning_rate))
    print("decay = {0}".format(config.decay_rate))
    print(config.architecture)
    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    for exp in range(1, config.num_exp + 1):
        for fold in range(1, 11):
            print("Experiment num = {0}\nFold num = {1}".format(exp, fold))
            # create your data generator
            config.num_fold = fold
            data = DataGenerator(config)
            gpuconfig = tf.ConfigProto(allow_soft_placement=True,
                                       log_device_placement=False)
            gpuconfig.gpu_options.visible_device_list = config.gpus_list
            gpuconfig.gpu_options.allow_growth = True
            sess = tf.Session(config=gpuconfig)
            # create an instance of the model you want
            model = invariant_basic(config, data)
            # create trainer and pass all the previous components to it
            trainer = Trainer(sess, model, data, config)
            # here you train your model
            acc, loss = trainer.train()
            doc_utils.doc_results(acc, loss, exp, fold, config.summary_dir)
            sess.close()
            tf.reset_default_graph()

    doc_utils.summary_10fold_results(config.summary_dir)
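
The 10-fold loop delegates all bookkeeping to doc_utils.doc_results and doc_utils.summary_10fold_results, which are not shown on this page. A minimal sketch of what such helpers could do is given below; the CSV file name and format are assumptions for illustration, not the project's actual doc_utils implementation.

import csv
import os

import numpy as np


def doc_results_sketch(acc, loss, exp, fold, summary_dir):
    # Append one row per (experiment, fold) pair to a CSV file in the summary directory.
    path = os.path.join(summary_dir, "per_fold_results.csv")
    write_header = not os.path.exists(path)
    with open(path, "a", newline="") as f:
        writer = csv.writer(f)
        if write_header:
            writer.writerow(["exp", "fold", "acc", "loss"])
        writer.writerow([exp, fold, acc, loss])


def summary_10fold_results_sketch(summary_dir):
    # Report mean and standard deviation of the accuracy over all recorded folds.
    path = os.path.join(summary_dir, "per_fold_results.csv")
    with open(path) as f:
        rows = list(csv.DictReader(f))
    accs = np.array([float(r["acc"]) for r in rows])
    print("10-fold accuracy: {:.4f} +/- {:.4f}".format(accs.mean(), accs.std()))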