Example #1
0
def set_global_determinism(seed=SEED, fast_n_close=False):
    """Configure the process for fully reproducible tensor/randomness ops.

    Parameters:
        seed (int): seed value applied to every global random generator.
        fast_n_close (bool): when True, only seed the RNGs and skip the
            (slow) determinism settings — faster, but not fully reproducible.
    """
    set_seeds(seed=seed)
    if fast_n_close:
        return

    # Loud banner: full determinism noticeably slows training down.
    border = "*******************************************************************************"
    logging.warning(border)
    logging.warning(
        "*** set_global_determinism is called,setting full determinism, will be slow ***"
    )
    logging.warning(border)

    for flag in ('TF_DETERMINISTIC_OPS', 'TF_CUDNN_DETERMINISTIC'):
        os.environ[flag] = '1'
    # One scheduler thread each removes nondeterministic op interleaving.
    # https://www.tensorflow.org/api_docs/python/tf/config/threading/set_inter_op_parallelism_threads
    tf.config.threading.set_inter_op_parallelism_threads(1)
    tf.config.threading.set_intra_op_parallelism_threads(1)
    from tfdeterminism import patch
    patch()
Example #2
0
def enable_deterministic_training(seed, no_gpu_patch=False):
    """Seed every source of randomness for deterministic training.

    Must be called before any other TensorFlow code runs.

    Args:
        seed: value used to seed the Python, NumPy and TensorFlow generators.
        no_gpu_patch (bool): if False, apply a patch to TensorFlow to have
            deterministic GPU operations, if True the training is much faster
            but slightly less deterministic.
    """
    import numpy as np
    import os
    import random
    import tfdeterminism

    if not no_gpu_patch:
        # Deterministic GPU kernels; tf is then used as normal.
        tfdeterminism.patch()

    # Pin the hash seed so hashing of str, bytes and datetime objects is
    # stable (randomized by default on Python >= 3.2.3 unless set).
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)    # Python's built-in pseudo-random generator
    np.random.seed(seed)  # NumPy global generator (e.g. np.random.randint)
    # TensorFlow graph-level seed (e.g. tf.image.random_crop).
    tf.compat.v1.random.set_random_seed(seed)
def run(esetSC2, geneList, random_seed, hidden_size, hidden_size2,
        code_sizeList, activ1, optim, noise_factor):
    """Train candidate denoising autoencoders and return the best one.

    Args:
        esetSC2: expression matrix indexed by gene name (presumably a pandas
            DataFrame with genes as rows — `.loc[geneList]` below; TODO confirm).
        geneList: row labels to keep from esetSC2 (feature-selected genes).
        random_seed: seed applied to PYTHONHASHSEED and the Python/NumPy RNGs.
        hidden_size, hidden_size2: hidden-layer sizes forwarded to predEval.
        code_sizeList: candidate bottleneck sizes; predEval trains one model per entry.
        activ1: activation setting forwarded to predEval.
        optim: optimizer setting forwarded to predEval.
        noise_factor: scale of the Gaussian noise added for denoising training.

    Returns:
        (best_model_ind, best_model): winning index (into predEval's model
        list) chosen by majority vote over five metrics, and that model.
    """
    ## to improve reproducibility
    patch()
    os.environ['PYTHONHASHSEED'] = str(random_seed)
    # Single-threaded TF session so op scheduling is deterministic.
    config = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1,
                                      inter_op_parallelism_threads=1)
    session = tf.compat.v1.Session(config=config)
    tf.compat.v1.keras.backend.set_session(session)

    ### genes from feature selection + autoencoder
    esetSC2Var = esetSC2.loc[geneList]
    ## General Processing; original
    esetSC2Var = esetSC2Var.values
    # Scale to [0, 1] by the global maximum before adding noise.
    esetSC2Var = esetSC2Var / np.max(esetSC2Var)
    esetSC2Var = transpose(esetSC2Var)

    ## Prediction & Evaluation
    # Seeds must be set before the np.random.normal draw below so the
    # injected noise is reproducible.
    random.seed(random_seed)
    np.random.seed(random_seed)
    auto_list = None
    best_model_ind = None
    ## Denoising Autoencoder Preparation
    esetSC2Var_noisy = esetSC2Var + noise_factor * np.random.normal(
        size=esetSC2Var.shape)
    # Keep the noisy inputs inside the valid [0, 1] range.
    esetSC2Var_noisy = np.clip(esetSC2Var_noisy, 0.0,
                               1.0)  ## already transposed
    evalOut = predEval(code_sizeList, esetSC2Var_noisy, esetSC2Var,
                       hidden_size, hidden_size2, activ1, optim, random_seed)
    [
        auto_list, abs_spearman, abs_pearson, abs_kendall, abs_eucDist,
        scalarTestLoss
    ] = evalOut
    # Winning index per metric: maximize the three correlations, minimize
    # Euclidean distance and test loss.
    bests = [abs_spearman.index(max(abs_spearman)), \
                abs_pearson.index(max(abs_pearson)), \
                abs_kendall.index(max(abs_kendall)), \
                abs_eucDist.index(min(abs_eucDist)), \
                scalarTestLoss.index(min(scalarTestLoss))]
    # Majority vote across the five metrics (ties broken by max()'s scan order).
    best_model_ind = max(set(bests), key=bests.count)
    print(bests, best_model_ind)
    return best_model_ind, auto_list[best_model_ind]
Example #4
0
def settfreproduceability(seed=42):
    """Make experiments reproducible by pinning every source of randomness.

    Parameters:
        seed (int): seed applied to the Python, NumPy and TensorFlow RNGs.
    """
    logging.warning(
        "*** Modules are set to be deterministic , randomness in your modules will be avoided , Current seed value is {} change seed value if you want to try a different seed of parameters***"
        .format(seed))

    # Python-level determinism.
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)

    # TensorFlow-level determinism.
    tf.random.set_seed(seed)
    for var in ('TF_DETERMINISTIC_OPS', 'TF_CUDNN_DETERMINISTIC'):
        os.environ[var] = '1'
    # One scheduler thread each so op execution order is fixed.
    # https://www.tensorflow.org/api_docs/python/tf/config/threading/set_inter_op_parallelism_threads
    tf.config.threading.set_inter_op_parallelism_threads(1)
    tf.config.threading.set_intra_op_parallelism_threads(1)

    # Patch stock TF kernels for deterministic GPU behaviour.
    from tfdeterminism import patch
    patch()
Example #5
0
# -*- coding: utf-8 -*-
"""
@author: danfeng
"""
#import library
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import scipy.io as scio 
import scipy.io as sio
from tf_utils import random_mini_batches_single, convert_to_one_hot
from tensorflow.python.framework import ops
from tfdeterminism import patch
# Patch TF at import time so all subsequent GPU ops in this module are deterministic.
patch()

def create_placeholders(n_x, n_y):
    """Build the input placeholders for the TF1 graph.

    Args:
        n_x: feature dimension of the input samples.
        n_y: dimension of the targets (number of classes).

    Returns:
        Tuple (x, y, isTraining); isTraining defaults to True when not fed.
    """
    isTraining = tf.placeholder_with_default(True, shape=())
    x = tf.placeholder(tf.float32, [None, n_x], name="x")
    y = tf.placeholder(tf.float32, [None, n_y], name="Y")
    return x, y, isTraining

def initialize_parameters():
    """Create the first layer's weights with a fixed-seed Xavier initializer.

    NOTE(review): this snippet is truncated by the example boundary — only the
    first weight/bias pair is visible; the rest of the body (and the return)
    is missing here.
    """
    
    # Fix the TF1 graph-level seed so variable initialization is repeatable.
    tf.set_random_seed(1)
     
    # Presumably a 1x1 conv filter mapping 200 input channels to 128 outputs
    # (shape [1,1,200,128]) — TODO confirm against the usage site.
    x_w1 = tf.get_variable("x_w1", [1,1,200,128], initializer = tf.contrib.layers.xavier_initializer(seed = 1))
    x_b1 = tf.get_variable("x_b1", [128], initializer = tf.zeros_initializer())
    
Example #6
0
#!/usr/bin/env python3

import argparse
import docker
import os
import shutil
import sys
import ast
import socket

import numpy as np
from tfdeterminism import patch

# Must run before any TF graph/session work below for the patch to take effect.
patch(
)  # Ensure tf GPU determinism: https://github.com/NVIDIA/framework-determinism

import data.data_preprocess as prp
from metrics.metric_utils import feature_prediction, one_step_ahead_prediction, reidentify_score
from computils.misc import fix_all_random_seeds, tf_fixed_seed_seesion, redact_exception
from computils.solutions import (
    load_data,
    load_generated_data,
    validate_hider_output,
    validate_seeker_output,
    benchmark_hider,
)
from running import competition_config, MAX_SEQ_LEN, TRAIN_RATE, DATA_FILE_NAME, FORCE_REPROCESS, SEED, DEBUG_DATA

# Docker image used when competition_config does not specify one.
DEFAULT_IMAGE = competition_config.get(
    "DEFAULT_IMAGE", "drshushen/hide-and-seek-codalab:latest")
HIDER_EVAL_TRAINING_VERBOSE = competition_config.get(
Example #7
0
def train(workdir,
          env_name,
          num_timesteps,
          nsteps=256,
          nminibatches=4,
          noptepochs=4,
          learning_rate=2.5e-4,
          ent_coef=0.01,
          patch_tf_determinism=False,
          global_seed=None,
          checkpoint_path_for_debugging=None):
    """Runs PPO training.

  Args:
    workdir: where to store experiment results/logs
    env_name: the name of the environment to run
    num_timesteps: for how many timesteps to run training
    nsteps: Number of consecutive environment steps to use during training.
    nminibatches: Minibatch size.
    noptepochs: Number of optimization epochs.
    learning_rate: Initial learning rate.
    ent_coef: Entropy coefficient.
    patch_tf_determinism: If True, apply tfdeterminism.patch() and force a
      single-threaded TF session for reproducible (but slower) training.
    global_seed: If not None, seed the Python, NumPy and TensorFlow RNGs.
    checkpoint_path_for_debugging: Optional checkpoint path forwarded to
      ppo2.learn for debugging.
  """
    if patch_tf_determinism:
        from tfdeterminism import patch
        patch()
        print('Applied tfdeterminism.patch')
    if global_seed is not None:
        import random
        import numpy as np
        random.seed(global_seed)
        np.random.seed(global_seed)
        # TF1-style graph seed (the commented line is the TF2 equivalent).
        #tf.random.set_seed(global_seed)
        tf.set_random_seed(global_seed)

    #tf.debugging.set_log_device_placement(True)

    # Reward curves for the three environment splits.
    train_measurements = utils.create_measurement_series(
        workdir, 'reward_train')
    valid_measurements = utils.create_measurement_series(
        workdir, 'reward_valid')
    test_measurements = utils.create_measurement_series(workdir, 'reward_test')

    def measurement_callback(unused_eplenmean, eprewmean, global_step_val):
        # Record mean episode reward seen during training.
        if train_measurements:
            train_measurements.create_measurement(objective_value=eprewmean,
                                                  step=global_step_val)
        logger.logkv('eprewmean_train', eprewmean)

    def eval_callback_on_valid(eprewmean, global_step_val):
        # Record mean episode reward on the validation environment.
        if valid_measurements:
            valid_measurements.create_measurement(objective_value=eprewmean,
                                                  step=global_step_val)
        logger.logkv('eprewmean_valid', eprewmean)

    def eval_callback_on_test(eprewmean, global_step_val):
        # Record mean episode reward on the test environment.
        if test_measurements:
            test_measurements.create_measurement(objective_value=eprewmean,
                                                 step=global_step_val)
        logger.logkv('eprewmean_test', eprewmean)

    logger_dir = workdir
    #logger.configure(logger_dir)
    logger.configure(dir=logger_dir,
                     format_strs=['tensorboard', 'stdout', 'log', 'csv'])
    logger.Logger.DEFAULT = logger.Logger.CURRENT

    # NOTE(review): rlb_ot presumably selects the RLB environment wrapper
    # below — confirm against get_rlb_args / rlb_env_factory.
    rlb_ot = get_rlb_args().outer_args['rlb_ot']

    env, valid_env, test_env = get_environment(
        env_name,
        create_env_fn=(rlb_env_factory.create_environments_with_rlb
                       if rlb_ot else env_factory.create_environments))
    is_ant = env_name.startswith('parkour:')

    # Validation metric.
    policy_evaluator_on_valid = eval_policy.PolicyEvaluator(
        valid_env, metric_callback=eval_callback_on_valid, video_filename=None)

    # Test metric (+ videos).
    video_filename = os.path.join(FLAGS.workdir, 'video')
    policy_evaluator_on_test = eval_policy.PolicyEvaluator(
        test_env,
        metric_callback=eval_callback_on_test,
        video_filename=video_filename,
        grayscale=(env_name.startswith('atari:')))

    # Delay to make sure that all the DMLab environments acquire
    # the GPU resources before TensorFlow acquire the rest of the memory.
    # TODO(damienv): Possibly use allow_grow in a TensorFlow session
    # so that there is no such problem anymore.
    time.sleep(15)

    cloud_sync_callback = lambda: None

    def evaluate_valid_test(model_step_fn, global_step):
        # Ant ('parkour:') runs skip the validation evaluation.
        if not is_ant:
            policy_evaluator_on_valid.evaluate(model_step_fn, global_step)
        policy_evaluator_on_test.evaluate(model_step_fn, global_step)

    # allow_growth: don't grab all GPU memory up front (see sleep note above).
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    if patch_tf_determinism:
        # Single-threaded execution so op scheduling is deterministic.
        sess_config.intra_op_parallelism_threads = 1
        sess_config.inter_op_parallelism_threads = 1
    #with tf.Session():
    with tf.Session(config=sess_config):
        policy = {
            'cnn': policies.CnnPolicy,
            'lstm': policies.LstmPolicy,
            'lnlstm': policies.LnLstmPolicy,
            'mlp': policies.MlpPolicy
        }[FLAGS.policy_architecture]

        # Openai baselines never performs num_timesteps env steps because
        # of the way it samples training data in batches. The number of timesteps
        # is multiplied by 1.1 (hacky) to insure at least num_timesteps are
        # performed.

        ppo2.learn(policy,
                   env=env,
                   nsteps=nsteps,
                   nminibatches=nminibatches,
                   lam=0.95,
                   gamma=0.99,
                   noptepochs=noptepochs,
                   log_interval=1,
                   ent_coef=ent_coef,
                   lr=learning_rate if is_ant else lambda f: f * learning_rate,
                   cliprange=0.2 if is_ant else lambda f: f * 0.1,
                   total_timesteps=int(num_timesteps * 1.1),
                   train_callback=measurement_callback,
                   eval_callback=evaluate_valid_test,
                   cloud_sync_callback=cloud_sync_callback,
                   save_interval=200,
                   workdir=workdir,
                   use_curiosity=FLAGS.use_curiosity,
                   curiosity_strength=FLAGS.curiosity_strength,
                   forward_inverse_ratio=FLAGS.forward_inverse_ratio,
                   curiosity_loss_strength=FLAGS.curiosity_loss_strength,
                   random_state_predictor=FLAGS.random_state_predictor,
                   use_rlb=(not rlb_ot),
                   checkpoint_path_for_debugging=checkpoint_path_for_debugging)
        cloud_sync_callback()
    test_env.close()
    valid_env.close()
    utils.maybe_close_measurements(train_measurements)
    utils.maybe_close_measurements(valid_measurements)
    utils.maybe_close_measurements(test_measurements)