Example #1
 def _compile_environment(self, cfg: EnvironmentConfig):
     logging.info("=>Compiling execution environment...")
     logging.info("=>Execution with environment config:\n %s" % (str(cfg)))
     if self._checkpoint_dir is None:
         raise ValueError("Checkpoint_dir must be specified to "
                          "store execution results")
     # Config random
     random_seed = cfg.random_seed
     np.random.seed(random_seed)
     set_random_seed(random_seed)
     random.seed(random_seed)
     # Config GPU
     if hasattr(cfg, 'CUDA_VISIBLE_DEVICES'):
         logging.info("=>Setting visible devices on %s" % cfg.CUDA_VISIBLE_DEVICES)
         os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
         os.environ['CUDA_VISIBLE_DEVICES'] = cfg.CUDA_VISIBLE_DEVICES
     intra_op_parallelism_threads = cfg.intra_op_parallelism_threads \
         if hasattr(cfg, 'intra_op_parallelism_threads') else 0
     inter_op_parallelism_threads = cfg.inter_op_parallelism_threads \
         if hasattr(cfg, 'inter_op_parallelism_threads') else 0
     session_config = config_pb2.ConfigProto(
         allow_soft_placement=True,
         inter_op_parallelism_threads=inter_op_parallelism_threads,
         intra_op_parallelism_threads=intra_op_parallelism_threads)
     if hasattr(cfg, 'per_process_gpu_memory_fraction'):
         session_config.gpu_options.per_process_gpu_memory_fraction = \
             cfg.per_process_gpu_memory_fraction
     if hasattr(cfg, 'allow_growth'):
         session_config.gpu_options.allow_growth = cfg.allow_growth
     # Create first session
     F.get_session(config=session_config,
                   checkpoint_dir=self._checkpoint_dir)
Example #2
 def _compile_environment(self, _session_cfg):
     cfg = toml.load('./env_cfg.toml')['ENV']
     if isinstance(_session_cfg, str):
         suffix = _session_cfg.split('.')[-1]
         if suffix == 'toml':
             cfg.update(**toml.load(_session_cfg)['ENV'])
         elif suffix == 'json':
             import json
             cfg.update(**json.load(open(_session_cfg)))
         else:
             raise ValueError("Unsupported file format %s, only support toml, json" % suffix)
     elif isinstance(_session_cfg, dict):
         cfg.update(**_session_cfg)
     logging.info("=>Compiling execution environment...")
     logging.info("=>Execution with environment config:\n %s" % (str(cfg)))
     if self._checkpoint_dir is None:
         raise ValueError("Checkpoint_dir must be specified to "
                          "store execution results")
     # Config random
     random_seed = cfg['random_seed']
     np.random.seed(random_seed)
     set_random_seed(random_seed)
     random.seed(random_seed)
     # Config GPU
     if 'CUDA_VISIBLE_DEVICES' in cfg:
         logging.info("=>Setting visible devices on %s" % cfg['CUDA_VISIBLE_DEVICES'])
         os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
         os.environ['CUDA_VISIBLE_DEVICES'] = cfg['CUDA_VISIBLE_DEVICES']
     intra_op_parallelism_threads = cfg['intra_op_parallelism_threads'] \
         if 'intra_op_parallelism_threads' in cfg else 0
     inter_op_parallelism_threads = cfg['inter_op_parallelism_threads'] \
         if 'inter_op_parallelism_threads' in cfg else 0
     session_config = config_pb2.ConfigProto(
         allow_soft_placement=True,
         inter_op_parallelism_threads=inter_op_parallelism_threads,
         intra_op_parallelism_threads=intra_op_parallelism_threads)
     if 'per_process_gpu_memory_fraction' in cfg:
         session_config.gpu_options.per_process_gpu_memory_fraction = \
             cfg['per_process_gpu_memory_fraction']
     if 'allow_growth' in cfg:
         session_config.gpu_options.allow_growth = cfg['allow_growth']
     # Create first session
     F.get_session(config=session_config,
                   checkpoint_dir=self._checkpoint_dir)
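
A minimal usage sketch, not taken from the original source: the override passed to _compile_environment may be a plain dict, or a path to a .toml/.json file, whose keys mirror the [ENV] table of env_cfg.toml. The key names below are the ones the method reads; the values are only placeholders.

# Hypothetical override dict; key names match what the method reads above, values are illustrative.
env_override = {
    'random_seed': 42,
    'CUDA_VISIBLE_DEVICES': '0',
    'intra_op_parallelism_threads': 4,
    'inter_op_parallelism_threads': 2,
    'per_process_gpu_memory_fraction': 0.5,
    'allow_growth': True,
}
# experiment._compile_environment(env_override)  # 'experiment' stands in for the owning object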
Example #3
test_labels_up2 = np.loadtxt(test_label_up2, dtype='int8')

## Configure GPU usage
physical_devices = config.experimental.list_physical_devices('GPU')
config.experimental.set_memory_growth(physical_devices[0], True)

# Perform training for each parameters combination
results = []
params = extract_experiments_parameters(
    exp_data, ('seed', 'batch_size', 'lr_decay', 'max_stride'))

for p in params:

    if 'seed' in p:
        np.random.seed(p['seed'])
        random.set_random_seed(p['seed'])

    # Prepare the data train

    if 'batch_size' in p:
        if isinstance(p['batch_size'], int):
            # Set the same batch_size for all the clients
            p['batch_size'] = [p['batch_size']] * NUM_CLIENTS

        elif isinstance(p['batch_size'], (list, tuple)):
            if len(p['batch_size']) != NUM_CLIENTS:
                print('The list of batch sizes for each client must provide a '\
                  'batch size for the {} clients'.format(NUM_CLIENTS))
                continue

        else:
Example #4
## Define settings for plots
plt.rcParams["font.family"] = "serif"

## Set working directory
#path = r'C:\Users\bbeals\Dropbox (Personal)\Masters in Predictive Analytics\590-Thesis\NW590-Thesis'
path = r'C:\Users\brand\Dropbox\Masters in Predictive Analytics\590-Thesis\NW590-Thesis'
os.chdir(path)

## Get current datetime
today = datetime.now().strftime('%Y%m%d')

## Set seed for reproducibility
np.random.seed(1)
#random.set_seed(1)
random.set_random_seed(1)

######################################
# PREPARE DATA
######################################

## Load data
data = pd.read_csv(r'..\Data.csv')

## Fill missing values
data = data.fillna(0)

## Define independent, numerical variables
features = data.loc[:, 'BETA_ACWI':]
features = features.drop(['1M_TREASURY'], axis=1)
features = features.drop(['1Y_TREASURY'], axis=1)
Example #5
num_validation_samples = 1000
initial_learning_rate = 0.001
gamma = 3
training_epochs = 400
batch_size = 1000

train_data = pd.read_csv(train_path).values.astype(np.float32)
train_labels, train_images = np.split(train_data,[1],axis=1)
train_images = np.reshape(train_images, (-1, 28, 28, 1)) / 255.

test_images = pd.read_csv(test_path).values.astype(np.float32)
test_images = np.reshape(test_images, (-1, 28, 28, 1)) / 255.

np.random.seed(0)
from tensorflow import random
random.set_random_seed(0)

#%% Define custom objective function with Taylor softmax and focal loss
def focal_objective(y_true, y_pred):
    taylor_ex = 1. + y_pred + 0.5 * K.pow(y_pred, 2)
    taylor_sum = K.reshape(K.sum(taylor_ex, axis=1, keepdims=False), shape=[-1, 1])
    taylor_softmax = taylor_ex / taylor_sum
    cost = -K.mean(K.sum(K.cast(y_true, 'float32') * K.log(taylor_softmax)
            * K.pow(1. - taylor_softmax, gamma), axis=1, keepdims=False), axis=0, keepdims=False)
    return cost
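
# A hedged usage note, not part of the original script: focal_objective applies the
# Taylor softmax to y_pred itself, so the model it is paired with should output raw
# logits (a final Dense layer with no softmax activation). A compile call would then
# look roughly like:
#   model.compile(optimizer='adam', loss=focal_objective, metrics=['accuracy'])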

#%% Set up train set and validation set
num_train_samples = train_data.shape[0] - num_validation_samples
arr = np.arange(train_data.shape[0])
np.random.shuffle(arr)
train_set_images = train_images[arr[0:num_train_samples],:]
Example #6
X_train = []
y_train = []

for i in range(60, 1258):
    X_train.append(training_data_scaled[i - 60:i, 0])
    y_train.append(training_data_scaled[i, 0])

X_train, y_train = np.array(X_train), np.array(y_train)

X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))

from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout

seed = 1
np.random.seed(seed)
random.set_random_seed(seed)
model = Sequential()

model.add(
    LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1)))

#Adding second LSTM layer
model.add(LSTM(units=50, return_sequences=True))
#Adding third LSTM layer
model.add(LSTM(units=50, return_sequences=True))
#Adding fourth LSTM layer
model.add(LSTM(units=50))
#Adding the output layer
model.add(Dense(units=1))

#Compiling RNN
Example #7
from bullet.tm700_rgb_Gym import tm700_rgb_gym
from stable_baselines import DQN, PPO2, DDPG
from baselines.helpers import evaluate
import baselines.parser as parser
import time
from datetime import date
import baselines.helpers as helpers
from tensorflow.random import set_random_seed
import tensorflow as tf
import numpy as np
from baselines.helpers import record_gif
from stable_baselines import results_plotter
#################### PARAMETERS
args = parser.arg_parse()
np.random.seed(0)
set_random_seed(args.random_seed)

config = tf.ConfigProto(device_count={'GPU': 0})
sess = tf.Session(config=config)

start = time.time()
ENVIRONMENT = 'possensor'
MODEL = 'DDPG'
DISCRETE = False
DATE = date.today().strftime("%d-%m")
RENDERS = True
MODELPATH = None
# MODELNAME = 'trainedmodel_DQN_rgbd_06-02.pkl' #
MODELNAME = "tm700_ddpg_possensor_bestmodel.pkl"

################ MODEL AND GYM ENVIRONMENT
Example #8
from tensorflow.random import set_random_seed
set_random_seed(42)
import tensorflow.keras as K
import time
import os
import numpy as np

import models
import utils
import generatorPrototype

training_file_path = '../resources/WSD_Evaluation_Framework/Training_Corpora/SemCor/semcor.data.xml'
gold_file_path = '../resources/WSD_Evaluation_Framework/Training_Corpora/SemCor/semcor.gold.key.txt'
training_file_path_dev = '../resources/WSD_Evaluation_Framework/Evaluation_Datasets/semeval2013/semeval2013.data.xml'
gold_file_path_dev = '../resources/WSD_Evaluation_Framework/Evaluation_Datasets/semeval2013/semeval2013.gold.key.txt'
fine_senses_vocab_path = '../resources/semcor.vocab.WordNet.json'
input_vocab_path = '../resources/semcor.input.vocab.json'
input_antivocab_path = '../resources/semcor.leftout.vocab.json'
embedding_size = 64
batch_size = 64
LEARNING_RATE = 0.1
N_EPOCHS = 10
PADDING_SIZE = 30
print_model = False

#loading dict
senses = utils.json_vocab_reader(fine_senses_vocab_path)
inputs, antivocab = utils.json_vocab_reader(input_vocab_path,
                                            input_antivocab_path)
Example #9
def train_eval_network(dataset_name,
                       train_gen,
                       validate_gen,
                       test_x,
                       test_y,
                       seq_len,
                       epochs,
                       batch_size,
                       batch_epoch_ratio,
                       initial_weights,
                       size,
                       cnn_arch,
                       learning_rate,
                       optimizer,
                       cnn_train_type,
                       pre_weights,
                       lstm_conf,
                       len_train,
                       len_valid,
                       dropout,
                       classes,
                       patience_es=15,
                       patience_lr=5):
    """the function build, compine fit and evaluate a certain architechtures on a dataset"""
    set_random_seed(2)
    seed(1)
    result = dict(dataset=dataset_name,
                  cnn_train=cnn_train_type,
                  cnn=cnn_arch.__name__,
                  lstm=lstm_conf[0].__name__,
                  epochs=epochs,
                  learning_rate=learning_rate,
                  batch_size=batch_size,
                  dropout=dropout,
                  optimizer=optimizer[0].__name__,
                  initial_weights=initial_weights,
                  seq_len=seq_len)
    print("run experimnt " + str(result))
    model = BuildModel_basic.build(size=size,
                                   seq_len=seq_len,
                                   learning_rate=learning_rate,
                                   optimizer_class=optimizer,
                                   initial_weights=initial_weights,
                                   cnn_class=cnn_arch,
                                   pre_weights=pre_weights,
                                   lstm_conf=lstm_conf,
                                   cnn_train_type=cnn_train_type,
                                   dropout=dropout,
                                   classes=classes)

    # The network is trained on data generators, and the callbacks fire when the validation loss stops improving:
    # 1. early stopping after n iterations
    # 2. reducing the learning rate after k iterations, where k < n
    test_history = TestCallback((test_x, test_y))
    history = model.fit_generator(steps_per_epoch=int(
        float(len_train) / float(batch_size * batch_epoch_ratio)),
                                  generator=train_gen,
                                  epochs=epochs,
                                  validation_data=validate_gen,
                                  validation_steps=int(
                                      float(len_valid) / float(batch_size)),
                                  callbacks=[
                                      EarlyStopping(
                                          monitor='val_loss',
                                          min_delta=0.001,
                                          patience=patience_es,
                                      ),
                                      ReduceLROnPlateau(monitor='val_loss',
                                                        factor=0.5,
                                                        patience=patience_lr,
                                                        min_lr=1e-8,
                                                        verbose=1),
                                      test_history
                                  ])
    history_to_save = history.history
    history_to_save['test accuracy'] = test_history.test_acc
    history_to_save['test loss'] = test_history.test_loss

    model_name = ""
    for k, v in result.items():
        model_name = model_name + "_" + str(k) + "-" + str(v).replace(".", "d")
    model_path = os.path.join(res_path, model_name)
    pd.DataFrame(history_to_save).to_csv(model_path + "_train_results.csv")
    result['validation loss'] = min(history.history['val_loss'])
    result['validation accuracy'] = max(history.history['val_acc'])
    result['last validation loss'] = history.history['val_loss'][-1]
    result['last validation accuracy'] = history.history['val_acc'][-1]

    result['train accuracy'] = max(history.history['acc'])
    result['train loss'] = min(history.history['loss'])
    result['last train accuracy'] = history.history['acc'][-1]
    result['last train loss'] = history.history['loss'][-1]

    result['test accuracy'] = max(test_history.test_acc)
    result['test loss'] = min(test_history.test_loss)
    result['last test accuracy'] = test_history.test_acc[-1]
    result['last test loss'] = test_history.test_loss[-1]

    result['final lr'] = history.history['lr'][-1]
    result['total epochs'] = len(history.history['lr'])
    return result
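
A minimal calling sketch with placeholder arguments, not the author's real settings: train_eval_network returns a flat dict of metrics, so the results of a hyperparameter sweep can be appended to a list and written out with pandas.

# Hypothetical sweep; every value is a placeholder, and the generators, test arrays
# and architecture objects are assumed to have been prepared elsewhere.
# all_results = []
# for lr in (1e-4, 1e-3):
#     all_results.append(train_eval_network('hockey', train_gen, valid_gen, test_x, test_y,
#                                            seq_len, epochs, batch_size, batch_epoch_ratio,
#                                            initial_weights, size, cnn_arch, lr, optimizer,
#                                            cnn_train_type, pre_weights, lstm_conf,
#                                            len_train, len_valid, dropout, classes))
# pd.DataFrame(all_results).to_csv(os.path.join(res_path, 'sweep_summary.csv'))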