def build_config(limit_gpu_fraction=0.2, limit_cpu_fraction=10):
    """Build a TensorFlow ConfigProto with GPU/CPU resource limits.

    Args:
        limit_gpu_fraction: Fraction of GPU memory this process may use.
            A value <= 0 hides all GPUs and forces CPU execution.
        limit_cpu_fraction: CPU thread budget. ``None`` leaves threading
            settings untouched; ``0`` means one thread; a value in (0, 1)
            is a fraction of the available CPUs (minimum 1); a negative
            value ``n`` means all CPUs except ``-(n + 1)`` (e.g. ``-2`` is
            all but one); an integer >= 1 is an absolute thread count.

    Returns:
        A ConfigProto ready to pass to a TF1 Session.
    """
    if limit_gpu_fraction > 0:
        # Pin to the first GPU and cap its per-process memory usage.
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
        gpu_options = GPUOptions(
            allow_growth=True,
            per_process_gpu_memory_fraction=limit_gpu_fraction)
        config = ConfigProto(gpu_options=gpu_options)
    else:
        # Hide all GPUs so TensorFlow falls back to CPU.
        os.environ["CUDA_VISIBLE_DEVICES"] = ""
        config = ConfigProto(device_count={'GPU': 0})

    if limit_cpu_fraction is not None:
        # FIX: the original set cpu_count for == 0 in a standalone `if`,
        # then unconditionally fell through to a second if/elif chain whose
        # `< 1` branch overwrote it (dead assignment; the final value was
        # only coincidentally the same). One if/elif chain keeps the exact
        # resulting values without the shadowed write.
        if limit_cpu_fraction == 0:
            cpu_count = 1
        elif limit_cpu_fraction < 0:
            # -2 gives all CPUs except 1
            cpu_count = max(1, int(os.cpu_count() + limit_cpu_fraction + 1))
        elif limit_cpu_fraction < 1:
            # 0.5 gives 50% of available CPUs
            cpu_count = max(1, int(os.cpu_count() * limit_cpu_fraction))
        else:
            # 2 gives 2 CPUs
            cpu_count = int(limit_cpu_fraction)
        config.inter_op_parallelism_threads = cpu_count
        config.intra_op_parallelism_threads = cpu_count
        # Keep OpenMP at a single thread; MKL gets the full budget.
        os.environ['OMP_NUM_THREADS'] = str(1)
        os.environ['MKL_NUM_THREADS'] = str(cpu_count)

    return config
def __init__(self, model_dir, img_list):
    """Open a GPU-backed TF session and load the checkpointed model."""
    # Allow the session to take up to the whole GPU, growing on demand.
    gpu_opts = GPUOptions(per_process_gpu_memory_fraction=1, allow_growth=True)
    sess = Session(config=ConfigProto(gpu_options=gpu_opts))

    self.model_dir = os.path.join(C.check_point_path, model_dir)
    self.img_list = img_list
    self.list_size = len(img_list)
    self.batch_size = 64

    # The saved model contains a custom preprocessing layer, so it must
    # be named explicitly for deserialization to resolve it.
    self.model = load_model(
        self.model_dir,
        custom_objects={'resize_and_normalize': resize_and_normalize})
def __init__(self, projectModel, latentDim, usePretrained=True):
    """Set up the VAE: either load pretrained weights or build the graph."""
    from tensorflow import GPUOptions, Session, ConfigProto

    # Reserve only a quarter of GPU memory for this process.
    opts = GPUOptions(per_process_gpu_memory_fraction=0.25)
    sess = Session(config=ConfigProto(gpu_options=opts))

    self.latentDim = latentDim
    self.actionSpace = projectModel.getActionSpace()

    if not usePretrained:
        # Build encoder and decoder graphs from scratch.
        self.defineModel()
        self.defineDecoder()
    else:
        # Load weights saved under the model-options naming scheme
        # for this latent dimensionality.
        pretrained = projectModel.modelOptions.asPretrainedVAE_Filename(latentDim)
        self.model = load_model(pretrained)
def load_model(architecture_file, mtype='base'):
    """Restore a model of type *mtype* from a saved architecture file."""
    import models
    from tensorflow import GPUOptions, ConfigProto, Session

    # Checkpoints live in the same directory as the architecture file.
    checkdir = '/'.join(architecture_file.split('/')[:-1]) + '/'

    print('\n' * 2, '-' * _repeat_, '\n:: Open Session\n', '-' * _repeat_, '\n')
    session_config = ConfigProto(
        allow_soft_placement=True,
        gpu_options=GPUOptions(per_process_gpu_memory_fraction=0.5))
    sess = Session(config=session_config)
    print('\n', '-' * _repeat_)

    # Instantiate the requested model class, then restore its
    # architecture through the shared base-class loader.
    model = models.__dict__[mtype].__MODEL__()
    bundle = {'model': model, 'architecture': architecture_file, 'dir': checkdir}
    models.base.__MODEL__.load_architecture(bundle)

    model.set_session(sess)
    model.build(training=False)
    model.load(bundle)
    return model
def __init__(self, name):
    """Prepare session, validation data and the training batch generator."""
    # TF1-style resource limits; the session may take the whole GPU but
    # only grows allocation on demand.
    gpu_opts = GPUOptions(per_process_gpu_memory_fraction=1, allow_growth=True)
    sess = Session(config=ConfigProto(gpu_options=gpu_opts))

    self.name = name

    # Validation split is held fully in memory (float32, normalized).
    prep = data_preparation()
    self.X_test, self.y_test = prep.in_mem_validation()

    batch_size = 64
    epoch_step = 5000
    self.tensor_shape = (
        batch_size, C.work_image_size, C.work_image_size, C.channels)

    # Enable the full augmentation pipeline for training batches.
    aug = image_augmentation(
        dimension_shift=True, crop=True, brightness=True, gray=True,
        transformation=True, blurring=True, spots=True)
    self.generator = data_generator(prep.training_list, aug, batch_size, epoch_step)
# Load pretrained vectors; presumably w2i maps words to indices and
# init_vectors seeds the embedding matrix — confirm against load_model.
print("Loading vectors")
w2v_model, w2i, init_vectors = load_model(model_loc)

print("Reading data")
# Training batches of 128; the whole target set forms a single batch
# that doubles as the hold-out evaluation set.
batches = create_batches(128, max_len, s_sents, target, w2i, t_map)
test_batch = create_batches(len(t_sents), max_len, t_sents, test, w2i, t_map)[0]
hold_out = test_batch

print("Assembling model")
# `terminals` is a dict of graph endpoints: 'train' op plus the
# 'words'/'labels'/'lengths' placeholders fed below.
terminals = assemble_model(init_vectors, max_len, len(t_map))

print("Starting training")
from tensorflow import GPUOptions
# Cap per-process GPU memory at the configured fraction.
gpu_options = GPUOptions(per_process_gpu_memory_fraction=gpu_mem)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    # TensorBoard graph/summaries are written under model/.
    summary_writer = tf.summary.FileWriter("model/", graph=sess.graph)
    for e in range(epochs):
        for ind, batch in enumerate(batches):
            # Each batch is (padded sentences, POS tags, true lengths).
            sentences, pos_tags, lens = batch
            sess.run(
                terminals['train'],
                {
                    terminals['words']: sentences,
                    terminals['labels']: pos_tags,
                    terminals['lengths']: lens
                })
from threading import Thread from mne import concatenate_raws from evaluation import EEGNet, stratify, test_rest_split, get_fold, add_kernel_dim, onehot from preparation import epoch_pilot from tensorflow.python.keras import backend as K from tensorflow.python.keras.callbacks import ModelCheckpoint from mne.io import read_raw_brainvision from tensorflow import ConfigProto, GPUOptions, Session config = ConfigProto(gpu_options=GPUOptions(allow_growth=True)) session = Session(config=config) K.set_session(session) GOODS = ['FC3', 'C3', 'CP3', 'Fz', 'Cz', 'POz', 'FC4', 'C4', 'CP4'] T_RANGE = [0.5, 2.5] RESAMPLE = 125 EPOCHS = 300 LO_FREQ = 1. HI_FREQ = 32 BASE_WEIGHTS = '3class_model.h5' def train(model, train, validation, weight_file=None, epochs=300): checkpointer = ModelCheckpoint( filepath=weight_file, verbose=1, save_best_only=True) if weight_file is not None else None return model.fit( train['x'], train['y'],
# Importing the Keras libraries and packages from keras.models import Sequential from keras.layers import Conv2D from keras.layers import MaxPooling2D from keras.layers import Flatten from keras.layers import Dense from keras.layers import Dropout from keras.layers import Activation from keras.layers import BatchNormalization from keras.preprocessing.image import ImageDataGenerator from keras.optimizers import adam from keras.callbacks import TensorBoard from keras import backend as K from tensorflow import Session, ConfigProto, GPUOptions gpuoptions = GPUOptions(allow_growth=True) session = Session(config=ConfigProto(gpu_options=gpuoptions)) K.set_session(session) classifier = Sequential() classifier.add(Conv2D(32, (3, 3), input_shape=(img_size, img_size, 1))) classifier.add(Activation("relu")) classifier.add(Conv2D(32, (3, 3))) classifier.add(Activation("relu")) classifier.add(MaxPooling2D(pool_size=(2, 2))) classifier.add(Dropout(0.25)) classifier.add(Conv2D(64, (3, 3), padding='same')) classifier.add(Activation("relu")) classifier.add(Conv2D(64, (3, 3)))