def init_tensorflow():
    """Initialise a TensorFlow 1.x session and register it with Keras.

    Quietens most TF C++ log output, enables on-demand GPU memory
    allocation, turns on device-placement logging, and installs the
    resulting session as the Keras backend session via ``set_session``.
    """
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress INFO/WARNING C++ logs
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True  # claim GPU memory lazily
    tf_config.log_device_placement = True      # log which device each op runs on
    session = tf.Session(config=tf_config)
    set_session(session)
def configure_gpu(gpu):
    """Expose a single CUDA device and install a growth-enabled session.

    Parameters
    ----------
    gpu : int or str
        Index (or comma-separated indices) written to
        ``CUDA_VISIBLE_DEVICES`` to select which GPU the process sees.

    NOTE(review): rebinds the module-level ``config`` — callers that read
    the global after this call rely on that side effect, so it is kept.
    """
    global config
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"   # order devices by PCI bus id
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True           # claim GPU memory lazily
    session = tf.Session(config=config)
    set_session(session)
import os import numpy as np import matplotlib # matplotlib.use('Agg') import pylab as plt import keras.backend as K assert K.image_data_format( ) == 'channels_last', "Backend should be tensorflow and data_format channel_last" from keras.backend import tf as ktf config = ktf.ConfigProto() config.gpu_options.allow_growth = True session = ktf.Session(config=config) K.set_session(session) from tqdm import tqdm class Trainer(object): def __init__(self, dataset, gan, output_dir='output/generated_samples', checkpoints_dir='output/checkpoints', training_ratio=5, display_ratio=1, checkpoint_ratio=10, start_epoch=0, number_of_epochs=100, batch_size=64, **kwargs): self.dataset = dataset
"""Module-level setup for the SciBERT CEA model: imports, a GPU session
with on-demand memory growth, logging, and training hyper-parameters."""
# FIX: `os` is used below (os.path.join) but was never imported in this module.
import logging
import os
import pickle

import numpy as np
import keras
from keras import Input, Model, optimizers
from keras import backend as K
from keras.activations import softmax
from keras.backend import batch_dot, tf
from keras.callbacks import Callback
from keras.layers import (Add, Bidirectional, Concatenate, Dense, Dot,
                          Dropout, Embedding, GRU, GlobalMaxPooling1D,
                          Lambda, Multiply, Permute, TimeDistributed,
                          concatenate)
from keras.models import load_model
from keras.optimizers import RMSprop
from keras_bert import (Tokenizer, load_trained_model_from_checkpoint,
                        load_vocabulary)
from sklearn.metrics import f1_score

from utils.batch_gather import batch_gather
from utils.CyclicLR import CyclicLR
from utils.threshold import count, f1, load_pkl, save_file, threshold_search

# Allocate GPU memory on demand instead of grabbing it all up front.
gpu_options = tf.GPUOptions(allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

logging.basicConfig(level=logging.INFO, filename='bert_hua_cea.log')

# Paths into the pretrained SciBERT checkpoint directory.
pretrained_path = 'scibert_scivocab_uncased'
config_path = os.path.join(pretrained_path, 'bert_config.json')
checkpoint_path = os.path.join(pretrained_path, 'bert_model.ckpt')
vocab_path = os.path.join(pretrained_path, 'vocab.txt')

# Training hyper-parameters.
batch_size = 16
bert_out_shape = 768       # presumably BERT-base hidden size — TODO confirm
max_sentence_length = 512  # presumably BERT positional-embedding limit — TODO confirm
# Your actual code here # Import ROS-dependencies here import rospy from nav_msgs.msg import Odometry from sensor_msgs.msg import Joy from sensor_msgs.msg import Image from cv_bridge import CvBridge from ackermann_msgs.msg import AckermannDriveStamped """ Solves a memory issue, needs to be done before importing Keras """ # import tensorflow as tf from keras.backend import tf config = tf.ConfigProto() #config.gpu_options.per_process_gpu_memory_fraction = 0.7 config.gpu_options.allow_growth = True session = tf.Session(config=config) from keras.models import load_model import keras.backend as K from scipy.optimize import curve_fit frame_id = rospy.get_param('~frame_id', 'odom') max_accel_x = rospy.get_param('~acc_lim_x', 1.0) max_jerk_x = rospy.get_param('~jerk_lim_x', 0.0) rospy.init_node('autonomous_driver') driver = RunModel() try: rospy.spin()