Example #1
    def __init__(self,
                 gpus=0,
                 batch_size=100,
                 segment_size=12,
                 num_features=121,
                 num_layers=2,
                 hidden_size=100,
                 learning_rate=0.0001,
                 output_dim=1,
                 create_tensorboard=False):
        lstm_cell = CuDNNLSTM if is_gpu_available() else CpuLSTM  # the CuDNN kernel only runs on GPU

        # Build the template model on the CPU so that multi-GPU replication
        # (see get_device_specific_model in Example #5) can copy it to the GPUs.
        with tf.device('/cpu:0'):
            self.model = Sequential()
            input_shape = (segment_size, num_features)

            for i in range(num_layers - 1):
                self.model.add(
                    lstm_cell(hidden_size,
                              input_shape=input_shape,
                              return_sequences=True))
            self.model.add(lstm_cell(hidden_size, input_shape=input_shape))

            self.model.add(Dense(output_dim))

        self.model = model_device_adapter.get_device_specific_model(
            self.model, gpus)

        optimizer = Adam(lr=learning_rate)
        self.model.compile(loss='mse', optimizer=optimizer)

        self.model.summary()  # summary() prints itself and returns None

        super(LSTM, self).__init__(batch_size=batch_size,
                                   create_tensorboard=create_tensorboard)
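
A hypothetical instantiation of this wrapper (the class name LSTM comes from the super() call above; the argument values are illustrative, not from the original source):

net = LSTM(gpus=2, batch_size=64, segment_size=12, num_features=121)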
Example #2
    def check_hardware_requirement(self):
        if not test_util.is_gpu_available() and self.use_gpu:
            print('GPU is not available but Deeplab is configured to use the GPU',
                  file=sys.stderr)

        gpus = self.get_available_gpus()
        for gpu in gpus:
            print(gpu)
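
get_available_gpus itself is not shown in this snippet; a plausible implementation (an assumption, following the device_lib pattern that appears in Example #8 below):

from tensorflow.python.client import device_lib

def get_available_gpus():
    # keep only the local devices whose type is GPU
    return [d.name for d in device_lib.list_local_devices()
            if d.device_type == 'GPU']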
Example #3
    def __init__(self, input_data):
        self.inputs = Input(shape=(None, input_data.num_tokens))
        if is_gpu_available(cuda_only=True, min_cuda_compute_capability=3.7):
            self.lstm = CuDNNLSTM(units=UNIT_SIZE,
                                  return_state=True,
                                  name='encoder_lstm',
                                  stateful=False)
        else:
            self.lstm = LSTM(units=UNIT_SIZE,
                             return_state=True,
                             name='encoder_lstm',
                             stateful=False)

        encoder_outputs, state_h, state_c = self.lstm(self.inputs)

        self.encoder_outputs = encoder_outputs
        self.states = [state_h, state_c]
Example #4
    def __init__(self, output_data, encoder):
        self.inputs = Input(shape=(None, output_data.num_tokens))
        if is_gpu_available(cuda_only=True, min_cuda_compute_capability=3.7):
            self.lstm = CuDNNLSTM(units=UNIT_SIZE,
                                  return_sequences=True,
                                  return_state=True,
                                  name='decoder_lstm',
                                  stateful=False)
        else:
            self.lstm = LSTM(units=UNIT_SIZE,
                             return_sequences=True,
                             return_state=True,
                             name='decoder_lstm',
                             stateful=False)

        outputs, _, _ = self.lstm(self.inputs, initial_state=encoder.states)

        self.dense = Dense(output_data.num_tokens, activation='softmax')
        self.outputs = self.dense(outputs)
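
Examples #3 and #4 form a Keras encoder/decoder pair. Below is a minimal sketch of wiring them into a trainable seq2seq model; the Encoder/Decoder class names and the compile settings are assumptions, not from the original source:

from keras.models import Model

encoder = Encoder(input_data)            # Example #3
decoder = Decoder(output_data, encoder)  # Example #4

# Teacher forcing: the decoder consumes the target sequence and is
# initialized with the encoder's final LSTM states (encoder.states).
model = Model([encoder.inputs, decoder.inputs], decoder.outputs)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')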
Example #5
def get_device_specific_model(model, gpus):
    """Utility function for preparing Keras models to use the available devices."""
    print(f"is gpu available: {is_gpu_available()}")
    print(f"gpus: {gpus}")
    if is_gpu_available():
        try:
            print(f"trying to use {gpus} gpus")
            model = multi_gpu_model(model, gpus)
            print("\nUsing multiple gpus\n")
        except Exception as inst:
            print(type(inst))
            print(inst.args)
            print(inst)

            print("\nUsing single GPU\n")
    else:
        print("\nUsing CPU!\n")

    return model
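
Example #1 above calls this helper through model_device_adapter. A minimal standalone usage sketch (the one-layer model and the gpus value are illustrative):

from keras.layers import Dense
from keras.models import Sequential

model = Sequential([Dense(1, input_shape=(10,))])
model = get_device_specific_model(model, gpus=2)  # returns the model unchanged on CPU-only hosts
model.compile(loss='mse', optimizer='adam')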
Example #6
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # must be set before tensorflow is imported to take effect

import unittest

import cv2
import numpy as np
from tensorflow import test as tftest

from vtk.inferrers.tensorrt import TensorRTInferrer

# Note the inversion: True means "no GPU", which is what skipIf expects below.
gpu_unavailable = not tftest.is_gpu_available()
gpu_unavailable_message = "CUDA is unavailable on this computer, skipping this test."
inferrer = TensorRTInferrer("testdata/models/frozen_inference_graph.pb")


def inference_helper(holder, image: np.ndarray):
    result = inferrer.run(image)
    holder.assertGreaterEqual(result["num_detections"], 1)


def inference_on_empty_image(holder):
    with holder.assertRaises(ValueError):
        inferrer.run(np.zeros((300, 300), dtype=int))


@unittest.skipIf(gpu_unavailable, gpu_unavailable_message)
class TensorRTInferrerTest(unittest.TestCase):
    def test_inference_ball_1(self):
        image = cv2.imread("testdata/detect/Ball_001.jpg")
        inference_helper(self, image)

    def test_inference_ball_2(self):
        image = cv2.imread("testdata/detect/Ball_002.jpg")
        inference_helper(self, image)
Example #7
""" Test for passing information about data format via config. """
# pylint: disable=import-error, no-name-in-module
# pylint: disable=redefined-outer-name
import pytest
import numpy as np
from tensorflow.test import is_gpu_available

from batchflow import Pipeline, ImagesBatch, Dataset
from batchflow import B, V, C
from batchflow.models.tf import VGG7, ResNet18, Inception_v1, MobileNet

MODELS = [VGG7, ResNet18, Inception_v1, MobileNet]
LOCATIONS = set(['initial_block', 'body', 'block', 'head'])
NO_GPU = pytest.mark.skipif(not is_gpu_available(), reason='No GPU')


@pytest.fixture()
def model_setup():
    """ Pytest fixture to generate fake dataset and model config with desired format.

    Parameters
    ----------
    data_format: {'channels_last', 'channels_first'}
        Desired format of tensors

    Returns
    -------
    tuple
        an instance of Dataset
        a model config
    """
    return np.expand_dims(n_img, -1)
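
A hypothetical test using the NO_GPU marker and MODELS list defined above (the test name and body are illustrative, not from the original source):

@NO_GPU
@pytest.mark.parametrize('model', MODELS)
def test_builds_on_gpu(model, model_setup):
    # skipped automatically when is_gpu_available() returns False
    ...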


Example #8
import numpy as np
import tensorflow as tf
from tqdm import tqdm

from tensorflow.python.client import device_lib


def get_available_devices():
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos]


print(get_available_devices())
print(tf.__version__)

from tensorflow import test
print(test.is_gpu_available())

config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
config.log_device_placement = True  # to log device placement
sess = tf.compat.v1.Session(config=config)
tf.compat.v1.keras.backend.set_session(sess)

img_vol, seg_vol = [], []
for img_path, s_paths in tqdm(cxr_images):
    # imread here is assumed to be a project-local helper (it accepts apply_clahe)
    img_vol += [imread(img_path)]
    seg_vol += [
        np.max(
            np.stack([imread(s_path, apply_clahe=False) for s_path in s_paths],
                     0), 0)
    ]
Example #9
	def define_model(self, sequence_size, n_add_seq_layers, dense_pse_size,
			concat_size, dense_size, dropout, l2_reg, sequence_length,
			w2v_embedding_dim, pse_shape, n_add_pse_dense, n_dense,
			output_n_classes):
		
		# Assign simple names
		# Use CuDNN implementation of LSTM if GPU is available, LSTM if it isn't
		# (non-CuDNN implementation is slower even on GPU)
		if test.is_gpu_available():
			LSTM = keras.layers.CuDNNLSTM
		else:
			LSTM = keras.layers.LSTM

		Dense = keras.layers.Dense
		Dropout = keras.layers.Dropout
		Input = keras.layers.Input
		BatchNormalization = keras.layers.BatchNormalization
		concatenate = keras.layers.concatenate
		l2 = keras.regularizers.l2
		Model = keras.models.Model

		# Define the neural network structure

		to_concat = []
		inputs = []
		
		# The word2vec inputs and layers before concatenation
		w2v_input = Input(shape=(sequence_length, w2v_embedding_dim, ), dtype='float32', name='w2v_input')
		w2v = LSTM(sequence_size, return_sequences=True)(w2v_input)
		w2v = Dropout(dropout)(w2v)
		for _ in range(n_add_seq_layers):
			w2v = LSTM(sequence_size, return_sequences=True)(w2v)
			w2v = Dropout(dropout)(w2v)
		w2v = LSTM(sequence_size)(w2v)
		w2v = Dropout(dropout)(w2v)
		w2v = Dense(sequence_size, activation='relu')(w2v)
		w2v = Dropout(dropout)(w2v)
		to_concat.append(w2v)
		inputs.append(w2v_input)
		
		# The multi-hot vector input (profile state encoder) and layers before concatenation
		pse_input = Input(shape=(pse_shape,), dtype='float32', name='pse_input')
		pse = Dense(dense_pse_size, activation='relu', kernel_regularizer=l2(l2_reg))(pse_input)
		pse = Dropout(dropout)(pse)
		for _ in range(n_add_pse_dense):
			pse = BatchNormalization()(pse)
			pse = Dense(dense_pse_size, activation='relu', kernel_regularizer=l2(l2_reg))(pse)
			pse = Dropout(dropout)(pse)
		to_concat.append(pse)
		inputs.append(pse_input)

		# Concatenation and dense layers
		concatenated = concatenate(to_concat)
		for _ in range(n_dense):
			concatenated = BatchNormalization()(concatenated)
			concatenated = Dense(concat_size, activation='relu', kernel_regularizer=l2(l2_reg))(concatenated)
			concatenated = Dropout(dropout)(concatenated)
		concatenated = BatchNormalization()(concatenated)
		output = Dense(output_n_classes, activation='softmax', name='main_output')(concatenated)

		# Compile the model
		model = Model(inputs=inputs, outputs=output)
		model.compile(optimizer='Adam',
				loss=['sparse_categorical_crossentropy'],
				metrics=['sparse_categorical_accuracy',
					self.sparse_top10_accuracy,
					self.sparse_top30_accuracy])
		
		return model
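
A hypothetical call to define_model with illustrative hyperparameters (none of these values come from the original source):

model = self.define_model(
    sequence_size=128, n_add_seq_layers=1, dense_pse_size=64,
    concat_size=128, dense_size=64, dropout=0.3, l2_reg=1e-4,
    sequence_length=30, w2v_embedding_dim=200, pse_shape=1000,
    n_add_pse_dense=1, n_dense=2, output_n_classes=50)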
Example #10
    def test_gpu_differential(self):
        if tft.is_gpu_available(cuda_only=True) and tft.is_built_with_cuda():
            self._test_differential(True, ('LINEAR', 'BSPLINE'))
        else:
            self.skipTest('No CUDA support available')
Example #11
    def test_gpu_resampling(self):
        if tft.is_gpu_available(cuda_only=True) and tft.is_built_with_cuda():
            self._test_resampling(True)
        else:
            self.skipTest('No CUDA support available')
Example #12
File: utils.py | Project: Chris1221/deepStab
def gpu():
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # expose only the GPU with index 1
    assert is_gpu_available(), "GPU is not available."
Example #13
    def start_training(self):
        if self.training_flag:  # a model is already being trained
            reply = QMessageBox.information(self, 'Notice', 'A model is training, please wait...',
                                            QMessageBox.Yes, QMessageBox.Yes)
            if reply == QMessageBox.Yes:
                return  # exit the function

        if '' == self.data_file_path:  # no file has been selected yet
            reply = QMessageBox.information(self, 'Notice', 'Please select a file first!',
                                            QMessageBox.Yes, QMessageBox.Yes)
            if reply == QMessageBox.Yes:
                return  # exit the function

        # At this point no model is training and a file has been selected;
        # ask the user to confirm.
        select_model = self.ui.comb_select_model.currentText()  # the model the user selected
        reply = QMessageBox.information(
            self, 'Notice', 'Confirm training with "' + select_model + '".\nPlease make sure all the data is in one folder!',
            QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
        if reply == QMessageBox.No:
            return  # exit the function

        # Ready to start training
        self.training_flag = True  # set the "training in progress" flag
        self.ui.statusbar.showMessage('Training model...', 120)

        # Different models use different parameter settings
        if '随机森林' == select_model:  # '随机森林' is the combo-box label for random forest
            signal_length = 500
            signal_number = 1000  # number of samples to draw from each class
            normal = False  # whether to normalize
            rate = [0.6, 0.2, 0.2]  # train/test/validation split ratio
        else:
            signal_length = 2048
            signal_number = 1000  # number of samples to draw from each class
            normal = True  # whether to normalize
            rate = [0.7, 0.2, 0.1]  # train/test/validation split ratio
        # Get the folder containing the data: E:/fff/hhh/iii/tt.mat --> tt.mat --> E:/fff/hhh/iii/
        data_path = self.data_file_path.split('/')[-1]  # first get the file name
        data_path = self.data_file_path.split(data_path)[0]  # then strip the file name from the path

        if '1D_CNN' == select_model:
            self.model_name = '1D_CNN'
            text = self.ui.tb_train_result.toPlainText()  # text currently displayed
            if test.is_gpu_available():
                self.ui.tb_train_result.setText(
                    text + '\nModel: 1D_CNN\nGPU detected\n--------------')
            else:
                self.ui.tb_train_result.setText(
                    text + '\nModel: 1D_CNN\nNo GPU available\n--------------')
            text = self.ui.tb_train_result.toPlainText()  # text currently displayed
            self.ui.tb_train_result.setText(text +
                                            '\nTraining model...\n--------------')

            # Spawn a worker thread to train the model
            training_thread = threading.Thread(
                target=CNN_1D_training,
                args=(data_path, signal_length, signal_number, normal, rate,
                      self.cache_path, self.model_name))
            # training_thread.setDaemon(True)  # daemon thread
            training_thread.start()
            training_end_signal.send_msg.connect(
                self.training_end_slot)  # connect the signal to its slot

        elif 'LSTM' == select_model:
            self.model_name = 'LSTM'
            text = self.ui.tb_train_result.toPlainText()  # text currently displayed
            if test.is_gpu_available():
                self.ui.tb_train_result.setText(
                    text + '\nModel: LSTM\nGPU detected\n--------------')
            else:
                self.ui.tb_train_result.setText(
                    text + '\nModel: LSTM\nNo GPU available\n--------------')
            text = self.ui.tb_train_result.toPlainText()  # text currently displayed
            self.ui.tb_train_result.setText(text +
                                            '\nTraining model...\n--------------')

            # Spawn a worker thread to train the model
            training_thread = threading.Thread(
                target=LSTM_training,
                args=(data_path, signal_length, signal_number, normal, rate,
                      self.cache_path, self.model_name))
            # training_thread.setDaemon(True)  # daemon thread
            training_thread.start()
            training_end_signal.send_msg.connect(
                self.training_end_slot)  # connect the signal to its slot

        elif 'GRU' == select_model:
            self.model_name = 'GRU'
            text = self.ui.tb_train_result.toPlainText()  # text currently displayed
            if test.is_gpu_available():
                self.ui.tb_train_result.setText(
                    text + '\nModel: GRU\nGPU detected\n--------------')
            else:
                self.ui.tb_train_result.setText(
                    text + '\nModel: GRU\nNo GPU available\n--------------')
            text = self.ui.tb_train_result.toPlainText()  # text currently displayed
            self.ui.tb_train_result.setText(text +
                                            '\nTraining model...\n--------------')

            # Spawn a worker thread to train the model
            training_thread = threading.Thread(
                target=GRU_training,
                args=(data_path, signal_length, signal_number, normal, rate,
                      self.cache_path, self.model_name))
            # training_thread.setDaemon(True)  # daemon thread
            training_thread.start()
            training_end_signal.send_msg.connect(
                self.training_end_slot)  # connect the signal to its slot

        elif '随机森林' == select_model:  # random forest
            self.model_name = 'random_forest'
            text = self.ui.tb_train_result.toPlainText()  # text currently displayed
            self.ui.tb_train_result.setText(
                text + '\nModel: random forest\nTraining model...\n--------------')

            # Spawn a worker thread to train the model
            training_thread = threading.Thread(
                target=random_forest_training,
                args=(data_path, signal_length, signal_number, normal, rate,
                      self.cache_path, self.model_name))
            # training_thread.setDaemon(True)  # daemon thread
            training_thread.start()
            training_end_signal.send_msg.connect(
                self.training_end_slot)  # connect the signal to its slot
Example #14
import datetime as dt
import numpy as np
import os
from keras.callbacks import EarlyStopping, Callback
from keras.layers import Dense, Dropout
from keras.models import Sequential, load_model
from matplotlib import pyplot
from numpy import newaxis
from tensorflow.test import is_gpu_available

from core.utils import Timer

if is_gpu_available():
    from keras.layers import CuDNNLSTM as LSTM
else:
    from keras.layers import LSTM


class NBatchLogger(Callback):
    def __init__(self, total_epochs, total_batches):
        super().__init__()
        self.total_epochs = total_epochs
        self.total_batches = total_batches

        self.current_epoch = 0
        self.current_batch = 0

    def update_msg(self):
        progress = int(((self.current_epoch + 1) / self.total_epochs) * 50)
        print("Training " +
              "Epoch: %03d/%03d" % (self.current_epoch, self.total_epochs) +
              # the source snippet is truncated here; the batch counter and
              # 50-character progress bar below are an assumed completion
              " Batch: %03d/%03d" % (self.current_batch, self.total_batches) +
              " [" + "#" * progress + "-" * (50 - progress) + "]")
Example #15
    def _get_devs(self):
        devs = [False]
        if tft.is_gpu_available(cuda_only=True) and tft.is_built_with_cuda():
            devs += [True]

        return devs
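
A sketch of how _get_devs is typically consumed inside a tf.test.TestCase, running the same check on CPU and, when CUDA is available, again on GPU (the test body is hypothetical):

    def test_resampling_on_all_devices(self):
        for use_gpu in self._get_devs():
            with self.test_session(use_gpu=use_gpu):
                self._test_resampling(use_gpu)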