def __init__(self,
              get_model_function,
              model_path=model_path,
              characters=characters,
              height=32):
     """Build the recognition model and bind a TF session sized to the GPUs found.

     get_model_function: factory called as (height=..., nClass=...); must return
         an object exposing ``load_weights`` (presumably a Keras model — confirm).
     model_path: weight file loaded into the model; defaults to the module-level
         ``model_path`` (defined outside this view — TODO confirm).
     characters: recognizable character set; its length fixes the class count.
     height: input image height in pixels.
     """
     self.height = height
     # One output class per character in the alphabet.
     self.nClass = len(characters)
     self.characters = characters
     # KTF is presumably keras.backend.tensorflow_backend — confirm import site.
     self.gpus = KTF._get_available_gpus()
     if self.gpus:
         # Cap each process at 40% of GPU memory so several workers can share a card.
         self.config = tf.ConfigProto()
         self.config.gpu_options.per_process_gpu_memory_fraction = 0.4
         self.session = tf.Session(config=self.config)
         KTF.set_session(self.session)
         if len(self.gpus) == 1:
             # Single GPU: build and load directly on the default device.
             self.basemodel = get_model_function(height=self.height,
                                                 nClass=self.nClass)
             self.basemodel.load_weights(model_path)
             self.predict_batch = 512
         else:
             # Multi-GPU: keep the template weights on the CPU so each replica
             # shares them, then wrap with multi_gpu_model for data parallelism.
             with tf.device('/cpu:0'):
                 model_template = get_model_function(height=self.height,
                                                     nClass=self.nClass)
                 model_template.load_weights(model_path)
             self.basemodel = multi_gpu_model(model_template,
                                              len(self.gpus))
             # Scale the batch linearly with the number of replicas.
             self.predict_batch = len(self.gpus) * 512
     else:
         # CPU-only fallback.
         # NOTE(review): predict_batch is never set on this path — any caller
         # reading self.predict_batch will raise AttributeError; confirm intent.
         self.basemodel = get_model_function(height=self.height,
                                             nClass=self.nClass)
         self.basemodel.load_weights(model_path)
def keras_get_available_GPUs():
    """Return the GPU devices visible to Keras (>=2.1.1).

    :return: list of GPU device name strings (empty when none are visible).
    """
    available = keras_tf_backend._get_available_gpus()
    return available
Ejemplo n.º 3
0
def initial_setup():
    """One-time environment setup: Graphviz on PATH plus a Keras GPU-probe fix.

    Patches ``tfback._get_available_gpus`` so Keras works on TF 2.x, where
    ``experimental_list_devices`` was removed (keras-team/keras#13684), then
    calls it once to warm the device cache.
    """
    import keras.backend.tensorflow_backend as tfback

    # Make the Graphviz binaries reachable for Keras model plotting.
    # NOTE(review): Windows-specific install location — adjust per machine.
    os.environ[
        "PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'

    # Replacement probe as per: https://github.com/keras-team/keras/issues/13684
    def _get_available_gpus():
        """Get a list of available gpu devices (formatted as strings).

        # Returns
            A list of available GPU devices.
        """
        # The cache lives on the keras backend module itself. The original
        # `global _LOCAL_DEVICES` statement was dead code (that module-level
        # name was never read or written) and has been removed.
        if tfback._LOCAL_DEVICES is None:
            devices = tf.config.list_logical_devices()
            tfback._LOCAL_DEVICES = [x.name for x in devices]
        return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]

    tfback._get_available_gpus = _get_available_gpus
    tfback._get_available_gpus()  # warm the device cache immediately
 def enable_gpu_support(self):
     """Configure TensorFlow/Keras to use a GPU when one is available.

     Creates a session capped at 80% of GPU memory (growth allowed) and
     installs it as the Keras backend session; otherwise prints a CPU-only
     warning. Stores the probed device list on ``self._gpus``.
     """
     # Checks for GPU support on the system
     self._gpus = tf_backend._get_available_gpus()
     # BUG FIX: original used `len(self._gpus) is not 0`, which compares an
     # int to a literal by identity — unreliable and a SyntaxWarning on 3.8+.
     if len(self._gpus) != 0:
         # NOTE(review): this session exists only for its device-placement
         # logging side effect; it is never stored or closed — confirm intent.
         tf.Session(config=tf.ConfigProto(
             log_device_placement=True))  # logs the GPU
         gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8,
                                     allow_growth=True)
         sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
         tf_backend.set_session(sess)
     else:
         print(
             "No GPU found on system, execution will be done using the CPU only!!!"
         )
     return
Ejemplo n.º 5
0
def check_gpu():
    """Trigger the Keras backend's GPU discovery; the result is discarded."""
    tensorflow_backend._get_available_gpus()
Ejemplo n.º 6
0
 def keras_get_available_GPUs():
     """Check whether keras (>=2.1.1) can see a GPU.

     :return: the backend's list of available GPU device strings.
     """
     gpus = keras_tensorflow_backend._get_available_gpus()
     return gpus
Ejemplo n.º 7
0
from keras.models import load_model

import numpy as np
import os
import platform
import datetime
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.client import device_lib

now = datetime.datetime.now()  #get date and time for data writing

## ======================== Print Software Version =========================
print("\nPython Version:", platform.python_version())  #print python version
print("Tensorflow Version:", tf.__version__)  #print tensorflow version
# NOTE(review): _get_available_gpus is not imported in this fragment — this
# line raises NameError unless it is defined/imported upstream; confirm.
print("GPU Available to Keras:", _get_available_gpus(), '\n')
print(device_lib.list_local_devices())
print('---------------------------------------------------------------')

## ============================ GPU Control ================================
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.99  #Percentage of GPU max usage
config.gpu_options.visible_device_list = "0"  #Select GPU
# NOTE(review): the name `set_session` holds a tf.Session object here, not the
# Keras set_session helper — misleading name; nothing actually registers this
# session with Keras in the visible code. Confirm intent.
set_session = tf.Session(config=config)  #Set Session

## =======================+++ File Paramters +++============================
# Learning/Training/Verification
# Micro or Mega Batches
# Micro less than 1000 (Learning) and Mega less than 10000 (Learning)
import_filepath = ('/Batches/Learning/Micro_Batches/',
                   '/Batches/Testing/Micro_Batches/',
                        help='If argument is present do not train gru RNN')
    parser.add_argument('--skip_lgb',
                        action='store_true',
                        help='If argument is present do not train lgb')
    args = parser.parse_args()

    return args


if __name__ == '__main__':
    # Parse CLI arguments via the (unseen here) get_args helper; defaults are
    # shown in --help thanks to ArgumentDefaultsHelpFormatter.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    args = get_args(parser)
    # Normalize the output directory to end with a slash, then ensure it exists.
    if args.directory[-1] != '/':
        args.directory += '/'

    if not os.path.exists(args.directory):
        os.makedirs(args.directory)

    # If Keras can see a GPU, register a growth-enabled session and switch the
    # recurrent layers to their CuDNN-accelerated variants.
    if K_tf._get_available_gpus():
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.log_device_placement = True
        sess = tf.Session(config=config)
        K_tf.set_session(sess)

        # use CuDNN
        # NOTE(review): these rebind module-level names; effective only if
        # main()/model-building code reads LSTM/GRU from this module — confirm.
        LSTM = CuDNNLSTM
        GRU = CuDNNGRU

    main(args)
Ejemplo n.º 9
0
# List every physical device TF can see (CPU + GPU).
#tf.test.is_gpu_available() # True/False
tf.config.list_physical_devices()

# Or only check for GPUs with CUDA support.
#tf.test.is_gpu_available(cuda_only=True)
tf.config.list_physical_devices('GPU')

# Access the private Keras TF backend so its GPU probe can be patched below.
import keras.backend.tensorflow_backend as tfback
print("tf.__version__ is", tf.__version__)
print("tf.keras.__version__ is:", tf.keras.__version__)


# because experimental_list_devices is removed from tensorflow is removed from tensorflow
# https://github.com/keras-team/keras/issues/13684
def _get_available_gpus():
    """Get a list of available gpu devices (formatted as strings).

    # Returns
        A list of available GPU devices.
    """
    #global _LOCAL_DEVICES
    if tfback._LOCAL_DEVICES is None:
        devices = tf.config.list_logical_devices()
        tfback._LOCAL_DEVICES = [x.name for x in devices]
    return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]


# Install the TF2-compatible probe on the Keras backend module.
tfback._get_available_gpus = _get_available_gpus

# Fail fast if no GPU is visible.
# NOTE(review): assert statements are stripped under `python -O`; raise an
# explicit RuntimeError instead if this check must always run.
assert len(tfback._get_available_gpus()) > 0
Ejemplo n.º 10
0
                            "normal_atten", "atten", "rule_out", "TrimmedMean",
                            "Krum", "GeoMed"
                        ],
                        default="theroy")
    parser.add_argument('--output', type=str, default="stats.txt")
    parser.add_argument("--batch_size", type=int, default=100)
    parser.add_argument("--local_epoch", type=int, default=1)
    parser.add_argument("--select_ratio", type=float, default=1)
    parser.add_argument("--attack_mode", type=int, default=2)
    parser.add_argument("--attack_ratio", type=float, default=0.4)
    args = parser.parse_args()
    gpu = args.gpu

    if gpu != -1:
        os.environ['CUDA_VISIBLE_DEVICES'] = '%d' % gpu
        tb._get_available_gpus()

        config = tf.ConfigProto(device_count={'GPU': 1})
        sess = tf.Session(config=config)
        keras.backend.set_session(sess)

    print("##### Arguements #####")
    print(args)
    print("##########")
    NUM_CLIENT = int(len(writers) * args.select_ratio)
    MAX_NUM_ROUNDS = 50
    server = FLServer(GlobalModel_MNIST_CNN, aggregation=args.aggregation)
    acc_avg = 0
    train_acc = {}
    test_loss = {}
    test_acc = {}
Ejemplo n.º 11
0
def _get_available_gpus():
    """Get a list of available gpu devices (formatted as strings)

    # Returns
        A list of available GPU devices.
    """
    #global _LOCAL_DEVICES
    if tfback._LOCAL_DEVICES is None:
        devices = tf.config.list_logical_devices()
        tfback._LOCAL_DEVICES = [x.name for x in devices]
    return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]


# Install the TF2-compatible probe, run it once to warm the cache, and list
# all logical devices for visibility.
tfback._get_available_gpus = _get_available_gpus
tfback._get_available_gpus()
tf.config.list_logical_devices()
from copy import deepcopy
import cv2
import numpy as np
from PCONV_UNET import *


def predict_final(img, mask):
    """Prepare an image/mask pair and load a PConvUnet for inpainting.

    NOTE(review): this snippet is truncated in this view — the actual
    prediction call and return statement are missing.
    """
    # 256x512 input resolution; weights loaded from 'gan9.h5' with batch-norm
    # layers frozen for inference.
    model = PConvUnet(256, 512, vgg_weights=None)
    model.load('gan9.h5', train_bn=False)

    # OpenCV images are BGR; presumably the network expects RGB — confirm.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (512, 256))

    # Invert the mask — presumably PConv treats nonzero as "valid"; confirm.
    mask = cv2.bitwise_not(mask)
Ejemplo n.º 12
0
# Print tool versions, then install system and Python dependencies.
# NOTE(review): shelling out with os.system to run apt/pip executes shell
# strings; prefer subprocess.run([...]) with an argument list, and these
# commands silently ignore failures (return codes are discarded).
os.system("pip --version")
os.system("python --version")

os.system(
    'apt update && apt install -y libsm6 libxext6 libxrender-dev zip unzip curl nano'
)

# keras may or may not exist so just to make sure...
os.system(
    "pip install keras gputil psutil humanize matplotlib opencv-python tqdm \
joblib pandas dill tabulate jupyterlab pillow seaborn")
os.system(
    'pip install git+https://www.github.com/keras-team/keras-contrib.git')

from keras.backend.tensorflow_backend import _get_available_gpus
# NOTE(review): `warn` is not imported in this fragment — presumably
# `from warnings import warn` happens upstream; confirm.
if len(_get_available_gpus()) == 0:
    warn("No GPU was found!")

############# downloader ###############

import psutil
import humanize
import os
import GPUtil as GPU
GPUs = GPU.getGPUs()
# Colab only provides one GPU and it is not always guaranteed
# NOTE(review): GPUs[0] raises IndexError when no GPU is allocated — the
# warning above does not stop execution; confirm this is acceptable.
gpu = GPUs[0]

### need RAM should be around 12.9 GB, which is enough to load the datasets in memory.
### Also, usually, we have available 11.4 GB of GPU memory, which is more than enough to run this code.