Example #1
0
def cpu_gpu_check():
    """Apply the CPU/GPU settings read from configuration.

    Reads the ``(fallback_cpu, limit_gpu_mem)`` pair from ``cpu_gpu_reader``
    and then:

    - forces CPU fallback when ``fallback_cpu`` is set;
    - caps GPU memory at the configured ratio when ``limit_gpu_mem`` is a
      float, otherwise applies the default GPU memory management.
    """
    fallback_cpu, limit_gpu_mem = cpu_gpu_reader()
    # Truthiness test is the idiomatic form; "is True" was redundant.
    if fallback_cpu:
        cpu_fallback()
    # isinstance() already returns a bool, so no "is True" comparison needed.
    if isinstance(limit_gpu_mem, float):
        gpu_memory_manage(ratio=limit_gpu_mem)
    else:
        gpu_memory_manage()
Example #2
0
import unittest

import numpy as np
import tensorflow as tf
import tensorflow.keras as tfk

from astroNN.nn.losses import zeros_loss
from astroNN.shared.nn_tools import gpu_memory_manage

# Convenience aliases so the test bodies can refer to Keras symbols directly.
Input, Dense, concatenate = tfk.layers.Input, tfk.layers.Dense, tfk.layers.concatenate
Conv1D, Conv2D, Flatten = tfk.layers.Conv1D, tfk.layers.Conv2D, tfk.layers.Flatten
Model, Sequential = tfk.models.Model, tfk.models.Sequential

# Configure GPU memory handling once, at import time, before any test runs.
gpu_memory_manage()


class LayerCase(unittest.TestCase):
    def test_MCDropout(self):
        print('==========MCDropout tests==========')
        from astroNN.nn.layers import MCDropout

        # Data preparation
        random_xdata = np.random.normal(0, 1, (100, 7514))
        random_ydata = np.random.normal(0, 1, (100, 25))

        input = Input(shape=[7514])
        dense = Dense(100)(input)
        b_dropout = MCDropout(0.2, name='dropout')(dense)
        output = Dense(25)(b_dropout)