Example #1
0
def cpu_gpu_check():
    """Apply CPU/GPU settings read from the astroNN configuration.

    Reads the ``(fallback_cpu, limit_gpu_mem)`` pair from ``cpu_gpu_reader()``.
    When the fallback flag is set, force CPU-only execution; when a float
    memory limit is configured, cap GPU memory to that ratio, otherwise let
    ``gpu_memory_manage`` use its defaults.
    """
    fallback_cpu, limit_gpu_mem = cpu_gpu_reader()
    # NOTE(review): original compared `fallback_cpu is True`; plain truthiness
    # is idiomatic — confirm cpu_gpu_reader() only ever returns a bool here.
    if fallback_cpu:
        cpu_fallback()
    # isinstance() already returns a bool, so the original `is True` was redundant
    if isinstance(limit_gpu_mem, float):
        gpu_memory_manage(ratio=limit_gpu_mem)
    else:
        gpu_memory_manage()
Example #2
0
    def test_cpu_gpu_management(self):
        """cpu_fallback accepts boolean flags and rejects anything else."""
        from astroNN.shared.nn_tools import cpu_fallback

        # both boolean values are valid flags
        for valid_flag in (True, False):
            cpu_fallback(flag=valid_flag)

        # a non-boolean flag value must raise ValueError
        with self.assertRaises(ValueError):
            cpu_fallback(flag=2)
Example #3
0
    def test_cpu_gpu_management(self):
        """Check the CUDA_VISIBLE_DEVICES side effects of cpu_fallback."""
        from astroNN.shared.nn_tools import cpu_fallback

        # flag=0 hides all GPUs; environment values are stored as strings
        cpu_fallback(flag=0)
        self.assertEqual(os.environ['CUDA_VISIBLE_DEVICES'], '-1')

        # flag=1 removes the variable from the environment again
        cpu_fallback(flag=1)
        self.assertNotIn("CUDA_VISIBLE_DEVICES", os.environ)

        # any other flag value is rejected
        with self.assertRaises(ValueError):
            cpu_fallback(flag=2)
Example #4
0
import unittest

import numpy as np
import numpy.testing as npt
import tensorflow as tf

from astroNN.shared.nn_tools import cpu_fallback

# Force CPU-only execution before any TensorFlow op runs, so every test in
# this module uses the CPU (must happen before the astroNN.nn imports below).
cpu_fallback()

from astroNN.config import MAGIC_NUMBER
from astroNN.nn import reduce_var
from astroNN.nn.losses import (magic_correction_term, mean_absolute_error,
                               mean_squared_error, categorical_crossentropy,
                               binary_crossentropy, nll, mean_error,
                               zeros_loss, mean_percentage_error, median)
from astroNN.nn.metrics import (
    categorical_accuracy,
    binary_accuracy,
    mean_absolute_percentage_error,
    mean_squared_logarithmic_error,
)


class LossFuncTestCase(unittest.TestCase):
    def test_loss_func_util(self):
        """Custom reduce_var must agree with numpy's variance."""
        values = [1, 2, 3, 4, 5]
        tensor = tf.constant(values)
        self.assertEqual(reduce_var(tensor).numpy(), np.var(values))