def test_prefer_cached_array():
    """Toggle the cached-array preference on and off and verify that the
    reported array classes reflect the preference, on the CPU backend and,
    when the CUDA extension is installed, on the CUDA backend too.
    """
    # Start from a known state so earlier tests cannot leak a preference in.
    nn.reset_array_preference()

    # Prefer cached arrays: the first reported array class should be cached.
    nn.prefer_cached_array(True)
    check_cached_array_preferred(nn.array_classes())

    # The CUDA extension is optional; only ImportError means "not installed".
    # (A bare `except:` here would also hide KeyboardInterrupt/SystemExit and
    # real bugs inside the extension's import.)
    try:
        from nnabla_ext import cuda
    except ImportError:
        cuda = None

    if cuda is not None:
        check_cached_array_preferred(cuda.array_classes())

    # Now disable the preference and verify the opposite ordering.
    nn.prefer_cached_array(False)
    check_cached_array_preferred(nn.array_classes(), False)
    if cuda is not None:
        check_cached_array_preferred(cuda.array_classes(), False)
def context(device_id='0', type_config='float', *kw):
    """CUDNN context"""
    # NOTE(review): `*kw` collects extra *positional* args (and ignores them);
    # if keyword pass-through was intended this should be `**kw` — confirm
    # with callers before changing the signature.
    from nnabla_ext.cuda import array_classes

    # Backend search order per type configuration, most specific first.
    backends_by_type = {
        'float': ['cudnn:float', 'cuda:float', 'cpu:float'],
        'half': [
            'cudnn:half', 'cudnn:float', 'cuda:half', 'cuda:float',
            'cpu:float'
        ],
        'mixed_half': [
            'cudnn:mixed_half', 'cudnn:half', 'cudnn:float',
            'cuda:mixed_half', 'cuda:half', 'cuda:float', 'cpu:float'
        ],
    }
    if type_config not in backends_by_type:
        raise ValueError("Unknown data type config is given %s" % type_config)
    return Context(backends_by_type[type_config], array_classes()[0],
                   device_id=str(device_id))
def context(device_id=0, *kw):
    """CUDNN context"""
    # NOTE(review): `*kw` collects and discards extra positional args;
    # verify whether `**kw` pass-through was intended.
    from nnabla_ext.cuda import array_classes

    preferred_array_class = array_classes()[0]
    return Context(
        'cpu|cuda',
        preferred_array_class,
        device_id=str(device_id),
        compute_backend='default|cudnn',
    )