Example no. 1
0
 def test_maxout_conv_c01b_cifar10(self):
     """Train a small Maxout conv (c01b) model on CIFAR-10 on the GPU and
     check that the final monitored performance is close to the expected
     reference values.

     Skips when CUDA is unavailable or the dataset path is not configured.
     """
     if cuda.cuda_available is False:
         raise SkipTest('Optional package cuda disabled')
     if not hasattr(cuda, 'unuse'):
         raise Exception("Theano version too old to run this test!")
     # Tests that we can run a small convolutional model on GPU,
     assert cuda.cuda_enabled is False
     # Even if there is a GPU, but the user didn't specify device=gpu
     # we want to run this test.
     # Save floatX BEFORE entering the try-block: if this assignment sat
     # inside the try and failed, the finally clause would raise a
     # NameError on old_floatX instead of reporting the real error.
     old_floatX = config.floatX
     try:
         cuda.use('gpu')
         config.floatX = 'float32'
         try:
             train = yaml_parse.load(yaml_string_maxout_conv_c01b_cifar10)
         except NoDataPathError:
             raise SkipTest("PYLEARN2_DATA_PATH environment variable "
                            "not defined")
         train.main_loop()
         # Check that the performance is close to the expected one:
         # test_y_misclass: 0.3777000308036804
         misclass_chan = train.algorithm.monitor.channels['test_y_misclass']
         assert misclass_chan.val_record[-1] < 0.38, \
             ("misclass_chan.val_record[-1] = %g" %
              misclass_chan.val_record[-1])
         # test_y_nll: 1.0978516340255737
         nll_chan = train.algorithm.monitor.channels['test_y_nll']
         assert nll_chan.val_record[-1] < 1.1, \
             ("nll_chan.val_record[-1] = %g" %
              nll_chan.val_record[-1])
     finally:
         # Restore global Theano state whether the test passed or failed.
         config.floatX = old_floatX
         cuda.unuse()
     assert cuda.cuda_enabled is False
Example no. 2
0
 def test_maxout_conv_c01b_cifar10(self):
     """Train a small Maxout conv (c01b) model on CIFAR-10 on the GPU and
     check that the final monitored performance is close to the expected
     reference values.

     Skips when CUDA is unavailable or the dataset path is not configured.
     """
     if cuda.cuda_available is False:
         raise SkipTest('Optional package cuda disabled')
     if not hasattr(cuda, 'unuse'):
         raise Exception("Theano version too old to run this test!")
     # Tests that we can run a small convolutional model on GPU,
     assert cuda.cuda_enabled is False
     # Even if there is a GPU, but the user didn't specify device=gpu
     # we want to run this test.
     # Save floatX BEFORE entering the try-block: if this assignment sat
     # inside the try and failed, the finally clause would raise a
     # NameError on old_floatX instead of reporting the real error.
     old_floatX = config.floatX
     try:
         cuda.use('gpu')
         config.floatX = 'float32'
         try:
             train = yaml_parse.load(yaml_string_maxout_conv_c01b_cifar10)
         except NoDataPathError:
             raise SkipTest("PYLEARN2_DATA_PATH environment variable "
                            "not defined")
         train.main_loop()
         # Check that the performance is close to the expected one:
         # test_y_misclass: 0.3777000308036804
         misclass_chan = train.algorithm.monitor.channels['test_y_misclass']
         assert misclass_chan.val_record[-1] < 0.38, \
             ("misclass_chan.val_record[-1] = %g" %
              misclass_chan.val_record[-1])
         # test_y_nll: 1.0978516340255737
         # Report the observed value on failure, consistent with the
         # misclassification assert above.
         nll_chan = train.algorithm.monitor.channels['test_y_nll']
         assert nll_chan.val_record[-1] < 1.1, \
             ("nll_chan.val_record[-1] = %g" %
              nll_chan.val_record[-1])
     finally:
         # Restore global Theano state whether the test passed or failed.
         config.floatX = old_floatX
         cuda.unuse()
     assert cuda.cuda_enabled is False
Example no. 3
0
 def test_maxout_conv_c01b_basic(self):
     """Smoke-test that a small Maxout conv (c01b) model can be built
     from YAML and trained end-to-end on the GPU.

     Skips when CUDA is unavailable.
     """
     if cuda.cuda_available is False:
         raise SkipTest('Optional package cuda disabled')
     if not hasattr(cuda, 'unuse'):
         raise Exception("Theano version too old to run this test!")
     # Tests that we can run a small convolutional model on GPU,
     assert cuda.cuda_enabled is False
     # Even if there is a GPU, but the user didn't specify device=gpu
     # we want to run this test.
     # Save floatX BEFORE entering the try-block: if this assignment sat
     # inside the try and failed, the finally clause would raise a
     # NameError on old_floatX instead of reporting the real error.
     old_floatX = config.floatX
     try:
         cuda.use('gpu')
         config.floatX = 'float32'
         train = yaml_parse.load(yaml_string_maxout_conv_c01b_basic)
         train.main_loop()
     finally:
         # Restore global Theano state whether the test passed or failed.
         config.floatX = old_floatX
         cuda.unuse()
     assert cuda.cuda_enabled is False
Example no. 4
0
 def test_maxout_conv_c01b_basic(self):
     """Smoke-test that a small Maxout conv (c01b) model can be built
     from YAML and trained end-to-end on the GPU.

     Skips when CUDA is unavailable.
     """
     if cuda.cuda_available is False:
         raise SkipTest('Optional package cuda disabled')
     if not hasattr(cuda, 'unuse'):
         raise Exception("Theano version too old to run this test!")
     # Tests that we can run a small convolutional model on GPU,
     assert cuda.cuda_enabled is False
     # Even if there is a GPU, but the user didn't specify device=gpu
     # we want to run this test.
     # Save floatX BEFORE entering the try-block: if this assignment sat
     # inside the try and failed, the finally clause would raise a
     # NameError on old_floatX instead of reporting the real error.
     old_floatX = config.floatX
     try:
         cuda.use('gpu')
         config.floatX = 'float32'
         train = yaml_parse.load(yaml_string_maxout_conv_c01b_basic)
         train.main_loop()
     finally:
         # Restore global Theano state whether the test passed or failed.
         config.floatX = old_floatX
         cuda.unuse()
     assert cuda.cuda_enabled is False