def testDelattrs(self):
    """Testing deletion of attributes.

    Sets then deletes an attribute on the proxy object and verifies that a
    subsequent access raises NameError or KeyError.
    """
    self.lc.x = 5
    del self.lc.x
    try:
        self.lc.x
    except (NameError, KeyError):
        pass
    else:
        # unittest has no module-level fail(); use the TestCase method.
        self.fail("No exception raised")
def test_time_conversion(self): """ Tests that forday and dayfor are consistent """ numtest=20000 for k in range(numtest) : yrflag = random.randrange(0,4) selector = random.randrange(0,3) dtime = random.randrange(1,200000) # Tests wnen close to integer, and normal if selector==0 : dtime = dtime + random.random()/100. elif selector==1 : dtime = dtime + 1.-random.random()/100. elif selector==2 : dtime = dtime + random.random() if k%50000 == 0 : print "Doing test %10d of %10d. yrflag=%d, dtime=%20.16g"%(k,numtest,yrflag,dtime) try : #print "dtime,yrflag=",dtime,yrflag iyear,iday,ihour = modeltools.hycom.forday(dtime,yrflag) #print "iyear,iday,ihour=",iyear,iday,ihour dtimeout=modeltools.hycom.dayfor(iyear,iday,ihour,yrflag) #print "dtime, reversed:",dtime except: unittest.fail("Caught error. dtime = %20.10g,yrflag=%d"%(dtime,yrflag)) #print yrflag,dtime,dtimeout-dtime,3600./86400. #dtimeout is "floored" to nearest hour. So error should never be greater than this: if (dtime-dtimeout)>3600./86400: unittest.fail("Error: inn forday or dayfor, dtime=%20.10g, yrflag%d="%(dtime,yrflag))
def assertTuplesAlmostEqual(self, a, b, tol=1e-8):
    """Assert two sequences have equal length and agree element-wise within tol.

    :param a: first sequence of numbers
    :param b: second sequence of numbers
    :param tol: maximum allowed absolute difference per element
    """
    if len(a) != len(b):
        # unittest has no module-level fail(); use the TestCase method.
        self.fail()
    for _a, _b in zip(a, b):
        if abs(_a - _b) > tol:
            self.fail()
    return
def test_fit(self):
    """Train the giraffe model.

    Expects ValueError if the classifier accepts null values, or if it
    accepts numpy label/feature arrays of different sizes.
    """
    print('Fit test...')
    inicializador = Practica04()
    inicializador.main('./imagenes/train', 64)
    features, labels = load_pickles()
    clasificador = GiraffesClassifier()
    try:
        clasificador.fit(None, None)
        # unittest has no module-level fail(); use the TestCase method.
        self.fail('el clasificador acepta parametros nulos')
    except ValueError:
        pass
    try:
        clasificador.fit(features, labels[:-1])
        self.fail('el clasificador acepta features y labels de ' +
                  'tamano diferente')
    except ValueError:
        pass
    clasificador.fit(features, labels)
    assert clasificador.model is not None
def test_afile_writeread_nomask(self) : idm = random.randrange(10,5000) jdm = random.randrange(10,5000) print "Creating %dX%d random arrays"%(idm,jdm) scale=1e4 wfld1=numpy.random.rand(jdm,idm)*scale wfld2=numpy.random.rand(jdm,idm)*scale wfld3=numpy.random.rand(jdm,idm)*scale afile=modeltools.hycom.AFile(idm,jdm,"test.a","w") afile.writerecord(wfld1,None,record=None) afile.writerecord(wfld2,None,record=None) afile.writerecord(wfld3,None,record=None) afile.close() afile=modeltools.hycom.AFile(idm,jdm,"test.a","r") rfld1=afile.readrecord(None,record=0) rfld2=afile.readrecord(None,record=1) rfld3=afile.readrecord(None,record=2) afile.close() maxdiff1=numpy.abs(numpy.amax(rfld1-wfld1))/scale maxdiff2=numpy.abs(numpy.amax(rfld2-wfld2))/scale maxdiff3=numpy.abs(numpy.amax(rfld3-wfld3))/scale if all([elem < 1e-7 for elem in [maxdiff1,maxdiff2,maxdiff3]]) : print "afile io test passed" else : unittest.fail("AFile IO failed. MAx diff between read/written: %14.7g"%max([maxdiff1,maxdiff2,maxdiff3]))
def test_abfilebathy_writeread_nomask(self) : idm = random.randrange(10,5000) jdm = random.randrange(10,5000) scale=1e4 print "Creating %dX%d bathy array"%(idm,jdm) wfldout=scale + numpy.random.rand(jdm,idm)*scale bathyfile=modeltools.hycom.ABFileBathy("testbathy","w") bathyfile.write_field(wfldout,None) bathyfile.close() print "Reading %dX%d bathy array"%(idm,jdm) bathyfile=modeltools.hycom.ABFileBathy("testbathy","r",idm=idm,jdm=jdm) wfldin=bathyfile.read_field("depth",None) bathyfile.close() fldmaxdiff=numpy.abs(numpy.amax(wfldin-wfldout))/scale bmin,bmax = bathyfile.bminmax("depth") amax = numpy.amax(wfldin) amin = numpy.amin(wfldin) abmindiff=numpy.abs(amin-bmin)/scale abmaxdiff=numpy.abs(amax-bmax)/scale #print amin,amax #print bmin,bmax if fldmaxdiff > 1e-7 and abmindiff >1e-5 and abmaxdiff > 1e-5: print "test_Abfilebathy_writeread_nomasl test passed" else : unittest.fail("AFile IO failed. MAx diff between read/written: %14.7g"%max([fldmaxdiff,abmaxdiff,abmindiff]))
def test_afile_writeread_nomask(self): idm = random.randrange(10, 5000) jdm = random.randrange(10, 5000) print "Creating %dX%d random arrays" % (idm, jdm) scale = 1e4 wfld1 = numpy.random.rand(jdm, idm) * scale wfld2 = numpy.random.rand(jdm, idm) * scale wfld3 = numpy.random.rand(jdm, idm) * scale afile = modeltools.hycom.AFile(idm, jdm, "test.a", "w") afile.writerecord(wfld1, None, record=None) afile.writerecord(wfld2, None, record=None) afile.writerecord(wfld3, None, record=None) afile.close() afile = modeltools.hycom.AFile(idm, jdm, "test.a", "r") rfld1 = afile.readrecord(None, record=0) rfld2 = afile.readrecord(None, record=1) rfld3 = afile.readrecord(None, record=2) afile.close() maxdiff1 = numpy.abs(numpy.amax(rfld1 - wfld1)) / scale maxdiff2 = numpy.abs(numpy.amax(rfld2 - wfld2)) / scale maxdiff3 = numpy.abs(numpy.amax(rfld3 - wfld3)) / scale if all([elem < 1e-7 for elem in [maxdiff1, maxdiff2, maxdiff3]]): print "afile io test passed" else: unittest.fail( "AFile IO failed. MAx diff between read/written: %14.7g" % max([maxdiff1, maxdiff2, maxdiff3]))
def test_abfilebathy_writeread_nomask(self): idm = random.randrange(10, 5000) jdm = random.randrange(10, 5000) scale = 1e4 print "Creating %dX%d bathy array" % (idm, jdm) wfldout = scale + numpy.random.rand(jdm, idm) * scale bathyfile = modeltools.hycom.ABFileBathy("testbathy", "w") bathyfile.write_field(wfldout, None) bathyfile.close() print "Reading %dX%d bathy array" % (idm, jdm) bathyfile = modeltools.hycom.ABFileBathy("testbathy", "r", idm=idm, jdm=jdm) wfldin = bathyfile.read_field("depth", None) bathyfile.close() fldmaxdiff = numpy.abs(numpy.amax(wfldin - wfldout)) / scale bmin, bmax = bathyfile.bminmax("depth") amax = numpy.amax(wfldin) amin = numpy.amin(wfldin) abmindiff = numpy.abs(amin - bmin) / scale abmaxdiff = numpy.abs(amax - bmax) / scale #print amin,amax #print bmin,bmax if fldmaxdiff > 1e-7 and abmindiff > 1e-5 and abmaxdiff > 1e-5: print "test_Abfilebathy_writeread_nomasl test passed" else: unittest.fail( "AFile IO failed. MAx diff between read/written: %14.7g" % max([fldmaxdiff, abmaxdiff, abmindiff]))
def test_fit(self):
    """Test training the classifier.

    Builds random feature vectors and labels and tries to train the
    classifier with them; null or mismatched inputs must raise ValueError.
    """
    import numpy as np
    from random import randrange as rand
    print('Fit test...')
    n = rand(32, 64)
    # Original code built a small constant array here that was immediately
    # overwritten; the dead assignment has been removed.
    features = np.random.rand(n, 64, 64, 1)
    labels = [rand(0, 10) for _ in range(n)]
    clasificador = OstrichClassifier()
    try:
        clasificador.fit(None, None)
        # unittest has no module-level fail(); use the TestCase method.
        self.fail('el clasificador acepta parametros nulos')
    except ValueError:
        pass
    try:
        clasificador.fit(features, labels[:-1])
        self.fail('el clasificador acepta features y labels de ' +
                  'tamano diferente')
    except ValueError:
        pass
    clasificador.fit(features, labels)
    assert clasificador.model is not None
def testBugFix1149372(self):
    """If __str__ raises an exception, it must be re-raised (bug 1149372)."""
    try:
        c = C()
        str(c)
    except Exception as e:
        assert e.args[0] == "E"
        return
    # unittest has no module-level fail(); use the TestCase method.
    self.fail("if __str__ raises an exception, re-raise")
def testGetAttrs(self):
    """Test getting of various attributes."""
    self.assertIsInstance(self.lc.LocalConnection, type)
    try:
        self.lc.asotnuhaoseu
    except (NameError, KeyError):
        pass
    else:
        # unittest has no module-level fail(); use the TestCase method.
        self.fail("NameError or KeyError should be raised")
def test_create_zone_call_rndc_connection_refused(self):
    """create_zone against a closed rndc port must raise exceptions.Backend."""
    # Run rndc against a closed port. Albeit this does not perform a
    # successful rndc run, it is enough to test the argument parsing
    context = self.get_context()
    exp_msg = 'rndc: connect failed: 127.0.0.1#33953: connection refused'
    try:
        self.backend.create_zone(context, self.zone)
        # unittest has no module-level fail(); use the TestCase method.
        self.fail("Did not raise an exception")
    except exceptions.Backend as e:
        # assertIn gives a clearer failure message than assertTrue(x in y)
        self.assertIn(exp_msg, str(e))
class ExceptionHandling(unittest.TestCase): def testBugFix1149372(self): try: c = C() str(c) except Exception, e: assert e.args[0] == "E" return unittest.fail("if __str__ raises an exception, re-raise")
def test_abfilegrid_writeread(self): import modeltools.grid grid = modeltools.grid.Proj4Grid( "+proj=stere +lat_ts=80 +lat_0=90 +lon_0=0", -45, 60, 20000, 20000, 200, 300) modeltools.hycom.write_regional_grid(grid) inflds = modeltools.hycom.read_regional_grid() scale = 1e-4 flds = {} flds["plon"], flds["plat"] = grid.pgrid() flds["ulon"], flds["ulat"] = grid.ugrid() flds["vlon"], flds["vlat"] = grid.vgrid() flds["qlon"], flds["qlat"] = grid.qgrid() flds["scpx"] = grid.scpx() flds["scux"] = grid.scux() flds["scvx"] = grid.scvx() flds["scqx"] = grid.scqx() flds["scpy"] = grid.scpy() flds["scuy"] = grid.scuy() flds["scvy"] = grid.scvy() flds["scqy"] = grid.scqy() flds["cori"] = grid.corio() flds["pang"] = grid.p_azimuth() flds["pasp"] = grid.aspect_ratio() for fldname in flds.keys(): print fldname maxdiff = max_diff(flds[fldname], inflds[fldname]) print fldname, maxdiff if maxdiff > 1e-7: unittest.fail( "AFile IO failed. MAx diff between %s read/written: %14.7g" % (fldname, maxdiff)) #bmin,bmax = bathyfile.bminmax("depth") #amax = numpy.amax(wfldin) #amin = numpy.amin(wfldin) #abmindiff=numpy.abs(amin-bmin)/scale #abmaxdiff=numpy.abs(amax-bmax)/scale #print amin,amax #print bmin,bmax raise NameError, "test"
def test_abfilegrid_writeread(self) : import modeltools.grid grid = modeltools.grid.Proj4Grid("+proj=stere +lat_ts=80 +lat_0=90 +lon_0=0",-45,60,20000,20000,200,300) modeltools.hycom.write_regional_grid(grid) inflds = modeltools.hycom.read_regional_grid() scale=1e-4 flds={} flds["plon"],flds["plat"] = grid.pgrid() flds["ulon"],flds["ulat"] = grid.ugrid() flds["vlon"],flds["vlat"] = grid.vgrid() flds["qlon"],flds["qlat"] = grid.qgrid() flds["scpx"] = grid.scpx() flds["scux"] = grid.scux() flds["scvx"] = grid.scvx() flds["scqx"] = grid.scqx() flds["scpy"] = grid.scpy() flds["scuy"] = grid.scuy() flds["scvy"] = grid.scvy() flds["scqy"] = grid.scqy() flds["cori"] = grid.corio() flds["pang"] = grid.p_azimuth() flds["pasp"] = grid.aspect_ratio() for fldname in flds.keys() : print fldname maxdiff=max_diff(flds[fldname],inflds[fldname]) print fldname, maxdiff if maxdiff > 1e-7 : unittest.fail("AFile IO failed. MAx diff between %s read/written: %14.7g"%(fldname,maxdiff)) #bmin,bmax = bathyfile.bminmax("depth") #amax = numpy.amax(wfldin) #amin = numpy.amin(wfldin) #abmindiff=numpy.abs(amin-bmin)/scale #abmaxdiff=numpy.abs(amax-bmax)/scale #print amin,amax #print bmin,bmax raise NameError,"test"
def test_evaluate(self):
    """Evaluate the model's accuracy.

    Requires a set of normalized images from the pickle files
    ./pickles/ostrich_features.pickle and ./pickles/ostrich_labels.pickle.
    Null or mismatched inputs must raise ValueError.
    """
    print('Evaluate test...')
    features, labels = load_pickles()
    clasificador = OstrichClassifier()
    clasificador.load('ostrich')
    # (a second, redundant load_pickles() call was removed here)
    try:
        clasificador.evaluate(None, None)
        # unittest has no module-level fail(); use the TestCase method.
        self.fail('el clasificador acepta parametros nulos')
    except ValueError:
        pass
    try:
        clasificador.evaluate(features, labels[:-1])
        self.fail('el clasificador acepta features y labels de ' +
                  'tamano diferente')
    except ValueError:
        pass
    ev = clasificador.evaluate(features, labels)
    if ev['acc'] < 0.7:
        self.fail('el clasificador no es suficientemente preciso')
def test_evaluate(self):
    """Evaluate the model's accuracy.

    Requires a set of normalized images in the hippopotamus pickle files.
    Null or mismatched inputs must raise ValueError.
    """
    clasificador = HippopotamusClassifier()
    with open("./pickles/hippopotamus_images.pickle", "rb") as f:
        features = pickle.load(f)
    with open("./pickles/hippopotamus_labels.pickle", "rb") as f:
        labels = pickle.load(f)
    try:
        clasificador.evaluate(None, None)
        # unittest has no module-level fail(); use the TestCase method.
        self.fail("el clasificador acepta parametros nulos")
    except ValueError:
        pass
    try:
        clasificador.evaluate(features, labels[:-1])
        self.fail("el clasificador acepta features y labels de " +
                  "tamano diferente")
    except ValueError:
        pass
    ev = clasificador.evaluate(features, labels)
    if ev["acc"] < 0.7:
        self.fail("el clasificador no es suficientemente preciso")
def test_evaluate(self):
    """Train on ./imagenes/train, then evaluate accuracy on ./imagenes/test.

    Null or mismatched inputs must raise ValueError; accuracy must reach 0.7.
    """
    print('Evaluate test...')
    inicializador = Practica04()
    inicializador.main('./imagenes/train', 64)
    features, labels = load_pickles()
    clasificador = PenguinClassifier()
    clasificador.fit(features, labels)
    inicializador.main('./imagenes/test', 64)
    features, labels = load_pickles()
    try:
        clasificador.evaluate(None, None)
        # unittest has no module-level fail(); use the TestCase method.
        self.fail('el clasificador acepta parametros nulos')
    except ValueError:
        pass
    try:
        clasificador.evaluate(features, labels[:-1])
        self.fail('el clasificador acepta features y labels de ' +
                  'tamano diferente')
    except ValueError:
        pass
    ev = clasificador.evaluate(features, labels)
    if ev['acc'] < 0.7:
        self.fail('el clasificador no es suficientemente preciso')
def test_evaluate(self):
    """Evaluate the model's accuracy.

    Requires a set of normalized images in the hamster pickle files.
    Null or mismatched inputs must raise ValueError.
    """
    clasificador = HamsterClassifier()
    with open('./pickles/hamster_images.pickle', 'rb') as f:
        features = pickle.load(f)
    with open('./pickles/hamster_labels.pickle', 'rb') as f:
        labels = pickle.load(f)
    try:
        clasificador.evaluate(None, None)
        # unittest has no module-level fail(); use the TestCase method.
        self.fail('el clasificador acepta parametros nulos')
    except ValueError:
        pass
    try:
        clasificador.evaluate(features, labels[:-1])
        self.fail('el clasificador acepta features y labels de ' +
                  'tamano diferente')
    except ValueError:
        pass
    ev = clasificador.evaluate(features, labels)
    if ev['acc'] < 0.7:
        self.fail('el clasificador no es suficientemente preciso')
def runTest(self):
    """Test blank and basic Properties get/set/pop/serialization behaviour."""
    prefString = ""
    prefs = Properties(prefString)
    prefString = "default.name=Bilbo Baggins\nhomepage.feeds=42,24"
    prefs = Properties(prefString)
    assert cmp('Bilbo Baggins', prefs.get('default.name')) == 0, 'Error retrieving value from key'
    assert cmp('', prefs.get('jeff')) == 0, "Missing keys should default to blank string"
    prefs.set('Hulk', 'Hogan')
    assert cmp('Hogan', prefs.get('Hulk')) == 0, 'Error retrieving set key/value'
    stringValue = prefs.convertToPropertiesFile()
    # this last test needs to be fixed, can fail if keys are not brought back
    # in the same order
    assert cmp(prefString + "\nHulk=Hogan", stringValue) == 0, 'Error converting to properties'
    prefString = "jeff=1"
    prefs = Properties(prefString)
    assert cmp('1', prefs.get('jeff')) == 0
    assert cmp('1', prefs.pop('jeff')) == 0
    try:
        prefs.pop('badkey')
        # unittest has no module-level fail(); use the TestCase method.
        self.fail('A KeyError exception should have prevented this')
    except KeyError:
        pass
    assert False == prefs.has_key('jeff')
def test_fit(self):
    """Train the penguin classifier on ./imagenes/train.

    Null or mismatched inputs must raise ValueError; training must set
    clasificador.model.
    """
    print('Fit test...')
    inicializador = Practica04()
    inicializador.main('./imagenes/train', 64)
    features, labels = load_pickles()
    clasificador = PenguinClassifier()
    try:
        clasificador.fit(None, None)
        # unittest has no module-level fail(); use the TestCase method.
        self.fail('el clasificador acepta parametros nulos')
    except ValueError:
        pass
    try:
        clasificador.fit(features, labels[:-1])
        self.fail('el clasificador acepta features y labels de ' +
                  'tamano diferente')
    except ValueError:
        pass
    clasificador.fit(features, labels)
    assert clasificador.model is not None
def test_time_conversion(self): """ Tests that forday and dayfor are consistent """ numtest = 20000 for k in range(numtest): yrflag = random.randrange(0, 4) selector = random.randrange(0, 3) dtime = random.randrange(1, 200000) # Tests wnen close to integer, and normal if selector == 0: dtime = dtime + random.random() / 100. elif selector == 1: dtime = dtime + 1. - random.random() / 100. elif selector == 2: dtime = dtime + random.random() if k % 50000 == 0: print "Doing test %10d of %10d. yrflag=%d, dtime=%20.16g" % ( k, numtest, yrflag, dtime) try: #print "dtime,yrflag=",dtime,yrflag iyear, iday, ihour = modeltools.hycom.forday(dtime, yrflag) #print "iyear,iday,ihour=",iyear,iday,ihour dtimeout = modeltools.hycom.dayfor(iyear, iday, ihour, yrflag) #print "dtime, reversed:",dtime except: unittest.fail("Caught error. dtime = %20.10g,yrflag=%d" % (dtime, yrflag)) #print yrflag,dtime,dtimeout-dtime,3600./86400. #dtimeout is "floored" to nearest hour. So error should never be greater than this: if (dtime - dtimeout) > 3600. / 86400: unittest.fail( "Error: inn forday or dayfor, dtime=%20.10g, yrflag%d=" % (dtime, yrflag))
raise RuntimeError("dummy") except RuntimeError: raise except RuntimeError, e: r = str(e) self.assertEquals(r, "dummy") def testBugFix1149372(self): try: c = C() str(c) except Exception, e: assert e.args[0] == "E" return unittest.fail("if __str__ raises an exception, re-raise") def test_wrap_StackOverflowError(self): with self.assertRaises(RuntimeError) as cm: StackOverflowErrorTest.throwStackOverflowError() self.assertEqual( cm.exception.message, "maximum recursion depth exceeded (Java StackOverflowError)") with self.assertRaises(RuntimeError) as cm: StackOverflowErrorTest.causeStackOverflowError() self.assertEqual( cm.exception.message, "maximum recursion depth exceeded (Java StackOverflowError)") def test_unicode_args(self):
def test_buckets(self, bucket_count=0, ops=0, max_time=0, replicas=[1]):
    """Create bucket_count buckets, optionally run a load against them.

    :param bucket_count: buckets to create; 0 means "as many as memory allows"
    :param ops: total operation rate, split evenly across buckets (0 = no load)
    :param max_time: maximum seconds to run load (0 = run until done)
    :param replicas: replica counts cycled across buckets (not mutated, so
        the mutable default is safe here)
    """
    bucket_ram = 100
    if not bucket_count:
        bucket_count = info.memoryQuota / bucket_ram
    if bucket_count > info.memoryQuota / bucket_ram:
        self.log.error('node does not have enough capacity for {0} buckets, exiting test'.format(bucket_count))
        return
    max_load_memory = bucket_ram * 3 / 4
    max_load_time = max_time
    load_info = {
        'server_info': self.servers,
        'memcached_info': {
            'bucket_name': "",
            'bucket_port': "11211",
            'bucket_password': "",
        },
        'operation_info': {
            'operation_distribution': {'set': 3, 'get': 5},
            'valuesize_distribution': {250: 15, 1500: 10, 20: 5, 15000: 5},
            'create_percent': 25,
            'threads': 2 * len(self.servers),
            'operation_rate': ops / bucket_count,
        },
        'limit_info': {
            'max_size': max_load_memory,
            'max_time': max_load_time,
        },
    }
    loads = []
    for i in range(bucket_count):
        bucket_name = 'bucketops-{0}'.format(uuid.uuid4())
        replica = replicas[i % len(replicas)]
        rest.create_bucket(bucket=bucket_name,
                           ramQuotaMB=bucket_ram,
                           replicaNumber=replica,
                           authType='sasl',
                           saslPassword='')
        msg = 'create_bucket succeeded but bucket {0} does not exist'.format(bucket_name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(bucket_name, rest), msg=msg)
        load_info['memcached_info']['bucket_name'] = bucket_name
        loads.append(load_runner.LoadRunner(load_info, dryrun=False))
    buckets = []
    try:
        buckets = rest.get_buckets()
    except Exception:
        # best-effort retry after a pause (was a bare except)
        self.log.info('15 seconds sleep before calling get_buckets again...')
        time.sleep(15)
        buckets = rest.get_buckets()
    if len(buckets) != bucket_count:
        msg = 'tried to create {0} buckets, only created {1}'.format(bucket_count, len(buckets))
        self.log.error(msg)
        # unittest has no module-level fail(); use the TestCase method.
        self.fail(msg)
    if ops:
        self.log.info('starting load against all buckets')
        for load in loads:
            load.start()
        if max_load_time:
            end_time = time.time() + max_load_time
            for load in loads:
                load.wait(end_time - time.time())
            # stop all load if there is any still running
            for load in loads:
                load.stop()
        else:
            for load in loads:
                load.wait()
    self.log.info('stopped load against all buckets')
def testGetAttrs(self):
    """Test getting of various attributes."""
    assert type(self.lc.LocalConnection) is types.ClassType
    try:
        self.lc.asotnuhaoseu
    except (NameError, KeyError):
        pass
    else:
        # unittest has no module-level fail(); use the TestCase method.
        self.fail("NameError or KeyError should be raised")