def test_all_fields_landon_no_restart_continuation(self):
    """Test landon output for no restart continuation runs.

    These are for runs where the results should be identical to the
    benchmark for a default run.  A case where nothing is changed
    between the continuations and where u1 is changed between
    continuations is done.  This test also tests to make sure for a
    subset of variables that the case where u1 is changed is
    different from the case where it is not changed, prior to the
    continuation.
    """
    #- Check Qtcm runs match benchmarks (debug prints and the
    #  execfile of a user-local path removed; the previously
    #  commented-out assertions are restored):
    for ivar in self.var_list:
        data = read_benchmark(ivar, 'landon_cont40',
                              array_type=self.array_type)
        myoutput = read_output(ivar, 'parts_365_landon_nr1_cont40',
                               array_type=self.array_type)
        self.failUnless(self.N.allclose(data[0], myoutput[0]))
        self.failUnless(self.N.allclose(data[1], myoutput[1]))

        datau1 = read_benchmark(ivar, 'landon_cont40u1',
                                array_type=self.array_type)
        myoutputu1 = read_output(ivar, 'parts_365_landon_nr2_cont40',
                                 array_type=self.array_type)
        self.failUnless(self.N.allclose(datau1[0], myoutputu1[0]))
        self.failUnless(self.N.allclose(datau1[1], myoutputu1[1]))

    #- Check continue without change and continue with change are
    #  different:
    sub_var_list = ['u1', 'Prec', 'advT1', 'dfsq1', 'GMq1', 'q1',
                    'Evap', 'u0', 'OLR', 'div1', 'advq1', 'FLWds',
                    'dfsT1', 'T1', 'v0', 'v1', 'vort0', 'FTs',
                    'FSWut', 'FSWus', 'us', 'vs', 'psi0',
                    'taux', 'tauy', 'cl1', 'FSWds']
    for ivar in sub_var_list:
        data = read_benchmark(ivar, 'landon_cont40',
                              array_type=self.array_type)
        myoutput = read_output(ivar, 'parts_365_landon_nr1_cont40',
                               array_type=self.array_type)
        datau1 = read_benchmark(ivar, 'landon_cont40u1',
                                array_type=self.array_type)
        myoutputu1 = read_output(ivar, 'parts_365_landon_nr2_cont40',
                                 array_type=self.array_type)
        # The changed-u1 case must not be numerically identical to
        # the unchanged case: require at least one component of each
        # pair to differ (original loaded these but asserted nothing).
        self.failIf(self.N.allclose(data[0], datau1[0]) and
                    self.N.allclose(data[1], datau1[1]))
        self.failIf(self.N.allclose(myoutput[0], myoutputu1[0]) and
                    self.N.allclose(myoutput[1], myoutputu1[1]))
def test_all_fields_unmasked_parts_landon_multi(self):
    """Check every landon output field against the benchmark (multi case).

    Assumes self.compiled_form is 'parts'.
    """
    multi_runs = ('parts_365_landon_multi1', 'parts_365_landon_multi2')
    for field in self.var_list:
        benchmark = read_benchmark(field, 'landon',
                                   array_type=self.array_type)
        for run_name in multi_runs:
            result = read_output(field, run_name,
                                 array_type=self.array_type)
            self.failUnless(self.allclose(benchmark[0], result[0]))
            self.failUnless(self.allclose(benchmark[1], result[1]))
def test_all_fields_unmasked_parts_landon_multi(self):
    """Check all landon fields match the benchmark for the multi case.

    Assumes self.compiled_form is 'parts'.
    """
    for varname in self.var_list:
        expected = read_benchmark(varname, 'landon',
                                  array_type=self.array_type)
        out1 = read_output(varname, 'parts_365_landon_multi1',
                           array_type=self.array_type)
        out2 = read_output(varname, 'parts_365_landon_multi2',
                           array_type=self.array_type)
        for component in (0, 1):
            self.failUnless(self.allclose(expected[component],
                                          out1[component]))
        for component in (0, 1):
            self.failUnless(self.allclose(expected[component],
                                          out2[component]))
def test_all_fields_landon_restart_continuation(self):
    """Test landon output for "restart" continuation runs.

    These are for runs where the results should be identical to the
    benchmark for a default run.
    """
    for ivar in self.var_list:
        # Benchmarks for the day-10 and day-30 segments:
        data1a = read_benchmark(ivar, 'landon_cont10-30_10',
                                array_type=self.array_type)
        data1b = read_benchmark(ivar, 'landon_cont10-30_30',
                                array_type=self.array_type)
        # Outputs from the full, parts, and parts-no-restart runs:
        myoutput1a = read_output(ivar, 'full_365_landon_cont10-30_10',
                                 array_type=self.array_type)
        myoutput1b = read_output(ivar, 'full_365_landon_cont10-30_30',
                                 array_type=self.array_type)
        myoutput2a = read_output(ivar, 'parts_365_landon_cont10-30_10',
                                 array_type=self.array_type)
        myoutput2b = read_output(ivar, 'parts_365_landon_cont10-30_30',
                                 array_type=self.array_type)
        myoutput3a = read_output(ivar, 'parts_365_landon_nr_cont10-30_10',
                                 array_type=self.array_type)
        myoutput3b = read_output(ivar, 'parts_365_landon_nr_cont10-30_30',
                                 array_type=self.array_type)
        # Every _10 output must match the _10 benchmark and every _30
        # output the _30 benchmark (the original asserted only the
        # first comparison and left the other loads unchecked).
        self.failUnless(self.N.allclose(data1a[0], myoutput1a[0]))
        self.failUnless(self.N.allclose(data1a[1], myoutput1a[1]))
        self.failUnless(self.N.allclose(data1a[0], myoutput2a[0]))
        self.failUnless(self.N.allclose(data1a[1], myoutput2a[1]))
        self.failUnless(self.N.allclose(data1a[0], myoutput3a[0]))
        self.failUnless(self.N.allclose(data1a[1], myoutput3a[1]))
        self.failUnless(self.N.allclose(data1b[0], myoutput1b[0]))
        self.failUnless(self.N.allclose(data1b[1], myoutput1b[1]))
        self.failUnless(self.N.allclose(data1b[0], myoutput2b[0]))
        self.failUnless(self.N.allclose(data1b[1], myoutput2b[1]))
        self.failUnless(self.N.allclose(data1b[0], myoutput3b[0]))
        self.failUnless(self.N.allclose(data1b[1], myoutput3b[1]))
def test_all_fields_unmasked_full_landon(self):
    """Check every landon output field against its benchmark.

    Assumes self.compiled_form is 'full'.
    """
    for field in self.var_list:
        expected = read_benchmark(field, 'landon',
                                  array_type=self.array_type)
        actual = read_output(field, 'full_365_landon',
                             array_type=self.array_type)
        for idx in (0, 1):
            self.failUnless(self.N.allclose(expected[idx], actual[idx]))
def test_all_fields_unmasked_full_landon(self):
    """Verify all landon output fields agree with the benchmark.

    Assumes self.compiled_form is 'full'.
    """
    for varname in self.var_list:
        benchmark = read_benchmark(varname, 'landon',
                                   array_type=self.array_type)
        run_result = read_output(varname, 'full_365_landon',
                                 array_type=self.array_type)
        first_ok = self.N.allclose(benchmark[0], run_result[0])
        second_ok = self.N.allclose(benchmark[1], run_result[1])
        self.failUnless(first_ok)
        self.failUnless(second_ok)
def test_all_fields_unmasked_parts_aquaplanet(self):
    """Check every aquaplanet output field against its benchmark.

    Assumes self.compiled_form is 'parts'.
    """
    for field in self.var_list:
        expected = read_benchmark(field, 'aquaplanet_long',
                                  array_type=self.array_type)
        got = read_output(field, 'parts_365_aqua_long',
                          array_type=self.array_type)
        self.failUnless(self.N.allclose(expected[0], got[0]))
        self.failUnless(self.N.allclose(expected[1], got[1]))
def test_all_fields_unmasked_parts_aquaplanet(self):
    """Verify all aquaplanet output fields match the benchmark.

    Assumes self.compiled_form is 'parts'.
    """
    for varname in self.var_list:
        benchmark = read_benchmark(varname, 'aquaplanet_long',
                                   array_type=self.array_type)
        run_result = read_output(varname, 'parts_365_aqua_long',
                                 array_type=self.array_type)
        for idx in (0, 1):
            self.failUnless(self.N.allclose(benchmark[idx],
                                            run_result[idx]))
def test_all_fields_aqua_no_restart_continuation(self):
    """Test aqua output for no restart continuation runs.

    These are for runs where the results should be identical to the
    benchmark for a default run.  A case where nothing is changed
    between the continuations and where u1 is changed between
    continuations is done.  This test also tests to make sure for a
    subset of variables that the case where u1 is changed is
    different from the case where it is not changed, prior to the
    continuation.
    """
    #- Each (benchmark case, run case) pair must agree:
    pairings = (('aquaplanet_cont40', 'parts_365_aqua_nr1_cont40'),
                ('aquaplanet_cont40u1', 'parts_365_aqua_nr2_cont40'),
                ('aquaplanet_cont40u1', 'full_365_aqua_nr2_cont40'))
    for field in self.var_list:
        for bench_case, run_case in pairings:
            expected = read_benchmark(field, bench_case,
                                      array_type=self.array_type)
            got = read_output(field, run_case,
                              array_type=self.array_type)
            self.failUnless(self.N.allclose(expected[0], got[0]))
            self.failUnless(self.N.allclose(expected[1], got[1]))
def test_all_fields_aqua_no_restart_continuation(self):
    """Test aqua output for no restart continuation runs.

    These are for runs where the results should be identical to the
    benchmark for a default run.  A case where nothing is changed
    between the continuations and where u1 is changed between
    continuations is done.  This test also tests to make sure for a
    subset of variables that the case where u1 is changed is
    different from the case where it is not changed, prior to the
    continuation.
    """
    #- Check Qtcm runs match benchmarks:
    for varname in self.var_list:
        unchanged_bench = read_benchmark(varname, 'aquaplanet_cont40',
                                         array_type=self.array_type)
        unchanged_run = read_output(varname, 'parts_365_aqua_nr1_cont40',
                                    array_type=self.array_type)
        for idx in (0, 1):
            self.failUnless(self.N.allclose(unchanged_bench[idx],
                                            unchanged_run[idx]))

        # u1-changed case, 'parts' compiled form:
        changed_bench = read_benchmark(varname, 'aquaplanet_cont40u1',
                                       array_type=self.array_type)
        changed_run = read_output(varname, 'parts_365_aqua_nr2_cont40',
                                  array_type=self.array_type)
        for idx in (0, 1):
            self.failUnless(self.N.allclose(changed_bench[idx],
                                            changed_run[idx]))

        # u1-changed case, 'full' compiled form:
        changed_bench = read_benchmark(varname, 'aquaplanet_cont40u1',
                                       array_type=self.array_type)
        changed_full_run = read_output(varname, 'full_365_aqua_nr2_cont40',
                                       array_type=self.array_type)
        for idx in (0, 1):
            self.failUnless(self.N.allclose(changed_bench[idx],
                                            changed_full_run[idx]))
def test_all_fields_unmasked_parts_aqua_multi_same(self):
    """Check aqua output against the benchmark for the multi same case.

    These are for runs where the results should be identical to the
    benchmark for a default run.  Assumes self.compiled_form is 'parts'.
    """
    multi_runs = ('parts_365_aqua_multi1',
                  'parts_365_aqua_multi2',
                  'parts_365_aqua_multi4')
    for field in self.var_list:
        benchmark = read_benchmark(field, 'aquaplanet',
                                   array_type=self.array_type)
        for run_name in multi_runs:
            result = read_output(field, run_name,
                                 array_type=self.array_type)
            self.failUnless(self.allclose(benchmark[0], result[0]))
            self.failUnless(self.allclose(benchmark[1], result[1]))
def test_all_fields_unmasked_parts_aqua_multi_params2(self):
    """Check aqua output against the benchmark for the multi params2 case.

    These are for runs where the results should be identical to the
    benchmark for an aquaplanet_params2 run.  Assumes
    self.compiled_form is 'parts'.
    """
    for field in self.var_list:
        expected = read_benchmark(field, 'aquaplanet_params2',
                                  array_type=self.array_type)
        got = read_output(field, 'parts_365_aqua_multi3',
                          array_type=self.array_type)
        for idx in (0, 1):
            self.failUnless(self.allclose(expected[idx], got[idx]))
def test_all_fields_unmasked_parts_aqua_multi_same(self):
    """Verify aqua output matches the benchmark for the multi same case.

    These are for runs where the results should be identical to the
    benchmark for a default run.  Assumes self.compiled_form is 'parts'.
    """
    for varname in self.var_list:
        benchmark = read_benchmark(varname, 'aquaplanet',
                                   array_type=self.array_type)
        run1 = read_output(varname, 'parts_365_aqua_multi1',
                           array_type=self.array_type)
        run2 = read_output(varname, 'parts_365_aqua_multi2',
                           array_type=self.array_type)
        run4 = read_output(varname, 'parts_365_aqua_multi4',
                           array_type=self.array_type)
        for result in (run1, run2, run4):
            self.failUnless(self.allclose(benchmark[0], result[0]))
            self.failUnless(self.allclose(benchmark[1], result[1]))
def test_all_fields_unmasked_parts_aqua_multi_params2(self):
    """Verify aqua output matches the benchmark for the multi params2 case.

    These are for runs where the results should be identical to the
    benchmark for an aquaplanet_params2 run.  Assumes
    self.compiled_form is 'parts'.
    """
    for varname in self.var_list:
        benchmark = read_benchmark(varname, 'aquaplanet_params2',
                                   array_type=self.array_type)
        run3 = read_output(varname, 'parts_365_aqua_multi3',
                           array_type=self.array_type)
        first_ok = self.allclose(benchmark[0], run3[0])
        second_ok = self.allclose(benchmark[1], run3[1])
        self.failUnless(first_ok)
        self.failUnless(second_ok)
def test_all_fields_unmasked_parts_aqua_inst(self):
    """Check aqua output against the benchmark for the inst. case.

    The "inst. case" means testing that running the model with the
    init_with_instance_state keyword set to True will work properly.
    Assumes self.compiled_form is 'parts'.
    """
    for field in self.var_list:
        expected = read_benchmark(field, 'aquaplanet',
                                  array_type=self.array_type)
        got = read_output(field, 'parts_365_aqua_inst1',
                          array_type=self.array_type)
        for idx in (0, 1):
            self.failUnless(self.allclose(expected[idx], got[idx]))
def test_all_fields_unmasked_parts_aqua_inst(self):
    """Verify aqua output matches the benchmark for the inst. case.

    The "inst. case" means testing that running the model with the
    init_with_instance_state keyword set to True will work properly.
    Assumes self.compiled_form is 'parts'.
    """
    for varname in self.var_list:
        benchmark = read_benchmark(varname, 'aquaplanet',
                                   array_type=self.array_type)
        run_result = read_output(varname, 'parts_365_aqua_inst1',
                                 array_type=self.array_type)
        first_ok = self.allclose(benchmark[0], run_result[0])
        second_ok = self.allclose(benchmark[1], run_result[1])
        self.failUnless(first_ok)
        self.failUnless(second_ok)
def test_all_fields_benchmarks_are_all_different(): """Test all benchmark outputs are different for sub-var_list. Only select variables are compared. """ #@@@e sub_var_list = [ 'WD', 'GMs1', 'u1', 'cdn', 'S0', 'Ts', 'Prec' \ , 'top', 'advT1', 'dfsq1', 'GMq1', 'div0', 'q1' \ , 'Evap', 'lon', 'u0', 'OLR', 'div1', 'wet' \ , 'advq1', 'FLWds', 'FLWus', 'Runs', 'dfsT1', 'T1' \ , 'v0', 'v1', 'vort0', 'Runf', 'lat', 'FTs', 'Evapi' \ , 'FSWut', 'FSWus', 'us', 'vs', 'psi0', 'stype' \ , 'time', 'taux', 'tauy', 'cl1', 'FSWds' ] #sub_var_list = [ 'us', 'Ts']# 'GMs1', 'u1', 'cdn', 'S0', 'Ts', 'Prec' \ # , 'top', 'advT1', 'dfsq1', 'GMq1', 'div0', 'q1' \ # , 'Evap', 'lon', 'u0', 'OLR', 'div1', 'wet' \ # , 'advq1', 'FLWds', 'FLWus', 'Runs', 'dfsT1', 'T1' \ # , 'v0', 'v1', 'vort0', 'Runf', 'lat', 'FTs', 'Evapi' \ # , 'FSWut', 'FSWus', 'us', 'vs', 'psi0', 'stype' \ # , 'time', 'taux', 'tauy', 'cl1', 'FSWds' ] sub_var_list = [ 'u1', ] for ivar in sub_var_list: data1 = read_benchmark( ivar, 'aquaplanet' \ , array_type='numpy' ) data2 = read_benchmark( ivar, 'landon' \ , array_type='numpy' ) data3 = read_benchmark( ivar, 'aquaplanet_params2' \ , array_type='numpy' ) myoutput3 = read_output( ivar, 'full_365_aqua_multi3' \ , array_type='numpy' ) #print N.allclose(data1[0], data2[0]) #print N.allclose(data1[1], data2[1]) #print ivar, N.allclose(data3[0], myoutput3[0]) #print ivar, N.allclose(data3[1], myoutput3[1]) print N.max(N.abs((data3[0] - myoutput3[0]) / data3[0])) print N.max(N.abs((data3[1] - myoutput3[0]) / data3[1]))
def test_all_fields_benchmarks_are_all_different(): """Test all benchmark outputs are different for sub-var_list. Only select variables are compared. """ #@@@e sub_var_list = [ 'WD', 'GMs1', 'u1', 'cdn', 'S0', 'Ts', 'Prec' \ , 'top', 'advT1', 'dfsq1', 'GMq1', 'div0', 'q1' \ , 'Evap', 'lon', 'u0', 'OLR', 'div1', 'wet' \ , 'advq1', 'FLWds', 'FLWus', 'Runs', 'dfsT1', 'T1' \ , 'v0', 'v1', 'vort0', 'Runf', 'lat', 'FTs', 'Evapi' \ , 'FSWut', 'FSWus', 'us', 'vs', 'psi0', 'stype' \ , 'time', 'taux', 'tauy', 'cl1', 'FSWds' ] #sub_var_list = [ 'us', 'Ts']# 'GMs1', 'u1', 'cdn', 'S0', 'Ts', 'Prec' \ # , 'top', 'advT1', 'dfsq1', 'GMq1', 'div0', 'q1' \ # , 'Evap', 'lon', 'u0', 'OLR', 'div1', 'wet' \ # , 'advq1', 'FLWds', 'FLWus', 'Runs', 'dfsT1', 'T1' \ # , 'v0', 'v1', 'vort0', 'Runf', 'lat', 'FTs', 'Evapi' \ # , 'FSWut', 'FSWus', 'us', 'vs', 'psi0', 'stype' \ # , 'time', 'taux', 'tauy', 'cl1', 'FSWds' ] sub_var_list = ['u1',] for ivar in sub_var_list: data1 = read_benchmark( ivar, 'aquaplanet' \ , array_type='numpy' ) data2 = read_benchmark( ivar, 'landon' \ , array_type='numpy' ) data3 = read_benchmark( ivar, 'aquaplanet_params2' \ , array_type='numpy' ) myoutput3 = read_output( ivar, 'full_365_aqua_multi3' \ , array_type='numpy' ) #print N.allclose(data1[0], data2[0]) #print N.allclose(data1[1], data2[1]) #print ivar, N.allclose(data3[0], myoutput3[0]) #print ivar, N.allclose(data3[1], myoutput3[1]) print N.max( N.abs((data3[0]- myoutput3[0])/data3[0]) ) print N.max( N.abs((data3[1]- myoutput3[0])/data3[1]) )
#inputs['ntouti'] = 1 #inputs['noout'] = 0 #inputs['mrestart'] = 0 #print '@@@@@@@@@@@@@@@@', model.get_qtcm_item('outdir') #@@@ # the problem with this is for another run, even though you've changed # model, it doesn't use the new values for SSTdir etc.? #model3 = Qtcm(**inputs) #model3.run_session() #if os.path.exists('qtcm_00011115.restart'): os.remove('qtcm_00011115.restart') ivar = 'u1' data_aq = read_benchmark( ivar, 'aquaplanet', array_type='numpy' ) data_lo = read_benchmark( ivar, 'landon', array_type='numpy' ) data1 = read_output( ivar, 'full_365_aqua', array_type='numpy' ) #data2 = read_output( ivar, 'full_365_aqua2', array_type='numpy' ) #data3 = read_output( ivar, 'full_365_landon', array_type='numpy' ) print 'aqua run meets benchmark', N.allclose(data1[0], data_aq[0]) #print 'land run meets benchmark', N.allclose(data3[0], data_lo[0]) #print 'The two aqua runs should test allclose True: ', \ # N.allclose(data1[0], data2[0]) #print 'The aqua runs and landon should both test allclose False: ', \ # N.allclose(data1[0], data3[0]), N.allclose(data2[0], data3[0]) print model._Qtcm__qtcm.__file__ #print model2._Qtcm__qtcm.__file__ #print model3._Qtcm__qtcm.__file__ # ====== end file ======