def createZSVariables(self):
    numVars = 0
    # the z variables are for each scenario
    for scenario in self.scenarios:
        # create zsp variable
        v1 = Variable()
        v1.type = Variable.v_zsp
        v1.name = "zsp_" + scenario.id
        v1.col = self.numCols
        v1.scenario = scenario.id
        self.variables[v1.name] = v1
        self.lp.variables.add(names=[v1.name])
        self.numCols += 1
        numVars += 1
        # create zsn variable
        v2 = Variable()
        v2.type = Variable.v_zsn
        v2.name = "zsn_" + scenario.id
        v2.col = self.numCols
        v2.scenario = scenario.id
        self.variables[v2.name] = v2
        self.lp.variables.add(names=[v2.name])
        self.numCols += 1
        numVars += 1
    return numVars
def test_execute_multiple_fields(self):
    from VM import VM
    vm = VM()
    c = ClassDefinition()
    c.namespace = 'a'
    c.name = 'b'
    v = Variable()
    v.name = 'abc'
    v.type = Types.Int32
    v2 = Variable()
    v2.name = 'def'
    v2.type = Types.Int32
    c.fieldDefinitions.append(v2)
    r = ReferenceType()
    t = Types.register_custom_type(c)
    r.type = t
    r.add_field(v)
    r.add_field(v2)
    vm.stack.push(r)
    x = ldfld('int32 ConsoleApplication1.foo::def')
    x.execute(vm)
    self.assertEqual(vm.stack.count(), 1)
    self.assertEqual(r.fields[1], vm.stack.pop())
def test_newobj_no_parameters_initializes_int_field_to_zero(self):
    from VM import VM
    vm = VM()
    m = MethodDefinition()
    m.name = 'ctor'
    m.namespace = 'testnamespace.testclass'
    vm.methods.append(m)
    c = ClassDefinition()
    c.name = 'testclass'
    c.namespace = 'testnamespace'
    c.methods.append(m)
    v = Variable()
    v.name = 'xyz'
    v.type = Types.Int32
    c.fieldDefinitions.append(v)
    t = Types.register_custom_type(c)
    n = newobj('instance void testnamespace.testclass::.ctor()')
    n.execute(vm)
    Types.unregister_custom_type(t)
    o = vm.stack.pop()
    self.assertEqual(o.type, t)
    self.assertEqual(len(o.fields), 1)
    self.assertEqual(o.fields[0].value, 0)
    self.assertEqual(len(o.fieldNames), 1)
    self.assertEqual(o.fieldNames[0], 'xyz')
def test_execute_int_parameter(self):
    from VM import VM
    vm = VM()
    c = ClassDefinition()
    c.namespace = 'a'
    c.name = 'b'
    v = Variable()
    v.name = 'xyz'
    v.type = Types.Int32
    r = ReferenceType()
    t = Types.register_custom_type(c)
    r.type = t
    r.add_field(v)
    vm.stack.push(r)
    vm.stack.push(Variable(9876))
    x = stfld('int32 a.b::xyz')
    x.execute(vm)
    self.assertEqual(vm.stack.count(), 0)
    self.assertEqual(r.fields[0].value, 9876)
def execute(self, vm):
    t = Types.resolve_type(self.typeName)
    r = ReferenceType()
    r.type = t
    for f in t.classRef.fieldDefinitions:
        o = Variable()
        o.type = f.type
        o.name = f.name
        o.value = 0  # fixme - reference types?
        r.fields.append(o)
        r.fieldNames.append(f.name)
    vm.stack.push(r)

    namespace = t.namespace + '.' + t.name
    name = 'ctor'
    methodDefinition = vm.find_method_by_signature(namespace, name, None, None)  # fixme - should name have a . in it?
    if methodDefinition is None:
        raise Exception("Couldn't find " + name + " method for " + namespace)

    m = methodDefinition.get_method()
    #parameter = Variable()
    #parameter.value = r
    m.parameters = [r]  # fixme - create a new method object so we don't overwrite the parameters?
    # fixme - should we even use parameters? or just the stack?
    vm.execute_method(m)
def test___repr__(self):
    var = Variable()
    var.name = "test_name"
    expected_repr = 'eopy.product.productIO.Variable.Variable "test_name"'
    output_repr = var.__repr__()
    self.assertEqual(output_repr, expected_repr)
def parse_parameters(self, method):
    token = self.context.get_next_token()
    while token != ')' and token != '':
        if token != '(':
            type = Types.resolve_type(token)
            name = self.context.get_next_token()
            v = Variable()
            v.type = type
            v.name = name
            method.parameters.append(v)
        token = self.context.get_next_token()
def test_create_method_references_definition(self):
    import Types
    md = MethodDefinition()
    md.name = 'foobar'
    v = Variable()
    v.name = 'asdf'
    v.type = Types.Int32
    md.locals.append(v)
    m = md.get_method()
    self.assertEqual(m.methodDefinition, md)
def test_create_method_has_new_copy_of_locals(self):
    import Types
    md = MethodDefinition()
    md.name = 'foobar'
    v = Variable()
    v.name = 'asdf'
    v.type = Types.Int32
    md.parameters.append(v)
    m = md.get_method()
    self.assertEqual(len(m.parameters), 1)
    self.assertNotEqual(v, m.parameters[0])
def createTimeIndexedVariable(self, name, v_type, coefficient=0.0, interval=1, lb=0.0, ub=1000000):
    numVars = 0
    for i in range(self.currentDay, self.finalDay, interval):
        v = Variable()
        v.name = name + str(i)
        v.col = self.numCols
        v.instant = i
        v.type = v_type
        self.variables[v.name] = v
        self.lp.variables.add(obj=[coefficient], lb=[lb], ub=[ub], names=[v.name])
        self.numCols += 1
        numVars += 1
    return numVars
def test_create_method_has_reference_type_instead_of_value(self):
    from Instructions.Ret import Ret
    import Types
    md = MethodDefinition()
    md.name = 'foobar'
    v = Variable()
    v.name = 'asdf'
    v.type = Types.Int32
    md.instructions.append(Ret())
    md.instructions.append(Ret())
    md.instructions.append(Ret())
    m = md.get_method()
    self.assertEqual(m.instructions, md.instructions)
def test_create_method_has_pointer_to_instructions(self):
    import Types
    from Instructions.Ret import Ret
    md = MethodDefinition()
    md.name = 'foobar'
    v = Variable()
    v.name = 'asdf'
    v.type = Types.Int32
    md.instructions.append(Ret())
    md.instructions.append(Ret())
    md.instructions.append(Ret())
    m = md.get_method()
    self.assertEqual(m.instructions, md.instructions)
def createZVariables(self):
    numVars = 0
    # create the zp variable
    v1 = Variable()
    v1.type = Variable.v_zp
    v1.name = "zp"
    v1.col = self.numCols
    self.variables[v1.name] = v1
    self.lp.variables.add(obj=[1.0], names=[v1.name])
    self.numCols += 1
    numVars += 1
    # create the zn variable
    v2 = Variable()
    v2.type = Variable.v_zn
    v2.name = "zn"
    v2.col = self.numCols
    self.variables[v2.name] = v2
    self.lp.variables.add(obj=[-1.0], names=[v2.name])
    self.numCols += 1
    numVars += 1
    return numVars
def createDemandVariable(self):
    numVars = 0
    t0 = self.currentDay
    for t in range(self.currentDay, self.finalDay):
        v = Variable()
        v.name = "d" + str(t)
        v.col = self.numCols
        v.instant = t
        v.type = Variable.v_demand
        demand = self.pData.getForecast(t0, t)
        self.variables[v.name] = v
        self.lp.variables.add(obj=[params.unitPrice], lb=[demand], ub=[demand], names=[v.name])
        self.numCols += 1
        numVars += 1
    return numVars
def createRepositionVariable(self):
    numVars = 0
    repositionDays = [rDay for rDay in self.pData.repositionDays
                      if self.currentDay <= rDay < self.finalDay]
    for i in repositionDays:
        v = Variable()
        v.name = "r" + str(i)
        v.col = self.numCols
        v.instant = i
        v.type = Variable.v_reposition
        self.variables[v.name] = v
        self.lp.variables.add(obj=[-params.unitCost], names=[v.name])
        self.numCols += 1
        numVars += 1
    return numVars
def createStockVariable(self):
    numVars = 0
    # the s variables are for each scenario and for each t of the current horizon
    for scenario in self.scenarios:
        for t in range(self.currentDay, self.finalDay):
            # create the variable
            v = Variable()
            v.type = Variable.v_stock
            v.name = "s_" + scenario.id + "_" + str(t)
            v.col = self.numCols
            v.instant = t
            v.scenario = scenario.id
            self.variables[v.name] = v
            self.lp.variables.add(names=[v.name])
            self.numCols += 1
            numVars += 1
    return numVars
def parse_locals(self, context):
    from ParserContext import ParseException
    locals = []
    token = context.get_next_token()
    if token != 'init':
        raise ParseException('Expected init, found ' + token)  # fixme - only required for verifiable methods
    token = context.get_next_token()
    if token != '(':
        raise ParseException('Expected (, found ' + token)
    token = context.get_next_token()
    lastToken = ''
    while not token.endswith(')'):
        v = Variable()
        if token.startswith('['):
            v.alias = token[1:-1]
            lastToken = token
            token = context.get_next_token()
        if token == 'class':
            v2 = ReferenceType()
            v2.alias = v.alias
            v2.type = Types.resolve_type(context.get_next_token())
            v = v2
        elif token.endswith('[]'):  # array
            v.type = Types.Array
            v.arrayType = Types.resolve_type(token[:-2])
        else:
            v.type = Types.BuiltInTypes[token]  # fixme - non-builtin types
        locals.append(v)
        lastToken = token
        token = context.get_next_token()
        #if token.endswith(')'):
        #    v.name = token[:-1]
        #    token = ')'
        #else:
        v.name = token
        lastToken = token
        token = context.get_next_token()
    return locals
def createRepositionVariable(self):
    numVars = 0
    repositionDays = [rDay for rDay in self.pData.repositionDays
                      if self.currentDay <= rDay < self.finalDay]
    # get the maximum forecast for using it as upper bound for the r variable
    maxDemand = max(s.maxForecast for s in self.scenarios)
    # the reposition variable remains only for each t in the current planning horizon
    for i in repositionDays:
        v = Variable()
        v.type = Variable.v_reposition
        v.name = "r_" + str(i)
        v.col = self.numCols
        v.instant = i
        self.variables[v.name] = v
        self.lp.variables.add(ub=[maxDemand], names=[v.name])
        self.numCols += 1
        numVars += 1
    return numVars
def parse(self, parserContext):
    c = ClassDefinition()
    while True:
        token = parserContext.get_next_token()
        if token in ClassFlags:
            c.flags.append(token)
        elif token == 'extends':
            c.base = parserContext.get_next_token()
        elif token == '.method':
            m = MethodParser().parse(parserContext)
            m.namespace = c.namespace + '.' + c.name
            c.methods.append(m)
            parserContext.methods.append(m)  # fixme - should i add to both?
        elif token == '.field':
            v = Variable()
            visibility = parserContext.get_next_token()
            type = parserContext.get_next_token()  # fixme - type, visibility
            if type == 'class':
                type = parserContext.get_next_token()
            if type in Types.BuiltInTypes:
                v.type = Types.BuiltInTypes[type]
            else:
                v.type = Types.resolve_type(type)
            name = parserContext.get_next_token()
            v.name = name
            c.fieldDefinitions.append(v)
        elif token == '}':
            break
        elif token != '{':
            fullyQualifiedName = token.split('.')
            c.name = fullyQualifiedName[-1]
            c.namespace = '.'.join(fullyQualifiedName[:-1])
    Types.register_custom_type(c)
    return c
def test_execute_single_field(self):
    from VM import VM
    vm = VM()
    c = ClassDefinition()
    c.namespace = 'ConsoleApplication1'
    c.name = 'foo'
    v = Variable()
    v.name = 'z'
    v.type = Types.Int32
    r = ReferenceType()
    t = Types.register_custom_type(c)
    r.type = t
    r.add_field(v)
    vm.stack.push(r)
    x = ldfld('int32 ConsoleApplication1.foo::z')
    x.execute(vm)
    self.assertEqual(vm.stack.count(), 1)
    self.assertEqual(r.fields[0], vm.stack.pop())
def test_return_variable_dict(self):
    var = Variable()
    var.name = "test_name"
    var.ndims = 2
    var.shape = (5, 10)
    var.dtype = "int"
    var.vtype = "data"
    var.units = "m"
    expected_dict = {
        "name": "test_name",
        "ndims": 2,
        "shape": (5, 10),
        "dtype": "int",
        "vtype": "data",
        "vclass": "default",
        "units": "m"
    }
    output_dict = var.return_variable_dict()
    self.assertDictEqual(output_dict, expected_dict)
def confront(self, m):
    """Confront the GRACE data by computing a mean over river basins.

    Fine-scale, point comparisons aren't meaningful as the underlying
    resolution of the GRACE data is 300-400 [km]. See the following
    publication for more information.

    Swenson, Sean & National Center for Atmospheric Research Staff (Eds).
    Last modified 08 Oct 2013. "The Climate Data Guide: GRACE: Gravity
    Recovery and Climate Experiment: Surface mass, total water storage,
    and derived variables." Retrieved from
    https://climatedataguide.ucar.edu/climate-data/grace-gravity-recovery-and-climate-experiment-surface-mass-total-water-storage-and.
    """
    obs, mod = self.stageData(m)

    # find the magnitude of the anomaly
    obs_anom     = obs.rms()
    obs_anom_val = obs_anom.siteStats()
    mod_anom     = mod.rms()
    mod_anom_val = mod_anom.siteStats()
    rmse         = obs.rmse(mod).convert(obs.unit)
    rmse_val     = rmse.siteStats()
    rmse_smap    = Variable(name="",
                            unit="1",
                            data=np.exp(-rmse.data / obs_anom.data),
                            ndata=obs.ndata,
                            lat=obs.lat,
                            lon=obs.lon)
    rmse_score   = rmse_smap.siteStats()
    iav_score    = Variable(name="Interannual Variability Score global",
                            unit="1",
                            data=np.exp(-np.abs(mod_anom_val.data - obs_anom_val.data) / obs_anom_val.data))

    # remap for plotting
    obs_anom_map = self._extendSitesToMap(obs_anom)
    mod_anom_map = self._extendSitesToMap(mod_anom)
    rmse_map     = self._extendSitesToMap(rmse)
    rmse_smap    = self._extendSitesToMap(rmse_smap)

    # renames
    obs_anom_val.name = "Anomaly Magnitude global"
    mod_anom_val.name = "Anomaly Magnitude global"
    obs_anom_map.name = "timeint_of_anomaly"
    mod_anom_map.name = "timeint_of_anomaly"
    rmse_map.name     = "rmse_of_anomaly"
    rmse_smap.name    = "rmsescore_of_anomaly"
    rmse_val.name     = "RMSE global"
    rmse_score.name   = "RMSE Score global"

    # dump results to a netCDF4 file
    results = Dataset(os.path.join(self.output_path, "%s_%s.nc" % (self.name, m.name)), mode="w")
    results.setncatts({"name": m.name, "color": m.color})
    mod.toNetCDF4(results, group="MeanState")
    mod_anom_val.toNetCDF4(results, group="MeanState")
    mod_anom_map.toNetCDF4(results, group="MeanState")
    rmse_map.toNetCDF4(results, group="MeanState")
    rmse_smap.toNetCDF4(results, group="MeanState")
    rmse_val.toNetCDF4(results, group="MeanState")
    rmse_score.toNetCDF4(results, group="MeanState")
    iav_score.toNetCDF4(results, group="MeanState")
    results.close()

    if self.master:
        results = Dataset(os.path.join(self.output_path, "%s_Benchmark.nc" % (self.name)), mode="w")
        results.setncatts({"name": "Benchmark", "color": np.asarray([0.5, 0.5, 0.5])})
        obs.toNetCDF4(results, group="MeanState")
        obs_anom_val.toNetCDF4(results, group="MeanState")
        obs_anom_map.toNetCDF4(results, group="MeanState")
        results.close()
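
# A minimal standalone sketch of the basin-level scoring used in confront() above,
# run on toy numpy arrays instead of ILAMB Variable objects. The array values and
# the use of a plain mean in place of Variable.siteStats() are illustrative
# assumptions, not the real staged GRACE/model data.
import numpy as np

obs_anom = np.array([4.0, 6.0, 5.0])   # per-basin anomaly magnitude of the observations
mod_anom = np.array([3.5, 7.0, 4.0])   # per-basin anomaly magnitude of the model
rmse     = np.array([1.0, 2.0, 1.5])   # per-basin RMSE of model vs. observations

# RMSE score: exp(-rmse / obs_anom) per basin, then averaged over basins
rmse_score = np.exp(-rmse / obs_anom).mean()

# interannual variability score from the basin-mean anomaly magnitudes
obs_mean, mod_mean = obs_anom.mean(), mod_anom.mean()
iav_score = np.exp(-abs(mod_mean - obs_mean) / obs_mean)

print(rmse_score, iav_score)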