def testRenameDeviceDuplicateName(self):
    """Renaming a device to an id already in use raises DeviceExistsError."""
    first_id, second_id = 'testId1', 'testId2'
    first_dev = manage_createDevice(self.dmd, first_id, '/')
    manage_createDevice(self.dmd, second_id, '/Devices')
    # The rename target collides with the second device's id.
    self.assertRaises(DeviceExistsError, first_dev.renameDevice, second_id)
def testManage_createDevice(self):
    """manage_createDevice returns a Device wired into the root device class."""
    created = manage_createDevice(self.dmd, 'mydevice', '/')
    self.assert_(isinstance(created, Device))
    # The device lands in the root organizer "/".
    self.assertEqual(created.deviceClass(), self.dmd.Devices)
    self.assertEqual(created.getDeviceClassName(), "/")
    # With no title set, titleOrId() falls back to the id.
    self.assertEqual(created.id, "mydevice")
    self.assertEqual(created.titleOrId(), "mydevice")
def testPrettyLinkWithTitleOrId(self):
    """getPrettyLink labels the link with the id until a title is set."""
    device = manage_createDevice(self.dmd, 'testId', '/')
    # No title yet: the anchor text is the id.
    self.assert_(device.getPrettyLink().endswith('testId</a>'))
    device.title = 'testTitle'
    # Once a title exists it wins over the id.
    self.assert_(device.getPrettyLink().endswith('testTitle</a>'))
def testPrettyLinkWithTitleOrId(self):
    """The pretty-link label tracks titleOrId(): id first, then the title."""
    dev = manage_createDevice(self.dmd, 'testId', '/')
    # Before a title is assigned the id is the anchor text; afterwards
    # the title takes precedence.
    for new_title, suffix in ((None, 'testId</a>'), ('testTitle', 'testTitle</a>')):
        if new_title is not None:
            dev.title = new_title
        self.assert_(dev.getPrettyLink().endswith(suffix))
def testMultipleDevicesWithDuplicate(self):
    """A device whose manage IP duplicates another's yields a single config."""
    duplicate = manage_createDevice(
        self.dmd, "test-dev2", "/Server/Windows", manageIp="10.0.10.2")
    # Point the new device at the IP already used by the fixture device.
    duplicate.setManageIp("10.0.10.1")
    configs = self._configService.remote_getDeviceConfigs()
    self.assertEqual(len(configs), 1)
def testIpAddrCreation(self):
    """Creating a device with a manageIp also creates the Ip object in the
    network tree and links it back to the device."""
    manageIp = '1.2.3.4'
    dev = manage_createDevice(self.dmd, 'mydevice', '/', manageIp=manageIp)
    # BUG FIX: was `self.dev.getNetworkRoot()` but no `self.dev` is set in
    # this test -- the device under test is the local `dev`.
    ip = dev.getNetworkRoot().findIp(manageIp)
    self.assert_(ip is not None)
    # check relation Ip -> device
    self.assertEqual(dev, ip.manageDevice())
def testManage_createDeviceWithIpFromInterface(self):
    """A manageIp already bound to another device's interface may still be
    used to create a second device; the devices end up with distinct IPs."""
    shared_ip = '1.2.3.4'
    first = manage_createDevice(self.dmd, 'myfirstdevice', '/', manageIp='1.2.3.5')
    # Give the first device a network interface carrying the shared IP.
    from Products.ZenModel.IpInterface import IpInterface
    first.os.interfaces._setObject('testNIC', IpInterface('testNIC'))
    nic = first.getDeviceComponents()[0]
    nic.addIpAddress(shared_ip)
    self.assert_(first.getNetworkRoot().findIp(shared_ip) is not None)
    # Creating a second device with that interface IP must still succeed.
    second = manage_createDevice(self.dmd, 'myseconddevice', '/', manageIp=shared_ip)
    self.assertNotEqual(first.manageIp, second.manageIp)
    self.assert_(second is not None)
def createTestDevice(dmd, deviceId, propertyMap=None, deviceClass='/Devices/Server'):
    """Create a device and apply any matching attributes from propertyMap.

    Only keys that already exist as attributes on the device are applied.

    BUG FIX: the mutable default argument ``propertyMap={}`` was shared
    across calls; a None sentinel keeps the signature backward-compatible.
    """
    if propertyMap is None:
        propertyMap = {}
    dev = manage_createDevice(dmd, deviceId, deviceClass)
    for key, value in propertyMap.iteritems():
        # Silently skip properties the device does not define.
        if hasattr(dev, key):
            setattr(dev, key, value)
    return dev
def afterSetUp(self):
    """Create a WMI-monitored Windows test device and the EventLogConfig
    service under test."""
    super(TestEventLogConfig, self).afterSetUp()
    device = manage_createDevice(
        self.dmd, "test-dev1", "/Server/Windows", manageIp="10.0.10.1")
    # Enable WMI monitoring and event-log collection for the fixture.
    device.zWmiMonitorIgnore = False
    device.zWinEventlog = True
    self._testDev = device
    self._deviceNames = ["test-dev1"]
    self._configService = EventLogConfig(self.dmd, "localhost")
def testManage_createDeviceDupIp(self):
    """A second device may not be created on a manage IP already in use."""
    manage_createDevice(self.dmd, 'mydevice', '/', manageIp='1.1.1.1')
    self.assertRaises(
        DeviceExistsError,
        manage_createDevice, self.dmd, 'mydevice2', '/', manageIp='1.1.1.1')
def testMultipleDevices(self):
    """The config service returns one proxy per monitored Windows device."""
    dev = manage_createDevice(
        self.dmd, "test-dev2", "/Server/Windows", manageIp="10.0.10.2")
    dev.zWmiMonitorIgnore = False
    dev.zWinEventlog = True
    self._deviceNames.append("test-dev2")
    proxies = self._configService.remote_getDeviceConfigs(self._deviceNames)
    # BUG FIX: assertTrue(len(proxies), 2) treated 2 as the failure *message*
    # and passed for any non-empty list; assertEqual actually checks the count.
    self.assertEqual(len(proxies), 2)
    # Passing None means "all devices"; the count must match.
    proxies = self._configService.remote_getDeviceConfigs(None)
    self.assertEqual(len(proxies), 2)
def _doDbWork():
    """Return a (device, created) pair: the device object (either new or
    existing) and a flag indicating whether the device was newly created
    (True) or just updated (False).  Returns (None, False) when the IP's
    network is flagged non-discoverable and ``force`` is not set.

    NOTE(review): this is a closure -- ``self``, ``kw``, ``ip`` and
    ``force`` are free variables bound by the enclosing function; confirm
    their semantics against the caller.
    """
    try:
        # Network root for the collector named in kw (default 'localhost').
        netroot = getNetworkRoot(
            self.dmd, kw.get('performanceMonitor', 'localhost'))
        netobj = netroot.getNet(ip)
        # Fall back to /24 when the containing network is unknown and no
        # zDefaultNetworkTree is configured on the network root.
        netmask = 24
        if netobj is not None:
            netmask = netobj.netmask
        else:
            defaultNetmasks = getattr(netroot, 'zDefaultNetworkTree', [])
            if defaultNetmasks:
                netmask = defaultNetmasks[0]
        # netobj may be None here; getattr then yields the default True.
        autoDiscover = getattr(netobj, 'zAutoDiscover', True)
        # If we're not supposed to discover this IP, return None
        if not force and not autoDiscover:
            return None, False
        kw['manageIp'] = ipunwrap(ip)
        dev = manage_createDevice(self.dmd, **kw)
        # Register the IP in the network tree with the netmask chosen above.
        netroot.createIp(ip, netmask)
        return dev, True
    except DeviceExistsError as e:
        # Update device with latest info from zendisc
        # (if necessary)
        if not e.dev.getManageIp():
            e.dev.setManageIp(kw['manageIp'])
        # only overwrite title if it has not been set
        if not e.dev.title or isip(e.dev.title):
            if not isip(kw.get('deviceName')):
                e.dev.setTitle(kw['deviceName'])
        # copy kw->updateAttributes, to keep kw intact in case
        # we need to retry transaction
        updateAttributes = {}
        for k, v in kw.items():
            if k not in ('manageIp', 'deviceName', 'devicePath',
                         'discoverProto', 'performanceMonitor',
                         'productionState'):
                updateAttributes[k] = v
        # use updateDevice so we don't clobber existing device properties.
        e.dev.updateDevice(**updateAttributes)
        return e.dev, False
    except Exception as ex:
        # Any other failure is logged and propagated to the remote caller
        # as a perspective-broker copyable failure.
        log.exception("IP address %s (kw = %s) encountered error",
                      ipunwrap(ip), kw)
        raise pb.CopyableFailure(ex)
def afterSetUp(self):
    """Create a Windows device carrying one auto-start, monitored Windows
    service, plus the WinServiceConfig service under test."""
    super(TestWinServiceConfig, self).afterSetUp()
    device = manage_createDevice(
        self.dmd, "test-dev1", "/Server/Windows", manageIp="10.0.10.1")
    device.zWmiMonitorIgnore = False
    # Attach a monitored, auto-start service and index it for lookups.
    service = manage_addWinService(device.os.winservices, 'wsvc', 'test service')
    service.zMonitor = True
    service.monitor = True
    service.startMode = 'Auto'
    service.index_object()
    self._testDev = device
    self._deviceNames = ["test-dev1"]
    self._configService = WinServiceConfig(self.dmd, "localhost")
def createFakeDevice(self, name):
    """Create a fake device under /Test plus a template with one COMMAND
    datasource, one datapoint, and a MinMax threshold on it."""
    from Products.ZenModel.Device import manage_createDevice
    from Products.ZenModel.RRDTemplate import manage_addRRDTemplate
    self.dev = manage_createDevice(self.dmd, deviceName=name, devicePath='/Test')
    manage_addRRDTemplate(self.dmd.Devices.Test.rrdTemplates, 'Device')
    template = self.dmd.Devices.Test.rrdTemplates.Device
    datasource = template.manage_addRRDDataSource('ds', 'BasicDataSource.COMMAND')
    datasource.manage_addRRDDataPoint('dp')
    # Threshold watches the ds/dp pair created above.
    threshold = template.manage_addRRDThreshold('limit', 'MinMaxThreshold')
    threshold.maxval = "100"
    threshold.dsnames = ('ds_dp',)
def testMultipleUpdates(self):
    """End-to-end check that successive model changes (device creation,
    manage-IP changes, production-state change) each produce catalog index
    updates that are searchable mid-transaction."""
    device = manage_createDevice(self.dmd, 'my_device', '/')
    device_uid = device.idx_uid()
    # On creation, an index update of the whole object should have been created
    tx_state = self._get_transaction_state()
    self._check_tx_state(pending=device_uid)
    # temporarily commit changes made so far
    self.data_manager.do_mid_transaction_commit()
    # We should be able to find the newly created device
    search_results = self.model_catalog.search(query=Eq(UID, device_uid), commit_dirty=False)
    self._validate_temp_indexed_results(search_results, expected_object_uids=[device_uid])
    # Changing the managed ip should trigger another index update
    ip = "10.10.10.1"
    device.setManageIp(ip)
    self.assertTrue(device_uid in tx_state.pending_updates)
    self.assertTrue(device_uid in tx_state.temp_indexed_uids)
    # a search by ip "10.10.10.1" should return our device
    search_results = self.model_catalog.search(query=Eq("text_ipAddress", ip), commit_dirty=True)
    self._validate_temp_indexed_results(search_results, expected_object_uids=[device_uid])
    # set the managed ip to a different value
    old_ip = ip
    new_ip = "10.10.10.2"
    device.setManageIp(new_ip)
    # search by new ip should return our device
    search_results = self.model_catalog.search(query=Eq("text_ipAddress", new_ip), commit_dirty=True)
    self._validate_temp_indexed_results(search_results, expected_object_uids=[device_uid])
    # search by old ip should NOT return anything
    search_results = self.model_catalog.search(query=Eq("text_ipAddress", old_ip), commit_dirty=True)
    self._validate_temp_indexed_results(search_results, expected_object_uids=[])
    # set production state and confirm it is searchable
    prod_state = 1100
    device.setProdState(prod_state)
    search_results = self.model_catalog.search(query=Eq("productionState", prod_state), commit_dirty=True)
    self._validate_temp_indexed_results(search_results, expected_object_uids=[device_uid])
    # Search by uid and check all the fields are correct
    fields = ["productionState", "text_ipAddress"]
    search_results = self.model_catalog.search(query=Eq(UID, device_uid), fields=fields, commit_dirty=False)
    self.assertEquals(search_results.total, 1)
    brain = search_results.results.next()
    self.assertEquals(brain.uid, device_uid)
    self.assertEquals(brain.text_ipAddress, new_ip)
    self.assertEquals(brain.productionState, prod_state)
def testMultipleUpdates(self):
    """Verify that each successive model change (creation, manage-IP
    changes, production-state change) triggers a catalog index update
    visible to mid-transaction searches."""
    device = manage_createDevice(self.dmd, 'my_device', '/')
    device_uid = device.idx_uid()
    # On creation, an index update of the whole object should have been created
    tx_state = self._get_transaction_state()
    self._check_tx_state(pending=device_uid)
    # temporarily commit changes made so far
    self.data_manager.do_mid_transaction_commit()
    # We should be able to find the newly created device
    search_results = self.model_catalog.search(query=Eq(UID, device_uid), commit_dirty=False)
    self._validate_temp_indexed_results(search_results, expected_object_uids=[device_uid])
    # Changing the managed ip should trigger another index update
    ip = "10.10.10.1"
    device.setManageIp(ip)
    self.assertTrue(device_uid in tx_state.pending_updates)
    self.assertTrue(device_uid in tx_state.temp_indexed_uids)
    # a search by ip "10.10.10.1" should return our device
    search_results = self.model_catalog.search(query=Eq("text_ipAddress", ip), commit_dirty=True)
    self._validate_temp_indexed_results(search_results, expected_object_uids=[device_uid])
    # set the managed ip to a different value
    old_ip = ip
    new_ip = "10.10.10.2"
    device.setManageIp(new_ip)
    # search by new ip should return our device
    search_results = self.model_catalog.search(query=Eq("text_ipAddress", new_ip), commit_dirty=True)
    self._validate_temp_indexed_results(search_results, expected_object_uids=[device_uid])
    # search by old ip should NOT return anything
    search_results = self.model_catalog.search(query=Eq("text_ipAddress", old_ip), commit_dirty=True)
    self._validate_temp_indexed_results(search_results, expected_object_uids=[])
    # set production state and confirm it is searchable
    prod_state = 1000
    device.setProdState(prod_state)
    search_results = self.model_catalog.search(query=Eq("productionState", prod_state), commit_dirty=True)
    self._validate_temp_indexed_results(search_results, expected_object_uids=[device_uid])
    # Search by uid and check all the fields are correct
    fields = ["productionState", "text_ipAddress"]
    search_results = self.model_catalog.search(query=Eq(UID, device_uid), fields=fields, commit_dirty=False)
    self.assertEquals(search_results.total, 1)
    brain = search_results.results.next()
    self.assertEquals(brain.uid, device_uid)
    self.assertEquals(brain.text_ipAddress, new_ip)
    self.assertEquals(brain.productionState, prod_state)
def createFakeDevice(self, name):
    """Build a /Test device whose template carries a COMMAND datasource,
    a single datapoint, and a MinMax threshold bound to that datapoint."""
    from Products.ZenModel.Device import manage_createDevice
    from Products.ZenModel.RRDTemplate import manage_addRRDTemplate
    self.dev = manage_createDevice(self.dmd, deviceName=name, devicePath='/Test')
    # Template lives on the /Test device class created above.
    manage_addRRDTemplate(self.dmd.Devices.Test.rrdTemplates, 'Device')
    tmpl = self.dmd.Devices.Test.rrdTemplates.Device
    source = tmpl.manage_addRRDDataSource('ds', 'BasicDataSource.COMMAND')
    source.manage_addRRDDataPoint('dp')
    limit = tmpl.manage_addRRDThreshold('limit', 'MinMaxThreshold')
    limit.maxval = "100"
    limit.dsnames = ('ds_dp', )
def _doDbWork():
    """Return a (device, created) pair: the device object (either new or
    existing) and a flag indicating whether the device was newly created
    (True) or just updated (False).  Returns (None, False) when the IP's
    network is flagged non-discoverable and ``force`` is not set.

    NOTE(review): closure -- ``self``, ``kw``, ``ip`` and ``force`` are
    free variables from the enclosing function.

    FIX: replaced the legacy ``except DeviceExistsError, e`` syntax with
    ``except DeviceExistsError as e`` for consistency with the rest of
    the codebase and Python 3 compatibility.
    """
    try:
        # Network root for the collector named in kw (default 'localhost').
        netroot = getNetworkRoot(
            self.dmd, kw.get('performanceMonitor', 'localhost'))
        netobj = netroot.getNet(ip)
        # Fall back to /24 when the containing network is unknown and no
        # zDefaultNetworkTree is configured on the network root.
        netmask = 24
        if netobj is not None:
            netmask = netobj.netmask
        else:
            defaultNetmasks = getattr(netroot, 'zDefaultNetworkTree', [])
            if defaultNetmasks:
                netmask = defaultNetmasks[0]
        # netobj may be None here; getattr then yields the default True.
        autoDiscover = getattr(netobj, 'zAutoDiscover', True)
        # If we're not supposed to discover this IP, return None
        if not force and not autoDiscover:
            return None, False
        kw['manageIp'] = ipunwrap(ip)
        dev = manage_createDevice(self.dmd, **kw)
        netroot.createIp(ip, netmask)
        return dev, True
    except DeviceExistsError as e:
        # Update device with latest info from zendisc
        # (if necessary)
        if not e.dev.getManageIp():
            e.dev.setManageIp(kw['manageIp'])
        # only overwrite title if it has not been set
        if not e.dev.title or isip(e.dev.title):
            if not isip(kw.get('deviceName')):
                e.dev.setTitle(kw['deviceName'])
        # copy kw->updateAttributes, to keep kw intact in case
        # we need to retry transaction
        updateAttributes = {}
        for k, v in kw.items():
            if k not in ('manageIp', 'deviceName', 'devicePath',
                         'discoverProto', 'performanceMonitor',
                         'productionState'):
                updateAttributes[k] = v
        # use updateDevice so we don't clobber existing device properties.
        e.dev.updateDevice(**updateAttributes)
        return e.dev, False
def createDevice(dmd, deviceId):
    """Thin wrapper: create a device with all manage_createDevice defaults."""
    device = manage_createDevice(dmd, deviceId)
    return device
def testPartialUpdates(self):
    """Check that atomic (partial) index updates after a commit carry the
    right field sets, combine correctly, and leave no temporary documents
    behind once the transaction is re-committed."""
    # for this test we need to create a test device and commit the changes
    device = manage_createDevice(self.dmd, 'my_device', '/')
    ip = "10.10.10.1"
    prod_state = 500
    device_uid = device.idx_uid()
    device.setManageIp(ip)
    device.setProdState(prod_state)
    # get the uids we are about to commit so we can revert them at the end
    tx_state = self._get_transaction_state()
    tid = tx_state.tid
    updated_uids = set(tx_state.pending_updates.keys()) | tx_state.temp_indexed_uids
    try:
        # simulate the transaction was committed and do a few partial updates
        self._simulate_tx_commit()
        # make sure the device was correctly indexed
        fields = ["productionState", "text_ipAddress"]
        search_results = self.model_catalog.search(query=Eq(UID, device_uid), fields=fields, commit_dirty=False)
        self.assertEquals(search_results.total, 1)
        brain = search_results.results.next()
        self.assertEquals(brain.uid, device_uid)
        self.assertEquals(brain.text_ipAddress, ip)
        self.assertEquals(brain.productionState, prod_state)
        # update prod state triggers an atomic update
        new_prod_state = 1000
        device.setProdState(new_prod_state)
        # tx_state.pending_updates.values()[0].spec.to_dict()
        # mi_results = self.model_index.search(SearchParams(Eq(UID, device_uid)))
        # repeat the search and make sure that the atomic update has all the fields it should
        search_results = self.model_catalog.search(query=Eq(UID, device_uid), fields=fields, commit_dirty=True)
        self.assertEquals(search_results.total, 1)
        brain = search_results.results.next()
        self.assertEquals(brain.uid, device_uid)
        self.assertEquals(brain.text_ipAddress, ip)
        self.assertEquals(brain.productionState, new_prod_state)
        # Make sure the index update is correct
        tx_state = self._get_transaction_state()
        index_update = tx_state.indexed_updates.get(device_uid)
        self.assertIsNotNone(index_update)
        expected_fields = MANDATORY_FIELDS | set(["productionState"])
        self.assertEquals(expected_fields, index_update.idxs)
        # Set manage ip also sends a partial update for fields
        # 'decimal_ipAddress', 'text_ipAddress'
        new_ip = "10.10.10.2"
        device.setManageIp(new_ip)
        search_results = self.model_catalog.search(query=Eq(UID, device_uid), fields=fields, commit_dirty=True)
        self.assertEquals(search_results.total, 1)
        brain = search_results.results.next()
        self.assertEquals(brain.uid, device_uid)
        self.assertEquals(brain.text_ipAddress, new_ip)
        self.assertEquals(brain.productionState, new_prod_state)
        # Make sure the partial updates have been correctly combined
        tx_state = self._get_transaction_state()
        index_update = tx_state.indexed_updates.get(device_uid)
        self.assertIsNotNone(index_update)
        expected_fields = MANDATORY_FIELDS | set(['decimal_ipAddress', 'text_ipAddress', "productionState"])
        self.assertEquals(expected_fields, index_update.idxs)
        # simulate another transaction commit and check everything went well
        self._simulate_tx_commit()
        search_results = self.model_catalog.search(query=Eq(UID, device_uid), fields=fields, commit_dirty=False)
        self.assertEquals(search_results.total, 1)
        brain = search_results.results.next()
        self.assertEquals(brain.uid, device_uid)
        self.assertEquals(brain.text_ipAddress, new_ip)
        self.assertEquals(brain.productionState, new_prod_state)
        # make sure all temp documents have been deleted
        search_results = self.model_catalog.search(query=Eq(TX_STATE_FIELD, tid), commit_dirty=False)
        self.assertEquals(search_results.total, 0)
    finally:
        # Clean up everything this test indexed, even on failure.
        query = In(UID, updated_uids)
        self.model_index.unindex_search(SearchParams(query))
def load_device(self, deviceName, devicePath='/Discovered', discoverProto='snmp', performanceMonitor='localhost', manageIp="", zProperties=None, deviceProperties=None): """ Load a single device into the database. """ # Make the config dictionaries the proper type try: if zProperties is None: zProperties = {} if deviceProperties is None: deviceProperties = {} # Remove spaces from the name deviceName = deviceName.replace(' ', '') manageIp = manageIp.replace(' ', '') if not manageIp: try: IPAddress(deviceName) manageIp = deviceName deviceName = ipwrap(deviceName) deviceProperties.setdefault('title', manageIp) except ValueError: pass # If we're not discovering and we have no IP, attempt the IP lookup # locally if discoverProto == 'none' and not manageIp: try: manageIp = getHostByName(deviceName) except socket.error: pass # move the zProperties required by manage_createDevice to # deviceProperties for key in 'zSnmpCommunity', 'zSnmpPort', 'zSnmpVer': if key in zProperties: deviceProperties[key] = zProperties.pop(key) # Make a device object in the database self.deviceobj = manage_createDevice( self.context, deviceName, devicePath, performanceMonitor=performanceMonitor, manageIp=manageIp, zProperties=zProperties, **deviceProperties) # Flag this device as temporary. # If discovery goes well, zendisc will flip this to False. self.deviceobj._temp_device = True # If we're not discovering, we're done if discoverProto == 'none': return self.deviceobj # Pass production state from device properties productionState = deviceProperties.get('productionState', 1000) # Otherwise, time for zendisc to do its thing self.run_zendisc(deviceName, devicePath, performanceMonitor, productionState) finally: # Check discovery's success and clean up accordingly self.cleanup() return self.deviceobj
def testPartialUpdates(self):
    """Verify that post-commit atomic index updates carry the correct
    field sets, merge when repeated, and leave no temporary documents
    after the follow-up commit."""
    # for this test we need to create a test device and commit the changes
    device = manage_createDevice(self.dmd, 'my_device', '/')
    ip = "10.10.10.1"
    prod_state = 500
    device_uid = device.idx_uid()
    device.setManageIp(ip)
    device.setProdState(prod_state)
    # get the uids we are about to commit so we can revert them at the end
    tx_state = self._get_transaction_state()
    tid = tx_state.tid
    updated_uids = set(
        tx_state.pending_updates.keys()) | tx_state.temp_indexed_uids
    try:
        # simulate the transaction was committed and do a few partial updates
        self._simulate_tx_commit()
        # make sure the device was correctly indexed
        fields = ["productionState", "text_ipAddress"]
        search_results = self.model_catalog.search(query=Eq(
            UID, device_uid), fields=fields, commit_dirty=False)
        self.assertEquals(search_results.total, 1)
        brain = search_results.results.next()
        self.assertEquals(brain.uid, device_uid)
        self.assertEquals(brain.text_ipAddress, ip)
        self.assertEquals(brain.productionState, prod_state)
        # update prod state triggers an atomic update
        new_prod_state = 1000
        device.setProdState(new_prod_state)
        # tx_state.pending_updates.values()[0].spec.to_dict()
        # mi_results = self.model_index.search(SearchParams(Eq(UID, device_uid)))
        # repeat the search and make sure that the atomic update has all the fields it should
        search_results = self.model_catalog.search(query=Eq(
            UID, device_uid), fields=fields, commit_dirty=True)
        self.assertEquals(search_results.total, 1)
        brain = search_results.results.next()
        self.assertEquals(brain.uid, device_uid)
        self.assertEquals(brain.text_ipAddress, ip)
        self.assertEquals(brain.productionState, new_prod_state)
        # Make sure the index update is correct
        tx_state = self._get_transaction_state()
        index_update = tx_state.indexed_updates.get(device_uid)
        self.assertIsNotNone(index_update)
        expected_fields = MANDATORY_FIELDS | set(["productionState"])
        self.assertEquals(expected_fields, index_update.idxs)
        # Set manage ip also sends a partial update for fields
        # 'decimal_ipAddress', 'text_ipAddress'
        new_ip = "10.10.10.2"
        device.setManageIp(new_ip)
        search_results = self.model_catalog.search(query=Eq(
            UID, device_uid), fields=fields, commit_dirty=True)
        self.assertEquals(search_results.total, 1)
        brain = search_results.results.next()
        self.assertEquals(brain.uid, device_uid)
        self.assertEquals(brain.text_ipAddress, new_ip)
        self.assertEquals(brain.productionState, new_prod_state)
        # Make sure the partial updates have been correctly combined
        tx_state = self._get_transaction_state()
        index_update = tx_state.indexed_updates.get(device_uid)
        self.assertIsNotNone(index_update)
        expected_fields = MANDATORY_FIELDS | set(
            ['decimal_ipAddress', 'text_ipAddress', "productionState"])
        self.assertEquals(expected_fields, index_update.idxs)
        # simulate another transaction commit and check everything went well
        self._simulate_tx_commit()
        search_results = self.model_catalog.search(query=Eq(
            UID, device_uid), fields=fields, commit_dirty=False)
        self.assertEquals(search_results.total, 1)
        brain = search_results.results.next()
        self.assertEquals(brain.uid, device_uid)
        self.assertEquals(brain.text_ipAddress, new_ip)
        self.assertEquals(brain.productionState, new_prod_state)
        # make sure all temp documents have been deleted
        search_results = self.model_catalog.search(query=Eq(
            TX_STATE_FIELD, tid), commit_dirty=False)
        self.assertEquals(search_results.total, 0)
    finally:
        # Clean up everything this test indexed, even on failure.
        query = In(UID, updated_uids)
        self.model_index.unindex_search(SearchParams(query))
def createTestDevice(dmd, deviceId, propertyMap=None,
                     deviceClass='/Devices/Server'):
    """Create a device and apply any matching attributes from propertyMap.

    Only keys that already exist as attributes on the device are applied.

    BUG FIX: the mutable default argument ``propertyMap={}`` was shared
    across calls; a None sentinel keeps the signature backward-compatible.
    """
    if propertyMap is None:
        propertyMap = {}
    dev = manage_createDevice(dmd, deviceId, deviceClass)
    for key, value in propertyMap.iteritems():
        # Silently skip properties the device does not define.
        if hasattr(dev, key):
            setattr(dev, key, value)
    return dev
def createAndUpdateModelObjects(self, newObjectPropertiesList, organizer,
                                warnOnMissingDevices=True,
                                allowDuplicates=False):
    """
    Modifies model information in the database given the information
    received from scraping the google account.  New objects are created,
    existing objects are updated.

    @param newObjectPropertiesList: a group of object properties, all
           representing a common type of object (Application, Task
           Queue, etc)
    @type newObjectPropertiesList: C{list} of
           L{AppEngineManagedObjectProperties}
    @param organizer: the organizer that holds items of this type.  For
           example, if we have modeled the infrastructure represented by
           the organizer /Devices/AppEngine/test and these are the
           Application property sets, then the organizer at
           /Devices/AppEngine/test/Applications would be passed
    @type organizer: L{DeviceOrganizer}
    @param warnOnMissingDevices: if I{True} and the item exists in the
           organizer but is not present in the supplied property sets,
           then an event is created and the device in the database is
           marked as I{Decommissioned}
    @type warnOnMissingDevices: C{boolean}
    """
    currentModelItemMap = mapIdsToModelItems(organizer.devices())
    newObjectPropertiesMap = mapIdAttributesToObjectProperties(
        newObjectPropertiesList)
    organizerPath = organizer.getOrganizerName()
    # Items present remotely but not locally get added; the reverse set
    # gets warned about / decommissioned below.
    modelItemsToAdd = Set(newObjectPropertiesMap.keys()).difference(
        Set(currentModelItemMap.keys()))
    modelItemsToDelete = Set(currentModelItemMap.keys()).difference(
        Set(newObjectPropertiesMap.keys()))
    modelItems = {}
    for objectRef, objectProperties in newObjectPropertiesMap.iteritems():
        objectId = prepId(str(objectProperties.getId()).encode('ascii'))
        if objectRef in modelItemsToAdd:
            # NOTE(review): reconstructed nesting -- a globally existing
            # device is treated as an update, not re-created; confirm
            # against revision history.
            existingDevices = self.dmd.Devices._findDevice(objectId)
            if not existingDevices:
                perfMonitor = organizer.getZ('zAppEngineInstanceMonitor')
                modelItem = manage_createDevice(
                    self.dmd, objectId, organizerPath,
                    performanceMonitor=perfMonitor)
                action = 'adding'
            else:
                modelItem = currentModelItemMap[objectId]
                action = 'updating'
            # Handle id and name attributes: default missing names to the
            # raw id, then normalize the stored id.
            idValue = objectProperties.attributes['id']
            if not objectProperties.attributes.has_key('name'):
                objectProperties.attributes['name'] = idValue
            objectProperties.attributes['id'] = prepId(idValue)
            applyPropertiesToObject(modelItem, objectProperties)
            for componentList in objectProperties.components.values():
                self.createComponents(componentList, modelItem)
            self.log.debug(('addInfrastructure():%s model object %s '
                            + 'with properties %s ')
                           % (action, modelItem, objectProperties))
            modelItems[objectId] = modelItem
    transaction.commit()
    if modelItemsToDelete and warnOnMissingDevices:
        events = []
        for deviceToDelete in [currentModelItemMap[id]
                               for id in modelItemsToDelete]:
            deviceId = deviceToDelete.id
            events.append({
                'severity': Event.Warning,
                'eventClass': "/Status/AppEngine",
                'eventKey': "DeviceNotFound",
                'summary': "%s/%s not found on target AppEngine instance"
                           % (organizer.getOrganizerName(), deviceId),
                'device': deviceId})
            # decommission the device
            if deviceToDelete.productionState != -1:
                # BUG FIX: original applied '%' only to the second string
                # of a '+' concatenation ('…%s; ' + '…' % deviceId), which
                # raises TypeError at log time; use lazy logging args.
                self.log.info(
                    'Last AppEngine modeling did not find %s; '
                    'setting production state to Decommissioned', deviceId)
                deviceToDelete.setProdState(-1)
        self.zem.sendEvents(events)
    return modelItems