def getFreeSlots(self, planetID):
    """Return the number of free building slots on planetID for the current player.

    The base count (plSlots - occupied slots) is adjusted by construction
    tasks queued on any planet in this system that target planetID.
    """
    planet = client.get(planetID, noUpdate=1)
    system = client.get(self.systemID, noUpdate=1)
    player = client.getPlayer()
    owner = getattr(planet, "owner", OID_NONE)
    freeSlots = 0
    # only the owning player's slot count is meaningful
    if owner == player.oid and hasattr(planet, "slots") and hasattr(planet, "plSlots"):
        freeSlots = planet.plSlots - len(planet.slots)
    if hasattr(system, "planets"):
        for sPlanetID in system.planets:
            sPlanet = client.get(sPlanetID, noUpdate=1)
            if hasattr(sPlanet, "prodQueue"):
                for task in sPlanet.prodQueue:
                    # Target Slot does not changes number of free slots
                    if (
                        not task.isShip
                        and hasattr(task, "demolishStruct")
                        and task.demolishStruct == 0
                        and task.targetID == planetID
                    ):
                        tech = client.getFullTechInfo(task.techID)
                        # constructing Habitable Surface Expansion increases planet slots
                        if tech.isProject and tech.id == 3802:
                            freeSlots += task.quantity
                        elif tech.isStructure:
                            freeSlots -= task.quantity
    return freeSlots
def getFullMessageText(message):
    """Gets full text of automatically generated message.

    If message has no data to generate, it returns empty string.
    """
    # dict.has_key() was removed in Python 3; 'in' works on both 2 and 3
    if "data" not in message:
        return ""
    sourceID, msgID, locationID, turn, data = message["data"]
    sev = getMsgSeverity(msgID)
    currTurn = client.getTurn()
    player = client.getPlayer()
    # collect lines and join once instead of repeated "%s%s\n" concatenation
    lines = []
    # source
    if sourceID != OID_NONE and sourceID != player.oid:
        obj = client.get(sourceID, noUpdate=1)
        if obj:
            source = getattr(obj, "name", res.getUnknownName())
        else:
            source = _("N/A")
    else:
        source = _("-")
    lines.append(_("Source: %s") % source)
    # location
    if locationID != OID_NONE:
        obj = client.get(locationID, noUpdate=1)
        location = getattr(obj, "name", res.getUnknownName())
    else:
        location = _("-")
    lines.append(_("Location: %s") % location)
    lines.append(_("Severity: %s") % _(gdata.msgSeverity[sev]))
    lines.append(_("Time: %s [%s]") % (res.formatTime(turn), res.formatTime(turn - currTurn)))
    lines.append("")
    lines.append(getMsgText(msgID, data))
    # the original format appended "\n" after every line, including the last
    return "\n".join(lines) + "\n"
def init():
    """Establish the NSX manager REST context from environment variables,
    falling back to the cluster virtual IP when the first manager IP is
    not reachable over SSL."""
    # first manager address of the comma-separated env list
    nsx_mgr_ip = os.getenv('nsx_manager_ips_int').split(',')[0].strip()
    nsx_mgr_user = os.getenv('nsx_manager_username_int', 'admin')
    nsx_mgr_pwd = os.getenv('nsx_manager_password_int')
    nsx_mgr_context = {
        'admin_user': nsx_mgr_user,
        'url': 'https://' + nsx_mgr_ip,
        'admin_passwd': nsx_mgr_pwd
    }
    # TODO: the value of transport zone name is current static, and hidden from user.
    # see vars.yml
    global_id_map['DEFAULT_TRANSPORT_ZONE_NAME'] = 'overlay-tz'
    client.set_context(nsx_mgr_context)
    try:
        print('Using manager IP address at %s' % nsx_mgr_ip)
        # probe request to verify the manager answers
        client.get(TRANSPORT_ZONES_ENDPOINT)
    except requests.exceptions.SSLError:
        vip = os.getenv('nsx_manager_virtual_ip_int', '').strip()
        if vip == '':
            print(
                'Manager IP is not accessible and VIP is not set, unable to connect '
                'to nsx manager!')
            raise
        print('Manager IP is not accessible, using cluster vip at %s!' % vip)
        cluster_context = {
            'admin_user': nsx_mgr_user,
            'url': 'https://' + vip,
            'admin_passwd': nsx_mgr_pwd
        }
        client.set_context(cluster_context)
def GET(self, group_id):
    """Render the group recommendation page (the group plus all users)."""
    render = web.template.render('asset', base='after.common', globals=globals())
    r, t = client.get('/groups/%i/' % int(group_id))
    r, j = client.get('/users/')
    # NOTE(review): only the status of the second request is checked here
    if ok(r):
        return render.groups_recommend(users=j, group=t)
    return web.notfound()
def getFreeSlots(self, planetID):
    """Return the number of free building slots on planetID for the current player.

    The base count (plSlots - occupied slots) is adjusted by construction
    tasks queued on any planet in this system that target planetID.
    """
    planet = client.get(planetID, noUpdate=1)
    system = client.get(self.systemID, noUpdate=1)
    player = client.getPlayer()
    owner = getattr(planet, 'owner', Const.OID_NONE)
    freeSlots = 0
    # only the owning player's slot count is meaningful
    if owner == player.oid and hasattr(planet, 'slots') and hasattr(
            planet, 'plSlots'):
        freeSlots = planet.plSlots - len(planet.slots)
    if hasattr(system, 'planets'):
        for sPlanetID in system.planets:
            sPlanet = client.get(sPlanetID, noUpdate=1)
            if hasattr(sPlanet, 'prodQueue'):
                for task in sPlanet.prodQueue:
                    # Target Slot does not changes number of free slots
                    if not task.isShip and hasattr(
                            task, "demolishStruct"
                    ) and task.demolishStruct == 0 and task.targetID == planetID:
                        tech = client.getFullTechInfo(task.techID)
                        # constructing Habitable Surface Expansion increases planet slots
                        if tech.isProject and tech.id == 3802:
                            freeSlots += task.quantity
                        elif tech.isStructure:
                            freeSlots -= task.quantity
    return freeSlots
def precompute(self):
    """Precompute the overview map.

    Collects (oid, x, y, color) tuples for every system and wormhole in the
    client db, and derives the shift/scale factors that map galaxy
    coordinates onto this widget's pixel area.
    """
    minX = minY = 1000000
    maxX = maxY = 0
    for objID in client.db.keys():
        if objID < OID_FREESTART:
            continue
        obj = client.get(objID, noUpdate=1)
        if not (hasattr(obj, "type") and hasattr(obj, "x") and hasattr(obj, "y")):
            continue
        if obj.type in (T_SYSTEM, T_WORMHOLE):
            # color by the first owned planet found in the system, if any
            ownerID = OID_NONE
            if hasattr(obj, 'planets'):
                for planetID in obj.planets:
                    planet = client.get(planetID, noUpdate=1)
                    owner = getattr(planet, 'owner', OID_NONE)
                    if int(owner) != 0:
                        ownerID = owner
                        break
            color = res.getPlayerColor(ownerID)
            minX = min(minX, obj.x)
            minY = min(minY, obj.y)
            maxX = max(maxX, obj.x)
            maxY = max(maxY, obj.y)
            self._map.append((obj.oid, obj.x, obj.y, color))
    self._minX = minX
    self._minY = minY
    zoomFactor = 0.05  # percent margin kept around the map edges
    self._shiftX = int(self._width * zoomFactor)
    self._shiftY = int(self._height * zoomFactor)
    self._scaleX = float(self._width - 2 * self._shiftX) / float(maxX - minX)
    # BUG FIX: the Y scale previously subtracted 2 * self._shiftX; use the
    # vertical margin so the Y axis scales against the correct drawable height
    self._scaleY = float(self._height - 2 * self._shiftY) / float(maxY - minY)
    self._repaintMap = True
def identify_edges_and_hosts():
    """Poll every NSX-T fabric node's status endpoint until all installs
    reach a terminal state or the retry budget runs out.

    Returns True when any install failed or retries were exhausted.
    """
    retries = 0
    failed_uninstalls = {}
    bailout = False
    install_failed = False
    fabric_nodes_api_endpoint = FABRIC_NODES_ENDPOINT
    fabric_nodes_resp = client.get(fabric_nodes_api_endpoint)
    # Check periodically for install status
    print 'Checking status of the NSX-T Fabric Nodes Addition!\n'
    while (retries < MAX_RETRY_CHECK and not bailout):
        still_in_progress = False
        print '{} Checking Status <Try: {}>\n'.format(datetime.now(), retries + 1)
        for fabric_node in fabric_nodes_resp.json()['results']:
            #print 'Fabric Node: {}'.format(fabric_node)
            fabric_node_state_url = '%s/%s/status' % (fabric_nodes_api_endpoint, fabric_node['id'])
            fabric_node_state_resp = client.get(fabric_node_state_url)
            message = fabric_node_state_resp.json()
            print ' Node: {}, IP: {}, Type: {}, Status: {}'.format(
                fabric_node['display_name'],
                fabric_node['ip_addresses'][0],
                fabric_node['resource_type'],
                message['host_node_deployment_status']
            )
            # Dont bail out when things are still in progress
            if message['host_node_deployment_status'] in ['INSTALL_IN_PROGRESS']:
                still_in_progress = True
            if message['host_node_deployment_status'] in [
                    'INSTALL_FAILED', 'INSTALL_SUCCESSFUL']:
                bailout = True
            if message['host_node_deployment_status'] == 'INSTALL_FAILED':
                install_failed = True
                #print '\nERROR!! Install of NSX-T Modules on the ESXi Hosts failed!!'
                #print 'Check the NSX Manager for reasons for the failure, Exiting!!\n'
        # If anything still in progress, let it continue, retry the check status
        # Ignore other failed or success states till all are completed
        if still_in_progress:
            bailout = False
            print ' Sleeping for {} seconds before checking status of installs!\n'.format(RETRY_INTERVAL)
            time.sleep(RETRY_INTERVAL)
        retries += 1
    if retries == MAX_RETRY_CHECK:
        print '\nWARNING!! Max retries reached for checking if hosts have been added to NSX-T.\n'
        install_failed = True
    if install_failed == True:
        print '\nERROR!! Install of NSX-T Modules on the ESXi Hosts failed!!'
        print 'Something wrong with configuring the Hosts as part of the NSX-T Fabric, check NSX-T Mgr Fabric -> Nodes status'
        print 'Check the NSX Manager for reasons for the failure, Exiting!!'
    else:
        print '\nAll the ESXi host addition as transport nodes successfull!!'
        print ''
    return install_failed
def gotoObject(self, objIDs, bObjIDs):
    """Handle a map click that resolved to objects and/or buoys.

    With exactly one hit, act on it directly (or return its id in
    select-object mode); with several, build a pop-up menu listing each
    object and buoy so the player can pick one.
    """
    if len(objIDs) + len(bObjIDs) == 1:
        if len(objIDs) == 1:
            if self.selectobject:
                return objIDs[0]
            self.processAction(self.action, objIDs[0])
            self.pressedObjIDs = []
        else:
            if self.selectobject:
                return Const.OID_NONE
            self.showBuoyDlg.display(bObjIDs[0])
            self.pressedBuoyObjIDs = []
    else:
        # multiple objects -> post pop-up menu
        items = []
        for objID in objIDs:
            obj = client.get(objID)
            if obj.type == Const.T_SYSTEM:
                name = getattr(obj, "name", None)
                name = _("System: %s [ID: %d]") % (
                    name or res.getUnknownName(), obj.oid)
            elif obj.type == Const.T_WORMHOLE:
                name = getattr(obj, "name", None)
                name = _("Worm hole: %s [ID: %d]") % (
                    name or res.getUnknownName(), obj.oid)
            elif obj.type == Const.T_PLANET:
                name = getattr(obj, "name", None)
                name = _("Planet: %s [ID: %d]") % (
                    name or res.getUnknownName(), obj.oid)
            elif obj.type == Const.T_FLEET:
                # fleets may carry a player-assigned custom name
                if hasattr(obj, 'customname') and obj.customname:
                    name = obj.customname
                else:
                    name = getattr(obj, "name", None)
                name = _("Fleet: %s [ID: %d]") % (
                    name or res.getUnknownName(), obj.oid)
            else:
                name = _("Unknown object [ID: %d]") % obj.oid
            item = ui.Item(name, action="onObjectSelected", data=objID)
            items.append(item)
        for objID in bObjIDs:
            obj = client.get(objID)
            if obj.type == Const.T_SYSTEM:
                name = getattr(obj, "name", None)
                name = _("Buoy on system: %s [ID: %d]") % (
                    name or res.getUnknownName(), obj.oid)
            elif obj.type == Const.T_WORMHOLE:
                name = getattr(obj, "name", None)
                name = _("Buoy on worm hole: %s [ID: %d]") % (
                    name or res.getUnknownName(), obj.oid)
            else:
                name = _("Buoy on unknown object [ID: %d]") % obj.oid
            item = ui.Item(name, action="onBuoySelected", data=objID)
            items.append(item)
        self.popup.items = items
        self.popup.show()
    if self.selectobject:
        return Const.OID_NONE
def GET(self, topic_id):
    """Admin-only page recommending users for a topic."""
    if not is_admin():
        return web.notfound()
    render = web.template.render('asset', base='after.common', globals=globals())
    r, t = client.get('/topics/%i/' % int(topic_id))
    r, j = client.get('/users/')
    # NOTE(review): only the status of the second request is checked here
    if ok(r):
        return render.topics_recommend(users=j, topic=t)
    return web.notfound()
def GET(self, id):
    """Show a user's profile page together with their friend list."""
    if not user():
        return web.notfound()
    render = web.template.render('asset', base='after.common', globals=globals())
    r, j = client.get('/users/%i/' % int(id))
    if ok(r):
        r, f = client.get('/users/%i/friends/' % int(id))
        if ok(r):
            return render.user(user=j, friends=f)
    return web.notfound()
def gotoObject(self, objIDs, bObjIDs):
    """Handle a map click that resolved to objects and/or buoys.

    A single hit is acted on directly (or returned in select-object mode);
    multiple hits open a pop-up menu so the player can choose one.
    """
    if len(objIDs) + len(bObjIDs) == 1:
        if len(objIDs) == 1:
            if self.selectobject:
                return objIDs[0]
            self.processAction(self.action, objIDs[0])
            self.pressedObjIDs = []
        else:
            if self.selectobject:
                return Const.OID_NONE
            self.showBuoyDlg.display(bObjIDs[0])
            self.pressedBuoyObjIDs = []
    else:
        # multiple objects -> post pop-up menu
        items = []
        for objID in objIDs:
            obj = client.get(objID)
            if obj.type == Const.T_SYSTEM:
                name = getattr(obj, "name", None)
                name = _("System: %s [ID: %d]") % (name or res.getUnknownName(), obj.oid)
            elif obj.type == Const.T_WORMHOLE:
                name = getattr(obj, "name", None)
                name = _("Worm hole: %s [ID: %d]") % (name or res.getUnknownName(), obj.oid)
            elif obj.type == Const.T_PLANET:
                name = getattr(obj, "name", None)
                name = _("Planet: %s [ID: %d]") % (name or res.getUnknownName(), obj.oid)
            elif obj.type == Const.T_FLEET:
                # fleets may carry a player-assigned custom name
                if hasattr(obj, 'customname') and obj.customname:
                    name = obj.customname
                else:
                    name = getattr(obj, "name", None)
                name = _("Fleet: %s [ID: %d]") % (name or res.getUnknownName(), obj.oid)
            elif obj.type == Const.T_ASTEROID:
                name = getattr(obj, "name", None)
                name = _("Asteroid: %s [ID: %d]") % (name or res.getUnknownName(), obj.oid)
            else:
                name = _("Unknown object [ID: %d]") % obj.oid
            item = ui.Item(name, action = "onObjectSelected", data = objID)
            items.append(item)
        for objID in bObjIDs:
            obj = client.get(objID)
            if obj.type == Const.T_SYSTEM:
                name = getattr(obj, "name", None)
                name = _("Buoy on system: %s [ID: %d]") % (name or res.getUnknownName(), obj.oid)
            elif obj.type == Const.T_WORMHOLE:
                name = getattr(obj, "name", None)
                name = _("Buoy on worm hole: %s [ID: %d]") % (name or res.getUnknownName(), obj.oid)
            else:
                name = _("Buoy on unknown object [ID: %d]") % obj.oid
            item = ui.Item(name, action = "onBuoySelected", data = objID)
            items.append(item)
        self.popup.items = items
        self.popup.show()
    if self.selectobject:
        return Const.OID_NONE
def GET(self, id):
    """Topic detail page: render the topic together with its comments."""
    if not user():
        return web.notfound()
    render = web.template.render('asset', base='after.common', globals=globals())
    r, j = client.get('/topics/%i/' % int(id))
    if r != codes.ok:
        return web.notfound()
    r, c = client.get('/topics/%i/comments/' % int(id))
    if r != codes.ok:
        return web.notfound()
    return render.topics_detail(topic=j, comments=c)
def disable_auto_install_for_compute_fabric():
    """Turn off NSX auto-install on every compute-fabric template.

    For each template: first remove any transport-node templates linked to
    its compute collection, then PUT the template back with
    auto_install_nsx = False. Exits the process on a failed update.
    """
    compute_fabric_collection_api_endpoint = COMPUTE_COLLECTION_FABRIC_TEMPLATES_ENDPOINT
    transport_node_collection_api_endpoint = COMPUTE_COLLECTION_TRANSPORT_NODES_ENDPOINT
    outer_resp = client.get(compute_fabric_collection_api_endpoint)
    #print 'Got Compute collection respo: {}'.format(outer_resp)
    compute_fabric_templates = outer_resp.json()['results']
    for compute_fabric in compute_fabric_templates:
        #print 'Iterating over Compute fabric respo: {}'.format(compute_fabric)
        compute_fabric['auto_install_nsx'] = False
        compute_fabric_id = compute_fabric['id']
        compute_collection_id = compute_fabric['compute_collection_id']
        # First remove the related transport node template from the compute collection relationship
        transport_node_association_from_compute_fabric_api_endpoint = '%s?compute_collection_id=%s' % (
            transport_node_collection_api_endpoint, compute_collection_id)
        get_resp = client.get(
            transport_node_association_from_compute_fabric_api_endpoint, check=False)
        if get_resp.status_code == 200:
            try:
                for transport_node in get_resp.json()['results']:
                    transport_node_id = transport_node['id']
                    transport_node_removal_api_endpoint = '%s/%s' % (
                        transport_node_collection_api_endpoint, transport_node_id)
                    delete_resp = client.delete(
                        transport_node_removal_api_endpoint, check=False)
                    print 'Removed auto-linking of Host as Transport Node in Fabric for Compute Manager: {}'.format(
                        compute_fabric['compute_collection_id'])
            except Exception as e:
                print 'No transport nodes associated'
                #ignore
        # Now change the compute fabric template
        compute_fabric_update_api_endpoint = '%s/%s' % (
            compute_fabric_collection_api_endpoint, compute_fabric_id)
        resp = client.put(compute_fabric_update_api_endpoint, compute_fabric, check=False)
        if resp.status_code < 400:
            print 'Disabled auto install of NSX in Compute Fabric: {}'.format(
                compute_fabric['compute_collection_id'])
            print ''
        else:
            print 'Problem in disabling auto install in Compute Fabric: {}'.format(
                compute_fabric['compute_collection_id'])
            print 'Associated Error: {}'.format(resp.json())
            exit(1)
def objIDList2Names(objIDs):
    """Return a comma-separated, human-readable list of object names.

    Each name is suffixed with the owner's name in parentheses when the
    object has an owner other than itself.
    """
    names = []
    for objID in objIDs:
        obj = client.get(objID, noUpdate=1, publicOnly=1)
        if hasattr(obj, 'owner') and obj.owner != obj.oid:
            try:
                owner = _(' (%s)') % client.get(obj.owner, noUpdate=1, publicOnly=1).name
            except AttributeError:
                # owner object has no name attribute yet
                owner = ''
        else:
            owner = ''
        text = _('%s%s') % (getattr(obj, 'name', res.getUnknownName()), owner)
        names.append(text)
    # str.join replaces string.join (deprecated in Py2, removed in Py3)
    return ', '.join(names)
def objIDList2Names(objIDs):
    """Return a comma-separated, human-readable list of object names.

    Each name is suffixed with the owner's name in parentheses when the
    object has an owner other than itself.
    """
    names = []
    for objID in objIDs:
        obj = client.get(objID, noUpdate=1)
        if hasattr(obj, "owner") and obj.owner != obj.oid:
            try:
                owner = _(" (%s)") % client.get(obj.owner, noUpdate=1).name
            except AttributeError:
                # owner object has no name attribute yet
                owner = ""
        else:
            owner = ""
        text = _("%s%s") % (getattr(obj, "name", res.getUnknownName()), owner)
        names.append(text)
    # str.join replaces string.join (deprecated in Py2, removed in Py3)
    return ", ".join(names)
def objIDList2Names(objIDs):
    """Return a comma-separated, human-readable list of object names.

    Each name is suffixed with the owner's name in parentheses when the
    object has an owner other than itself.
    """
    names = []
    for objID in objIDs:
        obj = client.get(objID, noUpdate=1)
        if hasattr(obj, 'owner') and obj.owner != obj.oid:
            try:
                owner = _(' (%s)') % client.get(obj.owner, noUpdate=1).name
            except AttributeError:
                # owner object has no name attribute yet
                owner = ''
        else:
            owner = ''
        text = _('%s%s') % (getattr(obj, 'name', res.getUnknownName()), owner)
        names.append(text)
    # str.join replaces string.join (deprecated in Py2, removed in Py3)
    return ', '.join(names)
def fetch_profiles(client, screen_names, storage):
    """Fetch twitter profile information for screen_names and add them to storage

    Can request 100 profiles per request and 180 profiles per 15mins, hence the
    swapping of request sizes for 100, 80
    """
    lookup_uri = client.twitter_uri('users', 'lookup')
    rate_limit = 180
    while screen_names:
        # take at most 100 names per request, never more than the window allows
        if rate_limit > 100:
            size_limit, rate_limit = 100, rate_limit - 100
        else:
            size_limit = rate_limit
        clump = screen_names[:size_limit]
        response = client.get(lookup_uri, params={'screen_name': ",".join(clump)})
        if ok(response):
            # remove the processed names only after a successful fetch
            del screen_names[:size_limit]
            for profile in response.json:
                storage.store_profile(profile)
            logger.debug("fetched {} profiles, {} left".format(len(clump), len(screen_names)))
        elif not_found(response):
            # none of the screen names were valid
            del screen_names[:size_limit]
        elif rate_limited(response):
            # rate limiting, need to sleep; then the window resets
            client.wait_for(lookup_uri)
            rate_limit = 180
        else:
            raise UnexpectedError(response.status_code, response.text)
        client.enhance_my_calm()
def main():
    """Read id lookups from the input CSV, query the citation service in
    batches, and write matched records to the output CSV."""
    try:
        infile = sys.argv[1]
        outfile = sys.argv[2]
    except IndexError:
        raise Exception("An input and outpfile file is required.")
    found = []
    to_check = []
    with open(infile) as inf:
        for row in csv.DictReader(inf):
            # normalize header names to lower case, strip whitespace from values
            d = {}
            for k, v in row.items():
                d[k.lower()] = v.strip()
            to_check.append(d)
    lookup_groups = client.grouper(to_check, client.BATCH_SIZE)
    for idx, batch in enumerate(lookup_groups):
        xml = prep_request(batch)
        print>> sys.stderr, "Processing batch", idx
        # Post the batch
        rsp = client.get(xml)
        found.append(rsp)
    # Write the results to a csv file.
    with open(outfile, 'wb') as of:
        writer = csv.writer(of)
        writer.writerow(('id', 'ut', 'doi', 'pmid', 'times cited', 'source'))
        for grp in found:
            for k, item in grp.items():
                ut = item.get('ut')
                if ut is not None:
                    # prefix with the Web of Science namespace
                    ut = "WOS:" + ut
                writer.writerow([k, ut, item.get('doi', ""), item.get('pmid', ""),
                                 item.get('timesCited', '0'), item.get('sourceURL', 'N/A')])
def list_logical_switches(context, reportAll=True): existinglSwitchesResponse = client.get(NSX_URLS['lswitch']['all']+ '?&startindex=0&pagesize=100')#'/api/2.0/vdn/virtualwires') existinglSwitchesResponseDoc = xmltodict.parse(existinglSwitchesResponse.text) if DEBUG: print('LogicalSwitches response :{}\n'.format(existinglSwitchesResponse.text)) virtualWires = existinglSwitchesResponseDoc['virtualWires']['dataPage']['virtualWire'] lswitchEntries = virtualWires if isinstance(virtualWires, dict): lswitchEntries = [ virtualWires ] vcenterMobMap = refresh_moid_map(context) print_moid_map(vcenterMobMap) for lswitch in lswitchEntries: lswitch['id'] = lswitch['objectId'] lswitch['moName'] = mobclient.lookup_logicalswitch_managed_obj_name(lswitch['name']) if not lswitch.get('moName'): lswitch['moName'] = '' managed_lswitch_names = [ lswitch['name'] for lswitch in context['logical_switches']] if reportAll: print_logical_switches_available(lswitchEntries) else: managedLSwitches = [ ] for lswitch in lswitchEntries: if lswitch['name'] in managed_lswitch_names: managedLSwitches.append(lswitch) if len(managedLSwitches) > 0: print_logical_switches_available(managedLSwitches)
def check_transport_zone(context):
    """Look up the configured transport zone by name on the NSX manager;
    on a match, store its name/id into context and return True."""
    transport_zone_name = context['nsxmanager'].get('transport_zone')
    if not transport_zone_name:
        # default to '<deployment name>-tz'
        transport_zone_name = context['name'] + '-tz'
    print 'Checking for Transport Zone, name: {}'.format(transport_zone_name)
    vdnScopesResponse = client.get(NSX_URLS['scope']['all'])
    vdnScopesDoc = xmltodict.parse(vdnScopesResponse.text)
    if DEBUG:
        print('VDN Scopes output: {}'.format(vdnScopesDoc))
    # Handle multiple Transport zones
    vdnScopes = vdnScopesDoc['vdnScopes'].get('vdnScope')
    if vdnScopes:
        # If just single entry, just wrap it in an array
        if isinstance(vdnScopes, dict):
            vdnScopes = [ vdnScopes ]
        for entry in vdnScopes:
            if DEBUG:
                print('Transport Zone name in entry: {}'.format(entry['name']))
            if entry['name'] == transport_zone_name:
                context['nsxmanager']['transport_zone'] = entry['name']
                context['nsxmanager']['transport_zone_id'] = entry['objectId']
                print('Found matching TZ: {}, VDN Scope id :{}\n'.format(entry['name'], entry['objectId']))
                return True
    return False
def map_logical_switches_id(logical_switches):
    """Resolve NSX object ids for the given logical switch definitions by name.

    Mutates each entry in logical_switches, setting its 'id' when a matching
    virtual wire exists on the NSX manager.
    """
    existinglSwitchesResponse = client.get(NSX_URLS['lswitch']['all'] + '?&startindex=0&pagesize=100')
    existinglSwitchesResponseDoc = xmltodict.parse(existinglSwitchesResponse.text)
    if DEBUG:
        print('LogicalSwitches response :{}\n'.format(existinglSwitchesResponse.text))
    num_lswitches = len(logical_switches)
    matched_lswitches = 0
    virtualWires = existinglSwitchesResponseDoc['virtualWires']['dataPage']['virtualWire']
    lswitchEntries = virtualWires
    # a single switch deserializes as a dict, not a list — normalize
    if isinstance(virtualWires, dict):
        lswitchEntries = [virtualWires]
    for existingLSwitch in lswitchEntries:
        if num_lswitches == matched_lswitches:
            # every name already resolved; stop scanning
            break
        for interested_lswitch in logical_switches:
            if interested_lswitch['name'] == existingLSwitch['name']:
                interested_lswitch['id'] = existingLSwitch['objectId']
                # BUG FIX: '++matched_lswitches' is a no-op double unary plus
                # in Python, so the counter never advanced and the early-exit
                # above could never trigger
                matched_lswitches += 1
                break
    if len(logical_switches) > 0:
        print_logical_switches_available(logical_switches)
    if DEBUG:
        for interested_lswitch in logical_switches:
            if interested_lswitch.get('id') is None:
                print('Logical Switch instance with name: {} does not exist, possibly deleted already'\
                    .format(interested_lswitch['name']))
def focusOnKeyObject(self, evtkey):
    """Center and highlight the star map on the object bound to key evtkey."""
    if evtkey in gdata.objectFocus:
        objID = gdata.objectFocus[evtkey]
        obj = client.get(objID, noUpdate = 1)
        # only objects with map coordinates can be focused
        if hasattr(obj, "x"):
            gdata.mainGameDlg.win.vStarMap.highlightPos = (obj.x, obj.y)
            gdata.mainGameDlg.win.vStarMap.setPos(obj.x, obj.y)
def print_t0_route_nat_rules():
    """Dump the NAT rules of every known tier-0 router to stdout."""
    tier0_keys = [entry for entry in global_id_map if entry.startswith('ROUTER:TIER0:')]
    for entry in tier0_keys:
        router_id = global_id_map[entry]
        rules_endpoint = '%s/%s/%s' % (ROUTERS_ENDPOINT, router_id, 'nat/rules')
        rules = client.get(rules_endpoint).json()
        print('NAT Rules for T0 Router: {}\n{}'.format(router_id, rules))
def fetch_cursored_collection(client, screen_name, resource_uri, storage_func):
    """Fetch each page of friends/followers collections for a screen name
    adding the resulting list by calling storage_func(screen_name, result)
    """
    cursor = -1  # -1 requests the first page of a cursored twitter collection
    result = []
    while cursor != 0:
        response = client.get(resource_uri, params={'screen_name': screen_name, 'cursor': cursor})
        client.enhance_my_calm()
        if ok(response):
            logger.debug('fetched {} ids from {}'.format(
                len(response.json['ids']), resource_uri))
            result.extend(response.json['ids'])
            cursor = response.json['next_cursor']
            # cursor 0 means the last page was just consumed
            if cursor == 0:
                break
            logger.debug('next cursor {}'.format(cursor))
        elif not_found(response):
            # unknown user: store nothing at all
            return
        elif rate_limited(response):
            # block until the rate-limit window resets, then retry same cursor
            client.wait_for(resource_uri)
        else:
            raise UnexpectedError(response.status_code, response.text)
    storage_func(screen_name, result)
def load_loadbalancer_persistence_profiles():
    """Cache LB persistence profile ids in global_id_map, keyed by display name."""
    payload = client.get(LBR_PERSISTENCE_PROFILE_ENDPOINT).json()
    for profile in payload['results']:
        lookup_key = 'PERSISTENCE_PROFILE:' + profile['display_name']
        global_id_map[lookup_key] = profile['id']
def __getrobots(host, port):
    """Get a RobotFileParser object for a particular server.

    First it checks for an up-to-date and already parsed version in _CACHE.
    If that doesn't exist it tries to get a copy from the database, and if
    *that* doesn't exist or is out of date it gets a copy from the server
    and parses it.

    TODO unit tests
    """
    key = host + ":" + str(port)
    cache = None
    # has_key() is Python-2-only; 'in' works on both 2 and 3
    if key in _CACHE and not __expired(_CACHE[key]['modified']):
        # Try to get it from the in-memory cache
        cache = _CACHE[key]
    if cache is None:
        # Nope, not in memory (or in-memory cache expired)
        cache = {}
        data = None
        modified = None
        # try to get it from the database
        robots = archive.getlatest(host, port, "robots.txt")
        if robots is not None and not __expired(robots['timestamp']):
            data = archive.gethash(robots['hash'])
            modified = robots['timestamp']
        else:
            # We still don't have it!
            # Retrieve it from the host
            data = client.get(host, port, '0', "robots.txt")
            modified = time.time()
        cache['parsed'] = robotparser.RobotFileParser()
        cache['parsed'].parse(data)
        cache['modified'] = modified
        _CACHE[key] = cache
    return cache['parsed']
def focusOnKeyObject(self, evtkey):
    """Center and highlight the star map on the object bound to key evtkey."""
    if evtkey not in gdata.objectFocus:
        return
    target = client.get(gdata.objectFocus[evtkey], noUpdate=1)
    # only objects with map coordinates can be focused
    if not hasattr(target, "x"):
        return
    starMap = gdata.mainGameDlg.win.vStarMap
    starMap.highlightPos = (target.x, target.y)
    starMap.setPos(target.x, target.y)
def load_ip_pools():
    """Index external IP pools by display name in global_id_map and cache."""
    pools = client.get(EXTERNAL_IP_POOL_ENDPOINT).json()['results']
    for pool in pools:
        pool_key = '%s:%s' % (IP_POOL, pool['display_name'])
        global_id_map[pool_key] = pool['id']
        cache[pool_key] = pool
def main():
    """Read ISSNs from the input CSV, query JCR data in batches, and write
    impact-graph URLs to the output CSV."""
    found = []
    journals = []
    with open(sys.argv[1]) as infile:
        for num, row in enumerate(csv.DictReader(infile)):
            print >> sys.stderr, "Processing", row['ISSN']
            # fall back to the row number when no explicit ID column exists
            jid = row.get('ID', num)
            journals.append((jid, row['ISSN']))
    lookup_groups = client.grouper(journals, client.BATCH_SIZE)
    for idx, batch in enumerate(lookup_groups):
        xml = prep_request(batch)
        print >> sys.stderr, "Processing batch", idx
        # Post the batch
        rsp = client.get(xml)
        found.append(rsp)
    with open(sys.argv[2], 'wb') as outfile:
        writer = csv.writer(outfile)
        writer.writerow(('number', 'ISSN', 'JCR'))
        for grp in found:
            for item in grp:
                writer.writerow([
                    item,
                    grp[item].get('issn', 'na'),
                    grp[item].get('impactGraphURL', 'na')
                ])
def load_ip_blocks():
    """Index container IP blocks by display name in global_id_map and cache."""
    blocks = client.get(CONTAINER_IP_BLOCKS_ENDPOINT).json()['results']
    for block in blocks:
        block_key = '%s:%s' % (IP_BLOCK, block['display_name'])
        global_id_map[block_key] = block['id']
        cache[block_key] = block
def load_loadbalancer_monitors():
    """Map every LB monitor's display name to its id in global_id_map."""
    payload = client.get(LBR_MONITORS_ENDPOINT).json()
    for monitor in payload['results']:
        global_id_map['MONITOR:' + monitor['display_name']] = monitor['id']
def load_loadbalancer_app_profiles():
    """Map every LB application profile's display name to its id in global_id_map."""
    payload = client.get(LBR_APPLICATION_PROFILE_ENDPOINT).json()
    for profile in payload['results']:
        global_id_map['APP_PROFILE:' + profile['display_name']] = profile['id']
def check_cluster_name_against_router(t0_router_name, given_foundation_name):
    """Warn unless the named T0 router carries an 'ncp/cluster' tag whose
    value matches given_foundation_name. Prints a result and returns None."""
    global failed
    api_endpoint = ROUTERS_ENDPOINT
    resp = client.get(api_endpoint, check=False)
    for router in resp.json()['results']:
        if t0_router_name == router['display_name']:
            tags = router.get('tags')
            if not tags:
                print 'Warning!! No tags associated with T0 Router: {} !!\n'.format(
                    t0_router_name)
                return
            foundation_found = False
            for tag_entry in tags:
                if tag_entry.get('scope') == 'ncp/cluster':
                    if given_foundation_name == tag_entry.get('tag'):
                        print 'Specified foundation name: {} tagged correctly against T0 Router: {} ' \
                            ' with \'ncp/cluster\' scope!!\n'.format(given_foundation_name, t0_router_name)
                        return
            # router found but no matching ncp/cluster tag
            print 'Warning!! Specified foundation name: {} not tagged for T0 Router: {} ' \
                ' with \'ncp/cluster\' scope!!\n'.format(given_foundation_name, t0_router_name)
            return
def GET(self):
    """List topics; non-admins only see public ones."""
    render = web.template.render('asset', base='after.common', globals=globals())
    r, j = client.get('/topics/')
    if not is_admin():
        # hide private topics from regular users
        j = filter(lambda t: t.is_public, j)
    if ok(r):
        return render.topics_list(topics=j)
    return web.notfound()
def GET(self):
    """List the logged-in user's notifications."""
    if not user():
        return web.notfound()
    render = web.template.render('asset', base='after.common', globals=globals())
    r, j = client.get('/users/%i/notifications/' % session.user.user_id)
    if ok(r):
        return render.notifications_list(notifications=j)
    return web.notfound()
def GET(self):
    """List the logged-in user's pending group-membership requests."""
    if not user():
        return web.notfound()
    render = web.template.render('asset', base='after.common', globals=globals())
    r, j = client.get('/users/%i/groups/requests/' % session.user.user_id)
    if ok(r):
        return render.groups_requests(requests=j)
    return web.notfound()
def GET(self):
    """List all groups (requires a logged-in user)."""
    if not user():
        return web.notfound()
    render = web.template.render('asset', base='after.common', globals=globals())
    r, j = client.get('/groups/')
    if ok(r):
        return render.groups_list(groups_list=j)
    return web.notfound()
def fetchPage(self, index=None, limit=None):
    """Fetch one page of objects from this list's endpoint.

    index -- zero-based offset of the first record; when given, the
             _start/_limit fetch options are set (limit defaults to the
             configured page length).
    Returns a list of wrapped objects built with self._object_class.
    """
    if index != None:
        if limit == None:
            limit = self._page_length
        self._fetch_options['_start'] = str(index)
        self._fetch_options['_limit'] = str(limit)
    try:
        data = riq.get(self.endpoint(), self._fetch_options)
    except HTTPError as e:
        #retry, get list items has a self fixing error:
        #if it still fails, let it raise
        print "Retrying GET " + self.endpoint()
        data = riq.get(self.endpoint(), self._fetch_options)
    objects = []
    for obj in data.get('objects', []):
        objects.append(self._object_class(data=obj, parent=self._parent))
    return objects
def GET(self, id):
    """Render a single group's detail page."""
    if not user():
        return web.notfound()
    render = web.template.render('asset', base='after.common', globals=globals())
    r, j = client.get('/groups/%i/' % int(id))
    if ok(r):
        return render.groups_detail(group=j)
    return web.notfound()
def fetchPage(self, index=None, limit=None):
    """Fetch one page of objects from this list's endpoint.

    index -- zero-based offset of the first record; when given, the
             _start/_limit fetch options are set (limit defaults to the
             configured page length).
    Returns a list of wrapped objects built with self._object_class.
    """
    if index != None :
        if limit == None :
            limit = self._page_length
        self._fetch_options['_start'] = str(index)
        self._fetch_options['_limit'] = str(limit)
    try:
        data = riq.get(self.endpoint(), self._fetch_options)
    except HTTPError as e:
        #retry, get list items has a self fixing error:
        #if it still fails, let it raise
        print "Retrying GET " + self.endpoint()
        data = riq.get(self.endpoint(), self._fetch_options)
    objects = []
    for obj in data.get('objects', []) :
        objects.append(self._object_class(data=obj, parent=self._parent))
    return objects
def load_transport_zones():
    """Record every transport zone id in global_id_map under 'TZ:<name>'."""
    response = client.get(TRANSPORT_ZONES_ENDPOINT)
    for zone in response.json()['results']:
        global_id_map['TZ:' + zone['display_name']] = zone['id']
def load_logical_routers():
    """Index every logical router id in global_id_map as 'ROUTER:<type>:<name>'."""
    response = client.get(ROUTERS_ENDPOINT)
    for router in response.json()['results']:
        lookup_key = 'ROUTER:' + router['router_type'] + ':' + router['display_name']
        global_id_map[lookup_key] = router['id']
def delete_edge_clusters():
    """Delete every edge cluster known to the NSX manager."""
    api_endpoint = EDGE_CLUSTERS_ENDPOINT
    print 'Starting deletion of Edge Clusters!'
    edge_clusters_resp = client.get(api_endpoint)
    for instance in edge_clusters_resp.json()['results']:
        instance_api_endpoint = '%s/%s' % (api_endpoint, instance['id'])
        resp = client.delete(instance_api_endpoint)
    print ' Deleted Edge Clusters!'
def OnFind(self, evt):
    """Search message topics/texts from the current find index onward and
    select the first match; resets the index to 0 when nothing is found."""
    forum, objID, type, neco = self.selected
    objMessages = client.get(objID)._messages
    # newest messages first
    ids = objMessages.keys()
    ids.sort()
    ids.reverse()
    # limit key only to thes with corresponding forum
    ids = filter(lambda x: self.FilterForum(objMessages, forum, x), ids)
    found = False
    #i = self.findidx
    for i in range(self.findidx, len(ids)):
        #for messageID in ids:
        messageID = ids[i]
        message = objMessages[messageID]
        # regenerate topics for messages with data
        if message.has_key("data") and message["topic"] == "EVENT":
            sourceID, msgID, locationID, turn, data = message["data"]
            message["topic"] = messages.getMsgText(msgID, data).split("\n")[0]
        findstring = self.finddata.GetFindString().lower()
        topic = message["topic"].lower()
        # case-insensitive substring match on topic first, then body text
        if topic.find(findstring) > -1:
            self.findidx = i + 1
            self.SelectMessage(i)
            found = True
            break
        if message.has_key("text"):
            text = message["text"].lower()
            if text.find(findstring) > -1:
                self.findidx = i + 1
                self.SelectMessage(i)
                found = True
                break
        # if implement searching in automaticaly generated messages,
        # add searching here
        #if message.has_key("data"):
        #    sourceID, msgID, locationID, turn, data = message["data"]
        i += 1
    if not found:
        dlg = wx.MessageDialog(self, _("Find String Not Found"), _("Find"),
                               wx.OK | wx.ICON_INFORMATION)
        dlg.ShowModal()
        dlg.Destroy()
        # wrap the search back to the beginning for the next Find
        self.findidx = 0
    if self.finddlg:
        if not found:
            self.finddlg.SetFocus()
        else:
            self.finddlg.Destroy()
def GET(self):
    """Admin-only listing of hot topics."""
    if not is_admin():
        return web.notfound()
    render = web.template.render('asset', base='after.common', globals=globals())
    r, j = client.get('/topics/hot/')
    if ok(r):
        return render.topics_list(topics=j)
    else:
        return web.notfound()
def check_for_ip_pools(given_names):
    """Compare given_names against the IP pools present on the NSX manager."""
    response = client.get(EXTERNAL_IP_POOL_ENDPOINT, check=False)
    existing_names = [pool['display_name'] for pool in response.json()['results']]
    return check_for_match('IP Pool', given_names, existing_names)
def load_edge_clusters():
    """Cache edge cluster ids by name; the first one seen becomes the default."""
    response = client.get(EDGE_CLUSTERS_ENDPOINT)
    for cluster in response.json()['results']:
        cluster_name = cluster['display_name']
        global_id_map['EDGE_CLUSTER:' + cluster_name] = cluster['id']
        if global_id_map.get('DEFAULT_EDGE_CLUSTER_NAME') is None:
            global_id_map['DEFAULT_EDGE_CLUSTER_NAME'] = cluster_name
def GET(self):
    """Render the pending-VIPs user list for admins; 404 otherwise."""
    if not is_admin():
        return web.notfound()
    render = web.template.render('asset', base='after.common',
                                 globals=globals())
    r, j = client.get('/vips/pending/')
    return render.user_list(user_list=j) if ok(r) else web.notfound()
def check_for_security_groups(given_names):
    """Check given_names against NS group names already on the manager."""
    resp = client.get(NSGROUP_ENDPOINT, check=False)
    names_on_server = [nsg['display_name']
                       for nsg in resp.json()['results']]
    return check_for_match('NS Group', given_names, names_on_server)
def check_for_ip_blocks(given_names):
    """Check given_names against container IP block names on the manager."""
    resp = client.get(CONTAINER_IP_BLOCKS_ENDPOINT, check=False)
    results = resp.json()['results']
    found = [block['display_name'] for block in results]
    return check_for_match('IP Block', given_names, found)
def add_t0_route_nat_rules():
    """Create or update NAT rules on the T0 router named in the NAT spec.

    Reads the 'nsx_t_nat_rules_spec_int' resource definition; for each rule
    posts a new NatRule, or -- when a matching rule already exists -- updates
    only its priority when it changed. Prints a summary at the end.
    """
    import sys

    nat_rules_defns = get_rsc_def_if_configured('nsx_t_nat_rules_spec_int',
                                                'nat_rules')
    if not nat_rules_defns:
        return

    # use .get() so a missing router yields None instead of raising KeyError
    # before the error branch below can run
    t0_router_id = global_id_map.get(
        'ROUTER:TIER0:' + nat_rules_defns[0]['t0_router'])
    if t0_router_id is None:
        print('Error!! No T0Router found with name: {}'.format(
            nat_rules_defns[0]['t0_router']))
        # was `exit - 1`, an expression that never terminated the script
        sys.exit(1)

    api_endpoint = '%s/%s/%s' % (ROUTERS_ENDPOINT, t0_router_id, 'nat/rules')
    changes_detected = False
    existing_nat_rules = client.get(api_endpoint).json()['results']
    for nat_rule in nat_rules_defns:
        rule_payload = {
            'resource_type': 'NatRule',
            'enabled': True,
            'rule_priority': nat_rule['rule_priority'],
            'translated_network': nat_rule['translated_network']
        }
        if nat_rule['nat_type'] == 'dnat':
            rule_payload['action'] = 'DNAT'
            rule_payload['match_destination_network'] = nat_rule[
                'destination_network']
        else:
            rule_payload['action'] = 'SNAT'
            rule_payload['match_source_network'] = nat_rule['source_network']

        existing_nat_rule = check_for_existing_rule(existing_nat_rules,
                                                    rule_payload)
        if existing_nat_rule is None:
            changes_detected = True
            print('Adding new Nat rule: {}'.format(rule_payload))
            resp = client.post(api_endpoint, rule_payload)
        else:
            # carry identity fields over so the PUT targets the existing rule
            rule_payload['id'] = existing_nat_rule['id']
            rule_payload['display_name'] = existing_nat_rule['display_name']
            rule_payload['_revision'] = existing_nat_rule['_revision']
            if rule_payload['rule_priority'] != existing_nat_rule[
                    'rule_priority']:
                changes_detected = True
                print('Updating just the priority of existing nat rule: {}'.
                      format(rule_payload))
                update_api_endpint = '%s%s%s' % (api_endpoint, '/',
                                                 existing_nat_rule['id'])
                resp = client.put(update_api_endpint, rule_payload)
    if changes_detected:
        print('Done adding/updating nat rules for T0Routers!!\n')
    else:
        print('Detected no change with nat rules for T0Routers!!\n')
def GET(self, id):
    """Render the edit form for topic *id*; 404 when not logged in/found."""
    if not user():
        return web.notfound()
    render = web.template.render('asset', base='after.common',
                                 globals=globals())
    r, j = client.get('/topics/%i/' % int(id))
    if not ok(r):
        return web.notfound()
    return render.topics_edit(topic=j)
def check_for_routers(given_names):
    """Check given_names against T0 router names already on the manager."""
    resp = client.get(ROUTERS_ENDPOINT, check=False)
    deployed = [router['display_name']
                for router in resp.json()['results']]
    return check_for_match('T0 Router', given_names, deployed)
def GET(self): if not user(): return web.notfound() render = web.template.render('asset', base='after.common', globals=globals()) r, j = client.get('/users/%i/groups/' % int(session.user.user_id)) print '/users/%i/groups/' % int(session.user.user_id), j if ok(r): return render.groups_list(groups_list=j) return web.notfound()
def GET(self):
    """Render the topics created by the logged-in user; 404 otherwise."""
    if not user():
        return web.notfound()
    render = web.template.render('asset', base='after.common',
                                 globals=globals())
    r, j = client.get('/users/%i/topics/' % int(session.user.user_id))
    return render.topics_my(topics=j) if ok(r) else web.notfound()
def setKeyObject(self, objIDs, bObjIDs):
    """Bind the pending hotkey (self.setKey) to the object picked from
    objIDs/bObjIDs, when that object is a system, planet or fleet."""
    targetID = self.gotoObject(objIDs, bObjIDs)
    log.debug('Setting Key Object To:', targetID)
    self.app.setStatus(_("Ready."))
    self.selectobject = False
    if targetID == Const.OID_NONE:
        return
    target = client.get(targetID)
    # only systems, planets and fleets may be assigned to a key
    if target.type in (Const.T_SYSTEM, Const.T_PLANET, Const.T_FLEET):
        gdata.objectFocus[self.setKey] = targetID
def fetchPage(cls, index=0, limit=None):
    """Fetch one page of objects from the class endpoint.

    index -- offset of the first record; limit -- page size, defaulting to
    cls._page_length. Returns a list of cls instances built from the
    response. NOTE: mutates the shared cls._fetch_options dict in place,
    as the original did -- callers may rely on the persisted paging state.
    """
    if limit is None:  # identity comparison per PEP 8, was `== None`
        limit = cls._page_length
    cls._fetch_options['_start'] = str(index)
    cls._fetch_options['_limit'] = str(limit)
    data = riq.get(cls.endpoint(), cls._fetch_options)
    return [cls(data=obj) for obj in data.get('objects', [])]
def OnReadAll(self, event):
    """Mark every message of the selected forum as read, then refresh."""
    forum, objID, msgType, msgID = self.selected
    obj = client.get(objID)
    for message in obj._messages.values():
        if message["forum"] == forum:
            message["readed"] = 1
    self.createTree()
    self.fillMessages(forum, objID, msgType)
def fetchPage(self, index=None, limit=None):
    """Fetch one page of child objects from the endpoint.

    When index is None the previously configured paging options are reused;
    otherwise _start/_limit are set (limit defaults to self._page_length).
    Returns a list of self._object_class instances parented to self._parent.
    NOTE: mutates the shared self._fetch_options dict in place, as the
    original did -- paging state persists between calls.
    """
    if index is not None:  # identity comparisons per PEP 8, was `!= None`
        if limit is None:
            limit = self._page_length
        self._fetch_options['_start'] = str(index)
        self._fetch_options['_limit'] = str(limit)
    data = riq.get(self.endpoint(), self._fetch_options)
    return [self._object_class(data=obj, parent=self._parent)
            for obj in data.get('objects', [])]