def load_settings(filename):
    """Load the usbkill settings file *filename* into a settings dict.

    Values come from the ``[config]`` section; list-valued options are
    JSON-decoded and the whitelist is wrapped in a DeviceCountSet.
    NOTE(review): depends on module-level ``sys`` and ``DeviceCountSet``
    being in scope — confirm against the enclosing module.
    """
    # Libraries that are only needed in this function:
    from json import loads as jsonloads

    if sys.version_info[0] == 3:
        # Python3
        import configparser

        def get_setting(name, gtype=''):
            """ configparser: Compatibility layer for Python 2/3
            Function currently depends on a side effect, which is not necessary. """
            # Reads the closed-over `config` parser created below.
            section = config['config']
            if gtype == 'FLOAT':
                return section.getfloat(name)
            elif gtype == 'INT':
                return section.getint(name)
            elif gtype == 'BOOL':
                return section.getboolean(name)
            return section[name]
    else:
        # Python2
        import ConfigParser as configparser

        def get_setting(name, gtype=''):
            # Python 2 API: typed getters take the section name explicitly.
            if gtype == 'FLOAT':
                return config.getfloat('config', name)
            elif gtype == 'INT':
                return config.getint('config', name)
            elif gtype == 'BOOL':
                return config.getboolean('config', name)
            return config.get('config', name)

    config = configparser.ConfigParser()
    # Read all lines of settings file
    config.read(filename)

    # Build settings
    settings = dict({
        'sleep_time': get_setting('sleep', 'FLOAT'),
        'whitelist': DeviceCountSet(jsonloads(get_setting('whitelist').strip())),
        'log_file': get_setting('log_file'),
        'melt_usbkill': get_setting('melt_usbkill', 'BOOL'),
        # Trailing space so a filename can be appended directly.
        'remove_file_cmd': get_setting('remove_file_cmd') + " ",
        'files_to_remove': jsonloads(get_setting('files_to_remove').strip()),
        'folders_to_remove': jsonloads(get_setting('folders_to_remove').strip()),
        'do_sync': get_setting('do_sync', 'BOOL'),
        'kill_commands': jsonloads(get_setting('kill_commands').strip())
    })

    # RAM/swap wiping is optional: the command is only read when enabled.
    settings['do_wipe_ram'] = False
    if get_setting('do_wipe_ram', 'BOOL'):
        settings['do_wipe_ram'] = True
        settings['wipe_ram_cmd'] = get_setting('wipe_ram_cmd') + " "

    settings['do_wipe_swap'] = False
    if get_setting('do_wipe_swap', 'BOOL'):
        settings['do_wipe_swap'] = True
        settings['wipe_swap_cmd'] = get_setting('wipe_swap_cmd') + " "

    return settings
def load_settings(filename):
    """Load usbkill settings from the INI file *filename*.

    Returns a dict of typed values pulled from the ``[config]`` section;
    list-valued options are decoded from JSON.
    """
    # Deferred import: only this function needs JSON decoding.
    from json import loads as jsonloads

    running_py3 = sys.version_info[0] == 3
    if running_py3:
        import configparser
    else:
        import ConfigParser as configparser

    parser = configparser.ConfigParser()
    # Read all lines of the settings file.
    parser.read(filename)

    def get_setting(name, gtype=''):
        """configparser: compatibility layer for Python 2/3."""
        if running_py3:
            # Python 3: mapping-style access on the section object.
            section = parser['config']
            if gtype == 'FLOAT':
                return section.getfloat(name)
            if gtype == 'INT':
                return section.getint(name)
            if gtype == 'BOOL':
                return section.getboolean(name)
            return section[name]
        # Python 2: typed getters take the section name explicitly.
        if gtype == 'FLOAT':
            return parser.getfloat('config', name)
        if gtype == 'INT':
            return parser.getint('config', name)
        if gtype == 'BOOL':
            return parser.getboolean('config', name)
        return parser.get('config', name)

    # Build settings
    return {
        'sleep_time': get_setting('sleep', 'FLOAT'),
        'whitelist': jsonloads(get_setting('whitelist').strip()),
        'kill_commands': jsonloads(get_setting('kill_commands').strip()),
        'log_file': get_setting('log_file'),
        'melt_usbkill': get_setting('melt_usbkill', 'BOOL'),
        'remove_passes': get_setting('remove_passes', 'INT'),
        'do_sync': get_setting('do_sync', 'BOOL'),
    }
def load_settings(filename):
    """Read the usbkill INI file *filename* and return a typed settings dict.

    All values come from the ``[config]`` section; list options are
    JSON-decoded, and the remove command gets a trailing space so a
    filename can be appended directly.
    """
    # Only this function needs JSON decoding.
    from json import loads as jsonloads

    on_py3 = sys.version_info[0] == 3
    if on_py3:  # Python3
        import configparser
    else:  # Python2
        import ConfigParser as configparser

    cfg = configparser.ConfigParser()
    # Read all lines of the settings file.
    cfg.read(filename)

    def get_setting(name, gtype=""):
        """configparser: compatibility layer for Python 2/3."""
        if on_py3:
            section = cfg["config"]
            if gtype == "FLOAT":
                return section.getfloat(name)
            if gtype == "INT":
                return section.getint(name)
            if gtype == "BOOL":
                return section.getboolean(name)
            return section[name]
        if gtype == "FLOAT":
            return cfg.getfloat("config", name)
        if gtype == "INT":
            return cfg.getint("config", name)
        if gtype == "BOOL":
            return cfg.getboolean("config", name)
        return cfg.get("config", name)

    # Build settings
    return {
        "sleep_time": get_setting("sleep", "FLOAT"),
        "whitelist": jsonloads(get_setting("whitelist").strip()),
        "log_file": get_setting("log_file"),
        "melt_usbkill": get_setting("melt_usbkill", "BOOL"),
        "remove_file_command": get_setting("remove_file_command") + " ",
        "files_to_remove": jsonloads(get_setting("files_to_remove").strip()),
        "folders_to_remove": jsonloads(get_setting("folders_to_remove").strip()),
        "do_sync": get_setting("do_sync", "BOOL"),
        "kill_commands": jsonloads(get_setting("kill_commands").strip()),
        "double_usbid_detection": get_setting("double_usbid_detection", "BOOL"),
    }
def load_settings(filename):
    """Read the usbkill INI file *filename* and return a typed settings dict."""
    if sys.version_info[0] == 3:
        config = configparser.ConfigParser()
    else:
        config = ConfigParser.ConfigParser()
    # Read all lines of the settings file.
    config.read(filename)

    def get_arg(name, gtype=''):
        """
        configparser: Compatibility layer for Python 2/3
        """
        if sys.version_info[0] == 3:  # Python 3
            # Mapping-style access on the section object.
            section = config['config']
            if gtype == 'FLOAT':
                return section.getfloat(name)
            if gtype == 'INT':
                return section.getint(name)
            if gtype == 'BOOL':
                return section.getboolean(name)
            if gtype == '':
                return section[name]
        else:  # Python 2
            # Typed getters take the section name explicitly.
            if gtype == 'FLOAT':
                return config.getfloat('config', name)
            if gtype == 'INT':
                return config.getint('config', name)
            if gtype == 'BOOL':
                return config.getboolean('config', name)
            if gtype == '':
                return config.get('config', name)

    # Build settings
    settings = {
        'sleep_time': get_arg('sleep', 'FLOAT'),
        'whitelist': jsonloads(get_arg('whitelist').strip()),
        'kill_commands': jsonloads(get_arg('kill_commands').strip()),
        'log_file': get_arg('log_file'),
        'remove_logs_and_settings': get_arg('remove_logs_and_settings', 'BOOL'),
        'remove_passes': get_arg('remove_passes', 'INT'),
        'do_sync': get_arg('do_sync', 'BOOL'),
    }
    return settings
def load_settings(filename):
    """Read the usbkill INI settings file *filename* and return a dict of
    typed values pulled from its ``[config]`` section.

    NOTE(review): relies on module-level ``sys``, ``configparser`` /
    ``ConfigParser`` and ``jsonloads`` — confirm in the enclosing module.
    """
    if sys.version_info[0] == 3:
        config = configparser.ConfigParser()
    else:
        config = ConfigParser.ConfigParser()
    # Read all lines of settings file
    config.read(filename)

    def get_arg(name, gtype=''):
        """
        configparser: Compatibility layer for Python 2/3
        """
        if sys.version_info[0] == 3:
            # Python 3: mapping-style access on the section object.
            section = config['config']
            if gtype == '':
                return section[name]
            elif gtype == 'FLOAT':
                return section.getfloat(name)
            elif gtype == 'INT':
                return section.getint(name)
            elif gtype == 'BOOL':
                return section.getboolean(name)
        else:
            # Python 2: typed getters take the section name explicitly.
            if gtype == '':
                return config.get('config', name)
            elif gtype == 'FLOAT':
                return config.getfloat('config', name)
            elif gtype == 'INT':
                return config.getint('config', name)
            elif gtype == 'BOOL':
                return config.getboolean('config', name)

    # Build settings
    settings = dict({
        'sleep_time' : get_arg('sleep', 'FLOAT'),
        'whitelist': jsonloads(get_arg('whitelist').strip()),
        'kill_commands': jsonloads(get_arg('kill_commands').strip()),
        'log_file': get_arg('log_file'),
        'remove_logs_and_settings' : get_arg('remove_logs_and_settings', 'BOOL'),
        'remove_passes' : get_arg('remove_passes', 'INT'),
        'do_sync' : get_arg('do_sync', 'BOOL')
    })
    return settings
def load_settings(filename):
    """Parse the settings INI file *filename* (Python 3 only).

    Returns a dict of typed values from the ``[config]`` section;
    whitelist and kill_commands are JSON-decoded lists.
    """
    # Read all lines of the settings file.
    parser = configparser.ConfigParser()
    parser.read(filename)
    conf = parser['config']
    return {
        'sleep_time': float(conf['sleep']),
        'whitelist': jsonloads(conf['whitelist']),
        'kill_commands': jsonloads(conf['kill_commands']),
        'log_file': conf['log_file'],
        'remove_logs_and_settings': conf.getboolean('remove_logs_and_settings'),
        'do_sync': conf.getboolean('do_sync'),
    }
def __get_json(part_type, page, region, part_filter):
    """ Returns the JSON from pcpartpicker for the given parameters """
    endpoint = __construct_url(part_type, page, region, part_filter)
    response = requests.get(endpoint)
    # The server responds with UTF-8 encoded JSON.
    return jsonloads(response.content.decode("utf-8"))
def last_commit_message(): """ Gets the message from the latest commit from the msoucy/freeform.py repo on GitHub and prints it to console. If for some reason it fails, the function will state that has no idea what it is doing. returns: A string that represents the GitHub username of the last committer. If there was a problem, return None. """ try: # Get the JSON blob for the master repo (currently under msoucy) a = urlopen("https://api.github.com/repos/msoucy/freeform.py/commits") # Read in the JSON and make it a dict-like jobj = jsonloads(a.read()) # Print the message of the last commit print "The latest commit message is: '{0}'".format( *(jobj[0][u'commit'][u'message'],)) except: # Something broke somewhere, and this isn't enterprise Java print "I have no idea what I am doing." jobj = None # return the last committer return jobj[0][u'author'][u'login'] if jobj else None
def get_planet_phaseplot(self, idx_tce=1, embed=False):
    """Fetch the DV phase-plot JSON for one threshold-crossing event and
    store it on ``self.planet_phaseplot``.

    Args:
        idx_tce: index of the threshold-crossing event to request.
        embed: if True, request the embeddable variant (``&embed`` flag).

    Raises:
        ValueError: if this object is not a Kepler or TESS collection.
    """
    if self._collection not in ['kepler', 'tess']:
        raise ValueError('This method is only useful for'
                         ' Kepler and TESS objects')

    # Bug fix: the embed branch used `self._api_url` while the non-embed
    # branch and every sibling method use `self.api_url`; normalised to
    # `self.api_url` and the `&embed` flag is simply appended.
    planet_phaseplot_url = '{}/dvdata/{}/{}/phaseplot/?tce={}'.format(
        self.api_url, self._collection, self.planet_id, idx_tce)
    if embed:
        planet_phaseplot_url = planet_phaseplot_url + '&embed'

    if self.verbose:
        info_message('Acquiring Planetary Phase Plot from {}'.format(
            planet_phaseplot_url))

    response = requests_get(planet_phaseplot_url)
    payload = response.content.decode('utf-8')
    self.check_request(planet_phaseplot_url, payload)
    self.planet_phaseplot = jsonloads(payload)
def get_previous_data_processing_date(time_series_url, token):
    """Return (ok, value) for the newest distinct ``entry_date`` of an
    ArcGIS time-series layer.

    On a service error returns (False, error_message); (True, None) when
    the layer has no features; otherwise (True, 'mm/dd/yy').
    NOTE(review): Python 2 code (`print` statement, `urllib.urlopen`).
    """
    # remove trailing slash in url if any
    url = time_series_url[:-1] if time_series_url.endswith(
        "/") else time_series_url
    # check if layerid is provided in url
    if not url[-1:].isdigit():
        raise Exception("LayerID missing in URL: {0}".format(url))
    # Query only the distinct entry_date attribute, no geometry.
    url = url + '/query?where=1=1&outFields=entry_date&returnGeometry=false&returnIdsOnly=false&returnDistinctValues=true&f=json&token=' + token
    response = urllib.urlopen(url).read()
    print response
    output = jsonloads(response)
    if "error" in output:
        msg = output["error"]["message"]
        details = str("\n".join(output["error"]["details"]))
        return False, "{0}\n{1}".format(msg, details)
    elif not output.get("features"):
        # No features at all -> there is no previous processing date.
        return True, None
    else:
        # entry_date is an epoch timestamp in milliseconds; format mm/dd/yy.
        date_set = [
            datetime.fromtimestamp(
                sub_dict.get("attributes").get("entry_date") / 1000).strftime('%m/%d/%y')
            for sub_dict in output.get("features")
        ]
        # Compare as real dates, not lexicographically.
        previous_max_date = max(date_set,
                                key=lambda d: datetime.strptime(d, '%m/%d/%y'))
        return True, previous_max_date
def get_identifiers(self, jsonfile=None, idx_list=0):
    """Populate planetary identifiers, either from the exo.mast server or
    from a user-supplied JSON file / dict (useful when the server is down).

    Args:
        jsonfile: optional path to a JSON file, or an already-decoded dict,
            used instead of querying the server.
        idx_list: index to pick when the identifier payload is a list.

    Raises:
        HTTPError: if the server returns an empty identifier payload.
    """
    planet_identifier_url = '{}/exoplanets/identifiers/?name={}'.format(
        self.api_url, self._planet_url_name)

    if self.verbose:
        info_message('Acquiring Planetary Identifiers '
                     'from {}'.format(planet_identifier_url))

    # Let use provide a json file or dictionary to populate
    # Especially in case the server is down
    call_request = True
    if jsonfile is not None:
        if isinstance(jsonfile, str) and os.path.exists(jsonfile):
            with open(jsonfile) as fin:
                self._planet_ident_dict = jsonload(fin)
            call_request = False
        elif isinstance(jsonfile, dict):
            self._planet_ident_dict = jsonfile
            call_request = False
        else:
            warning_message(
                'Please provide either a json filepath or dictionary. '
                'Default behaviour: Query exo.mast.stsci.edu server.')

    if call_request:
        planet_ident_request = requests_get(planet_identifier_url)
        planet_ident_request = planet_ident_request.content.decode('utf-8')
        if len(planet_ident_request) == 0:
            raise HTTPError('Could not find identifier in table.'
                            ' It is possible that the target is not '
                            ' included in the database as named; or it '
                            'may not exist.')
        self.check_request(planet_identifier_url, planet_ident_request)
        # Store dictionary of planetary identification parameters
        self._planet_ident_dict = jsonloads(planet_ident_request)

    if isinstance(self._planet_ident_dict, list):
        self._planet_ident_dict = self._planet_ident_dict[idx_list]

    # Bug fix / hardening: use setattr instead of exec; exec on untrusted
    # key strings was both fragile (keys with quotes/odd chars break the
    # generated code) and a code-injection hazard.
    for key, val in self._planet_ident_dict.items():
        setattr(self, key, val)

    if 'canonicalName' in self._planet_ident_dict.keys():
        self.planet_name = self._planet_ident_dict['canonicalName']
        self._planet_url_name = self.planet_name.replace(' ', '%20')
def get_spectra_bokeh_plot(self, idx_tce=1):
    """Download the Bokeh spectral-plot JSON for this planet and store it
    on ``self.spectra_bokeh_plot``.

    Args:
        idx_tce: kept for interface compatibility; not used by this endpoint.
    """
    url = '{}/spectra/{}/plot/'.format(self.api_url, self._planet_url_name)
    if self.verbose:
        info_message(
            'Acquiring Planetary Bokeh Spectral Plot from {}'.format(url))
    payload = requests_get(url).content.decode('utf-8')
    self.check_request(url, payload)
    # to be injected into Bokeh somehow (FINDME??)
    self.spectra_bokeh_plot = jsonloads(payload)
def find_new_item(range_above_base, list_of_item_descriptions, base_price=0):
    """Search the Zappos API for popular items priced near
    base_price + range_above_base, skipping products whose names are
    already in list_of_item_descriptions (which is mutated in place).

    Returns a list of result dicts, or 0 when nothing new is found.
    NOTE(review): Python 2 code (`dict.has_key`) — will not run unmodified
    on Python 3.
    """
    # This function does the API call and price adjustment. It takes the base
    # price that the function will not search for products below, which is
    # defaulted to zero. It takes the range above the base to start the search
    # and a list of descriptions of items that have already been added in order
    # to ensure diversity of products.

    # This tracks the number of requests we have attempted and changes the
    # price based on that.
    num_requests = 0
    # The vast majority of items on Zappos end in 99 cents, so we will round
    # down to closest 99 cent price
    ideal_price = range_above_base + base_price
    if(ideal_price%1 != 0.99):
        ideal_price = int(ideal_price) - 0.01
    # This is how many items are returned from the API call per page
    items_per_page_num = 5
    # If all the items in a request are used, we increment this to get the
    # next page
    page_num = 1
    # As long as our search is still within the range provided, keep looking
    while(ideal_price > base_price and num_requests <= 5):
        list_to_return = []
        # Base URL for call, we are searching for " " and letting the filters
        # do the work for us.
        base_request = "http://api.zappos.com/Search/term/%20?"
        # Part of the API call that deals with items per call and pagination
        item_limit = "&limit=" + str(items_per_page_num) + "&page="+ str(page_num)
        # Part of the API call that sets the price we are looking for
        facets = "&filters={%22price%22:[%22" + str(ideal_price) + "%22]}"
        # Sets how the items are returned to us. We want the most popular.
        sort = "&sort={%22productPopularity%22:%22desc%22}"
        # This is the API key that authorizes us to use Zappos' API
        api_key = "&key=52ddafbe3ee659bad97fcce7c53592916a6bfd73"
        # Piecing the parts together to form a complete call
        request = base_request + item_limit + facets + sort + api_key
        # Make the API call, read it, parse it as json into a hash.
        item = jsonloads(urlopen(request).read())
        num_requests = num_requests + 1
        # If the request was empty it doesn't have the key "results" and in some
        # cases it has results but no count, so we check for that too.
        if(item.has_key('results') and len(item['results']) != 0):
            for item_in_request in item['results']:
                # If we don't already have this item, add it to the list
                if item_in_request['productName'] not in list_of_item_descriptions:
                    list_of_item_descriptions.append(item_in_request['productName'])
                    list_to_return.append(item_in_request)
            # If we have something to return, do so
            if(len(list_to_return) != 0):
                return list_to_return
            # If we haven't found any new items, go to the next page
            page_num = page_num + 1
        # If we don't have any results, we need to change the price
        else:
            # Most items on Zappos end in 99 cents so, we will optimize
            # by just adjusting the price by whole dollars
            ideal_price = ideal_price - (1.0 * num_requests)
    return 0
def __init__(self, symbol: str, url: str = None, data=None, shelf=None,
             mongodb=None, set_variance_behaviour=None,
             set_natural_log_behaviour=None) -> None:
    """Initialise a symbol wrapper.

    Args:
        symbol: ticker symbol this object represents.
        url: optional source URL for the data.
        data: a JSON string, an already-decoded dict, or None.
        shelf: optional shelf to restore state from (via ``self.restore``).
        mongodb: kept for interface compatibility; not used here.
        set_variance_behaviour / set_natural_log_behaviour: optional
            behaviour factories; module defaults are used when omitted.
    """
    self.symbol = symbol
    self.url = url
    self.data = data
    # Idiom fix: `data` may be a dict already, or None — an isinstance
    # check replaces `type(...) is str` plus a dead `except TypeError`
    # (jsonloads is only called on a str, which cannot raise TypeError).
    if isinstance(self.data, str):
        self.data = jsonloads(self.data)
    if shelf is not None:
        self.restore(shelf)
    self._set_variance_behaviour = (set_variance_behaviour
                                    or default_variance_behaviour)
    self.set_variance_behaviour = self._set_variance_behaviour()
    self._set_natural_log_behaviour = (set_natural_log_behaviour
                                       or default_natural_log_behaviour)
    self.set_natural_log_behaviour = self._set_natural_log_behaviour()
def kill_computer():
    """Run the usbkill kill sequence: optionally sync the filesystem,
    execute the configured kill commands in order, then power off (or just
    exit, depending on configuration).

    Reads the module-level `config` parser; always terminates the process
    so the sequence cannot run twice.
    """
    # Log usb device status
    logusb()

    # Sync the filesystem to save recent changes.
    # Bug fix: `is 'True'` compared object *identity* with a string literal,
    # which is implementation-dependent and normally False, so sync and
    # shutdown were effectively disabled; use equality instead.
    if config.get('usbkill', 'do_sync') == 'True':
        log.info('Syncing filesystem')
        os.system("sync")

    # Execute kill commands in order.
    for command in jsonloads(config.get('usbkill', 'kill_commands').strip()):
        log.info('Executing command: {0}'.format(command))
        os.system(command)

    if config.get('usbkill', 'shutdown') == 'True':
        # Finally poweroff computer immediately
        if 'DARWIN' in current_platform.upper():
            # OS X (Darwin) - Will halt ungracefully, without signaling apps
            os.system("killall Finder && killall loginwindow && halt -q")
        elif 'BSD' in current_platform.upper():
            # BSD-based systems - Will shutdown
            os.system("shutdown -h now")
        else:
            # Linux-based systems - Will shutdown
            os.system("poweroff -f")
    else:
        log.info('Not shutting down computer, exiting...')

    # Exit the process to prevent executing twice (or more) all commands
    sys.exit(0)
def _cells_by_ct(self):
    """Lazily build ``{ContentType id: [cells]}`` from the JSON stored in
    ``self.json_cells_map``, caching the result on ``self._cells_map``.

    Cell dicts that fail to build are dropped; when any errors occurred
    the cleaned map is re-serialised and the instance saved, so the
    stored JSON self-heals on first access.
    """
    cells_map = self._cells_map

    if cells_map is None:
        # Local import avoids a circular import at module load time.
        from ..core.entity_cell import CELLS_MAP

        self._cells_map = cells_map = {}
        get_ct = ContentType.objects.get_for_id
        build = CELLS_MAP.build_cells_from_dicts
        total_errors = False

        for ct_id, cells_as_dicts in jsonloads(
                self.json_cells_map).items():
            ct = get_ct(ct_id)
            cells, errors = build(
                model=ct.model_class(),
                dicts=cells_as_dicts)  # TODO: do it lazily ??

            if errors:
                total_errors = True

            cells_map[ct.id] = cells

        if total_errors:
            # Persist the cleaned map so the invalid cells disappear
            # from storage as well.
            logger.warning(
                'RelationBrickItem (id="%s") is saved with valid cells.',
                self.id)
            self._dump_cells_map()
            self.save()

    return cells_map
def test_custom_brick_errors01(self): cbci = CustomBrickConfigItem.objects.create( id='tests-organisations01', name='General', content_type=FakeOrganisation, cells=[ EntityCellRegularField.build(FakeOrganisation, 'name'), EntityCellRegularField.build(FakeOrganisation, 'description'), ], ) # Inject error by bypassing checks CustomBrickConfigItem.objects.filter(id=cbci.id) \ .update(json_cells=cbci.json_cells.replace('description', 'invalid')) cbci = self.refresh(cbci) self.assertEqual(1, len(cbci.cells)) with self.assertNoException(): deserialized = jsonloads(cbci.json_cells) self.assertEqual([{ 'type': 'regular_field', 'value': 'name' }], deserialized)
def test_applications_list(self):
    """GET /rooms/peculiarbabboon/applications/ with a fresh JWT for
    'testuser' returns a JSON list containing exactly one application."""
    self.assertEqual(
        len(
            jsonloads(
                CLIENT.get('/rooms/peculiarbabboon/applications/',
                           HTTP_AUTHORIZATION='JWT {}'.format(
                               login.login('testuser'))).content)),
        1)
def apirequest(self, url, apikey=None, **params):
    """Create query to the BTC-E API (decoded response).

    @raise APIError, CloudFlare: BTC-E and CloudFlare errors
    @param url: Public/Trade API plain URL without parameters
    @param apikey: Trade API Key {'Key': 'KEY', 'Secret': 'SECRET'}
    @param **params: Public/Trade API method and/or parameters
    @return: BTC-E API response (decoded data) <type 'dict'>"""
    raw = self.jsonrequest(url, apikey, **params)
    try:
        decoded = jsonloads(raw, parse_float=FloatParser,
                            parse_int=IntegerParser)
    except ValueError:
        # Body was not JSON: distinguish proxy errors from API errors.
        if self.resp.status != 200:  # CloudFlare proxy errors
            raise CloudFlare("{} {}".format(self.resp.status,
                                            self.resp.reason))
        # BTC-E API unknown errors
        raise APIError(raw)
    if 'error' in decoded:  # BTC-E API standard errors
        raise APIError(decoded['error'])
    return decoded
def SelectAnItem(SearchingProductPrice):
    """Pick the most popular not-yet-selected Zappos product at (or just
    below) SearchingProductPrice and append it to the module-level result
    lists, stepping the price down by $1 until something is found.

    NOTE(review): Python 2 code (backtick repr, `has_key`, `print`
    statement); relies on module globals ApiKey, SelectedItemsAttribute,
    FinalItemsSelected, Count, DesiredNoOfProducts and PriceOfEachProduct
    — confirm these exist in the enclosing module.
    """
    # since most of the products in zappos ends with .99 cents we are searching for that particular value
    SearchingProductPrice = int(SearchingProductPrice) - 0.01
    # if the price range is greater than zero then only run the loop else dont.
    while(SearchingProductPrice > 0 ):
        # Stores the base url which will used to retrieve the information
        BaseReq = "http://api.zappos.com/Search/term/%20?"
        # We would like the limit the result to 10
        ItemLimit = "&limit=" + `10`
        # filtering based on the price
        Facets = "&filters={\"price\":[\"" + str(SearchingProductPrice) + "\"]}"
        # sorting the product based on its popularity
        Sorting = "&sort={\"productPopularity\":\"desc\"}"
        # concatenating the URL
        RequestURL = BaseReq + ItemLimit + Facets + Sorting + ApiKey
        # call to the API to get the result
        JsonResult = jsonloads(urlopen(RequestURL).read())
        # Check if the result has some values
        if(JsonResult.has_key('results')):
            # we need to put one product to our list, once its done return/exit the function
            # the product we select might be in the 1st/2nd/3rd place and so on so we have used the for loop
            for JsonResultDetails in JsonResult['results']:
                # check if the product has been selected already
                if JsonResultDetails['productId'] not in SelectedItemsAttribute:
                    # if not selected already then select it and return
                    SelectedItemsAttribute.append(JsonResultDetails['productId'])
                    FinalItemsSelected.append(JsonResultDetails)
                    return FinalItemsSelected
        else:
            # if there are no products returned then break and exit the function
            print "No product could be selected for the price : " + `SearchingProductPrice`
        SearchingProductPrice -= 1
    if Count != DesiredNoOfProducts:
        PriceOfEachProduct[Count+1] += 1
def test_relation_block_errors(self):
    """A cell made invalid directly in the stored json_cells_map is
    dropped when the cells are rebuilt: only 'last_name' survives, and
    the re-serialised JSON reflects the cleaned map."""
    rtype = RelationType.create(('test-subject_rented', 'is rented by'),
                                ('test-object_rented',  'rents'),
                               )[0]
    ct_contact = ContentType.objects.get_for_model(FakeContact)
    rbi = RelationBrickItem.create(rtype.id)

    build = partial(EntityCellRegularField.build, model=FakeContact)
    rbi.set_cells(ct_contact,
                  [build(name='last_name'), build(name='description')]
                 )
    rbi.save()

    # Inject error by bypassing checkings
    RelationBrickItem.objects.filter(id=rbi.id) \
        .update(json_cells_map=rbi.json_cells_map.replace('description',
                                                          'invalid'))

    rbi = self.refresh(rbi)
    cells_contact = rbi.get_cells(ct_contact)
    self.assertEqual(1, len(cells_contact))
    self.assertEqual('last_name', cells_contact[0].value)

    with self.assertNoException():
        deserialized = jsonloads(rbi.json_cells_map)

    self.assertEqual({str(ct_contact.id): [{'type': 'regular_field',
                                            'value': 'last_name'}]},
                     deserialized
                    )
def get_tce(self):
    """Download the threshold-crossing-event (TCE) table for this target
    and store the decoded JSON on ``self.tce``.

    Raises:
        ValueError: if this object is not a Kepler or TESS collection.
    """
    if self._collection not in ['kepler', 'tess']:
        raise ValueError('This method is only useful'
                         ' for Kepler and TESS objects')

    url = '{}/dvdata/{}/{}/tces/'.format(self.api_url,
                                         self._collection,
                                         self.planet_id)
    if self.verbose:
        info_message(
            'Acquiring Planetary Threshold Crossing Database from {}'.
            format(url))

    payload = requests_get(url).content.decode('utf-8')
    self.check_request(url, payload)
    # theshold_crossing_event
    self.tce = jsonloads(payload)
def get(self):
    """Handle GET: return parameter docs, a static JSON file, a camera
    image + timestamp, or a file hash, depending on the ``type`` arg.
    """
    # Return expected parameter output, also set indent settings
    if not request.args:
        return self.create_param_string(), 200
    if not app.debug:
        app.config['RESTFUL_JSON'] = {}

    args = self.reqparse.parse_args()
    if args['type'] == 'static':
        # Fix: renamed local `str`, which shadowed the builtin.
        with open(f"{app.config['STATIC_DIR']}/{args['name']}") as file:
            payload = jsonloads(file.read())
            return payload, 200
    elif args['type'] == 'cam':
        # Prefer the main (M) image, fall back to the panorama (PAN).
        # NOTE(review): `image` is unbound if neither file exists — confirm
        # upstream guarantees one of them is present.
        if isfile("/lamp/cams/%s/images/M.jpg" % args['name']):
            image = "/lamp/cams/%s/images/M.jpg" % args['name']
        elif isfile("/lamp/cams/%s/images/PAN.jpg" % args['name']):
            image = "/lamp/cams/%s/images/PAN.jpg" % args['name']
        with open(image, "rb") as file:
            encoded = b64encode(file.read()).decode('utf-8')
        # The js.js side-file's first line carries the capture date in quotes.
        info = "/lamp/cams/%s/images/js.js" % args['name']
        with open(info, 'r') as file:
            line = file.readline()
            date = line.split('"')[1]
        return {'img': encoded, 'date': date}, 200
    elif args['type'] == 'hash':
        blocksize = 65536
        hasher = hashlib.sha1()
        # Bug fix: hashlib.update() requires bytes; the file was opened in
        # text mode, which would raise TypeError on the first update.
        with open(f"{app.config['STATIC_DIR']}/{args['name']}", "rb") as file:
            buf = file.read(blocksize)
            while len(buf) > 0:
                hasher.update(buf)
                buf = file.read(blocksize)
        return {'hash': hasher.hexdigest()}, 200
def load_settings(filename):
    """Load usbdeath settings from the INI file *filename*.

    Returns ``{'config': {...typed values...}}`` plus one dict per extra
    INI section, whose values are kept as raw strings.
    """
    # Libraries that are only needed in this function:
    from json import loads as jsonloads

    if sys.version_info[0] == 3:
        # Python3
        import configparser

        def get_setting(name, gtype=''):
            """ configparser: Compatibility layer for Python 2/3
            Function currently depends on a side effect, which is not necessary. """
            section = config['config']
            if gtype == 'FLOAT':
                return section.getfloat(name)
            elif gtype == 'INT':
                return section.getint(name)
            elif gtype == 'BOOL':
                return section.getboolean(name)
            return section[name]
    else:
        # Python2
        import ConfigParser as configparser

        def get_setting(name, gtype=''):
            if gtype == 'FLOAT':
                return config.getfloat('config', name)
            elif gtype == 'INT':
                return config.getint('config', name)
            elif gtype == 'BOOL':
                return config.getboolean('config', name)
            return config.get('config', name)

    config = configparser.ConfigParser()
    # Read all lines of settings file
    config.read(filename)

    # Build settings
    settings = dict({})
    settings['config'] = {
        'timeout_cycle': get_setting('timeout', 'INT'),
        'sleep_time': get_setting('sleep', 'FLOAT'),
        'plugins': jsonloads(get_setting('plugins').strip()),
        'show_notify': get_setting('show_notify', 'BOOL')
    }

    # 'alert_program' is optional.
    # Bug fix: on Python 3 a missing option raises KeyError (mapping access
    # via section[name]), which `except configparser.NoOptionError` alone
    # did not catch; both exception types are now handled.
    try:
        settings['config']['alert_program'] = get_setting('alert_program')
    except (configparser.NoOptionError, KeyError):
        pass

    # Copy every other section verbatim (values stay strings).
    for section_name in config.sections():
        if section_name != "config":
            settings[section_name] = {}
            for name, value in config.items(section_name):
                settings[section_name][name] = value

    return settings
def _get_page(part_type, page_num, return_max_pagenum=False):
    """GET one product-listing page from pcpartpicker.

    Returns the parsed HTML fragment as a BeautifulSoup tree, or — when
    ``return_max_pagenum`` is True — just the highest page number.
    """
    if part_type not in lookup:
        raise ValueError("part_type invalid")
    response = get(base_url + "/products/" + part_type + "/fetch?page="
                   + str(page_num))
    payload = jsonloads(response.content.decode("utf-8"))
    if return_max_pagenum:
        # The last page block carries the highest page number.
        return payload["result"]["paging_data"]["page_blocks"][-1]["page"]
    return BeautifulSoup(payload["result"]["html"], "html.parser")
def change_limit_state(message):
    """Persist a new threshold on/off state and broadcast it to clients."""
    incoming = jsonloads(message)
    setting = system_setting.json2dict()
    setting['threshold']['state'] = int(incoming['state'])
    system_setting.dict2json(setting)
    # Re-read so the broadcast reflects what was actually stored.
    setting = system_setting.json2dict()
    answer = jsondumps({"state": setting['threshold']['state']})
    socketio.emit('update_limit_state', answer)
def load_settings(filename):
    """Load usbdeath settings: returns ``{'config': {...typed values...}}``
    plus one dict per extra INI section (values kept as raw strings)."""
    # Libraries that are only needed in this function:
    from json import loads as jsonloads

    if sys.version_info[0] == 3:
        # Python3
        import configparser

        def get_setting(name, gtype=''):
            """ configparser: Compatibility layer for Python 2/3
            Function currently depends on a side effect, which is not necessary. """
            section = config['config']
            if gtype == 'FLOAT':
                return section.getfloat(name)
            elif gtype == 'INT':
                return section.getint(name)
            elif gtype == 'BOOL':
                return section.getboolean(name)
            return section[name]
    else:
        # Python2
        import ConfigParser as configparser

        def get_setting(name, gtype=''):
            if gtype == 'FLOAT':
                return config.getfloat('config', name)
            elif gtype == 'INT':
                return config.getint('config', name)
            elif gtype == 'BOOL':
                return config.getboolean('config', name)
            return config.get('config', name)

    config = configparser.ConfigParser()
    # Read all lines of settings file
    config.read(filename)

    # Build settings
    settings = dict({})
    settings['config'] = {
        'timeout_cycle' : get_setting('timeout', 'INT'),
        'sleep_time' : get_setting('sleep', 'FLOAT'),
        'plugins' : jsonloads(get_setting('plugins').strip()),
        'show_notify' : get_setting('show_notify', 'BOOL')
    }
    # 'alert_program' is optional.
    # NOTE(review): on Python 3 a missing option raises KeyError here
    # (mapping access in get_setting), which this except does not catch —
    # confirm whether Py3 configs always define 'alert_program'.
    try:
        settings['config']['alert_program'] = get_setting('alert_program')
    except configparser.NoOptionError:
        pass

    # Copy every other section verbatim (values stay strings).
    for section_name in config.sections():
        if section_name != "config":
            settings[section_name] = {}
            for name, value in config.items(section_name):
                settings[section_name][name] = value

    return settings
def to_python(self, value):
    """Deserialize a stored JSON string into a DatePeriod.

    Falsy input yields None; non-string input is assumed to already be a
    DatePeriod instance and is returned unchanged.
    """
    if not value:  # if value is None: ??
        return None
    if not isinstance(value, str):
        # DatePeriod instance
        return value
    return date_period_registry.deserialize(jsonloads(value))
def writetoweblayer(url, featureset, token, operation=""):
    """Add, update or delete features on an ArcGIS feature layer.

    Returns (True, True) on success or (False, error_message) on failure.
    NOTE(review): Python 2 code (`dict.has_key`, `urllib.urlopen`).
    """
    try:
        # remove trailing slash in url if any
        url = url[:-1] if url.endswith("/") else url
        # check if layerid is provided in url
        if not url[-1:].isdigit():
            raise Exception("LayerID missing in URL: {0}".format(url))
        if operation.lower() == "add":
            url = url + "/addFeatures?token=" + token
        elif operation.lower() == "update":
            url = url + "/updateFeatures?token=" + token
        elif operation.lower() == "delete":
            url = url + "/deleteFeatures?where=1=1&token=" + token
        else:
            raise Exception("Operation add/update not specified.")
        # prepare featureset to post
        if operation.lower() == "delete":
            payload = urlencode({"f": "json"})
            result = urllib.urlopen(url=url, data=payload).read()
        else:
            payload = urlencode({
                "features": jsondumps(featureset),
                "f": "json"
            })
            result = urllib.urlopen(url=url, data=payload).read()
        try:
            # check if json is returned
            result_json = jsonloads(result)
        except Exception as error:
            raise Exception(
                "Operation {0} did not return valid JSON with error- {1} .".
                format(operation, error))
        # check if there are any errors returned from add features
        if result_json.has_key("error"):
            msg = result_json["error"]["message"]
            details = str("\n".join(result_json["error"]["details"]))
            return False, "{0}\n{1}".format(msg, details)
        # Map the operation to the key the service uses for its results.
        resultkey = None
        if operation == "add":
            resultkey = "addResults"
        elif operation == "update":
            resultkey = "updateResults"
        elif operation == "delete":
            resultkey = "deleteResults"
        if resultkey in result_json:
            # Every per-feature result must report success.
            for rlt in result_json[resultkey]:
                if not rlt["success"]:
                    raise Exception("Writing feature failed.")
        return True, True
    except Exception as error:
        errmsg = str(error)
        return False, errmsg
def configuration(key, config_file='config.json'):
    """Fetch the configuration stored under *key* from Consul, falling back
    to the local JSON file *config_file* when Consul is absent or errors."""
    if consul_present():
        client = Consul()
        try:
            raw = client.kv.get(key)[1]['Value'].decode()
            return jsonloads(raw)
        except ConsulException:
            print("==> Configurator: Couldn't get configuration from Consul, "
                  "reading from file")
    # No Consul, or the lookup failed: read the local file instead.
    return jsonload(open(config_file, 'r'))
def __init__(self):
    """Load the fonts, the CurveWars API wrapper and the decoded game
    media used for rendering."""
    # Font variants: regular 16pt; bold 16/13/18pt; black 18pt.
    self.freg = ImageFont.truetype("assets/fonts/Regular.ttf", 16)
    self.fbold = ImageFont.truetype("assets/fonts/Bold.ttf", 16)
    self.fbolds = ImageFont.truetype("assets/fonts/Bold.ttf", 13)
    self.fboldl = ImageFont.truetype("assets/fonts/Bold.ttf", 18)
    self.fblackl = ImageFont.truetype("assets/fonts/Black.ttf", 18)
    self.cw = CurveWarsWrapper()
    # GameMedia is a JSON string on the wrapper; decode it once up front.
    self.gameMedia = jsonloads(self.cw.GameMedia)
    # Cache of previously seen room data — presumably keyed by room id;
    # confirm against the methods that populate it.
    self.lastRooms = {}
def _getPage(partType, pageNum, returnMaxPageNum=False):
    """
    A private method to GET, decode, and parse a page from pcpartpicker
    If returnMaxPageNum is True, this function will only return an Int
    """
    if partType not in productLookup:
        raise ValueError("partType invalid")
    response = get(baseURL + "/products/" + partType + "/fetch?page="
                   + str(pageNum))
    decoded = jsonloads(response.content.decode("utf-8"))
    if returnMaxPageNum:
        # The last page block carries the highest page number.
        return decoded["result"]["paging_data"]["page_blocks"][-1]["page"]
    return BeautifulSoup(decoded["result"]["html"], "html.parser")
def _retrieve_from_s3(self, message_attributes, message_body, receipt_handle):
    """Resolve an SQS message whose payload was offloaded to S3.

    When the reserved attribute is present and the body is a two-element
    pointer list ``[MESSAGE_POINTER_CLASS, {...}]``, the real body is
    fetched from S3 and the receipt handle is prefixed with the bucket and
    key markers so the S3 object can be deleted on acknowledgement.

    Returns the (possibly rewritten) (message_attributes, message_body,
    receipt_handle) triple.
    """
    if (message_attributes.pop(RESERVED_ATTRIBUTE_NAME, None)):
        s3_message_body = jsonloads(message_body)
        if isinstance(s3_message_body, list) and len(s3_message_body) == 2 and s3_message_body[
                0] == MESSAGE_POINTER_CLASS:
            # Fix: reuse the already-parsed body instead of calling
            # jsonloads(message_body) a second time.
            payload = s3_message_body[1]
            s3_bucket_name = payload['s3BucketName']
            s3_key = payload['s3Key']
            # Fetch the real message body from S3.
            message_body = self.s3.Object(
                s3_bucket_name, s3_key).get()['Body'].read().decode()
            receipt_handle_params = {
                'S3_BUCKET_NAME_MARKER': S3_BUCKET_NAME_MARKER,
                'bucket': s3_bucket_name,
                'S3_KEY_MARKER': S3_KEY_MARKER,
                'key': s3_key,
                'receipt_handle': receipt_handle
            }
            receipt_handle = '{S3_BUCKET_NAME_MARKER}{bucket}{S3_BUCKET_NAME_MARKER}{S3_KEY_MARKER}{key}{S3_KEY_MARKER}{receipt_handle}'.format(
                **receipt_handle_params)
    return message_attributes, message_body, receipt_handle
def load_settings(filename):
    """Parse the INI settings file and return them as a plain dict.

    Expects a ``[config]`` section containing: sleep (float), whitelist and
    kill_commands (JSON lists), log_file (string), and the booleans
    remove_logs_and_settings and do_sync.
    """
    parser = configparser.ConfigParser()
    parser.read(filename)
    cfg = parser['config']
    return {
        'sleep_time': cfg.getfloat('sleep'),
        'whitelist': jsonloads(cfg['whitelist']),
        'kill_commands': jsonloads(cfg['kill_commands']),
        'log_file': cfg['log_file'],
        'remove_logs_and_settings': cfg.getboolean('remove_logs_and_settings'),
        'do_sync': cfg.getboolean('do_sync'),
    }
def _should_send_property(self, key, value):
    """Check the property lock (property_lock)."""
    to_json = self.trait_metadata(key, 'to_json', self._trait_to_json)
    if key in self._property_lock:
        # Round-trip through json so python-only distinctions (e.g. tuples
        # becoming lists) do not register as a change versus the lock.
        encoded = jsonloads(jsondumps(to_json(value, self)))
        if encoded == self._property_lock[key]:
            return False
    if self._holding_sync:
        # Defer the update: remember the key and send it when sync ends.
        self._states_to_send.add(key)
        return False
    return True
def start_petition_view(request):
    """Handle the start-petition AJAX POST; otherwise render the page.

    On an AJAX POST, decode the ``start_petition`` JSON payload (best
    effort — failures are ignored) and answer with a stub JSON response.
    Any other request gets the startpetitions.html template.
    """
    # BUG FIX: ``request.is_ajax`` without parentheses is a bound method and
    # therefore always truthy, so the AJAX check never filtered anything.
    if request.is_ajax() and request.method == "POST":
        try:
            # this request comes from startpetitions.html
            # optionalSettingsSave() {}
            petition = jsonloads(request.POST['start_petition'])
            print(petition)
            # print(optional_settings)
        except Exception:
            # Deliberate best-effort: a malformed/missing payload is ignored
            # and the stub response is returned regardless.
            pass
        return JsonResponse({'data': 'null'}, status=200)
    return render(request, 'startpetitions.html')
def _getPage(productType, pageNum=1, region="us", productFilter=""):
    """
    A private method that returns the JSON for that particular page of that
    particular product type with that particular productFilter
    """
    if productType not in productLookup:
        raise ValueError(
            f'"{productType}" is an invalid / unrecognized productType')
    base = _constructURL(region, productFilter)
    url = f"{base}/products/{productType}/fetch?page={pageNum}"
    response = requests.get(url)
    return jsonloads(response.content.decode("utf-8"))
def get_commands(self):
    """[This method will get commands in a certain frequency]

    Returns:
        [dict] -- [Anarray of commands]
    """
    banner = "-------------------------------------------------------"
    print(banner)
    print("----- GETTING COMMAND LIST PROCESS ---------")
    print(banner)
    self.logg("[+] Fetching commands")
    # Query the command endpoint of the API with our auth token.
    endpoint = self.API + "?page=command&token=" + self.TOKEN
    reply = self.req(endpoint, "get")
    self.logg("[+] Response : " + str(reply))
    return jsonloads(str(reply))["result"]
def send_command_status(self, status, command_address):
    """Report a command's execution status back to the API.

    Arguments:
        status -- status value to report for the command
        command_address -- identifier/address of the command being updated

    Returns:
        The decoded JSON response from the API.
    """
    payload = {"status": status, "address": command_address}
    # ``cible=update`` selects the status-update action on the endpoint.
    endpoint = self.API + "?page=command&token=" + self.TOKEN + "&cible=update"
    reply = self.req(endpoint, "post", payload)
    self.logg("[+] Response : " + str(reply))
    return jsonloads(str(reply))
def _should_send_property(self, key, value):
    """Check the property lock (property_lock)"""
    # Per-trait serializer; falls back to the widget-wide default.
    to_json = self.trait_metadata(key, 'to_json', self._trait_to_json)
    if key in self._property_lock:
        # _remove_buffers splits a state dict into three parallel pieces:
        # model_state, buffer_paths, buffers
        split_value = _remove_buffers({ key: to_json(value, self)})
        split_lock = _remove_buffers({ key: self._property_lock[key]})
        # A roundtrip conversion through json in the comparison takes care of
        # idiosyncracies of how python data structures map to json, for example
        # tuples get converted to lists.
        # All three pieces must match for the value to count as unchanged;
        # binary buffers are compared with the dedicated helper, not via json.
        if (jsonloads(jsondumps(split_value[0])) == split_lock[0]
                and split_value[1] == split_lock[1]
                and _buffer_list_equal(split_value[2], split_lock[2])):
            return False
    if self._holding_sync:
        # While a sync is being held, queue the key instead of sending now.
        self._states_to_send.add(key)
        return False
    else:
        return True
def load(self, filename):
    """
    Loads a json file into a Cube object

    It reads a text file formated in the same maner self.dump() stores
    the Cube. It is a JSON file containing a dictionary with an element
    for each side of the Cube. The json modules loads normal python
    arrays which are then converted to numpy arrays.

    :param filename: path of the JSON file produced by self.dump().
    :raises KeyError: if any of the six side keys is missing.
    """
    # Fix: use a context manager so the handle is closed even when
    # jsonloads raises (the original open/read/close leaked it on error).
    with open(filename, 'r') as file_handle:
        dictionary = jsonloads(file_handle.read())
    self.front = np.array(dictionary["front"])
    self.back = np.array(dictionary["back"])
    self.left = np.array(dictionary["left"])
    self.right = np.array(dictionary["right"])
    self.up = np.array(dictionary["up"])
    self.down = np.array(dictionary["down"])
def apirequest(cls, url, apikey=None, **params):
    """Create a query to the BTC-E API (decode response).
    @raise APIError, CloudFlare: BTC-E and CloudFlare errors
    @param url: Public/Trade API plain URL without parameters
    @param apikey: Trade API Key {'Key': 'KEY', 'Secret': 'SECRET'}
    @param **params: Public/Trade API method and/or parameters
    @return: BTC-E API response (decoded data) <type 'dict'>"""
    # jsonrequest performs the HTTP call and, as a side effect, stores the
    # raw HTTP response on cls.resp (relied on in the except branch below).
    jsondata = cls.jsonrequest(url, apikey, **params)
    try:
        # parse_float/parse_int let the caller control numeric decoding
        # (e.g. Decimal for money amounts).
        data = jsonloads(jsondata, parse_float=PARSE_FLOAT,
                         parse_int=PARSE_INT)
    except ValueError:
        # Body was not JSON: distinguish a proxy failure from an API one.
        if cls.resp.status != 200:  #: != status OK
            # CloudFlare proxy errors
            raise CloudFlare("{0.status} {0.reason}".format(cls.resp))
        raise APIError(jsondata)  # BTC-E API unknown errors
    else:
        if 'error' in data:
            raise APIError(data['error'])  # BTC-E API standard errors
        return data
def main():
    """Entry point: parse CLI flags, then patrol USB ports forever.

    Requires root. Watches the set of connected USB devices and calls
    kill_computer() when a device appears that is not whitelisted, when a
    start-time device disappears, or (optionally) when a USB ID shows up
    twice (a USB-ID copy attack).
    """
    parser = ArgumentParser(
        description = ('USBKill is meant to shutdown the system if USB devices '
                       'change'),
        epilog = 'fork of USBKill by Stefan Midjich'
    )
    parser.add_argument(
        '-S', '--no-shutdown',
        action = 'store_false',
        default = True,
        dest = 'shutdown',
        help = 'do not shutdown the computer, test run'
    )
    parser.add_argument(
        '-D', '--no-duplicate-check',
        action = 'store_false',
        default = True,
        dest = 'duplicate_check',
        help = 'Do not check for duplicate usb IDs'
    )
    parser.add_argument(
        '-d', '--debug',
        action = 'store_true',
        default = False,
        dest = 'debug',
        help = 'Debug output'
    )
    args = parser.parse_args()

    if os.geteuid() != 0:
        log.error('Must be root')
        sys.exit(1)

    # Register handlers for clean exit of program
    for sig in [signal.SIGINT, signal.SIGTERM, signal.SIGQUIT, ]:
        signal.signal(sig, exit_handler)

    # Reload configuration handler
    signal.signal(signal.SIGUSR1, reload_handler)

    # CLI flags override the configuration (stored as strings).
    if not args.shutdown:
        config.set('usbkill', 'shutdown', 'False')
    if not args.duplicate_check:
        config.set('usbkill', 'duplicate_check', 'False')
    if args.debug:
        config.set('usbkill', 'log_level', 'DEBUG')

    log.setLevel(loglevels[config.get('usbkill', 'log_level')])

    start_devices = lsusb()
    acceptable_devices = set(
        start_devices + jsonloads(config.get('usbkill', 'whitelist').strip())
    )

    log.info('Patrolling USB ports every {0} seconds'.format(
        config.getfloat('usbkill', 'sleep_time')
    ))
    logusb()

    # Start main loop
    while True:
        current_devices = lsusb()

        # Check that no usbids are connected twice.
        # Two devices with same usbid implies a usbid copy attack
        # BUG FIX: the original compared with ``is 'True'`` — identity
        # against a string literal — which is not reliably true for a value
        # read from ConfigParser, so this check effectively never ran.
        if config.get('usbkill', 'duplicate_check') == 'True':
            if len(current_devices) != len(set(current_devices)):
                log.debug('Found duplicate USB IDs')
                kill_computer()

        # Check that all current devices are in the set of acceptable devices
        for device in current_devices:
            if device not in acceptable_devices:
                log.debug('Found unacceptable USB device')
                kill_computer()

        # Check that all start devices are still present in current devices
        for device in start_devices:
            if device not in current_devices:
                log.debug('Found discrepancy in current usb device list')
                kill_computer()

        sleep(config.getfloat('usbkill', 'sleep_time'))
def load_json_data(json_str):
    """Decode *json_str* as JSON.

    :param json_str: JSON text to parse.
    :return: the decoded Python object.
    :raises GoCdApiException: if the string is not valid JSON.
    """
    try:
        return jsonloads(json_str)
    except ValueError as err:
        # Chain the decode error so the original cause stays in the traceback.
        raise GoCdApiException('Cannot parse %s' % json_str) from err
def decode_trains_asset(train_data, asset_id):
    """Decode an Amtrak train asset into the train_data collections.

    Reads the Google Engine asset identified by asset_id, normalizes each
    train feature's times to adjusted datetimes, and emplaces trains,
    aliases, stops and arrival/departure predictions into the corresponding
    collections of train_data.

    NOTE(review): train_data's values are project collections exposing an
    ``emplace`` method — semantics assumed from usage; confirm upstream.
    """
    # TODO: needs major overhaul for new paradigm
    # TODO, not very robust if certain pieces of data are missing
    print "=== TRAIN ASSETS ==="
    pages = readGoogleEngineAsset(asset_id)
    trains = train_data["trains"]
    stops = train_data["stops"]
    aliases = train_data["aliases"]
    arrival_predictions = train_data["arrival_predictions"]
    departure_predictions = train_data["departure_predictions"]
    for page in pages:
        # Aliasing of train numbers throws a major wrench into this
        for feature in page.get("features"):
            geom = feature.get("geometry")
            coords = geom.get("coordinates")
            properties = feature.get("properties")
            properties["TrainNum"] = int(properties["TrainNum"])
            aliasNums = None
            # Add the adjusted origin time
            properties["OrigTime"] = parseAmtrakDateTime(properties.get("OrigSchDep"), properties.get("OriginTZ"))
            # append latitude and longitude to properties
            # Record the aliases
            aliasString = properties.get("Aliases")
            if aliasString != "":
                # Comma-separated alias train numbers; each alias is emplaced
                # as its own record against the same properties dict.
                for alias in aliasString.split(","):
                    properties["Alias"] = alias
                    aliasNums = aliases.emplace(properties)
            trains.emplace(properties)
            # append the adjusted reading time
            # EventTZ may be absent; fall back to the origin timezone.
            readingTime = parseAmtrakDateTime(
                properties.get("LastValTS"),
                properties.get("EventTZ") or properties.get("OriginTZ")
            )
            properties["RecordTime"] = readingTime
            # Station1, Station2, ... hold per-stop JSON blobs; walk them
            # until the next numbered key is missing.
            count = 1
            while ("Station" + str(count)) in properties:
                stopinfo = jsonloads(properties.get("Station" + str(count)))
                stopinfo["TrainNum"] = properties.get("TrainNum")
                stopinfo["OrigTime"] = properties.get("OrigTime")
                stopinfo["RecordTime"] = properties.get("RecordTime")
                stopinfo["Source"] = "Amtrak"
                station = stopinfo.get("code")
                scheddeptext = stopinfo.get("schdep")
                schedarrtext = stopinfo.get("scharr")
                actdeptext = stopinfo.get("postdep")
                actarrtext = stopinfo.get("postarr")
                estdeptext = stopinfo.get("estdep")
                estarrtext = stopinfo.get("estarr")
                timezone = stopinfo.get("tz")
                # Be aware of the use of boolean short-circuiting here:
                # each adj_* field stays falsy (None/"") when its source
                # text is missing, and is parsed otherwise.
                stopinfo["adj_schdep"] = scheddeptext and parseAmtrakDateTime2(scheddeptext, timezone)
                stopinfo["adj_scharr"] = schedarrtext and parseAmtrakDateTime2(schedarrtext, timezone)
                stopinfo["adj_postdep"] = actdeptext and parseAmtrakDateTime2(actdeptext, timezone)
                stopinfo["adj_postarr"] = actarrtext and parseAmtrakDateTime2(actarrtext, timezone)
                stops.emplace(stopinfo)
                if "estdep" in stopinfo:
                    stopinfo["adj_estdep"] = estdeptext and parseAmtrakDateTime2(estdeptext, timezone)
                    departure_predictions.emplace(stopinfo)
                if "estarr" in stopinfo:
                    stopinfo["adj_estarr"] = estarrtext and parseAmtrakDateTime2(estarrtext, timezone)
                    arrival_predictions.emplace(stopinfo)
                count += 1