def common_read(self):
    if hasattr(self, 'oid'):
        res = self.fetch(self.res_url + self.oid, method='GET')
        assert res.code == 200
        assert isinstance(jload(res.body), dict)
        if self.create:
            assert_similar(jload(res.body), self.create_args)
    res = self.fetch(self.res_url, method='GET')
    assert res.code == 200
    json = jload(res.body)
    assert isinstance(json, dict)
    return json
def __init__(self, oauth, http_client=None):
    self.oauth = oauth
    http_client = http_client or httplib2.Http(cache="/tmp/.pygsheets_cache", timeout=10)
    http = self.oauth.authorize(http_client)
    data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
    with open(os.path.join(data_path, "sheets_discovery.json")) as jd:
        self.service = discovery.build_from_document(jload(jd), http=http)
    with open(os.path.join(data_path, "drive_discovery.json")) as jd:
        self.driveService = discovery.build_from_document(jload(jd), http=http)
    self._spreadsheeets = []
    self.batch_requests = dict()
    self.retries = 1
    self._fetch_sheets()
def loadCachedDatabase(databaseDumpPath):
    if path.exists(databaseDumpPath):
        print("## Status: Found cached copy of Music 4 Dance database.")
        # json.load expects the file object itself, not the result of f.read()
        with open(databaseDumpPath, 'r') as f:
            return jload(f)
    else:
        return None
def get_jconfigrsc(self) -> dict:
    """
    Loads the piCtory configuration and builds a <class 'dict'>.

    :return: <class 'dict'> of the piCtory configuration
    """
    # Check the piCtory configuration
    if self._configrsc is not None:
        if not access(self._configrsc, F_OK | R_OK):
            raise RuntimeError(
                "can not access pictory configuration at {0}".format(
                    self._configrsc))
    else:
        # Check the known locations for a piCtory configuration
        lst_rsc = ["/etc/revpi/config.rsc", "/opt/KUNBUS/config.rsc"]
        for rscfile in lst_rsc:
            if access(rscfile, F_OK | R_OK):
                self._configrsc = rscfile
                break
        if self._configrsc is None:
            raise RuntimeError(
                "can not access known pictory configurations at {0} - "
                "use 'configrsc' parameter to specify location"
                "".format(", ".join(lst_rsc)))

    with open(self._configrsc, "r") as fhconfigrsc:
        try:
            jdata = jload(fhconfigrsc)
        except Exception:
            raise RuntimeError(
                "can not read piCtory configuration - check your hardware "
                "configuration http://revpi_ip/")
        return jdata
def __load_username_cache() -> dict:
    fpath = get_server_file("usernamecache.json")
    with open(fpath, mode='r') as f:
        username_cache = jload(f)
    return username_cache
def main(args):
    args.top_ns = arg_to_list(args.top_ns)
    lang2i = '{}2i'.format(args.lang)
    texts_mapping = jload(open(args.mapping_path))
    corpus_vecs = pload(open(args.corpus_vectors_path, 'rb'))
    golden_standard_ids = load_golden_standart(args.golden_standard_path, texts_mapping, lang2i)

    corp_sims = []  # predicted similarity lists, one per text
    for i in tqdm(range(len(corpus_vecs))):
        target_vec = corpus_vecs[i]
        sim_ids = predict_sim(target_vec, corpus_vecs)
        corp_sims.append(sim_ids)

    top_accuracies = [
        eval_acc(top_n, golden_standard_ids, corp_sims)
        for top_n in args.top_ns
    ]
    top_strings = [
        'TOP-{}:\t{}'.format(top_n, top_acc)
        for top_n, top_acc in zip(args.top_ns, top_accuracies)
    ]
    print('\n'.join(top_strings))
def main(stop_after_init=False):
    from sys import argv
    argc = len(argv)
    if argc <= len(CLI_ARGS):
        print 'Usage: %s %s %s' % (argv[0], ' '.join(CLI_ARGS), ' '.join(["[%s]" % x for x in OPTIONAL_ARGS]))
        print 'Currently missing parameters arguments:', ' '.join(CLI_ARGS[len(argv)-1:])
        exit()
    files = []
    for x in xrange(1, argc-1):
        files.append(argv[x].strip())
    output_path = argv[-1].strip()
    t_init = time()
    t0 = time()
    print "Loading result files..."
    serps_combined = {}
    for crawl_result_file in files:
        print "Loading", crawl_result_file, "..."
        t1 = time()
        with univ_open(crawl_result_file, 'r') as f:
            merge_serps(serps_combined, jload(f))
        print "Done in", time()-t1
    print "All files done in", time()-t0
    print "Writing URLs to output file", output_path, "..."
    t0 = time()
    jdump(serps_combined, univ_open(output_path, 'w+'))
    print "Done in", time()-t0
    print "Script executed in", time() - t_init, "seconds"
def generate_gui(gen_dict, is_gen_resource=True):
    """Generate all the needed files for the GUI

    Parameters
    ----------
    gen_dict : dict
        Dict with key = class name and value = class_dict
    is_gen_resource : bool
        True to generate the resources as well
    """
    # Get all the ui files
    file_list = find_ui_files()
    for file_tuple in file_list:
        # Convert every ui file to py
        ui_to_py(file_tuple[0], file_tuple[1])
        # Create the "Gen_" classes from the gen_list.json file (if needed)
        gen_path = join(file_tuple[0], "gen_list.json")
        if isfile(gen_path):
            # If needed add a "Gen_" class to edit widget with csv values
            with open(gen_path, "r") as load_file:
                gen_list = jload(load_file)
            gen_gui_edit_file(file_tuple[0], file_tuple[1][:-3], gen_dict, gen_list)
    # Generate the resources
    if is_gen_resource:
        print("Generate GUI resources...")
        qrc_to_py(RES_PATH, RES_NAME)
    else:
        print("############################")
        print("Skipping resource generation")
def load_model(directory):
    with open('%s/title2indx.json' % directory) as f:
        title2indx = jload(f)
    npz = np.load('%s/weights.npz' % directory)
    W = npz['arr_0']
    V = npz['arr_1']
    return title2indx, W, V
def mk_full_id_dict(_path, _file, _type):
    _dict = {}
    try:
        # The "with" block closes the file; the original no-op "_handle.close"
        # (missing parentheses) has been dropped
        with open(_path + _file, 'rU') as _handle:
            records = jload(_handle)['results']
        if _type == 'peer':
            for record in records:
                _dict[int(record['id'])] = {
                    'CALLSIGN': record['callsign'],
                    'CITY': record['city'],
                    'STATE': record['state'],
                    'COUNTRY': record['country'],
                    'FREQ': record['frequency'],
                    'CC': record['color_code'],
                    'OFFSET': record['offset'],
                    'LINKED': record['ts_linked'],
                    'TRUSTEE': record['trustee'],
                    'NETWORK': record['ipsc_network']
                }
        elif _type == 'subscriber':
            for record in records:
                _dict[int(record['id'])] = {
                    'CALLSIGN': record['callsign'],
                    'NAME': (record['fname'] + ' ' + record['surname']),
                    'CITY': record['city'],
                    'STATE': record['state'],
                    'COUNTRY': record['country']
                }
        elif _type == 'tgid':
            for record in records:
                _dict[int(record['id'])] = {'NAME': record['callsign']}
        return _dict
    except IOError:
        return _dict
def pretty_print_list(file_name=None, data_format="JSON"):
    # print YAML or JSON representations of list data
    assert (file_name is not None), "Provide a file name"
    assert ((data_format == "JSON" or data_format == "YAML")), ("Format must be 'JSON'"
                                                                " or 'YAML'")
    try:
        formatted_list = []
        with open(file_name, "r") as f:
            if data_format == "JSON":
                some_list = jload(f)
                formatted_list = jdumps(some_list)
            elif data_format == "YAML":
                some_list = yload(f)
                formatted_list = ydump(some_list,
                                       default_flow_style=False,
                                       explicit_start=True,
                                       width=1,
                                       indent=2)
    except IOError as e:
        print "Could not read file: %s" % e
    except Exception as e:
        print "Unexpected exception: %s" % e

    print "======================"
    print "list from file: %s in %s data_format:" % (file_name, data_format)
    print "======================"
    print formatted_list
    print "======================"
    print "list from file: %s in pretty_print native python" % file_name
    print "======================"
    pp(some_list, width=1)
def json_load(text):
    ''' Load JSON data using whatever resources are available. '''
    if OLD_SUGAR_SYSTEM is True:
        listdata = json.read(text)
    else:
        # Remove MAGIC NUMBER, if present, and leading whitespace
        if text[0:2] == MAGICNUMBER:
            clean_text = text[2:].lstrip()
        else:
            clean_text = text.lstrip()
        # Strip out trailing whitespace, nulls, and newlines
        clean_text = clean_text.replace('\12', '')
        clean_text = clean_text.replace('\00', '')
        clean_text = clean_text.rstrip()
        # Look for missing ']'s
        left_count = clean_text.count('[')
        right_count = clean_text.count(']')
        while left_count > right_count:
            clean_text += ']'
            right_count = clean_text.count(']')
        io = StringIO(clean_text)
        try:
            listdata = jload(io)
        except ValueError:
            # Assume that text is an ascii list
            listdata = text.split()
            for i, value in enumerate(listdata):
                listdata[i] = convert(value, float)
    # json converts tuples to lists, so we need to convert back
    return _tuplify(listdata)
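# A standalone sketch of the bracket-repair idea used above (json.loads is the
# stdlib equivalent of jload + StringIO; the MAGICNUMBER handling is omitted
# and repair_brackets is an illustrative name, not part of the original module):
from json import loads

def repair_brackets(text):
    # Append missing ']'s so truncated list data still parses
    while text.count('[') > text.count(']'):
        text += ']'
    return text

assert loads(repair_brackets('[1, 2, [3, 4')) == [1, 2, [3, 4]]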
def get_repos():
    """Returns list of repository urls in the given order."""
    # TODO: Perhaps validate URLs first?
    # TODO: Perhaps validate that a list is being returned?
    #       or not: a dictionary with urls as keys would still work in database_update.
    with open(get_cfg()) as cfg:
        return jload(cfg)['repositories']
def get_logger():
    r"""Return a logger.

    Configure logger lg with config for appLogger from config.json["logging"],
    and return it. Might need to configure the log path manually.

    Returns
    -------
    lg
        Logger object.

    Examples
    --------
    Get the logging object and use it to log

    >>> lg = get_logger()
    >>> lg.debug("Form is valid")
    appLogger - 2020-11-05 23:52:35,166-2984-DEBUG-Form is valid
    """
    # Configure logger lg with config for appLogger from config.json["logging"]
    CONFIG_DIR = Path(__file__).resolve().parent.parent.parent
    with open(CONFIG_DIR / "config.json", "r") as f:
        config = jload(f)
    logging.config.dictConfig(config["logging"])
    lg = logging.getLogger("appLogger")
    # lg.debug("This is a debug message")
    return lg
def add_older_new_viruses(self, dt=3, dtref=None):
    from date_util import numerical_date
    for v in self.new_strains:
        if v['strain'] not in [x['strain'] for x in self.viruses]:
            tmp_date = numerical_date(v['date'])
            if tmp_date < self.time_interval[0] and tmp_date >= self.time_interval[0] - dt:
                self.viruses.append(v)
                print("adding ", v['strain'], v['date'], tmp_date, self.time_interval)
            else:
                print("skipping ", v['strain'], v['date'], tmp_date, self.time_interval)
    new_strain_names = [v['strain'] for v in self.new_strains]
    try:
        if dtref is None:
            dtref = dt * 0.5
        from json import load as jload
        with open('/Users/yujiazhou/Documents/nextflu/H9_nextflu-master/augur/source-data/'
                  + self.virus_type + '_ref_strains.json', 'r') as infile:
            self.reference_viruses = jload(infile)
        for v in self.reference_viruses:
            if v['strain'] not in [x['strain'] for x in self.viruses]:
                tmp_date = numerical_date(v['date'])
                tmp_strain = v['strain']
                print(tmp_strain)
                if tmp_strain not in new_strain_names:
                    if tmp_date < self.time_interval[0] and tmp_date >= self.time_interval[0] - dtref:
                        self.viruses.append(v)
                        print("adding ", v['strain'], v['date'], tmp_date, self.time_interval)
                    else:
                        print("skipping ", v['strain'], v['date'], tmp_date, self.time_interval)
    except:
        print("can't find reference_viruses")
def load_json(file_path):
    """Load a json file

    Parameters
    ----------
    file_path: str
        path to the file to load

    Returns
    -------
    json_data: json decoded data type
        data of the json file
    """
    # The file_name must end with .json (raw string avoids an invalid
    # escape-sequence warning in the regex)
    if not match(r".*\.json", file_path):
        file_path += ".json"  # If it doesn't, we add .json at the end

    # The file (and the folder) should exist
    if not isfile(file_path):
        raise LoadMissingFileError(str(file_path) + " doesn't exist")

    # Get the data dictionary
    with open(file_path, "r") as load_file:
        json_data = jload(load_file)
    return json_data
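# A minimal, self-contained exercise of the extension-appending behaviour of
# load_json above (standard library only; the directory and file names are
# illustrative):
import json as _json
import tempfile
from os.path import join as _join

_tmpdir = tempfile.mkdtemp()
with open(_join(_tmpdir, "demo.json"), "w") as _f:
    _json.dump({"a": 1}, _f)

# Both spellings resolve to the same file because ".json" is appended when missing
assert load_json(_join(_tmpdir, "demo")) == load_json(_join(_tmpdir, "demo.json"))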
def authorize(outh_file='client_secret.json', outh_creds_store=None, outh_nonlocal=False,
              service_file=None, credentials=None):
    """Login to Google API using OAuth2 credentials.

    This function instantiates :class:`Client` and performs authentication.

    :param outh_file: path to outh2 credentials file, or tokens file
    :param outh_creds_store: path to directory where tokens should be stored
                             'global' if you want to store in system-wide location
                             None if you want to store in current script directory
    :param outh_nonlocal: if the authorization should be done in another computer,
                          this will provide a url which when run will ask for credentials
    :param service_file: path to service credentials file
    :param credentials: outh2 credentials object

    :returns: :class:`Client` instance.
    """
    # @TODO handle exceptions
    if not credentials:
        if service_file:
            with open(service_file) as data_file:
                data = jload(data_file)
            print('service_email : ' + str(data['client_email']))
            credentials = ServiceAccountCredentials.from_json_keyfile_name(service_file, SCOPES)
        elif outh_file:
            credentials = get_outh_credentials(client_secret_file=outh_file,
                                               credential_dir=outh_creds_store,
                                               outh_nonlocal=outh_nonlocal)
        else:
            raise AuthenticationError
    rclient = Client(oauth=credentials)
    return rclient
def load(file_path):
    """Load a pyleecan object from a json file

    Parameters
    ----------
    file_path: str
        path to the file to load
    """
    # The file_name must end with .json
    if not match(r".*\.json", file_path):
        file_path += ".json"  # If it doesn't, we add .json at the end

    # The file (and the folder) should exist
    if not isfile(file_path):
        raise LoadMissingFileError(str(file_path) + " doesn't exist")

    # Get the data dictionary
    with open(file_path, "r") as load_file:
        init_dict = jload(load_file)

    # Check that the dictionary has a "__class__" key
    if "__class__" not in init_dict:
        raise LoadWrongDictClassError('Key "__class__" missing in loaded file')
    if init_dict["__class__"] not in load_switch:
        raise LoadWrongDictClassError(init_dict["__class__"] + " is not a pyleecan class")
    return load_switch[init_dict["__class__"]](init_dict=init_dict)
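# The "__class__"-keyed dispatch used by load above, reduced to a standalone
# sketch (Demo, demo_switch, and the init_dict contents are illustrative, not
# pyleecan's real class registry):
class Demo:
    def __init__(self, init_dict=None):
        self.value = init_dict["value"]

demo_switch = {"Demo": Demo}
demo_dict = {"__class__": "Demo", "value": 42}
obj = demo_switch[demo_dict["__class__"]](init_dict=demo_dict)
assert isinstance(obj, Demo) and obj.value == 42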
def loadTopoFromFile(self):
    from json import load as jload
    graph = None
    with open(self.fileName, 'r') as f:
        graph = jload(f)
    if graph is None:
        raise RuntimeError('Topology could not be read')
    # read all types of expected equipments
    for typeOfEquipment, listOfEquipment in graph.iteritems():
        # switch on equipment type
        if typeOfEquipment == self.KW_HOSTS:
            self.hosts = listOfEquipment
        elif typeOfEquipment == self.KW_LINKS:
            self.links = listOfEquipment
        elif typeOfEquipment == self.KW_ROUTERS:
            self.routers = listOfEquipment
        elif typeOfEquipment == self.KW_SWITCHES:
            self.switches = listOfEquipment
        elif typeOfEquipment == self.KW_EVENTS:
            self.events = listOfEquipment
        elif typeOfEquipment == self.KW_CHECKS:
            self.checks = listOfEquipment
        else:
            raise RuntimeError('Unknown equipment type or keyword')
    # load links last as they require other elements
    self.loadHosts()
    # self.loadRouters()
    self.loadSwitches()
    self.loadLinks()
    self.loadEvents()
    self.loadChecks()
def objJSON(self, objIter):
    '''
    This method gets an object from the JSON data
    @param objIter: str - name of the object to look up
    '''
    with open(self.realPath + self.jsonPath) as jsonData:
        jsonObject = jload(jsonData)
    return jsonObject[objIter]
def pretty_print_list(file_name=None, data_format="JSON"):
    # print YAML or JSON representations of list data
    assert(file_name is not None), "Provide a file name"
    assert((data_format == "JSON" or data_format == "YAML")), ("Format must be 'JSON'"
                                                               " or 'YAML'")
    try:
        formatted_list = []
        with open(file_name, "r") as f:
            if data_format == "JSON":
                some_list = jload(f)
                formatted_list = jdumps(some_list)
            elif data_format == "YAML":
                some_list = yload(f)
                formatted_list = ydump(some_list,
                                       default_flow_style=False,
                                       explicit_start=True,
                                       width=1,
                                       indent=2)
    except IOError as e:
        print "Could not read file: %s" % e
    except Exception as e:
        print "Unexpected exception: %s" % e

    print "======================"
    print "list from file: %s in %s data_format:" % (file_name, data_format)
    print "======================"
    print formatted_list
    print "======================"
    print "list from file: %s in pretty_print native python" % file_name
    print "======================"
    pp(some_list, width=1)
def load_json(file_path):
    """Load a json file

    Parameters
    ----------
    file_path: str
        path to the file to load

    Returns
    -------
    file_path: str
        edited path to the file to load
    json_data: json decoded data type
        data of the json file
    """
    if isdir(file_path):
        # For a directory "path/to/Machine", load "path/to/Machine/Machine.json"
        i = max(file_path.rfind("\\"), file_path.rfind("/"))
        if i != -1:
            file_path += file_path[i:] + ".json"
        else:
            file_path += "/" + file_path + ".json"
    # The file_name must end with .json
    elif not match(r".*\.json", file_path):
        file_path += ".json"  # If it doesn't, we add .json at the end

    # The file (and the folder) should exist
    if not isfile(file_path):
        raise LoadMissingFileError(str(file_path) + " doesn't exist")

    # Get the data dictionary
    with open(file_path, "r") as load_file:
        json_data = jload(load_file)
    return file_path, json_data
def load(self, filename):
    """
    inputs:
        filename: to load maze from
    """
    with open(path.join(maze_store, "Generated", filename), "r") as maze_load:
        maze = jload(maze_load)

    # Set maze dimensions from file
    self.grid = []
    self.num_columns = len(maze[0])
    self.num_rows = len(maze)

    # Populating the grid with loaded nodes
    for i in range(self.num_rows):
        temp = []
        for j in range(self.num_columns):
            temp.append(Node(neighbors=maze[i][j]["neighbors"], color=[0, 0, 0]))
        self.grid.append(temp)
    return
def add_older_vaccine_viruses(self, dt=3, dtref=None):
    '''
    adds additional vaccine viruses prior to the time interval
    to provide phylogenetic context
    '''
    from date_util import numerical_date
    for v in self.vaccine_strains:
        if v['strain'] not in [x['strain'] for x in self.viruses]:
            tmp_date = numerical_date(v['date'])
            if tmp_date < self.time_interval[0] and tmp_date >= self.time_interval[0] - dt:
                self.viruses.append(v)
                print("adding ", v['strain'], v['date'], tmp_date, self.time_interval)
            else:
                print("skipping ", v['strain'], v['date'], tmp_date, self.time_interval)
    vaccine_strain_names = [v['strain'] for v in self.vaccine_strains]
    try:
        if dtref is None:
            dtref = dt * 0.5
        from json import load as jload
        with open('source-data/' + self.virus_type + '_ref_strains.json', 'r') as infile:
            self.reference_viruses = jload(infile)
        for v in self.reference_viruses:
            if v['strain'] not in [x['strain'] for x in self.viruses]:
                tmp_date = numerical_date(v['date'])
                tmp_strain = v['strain']
                print(tmp_strain)
                if tmp_strain not in vaccine_strain_names:
                    if tmp_date < self.time_interval[0] and tmp_date >= self.time_interval[0] - dtref:
                        self.viruses.append(v)
                        print("adding ", v['strain'], v['date'], tmp_date, self.time_interval)
                    else:
                        print("skipping ", v['strain'], v['date'], tmp_date, self.time_interval)
    except:
        print("can't find reference_viruses")
def import_secrets():
    """
    Contact Ian for secrets.json file.

    The file contains the protected URL needed to make POST requests to the
    channel of DesignHub. It is always best to keep such "secret" files, like
    tokens and client IDs, out of version control.

    [email protected]
    """
    with open('secrets.json') as f:
        return jload(f)
def mk_full_id_dict(_path, _file, _type):
    _dict = {}
    try:
        # The "with" block closes the file; the original no-op "_handle.close"
        # (missing parentheses) has been dropped
        with open(_path + _file, 'r', encoding='latin1') as _handle:
            records = jload(_handle)
            if 'count' in [*records]:
                records.pop('count')
            records = records[[*records][0]]
        if _type == 'peer':
            for record in records:
                try:
                    _dict[int(record['id'])] = {
                        'CALLSIGN': record['callsign'],
                        'CITY': record['city'],
                        'STATE': record['state'],
                        'COUNTRY': record['country'],
                        'FREQ': record['frequency'],
                        'CC': record['color_code'],
                        'OFFSET': record['offset'],
                        'LINKED': record['ts_linked'],
                        'TRUSTEE': record['trustee'],
                        'NETWORK': record['ipsc_network']
                    }
                except:
                    pass
        elif _type == 'subscriber':
            for record in records:
                # Try to create a string name regardless of existing data
                if ('surname' in record.keys()) and ('fname' in record.keys()):
                    _name = str(record['fname'])
                elif 'fname' in record.keys():
                    _name = str(record['fname'])
                elif 'surname' in record.keys():
                    _name = str(record['surname'])
                else:
                    _name = 'NO NAME'
                # Make dictionary entry; if any of the information below
                # isn't in the record, it will be skipped
                try:
                    _dict[int(record['id'])] = {
                        'CALLSIGN': record['callsign'],
                        'NAME': _name,
                        'CITY': record['city'],
                        'STATE': record['state'],
                        'COUNTRY': record['country']
                    }
                except:
                    pass
        elif _type == 'tgid':
            for record in records:
                try:
                    _dict[int(record['id'])] = {
                        'NAME': record['callsign']
                    }
                except:
                    pass
        return _dict
    except IOError:
        return _dict
def read_json_file(filepath):
    """load json object from file

    :param filepath (str) - json file
    :return json - json list"""
    # json.load expects a file object, not the raw string contents
    with open(filepath) as f:
        json_obj = jload(f)
    return json_obj
def getConn():
    with open(tokenDir + tokenFile, "r") as file:
        temp = jload(file)
    dblogin = temp["psyconn"][BOTTOKEN]
    global conn
    conn = psyconn(host=dblogin[0],
                   database=dblogin[1],
                   user=dblogin[2],
                   password=dblogin[3])
async def bored(self, ctx):
    """Gives a user an idea to avoid boredom"""
    print("Command 'bored' called")
    with open('data/bored/ideas.json', 'r') as f:
        data = jload(f)
    line = random.randint(1, len(data))
    print(f"Line {line} : {data[str(line)]}")
    await ctx.send(data[str(line)])
    return None
def load(self, jsonfile):
    from json import load as jload
    with open(jsonfile) as thejson:
        qdict = jload(thejson)
    for biter in qdict['blocks']:
        btemp = QBlock(biter['blockid'], biter['blockheader'])
        self.addBlock(btemp)
    for qiter in qdict['questions']:
        qtemp = Question(qiter['block'], qiter['qid'], qiter['prompt'],
                         qiter['answers'], qiter['correctans'],
                         qiter['explanation'], qiter['subjects'])
        self.addQuestion(qtemp)
def vocaber(file_ids):
    cou = Counter()
    for file_id in file_ids:
        with gzip.open(Path("data") / "processed" / f"{file_id}.json.gz") as fp:
            j = jload(fp)
        cou += Counter([e for l in j.values() for e in l])
    with open("data/vocabulary/vocab.json", "w") as wp:
        jdump(cou, wp)
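# Counter serializes like a plain dict, which is why jdump(cou, wp) above
# works unchanged; a standalone check of that claim:
import json
from collections import Counter

assert json.loads(json.dumps(Counter("aab"))) == {"a": 2, "b": 1}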
def get_searchpath():
    # TODO: Perhaps validate paths?
    with open(get_cfg()) as cfg:
        searchpath = jload(cfg)['searchpath']
    if DEFAULT_KEY in searchpath:
        i = searchpath.index(DEFAULT_KEY)
        searchpath[i:i+1] = get_searchpath_default()
    return searchpath
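# The slice assignment above splices the default entries in place of the
# DEFAULT_KEY marker rather than nesting a list inside the list; a standalone
# illustration (the paths and marker string are illustrative only):
sp = ["/home/user/pnds", "<default>", "/media"]
i = sp.index("<default>")
sp[i:i+1] = ["/usr/pandora/apps", "/usr/pandora/menu"]
assert sp == ["/home/user/pnds", "/usr/pandora/apps", "/usr/pandora/menu", "/media"]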
def load_mapping(mapping_path, forced):
    if os.path.isfile(mapping_path) and not forced:
        mapping = jload(open(mapping_path, 'r', encoding='utf-8'))
        print('A mapping already exists!')
        print('\t'.join(
            ['{}: {} objects'.format(k, len(v)) for k, v in mapping.items()]))
    else:
        mapping = {}
        print('No mapping yet; it will be created now')
    return mapping
def load(self, File: Path):
    """ Loads an Encrypt dictionary from a Path """
    # If File isn't a Path object, reject it
    if not isinstance(File, Path):
        raise TypeError("Non-Path object used")
    # If the file doesn't exist, raise a FileNotFoundError
    if not File.exists():
        raise FileNotFoundError
    with open(File, 'r') as f:
        self.UserDict = jload(f)
def json_load(text):
    ''' Load JSON data using whatever resources are available. '''
    io = StringIO(text)
    try:
        listdata = jload(io)
    except ValueError:
        # assume that text is an ascii list
        listdata = text.split()
        for i, value in enumerate(listdata):
            listdata[i] = int(value)
    return listdata
def _load_bans(self):
    "Load the banfile and convert it to a blacklist"
    p = Path(self.BLACKLIST)
    if not p.is_file():
        return self.logger("Local blacklist not found")
    with open(p, 'r') as f:
        self.BANS = jload(f)
    self.logger("Initial banned users:")
    for k, v in self.BANS.items():
        self.logger(f"* {k}")
    return
def from_json(in_stream, out_stream):
    try:
        obj = jload(in_stream)
    except ValueError as ex:
        __error('Failed to decode json: %s' % ex)
        return 8
    try:
        ubjdump(obj, out_stream, sort_keys=True)
    except EncoderException as ex:
        __error('Failed to encode to ubjson: %s' % ex)
        return 16
    return 0
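# Hypothetical driver for from_json above (the file names are illustrative):
# the JSON input is opened as a text stream, while the UBJSON output must be
# a binary stream. Returns 0 on success, 8/16 on decode/encode failure.
if __name__ == '__main__':
    with open('config.json', 'r') as src, open('config.ubj', 'wb') as dst:
        rc = from_json(src, dst)
    print('exit code:', rc)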
def json_load(text):
    """ Load JSON data using whatever resources are available. """
    io = StringIO(text)
    try:
        listdata = jload(io)
    except ValueError:
        # assume that text is an ascii list
        listdata = text.split()
        for i, value in enumerate(listdata):
            listdata[i] = int(value)
    return listdata
def setUp(self):
    super(HttpTest, self).setUp()
    getattr(self, 'set_up', lambda: None)()
    if self.create:
        res = self.fetch(self.res_url, method='POST', body=self.jcreate_args)
        # "res.code == 201 or 200" is always truthy; test membership instead
        assert res.code in (201, 200)
        json = jload(res.body)
        assert_similar(json, self.create_args)
        try:
            self.oid = json['_id']
        except:
            pass
def json_load(text):
    """ Load JSON data using whatever resources are available. """
    if OLD_SUGAR_SYSTEM is True:
        _listdata = json.read(text)
    else:
        # strip out leading and trailing whitespace, nulls, and newlines
        text = text.lstrip()
        text = text.replace("\12", "")
        text = text.replace("\00", "")
        _io = StringIO(text.rstrip())
        _listdata = jload(_io)
    # json converts tuples to lists, so we need to convert back
    return _tuplify(_listdata)
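# _tuplify itself is not shown in this module; a minimal recursive sketch
# consistent with the comment above (convert lists back to tuples) could be
# (the name _tuplify_sketch marks it as illustrative, not the original helper):
def _tuplify_sketch(data):
    if isinstance(data, list):
        return tuple(_tuplify_sketch(e) for e in data)
    return data

assert _tuplify_sketch([1, [2, 3]]) == (1, (2, 3))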
def main(stop_after_init=False):
    from sys import argv
    argc = len(argv)
    if argc <= len(CLI_ARGS):
        print 'Usage: %s %s %s' % (argv[0], ' '.join(CLI_ARGS), ' '.join(["[%s]" % x for x in OPTIONAL_ARGS]))
        print 'Currently missing parameters arguments:', ' '.join(CLI_ARGS[len(argv)-1:])
        exit()
    crawl_result_file = argv[1].strip()
    output_path = argv[2].strip()
    split_into_n = 1
    if argc > len(CLI_ARGS) + 1:
        split_into_n = int(argv[len(CLI_ARGS) + 1])
    t_init = time()
    t0 = time()
    print "Loading result file..."
    with univ_open(crawl_result_file, 'r') as f:
        serps = jload(f)
    print "Done in", time()-t0
    t0 = time()
    print "Writing URLs to output file", output_path, "..."
    # the set() is because we do not need multiple times the same URL
    # in the seed, and from the SERP it is actually pretty likely to happen
    # on an example run, we went from 4070 urls to 2387 by adding the set()
    result = list(set([
        url
        for query_serps in serps.values()
        for serp in query_serps
        for pos, url in serp['results']
    ]))
    urls_n = len(result)
    batch_size = urls_n / split_into_n
    i = 0
    # print urls_n, batch_size, split_into_n, (urls_n/split_into_n)*split_into_n
    for start in range(0, urls_n, batch_size):
        # print start
        i += 1
        dir, fname = os.path.split(output_path)
        outp = os.path.join(dir, "%03d_%s" % (i, fname))
        print "Dumping into", outp
        with univ_open(outp, 'w+') as out:
            out.write('\n'.join(result[start:start+batch_size]))
    print "Done in", time()-t0
    print "Script executed in", time() - t_init, "seconds"
def import_existing_buckets(c):
    """Imports existing buckets from cats.json.

    Requires a mongodb connection as the first argument"""
    with open("external_data/cats.json") as f:
        buckets = jload(f)
    for bucket in buckets:
        c['bucketerer']['adgroups'].insert(
            {
                "name": bucket[1],
                "sites": bucket[0].split(","),
                "created_on": datetime.now(),
                "created_by": "Mardak"
            }
        )
def get_locale():
    """Returns a list of locales in the given order.

    If none are specified, first preference is the system locale. The PND
    spec requires that en_US always be available for titles and descriptions,
    so that will always be the last entry in the list (it shouldn't matter if
    it appears multiple times in the list)."""
    # TODO: Perhaps validate language codes?
    # TODO: What should be done for language codes without country codes?
    with open(get_cfg()) as cfg:
        locales = jload(cfg)['locales']
    if DEFAULT_KEY in locales:
        i = locales.index(DEFAULT_KEY)
        locales[i] = get_locale_default()
        # An unconfigured system will have None, which breaks libpnd.
        if not locales[i]:
            del locales[i]
    # The en_US locale should be available in all PNDs, so it should be a last resort.
    locales.append('en_US')
    return locales
def get_song(songname, artist):
    q = '%s+%s'.encode('utf-8') % (songname, artist)
    image_url = ''.join([
        'http://api.douban.com/music/subjects?q=', q,
        '&alt=json&start-index=1&max-results=1',
    ])
    http_client = httpclient.HTTPClient()
    response = None
    music_image = None
    try:
        response = http_client.fetch(image_url)
    except httpclient.HTTPError as e:
        print("Error:", e)
    if response and response.code == 200:
        res = jload(response.body)
        music_image = res['entry'][0]['link'][2]['@href']
        music_image = '<img src="%s" style="float:left"/>' % music_image
        if not artist:
            artist = res['entry'][0]['author'][0]['name']['$t']
    singer = ''
    if artist:
        singer = '&singer=' + urllib.quote(artist.encode('gbk'))
    artist_u = ''
    if artist:
        artist_u = '&artist=' + urllib.quote(artist.encode('utf-8'))
        artist_u = artist_u.replace(']', ']\n')
    songbox = ''.join([
        '<p><embed width="550" height="75" name="plugin" ',
        'wmode="transparent" ',
        'src="http://box.baidu.com/widget/flash/song.swf?name=',
        urllib.quote(songname.encode('utf-8')), artist_u,
        '" type="application/x-shockwave-flash"></p>'
    ])
    link = '<br/>'
    return music_image, link, songbox
def show_menu(background, back_color):
    dt = datetime.today()
    data = jload(urlopen("http://127.0.0.1/functions/cron.php?next"))
    dr = datetime.fromtimestamp(data["heure"])
    i = 0
    while True:
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        s.connect("mastersocket")
        s.send(jdumps({"request": "get_delta"}))
        delta = int(s.recv(4096))
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        s.connect("mastersocket")
        s.send(jdumps({"request": "get_sw_state"}))
        sw_state = int(s.recv(4096))
        background.fill(back_color)
        render.render(get_ip_address('eth0'), font, background,
                      hex_to_rgb(conf["general"]["front_color"]), 0, 0, 320, 60)
        if i == 0:
            render.render(dt.strftime("%H:%M"), font, background,
                          hex_to_rgb(conf["general"]["front_color"]), 0, 60, 320, 120)
            dt = dt.replace(minute=(dt.minute + delta) % 60,
                            hour=dt.hour + int(floor((dt.minute + delta) / 60)))
        elif i == 1:
            render.render(dr.strftime("%H:%M"), font, background,
                          hex_to_rgb(conf["general"]["front_color"]), 0, 60, 320, 120)
            dr = dr.replace(minute=(dr.minute + delta) % 60,
                            hour=dr.hour + int(floor((dr.minute + delta) / 60)))
        if sw_state:
            i += 1
        screen.blit(background, (0, 0))
        display.flip()
        sleep(0.1)
        if i >= 2:
            _linux_set_time(dt.timetuple())
            urlopen("http://127.0.0.1/functions/cron.php?change=" + dr.strftime("%s")).read()
            break
def main():
    # TODO:
    # load the requeried SERPs and store them as follows:
    # (query_id, [complete_list_of_urls_from_serps_in_order_of_appearance])
    # then when we go through the users' clicks, we do:
    # index = urls_from_serps[queryid].find(current_domain, lambda: extract_domain())
    # urls_from_serps.pop(index)
    # somehow get the info about the domain ID in the logs, make the mapping, etc. ...
    import sys
    from sys import argv
    t_init = time()
    argc = len(sys.argv)
    if argc < (len(CLI_ARGS)+1):
        print "Usage:", argv[0], " ".join(CLI_ARGS), " ".join(OPT_ARGS)
        exit()
    logfile = argv[1]
    mdb_host = argv[2]
    queries_users_file = argv[3]
    query_str_to_ids_mapping_file = argv[4]
    serp_requery_result_file = argv[5]
    web_crawl_graph_url_to_domain_ids_mapping_file = argv[6]
    output_path = argv[-1]

    print "Loading target users..."
    queries_users_pairs = [tuple(map(int, _.strip().split(' '))) for _ in univ_open(queries_users_file, 'r')]
    targeted_users_set = set([_[1] for _ in queries_users_pairs])
    targeted_queries_set = set([_[0] for _ in queries_users_pairs])
    print "Loaded", len(targeted_users_set), "target users."

    print "Loading the set of their top similar users..."
    set_of_similar_users = load_set_of_similar_users(mdb_host, targeted_users_set)
    print "Loaded a total of", len(set_of_similar_users), "new allowed users"
    allowed_users = set_of_similar_users | targeted_users_set
    print len(allowed_users), "users allowed in total"

    print "Loading SERP file..."
    t0 = time()
    serps = jload(univ_open(serp_requery_result_file, 'r'))
    print "Done in", time() - t0

    print "Loading IDs <-> query strings..."
    t0 = time()
    queries_str_indexed_by_ids = [_.strip() for _ in univ_open(query_str_to_ids_mapping_file, 'r')]
    print "Done in", time() - t0

    print "Loading (web crawl) URL IDs <-> (logs) domain ids..."
    t0 = time()
    web_crawl_urls_to_domain_ids = [int(_.strip()) if _ != '\n' else None for _ in univ_open(web_crawl_graph_url_to_domain_ids_mapping_file, 'r')]
    print "Done in", time() - t0

    lp = LogProcessor(logfile)
    t0 = time()
    print "Starting process..."
    lp.process(
        serps=serps,
        allowed_queries=targeted_queries_set,
        allowed_users=allowed_users,
        query_str_to_ids=queries_str_indexed_by_ids,
        web_crawl_urls_to_domain_ids=web_crawl_urls_to_domain_ids
    )
    print "Done in", time() - t0
    print "Outputting result to", output_path, "..."
    with open(output_path, 'w+') as out:
        # q, urlid, domainid
        # meaning (q, urlid) -> domainid
        out.write('\n'.join(("%d,%d,%d" % (item[0][0], item[0][1], item[1]) for item in lp.serps_to_logs_mapping.items())))
    print "Done in", time() - t_init
    print "Terminating script."
def main(*args, **kwargs):
    '''
    Main daemon application
    1. Reads in a configuration file given
    2. Opens up access to a Twitter account
    3. Fetches a player's profile page
    4. Checks the last match updated in a local cache
    5. Finds the "newest" match to update to Twitter
    6. Pulls the match ID to update to Twitter
    7. Analyzes it and makes a snobbish post to Twitter
    '''
    # Read in a configuration file from sys.argv
    try:
        conf = jload(open(argv[1], 'r'))
    except Exception as e:
        print(str(e))
        print("Invalid or no configuration supplied")
        print("Command: D2T-Daemon JSON_CONF_FILE")
        quit()

    # Create the Twitter Oauth instance
    try:
        auth = OAuthHandler(conf["consumer_key"], conf["consumer_secret"])
        auth.set_access_token(conf["access_key"], conf["access_secret"])
        api = API(auth)
    except Exception as e:
        print(str(e))
        print("Failed to create Twitter connection")
        quit()

    # create the logging instance
    log = create_logger("-d" in argv or "--debug" in argv)

    # Begin while loop to infinity
    my_pid = conf["player_id"]
    try:
        log("Beginning daemon...")
        while True:
            # Open up local cache to see what the last match was
            # If unable to parse the last match, set to 0 and proceed
            try:
                with open(".last_match", "r") as f:
                    contents = f.read()
                last_match = int(contents)
            except Exception:
                log("Cache not found, setting to 0...")
                last_match = 0

            log("Getting player {}...".format(conf["player_id"]))
            player = get_player(my_pid)

            # Flag for when we already did something
            already_done = False

            # If we have no match cache
            if last_match == 0 or last_match is None:
                new_match = player.matches[-1]
            else:
                if last_match == player.matches[0]:
                    log("No new matches")
                    already_done = True
                else:
                    found_match = False
                    checked_match = 0
                    for matchnum in player.matches:
                        if last_match == matchnum:
                            found_match = True
                        if not found_match:
                            checked_match = matchnum
                    new_match = checked_match

            if not already_done:
                log("Getting match {}...".format(new_match))
                match = get_match(new_match)
                radi_stats = [p for p in match.radiant if p.id == my_pid]
                dire_stats = [p for p in match.dire if p.id == my_pid]

                # Check for player's ID in both teams to infer their team
                if radi_stats:
                    my_team = "Radiant"
                    pstat = radi_stats.pop()
                else:
                    my_team = "Dire"
                    pstat = dire_stats.pop()

                # Finally... determine if you won or lost
                if my_team == match.victor:
                    msg = choice(win_msgs).format(pstat.hero)
                else:
                    msg = choice(fail_msgs).format(pstat.hero)

                # Format the tweet to look like {win/loss msg, kda, -, <url>}
                tweet = " ".join([msg, stats(pstat), "-", URL.format(new_match)])
                log("Posting {}".format(tweet))
                api.update_status(tweet)

                # Write to local cache
                with open(".last_match", "w") as f:
                    f.write(str(new_match))

            log("Resting...")
            sleep(sleepy_time)
    except Exception as e:
        print("Something went completely wrong")
        print(e)
    return song


def get_song(songname, artist):
    image_url = ''.join([
        'http://api.douban.com/music/subjects?q=',
        urllib.quote(songname.encode('utf-8')),
        '&alt=json&start-index=1&max-results=1',
    ])
    http_client = httpclient.HTTPClient()
    response = None
    try:
        response = http_client.fetch(image_url)
    except httpclient.HTTPError, e:
        print "Error:", e
    # response is unbound if the fetch raised, so check it before use
    if response and response.code == 200:
        res = jload(response.body)
        music_image = res['entry'][0]['link'][2]['@href']
        music_image = '<img src="%s" style="float:left"/>' % music_image
        if not artist:
            artist = res['entry'][0]['author'][0]['name']['$t']
    singer = ''
    if artist:
        singer = '&singer=' + urllib.quote(artist.encode('gbk'))
    lyric_url = ''.join([
        'http://cgi.music.soso.com/fcgi-bin/'
        'fcg_download_lrc.q?song=',
        urllib.quote(songname.encode('gbk')),
        singer,
        '&down=1',
    ])
            _linux_set_time(dt.timetuple())
            urlopen("http://127.0.0.1/functions/cron.php?change=" + dr.strftime("%s")).read()
            break


# Initialization
display.init()
font.init()
screen = display.set_mode([320, 240])
background = Surface(screen.get_size())
background = background.convert()

# Load the configuration
conf_file = open("../conf/wake.json")
conf = jload(conf_file)

# Define the fonts
font_filename = font.match_font(conf["general"]["font"])
font = font.Font(font_filename, 135)
# font_time = font.Font(font_filename, 135)

# Define and colorize the images
image_temp = image.load("images/misc/temp.png")
image_rise = image.load("images/misc/rise.png")
image_set = image.load("images/misc/set.png")
image_mail = image.load("images/misc/mail.png")
image_news = image.load("images/misc/news.png")
image_cal = image.load("images/misc/cal.png")
from sys import argv
from os import path
from json import load as jload
from pandas import DataFrame
from matplotlib.pyplot import subplots
from matplotlib.ticker import ScalarFormatter

# ----------------------------------------------------
# Load arguments
name_json_file_in = argv[1]

# ----------------------------------------------------
# Load content of GEOjson file with json module
with open(name_json_file_in) as h_json:
    z_table = jload(h_json)

# Convert into pandas dataframe
data = DataFrame(z_table['data'])

# ----------------------------------------------------
# Plot with matplotlib
fig, ax = subplots(figsize=(16, 9))
ax.errorbar(data['time'],
            data['water_surface_height_above_reference_datum'],
            yerr=data['water_surface_height_uncertainty'],
            color='darkblue',
            ecolor='darkred',
from json import load as jload
from os.path import dirname, join as pathjoin

from .wsgiapp1 import rsyncapp

basedir = dirname(dirname(__file__))
application = rsyncapp(jload(open(pathjoin(basedir, 'config.json'), 'rt')))
def main(stop_after_init=False):
    from sys import argv
    argc = len(argv)
    if argc <= len(CLI_ARGS):
        print 'Usage: %s %s %s' % (argv[0], ' '.join(CLI_ARGS), ' '.join(["[%s]" % x for x in OPTIONAL_ARGS]))
        print 'Currently missing parameters arguments:', ' '.join(CLI_ARGS[len(argv)-1:])
        exit()
    serp_result_file = argv[1].strip()
    url_web_crawl_ids_mapping = argv[2].strip()
    output_path = argv[-1].strip()
    t_init = time()
    print "Loading SERP..."
    with univ_open(serp_result_file, 'r') as f:
        serps = jload(f)
    print "Loaded"
    print "Loading urls-to-ids dict..."
    urls_to_ids = {}
    with univ_open(url_web_crawl_ids_mapping, 'r') as f:
        i = 0
        for line in f:
            line = line.strip().lower().replace("%0a", '')
            urls_to_ids[line] = i
            i += 1
    print "Loaded"
    print "Converting SERP..."
    t0 = time()
    not_converted = set()
    total_urls = set()
    converted_set = set()
    for query_serps in serps.values():
        for serp in query_serps:
            i = 0
            while i < len(serp['results']):
                pos, url = serp['results'][i]
                url = url.lower().replace('%0a', '')
                total_urls.add(url)
                try:
                    serp['results'][i] = (pos, urls_to_ids[url])
                    converted_set.add(url)
                except KeyError:
                    # Looks like this URL has not been seen during the web crawl,
                    # as it has no assigned ID
                    not_converted.add(url)
                    serp['results'].pop(i)
                    i -= 1
                i += 1
    print "Over", len(total_urls), "total different URLs from the SERP results,", len(not_converted), "could not be converted"
    if len(total_urls) - len(not_converted) < 600:
        print converted_set
    print "Done in", time()-t0
    print "Writing URLs to output file", output_path, "..."
    t0 = time()
    with univ_open(output_path, 'w+') as out:
        jdump(serps, out)
    print "Done in", time()-t0
    print "Script executed in", time() - t_init, "seconds"
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from json import load as jload
from os.path import dirname, join

BASE_DIR = dirname(dirname(__file__))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = jload(open("secret.json"))["DJANGO_KEY"]

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

TEMPLATE_DEBUG = True

ALLOWED_HOSTS = ["*"]


# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
def _data_loader(self, data):
    io = StringIO(data)
    return jload(io)
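# Wrapping the string in StringIO (as _data_loader does above) lets the
# file-oriented jload (json.load) parse it; json.loads does the same thing
# directly, as this standalone check shows:
import json
from io import StringIO

payload = '{"a": [1, 2, 3]}'
assert json.load(StringIO(payload)) == json.loads(payload)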