def check_run_dp(filename_prefix):
    """Check the LUG database for whether the DP framework should run.

    Looks up the LUG entry for the given acquisition and reads the
    run_dp_framework flag recorded by the DAQ GUI.

    Parameters:
        filename_prefix -- acquisition filename prefix used as the lookup key

    Returns:
        The integer run_dp_framework flag, or 0 if any query/parse step
        failed (e.g. the DAQ GUI was never used for this acquisition).
    """
    xml_cred = xml2dict(ReportDataProcessingPath() +
                        'DataProcessingFramework/Utilities/lug_query_credentials_readonly.xml')
    db = LUXDatabaseSQL(xml_cred['credentials']['host'],
                        xml_cred['credentials']['user'],
                        xml_cred['credentials']['password'],
                        xml_cred['credentials']['database'],
                        str2num(xml_cred['credentials']['port']))
    db.connect()
    try:
        # NOTE(review): queries are built with %-interpolation; fine for
        # internally-generated prefixes, but switch to parameterized queries
        # if filename_prefix can ever come from untrusted input.
        query1 = "SELECT entry_id FROM lug_acquisitions WHERE filename_prefix = '%s' LIMIT 1" % filename_prefix
        result1 = db.query_db(query1)
        lug_entry_id = int(result1[0][0])
        query2 = "SELECT run_dp_framework FROM daq_control WHERE lug_entry_id = %d LIMIT 1" % lug_entry_id
        result2 = db.query_db(query2)
        run_dp_flag = int(result2[0][0])
    except Exception:
        # BUGFIX: narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate. Any query or parse failure means "do not submit".
        print("ERROR: check_run_dp LUG queries failed (was the DAQ GUI used?). Not submitting...")
        run_dp_flag = 0
    return run_dp_flag
def fileUpload(self, url, data):
    '''Upload a file to a repository.

    @type url str
    @param url destination location, repository with filename appended
    @type data str
    @param data file data to upload
    @rtype dict
    @return parsed server response
    @raise LGIServerException when the project server returned an error status
    '''
    try:
        self._connection.request("PUT", url, data)
        response = self._connection.getresponse()
    except httplib.HTTPException:
        # silently reconnect if connection re-use comes to its limit
        # TODO use urllib2 for more intelligent retries
        self._connection.close()
        self._connection.connect()
        self._connection.request("PUT", url, data)
        response = self._connection.getresponse()
    resp = xml2dict.xml2dict(xml.dom.minidom.parseString(response.read()))
    if 'status' in resp and 'number' in resp['status'] and int(resp['status']['number']) >= 400:
        status = resp['status']
        # BUGFIX: status['number'] is a string parsed from XML; formatting it
        # with %d raised TypeError and masked the real server error. Convert
        # to int explicitly before formatting.
        raise LGIServerException('LGI error status %d: %s' % (int(status['number']), status['message']), resp)
    return resp
def generateIMDbTuples(verbose=False):
    """Fetch IMDb metadata for every movie referenced in the XML export.

    @param verbose: when True, print each movie_has_genre tuple as it is built.
    @return dict with keys "movies" and "movie_has_genre", each a list of
            {"values": [...]} rows ready for SQL insertion.
    """
    movies_ids = []
    movie_related_tuples = {"movies": [], "movie_has_genre": []}
    seen_movies = set()
    movie_dict = xml2dict("movie")
    # The IMDb id is the numeric tail of the item URL
    # (http://www.imdb.com/title/tt<id>/).
    for key in movie_dict.keys():
        for item in movie_dict[key]:
            movies_ids.append(item[1].split("/")[-2].split("t")[-1])
    ia = IMDb()
    # Hoisted out of the per-movie loop: this lookup table is constant.
    months = {
        "Jan": "01", "Feb": "02", "Mar": "03", "Apr": "04",
        "May": "05", "Jun": "06", "Jul": "07", "Aug": "08",
        "Sep": "09", "Oct": "10", "Nov": "11", "Dec": "12",
    }
    for movie_id in movies_ids:
        if movie_id in seen_movies:
            continue
        seen_movies.add(movie_id)
        imdbmovie = ia.get_movie(movie_id)
        # "original air date" looks like "12 Jan 2004"; rebuild it as
        # YYYY-MM-DD. Split once instead of three times per movie.
        air_date = imdbmovie["original air date"].split()
        date_formated = air_date[2] + "-" + months[air_date[1]] + "-" + air_date[0]
        movie_related_tuples["movies"].append({
            "values": [
                "http://www.imdb.com/title/tt" + movie_id + "/",
                # single quotes stripped so values can be embedded in SQL
                str(imdbmovie["canonical title"]).replace("'", ""),
                date_formated,
                str(imdbmovie["directors"][0]).replace("'", ""),
                # rating stored as an integer in tenths (7.3 -> 73)
                int(float(imdbmovie["rating"]) * 10),
            ]
        })
        for genre in imdbmovie["genres"]:
            movie_related_tuples["movie_has_genre"].append({
                "values": [
                    "http://www.imdb.com/title/tt" + movie_id + "/",
                    str(genre).replace("'", ""),
                ]
            })
            if verbose:
                print(movie_related_tuples["movie_has_genre"][-1])
    return movie_related_tuples
def do_request(self, url):
    """GET *url* with this client's User-Agent and parse the XML reply.

    Returns the parsed dictionary, or None when the server does not
    answer with HTTP 200.
    """
    headers = {'User-Agent': self.user_agent}
    response = request.urlopen(request.Request(url, headers=headers))
    if response.status != 200:
        return None
    return xml2dict(xmlElementTree.parse(response))
def _postToServer(self, url, variables=None, files=None):
    '''Send a request to the LGI server.

    @type url str
    @param url to post, relative to base url or absolute when starting with "https:"
    @type variables dict(str)
    @param variables key->value pairs to POST
    @type files dict(dict(str))
    @param files files to upload key->(filename_sent,local_filename)
    @rtype dict
    @return Dictionary of parsed XML response
    @raise LGIServerException when the project server returned an error response
    '''
    # BUGFIX: mutable default arguments ({} shared across calls) replaced by
    # the None-sentinel idiom; behavior for callers is unchanged.
    if variables is None:
        variables = {}
    if files is None:
        files = {}
    if self._connection is None:
        self.connect()
    # relative to base url
    if not url.lower().startswith('https:'):
        url = self._url + url
    boundary = "@$_Th1s_1s_th3_b0und@ry_@$"
    data = []
    if variables:
        for key in variables:
            data.append("--" + boundary)
            data.append('Content-Disposition: form-data; name="%s"' % key)
            data.append("")
            data.append(str(variables[key]))
    if files:
        for key in files:
            data.append("--" + boundary)
            data.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, files[key][0]))
            data.append("Content-Type: application/octet-stream")
            data.append("")
            data.append(files[key][1])
    data.append("--" + boundary + "--")
    data.append("")
    body = "\r\n".join(data)
    headers = {
        "Content-type": "multipart/form-data; boundary=%s" % boundary,
        "Accept": "text/plain",
        "Connection": "keep-alive"
    }
    try:
        self._connection.request("POST", url, body, headers)
        response = self._connection.getresponse()
    except httplib.HTTPException:
        # silently reconnect if connection re-use comes to its limit
        # TODO use urllib2 for more intelligent retries
        self._connection.close()
        self._connection.connect()
        self._connection.request("POST", url, body, headers)
        response = self._connection.getresponse()
    rdata = response.read()
    resp = xml2dict.xml2dict(xml.dom.minidom.parseString(rdata))
    if 'LGI' in resp and 'response' in resp['LGI'] and 'error' in resp['LGI']['response']:
        error = resp['LGI']['response']['error']
        # BUGFIX: error['number'] is a string parsed from XML; %d on a string
        # raised TypeError and hid the actual server error message.
        raise LGIServerException('LGI error %d: %s' % (int(error['number']), error['message']), resp)
    return resp
def generateWikipediaTuples(verbose=False):
    """Look up each band referenced in the XML export on DBpedia.

    Builds one {"values": [wikipedia_id, name, hometown]} row per unique
    band; falls back to the URL name when DBpedia has no name property.
    """
    music_dict = xml2dict("music")
    band_url_names = []
    for entries in music_dict.values():
        for entry in entries:
            band_url_names.append(unidecode(unquote(entry[1].split("/")[-1])))
    band_related_list = []
    processed = set()
    for url_name in band_url_names:
        if url_name in processed:
            continue
        processed.add(url_name)
        try:
            data = requests.get("http://dbpedia.org/data/" + url_name + ".json").json()
            # treats redirecting
            redir_k = "http://dbpedia.org/ontology/wikiPageRedirects"
            if redir_k in data:
                data = requests.get(data[redir_k]["value"] + ".json").json()
            band_data = data["http://dbpedia.org/resource/" + url_name]
            record = {
                "id": "https://en.wikipedia.org/wiki/" + url_name,
                "name": None,
                "hometown": None,
            }
            try:
                for prop in sorted(band_data):
                    suffix = prop.split("/")[-1]
                    if suffix == "name":
                        record["name"] = band_data[prop][0]["value"]
                    elif suffix == "hometown":
                        record["hometown"] = band_data[prop][0]["value"].split("/")[-1]
            except Exception as e:
                print(e)
                print("Unable to get name/hometown of band id", record["id"])
            if record["name"] is None:
                record["name"] = url_name
            band_related_list.append({
                "values": [record["id"], record["name"], record["hometown"]]
            })
            if verbose:
                print(band_related_list[-1])
        except Exception as e:
            print(e)
            print("DBPedia failed at getting bands information\nURL:", url_name)
    return band_related_list
def from_xml_2_dic(path=r'weps2007_data_1.1\training\truth_files',
                   file_name=r'\Abby_Watkins.clust.xml',
                   out_filename='test2.xml'):
    """Round-trip a WEPS XML file: parse it to a dict, print it, write it back.

    Generalized from hard-coded constants: the defaults reproduce the
    original behavior exactly, but any source/destination can now be passed.

    Parameters:
        path         -- directory containing the source XML
        file_name    -- file name, with leading path separator (concatenated
                        directly onto *path*, matching the original layout)
        out_filename -- destination file for the re-serialized XML

    Returns:
        The parsed dictionary.
    """
    # Alternative test inputs kept from the original:
    # path = r'weps2007_data_1.1\training\description_files'
    # file_name = r'\Abby_Watkins.xml'
    dic = xml2dict(path + file_name)
    print(dic)
    dict2xml(dic, out_filename)
    return dic
def from_xml_2_dic():
    """Parse a WEPS truth file into a dict, print it, and serialize it back out."""
    # path = r'weps2007_data_1.1\training\description_files'
    # file_name = r'\Abby_Watkins.xml'
    truth_dir = r'weps2007_data_1.1\training\truth_files'
    truth_file = r'\Abby_Watkins.clust.xml'
    parsed = xml2dict(truth_dir + truth_file)
    print(parsed)
    dict2xml(parsed, 'test2.xml')
def generateLikesTuples():
    """Collect user-likes-band and user-likes-movie triples from the XML exports.

    Returns a dict with keys "likes_band" and "likes_movie", each a list of
    {"values": [a, b, c]} rows taken verbatim from the export items.
    """
    likes = {"likes_band": [], "likes_movie": []}
    band_data = xml2dict("music")
    movie_data = xml2dict("movie")
    # Both exports share the same item layout, so one loop handles both.
    for target, data in (("likes_band", band_data), ("likes_movie", movie_data)):
        for entries in data.values():
            for entry in entries:
                likes[target].append({"values": [entry[0], entry[1], entry[2]]})
    return likes
def __readConfig_file(self, filename):
    '''Parse an LGI user configuration file and populate client state.

    @type filename str
    @param filename path to the XML user configuration file
    @raise LGIClientException when the file is not a valid LGI user configuration
    '''
    cfg = xml2dict.xml2dict(xml.dom.minidom.parse(filename))
    if not 'LGI_user_config' in cfg:
        raise LGIClientException('Invalid LGI user configuration: %s'%filename)
    cfg = cfg['LGI_user_config']
    self._user = cfg['user']
    self._groups = cfg['groups']
    self._url = cfg['defaultserver']
    self._project = cfg['defaultproject']
    # NOTE(review): certificate, private key and CA chain are all pointed at
    # the configuration file itself -- this presumes the file bundles all PEM
    # material alongside the XML; confirm, since cfg may carry dedicated
    # path entries that are being ignored here.
    self._certificate = os.path.abspath(filename)
    self._privateKey = os.path.abspath(filename)
    self._caChain = os.path.abspath(filename)
def parseConfig(self, LGIconf, reldir=None):
    '''Load configuration from resource daemon configuration file.

    If reldir is given, relative file paths for certificates are resolved
    to this directory. If reldir is None and LGIconf is a filename, reldir
    is assumed to be the dirname of LGIconf. If reldir is None and LGIconf
    is a file object, relative paths are retained.

    @type LGIconf str
    @param LGIconf path to LGI.cfg
    @type reldir str
    @param reldir optional directory to resolve relative pathnames to
    @raise LGIResourceException when more than one project is configured
    '''
    if LGIconf is None:
        return
    conf = xml2dict.xml2dict(xml.dom.minidom.parse(LGIconf))
    if reldir is None and isinstance(LGIconf, str):
        reldir = os.path.dirname(LGIconf)

    def _resolve(path):
        # Resolve a possibly-relative certificate path against reldir;
        # factored out of three identical copy-pasted stanzas.
        if reldir is not None and not os.path.isabs(path):
            return os.path.join(reldir, path)
        return path

    resource = conf['LGI']['resource']
    self._certificate = _resolve(resource['resource_certificate_file'])
    self._privateKey = _resolve(resource['resource_key_file'])
    self._caChain = _resolve(conf['LGI']['ca_certificate_file'])
    project = resource['project']
    if isinstance(project, list):
        raise LGIResourceException("Only one project allowed in LGI configuration right now")
    self._url = project['project_master_server']
    self._project = project['project_name']
    # A single application comes through as a dict; normalize to a list.
    if not isinstance(project['application'], list):
        project['application'] = [ project['application'] ]
    # List comprehension instead of map(): also yields a real list under
    # Python 3, where map() would hand setApplications a lazy iterator.
    apps = [app['application_name'] for app in project['application']]
    self.setApplications(apps)
def send_poivy(self, username, password, number_from, number_to, msg):
    """Send an SMS through the Poivy web gateway.

    Silently returns None when the message is empty or any credential /
    number is missing; raises on a non-success gateway result; otherwise
    returns the reported part count.
    """
    url = 'https://www.poivy.com/myaccount/sendsms.php'
    # Guard clauses: nothing to send, or incomplete parameters.
    if len(msg) == 0:
        return
    if any(v is None for v in (username, password, number_from, number_to)):
        return
    payload = {
        'username': username,
        'password': password,
        'from': number_from,
        'to': number_to,
        'text': msg
    }
    raw = web_browser.WebBrowser()(url, get_data=payload)
    parsed = xml2dict.xml2dict(raw)
    sms_response = parsed['SmsResponse']
    if sms_response['resultstring'] != 'success':
        raise Exception(sms_response['endcause'])
    return sms_response['partcount']
def convert_rq_mat2hdf5(mat_filename, hdf5_filename=None):
    """
    This function converts a .rq.mat file into a .rq.hdf5 file.
    _________________________
    Inputs:
        mat_filename   - The .rq.mat file name.
    Optional arguments:
        hdf5_filename  - The .rq.hdf5 file name to save. If empty, default
                         will be mat_filename with .rq.hdf5 extension
    Returns:
        None

    Versioning:
        20140409 CHF - Created
        20140617 CHF - Changed to use /settings/ group
        20140620 CHF - Now uses /rqs, /livetime groups
    To do: Error handling, verbosity arguments, rewrite option
    _________________________
    """
    # Need this to read .rq.mat file. Only pre-v7.3 mat files supported
    from scipy.io import matlab

    # Load .rq.mat file
    m = matlab.loadmat(mat_filename, squeeze_me=True, struct_as_record=False)

    # Create output file. If no output name provided, derive it from the mat
    # file name. BUGFIX: str.replace('mat', 'hdf5') rewrote EVERY 'mat'
    # substring anywhere in the path; splitext swaps only the extension.
    if not hdf5_filename:
        hdf5_filename = os.path.splitext(mat_filename)[0] + '.hdf5'

    if os.path.exists(hdf5_filename):
        os.remove(hdf5_filename)
        print('*******************************************************')
        print('*** File already exists, deleting to create new one ***')
        print('*******************************************************')

    f = h5py.File(hdf5_filename)

    # Get settings
    admin = m['admin']

    # Now do the settings. Create a group, one for each settings type
    settings_group = f.create_group("settings")
    evt_settings_group = settings_group.create_group("evt_settings")
    daq_settings_group = settings_group.create_group("daq_settings")
    trigger_settings_group = settings_group.create_group("trigger_settings")
    daq_settings_global_group = daq_settings_group.create_group("global")
    daq_settings_sis3301_group = daq_settings_group.create_group("sis3301")

    # getattr() since 'global' is a reserved word (FAIL) and cannot be
    # reached with plain attribute syntax.
    daq_temp1 = getattr(admin.daq_settings, 'global')
    daq_temp2 = getattr(admin.daq_settings.sis3301, 'global')

    # Convert settings from mat structure format to dictionaries, dropping
    # scipy's '_fieldnames' bookkeeping entry from each.
    evt_settings_dict = admin.evt_settings.__dict__
    daq_settings_global_dict = daq_temp1.__dict__
    daq_settings_sis3301_dict = daq_temp2.__dict__
    trigger_settings_dict_temp = admin.daq_settings.LUXTriggerSettings.__dict__
    for settings in (evt_settings_dict, daq_settings_global_dict,
                     daq_settings_sis3301_dict, trigger_settings_dict_temp):
        settings.pop('_fieldnames', None)

    # For some reason, the LUXTriggerSettings data may still be in XML format
    # (a string, not a dictionary). Check if it's the case, and convert.
    # BUGFIX: the old check compared the class *object* against the strings
    # 'unicode'/'str', which is always False, so conversion never happened.
    # Compare the class name instead.
    cl = trigger_settings_dict_temp['TriggerBuilder'].__class__
    if cl.__name__ in ('unicode', 'str'):
        # Imported lazily: only needed in this branch, and importing it
        # unconditionally would raise path-not-found errors elsewhere.
        from xml2dict import xml2dict
        # xml2dict requires a single top-level wrap-around tag; add it here
        # and strip it again on the next line.
        new_dict = xml2dict(str('<TriggerBuilder>' + trigger_settings_dict_temp['TriggerBuilder'] + '</TriggerBuilder>'))
        trigger_settings_dict = new_dict['TriggerBuilder']
    else:
        trigger_settings_dict = trigger_settings_dict_temp['TriggerBuilder']

    settings_dict = admin.__dict__
    # BUGFIX: dict has no .remove() -- the old code raised AttributeError
    # whenever '_fieldnames' was present. pop() with a default is safe.
    settings_dict.pop('_fieldnames', None)

    # Loop for each attribute and write to group
    write_attributes_recursive(settings_dict, settings_group)

    # Write livetime
    livetime_group = f.create_group("livetime")
    livetime_group['livetime_latch_samples'] = m['livetime_latch_samples']
    livetime_group['livetime_end_samples'] = m['livetime_end_samples']

    # Create list of rq names; remove settings and other __bookkeeping__
    # entries. list() so this also works with Python 3 key views.
    rq_names_keys = list(m.keys())
    keys_to_remove = ['admin', 'source_filename', '__header__', '__version__', '__globals__',
                      # livetime lives in its own group -- not an RQ
                      'livetime_latch_samples', 'livetime_end_samples']
    for k in keys_to_remove:
        if k in rq_names_keys:
            rq_names_keys.remove(k)

    # Create RQs group
    rqs_group = f.create_group("rqs")

    # Loop for each rq
    for rq_name in rq_names_keys:
        rq_data = m[rq_name]
        dset = rqs_group.create_dataset(rq_name, data=rq_data, compression=9, fletcher32=True)
        print('Done with %s' % rq_name)

    print('Finished converting file. Output file produced: %s' % hdf5_filename)
    f.flush()
    f.close()
def _xml_to_dict(self, xml):
    """
    Internal method to turn XML to dictionary output.

    Developers can overwrite this method to use their
    favorite XML parser of choice.
    """
    # Delegates to the module-level xml2dict helper; subclasses may
    # override to plug in a different parser.
    return xml2dict(xml)
def do_x2d(self,path):
    """Convert the XML file at *path* to a dict and store it on self.dict.

    An empty *path* falls back to self.path. The printed message is Chinese
    for "conversion succeeded; the dict is saved in self.dict".
    """
    # Disabled alternative: interactively prompt for the source-XML path
    # (the prompt string means "enter the source xml path").
    # if path=="": path = raw_input(u"===输入源xml的路径===")
    if path=="":
        path = self.path
    # print path
    self.dict = xml2dict(path)
    print u"转换成功,字典保存在self.dict中"