def get_ossec_conf(section=None, field=None):
    """
    Returns ossec.conf as dictionary.

    :param section: Filters by section (i.e. rules).
    :param field: Filters by field in section (i.e. included).
    :return: ossec.conf as dictionary.
    :raises WazuhException: 1001 when the module imports failed, 1101 when the
        file cannot be read/parsed, 1102 for an unknown section, 1103 for an
        unknown field.
    """
    if import_problem is not None:
        raise WazuhException(1001, import_problem)

    try:
        with open(common.ossec_conf, 'r') as f_ossec:
            read_conf = f_ossec.read()

        # "--" is not allowed inside XML comments; neutralize it so the
        # parser does not choke on it.
        read_conf = read_conf.replace(" -- ", " -INVALID_CHAR ")
        read_conf = __prepare_ossecconf(read_conf)
        json_conf = xml_json.data(fromstring(read_conf))
        data = __unify_ossecconf(json_conf)
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        raise WazuhException(1101)

    if section:
        try:
            data = data[section]
        except Exception:
            raise WazuhException(1102)

    if section and field:
        try:
            data = data[field]  # data[section][field]
        except Exception:
            raise WazuhException(1103)

    return data
def _get_caps(self, url_base, apikey):
    ''' Gets caps for indexer url
    url_base (str): url of torznab indexer
    apikey (str): api key for indexer

    Gets indexer caps and caches them in the CAPS table.

    Returns list of caps, or None when the request/parse fails.
    '''
    logging.info('Getting caps for {}'.format(url_base))

    url = '{}api?apikey={}&t=caps'.format(url_base, apikey)
    try:
        xml = Url.open(url).text
        caps = gdata.data(fromstring(xml))['caps']['searching']['movie-search']['supportedParams']
        # Cache the caps string so later lookups can skip the network call.
        core.sql.write('CAPS', {'url': url_base, 'caps': caps})
    except Exception as e:
        # The original logged an empty message, which is unsearchable in
        # the logs; include the indexer url for context.
        logging.warning('Unable to get caps for {}.'.format(url_base), exc_info=True)
        return None

    return caps.split(',')
def _generate_export(self):
    """Convert ``self.input`` XML to JSON and write it to the export file."""
    export_file = self.export_file.format('json')

    # Strip the xsi schema-location attributes before parsing; they are
    # not needed in the JSON output.
    clean = re.sub(
        r' xmlns:xsi="http:\/\/www\.w3\.org\/2001\/XMLSchema-instance" xsi:noNamespaceSchemaLocation=".*\.xsd"',
        '',
        self.input
    )

    # Renamed from ``json`` to avoid shadowing the stdlib module name.
    json_data = gdata.data(ElementTree.fromstring(clean))
    with open(export_file, 'w') as out_file:
        dump(json_data, out_file, indent=4)
def convertXML2JSON(file):
    """Read an XML file, convert it to JSON and return the parsed object.

    file (str): path of the XML file to convert.

    Returns the converted document as the structure produced by
    ``json.loads`` (dict/list).
    """
    # Renamed from ``input`` to avoid shadowing the builtin.
    with open(file, "rb") as xml_file:
        json_out = bf.data(fromstring(xml_file.read()))

    json_string = json.dumps(json_out, indent=2)

    # Post-process the serialized JSON: drop matches of EMPTY and rename
    # the converter's value key (presumably '$'-style) to plain "value".
    result = regex.sub(EMPTY, json_string)
    result = result.replace(VALUE, "value")
    return json.loads(result)
def export_item(item, outpath):
    """Decrypt, decompress and export one item to JSON and XML files.

    item (str): path of the DES-CBC-encrypted, gzip-compressed XML file.
    outpath (str): output path prefix; '.json' and '.xml' are appended.
    """
    cipher = DES.new(key, DES.MODE_CBC, iv)

    # Renamed from ``input`` to avoid shadowing the builtin.
    with open(item, 'rb') as encrypted_file:
        decrypted = cipher.decrypt(encrypted_file.read())

    # 16 + MAX_WBITS tells zlib to expect a gzip header/trailer.
    xml = zlib.decompress(decrypted, 16 + zlib.MAX_WBITS).decode('utf-8')

    # Renamed from ``json`` to avoid shadowing the stdlib module name.
    json_data = gdata.data(ElementTree.fromstring(xml))
    with open(outpath + '.json', 'w') as out:
        dump(json_data, out, indent=4)
    with open(outpath + '.xml', 'w') as out:
        out.write(xml)
def parse(xml, imdbid):
    """Parse LimeTorrents search-result XML into result dicts.

    xml (str): rss feed text from LimeTorrents.
    imdbid (str): imdb id to attach to each result.

    Returns list of result dicts (empty list on unparseable feed).
    """
    logging.info('Parsing LimeTorrents results.')

    try:
        items = gdata.data(fromstring(xml))['rss']['channel']['item']
    except Exception as e:
        logging.error('Unexpected XML format from LimeTorrents.', exc_info=True)
        return []

    results = []
    for i in items:
        result = {}
        try:
            result['score'] = 0
            result['size'] = i['size']['$t']
            result['status'] = 'Available'
            result['pubdate'] = None
            result['title'] = i['title']['$t']
            result['imdbid'] = imdbid
            result['indexer'] = 'LimeTorrents'
            result['info_link'] = i['link']['$t']
            result['torrentfile'] = i['enclosure']['url']
            result['guid'] = result['torrentfile'].split('.')[1].split('/')[-1].lower()
            result['type'] = 'torrent'
            result['downloadid'] = None
            result['freeleech'] = 0
            result['download_client'] = None

            # The seed count is embedded in the description text after
            # 'Seeds: '; collect the leading digits.
            s = i['description']['$t'].split('Seeds: ')[1]
            seed_str = ''
            # Guard against the description ending right after the digits:
            # the original 'while s[0].isdigit()' raised IndexError on an
            # empty string and silently dropped the whole result.
            while s and s[0].isdigit():
                seed_str += s[0]
                s = s[1:]
            # Default to 0 instead of raising ValueError on int('').
            result['seeders'] = int(seed_str) if seed_str else 0

            results.append(result)
        except Exception as e:
            logging.error('Error parsing LimeTorrents XML.', exc_info=True)
            continue

    logging.info('Found {} results from LimeTorrents.'.format(len(results)))
    return results
def parse(xml, imdbid):
    """Parse Torrentz2 search-result XML into a list of result dicts.

    xml (str): rss feed text from Torrentz2.
    imdbid (str): imdb id to attach to each result.

    Returns list of result dicts (empty list on unparseable feed).
    """
    logging.info('Parsing Torrentz2 results.')

    try:
        items = gdata.data(fromstring(xml))['rss']['channel']['item']
    except Exception as e:
        logging.error('Unexpected XML format from Torrentz2.', exc_info=True)
        return []

    results = []
    for entry in items:
        try:
            # Description is space-separated; the info hash is the last
            # field, size value/unit at indexes 1/2, seeders at index 4.
            fields = entry['description']['$t'].split(' ')
            info_hash = fields[-1]
            multiplier = (1024 ** 2) if fields[2] == 'MB' else (1024 ** 3)

            parsed = {
                'score': 0,
                'size': int(fields[1]) * multiplier,
                'status': 'Available',
                'pubdate': None,
                'title': entry['title']['$t'],
                'imdbid': imdbid,
                'indexer': 'Torrentz2',
                'info_link': entry['link']['$t'],
                'torrentfile': magnet(info_hash),
                'guid': info_hash,
                'type': 'magnet',
                'downloadid': None,
                'seeders': int(fields[4]),
                'download_client': None,
                'freeleech': 0,
            }
            results.append(parsed)
        except Exception as e:
            logging.error('Error parsing Torrentz2 XML.', exc_info=True)
            continue

    logging.info('Found {} results from Torrentz2.'.format(len(results)))
    return results
def parse(xml, imdbid):
    """Parse TorrentDownloads search-result XML into a list of result dicts.

    xml (str): rss feed text from TorrentDownloads.
    imdbid (str): imdb id to attach to each result.

    Returns list of result dicts (empty list on unparseable feed).
    """
    logging.info('Parsing TorrentDownloads results.')

    try:
        items = gdata.data(fromstring(xml))['rss']['channel']['item']
    except Exception as e:
        logging.error('Unexpected XML format from TorrentDownloads.', exc_info=True)
        return []

    results = []
    for entry in items:
        try:
            info_hash = entry['info_hash']['$t']
            parsed = {
                'score': 0,
                'size': entry['size']['$t'],
                'status': 'Available',
                'pubdate': None,
                'title': entry['title']['$t'],
                'imdbid': imdbid,
                'indexer': 'TorrentDownloads',
                # Feed links are site-relative; prepend the site root.
                'info_link': 'http://www.torrentdownloads.me{}'.format(entry['link']['$t']),
                'torrentfile': magnet(info_hash),
                'guid': info_hash,
                'type': 'magnet',
                'downloadid': None,
                'freeleech': 0,
                'download_client': None,
                'seeders': int(entry['seeders']['$t']),
            }
            results.append(parsed)
        except Exception as e:
            logging.error('Error parsing TorrentDownloads XML.', exc_info=True)
            continue

    logging.info('Found {} results from TorrentDownloads.'.format(len(results)))
    return results
def get_ossec_conf(section=None, field=None):
    """
    Returns ossec.conf as dictionary.

    :param section: Filters by section (i.e. rules).
    :param field: Filters by field in section (i.e. included).
    :return: ossec.conf as dictionary.
    :raises WazuhException: 1001 when the module imports failed, 1101 when the
        file cannot be read/parsed, 1102 for an unknown section, 1103 for an
        unknown field.
    """
    if import_problem is not None:
        raise WazuhException(1001, import_problem)

    try:
        with open(common.ossec_conf, 'r') as f_ossec:
            read_conf = f_ossec.read()

        # "--" is not allowed inside XML comments; neutralize it so the
        # parser does not choke on it.
        read_conf = read_conf.replace(" -- ", " -INVALID_CHAR ")
        read_conf = __prepare_ossecconf(read_conf)
        json_conf = xml_json.data(fromstring(read_conf))
        data = __unify_ossecconf(json_conf)
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        raise WazuhException(1101)

    if section:
        try:
            data = data[section]
        except Exception:
            raise WazuhException(1102)

    if section and field:
        try:
            data = data[field]  # data[section][field]
        except Exception:
            raise WazuhException(1103)

    return data
def create_settings_json(input_file):
    """Parse an Open Ephys settings.xml file into an info/neuropix dict.

    input_file (str): path to the Open Ephys GUI settings.xml file.

    Returns dict with keys 'info' (GUI version/machine/os/date metadata)
    and 'neuropix' (probe settings plus fixed subprocessor descriptions).
    """
    with open(input_file, "r") as settings_file:
        file_string = settings_file.read()

    parsed = gdata.data(fromstring(file_string))

    info_dict = {
        'software': 'Open Ephys GUI',
        'version': parsed['SETTINGS']['INFO']['VERSION']['$t'],
        'machine': parsed['SETTINGS']['INFO']['MACHINE']['$t'],
        'os': parsed['SETTINGS']['INFO']['OS']['$t'],
        'date': parsed['SETTINGS']['INFO']['DATE']['$t'],
    }

    neuropix = {}
    # NOTE(review): assumes the Neuropix processor lives in the second
    # SIGNALCHAIN entry -- confirm against the recordings being parsed.
    for processor in parsed['SETTINGS']['SIGNALCHAIN'][1]['PROCESSOR']:
        # Idiomatic membership test instead of str.find(...) > -1.
        if 'Neuropix' in processor['name']:
            # Probe phase is encoded in the last two name characters.
            neuropix['phase'] = processor['name'][-2:]
            try:
                settings = processor['EDITOR']['NEUROPIXELS']
                # 'info' holds "Key: Value" pairs on every other line.
                hardware_info = [
                    pair.split(': ')
                    for pair in settings['info'].split('\n')[::2]
                ]
                neuropix['ap gain'] = settings['apGainValue']
                neuropix['lfp gain'] = settings['lfpGainValue']
                neuropix['reference channel'] = settings['referenceChannel']
                neuropix['filter cut'] = settings['filterCut']
                for pair in hardware_info:
                    neuropix[pair[0].lower()] = pair[1]
            except KeyError:
                neuropix['error'] = 'probe info not found'

    # Fixed hardware description for the two Neuropixels 3a subprocessors.
    sp0 = {
        'name': 'Neuropix-3a-100.0',
        'type': 'AP band',
        'num_channels': 384,
        'sample_rate': 30000.0,
        'bit_volts': 0.195,
    }
    sp1 = {
        'name': 'Neuropix-3a-100.1',
        'type': 'LFP band',
        'num_channels': 384,
        'sample_rate': 2500.0,
        'bit_volts': 0.195,
    }
    neuropix['subprocessors'] = [sp0, sp1]

    return {'info': info_dict, 'neuropix': neuropix}
def test_connection(indexer, apikey):
    ''' Tests connection to NewzNab API
    indexer (str): url of indexer
    apikey (str): indexer api key

    Test searches for imdbid tt0063350 (Night of the Living Dead 1968)

    Returns dict ajax-style response
    '''
    # rstrip handles any number of trailing slashes and, unlike the
    # original "while indexer[-1] == '/'" loop, cannot IndexError on an
    # all-slash string like '///'.
    indexer = (indexer or '').rstrip('/')
    if not indexer:
        return {'response': False, 'error': _('Indexer URL is blank.')}

    response = {}
    logging.info('Testing connection to {}.'.format(indexer))

    url = '{}/api?apikey={}&t=search&id=tt0063350'.format(indexer, apikey)
    try:
        r = Url.open(url)
        if r.status_code != 200:
            return {
                'response': False,
                'error': '{} {}'.format(r.status_code, r.reason.title())
            }
        else:
            response = r.text
    except (SystemExit, KeyboardInterrupt):
        raise
    except Exception as e:
        logging.error('Newz/TorzNab connection check.', exc_info=True)
        return {
            'response': False,
            'error': _('No connection could be made because the target machine actively refused it.'
                       )
        }

    # A well-formed error response carries error/code/description fields.
    error_json = gdata.data(fromstring(response))
    e_code = error_json.get('error', {}).get('code')
    if e_code:
        # 'Missing parameter' means the key was accepted but the query was
        # incomplete -- the connection itself works.
        if error_json['error'].get('description') == 'Missing parameter':
            logging.info('Newz/TorzNab connection test successful.')
            return {'response': True, 'message': _('Connection successful.')}
        else:
            logging.error('Newz/TorzNab connection test failed. {}'.format(
                error_json['error'].get('description')))
            return {
                'response': False,
                'error': error_json['error'].get('description')
            }
    elif 'unauthorized' in response.lower():
        logging.error('Newz/TorzNab connection failed - Incorrect API key.')
        return {'response': False, 'error': _('Incorrect API key.')}
    else:
        logging.info('Newz/TorzNab connection test successful.')
        return {'response': True, 'message': _('Connection successful.')}
def parse_newznab_xml(self, feed):
    ''' Parse xml from Newznab api.
    feed (str): xml feed text

    Replaces all namespaces with 'ns', so namespaced attributes are
    accessible with the key '{ns}attr'.

    Loads feed with xmljson in gdata format.

    Creates item dict for database table SEARCHRESULTS -- removes unused
    keys and ensures required keys are present (even if blank). The movie's
    imdb id comes from self.imdbid and the result type from self.feed_type.

    Returns list of dicts of parsed nzb information.
    '''
    results = []

    # Rewrite every xmlns URI to the literal "ns" so namespaced elements
    # can be fetched with the fixed key '{ns}attr' below.
    feed = re.sub(r'xmlns:([^=]*)=[^ ]*"', r'xmlns:\1="ns"', feed)

    try:
        channel = gdata.data(fromstring(feed))['rss']['channel']
        indexer = channel['title']['$t']
        items = channel['item']
    except Exception as e:
        logging.error('Unexpected XML format from NewzNab indexer.', exc_info=True)
        return []

    for item in items:
        try:
            # Flatten the newznab <attr name="..." value="..."/> elements
            # into a plain dict for easy lookups.
            item['attr'] = {}
            for i in item['{ns}attr']:
                item['attr'][i['name']] = i['value']

            result = {
                "download_client": None,
                "downloadid": None,
                # downloadvolumefactor == 0 marks a freeleech torrent.
                "freeleech": 1 if item['attr'].get('downloadvolumefactor', 1) == 0 else 0,
                "guid": item.get('link', {}).get('$t'),
                "imdbid": self.imdbid,
                "indexer": indexer,
                # Prefer the permalink guid; fall back to the comments url.
                "info_link": item.get('guid', {}).get('$t') if item.get('guid', {}).get('isPermaLink') else item.get('comments', {}).get('$t'),
                # Slice e.g. 'Mon, 01 Jan 2018 00:00:00' to '01 Jan 2018'.
                "pubdate": item.get('pubDate', {}).get('$t', '')[5:16],
                "score": 0,
                "seeders": 0,
                "size": item.get('size', {}).get('$t') or item.get('enclosure', {}).get('length'),
                "status": "Available",
                "title": item.get('title', {}).get('$t') or item.get('description', {}).get('$t'),
                "torrentfile": None,
                "type": self.feed_type
            }

            # Torrent/magnet feeds carry the download link in guid.
            if result['type'] != 'nzb':
                result['torrentfile'] = result['guid']
                if result['guid'].startswith('magnet'):
                    # Extract the bare info hash from the magnet uri.
                    result['guid'] = result['guid'].split('&')[0].split(':')[-1]
                    result['type'] = 'magnet'
                result['seeders'] = item['attr'].get('seeders')

            results.append(result)
        except Exception as e:
            # Best-effort per item: a malformed entry is skipped, not fatal.
            logging.warning('', exc_info=True)
            continue

    return results
# KMIP account credentials (placeholders) and client configuration.
username = '******'
password = '******'
config = opts.config

credential_factory = CredentialFactory()

# Build the KMIP server account credentials
# TODO (peter-hamilton) Move up into KMIPProxy
if username is None and password is None:
    credential = None
else:
    credential_type = CredentialType.USERNAME_AND_PASSWORD
    credential_value = {'Username': username, 'Password': password}
    credential = credential_factory.create_credential(credential_type,
                                                      credential_value)

# Build the client and connect to the server
client = KMIPProxy(config=config)
client.open()

# Read the test-case XML and convert it to a json-style structure.
tree = ET.parse('TC-OFFSET-1-13.xml')
root = tree.getroot()
kmip_json = gdata.data(root)['KMIP']  # renamed to avoid shadowing stdlib json

# Process the first two request messages from the test case.
request_messages = [kmip_json['RequestMessage'][0],
                    kmip_json['RequestMessage'][1]]
proccess_xml = ProccessXml(credential, client)
for request_message in request_messages:
    proccess_xml.proccess_request(request_message)