import json
import os
import tarfile
from datetime import datetime
from io import BytesIO

import requests
from flask import jsonify, make_response, request
from sqlalchemy import func

# app, db, Rule, RuleSource, from_buffer, errors and error_response are
# provided by the surrounding application package.


def fetch_sources():
    app.logger.info('Fetching rules from {} sources.'.format(
        RuleSource.query.count()))
    rules = []
    for src in RuleSource.query:
        # Download rules from every source.
        app.logger.info('Downloading from "{}".'.format(src.uri))
        # If this is a gzip archive, perform a streamed download and
        # extract it in a temporary directory.
        stream = src.uri.endswith('gz')
        resp = requests.get(src.uri, stream=stream)
        if resp.status_code != 200:
            app.logger.warning('Download of "{}" failed with status {}.'.format(
                src.uri, resp.status_code))
            continue
        if stream:
            tmpdir = '/tmp/{}-{}/'.format(src.name,
                                          datetime.utcnow().isoformat())
            os.mkdir(tmpdir)
            # Tar data is binary, so buffer it in BytesIO, not StringIO.
            ziprules = BytesIO()
            for chunk in resp.iter_content(chunk_size=8192):
                ziprules.write(chunk)
            ziprules.seek(0)
            try:
                zrules = tarfile.open(fileobj=ziprules, mode='r:gz')
            except tarfile.TarError as terr:
                app.logger.warning('Error in rule file: {}\n{}'.format(
                    src.uri, str(terr)))
            else:
                ruleslist = []
                for member in zrules.getmembers():
                    if member.name.endswith('.rules') and member.isfile():
                        # Keep track of extracted file names.
                        ruleslist.append(member.name)
                        zrules.extract(member, path=tmpdir)
                # All rule files found are now extracted into tmpdir.
                for rname in ruleslist:
                    try:
                        rulepath = os.path.join(tmpdir, rname)
                        with open(rulepath, 'rb') as rfile:
                            rules.extend(from_buffer(rfile.read()))
                        os.remove(rulepath)
                    except Exception as e:
                        app.logger.exception(
                            'Unhandled exception: {}. Continuing.'.format(e))
                        continue
                # Extraction creates a rules/ subdirectory; remove that
                # first, then the tmpdir itself.
                os.rmdir(os.path.join(tmpdir, 'rules'))
                os.rmdir(tmpdir)
        else:
            # rules will contain all parsed rules.
            rules.extend(from_buffer(resp.text))
    app.logger.info('Bulk importing {} rules.'.format(len(rules)))
    Rule.bulk_import(rules)
    render_rules()
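# The in-memory tar handling above is plain standard-library behavior; the
# sketch below isolates that pattern so it can be exercised without a
# RuleSource. The helper name is hypothetical, and it uses extractfile()
# instead of extract() as an alternative that avoids the tmpdir
# bookkeeping entirely.
def _example_read_rules_from_targz(raw_bytes):
    """Return the contents of every *.rules member in a gzipped tarball.

    A minimal sketch, assuming raw_bytes holds a complete .tar.gz payload
    such as the body downloaded in fetch_sources().
    """
    contents = []
    with tarfile.open(fileobj=BytesIO(raw_bytes), mode='r:gz') as archive:
        for member in archive.getmembers():
            if member.isfile() and member.name.endswith('.rules'):
                # extractfile() streams the member without touching disk.
                contents.append(archive.extractfile(member).read())
    return contents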
def update_rule(rule_id):
    rule = Rule.query.filter_by(id=rule_id).first_or_404()
    for field in request.json.keys():
        if field in Rule.editable_fields():
            setattr(rule, field, request.json[field])
        elif field in Rule.fields():
            # Known field, but not writable through the API.
            return error_response(
                errors.API_FIELD_NOT_EDITABLE.format(field), 400)
        else:
            # Not a field of Rule at all.
            return error_response(
                errors.API_FIELD_INVALID.format(field), 400)
    db.session.commit()
    return jsonify(rule.to_dict())
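# A hedged client-side sketch for update_rule: the route path, the HTTP verb
# and the 'is_active' field are assumptions (the route decorator is not shown
# above), so treat the URL and payload as illustrative only.
def _example_update_rule_client(base_url, rule_id):
    """Send a JSON update for one rule and return the decoded response."""
    resp = requests.put(
        '{}/rule/{}'.format(base_url, rule_id),
        data=json.dumps({'is_active': False}),
        # Flask's request.json requires an explicit JSON content type.
        headers={'Content-Type': 'application/json'})
    return resp.status_code, resp.json()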
def render_rules():
    app.logger.info('Rendering rules.')
    from flask import current_app
    sbuffer = Rule.renderall()
    fpath = current_app.config['RENDERED_RULES_PATH']
    with open(fpath, 'w') as rfile:
        rfile.write(sbuffer)
    app.logger.info('Finished rendering rules.')
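# render_rules() rewrites the file in place, so a reader that fetches it
# mid-write can see a truncated rule set. A common hardening (an assumption,
# not part of the original code) is to write to a temp file in the same
# directory and rename it over the target, which is atomic on POSIX:
def _example_atomic_render(sbuffer, fpath):
    """Write sbuffer to fpath via a same-directory temporary file."""
    import tempfile
    fd, tmppath = tempfile.mkstemp(dir=os.path.dirname(fpath))
    with os.fdopen(fd, 'w') as tmpfile:
        tmpfile.write(sbuffer)
    # rename() is atomic when source and target share a filesystem.
    os.rename(tmppath, fpath)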
def get_rules():
    if request.args.get('plaintext') in ['1', 'true']:
        # The rendered rules were requested as plaintext.
        resp = make_response(Rule.renderall())
        resp.headers['Content-Disposition'] = 'attachment; filename=mhn.rules'
        return resp
    else:
        # Respond with the active rules as JSON.
        rules = Rule.query.filter_by(is_active=True).\
                group_by(Rule.sid).\
                having(func.max(Rule.rev))
        resp = make_response(json.dumps([ru.to_dict() for ru in rules]))
        resp.headers['Content-Type'] = 'application/json'
        return resp
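# having(func.max(Rule.rev)) above relies on the database treating a bare
# MAX() as a truthy HAVING clause, which is backend-specific. A more
# portable way to pick each sid at its highest rev (a sketch, reusing the
# same Rule model and session) is an explicit subquery join:
def _example_latest_active_rules():
    """Return active rules, one per sid, at their maximum rev."""
    latest = db.session.query(
        Rule.sid, func.max(Rule.rev).label('rev')).\
        group_by(Rule.sid).subquery()
    return Rule.query.filter_by(is_active=True).\
        join(latest, db.and_(Rule.sid == latest.c.sid,
                             Rule.rev == latest.c.rev)).\
        all()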