import mailbox

import dateutil.parser


def parse(filename='..\\data\\enron\\processed\\small.mbox'):
    # Endpoint, Message, CustomHeader and parse_endpoints are assumed to be
    # defined elsewhere in this module.
    messages = []
    mbox_file = mailbox.mbox(filename)
    for message in mbox_file:
        # See if it has a From; if not it's a bad mbox, just skip for now.
        if not message['From']:
            continue

        # Create the sender Endpoint.
        sender_add, sender_name = parse_endpoints(message['From'])
        if not (sender_add and sender_name):
            continue
        sendEnd = Endpoint(address=sender_add[0], name=sender_name[0])

        # Create the Message object.
        msg_id = message['Message-ID']
        if msg_id:
            print("********* " + msg_id)
        else:
            print("********* " + str(message))
        subject = message['Subject']
        date = dateutil.parser.parse(message['Date'])
        body = message.get_payload()
        mess = Message(id=msg_id, sender=sendEnd, subject=subject,
                       datetime=date, body=body, flatmbox=str(message))

        # Add the recipient Endpoints.
        recipients_add, recipients_name = parse_endpoints(message['To'])
        if recipients_add and recipients_name:
            for recipient_add, recipient_name in zip(recipients_add,
                                                     recipients_name):
                mess.addRecipient(Endpoint(address=recipient_add,
                                           name=recipient_name))

        # Save all custom (X-*) headers as strings.
        for header_key, header_value in message.items():
            if header_key.lower().startswith('x'):
                mess.addCH(CustomHeader(header_key=header_key,
                                        header_value=header_value))

        # Add to the list.
        messages.append(mess)
    return messages
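# A minimal usage sketch for parse(); the attribute names on the returned
# Message objects are an assumption, taken from the keyword arguments they
# were built with above.
if __name__ == '__main__':
    for mess in parse():
        print(mess.id, mess.subject)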
def inspect(base_domain):
    domain = Domain(base_domain)
    domain.http = Endpoint("http", "root", base_domain)
    domain.httpwww = Endpoint("http", "www", base_domain)
    domain.https = Endpoint("https", "root", base_domain)
    domain.httpswww = Endpoint("https", "www", base_domain)

    # Analyze HTTP endpoint responsiveness and behavior.
    basic_check(domain.http)
    basic_check(domain.httpwww)
    basic_check(domain.https)
    basic_check(domain.httpswww)

    # Analyze HSTS header, if present, on each HTTPS endpoint.
    hsts_check(domain.https)
    hsts_check(domain.httpswww)

    return result_for(domain)
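# Hedged usage sketch: inspect() scans the four scheme/host combinations
# (http/https x root/www) for one bare hostname, e.g.:
# results = inspect("example.com")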
@classmethod
def delete_url(cls, user, uid):
    """Delete the Endpoint entity with the given uid, if owned by user."""
    ndb_key = ndb.Key('Endpoint', uid)
    query = Endpoint.query(Endpoint.owner == user.key,
                           Endpoint.key == ndb_key).fetch()
    if not query:
        return None
    result = query[0]
    result.key.delete()
    return result
def newEndpoint(endpoint):
    training = Training.query.filter(
        Training.id == endpoint.get("training_id")).one_or_none()
    if training is None:
        abort(404, "Training with id {training_id} does not exist".format(
            training_id=endpoint.get("training_id")))

    # A Query object is always truthy, so the existence check must actually
    # execute the query rather than compare the Query itself to None.
    existing = Endpoint.query.filter(
        Endpoint.name == endpoint.get("name")).one_or_none()
    if existing is not None:
        abort(409, "Endpoint {name} exists already".format(
            name=endpoint.get("name")))

    new_endpoint = Endpoint(id=endpoint.get("id"), name=endpoint.get("name"))
    training.endpoints.append(new_endpoint)
    db.session.add(new_endpoint)
    db.session.commit()
    return jsonify(new_endpoint.serialize()), 201
import configparser


def load_endpoints():
    """Build Endpoint objects for every service listed in endpoints.cfg."""
    config = configparser.ConfigParser()
    config.read('endpoints.cfg')
    port = config['DEFAULT']['port']

    sequencer = Endpoint(config['sequencer']['host'], port,
                         config['sequencer']['route'])
    shuffler = Endpoint(config['shuffler']['host'], port,
                        config['shuffler']['route'])

    # Bank servers are listed one host/route pair per line, so pair them up
    # instead of passing the whole routes list to every Endpoint.
    banks = []
    hosts = config['bank_server']['host'].split('\n')
    routes = config['bank_server']['route'].split('\n')
    for host, route in zip(hosts, routes):
        banks.append(Endpoint(host, port, route))

    return sequencer, shuffler, banks
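# A sample endpoints.cfg that load_endpoints() can parse. The section and
# option names come from the code above; the sequencer and shuffler hosts
# and routes are made-up placeholders, while the bank hosts mirror the list
# used elsewhere in this codebase:
#
#   [DEFAULT]
#   port = 8080
#
#   [sequencer]
#   host = 10.0.1.10
#   route = /requests/sequence
#
#   [shuffler]
#   host = 10.0.1.11
#   route = /requests/shuffle
#
#   [bank_server]
#   host = 10.0.1.12
#       10.0.1.13
#   route = /requests/process
#       /requests/process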
@classmethod
def update_url(cls, user, project, uid, **kwds):
    """Update the Endpoint entity with the given uid from keyword args."""
    ndb_key = ndb.Key('Endpoint', uid)
    query = Endpoint.query(Endpoint.owner == user,
                           Endpoint.project == project,
                           Endpoint.key == ndb_key).fetch()
    if not query:
        return None
    result = query[0]
    body = cls._parse_keywords(**kwds)
    for key, value in body.items():
        setattr(result, key, value)
    result.put()
    return result
def create_endpoint():
    user = require_user()
    # Dict key views are set-like, so this rejects any payload that is not
    # exactly {"name": ...}; comparing keys() to a list is always False.
    if request.json.keys() != {'name'}:
        return abort(400)
    name = request.json['name']
    if not name:
        return abort(400)
    endpoint = Endpoint(user.id, name)
    db.session.add(endpoint)
    db.session.commit()
    return jsonify(present_endpoint(endpoint))
def update_endpoint(eid):
    user = require_user()
    # Reject payloads containing any key other than 'name' or 'disabled'.
    if request.json.keys() - {'name', 'disabled'}:
        return abort(400)
    endpoint = Endpoint.query.filter_by(id=eid,
                                        user_id=user.id).first_or_404()
    if 'name' in request.json:
        endpoint.name = request.json['name']
    if 'disabled' in request.json:
        endpoint.disabled = request.json['disabled']
    db.session.add(endpoint)
    db.session.commit()
    return jsonify(present_endpoint(endpoint))
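# Why the key checks in the two handlers above work: Python 3 dict key views
# behave like sets, so they compare equal to set literals and support set
# subtraction directly.
payload = {"name": "reports", "disabled": True}
assert payload.keys() == {"name", "disabled"}
assert payload.keys() - {"name", "disabled"} == set()
assert {"extra": 1}.keys() - {"name", "disabled"} == {"extra"}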
@classmethod
def get_url(cls, user):
    """Return all Endpoint entities owned by the given user."""
    return Endpoint.query(Endpoint.owner == user.key).fetch()
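# Hedged usage sketch for the three NDB helpers above; the owning service
# class name (UrlService) is an assumption, since the class itself isn't
# shown in these snippets:
# urls = UrlService.get_url(user)
# UrlService.update_url(user, project, uid, name="renamed")
# UrlService.delete_url(user, uid)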
# coding=utf-8
import os
import json
from random import shuffle

from flask import Flask, jsonify, request, make_response
from flasgger import Swagger, swag_from
from flask_api import status  # HTTP status codes
from werkzeug import exceptions as ex
from models import Response, UBRList, Endpoint
import requests
import logging
from logging.config import fileConfig

# Bank servers
endpoints = [
    Endpoint('10.0.1.12', 8080, '/requests/process'),
    Endpoint('10.0.1.13', 8080, '/requests/process'),
    Endpoint('10.0.1.14', 8080, '/requests/process'),
    Endpoint('10.0.1.15', 8080, '/requests/process'),
]

# Pull options from the environment
debug = (os.getenv('DEBUG', 'False') == 'True')
port = os.getenv('PORT', '8080')

# Initialize Flask
app = Flask(__name__)

# Configure logging
fileConfig('/vagrant/shuffler/logger.cfg')
log = logging.getLogger()
import os

from config import db
from models import Notebook, Training, Endpoint

NOTEBOOKS = [{"notebook_name": "FirstNotebook", "notebook_id": "12345"}]
TRAININGS = [{"training_name": "FirstTraining", "training_id": "12345"}]
ENDPOINTS = [{"endpoint_name": "FirstEndpoint", "endpoint_id": "12345"}]

# Start from a clean database.
if os.path.exists("mimir.db"):
    os.remove("mimir.db")
db.create_all()

for notebook in NOTEBOOKS:
    n = Notebook(notebook_id=notebook.get("notebook_id"),
                 notebook_name=notebook.get("notebook_name"))
    db.session.add(n)

for training in TRAININGS:
    t = Training(training_id=training.get("training_id"),
                 training_name=training.get("training_name"))
    db.session.add(t)

for endpoint in ENDPOINTS:
    e = Endpoint(endpoint_id=endpoint.get("endpoint_id"),
                 endpoint_name=endpoint.get("endpoint_name"))
    db.session.add(e)

db.session.commit()
def endpoints_check():
    global endpoint_definitions
    global alert_definitions
    global metrics_definitions
    global db

    logger.info("collecting endpoint health")

    with ThreadPoolExecutor(max_workers=settings.MAX_WORKERS) as executor:
        for group in endpoint_definitions["groups"]:
            environment_group_id = group["id"]
            for environment in group["environments"]:
                environment_id = environment["id"]
                for endpoint_group in environment["endpoint-groups"]:
                    endpoint_group_id = endpoint_group["id"]
                    if endpoint_group["enabled"] != "true":
                        continue
                    for endpoint in endpoint_group["endpoints"]:
                        endpoint_id = endpoint["id"]
                        endpoint_url = endpoint["url"]

                        # Optionally append a trace id to the URL so the
                        # request can be correlated downstream; a custom
                        # argument key may override the default.
                        if endpoint.get("appendTraceID"):
                            trace_argument_key = endpoint.get(
                                "traceArgumentKey", "cupcake_trace_id")
                            endpoint_url = create_or_append_query_string(
                                original=endpoint_url,
                                argument="{}={}".format(trace_argument_key,
                                                        get_trace_id()))

                        # Optionally append the attempt-counter placeholder,
                        # again allowing a custom argument key.
                        if endpoint.get("appendAttempt"):
                            append_attempt_key = endpoint.get(
                                "attemptArgumentKey", "cupcake_attempt")
                            endpoint_url = create_or_append_query_string(
                                original=endpoint_url,
                                argument="{}=##CUPCAKE_ATTEMPT##".format(
                                    append_attempt_key))

                        endpoint_expected = endpoint.get("expected", "")

                        endpoint_threshold = None
                        if "threshold" in endpoint:
                            endpoint_threshold = Threshold(
                                endpoint["threshold"])

                        endpoint_model = Endpoint(
                            environment_group=environment_group_id,
                            environment=environment_id,
                            endpoint_group=endpoint_group_id,
                            endpoint=endpoint_id,
                            url=endpoint_url)

                        metrics_groups = get_endpoint_default(
                            model=endpoint_model,
                            property="metrics-groups",
                            default_value=["default"])
                        alert_groups = get_endpoint_default(
                            model=endpoint_model,
                            property="alert-groups",
                            default_value=get_alerts_in_group(
                                "default", alert_definitions))

                        executor.submit(run_test, endpoint_model,
                                        metrics_groups, alert_groups,
                                        endpoint_expected, endpoint_threshold)
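# The nested definitions structure endpoints_check() walks, reconstructed
# from the key accesses above; the ids and URL are made-up placeholders:
endpoint_definitions_example = {
    "groups": [{
        "id": "group-1",
        "environments": [{
            "id": "prod",
            "endpoint-groups": [{
                "id": "public",
                "enabled": "true",
                "endpoints": [{
                    "id": "homepage",
                    "url": "https://example.com/",
                    "appendTraceID": True,
                    "expected": "200",
                }],
            }],
        }],
    }],
}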
from datetime import datetime

import config
from config import db
from models import Notebook, Training, Endpoint

NOTEBOOKS = []
TRAININGS = []
ENDPOINTS = []

config.createDB()
db.create_all()

for notebook in NOTEBOOKS:
    n = Notebook(id=notebook.get("id"), name=notebook.get("name"))
    db.session.add(n)

for training in TRAININGS:
    t = Training(id=training.get("training_id"), name=training.get("name"))
    # Attach the training's endpoints to the training itself, parsing each
    # created date from its string form.
    for endpoint in training.get("endpoints"):
        endpoint_id, endpoint_name, created_date = endpoint
        t.endpoints.append(
            Endpoint(id=endpoint_id,
                     name=endpoint_name,
                     created_date=datetime.strptime(created_date,
                                                    "%Y-%m-%d %H:%M:%S")))
    db.session.add(t)

for endpoint in ENDPOINTS:
    e = Endpoint(id=endpoint.get("id"), name=endpoint.get("name"))
    db.session.add(e)

db.session.commit()
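# The shape the seeding loop expects for TRAININGS entries, inferred from the
# unpacking above (values are made-up placeholders):
# TRAININGS = [{
#     "training_id": "12345",
#     "name": "FirstTraining",
#     "endpoints": [("67890", "FirstEndpoint", "2020-01-01 12:00:00")],
# }]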