Code example #1 (votes: 0)
File: Vimeo.py — Project: kaplanmaxe/randapanda
    def populate(self):
        count_found = 0
        count_unfound = 0
        db = DBConnect("meteor")
        connection = db.connect()
        for count in range(0, self.loops):
            #Generate Video ID

            random = RandomVideoId(1, 8)
            video_id = random.randomGenerator()

            #Perform HTTP Request

            http = HTTPRequest(video_id, 1)
            embed_info = http.httpGet()

            #Connect and Insert Into DB if a Video Exists

            #If a video is found
            key = connection.randa_vimeo_videos.count() + 1
            if embed_info != 404:
                data = {
                    "key": key,
                    "randa_id": video_id,
                    "date": datetime.datetime.utcnow()
                }
                try:
                    inserted = connection.randa_vimeo_videos.insert_one(
                        data).inserted_id
                    count_found += 1
                    print video_id + " was inserted: " + str(count + 1)
                except pymongo.errors.DuplicateKeyError:
                    print video_id + "is already in the collection."
            else:
                print video_id + " does not exist: " + str(count + 1)
                count_unfound += 1
        #Print Results of Script
        print str(count_found) + " videos found and " + str(
            count_unfound) + " videos not found."
        #Show Output of DB After
        # db=DBConnect("meteor")
        # cursor = db.connect().randa_vimeo_videos.find()
        # for document in cursor:
        # 	print(document)

        #Debug Stuff To Use To Troubleshoot If Necessary

        # EMPTY DATABASE
        # db=DBConnect("meteor")
        # db.connect().randa_videos.remove({})

        #KNOWN VIDEO THAT WORKS
        # http=HTTPRequest("D7Qgbs8RpN4")

        # print(video_id)
        # print(embed_info)
Code example #2 (votes: 0)
    def testCreateAllergiesJSONResponse(self):
        """Smoke test: fetch AllergyIntolerance resources from the SIIM FHIR
        endpoint and build an allergy JSON response object from the reply."""
        get_request = http.HTTPRequest('GET')
        get_request.setApiEndpoint("http://hackathon.siim.org/fhir")
        get_request.setResource("AllergyIntolerance")
        get_request.setHeadersDict({'content-type': 'application/json'})
        get_request.setApiKey('d6e052ee-18c9-4f3b-a150-302c998e804c')
        raw_response = get_request.executeRequest()
        allergy_response = allergyJSONResponse.createAllergyJSONResponse(raw_response)

        assert True
Code example #3 (votes: 0)
    def run(proxy_server, sock, addr):
        """Serve one client connection of the proxy.

        Reads an HTTP request from *sock*, enforces host restrictions,
        optionally rewrites the User-Agent, and answers either from cache
        or by forwarding the request to the origin server.

        Args:
            proxy_server: owning proxy object providing policy/cache hooks.
            sock: connected client socket.
            addr: client address, used for per-user accounting.
        """

        request = HTTPRequest(sock)
        host = request.get_header('Host')

        # Blocked host: send the forbidden page, close the client socket
        # and e-mail the offending request to the admin.
        if proxy_server.is_restriction_enabled(
        ) and proxy_server.is_in_disallowed_hosts(host):
            logging.info('Bad website! Sending Email to admin.')
            sock.sendall(
                proxy_server.bad_response(
                    'Visiting this site is forbidden!').read())
            sock.close()
            logging.info('Socket connection with peer closed.')
            SendMail().snd_email(request.read().decode(
                request.DEFAULT_ENCODING))
            return

        # Privacy mode replaces the client's User-Agent header.
        if proxy_server.is_privacy_enabled():
            request.set_header('user-agent', proxy_server.privacy_user_agent)
        # NOTE(review): is_cached() receives `sock`, so it presumably replies
        # to the client itself on a cache hit -- confirm; the forwarding
        # below runs only on a miss.
        if not proxy_server.cache_handler.is_cached(
                host, request.method, request.route, request.version, sock):
            server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            server_socket.connect(
                (host, HTTPRequestHandler.HTTP_SERVER_LISTENING_PORT))
            logging.info(
                f'Connection established with host {host} '
                f'on port {HTTPRequestHandler.HTTP_SERVER_LISTENING_PORT}.')
            server_socket.send(request.read())
            logging.info('Request sent to server.')
            response = HTTPResponse(server_socket)
            logging.info('Response read from server.')
            logging.info('Connection with server host closed.')
            # Optionally inject the proxy's navbar into HTML responses.
            if response.is_html() and proxy_server.is_http_injection_enabled():
                response = proxy_server.body_inject(response)
                logging.info('Response injected with CN-Proxy(TM) navbar.')
            # Charge this client for the transferred bytes.
            proxy_server.discharge_user(addr, response.length)
            res = response.read()
            sock.sendall(res)
            if proxy_server.is_caching_enabled():
                proxy_server.cache_handler.store(res, response.pragma,
                                                 response.modified_since,
                                                 response.expire, host,
                                                 request.route)
            logging.info('Response sent back to client.')
            server_socket.close()

        sock.close()
Code example #4 (votes: 0)
    def handleRequest(self, data):
        """Dispatch an incoming raw HTTP request to the matching handler.

        Args:
            data: raw request bytes; decoded as UTF-8 before parsing.

        Returns:
            The handler's response for GET and OPTIONS requests, or the
            not-implemented response for any other method.
        """
        request = HTTPRequest.HTTPRequest(data.decode("utf-8"))

        response = None

        if request.method == "GET":
            response = self.getHandle(request)
        elif request.method == "OPTIONS":
            # OPTIONS asks which request methods this server supports.
            response = self.optionHandle(request)
        else:
            # Bug fix: the original assigned to a misspelled name
            # ("respone"), so unsupported methods returned None instead of
            # the not-implemented response.
            response = self.notImplementedHandle(request)

        return response
Code example #5 (votes: 0)
    def populate(self):
        """Insert randomly generated YouTube video ids that resolve to videos.

        Runs ``self.loops`` probes; ids whose HTTP check does not return 404
        are stored in the ``randa_youtube_videos`` collection with a UTC
        timestamp, and the collection contents are printed afterwards.
        """
        count_found = 0
        count_unfound = 0
        for count in range(0, self.loops):
            # Generate a candidate video id (presumably 11 chars -- confirm
            # against RandomVideoId's constructor contract).

            random = RandomVideoId(0, 11)
            video_id = random.randomGenerator()

            # Probe the id; httpGet() returns 404 when it does not exist.

            http = HTTPRequest(video_id, 0)
            embed_info = http.httpGet()

            # Connect and insert into the DB if the video exists.

            # A video was found.

            if embed_info != 404:
                data = {
                    "randa_id": video_id,
                    "date": datetime.datetime.utcnow()
                }
                # NOTE(review): a new DB connection is opened per found
                # video; hoisting it out of the loop would be cheaper.
                db = DBConnect("meteor")
                inserted = db.connect().randa_youtube_videos.insert_one(
                    data).inserted_id
                count_found += 1
                print video_id + " was inserted: " + str(count + 1)
            else:
                print video_id + " does not exist: " + str(count + 1)
                count_unfound += 1
        # Print summary of this run.
        print str(count_found) + " videos found and " + str(
            count_unfound) + " videos not found."
        # Dump the collection contents after populating.
        db = DBConnect("meteor")
        cursor = db.connect().randa_youtube_videos.find()
        for document in cursor:
            print(document)
Code example #6 (votes: 0)
from RandomVideoId import *
from DBConnect import *
from HTTPRequest import *

# Generate and print one random video id.
# NOTE(review): the name `random` shadows the stdlib `random` module.
random = RandomVideoId()
print(random.randomGenerator())

# Dump the current contents of the randa_videos collection.
db = DBConnect("meteor")
cursor = db.connect().randa_videos.find()
for document in cursor:
    print(document)

# Probe a known-good video id and print the embed info returned.
http = HTTPRequest('D7Qgbs8RpN4')
embed_info = http.httpGet()
print(embed_info)
Code example #7 (votes: 0)
File: Main.py — Project: NoahAsaria/MasterPatientID
import PatientJSONResponse as jsonResponse
import CCDParser as parser
import HTTPRequest as http


POSTTest = http.HTTPRequest('POST')
payload1 = {
  "resourceType": "AllergyIntolerance",
  "id": "ai83726462664827",
  "text": {
    "status": "generated",
    "div": "<div xmlns=\"http://www.w3.org/1999/xhtml\">Penicillin - Hives and Airway compromise</div>"
  },
  "recordedDate": "2008-05-24",
  "patient": {
    "reference": "Patient/siimneela"
  },
  "reporter": {
    "reference": "Practitioner/siimjoe"
  },

  "status": "confirmed",
  "criticality": "high",
  "type": "allergy",
  "category": ["medication"],
  "reaction": [
    {
        "substance": {
            "coding": [
            {
                "system": "http://www.nlm.nih.gov/research/umls/rxnorm",
Code example #8 (votes: 0)
def hello():
    """Flask view for the main page.

    Handles two POST flows: (1) uploading a CCD XML file, which is parsed
    into demographics/allergy data and rendered back into the form, and
    (2) a 'query' submission, which builds FHIR Patient and
    AllergyIntolerance queries from the selected parameters, matches the
    results against the uploaded CCD data and renders the ranked matches.
    Any other request simply renders the empty page.
    """
    # NOTE(review): module-level globals carry state between requests;
    # this is unsafe with concurrent users, and `file` shadows a builtin.
    global demographics_dict
    global file
    global fileType
    global demographicIdentifiers
    if request.method == "POST":
        if 'file' in request.files:  #If the 'file' form button was clicked
            file = request.files[
                "file"]  #Grab file contained with name "file" from HTML page
            if (file.filename == ''):
                return redirect(
                    request.url)  #If nothing was entered, refresh the page
            file.save(os.path.join("UploadedFiles",
                                   file.filename))  #Save the file.
            fileType = pathlib.Path(
                file.filename).suffix.lower()  #Get file extension.
            if (fileType == '.xml'):
                try:
                    demographics = parser.createNewDemographicsInstance(
                        'UploadedFiles/' + file.filename
                    )  #Pull file from uploads folder and parse
                    demographics_dict = demographics.getDemographicDict()
                    allergies_dict = demographics.getAllergiesDict()
                    allergies = allergies_dict['allergies']
                    for i in range(
                            len(allergies), 10
                    ):  #Pad the allergies from file to pass to front-end
                        allergies.append("")
                # NOTE(review): bare except hides the real parse error;
                # consider catching the parser's specific exceptions.
                except:  #If we could not find / parse that file.
                    return render_template(
                        "index.html",
                        file_name=file.filename,
                        file_type=fileType,
                        prompt="Could not parse that file! Try again!")
                # Render the parsed demographics and (padded) allergies
                # back into the form fields.
                return render_template(
                    "index.html",
                    file_name=file.filename,
                    file_type=fileType,
                    given=demographics.getFieldFromDemographicDict('given'),
                    family=demographics.getFieldFromDemographicDict('family'),
                    address=demographics.getFieldFromDemographicDict(
                        'address'),
                    city=demographics.getFieldFromDemographicDict('city'),
                    state=demographics.getFieldFromDemographicDict('state'),
                    postalcode=demographics.getFieldFromDemographicDict(
                        'postalcode'),
                    birthtime=demographics.getFieldFromDemographicDict(
                        'birthtime'),
                    gender=demographics.getFieldFromDemographicDict('gender'),
                    race=demographics.getFieldFromDemographicDict('race'),
                    allergy0=allergies[0],
                    allergy1=allergies[1],
                    allergy2=allergies[2],
                    allergy3=allergies[3],
                    allergy4=allergies[4],
                    allergy5=allergies[5],
                    allergy6=allergies[6],
                    allergy7=allergies[7],
                    allergy8=allergies[8],
                    allergy9=allergies[9])
            else:
                # Non-XML uploads are rejected with a prompt.
                return render_template(
                    "index.html",
                    file_name=file.filename,
                    file_type="N/A",
                    prompt="Please attach an XML file and try again!")
        elif 'query' in request.form:
            #demographicIdentifiers = demographic parameters user selects (after extracted from CCD file) in form
            #allergyidentifiers = allergy parameters user selects (after extracted from CCD file)
            #PatientJSONDicts = demographic data extracted from SIIM Server query (in the form {id : {given : value, family : value ...}}
            #allergyJSONDicts = allergy data extracted from SIIM Server query (in the form {id : {allergies: [code1, code2, ...]}})

            #We can only extract values from HTML tags, so manually get identifiers dict.
            multiselect = request.form.getlist('queryparams')
            demographicIdentifiers = {}
            allergymultiselect = request.form.getlist('allergyparams')
            allergyIdentifiers = {"code": []}
            # Each selected entry arrives as "field:value"; split into the
            # identifiers dict.
            for entry in multiselect:  #Get demographic identifiers
                #print("entry: ", entry)
                split = entry.split(':')
                demographicIdentifiers[split[0]] = split[1]

            # Allergy names are translated to RxNorm codes before querying.
            for entry in allergymultiselect:
                #print("allergy entry: ", entry)
                split = entry.split(':')
                if (split[1] != ''):
                    allergyIdentifiers["code"].append(
                        (rx.lookup(split[1].lower()))
                    )  #This will produce a url like ../AllergyIntolerance?display="Hives"&display="Rashes"... Bascially we need to query by code per SIIM rules

            try:
                #Handle the demographics selected
                GETRequest = http.HTTPRequest('GET')
                GETRequest.setApiEndpoint("http://hackathon.siim.org/fhir")
                GETRequest.setResource("Patient")
                GETRequest.setHeadersDict({'content-type': 'application/json'})
                GETRequest.setIdentifiersDict(demographicIdentifiers)
                GETRequest.setApiKey('d6e052ee-18c9-4f3b-a150-302c998e804c')

                response = GETRequest.executeRequest()
                JSONResponse = jsonResponse.createPatientJSONResponse(response)
                PatientJSONDicts = JSONResponse.getPatientDictionaries()

                #Handle the allergies selected
                GETRequest.setResource("AllergyIntolerance")
                GETRequest.setIdentifiersDict(allergyIdentifiers)
                allergyResponse = GETRequest.executeRequest()
                print(allergyResponse.text)
                allergyJSONResponse = jsonAllergyResponse.createAllergyJSONResponse(
                    allergyResponse)
                AllergyJSONDicts = allergyJSONResponse.getAllergyDictionaries()

                print("CCD Allergy Parameters: ", allergyIdentifiers)
                print("CCD Demographic Parameters: ", demographicIdentifiers)
                print("Allergy JSON Dicts: ", AllergyJSONDicts)
                print("Demographic JSON Dicts: ", PatientJSONDicts)
                #Keep only allergiesJSONDict found also in PatientJSONDict
                AllergyJSONDicts = matcher.intersection(
                    PatientJSONDicts, AllergyJSONDicts)
                allergyMatches = matcher.unweightedAllergyDictionaryMatch(
                    allergyIdentifiers, AllergyJSONDicts)
                allMatches = matcher.weightedPatientDictionaryMatch(
                    demographics_dict, PatientJSONDicts, allergyMatches)
                matchDicts = matcher.sortStringDict(allMatches)

                # Build the HTML fragment shown in the results panel.
                matcher.convertCodesToNames(AllergyJSONDicts)
                text = matcher.formatMatchDict(matchDicts)
                text += "<strong><br>Patient Information Queried: " + str(
                    demographicIdentifiers) + "<br></strong>"
                text += str(demographics_dict)
                text += "<strong><br><br>Demographics Data matched:<br></strong>" + matcher.formatJSONDicts(
                    PatientJSONDicts)
                text += "<strong><br>Allergy Data matched:<br></strong>" + matcher.formatJSONDicts(
                    AllergyJSONDicts)
                entries = JSONResponse.getNumberOfPatientEntries()
            # NOTE(review): any failure in the whole query/match pipeline is
            # reduced to a console "ERROR!" and a redirect -- hard to debug.
            except:
                print("ERROR!")
                return redirect(request.url)
            return render_template("index.html",
                                   showResponse="True",
                                   numResults=entries,
                                   responseText=text)
        else:
            return redirect(request.url)
    return render_template("index.html")
Code example #9 (votes: 0)
File: BurpParse.py — Project: billdeitrick/burpparser
def parse(src, img_dir, srch_csv, docs_csv):
    """Extract search terms, Docs keystrokes and images from a Burp XML log.

    Args:
        src: path to the Burp Suite XML export.
        img_dir: directory where image responses are written.
        srch_csv: output CSV path for captured search terms.
        docs_csv: output CSV path for probable Google Docs keystrokes.
    """
    tree = ET.parse(src)
    root = tree.getroot()

    searches = []
    docs_strokes = []

    # Loop through the logged request/response items.
    for item in root.getchildren():

        # Request method and URL.
        method = item.find('method').text
        url = item.find('url').text

        # Decode and parse the base64-encoded raw request.
        req = item.find('request').text
        req_obj = HTTPRequest(base64.b64decode(req))

        # Decode the (URL-encoded) request body.
        raw_body = req_obj.rfile.read(int(req_obj.headers.getheader('content-length', 0)))
        body = urllib.unquote(raw_body)

        # Parse request form fields and URL query parameters.
        fields = urlparse.parse_qs(body)
        urlparams = urlparse.parse_qs(item.find('path').text)

        # Parse the base64-encoded response; some items have none.
        resp = item.find('response').text
        try:
            resp_obj = HTTPResponse(base64.b64decode(resp))
            resp_body = resp_obj.content
        except Exception:
            resp_obj = None

        # Process POST requests
        if method == 'POST':
            if 'q' in fields:
                searches.append({
                    'value': fields['q'][0],
                    'comment': 'picker',
                    'timestamp': item.find('time').text
                })

            if 'picker/v2/query' in url:
                body_js = json.loads(body)

                if body_js[0] == 'qreq':
                    searches.append({
                        'value': body_js[-1][-1][-1][0],
                        'comment': 'picker',
                        'timestamp': item.find('time').text
                    })

            if 'bundles' in fields:
                try:
                    bundle = json.loads(fields['bundles'][0])[0]
                except Exception:
                    # Bug fix: the original swallowed this error with `pass`
                    # and then used `bundle` anyway, raising NameError (or
                    # silently reusing a stale bundle from a previous item).
                    # Skip this item instead.
                    continue

                for command in bundle['commands']:
                    try:
                        docs_strokes.append({
                            'value': command['s'],
                            'comment': 'Probable Text POST to Docs',
                            'timestamp': item.find('time').text
                        })
                    except Exception:
                        # Command without an 's' payload -- not a keystroke.
                        continue

        # Process GET requests
        if method == 'GET':
            if 'q' in urlparams:
                searches.append({
                    'value': urlparams['q'][0],
                    'comment': 'Probable address bar autocomplete',
                    'timestamp': item.find('time').text
                })

            # Save image responses to disk, named by request timestamp.
            if resp_obj is not None and 'image' in resp_obj.getheader("Content-Type", ""):
                xtn = resp_obj.getheader("Content-Type").split("/")[1]
                with open('{0}/IMG-{1}.{2}'.format(img_dir, item.find('time').text.replace(":", ""), xtn), 'wb') as img_file:
                    img_file.write(resp_body)

    # Write output logs only when something was captured.
    if len(searches) != 0:
        write_dict_list(searches, srch_csv)

    if len(docs_strokes) != 0:
        write_dict_list(docs_strokes, docs_csv)